sync with OpenBSD -current

purplerain 2025-01-08 01:55:14 +00:00
parent a48b7fc94f
commit df306e9b72
Signed by: purplerain
GPG key ID: F42C07F07E2E35B7
1354 changed files with 105229 additions and 31150 deletions

View file

@ -1,4 +1,4 @@
/* $OpenBSD: acpi_apm.c,v 1.3 2023/08/06 14:30:08 tobhe Exp $ */
/* $OpenBSD: acpi_apm.c,v 1.4 2024/10/30 06:16:27 jsg Exp $ */
/*
* Copyright (c) 2005 Thorsten Lockert <tholo@sigmasoft.com>
* Copyright (c) 2005 Jordan Hargrave <jordan@openbsd.org>
@ -138,7 +138,7 @@ acpiioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
error = EBADF;
break;
}
if (get_hibernate_io_function(swdevt[0].sw_dev) == NULL) {
if (get_hibernate_io_function(swdevt[0]) == NULL) {
error = EOPNOTSUPP;
break;
}
@ -225,7 +225,7 @@ request_sleep(int sleepmode)
#ifdef HIBERNATE
if (sleepmode == SLEEP_HIBERNATE) {
if (get_hibernate_io_function(swdevt[0].sw_dev) == NULL)
if (get_hibernate_io_function(swdevt[0]) == NULL)
return EOPNOTSUPP;
}
#endif

View file

@ -1,4 +1,4 @@
/* $OpenBSD: acpicpu.c,v 1.94 2024/07/14 14:04:16 jmatthew Exp $ */
/* $OpenBSD: acpicpu.c,v 1.95 2024/10/22 21:50:02 jsg Exp $ */
/*
* Copyright (c) 2005 Marco Peereboom <marco@openbsd.org>
* Copyright (c) 2015 Philip Guenther <guenther@openbsd.org>
@ -101,8 +101,7 @@ void acpicpu_setperf_ppc_change(struct acpicpu_pss *, int);
/* Make sure throttling bits are valid,a=addr,o=offset,w=width */
#define valid_throttle(o,w,a) (a && w && (o+w)<=31 && (o>4 || (o+w)<=4))
struct acpi_cstate
{
struct acpi_cstate {
SLIST_ENTRY(acpi_cstate) link;
u_short state;

View file

@ -408,8 +408,7 @@ context_set_user(struct context_entry *ce, int v)
* 126 = Type (0 = Read, 1 = Write)
* 127 = Fault bit
*/
struct fault_entry
{
struct fault_entry {
uint64_t lo;
uint64_t hi;
};

View file

@ -1,4 +1,4 @@
/* $OpenBSD: dsdt.c,v 1.271 2024/09/20 02:00:46 jsg Exp $ */
/* $OpenBSD: dsdt.c,v 1.272 2024/12/24 12:06:34 mglocker Exp $ */
/*
* Copyright (c) 2005 Jordan Hargrave <jordan@openbsd.org>
*
@ -2574,6 +2574,7 @@ aml_rwgsb(struct aml_value *conn, int len, int bpos, int blen,
buflen = len;
break;
case 0x0e: /* AttribRawBytes */
case 0x0f: /* AttribRawProcessBytes */
cmdlen = 0;
buflen = len;
break;
@ -2664,6 +2665,7 @@ aml_rwgsb(struct aml_value *conn, int len, int bpos, int blen,
break;
case 0x0b: /* AttribBytes */
case 0x0e: /* AttribRawBytes */
case 0x0f: /* AttribRawProcessBytes */
buflen = len;
break;
default:

View file

@ -1,4 +1,4 @@
/* $OpenBSD: qcgpio.c,v 1.11 2024/07/15 15:33:54 mglocker Exp $ */
/* $OpenBSD: qcgpio.c,v 1.13 2024/12/22 21:55:20 mglocker Exp $ */
/*
* Copyright (c) 2022 Mark Kettenis <kettenis@openbsd.org>
*
@ -25,6 +25,14 @@
#include <dev/acpi/amltypes.h>
#include <dev/acpi/dsdt.h>
//#define QCGPIO_DEBUG
#ifdef QCGPIO_DEBUG
int qcgpio_debug = 1;
#define DPRINTF(l, x...) do { if ((l) <= qcgpio_debug) printf(x); } while (0)
#else
#define DPRINTF(l, x...)
#endif
/* Registers. */
#define TLMM_GPIO_IN_OUT(pin) (0x0004 + 0x1000 * (pin))
#define TLMM_GPIO_IN_OUT_GPIO_IN (1 << 0)
@ -62,6 +70,11 @@ struct qcgpio_intrhand {
void *ih_arg;
};
struct qcgpio_pdcmap {
int pm_pin;
uint32_t pm_irq;
};
struct qcgpio_softc {
struct device sc_dev;
struct acpi_softc *sc_acpi;
@ -73,10 +86,15 @@ struct qcgpio_softc {
void *sc_ih;
uint32_t sc_npins;
int (*sc_pin_map)(int, bus_size_t *);
int (*sc_pin_map)(struct qcgpio_softc *, int,
bus_size_t *);
struct qcgpio_intrhand *sc_pin_ih;
struct acpi_gpio sc_gpio;
struct qcgpio_pdcmap *sc_pdcmap;
uint32_t sc_npdcmap;
uint32_t sc_ipdcmap;
};
int qcgpio_acpi_match(struct device *, void *, void *);
@ -97,9 +115,29 @@ const char *qcgpio_hids[] = {
NULL
};
int qcgpio_sc7180_pin_map(int, bus_size_t *);
int qcgpio_sc8280xp_pin_map(int, bus_size_t *);
int qcgpio_x1e80100_pin_map(int, bus_size_t *);
/* 98b9b2a4-1663-4a5f-82f2-c6c99a394726 */
static uint8_t qcgpio_gpio_dsm_uuid[] = {
0xa4, 0xb2, 0xb9, 0x98, 0x63, 0x16, 0x5f, 0x4a,
0x82, 0xf2, 0xc6, 0xc9, 0x9a, 0x39, 0x47, 0x26
};
#define QCGPIO_GPIO_DSM_REV 0
#define QCGPIO_GPIO_DSM_FUNC_NUM_PINS 2
/* 921b0fd4-567c-43a0-bb14-2648f7b2a18c */
static uint8_t qcgpio_pdc_dsm_uuid[] = {
0xd4, 0x0f, 0x1b, 0x92, 0x7c, 0x56, 0xa0, 0x43,
0xbb, 0x14, 0x26, 0x48, 0xf7, 0xb2, 0xa1, 0x8c
};
#define QCGPIO_PDC_DSM_REV 0
#define QCGPIO_PDC_DSM_FUNC_CIPR 2
int qcgpio_get_nirq(int, union acpi_resource *, void *);
int qcgpio_get_irqs(int, union acpi_resource *, void *);
void qcgpio_fill_pdcmap(struct qcgpio_softc *);
int qcgpio_get_pin_count(struct acpi_softc *, struct aml_node *);
int qcgpio_sc7180_pin_map(struct qcgpio_softc *, int, bus_size_t *);
int qcgpio_sc8280xp_pin_map(struct qcgpio_softc *, int, bus_size_t *);
int qcgpio_x1e80100_pin_map(struct qcgpio_softc *, int, bus_size_t *);
int qcgpio_read_pin(void *, int);
void qcgpio_write_pin(void *, int, int);
@ -108,6 +146,137 @@ void qcgpio_intr_enable(void *, int);
void qcgpio_intr_disable(void *, int);
int qcgpio_intr(void *);
int
qcgpio_get_nirq(int crsidx, union acpi_resource *crs, void *arg)
{
struct qcgpio_softc *sc = arg;
int typ;
typ = AML_CRSTYPE(crs);
switch (typ) {
case LR_EXTIRQ:
sc->sc_npdcmap++;
break;
}
return 0;
}
int
qcgpio_get_irqs(int crsidx, union acpi_resource *crs, void *arg)
{
struct qcgpio_softc *sc = arg;
int typ;
typ = AML_CRSTYPE(crs);
switch (typ) {
case LR_EXTIRQ:
sc->sc_pdcmap[sc->sc_ipdcmap].pm_irq = crs->lr_extirq.irq[0];
sc->sc_pdcmap[sc->sc_ipdcmap].pm_pin = -1;
DPRINTF(1, "%s: irq index %d: irq %d\n",
__func__, sc->sc_ipdcmap,
sc->sc_pdcmap[sc->sc_ipdcmap].pm_irq);
sc->sc_ipdcmap++;
break;
}
return 0;
}
void
qcgpio_fill_pdcmap(struct qcgpio_softc *sc)
{
struct aml_value cmd[4], res, *ref;
int i, j, pin;
uint32_t irq;
bzero(&cmd, sizeof(cmd));
cmd[0].type = AML_OBJTYPE_BUFFER;
cmd[0].v_buffer = (uint8_t *)&qcgpio_pdc_dsm_uuid;
cmd[0].length = sizeof(qcgpio_pdc_dsm_uuid);
/* rev */
cmd[1].type = AML_OBJTYPE_INTEGER;
cmd[1].v_integer = QCGPIO_PDC_DSM_REV;
cmd[1].length = 1;
/* func */
cmd[2].type = AML_OBJTYPE_INTEGER;
cmd[2].v_integer = QCGPIO_PDC_DSM_FUNC_CIPR;
cmd[2].length = 1;
/* not used */
cmd[3].type = AML_OBJTYPE_PACKAGE;
cmd[3].v_integer = 0;
cmd[3].length = 0;
if (aml_evalname(sc->sc_acpi, sc->sc_node, "_DSM", 4, cmd, &res)) {
printf("%s: PDC _DSM failed\n", __func__);
return;
}
for (i = 0; i < res.length; i++) {
ref = res.v_package[i];
if (ref->type != AML_OBJTYPE_PACKAGE ||
ref->length < 3 ||
ref->v_package[0]->type != AML_OBJTYPE_INTEGER ||
ref->v_package[1]->type != AML_OBJTYPE_INTEGER ||
ref->v_package[2]->type != AML_OBJTYPE_INTEGER) {
continue;
}
irq = ref->v_package[2]->v_integer;
pin = ref->v_package[1]->v_integer;
DPRINTF(1, "%s: pdc index %d: probing irq %d, pin %d\n",
__func__, i, irq, pin);
for (j = 0; j < sc->sc_npdcmap; j++) {
if (sc->sc_pdcmap[j].pm_irq == irq) {
sc->sc_pdcmap[j].pm_pin = pin;
break;
}
}
}
#ifdef QCGPIO_DEBUG
for (i = 0; i < sc->sc_npdcmap; i++) {
printf("%s: irq index %d: irq=%d, pin=%d\n",
__func__, i, sc->sc_pdcmap[i].pm_irq,
sc->sc_pdcmap[i].pm_pin);
}
#endif
}
int
qcgpio_get_pin_count(struct acpi_softc *sc, struct aml_node *node)
{
struct aml_value cmd[4];
int64_t npins;
bzero(&cmd, sizeof(cmd));
cmd[0].type = AML_OBJTYPE_BUFFER;
cmd[0].v_buffer = (uint8_t *)&qcgpio_gpio_dsm_uuid;
cmd[0].length = sizeof(qcgpio_gpio_dsm_uuid);
/* rev */
cmd[1].type = AML_OBJTYPE_INTEGER;
cmd[1].v_integer = QCGPIO_GPIO_DSM_REV;
cmd[1].length = 1;
/* func */
cmd[2].type = AML_OBJTYPE_INTEGER;
cmd[2].v_integer = QCGPIO_GPIO_DSM_FUNC_NUM_PINS;
cmd[2].length = 1;
/* not used */
cmd[3].type = AML_OBJTYPE_PACKAGE;
cmd[3].v_integer = 0;
cmd[3].length = 0;
if (aml_evalinteger(sc, node, "_DSM", 4, cmd, &npins)) {
printf("%s: GPIO _DSM failed\n", __func__);
return 0;
}
return (uint32_t)npins;
}
int
qcgpio_acpi_match(struct device *parent, void *match, void *aux)
{
@ -124,6 +293,7 @@ qcgpio_acpi_attach(struct device *parent, struct device *self, void *aux)
{
struct acpi_attach_args *aaa = aux;
struct qcgpio_softc *sc = (struct qcgpio_softc *)self;
struct aml_value res;
sc->sc_acpi = (struct acpi_softc *)parent;
sc->sc_node = aaa->aaa_node;
@ -145,7 +315,25 @@ qcgpio_acpi_attach(struct device *parent, struct device *self, void *aux)
sc->sc_npins = 228;
sc->sc_pin_map = qcgpio_sc8280xp_pin_map;
} else if (strcmp(aaa->aaa_dev, "QCOM0C0C") == 0) {
sc->sc_npins = 239;
if (aml_evalname(sc->sc_acpi, sc->sc_node, "_CRS", 0, NULL,
&res)) {
printf("no _CRS method\n");
return;
}
if (res.type != AML_OBJTYPE_BUFFER || res.length < 5) {
printf("invalid _CRS object\n");
aml_freevalue(&res);
return;
}
aml_parse_resource(&res, qcgpio_get_nirq, sc);
DPRINTF(1, "\n%s: npdcmap=%d\n", __func__, sc->sc_npdcmap);
sc->sc_pdcmap = mallocarray(sc->sc_npdcmap,
sizeof(*sc->sc_pdcmap), M_DEVBUF, M_WAITOK | M_ZERO);
aml_parse_resource(&res, qcgpio_get_irqs, sc);
aml_freevalue(&res);
qcgpio_fill_pdcmap(sc);
sc->sc_npins = qcgpio_get_pin_count(sc->sc_acpi, sc->sc_node);
DPRINTF(1, "%s: npins=%d\n", __func__, sc->sc_npins);
sc->sc_pin_map = qcgpio_x1e80100_pin_map;
}
KASSERT(sc->sc_npins != 0);
@ -180,11 +368,12 @@ unmap:
if (sc->sc_ih)
acpi_intr_disestablish(sc->sc_ih);
free(sc->sc_pin_ih, M_DEVBUF, sc->sc_npins * sizeof(*sc->sc_pin_ih));
free(sc->sc_pdcmap, M_DEVBUF, sc->sc_npdcmap * sizeof(*sc->sc_pdcmap));
bus_space_unmap(sc->sc_iot, sc->sc_ioh, aaa->aaa_size[0]);
}
int
qcgpio_sc7180_pin_map(int pin, bus_size_t *off)
qcgpio_sc7180_pin_map(struct qcgpio_softc *sc, int pin, bus_size_t *off)
{
switch (pin) {
case 30:
@ -211,7 +400,7 @@ qcgpio_sc7180_pin_map(int pin, bus_size_t *off)
}
int
qcgpio_sc8280xp_pin_map(int pin, bus_size_t *off)
qcgpio_sc8280xp_pin_map(struct qcgpio_softc *sc, int pin, bus_size_t *off)
{
switch (pin) {
case 107:
@ -229,21 +418,19 @@ qcgpio_sc8280xp_pin_map(int pin, bus_size_t *off)
}
int
qcgpio_x1e80100_pin_map(int pin, bus_size_t *off)
qcgpio_x1e80100_pin_map(struct qcgpio_softc *sc, int pin, bus_size_t *off)
{
switch (pin) {
case 3:
case 51:
return pin;
case 0x180:
return 67;
case 0x380:
return 33;
case 0x3c0:
return 3;
default:
return -1;
int real_pin = -1;
if (pin < sc->sc_npins) {
real_pin = pin;
} else if (pin / 64 < sc->sc_npdcmap) {
real_pin = sc->sc_pdcmap[pin / 64].pm_pin;
}
DPRINTF(2, "%s: map pin %d to real_pin %d\n", __func__, pin, real_pin);
return real_pin;
}
int
@ -253,7 +440,7 @@ qcgpio_read_pin(void *cookie, int pin)
bus_size_t off = 0;
uint32_t reg;
pin = sc->sc_pin_map(pin, &off);
pin = sc->sc_pin_map(sc, pin, &off);
if (pin < 0 || pin >= sc->sc_npins)
return 0;
@ -267,7 +454,7 @@ qcgpio_write_pin(void *cookie, int pin, int val)
struct qcgpio_softc *sc = cookie;
bus_size_t off = 0;
pin = sc->sc_pin_map(pin, &off);
pin = sc->sc_pin_map(sc, pin, &off);
if (pin < 0 || pin >= sc->sc_npins)
return;
@ -288,7 +475,7 @@ qcgpio_intr_establish(void *cookie, int pin, int flags,
bus_size_t off = 0;
uint32_t reg;
pin = sc->sc_pin_map(pin, &off);
pin = sc->sc_pin_map(sc, pin, &off);
if (pin < 0 || pin >= sc->sc_npins)
return;
@ -335,7 +522,7 @@ qcgpio_intr_enable(void *cookie, int pin)
struct qcgpio_softc *sc = cookie;
bus_size_t off = 0;
pin = sc->sc_pin_map(pin, &off);
pin = sc->sc_pin_map(sc, pin, &off);
if (pin < 0 || pin >= sc->sc_npins)
return;
@ -349,7 +536,7 @@ qcgpio_intr_disable(void *cookie, int pin)
struct qcgpio_softc *sc = cookie;
bus_size_t off = 0;
pin = sc->sc_pin_map(pin, &off);
pin = sc->sc_pin_map(sc, pin, &off);
if (pin < 0 || pin >= sc->sc_npins)
return;
@ -369,7 +556,7 @@ qcgpio_intr(void *arg)
if (sc->sc_pin_ih[pin].ih_func == NULL)
continue;
sc->sc_pin_map(pin, &off);
sc->sc_pin_map(sc, pin, &off);
stat = HREAD4(sc, off + TLMM_GPIO_INTR_STATUS(pin));
if (stat & TLMM_GPIO_INTR_STATUS_INTR_STATUS) {
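
The new x1e80100 pin map above stops hardcoding magic GpioInt numbers: values below the _DSM-reported pin count are plain TLMM pins, anything larger selects PDC map entry pin / 64, filled in from _CRS and the PDC _DSM. A standalone sketch of that decoding (types simplified, function name invented):

#include <stdint.h>

/* Simplified copy of the driver's per-interrupt mapping entry. */
struct pdcmap {
	int		pm_pin;		/* real TLMM pin, -1 if unknown */
	uint32_t	pm_irq;		/* PDC interrupt number */
};

/*
 * Decode an ACPI GpioInt pin number the way qcgpio_x1e80100_pin_map()
 * above does: small numbers are real pins, larger ones encode a PDC
 * interrupt index in pin / 64.  Illustrative only, not driver code.
 */
static int
x1e_decode_pin(int pin, unsigned int npins,
    const struct pdcmap *map, unsigned int nmap)
{
	if (pin >= 0 && (unsigned int)pin < npins)
		return pin;				/* plain TLMM pin */
	if (pin >= 0 && (unsigned int)(pin / 64) < nmap)
		return map[pin / 64].pm_pin;		/* PDC-routed pin */
	return -1;
}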

View file

@ -1,4 +1,4 @@
/* $OpenBSD: audio.c,v 1.208 2024/08/20 07:44:36 mvs Exp $ */
/* $OpenBSD: audio.c,v 1.210 2024/12/30 02:46:00 guenther Exp $ */
/*
* Copyright (c) 2015 Alexandre Ratchov <alex@caoua.org>
*
@ -88,8 +88,7 @@ struct audio_buf {
};
#if NWSKBD > 0
struct wskbd_vol
{
struct wskbd_vol {
int val; /* index of the value control */
int mute; /* index of the mute control */
int step; /* increment/decrement step */
@ -1753,9 +1752,6 @@ audio_ioctl(struct audio_softc *sc, unsigned long cmd, void *addr)
tsleep_nsec(&sc->quiesce, 0, "au_qio", INFSLP);
switch (cmd) {
case FIONBIO:
/* All handled in the upper FS layer. */
break;
case AUDIO_GETPOS:
mtx_enter(&audio_lock);
ap = (struct audio_pos *)addr;
@ -1908,9 +1904,6 @@ audio_ioctl_mixer(struct audio_softc *sc, unsigned long cmd, void *addr,
tsleep_nsec(&sc->quiesce, 0, "mix_qio", INFSLP);
switch (cmd) {
case FIONBIO:
/* All handled in the upper FS layer. */
break;
case AUDIO_MIXER_DEVINFO:
return audio_mixer_devinfo(sc, addr);
case AUDIO_MIXER_READ:

View file

@ -1,4 +1,4 @@
/* $OpenBSD: dt_dev.c,v 1.37 2024/09/06 08:38:21 mpi Exp $ */
/* $OpenBSD: dt_dev.c,v 1.42 2024/12/04 09:37:33 mpi Exp $ */
/*
* Copyright (c) 2019 Martin Pieuchot <mpi@openbsd.org>
@ -85,14 +85,37 @@
#define DPRINTF(x...) /* nothing */
/*
* Per-CPU Event States
*
* Locks used to protect struct members:
* r owned by thread doing read(2)
* c owned by CPU
* s sliced ownership, based on read/write indexes
* p written by CPU, read by thread doing read(2)
*/
struct dt_cpubuf {
unsigned int dc_prod; /* [r] read index */
unsigned int dc_cons; /* [c] write index */
struct dt_evt *dc_ring; /* [s] ring of event states */
unsigned int dc_inevt; /* [c] in event already? */
/* Counters */
unsigned int dc_dropevt; /* [p] # of events dropped */
unsigned int dc_skiptick; /* [p] # of ticks skipped */
unsigned int dc_recurevt; /* [p] # of recursive events */
unsigned int dc_readevt; /* [r] # of events read */
};
/*
* Descriptor associated with each program opening /dev/dt. It is used
* to keep track of enabled PCBs.
*
* Locks used to protect struct members in this file:
* a atomic
* m per-softc mutex
* K kernel lock
* r owned by thread doing read(2)
* I invariant after initialization
*/
struct dt_softc {
SLIST_ENTRY(dt_softc) ds_next; /* [K] descriptor list */
@ -100,15 +123,12 @@ struct dt_softc {
pid_t ds_pid; /* [I] PID of tracing program */
void *ds_si; /* [I] to defer wakeup(9) */
struct mutex ds_mtx;
struct dt_pcb_list ds_pcbs; /* [K] list of enabled PCBs */
int ds_recording; /* [K] currently recording? */
int ds_evtcnt; /* [m] # of readable evts */
unsigned int ds_evtcnt; /* [a] # of readable evts */
/* Counters */
uint64_t ds_readevt; /* [m] # of events read */
uint64_t ds_dropevt; /* [m] # of events dropped */
struct dt_cpubuf ds_cpu[MAXCPUS]; /* [I] Per-cpu event states */
unsigned int ds_lastcpu; /* [r] last CPU ring read(2). */
};
SLIST_HEAD(, dt_softc) dtdev_list; /* [K] list of open /dev/dt nodes */
@ -132,6 +152,8 @@ int dtread(dev_t, struct uio *, int);
int dtioctl(dev_t, u_long, caddr_t, int, struct proc *);
struct dt_softc *dtlookup(int);
struct dt_softc *dtalloc(void);
void dtfree(struct dt_softc *);
int dt_ioctl_list_probes(struct dt_softc *, struct dtioc_probe *);
int dt_ioctl_get_args(struct dt_softc *, struct dtioc_arg *);
@ -142,8 +164,7 @@ int dt_ioctl_probe_enable(struct dt_softc *, struct dtioc_req *);
int dt_ioctl_probe_disable(struct dt_softc *, struct dtioc_req *);
int dt_ioctl_get_auxbase(struct dt_softc *, struct dtioc_getaux *);
int dt_pcb_ring_copy(struct dt_pcb *, struct uio *, size_t, size_t *,
uint64_t *);
int dt_ring_copy(struct dt_cpubuf *, struct uio *, size_t, size_t *);
void dt_wakeup(struct dt_softc *);
void dt_deferred_wakeup(void *);
@ -172,28 +193,21 @@ dtopen(dev_t dev, int flags, int mode, struct proc *p)
if (atomic_load_int(&allowdt) == 0)
return EPERM;
sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_CANFAIL|M_ZERO);
sc = dtalloc();
if (sc == NULL)
return ENOMEM;
/* no sleep after this point */
if (dtlookup(unit) != NULL) {
free(sc, M_DEVBUF, sizeof(*sc));
dtfree(sc);
return EBUSY;
}
sc->ds_unit = unit;
sc->ds_pid = p->p_p->ps_pid;
TAILQ_INIT(&sc->ds_pcbs);
mtx_init(&sc->ds_mtx, IPL_HIGH);
sc->ds_lastcpu = 0;
sc->ds_evtcnt = 0;
sc->ds_readevt = 0;
sc->ds_dropevt = 0;
sc->ds_si = softintr_establish(IPL_SOFTCLOCK, dt_deferred_wakeup, sc);
if (sc->ds_si == NULL) {
free(sc, M_DEVBUF, sizeof(*sc));
return ENOMEM;
}
SLIST_INSERT_HEAD(&dtdev_list, sc, ds_next);
@ -216,9 +230,7 @@ dtclose(dev_t dev, int flags, int mode, struct proc *p)
SLIST_REMOVE(&dtdev_list, sc, dt_softc, ds_next);
dt_ioctl_record_stop(sc);
dt_pcb_purge(&sc->ds_pcbs);
softintr_disestablish(sc->ds_si);
free(sc, M_DEVBUF, sizeof(*sc));
dtfree(sc);
return 0;
}
@ -227,10 +239,9 @@ int
dtread(dev_t dev, struct uio *uio, int flags)
{
struct dt_softc *sc;
struct dt_pcb *dp;
int error = 0, unit = minor(dev);
struct dt_cpubuf *dc;
int i, error = 0, unit = minor(dev);
size_t count, max, read = 0;
uint64_t dropped = 0;
sc = dtlookup(unit);
KASSERT(sc != NULL);
@ -239,9 +250,9 @@ dtread(dev_t dev, struct uio *uio, int flags)
if (max < 1)
return (EMSGSIZE);
while (!sc->ds_evtcnt) {
while (!atomic_load_int(&sc->ds_evtcnt)) {
sleep_setup(sc, PWAIT | PCATCH, "dtread");
error = sleep_finish(0, !sc->ds_evtcnt);
error = sleep_finish(0, !atomic_load_int(&sc->ds_evtcnt));
if (error == EINTR || error == ERESTART)
break;
}
@ -249,9 +260,10 @@ dtread(dev_t dev, struct uio *uio, int flags)
return error;
KERNEL_ASSERT_LOCKED();
TAILQ_FOREACH(dp, &sc->ds_pcbs, dp_snext) {
for (i = 0; i < ncpusfound; i++) {
count = 0;
error = dt_pcb_ring_copy(dp, uio, max, &count, &dropped);
dc = &sc->ds_cpu[(sc->ds_lastcpu + i) % ncpusfound];
error = dt_ring_copy(dc, uio, max, &count);
if (error && count == 0)
break;
@ -260,12 +272,9 @@ dtread(dev_t dev, struct uio *uio, int flags)
if (max == 0)
break;
}
sc->ds_lastcpu += i % ncpusfound;
mtx_enter(&sc->ds_mtx);
sc->ds_evtcnt -= read;
sc->ds_readevt += read;
sc->ds_dropevt += dropped;
mtx_leave(&sc->ds_mtx);
atomic_sub_int(&sc->ds_evtcnt, read);
return error;
}
@ -339,6 +348,54 @@ dtlookup(int unit)
return sc;
}
struct dt_softc *
dtalloc(void)
{
struct dt_softc *sc;
struct dt_evt *dtev;
int i;
sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_CANFAIL|M_ZERO);
if (sc == NULL)
return NULL;
for (i = 0; i < ncpusfound; i++) {
dtev = mallocarray(DT_EVTRING_SIZE, sizeof(*dtev), M_DEVBUF,
M_WAITOK|M_CANFAIL|M_ZERO);
if (dtev == NULL)
break;
sc->ds_cpu[i].dc_ring = dtev;
}
if (i < ncpusfound) {
dtfree(sc);
return NULL;
}
sc->ds_si = softintr_establish(IPL_SOFTCLOCK, dt_deferred_wakeup, sc);
if (sc->ds_si == NULL) {
dtfree(sc);
return NULL;
}
return sc;
}
void
dtfree(struct dt_softc *sc)
{
struct dt_evt *dtev;
int i;
if (sc->ds_si != NULL)
softintr_disestablish(sc->ds_si);
for (i = 0; i < ncpusfound; i++) {
dtev = sc->ds_cpu[i].dc_ring;
free(dtev, M_DEVBUF, DT_EVTRING_SIZE * sizeof(*dtev));
}
free(sc, M_DEVBUF, sizeof(*sc));
}
int
dt_ioctl_list_probes(struct dt_softc *sc, struct dtioc_probe *dtpr)
{
@ -434,11 +491,25 @@ dt_ioctl_get_args(struct dt_softc *sc, struct dtioc_arg *dtar)
int
dt_ioctl_get_stats(struct dt_softc *sc, struct dtioc_stat *dtst)
{
mtx_enter(&sc->ds_mtx);
dtst->dtst_readevt = sc->ds_readevt;
dtst->dtst_dropevt = sc->ds_dropevt;
mtx_leave(&sc->ds_mtx);
struct dt_cpubuf *dc;
uint64_t readevt, dropevt, skiptick, recurevt;
int i;
readevt = dropevt = skiptick = 0;
for (i = 0; i < ncpusfound; i++) {
dc = &sc->ds_cpu[i];
membar_consumer();
dropevt += dc->dc_dropevt;
skiptick = dc->dc_skiptick;
recurevt = dc->dc_recurevt;
readevt += dc->dc_readevt;
}
dtst->dtst_readevt = readevt;
dtst->dtst_dropevt = dropevt;
dtst->dtst_skiptick = skiptick;
dtst->dtst_recurevt = recurevt;
return 0;
}
@ -518,6 +589,7 @@ dt_ioctl_probe_enable(struct dt_softc *sc, struct dtioc_req *dtrq)
{
struct dt_pcb_list plist;
struct dt_probe *dtp;
struct dt_pcb *dp;
int error;
SIMPLEQ_FOREACH(dtp, &dt_probe_list, dtp_next) {
@ -527,6 +599,12 @@ dt_ioctl_probe_enable(struct dt_softc *sc, struct dtioc_req *dtrq)
if (dtp == NULL)
return ENOENT;
/* Only allow one probe of each type. */
TAILQ_FOREACH(dp, &sc->ds_pcbs, dp_snext) {
if (dp->dp_dtp->dtp_pbn == dtrq->dtrq_pbn)
return EEXIST;
}
TAILQ_INIT(&plist);
error = dtp->dtp_prov->dtpv_alloc(dtp, sc, &plist, dtrq);
if (error)
@ -637,28 +715,16 @@ dt_pcb_alloc(struct dt_probe *dtp, struct dt_softc *sc)
dp = malloc(sizeof(*dp), M_DT, M_WAITOK|M_CANFAIL|M_ZERO);
if (dp == NULL)
goto bad;
return NULL;
dp->dp_ring = mallocarray(DT_EVTRING_SIZE, sizeof(*dp->dp_ring), M_DT,
M_WAITOK|M_CANFAIL|M_ZERO);
if (dp->dp_ring == NULL)
goto bad;
mtx_init(&dp->dp_mtx, IPL_HIGH);
dp->dp_sc = sc;
dp->dp_dtp = dtp;
return dp;
bad:
dt_pcb_free(dp);
return NULL;
}
void
dt_pcb_free(struct dt_pcb *dp)
{
if (dp == NULL)
return;
free(dp->dp_ring, M_DT, DT_EVTRING_SIZE * sizeof(*dp->dp_ring));
free(dp, M_DT, sizeof(*dp));
}
@ -673,6 +739,15 @@ dt_pcb_purge(struct dt_pcb_list *plist)
}
}
void
dt_pcb_ring_skiptick(struct dt_pcb *dp, unsigned int skip)
{
struct dt_cpubuf *dc = &dp->dp_sc->ds_cpu[cpu_number()];
dc->dc_skiptick += skip;
membar_producer();
}
/*
* Get a reference to the next free event state from the ring.
*/
@ -681,21 +756,34 @@ dt_pcb_ring_get(struct dt_pcb *dp, int profiling)
{
struct proc *p = curproc;
struct dt_evt *dtev;
int distance;
int prod, cons, distance;
struct dt_cpubuf *dc = &dp->dp_sc->ds_cpu[cpu_number()];
mtx_enter(&dp->dp_mtx);
distance = dp->dp_prod - dp->dp_cons;
if (dc->dc_inevt == 1) {
dc->dc_recurevt++;
membar_producer();
return NULL;
}
dc->dc_inevt = 1;
membar_consumer();
prod = dc->dc_prod;
cons = dc->dc_cons;
distance = prod - cons;
if (distance == 1 || distance == (1 - DT_EVTRING_SIZE)) {
/* read(2) isn't finished */
dp->dp_dropevt++;
mtx_leave(&dp->dp_mtx);
dc->dc_dropevt++;
membar_producer();
dc->dc_inevt = 0;
return NULL;
}
/*
* Save states in next free event slot.
*/
dtev = &dp->dp_ring[dp->dp_cons];
dtev = &dc->dc_ring[cons];
memset(dtev, 0, sizeof(*dtev));
dtev->dtev_pbn = dp->dp_dtp->dtp_pbn;
@ -722,25 +810,25 @@ dt_pcb_ring_get(struct dt_pcb *dp, int profiling)
void
dt_pcb_ring_consume(struct dt_pcb *dp, struct dt_evt *dtev)
{
MUTEX_ASSERT_LOCKED(&dp->dp_mtx);
KASSERT(dtev == &dp->dp_ring[dp->dp_cons]);
struct dt_cpubuf *dc = &dp->dp_sc->ds_cpu[cpu_number()];
dp->dp_cons = (dp->dp_cons + 1) % DT_EVTRING_SIZE;
mtx_leave(&dp->dp_mtx);
KASSERT(dtev == &dc->dc_ring[dc->dc_cons]);
dc->dc_cons = (dc->dc_cons + 1) % DT_EVTRING_SIZE;
membar_producer();
atomic_inc_int(&dp->dp_sc->ds_evtcnt);
dc->dc_inevt = 0;
mtx_enter(&dp->dp_sc->ds_mtx);
dp->dp_sc->ds_evtcnt++;
mtx_leave(&dp->dp_sc->ds_mtx);
dt_wakeup(dp->dp_sc);
}
/*
* Copy at most `max' events from `dp', producing the same amount
* Copy at most `max' events from `dc', producing the same amount
* of free slots.
*/
int
dt_pcb_ring_copy(struct dt_pcb *dp, struct uio *uio, size_t max,
size_t *rcvd, uint64_t *dropped)
dt_ring_copy(struct dt_cpubuf *dc, struct uio *uio, size_t max, size_t *rcvd)
{
size_t count, copied = 0;
unsigned int cons, prod;
@ -748,12 +836,9 @@ dt_pcb_ring_copy(struct dt_pcb *dp, struct uio *uio, size_t max,
KASSERT(max > 0);
mtx_enter(&dp->dp_mtx);
cons = dp->dp_cons;
prod = dp->dp_prod;
*dropped += dp->dp_dropevt;
dp->dp_dropevt = 0;
mtx_leave(&dp->dp_mtx);
membar_consumer();
cons = dc->dc_cons;
prod = dc->dc_prod;
if (cons < prod)
count = DT_EVTRING_SIZE - prod;
@ -764,7 +849,7 @@ dt_pcb_ring_copy(struct dt_pcb *dp, struct uio *uio, size_t max,
return 0;
count = MIN(count, max);
error = uiomove(&dp->dp_ring[prod], count * sizeof(struct dt_evt), uio);
error = uiomove(&dc->dc_ring[prod], count * sizeof(struct dt_evt), uio);
if (error)
return error;
copied += count;
@ -777,7 +862,7 @@ dt_pcb_ring_copy(struct dt_pcb *dp, struct uio *uio, size_t max,
goto out;
count = MIN(cons, (max - copied));
error = uiomove(&dp->dp_ring[0], count * sizeof(struct dt_evt), uio);
error = uiomove(&dc->dc_ring[0], count * sizeof(struct dt_evt), uio);
if (error)
goto out;
@ -785,9 +870,9 @@ dt_pcb_ring_copy(struct dt_pcb *dp, struct uio *uio, size_t max,
prod += count;
out:
mtx_enter(&dp->dp_mtx);
dp->dp_prod = prod;
mtx_leave(&dp->dp_mtx);
dc->dc_readevt += copied;
dc->dc_prod = prod;
membar_producer();
*rcvd = copied;
return error;
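
The dt(4) rework above replaces the per-PCB mutex-protected ring with one single-producer/single-consumer ring per CPU, ordered only by membar_producer()/membar_consumer(). A minimal userland sketch of that pattern, using C11 atomics in place of the kernel membars (ring and function names invented here):

#include <stdatomic.h>
#include <stdbool.h>

#define RING_SIZE 128

struct spsc_ring {
	_Atomic unsigned int widx;	/* written only by the producer */
	_Atomic unsigned int ridx;	/* written only by the consumer */
	int slot[RING_SIZE];
};

/* Producer side: returns false when the reader has not caught up. */
static bool
ring_put(struct spsc_ring *r, int v)
{
	unsigned int w = atomic_load_explicit(&r->widx, memory_order_relaxed);
	unsigned int rd = atomic_load_explicit(&r->ridx, memory_order_acquire);

	if ((w + 1) % RING_SIZE == rd)
		return false;			/* ring full, drop the event */
	r->slot[w] = v;
	/* publish the slot before the index, like membar_producer() */
	atomic_store_explicit(&r->widx, (w + 1) % RING_SIZE,
	    memory_order_release);
	return true;
}

/* Consumer side: returns false when the ring is empty. */
static bool
ring_get(struct spsc_ring *r, int *v)
{
	unsigned int rd = atomic_load_explicit(&r->ridx, memory_order_relaxed);
	unsigned int w = atomic_load_explicit(&r->widx, memory_order_acquire);

	if (rd == w)
		return false;			/* nothing to read */
	*v = r->slot[rd];
	/* release the slot only after it has been copied out */
	atomic_store_explicit(&r->ridx, (rd + 1) % RING_SIZE,
	    memory_order_release);
	return true;
}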

View file

@ -1,4 +1,4 @@
/* $OpenBSD: dt_prov_kprobe.c,v 1.8 2024/04/06 11:18:02 mpi Exp $ */
/* $OpenBSD: dt_prov_kprobe.c,v 1.9 2024/11/08 12:28:00 mpi Exp $ */
/*
* Copyright (c) 2020 Tom Rollet <tom.rollet@epita.fr>
@ -120,7 +120,7 @@ dt_prov_kprobe_init(void)
struct dt_probe *dtp;
struct kprobe_probe *kprobe_dtp;
Elf_Sym *symp, *symtab_start, *symtab_end;
char *strtab, *name;
const char *strtab, *name;
vaddr_t inst, limit;
int nb_sym, nb_probes;

View file

@ -1,4 +1,4 @@
/* $OpenBSD: dt_prov_profile.c,v 1.8 2024/04/06 11:18:02 mpi Exp $ */
/* $OpenBSD: dt_prov_profile.c,v 1.9 2024/11/26 10:28:27 mpi Exp $ */
/*
* Copyright (c) 2019 Martin Pieuchot <mpi@openbsd.org>
@ -101,15 +101,18 @@ dt_prov_profile_alloc(struct dt_probe *dtp, struct dt_softc *sc,
void
dt_clock(struct clockrequest *cr, void *cf, void *arg)
{
uint64_t count, i;
uint64_t count;
struct dt_evt *dtev;
struct dt_pcb *dp = arg;
count = clockrequest_advance(cr, dp->dp_nsecs);
for (i = 0; i < count; i++) {
dtev = dt_pcb_ring_get(dp, 1);
if (dtev == NULL)
return;
dt_pcb_ring_consume(dp, dtev);
}
if (count == 0)
return;
else if (count > 1)
dt_pcb_ring_skiptick(dp, count - 1);
dtev = dt_pcb_ring_get(dp, 1);
if (dtev == NULL)
return;
dt_pcb_ring_consume(dp, dtev);
}

View file

@ -1,4 +1,4 @@
/* $OpenBSD: dtvar.h,v 1.19 2024/04/06 11:18:02 mpi Exp $ */
/* $OpenBSD: dtvar.h,v 1.21 2024/11/26 10:28:27 mpi Exp $ */
/*
* Copyright (c) 2019 Martin Pieuchot <mpi@openbsd.org>
@ -116,6 +116,8 @@ struct dtioc_req {
struct dtioc_stat {
uint64_t dtst_readevt; /* events read */
uint64_t dtst_dropevt; /* events dropped */
uint64_t dtst_skiptick; /* clock ticks skipped */
uint64_t dtst_recurevt; /* recursive events */
};
struct dtioc_getaux {
@ -163,12 +165,6 @@ struct dt_pcb {
SMR_SLIST_ENTRY(dt_pcb) dp_pnext; /* [K,S] next PCB per probe */
TAILQ_ENTRY(dt_pcb) dp_snext; /* [K] next PCB per softc */
/* Event states ring */
unsigned int dp_prod; /* [m] read index */
unsigned int dp_cons; /* [m] write index */
struct dt_evt *dp_ring; /* [m] ring of event states */
struct mutex dp_mtx;
struct dt_softc *dp_sc; /* [I] related softc */
struct dt_probe *dp_dtp; /* [I] related probe */
uint64_t dp_evtflags; /* [I] event states to record */
@ -177,9 +173,6 @@ struct dt_pcb {
struct clockintr dp_clockintr; /* [D] profiling handle */
uint64_t dp_nsecs; /* [I] profiling period */
struct cpu_info *dp_cpu; /* [I] on which CPU */
/* Counters */
uint64_t dp_dropevt; /* [m] # dropped event */
};
TAILQ_HEAD(dt_pcb_list, dt_pcb);
@ -188,6 +181,7 @@ struct dt_pcb *dt_pcb_alloc(struct dt_probe *, struct dt_softc *);
void dt_pcb_free(struct dt_pcb *);
void dt_pcb_purge(struct dt_pcb_list *);
void dt_pcb_ring_skiptick(struct dt_pcb *, unsigned int);
struct dt_evt *dt_pcb_ring_get(struct dt_pcb *, int);
void dt_pcb_ring_consume(struct dt_pcb *, struct dt_evt *);

View file

@ -1,4 +1,4 @@
/* $OpenBSD: efiio.h,v 1.1 2023/01/14 12:11:11 kettenis Exp $ */
/* $OpenBSD: efiio.h,v 1.2 2024/10/22 21:50:02 jsg Exp $ */
/*-
* Copyright (c) 2016 Netflix, Inc.
* Copyright (c) 2022 3mdeb <contact@3mdeb.com>
@ -55,16 +55,14 @@ struct efi_esrt_entry_v1 {
uint32_t last_attempt_status;
};
struct efi_get_table_ioc
{
struct efi_get_table_ioc {
void *buf; /* Pointer to userspace buffer */
struct uuid uuid; /* UUID to look up */
size_t table_len; /* Table size */
size_t buf_len; /* Size of the buffer */
};
struct efi_var_ioc
{
struct efi_var_ioc {
uint16_t *name; /* User pointer to name, in UCS2 chars */
size_t namesize; /* Number of *bytes* in the name including
terminator */

View file

@ -1,4 +1,4 @@
/* $OpenBSD: dwmmc.c,v 1.29 2023/07/01 08:27:26 jsing Exp $ */
/* $OpenBSD: dwmmc.c,v 1.31 2024/12/19 18:02:47 patrick Exp $ */
/*
* Copyright (c) 2017 Mark Kettenis
*
@ -28,6 +28,7 @@
#include <dev/ofw/ofw_clock.h>
#include <dev/ofw/ofw_gpio.h>
#include <dev/ofw/ofw_pinctrl.h>
#include <dev/ofw/ofw_regulator.h>
#include <dev/ofw/fdt.h>
#include <dev/sdmmc/sdmmcvar.h>
@ -212,6 +213,7 @@ struct dwmmc_softc {
uint32_t sc_gpio[4];
int sc_sdio_irq;
uint32_t sc_vqmmc;
uint32_t sc_pwrseq;
uint32_t sc_vdd;
@ -241,6 +243,7 @@ int dwmmc_bus_width(sdmmc_chipset_handle_t, int);
void dwmmc_exec_command(sdmmc_chipset_handle_t, struct sdmmc_command *);
void dwmmc_card_intr_mask(sdmmc_chipset_handle_t, int);
void dwmmc_card_intr_ack(sdmmc_chipset_handle_t);
int dwmmc_signal_voltage(sdmmc_chipset_handle_t, int);
struct sdmmc_chip_functions dwmmc_chip_functions = {
.host_reset = dwmmc_host_reset,
@ -253,6 +256,7 @@ struct sdmmc_chip_functions dwmmc_chip_functions = {
.exec_command = dwmmc_exec_command,
.card_intr_mask = dwmmc_card_intr_mask,
.card_intr_ack = dwmmc_card_intr_ack,
.signal_voltage = dwmmc_signal_voltage,
};
void dwmmc_pio_mode(struct dwmmc_softc *);
@ -379,6 +383,7 @@ dwmmc_attach(struct device *parent, struct device *self, void *aux)
gpio_controller_config_pin(sc->sc_gpio, GPIO_CONFIG_INPUT);
sc->sc_sdio_irq = (OF_getproplen(sc->sc_node, "cap-sdio-irq") == 0);
sc->sc_vqmmc = OF_getpropint(sc->sc_node, "vqmmc-supply", 0);
sc->sc_pwrseq = OF_getpropint(sc->sc_node, "mmc-pwrseq", 0);
printf(": %d MHz base clock\n", sc->sc_clkbase / 1000000);
@ -1252,6 +1257,7 @@ void
dwmmc_pwrseq_post(uint32_t phandle)
{
uint32_t *gpios, *gpio;
int post_delay;
int node;
int len;
@ -1275,5 +1281,35 @@ dwmmc_pwrseq_post(uint32_t phandle)
gpio = gpio_controller_next_pin(gpio);
}
post_delay = OF_getpropint(node, "post-power-on-delay-ms", 0);
if (post_delay)
delay(post_delay * 1000);
free(gpios, M_TEMP, len);
}
int
dwmmc_signal_voltage(sdmmc_chipset_handle_t sch, int signal_voltage)
{
struct dwmmc_softc *sc = sch;
uint32_t vccq;
if (sc->sc_vqmmc == 0)
return ENODEV;
switch (signal_voltage) {
case SDMMC_SIGNAL_VOLTAGE_180:
vccq = 1800000;
break;
case SDMMC_SIGNAL_VOLTAGE_330:
vccq = 3300000;
break;
default:
return EINVAL;
}
if (regulator_get_voltage(sc->sc_vqmmc) == vccq)
return 0;
return regulator_set_voltage(sc->sc_vqmmc, vccq);
}

View file

@ -1,4 +1,4 @@
# $OpenBSD: files.fdt,v 1.203 2024/07/31 10:07:33 mglocker Exp $
# $OpenBSD: files.fdt,v 1.204 2024/11/16 21:17:54 tobhe Exp $
#
# Config file and device description for machine-independent FDT code.
# Included by ports that need it.
@ -684,6 +684,11 @@ device qccpu
attach qccpu at fdt
file dev/fdt/qccpu.c qccpu
# Qualcomm CPUCP Mailbox
device qccpucp
attach qccpucp at fdt
file dev/fdt/qccpucp.c qccpucp
device qcdwusb: fdt
attach qcdwusb at fdt
file dev/fdt/qcdwusb.c qcdwusb

sys/dev/fdt/qccpucp.c (new file, 166 lines added)
View file

@ -0,0 +1,166 @@
/* $OpenBSD: qccpucp.c,v 1.1 2024/11/16 21:17:54 tobhe Exp $ */
/*
* Copyright (c) 2024 Tobias Heider <tobhe@openbsd.org>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <machine/bus.h>
#include <machine/fdt.h>
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_misc.h>
#include <dev/ofw/fdt.h>
#define CPUCP_MAX_CHANNELS 3
/* Registers */
#define CPUCP_REG_CMD(i) (0x104 + ((i) * 8))
#define CPUCP_MASK_CMD 0xffffffffffffffffULL
#define CPUCP_REG_RX_MAP 0x4000
#define CPUCP_REG_RX_STAT 0x4400
#define CPUCP_REG_RX_CLEAR 0x4800
#define CPUCP_REG_RX_EN 0x4C00
#define RXREAD8(sc, reg) \
(bus_space_read_8((sc)->sc_iot, (sc)->sc_rx_ioh, (reg)))
#define RXWRITE8(sc, reg, val) \
bus_space_write_8((sc)->sc_iot, (sc)->sc_rx_ioh, (reg), (val))
#define TXWRITE4(sc, reg, val) \
bus_space_write_4((sc)->sc_iot, (sc)->sc_tx_ioh, (reg), (val))
struct qccpucp_channel {
struct qccpucp_softc *ch_sc;
int ch_idx;
};
struct qccpucp_softc {
struct device sc_dev;
bus_space_tag_t sc_iot;
bus_space_handle_t sc_rx_ioh;
bus_space_handle_t sc_tx_ioh;
void *sc_ih;
struct qccpucp_channel sc_chans[CPUCP_MAX_CHANNELS];
struct mbox_device sc_md;
};
int qccpucp_match(struct device *, void *, void *);
void qccpucp_attach(struct device *, struct device *, void *);
const struct cfattach qccpucp_ca = {
sizeof (struct qccpucp_softc), qccpucp_match, qccpucp_attach
};
struct cfdriver qccpucp_cd = {
NULL, "qccpucp", DV_DULL
};
void *qccpucp_channel(void *, uint32_t *, struct mbox_client *);
int qccpucp_send(void *, const void *, size_t);
int
qccpucp_match(struct device *parent, void *match, void *aux)
{
struct fdt_attach_args *faa = aux;
return OF_is_compatible(faa->fa_node, "qcom,x1e80100-cpucp-mbox");
}
void
qccpucp_attach(struct device *parent, struct device *self, void *aux)
{
struct qccpucp_softc *sc = (struct qccpucp_softc *)self;
struct fdt_attach_args *faa = aux;
if (faa->fa_nreg < 2) {
printf(": no registers\n");
return;
}
sc->sc_iot = faa->fa_iot;
if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
faa->fa_reg[0].size, 0, &sc->sc_rx_ioh)) {
printf(": can't map registers\n");
return;
}
if (bus_space_map(sc->sc_iot, faa->fa_reg[1].addr,
faa->fa_reg[1].size, 0, &sc->sc_tx_ioh)) {
printf(": can't map registers\n");
bus_space_unmap(sc->sc_iot, sc->sc_rx_ioh,
faa->fa_reg[0].size);
return;
}
RXWRITE8(sc, CPUCP_REG_RX_EN, 0);
RXWRITE8(sc, CPUCP_REG_RX_CLEAR, 0);
RXWRITE8(sc, CPUCP_REG_RX_MAP, 0);
printf("\n");
RXWRITE8(sc, CPUCP_REG_RX_MAP, CPUCP_MASK_CMD);
sc->sc_md.md_node = faa->fa_node;
sc->sc_md.md_cookie = sc;
sc->sc_md.md_channel = qccpucp_channel;
sc->sc_md.md_send = qccpucp_send;
mbox_register(&sc->sc_md);
}
void *
qccpucp_channel(void *cookie, uint32_t *cells, struct mbox_client *mc)
{
struct qccpucp_softc *sc = cookie;
struct qccpucp_channel *ch = NULL;
uint64_t val;
int i;
for (i = 0; i < CPUCP_MAX_CHANNELS; i++) {
if (sc->sc_chans[i].ch_sc == NULL) {
ch = &sc->sc_chans[i];
break;
}
}
if (ch == NULL)
return NULL;
val = RXREAD8(sc, CPUCP_REG_RX_EN);
val |= (1 << i);
RXWRITE8(sc, CPUCP_REG_RX_EN, val);
ch->ch_idx = i;
ch->ch_sc = sc;
return ch;
}
int
qccpucp_send(void *cookie, const void *data, size_t len)
{
struct qccpucp_channel *ch = cookie;
struct qccpucp_softc *sc = ch->ch_sc;
TXWRITE4(sc, CPUCP_REG_CMD(ch->ch_idx), 0);
return 0;
}

View file

@ -1,4 +1,4 @@
/* $OpenBSD: qcpas.c,v 1.7 2024/09/01 03:14:48 jsg Exp $ */
/* $OpenBSD: qcpas.c,v 1.8 2024/11/08 21:13:34 landry Exp $ */
/*
* Copyright (c) 2023 Patrick Wildt <patrick@blueri.se>
*
@ -1461,6 +1461,7 @@ qcpas_pmic_rtr_bat_status(struct qcpas_softc *sc,
extern int hw_power;
struct apm_power_info *info = &qcpas_pmic_rtr_apm_power_info;
uint32_t delta;
u_char nblife;
#endif
#ifndef SMALL_KERNEL
@ -1509,8 +1510,10 @@ qcpas_pmic_rtr_bat_status(struct qcpas_softc *sc,
return;
}
info->battery_life =
((bat->capacity * 100) / sc->sc_last_full_capacity);
nblife = ((bat->capacity * 100) / sc->sc_last_full_capacity);
if (info->battery_life != nblife)
apm_record_event(APM_POWER_CHANGE);
info->battery_life = nblife;
if (info->battery_life > 50)
info->battery_state = APM_BATT_HIGH;
else if (info->battery_life > 25)
@ -1532,9 +1535,13 @@ qcpas_pmic_rtr_bat_status(struct qcpas_softc *sc,
info->minutes_left = (60 * delta) / abs(bat->rate);
if (bat->power_state & BATTMGR_PWR_STATE_AC_ON) {
if (info->ac_state != APM_AC_ON)
apm_record_event(APM_POWER_CHANGE);
info->ac_state = APM_AC_ON;
hw_power = 1;
} else {
if (info->ac_state != APM_AC_OFF)
apm_record_event(APM_POWER_CHANGE);
info->ac_state = APM_AC_OFF;
hw_power = 0;
}

View file

@ -1,4 +1,4 @@
/* $OpenBSD: qcpmic.c,v 1.2 2024/08/14 10:54:58 mglocker Exp $ */
/* $OpenBSD: qcpmic.c,v 1.3 2025/01/03 14:13:55 kettenis Exp $ */
/*
* Copyright (c) 2022 Patrick Wildt <patrick@blueri.se>
*
@ -26,8 +26,6 @@
#include <dev/fdt/spmivar.h>
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_clock.h>
#include <dev/ofw/ofw_power.h>
#include <dev/ofw/fdt.h>
/* PMIC Registers. */

View file

@ -1,4 +1,4 @@
/* $OpenBSD: qcpon.c,v 1.4 2023/04/24 14:34:13 patrick Exp $ */
/* $OpenBSD: qcpon.c,v 1.6 2025/01/03 14:14:49 kettenis Exp $ */
/*
* Copyright (c) 2022 Patrick Wildt <patrick@blueri.se>
*
@ -28,19 +28,22 @@
#include <dev/fdt/spmivar.h>
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_clock.h>
#include <dev/ofw/ofw_power.h>
#include <dev/ofw/fdt.h>
/* Registers. */
#define PON_RT_STS 0x10
#define PON_PMK8350_KPDPWR_N_SET (1U << 7)
struct qcpon_softc {
struct device sc_dev;
int sc_node;
spmi_tag_t sc_tag;
int8_t sc_sid;
uint16_t sc_addr;
void *sc_pwrkey_ih;
int sc_pwrkey_debounce;
uint32_t sc_last_sts;
struct task sc_powerdown_task;
};
@ -72,11 +75,19 @@ qcpon_attach(struct device *parent, struct device *self, void *aux)
{
struct spmi_attach_args *saa = aux;
struct qcpon_softc *sc = (struct qcpon_softc *)self;
uint32_t reg[2];
int node;
if (OF_getpropintarray(saa->sa_node, "reg",
reg, sizeof(reg)) != sizeof(reg)) {
printf(": can't find registers\n");
return;
}
sc->sc_node = saa->sa_node;
sc->sc_tag = saa->sa_tag;
sc->sc_sid = saa->sa_sid;
sc->sc_addr = reg[0];
task_set(&sc->sc_powerdown_task, qcpon_powerdown_task, sc);
@ -106,19 +117,27 @@ qcpon_pwrkey_intr(void *arg)
#ifdef SUSPEND
extern int cpu_suspended;
#endif
/* Ignore presses, handle releases. */
sc->sc_pwrkey_debounce = (sc->sc_pwrkey_debounce + 1) % 2;
if (sc->sc_pwrkey_debounce == 1)
return 1;
uint32_t sts;
int error;
#ifdef SUSPEND
if (cpu_suspended)
if (cpu_suspended) {
cpu_suspended = 0;
else
return 1;
}
#endif
error = spmi_cmd_read(sc->sc_tag, sc->sc_sid, SPMI_CMD_EXT_READL,
sc->sc_addr + PON_RT_STS, &sts, sizeof(sts));
if (error)
return 0;
/* Ignore presses, handle releases. */
if ((sc->sc_last_sts & PON_PMK8350_KPDPWR_N_SET) &&
(sts & PON_PMK8350_KPDPWR_N_SET) == 0)
task_add(systq, &sc->sc_powerdown_task);
sc->sc_last_sts = sts;
return 1;
}
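
qcpon now reads PON_RT_STS over SPMI and treats a falling edge of KPDPWR_N as a key release instead of counting interrupts. The edge test boils down to (sketch, helper name invented):

#include <stdint.h>
#include <stdbool.h>

#define PON_PMK8350_KPDPWR_N_SET	(1U << 7)

/*
 * Return true exactly when KPDPWR_N goes from set to clear between two
 * status reads, i.e. the power key was released; mirrors the check in
 * qcpon_pwrkey_intr() above.  *last is updated to the new status.
 */
static bool
kpdpwr_released(uint32_t *last, uint32_t sts)
{
	bool rel = (*last & PON_PMK8350_KPDPWR_N_SET) != 0 &&
	    (sts & PON_PMK8350_KPDPWR_N_SET) == 0;

	*last = sts;
	return rel;
}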

View file

@ -1,4 +1,4 @@
/* $OpenBSD: qcspmi.c,v 1.6 2024/08/14 10:54:58 mglocker Exp $ */
/* $OpenBSD: qcspmi.c,v 1.7 2025/01/03 14:13:25 kettenis Exp $ */
/*
* Copyright (c) 2022 Patrick Wildt <patrick@blueri.se>
*
@ -25,8 +25,6 @@
#include <dev/fdt/spmivar.h>
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_clock.h>
#include <dev/ofw/ofw_power.h>
#include <dev/ofw/fdt.h>
/* Core registers. */

View file

@ -1,4 +1,4 @@
/* $OpenBSD: rkclock.c,v 1.90 2024/07/15 09:54:38 patrick Exp $ */
/* $OpenBSD: rkclock.c,v 1.91 2024/11/24 22:19:59 kettenis Exp $ */
/*
* Copyright (c) 2017, 2018 Mark Kettenis <kettenis@openbsd.org>
*
@ -2998,22 +2998,22 @@ rk3399_enable(void *cookie, uint32_t *cells, int on)
switch (idx) {
case RK3399_CLK_USB2PHY0_REF:
HWRITE4(sc, RK3399_CRU_CLKGATE_CON(6), (5 << 0) << 16);
HWRITE4(sc, RK3399_CRU_CLKGATE_CON(6), (1 << 5) << 16);
break;
case RK3399_CLK_USB2PHY1_REF:
HWRITE4(sc, RK3399_CRU_CLKGATE_CON(6), (6 << 0) << 16);
HWRITE4(sc, RK3399_CRU_CLKGATE_CON(6), (1 << 6) << 16);
break;
case RK3399_CLK_UPHY0_TCPDPHY_REF:
HWRITE4(sc, RK3399_CRU_CLKGATE_CON(13), (4 << 0) << 16);
HWRITE4(sc, RK3399_CRU_CLKGATE_CON(13), (1 << 4) << 16);
break;
case RK3399_CLK_UPHY0_TCPDCORE:
HWRITE4(sc, RK3399_CRU_CLKGATE_CON(13), (5 << 0) << 16);
HWRITE4(sc, RK3399_CRU_CLKGATE_CON(13), (1 << 5) << 16);
break;
case RK3399_CLK_UPHY1_TCPDPHY_REF:
HWRITE4(sc, RK3399_CRU_CLKGATE_CON(13), (6 << 0) << 16);
HWRITE4(sc, RK3399_CRU_CLKGATE_CON(13), (1 << 6) << 16);
break;
case RK3399_CLK_UPHY1_TCPDCORE:
HWRITE4(sc, RK3399_CRU_CLKGATE_CON(13), (7 << 0) << 16);
HWRITE4(sc, RK3399_CRU_CLKGATE_CON(13), (1 << 7) << 16);
break;
case RK3399_ACLK_GMAC:
HWRITE4(sc, RK3399_CRU_CLKGATE_CON(32), (1 << 0) << 16);
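
The rkclock corrections above all stem from the RK3399 CRU's hiword-mask register layout: the upper 16 bits of a CLKGATE_CON write select which lower bits take effect, so ungating gate bit N means writing (1 << N) << 16, not (N << 0) << 16. A small sketch of building such a value (helper name invented, not driver code):

#include <stdint.h>

/*
 * Build the value for a Rockchip "hiword mask" register write: the top
 * half is a write-enable mask for the bottom half.  Illustrative only.
 */
static inline uint32_t
rk_hiword(int bit, int set)
{
	return ((1U << bit) << 16) | ((set ? 1U : 0U) << bit);
}

/*
 * Example: enabling RK3399_CLK_UPHY0_TCPDCORE clears gate bit 5 in
 * CLKGATE_CON(13), so the value written is rk_hiword(5, 0), i.e.
 * (1 << 5) << 16 -- exactly the corrected HWRITE4() argument above.
 */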

View file

@ -1,4 +1,4 @@
/* $OpenBSD: rkpmic.c,v 1.17 2024/05/26 18:06:21 kettenis Exp $ */
/* $OpenBSD: rkpmic.c,v 1.18 2024/11/23 21:24:03 kettenis Exp $ */
/*
* Copyright (c) 2017 Mark Kettenis <kettenis@openbsd.org>
*
@ -33,6 +33,8 @@
#include <dev/clock_subr.h>
extern void (*powerdownfn)(void);
#define RK80X_SECONDS 0x00
#define RK80X_MINUTES 0x01
#define RK80X_HOURS 0x02
@ -52,10 +54,20 @@
#define RK809_RTC_STATUS 0x0e
#define RK80X_RTC_STATUS_POWER_UP 0x80
#define RK805_DEV_CTRL 0x4b
#define RK805_DEV_CTRL_DEV_OFF 0x01
#define RK806_SYS_CFG3 0x72
#define RK806_SYS_CFG3_DEV_OFF 0x01
#define RK808_DEVCTRL 0x4b
#define RK808_DEVCTRL_DEV_OFF_RST 0x08
#define RK809_PMIC_SYS_CFG3 0xf4
#define RK809_PMIC_SYS_CFG3_SLP_FUN_MASK 0x18
#define RK809_PMIC_SYS_CFG3_SLP_FUN_NONE 0x00
#define RK809_PMIC_SYS_CFG3_SLP_FUN_SLEEP 0x08
#define RK809_PMIC_SYS_CFG3_DEV_OFF 0x01
#define RK809_PMIC_INT_STS0 0xf8
#define RK809_PMIC_INT_MSK0 0xf9
#define RK809_PMIC_INT_MSK0_PWRON_FALL_INT_IM 0x01
@ -338,6 +350,8 @@ struct rkpmic_softc {
struct spi_config sc_spi_conf;
int sc_rtc_ctrl_reg, sc_rtc_status_reg;
uint8_t sc_dev_ctrl_reg, sc_dev_off_val;
struct todr_chip_handle sc_todr;
const struct rkpmic_regdata *sc_regdata;
@ -382,6 +396,9 @@ int rkpmic_clock_write(struct rkpmic_softc *, struct clock_ymdhms *);
int rkpmic_gettime(struct todr_chip_handle *, struct timeval *);
int rkpmic_settime(struct todr_chip_handle *, struct timeval *);
struct rkpmic_softc *rkpmic_sc;
void rkpmic_powerdown(void);
int
rkpmic_i2c_match(struct device *parent, void *match, void *aux)
{
@ -447,24 +464,34 @@ rkpmic_attach(struct device *parent, struct device *self, void *aux)
chip = "RK805";
sc->sc_rtc_ctrl_reg = RK805_RTC_CTRL;
sc->sc_rtc_status_reg = RK805_RTC_STATUS;
sc->sc_dev_ctrl_reg = RK805_DEV_CTRL;
sc->sc_dev_off_val = RK805_DEV_CTRL_DEV_OFF;
sc->sc_regdata = rk805_regdata;
} else if (OF_is_compatible(sc->sc_node, "rockchip,rk806")) {
chip = "RK806";
sc->sc_dev_ctrl_reg = RK806_SYS_CFG3;
sc->sc_dev_off_val = RK806_SYS_CFG3_DEV_OFF;
sc->sc_regdata = rk806_regdata;
} else if (OF_is_compatible(sc->sc_node, "rockchip,rk808")) {
chip = "RK808";
sc->sc_rtc_ctrl_reg = RK808_RTC_CTRL;
sc->sc_rtc_status_reg = RK808_RTC_STATUS;
sc->sc_dev_ctrl_reg = RK808_DEVCTRL;
sc->sc_dev_off_val = RK808_DEVCTRL_DEV_OFF_RST;
sc->sc_regdata = rk808_regdata;
} else if (OF_is_compatible(sc->sc_node, "rockchip,rk809")) {
chip = "RK809";
sc->sc_rtc_ctrl_reg = RK809_RTC_CTRL;
sc->sc_rtc_status_reg = RK809_RTC_STATUS;
sc->sc_dev_ctrl_reg = RK809_PMIC_SYS_CFG3;
sc->sc_dev_off_val = RK809_PMIC_SYS_CFG3_DEV_OFF;
sc->sc_regdata = rk809_regdata;
} else {
chip = "RK817";
sc->sc_rtc_ctrl_reg = RK809_RTC_CTRL;
sc->sc_rtc_status_reg = RK809_RTC_STATUS;
sc->sc_dev_ctrl_reg = RK809_PMIC_SYS_CFG3;
sc->sc_dev_off_val = RK809_PMIC_SYS_CFG3_DEV_OFF;
sc->sc_regdata = rk817_regdata;
}
printf(": %s\n", chip);
@ -511,6 +538,12 @@ rkpmic_attach(struct device *parent, struct device *self, void *aux)
device_register_wakeup(&sc->sc_dev);
#endif
}
if (OF_getpropbool(sc->sc_node, "system-power-controller") ||
OF_getpropbool(sc->sc_node, "rockchip,system-power-controller")) {
rkpmic_sc = sc;
powerdownfn = rkpmic_powerdown;
}
}
int
@ -557,6 +590,14 @@ rkpmic_intr(void *arg)
return 1;
}
void
rkpmic_powerdown(void)
{
struct rkpmic_softc *sc = rkpmic_sc;
rkpmic_reg_write(sc, sc->sc_dev_ctrl_reg,
rkpmic_reg_read(sc, sc->sc_dev_ctrl_reg) | sc->sc_dev_off_val);
}
struct rkpmic_regulator {
struct rkpmic_softc *rr_sc;

View file

@ -1,4 +1,4 @@
/* $OpenBSD: rkusbphy.c,v 1.5 2024/06/23 10:18:11 kettenis Exp $ */
/* $OpenBSD: rkusbphy.c,v 1.6 2024/11/24 22:46:54 kettenis Exp $ */
/*
* Copyright (c) 2023 David Gwynne <dlg@openbsd.org>
@ -61,6 +61,47 @@ struct rkusbphy_chip {
const struct rkusbphy_regs *c_regs;
};
/*
* RK3399 has two USB2PHY nodes that share a GRF.
*/
static const struct rkusbphy_regs rkusbphy_rk3399_usb0_regs = {
/* shift, mask, set */
.clk_enable = { 0xe450, 4, 0x1, 0x0 },
.otg = {
.phy_enable = { 0xe454, 0, 0x3, 0x2 },
},
.host = {
.phy_enable = { 0xe458, 0, 0x3, 0x2 },
},
};
static const struct rkusbphy_regs rkusbphy_rk3399_usb1_regs = {
/* shift, mask, set */
.clk_enable = { 0xe460, 4, 0x1, 0x0 },
.otg = {
.phy_enable = { 0xe464, 0, 0x3, 0x2 },
},
.host = {
.phy_enable = { 0xe468, 0, 0x3, 0x2 },
},
};
static const struct rkusbphy_chip rkusbphy_rk3399[] = {
{
.c_base_addr = 0xe450,
.c_regs = &rkusbphy_rk3399_usb0_regs,
},
{
.c_base_addr = 0xe460,
.c_regs = &rkusbphy_rk3399_usb1_regs,
},
};
/*
* RK3568 has two USB2PHY nodes that have a GRF each. Each GRF has
* the same register layout.
@ -190,6 +231,7 @@ struct rkusbphy_id {
#define RKUSBPHY_ID(_n, _c) { _n, _c, nitems(_c) }
static const struct rkusbphy_id rkusbphy_ids[] = {
RKUSBPHY_ID("rockchip,rk3399-usb2phy", rkusbphy_rk3399),
RKUSBPHY_ID("rockchip,rk3568-usb2phy", rkusbphy_rk3568),
RKUSBPHY_ID("rockchip,rk3588-usb2phy", rkusbphy_rk3588),
};

View file

@ -1,7 +1,8 @@
/* $OpenBSD: scmi.c,v 1.1 2023/02/13 19:26:15 kettenis Exp $ */
/* $OpenBSD: scmi.c,v 1.2 2024/11/25 22:12:18 tobhe Exp $ */
/*
* Copyright (c) 2023 Mark Kettenis <kettenis@openbsd.org>
* Copyright (c) 2024 Tobias Heider <tobhe@openbsd.org>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -19,12 +20,16 @@
#include <sys/param.h>
#include <sys/device.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/sensors.h>
#include <sys/sysctl.h>
#include <machine/bus.h>
#include <machine/fdt.h>
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_clock.h>
#include <dev/ofw/ofw_misc.h>
#include <dev/ofw/fdt.h>
#include <dev/fdt/pscivar.h>
@ -49,6 +54,7 @@ struct scmi_shmem {
/* Protocols */
#define SCMI_BASE 0x10
#define SCMI_PERF 0x13
#define SCMI_CLOCK 0x14
/* Common messages */
@ -64,6 +70,24 @@ struct scmi_shmem {
#define SCMI_CLOCK_CONFIG_SET 0x7
#define SCMI_CLOCK_CONFIG_SET_ENABLE (1 << 0)
/* Performance management messages */
#define SCMI_PERF_DOMAIN_ATTRIBUTES 0x3
#define SCMI_PERF_DESCRIBE_LEVELS 0x4
#define SCMI_PERF_LEVEL_GET 0x8
struct scmi_resp_perf_describe_levels_40 {
uint16_t pl_nret;
uint16_t pl_nrem;
struct {
uint32_t pe_perf;
uint32_t pe_cost;
uint16_t pe_latency;
uint16_t pe_reserved;
uint32_t pe_ifreq;
uint32_t pe_lindex;
} pl_entry[];
};
static inline void
scmi_message_header(volatile struct scmi_shmem *shmem,
uint32_t protocol_id, uint32_t message_id)
@ -71,20 +95,58 @@ scmi_message_header(volatile struct scmi_shmem *shmem,
shmem->message_header = (protocol_id << 10) | (message_id << 0);
}
struct scmi_perf_level {
uint32_t pl_perf;
uint32_t pl_cost;
uint32_t pl_ifreq;
};
struct scmi_perf_domain {
size_t pd_nlevels;
struct scmi_perf_level *pd_levels;
int pd_curlevel;
};
struct scmi_softc {
struct device sc_dev;
bus_space_tag_t sc_iot;
bus_space_handle_t sc_ioh;
volatile struct scmi_shmem *sc_shmem;
int sc_node;
bus_space_handle_t sc_ioh_tx;
bus_space_handle_t sc_ioh_rx;
volatile struct scmi_shmem *sc_shmem_tx;
volatile struct scmi_shmem *sc_shmem_rx;
uint32_t sc_smc_id;
struct mbox_channel *sc_mc_tx;
struct mbox_channel *sc_mc_rx;
uint16_t sc_ver_major;
uint16_t sc_ver_minor;
/* SCMI_CLOCK */
struct clock_device sc_cd;
/* SCMI_PERF */
int sc_perf_power_unit;
#define SCMI_POWER_UNIT_UW 0x2
#define SCMI_POWER_UNIT_MW 0x1
#define SCMI_POWER_UNIT_NONE 0x0
size_t sc_perf_ndomains;
struct scmi_perf_domain *sc_perf_domains;
struct ksensordev sc_perf_sensordev;
struct ksensordev sc_perf_psensordev;
struct ksensor *sc_perf_fsensors;
struct ksensor *sc_perf_psensors;
int32_t (*sc_command)(struct scmi_softc *);
};
int scmi_match(struct device *, void *, void *);
void scmi_attach(struct device *, struct device *, void *);
int scmi_attach_smc(struct scmi_softc *, struct fdt_attach_args *);
void scmi_attach_mbox_deferred(struct device *);
const struct cfattach scmi_ca = {
sizeof(struct scmi_softc), scmi_match, scmi_attach
@ -96,22 +158,42 @@ struct cfdriver scmi_cd = {
void scmi_attach_proto(struct scmi_softc *, int);
void scmi_attach_clock(struct scmi_softc *, int);
int32_t scmi_command(struct scmi_softc *);
void scmi_attach_perf(struct scmi_softc *, int);
int32_t scmi_smc_command(struct scmi_softc *);
int32_t scmi_mbox_command(struct scmi_softc *);
int
scmi_match(struct device *parent, void *match, void *aux)
{
struct fdt_attach_args *faa = aux;
return OF_is_compatible(faa->fa_node, "arm,scmi-smc");
return OF_is_compatible(faa->fa_node, "arm,scmi-smc") ||
OF_is_compatible(faa->fa_node, "arm,scmi");
}
void
scmi_attach(struct device *parent, struct device *self, void *aux)
{
struct scmi_softc *sc = (struct scmi_softc *)self;
volatile struct scmi_shmem *shmem;
struct fdt_attach_args *faa = aux;
sc->sc_iot = faa->fa_iot;
sc->sc_node = faa->fa_node;
if (OF_is_compatible(faa->fa_node, "arm,scmi-smc")) {
scmi_attach_smc(sc, faa);
} else if (OF_is_compatible(faa->fa_node, "arm,scmi")) {
printf("\n");
/* Defer because we need the mailbox driver attached first */
config_defer(self, scmi_attach_mbox_deferred);
}
}
int
scmi_attach_smc(struct scmi_softc *sc, struct fdt_attach_args *faa)
{
volatile struct scmi_shmem *shmem;
struct fdt_reg reg;
int32_t status;
uint32_t version;
@ -119,53 +201,142 @@ scmi_attach(struct device *parent, struct device *self, void *aux)
void *node;
int proto;
sc->sc_smc_id = OF_getpropint(faa->fa_node, "arm,smc-id", 0);
if (sc->sc_smc_id == 0) {
printf(": no SMC id\n");
return -1;
}
phandle = OF_getpropint(faa->fa_node, "shmem", 0);
node = fdt_find_phandle(phandle);
if (node == NULL || !fdt_is_compatible(node, "arm,scmi-shmem") ||
fdt_get_reg(node, 0, &reg)) {
printf(": no shared memory\n");
return;
return -1;
}
sc->sc_smc_id = OF_getpropint(faa->fa_node, "arm,smc-id", 0);
if (sc->sc_smc_id == 0) {
printf(": no SMC id\n");
return;
}
sc->sc_iot = faa->fa_iot;
if (bus_space_map(sc->sc_iot, reg.addr,
reg.size, 0, &sc->sc_ioh)) {
reg.size, 0, &sc->sc_ioh_tx)) {
printf(": can't map shared memory\n");
return;
return -1;
}
sc->sc_shmem = bus_space_vaddr(sc->sc_iot, sc->sc_ioh);
shmem = sc->sc_shmem;
sc->sc_shmem_tx = bus_space_vaddr(sc->sc_iot, sc->sc_ioh_tx);
shmem = sc->sc_shmem_tx;
sc->sc_command = scmi_smc_command;
if ((shmem->channel_status & SCMI_CHANNEL_FREE) == 0) {
printf(": channel busy\n");
return;
return -1;
}
scmi_message_header(shmem, SCMI_BASE, SCMI_PROTOCOL_VERSION);
shmem->length = sizeof(uint32_t);
status = scmi_command(sc);
status = sc->sc_command(sc);
if (status != SCMI_SUCCESS) {
printf(": protocol version command failed\n");
return;
return -1;
}
version = shmem->message_payload[1];
printf(": SCMI %d.%d\n", version >> 16, version & 0xffff);
sc->sc_ver_major = version >> 16;
sc->sc_ver_minor = version & 0xfffff;
printf(": SCMI %d.%d\n", sc->sc_ver_major, sc->sc_ver_minor);
for (proto = OF_child(faa->fa_node); proto; proto = OF_peer(proto))
scmi_attach_proto(sc, proto);
return 0;
}
void
scmi_attach_mbox_deferred(struct device *self)
{
struct scmi_softc *sc = (struct scmi_softc *)self;
uint32_t *shmems;
int32_t status;
uint32_t version;
struct fdt_reg reg;
int len;
void *node;
int proto;
/* we only support the 2 mbox / 2 shmem case */
len = OF_getproplen(sc->sc_node, "mboxes");
if (len != 4 * sizeof(uint32_t)) {
printf("%s: invalid number of mboxes\n", sc->sc_dev.dv_xname);
return;
}
len = OF_getproplen(sc->sc_node, "shmem");
if (len != 2 * sizeof(uint32_t)) {
printf("%s: invalid number of shmems\n", sc->sc_dev.dv_xname);
return;
}
shmems = malloc(len, M_DEVBUF, M_WAITOK);
OF_getpropintarray(sc->sc_node, "shmem", shmems, len);
sc->sc_mc_tx = mbox_channel(sc->sc_node, "tx", NULL);
if (sc->sc_mc_tx == NULL) {
printf("%s: no tx mbox\n", sc->sc_dev.dv_xname);
return;
}
sc->sc_mc_rx = mbox_channel(sc->sc_node, "rx", NULL);
if (sc->sc_mc_rx == NULL) {
printf("%s: no rx mbox\n", sc->sc_dev.dv_xname);
return;
}
node = fdt_find_phandle(shmems[0]);
if (node == NULL || !fdt_is_compatible(node, "arm,scmi-shmem") ||
fdt_get_reg(node, 0, &reg)) {
printf("%s: no shared memory\n", sc->sc_dev.dv_xname);
return;
}
if (bus_space_map(sc->sc_iot, reg.addr, reg.size, 0, &sc->sc_ioh_tx)) {
printf("%s: can't map shared memory\n", sc->sc_dev.dv_xname);
return;
}
sc->sc_shmem_tx = bus_space_vaddr(sc->sc_iot, sc->sc_ioh_tx);
node = fdt_find_phandle(shmems[1]);
if (node == NULL || !fdt_is_compatible(node, "arm,scmi-shmem") ||
fdt_get_reg(node, 0, &reg)) {
printf("%s: no shared memory\n", sc->sc_dev.dv_xname);
return;
}
if (bus_space_map(sc->sc_iot, reg.addr, reg.size, 0, &sc->sc_ioh_rx)) {
printf("%s: can't map shared memory\n", sc->sc_dev.dv_xname);
return;
}
sc->sc_shmem_rx = bus_space_vaddr(sc->sc_iot, sc->sc_ioh_rx);
sc->sc_command = scmi_mbox_command;
scmi_message_header(sc->sc_shmem_tx, SCMI_BASE, SCMI_PROTOCOL_VERSION);
sc->sc_shmem_tx->length = sizeof(uint32_t);
status = sc->sc_command(sc);
if (status != SCMI_SUCCESS) {
printf("%s: protocol version command failed\n",
sc->sc_dev.dv_xname);
return;
}
version = sc->sc_shmem_tx->message_payload[1];
sc->sc_ver_major = version >> 16;
sc->sc_ver_minor = version & 0xfffff;
printf("%s: SCMI %d.%d\n", sc->sc_dev.dv_xname, sc->sc_ver_major,
sc->sc_ver_minor);
for (proto = OF_child(sc->sc_node); proto; proto = OF_peer(proto))
scmi_attach_proto(sc, proto);
}
int32_t
scmi_command(struct scmi_softc *sc)
scmi_smc_command(struct scmi_softc *sc)
{
volatile struct scmi_shmem *shmem = sc->sc_shmem;
volatile struct scmi_shmem *shmem = sc->sc_shmem_tx;
int32_t status;
shmem->channel_status = 0;
@ -179,6 +350,32 @@ scmi_command(struct scmi_softc *sc)
return shmem->message_payload[0];
}
int32_t
scmi_mbox_command(struct scmi_softc *sc)
{
volatile struct scmi_shmem *shmem = sc->sc_shmem_tx;
int ret;
int i;
shmem->channel_status = 0;
ret = mbox_send(sc->sc_mc_tx, NULL, 0);
if (ret != 0)
return SCMI_NOT_SUPPORTED;
/* XXX: poll for now */
for (i = 0; i < 20; i++) {
if (shmem->channel_status & SCMI_CHANNEL_FREE)
break;
delay(10);
}
if ((shmem->channel_status & SCMI_CHANNEL_ERROR))
return SCMI_COMMS_ERROR;
if ((shmem->channel_status & SCMI_CHANNEL_FREE) == 0)
return SCMI_BUSY;
return shmem->message_payload[0];
}
void
scmi_attach_proto(struct scmi_softc *sc, int node)
{
@ -186,6 +383,9 @@ scmi_attach_proto(struct scmi_softc *sc, int node)
case SCMI_CLOCK:
scmi_attach_clock(sc, node);
break;
case SCMI_PERF:
scmi_attach_perf(sc, node);
break;
default:
break;
}
@ -200,13 +400,13 @@ int scmi_clock_set_frequency(void *, uint32_t *, uint32_t);
void
scmi_attach_clock(struct scmi_softc *sc, int node)
{
volatile struct scmi_shmem *shmem = sc->sc_shmem;
volatile struct scmi_shmem *shmem = sc->sc_shmem_tx;
int32_t status;
int nclocks;
scmi_message_header(shmem, SCMI_CLOCK, SCMI_PROTOCOL_ATTRIBUTES);
shmem->length = sizeof(uint32_t);
status = scmi_command(sc);
status = sc->sc_command(sc);
if (status != SCMI_SUCCESS)
return;
@ -226,28 +426,28 @@ void
scmi_clock_enable(void *cookie, uint32_t *cells, int on)
{
struct scmi_softc *sc = cookie;
volatile struct scmi_shmem *shmem = sc->sc_shmem;
volatile struct scmi_shmem *shmem = sc->sc_shmem_tx;
uint32_t idx = cells[0];
scmi_message_header(shmem, SCMI_CLOCK, SCMI_CLOCK_CONFIG_SET);
shmem->length = 3 * sizeof(uint32_t);
shmem->message_payload[0] = idx;
shmem->message_payload[1] = on ? SCMI_CLOCK_CONFIG_SET_ENABLE : 0;
scmi_command(sc);
sc->sc_command(sc);
}
uint32_t
scmi_clock_get_frequency(void *cookie, uint32_t *cells)
{
struct scmi_softc *sc = cookie;
volatile struct scmi_shmem *shmem = sc->sc_shmem;
volatile struct scmi_shmem *shmem = sc->sc_shmem_tx;
uint32_t idx = cells[0];
int32_t status;
scmi_message_header(shmem, SCMI_CLOCK, SCMI_CLOCK_RATE_GET);
shmem->length = 2 * sizeof(uint32_t);
shmem->message_payload[0] = idx;
status = scmi_command(sc);
status = sc->sc_command(sc);
if (status != SCMI_SUCCESS)
return 0;
if (shmem->message_payload[2] != 0)
@ -260,7 +460,7 @@ int
scmi_clock_set_frequency(void *cookie, uint32_t *cells, uint32_t freq)
{
struct scmi_softc *sc = cookie;
volatile struct scmi_shmem *shmem = sc->sc_shmem;
volatile struct scmi_shmem *shmem = sc->sc_shmem_tx;
uint32_t idx = cells[0];
int32_t status;
@ -270,9 +470,188 @@ scmi_clock_set_frequency(void *cookie, uint32_t *cells, uint32_t freq)
shmem->message_payload[1] = idx;
shmem->message_payload[2] = freq;
shmem->message_payload[3] = 0;
status = scmi_command(sc);
status = sc->sc_command(sc);
if (status != SCMI_SUCCESS)
return -1;
return 0;
}
/* Performance management */
void scmi_perf_descr_levels(struct scmi_softc *, int);
void scmi_perf_refresh_sensor(void *);
void
scmi_attach_perf(struct scmi_softc *sc, int node)
{
volatile struct scmi_shmem *shmem = sc->sc_shmem_tx;
int32_t status;
uint32_t version;
int i;
scmi_message_header(sc->sc_shmem_tx, SCMI_PERF, SCMI_PROTOCOL_VERSION);
sc->sc_shmem_tx->length = sizeof(uint32_t);
status = sc->sc_command(sc);
if (status != SCMI_SUCCESS) {
printf("%s: SCMI_PROTOCOL_VERSION failed\n",
sc->sc_dev.dv_xname);
return;
}
version = shmem->message_payload[1];
if (version != 0x40000) {
printf("%s: invalid perf protocol version (0x%x != 0x4000)",
sc->sc_dev.dv_xname, version);
return;
}
scmi_message_header(shmem, SCMI_PERF, SCMI_PROTOCOL_ATTRIBUTES);
shmem->length = sizeof(uint32_t);
status = sc->sc_command(sc);
if (status != SCMI_SUCCESS) {
printf("%s: SCMI_PROTOCOL_ATTRIBUTES failed\n",
sc->sc_dev.dv_xname);
return;
}
sc->sc_perf_ndomains = shmem->message_payload[1] & 0xffff;
sc->sc_perf_domains = malloc(sc->sc_perf_ndomains *
sizeof(struct scmi_perf_domain), M_DEVBUF, M_ZERO | M_WAITOK);
sc->sc_perf_power_unit = (shmem->message_payload[1] >> 16) & 0x3;
strlcpy(sc->sc_perf_sensordev.xname, sc->sc_dev.dv_xname,
sizeof(sc->sc_perf_sensordev.xname));
sc->sc_perf_fsensors =
malloc(sc->sc_perf_ndomains * sizeof(struct ksensor),
M_DEVBUF, M_ZERO | M_WAITOK);
sc->sc_perf_psensors =
malloc(sc->sc_perf_ndomains * sizeof(struct ksensor),
M_DEVBUF, M_ZERO | M_WAITOK);
/* Add one frequency and one wattage sensor per perf domain */
for (i = 0; i < sc->sc_perf_ndomains; i++) {
scmi_message_header(shmem, SCMI_PERF,
SCMI_PERF_DOMAIN_ATTRIBUTES);
shmem->length = 2 * sizeof(uint32_t);
shmem->message_payload[0] = i;
status = sc->sc_command(sc);
if (status != SCMI_SUCCESS) {
printf("%s: SCMI_PERF_DOMAIN_ATTRIBUTES failed\n",
sc->sc_dev.dv_xname);
goto err;
}
scmi_perf_descr_levels(sc, i);
sc->sc_perf_fsensors[i].type = SENSOR_FREQ;
sensor_attach(&sc->sc_perf_sensordev, &sc->sc_perf_fsensors[i]);
sc->sc_perf_psensors[i].type = SENSOR_WATTS;
sensor_attach(&sc->sc_perf_sensordev, &sc->sc_perf_psensors[i]);
}
sensordev_install(&sc->sc_perf_sensordev);
sensor_task_register(sc, scmi_perf_refresh_sensor, 1);
return;
err:
free(sc->sc_perf_fsensors, M_DEVBUF,
sc->sc_perf_ndomains * sizeof(struct ksensor));
free(sc->sc_perf_psensors, M_DEVBUF,
sc->sc_perf_ndomains * sizeof(struct ksensor));
}
void
scmi_perf_descr_levels(struct scmi_softc *sc, int domain)
{
volatile struct scmi_shmem *shmem = sc->sc_shmem_tx;
volatile struct scmi_resp_perf_describe_levels_40 *pl;
struct scmi_perf_domain *pd = &sc->sc_perf_domains[domain];
int status, i, idx;
idx = 0;
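/* The level table may span several responses; pl_nrem counts levels left. */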
do {
scmi_message_header(shmem, SCMI_PERF,
SCMI_PERF_DESCRIBE_LEVELS);
shmem->length = sizeof(uint32_t) * 3;
shmem->message_payload[0] = domain;
shmem->message_payload[1] = idx;
status = sc->sc_command(sc);
if (status != SCMI_SUCCESS) {
printf("%s: SCMI_PERF_DESCRIBE_LEVELS failed\n",
sc->sc_dev.dv_xname);
return;
}
pl = (struct scmi_resp_perf_describe_levels_40 *)
&shmem->message_payload[1];
if (pd->pd_levels == NULL) {
pd->pd_nlevels = pl->pl_nret + pl->pl_nrem;
pd->pd_levels = malloc(pd->pd_nlevels *
sizeof(struct scmi_perf_level),
M_DEVBUF, M_ZERO | M_WAITOK);
}
for (i = 0; i < pl->pl_nret; i++) {
pd->pd_levels[idx + i].pl_cost =
pl->pl_entry[i].pe_cost;
pd->pd_levels[idx + i].pl_perf =
pl->pl_entry[i].pe_perf;
pd->pd_levels[idx + i].pl_ifreq =
pl->pl_entry[i].pe_ifreq;
}
idx += pl->pl_nret;
} while (pl->pl_nrem);
}
void
scmi_perf_refresh_sensor(void *arg)
{
struct scmi_softc *sc = arg;
volatile struct scmi_shmem *shmem = sc->sc_shmem_tx;
uint64_t power_cost;
int32_t status;
int level, i;
if (sc->sc_perf_domains == NULL)
return;
for (i = 0; i < sc->sc_perf_ndomains; i++) {
if (sc->sc_perf_domains[i].pd_levels == NULL)
return;
scmi_message_header(shmem, SCMI_PERF,
SCMI_PERF_LEVEL_GET);
shmem->length = sizeof(uint32_t) * 2;
shmem->message_payload[0] = i;
status = sc->sc_command(sc);
if (status != SCMI_SUCCESS) {
printf("%s: SCMI_PERF_LEVEL_GET failed\n",
sc->sc_dev.dv_xname);
return;
}
level = shmem->message_payload[1];
if (sc->sc_perf_fsensors == NULL ||
sc->sc_perf_psensors == NULL)
return;
sc->sc_perf_domains[i].pd_curlevel = level;
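/* pl_ifreq is an indicative frequency in kHz; the sensor value is in uHz. */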
sc->sc_perf_fsensors[i].value =
(uint64_t)sc->sc_perf_domains[i].
pd_levels[level].pl_ifreq * 1000000000;
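/* Scale the level's power cost to uW for the wattage sensor. */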
switch (sc->sc_perf_power_unit) {
case SCMI_POWER_UNIT_UW:
power_cost = (uint64_t)sc->sc_perf_domains[i].
pd_levels[level].pl_cost;
break;
case SCMI_POWER_UNIT_MW:
power_cost = (uint64_t)sc->sc_perf_domains[i].
pd_levels[level].pl_cost * 1000;
break;
default:
continue;
}
sc->sc_perf_psensors[i].value = power_cost;
}
}

View file

@ -1,4 +1,4 @@
/* $OpenBSD: simplefb.c,v 1.20 2023/04/16 11:34:32 kettenis Exp $ */
/* $OpenBSD: simplefb.c,v 1.21 2024/11/12 20:52:35 tobhe Exp $ */
/*
* Copyright (c) 2016 Mark Kettenis
*
@ -47,10 +47,10 @@ struct simplefb_format {
* rasops defaults.
*/
const struct simplefb_format simplefb_formats[] = {
{ "r5g6b5", 16 },
{ "x1r5g5b5", 15 },
{ "a1r5g5b5", 15 },
{ "r8g8b8", 24 },
{ "r5g6b5", 16, 11, 5, 5, 6, 0, 5 },
{ "x1r5g5b5", 15, 10, 5, 5, 5, 0, 5 },
{ "a1r5g5b5", 15, 10, 5, 5, 5, 0, 5 },
{ "r8g8b8", 24, 16, 8, 8, 8, 0, 8 },
{ "x8r8g8b8", 32, 16, 8, 8, 8, 0, 8 },
{ "a8r8g8b8", 32, 16, 8, 8, 8, 0, 8 },
{ "x8b8g8r8", 32 },

View file

@ -1,4 +1,4 @@
/* $OpenBSD: virtio_mmio.c,v 1.17 2024/09/02 08:26:26 sf Exp $ */
/* $OpenBSD: virtio_mmio.c,v 1.21 2024/12/20 22:18:27 sf Exp $ */
/* $NetBSD: virtio.c,v 1.3 2011/11/02 23:05:52 njoly Exp $ */
/*
@ -98,11 +98,13 @@ void virtio_mmio_write_device_config_8(struct virtio_softc *, int, uint64_t);
uint16_t virtio_mmio_read_queue_size(struct virtio_softc *, uint16_t);
void virtio_mmio_setup_queue(struct virtio_softc *, struct virtqueue *, uint64_t);
void virtio_mmio_setup_intrs(struct virtio_softc *);
int virtio_mmio_attach_finish(struct virtio_softc *, struct virtio_attach_args *);
int virtio_mmio_get_status(struct virtio_softc *);
void virtio_mmio_set_status(struct virtio_softc *, int);
int virtio_mmio_negotiate_features(struct virtio_softc *,
const struct virtio_feature_name *);
int virtio_mmio_intr(void *);
void virtio_mmio_intr_barrier(struct virtio_softc *);
struct virtio_mmio_softc {
struct virtio_softc sc_sc;
@ -118,6 +120,11 @@ struct virtio_mmio_softc {
uint32_t sc_version;
};
struct virtio_mmio_attach_args {
struct virtio_attach_args vma_va;
struct fdt_attach_args *vma_fa;
};
const struct cfattach virtio_mmio_ca = {
sizeof(struct virtio_mmio_softc),
virtio_mmio_match,
@ -150,7 +157,9 @@ const struct virtio_ops virtio_mmio_ops = {
virtio_mmio_get_status,
virtio_mmio_set_status,
virtio_mmio_negotiate_features,
virtio_mmio_attach_finish,
virtio_mmio_intr,
virtio_mmio_intr_barrier,
};
uint16_t
@ -248,7 +257,7 @@ virtio_mmio_attach(struct device *parent, struct device *self, void *aux)
struct virtio_mmio_softc *sc = (struct virtio_mmio_softc *)self;
struct virtio_softc *vsc = &sc->sc_sc;
uint32_t id, magic;
struct virtio_attach_args va = { 0 };
struct virtio_mmio_attach_args vma = { { 0 }, faa };
if (faa->fa_nreg < 1) {
printf(": no register data\n");
@ -297,38 +306,45 @@ virtio_mmio_attach(struct device *parent, struct device *self, void *aux)
virtio_mmio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
virtio_mmio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);
va.va_devid = id;
va.va_nintr = 1;
vma.vma_va.va_devid = id;
vma.vma_va.va_nintr = 1;
vsc->sc_child = NULL;
config_found(self, &va, NULL);
config_found(self, &vma, NULL);
if (vsc->sc_child == NULL) {
printf("%s: no matching child driver; not configured\n",
vsc->sc_dev.dv_xname);
goto fail_1;
goto fail;
}
if (vsc->sc_child == VIRTIO_CHILD_ERROR) {
printf("%s: virtio configuration failed\n",
vsc->sc_dev.dv_xname);
goto fail_1;
}
sc->sc_ih = fdt_intr_establish(faa->fa_node, vsc->sc_ipl,
virtio_mmio_intr, sc, vsc->sc_dev.dv_xname);
if (sc->sc_ih == NULL) {
printf("%s: couldn't establish interrupt\n",
vsc->sc_dev.dv_xname);
goto fail_2;
goto fail;
}
return;
fail_2:
config_detach(vsc->sc_child, 0);
fail_1:
/* no mmio_mapreg_unmap() or mmio_intr_unmap() */
fail:
virtio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
}
int
virtio_mmio_attach_finish(struct virtio_softc *vsc,
struct virtio_attach_args *va)
{
struct virtio_mmio_softc *sc = (struct virtio_mmio_softc *)vsc;
struct virtio_mmio_attach_args *vma =
(struct virtio_mmio_attach_args *)va;
sc->sc_ih = fdt_intr_establish(vma->vma_fa->fa_node, vsc->sc_ipl,
virtio_mmio_intr, sc, vsc->sc_dev.dv_xname);
if (sc->sc_ih == NULL) {
printf("%s: couldn't establish interrupt\n",
vsc->sc_dev.dv_xname);
return -EIO;
}
return 0;
}
int
virtio_mmio_detach(struct device *self, int flags)
{
@ -522,3 +538,11 @@ virtio_mmio_kick(struct virtio_softc *vsc, uint16_t idx)
bus_space_write_4(sc->sc_iot, sc->sc_ioh, VIRTIO_MMIO_QUEUE_NOTIFY,
idx);
}
void
virtio_mmio_intr_barrier(struct virtio_softc *vsc)
{
struct virtio_mmio_softc *sc = (struct virtio_mmio_softc *)vsc;
if (sc->sc_ih)
intr_barrier(sc->sc_ih);
}

View file

@ -1,4 +1,4 @@
/* $OpenBSD: hidkbd.c,v 1.14 2024/09/01 03:08:56 jsg Exp $ */
/* $OpenBSD: hidkbd.c,v 1.15 2024/10/21 19:05:31 miod Exp $ */
/* $NetBSD: ukbd.c,v 1.85 2003/03/11 16:44:00 augustss Exp $ */
/*
@ -81,7 +81,7 @@ const u_int8_t hidkbd_trtab[256] = {
0x1c, 0x01, 0x0e, 0x0f, 0x39, 0x0c, 0x0d, 0x1a, /* 28 - 2f */
0x1b, 0x2b, 0x2b, 0x27, 0x28, 0x29, 0x33, 0x34, /* 30 - 37 */
0x35, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, /* 38 - 3f */
0x41, 0x42, 0x43, 0x44, 0x57, 0x58, 0xaa, 0x46, /* 40 - 47 */
0x41, 0x42, 0x43, 0x44, 0x57, 0x58, 0xb7, 0x46, /* 40 - 47 */
0x7f, 0xd2, 0xc7, 0xc9, 0xd3, 0xcf, 0xd1, 0xcd, /* 48 - 4f */
0xcb, 0xd0, 0xc8, 0x45, 0xb5, 0x37, 0x4a, 0x4e, /* 50 - 57 */
0x9c, 0x4f, 0x50, 0x51, 0x4b, 0x4c, 0x4d, 0x47, /* 58 - 5f */

View file

@ -1,4 +1,4 @@
/* $OpenBSD: hotplug.c,v 1.24 2023/09/22 22:12:32 mvs Exp $ */
/* $OpenBSD: hotplug.c,v 1.25 2024/12/30 02:46:00 guenther Exp $ */
/*
* Copyright (c) 2004 Alexander Yurchenko <grange@openbsd.org>
*
@ -193,9 +193,6 @@ hotplugioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
switch (cmd) {
case FIOASYNC:
/* ignore */
case FIONBIO:
/* handled in the upper fs layer */
break;
default:
return (ENOTTY);
}

View file

@ -1,4 +1,4 @@
/* $OpenBSD: ihidev.c,v 1.33 2024/10/18 12:53:49 tobhe Exp $ */
/* $OpenBSD: ihidev.c,v 1.38 2025/01/07 23:13:56 kirill Exp $ */
/*
* HID-over-i2c driver
*
@ -55,6 +55,7 @@ enum {
/* pseudo commands */
I2C_HID_REPORT_DESCR = 0x100,
I2C_HID_RESET_RESPONSE = 0x101,
};
static int I2C_HID_POWER_ON = 0x0;
@ -75,6 +76,17 @@ int ihidev_maxrepid(void *buf, int len);
int ihidev_print(void *aux, const char *pnp);
int ihidev_submatch(struct device *parent, void *cf, void *aux);
#define IHIDEV_QUIRK_RE_POWER_ON 0x1
const struct ihidev_quirks {
uint16_t ihq_vid;
uint16_t ihq_pid;
int ihq_quirks;
} ihidev_devs[] = {
/* HONOR MagicBook Art 14 Touchpad (QTEC0002) */
{ 0x35cc, 0x0104, IHIDEV_QUIRK_RE_POWER_ON },
};
const struct cfattach ihidev_ca = {
sizeof(struct ihidev_softc),
ihidev_match,
@ -98,6 +110,25 @@ ihidev_match(struct device *parent, void *match, void *aux)
return (0);
}
int
ihidev_quirks(struct ihidev_softc *sc)
{
const struct ihidev_quirks *q;
uint16_t vid, pid;
int i, nent;
nent = nitems(ihidev_devs);
vid = letoh16(sc->hid_desc.wVendorID);
pid = letoh16(sc->hid_desc.wProductID);
for (i = 0, q = ihidev_devs; i < nent; i++, q++)
if (vid == q->ihq_vid && pid == q->ihq_pid)
return (q->ihq_quirks);
return (0);
}
void
ihidev_attach(struct device *parent, struct device *self, void *aux)
{
@ -118,32 +149,10 @@ ihidev_attach(struct device *parent, struct device *self, void *aux)
return;
}
if (ia->ia_intr) {
printf(" %s", iic_intr_string(sc->sc_tag, ia->ia_intr));
sc->sc_ih = iic_intr_establish(sc->sc_tag, ia->ia_intr,
IPL_TTY, ihidev_intr, sc, sc->sc_dev.dv_xname);
if (sc->sc_ih == NULL)
printf(", can't establish interrupt");
}
if (ia->ia_poll || !sc->sc_ih) {
printf(" (polling)");
sc->sc_poll = 1;
sc->sc_fastpoll = 1;
}
printf(", vendor 0x%x product 0x%x, %s\n",
letoh16(sc->hid_desc.wVendorID), letoh16(sc->hid_desc.wProductID),
(char *)ia->ia_cookie);
sc->sc_nrepid = ihidev_maxrepid(sc->sc_report, sc->sc_reportlen);
if (sc->sc_nrepid < 0)
return;
printf("%s: %d report id%s\n", sc->sc_dev.dv_xname, sc->sc_nrepid,
sc->sc_nrepid > 1 ? "s" : "");
sc->sc_nrepid++;
sc->sc_subdevs = mallocarray(sc->sc_nrepid, sizeof(struct ihidev *),
M_DEVBUF, M_WAITOK | M_ZERO);
@ -162,6 +171,29 @@ ihidev_attach(struct device *parent, struct device *self, void *aux)
}
sc->sc_ibuf = malloc(sc->sc_isize, M_DEVBUF, M_WAITOK | M_ZERO);
if (ia->ia_intr) {
printf(" %s", iic_intr_string(sc->sc_tag, ia->ia_intr));
sc->sc_ih = iic_intr_establish(sc->sc_tag, ia->ia_intr,
IPL_TTY, ihidev_intr, sc, sc->sc_dev.dv_xname);
if (sc->sc_ih == NULL)
printf("%s: can't establish interrupt\n",
sc->sc_dev.dv_xname);
}
if (ia->ia_poll || !sc->sc_ih) {
printf(" (polling)");
sc->sc_poll = 1;
sc->sc_fastpoll = 1;
}
printf(", vendor 0x%x product 0x%x, %s\n",
letoh16(sc->hid_desc.wVendorID), letoh16(sc->hid_desc.wProductID),
(char *)ia->ia_cookie);
printf("%s: %d report id%s\n", sc->sc_dev.dv_xname, (sc->sc_nrepid - 1),
(sc->sc_nrepid - 1) > 1 ? "s" : "");
iha.iaa = ia;
iha.parent = sc;
@ -445,17 +477,8 @@ ihidev_hid_command(struct ihidev_softc *sc, int hidcmd, void *arg)
cmd[2] = report_id | rreq->type << 4;
if (rreq->type == I2C_HID_REPORT_TYPE_FEATURE) {
cmd[dataoff++] = htole16(sc->hid_desc.wDataRegister)
& 0xff;
cmd[dataoff++] = htole16(sc->hid_desc.wDataRegister)
>> 8;
} else {
cmd[dataoff++] = htole16(sc->hid_desc.wOutputRegister)
& 0xff;
cmd[dataoff++] = htole16(sc->hid_desc.wOutputRegister)
>> 8;
}
cmd[dataoff++] = htole16(sc->hid_desc.wDataRegister) & 0xff;
cmd[dataoff++] = htole16(sc->hid_desc.wDataRegister) >> 8;
cmd[dataoff++] = report_len & 0xff;
cmd[dataoff++] = report_len >> 8;
@ -515,6 +538,32 @@ ihidev_hid_command(struct ihidev_softc *sc, int hidcmd, void *arg)
break;
}
case I2C_HID_RESET_RESPONSE: {
int i;
uint8_t buf[2] = { 0xff, 0xff };
DPRINTF(("%s: HID command I2C_HID_RESET_RESPONSE\n",
sc->sc_dev.dv_xname));
/*
* 7.2.1 states that a device should respond to RESET in
* less than 5 seconds. Poll instead of tsleep because
* interrupts are blocked during autoconf.
*/
for (i = 0; i < 50; i++) {
ihidev_sleep(sc, 100);
res = iic_exec(sc->sc_tag, I2C_OP_READ_WITH_STOP,
sc->sc_addr, NULL, 0, buf, sizeof(buf), 0);
DPRINTF(("%s: read attempt %d: 0x%x, 0x%x, res: %d\n",
sc->sc_dev.dv_xname, i, buf[0], buf[1], res));
if (!res)
res = (buf[0] != 0x00 || buf[1] != 0x00);
if (!res)
break;
}
break;
}
default:
printf("%s: unknown command %d\n", sc->sc_dev.dv_xname,
hidcmd);
@ -556,7 +605,11 @@ ihidev_reset(struct ihidev_softc *sc)
return (1);
}
ihidev_sleep(sc, 100);
if (ihidev_hid_command(sc, I2C_HID_RESET_RESPONSE, 0)) {
printf("%s: unexpected reset response\n",
sc->sc_dev.dv_xname);
return (1);
}
return (0);
}
@ -570,7 +623,7 @@ ihidev_reset(struct ihidev_softc *sc)
int
ihidev_hid_desc_parse(struct ihidev_softc *sc)
{
int retries = 3;
sc->sc_quirks = ihidev_quirks(sc);
/* must be v01.00 */
if (letoh16(sc->hid_desc.bcdVersion) != 0x0100) {
@ -597,16 +650,8 @@ ihidev_hid_desc_parse(struct ihidev_softc *sc)
return (1);
}
while (retries-- > 0) {
if (ihidev_reset(sc)) {
if (retries == 0)
return(1);
ihidev_sleep(sc, 10);
}
else
break;
}
if (ihidev_reset(sc))
return (1);
sc->sc_reportlen = letoh16(sc->hid_desc.wReportDescLength);
sc->sc_report = malloc(sc->sc_reportlen, M_DEVBUF, M_WAITOK | M_ZERO);
@ -617,6 +662,23 @@ ihidev_hid_desc_parse(struct ihidev_softc *sc)
return (1);
}
if (sc->sc_quirks & IHIDEV_QUIRK_RE_POWER_ON) {
if (ihidev_poweron(sc))
return (1);
/*
* 7.2.8 states that a device shall not respond after
* receiving the power on command, and must transition to
* the power on state in less than 1 second. ihidev_poweron()
* uses a shorter sleep, sufficient for the ON-RESET
* sequence. Here, however, sleep for the full second to
* accommodate cold boot scenarios on affected devices.
*/
ihidev_sleep(sc, 1000);
}
return (0);
}
@ -898,3 +960,34 @@ ihidev_set_report(struct device *dev, int type, int id, void *data, int len)
return 0;
}
int
ihidev_send_report(struct device *dev, int repid, void *data, int data_len)
{
struct ihidev_softc *sc = (struct ihidev_softc *)dev;
uint8_t *finalcmd, cmd[5];
int cmd_len, report_len, res;
cmd_len = sizeof(cmd);
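/* Report length counts the 2-byte length field, the report id and the data. */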
report_len = 2 + 1 + data_len;
cmd[0] = htole16(sc->hid_desc.wOutputRegister) & 0xff;
cmd[1] = htole16(sc->hid_desc.wOutputRegister) >> 8;
cmd[2] = report_len & 0xff;
cmd[3] = report_len >> 8;
cmd[4] = repid;
finalcmd = malloc(cmd_len + data_len, M_DEVBUF, M_NOWAIT | M_ZERO);
if (finalcmd == NULL)
return ENOMEM;
memcpy(finalcmd, cmd, cmd_len);
memcpy(finalcmd + cmd_len, data, data_len);
res = iic_exec(sc->sc_tag, I2C_OP_WRITE_WITH_STOP, sc->sc_addr,
finalcmd, cmd_len + data_len, NULL, 0, 0);
free(finalcmd, M_DEVBUF, cmd_len + data_len);
return res;
}

View file

@ -1,4 +1,4 @@
/* $OpenBSD: ihidev.h,v 1.9 2022/09/03 15:48:16 kettenis Exp $ */
/* $OpenBSD: ihidev.h,v 1.11 2025/01/07 19:26:14 mglocker Exp $ */
/*
* HID-over-i2c driver
*
@ -93,6 +93,8 @@ struct ihidev_softc {
int sc_fastpoll;
struct timeout sc_timer;
int sc_dying;
int sc_quirks;
};
struct ihidev {
@ -135,5 +137,6 @@ int ihidev_ioctl(struct ihidev *, u_long, caddr_t, int, struct proc *);
int ihidev_report_type_conv(int);
int ihidev_set_report(struct device *, int, int, void *, int);
int ihidev_get_report(struct device *, int, int, void *, int);
int ihidev_send_report(struct device *, int, void *, int);
void ihidev_poll(void *);

View file

@ -1,4 +1,4 @@
/* $OpenBSD: ikbd.c,v 1.2 2022/09/03 15:48:16 kettenis Exp $ */
/* $OpenBSD: ikbd.c,v 1.3 2025/01/07 19:26:14 mglocker Exp $ */
/*
* HID-over-i2c keyboard driver
*
@ -36,6 +36,7 @@
struct ikbd_softc {
struct ihidev sc_hdev;
#define sc_ledsize sc_hdev.sc_osize
struct hidkbd sc_kbd;
int sc_spl;
};
@ -167,6 +168,14 @@ ikbd_enable(void *v, int on)
void
ikbd_set_leds(void *v, int leds)
{
struct ikbd_softc *sc = v;
struct hidkbd *kbd = &sc->sc_kbd;
uint8_t res;
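/* Only send an output report if the LED state actually changed. */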
if (sc->sc_ledsize && hidkbd_set_leds(kbd, leds, &res) != 0) {
ihidev_send_report((struct device *)sc->sc_hdev.sc_parent,
sc->sc_hdev.sc_report_id, &res, 1);
}
}
int
@ -181,6 +190,9 @@ ikbd_ioctl(void *v, u_long cmd, caddr_t data, int flag, struct proc *p)
/* XXX: should we set something else? */
*(u_int *)data = WSKBD_TYPE_USB;
return 0;
case WSKBDIO_SETLEDS:
ikbd_set_leds(v, *(int *)data);
return 0;
default:
rc = ihidev_ioctl(&sc->sc_hdev, cmd, data, flag, p);
if (rc != -1)

View file

@ -1,4 +1,4 @@
/* $OpenBSD: aacvar.h,v 1.18 2023/09/11 08:40:25 mvs Exp $ */
/* $OpenBSD: aacvar.h,v 1.19 2024/10/22 21:50:02 jsg Exp $ */
/*-
* Copyright (c) 2000 Michael Smith
@ -235,8 +235,7 @@ typedef struct rwlock aac_lock_t;
/*
* Per-container data structure
*/
struct aac_container
{
struct aac_container {
struct aac_mntobj co_mntobj;
int co_found;
TAILQ_ENTRY(aac_container) co_link;
@ -246,8 +245,7 @@ struct aac_container
* A command control block, one for each corresponding command index of the
* controller.
*/
struct aac_command
{
struct aac_command {
TAILQ_ENTRY(aac_command) cm_link; /* list linkage */
struct aac_softc *cm_sc; /* controller that owns us */
@ -311,8 +309,7 @@ struct aac_qstat {
/*
* Per-controller structure.
*/
struct aac_softc
{
struct aac_softc {
struct device aac_dev;
void *aac_ih;

View file

@ -1,4 +1,4 @@
/* $OpenBSD: adv.h,v 1.7 2020/02/18 20:24:52 krw Exp $ */
/* $OpenBSD: adv.h,v 1.8 2024/10/22 21:50:02 jsg Exp $ */
/* $NetBSD: adv.h,v 1.3 1998/09/26 16:02:56 dante Exp $ */
/*
@ -39,8 +39,7 @@
/******************************************************************************/
struct adv_ccb
{
struct adv_ccb {
ASC_SG_HEAD sghead;
ASC_SCSI_Q scsiq;
@ -67,8 +66,7 @@ typedef struct adv_ccb ADV_CCB;
#define ADV_MAX_CCB 32
struct adv_control
{
struct adv_control {
ADV_CCB ccbs[ADV_MAX_CCB]; /* all our control blocks */
};

View file

@ -1,4 +1,4 @@
/* $OpenBSD: ahci.c,v 1.42 2024/09/04 07:54:52 mglocker Exp $ */
/* $OpenBSD: ahci.c,v 1.43 2024/11/22 09:29:41 jan Exp $ */
/*
* Copyright (c) 2006 David Gwynne <dlg@openbsd.org>
@ -321,6 +321,9 @@ noccc:
sc->sc_atascsi = atascsi_attach(&sc->sc_dev, &aaa);
/* Flush all residual bits of the interrupt status register */
ahci_write(sc, AHCI_REG_IS, ahci_read(sc, AHCI_REG_IS));
/* Enable interrupts */
ahci_write(sc, AHCI_REG_GHC, AHCI_REG_GHC_AE | AHCI_REG_GHC_IE);

View file

@ -1,4 +1,4 @@
/* $OpenBSD: aic79xx.h,v 1.33 2024/09/04 07:54:52 mglocker Exp $ */
/* $OpenBSD: aic79xx.h,v 1.34 2024/10/22 21:50:02 jsg Exp $ */
/*
* Copyright (c) 2004 Milos Urbanek, Kenneth R. Westerback & Marco Peereboom
@ -1034,8 +1034,7 @@ typedef uint8_t ahd_mode_state;
typedef void ahd_callback_t (void *);
struct ahd_completion
{
struct ahd_completion {
uint16_t tag;
uint8_t sg_status;
uint8_t valid_tag;

View file

@ -1,4 +1,4 @@
/* $OpenBSD: bcmgenet.c,v 1.7 2023/11/10 15:51:20 bluhm Exp $ */
/* $OpenBSD: bcmgenet.c,v 1.8 2024/11/05 18:58:59 miod Exp $ */
/* $NetBSD: bcmgenet.c,v 1.3 2020/02/27 17:30:07 jmcneill Exp $ */
/*-
@ -82,7 +82,7 @@ CTASSERT(MCLBYTES == 2048);
bus_space_write_4((sc)->sc_bst, (sc)->sc_bsh, (reg), (val))
struct cfdriver bse_cd = {
0, "bse", DV_IFNET
NULL, "bse", DV_IFNET
};
int

View file

@ -1,4 +1,4 @@
/* $OpenBSD: dc.c,v 1.158 2024/08/31 16:23:09 deraadt Exp $ */
/* $OpenBSD: dc.c,v 1.159 2024/11/05 18:58:59 miod Exp $ */
/*
* Copyright (c) 1997, 1998, 1999
@ -3110,5 +3110,5 @@ dc_detach(struct dc_softc *sc)
}
struct cfdriver dc_cd = {
0, "dc", DV_IFNET
NULL, "dc", DV_IFNET
};

View file

@ -1,4 +1,4 @@
/* $OpenBSD: mtd8xx.c,v 1.35 2022/01/09 05:42:38 jsg Exp $ */
/* $OpenBSD: mtd8xx.c,v 1.36 2024/11/05 18:58:59 miod Exp $ */
/*
* Copyright (c) 2003 Oleg Safiullin <form@pdp11.org.ru>
@ -1059,5 +1059,5 @@ mtd_txeof(struct mtd_softc *sc)
}
struct cfdriver mtd_cd = {
0, "mtd", DV_IFNET
NULL, "mtd", DV_IFNET
};

View file

@ -1,4 +1,4 @@
/* $OpenBSD: psp.c,v 1.5 2024/10/04 16:58:26 bluhm Exp $ */
/* $OpenBSD: psp.c,v 1.15 2024/11/20 13:36:55 bluhm Exp $ */
/*
* Copyright (c) 2023, 2024 Hans-Joerg Hoexer <hshoexer@genua.de>
@ -19,13 +19,15 @@
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pledge.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <machine/bus.h>
#include <sys/proc.h>
#include <uvm/uvm.h>
#include <uvm/uvm_extern.h>
#include <crypto/xform.h>
#include <dev/ic/ccpvar.h>
@ -37,7 +39,12 @@ struct psp_softc {
bus_space_handle_t sc_ioh;
bus_dma_tag_t sc_dmat;
uint32_t sc_capabilities;
bus_size_t sc_reg_inten;
bus_size_t sc_reg_intsts;
bus_size_t sc_reg_cmdresp;
bus_size_t sc_reg_addrlo;
bus_size_t sc_reg_addrhi;
bus_dmamap_t sc_cmd_map;
bus_dma_segment_t sc_cmd_seg;
@ -50,12 +57,23 @@ struct psp_softc {
caddr_t sc_tmr_kva;
struct rwlock sc_lock;
struct mutex psp_lock;
uint32_t sc_flags;
#define PSPF_INITIALIZED 0x1
#define PSPF_UCODELOADED 0x2
#define PSPF_NOUCODE 0x4
u_char *sc_ucodebuf;
size_t sc_ucodelen;
};
int psp_get_pstatus(struct psp_softc *, struct psp_platform_status *);
int psp_init(struct psp_softc *, struct psp_init *);
int psp_reinit(struct psp_softc *);
int psp_match(struct device *, void *, void *);
void psp_attach(struct device *, struct device *, void *);
int psp_load_ucode(struct psp_softc *);
struct cfdriver psp_cd = {
NULL, "psp", DV_DULL
@ -74,8 +92,10 @@ psp_sev_intr(void *arg)
struct psp_softc *sc = (struct psp_softc *)csc->sc_psp;
uint32_t status;
status = bus_space_read_4(sc->sc_iot, sc->sc_ioh, PSP_REG_INTSTS);
bus_space_write_4(sc->sc_iot, sc->sc_ioh, PSP_REG_INTSTS, status);
mtx_enter(&sc->psp_lock);
status = bus_space_read_4(sc->sc_iot, sc->sc_ioh, sc->sc_reg_intsts);
bus_space_write_4(sc->sc_iot, sc->sc_ioh, sc->sc_reg_intsts, status);
mtx_leave(&sc->psp_lock);
if (!(status & PSP_CMDRESP_COMPLETE))
return (0);
@ -97,129 +117,114 @@ psp_attach(struct device *parent, struct device *self, void *aux)
struct psp_softc *sc = (struct psp_softc *)self;
struct psp_attach_args *arg = aux;
struct psp_platform_status pst;
struct psp_init init;
size_t size;
int nsegs;
int nsegs, error;
printf(":");
sc->sc_iot = arg->iot;
sc->sc_ioh = arg->ioh;
sc->sc_dmat = arg->dmat;
sc->sc_capabilities = arg->capabilities;
if (arg->version == 1) {
sc->sc_reg_inten = PSPV1_REG_INTEN;
sc->sc_reg_intsts = PSPV1_REG_INTSTS;
sc->sc_reg_cmdresp = PSPV1_REG_CMDRESP;
sc->sc_reg_addrlo = PSPV1_REG_ADDRLO;
sc->sc_reg_addrhi = PSPV1_REG_ADDRHI;
} else {
sc->sc_reg_inten = PSP_REG_INTEN;
sc->sc_reg_intsts = PSP_REG_INTSTS;
sc->sc_reg_cmdresp = PSP_REG_CMDRESP;
sc->sc_reg_addrlo = PSP_REG_ADDRLO;
sc->sc_reg_addrhi = PSP_REG_ADDRHI;
}
if (arg->version)
printf(" vers %d,", arg->version);
rw_init(&sc->sc_lock, "psp_lock");
mtx_init(&sc->psp_lock, IPL_BIO);
/* create and map SEV command buffer */
sc->sc_cmd_size = size = PAGE_SIZE;
if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
&sc->sc_cmd_map) != 0)
return;
if (bus_dmamem_alloc(sc->sc_dmat, size, 0, 0, &sc->sc_cmd_seg, 1,
&nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT, &sc->sc_cmd_map);
if (error)
goto fail_0;
if (bus_dmamem_map(sc->sc_dmat, &sc->sc_cmd_seg, nsegs, size,
&sc->sc_cmd_kva, BUS_DMA_WAITOK) != 0)
error = bus_dmamem_alloc(sc->sc_dmat, size, 0, 0, &sc->sc_cmd_seg, 1,
&nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO);
if (error)
goto fail_1;
if (bus_dmamap_load(sc->sc_dmat, sc->sc_cmd_map, sc->sc_cmd_kva,
size, NULL, BUS_DMA_WAITOK) != 0)
error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cmd_seg, nsegs, size,
&sc->sc_cmd_kva, BUS_DMA_WAITOK);
if (error)
goto fail_2;
if (psp_get_pstatus(sc, &pst) || pst.state != 0)
error = bus_dmamap_load(sc->sc_dmat, sc->sc_cmd_map, sc->sc_cmd_kva,
size, NULL, BUS_DMA_WAITOK);
if (error)
goto fail_3;
/*
* create and map Trusted Memory Region (TMR); size 1 Mbyte,
* needs to be aligned to 1 Mbyte.
*/
sc->sc_tmr_size = size = PSP_TMR_SIZE;
if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
&sc->sc_tmr_map) != 0)
goto fail_3;
if (bus_dmamem_alloc(sc->sc_dmat, size, size, 0, &sc->sc_tmr_seg, 1,
&nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
if (psp_get_pstatus(sc, &pst)) {
printf(" platform status");
goto fail_4;
}
if (pst.state != PSP_PSTATE_UNINIT) {
printf(" uninitialized state");
goto fail_4;
}
printf(" api %u.%u, build %u, SEV, SEV-ES",
pst.api_major, pst.api_minor, pst.cfges_build >> 24);
if (bus_dmamem_map(sc->sc_dmat, &sc->sc_tmr_seg, nsegs, size,
&sc->sc_tmr_kva, BUS_DMA_WAITOK) != 0)
goto fail_5;
if (bus_dmamap_load(sc->sc_dmat, sc->sc_tmr_map, sc->sc_tmr_kva,
size, NULL, BUS_DMA_WAITOK) != 0)
goto fail_6;
memset(&init, 0, sizeof(init));
init.enable_es = 1;
init.tmr_length = PSP_TMR_SIZE;
init.tmr_paddr = sc->sc_tmr_map->dm_segs[0].ds_addr;
if (psp_init(sc, &init))
goto fail_7;
printf(": SEV");
psp_get_pstatus(sc, &pst);
if ((pst.state == 1) && (pst.cfges_build & 0x1))
printf(", SEV-ES");
/* enable interrupts */
bus_space_write_4(sc->sc_iot, sc->sc_ioh, sc->sc_reg_inten, -1);
printf("\n");
return;
fail_7:
bus_dmamap_unload(sc->sc_dmat, sc->sc_tmr_map);
fail_6:
bus_dmamem_unmap(sc->sc_dmat, sc->sc_tmr_kva, size);
fail_5:
bus_dmamem_free(sc->sc_dmat, &sc->sc_tmr_seg, 1);
fail_4:
bus_dmamap_destroy(sc->sc_dmat, sc->sc_tmr_map);
fail_3:
bus_dmamap_unload(sc->sc_dmat, sc->sc_cmd_map);
fail_2:
fail_3:
bus_dmamem_unmap(sc->sc_dmat, sc->sc_cmd_kva, size);
fail_2:
bus_dmamem_free(sc->sc_dmat, &sc->sc_cmd_seg, nsegs);
fail_1:
bus_dmamem_free(sc->sc_dmat, &sc->sc_cmd_seg, 1);
fail_0:
bus_dmamap_destroy(sc->sc_dmat, sc->sc_cmd_map);
printf("\n");
return;
fail_0:
printf(" failed\n");
}
static int
ccp_wait(struct psp_softc *sc, uint32_t *status, int poll)
{
uint32_t cmdword;
int count;
int count, error;
MUTEX_ASSERT_LOCKED(&sc->psp_lock);
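/* With interrupts unavailable, poll the response register; otherwise sleep. */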
if (poll) {
count = 0;
while (count++ < 100) {
while (count++ < 400) {
cmdword = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
PSP_REG_CMDRESP);
sc->sc_reg_cmdresp);
if (cmdword & PSP_CMDRESP_RESPONSE)
goto done;
delay(5000);
}
/* timeout */
return (1);
return (EWOULDBLOCK);
}
if (tsleep_nsec(sc, PWAIT, "psp", SEC_TO_NSEC(2)) == EWOULDBLOCK)
return (1);
error = msleep_nsec(sc, &sc->psp_lock, PWAIT, "psp", SEC_TO_NSEC(2));
if (error)
return (error);
cmdword = bus_space_read_4(sc->sc_iot, sc->sc_ioh, sc->sc_reg_cmdresp);
done:
if (status) {
*status = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
PSP_REG_CMDRESP);
}
if (status != NULL)
*status = cmdword;
return (0);
}
@ -227,6 +232,7 @@ static int
ccp_docmd(struct psp_softc *sc, int cmd, uint64_t paddr)
{
uint32_t plo, phi, cmdword, status;
int error;
plo = ((paddr >> 0) & 0xffffffff);
phi = ((paddr >> 32) & 0xffffffff);
@ -234,17 +240,20 @@ ccp_docmd(struct psp_softc *sc, int cmd, uint64_t paddr)
if (!cold)
cmdword |= PSP_CMDRESP_IOC;
bus_space_write_4(sc->sc_iot, sc->sc_ioh, PSP_REG_ADDRLO, plo);
bus_space_write_4(sc->sc_iot, sc->sc_ioh, PSP_REG_ADDRHI, phi);
bus_space_write_4(sc->sc_iot, sc->sc_ioh, PSP_REG_CMDRESP, cmdword);
mtx_enter(&sc->psp_lock);
bus_space_write_4(sc->sc_iot, sc->sc_ioh, sc->sc_reg_addrlo, plo);
bus_space_write_4(sc->sc_iot, sc->sc_ioh, sc->sc_reg_addrhi, phi);
bus_space_write_4(sc->sc_iot, sc->sc_ioh, sc->sc_reg_cmdresp, cmdword);
if (ccp_wait(sc, &status, cold))
return (1);
error = ccp_wait(sc, &status, cold);
mtx_leave(&sc->psp_lock);
if (error)
return (error);
/* Did the PSP send a response code? */
if (status & PSP_CMDRESP_RESPONSE) {
if ((status & PSP_STATUS_MASK) != PSP_STATUS_SUCCESS)
return (1);
return (EIO);
}
return (0);
@ -254,7 +263,7 @@ int
psp_init(struct psp_softc *sc, struct psp_init *uinit)
{
struct psp_init *init;
int ret;
int error;
init = (struct psp_init *)sc->sc_cmd_kva;
bzero(init, sizeof(*init));
@ -263,11 +272,102 @@ psp_init(struct psp_softc *sc, struct psp_init *uinit)
init->tmr_paddr = uinit->tmr_paddr;
init->tmr_length = uinit->tmr_length;
ret = ccp_docmd(sc, PSP_CMD_INIT, sc->sc_cmd_map->dm_segs[0].ds_addr);
if (ret != 0)
return (EIO);
error = ccp_docmd(sc, PSP_CMD_INIT, sc->sc_cmd_map->dm_segs[0].ds_addr);
if (error)
return (error);
wbinvd_on_all_cpus();
wbinvd_on_all_cpus_acked();
sc->sc_flags |= PSPF_INITIALIZED;
return (0);
}
int
psp_reinit(struct psp_softc *sc)
{
struct psp_init init;
size_t size;
int nsegs, error;
if (sc->sc_flags & PSPF_INITIALIZED) {
printf("%s: invalid flags 0x%x\n", __func__, sc->sc_flags);
return (EBUSY);
}
if (sc->sc_tmr_map != NULL)
return (EBUSY);
/*
* create and map Trusted Memory Region (TMR); size 1 Mbyte,
* needs to be aligned to 1 Mbyte.
*/
sc->sc_tmr_size = size = PSP_TMR_SIZE;
error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT, &sc->sc_tmr_map);
if (error)
goto fail_0;
error = bus_dmamem_alloc(sc->sc_dmat, size, size, 0, &sc->sc_tmr_seg, 1,
&nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO);
if (error)
goto fail_1;
error = bus_dmamem_map(sc->sc_dmat, &sc->sc_tmr_seg, nsegs, size,
&sc->sc_tmr_kva, BUS_DMA_WAITOK);
if (error)
goto fail_2;
error = bus_dmamap_load(sc->sc_dmat, sc->sc_tmr_map, sc->sc_tmr_kva,
size, NULL, BUS_DMA_WAITOK);
if (error)
goto fail_3;
memset(&init, 0, sizeof(init));
init.enable_es = 1;
init.tmr_length = PSP_TMR_SIZE;
init.tmr_paddr = sc->sc_tmr_map->dm_segs[0].ds_addr;
if ((error = psp_init(sc, &init)) != 0)
goto fail_4;
return (0);
fail_4:
bus_dmamap_unload(sc->sc_dmat, sc->sc_tmr_map);
fail_3:
bus_dmamem_unmap(sc->sc_dmat, sc->sc_tmr_kva, size);
fail_2:
bus_dmamem_free(sc->sc_dmat, &sc->sc_tmr_seg, nsegs);
fail_1:
bus_dmamap_destroy(sc->sc_dmat, sc->sc_tmr_map);
fail_0:
return (error);
}
int
psp_shutdown(struct psp_softc *sc)
{
int error;
if (sc->sc_tmr_map == NULL)
return (EINVAL);
error = ccp_docmd(sc, PSP_CMD_SHUTDOWN, 0x0);
if (error)
return (error);
/* wbinvd right after SHUTDOWN */
wbinvd_on_all_cpus_acked();
/* release TMR */
bus_dmamap_unload(sc->sc_dmat, sc->sc_tmr_map);
bus_dmamem_unmap(sc->sc_dmat, sc->sc_tmr_kva, sc->sc_tmr_size);
bus_dmamem_free(sc->sc_dmat, &sc->sc_tmr_seg, 1);
bus_dmamap_destroy(sc->sc_dmat, sc->sc_tmr_map);
sc->sc_tmr_map = NULL;
/* reset flags */
sc->sc_flags = 0;
return (0);
}
@ -275,17 +375,16 @@ psp_init(struct psp_softc *sc, struct psp_init *uinit)
int
psp_get_pstatus(struct psp_softc *sc, struct psp_platform_status *ustatus)
{
struct psp_platform_status *status;
int ret;
struct psp_platform_status *status;
int error;
status = (struct psp_platform_status *)sc->sc_cmd_kva;
bzero(status, sizeof(*status));
ret = ccp_docmd(sc, PSP_CMD_PLATFORMSTATUS,
error = ccp_docmd(sc, PSP_CMD_PLATFORMSTATUS,
sc->sc_cmd_map->dm_segs[0].ds_addr);
if (ret != 0)
return (EIO);
if (error)
return (error);
bcopy(status, ustatus, sizeof(*ustatus));
@ -295,54 +394,47 @@ psp_get_pstatus(struct psp_softc *sc, struct psp_platform_status *ustatus)
int
psp_df_flush(struct psp_softc *sc)
{
int ret;
int error;
wbinvd_on_all_cpus();
wbinvd_on_all_cpus_acked();
ret = ccp_docmd(sc, PSP_CMD_DF_FLUSH, 0x0);
error = ccp_docmd(sc, PSP_CMD_DF_FLUSH, 0x0);
if (ret != 0)
return (EIO);
return (0);
return (error);
}
int
psp_decommission(struct psp_softc *sc, struct psp_decommission *udecom)
{
struct psp_decommission *decom;
int ret;
int error;
decom = (struct psp_decommission *)sc->sc_cmd_kva;
bzero(decom, sizeof(*decom));
decom->handle = udecom->handle;
ret = ccp_docmd(sc, PSP_CMD_DECOMMISSION,
error = ccp_docmd(sc, PSP_CMD_DECOMMISSION,
sc->sc_cmd_map->dm_segs[0].ds_addr);
if (ret != 0)
return (EIO);
return (0);
return (error);
}
int
psp_get_gstatus(struct psp_softc *sc, struct psp_guest_status *ustatus)
{
struct psp_guest_status *status;
int ret;
int error;
status = (struct psp_guest_status *)sc->sc_cmd_kva;
bzero(status, sizeof(*status));
status->handle = ustatus->handle;
ret = ccp_docmd(sc, PSP_CMD_GUESTSTATUS,
error = ccp_docmd(sc, PSP_CMD_GUESTSTATUS,
sc->sc_cmd_map->dm_segs[0].ds_addr);
if (ret != 0)
return (EIO);
if (error)
return (error);
ustatus->policy = status->policy;
ustatus->asid = status->asid;
@ -355,7 +447,7 @@ int
psp_launch_start(struct psp_softc *sc, struct psp_launch_start *ustart)
{
struct psp_launch_start *start;
int ret;
int error;
start = (struct psp_launch_start *)sc->sc_cmd_kva;
bzero(start, sizeof(*start));
@ -363,11 +455,10 @@ psp_launch_start(struct psp_softc *sc, struct psp_launch_start *ustart)
start->handle = ustart->handle;
start->policy = ustart->policy;
ret = ccp_docmd(sc, PSP_CMD_LAUNCH_START,
error = ccp_docmd(sc, PSP_CMD_LAUNCH_START,
sc->sc_cmd_map->dm_segs[0].ds_addr);
if (ret != 0)
return (EIO);
if (error)
return (error);
/* If requested, return new handle. */
if (ustart->handle == 0)
@ -382,9 +473,9 @@ psp_launch_update_data(struct psp_softc *sc,
{
struct psp_launch_update_data *ludata;
pmap_t pmap;
vaddr_t v, next, end;
vaddr_t v, next, start, end;
size_t size, len, off;
int ret;
int error;
/* Ensure AES_XTS_BLOCKSIZE alignment and multiplicity. */
if ((ulud->paddr & (AES_XTS_BLOCKSIZE - 1)) != 0 ||
@ -397,7 +488,7 @@ psp_launch_update_data(struct psp_softc *sc,
ludata->handle = ulud->handle;
/* Drain caches before we encrypt memory. */
wbinvd_on_all_cpus();
wbinvd_on_all_cpus_acked();
/*
* Launch update one physical page at a time. We could
@ -407,39 +498,51 @@ psp_launch_update_data(struct psp_softc *sc,
* to system physical address.
*/
pmap = vm_map_pmap(&p->p_vmspace->vm_map);
start = ulud->paddr;
size = ulud->length;
end = ulud->paddr + ulud->length;
end = start + size;
/* Wire mapping. */
error = uvm_map_pageable(&p->p_vmspace->vm_map, start, end, FALSE, 0);
if (error)
goto out;
for (v = ulud->paddr; v < end; v = next) {
off = v & PAGE_MASK;
len = MIN(PAGE_SIZE - off, size);
/* Wire mapping. */
if (uvm_map_pageable(&p->p_vmspace->vm_map, v, v+len, FALSE, 0))
return (EINVAL);
if (!pmap_extract(pmap, v, (paddr_t *)&ludata->paddr))
return (EINVAL);
if (!pmap_extract(pmap, v, (paddr_t *)&ludata->paddr)) {
error = EINVAL;
goto out;
}
ludata->length = len;
ret = ccp_docmd(sc, PSP_CMD_LAUNCH_UPDATE_DATA,
error = ccp_docmd(sc, PSP_CMD_LAUNCH_UPDATE_DATA,
sc->sc_cmd_map->dm_segs[0].ds_addr);
if (ret != 0)
return (EIO);
if (error)
goto out;
size -= len;
next = v + len;
}
return (0);
out:
/*
* Unwire again. Ignore any new error: either an error has already
* been set, or the PSP command has already succeeded.
*/
(void) uvm_map_pageable(&p->p_vmspace->vm_map, start, end, TRUE, 0);
return (error);
}
int
psp_launch_measure(struct psp_softc *sc, struct psp_launch_measure *ulm)
{
struct psp_launch_measure *lm;
int ret;
uint64_t paddr;
struct psp_launch_measure *lm;
uint64_t paddr;
int error;
if (ulm->measure_len != sizeof(ulm->psp_measure))
return (EINVAL);
@ -453,10 +556,11 @@ psp_launch_measure(struct psp_softc *sc, struct psp_launch_measure *ulm)
paddr + offsetof(struct psp_launch_measure, psp_measure);
lm->measure_len = sizeof(lm->psp_measure);
ret = ccp_docmd(sc, PSP_CMD_LAUNCH_MEASURE, paddr);
if (ret != 0 || lm->measure_len != ulm->measure_len)
return (EIO);
error = ccp_docmd(sc, PSP_CMD_LAUNCH_MEASURE, paddr);
if (error)
return (error);
if (lm->measure_len != ulm->measure_len)
return (ERANGE);
bcopy(&lm->psp_measure, &ulm->psp_measure, ulm->measure_len);
@ -466,29 +570,26 @@ psp_launch_measure(struct psp_softc *sc, struct psp_launch_measure *ulm)
int
psp_launch_finish(struct psp_softc *sc, struct psp_launch_finish *ulf)
{
struct psp_launch_finish *lf;
int ret;
struct psp_launch_finish *lf;
int error;
lf = (struct psp_launch_finish *)sc->sc_cmd_kva;
bzero(lf, sizeof(*lf));
lf->handle = ulf->handle;
ret = ccp_docmd(sc, PSP_CMD_LAUNCH_FINISH,
error = ccp_docmd(sc, PSP_CMD_LAUNCH_FINISH,
sc->sc_cmd_map->dm_segs[0].ds_addr);
if (ret != 0)
return (EIO);
return (0);
return (error);
}
int
psp_attestation(struct psp_softc *sc, struct psp_attestation *uat)
{
struct psp_attestation *at;
int ret;
uint64_t paddr;
int error;
if (uat->attest_len != sizeof(uat->psp_report))
return (EINVAL);
@ -503,10 +604,11 @@ psp_attestation(struct psp_softc *sc, struct psp_attestation *uat)
bcopy(uat->attest_nonce, at->attest_nonce, sizeof(at->attest_nonce));
at->attest_len = sizeof(at->psp_report);
ret = ccp_docmd(sc, PSP_CMD_ATTESTATION, paddr);
if (ret != 0 || at->attest_len != uat->attest_len)
return (EIO);
error = ccp_docmd(sc, PSP_CMD_ATTESTATION, paddr);
if (error)
return (error);
if (at->attest_len != uat->attest_len)
return (ERANGE);
bcopy(&at->psp_report, &uat->psp_report, uat->attest_len);
@ -517,7 +619,7 @@ int
psp_activate(struct psp_softc *sc, struct psp_activate *uact)
{
struct psp_activate *act;
int ret;
int error;
act = (struct psp_activate *)sc->sc_cmd_kva;
bzero(act, sizeof(*act));
@ -525,33 +627,78 @@ psp_activate(struct psp_softc *sc, struct psp_activate *uact)
act->handle = uact->handle;
act->asid = uact->asid;
ret = ccp_docmd(sc, PSP_CMD_ACTIVATE,
error = ccp_docmd(sc, PSP_CMD_ACTIVATE,
sc->sc_cmd_map->dm_segs[0].ds_addr);
if (ret != 0)
return (EIO);
return (0);
return (error);
}
int
psp_deactivate(struct psp_softc *sc, struct psp_deactivate *udeact)
{
struct psp_deactivate *deact;
int ret;
int error;
deact = (struct psp_deactivate *)sc->sc_cmd_kva;
bzero(deact, sizeof(*deact));
deact->handle = udeact->handle;
ret = ccp_docmd(sc, PSP_CMD_DEACTIVATE,
error = ccp_docmd(sc, PSP_CMD_DEACTIVATE,
sc->sc_cmd_map->dm_segs[0].ds_addr);
if (ret != 0)
return (EIO);
return (error);
}
return (0);
int
psp_downloadfirmware(struct psp_softc *sc, struct psp_downloadfirmware *udlfw)
{
struct psp_downloadfirmware *dlfw;
bus_dmamap_t map;
bus_dma_segment_t seg;
caddr_t kva;
int nsegs, error;
dlfw = (struct psp_downloadfirmware *)sc->sc_cmd_kva;
bzero(dlfw, sizeof(*dlfw));
error = bus_dmamap_create(sc->sc_dmat, udlfw->fw_len, 1, udlfw->fw_len,
0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT, &map);
if (error)
goto fail_0;
error = bus_dmamem_alloc(sc->sc_dmat, udlfw->fw_len, 0, 0, &seg, 1,
&nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO);
if (error)
goto fail_1;
error = bus_dmamem_map(sc->sc_dmat, &seg, nsegs, udlfw->fw_len, &kva,
BUS_DMA_WAITOK);
if (error)
goto fail_2;
error = bus_dmamap_load(sc->sc_dmat, map, kva, udlfw->fw_len, NULL,
BUS_DMA_WAITOK);
if (error)
goto fail_3;
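/* fw_paddr holds the kernel address of the ucode buffer (see psp_load_ucode). */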
bcopy((void *)udlfw->fw_paddr, kva, udlfw->fw_len);
dlfw->fw_paddr = map->dm_segs[0].ds_addr;
dlfw->fw_len = map->dm_segs[0].ds_len;
error = ccp_docmd(sc, PSP_CMD_DOWNLOADFIRMWARE,
sc->sc_cmd_map->dm_segs[0].ds_addr);
bus_dmamap_unload(sc->sc_dmat, map);
fail_3:
bus_dmamem_unmap(sc->sc_dmat, kva, udlfw->fw_len);
fail_2:
bus_dmamem_free(sc->sc_dmat, &seg, nsegs);
fail_1:
bus_dmamap_destroy(sc->sc_dmat, map);
fail_0:
return (error);
}
int
@ -559,20 +706,20 @@ psp_guest_shutdown(struct psp_softc *sc, struct psp_guest_shutdown *ugshutdown)
{
struct psp_deactivate deact;
struct psp_decommission decom;
int ret;
int error;
bzero(&deact, sizeof(deact));
deact.handle = ugshutdown->handle;
if ((ret = psp_deactivate(sc, &deact)) != 0)
return (ret);
if ((error = psp_deactivate(sc, &deact)) != 0)
return (error);
if ((ret = psp_df_flush(sc)) != 0)
return (ret);
if ((error = psp_df_flush(sc)) != 0)
return (error);
bzero(&decom, sizeof(decom));
decom.handle = ugshutdown->handle;
if ((ret = psp_decommission(sc, &decom)) != 0)
return (ret);
if ((error = psp_decommission(sc, &decom)) != 0)
return (error);
return (0);
}
@ -581,17 +728,16 @@ int
psp_snp_get_pstatus(struct psp_softc *sc,
struct psp_snp_platform_status *ustatus)
{
struct psp_snp_platform_status *status;
int ret;
struct psp_snp_platform_status *status;
int error;
status = (struct psp_snp_platform_status *)sc->sc_cmd_kva;
bzero(status, sizeof(*status));
ret = ccp_docmd(sc, PSP_CMD_SNP_PLATFORMSTATUS,
error = ccp_docmd(sc, PSP_CMD_SNP_PLATFORMSTATUS,
sc->sc_cmd_map->dm_segs[0].ds_addr);
if (ret != 0)
return (EIO);
if (error)
return (error);
bcopy(status, ustatus, sizeof(*ustatus));
@ -607,6 +753,12 @@ pspopen(dev_t dev, int flag, int mode, struct proc *p)
if (sc == NULL)
return (ENXIO);
/* Ignore error, proceed without new firmware. */
(void) psp_load_ucode(sc);
if (!(sc->sc_flags & PSPF_INITIALIZED))
return (psp_reinit(sc));
return (0);
}
@ -625,65 +777,77 @@ pspclose(dev_t dev, int flag, int mode, struct proc *p)
int
pspioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
struct psp_softc *sc;
int ret;
struct psp_softc *sc;
int error;
sc = (struct psp_softc *)device_lookup(&psp_cd, minor(dev));
if (sc == NULL)
return (ENXIO);
KERNEL_UNLOCK();
rw_enter_write(&sc->sc_lock);
switch (cmd) {
case PSP_IOC_INIT:
error = psp_reinit(sc);
break;
case PSP_IOC_SHUTDOWN:
error = psp_shutdown(sc);
break;
case PSP_IOC_GET_PSTATUS:
ret = psp_get_pstatus(sc, (struct psp_platform_status *)data);
error = psp_get_pstatus(sc, (struct psp_platform_status *)data);
break;
case PSP_IOC_DF_FLUSH:
ret = psp_df_flush(sc);
error = psp_df_flush(sc);
break;
case PSP_IOC_DECOMMISSION:
ret = psp_decommission(sc, (struct psp_decommission *)data);
error = psp_decommission(sc, (struct psp_decommission *)data);
break;
case PSP_IOC_GET_GSTATUS:
ret = psp_get_gstatus(sc, (struct psp_guest_status *)data);
error = psp_get_gstatus(sc, (struct psp_guest_status *)data);
break;
case PSP_IOC_LAUNCH_START:
ret = psp_launch_start(sc, (struct psp_launch_start *)data);
error = psp_launch_start(sc, (struct psp_launch_start *)data);
break;
case PSP_IOC_LAUNCH_UPDATE_DATA:
ret = psp_launch_update_data(sc,
error = psp_launch_update_data(sc,
(struct psp_launch_update_data *)data, p);
break;
case PSP_IOC_LAUNCH_MEASURE:
ret = psp_launch_measure(sc, (struct psp_launch_measure *)data);
error = psp_launch_measure(sc,
(struct psp_launch_measure *)data);
break;
case PSP_IOC_LAUNCH_FINISH:
ret = psp_launch_finish(sc, (struct psp_launch_finish *)data);
error = psp_launch_finish(sc, (struct psp_launch_finish *)data);
break;
case PSP_IOC_ATTESTATION:
ret = psp_attestation(sc, (struct psp_attestation *)data);
error = psp_attestation(sc, (struct psp_attestation *)data);
break;
case PSP_IOC_ACTIVATE:
ret = psp_activate(sc, (struct psp_activate *)data);
error = psp_activate(sc, (struct psp_activate *)data);
break;
case PSP_IOC_DEACTIVATE:
ret = psp_deactivate(sc, (struct psp_deactivate *)data);
error = psp_deactivate(sc, (struct psp_deactivate *)data);
break;
case PSP_IOC_GUEST_SHUTDOWN:
ret = psp_guest_shutdown(sc, (struct psp_guest_shutdown *)data);
error = psp_guest_shutdown(sc,
(struct psp_guest_shutdown *)data);
break;
case PSP_IOC_SNP_GET_PSTATUS:
ret = psp_snp_get_pstatus(sc,
error = psp_snp_get_pstatus(sc,
(struct psp_snp_platform_status *)data);
break;
default:
ret = ENOTTY;
error = ENOTTY;
break;
}
rw_exit_write(&sc->sc_lock);
return (ret);
KERNEL_LOCK();
return (error);
}
int
@ -721,3 +885,72 @@ pspsubmatch(struct device *parent, void *match, void *aux)
return (0);
return ((*cf->cf_attach->ca_match)(parent, cf, aux));
}
struct ucode {
uint8_t family;
uint8_t model;
const char *uname;
} const psp_ucode_table[] = {
{ 0x17, 0x0, "amdsev/amd_sev_fam17h_model0xh.sbin" },
{ 0x17, 0x3, "amdsev/amd_sev_fam17h_model3xh.sbin" },
{ 0x19, 0x0, "amdsev/amd_sev_fam19h_model0xh.sbin" },
{ 0x19, 0x1, "amdsev/amd_sev_fam19h_model1xh.sbin" },
{ 0, 0, NULL }
};
int
psp_load_ucode(struct psp_softc *sc)
{
struct psp_downloadfirmware dlfw;
struct cpu_info *ci = &cpu_info_primary;
const struct ucode *uc;
uint8_t family, model;
int error;
if ((sc->sc_flags & PSPF_UCODELOADED) ||
(sc->sc_flags & PSPF_NOUCODE) ||
(sc->sc_flags & PSPF_INITIALIZED))
return (EBUSY);
family = ci->ci_family;
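/* Firmware images cover a model range; match on the upper model nibble only. */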
model = (ci->ci_model & 0xf0) >> 4;
for (uc = psp_ucode_table; uc->uname; uc++) {
if ((uc->family == family) && (uc->model == model))
break;
}
if (uc->uname == NULL) {
printf("%s: no firmware found, CPU family 0x%x model 0x%x\n",
sc->sc_dev.dv_xname, family, model);
sc->sc_flags |= PSPF_NOUCODE;
return (EOPNOTSUPP);
}
error = loadfirmware(uc->uname, &sc->sc_ucodebuf, &sc->sc_ucodelen);
if (error) {
if (error != ENOENT) {
printf("%s: error %d, could not read firmware %s\n",
sc->sc_dev.dv_xname, error, uc->uname);
}
sc->sc_flags |= PSPF_NOUCODE;
return (error);
}
bzero(&dlfw, sizeof(dlfw));
dlfw.fw_len = sc->sc_ucodelen;
dlfw.fw_paddr = (uint64_t)sc->sc_ucodebuf;
if ((error = psp_downloadfirmware(sc, &dlfw)) != 0)
goto out;
sc->sc_flags |= PSPF_UCODELOADED;
out:
if (sc->sc_ucodebuf) {
free(sc->sc_ucodebuf, M_DEVBUF, sc->sc_ucodelen);
sc->sc_ucodebuf = NULL;
sc->sc_ucodelen = 0;
}
return (error);
}

View file

@ -1,4 +1,4 @@
/* $OpenBSD: pspvar.h,v 1.2 2024/09/04 07:45:08 jsg Exp $ */
/* $OpenBSD: pspvar.h,v 1.6 2024/11/05 13:28:35 bluhm Exp $ */
/*
* Copyright (c) 2023, 2024 Hans-Joerg Hoexer <hshoexer@genua.de>
@ -19,6 +19,13 @@
#include <sys/ioctl.h>
/* AMD 17h */
#define PSPV1_REG_INTEN 0x10610
#define PSPV1_REG_INTSTS 0x10614
#define PSPV1_REG_CMDRESP 0x10580
#define PSPV1_REG_ADDRLO 0x105e0
#define PSPV1_REG_ADDRHI 0x105e4
#define PSPV1_REG_CAPABILITIES 0x105fc
#define PSP_REG_INTEN 0x10690
#define PSP_REG_INTSTS 0x10694
#define PSP_REG_CMDRESP 0x10980
@ -69,8 +76,10 @@
/* Selection of PSP commands of the SEV API Version 0.24 */
#define PSP_CMD_INIT 0x1
#define PSP_CMD_SHUTDOWN 0x2
#define PSP_CMD_PLATFORMSTATUS 0x4
#define PSP_CMD_DF_FLUSH 0xa
#define PSP_CMD_DOWNLOADFIRMWARE 0xb
#define PSP_CMD_DECOMMISSION 0x20
#define PSP_CMD_ACTIVATE 0x21
#define PSP_CMD_DEACTIVATE 0x22
@ -207,6 +216,11 @@ struct psp_init {
uint32_t tmr_length;
} __packed;
struct psp_downloadfirmware {
/* Input parameters for PSP_CMD_DOWNLOADFIRMWARE */
uint64_t fw_paddr;
uint32_t fw_len;
} __packed;
struct psp_guest_shutdown {
/* Input parameter for PSP_CMD_GUEST_SHUTDOWN */
@ -242,6 +256,8 @@ struct psp_snp_platform_status {
#define PSP_IOC_ACTIVATE _IOW('P', 9, struct psp_activate)
#define PSP_IOC_DEACTIVATE _IOW('P', 10, struct psp_deactivate)
#define PSP_IOC_SNP_GET_PSTATUS _IOR('P', 11, struct psp_snp_platform_status)
#define PSP_IOC_INIT _IO('P', 12)
#define PSP_IOC_SHUTDOWN _IO('P', 13)
#define PSP_IOC_GUEST_SHUTDOWN _IOW('P', 255, struct psp_guest_shutdown)
#ifdef _KERNEL
@ -252,10 +268,18 @@ struct psp_attach_args {
bus_dma_tag_t dmat;
uint32_t capabilities;
int version;
};
int pspsubmatch(struct device *, void *, void *);
int pspprint(void *aux, const char *pnp);
int psp_sev_intr(void *);
struct ccp_softc;
struct pci_attach_args;
int psp_pci_match(struct ccp_softc *, struct pci_attach_args *);
void psp_pci_intr_map(struct ccp_softc *, struct pci_attach_args *);
void psp_pci_attach(struct ccp_softc *, struct pci_attach_args *);
#endif /* _KERNEL */

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -1,4 +1,4 @@
/* $OpenBSD: qwzvar.h,v 1.6 2024/08/20 21:24:15 patrick Exp $ */
/* $OpenBSD: qwzvar.h,v 1.11 2024/12/22 23:30:27 patrick Exp $ */
/*
* Copyright (c) 2018-2019 The Linux Foundation.
@ -101,7 +101,9 @@ struct ath12k_hal_tcl_to_wbm_rbm_map {
enum hal_rx_buf_return_buf_manager {
HAL_RX_BUF_RBM_WBM_IDLE_BUF_LIST,
HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST,
HAL_RX_BUF_RBM_WBM_DEV0_IDLE_DESC_LIST,
HAL_RX_BUF_RBM_WBM_DEV1_IDLE_DESC_LIST,
HAL_RX_BUF_RBM_WBM_DEV2_IDLE_DESC_LIST,
HAL_RX_BUF_RBM_FW_BM,
HAL_RX_BUF_RBM_SW0_BM,
HAL_RX_BUF_RBM_SW1_BM,
@ -167,6 +169,9 @@ struct hal_tx_status {
struct hal_ops {
int (*create_srng_config)(struct qwz_softc *);
uint16_t (*rxdma_ring_wmask_rx_mpdu_start)(void);
uint32_t (*rxdma_ring_wmask_rx_msdu_end)(void);
const struct hal_rx_ops *(*get_hal_rx_compact_ops)(void);
const struct ath12k_hal_tcl_to_wbm_rbm_map *tcl_to_wbm_rbm_map;
};
@ -218,17 +223,18 @@ struct ath12k_hw_params {
bool supports_monitor;
bool full_monitor_mode;
#endif
bool reoq_lut_support;
bool supports_shadow_regs;
bool idle_ps;
bool supports_sta_ps;
uint32_t num_vdevs;
uint32_t num_peers;
bool supports_suspend;
uint32_t hal_desc_sz;
bool fix_l1ss;
bool credit_flow;
uint8_t max_tx_ring;
uint32_t num_tcl_banks;
uint32_t max_tx_ring;
const struct ath12k_hw_hal_params *hal_params;
void (*wmi_init)(struct qwz_softc *sc,
struct wmi_resource_config_arg *config);
const struct hal_ops *hal_ops;
uint64_t qmi_cnss_feature_bitmap;
#if notyet
@ -256,14 +262,12 @@ struct ath12k_hw_params {
struct ath12k_hw_ops {
uint8_t (*get_hw_mac_from_pdev_id)(int pdev_id);
void (*wmi_init_config)(struct qwz_softc *sc,
struct target_resource_config *config);
int (*mac_id_to_pdev_id)(struct ath12k_hw_params *hw, int mac_id);
int (*mac_id_to_srng_id)(struct ath12k_hw_params *hw, int mac_id);
#if notyet
void (*tx_mesh_enable)(struct ath12k_base *ab,
struct hal_tcl_data_cmd *tcl_cmd);
#endif
bool (*dp_srng_is_tx_comp_ring)(int ring_num);
};
struct hal_rx_ops {
int (*rx_desc_get_first_msdu)(struct hal_rx_desc *desc);
#if notyet
bool (*rx_desc_get_last_msdu)(struct hal_rx_desc *desc);
@ -275,7 +279,6 @@ struct ath12k_hw_ops {
uint8_t (*rx_desc_get_decap_type)(struct hal_rx_desc *desc);
#ifdef notyet
uint8_t (*rx_desc_get_mesh_ctl)(struct hal_rx_desc *desc);
bool (*rx_desc_get_ldpc_support)(struct hal_rx_desc *desc);
bool (*rx_desc_get_mpdu_seq_ctl_vld)(struct hal_rx_desc *desc);
bool (*rx_desc_get_mpdu_fc_valid)(struct hal_rx_desc *desc);
uint16_t (*rx_desc_get_mpdu_start_seq_no)(struct hal_rx_desc *desc);
@ -287,27 +290,46 @@ struct ath12k_hw_ops {
uint8_t (*rx_desc_get_msdu_rx_bw)(struct hal_rx_desc *desc);
#endif
uint32_t (*rx_desc_get_msdu_freq)(struct hal_rx_desc *desc);
#ifdef notyet
uint8_t (*rx_desc_get_msdu_pkt_type)(struct hal_rx_desc *desc);
uint8_t (*rx_desc_get_msdu_nss)(struct hal_rx_desc *desc);
uint8_t (*rx_desc_get_mpdu_tid)(struct hal_rx_desc *desc);
uint16_t (*rx_desc_get_mpdu_peer_id)(struct hal_rx_desc *desc);
void (*rx_desc_copy_attn_end_tlv)(struct hal_rx_desc *fdesc,
struct hal_rx_desc *ldesc);
void (*rx_desc_copy_end_tlv)(struct hal_rx_desc *fdesc,
struct hal_rx_desc *ldesc);
uint32_t (*rx_desc_get_mpdu_start_tag)(struct hal_rx_desc *desc);
uint32_t (*rx_desc_get_mpdu_ppdu_id)(struct hal_rx_desc *desc);
void (*rx_desc_set_msdu_len)(struct hal_rx_desc *desc, uint16_t len);
#endif
struct rx_attention *(*rx_desc_get_attention)(struct hal_rx_desc *desc);
#ifdef notyet
uint8_t *(*rx_desc_get_msdu_payload)(struct hal_rx_desc *desc);
uint16_t (*mpdu_info_get_peerid)(uint8_t *tlv_data);
uint32_t (*rx_desc_get_mpdu_start_offset)(void);
uint32_t (*rx_desc_get_msdu_end_offset)(void);
bool (*rx_desc_mac_addr2_valid)(struct hal_rx_desc *desc);
uint8_t* (*rx_desc_mpdu_start_addr2)(struct hal_rx_desc *desc);
uint32_t (*get_ring_selector)(struct sk_buff *skb);
#endif
int (*rx_desc_is_da_mcbc)(struct hal_rx_desc *desc);
#ifdef notyet
void (*rx_desc_get_dot11_hdr)(struct hal_rx_desc *desc,
struct ieee80211_hdr *hdr);
uint16_t (*rx_desc_get_mpdu_frame_ctl)(struct hal_rx_desc *desc);
void (*rx_desc_get_crypto_header)(struct hal_rx_desc *desc,
uint8_t *crypto_hdr,
enum hal_encrypt_type enctype);
#endif
bool (*dp_rx_h_msdu_done)(struct hal_rx_desc *desc);
#ifdef notyet
bool (*dp_rx_h_l4_cksum_fail)(struct hal_rx_desc *desc);
bool (*dp_rx_h_ip_cksum_fail)(struct hal_rx_desc *desc);
#endif
int (*dp_rx_h_is_decrypted)(struct hal_rx_desc *desc);
uint32_t (*dp_rx_h_mpdu_err)(struct hal_rx_desc *desc);
uint32_t (*rx_desc_get_desc_size)(void);
#ifdef notyet
uint8_t (*rx_desc_get_msdu_src_link_id)(struct hal_rx_desc *desc);
#endif
};
extern const struct hal_rx_ops hal_rx_wcn7850_ops;
extern const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_wcn7850;
struct ath12k_hw_regs {
@ -485,9 +507,8 @@ enum hal_srng_dir {
#define HAL_SRNG_FLAGS_DATA_TLV_SWAP 0x00000020
#define HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN 0x00010000
#define HAL_SRNG_FLAGS_MSI_INTR 0x00020000
#define HAL_SRNG_FLAGS_CACHED 0x20000000
#define HAL_SRNG_FLAGS_HIGH_THRESH_INTR_EN 0x00080000
#define HAL_SRNG_FLAGS_LMAC_RING 0x80000000
#define HAL_SRNG_FLAGS_REMAP_CE_RING 0x10000000
#define HAL_SRNG_TLV_HDR_TAG GENMASK(9, 1)
#define HAL_SRNG_TLV_HDR_LEN GENMASK(25, 10)
@ -784,6 +805,8 @@ struct ath12k_hal {
#ifdef notyet
struct lock_class_key srng_key[HAL_SRNG_RING_ID_MAX];
#endif
uint32_t hal_desc_sz;
};
enum hal_pn_type {
@ -945,8 +968,7 @@ struct qwz_dp_htt_wbm_tx_status {
#define DP_BA_WIN_SZ_MAX 256
#define DP_TCL_NUM_RING_MAX 3
#define DP_TCL_NUM_RING_MAX_QCA6390 1
#define DP_TCL_NUM_RING_MAX 4
#define DP_IDLE_SCATTER_BUFS_MAX 16
@ -1075,7 +1097,8 @@ struct qwz_hp_update_timer {
struct ath12k_rx_desc_info {
TAILQ_ENTRY(ath12k_rx_desc_info) entry;
// struct sk_buff *skb;
struct mbuf *m;
bus_dmamap_t map;
uint32_t cookie;
uint32_t magic;
uint8_t in_use : 1,
@ -1084,7 +1107,8 @@ struct ath12k_rx_desc_info {
struct ath12k_tx_desc_info {
TAILQ_ENTRY(ath12k_tx_desc_info) entry;
// struct sk_buff *skb;
struct mbuf *m;
bus_dmamap_t map;
uint32_t desc_id; /* Cookie */
uint8_t mac_id;
uint8_t pool_id;
@ -1148,7 +1172,6 @@ struct dp_srng {
bus_addr_t paddr;
int size;
uint32_t ring_id;
uint8_t cached;
};
struct dp_tx_ring {
@ -1189,8 +1212,31 @@ struct hal_wbm_idle_scatter_list {
struct hal_wbm_link_desc *vaddr;
};
struct dp_rxdma_mon_ring {
struct dp_srng refill_buf_ring;
#if 0
struct idr bufs_idr;
/* Protects bufs_idr */
spinlock_t idr_lock;
#else
struct qwz_rx_data *rx_data;
#endif
int bufs_max;
uint8_t freemap[howmany(DP_RXDMA_BUF_RING_SIZE, 8)];
};
struct dp_rxdma_ring {
struct dp_srng refill_buf_ring;
struct qwz_rx_data *rx_data;
int bufs_max;
};
#define MAX_RXDMA_PER_PDEV 2
struct qwz_dp {
struct qwz_softc *sc;
uint8_t num_bank_profiles;
struct ath12k_dp_tx_bank_profile *bank_profiles;
enum ath12k_htc_ep_id eid;
int htt_tgt_version_received;
uint8_t htt_tgt_ver_major;
@ -1203,6 +1249,7 @@ struct qwz_dp {
struct dp_srng reo_except_ring;
struct dp_srng reo_cmd_ring;
struct dp_srng reo_status_ring;
enum peer_metadata_version peer_metadata_ver;
struct dp_srng reo_dst_ring[DP_REO_DST_RING_MAX];
struct dp_tx_ring tx_ring[DP_TCL_NUM_RING_MAX];
struct hal_wbm_idle_scatter_list scatter_list[DP_IDLE_SCATTER_BUFS_MAX];
@ -1212,6 +1259,7 @@ struct qwz_dp {
struct list_head dp_full_mon_mpdu_list;
#endif
uint32_t reo_cmd_cache_flush_count;
enum hal_rx_buf_return_buf_manager idle_link_rbm;
#if 0
/**
* protects access to below fields,
@ -1236,6 +1284,10 @@ struct qwz_dp {
/* protects the free and used desc lists */
spinlock_t tx_desc_lock[ATH12K_HW_MAX_QUEUES];
#endif
struct dp_rxdma_ring rx_refill_buf_ring;
struct dp_srng rx_mac_buf_ring[MAX_RXDMA_PER_PDEV];
struct dp_srng rxdma_err_dst_ring[MAX_RXDMA_PER_PDEV];
struct dp_rxdma_mon_ring rxdma_mon_buf_ring;
};
#define ATH12K_SHADOW_DP_TIMER_INTERVAL 20
@ -1266,7 +1318,6 @@ struct qwz_ce {
/* Protects rings of all ce pipes */
spinlock_t ce_lock;
#endif
struct qwz_hp_update_timer hp_timer[CE_COUNT_MAX];
};
@ -1317,7 +1368,6 @@ struct qwz_pdev_wmi {
enum ath12k_htc_ep_id eid;
const struct wmi_peer_flags_map *peer_flags;
uint32_t rx_decap_mode;
int tx_ce_desc;
};
#define QWZ_MAX_RADIOS 3
@ -1536,19 +1586,6 @@ struct qwz_dbring_cap {
uint32_t min_buf_align;
};
struct dp_rxdma_ring {
struct dp_srng refill_buf_ring;
#if 0
struct idr bufs_idr;
/* Protects bufs_idr */
spinlock_t idr_lock;
#else
struct qwz_rx_data *rx_data;
#endif
int bufs_max;
uint8_t freemap[howmany(DP_RXDMA_BUF_RING_SIZE, 8)];
};
enum hal_rx_mon_status {
HAL_RX_MON_STATUS_PPDU_NOT_DONE,
HAL_RX_MON_STATUS_PPDU_DONE,
@ -1729,23 +1766,14 @@ struct qwz_mon_data {
};
#define MAX_RXDMA_PER_PDEV 2
struct qwz_pdev_dp {
uint32_t mac_id;
uint32_t mon_dest_ring_stuck_cnt;
#if 0
atomic_t num_tx_pending;
wait_queue_head_t tx_empty_waitq;
#endif
struct dp_rxdma_ring rx_refill_buf_ring;
struct dp_srng rx_mac_buf_ring[MAX_RXDMA_PER_PDEV];
struct dp_srng rxdma_err_dst_ring[MAX_RXDMA_PER_PDEV];
struct dp_srng rxdma_mon_dst_ring[MAX_RXDMA_PER_PDEV];
struct dp_srng tx_mon_dst_ring[MAX_RXDMA_PER_PDEV];
struct dp_srng rxdma_mon_desc_ring;
struct dp_rxdma_ring rxdma_mon_buf_ring;
struct dp_rxdma_ring rx_mon_status_refill_ring[MAX_RXDMA_PER_PDEV];
#if 0
struct ieee80211_rx_status rx_status;
#endif
@ -1938,6 +1966,8 @@ struct qwz_softc {
struct qwz_pdev_dp pdev_dp;
struct qwz_wmi_base wmi;
struct qwz_htc htc;
const struct hal_rx_ops *hal_rx_ops;
uint32_t wmi_conf_rx_decap_mode;
enum ath12k_firmware_mode fw_mode;
enum ath12k_crypt_mode crypto_mode;
@ -1952,6 +1982,7 @@ struct qwz_softc {
struct qwz_qmi_dev_mem_info qmi_dev_mem[ATH12K_QMI_WLFW_MAX_DEV_MEM_NUM_V01];
struct ath12k_targ_cap target_caps;
int num_radios;
uint8_t device_id;
uint32_t cc_freq_hz;
uint32_t cfg_tx_chainmask;
uint32_t cfg_rx_chainmask;

View file

@ -1,4 +1,4 @@
/* $OpenBSD: re.c,v 1.218 2024/08/12 06:47:11 dlg Exp $ */
/* $OpenBSD: re.c,v 1.219 2024/11/05 18:58:59 miod Exp $ */
/* $FreeBSD: if_re.c,v 1.31 2004/09/04 07:54:05 ru Exp $ */
/*
* Copyright (c) 1997, 1998-2003
@ -205,7 +205,7 @@ void re_kstat_detach(struct rl_softc *);
void in_delayed_cksum(struct mbuf *);
struct cfdriver re_cd = {
0, "re", DV_IFNET
NULL, "re", DV_IFNET
};
#define EE_SET(x) \

View file

@ -1,4 +1,4 @@
/* $OpenBSD: rtl81x9.c,v 1.99 2024/08/31 16:23:09 deraadt Exp $ */
/* $OpenBSD: rtl81x9.c,v 1.100 2024/11/05 18:58:59 miod Exp $ */
/*
* Copyright (c) 1997, 1998
@ -1368,5 +1368,5 @@ rl_detach(struct rl_softc *sc)
}
struct cfdriver rl_cd = {
0, "rl", DV_IFNET
NULL, "rl", DV_IFNET
};

View file

@ -1,4 +1,4 @@
/* $OpenBSD: smc83c170.c,v 1.31 2023/11/10 15:51:20 bluhm Exp $ */
/* $OpenBSD: smc83c170.c,v 1.32 2024/11/05 18:58:59 miod Exp $ */
/* $NetBSD: smc83c170.c,v 1.59 2005/02/27 00:27:02 perry Exp $ */
/*-
@ -91,7 +91,7 @@ int epic_mediachange(struct ifnet *);
void epic_mediastatus(struct ifnet *, struct ifmediareq *);
struct cfdriver epic_cd = {
0, "epic", DV_IFNET
NULL, "epic", DV_IFNET
};
#define INTMASK (INTSTAT_FATAL_INT | INTSTAT_TXU | \

View file

@ -1,4 +1,4 @@
/* $OpenBSD: trm.h,v 1.8 2024/09/04 07:54:52 mglocker Exp $
/* $OpenBSD: trm.h,v 1.9 2024/10/22 21:50:02 jsg Exp $
* ------------------------------------------------------------
* O.S : OpenBSD
* File Name : trm.h
@ -41,8 +41,7 @@
* Segment Entry
* ------------------------------------------------------------
*/
struct SGentry
{
struct SGentry {
u_int32_t address;
u_int32_t length;
};
@ -68,8 +67,7 @@ struct SGentry
* SCSI Request Block
*-----------------------------------------------------------------------
*/
struct trm_scsi_req_q
{
struct trm_scsi_req_q {
TAILQ_ENTRY(trm_scsi_req_q) link;
bus_dmamap_t dmamapxfer;
u_int32_t PhysSRB;
@ -144,8 +142,7 @@ TAILQ_HEAD(SRB_HEAD, trm_scsi_req_q);
* Device Control Block
*-----------------------------------------------------------------------
*/
struct trm_dcb
{
struct trm_dcb {
u_int32_t TagMask;
u_int16_t DCBFlag;
@ -180,8 +177,7 @@ struct trm_dcb
* Adapter Control Block
*-----------------------------------------------------------------------
*/
struct trm_softc
{
struct trm_softc {
struct device sc_device;
bus_space_handle_t sc_iohandle;
@ -226,8 +222,7 @@ struct trm_softc
/*
* The SEEPROM structure for TRM_S1040
*/
struct trm_target_nvram
{
struct trm_target_nvram {
u_int8_t NvmTarCfg0; /* Target configuration byte 0 */
#define TRM_WIDE 0x20 /* Wide negotiate */
#define TRM_TAG_QUEUING 0x10 /* Enable SCSI tag queuing */
@ -241,8 +236,7 @@ struct trm_target_nvram
u_int8_t NvmTarCfg3; /* Target configuration byte 3 */
};
struct trm_adapter_nvram
{
struct trm_adapter_nvram {
u_int8_t NvramSubVendorID[2]; /*0,1 Sub Vendor ID */
u_int8_t NvramSubSysID[2]; /*2,3 Sub System ID */
u_int8_t NvramSubClass; /*4 Sub Class */

View file

@ -1,4 +1,4 @@
/* $OpenBSD: ufshci.c,v 1.42 2024/10/08 00:46:29 jsg Exp $ */
/* $OpenBSD: ufshci.c,v 1.43 2024/11/09 22:41:34 jsg Exp $ */
/*
* Copyright (c) 2022 Marcus Glocker <mglocker@openbsd.org>
@ -97,9 +97,6 @@ void ufshci_ccb_put(void *, void *);
void ufshci_ccb_free(struct ufshci_softc*, int);
void ufshci_scsi_cmd(struct scsi_xfer *);
void ufshci_minphys(struct buf *, struct scsi_link *);
int ufshci_scsi_probe(struct scsi_link *);
void ufshci_scsi_free(struct scsi_link *);
void ufshci_scsi_inquiry(struct scsi_xfer *);
void ufshci_scsi_capacity16(struct scsi_xfer *);
@ -1552,26 +1549,6 @@ ufshci_scsi_cmd(struct scsi_xfer *xs)
mtx_leave(&sc->sc_cmd_mtx);
}
void
ufshci_minphys(struct buf *bp, struct scsi_link *link)
{
DPRINTF(3, "%s\n", __func__);
}
int
ufshci_scsi_probe(struct scsi_link *link)
{
DPRINTF(3, "%s\n", __func__);
return 0;
}
void
ufshci_scsi_free(struct scsi_link *link)
{
DPRINTF(3, "%s\n", __func__);
}
void
ufshci_scsi_inquiry(struct scsi_xfer *xs)
{

View file

@ -1,4 +1,4 @@
/* $OpenBSD: xl.c,v 1.140 2024/08/31 16:23:09 deraadt Exp $ */
/* $OpenBSD: xl.c,v 1.141 2024/11/05 18:58:59 miod Exp $ */
/*
* Copyright (c) 1997, 1998, 1999
@ -2654,5 +2654,5 @@ xl_wol(struct ifnet *ifp, int enable)
#endif
struct cfdriver xl_cd = {
0, "xl", DV_IFNET
NULL, "xl", DV_IFNET
};

View file

@ -1,4 +1,4 @@
/* $OpenBSD: essvar.h,v 1.5 2002/03/14 03:16:05 millert Exp $ */
/* $OpenBSD: essvar.h,v 1.6 2024/10/22 21:50:02 jsg Exp $ */
/* $NetBSD: essvar.h,v 1.14 1999/03/18 06:03:31 mycroft Exp $ */
/*
* Copyright 1997
@ -34,7 +34,7 @@
*/
/*
** @(#) $RCSfile: essvar.h,v $ $Revision: 1.5 $ (SHARK) $Date: 2002/03/14 03:16:05 $
** @(#) $RCSfile: essvar.h,v $ $Revision: 1.6 $ (SHARK) $Date: 2024/10/22 21:50:02 $
**
**++
**
@ -95,8 +95,7 @@
#define ESS_1888_NDEVS 22
#define ESS_MAX_NDEVS 22
struct ess_audio_channel
{
struct ess_audio_channel {
int drq; /* DMA channel */
#define IS16BITDRQ(drq) ((drq) >= 4)
int irq; /* IRQ line for this DMA channel */
@ -118,8 +117,7 @@ struct ess_audio_channel
int blksize; /* current block size */
};
struct ess_softc
{
struct ess_softc {
struct device sc_dev; /* base device */
struct device *sc_isa;
isa_chipset_tag_t sc_ic;

View file

@ -1,4 +1,4 @@
/* $OpenBSD: kcov.c,v 1.49 2023/07/29 06:52:50 anton Exp $ */
/* $OpenBSD: kcov.c,v 1.50 2024/11/10 10:04:33 jsg Exp $ */
/*
* Copyright (c) 2018 Anton Lindqvist <anton@openbsd.org>
@ -132,12 +132,7 @@ struct pool kr_pool;
static inline int
inintr(struct cpu_info *ci)
{
#if defined(__amd64__) || defined(__arm__) || defined(__arm64__) || \
defined(__i386__)
return (ci->ci_idepth > 0);
#else
return (0);
#endif
}
/*

View file

@ -1,4 +1,4 @@
/* $OpenBSD: midi.c,v 1.57 2024/05/13 01:15:50 jsg Exp $ */
/* $OpenBSD: midi.c,v 1.58 2024/12/30 02:46:00 guenther Exp $ */
/*
* Copyright (c) 2003, 2004 Alexandre Ratchov
@ -410,9 +410,6 @@ midiioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
return ENXIO;
error = 0;
switch(cmd) {
case FIONBIO:
/* All handled in the upper FS layer */
break;
default:
error = ENOTTY;
}

View file

@ -1,4 +1,4 @@
/* $OpenBSD: openfirm.h,v 1.19 2024/05/13 01:15:51 jsg Exp $ */
/* $OpenBSD: openfirm.h,v 1.20 2024/11/08 12:48:00 miod Exp $ */
/* $NetBSD: openfirm.h,v 1.1 1996/09/30 16:35:10 ws Exp $ */
/*
@ -59,8 +59,6 @@ int OF_setprop(int, char *, const void *, int);
int OF_nextprop(int, char *, void *);
int OF_finddevice(char *name);
int OF_is_compatible(int, const char *);
int OF_instance_to_path(int ihandle, char *buf, int buflen);
int OF_package_to_path(int phandle, char *buf, int buflen);
int OF_call_method_1(char *method, int ihandle, int nargs, ...);
int OF_call_method(char *method, int ihandle, int nargs, int nreturns, ...);
int OF_open(char *dname);

View file

@ -1,4 +1,4 @@
/* $OpenBSD: ccp_pci.c,v 1.13 2024/09/04 07:45:08 jsg Exp $ */
/* $OpenBSD: ccp_pci.c,v 1.14 2024/10/24 18:52:59 bluhm Exp $ */
/*
* Copyright (c) 2018 David Gwynne <dlg@openbsd.org>
@ -36,9 +36,6 @@
int ccp_pci_match(struct device *, void *, void *);
void ccp_pci_attach(struct device *, struct device *, void *);
void ccp_pci_intr_map(struct ccp_softc *, struct pci_attach_args *);
void ccp_pci_psp_attach(struct ccp_softc *, struct pci_attach_args *);
const struct cfattach ccp_pci_ca = {
sizeof(struct ccp_softc),
ccp_pci_match,
@ -67,6 +64,9 @@ ccp_pci_attach(struct device *parent, struct device *self, void *aux)
struct ccp_softc *sc = (struct ccp_softc *)self;
struct pci_attach_args *pa = aux;
pcireg_t memtype;
#if NPSP > 0
int psp_matched;
#endif
memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, CCP_PCI_BAR);
if (PCI_MAPREG_TYPE(memtype) != PCI_MAPREG_TYPE_MEM) {
@ -80,59 +80,16 @@ ccp_pci_attach(struct device *parent, struct device *self, void *aux)
return;
}
ccp_pci_intr_map(sc, pa);
#if NPSP > 0
psp_matched = psp_pci_match(sc, aux);
if (psp_matched)
psp_pci_intr_map(sc, pa);
#endif
ccp_attach(sc);
ccp_pci_psp_attach(sc, pa);
}
void
ccp_pci_intr_map(struct ccp_softc *sc, struct pci_attach_args *pa)
{
#if NPSP > 0
pci_intr_handle_t ih;
const char *intrstr = NULL;
/* clear and disable interrupts */
bus_space_write_4(sc->sc_iot, sc->sc_ioh, PSP_REG_INTEN, 0);
bus_space_write_4(sc->sc_iot, sc->sc_ioh, PSP_REG_INTSTS, -1);
if (pci_intr_map_msix(pa, 0, &ih) != 0 &&
pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
printf(": couldn't map interrupt\n");
return;
}
intrstr = pci_intr_string(pa->pa_pc, ih);
sc->sc_irqh = pci_intr_establish(pa->pa_pc, ih, IPL_BIO, psp_sev_intr,
sc, sc->sc_dev.dv_xname);
if (sc->sc_irqh != NULL)
printf(": %s", intrstr);
#endif
}
void
ccp_pci_psp_attach(struct ccp_softc *sc, struct pci_attach_args *pa)
{
#if NPSP > 0
struct psp_attach_args arg;
struct device *self = (struct device *)sc;
memset(&arg, 0, sizeof(arg));
arg.iot = sc->sc_iot;
arg.ioh = sc->sc_ioh;
arg.dmat = pa->pa_dmat;
arg.capabilities = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
PSP_REG_CAPABILITIES);
sc->sc_psp = config_found_sm(self, &arg, pspprint, pspsubmatch);
if (sc->sc_psp == NULL) {
pci_intr_disestablish(pa->pa_pc, sc->sc_irqh);
return;
}
/* enable interrupts */
bus_space_write_4(sc->sc_iot, sc->sc_ioh, PSP_REG_INTEN, -1);
if (psp_matched)
psp_pci_attach(sc, pa);
#endif
}

View file

@ -1,4 +1,4 @@
/* $OpenBSD: cz.c,v 1.29 2024/05/24 06:02:53 jsg Exp $ */
/* $OpenBSD: cz.c,v 1.30 2024/11/05 18:58:59 miod Exp $ */
/* $NetBSD: cz.c,v 1.15 2001/01/20 19:10:36 thorpej Exp $ */
/*-
@ -192,7 +192,7 @@ int cztty_to_tiocm(struct cztty_softc *sc);
void cztty_diag(void *arg);
struct cfdriver cz_cd = {
0, "cz", DV_TTY
NULL, "cz", DV_TTY
};
/*

View file

@ -149,6 +149,7 @@ static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif,
struct acpi_buffer *params)
{
acpi_status status;
union acpi_object *obj;
union acpi_object atif_arg_elements[2];
struct acpi_object_list atif_arg;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
@ -171,16 +172,24 @@ static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif,
status = acpi_evaluate_object(atif->handle, NULL, &atif_arg,
&buffer);
obj = (union acpi_object *)buffer.pointer;
/* Fail only if calling the method fails and ATIF is supported */
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
/* Fail if calling the method fails */
if (ACPI_FAILURE(status)) {
DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n",
acpi_format_exception(status));
kfree(buffer.pointer);
kfree(obj);
return NULL;
}
return buffer.pointer;
if (obj->type != ACPI_TYPE_BUFFER) {
DRM_DEBUG_DRIVER("bad object returned from ATIF: %d\n",
obj->type);
kfree(obj);
return NULL;
}
return obj;
}
/**
@ -791,6 +800,7 @@ int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
return -EIO;
}
kfree(info);
return 0;
}

View file

@ -265,7 +265,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
/* Only a single BO list is allowed to simplify handling. */
if (p->bo_list)
ret = -EINVAL;
goto free_partial_kdata;
ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata);
if (ret)

View file

@ -402,7 +402,7 @@ static ssize_t amdgpu_debugfs_gprwave_read(struct file *f, char __user *buf, siz
int r;
uint32_t *data, x;
if (size & 0x3 || *pos & 0x3)
if (size > 4096 || size & 0x3 || *pos & 0x3)
return -EINVAL;
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
@ -1641,7 +1641,7 @@ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
ent = debugfs_create_file(debugfs_regs_names[i],
S_IFREG | 0444, root,
S_IFREG | 0400, root,
adev, debugfs_regs[i]);
if (!i && !IS_ERR_OR_NULL(ent))
i_size_write(ent->d_inode, adev->rmmio_size);
@ -2176,11 +2176,11 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
amdgpu_securedisplay_debugfs_init(adev);
amdgpu_fw_attestation_debugfs_init(adev);
debugfs_create_file("amdgpu_evict_vram", 0444, root, adev,
debugfs_create_file("amdgpu_evict_vram", 0400, root, adev,
&amdgpu_evict_vram_fops);
debugfs_create_file("amdgpu_evict_gtt", 0444, root, adev,
debugfs_create_file("amdgpu_evict_gtt", 0400, root, adev,
&amdgpu_evict_gtt_fops);
debugfs_create_file("amdgpu_test_ib", 0444, root, adev,
debugfs_create_file("amdgpu_test_ib", 0400, root, adev,
&amdgpu_debugfs_test_ib_fops);
debugfs_create_file("amdgpu_vm_info", 0444, root, adev,
&amdgpu_debugfs_vm_info_fops);

View file

@ -3200,7 +3200,7 @@ static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
*
* @adev: amdgpu_device pointer
*
* First resume function for hardware IPs. The list of all the hardware
* Second resume function for hardware IPs. The list of all the hardware
* IPs that make up the asic is walked and the resume callbacks are run for
* all blocks except COMMON, GMC, and IH. resume puts the hardware into a
* functional state after a suspend and updates the software state as
@ -3218,6 +3218,7 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
continue;
r = adev->ip_blocks[i].version->funcs->resume(adev);
@ -3232,6 +3233,36 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
return 0;
}
/**
* amdgpu_device_ip_resume_phase3 - run resume for hardware IPs
*
* @adev: amdgpu_device pointer
*
* Third resume function for hardware IPs. The list of all the hardware
* IPs that make up the asic is walked and the resume callbacks are run for
* all DCE. resume puts the hardware into a functional state after a suspend
* and updates the software state as necessary. This function is also used
* for restoring the GPU after a GPU reset.
*
* Returns 0 on success, negative error code on failure.
*/
static int amdgpu_device_ip_resume_phase3(struct amdgpu_device *adev)
{
int i, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
continue;
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
r = adev->ip_blocks[i].version->funcs->resume(adev);
if (r)
return r;
}
}
return 0;
}
/**
* amdgpu_device_ip_resume - run resume for hardware IPs
*
@ -3258,6 +3289,13 @@ static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
r = amdgpu_device_ip_resume_phase2(adev);
if (r)
return r;
amdgpu_fence_driver_hw_init(adev);
r = amdgpu_device_ip_resume_phase3(adev);
return r;
}
@ -4124,8 +4162,8 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
int idx;
bool px;
amdgpu_fence_driver_sw_fini(adev);
amdgpu_device_ip_fini(adev);
amdgpu_fence_driver_sw_fini(adev);
amdgpu_ucode_release(&adev->firmware.gpu_info_fw);
adev->accel_working = false;
dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
@ -4354,7 +4392,6 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
goto exit;
}
amdgpu_fence_driver_hw_init(adev);
r = amdgpu_device_ip_late_init(adev);
if (r)
@ -5124,6 +5161,10 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
if (r)
goto out;
r = amdgpu_device_ip_resume_phase3(tmp_adev);
if (r)
goto out;
if (vram_lost)
amdgpu_device_fill_reset_magic(tmp_adev);
@ -5991,6 +6032,9 @@ bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
struct amdgpu_device *adev = drm_to_adev(dev);
int r;
if (amdgpu_sriov_vf(adev))
return false;
r = pci_save_state(pdev);
if (!r) {
kfree(adev->pci_state);

View file

@ -249,6 +249,7 @@ static const struct pci_matchid amdgpu_devices[] = {
{0x1002, 0x150e }, /* Radeon 880M / 890M */
/* GC 11.5.1, DCN 3.5.1, APU, linux 6.9 */
/* GC 11.5.2, DCN 3.5.1, APU, linux 6.11 */
/* GC 12.0.0, DCN 4.0.1, dGPU, linux 6.11 */
/* GC 12.0.1, DCN 4.0.1, dGPU, linux 6.11 */

View file

@ -99,19 +99,7 @@ amdgpu_gem_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
ret = ttm_bo_vm_reserve(bo);
if (ret) {
switch (ret) {
case VM_FAULT_NOPAGE:
ret = VM_PAGER_OK;
break;
case VM_FAULT_RETRY:
ret = VM_PAGER_REFAULT;
break;
default:
ret = VM_PAGER_BAD;
break;
}
uvmfault_unlockall(ufi, NULL, uobj);
return ret;
goto out;
}
if (drm_dev_enter(ddev, &idx)) {
@ -137,18 +125,19 @@ amdgpu_gem_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
#endif
unlock:
dma_resv_unlock(bo->base.resv);
out:
switch (ret) {
case VM_FAULT_NOPAGE:
ret = VM_PAGER_OK;
ret = 0;
break;
case VM_FAULT_RETRY:
ret = VM_PAGER_REFAULT;
ret = ERESTART;
break;
default:
ret = VM_PAGER_BAD;
ret = EACCES;
break;
}
dma_resv_unlock(bo->base.resv);
uvmfault_unlockall(ufi, NULL, uobj);
return ret;
}

View file

@ -159,7 +159,6 @@ void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
struct dma_fence *f;
unsigned i;
@ -172,7 +171,7 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
f = NULL;
for (i = 0; i < job->num_ibs; ++i)
amdgpu_ib_free(ring->adev, &job->ibs[i], f);
amdgpu_ib_free(NULL, &job->ibs[i], f);
}
static void amdgpu_job_free_cb(struct drm_sched_job *s_job)

View file

@ -1053,8 +1053,10 @@ int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
if (r) {
amdgpu_mes_unlock(&adev->mes);
goto clean_up_memory;
}
amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);
@ -1087,7 +1089,6 @@ clean_up_ring:
amdgpu_ring_fini(ring);
clean_up_memory:
kfree(ring);
amdgpu_mes_unlock(&adev->mes);
return r;
}

View file

@ -797,7 +797,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
/* Map SG to device */
r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
if (r)
goto release_sg;
goto release_sg_table;
/* convert SG to linear array of pages and dma addresses */
drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
@ -805,6 +805,8 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
return 0;
release_sg_table:
sg_free_table(ttm->sg);
release_sg:
kfree(ttm->sg);
ttm->sg = NULL;
@ -1870,6 +1872,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
rw_init(&adev->mman.gtt_window_lock, "gttwin");
dma_set_max_seg_size(adev->dev, UINT_MAX);
/* No others user of address space so set it to 0 */
#ifdef notyet
r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,

View file

@ -214,15 +214,15 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
drm_sched_entity_destroy(&adev->vce.entity);
amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
(void **)&adev->vce.cpu_addr);
for (i = 0; i < adev->vce.num_rings; i++)
amdgpu_ring_fini(&adev->vce.ring[i]);
amdgpu_ucode_release(&adev->vce.fw);
mutex_destroy(&adev->vce.idle_mutex);
amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
(void **)&adev->vce.cpu_addr);
return 0;
}

View file

@ -1223,10 +1223,9 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
* next command submission.
*/
if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) {
uint32_t mem_type = bo->tbo.resource->mem_type;
if (!(bo->preferred_domains &
amdgpu_mem_type_to_domain(mem_type)))
if (bo->tbo.resource &&
!(bo->preferred_domains &
amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type)))
amdgpu_vm_bo_evicted(&bo_va->base);
else
amdgpu_vm_bo_idle(&bo_va->base);

View file

@ -440,7 +440,7 @@ static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr,
case AMDGPU_SPX_PARTITION_MODE:
return adev->gmc.num_mem_partitions == 1 && num_xcc > 0;
case AMDGPU_DPX_PARTITION_MODE:
return adev->gmc.num_mem_partitions != 8 && (num_xcc % 4) == 0;
return adev->gmc.num_mem_partitions <= 2 && (num_xcc % 4) == 0;
case AMDGPU_TPX_PARTITION_MODE:
return (adev->gmc.num_mem_partitions == 1 ||
adev->gmc.num_mem_partitions == 3) &&

View file

@ -31,13 +31,15 @@
static void hdp_v5_2_flush_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
if (!ring || !ring->funcs->emit_wreg)
if (!ring || !ring->funcs->emit_wreg) {
WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2,
0);
else
RREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
} else {
amdgpu_ring_emit_wreg(ring,
(adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2,
0);
}
}
static void hdp_v5_2_update_mem_power_gating(struct amdgpu_device *adev,

View file

@ -247,6 +247,12 @@ static void nbio_v7_7_init_registers(struct amdgpu_device *adev)
if (def != data)
WREG32_SOC15(NBIO, 0, regBIF0_PCIE_MST_CTRL_3, data);
switch (adev->ip_versions[NBIO_HWIP][0]) {
case IP_VERSION(7, 7, 0):
data = RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF5_STRAP4) & ~BIT(23);
WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF5_STRAP4, data);
break;
}
}
static void nbio_v7_7_update_medium_grain_clock_gating(struct amdgpu_device *adev,

View file

@ -1286,7 +1286,7 @@ static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
struct amdgpu_job *job,
struct amdgpu_ib *ib)
{
struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
struct amdgpu_ring *ring = amdgpu_job_ring(job);
unsigned i;
/* No patching necessary for the first instance */

View file

@ -77,6 +77,20 @@ static int vcn_v4_0_3_early_init(void *handle)
return amdgpu_vcn_early_init(adev);
}
static int vcn_v4_0_3_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
{
struct amdgpu_vcn4_fw_shared *fw_shared;
fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
fw_shared->sq.is_enabled = 1;
if (amdgpu_vcnfw_log)
amdgpu_vcn_fwlog_init(&adev->vcn.inst[inst_idx]);
return 0;
}
/**
* vcn_v4_0_3_sw_init - sw init for VCN block
*
@ -107,8 +121,6 @@ static int vcn_v4_0_3_sw_init(void *handle)
return r;
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
volatile struct amdgpu_vcn4_fw_shared *fw_shared;
vcn_inst = GET_INST(VCN, i);
ring = &adev->vcn.inst[i].ring_enc[0];
@ -131,12 +143,7 @@ static int vcn_v4_0_3_sw_init(void *handle)
if (r)
return r;
fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
fw_shared->sq.is_enabled = true;
if (amdgpu_vcnfw_log)
amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
vcn_v4_0_3_fw_shared_init(adev, i);
}
if (amdgpu_sriov_vf(adev)) {
@ -221,6 +228,8 @@ static int vcn_v4_0_3_hw_init(void *handle)
}
} else {
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
struct amdgpu_vcn4_fw_shared *fw_shared;
vcn_inst = GET_INST(VCN, i);
ring = &adev->vcn.inst[i].ring_enc[0];
@ -244,6 +253,11 @@ static int vcn_v4_0_3_hw_init(void *handle)
regVCN_RB1_DB_CTRL);
}
/* Re-init fw_shared when RAS fatal error occurred */
fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
if (!fw_shared->sq.is_enabled)
vcn_v4_0_3_fw_shared_init(adev, i);
r = amdgpu_ring_test_helper(ring);
if (r)
goto done;

View file

@ -114,6 +114,33 @@ static int vega20_ih_toggle_ring_interrupts(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_GPU_TS_ENABLE, 1);
if (enable) {
/* Unset the CLEAR_OVERFLOW bit to make sure the next step
* is switching the bit from 0 to 1
*/
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp))
return -ETIMEDOUT;
} else {
WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
}
/* Clear RB_OVERFLOW bit */
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp))
return -ETIMEDOUT;
} else {
WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
}
/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
* can be detected.
*/
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
}
/* enable_intr field is only valid in ring0 */
if (ih == &adev->irq.ih)
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));

View file

@ -1164,7 +1164,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
if (flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM)
size >>= 1;
WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + PAGE_ALIGN(size));
atomic64_add(PAGE_ALIGN(size), &pdd->vram_usage);
}
mutex_unlock(&p->mutex);
@ -1235,7 +1235,7 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
kfd_process_device_remove_obj_handle(
pdd, GET_IDR_HANDLE(args->handle));
WRITE_ONCE(pdd->vram_usage, pdd->vram_usage - size);
atomic64_sub(size, &pdd->vram_usage);
err_unlock:
err_pdd:
@ -2352,7 +2352,7 @@ static int criu_restore_memory_of_gpu(struct kfd_process_device *pdd,
} else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
bo_bucket->restored_offset = offset;
/* Update the VRAM usage count */
WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + bo_bucket->size);
atomic64_add(bo_bucket->size, &pdd->vram_usage);
}
return 0;
}

View file

@ -123,7 +123,7 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_node *dev,
memset(kq->pq_kernel_addr, 0, queue_size);
memset(kq->rptr_kernel, 0, sizeof(*kq->rptr_kernel));
memset(kq->wptr_kernel, 0, sizeof(*kq->wptr_kernel));
memset(kq->wptr_kernel, 0, dev->kfd->device_info.doorbell_size);
prop.queue_size = queue_size;
prop.is_interop = false;

View file

@ -765,7 +765,7 @@ struct kfd_process_device {
enum kfd_pdd_bound bound;
/* VRAM usage */
uint64_t vram_usage;
atomic64_t vram_usage;
struct attribute attr_vram;
char vram_filename[MAX_SYSFS_FILENAME_LEN];

View file

@ -306,14 +306,14 @@ static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
} else if (strncmp(attr->name, "vram_", 5) == 0) {
struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
attr_vram);
return snprintf(buffer, PAGE_SIZE, "%llu\n", READ_ONCE(pdd->vram_usage));
return snprintf(buffer, PAGE_SIZE, "%llu\n", atomic64_read(&pdd->vram_usage));
} else if (strncmp(attr->name, "sdma_", 5) == 0) {
struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
attr_sdma);
struct kfd_sdma_activity_handler_workarea sdma_activity_work_handler;
INIT_WORK(&sdma_activity_work_handler.sdma_activity_work,
kfd_sdma_activity_worker);
INIT_WORK_ONSTACK(&sdma_activity_work_handler.sdma_activity_work,
kfd_sdma_activity_worker);
sdma_activity_work_handler.pdd = pdd;
sdma_activity_work_handler.sdma_activity_counter = 0;
@ -321,6 +321,7 @@ static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
schedule_work(&sdma_activity_work_handler.sdma_activity_work);
flush_work(&sdma_activity_work_handler.sdma_activity_work);
destroy_work_on_stack(&sdma_activity_work_handler.sdma_activity_work);
return snprintf(buffer, PAGE_SIZE, "%llu\n",
(sdma_activity_work_handler.sdma_activity_counter)/
@ -1589,7 +1590,7 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
pdd->bound = PDD_UNBOUND;
pdd->already_dequeued = false;
pdd->runtime_inuse = false;
pdd->vram_usage = 0;
atomic64_set(&pdd->vram_usage, 0);
pdd->sdma_past_activity_counter = 0;
pdd->user_gpu_id = dev->id;
atomic64_set(&pdd->evict_duration_counter, 0);

View file

@ -391,6 +391,27 @@ static void svm_range_bo_release(struct kref *kref)
spin_lock(&svm_bo->list_lock);
}
spin_unlock(&svm_bo->list_lock);
if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
struct kfd_process_device *pdd;
struct kfd_process *p;
struct mm_struct *mm;
mm = svm_bo->eviction_fence->mm;
/*
* The forked child process takes svm_bo device pages ref, svm_bo could be
* released after parent process is gone.
*/
p = kfd_lookup_process_by_mm(mm);
if (p) {
pdd = kfd_get_process_device_data(svm_bo->node, p);
if (pdd)
atomic64_sub(amdgpu_bo_size(svm_bo->bo), &pdd->vram_usage);
kfd_unref_process(p);
}
mmput(mm);
}
if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base))
/* We're not in the eviction worker. Signal the fence. */
dma_fence_signal(&svm_bo->eviction_fence->base);
@ -518,6 +539,7 @@ int
svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
bool clear)
{
struct kfd_process_device *pdd;
struct amdgpu_bo_param bp;
struct svm_range_bo *svm_bo;
struct amdgpu_bo_user *ubo;
@ -609,6 +631,10 @@ svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
list_add(&prange->svm_bo_list, &svm_bo->range_list);
spin_unlock(&svm_bo->list_lock);
pdd = svm_range_get_pdd_by_node(prange, node);
if (pdd)
atomic64_add(amdgpu_bo_size(bo), &pdd->vram_usage);
return 0;
reserve_bo_failed:

View file

@ -10732,7 +10732,7 @@ static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector,
break;
}
while (j < EDID_LENGTH) {
while (j < EDID_LENGTH - sizeof(struct amd_vsdb_block)) {
struct amd_vsdb_block *amd_vsdb = (struct amd_vsdb_block *)&edid_ext[j];
unsigned int ieeeId = (amd_vsdb->ieee_id[2] << 16) | (amd_vsdb->ieee_id[1] << 8) | (amd_vsdb->ieee_id[0]);

View file

@ -179,6 +179,8 @@ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
dc_sink_release(dc_sink);
aconnector->dc_sink = NULL;
aconnector->edid = NULL;
aconnector->dsc_aux = NULL;
port->passthrough_aux = NULL;
}
aconnector->mst_status = MST_STATUS_DEFAULT;
@ -487,6 +489,8 @@ dm_dp_mst_detect(struct drm_connector *connector,
dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
aconnector->edid = NULL;
aconnector->dsc_aux = NULL;
port->passthrough_aux = NULL;
amdgpu_dm_set_mst_status(&aconnector->mst_status,
MST_REMOTE_EDID | MST_ALLOCATE_NEW_PAYLOAD | MST_CLEAR_ALLOCATED_PAYLOAD,

View file

@ -560,11 +560,19 @@ void dcn3_clk_mgr_construct(
dce_clock_read_ss_info(clk_mgr);
clk_mgr->base.bw_params = kzalloc(sizeof(*clk_mgr->base.bw_params), GFP_KERNEL);
if (!clk_mgr->base.bw_params) {
BREAK_TO_DEBUGGER();
return;
}
/* need physical address of table to give to PMFW */
clk_mgr->wm_range_table = dm_helpers_allocate_gpu_mem(clk_mgr->base.ctx,
DC_MEM_ALLOC_TYPE_GART, sizeof(WatermarksExternal_t),
&clk_mgr->wm_range_table_addr);
if (!clk_mgr->wm_range_table) {
BREAK_TO_DEBUGGER();
return;
}
}
void dcn3_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr)

View file

@ -1022,11 +1022,19 @@ void dcn32_clk_mgr_construct(
clk_mgr->smu_present = false;
clk_mgr->base.bw_params = kzalloc(sizeof(*clk_mgr->base.bw_params), GFP_KERNEL);
if (!clk_mgr->base.bw_params) {
BREAK_TO_DEBUGGER();
return;
}
/* need physical address of table to give to PMFW */
clk_mgr->wm_range_table = dm_helpers_allocate_gpu_mem(clk_mgr->base.ctx,
DC_MEM_ALLOC_TYPE_GART, sizeof(WatermarksExternal_t),
&clk_mgr->wm_range_table_addr);
if (!clk_mgr->wm_range_table) {
BREAK_TO_DEBUGGER();
return;
}
}
void dcn32_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr)

View file

@ -727,6 +727,9 @@ void hwss_setup_dpp(union block_sequence_params *params)
struct dpp *dpp = pipe_ctx->plane_res.dpp;
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
if (!plane_state)
return;
if (dpp && dpp->funcs->dpp_setup) {
// program the input csc
dpp->funcs->dpp_setup(dpp,

View file

@ -880,7 +880,8 @@ bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
/*
* if above if is not executed then 'params' equal to 0 and set in bypass
*/
mpc->funcs->set_output_gamma(mpc, mpcc_id, params);
if (mpc->funcs->set_output_gamma)
mpc->funcs->set_output_gamma(mpc, mpcc_id, params);
return true;
}
@ -1732,17 +1733,26 @@ static void dcn20_program_pipe(
dc->res_pool->hubbub->funcs->program_det_size(
dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->det_buffer_size_kb);
if (pipe_ctx->update_flags.raw || pipe_ctx->plane_state->update_flags.raw || pipe_ctx->stream->update_flags.raw)
if (pipe_ctx->update_flags.raw ||
(pipe_ctx->plane_state && pipe_ctx->plane_state->update_flags.raw) ||
pipe_ctx->stream->update_flags.raw)
dcn20_update_dchubp_dpp(dc, pipe_ctx, context);
if (pipe_ctx->update_flags.bits.enable
|| pipe_ctx->plane_state->update_flags.bits.hdr_mult)
if (pipe_ctx->update_flags.bits.enable ||
(pipe_ctx->plane_state && pipe_ctx->plane_state->update_flags.bits.hdr_mult))
hws->funcs.set_hdr_multiplier(pipe_ctx);
if (pipe_ctx->update_flags.bits.enable ||
pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
pipe_ctx->plane_state->update_flags.bits.gamma_change ||
pipe_ctx->plane_state->update_flags.bits.lut_3d)
if ((pipe_ctx->plane_state && pipe_ctx->plane_state->update_flags.bits.hdr_mult) ||
pipe_ctx->update_flags.bits.enable)
hws->funcs.set_hdr_multiplier(pipe_ctx);
if ((pipe_ctx->plane_state &&
pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change) ||
(pipe_ctx->plane_state &&
pipe_ctx->plane_state->update_flags.bits.gamma_change) ||
(pipe_ctx->plane_state &&
pipe_ctx->plane_state->update_flags.bits.lut_3d) ||
pipe_ctx->update_flags.bits.enable)
hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
/* dcn10_translate_regamma_to_hw_format takes 750us to finish
@ -1752,7 +1762,8 @@ static void dcn20_program_pipe(
if (pipe_ctx->update_flags.bits.enable ||
pipe_ctx->update_flags.bits.plane_changed ||
pipe_ctx->stream->update_flags.bits.out_tf ||
pipe_ctx->plane_state->update_flags.bits.output_tf_change)
(pipe_ctx->plane_state &&
pipe_ctx->plane_state->update_flags.bits.output_tf_change))
hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
/* If the pipe has been enabled or has a different opp, we
@ -1776,7 +1787,7 @@ static void dcn20_program_pipe(
}
/* Set ABM pipe after other pipe configurations done */
if (pipe_ctx->plane_state->visible) {
if ((pipe_ctx->plane_state && pipe_ctx->plane_state->visible)) {
if (pipe_ctx->stream_res.abm) {
dc->hwss.set_pipe(pipe_ctx);
pipe_ctx->stream_res.abm->funcs->set_abm_level(pipe_ctx->stream_res.abm,

View file

@ -440,7 +440,7 @@ void dcn30_init_hw(struct dc *dc)
int edp_num;
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->init_clocks)
dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
// Initialize the dccg
@ -599,11 +599,12 @@ void dcn30_init_hw(struct dc *dc)
if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);
if (dc->clk_mgr->funcs->notify_wm_ranges)
if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->notify_wm_ranges)
dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
//if softmax is enabled then hardmax will be set by a different call
if (dc->clk_mgr->funcs->set_hard_max_memclk && !dc->clk_mgr->dc_mode_softmax_enabled)
if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->set_hard_max_memclk &&
!dc->clk_mgr->dc_mode_softmax_enabled)
dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
@ -735,6 +736,9 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
stream = dc->current_state->streams[0];
plane = (stream ? dc->current_state->stream_status[0].plane_states[0] : NULL);
if (!stream || !plane)
return false;
if (stream && plane) {
cursor_cache_enable = stream->cursor_position.enable &&
plane->address.grph.cursor_cache_addr.quad_part;

View file

@ -2045,6 +2045,9 @@ bool dcn30_validate_bandwidth(struct dc *dc,
BW_VAL_TRACE_COUNT();
if (!pipes)
goto validate_fail;
DC_FP_START();
out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true);
DC_FP_END();

View file

@ -1308,6 +1308,8 @@ static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create(
/* allocate HPO link encoder */
hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
if (!hpo_dp_enc31)
return NULL; /* out of memory */
hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst,
&hpo_dp_link_enc_regs[inst],
@ -1764,6 +1766,9 @@ bool dcn31_validate_bandwidth(struct dc *dc,
BW_VAL_TRACE_COUNT();
if (!pipes)
goto validate_fail;
DC_FP_START();
out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true);
DC_FP_END();

View file

@ -1381,6 +1381,8 @@ static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create(
/* allocate HPO link encoder */
hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
if (!hpo_dp_enc31)
return NULL; /* out of memory */
hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst,
&hpo_dp_link_enc_regs[inst],
@ -1741,6 +1743,9 @@ bool dcn314_validate_bandwidth(struct dc *dc,
BW_VAL_TRACE_COUNT();
if (!pipes)
goto validate_fail;
if (filter_modes_for_single_channel_workaround(dc, context))
goto validate_fail;

View file

@ -1308,6 +1308,8 @@ static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create(
/* allocate HPO link encoder */
hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
if (!hpo_dp_enc31)
return NULL; /* out of memory */
hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst,
&hpo_dp_link_enc_regs[inst],

View file

@ -1305,6 +1305,8 @@ static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create(
/* allocate HPO link encoder */
hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
if (!hpo_dp_enc31)
return NULL; /* out of memory */
hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst,
&hpo_dp_link_enc_regs[inst],

View file

@ -587,7 +587,9 @@ bool dcn32_set_output_transfer_func(struct dc *dc,
}
}
mpc->funcs->set_output_gamma(mpc, mpcc_id, params);
if (mpc->funcs->set_output_gamma)
mpc->funcs->set_output_gamma(mpc, mpcc_id, params);
return ret;
}
@ -771,7 +773,7 @@ void dcn32_init_hw(struct dc *dc)
int edp_num;
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->init_clocks)
dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
// Initialize the dccg
@ -948,10 +950,11 @@ void dcn32_init_hw(struct dc *dc)
if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);
if (dc->clk_mgr->funcs->notify_wm_ranges)
if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->notify_wm_ranges)
dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
if (dc->clk_mgr->funcs->set_hard_max_memclk && !dc->clk_mgr->dc_mode_softmax_enabled)
if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->set_hard_max_memclk &&
!dc->clk_mgr->dc_mode_softmax_enabled)
dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
if (dc->res_pool->hubbub->funcs->force_pstate_change_control)

View file

@ -1299,6 +1299,8 @@ static struct hpo_dp_link_encoder *dcn32_hpo_dp_link_encoder_create(
/* allocate HPO link encoder */
hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
if (!hpo_dp_enc31)
return NULL; /* out of memory */
#undef REG_STRUCT
#define REG_STRUCT hpo_dp_link_enc_regs
@ -1786,6 +1788,9 @@ void dcn32_add_phantom_pipes(struct dc *dc, struct dc_state *context,
// be a valid candidate for SubVP (i.e. has a plane, stream, doesn't
// already have phantom pipe assigned, etc.) by previous checks.
phantom_stream = dcn32_enable_phantom_stream(dc, context, pipes, pipe_cnt, index);
if (!phantom_stream)
return;
dcn32_enable_phantom_plane(dc, context, phantom_stream, index);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@ -1842,6 +1847,9 @@ bool dcn32_validate_bandwidth(struct dc *dc,
BW_VAL_TRACE_COUNT();
if (!pipes)
goto validate_fail;
DC_FP_START();
out = dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
DC_FP_END();

View file

@ -1285,6 +1285,8 @@ static struct hpo_dp_link_encoder *dcn321_hpo_dp_link_encoder_create(
/* allocate HPO link encoder */
hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
if (!hpo_dp_enc31)
return NULL; /* out of memory */
#undef REG_STRUCT
#define REG_STRUCT hpo_dp_link_enc_regs

View file

@ -78,7 +78,7 @@ static void calculate_ttu_cursor(struct display_mode_lib *mode_lib,
static unsigned int get_bytes_per_element(enum source_format_class source_format, bool is_chroma)
{
unsigned int ret_val = 0;
unsigned int ret_val = 1;
if (source_format == dm_444_16) {
if (!is_chroma)

View file

@ -932,8 +932,9 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context)
* for VBLANK: (VACTIVE region of the SubVP pipe can fit the MALL prefetch, VBLANK frame time,
* and the max of (VBLANK blanking time, MALL region)).
*/
if (stretched_drr_us < (1 / (double)drr_timing->min_refresh_in_uhz) * 1000000 * 1000000 &&
subvp_active_us - prefetch_us - stretched_drr_us - max_vblank_mallregion > 0)
if (drr_timing &&
stretched_drr_us < (1 / (double)drr_timing->min_refresh_in_uhz) * 1000000 * 1000000 &&
subvp_active_us - prefetch_us - stretched_drr_us - max_vblank_mallregion > 0)
schedulable = true;
return schedulable;
@ -995,7 +996,7 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context)
if (!subvp_pipe && pipe->stream->mall_stream_config.type == SUBVP_MAIN)
subvp_pipe = pipe;
}
if (found) {
if (found && subvp_pipe) {
main_timing = &subvp_pipe->stream->timing;
phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing;
vblank_timing = &context->res_ctx.pipe_ctx[vblank_index].stream->timing;

View file

@ -39,7 +39,7 @@
static unsigned int get_bytes_per_element(enum source_format_class source_format, bool is_chroma)
{
unsigned int ret_val = 0;
unsigned int ret_val = 1;
if (source_format == dm_444_16) {
if (!is_chroma)

View file

@ -841,6 +841,8 @@ bool is_psr_su_specific_panel(struct dc_link *link)
isPSRSUSupported = false;
else if (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x03)
isPSRSUSupported = false;
else if (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x01)
isPSRSUSupported = false;
else if (dpcd_caps->psr_info.force_psrsu_cap == 0x1)
isPSRSUSupported = true;
}

View file

@ -1843,7 +1843,7 @@ static int smu_bump_power_profile_mode(struct smu_context *smu,
static int smu_adjust_power_state_dynamic(struct smu_context *smu,
enum amd_dpm_forced_level level,
bool skip_display_settings,
bool force_update)
bool init)
{
int ret = 0;
int index = 0;
@ -1872,7 +1872,7 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
}
}
if (force_update || smu_dpm_ctx->dpm_level != level) {
if (smu_dpm_ctx->dpm_level != level) {
ret = smu_asic_set_performance_level(smu, level);
if (ret) {
dev_err(smu->adev->dev, "Failed to set performance level!");
@ -1889,7 +1889,7 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
workload[0] = smu->workload_setting[index];
if (force_update || smu->power_profile_mode != workload[0])
if (init || smu->power_profile_mode != workload[0])
smu_bump_power_profile_mode(smu, workload, 0);
}

View file

@ -256,10 +256,9 @@ static int vangogh_tables_init(struct smu_context *smu)
goto err0_out;
smu_table->metrics_time = 0;
if (smu_version >= 0x043F3E00)
smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_3);
else
smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2);
smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2);
smu_table->gpu_metrics_table_size = max(smu_table->gpu_metrics_table_size, sizeof(struct gpu_metrics_v2_3));
smu_table->gpu_metrics_table_size = max(smu_table->gpu_metrics_table_size, sizeof(struct gpu_metrics_v2_4));
smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
if (!smu_table->gpu_metrics_table)
goto err1_out;

View file

@ -1727,6 +1727,8 @@ static ssize_t smu_v13_0_7_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency;
gpu_metrics->current_gfxclk = metrics->CurrClock[PPCLK_GFXCLK];
gpu_metrics->current_socclk = metrics->CurrClock[PPCLK_SOCCLK];
gpu_metrics->current_uclk = metrics->CurrClock[PPCLK_UCLK];
gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0];
gpu_metrics->current_dclk0 = metrics->CurrClock[PPCLK_DCLK_0];
gpu_metrics->current_vclk1 = metrics->CurrClock[PPCLK_VCLK_1];

View file

@ -160,11 +160,11 @@ EXPORT_SYMBOL(drm_dp_dual_mode_write);
static bool is_hdmi_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN])
{
static const char dp_dual_mode_hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN] =
static const char dp_dual_mode_hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN + 1] =
"DP-HDMI ADAPTOR\x04";
return memcmp(hdmi_id, dp_dual_mode_hdmi_id,
sizeof(dp_dual_mode_hdmi_id)) == 0;
DP_DUAL_MODE_HDMI_ID_LEN) == 0;
}
static bool is_type1_adaptor(uint8_t adaptor_id)

View file

@ -319,6 +319,9 @@ static bool drm_dp_decode_sideband_msg_hdr(const struct drm_dp_mst_topology_mgr
hdr->broadcast = (buf[idx] >> 7) & 0x1;
hdr->path_msg = (buf[idx] >> 6) & 0x1;
hdr->msg_len = buf[idx] & 0x3f;
if (hdr->msg_len < 1) /* min space for body CRC */
return false;
idx++;
hdr->somt = (buf[idx] >> 7) & 0x1;
hdr->eomt = (buf[idx] >> 6) & 0x1;
@ -3662,8 +3665,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
ret = 0;
mgr->payload_id_table_cleared = false;
memset(&mgr->down_rep_recv, 0, sizeof(mgr->down_rep_recv));
memset(&mgr->up_req_recv, 0, sizeof(mgr->up_req_recv));
mgr->reset_rx_state = true;
}
out_unlock:
@ -3791,6 +3793,11 @@ out_fail:
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
static void reset_msg_rx_state(struct drm_dp_sideband_msg_rx *msg)
{
memset(msg, 0, sizeof(*msg));
}
static bool
drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
struct drm_dp_mst_branch **mstb)
@ -3869,6 +3876,34 @@ drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
return true;
}
static int get_msg_request_type(u8 data)
{
return data & 0x7f;
}
static bool verify_rx_request_type(struct drm_dp_mst_topology_mgr *mgr,
const struct drm_dp_sideband_msg_tx *txmsg,
const struct drm_dp_sideband_msg_rx *rxmsg)
{
const struct drm_dp_sideband_msg_hdr *hdr = &rxmsg->initial_hdr;
const struct drm_dp_mst_branch *mstb = txmsg->dst;
int tx_req_type = get_msg_request_type(txmsg->msg[0]);
int rx_req_type = get_msg_request_type(rxmsg->msg[0]);
char rad_str[64];
if (tx_req_type == rx_req_type)
return true;
drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, rad_str, sizeof(rad_str));
drm_dbg_kms(mgr->dev,
"Got unexpected MST reply, mstb: %p seqno: %d lct: %d rad: %s rx_req_type: %s (%02x) != tx_req_type: %s (%02x)\n",
mstb, hdr->seqno, mstb->lct, rad_str,
drm_dp_mst_req_type_str(rx_req_type), rx_req_type,
drm_dp_mst_req_type_str(tx_req_type), tx_req_type);
return false;
}
static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
{
struct drm_dp_sideband_msg_tx *txmsg;
@ -3898,6 +3933,9 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
goto out_clear_reply;
}
if (!verify_rx_request_type(mgr, txmsg, msg))
goto out_clear_reply;
drm_dp_sideband_parse_reply(mgr, msg, &txmsg->reply);
if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
@ -4073,6 +4111,17 @@ out:
return 0;
}
static void update_msg_rx_state(struct drm_dp_mst_topology_mgr *mgr)
{
mutex_lock(&mgr->lock);
if (mgr->reset_rx_state) {
mgr->reset_rx_state = false;
reset_msg_rx_state(&mgr->down_rep_recv);
reset_msg_rx_state(&mgr->up_req_recv);
}
mutex_unlock(&mgr->lock);
}
/**
* drm_dp_mst_hpd_irq_handle_event() - MST hotplug IRQ handle MST event
* @mgr: manager to notify irq for.
@ -4107,6 +4156,8 @@ int drm_dp_mst_hpd_irq_handle_event(struct drm_dp_mst_topology_mgr *mgr, const u
*handled = true;
}
update_msg_rx_state(mgr);
if (esi[1] & DP_DOWN_REP_MSG_RDY) {
ret = drm_dp_mst_handle_down_rep(mgr);
*handled = true;

View file

@ -1576,7 +1576,7 @@ const struct cfattach drm_ca = {
};
struct cfdriver drm_cd = {
0, "drm", DV_DULL
NULL, "drm", DV_DULL
};
const struct pci_device_id *

Some files were not shown because too many files have changed in this diff.