sync code with last improvements from OpenBSD

commit 27298272ec
parent 454dab66ed

237 changed files with 4666 additions and 2149 deletions
tpm.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: tpm.c,v 1.17 2022/04/06 18:59:27 naddy Exp $ */
+/* $OpenBSD: tpm.c,v 1.18 2023/08/15 08:27:29 miod Exp $ */

 /*
  * Minimal interface to Trusted Platform Module chips implementing the
@@ -150,7 +150,7 @@
 #define TPM_CRB_CTRL_CANCEL_CLEAR	0x0

 #define TPM_CRB_CTRL_START_CMD		(1 << 0)
-#define TPM_CRB_INT_ENABLED_BIT		(1 << 31)
+#define TPM_CRB_INT_ENABLED_BIT		(1U << 31)

 #define TPM2_RC_SUCCESS			0x0000
 #define TPM2_RC_INITIALIZE		0x0100
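The `(1 << 31)` to `(1U << 31)` change repeated throughout this commit fixes a portability problem: with a 32-bit int, shifting 1 into bit 31 overflows the signed type (undefined behavior per C99 6.5.7), and the typically-negative result sign-extends when the mask is widened. A minimal standalone sketch of the difference (not part of the commit):

	#include <stdint.h>
	#include <stdio.h>

	int
	main(void)
	{
		/* Shifting into the sign bit of a 32-bit int is undefined
		 * behavior; many compilers happen to produce INT_MIN, which
		 * then sign-extends when converted to a wider type. */
		uint64_t wide_bad = (uint64_t)(1 << 31);   /* 0xffffffff80000000 */

		/* The unsigned literal keeps the whole expression unsigned,
		 * so the value is the intended single bit. */
		uint64_t wide_good = (uint64_t)(1U << 31); /* 0x0000000080000000 */

		printf("%016llx %016llx\n",
		    (unsigned long long)wide_bad, (unsigned long long)wide_good);
		return 0;
	}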
dt_prov_static.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: dt_prov_static.c,v 1.20 2023/07/06 19:46:53 kn Exp $ */
+/* $OpenBSD: dt_prov_static.c,v 1.21 2023/08/14 08:33:24 mpi Exp $ */

 /*
  * Copyright (c) 2019 Martin Pieuchot <mpi@openbsd.org>
@@ -39,11 +39,14 @@ struct dt_provider dt_prov_static = {
  */
 DT_STATIC_PROBE2(sched, dequeue, "pid_t", "pid_t");
 DT_STATIC_PROBE2(sched, enqueue, "pid_t", "pid_t");
+DT_STATIC_PROBE3(sched, fork, "pid_t", "pid_t", "int");
 DT_STATIC_PROBE2(sched, off__cpu, "pid_t", "pid_t");
 DT_STATIC_PROBE0(sched, on__cpu);
 DT_STATIC_PROBE0(sched, remain__cpu);
 DT_STATIC_PROBE0(sched, sleep);
-DT_STATIC_PROBE0(sched, wakeup);
+DT_STATIC_PROBE3(sched, steal, "pid_t", "pid_t", "int");
+DT_STATIC_PROBE2(sched, unsleep, "pid_t", "pid_t");
+DT_STATIC_PROBE3(sched, wakeup, "pid_t", "pid_t", "int");

 /*
  * Raw syscalls
@@ -106,10 +109,13 @@ struct dt_probe *const dtps_static[] = {
	/* Scheduler */
	&_DT_STATIC_P(sched, dequeue),
	&_DT_STATIC_P(sched, enqueue),
+	&_DT_STATIC_P(sched, fork),
	&_DT_STATIC_P(sched, off__cpu),
	&_DT_STATIC_P(sched, on__cpu),
	&_DT_STATIC_P(sched, remain__cpu),
	&_DT_STATIC_P(sched, sleep),
+	&_DT_STATIC_P(sched, steal),
+	&_DT_STATIC_P(sched, unsleep),
	&_DT_STATIC_P(sched, wakeup),
	/* Raw syscalls */
	&_DT_STATIC_P(raw_syscalls, sys_enter),
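Once registered, these static probes become visible to dt(4) and btrace(8) under names like tracepoint:sched:steal. Firing a probe from scheduler code goes through the TRACEPOINT() macro; the call site below is a hedged sketch (the argument expressions are illustrative, not copied from the commit):

	/* Hypothetical call site for the new probe: report the stolen
	 * thread's tid/pid and the cpu that took it, matching the
	 * "pid_t", "pid_t", "int" signature declared above. */
	TRACEPOINT(sched, steal, p->p_tid + THREAD_PID_OFFSET,
	    p->p_p->ps_pid, ci->ci_cpuid);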
amlclock.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: amlclock.c,v 1.14 2022/06/28 23:43:12 naddy Exp $ */
+/* $OpenBSD: amlclock.c,v 1.15 2023/08/15 08:27:29 miod Exp $ */
 /*
  * Copyright (c) 2019 Mark Kettenis <kettenis@openbsd.org>
  *
@@ -89,7 +89,7 @@
 #define HHI_NAND_CLK_CNTL		0x97
 #define HHI_SD_EMMC_CLK_CNTL		0x99
 #define HHI_SYS_PLL_CNTL0		0xbd
-#define HHI_SYS_DPLL_LOCK		(1 << 31)
+#define HHI_SYS_DPLL_LOCK		(1U << 31)
 #define HHI_SYS_DPLL_RESET		(1 << 29)
 #define HHI_SYS_DPLL_EN			(1 << 28)
 #define HHI_SYS_DPLL_OD(x)		(((x) >> 16) & 0x7)
com_fdt.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: com_fdt.c,v 1.7 2022/01/11 11:51:14 uaa Exp $ */
+/* $OpenBSD: com_fdt.c,v 1.8 2023/08/15 07:56:27 miod Exp $ */
 /*
  * Copyright (c) 2016 Patrick Wildt <patrick@blueri.se>
  *
@@ -130,7 +130,7 @@ com_fdt_attach(struct device *parent, struct device *self, void *aux)
	sc->sc_uarttype = COM_UART_16550;
	sc->sc_frequency = freq ? freq : COM_FREQ;

-	if (OF_is_compatible(stdout_node, "ns16550a")) {
+	if (OF_is_compatible(faa->fa_node, "ns16550a")) {
		width = 1;
		shift = 0;
	} else {
if_cad.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: if_cad.c,v 1.12 2022/08/14 21:10:08 jca Exp $ */
+/* $OpenBSD: if_cad.c,v 1.13 2023/08/15 08:27:30 miod Exp $ */

 /*
  * Copyright (c) 2021-2022 Visa Hankala
@@ -220,7 +220,7 @@ struct cad_desc64 {
 #define GEM_RXD_ADDR_WRAP	(1 << 1)
 #define GEM_RXD_ADDR_USED	(1 << 0)

-#define GEM_RXD_BCAST		(1 << 31)
+#define GEM_RXD_BCAST		(1U << 31)
 #define GEM_RXD_MCAST		(1 << 30)
 #define GEM_RXD_UCAST		(1 << 29)
 #define GEM_RXD_SPEC		(1 << 27)
@@ -237,7 +237,7 @@ struct cad_desc64 {
 #define GEM_RXD_BADFCS		(1 << 13)
 #define GEM_RXD_LEN_MASK	0x1fff

-#define GEM_TXD_USED		(1 << 31)
+#define GEM_TXD_USED		(1U << 31)
 #define GEM_TXD_WRAP		(1 << 30)
 #define GEM_TXD_RLIMIT		(1 << 29)
 #define GEM_TXD_CORRUPT		(1 << 27)
if_dwge.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: if_dwge.c,v 1.18 2023/07/06 08:32:37 jmatthew Exp $ */
+/* $OpenBSD: if_dwge.c,v 1.19 2023/08/15 08:27:30 miod Exp $ */
 /*
  * Copyright (c) 2008, 2019 Mark Kettenis <kettenis@openbsd.org>
  * Copyright (c) 2017 Patrick Wildt <patrick@blueri.se>
@@ -194,7 +194,7 @@ struct dwge_desc {
 #define TDES0_PCE		(1 << 12)
 #define TDES0_JT		(1 << 14)
 #define TDES0_IHE		(1 << 16)
-#define TDES0_OWN		(1 << 31)
+#define TDES0_OWN		(1U << 31)

 #define ETDES0_TCH		(1 << 20)
 #define ETDES0_FS		(1 << 28)
@@ -217,7 +217,7 @@ struct dwge_desc {
 #define RDES0_FL_MASK		0x3fff
 #define RDES0_FL_SHIFT		16
 #define RDES0_AFM		(1 << 30)
-#define RDES0_OWN		(1 << 31)
+#define RDES0_OWN		(1U << 31)

 /* Tx size bits */
 #define TDES1_TBS1		(0xfff << 0)
@@ -229,12 +229,12 @@ struct dwge_desc {
 #define TDES1_CIC_FULL		(3 << 27)
 #define TDES1_FS		(1 << 29)
 #define TDES1_LS		(1 << 30)
-#define TDES1_IC		(1 << 31)
+#define TDES1_IC		(1U << 31)

 /* Rx size bits */
 #define RDES1_RBS1		(0xfff << 0)
 #define RDES1_RCH		(1 << 24)
-#define RDES1_DIC		(1 << 31)
+#define RDES1_DIC		(1U << 31)

 #define ERDES1_RCH		(1 << 14)
if_dwxe.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: if_dwxe.c,v 1.21 2022/07/09 20:51:39 kettenis Exp $ */
+/* $OpenBSD: if_dwxe.c,v 1.22 2023/08/15 08:27:30 miod Exp $ */
 /*
  * Copyright (c) 2008 Mark Kettenis
  * Copyright (c) 2017 Patrick Wildt <patrick@blueri.se>
@@ -184,7 +184,7 @@ struct dwxe_desc {
 #define DWXE_TX_PAYLOAD_ERR	(1 << 12)
 #define DWXE_TX_LENGTH_ERR	(1 << 14)
 #define DWXE_TX_HEADER_ERR	(1 << 16)
-#define DWXE_TX_DESC_CTL	(1 << 31)
+#define DWXE_TX_DESC_CTL	(1U << 31)

 /* Rx status bits */
 #define DWXE_RX_PAYLOAD_ERR	(1 << 0)
@@ -202,7 +202,7 @@ struct dwxe_desc {
 #define DWXE_RX_FRM_LEN_MASK	0x3fff
 #define DWXE_RX_FRM_LEN_SHIFT	16
 #define DWXE_RX_DAF_FAIL	(1 << 30)
-#define DWXE_RX_DESC_CTL	(1 << 31)
+#define DWXE_RX_DESC_CTL	(1U << 31)

 /* Tx size bits */
 #define DWXE_TX_BUF_SIZE	(0xfff << 0)
@@ -213,11 +213,11 @@ struct dwxe_desc {
 #define DWXE_TX_CHECKSUM_CTL_FULL	(3 << 27)
 #define DWXE_TX_FIR_DESC	(1 << 29)
 #define DWXE_TX_LAST_DESC	(1 << 30)
-#define DWXE_TX_INT_CTL		(1 << 31)
+#define DWXE_TX_INT_CTL		(1U << 31)

 /* Rx size bits */
 #define DWXE_RX_BUF_SIZE	(0xfff << 0)
-#define DWXE_RX_INT_CTL		(1 << 31)
+#define DWXE_RX_INT_CTL		(1U << 31)

 /* EMAC syscon bits */
 #define SYSCON_EMAC		0x30
imxiomuxc.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: imxiomuxc.c,v 1.7 2021/10/24 17:52:26 mpi Exp $ */
+/* $OpenBSD: imxiomuxc.c,v 1.8 2023/08/15 08:27:30 miod Exp $ */
 /*
  * Copyright (c) 2013 Patrick Wildt <patrick@blueri.se>
  * Copyright (c) 2016 Mark Kettenis <kettenis@openbsd.org>
@@ -35,7 +35,7 @@

 #define IOMUX_CONFIG_SION		(1 << 4)

-#define IMX_PINCTRL_NO_PAD_CTL		(1 << 31)
+#define IMX_PINCTRL_NO_PAD_CTL		(1U << 31)
 #define IMX_PINCTRL_SION		(1 << 30)

 struct imxiomuxc_softc {
mvpinctrl.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: mvpinctrl.c,v 1.11 2022/06/28 23:43:12 naddy Exp $ */
+/* $OpenBSD: mvpinctrl.c,v 1.12 2023/08/15 08:27:30 miod Exp $ */
 /*
  * Copyright (c) 2013,2016 Patrick Wildt <patrick@blueri.se>
  * Copyright (c) 2016 Mark Kettenis <kettenis@openbsd.org>
@@ -332,7 +332,7 @@ mvpinctrl_set_pin(void *cookie, uint32_t *cells, int val)
 /* Armada 3700 XTAL block */

 #define XTAL		0xc
-#define XTAL_MODE	(1 << 31)
+#define XTAL_MODE	(1U << 31)

 uint32_t
 a3700_xtal_get_frequency(void *cookie, uint32_t *cells)
sxiccmu.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: sxiccmu.c,v 1.31 2022/06/28 23:43:12 naddy Exp $ */
+/* $OpenBSD: sxiccmu.c,v 1.32 2023/08/15 08:27:30 miod Exp $ */
 /*
  * Copyright (c) 2007,2009 Dale Rahn <drahn@openbsd.org>
  * Copyright (c) 2013 Artturi Alm
@@ -1158,7 +1158,7 @@ sxiccmu_a80_get_frequency(struct sxiccmu_softc *sc, uint32_t idx)

 /* Allwinner H3/H5 */
 #define H3_PLL_CPUX_CTRL_REG		0x0000
-#define H3_PLL_CPUX_ENABLE		(1 << 31)
+#define H3_PLL_CPUX_ENABLE		(1U << 31)
 #define H3_PLL_CPUX_LOCK		(1 << 28)
 #define H3_PLL_CPUX_OUT_EXT_DIVP(x)	(((x) >> 16) & 0x3)
 #define H3_PLL_CPUX_OUT_EXT_DIVP_MASK	(0x3 << 16)
hid.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: hid.c,v 1.5 2022/05/20 05:03:45 anton Exp $ */
+/* $OpenBSD: hid.c,v 1.6 2023/08/12 20:47:06 miod Exp $ */
 /*	$NetBSD: hid.c,v 1.23 2002/07/11 21:14:25 augustss Exp $	*/
 /*	$FreeBSD: src/sys/dev/usb/hid.c,v 1.11 1999/11/17 22:33:39 n_hibma Exp $ */

@@ -657,3 +657,52 @@ hid_is_collection(const void *desc, int size, uint8_t id, int32_t usage)
	hid_end_parse(hd);
	return (0);
 }
+
+struct hid_data *
+hid_get_collection_data(const void *desc, int size, int32_t usage,
+    uint32_t collection)
+{
+	struct hid_data *hd;
+	struct hid_item hi;
+
+	hd = hid_start_parse(desc, size, hid_all);
+
+	DPRINTF("%s: usage=0x%x\n", __func__, usage);
+	while (hid_get_item(hd, &hi)) {
+		DPRINTF("%s: kind=%d id=%d usage=0x%x(0x%x)\n", __func__,
+		    hi.kind, hi.report_ID, hi.usage, usage);
+		if (hi.kind == hid_collection &&
+		    hi.collection == collection && hi.usage == usage) {
+			DPRINTF("%s: found\n", __func__);
+			return hd;
+		}
+	}
+	DPRINTF("%s: not found\n", __func__);
+	hid_end_parse(hd);
+	return NULL;
+}
+
+int
+hid_get_id_of_collection(const void *desc, int size, int32_t usage,
+    uint32_t collection)
+{
+	struct hid_data *hd;
+	struct hid_item hi;
+
+	hd = hid_start_parse(desc, size, hid_all);
+
+	DPRINTF("%s: usage=0x%x\n", __func__, usage);
+	while (hid_get_item(hd, &hi)) {
+		DPRINTF("%s: kind=%d id=%d usage=0x%x(0x%x)\n", __func__,
+		    hi.kind, hi.report_ID, hi.usage, usage);
+		if (hi.kind == hid_collection &&
+		    hi.collection == collection && hi.usage == usage) {
+			DPRINTF("%s: found\n", __func__);
+			hid_end_parse(hd);
+			return hi.report_ID;
+		}
+	}
+	DPRINTF("%s: not found\n", __func__);
+	hid_end_parse(hd);
+	return -1;
+}
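A sketch of how a driver can consume the two new helpers (the usage and collection constants come from hid.h; desc/dlen are the device's report descriptor, and the surrounding logic is illustrative, not from the commit):

	struct hid_data *hd;
	int repid;

	/* Position a parser inside the physical stylus collection, if
	 * present; a non-NULL return must be closed with hid_end_parse(). */
	hd = hid_get_collection_data(desc, dlen,
	    HID_USAGE2(HUP_DIGITIZERS, HUD_STYLUS), HCOLL_PHYSICAL);
	if (hd != NULL) {
		/* ... iterate with hid_get_item(hd, &hi) ... */
		hid_end_parse(hd);
	}

	/* Or just ask which report ID that collection reports with;
	 * -1 means no matching collection was found. */
	repid = hid_get_id_of_collection(desc, dlen,
	    HID_USAGE2(HUP_DIGITIZERS, HUD_STYLUS), HCOLL_PHYSICAL);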
hid.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: hid.h,v 1.10 2022/05/20 05:03:45 anton Exp $ */
+/* $OpenBSD: hid.h,v 1.11 2023/08/12 20:47:06 miod Exp $ */
 /*	$NetBSD: hid.h,v 1.8 2002/07/11 21:14:25 augustss Exp $	*/
 /*	$FreeBSD: src/sys/dev/usb/hid.h,v 1.7 1999/11/17 22:33:40 n_hibma Exp $ */

@@ -93,6 +93,8 @@ int hid_locate(const void *, int, int32_t, uint8_t, enum hid_kind,
 int32_t hid_get_data(const uint8_t *buf, int, struct hid_location *);
 uint32_t hid_get_udata(const uint8_t *buf, int, struct hid_location *);
 int hid_is_collection(const void *, int, uint8_t, int32_t);
+struct hid_data *hid_get_collection_data(const void *, int, int32_t, uint32_t);
+int hid_get_id_of_collection(const void *, int, int32_t, uint32_t);

 #endif /* _KERNEL */
@@ -353,6 +355,7 @@ int hid_is_collection(const void *, int, uint8_t, int32_t);
 #define HUD_TOUCHSCREEN		0x0004
 #define HUD_TOUCHPAD		0x0005
+#define HUD_CONFIG		0x000e
 #define HUD_STYLUS		0x0020
 #define HUD_FINGER		0x0022
 #define HUD_TIP_PRESSURE	0x0030
 #define HUD_BARREL_PRESSURE	0x0031
@@ -387,6 +390,12 @@ int hid_is_collection(const void *, int, uint8_t, int32_t);
 #define HUD_CONTACT_MAX		0x0055
 #define HUD_SCAN_TIME		0x0056
 #define HUD_BUTTON_TYPE		0x0059
+#define HUD_SECONDARY_BARREL_SWITCH	0x005A
+#define HUD_WACOM_X		0x0130
+#define HUD_WACOM_Y		0x0131
+#define HUD_WACOM_DISTANCE	0x0132
+#define HUD_WACOM_PAD_BUTTONS00	0x0910
+#define HUD_WACOM_BATTERY	0x1013

 /* Usages, LED */
 #define HUL_NUM_LOCK		0x0001
hidms.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: hidms.c,v 1.9 2022/06/16 20:52:38 bru Exp $ */
+/* $OpenBSD: hidms.c,v 1.10 2023/08/12 20:47:06 miod Exp $ */
 /*	$NetBSD: ums.c,v 1.60 2003/03/11 16:44:00 augustss Exp $	*/

 /*
@@ -61,6 +61,188 @@ int hidmsdebug = 0;
 #define MOUSE_FLAGS_MASK (HIO_CONST | HIO_RELATIVE)
 #define NOTMOUSE(f) (((f) & MOUSE_FLAGS_MASK) != HIO_RELATIVE)

+void
+hidms_stylus_hid_parse(struct hidms *ms, struct hid_data *d,
+    struct hid_location *loc_stylus_btn)
+{
+	struct hid_item h;
+
+	while (hid_get_item(d, &h)) {
+		if (h.kind == hid_endcollection)
+			break;
+		if (h.kind != hid_input || (h.flags & HIO_CONST) != 0)
+			continue;
+		/* All the possible stylus reported usages go here */
+#ifdef HIDMS_DEBUG
+		printf("stylus usage: 0x%x\n", h.usage);
+#endif
+		switch (h.usage) {
+		/* Buttons */
+		case HID_USAGE2(HUP_WACOM | HUP_DIGITIZERS, HUD_TIP_SWITCH):
+			DPRINTF("Stylus usage tip set\n");
+			if (ms->sc_num_stylus_buttons >= MAX_BUTTONS)
+				break;
+			loc_stylus_btn[ms->sc_num_stylus_buttons++] = h.loc;
+			ms->sc_flags |= HIDMS_TIP;
+			break;
+		case HID_USAGE2(HUP_WACOM | HUP_DIGITIZERS, HUD_BARREL_SWITCH):
+			DPRINTF("Stylus usage barrel set\n");
+			if (ms->sc_num_stylus_buttons >= MAX_BUTTONS)
+				break;
+			loc_stylus_btn[ms->sc_num_stylus_buttons++] = h.loc;
+			ms->sc_flags |= HIDMS_BARREL;
+			break;
+		case HID_USAGE2(HUP_WACOM | HUP_DIGITIZERS,
+		    HUD_SECONDARY_BARREL_SWITCH):
+			DPRINTF("Stylus usage secondary barrel set\n");
+			if (ms->sc_num_stylus_buttons >= MAX_BUTTONS)
+				break;
+			loc_stylus_btn[ms->sc_num_stylus_buttons++] = h.loc;
+			ms->sc_flags |= HIDMS_SEC_BARREL;
+			break;
+		case HID_USAGE2(HUP_WACOM | HUP_DIGITIZERS, HUD_IN_RANGE):
+			DPRINTF("Stylus usage in range set\n");
+			if (ms->sc_num_stylus_buttons >= MAX_BUTTONS)
+				break;
+			loc_stylus_btn[ms->sc_num_stylus_buttons++] = h.loc;
+			break;
+		case HID_USAGE2(HUP_WACOM | HUP_DIGITIZERS, HUD_QUALITY):
+			DPRINTF("Stylus usage quality set\n");
+			if (ms->sc_num_stylus_buttons >= MAX_BUTTONS)
+				break;
+			loc_stylus_btn[ms->sc_num_stylus_buttons++] = h.loc;
+			break;
+		/* Axes */
+		case HID_USAGE2(HUP_WACOM | HUP_DIGITIZERS, HUD_WACOM_X):
+			DPRINTF("Stylus usage x set\n");
+			ms->sc_loc_x = h.loc;
+			ms->sc_tsscale.minx = h.logical_minimum;
+			ms->sc_tsscale.maxx = h.logical_maximum;
+			ms->sc_flags |= HIDMS_ABSX;
+			break;
+		case HID_USAGE2(HUP_WACOM | HUP_DIGITIZERS, HUD_WACOM_Y):
+			DPRINTF("Stylus usage y set\n");
+			ms->sc_loc_y = h.loc;
+			ms->sc_tsscale.miny = h.logical_minimum;
+			ms->sc_tsscale.maxy = h.logical_maximum;
+			ms->sc_flags |= HIDMS_ABSY;
+			break;
+		case HID_USAGE2(HUP_WACOM | HUP_DIGITIZERS, HUD_TIP_PRESSURE):
+			DPRINTF("Stylus usage pressure set\n");
+			ms->sc_loc_z = h.loc;
+			ms->sc_tsscale.minz = h.logical_minimum;
+			ms->sc_tsscale.maxz = h.logical_maximum;
+			ms->sc_flags |= HIDMS_Z;
+			break;
+		case HID_USAGE2(HUP_WACOM | HUP_DIGITIZERS, HUD_WACOM_DISTANCE):
+			DPRINTF("Stylus usage distance set\n");
+			ms->sc_loc_w = h.loc;
+			ms->sc_tsscale.minw = h.logical_minimum;
+			ms->sc_tsscale.maxw = h.logical_maximum;
+			ms->sc_flags |= HIDMS_W;
+			break;
+		default:
+#ifdef HIDMS_DEBUG
+			printf("Unknown stylus usage: 0x%x\n",
+			    h.usage);
+#endif
+			break;
+		}
+	}
+}
+
+void
+hidms_pad_buttons_hid_parse(struct hidms *ms, struct hid_data *d,
+    struct hid_location *loc_pad_btn)
+{
+	struct hid_item h;
+
+	while (hid_get_item(d, &h)) {
+		if (h.kind == hid_endcollection)
+			break;
+		if (h.kind == hid_input && (h.flags & HIO_CONST) != 0 &&
+		    h.usage == HID_USAGE2(HUP_WACOM | HUP_DIGITIZERS,
+		    HUD_WACOM_PAD_BUTTONS00 | ms->sc_num_pad_buttons)) {
+			if (ms->sc_num_pad_buttons >= MAX_BUTTONS)
+				break;
+			loc_pad_btn[ms->sc_num_pad_buttons++] = h.loc;
+		}
+	}
+}
+
+int
+hidms_wacom_setup(struct device *self, struct hidms *ms, void *desc, int dlen)
+{
+	struct hid_data *hd;
+	int i;
+	struct hid_location loc_pad_btn[MAX_BUTTONS];
+	struct hid_location loc_stylus_btn[MAX_BUTTONS];
+
+	ms->sc_flags = 0;
+
+	/* Set x,y,z and w to zero by default */
+	ms->sc_loc_x.size = 0;
+	ms->sc_loc_y.size = 0;
+	ms->sc_loc_z.size = 0;
+	ms->sc_loc_w.size = 0;
+
+	if ((hd = hid_get_collection_data(desc, dlen,
+	    HID_USAGE2(HUP_WACOM | HUP_DIGITIZERS, HUD_DIGITIZER),
+	    HCOLL_APPLICATION))) {
+		DPRINTF("found the global collection\n");
+		hid_end_parse(hd);
+		if ((hd = hid_get_collection_data(desc, dlen,
+		    HID_USAGE2(HUP_WACOM | HUP_DIGITIZERS, HUD_STYLUS),
+		    HCOLL_PHYSICAL))) {
+			DPRINTF("found stylus collection\n");
+			hidms_stylus_hid_parse(ms, hd, loc_stylus_btn);
+			hid_end_parse(hd);
+		}
+		if ((hd = hid_get_collection_data(desc, dlen,
+		    HID_USAGE2(HUP_WACOM | HUP_DIGITIZERS, HUD_TABLET_FKEYS),
+		    HCOLL_PHYSICAL))) {
+			DPRINTF("found tablet keys collection\n");
+			hidms_pad_buttons_hid_parse(ms, hd, loc_pad_btn);
+			hid_end_parse(hd);
+		}
+#ifdef notyet
+		if ((hd = hid_get_collection_data(desc, dlen,
+		    HID_USAGE2(HUP_WACOM | HUP_DIGITIZERS, HUD_WACOM_BATTERY),
+		    HCOLL_PHYSICAL))) {
+			DPRINTF("found battery collection\n");
+			/* parse and set the battery info */
+			/* not yet used */
+			hid_end_parse(hd);
+		}
+#endif
+		/*
+		 * Ignore the device config, it's not really needed;
+		 * Ignore the usage 0x10AC which is the debug collection, and
+		 * ignore firmware collection and other collections for now.
+		 */
+	}
+
+	/* Map the pad and stylus buttons to mouse buttons */
+	for (i = 0; i < ms->sc_num_stylus_buttons; i++)
+		memcpy(&ms->sc_loc_btn[i], &loc_stylus_btn[i],
+		    sizeof(struct hid_location));
+	if (ms->sc_num_pad_buttons + ms->sc_num_stylus_buttons >= MAX_BUTTONS)
+		ms->sc_num_pad_buttons =
+		    MAX_BUTTONS - ms->sc_num_stylus_buttons;
+	for (; i < ms->sc_num_pad_buttons + ms->sc_num_stylus_buttons; i++)
+		memcpy(&ms->sc_loc_btn[i], &loc_pad_btn[i],
+		    sizeof(struct hid_location));
+	ms->sc_num_buttons = i;
+	DPRINTF("Button information\n");
+#ifdef HIDMS_DEBUG
+	for (i = 0; i < ms->sc_num_buttons; i++)
+		printf("size: 0x%x, pos: 0x%x, count: 0x%x\n",
+		    ms->sc_loc_btn[i].size, ms->sc_loc_btn[i].pos,
+		    ms->sc_loc_btn[i].count);
+#endif
+	return 0;
+}
+
 int
 hidms_setup(struct device *self, struct hidms *ms, uint32_t quirks,
     int id, void *desc, int dlen)
@@ -75,11 +257,15 @@ hidms_setup(struct device *self, struct hidms *ms, uint32_t quirks,

	ms->sc_flags = quirks;

+	/* We are setting up a Wacom tablet, not a regular mouse */
+	if (quirks & HIDMS_WACOM_SETUP)
+		return hidms_wacom_setup(self, ms, desc, dlen);
+
	if (!hid_locate(desc, dlen, HID_USAGE2(HUP_GENERIC_DESKTOP, HUG_X), id,
	    hid_input, &ms->sc_loc_x, &flags))
		ms->sc_loc_x.size = 0;

-	switch(flags & MOUSE_FLAGS_MASK) {
+	switch (flags & MOUSE_FLAGS_MASK) {
	case 0:
		ms->sc_flags |= HIDMS_ABSX;
		break;
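The match keys above are single 32-bit page/usage pairs. A short worked sketch of the encoding (HID_USAGE2() packing is standard; the concrete HUP_*/HUD_* values below are assumptions for illustration):

	/* HID_USAGE2() packs the usage page into the high 16 bits and the
	 * usage ID into the low 16 bits, so one 32-bit compare matches both.
	 * Assuming HUP_WACOM 0xff00, HUP_DIGITIZERS 0x000d, HUD_WACOM_X 0x0130:
	 *
	 *   HID_USAGE2(HUP_WACOM | HUP_DIGITIZERS, HUD_WACOM_X)
	 *     = ((0xff00 | 0x000d) << 16) | 0x0130
	 *     = 0xff0d0130   (vendor digitizer page, Wacom X axis)
	 */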
hidmsvar.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: hidmsvar.h,v 1.2 2021/01/10 16:32:48 thfr Exp $ */
+/* $OpenBSD: hidmsvar.h,v 1.3 2023/08/12 20:47:06 miod Exp $ */
 /*	$NetBSD: ums.c,v 1.60 2003/03/11 16:44:00 augustss Exp $	*/

 /*
@@ -36,11 +36,16 @@
 struct tsscale {
	int	minx, maxx;
	int	miny, maxy;
+	int	minz, maxz;
+	int	minw, maxw;
	int	swapxy;
	int	resx, resy;
 };

 struct hidms {
+	struct device	*sc_device;
+	struct device	*sc_wsmousedev;
+
	int		sc_enabled;
	int		sc_flags;	/* device configuration */
 #define HIDMS_SPUR_BUT_UP	0x0001	/* spurious button up events */
@@ -51,17 +56,20 @@ struct hidms {
 #define HIDMS_LEADINGBYTE	0x0020	/* Unknown leading byte */
 #define HIDMS_ABSX		0x0040	/* X-axis is absolute */
 #define HIDMS_ABSY		0x0080	/* Y-axis is absolute */
-#define HIDMS_TIP		0x0100	/* Tip switch on a digitiser pen */
+#define HIDMS_TIP		0x0100	/* Tip switch on a digitiser pen */
 #define HIDMS_BARREL		0x0200	/* Barrel switch on a digitiser pen */
-#define HIDMS_ERASER		0x0400	/* Eraser switch on a digitiser pen */
+#define HIDMS_ERASER		0x0400	/* Eraser switch on a digitiser pen */
 #define HIDMS_MS_BAD_CLASS	0x0800	/* Mouse doesn't identify properly */
 #define HIDMS_VENDOR_BUTTONS	0x1000	/* extra buttons in vendor page */
+#define HIDMS_SEC_BARREL	0x2000	/* Secondary Barrel switch on a digitiser pen */
+#define HIDMS_WACOM_SETUP	0x4000	/* Requires Wacom-style setup */

	int		sc_num_buttons;
	u_int32_t	sc_buttons;	/* mouse button status */

-	struct device	*sc_device;
-	struct device	*sc_wsmousedev;
+	/* Wacom-specific fields */
+	int		sc_num_pad_buttons;
+	int		sc_num_stylus_buttons;

	/* locators */
	struct hid_location sc_loc_x;
ihidev.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: ihidev.c,v 1.28 2022/08/31 15:14:01 kettenis Exp $ */
+/* $OpenBSD: ihidev.c,v 1.29 2023/08/12 10:03:05 kettenis Exp $ */
 /*
  * HID-over-i2c driver
  *
@@ -198,6 +198,9 @@ ihidev_attach(struct device *parent, struct device *self, void *aux)
		sc->sc_subdevs[repid] = (struct ihidev *)dev;
	}

+	if (sc->sc_refcnt > 0)
+		return;
+
	/* power down until we're opened */
	if (ihidev_hid_command(sc, I2C_HID_CMD_SET_POWER, &I2C_HID_POWER_OFF)) {
		printf("%s: failed to power down\n", sc->sc_dev.dv_xname);
ufshcireg.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: ufshcireg.h,v 1.2 2023/02/17 08:01:03 jsg Exp $ */
+/* $OpenBSD: ufshcireg.h,v 1.3 2023/08/15 08:27:30 miod Exp $ */

 /*
  * Copyright (c) 2022 Marcus Glocker <mglocker@openbsd.org>
@@ -119,7 +119,7 @@
 #define UFSHCI_REG_UECDME			0x48
 /* UTP Transfer Request Interrupt Aggregation Control Register */
 #define UFSHCI_REG_UTRIACR			0x4C
-#define UFSHCI_REG_UTRIACR_IAEN			(1 << 31) /* RW */
+#define UFSHCI_REG_UTRIACR_IAEN			(1U << 31) /* RW */
 #define UFSHCI_REG_UTRIACR_IAPWEN		(1 << 24) /* WO */
 #define UFSHCI_REG_UTRIACR_IASB			(1 << 20) /* RO */
 #define UFSHCI_REG_UTRIACR_CTR			(1 << 16) /* WO */
@@ -246,6 +246,7 @@ extern int amdgpu_num_kcq;

 #define AMDGPU_VCNFW_LOG_SIZE (32 * 1024)
 extern int amdgpu_vcnfw_log;
+extern int amdgpu_sg_display;

 #define AMDGPU_VM_MAX_NUM_CTX		4096
 #define AMDGPU_SG_THRESHOLD		(256*1024*1024)
@@ -287,6 +288,9 @@ extern int amdgpu_vcnfw_log;
 #define AMDGPU_SMARTSHIFT_MAX_BIAS (100)
 #define AMDGPU_SMARTSHIFT_MIN_BIAS (-100)

+/* Extra time delay(in ms) to eliminate the influence of temperature momentary fluctuation */
+#define AMDGPU_SWCTF_EXTRA_DELAY	50
+
 struct amdgpu_device;
 struct amdgpu_irq_src;
 struct amdgpu_fpriv;
@@ -1291,6 +1295,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
 int amdgpu_device_pci_reset(struct amdgpu_device *adev);
 bool amdgpu_device_need_post(struct amdgpu_device *adev);
+bool amdgpu_sg_display_supported(struct amdgpu_device *adev);
 bool amdgpu_device_pcie_dynamic_switching_supported(void);
 bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);
 bool amdgpu_device_aspm_support_quirk(void);
@@ -101,39 +101,97 @@ void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev)
	}
 }

+static int amdgpu_atomfirmware_allocate_fb_v2_1(struct amdgpu_device *adev,
+	struct vram_usagebyfirmware_v2_1 *fw_usage, int *usage_bytes)
+{
+	uint32_t start_addr, fw_size, drv_size;
+
+	start_addr = le32_to_cpu(fw_usage->start_address_in_kb);
+	fw_size = le16_to_cpu(fw_usage->used_by_firmware_in_kb);
+	drv_size = le16_to_cpu(fw_usage->used_by_driver_in_kb);
+
+	DRM_DEBUG("atom firmware v2_1 requested %08x %dkb fw %dkb drv\n",
+		  start_addr,
+		  fw_size,
+		  drv_size);
+
+	if ((start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
+	    (uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
+	    ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
+		/* Firmware request VRAM reservation for SR-IOV */
+		adev->mman.fw_vram_usage_start_offset = (start_addr &
+			(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
+		adev->mman.fw_vram_usage_size = fw_size << 10;
+		/* Use the default scratch size */
+		*usage_bytes = 0;
+	} else {
+		*usage_bytes = drv_size << 10;
+	}
+	return 0;
+}
+
+static int amdgpu_atomfirmware_allocate_fb_v2_2(struct amdgpu_device *adev,
+	struct vram_usagebyfirmware_v2_2 *fw_usage, int *usage_bytes)
+{
+	uint32_t fw_start_addr, fw_size, drv_start_addr, drv_size;
+
+	fw_start_addr = le32_to_cpu(fw_usage->fw_region_start_address_in_kb);
+	fw_size = le16_to_cpu(fw_usage->used_by_firmware_in_kb);
+
+	drv_start_addr = le32_to_cpu(fw_usage->driver_region0_start_address_in_kb);
+	drv_size = le32_to_cpu(fw_usage->used_by_driver_region0_in_kb);
+
+	DRM_DEBUG("atom requested fw start at %08x %dkb and drv start at %08x %dkb\n",
+		  fw_start_addr,
+		  fw_size,
+		  drv_start_addr,
+		  drv_size);
+
+	if ((fw_start_addr & (ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION << 30)) == 0) {
+		/* Firmware request VRAM reservation for SR-IOV */
+		adev->mman.fw_vram_usage_start_offset = (fw_start_addr &
+			(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
+		adev->mman.fw_vram_usage_size = fw_size << 10;
+	}
+
+	if ((drv_start_addr & (ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION << 30)) == 0) {
+		/* driver request VRAM reservation for SR-IOV */
+		adev->mman.drv_vram_usage_start_offset = (drv_start_addr &
+			(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
+		adev->mman.drv_vram_usage_size = drv_size << 10;
+	}
+
+	*usage_bytes = 0;
+	return 0;
+}
+
 int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
 {
	struct atom_context *ctx = adev->mode_info.atom_context;
	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						vram_usagebyfirmware);
-	struct vram_usagebyfirmware_v2_1 *firmware_usage;
-	uint32_t start_addr, size;
+	struct vram_usagebyfirmware_v2_1 *fw_usage_v2_1;
+	struct vram_usagebyfirmware_v2_2 *fw_usage_v2_2;
	uint16_t data_offset;
+	uint8_t frev, crev;
	int usage_bytes = 0;

-	if (amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
-		firmware_usage = (struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
-		DRM_DEBUG("atom firmware requested %08x %dkb fw %dkb drv\n",
-			  le32_to_cpu(firmware_usage->start_address_in_kb),
-			  le16_to_cpu(firmware_usage->used_by_firmware_in_kb),
-			  le16_to_cpu(firmware_usage->used_by_driver_in_kb));
-
-		start_addr = le32_to_cpu(firmware_usage->start_address_in_kb);
-		size = le16_to_cpu(firmware_usage->used_by_firmware_in_kb);
-
-		if ((uint32_t)(start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
-			(uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
-			ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
-			/* Firmware request VRAM reservation for SR-IOV */
-			adev->mman.fw_vram_usage_start_offset = (start_addr &
-				(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
-			adev->mman.fw_vram_usage_size = size << 10;
-			/* Use the default scratch size */
-			usage_bytes = 0;
-		} else {
-			usage_bytes = le16_to_cpu(firmware_usage->used_by_driver_in_kb) << 10;
+	if (amdgpu_atom_parse_data_header(ctx, index, NULL, &frev, &crev, &data_offset)) {
+		if (frev == 2 && crev == 1) {
+			fw_usage_v2_1 =
+				(struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
+			amdgpu_atomfirmware_allocate_fb_v2_1(adev,
+				fw_usage_v2_1,
+				&usage_bytes);
+		} else if (frev >= 2 && crev >= 2) {
+			fw_usage_v2_2 =
+				(struct vram_usagebyfirmware_v2_2 *)(ctx->bios + data_offset);
+			amdgpu_atomfirmware_allocate_fb_v2_2(adev,
+				fw_usage_v2_2,
+				&usage_bytes);
+		}
	}

	ctx->scratch_size_bytes = 0;
	if (usage_bytes == 0)
		usage_bytes = 20 * 1024;
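For orientation, the start-address fields above are overloaded: the top bits carry operation flags and the rest is a VRAM offset in KiB, which the `<< 10` converts to bytes. A worked sketch of the decoding (macro names and constant values here are assumptions for illustration, not taken from atomfirmware.h):

	#define VRAM_OP_FLAGS_SHIFT	30		/* assumed */
	#define VRAM_OP_FLAGS_MASK	0xc0000000	/* assumed */

	uint32_t start_addr = 0x40000100;	/* flags = 1, offset = 0x100 KiB */
	uint32_t flags = start_addr >> VRAM_OP_FLAGS_SHIFT;		/* 1 */
	uint64_t offset = (uint64_t)(start_addr & ~VRAM_OP_FLAGS_MASK) << 10;
	/* offset == 0x40000 bytes, i.e. the 0x100 KiB encoded above */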
@@ -287,7 +287,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,

	if (!p->gang_size) {
		ret = -EINVAL;
-		goto free_partial_kdata;
+		goto free_all_kdata;
	}

	for (i = 0; i < p->gang_size; ++i) {
@@ -1351,6 +1351,32 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
	return true;
 }

+/*
+ * On APUs with >= 64GB white flickering has been observed w/ SG enabled.
+ * Disable S/G on such systems until we have a proper fix.
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/2354
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/2735
+ */
+bool amdgpu_sg_display_supported(struct amdgpu_device *adev)
+{
+	switch (amdgpu_sg_display) {
+	case -1:
+		break;
+	case 0:
+		return false;
+	case 1:
+		return true;
+	default:
+		return false;
+	}
+	if ((totalram_pages() << (PAGE_SHIFT - 10)) +
+	    (adev->gmc.real_vram_size / 1024) >= 64000000) {
+		DRM_WARN("Disabling S/G due to >=64GB RAM\n");
+		return false;
+	}
+	return true;
+}
+
 /*
  * Intel hosts such as Raptor Lake and Sapphire Rapids don't support dynamic
  * speed switching. Until we have confirmation from Intel that a specific host
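The threshold arithmetic in the new function works in KiB; a short worked example (assuming 4 KiB pages, so PAGE_SHIFT == 12):

	/* totalram_pages() counts pages; shifting by PAGE_SHIFT - 10 turns
	 * pages into KiB (each 4 KiB page contributes 1 << 2 = 4 KiB), and
	 * real_vram_size / 1024 is likewise KiB. */
	unsigned long pages = 16UL << 20;		/* 16Mi pages = 64 GiB */
	unsigned long ram_kib = pages << (12 - 10);	/* 67108864 KiB */
	/* 67108864 KiB >= 64000000 KiB (~61 GiB), so S/G gets disabled. */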
@@ -185,6 +185,7 @@ int amdgpu_num_kcq = -1;
 int amdgpu_smartshift_bias;
 int amdgpu_use_xgmi_p2p = 1;
 int amdgpu_vcnfw_log;
+int amdgpu_sg_display = -1; /* auto */

 static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work);

@@ -929,6 +930,16 @@ module_param_named(num_kcq, amdgpu_num_kcq, int, 0444);
 MODULE_PARM_DESC(vcnfw_log, "Enable vcnfw log(0 = disable (default value), 1 = enable)");
 module_param_named(vcnfw_log, amdgpu_vcnfw_log, int, 0444);

+/**
+ * DOC: sg_display (int)
+ * Disable S/G (scatter/gather) display (i.e., display from system memory).
+ * This option is only relevant on APUs. Set this option to 0 to disable
+ * S/G display if you experience flickering or other issues under memory
+ * pressure and report the issue.
+ */
+MODULE_PARM_DESC(sg_display, "S/G Display (-1 = auto (default), 0 = disable)");
+module_param_named(sg_display, amdgpu_sg_display, int, 0444);
+
 /**
  * DOC: smu_pptable_id (int)
  * Used to override pptable id. id = 0 use VBIOS pptable.
@@ -348,17 +348,16 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
  * @adev: amdgpu device object
  * @offset: offset of the BO
  * @size: size of the BO
- * @domain: where to place it
  * @bo_ptr: used to initialize BOs in structures
  * @cpu_addr: optional CPU address mapping
  *
- * Creates a kernel BO at a specific offset in the address space of the domain.
+ * Creates a kernel BO at a specific offset in VRAM.
  *
  * Returns:
  * 0 on success, negative error code otherwise.
  */
 int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
-			       uint64_t offset, uint64_t size, uint32_t domain,
+			       uint64_t offset, uint64_t size,
			       struct amdgpu_bo **bo_ptr, void **cpu_addr)
 {
	struct ttm_operation_ctx ctx = { false, false };
@@ -368,8 +367,9 @@ int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
	offset &= LINUX_PAGE_MASK;
	size = roundup2(size, PAGE_SIZE);

-	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE, domain, bo_ptr,
-				      NULL, cpu_addr);
+	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
+				      AMDGPU_GEM_DOMAIN_VRAM, bo_ptr, NULL,
+				      cpu_addr);
	if (r)
		return r;

@@ -285,7 +285,7 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr);
 int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
-			       uint64_t offset, uint64_t size, uint32_t domain,
+			       uint64_t offset, uint64_t size,
			       struct amdgpu_bo **bo_ptr, void **cpu_addr);
 int amdgpu_bo_create_user(struct amdgpu_device *adev,
			  struct amdgpu_bo_param *bp,
@@ -1574,6 +1574,23 @@ static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
					  NULL, &adev->mman.fw_vram_usage_va);
 }

+/*
+ * Driver Reservation functions
+ */
+/**
+ * amdgpu_ttm_drv_reserve_vram_fini - free drv reserved vram
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * free drv reserved vram if it has been reserved.
+ */
+static void amdgpu_ttm_drv_reserve_vram_fini(struct amdgpu_device *adev)
+{
+	amdgpu_bo_free_kernel(&adev->mman.drv_vram_usage_reserved_bo,
+			      NULL,
+			      NULL);
+}
+
 /**
  * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
  *
@@ -1595,11 +1612,34 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
	return amdgpu_bo_create_kernel_at(adev,
					  adev->mman.fw_vram_usage_start_offset,
					  adev->mman.fw_vram_usage_size,
-					  AMDGPU_GEM_DOMAIN_VRAM,
					  &adev->mman.fw_vram_usage_reserved_bo,
					  &adev->mman.fw_vram_usage_va);
 }

+/**
+ * amdgpu_ttm_drv_reserve_vram_init - create bo vram reservation from driver
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * create bo vram reservation from drv.
+ */
+static int amdgpu_ttm_drv_reserve_vram_init(struct amdgpu_device *adev)
+{
+	uint64_t vram_size = adev->gmc.visible_vram_size;
+
+	adev->mman.drv_vram_usage_reserved_bo = NULL;
+
+	if (adev->mman.drv_vram_usage_size == 0 ||
+	    adev->mman.drv_vram_usage_size > vram_size)
+		return 0;
+
+	return amdgpu_bo_create_kernel_at(adev,
+					  adev->mman.drv_vram_usage_start_offset,
+					  adev->mman.drv_vram_usage_size,
+					  &adev->mman.drv_vram_usage_reserved_bo,
+					  NULL);
+}
+
 /*
  * Memoy training reservation functions
  */
@@ -1622,14 +1662,15 @@ static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
	return 0;
 }

-static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev)
+static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev,
+						uint32_t reserve_size)
 {
	struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;

	memset(ctx, 0, sizeof(*ctx));

	ctx->c2p_train_data_offset =
-		roundup2((adev->gmc.mc_vram_size - adev->mman.discovery_tmr_size - SZ_1M), SZ_1M);
+		roundup2((adev->gmc.mc_vram_size - reserve_size - SZ_1M), SZ_1M);
	ctx->p2c_train_data_offset =
		(adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
	ctx->train_data_size =
@@ -1647,9 +1688,10 @@ static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev)
  */
 static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
 {
-	int ret;
	struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
	bool mem_train_support = false;
+	uint32_t reserve_size = 0;
+	int ret;

	if (!amdgpu_sriov_vf(adev)) {
		if (amdgpu_atomfirmware_mem_training_supported(adev))
@@ -1665,18 +1707,18 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
	 * Otherwise, fallback to legacy approach to check and reserve tmr block for ip
	 * discovery data and G6 memory training data respectively
	 */
-	adev->mman.discovery_tmr_size =
-		amdgpu_atomfirmware_get_fw_reserved_fb_size(adev);
-	if (!adev->mman.discovery_tmr_size)
-		adev->mman.discovery_tmr_size = DISCOVERY_TMR_OFFSET;
+	if (adev->bios)
+		reserve_size =
+			amdgpu_atomfirmware_get_fw_reserved_fb_size(adev);
+	if (!reserve_size)
+		reserve_size = DISCOVERY_TMR_OFFSET;

	if (mem_train_support) {
		/* reserve vram for mem train according to TMR location */
-		amdgpu_ttm_training_data_block_init(adev);
+		amdgpu_ttm_training_data_block_init(adev, reserve_size);
		ret = amdgpu_bo_create_kernel_at(adev,
						 ctx->c2p_train_data_offset,
						 ctx->train_data_size,
-						 AMDGPU_GEM_DOMAIN_VRAM,
						 &ctx->c2p_bo,
						 NULL);
		if (ret) {
@@ -1688,14 +1730,14 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
	}

	ret = amdgpu_bo_create_kernel_at(adev,
-					 adev->gmc.real_vram_size - adev->mman.discovery_tmr_size,
-					 adev->mman.discovery_tmr_size,
-					 AMDGPU_GEM_DOMAIN_VRAM,
-					 &adev->mman.discovery_memory,
+					 adev->gmc.real_vram_size - reserve_size,
+					 reserve_size,
+					 &adev->mman.fw_reserved_memory,
					 NULL);
	if (ret) {
		DRM_ERROR("alloc tmr failed(%d)!\n", ret);
-		amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
+		amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory,
+				      NULL, NULL);
		return ret;
	}

@@ -1788,6 +1830,14 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
		return r;
	}

+	/*
+	 *The reserved vram for driver must be pinned to the specified
+	 *place on the VRAM, so reserve it early.
+	 */
+	r = amdgpu_ttm_drv_reserve_vram_init(adev);
+	if (r)
+		return r;
+
	/*
	 * only NAVI10 and onwards ASIC support for IP discovery.
	 * If IP discovery enabled, a block of memory should be
@@ -1804,21 +1854,18 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
	 * avoid display artifacts while transitioning between pre-OS
	 * and driver. */
	r = amdgpu_bo_create_kernel_at(adev, 0, adev->mman.stolen_vga_size,
-				       AMDGPU_GEM_DOMAIN_VRAM,
				       &adev->mman.stolen_vga_memory,
				       NULL);
	if (r)
		return r;
	r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size,
				       adev->mman.stolen_extended_size,
-				       AMDGPU_GEM_DOMAIN_VRAM,
				       &adev->mman.stolen_extended_memory,
				       NULL);
	if (r)
		return r;
	r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_reserved_offset,
				       adev->mman.stolen_reserved_size,
-				       AMDGPU_GEM_DOMAIN_VRAM,
				       &adev->mman.stolen_reserved_memory,
				       NULL);
	if (r)
@@ -1910,14 +1957,16 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
	/* return the stolen vga memory back to VRAM */
	amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
-	/* return the IP Discovery TMR memory back to VRAM */
-	amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
+	/* return the FW reserved memory back to VRAM */
+	amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL,
+			      NULL);
	if (adev->mman.stolen_reserved_size)
		amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory,
				      NULL, NULL);
	amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL,
			      &adev->mman.sdma_access_ptr);
	amdgpu_ttm_fw_reserve_vram_fini(adev);
+	amdgpu_ttm_drv_reserve_vram_fini(adev);

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
@@ -79,7 +79,8 @@ struct amdgpu_mman {
	/* discovery */
	uint8_t				*discovery_bin;
	uint32_t			discovery_tmr_size;
-	struct amdgpu_bo		*discovery_memory;
+	/* fw reserved memory */
+	struct amdgpu_bo		*fw_reserved_memory;

	/* firmware VRAM reservation */
	u64		fw_vram_usage_start_offset;
@@ -87,6 +88,11 @@ struct amdgpu_mman {
	struct amdgpu_bo	*fw_vram_usage_reserved_bo;
	void		*fw_vram_usage_va;

+	/* driver VRAM reservation */
+	u64		drv_vram_usage_start_offset;
+	u64		drv_vram_usage_size;
+	struct amdgpu_bo	*drv_vram_usage_reserved_bo;
+
	/* PAGE_SIZE'd BO for process memory r/w over SDMA. */
	struct amdgpu_bo	*sdma_access_bo;
	void		*sdma_access_ptr;
@@ -391,7 +391,6 @@ static void amdgpu_virt_ras_reserve_bps(struct amdgpu_device *adev)
		 */
		if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
					       AMDGPU_GPU_PAGE_SIZE,
-					       AMDGPU_GEM_DOMAIN_VRAM,
					       &bo, NULL))
			DRM_DEBUG("RAS WARN: reserve vram for retired page %llx fail\n", bp);

@@ -351,6 +351,19 @@ static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
	return false;
 }

+static inline void reverse_planes_order(struct dc_surface_update *array_of_surface_update,
+					int planes_count)
+{
+	int i, j;
+	struct dc_surface_update surface_updates_temp;
+
+	for (i = 0, j = planes_count - 1; i < j; i++, j--) {
+		surface_updates_temp = array_of_surface_update[i];
+		array_of_surface_update[i] = array_of_surface_update[j];
+		array_of_surface_update[j] = surface_updates_temp;
+	}
+}
+
 /**
  * update_planes_and_stream_adapter() - Send planes to be updated in DC
  *
@@ -367,6 +380,8 @@ static inline bool update_planes_and_stream_adapter(struct dc *dc,
						    struct dc_stream_update *stream_update,
						    struct dc_surface_update *array_of_surface_update)
 {
+	reverse_planes_order(array_of_surface_update, planes_count);
+
	/*
	 * Previous frame finished and HW is ready for optimization.
	 */
@@ -1621,6 +1636,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
		}
		break;
	}
+	if (init_data.flags.gpu_vm_support)
+		init_data.flags.gpu_vm_support = amdgpu_sg_display_supported(adev);

	if (init_data.flags.gpu_vm_support)
		adev->mode_info.gpu_vm_support = true;
@@ -1079,6 +1079,7 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
	struct dc_state *dangling_context = dc_create_state(dc);
	struct dc_state *current_ctx;
	struct pipe_ctx *pipe;
+	struct timing_generator *tg;

	if (dangling_context == NULL)
		return;
@@ -1122,6 +1123,7 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)

		if (should_disable && old_stream) {
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+			tg = pipe->stream_res.tg;
			/* When disabling plane for a phantom pipe, we must turn on the
			 * phantom OTG so the disable programming gets the double buffer
			 * update. Otherwise the pipe will be left in a partially disabled
@@ -1129,7 +1131,8 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
			 * again for different use.
			 */
			if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) {
-				pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
+				if (tg->funcs->enable_crtc)
+					tg->funcs->enable_crtc(tg);
			}
			dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
			disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
@@ -1146,6 +1149,15 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
				dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
				dc->hwss.post_unlock_program_front_end(dc, dangling_context);
			}
+			/* We need to put the phantom OTG back into it's default (disabled) state or we
+			 * can get corruption when transition from one SubVP config to a different one.
+			 * The OTG is set to disable on falling edge of VUPDATE so the plane disable
+			 * will still get it's double buffer update.
+			 */
+			if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) {
+				if (tg->funcs->disable_phantom_crtc)
+					tg->funcs->disable_phantom_crtc(tg);
+			}
		}
	}

@@ -1942,6 +1954,9 @@ enum dc_status dc_commit_streams(struct dc *dc,
	struct pipe_ctx *pipe;
	bool handle_exit_odm2to1 = false;

+	if (dc->ctx->dce_environment == DCE_ENV_VIRTUAL_HW)
+		return res;
+
	if (!streams_changed(dc, streams, stream_count))
		return res;

@@ -1984,21 +1999,33 @@ enum dc_status dc_commit_streams(struct dc *dc,

	dc_resource_state_copy_construct_current(dc, context);

-	/*
-	 * Previous validation was perfomred with fast_validation = true and
-	 * the full DML state required for hardware programming was skipped.
-	 *
-	 * Re-validate here to calculate these parameters / watermarks.
-	 */
-	res = dc_validate_global_state(dc, context, false);
+	res = dc_validate_with_context(dc, set, stream_count, context, false);
	if (res != DC_OK) {
		DC_LOG_ERROR("DC commit global validation failure: %s (%d)",
			     dc_status_to_str(res), res);
-		return res;
+		BREAK_TO_DEBUGGER();
+		goto fail;
	}

	res = dc_commit_state_no_check(dc, context);

+	for (i = 0; i < stream_count; i++) {
+		for (j = 0; j < context->stream_count; j++) {
+			if (streams[i]->stream_id == context->streams[j]->stream_id)
+				streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst;
+
+			if (dc_is_embedded_signal(streams[i]->signal)) {
+				struct dc_stream_status *status = dc_stream_get_status_from_state(context, streams[i]);
+
+				if (dc->hwss.is_abm_supported)
+					status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, streams[i]);
+				else
+					status->is_abm_supported = true;
+			}
+		}
+	}
+
+fail:
	dc_release_state(context);

 context_alloc_fail:

	DC_LOG_DC("%s Finished.\n", __func__);
@@ -3122,6 +3149,19 @@ static bool update_planes_and_stream_state(struct dc *dc,

	if (update_type == UPDATE_TYPE_FULL) {
		if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
+			/* For phantom pipes we remove and create a new set of phantom pipes
+			 * for each full update (because we don't know if we'll need phantom
+			 * pipes until after the first round of validation). However, if validation
+			 * fails we need to keep the existing phantom pipes (because we don't update
+			 * the dc->current_state).
+			 *
+			 * The phantom stream/plane refcount is decremented for validation because
+			 * we assume it'll be removed (the free comes when the dc_state is freed),
+			 * but if validation fails we have to increment back the refcount so it's
+			 * consistent.
+			 */
+			if (dc->res_pool->funcs->retain_phantom_pipes)
+				dc->res_pool->funcs->retain_phantom_pipes(dc, dc->current_state);
			BREAK_TO_DEBUGGER();
			goto fail;
		}
@@ -3987,6 +4027,18 @@ void dc_commit_updates_for_stream(struct dc *dc,
	struct dc_context *dc_ctx = dc->ctx;
	int i, j;

+	/* TODO: Since change commit sequence can have a huge impact,
+	 * we decided to only enable it for DCN3x. However, as soon as
+	 * we get more confident about this change we'll need to enable
+	 * the new sequence for all ASICs.
+	 */
+	if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
+		dc_update_planes_and_stream(dc, srf_updates,
+					    surface_count, stream,
+					    stream_update);
+		return;
+	}
+
	stream_status = dc_stream_get_status(stream);
	context = dc->current_state;

@@ -1141,6 +1141,11 @@ static bool detect_link_and_local_sink(struct dc_link *link,
			    (link->dpcd_caps.dongle_type !=
					DISPLAY_DONGLE_DP_HDMI_CONVERTER))
				converter_disable_audio = true;
+
+			/* limited link rate to HBR3 for DPIA until we implement USB4 V2 */
+			if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+			    link->reported_link_cap.link_rate > LINK_RATE_HIGH3)
+				link->reported_link_cap.link_rate = LINK_RATE_HIGH3;
			break;
		}

@@ -2092,6 +2097,7 @@ static enum dc_status enable_link_dp_mst(
		struct pipe_ctx *pipe_ctx)
 {
	struct dc_link *link = pipe_ctx->stream->link;
+	unsigned char mstm_cntl;

	/* sink signal type after MST branch is MST. Multiple MST sinks
	 * share one link. Link DP PHY is enable or training only once.
@@ -2100,7 +2106,9 @@ static enum dc_status enable_link_dp_mst(
		return DC_OK;

	/* clear payload table */
-	dm_helpers_dp_mst_clear_payload_allocation_table(link->ctx, link);
+	core_link_read_dpcd(link, DP_MSTM_CTRL, &mstm_cntl, 1);
+	if (mstm_cntl & DP_MST_EN)
+		dm_helpers_dp_mst_clear_payload_allocation_table(link->ctx, link);

	/* to make sure the pending down rep can be processed
	 * before enabling the link
@ -2616,15 +2616,241 @@ bool dc_resource_is_dsc_encoding_supported(const struct dc *dc)
|
|||
return dc->res_pool->res_cap->num_dsc > 0;
|
||||
}
|
||||
|
||||
static bool planes_changed_for_existing_stream(struct dc_state *context,
|
||||
struct dc_stream_state *stream,
|
||||
const struct dc_validation_set set[],
|
||||
int set_count)
|
||||
{
|
||||
int i, j;
|
||||
struct dc_stream_status *stream_status = NULL;
|
||||
|
||||
for (i = 0; i < context->stream_count; i++) {
|
||||
if (context->streams[i] == stream) {
|
||||
stream_status = &context->stream_status[i];
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!stream_status)
|
||||
ASSERT(0);
|
||||
|
||||
for (i = 0; i < set_count; i++)
|
||||
if (set[i].stream == stream)
|
||||
break;
|
||||
|
||||
if (i == set_count)
|
||||
ASSERT(0);
|
||||
|
||||
if (set[i].plane_count != stream_status->plane_count)
|
||||
return true;
|
||||
|
||||
for (j = 0; j < set[i].plane_count; j++)
|
||||
if (set[i].plane_states[j] != stream_status->plane_states[j])
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* dc_validate_global_state() - Determine if HW can support a given state
|
||||
* Checks HW resource availability and bandwidth requirement.
|
||||
* dc_validate_with_context - Validate and update the potential new stream in the context object
|
||||
*
|
||||
* @dc: Used to get the current state status
|
||||
* @set: An array of dc_validation_set with all the current streams reference
|
||||
* @set_count: Total of streams
|
||||
* @context: New context
|
||||
* @fast_validate: Enable or disable fast validation
|
||||
*
|
||||
* This function updates the potential new stream in the context object. It
|
||||
* creates multiple lists for the add, remove, and unchanged streams. In
|
||||
* particular, if the unchanged streams have a plane that changed, it is
|
||||
* necessary to remove all planes from the unchanged streams. In summary, this
|
||||
* function is responsible for validating the new context.
|
||||
*
|
||||
* Return:
|
||||
* In case of success, return DC_OK (1), otherwise, return a DC error.
|
||||
*/
|
||||
enum dc_status dc_validate_with_context(struct dc *dc,
|
||||
const struct dc_validation_set set[],
|
||||
int set_count,
|
||||
struct dc_state *context,
|
||||
bool fast_validate)
|
||||
{
|
||||
	struct dc_stream_state *unchanged_streams[MAX_PIPES] = { 0 };
	struct dc_stream_state *del_streams[MAX_PIPES] = { 0 };
	struct dc_stream_state *add_streams[MAX_PIPES] = { 0 };
	int old_stream_count = context->stream_count;
	enum dc_status res = DC_ERROR_UNEXPECTED;
	int unchanged_streams_count = 0;
	int del_streams_count = 0;
	int add_streams_count = 0;
	bool found = false;
	int i, j, k;

	DC_LOGGER_INIT(dc->ctx->logger);

	/* First build a list of streams to be removed from the current context */
	for (i = 0; i < old_stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		for (j = 0; j < set_count; j++) {
			if (stream == set[j].stream) {
				found = true;
				break;
			}
		}

		if (!found)
			del_streams[del_streams_count++] = stream;

		found = false;
	}

	/* Second, build a list of new streams */
	for (i = 0; i < set_count; i++) {
		struct dc_stream_state *stream = set[i].stream;

		for (j = 0; j < old_stream_count; j++) {
			if (stream == context->streams[j]) {
				found = true;
				break;
			}
		}

		if (!found)
			add_streams[add_streams_count++] = stream;

		found = false;
	}

	/* Build a list of unchanged streams which is necessary for handling
	 * planes change such as added, removed, and updated.
	 */
	for (i = 0; i < set_count; i++) {
		/* Check if stream is part of the delete list */
		for (j = 0; j < del_streams_count; j++) {
			if (set[i].stream == del_streams[j]) {
				found = true;
				break;
			}
		}

		if (!found) {
			/* Check if stream is part of the add list */
			for (j = 0; j < add_streams_count; j++) {
				if (set[i].stream == add_streams[j]) {
					found = true;
					break;
				}
			}
		}

		if (!found)
			unchanged_streams[unchanged_streams_count++] = set[i].stream;

		found = false;
	}

	/* Remove all planes for unchanged streams if planes changed */
	for (i = 0; i < unchanged_streams_count; i++) {
		if (planes_changed_for_existing_stream(context,
						       unchanged_streams[i],
						       set,
						       set_count)) {
			if (!dc_rem_all_planes_for_stream(dc,
							  unchanged_streams[i],
							  context)) {
				res = DC_FAIL_DETACH_SURFACES;
				goto fail;
			}
		}
	}

	/* Remove all planes for removed streams and then remove the streams */
	for (i = 0; i < del_streams_count; i++) {
		/* Need to copy the dwb data from the old stream in order for efc to work */
		if (del_streams[i]->num_wb_info > 0) {
			for (j = 0; j < add_streams_count; j++) {
				if (del_streams[i]->sink == add_streams[j]->sink) {
					add_streams[j]->num_wb_info = del_streams[i]->num_wb_info;
					for (k = 0; k < del_streams[i]->num_wb_info; k++)
						add_streams[j]->writeback_info[k] = del_streams[i]->writeback_info[k];
				}
			}
		}

		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
			res = DC_FAIL_DETACH_SURFACES;
			goto fail;
		}

		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
		if (res != DC_OK)
			goto fail;
	}

	/* Swap seamless boot stream to pipe 0 (if needed) to ensure pipe_ctx
	 * matches. This may change in the future if seamless_boot_stream can be
	 * multiple.
	 */
	for (i = 0; i < add_streams_count; i++) {
		mark_seamless_boot_stream(dc, add_streams[i]);
		if (add_streams[i]->apply_seamless_boot_optimization && i != 0) {
			struct dc_stream_state *temp = add_streams[0];

			add_streams[0] = add_streams[i];
			add_streams[i] = temp;
			break;
		}
	}

	/* Add new streams and then add all planes for the new stream */
	for (i = 0; i < add_streams_count; i++) {
		calculate_phy_pix_clks(add_streams[i]);
		res = dc_add_stream_to_ctx(dc, context, add_streams[i]);
		if (res != DC_OK)
			goto fail;

		if (!add_all_planes_for_stream(dc, add_streams[i], set, set_count, context)) {
			res = DC_FAIL_ATTACH_SURFACES;
			goto fail;
		}
	}

	/* Add all planes for unchanged streams if planes changed */
	for (i = 0; i < unchanged_streams_count; i++) {
		if (planes_changed_for_existing_stream(context,
						       unchanged_streams[i],
						       set,
						       set_count)) {
			if (!add_all_planes_for_stream(dc, unchanged_streams[i], set, set_count, context)) {
				res = DC_FAIL_ATTACH_SURFACES;
				goto fail;
			}
		}
	}

	res = dc_validate_global_state(dc, context, fast_validate);

fail:
	if (res != DC_OK)
		DC_LOG_WARNING("%s:resource validation failed, dc_status:%d\n",
			       __func__,
			       res);

	return res;
}
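
The three classification passes above repeat the same linear membership scan, sharing one found flag that has to be reset after every iteration. A minimal illustrative helper showing the common pattern; it is not part of the diff, the name stream_in_set is hypothetical, and it assumes only the dc_validation_set layout used above:

/* Hypothetical helper: true if stream appears in set[0..set_count). */
static bool stream_in_set(const struct dc_stream_state *stream,
			  const struct dc_validation_set set[],
			  int set_count)
{
	int i;

	for (i = 0; i < set_count; i++) {
		if (set[i].stream == stream)
			return true;
	}
	return false;
}

With such a helper each pass becomes one call per stream, with no manual flag reset.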

/**
 * dc_validate_global_state() - Determine if hardware can support a given state
 *
 * @dc: dc struct for this driver
 * @new_ctx: state to be validated
 * @fast_validate: set to true if only yes/no to support matters
 *
 * Return: DC_OK if the result can be programmed. Otherwise, an error code.
 * Checks hardware resource availability and bandwidth requirement.
 *
 * Return:
 * DC_OK if the result can be programmed. Otherwise, an error code.
 */
enum dc_status dc_validate_global_state(
		struct dc *dc,

@ -3757,4 +3983,4 @@ bool dc_resource_acquire_secondary_pipe_for_mpc_odm(
	}

	return true;
}
}

@ -1298,6 +1298,12 @@ enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *pla

void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info);

enum dc_status dc_validate_with_context(struct dc *dc,
					const struct dc_validation_set set[],
					int set_count,
					struct dc_state *context,
					bool fast_validate);

bool dc_set_generic_gpio_for_stereo(bool enable,
				    struct gpio_service *gpio_service);

@ -2284,6 +2284,12 @@ void dcn10_enable_timing_synchronization(
		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);

		if (!tg->funcs->is_tg_enabled(tg)) {
			DC_SYNC_INFO("Skipping timing sync on disabled OTG\n");
			return;
		}

		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
	}

@ -357,8 +357,11 @@ void dpp3_set_cursor_attributes(
	int cur_rom_en = 0;

	if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA ||
		color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA)
		cur_rom_en = 1;
		color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) {
		if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) {
			cur_rom_en = 1;
		}
	}

	REG_UPDATE_3(CURSOR0_CONTROL,
			CUR0_MODE, color_format,

@ -167,6 +167,13 @@ static void optc32_phantom_crtc_post_enable(struct timing_generator *optc)
	REG_WAIT(OTG_CLOCK_CONTROL, OTG_BUSY, 0, 1, 100000);
}

static void optc32_disable_phantom_otg(struct timing_generator *optc)
{
	struct optc *optc1 = DCN10TG_FROM_TG(optc);

	REG_UPDATE(OTG_CONTROL, OTG_MASTER_EN, 0);
}

static void optc32_set_odm_bypass(struct timing_generator *optc,
		const struct dc_crtc_timing *dc_crtc_timing)
{

@ -260,6 +267,7 @@ static struct timing_generator_funcs dcn32_tg_funcs = {
		.enable_crtc = optc32_enable_crtc,
		.disable_crtc = optc32_disable_crtc,
		.phantom_crtc_post_enable = optc32_phantom_crtc_post_enable,
		.disable_phantom_crtc = optc32_disable_phantom_otg,
		/* used by enable_timing_synchronization. Not needed for FPGA */
		.is_counter_moving = optc1_is_counter_moving,
		.get_position = optc1_get_position,

@ -1719,6 +1719,27 @@ static struct dc_stream_state *dcn32_enable_phantom_stream(struct dc *dc,
	return phantom_stream;
}

void dcn32_retain_phantom_pipes(struct dc *dc, struct dc_state *context)
{
	int i;
	struct dc_plane_state *phantom_plane = NULL;
	struct dc_stream_state *phantom_stream = NULL;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (!pipe->top_pipe && !pipe->prev_odm_pipe &&
				pipe->plane_state && pipe->stream &&
				pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
			phantom_plane = pipe->plane_state;
			phantom_stream = pipe->stream;

			dc_plane_state_retain(phantom_plane);
			dc_stream_retain(phantom_stream);
		}
	}
}

// return true if pipes were removed from ctx, false otherwise
bool dcn32_remove_phantom_pipes(struct dc *dc, struct dc_state *context)
{

@ -2035,6 +2056,7 @@ static struct resource_funcs dcn32_res_pool_funcs = {
	.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
	.add_phantom_pipes = dcn32_add_phantom_pipes,
	.remove_phantom_pipes = dcn32_remove_phantom_pipes,
	.retain_phantom_pipes = dcn32_retain_phantom_pipes,
};

static uint32_t read_pipe_fuses(struct dc_context *ctx)

@ -83,6 +83,9 @@ bool dcn32_release_post_bldn_3dlut(
bool dcn32_remove_phantom_pipes(struct dc *dc,
		struct dc_state *context);

void dcn32_retain_phantom_pipes(struct dc *dc,
		struct dc_state *context);

void dcn32_add_phantom_pipes(struct dc *dc,
		struct dc_state *context,
		display_e2e_pipe_params_st *pipes,

@ -1619,6 +1619,7 @@ static struct resource_funcs dcn321_res_pool_funcs = {
	.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
	.add_phantom_pipes = dcn32_add_phantom_pipes,
	.remove_phantom_pipes = dcn32_remove_phantom_pipes,
	.retain_phantom_pipes = dcn32_retain_phantom_pipes,
};

static uint32_t read_pipe_fuses(struct dc_context *ctx)

@ -234,6 +234,7 @@ struct resource_funcs {
			unsigned int index);

	bool (*remove_phantom_pipes)(struct dc *dc, struct dc_state *context);
	void (*retain_phantom_pipes)(struct dc *dc, struct dc_state *context);
	void (*get_panel_config_defaults)(struct dc_panel_config *panel_config);
};
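
retain_phantom_pipes joins remove_phantom_pipes as an optional member of resource_funcs, so pools that predate it simply leave the pointer NULL and callers are expected to guard the indirect call. A minimal sketch of the intended call-site shape, illustrative rather than quoted from this diff:

	/* Only pools that implement the phantom-pipe hooks get called. */
	if (dc->res_pool->funcs->retain_phantom_pipes)
		dc->res_pool->funcs->retain_phantom_pipes(dc, context);
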
@ -185,6 +185,7 @@ struct timing_generator_funcs {
#ifdef CONFIG_DRM_AMD_DC_DCN
	void (*phantom_crtc_post_enable)(struct timing_generator *tg);
#endif
	void (*disable_phantom_crtc)(struct timing_generator *tg);
	bool (*immediate_disable_crtc)(struct timing_generator *tg);
	bool (*is_counter_moving)(struct timing_generator *tg);
	void (*get_position)(struct timing_generator *tg,

@ -705,20 +705,65 @@ struct atom_gpio_pin_lut_v2_1
};


/*
  ***************************************************************************
    Data Table vram_usagebyfirmware structure
  ***************************************************************************
*/
/*
 * VBIOS/PRE-OS always reserves a FB region at the top of the frame buffer. The driver
 * should not write to that region. The driver can allocate its own reservation region
 * as long as it does not overlap the firmware's reservation region.
 * if (pre-NV1X) atom data table firmwareInfoTable version < 3.3:
 *   in this case, atom data table vram_usagebyfirmwareTable version always <= 2.1
 *   if VBIOS/UEFI GOP is posted:
 *     VBIOS/UEFIGOP update used_by_firmware_in_kb = total reserved size by VBIOS
 *     update start_address_in_kb = total_mem_size_in_kb - used_by_firmware_in_kb;
 *     (total_mem_size_in_kb = reg(CONFIG_MEMSIZE)<<10)
 *     driver can allocate driver reservation region under firmware reservation,
 *     used_by_driver_in_kb = driver reservation size
 *     driver reservation start address = (start_address_in_kb - used_by_driver_in_kb)
 *     Comment1[hchan]: There is only one reservation at the beginning of the FB reserved by
 *     host driver. Host driver would overwrite the table with the following
 *     used_by_firmware_in_kb = total reserved size for pf-vf info exchange and
 *     set SRIOV_MSG_SHARE_RESERVATION mask start_address_in_kb = 0
 *   else there is no VBIOS reservation region:
 *     driver must allocate driver reservation region at top of FB.
 *     driver set used_by_driver_in_kb = driver reservation size
 *     driver reservation start address = (total_mem_size_in_kb - used_by_driver_in_kb)
 *     same as Comment1
 * else (NV1X and after):
 *   if VBIOS/UEFI GOP is posted:
 *     VBIOS/UEFIGOP update:
 *     used_by_firmware_in_kb = atom_firmware_Info_v3_3.fw_reserved_size_in_kb;
 *     start_address_in_kb = total_mem_size_in_kb - used_by_firmware_in_kb;
 *     (total_mem_size_in_kb = reg(CONFIG_MEMSIZE)<<10)
 *   if vram_usagebyfirmwareTable version <= 2.1:
 *     driver can allocate driver reservation region under firmware reservation,
 *     driver set used_by_driver_in_kb = driver reservation size
 *     driver reservation start address = start_address_in_kb - used_by_driver_in_kb
 *     same as Comment1
 *   else driver can:
 *     allocate its reservation any place as long as it does not overlap the pre-OS FW reservation area
 *     set used_by_driver_region0_in_kb = driver reservation size
 *     set driver_region0_start_address_in_kb = driver reservation region start address
 *     Comment2[hchan]: Host driver can set used_by_firmware_in_kb and start_address_in_kb to
 *     zero as the reservation for VF as it doesn't exist. And Host driver should also
 *     update atom_firmware_Info table to remove the same VBIOS reservation as well.
 */

struct vram_usagebyfirmware_v2_1
{
	struct atom_common_table_header table_header;
	uint32_t start_address_in_kb;
	uint16_t used_by_firmware_in_kb;
	uint16_t used_by_driver_in_kb;
	struct atom_common_table_header table_header;
	uint32_t start_address_in_kb;
	uint16_t used_by_firmware_in_kb;
	uint16_t used_by_driver_in_kb;
};

struct vram_usagebyfirmware_v2_2 {
	struct atom_common_table_header table_header;
	uint32_t fw_region_start_address_in_kb;
	uint16_t used_by_firmware_in_kb;
	uint16_t reserved;
	uint32_t driver_region0_start_address_in_kb;
	uint32_t used_by_driver_region0_in_kb;
	uint32_t reserved32[7];
};
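
The layout described above is plain arithmetic on kilobyte quantities: the firmware region ends at the top of VRAM and, with the v2.1 table, the driver region sits directly below it. A small sketch under those assumptions; the helper name is hypothetical and only v2.1 fields are used:

/* Illustrative only: driver reservation placed directly below the firmware
 * region; all quantities in KB, as in the table above.
 */
static uint32_t driver_resv_start_kb(const struct vram_usagebyfirmware_v2_1 *t,
				     uint16_t used_by_driver_in_kb)
{
	/* t->start_address_in_kb == total_mem_size_in_kb - used_by_firmware_in_kb */
	return t->start_address_in_kb - used_by_driver_in_kb;
}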

/*
  ***************************************************************************

@ -139,6 +139,8 @@ enum amd_pp_sensors {
	AMDGPU_PP_SENSOR_MIN_FAN_RPM,
	AMDGPU_PP_SENSOR_MAX_FAN_RPM,
	AMDGPU_PP_SENSOR_VCN_POWER_STATE,
	AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK,
	AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK,
};

enum amd_pp_task {

@ -89,6 +89,8 @@ struct amdgpu_dpm_thermal {
	int max_mem_crit_temp;
	/* memory max emergency(shutdown) temp */
	int max_mem_emergency_temp;
	/* SWCTF threshold */
	int sw_ctf_threshold;
	/* was last interrupt low to high or high to low */
	bool high_to_low;
	/* interrupt source */

@ -26,6 +26,7 @@

#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/reboot.h>

#include "amd_shared.h"
#include "amd_powerplay.h"
#include "power_state.h"

@ -91,6 +92,45 @@ static int pp_early_init(void *handle)
	return 0;
}

static void pp_swctf_delayed_work_handler(struct work_struct *work)
{
	struct pp_hwmgr *hwmgr =
		container_of(work, struct pp_hwmgr, swctf_delayed_work.work);
	struct amdgpu_device *adev = hwmgr->adev;
	struct amdgpu_dpm_thermal *range =
			&adev->pm.dpm.thermal;
	uint32_t gpu_temperature, size;
	int ret;

	/*
	 * If the hotspot/edge temperature is confirmed as below SW CTF setting point
	 * after the delay enforced, nothing will be done.
	 * Otherwise, a graceful shutdown will be performed to prevent further damage.
	 */
	if (range->sw_ctf_threshold &&
	    hwmgr->hwmgr_func->read_sensor) {
		ret = hwmgr->hwmgr_func->read_sensor(hwmgr,
						     AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
						     &gpu_temperature,
						     &size);
		/*
		 * For some legacy ASICs, hotspot temperature retrieving might not be
		 * supported. Check the edge temperature instead then.
		 */
		if (ret == -EOPNOTSUPP)
			ret = hwmgr->hwmgr_func->read_sensor(hwmgr,
							     AMDGPU_PP_SENSOR_EDGE_TEMP,
							     &gpu_temperature,
							     &size);
		if (!ret && gpu_temperature / 1000 < range->sw_ctf_threshold)
			return;
	}

	dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
	dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
	orderly_poweroff(true);
}

static int pp_sw_init(void *handle)
{
	struct amdgpu_device *adev = handle;

@ -101,6 +141,10 @@ static int pp_sw_init(void *handle)

	pr_debug("powerplay sw init %s\n", ret ? "failed" : "successfully");

	if (!ret)
		INIT_DELAYED_WORK(&hwmgr->swctf_delayed_work,
				  pp_swctf_delayed_work_handler);

	return ret;
}

@ -136,6 +180,8 @@ static int pp_hw_fini(void *handle)
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	cancel_delayed_work_sync(&hwmgr->swctf_delayed_work);

	hwmgr_hw_fini(hwmgr);

	return 0;

@ -222,6 +268,8 @@ static int pp_suspend(void *handle)
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	cancel_delayed_work_sync(&hwmgr->swctf_delayed_work);

	return hwmgr_suspend(hwmgr);
}

@ -769,10 +817,16 @@ static int pp_dpm_read_sensor(void *handle, int idx,

	switch (idx) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)value) = hwmgr->pstate_sclk;
		*((uint32_t *)value) = hwmgr->pstate_sclk * 100;
		return 0;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)value) = hwmgr->pstate_mclk;
		*((uint32_t *)value) = hwmgr->pstate_mclk * 100;
		return 0;
	case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK:
		*((uint32_t *)value) = hwmgr->pstate_sclk_peak * 100;
		return 0;
	case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK:
		*((uint32_t *)value) = hwmgr->pstate_mclk_peak * 100;
		return 0;
	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
		*((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMinRPM;
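
The pieces above follow the standard delayed-work lifecycle: the handler is registered once at sw-init, scheduled from the thermal interrupt, and cancelled synchronously on hw-fini/suspend so it can never run against torn-down state. A condensed sketch of that lifecycle (illustrative; AMDGPU_SWCTF_EXTRA_DELAY is the grace period used by the interrupt handlers later in this diff):

	/* once, at sw init */
	INIT_DELAYED_WORK(&hwmgr->swctf_delayed_work,
			  pp_swctf_delayed_work_handler);

	/* from the thermal L2H interrupt: re-check after a grace period
	 * instead of powering off immediately
	 */
	schedule_delayed_work(&hwmgr->swctf_delayed_work,
			      msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY));

	/* on hw fini and suspend */
	cancel_delayed_work_sync(&hwmgr->swctf_delayed_work);
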
@ -241,7 +241,8 @@ int phm_start_thermal_controller(struct pp_hwmgr *hwmgr)
						TEMP_RANGE_MAX,
						TEMP_RANGE_MIN,
						TEMP_RANGE_MAX,
						TEMP_RANGE_MAX};
						TEMP_RANGE_MAX,
						0};
	struct amdgpu_device *adev = hwmgr->adev;

	if (!hwmgr->not_vf)

@ -265,6 +266,7 @@ int phm_start_thermal_controller(struct pp_hwmgr *hwmgr)
	adev->pm.dpm.thermal.min_mem_temp = range.mem_min;
	adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max;
	adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max;
	adev->pm.dpm.thermal.sw_ctf_threshold = range.sw_ctf_threshold;

	return ret;
}

@ -375,6 +375,17 @@ static int smu10_enable_gfx_off(struct pp_hwmgr *hwmgr)
	return 0;
}

static void smu10_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr)
{
	hwmgr->pstate_sclk = SMU10_UMD_PSTATE_GFXCLK;
	hwmgr->pstate_mclk = SMU10_UMD_PSTATE_FCLK;

	smum_send_msg_to_smc(hwmgr,
			     PPSMC_MSG_GetMaxGfxclkFrequency,
			     &hwmgr->pstate_sclk_peak);
	hwmgr->pstate_mclk_peak = SMU10_UMD_PSTATE_PEAK_FCLK;
}

static int smu10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;

@ -398,6 +409,8 @@ static int smu10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
		return ret;
	}

	smu10_populate_umdpstate_clocks(hwmgr);

	return 0;
}

@ -574,9 +587,6 @@ static int smu10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)

	hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;

	hwmgr->pstate_sclk = SMU10_UMD_PSTATE_GFXCLK * 100;
	hwmgr->pstate_mclk = SMU10_UMD_PSTATE_FCLK * 100;

	/* enable the pp_od_clk_voltage sysfs file */
	hwmgr->od_enabled = 1;
	/* disabled fine grain tuning function by default */

@ -1501,6 +1501,67 @@ static int smu7_populate_edc_leakage_registers(struct pp_hwmgr *hwmgr)
	return ret;
}

static void smu7_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table;
	int32_t tmp_sclk, count, percentage;

	if (golden_dpm_table->mclk_table.count == 1) {
		percentage = 70;
		hwmgr->pstate_mclk = golden_dpm_table->mclk_table.dpm_levels[0].value;
	} else {
		percentage = 100 * golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value /
				golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;
		hwmgr->pstate_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 2].value;
	}

	tmp_sclk = hwmgr->pstate_mclk * percentage / 100;

	if (hwmgr->pp_table_version == PP_TABLE_V0) {
		struct phm_clock_voltage_dependency_table *vddc_dependency_on_sclk =
			hwmgr->dyn_state.vddc_dependency_on_sclk;

		for (count = vddc_dependency_on_sclk->count - 1; count >= 0; count--) {
			if (tmp_sclk >= vddc_dependency_on_sclk->entries[count].clk) {
				hwmgr->pstate_sclk = vddc_dependency_on_sclk->entries[count].clk;
				break;
			}
		}
		if (count < 0)
			hwmgr->pstate_sclk = vddc_dependency_on_sclk->entries[0].clk;

		hwmgr->pstate_sclk_peak =
			vddc_dependency_on_sclk->entries[vddc_dependency_on_sclk->count - 1].clk;
	} else if (hwmgr->pp_table_version == PP_TABLE_V1) {
		struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
		struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_sclk =
			table_info->vdd_dep_on_sclk;

		for (count = vdd_dep_on_sclk->count - 1; count >= 0; count--) {
			if (tmp_sclk >= vdd_dep_on_sclk->entries[count].clk) {
				hwmgr->pstate_sclk = vdd_dep_on_sclk->entries[count].clk;
				break;
			}
		}
		if (count < 0)
			hwmgr->pstate_sclk = vdd_dep_on_sclk->entries[0].clk;

		hwmgr->pstate_sclk_peak =
			vdd_dep_on_sclk->entries[vdd_dep_on_sclk->count - 1].clk;
	}

	hwmgr->pstate_mclk_peak =
		golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;

	/* make sure the output is in MHz */
	hwmgr->pstate_sclk /= 100;
	hwmgr->pstate_mclk /= 100;
	hwmgr->pstate_sclk_peak /= 100;
	hwmgr->pstate_mclk_peak /= 100;
}

static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	int tmp_result = 0;

@ -1625,6 +1686,8 @@ static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"pcie performance request failed!", result = tmp_result);

	smu7_populate_umdpstate_clocks(hwmgr);

	return 0;
}

@ -3143,15 +3206,12 @@ static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_le
		for (count = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
			count >= 0; count--) {
			if (tmp_sclk >= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk) {
				tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk;
				*sclk_mask = count;
				break;
			}
		}
		if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
		if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK)
			*sclk_mask = 0;
			tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].clk;
		}

		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
			*sclk_mask = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;

@ -3161,15 +3221,12 @@ static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_le

		for (count = table_info->vdd_dep_on_sclk->count-1; count >= 0; count--) {
			if (tmp_sclk >= table_info->vdd_dep_on_sclk->entries[count].clk) {
				tmp_sclk = table_info->vdd_dep_on_sclk->entries[count].clk;
				*sclk_mask = count;
				break;
			}
		}
		if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
		if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK)
			*sclk_mask = 0;
			tmp_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
		}

		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
			*sclk_mask = table_info->vdd_dep_on_sclk->count - 1;

@ -3181,8 +3238,6 @@ static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_le
		*mclk_mask = golden_dpm_table->mclk_table.count - 1;

	*pcie_mask = data->dpm_table.pcie_speed_table.count - 1;
	hwmgr->pstate_sclk = tmp_sclk;
	hwmgr->pstate_mclk = tmp_mclk;

	return 0;
}

@ -3195,9 +3250,6 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
	uint32_t mclk_mask = 0;
	uint32_t pcie_mask = 0;

	if (hwmgr->pstate_sclk == 0)
		smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_HIGH:
		ret = smu7_force_dpm_highest(hwmgr);

@ -5381,6 +5433,8 @@ static int smu7_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
	thermal_data->max = data->thermal_temp_setting.temperature_shutdown *
		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;

	thermal_data->sw_ctf_threshold = thermal_data->max;

	return 0;
}
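
The /100 and *100 conversions in this series follow one unit convention: the DPM tables store clocks in 10 kHz units, the cached pstate values are kept in MHz, and the sensor interface reports 10 kHz units again. A short worked example under that assumption:

	/* a table entry of 60000 (10 kHz units) is 600 MHz */
	hwmgr->pstate_sclk = 60000 / 100;	/* cached as 600 MHz */
	/* pp_dpm_read_sensor() then returns 600 * 100 = 60000 (10 kHz units) */
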
@ -1016,6 +1016,18 @@ static void smu8_reset_acp_boot_level(struct pp_hwmgr *hwmgr)
	data->acp_boot_level = 0xff;
}

static void smu8_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr)
{
	struct phm_clock_voltage_dependency_table *table =
				hwmgr->dyn_state.vddc_dependency_on_sclk;

	hwmgr->pstate_sclk = table->entries[0].clk / 100;
	hwmgr->pstate_mclk = 0;

	hwmgr->pstate_sclk_peak = table->entries[table->count - 1].clk / 100;
	hwmgr->pstate_mclk_peak = 0;
}

static int smu8_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	smu8_program_voting_clients(hwmgr);

@ -1024,6 +1036,8 @@ static int smu8_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
	smu8_program_bootup_state(hwmgr);
	smu8_reset_acp_boot_level(hwmgr);

	smu8_populate_umdpstate_clocks(hwmgr);

	return 0;
}

@ -1167,8 +1181,6 @@ static int smu8_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)

	data->sclk_dpm.soft_min_clk = table->entries[0].clk;
	data->sclk_dpm.hard_min_clk = table->entries[0].clk;
	hwmgr->pstate_sclk = table->entries[0].clk;
	hwmgr->pstate_mclk = 0;

	level = smu8_get_max_sclk_level(hwmgr) - 1;

@ -603,21 +603,17 @@ int phm_irq_process(struct amdgpu_device *adev,
			struct amdgpu_irq_src *source,
			struct amdgpu_iv_entry *entry)
{
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	uint32_t client_id = entry->client_id;
	uint32_t src_id = entry->src_id;

	if (client_id == AMDGPU_IRQ_CLIENTID_LEGACY) {
		if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH) {
			dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
			/*
			 * SW CTF just occurred.
			 * Try to do a graceful shutdown to prevent further damage.
			 */
			dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
			orderly_poweroff(true);
		} else if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW)
			schedule_delayed_work(&hwmgr->swctf_delayed_work,
					      msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY));
		} else if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW) {
			dev_emerg(adev->dev, "ERROR: GPU under temperature range detected!\n");
		else if (src_id == VISLANDS30_IV_SRCID_GPIO_19) {
		} else if (src_id == VISLANDS30_IV_SRCID_GPIO_19) {
			dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n");
			/*
			 * HW CTF just occurred. Shutdown to prevent further damage.

@ -626,15 +622,10 @@ int phm_irq_process(struct amdgpu_device *adev,
			orderly_poweroff(true);
		}
	} else if (client_id == SOC15_IH_CLIENTID_THM) {
		if (src_id == 0) {
			dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
			/*
			 * SW CTF just occurred.
			 * Try to do a graceful shutdown to prevent further damage.
			 */
			dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
			orderly_poweroff(true);
		} else
		if (src_id == 0)
			schedule_delayed_work(&hwmgr->swctf_delayed_work,
					      msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY));
		else
			dev_emerg(adev->dev, "ERROR: GPU under temperature range detected!\n");
	} else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO) {
		dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n");

@ -3008,6 +3008,30 @@ static int vega10_enable_disable_PCC_limit_feature(struct pp_hwmgr *hwmgr, bool
	return 0;
}

static void vega10_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr)
{
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);

	if (table_info->vdd_dep_on_sclk->count > VEGA10_UMD_PSTATE_GFXCLK_LEVEL &&
	    table_info->vdd_dep_on_mclk->count > VEGA10_UMD_PSTATE_MCLK_LEVEL) {
		hwmgr->pstate_sclk = table_info->vdd_dep_on_sclk->entries[VEGA10_UMD_PSTATE_GFXCLK_LEVEL].clk;
		hwmgr->pstate_mclk = table_info->vdd_dep_on_mclk->entries[VEGA10_UMD_PSTATE_MCLK_LEVEL].clk;
	} else {
		hwmgr->pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
		hwmgr->pstate_mclk = table_info->vdd_dep_on_mclk->entries[0].clk;
	}

	hwmgr->pstate_sclk_peak = table_info->vdd_dep_on_sclk->entries[table_info->vdd_dep_on_sclk->count - 1].clk;
	hwmgr->pstate_mclk_peak = table_info->vdd_dep_on_mclk->entries[table_info->vdd_dep_on_mclk->count - 1].clk;

	/* make sure the output is in MHz */
	hwmgr->pstate_sclk /= 100;
	hwmgr->pstate_mclk /= 100;
	hwmgr->pstate_sclk_peak /= 100;
	hwmgr->pstate_mclk_peak /= 100;
}

static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;

@ -3082,6 +3106,8 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
				result = tmp_result);
	}

	vega10_populate_umdpstate_clocks(hwmgr);

	return result;
}

@ -4169,8 +4195,6 @@ static int vega10_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_fo
			*sclk_mask = VEGA10_UMD_PSTATE_GFXCLK_LEVEL;
			*soc_mask = VEGA10_UMD_PSTATE_SOCCLK_LEVEL;
			*mclk_mask = VEGA10_UMD_PSTATE_MCLK_LEVEL;
			hwmgr->pstate_sclk = table_info->vdd_dep_on_sclk->entries[VEGA10_UMD_PSTATE_GFXCLK_LEVEL].clk;
			hwmgr->pstate_mclk = table_info->vdd_dep_on_mclk->entries[VEGA10_UMD_PSTATE_MCLK_LEVEL].clk;
		}

		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {

@ -4281,9 +4305,6 @@ static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
	uint32_t mclk_mask = 0;
	uint32_t soc_mask = 0;

	if (hwmgr->pstate_sclk == 0)
		vega10_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_HIGH:
		ret = vega10_force_dpm_highest(hwmgr);

@ -5221,6 +5242,9 @@ static int vega10_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
{
	struct vega10_hwmgr *data = hwmgr->backend;
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct phm_ppt_v2_information *pp_table_info =
		(struct phm_ppt_v2_information *)(hwmgr->pptable);
	struct phm_tdp_table *tdp_table = pp_table_info->tdp_table;

	memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));

@ -5237,6 +5261,13 @@ static int vega10_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
	thermal_data->mem_emergency_max = (pp_table->ThbmLimit + CTF_OFFSET_HBM)*
		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;

	if (tdp_table->usSoftwareShutdownTemp > pp_table->ThotspotLimit &&
	    tdp_table->usSoftwareShutdownTemp < VEGA10_THERMAL_MAXIMUM_ALERT_TEMP)
		thermal_data->sw_ctf_threshold = tdp_table->usSoftwareShutdownTemp;
	else
		thermal_data->sw_ctf_threshold = VEGA10_THERMAL_MAXIMUM_ALERT_TEMP;
	thermal_data->sw_ctf_threshold *= PP_TEMPERATURE_UNITS_PER_CENTIGRADES;

	return 0;
}

@ -1026,6 +1026,25 @@ static int vega12_get_all_clock_ranges(struct pp_hwmgr *hwmgr)
	return 0;
}

static void vega12_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr)
{
	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
	struct vega12_single_dpm_table *gfx_dpm_table = &(data->dpm_table.gfx_table);
	struct vega12_single_dpm_table *mem_dpm_table = &(data->dpm_table.mem_table);

	if (gfx_dpm_table->count > VEGA12_UMD_PSTATE_GFXCLK_LEVEL &&
	    mem_dpm_table->count > VEGA12_UMD_PSTATE_MCLK_LEVEL) {
		hwmgr->pstate_sclk = gfx_dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value;
		hwmgr->pstate_mclk = mem_dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value;
	} else {
		hwmgr->pstate_sclk = gfx_dpm_table->dpm_levels[0].value;
		hwmgr->pstate_mclk = mem_dpm_table->dpm_levels[0].value;
	}

	hwmgr->pstate_sclk_peak = gfx_dpm_table->dpm_levels[gfx_dpm_table->count - 1].value;
	hwmgr->pstate_mclk_peak = mem_dpm_table->dpm_levels[mem_dpm_table->count - 1].value;
}

static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	int tmp_result, result = 0;

@ -1077,6 +1096,9 @@ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
	PP_ASSERT_WITH_CODE(!result,
			"Failed to setup default DPM tables!",
			return result);

	vega12_populate_umdpstate_clocks(hwmgr);

	return result;
}

@ -2742,6 +2764,8 @@ static int vega12_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
static int vega12_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
		struct PP_TemperatureRange *thermal_data)
{
	struct phm_ppt_v3_information *pptable_information =
		(struct phm_ppt_v3_information *)hwmgr->pptable;
	struct vega12_hwmgr *data =
			(struct vega12_hwmgr *)(hwmgr->backend);
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);

@ -2760,6 +2784,8 @@ static int vega12_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	thermal_data->mem_emergency_max = (pp_table->ThbmLimit + CTF_OFFSET_HBM)*
		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	thermal_data->sw_ctf_threshold = pptable_information->us_software_shutdown_temp *
		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;

	return 0;
}

@ -1555,26 +1555,23 @@ static int vega20_set_mclk_od(
	return 0;
}

static int vega20_populate_umdpstate_clocks(
		struct pp_hwmgr *hwmgr)
static void vega20_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_single_dpm_table *gfx_table = &(data->dpm_table.gfx_table);
	struct vega20_single_dpm_table *mem_table = &(data->dpm_table.mem_table);

	hwmgr->pstate_sclk = gfx_table->dpm_levels[0].value;
	hwmgr->pstate_mclk = mem_table->dpm_levels[0].value;

	if (gfx_table->count > VEGA20_UMD_PSTATE_GFXCLK_LEVEL &&
	    mem_table->count > VEGA20_UMD_PSTATE_MCLK_LEVEL) {
		hwmgr->pstate_sclk = gfx_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value;
		hwmgr->pstate_mclk = mem_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value;
	} else {
		hwmgr->pstate_sclk = gfx_table->dpm_levels[0].value;
		hwmgr->pstate_mclk = mem_table->dpm_levels[0].value;
	}

	hwmgr->pstate_sclk = hwmgr->pstate_sclk * 100;
	hwmgr->pstate_mclk = hwmgr->pstate_mclk * 100;

	return 0;
	hwmgr->pstate_sclk_peak = gfx_table->dpm_levels[gfx_table->count - 1].value;
	hwmgr->pstate_mclk_peak = mem_table->dpm_levels[mem_table->count - 1].value;
}

static int vega20_get_max_sustainable_clock(struct pp_hwmgr *hwmgr,

@ -1753,10 +1750,7 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
			"[EnableDPMTasks] Failed to initialize odn settings!",
			return result);

	result = vega20_populate_umdpstate_clocks(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"[EnableDPMTasks] Failed to populate umdpstate clocks!",
			return result);
	vega20_populate_umdpstate_clocks(hwmgr);

	result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetPptLimit,
			POWER_SOURCE_AC << 16, &hwmgr->default_power_limit);

@ -4213,6 +4207,8 @@ static int vega20_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
static int vega20_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
		struct PP_TemperatureRange *thermal_data)
{
	struct phm_ppt_v3_information *pptable_information =
		(struct phm_ppt_v3_information *)hwmgr->pptable;
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);

@ -4231,6 +4227,8 @@ static int vega20_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	thermal_data->mem_emergency_max = (pp_table->ThbmLimit + CTF_OFFSET_HBM)*
		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	thermal_data->sw_ctf_threshold = pptable_information->us_software_shutdown_temp *
		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;

	return 0;
}

@ -809,6 +809,10 @@ struct pp_hwmgr {
	uint32_t workload_prority[Workload_Policy_Max];
	uint32_t workload_setting[Workload_Policy_Max];
	bool gfxoff_state_changed_by_workload;
	uint32_t pstate_sclk_peak;
	uint32_t pstate_mclk_peak;

	struct delayed_work swctf_delayed_work;
};

int hwmgr_early_init(struct pp_hwmgr *hwmgr);

@ -131,6 +131,7 @@ struct PP_TemperatureRange {
	int mem_min;
	int mem_crit_max;
	int mem_emergency_max;
	int sw_ctf_threshold;
};

struct PP_StateValidationBlock {

@ -24,6 +24,7 @@

#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/reboot.h>

#include "amdgpu.h"
#include "amdgpu_smu.h"

@ -1061,6 +1062,34 @@ static void smu_interrupt_work_fn(struct work_struct *work)
		smu->ppt_funcs->interrupt_work(smu);
}

static void smu_swctf_delayed_work_handler(struct work_struct *work)
{
	struct smu_context *smu =
		container_of(work, struct smu_context, swctf_delayed_work.work);
	struct smu_temperature_range *range =
				&smu->thermal_range;
	struct amdgpu_device *adev = smu->adev;
	uint32_t hotspot_tmp, size;

	/*
	 * If the hotspot temperature is confirmed as below SW CTF setting point
	 * after the delay enforced, nothing will be done.
	 * Otherwise, a graceful shutdown will be performed to prevent further damage.
	 */
	if (range->software_shutdown_temp &&
	    smu->ppt_funcs->read_sensor &&
	    !smu->ppt_funcs->read_sensor(smu,
					 AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
					 &hotspot_tmp,
					 &size) &&
	    hotspot_tmp / 1000 < range->software_shutdown_temp)
		return;

	dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
	dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
	orderly_poweroff(true);
}

static int smu_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

@ -1109,6 +1138,9 @@ static int smu_sw_init(void *handle)
		return ret;
	}

	INIT_DELAYED_WORK(&smu->swctf_delayed_work,
			  smu_swctf_delayed_work_handler);

	ret = smu_smc_table_sw_init(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to sw init smc table!\n");

@ -1581,6 +1613,8 @@ static int smu_smc_hw_cleanup(struct smu_context *smu)
		return ret;
	}

	cancel_delayed_work_sync(&smu->swctf_delayed_work);

	ret = smu_disable_dpms(smu);
	if (ret) {
		dev_err(adev->dev, "Fail to disable dpm features!\n");

@ -2520,6 +2554,14 @@ static int smu_read_sensor(void *handle,
		*((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK:
		*((uint32_t *)data) = pstate_table->gfxclk_pstate.peak * 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK:
		*((uint32_t *)data) = pstate_table->uclk_pstate.peak * 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
		ret = smu_feature_get_enabled_mask(smu, (uint64_t *)data);
		*size = 8;

@ -573,6 +573,8 @@ struct smu_context
	u32 debug_param_reg;
	u32 debug_msg_reg;
	u32 debug_resp_reg;

	struct delayed_work swctf_delayed_work;
};

struct i2c_adapter;

@ -1438,13 +1438,8 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
	if (client_id == SOC15_IH_CLIENTID_THM) {
		switch (src_id) {
		case THM_11_0__SRCID__THM_DIG_THERM_L2H:
			dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
			/*
			 * SW CTF just occurred.
			 * Try to do a graceful shutdown to prevent further damage.
			 */
			dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
			orderly_poweroff(true);
			schedule_delayed_work(&smu->swctf_delayed_work,
					      msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY));
			break;
		case THM_11_0__SRCID__THM_DIG_THERM_H2L:
			dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n");

@ -1386,13 +1386,8 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
	if (client_id == SOC15_IH_CLIENTID_THM) {
		switch (src_id) {
		case THM_11_0__SRCID__THM_DIG_THERM_L2H:
			dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
			/*
			 * SW CTF just occurred.
			 * Try to do a graceful shutdown to prevent further damage.
			 */
			dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
			orderly_poweroff(true);
			schedule_delayed_work(&smu->swctf_delayed_work,
					      msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY));
			break;
		case THM_11_0__SRCID__THM_DIG_THERM_H2L:
			dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n");

@ -588,13 +588,12 @@ int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
#else
			nfences = kmalloc(count * sizeof(void *),
			    GFP_KERNEL);
			if (nfences != NULL && *fences != NULL)
			if (nfences != NULL && *fences != NULL) {
				memcpy(nfences, *fences,
				    (count - 1) * sizeof(void *));
			if (nfences) {
				kfree(*fences);
				new_fences = nfences;
			}
			new_fences = nfences;
#endif
		if (count && !new_fences) {
			kfree(*fences);
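
The corrected block above follows the usual grow-and-copy idiom: allocate the larger array, copy only if both pointers are valid, and free/publish the old array only once allocation has succeeded. A standalone sketch of the same pattern, with hypothetical names:

/* Illustrative grow-by-one of a pointer array; on failure *arr is untouched. */
static int grow_ptr_array(void ***arr, size_t new_count)
{
	void **n = kmalloc(new_count * sizeof(void *), GFP_KERNEL);

	if (n == NULL)
		return -ENOMEM;		/* old array is still valid */
	if (*arr != NULL)
		memcpy(n, *arr, (new_count - 1) * sizeof(void *));
	kfree(*arr);			/* safe: contents already copied */
	*arr = n;
	return 0;
}
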
@ -7123,8 +7123,6 @@ static void intel_update_crtc(struct intel_atomic_state *state,

	intel_fbc_update(state, crtc);

	drm_WARN_ON(&i915->drm, !intel_display_power_is_enabled(i915, POWER_DOMAIN_DC_OFF));

	if (!modeset &&
	    (new_crtc_state->uapi.color_mgmt_changed ||
	     new_crtc_state->update_pipe))

@ -7501,28 +7499,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
	drm_atomic_helper_wait_for_dependencies(&state->base);
	drm_dp_mst_atomic_wait_for_dependencies(&state->base);

	/*
	 * During full modesets we write a lot of registers, wait
	 * for PLLs, etc. Doing that while DC states are enabled
	 * is not a good idea.
	 *
	 * During fastsets and other updates we also need to
	 * disable DC states due to the following scenario:
	 * 1. DC5 exit and PSR exit happen
	 * 2. Some or all _noarm() registers are written
	 * 3. Due to some long delay PSR is re-entered
	 * 4. DC5 entry -> DMC saves the already written new
	 *    _noarm() registers and the old not yet written
	 *    _arm() registers
	 * 5. DC5 exit -> DMC restores a mixture of old and
	 *    new register values and arms the update
	 * 6. PSR exit -> hardware latches a mixture of old and
	 *    new register values -> corrupted frame, or worse
	 * 7. New _arm() registers are finally written
	 * 8. Hardware finally latches a complete set of new
	 *    register values, and subsequent frames will be OK again
	 */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DC_OFF);
	if (state->modeset)
		wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);

	intel_atomic_prepare_plane_clear_colors(state);

@ -7661,8 +7639,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
		 * the culprit.
		 */
		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
	}
	intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF, wakeref);
	intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);

	/*

@ -256,8 +256,8 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)

	if (!HAS_FLAT_CCS(rq->engine->i915)) {
		/* hsdes: 1809175790 */
		cs = gen12_emit_aux_table_inv(rq->engine->gt,
					      cs, GEN12_GFX_CCS_AUX_NV);
		cs = gen12_emit_aux_table_inv(rq->engine->gt, cs,
					      GEN12_CCS_AUX_INV);
	}

	*cs++ = preparser_disable(false);

@ -317,10 +317,10 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
	if (aux_inv) { /* hsdes: 1809175790 */
		if (rq->engine->class == VIDEO_DECODE_CLASS)
			cs = gen12_emit_aux_table_inv(rq->engine->gt,
						      cs, GEN12_VD0_AUX_NV);
						      cs, GEN12_VD0_AUX_INV);
		else
			cs = gen12_emit_aux_table_inv(rq->engine->gt,
						      cs, GEN12_VE0_AUX_NV);
						      cs, GEN12_VE0_AUX_INV);
	}

	if (mode & EMIT_INVALIDATE)

@ -301,9 +301,11 @@
#define GEN8_PRIVATE_PAT_HI		_MMIO(0x40e0 + 4)
#define GEN10_PAT_INDEX(index)		_MMIO(0x40e0 + (index) * 4)
#define BSD_HWS_PGA_GEN7		_MMIO(0x4180)
#define GEN12_GFX_CCS_AUX_NV		_MMIO(0x4208)
#define GEN12_VD0_AUX_NV		_MMIO(0x4218)
#define GEN12_VD1_AUX_NV		_MMIO(0x4228)

#define GEN12_CCS_AUX_INV		_MMIO(0x4208)
#define GEN12_VD0_AUX_INV		_MMIO(0x4218)
#define GEN12_VE0_AUX_INV		_MMIO(0x4238)
#define GEN12_BCS0_AUX_INV		_MMIO(0x4248)

#define GEN8_RTCR			_MMIO(0x4260)
#define GEN8_M1TCR			_MMIO(0x4264)

@ -311,14 +313,12 @@
#define GEN8_BTCR			_MMIO(0x426c)
#define GEN8_VTCR			_MMIO(0x4270)

#define GEN12_VD2_AUX_NV		_MMIO(0x4298)
#define GEN12_VD3_AUX_NV		_MMIO(0x42a8)
#define GEN12_VE0_AUX_NV		_MMIO(0x4238)

#define BLT_HWS_PGA_GEN7		_MMIO(0x4280)

#define GEN12_VE1_AUX_NV		_MMIO(0x42b8)
#define GEN12_VD2_AUX_INV		_MMIO(0x4298)
#define GEN12_CCS0_AUX_INV		_MMIO(0x42c8)
#define   AUX_INV			REG_BIT(0)

#define VEBOX_HWS_PGA_GEN7		_MMIO(0x4380)

#define GEN12_AUX_ERR_DBG		_MMIO(0x43f4)

@ -1304,7 +1304,7 @@ gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs)
	/* hsdes: 1809175790 */
	if (!HAS_FLAT_CCS(ce->engine->i915))
		cs = gen12_emit_aux_table_inv(ce->engine->gt,
					      cs, GEN12_GFX_CCS_AUX_NV);
					      cs, GEN12_CCS_AUX_INV);

	/* Wa_16014892111 */
	if (IS_DG2(ce->engine->i915))

@ -1331,10 +1331,10 @@ gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs)
	if (!HAS_FLAT_CCS(ce->engine->i915)) {
		if (ce->engine->class == VIDEO_DECODE_CLASS)
			cs = gen12_emit_aux_table_inv(ce->engine->gt,
						      cs, GEN12_VD0_AUX_NV);
						      cs, GEN12_VD0_AUX_INV);
		else if (ce->engine->class == VIDEO_ENHANCEMENT_CLASS)
			cs = gen12_emit_aux_table_inv(ce->engine->gt,
						      cs, GEN12_VE0_AUX_NV);
						      cs, GEN12_VE0_AUX_INV);
	}

	return cs;

@ -461,8 +461,11 @@ int i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
		}
	} while (unlikely(is_barrier(active)));

	if (!__i915_active_fence_set(active, fence))
	fence = __i915_active_fence_set(active, fence);
	if (!fence)
		__i915_active_acquire(ref);
	else
		dma_fence_put(fence);

out:
	i915_active_release(ref);

@ -481,13 +484,9 @@ __i915_active_set_fence(struct i915_active *ref,
		return NULL;
	}

	rcu_read_lock();
	prev = __i915_active_fence_set(active, fence);
	if (prev)
		prev = dma_fence_get_rcu(prev);
	else
	if (!prev)
		__i915_active_acquire(ref);
	rcu_read_unlock();

	return prev;
}

@ -1043,10 +1042,11 @@ void i915_request_add_active_barriers(struct i915_request *rq)
 *
 * Records the new @fence as the last active fence along its timeline in
 * this active tracker, moving the tracking callbacks from the previous
 * fence onto this one. Returns the previous fence (if not already completed),
 * which the caller must ensure is executed before the new fence. To ensure
 * that the order of fences within the timeline of the i915_active_fence is
 * understood, it should be locked by the caller.
 * fence onto this one. Gets and returns a reference to the previous fence
 * (if not already completed), which the caller must put after making sure
 * that it is executed before the new fence. To ensure that the order of
 * fences within the timeline of the i915_active_fence is understood, it
 * should be locked by the caller.
 */
struct dma_fence *
__i915_active_fence_set(struct i915_active_fence *active,

@ -1055,7 +1055,23 @@ __i915_active_fence_set(struct i915_active_fence *active,
	struct dma_fence *prev;
	unsigned long flags;

	if (fence == rcu_access_pointer(active->fence))
	/*
	 * In case of fences embedded in i915_requests, their memory is
	 * SLAB_TYPESAFE_BY_RCU, then it can be reused right after release
	 * by new requests. Then, there is a risk of passing back a pointer
	 * to a new, completely unrelated fence that reuses the same memory
	 * while tracked under a different active tracker. Combined with i915
	 * perf open/close operations that build await dependencies between
	 * engine kernel context requests and user requests from different
	 * timelines, this can lead to dependency loops and infinite waits.
	 *
	 * As a countermeasure, we try to get a reference to the active->fence
	 * first, so if we succeed and pass it back to our user then it is not
	 * released and potentially reused by an unrelated request before the
	 * user has a chance to set up an await dependency on it.
	 */
	prev = i915_active_fence_get(active);
	if (fence == prev)
		return fence;

	GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));

@ -1064,27 +1080,56 @@ __i915_active_fence_set(struct i915_active_fence *active,
	 * Consider that we have two threads arriving (A and B), with
	 * C already resident as the active->fence.
	 *
	 * A does the xchg first, and so it sees C or NULL depending
	 * on the timing of the interrupt handler. If it is NULL, the
	 * previous fence must have been signaled and we know that
	 * we are first on the timeline. If it is still present,
	 * we acquire the lock on that fence and serialise with the interrupt
	 * handler, in the process removing it from any future interrupt
	 * callback. A will then wait on C before executing (if present).
	 *
	 * As B is second, it sees A as the previous fence and so waits for
	 * it to complete its transition and takes over the occupancy for
	 * itself -- remembering that it needs to wait on A before executing.
	 * Both A and B have got a reference to C or NULL, depending on the
	 * timing of the interrupt handler. Let's assume that if A has got C
	 * then it has locked C first (before B).
	 *
	 * Note the strong ordering of the timeline also provides consistent
	 * nesting rules for the fence->lock; the inner lock is always the
	 * older lock.
	 */
	spin_lock_irqsave(fence->lock, flags);
	prev = xchg(__active_fence_slot(active), fence);
	if (prev) {
		GEM_BUG_ON(prev == fence);
	if (prev)
		spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);

	/*
	 * A does the cmpxchg first, and so it sees C or NULL, as before, or
	 * something else, depending on the timing of other threads and/or
	 * interrupt handler. If not the same as before then A unlocks C if
	 * applicable and retries, starting from an attempt to get a new
	 * active->fence. Meanwhile, B follows the same path as A.
	 * Once A succeeds with cmpxchg, B fails again, retries, gets A from
	 * active->fence, locks it as soon as A completes, and possibly
	 * succeeds with cmpxchg.
	 */
	while (cmpxchg(__active_fence_slot(active), prev, fence) != prev) {
		if (prev) {
			spin_unlock(prev->lock);
			dma_fence_put(prev);
		}
		spin_unlock_irqrestore(fence->lock, flags);

		prev = i915_active_fence_get(active);
		GEM_BUG_ON(prev == fence);

		spin_lock_irqsave(fence->lock, flags);
		if (prev)
			spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
	}

	/*
	 * If prev is NULL then the previous fence must have been signaled
	 * and we know that we are first on the timeline. If it is still
	 * present then, having the lock on that fence already acquired, we
	 * serialise with the interrupt handler, in the process of removing it
	 * from any future interrupt callback. A will then wait on C before
	 * executing (if present).
	 *
	 * As B is second, it sees A as the previous fence and so waits for
	 * it to complete its transition and takes over the occupancy for
	 * itself -- remembering that it needs to wait on A before executing.
	 */
	if (prev) {
		__list_del_entry(&active->cb.node);
		spin_unlock(prev->lock); /* serialise with prev->cb_list */
	}

@ -1101,11 +1146,7 @@ int i915_active_fence_set(struct i915_active_fence *active,
	int err = 0;

	/* Must maintain timeline ordering wrt previous active requests */
	rcu_read_lock();
	fence = __i915_active_fence_set(active, &rq->fence);
	if (fence) /* but the previous fence may not belong to that timeline! */
		fence = dma_fence_get_rcu(fence);
	rcu_read_unlock();
	if (fence) {
		err = i915_request_await_dma_fence(rq, fence);
		dma_fence_put(fence);
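
The reworked __i915_active_fence_set() is an instance of a general lock-free idiom: pin the currently observed value with a reference, try to publish the new value with cmpxchg, and on failure drop the stale reference and start over. A stripped-down sketch of just that loop; the slot/obj names and the NULL-tolerant obj_get()/obj_put() helpers are hypothetical, standing in for i915_active_fence_get() and dma_fence_put():

	/* Illustrative skeleton of the get-then-cmpxchg retry. */
	for (;;) {
		prev = obj_get(READ_ONCE(*slot));	/* pins prev; tolerates NULL */
		if (cmpxchg(slot, prev, fence) == prev)
			break;		/* published; pinned prev is handed to the caller */
		obj_put(prev);		/* lost the race: drop the stale reference, retry */
	}

The pinned reference is what closes the SLAB_TYPESAFE_BY_RCU reuse window described in the comment above.
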
@ -1728,6 +1728,11 @@ __i915_request_ensure_parallel_ordering(struct i915_request *rq,

	request_to_parent(rq)->parallel.last_rq = i915_request_get(rq);

	/*
	 * Users have to put a reference potentially got by
	 * __i915_active_fence_set() to the returned request
	 * when no longer needed
	 */
	return to_request(__i915_active_fence_set(&timeline->last_request,
						  &rq->fence));
}

@ -1774,6 +1779,10 @@ __i915_request_ensure_ordering(struct i915_request *rq,
					 0);
	}

	/*
	 * Users have to put the reference to prev potentially got
	 * by __i915_active_fence_set() when no longer needed
	 */
	return prev;
}

@ -1817,6 +1826,8 @@ __i915_request_add_to_timeline(struct i915_request *rq)
		prev = __i915_request_ensure_ordering(rq, timeline);
	else
		prev = __i915_request_ensure_parallel_ordering(rq, timeline);
	if (prev)
		i915_request_put(prev);

	/*
	 * Make sure that no request gazumped us - if it was allocated after

@ -552,7 +552,8 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,

	if (bo->pin_count) {
		*locked = false;
		*busy = false;
		if (busy)
			*busy = false;
		return false;
	}
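
The one-line fix above is the standard guard for optional out-parameters: callers that do not care pass NULL, so every store through the pointer must be conditional. In miniature, with hypothetical names:

static bool example_evict_allowable(struct ttm_buffer_object *bo,
				    bool *locked, bool *busy /* may be NULL */)
{
	if (bo->pin_count) {
		*locked = false;	/* required out-parameter */
		if (busy)		/* optional: may legitimately be NULL */
			*busy = false;
		return false;
	}

	return true;
}
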
@ -1,4 +1,4 @@
/*	$OpenBSD: if_aq_pci.c,v 1.22 2023/05/02 12:32:22 kettenis Exp $	*/
/*	$OpenBSD: if_aq_pci.c,v 1.23 2023/08/15 08:27:30 miod Exp $	*/
/*	$NetBSD: if_aq.c,v 1.27 2021/06/16 00:21:18 riastradh Exp $	*/

/*

@ -175,7 +175,7 @@
#define AQ_INTR_CTRL_IRQMODE_MSIX	2
#define AQ_INTR_CTRL_MULTIVEC		(1 << 2)
#define AQ_INTR_CTRL_RESET_DIS		(1 << 29)
#define AQ_INTR_CTRL_RESET_IRQ		(1 << 31)
#define AQ_INTR_CTRL_RESET_IRQ		(1U << 31)
#define AQ_MBOXIF_POWER_GATING_CONTROL_REG	0x32a8

#define FW_MPI_MBOX_ADDR_REG		0x0360

@ -220,12 +220,12 @@
#define RPF_L2UC_MSW_MACADDR_HI		0xFFFF
#define RPF_L2UC_MSW_ACTION		0x70000
#define RPF_L2UC_MSW_TAG		0x03c00000
#define RPF_L2UC_MSW_EN			(1 << 31)
#define RPF_L2UC_MSW_EN			(1U << 31)
#define AQ_HW_MAC_NUM			34

/* RPF_MCAST_FILTER_REG[8] 0x5250-0x5270 */
#define RPF_MCAST_FILTER_REG(i)		(0x5250 + (i) * 4)
#define RPF_MCAST_FILTER_EN		(1 << 31)
#define RPF_MCAST_FILTER_EN		(1U << 31)
#define RPF_MCAST_FILTER_MASK_REG	0x5270
#define RPF_MCAST_FILTER_MASK_ALLMULTI	(1 << 14)

@ -240,14 +240,14 @@

/* RPF_ETHERTYPE_FILTER_REG[AQ_RINGS_NUM] 0x5300-0x5380 */
#define RPF_ETHERTYPE_FILTER_REG(i)	(0x5300 + (i) * 4)
#define RPF_ETHERTYPE_FILTER_EN		(1 << 31)
#define RPF_ETHERTYPE_FILTER_EN		(1U << 31)

/* RPF_L3_FILTER_REG[8] 0x5380-0x53a0 */
#define RPF_L3_FILTER_REG(i)		(0x5380 + (i) * 4)
#define RPF_L3_FILTER_L4_EN		(1 << 31)
#define RPF_L3_FILTER_L4_EN		(1U << 31)

#define RX_FLR_RSS_CONTROL1_REG		0x54c0
#define RX_FLR_RSS_CONTROL1_EN		(1 << 31)
#define RX_FLR_RSS_CONTROL1_EN		(1U << 31)

#define RPF_RPB_RX_TC_UPT_REG		0x54c4
#define RPF_RPB_RX_TC_UPT_MASK(i)	(0x00000007 << ((i) * 4))

@ -278,7 +278,7 @@
#define RPB_RXB_BUFSIZE_REG(i)		(0x5710 + (i) * 0x10)
#define RPB_RXB_BUFSIZE			0x1FF
#define RPB_RXB_XOFF_REG(i)		(0x5714 + (i) * 0x10)
#define RPB_RXB_XOFF_EN			(1 << 31)
#define RPB_RXB_XOFF_EN			(1U << 31)
#define RPB_RXB_XOFF_THRESH_HI		0x3FFF0000
#define RPB_RXB_XOFF_THRESH_LO		0x3FFF

@ -301,7 +301,7 @@
#define RX_DMA_DESC_RESET		(1 << 25)
#define RX_DMA_DESC_HEADER_SPLIT	(1 << 28)
#define RX_DMA_DESC_VLAN_STRIP		(1 << 29)
#define RX_DMA_DESC_EN			(1 << 31)
#define RX_DMA_DESC_EN			(1U << 31)
#define RX_DMA_DESC_HEAD_PTR_REG(i)	(0x5b0c + (i) * 0x20)
#define RX_DMA_DESC_HEAD_PTR		0xFFF
#define RX_DMA_DESC_TAIL_PTR_REG(i)	(0x5b10 + (i) * 0x20)

@ -313,10 +313,10 @@
#define RX_DMA_DCAD_CPUID		0xFF
#define RX_DMA_DCAD_PAYLOAD_EN		(1 << 29)
#define RX_DMA_DCAD_HEADER_EN		(1 << 30)
#define RX_DMA_DCAD_DESC_EN		(1 << 31)
#define RX_DMA_DCAD_DESC_EN		(1U << 31)

#define RX_DMA_DCA_REG			0x6180
#define RX_DMA_DCA_EN			(1 << 31)
#define RX_DMA_DCA_EN			(1U << 31)
#define RX_DMA_DCA_MODE			0xF

#define TX_SYSCONTROL_REG		0x7000

@ -328,7 +328,7 @@
#define TPS_DESC_VM_ARB_MODE_REG	0x7300
#define TPS_DESC_VM_ARB_MODE		(1 << 0)
#define TPS_DESC_RATE_REG		0x7310
#define TPS_DESC_RATE_TA_RST		(1 << 31)
#define TPS_DESC_RATE_TA_RST		(1U << 31)
#define TPS_DESC_RATE_LIM		0x7FF
#define TPS_DESC_TC_ARB_MODE_REG	0x7200
#define TPS_DESC_TC_ARB_MODE		0x3

@ -393,7 +393,7 @@
#define TDM_DCAD_CPUID_EN		0x80000000

#define TDM_DCA_REG			0x8480
#define TDM_DCA_EN			(1 << 31)
#define TDM_DCA_EN			(1U << 31)
#define TDM_DCA_MODE			0xF

#define TX_INTR_MODERATION_CTL_REG(i)	(0x8980 + (i) * 4)

@ -418,7 +418,7 @@
#define AQ2_MIF_BOOT_CRASH_INIT		(1 << 27)
#define AQ2_MIF_BOOT_BOOT_CODE_FAILED	(1 << 28)
#define AQ2_MIF_BOOT_FW_INIT_FAILED	(1 << 29)
#define AQ2_MIF_BOOT_FW_INIT_COMP_SUCCESS	(1 << 31)
#define AQ2_MIF_BOOT_FW_INIT_COMP_SUCCESS	(1U << 31)

/* AQ2 action resolver table */
#define AQ2_ART_ACTION_ACT_SHIFT	8
@@ -1,4 +1,4 @@
/* $OpenBSD: if_mcx.c,v 1.107 2023/06/06 01:40:04 dlg Exp $ */
/* $OpenBSD: if_mcx.c,v 1.108 2023/08/15 08:27:30 miod Exp $ */

/*
 * Copyright (c) 2017 David Gwynne <dlg@openbsd.org>
@@ -149,8 +149,8 @@ CTASSERT(MCX_MAX_QUEUES * MCX_WQ_DOORBELL_STRIDE <
#define MCX_CMDQ_DOORBELL 0x0018

#define MCX_STATE 0x01fc
#define MCX_STATE_MASK (1 << 31)
#define MCX_STATE_INITIALIZING (1 << 31)
#define MCX_STATE_MASK (1U << 31)
#define MCX_STATE_INITIALIZING (1U << 31)
#define MCX_STATE_READY (0 << 31)
#define MCX_STATE_INTERFACE_MASK (0x3 << 24)
#define MCX_STATE_INTERFACE_FULL_DRIVER (0x0 << 24)
@@ -1405,7 +1405,7 @@ struct mcx_cmd_create_tir_mb_in {
#define MCX_TIR_CTX_HASH_SEL_SPORT (1 << 2)
#define MCX_TIR_CTX_HASH_SEL_DPORT (1 << 3)
#define MCX_TIR_CTX_HASH_SEL_IPV4 (0 << 31)
#define MCX_TIR_CTX_HASH_SEL_IPV6 (1 << 31)
#define MCX_TIR_CTX_HASH_SEL_IPV6 (1U << 31)
#define MCX_TIR_CTX_HASH_SEL_TCP (0 << 30)
#define MCX_TIR_CTX_HASH_SEL_UDP (1 << 30)
	uint32_t cmd_rx_hash_sel_inner;
@@ -1675,7 +1675,7 @@ CTASSERT(sizeof(struct mcx_wq_ctx) == 0xC0);

struct mcx_sq_ctx {
	uint32_t sq_flags;
#define MCX_SQ_CTX_RLKEY (1 << 31)
#define MCX_SQ_CTX_RLKEY (1U << 31)
#define MCX_SQ_CTX_FRE_SHIFT (1 << 29)
#define MCX_SQ_CTX_FLUSH_IN_ERROR (1 << 28)
#define MCX_SQ_CTX_MIN_WQE_INLINE_SHIFT 24
@@ -1722,7 +1722,7 @@ struct mcx_sq_entry {
	/* ethernet segment */
	uint32_t sqe_reserved1;
	uint32_t sqe_mss_csum;
#define MCX_SQE_L4_CSUM (1 << 31)
#define MCX_SQE_L4_CSUM (1U << 31)
#define MCX_SQE_L3_CSUM (1 << 30)
	uint32_t sqe_reserved2;
	uint16_t sqe_inline_header_size;
@@ -1789,7 +1789,7 @@ struct mcx_cmd_destroy_sq_out {

struct mcx_rq_ctx {
	uint32_t rq_flags;
#define MCX_RQ_CTX_RLKEY (1 << 31)
#define MCX_RQ_CTX_RLKEY (1U << 31)
#define MCX_RQ_CTX_VLAN_STRIP_DIS (1 << 28)
#define MCX_RQ_CTX_MEM_RQ_TYPE_SHIFT 24
#define MCX_RQ_CTX_STATE_SHIFT 20

@@ -1,4 +1,4 @@
/* $OpenBSD: igc_regs.h,v 1.1 2021/10/31 14:52:57 patrick Exp $ */
/* $OpenBSD: igc_regs.h,v 1.2 2023/08/15 08:27:30 miod Exp $ */
/*-
 * Copyright 2021 Intel Corp
 * Copyright 2021 Rubicon Communications, LLC (Netgate)
@@ -333,7 +333,7 @@
/* ETQF register bit definitions */
#define IGC_ETQF_FILTER_ENABLE (1 << 26)
#define IGC_ETQF_IMM_INT (1 << 29)
#define IGC_ETQF_QUEUE_ENABLE (1 << 31)
#define IGC_ETQF_QUEUE_ENABLE (1U << 31)
#define IGC_ETQF_QUEUE_SHIFT 16
#define IGC_ETQF_QUEUE_MASK 0x00070000
#define IGC_ETQF_ETYPE_MASK 0x0000FFFF

@@ -1,4 +1,4 @@
/* $OpenBSD: ixgbe_type.h,v 1.37 2023/05/18 08:22:37 jan Exp $ */
/* $OpenBSD: ixgbe_type.h,v 1.38 2023/08/15 08:27:30 miod Exp $ */

/******************************************************************************
  SPDX-License-Identifier: BSD-3-Clause
@@ -4454,7 +4454,7 @@ struct ixgbe_bypass_eeprom {
#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR (1 << 26)
#define IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE (1 << 28)
#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE (1 << 29)
#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART (1 << 31)
#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART (1U << 31)

#define IXGBE_KRM_AN_CNTL_1_SYM_PAUSE (1 << 28)
#define IXGBE_KRM_AN_CNTL_1_ASM_PAUSE (1 << 29)
@@ -4484,7 +4484,7 @@ struct ixgbe_bypass_eeprom {
#define IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN (1 << 1)
#define IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN (1 << 2)
#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN (1 << 3)
#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN (1 << 31)
#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN (1U << 31)

#define IXGBE_SB_IOSF_INDIRECT_CTRL 0x00011144
#define IXGBE_SB_IOSF_INDIRECT_DATA 0x00011148

@@ -1,4 +1,4 @@
/* $OpenBSD: pckbd.c,v 1.50 2023/07/25 10:00:44 miod Exp $ */
/* $OpenBSD: pckbd.c,v 1.51 2023/08/13 21:54:02 miod Exp $ */
/* $NetBSD: pckbd.c,v 1.24 2000/06/05 22:20:57 sommerfeld Exp $ */

/*-
@@ -344,7 +344,7 @@ pckbdprobe(struct device *parent, void *match, void *aux)
{
	struct cfdata *cf = match;
	struct pckbc_attach_args *pa = aux;
	u_char cmd[1], resp[1];
	u_char cmd[1], resp[2];
	int res;

	/*
@@ -363,10 +363,40 @@ pckbdprobe(struct device *parent, void *match, void *aux)
	/* Reset the keyboard. */
	cmd[0] = KBC_RESET;
	res = pckbc_poll_cmd(pa->pa_tag, pa->pa_slot, cmd, 1, 1, resp, 1);
	if (res) {
	if (res != 0) {
#ifdef DEBUG
		printf("pckbdprobe: reset error %d\n", res);
#endif
	} else if (resp[0] != KBR_RSTDONE) {
#ifdef DEBUG
		printf("pckbdprobe: reset response 0x%x\n", resp[0]);
#endif
		res = EINVAL;
	}
#if defined(__i386__) || defined(__amd64__)
	if (res) {
		/*
		 * The 8042 emulation on Chromebooks fails the reset
		 * command but otherwise appears to work correctly.
		 * Try a "get ID" command to give it a second chance.
		 */
		cmd[0] = KBC_GETID;
		res = pckbc_poll_cmd(pa->pa_tag, pa->pa_slot,
		    cmd, 1, 2, resp, 0);
		if (res != 0) {
#ifdef DEBUG
			printf("pckbdprobe: getid error %d\n", res);
#endif
		} else if (resp[0] != 0xab || resp[1] != 0x83) {
#ifdef DEBUG
			printf("pckbdprobe: unexpected id 0x%x/0x%x\n",
			    resp[0], resp[1]);
#endif
			res = EINVAL;
		}
	}
#endif
	if (res) {
		/*
		 * There is probably no keyboard connected.
		 * Let the probe succeed if the keyboard is used
@@ -387,10 +417,6 @@ pckbdprobe(struct device *parent, void *match, void *aux)
#endif
		return (pckbd_is_console(pa->pa_tag, pa->pa_slot) ? 1 : 0);
	}
	if (resp[0] != KBR_RSTDONE) {
		printf("pckbdprobe: reset response 0x%x\n", resp[0]);
		return (0);
	}

	/*
	 * Some keyboards seem to leave a second ack byte after the reset.

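The probe logic added above boils down to: try KBC_RESET and expect KBR_RSTDONE (0xaa); if that fails, as it does on some Chromebook 8042 emulations, fall back to KBC_GETID and accept the standard AT keyboard ID bytes 0xab 0x83. A condensed sketch of the same decision tree, with a hypothetical command callback standing in for pckbc_poll_cmd():

/*
 * Sketch only; kbc_cmd_fn abstracts the controller exchange so the
 * logic can be shown outside the kernel.
 */
#include <errno.h>

#define KBC_RESET	0xff	/* reset the keyboard */
#define KBC_GETID	0xf2	/* get keyboard ID */
#define KBR_RSTDONE	0xaa	/* reset complete */

/* send one command byte, read nresp response bytes; 0 on success */
typedef int (*kbc_cmd_fn)(unsigned char cmd, unsigned char *resp, int nresp);

static int
probe_keyboard(kbc_cmd_fn cmd)
{
	unsigned char resp[2];

	if (cmd(KBC_RESET, resp, 1) == 0 && resp[0] == KBR_RSTDONE)
		return 0;
	/* Chromebook 8042 emulation: reset fails, but "get ID" works. */
	if (cmd(KBC_GETID, resp, 2) == 0 &&
	    resp[0] == 0xab && resp[1] == 0x83)
		return 0;
	return ENXIO;	/* probably no keyboard connected */
}
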
@@ -1,4 +1,4 @@
/* $OpenBSD: pckbdreg.h,v 1.2 2003/10/22 09:44:22 jmc Exp $ */
/* $OpenBSD: pckbdreg.h,v 1.3 2023/08/13 21:54:02 miod Exp $ */
/* $NetBSD: pckbdreg.h,v 1.2 1998/04/07 13:43:16 hannken Exp $ */

/*
@@ -12,6 +12,7 @@
#define KBC_DISABLE 0xF5 /* as per KBC_SETDEFAULT, but also disable key scanning */
#define KBC_ENABLE 0xF4 /* enable key scanning */
#define KBC_TYPEMATIC 0xF3 /* set typematic rate and delay */
#define KBC_GETID 0xF2 /* get keyboard ID (not supported on AT kbd) */
#define KBC_SETTABLE 0xF0 /* set scancode translation table */
#define KBC_MODEIND 0xED /* set mode indicators (i.e. LEDs) */
#define KBC_ECHO 0xEE /* request an echo from the keyboard */

@@ -1,4 +1,4 @@
/* $OpenBSD: pms.c,v 1.97 2022/07/23 05:55:16 sdk Exp $ */
/* $OpenBSD: pms.c,v 1.98 2023/08/16 20:53:47 bru Exp $ */
/* $NetBSD: psm.c,v 1.11 2000/06/05 22:20:57 sommerfeld Exp $ */

/*-
@@ -1075,7 +1075,11 @@ synaptics_get_hwinfo(struct pms_softc *sc)
	hw->y_max = (max_coords ?
	    SYNAPTICS_Y_LIMIT(max_coords) : SYNAPTICS_YMAX_BEZEL);

	hw->contacts_max = SYNAPTICS_MAX_FINGERS;
	if ((syn->capabilities & SYNAPTICS_CAP_MULTIFINGER) ||
	    SYNAPTICS_SUPPORTS_AGM(syn->ext_capabilities))
		hw->contacts_max = SYNAPTICS_MAX_FINGERS;
	else
		hw->contacts_max = 1;

	syn->sec_buttons = 0;

@@ -1,4 +1,4 @@
/* $OpenBSD: dwc2_hw.h,v 1.4 2022/09/04 08:42:40 mglocker Exp $ */
/* $OpenBSD: dwc2_hw.h,v 1.5 2023/08/15 08:27:30 miod Exp $ */
/* $NetBSD: dwc2_hw.h,v 1.2 2013/09/25 06:19:22 skrll Exp $ */

/*
@@ -127,7 +127,7 @@
#define GUSBCFG_TOUTCAL(_x) ((_x) << 0)

#define GRSTCTL HSOTG_REG(0x010)
#define GRSTCTL_AHBIDLE (1 << 31)
#define GRSTCTL_AHBIDLE (1U << 31)
#define GRSTCTL_DMAREQ (1 << 30)
#define GRSTCTL_CSFTRST_DONE (1 << 29)
#define GRSTCTL_TXFNUM_MASK (0x1f << 6)
@@ -143,7 +143,7 @@

#define GINTSTS HSOTG_REG(0x014)
#define GINTMSK HSOTG_REG(0x018)
#define GINTSTS_WKUPINT (1 << 31)
#define GINTSTS_WKUPINT (1U << 31)
#define GINTSTS_SESSREQINT (1 << 30)
#define GINTSTS_DISCONNINT (1 << 29)
#define GINTSTS_CONIDSTSCHNG (1 << 28)
@@ -219,7 +219,7 @@
#define GNPTXSTS_NP_TXF_SPC_AVAIL_GET(_v) (((_v) >> 0) & 0xffff)

#define GI2CCTL HSOTG_REG(0x0030)
#define GI2CCTL_BSYDNE (1 << 31)
#define GI2CCTL_BSYDNE (1U << 31)
#define GI2CCTL_RW (1 << 30)
#define GI2CCTL_I2CDATSE0 (1 << 28)
#define GI2CCTL_I2CDEVADDR_MASK (0x3 << 26)
@@ -246,7 +246,7 @@
#define GSNPSID_ID_MASK 0xffff0000

#define GHWCFG2 HSOTG_REG(0x0048)
#define GHWCFG2_OTG_ENABLE_IC_USB (1 << 31)
#define GHWCFG2_OTG_ENABLE_IC_USB (1U << 31)
#define GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK (0x1f << 26)
#define GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT 26
#define GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK (0x3 << 24)
@@ -307,7 +307,7 @@
#define GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT 0

#define GHWCFG4 HSOTG_REG(0x0050)
#define GHWCFG4_DESC_DMA_DYN (1 << 31)
#define GHWCFG4_DESC_DMA_DYN (1U << 31)
#define GHWCFG4_DESC_DMA (1 << 30)
#define GHWCFG4_NUM_IN_EPS_MASK (0xf << 26)
#define GHWCFG4_NUM_IN_EPS_SHIFT 26
@@ -336,7 +336,7 @@
#define GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT 0

#define GLPMCFG HSOTG_REG(0x0054)
#define GLPMCFG_INVSELHSIC (1 << 31)
#define GLPMCFG_INVSELHSIC (1U << 31)
#define GLPMCFG_HSICCON (1 << 30)
#define GLPMCFG_RSTRSLPSTS (1 << 29)
#define GLPMCFG_ENBESL (1 << 28)
@@ -554,7 +554,7 @@
#define D0EPCTL_MPS_16 2
#define D0EPCTL_MPS_8 3

#define DXEPCTL_EPENA (1 << 31)
#define DXEPCTL_EPENA (1U << 31)
#define DXEPCTL_EPDIS (1 << 30)
#define DXEPCTL_SETD1PID (1 << 29)
#define DXEPCTL_SETODDFR (1 << 29)
@@ -652,7 +652,7 @@
#define DTXFSTS(_a) HSOTG_REG(0x918 + ((_a) * 0x20))

#define PCGCTL HSOTG_REG(0x0e00)
#define PCGCTL_IF_DEV_MODE (1 << 31)
#define PCGCTL_IF_DEV_MODE (1U << 31)
#define PCGCTL_P2HD_PRT_SPD_MASK (0x3 << 29)
#define PCGCTL_P2HD_PRT_SPD_SHIFT 29
#define PCGCTL_P2HD_DEV_ENUM_SPD_MASK (0x3 << 27)
@@ -688,7 +688,7 @@
/* Host Mode Registers */

#define HCFG HSOTG_REG(0x0400)
#define HCFG_MODECHTIMEN (1 << 31)
#define HCFG_MODECHTIMEN (1U << 31)
#define HCFG_PERSCHEDENA (1 << 26)
#define HCFG_FRLISTEN_MASK (0x3 << 24)
#define HCFG_FRLISTEN_SHIFT 24
@@ -724,7 +724,7 @@
#define HFNUM_MAX_FRNUM 0x3fff

#define HPTXSTS HSOTG_REG(0x0410)
#define TXSTS_QTOP_ODD (1 << 31)
#define TXSTS_QTOP_ODD (1U << 31)
#define TXSTS_QTOP_CHNEP_MASK (0xf << 27)
#define TXSTS_QTOP_CHNEP_SHIFT 27
#define TXSTS_QTOP_TOKEN_MASK (0x3 << 25)
@@ -761,7 +761,7 @@
#define HPRT0_CONNSTS (1 << 0)

#define HCCHAR(_ch) HSOTG_REG(0x0500 + 0x20 * (_ch))
#define HCCHAR_CHENA (1 << 31)
#define HCCHAR_CHENA (1U << 31)
#define HCCHAR_CHDIS (1 << 30)
#define HCCHAR_ODDFRM (1 << 29)
#define HCCHAR_DEVADDR_MASK (0x7f << 22)
@@ -778,7 +778,7 @@
#define HCCHAR_MPS_SHIFT 0

#define HCSPLT(_ch) HSOTG_REG(0x0504 + 0x20 * (_ch))
#define HCSPLT_SPLTENA (1 << 31)
#define HCSPLT_SPLTENA (1U << 31)
#define HCSPLT_COMPSPLT (1 << 16)
#define HCSPLT_XACTPOS_MASK (0x3 << 14)
#define HCSPLT_XACTPOS_SHIFT 14
@@ -810,7 +810,7 @@
#define HCINTMSK_XFERCOMPL (1 << 0)

#define HCTSIZ(_ch) HSOTG_REG(0x0510 + 0x20 * (_ch))
#define TSIZ_DOPNG (1 << 31)
#define TSIZ_DOPNG (1U << 31)
#define TSIZ_SC_MC_PID_MASK (0x3 << 29)
#define TSIZ_SC_MC_PID_SHIFT 29
#define TSIZ_SC_MC_PID_DATA0 0
@@ -850,7 +850,7 @@ struct dwc2_dma_desc {

/* Host Mode DMA descriptor status quadlet */

#define HOST_DMA_A (1 << 31)
#define HOST_DMA_A (1U << 31)
#define HOST_DMA_STS_MASK (0x3 << 28)
#define HOST_DMA_STS_SHIFT 28
#define HOST_DMA_STS_PKTERR (1 << 28)

@@ -1,4 +1,4 @@
/* $OpenBSD: if_urereg.h,v 1.12 2023/05/06 08:07:10 kevlo Exp $ */
/* $OpenBSD: if_urereg.h,v 1.13 2023/08/15 08:27:30 miod Exp $ */
/*-
 * Copyright (c) 2015, 2016, 2019 Kevin Lo <kevlo@openbsd.org>
 * All rights reserved.
@@ -567,11 +567,11 @@ struct ure_rxpkt {

struct ure_txpkt {
	uint32_t ure_pktlen;
#define URE_TXPKT_TX_FS (1 << 31)
#define URE_TXPKT_TX_FS (1U << 31)
#define URE_TXPKT_TX_LS (1 << 30)
#define URE_TXPKT_LEN_MASK 0xffff
	uint32_t ure_vlan;
#define URE_TXPKT_UDP (1 << 31)
#define URE_TXPKT_UDP (1U << 31)
#define URE_TXPKT_TCP (1 << 30)
#define URE_TXPKT_IPV4 (1 << 29)
#define URE_TXPKT_IPV6 (1 << 28)

@@ -1,4 +1,4 @@
/* $OpenBSD: uhidev.c,v 1.108 2022/05/20 05:03:45 anton Exp $ */
/* $OpenBSD: uhidev.c,v 1.109 2023/08/12 20:47:06 miod Exp $ */
/* $NetBSD: uhidev.c,v 1.14 2003/03/11 16:44:00 augustss Exp $ */

/*
@@ -139,6 +139,22 @@ uhidev_match(struct device *parent, void *match, void *aux)
	return (UMATCH_IFACECLASS_GENERIC);
}

int
uhidev_attach_repid(struct uhidev_softc *sc, struct uhidev_attach_arg *uha,
    int repid)
{
	struct device *dev;

	/* Could already be assigned by uhidev_set_report_dev(). */
	if (sc->sc_subdevs[repid] != NULL)
		return 0;

	uha->reportid = repid;
	dev = config_found_sm(&sc->sc_dev, uha, uhidevprint, NULL);
	sc->sc_subdevs[repid] = (struct uhidev *)dev;
	return 1;
}

void
uhidev_attach(struct device *parent, struct device *self, void *aux)
{
@@ -283,6 +299,35 @@ uhidev_attach(struct device *parent, struct device *self, void *aux)
	free(uha.claimed, M_TEMP, nrepid);
	uha.claimed = NULL;

	/* Special case for Wacom tablets */
	if (uha.uaa->vendor == USB_VENDOR_WACOM) {
		int ndigitizers = 0;
		/*
		 * Get all the needed collections (only 3 seem to be of
		 * interest currently).
		 */
		repid = hid_get_id_of_collection(desc, size,
		    HID_USAGE2(HUP_WACOM | HUP_DIGITIZERS, HUD_STYLUS),
		    HCOLL_PHYSICAL);
		if (repid >= 0 && repid < nrepid)
			ndigitizers += uhidev_attach_repid(sc, &uha, repid);
		repid = hid_get_id_of_collection(desc, size,
		    HID_USAGE2(HUP_WACOM | HUP_DIGITIZERS, HUD_TABLET_FKEYS),
		    HCOLL_PHYSICAL);
		if (repid >= 0 && repid < nrepid)
			ndigitizers += uhidev_attach_repid(sc, &uha, repid);
#ifdef notyet /* not handled in hidms_wacom_setup() yet */
		repid = hid_get_id_of_collection(desc, size,
		    HID_USAGE2(HUP_WACOM | HUP_DIGITIZERS, HUD_WACOM_BATTERY),
		    HCOLL_PHYSICAL);
		if (repid >= 0 && repid < nrepid)
			ndigitizers += uhidev_attach_repid(sc, &uha, repid);
#endif

		if (ndigitizers != 0)
			return;
	}

	for (repid = 0; repid < nrepid; repid++) {
		DPRINTF(("%s: try repid=%d\n", __func__, repid));
		if (hid_report_size(desc, size, hid_input, repid) == 0 &&
@@ -290,13 +335,7 @@ uhidev_attach(struct device *parent, struct device *self, void *aux)
		    hid_report_size(desc, size, hid_feature, repid) == 0)
			continue;

		/* Could already be assigned by uhidev_set_report_dev(). */
		if (sc->sc_subdevs[repid] != NULL)
			continue;

		uha.reportid = repid;
		dev = config_found_sm(self, &uha, uhidevprint, NULL);
		sc->sc_subdevs[repid] = (struct uhidev *)dev;
		uhidev_attach_repid(sc, &uha, repid);
	}
}

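The Wacom special case above looks up collections by 32-bit usage values such as HID_USAGE2(HUP_WACOM | HUP_DIGITIZERS, HUD_STYLUS): the macro packs the usage page into the high 16 bits and the usage ID into the low 16, so OR-ing the vendor prefix into the digitizer page yields Wacom's vendor-flavoured digitizer page. A standalone sketch; the constant values are copied here from memory of OpenBSD's hid headers and should be treated as illustrative:

/*
 * Sketch only: how the usage values above are composed.
 */
#include <stdint.h>
#include <stdio.h>

#define HID_USAGE2(page, usage)	(((uint32_t)(page) << 16) | (usage))

#define HUP_DIGITIZERS	0x000d	/* standard digitizer usage page */
#define HUP_WACOM	0xff00	/* vendor-defined page prefix */
#define HUD_STYLUS	0x0020	/* stylus collection usage */

int
main(void)
{
	/* vendor digitizer page: 0xff00 | 0x000d = 0xff0d */
	uint32_t u = HID_USAGE2(HUP_WACOM | HUP_DIGITIZERS, HUD_STYLUS);

	printf("usage = 0x%08x\n", u);	/* prints usage = 0xff0d0020 */
	return 0;
}
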
@@ -1,4 +1,4 @@
$OpenBSD: usbdevs,v 1.757 2023/06/12 11:26:24 jsg Exp $
$OpenBSD: usbdevs,v 1.758 2023/08/12 20:43:49 miod Exp $
/* $NetBSD: usbdevs,v 1.322 2003/05/10 17:47:14 hamajima Exp $ */

/*
@@ -4634,6 +4634,7 @@ product WACOM GRAPHIRE3_4X5 0x0013 Graphire3 4x5
product WACOM GRAPHIRE4_4X5 0x0015 Graphire4 Classic A6
product WACOM INTUOSA5 0x0021 Intuos A5
product WACOM INTUOS_DRAW 0x033b Intuos Draw (CTL-490)
product WACOM INTUOS_S 0x0374 Intuos S (CTL-4100)
product WACOM ONE_S 0x037a One S (CTL-472)
product WACOM ONE_M 0x037b One M (CTL-672)
product WACOM INTUOS_PRO_S 0x0392 Intuos Pro S

@@ -1,10 +1,10 @@
/* $OpenBSD: usbdevs.h,v 1.769 2023/06/12 11:26:54 jsg Exp $ */
/* $OpenBSD: usbdevs.h,v 1.770 2023/08/12 20:44:32 miod Exp $ */

/*
 * THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
 *
 * generated from:
 *	OpenBSD: usbdevs,v 1.757 2023/06/12 11:26:24 jsg Exp
 *	OpenBSD: usbdevs,v 1.758 2023/08/12 20:43:49 miod Exp
 */
/* $NetBSD: usbdevs,v 1.322 2003/05/10 17:47:14 hamajima Exp $ */

@@ -4641,6 +4641,7 @@
#define USB_PRODUCT_WACOM_GRAPHIRE4_4X5 0x0015 /* Graphire4 Classic A6 */
#define USB_PRODUCT_WACOM_INTUOSA5 0x0021 /* Intuos A5 */
#define USB_PRODUCT_WACOM_INTUOS_DRAW 0x033b /* Intuos Draw (CTL-490) */
#define USB_PRODUCT_WACOM_INTUOS_S 0x0374 /* Intuos S (CTL-4100) */
#define USB_PRODUCT_WACOM_ONE_S 0x037a /* One S (CTL-472) */
#define USB_PRODUCT_WACOM_ONE_M 0x037b /* One M (CTL-672) */
#define USB_PRODUCT_WACOM_INTUOS_PRO_S 0x0392 /* Intuos Pro S */

@@ -1,10 +1,10 @@
/* $OpenBSD: usbdevs_data.h,v 1.763 2023/06/12 11:26:54 jsg Exp $ */
/* $OpenBSD: usbdevs_data.h,v 1.764 2023/08/12 20:44:32 miod Exp $ */

/*
 * THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
 *
 * generated from:
 *	OpenBSD: usbdevs,v 1.757 2023/06/12 11:26:24 jsg Exp
 *	OpenBSD: usbdevs,v 1.758 2023/08/12 20:43:49 miod Exp
 */
/* $NetBSD: usbdevs,v 1.322 2003/05/10 17:47:14 hamajima Exp $ */

@@ -11897,6 +11897,10 @@ const struct usb_known_product usb_known_products[] = {
	    USB_VENDOR_WACOM, USB_PRODUCT_WACOM_INTUOS_DRAW,
	    "Intuos Draw (CTL-490)",
	},
	{
	    USB_VENDOR_WACOM, USB_PRODUCT_WACOM_INTUOS_S,
	    "Intuos S (CTL-4100)",
	},
	{
	    USB_VENDOR_WACOM, USB_PRODUCT_WACOM_ONE_S,
	    "One S (CTL-472)",

@@ -1,4 +1,4 @@
/* $OpenBSD: uwacom.c,v 1.7 2022/10/08 06:53:06 mglocker Exp $ */
/* $OpenBSD: uwacom.c,v 1.8 2023/08/12 20:47:06 miod Exp $ */

/*
 * Copyright (c) 2016 Frank Groeneveld <frank@frankgroeneveld.nl>
@@ -43,22 +43,25 @@ struct uwacom_softc {
	struct hidms sc_ms;
	struct hid_location sc_loc_tip_press;
	int sc_flags;
	int sc_x, sc_y, sc_z, sc_w;
	int sc_moved;
};

struct cfdriver uwacom_cd = {
	NULL, "uwacom", DV_DULL
};

const struct usb_devno uwacom_devs[] = {
	{ USB_VENDOR_WACOM, USB_PRODUCT_WACOM_INTUOS_DRAW },
	{ USB_VENDOR_WACOM, USB_PRODUCT_WACOM_ONE_S },
	{ USB_VENDOR_WACOM, USB_PRODUCT_WACOM_ONE_M }
	{ USB_VENDOR_WACOM, USB_PRODUCT_WACOM_ONE_M },
	{ USB_VENDOR_WACOM, USB_PRODUCT_WACOM_INTUOS_S }
};

int uwacom_match(struct device *, void *, void *);
void uwacom_attach(struct device *, struct device *, void *);
int uwacom_detach(struct device *, int);
void uwacom_intr_legacy(struct uhidev *, void *, u_int);
void uwacom_intr(struct uhidev *, void *, u_int);
int uwacom_enable(void *);
void uwacom_disable(void *);
@@ -90,6 +93,9 @@ uwacom_match(struct device *parent, void *match, void *aux)

	uhidev_get_report_desc(uha->parent, &desc, &size);

	if (hid_is_collection(desc, size, uha->reportid,
	    HID_USAGE2(HUP_WACOM | HUP_DIGITIZERS, HUD_DIGITIZER)))
		return (UMATCH_IFACECLASS);
	if (!hid_locate(desc, size, HID_USAGE2(HUP_WACOM, HUG_POINTER),
	    uha->reportid, hid_input, NULL, NULL))
		return (UMATCH_NONE);
@@ -104,10 +110,11 @@ uwacom_attach(struct device *parent, struct device *self, void *aux)
	struct hidms *ms = &sc->sc_ms;
	struct uhidev_attach_arg *uha = (struct uhidev_attach_arg *)aux;
	struct usb_attach_arg *uaa = uha->uaa;
	static uByte wacom_report_buf[2] = { 0x02, 0x02 };
	int size, repid;
	void *desc;

	sc->sc_hdev.sc_intr = uwacom_intr;
	sc->sc_hdev.sc_intr = uwacom_intr_legacy;
	sc->sc_hdev.sc_parent = uha->parent;
	sc->sc_hdev.sc_udev = uaa->device;
	sc->sc_hdev.sc_report_id = uha->reportid;
@@ -141,20 +148,23 @@ uwacom_attach(struct device *parent, struct device *self, void *aux)
	ms->sc_loc_btn[2].pos = 2;
	ms->sc_loc_btn[2].size = 1;

	if (uha->uaa->product == USB_PRODUCT_WACOM_ONE_S) {
		static uByte reportbuf[2] = { 0x02, 0x02 };
		uhidev_set_report(uha->parent, UHID_FEATURE_REPORT, 2,
		    &reportbuf, 2);
		ms->sc_tsscale.maxx = 15200;
		ms->sc_tsscale.maxy = 9500;
	}

	if (uha->uaa->product == USB_PRODUCT_WACOM_INTUOS_DRAW) {
	switch (uha->uaa->product) {
	case USB_PRODUCT_WACOM_ONE_S:
	case USB_PRODUCT_WACOM_INTUOS_S:
		uhidev_set_report(uha->parent, UHID_FEATURE_REPORT,
		    sc->sc_hdev.sc_report_id, &wacom_report_buf,
		    sizeof(wacom_report_buf));
		sc->sc_hdev.sc_intr = uwacom_intr;
		hidms_setup((struct device *)sc, ms, HIDMS_WACOM_SETUP,
		    repid, desc, size);
		break;
	case USB_PRODUCT_WACOM_INTUOS_DRAW:
		sc->sc_flags = UWACOM_USE_PRESSURE | UWACOM_BIG_ENDIAN;
		sc->sc_loc_tip_press.pos = 43;
		sc->sc_loc_tip_press.size = 8;
		ms->sc_tsscale.maxx = 7600;
		ms->sc_tsscale.maxy = 4750;
		break;
	}

	hidms_attach(ms, &uwacom_accessops);
@@ -170,7 +180,7 @@ uwacom_detach(struct device *self, int flags)
}

void
uwacom_intr(struct uhidev *addr, void *buf, u_int len)
uwacom_intr_legacy(struct uhidev *addr, void *buf, u_int len)
{
	struct uwacom_softc *sc = (struct uwacom_softc *)addr;
	struct hidms *ms = &sc->sc_ms;
@@ -195,7 +205,7 @@ uwacom_intr(struct uhidev *addr, void *buf, u_int len)

	for (i = 0; i < ms->sc_num_buttons; i++)
		if (hid_get_data(data, len, &ms->sc_loc_btn[i]))
			buttons |= (1 << i);
			buttons |= 1 << i;

	if (sc->sc_flags & UWACOM_USE_PRESSURE) {
		pressure = hid_get_data(data, len, &sc->sc_loc_tip_press);
@@ -212,6 +222,63 @@ uwacom_intr(struct uhidev *addr, void *buf, u_int len)
	}
}

void
uwacom_intr(struct uhidev *addr, void *buf, u_int len)
{
	struct uwacom_softc *sc = (struct uwacom_softc *)addr;
	struct hidms *ms = &sc->sc_ms;
	u_int32_t buttons = 0;
	uint8_t *data = (uint8_t *)buf;
	int i, j, x, y, dx, dy, dz, dw, pressure, distance;

	if (ms->sc_enabled == 0)
		return;

	x = hid_get_data(data, len, &ms->sc_loc_x);
	y = hid_get_data(data, len, &ms->sc_loc_y);
	pressure = hid_get_data(data, len, &ms->sc_loc_z);
	distance = hid_get_data(data, len, &ms->sc_loc_w);

	if (!sc->sc_moved) {
		sc->sc_x = x;
		sc->sc_y = y;
		sc->sc_z = pressure;
		sc->sc_w = distance;
		sc->sc_moved = 1;
	}

	dx = sc->sc_x - x;
	dy = sc->sc_y - y;
	/* Clamp sensitivity to +/-127 */
	dz = sc->sc_z / 32 - pressure / 32;
	dw = sc->sc_w - distance;

	sc->sc_x = x;
	sc->sc_y = y;
	sc->sc_z = pressure;
	sc->sc_w = distance;

	if (sc->sc_flags & UWACOM_BIG_ENDIAN) {
		x = be16toh(x);
		y = be16toh(y);
	}

	for (i = 0; i < ms->sc_num_stylus_buttons; i++)
		if (hid_get_data(data, len, &ms->sc_loc_btn[i]))
			buttons |= 1 << i;

	for (j = 0; i < ms->sc_num_buttons; i++, j++)
		if (hid_get_data(data, len, &ms->sc_loc_btn[i]))
			buttons |= 1 << j;

	if (x != 0 || y != 0 || pressure != 0 || distance != 0 ||
	    buttons != ms->sc_buttons) {
		wsmouse_motion(ms->sc_wsmousedev, -dx, dy, dz, dw);
		wsmouse_buttons(ms->sc_wsmousedev, buttons);
		wsmouse_input_sync(ms->sc_wsmousedev);
	}
}

int
uwacom_enable(void *v)
{

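The new uwacom_intr() above emulates a relative wsmouse device from absolute tablet samples: the first packet only primes the saved state so the pointer does not jump, every later packet reports deltas against that state, and the pressure delta is divided by 32 to tame its range. A standalone sketch of that conversion; tablet_state and tablet_delta() are hypothetical names, not the driver's:

/*
 * Sketch only: absolute-to-relative conversion as performed by
 * uwacom_intr() above.
 */
#include <stdio.h>

struct tablet_state {
	int x, y, z, w;
	int moved;
};

static void
tablet_delta(struct tablet_state *s, int x, int y, int z, int w,
    int *dx, int *dy, int *dz, int *dw)
{
	if (!s->moved) {		/* prime state on the first packet */
		s->x = x; s->y = y; s->z = z; s->w = w;
		s->moved = 1;
	}
	*dx = s->x - x;			/* the driver passes -dx onward */
	*dy = s->y - y;
	*dz = s->z / 32 - z / 32;	/* scaled-down pressure delta */
	*dw = s->w - w;
	s->x = x; s->y = y; s->z = z; s->w = w;
}

int
main(void)
{
	struct tablet_state s = { 0, 0, 0, 0, 0 };
	int dx, dy, dz, dw;

	tablet_delta(&s, 100, 200, 0, 0, &dx, &dy, &dz, &dw);	/* primes */
	tablet_delta(&s, 110, 190, 4096, 0, &dx, &dy, &dz, &dw);
	printf("dx=%d dy=%d dz=%d dw=%d\n", dx, dy, dz, dw);
	/* prints dx=-10 dy=10 dz=-128 dw=0 */
	return 0;
}
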
@@ -1,4 +1,4 @@
/* $OpenBSD: wstpad.c,v 1.32 2023/07/02 21:44:04 bru Exp $ */
/* $OpenBSD: wstpad.c,v 1.33 2023/08/15 08:27:30 miod Exp $ */

/*
 * Copyright (c) 2015, 2016 Ulf Brosziewski
@@ -151,7 +151,7 @@ struct tpad_touch {
#define WSTPAD_DISABLE (1 << 7)
#define WSTPAD_MTBUTTONS (1 << 8)

#define WSTPAD_MT (1 << 31)
#define WSTPAD_MT (1U << 31)


struct wstpad {