sync with OpenBSD -current

parent 037d8115db
commit 7d66fd8cb0

45 changed files with 2495 additions and 357 deletions
@@ -1,4 +1,4 @@
/* $OpenBSD: sxiccmu.c,v 1.33 2024/01/26 17:50:00 kettenis Exp $ */
/* $OpenBSD: sxiccmu.c,v 1.34 2024/02/02 12:01:49 kettenis Exp $ */
/*
 * Copyright (c) 2007,2009 Dale Rahn <drahn@openbsd.org>
 * Copyright (c) 2013 Artturi Alm

@@ -1168,14 +1168,50 @@ sxiccmu_a80_get_frequency(struct sxiccmu_softc *sc, uint32_t idx)
}

/* Allwinner D1 */
#define D1_PLL_CPU_CTRL_REG 0x0000
#define D1_PLL_CPU_FACTOR_M(x) (((x) >> 0) & 0x3)
#define D1_PLL_CPU_FACTOR_N(x) (((x) >> 8) & 0xff)
#define D1_RISCV_CLK_REG 0x0d00
#define D1_RISCV_CLK_SEL (7 << 24)
#define D1_RISCV_CLK_SEL_HOSC (0 << 24)
#define D1_RISCV_CLK_SEL_PLL_CPU (5 << 24)
#define D1_RISCV_DIV_CFG_FACTOR_M(x) (((x) >> 0) & 0x1f)

uint32_t
sxiccmu_d1_get_frequency(struct sxiccmu_softc *sc, uint32_t idx)
{
    uint32_t parent;
    uint32_t reg;
    uint32_t m, n;

    switch (idx) {
    case D1_CLK_HOSC:
        return clock_get_frequency(sc->sc_node, "hosc");
    case D1_CLK_PLL_CPU:
        reg = SXIREAD4(sc, D1_PLL_CPU_CTRL_REG);
        m = D1_PLL_CPU_FACTOR_M(reg) + 1;
        n = D1_PLL_CPU_FACTOR_N(reg) + 1;
        return (24000000 * n) / m;
    case D1_CLK_PLL_PERIPH0:
        /* Not hardcoded, but recommended. */
        return 600000000;
    case D1_CLK_APB1:
        /* XXX Controlled by a MUX. */
        return 24000000;
    case D1_CLK_RISCV:
        reg = SXIREAD4(sc, D1_RISCV_CLK_REG);
        switch (reg & D1_RISCV_CLK_SEL) {
        case D1_RISCV_CLK_SEL_HOSC:
            parent = D1_CLK_HOSC;
            break;
        case D1_RISCV_CLK_SEL_PLL_CPU:
            parent = D1_CLK_PLL_CPU;
            break;
        default:
            return 0;
        }
        m = D1_RISCV_DIV_CFG_FACTOR_M(reg) + 1;
        return sxiccmu_ccu_get_frequency(sc, &parent) / m;
    }

    printf("%s: 0x%08x\n", __func__, idx);

@@ -1671,9 +1707,72 @@ sxiccmu_a80_set_frequency(struct sxiccmu_softc *sc, uint32_t idx, uint32_t freq)
    return -1;
}

#define D1_SMHC0_CLK_REG 0x0830
#define D1_SMHC1_CLK_REG 0x0834
#define D1_SMHC2_CLK_REG 0x0838
#define D1_SMHC_CLK_SRC_SEL (0x3 << 24)
#define D1_SMHC_CLK_SRC_SEL_HOSC (0x0 << 24)
#define D1_SMHC_CLK_SRC_SEL_PLL_PERIPH0 (0x1 << 24)
#define D1_SMHC_FACTOR_N_MASK (0x3 << 8)
#define D1_SMHC_FACTOR_N_SHIFT 8
#define D1_SMHC_FACTOR_M_MASK (0xf << 0)
#define D1_SMHC_FACTOR_M_SHIFT 0

int
sxiccmu_d1_mmc_set_frequency(struct sxiccmu_softc *sc, bus_size_t offset,
    uint32_t freq)
{
    uint32_t parent_freq;
    uint32_t reg, m, n;
    uint32_t clk_src;

    switch (freq) {
    case 400000:
        n = 2, m = 15;
        clk_src = D1_SMHC_CLK_SRC_SEL_HOSC;
        break;
    case 20000000:
    case 25000000:
    case 26000000:
    case 50000000:
    case 52000000:
        n = 0, m = 0;
        clk_src = D1_SMHC_CLK_SRC_SEL_PLL_PERIPH0;
        parent_freq =
            sxiccmu_d1_get_frequency(sc, D1_CLK_PLL_PERIPH0);
        while ((parent_freq / (1 << n) / 16) > freq)
            n++;
        while ((parent_freq / (1 << n) / (m + 1)) > freq)
            m++;
        break;
    default:
        return -1;
    }

    reg = SXIREAD4(sc, offset);
    reg &= ~D1_SMHC_CLK_SRC_SEL;
    reg |= clk_src;
    reg &= ~D1_SMHC_FACTOR_N_MASK;
    reg |= n << D1_SMHC_FACTOR_N_SHIFT;
    reg &= ~D1_SMHC_FACTOR_M_MASK;
    reg |= m << D1_SMHC_FACTOR_M_SHIFT;
    SXIWRITE4(sc, offset, reg);

    return 0;
}

int
sxiccmu_d1_set_frequency(struct sxiccmu_softc *sc, uint32_t idx, uint32_t freq)
{
    switch (idx) {
    case D1_CLK_MMC0:
        return sxiccmu_d1_mmc_set_frequency(sc, D1_SMHC0_CLK_REG, freq);
    case D1_CLK_MMC1:
        return sxiccmu_d1_mmc_set_frequency(sc, D1_SMHC1_CLK_REG, freq);
    case D1_CLK_MMC2:
        return sxiccmu_d1_mmc_set_frequency(sc, D1_SMHC2_CLK_REG, freq);
    }

    printf("%s: 0x%08x\n", __func__, idx);
    return -1;
}
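
The divider search in sxiccmu_d1_mmc_set_frequency() above first picks the smallest power-of-two pre-divider N that can reach the target together with the largest linear post-divider (16), then walks the linear post-divider M. A standalone sketch of that arithmetic, with the 600 MHz PLL_PERIPH0 rate and a 50 MHz target hard-coded purely for illustration:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
    uint32_t parent_freq = 600000000;   /* assumed D1_CLK_PLL_PERIPH0 rate */
    uint32_t freq = 50000000;           /* requested card clock */
    uint32_t n = 0, m = 0;

    /* Smallest power-of-two pre-divider that can reach the target. */
    while ((parent_freq / (1 << n) / 16) > freq)
        n++;
    /* Then the linear post-divider. */
    while ((parent_freq / (1 << n) / (m + 1)) > freq)
        m++;

    /* Prints: n=0 m=11 -> 50000000 Hz (600 MHz / 1 / 12). */
    printf("n=%u m=%u -> %u Hz\n", n, m,
        parent_freq / (1 << n) / (m + 1));
    return 0;
}
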
@@ -304,7 +304,15 @@ const struct sxiccmu_ccu_bit sun9i_a80_mmc_gates[] = {

/* D1 */

#define D1_CLK_PLL_CPU 0
#define D1_CLK_PLL_PERIPH0 5
#define D1_CLK_APB1 25
#define D1_CLK_MMC0 56
#define D1_CLK_MMC1 57
#define D1_CLK_MMC2 58
#define D1_CLK_BUS_MMC0 59
#define D1_CLK_BUS_MMC1 60
#define D1_CLK_BUS_MMC2 61
#define D1_CLK_BUS_UART0 62
#define D1_CLK_BUS_UART1 63
#define D1_CLK_BUS_UART2 64

@@ -317,8 +325,17 @@ const struct sxiccmu_ccu_bit sun9i_a80_mmc_gates[] = {
#define D1_CLK_BUS_OHCI1 100
#define D1_CLK_BUS_EHCI0 101
#define D1_CLK_BUS_EHCI1 102
#define D1_CLK_RISCV 132

#define D1_CLK_HOSC 255

const struct sxiccmu_ccu_bit sun20i_d1_gates[] = {
    [D1_CLK_MMC0] = { 0x0830, 31 },
    [D1_CLK_MMC1] = { 0x0834, 31 },
    [D1_CLK_MMC2] = { 0x0838, 31 },
    [D1_CLK_BUS_MMC0] = { 0x084c, 0 },
    [D1_CLK_BUS_MMC1] = { 0x084c, 1 },
    [D1_CLK_BUS_MMC2] = { 0x084c, 2 },
    [D1_CLK_BUS_UART0] = { 0x090c, 0, D1_CLK_APB1 },
    [D1_CLK_BUS_UART1] = { 0x090c, 1, D1_CLK_APB1 },
    [D1_CLK_BUS_UART2] = { 0x090c, 2, D1_CLK_APB1 },

@@ -801,6 +818,9 @@ const struct sxiccmu_ccu_bit sun9i_a80_mmc_resets[] = {

/* D1 */

#define D1_RST_BUS_MMC0 15
#define D1_RST_BUS_MMC1 16
#define D1_RST_BUS_MMC2 17
#define D1_RST_BUS_UART0 18
#define D1_RST_BUS_UART1 19
#define D1_RST_BUS_UART2 20

@@ -815,6 +835,9 @@ const struct sxiccmu_ccu_bit sun9i_a80_mmc_resets[] = {
#define D1_RST_BUS_EHCI1 45

const struct sxiccmu_ccu_bit sun20i_d1_resets[] = {
    [D1_RST_BUS_MMC0] = { 0x084c, 16 },
    [D1_RST_BUS_MMC1] = { 0x084c, 17 },
    [D1_RST_BUS_MMC2] = { 0x084c, 18 },
    [D1_RST_BUS_UART0] = { 0x090c, 16 },
    [D1_RST_BUS_UART1] = { 0x090c, 17 },
    [D1_RST_BUS_UART2] = { 0x090c, 18 },
@@ -1,4 +1,4 @@
/* $OpenBSD: sximmc.c,v 1.12 2021/10/24 17:52:27 mpi Exp $ */
/* $OpenBSD: sximmc.c,v 1.13 2024/02/02 12:02:26 kettenis Exp $ */
/* $NetBSD: awin_mmc.c,v 1.23 2015/11/14 10:32:40 bouyer Exp $ */

/*-

@@ -261,6 +261,7 @@ struct sximmc_softc {
    bus_dmamap_t sc_idma_map;
    int sc_idma_ndesc;
    char *sc_idma_desc;
    int sc_idma_shift;

    uint32_t sc_intr_rint;
    uint32_t sc_intr_mint;

@@ -297,6 +298,8 @@ sximmc_match(struct device *parent, void *match, void *aux)
        OF_is_compatible(faa->fa_node, "allwinner,sun5i-a13-mmc") ||
        OF_is_compatible(faa->fa_node, "allwinner,sun7i-a20-mmc") ||
        OF_is_compatible(faa->fa_node, "allwinner,sun9i-a80-mmc") ||
        OF_is_compatible(faa->fa_node, "allwinner,sun20i-d1-mmc") ||
        OF_is_compatible(faa->fa_node, "allwinner,sun20i-d1-emmc") ||
        OF_is_compatible(faa->fa_node, "allwinner,sun50i-a64-mmc") ||
        OF_is_compatible(faa->fa_node, "allwinner,sun50i-a64-emmc"));
}

@@ -394,6 +397,10 @@ sximmc_attach(struct device *parent, struct device *self, void *aux)
    else
        sc->sc_dma_ftrglevel = SXIMMC_DMA_FTRGLEVEL_A20;

    if (OF_is_compatible(faa->fa_node, "allwinner,sun20i-d1-mmc") ||
        OF_is_compatible(faa->fa_node, "allwinner,sun20i-d1-emmc"))
        sc->sc_idma_shift = 2;

    if (sc->sc_use_dma) {
        if (sximmc_idma_setup(sc) != 0) {
            printf("%s: failed to setup DMA\n", self->dv_xname);

@@ -443,6 +450,8 @@ sximmc_attach(struct device *parent, struct device *self, void *aux)
    }

    if (OF_is_compatible(sc->sc_node, "allwinner,sun4i-a10-mmc") ||
        OF_is_compatible(sc->sc_node, "allwinner,sun20i-d1-mmc") ||
        OF_is_compatible(sc->sc_node, "allwinner,sun20i-d1-emmc") ||
        OF_is_compatible(sc->sc_node, "allwinner,sun50i-a64-emmc")) {
        saa.max_seg = 0x2000;
    } else {

@@ -853,8 +862,10 @@ sximmc_dma_prepare(struct sximmc_softc *sc, struct sdmmc_command *cmd)
    for (seg = 0; seg < cmd->c_dmamap->dm_nsegs; seg++) {
        bus_addr_t paddr = cmd->c_dmamap->dm_segs[seg].ds_addr;
        bus_size_t len = cmd->c_dmamap->dm_segs[seg].ds_len;

        desc_paddr += sizeof(struct sximmc_idma_descriptor);
        dma[seg].dma_buf_size = htole32(len);
        dma[seg].dma_buf_addr = htole32(paddr);
        dma[seg].dma_buf_addr = htole32(paddr >> sc->sc_idma_shift);
        dma[seg].dma_config = htole32(SXIMMC_IDMA_CONFIG_CH |
            SXIMMC_IDMA_CONFIG_OWN);
        if (seg == 0) {

@@ -870,9 +881,8 @@ sximmc_dma_prepare(struct sximmc_softc *sc, struct sdmmc_command *cmd)
        } else {
            dma[seg].dma_config |=
                htole32(SXIMMC_IDMA_CONFIG_DIC);
            dma[seg].dma_next = htole32(
                desc_paddr + ((seg + 1) *
                sizeof(struct sximmc_idma_descriptor)));
            dma[seg].dma_next =
                htole32(desc_paddr >> sc->sc_idma_shift);
        }
    }

@@ -897,7 +907,8 @@ sximmc_dma_prepare(struct sximmc_softc *sc, struct sdmmc_command *cmd)
    else
        val |= SXIMMC_IDST_TRANSMIT_INT;
    MMC_WRITE(sc, SXIMMC_IDIE, val);
    MMC_WRITE(sc, SXIMMC_DLBA, desc_paddr);
    MMC_WRITE(sc, SXIMMC_DLBA,
        sc->sc_idma_map->dm_segs[0].ds_addr >> sc->sc_idma_shift);
    MMC_WRITE(sc, SXIMMC_FTRGLEVEL, sc->sc_dma_ftrglevel);

    return 0;
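
The new sc_idma_shift handling above suggests that on the D1 the IDMA descriptor pointers and the DLBA register are programmed with word addresses rather than byte addresses (shift of 2 on sun20i-d1, 0 on the older parts); that reading is an inference from the diff, not stated in it. A minimal sketch of the conversion under that assumption:

#include <stdio.h>
#include <stdint.h>

/*
 * Convert a DMA byte address to the value written into an IDMA
 * descriptor or the DLBA register, under the assumed semantics above.
 */
static uint32_t
idma_addr(uint64_t paddr, int idma_shift)
{
    return (uint32_t)(paddr >> idma_shift);
}

int
main(void)
{
    printf("0x%08x\n", idma_addr(0x40001000, 2)); /* D1: 0x10000400 */
    printf("0x%08x\n", idma_addr(0x40001000, 0)); /* others: 0x40001000 */
    return 0;
}
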
sys/dev/ic/qwx.c: 1221 changes (file diff suppressed because it is too large)
@@ -1,4 +1,4 @@
/* $OpenBSD: qwxreg.h,v 1.4 2024/01/30 15:32:04 stsp Exp $ */
/* $OpenBSD: qwxreg.h,v 1.5 2024/02/02 15:44:19 stsp Exp $ */

/*
 * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc.

@@ -10416,6 +10416,24 @@ enum rx_desc_sw_frame_grp_id {
    RX_DESC_SW_FRAME_GRP_ID_PHY_ERR,
};

#define DP_MAX_NWIFI_HDR_LEN 30

#define DP_RX_MPDU_ERR_FCS BIT(0)
#define DP_RX_MPDU_ERR_DECRYPT BIT(1)
#define DP_RX_MPDU_ERR_TKIP_MIC BIT(2)
#define DP_RX_MPDU_ERR_AMSDU_ERR BIT(3)
#define DP_RX_MPDU_ERR_OVERFLOW BIT(4)
#define DP_RX_MPDU_ERR_MSDU_LEN BIT(5)
#define DP_RX_MPDU_ERR_MPDU_LEN BIT(6)
#define DP_RX_MPDU_ERR_UNENCRYPTED_FRAME BIT(7)

enum dp_rx_decap_type {
    DP_RX_DECAP_TYPE_RAW,
    DP_RX_DECAP_TYPE_NATIVE_WIFI,
    DP_RX_DECAP_TYPE_ETHERNET2_DIX,
    DP_RX_DECAP_TYPE_8023,
};

enum rx_desc_decap_type {
    RX_DESC_DECAP_TYPE_RAW,
    RX_DESC_DECAP_TYPE_NATIVE_WIFI,
@@ -1,4 +1,4 @@
/* $OpenBSD: qwxvar.h,v 1.8 2024/01/30 15:32:04 stsp Exp $ */
/* $OpenBSD: qwxvar.h,v 1.9 2024/02/02 15:44:19 stsp Exp $ */

/*
 * Copyright (c) 2018-2019 The Linux Foundation.

@@ -221,21 +221,27 @@ struct ath11k_hw_ops {
        struct hal_tcl_data_cmd *tcl_cmd);
    bool (*rx_desc_get_first_msdu)(struct hal_rx_desc *desc);
    bool (*rx_desc_get_last_msdu)(struct hal_rx_desc *desc);
#endif
    uint8_t (*rx_desc_get_l3_pad_bytes)(struct hal_rx_desc *desc);
    uint8_t *(*rx_desc_get_hdr_status)(struct hal_rx_desc *desc);
    bool (*rx_desc_encrypt_valid)(struct hal_rx_desc *desc);
    int (*rx_desc_encrypt_valid)(struct hal_rx_desc *desc);
    uint32_t (*rx_desc_get_encrypt_type)(struct hal_rx_desc *desc);
    uint8_t (*rx_desc_get_decap_type)(struct hal_rx_desc *desc);
#ifdef notyet
    uint8_t (*rx_desc_get_mesh_ctl)(struct hal_rx_desc *desc);
    bool (*rx_desc_get_ldpc_support)(struct hal_rx_desc *desc);
    bool (*rx_desc_get_mpdu_seq_ctl_vld)(struct hal_rx_desc *desc);
    bool (*rx_desc_get_mpdu_fc_valid)(struct hal_rx_desc *desc);
    uint16_t (*rx_desc_get_mpdu_start_seq_no)(struct hal_rx_desc *desc);
#endif
    uint16_t (*rx_desc_get_msdu_len)(struct hal_rx_desc *desc);
#ifdef notyet
    uint8_t (*rx_desc_get_msdu_sgi)(struct hal_rx_desc *desc);
    uint8_t (*rx_desc_get_msdu_rate_mcs)(struct hal_rx_desc *desc);
    uint8_t (*rx_desc_get_msdu_rx_bw)(struct hal_rx_desc *desc);
#endif
    uint32_t (*rx_desc_get_msdu_freq)(struct hal_rx_desc *desc);
#ifdef notyet
    uint8_t (*rx_desc_get_msdu_pkt_type)(struct hal_rx_desc *desc);
    uint8_t (*rx_desc_get_msdu_nss)(struct hal_rx_desc *desc);
    uint8_t (*rx_desc_get_mpdu_tid)(struct hal_rx_desc *desc);

@@ -245,7 +251,9 @@ struct ath11k_hw_ops {
    uint32_t (*rx_desc_get_mpdu_start_tag)(struct hal_rx_desc *desc);
    uint32_t (*rx_desc_get_mpdu_ppdu_id)(struct hal_rx_desc *desc);
    void (*rx_desc_set_msdu_len)(struct hal_rx_desc *desc, uint16_t len);
#endif
    struct rx_attention *(*rx_desc_get_attention)(struct hal_rx_desc *desc);
#ifdef notyet
    uint8_t *(*rx_desc_get_msdu_payload)(struct hal_rx_desc *desc);
#endif
    void (*reo_setup)(struct qwx_softc *);

@@ -695,9 +703,10 @@ struct ce_attr {

#define CE_DESC_RING_ALIGN 8

struct qwx_rx_data {
    struct mbuf *m;
    bus_dmamap_t map;
struct qwx_rx_msdu {
    TAILQ_ENTRY(qwx_rx_msdu) entry;
    struct mbuf *m;
    struct ieee80211_rxinfo rxi;
    int is_first_msdu;
    int is_last_msdu;
    int is_continuation;

@@ -714,6 +723,14 @@ struct qwx_rx_data {
    uint16_t seq_no;
};

TAILQ_HEAD(qwx_rx_msdu_list, qwx_rx_msdu);

struct qwx_rx_data {
    struct mbuf *m;
    bus_dmamap_t map;
    struct qwx_rx_msdu rx_msdu;
};

struct qwx_tx_data {
    struct mbuf *m;
    bus_dmamap_t map;
@@ -3989,16 +3989,13 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)

    if (!amdgpu_sriov_vf(adev)) {
        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", ucode_prefix);
        err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
        /* don't check this. There are apparently firmwares in the wild with
         * incorrect size in the header
         */
        if (err == -ENODEV)
            goto out;
        err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
        if (err)
            dev_dbg(adev->dev,
                "gfx10: amdgpu_ucode_request() failed \"%s\"\n",
                fw_name);
            goto out;

        /* don't validate this firmware. There are apparently firmwares
         * in the wild with incorrect size in the header
         */
        rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
        version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
        version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);

@@ -6575,7 +6572,7 @@ static int gfx_v10_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
#ifdef __BIG_ENDIAN
    tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
#endif
    tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
    tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
    tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
    tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
    tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);

@@ -3807,7 +3807,7 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
            (order_base_2(prop->queue_size / 4) - 1));
    tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
            (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
    tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
    tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
    tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
    tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
    tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);

@@ -6353,6 +6353,9 @@ static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev,
    mutex_lock(&adev->grbm_idx_mutex);
    for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
        for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
            bitmap = i * adev->gfx.config.max_sh_per_se + j;
            if (!((gfx_v11_0_get_sa_active_bitmap(adev) >> bitmap) & 1))
                continue;
            mask = 1;
            counter = 0;
            gfx_v11_0_select_se_sh(adev, i, j, 0xffffffff, 0);
@@ -170,6 +170,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
    m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
    m->cp_hqd_pq_control |=
            ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
    m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
    pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);

    m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);

@@ -224,6 +224,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
    m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
    m->cp_hqd_pq_control |=
            ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
    m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
    pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);

    m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
@@ -956,6 +956,11 @@ int dm_helper_dmub_aux_transfer_sync(
        struct aux_payload *payload,
        enum aux_return_code_type *operation_result)
{
    if (!link->hpd_status) {
        *operation_result = AUX_RET_ERROR_HPD_DISCON;
        return -1;
    }

    return amdgpu_dm_process_dmub_aux_transfer_sync(ctx, link->link_index, payload,
            operation_result);
}
@@ -131,30 +131,27 @@ static int dcn314_get_active_display_cnt_wa(
    return display_count;
}

static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context,
        bool safe_to_lower, bool disable)
{
    struct dc *dc = clk_mgr_base->ctx->dc;
    int i;

    for (i = 0; i < dc->res_pool->pipe_count; ++i) {
        struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
        struct pipe_ctx *pipe = safe_to_lower
            ? &context->res_ctx.pipe_ctx[i]
            : &dc->current_state->res_ctx.pipe_ctx[i];

        if (pipe->top_pipe || pipe->prev_odm_pipe)
            continue;
        if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
            struct stream_encoder *stream_enc = pipe->stream_res.stream_enc;

            if (disable) {
                if (stream_enc && stream_enc->funcs->disable_fifo)
                    pipe->stream_res.stream_enc->funcs->disable_fifo(stream_enc);
                if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc)
                    pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);

                pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
                reset_sync_context_for_pipe(dc, context, i);
            } else {
                pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);

                if (stream_enc && stream_enc->funcs->enable_fifo)
                    pipe->stream_res.stream_enc->funcs->enable_fifo(stream_enc);
            }
        }
    }

@@ -252,11 +249,11 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
    }

    if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
        dcn314_disable_otg_wa(clk_mgr_base, context, true);
        dcn314_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);

        clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
        dcn314_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
        dcn314_disable_otg_wa(clk_mgr_base, context, false);
        dcn314_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);

        update_dispclk = true;
    }
@@ -873,11 +873,15 @@ bool link_set_dsc_pps_packet(struct pipe_ctx *pipe_ctx, bool enable, bool immedi
{
    struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
    struct dc_stream_state *stream = pipe_ctx->stream;
    DC_LOGGER_INIT(dsc->ctx->logger);

    if (!pipe_ctx->stream->timing.flags.DSC || !dsc)
    if (!pipe_ctx->stream->timing.flags.DSC)
        return false;

    if (!dsc)
        return false;

    DC_LOGGER_INIT(dsc->ctx->logger);

    if (enable) {
        struct dsc_config dsc_cfg;
        uint8_t dsc_packed_pps[128];

@@ -205,7 +205,7 @@ enum dc_status core_link_read_dpcd(
    uint32_t extended_size;
    /* size of the remaining partitioned address space */
    uint32_t size_left_to_read;
    enum dc_status status;
    enum dc_status status = DC_ERROR_UNEXPECTED;
    /* size of the next partition to be read from */
    uint32_t partition_size;
    uint32_t data_index = 0;

@@ -234,7 +234,7 @@ enum dc_status core_link_write_dpcd(
{
    uint32_t partition_size;
    uint32_t data_index = 0;
    enum dc_status status;
    enum dc_status status = DC_ERROR_UNEXPECTED;

    while (size) {
        partition_size = dpcd_get_next_partition_size(address, size);
@@ -920,8 +920,8 @@ bool edp_get_replay_state(const struct dc_link *link, uint64_t *state)
bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream)
{
    /* To-do: Setup Replay */
    struct dc *dc = link->ctx->dc;
    struct dmub_replay *replay = dc->res_pool->replay;
    struct dc *dc;
    struct dmub_replay *replay;
    int i;
    unsigned int panel_inst;
    struct replay_context replay_context = { 0 };

@@ -937,6 +937,10 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream
    if (!link)
        return false;

    dc = link->ctx->dc;

    replay = dc->res_pool->replay;

    if (!replay)
        return false;

@@ -965,8 +969,7 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream
    replay_context.line_time_in_ns = lineTimeInNs;

    if (replay)
        link->replay_settings.replay_feature_enabled =
    link->replay_settings.replay_feature_enabled =
        replay->funcs->replay_copy_settings(replay, link, &replay_context, panel_inst);
    if (link->replay_settings.replay_feature_enabled) {

@@ -24,6 +24,7 @@
#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/power_supply.h>
#include <linux/reboot.h>

#include "amdgpu.h"

@@ -741,16 +742,8 @@ static int smu_late_init(void *handle)
     * handle the switch automatically. Driver involvement
     * is unnecessary.
     */
    if (!smu->dc_controlled_by_gpio) {
        ret = smu_set_power_source(smu,
                adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
                SMU_POWER_SOURCE_DC);
        if (ret) {
            dev_err(adev->dev, "Failed to switch to %s mode!\n",
                adev->pm.ac_power ? "AC" : "DC");
            return ret;
        }
    }
    adev->pm.ac_power = power_supply_is_system_supplied() > 0;
    smu_set_ac_dc(smu);

    if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 1)) ||
        (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 3)))
@@ -1441,10 +1441,12 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
        case 0x3:
            dev_dbg(adev->dev, "Switched to AC mode!\n");
            schedule_work(&smu->interrupt_work);
            adev->pm.ac_power = true;
            break;
        case 0x4:
            dev_dbg(adev->dev, "Switched to DC mode!\n");
            schedule_work(&smu->interrupt_work);
            adev->pm.ac_power = false;
            break;
        case 0x7:
            /*

@@ -1377,10 +1377,12 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
        case 0x3:
            dev_dbg(adev->dev, "Switched to AC mode!\n");
            smu_v13_0_ack_ac_dc_interrupt(smu);
            adev->pm.ac_power = true;
            break;
        case 0x4:
            dev_dbg(adev->dev, "Switched to DC mode!\n");
            smu_v13_0_ack_ac_dc_interrupt(smu);
            adev->pm.ac_power = false;
            break;
        case 0x7:
            /*
@@ -241,7 +241,8 @@ drm_atomic_helper_damage_iter_init(struct drm_atomic_helper_damage_iter *iter,
    iter->plane_src.x2 = (src.x2 >> 16) + !!(src.x2 & 0xFFFF);
    iter->plane_src.y2 = (src.y2 >> 16) + !!(src.y2 & 0xFFFF);

    if (!iter->clips || !drm_rect_equals(&state->src, &old_state->src)) {
    if (!iter->clips || state->ignore_damage_clips ||
        !drm_rect_equals(&state->src, &old_state->src)) {
        iter->clips = NULL;
        iter->num_clips = 0;
        iter->full_update = true;
@@ -678,6 +678,19 @@ int drm_mode_getplane_res(struct drm_device *dev, void *data,
            !file_priv->universal_planes)
            continue;

        /*
         * If we're running on a virtualized driver then,
         * unless userspace advertizes support for the
         * virtualized cursor plane, disable cursor planes
         * because they'll be broken due to missing cursor
         * hotspot info.
         */
        if (plane->type == DRM_PLANE_TYPE_CURSOR &&
            drm_core_check_feature(dev, DRIVER_CURSOR_HOTSPOT) &&
            file_priv->atomic &&
            !file_priv->supports_virtualized_cursor_plane)
            continue;

        if (drm_lease_held(file_priv, plane->base.id)) {
            if (count < plane_resp->count_planes &&
                put_user(plane->base.id, plane_ptr + count))

@@ -1387,6 +1400,7 @@ retry:
out:
    if (fb)
        drm_framebuffer_put(fb);
    fb = NULL;
    if (plane->old_fb)
        drm_framebuffer_put(plane->old_fb);
    plane->old_fb = NULL;
@@ -1155,6 +1155,7 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
    }

    intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP);
    intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);

    /* ensure all panel commands dispatched before enabling transcoder */
    wait_for_cmds_dispatched_to_panel(encoder);

@@ -1255,8 +1256,6 @@ static void gen11_dsi_enable(struct intel_atomic_state *state,
    /* step6d: enable dsi transcoder */
    gen11_dsi_enable_transcoder(encoder);

    intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);

    /* step7: enable backlight */
    intel_backlight_enable(crtc_state, conn_state);
    intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
@@ -674,7 +674,9 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)

    val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));

    val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);
    if (DISPLAY_VER(dev_priv) < 20)
        val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);

    if (IS_HASWELL(dev_priv))
        val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

@@ -1398,9 +1400,21 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
     * can rely on frontbuffer tracking.
     */
    mask = EDP_PSR_DEBUG_MASK_MEMUP |
           EDP_PSR_DEBUG_MASK_HPD |
           EDP_PSR_DEBUG_MASK_LPSP |
           EDP_PSR_DEBUG_MASK_MAX_SLEEP;
           EDP_PSR_DEBUG_MASK_HPD;

    /*
     * For some unknown reason on HSW non-ULT (or at least on
     * Dell Latitude E6540) external displays start to flicker
     * when PSR is enabled on the eDP. SR/PC6 residency is much
     * higher than should be possible with an external display.
     * As a workaround leave LPSP unmasked to prevent PSR entry
     * when external displays are active.
     */
    if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL_ULT(dev_priv))
        mask |= EDP_PSR_DEBUG_MASK_LPSP;

    if (DISPLAY_VER(dev_priv) < 20)
        mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;

    /*
     * No separate pipe reg write mask on hsw/bdw, so have to unmask all
@@ -112,6 +112,15 @@ enum drm_driver_feature {
     * Driver supports user defined GPU VA bindings for GEM objects.
     */
    DRIVER_GEM_GPUVA = BIT(8),
    /**
     * @DRIVER_CURSOR_HOTSPOT:
     *
     * Driver supports and requires cursor hotspot information in the
     * cursor plane (e.g. cursor plane has to actually track the mouse
     * cursor and the clients are required to set hotspot in order for
     * the cursor planes to work correctly).
     */
    DRIVER_CURSOR_HOTSPOT = BIT(9),

    /* IMPORTANT: Below are all the legacy flags, add new ones above. */
@@ -231,6 +231,18 @@ struct drm_file {
     */
    bool is_master;

    /**
     * @supports_virtualized_cursor_plane:
     *
     * This client is capable of handling the cursor plane with the
     * restrictions imposed on it by the virtualized drivers.
     *
     * This implies that the cursor plane has to behave like a cursor
     * i.e. track cursor movement. It also requires setting of the
     * hotspot properties by the client on the cursor plane.
     */
    bool supports_virtualized_cursor_plane;

    /**
     * @master:
     *
@@ -190,6 +190,16 @@ struct drm_plane_state {
     */
    struct drm_property_blob *fb_damage_clips;

    /**
     * @ignore_damage_clips:
     *
     * Set by drivers to indicate the drm_atomic_helper_damage_iter_init()
     * helper that the @fb_damage_clips blob property should be ignored.
     *
     * See :ref:`damage_tracking_properties` for more information.
     */
    bool ignore_damage_clips;

    /**
     * @src:
     *
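
The @ignore_damage_clips documentation above pairs with the drm_atomic_helper_damage_iter_init() change earlier in this commit: when the flag is set, the damage iterator skips the fb_damage_clips blob and falls back to a full-plane update. A hedged sketch of a hypothetical driver ->atomic_check hook that opts out of damage clipping (the function and file names here are illustrative, not taken from the diff):

#include <drm/drm_atomic.h>
#include <drm/drm_plane.h>

/*
 * Hypothetical driver hook: this plane cannot honour per-rect damage,
 * so ask the damage iterator to ignore the fb_damage_clips blob.
 */
static int
example_plane_atomic_check(struct drm_plane *plane,
    struct drm_atomic_state *state)
{
    struct drm_plane_state *new_state =
        drm_atomic_get_new_plane_state(state, plane);

    new_state->ignore_damage_clips = true;
    return 0;
}
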