sync with OpenBSD -current

This commit is contained in:
purplerain 2024-08-21 05:59:35 +00:00
parent f36b410006
commit df7f961280
Signed by: purplerain
GPG key ID: F42C07F07E2E35B7
37 changed files with 741 additions and 712 deletions

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: qwz.c,v 1.5 2024/08/19 08:22:30 jsg Exp $ */
/* $OpenBSD: qwz.c,v 1.7 2024/08/20 21:24:15 patrick Exp $ */
/*
* Copyright 2023 Stefan Sperling <stsp@openbsd.org>
@@ -124,6 +124,7 @@ uint32_t qwz_debug = 0
#endif
int qwz_ce_init_pipes(struct qwz_softc *);
int qwz_hal_srng_create_config_wcn7850(struct qwz_softc *);
int qwz_hal_srng_src_num_free(struct qwz_softc *, struct hal_srng *, int);
int qwz_ce_per_engine_service(struct qwz_softc *, uint16_t);
int qwz_hal_srng_setup(struct qwz_softc *, enum hal_ring_type, int, int,
@@ -1009,19 +1010,10 @@ qwz_init_wmi_config_qca6390(struct qwz_softc *sc,
}
void
qwz_hw_wcn7850_reo_setup(struct qwz_softc *sc)
qwz_hal_reo_hw_setup(struct qwz_softc *sc, uint32_t ring_hash_map)
{
uint32_t reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
uint32_t val;
/* Each hash entry uses four bits to map to a particular ring. */
uint32_t ring_hash_map = HAL_HASH_ROUTING_RING_SW1 << 0 |
HAL_HASH_ROUTING_RING_SW2 << 4 |
HAL_HASH_ROUTING_RING_SW3 << 8 |
HAL_HASH_ROUTING_RING_SW4 << 12 |
HAL_HASH_ROUTING_RING_SW1 << 16 |
HAL_HASH_ROUTING_RING_SW2 << 20 |
HAL_HASH_ROUTING_RING_SW3 << 24 |
HAL_HASH_ROUTING_RING_SW4 << 28;
val = sc->ops.read32(sc, reo_base + HAL_REO1_GEN_ENABLE);
val |= FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE, 1) |
@@ -1718,7 +1710,6 @@ const struct ath12k_hw_ops wcn7850_ops = {
.get_hw_mac_from_pdev_id = qwz_hw_ipq6018_mac_from_pdev_id,
.mac_id_to_pdev_id = qwz_hw_mac_id_to_pdev_id_qca6390,
.mac_id_to_srng_id = qwz_hw_mac_id_to_srng_id_qca6390,
.reo_setup = qwz_hw_wcn7850_reo_setup,
};
#define ATH12K_TX_RING_MASK_0 BIT(0)
@@ -1738,186 +1729,22 @@ const struct ath12k_hw_ops wcn7850_ops = {
#define ATH12K_REO_STATUS_RING_MASK_0 0x1
#define ATH12K_RXDMA2HOST_RING_MASK_0 0x1
#define ATH12K_RXDMA2HOST_RING_MASK_1 0x2
#define ATH12K_RXDMA2HOST_RING_MASK_2 0x4
#define ATH12K_HOST2RXDMA_RING_MASK_0 0x1
#define ATH12K_HOST2RXDMA_RING_MASK_1 0x2
#define ATH12K_HOST2RXDMA_RING_MASK_2 0x4
#define ATH12K_RX_MON_STATUS_RING_MASK_0 0x1
#define ATH12K_RX_MON_STATUS_RING_MASK_1 0x2
#define ATH12K_RX_MON_STATUS_RING_MASK_2 0x4
const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_ipq8074 = {
.tx = {
ATH12K_TX_RING_MASK_0,
ATH12K_TX_RING_MASK_1,
ATH12K_TX_RING_MASK_2,
},
.rx_mon_status = {
0, 0, 0, 0,
ATH12K_RX_MON_STATUS_RING_MASK_0,
ATH12K_RX_MON_STATUS_RING_MASK_1,
ATH12K_RX_MON_STATUS_RING_MASK_2,
},
.rx = {
0, 0, 0, 0, 0, 0, 0,
ATH12K_RX_RING_MASK_0,
ATH12K_RX_RING_MASK_1,
ATH12K_RX_RING_MASK_2,
ATH12K_RX_RING_MASK_3,
},
.rx_err = {
ATH12K_RX_ERR_RING_MASK_0,
},
.rx_wbm_rel = {
ATH12K_RX_WBM_REL_RING_MASK_0,
},
.reo_status = {
ATH12K_REO_STATUS_RING_MASK_0,
},
.rxdma2host = {
ATH12K_RXDMA2HOST_RING_MASK_0,
ATH12K_RXDMA2HOST_RING_MASK_1,
ATH12K_RXDMA2HOST_RING_MASK_2,
},
.host2rxdma = {
ATH12K_HOST2RXDMA_RING_MASK_0,
ATH12K_HOST2RXDMA_RING_MASK_1,
ATH12K_HOST2RXDMA_RING_MASK_2,
},
.tx_mon_dest = {
},
};
const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_qca6390 = {
.tx = {
ATH12K_TX_RING_MASK_0,
},
.rx_mon_status = {
0, 0, 0, 0,
ATH12K_RX_MON_STATUS_RING_MASK_0,
ATH12K_RX_MON_STATUS_RING_MASK_1,
ATH12K_RX_MON_STATUS_RING_MASK_2,
},
.rx = {
0, 0, 0, 0, 0, 0, 0,
ATH12K_RX_RING_MASK_0,
ATH12K_RX_RING_MASK_1,
ATH12K_RX_RING_MASK_2,
ATH12K_RX_RING_MASK_3,
},
.rx_err = {
ATH12K_RX_ERR_RING_MASK_0,
},
.rx_wbm_rel = {
ATH12K_RX_WBM_REL_RING_MASK_0,
},
.reo_status = {
ATH12K_REO_STATUS_RING_MASK_0,
},
.rxdma2host = {
ATH12K_RXDMA2HOST_RING_MASK_0,
ATH12K_RXDMA2HOST_RING_MASK_1,
ATH12K_RXDMA2HOST_RING_MASK_2,
},
.host2rxdma = {
},
.tx_mon_dest = {
},
};
const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_qcn9074 = {
.tx = {
ATH12K_TX_RING_MASK_0,
ATH12K_TX_RING_MASK_1,
ATH12K_TX_RING_MASK_2,
},
.rx_mon_status = {
0, 0, 0,
ATH12K_RX_MON_STATUS_RING_MASK_0,
ATH12K_RX_MON_STATUS_RING_MASK_1,
ATH12K_RX_MON_STATUS_RING_MASK_2,
},
.rx = {
0, 0, 0, 0,
ATH12K_RX_RING_MASK_0,
ATH12K_RX_RING_MASK_1,
ATH12K_RX_RING_MASK_2,
ATH12K_RX_RING_MASK_3,
},
.rx_err = {
0, 0, 0,
ATH12K_RX_ERR_RING_MASK_0,
},
.rx_wbm_rel = {
0, 0, 0,
ATH12K_RX_WBM_REL_RING_MASK_0,
},
.reo_status = {
0, 0, 0,
ATH12K_REO_STATUS_RING_MASK_0,
},
.rxdma2host = {
0, 0, 0,
ATH12K_RXDMA2HOST_RING_MASK_0,
},
.host2rxdma = {
0, 0, 0,
ATH12K_HOST2RXDMA_RING_MASK_0,
},
.tx_mon_dest = {
},
};
const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_wcn6750 = {
.tx = {
ATH12K_TX_RING_MASK_0,
0,
ATH12K_TX_RING_MASK_2,
0,
ATH12K_TX_RING_MASK_4,
},
.rx_mon_status = {
0, 0, 0, 0, 0, 0,
ATH12K_RX_MON_STATUS_RING_MASK_0,
},
.rx = {
0, 0, 0, 0, 0, 0, 0,
ATH12K_RX_RING_MASK_0,
ATH12K_RX_RING_MASK_1,
ATH12K_RX_RING_MASK_2,
ATH12K_RX_RING_MASK_3,
},
.rx_err = {
0, ATH12K_RX_ERR_RING_MASK_0,
},
.rx_wbm_rel = {
0, ATH12K_RX_WBM_REL_RING_MASK_0,
},
.reo_status = {
0, ATH12K_REO_STATUS_RING_MASK_0,
},
.rxdma2host = {
ATH12K_RXDMA2HOST_RING_MASK_0,
ATH12K_RXDMA2HOST_RING_MASK_1,
ATH12K_RXDMA2HOST_RING_MASK_2,
},
.host2rxdma = {
},
.tx_mon_dest = {
},
};
#define ATH12K_TX_MON_RING_MASK_0 0x1
#define ATH12K_TX_MON_RING_MASK_1 0x2
const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_wcn7850 = {
.tx = {
ATH12K_TX_RING_MASK_0,
ATH12K_TX_RING_MASK_1,
ATH12K_TX_RING_MASK_2,
ATH12K_TX_RING_MASK_4,
},
.rx_mon_status = {
.rx_mon_dest = {
},
.rx = {
0, 0, 0,
@@ -1935,8 +1762,6 @@ const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_wcn7850 = {
.reo_status = {
ATH12K_REO_STATUS_RING_MASK_0,
},
.rxdma2host = {
},
.host2rxdma = {
},
.tx_mon_dest = {
@@ -2964,19 +2789,17 @@ const struct ce_attr qwz_host_ce_config_wcn7850[QWZ_CE_COUNT_QCA6390] = {
},
};
static const struct ath12k_hw_tcl2wbm_rbm_map ath12k_hw_tcl2wbm_rbm_map_wcn7850[] = {
static const struct ath12k_hal_tcl_to_wbm_rbm_map
ath12k_hal_wcn7850_tcl_to_wbm_rbm_map[DP_TCL_NUM_RING_MAX] = {
{
.tcl_ring_num = 0,
.wbm_ring_num = 0,
.rbm_id = HAL_RX_BUF_RBM_SW0_BM,
},
{
.tcl_ring_num = 1,
.wbm_ring_num = 2,
.rbm_id = HAL_RX_BUF_RBM_SW2_BM,
},
{
.tcl_ring_num = 2,
.wbm_ring_num = 4,
.rbm_id = HAL_RX_BUF_RBM_SW4_BM,
},
@@ -2984,11 +2807,15 @@ static const struct ath12k_hw_tcl2wbm_rbm_map ath12k_hw_tcl2wbm_rbm_map_wcn7850[
static const struct ath12k_hw_hal_params ath12k_hw_hal_params_wcn7850 = {
.rx_buf_rbm = HAL_RX_BUF_RBM_SW1_BM,
.tcl2wbm_rbm_map = ath12k_hw_tcl2wbm_rbm_map_wcn7850,
.wbm2sw_cc_enable = HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW0_EN |
HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW2_EN |
HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW3_EN |
HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW4_EN,
HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW4_EN,
};
const struct hal_ops hal_wcn7850_ops = {
.create_srng_config = qwz_hal_srng_create_config_wcn7850,
.tcl_to_wbm_rbm_map = ath12k_hal_wcn7850_tcl_to_wbm_rbm_map,
};
static const struct ath12k_hw_params ath12k_hw_params[] = {
@@ -3021,6 +2848,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.supports_shadow_regs = true,
.fix_l1ss = false,
.hal_params = &ath12k_hw_hal_params_wcn7850,
.hal_ops = &hal_wcn7850_ops,
.qmi_cnss_feature_bitmap = BIT(CNSS_QDSS_CFG_MISS_V01) |
BIT(CNSS_PCIE_PERST_NO_PULL_V01),
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
@@ -8203,14 +8031,24 @@ int
qwz_dp_srng_calculate_msi_group(struct qwz_softc *sc, enum hal_ring_type type,
int ring_num)
{
const struct ath12k_hal_tcl_to_wbm_rbm_map *map;
const uint8_t *grp_mask;
int i;
switch (type) {
case HAL_WBM2SW_RELEASE:
if (ring_num == DP_RX_RELEASE_RING_NUM) {
if (ring_num == HAL_WBM2SW_REL_ERR_RING_NUM) {
grp_mask = &sc->hw_params.ring_mask->rx_wbm_rel[0];
ring_num = 0;
} else {
map = sc->hw_params.hal_ops->tcl_to_wbm_rbm_map;
for (i = 0; i < sc->hw_params.max_tx_ring; i++) {
if (ring_num == map[i].wbm_ring_num) {
ring_num = i;
break;
}
}
grp_mask = &sc->hw_params.ring_mask->tx[0];
}
break;
@@ -8225,14 +8063,11 @@ qwz_dp_srng_calculate_msi_group(struct qwz_softc *sc, enum hal_ring_type type,
break;
case HAL_RXDMA_MONITOR_STATUS:
case HAL_RXDMA_MONITOR_DST:
grp_mask = &sc->hw_params.ring_mask->rx_mon_status[0];
grp_mask = &sc->hw_params.ring_mask->rx_mon_dest[0];
break;
case HAL_TX_MONITOR_DST:
grp_mask = &sc->hw_params.ring_mask->tx_mon_dest[0];
break;
case HAL_RXDMA_DST:
grp_mask = &sc->hw_params.ring_mask->rxdma2host[0];
break;
case HAL_RXDMA_BUF:
grp_mask = &sc->hw_params.ring_mask->host2rxdma[0];
break;
@@ -8969,8 +8804,6 @@ qwz_dp_srng_common_cleanup(struct qwz_softc *sc)
int i;
qwz_dp_srng_cleanup(sc, &dp->wbm_desc_rel_ring);
qwz_dp_srng_cleanup(sc, &dp->tcl_cmd_ring);
qwz_dp_srng_cleanup(sc, &dp->tcl_status_ring);
for (i = 0; i < sc->hw_params.max_tx_ring; i++) {
qwz_dp_srng_cleanup(sc, &dp->tx_ring[i].tcl_data_ring);
qwz_dp_srng_cleanup(sc, &dp->tx_ring[i].tcl_comp_ring);
@@ -9387,13 +9220,15 @@ out:
#endif
return ret;
}
int
qwz_dp_srng_common_setup(struct qwz_softc *sc)
{
struct qwz_dp *dp = &sc->dp;
const struct ath12k_hal_tcl_to_wbm_rbm_map *map;
struct hal_srng *srng;
int i, ret;
uint8_t tcl_num, wbm_num;
uint8_t tx_comp_ring_num;
ret = qwz_dp_srng_setup(sc, &dp->wbm_desc_rel_ring, HAL_SW2WBM_RELEASE,
0, 0, DP_WBM_RELEASE_RING_SIZE);
@@ -9403,31 +9238,12 @@ qwz_dp_srng_common_setup(struct qwz_softc *sc)
goto err;
}
ret = qwz_dp_srng_setup(sc, &dp->tcl_cmd_ring, HAL_TCL_CMD,
0, 0, DP_TCL_CMD_RING_SIZE);
if (ret) {
printf("%s: failed to set up tcl_cmd ring :%d\n",
sc->sc_dev.dv_xname, ret);
goto err;
}
ret = qwz_dp_srng_setup(sc, &dp->tcl_status_ring, HAL_TCL_STATUS,
0, 0, DP_TCL_STATUS_RING_SIZE);
if (ret) {
printf("%s: failed to set up tcl_status ring :%d\n",
sc->sc_dev.dv_xname, ret);
goto err;
}
for (i = 0; i < sc->hw_params.max_tx_ring; i++) {
const struct ath12k_hw_hal_params *hal_params;
hal_params = sc->hw_params.hal_params;
tcl_num = hal_params->tcl2wbm_rbm_map[i].tcl_ring_num;
wbm_num = hal_params->tcl2wbm_rbm_map[i].wbm_ring_num;
map = sc->hw_params.hal_ops->tcl_to_wbm_rbm_map;
tx_comp_ring_num = map[i].wbm_ring_num;
ret = qwz_dp_srng_setup(sc, &dp->tx_ring[i].tcl_data_ring,
HAL_TCL_DATA, tcl_num, 0, sc->hw_params.tx_ring_size);
HAL_TCL_DATA, i, 0, DP_TCL_DATA_RING_SIZE);
if (ret) {
printf("%s: failed to set up tcl_data ring (%d) :%d\n",
sc->sc_dev.dv_xname, i, ret);
@@ -9435,7 +9251,7 @@ qwz_dp_srng_common_setup(struct qwz_softc *sc)
}
ret = qwz_dp_srng_setup(sc, &dp->tx_ring[i].tcl_comp_ring,
HAL_WBM2SW_RELEASE, wbm_num, 0, DP_TX_COMP_RING_SIZE);
HAL_WBM2SW_RELEASE, tx_comp_ring_num, 0, DP_TX_COMP_RING_SIZE);
if (ret) {
printf("%s: failed to set up tcl_comp ring (%d) :%d\n",
sc->sc_dev.dv_xname, i, ret);
@@ -9491,8 +9307,17 @@ qwz_dp_srng_common_setup(struct qwz_softc *sc)
/* When hash based routing of rx packet is enabled, 32 entries to map
* the hash values to the ring will be configured.
*/
sc->hw_params.hw_ops->reo_setup(sc);
* Each hash entry uses four bits to map to a particular ring. */
uint32_t ring_hash_map = HAL_HASH_ROUTING_RING_SW1 << 0 |
HAL_HASH_ROUTING_RING_SW2 << 4 |
HAL_HASH_ROUTING_RING_SW3 << 8 |
HAL_HASH_ROUTING_RING_SW4 << 12 |
HAL_HASH_ROUTING_RING_SW1 << 16 |
HAL_HASH_ROUTING_RING_SW2 << 20 |
HAL_HASH_ROUTING_RING_SW3 << 24 |
HAL_HASH_ROUTING_RING_SW4 << 28;
qwz_hal_reo_hw_setup(sc, ring_hash_map);
return 0;
err:
@@ -16893,14 +16718,9 @@ qwz_dp_service_srng(struct qwz_softc *sc, int grp_id)
struct qwz_pdev_dp *dp = &sc->pdev_dp;
int i, j, ret = 0;
for (i = 0; i < sc->hw_params.max_tx_ring; i++) {
const struct ath12k_hw_tcl2wbm_rbm_map *map;
map = &sc->hw_params.hal_params->tcl2wbm_rbm_map[i];
if ((sc->hw_params.ring_mask->tx[grp_id]) &
(1 << (map->wbm_ring_num)) &&
qwz_dp_tx_completion_handler(sc, i))
ret = 1;
if (sc->hw_params.ring_mask->tx[grp_id]) {
i = fls(sc->hw_params.ring_mask->tx[grp_id]) - 1;
qwz_dp_tx_completion_handler(sc, i);
}
if (sc->hw_params.ring_mask->rx_err[grp_id] &&
@@ -16921,7 +16741,7 @@ qwz_dp_service_srng(struct qwz_softc *sc, int grp_id)
for (j = 0; j < sc->hw_params.num_rxmda_per_pdev; j++) {
int id = i * sc->hw_params.num_rxmda_per_pdev + j;
if ((sc->hw_params.ring_mask->rx_mon_status[grp_id] &
if ((sc->hw_params.ring_mask->rx_mon_dest[grp_id] &
(1 << id)) == 0)
continue;
@@ -16934,23 +16754,10 @@ qwz_dp_service_srng(struct qwz_softc *sc, int grp_id)
qwz_dp_process_reo_status(sc))
ret = 1;
for (i = 0; i < sc->num_radios; i++) {
for (j = 0; j < sc->hw_params.num_rxmda_per_pdev; j++) {
int id = i * sc->hw_params.num_rxmda_per_pdev + j;
if (sc->hw_params.ring_mask->rxdma2host[grp_id] &
(1 << (id))) {
if (qwz_dp_process_rxdma_err(sc, id))
ret = 1;
}
if (sc->hw_params.ring_mask->host2rxdma[grp_id] &
(1 << id)) {
qwz_dp_rxbufs_replenish(sc, id,
&dp->rx_refill_buf_ring, 0,
sc->hw_params.hal_params->rx_buf_rbm);
}
}
if (sc->hw_params.ring_mask->host2rxdma[grp_id]) {
qwz_dp_rxbufs_replenish(sc, 0 /* FIXME */,
&dp->rx_refill_buf_ring, 0,
sc->hw_params.hal_params->rx_buf_rbm);
}
return ret;
@@ -19577,7 +19384,7 @@ static const struct hal_srng_config hw_srng_config_templ[] = {
};
int
qwz_hal_srng_create_config(struct qwz_softc *sc)
qwz_hal_srng_create_config_wcn7850(struct qwz_softc *sc)
{
struct ath12k_hal *hal = &sc->hal;
struct hal_srng_config *s;
@@ -19915,7 +19722,7 @@ qwz_hal_srng_init(struct qwz_softc *sc)
memset(hal, 0, sizeof(*hal));
ret = qwz_hal_srng_create_config(sc);
ret = sc->hw_params.hal_ops->create_srng_config(sc);
if (ret)
goto err_hal;
@@ -23670,7 +23477,7 @@ qwz_dp_tx(struct qwz_softc *sc, struct qwz_vif *arvif, uint8_t pdev_id,
ring_selector = 0;
ti.ring_id = ring_selector % sc->hw_params.max_tx_ring;
ti.rbm_id = sc->hw_params.hal_params->tcl2wbm_rbm_map[ti.ring_id].rbm_id;
ti.rbm_id = sc->hw_params.hal_ops->tcl_to_wbm_rbm_map[ti.ring_id].rbm_id;
ring_map |= (1 << ti.ring_id);

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: qwzvar.h,v 1.4 2024/08/16 00:26:54 patrick Exp $ */
/* $OpenBSD: qwzvar.h,v 1.6 2024/08/20 21:24:15 patrick Exp $ */
/*
* Copyright (c) 2018-2019 The Linux Foundation.
@@ -57,14 +57,13 @@ struct qwz_softc;
struct ath12k_hw_ring_mask {
uint8_t tx[ATH12K_EXT_IRQ_GRP_NUM_MAX];
uint8_t rx_mon_status[ATH12K_EXT_IRQ_GRP_NUM_MAX];
uint8_t rx_mon_dest[ATH12K_EXT_IRQ_GRP_NUM_MAX];
uint8_t rx[ATH12K_EXT_IRQ_GRP_NUM_MAX];
uint8_t rx_err[ATH12K_EXT_IRQ_GRP_NUM_MAX];
uint8_t rx_wbm_rel[ATH12K_EXT_IRQ_GRP_NUM_MAX];
uint8_t reo_status[ATH12K_EXT_IRQ_GRP_NUM_MAX];
uint8_t rxdma2host[ATH12K_EXT_IRQ_GRP_NUM_MAX];
uint8_t host2rxdma[ATH12K_EXT_IRQ_GRP_NUM_MAX];
uint8_t tx_mon_dest[ATH12K_EXT_IRQ_GRP_NUM_MAX];
uint8_t tx_mon_dest[ATH12K_EXT_IRQ_GRP_NUM_MAX];
};
#define ATH12K_FW_DIR "qwz"
@@ -78,8 +77,7 @@ struct ath12k_hw_ring_mask {
#define QWZ_FW_BUILD_ID_MASK "QC_IMAGE_VERSION_STRING="
struct ath12k_hw_tcl2wbm_rbm_map {
uint8_t tcl_ring_num;
struct ath12k_hal_tcl_to_wbm_rbm_map {
uint8_t wbm_ring_num;
uint8_t rbm_id;
};
@@ -167,6 +165,11 @@ struct hal_tx_status {
uint32_t rate_stats;
};
struct hal_ops {
int (*create_srng_config)(struct qwz_softc *);
const struct ath12k_hal_tcl_to_wbm_rbm_map *tcl_to_wbm_rbm_map;
};
struct ath12k_hw_params {
const char *name;
uint16_t hw_rev;
@@ -226,6 +229,7 @@ struct ath12k_hw_params {
bool credit_flow;
uint8_t max_tx_ring;
const struct ath12k_hw_hal_params *hal_params;
const struct hal_ops *hal_ops;
uint64_t qmi_cnss_feature_bitmap;
#if notyet
bool supports_dynamic_smps_6ghz;
@@ -297,9 +301,6 @@ struct ath12k_hw_ops {
struct rx_attention *(*rx_desc_get_attention)(struct hal_rx_desc *desc);
#ifdef notyet
uint8_t *(*rx_desc_get_msdu_payload)(struct hal_rx_desc *desc);
#endif
void (*reo_setup)(struct qwz_softc *);
#ifdef notyet
uint16_t (*mpdu_info_get_peerid)(uint8_t *tlv_data);
bool (*rx_desc_mac_addr2_valid)(struct hal_rx_desc *desc);
uint8_t* (*rx_desc_mpdu_start_addr2)(struct hal_rx_desc *desc);
@@ -1197,8 +1198,6 @@ struct qwz_dp {
struct dp_link_desc_bank link_desc_banks[DP_LINK_DESC_BANKS_MAX];
struct dp_srng wbm_idle_ring;
struct dp_srng wbm_desc_rel_ring;
struct dp_srng tcl_cmd_ring;
struct dp_srng tcl_status_ring;
struct dp_srng reo_reinject_ring;
struct dp_srng rx_rel_ring;
struct dp_srng reo_except_ring;

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: if_qwz_pci.c,v 1.3 2024/08/16 00:26:54 patrick Exp $ */
/* $OpenBSD: if_qwz_pci.c,v 1.4 2024/08/20 21:24:15 patrick Exp $ */
/*
* Copyright 2023 Stefan Sperling <stsp@openbsd.org>
@@ -1506,9 +1506,8 @@ qwz_pcic_ext_irq_config(struct qwz_softc *sc, struct pci_attach_args *pa)
sc->hw_params.ring_mask->rx_err[i] ||
sc->hw_params.ring_mask->rx_wbm_rel[i] ||
sc->hw_params.ring_mask->reo_status[i] ||
sc->hw_params.ring_mask->rxdma2host[i] ||
sc->hw_params.ring_mask->host2rxdma[i] ||
sc->hw_params.ring_mask->rx_mon_status[i]) {
sc->hw_params.ring_mask->rx_mon_dest[i]) {
num_irq = 1;
}

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: if_rge.c,v 1.30 2024/08/20 00:09:12 dlg Exp $ */
/* $OpenBSD: if_rge.c,v 1.34 2024/08/21 01:22:31 dlg Exp $ */
/*
* Copyright (c) 2019, 2020, 2023, 2024
@@ -74,7 +74,6 @@ int rge_ifmedia_upd(struct ifnet *);
void rge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
int rge_allocmem(struct rge_softc *);
int rge_newbuf(struct rge_queues *);
void rge_discard_rxbuf(struct rge_queues *, int);
void rge_rx_list_init(struct rge_queues *);
void rge_tx_list_init(struct rge_queues *);
void rge_fill_rx_ring(struct rge_queues *);
@@ -567,7 +566,7 @@ rge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
break;
case SIOCGIFRXR:
error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
NULL, RGE_JUMBO_FRAMELEN, &sc->sc_queues->q_rx.rge_rx_ring);
NULL, MCLBYTES, &sc->sc_queues->q_rx.rge_rx_ring);
break;
default:
error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
@@ -886,7 +885,8 @@ rge_stop(struct ifnet *ifp)
if (q->q_rx.rge_head != NULL) {
m_freem(q->q_rx.rge_head);
q->q_rx.rge_head = q->q_rx.rge_tail = NULL;
q->q_rx.rge_head = NULL;
q->q_rx.rge_tail = &q->q_rx.rge_head;
}
/* Free the TX list buffers. */
@@ -1141,13 +1141,15 @@ rge_newbuf(struct rge_queues *q)
struct rge_rx_desc *r;
struct rge_rxq *rxq;
bus_dmamap_t rxmap;
uint32_t cmdsts;
int idx;
m = MCLGETL(NULL, M_DONTWAIT, RGE_JUMBO_FRAMELEN);
m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
if (m == NULL)
return (ENOBUFS);
m->m_len = m->m_pkthdr.len = RGE_JUMBO_FRAMELEN;
m->m_len = m->m_pkthdr.len = MCLBYTES;
m_adj(m, ETHER_ALIGN);
idx = q->q_rx.rge_rxq_prodidx;
rxq = &q->q_rx.rge_rxq[idx];
@@ -1166,15 +1168,23 @@ rge_newbuf(struct rge_queues *q)
rxq->rxq_mbuf = m;
r->hi_qword1.rx_qword4.rge_extsts = 0;
cmdsts = rxmap->dm_segs[0].ds_len;
if (idx == RGE_RX_LIST_CNT - 1)
cmdsts |= RGE_RDCMDSTS_EOR;
r->hi_qword1.rx_qword4.rge_cmdsts = htole32(cmdsts);
r->hi_qword1.rx_qword4.rge_extsts = htole32(0);
r->hi_qword0.rge_addr = htole64(rxmap->dm_segs[0].ds_addr);
r->hi_qword1.rx_qword4.rge_cmdsts = htole32(rxmap->dm_segs[0].ds_len);
if (idx == RGE_RX_LIST_CNT - 1)
r->hi_qword1.rx_qword4.rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
r->hi_qword1.rx_qword4.rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);
bus_dmamap_sync(sc->sc_dmat, q->q_rx.rge_rx_list_map,
idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
BUS_DMASYNC_PREWRITE);
bus_dmamap_sync(sc->sc_dmat, q->q_rx.rge_rx_list_map,
idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
BUS_DMASYNC_POSTWRITE);
cmdsts |= RGE_RDCMDSTS_OWN;
r->hi_qword1.rx_qword4.rge_cmdsts = htole32(cmdsts);
bus_dmamap_sync(sc->sc_dmat, q->q_rx.rge_rx_list_map,
idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
@@ -1184,34 +1194,16 @@ rge_newbuf(struct rge_queues *q)
return (0);
}
void
rge_discard_rxbuf(struct rge_queues *q, int idx)
{
struct rge_softc *sc = q->q_sc;
struct rge_rx_desc *r;
r = &q->q_rx.rge_rx_list[idx];
r->hi_qword1.rx_qword4.rge_cmdsts = htole32(RGE_JUMBO_FRAMELEN);
r->hi_qword1.rx_qword4.rge_extsts = 0;
if (idx == RGE_RX_LIST_CNT - 1)
r->hi_qword1.rx_qword4.rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
r->hi_qword1.rx_qword4.rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);
bus_dmamap_sync(sc->sc_dmat, q->q_rx.rge_rx_list_map,
idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
void
rge_rx_list_init(struct rge_queues *q)
{
memset(q->q_rx.rge_rx_list, 0, RGE_RX_LIST_SZ);
q->q_rx.rge_rxq_prodidx = q->q_rx.rge_rxq_considx = 0;
q->q_rx.rge_head = q->q_rx.rge_tail = NULL;
q->q_rx.rge_head = NULL;
q->q_rx.rge_tail = &q->q_rx.rge_head;
if_rxr_init(&q->q_rx.rge_rx_ring, 32, RGE_RX_LIST_CNT);
if_rxr_init(&q->q_rx.rge_rx_ring, 32, RGE_RX_LIST_CNT - 1);
rge_fill_rx_ring(q);
}
@@ -1261,80 +1253,76 @@ rge_rxeof(struct rge_queues *q)
struct rge_rx_desc *cur_rx;
struct rge_rxq *rxq;
uint32_t rxstat, extsts;
int i, total_len, rx = 0;
int i, mlen, rx = 0;
int cons;
for (i = q->q_rx.rge_rxq_considx; if_rxr_inuse(rxr) > 0;
i = RGE_NEXT_RX_DESC(i)) {
/* Invalidate the descriptor memory. */
bus_dmamap_sync(sc->sc_dmat, q->q_rx.rge_rx_list_map,
i * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
i = cons = q->q_rx.rge_rxq_considx;
while (if_rxr_inuse(rxr) > 0) {
cur_rx = &q->q_rx.rge_rx_list[i];
bus_dmamap_sync(sc->sc_dmat, q->q_rx.rge_rx_list_map,
i * sizeof(*cur_rx), sizeof(*cur_rx),
BUS_DMASYNC_POSTREAD);
rxstat = letoh32(cur_rx->hi_qword1.rx_qword4.rge_cmdsts);
extsts = letoh32(cur_rx->hi_qword1.rx_qword4.rge_extsts);
if (rxstat & RGE_RDCMDSTS_OWN)
if (rxstat & RGE_RDCMDSTS_OWN) {
bus_dmamap_sync(sc->sc_dmat, q->q_rx.rge_rx_list_map,
i * sizeof(*cur_rx), sizeof(*cur_rx),
BUS_DMASYNC_PREREAD);
break;
}
total_len = rxstat & RGE_RDCMDSTS_FRAGLEN;
rxq = &q->q_rx.rge_rxq[i];
m = rxq->rxq_mbuf;
rxq->rxq_mbuf = NULL;
if_rxr_put(rxr, 1);
rx = 1;
/* Invalidate the RX mbuf and unload its map. */
bus_dmamap_sync(sc->sc_dmat, rxq->rxq_dmamap, 0,
rxq->rxq_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(sc->sc_dmat, rxq->rxq_dmamap);
m = rxq->rxq_mbuf;
rxq->rxq_mbuf = NULL;
if ((rxstat & (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) !=
(RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) {
ifp->if_ierrors++;
i = RGE_NEXT_RX_DESC(i);
if_rxr_put(rxr, 1);
rx = 1;
if (ISSET(rxstat, RGE_RDCMDSTS_SOF)) {
if (q->q_rx.rge_head != NULL) {
ifp->if_ierrors++;
m_freem(q->q_rx.rge_head);
q->q_rx.rge_tail = &q->q_rx.rge_head;
}
m->m_pkthdr.len = 0;
} else if (q->q_rx.rge_head == NULL) {
m_freem(m);
rge_discard_rxbuf(q, i);
continue;
}
} else
CLR(m->m_flags, M_PKTHDR);
*q->q_rx.rge_tail = m;
q->q_rx.rge_tail = &m->m_next;
mlen = rxstat & RGE_RDCMDSTS_FRAGLEN;
m->m_len = mlen;
m = q->q_rx.rge_head;
m->m_pkthdr.len += mlen;
if (rxstat & RGE_RDCMDSTS_RXERRSUM) {
ifp->if_ierrors++;
/*
* If this is part of a multi-fragment packet,
* discard all the pieces.
*/
if (q->q_rx.rge_head != NULL) {
m_freem(q->q_rx.rge_head);
q->q_rx.rge_head = q->q_rx.rge_tail = NULL;
}
m_freem(m);
rge_discard_rxbuf(q, i);
q->q_rx.rge_head = NULL;
q->q_rx.rge_tail = &q->q_rx.rge_head;
continue;
}
if (q->q_rx.rge_head != NULL) {
m->m_len = total_len;
/*
* Special case: if there's 4 bytes or less
* in this buffer, the mbuf can be discarded:
* the last 4 bytes is the CRC, which we don't
* care about anyway.
*/
if (m->m_len <= ETHER_CRC_LEN) {
q->q_rx.rge_tail->m_len -=
(ETHER_CRC_LEN - m->m_len);
m_freem(m);
} else {
m->m_len -= ETHER_CRC_LEN;
m->m_flags &= ~M_PKTHDR;
q->q_rx.rge_tail->m_next = m;
}
m = q->q_rx.rge_head;
q->q_rx.rge_head = q->q_rx.rge_tail = NULL;
m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
} else
m->m_pkthdr.len = m->m_len =
(total_len - ETHER_CRC_LEN);
if (!ISSET(rxstat, RGE_RDCMDSTS_EOF))
continue;
q->q_rx.rge_head = NULL;
q->q_rx.rge_tail = &q->q_rx.rge_head;
m_adj(m, -ETHER_CRC_LEN);
extsts = letoh32(cur_rx->hi_qword1.rx_qword4.rge_extsts);
/* Check IP header checksum. */
if (!(extsts & RGE_RDEXTSTS_IPCSUMERR) &&
@@ -1361,13 +1349,32 @@ rge_rxeof(struct rge_queues *q)
ml_enqueue(&ml, m);
}
if (!rx)
return (0);
if (i >= cons) {
bus_dmamap_sync(sc->sc_dmat, q->q_rx.rge_rx_list_map,
cons * sizeof(*cur_rx), (i - cons) * sizeof(*cur_rx),
BUS_DMASYNC_POSTWRITE);
} else {
bus_dmamap_sync(sc->sc_dmat, q->q_rx.rge_rx_list_map,
cons * sizeof(*cur_rx),
(RGE_RX_LIST_CNT - cons) * sizeof(*cur_rx),
BUS_DMASYNC_POSTWRITE);
if (i > 0) {
bus_dmamap_sync(sc->sc_dmat, q->q_rx.rge_rx_list_map,
0, i * sizeof(*cur_rx),
BUS_DMASYNC_POSTWRITE);
}
}
if (ifiq_input(&ifp->if_rcv, &ml))
if_rxr_livelocked(rxr);
q->q_rx.rge_rxq_considx = i;
rge_fill_rx_ring(q);
return (rx);
return (1);
}
int

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: if_rgereg.h,v 1.12 2024/08/20 00:09:12 dlg Exp $ */
/* $OpenBSD: if_rgereg.h,v 1.13 2024/08/21 01:12:52 dlg Exp $ */
/*
* Copyright (c) 2019, 2020 Kevin Lo <kevlo@openbsd.org>
@@ -270,7 +270,7 @@ struct rge_rx_desc {
uint32_t rsvd8;
} rx_ptp;
} hi_qword1;
};
} __packed __aligned(16);
#define RGE_RDCMDSTS_RXERRSUM 0x00100000
#define RGE_RDCMDSTS_EOF 0x01000000
@@ -344,7 +344,7 @@ struct rge_rx {
struct rge_rx_desc *rge_rx_list;
struct mbuf *rge_head;
struct mbuf *rge_tail;
struct mbuf **rge_tail;
};
struct rge_queues {