sync with OpenBSD -current

This commit is contained in:
purplerain 2024-01-25 20:43:37 +00:00
parent 125fcc4eee
commit df63e3891f
Signed by: purplerain
GPG key ID: F42C07F07E2E35B7
24 changed files with 3451 additions and 650 deletions

File diff suppressed because it is too large Load diff

View file

@ -1,4 +1,4 @@
/* $OpenBSD: qwxreg.h,v 1.1 2023/12/28 17:36:29 stsp Exp $ */
/* $OpenBSD: qwxreg.h,v 1.2 2024/01/25 10:11:04 stsp Exp $ */
/*
* Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc.
@ -2911,7 +2911,7 @@ struct wmi_vdev_start_req_arg {
};
struct peer_create_params {
const uint8_t *peer_addr;
uint8_t *peer_addr;
uint32_t peer_type;
uint32_t vdev_id;
};
@ -8257,6 +8257,85 @@ struct hal_reo_cmd_hdr {
uint32_t info0;
} __packed;
#define HAL_SRNG_DESC_LOOP_CNT 0xf0000000
#define HAL_REO_CMD_FLG_NEED_STATUS BIT(0)
#define HAL_REO_CMD_FLG_STATS_CLEAR BIT(1)
#define HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER BIT(2)
#define HAL_REO_CMD_FLG_FLUSH_RELEASE_BLOCKING BIT(3)
#define HAL_REO_CMD_FLG_FLUSH_NO_INVAL BIT(4)
#define HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS BIT(5)
#define HAL_REO_CMD_FLG_FLUSH_ALL BIT(6)
#define HAL_REO_CMD_FLG_UNBLK_RESOURCE BIT(7)
#define HAL_REO_CMD_FLG_UNBLK_CACHE BIT(8)
/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO0_UPD_* fields */
#define HAL_REO_CMD_UPD0_RX_QUEUE_NUM BIT(8)
#define HAL_REO_CMD_UPD0_VLD BIT(9)
#define HAL_REO_CMD_UPD0_ALDC BIT(10)
#define HAL_REO_CMD_UPD0_DIS_DUP_DETECTION BIT(11)
#define HAL_REO_CMD_UPD0_SOFT_REORDER_EN BIT(12)
#define HAL_REO_CMD_UPD0_AC BIT(13)
#define HAL_REO_CMD_UPD0_BAR BIT(14)
#define HAL_REO_CMD_UPD0_RETRY BIT(15)
#define HAL_REO_CMD_UPD0_CHECK_2K_MODE BIT(16)
#define HAL_REO_CMD_UPD0_OOR_MODE BIT(17)
#define HAL_REO_CMD_UPD0_BA_WINDOW_SIZE BIT(18)
#define HAL_REO_CMD_UPD0_PN_CHECK BIT(19)
#define HAL_REO_CMD_UPD0_EVEN_PN BIT(20)
#define HAL_REO_CMD_UPD0_UNEVEN_PN BIT(21)
#define HAL_REO_CMD_UPD0_PN_HANDLE_ENABLE BIT(22)
#define HAL_REO_CMD_UPD0_PN_SIZE BIT(23)
#define HAL_REO_CMD_UPD0_IGNORE_AMPDU_FLG BIT(24)
#define HAL_REO_CMD_UPD0_SVLD BIT(25)
#define HAL_REO_CMD_UPD0_SSN BIT(26)
#define HAL_REO_CMD_UPD0_SEQ_2K_ERR BIT(27)
#define HAL_REO_CMD_UPD0_PN_ERR BIT(28)
#define HAL_REO_CMD_UPD0_PN_VALID BIT(29)
#define HAL_REO_CMD_UPD0_PN BIT(30)
/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO1_* fields */
#define HAL_REO_CMD_UPD1_VLD BIT(16)
#define HAL_REO_CMD_UPD1_ALDC GENMASK(18, 17)
#define HAL_REO_CMD_UPD1_DIS_DUP_DETECTION BIT(19)
#define HAL_REO_CMD_UPD1_SOFT_REORDER_EN BIT(20)
#define HAL_REO_CMD_UPD1_AC GENMASK(22, 21)
#define HAL_REO_CMD_UPD1_BAR BIT(23)
#define HAL_REO_CMD_UPD1_RETRY BIT(24)
#define HAL_REO_CMD_UPD1_CHECK_2K_MODE BIT(25)
#define HAL_REO_CMD_UPD1_OOR_MODE BIT(26)
#define HAL_REO_CMD_UPD1_PN_CHECK BIT(27)
#define HAL_REO_CMD_UPD1_EVEN_PN BIT(28)
#define HAL_REO_CMD_UPD1_UNEVEN_PN BIT(29)
#define HAL_REO_CMD_UPD1_PN_HANDLE_ENABLE BIT(30)
#define HAL_REO_CMD_UPD1_IGNORE_AMPDU_FLG BIT(31)
/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO2_* fields */
#define HAL_REO_CMD_UPD2_SVLD BIT(10)
#define HAL_REO_CMD_UPD2_SSN GENMASK(22, 11)
#define HAL_REO_CMD_UPD2_SEQ_2K_ERR BIT(23)
#define HAL_REO_CMD_UPD2_PN_ERR BIT(24)
#define HAL_REO_DEST_RING_CTRL_HASH_RING_MAP GENMASK(31, 8)
struct ath11k_hal_reo_cmd {
uint32_t addr_lo;
uint32_t flag;
uint32_t upd0;
uint32_t upd1;
uint32_t upd2;
uint32_t pn[4];
uint16_t rx_queue_num;
uint16_t min_rel;
uint16_t min_fwd;
uint8_t addr_hi;
uint8_t ac_list;
uint8_t blocking_idx;
uint16_t ba_window_size;
uint8_t pn_size;
};
#define HAL_REO_GET_QUEUE_STATS_INFO0_QUEUE_ADDR_HI GENMASK(7, 0)
#define HAL_REO_GET_QUEUE_STATS_INFO0_CLEAR_STATS BIT(8)
@ -9862,6 +9941,11 @@ struct hal_reo_desc_thresh_reached_status {
* entries into this Ring has looped around the ring.
*/
#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_0 0xDDBEEF
#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_1 0xADBEEF
#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_2 0xBDBEEF
#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_3 0xCDBEEF
#define HAL_TX_ADDRX_EN 1
#define HAL_TX_ADDRY_EN 2

View file

@ -1,4 +1,4 @@
/* $OpenBSD: qwxvar.h,v 1.1 2023/12/28 17:36:29 stsp Exp $ */
/* $OpenBSD: qwxvar.h,v 1.5 2024/01/25 17:00:21 stsp Exp $ */
/*
* Copyright (c) 2018-2019 The Linux Foundation.
@ -69,12 +69,12 @@ struct ath11k_hw_ring_mask {
#define ATH11K_FW_DIR "qwx"
#define ATH11K_BOARD_MAGIC "QCA-ATH11K-BOARD"
#define ATH11K_BOARD_API2_FILE "board-2.bin"
#define ATH11K_DEFAULT_BOARD_FILE "board.bin"
#define ATH11K_DEFAULT_CAL_FILE "caldata.bin"
#define ATH11K_AMSS_FILE "amss.bin"
#define ATH11K_M3_FILE "m3.bin"
#define ATH11K_REGDB_FILE "regdb.bin"
#define ATH11K_BOARD_API2_FILE "board-2"
#define ATH11K_DEFAULT_BOARD_FILE "board"
#define ATH11K_DEFAULT_CAL_FILE "caldata"
#define ATH11K_AMSS_FILE "amss"
#define ATH11K_M3_FILE "m3"
#define ATH11K_REGDB_FILE "regdb"
#define QWX_FW_BUILD_ID_MASK "QC_IMAGE_VERSION_STRING="
@ -214,9 +214,9 @@ struct ath11k_hw_ops {
#endif
void (*wmi_init_config)(struct qwx_softc *sc,
struct target_resource_config *config);
#if notyet
int (*mac_id_to_pdev_id)(struct ath11k_hw_params *hw, int mac_id);
int (*mac_id_to_srng_id)(struct ath11k_hw_params *hw, int mac_id);
#if notyet
void (*tx_mesh_enable)(struct ath11k_base *ab,
struct hal_tcl_data_cmd *tcl_cmd);
bool (*rx_desc_get_first_msdu)(struct hal_rx_desc *desc);
@ -645,6 +645,13 @@ struct ath11k_hal {
#endif
};
enum hal_pn_type {
HAL_PN_TYPE_NONE,
HAL_PN_TYPE_WPA,
HAL_PN_TYPE_WAPI_EVEN,
HAL_PN_TYPE_WAPI_UNEVEN,
};
enum hal_ce_desc {
HAL_CE_DESC_SRC,
HAL_CE_DESC_DST,
@ -839,8 +846,9 @@ struct qwx_hp_update_timer {
struct dp_rx_tid {
uint8_t tid;
struct qwx_dmamem *mem;
uint32_t *vaddr;
bus_addr_t paddr;
uint64_t paddr;
uint32_t size;
uint32_t ba_win_sz;
int active;
@ -1267,6 +1275,177 @@ struct dp_rxdma_ring {
int bufs_max;
};
enum hal_rx_mon_status {
HAL_RX_MON_STATUS_PPDU_NOT_DONE,
HAL_RX_MON_STATUS_PPDU_DONE,
HAL_RX_MON_STATUS_BUF_DONE,
};
struct hal_rx_user_status {
uint32_t mcs:4,
nss:3,
ofdma_info_valid:1,
dl_ofdma_ru_start_index:7,
dl_ofdma_ru_width:7,
dl_ofdma_ru_size:8;
uint32_t ul_ofdma_user_v0_word0;
uint32_t ul_ofdma_user_v0_word1;
uint32_t ast_index;
uint32_t tid;
uint16_t tcp_msdu_count;
uint16_t udp_msdu_count;
uint16_t other_msdu_count;
uint16_t frame_control;
uint8_t frame_control_info_valid;
uint8_t data_sequence_control_info_valid;
uint16_t first_data_seq_ctrl;
uint32_t preamble_type;
uint16_t ht_flags;
uint16_t vht_flags;
uint16_t he_flags;
uint8_t rs_flags;
uint32_t mpdu_cnt_fcs_ok;
uint32_t mpdu_cnt_fcs_err;
uint32_t mpdu_fcs_ok_bitmap[8];
uint32_t mpdu_ok_byte_count;
uint32_t mpdu_err_byte_count;
};
#define HAL_INVALID_PEERID 0xffff
#define VHT_SIG_SU_NSS_MASK 0x7
#define HAL_RX_MAX_MCS 12
#define HAL_RX_MAX_NSS 8
#define HAL_TLV_STATUS_PPDU_NOT_DONE HAL_RX_MON_STATUS_PPDU_NOT_DONE
#define HAL_TLV_STATUS_PPDU_DONE HAL_RX_MON_STATUS_PPDU_DONE
#define HAL_TLV_STATUS_BUF_DONE HAL_RX_MON_STATUS_BUF_DONE
struct hal_rx_mon_ppdu_info {
uint32_t ppdu_id;
uint32_t ppdu_ts;
uint32_t num_mpdu_fcs_ok;
uint32_t num_mpdu_fcs_err;
uint32_t preamble_type;
uint16_t chan_num;
uint16_t tcp_msdu_count;
uint16_t tcp_ack_msdu_count;
uint16_t udp_msdu_count;
uint16_t other_msdu_count;
uint16_t peer_id;
uint8_t rate;
uint8_t mcs;
uint8_t nss;
uint8_t bw;
uint8_t vht_flag_values1;
uint8_t vht_flag_values2;
uint8_t vht_flag_values3[4];
uint8_t vht_flag_values4;
uint8_t vht_flag_values5;
uint16_t vht_flag_values6;
uint8_t is_stbc;
uint8_t gi;
uint8_t ldpc;
uint8_t beamformed;
uint8_t rssi_comb;
uint8_t rssi_chain_pri20[HAL_RX_MAX_NSS];
uint8_t tid;
uint16_t ht_flags;
uint16_t vht_flags;
uint16_t he_flags;
uint16_t he_mu_flags;
uint8_t dcm;
uint8_t ru_alloc;
uint8_t reception_type;
uint64_t tsft;
uint64_t rx_duration;
uint16_t frame_control;
uint32_t ast_index;
uint8_t rs_fcs_err;
uint8_t rs_flags;
uint8_t cck_flag;
uint8_t ofdm_flag;
uint8_t ulofdma_flag;
uint8_t frame_control_info_valid;
uint16_t he_per_user_1;
uint16_t he_per_user_2;
uint8_t he_per_user_position;
uint8_t he_per_user_known;
uint16_t he_flags1;
uint16_t he_flags2;
uint8_t he_RU[4];
uint16_t he_data1;
uint16_t he_data2;
uint16_t he_data3;
uint16_t he_data4;
uint16_t he_data5;
uint16_t he_data6;
uint32_t ppdu_len;
uint32_t prev_ppdu_id;
uint32_t device_id;
uint16_t first_data_seq_ctrl;
uint8_t monitor_direct_used;
uint8_t data_sequence_control_info_valid;
uint8_t ltf_size;
uint8_t rxpcu_filter_pass;
char rssi_chain[8][8];
struct hal_rx_user_status userstats;
};
enum dp_mon_status_buf_state {
/* PPDU id matches in dst ring and status ring */
DP_MON_STATUS_MATCH,
/* status ring dma is not done */
DP_MON_STATUS_NO_DMA,
/* status ring is lagging, reap status ring */
DP_MON_STATUS_LAG,
/* status ring is leading, reap dst ring and drop */
DP_MON_STATUS_LEAD,
/* replenish monitor status ring */
DP_MON_STATUS_REPLINISH,
};
struct qwx_pdev_mon_stats {
uint32_t status_ppdu_state;
uint32_t status_ppdu_start;
uint32_t status_ppdu_end;
uint32_t status_ppdu_compl;
uint32_t status_ppdu_start_mis;
uint32_t status_ppdu_end_mis;
uint32_t status_ppdu_done;
uint32_t dest_ppdu_done;
uint32_t dest_mpdu_done;
uint32_t dest_mpdu_drop;
uint32_t dup_mon_linkdesc_cnt;
uint32_t dup_mon_buf_cnt;
uint32_t dest_mon_stuck;
uint32_t dest_mon_not_reaped;
};
struct qwx_mon_data {
struct dp_link_desc_bank link_desc_banks[DP_LINK_DESC_BANKS_MAX];
struct hal_rx_mon_ppdu_info mon_ppdu_info;
uint32_t mon_ppdu_status;
uint32_t mon_last_buf_cookie;
uint64_t mon_last_linkdesc_paddr;
uint16_t chan_noise_floor;
bool hold_mon_dst_ring;
enum dp_mon_status_buf_state buf_state;
bus_addr_t mon_status_paddr;
struct dp_full_mon_mpdu *mon_mpdu;
#ifdef notyet
struct hal_sw_mon_ring_entries sw_mon_entries;
#endif
struct qwx_pdev_mon_stats rx_mon_stats;
#ifdef notyet
/* lock for monitor data */
spinlock_t mon_lock;
struct sk_buff_head rx_status_q;
#endif
};
#define MAX_RXDMA_PER_PDEV 2
struct qwx_pdev_dp {
@ -1285,8 +1464,8 @@ struct qwx_pdev_dp {
struct dp_rxdma_ring rx_mon_status_refill_ring[MAX_RXDMA_PER_PDEV];
#if 0
struct ieee80211_rx_status rx_status;
struct ath11k_mon_data mon_data;
#endif
struct qwx_mon_data mon_data;
};
struct qwx_vif {
@ -1341,8 +1520,8 @@ struct qwx_vif {
bool wpaie_present;
bool bcca_zero_sent;
bool do_not_send_tmpl;
struct ieee80211_channel *chan;
#if 0
struct ieee80211_chanctx_conf chanctx;
struct ath11k_arp_ns_offload arp_ns_offload;
struct ath11k_rekey_data rekey_data;
#endif
@ -1359,6 +1538,22 @@ struct qwx_survey_info {
uint64_t time_busy;
};
#define ATH11K_IRQ_NUM_MAX 52
#define ATH11K_EXT_IRQ_NUM_MAX 16
struct qwx_ext_irq_grp {
struct qwx_softc *sc;
uint32_t irqs[ATH11K_EXT_IRQ_NUM_MAX];
uint32_t num_irq;
uint32_t grp_id;
uint64_t timestamp;
#if 0
bool napi_enabled;
struct napi_struct napi;
struct net_device napi_ndev;
#endif
};
struct qwx_softc {
struct device sc_dev;
struct ieee80211com sc_ic;
@ -1410,6 +1605,8 @@ struct qwx_softc {
enum ath11k_crypt_mode crypto_mode;
enum ath11k_hw_txrx_mode frame_mode;
struct qwx_ext_irq_grp ext_irq_grp[ATH11K_EXT_IRQ_GRP_NUM_MAX];
uint16_t qmi_txn_id;
int qmi_cal_done;
struct qwx_qmi_ce_cfg qmi_ce_cfg;
@ -1426,6 +1623,9 @@ struct qwx_softc {
uint32_t allocated_vdev_map;
uint32_t free_vdev_map;
int num_peers;
int peer_mapped;
int peer_delete_done;
int vdev_setup_done;
struct qwx_dbring_cap *db_caps;
uint32_t num_db_cap;
@ -1443,7 +1643,7 @@ struct qwx_softc {
uint32_t pdev_id;
} target_pdev_ids[MAX_RADIOS];
uint8_t target_pdev_count;
struct qwx_pdev *pdevs_active[MAX_RADIOS];
uint32_t pdevs_active;
int pdevs_macaddr_valid;
struct ath11k_hal_reg_capabilities_ext hal_reg_cap[MAX_RADIOS];
@ -1470,15 +1670,19 @@ struct qwx_softc {
enum ath11k_hw_rev sc_hw_rev;
struct qwx_device_id id;
char sc_bus_str[4]; /* "pci" or "ahb" */
int num_msivec;
uint32_t msi_addr_lo;
uint32_t msi_addr_hi;
uint32_t msi_data_start;
const struct qwx_msi_config *msi_cfg;
uint32_t msi_ce_irqmask;
struct qmi_wlanfw_request_mem_ind_msg_v01 *sc_req_mem_ind;
};
int qwx_intr(struct qwx_softc *);
int qwx_ce_intr(void *);
int qwx_ext_intr(void *);
int qwx_dp_service_srng(struct qwx_softc *, int);
int qwx_init_hw_params(struct qwx_softc *);
int qwx_attach(struct qwx_softc *);
@ -1495,8 +1699,46 @@ void qwx_init_task(void *);
int qwx_newstate(struct ieee80211com *, enum ieee80211_state, int);
void qwx_newstate_task(void *);
struct ath11k_peer {
#if 0
struct list_head list;
struct ieee80211_sta *sta;
#endif
int vdev_id;
#if 0
u8 addr[ETH_ALEN];
#endif
int peer_id;
uint16_t ast_hash;
uint8_t pdev_id;
uint16_t hw_peer_id;
#if 0
/* protected by ab->data_lock */
struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
#endif
struct dp_rx_tid rx_tid[IEEE80211_NUM_TID + 1];
#if 0
/* peer id based rhashtable list pointer */
struct rhash_head rhash_id;
/* peer addr based rhashtable list pointer */
struct rhash_head rhash_addr;
/* Info used in MMIC verification of
* RX fragments
*/
struct crypto_shash *tfm_mmic;
u8 mcast_keyidx;
u8 ucast_keyidx;
u16 sec_type;
u16 sec_type_grp;
bool is_authorized;
bool dp_setup_done;
#endif
};
struct qwx_node {
struct ieee80211_node ni;
struct ath11k_peer peer;
};
struct ieee80211_node *qwx_node_alloc(struct ieee80211com *);
@ -1506,6 +1748,7 @@ void qwx_qrtr_recv_msg(struct qwx_softc *, struct mbuf *);
int qwx_hal_srng_init(struct qwx_softc *);
int qwx_ce_alloc_pipes(struct qwx_softc *);
void qwx_ce_free_pipes(struct qwx_softc *);
void qwx_ce_rx_post_buf(struct qwx_softc *);
void qwx_ce_get_shadow_config(struct qwx_softc *, uint32_t **, uint32_t *);
@ -1521,3 +1764,11 @@ qwx_ce_get_attr_flags(struct qwx_softc *sc, int ce_id)
KASSERT(ce_id < sc->hw_params.ce_count);
return sc->hw_params.host_ce_config[ce_id].flags;
}
static inline enum ieee80211_edca_ac qwx_tid_to_ac(uint32_t tid)
{
return (((tid == 0) || (tid == 3)) ? EDCA_AC_BE :
((tid == 1) || (tid == 2)) ? EDCA_AC_BK :
((tid == 4) || (tid == 5)) ? EDCA_AC_VI :
EDCA_AC_VO);
}

View file

@ -3653,10 +3653,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->rmmio_base = pci_resource_start(adev->pdev, 2);
adev->rmmio_size = pci_resource_len(adev->pdev, 2);
}
#endif
for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
#ifdef __linux__
adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
if (!adev->rmmio)
return -ENOMEM;

View file

@ -3478,63 +3478,9 @@ amdgpu_attachhook(struct device *self)
struct drm_gem_object *obj;
struct amdgpu_bo *rbo;
/* from amdgpu_driver_load_kms() */
/* amdgpu_device_init should report only fatal error
* like memory allocation failure or iomapping failure,
* or memory manager initialization failure, it must
* properly initialize the GPU MC controller and permit
* VRAM allocation
*/
r = amdgpu_device_init(adev, adev->flags);
if (r) {
dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
r = amdgpu_driver_load_kms(adev, adev->flags);
if (r)
goto out;
}
adev->pm.rpm_mode = AMDGPU_RUNPM_NONE;
if (amdgpu_device_supports_px(dev) &&
(amdgpu_runtime_pm != 0)) { /* enable PX as runtime mode */
adev->pm.rpm_mode = AMDGPU_RUNPM_PX;
dev_info(adev->dev, "Using ATPX for runtime pm\n");
} else if (amdgpu_device_supports_boco(dev) &&
(amdgpu_runtime_pm != 0)) { /* enable boco as runtime mode */
adev->pm.rpm_mode = AMDGPU_RUNPM_BOCO;
dev_info(adev->dev, "Using BOCO for runtime pm\n");
} else if (amdgpu_device_supports_baco(dev) &&
(amdgpu_runtime_pm != 0)) {
switch (adev->asic_type) {
case CHIP_VEGA20:
case CHIP_ARCTURUS:
/* enable BACO as runpm mode if runpm=1 */
if (amdgpu_runtime_pm > 0)
adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
break;
case CHIP_VEGA10:
/* enable BACO as runpm mode if noretry=0 */
if (!adev->gmc.noretry)
adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
break;
default:
/* enable BACO as runpm mode on CI+ */
adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
break;
}
if (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO)
dev_info(adev->dev, "Using BACO for runtime pm\n");
}
/* Call ACPI methods: require modeset init
* but failure is not fatal
*/
acpi_status = amdgpu_acpi_init(adev);
if (acpi_status)
dev_dbg(dev->dev, "Error during ACPI methods call\n");
if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_LOAD))
DRM_WARN("smart shift update failed\n");
/*
* 1. don't init fbdev on hw without DCE

View file

@ -69,7 +69,6 @@ void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
mutex_unlock(&mgpu_info.mutex);
}
#ifdef __linux__
/**
* amdgpu_driver_unload_kms - Main unload function for KMS.
*
@ -96,7 +95,6 @@ void amdgpu_driver_unload_kms(struct drm_device *dev)
amdgpu_acpi_fini(adev);
amdgpu_device_fini_hw(adev);
}
#endif /* __linux__ */
void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
{
@ -123,7 +121,6 @@ void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
mutex_unlock(&mgpu_info.mutex);
}
#ifdef __linux__
/**
* amdgpu_driver_load_kms - Main load function for KMS.
*
@ -202,7 +199,6 @@ out:
return r;
}
#endif /* __linux__ */
static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
struct drm_amdgpu_query_fw *query_fw,

View file

@ -1,4 +1,4 @@
/* $OpenBSD: if_qwx_pci.c,v 1.2 2024/01/11 09:52:19 stsp Exp $ */
/* $OpenBSD: if_qwx_pci.c,v 1.4 2024/01/25 17:00:21 stsp Exp $ */
/*
* Copyright 2023 Stefan Sperling <stsp@openbsd.org>
@ -330,6 +330,8 @@ struct qwx_mhi_newstate {
int queued;
};
#define QWX_NUM_MSI_VEC 32
struct qwx_pci_softc {
struct qwx_softc sc_sc;
pci_chipset_tag_t sc_pc;
@ -337,7 +339,10 @@ struct qwx_pci_softc {
int sc_cap_off;
int sc_msi_off;
pcireg_t sc_msi_cap;
void *sc_ih;
void *sc_ih[QWX_NUM_MSI_VEC];
char sc_ivname[QWX_NUM_MSI_VEC][16];
struct qwx_ext_irq_grp ext_irq_grp[ATH11K_EXT_IRQ_GRP_NUM_MAX];
int mhi_irq[2];
bus_space_tag_t sc_st;
bus_space_handle_t sc_sh;
bus_addr_t sc_map;
@ -414,6 +419,7 @@ void qwx_pcic_write32(struct qwx_softc *, uint32_t, uint32_t);
void qwx_pcic_ext_irq_enable(struct qwx_softc *);
void qwx_pcic_ext_irq_disable(struct qwx_softc *);
int qwx_pcic_config_irq(struct qwx_softc *, struct pci_attach_args *);
int qwx_pci_start(struct qwx_softc *);
void qwx_pci_stop(struct qwx_softc *);
@ -475,6 +481,8 @@ void qwx_pci_intr_data_event_tx(struct qwx_pci_softc *,
struct qwx_mhi_ring_element *);
int qwx_pci_intr_data_event(struct qwx_pci_softc *,
struct qwx_pci_event_ring *);
int qwx_pci_intr_mhi_ctrl(void *);
int qwx_pci_intr_mhi_data(void *);
int qwx_pci_intr(void *);
struct qwx_pci_ops {
@ -555,6 +563,89 @@ const struct qwx_msi_config qwx_msi_config_one_msi = {
},
};
const struct qwx_msi_config qwx_msi_config[] = {
{
.total_vectors = 32,
.total_users = 4,
.users = (struct qwx_msi_user[]) {
{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
},
.hw_rev = ATH11K_HW_QCA6390_HW20,
},
{
.total_vectors = 16,
.total_users = 3,
.users = (struct qwx_msi_user[]) {
{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
{ .name = "CE", .num_vectors = 5, .base_vector = 3 },
{ .name = "DP", .num_vectors = 8, .base_vector = 8 },
},
.hw_rev = ATH11K_HW_QCN9074_HW10,
},
{
.total_vectors = 32,
.total_users = 4,
.users = (struct qwx_msi_user[]) {
{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
},
.hw_rev = ATH11K_HW_WCN6855_HW20,
},
{
.total_vectors = 32,
.total_users = 4,
.users = (struct qwx_msi_user[]) {
{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
},
.hw_rev = ATH11K_HW_WCN6855_HW21,
},
{
.total_vectors = 28,
.total_users = 2,
.users = (struct qwx_msi_user[]) {
{ .name = "CE", .num_vectors = 10, .base_vector = 0 },
{ .name = "DP", .num_vectors = 18, .base_vector = 10 },
},
.hw_rev = ATH11K_HW_WCN6750_HW10,
},
};
int
qwx_pcic_init_msi_config(struct qwx_softc *sc)
{
const struct qwx_msi_config *msi_config;
int i;
if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags)) {
sc->msi_cfg = &qwx_msi_config_one_msi;
return 0;
}
for (i = 0; i < nitems(qwx_msi_config); i++) {
msi_config = &qwx_msi_config[i];
if (msi_config->hw_rev == sc->sc_hw_rev)
break;
}
if (i == nitems(qwx_msi_config)) {
printf("%s: failed to fetch msi config, "
"unsupported hw version: 0x%x\n",
sc->sc_dev.dv_xname, sc->sc_hw_rev);
return EINVAL;
}
sc->msi_cfg = msi_config;
return 0;
}
int
qwx_pci_alloc_msi(struct qwx_softc *sc)
{
@ -562,11 +653,6 @@ qwx_pci_alloc_msi(struct qwx_softc *sc)
uint64_t addr;
pcireg_t data;
/*
* OpenBSD only supports one MSI vector at present.
* Multiple vectors are only supported with MSI-X.
*/
if (psc->sc_msi_cap & PCI_MSI_MC_C64) {
uint64_t addr_hi;
pcireg_t addr_lo;
@ -592,7 +678,6 @@ qwx_pci_alloc_msi(struct qwx_softc *sc)
DPRINTF("%s: MSI addr: 0x%llx MSI data: 0x%x\n", sc->sc_dev.dv_xname,
addr, data);
sc->msi_cfg = &qwx_msi_config_one_msi;
return 0;
}
@ -661,6 +746,7 @@ qwx_pcic_get_user_msi_vector(struct qwx_softc *sc, char *user_name,
DPRINTF("%s: Failed to find MSI assignment for %s\n",
sc->sc_dev.dv_xname, user_name);
return EINVAL;
}
@ -732,15 +818,31 @@ qwx_pci_attach(struct device *parent, struct device *self, void *aux)
sc->mem = psc->sc_map;
if (pci_intr_map_msi(pa, &ih)) {
printf(": can't map interrupt\n");
return;
sc->num_msivec = 32;
if (pci_intr_enable_msivec(pa, sc->num_msivec) != 0) {
sc->num_msivec = 1;
if (pci_intr_map_msi(pa, &ih) != 0) {
printf(": can't map interrupt\n");
return;
}
clear_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags);
} else {
if (pci_intr_map_msivec(pa, 0, &ih) != 0 &&
pci_intr_map_msi(pa, &ih) != 0) {
printf(": can't map interrupt\n");
return;
}
set_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags);
psc->mhi_irq[MHI_ER_CTRL] = 1;
psc->mhi_irq[MHI_ER_DATA] = 2;
}
intrstr = pci_intr_string(psc->sc_pc, ih);
psc->sc_ih = pci_intr_establish(psc->sc_pc, ih, IPL_NET,
qwx_pci_intr, psc, sc->sc_dev.dv_xname);
if (psc->sc_ih == NULL) {
snprintf(psc->sc_ivname[0], sizeof(psc->sc_ivname[0]), "%s:bhi",
sc->sc_dev.dv_xname);
psc->sc_ih[0] = pci_intr_establish(psc->sc_pc, ih, IPL_NET,
qwx_pci_intr, psc, psc->sc_ivname[0]);
if (psc->sc_ih[0] == NULL) {
printf(": can't establish interrupt");
if (intrstr != NULL)
printf(" at %s", intrstr);
@ -749,6 +851,46 @@ qwx_pci_attach(struct device *parent, struct device *self, void *aux)
}
printf(": %s\n", intrstr);
if (test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags)) {
int msivec;
msivec = psc->mhi_irq[MHI_ER_CTRL];
if (pci_intr_map_msivec(pa, msivec, &ih) != 0 &&
pci_intr_map_msi(pa, &ih) != 0) {
printf(": can't map interrupt\n");
return;
}
snprintf(psc->sc_ivname[msivec],
sizeof(psc->sc_ivname[msivec]),
"%s:mhic", sc->sc_dev.dv_xname);
psc->sc_ih[msivec] = pci_intr_establish(psc->sc_pc, ih,
IPL_NET, qwx_pci_intr_mhi_ctrl, psc,
psc->sc_ivname[msivec]);
if (psc->sc_ih[msivec] == NULL) {
printf("%s: can't establish interrupt\n",
sc->sc_dev.dv_xname);
return;
}
msivec = psc->mhi_irq[MHI_ER_DATA];
if (pci_intr_map_msivec(pa, msivec, &ih) != 0 &&
pci_intr_map_msi(pa, &ih) != 0) {
printf(": can't map interrupt\n");
return;
}
snprintf(psc->sc_ivname[msivec],
sizeof(psc->sc_ivname[msivec]),
"%s:mhid", sc->sc_dev.dv_xname);
psc->sc_ih[msivec] = pci_intr_establish(psc->sc_pc, ih,
IPL_NET, qwx_pci_intr_mhi_data, psc,
psc->sc_ivname[msivec]);
if (psc->sc_ih[msivec] == NULL) {
printf("%s: can't establish interrupt\n",
sc->sc_dev.dv_xname);
return;
}
}
pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
switch (PCI_PRODUCT(pa->pa_id)) {
@ -810,16 +952,10 @@ unsupported_wcn6855_soc:
/* register PCI ops */
psc->sc_pci_ops = pci_ops;
/* init MSI config */
clear_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags);
#if notyet
ret = ath11k_pcic_init_msi_config(ab);
if (ret) {
ath11k_err(ab, "failed to init msi config: %d\n", ret);
error = qwx_pcic_init_msi_config(sc);
if (error)
goto err_pci_free_region;
}
#endif
error = qwx_pci_alloc_msi(sc);
if (error) {
printf("%s: failed to enable msi: %d\n", sc->sc_dev.dv_xname,
@ -891,17 +1027,17 @@ unsupported_wcn6855_soc:
sc->sc_nswq = taskq_create("qwxns", 1, IPL_NET, 0);
if (sc->sc_nswq == NULL)
goto err_hal_srng_deinit;
goto err_ce_free;
qwx_pci_init_qmi_ce_config(sc);
#if notyet
ret = ath11k_pcic_config_irq(ab);
if (ret) {
ath11k_err(ab, "failed to config irq: %d\n", ret);
error = qwx_pcic_config_irq(sc, pa);
if (error) {
printf("%s: failed to config irq: %d\n",
sc->sc_dev.dv_xname, error);
goto err_ce_free;
}
#if notyet
ret = ath11k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0));
if (ret) {
ath11k_err(ab, "failed to set irq affinity %d\n", ret);
@ -978,6 +1114,8 @@ unsupported_wcn6855_soc:
config_mountroot(self, qwx_pci_attach_hook);
return;
err_ce_free:
qwx_ce_free_pipes(sc);
err_hal_srng_deinit:
err_mhi_unregister:
err_pci_free_cmd_ring:
@ -997,7 +1135,7 @@ err_pci_free_chan_ctxt:
psc->chan_ctxt = NULL;
err_pci_disable_msi:
err_pci_free_region:
pci_intr_disestablish(psc->sc_pc, psc->sc_ih);
pci_intr_disestablish(psc->sc_pc, psc->sc_ih[0]);
return;
}
@ -1007,9 +1145,9 @@ qwx_pci_detach(struct device *self, int flags)
struct qwx_pci_softc *psc = (struct qwx_pci_softc *)self;
struct qwx_softc *sc = &psc->sc_sc;
if (psc->sc_ih) {
pci_intr_disestablish(psc->sc_pc, psc->sc_ih);
psc->sc_ih = NULL;
if (psc->sc_ih[0]) {
pci_intr_disestablish(psc->sc_pc, psc->sc_ih[0]);
psc->sc_ih[0] = NULL;
}
qwx_detach(sc);
@ -1289,12 +1427,12 @@ qwx_pci_alloc_event_rings(struct qwx_pci_softc *psc)
int ret;
ret = qwx_pci_alloc_event_ring(sc, &psc->event_rings[0],
MHI_ER_CTRL, 0, 0, 32);
MHI_ER_CTRL, psc->mhi_irq[MHI_ER_CTRL], 0, 32);
if (ret)
goto fail;
ret = qwx_pci_alloc_event_ring(sc, &psc->event_rings[1],
MHI_ER_DATA, 0, 1, 256);
MHI_ER_DATA, psc->mhi_irq[MHI_ER_DATA], 1, 256);
if (ret)
goto fail;
@ -1449,7 +1587,8 @@ qwx_pcic_ce_irq_enable(struct qwx_softc *sc, uint16_t ce_id)
if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags))
return;
printf("%s not implemented\n", __func__);
/* OpenBSD PCI stack does not yet implement MSI interrupt masking. */
sc->msi_ce_irqmask |= (1U << ce_id);
}
void
@ -1461,7 +1600,145 @@ qwx_pcic_ce_irq_disable(struct qwx_softc *sc, uint16_t ce_id)
if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags))
return;
printf("%s not implemented\n", __func__);
/* OpenBSD PCI stack does not yet implement MSI interrupt masking. */
sc->msi_ce_irqmask &= ~(1U << ce_id);
}
void
qwx_pcic_ext_grp_disable(struct qwx_ext_irq_grp *irq_grp)
{
struct qwx_softc *sc = irq_grp->sc;
/* In case of one MSI vector, we handle irq enable/disable
* in a uniform way since we only have one irq
*/
if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags))
return;
}
int
qwx_pcic_ext_irq_config(struct qwx_softc *sc, struct pci_attach_args *pa)
{
struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc;
int i, ret, num_vectors = 0;
uint32_t msi_data_start = 0;
uint32_t base_vector = 0;
if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags))
return 0;
ret = qwx_pcic_get_user_msi_vector(sc, "DP", &num_vectors,
&msi_data_start, &base_vector);
if (ret < 0)
return ret;
for (i = 0; i < nitems(sc->ext_irq_grp); i++) {
struct qwx_ext_irq_grp *irq_grp = &sc->ext_irq_grp[i];
uint32_t num_irq = 0;
irq_grp->sc = sc;
irq_grp->grp_id = i;
#if 0
init_dummy_netdev(&irq_grp->napi_ndev);
netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
ath11k_pcic_ext_grp_napi_poll);
#endif
if (sc->hw_params.ring_mask->tx[i] ||
sc->hw_params.ring_mask->rx[i] ||
sc->hw_params.ring_mask->rx_err[i] ||
sc->hw_params.ring_mask->rx_wbm_rel[i] ||
sc->hw_params.ring_mask->reo_status[i] ||
sc->hw_params.ring_mask->rxdma2host[i] ||
sc->hw_params.ring_mask->host2rxdma[i] ||
sc->hw_params.ring_mask->rx_mon_status[i]) {
num_irq = 1;
}
irq_grp->num_irq = num_irq;
irq_grp->irqs[0] = ATH11K_PCI_IRQ_DP_OFFSET + i;
if (num_irq) {
int irq_idx = irq_grp->irqs[0];
pci_intr_handle_t ih;
if (pci_intr_map_msivec(pa, irq_idx, &ih) != 0 &&
pci_intr_map(pa, &ih) != 0) {
printf("%s: can't map interrupt\n",
sc->sc_dev.dv_xname);
return EIO;
}
snprintf(psc->sc_ivname[irq_idx], sizeof(psc->sc_ivname[0]),
"%s:ex%d", sc->sc_dev.dv_xname, i);
psc->sc_ih[irq_idx] = pci_intr_establish(psc->sc_pc, ih,
IPL_NET, qwx_ext_intr, irq_grp, psc->sc_ivname[irq_idx]);
if (psc->sc_ih[irq_idx] == NULL) {
printf("%s: failed to request irq %d\n",
sc->sc_dev.dv_xname, irq_idx);
return EIO;
}
}
qwx_pcic_ext_grp_disable(irq_grp);
}
return 0;
}
int
qwx_pcic_config_irq(struct qwx_softc *sc, struct pci_attach_args *pa)
{
struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc;
struct qwx_ce_pipe *ce_pipe;
uint32_t msi_data_start;
uint32_t msi_data_count, msi_data_idx;
uint32_t msi_irq_start;
int i, ret, irq_idx;
pci_intr_handle_t ih;
if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags))
return 0;
ret = qwx_pcic_get_user_msi_vector(sc, "CE", &msi_data_count,
&msi_data_start, &msi_irq_start);
if (ret)
return ret;
/* Configure CE irqs */
for (i = 0, msi_data_idx = 0; i < sc->hw_params.ce_count; i++) {
if (qwx_ce_get_attr_flags(sc, i) & CE_ATTR_DIS_INTR)
continue;
ce_pipe = &sc->ce.ce_pipe[i];
irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
if (pci_intr_map_msivec(pa, irq_idx, &ih) != 0 &&
pci_intr_map(pa, &ih) != 0) {
printf("%s: can't map interrupt\n",
sc->sc_dev.dv_xname);
return EIO;
}
snprintf(psc->sc_ivname[irq_idx], sizeof(psc->sc_ivname[0]),
"%s:ce%d", sc->sc_dev.dv_xname, ce_pipe->pipe_num);
psc->sc_ih[irq_idx] = pci_intr_establish(psc->sc_pc, ih,
IPL_NET, qwx_ce_intr, ce_pipe, psc->sc_ivname[irq_idx]);
if (psc->sc_ih[irq_idx] == NULL) {
printf("%s: failed to request irq %d\n",
sc->sc_dev.dv_xname, irq_idx);
return EIO;
}
msi_data_idx++;
qwx_pcic_ce_irq_disable(sc, i);
}
ret = qwx_pcic_ext_irq_config(sc, pa);
if (ret)
return ret;
return 0;
}
void
@ -2747,7 +3024,7 @@ qwx_mhi_fw_load_handler(struct qwx_pci_softc *psc)
u_char *data;
size_t len;
ret = snprintf(amss_path, sizeof(amss_path), "%s/%s/%s",
ret = snprintf(amss_path, sizeof(amss_path), "%s-%s-%s",
ATH11K_FW_DIR, sc->hw_params.fw.dir, ATH11K_AMSS_FILE);
if (ret < 0 || ret >= sizeof(amss_path))
return ENOSPC;
@ -3813,6 +4090,28 @@ qwx_pci_intr_data_event(struct qwx_pci_softc *psc, struct qwx_pci_event_ring *ri
return 1;
}
int
qwx_pci_intr_mhi_ctrl(void *arg)
{
struct qwx_pci_softc *psc = arg;
if (qwx_pci_intr_ctrl_event(psc, &psc->event_rings[0]))
return 1;
return 0;
}
int
qwx_pci_intr_mhi_data(void *arg)
{
struct qwx_pci_softc *psc = arg;
if (qwx_pci_intr_data_event(psc, &psc->event_rings[1]))
return 1;
return 0;
}
int
qwx_pci_intr(void *arg)
{
@ -3834,7 +4133,7 @@ qwx_pci_intr(void *arg)
MHI_STATUS_MHISTATE_SHFT;
DNPRINTF(QWX_D_MHI,
"%s: MHI interrupt with EE: 0x%x -> 0x%x state: 0x%x -> 0x%x\n",
"%s: BHI interrupt with EE: 0x%x -> 0x%x state: 0x%x -> 0x%x\n",
sc->sc_dev.dv_xname, psc->bhi_ee, ee, psc->mhi_state, state);
if (ee == MHI_EE_RDDM) {
@ -3860,13 +4159,26 @@ qwx_pci_intr(void *arg)
ret = 1;
}
if (qwx_pci_intr_ctrl_event(psc, &psc->event_rings[0]))
ret = 1;
if (qwx_pci_intr_data_event(psc, &psc->event_rings[1]))
ret = 1;
if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags)) {
int i;
if (qwx_intr(sc))
ret = 1;
if (qwx_pci_intr_ctrl_event(psc, &psc->event_rings[0]))
ret = 1;
if (qwx_pci_intr_data_event(psc, &psc->event_rings[1]))
ret = 1;
for (i = 0; i < sc->hw_params.ce_count; i++) {
struct qwx_ce_pipe *ce_pipe = &sc->ce.ce_pipe[i];
if (qwx_ce_intr(ce_pipe))
ret = 1;
}
for (i = 0; i < nitems(sc->ext_irq_grp); i++) {
if (qwx_dp_service_srng(sc, i))
ret = 1;
}
}
return ret;
}