sync with OpenBSD -current

parent ee68147dcd
commit 1cefe29c7e

1651 changed files with 283292 additions and 68089 deletions
@@ -56,7 +56,8 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev)
     /* enable virtual display */
     if (adev->asic_type != CHIP_ALDEBARAN &&
-        adev->asic_type != CHIP_ARCTURUS) {
+        adev->asic_type != CHIP_ARCTURUS &&
+        ((adev->pdev->class >> 8) != PCI_CLASS_ACCELERATOR_PROCESSING)) {
         if (adev->mode_info.num_crtc == 0)
             adev->mode_info.num_crtc = 1;
         adev->enable_virtual_display = true;
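A note on the new condition: on Linux, adev->pdev->class carries the 24-bit PCI class code (base class, sub-class, programming interface), so class >> 8 leaves the base/sub-class pair, which is compared against PCI_CLASS_ACCELERATOR_PROCESSING to skip virtual-display setup on compute-only accelerator functions. A minimal standalone sketch of that decomposition, with an invented class value:

    #include <stdint.h>
    #include <stdio.h>

    #define PCI_CLASS_ACCELERATOR_PROCESSING 0x1200 /* base 0x12, sub-class 0x00 */

    int main(void)
    {
        uint32_t pci_class = 0x120000;      /* invented: base 0x12, sub 0x00, prog-if 0x00 */
        unsigned base_sub = pci_class >> 8; /* drop the prog-if byte */

        printf("base+sub = 0x%04x, accelerator: %s\n", base_sub,
               base_sub == PCI_CLASS_ACCELERATOR_PROCESSING ? "yes" : "no");
        return 0;
    }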
@@ -64,13 +65,17 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev)
     ddev->driver_features &= ~DRIVER_ATOMIC;
     adev->cg_flags = 0;
     adev->pg_flags = 0;
+
+    /* Reduce kcq number to 2 to reduce latency */
+    if (amdgpu_num_kcq == -1)
+        amdgpu_num_kcq = 2;
 }
 
 void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
                                         uint32_t reg0, uint32_t reg1,
                                         uint32_t ref, uint32_t mask)
 {
-    struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+    struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
     struct amdgpu_ring *ring = &kiq->ring;
     signed long r, cnt = 0;
     unsigned long flags;
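The kiq -> kiq[0] change reflects gfx.kiq becoming an array with one KIQ per XCC partition on multi-XCC parts; single-partition paths pin index 0. A toy model of the layout change, with invented stand-in types:

    #include <stdio.h>

    #define MAX_XCC 8

    struct toy_kiq { int ring_id; };

    struct toy_gfx {
        struct toy_kiq kiq[MAX_XCC]; /* previously a single struct toy_kiq kiq; */
    };

    int main(void)
    {
        struct toy_gfx gfx = { 0 };
        struct toy_kiq *kiq = &gfx.kiq[0]; /* single-partition callers use index 0 */

        kiq->ring_id = 42;
        printf("kiq[0].ring_id = %d\n", gfx.kiq[0].ring_id);
        return 0;
    }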
@@ -228,7 +233,8 @@ int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
         return 0;
 
     r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
-                                AMDGPU_GEM_DOMAIN_VRAM,
+                                AMDGPU_GEM_DOMAIN_VRAM |
+                                AMDGPU_GEM_DOMAIN_GTT,
                                 &adev->virt.mm_table.bo,
                                 &adev->virt.mm_table.gpu_addr,
                                 (void *)&adev->virt.mm_table.cpu_addr);
@@ -423,11 +429,17 @@ static void amdgpu_virt_add_bad_page(struct amdgpu_device *adev,
     struct eeprom_table_record bp;
     uint64_t retired_page;
     uint32_t bp_idx, bp_cnt;
+    void *vram_usage_va = NULL;
+
+    if (adev->mman.fw_vram_usage_va)
+        vram_usage_va = adev->mman.fw_vram_usage_va;
+    else
+        vram_usage_va = adev->mman.drv_vram_usage_va;
 
     if (bp_block_size) {
         bp_cnt = bp_block_size / sizeof(uint64_t);
         for (bp_idx = 0; bp_idx < bp_cnt; bp_idx++) {
-            retired_page = *(uint64_t *)(adev->mman.fw_vram_usage_va +
+            retired_page = *(uint64_t *)(vram_usage_va +
                     bp_block_offset + bp_idx * sizeof(uint64_t));
             bp.retired_page = retired_page;
@@ -508,7 +520,7 @@ static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev)
             tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].encode_max_frame_pixels;
             adev->virt.encode_max_frame_pixels = max(tmp, adev->virt.encode_max_frame_pixels);
         }
-        if((adev->virt.decode_max_dimension_pixels > 0) || (adev->virt.encode_max_dimension_pixels > 0))
+        if ((adev->virt.decode_max_dimension_pixels > 0) || (adev->virt.encode_max_dimension_pixels > 0))
             adev->virt.is_mm_bw_enabled = true;
 
         adev->unique_id =
@@ -546,7 +558,6 @@ static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev)
     POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLS, adev->gfx.rlc_srls_fw_version);
     POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC, adev->gfx.mec_fw_version);
     POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC2, adev->gfx.mec2_fw_version);
-    POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_IMU, adev->gfx.imu_fw_version);
     POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SOS, adev->psp.sos.fw_version);
     POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD,
                         adev->psp.asd_context.bin_desc.fw_version);
@@ -639,7 +650,9 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
     adev->virt.fw_reserve.p_vf2pf = NULL;
     adev->virt.vf2pf_update_interval_ms = 0;
 
-    if (adev->mman.fw_vram_usage_va != NULL) {
+    if (adev->mman.fw_vram_usage_va && adev->mman.drv_vram_usage_va) {
+        DRM_WARN("Currently fw_vram and drv_vram should not have values at the same time!");
+    } else if (adev->mman.fw_vram_usage_va || adev->mman.drv_vram_usage_va) {
         /* go through this logic in ip_init and reset to init workqueue*/
         amdgpu_virt_exchange_data(adev);
@@ -662,32 +675,40 @@ void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
     uint32_t bp_block_size = 0;
     struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL;
 
-    if (adev->mman.fw_vram_usage_va != NULL) {
-
-        adev->virt.fw_reserve.p_pf2vf =
-            (struct amd_sriov_msg_pf2vf_info_header *)
-            (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
-        adev->virt.fw_reserve.p_vf2pf =
-            (struct amd_sriov_msg_vf2pf_info_header *)
-            (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
+    if (adev->mman.fw_vram_usage_va || adev->mman.drv_vram_usage_va) {
+        if (adev->mman.fw_vram_usage_va) {
+            adev->virt.fw_reserve.p_pf2vf =
+                (struct amd_sriov_msg_pf2vf_info_header *)
+                (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
+            adev->virt.fw_reserve.p_vf2pf =
+                (struct amd_sriov_msg_vf2pf_info_header *)
+                (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
+        } else if (adev->mman.drv_vram_usage_va) {
+            adev->virt.fw_reserve.p_pf2vf =
+                (struct amd_sriov_msg_pf2vf_info_header *)
+                (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
+            adev->virt.fw_reserve.p_vf2pf =
+                (struct amd_sriov_msg_vf2pf_info_header *)
+                (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
+        }
 
         amdgpu_virt_read_pf2vf_data(adev);
         amdgpu_virt_write_vf2pf_data(adev);
 
         /* bad page handling for version 2 */
         if (adev->virt.fw_reserve.p_pf2vf->version == 2) {
             pf2vf_v2 = (struct amd_sriov_msg_pf2vf_info *)adev->virt.fw_reserve.p_pf2vf;
 
             bp_block_offset = ((uint64_t)pf2vf_v2->bp_block_offset_low & 0xFFFFFFFF) |
                     ((((uint64_t)pf2vf_v2->bp_block_offset_high) << 32) & 0xFFFFFFFF00000000);
             bp_block_size = pf2vf_v2->bp_block_size;
 
             if (bp_block_size && !adev->virt.ras_init_done)
                 amdgpu_virt_init_ras_err_handler_data(adev);
 
             if (adev->virt.ras_init_done)
                 amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size);
         }
     }
 }
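The bad-page block offset travels across the PF/VF message as two 32-bit halves and is reassembled as low | (high << 32); the AMD_SRIOV_MSG_*_OFFSET_KB constants are kilobyte offsets, hence the << 10 conversions above. A standalone sketch of the reassembly, with invented halves:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t lo = 0x89ABCDEF, hi = 0x01234567; /* invented halves */
        uint64_t off = ((uint64_t)lo & 0xFFFFFFFF) |
                       ((((uint64_t)hi) << 32) & 0xFFFFFFFF00000000);

        printf("offset = 0x%016" PRIx64 "\n", off); /* 0x0123456789abcdef */
        return 0;
    }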
@@ -815,6 +836,16 @@ enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *adev)
     return mode;
 }
 
+void amdgpu_virt_post_reset(struct amdgpu_device *adev)
+{
+    if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 3)) {
+        /* force set to GFXOFF state after reset,
+         * to avoid some invalid operation before GC enable
+         */
+        adev->gfx.is_poweron = false;
+    }
+}
+
 bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev, uint32_t ucode_id)
 {
     switch (adev->ip_versions[MP0_HWIP][0]) {
@@ -825,6 +856,17 @@ bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev, uint32_t ucode_id)
             return false;
         else
             return true;
+    case IP_VERSION(11, 0, 9):
+    case IP_VERSION(11, 0, 7):
+        /* black list for CHIP_NAVI12 and CHIP_SIENNA_CICHLID */
+        if (ucode_id == AMDGPU_UCODE_ID_RLC_G
+            || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
+            || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
+            || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
+            || ucode_id == AMDGPU_UCODE_ID_SMC)
+            return true;
+        else
+            return false;
     case IP_VERSION(13, 0, 10):
         /* white list */
         if (ucode_id == AMDGPU_UCODE_ID_CAP
@@ -934,7 +976,7 @@ static bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev,
     return ret;
 }
 
-static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag)
+static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag, u32 xcc_id)
 {
     struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
     uint32_t timeout = 50000;
@@ -952,7 +994,12 @@ static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v
         return 0;
     }
 
-    reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl;
+    if (adev->gfx.xcc_mask && (((1 << xcc_id) & adev->gfx.xcc_mask) == 0)) {
+        dev_err(adev->dev, "invalid xcc\n");
+        return 0;
+    }
+
+    reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[xcc_id];
     scratch_reg0 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg0;
     scratch_reg1 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg1;
     scratch_reg2 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg2;
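The new guard treats gfx.xcc_mask as a bitmap of enabled XCCs: a zero mask (single-XCC hardware) accepts any id, otherwise bit xcc_id must be set. A standalone sketch of the same test, with an invented mask:

    #include <stdint.h>
    #include <stdio.h>

    static int xcc_id_valid(uint32_t xcc_mask, uint32_t xcc_id)
    {
        /* zero mask: single-XCC part, any id passes */
        return !xcc_mask || ((1u << xcc_id) & xcc_mask);
    }

    int main(void)
    {
        uint32_t mask = 0x0F; /* invented: four XCCs enabled */

        printf("xcc 2: %d, xcc 5: %d\n",
               xcc_id_valid(mask, 2), xcc_id_valid(mask, 5));
        return 0;
    }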
@@ -963,11 +1010,13 @@ static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v
     if (offset == reg_access_ctrl->grbm_cntl) {
         /* if the target reg offset is grbm_cntl, write to scratch_reg2 */
         writel(v, scratch_reg2);
-        writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
+        if (flag == AMDGPU_RLCG_GC_WRITE_LEGACY)
+            writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
     } else if (offset == reg_access_ctrl->grbm_idx) {
         /* if the target reg offset is grbm_idx, write to scratch_reg3 */
         writel(v, scratch_reg3);
-        writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
+        if (flag == AMDGPU_RLCG_GC_WRITE_LEGACY)
+            writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
     } else {
         /*
          * SCRATCH_REG0 = read/write value
@@ -1015,13 +1064,13 @@ static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v
 
 void amdgpu_sriov_wreg(struct amdgpu_device *adev,
                        u32 offset, u32 value,
-                       u32 acc_flags, u32 hwip)
+                       u32 acc_flags, u32 hwip, u32 xcc_id)
 {
     u32 rlcg_flag;
 
     if (!amdgpu_sriov_runtime(adev) &&
         amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, true, &rlcg_flag)) {
-        amdgpu_virt_rlcg_reg_rw(adev, offset, value, rlcg_flag);
+        amdgpu_virt_rlcg_reg_rw(adev, offset, value, rlcg_flag, xcc_id);
         return;
     }
@@ -1032,13 +1081,13 @@ void amdgpu_sriov_wreg(struct amdgpu_device *adev,
 }
 
 u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
-                      u32 offset, u32 acc_flags, u32 hwip)
+                      u32 offset, u32 acc_flags, u32 hwip, u32 xcc_id)
 {
     u32 rlcg_flag;
 
     if (!amdgpu_sriov_runtime(adev) &&
         amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, false, &rlcg_flag))
-        return amdgpu_virt_rlcg_reg_rw(adev, offset, 0, rlcg_flag);
+        return amdgpu_virt_rlcg_reg_rw(adev, offset, 0, rlcg_flag, xcc_id);
 
     if (acc_flags & AMDGPU_REGS_NO_KIQ)
         return RREG32_NO_KIQ(offset);
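With the added parameter, the SR-IOV register helpers are addressed per compute partition. A runnable mock showing only the new five-argument call shape; the struct, enum value, and function body below are invented stand-ins, not driver code:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t u32;

    struct amdgpu_device { int dummy; };
    enum { GC_HWIP = 1 };

    static u32 amdgpu_sriov_rreg(struct amdgpu_device *adev, u32 offset,
                                 u32 acc_flags, u32 hwip, u32 xcc_id)
    {
        (void)adev; (void)acc_flags; (void)hwip;
        printf("read reg 0x%x on xcc %u\n", (unsigned)offset, (unsigned)xcc_id);
        return 0;
    }

    int main(void)
    {
        struct amdgpu_device adev = { 0 };

        amdgpu_sriov_rreg(&adev, 0x1234, 0, GC_HWIP, 0); /* single-XCC callers pass 0 */
        return 0;
    }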