sync with OpenBSD -current

parent ee68147dcd
commit 1cefe29c7e

1651 changed files with 283292 additions and 68089 deletions
@@ -575,60 +575,17 @@ static void sdma_v4_0_setup_ulv(struct amdgpu_device *adev)
// vega10 real chip need to use PSP to load firmware
static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int ret, i;

	DRM_DEBUG("\n");

	switch (adev->ip_versions[SDMA0_HWIP][0]) {
	case IP_VERSION(4, 0, 0):
		chip_name = "vega10";
		break;
	case IP_VERSION(4, 0, 1):
		chip_name = "vega12";
		break;
	case IP_VERSION(4, 2, 0):
		chip_name = "vega20";
		break;
	case IP_VERSION(4, 1, 0):
	case IP_VERSION(4, 1, 1):
		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			chip_name = "raven2";
		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
			chip_name = "picasso";
		else
			chip_name = "raven";
		break;
	case IP_VERSION(4, 2, 2):
		chip_name = "arcturus";
		break;
	case IP_VERSION(4, 1, 2):
		if (adev->apu_flags & AMD_APU_IS_RENOIR)
			chip_name = "renoir";
		else
			chip_name = "green_sardine";
		break;
	case IP_VERSION(4, 4, 0):
		chip_name = "aldebaran";
		break;
	default:
		BUG();
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (i == 0)
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
		else
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma%d.bin", chip_name, i);
		if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) ||
		    adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 0)) {
			/* Acturus & Aldebaran will leverage the same FW memory
			   for every SDMA instance */
			ret = amdgpu_sdma_init_microcode(adev, fw_name, 0, true);
			ret = amdgpu_sdma_init_microcode(adev, 0, true);
			break;
		} else {
			ret = amdgpu_sdma_init_microcode(adev, fw_name, i, false);
			ret = amdgpu_sdma_init_microcode(adev, i, false);
			if (ret)
				return ret;
		}
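The hunk above drops the per-chip fw_name bookkeeping: sdma_v4_0_init_microcode() no longer builds "amdgpu/<chip>_sdma*.bin" strings, and amdgpu_sdma_init_microcode() is now called as (adev, instance, duplicate), presumably deriving the firmware file name internally. Below is a minimal standalone sketch of the resulting call pattern only; fake_adev, init_microcode_stub and the shares_fw flag are illustrative stand-ins, not the kernel API.

/* Minimal standalone model of the new firmware-init flow (not kernel code).
 * The stub only mimics the call shape amdgpu_sdma_init_microcode() now has:
 * (device, instance, duplicate) -- the fw_name argument is gone.
 */
#include <stdio.h>
#include <stdbool.h>

struct fake_adev {
	int num_instances;
	bool shares_fw;		/* Arcturus/Aldebaran: one image for all engines */
};

/* Hypothetical stand-in for amdgpu_sdma_init_microcode(adev, instance, duplicate). */
static int init_microcode_stub(struct fake_adev *adev, int instance, bool duplicate)
{
	printf("load instance %d (duplicate=%d)\n", instance, duplicate);
	return 0;
}

static int model_init(struct fake_adev *adev)
{
	int ret = 0, i;

	for (i = 0; i < adev->num_instances; i++) {
		if (adev->shares_fw) {
			/* one firmware image, duplicated across every SDMA instance */
			ret = init_microcode_stub(adev, 0, true);
			break;
		}
		ret = init_microcode_stub(adev, i, false);
		if (ret)
			return ret;
	}
	return ret;
}

int main(void)
{
	struct fake_adev arcturus = { .num_instances = 8, .shares_fw = true };
	struct fake_adev vega20   = { .num_instances = 2, .shares_fw = false };

	model_init(&arcturus);	/* prints a single load with duplicate=1 */
	model_init(&vega20);	/* prints one load per instance */
	return 0;
}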
@@ -1157,8 +1114,6 @@ static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i)
#endif
	/* enable DMA IBs */
	WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);

	ring->sched.ready = true;
}

/**
@@ -1245,8 +1200,6 @@ static void sdma_v4_0_page_resume(struct amdgpu_device *adev, unsigned int i)
#endif
	/* enable DMA IBs */
	WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl);

	ring->sched.ready = true;
}

static void
@@ -1866,6 +1819,15 @@ static int sdma_v4_0_sw_init(void *handle)
		/* doorbell size is 2 dwords, get DWORD offset */
		ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1;

		/*
		 * On Arcturus, SDMA instance 5~7 has a different vmhub
		 * type(AMDGPU_MMHUB1).
		 */
		if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5)
			ring->vm_hub = AMDGPU_MMHUB1(0);
		else
			ring->vm_hub = AMDGPU_MMHUB0(0);

		snprintf(ring->name, sizeof(ring->name), "sdma%d", i);
		r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
				     AMDGPU_SDMA_IRQ_INSTANCE0 + i,
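The vm_hub selection added in this hunk boils down to one predicate: on Arcturus (SDMA IP v4.2.2) instances 5~7 hang off the second MMHUB, everything else off the first. A toy standalone model of that rule follows; pick_vm_hub and the MMHUB0/MMHUB1 constants are made-up names, not amdgpu identifiers.

/* Toy model of the per-ring vmhub pick (assumed names; not the kernel API). */
#include <stdio.h>
#include <stdbool.h>

enum { MMHUB0 = 0, MMHUB1 = 1 };

static int pick_vm_hub(bool is_arcturus, int instance)
{
	/* Arcturus routes SDMA 5..7 through the second MMHUB */
	return (is_arcturus && instance >= 5) ? MMHUB1 : MMHUB0;
}

int main(void)
{
	for (int i = 0; i < 8; i++)
		printf("sdma%d -> mmhub%d\n", i, pick_vm_hub(true, i));
	return 0;
}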
@@ -1881,8 +1843,23 @@ static int sdma_v4_0_sw_init(void *handle)
			/* paging queue use same doorbell index/routing as gfx queue
			 * with 0x400 (4096 dwords) offset on second doorbell page
			 */
			ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1;
			ring->doorbell_index += 0x400;
			if (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(4, 0, 0) &&
			    adev->ip_versions[SDMA0_HWIP][0] < IP_VERSION(4, 2, 0)) {
				ring->doorbell_index =
					adev->doorbell_index.sdma_engine[i] << 1;
				ring->doorbell_index += 0x400;
			} else {
				/* From vega20, the sdma_doorbell_range in 1st
				 * doorbell page is reserved for page queue.
				 */
				ring->doorbell_index =
					(adev->doorbell_index.sdma_engine[i] + 1) << 1;
			}

			if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5)
				ring->vm_hub = AMDGPU_MMHUB1(0);
			else
				ring->vm_hub = AMDGPU_MMHUB0(0);

			snprintf(ring->name, sizeof(ring->name), "page%d", i);
			r = amdgpu_ring_init(adev, ring, 1024,
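The doorbell arithmetic introduced above has two branches: SDMA IP 4.0.x/4.1.x keep the gfx-queue doorbell slot plus a 0x400 offset onto the second doorbell page, while vega20 and later take the next 2-dword slot in the first page. The standalone model below reproduces just that arithmetic with plain ints; page_doorbell is a hypothetical helper, not part of the driver.

/* Standalone model of the page-queue doorbell arithmetic shown above. */
#include <stdio.h>
#include <stdbool.h>

/* doorbell slots are 2 dwords wide, hence the << 1 */
static int page_doorbell(int sdma_engine_index, bool pre_vega20)
{
	if (pre_vega20)			/* SDMA IP 4.0.x / 4.1.x */
		return (sdma_engine_index << 1) + 0x400;
	/* vega20 onward: the next slot in the 1st page is reserved for the page queue */
	return (sdma_engine_index + 1) << 1;
}

int main(void)
{
	printf("pre-vega20 engine 0x100 -> 0x%x\n", page_doorbell(0x100, true));
	printf("vega20+    engine 0x100 -> 0x%x\n", page_doorbell(0x100, false));
	return 0;
}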
@@ -1894,6 +1871,11 @@ static int sdma_v4_0_sw_init(void *handle)
		}
	}

	if (amdgpu_sdma_ras_sw_init(adev)) {
		dev_err(adev->dev, "Failed to initialize sdma ras block!\n");
		return -EINVAL;
	}

	return r;
}

@@ -2334,44 +2316,6 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
	.support_64bit_ptrs = true,
	.secure_submission_supported = true,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = sdma_v4_0_ring_get_rptr,
	.get_wptr = sdma_v4_0_ring_get_wptr,
	.set_wptr = sdma_v4_0_ring_set_wptr,
	.emit_frame_size =
		6 + /* sdma_v4_0_ring_emit_hdp_flush */
		3 + /* hdp invalidate */
		6 + /* sdma_v4_0_ring_emit_pipeline_sync */
		/* sdma_v4_0_ring_emit_vm_flush */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
		10 + 10 + 10, /* sdma_v4_0_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size = 7 + 6, /* sdma_v4_0_ring_emit_ib */
	.emit_ib = sdma_v4_0_ring_emit_ib,
	.emit_fence = sdma_v4_0_ring_emit_fence,
	.emit_pipeline_sync = sdma_v4_0_ring_emit_pipeline_sync,
	.emit_vm_flush = sdma_v4_0_ring_emit_vm_flush,
	.emit_hdp_flush = sdma_v4_0_ring_emit_hdp_flush,
	.test_ring = sdma_v4_0_ring_test_ring,
	.test_ib = sdma_v4_0_ring_test_ib,
	.insert_nop = sdma_v4_0_ring_insert_nop,
	.pad_ib = sdma_v4_0_ring_pad_ib,
	.emit_wreg = sdma_v4_0_ring_emit_wreg,
	.emit_reg_wait = sdma_v4_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

/*
 * On Arcturus, SDMA instance 5~7 has a different vmhub type(AMDGPU_MMHUB_1).
 * So create a individual constant ring_funcs for those instances.
 */
static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs_2nd_mmhub = {
	.type = AMDGPU_RING_TYPE_SDMA,
	.align_mask = 0xf,
	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
	.support_64bit_ptrs = true,
	.secure_submission_supported = true,
	.vmhub = AMDGPU_MMHUB_1,
	.get_rptr = sdma_v4_0_ring_get_rptr,
	.get_wptr = sdma_v4_0_ring_get_wptr,
	.set_wptr = sdma_v4_0_ring_set_wptr,
@@ -2404,40 +2348,6 @@ static const struct amdgpu_ring_funcs sdma_v4_0_page_ring_funcs = {
	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
	.support_64bit_ptrs = true,
	.secure_submission_supported = true,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = sdma_v4_0_ring_get_rptr,
	.get_wptr = sdma_v4_0_page_ring_get_wptr,
	.set_wptr = sdma_v4_0_page_ring_set_wptr,
	.emit_frame_size =
		6 + /* sdma_v4_0_ring_emit_hdp_flush */
		3 + /* hdp invalidate */
		6 + /* sdma_v4_0_ring_emit_pipeline_sync */
		/* sdma_v4_0_ring_emit_vm_flush */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
		10 + 10 + 10, /* sdma_v4_0_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size = 7 + 6, /* sdma_v4_0_ring_emit_ib */
	.emit_ib = sdma_v4_0_ring_emit_ib,
	.emit_fence = sdma_v4_0_ring_emit_fence,
	.emit_pipeline_sync = sdma_v4_0_ring_emit_pipeline_sync,
	.emit_vm_flush = sdma_v4_0_ring_emit_vm_flush,
	.emit_hdp_flush = sdma_v4_0_ring_emit_hdp_flush,
	.test_ring = sdma_v4_0_ring_test_ring,
	.test_ib = sdma_v4_0_ring_test_ib,
	.insert_nop = sdma_v4_0_ring_insert_nop,
	.pad_ib = sdma_v4_0_ring_pad_ib,
	.emit_wreg = sdma_v4_0_ring_emit_wreg,
	.emit_reg_wait = sdma_v4_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static const struct amdgpu_ring_funcs sdma_v4_0_page_ring_funcs_2nd_mmhub = {
	.type = AMDGPU_RING_TYPE_SDMA,
	.align_mask = 0xf,
	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
	.support_64bit_ptrs = true,
	.secure_submission_supported = true,
	.vmhub = AMDGPU_MMHUB_1,
	.get_rptr = sdma_v4_0_ring_get_rptr,
	.get_wptr = sdma_v4_0_page_ring_get_wptr,
	.set_wptr = sdma_v4_0_page_ring_set_wptr,
@@ -2469,19 +2379,10 @@ static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5)
			adev->sdma.instance[i].ring.funcs =
				&sdma_v4_0_ring_funcs_2nd_mmhub;
		else
			adev->sdma.instance[i].ring.funcs =
				&sdma_v4_0_ring_funcs;
		adev->sdma.instance[i].ring.funcs = &sdma_v4_0_ring_funcs;
		adev->sdma.instance[i].ring.me = i;
		if (adev->sdma.has_page_queue) {
			if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5)
				adev->sdma.instance[i].page.funcs =
					&sdma_v4_0_page_ring_funcs_2nd_mmhub;
			else
				adev->sdma.instance[i].page.funcs =
			adev->sdma.instance[i].page.funcs =
				&sdma_v4_0_page_ring_funcs;
			adev->sdma.instance[i].page.me = i;
		}
@@ -2733,23 +2634,6 @@ static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev)
		break;
	}

	if (adev->sdma.ras) {
		amdgpu_ras_register_ras_block(adev, &adev->sdma.ras->ras_block);

		strlcpy(adev->sdma.ras->ras_block.ras_comm.name, "sdma",
			sizeof(adev->sdma.ras->ras_block.ras_comm.name));
		adev->sdma.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__SDMA;
		adev->sdma.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
		adev->sdma.ras_if = &adev->sdma.ras->ras_block.ras_comm;

		/* If don't define special ras_late_init function, use default ras_late_init */
		if (!adev->sdma.ras->ras_block.ras_late_init)
			adev->sdma.ras->ras_block.ras_late_init = amdgpu_sdma_ras_late_init;

		/* If not defined special ras_cb function, use default ras_cb */
		if (!adev->sdma.ras->ras_block.ras_cb)
			adev->sdma.ras->ras_block.ras_cb = amdgpu_sdma_process_ras_data_cb;
	}
}

const struct amdgpu_ip_block_version sdma_v4_0_ip_block = {
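For reference, the removed sdma_v4_0_set_ras_funcs() body follows a common register-then-default pattern: hooks the block did not supply are filled in with the generic helpers. A minimal standalone model of that pattern, using illustrative types and names rather than the amdgpu RAS structures:

/* Model of "use defaults unless the block provided its own callbacks"
 * (illustrative types and names only -- not the amdgpu RAS API).
 */
#include <stdio.h>

struct ras_block_model {
	const char *name;
	void (*late_init)(void);
	void (*error_cb)(void);
};

static void default_late_init(void) { puts("default late_init"); }
static void default_error_cb(void)  { puts("default error cb"); }

static void register_ras_block(struct ras_block_model *blk)
{
	/* fall back to the generic helpers when the block left a hook unset */
	if (!blk->late_init)
		blk->late_init = default_late_init;
	if (!blk->error_cb)
		blk->error_cb = default_error_cb;
	printf("registered RAS block %s\n", blk->name);
}

int main(void)
{
	struct ras_block_model sdma = { .name = "sdma" };	/* no hooks set */
	register_ras_block(&sdma);
	sdma.late_init();
	sdma.error_cb();
	return 0;
}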