sync code with last improvements from OpenBSD
commit aaee5ffc53
parent ab8d6e7bca
52 changed files with 584 additions and 229 deletions
amdgpu_cs.c
@@ -185,7 +185,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
 	uint64_t *chunk_array_user;
 	uint64_t *chunk_array;
 	uint32_t uf_offset = 0;
-	unsigned int size;
+	size_t size;
 	int ret;
 	int i;
 
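Note: amdgpu_cs_pass1() derives the chunk-array allocation size from a user-supplied chunk count, so a 32-bit unsigned int can truncate the 64-bit product. A minimal standalone sketch of that truncation (toy code, not the driver's; assumes an LP64 host where size_t is 64-bit):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t num_chunks = 0x20000001u;	/* user-controlled count */

	/* sizeof(uint64_t) promotes the multiply to 64-bit size_t, but
	 * storing the product 0x100000008 in a 32-bit unsigned int
	 * truncates it to 8 -- a tiny allocation for a huge count. */
	unsigned int narrow = num_chunks * sizeof(uint64_t);
	size_t wide = (size_t)num_chunks * sizeof(uint64_t);

	printf("unsigned int: %u bytes\n", narrow);
	printf("size_t:       %zu bytes\n", wide);
	return 0;
}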
@@ -1609,15 +1609,15 @@ static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
 			continue;
 
 		r = dma_fence_wait_timeout(fence, true, timeout);
+		if (r > 0 && fence->error)
+			r = fence->error;
+
 		dma_fence_put(fence);
 		if (r < 0)
 			return r;
 
 		if (r == 0)
 			break;
-
-		if (fence->error)
-			return fence->error;
 	}
 
 	memset(wait, 0, sizeof(*wait));
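Note: the hunk above latches fence->error into r before dma_fence_put(), because the put may drop the last reference and free the fence; the removed lines read fence->error after the put. A toy refcount sketch of the pattern (illustration only, not the kernel's dma_fence API):

#include <stdio.h>
#include <stdlib.h>

/* Toy refcounted fence, just to show the capture-before-put order. */
struct toy_fence {
	int refcount;
	int error;
};

static void toy_fence_put(struct toy_fence *f)
{
	if (--f->refcount == 0)
		free(f);	/* last reference: object is gone */
}

int main(void)
{
	struct toy_fence *fence = malloc(sizeof(*fence));
	fence->refcount = 1;
	fence->error = -5;	/* e.g. an -EIO recorded by the signaler */

	int r = 1;		/* pretend the wait itself succeeded */
	/* Pattern after the fix: read the error while we still hold a
	 * reference, then drop it. */
	if (r > 0 && fence->error)
		r = fence->error;
	toy_fence_put(fence);
	/* fence must not be touched past this point; reading
	 * fence->error here is the use-after-free the hunk removes. */

	printf("r = %d\n", r);
	return 0;
}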
amdgpu_device.c
@@ -4354,6 +4354,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
 		drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
 
 	cancel_delayed_work_sync(&adev->delayed_init_work);
+	flush_delayed_work(&adev->gfx.gfx_off_delay_work);
 
 	amdgpu_ras_suspend(adev);
 
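Note: flush_delayed_work() waits for a queued handler to run to completion, while cancel_delayed_work_sync() stops it from running at all; the suspend path wants the pending GFXOFF transition finished, not skipped. A rough pthread model of the flush semantics (toy code, not the kernel workqueue API; names are made up):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* build: cc flush_demo.c -lpthread */

static int gfx_off_state;
static pthread_t worker;

static void *gfx_off_work(void *arg)
{
	(void)arg;
	usleep(100 * 1000);	/* stands in for the GFXOFF delay */
	gfx_off_state = 1;	/* work body: enter the GFXOFF state */
	return NULL;
}

static void schedule_work(void)
{
	pthread_create(&worker, NULL, gfx_off_work, NULL);
}

static void flush_work(void)
{
	pthread_join(worker, NULL);	/* wait until the handler finished */
}

int main(void)
{
	schedule_work();
	/* Suspend path: flush instead of cancel, so the state machine is
	 * settled before the device powers down. */
	flush_work();
	printf("gfx_off_state = %d\n", gfx_off_state);
	return 0;
}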
amdgpu_fence.c
@@ -518,6 +518,41 @@ int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev)
 	return 0;
 }
 
+/**
+ * amdgpu_fence_need_ring_interrupt_restore - helper function to check whether
+ * fence driver interrupts need to be restored.
+ *
+ * @ring: ring that to be checked
+ *
+ * Interrupts for rings that belong to GFX IP don't need to be restored
+ * when the target power state is s0ix.
+ *
+ * Return true if need to restore interrupts, false otherwise.
+ */
+static bool amdgpu_fence_need_ring_interrupt_restore(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	bool is_gfx_power_domain = false;
+
+	switch (ring->funcs->type) {
+	case AMDGPU_RING_TYPE_SDMA:
+		/* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
+		if (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(5, 0, 0))
+			is_gfx_power_domain = true;
+		break;
+	case AMDGPU_RING_TYPE_GFX:
+	case AMDGPU_RING_TYPE_COMPUTE:
+	case AMDGPU_RING_TYPE_KIQ:
+	case AMDGPU_RING_TYPE_MES:
+		is_gfx_power_domain = true;
+		break;
+	default:
+		break;
+	}
+
+	return !(adev->in_s0ix && is_gfx_power_domain);
+}
+
 /**
  * amdgpu_fence_driver_hw_fini - tear down the fence driver
  * for all possible rings.
@@ -546,7 +581,8 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
 		amdgpu_fence_driver_force_completion(ring);
 
 		if (!drm_dev_is_unplugged(adev_to_drm(adev)) &&
-		    ring->fence_drv.irq_src)
+		    ring->fence_drv.irq_src &&
+		    amdgpu_fence_need_ring_interrupt_restore(ring))
 			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
 				       ring->fence_drv.irq_type);
 
@@ -624,7 +660,8 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
 			continue;
 
 		/* enable the interrupt */
-		if (ring->fence_drv.irq_src)
+		if (ring->fence_drv.irq_src &&
+		    amdgpu_fence_need_ring_interrupt_restore(ring))
 			amdgpu_irq_get(adev, ring->fence_drv.irq_src,
 				       ring->fence_drv.irq_type);
 	}
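Note: the same amdgpu_fence_need_ring_interrupt_restore() predicate gates both the irq_put in hw_fini and the irq_get in hw_init, so the interrupt enable count stays balanced whether or not a given suspend cycle goes through s0ix. A toy model of that balance (illustration only; the names and refcount are made up):

#include <stdbool.h>
#include <stdio.h>

static int irq_refcount = 1;	/* ring interrupt enabled once at init */

static bool need_restore(bool in_s0ix, bool is_gfx_power_domain)
{
	return !(in_s0ix && is_gfx_power_domain);
}

static void hw_fini(bool in_s0ix, bool is_gfx)
{
	if (need_restore(in_s0ix, is_gfx))
		irq_refcount--;	/* stands in for amdgpu_irq_put() */
}

static void hw_init(bool in_s0ix, bool is_gfx)
{
	if (need_restore(in_s0ix, is_gfx))
		irq_refcount++;	/* stands in for amdgpu_irq_get() */
}

int main(void)
{
	/* s0ix cycle on a GFX-domain ring: both sides skip, count stays 1 */
	hw_fini(true, true);
	hw_init(true, true);
	printf("after s0ix cycle: %d\n", irq_refcount);

	/* ordinary S3 cycle: put then get, count returns to 1 */
	hw_fini(false, true);
	hw_init(false, true);
	printf("after S3 cycle:   %d\n", irq_refcount);
	return 0;
}

If only one side used the predicate, the count would drift by one per suspend/resume cycle, which is exactly the imbalance the paired hunks avoid.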
amdgpu_gfx.c
@@ -587,15 +587,8 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
 
 		if (adev->gfx.gfx_off_req_count == 0 &&
 		    !adev->gfx.gfx_off_state) {
-			/* If going to s2idle, no need to wait */
-			if (adev->in_s0ix) {
-				if (!amdgpu_dpm_set_powergating_by_smu(adev,
-						AMD_IP_BLOCK_TYPE_GFX, true))
-					adev->gfx.gfx_off_state = true;
-			} else {
-				schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
+			schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
 					      delay);
-			}
 		}
 	} else {
 		if (adev->gfx.gfx_off_req_count == 0) {
amdgpu_irq.c
@@ -160,7 +160,6 @@ void amdgpu_irq_disable_all(struct amdgpu_device *adev)
 			continue;
 
 		for (k = 0; k < src->num_types; ++k) {
-			atomic_set(&src->enabled_types[k], 0);
 			r = src->funcs->set(adev, src, k,
 					    AMDGPU_IRQ_STATE_DISABLE);
 			if (r)
amdgpu_psp.c
@@ -514,6 +514,8 @@ static int psp_sw_fini(void *handle)
 	kfree(cmd);
 	cmd = NULL;
 
+	psp_free_shared_bufs(psp);
+
 	if (psp->km_ring.ring_mem)
 		amdgpu_bo_free_kernel(&adev->firmware.rbuf,
 				      &psp->km_ring.ring_mem_mc_addr,
@@ -2686,8 +2688,6 @@ static int psp_hw_fini(void *handle)
 
 	psp_ring_destroy(psp, PSP_RING_TYPE__KM);
 
-	psp_free_shared_bufs(psp);
-
 	return 0;
 }
 
amdgpu_ring.c
@@ -361,6 +361,8 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
 		amdgpu_bo_free_kernel(&ring->ring_obj,
 				      &ring->gpu_addr,
 				      (void **)&ring->ring);
+	} else {
+		kfree(ring->fence_drv.fences);
 	}
 
 	dma_fence_put(ring->vmid_wait);
amdgpu_vm.c
@@ -1414,6 +1414,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
 	amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
 
 	bo_va->ref_count = 1;
+	bo_va->last_pt_update = dma_fence_get_stub();
 	INIT_LIST_HEAD(&bo_va->valids);
 	INIT_LIST_HEAD(&bo_va->invalids);
 
@@ -2142,7 +2143,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 		vm->update_funcs = &amdgpu_vm_cpu_funcs;
 	else
 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
-	vm->last_update = NULL;
+
+	vm->last_update = dma_fence_get_stub();
 	vm->last_unlocked = dma_fence_get_stub();
 	vm->last_tlb_flush = dma_fence_get_stub();
 
@@ -2271,7 +2273,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 		goto unreserve_bo;
 
 	dma_fence_put(vm->last_update);
-	vm->last_update = NULL;
+	vm->last_update = dma_fence_get_stub();
 	vm->is_compute_context = true;
 
 	/* Free the shadow bo for compute VM */
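Note: the three amdgpu_vm hunks replace NULL fence pointers with dma_fence_get_stub(), which returns a reference to a shared, already-signaled fence, so consumers can wait on and put the pointer unconditionally instead of NULL-checking at every use site. A toy sketch of the idea (illustration only, not the kernel's dma_fence implementation):

#include <stdbool.h>
#include <stdio.h>

struct toy_fence {
	int refcount;
	bool signaled;
};

/* One shared, permanently-signaled fence; the real stub is a
 * refcounted singleton in much the same spirit. */
static struct toy_fence stub = { .refcount = 1, .signaled = true };

static struct toy_fence *toy_fence_get_stub(void)
{
	stub.refcount++;
	return &stub;
}

static void toy_fence_wait(struct toy_fence *f)
{
	while (!f->signaled)
		;		/* never spins for the stub */
}

static void toy_fence_put(struct toy_fence *f)
{
	f->refcount--;		/* the static stub is never freed here */
}

int main(void)
{
	/* vm->last_update starts as a stub instead of NULL, so this
	 * wait/put pair needs no NULL guard. */
	struct toy_fence *last_update = toy_fence_get_stub();

	toy_fence_wait(last_update);
	toy_fence_put(last_update);
	printf("waited on stub without NULL checks\n");
	return 0;
}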