sync with OpenBSD -current

commit 1cefe29c7e
parent ee68147dcd

1651 changed files with 283292 additions and 68089 deletions
@@ -47,6 +47,32 @@ static int amdgpu_vm_sdma_map_table(struct amdgpu_bo_vm *table)
 	return r;
 }
 
+/* Allocate a new job for @count PTE updates */
+static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p,
+				    unsigned int count)
+{
+	enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
+		: AMDGPU_IB_POOL_DELAYED;
+	struct drm_sched_entity *entity = p->immediate ? &p->vm->immediate
+		: &p->vm->delayed;
+	unsigned int ndw;
+	int r;
+
+	/* estimate how many dw we need */
+	ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
+	if (p->pages_addr)
+		ndw += count * 2;
+	ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);
+
+	r = amdgpu_job_alloc_with_ib(p->adev, entity, AMDGPU_FENCE_OWNER_VM,
+				     ndw * 4, pool, &p->job);
+	if (r)
+		return r;
+
+	p->num_dw_left = ndw;
+	return 0;
+}
+
 /**
  * amdgpu_vm_sdma_prepare - prepare SDMA command submission
  *
@@ -61,21 +87,22 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
 				  struct dma_resv *resv,
 				  enum amdgpu_sync_mode sync_mode)
 {
-	enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
-		: AMDGPU_IB_POOL_DELAYED;
-	unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
+	struct amdgpu_sync sync;
 	int r;
 
-	r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, pool, &p->job);
+	r = amdgpu_vm_sdma_alloc_job(p, 0);
 	if (r)
 		return r;
 
-	p->num_dw_left = ndw;
-
 	if (!resv)
 		return 0;
 
-	return amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode, p->vm);
+	amdgpu_sync_create(&sync);
+	r = amdgpu_sync_resv(p->adev, &sync, resv, sync_mode, p->vm);
+	if (!r)
+		r = amdgpu_sync_push_to_job(&sync, p->job);
+	amdgpu_sync_free(&sync);
+	return r;
 }
 
 /**
@@ -91,20 +118,16 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
 				 struct dma_fence **fence)
 {
 	struct amdgpu_ib *ib = p->job->ibs;
-	struct drm_sched_entity *entity;
 	struct amdgpu_ring *ring;
 	struct dma_fence *f;
-	int r;
 
-	entity = p->immediate ? &p->vm->immediate : &p->vm->delayed;
-	ring = container_of(entity->rq->sched, struct amdgpu_ring, sched);
+	ring = container_of(p->vm->delayed.rq->sched, struct amdgpu_ring,
+			    sched);
 
 	WARN_ON(ib->length_dw == 0);
 	amdgpu_ring_pad_ib(ring, ib);
 	WARN_ON(ib->length_dw > p->num_dw_left);
-	r = amdgpu_job_submit(p->job, entity, AMDGPU_FENCE_OWNER_VM, &f);
-	if (r)
-		goto error;
+	f = amdgpu_job_submit(p->job);
 
 	if (p->unlocked) {
 		struct dma_fence *tmp = dma_fence_get(f);
@@ -127,10 +150,6 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
 	}
 	dma_fence_put(f);
 	return 0;
-
-error:
-	amdgpu_job_free(p->job);
-	return r;
 }
 
 /**
@@ -152,7 +171,7 @@ static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p,
 
 	src += p->num_dw_left * 4;
 
-	pe += amdgpu_gmc_sign_extend(amdgpu_bo_gpu_offset_no_check(bo));
+	pe += amdgpu_bo_gpu_offset_no_check(bo);
 	trace_amdgpu_vm_copy_ptes(pe, src, count, p->immediate);
 
 	amdgpu_vm_copy_pte(p->adev, ib, pe, src, count);
@@ -179,7 +198,7 @@ static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
 {
 	struct amdgpu_ib *ib = p->job->ibs;
 
-	pe += amdgpu_gmc_sign_extend(amdgpu_bo_gpu_offset_no_check(bo));
+	pe += amdgpu_bo_gpu_offset_no_check(bo);
 	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->immediate);
 	if (count < 3) {
 		amdgpu_vm_write_pte(p->adev, ib, pe, addr | flags,
@@ -210,8 +229,6 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
 				 uint64_t flags)
 {
 	struct amdgpu_bo *bo = &vmbo->bo;
-	enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
-		: AMDGPU_IB_POOL_DELAYED;
 	struct dma_resv_iter cursor;
 	unsigned int i, ndw, nptes;
 	struct dma_fence *fence;
@@ -221,8 +238,10 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
 	/* Wait for PD/PT moves to be completed */
 	dma_resv_iter_begin(&cursor, bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL);
 	dma_resv_for_each_fence_unlocked(&cursor, fence) {
-		r = amdgpu_sync_fence(&p->job->sync, fence);
+		dma_fence_get(fence);
+		r = drm_sched_job_add_dependency(&p->job->base, fence);
 		if (r) {
+			dma_fence_put(fence);
 			dma_resv_iter_end(&cursor);
 			return r;
 		}
@@ -238,19 +257,9 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
 		if (r)
 			return r;
 
-		/* estimate how many dw we need */
-		ndw = 32;
-		if (p->pages_addr)
-			ndw += count * 2;
-		ndw = max(ndw, AMDGPU_VM_SDMA_MIN_NUM_DW);
-		ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);
-
-		r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, pool,
-					     &p->job);
+		r = amdgpu_vm_sdma_alloc_job(p, count);
 		if (r)
 			return r;
-
-		p->num_dw_left = ndw;
 	}
 
 	if (!p->pages_addr) {