sync with OpenBSD -current
parent ee68147dcd
commit 1cefe29c7e
1651 changed files with 283292 additions and 68089 deletions
@@ -33,11 +33,14 @@
 #include <drm/amdgpu_drm.h>
 #include <drm/drm_drv.h>
+#include <drm/drm_exec.h>
 #include <drm/drm_gem_ttm_helper.h>
+#include <drm/ttm/ttm_tt.h>
 
 #include "amdgpu.h"
 #include "amdgpu_display.h"
 #include "amdgpu_dma_buf.h"
+#include "amdgpu_hmm.h"
 #include "amdgpu_xgmi.h"
 
 static const struct drm_gem_object_funcs amdgpu_gem_object_funcs;
@@ -61,10 +64,10 @@ static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)
 			goto unlock;
 		}
 
-		ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
-					       TTM_BO_VM_NUM_PREFAULT);
+		ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
+					       TTM_BO_VM_NUM_PREFAULT);
 
-		drm_dev_exit(idx);
+		drm_dev_exit(idx);
 	} else {
 		ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
 	}
@@ -178,7 +181,7 @@ static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
 	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);
 
 	if (robj) {
-		amdgpu_mn_unregister(robj);
+		amdgpu_hmm_unregister(robj);
 		amdgpu_bo_unref(&robj);
 	}
 }
@@ -187,7 +190,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
 			     int alignment, u32 initial_domain,
 			     u64 flags, enum ttm_bo_type type,
 			     struct dma_resv *resv,
-			     struct drm_gem_object **obj)
+			     struct drm_gem_object **obj, int8_t xcp_id_plus1)
 {
 	struct amdgpu_bo *bo;
 	struct amdgpu_bo_user *ubo;
@@ -205,6 +208,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
 	bp.flags = flags;
 	bp.domain = initial_domain;
 	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+	bp.xcp_id_plus1 = xcp_id_plus1;
 
 	r = amdgpu_bo_create_user(adev, &bp, &ubo);
 	if (r)
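For readers tracking the API change above: the new xcp_id_plus1 parameter carries the caller's accelerated-processing partition (XCP) id shifted by one, so a zero-initialized amdgpu_bo_param still means "no partition selected". A minimal sketch of the presumed decode on the allocation side (the helper name is illustrative, not from this tree):

	/* Sketch only: how a +1-encoded partition id decodes.
	 * 0 means "no XCP selected"; anything else is xcp_id + 1. */
	static inline int8_t amdgpu_xcp_id_from_plus1(int8_t xcp_id_plus1)
	{
		return xcp_id_plus1 - 1;	/* -1 when unset */
	}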
@@ -280,11 +284,10 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj,
 		return r;
 
 	bo_va = amdgpu_vm_bo_find(vm, abo);
-	if (!bo_va) {
+	if (!bo_va)
 		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
-	} else {
+	else
 		++bo_va->ref_count;
-	}
 	amdgpu_bo_unreserve(abo);
 	return 0;
 }
@@ -297,29 +300,24 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
 	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
 	struct amdgpu_vm *vm = &fpriv->vm;
 
-	struct amdgpu_bo_list_entry vm_pd;
-	struct list_head list, duplicates;
 	struct dma_fence *fence = NULL;
-	struct ttm_validate_buffer tv;
-	struct ww_acquire_ctx ticket;
 	struct amdgpu_bo_va *bo_va;
+	struct drm_exec exec;
 	long r;
 
-	INIT_LIST_HEAD(&list);
-	INIT_LIST_HEAD(&duplicates);
+	drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES);
+	drm_exec_until_all_locked(&exec) {
+		r = drm_exec_prepare_obj(&exec, &bo->tbo.base, 1);
+		drm_exec_retry_on_contention(&exec);
+		if (unlikely(r))
+			goto out_unlock;
 
-	tv.bo = &bo->tbo;
-	tv.num_shared = 2;
-	list_add(&tv.head, &list);
-
-	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
-
-	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
-	if (r) {
-		dev_err(adev->dev, "leaking bo va because "
-			"we fail to reserve bo (%ld)\n", r);
-		return;
+		r = amdgpu_vm_lock_pd(vm, &exec, 0);
+		drm_exec_retry_on_contention(&exec);
+		if (unlikely(r))
+			goto out_unlock;
 	}
 
 	bo_va = amdgpu_vm_bo_find(vm, bo);
 	if (!bo_va || --bo_va->ref_count)
 		goto out_unlock;
@@ -329,6 +327,9 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
 		goto out_unlock;
 
 	r = amdgpu_vm_clear_freed(adev, vm, &fence);
+	if (unlikely(r < 0))
+		dev_err(adev->dev, "failed to clear page "
+			"tables on GEM object close (%ld)\n", r);
 	if (r || !fence)
 		goto out_unlock;
 
@@ -336,10 +337,9 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
 	dma_fence_put(fence);
 
 out_unlock:
-	if (unlikely(r < 0))
-		dev_err(adev->dev, "failed to clear page "
-			"tables on GEM object close (%ld)\n", r);
-	ttm_eu_backoff_reservation(&ticket, &list);
+	if (r)
+		dev_err(adev->dev, "leaking bo va (%ld)\n", r);
+	drm_exec_fini(&exec);
 }
 
 #ifdef __linux__
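The amdgpu_gem_object_close() conversion above is the general drm_exec shape this sync adopts throughout the file: take every reservation inside a retry loop, let contention back off and restart the loop, and funnel all exits through a single drm_exec_fini(). A minimal self-contained sketch of the pattern, using the same two-argument drm_exec_init() as this tree (the function and object names are illustrative, not from the source):

	#include <drm/drm_exec.h>

	/* Sketch: lock two GEM objects with drm_exec. On contention the
	 * loop drops the locks already taken and retries; cleanup then
	 * happens exactly once at the end. */
	static int example_lock_two(struct drm_gem_object *a,
				    struct drm_gem_object *b)
	{
		struct drm_exec exec;
		int r;

		drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES);
		drm_exec_until_all_locked(&exec) {
			/* reserve a, with room for one fence slot */
			r = drm_exec_prepare_obj(&exec, a, 1);
			drm_exec_retry_on_contention(&exec);
			if (unlikely(r))
				goto out;

			r = drm_exec_prepare_obj(&exec, b, 1);
			drm_exec_retry_on_contention(&exec);
			if (unlikely(r))
				goto out;
		}

		/* both reservations held here */
	out:
		drm_exec_fini(&exec);
		return r;
	}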
@@ -358,8 +358,8 @@ static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
 	 * becoming writable and makes is_cow_mapping(vm_flags) false.
 	 */
 	if (is_cow_mapping(vma->vm_flags) &&
-	    !(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
-		vma->vm_flags &= ~VM_MAYWRITE;
+	    !(vma->vm_flags & VM_ACCESS_FLAGS))
+		vm_flags_clear(vma, VM_MAYWRITE);
 
 	return drm_gem_ttm_mmap(obj, vma);
 }
@@ -417,6 +417,10 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 	uint32_t handle, initial_domain;
 	int r;
 
+	/* reject DOORBELLs until userspace code to use it is available */
+	if (args->in.domains & AMDGPU_GEM_DOMAIN_DOORBELL)
+		return -EINVAL;
+
 	/* reject invalid gem flags */
 	if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
 		      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
@@ -462,7 +466,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 retry:
 	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
 				     initial_domain,
-				     flags, ttm_bo_type_device, resv, &gobj);
+				     flags, ttm_bo_type_device, resv, &gobj, fpriv->xcp_id + 1);
 	if (r && r != -ERESTARTSYS) {
 		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
 			flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
@@ -507,6 +511,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 	struct ttm_operation_ctx ctx = { true, false };
 	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct drm_amdgpu_gem_userptr *args = data;
+	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 	struct drm_gem_object *gobj;
 	struct hmm_range *range;
 	struct amdgpu_bo *bo;
@@ -533,7 +538,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 
 	/* create a gem object to contain this object in */
 	r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
-				     0, ttm_bo_type_device, NULL, &gobj);
+				     0, ttm_bo_type_device, NULL, &gobj, fpriv->xcp_id + 1);
 	if (r)
 		return r;
 
@@ -544,7 +549,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 	if (r)
 		goto release_object;
 
-	r = amdgpu_mn_register(bo, args->addr);
+	r = amdgpu_hmm_register(bo, args->addr);
 	if (r)
 		goto release_object;
 
@@ -590,9 +595,9 @@ int amdgpu_mode_dumb_mmap(struct drm_file *filp,
 	struct amdgpu_bo *robj;
 
 	gobj = drm_gem_object_lookup(filp, handle);
-	if (gobj == NULL) {
+	if (!gobj)
 		return -ENOENT;
-	}
 
 	robj = gem_to_amdgpu_bo(gobj);
 	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
 	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
@@ -609,6 +614,7 @@ int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
 {
 	union drm_amdgpu_gem_mmap *args = data;
 	uint32_t handle = args->in.handle;
 
+	memset(args, 0, sizeof(*args));
 	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
 }
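The added memset() above matters because drm_amdgpu_gem_mmap is an in/out union: the input handle and the output address share storage, so the handle is copied to a local before the argument buffer is cleared for output, which also avoids returning stale bytes to userspace. A simplified sketch of the presumed layout (see include/uapi/drm/amdgpu_drm.h for the real definition):

	/* Simplified sketch of the in/out union this ioctl fills.
	 * Reading in.handle first, then memset(), avoids clobbering
	 * the input before it has been consumed. */
	union drm_amdgpu_gem_mmap_sketch {
		struct { __u32 handle; __u32 _pad; } in;
		struct { __u64 addr_ptr; } out;
	};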
@@ -635,7 +641,7 @@ unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
 
 	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
 	/* clamp timeout to avoid unsigned-> signed overflow */
-	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT )
+	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
 		return MAX_SCHEDULE_TIMEOUT - 1;
 
 	return timeout_jiffies;
@@ -653,9 +659,9 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 	long ret;
 
 	gobj = drm_gem_object_lookup(filp, handle);
-	if (gobj == NULL) {
+	if (!gobj)
 		return -ENOENT;
-	}
 
 	robj = gem_to_amdgpu_bo(gobj);
 	ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ,
 				    true, timeout);
@@ -682,7 +688,7 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
 	struct amdgpu_bo *robj;
 	int r = -1;
 
-	DRM_DEBUG("%d \n", args->handle);
+	DRM_DEBUG("%d\n", args->handle);
 	gobj = drm_gem_object_lookup(filp, args->handle);
 	if (gobj == NULL)
 		return -ENOENT;
@@ -802,17 +808,14 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 	struct amdgpu_bo *abo;
 	struct amdgpu_bo_va *bo_va;
-	struct amdgpu_bo_list_entry vm_pd;
-	struct ttm_validate_buffer tv;
-	struct ww_acquire_ctx ticket;
-	struct list_head list, duplicates;
+	struct drm_exec exec;
 	uint64_t va_flags;
 	uint64_t vm_size;
 	int r = 0;
 
 	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
 		dev_dbg(dev->dev,
-			"va_address 0x%llX is in reserved area 0x%llX\n",
+			"va_address 0x%llx is in reserved area 0x%llx\n",
 			args->va_address, AMDGPU_VA_RESERVED_SIZE);
 		return -EINVAL;
 	}
@@ -820,7 +823,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	if (args->va_address >= AMDGPU_GMC_HOLE_START &&
 	    args->va_address < AMDGPU_GMC_HOLE_END) {
 		dev_dbg(dev->dev,
-			"va_address 0x%llX is in VA hole 0x%llX-0x%llX\n",
+			"va_address 0x%llx is in VA hole 0x%llx-0x%llx\n",
 			args->va_address, AMDGPU_GMC_HOLE_START,
 			AMDGPU_GMC_HOLE_END);
 		return -EINVAL;
@@ -855,36 +858,38 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
-	INIT_LIST_HEAD(&list);
-	INIT_LIST_HEAD(&duplicates);
 	if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
 	    !(args->flags & AMDGPU_VM_PAGE_PRT)) {
 		gobj = drm_gem_object_lookup(filp, args->handle);
 		if (gobj == NULL)
 			return -ENOENT;
 		abo = gem_to_amdgpu_bo(gobj);
-		tv.bo = &abo->tbo;
-		if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
-			tv.num_shared = 1;
-		else
-			tv.num_shared = 0;
-		list_add(&tv.head, &list);
 	} else {
 		gobj = NULL;
 		abo = NULL;
 	}
 
-	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
+	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
+		      DRM_EXEC_IGNORE_DUPLICATES);
+	drm_exec_until_all_locked(&exec) {
+		if (gobj) {
+			r = drm_exec_lock_obj(&exec, gobj);
+			drm_exec_retry_on_contention(&exec);
+			if (unlikely(r))
+				goto error;
+		}
 
-	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
-	if (r)
-		goto error_unref;
+		r = amdgpu_vm_lock_pd(&fpriv->vm, &exec, 2);
+		drm_exec_retry_on_contention(&exec);
+		if (unlikely(r))
+			goto error;
+	}
 
 	if (abo) {
 		bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
 		if (!bo_va) {
 			r = -ENOENT;
-			goto error_backoff;
+			goto error;
 		}
 	} else if (args->operation != AMDGPU_VA_OP_CLEAR) {
 		bo_va = fpriv->prt_va;
@@ -921,10 +926,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 		amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
 					args->operation);
 
-error_backoff:
-	ttm_eu_backoff_reservation(&ticket, &list);
-
-error_unref:
+error:
+	drm_exec_fini(&exec);
 	drm_gem_object_put(gobj);
 	return r;
 }
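Note the deliberate asymmetry between the two drm_exec conversions in this file: the VA ioctl initializes with DRM_EXEC_INTERRUPTIBLE_WAIT, presumably because it runs under a userspace syscall that should honor signals, while the object-close path omits that flag since teardown cannot be meaningfully restarted. Both replace the old two-label ttm_eu unwind with a single error label ending in drm_exec_fini().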
@@ -940,9 +943,9 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
 	int r;
 
 	gobj = drm_gem_object_lookup(filp, args->handle);
-	if (gobj == NULL) {
+	if (!gobj)
 		return -ENOENT;
-	}
 
 	robj = gem_to_amdgpu_bo(gobj);
 
 	r = amdgpu_bo_reserve(robj, false);
@@ -1037,6 +1040,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
 			    struct drm_mode_create_dumb *args)
 {
 	struct amdgpu_device *adev = drm_to_adev(dev);
+	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
 	struct drm_gem_object *gobj;
 	uint32_t handle;
 	u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
@@ -1056,20 +1060,20 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
 	args->pitch = amdgpu_gem_align_pitch(adev, args->width,
 					     DIV_ROUND_UP(args->bpp, 8), 0);
 	args->size = (u64)args->pitch * args->height;
-	args->size = roundup2(args->size, PAGE_SIZE);
+	args->size = ALIGN(args->size, PAGE_SIZE);
 	domain = amdgpu_bo_get_preferred_domain(adev,
 				amdgpu_display_supported_domains(adev, flags));
 	r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags,
-				     ttm_bo_type_device, NULL, &gobj);
+				     ttm_bo_type_device, NULL, &gobj, fpriv->xcp_id + 1);
 	if (r)
 		return -ENOMEM;
 
 	r = drm_gem_handle_create(file_priv, gobj, &handle);
 	/* drop reference from allocate - handle holds it now */
 	drm_gem_object_put(gobj);
-	if (r) {
+	if (r)
 		return r;
-	}
 
 	args->handle = handle;
 	return 0;
 }
@@ -1077,7 +1081,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
 #if defined(CONFIG_DEBUG_FS)
 static int amdgpu_debugfs_gem_info_show(struct seq_file *m, void *unused)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+	struct amdgpu_device *adev = m->private;
 	struct drm_device *dev = adev_to_drm(adev);
 	struct drm_file *file;
 	int r;
@@ -1089,6 +1093,7 @@ static int amdgpu_debugfs_gem_info_show(struct seq_file *m, void *unused)
 	list_for_each_entry(file, &dev->filelist, lhead) {
 		struct task_struct *task;
 		struct drm_gem_object *gobj;
+		struct pid *pid;
 		int id;
 
 		/*
@@ -1098,8 +1103,9 @@ static int amdgpu_debugfs_gem_info_show(struct seq_file *m, void *unused)
 		 * Therefore, we need to protect this ->comm access using RCU.
 		 */
 		rcu_read_lock();
-		task = pid_task(file->pid, PIDTYPE_PID);
-		seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
+		pid = rcu_dereference(file->pid);
+		task = pid_task(pid, PIDTYPE_TGID);
+		seq_printf(m, "pid %8d command %s:\n", pid_nr(pid),
 			   task ? task->comm : "<unknown>");
 		rcu_read_unlock();