sync with OpenBSD -current

purplerain 2024-03-27 04:10:08 +00:00
parent 56a087cff9
commit 0189975fb5
Signed by: purplerain
GPG key ID: F42C07F07E2E35B7
61 changed files with 1691 additions and 1177 deletions


@@ -3448,22 +3448,34 @@ amdgpu_init_backlight(struct amdgpu_device *adev)
    struct backlight_device *bd = adev->dm.backlight_dev[0];
    struct drm_connector_list_iter conn_iter;
    struct drm_connector *connector;
    struct amdgpu_dm_connector *aconnector;

    if (bd == NULL)
        return;

    drm_connector_list_iter_begin(dev, &conn_iter);
    drm_for_each_connector_iter(connector, &conn_iter) {
        if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS &&
            connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
            connector->connector_type != DRM_MODE_CONNECTOR_DSI)
        aconnector = to_amdgpu_dm_connector(connector);

        if (connector->registration_state != DRM_CONNECTOR_REGISTERED)
            continue;
        if (aconnector->bl_idx == -1)
            continue;

        dev->registered = false;
        connector->registration_state = DRM_CONNECTOR_UNREGISTERED;
        connector->backlight_device = bd;
        connector->backlight_property = drm_property_create_range(dev,
            0, "Backlight", 0, bd->props.max_brightness);
        drm_object_attach_property(&connector->base,
            connector->backlight_property, bd->props.brightness);
        connector->registration_state = DRM_CONNECTOR_REGISTERED;
        dev->registered = true;
        break;
    }
    drm_connector_list_iter_end(&conn_iter);
}
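The hunk above picks the connector the panel backlight belongs to and attaches a "Backlight" range property (0 .. max_brightness) to it. A property attached this way is visible through the ordinary KMS property interfaces. Below is a minimal, purely illustrative userspace sketch using libdrm that looks up such a property on a connector and prints its current and maximum value; the device path and connector ID are placeholders, and the sketch is not part of this commit.

/* Hypothetical userspace sketch: read a "Backlight" connector property
 * via libdrm. Device path and connector ID are illustrative only. */
#include <fcntl.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

int
main(void)
{
    int fd = open("/dev/dri/card0", O_RDWR);    /* adjust for your OS */
    uint32_t connector_id = 42;                 /* placeholder ID */
    drmModeObjectPropertiesPtr props;
    uint32_t i;

    if (fd < 0)
        return 1;

    props = drmModeObjectGetProperties(fd, connector_id,
        DRM_MODE_OBJECT_CONNECTOR);
    if (props == NULL)
        return 1;

    for (i = 0; i < props->count_props; i++) {
        drmModePropertyPtr prop = drmModeGetProperty(fd, props->props[i]);

        if (prop == NULL)
            continue;
        /* range properties carry their bounds in values[0]/values[1] */
        if (strcmp(prop->name, "Backlight") == 0 && prop->count_values == 2)
            printf("brightness %llu of %llu\n",
                (unsigned long long)props->prop_values[i],
                (unsigned long long)prop->values[1]);
        drmModeFreeProperty(prop);
    }
    drmModeFreeObjectProperties(props);
    return 0;
}

Compile with -ldrm; for a DRM_MODE_PROP_RANGE property the maximum sits in prop->values[1], which is where the second number comes from.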


@@ -1449,8 +1449,18 @@ i915_gem_mmap(struct file *filp, vm_prot_t accessprot,
         * destroyed and will be invalid when the vma manager lock
         * is released.
         */
        mmo = container_of(node, struct i915_mmap_offset, vma_node);
        obj = i915_gem_object_get_rcu(mmo->obj);
        if (!node->driver_private) {
            mmo = container_of(node, struct i915_mmap_offset, vma_node);
            obj = i915_gem_object_get_rcu(mmo->obj);

            GEM_BUG_ON(obj && obj->ops->mmap_ops);
        } else {
            obj = i915_gem_object_get_rcu
                (container_of(node, struct drm_i915_gem_object,
                    base.vma_node));

            GEM_BUG_ON(obj && !obj->ops->mmap_ops);
        }
    }
    drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
    rcu_read_unlock();
@@ -1464,6 +1474,9 @@ i915_gem_mmap(struct file *filp, vm_prot_t accessprot,
        }
    }

    if (obj->ops->mmap_ops)
        uvm_obj_init(&obj->base.uobj, obj->ops->mmap_ops, 1);

    return &obj->base.uobj;
}
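The new branch keys off node->driver_private to decide what the looked-up drm_vma_offset_node is embedded in: an i915_mmap_offset for ordinary mmap offsets, or the gem object's own base.vma_node for TTM-backed objects. Either way the containing structure is recovered with container_of. The standalone sketch below shows that pointer arithmetic in isolation; the struct names are made up for the demo and are not the driver's.

/* Illustrative demo of the container_of pattern used above; the struct
 * names here are invented and do not match the i915 definitions. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct vma_node { unsigned long start; };

struct mmap_offset {                /* analogue of i915_mmap_offset */
    int obj_id;
    struct vma_node vma_node;       /* node embedded mid-struct */
};

int
main(void)
{
    struct mmap_offset mmo = { .obj_id = 7, .vma_node = { 0x1000 } };
    struct vma_node *node = &mmo.vma_node;  /* what a lookup would hand back */

    /* Step back from the embedded member to the containing struct. */
    struct mmap_offset *owner = container_of(node, struct mmap_offset, vma_node);

    printf("obj_id = %d\n", owner->obj_id); /* prints 7 */
    return 0;
}

container_of subtracts the member's offset from the member's address, so it is only valid when the node really is embedded in the named type, which is exactly what the driver_private flag distinguishes in the hunk above.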


@@ -114,7 +114,11 @@ struct drm_i915_gem_object_ops {
    void (*release)(struct drm_i915_gem_object *obj);

#ifdef __linux__
    const struct vm_operations_struct *mmap_ops;
#else
    const struct uvm_pagerops *mmap_ops;
#endif
    const char *name; /* friendly name for debug, e.g. lockdep classes */
};


@@ -1067,7 +1067,8 @@ static void i915_ttm_delayed_free(struct drm_i915_gem_object *obj)
    ttm_bo_put(i915_gem_to_ttm(obj));
}

#ifdef notyet
#ifdef __linux__
static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
{
    struct vm_area_struct *area = vmf->vma;
@@ -1219,6 +1220,187 @@ static const struct vm_operations_struct vm_ops_ttm = {
    .close = ttm_vm_close,
};
#else /* !__linux__ */
static int
vm_fault_ttm(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
    int npages, int centeridx, vm_fault_t fault_type,
    vm_prot_t access_type, int flags)
{
    struct uvm_object *uobj = ufi->entry->object.uvm_obj;
    struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj;
    struct drm_device *dev = bo->base.dev;
    struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
    intel_wakeref_t wakeref = 0;
    vm_fault_t ret;
    int idx;
    int write = !!(access_type & PROT_WRITE);

    /* Sanity check that we allow writing into this object */
    if (unlikely(i915_gem_object_is_readonly(obj) && write)) {
        uvmfault_unlockall(ufi, NULL, &obj->base.uobj);
        return VM_PAGER_BAD;
    }

    ret = ttm_bo_vm_reserve(bo);
    if (ret) {
        switch (ret) {
        case VM_FAULT_NOPAGE:
            ret = VM_PAGER_OK;
            break;
        case VM_FAULT_RETRY:
            ret = VM_PAGER_REFAULT;
            break;
        default:
            ret = VM_PAGER_BAD;
            break;
        }
        uvmfault_unlockall(ufi, NULL, &obj->base.uobj);
        return ret;
    }

    if (obj->mm.madv != I915_MADV_WILLNEED) {
        dma_resv_unlock(bo->base.resv);
        uvmfault_unlockall(ufi, NULL, &obj->base.uobj);
        return VM_PAGER_BAD;
    }

    /*
     * This must be swapped out with shmem ttm_tt (pipeline-gutting).
     * Calling ttm_bo_validate() here with TTM_PL_SYSTEM should only go as
     * far as doing a ttm_bo_move_null(), which should skip all the
     * other junk.
     */
    if (!bo->resource) {
        struct ttm_operation_ctx ctx = {
            .interruptible = true,
            .no_wait_gpu = true, /* should be idle already */
        };
        int err;

        GEM_BUG_ON(!bo->ttm || !(bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED));

        err = ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx);
        if (err) {
            dma_resv_unlock(bo->base.resv);
            uvmfault_unlockall(ufi, NULL, &obj->base.uobj);
            return VM_PAGER_BAD;
        }
    } else if (!i915_ttm_resource_mappable(bo->resource)) {
        int err = -ENODEV;
        int i;

        for (i = 0; i < obj->mm.n_placements; i++) {
            struct intel_memory_region *mr = obj->mm.placements[i];
            unsigned int flags;

            if (!mr->io_size && mr->type != INTEL_MEMORY_SYSTEM)
                continue;

            flags = obj->flags;
            flags &= ~I915_BO_ALLOC_GPU_ONLY;
            err = __i915_ttm_migrate(obj, mr, flags);
            if (!err)
                break;
        }

        if (err) {
            drm_dbg(dev, "Unable to make resource CPU accessible(err = %pe)\n",
                ERR_PTR(err));
            dma_resv_unlock(bo->base.resv);
            ret = VM_FAULT_SIGBUS;
            goto out_rpm;
        }
    }

    if (i915_ttm_cpu_maps_iomem(bo->resource))
        wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);

    if (drm_dev_enter(dev, &idx)) {
        ret = ttm_bo_vm_fault_reserved(ufi, vaddr,
            TTM_BO_VM_NUM_PREFAULT, 1);
        drm_dev_exit(idx);
    } else {
        STUB();
#ifdef notyet
        ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
#else
        STUB();
        ret = VM_FAULT_NOPAGE;
#endif
    }

#ifdef __linux__
    if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
        goto out_rpm;
#endif

    /*
     * ttm_bo_vm_reserve() already has dma_resv_lock.
     * userfault_count is protected by dma_resv lock and rpm wakeref.
     */
    if (ret == VM_FAULT_NOPAGE && wakeref && !obj->userfault_count) {
        obj->userfault_count = 1;
        spin_lock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
        list_add(&obj->userfault_link, &to_i915(obj->base.dev)->runtime_pm.lmem_userfault_list);
        spin_unlock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);

        GEM_WARN_ON(!i915_ttm_cpu_maps_iomem(bo->resource));
    }

    if (wakeref & CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
        intel_wakeref_auto(&to_i915(obj->base.dev)->runtime_pm.userfault_wakeref,
            msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));

    i915_ttm_adjust_lru(obj);

    dma_resv_unlock(bo->base.resv);

out_rpm:
    switch (ret) {
    case VM_FAULT_NOPAGE:
        ret = VM_PAGER_OK;
        break;
    case VM_FAULT_RETRY:
        ret = VM_PAGER_REFAULT;
        break;
    default:
        ret = VM_PAGER_BAD;
        break;
    }

    if (wakeref)
        intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref);
    uvmfault_unlockall(ufi, NULL, &obj->base.uobj);
    return ret;
}

static void
ttm_vm_reference(struct uvm_object *uobj)
{
    struct drm_i915_gem_object *obj =
        i915_ttm_to_gem((struct ttm_buffer_object *)uobj);

    i915_gem_object_get(obj);
}

static void
ttm_vm_detach(struct uvm_object *uobj)
{
    struct drm_i915_gem_object *obj =
        i915_ttm_to_gem((struct ttm_buffer_object *)uobj);

    i915_gem_object_put(obj);
}

const struct uvm_pagerops vm_ops_ttm = {
    .pgo_fault = vm_fault_ttm,
    .pgo_reference = ttm_vm_reference,
    .pgo_detach = ttm_vm_detach,
};
#endif
static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
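The VM_FAULT_* to VM_PAGER_* translation above appears twice in vm_fault_ttm(), once after ttm_bo_vm_reserve() fails and again at out_rpm. A possible cleanup, sketched here as a hypothetical helper that is not in the tree, would centralize that mapping:

/* Hypothetical helper (not in the tree): map Linux-style vm_fault_t
 * codes onto the UVM pager return values used by vm_fault_ttm(). */
static int
vm_fault_to_pager(vm_fault_t ret)
{
    switch (ret) {
    case VM_FAULT_NOPAGE:
        return VM_PAGER_OK;
    case VM_FAULT_RETRY:
        return VM_PAGER_REFAULT;
    default:
        return VM_PAGER_BAD;
    }
}

Both switch blocks would then reduce to ret = vm_fault_to_pager(ret);.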
@@ -1272,9 +1454,7 @@ static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
    .mmap_offset = i915_ttm_mmap_offset,
    .unmap_virtual = i915_ttm_unmap_virtual,
#ifdef notyet
    .mmap_ops = &vm_ops_ttm,
#endif
};
void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
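Together with the i915_gem_mmap() hunk earlier, this closes the loop: once .mmap_ops points at vm_ops_ttm, uvm_obj_init() binds that table to the object's uvm_object, and UVM later dispatches page faults through its pgo_fault hook. The toy program below uses made-up names rather than the UVM API; it is only meant to show the shape of that ops-table dispatch, a function-pointer table bound at init time and invoked by code that only knows the base object.

/* Toy model of ops-table dispatch; names are illustrative, not UVM's. */
#include <stdio.h>

struct toy_object;

struct toy_pagerops {
    int (*fault)(struct toy_object *, unsigned long addr);
};

struct toy_object {
    const struct toy_pagerops *ops;     /* bound once at init */
};

static int
ttm_like_fault(struct toy_object *obj, unsigned long addr)
{
    printf("fault at 0x%lx handled by ttm-like pager\n", addr);
    return 0;
}

static const struct toy_pagerops toy_ttm_ops = {
    .fault = ttm_like_fault,
};

/* analogue of uvm_obj_init(): attach the ops table to the object */
static void
toy_obj_init(struct toy_object *obj, const struct toy_pagerops *ops)
{
    obj->ops = ops;
}

/* analogue of the UVM fault path: generic code calls through the table */
static int
toy_fault(struct toy_object *obj, unsigned long addr)
{
    return obj->ops->fault(obj, addr);
}

int
main(void)
{
    struct toy_object obj;

    toy_obj_init(&obj, &toy_ttm_ops);
    return toy_fault(&obj, 0x1000);
}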