sync with OpenBSD -current

purplerain 2024-03-21 01:27:27 +00:00
parent caadbe0d20
commit ffd019c293
Signed by: purplerain
GPG key ID: F42C07F07E2E35B7
50 changed files with 617 additions and 577 deletions

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: drm_linux.c,v 1.109 2024/01/21 13:36:40 kettenis Exp $ */
/* $OpenBSD: drm_linux.c,v 1.111 2024/03/20 02:42:17 jsg Exp $ */
/*
* Copyright (c) 2013 Jonathan Gray <jsg@openbsd.org>
* Copyright (c) 2015, 2016 Mark Kettenis <kettenis@openbsd.org>
@@ -673,6 +673,28 @@ vmap(struct vm_page **pages, unsigned int npages, unsigned long flags,
return (void *)va;
}
void *
vmap_pfn(unsigned long *pfns, unsigned int npfn, pgprot_t prot)
{
vaddr_t va;
paddr_t pa;
int i;
va = (vaddr_t)km_alloc(PAGE_SIZE * npfn, &kv_any, &kp_none,
&kd_nowait);
if (va == 0)
return NULL;
for (i = 0; i < npfn; i++) {
pa = round_page(pfns[i]) | prot;
pmap_enter(pmap_kernel(), va + (i * PAGE_SIZE), pa,
PROT_READ | PROT_WRITE,
PROT_READ | PROT_WRITE | PMAP_WIRED);
pmap_update(pmap_kernel());
}
return (void *)va;
}
void
vunmap(void *addr, size_t size)
{
@@ -1302,7 +1324,8 @@ vga_disable_bridge(struct pci_attach_args *pa)
void
vga_get_uninterruptible(struct pci_dev *pdev, int rsrc)
{
KASSERT(pdev->pci->sc_bridgetag == NULL);
if (pdev->pci->sc_bridgetag != NULL)
return;
pci_enumerate_bus(pdev->pci, vga_disable_bridge, NULL);
}
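The new vmap_pfn() mirrors the Linux interface, though as implemented above the array entries are used as page-aligned physical addresses (OR'd with the pmap protection bits) rather than raw page frame numbers. A minimal usage sketch, not part of this commit; the two-page setup and the PMAP_WC flag are illustrative assumptions:

#include <sys/param.h>
#include <linux/vmalloc.h>

void *
example_map_wc(paddr_t pa0, paddr_t pa1)
{
	/* page-aligned physical addresses, per the implementation above */
	unsigned long pfns[2] = { pa0, pa1 };

	/* returns NULL if km_alloc() fails; PMAP_WC assumed supported */
	return vmap_pfn(pfns, 2, PMAP_WC);
}

void
example_unmap(void *va)
{
	vunmap(va, 2 * PAGE_SIZE);	/* size must match what vmap_pfn mapped */
}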

View file

@@ -11,9 +11,6 @@
#include <drm/drm_syncobj.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/agpvar.h>
#include "display/intel_frontbuffer.h"
#include "gem/i915_gem_ioctls.h"
@@ -294,10 +291,6 @@ struct i915_execbuffer {
bool has_llc : 1;
bool has_fence : 1;
bool needs_unfenced : 1;
struct agp_map *map;
bus_space_tag_t iot;
bus_space_handle_t ioh;
} reloc_cache;
u64 invalid_flags; /** Set of execobj.flags that are invalid */
@@ -1133,9 +1126,6 @@ static void reloc_cache_init(struct reloc_cache *cache,
cache->has_fence = cache->graphics_ver < 4;
cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
cache->node.flags = 0;
cache->map = i915->agph;
cache->iot = i915->bst;
}
static inline void *unmask_page(unsigned long p)
@@ -1168,11 +1158,7 @@ static void reloc_cache_unmap(struct reloc_cache *cache)
if (cache->vaddr & KMAP)
kunmap_atomic(vaddr);
else
#ifdef __linux__
io_mapping_unmap_atomic((void __iomem *)vaddr);
#else
agp_unmap_atomic(cache->map, cache->ioh);
#endif
}
static void reloc_cache_remap(struct reloc_cache *cache,
@@ -1197,14 +1183,8 @@ static void reloc_cache_remap(struct reloc_cache *cache,
if (!drm_mm_node_allocated(&cache->node))
offset += cache->page << PAGE_SHIFT;
#ifdef __linux__
cache->vaddr = (unsigned long)
io_mapping_map_atomic_wc(&ggtt->iomap, offset);
#else
agp_map_atomic(cache->map, offset, &cache->ioh);
cache->vaddr = (unsigned long)
bus_space_vaddr(cache->iot, cache->ioh);
#endif
}
}
@@ -1228,11 +1208,7 @@ static void reloc_cache_reset(struct reloc_cache *cache, struct i915_execbuffer
struct i915_ggtt *ggtt = cache_to_ggtt(cache);
intel_gt_flush_ggtt_writes(ggtt->vm.gt);
#ifdef __linux__
io_mapping_unmap_atomic((void __iomem *)vaddr);
#else
agp_unmap_atomic(cache->map, cache->ioh);
#endif
if (drm_mm_node_allocated(&cache->node)) {
ggtt->vm.clear_range(&ggtt->vm,
@@ -1299,11 +1275,7 @@ static void *reloc_iomap(struct i915_vma *batch,
if (cache->vaddr) {
intel_gt_flush_ggtt_writes(ggtt->vm.gt);
#ifdef __linux__
io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
#else
agp_unmap_atomic(cache->map, cache->ioh);
#endif
} else {
struct i915_vma *vma = ERR_PTR(-ENODEV);
int err;
@@ -1365,13 +1337,8 @@ static void *reloc_iomap(struct i915_vma *batch,
offset += page << PAGE_SHIFT;
}
#ifdef __linux__
vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap,
offset);
#else
agp_map_atomic(cache->map, offset, &cache->ioh);
vaddr = bus_space_vaddr(cache->iot, cache->ioh);
#endif
cache->page = page;
cache->vaddr = (unsigned long)vaddr;
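With the agp_map_atomic()/agp_unmap_atomic() branches removed, every relocation path above collapses to the same map-copy-unmap shape against ggtt->iomap. A condensed sketch of that pattern; the helper and its arguments are hypothetical:

static void
reloc_write_u32(struct i915_ggtt *ggtt, u64 offset, u32 value)
{
	/* map the containing aperture page write-combined */
	void *vaddr = io_mapping_map_atomic_wc(&ggtt->iomap,
	    offset & ~(u64)(PAGE_SIZE - 1));

	*(u32 *)((char *)vaddr + (offset & (PAGE_SIZE - 1))) = value;
	io_mapping_unmap_atomic(vaddr);	/* a no-op in the OpenBSD shim */
}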

View file

@@ -15,9 +15,6 @@ i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
unsigned long n,
unsigned long size)
{
STUB();
return NULL;
#ifdef notyet
resource_size_t offset;
GEM_BUG_ON(!i915_gem_object_is_contiguous(obj));
@@ -26,7 +23,6 @@ i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
offset -= obj->mm.region->region.start;
return io_mapping_map_wc(&obj->mm.region->iomap, offset, size);
#endif
}
/**

View file

@@ -521,8 +521,6 @@ i915_gem_object_read_from_page_kmap(struct drm_i915_gem_object *obj, u64 offset,
static void
i915_gem_object_read_from_page_iomap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
{
STUB();
#ifdef notyet
pgoff_t idx = offset >> PAGE_SHIFT;
dma_addr_t dma = i915_gem_object_get_dma_address(obj, idx);
void __iomem *src_map;
@@ -537,7 +535,6 @@ i915_gem_object_read_from_page_iomap(struct drm_i915_gem_object *obj, u64 offset
memcpy_fromio(dst, src_ptr, size);
io_mapping_unmap(src_map);
#endif
}
static bool object_has_mappable_iomem(struct drm_i915_gem_object *obj)

View file

@@ -329,9 +329,6 @@ static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
enum i915_map_type type)
{
STUB();
return NULL;
#ifdef notyet
resource_size_t iomap = obj->mm.region->iomap.base -
obj->mm.region->region.start;
unsigned long n_pfn = obj->base.size >> PAGE_SHIFT;
@@ -357,76 +354,6 @@ static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
kvfree(pfns);
return vaddr ?: ERR_PTR(-ENOMEM);
#endif
}
static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
enum i915_map_type type)
{
unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
struct sg_table *sgt = obj->mm.pages;
struct vm_page *stack_pages[32];
struct vm_page **pages = stack_pages;
struct vm_struct *area;
pgprot_t pgprot;
void *addr;
if (!i915_gem_object_has_struct_page(obj) && type != I915_MAP_WC)
return NULL;
#if 0
/* A single page can always be kmapped */
if (n_pages == 1 && type == I915_MAP_WB)
return kmap(sg_page(sgt->sgl));
#endif
if (n_pages > ARRAY_SIZE(stack_pages)) {
/* Too big for stack -- allocate temporary array instead */
pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
if (!pages)
return NULL;
}
switch (type) {
default:
MISSING_CASE(type);
/* fallthrough - to use PAGE_KERNEL anyway */
case I915_MAP_WB:
pgprot = PAGE_KERNEL;
break;
case I915_MAP_WC:
pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
break;
}
if (i915_gem_object_has_struct_page(obj)) {
struct sgt_iter iter;
struct vm_page *page;
unsigned long i = 0;
for_each_sgt_page(page, iter, sgt)
pages[i++] = page;
} else {
STUB();
#ifdef notyet
resource_size_t iomap;
struct sgt_iter iter;
pte_t **ptes = mem;
dma_addr_t addr;
iomap = obj->mm.region->iomap.base;
iomap -= obj->mm.region->region.start;
for_each_sgt_daddr(addr, iter, sgt)
**ptes++ = iomap_pte(iomap, addr, pgprot);
#endif
}
addr = vmap(pages, n_pages, 0, pgprot);
if (pages != stack_pages)
kvfree(pages);
return addr;
}
/* get, pin, and map the pages of the object into kernel space */

View file

@@ -821,19 +821,41 @@ static int init_stolen_lmem(struct intel_memory_region *mem)
return 0;
}
STUB();
return -ENOSYS;
#ifdef notyet
#ifdef __linux__
if (mem->io_size &&
!io_mapping_init_wc(&mem->iomap, mem->io_start, mem->io_size))
goto err_cleanup;
#else
if (mem->io_size) {
paddr_t start, end;
struct vm_page *pgs;
int i;
bus_space_handle_t bsh;
start = atop(mem->io_start);
end = start + atop(mem->io_size);
uvm_page_physload(start, end, start, end, PHYSLOAD_DEVICE);
pgs = PHYS_TO_VM_PAGE(mem->io_start);
for (i = 0; i < atop(mem->io_size); i++)
atomic_setbits_int(&(pgs[i].pg_flags), PG_PMAP_WC);
if (bus_space_map(i915->bst, mem->io_start, mem->io_size,
BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE, &bsh))
panic("can't map stolen lmem");
mem->iomap.base = mem->io_start;
mem->iomap.size = mem->io_size;
mem->iomap.iomem = bus_space_vaddr(i915->bst, bsh);
}
#endif
drm_dbg(&i915->drm, "Stolen Local memory IO start: %pa\n",
&mem->io_start);
drm_dbg(&i915->drm, "Stolen Local DSM base: %pa\n", &mem->region.start);
return 0;
#ifdef __linux__
err_cleanup:
i915_gem_cleanup_stolen(mem->i915);
return err;
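The #else branch above is the first of three sites in this commit (stolen LMEM here, the GGTT aperture in ggtt_init_hw(), and region LMEM further down) repeating one idiom: physload the PCI window, mark its pages write-combined, bus_space_map() it linearly, and seed a struct io_mapping. A generic sketch of the idiom; the helper is hypothetical and error handling is simplified:

static int
iomap_init_wc(bus_space_tag_t bst, struct io_mapping *map,
    bus_addr_t start, bus_size_t size)
{
	bus_space_handle_t bsh;
	struct vm_page *pgs;
	int i;

	uvm_page_physload(atop(start), atop(start) + atop(size),
	    atop(start), atop(start) + atop(size), PHYSLOAD_DEVICE);
	pgs = PHYS_TO_VM_PAGE(start);
	for (i = 0; i < atop(size); i++)
		atomic_setbits_int(&pgs[i].pg_flags, PG_PMAP_WC);

	if (bus_space_map(bst, start, size,
	    BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE, &bsh))
		return -EIO;

	map->base = start;
	map->size = size;
	map->iomem = bus_space_vaddr(bst, bsh);
	return 0;
}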

View file

@@ -715,9 +715,6 @@ static int i915_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource
static unsigned long i915_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
unsigned long page_offset)
{
STUB();
return 0;
#ifdef notyet
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
struct scatterlist *sg;
unsigned long base;
@@ -730,7 +727,6 @@ static unsigned long i915_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
sg = i915_gem_object_page_iter_get_sg(obj, &obj->ttm.get_io_page, page_offset, &ofs);
return ((base + sg_dma_address(sg)) >> PAGE_SHIFT) + ofs;
#endif
}
static int i915_ttm_access_memory(struct ttm_buffer_object *bo,

View file

@@ -272,6 +272,7 @@ struct i915_ttm_memcpy_arg {
bool clear;
struct i915_refct_sgt *src_rsgt;
struct i915_refct_sgt *dst_rsgt;
bus_space_tag_t memt;
};
/**
@@ -304,11 +305,8 @@ struct i915_ttm_memcpy_work {
static void i915_ttm_move_memcpy(struct i915_ttm_memcpy_arg *arg)
{
STUB();
#ifdef notyet
ttm_move_memcpy(arg->clear, arg->num_pages,
arg->dst_iter, arg->src_iter);
#endif
arg->dst_iter, arg->src_iter, arg->memt);
}
static void i915_ttm_memcpy_init(struct i915_ttm_memcpy_arg *arg,
@@ -317,8 +315,6 @@ static void i915_ttm_memcpy_init(struct i915_ttm_memcpy_arg *arg,
struct ttm_tt *dst_ttm,
struct i915_refct_sgt *dst_rsgt)
{
STUB();
#ifdef notyet
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
struct intel_memory_region *dst_reg, *src_reg;
@@ -342,7 +338,8 @@ static void i915_ttm_memcpy_init(struct i915_ttm_memcpy_arg *arg,
arg->dst_rsgt = i915_refct_sgt_get(dst_rsgt);
arg->src_rsgt = clear ? NULL :
i915_ttm_resource_get_st(obj, bo->resource);
#endif
arg->memt = bo->bdev->memt;
}
static void i915_ttm_memcpy_release(struct i915_ttm_memcpy_arg *arg)

View file

@@ -28,9 +28,6 @@
#include "intel_gtt.h"
#include "gen8_ppgtt.h"
#include <dev/pci/pcivar.h>
#include <dev/pci/agpvar.h>
static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
unsigned long color,
u64 *start,
@@ -53,7 +50,6 @@ static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
static int ggtt_init_hw(struct i915_ggtt *ggtt)
{
struct drm_i915_private *i915 = ggtt->vm.i915;
int i;
i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
@@ -77,6 +73,9 @@ static int ggtt_init_hw(struct i915_ggtt *ggtt)
ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start,
ggtt->mappable_end);
#else
bus_space_handle_t bsh;
int i;
/* XXX would be a lot nicer to get agp info before now */
uvm_page_physload(atop(ggtt->gmadr.start),
atop(ggtt->gmadr.start + ggtt->mappable_end),
@@ -94,11 +93,13 @@ static int ggtt_init_hw(struct i915_ggtt *ggtt)
for (i = 0; i < atop(ggtt->mappable_end); i++)
atomic_setbits_int(&(i915->pgs[i].pg_flags),
PG_PMAP_WC);
if (agp_init_map(i915->bst, ggtt->gmadr.start,
if (bus_space_map(i915->bst, ggtt->gmadr.start,
ggtt->mappable_end,
BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE,
&i915->agph))
BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE, &bsh))
panic("can't map aperture");
ggtt->iomap.base = ggtt->gmadr.start;
ggtt->iomap.size = ggtt->mappable_end;
ggtt->iomap.iomem = bus_space_vaddr(i915->bst, bsh);
#endif
}

View file

@@ -358,9 +358,7 @@ struct i915_address_space {
struct i915_ggtt {
struct i915_address_space vm;
#ifdef notyet
struct io_mapping iomap; /* Mapping to our CPU mappable region */
#endif
struct resource gmadr; /* GMADR resource */
resource_size_t mappable_end; /* End offset that we can CPU map */

View file

@@ -153,15 +153,36 @@ region_lmem_release(struct intel_memory_region *mem)
static int
region_lmem_init(struct intel_memory_region *mem)
{
STUB();
return -ENOSYS;
#ifdef notyet
int ret;
#ifdef __linux__
if (!io_mapping_init_wc(&mem->iomap,
mem->io_start,
mem->io_size))
return -EIO;
#else
struct drm_i915_private *i915 = mem->i915;
paddr_t start, end;
struct vm_page *pgs;
int i;
bus_space_handle_t bsh;
start = atop(mem->io_start);
end = start + atop(mem->io_size);
uvm_page_physload(start, end, start, end, PHYSLOAD_DEVICE);
pgs = PHYS_TO_VM_PAGE(mem->io_start);
for (i = 0; i < atop(mem->io_size); i++)
atomic_setbits_int(&(pgs[i].pg_flags), PG_PMAP_WC);
if (bus_space_map(i915->bst, mem->io_start, mem->io_size,
BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE, &bsh))
panic("can't map lmem");
mem->iomap.base = mem->io_start;
mem->iomap.size = mem->io_size;
mem->iomap.iomem = bus_space_vaddr(i915->bst, bsh);
#endif
ret = intel_region_ttm_init(mem);
if (ret)
@@ -170,10 +191,11 @@ region_lmem_init(struct intel_memory_region *mem)
return 0;
out_no_buddy:
#ifdef __linux__
io_mapping_fini(&mem->iomap);
#endif
return ret;
#endif
}
static const struct intel_memory_region_ops intel_region_lmem_ops = {

View file

@@ -190,8 +190,6 @@ xehp_load_dss_mask(struct intel_uncore *uncore,
int numregs,
...)
{
STUB();
#ifdef notyet
va_list argp;
u32 fuse_val[I915_MAX_SS_FUSE_REGS] = {};
int i;
@@ -205,7 +203,6 @@ xehp_load_dss_mask(struct intel_uncore *uncore,
va_end(argp);
bitmap_from_arr32(ssmask->xehp, fuse_val, numregs * 32);
#endif
}
static void xehp_sseu_info_init(struct intel_gt *gt)

View file

@@ -248,6 +248,12 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
if (ret < 0)
goto err_workqueues;
#ifdef __OpenBSD__
dev_priv->bdev.iot = dev_priv->iot;
dev_priv->bdev.memt = dev_priv->bst;
dev_priv->bdev.dmat = dev_priv->dmat;
#endif
ret = intel_region_ttm_device_init(dev_priv);
if (ret)
goto err_ttm;
@@ -2254,6 +2260,7 @@ inteldrm_attach(struct device *parent, struct device *self, void *aux)
dev_priv->pc = pa->pa_pc;
dev_priv->tag = pa->pa_tag;
dev_priv->iot = pa->pa_iot;
dev_priv->dmat = pa->pa_dmat;
dev_priv->bst = pa->pa_memt;
dev_priv->memex = pa->pa_memex;

View file

@@ -215,8 +215,8 @@ struct inteldrm_softc {
#ifdef __OpenBSD__
struct device sc_dev;
bus_dma_tag_t dmat;
bus_space_tag_t iot;
bus_space_tag_t bst;
struct agp_map *agph;
bus_space_handle_t opregion_ioh;
bus_space_handle_t opregion_rvda_ioh;
bus_size_t opregion_rvda_size;

View file

@@ -39,8 +39,6 @@
#include <drm/drm_cache.h>
#include <drm/drm_vma_manager.h>
#include <dev/pci/agpvar.h>
#include "display/intel_display.h"
#include "display/intel_frontbuffer.h"
@@ -279,7 +277,6 @@ err_unlock:
return ret;
}
#ifdef __linux__
static inline bool
gtt_user_read(struct io_mapping *mapping,
loff_t base, int offset,
@@ -303,34 +300,6 @@ gtt_user_read(struct io_mapping *mapping,
}
return unwritten;
}
#else
static inline bool
gtt_user_read(struct drm_i915_private *dev_priv,
loff_t base, int offset,
char __user *user_data, int length)
{
bus_space_handle_t bsh;
void __iomem *vaddr;
unsigned long unwritten;
/* We can use the cpu mem copy function because this is X86. */
agp_map_atomic(dev_priv->agph, base, &bsh);
vaddr = bus_space_vaddr(dev_priv->bst, bsh);
unwritten = __copy_to_user_inatomic(user_data,
(void __force *)vaddr + offset,
length);
agp_unmap_atomic(dev_priv->agph, bsh);
if (unwritten) {
agp_map_subregion(dev_priv->agph, base, PAGE_SIZE, &bsh);
vaddr = bus_space_vaddr(dev_priv->bst, bsh);
unwritten = copy_to_user(user_data,
(void __force *)vaddr + offset,
length);
agp_unmap_subregion(dev_priv->agph, bsh, PAGE_SIZE);
}
return unwritten;
}
#endif
static struct i915_vma *i915_gem_gtt_prepare(struct drm_i915_gem_object *obj,
struct drm_mm_node *node,
@@ -460,7 +429,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
page_base += offset & LINUX_PAGE_MASK;
}
if (gtt_user_read(i915, page_base, page_offset,
if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
user_data, page_length)) {
ret = -EFAULT;
break;
@@ -542,7 +511,7 @@ out:
/* This is the fast write path which cannot handle
* page faults in the source data
*/
#ifdef __linux__
static inline bool
ggtt_write(struct io_mapping *mapping,
loff_t base, int offset,
@@ -565,33 +534,6 @@
return unwritten;
}
#else
static inline bool
ggtt_write(struct drm_i915_private *dev_priv,
loff_t base, int offset,
char __user *user_data, int length)
{
bus_space_handle_t bsh;
void __iomem *vaddr;
unsigned long unwritten;
/* We can use the cpu mem copy function because this is X86. */
agp_map_atomic(dev_priv->agph, base, &bsh);
vaddr = bus_space_vaddr(dev_priv->bst, bsh);
unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
user_data, length);
agp_unmap_atomic(dev_priv->agph, bsh);
if (unwritten) {
agp_map_subregion(dev_priv->agph, base, PAGE_SIZE, &bsh);
vaddr = bus_space_vaddr(dev_priv->bst, bsh);
unwritten = copy_from_user((void __force *)vaddr + offset,
user_data, length);
agp_unmap_subregion(dev_priv->agph, bsh, PAGE_SIZE);
}
return unwritten;
}
#endif
/**
* i915_gem_gtt_pwrite_fast - This is the fast pwrite path, where we copy the data directly from the
@@ -674,7 +616,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
* If the object is non-shmem backed, we retry again with the
* path that handles page fault.
*/
if (ggtt_write(i915, page_base, page_offset,
if (ggtt_write(&ggtt->iomap, page_base, page_offset,
user_data, page_length)) {
ret = -EFAULT;
break;
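Both user-copy helpers now take the io_mapping directly, so the OpenBSD and Linux paths share one body. Per page the shape is: try the atomic write-combined mapping with a non-faulting copy, then retry with a regular mapping if that copy faults. A hedged sketch of the read side, condensed from gtt_user_read() above (the helper name is hypothetical; base is assumed page-aligned, as in i915_gem_gtt_pread()):

static bool
gtt_read_page(struct io_mapping *mapping, loff_t base, int offset,
    char *user_data, int length)
{
	void *vaddr;
	unsigned long unwritten;

	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_to_user_inatomic(user_data,
	    (char *)vaddr + offset, length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		/* fall back to a mapping under which the copy may fault */
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_to_user(user_data,
		    (char *)vaddr + offset, length);
		io_mapping_unmap(vaddr);
	}
	return unwritten;
}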

View file

@@ -43,8 +43,6 @@
#include "i915_vma.h"
#include "i915_vma_resource.h"
#include <dev/pci/agpvar.h>
static inline void assert_vma_held_evict(const struct i915_vma *vma)
{
/*
@@ -582,22 +580,9 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
ptr = i915_gem_object_lmem_io_map(vma->obj, 0,
vma->obj->base.size);
} else if (i915_vma_is_map_and_fenceable(vma)) {
#ifdef __linux__
ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
i915_vma_offset(vma),
i915_vma_size(vma));
#else
{
struct drm_i915_private *dev_priv = vma->vm->i915;
err = agp_map_subregion(dev_priv->agph, i915_vma_offset(vma),
i915_vma_size(vma), &vma->bsh);
if (err) {
err = -err;
goto err;
}
ptr = bus_space_vaddr(dev_priv->bst, vma->bsh);
}
#endif
} else {
ptr = (void __iomem *)
i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
@@ -616,10 +601,8 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
if (page_unmask_bits(ptr))
__i915_gem_object_release_map(vma->obj);
#ifdef __linux__
else
io_mapping_unmap(ptr);
#endif
ptr = vma->iomap;
}
}
@@ -1879,14 +1862,8 @@ static void __i915_vma_iounmap(struct i915_vma *vma)
if (page_unmask_bits(vma->iomap))
__i915_gem_object_release_map(vma->obj);
else {
#ifdef __linux__
else
io_mapping_unmap(vma->iomap);
#else
struct drm_i915_private *dev_priv = vma->vm->i915;
agp_unmap_subregion(dev_priv->agph, vma->bsh, vma->node.size);
#endif
}
vma->iomap = NULL;
}

View file

@@ -70,9 +70,7 @@ struct intel_memory_region {
const struct intel_memory_region_ops *ops;
#ifdef notyet
struct io_mapping iomap;
#endif
struct resource region;
resource_size_t io_start;

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: bitmap.h,v 1.6 2024/01/06 09:33:08 kettenis Exp $ */
/* $OpenBSD: bitmap.h,v 1.8 2024/03/20 22:52:44 bluhm Exp $ */
/*
* Copyright (c) 2013, 2014, 2015 Mark Kettenis
*
@@ -97,11 +97,28 @@ bitmap_complement(void *d, void *s, u_int n)
dst[b >> 5] = ~src[b >> 5];
}
static inline bool
bitmap_intersects(const void *s1, const void *s2, u_int n)
{
const u_int *b1 = s1;
const u_int *b2 = s2;
u_int b;
for (b = 0; b < n; b += 32)
if (b1[b >> 5] & b2[b >> 5])
return true;
if ((n % 32) != 0)
if ((b1[n >> 5] & b2[n >> 5]) & (0xffffffff >> (32 - (n % 32))))
return true;
return false;
}
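The tail mask 0xffffffff >> (32 - (n % 32)) keeps only the low n % 32 valid bits of the final word; like the other helpers in this header, the main loop assumes bits beyond n are stored as zero. A worked example of the semantics (editor's illustration):

u_int a[2] = { 0x00000001, 0x0 };
u_int b[2] = { 0x00000001, 0x0 };
u_int c[2] = { 0x00000002, 0x0 };

bitmap_intersects(a, b, 64);	/* true: both have bit 0 set */
bitmap_intersects(a, c, 64);	/* false: no common bit */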
static inline void
bitmap_copy(void *d, void *s, u_int n)
bitmap_copy(void *d, const void *s, u_int n)
{
u_int *dst = d;
u_int *src = s;
const u_int *src = s;
u_int b;
for (b = 0; b < n; b += 32)
@@ -109,7 +126,7 @@ bitmap_copy(void *d, void *s, u_int n)
}
static inline void
bitmap_to_arr32(void *d, unsigned long *src, u_int n)
bitmap_to_arr32(void *d, const unsigned long *src, u_int n)
{
u_int *dst = d;
u_int b;
@@ -128,6 +145,27 @@ bitmap_to_arr32(void *d, unsigned long *src, u_int n)
dst[n >> 5] &= (0xffffffff >> (32 - (n % 32)));
}
static inline void
bitmap_from_arr32(unsigned long *dst, const void *s, u_int n)
{
const u_int *src = s;
u_int b;
#ifdef __LP64__
for (b = 0; b < n; b += 32) {
dst[b >> 6] = src[b >> 5];
b += 32;
if (b < n)
dst[b >> 6] |= ((unsigned long)src[b >> 5]) << 32;
}
if ((n % 64) != 0)
dst[n >> 6] &= (0xffffffffffffffffUL >> (64 - (n % 64)));
#else
bitmap_copy(dst, s, n);
if ((n % 32) != 0)
dst[n >> 5] &= (0xffffffff >> (32 - (n % 32)));
#endif
}
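On __LP64__, each pass packs a pair of 32-bit array words into one unsigned long (low half first, high half only while still below n), and the tail mask trims the result to n bits. Worked example for n = 48 (editor's illustration):

u_int arr[2] = { 0xdeadbeef, 0x00caffee };
unsigned long bm[1];

bitmap_from_arr32(bm, arr, 48);
/* b = 0:  bm[0] = 0x00000000deadbeef (u_int zero-extends)
 * b = 32: 32 < 48, so bm[0] |= (unsigned long)0x00caffee << 32
 * tail:   n % 64 = 48, mask = ~0UL >> 16 keeps bits 0-47
 * bm[0] == 0x0000ffeedeadbeefUL */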
static inline int
bitmap_weight(const void *p, u_int n)

View file

@@ -3,4 +3,45 @@
#ifndef _LINUX_IO_MAPPING_H
#define _LINUX_IO_MAPPING_H
#include <linux/types.h>
struct io_mapping {
resource_size_t base;
unsigned long size;
void *iomem;
};
static inline void *
io_mapping_map_wc(struct io_mapping *map, unsigned long off, unsigned long size)
{
return ((uint8_t *)map->iomem + off);
}
static inline void
io_mapping_unmap(void *va)
{
}
static inline void *
io_mapping_map_local_wc(struct io_mapping *map, unsigned long off)
{
return ((uint8_t *)map->iomem + off);
}
static inline void
io_mapping_unmap_local(void *va)
{
}
static inline void *
io_mapping_map_atomic_wc(struct io_mapping *map, unsigned long off)
{
return ((uint8_t *)map->iomem + off);
}
static inline void
io_mapping_unmap_atomic(void *va)
{
}
#endif
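This shim relies on the whole BAR already being linearly mapped: base/size/iomem are filled in once (from bus_space_map(), as ggtt_init_hw() and the lmem init paths above do), every map call is then plain pointer arithmetic, and the unmap calls are no-ops. A minimal wiring sketch; bst, start, and size are assumed inputs:

struct io_mapping im;
bus_space_handle_t bsh;
uint32_t *p;

if (bus_space_map(bst, start, size,
    BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE, &bsh) == 0) {
	im.base = start;
	im.size = size;
	im.iomem = bus_space_vaddr(bst, bsh);
}

p = io_mapping_map_wc(&im, 0x1000, PAGE_SIZE);
/* ... MMIO-style accesses through p ... */
io_mapping_unmap(p);	/* a no-op here */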

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: vmalloc.h,v 1.6 2024/01/16 23:38:13 jsg Exp $ */
/* $OpenBSD: vmalloc.h,v 1.7 2024/03/20 02:42:17 jsg Exp $ */
/*
* Copyright (c) 2013, 2014, 2015 Mark Kettenis
*
@@ -25,6 +25,7 @@
#include <linux/types.h> /* for pgprot_t */
void *vmap(struct vm_page **, unsigned int, unsigned long, pgprot_t);
void *vmap_pfn(unsigned long *, unsigned int, pgprot_t);
void vunmap(void *, size_t);
static inline void *

View file

@@ -564,32 +564,16 @@ retry:
goto retry;
}
#ifdef __linux__
addr = io_mapping_map_local_wc(iter_io->iomap, iter_io->cache.offs +
(((resource_size_t)i - iter_io->cache.i)
<< PAGE_SHIFT));
#else
if (bus_space_map(bst, iter_io->cache.offs +
(((resource_size_t)i - iter_io->cache.i) << PAGE_SHIFT),
PAGE_SIZE, BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE,
&dmap->bsh)) {
printf("%s bus_space_map failed\n", __func__);
addr = 0;
} else {
addr = bus_space_vaddr(bst, dmap->bsh);
}
#endif
iosys_map_set_vaddr_iomem(dmap, addr);
}
static void ttm_kmap_iter_iomap_unmap_local(struct ttm_kmap_iter *iter,
struct iosys_map *map, bus_space_tag_t bst)
{
#ifdef notyet
io_mapping_unmap_local(map->vaddr_iomem);
#else
bus_space_unmap(bst, map->bsh, PAGE_SIZE);
#endif
}
static const struct ttm_kmap_iter_ops ttm_kmap_iter_io_ops = {

View file

@@ -151,6 +151,9 @@ static void ttm_tt_init_fields(struct ttm_tt *ttm,
ttm->swap_storage = NULL;
ttm->sg = bo->sg;
ttm->caching = caching;
ttm->dmat = bo->bdev->dmat;
ttm->map = NULL;
ttm->segs = NULL;
}
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
@@ -184,9 +187,11 @@ void ttm_tt_fini(struct ttm_tt *ttm)
ttm->dma_address = NULL;
ttm->orders = NULL;
bus_dmamap_destroy(ttm->dmat, ttm->map);
km_free(ttm->segs, round_page(ttm->num_pages *
sizeof(bus_dma_segment_t)), &kv_any, &kp_zero);
if (ttm->map)
bus_dmamap_destroy(ttm->dmat, ttm->map);
if (ttm->segs)
km_free(ttm->segs, round_page(ttm->num_pages *
sizeof(bus_dma_segment_t)), &kv_any, &kp_zero);
}
EXPORT_SYMBOL(ttm_tt_fini);
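The new NULL checks matter because only ttm_sg_tt_init() allocates map and segs; a ttm_tt set up through plain ttm_tt_init() now reaches ttm_tt_fini() with both pointers NULL instead of handing them to bus_dmamap_destroy()/km_free(). Hedged sketch; the exact ttm_tt_init() argument list is assumed from the upstream signature:

struct ttm_tt tt;

/* ttm_tt_init_fields() above leaves tt.map == NULL, tt.segs == NULL */
if (ttm_tt_init(&tt, bo, /* page_flags */ 0, ttm_cached,
    /* extra_pages */ 0) == 0)
	ttm_tt_fini(&tt);	/* the guards skip destroy/free safely */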
@@ -210,8 +215,6 @@ int ttm_sg_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
ttm->segs = km_alloc(round_page(ttm->num_pages *
sizeof(bus_dma_segment_t)), &kv_any, &kp_zero, &kd_waitok);
ttm->dmat = bo->bdev->dmat;
if (bo->bdev->pool.use_dma32 == false)
flags |= BUS_DMA_64BIT;
if (bus_dmamap_create(ttm->dmat, ttm->num_pages << PAGE_SHIFT,