sync with OpenBSD -current

commit 155eb8555e (parent 5d45cd7ee8)
5506 changed files with 1786257 additions and 1416034 deletions
@@ -13,9 +13,9 @@ $ make headers_install INSTALL_HDR_PATH=/path/to/install
 
 The last update was done at the following kernel commit :
 
-commit 7f7a942c0a338c4a2a7b359bdb2b68e9896122ec
-Merge: 0a20a3ea4259 ddcb8fa6514f
+commit d9aa1da9a8cfb0387eb5703c15bd1f54421460ac
+Merge: 7c9aa0f7463e 28e671114fb0
 Author: Dave Airlie <airlied@redhat.com>
-Date:   Thu Oct 27 14:44:02 2022 +1000
+Date:   Mon Aug 7 13:49:24 2023 +1000
 
-    Merge tag 'drm-next-20221025' of git://linuxtv.org/pinchartl/media into drm-next
+    Merge tag 'drm-intel-gt-next-2023-08-04' of git://anongit.freedesktop.org/drm/drm-intel into drm-next
@@ -94,6 +94,9 @@ extern "C" {
  *
  * %AMDGPU_GEM_DOMAIN_OA	Ordered append, used by 3D or Compute engines
  * for appending data.
+ *
+ * %AMDGPU_GEM_DOMAIN_DOORBELL	Doorbell. It is an MMIO region for
+ * signalling user mode queues.
  */
 #define AMDGPU_GEM_DOMAIN_CPU		0x1
 #define AMDGPU_GEM_DOMAIN_GTT		0x2
@@ -101,12 +104,14 @@ extern "C" {
 #define AMDGPU_GEM_DOMAIN_GDS		0x8
 #define AMDGPU_GEM_DOMAIN_GWS		0x10
 #define AMDGPU_GEM_DOMAIN_OA		0x20
+#define AMDGPU_GEM_DOMAIN_DOORBELL	0x40
 #define AMDGPU_GEM_DOMAIN_MASK		(AMDGPU_GEM_DOMAIN_CPU | \
 					 AMDGPU_GEM_DOMAIN_GTT | \
 					 AMDGPU_GEM_DOMAIN_VRAM | \
 					 AMDGPU_GEM_DOMAIN_GDS | \
 					 AMDGPU_GEM_DOMAIN_GWS | \
-					 AMDGPU_GEM_DOMAIN_OA)
+					 AMDGPU_GEM_DOMAIN_OA | \
+					 AMDGPU_GEM_DOMAIN_DOORBELL)
 
 /* Flag that CPU access will be required for the case of VRAM domain */
 #define AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED	(1 << 0)
@@ -145,7 +150,7 @@ extern "C" {
  */
 #define AMDGPU_GEM_CREATE_DISCARDABLE		(1 << 12)
 /* Flag that BO is shared coherently between multiple devices or CPU threads.
- * May depend on GPU instructions to flush caches explicitly
+ * May depend on GPU instructions to flush caches to system scope explicitly.
  *
  * This influences the choice of MTYPE in the PTEs on GFXv9 and later GPUs and
  * may override the MTYPE selected in AMDGPU_VA_OP_MAP.
@@ -158,6 +163,14 @@ extern "C" {
  * may override the MTYPE selected in AMDGPU_VA_OP_MAP.
  */
 #define AMDGPU_GEM_CREATE_UNCACHED		(1 << 14)
+/* Flag that BO should be coherent across devices when using device-level
+ * atomics. May depend on GPU instructions to flush caches to device scope
+ * explicitly, promoting them to system scope automatically.
+ *
+ * This influences the choice of MTYPE in the PTEs on GFXv9 and later GPUs and
+ * may override the MTYPE selected in AMDGPU_VA_OP_MAP.
+ */
+#define AMDGPU_GEM_CREATE_EXT_COHERENT		(1 << 15)
 
 struct drm_amdgpu_gem_create_in  {
 	/** the requested memory size */
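As a usage sketch (not part of the diff): the new AMDGPU_GEM_CREATE_EXT_COHERENT flag is consumed through the existing GEM create ioctl. The helper name and the already-open device fd are assumptions; the drm-uapi headers are assumed to be on the include path.

	#include <sys/ioctl.h>
	#include "amdgpu_drm.h"

	/* Allocate a VRAM BO that stays coherent across devices (sketch). */
	static int create_coherent_bo(int fd, __u64 size, __u32 *handle)
	{
		union drm_amdgpu_gem_create args = {0};

		args.in.bo_size = size;
		args.in.alignment = 4096;
		args.in.domains = AMDGPU_GEM_DOMAIN_VRAM;
		args.in.domain_flags = AMDGPU_GEM_CREATE_EXT_COHERENT;

		if (ioctl(fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &args))
			return -1;
		*handle = args.out.handle;	/* GEM handle of the new BO */
		return 0;
	}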
@@ -236,15 +249,17 @@ union drm_amdgpu_bo_list {
 /* unknown cause */
 #define AMDGPU_CTX_UNKNOWN_RESET	3
 
-/* indicate gpu reset occured after ctx created */
+/* indicate gpu reset occurred after ctx created */
 #define AMDGPU_CTX_QUERY2_FLAGS_RESET    (1<<0)
-/* indicate vram lost occured after ctx created */
+/* indicate vram lost occurred after ctx created */
 #define AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST (1<<1)
 /* indicate some job from this context once cause gpu hang */
 #define AMDGPU_CTX_QUERY2_FLAGS_GUILTY   (1<<2)
 /* indicate some errors are detected by RAS */
 #define AMDGPU_CTX_QUERY2_FLAGS_RAS_CE   (1<<3)
 #define AMDGPU_CTX_QUERY2_FLAGS_RAS_UE   (1<<4)
+/* indicate that the reset hasn't completed yet */
+#define AMDGPU_CTX_QUERY2_FLAGS_RESET_IN_PROGRESS (1<<5)
 
 /* Context priority level */
 #define AMDGPU_CTX_PRIORITY_UNSET       -2048
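The QUERY2 flags above, including the new RESET_IN_PROGRESS bit, are read back through the context ioctl's query-state op. A minimal sketch, assuming the standard drm_amdgpu_ctx union layout and an open device fd (the helper is illustrative):

	#include <sys/ioctl.h>
	#include "amdgpu_drm.h"

	/* Returns 1 if a reset is still in flight for this context, 0 if not. */
	static int ctx_reset_in_progress(int fd, __u32 ctx_id)
	{
		union drm_amdgpu_ctx args = {0};

		args.in.op = AMDGPU_CTX_OP_QUERY_STATE2;
		args.in.ctx_id = ctx_id;
		if (ioctl(fd, DRM_IOCTL_AMDGPU_CTX, &args))
			return -1;
		return !!(args.out.state.flags &
			  AMDGPU_CTX_QUERY2_FLAGS_RESET_IN_PROGRESS);
	}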
@@ -579,7 +594,8 @@ struct drm_amdgpu_gem_va {
  */
 #define AMDGPU_HW_IP_VCN_ENC      7
 #define AMDGPU_HW_IP_VCN_JPEG     8
-#define AMDGPU_HW_IP_NUM          9
+#define AMDGPU_HW_IP_VPE          9
+#define AMDGPU_HW_IP_NUM          10
 
 #define AMDGPU_HW_IP_INSTANCE_MAX_COUNT 1
 
@@ -592,6 +608,7 @@ struct drm_amdgpu_gem_va {
 #define AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES	0x07
 #define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT   0x08
 #define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL 0x09
+#define AMDGPU_CHUNK_ID_CP_GFX_SHADOW   0x0a
 
 struct drm_amdgpu_cs_chunk {
 	__u32		chunk_id;
@@ -708,6 +725,15 @@ struct drm_amdgpu_cs_chunk_data {
 	};
 };
 
+#define AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW 0x1
+
+struct drm_amdgpu_cs_chunk_cp_gfx_shadow {
+	__u64 shadow_va;
+	__u64 csa_va;
+	__u64 gds_va;
+	__u64 flags;
+};
+
 /*
  * Query h/w info: Flag that this is integrated (a.k.a. fusion) GPU
  *
@@ -780,6 +806,8 @@ struct drm_amdgpu_cs_chunk_data {
 #define AMDGPU_INFO_FW_MES		0x1a
 /* Subquery id: Query IMU firmware version */
 #define AMDGPU_INFO_FW_IMU		0x1b
+/* Subquery id: Query VPE firmware version */
+#define AMDGPU_INFO_FW_VPE		0x1c
 
 /* number of bytes moved for TTM migration */
 #define AMDGPU_INFO_NUM_BYTES_MOVED	0x0f
@@ -876,6 +904,10 @@ struct drm_amdgpu_cs_chunk_data {
 #define AMDGPU_INFO_VIDEO_CAPS_DECODE		0
 /* Subquery id: Encode */
 #define AMDGPU_INFO_VIDEO_CAPS_ENCODE		1
+/* Query the max number of IBs per gang per submission */
+#define AMDGPU_INFO_MAX_IBS			0x22
+/* query last page fault info */
+#define AMDGPU_INFO_GPUVM_FAULT			0x23
 
 #define AMDGPU_INFO_MMR_SE_INDEX_SHIFT	0
 #define AMDGPU_INFO_MMR_SE_INDEX_MASK	0xff
@@ -1126,6 +1158,14 @@ struct drm_amdgpu_info_device {
 	__u64 mall_size;            /* AKA infinity cache */
+	/* high 32 bits of the rb pipes mask */
+	__u32 enabled_rb_pipes_mask_hi;
+	/* shadow area size for gfx11 */
+	__u32 shadow_size;
+	/* shadow area base virtual alignment for gfx11 */
+	__u32 shadow_alignment;
+	/* context save area size for gfx11 */
+	__u32 csa_size;
+	/* context save area base virtual alignment for gfx11 */
+	__u32 csa_alignment;
 };
 
 struct drm_amdgpu_info_hw_ip {
@@ -1193,6 +1233,20 @@ struct drm_amdgpu_info_video_caps {
 	struct drm_amdgpu_info_video_codec_info codec_info[AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_COUNT];
 };
 
+#define AMDGPU_VMHUB_TYPE_MASK	0xff
+#define AMDGPU_VMHUB_TYPE_SHIFT	0
+#define AMDGPU_VMHUB_TYPE_GFX	0
+#define AMDGPU_VMHUB_TYPE_MM0	1
+#define AMDGPU_VMHUB_TYPE_MM1	2
+#define AMDGPU_VMHUB_IDX_MASK	0xff00
+#define AMDGPU_VMHUB_IDX_SHIFT	8
+
+struct drm_amdgpu_info_gpuvm_fault {
+	__u64 addr;
+	__u32 status;
+	__u32 vmhub;
+};
+
 /*
  * Supported GPU families
  */
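The new AMDGPU_INFO_GPUVM_FAULT query fills the drm_amdgpu_info_gpuvm_fault struct above through the generic INFO ioctl's return-pointer/size convention. A hedged sketch (the helper name is illustrative):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include "amdgpu_drm.h"

	/* Fetch the last recorded GPUVM page fault for this fd. */
	static int query_last_gpuvm_fault(int fd,
					  struct drm_amdgpu_info_gpuvm_fault *fault)
	{
		struct drm_amdgpu_info request = {0};

		request.return_pointer = (uintptr_t)fault;
		request.return_size = sizeof(*fault);
		request.query = AMDGPU_INFO_GPUVM_FAULT;
		return ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &request);
	}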
@@ -1211,6 +1265,7 @@ struct drm_amdgpu_info_video_caps {
 #define AMDGPU_FAMILY_GC_11_0_1			148 /* GC 11.0.1 */
 #define AMDGPU_FAMILY_GC_10_3_6			149 /* GC 10.3.6 */
 #define AMDGPU_FAMILY_GC_10_3_7			151 /* GC 10.3.7 */
+#define AMDGPU_FAMILY_GC_11_5_0			150 /* GC 11.5.0 */
 
 #if defined(__cplusplus)
 }
@@ -667,8 +667,11 @@ struct drm_gem_open {
  * Bitfield of supported PRIME sharing capabilities. See &DRM_PRIME_CAP_IMPORT
  * and &DRM_PRIME_CAP_EXPORT.
  *
- * PRIME buffers are exposed as dma-buf file descriptors. See
- * Documentation/gpu/drm-mm.rst, section "PRIME Buffer Sharing".
+ * Starting from kernel version 6.6, both &DRM_PRIME_CAP_IMPORT and
+ * &DRM_PRIME_CAP_EXPORT are always advertised.
+ *
+ * PRIME buffers are exposed as dma-buf file descriptors.
+ * See :ref:`prime_buffer_sharing`.
  */
 #define DRM_CAP_PRIME			0x5
 /**
@@ -676,6 +679,8 @@ struct drm_gem_open {
  *
  * If this bit is set in &DRM_CAP_PRIME, the driver supports importing PRIME
  * buffers via the &DRM_IOCTL_PRIME_FD_TO_HANDLE ioctl.
+ *
+ * Starting from kernel version 6.6, this bit is always set in &DRM_CAP_PRIME.
  */
 #define DRM_PRIME_CAP_IMPORT		0x1
 /**
@@ -683,6 +688,8 @@ struct drm_gem_open {
  *
  * If this bit is set in &DRM_CAP_PRIME, the driver supports exporting PRIME
  * buffers via the &DRM_IOCTL_PRIME_HANDLE_TO_FD ioctl.
+ *
+ * Starting from kernel version 6.6, this bit is always set in &DRM_CAP_PRIME.
  */
 #define DRM_PRIME_CAP_EXPORT		0x2
 /**
@@ -750,15 +757,14 @@ struct drm_gem_open {
 /**
  * DRM_CAP_SYNCOBJ
  *
- * If set to 1, the driver supports sync objects. See
- * Documentation/gpu/drm-mm.rst, section "DRM Sync Objects".
+ * If set to 1, the driver supports sync objects. See :ref:`drm_sync_objects`.
  */
 #define DRM_CAP_SYNCOBJ		0x13
 /**
  * DRM_CAP_SYNCOBJ_TIMELINE
  *
  * If set to 1, the driver supports timeline operations on sync objects. See
- * Documentation/gpu/drm-mm.rst, section "DRM Sync Objects".
+ * :ref:`drm_sync_objects`.
  */
 #define DRM_CAP_SYNCOBJ_TIMELINE	0x14
 
@@ -903,6 +909,27 @@ struct drm_syncobj_timeline_wait {
 	__u32 pad;
 };
 
+/**
+ * struct drm_syncobj_eventfd
+ * @handle: syncobj handle.
+ * @flags: Zero to wait for the point to be signalled, or
+ *         &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE to wait for a fence to be
+ *         available for the point.
+ * @point: syncobj timeline point (set to zero for binary syncobjs).
+ * @fd: Existing eventfd to send events to.
+ * @pad: Must be zero.
+ *
+ * Register an eventfd to be signalled by a syncobj. The eventfd counter will
+ * be incremented by one.
+ */
+struct drm_syncobj_eventfd {
+	__u32 handle;
+	__u32 flags;
+	__u64 point;
+	__s32 fd;
+	__u32 pad;
+};
+
 
 struct drm_syncobj_array {
 	__u64 handles;
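A sketch of how the new struct is meant to be used; the helper and the eventfd plumbing are assumptions, and the DRM_IOCTL_SYNCOBJ_EVENTFD number itself is added later in this diff:

	#include <sys/eventfd.h>
	#include <sys/ioctl.h>
	#include "drm.h"

	/* Wire a syncobj up to an eventfd so a signal can be consumed via poll()/read(). */
	static int syncobj_notify_eventfd(int drm_fd, __u32 syncobj_handle, __u64 point)
	{
		int efd = eventfd(0, EFD_CLOEXEC);
		struct drm_syncobj_eventfd args = {
			.handle = syncobj_handle,
			.flags = 0,		/* wait for the point to be signalled */
			.point = point,		/* 0 for a binary syncobj */
			.fd = efd,
		};

		if (efd < 0 || ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_EVENTFD, &args))
			return -1;
		return efd;	/* eventfd counter is incremented by one on signal */
	}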
@@ -966,6 +993,19 @@ extern "C" {
 #define DRM_IOCTL_GET_STATS             DRM_IOR( 0x06, struct drm_stats)
 #define DRM_IOCTL_SET_VERSION		DRM_IOWR(0x07, struct drm_set_version)
 #define DRM_IOCTL_MODESET_CTL           DRM_IOW(0x08, struct drm_modeset_ctl)
+/**
+ * DRM_IOCTL_GEM_CLOSE - Close a GEM handle.
+ *
+ * GEM handles are not reference-counted by the kernel. User-space is
+ * responsible for managing their lifetime. For example, if user-space imports
+ * the same memory object twice on the same DRM file description, the same GEM
+ * handle is returned by both imports, and user-space needs to ensure
+ * &DRM_IOCTL_GEM_CLOSE is performed once only. The same situation can happen
+ * when a memory object is allocated, then exported and imported again on the
+ * same DRM file description. The &DRM_IOCTL_MODE_GETFB2 IOCTL is an exception
+ * and always returns fresh new GEM handles even if an existing GEM handle
+ * already refers to the same memory object before the IOCTL is performed.
+ */
 #define DRM_IOCTL_GEM_CLOSE		DRM_IOW (0x09, struct drm_gem_close)
 #define DRM_IOCTL_GEM_FLINK		DRM_IOWR(0x0a, struct drm_gem_flink)
 #define DRM_IOCTL_GEM_OPEN		DRM_IOWR(0x0b, struct drm_gem_open)
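Since handles are not reference-counted, the close path is a single ioctl; a minimal sketch (assumes an open DRM fd):

	#include <sys/ioctl.h>
	#include "drm.h"

	/* Close a GEM handle exactly once; double-closing is a user-space bug. */
	static int gem_close(int fd, __u32 handle)
	{
		struct drm_gem_close args = { .handle = handle };

		return ioctl(fd, DRM_IOCTL_GEM_CLOSE, &args);
	}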
@@ -1006,7 +1046,37 @@ extern "C" {
 #define DRM_IOCTL_UNLOCK		DRM_IOW( 0x2b, struct drm_lock)
 #define DRM_IOCTL_FINISH		DRM_IOW( 0x2c, struct drm_lock)
 
+/**
+ * DRM_IOCTL_PRIME_HANDLE_TO_FD - Convert a GEM handle to a DMA-BUF FD.
+ *
+ * User-space sets &drm_prime_handle.handle with the GEM handle to export and
+ * &drm_prime_handle.flags, and gets back a DMA-BUF file descriptor in
+ * &drm_prime_handle.fd.
+ *
+ * The export can fail for any driver-specific reason, e.g. because export is
+ * not supported for this specific GEM handle (but might be for others).
+ *
+ * Support for exporting DMA-BUFs is advertised via &DRM_PRIME_CAP_EXPORT.
+ */
 #define DRM_IOCTL_PRIME_HANDLE_TO_FD    DRM_IOWR(0x2d, struct drm_prime_handle)
+/**
+ * DRM_IOCTL_PRIME_FD_TO_HANDLE - Convert a DMA-BUF FD to a GEM handle.
+ *
+ * User-space sets &drm_prime_handle.fd with a DMA-BUF file descriptor to
+ * import, and gets back a GEM handle in &drm_prime_handle.handle.
+ * &drm_prime_handle.flags is unused.
+ *
+ * If an existing GEM handle refers to the memory object backing the DMA-BUF,
+ * that GEM handle is returned. Therefore user-space which needs to handle
+ * arbitrary DMA-BUFs must have a user-space lookup data structure to manually
+ * reference-count duplicated GEM handles. For more information see
+ * &DRM_IOCTL_GEM_CLOSE.
+ *
+ * The import can fail for any driver-specific reason, e.g. because import is
+ * only supported for DMA-BUFs allocated on this DRM device.
+ *
+ * Support for importing DMA-BUFs is advertised via &DRM_PRIME_CAP_IMPORT.
+ */
 #define DRM_IOCTL_PRIME_FD_TO_HANDLE    DRM_IOWR(0x2e, struct drm_prime_handle)
 
 #define DRM_IOCTL_AGP_ACQUIRE		DRM_IO(  0x30)
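A minimal export sketch following the documented convention (assumes an open DRM fd; DRM_CLOEXEC and DRM_RDWR come from drm.h):

	#include <sys/ioctl.h>
	#include "drm.h"

	/* Export a GEM handle as a readable/writable, close-on-exec dma-buf fd. */
	static int gem_to_dmabuf(int fd, __u32 handle)
	{
		struct drm_prime_handle args = {
			.handle = handle,
			.flags = DRM_CLOEXEC | DRM_RDWR,
			.fd = -1,
		};

		if (ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args))
			return -1;
		return args.fd;	/* caller owns the dma-buf fd */
	}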
@@ -1098,8 +1168,13 @@ extern "C" {
  * struct as the output.
  *
  * If the client is DRM master or has &CAP_SYS_ADMIN, &drm_mode_fb_cmd2.handles
- * will be filled with GEM buffer handles. Planes are valid until one has a
- * zero handle -- this can be used to compute the number of planes.
+ * will be filled with GEM buffer handles. Fresh new GEM handles are always
+ * returned, even if another GEM handle referring to the same memory object
+ * already exists on the DRM file description. The caller is responsible for
+ * removing the new handles, e.g. via the &DRM_IOCTL_GEM_CLOSE IOCTL. The same
+ * new handle will be returned for multiple planes in case they use the same
+ * memory object. Planes are valid until one has a zero handle -- this can be
+ * used to compute the number of planes.
  *
  * Otherwise, &drm_mode_fb_cmd2.handles will be zeroed and planes are valid
  * until one has a zero &drm_mode_fb_cmd2.pitches.
@@ -1107,9 +1182,16 @@ extern "C" {
  * If the framebuffer has a format modifier, &DRM_MODE_FB_MODIFIERS will be set
  * in &drm_mode_fb_cmd2.flags and &drm_mode_fb_cmd2.modifier will contain the
  * modifier. Otherwise, user-space must ignore &drm_mode_fb_cmd2.modifier.
+ *
+ * To obtain DMA-BUF FDs for each plane without leaking GEM handles, user-space
+ * can export each handle via &DRM_IOCTL_PRIME_HANDLE_TO_FD, then immediately
+ * close each unique handle via &DRM_IOCTL_GEM_CLOSE, making sure to not
+ * double-close handles which are specified multiple times in the array.
  */
 #define DRM_IOCTL_MODE_GETFB2		DRM_IOWR(0xCE, struct drm_mode_fb_cmd2)
 
+#define DRM_IOCTL_SYNCOBJ_EVENTFD	DRM_IOWR(0xCF, struct drm_syncobj_eventfd)
+
 /*
  * Device specific ioctls should only be in their respective headers
  * The device specific ioctl range is from 0x40 to 0x9f.
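Putting the documented no-leak pattern together, a sketch that fetches a framebuffer, exports every plane, and closes each unique handle exactly once (names are illustrative, error handling trimmed):

	#include <sys/ioctl.h>
	#include "drm.h"
	#include "drm_mode.h"

	static void getfb2_export(int fd, __u32 fb_id, int dmabuf_fds[4])
	{
		struct drm_mode_fb_cmd2 fb = { .fb_id = fb_id };
		int i, j;

		if (ioctl(fd, DRM_IOCTL_MODE_GETFB2, &fb))
			return;

		for (i = 0; i < 4 && fb.handles[i]; i++) {
			struct drm_prime_handle prime = {
				.handle = fb.handles[i],
				.flags = DRM_CLOEXEC,
				.fd = -1,
			};
			ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &prime);
			dmabuf_fds[i] = prime.fd;

			/* close each unique handle once only: skip repeats */
			for (j = 0; j < i; j++)
				if (fb.handles[j] == fb.handles[i])
					break;
			if (j == i) {
				struct drm_gem_close gc = { .handle = fb.handles[i] };
				ioctl(fd, DRM_IOCTL_GEM_CLOSE, &gc);
			}
		}
	}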
@@ -1121,25 +1203,50 @@ extern "C" {
 #define DRM_COMMAND_BASE                0x40
 #define DRM_COMMAND_END			0xA0
 
-/*
- * Header for events written back to userspace on the drm fd. The
- * type defines the type of event, the length specifies the total
- * length of the event (including the header), and user_data is
- * typically a 64 bit value passed with the ioctl that triggered the
- * event. A read on the drm fd will always only return complete
- * events, that is, if for example the read buffer is 100 bytes, and
- * there are two 64 byte events pending, only one will be returned.
- *
- * Event types 0 - 0x7fffffff are generic drm events, 0x80000000 and
- * up are chipset specific.
+/**
+ * struct drm_event - Header for DRM events
+ * @type: event type.
+ * @length: total number of payload bytes (including header).
+ *
+ * This struct is a header for events written back to user-space on the DRM FD.
+ * A read on the DRM FD will always only return complete events: e.g. if the
+ * read buffer is 100 bytes large and there are two 64 byte events pending,
+ * only one will be returned.
+ *
+ * Event types 0 - 0x7fffffff are generic DRM events, 0x80000000 and
+ * up are chipset specific. Generic DRM events include &DRM_EVENT_VBLANK,
+ * &DRM_EVENT_FLIP_COMPLETE and &DRM_EVENT_CRTC_SEQUENCE.
  */
 struct drm_event {
 	__u32 type;
 	__u32 length;
 };
 
+/**
+ * DRM_EVENT_VBLANK - vertical blanking event
+ *
+ * This event is sent in response to &DRM_IOCTL_WAIT_VBLANK with the
+ * &_DRM_VBLANK_EVENT flag set.
+ *
+ * The event payload is a struct drm_event_vblank.
+ */
 #define DRM_EVENT_VBLANK 0x01
+/**
+ * DRM_EVENT_FLIP_COMPLETE - page-flip completion event
+ *
+ * This event is sent in response to an atomic commit or legacy page-flip with
+ * the &DRM_MODE_PAGE_FLIP_EVENT flag set.
+ *
+ * The event payload is a struct drm_event_vblank.
+ */
 #define DRM_EVENT_FLIP_COMPLETE 0x02
+/**
+ * DRM_EVENT_CRTC_SEQUENCE - CRTC sequence event
+ *
+ * This event is sent in response to &DRM_IOCTL_CRTC_QUEUE_SEQUENCE.
+ *
+ * The event payload is a struct drm_event_crtc_sequence.
+ */
 #define DRM_EVENT_CRTC_SEQUENCE 0x03
 
 struct drm_event_vblank {
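A sketch of the documented read loop, walking complete events by their length field (assumes a readable DRM fd):

	#include <unistd.h>
	#include "drm.h"

	/* read() returns only whole events, so the buffer can be walked safely. */
	static void drain_drm_events(int fd)
	{
		char buf[1024];
		ssize_t len = read(fd, buf, sizeof(buf));
		ssize_t off = 0;

		while (len > 0 && off + (ssize_t)sizeof(struct drm_event) <= len) {
			struct drm_event *e = (struct drm_event *)&buf[off];

			switch (e->type) {
			case DRM_EVENT_VBLANK:
			case DRM_EVENT_FLIP_COMPLETE:
				/* payload is a struct drm_event_vblank */
				break;
			case DRM_EVENT_CRTC_SEQUENCE:
				/* payload is a struct drm_event_crtc_sequence */
				break;
			}
			off += e->length;	/* length includes the header */
		}
	}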
@@ -88,6 +88,18 @@ extern "C" {
  *
  * The authoritative list of format modifier codes is found in
  * `include/uapi/drm/drm_fourcc.h`
+ *
+ * Open Source User Waiver
+ * -----------------------
+ *
+ * Because this is the authoritative source for pixel formats and modifiers
+ * referenced by GL, Vulkan extensions and other standards and hence used both
+ * by open source and closed source driver stacks, the usual requirement for an
+ * upstream in-kernel or open source userspace user does not apply.
+ *
+ * To ensure, as much as feasible, compatibility across stacks and avoid
+ * confusion with incompatible enumerations stakeholders for all relevant driver
+ * stacks should approve additions.
  */
 
 #define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \
@@ -645,6 +657,49 @@ extern "C" {
  */
 #define I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC fourcc_mod_code(INTEL, 12)
 
+/*
+ * Intel Color Control Surfaces (CCS) for display ver. 14 render compression.
+ *
+ * The main surface is tile4 and at plane index 0, the CCS is linear and
+ * at index 1. A 64B CCS cache line corresponds to an area of 4x1 tiles in
+ * main surface. In other words, 4 bits in CCS map to a main surface cache
+ * line pair. The main surface pitch is required to be a multiple of four
+ * tile4 widths.
+ */
+#define I915_FORMAT_MOD_4_TILED_MTL_RC_CCS fourcc_mod_code(INTEL, 13)
+
+/*
+ * Intel Color Control Surfaces (CCS) for display ver. 14 media compression
+ *
+ * The main surface is tile4 and at plane index 0, the CCS is linear and
+ * at index 1. A 64B CCS cache line corresponds to an area of 4x1 tiles in
+ * main surface. In other words, 4 bits in CCS map to a main surface cache
+ * line pair. The main surface pitch is required to be a multiple of four
+ * tile4 widths. For semi-planar formats like NV12, CCS planes follow the
+ * Y and UV planes i.e., planes 0 and 1 are used for Y and UV surfaces,
+ * planes 2 and 3 for the respective CCS.
+ */
+#define I915_FORMAT_MOD_4_TILED_MTL_MC_CCS fourcc_mod_code(INTEL, 14)
+
+/*
+ * Intel Color Control Surface with Clear Color (CCS) for display ver. 14 render
+ * compression.
+ *
+ * The main surface is tile4 and is at plane index 0 whereas CCS is linear
+ * and at index 1. The clear color is stored at index 2, and the pitch should
+ * be ignored. The clear color structure is 256 bits. The first 128 bits
+ * represents Raw Clear Color Red, Green, Blue and Alpha color each represented
+ * by 32 bits. The raw clear color is consumed by the 3d engine and generates
+ * the converted clear color of size 64 bits. The first 32 bits store the Lower
+ * Converted Clear Color value and the next 32 bits store the Higher Converted
+ * Clear Color value when applicable. The Converted Clear Color values are
+ * consumed by the DE. The last 64 bits are used to store Color Discard Enable
+ * and Depth Clear Value Valid which are ignored by the DE. A CCS cache line
+ * corresponds to an area of 4x1 tiles in the main surface. The main surface
+ * pitch is required to be a multiple of 4 tile widths.
+ */
+#define I915_FORMAT_MOD_4_TILED_MTL_RC_CCS_CC fourcc_mod_code(INTEL, 15)
+
 /*
  * Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks
  *
@@ -488,6 +488,9 @@ struct drm_mode_get_connector {
	 * This is not an object ID. This is a per-type connector number. Each
	 * (type, type_id) combination is unique across all connectors of a DRM
	 * device.
+	 *
+	 * The (type, type_id) combination is not a stable identifier: the
+	 * type_id can change depending on the driver probe order.
	 */
	__u32 connector_type_id;
 
@@ -834,6 +837,11 @@ struct drm_color_ctm {
	/*
	 * Conversion matrix in S31.32 sign-magnitude
	 * (not two's complement!) format.
+	 *
+	 * out   matrix    in
+	 * |R|   |0 1 2|   |R|
+	 * |G| = |3 4 5| x |G|
+	 * |B|   |6 7 8|   |B|
	 */
	__u64 matrix[9];
};
@@ -878,7 +886,7 @@ struct hdr_metadata_infoframe {
	 */
	struct {
		__u16 x, y;
-		} display_primaries[3];
+	} display_primaries[3];
	/**
	 * @white_point: White Point of Colorspace Data.
	 * These are coded as unsigned 16-bit values in units of
@@ -889,7 +897,7 @@ struct hdr_metadata_infoframe {
	 */
	struct {
		__u16 x, y;
-		} white_point;
+	} white_point;
	/**
	 * @max_display_mastering_luminance: Max Mastering Display Luminance.
	 * This value is coded as an unsigned 16-bit value in units of 1 cd/m2,
@@ -280,7 +280,16 @@ enum drm_i915_pmu_engine_sample {
 #define I915_PMU_ENGINE_SEMA(class, instance) \
	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA)
 
-#define __I915_PMU_OTHER(x) (__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x))
+/*
+ * Top 4 bits of every non-engine counter are GT id.
+ */
+#define __I915_PMU_GT_SHIFT (60)
+
+#define ___I915_PMU_OTHER(gt, x) \
+	(((__u64)__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x)) | \
+	((__u64)(gt) << __I915_PMU_GT_SHIFT))
+
+#define __I915_PMU_OTHER(x) ___I915_PMU_OTHER(0, x)
 
 #define I915_PMU_ACTUAL_FREQUENCY	__I915_PMU_OTHER(0)
 #define I915_PMU_REQUESTED_FREQUENCY	__I915_PMU_OTHER(1)
@@ -290,6 +299,12 @@ enum drm_i915_pmu_engine_sample {
 
 #define I915_PMU_LAST /* Deprecated - do not use */ I915_PMU_RC6_RESIDENCY
 
+#define __I915_PMU_ACTUAL_FREQUENCY(gt)		___I915_PMU_OTHER(gt, 0)
+#define __I915_PMU_REQUESTED_FREQUENCY(gt)	___I915_PMU_OTHER(gt, 1)
+#define __I915_PMU_INTERRUPTS(gt)		___I915_PMU_OTHER(gt, 2)
+#define __I915_PMU_RC6_RESIDENCY(gt)		___I915_PMU_OTHER(gt, 3)
+#define __I915_PMU_SOFTWARE_GT_AWAKE_TIME(gt)	___I915_PMU_OTHER(gt, 4)
+
 /* Each region is a minimum of 16k, and there are at most 255 of them.
  */
 #define I915_NR_TEX_REGIONS 255	/* table size 2k - maximum due to use
@@ -645,6 +660,23 @@ typedef struct drm_i915_irq_wait {
  */
 #define   I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP	(1ul << 5)
 
+/*
+ * Query the status of HuC load.
+ *
+ * The query can fail in the following scenarios with the listed error codes:
+ *  -ENODEV if HuC is not present on this platform,
+ *  -EOPNOTSUPP if HuC firmware usage is disabled,
+ *  -ENOPKG if HuC firmware fetch failed,
+ *  -ENOEXEC if HuC firmware is invalid or mismatched,
+ *  -ENOMEM if i915 failed to prepare the FW objects for transfer to the uC,
+ *  -EIO if the FW transfer or the FW authentication failed.
+ *
+ * If the IOCTL is successful, the returned parameter will be set to one of the
+ * following values:
+ *  * 0 if HuC firmware load is not complete,
+ *  * 1 if HuC firmware is loaded and fully authenticated,
+ *  * 2 if HuC firmware is loaded and authenticated for clear media only
+ */
 #define I915_PARAM_HUC_STATUS		 42
 
 /* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
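A hedged sketch of issuing this query through the existing GETPARAM ioctl (helper name assumed; drm_i915_getparam_t and DRM_IOCTL_I915_GETPARAM are the standard entry points):

	#include <sys/ioctl.h>
	#include "i915_drm.h"

	/* Returns 0/1/2 as documented above, or -1 with errno set on failure. */
	static int huc_status(int fd)
	{
		int value = 0;
		drm_i915_getparam_t gp = {
			.param = I915_PARAM_HUC_STATUS,
			.value = &value,
		};

		if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
			return -1;
		return value;
	}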
@@ -755,6 +787,25 @@ typedef struct drm_i915_irq_wait {
  */
 #define I915_PARAM_OA_TIMESTAMP_FREQUENCY 57
 
+/*
+ * Query the status of PXP support in i915.
+ *
+ * The query can fail in the following scenarios with the listed error codes:
+ *     -ENODEV = PXP support is not available on the GPU device or in the
+ *               kernel due to missing component drivers or kernel configs.
+ *
+ * If the IOCTL is successful, the returned parameter will be set to one of
+ * the following values:
+ *     1 = PXP feature is supported and is ready for use.
+ *     2 = PXP feature is supported but should be ready soon (pending
+ *         initialization of non-i915 system dependencies).
+ *
+ * NOTE: When param is supported (positive return values), user space should
+ *       still refer to the GEM PXP context-creation UAPI header specs to be
+ *       aware of possible failure due to system state machine at the time.
+ */
+#define I915_PARAM_PXP_STATUS		 58
+
 /* Must be kept compact -- no holes and well documented */
 
 /**
@@ -2080,6 +2131,21 @@ struct drm_i915_gem_context_param {
  *
  * -ENODEV: feature not available
  * -EPERM: trying to mark a recoverable or not bannable context as protected
+ * -ENXIO: A dependency such as a component driver or firmware is not yet
+ *         loaded so user space may need to attempt again. Depending on the
+ *         device, this error may be reported if protected context creation is
+ *         attempted very early after kernel start because the internal timeout
+ *         waiting for such dependencies is not guaranteed to be larger than
+ *         required (numbers differ depending on system and kernel config):
+ *          - ADL/RPL: dependencies may take up to 3 seconds from kernel start
+ *                     while context creation internal timeout is 250 millisecs
+ *          - MTL: dependencies may take up to 8 seconds from kernel start
+ *                 while context creation internal timeout is 250 millisecs
+ *         NOTE: such dependencies happen once, so a subsequent call to create a
+ *         protected context after a prior successful call will not experience
+ *         such timeouts and will not return -ENXIO (unless the driver is reloaded,
+ *         or, depending on the device, resumes from a suspended state).
+ * -EIO: The firmware did not succeed in creating the protected context.
  */
 #define I915_CONTEXT_PARAM_PROTECTED_CONTENT    0xd
 /* Must be kept compact -- no holes and well documented */
@@ -2475,7 +2541,7 @@ struct i915_context_param_engines {
 #define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */
 #define I915_CONTEXT_ENGINES_EXT_BOND 1 /* see i915_context_engines_bond */
 #define I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT 2 /* see i915_context_engines_parallel_submit */
-	struct i915_engine_class_instance engines[0];
+	struct i915_engine_class_instance engines[];
 } __attribute__((packed));
 
 #define I915_DEFINE_CONTEXT_PARAM_ENGINES(name__, N__) struct { \
@@ -2660,6 +2726,10 @@ enum drm_i915_oa_format {
	I915_OAR_FORMAT_A32u40_A4u32_B8_C8,
	I915_OA_FORMAT_A24u40_A14u32_B8_C8,
 
+	/* MTL OAM */
+	I915_OAM_FORMAT_MPEC8u64_B8_C8,
+	I915_OAM_FORMAT_MPEC8u32_B8_C8,
+
	I915_OA_FORMAT_MAX	    /* non-ABI */
 };
 
@@ -2742,6 +2812,25 @@ enum drm_i915_perf_property_id {
	 */
	DRM_I915_PERF_PROP_POLL_OA_PERIOD,
 
+	/**
+	 * Multiple engines may be mapped to the same OA unit. The OA unit is
+	 * identified by class:instance of any engine mapped to it.
+	 *
+	 * This parameter specifies the engine class and must be passed along
+	 * with DRM_I915_PERF_PROP_OA_ENGINE_INSTANCE.
+	 *
+	 * This property is available in perf revision 6.
+	 */
+	DRM_I915_PERF_PROP_OA_ENGINE_CLASS,
+
+	/**
+	 * This parameter specifies the engine instance and must be passed along
+	 * with DRM_I915_PERF_PROP_OA_ENGINE_CLASS.
+	 *
+	 * This property is available in perf revision 6.
+	 */
+	DRM_I915_PERF_PROP_OA_ENGINE_INSTANCE,
+
	DRM_I915_PERF_PROP_MAX /* non-ABI */
 };
 
@@ -3503,27 +3592,13 @@ struct drm_i915_gem_create_ext {
	 *
	 * The (page-aligned) allocated size for the object will be returned.
	 *
-	 * DG2 64K min page size implications:
-	 *
-	 * On discrete platforms, starting from DG2, we have to contend with GTT
-	 * page size restrictions when dealing with I915_MEMORY_CLASS_DEVICE
-	 * objects. Specifically the hardware only supports 64K or larger GTT
-	 * page sizes for such memory. The kernel will already ensure that all
-	 * I915_MEMORY_CLASS_DEVICE memory is allocated using 64K or larger page
-	 * sizes underneath.
-	 *
-	 * Note that the returned size here will always reflect any required
-	 * rounding up done by the kernel, i.e 4K will now become 64K on devices
-	 * such as DG2. The kernel will always select the largest minimum
-	 * page-size for the set of possible placements as the value to use when
-	 * rounding up the @size.
-	 *
-	 * Special DG2 GTT address alignment requirement:
-	 *
-	 * The GTT alignment will also need to be at least 2M for such objects.
-	 *
-	 * Note that due to how the hardware implements 64K GTT page support, we
-	 * have some further complications:
+	 * On platforms like DG2/ATS the kernel will always use 64K or larger
+	 * pages for I915_MEMORY_CLASS_DEVICE. The kernel also requires a
+	 * minimum of 64K GTT alignment for such objects.
+	 *
+	 * NOTE: Previously the ABI here required a minimum GTT alignment of 2M
+	 * on DG2/ATS, due to how the hardware implemented 64K GTT page support,
+	 * where we had the following complications:
	 *
	 * 1) The entire PDE (which covers a 2MB virtual address range), must
	 *    contain only 64K PTEs, i.e mixing 4K and 64K PTEs in the same
@@ -3532,12 +3607,10 @@ struct drm_i915_gem_create_ext {
	 * 2) We still need to support 4K PTEs for I915_MEMORY_CLASS_SYSTEM
	 *    objects.
	 *
-	 * To keep things simple for userland, we mandate that any GTT mappings
-	 * must be aligned to and rounded up to 2MB. The kernel will internally
-	 * pad them out to the next 2MB boundary. As this only wastes virtual
-	 * address space and avoids userland having to copy any needlessly
-	 * complicated PDE sharing scheme (coloring) and only affects DG2, this
-	 * is deemed to be a good compromise.
+	 * However on actual production HW this was completely changed to now
+	 * allow setting a TLB hint at the PTE level (see PS64), which is a lot
+	 * more flexible than the above. With this the 2M restriction was
+	 * dropped where we now only require 64K.
	 */
	__u64 size;
 
@@ -3607,9 +3680,13 @@ struct drm_i915_gem_create_ext {
	 *
	 * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
	 * struct drm_i915_gem_create_ext_protected_content.
+	 *
+	 * For I915_GEM_CREATE_EXT_SET_PAT usage see
+	 * struct drm_i915_gem_create_ext_set_pat.
	 */
 #define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
 #define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
+#define I915_GEM_CREATE_EXT_SET_PAT 2
	__u64 extensions;
 };
 
@@ -3724,6 +3801,43 @@ struct drm_i915_gem_create_ext_protected_content {
	__u32 flags;
 };
 
+/**
+ * struct drm_i915_gem_create_ext_set_pat - The
+ * I915_GEM_CREATE_EXT_SET_PAT extension.
+ *
+ * If this extension is provided, the specified caching policy (PAT index) is
+ * applied to the buffer object.
+ *
+ * Below is an example on how to create an object with specific caching policy:
+ *
+ * .. code-block:: C
+ *
+ *      struct drm_i915_gem_create_ext_set_pat set_pat_ext = {
+ *              .base = { .name = I915_GEM_CREATE_EXT_SET_PAT },
+ *              .pat_index = 0,
+ *      };
+ *      struct drm_i915_gem_create_ext create_ext = {
+ *              .size = PAGE_SIZE,
+ *              .extensions = (uintptr_t)&set_pat_ext,
+ *      };
+ *
+ *      int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
+ *      if (err) ...
+ */
+struct drm_i915_gem_create_ext_set_pat {
+	/** @base: Extension link. See struct i915_user_extension. */
+	struct i915_user_extension base;
+	/**
+	 * @pat_index: PAT index to be set
+	 * PAT index is a bit field in Page Table Entry to control caching
+	 * behaviors for GPU accesses. The definition of PAT index is
+	 * platform dependent and can be found in hardware specifications,
+	 */
+	__u32 pat_index;
+	/** @rsvd: reserved for future use */
+	__u32 rsvd;
+};
+
 /* ID of the protected content session managed by i915 when PXP is active */
 #define I915_PROTECTED_CONTENT_DEFAULT_SESSION 0xf
 
@@ -138,6 +138,7 @@ struct drm_msm_gem_new {
 #define MSM_INFO_SET_NAME	0x02   /* set the debug name (by pointer) */
 #define MSM_INFO_GET_NAME	0x03   /* get debug name, returned by pointer */
 #define MSM_INFO_SET_IOVA	0x04   /* set the iova, passed by value */
+#define MSM_INFO_GET_FLAGS	0x05   /* get the MSM_BO_x flags */
 
 struct drm_msm_gem_info {
	__u32 handle;         /* in */
@@ -150,8 +151,13 @@ struct drm_msm_gem_info {
 #define MSM_PREP_READ        0x01
 #define MSM_PREP_WRITE       0x02
 #define MSM_PREP_NOSYNC      0x04
+#define MSM_PREP_BOOST       0x08
 
-#define MSM_PREP_FLAGS       (MSM_PREP_READ | MSM_PREP_WRITE | MSM_PREP_NOSYNC)
+#define MSM_PREP_FLAGS       (MSM_PREP_READ | \
+			      MSM_PREP_WRITE | \
+			      MSM_PREP_NOSYNC | \
+			      MSM_PREP_BOOST | \
+			      0)
 
 struct drm_msm_gem_cpu_prep {
	__u32 handle;         /* in */
@@ -225,10 +231,12 @@ struct drm_msm_gem_submit_cmd {
 #define MSM_SUBMIT_BO_READ             0x0001
 #define MSM_SUBMIT_BO_WRITE            0x0002
 #define MSM_SUBMIT_BO_DUMP             0x0004
+#define MSM_SUBMIT_BO_NO_IMPLICIT      0x0008
 
 #define MSM_SUBMIT_BO_FLAGS            (MSM_SUBMIT_BO_READ | \
					MSM_SUBMIT_BO_WRITE | \
-					MSM_SUBMIT_BO_DUMP)
+					MSM_SUBMIT_BO_DUMP | \
+					MSM_SUBMIT_BO_NO_IMPLICIT)
 
 struct drm_msm_gem_submit_bo {
	__u32 flags;          /* in, mask of MSM_SUBMIT_BO_x */
@@ -287,6 +295,11 @@ struct drm_msm_gem_submit {
 
 };
 
+#define MSM_WAIT_FENCE_BOOST	0x00000001
+#define MSM_WAIT_FENCE_FLAGS ( \
+		MSM_WAIT_FENCE_BOOST | \
+		0)
+
 /* The normal way to synchronize with the GPU is just to CPU_PREP on
  * a buffer if you need to access it from the CPU (other cmdstream
  * submission from same or other contexts, PAGE_FLIP ioctl, etc, all
@@ -296,7 +309,7 @@ struct drm_msm_gem_submit {
  */
 struct drm_msm_wait_fence {
	__u32 fence;          /* in */
-	__u32 pad;
+	__u32 flags;          /* in, bitmask of MSM_WAIT_FENCE_x */
	struct drm_msm_timespec timeout;   /* in */
	__u32 queueid;         /* in, submitqueue id */
 };
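A sketch of a boosted wait using the new flag (helper name assumed; DRM_IOCTL_MSM_WAIT_FENCE is the existing wait entry point):

	#include <sys/ioctl.h>
	#include "msm_drm.h"

	/* Wait on a fence from submitqueue `queueid`, hinting the GPU to clock up. */
	static int wait_fence_boost(int fd, __u32 fence, __u32 queueid,
				    struct drm_msm_timespec timeout)
	{
		struct drm_msm_wait_fence req = {
			.fence = fence,
			.flags = MSM_WAIT_FENCE_BOOST,
			.timeout = timeout,
			.queueid = queueid,
		};

		return ioctl(fd, DRM_IOCTL_MSM_WAIT_FENCE, &req);
	}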
lib/mesa/include/drm-uapi/nouveau_drm.h (new file, 479 lines)
@@ -0,0 +1,479 @@
+/*
+ * Copyright 2005 Stephane Marchesin.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NOUVEAU_DRM_H__
+#define __NOUVEAU_DRM_H__
+
+#define DRM_NOUVEAU_EVENT_NVIF                                       0x80000000
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#define NOUVEAU_GETPARAM_PCI_VENDOR      3
+#define NOUVEAU_GETPARAM_PCI_DEVICE      4
+#define NOUVEAU_GETPARAM_BUS_TYPE        5
+#define NOUVEAU_GETPARAM_FB_SIZE         8
+#define NOUVEAU_GETPARAM_AGP_SIZE        9
+#define NOUVEAU_GETPARAM_CHIPSET_ID      11
+#define NOUVEAU_GETPARAM_VM_VRAM_BASE    12
+#define NOUVEAU_GETPARAM_GRAPH_UNITS     13
+#define NOUVEAU_GETPARAM_PTIMER_TIME     14
+#define NOUVEAU_GETPARAM_HAS_BO_USAGE    15
+#define NOUVEAU_GETPARAM_HAS_PAGEFLIP    16
+
+/**
+ * @NOUVEAU_GETPARAM_EXEC_PUSH_MAX
+ *
+ * Query the maximum amount of IBs that can be pushed through a single
+ * &drm_nouveau_exec structure and hence a single &DRM_IOCTL_NOUVEAU_EXEC
+ * ioctl().
+ */
+#define NOUVEAU_GETPARAM_EXEC_PUSH_MAX   17
+
+struct drm_nouveau_getparam {
+	__u64 param;
+	__u64 value;
+};
+
+struct drm_nouveau_channel_alloc {
+	__u32     fb_ctxdma_handle;
+	__u32     tt_ctxdma_handle;
+
+	__s32     channel;
+	__u32     pushbuf_domains;
+
+	/* Notifier memory */
+	__u32     notifier_handle;
+
+	/* DRM-enforced subchannel assignments */
+	struct {
+		__u32 handle;
+		__u32 grclass;
+	} subchan[8];
+	__u32 nr_subchan;
+};
+
+struct drm_nouveau_channel_free {
+	__s32 channel;
+};
+
+#define NOUVEAU_GEM_DOMAIN_CPU       (1 << 0)
+#define NOUVEAU_GEM_DOMAIN_VRAM      (1 << 1)
+#define NOUVEAU_GEM_DOMAIN_GART      (1 << 2)
+#define NOUVEAU_GEM_DOMAIN_MAPPABLE  (1 << 3)
+#define NOUVEAU_GEM_DOMAIN_COHERENT  (1 << 4)
+/* The BO will never be shared via import or export. */
+#define NOUVEAU_GEM_DOMAIN_NO_SHARE  (1 << 5)
+
+#define NOUVEAU_GEM_TILE_COMP        0x00030000 /* nv50-only */
+#define NOUVEAU_GEM_TILE_LAYOUT_MASK 0x0000ff00
+#define NOUVEAU_GEM_TILE_16BPP       0x00000001
+#define NOUVEAU_GEM_TILE_32BPP       0x00000002
+#define NOUVEAU_GEM_TILE_ZETA        0x00000004
+#define NOUVEAU_GEM_TILE_NONCONTIG   0x00000008
+
+struct drm_nouveau_gem_info {
+	__u32 handle;
+	__u32 domain;
+	__u64 size;
+	__u64 offset;
+	__u64 map_handle;
+	__u32 tile_mode;
+	__u32 tile_flags;
+};
+
+struct drm_nouveau_gem_new {
+	struct drm_nouveau_gem_info info;
+	__u32 channel_hint;
+	__u32 align;
+};
+
+#define NOUVEAU_GEM_MAX_BUFFERS 1024
+struct drm_nouveau_gem_pushbuf_bo_presumed {
+	__u32 valid;
+	__u32 domain;
+	__u64 offset;
+};
+
+struct drm_nouveau_gem_pushbuf_bo {
+	__u64 user_priv;
+	__u32 handle;
+	__u32 read_domains;
+	__u32 write_domains;
+	__u32 valid_domains;
+	struct drm_nouveau_gem_pushbuf_bo_presumed presumed;
+};
+
+#define NOUVEAU_GEM_RELOC_LOW  (1 << 0)
+#define NOUVEAU_GEM_RELOC_HIGH (1 << 1)
+#define NOUVEAU_GEM_RELOC_OR   (1 << 2)
+#define NOUVEAU_GEM_MAX_RELOCS 1024
+struct drm_nouveau_gem_pushbuf_reloc {
+	__u32 reloc_bo_index;
+	__u32 reloc_bo_offset;
+	__u32 bo_index;
+	__u32 flags;
+	__u32 data;
+	__u32 vor;
+	__u32 tor;
+};
+
+#define NOUVEAU_GEM_MAX_PUSH 512
+struct drm_nouveau_gem_pushbuf_push {
+	__u32 bo_index;
+	__u32 pad;
+	__u64 offset;
+	__u64 length;
+#define NOUVEAU_GEM_PUSHBUF_NO_PREFETCH (1 << 23)
+};
+
+struct drm_nouveau_gem_pushbuf {
+	__u32 channel;
+	__u32 nr_buffers;
+	__u64 buffers;
+	__u32 nr_relocs;
+	__u32 nr_push;
+	__u64 relocs;
+	__u64 push;
+	__u32 suffix0;
+	__u32 suffix1;
+#define NOUVEAU_GEM_PUSHBUF_SYNC (1ULL << 0)
+	__u64 vram_available;
+	__u64 gart_available;
+};
+
+#define NOUVEAU_GEM_CPU_PREP_NOWAIT 0x00000001
+#define NOUVEAU_GEM_CPU_PREP_WRITE  0x00000004
+struct drm_nouveau_gem_cpu_prep {
+	__u32 handle;
+	__u32 flags;
+};
+
+struct drm_nouveau_gem_cpu_fini {
+	__u32 handle;
+};
+
+/**
+ * struct drm_nouveau_sync - sync object
+ *
+ * This structure serves as synchronization mechanism for (potentially)
+ * asynchronous operations such as EXEC or VM_BIND.
+ */
+struct drm_nouveau_sync {
+	/**
+	 * @flags: the flags for a sync object
+	 *
+	 * The first 8 bits are used to determine the type of the sync object.
+	 */
+	__u32 flags;
+#define DRM_NOUVEAU_SYNC_SYNCOBJ 0x0
+#define DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ 0x1
+#define DRM_NOUVEAU_SYNC_TYPE_MASK 0xf
+	/**
+	 * @handle: the handle of the sync object
+	 */
+	__u32 handle;
+	/**
+	 * @timeline_value:
+	 *
+	 * The timeline point of the sync object in case the syncobj is of
+	 * type DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ.
+	 */
+	__u64 timeline_value;
+};
+
+/**
+ * struct drm_nouveau_vm_init - GPU VA space init structure
+ *
+ * Used to initialize the GPU's VA space for a user client, telling the kernel
+ * which portion of the VA space is managed by the UMD and kernel respectively.
+ *
+ * For the UMD to use the VM_BIND uAPI, this must be called before any BOs or
+ * channels are created; if called afterwards DRM_IOCTL_NOUVEAU_VM_INIT fails
+ * with -ENOSYS.
+ */
+struct drm_nouveau_vm_init {
+	/**
+	 * @kernel_managed_addr: start address of the kernel managed VA space
+	 * region
+	 */
+	__u64 kernel_managed_addr;
+	/**
+	 * @kernel_managed_size: size of the kernel managed VA space region in
+	 * bytes
+	 */
+	__u64 kernel_managed_size;
+};
+
+/**
+ * struct drm_nouveau_vm_bind_op - VM_BIND operation
+ *
+ * This structure represents a single VM_BIND operation. UMDs should pass
+ * an array of this structure via struct drm_nouveau_vm_bind's &op_ptr field.
+ */
+struct drm_nouveau_vm_bind_op {
+	/**
+	 * @op: the operation type
+	 */
+	__u32 op;
+/**
+ * @DRM_NOUVEAU_VM_BIND_OP_MAP:
+ *
+ * Map a GEM object to the GPU's VA space. Optionally, the
+ * &DRM_NOUVEAU_VM_BIND_SPARSE flag can be passed to instruct the kernel to
+ * create sparse mappings for the given range.
+ */
+#define DRM_NOUVEAU_VM_BIND_OP_MAP 0x0
+/**
+ * @DRM_NOUVEAU_VM_BIND_OP_UNMAP:
+ *
+ * Unmap an existing mapping in the GPU's VA space. If the region the mapping
+ * is located in is a sparse region, new sparse mappings are created where the
+ * unmapped (memory backed) mapping was mapped previously. To remove a sparse
+ * region the &DRM_NOUVEAU_VM_BIND_SPARSE must be set.
+ */
+#define DRM_NOUVEAU_VM_BIND_OP_UNMAP 0x1
+	/**
+	 * @flags: the flags for a &drm_nouveau_vm_bind_op
+	 */
+	__u32 flags;
+/**
+ * @DRM_NOUVEAU_VM_BIND_SPARSE:
+ *
+ * Indicates that an allocated VA space region should be sparse.
+ */
+#define DRM_NOUVEAU_VM_BIND_SPARSE (1 << 8)
+	/**
+	 * @handle: the handle of the DRM GEM object to map
+	 */
+	__u32 handle;
+	/**
+	 * @pad: 32 bit padding, should be 0
+	 */
+	__u32 pad;
+	/**
+	 * @addr:
+	 *
+	 * the address the VA space region or (memory backed) mapping should be mapped to
+	 */
+	__u64 addr;
+	/**
+	 * @bo_offset: the offset within the BO backing the mapping
+	 */
+	__u64 bo_offset;
+	/**
+	 * @range: the size of the requested mapping in bytes
+	 */
+	__u64 range;
+};
+
+/**
+ * struct drm_nouveau_vm_bind - structure for DRM_IOCTL_NOUVEAU_VM_BIND
+ */
+struct drm_nouveau_vm_bind {
+	/**
+	 * @op_count: the number of &drm_nouveau_vm_bind_op
+	 */
+	__u32 op_count;
+	/**
+	 * @flags: the flags for a &drm_nouveau_vm_bind ioctl
+	 */
+	__u32 flags;
+/**
+ * @DRM_NOUVEAU_VM_BIND_RUN_ASYNC:
+ *
+ * Indicates that the given VM_BIND operation should be executed asynchronously
+ * by the kernel.
+ *
+ * If this flag is not supplied the kernel executes the associated operations
+ * synchronously and doesn't accept any &drm_nouveau_sync objects.
+ */
+#define DRM_NOUVEAU_VM_BIND_RUN_ASYNC 0x1
+	/**
+	 * @wait_count: the number of wait &drm_nouveau_syncs
+	 */
+	__u32 wait_count;
+	/**
+	 * @sig_count: the number of &drm_nouveau_syncs to signal when finished
+	 */
+	__u32 sig_count;
+	/**
+	 * @wait_ptr: pointer to &drm_nouveau_syncs to wait for
+	 */
+	__u64 wait_ptr;
+	/**
+	 * @sig_ptr: pointer to &drm_nouveau_syncs to signal when finished
+	 */
+	__u64 sig_ptr;
+	/**
+	 * @op_ptr: pointer to the &drm_nouveau_vm_bind_ops to execute
+	 */
+	__u64 op_ptr;
+};
+
+/**
+ * struct drm_nouveau_exec_push - EXEC push operation
+ *
+ * This structure represents a single EXEC push operation. UMDs should pass an
+ * array of this structure via struct drm_nouveau_exec's &push_ptr field.
+ */
+struct drm_nouveau_exec_push {
+	/**
+	 * @va: the virtual address of the push buffer mapping
+	 */
+	__u64 va;
+	/**
+	 * @va_len: the length of the push buffer mapping
+	 */
+	__u32 va_len;
+	/**
+	 * @flags: the flags for this push buffer mapping
+	 */
+	__u32 flags;
+#define DRM_NOUVEAU_EXEC_PUSH_NO_PREFETCH 0x1
+};
+
+/**
+ * struct drm_nouveau_exec - structure for DRM_IOCTL_NOUVEAU_EXEC
+ */
+struct drm_nouveau_exec {
+	/**
+	 * @channel: the channel to execute the push buffer in
+	 */
+	__u32 channel;
+	/**
+	 * @push_count: the number of &drm_nouveau_exec_push ops
+	 */
+	__u32 push_count;
+	/**
+	 * @wait_count: the number of wait &drm_nouveau_syncs
+	 */
+	__u32 wait_count;
+	/**
+	 * @sig_count: the number of &drm_nouveau_syncs to signal when finished
+	 */
+	__u32 sig_count;
+	/**
+	 * @wait_ptr: pointer to &drm_nouveau_syncs to wait for
+	 */
+	__u64 wait_ptr;
+	/**
+	 * @sig_ptr: pointer to &drm_nouveau_syncs to signal when finished
+	 */
+	__u64 sig_ptr;
+	/**
+	 * @push_ptr: pointer to &drm_nouveau_exec_push ops
+	 */
+	__u64 push_ptr;
+};
+
+#define DRM_NOUVEAU_GETPARAM           0x00
+#define DRM_NOUVEAU_SETPARAM           0x01 /* deprecated */
+#define DRM_NOUVEAU_CHANNEL_ALLOC      0x02
+#define DRM_NOUVEAU_CHANNEL_FREE       0x03
+#define DRM_NOUVEAU_GROBJ_ALLOC        0x04 /* deprecated */
+#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC  0x05 /* deprecated */
+#define DRM_NOUVEAU_GPUOBJ_FREE        0x06 /* deprecated */
+#define DRM_NOUVEAU_NVIF               0x07
+#define DRM_NOUVEAU_SVM_INIT           0x08
+#define DRM_NOUVEAU_SVM_BIND           0x09
+#define DRM_NOUVEAU_VM_INIT            0x10
+#define DRM_NOUVEAU_VM_BIND            0x11
+#define DRM_NOUVEAU_EXEC               0x12
+#define DRM_NOUVEAU_GEM_NEW            0x40
+#define DRM_NOUVEAU_GEM_PUSHBUF        0x41
+#define DRM_NOUVEAU_GEM_CPU_PREP       0x42
+#define DRM_NOUVEAU_GEM_CPU_FINI       0x43
+#define DRM_NOUVEAU_GEM_INFO           0x44
+
+struct drm_nouveau_svm_init {
+	__u64 unmanaged_addr;
+	__u64 unmanaged_size;
+};
+
+struct drm_nouveau_svm_bind {
+	__u64 header;
+	__u64 va_start;
+	__u64 va_end;
+	__u64 npages;
+	__u64 stride;
+	__u64 result;
+	__u64 reserved0;
+	__u64 reserved1;
+};
+
+#define NOUVEAU_SVM_BIND_COMMAND_SHIFT          0
+#define NOUVEAU_SVM_BIND_COMMAND_BITS           8
+#define NOUVEAU_SVM_BIND_COMMAND_MASK           ((1 << 8) - 1)
+#define NOUVEAU_SVM_BIND_PRIORITY_SHIFT         8
+#define NOUVEAU_SVM_BIND_PRIORITY_BITS          8
+#define NOUVEAU_SVM_BIND_PRIORITY_MASK          ((1 << 8) - 1)
+#define NOUVEAU_SVM_BIND_TARGET_SHIFT           16
+#define NOUVEAU_SVM_BIND_TARGET_BITS            32
+#define NOUVEAU_SVM_BIND_TARGET_MASK            0xffffffff
+
+/*
+ * Below is used to validate ioctl arguments; userspace can also use it to
+ * make sure that no bits are set beyond known fields for a given kernel
+ * version.
+ */
+#define NOUVEAU_SVM_BIND_VALID_BITS     48
+#define NOUVEAU_SVM_BIND_VALID_MASK     ((1ULL << NOUVEAU_SVM_BIND_VALID_BITS) - 1)
+
+
+/*
+ * NOUVEAU_BIND_COMMAND__MIGRATE: synchronous migrate to target memory.
+ * result: number of pages successfully migrated to the target memory.
+ */
+#define NOUVEAU_SVM_BIND_COMMAND__MIGRATE               0
+
+/*
+ * NOUVEAU_SVM_BIND_HEADER_TARGET__GPU_VRAM: target the GPU VRAM memory.
+ */
+#define NOUVEAU_SVM_BIND_TARGET__GPU_VRAM               (1UL << 31)
+
+
+#define DRM_IOCTL_NOUVEAU_GETPARAM           DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GETPARAM, struct drm_nouveau_getparam)
+#define DRM_IOCTL_NOUVEAU_CHANNEL_ALLOC      DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_ALLOC, struct drm_nouveau_channel_alloc)
+#define DRM_IOCTL_NOUVEAU_CHANNEL_FREE       DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_FREE, struct drm_nouveau_channel_free)
+
+#define DRM_IOCTL_NOUVEAU_SVM_INIT           DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SVM_INIT, struct drm_nouveau_svm_init)
+#define DRM_IOCTL_NOUVEAU_SVM_BIND           DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SVM_BIND, struct drm_nouveau_svm_bind)
+
+#define DRM_IOCTL_NOUVEAU_GEM_NEW            DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_NEW, struct drm_nouveau_gem_new)
+#define DRM_IOCTL_NOUVEAU_GEM_PUSHBUF        DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_PUSHBUF, struct drm_nouveau_gem_pushbuf)
+#define DRM_IOCTL_NOUVEAU_GEM_CPU_PREP       DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_CPU_PREP, struct drm_nouveau_gem_cpu_prep)
+#define DRM_IOCTL_NOUVEAU_GEM_CPU_FINI       DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_CPU_FINI, struct drm_nouveau_gem_cpu_fini)
+#define DRM_IOCTL_NOUVEAU_GEM_INFO           DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_INFO, struct drm_nouveau_gem_info)
+
+#define DRM_IOCTL_NOUVEAU_VM_INIT            DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_VM_INIT, struct drm_nouveau_vm_init)
+#define DRM_IOCTL_NOUVEAU_VM_BIND            DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_VM_BIND, struct drm_nouveau_vm_bind)
+#define DRM_IOCTL_NOUVEAU_EXEC               DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_EXEC, struct drm_nouveau_exec)
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* __NOUVEAU_DRM_H__ */
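A small usage sketch built only from definitions in this new header (the helper name and the already-open fd are assumptions):

	#include <sys/ioctl.h>
	#include "nouveau_drm.h"

	/* Query how many IBs fit in one DRM_IOCTL_NOUVEAU_EXEC submission. */
	static int exec_push_max(int fd, __u64 *max)
	{
		struct drm_nouveau_getparam getparam = {
			.param = NOUVEAU_GETPARAM_EXEC_PUSH_MAX,
		};

		if (ioctl(fd, DRM_IOCTL_NOUVEAU_GETPARAM, &getparam))
			return -1;
		*max = getparam.value;
		return 0;
	}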
@@ -319,6 +319,11 @@ struct drm_v3d_submit_tfu {
 
	/* Pointer to an array of ioctl extensions*/
	__u64 extensions;
+
+	struct {
+		__u32 ioc;
+		__u32 pad;
+	} v71;
 };
 
 /* Submits a compute shader for dispatch. This job will block on any
@@ -64,6 +64,17 @@ struct drm_virtgpu_map {
	__u32 pad;
 };
 
+#define VIRTGPU_EXECBUF_SYNCOBJ_RESET		0x01
+#define VIRTGPU_EXECBUF_SYNCOBJ_FLAGS ( \
+		VIRTGPU_EXECBUF_SYNCOBJ_RESET | \
+		0)
+struct drm_virtgpu_execbuffer_syncobj {
+	__u32 handle;
+	__u32 flags;
+	__u64 point;
+};
+
 /* fence_fd is modified on success if VIRTGPU_EXECBUF_FENCE_FD_OUT flag is set. */
 struct drm_virtgpu_execbuffer {
	__u32 flags;
	__u32 size;
@@ -72,7 +83,11 @@ struct drm_virtgpu_execbuffer {
	__u32 num_bo_handles;
	__s32 fence_fd; /* in/out fence fd (see VIRTGPU_EXECBUF_FENCE_FD_IN/OUT) */
	__u32 ring_idx; /* command ring index (see VIRTGPU_EXECBUF_RING_IDX) */
-	__u32 pad;
+	__u32 syncobj_stride; /* size of @drm_virtgpu_execbuffer_syncobj */
+	__u32 num_in_syncobjs;
+	__u32 num_out_syncobjs;
+	__u64 in_syncobjs;
+	__u64 out_syncobjs;
 };
 
 #define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
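A hedged sketch wiring the new syncobj arrays into an execbuffer submission (the helper, counts, and buffers are illustrative; the command payload comes from the caller):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include "virtgpu_drm.h"

	/* Submit a command buffer that waits on one syncobj and signals another. */
	static int execbuf_with_syncobjs(int fd, void *cmd, __u32 cmd_size,
					 struct drm_virtgpu_execbuffer_syncobj *in,
					 struct drm_virtgpu_execbuffer_syncobj *out)
	{
		struct drm_virtgpu_execbuffer eb = {
			.command = (uintptr_t)cmd,
			.size = cmd_size,
			.syncobj_stride = sizeof(*in),	/* allows future struct growth */
			.num_in_syncobjs = 1,
			.num_out_syncobjs = 1,
			.in_syncobjs = (uintptr_t)in,
			.out_syncobjs = (uintptr_t)out,
		};

		return ioctl(fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb);
	}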
File diff suppressed because it is too large