sync with OpenBSD -current

purplerain 2024-04-30 02:20:47 +00:00
parent 6fc9e02a30
commit 7768d1f254
Signed by: purplerain
GPG key ID: F42C07F07E2E35B7
35 changed files with 335 additions and 351 deletions


@@ -1,4 +1,4 @@
/* $OpenBSD: vmm_machdep.c,v 1.24 2024/04/13 21:57:22 dv Exp $ */
/* $OpenBSD: vmm_machdep.c,v 1.25 2024/04/29 14:47:05 dv Exp $ */
/*
* Copyright (c) 2014 Mike Larkin <mlarkin@openbsd.org>
*
@@ -3691,18 +3691,14 @@ vm_run(struct vm_run_params *vrp)
}
/*
* We may be returning from userland helping us from the last exit.
* If so (vrp_continue == 1), copy in the exit data from vmd. The
* exit data will be consumed before the next entry (this typically
* comprises VCPU register changes as the result of vmd(8)'s actions).
* We may be returning from userland helping us from the last
* exit. Copy in the exit data from vmd. The exit data will be
* consumed before the next entry (this typically comprises
* VCPU register changes as the result of vmd(8)'s actions).
*/
if (vrp->vrp_continue) {
if (copyin(vrp->vrp_exit, &vcpu->vc_exit,
sizeof(struct vm_exit)) == EFAULT) {
ret = EFAULT;
goto out_unlock;
}
}
ret = copyin(vrp->vrp_exit, &vcpu->vc_exit, sizeof(struct vm_exit));
if (ret)
goto out_unlock;
vcpu->vc_inject.vie_type = vrp->vrp_inject.vie_type;
vcpu->vc_inject.vie_vector = vrp->vrp_inject.vie_vector;
@@ -4001,67 +3997,28 @@ vcpu_run_vmx(struct vcpu *vcpu, struct vm_run_params *vrp)
else
vcpu->vc_intr = 0;
if (vrp->vrp_continue) {
switch (vcpu->vc_gueststate.vg_exit_reason) {
case VMX_EXIT_IO:
if (vcpu->vc_exit.vei.vei_dir == VEI_DIR_IN)
vcpu->vc_gueststate.vg_rax =
vcpu->vc_exit.vei.vei_data;
vcpu->vc_gueststate.vg_rip =
vcpu->vc_exit.vrs.vrs_gprs[VCPU_REGS_RIP];
if (vmwrite(VMCS_GUEST_IA32_RIP,
vcpu->vc_gueststate.vg_rip)) {
printf("%s: failed to update rip\n", __func__);
return (EINVAL);
}
break;
case VMX_EXIT_EPT_VIOLATION:
ret = vcpu_writeregs_vmx(vcpu, VM_RWREGS_GPRS, 0,
&vcpu->vc_exit.vrs);
if (ret) {
printf("%s: vm %d vcpu %d failed to update "
"registers\n", __func__,
vcpu->vc_parent->vm_id, vcpu->vc_id);
return (EINVAL);
}
break;
case VM_EXIT_NONE:
case VMX_EXIT_HLT:
case VMX_EXIT_INT_WINDOW:
case VMX_EXIT_EXTINT:
case VMX_EXIT_CPUID:
case VMX_EXIT_XSETBV:
break;
#ifdef VMM_DEBUG
case VMX_EXIT_TRIPLE_FAULT:
DPRINTF("%s: vm %d vcpu %d triple fault\n",
__func__, vcpu->vc_parent->vm_id,
vcpu->vc_id);
vmx_vcpu_dump_regs(vcpu);
dump_vcpu(vcpu);
vmx_dump_vmcs(vcpu);
break;
case VMX_EXIT_ENTRY_FAILED_GUEST_STATE:
DPRINTF("%s: vm %d vcpu %d failed entry "
"due to invalid guest state\n",
__func__, vcpu->vc_parent->vm_id,
vcpu->vc_id);
vmx_vcpu_dump_regs(vcpu);
dump_vcpu(vcpu);
switch (vcpu->vc_gueststate.vg_exit_reason) {
case VMX_EXIT_IO:
if (vcpu->vc_exit.vei.vei_dir == VEI_DIR_IN)
vcpu->vc_gueststate.vg_rax = vcpu->vc_exit.vei.vei_data;
vcpu->vc_gueststate.vg_rip =
vcpu->vc_exit.vrs.vrs_gprs[VCPU_REGS_RIP];
if (vmwrite(VMCS_GUEST_IA32_RIP, vcpu->vc_gueststate.vg_rip)) {
printf("%s: failed to update rip\n", __func__);
return (EINVAL);
default:
DPRINTF("%s: unimplemented exit type %d (%s)\n",
__func__,
vcpu->vc_gueststate.vg_exit_reason,
vmx_exit_reason_decode(
vcpu->vc_gueststate.vg_exit_reason));
vmx_vcpu_dump_regs(vcpu);
dump_vcpu(vcpu);
break;
#endif /* VMM_DEBUG */
}
memset(&vcpu->vc_exit, 0, sizeof(vcpu->vc_exit));
break;
case VMX_EXIT_EPT_VIOLATION:
ret = vcpu_writeregs_vmx(vcpu, VM_RWREGS_GPRS, 0,
&vcpu->vc_exit.vrs);
if (ret) {
printf("%s: vm %d vcpu %d failed to update registers\n",
__func__, vcpu->vc_parent->vm_id, vcpu->vc_id);
return (EINVAL);
}
break;
}
memset(&vcpu->vc_exit, 0, sizeof(vcpu->vc_exit));
/* Host CR3 */
cr3 = rcr3();
@@ -6519,31 +6476,29 @@ vcpu_run_svm(struct vcpu *vcpu, struct vm_run_params *vrp)
* needs to be fixed up depends on what vmd populated in the
* exit data structure.
*/
if (vrp->vrp_continue) {
switch (vcpu->vc_gueststate.vg_exit_reason) {
case SVM_VMEXIT_IOIO:
if (vcpu->vc_exit.vei.vei_dir == VEI_DIR_IN) {
vcpu->vc_gueststate.vg_rax =
vcpu->vc_exit.vei.vei_data;
vmcb->v_rax = vcpu->vc_gueststate.vg_rax;
}
vcpu->vc_gueststate.vg_rip =
vcpu->vc_exit.vrs.vrs_gprs[VCPU_REGS_RIP];
vmcb->v_rip = vcpu->vc_gueststate.vg_rip;
break;
case SVM_VMEXIT_NPF:
ret = vcpu_writeregs_svm(vcpu, VM_RWREGS_GPRS,
&vcpu->vc_exit.vrs);
if (ret) {
printf("%s: vm %d vcpu %d failed to update "
"registers\n", __func__,
vcpu->vc_parent->vm_id, vcpu->vc_id);
return (EINVAL);
}
break;
switch (vcpu->vc_gueststate.vg_exit_reason) {
case SVM_VMEXIT_IOIO:
if (vcpu->vc_exit.vei.vei_dir == VEI_DIR_IN) {
vcpu->vc_gueststate.vg_rax =
vcpu->vc_exit.vei.vei_data;
vmcb->v_rax = vcpu->vc_gueststate.vg_rax;
}
memset(&vcpu->vc_exit, 0, sizeof(vcpu->vc_exit));
vcpu->vc_gueststate.vg_rip =
vcpu->vc_exit.vrs.vrs_gprs[VCPU_REGS_RIP];
vmcb->v_rip = vcpu->vc_gueststate.vg_rip;
break;
case SVM_VMEXIT_NPF:
ret = vcpu_writeregs_svm(vcpu, VM_RWREGS_GPRS,
&vcpu->vc_exit.vrs);
if (ret) {
printf("%s: vm %d vcpu %d failed to update "
"registers\n", __func__,
vcpu->vc_parent->vm_id, vcpu->vc_id);
return (EINVAL);
}
break;
}
memset(&vcpu->vc_exit, 0, sizeof(vcpu->vc_exit));
while (ret == 0) {
vmm_update_pvclock(vcpu);
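
The vmm_machdep.c hunks above pair with the vmmvar.h hunk below: vrp_continue is gone, the exit data is copied in on every VMM_IOC_RUN entry, and vm_run() now returns copyin(9)'s result (0 or EFAULT) directly instead of comparing it against EFAULT. A minimal sketch of a userland run loop under the new contract, where vmm_fd (an open descriptor on /dev/vmm), vm_id and vcpu_id are hypothetical names rather than anything from this commit:

	#include <sys/types.h>
	#include <sys/ioctl.h>
	#include <machine/vmmvar.h>
	#include <err.h>
	#include <string.h>

	struct vm_run_params vrp;
	struct vm_exit vexit;

	memset(&vrp, 0, sizeof(vrp));
	memset(&vexit, 0, sizeof(vexit));
	vrp.vrp_vm_id = vm_id;
	vrp.vrp_vcpu_id = vcpu_id;
	vrp.vrp_exit = &vexit;	/* must point at valid exit data on every entry */

	for (;;) {
		if (ioctl(vmm_fd, VMM_IOC_RUN, &vrp) == -1)
			err(1, "VMM_IOC_RUN");
		/* service the reported exit, fix up vexit, re-enter */
	}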


@@ -1,4 +1,4 @@
/* $OpenBSD: vmmvar.h,v 1.100 2024/04/09 21:55:16 dv Exp $ */
/* $OpenBSD: vmmvar.h,v 1.101 2024/04/29 14:47:05 dv Exp $ */
/*
* Copyright (c) 2014 Mike Larkin <mlarkin@openbsd.org>
*
@@ -478,7 +478,6 @@ struct vm_run_params {
/* Input parameters to VMM_IOC_RUN */
uint32_t vrp_vm_id;
uint32_t vrp_vcpu_id;
uint8_t vrp_continue; /* Continuing from an exit */
struct vcpu_inject_event vrp_inject;
uint8_t vrp_intr_pending; /* Additional intrs pending? */


@@ -1,4 +1,4 @@
/* $OpenBSD: fault.c,v 1.47 2023/01/05 20:35:44 kettenis Exp $ */
/* $OpenBSD: fault.c,v 1.48 2024/04/29 12:33:17 jsg Exp $ */
/* $NetBSD: fault.c,v 1.46 2004/01/21 15:39:21 skrll Exp $ */
/*
@@ -96,10 +96,6 @@
#include <arm/machdep.h>
#include <arm/vfp.h>
#ifdef DEBUG
int last_fault_code; /* For the benefit of pmap_fault_fixup() */
#endif
struct sigdata {
int signo;
int code;


@@ -1,4 +1,4 @@
/* $OpenBSD: machdep.h,v 1.5 2016/09/24 13:43:25 kettenis Exp $ */
/* $OpenBSD: machdep.h,v 1.6 2024/04/29 12:24:46 jsg Exp $ */
/* $NetBSD: machdep.h,v 1.7 2002/02/21 02:52:21 thorpej Exp $ */
#ifndef _ARM_MACHDEP_H_
@@ -6,7 +6,6 @@
/* misc prototypes used by the many arm machdeps */
void halt (void);
void parse_mi_bootargs (char *);
void data_abort_handler (trapframe_t *);
void prefetch_abort_handler (trapframe_t *);
void undefinedinstruction_bounce (trapframe_t *);
@@ -18,10 +17,4 @@ void dumpsys (void);
*/
u_int initarm (void *, void *, void *, paddr_t);
/* from arm/arm/intr.c */
void dosoftints (void);
void set_spl_masks (void);
#ifdef DIAGNOSTIC
void dump_spl_masks (void);
#endif
#endif


@@ -1,4 +1,4 @@
/* $OpenBSD: pmap.h,v 1.55 2023/12/11 22:12:53 kettenis Exp $ */
/* $OpenBSD: pmap.h,v 1.56 2024/04/29 12:24:46 jsg Exp $ */
/* $NetBSD: pmap.h,v 1.76 2003/09/06 09:10:46 rearnsha Exp $ */
/*
@@ -153,18 +153,6 @@ union pmap_cache_state {
*/
#define PMAP_CACHE_STATE_ALL 0xffffffffu
/*
* This structure is used by machine-dependent code to describe
* static mappings of devices, created at bootstrap time.
*/
struct pmap_devmap {
vaddr_t pd_va; /* virtual address */
paddr_t pd_pa; /* physical address */
psize_t pd_size; /* size of region */
vm_prot_t pd_prot; /* protection code */
int pd_cache; /* cache attributes */
};
/*
* The pmap structure itself
*/
@@ -245,12 +233,6 @@ extern struct pmap kernel_pmap_store;
#define pmap_unuse_final(p) do { /* nothing */ } while (0)
#define pmap_remove_holes(vm) do { /* nothing */ } while (0)
/*
* Functions that we need to export
*/
void pmap_remove_all(pmap_t);
void pmap_uncache_page(paddr_t, vaddr_t);
#define PMAP_CHECK_COPYIN 1
#define PMAP_GROWKERNEL /* turn on pmap_growkernel interface */
@@ -258,7 +240,6 @@ void pmap_uncache_page(paddr_t, vaddr_t);
/* Functions we use internally. */
void pmap_bootstrap(pd_entry_t *, vaddr_t, vaddr_t);
int pmap_fault_fixup(pmap_t, vaddr_t, vm_prot_t, int);
int pmap_get_pde_pte(pmap_t, vaddr_t, pd_entry_t **, pt_entry_t **);
int pmap_get_pde(pmap_t, vaddr_t, pd_entry_t **);
void pmap_set_pcb_pagedir(pmap_t, struct pcb *);
@@ -270,16 +251,11 @@ void vector_page_setprot(int);
/* XXX */
void pmap_kenter_cache(vaddr_t va, paddr_t pa, vm_prot_t prot, int cacheable);
const struct pmap_devmap *pmap_devmap_find_pa(paddr_t, psize_t);
const struct pmap_devmap *pmap_devmap_find_va(vaddr_t, vsize_t);
/* Bootstrapping routines. */
void pmap_map_section(vaddr_t, vaddr_t, paddr_t, int, int);
void pmap_map_entry(vaddr_t, vaddr_t, paddr_t, int, int);
vsize_t pmap_map_chunk(vaddr_t, vaddr_t, paddr_t, vsize_t, int, int);
void pmap_link_l2pt(vaddr_t, vaddr_t, pv_addr_t *);
void pmap_devmap_bootstrap(vaddr_t, const struct pmap_devmap *);
void pmap_devmap_register(const struct pmap_devmap *);
/*
* The current top of kernel VM


@@ -1,4 +1,4 @@
/* $OpenBSD: machdep.c,v 1.88 2024/03/17 13:05:40 kettenis Exp $ */
/* $OpenBSD: machdep.c,v 1.89 2024/04/29 13:01:54 jsg Exp $ */
/*
* Copyright (c) 2014 Patrick Wildt <patrick@blueri.se>
* Copyright (c) 2021 Mark Kettenis <kettenis@openbsd.org>
@@ -620,11 +620,6 @@ dumpsys(void)
int (*dump)(dev_t, daddr_t, caddr_t, size_t);
int error;
#if 0
/* Save registers. */
savectx(&dumppcb);
#endif
if (dumpdev == NODEV)
return;


@@ -1,4 +1,4 @@
/* $OpenBSD: cpu.h,v 1.45 2024/04/19 10:22:50 mpi Exp $ */
/* $OpenBSD: cpu.h,v 1.46 2024/04/29 13:01:54 jsg Exp $ */
/*
* Copyright (c) 2016 Dale Rahn <drahn@dalerahn.com>
*
@@ -96,10 +96,6 @@ extern uint64_t cpu_id_aa64pfr1;
#define PROC_PC(p) ((p)->p_addr->u_pcb.pcb_tf->tf_elr)
#define PROC_STACK(p) ((p)->p_addr->u_pcb.pcb_tf->tf_sp)
/* The address of the vector page. */
extern vaddr_t vector_page;
void arm32_vector_init(vaddr_t, int);
/*
* Per-CPU information. For now we assume one CPU.
*/
@@ -276,29 +272,15 @@ void need_resched(struct cpu_info *);
// asm code to start new kernel contexts.
void proc_trampoline(void);
void child_trampoline(void);
/*
* Random cruft
*/
void dumpconf(void);
// cpuswitch.S
struct pcb;
void savectx (struct pcb *pcb);
// machdep.h
void bootsync (int);
// fault.c
int badaddr_read (void *, size_t, void *);
// syscall.c
void svc_handler (trapframe_t *);
/* machine_machdep.c */
void board_startup(void);
// functions to manipulate interrupt state
static __inline void
restore_daif(uint32_t daif)


@@ -1,4 +1,4 @@
/* $OpenBSD: intc.c,v 1.12 2022/01/03 03:06:50 jsg Exp $ */
/* $OpenBSD: intc.c,v 1.14 2024/04/29 12:42:06 jsg Exp $ */
/*
* Copyright (c) 2007,2009 Dale Rahn <drahn@openbsd.org>
*
@@ -91,8 +91,6 @@ struct intrq {
int iq_ist; /* share type */
};
volatile int softint_pending;
struct intrq intc_handler[INTC_MAX_IRQ];
u_int32_t intc_smask[NIPL];
u_int32_t intc_imask[INTC_MAX_BANKS][NIPL];
@@ -310,18 +308,6 @@ intc_setipl(int new)
restore_interrupts(psw);
}
void
intc_intr_bootstrap(vaddr_t addr)
{
int i, j;
extern struct bus_space armv7_bs_tag;
intc_iot = &armv7_bs_tag;
intc_ioh = addr;
for (i = 0; i < INTC_NUM_BANKS; i++)
for (j = 0; j < NIPL; j++)
intc_imask[i][j] = 0xffffffff;
}
void
intc_irq_handler(void *frame)
{


@@ -1,4 +1,4 @@
/* $OpenBSD: intc.h,v 1.4 2020/07/14 15:34:15 patrick Exp $ */
/* $OpenBSD: intc.h,v 1.8 2024/04/29 12:46:22 jsg Exp $ */
/*
* Copyright (c) 2007,2009 Dale Rahn <drahn@openbsd.org>
*
@@ -25,42 +25,11 @@
#include <machine/intr.h>
#include <arm/softintr.h>
extern volatile int current_spl_level;
extern volatile int softint_pending;
void intc_do_pending(void);
#define SI_TO_IRQBIT(si) (1U<<(si))
void intc_setipl(int new);
void intc_splx(int new);
int intc_splraise(int ipl);
int intc_spllower(int ipl);
void intc_setsoftintr(int si);
/*
* An useful function for interrupt handlers.
* XXX: This shouldn't be here.
*/
static __inline int
find_first_bit( uint32_t bits )
{
int count;
/* since CLZ is available only on ARMv5, this isn't portable
* to all ARM CPUs. This file is for OMAPINTC processor.
*/
asm( "clz %0, %1" : "=r" (count) : "r" (bits) );
return 31-count;
}
/*
* This function *MUST* be called very early on in a port's
* initarm() function, before ANY spl*() functions are called.
*
* The parameter is the virtual address of the OMAPINTC's Interrupt
* Controller registers.
*/
void intc_intr_bootstrap(vaddr_t);
void intc_irq_handler(void *);
void *intc_intr_establish(int irqno, int level, struct cpu_info *ci,


@@ -1,4 +1,4 @@
/* $OpenBSD: sxiintc.c,v 1.11 2022/01/03 03:06:50 jsg Exp $ */
/* $OpenBSD: sxiintc.c,v 1.12 2024/04/29 12:33:17 jsg Exp $ */
/*
* Copyright (c) 2007,2009 Dale Rahn <drahn@openbsd.org>
* Copyright (c) 2013 Artturi Alm
@@ -131,8 +131,6 @@ struct intrq {
int iq_ist; /* share type */
};
volatile int a1xsoftint_pending;
struct intrq sxiintc_handler[NIRQ];
u_int32_t sxiintc_smask[NIPL];
u_int32_t sxiintc_imask[NBANKS][NIPL];


@@ -1,4 +1,4 @@
/* $OpenBSD: sxiintc.h,v 1.2 2020/07/14 15:34:15 patrick Exp $ */
/* $OpenBSD: sxiintc.h,v 1.4 2024/04/29 12:33:17 jsg Exp $ */
/*
* Copyright (c) 2007,2009 Dale Rahn <drahn@openbsd.org>
*
@@ -25,16 +25,11 @@
#include <machine/intr.h>
#include <arm/softintr.h>
extern volatile int current_spl_level;
extern volatile int softint_pending;
void sxiintc_do_pending(void);
#define SI_TO_IRQBIT(si) (1U<<(si))
void sxiintc_setipl(int);
void sxiintc_splx(int);
int sxiintc_splraise(int);
int sxiintc_spllower(int);
void sxiintc_setsoftintr(int);
void sxiintc_irq_handler(void *);
void *sxiintc_intr_establish(int, int, struct cpu_info *,


@@ -562,7 +562,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
struct ttm_resource *mem)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
size_t bus_size = (size_t)mem->size;
switch (mem->mem_type) {
case TTM_PL_SYSTEM:
@@ -573,9 +572,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
break;
case TTM_PL_VRAM:
mem->bus.offset = mem->start << PAGE_SHIFT;
/* check if it's visible */
if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size)
return -EINVAL;
if (adev->mman.aper_base_kaddr &&
mem->placement & TTM_PL_FLAG_CONTIGUOUS)


@@ -1562,6 +1562,37 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
trace_amdgpu_vm_bo_map(bo_va, mapping);
}
/* Validate operation parameters to prevent potential abuse */
static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
struct amdgpu_bo *bo,
uint64_t saddr,
uint64_t offset,
uint64_t size)
{
uint64_t tmp, lpfn;
if (saddr & AMDGPU_GPU_PAGE_MASK
|| offset & AMDGPU_GPU_PAGE_MASK
|| size & AMDGPU_GPU_PAGE_MASK)
return -EINVAL;
if (check_add_overflow(saddr, size, &tmp)
|| check_add_overflow(offset, size, &tmp)
|| size == 0 /* which also leads to end < begin */)
return -EINVAL;
/* make sure object fit at this offset */
if (bo && offset + size > amdgpu_bo_size(bo))
return -EINVAL;
/* Ensure last pfn not exceed max_pfn */
lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT;
if (lpfn >= adev->vm_manager.max_pfn)
return -EINVAL;
return 0;
}
/**
* amdgpu_vm_bo_map - map bo inside a vm
*
@@ -1588,21 +1619,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
struct amdgpu_bo *bo = bo_va->base.bo;
struct amdgpu_vm *vm = bo_va->base.vm;
uint64_t eaddr;
int r;
/* validate the parameters */
if (saddr & ~LINUX_PAGE_MASK || offset & ~LINUX_PAGE_MASK || size & ~LINUX_PAGE_MASK)
return -EINVAL;
if (saddr + size <= saddr || offset + size <= offset)
return -EINVAL;
/* make sure object fit at this offset */
eaddr = saddr + size - 1;
if ((bo && offset + size > amdgpu_bo_size(bo)) ||
(eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
return -EINVAL;
r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
if (r)
return r;
saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
if (tmp) {
@@ -1655,17 +1679,9 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
uint64_t eaddr;
int r;
/* validate the parameters */
if (saddr & ~LINUX_PAGE_MASK || offset & ~LINUX_PAGE_MASK || size & ~LINUX_PAGE_MASK)
return -EINVAL;
if (saddr + size <= saddr || offset + size <= offset)
return -EINVAL;
/* make sure object fit at this offset */
eaddr = saddr + size - 1;
if ((bo && offset + size > amdgpu_bo_size(bo)) ||
(eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
return -EINVAL;
r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
if (r)
return r;
/* Allocate all the needed memory */
mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
@@ -1679,7 +1695,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
}
saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
mapping->start = saddr;
mapping->last = eaddr;
@@ -1766,10 +1782,14 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
DRM_LIST_HEAD(removed);
uint64_t eaddr;
int r;
r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size);
if (r)
return r;
eaddr = saddr + size - 1;
saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
/* Allocate all the needed memory */
before = kzalloc(sizeof(*before), GFP_KERNEL);
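
All three mapping entry points above now funnel through amdgpu_vm_verify_parameters(), and the last-page computation moves from byte arithmetic to page arithmetic. With saddr and size both GPU-page aligned, which the helper enforces, the old and new forms agree; a worked example with 4 KiB GPU pages, saddr = 0x3000 and size = 0x2000:

	old: eaddr = (0x3000 + 0x2000 - 1) / 0x1000          = 0x4
	new: eaddr = 0x3000 / 0x1000 + (0x2000 - 1) / 0x1000 = 0x3 + 0x1 = 0x4

The page-number form never materializes the byte-level end address, so it cannot wrap for ranges near the top of the address space; outright overflow of saddr + size is already rejected by the check_add_overflow() calls in the helper (see the linux/overflow.h shim later in this commit).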


@@ -818,9 +818,9 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
mutex_lock(&kfd_processes_mutex);
if (kfd_is_locked()) {
mutex_unlock(&kfd_processes_mutex);
pr_debug("KFD is locked! Cannot create process");
return ERR_PTR(-EINVAL);
process = ERR_PTR(-EINVAL);
goto out;
}
/* A prior open of /dev/kfd could have already created the process. */


@@ -236,9 +236,6 @@ static void optc32_setup_manual_trigger(struct timing_generator *optc)
OTG_V_TOTAL_MAX_SEL, 1,
OTG_FORCE_LOCK_ON_EVENT, 0,
OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */
// Setup manual flow control for EOF via TRIG_A
optc->funcs->setup_manual_trigger(optc);
}
}


@@ -259,6 +259,7 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
drm_property_blob_get(crtc_state->post_csc_lut);
crtc_state->update_pipe = false;
crtc_state->update_m_n = false;
crtc_state->disable_lp_wm = false;
crtc_state->disable_cxsr = false;
crtc_state->update_wm_pre = false;


@@ -2453,7 +2453,8 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
intel_atomic_get_old_cdclk_state(state);
const struct intel_cdclk_state *new_cdclk_state =
intel_atomic_get_new_cdclk_state(state);
enum pipe pipe = new_cdclk_state->pipe;
struct intel_cdclk_config cdclk_config;
enum pipe pipe;
if (!intel_cdclk_changed(&old_cdclk_state->actual,
&new_cdclk_state->actual))
@@ -2462,12 +2463,25 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
if (IS_DG2(i915))
intel_cdclk_pcode_pre_notify(state);
if (new_cdclk_state->disable_pipes ||
old_cdclk_state->actual.cdclk <= new_cdclk_state->actual.cdclk) {
drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
if (new_cdclk_state->disable_pipes) {
cdclk_config = new_cdclk_state->actual;
pipe = INVALID_PIPE;
} else {
if (new_cdclk_state->actual.cdclk >= old_cdclk_state->actual.cdclk) {
cdclk_config = new_cdclk_state->actual;
pipe = new_cdclk_state->pipe;
} else {
cdclk_config = old_cdclk_state->actual;
pipe = INVALID_PIPE;
}
intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
cdclk_config.voltage_level = max(new_cdclk_state->actual.voltage_level,
old_cdclk_state->actual.voltage_level);
}
drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
intel_set_cdclk(i915, &cdclk_config, pipe);
}
/**
@@ -2485,7 +2499,7 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
intel_atomic_get_old_cdclk_state(state);
const struct intel_cdclk_state *new_cdclk_state =
intel_atomic_get_new_cdclk_state(state);
enum pipe pipe = new_cdclk_state->pipe;
enum pipe pipe;
if (!intel_cdclk_changed(&old_cdclk_state->actual,
&new_cdclk_state->actual))
@@ -2495,11 +2509,14 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
intel_cdclk_pcode_post_notify(state);
if (!new_cdclk_state->disable_pipes &&
old_cdclk_state->actual.cdclk > new_cdclk_state->actual.cdclk) {
drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
new_cdclk_state->actual.cdclk < old_cdclk_state->actual.cdclk)
pipe = new_cdclk_state->pipe;
else
pipe = INVALID_PIPE;
intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
}
drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
}
static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
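
The rewritten pre-plane hook now picks both the cdclk configuration and the pipe explicitly. A condensed restatement of the new decision (names shortened to new/old; this is not the driver's literal code):

	if (new->disable_pipes) {
		config = new->actual;		/* pipes are off anyway */
		pipe = INVALID_PIPE;
	} else {
		if (new->actual.cdclk >= old->actual.cdclk) {
			config = new->actual;	/* raising: program it now */
			pipe = new->pipe;
		} else {
			config = old->actual;	/* lowering: defer to post */
			pipe = INVALID_PIPE;
		}
		/* run at a voltage that covers both frequencies meanwhile */
		config.voltage_level = max(new->actual.voltage_level,
		    old->actual.voltage_level);
	}
	intel_set_cdclk(i915, &config, pipe);

The post-plane hook then programs new->actual unconditionally, which completes the lowering case.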


@@ -468,9 +468,56 @@ static int intel_mode_vblank_start(const struct drm_display_mode *mode)
return vblank_start;
}
static void intel_crtc_vblank_evade_scanlines(struct intel_atomic_state *state,
struct intel_crtc *crtc,
int *min, int *max, int *vblank_start)
{
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_crtc_state *crtc_state;
const struct drm_display_mode *adjusted_mode;
/*
* During fastsets/etc. the transcoder is still
* running with the old timings at this point.
*
* TODO: maybe just use the active timings here?
*/
if (intel_crtc_needs_modeset(new_crtc_state))
crtc_state = new_crtc_state;
else
crtc_state = old_crtc_state;
adjusted_mode = &crtc_state->hw.adjusted_mode;
if (crtc->mode_flags & I915_MODE_FLAG_VRR) {
if (intel_vrr_is_push_sent(crtc_state))
*vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
else
*vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
} else {
*vblank_start = intel_mode_vblank_start(adjusted_mode);
}
/* FIXME needs to be calibrated sensibly */
*min = *vblank_start - intel_usecs_to_scanlines(adjusted_mode,
VBLANK_EVASION_TIME_US);
*max = *vblank_start - 1;
/*
* M/N is double buffered on the transcoder's undelayed vblank,
* so with seamless M/N we must evade both vblanks.
*/
if (new_crtc_state->update_m_n)
*min -= adjusted_mode->crtc_vblank_start - adjusted_mode->crtc_vdisplay;
}
/**
* intel_pipe_update_start() - start update of a set of display registers
* @new_crtc_state: the new crtc state
* @state: the atomic state
* @crtc: the crtc
*
* Mark the start of an update to pipe registers that should be updated
* atomically regarding vblank. If the next vblank will happens within
@@ -480,11 +527,12 @@ static int intel_mode_vblank_start(const struct drm_display_mode *mode)
* until a subsequent call to intel_pipe_update_end(). That is done to
* avoid random delays.
*/
void intel_pipe_update_start(struct intel_crtc_state *new_crtc_state)
void intel_pipe_update_start(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_display_mode *adjusted_mode = &new_crtc_state->hw.adjusted_mode;
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
long timeout = msecs_to_jiffies_timeout(1);
int scanline, min, max, vblank_start;
wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
@@ -500,27 +548,7 @@ void intel_pipe_update_start(struct intel_crtc_state *new_crtc_state)
if (intel_crtc_needs_vblank_work(new_crtc_state))
intel_crtc_vblank_work_init(new_crtc_state);
if (new_crtc_state->vrr.enable) {
if (intel_vrr_is_push_sent(new_crtc_state))
vblank_start = intel_vrr_vmin_vblank_start(new_crtc_state);
else
vblank_start = intel_vrr_vmax_vblank_start(new_crtc_state);
} else {
vblank_start = intel_mode_vblank_start(adjusted_mode);
}
/* FIXME needs to be calibrated sensibly */
min = vblank_start - intel_usecs_to_scanlines(adjusted_mode,
VBLANK_EVASION_TIME_US);
max = vblank_start - 1;
/*
* M/N is double buffered on the transcoder's undelayed vblank,
* so with seamless M/N we must evade both vblanks.
*/
if (new_crtc_state->seamless_m_n && intel_crtc_needs_fastset(new_crtc_state))
min -= adjusted_mode->crtc_vblank_start - adjusted_mode->crtc_vdisplay;
intel_crtc_vblank_evade_scanlines(state, crtc, &min, &max, &vblank_start);
if (min <= 0 || max <= 0)
goto irq_disable;
@@ -631,15 +659,18 @@ static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end) {}
/**
* intel_pipe_update_end() - end update of a set of display registers
* @new_crtc_state: the new crtc state
* @state: the atomic state
* @crtc: the crtc
*
* Mark the end of an update started with intel_pipe_update_start(). This
* re-enables interrupts and verifies the update was actually completed
* before a vblank.
*/
void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
void intel_pipe_update_end(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
enum pipe pipe = crtc->pipe;
int scanline_end = intel_get_crtc_scanline(crtc);
u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc);
@@ -697,15 +728,6 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
*/
intel_vrr_send_push(new_crtc_state);
/*
* Seamless M/N update may need to update frame timings.
*
* FIXME Should be synchronized with the start of vblank somehow...
*/
if (new_crtc_state->seamless_m_n && intel_crtc_needs_fastset(new_crtc_state))
intel_crtc_update_active_timings(new_crtc_state,
new_crtc_state->vrr.enable);
local_irq_enable();
if (intel_vgpu_active(dev_priv))
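
For a sense of scale, the window computed by intel_crtc_vblank_evade_scanlines() is narrow. A worked example, assuming i915's 100 µs VBLANK_EVASION_TIME_US and a CEA 1080p60 mode (148.5 MHz pixel clock, htotal 2200, vblank_start 1080):

	scanlines = DIV_ROUND_UP(100 * 148500, 1000 * 2200) = 7
	min = 1080 - 7 = 1073,  max = 1080 - 1 = 1079

so the commit only waits when scanout sits in the last seven active lines; when update_m_n is set, min is pulled lower still so the transcoder's undelayed vblank is evaded as well.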


@@ -37,8 +37,10 @@ void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc);
void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state);
void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state);
void intel_pipe_update_start(struct intel_crtc_state *new_crtc_state);
void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state);
void intel_pipe_update_start(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_pipe_update_end(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_wait_for_vblank_workers(struct intel_atomic_state *state);
struct intel_crtc *intel_first_crtc(struct drm_i915_private *i915);
struct intel_crtc *intel_crtc_for_pipe(struct drm_i915_private *i915,


@@ -5215,7 +5215,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_X(lane_lat_optim_mask);
if (HAS_DOUBLE_BUFFERED_M_N(dev_priv)) {
if (!fastset || !pipe_config->seamless_m_n)
if (!fastset || !pipe_config->update_m_n)
PIPE_CONF_CHECK_M_N(dp_m_n);
} else {
PIPE_CONF_CHECK_M_N(dp_m_n);
@@ -5353,7 +5353,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
PIPE_CONF_CHECK_I(pipe_bpp);
if (!fastset || !pipe_config->seamless_m_n) {
if (!fastset || !pipe_config->update_m_n) {
PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_clock);
PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_clock);
}
@@ -5448,6 +5448,7 @@ int intel_modeset_all_pipes(struct intel_atomic_state *state,
crtc_state->uapi.mode_changed = true;
crtc_state->update_pipe = false;
crtc_state->update_m_n = false;
ret = drm_atomic_add_affected_connectors(&state->base,
&crtc->base);
@@ -5565,13 +5566,14 @@ static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_sta
{
struct drm_i915_private *i915 = to_i915(old_crtc_state->uapi.crtc->dev);
if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true)) {
if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
drm_dbg_kms(&i915->drm, "fastset requirement not met, forcing full modeset\n");
else
new_crtc_state->uapi.mode_changed = false;
return;
}
if (intel_crtc_needs_modeset(new_crtc_state))
new_crtc_state->update_m_n = false;
new_crtc_state->uapi.mode_changed = false;
if (!intel_crtc_needs_modeset(new_crtc_state))
new_crtc_state->update_pipe = true;
}
@@ -6297,6 +6299,7 @@ int intel_atomic_check(struct drm_device *dev,
if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
new_crtc_state->uapi.mode_changed = true;
new_crtc_state->update_pipe = false;
new_crtc_state->update_m_n = false;
}
}
@@ -6309,6 +6312,7 @@ int intel_atomic_check(struct drm_device *dev,
if (intel_cpu_transcoders_need_modeset(state, trans)) {
new_crtc_state->uapi.mode_changed = true;
new_crtc_state->update_pipe = false;
new_crtc_state->update_m_n = false;
}
}
@@ -6316,6 +6320,7 @@ int intel_atomic_check(struct drm_device *dev,
if (intel_pipes_need_modeset(state, new_crtc_state->bigjoiner_pipes)) {
new_crtc_state->uapi.mode_changed = true;
new_crtc_state->update_pipe = false;
new_crtc_state->update_m_n = false;
}
}
}
@@ -6494,7 +6499,7 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
hsw_set_linetime_wm(new_crtc_state);
if (new_crtc_state->seamless_m_n)
if (new_crtc_state->update_m_n)
intel_cpu_transcoder_set_m1_n1(crtc, new_crtc_state->cpu_transcoder,
&new_crtc_state->dp_m_n);
}
@@ -6533,6 +6538,8 @@ static void commit_pipe_post_planes(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@@ -6544,6 +6551,9 @@ static void commit_pipe_post_planes(struct intel_atomic_state *state,
if (DISPLAY_VER(dev_priv) >= 9 &&
!intel_crtc_needs_modeset(new_crtc_state))
skl_detach_scalers(new_crtc_state);
if (vrr_enabling(old_crtc_state, new_crtc_state))
intel_vrr_enable(new_crtc_state);
}
static void intel_enable_crtc(struct intel_atomic_state *state,
@@ -6584,12 +6594,6 @@ static void intel_update_crtc(struct intel_atomic_state *state,
intel_dpt_configure(crtc);
}
if (vrr_enabling(old_crtc_state, new_crtc_state)) {
intel_vrr_enable(new_crtc_state);
intel_crtc_update_active_timings(new_crtc_state,
new_crtc_state->vrr.enable);
}
if (!modeset) {
if (new_crtc_state->preload_luts &&
intel_crtc_needs_color_update(new_crtc_state))
@@ -6616,7 +6620,7 @@ static void intel_update_crtc(struct intel_atomic_state *state,
intel_crtc_planes_update_noarm(state, crtc);
/* Perform vblank evasion around commit operation */
intel_pipe_update_start(new_crtc_state);
intel_pipe_update_start(state, crtc);
commit_pipe_pre_planes(state, crtc);
@@ -6624,7 +6628,16 @@ static void intel_update_crtc(struct intel_atomic_state *state,
commit_pipe_post_planes(state, crtc);
intel_pipe_update_end(new_crtc_state);
intel_pipe_update_end(state, crtc);
/*
* VRR/Seamless M/N update may need to update frame timings.
*
* FIXME Should be synchronized with the start of vblank somehow...
*/
if (vrr_enabling(old_crtc_state, new_crtc_state) || new_crtc_state->update_m_n)
intel_crtc_update_active_timings(new_crtc_state,
new_crtc_state->vrr.enable);
/*
* We usually enable FIFO underrun interrupts as part of the


@@ -47,6 +47,7 @@ struct drm_printer;
#define HAS_DPT(i915) (DISPLAY_VER(i915) >= 13)
#define HAS_DSB(i915) (DISPLAY_INFO(i915)->has_dsb)
#define HAS_DSC(__i915) (DISPLAY_RUNTIME_INFO(__i915)->has_dsc)
#define HAS_DSC_MST(__i915) (DISPLAY_VER(__i915) >= 12 && HAS_DSC(__i915))
#define HAS_FBC(i915) (DISPLAY_RUNTIME_INFO(i915)->fbc_mask != 0)
#define HAS_FPGA_DBG_UNCLAIMED(i915) (DISPLAY_INFO(i915)->has_fpga_dbg)
#define HAS_FW_BLC(i915) (DISPLAY_VER(i915) > 2)


@@ -1084,6 +1084,7 @@ struct intel_crtc_state {
unsigned fb_bits; /* framebuffers to flip */
bool update_pipe; /* can a fast modeset be performed? */
bool update_m_n; /* update M/N seamlessly during fastset? */
bool disable_cxsr;
bool update_wm_pre, update_wm_post; /* watermarks are updated */
bool fifo_changed; /* FIFO split is changed */
@@ -1196,7 +1197,6 @@ struct intel_crtc_state {
/* m2_n2 for eDP downclock */
struct intel_link_m_n dp_m2_n2;
bool has_drrs;
bool seamless_m_n;
/* PSR is supported but might not be enabled due the lack of enabled planes */
bool has_psr;


@@ -1310,13 +1310,14 @@ bool intel_dp_has_hdmi_sink(struct intel_dp *intel_dp)
static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
const struct intel_crtc_state *pipe_config)
{
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
/* On TGL, FEC is supported on all Pipes */
if (DISPLAY_VER(dev_priv) >= 12)
return true;
if (DISPLAY_VER(dev_priv) == 11 && pipe_config->cpu_transcoder != TRANSCODER_A)
if (DISPLAY_VER(dev_priv) == 11 && encoder->port != PORT_A &&
!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
return true;
return false;
@@ -2147,8 +2148,12 @@ intel_dp_drrs_compute_config(struct intel_connector *connector,
intel_panel_downclock_mode(connector, &pipe_config->hw.adjusted_mode);
int pixel_clock;
if (has_seamless_m_n(connector))
pipe_config->seamless_m_n = true;
/*
* FIXME all joined pipes share the same transcoder.
* Need to account for that when updating M/N live.
*/
if (has_seamless_m_n(connector) && !pipe_config->bigjoiner_pipes)
pipe_config->update_m_n = true;
if (!can_enable_drrs(connector, pipe_config, downclock_mode)) {
if (intel_cpu_transcoder_has_m2_n2(i915, pipe_config->cpu_transcoder))


@@ -964,7 +964,7 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
return 0;
}
if (DISPLAY_VER(dev_priv) >= 10 &&
if (HAS_DSC_MST(dev_priv) &&
drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
/*
* TBD pass the connector BPC,


@@ -33,6 +33,7 @@
#include "gt/intel_engine.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_tlb.h"
@@ -110,12 +111,34 @@ static inline struct i915_vma *active_to_vma(struct i915_active *ref)
static int __i915_vma_active(struct i915_active *ref)
{
return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
struct i915_vma *vma = active_to_vma(ref);
if (!i915_vma_tryget(vma))
return -ENOENT;
/*
* Exclude global GTT VMA from holding a GT wakeref
* while active, otherwise GPU never goes idle.
*/
if (!i915_vma_is_ggtt(vma))
intel_gt_pm_get(vma->vm->gt);
return 0;
}
static void __i915_vma_retire(struct i915_active *ref)
{
i915_vma_put(active_to_vma(ref));
struct i915_vma *vma = active_to_vma(ref);
if (!i915_vma_is_ggtt(vma)) {
/*
* Since we can be called from atomic contexts,
* use an async variant of intel_gt_pm_put().
*/
intel_gt_pm_put_async(vma->vm->gt);
}
i915_vma_put(vma);
}
static struct i915_vma *
@@ -1413,7 +1436,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
struct i915_vma_work *work = NULL;
struct dma_fence *moving = NULL;
struct i915_vma_resource *vma_res = NULL;
intel_wakeref_t wakeref = 0;
intel_wakeref_t wakeref;
unsigned int bound;
int err;
@@ -1433,8 +1456,14 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
if (err)
return err;
if (flags & PIN_GLOBAL)
wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
/*
* In case of a global GTT, we must hold a runtime-pm wakeref
* while global PTEs are updated. In other cases, we hold
* the rpm reference while the VMA is active. Since runtime
* resume may require allocations, which are forbidden inside
* vm->mutex, get the first rpm wakeref outside of the mutex.
*/
wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
if (flags & vma->vm->bind_async_flags) {
/* lock VM */
@@ -1570,8 +1599,7 @@ err_fence:
if (work)
dma_fence_work_commit_imm(&work->base);
err_rpm:
if (wakeref)
intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
if (moving)
dma_fence_put(moving);


@@ -8,4 +8,8 @@
#define struct_size(p, member, n) \
(sizeof(*(p)) + ((n) * (sizeof(*(p)->member))))
#if defined(__clang__) || (defined(__GNUC__) && __GNUC__ >= 5)
#define check_add_overflow(x, y, sum) __builtin_add_overflow(x, y, sum)
#endif
#endif
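
The shim maps check_add_overflow() onto __builtin_add_overflow(), which clang and gcc 5 or newer provide: it returns true when the sum wraps and stores the wrapped value through the third argument (on older compilers the macro is simply absent and any use fails to compile). A minimal illustration, not taken from the commit:

	uint64_t saddr = 0xfffffffffffff000ULL, size = 0x2000, end;

	if (check_add_overflow(saddr, size, &end))
		return -EINVAL;	/* the sum wrapped around to 0x1000 */

This is exactly the shape of the guards added in amdgpu_vm_verify_parameters() earlier in this commit.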


@@ -926,8 +926,12 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
max_device = ATOM_MAX_SUPPORTED_DEVICE_INFO;
for (i = 0; i < max_device; i++) {
ATOM_CONNECTOR_INFO_I2C ci =
supported_devices->info.asConnInfo[i];
ATOM_CONNECTOR_INFO_I2C ci;
if (frev > 1)
ci = supported_devices->info_2d1.asConnInfo[i];
else
ci = supported_devices->info.asConnInfo[i];
bios_connectors[i].valid = false;