sync with OpenBSD -current

parent: b467550def
commit: 991d31b9d0

40 changed files with 217 additions and 55 deletions
@@ -1,5 +1,5 @@
 #!/bin/ksh
-# $OpenBSD: install.sub,v 1.1264 2024/05/12 19:47:14 kn Exp $
+# $OpenBSD: install.sub,v 1.1265 2024/06/15 15:05:15 florian Exp $
 #
 # Copyright (c) 1997-2015 Todd Miller, Theo de Raadt, Ken Westerback
 # Copyright (c) 2015, Robert Peichaer <rpe@openbsd.org>
@@ -1664,7 +1664,8 @@ install_files() {
	_get_sets=$resp

	# Reorder $_get_sets.
-	_get_sets=$(for s in $ALLSETS; do isin "$s" $_get_sets && echo $s; done)
+	_get_sets=$(for s in $ALLSETS; do isin "$s" $_get_sets && echo $s; done;
+		isin "BUILDINFO" $_files && echo "BUILDINFO")

	# Note which sets didn't verify ok.
	_unver=$_get_sets
@@ -1808,6 +1809,9 @@ install_files() {
			tar -zxphf - -C /mnt
		fi
		;;
+	*BUILDINFO)	$_unpriv ftp -D Installing -Vmo - "$_fsrc" \
+			> "/mnt/var/db/installed.$_f"
+		;;
	*)	# Make a backup of the existing ramdisk kernel in the
		# bsd.rd only download/verify/install case.
		$UPGRADE_BSDRD && [[ $_f == bsd.rd* ]] &&
@@ -1,7 +1,11 @@
-# $OpenBSD: Makefile,v 1.11 2023/09/24 07:01:40 tb Exp $
+# $OpenBSD: Makefile,v 1.12 2024/06/15 08:39:47 tb Exp $

 OPENSSL_RUBY_TESTS = /usr/local/share/openssl-ruby-tests
+.if exists(/usr/local/bin/ruby32)
 RUBY_BINREV = 32
+.else
+RUBY_BINREV = 33
+.endif
 RUBY = ruby${RUBY_BINREV}

 # We work in a subdirectory of obj/ since extconf.rb generates a Makefile whose
@@ -14,7 +14,7 @@ conf() {
	start_sshd
 }

-conf "authfail:30s min:50s max:200s"
+conf "authfail:300s min:350s max:900s"

 verbose "test connect"
 ${SSH} -F $OBJ/ssh_config somehost true || fatal "basic connect failed"
@@ -1,4 +1,4 @@
-/*	$OpenBSD: intr.c,v 1.59 2024/06/07 06:26:23 jsg Exp $	*/
+/*	$OpenBSD: intr.c,v 1.60 2024/06/15 18:01:44 kettenis Exp $	*/
 /*	$NetBSD: intr.c,v 1.3 2003/03/03 22:16:20 fvdl Exp $	*/

 /*
@@ -73,6 +73,8 @@ struct pic softintr_pic = {
	NULL,
 };

+int intr_suspended;
+
 /*
  * Fill in default interrupt table (in case of spurious interrupt
  * during configuration of kernel), setup interrupt control unit
@@ -524,7 +526,6 @@ intr_disestablish(struct intrhand *ih)
 int
 intr_handler(struct intrframe *frame, struct intrhand *ih)
 {
-	extern int cpu_suspended;
	struct cpu_info *ci = curcpu();
	int floor;
	int rc;
@@ -536,7 +537,7 @@ intr_handler(struct intrframe *frame, struct intrhand *ih)
	 * We may not be able to mask MSIs, so block non-wakeup
	 * interrupts while we're suspended.
	 */
-	if (cpu_suspended && (ih->ih_flags & IPL_WAKEUP) == 0)
+	if (intr_suspended && (ih->ih_flags & IPL_WAKEUP) == 0)
		return 0;

 #ifdef MULTIPROCESSOR
@@ -723,6 +724,8 @@ intr_enable_wakeup(void)
		if (pic->pic_hwmask)
			pic->pic_hwmask(pic, pin);
	}
+
+	intr_suspended = 1;
 }

 void
@@ -732,6 +735,8 @@ intr_disable_wakeup(void)
	struct pic *pic;
	int irq, pin;

+	intr_suspended = 0;
+
	for (irq = 0; irq < MAX_INTR_SOURCES; irq++) {
		if (ci->ci_isources[irq] == NULL)
			continue;
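The two hunks above pair up: intr_enable_wakeup() raises intr_suspended on the way into suspend and intr_disable_wakeup() clears it on resume, replacing the old dependency on cpu_suspended. A minimal sketch of the resulting gate in the interrupt path (names taken from the diff; handler plumbing omitted):

/*
 * Sketch only: while suspended, run nothing except interrupts
 * explicitly flagged as wakeup sources (IPL_WAKEUP).
 */
int intr_suspended;

static int
intr_should_run(int ih_flags)
{
	if (intr_suspended && (ih_flags & IPL_WAKEUP) == 0)
		return 0;	/* dropped/deferred until resume */
	return 1;		/* proceed to the real handler */
}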
@@ -1,4 +1,4 @@
-/*	$OpenBSD: ufshci.c,v 1.37 2024/06/14 20:52:07 mglocker Exp $ */
+/*	$OpenBSD: ufshci.c,v 1.38 2024/06/15 18:26:25 mglocker Exp $ */

 /*
  * Copyright (c) 2022 Marcus Glocker <mglocker@openbsd.org>
@@ -263,8 +263,8 @@ ufshci_attach(struct ufshci_softc *sc)
	/* Attach to SCSI layer */
	saa.saa_adapter = &ufshci_switch;
	saa.saa_adapter_softc = sc;
-	saa.saa_adapter_buswidth = 2; /* XXX: What's the right value? */
-	saa.saa_luns = 1; /* XXX: Should we use ufshci_utr_cmd_lun() */
+	saa.saa_adapter_buswidth = UFSHCI_TARGETS_MAX + 1;
+	saa.saa_luns = 1;
	saa.saa_adapter_target = 0;
	saa.saa_openings = sc->sc_nutrs;
	saa.saa_pool = &sc->sc_iopool;
@@ -1,4 +1,4 @@
-/*	$OpenBSD: ufshcireg.h,v 1.13 2024/05/24 20:34:06 mglocker Exp $ */
+/*	$OpenBSD: ufshcireg.h,v 1.14 2024/06/15 18:26:25 mglocker Exp $ */

 /*
  * Copyright (c) 2022 Marcus Glocker <mglocker@openbsd.org>
@@ -25,6 +25,7 @@
 #define UFSHCI_INTR_AGGR_COUNT_MAX	31
 #define UFSHCI_SLOTS_MIN	1
 #define UFSHCI_SLOTS_MAX	32
+#define UFSHCI_TARGETS_MAX	1
 #define UFSHCI_LBS		4096 /* UFS Logical Block Size:
					For UFS minimum size shall be
					4096 bytes */
@@ -213,7 +213,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
	    (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
	     kfd_mem_limit.max_ttm_mem_limit) ||
	    (adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] + vram_needed >
-	     vram_size - reserved_for_pt)) {
+	     vram_size - reserved_for_pt - atomic64_read(&adev->vram_pin_size))) {
		ret = -ENOMEM;
		goto release;
	}
@@ -235,5 +235,8 @@ static const struct pci_matchid amdgpu_devices[] = {
	{0x1002, 0x1901 },	/* Radeon 740M */

	/* GC 11.5.0, DCN 3.5.0, APU, linux >= 6.7 */
-	/* GC 11.5.1, DCN 3.5.0, APU, linux >= 6.9 */
+	/* GC 11.5.1, DCN 3.5.1, APU, linux >= 6.9 */
+
+	/* GC 12.0.0, DCN 4.0.1, dGPU, linux ?, amd-staging-drm-next */
+	/* GC 12.0.1, DCN 4.0.1, dGPU, linux ?, amd-staging-drm-next */
 };
@@ -1098,6 +1098,7 @@ void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
		return;

	amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
+	del_timer_sync(&ring->fence_drv.fallback_timer);
	amdgpu_ring_fini(ring);
	kfree(ring);
 }
@@ -615,6 +615,8 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
	else
		amdgpu_bo_placement_from_domain(bo, bp->domain);
	if (bp->type == ttm_bo_type_kernel)
+		bo->tbo.priority = 2;
+	else if (!(bp->flags & AMDGPU_GEM_CREATE_DISCARDABLE))
		bo->tbo.priority = 1;

	if (!bp->destroy)
@@ -9157,7 +9157,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
		7 + /* PIPELINE_SYNC */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
-		2 + /* VM_FLUSH */
+		4 + /* VM_FLUSH */
		8 + /* FENCE for VM_FLUSH */
		20 + /* GDS switch */
		4 + /* double SWITCH_BUFFER,
@@ -9248,7 +9248,6 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
		7 + /* gfx_v10_0_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
-		2 + /* gfx_v10_0_ring_emit_vm_flush */
		8 + 8 + 8, /* gfx_v10_0_ring_emit_fence_kiq x3 for user fence, vm fence */
	.emit_ib_size =	7, /* gfx_v10_0_ring_emit_ib_compute */
	.emit_ib = gfx_v10_0_ring_emit_ib_compute,
@@ -6102,7 +6102,7 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
		7 + /* PIPELINE_SYNC */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
-		2 + /* VM_FLUSH */
+		4 + /* VM_FLUSH */
		8 + /* FENCE for VM_FLUSH */
		20 + /* GDS switch */
		5 + /* COND_EXEC */
@@ -6187,7 +6187,6 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = {
		7 + /* gfx_v11_0_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
-		2 + /* gfx_v11_0_ring_emit_vm_flush */
		8 + 8 + 8, /* gfx_v11_0_ring_emit_fence_kiq x3 for user fence, vm fence */
	.emit_ib_size =	7, /* gfx_v11_0_ring_emit_ib_compute */
	.emit_ib = gfx_v11_0_ring_emit_ib_compute,
@@ -6988,7 +6988,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
		7 + /* gfx_v9_0_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
-		2 + /* gfx_v9_0_ring_emit_vm_flush */
		8 + 8 + 8 + /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
		7 + /* gfx_v9_0_emit_mem_sync */
		5 + /* gfx_v9_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX register */
@@ -7026,7 +7025,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
		7 + /* gfx_v9_0_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
-		2 + /* gfx_v9_0_ring_emit_vm_flush */
		8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
	.emit_ib_size =	7, /* gfx_v9_0_ring_emit_ib_compute */
	.emit_fence = gfx_v9_0_ring_emit_fence_kiq,
@@ -425,16 +425,16 @@ out:

 static int gfx_v9_4_3_init_microcode(struct amdgpu_device *adev)
 {
-	const char *chip_name;
+	char ucode_prefix[15];
	int r;

-	chip_name = "gc_9_4_3";
+	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));

-	r = gfx_v9_4_3_init_rlc_microcode(adev, chip_name);
+	r = gfx_v9_4_3_init_rlc_microcode(adev, ucode_prefix);
	if (r)
		return r;

-	r = gfx_v9_4_3_init_cp_compute_microcode(adev, chip_name);
+	r = gfx_v9_4_3_init_cp_compute_microcode(adev, ucode_prefix);
	if (r)
		return r;

@@ -516,10 +516,19 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
	start = prange->start << PAGE_SHIFT;
	end = (prange->last + 1) << PAGE_SHIFT;

+	r = amdgpu_amdkfd_reserve_mem_limit(node->adev,
+					prange->npages * PAGE_SIZE,
+					KFD_IOC_ALLOC_MEM_FLAGS_VRAM,
+					node->xcp ? node->xcp->id : 0);
+	if (r) {
+		dev_dbg(node->adev->dev, "failed to reserve VRAM, r: %ld\n", r);
+		return -ENOSPC;
+	}
+
	r = svm_range_vram_node_new(node, prange, true);
	if (r) {
		dev_dbg(node->adev->dev, "fail %ld to alloc vram\n", r);
-		return r;
+		goto out;
	}
	ttm_res_offset = prange->offset << PAGE_SHIFT;
@@ -549,6 +558,11 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
		svm_range_vram_node_free(prange);
	}

+out:
+	amdgpu_amdkfd_unreserve_mem_limit(node->adev,
+					prange->npages * PAGE_SIZE,
+					KFD_IOC_ALLOC_MEM_FLAGS_VRAM,
+					node->xcp ? node->xcp->id : 0);
	return r < 0 ? r : 0;
 }

@@ -828,6 +828,14 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
	if (process) {
		pr_debug("Process already found\n");
	} else {
+		/* If the process just called exec(3), it is possible that the
+		 * cleanup of the kfd_process (following the release of the mm
+		 * of the old process image) is still in the cleanup work queue.
+		 * Make sure to drain any job before trying to recreate any
+		 * resource for this process.
+		 */
+		flush_workqueue(kfd_process_wq);
+
		process = create_process(thread);
		if (IS_ERR(process))
			goto out;
@@ -3416,7 +3416,7 @@ svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
	r = svm_migrate_to_vram(prange, best_loc, mm, KFD_MIGRATE_TRIGGER_PREFETCH);
	*migrated = !r;

-	return r;
+	return 0;
 }

 int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
@@ -2960,6 +2960,7 @@ static int dm_resume(void *handle)
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
+		dm_new_crtc_state->base.color_mgmt_changed = true;
	}

	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
@@ -606,6 +606,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
			&connector->base,
			dev->mode_config.tile_property,
			0);
+	connector->colorspace_property = master->base.colorspace_property;
+	if (connector->colorspace_property)
+		drm_connector_attach_colorspace_property(connector);

	drm_connector_set_path_property(connector, pathprop);

@@ -145,6 +145,10 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
	 */
	clk_mgr_base->clks.zstate_support = new_clocks->zstate_support;
	if (safe_to_lower) {
+		if (clk_mgr_base->clks.dtbclk_en && !new_clocks->dtbclk_en) {
+			dcn315_smu_set_dtbclk(clk_mgr, false);
+			clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
+		}
		/* check that we're not already in lower */
		if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
			display_count = dcn315_get_active_display_cnt_wa(dc, context);
|
@ -160,6 +164,10 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
|
if (!clk_mgr_base->clks.dtbclk_en && new_clocks->dtbclk_en) {
|
||||||
|
dcn315_smu_set_dtbclk(clk_mgr, true);
|
||||||
|
clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
|
||||||
|
}
|
||||||
/* check that we're not already in D0 */
|
/* check that we're not already in D0 */
|
||||||
if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_MISSION_MODE) {
|
if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_MISSION_MODE) {
|
||||||
union display_idle_optimization_u idle_info = { 0 };
|
union display_idle_optimization_u idle_info = { 0 };
|
||||||
|
|
|
@@ -547,8 +547,12 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
			 * since we calculate mode support based on softmax being the max UCLK
			 * frequency.
			 */
-			dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
-					dc->clk_mgr->bw_params->dc_mode_softmax_memclk);
+			if (dc->debug.disable_dc_mode_overwrite) {
+				dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK, dc->clk_mgr->bw_params->max_memclk_mhz);
+				dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, dc->clk_mgr->bw_params->max_memclk_mhz);
+			} else
+				dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
+						dc->clk_mgr->bw_params->dc_mode_softmax_memclk);
		} else {
			dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, dc->clk_mgr->bw_params->max_memclk_mhz);
		}
@@ -581,8 +585,13 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
	/* set UCLK to requested value if P-State switching is supported, or to re-enable P-State switching */
	if (clk_mgr_base->clks.p_state_change_support &&
	    (update_uclk || !clk_mgr_base->clks.prev_p_state_change_support) &&
-	    !dc->work_arounds.clock_update_disable_mask.uclk)
+	    !dc->work_arounds.clock_update_disable_mask.uclk) {
+		if (dc->clk_mgr->dc_mode_softmax_enabled && dc->debug.disable_dc_mode_overwrite)
+			dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK,
+					max((int)dc->clk_mgr->bw_params->dc_mode_softmax_memclk, khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz)));
+
		dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz));
+	}

	if (clk_mgr_base->clks.num_ways != new_clocks->num_ways &&
	    clk_mgr_base->clks.num_ways > new_clocks->num_ways) {
@@ -1710,6 +1710,9 @@ bool dc_validate_boot_timing(const struct dc *dc,
			return false;
	}

+	if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED)
+		return false;
+
	if (dc->link_srv->edp_is_ilr_optimization_required(link, crtc_timing)) {
		DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
		return false;
@@ -382,6 +382,11 @@ bool cm_helper_translate_curve_to_hw_format(struct dc_context *ctx,
			i += increment) {
		if (j == hw_points - 1)
			break;
+		if (i >= TRANSFER_FUNC_POINTS) {
+			DC_LOG_ERROR("Index out of bounds: i=%d, TRANSFER_FUNC_POINTS=%d\n",
+				     i, TRANSFER_FUNC_POINTS);
+			return false;
+		}
		rgb_resulted[j].red = output_tf->tf_pts.red[i];
		rgb_resulted[j].green = output_tf->tf_pts.green[i];
		rgb_resulted[j].blue = output_tf->tf_pts.blue[i];
@@ -291,6 +291,7 @@ static struct _vcs_dpi_soc_bounding_box_st dcn3_15_soc = {
	.do_urgent_latency_adjustment = false,
	.urgent_latency_adjustment_fabric_clock_component_us = 0,
	.urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+	.dispclk_dppclk_vco_speed_mhz = 2400.0,
	.num_chans = 4,
	.dummy_pstate_latency_us = 10.0
 };
@@ -438,6 +439,7 @@ static struct _vcs_dpi_soc_bounding_box_st dcn3_16_soc = {
	.do_urgent_latency_adjustment = false,
	.urgent_latency_adjustment_fabric_clock_component_us = 0,
	.urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+	.dispclk_dppclk_vco_speed_mhz = 2500.0,
 };

 void dcn31_zero_pipe_dcc_fraction(display_e2e_pipe_params_st *pipes,
@@ -270,7 +270,7 @@ static void set_usb4_req_bw_req(struct dc_link *link, int req_bw)

	/* Error check whether requested and allocated are equal */
	req_bw = requested_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
-	if (req_bw == link->dpia_bw_alloc_config.allocated_bw) {
+	if (req_bw && (req_bw == link->dpia_bw_alloc_config.allocated_bw)) {
		DC_LOG_ERROR("%s: Request bw equals to allocated bw for link(%d)\n",
			__func__, link->link_index);
	}
@@ -341,6 +341,14 @@ bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link)
			ret = true;
			init_usb4_bw_struct(link);
			link->dpia_bw_alloc_config.bw_alloc_enabled = true;
+
+			/*
+			 * During DP tunnel creation, CM preallocates BW and reduces estimated BW of other
+			 * DPIA. CM release preallocation only when allocation is complete. Do zero alloc
+			 * to make the CM to release preallocation and update estimated BW correctly for
+			 * all DPIAs per host router
+			 */
+			link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, 0);
		}
	}

@@ -2041,6 +2041,20 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
	return sizeof(struct gpu_metrics_v1_3);
 }

+static void smu_v13_0_6_restore_pci_config(struct smu_context *smu)
+{
+	STUB();
+#if notyet
+	struct amdgpu_device *adev = smu->adev;
+	int i;
+
+	for (i = 0; i < 16; i++)
+		pci_write_config_dword(adev->pdev, i * 4,
+				       adev->pdev->saved_config_space[i]);
+	pci_restore_msi_state(adev->pdev);
+#endif
+}
+
 static int smu_v13_0_6_mode2_reset(struct smu_context *smu)
 {
	int ret = 0, index;
@@ -2062,6 +2076,20 @@ static int smu_v13_0_6_mode2_reset(struct smu_context *smu)
	/* Restore the config space saved during init */
	amdgpu_device_load_pci_state(adev->pdev);

+	/* Certain platforms have switches which assign virtual BAR values to
+	 * devices. OS uses the virtual BAR values and device behind the switch
+	 * is assgined another BAR value. When device's config space registers
+	 * are queried, switch returns the virtual BAR values. When mode-2 reset
+	 * is performed, switch is unaware of it, and will continue to return
+	 * the same virtual values to the OS.This affects
+	 * pci_restore_config_space() API as it doesn't write the value saved if
+	 * the current value read from config space is the same as what is
+	 * saved. As a workaround, make sure the config space is restored
+	 * always.
+	 */
+	if (!(adev->flags & AMD_IS_APU))
+		smu_v13_0_6_restore_pci_config(smu);
+
	dev_dbg(smu->adev->dev, "wait for reset ack\n");
	do {
		ret = smu_cmn_wait_for_response(smu);
@@ -532,6 +532,15 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,

	mutex_lock(&aux->hw_mutex);

+	/*
+	 * If the device attached to the aux bus is powered down then there's
+	 * no reason to attempt a transfer. Error out immediately.
+	 */
+	if (aux->powered_down) {
+		ret = -EBUSY;
+		goto unlock;
+	}
+
	/*
	 * The specification doesn't give any recommendation on how often to
	 * retry native transactions. We used to retry 7 times like for
@@ -599,6 +608,29 @@ int drm_dp_dpcd_probe(struct drm_dp_aux *aux, unsigned int offset)
 }
 EXPORT_SYMBOL(drm_dp_dpcd_probe);

+/**
+ * drm_dp_dpcd_set_powered() - Set whether the DP device is powered
+ * @aux: DisplayPort AUX channel; for convenience it's OK to pass NULL here
+ *       and the function will be a no-op.
+ * @powered: true if powered; false if not
+ *
+ * If the endpoint device on the DP AUX bus is known to be powered down
+ * then this function can be called to make future transfers fail immediately
+ * instead of needing to time out.
+ *
+ * If this function is never called then a device defaults to being powered.
+ */
+void drm_dp_dpcd_set_powered(struct drm_dp_aux *aux, bool powered)
+{
+	if (!aux)
+		return;
+
+	mutex_lock(&aux->hw_mutex);
+	aux->powered_down = !powered;
+	mutex_unlock(&aux->hw_mutex);
+}
+EXPORT_SYMBOL(drm_dp_dpcd_set_powered);
+
 /**
  * drm_dp_dpcd_read() - read a series of bytes from the DPCD
  * @aux: DisplayPort AUX channel (SST or MST)
@@ -1855,6 +1887,9 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
	struct drm_dp_aux_msg msg;
	int err = 0;

+	if (aux->powered_down)
+		return -EBUSY;
+
	dp_aux_i2c_transfer_size = clamp(dp_aux_i2c_transfer_size, 1, DP_AUX_MAX_PAYLOAD_BYTES);

	memset(&msg, 0, sizeof(msg));
@@ -694,11 +694,17 @@ void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
			 */
			list_for_each_entry_from(next, &encoder->bridge_chain,
						 chain_node) {
-				if (next->pre_enable_prev_first) {
+				if (!next->pre_enable_prev_first) {
					next = list_prev_entry(next, chain_node);
					limit = next;
					break;
				}
+
+				if (list_is_last(&next->chain_node,
+						 &encoder->bridge_chain)) {
+					limit = next;
+					break;
+				}
			}

			/* Call these bridges in reverse order */
@@ -781,7 +787,7 @@ void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
					/* Found first bridge that does NOT
					 * request prev to be enabled first
					 */
-					limit = list_prev_entry(next, chain_node);
+					limit = next;
					break;
				}
			}
@@ -7365,7 +7365,7 @@ static void drm_parse_tiled_block(struct drm_connector *connector,
 static bool displayid_is_tiled_block(const struct displayid_iter *iter,
				     const struct displayid_block *block)
 {
-	return (displayid_version(iter) == DISPLAY_ID_STRUCTURE_VER_12 &&
+	return (displayid_version(iter) < DISPLAY_ID_STRUCTURE_VER_20 &&
		block->tag == DATA_BLOCK_TILED_DISPLAY) ||
	       (displayid_version(iter) == DISPLAY_ID_STRUCTURE_VER_20 &&
		block->tag == DATA_BLOCK_2_TILED_DISPLAY_TOPOLOGY);
@@ -666,7 +666,7 @@ EXPORT_SYMBOL(mipi_dsi_set_maximum_return_packet_size);
 *
 * Return: 0 on success or a negative error code on failure.
 */
-ssize_t mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable)
+int mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable)
 {
	/* Note: Needs updating for non-default PPS or algorithm */
	u8 tx[2] = { enable << 0, 0 };
@@ -691,8 +691,8 @@ EXPORT_SYMBOL(mipi_dsi_compression_mode);
 *
 * Return: 0 on success or a negative error code on failure.
 */
-ssize_t mipi_dsi_picture_parameter_set(struct mipi_dsi_device *dsi,
-				       const struct drm_dsc_picture_parameter_set *pps)
+int mipi_dsi_picture_parameter_set(struct mipi_dsi_device *dsi,
+				   const struct drm_dsc_picture_parameter_set *pps)
 {
	struct mipi_dsi_msg msg = {
		.channel = dsi->channel,
@@ -280,7 +280,7 @@ static void ext_pwm_set_backlight(const struct drm_connector_state *conn_state,
	struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel;

	pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100);
-	pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state);
+	pwm_apply_might_sleep(panel->backlight.pwm, &panel->backlight.pwm_state);
 #endif
 }

@@ -436,7 +436,7 @@ static void ext_pwm_disable_backlight(const struct drm_connector_state *old_conn
	intel_backlight_set_pwm_level(old_conn_state, level);

	panel->backlight.pwm_state.enabled = false;
-	pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state);
+	pwm_apply_might_sleep(panel->backlight.pwm, &panel->backlight.pwm_state);
 #endif
 }

@@ -761,7 +761,7 @@ static void ext_pwm_enable_backlight(const struct intel_crtc_state *crtc_state,

	pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100);
	panel->backlight.pwm_state.enabled = true;
-	pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state);
+	pwm_apply_might_sleep(panel->backlight.pwm, &panel->backlight.pwm_state);
 #endif
 }

@@ -927,6 +927,12 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
	if (IS_DG2(gt->i915)) {
		u8 first_ccs = __ffs(CCS_MASK(gt));

+		/*
+		 * Store the number of active cslices before
+		 * changing the CCS engine configuration
+		 */
+		gt->ccs.cslices = CCS_MASK(gt);
+
		/* Mask off all the CCS engine */
		info->engine_mask &= ~GENMASK(CCS3, CCS0);
		/* Put back in the first CCS engine */
@@ -19,7 +19,7 @@ unsigned int intel_gt_apply_ccs_mode(struct intel_gt *gt)

	/* Build the value for the fixed CCS load balancing */
	for (cslice = 0; cslice < I915_MAX_CCS; cslice++) {
-		if (CCS_MASK(gt) & BIT(cslice))
+		if (gt->ccs.cslices & BIT(cslice))
			/*
			 * If available, assign the cslice
			 * to the first available engine...
@@ -207,6 +207,14 @@ struct intel_gt {
					    [MAX_ENGINE_INSTANCE + 1];
	enum intel_submission_method submission_method;

+	struct {
+		/*
+		 * Mask of the non fused CCS slices
+		 * to be used for the load balancing
+		 */
+		intel_engine_mask_t cslices;
+	} ccs;
+
	/*
	 * Default address space (either GGTT or ppGTT depending on arch).
	 *
@@ -719,11 +719,9 @@ static int threaded_migrate(struct intel_migrate *migrate,
		if (IS_ERR_OR_NULL(tsk))
			continue;

-		status = kthread_stop(tsk);
+		status = kthread_stop_put(tsk);
		if (status && !err)
			err = status;
-
-		put_task_struct(tsk);
	}

	kfree(thread);
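A side note on the hunk above (a fact about the Linux kthread API, not shown in the diff): kthread_stop_put() folds the reference drop into the stop, which is why both put_task_struct() and the blank line before it disappear. Its behavior is equivalent to:

/* kthread_stop_put(tsk) behaves like this helper: */
static inline int kthread_stop_put_equiv(struct task_struct *tsk)
{
	int ret = kthread_stop(tsk);	/* wait for the thread to exit */

	put_task_struct(tsk);		/* drop the reference held on it */
	return ret;
}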
@@ -29,9 +29,9 @@
 */

 #define GUC_KLV_LEN_MIN				1u
-#define GUC_KLV_0_KEY				(0xffff << 16)
-#define GUC_KLV_0_LEN				(0xffff << 0)
-#define GUC_KLV_n_VALUE				(0xffffffff << 0)
+#define GUC_KLV_0_KEY				(0xffffu << 16)
+#define GUC_KLV_0_LEN				(0xffffu << 0)
+#define GUC_KLV_n_VALUE				(0xffffffffu << 0)

 /**
  * DOC: GuC Self Config KLVs
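Why the u suffixes above matter: 0xffff is a signed int, so 0xffff << 16 shifts into the sign bit (undefined behavior in C) and on common ABIs yields a negative value that sign-extends when the mask is widened to a 64-bit type. A standalone illustration (not code from the tree):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 0xffff is a signed int; shifting into bit 31 is UB, and the
	 * resulting negative value sign-extends when widened to 64 bits. */
	uint64_t bad  = (uint64_t)(0xffff << 16);	/* typically 0xffffffffffff0000 */
	uint64_t good = (uint64_t)(0xffffu << 16);	/* always   0x00000000ffff0000 */

	printf("bad=%016llx good=%016llx\n",
	    (unsigned long long)bad, (unsigned long long)good);
	return 0;
}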
@@ -405,7 +405,7 @@ static void init_irq_map(struct intel_gvt_irq *irq)
 #define MSI_CAP_DATA(offset) (offset + 8)
 #define MSI_CAP_EN 0x1

-static int inject_virtual_interrupt(struct intel_vgpu *vgpu)
+static void inject_virtual_interrupt(struct intel_vgpu *vgpu)
 {
	unsigned long offset = vgpu->gvt->device_info.msi_cap_offset;
	u16 control, data;
@@ -417,10 +417,10 @@ static void inject_virtual_interrupt(struct intel_vgpu *vgpu)

	/* Do not generate MSI if MSIEN is disabled */
	if (!(control & MSI_CAP_EN))
-		return 0;
+		return;

	if (WARN(control & GENMASK(15, 1), "only support one MSI format\n"))
-		return -EINVAL;
+		return;

	trace_inject_msi(vgpu->id, addr, data);

@@ -434,10 +434,9 @@ static void inject_virtual_interrupt(struct intel_vgpu *vgpu)
	 * returned and don't inject interrupt into guest.
	 */
	if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
-		return -ESRCH;
-	if (vgpu->msi_trigger && eventfd_signal(vgpu->msi_trigger, 1) != 1)
-		return -EFAULT;
-	return 0;
+		return;
+	if (vgpu->msi_trigger)
+		eventfd_signal(vgpu->msi_trigger, 1);
 }

 static void propagate_event(struct intel_gvt_irq *irq,
@@ -449,9 +449,15 @@ struct drm_dp_aux {
	 * @is_remote: Is this AUX CH actually using sideband messaging.
	 */
	bool is_remote;
+
+	/**
+	 * @powered_down: If true then the remote endpoint is powered down.
+	 */
+	bool powered_down;
 };

 int drm_dp_dpcd_probe(struct drm_dp_aux *aux, unsigned int offset);
+void drm_dp_dpcd_set_powered(struct drm_dp_aux *aux, bool powered);
 ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
			 void *buffer, size_t size);
 ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
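The header hunk above completes the drm_dp_dpcd_set_powered() plumbing seen earlier: a driver that cuts power to the panel can mark the AUX endpoint down so DPCD and AUX-I2C transfers fail fast with -EBUSY instead of timing out. A hypothetical caller might look like this (example_panel and its fields are illustrative, not from the diff):

struct example_panel {
	struct drm_dp_aux *aux;
	struct regulator *supply;
};

static void example_panel_power_off(struct example_panel *p)
{
	/* Fail future AUX transfers immediately rather than timing out. */
	drm_dp_dpcd_set_powered(p->aux, false);
	regulator_disable(p->supply);
}

static int example_panel_power_on(struct example_panel *p)
{
	int ret = regulator_enable(p->supply);

	if (ret)
		return ret;
	drm_dp_dpcd_set_powered(p->aux, true);
	return 0;
}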
@@ -30,7 +30,6 @@ struct drm_edid;
 #define VESA_IEEE_OUI			0x3a0292

 /* DisplayID Structure versions */
-#define DISPLAY_ID_STRUCTURE_VER_12	0x12
 #define DISPLAY_ID_STRUCTURE_VER_20	0x20

 /* DisplayID Structure v1r2 Data Blocks */
@@ -70,8 +70,8 @@ ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *, u8, const void *, size_t);
 int mipi_dsi_dcs_nop(struct mipi_dsi_device *);
 int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *, u16);
 bool mipi_dsi_packet_format_is_long(u8);
-ssize_t mipi_dsi_compression_mode(struct mipi_dsi_device *, bool);
-ssize_t mipi_dsi_picture_parameter_set(struct mipi_dsi_device *,
+int mipi_dsi_compression_mode(struct mipi_dsi_device *, bool);
+int mipi_dsi_picture_parameter_set(struct mipi_dsi_device *,
	const struct drm_dsc_picture_parameter_set *);

 static inline int