diff --git a/distrib/miniroot/install.sub b/distrib/miniroot/install.sub index b24b55077..701b0e3de 100644 --- a/distrib/miniroot/install.sub +++ b/distrib/miniroot/install.sub @@ -1,5 +1,5 @@ #!/bin/ksh -# $OpenBSD: install.sub,v 1.1264 2024/05/12 19:47:14 kn Exp $ +# $OpenBSD: install.sub,v 1.1265 2024/06/15 15:05:15 florian Exp $ # # Copyright (c) 1997-2015 Todd Miller, Theo de Raadt, Ken Westerback # Copyright (c) 2015, Robert Peichaer @@ -1664,7 +1664,8 @@ install_files() { _get_sets=$resp # Reorder $_get_sets. - _get_sets=$(for s in $ALLSETS; do isin "$s" $_get_sets && echo $s; done) + _get_sets=$(for s in $ALLSETS; do isin "$s" $_get_sets && echo $s; done; + isin "BUILDINFO" $_files && echo "BUILDINFO") # Note which sets didn't verify ok. _unver=$_get_sets @@ -1808,6 +1809,9 @@ install_files() { tar -zxphf - -C /mnt fi ;; + *BUILDINFO) $_unpriv ftp -D Installing -Vmo - "$_fsrc" \ + > "/mnt/var/db/installed.$_f" + ;; *) # Make a backup of the existing ramdisk kernel in the # bsd.rd only download/verify/install case. $UPGRADE_BSDRD && [[ $_f == bsd.rd* ]] && diff --git a/regress/lib/libssl/openssl-ruby/Makefile b/regress/lib/libssl/openssl-ruby/Makefile index 40b6a374d..bf29045bc 100644 --- a/regress/lib/libssl/openssl-ruby/Makefile +++ b/regress/lib/libssl/openssl-ruby/Makefile @@ -1,7 +1,11 @@ -# $OpenBSD: Makefile,v 1.11 2023/09/24 07:01:40 tb Exp $ +# $OpenBSD: Makefile,v 1.12 2024/06/15 08:39:47 tb Exp $ OPENSSL_RUBY_TESTS = /usr/local/share/openssl-ruby-tests +.if exists(/usr/local/bin/ruby32) RUBY_BINREV = 32 +.else +RUBY_BINREV = 33 +.endif RUBY = ruby${RUBY_BINREV} # We work in a subdirectory of obj/ since extconf.rb generates a Makefile whose diff --git a/regress/usr.bin/ssh/penalty.sh b/regress/usr.bin/ssh/penalty.sh index 4308e0b82..9556b31f7 100644 --- a/regress/usr.bin/ssh/penalty.sh +++ b/regress/usr.bin/ssh/penalty.sh @@ -14,7 +14,7 @@ conf() { start_sshd } -conf "authfail:30s min:50s max:200s" +conf "authfail:300s min:350s max:900s" verbose "test connect" ${SSH} -F $OBJ/ssh_config somehost true || fatal "basic connect failed" diff --git a/sys/arch/amd64/amd64/intr.c b/sys/arch/amd64/amd64/intr.c index efdfbc96c..96f5d4ee9 100644 --- a/sys/arch/amd64/amd64/intr.c +++ b/sys/arch/amd64/amd64/intr.c @@ -1,4 +1,4 @@ -/* $OpenBSD: intr.c,v 1.59 2024/06/07 06:26:23 jsg Exp $ */ +/* $OpenBSD: intr.c,v 1.60 2024/06/15 18:01:44 kettenis Exp $ */ /* $NetBSD: intr.c,v 1.3 2003/03/03 22:16:20 fvdl Exp $ */ /* @@ -73,6 +73,8 @@ struct pic softintr_pic = { NULL, }; +int intr_suspended; + /* * Fill in default interrupt table (in case of spurious interrupt * during configuration of kernel), setup interrupt control unit @@ -524,7 +526,6 @@ intr_disestablish(struct intrhand *ih) int intr_handler(struct intrframe *frame, struct intrhand *ih) { - extern int cpu_suspended; struct cpu_info *ci = curcpu(); int floor; int rc; @@ -536,7 +537,7 @@ intr_handler(struct intrframe *frame, struct intrhand *ih) * We may not be able to mask MSIs, so block non-wakeup * interrupts while we're suspended. 
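* (intr_suspended is set in intr_enable_wakeup() and cleared again in * intr_disable_wakeup(); both hunks appear further down in this diff.)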
*/ - if (cpu_suspended && (ih->ih_flags & IPL_WAKEUP) == 0) + if (intr_suspended && (ih->ih_flags & IPL_WAKEUP) == 0) return 0; #ifdef MULTIPROCESSOR @@ -723,6 +724,8 @@ intr_enable_wakeup(void) if (pic->pic_hwmask) pic->pic_hwmask(pic, pin); } + + intr_suspended = 1; } void @@ -732,6 +735,8 @@ intr_disable_wakeup(void) struct pic *pic; int irq, pin; + intr_suspended = 0; + for (irq = 0; irq < MAX_INTR_SOURCES; irq++) { if (ci->ci_isources[irq] == NULL) continue; diff --git a/sys/dev/ic/ufshci.c b/sys/dev/ic/ufshci.c index 3193aee80..4ae343819 100644 --- a/sys/dev/ic/ufshci.c +++ b/sys/dev/ic/ufshci.c @@ -1,4 +1,4 @@ -/* $OpenBSD: ufshci.c,v 1.37 2024/06/14 20:52:07 mglocker Exp $ */ +/* $OpenBSD: ufshci.c,v 1.38 2024/06/15 18:26:25 mglocker Exp $ */ /* * Copyright (c) 2022 Marcus Glocker @@ -263,8 +263,8 @@ ufshci_attach(struct ufshci_softc *sc) /* Attach to SCSI layer */ saa.saa_adapter = &ufshci_switch; saa.saa_adapter_softc = sc; - saa.saa_adapter_buswidth = 2; /* XXX: What's the right value? */ - saa.saa_luns = 1; /* XXX: Should we use ufshci_utr_cmd_lun() */ + saa.saa_adapter_buswidth = UFSHCI_TARGETS_MAX + 1; + saa.saa_luns = 1; saa.saa_adapter_target = 0; saa.saa_openings = sc->sc_nutrs; saa.saa_pool = &sc->sc_iopool; diff --git a/sys/dev/ic/ufshcireg.h b/sys/dev/ic/ufshcireg.h index b96bef52e..47e05ba6c 100644 --- a/sys/dev/ic/ufshcireg.h +++ b/sys/dev/ic/ufshcireg.h @@ -1,4 +1,4 @@ -/* $OpenBSD: ufshcireg.h,v 1.13 2024/05/24 20:34:06 mglocker Exp $ */ +/* $OpenBSD: ufshcireg.h,v 1.14 2024/06/15 18:26:25 mglocker Exp $ */ /* * Copyright (c) 2022 Marcus Glocker @@ -25,6 +25,7 @@ #define UFSHCI_INTR_AGGR_COUNT_MAX 31 #define UFSHCI_SLOTS_MIN 1 #define UFSHCI_SLOTS_MAX 32 +#define UFSHCI_TARGETS_MAX 1 #define UFSHCI_LBS 4096 /* UFS Logical Block Size: For UFS minimum size shall be 4096 bytes */ diff --git a/sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 6044d3fc8..6bbd18cb7 100644 --- a/sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -213,7 +213,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, (kfd_mem_limit.ttm_mem_used + ttm_mem_needed > kfd_mem_limit.max_ttm_mem_limit) || (adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] + vram_needed > - vram_size - reserved_for_pt)) { + vram_size - reserved_for_pt - atomic64_read(&adev->vram_pin_size))) { ret = -ENOMEM; goto release; } diff --git a/sys/dev/pci/drm/amd/amdgpu/amdgpu_devlist.h b/sys/dev/pci/drm/amd/amdgpu/amdgpu_devlist.h index 1fa8e5bed..f710dd397 100644 --- a/sys/dev/pci/drm/amd/amdgpu/amdgpu_devlist.h +++ b/sys/dev/pci/drm/amd/amdgpu/amdgpu_devlist.h @@ -235,5 +235,8 @@ static const struct pci_matchid amdgpu_devices[] = { {0x1002, 0x1901 }, /* Radeon 740M */ /* GC 11.5.0, DCN 3.5.0, APU, linux >= 6.7 */ - /* GC 11.5.1, DCN 3.5.0, APU, linux >= 6.9 */ + /* GC 11.5.1, DCN 3.5.1, APU, linux >= 6.9 */ + + /* GC 12.0.0, DCN 4.0.1, dGPU, linux ?, amd-staging-drm-next */ + /* GC 12.0.1, DCN 4.0.1, dGPU, linux ?, amd-staging-drm-next */ }; diff --git a/sys/dev/pci/drm/amd/amdgpu/amdgpu_mes.c b/sys/dev/pci/drm/amd/amdgpu/amdgpu_mes.c index a88eff2f5..3cb010f1f 100644 --- a/sys/dev/pci/drm/amd/amdgpu/amdgpu_mes.c +++ b/sys/dev/pci/drm/amd/amdgpu/amdgpu_mes.c @@ -1098,6 +1098,7 @@ void amdgpu_mes_remove_ring(struct amdgpu_device *adev, return; amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id); + del_timer_sync(&ring->fence_drv.fallback_timer); amdgpu_ring_fini(ring); kfree(ring); } diff --git 
a/sys/dev/pci/drm/amd/amdgpu/amdgpu_object.c b/sys/dev/pci/drm/amd/amdgpu/amdgpu_object.c index f4f80f6ec..ffee2d9a9 100644 --- a/sys/dev/pci/drm/amd/amdgpu/amdgpu_object.c +++ b/sys/dev/pci/drm/amd/amdgpu/amdgpu_object.c @@ -615,6 +615,8 @@ int amdgpu_bo_create(struct amdgpu_device *adev, else amdgpu_bo_placement_from_domain(bo, bp->domain); if (bp->type == ttm_bo_type_kernel) + bo->tbo.priority = 2; + else if (!(bp->flags & AMDGPU_GEM_CREATE_DISCARDABLE)) bo->tbo.priority = 1; if (!bp->destroy) diff --git a/sys/dev/pci/drm/amd/amdgpu/gfx_v10_0.c b/sys/dev/pci/drm/amd/amdgpu/gfx_v10_0.c index a0bf45202..744fd26c2 100644 --- a/sys/dev/pci/drm/amd/amdgpu/gfx_v10_0.c +++ b/sys/dev/pci/drm/amd/amdgpu/gfx_v10_0.c @@ -9157,7 +9157,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = { 7 + /* PIPELINE_SYNC */ SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + - 2 + /* VM_FLUSH */ + 4 + /* VM_FLUSH */ 8 + /* FENCE for VM_FLUSH */ 20 + /* GDS switch */ 4 + /* double SWITCH_BUFFER, @@ -9248,7 +9248,6 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = { 7 + /* gfx_v10_0_ring_emit_pipeline_sync */ SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + - 2 + /* gfx_v10_0_ring_emit_vm_flush */ 8 + 8 + 8, /* gfx_v10_0_ring_emit_fence_kiq x3 for user fence, vm fence */ .emit_ib_size = 7, /* gfx_v10_0_ring_emit_ib_compute */ .emit_ib = gfx_v10_0_ring_emit_ib_compute, diff --git a/sys/dev/pci/drm/amd/amdgpu/gfx_v11_0.c b/sys/dev/pci/drm/amd/amdgpu/gfx_v11_0.c index f122c43bd..01bac8d94 100644 --- a/sys/dev/pci/drm/amd/amdgpu/gfx_v11_0.c +++ b/sys/dev/pci/drm/amd/amdgpu/gfx_v11_0.c @@ -6102,7 +6102,7 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = { 7 + /* PIPELINE_SYNC */ SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + - 2 + /* VM_FLUSH */ + 4 + /* VM_FLUSH */ 8 + /* FENCE for VM_FLUSH */ 20 + /* GDS switch */ 5 + /* COND_EXEC */ @@ -6187,7 +6187,6 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = { 7 + /* gfx_v11_0_ring_emit_pipeline_sync */ SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + - 2 + /* gfx_v11_0_ring_emit_vm_flush */ 8 + 8 + 8, /* gfx_v11_0_ring_emit_fence_kiq x3 for user fence, vm fence */ .emit_ib_size = 7, /* gfx_v11_0_ring_emit_ib_compute */ .emit_ib = gfx_v11_0_ring_emit_ib_compute, diff --git a/sys/dev/pci/drm/amd/amdgpu/gfx_v9_0.c b/sys/dev/pci/drm/amd/amdgpu/gfx_v9_0.c index 3878529d5..68b32849a 100644 --- a/sys/dev/pci/drm/amd/amdgpu/gfx_v9_0.c +++ b/sys/dev/pci/drm/amd/amdgpu/gfx_v9_0.c @@ -6988,7 +6988,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = { 7 + /* gfx_v9_0_ring_emit_pipeline_sync */ SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + - 2 + /* gfx_v9_0_ring_emit_vm_flush */ 8 + 8 + 8 + /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */ 7 + /* gfx_v9_0_emit_mem_sync */ 5 + /* gfx_v9_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX register */ @@ -7026,7 +7025,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = { 7 + /* gfx_v9_0_ring_emit_pipeline_sync */ SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + - 2 + /* gfx_v9_0_ring_emit_vm_flush */ 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */ .emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */ .emit_fence = gfx_v9_0_ring_emit_fence_kiq, diff --git a/sys/dev/pci/drm/amd/amdgpu/gfx_v9_4_3.c b/sys/dev/pci/drm/amd/amdgpu/gfx_v9_4_3.c 
index 7691d173c..d06192829 100644 --- a/sys/dev/pci/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/sys/dev/pci/drm/amd/amdgpu/gfx_v9_4_3.c @@ -425,16 +425,16 @@ out: static int gfx_v9_4_3_init_microcode(struct amdgpu_device *adev) { - const char *chip_name; + char ucode_prefix[15]; int r; - chip_name = "gc_9_4_3"; + amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix)); - r = gfx_v9_4_3_init_rlc_microcode(adev, chip_name); + r = gfx_v9_4_3_init_rlc_microcode(adev, ucode_prefix); if (r) return r; - r = gfx_v9_4_3_init_cp_compute_microcode(adev, chip_name); + r = gfx_v9_4_3_init_cp_compute_microcode(adev, ucode_prefix); if (r) return r; diff --git a/sys/dev/pci/drm/amd/amdkfd/kfd_migrate.c b/sys/dev/pci/drm/amd/amdkfd/kfd_migrate.c index 659313648..3263b5fa1 100644 --- a/sys/dev/pci/drm/amd/amdkfd/kfd_migrate.c +++ b/sys/dev/pci/drm/amd/amdkfd/kfd_migrate.c @@ -516,10 +516,19 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc, start = prange->start << PAGE_SHIFT; end = (prange->last + 1) << PAGE_SHIFT; + r = amdgpu_amdkfd_reserve_mem_limit(node->adev, + prange->npages * PAGE_SIZE, + KFD_IOC_ALLOC_MEM_FLAGS_VRAM, + node->xcp ? node->xcp->id : 0); + if (r) { + dev_dbg(node->adev->dev, "failed to reserve VRAM, r: %ld\n", r); + return -ENOSPC; + } + r = svm_range_vram_node_new(node, prange, true); if (r) { dev_dbg(node->adev->dev, "fail %ld to alloc vram\n", r); - return r; + goto out; } ttm_res_offset = prange->offset << PAGE_SHIFT; @@ -549,6 +558,11 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc, svm_range_vram_node_free(prange); } +out: + amdgpu_amdkfd_unreserve_mem_limit(node->adev, + prange->npages * PAGE_SIZE, + KFD_IOC_ALLOC_MEM_FLAGS_VRAM, + node->xcp ? node->xcp->id : 0); return r < 0 ? r : 0; } diff --git a/sys/dev/pci/drm/amd/amdkfd/kfd_process.c b/sys/dev/pci/drm/amd/amdkfd/kfd_process.c index 7a1a57410..d98e45aec 100644 --- a/sys/dev/pci/drm/amd/amdkfd/kfd_process.c +++ b/sys/dev/pci/drm/amd/amdkfd/kfd_process.c @@ -828,6 +828,14 @@ struct kfd_process *kfd_create_process(struct task_struct *thread) if (process) { pr_debug("Process already found\n"); } else { + /* If the process just called exec(3), it is possible that the + * cleanup of the kfd_process (following the release of the mm + * of the old process image) is still in the cleanup work queue. + * Make sure to drain any job before trying to recreate any + * resource for this process. 
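+ * (flush_workqueue() waits for every work item queued before the call + * to finish, so any pending kfd_process cleanup completes first.)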
+ */ + flush_workqueue(kfd_process_wq); + process = create_process(thread); if (IS_ERR(process)) goto out; diff --git a/sys/dev/pci/drm/amd/amdkfd/kfd_svm.c b/sys/dev/pci/drm/amd/amdkfd/kfd_svm.c index 87e9ca65e..ce76d4554 100644 --- a/sys/dev/pci/drm/amd/amdkfd/kfd_svm.c +++ b/sys/dev/pci/drm/amd/amdkfd/kfd_svm.c @@ -3416,7 +3416,7 @@ svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange, r = svm_migrate_to_vram(prange, best_loc, mm, KFD_MIGRATE_TRIGGER_PREFETCH); *migrated = !r; - return r; + return 0; } int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence) diff --git a/sys/dev/pci/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/sys/dev/pci/drm/amd/display/amdgpu_dm/amdgpu_dm.c index ff3730cd7..ab5cdb0ab 100644 --- a/sys/dev/pci/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/sys/dev/pci/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2960,6 +2960,7 @@ static int dm_resume(void *handle) dc_stream_release(dm_new_crtc_state->stream); dm_new_crtc_state->stream = NULL; } + dm_new_crtc_state->base.color_mgmt_changed = true; } for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) { diff --git a/sys/dev/pci/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/sys/dev/pci/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 10dd4cd6f..2104511f3 100644 --- a/sys/dev/pci/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/sys/dev/pci/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -606,6 +606,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, &connector->base, dev->mode_config.tile_property, 0); + connector->colorspace_property = master->base.colorspace_property; + if (connector->colorspace_property) + drm_connector_attach_colorspace_property(connector); drm_connector_set_path_property(connector, pathprop); diff --git a/sys/dev/pci/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/sys/dev/pci/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c index 8776055bb..d4d3f58a6 100644 --- a/sys/dev/pci/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c +++ b/sys/dev/pci/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c @@ -145,6 +145,10 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base, */ clk_mgr_base->clks.zstate_support = new_clocks->zstate_support; if (safe_to_lower) { + if (clk_mgr_base->clks.dtbclk_en && !new_clocks->dtbclk_en) { + dcn315_smu_set_dtbclk(clk_mgr, false); + clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en; + } /* check that we're not already in lower */ if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) { display_count = dcn315_get_active_display_cnt_wa(dc, context); @@ -160,6 +164,10 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base, } } } else { + if (!clk_mgr_base->clks.dtbclk_en && new_clocks->dtbclk_en) { + dcn315_smu_set_dtbclk(clk_mgr, true); + clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en; + } /* check that we're not already in D0 */ if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_MISSION_MODE) { union display_idle_optimization_u idle_info = { 0 }; diff --git a/sys/dev/pci/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/sys/dev/pci/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c index e9345f655..2428a4763 100644 --- a/sys/dev/pci/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c +++ b/sys/dev/pci/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c @@ -547,8 +547,12 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, * since we calculate mode support based on softmax being the max UCLK * frequency. 
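* (With disable_dc_mode_overwrite set, the branch added below pins both * the hard max and the hard min to max_memclk_mhz instead.)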
*/ - dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, - dc->clk_mgr->bw_params->dc_mode_softmax_memclk); + if (dc->debug.disable_dc_mode_overwrite) { + dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK, dc->clk_mgr->bw_params->max_memclk_mhz); + dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, dc->clk_mgr->bw_params->max_memclk_mhz); + } else + dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, + dc->clk_mgr->bw_params->dc_mode_softmax_memclk); } else { dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, dc->clk_mgr->bw_params->max_memclk_mhz); } @@ -581,8 +585,13 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, /* set UCLK to requested value if P-State switching is supported, or to re-enable P-State switching */ if (clk_mgr_base->clks.p_state_change_support && (update_uclk || !clk_mgr_base->clks.prev_p_state_change_support) && - !dc->work_arounds.clock_update_disable_mask.uclk) + !dc->work_arounds.clock_update_disable_mask.uclk) { + if (dc->clk_mgr->dc_mode_softmax_enabled && dc->debug.disable_dc_mode_overwrite) + dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK, + max((int)dc->clk_mgr->bw_params->dc_mode_softmax_memclk, khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz))); + dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz)); + } if (clk_mgr_base->clks.num_ways != new_clocks->num_ways && clk_mgr_base->clks.num_ways > new_clocks->num_ways) { diff --git a/sys/dev/pci/drm/amd/display/dc/core/amdgpu_dc.c b/sys/dev/pci/drm/amd/display/dc/core/amdgpu_dc.c index 46b10ff8f..72db370e2 100644 --- a/sys/dev/pci/drm/amd/display/dc/core/amdgpu_dc.c +++ b/sys/dev/pci/drm/amd/display/dc/core/amdgpu_dc.c @@ -1710,6 +1710,9 @@ bool dc_validate_boot_timing(const struct dc *dc, return false; } + if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) + return false; + if (dc->link_srv->edp_is_ilr_optimization_required(link, crtc_timing)) { DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n"); return false; diff --git a/sys/dev/pci/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/sys/dev/pci/drm/amd/display/dc/dcn10/dcn10_cm_common.c index 3538973bd..c0372aa4e 100644 --- a/sys/dev/pci/drm/amd/display/dc/dcn10/dcn10_cm_common.c +++ b/sys/dev/pci/drm/amd/display/dc/dcn10/dcn10_cm_common.c @@ -382,6 +382,11 @@ bool cm_helper_translate_curve_to_hw_format(struct dc_context *ctx, i += increment) { if (j == hw_points - 1) break; + if (i >= TRANSFER_FUNC_POINTS) { + DC_LOG_ERROR("Index out of bounds: i=%d, TRANSFER_FUNC_POINTS=%d\n", + i, TRANSFER_FUNC_POINTS); + return false; + } rgb_resulted[j].red = output_tf->tf_pts.red[i]; rgb_resulted[j].green = output_tf->tf_pts.green[i]; rgb_resulted[j].blue = output_tf->tf_pts.blue[i]; diff --git a/sys/dev/pci/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/sys/dev/pci/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c index deb6d162a..7307b7b8d 100644 --- a/sys/dev/pci/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c +++ b/sys/dev/pci/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c @@ -291,6 +291,7 @@ static struct _vcs_dpi_soc_bounding_box_st dcn3_15_soc = { .do_urgent_latency_adjustment = false, .urgent_latency_adjustment_fabric_clock_component_us = 0, .urgent_latency_adjustment_fabric_clock_reference_mhz = 0, + .dispclk_dppclk_vco_speed_mhz = 2400.0, .num_chans = 4, .dummy_pstate_latency_us = 10.0 }; @@ -438,6 +439,7 @@ static struct _vcs_dpi_soc_bounding_box_st dcn3_16_soc = { .do_urgent_latency_adjustment = false, .urgent_latency_adjustment_fabric_clock_component_us = 0, 
.urgent_latency_adjustment_fabric_clock_reference_mhz = 0, + .dispclk_dppclk_vco_speed_mhz = 2500.0, }; void dcn31_zero_pipe_dcc_fraction(display_e2e_pipe_params_st *pipes, diff --git a/sys/dev/pci/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/sys/dev/pci/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c index 3db9489c0..4c6a6d6c1 100644 --- a/sys/dev/pci/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c +++ b/sys/dev/pci/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c @@ -270,7 +270,7 @@ static void set_usb4_req_bw_req(struct dc_link *link, int req_bw) /* Error check whether requested and allocated are equal */ req_bw = requested_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity); - if (req_bw == link->dpia_bw_alloc_config.allocated_bw) { + if (req_bw && (req_bw == link->dpia_bw_alloc_config.allocated_bw)) { DC_LOG_ERROR("%s: Request bw equals to allocated bw for link(%d)\n", __func__, link->link_index); } @@ -341,6 +341,14 @@ bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link) ret = true; init_usb4_bw_struct(link); link->dpia_bw_alloc_config.bw_alloc_enabled = true; + + /* + * During DP tunnel creation, CM preallocates BW and reduces estimated BW of other + * DPIAs. CM releases the preallocation only when allocation is complete. Do a zero alloc + * to make the CM release the preallocation and update estimated BW correctly for + * all DPIAs per host router + */ + link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, 0); } } diff --git a/sys/dev/pci/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/sys/dev/pci/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 37902e918..ca4b8f968 100644 --- a/sys/dev/pci/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/sys/dev/pci/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -2041,6 +2041,20 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table return sizeof(struct gpu_metrics_v1_3); } +static void smu_v13_0_6_restore_pci_config(struct smu_context *smu) +{ + STUB(); +#if notyet + struct amdgpu_device *adev = smu->adev; + int i; + + for (i = 0; i < 16; i++) + pci_write_config_dword(adev->pdev, i * 4, + adev->pdev->saved_config_space[i]); + pci_restore_msi_state(adev->pdev); +#endif +} + static int smu_v13_0_6_mode2_reset(struct smu_context *smu) { int ret = 0, index; @@ -2062,6 +2076,20 @@ static int smu_v13_0_6_mode2_reset(struct smu_context *smu) /* Restore the config space saved during init */ amdgpu_device_load_pci_state(adev->pdev); + /* Certain platforms have switches which assign virtual BAR values to + * devices. OS uses the virtual BAR values and device behind the switch + * is assigned another BAR value. When device's config space registers + * are queried, switch returns the virtual BAR values. When mode-2 reset + * is performed, switch is unaware of it, and will continue to return + * the same virtual values to the OS. This affects + * pci_restore_config_space() API as it doesn't write the value saved if + * the current value read from config space is the same as what is + * saved. As a workaround, make sure the config space is restored + * always.
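+ * (The check below skips APUs; only discrete devices can sit behind + * such a switch.)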
+ */ + if (!(adev->flags & AMD_IS_APU)) + smu_v13_0_6_restore_pci_config(smu); + dev_dbg(smu->adev->dev, "wait for reset ack\n"); do { ret = smu_cmn_wait_for_response(smu); diff --git a/sys/dev/pci/drm/display/drm_dp_helper.c b/sys/dev/pci/drm/display/drm_dp_helper.c index cdcb06279..c2926c2eb 100644 --- a/sys/dev/pci/drm/display/drm_dp_helper.c +++ b/sys/dev/pci/drm/display/drm_dp_helper.c @@ -532,6 +532,15 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request, mutex_lock(&aux->hw_mutex); + /* + * If the device attached to the aux bus is powered down then there's + * no reason to attempt a transfer. Error out immediately. + */ + if (aux->powered_down) { + ret = -EBUSY; + goto unlock; + } + /* * The specification doesn't give any recommendation on how often to * retry native transactions. We used to retry 7 times like for @@ -599,6 +608,29 @@ int drm_dp_dpcd_probe(struct drm_dp_aux *aux, unsigned int offset) } EXPORT_SYMBOL(drm_dp_dpcd_probe); +/** + * drm_dp_dpcd_set_powered() - Set whether the DP device is powered + * @aux: DisplayPort AUX channel; for convenience it's OK to pass NULL here + * and the function will be a no-op. + * @powered: true if powered; false if not + * + * If the endpoint device on the DP AUX bus is known to be powered down + * then this function can be called to make future transfers fail immediately + * instead of needing to time out. + * + * If this function is never called then a device defaults to being powered. + */ +void drm_dp_dpcd_set_powered(struct drm_dp_aux *aux, bool powered) +{ + if (!aux) + return; + + mutex_lock(&aux->hw_mutex); + aux->powered_down = !powered; + mutex_unlock(&aux->hw_mutex); +} +EXPORT_SYMBOL(drm_dp_dpcd_set_powered); + /** * drm_dp_dpcd_read() - read a series of bytes from the DPCD * @aux: DisplayPort AUX channel (SST or MST) @@ -1855,6 +1887,9 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, struct drm_dp_aux_msg msg; int err = 0; + if (aux->powered_down) + return -EBUSY; + dp_aux_i2c_transfer_size = clamp(dp_aux_i2c_transfer_size, 1, DP_AUX_MAX_PAYLOAD_BYTES); memset(&msg, 0, sizeof(msg)); diff --git a/sys/dev/pci/drm/drm_bridge.c b/sys/dev/pci/drm/drm_bridge.c index ce4906209..e02c2923e 100644 --- a/sys/dev/pci/drm/drm_bridge.c +++ b/sys/dev/pci/drm/drm_bridge.c @@ -694,11 +694,17 @@ void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge, */ list_for_each_entry_from(next, &encoder->bridge_chain, chain_node) { - if (next->pre_enable_prev_first) { + if (!next->pre_enable_prev_first) { next = list_prev_entry(next, chain_node); limit = next; break; } + + if (list_is_last(&next->chain_node, + &encoder->bridge_chain)) { + limit = next; + break; + } } /* Call these bridges in reverse order */ @@ -781,7 +787,7 @@ void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge, /* Found first bridge that does NOT * request prev to be enabled first */ - limit = list_prev_entry(next, chain_node); + limit = next; break; } } diff --git a/sys/dev/pci/drm/drm_edid.c b/sys/dev/pci/drm/drm_edid.c index ba92bd045..d19497022 100644 --- a/sys/dev/pci/drm/drm_edid.c +++ b/sys/dev/pci/drm/drm_edid.c @@ -7365,7 +7365,7 @@ static void drm_parse_tiled_block(struct drm_connector *connector, static bool displayid_is_tiled_block(const struct displayid_iter *iter, const struct displayid_block *block) { - return (displayid_version(iter) == DISPLAY_ID_STRUCTURE_VER_12 && + return (displayid_version(iter) < DISPLAY_ID_STRUCTURE_VER_20 && block->tag == DATA_BLOCK_TILED_DISPLAY) || 
(displayid_version(iter) == DISPLAY_ID_STRUCTURE_VER_20 && block->tag == DATA_BLOCK_2_TILED_DISPLAY_TOPOLOGY); diff --git a/sys/dev/pci/drm/drm_mipi_dsi.c b/sys/dev/pci/drm/drm_mipi_dsi.c index 97fd47b36..2ea9c6ddc 100644 --- a/sys/dev/pci/drm/drm_mipi_dsi.c +++ b/sys/dev/pci/drm/drm_mipi_dsi.c @@ -666,7 +666,7 @@ EXPORT_SYMBOL(mipi_dsi_set_maximum_return_packet_size); * * Return: 0 on success or a negative error code on failure. */ -ssize_t mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable) +int mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable) { /* Note: Needs updating for non-default PPS or algorithm */ u8 tx[2] = { enable << 0, 0 }; @@ -691,8 +691,8 @@ EXPORT_SYMBOL(mipi_dsi_compression_mode); * * Return: 0 on success or a negative error code on failure. */ -ssize_t mipi_dsi_picture_parameter_set(struct mipi_dsi_device *dsi, - const struct drm_dsc_picture_parameter_set *pps) +int mipi_dsi_picture_parameter_set(struct mipi_dsi_device *dsi, + const struct drm_dsc_picture_parameter_set *pps) { struct mipi_dsi_msg msg = { .channel = dsi->channel, diff --git a/sys/dev/pci/drm/i915/display/intel_backlight.c b/sys/dev/pci/drm/i915/display/intel_backlight.c index bb9f6faf7..cd65ded1f 100644 --- a/sys/dev/pci/drm/i915/display/intel_backlight.c +++ b/sys/dev/pci/drm/i915/display/intel_backlight.c @@ -280,7 +280,7 @@ static void ext_pwm_set_backlight(const struct drm_connector_state *conn_state, struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel; pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100); - pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state); + pwm_apply_might_sleep(panel->backlight.pwm, &panel->backlight.pwm_state); #endif } @@ -436,7 +436,7 @@ static void ext_pwm_disable_backlight(const struct drm_connector_state *old_conn intel_backlight_set_pwm_level(old_conn_state, level); panel->backlight.pwm_state.enabled = false; - pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state); + pwm_apply_might_sleep(panel->backlight.pwm, &panel->backlight.pwm_state); #endif } @@ -761,7 +761,7 @@ static void ext_pwm_enable_backlight(const struct intel_crtc_state *crtc_state, pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100); panel->backlight.pwm_state.enabled = true; - pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state); + pwm_apply_might_sleep(panel->backlight.pwm, &panel->backlight.pwm_state); #endif } diff --git a/sys/dev/pci/drm/i915/gt/intel_engine_cs.c b/sys/dev/pci/drm/i915/gt/intel_engine_cs.c index 32506b6b2..1f1973fba 100644 --- a/sys/dev/pci/drm/i915/gt/intel_engine_cs.c +++ b/sys/dev/pci/drm/i915/gt/intel_engine_cs.c @@ -927,6 +927,12 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt) if (IS_DG2(gt->i915)) { u8 first_ccs = __ffs(CCS_MASK(gt)); + /* + * Store the mask of active cslices before + * changing the CCS engine configuration + */ + gt->ccs.cslices = CCS_MASK(gt); + /* Mask off all the CCS engines */ info->engine_mask &= ~GENMASK(CCS3, CCS0); /* Put back in the first CCS engine */ diff --git a/sys/dev/pci/drm/i915/gt/intel_gt_ccs_mode.c b/sys/dev/pci/drm/i915/gt/intel_gt_ccs_mode.c index 99b71bb7d..3c62a44e9 100644 --- a/sys/dev/pci/drm/i915/gt/intel_gt_ccs_mode.c +++ b/sys/dev/pci/drm/i915/gt/intel_gt_ccs_mode.c @@ -19,7 +19,7 @@ unsigned int intel_gt_apply_ccs_mode(struct intel_gt *gt) /* Build the value for the fixed CCS load balancing */ for (cslice = 0; cslice < I915_MAX_CCS; cslice++) { - if (CCS_MASK(gt) & BIT(cslice)) +
if (gt->ccs.cslices & BIT(cslice)) /* * If available, assign the cslice * to the first available engine... diff --git a/sys/dev/pci/drm/i915/gt/intel_gt_types.h b/sys/dev/pci/drm/i915/gt/intel_gt_types.h index b7c2b22e4..7798398d3 100644 --- a/sys/dev/pci/drm/i915/gt/intel_gt_types.h +++ b/sys/dev/pci/drm/i915/gt/intel_gt_types.h @@ -207,6 +207,14 @@ struct intel_gt { [MAX_ENGINE_INSTANCE + 1]; enum intel_submission_method submission_method; + struct { + /* + * Mask of the non fused CCS slices + * to be used for the load balancing + */ + intel_engine_mask_t cslices; + } ccs; + /* * Default address space (either GGTT or ppGTT depending on arch). * diff --git a/sys/dev/pci/drm/i915/gt/selftest_migrate.c b/sys/dev/pci/drm/i915/gt/selftest_migrate.c index 713e018c7..31d206c0b 100644 --- a/sys/dev/pci/drm/i915/gt/selftest_migrate.c +++ b/sys/dev/pci/drm/i915/gt/selftest_migrate.c @@ -719,11 +719,9 @@ static int threaded_migrate(struct intel_migrate *migrate, if (IS_ERR_OR_NULL(tsk)) continue; - status = kthread_stop(tsk); + status = kthread_stop_put(tsk); if (status && !err) err = status; - - put_task_struct(tsk); } kfree(thread); diff --git a/sys/dev/pci/drm/i915/gt/uc/abi/guc_klvs_abi.h b/sys/dev/pci/drm/i915/gt/uc/abi/guc_klvs_abi.h index 58012edd4..4f4f53c42 100644 --- a/sys/dev/pci/drm/i915/gt/uc/abi/guc_klvs_abi.h +++ b/sys/dev/pci/drm/i915/gt/uc/abi/guc_klvs_abi.h @@ -29,9 +29,9 @@ */ #define GUC_KLV_LEN_MIN 1u -#define GUC_KLV_0_KEY (0xffff << 16) -#define GUC_KLV_0_LEN (0xffff << 0) -#define GUC_KLV_n_VALUE (0xffffffff << 0) +#define GUC_KLV_0_KEY (0xffffu << 16) +#define GUC_KLV_0_LEN (0xffffu << 0) +#define GUC_KLV_n_VALUE (0xffffffffu << 0) /** * DOC: GuC Self Config KLVs diff --git a/sys/dev/pci/drm/i915/gvt/interrupt.c b/sys/dev/pci/drm/i915/gvt/interrupt.c index 68eca023b..80301472a 100644 --- a/sys/dev/pci/drm/i915/gvt/interrupt.c +++ b/sys/dev/pci/drm/i915/gvt/interrupt.c @@ -405,7 +405,7 @@ static void init_irq_map(struct intel_gvt_irq *irq) #define MSI_CAP_DATA(offset) (offset + 8) #define MSI_CAP_EN 0x1 -static int inject_virtual_interrupt(struct intel_vgpu *vgpu) +static void inject_virtual_interrupt(struct intel_vgpu *vgpu) { unsigned long offset = vgpu->gvt->device_info.msi_cap_offset; u16 control, data; @@ -417,10 +417,10 @@ static int inject_virtual_interrupt(struct intel_vgpu *vgpu) /* Do not generate MSI if MSIEN is disabled */ if (!(control & MSI_CAP_EN)) - return 0; + return; if (WARN(control & GENMASK(15, 1), "only support one MSI format\n")) - return -EINVAL; + return; trace_inject_msi(vgpu->id, addr, data); @@ -434,10 +434,9 @@ static int inject_virtual_interrupt(struct intel_vgpu *vgpu) * returned and don't inject interrupt into guest. */ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status)) - return -ESRCH; - if (vgpu->msi_trigger && eventfd_signal(vgpu->msi_trigger, 1) != 1) - return -EFAULT; - return 0; + return; + if (vgpu->msi_trigger) + eventfd_signal(vgpu->msi_trigger, 1); } static void propagate_event(struct intel_gvt_irq *irq, diff --git a/sys/dev/pci/drm/include/drm/display/drm_dp_helper.h b/sys/dev/pci/drm/include/drm/display/drm_dp_helper.h index 6aa750e3c..133d996b4 100644 --- a/sys/dev/pci/drm/include/drm/display/drm_dp_helper.h +++ b/sys/dev/pci/drm/include/drm/display/drm_dp_helper.h @@ -449,9 +449,15 @@ struct drm_dp_aux { * @is_remote: Is this AUX CH actually using sideband messaging. */ bool is_remote; + + /** + * @powered_down: If true then the remote endpoint is powered down. 
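+ * Set with drm_dp_dpcd_set_powered(); while true, DPCD and + * I2C-over-AUX transfers fail immediately with -EBUSY.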
+ */ + bool powered_down; }; int drm_dp_dpcd_probe(struct drm_dp_aux *aux, unsigned int offset); +void drm_dp_dpcd_set_powered(struct drm_dp_aux *aux, bool powered); ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset, void *buffer, size_t size); ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset, diff --git a/sys/dev/pci/drm/include/drm/drm_displayid.h b/sys/dev/pci/drm/include/drm/drm_displayid.h index 566497eeb..bc1f6b378 100644 --- a/sys/dev/pci/drm/include/drm/drm_displayid.h +++ b/sys/dev/pci/drm/include/drm/drm_displayid.h @@ -30,7 +30,6 @@ struct drm_edid; #define VESA_IEEE_OUI 0x3a0292 /* DisplayID Structure versions */ -#define DISPLAY_ID_STRUCTURE_VER_12 0x12 #define DISPLAY_ID_STRUCTURE_VER_20 0x20 /* DisplayID Structure v1r2 Data Blocks */ diff --git a/sys/dev/pci/drm/include/drm/drm_mipi_dsi.h b/sys/dev/pci/drm/include/drm/drm_mipi_dsi.h index ca58202ec..a81d1bbde 100644 --- a/sys/dev/pci/drm/include/drm/drm_mipi_dsi.h +++ b/sys/dev/pci/drm/include/drm/drm_mipi_dsi.h @@ -70,8 +70,8 @@ ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *, u8, const void *, size_t); int mipi_dsi_dcs_nop(struct mipi_dsi_device *); int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *, u16); bool mipi_dsi_packet_format_is_long(u8); -ssize_t mipi_dsi_compression_mode(struct mipi_dsi_device *, bool); -ssize_t mipi_dsi_picture_parameter_set(struct mipi_dsi_device *, +int mipi_dsi_compression_mode(struct mipi_dsi_device *, bool); +int mipi_dsi_picture_parameter_set(struct mipi_dsi_device *, const struct drm_dsc_picture_parameter_set *); static inline int
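/*
 * A minimal usage sketch for the drm_dp_dpcd_set_powered() helper added
 * above. Everything driver-side here is hypothetical (example_panel, its
 * prepare/unprepare hooks, the supply regulator); only
 * drm_dp_dpcd_set_powered(), drm_dp_dpcd_writeb() and the DP_SET_POWER
 * DPCD constants come from the headers touched in this diff.
 *
 *	static int example_panel_prepare(struct example_panel *p)
 *	{
 *		int ret = regulator_enable(p->supply);
 *
 *		if (ret)
 *			return ret;
 *		// The endpoint has power again: let AUX transfers through.
 *		drm_dp_dpcd_set_powered(p->aux, true);
 *		ret = drm_dp_dpcd_writeb(p->aux, DP_SET_POWER, DP_SET_POWER_D0);
 *		return ret < 0 ? ret : 0;
 *	}
 *
 *	static void example_panel_unprepare(struct example_panel *p)
 *	{
 *		// From here on AUX transfers fail with -EBUSY instead of
 *		// timing out against a powered-off sink.
 *		drm_dp_dpcd_set_powered(p->aux, false);
 *		regulator_disable(p->supply);
 *	}
 */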