Sync code with the latest fixes and improvements from OpenBSD

purplerain 2023-07-30 17:58:45 +00:00
parent f960599e67
commit 691f97cc10
Signed by: purplerain
GPG key ID: F42C07F07E2E35B7
215 changed files with 1520 additions and 11518 deletions

View file

@ -1,4 +1,4 @@
/* $OpenBSD: copy.S,v 1.18 2023/01/31 15:18:54 deraadt Exp $ */
/* $OpenBSD: copy.S,v 1.19 2023/07/28 06:18:35 guenther Exp $ */
/* $NetBSD: copy.S,v 1.1 2003/04/26 18:39:26 fvdl Exp $ */
/*
@ -299,11 +299,5 @@ copystr_return:
ret
lfence
.section .rodata
.globl _stac
_stac:
stac
.globl _clac
_clac:
clac
CODEPATCH_CODE(_stac, stac)
CODEPATCH_CODE(_clac, clac)

View file

@ -1,4 +1,4 @@
/* $OpenBSD: cpu.c,v 1.173 2023/07/25 04:42:00 deraadt Exp $ */
/* $OpenBSD: cpu.c,v 1.174 2023/07/28 06:36:16 guenther Exp $ */
/* $NetBSD: cpu.c,v 1.1 2003/04/26 18:39:26 fvdl Exp $ */
/*-
@ -222,7 +222,7 @@ replacemeltdown(void)
/* enable reuse of PCID for U-K page tables */
if (pmap_use_pcid) {
extern long _pcid_set_reuse;
DPRINTF("%s: codepatching PCID use", __func__);
DPRINTF("%s: codepatching PCID use\n", __func__);
codepatch_replace(CPTAG_PCID_SET_REUSE,
&_pcid_set_reuse, PCID_SET_REUSE_SIZE);
}

View file

@ -1,4 +1,4 @@
/* $OpenBSD: locore.S,v 1.138 2023/07/27 00:28:24 guenther Exp $ */
/* $OpenBSD: locore.S,v 1.139 2023/07/28 06:18:35 guenther Exp $ */
/* $NetBSD: locore.S,v 1.13 2004/03/25 18:33:17 drochner Exp $ */
/*
@ -1084,30 +1084,16 @@ NENTRY(xsetbv_resume)
lfence
END(xsetbv_user)
.section .rodata
.globl _xrstor
_xrstor:
xrstor64 (%rdi)
.globl _xrstors
_xrstors:
xrstors64 (%rdi)
.globl _xsave
_xsave:
xsave64 (%rdi)
.globl _xsaves
_xsaves:
xsaves64 (%rdi)
.globl _xsaveopt
_xsaveopt:
xsaveopt64 (%rdi)
.globl _pcid_set_reuse
_pcid_set_reuse:
orl $(CR3_REUSE_PCID >> 32),CPUVAR(USER_CR3 + 4)
CODEPATCH_CODE(_xrstor, xrstor64 (%rdi))
CODEPATCH_CODE(_xrstors, xrstors64 (%rdi))
CODEPATCH_CODE(_xsave, xsave64 (%rdi))
CODEPATCH_CODE(_xsaves, xsaves64 (%rdi))
CODEPATCH_CODE(_xsaveopt, xsaveopt64 (%rdi))
CODEPATCH_CODE(_pcid_set_reuse,
orl $(CR3_REUSE_PCID >> 32),CPUVAR(USER_CR3 + 4))
CODEPATCH_CODE_LEN(_jmprax, jmp *%rax; int3)
CODEPATCH_CODE_LEN(_jmpr11, jmp *%r11; int3)
CODEPATCH_CODE_LEN(_jmpr13, jmp *%r13; int3)
ENTRY(pagezero)
RETGUARD_SETUP(pagezero, r11)

View file

@ -1,4 +1,4 @@
/* $OpenBSD: codepatch.h,v 1.15 2023/07/10 03:32:10 guenther Exp $ */
/* $OpenBSD: codepatch.h,v 1.16 2023/07/28 06:18:35 guenther Exp $ */
/*
* Copyright (c) 2014-2015 Stefan Fritsch <sf@sfritsch.de>
*
@ -97,4 +97,20 @@ void codepatch_disable(void);
.byte 0x0f, 0x1f, 0x40, 0x00 ;\
CODEPATCH_END2(997, CPTAG_PCID_SET_REUSE)
/* Would be neat if these could be in something like .cptext */
#define CODEPATCH_CODE(symbol, instructions...) \
.section .rodata; \
.globl symbol; \
symbol: instructions; \
.size symbol, . - symbol
/* provide a (short) variable with the length of the patch */
#define CODEPATCH_CODE_LEN(symbol, instructions...) \
CODEPATCH_CODE(symbol, instructions); \
996: .globl symbol##_len; \
.align 2; \
symbol##_len: \
.short 996b - symbol; \
.size symbol##_len, 2
#endif /* _MACHINE_CODEPATCH_H_ */
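
The new macros centralize what copy.S and locore.S previously spelled out by hand: CODEPATCH_CODE() emits a named, sized blob of instruction bytes into .rodata, and CODEPATCH_CODE_LEN() additionally emits a short `symbol_len` holding the blob's length, relying on `##` token pasting plus a GNU named-variadic parameter so the instruction text may itself contain commas. A user-space C analogue of that mechanism (a sketch for illustration only, not kernel code; the byte values are illustrative stand-ins):

```c
/*
 * User-space analogue of the CODEPATCH_CODE_LEN() mechanism:
 * "sym##_len" pastes a _len suffix onto the symbol name, and the GNU
 * named-variadic parameter "bytes..." lets the payload contain commas,
 * just as "instructions..." does in the kernel macro.
 */
#include <stdio.h>

#define DEFINE_BLOB(sym, bytes...)					\
	static const unsigned char sym[] = { bytes };			\
	static const unsigned short sym##_len = sizeof(sym)

/* Byte values below are illustrative stand-ins for real encodings. */
DEFINE_BLOB(stac_blob, 0x0f, 0x01, 0xcb);
DEFINE_BLOB(clac_blob, 0x0f, 0x01, 0xca);

int
main(void)
{
	printf("stac blob: %hu bytes, clac blob: %hu bytes\n",
	    stac_blob_len, clac_blob_len);
	return (0);
}
```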

View file

@ -1,4 +1,4 @@
/* $OpenBSD: specialreg.h,v 1.106 2023/07/27 01:51:35 guenther Exp $ */
/* $OpenBSD: specialreg.h,v 1.107 2023/07/27 16:33:56 guenther Exp $ */
/* $NetBSD: specialreg.h,v 1.1 2003/04/26 18:39:48 fvdl Exp $ */
/* $NetBSD: x86/specialreg.h,v 1.2 2003/04/25 21:54:30 fvdl Exp $ */
@ -237,7 +237,7 @@
#define SEFF0ECX_AVX512VBMI 0x00000002 /* AVX-512 vector bit inst */
#define SEFF0ECX_UMIP 0x00000004 /* UMIP support */
#define SEFF0ECX_PKU 0x00000008 /* Page prot keys for user mode */
#define SEFF0ECX_WAITPKG 0x00000010 /* UMONITOR/UMWAIT/TPAUSE insns */
#define SEFF0ECX_WAITPKG 0x00000020 /* UMONITOR/UMWAIT/TPAUSE insns */
#define SEFF0ECX_PKS 0x80000000 /* Page prot keys for sup mode */
/* SEFF EDX bits */
#define SEFF0EDX_AVX512_4FNNIW 0x00000004 /* AVX-512 neural network insns */

View file

@ -1,4 +1,4 @@
/* $OpenBSD: specialreg.h,v 1.83 2023/07/24 14:54:00 deraadt Exp $ */
/* $OpenBSD: specialreg.h,v 1.84 2023/07/28 03:06:46 jsg Exp $ */
/* $NetBSD: specialreg.h,v 1.7 1994/10/27 04:16:26 cgd Exp $ */
/*-
@ -190,7 +190,7 @@
#define SEFF0ECX_AVX512VBMI 0x00000002 /* AVX-512 vector bit inst */
#define SEFF0ECX_UMIP 0x00000004 /* UMIP support */
#define SEFF0ECX_PKU 0x00000008 /* Page prot keys for user mode */
#define SEFF0ECX_WAITPKG 0x00000010 /* UMONITOR/UMWAIT/TPAUSE insns */
#define SEFF0ECX_WAITPKG 0x00000020 /* UMONITOR/UMWAIT/TPAUSE insns */
/* SEFF EDX bits */
#define SEFF0EDX_AVX512_4FNNIW 0x00000004 /* AVX-512 neural network insns */
#define SEFF0EDX_AVX512_4FMAPS 0x00000008 /* AVX-512 mult accum single prec */

View file

@ -1,4 +1,4 @@
/* $OpenBSD: kcov.c,v 1.48 2022/01/19 06:46:55 anton Exp $ */
/* $OpenBSD: kcov.c,v 1.49 2023/07/29 06:52:50 anton Exp $ */
/*
* Copyright (c) 2018 Anton Lindqvist <anton@openbsd.org>
@ -119,7 +119,6 @@ struct kcov_remote *kr_lookup(int, void *);
static struct kcov_dev *kd_curproc(int);
static struct kcov_cpu *kd_curcpu(void);
static uint64_t kd_claim(struct kcov_dev *, int, int);
static inline int inintr(void);
TAILQ_HEAD(, kcov_dev) kd_list = TAILQ_HEAD_INITIALIZER(kd_list);
TAILQ_HEAD(, kcov_remote) kr_list = TAILQ_HEAD_INITIALIZER(kr_list);
@ -130,10 +129,21 @@ int kr_cold = 1;
struct mutex kcov_mtx = MUTEX_INITIALIZER(IPL_MPFLOOR);
struct pool kr_pool;
static inline int
inintr(struct cpu_info *ci)
{
#if defined(__amd64__) || defined(__arm__) || defined(__arm64__) || \
defined(__i386__)
return (ci->ci_idepth > 0);
#else
return (0);
#endif
}
/*
* Compiling the kernel with the `-fsanitize-coverage=trace-pc' option will
* cause the following function to be called upon function entry and before
* each block instructions that maps to a single line in the original source
* each block of instructions that maps to a single line in the original source
* code.
*
* If kcov is enabled for the current thread, the kernel program counter will
@ -564,6 +574,7 @@ kd_free(struct kcov_dev *kd)
static struct kcov_dev *
kd_curproc(int mode)
{
struct cpu_info *ci;
struct kcov_dev *kd;
/*
@ -574,7 +585,8 @@ kd_curproc(int mode)
if (__predict_false(kcov_cold))
return (NULL);
kd = curproc->p_kd;
ci = curcpu();
kd = ci->ci_curproc->p_kd;
if (__predict_true(kd == NULL) || kd->kd_mode != mode)
return (NULL);
@ -586,7 +598,7 @@ kd_curproc(int mode)
return (NULL);
/* Do not trace in interrupt context unless this is a remote section. */
if (inintr() && kd->kd_intr == 0)
if (inintr(ci) && kd->kd_intr == 0)
return (NULL);
return (kd);
@ -628,20 +640,10 @@ kd_claim(struct kcov_dev *kd, int stride, int nmemb)
}
}
static inline int
inintr(void)
{
#if defined(__amd64__) || defined(__arm__) || defined(__arm64__) || \
defined(__i386__)
return (curcpu()->ci_idepth > 0);
#else
return (0);
#endif
}
void
kcov_remote_enter(int subsystem, void *id)
{
struct cpu_info *ci;
struct kcov_cpu *kc;
struct kcov_dev *kd;
struct kcov_remote *kr;
@ -654,8 +656,9 @@ kcov_remote_enter(int subsystem, void *id)
kd = kr->kr_kd;
if (kd == NULL || kd->kd_state != KCOV_STATE_TRACE)
goto out;
p = curproc;
if (inintr()) {
ci = curcpu();
p = ci->ci_curproc;
if (inintr(ci)) {
/*
* XXX we only expect to be called from softclock interrupts at
* this point.
@ -683,18 +686,20 @@ out:
void
kcov_remote_leave(int subsystem, void *id)
{
struct cpu_info *ci;
struct kcov_cpu *kc;
struct kcov_remote *kr;
struct proc *p;
mtx_enter(&kcov_mtx);
p = curproc;
ci = curcpu();
p = ci->ci_curproc;
if (p->p_kd == NULL)
goto out;
kr = kr_lookup(subsystem, id);
if (kr == NULL)
goto out;
if (inintr()) {
if (inintr(ci)) {
kc = kd_curcpu();
if (kc == NULL || kc->kc_kd.kd_intr == 0)
goto out;
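
For context on the comment corrected above: with `-fsanitize-coverage=trace-pc` the compiler inserts a call to __sanitizer_cov_trace_pc() into each instrumented block, and kcov's job is to record the caller's program counter into the per-device buffer. A stand-alone user-space sketch of the same hook (illustrative only; kcov.c provides the real kernel implementation, and the hook should be built without instrumentation, e.g. in a separate file, if your compiler would otherwise instrument it too):

```c
/*
 * trace.c -- user-space sketch of the trace-pc hook, illustrative only.
 * Build (clang): cc -fsanitize-coverage=trace-pc -O2 trace.c
 * Each instrumented block in work() and main() calls the hook, which
 * prints the program counter of its call site.
 */
#include <stdio.h>

void
__sanitizer_cov_trace_pc(void)
{
	/* Address of the call site in the instrumented code. */
	printf("pc %p\n", __builtin_return_address(0));
}

static int
work(int x)
{
	if (x > 1)
		x *= 2;		/* separate block, separately traced */
	return (x);
}

int
main(void)
{
	printf("result %d\n", work(3));
	return (0);
}
```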

View file

@ -40,6 +40,9 @@
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"
#include "dc/inc/dc_link_ddc.h"
#include "dpcd_defs.h"
#include "dc/inc/link_dpcd.h"
#include "link_service_types.h"
#include "vid.h"
#include "amdgpu.h"
@ -211,7 +214,7 @@ static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *amdgpu_dm_connector,
uint32_t link_index,
u32 link_index,
struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
struct amdgpu_encoder *aencoder,
@ -263,7 +266,7 @@ static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
u32 *vbl, u32 *position)
{
uint32_t v_blank_start, v_blank_end, h_position, v_position;
u32 v_blank_start, v_blank_end, h_position, v_position;
if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
return -EINVAL;
@ -391,7 +394,7 @@ static void dm_pflip_high_irq(void *interrupt_params)
struct amdgpu_device *adev = irq_params->adev;
unsigned long flags;
struct drm_pending_vblank_event *e;
uint32_t vpos, hpos, v_blank_start, v_blank_end;
u32 vpos, hpos, v_blank_start, v_blank_end;
bool vrr_active;
amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
@ -405,12 +408,12 @@ static void dm_pflip_high_irq(void *interrupt_params)
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
amdgpu_crtc->pflip_status,
AMDGPU_FLIP_SUBMITTED,
amdgpu_crtc->crtc_id,
amdgpu_crtc);
if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
amdgpu_crtc->pflip_status,
AMDGPU_FLIP_SUBMITTED,
amdgpu_crtc->crtc_id,
amdgpu_crtc);
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
return;
}
@ -678,7 +681,7 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
struct drm_connector *connector;
struct drm_connector_list_iter iter;
struct dc_link *link;
uint8_t link_index = 0;
u8 link_index = 0;
struct drm_device *dev;
if (adev == NULL)
@ -779,7 +782,7 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
struct amdgpu_device *adev = irq_params->adev;
struct amdgpu_display_manager *dm = &adev->dm;
struct dmcub_trace_buf_entry entry = { 0 };
uint32_t count = 0;
u32 count = 0;
struct dmub_hpd_work *dmub_hpd_wrk;
struct dc_link *plink = NULL;
@ -858,7 +861,7 @@ static int dm_set_powergating_state(void *handle,
}
/* Prototypes of private functions */
static int dm_early_init(void* handle);
static int dm_early_init(void *handle);
/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
@ -1047,7 +1050,7 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
struct dmub_srv_hw_params hw_params;
enum dmub_status status;
const unsigned char *fw_inst_const, *fw_bss_data;
uint32_t i, fw_inst_const_size, fw_bss_data_size;
u32 i, fw_inst_const_size, fw_bss_data_size;
bool has_hw_support;
if (!dmub_srv)
@ -1208,10 +1211,10 @@ static void dm_dmub_hw_resume(struct amdgpu_device *adev)
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
uint64_t pt_base;
uint32_t logical_addr_low;
uint32_t logical_addr_high;
uint32_t agp_base, agp_bot, agp_top;
u64 pt_base;
u32 logical_addr_low;
u32 logical_addr_high;
u32 agp_base, agp_bot, agp_top;
PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
memset(pa_config, 0, sizeof(*pa_config));
@ -1259,7 +1262,7 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
@ -1275,6 +1278,21 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
}
static void force_connector_state(
struct amdgpu_dm_connector *aconnector,
enum drm_connector_force force_state)
{
struct drm_connector *connector = &aconnector->base;
mutex_lock(&connector->dev->mode_config.mutex);
aconnector->base.force = force_state;
mutex_unlock(&connector->dev->mode_config.mutex);
mutex_lock(&aconnector->hpd_lock);
drm_kms_helper_connector_hotplug_event(connector);
mutex_unlock(&aconnector->hpd_lock);
}
static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
{
struct hpd_rx_irq_offload_work *offload_work;
@ -1283,6 +1301,9 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
struct amdgpu_device *adev;
enum dc_connection_type new_connection_type = dc_connection_none;
unsigned long flags;
union test_response test_response;
memset(&test_response, 0, sizeof(test_response));
offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
aconnector = offload_work->offload_wq->aconnector;
@ -1306,16 +1327,58 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
if (amdgpu_in_reset(adev))
goto skip;
if (offload_work->data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
offload_work->data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
dm_handle_mst_sideband_msg_ready_event(&aconnector->mst_mgr, DOWN_OR_UP_MSG_RDY_EVENT);
spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
offload_work->offload_wq->is_handling_mst_msg_rdy_event = false;
spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
goto skip;
}
mutex_lock(&adev->dm.dc_lock);
if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
dc_link_dp_handle_automated_test(dc_link);
else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
if (aconnector->timing_changed) {
/* force connector disconnect and reconnect */
force_connector_state(aconnector, DRM_FORCE_OFF);
drm_msleep(100);
force_connector_state(aconnector, DRM_FORCE_UNSPECIFIED);
}
test_response.bits.ACK = 1;
core_link_write_dpcd(
dc_link,
DP_TEST_RESPONSE,
&test_response.raw,
sizeof(test_response));
} else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
dc_link_dp_allow_hpd_rx_irq(dc_link)) {
dc_link_dp_handle_link_loss(dc_link);
/* offload_work->data is from handle_hpd_rx_irq->
* schedule_hpd_rx_offload_work.this is defer handle
* for hpd short pulse. upon here, link status may be
* changed, need get latest link status from dpcd
* registers. if link status is good, skip run link
* training again.
*/
union hpd_irq_data irq_data;
memset(&irq_data, 0, sizeof(irq_data));
/* before dc_link_dp_handle_link_loss, allow new link lost handle
* request be added to work queue if link lost at end of dc_link_
* dp_handle_link_loss
*/
spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
offload_work->offload_wq->is_handling_link_loss = false;
spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
if ((read_hpd_rx_irq_data(dc_link, &irq_data) == DC_OK) &&
hpd_rx_irq_check_link_loss_status(dc_link, &irq_data))
dc_link_dp_handle_link_loss(dc_link);
}
mutex_unlock(&adev->dm.dc_lock);
@ -1484,7 +1547,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
rw_init(&adev->dm.audio_lock, "dmaud");
mtx_init(&adev->dm.vblank_lock, IPL_TTY);
if(amdgpu_dm_irq_init(adev)) {
if (amdgpu_dm_irq_init(adev)) {
DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
goto error;
}
@ -1619,9 +1682,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
adev->dm.dc->debug.disable_stutter = true;
if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
adev->dm.dc->debug.disable_dsc = true;
}
if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
adev->dm.dc->debug.disable_clock_gate = true;
@ -1842,8 +1904,6 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
mutex_destroy(&adev->dm.audio_lock);
mutex_destroy(&adev->dm.dc_lock);
mutex_destroy(&adev->dm.dpia_aux_lock);
return;
}
static int load_dmcu_fw(struct amdgpu_device *adev)
@ -1852,7 +1912,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
int r;
const struct dmcu_firmware_header_v1_0 *hdr;
switch(adev->asic_type) {
switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
case CHIP_TAHITI:
case CHIP_PITCAIRN:
@ -2538,7 +2598,7 @@ struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
uint32_t i;
u32 i;
struct drm_connector_state *new_con_state;
struct drm_connector *connector;
struct drm_crtc *crtc_from_state;
@ -2644,7 +2704,7 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
struct dc_scaling_info scaling_infos[MAX_SURFACES];
struct dc_flip_addrs flip_addrs[MAX_SURFACES];
struct dc_stream_update stream_update;
} * bundle;
} *bundle;
int k, m;
bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
@ -2674,8 +2734,6 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
cleanup:
kfree(bundle);
return;
}
static int dm_resume(void *handle)
@ -2889,8 +2947,7 @@ static const struct amd_ip_funcs amdgpu_dm_funcs = {
.set_powergating_state = dm_set_powergating_state,
};
const struct amdgpu_ip_block_version dm_ip_block =
{
const struct amdgpu_ip_block_version dm_ip_block = {
.type = AMD_IP_BLOCK_TYPE_DCE,
.major = 1,
.minor = 0,
@ -2947,9 +3004,12 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
caps->aux_support = false;
if (caps->ext_caps->bits.oled == 1 /*||
caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
if (caps->ext_caps->bits.oled == 1
/*
* ||
* caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
* caps->ext_caps->bits.hdr_aux_backlight_control == 1
*/)
caps->aux_support = true;
if (amdgpu_backlight == 0)
@ -3078,6 +3138,10 @@ void amdgpu_dm_update_connector_after_detect(
aconnector->edid);
}
aconnector->timing_requested = kzalloc(sizeof(struct dc_crtc_timing), GFP_KERNEL);
if (!aconnector->timing_requested)
dm_error("%s: failed to create aconnector->requested_timing\n", __func__);
drm_connector_update_edid_property(connector, aconnector->edid);
amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
update_connector_ext_caps(aconnector);
@ -3089,6 +3153,8 @@ void amdgpu_dm_update_connector_after_detect(
dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
aconnector->edid = NULL;
kfree(aconnector->timing_requested);
aconnector->timing_requested = NULL;
#ifdef CONFIG_DRM_AMD_DC_HDCP
/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
@ -3133,6 +3199,8 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
if (aconnector->fake_enable)
aconnector->fake_enable = false;
aconnector->timing_changed = false;
if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
DRM_ERROR("KMS: Failed to detect connector\n");
@ -3172,84 +3240,6 @@ static void handle_hpd_irq(void *param)
}
static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
{
uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
uint8_t dret;
bool new_irq_handled = false;
int dpcd_addr;
int dpcd_bytes_to_read;
const int max_process_count = 30;
int process_count = 0;
const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
/* DPCD 0x200 - 0x201 for downstream IRQ */
dpcd_addr = DP_SINK_COUNT;
} else {
dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
/* DPCD 0x2002 - 0x2005 for downstream IRQ */
dpcd_addr = DP_SINK_COUNT_ESI;
}
dret = drm_dp_dpcd_read(
&aconnector->dm_dp_aux.aux,
dpcd_addr,
esi,
dpcd_bytes_to_read);
while (dret == dpcd_bytes_to_read &&
process_count < max_process_count) {
uint8_t retry;
dret = 0;
process_count++;
DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
/* handle HPD short pulse irq */
if (aconnector->mst_mgr.mst_state)
drm_dp_mst_hpd_irq(
&aconnector->mst_mgr,
esi,
&new_irq_handled);
if (new_irq_handled) {
/* ACK at DPCD to notify down stream */
const int ack_dpcd_bytes_to_write =
dpcd_bytes_to_read - 1;
for (retry = 0; retry < 3; retry++) {
uint8_t wret;
wret = drm_dp_dpcd_write(
&aconnector->dm_dp_aux.aux,
dpcd_addr + 1,
&esi[1],
ack_dpcd_bytes_to_write);
if (wret == ack_dpcd_bytes_to_write)
break;
}
/* check if there is new irq to be handled */
dret = drm_dp_dpcd_read(
&aconnector->dm_dp_aux.aux,
dpcd_addr,
esi,
dpcd_bytes_to_read);
new_irq_handled = false;
} else {
break;
}
}
if (process_count == max_process_count)
DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}
static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
union hpd_irq_data hpd_irq_data)
{
@ -3282,7 +3272,7 @@ static void handle_hpd_rx_irq(void *param)
union hpd_irq_data hpd_irq_data;
bool link_loss = false;
bool has_left_work = false;
int idx = aconnector->base.index;
int idx = dc_link->link_index;
struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
@ -3311,7 +3301,23 @@ static void handle_hpd_rx_irq(void *param)
if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
dm_handle_mst_sideband_msg(aconnector);
bool skip = false;
/*
* DOWN_REP_MSG_RDY is also handled by polling method
* mgr->cbs->poll_hpd_irq()
*/
spin_lock(&offload_wq->offload_lock);
skip = offload_wq->is_handling_mst_msg_rdy_event;
if (!skip)
offload_wq->is_handling_mst_msg_rdy_event = true;
spin_unlock(&offload_wq->offload_lock);
if (!skip)
schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
goto out;
}
@ -3404,7 +3410,7 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
aconnector = to_amdgpu_dm_connector(connector);
dc_link = aconnector->dc_link;
if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
int_params.irq_source = dc_link->irq_source_hpd;
@ -3413,7 +3419,7 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
(void *) aconnector);
}
if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
/* Also register for DP short pulse (hpd_rx). */
int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
@ -3422,11 +3428,11 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
amdgpu_dm_irq_register_interrupt(adev, &int_params,
handle_hpd_rx_irq,
(void *) aconnector);
if (adev->dm.hpd_rx_offload_wq)
adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
aconnector;
}
if (adev->dm.hpd_rx_offload_wq)
adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
aconnector;
}
}
@ -3439,7 +3445,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
struct dc_interrupt_params int_params = {0};
int r;
int i;
unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
@ -3453,11 +3459,12 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
* Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
* coming from DC hardware.
* amdgpu_dm_irq_handler() will re-direct the interrupt to DC
* for acknowledging and handling. */
* for acknowledging and handling.
*/
/* Use VBLANK interrupt */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
if (r) {
DRM_ERROR("Failed to add crtc irq id!\n");
return r;
@ -3465,7 +3472,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
int_params.irq_source =
dc_interrupt_to_irq_source(dc, i+1 , 0);
dc_interrupt_to_irq_source(dc, i + 1, 0);
c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
@ -3521,7 +3528,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
struct dc_interrupt_params int_params = {0};
int r;
int i;
unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
if (adev->family >= AMDGPU_FAMILY_AI)
client_id = SOC15_IH_CLIENTID_DCE;
@ -3538,7 +3545,8 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
* Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
* coming from DC hardware.
* amdgpu_dm_irq_handler() will re-direct the interrupt to DC
* for acknowledging and handling. */
* for acknowledging and handling.
*/
/* Use VBLANK interrupt */
for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
@ -3987,7 +3995,7 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
}
static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
unsigned *min, unsigned *max)
unsigned int *min, unsigned int *max)
{
if (!caps)
return 0;
@ -4007,7 +4015,7 @@ static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
uint32_t brightness)
{
unsigned min, max;
unsigned int min, max;
if (!get_brightness_range(caps, &min, &max))
return brightness;
@ -4020,7 +4028,7 @@ static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *c
static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
uint32_t brightness)
{
unsigned min, max;
unsigned int min, max;
if (!get_brightness_range(caps, &min, &max))
return brightness;
@ -4238,12 +4246,12 @@ static void amdgpu_set_panel_orientation(struct drm_connector *connector);
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
struct amdgpu_display_manager *dm = &adev->dm;
int32_t i;
s32 i;
struct amdgpu_dm_connector *aconnector = NULL;
struct amdgpu_encoder *aencoder = NULL;
struct amdgpu_mode_info *mode_info = &adev->mode_info;
uint32_t link_cnt;
int32_t primary_planes;
u32 link_cnt;
s32 primary_planes;
enum dc_connection_type new_connection_type = dc_connection_none;
const struct dc_plane_cap *plane;
bool psr_feature_enabled = false;
@ -4501,7 +4509,6 @@ fail:
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
drm_atomic_private_obj_fini(&dm->atomic_obj);
return;
}
/******************************************************************************
@ -4770,7 +4777,7 @@ fill_plane_color_attributes(const struct drm_plane_state *plane_state,
static int
fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
const struct drm_plane_state *plane_state,
const uint64_t tiling_flags,
const u64 tiling_flags,
struct dc_plane_info *plane_info,
struct dc_plane_address *address,
bool tmz_surface,
@ -4979,7 +4986,7 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
uint32_t num_clips;
bool bb_changed;
bool fb_changed;
uint32_t i = 0;
u32 i = 0;
flip_addrs->dirty_rect_count = 0;
@ -5113,7 +5120,7 @@ static enum dc_color_depth
convert_color_depth_from_display_info(const struct drm_connector *connector,
bool is_y420, int requested_bpc)
{
uint8_t bpc;
u8 bpc;
if (is_y420) {
bpc = 8;
@ -5227,6 +5234,7 @@ static bool adjust_colour_depth_from_display_info(
{
enum dc_color_depth depth = timing_out->display_color_depth;
int normalized_clk;
do {
normalized_clk = timing_out->pix_clk_100hz / 10;
/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
@ -5442,6 +5450,7 @@ create_fake_sink(struct amdgpu_dm_connector *aconnector)
{
struct dc_sink_init_data sink_init_data = { 0 };
struct dc_sink *sink = NULL;
sink_init_data.link = aconnector->dc_link;
sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
@ -5565,7 +5574,7 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
return &aconnector->freesync_vid_base;
/* Find the preferred mode */
list_for_each_entry (m, list_head, head) {
list_for_each_entry(m, list_head, head) {
if (m->type & DRM_MODE_TYPE_PREFERRED) {
m_pref = m;
break;
@ -5589,7 +5598,7 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
* For some monitors, preferred mode is not the mode with highest
* supported refresh rate.
*/
list_for_each_entry (m, list_head, head) {
list_for_each_entry(m, list_head, head) {
current_refresh = drm_mode_vrefresh(m);
if (m->hdisplay == m_pref->hdisplay &&
@ -5657,8 +5666,8 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
uint32_t max_dsc_target_bpp_limit_override)
{
const struct dc_link_settings *verified_link_cap = NULL;
uint32_t link_bw_in_kbps;
uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
u32 link_bw_in_kbps;
u32 edp_min_bpp_x16, edp_max_bpp_x16;
struct dc *dc = sink->ctx->dc;
struct dc_dsc_bw_range bw_range = {0};
struct dc_dsc_config dsc_cfg = {0};
@ -5715,17 +5724,15 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
struct dsc_dec_dpcd_caps *dsc_caps)
{
struct drm_connector *drm_connector = &aconnector->base;
uint32_t link_bandwidth_kbps;
uint32_t max_dsc_target_bpp_limit_override = 0;
u32 link_bandwidth_kbps;
struct dc *dc = sink->ctx->dc;
uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
uint32_t dsc_max_supported_bw_in_kbps;
u32 max_supported_bw_in_kbps, timing_bw_in_kbps;
u32 dsc_max_supported_bw_in_kbps;
u32 max_dsc_target_bpp_limit_override =
drm_connector->display_info.max_dsc_bpp;
link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
dc_link_get_link_cap(aconnector->dc_link));
if (stream->link && stream->link->local_sink)
max_dsc_target_bpp_limit_override =
stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
/* Set DSC policy according to dsc_clock_en */
dc_dsc_policy_set_enable_dsc_when_not_needed(
@ -5862,7 +5869,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
* This may not be an error, the use case is when we have no
* usermode calls to reset and set mode upon hotplug. In this
* case, we call set mode ourselves to restore the previous mode
* and the modelist may not be filled in in time.
* and the modelist may not be filled in time.
*/
DRM_DEBUG_DRIVER("No preferred mode found\n");
} else {
@ -5886,9 +5893,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
drm_mode_set_crtcinfo(&mode, 0);
/*
* If scaling is enabled and refresh rate didn't change
* we copy the vic and polarities of the old timings
*/
* If scaling is enabled and refresh rate didn't change
* we copy the vic and polarities of the old timings
*/
if (!scale || mode_refresh != preferred_refresh)
fill_stream_properties_from_drm_display_mode(
stream, &mode, &aconnector->base, con_state, NULL,
@ -5898,6 +5905,14 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
stream, &mode, &aconnector->base, con_state, old_stream,
requested_bpc);
if (aconnector->timing_changed) {
DC_LOG_DEBUG("%s: overriding timing for automated test, bpc %d, changing to %d\n",
__func__,
stream->timing.display_color_depth,
aconnector->timing_requested->display_color_depth);
stream->timing = *aconnector->timing_requested;
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
/* SST DSC determination policy */
update_dsc_caps(aconnector, sink, stream, &dsc_caps);
@ -6542,6 +6557,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
if (!state->duplicated) {
int max_bpc = conn_state->max_requested_bpc;
is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
aconnector->force_yuv420_output;
color_depth = convert_color_depth_from_display_info(connector,
@ -6862,7 +6878,7 @@ static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
{
struct drm_display_mode *m;
list_for_each_entry (m, &aconnector->base.probed_modes, head) {
list_for_each_entry(m, &aconnector->base.probed_modes, head) {
if (drm_mode_equal(m, mode))
return true;
}
@ -6875,7 +6891,7 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
const struct drm_display_mode *m;
struct drm_display_mode *new_mode;
uint i;
uint32_t new_modes_count = 0;
u32 new_modes_count = 0;
/* Standard FPS values
*
@ -6889,7 +6905,7 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
* 60 - Commonly used
* 48,72,96,120 - Multiples of 24
*/
static const uint32_t common_rates[] = {
static const u32 common_rates[] = {
23976, 24000, 25000, 29970, 30000,
48000, 50000, 60000, 72000, 96000, 120000
};
@ -6905,8 +6921,8 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
return 0;
for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
uint64_t target_vtotal, target_vtotal_diff;
uint64_t num, den;
u64 target_vtotal, target_vtotal_diff;
u64 num, den;
if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
continue;
@ -6974,13 +6990,7 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
drm_add_modes_noedid(connector, 640, 480);
} else {
amdgpu_dm_connector_ddc_get_modes(connector, edid);
/* most eDP supports only timings from its edid,
* usually only detailed timings are available
* from eDP edid. timings which are not from edid
* may damage eDP
*/
if (connector->connector_type != DRM_MODE_CONNECTOR_eDP)
amdgpu_dm_connector_add_common_modes(encoder, connector);
amdgpu_dm_connector_add_common_modes(encoder, connector);
amdgpu_dm_connector_add_freesync_modes(connector, edid);
}
amdgpu_dm_fbc_init(connector);
@ -7012,6 +7022,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
aconnector->audio_inst = -1;
rw_init(&aconnector->hpd_lock, "dmhpd");
rw_init(&aconnector->handle_mst_msg_ready, "dmmr");
/*
* configure support HPD hot plug connector_>polled default value is 0
@ -7156,7 +7167,7 @@ create_i2c(struct ddc_service *ddc_service,
*/
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *aconnector,
uint32_t link_index,
u32 link_index,
struct amdgpu_encoder *aencoder)
{
int res = 0;
@ -7167,7 +7178,6 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
link->priv = aconnector;
DRM_DEBUG_DRIVER("%s()\n", __func__);
i2c = create_i2c(link->ddc, link->link_index, &res);
if (!i2c) {
@ -7647,8 +7657,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct drm_crtc *pcrtc,
bool wait_for_vblank)
{
uint32_t i;
uint64_t timestamp_ns;
u32 i;
u64 timestamp_ns;
struct drm_plane *plane;
struct drm_plane_state *old_plane_state, *new_plane_state;
struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
@ -7659,7 +7669,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
int planes_count = 0, vpos, hpos;
unsigned long flags;
uint32_t target_vblank, last_flip_vblank;
u32 target_vblank, last_flip_vblank;
bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
bool cursor_update = false;
bool pflip_present = false;
@ -7761,7 +7771,15 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* Only allow immediate flips for fast updates that don't
* change memory domain, FB pitch, DCC state, rotation or
* mirroring.
*
* dm_crtc_helper_atomic_check() only accepts async flips with
* fast updates.
*/
if (crtc->state->async_flip &&
acrtc_state->update_type != UPDATE_TYPE_FAST)
drm_warn_once(state->dev,
"[PLANE:%d:%s] async flip with non-fast update\n",
plane->base.id, plane->name);
bundle->flip_addrs[planes_count].flip_immediate =
crtc->state->async_flip &&
acrtc_state->update_type == UPDATE_TYPE_FAST &&
@ -7804,8 +7822,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* DRI3/Present extension with defined target_msc.
*/
last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
}
else {
} else {
/* For variable refresh rate mode only:
* Get vblank of last completed flip to avoid > 1 vrr
* flips per video frame by use of throttling, but allow
@ -8100,7 +8117,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
struct amdgpu_display_manager *dm = &adev->dm;
struct dm_atomic_state *dm_state;
struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
uint32_t i, j;
u32 i, j;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
unsigned long flags;
@ -8132,8 +8149,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dc_resource_state_copy_construct_current(dm->dc, dc_state);
}
for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
new_crtc_state, i) {
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
@ -8156,9 +8173,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
drm_dbg_state(state->dev,
"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
"planes_changed:%d, mode_changed:%d,active_changed:%d,"
"connectors_changed:%d\n",
"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
acrtc->crtc_id,
new_crtc_state->enable,
new_crtc_state->active,
@ -8643,8 +8658,8 @@ static int do_aquire_global_lock(struct drm_device *dev,
&commit->flip_done, 10*HZ);
if (ret == 0)
DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
"timed out\n", crtc->base.id, crtc->name);
DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
crtc->base.id, crtc->name);
drm_crtc_commit_put(commit);
}
@ -8729,8 +8744,9 @@ is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
return false;
}
static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
uint64_t num, den, res;
static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
u64 num, den, res;
struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
@ -8852,9 +8868,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
goto skip_modeset;
drm_dbg_state(state->dev,
"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
"planes_changed:%d, mode_changed:%d,active_changed:%d,"
"connectors_changed:%d\n",
"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
acrtc->crtc_id,
new_crtc_state->enable,
new_crtc_state->active,
@ -8883,8 +8897,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
old_crtc_state)) {
new_crtc_state->mode_changed = false;
DRM_DEBUG_DRIVER(
"Mode change not required for front porch change, "
"setting mode_changed to %d",
"Mode change not required for front porch change, setting mode_changed to %d",
new_crtc_state->mode_changed);
set_freesync_fixed_config(dm_new_crtc_state);
@ -8896,9 +8909,8 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
struct drm_display_mode *high_mode;
high_mode = get_highest_refresh_rate_mode(aconnector, false);
if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
if (!drm_mode_equal(&new_crtc_state->mode, high_mode))
set_freesync_fixed_config(dm_new_crtc_state);
}
}
ret = dm_atomic_get_state(state, &dm_state);
@ -9066,6 +9078,7 @@ static bool should_reset_plane(struct drm_atomic_state *state,
*/
for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
struct amdgpu_framebuffer *old_afb, *new_afb;
if (other->type == DRM_PLANE_TYPE_CURSOR)
continue;
@ -9164,11 +9177,12 @@ static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
}
/* Core DRM takes care of checking FB modifiers, so we only need to
* check tiling flags when the FB doesn't have a modifier. */
* check tiling flags when the FB doesn't have a modifier.
*/
if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
if (adev->family < AMDGPU_FAMILY_AI) {
linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
} else {
linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
@ -9381,12 +9395,12 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
* cursor per pipe but it's going to inherit the scaling and
* positioning from the underlying pipe. Check the cursor plane's
* blending properties match the underlying planes'. */
* blending properties match the underlying planes'.
*/
new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
if (!new_cursor_state || !new_cursor_state->fb) {
if (!new_cursor_state || !new_cursor_state->fb)
return 0;
}
dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
@ -9432,6 +9446,7 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
struct drm_connector_state *conn_state, *old_conn_state;
struct amdgpu_dm_connector *aconnector = NULL;
int i;
for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
if (!conn_state->crtc)
conn_state = old_conn_state;
@ -9874,7 +9889,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
}
/* Store the overall update type for use later in atomic check. */
for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
struct dm_crtc_state *dm_new_crtc_state =
to_dm_crtc_state(new_crtc_state);
@ -9896,7 +9911,7 @@ fail:
else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
else
DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
trace_amdgpu_dm_atomic_check_finish(state, ret);
@ -9906,7 +9921,7 @@ fail:
static bool is_dp_capable_without_timing_msa(struct dc *dc,
struct amdgpu_dm_connector *amdgpu_dm_connector)
{
uint8_t dpcd_data;
u8 dpcd_data;
bool capable = false;
if (amdgpu_dm_connector->dc_link &&
@ -9925,7 +9940,7 @@ static bool is_dp_capable_without_timing_msa(struct dc *dc,
static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
unsigned int offset,
unsigned int total_length,
uint8_t *data,
u8 *data,
unsigned int length,
struct amdgpu_hdmi_vsdb_info *vsdb)
{
@ -9981,7 +9996,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
}
static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
uint8_t *edid_ext, int len,
u8 *edid_ext, int len,
struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
int i;
@ -10022,7 +10037,7 @@ static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
}
static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
uint8_t *edid_ext, int len,
u8 *edid_ext, int len,
struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
int i;
@ -10038,7 +10053,7 @@ static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
}
static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
uint8_t *edid_ext, int len,
u8 *edid_ext, int len,
struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
@ -10052,7 +10067,7 @@ static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
uint8_t *edid_ext = NULL;
u8 *edid_ext = NULL;
int i;
bool valid_vsdb_found = false;
@ -10228,7 +10243,7 @@ void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
}
void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
uint32_t value, const char *func_name)
u32 value, const char *func_name)
{
#ifdef DM_CHECK_ADDR_0
if (address == 0) {
@ -10243,7 +10258,7 @@ void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
const char *func_name)
{
uint32_t value;
u32 value;
#ifdef DM_CHECK_ADDR_0
if (address == 0) {
DC_ERR("invalid register read; address = 0\n");

View file

@ -31,6 +31,7 @@
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_plane.h>
#include "link_service_types.h"
/*
* This file contains the definition for amdgpu_display_manager
@ -192,6 +193,11 @@ struct hpd_rx_irq_offload_work_queue {
* we're handling link loss
*/
bool is_handling_link_loss;
/**
* @is_handling_mst_msg_rdy_event: Used to prevent inserting mst message
* ready event when we're already handling mst message ready event
*/
bool is_handling_mst_msg_rdy_event;
/**
* @aconnector: The aconnector that this work queue is attached to
*/
@ -613,6 +619,8 @@ struct amdgpu_dm_connector {
struct drm_dp_mst_port *port;
struct amdgpu_dm_connector *mst_port;
struct drm_dp_aux *dsc_aux;
struct rwlock handle_mst_msg_ready;
/* TODO see if we can merge with ddc_bus or make a dm_connector */
struct amdgpu_i2c_adapter *i2c;
@ -650,6 +658,10 @@ struct amdgpu_dm_connector {
/* Record progress status of mst*/
uint8_t mst_status;
/* Automated testing */
bool timing_changed;
struct dc_crtc_timing *timing_requested;
};
static inline void amdgpu_dm_set_mst_status(uint8_t *status,

View file

@ -406,6 +406,18 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
return -EINVAL;
}
/*
* Only allow async flips for fast updates that don't change the FB
* pitch, the DCC state, rotation, etc.
*/
if (crtc_state->async_flip &&
dm_crtc_state->update_type != UPDATE_TYPE_FAST) {
drm_dbg_atomic(crtc->dev,
"[CRTC:%d:%s] async flips are only supported for fast updates\n",
crtc->base.id, crtc->name);
return -EINVAL;
}
/* In some use cases, like reset, no stream is attached */
if (!dm_crtc_state->stream)
return 0;

View file

@ -38,6 +38,9 @@
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"
#include "amdgpu_dm_mst_types.h"
#include "dpcd_defs.h"
#include "dc/inc/core_types.h"
#include "dc_link_dp.h"
#include "dm_helpers.h"
#include "ddc_service_types.h"
@ -1056,6 +1059,128 @@ void dm_helpers_mst_enable_stream_features(const struct dc_stream_state *stream)
sizeof(new_downspread));
}
bool dm_helpers_dp_handle_test_pattern_request(
struct dc_context *ctx,
const struct dc_link *link,
union link_test_pattern dpcd_test_pattern,
union test_misc dpcd_test_params)
{
enum dp_test_pattern test_pattern;
enum dp_test_pattern_color_space test_pattern_color_space =
DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED;
enum dc_color_depth requestColorDepth = COLOR_DEPTH_UNDEFINED;
enum dc_pixel_encoding requestPixelEncoding = PIXEL_ENCODING_UNDEFINED;
struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
struct pipe_ctx *pipe_ctx = NULL;
struct amdgpu_dm_connector *aconnector = link->priv;
int i;
for (i = 0; i < MAX_PIPES; i++) {
if (pipes[i].stream == NULL)
continue;
if (pipes[i].stream->link == link && !pipes[i].top_pipe &&
!pipes[i].prev_odm_pipe) {
pipe_ctx = &pipes[i];
break;
}
}
if (pipe_ctx == NULL)
return false;
switch (dpcd_test_pattern.bits.PATTERN) {
case LINK_TEST_PATTERN_COLOR_RAMP:
test_pattern = DP_TEST_PATTERN_COLOR_RAMP;
break;
case LINK_TEST_PATTERN_VERTICAL_BARS:
test_pattern = DP_TEST_PATTERN_VERTICAL_BARS;
break; /* black and white */
case LINK_TEST_PATTERN_COLOR_SQUARES:
test_pattern = (dpcd_test_params.bits.DYN_RANGE ==
TEST_DYN_RANGE_VESA ?
DP_TEST_PATTERN_COLOR_SQUARES :
DP_TEST_PATTERN_COLOR_SQUARES_CEA);
break;
default:
test_pattern = DP_TEST_PATTERN_VIDEO_MODE;
break;
}
if (dpcd_test_params.bits.CLR_FORMAT == 0)
test_pattern_color_space = DP_TEST_PATTERN_COLOR_SPACE_RGB;
else
test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ?
DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 :
DP_TEST_PATTERN_COLOR_SPACE_YCBCR601;
switch (dpcd_test_params.bits.BPC) {
case 0: // 6 bits
requestColorDepth = COLOR_DEPTH_666;
break;
case 1: // 8 bits
requestColorDepth = COLOR_DEPTH_888;
break;
case 2: // 10 bits
requestColorDepth = COLOR_DEPTH_101010;
break;
case 3: // 12 bits
requestColorDepth = COLOR_DEPTH_121212;
break;
default:
break;
}
switch (dpcd_test_params.bits.CLR_FORMAT) {
case 0:
requestPixelEncoding = PIXEL_ENCODING_RGB;
break;
case 1:
requestPixelEncoding = PIXEL_ENCODING_YCBCR422;
break;
case 2:
requestPixelEncoding = PIXEL_ENCODING_YCBCR444;
break;
default:
requestPixelEncoding = PIXEL_ENCODING_RGB;
break;
}
if ((requestColorDepth != COLOR_DEPTH_UNDEFINED
&& pipe_ctx->stream->timing.display_color_depth != requestColorDepth)
|| (requestPixelEncoding != PIXEL_ENCODING_UNDEFINED
&& pipe_ctx->stream->timing.pixel_encoding != requestPixelEncoding)) {
DC_LOG_DEBUG("%s: original bpc %d pix encoding %d, changing to %d %d\n",
__func__,
pipe_ctx->stream->timing.display_color_depth,
pipe_ctx->stream->timing.pixel_encoding,
requestColorDepth,
requestPixelEncoding);
pipe_ctx->stream->timing.display_color_depth = requestColorDepth;
pipe_ctx->stream->timing.pixel_encoding = requestPixelEncoding;
dp_update_dsc_config(pipe_ctx);
aconnector->timing_changed = true;
/* store current timing */
if (aconnector->timing_requested)
*aconnector->timing_requested = pipe_ctx->stream->timing;
else
DC_LOG_ERROR("%s: timing storage failed\n", __func__);
}
dc_link_dp_set_test_pattern(
(struct dc_link *) link,
test_pattern,
test_pattern_color_space,
NULL,
NULL,
0);
return false;
}
void dm_set_phyd32clk(struct dc_context *ctx, int freq_khz)
{
// TODO

View file

@ -590,8 +590,118 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
return connector;
}
void dm_handle_mst_sideband_msg_ready_event(
struct drm_dp_mst_topology_mgr *mgr,
enum mst_msg_ready_type msg_rdy_type)
{
uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
uint8_t dret;
bool new_irq_handled = false;
int dpcd_addr;
uint8_t dpcd_bytes_to_read;
const uint8_t max_process_count = 30;
uint8_t process_count = 0;
u8 retry;
struct amdgpu_dm_connector *aconnector =
container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
/* DPCD 0x200 - 0x201 for downstream IRQ */
dpcd_addr = DP_SINK_COUNT;
} else {
dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
/* DPCD 0x2002 - 0x2005 for downstream IRQ */
dpcd_addr = DP_SINK_COUNT_ESI;
}
mutex_lock(&aconnector->handle_mst_msg_ready);
while (process_count < max_process_count) {
u8 ack[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {};
process_count++;
dret = drm_dp_dpcd_read(
&aconnector->dm_dp_aux.aux,
dpcd_addr,
esi,
dpcd_bytes_to_read);
if (dret != dpcd_bytes_to_read) {
DRM_DEBUG_KMS("DPCD read and acked number is not as expected!");
break;
}
DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
switch (msg_rdy_type) {
case DOWN_REP_MSG_RDY_EVENT:
/* Only handle DOWN_REP_MSG_RDY case*/
esi[1] &= DP_DOWN_REP_MSG_RDY;
break;
case UP_REQ_MSG_RDY_EVENT:
/* Only handle UP_REQ_MSG_RDY case*/
esi[1] &= DP_UP_REQ_MSG_RDY;
break;
default:
/* Handle both cases*/
esi[1] &= (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY);
break;
}
if (!esi[1])
break;
/* handle MST irq */
if (aconnector->mst_mgr.mst_state)
drm_dp_mst_hpd_irq_handle_event(&aconnector->mst_mgr,
esi,
ack,
&new_irq_handled);
if (new_irq_handled) {
/* ACK at DPCD to notify down stream */
for (retry = 0; retry < 3; retry++) {
ssize_t wret;
wret = drm_dp_dpcd_writeb(&aconnector->dm_dp_aux.aux,
dpcd_addr + 1,
ack[1]);
if (wret == 1)
break;
}
if (retry == 3) {
DRM_ERROR("Failed to ack MST event.\n");
return;
}
drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr);
new_irq_handled = false;
} else {
break;
}
}
mutex_unlock(&aconnector->handle_mst_msg_ready);
if (process_count == max_process_count)
DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}
static void dm_handle_mst_down_rep_msg_ready(struct drm_dp_mst_topology_mgr *mgr)
{
dm_handle_mst_sideband_msg_ready_event(mgr, DOWN_REP_MSG_RDY_EVENT);
}
static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
.add_connector = dm_dp_add_mst_connector,
.poll_hpd_irq = dm_handle_mst_down_rep_msg_ready,
};
void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
@ -673,15 +783,18 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *p
int count,
int k)
{
struct drm_connector *drm_connector;
int i;
for (i = 0; i < count; i++) {
drm_connector = &params[i].aconnector->base;
memset(&params[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg));
if (vars[i + k].dsc_enabled && dc_dsc_compute_config(
params[i].sink->ctx->dc->res_pool->dscs[0],
&params[i].sink->dsc_caps.dsc_dec_caps,
params[i].sink->ctx->dc->debug.dsc_min_slice_height_override,
params[i].sink->edid_caps.panel_patch.max_dsc_target_bpp_limit,
drm_connector->display_info.max_dsc_bpp,
0,
params[i].timing,
&params[i].timing->dsc_cfg)) {
@ -723,12 +836,16 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
struct dc_dsc_config dsc_config;
u64 kbps;
struct drm_connector *drm_connector = &param.aconnector->base;
uint32_t max_dsc_target_bpp_limit_override =
drm_connector->display_info.max_dsc_bpp;
kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
dc_dsc_compute_config(
param.sink->ctx->dc->res_pool->dscs[0],
&param.sink->dsc_caps.dsc_dec_caps,
param.sink->ctx->dc->debug.dsc_min_slice_height_override,
param.sink->edid_caps.panel_patch.max_dsc_target_bpp_limit,
max_dsc_target_bpp_limit_override,
(int) kbps, param.timing, &dsc_config);
return dsc_config.bits_per_pixel;

View file

@ -49,6 +49,13 @@
#define PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B 1031
#define PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B 1000
enum mst_msg_ready_type {
NONE_MSG_RDY_EVENT = 0,
DOWN_REP_MSG_RDY_EVENT = 1,
UP_REQ_MSG_RDY_EVENT = 2,
DOWN_OR_UP_MSG_RDY_EVENT = 3
};
struct amdgpu_display_manager;
struct amdgpu_dm_connector;
@ -61,6 +68,10 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
void
dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev);
void dm_handle_mst_sideband_msg_ready_event(
struct drm_dp_mst_topology_mgr *mgr,
enum mst_msg_ready_type msg_rdy_type);
struct dsc_mst_fairness_vars {
int pbn;
bool dsc_enabled;

View file

@ -86,6 +86,11 @@ static int dcn31_get_active_display_cnt_wa(
stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
tmds_present = true;
/* Checking stream / link detection ensuring that PHY is active*/
if (dc_is_dp_signal(stream->signal) && !stream->dpms_off)
display_count++;
}
for (i = 0; i < dc->link_count; i++) {

View file

@ -3115,7 +3115,7 @@ struct dc_link_settings dp_get_max_link_cap(struct dc_link *link)
return max_link_cap;
}
static enum dc_status read_hpd_rx_irq_data(
enum dc_status read_hpd_rx_irq_data(
struct dc_link *link,
union hpd_irq_data *irq_data)
{
@ -4264,124 +4264,6 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
test_pattern_size);
}
static void dp_test_send_link_test_pattern(struct dc_link *link)
{
union link_test_pattern dpcd_test_pattern;
union test_misc dpcd_test_params;
enum dp_test_pattern test_pattern;
enum dp_test_pattern_color_space test_pattern_color_space =
DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED;
enum dc_color_depth requestColorDepth = COLOR_DEPTH_UNDEFINED;
struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
struct pipe_ctx *pipe_ctx = NULL;
int i;
memset(&dpcd_test_pattern, 0, sizeof(dpcd_test_pattern));
memset(&dpcd_test_params, 0, sizeof(dpcd_test_params));
for (i = 0; i < MAX_PIPES; i++) {
if (pipes[i].stream == NULL)
continue;
if (pipes[i].stream->link == link && !pipes[i].top_pipe && !pipes[i].prev_odm_pipe) {
pipe_ctx = &pipes[i];
break;
}
}
if (pipe_ctx == NULL)
return;
/* get link test pattern and pattern parameters */
core_link_read_dpcd(
link,
DP_TEST_PATTERN,
&dpcd_test_pattern.raw,
sizeof(dpcd_test_pattern));
core_link_read_dpcd(
link,
DP_TEST_MISC0,
&dpcd_test_params.raw,
sizeof(dpcd_test_params));
switch (dpcd_test_pattern.bits.PATTERN) {
case LINK_TEST_PATTERN_COLOR_RAMP:
test_pattern = DP_TEST_PATTERN_COLOR_RAMP;
break;
case LINK_TEST_PATTERN_VERTICAL_BARS:
test_pattern = DP_TEST_PATTERN_VERTICAL_BARS;
break; /* black and white */
case LINK_TEST_PATTERN_COLOR_SQUARES:
test_pattern = (dpcd_test_params.bits.DYN_RANGE ==
TEST_DYN_RANGE_VESA ?
DP_TEST_PATTERN_COLOR_SQUARES :
DP_TEST_PATTERN_COLOR_SQUARES_CEA);
break;
default:
test_pattern = DP_TEST_PATTERN_VIDEO_MODE;
break;
}
if (dpcd_test_params.bits.CLR_FORMAT == 0)
test_pattern_color_space = DP_TEST_PATTERN_COLOR_SPACE_RGB;
else
test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ?
DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 :
DP_TEST_PATTERN_COLOR_SPACE_YCBCR601;
switch (dpcd_test_params.bits.BPC) {
case 0: // 6 bits
requestColorDepth = COLOR_DEPTH_666;
break;
case 1: // 8 bits
requestColorDepth = COLOR_DEPTH_888;
break;
case 2: // 10 bits
requestColorDepth = COLOR_DEPTH_101010;
break;
case 3: // 12 bits
requestColorDepth = COLOR_DEPTH_121212;
break;
default:
break;
}
switch (dpcd_test_params.bits.CLR_FORMAT) {
case 0:
pipe_ctx->stream->timing.pixel_encoding = PIXEL_ENCODING_RGB;
break;
case 1:
pipe_ctx->stream->timing.pixel_encoding = PIXEL_ENCODING_YCBCR422;
break;
case 2:
pipe_ctx->stream->timing.pixel_encoding = PIXEL_ENCODING_YCBCR444;
break;
default:
pipe_ctx->stream->timing.pixel_encoding = PIXEL_ENCODING_RGB;
break;
}
if (requestColorDepth != COLOR_DEPTH_UNDEFINED
&& pipe_ctx->stream->timing.display_color_depth != requestColorDepth) {
DC_LOG_DEBUG("%s: original bpc %d, changing to %d\n",
__func__,
pipe_ctx->stream->timing.display_color_depth,
requestColorDepth);
pipe_ctx->stream->timing.display_color_depth = requestColorDepth;
}
dp_update_dsc_config(pipe_ctx);
dc_link_dp_set_test_pattern(
link,
test_pattern,
test_pattern_color_space,
NULL,
NULL,
0);
}
static void dp_test_get_audio_test_data(struct dc_link *link, bool disable_video)
{
union audio_test_mode dpcd_test_mode = {0};
@@ -4494,8 +4376,25 @@ void dc_link_dp_handle_automated_test(struct dc_link *link)
test_response.bits.ACK = 0;
}
if (test_request.bits.LINK_TEST_PATTRN) {
dp_test_send_link_test_pattern(link);
test_response.bits.ACK = 1;
union test_misc dpcd_test_params;
union link_test_pattern dpcd_test_pattern;
memset(&dpcd_test_pattern, 0, sizeof(dpcd_test_pattern));
memset(&dpcd_test_params, 0, sizeof(dpcd_test_params));
/* get link test pattern and pattern parameters */
core_link_read_dpcd(
link,
DP_TEST_PATTERN,
&dpcd_test_pattern.raw,
sizeof(dpcd_test_pattern));
core_link_read_dpcd(
link,
DP_TEST_MISC0,
&dpcd_test_params.raw,
sizeof(dpcd_test_params));
test_response.bits.ACK = dm_helpers_dp_handle_test_pattern_request(link->ctx, link,
dpcd_test_pattern, dpcd_test_params) ? 1 : 0;
}
if (test_request.bits.AUDIO_TEST_PATTERN) {

View file

@@ -3293,7 +3293,8 @@ void dcn10_wait_for_mpcc_disconnect(
if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
if (pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
if (pipe_ctx->stream_res.tg &&
pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
hubp->funcs->set_blank(hubp, true);

View file

@@ -65,7 +65,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = true,
.pipe_split_policy = MPC_SPLIT_DYNAMIC,
.pipe_split_policy = MPC_SPLIT_AVOID,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,

View file

@@ -156,6 +156,12 @@ enum dc_edid_status dm_helpers_read_local_edid(
struct dc_link *link,
struct dc_sink *sink);
bool dm_helpers_dp_handle_test_pattern_request(
struct dc_context *ctx,
const struct dc_link *link,
union link_test_pattern dpcd_test_pattern,
union test_misc dpcd_test_params);
void dm_set_dcn_clocks(
struct dc_context *ctx,
struct dc_clocks *clks);

View file

@@ -82,6 +82,10 @@ bool perform_link_training_with_retries(
enum amd_signal_type signal,
bool do_fallback);
enum dc_status read_hpd_rx_irq_data(
struct dc_link *link,
union hpd_irq_data *irq_data);
bool hpd_rx_irq_check_link_loss_status(
struct dc_link *link,
union hpd_irq_data *hpd_irq_dpcd_data);

View file

@@ -1927,12 +1927,16 @@ static int sienna_cichlid_read_sensor(struct smu_context *smu,
*size = 4;
break;
case AMDGPU_PP_SENSOR_GFX_MCLK:
ret = sienna_cichlid_get_current_clk_freq_by_table(smu, SMU_UCLK, (uint32_t *)data);
ret = sienna_cichlid_get_smu_metrics_data(smu,
METRICS_CURR_UCLK,
(uint32_t *)data);
*(uint32_t *)data *= 100;
*size = 4;
break;
case AMDGPU_PP_SENSOR_GFX_SCLK:
ret = sienna_cichlid_get_current_clk_freq_by_table(smu, SMU_GFXCLK, (uint32_t *)data);
ret = sienna_cichlid_get_smu_metrics_data(smu,
METRICS_AVERAGE_GFXCLK,
(uint32_t *)data);
*(uint32_t *)data *= 100;
*size = 4;
break;

View file

@@ -940,7 +940,7 @@ static int smu_v13_0_7_read_sensor(struct smu_context *smu,
break;
case AMDGPU_PP_SENSOR_GFX_MCLK:
ret = smu_v13_0_7_get_smu_metrics_data(smu,
METRICS_AVERAGE_UCLK,
METRICS_CURR_UCLK,
(uint32_t *)data);
*(uint32_t *)data *= 100;
*size = 4;

View file

@@ -4063,17 +4063,28 @@ out:
}
/**
* drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
* drm_dp_mst_hpd_irq_handle_event() - MST hotplug IRQ handle MST event
* @mgr: manager to notify irq for.
* @esi: 4 bytes from SINK_COUNT_ESI
* @ack: 4 bytes used to ack events starting from SINK_COUNT_ESI
* @handled: whether the hpd interrupt was consumed or not
*
* This should be called from the driver when it detects a short IRQ,
* This should be called from the driver when it detects an HPD IRQ,
* along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
* topology manager will process the sideband messages received as a result
* of this.
* topology manager will process the sideband messages received
* as indicated in the DEVICE_SERVICE_IRQ_VECTOR_ESI0 and set the
* corresponding flags in @ack that the driver must later ack to the DP receiver.
*
* Note that the driver shall also call
* drm_dp_mst_hpd_irq_send_new_request() if 'handled' is set
* after calling this function, to try to kick off a new request in
* the queue if the previous message transaction is completed.
*
* See also:
* drm_dp_mst_hpd_irq_send_new_request()
*/
int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
int drm_dp_mst_hpd_irq_handle_event(struct drm_dp_mst_topology_mgr *mgr, const u8 *esi,
u8 *ack, bool *handled)
{
int ret = 0;
int sc;
@@ -4088,18 +4099,47 @@ int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handl
if (esi[1] & DP_DOWN_REP_MSG_RDY) {
ret = drm_dp_mst_handle_down_rep(mgr);
*handled = true;
ack[1] |= DP_DOWN_REP_MSG_RDY;
}
if (esi[1] & DP_UP_REQ_MSG_RDY) {
ret |= drm_dp_mst_handle_up_req(mgr);
*handled = true;
ack[1] |= DP_UP_REQ_MSG_RDY;
}
drm_dp_mst_kick_tx(mgr);
return ret;
}
EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
EXPORT_SYMBOL(drm_dp_mst_hpd_irq_handle_event);
/**
* drm_dp_mst_hpd_irq_send_new_request() - MST hotplug IRQ kick off new request
* @mgr: manager to notify irq for.
*
* This should be called from the driver once the MST IRQ event has been
* handled and acked. Note that a new down request should only be sent when
* the previous message transaction is completed. The source is not supposed
* to generate interleaved message transactions.
*/
void drm_dp_mst_hpd_irq_send_new_request(struct drm_dp_mst_topology_mgr *mgr)
{
struct drm_dp_sideband_msg_tx *txmsg;
bool kick = true;
mutex_lock(&mgr->qlock);
txmsg = list_first_entry_or_null(&mgr->tx_msg_downq,
struct drm_dp_sideband_msg_tx, next);
/* If the last transaction is not completed yet */
if (!txmsg ||
txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
kick = false;
mutex_unlock(&mgr->qlock);
if (kick)
drm_dp_mst_kick_tx(mgr);
}
EXPORT_SYMBOL(drm_dp_mst_hpd_irq_send_new_request);
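Taken together, the two kernel-doc comments above describe a three-step driver pattern: hand the ESI bytes to the topology manager, write the returned ack bits back to the sink, then kick the next queued sideband request. The sketch below illustrates that pattern; the function name is hypothetical and the single-byte ack write is a simplification of what real callers (e.g. i915 later in this diff) do.
/* Hypothetical driver-side sketch of the handle-event / ack / kick sequence. */
static void example_handle_mst_esi(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_aux *aux, const u8 *esi)
{
	u8 ack[4] = {};
	bool handled = false;

	drm_dp_mst_hpd_irq_handle_event(mgr, esi, ack, &handled);

	/* Ack the serviced events back to the DP receiver (simplified). */
	if (ack[1])
		drm_dp_dpcd_writeb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0, ack[1]);

	/* Only after acking, let the manager push the next queued down request. */
	if (ack[1] & (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY))
		drm_dp_mst_hpd_irq_send_new_request(mgr);
}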
/**
* drm_dp_mst_detect_port() - get connection status for an MST port
* @connector: DRM connector for this port

View file

@@ -572,6 +572,7 @@ int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
dma_resv_for_each_fence_unlocked(&cursor, fence) {
if (dma_resv_iter_is_restarted(&cursor)) {
struct dma_fence **new_fences;
unsigned int count;
while (*num_fences)
@@ -581,9 +582,9 @@ int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
/* Eventually re-allocate the array */
#ifdef __linux__
*fences = krealloc_array(*fences, count,
sizeof(void *),
GFP_KERNEL);
new_fences = krealloc_array(*fences, count,
sizeof(void *),
GFP_KERNEL);
#else
nfences = kmalloc(count * sizeof(void *),
GFP_KERNEL);
@@ -592,13 +593,17 @@ int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
(count - 1) * sizeof(void *));
if (nfences) {
kfree(*fences);
*fences = nfences;
new_fences = nfences;
}
#endif
if (count && !*fences) {
if (count && !new_fences) {
kfree(*fences);
*fences = NULL;
*num_fences = 0;
dma_resv_iter_end(&cursor);
return -ENOMEM;
}
*fences = new_fences;
}
(*fences)[(*num_fences)++] = dma_fence_get(fence);
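The hunk above is an instance of the usual safe-realloc idiom: keep the old pointer until the reallocation has succeeded, and free it exactly once on failure. A generic sketch (the helper is hypothetical, not part of this change):
/* Hypothetical helper showing the idiom the fix follows. */
static int grow_ptr_array(void ***arrp, unsigned int count)
{
	void **tmp = krealloc_array(*arrp, count, sizeof(void *), GFP_KERNEL);

	if (!tmp) {
		kfree(*arrp);		/* old array freed exactly once */
		*arrp = NULL;
		return -ENOMEM;
	}
	*arrp = tmp;			/* commit only on success */
	return 0;
}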

View file

@@ -315,6 +315,9 @@ static bool drm_client_target_cloned(struct drm_device *dev,
can_clone = true;
dmt_mode = drm_mode_find_dmt(dev, 1024, 768, 60, false);
if (!dmt_mode)
goto fail;
for (i = 0; i < connector_count; i++) {
if (!enabled[i])
continue;
@@ -330,11 +333,13 @@ static bool drm_client_target_cloned(struct drm_device *dev,
if (!modes[i])
can_clone = false;
}
kfree(dmt_mode);
if (can_clone) {
DRM_DEBUG_KMS("can clone using 1024x768\n");
return true;
}
fail:
DRM_INFO("kms: can't enable cloning when we probably wanted to.\n");
return false;
}
@@ -876,6 +881,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
break;
}
kfree(modeset->mode);
modeset->mode = drm_mode_duplicate(dev, mode);
drm_connector_get(connector);
modeset->connectors[modeset->num_connectors++] = connector;

View file

@@ -7396,7 +7396,6 @@ static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_stat
struct wait_queue_entry wait_fence, wait_reset;
struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
#ifdef notyet
init_wait_entry(&wait_fence, 0);
init_wait_entry(&wait_reset, 0);
for (;;) {
@@ -7417,22 +7416,6 @@ static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_stat
finish_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags,
I915_RESET_MODESET),
&wait_reset);
#else
/* XXX above recurses sch_mtx */
init_wait_entry(&wait_fence, 0);
for (;;) {
prepare_to_wait(&intel_state->commit_ready.wait,
&wait_fence, TASK_UNINTERRUPTIBLE);
if (i915_sw_fence_done(&intel_state->commit_ready) ||
test_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags))
break;
schedule();
}
finish_wait(&intel_state->commit_ready.wait, &wait_fence);
#endif
}
static void intel_cleanup_dsbs(struct intel_atomic_state *state)

View file

@@ -3804,9 +3804,7 @@ intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, u8 *ack)
{
bool handled = false;
drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
if (handled)
ack[1] |= esi[1] & (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY);
drm_dp_mst_hpd_irq_handle_event(&intel_dp->mst_mgr, esi, ack, &handled);
if (esi[1] & DP_CP_IRQ) {
intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
@@ -3881,6 +3879,9 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
if (!intel_dp_ack_sink_irq_esi(intel_dp, ack))
drm_dbg_kms(&i915->drm, "Failed to ack ESI\n");
if (ack[1] & (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY))
drm_dp_mst_hpd_irq_send_new_request(&intel_dp->mst_mgr);
}
return link_ok;

View file

@@ -815,8 +815,11 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr);
bool drm_dp_read_mst_cap(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state);
int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled);
int drm_dp_mst_hpd_irq_handle_event(struct drm_dp_mst_topology_mgr *mgr,
const u8 *esi,
u8 *ack,
bool *handled);
void drm_dp_mst_hpd_irq_send_new_request(struct drm_dp_mst_topology_mgr *mgr);
int
drm_dp_mst_detect_port(struct drm_connector *connector,

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: wait.h,v 1.10 2023/07/18 06:58:59 claudio Exp $ */
/* $OpenBSD: wait.h,v 1.11 2023/07/28 09:46:13 claudio Exp $ */
/*
* Copyright (c) 2013, 2014, 2015 Mark Kettenis
* Copyright (c) 2017 Martin Pieuchot
@@ -159,11 +159,37 @@ do { \
__ret; \
})
#define __wait_event_intr_locked(wqh, condition) \
({ \
struct wait_queue_entry __wq_entry; \
int __error; \
\
init_wait_entry(&__wq_entry, 0); \
do { \
KASSERT(!cold); \
\
if (list_empty(&__wq_entry.entry)) \
__add_wait_queue_entry_tail(&wqh, &__wq_entry); \
set_current_state(TASK_INTERRUPTIBLE); \
\
mtx_leave(&(wqh).lock); \
__error = sleep_finish(0, 1); \
mtx_enter(&(wqh).lock); \
if (__error == ERESTART || __error == EINTR) { \
__error = -ERESTARTSYS; \
break; \
} \
} while (!(condition)); \
__remove_wait_queue(&(wqh), &__wq_entry); \
__set_current_state(TASK_RUNNING); \
__error; \
})
#define wait_event_interruptible_locked(wqh, condition) \
({ \
int __ret = 0; \
if (!(condition)) \
__ret = __wait_event_intr_timeout(wqh, condition, 0, PCATCH); \
__ret = __wait_event_intr_locked(wqh, condition); \
__ret; \
})
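A usage sketch for the locked variant (names are illustrative, and the spin_lock()/spin_unlock() wrappers of the compat layer are assumed): the caller holds the wait-queue lock on entry and exit; the macro drops it only while sleeping and returns 0 on success or -ERESTARTSYS when interrupted.
/* Hypothetical caller; the condition is re-checked with the lock held. */
static int example_wait_for_flag(wait_queue_head_t *wq, int *flag)
{
	int error;

	spin_lock(&wq->lock);
	error = wait_event_interruptible_locked(*wq, *flag != 0);
	spin_unlock(&wq->lock);

	return error;
}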

View file

@@ -274,7 +274,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
struct drm_radeon_cs *cs = data;
uint64_t *chunk_array_ptr;
unsigned size, i;
u64 size;
unsigned i;
u32 ring = RADEON_CS_RING_GFX;
s32 priority = 0;

View file

@@ -85,6 +85,8 @@ static void ttm_lru_bulk_move_pos_tail(struct ttm_lru_bulk_move_pos *pos,
struct ttm_resource *res)
{
if (pos->last != res) {
if (pos->first == res)
pos->first = list_next_entry(res, lru);
list_move(&res->lru, &pos->last->lru);
pos->last = res;
}
@@ -110,7 +112,8 @@ static void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
{
struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);
if (unlikely(pos->first == res && pos->last == res)) {
if (unlikely(WARN_ON(!pos->first || !pos->last) ||
(pos->first == res && pos->last == res))) {
pos->first = NULL;
pos->last = NULL;
} else if (pos->first == res) {

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: if_ix.c,v 1.200 2023/07/18 16:01:20 bluhm Exp $ */
/* $OpenBSD: if_ix.c,v 1.202 2023/07/28 20:25:08 bluhm Exp $ */
/******************************************************************************
@@ -3225,30 +3225,27 @@ ixgbe_rxeof(struct rx_ring *rxr)
sendmp = rxbuf->fmp;
rxbuf->buf = rxbuf->fmp = NULL;
if (sendmp != NULL) { /* secondary frag */
sendmp->m_pkthdr.len += mp->m_len;
/*
* This function iterates over interleaved descriptors.
* Thus, we reuse ph_mss as global segment counter per
* TCP connection, instead of introducing a new variable
* in m_pkthdr.
*/
if (rsccnt)
sendmp->m_pkthdr.ph_mss += rsccnt - 1;
} else {
if (sendmp == NULL) {
/* first desc of a non-ps chain */
sendmp = mp;
sendmp->m_pkthdr.len = mp->m_len;
if (rsccnt)
sendmp->m_pkthdr.ph_mss = rsccnt - 1;
sendmp->m_pkthdr.len = 0;
sendmp->m_pkthdr.ph_mss = 0;
#if NVLAN > 0
if (sc->vlan_stripping && staterr & IXGBE_RXD_STAT_VP) {
sendmp->m_pkthdr.ether_vtag = vtag;
sendmp->m_flags |= M_VLANTAG;
SET(sendmp->m_flags, M_VLANTAG);
}
#endif
}
sendmp->m_pkthdr.len += mp->m_len;
/*
* This function iterates over interleaved descriptors.
* Thus, we reuse ph_mss as global segment counter per
* TCP connection, instead of introducing a new variable
* in m_pkthdr.
*/
if (rsccnt)
sendmp->m_pkthdr.ph_mss += rsccnt - 1;
/* Pass the head pointer on */
if (eop == 0) {
@@ -3275,6 +3272,10 @@ ixgbe_rxeof(struct rx_ring *rxr)
/* Calculate header size. */
ether_extract_headers(sendmp, &ext);
hdrlen = sizeof(*ext.eh);
#if NVLAN > 0
if (ext.evh)
hdrlen += ETHER_VLAN_ENCAP_LEN;
#endif
if (ext.ip4)
hdrlen += ext.ip4->ip_hl << 2;
if (ext.ip6)
@@ -3292,7 +3293,8 @@ ixgbe_rxeof(struct rx_ring *rxr)
* mark it as TSO, set a correct mss,
* and recalculate the TCP checksum.
*/
paylen = sendmp->m_pkthdr.len - hdrlen;
paylen = sendmp->m_pkthdr.len > hdrlen ?
sendmp->m_pkthdr.len - hdrlen : 0;
if (ext.tcp && paylen >= pkts) {
SET(sendmp->m_pkthdr.csum_flags,
M_TCP_TSO);
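For reference, a worked instance of the header-length accounting the hunks above perform for a VLAN-tagged IPv4/TCP frame (a sketch: option-free headers are assumed, and the TCP term is added by unchanged code below the visible hunk):
/*
 * hdrlen = sizeof(struct ether_header)   14
 *        + ETHER_VLAN_ENCAP_LEN           4   (only when ext.evh is set)
 *        + (ext.ip4->ip_hl << 2)         20
 *        + (ext.tcp->th_off << 2)        20   (unchanged code further down)
 *        = 58
 * paylen = sendmp->m_pkthdr.len > 58 ? sendmp->m_pkthdr.len - 58 : 0;
 */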

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: if_vio.c,v 1.24 2023/07/03 07:40:52 kn Exp $ */
/* $OpenBSD: if_vio.c,v 1.25 2023/07/28 16:54:48 dv Exp $ */
/*
* Copyright (c) 2012 Stefan Fritsch, Alexander Fiveg.
@@ -529,7 +529,7 @@ vio_attach(struct device *parent, struct device *self, void *aux)
vsc->sc_child = self;
vsc->sc_ipl = IPL_NET;
vsc->sc_vqs = &sc->sc_vq[0];
vsc->sc_config_change = 0;
vsc->sc_config_change = NULL;
vsc->sc_driver_features = VIRTIO_NET_F_MAC | VIRTIO_NET_F_STATUS |
VIRTIO_NET_F_CTRL_VQ | VIRTIO_NET_F_CTRL_RX |
VIRTIO_NET_F_MRG_RXBUF | VIRTIO_NET_F_CSUM |

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: vioblk.c,v 1.36 2023/05/29 08:13:35 sf Exp $ */
/* $OpenBSD: vioblk.c,v 1.37 2023/07/28 16:54:48 dv Exp $ */
/*
* Copyright (c) 2012 Stefan Fritsch.
@@ -174,7 +174,7 @@ vioblk_attach(struct device *parent, struct device *self, void *aux)
vsc->sc_vqs = &sc->sc_vq[0];
vsc->sc_nvqs = 1;
vsc->sc_config_change = 0;
vsc->sc_config_change = NULL;
if (vsc->sc_child)
panic("already attached to something else");
vsc->sc_child = self;

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: viocon.c,v 1.9 2023/05/29 08:13:35 sf Exp $ */
/* $OpenBSD: viocon.c,v 1.10 2023/07/28 16:54:48 dv Exp $ */
/*
* Copyright (c) 2013-2015 Stefan Fritsch <sf@sfritsch.de>
@@ -180,7 +180,7 @@ viocon_attach(struct device *parent, struct device *self, void *aux)
panic("already attached to something else");
vsc->sc_child = self;
vsc->sc_ipl = IPL_TTY;
vsc->sc_config_change = 0;
vsc->sc_config_change = NULL;
sc->sc_virtio = vsc;
sc->sc_max_ports = maxports;

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: viornd.c,v 1.6 2023/05/29 08:13:35 sf Exp $ */
/* $OpenBSD: viornd.c,v 1.7 2023/07/28 16:54:48 dv Exp $ */
/*
* Copyright (c) 2014 Stefan Fritsch <sf@sfritsch.de>
@@ -88,7 +88,7 @@ viornd_attach(struct device *parent, struct device *self, void *aux)
vsc->sc_vqs = &sc->sc_vq;
vsc->sc_nvqs = 1;
vsc->sc_config_change = 0;
vsc->sc_config_change = NULL;
if (vsc->sc_child != NULL)
panic("already attached to something else");
vsc->sc_child = self;

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: kern_clockintr.c,v 1.28 2023/07/25 18:16:19 cheloha Exp $ */
/* $OpenBSD: kern_clockintr.c,v 1.29 2023/07/27 17:52:53 cheloha Exp $ */
/*
* Copyright (c) 2003 Dale Rahn <drahn@openbsd.org>
* Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
@@ -24,7 +24,9 @@
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/resourcevar.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/stdint.h>
#include <sys/sysctl.h>
#include <sys/time.h>
@@ -104,6 +106,7 @@ clockintr_cpu_init(const struct intrclock *ic)
uint64_t multiplier = 0;
struct cpu_info *ci = curcpu();
struct clockintr_queue *cq = &ci->ci_queue;
struct schedstate_percpu *spc = &ci->ci_schedstate;
int reset_cq_intrclock = 0;
KASSERT(ISSET(clockintr_flags, CL_INIT));
@@ -189,6 +192,15 @@ clockintr_cpu_init(const struct intrclock *ic)
clockintr_advance(cq->cq_schedclock, schedclock_period);
}
/*
* XXX Need to find a better place to do this. We can't do it in
* sched_init_cpu() because initclocks() runs after it.
*/
if (spc->spc_profclock->cl_expiration == 0) {
clockintr_stagger(spc->spc_profclock, profclock_period,
multiplier, MAXCPUS);
}
if (reset_cq_intrclock)
SET(cq->cq_flags, CQ_INTRCLOCK);
}

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: kern_sched.c,v 1.80 2023/07/25 18:16:19 cheloha Exp $ */
/* $OpenBSD: kern_sched.c,v 1.81 2023/07/27 17:52:53 cheloha Exp $ */
/*
* Copyright (c) 2007, 2008 Artur Grabowski <art@openbsd.org>
*
@@ -92,8 +92,6 @@ sched_init_cpu(struct cpu_info *ci)
profclock);
if (spc->spc_profclock == NULL)
panic("%s: clockintr_establish profclock", __func__);
clockintr_stagger(spc->spc_profclock, profclock_period,
CPU_INFO_UNIT(ci), MAXCPUS);
}
kthread_create_deferred(sched_kthreads_create, ci);

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: kern_task.c,v 1.33 2022/08/15 11:38:35 mvs Exp $ */
/* $OpenBSD: kern_task.c,v 1.34 2023/07/29 06:52:08 anton Exp $ */
/*
* Copyright (c) 2013 David Gwynne <dlg@openbsd.org>
@@ -363,7 +363,8 @@ task_add(struct taskq *tq, struct task *w)
SET(w->t_flags, TASK_ONQUEUE);
TAILQ_INSERT_TAIL(&tq->tq_worklist, w, t_entry);
#if NKCOV > 0
w->t_process = curproc->p_p;
if (!kcov_cold)
w->t_process = curproc->p_p;
#endif
}
mtx_leave(&tq->tq_mtx);

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: kern_timeout.c,v 1.94 2023/07/14 07:07:08 claudio Exp $ */
/* $OpenBSD: kern_timeout.c,v 1.95 2023/07/29 06:52:08 anton Exp $ */
/*
* Copyright (c) 2001 Thomas Nordin <nordin@openbsd.org>
* Copyright (c) 2000-2001 Artur Grabowski <art@openbsd.org>
@@ -307,7 +307,8 @@ timeout_add(struct timeout *new, int to_ticks)
CIRCQ_INSERT_TAIL(&timeout_new, &new->to_list);
}
#if NKCOV > 0
new->to_process = curproc->p_p;
if (!kcov_cold)
new->to_process = curproc->p_p;
#endif
tostat.tos_added++;
mtx_leave(&timeout_mutex);
@@ -406,7 +407,8 @@ timeout_abs_ts(struct timeout *to, const struct timespec *abstime)
CIRCQ_INSERT_TAIL(&timeout_new, &to->to_list);
}
#if NKCOV > 0
to->to_process = curproc->p_p;
if (!kcov_cold)
to->to_process = curproc->p_p;
#endif
tostat.tos_added++;

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: if_ethersubr.c,v 1.290 2023/07/06 19:46:53 kn Exp $ */
/* $OpenBSD: if_ethersubr.c,v 1.291 2023/07/27 20:21:25 jan Exp $ */
/* $NetBSD: if_ethersubr.c,v 1.19 1996/05/07 02:40:30 thorpej Exp $ */
/*
@@ -1040,6 +1040,7 @@ ether_extract_headers(struct mbuf *mp, struct ether_extracted *ext)
uint64_t hlen;
int hoff;
uint8_t ipproto;
uint16_t ether_type;
/* Return NULL if header was not recognized. */
memset(ext, 0, sizeof(*ext));
@@ -1048,9 +1049,20 @@ ether_extract_headers(struct mbuf *mp, struct ether_extracted *ext)
return;
ext->eh = mtod(mp, struct ether_header *);
switch (ntohs(ext->eh->ether_type)) {
ether_type = ntohs(ext->eh->ether_type);
hlen = sizeof(*ext->eh);
#if NVLAN > 0
if (ether_type == ETHERTYPE_VLAN) {
ext->evh = mtod(mp, struct ether_vlan_header *);
ether_type = ntohs(ext->evh->evl_proto);
hlen = sizeof(*ext->evh);
}
#endif
switch (ether_type) {
case ETHERTYPE_IP:
m = m_getptr(mp, sizeof(*ext->eh), &hoff);
m = m_getptr(mp, hlen, &hoff);
if (m == NULL || m->m_len - hoff < sizeof(*ext->ip4))
return;
ext->ip4 = (struct ip *)(mtod(m, caddr_t) + hoff);
@@ -1064,7 +1076,7 @@ ether_extract_headers(struct mbuf *mp, struct ether_extracted *ext)
break;
#ifdef INET6
case ETHERTYPE_IPV6:
m = m_getptr(mp, sizeof(*ext->eh), &hoff);
m = m_getptr(mp, hlen, &hoff);
if (m == NULL || m->m_len - hoff < sizeof(*ext->ip6))
return;
ext->ip6 = (struct ip6_hdr *)(mtod(m, caddr_t) + hoff);

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: rtsock.c,v 1.367 2023/06/26 07:52:18 claudio Exp $ */
/* $OpenBSD: rtsock.c,v 1.369 2023/07/28 09:33:16 mvs Exp $ */
/* $NetBSD: rtsock.c,v 1.18 1996/03/29 00:32:10 cgd Exp $ */
/*
@@ -690,7 +690,7 @@ route_output(struct mbuf *m, struct socket *so)
u_char vers, type;
if (m == NULL || ((m->m_len < sizeof(int32_t)) &&
(m = m_pullup(m, sizeof(int32_t))) == 0))
(m = m_pullup(m, sizeof(int32_t))) == NULL))
return (ENOBUFS);
if ((m->m_flags & M_PKTHDR) == 0)
panic("route_output");
@@ -705,7 +705,8 @@ route_output(struct mbuf *m, struct socket *so)
sounlock(so);
len = m->m_pkthdr.len;
if (len < offsetof(struct rt_msghdr, rtm_hdrlen) + 1 ||
if (len < offsetof(struct rt_msghdr, rtm_hdrlen) +
sizeof(rtm->rtm_hdrlen) ||
len != mtod(m, struct rt_msghdr *)->rtm_msglen) {
error = EINVAL;
goto fail;

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: if_ether.h,v 1.89 2023/07/06 19:46:53 kn Exp $ */
/* $OpenBSD: if_ether.h,v 1.90 2023/07/27 20:21:25 jan Exp $ */
/* $NetBSD: if_ether.h,v 1.22 1996/05/11 13:00:00 mycroft Exp $ */
/*
@@ -301,11 +301,12 @@ uint64_t ether_addr_to_e64(const struct ether_addr *);
void ether_e64_to_addr(struct ether_addr *, uint64_t);
struct ether_extracted {
struct ether_header *eh;
struct ip *ip4;
struct ip6_hdr *ip6;
struct tcphdr *tcp;
struct udphdr *udp;
struct ether_header *eh;
struct ether_vlan_header *evh;
struct ip *ip4;
struct ip6_hdr *ip6;
struct tcphdr *tcp;
struct udphdr *udp;
};
void ether_extract_headers(struct mbuf *, struct ether_extracted *);

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: nd6_nbr.c,v 1.149 2023/05/07 16:23:24 bluhm Exp $ */
/* $OpenBSD: nd6_nbr.c,v 1.150 2023/07/29 15:59:27 krw Exp $ */
/* $KAME: nd6_nbr.c,v 1.61 2001/02/10 16:06:14 jinmei Exp $ */
/*
@@ -1160,8 +1160,7 @@ nd6_dad_timer(void *xifa)
{
struct ifaddr *ifa = xifa;
struct in6_ifaddr *ia6 = ifatoia6(ifa);
struct in6_addr taddr6 = ia6->ia_addr.sin6_addr;
struct in6_addr daddr6;
struct in6_addr daddr6, taddr6;
struct ifnet *ifp = ifa->ifa_ifp;
struct dadq *dp;
char addr[INET6_ADDRSTRLEN];
@@ -1173,6 +1172,7 @@ nd6_dad_timer(void *xifa)
log(LOG_ERR, "%s: called with null parameter\n", __func__);
goto done;
}
taddr6 = ia6->ia_addr.sin6_addr;
dp = nd6_dad_find(ifa);
if (dp == NULL) {
log(LOG_ERR, "%s: DAD structure not found\n", __func__);

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: kcov.h,v 1.8 2021/12/29 07:15:13 anton Exp $ */
/* $OpenBSD: kcov.h,v 1.9 2023/07/29 06:52:08 anton Exp $ */
/*
* Copyright (c) 2018 Anton Lindqvist <anton@openbsd.org>
@@ -41,6 +41,8 @@ struct kio_remote_attach {
struct proc;
extern int kcov_cold;
void kcov_exit(struct proc *);
int kcov_vnode(struct vnode *);
void kcov_remote_register(int, void *);