sync code with latest improvements from OpenBSD

purplerain 2023-09-13 16:41:13 +00:00
parent 256236394b
commit 6b03483410
Signed by: purplerain
GPG key ID: F42C07F07E2E35B7
31 changed files with 409 additions and 280 deletions


@@ -1574,17 +1574,8 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
  u16 bridge_cfg2, gpu_cfg2;
  u32 max_lw, current_lw, tmp;
- pcie_capability_read_word(root, PCI_EXP_LNKCTL,
- &bridge_cfg);
- pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL,
- &gpu_cfg);
- tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
- pcie_capability_write_word(root, PCI_EXP_LNKCTL, tmp16);
- tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
- pcie_capability_write_word(adev->pdev, PCI_EXP_LNKCTL,
- tmp16);
+ pcie_capability_set_word(root, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
+ pcie_capability_set_word(adev->pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
  tmp = RREG32_PCIE(ixPCIE_LC_STATUS1);
  max_lw = (tmp & PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH_MASK) >>
@@ -1637,21 +1628,14 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
  drm_msleep(100);
  /* linkctl */
- pcie_capability_read_word(root, PCI_EXP_LNKCTL,
- &tmp16);
- tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
- tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
- pcie_capability_write_word(root, PCI_EXP_LNKCTL,
- tmp16);
- pcie_capability_read_word(adev->pdev,
- PCI_EXP_LNKCTL,
- &tmp16);
- tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
- tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
- pcie_capability_write_word(adev->pdev,
- PCI_EXP_LNKCTL,
- tmp16);
+ pcie_capability_clear_and_set_word(root, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_HAWD,
+ bridge_cfg &
+ PCI_EXP_LNKCTL_HAWD);
+ pcie_capability_clear_and_set_word(adev->pdev, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_HAWD,
+ gpu_cfg &
+ PCI_EXP_LNKCTL_HAWD);
  /* linkctl2 */
  pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
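
The pattern above repeats in si.c and in the radeon counterparts further down: an open-coded read/modify/write of the Link Control register collapses into a single helper call. A minimal sketch of the equivalence (variable names illustrative):

    u16 tmp16;

    /* before: three steps, and the value can go stale between them */
    pcie_capability_read_word(root, PCI_EXP_LNKCTL, &tmp16);
    tmp16 |= PCI_EXP_LNKCTL_HAWD;
    pcie_capability_write_word(root, PCI_EXP_LNKCTL, tmp16);

    /* after: one call, the helper performs the read-modify-write */
    pcie_capability_set_word(root, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);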


@@ -1229,6 +1229,9 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
  u16 cmd;
  int r;
+ if (!IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
+ return 0;
  /* Bypass for VF */
  if (amdgpu_sriov_vf(adev))
  return 0;
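
For context, IS_ENABLED() folds to a compile-time 0 or 1 (1 when the option is built in or modular), so on configurations without 64-bit physical addressing the whole resize path becomes dead code. A hedged sketch of the idiom, with the actual resize logic elided:

    static int example_resize_fb_bar(void)
    {
            /* resolved at compile time; the compiler drops the rest */
            if (!IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
                    return 0;   /* a BAR above 4 GB could not be mapped */
            /* ... proceed with the actual BAR resize ... */
            return 0;
    }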


@@ -558,6 +558,9 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
  crtc = (struct drm_crtc *)minfo->crtcs[i];
  if (crtc && crtc->base.id == info->mode_crtc.id) {
  struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
  ui32 = amdgpu_crtc->crtc_id;
  found = 1;
  break;
@@ -576,7 +577,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
  if (ret)
  return ret;
- ret = copy_to_user(out, &ip, min((size_t)size, sizeof(ip)));
+ ret = copy_to_user(out, &ip, min_t(size_t, size, sizeof(ip)));
  return ret ? -EFAULT : 0;
  }
  case AMDGPU_INFO_HW_IP_COUNT: {
@@ -722,17 +723,18 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
  ? -EFAULT : 0;
  }
  case AMDGPU_INFO_READ_MMR_REG: {
- unsigned n, alloc_size;
+ unsigned int n, alloc_size;
  uint32_t *regs;
- unsigned se_num = (info->read_mmr_reg.instance >>
+ unsigned int se_num = (info->read_mmr_reg.instance >>
  AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
  AMDGPU_INFO_MMR_SE_INDEX_MASK;
- unsigned sh_num = (info->read_mmr_reg.instance >>
+ unsigned int sh_num = (info->read_mmr_reg.instance >>
  AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
  AMDGPU_INFO_MMR_SH_INDEX_MASK;
  /* set full masks if the userspace set all bits
- * in the bitfields */
+ * in the bitfields
+ */
  if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
  se_num = 0xffffffff;
  else if (se_num >= AMDGPU_GFX_MAX_SE)
@@ -856,7 +858,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
  return ret;
  }
  case AMDGPU_INFO_VCE_CLOCK_TABLE: {
- unsigned i;
+ unsigned int i;
  struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
  struct amd_vce_state *vce_state;
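
These are checkpatch-style cleanups: bare `unsigned` becomes `unsigned int`, block comments gain a closing line of their own, and the cast-plus-min() is replaced by min_t(), which names the comparison type explicitly. In miniature:

    /* before: cast one argument so both sides of min() have one type */
    ret = copy_to_user(out, &ip, min((size_t)size, sizeof(ip)));

    /* after: min_t() converts both arguments to the named type */
    ret = copy_to_user(out, &ip, min_t(size_t, size, sizeof(ip)));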


@@ -2276,17 +2276,8 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
  u16 bridge_cfg2, gpu_cfg2;
  u32 max_lw, current_lw, tmp;
- pcie_capability_read_word(root, PCI_EXP_LNKCTL,
- &bridge_cfg);
- pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL,
- &gpu_cfg);
- tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
- pcie_capability_write_word(root, PCI_EXP_LNKCTL, tmp16);
- tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
- pcie_capability_write_word(adev->pdev, PCI_EXP_LNKCTL,
- tmp16);
+ pcie_capability_set_word(root, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
+ pcie_capability_set_word(adev->pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
  tmp = RREG32_PCIE(PCIE_LC_STATUS1);
  max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
@@ -2331,21 +2322,14 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
  mdelay(100);
- pcie_capability_read_word(root, PCI_EXP_LNKCTL,
- &tmp16);
- tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
- tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
- pcie_capability_write_word(root, PCI_EXP_LNKCTL,
- tmp16);
- pcie_capability_read_word(adev->pdev,
- PCI_EXP_LNKCTL,
- &tmp16);
- tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
- tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
- pcie_capability_write_word(adev->pdev,
- PCI_EXP_LNKCTL,
- tmp16);
+ pcie_capability_clear_and_set_word(root, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_HAWD,
+ bridge_cfg &
+ PCI_EXP_LNKCTL_HAWD);
+ pcie_capability_clear_and_set_word(adev->pdev, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_HAWD,
+ gpu_cfg &
+ PCI_EXP_LNKCTL_HAWD);
  pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
  &tmp16);


@@ -147,14 +147,15 @@ static int psp_v13_0_wait_for_bootloader(struct psp_context *psp)
  int ret;
  int retry_loop;
+ /* Wait for bootloader to signify that it is ready having bit 31 of
+ * C2PMSG_35 set to 1. All other bits are expected to be cleared.
+ * If there is an error in processing command, bits[7:0] will be set.
+ * This is applicable for PSP v13.0.6 and newer.
+ */
  for (retry_loop = 0; retry_loop < 10; retry_loop++) {
- /* Wait for bootloader to signify that is
- ready having bit 31 of C2PMSG_35 set to 1 */
- ret = psp_wait_for(psp,
- SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
- 0x80000000,
- 0x80000000,
- false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
+ 0x80000000, 0xffffffff, false);
  if (ret == 0)
  return 0;
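
The functional change here is the mask argument: psp_wait_for() polls until (register & mask) == value. With mask 0x80000000 only bit 31 was compared, so a "ready" bit set alongside error bits in [7:0] still counted as success; with mask 0xffffffff the register must read exactly 0x80000000. A condensed sketch of that polling semantic, assuming a hypothetical read_reg() accessor (not the driver's actual helper):

    int wait_for_exact(uint32_t value, uint32_t mask)
    {
            int i;

            for (i = 0; i < 100000; i++) {
                    if ((read_reg() & mask) == value)
                            return 0;       /* all masked bits match */
                    udelay(1);
            }
            return -ETIMEDOUT;              /* illustrative error code */
    }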


@@ -5923,8 +5923,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
  */
  DRM_DEBUG_DRIVER("No preferred mode found\n");
  } else {
- recalculate_timing = amdgpu_freesync_vid_mode &&
- is_freesync_video_mode(&mode, aconnector);
+ recalculate_timing = is_freesync_video_mode(&mode, aconnector);
  if (recalculate_timing) {
  freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
  drm_mode_copy(&saved_mode, &mode);
@@ -7018,7 +7017,7 @@ static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connect
  struct amdgpu_dm_connector *amdgpu_dm_connector =
  to_amdgpu_dm_connector(connector);
- if (!(amdgpu_freesync_vid_mode && edid))
+ if (!edid)
  return;
  if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
@@ -7863,10 +7862,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
  * fast updates.
  */
  if (crtc->state->async_flip &&
- acrtc_state->update_type != UPDATE_TYPE_FAST)
+ (acrtc_state->update_type != UPDATE_TYPE_FAST ||
+ get_mem_type(old_plane_state->fb) != get_mem_type(fb)))
  drm_warn_once(state->dev,
  "[PLANE:%d:%s] async flip with non-fast update\n",
  plane->base.id, plane->name);
  bundle->flip_addrs[planes_count].flip_immediate =
  crtc->state->async_flip &&
  acrtc_state->update_type == UPDATE_TYPE_FAST &&
@@ -9026,8 +9027,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
  * TODO: Refactor this function to allow this check to work
  * in all conditions.
  */
- if (amdgpu_freesync_vid_mode &&
- dm_new_crtc_state->stream &&
+ if (dm_new_crtc_state->stream &&
  is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
  goto skip_modeset;
@@ -9067,7 +9067,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
  }
  /* Now check if we should set freesync video mode */
- if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
+ if (dm_new_crtc_state->stream &&
  dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
  dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream) &&
  is_timing_unchanged_for_freesync(new_crtc_state,
@@ -9080,7 +9080,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
  set_freesync_fixed_config(dm_new_crtc_state);
  goto skip_modeset;
- } else if (amdgpu_freesync_vid_mode && aconnector &&
+ } else if (aconnector &&
  is_freesync_video_mode(&new_crtc_state->mode,
  aconnector)) {
  struct drm_display_mode *high_mode;
@@ -9819,6 +9819,11 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
  /* Remove exiting planes if they are modified */
  for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
+ if (old_plane_state->fb && new_plane_state->fb &&
+ get_mem_type(old_plane_state->fb) !=
+ get_mem_type(new_plane_state->fb))
+ lock_and_validation_needed = true;
  ret = dm_update_plane_state(dc, state, plane,
  old_plane_state,
  new_plane_state,
@@ -10070,9 +10075,20 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
  struct dm_crtc_state *dm_new_crtc_state =
  to_dm_crtc_state(new_crtc_state);
+ /*
+ * Only allow async flips for fast updates that don't change
+ * the FB pitch, the DCC state, rotation, etc.
+ */
+ if (new_crtc_state->async_flip && lock_and_validation_needed) {
+ drm_dbg_atomic(crtc->dev,
+ "[CRTC:%d:%s] async flips are only supported for fast updates\n",
+ crtc->base.id, crtc->name);
+ ret = -EINVAL;
+ goto fail;
+ }
  dm_new_crtc_state->update_type = lock_and_validation_needed ?
- UPDATE_TYPE_FULL :
- UPDATE_TYPE_FAST;
+ UPDATE_TYPE_FULL : UPDATE_TYPE_FAST;
  }
  /* Must be success */
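
The thread running through these hunks: the "async flips only for fast updates" rejection moves out of the CRTC helper (removed in the next file) into amdgpu_dm_atomic_check(), and a framebuffer that changed memory domains now forces full validation. A sketch of the domain comparison, under the assumption that get_mem_type() reports the placement (e.g. VRAM vs. GTT) of a framebuffer's backing object; the driver open-codes this in the plane loop above rather than using a helper:

    /* illustrative helper only */
    static bool fb_domain_changed(struct drm_framebuffer *old_fb,
                                  struct drm_framebuffer *new_fb)
    {
            /* a flip between VRAM and GTT cannot be treated as fast */
            return old_fb && new_fb &&
                   get_mem_type(old_fb) != get_mem_type(new_fb);
    }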


@@ -406,18 +406,6 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
  return -EINVAL;
  }
- /*
- * Only allow async flips for fast updates that don't change the FB
- * pitch, the DCC state, rotation, etc.
- */
- if (crtc_state->async_flip &&
- dm_crtc_state->update_type != UPDATE_TYPE_FAST) {
- drm_dbg_atomic(crtc->dev,
- "[CRTC:%d:%s] async flips are only supported for fast updates\n",
- crtc->base.id, crtc->name);
- return -EINVAL;
- }
  /* In some use cases, like reset, no stream is attached */
  if (!dm_crtc_state->stream)
  return 0;


@@ -32,6 +32,7 @@
  #define MAX_INSTANCE 6
  #define MAX_SEGMENT 6
+ #define SMU_REGISTER_WRITE_RETRY_COUNT 5
  struct IP_BASE_INSTANCE
  {
@@ -134,6 +135,8 @@ static int dcn315_smu_send_msg_with_param(
  unsigned int msg_id, unsigned int param)
  {
  uint32_t result;
+ uint32_t i = 0;
+ uint32_t read_back_data;
  result = dcn315_smu_wait_for_response(clk_mgr, 10, 200000);
@@ -150,10 +153,19 @@ static int dcn315_smu_send_msg_with_param(
  /* Set the parameter register for the SMU message, unit is Mhz */
  REG_WRITE(MP1_SMN_C2PMSG_37, param);
- /* Trigger the message transaction by writing the message ID */
- generic_write_indirect_reg(CTX,
- REG_NBIO(RSMU_INDEX), REG_NBIO(RSMU_DATA),
- mmMP1_C2PMSG_3, msg_id);
+ for (i = 0; i < SMU_REGISTER_WRITE_RETRY_COUNT; i++) {
+ /* Trigger the message transaction by writing the message ID */
+ generic_write_indirect_reg(CTX,
+ REG_NBIO(RSMU_INDEX), REG_NBIO(RSMU_DATA),
+ mmMP1_C2PMSG_3, msg_id);
+ read_back_data = generic_read_indirect_reg(CTX,
+ REG_NBIO(RSMU_INDEX), REG_NBIO(RSMU_DATA),
+ mmMP1_C2PMSG_3);
+ if (read_back_data == msg_id)
+ break;
+ udelay(2);
+ smu_print("SMU msg id write fail %x times. \n", i + 1);
+ }
  result = dcn315_smu_wait_for_response(clk_mgr, 10, 200000);
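
The new loop is a bounded write-and-verify: post the message ID through the indirect register pair, read it back, and retry (up to SMU_REGISTER_WRITE_RETRY_COUNT times) if the readback does not match. The general shape of the idiom, with illustrative names standing in for the generic_write_indirect_reg()/generic_read_indirect_reg() pair:

    int write_verified(uint32_t msg_id, int retries)
    {
            int i;

            for (i = 0; i < retries; i++) {
                    write_msg_id(msg_id);           /* post the write */
                    if (read_msg_id() == msg_id)
                            return 1;               /* write stuck */
                    udelay(2);                      /* brief pause, retry */
            }
            return 0;                               /* never landed */
    }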


@@ -1813,10 +1813,13 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
  hws->funcs.edp_backlight_control(edp_link_with_sink, false);
  }
  /*resume from S3, no vbios posting, no need to power down again*/
+ clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr);
  power_down_all_hw_blocks(dc);
  disable_vga_and_power_gate_all_controllers(dc);
  if (edp_link_with_sink && !keep_edp_vdd_on)
  dc->hwss.edp_power_control(edp_link_with_sink, false);
+ clk_mgr_optimize_pwr_state(dc, dc->clk_mgr);
  }
  bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 1);
  }


@@ -75,6 +75,7 @@ static const struct hw_sequencer_funcs dcn301_funcs = {
  .get_hw_state = dcn10_get_hw_state,
  .clear_status_bits = dcn10_clear_status_bits,
  .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+ .edp_backlight_control = dce110_edp_backlight_control,
  .edp_power_control = dce110_edp_power_control,
  .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
  .set_cursor_position = dcn10_set_cursor_position,


@@ -84,7 +84,8 @@ static enum phyd32clk_clock_source get_phy_mux_symclk(
  struct dcn_dccg *dccg_dcn,
  enum phyd32clk_clock_source src)
  {
- if (dccg_dcn->base.ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
+ if (dccg_dcn->base.ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
+ dccg_dcn->base.ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
  if (src == PHYD32CLKC)
  src = PHYD32CLKF;
  if (src == PHYD32CLKD)


@@ -32,7 +32,7 @@
  #include "dml/display_mode_vba.h"
  struct _vcs_dpi_ip_params_st dcn3_14_ip = {
- .VBlankNomDefaultUS = 800,
+ .VBlankNomDefaultUS = 668,
  .gpuvm_enable = 1,
  .gpuvm_max_page_table_levels = 1,
  .hostvm_enable = 1,


@@ -2074,15 +2074,19 @@ static int amdgpu_device_attr_create(struct amdgpu_device *adev,
  uint32_t mask, struct list_head *attr_list)
  {
  int ret = 0;
- struct device_attribute *dev_attr = &attr->dev_attr;
- const char *name = dev_attr->attr.name;
  enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED;
  struct amdgpu_device_attr_entry *attr_entry;
+ struct device_attribute *dev_attr;
+ const char *name;
  int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
  uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update;
- BUG_ON(!attr);
+ if (!attr)
+ return -EINVAL;
+ dev_attr = &attr->dev_attr;
+ name = dev_attr->attr.name;
  attr_update = attr->attr_update ? attr->attr_update : default_attr_update;
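
The fix replaces a BUG_ON() that fired only after `attr` had already been used to initialize `dev_attr` and `name`. Static analysis flags that as "variable dereferenced before check"; the rewrite validates first, derives the pointers afterwards, and returns -EINVAL instead of panicking. In miniature:

    /* before: attr is used, then checked -- the check comes too late */
    struct device_attribute *dev_attr = &attr->dev_attr;
    BUG_ON(!attr);

    /* after: check, then use */
    if (!attr)
            return -EINVAL;
    dev_attr = &attr->dev_attr;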


@@ -1307,7 +1307,7 @@ static ssize_t smu_v13_0_0_get_gpu_metrics(struct smu_context *smu,
  gpu_metrics->average_vclk1_frequency = metrics->AverageVclk1Frequency;
  gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency;
- gpu_metrics->current_gfxclk = metrics->CurrClock[PPCLK_GFXCLK];
+ gpu_metrics->current_gfxclk = gpu_metrics->average_gfxclk_frequency;
  gpu_metrics->current_socclk = metrics->CurrClock[PPCLK_SOCCLK];
  gpu_metrics->current_uclk = metrics->CurrClock[PPCLK_UCLK];
  gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0];


@@ -2,7 +2,7 @@
  #define _DRM_DEVICE_H_
  #include <sys/types.h>
- #include <sys/selinfo.h>
+ #include <sys/event.h>
  #include <linux/list.h>
  #include <linux/kref.h>


@@ -1,4 +1,4 @@
- /* $OpenBSD: pci.h,v 1.13 2023/01/01 01:34:58 jsg Exp $ */
+ /* $OpenBSD: pci.h,v 1.14 2023/09/13 12:31:49 jsg Exp $ */
  /*
  * Copyright (c) 2015 Mark Kettenis
  *
@@ -305,6 +305,27 @@ pcie_capability_write_word(struct pci_dev *pdev, int off, u16 val)
  return 0;
  }
+ static inline int
+ pcie_capability_set_word(struct pci_dev *pdev, int off, u16 val)
+ {
+ u16 r;
+ pcie_capability_read_word(pdev, off, &r);
+ r |= val;
+ pcie_capability_write_word(pdev, off, r);
+ return 0;
+ }
+ static inline int
+ pcie_capability_clear_and_set_word(struct pci_dev *pdev, int off, u16 c, u16 s)
+ {
+ u16 r;
+ pcie_capability_read_word(pdev, off, &r);
+ r &= ~c;
+ r |= s;
+ pcie_capability_write_word(pdev, off, r);
+ return 0;
+ }
  static inline int
  pcie_get_readrq(struct pci_dev *pdev)
  {
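
These two compat helpers back the LNKCTL conversions above. As the code shows, they are plain read/modify/write sequences with no locking; Linux's counterparts may additionally serialize concurrent RMW on the same register, which the drm code here does not need. Usage sketch (`saved_cfg` is an illustrative name for a previously saved register value):

    /* set a bit */
    pcie_capability_set_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);

    /* restore one saved bit: clear it, then set whatever was saved */
    pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
        PCI_EXP_LNKCTL_HAWD, saved_cfg & PCI_EXP_LNKCTL_HAWD);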


@@ -9536,17 +9536,8 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
  u16 bridge_cfg2, gpu_cfg2;
  u32 max_lw, current_lw, tmp;
- pcie_capability_read_word(root, PCI_EXP_LNKCTL,
- &bridge_cfg);
- pcie_capability_read_word(rdev->pdev, PCI_EXP_LNKCTL,
- &gpu_cfg);
- tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
- pcie_capability_write_word(root, PCI_EXP_LNKCTL, tmp16);
- tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
- pcie_capability_write_word(rdev->pdev, PCI_EXP_LNKCTL,
- tmp16);
+ pcie_capability_set_word(root, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
+ pcie_capability_set_word(rdev->pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
  tmp = RREG32_PCIE_PORT(PCIE_LC_STATUS1);
  max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
@@ -9593,21 +9584,14 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
  drm_msleep(100);
  /* linkctl */
- pcie_capability_read_word(root, PCI_EXP_LNKCTL,
- &tmp16);
- tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
- tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
- pcie_capability_write_word(root, PCI_EXP_LNKCTL,
- tmp16);
- pcie_capability_read_word(rdev->pdev,
- PCI_EXP_LNKCTL,
- &tmp16);
- tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
- tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
- pcie_capability_write_word(rdev->pdev,
- PCI_EXP_LNKCTL,
- tmp16);
+ pcie_capability_clear_and_set_word(root, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_HAWD,
+ bridge_cfg &
+ PCI_EXP_LNKCTL_HAWD);
+ pcie_capability_clear_and_set_word(rdev->pdev, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_HAWD,
+ gpu_cfg &
+ PCI_EXP_LNKCTL_HAWD);
  /* linkctl2 */
  pcie_capability_read_word(root, PCI_EXP_LNKCTL2,


@@ -1500,7 +1500,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
  } else if (of_machine_is_compatible("PowerMac3,5")) {
  /* PowerMac G4 Silver radeon 7500 */
  rdev->mode_info.connector_table = CT_MAC_G4_SILVER;
- } else if (of_machine_is_compatible("PowerMac4,4")) {
+ } else if (of_machine_is_compatible("PowerMac4,4") ||
+ of_machine_is_compatible("PowerMac6,4")) {
  /* emac */
  rdev->mode_info.connector_table = CT_EMAC;
  } else if (of_machine_is_compatible("PowerMac10,1")) {


@@ -7133,17 +7133,8 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
  u16 bridge_cfg2, gpu_cfg2;
  u32 max_lw, current_lw, tmp;
- pcie_capability_read_word(root, PCI_EXP_LNKCTL,
- &bridge_cfg);
- pcie_capability_read_word(rdev->pdev, PCI_EXP_LNKCTL,
- &gpu_cfg);
- tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
- pcie_capability_write_word(root, PCI_EXP_LNKCTL, tmp16);
- tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
- pcie_capability_write_word(rdev->pdev, PCI_EXP_LNKCTL,
- tmp16);
+ pcie_capability_set_word(root, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
+ pcie_capability_set_word(rdev->pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
  tmp = RREG32_PCIE(PCIE_LC_STATUS1);
  max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
@@ -7190,22 +7181,14 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
  drm_msleep(100);
  /* linkctl */
- pcie_capability_read_word(root, PCI_EXP_LNKCTL,
- &tmp16);
- tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
- tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
- pcie_capability_write_word(root,
- PCI_EXP_LNKCTL,
- tmp16);
- pcie_capability_read_word(rdev->pdev,
- PCI_EXP_LNKCTL,
- &tmp16);
- tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
- tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
- pcie_capability_write_word(rdev->pdev,
- PCI_EXP_LNKCTL,
- tmp16);
+ pcie_capability_clear_and_set_word(root, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_HAWD,
+ bridge_cfg &
+ PCI_EXP_LNKCTL_HAWD);
+ pcie_capability_clear_and_set_word(rdev->pdev, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_HAWD,
+ gpu_cfg &
+ PCI_EXP_LNKCTL_HAWD);
  /* linkctl2 */
  pcie_capability_read_word(root, PCI_EXP_LNKCTL2,


@@ -1,4 +1,4 @@
- /* $OpenBSD: kern_exit.c,v 1.214 2023/09/08 09:06:31 claudio Exp $ */
+ /* $OpenBSD: kern_exit.c,v 1.215 2023/09/13 14:25:49 claudio Exp $ */
  /* $NetBSD: kern_exit.c,v 1.39 1996/04/22 01:38:25 christos Exp $ */
  /*
@@ -119,7 +119,7 @@ exit1(struct proc *p, int xexit, int xsig, int flags)
  struct process *pr, *qr, *nqr;
  struct rusage *rup;
  struct timespec ts;
- int s, wake;
+ int s;
  atomic_setbits_int(&p->p_flag, P_WEXIT);
@@ -157,22 +157,14 @@ exit1(struct proc *p, int xexit, int xsig, int flags)
  }
  /* unlink ourselves from the active threads */
- mtx_enter(&pr->ps_mtx);
+ SCHED_LOCK(s);
  TAILQ_REMOVE(&pr->ps_threads, p, p_thr_link);
- pr->ps_threadcnt--;
- wake = (pr->ps_single && pr->ps_singlecnt == pr->ps_threadcnt);
- mtx_leave(&pr->ps_mtx);
- if (wake)
- wakeup(&pr->ps_singlecnt);
+ SCHED_UNLOCK(s);
  if ((p->p_flag & P_THREAD) == 0) {
  /* main thread gotta wait because it has the pid, et al */
- mtx_enter(&pr->ps_mtx);
- while (pr->ps_threadcnt > 0)
- msleep_nsec(&pr->ps_threads, &pr->ps_mtx, PWAIT,
- "thrdeath", INFSLP);
- mtx_leave(&pr->ps_mtx);
+ while (pr->ps_threadcnt > 1)
+ tsleep_nsec(&pr->ps_threads, PWAIT, "thrdeath", INFSLP);
  if (pr->ps_flags & PS_PROFIL)
  stopprofclock(pr);
  }
@@ -345,10 +337,9 @@ exit1(struct proc *p, int xexit, int xsig, int flags)
  /* just a thread? detach it from its process */
  if (p->p_flag & P_THREAD) {
  /* scheduler_wait_hook(pr->ps_mainproc, p); XXX */
- mtx_enter(&pr->ps_mtx);
- if (pr->ps_threadcnt == 0)
+ if (--pr->ps_threadcnt == 1)
  wakeup(&pr->ps_threads);
- mtx_leave(&pr->ps_mtx);
+ KASSERT(pr->ps_threadcnt > 0);
  }
  /* Release the thread's read reference of resource limit structure. */
@@ -832,7 +823,7 @@ process_zap(struct process *pr)
  if (otvp)
  vrele(otvp);
- KASSERT(pr->ps_threadcnt == 0);
+ KASSERT(pr->ps_threadcnt == 1);
  if (pr->ps_ptstat != NULL)
  free(pr->ps_ptstat, M_SUBPROC, sizeof(*pr->ps_ptstat));
  pool_put(&rusage_pool, pr->ps_ru);
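
After this revert, ps_threadcnt once again counts every thread including the main one, so "all siblings gone" means threadcnt == 1, not 0 — hence the changed KASSERTs. A condensed restatement of the join protocol from the hunks above:

    /* exiting sibling (P_THREAD set): drop the count and wake the
     * main thread once it is the last one out */
    if (--pr->ps_threadcnt == 1)
            wakeup(&pr->ps_threads);

    /* main thread in exit1(): wait until only itself remains */
    while (pr->ps_threadcnt > 1)
            tsleep_nsec(&pr->ps_threads, PWAIT, "thrdeath", INFSLP);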


@@ -1,4 +1,4 @@
- /* $OpenBSD: kern_fork.c,v 1.251 2023/09/08 09:06:31 claudio Exp $ */
+ /* $OpenBSD: kern_fork.c,v 1.252 2023/09/13 14:25:49 claudio Exp $ */
  /* $NetBSD: kern_fork.c,v 1.29 1996/02/09 18:59:34 christos Exp $ */
  /*
@@ -519,7 +519,7 @@ thread_fork(struct proc *curp, void *stack, void *tcb, pid_t *tidptr,
  struct proc *p;
  pid_t tid;
  vaddr_t uaddr;
- int error;
+ int s, error;
  if (stack == NULL)
  return EINVAL;
@@ -543,6 +543,7 @@ thread_fork(struct proc *curp, void *stack, void *tcb, pid_t *tidptr,
  /* other links */
  p->p_p = pr;
+ pr->ps_threadcnt++;
  /* local copies */
  p->p_fd = pr->ps_fd;
@@ -561,17 +562,18 @@ thread_fork(struct proc *curp, void *stack, void *tcb, pid_t *tidptr,
  LIST_INSERT_HEAD(&allproc, p, p_list);
  LIST_INSERT_HEAD(TIDHASH(p->p_tid), p, p_hash);
- mtx_enter(&pr->ps_mtx);
+ SCHED_LOCK(s);
  TAILQ_INSERT_TAIL(&pr->ps_threads, p, p_thr_link);
- pr->ps_threadcnt++;
  /*
  * if somebody else wants to take us to single threaded mode,
  * count ourselves in.
  */
- if (pr->ps_single)
+ if (pr->ps_single) {
+ atomic_inc_int(&pr->ps_singlecount);
  atomic_setbits_int(&p->p_flag, P_SUSPSINGLE);
- mtx_leave(&pr->ps_mtx);
+ }
+ SCHED_UNLOCK(s);
  /*
  * Return tid to parent thread and copy it out to userspace


@@ -1,4 +1,4 @@
- /* $OpenBSD: kern_resource.c,v 1.79 2023/09/08 09:06:31 claudio Exp $ */
+ /* $OpenBSD: kern_resource.c,v 1.80 2023/09/13 14:25:49 claudio Exp $ */
  /* $NetBSD: kern_resource.c,v 1.38 1996/10/23 07:19:38 matthias Exp $ */
  /*-
@@ -212,13 +212,11 @@ donice(struct proc *curp, struct process *chgpr, int n)
  if (n < chgpr->ps_nice && suser(curp))
  return (EACCES);
  chgpr->ps_nice = n;
- mtx_enter(&chgpr->ps_mtx);
+ SCHED_LOCK(s);
  TAILQ_FOREACH(p, &chgpr->ps_threads, p_thr_link) {
- SCHED_LOCK(s);
  setpriority(p, p->p_estcpu, n);
- SCHED_UNLOCK(s);
  }
- mtx_leave(&chgpr->ps_mtx);
+ SCHED_UNLOCK(s);
  return (0);
  }
@@ -478,9 +476,8 @@ dogetrusage(struct proc *p, int who, struct rusage *rup)
  struct process *pr = p->p_p;
  struct proc *q;
- KERNEL_ASSERT_LOCKED();
  switch (who) {
  case RUSAGE_SELF:
  /* start with the sum of dead threads, if any */
  if (pr->ps_ru != NULL)


@@ -1,4 +1,4 @@
- /* $OpenBSD: kern_sig.c,v 1.316 2023/09/09 14:50:09 claudio Exp $ */
+ /* $OpenBSD: kern_sig.c,v 1.317 2023/09/13 14:25:49 claudio Exp $ */
  /* $NetBSD: kern_sig.c,v 1.54 1996/04/22 01:38:32 christos Exp $ */
  /*
@@ -2008,12 +2008,11 @@ userret(struct proc *p)
  }
  int
- single_thread_check_locked(struct proc *p, int deep)
+ single_thread_check_locked(struct proc *p, int deep, int s)
  {
  struct process *pr = p->p_p;
- int s, wake;
- MUTEX_ASSERT_LOCKED(&pr->ps_mtx);
+ SCHED_ASSERT_LOCKED();
  if (pr->ps_single == NULL || pr->ps_single == p)
  return (0);
@@ -2027,24 +2026,19 @@ single_thread_check_locked(struct proc *p, int deep)
  return (EINTR);
  }
+ if (atomic_dec_int_nv(&pr->ps_singlecount) == 0)
+ wakeup(&pr->ps_singlecount);
  if (pr->ps_flags & PS_SINGLEEXIT) {
- mtx_leave(&pr->ps_mtx);
+ SCHED_UNLOCK(s);
  KERNEL_LOCK();
  exit1(p, 0, 0, EXIT_THREAD_NOCHECK);
  /* NOTREACHED */
  }
  /* not exiting and don't need to unwind, so suspend */
- wake = (++pr->ps_singlecnt == pr->ps_threadcnt);
- mtx_leave(&pr->ps_mtx);
- if (wake)
- wakeup(&pr->ps_singlecnt);
- SCHED_LOCK(s);
  p->p_stat = SSTOP;
  mi_switch();
- SCHED_UNLOCK(s);
- mtx_enter(&pr->ps_mtx);
  } while (pr->ps_single != NULL);
  return (0);
@@ -2053,11 +2047,11 @@ single_thread_check_locked(struct proc *p, int deep)
  int
  single_thread_check(struct proc *p, int deep)
  {
- int error;
+ int s, error;
- mtx_enter(&p->p_p->ps_mtx);
- error = single_thread_check_locked(p, deep);
- mtx_leave(&p->p_p->ps_mtx);
+ SCHED_LOCK(s);
+ error = single_thread_check_locked(p, deep, s);
+ SCHED_UNLOCK(s);
  return error;
  }
@@ -2077,14 +2071,13 @@ single_thread_set(struct proc *p, enum single_thread_mode mode, int wait)
  struct process *pr = p->p_p;
  struct proc *q;
  int error, s;
- u_int count = 0;
  KASSERT(curproc == p);
- mtx_enter(&pr->ps_mtx);
- error = single_thread_check_locked(p, (mode == SINGLE_UNWIND));
+ SCHED_LOCK(s);
+ error = single_thread_check_locked(p, (mode == SINGLE_UNWIND), s);
  if (error) {
- mtx_leave(&pr->ps_mtx);
+ SCHED_UNLOCK(s);
  return error;
  }
@@ -2103,25 +2096,26 @@ single_thread_set(struct proc *p, enum single_thread_mode mode, int wait)
  panic("single_thread_mode = %d", mode);
  #endif
  }
- pr->ps_singlecnt = 1; /* count ourselfs in already */
+ pr->ps_singlecount = 0;
+ membar_producer();
  pr->ps_single = p;
  TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
  if (q == p)
  continue;
  if (q->p_flag & P_WEXIT) {
- SCHED_LOCK(s);
- if (mode == SINGLE_EXIT && q->p_stat == SSTOP)
- setrunnable(q);
- SCHED_UNLOCK(s);
+ if (mode == SINGLE_EXIT) {
+ if (q->p_stat == SSTOP) {
+ setrunnable(q);
+ atomic_inc_int(&pr->ps_singlecount);
+ }
+ }
  continue;
  }
- SCHED_LOCK(s);
  atomic_setbits_int(&q->p_flag, P_SUSPSINGLE);
  switch (q->p_stat) {
  case SIDL:
- case SDEAD:
  case SRUN:
+ atomic_inc_int(&pr->ps_singlecount);
  break;
  case SSLEEP:
  /* if it's not interruptible, then just have to wait */
@@ -2129,29 +2123,28 @@ single_thread_set(struct proc *p, enum single_thread_mode mode, int wait)
  /* merely need to suspend? just stop it */
  if (mode == SINGLE_SUSPEND) {
  q->p_stat = SSTOP;
- count++;
  break;
  }
  /* need to unwind or exit, so wake it */
  setrunnable(q);
  }
+ atomic_inc_int(&pr->ps_singlecount);
  break;
  case SSTOP:
  if (mode == SINGLE_EXIT) {
  setrunnable(q);
- break;
+ atomic_inc_int(&pr->ps_singlecount);
  }
- count++;
  break;
+ case SDEAD:
+ break;
  case SONPROC:
+ atomic_inc_int(&pr->ps_singlecount);
  signotify(q);
  break;
  }
- SCHED_UNLOCK(s);
  }
- pr->ps_singlecnt += count;
- mtx_leave(&pr->ps_mtx);
+ SCHED_UNLOCK(s);
  if (wait)
  single_thread_wait(pr, 1);
@@ -2170,16 +2163,14 @@ single_thread_wait(struct process *pr, int recheck)
  int wait;
  /* wait until they're all suspended */
- mtx_enter(&pr->ps_mtx);
- wait = pr->ps_singlecnt < pr->ps_threadcnt;
+ wait = pr->ps_singlecount > 0;
  while (wait) {
- msleep_nsec(&pr->ps_singlecnt, &pr->ps_mtx, PWAIT, "suspend",
- INFSLP);
+ sleep_setup(&pr->ps_singlecount, PWAIT, "suspend");
+ wait = pr->ps_singlecount > 0;
+ sleep_finish(0, wait);
  if (!recheck)
  break;
- wait = pr->ps_singlecnt < pr->ps_threadcnt;
  }
- mtx_leave(&pr->ps_mtx);
  return wait;
  }
@@ -2194,10 +2185,9 @@ single_thread_clear(struct proc *p, int flag)
  KASSERT(pr->ps_single == p);
  KASSERT(curproc == p);
- mtx_enter(&pr->ps_mtx);
+ SCHED_LOCK(s);
  pr->ps_single = NULL;
  atomic_clearbits_int(&pr->ps_flags, PS_SINGLEUNWIND | PS_SINGLEEXIT);
  TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
  if (q == p || (q->p_flag & P_SUSPSINGLE) == 0)
  continue;
@@ -2208,7 +2198,6 @@ single_thread_clear(struct proc *p, int flag)
  * then clearing that either makes it runnable or puts
  * it back into some sleep queue
  */
- SCHED_LOCK(s);
  if (q->p_stat == SSTOP && (q->p_flag & flag) == 0) {
  if (q->p_wchan == NULL)
  setrunnable(q);
@@ -2217,9 +2206,8 @@ single_thread_clear(struct proc *p, int flag)
  q->p_stat = SSLEEP;
  }
  }
- SCHED_UNLOCK(s);
  }
- mtx_leave(&pr->ps_mtx);
+ SCHED_UNLOCK(s);
  }
  void
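
The single-threading revert returns to a count-down protocol: single_thread_set() tallies in ps_singlecount the threads it still has to wait for, each of those threads decrements the counter on its way into suspension (or exit), and the one that reaches zero wakes the waiter. Skeleton of the two sides, condensed from the hunks above:

    /* suspending thread, in single_thread_check_locked() */
    if (atomic_dec_int_nv(&pr->ps_singlecount) == 0)
            wakeup(&pr->ps_singlecount);

    /* initiating thread, in single_thread_wait() */
    wait = pr->ps_singlecount > 0;
    while (wait) {
            sleep_setup(&pr->ps_singlecount, PWAIT, "suspend");
            wait = pr->ps_singlecount > 0;
            sleep_finish(0, wait);
    }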


@@ -1,4 +1,4 @@
- /* $OpenBSD: kern_synch.c,v 1.199 2023/09/08 09:06:31 claudio Exp $ */
+ /* $OpenBSD: kern_synch.c,v 1.200 2023/09/13 14:25:49 claudio Exp $ */
  /* $NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $ */
  /*
@@ -566,18 +566,15 @@ sys_sched_yield(struct proc *p, void *v, register_t *retval)
  uint8_t newprio;
  int s;
+ SCHED_LOCK(s);
  /*
  * If one of the threads of a multi-threaded process called
  * sched_yield(2), drop its priority to ensure its siblings
  * can make some progress.
  */
- mtx_enter(&p->p_p->ps_mtx);
  newprio = p->p_usrpri;
  TAILQ_FOREACH(q, &p->p_p->ps_threads, p_thr_link)
  newprio = max(newprio, q->p_runpri);
- mtx_leave(&p->p_p->ps_mtx);
- SCHED_LOCK(s);
  setrunqueue(p->p_cpu, p, newprio);
  p->p_ru.ru_nvcsw++;
  mi_switch();

@@ -1,4 +1,4 @@
- /* $OpenBSD: proc.h,v 1.350 2023/09/08 09:06:31 claudio Exp $ */
+ /* $OpenBSD: proc.h,v 1.351 2023/09/13 14:25:49 claudio Exp $ */
  /* $NetBSD: proc.h,v 1.44 1996/04/22 01:23:21 christos Exp $ */
  /*-
@@ -138,7 +138,7 @@ struct process {
  struct ucred *ps_ucred; /* Process owner's identity. */
  LIST_ENTRY(process) ps_list; /* List of all processes. */
- TAILQ_HEAD(,proc) ps_threads; /* [K|m] Threads in this process. */
+ TAILQ_HEAD(,proc) ps_threads; /* [K|S] Threads in this process. */
  LIST_ENTRY(process) ps_pglist; /* List of processes in pgrp. */
  struct process *ps_pptr; /* Pointer to parent process. */
@@ -173,8 +173,8 @@ struct process {
  u_int ps_flags; /* [a] PS_* flags. */
  int ps_siglist; /* Signals pending for the process. */
- struct proc *ps_single; /* [m] Thread for single-threading. */
- u_int ps_singlecnt; /* [m] Number of suspended threads. */
+ struct proc *ps_single; /* [S] Thread for single-threading. */
+ u_int ps_singlecount; /* [a] Not yet suspended threads. */
  int ps_traceflag; /* Kernel trace points. */
  struct vnode *ps_tracevp; /* Trace to vnode. */
@@ -242,7 +242,7 @@ struct process {
  /* End area that is copied on creation. */
  #define ps_endcopy ps_threadcnt
- u_int ps_threadcnt; /* [m] Number of threads. */
+ u_int ps_threadcnt; /* Number of threads. */
  struct timespec ps_start; /* starting uptime. */
  struct timeout ps_realit_to; /* [m] ITIMER_REAL timeout */
@@ -310,14 +310,13 @@ struct p_inentry {
  * U uidinfolk
  * l read only reference, see lim_read_enter()
  * o owned (read/modified only) by this thread
- * m this proc's' `p->p_p->ps_mtx'
  */
  struct proc {
  TAILQ_ENTRY(proc) p_runq; /* [S] current run/sleep queue */
  LIST_ENTRY(proc) p_list; /* List of all threads. */
  struct process *p_p; /* [I] The process of this thread. */
- TAILQ_ENTRY(proc) p_thr_link; /* [K|m] Threads in a process linkage. */
+ TAILQ_ENTRY(proc) p_thr_link; /* Threads in a process linkage. */
  TAILQ_ENTRY(proc) p_fut_link; /* Threads in a futex linkage. */
  struct futex *p_futex; /* Current sleeping futex. */