sync code with latest fixes and improvements from OpenBSD
parent 58df21ce75
commit f960599e67
399 changed files with 7016 additions and 6902 deletions
@@ -2737,6 +2737,9 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
         if (!attachment->is_mapped)
             continue;
 
+        if (attachment->bo_va->base.bo->tbo.pin_count)
+            continue;
+
         kfd_mem_dmaunmap_attachment(mem, attachment);
         ret = update_gpuvm_pte(mem, attachment, &sync_obj);
         if (ret) {
@@ -1730,18 +1730,30 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
 
     /* Insert partial mapping before the range */
     if (!list_empty(&before->list)) {
+        struct amdgpu_bo *bo = before->bo_va->base.bo;
+
         amdgpu_vm_it_insert(before, &vm->va);
         if (before->flags & AMDGPU_PTE_PRT)
             amdgpu_vm_prt_get(adev);
+
+        if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
+            !before->bo_va->base.moved)
+            amdgpu_vm_bo_moved(&before->bo_va->base);
     } else {
         kfree(before);
     }
 
     /* Insert partial mapping after the range */
     if (!list_empty(&after->list)) {
+        struct amdgpu_bo *bo = after->bo_va->base.bo;
+
         amdgpu_vm_it_insert(after, &vm->va);
         if (after->flags & AMDGPU_PTE_PRT)
             amdgpu_vm_prt_get(adev);
+
+        if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
+            !after->bo_va->base.moved)
+            amdgpu_vm_bo_moved(&after->bo_va->base);
     } else {
         kfree(after);
     }
@@ -346,7 +346,7 @@ static void nbio_v2_3_init_registers(struct amdgpu_device *adev)
 
 #define NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT 0x00000000 // off by default, no gains over L1
 #define NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT 0x00000009 // 1=1us, 9=1ms
-#define NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT 0x0000000E // 4ms
+#define NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT 0x0000000E // 400ms
 
 static void nbio_v2_3_enable_aspm(struct amdgpu_device *adev,
                                   bool enable)
@@ -479,9 +479,12 @@ static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
         WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP5, data);
 
     def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
-    data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
-    data |= 0x9 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
-    data |= 0x1 << PCIE_LC_CNTL__LC_PMI_TO_L1_DIS__SHIFT;
+    data |= NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
+    if (pci_is_thunderbolt_attached(adev->pdev))
+        data |= NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
+    else
+        data |= NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
+    data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
     if (def != data)
         WREG32_PCIE(smnPCIE_LC_CNTL, data);
 
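The ASPM hunk above follows the usual read-modify-write pattern for these PCIe link-control registers: clear a field through its *_MASK, set it through value << *_SHIFT, and only write the register back when the value actually changed. A minimal standalone sketch of that pattern follows; the register, mask, and shift are placeholders, not the real smnPCIE_LC_CNTL layout.

#include <stdint.h>
#include <stdio.h>

#define FIELD_MASK  0x00000f00u   /* made-up field occupying bits 11:8 */
#define FIELD_SHIFT 8

static uint32_t reg;                       /* stands in for the hardware register */

static uint32_t read_reg(void) { return reg; }
static void write_reg(uint32_t v) { reg = v; }

static void set_field(uint32_t val)
{
    uint32_t def, data;

    def = data = read_reg();               /* read once, remember the original */
    data &= ~FIELD_MASK;                   /* clear the field */
    data |= (val << FIELD_SHIFT) & FIELD_MASK;
    if (def != data)                       /* skip the write when nothing changed */
        write_reg(data);
}

int main(void)
{
    set_field(0x9);
    printf("0x%08x\n", (unsigned int)read_reg());   /* prints 0x00000900 */
    return 0;
}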
@@ -2330,7 +2330,7 @@ const struct amd_ip_funcs sdma_v4_0_ip_funcs = {
 
 static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
     .type = AMDGPU_RING_TYPE_SDMA,
-    .align_mask = 0xf,
+    .align_mask = 0xff,
     .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
     .support_64bit_ptrs = true,
     .secure_submission_supported = true,
@@ -2400,7 +2400,7 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs_2nd_mmhub = {
 
 static const struct amdgpu_ring_funcs sdma_v4_0_page_ring_funcs = {
     .type = AMDGPU_RING_TYPE_SDMA,
-    .align_mask = 0xf,
+    .align_mask = 0xff,
     .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
     .support_64bit_ptrs = true,
     .secure_submission_supported = true,
@@ -6974,7 +6974,13 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
         drm_add_modes_noedid(connector, 640, 480);
     } else {
         amdgpu_dm_connector_ddc_get_modes(connector, edid);
-        amdgpu_dm_connector_add_common_modes(encoder, connector);
+        /* most eDP supports only timings from its edid,
+         * usually only detailed timings are available
+         * from eDP edid. timings which are not from edid
+         * may damage eDP
+         */
+        if (connector->connector_type != DRM_MODE_CONNECTOR_eDP)
+            amdgpu_dm_connector_add_common_modes(encoder, connector);
         amdgpu_dm_connector_add_freesync_modes(connector, edid);
     }
     amdgpu_dm_fbc_init(connector);
@@ -42,6 +42,30 @@
 #include "dm_helpers.h"
 #include "ddc_service_types.h"
 
+static u32 edid_extract_panel_id(struct edid *edid)
+{
+    return (u32)edid->mfg_id[0] << 24 |
+           (u32)edid->mfg_id[1] << 16 |
+           (u32)EDID_PRODUCT_ID(edid);
+}
+
+static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps)
+{
+    uint32_t panel_id = edid_extract_panel_id(edid);
+
+    switch (panel_id) {
+    /* Workaround for some monitors which does not work well with FAMS */
+    case drm_edid_encode_panel_id('S', 'A', 'M', 0x0E5E):
+    case drm_edid_encode_panel_id('S', 'A', 'M', 0x7053):
+    case drm_edid_encode_panel_id('S', 'A', 'M', 0x71AC):
+        DRM_DEBUG_DRIVER("Disabling FAMS on monitor with panel id %X\n", panel_id);
+        edid_caps->panel_patch.disable_fams = true;
+        break;
+    default:
+        return;
+    }
+}
+
 /* dm_helpers_parse_edid_caps
  *
  * Parse edid caps
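For the FAMS quirk table above, the value compared against edid_extract_panel_id() is the 16-bit EDID manufacturer ID in the upper half and the product ID in the lower half. Assuming the standard EDID packing of three 5-bit letter codes (A = 1) in the manufacturer ID, a quirk entry such as ('S', 'A', 'M', 0x0E5E) works out to 0x4C2D0E5E. The helper below is a hypothetical illustration of that packing, not the drm_edid_encode_panel_id() macro itself.

#include <stdint.h>
#include <stdio.h>

static uint32_t encode_panel_id(char a, char b, char c, uint16_t product)
{
    /* three 5-bit letter codes, 'A' == 1, packed big-endian style */
    uint16_t mfg = ((a - '@') << 10) | ((b - '@') << 5) | (c - '@');

    return ((uint32_t)mfg << 16) | product;
}

int main(void)
{
    /* 'S' = 19, 'A' = 1, 'M' = 13 -> mfg 0x4c2d; full id 0x4c2d0e5e */
    printf("0x%08x\n", (unsigned int)encode_panel_id('S', 'A', 'M', 0x0e5e));
    return 0;
}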
@@ -113,6 +137,8 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
     else
         edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION;
 
+    apply_edid_quirks(edid_buf, edid_caps);
+
     kfree(sads);
     kfree(sadb);
 
@@ -1539,6 +1539,9 @@ bool dc_validate_boot_timing(const struct dc *dc,
         return false;
     }
 
+    if (dc->debug.force_odm_combine)
+        return false;
+
     /* Check for enabled DIG to identify enabled display */
     if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
         return false;
@@ -970,10 +970,12 @@ enum dc_status resource_map_phy_clock_resources(
         || dc_is_virtual_signal(pipe_ctx->stream->signal))
         pipe_ctx->clock_source =
             dc->res_pool->dp_clock_source;
-    else
-        pipe_ctx->clock_source = find_matching_pll(
-            &context->res_ctx, dc->res_pool,
-            stream);
+    else {
+        if (stream && stream->link && stream->link->link_enc)
+            pipe_ctx->clock_source = find_matching_pll(
+                &context->res_ctx, dc->res_pool,
+                stream);
+    }
 
     if (pipe_ctx->clock_source == NULL)
         return DC_NO_CLOCK_SOURCE_RESOURCE;
@@ -1678,6 +1678,17 @@ static void dcn20_program_pipe(
 
         if (hws->funcs.setup_vupdate_interrupt)
             hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
+
+        if (hws->funcs.calculate_dccg_k1_k2_values && dc->res_pool->dccg->funcs->set_pixel_rate_div) {
+            unsigned int k1_div, k2_div;
+
+            hws->funcs.calculate_dccg_k1_k2_values(pipe_ctx, &k1_div, &k2_div);
+
+            dc->res_pool->dccg->funcs->set_pixel_rate_div(
+                dc->res_pool->dccg,
+                pipe_ctx->stream_res.tg->inst,
+                k1_div, k2_div);
+        }
     }
 
     if (pipe_ctx->update_flags.bits.odm)
@@ -1165,10 +1165,6 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div)
     unsigned int odm_combine_factor = 0;
     bool two_pix_per_container = false;
 
-    // For phantom pipes, use the same programming as the main pipes
-    if (pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM) {
-        stream = pipe_ctx->stream->mall_stream_config.paired_stream;
-    }
     two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing);
     odm_combine_factor = get_odm_config(pipe_ctx, NULL);
 
@@ -98,7 +98,7 @@ static void optc32_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
     optc1->opp_count = opp_cnt;
 }
 
-static void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode)
+void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode)
 {
     struct optc *optc1 = DCN10TG_FROM_TG(optc);
 
@@ -250,5 +250,6 @@
     SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh)
 
 void dcn32_timing_generator_init(struct optc *optc1);
+void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode);
 
 #endif /* __DC_OPTC_DCN32_H__ */
@@ -471,7 +471,7 @@ struct dmub_notification {
  * of a firmware to know if feature or functionality is supported or present.
  */
 #define DMUB_FW_VERSION(major, minor, revision) \
-    ((((major) & 0xFF) << 24) | (((minor) & 0xFF) << 16) | ((revision) & 0xFFFF))
+    ((((major) & 0xFF) << 24) | (((minor) & 0xFF) << 16) | (((revision) & 0xFF) << 8))
 
 /**
  * dmub_srv_create() - creates the DMUB service.
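The revised macro keeps the major number in bits 31:24 and the minor in bits 23:16, and now masks the revision to 8 bits and shifts it into bits 15:8 instead of leaving a 16-bit revision in the low half of the word. A standalone sketch of the new encoding, with made-up version numbers purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define DMUB_FW_VERSION(major, minor, revision) \
    ((((major) & 0xFF) << 24) | (((minor) & 0xFF) << 16) | (((revision) & 0xFF) << 8))

int main(void)
{
    uint32_t v = DMUB_FW_VERSION(4, 0, 59);   /* hypothetical version 4.0.59 */

    /* major in bits 31:24, minor in 23:16, revision in 15:8 -> 0x04003b00 */
    printf("0x%08x\n", (unsigned int)v);
    return 0;
}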
@@ -168,6 +168,7 @@ struct smu_temperature_range {
     int mem_crit_max;
     int mem_emergency_max;
     int software_shutdown_temp;
+    int software_shutdown_temp_offset;
 };
 
 struct smu_state_validation_block {
@@ -297,5 +297,9 @@ int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu,
                                         uint32_t *size,
                                         uint32_t pptable_id);
 
+int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
+                                     uint32_t pcie_gen_cap,
+                                     uint32_t pcie_width_cap);
+
 #endif
 #endif
@@ -1381,6 +1381,7 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
      */
     uint32_t ctxid = entry->src_data[0];
     uint32_t data;
+    uint32_t high;
 
     if (client_id == SOC15_IH_CLIENTID_THM) {
         switch (src_id) {
@@ -1437,6 +1438,36 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
                 schedule_work(&smu->throttling_logging_work);
 
                 break;
+            case 0x8:
+                high = smu->thermal_range.software_shutdown_temp +
+                    smu->thermal_range.software_shutdown_temp_offset;
+                high = min_t(typeof(high),
+                             SMU_THERMAL_MAXIMUM_ALERT_TEMP,
+                             high);
+                dev_emerg(adev->dev, "Reduce soft CTF limit to %d (by an offset %d)\n",
+                          high,
+                          smu->thermal_range.software_shutdown_temp_offset);
+
+                data = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL);
+                data = REG_SET_FIELD(data, THM_THERMAL_INT_CTRL,
+                                     DIG_THERM_INTH,
+                                     (high & 0xff));
+                data = data & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
+                WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, data);
+                break;
+            case 0x9:
+                high = min_t(typeof(high),
+                             SMU_THERMAL_MAXIMUM_ALERT_TEMP,
+                             smu->thermal_range.software_shutdown_temp);
+                dev_emerg(adev->dev, "Recover soft CTF limit to %d\n", high);
+
+                data = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL);
+                data = REG_SET_FIELD(data, THM_THERMAL_INT_CTRL,
+                                     DIG_THERM_INTH,
+                                     (high & 0xff));
+                data = data & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
+                WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, data);
+                break;
             }
         }
     }
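Case 0x8 above raises the soft CTF threshold by the fan-abnormal offset, clamps it to SMU_THERMAL_MAXIMUM_ALERT_TEMP, and programs the low byte into DIG_THERM_INTH; case 0x9 drops back to the plain shutdown temperature, again clamped. A minimal sketch of the clamp follows; the alert ceiling of 255 degC is an assumption made only for this example, not the driver's actual constant value.

#include <stdio.h>

#define SMU_THERMAL_MAXIMUM_ALERT_TEMP 255   /* assumed ceiling for this sketch */

static int soft_ctf_limit(int shutdown_temp, int offset)
{
    int high = shutdown_temp + offset;

    if (high > SMU_THERMAL_MAXIMUM_ALERT_TEMP)
        high = SMU_THERMAL_MAXIMUM_ALERT_TEMP;
    return high;            /* driver then writes (high & 0xff) into DIG_THERM_INTH */
}

int main(void)
{
    /* e.g. a 90 degC shutdown limit raised by a 5 degC fan-abnormal offset */
    printf("%d\n", soft_ctf_limit(90, 5));   /* prints 95 */
    return 0;
}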
@@ -2458,3 +2489,74 @@ int smu_v13_0_mode1_reset(struct smu_context *smu)
 
     return ret;
 }
+
+/*
+ * Intel hosts such as Raptor Lake and Sapphire Rapids don't support dynamic
+ * speed switching. Until we have confirmation from Intel that a specific host
+ * supports it, it's safer that we keep it disabled for all.
+ *
+ * https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/2663
+ */
+static bool smu_v13_0_is_pcie_dynamic_switching_supported(void)
+{
+#if IS_ENABLED(CONFIG_X86)
+#ifdef __linux__
+    struct cpuinfo_x86 *c = &cpu_data(0);
+
+    if (c->x86_vendor == X86_VENDOR_INTEL)
+#else
+    if (strcmp(cpu_vendor, "GenuineIntel") == 0)
+#endif
+        return false;
+#endif
+    return true;
+}
+
+int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
+                                     uint32_t pcie_gen_cap,
+                                     uint32_t pcie_width_cap)
+{
+    struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+    struct smu_13_0_pcie_table *pcie_table =
+                &dpm_context->dpm_tables.pcie_table;
+    int num_of_levels = pcie_table->num_of_link_levels;
+    uint32_t smu_pcie_arg;
+    int ret, i;
+
+    if (!smu_v13_0_is_pcie_dynamic_switching_supported()) {
+        if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap)
+            pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1];
+
+        if (pcie_table->pcie_lane[num_of_levels - 1] < pcie_width_cap)
+            pcie_width_cap = pcie_table->pcie_lane[num_of_levels - 1];
+
+        /* Force all levels to use the same settings */
+        for (i = 0; i < num_of_levels; i++) {
+            pcie_table->pcie_gen[i] = pcie_gen_cap;
+            pcie_table->pcie_lane[i] = pcie_width_cap;
+        }
+    } else {
+        for (i = 0; i < num_of_levels; i++) {
+            if (pcie_table->pcie_gen[i] > pcie_gen_cap)
+                pcie_table->pcie_gen[i] = pcie_gen_cap;
+            if (pcie_table->pcie_lane[i] > pcie_width_cap)
+                pcie_table->pcie_lane[i] = pcie_width_cap;
+        }
+    }
+
+    for (i = 0; i < num_of_levels; i++) {
+        smu_pcie_arg = i << 16;
+        smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+        smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+        ret = smu_cmn_send_smc_msg_with_param(smu,
+                                              SMU_MSG_OverridePcieParameters,
+                                              smu_pcie_arg,
+                                              NULL);
+        if (ret)
+            return ret;
+    }
+
+    return 0;
+}
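In the final loop above, each SMU_MSG_OverridePcieParameters message packs the link level index into bits 16 and up, the PCIe generation value into bits 15:8, and the lane-width value into bits 7:0. A small sketch of that packing with arbitrary sample values; the helper name is invented for illustration and the sample numbers carry no hardware meaning.

#include <stdint.h>
#include <stdio.h>

static uint32_t pack_pcie_arg(uint32_t level, uint32_t gen, uint32_t lane)
{
    /* mirrors smu_pcie_arg = i << 16 | gen << 8 | lane from the hunk above */
    return (level << 16) | (gen << 8) | lane;
}

int main(void)
{
    /* level 1, gen value 3, lane value 6 -> 0x00010306 */
    printf("0x%08x\n", (unsigned int)pack_pcie_arg(1, 3, 6));
    return 0;
}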
@@ -1216,37 +1216,6 @@ static int smu_v13_0_0_force_clk_levels(struct smu_context *smu,
     return ret;
 }
 
-static int smu_v13_0_0_update_pcie_parameters(struct smu_context *smu,
-                                              uint32_t pcie_gen_cap,
-                                              uint32_t pcie_width_cap)
-{
-    struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
-    struct smu_13_0_pcie_table *pcie_table =
-                &dpm_context->dpm_tables.pcie_table;
-    uint32_t smu_pcie_arg;
-    int ret, i;
-
-    for (i = 0; i < pcie_table->num_of_link_levels; i++) {
-        if (pcie_table->pcie_gen[i] > pcie_gen_cap)
-            pcie_table->pcie_gen[i] = pcie_gen_cap;
-        if (pcie_table->pcie_lane[i] > pcie_width_cap)
-            pcie_table->pcie_lane[i] = pcie_width_cap;
-
-        smu_pcie_arg = i << 16;
-        smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
-        smu_pcie_arg |= pcie_table->pcie_lane[i];
-
-        ret = smu_cmn_send_smc_msg_with_param(smu,
-                                              SMU_MSG_OverridePcieParameters,
-                                              smu_pcie_arg,
-                                              NULL);
-        if (ret)
-            return ret;
-    }
-
-    return 0;
-}
-
 static const struct smu_temperature_range smu13_thermal_policy[] = {
     {-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
     { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
@@ -1281,6 +1250,7 @@ static int smu_v13_0_0_get_thermal_temperature_range(struct smu_context *smu,
     range->mem_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_MEM] + CTF_OFFSET_MEM)*
         SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
     range->software_shutdown_temp = powerplay_table->software_shutdown_temp;
+    range->software_shutdown_temp_offset = pptable->SkuTable.FanAbnormalTempLimitOffset;
 
     return 0;
 }
@@ -2036,7 +2006,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
     .feature_is_enabled = smu_cmn_feature_is_enabled,
     .print_clk_levels = smu_v13_0_0_print_clk_levels,
     .force_clk_levels = smu_v13_0_0_force_clk_levels,
-    .update_pcie_parameters = smu_v13_0_0_update_pcie_parameters,
+    .update_pcie_parameters = smu_v13_0_update_pcie_parameters,
     .get_thermal_temperature_range = smu_v13_0_0_get_thermal_temperature_range,
     .register_irq_handler = smu_v13_0_register_irq_handler,
     .enable_thermal_alert = smu_v13_0_enable_thermal_alert,
@@ -1225,37 +1225,6 @@ static int smu_v13_0_7_force_clk_levels(struct smu_context *smu,
     return ret;
 }
 
-static int smu_v13_0_7_update_pcie_parameters(struct smu_context *smu,
-                                              uint32_t pcie_gen_cap,
-                                              uint32_t pcie_width_cap)
-{
-    struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
-    struct smu_13_0_pcie_table *pcie_table =
-                &dpm_context->dpm_tables.pcie_table;
-    uint32_t smu_pcie_arg;
-    int ret, i;
-
-    for (i = 0; i < pcie_table->num_of_link_levels; i++) {
-        if (pcie_table->pcie_gen[i] > pcie_gen_cap)
-            pcie_table->pcie_gen[i] = pcie_gen_cap;
-        if (pcie_table->pcie_lane[i] > pcie_width_cap)
-            pcie_table->pcie_lane[i] = pcie_width_cap;
-
-        smu_pcie_arg = i << 16;
-        smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
-        smu_pcie_arg |= pcie_table->pcie_lane[i];
-
-        ret = smu_cmn_send_smc_msg_with_param(smu,
-                                              SMU_MSG_OverridePcieParameters,
-                                              smu_pcie_arg,
-                                              NULL);
-        if (ret)
-            return ret;
-    }
-
-    return 0;
-}
-
 static const struct smu_temperature_range smu13_thermal_policy[] =
 {
     {-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
@@ -1288,6 +1257,7 @@ static int smu_v13_0_7_get_thermal_temperature_range(struct smu_context *smu,
     range->mem_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_MEM] + CTF_OFFSET_MEM)*
         SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
     range->software_shutdown_temp = powerplay_table->software_shutdown_temp;
+    range->software_shutdown_temp_offset = pptable->SkuTable.FanAbnormalTempLimitOffset;
 
     return 0;
 }
@@ -1751,7 +1721,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
     .feature_is_enabled = smu_cmn_feature_is_enabled,
     .print_clk_levels = smu_v13_0_7_print_clk_levels,
     .force_clk_levels = smu_v13_0_7_force_clk_levels,
-    .update_pcie_parameters = smu_v13_0_7_update_pcie_parameters,
+    .update_pcie_parameters = smu_v13_0_update_pcie_parameters,
     .get_thermal_temperature_range = smu_v13_0_7_get_thermal_temperature_range,
     .register_irq_handler = smu_v13_0_register_irq_handler,
     .enable_thermal_alert = smu_v13_0_enable_thermal_alert,
@@ -140,6 +140,12 @@ drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
     if (!state->planes)
         goto fail;
 
+    /*
+     * Because drm_atomic_state can be committed asynchronously we need our
+     * own reference and cannot rely on the one implied by drm_file in the
+     * ioctl call.
+     */
+    drm_dev_get(dev);
     state->dev = dev;
 
     drm_dbg_atomic(dev, "Allocated atomic state %p\n", state);
@@ -299,7 +305,8 @@ EXPORT_SYMBOL(drm_atomic_state_clear);
 void __drm_atomic_state_free(struct kref *ref)
 {
     struct drm_atomic_state *state = container_of(ref, typeof(*state), ref);
-    struct drm_mode_config *config = &state->dev->mode_config;
+    struct drm_device *dev = state->dev;
+    struct drm_mode_config *config = &dev->mode_config;
 
     drm_atomic_state_clear(state);
 
@@ -311,6 +318,8 @@ void __drm_atomic_state_free(struct kref *ref)
         drm_atomic_state_default_release(state);
         kfree(state);
     }
+
+    drm_dev_put(dev);
 }
 EXPORT_SYMBOL(__drm_atomic_state_free);
 
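Taken together, the three drm_atomic hunks above make the atomic state hold its own drm_device reference: it is acquired in drm_atomic_state_init() and released only after the last use of the device in __drm_atomic_state_free(), so a nonblocking commit that outlives the ioctl no longer leaves the free path touching a released device. A minimal sketch of that lifetime pattern with invented types and helper names, not the DRM API:

#include <stdio.h>
#include <stdlib.h>

struct dev { int refs; };
struct state { struct dev *dev; };

static void dev_get(struct dev *d) { d->refs++; }
static void dev_put(struct dev *d) { if (--d->refs == 0) printf("last reference dropped\n"); }

static struct state *state_create(struct dev *d)
{
    struct state *s = malloc(sizeof(*s));

    dev_get(d);              /* the state may outlive the caller's reference */
    s->dev = d;
    return s;
}

static void state_free(struct state *s)
{
    struct dev *d = s->dev;  /* keep a local pointer, as in the fix above */

    /* ... tear down the state, which may still dereference d ... */
    free(s);
    dev_put(d);              /* drop the reference only after the last use */
}

int main(void)
{
    struct dev d = { .refs = 1 };
    struct state *s = state_create(&d);

    state_free(s);
    dev_put(&d);
    return 0;
}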
|
@ -1231,7 +1231,16 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
|
|||
continue;
|
||||
|
||||
ret = drm_crtc_vblank_get(crtc);
|
||||
WARN_ONCE(ret != -EINVAL, "driver forgot to call drm_crtc_vblank_off()\n");
|
||||
/*
|
||||
* Self-refresh is not a true "disable"; ensure vblank remains
|
||||
* enabled.
|
||||
*/
|
||||
if (new_crtc_state->self_refresh_active)
|
||||
WARN_ONCE(ret != 0,
|
||||
"driver disabled vblank in self-refresh\n");
|
||||
else
|
||||
WARN_ONCE(ret != -EINVAL,
|
||||
"driver forgot to call drm_crtc_vblank_off()\n");
|
||||
if (ret == 0)
|
||||
drm_crtc_vblank_put(crtc);
|
||||
}
|
||||
|
|
|
@@ -122,13 +122,34 @@ EXPORT_SYMBOL(drm_client_init);
  * drm_client_register() it is no longer permissible to call drm_client_release()
  * directly (outside the unregister callback), instead cleanup will happen
  * automatically on driver unload.
+ *
+ * Registering a client generates a hotplug event that allows the client
+ * to set up its display from pre-existing outputs. The client must have
+ * initialized its state to able to handle the hotplug event successfully.
  */
 void drm_client_register(struct drm_client_dev *client)
 {
     struct drm_device *dev = client->dev;
+    int ret;
 
     mutex_lock(&dev->clientlist_mutex);
     list_add(&client->list, &dev->clientlist);
+
+    if (client->funcs && client->funcs->hotplug) {
+        /*
+         * Perform an initial hotplug event to pick up the
+         * display configuration for the client. This step
+         * has to be performed *after* registering the client
+         * in the list of clients, or a concurrent hotplug
+         * event might be lost; leaving the display off.
+         *
+         * Hold the clientlist_mutex as for a regular hotplug
+         * event.
+         */
+        ret = client->funcs->hotplug(client);
+        if (ret)
+            drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
+    }
     mutex_unlock(&dev->clientlist_mutex);
 }
 EXPORT_SYMBOL(drm_client_register);
@@ -1309,6 +1309,7 @@ drm_attach(struct device *parent, struct device *self, void *aux)
 
     sc->sc_drm = dev;
 
+    kref_init(&dev->ref);
     dev->dev = self;
     dev->dev_private = parent;
     dev->driver = da->driver;
@@ -2717,10 +2717,6 @@ void drm_fbdev_generic_setup(struct drm_device *dev,
         preferred_bpp = 32;
     fb_helper->preferred_bpp = preferred_bpp;
 
-    ret = drm_fbdev_client_hotplug(&fb_helper->client);
-    if (ret)
-        drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
-
     drm_client_register(&fb_helper->client);
 }
 EXPORT_SYMBOL(drm_fbdev_generic_setup);
@@ -5133,7 +5133,6 @@ copy_bigjoiner_crtc_state_modeset(struct intel_atomic_state *state,
     saved_state->uapi = slave_crtc_state->uapi;
     saved_state->scaler_state = slave_crtc_state->scaler_state;
     saved_state->shared_dpll = slave_crtc_state->shared_dpll;
-    saved_state->dpll_hw_state = slave_crtc_state->dpll_hw_state;
     saved_state->crc_enabled = slave_crtc_state->crc_enabled;
 
     intel_crtc_free_hw_state(slave_crtc_state);
@@ -611,7 +611,7 @@ __vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size)
     if (IS_ERR(obj))
         return ERR_CAST(obj);
 
-    i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);
+    i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
 
     vma = i915_vma_instance(obj, vm, NULL);
     if (IS_ERR(vma)) {
@@ -1191,6 +1191,7 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
         ret = ttm_bo_handle_move_mem(bo, evict_mem, true, &ctx, &hop);
         if (unlikely(ret != 0)) {
+            WARN(ret == -EMULTIHOP, "Unexpected multihop in swaput - likely driver bug.\n");
             ttm_resource_free(bo, &evict_mem);
             goto out;
         }
     }