sync with OpenBSD -current

parent 20629a8b0d
commit 604988d5d3

58 changed files with 592 additions and 377 deletions

@@ -755,7 +755,7 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
 	int r;

 	if (!adev->smc_rreg)
-		return -EPERM;
+		return -EOPNOTSUPP;

 	if (size & 0x3 || *pos & 0x3)
 		return -EINVAL;

@@ -814,7 +814,7 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
 	int r;

 	if (!adev->smc_wreg)
-		return -EPERM;
+		return -EOPNOTSUPP;

 	if (size & 0x3 || *pos & 0x3)
 		return -EINVAL;
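
Note on the two debugfs hunks above: -EPERM means the caller lacks permission, while -EOPNOTSUPP is the conventional errno for a hook that simply is not implemented on this ASIC. A minimal sketch of the guard pattern (simplified, not the driver's full read path):

    /* Sketch: distinguish "not supported here" from "not allowed". */
    static int smc_reg_read_sketch(struct amdgpu_device *adev,
                                   uint32_t reg, uint32_t *val)
    {
            if (!adev->smc_rreg)            /* callback never wired up */
                    return -EOPNOTSUPP;     /* not a permissions problem */
            *val = adev->smc_rreg(adev, reg);
            return 0;
    }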

@@ -1026,7 +1026,12 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		if (amdgpu_dpm_read_sensor(adev,
 					   AMDGPU_PP_SENSOR_GPU_AVG_POWER,
 					   (void *)&ui32, &ui32_size)) {
-			return -EINVAL;
+			/* fall back to input power for backwards compat */
+			if (amdgpu_dpm_read_sensor(adev,
+						   AMDGPU_PP_SENSOR_GPU_INPUT_POWER,
+						   (void *)&ui32, &ui32_size)) {
+				return -EINVAL;
+			}
 		}
 		ui32 >>= 8;
 		break;
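
The amdgpu_info_ioctl hunk turns a hard failure into a fallback: if the average-power sensor is not implemented on a given part, the query retries with the input-power sensor before giving up. A condensed equivalent of the new logic (the final shift drops what appears to be an 8-bit fractional part):

    /* Prefer average power; fall back to input power for backwards compat. */
    if (amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER,
                               (void *)&ui32, &ui32_size) &&
        amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER,
                               (void *)&ui32, &ui32_size))
            return -EINVAL;
    ui32 >>= 8;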

@@ -330,12 +330,6 @@ static void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id)
 	pdd->gpuvm_limit =
 		pdd->dev->kfd->shared_resources.gpuvm_size - 1;

-	/* dGPUs: the reserved space for kernel
-	 * before SVM
-	 */
-	pdd->qpd.cwsr_base = SVM_CWSR_BASE;
-	pdd->qpd.ib_base = SVM_IB_BASE;
-
 	pdd->scratch_base = MAKE_SCRATCH_APP_BASE_VI();
 	pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base);
 }

@@ -345,18 +339,18 @@ static void kfd_init_apertures_v9(struct kfd_process_device *pdd, uint8_t id)
 	pdd->lds_base = MAKE_LDS_APP_BASE_V9();
 	pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base);

-	pdd->gpuvm_base = PAGE_SIZE;
+	/* Raven needs SVM to support graphic handle, etc. Leave the small
+	 * reserved space before SVM on Raven as well, even though we don't
+	 * have to.
+	 * Set gpuvm_base and gpuvm_limit to CANONICAL addresses so that they
+	 * are used in Thunk to reserve SVM.
+	 */
+	pdd->gpuvm_base = SVM_USER_BASE;
 	pdd->gpuvm_limit =
 		pdd->dev->kfd->shared_resources.gpuvm_size - 1;

 	pdd->scratch_base = MAKE_SCRATCH_APP_BASE_V9();
 	pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base);
-
-	/*
-	 * Place TBA/TMA on opposite side of VM hole to prevent
-	 * stray faults from triggering SVM on these pages.
-	 */
-	pdd->qpd.cwsr_base = pdd->dev->kfd->shared_resources.gpuvm_size;
 }

 int kfd_init_apertures(struct kfd_process *process)

@@ -413,6 +407,12 @@ int kfd_init_apertures(struct kfd_process *process)
 				return -EINVAL;
 			}
 		}
+
+		/* dGPUs: the reserved space for kernel
+		 * before SVM
+		 */
+		pdd->qpd.cwsr_base = SVM_CWSR_BASE;
+		pdd->qpd.ib_base = SVM_IB_BASE;
 	}

 	dev_dbg(kfd_device, "node id %u\n", id);
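
Taken together, the three kfd_flat_memory.c hunks return the CWSR trap-handler and indirect-buffer reservations to the small region below the SVM range (and, for dGPUs, set them in kfd_init_apertures() itself), instead of parking the CWSR area above the VM hole. A rough map of the low end of the per-process GPUVM space after the change; the relative ordering of the SVM_* constants is an assumption based on their names:

    /*
     * Low GPUVM layout (sketch, not to scale):
     *
     *   0             .. PAGE_SIZE-1  : unmapped guard page
     *   SVM_IB_BASE   ..              : indirect buffer space  (qpd.ib_base)
     *   SVM_CWSR_BASE ..              : CWSR trap handler area (qpd.cwsr_base)
     *   SVM_USER_BASE ..              : start of SVM user range (gpuvm_base)
     *   ...           .. gpuvm_size-1 : end of GPUVM aperture   (gpuvm_limit)
     */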

@@ -1021,7 +1021,7 @@ int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
 	} else {
 		res = devm_request_free_mem_region(adev->dev, &iomem_resource, size);
 		if (IS_ERR(res))
-			return -ENOMEM;
+			return PTR_ERR(res);
 		pgmap->range.start = res->start;
 		pgmap->range.end = res->end;
 		pgmap->type = MEMORY_DEVICE_PRIVATE;

@@ -1037,10 +1037,10 @@ int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
 	r = devm_memremap_pages(adev->dev, pgmap);
 	if (IS_ERR(r)) {
 		pr_err("failed to register HMM device memory\n");
-		/* Disable SVM support capability */
-		pgmap->type = 0;
 		if (pgmap->type == MEMORY_DEVICE_PRIVATE)
			devm_release_mem_region(adev->dev, res->start, resource_size(res));
+		/* Disable SVM support capability */
+		pgmap->type = 0;
 		return PTR_ERR(r);
 	}

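
Both kgd2kfd_init_zone_device() hunks tighten the error handling around the devm helpers. devm_request_free_mem_region() returns an ERR_PTR-encoded errno, so PTR_ERR(res) propagates the actual cause rather than collapsing every failure to -ENOMEM; and clearing pgmap->type before testing it made the old release branch unreachable, so the clear now happens after the region is released. The ERR_PTR idiom in isolation:

    /* Sketch: pointer-returning kernel APIs encode errnos in the pointer. */
    struct resource *res;

    res = devm_request_free_mem_region(adev->dev, &iomem_resource, size);
    if (IS_ERR(res))
            return PTR_ERR(res);    /* whatever errno the helper chose */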

@@ -971,7 +971,7 @@ struct kfd_process {
 	struct work_struct debug_event_workarea;

 	/* Tracks debug per-vmid request for debug flags */
-	bool dbg_flags;
+	u32 dbg_flags;

 	atomic_t poison;
 	/* Queues are in paused stated because we are in the process of doing a CRIU checkpoint */
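
Widening dbg_flags from bool to u32 lets the field hold a per-vmid bitmask of debug flags rather than a single on/off bit, matching its comment. Typical use of such a field (the flag name here is hypothetical):

    #define KFD_DBG_FLAG_EXAMPLE    (1U << 0)   /* hypothetical bit */

    static bool dbg_example_set(struct kfd_process *p)
    {
            return (p->dbg_flags & KFD_DBG_FLAG_EXAMPLE) != 0;
    }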

@@ -1342,10 +1342,11 @@ static int kfd_create_indirect_link_prop(struct kfd_topology_device *kdev, int g
 			num_cpu++;
 	}

+	if (list_empty(&kdev->io_link_props))
+		return -ENODATA;
+
 	gpu_link = list_first_entry(&kdev->io_link_props,
-					struct kfd_iolink_properties, list);
-	if (!gpu_link)
-		return -ENOMEM;
+				    struct kfd_iolink_properties, list);

 	for (i = 0; i < num_cpu; i++) {
 		/* CPU <--> GPU */

@@ -1423,15 +1424,17 @@ static int kfd_add_peer_prop(struct kfd_topology_device *kdev,
 			peer->gpu->adev))
 		return ret;

+	if (list_empty(&kdev->io_link_props))
+		return -ENODATA;
+
 	iolink1 = list_first_entry(&kdev->io_link_props,
-					struct kfd_iolink_properties, list);
-	if (!iolink1)
-		return -ENOMEM;
+				   struct kfd_iolink_properties, list);
+
+	if (list_empty(&peer->io_link_props))
+		return -ENODATA;

 	iolink2 = list_first_entry(&peer->io_link_props,
-					struct kfd_iolink_properties, list);
-	if (!iolink2)
-		return -ENOMEM;
+				   struct kfd_iolink_properties, list);

 	props = kfd_alloc_struct(props);
 	if (!props)
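
The two kfd_topology.c hunks fix the same latent bug: list_first_entry() is container_of() applied to list.next and never yields NULL, so the removed `if (!gpu_link)` / `if (!iolink1)` / `if (!iolink2)` checks could never fire; on an empty list the macro returns a garbage pointer instead. Checking list_empty() up front is the correct guard:

    /* Sketch: list_first_entry() is only meaningful on a non-empty list. */
    struct kfd_iolink_properties *iolink;

    if (list_empty(&kdev->io_link_props))
            return -ENODATA;        /* nothing to take an entry from */

    iolink = list_first_entry(&kdev->io_link_props,
                              struct kfd_iolink_properties, list);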

@@ -6886,7 +6886,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
 								     max_bpc);
 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
 		clock = adjusted_mode->clock;
-		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
+		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp << 4);
 	}

 	dm_new_connector_state->vcpi_slots =

@@ -1636,7 +1636,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
 	} else {
 		/* check if mode could be supported within full_pbn */
 		bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3;
-		pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp, false);
+		pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp << 4);
 		if (pbn > full_pbn)
 			return DC_FAIL_BANDWIDTH_VALIDATE;
 	}
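
Both display hunks track a DRM core API change: drm_dp_calc_pbn_mode() dropped its DSC boolean and now expects bpp in fixed-point 1/16-bpp units, so callers holding an integer bpp shift it left by four. For example:

    int bpp = 24;                   /* 8 bpc x 3 components */
    int clock_khz = 594000;         /* e.g. a 4K@60 pixel clock */
    int pbn = drm_dp_calc_pbn_mode(clock_khz, bpp << 4);    /* 24.0 bpp -> 384 */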

@@ -807,7 +807,7 @@ void dp_decide_lane_settings(
 	const struct link_training_settings *lt_settings,
 	const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
 	struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
-	union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX])
+	union dpcd_training_lane *dpcd_lane_settings)
 {
 	uint32_t lane;


@@ -111,7 +111,7 @@ void dp_decide_lane_settings(
 	const struct link_training_settings *lt_settings,
 	const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
 	struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
-	union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]);
+	union dpcd_training_lane *dpcd_lane_settings);

 enum dc_dp_training_pattern decide_cr_training_pattern(
 	const struct dc_link_settings *link_settings);
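
The prototype change in the last two hunks is cosmetic to the compiler: a parameter declared as an array of LANE_COUNT_DP_MAX elements decays to a plain pointer, so both spellings declare the same function type. Writing the pointer form stops the array bound from implying a guarantee the callers do not all meet:

    /* These declarations are identical as far as C is concerned: */
    void dp_decide_lane_settings_sketch(union dpcd_training_lane s[LANE_COUNT_DP_MAX]);
    void dp_decide_lane_settings_sketch(union dpcd_training_lane *s);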

@@ -2735,10 +2735,8 @@ static int kv_parse_power_table(struct amdgpu_device *adev)
 		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
 			&non_clock_info_array->nonClockInfo[non_clock_array_index];
 		ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
-		if (ps == NULL) {
-			kfree(adev->pm.dpm.ps);
+		if (ps == NULL)
 			return -ENOMEM;
-		}
 		adev->pm.dpm.ps[i].ps_priv = ps;
 		k = 0;
 		idx = (u8 *)&power_state->v2.clockInfoIndex[0];
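
The kfree(adev->pm.dpm.ps) deleted above freed an array that the caller's error path frees again, i.e. a double free. The ownership rule the fix restores, in sketch form:

    /* Sketch: a failing helper must not free state owned by its caller;
     * the caller's unwind path releases it exactly once. */
    struct kv_ps *ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);

    if (ps == NULL)
            return -ENOMEM;         /* leave adev->pm.dpm.ps for the caller to free */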

@@ -7379,10 +7379,9 @@ static int si_dpm_init(struct amdgpu_device *adev)
 		kcalloc(4,
 			sizeof(struct amdgpu_clock_voltage_dependency_entry),
 			GFP_KERNEL);
-	if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
-		amdgpu_free_extended_power_table(adev);
+	if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries)
 		return -ENOMEM;
-	}
+
 	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
 	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
 	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;

@@ -272,10 +272,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
 			 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
 		ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
 							 dep_table);
-		if (ret) {
-			amdgpu_free_extended_power_table(adev);
+		if (ret)
 			return ret;
-		}
 	}
 	if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
 		dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)

@@ -283,10 +281,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
 			 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
 		ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
 							 dep_table);
-		if (ret) {
-			amdgpu_free_extended_power_table(adev);
+		if (ret)
 			return ret;
-		}
 	}
 	if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
 		dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)

@@ -294,10 +290,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
 			 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
 		ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
 							 dep_table);
-		if (ret) {
-			amdgpu_free_extended_power_table(adev);
+		if (ret)
 			return ret;
-		}
 	}
 	if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
 		dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)

@@ -305,10 +299,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
 			 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
 		ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
 							 dep_table);
-		if (ret) {
-			amdgpu_free_extended_power_table(adev);
+		if (ret)
 			return ret;
-		}
 	}
 	if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
 		ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =

@@ -339,10 +331,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
 			kcalloc(psl->ucNumEntries,
 				sizeof(struct amdgpu_phase_shedding_limits_entry),
 				GFP_KERNEL);
-		if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
-			amdgpu_free_extended_power_table(adev);
+		if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries)
 			return -ENOMEM;
-		}

 		entry = &psl->entries[0];
 		for (i = 0; i < psl->ucNumEntries; i++) {

@@ -383,10 +373,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
 		ATOM_PPLIB_CAC_Leakage_Record *entry;
 		u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
 		adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
-		if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
-			amdgpu_free_extended_power_table(adev);
+		if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries)
 			return -ENOMEM;
-		}
 		entry = &cac_table->entries[0];
 		for (i = 0; i < cac_table->ucNumEntries; i++) {
 			if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {

@@ -438,10 +426,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
 			sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
 		adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
 			kzalloc(size, GFP_KERNEL);
-		if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
-			amdgpu_free_extended_power_table(adev);
+		if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries)
 			return -ENOMEM;
-		}
 		adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
 			limits->numEntries;
 		entry = &limits->entries[0];

@@ -493,10 +479,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
 			sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
 		adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
 			kzalloc(size, GFP_KERNEL);
-		if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
-			amdgpu_free_extended_power_table(adev);
+		if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries)
 			return -ENOMEM;
-		}
 		adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
 			limits->numEntries;
 		entry = &limits->entries[0];

@@ -525,10 +509,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
 			sizeof(struct amdgpu_clock_voltage_dependency_entry);
 		adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
 			kzalloc(size, GFP_KERNEL);
-		if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
-			amdgpu_free_extended_power_table(adev);
+		if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries)
 			return -ENOMEM;
-		}
 		adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
 			limits->numEntries;
 		entry = &limits->entries[0];

@@ -548,10 +530,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
 			 le16_to_cpu(ext_hdr->usPPMTableOffset));
 		adev->pm.dpm.dyn_state.ppm_table =
 			kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
-		if (!adev->pm.dpm.dyn_state.ppm_table) {
-			amdgpu_free_extended_power_table(adev);
+		if (!adev->pm.dpm.dyn_state.ppm_table)
 			return -ENOMEM;
-		}
 		adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
 		adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
 			le16_to_cpu(ppm->usCpuCoreNumber);

@@ -583,10 +563,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
 			sizeof(struct amdgpu_clock_voltage_dependency_entry);
 		adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
 			kzalloc(size, GFP_KERNEL);
-		if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
-			amdgpu_free_extended_power_table(adev);
+		if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries)
 			return -ENOMEM;
-		}
 		adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
 			limits->numEntries;
 		entry = &limits->entries[0];

@@ -606,10 +584,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
 		ATOM_PowerTune_Table *pt;
 		adev->pm.dpm.dyn_state.cac_tdp_table =
 			kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
-		if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
-			amdgpu_free_extended_power_table(adev);
+		if (!adev->pm.dpm.dyn_state.cac_tdp_table)
 			return -ENOMEM;
-		}
 		if (rev > 0) {
 			ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
 				(mode_info->atom_context->bios + data_offset +

@@ -645,10 +621,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
 			ret = amdgpu_parse_clk_voltage_dep_table(
 					&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
 					dep_table);
-			if (ret) {
-				kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
+			if (ret)
 				return ret;
-			}
 		}
 	}

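
The si_dpm.c hunk and the long run of amdgpu_parse_extended_power_table() hunks all apply one fix: the function used to call amdgpu_free_extended_power_table() (or kfree() a sub-table) on its own error paths, but its caller already frees the extended tables when it returns an error, so each early free was a double free. Every error path now simply returns:

    /* Before: freed here AND in the caller on failure (double free). */
    if (ret) {
            amdgpu_free_extended_power_table(adev);
            return ret;
    }

    /* After: one point of cleanup, in the caller. */
    if (ret)
            return ret;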

@@ -2974,6 +2974,8 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 		result = smu7_get_evv_voltages(hwmgr);
 		if (result) {
 			pr_info("Get EVV Voltage Failed. Abort Driver loading!\n");
+			kfree(hwmgr->backend);
+			hwmgr->backend = NULL;
 			return -EINVAL;
 		}
 	} else {

@@ -3019,8 +3021,10 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 	}

 	result = smu7_update_edc_leakage_table(hwmgr);
-	if (result)
+	if (result) {
+		smu7_hwmgr_backend_fini(hwmgr);
 		return result;
+	}

 	return 0;
 }
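
The two smu7_hwmgr_backend_init() hunks close leaks on its late failure paths: the first frees the freshly allocated backend and NULLs the pointer so a later fini cannot free it twice, the second routes the EDC-leakage failure through smu7_hwmgr_backend_fini() for a full unwind. The free-and-clear half of the pattern:

    /* Sketch: clearing the pointer keeps a later backend_fini()
     * from double-freeing the same allocation. */
    kfree(hwmgr->backend);
    hwmgr->backend = NULL;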