sync

This commit is contained in:
parent 6871d7cb85
commit 451579e149

103 changed files with 365 additions and 470 deletions
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

@@ -2544,8 +2544,6 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 	amdgpu_fru_get_product_info(adev);
 
 init_failed:
-	if (amdgpu_sriov_vf(adev))
-		amdgpu_virt_release_full_gpu(adev, true);
 
 	return r;
 }
@@ -3593,6 +3591,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	int r, i;
 	bool px = false;
 	u32 max_MBps;
+	int tmp;
 
 	adev->shutdown = false;
 	adev->flags = flags;
@@ -3774,6 +3773,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 		adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
 			adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
 			(PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
+	/* APUs w/ gfx9 onwards doesn't reply on PCIe atomics, rather it is a
+	 * internal path natively support atomics, set have_atomics_support to true.
+	 */
+	else if ((adev->flags & AMD_IS_APU) &&
+		 (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0)))
+		adev->have_atomics_support = true;
 	else
 		adev->have_atomics_support =
 			!pci_enable_atomic_ops_to_root(adev->pdev,
@@ -3782,7 +3787,14 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	if (!adev->have_atomics_support)
 		dev_info(adev->dev, "PCIE atomic ops is not supported\n");
 #else
-	adev->have_atomics_support = false;
+	/* APUs w/ gfx9 onwards doesn't reply on PCIe atomics, rather it is a
+	 * internal path natively support atomics, set have_atomics_support to true.
+	 */
+	if ((adev->flags & AMD_IS_APU) &&
+	    (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0)))
+		adev->have_atomics_support = true;
+	else
+		adev->have_atomics_support = false;
 #endif
 
 	/* doorbell bar mapping and doorbell index init*/
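Note: the two hunks above change how PCIe atomics support is decided. On APUs whose GC IP block is newer than 9.0.0, atomics take an internal fabric path rather than the PCIe root port, so have_atomics_support is forced to true instead of depending on pci_enable_atomic_ops_to_root(). The #else branch, presumably for configs where the PCIe atomics probe is compiled out, gains the same APU special case.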
@@ -3820,7 +3832,13 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 			}
 		}
 	} else {
+		tmp = amdgpu_reset_method;
+		/* It should do a default reset when loading or reloading the driver,
+		 * regardless of the module parameter reset_method.
+		 */
+		amdgpu_reset_method = AMD_RESET_METHOD_NONE;
 		r = amdgpu_asic_reset(adev);
+		amdgpu_reset_method = tmp;
 		if (r) {
 			dev_err(adev->dev, "asic reset on init failed\n");
 			goto failed;
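Note: amdgpu_reset_method is a module parameter. The added lines stash it in the new tmp variable (declared in the earlier hunk), force AMD_RESET_METHOD_NONE so amdgpu_asic_reset() performs the ASIC's default reset during driver load or reload, then restore the user's chosen method afterwards.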
@@ -3880,18 +3898,6 @@ fence_driver_init:
 
 	r = amdgpu_device_ip_init(adev);
 	if (r) {
-		/* failed in exclusive mode due to timeout */
-		if (amdgpu_sriov_vf(adev) &&
-		    !amdgpu_sriov_runtime(adev) &&
-		    amdgpu_virt_mmio_blocked(adev) &&
-		    !amdgpu_virt_wait_reset(adev)) {
-			dev_err(adev->dev, "VF exclusive mode timeout\n");
-			/* Don't send request since VF is inactive. */
-			adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
-			adev->virt.ops = NULL;
-			r = -EAGAIN;
-			goto release_ras_con;
-		}
 		dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
 		goto release_ras_con;
@@ -4000,8 +4006,10 @@ fence_driver_init:
 				   msecs_to_jiffies(AMDGPU_RESUME_MS));
 	}
 
-	if (amdgpu_sriov_vf(adev))
+	if (amdgpu_sriov_vf(adev)) {
 		amdgpu_virt_release_full_gpu(adev, true);
+		flush_delayed_work(&adev->delayed_init_work);
+	}
 
 	r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
 	if (r)
@@ -4043,6 +4051,20 @@ fence_driver_init:
 	return 0;
 
 release_ras_con:
+	if (amdgpu_sriov_vf(adev))
+		amdgpu_virt_release_full_gpu(adev, true);
+
+	/* failed in exclusive mode due to timeout */
+	if (amdgpu_sriov_vf(adev) &&
+	    !amdgpu_sriov_runtime(adev) &&
+	    amdgpu_virt_mmio_blocked(adev) &&
+	    !amdgpu_virt_wait_reset(adev)) {
+		dev_err(adev->dev, "VF exclusive mode timeout\n");
+		/* Don't send request since VF is inactive. */
+		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
+		adev->virt.ops = NULL;
+		r = -EAGAIN;
+	}
 	amdgpu_release_ras_context(adev);
 
 failed:
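Note: the SR-IOV "exclusive mode timeout" handling moves from the amdgpu_device_ip_init() failure branch into the shared release_ras_con error path, which now also releases full GPU access for VFs (previously done under amdgpu_device_ip_init()'s init_failed label, removed in the first hunk). On the success path, VFs additionally flush delayed_init_work after releasing full GPU access.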
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c

@@ -545,7 +545,8 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
 		if (r)
 			amdgpu_fence_driver_force_completion(ring);
 
-		if (ring->fence_drv.irq_src)
+		if (!drm_dev_is_unplugged(adev_to_drm(adev)) &&
+		    ring->fence_drv.irq_src)
 			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
 				       ring->fence_drv.irq_type);
 
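Note: amdgpu_irq_put() is now skipped when drm_dev_is_unplugged() reports the device gone; on hot-unplug the interrupt source is no longer usable, and dropping the irq reference there presumably avoids a warning during fence driver teardown.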
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c

@@ -526,6 +526,8 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
 	case IP_VERSION(9, 3, 0):
 	/* GC 10.3.7 */
 	case IP_VERSION(10, 3, 7):
+	/* GC 11.0.1 */
+	case IP_VERSION(11, 0, 1):
 		if (amdgpu_tmz == 0) {
 			adev->gmc.tmz_enabled = false;
 			dev_info(adev->dev,
@@ -548,7 +550,6 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
 	case IP_VERSION(10, 3, 1):
 	/* YELLOW_CARP*/
 	case IP_VERSION(10, 3, 3):
-	case IP_VERSION(11, 0, 1):
 	case IP_VERSION(11, 0, 4):
 		/* Don't enable it by default yet.
 		 */
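Note: GC 11.0.1 moves out of the "don't enable it by default yet" group and joins GC 10.3.7 in the group where TMZ defaults to enabled and amdgpu_tmz=0 explicitly disables it.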
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c

@@ -2765,7 +2765,7 @@ static int dm_resume(void *handle)
 		 * this is the case when traversing through already created
 		 * MST connectors, should be skipped
 		 */
-		if (aconnector->dc_link->type == dc_connection_mst_branch)
+		if (aconnector && aconnector->mst_port)
 			continue;
 
 		mutex_lock(&aconnector->hpd_lock);
@@ -6494,7 +6494,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
 	int clock, bpp = 0;
 	bool is_y420 = false;
 
-	if (!aconnector->port || !aconnector->dc_sink)
+	if (!aconnector->port)
 		return 0;
 
 	mst_port = aconnector->port;
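Note: dm_resume() now identifies MST child connectors via their mst_port pointer (with a NULL check on aconnector) instead of the dc_link connection type, and dm_encoder_helper_atomic_check() only requires an MST port, no longer a dc_sink, before proceeding with the MST checks.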
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c

@@ -6925,23 +6925,6 @@ static int si_dpm_enable(struct amdgpu_device *adev)
 	return 0;
 }
 
-static int si_set_temperature_range(struct amdgpu_device *adev)
-{
-	int ret;
-
-	ret = si_thermal_enable_alert(adev, false);
-	if (ret)
-		return ret;
-	ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
-	if (ret)
-		return ret;
-	ret = si_thermal_enable_alert(adev, true);
-	if (ret)
-		return ret;
-
-	return ret;
-}
-
 static void si_dpm_disable(struct amdgpu_device *adev)
 {
 	struct rv7xx_power_info *pi = rv770_get_pi(adev);
@@ -7626,18 +7609,6 @@ static int si_dpm_process_interrupt(struct amdgpu_device *adev,
 
 static int si_dpm_late_init(void *handle)
 {
-	int ret;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	if (!adev->pm.dpm_enabled)
-		return 0;
-
-	ret = si_set_temperature_range(adev);
-	if (ret)
-		return ret;
-#if 0 //TODO ?
-	si_dpm_powergate_uvd(adev, true);
-#endif
 	return 0;
 }
 
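Note: this pair of hunks looks like a revert. The si_set_temperature_range() helper and the body of si_dpm_late_init(), which applied the R600 temperature range and re-enabled thermal alerts after DPM init, are removed, leaving late init as a no-op stub again.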
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c

@@ -588,7 +588,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
 	DpmClocks_t *clk_table = smu->smu_table.clocks_table;
 	SmuMetrics_legacy_t metrics;
 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
-	int i, size = 0, ret = 0;
+	int i, idx, size = 0, ret = 0;
 	uint32_t cur_value = 0, value = 0, count = 0;
 	bool cur_value_match_level = false;
 
@@ -662,7 +662,8 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
 	case SMU_MCLK:
 	case SMU_FCLK:
 		for (i = 0; i < count; i++) {
-			ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value);
+			idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
+			ret = vangogh_get_dpm_clk_limited(smu, clk_type, idx, &value);
 			if (ret)
 				return ret;
 			if (!value)
@@ -689,7 +690,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
 	DpmClocks_t *clk_table = smu->smu_table.clocks_table;
 	SmuMetrics_t metrics;
 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
-	int i, size = 0, ret = 0;
+	int i, idx, size = 0, ret = 0;
 	uint32_t cur_value = 0, value = 0, count = 0;
 	bool cur_value_match_level = false;
 	uint32_t min, max;
@@ -771,7 +772,8 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
 	case SMU_MCLK:
 	case SMU_FCLK:
 		for (i = 0; i < count; i++) {
-			ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value);
+			idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
+			ret = vangogh_get_dpm_clk_limited(smu, clk_type, idx, &value);
 			if (ret)
 				return ret;
 			if (!value)
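Note: the recurring change in this and the next four files fixes the order in which DPM levels are printed to sysfs (pp_dpm_mclk and friends). For MCLK/FCLK the firmware clock table is apparently stored highest-frequency-first, so the new idx walks it back to front and the levels come out ascending. The same remap repeats below for renoir, SMU v13.0.4, SMU v13.0.5 (MCLK only there) and yellow_carp. A minimal standalone sketch of the remap, with a made-up table (in the real driver these values come from vangogh_get_dpm_clk_limited() and friends):

#include <stdio.h>

/* Hypothetical MCLK DPM table in firmware order: highest clock first. */
static const unsigned int mclk_table[] = { 1600, 1200, 800 }; /* MHz */

int main(void)
{
	const int count = sizeof(mclk_table) / sizeof(mclk_table[0]);
	int i, idx;

	for (i = 0; i < count; i++) {
		/* Same remap as the hunks above: walk the table backwards
		 * so level 0 prints as the lowest clock. */
		idx = count - i - 1;
		printf("%d: %uMhz\n", i, mclk_table[idx]);
	}
	return 0;
}

Compiled with any C compiler this prints 0: 800Mhz, 1: 1200Mhz, 2: 1600Mhz, the ascending presentation userspace expects from the pp_dpm_* files.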
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c

@@ -494,7 +494,7 @@ static int renoir_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
 static int renoir_print_clk_levels(struct smu_context *smu,
 			enum smu_clk_type clk_type, char *buf)
 {
-	int i, size = 0, ret = 0;
+	int i, idx, size = 0, ret = 0;
 	uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0;
 	SmuMetrics_t metrics;
 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
@@ -594,7 +594,8 @@ static int renoir_print_clk_levels(struct smu_context *smu,
 	case SMU_VCLK:
 	case SMU_DCLK:
 		for (i = 0; i < count; i++) {
-			ret = renoir_get_dpm_clk_limited(smu, clk_type, i, &value);
+			idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
+			ret = renoir_get_dpm_clk_limited(smu, clk_type, idx, &value);
 			if (ret)
 				return ret;
 			if (!value)
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c

@@ -478,7 +478,7 @@ static int smu_v13_0_4_get_dpm_level_count(struct smu_context *smu,
 static int smu_v13_0_4_print_clk_levels(struct smu_context *smu,
 					enum smu_clk_type clk_type, char *buf)
 {
-	int i, size = 0, ret = 0;
+	int i, idx, size = 0, ret = 0;
 	uint32_t cur_value = 0, value = 0, count = 0;
 	uint32_t min, max;
 
@@ -512,7 +512,8 @@ static int smu_v13_0_4_print_clk_levels(struct smu_context *smu,
 			break;
 
 		for (i = 0; i < count; i++) {
-			ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type, i, &value);
+			idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
+			ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type, idx, &value);
 			if (ret)
 				break;
 
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c

@@ -866,7 +866,7 @@ out:
 static int smu_v13_0_5_print_clk_levels(struct smu_context *smu,
 				enum smu_clk_type clk_type, char *buf)
 {
-	int i, size = 0, ret = 0;
+	int i, idx, size = 0, ret = 0;
 	uint32_t cur_value = 0, value = 0, count = 0;
 	uint32_t min = 0, max = 0;
 
@@ -898,7 +898,8 @@ static int smu_v13_0_5_print_clk_levels(struct smu_context *smu,
 			goto print_clk_out;
 
 		for (i = 0; i < count; i++) {
-			ret = smu_v13_0_5_get_dpm_freq_by_index(smu, clk_type, i, &value);
+			idx = (clk_type == SMU_MCLK) ? (count - i - 1) : i;
+			ret = smu_v13_0_5_get_dpm_freq_by_index(smu, clk_type, idx, &value);
 			if (ret)
 				goto print_clk_out;
 
drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c

@@ -1000,7 +1000,7 @@ out:
 static int yellow_carp_print_clk_levels(struct smu_context *smu,
 				enum smu_clk_type clk_type, char *buf)
 {
-	int i, size = 0, ret = 0;
+	int i, idx, size = 0, ret = 0;
 	uint32_t cur_value = 0, value = 0, count = 0;
 	uint32_t min, max;
 
@@ -1033,7 +1033,8 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu,
 			goto print_clk_out;
 
 		for (i = 0; i < count; i++) {
-			ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, i, &value);
+			idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
+			ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, idx, &value);
 			if (ret)
 				goto print_clk_out;
 