sync with OpenBSD -current
This commit is contained in:
parent af487c914f
commit bf24177229
83 changed files with 1053 additions and 684 deletions
@@ -100,6 +100,7 @@ struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock)
 	amdgpu_afmt_calc_cts(clock, &res.cts_32khz, &res.n_32khz, 32000);
 	amdgpu_afmt_calc_cts(clock, &res.cts_44_1khz, &res.n_44_1khz, 44100);
 	amdgpu_afmt_calc_cts(clock, &res.cts_48khz, &res.n_48khz, 48000);
+	res.clock = clock;
 
 	return res;
 }
@@ -407,6 +407,10 @@ static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
 		 "Called with userptr BO"))
 		return -EINVAL;
 
+	/* bo has been pinned, not need validate it */
+	if (bo->tbo.pin_count)
+		return 0;
+
 	amdgpu_bo_placement_from_domain(bo, domain);
 
 	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
@@ -2631,7 +2635,7 @@ static int confirm_valid_user_pages_locked(struct amdkfd_process_info *process_i
 
 		/* keep mem without hmm range at userptr_inval_list */
 		if (!mem->range)
-			 continue;
+			continue;
 
 		/* Only check mem with hmm range associated */
 		valid = amdgpu_ttm_tt_get_user_pages_done(
@@ -2848,9 +2852,6 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
 			if (!attachment->is_mapped)
 				continue;
 
-			if (attachment->bo_va->base.bo->tbo.pin_count)
-				continue;
-
 			kfd_mem_dmaunmap_attachment(mem, attachment);
 			ret = update_gpuvm_pte(mem, attachment, &sync_obj);
 			if (ret) {
@@ -1476,6 +1476,8 @@ int amdgpu_atombios_init_mc_reg_table(struct amdgpu_device *adev,
 					(u32)le32_to_cpu(*((u32 *)reg_data + j));
 				j++;
 			} else if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_EQU_PREV) {
+				if (i == 0)
+					continue;
 				reg_table->mc_reg_table_entry[num_ranges].mc_data[i] =
 					reg_table->mc_reg_table_entry[num_ranges].mc_data[i - 1];
 			}
@@ -213,6 +213,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 		struct amdgpu_firmware_info *ucode;
 
 		id = fw_type_convert(cgs_device, type);
+		if (id >= AMDGPU_UCODE_ID_MAXIMUM)
+			return -EINVAL;
+
 		ucode = &adev->firmware.ucode[id];
 		if (ucode->fw == NULL)
 			return -EINVAL;
@@ -4605,7 +4605,8 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
 		shadow = vmbo->shadow;
 
 		/* No need to recover an evicted BO */
-		if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
+		if (!shadow->tbo.resource ||
+		    shadow->tbo.resource->mem_type != TTM_PL_TT ||
 		    shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
 		    shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
 			continue;
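The reordered condition above works because C's || operator short-circuits: once !shadow->tbo.resource is true, the later dereferences are never evaluated. A minimal standalone sketch of the same guard pattern (the struct names here are invented for illustration, not the driver's types):

#include <stdio.h>
#include <stddef.h>

struct resource { int mem_type; };
struct bo { struct resource *resource; };

/* Safe: the NULL test runs first, so bo->resource->mem_type
 * is only read when the pointer is known to be valid. */
static int should_skip(const struct bo *bo)
{
	return bo->resource == NULL || bo->resource->mem_type != 1;
}

int main(void)
{
	struct bo evicted = { .resource = NULL };
	struct resource r = { .mem_type = 1 };
	struct bo valid = { .resource = &r };

	printf("evicted: skip=%d\n", should_skip(&evicted)); /* skip=1, no crash */
	printf("valid:   skip=%d\n", should_skip(&valid));   /* skip=0 */
	return 0;
}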
@@ -5372,7 +5373,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 	 * to put adev in the 1st position.
 	 */
 	INIT_LIST_HEAD(&device_list);
-	if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
+	if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1) && hive) {
 		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
 			list_add_tail(&tmp_adev->reset_list, &device_list);
 			if (gpu_reset_for_dev_remove && adev->shutdown)
@@ -1567,7 +1567,7 @@ static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
 		break;
 	case 2:
 		mall_size_per_umc = le32_to_cpu(mall_info->v2.mall_size_per_umc);
-		adev->gmc.mall_size = mall_size_per_umc * adev->gmc.num_umc;
+		adev->gmc.mall_size = (uint64_t)mall_size_per_umc * adev->gmc.num_umc;
 		break;
 	default:
 		dev_err(adev->dev,
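The (uint64_t) cast matters because both operands are 32-bit: without it the multiply is performed in 32 bits and only then widened, so a large mall_size_per_umc silently wraps. A small self-contained demonstration (the values are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t size_per_umc = 512U * 1024U * 1024U; /* 512 MiB */
	uint32_t num_umc = 16;

	uint64_t wrong = size_per_umc * num_umc;           /* 32-bit multiply, wraps to 0 */
	uint64_t right = (uint64_t)size_per_umc * num_umc; /* widened before multiplying */

	printf("wrong: %llu\n", (unsigned long long)wrong); /* 0 */
	printf("right: %llu\n", (unsigned long long)right); /* 8589934592 */
	return 0;
}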
@@ -179,7 +179,7 @@ static int __amdgpu_eeprom_xfer(struct i2c_adapter *i2c_adap, u32 eeprom_addr,
  * Returns the number of bytes read/written; -errno on error.
  */
 static int amdgpu_eeprom_xfer(struct i2c_adapter *i2c_adap, u32 eeprom_addr,
-			      u8 *eeprom_buf, u16 buf_size, bool read)
+			      u8 *eeprom_buf, u32 buf_size, bool read)
 {
 	const struct i2c_adapter_quirks *quirks = i2c_adap->quirks;
 	u16 limit;
@@ -225,7 +225,7 @@ static int amdgpu_eeprom_xfer(struct i2c_adapter *i2c_adap, u32 eeprom_addr,
 
 int amdgpu_eeprom_read(struct i2c_adapter *i2c_adap,
 		       u32 eeprom_addr, u8 *eeprom_buf,
-		       u16 bytes)
+		       u32 bytes)
 {
 	return amdgpu_eeprom_xfer(i2c_adap, eeprom_addr, eeprom_buf, bytes,
 				  true);
@@ -233,7 +233,7 @@ int amdgpu_eeprom_read(struct i2c_adapter *i2c_adap,
 
 int amdgpu_eeprom_write(struct i2c_adapter *i2c_adap,
 			u32 eeprom_addr, u8 *eeprom_buf,
-			u16 bytes)
+			u32 bytes)
 {
 	return amdgpu_eeprom_xfer(i2c_adap, eeprom_addr, eeprom_buf, bytes,
 				  false);
@@ -28,10 +28,10 @@
 
 int amdgpu_eeprom_read(struct i2c_adapter *i2c_adap,
 		       u32 eeprom_addr, u8 *eeprom_buf,
-		       u16 bytes);
+		       u32 bytes);
 
 int amdgpu_eeprom_write(struct i2c_adapter *i2c_adap,
 			u32 eeprom_addr, u8 *eeprom_buf,
-			u16 bytes);
+			u32 bytes);
 
 #endif
@@ -34,6 +34,7 @@
 #include <asm/set_memory.h>
 #endif
 #include "amdgpu.h"
+#include "amdgpu_reset.h"
 #include <drm/drm_drv.h>
 #include <drm/ttm/ttm_tt.h>
 
@@ -404,7 +405,10 @@ void amdgpu_gart_invalidate_tlb(struct amdgpu_device *adev)
 		return;
 
 	mb();
-	amdgpu_device_flush_hdp(adev, NULL);
+	if (down_read_trylock(&adev->reset_domain->sem)) {
+		amdgpu_device_flush_hdp(adev, NULL);
+		up_read(&adev->reset_domain->sem);
+	}
 	for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS)
 		amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
 }
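down_read_trylock() here avoids blocking on the reset domain's rwsem: if a GPU reset holds it for writing, the HDP flush is simply skipped instead of deadlocking. A userspace analogue of the control flow, sketched with POSIX rwlocks (the kernel API differs; this only mirrors the pattern):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t reset_lock = PTHREAD_RWLOCK_INITIALIZER;

static void flush_hdp(void)
{
	puts("flushing HDP");
}

static void invalidate_tlb(void)
{
	/* Only flush when no reset is in progress; never block here. */
	if (pthread_rwlock_tryrdlock(&reset_lock) == 0) {
		flush_hdp();
		pthread_rwlock_unlock(&reset_lock);
	}
	puts("flushing TLBs");
}

int main(void)
{
	invalidate_tlb(); /* compile with -lpthread */
	return 0;
}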
@@ -1336,6 +1336,9 @@ static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
 	uint8_t dst_num_links = node_info.num_links;
 
 	hive = amdgpu_get_xgmi_hive(psp->adev);
+	if (WARN_ON(!hive))
+		return;
+
 	list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) {
 		struct psp_xgmi_topology_info *mirror_top_info;
 		int j;
@@ -352,7 +352,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
 	ring->max_dw = max_dw;
 	ring->hw_prio = hw_prio;
 
-	if (!ring->no_scheduler) {
+	if (!ring->no_scheduler && ring->funcs->type < AMDGPU_HW_IP_NUM) {
 		hw_ip = ring->funcs->type;
 		num_sched = &adev->gpu_sched[hw_ip][hw_prio].num_scheds;
 		adev->gpu_sched[hw_ip][hw_prio].sched[(*num_sched)++] =
@@ -469,8 +469,9 @@ static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
 					size_t size, loff_t *pos)
 {
 	struct amdgpu_ring *ring = file_inode(f)->i_private;
-	int r, i;
 	uint32_t value, result, early[3];
+	loff_t i;
+	int r;
 
 	if (*pos & 3 || size & 3)
 		return -EINVAL;
@@ -135,6 +135,10 @@ static ssize_t amdgpu_securedisplay_debugfs_write(struct file *f, const char __u
 		mutex_unlock(&psp->securedisplay_context.mutex);
 		break;
 	case 2:
+		if (size < 3 || phy_id >= TA_SECUREDISPLAY_MAX_PHY) {
+			dev_err(adev->dev, "Invalid input: %s\n", str);
+			return -EINVAL;
+		}
 		mutex_lock(&psp->securedisplay_context.mutex);
 		psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
 			TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
@@ -616,7 +616,7 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
 	vf2pf_info->dummy_page_addr = (uint64_t)adev->dummy_page_addr;
 	vf2pf_info->checksum =
 		amd_sriov_msg_checksum(
-		vf2pf_info, vf2pf_info->header.size, 0, 0);
+		vf2pf_info, sizeof(*vf2pf_info), 0, 0);
 
 	return 0;
 }
@@ -999,6 +999,9 @@ static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v
 		return 0;
 	}
 
+	if (amdgpu_device_skip_hw_access(adev))
+		return 0;
+
 	reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[xcc_id];
 	scratch_reg0 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg0;
 	scratch_reg1 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg1;
@@ -1074,6 +1077,9 @@ void amdgpu_sriov_wreg(struct amdgpu_device *adev,
 {
 	u32 rlcg_flag;
 
+	if (amdgpu_device_skip_hw_access(adev))
+		return;
+
 	if (!amdgpu_sriov_runtime(adev) &&
 	    amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, true, &rlcg_flag)) {
 		amdgpu_virt_rlcg_reg_rw(adev, offset, value, rlcg_flag, xcc_id);
@@ -1091,6 +1097,9 @@ u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
 {
 	u32 rlcg_flag;
 
+	if (amdgpu_device_skip_hw_access(adev))
+		return 0;
+
 	if (!amdgpu_sriov_runtime(adev) &&
 	    amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, false, &rlcg_flag))
 		return amdgpu_virt_rlcg_reg_rw(adev, offset, 0, rlcg_flag, xcc_id);
@@ -500,6 +500,12 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
 
 	if (mode == AMDGPU_AUTO_COMPUTE_PARTITION_MODE) {
 		mode = __aqua_vanjaram_get_auto_mode(xcp_mgr);
+		if (mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) {
+			dev_err(adev->dev,
+				"Invalid config, no compatible compute partition mode found, available memory partitions: %d",
+				adev->gmc.num_mem_partitions);
+			return -EINVAL;
+		}
 	} else if (!__aqua_vanjaram_is_valid_mode(xcp_mgr, mode)) {
 		dev_err(adev->dev,
 			"Invalid compute partition mode requested, requested: %s, available memory partitions: %d",
@@ -70,6 +70,8 @@ static u32 df_v1_7_get_hbm_channel_number(struct amdgpu_device *adev)
 	int fb_channel_number;
 
 	fb_channel_number = adev->df.funcs->get_fb_channel_number(adev);
+	if (fb_channel_number >= ARRAY_SIZE(df_v1_7_channel_number))
+		fb_channel_number = 0;
 
 	return df_v1_7_channel_number[fb_channel_number];
 }
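ARRAY_SIZE(df_v1_7_channel_number) is the usual sizeof(arr)/sizeof(arr[0]) macro, so the lookup can never read past the table; an out-of-range channel count falls back to entry 0. A standalone sketch of the same clamp (the table contents here are invented):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const int channel_number[] = { 1, 2, 0, 4, 0, 8 };

static int get_hbm_channel_number(int fb_channel_number)
{
	/* Clamp before indexing: a bogus value from hardware or
	 * firmware must not become an out-of-bounds read. */
	if (fb_channel_number < 0 ||
	    (size_t)fb_channel_number >= ARRAY_SIZE(channel_number))
		fb_channel_number = 0;
	return channel_number[fb_channel_number];
}

int main(void)
{
	printf("%d\n", get_hbm_channel_number(3));  /* 4 */
	printf("%d\n", get_hbm_channel_number(99)); /* clamped -> 1 */
	return 0;
}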
@@ -384,7 +384,7 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
 	else
 		WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
 
-	if (!ras->disable_ras_err_cnt_harvest) {
+	if (ras && !ras->disable_ras_err_cnt_harvest && obj) {
 		/*
 		 * clear error status after ras_controller_intr
 		 * according to hw team and count ue number
@@ -42,8 +42,6 @@
 #define CRAT_OEMTABLEID_LENGTH	8
 #define CRAT_RESERVED_LENGTH	6
 
-#define CRAT_OEMID_64BIT_MASK ((1ULL << (CRAT_OEMID_LENGTH * 8)) - 1)
-
 /* Compute Unit flags */
 #define COMPUTE_UNIT_CPU	(1 << 0)  /* Create Virtual CRAT for CPU */
 #define COMPUTE_UNIT_GPU	(1 << 1)  /* Create Virtual CRAT for GPU */
@@ -103,7 +103,8 @@ void debug_event_write_work_handler(struct work_struct *work)
 			struct kfd_process,
 			debug_event_workarea);
 
-	kernel_write(process->dbg_ev_file, &write_data, 1, &pos);
+	if (process->debug_trap_enabled && process->dbg_ev_file)
+		kernel_write(process->dbg_ev_file, &write_data, 1, &pos);
 }
 
 /* update process/device/queue exception status, write to descriptor
@@ -645,6 +646,7 @@ int kfd_dbg_trap_disable(struct kfd_process *target)
 	else if (target->runtime_info.runtime_state != DEBUG_RUNTIME_STATE_DISABLED)
 		target->runtime_info.runtime_state = DEBUG_RUNTIME_STATE_ENABLED;
 
+	cancel_work_sync(&target->debug_event_workarea);
 	fput(target->dbg_ev_file);
 	target->dbg_ev_file = NULL;
 
@@ -28,6 +28,7 @@
 #include "kfd_priv.h"
 #include "kfd_kernel_queue.h"
 #include "amdgpu_amdkfd.h"
+#include "amdgpu_reset.h"
 
 static inline struct process_queue_node *get_queue_by_qid(
 			struct process_queue_manager *pqm, unsigned int qid)
@@ -87,8 +88,12 @@ void kfd_process_dequeue_from_device(struct kfd_process_device *pdd)
 		return;
 
 	dev->dqm->ops.process_termination(dev->dqm, &pdd->qpd);
-	if (dev->kfd->shared_resources.enable_mes)
-		amdgpu_mes_flush_shader_debugger(dev->adev, pdd->proc_ctx_gpu_addr);
+	if (dev->kfd->shared_resources.enable_mes &&
+	    down_read_trylock(&dev->adev->reset_domain->sem)) {
+		amdgpu_mes_flush_shader_debugger(dev->adev,
+						 pdd->proc_ctx_gpu_addr);
+		up_read(&dev->adev->reset_domain->sem);
+	}
 	pdd->already_dequeued = true;
 }
 
@@ -958,8 +958,7 @@ static void kfd_update_system_properties(void)
 	dev = list_last_entry(&topology_device_list,
 			struct kfd_topology_device, list);
 	if (dev) {
-		sys_props.platform_id =
-			(*((uint64_t *)dev->oem_id)) & CRAT_OEMID_64BIT_MASK;
+		sys_props.platform_id = dev->oem_id64;
 		sys_props.platform_oem = *((uint64_t *)dev->oem_table_id);
 		sys_props.platform_rev = dev->oem_revision;
 	}
@@ -154,7 +154,10 @@ struct kfd_topology_device {
 	struct attribute	attr_gpuid;
 	struct attribute	attr_name;
 	struct attribute	attr_props;
-	uint8_t			oem_id[CRAT_OEMID_LENGTH];
+	union {
+		uint8_t		oem_id[CRAT_OEMID_LENGTH];
+		uint64_t	oem_id64;
+	};
 	uint8_t			oem_table_id[CRAT_OEMTABLEID_LENGTH];
 	uint32_t		oem_revision;
 };
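Replacing the *(uint64_t *)dev->oem_id cast with a union gives the same 8-byte view of the ID without the alignment and strict-aliasing hazards of casting a uint8_t array, which is why the mask macro above could be dropped. A minimal sketch of the idiom:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define OEMID_LENGTH 8

struct topo_dev {
	union {
		uint8_t  oem_id[OEMID_LENGTH];
		uint64_t oem_id64;
	};
};

int main(void)
{
	struct topo_dev dev;
	memcpy(dev.oem_id, "ACMECORP", OEMID_LENGTH); /* fill the byte view */

	/* Read the same storage as one integer - no cast needed. */
	printf("platform_id = 0x%016llx\n",
	       (unsigned long long)dev.oem_id64);
	return 0;
}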
@@ -4361,7 +4361,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 
 	/* There is one primary plane per CRTC */
 	primary_planes = dm->dc->caps.max_streams;
-	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
+	if (primary_planes > AMDGPU_MAX_PLANES) {
+		DRM_ERROR("DM: Plane nums out of 6 planes\n");
+		return -EINVAL;
+	}
 
 	/*
 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
@@ -8289,15 +8292,13 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 				bundle->stream_update.vrr_infopacket =
 					&acrtc_state->stream->vrr_infopacket;
 		}
-	} else if (cursor_update && acrtc_state->active_planes > 0 &&
-		   acrtc_attach->base.state->event) {
-		drm_crtc_vblank_get(pcrtc);
-
+	} else if (cursor_update && acrtc_state->active_planes > 0) {
 		spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
-
-		acrtc_attach->event = acrtc_attach->base.state->event;
-		acrtc_attach->base.state->event = NULL;
-
+		if (acrtc_attach->base.state->event) {
+			drm_crtc_vblank_get(pcrtc);
+			acrtc_attach->event = acrtc_attach->base.state->event;
+			acrtc_attach->base.state->event = NULL;
+		}
 		spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
 	}
 
@@ -49,7 +49,7 @@
 
 #define AMDGPU_DM_MAX_NUM_EDP 2
 
-#define AMDGPU_DMUB_NOTIFICATION_MAX 5
+#define AMDGPU_DMUB_NOTIFICATION_MAX 6
 
 #define HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID 0x00001A
 #define AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE 0x40
@@ -667,6 +667,9 @@ static enum bp_result get_ss_info_v3_1(
 	ss_table_header_include = ((ATOM_ASIC_INTERNAL_SS_INFO_V3 *) bios_get_image(&bp->base,
 				DATA_TABLES(ASIC_InternalSS_Info),
 				struct_size(ss_table_header_include, asSpreadSpectrum, 1)));
+	if (!ss_table_header_include)
+		return BP_RESULT_UNSUPPORTED;
+
 	table_size =
 		(le16_to_cpu(ss_table_header_include->sHeader.usStructureSize)
 			- sizeof(ATOM_COMMON_TABLE_HEADER))
@@ -1036,6 +1039,8 @@ static enum bp_result get_ss_info_from_internal_ss_info_tbl_V2_1(
 			&bp->base,
 			DATA_TABLES(ASIC_InternalSS_Info),
 			struct_size(header, asSpreadSpectrum, 1)));
+	if (!header)
+		return result;
 
 	memset(info, 0, sizeof(struct spread_spectrum_info));
 
@@ -1109,6 +1114,8 @@ static enum bp_result get_ss_info_from_ss_info_table(
 	get_atom_data_table_revision(header, &revision);
 
 	tbl = GET_IMAGE(ATOM_SPREAD_SPECTRUM_INFO, DATA_TABLES(SS_Info));
+	if (!tbl)
+		return result;
 
 	if (1 != revision.major || 2 > revision.minor)
 		return result;
@@ -1636,6 +1643,8 @@ static uint32_t get_ss_entry_number_from_ss_info_tbl(
 
 	tbl = GET_IMAGE(ATOM_SPREAD_SPECTRUM_INFO,
 			DATA_TABLES(SS_Info));
+	if (!tbl)
+		return number;
 
 	if (1 != revision.major || 2 > revision.minor)
 		return number;
@@ -1718,6 +1727,8 @@ static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_v2_1(
 			&bp->base,
 			DATA_TABLES(ASIC_InternalSS_Info),
 			struct_size(header_include, asSpreadSpectrum, 1)));
+	if (!header_include)
+		return 0;
 
 	size = (le16_to_cpu(header_include->sHeader.usStructureSize)
 			- sizeof(ATOM_COMMON_TABLE_HEADER))
@@ -1756,6 +1767,9 @@ static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_V3_1(
 	header_include = ((ATOM_ASIC_INTERNAL_SS_INFO_V3 *) bios_get_image(&bp->base,
 				DATA_TABLES(ASIC_InternalSS_Info),
 				struct_size(header_include, asSpreadSpectrum, 1)));
+	if (!header_include)
+		return number;
+
 	size = (le16_to_cpu(header_include->sHeader.usStructureSize) -
 			sizeof(ATOM_COMMON_TABLE_HEADER)) /
 				sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
@@ -2552,8 +2566,8 @@ static enum bp_result construct_integrated_info(
 
 	/* Sort voltage table from low to high*/
 	if (result == BP_RESULT_OK) {
-		uint32_t i;
-		uint32_t j;
+		int32_t i;
+		int32_t j;
 
 		for (i = 1; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
 			for (j = i; j > 0; --j) {
@@ -2935,8 +2935,11 @@ static enum bp_result construct_integrated_info(
 	struct atom_common_table_header *header;
 	struct atom_data_revision revision;
 
-	uint32_t i;
-	uint32_t j;
+	int32_t i;
+	int32_t j;
+
+	if (!info)
+		return result;
 
 	if (info && DATA_TABLES(integratedsysteminfo)) {
 		header = GET_IMAGE(struct atom_common_table_header,
@@ -484,7 +484,8 @@ static void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_sm
 			ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
 
 			/* Modify previous watermark range to cover up to max */
-			ranges->reader_wm_sets[num_valid_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+			if (num_valid_sets > 0)
+				ranges->reader_wm_sets[num_valid_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
 		}
 		num_valid_sets++;
 	}
@@ -1298,6 +1298,7 @@ struct dc *dc_create(const struct dc_init_data *init_params)
 		return NULL;
 
 	if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
+		dc->caps.linear_pitch_alignment = 64;
 		if (!dc_construct_ctx(dc, init_params))
 			goto destruct_dc;
 	} else {
@@ -3927,6 +3927,9 @@ void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream,
 
 enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream)
 {
+	if (dc == NULL || stream == NULL)
+		return DC_ERROR_UNEXPECTED;
+
 	struct dc_link *link = stream->link;
 	struct timing_generator *tg = dc->res_pool->timing_generators[0];
 	enum dc_status res = DC_OK;
@@ -102,7 +102,8 @@ static void dmub_replay_enable(struct dmub_replay *dmub, bool enable, bool wait,
 				break;
 		}
 
-		fsleep(500);
+		/* must *not* be fsleep - this can be called from high irq levels */
+		udelay(500);
 	}
 
 	/* assert if max retry hit */
@@ -690,6 +690,9 @@ static void wbscl_set_scaler_filter(
 	int pair;
 	uint16_t odd_coef, even_coef;
 
+	if (!filter)
+		return;
+
 	for (phase = 0; phase < (NUM_PHASES / 2 + 1); phase++) {
 		for (pair = 0; pair < tap_pairs; pair++) {
 			even_coef = filter[phase * taps + 2 * pair];
@@ -1453,10 +1453,9 @@ void dcn_bw_update_from_pplib_fclks(
 	ASSERT(fclks->num_levels);
 
 	vmin0p65_idx = 0;
-	vmid0p72_idx = fclks->num_levels -
-		(fclks->num_levels > 2 ? 3 : (fclks->num_levels > 1 ? 2 : 1));
-	vnom0p8_idx = fclks->num_levels - (fclks->num_levels > 1 ? 2 : 1);
-	vmax0p9_idx = fclks->num_levels - 1;
+	vmid0p72_idx = fclks->num_levels > 2 ? fclks->num_levels - 3 : 0;
+	vnom0p8_idx = fclks->num_levels > 1 ? fclks->num_levels - 2 : 0;
+	vmax0p9_idx = fclks->num_levels > 0 ? fclks->num_levels - 1 : 0;
 
 	dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 =
 		32 * (fclks->data[vmin0p65_idx].clocks_in_khz / 1000.0) / 1000.0;
@@ -304,6 +304,16 @@ void dcn302_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p
 			dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
 		}
 
+		/* bw_params->clk_table.entries[MAX_NUM_DPM_LVL].
+		 * MAX_NUM_DPM_LVL is 8.
+		 * dcn3_02_soc.clock_limits[DC__VOLTAGE_STATES].
+		 * DC__VOLTAGE_STATES is 40.
+		 */
+		if (num_states > MAX_NUM_DPM_LVL) {
+			ASSERT(0);
+			return;
+		}
+
 		dcn3_02_soc.num_states = num_states;
 		for (i = 0; i < dcn3_02_soc.num_states; i++) {
 			dcn3_02_soc.clock_limits[i].state = i;
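The guard protects the fixed-size clock_limits array: the clk table can describe more DPM levels than the SoC struct holds, so the copy must be refused rather than overrun (the same guard is repeated for dcn303, dcn32, and dcn321 in the hunks below). The shape of the check, reduced to a standalone sketch (sizes and names invented):

#include <assert.h>
#include <stdio.h>

#define MAX_NUM_DPM_LVL 8

static int soc_states[MAX_NUM_DPM_LVL];

static void update_states(const int *levels, int num_states)
{
	/* Refuse to copy more entries than the destination holds. */
	if (num_states > MAX_NUM_DPM_LVL) {
		assert(0); /* loud in debug builds, silent no-op path otherwise */
		return;
	}
	for (int i = 0; i < num_states; i++)
		soc_states[i] = levels[i];
	printf("copied %d states\n", num_states);
}

int main(void)
{
	int levels[10] = { 0 };
	update_states(levels, 6); /* ok */
	/* update_states(levels, 10) would trip the guard and return
	 * without writing past soc_states[]. */
	return 0;
}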
@@ -299,6 +299,16 @@ void dcn303_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p
 			dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
 		}
 
+		/* bw_params->clk_table.entries[MAX_NUM_DPM_LVL].
+		 * MAX_NUM_DPM_LVL is 8.
+		 * dcn3_02_soc.clock_limits[DC__VOLTAGE_STATES].
+		 * DC__VOLTAGE_STATES is 40.
+		 */
+		if (num_states > MAX_NUM_DPM_LVL) {
+			ASSERT(0);
+			return;
+		}
+
 		dcn3_03_soc.num_states = num_states;
 		for (i = 0; i < dcn3_03_soc.num_states; i++) {
 			dcn3_03_soc.clock_limits[i].state = i;
@@ -2885,6 +2885,16 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa
 			dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
 		}
 
+		/* bw_params->clk_table.entries[MAX_NUM_DPM_LVL].
+		 * MAX_NUM_DPM_LVL is 8.
+		 * dcn3_02_soc.clock_limits[DC__VOLTAGE_STATES].
+		 * DC__VOLTAGE_STATES is 40.
+		 */
+		if (num_states > MAX_NUM_DPM_LVL) {
+			ASSERT(0);
+			return;
+		}
+
 		dcn3_2_soc.num_states = num_states;
 		for (i = 0; i < dcn3_2_soc.num_states; i++) {
 			dcn3_2_soc.clock_limits[i].state = i;
@@ -789,6 +789,16 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
 			dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
 		}
 
+		/* bw_params->clk_table.entries[MAX_NUM_DPM_LVL].
+		 * MAX_NUM_DPM_LVL is 8.
+		 * dcn3_02_soc.clock_limits[DC__VOLTAGE_STATES].
+		 * DC__VOLTAGE_STATES is 40.
+		 */
+		if (num_states > MAX_NUM_DPM_LVL) {
+			ASSERT(0);
+			return;
+		}
+
 		dcn3_21_soc.num_states = num_states;
 		for (i = 0; i < dcn3_21_soc.num_states; i++) {
 			dcn3_21_soc.clock_limits[i].state = i;
@@ -1099,8 +1099,13 @@ void ModeSupportAndSystemConfiguration(struct display_mode_lib *mode_lib)
 
 	// Total Available Pipes Support Check
 	for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
-		total_pipes += mode_lib->vba.DPPPerPlane[k];
 		pipe_idx = get_pipe_idx(mode_lib, k);
+		if (pipe_idx == -1) {
+			ASSERT(0);
+			continue; // skip inactive planes
+		}
+		total_pipes += mode_lib->vba.DPPPerPlane[k];
+
 		if (mode_lib->vba.cache_pipes[pipe_idx].clks_cfg.dppclk_mhz > 0.0)
 			mode_lib->vba.DPPCLK[k] = mode_lib->vba.cache_pipes[pipe_idx].clks_cfg.dppclk_mhz;
 		else
@@ -56,7 +56,7 @@ struct gpio_service *dal_gpio_service_create(
 	struct dc_context *ctx)
 {
 	struct gpio_service *service;
-	uint32_t index_of_id;
+	int32_t index_of_id;
 
 	service = kzalloc(sizeof(struct gpio_service), GFP_KERNEL);
 
@@ -112,7 +112,7 @@ struct gpio_service *dal_gpio_service_create(
 	return service;
 
 failure_2:
-	while (index_of_id) {
+	while (index_of_id > 0) {
 		--index_of_id;
 		kfree(service->busyness[index_of_id]);
 	}
@@ -239,6 +239,9 @@ static bool is_pin_busy(
 	enum gpio_id id,
 	uint32_t en)
 {
+	if (id == GPIO_ID_UNKNOWN)
+		return false;
+
 	return service->busyness[id][en];
 }
 
@@ -247,6 +250,9 @@ static void set_pin_busy(
 	enum gpio_id id,
 	uint32_t en)
 {
+	if (id == GPIO_ID_UNKNOWN)
+		return;
+
 	service->busyness[id][en] = true;
 }
 
@@ -255,6 +261,9 @@ static void set_pin_free(
 	enum gpio_id id,
 	uint32_t en)
 {
+	if (id == GPIO_ID_UNKNOWN)
+		return;
+
 	service->busyness[id][en] = false;
 }
 
@@ -263,7 +272,7 @@ enum gpio_result dal_gpio_service_lock(
 	enum gpio_id id,
 	uint32_t en)
 {
-	if (!service->busyness[id]) {
+	if (id != GPIO_ID_UNKNOWN && !service->busyness[id]) {
 		ASSERT_CRITICAL(false);
 		return GPIO_RESULT_OPEN_FAILED;
 	}
@@ -277,7 +286,7 @@ enum gpio_result dal_gpio_service_unlock(
 	enum gpio_id id,
 	uint32_t en)
 {
-	if (!service->busyness[id]) {
+	if (id != GPIO_ID_UNKNOWN && !service->busyness[id]) {
 		ASSERT_CRITICAL(false);
 		return GPIO_RESULT_OPEN_FAILED;
 	}
@@ -130,13 +130,21 @@ static bool hdmi_14_process_transaction(
 	const uint8_t hdcp_i2c_addr_link_primary = 0x3a; /* 0x74 >> 1*/
 	const uint8_t hdcp_i2c_addr_link_secondary = 0x3b; /* 0x76 >> 1*/
 	struct i2c_command i2c_command;
-	uint8_t offset = hdcp_i2c_offsets[message_info->msg_id];
+	uint8_t offset;
 	struct i2c_payload i2c_payloads[] = {
-		{ true, 0, 1, &offset },
+		{ true, 0, 1, 0 },
 		/* actual hdcp payload, will be filled later, zeroed for now*/
 		{ 0 }
 	};
 
+	if (message_info->msg_id == HDCP_MESSAGE_ID_INVALID) {
+		DC_LOG_ERROR("%s: Invalid message_info msg_id - %d\n", __func__, message_info->msg_id);
+		return false;
+	}
+
+	offset = hdcp_i2c_offsets[message_info->msg_id];
+	i2c_payloads[0].data = &offset;
+
 	switch (message_info->link) {
 	case HDCP_LINK_SECONDARY:
 		i2c_payloads[0].address = hdcp_i2c_addr_link_secondary;
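Deferring the hdcp_i2c_offsets[] lookup until after the msg_id check is the whole point of this change: the old initializer indexed the table with an unvalidated id (HDCP_MESSAGE_ID_INVALID is a negative sentinel in this code) before any check could run. A reduced sketch of validate-then-index (the enum values here are invented):

#include <stdbool.h>
#include <stdio.h>

enum msg_id { MSG_INVALID = -1, MSG_AKE_INIT = 0, MSG_AKE_CERT = 1, MSG_ID_MAX };

static const unsigned char i2c_offsets[MSG_ID_MAX] = { 0x60, 0x80 };

static bool process_transaction(enum msg_id id)
{
	unsigned char offset;

	if (id == MSG_INVALID || id >= MSG_ID_MAX) {
		fprintf(stderr, "invalid msg_id %d\n", id);
		return false;
	}
	offset = i2c_offsets[id]; /* safe: id validated above */
	printf("msg %d -> offset 0x%02x\n", id, offset);
	return true;
}

int main(void)
{
	process_transaction(MSG_AKE_CERT);
	process_transaction(MSG_INVALID);
	return 0;
}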
@@ -310,6 +318,11 @@ static bool dp_11_process_transaction(
 	struct dc_link *link,
 	struct hdcp_protection_message *message_info)
 {
+	if (message_info->msg_id == HDCP_MESSAGE_ID_INVALID) {
+		DC_LOG_ERROR("%s: Invalid message_info msg_id - %d\n", __func__, message_info->msg_id);
+		return false;
+	}
+
 	return dpcd_access_helper(
 		link,
 		message_info->length,
@@ -528,7 +528,7 @@ static bool decide_fallback_link_setting_max_bw_policy(
 		struct dc_link_settings *cur,
 		enum link_training_result training_result)
 {
-	uint8_t cur_idx = 0, next_idx;
+	uint32_t cur_idx = 0, next_idx;
 	bool found = false;
 
 	if (training_result == LINK_TRAINING_ABORT)
@@ -908,21 +908,17 @@ bool link_decide_link_settings(struct dc_stream_state *stream,
 
 	memset(link_setting, 0, sizeof(*link_setting));
 
-	/* if preferred is specified through AMDDP, use it, if it's enough
-	 * to drive the mode
-	 */
-	if (link->preferred_link_setting.lane_count !=
-			LANE_COUNT_UNKNOWN &&
-			link->preferred_link_setting.link_rate !=
-			LINK_RATE_UNKNOWN) {
+	if (dc_is_dp_signal(stream->signal) &&
+			link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN &&
+			link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN) {
+		/* if preferred is specified through AMDDP, use it, if it's enough
+		 * to drive the mode
+		 */
 		*link_setting = link->preferred_link_setting;
 		return true;
-	}
-
-	/* MST doesn't perform link training for now
-	 * TODO: add MST specific link training routine
-	 */
-	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+	} else if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+		/* MST doesn't perform link training for now
+		 * TODO: add MST specific link training routine
+		 */
 		decide_mst_link_settings(link, link_setting);
 	} else if (link->connector_signal == SIGNAL_TYPE_EDP) {
 		/* enable edp link optimization for DSC eDP case */
@@ -914,10 +914,10 @@ static enum dc_status configure_lttpr_mode_non_transparent(
 		/* Driver does not need to train the first hop. Skip DPCD read and clear
 		 * AUX_RD_INTERVAL for DPTX-to-DPIA hop.
 		 */
-		if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+		if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && repeater_cnt > 0 && repeater_cnt < MAX_REPEATER_CNT)
 			link->dpcd_caps.lttpr_caps.aux_rd_interval[--repeater_cnt] = 0;
 
-		for (repeater_id = repeater_cnt; repeater_id > 0; repeater_id--) {
+		for (repeater_id = repeater_cnt; repeater_id > 0 && repeater_id < MAX_REPEATER_CNT; repeater_id--) {
 			aux_interval_address = DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 +
 						((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (repeater_id - 1));
 			core_link_read_dpcd(
@@ -158,11 +158,16 @@ static enum mod_hdcp_status read(struct mod_hdcp *hdcp,
 	uint32_t cur_size = 0;
 	uint32_t data_offset = 0;
 
-	if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID) {
+	if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID ||
+	    msg_id >= MOD_HDCP_MESSAGE_ID_MAX)
 		return MOD_HDCP_STATUS_DDC_FAILURE;
-	}
 
 	if (is_dp_hdcp(hdcp)) {
+		int num_dpcd_addrs = sizeof(hdcp_dpcd_addrs) /
+			sizeof(hdcp_dpcd_addrs[0]);
+		if (msg_id >= num_dpcd_addrs)
+			return MOD_HDCP_STATUS_DDC_FAILURE;
+
 		while (buf_len > 0) {
 			cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE);
 			success = hdcp->config.ddc.funcs.read_dpcd(hdcp->config.ddc.handle,
@@ -177,6 +182,11 @@ static enum mod_hdcp_status read(struct mod_hdcp *hdcp,
 			data_offset += cur_size;
 		}
 	} else {
+		int num_i2c_offsets = sizeof(hdcp_i2c_offsets) /
+			sizeof(hdcp_i2c_offsets[0]);
+		if (msg_id >= num_i2c_offsets)
+			return MOD_HDCP_STATUS_DDC_FAILURE;
+
 		success = hdcp->config.ddc.funcs.read_i2c(
 				hdcp->config.ddc.handle,
 				HDCP_I2C_ADDR,
@@ -221,11 +231,16 @@ static enum mod_hdcp_status write(struct mod_hdcp *hdcp,
 	uint32_t cur_size = 0;
 	uint32_t data_offset = 0;
 
-	if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID) {
+	if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID ||
+	    msg_id >= MOD_HDCP_MESSAGE_ID_MAX)
 		return MOD_HDCP_STATUS_DDC_FAILURE;
-	}
 
 	if (is_dp_hdcp(hdcp)) {
+		int num_dpcd_addrs = sizeof(hdcp_dpcd_addrs) /
+			sizeof(hdcp_dpcd_addrs[0]);
+		if (msg_id >= num_dpcd_addrs)
+			return MOD_HDCP_STATUS_DDC_FAILURE;
+
 		while (buf_len > 0) {
 			cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE);
 			success = hdcp->config.ddc.funcs.write_dpcd(
@@ -241,6 +256,11 @@ static enum mod_hdcp_status write(struct mod_hdcp *hdcp,
 			data_offset += cur_size;
 		}
 	} else {
+		int num_i2c_offsets = sizeof(hdcp_i2c_offsets) /
+			sizeof(hdcp_i2c_offsets[0]);
+		if (msg_id >= num_i2c_offsets)
+			return MOD_HDCP_STATUS_DDC_FAILURE;
+
 		hdcp->buf[0] = hdcp_i2c_offsets[msg_id];
 		memmove(&hdcp->buf[1], buf, buf_len);
 		success = hdcp->config.ddc.funcs.write_i2c(
@@ -99,7 +99,7 @@ static void pp_swctf_delayed_work_handler(struct work_struct *work)
 	struct amdgpu_device *adev = hwmgr->adev;
 	struct amdgpu_dpm_thermal *range =
 			&adev->pm.dpm.thermal;
-	uint32_t gpu_temperature, size;
+	uint32_t gpu_temperature, size = sizeof(gpu_temperature);
 	int ret;
 
 	/*
@@ -30,9 +30,8 @@ int psm_init_power_state_table(struct pp_hwmgr *hwmgr)
 {
 	int result;
 	unsigned int i;
-	unsigned int table_entries;
 	struct pp_power_state *state;
-	int size;
+	int size, table_entries;
 
 	if (hwmgr->hwmgr_func->get_num_of_pp_table_entries == NULL)
 		return 0;
@@ -40,15 +39,19 @@ int psm_init_power_state_table(struct pp_hwmgr *hwmgr)
 	if (hwmgr->hwmgr_func->get_power_state_size == NULL)
 		return 0;
 
-	hwmgr->num_ps = table_entries = hwmgr->hwmgr_func->get_num_of_pp_table_entries(hwmgr);
+	table_entries = hwmgr->hwmgr_func->get_num_of_pp_table_entries(hwmgr);
 
-	hwmgr->ps_size = size = hwmgr->hwmgr_func->get_power_state_size(hwmgr) +
+	size = hwmgr->hwmgr_func->get_power_state_size(hwmgr) +
 			  sizeof(struct pp_power_state);
 
-	if (table_entries == 0 || size == 0) {
+	if (table_entries <= 0 || size == 0) {
 		pr_warn("Please check whether power state management is supported on this asic\n");
+		hwmgr->num_ps = 0;
+		hwmgr->ps_size = 0;
 		return 0;
 	}
+	hwmgr->num_ps = table_entries;
+	hwmgr->ps_size = size;
 
 	hwmgr->ps = kcalloc(table_entries, size, GFP_KERNEL);
 	if (hwmgr->ps == NULL)
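get_num_of_pp_table_entries() returns an int, so a negative value from a failing backend used to flow straight into kcalloc() as a huge size_t; checking table_entries <= 0 and only committing num_ps/ps_size after validation closes both holes. The same shape in portable C (calloc standing in for kcalloc, all names invented):

#include <stdio.h>
#include <stdlib.h>

static int get_num_entries(void) { return -1; /* simulate backend failure */ }

int main(void)
{
	int table_entries = get_num_entries();
	size_t size = 64;

	if (table_entries <= 0 || size == 0) {
		fprintf(stderr, "power state management not supported\n");
		return 0; /* counts left at 0, nothing allocated */
	}

	/* Only reached with validated, positive counts. */
	void *ps = calloc((size_t)table_entries, size);
	if (!ps)
		return 1;
	free(ps);
	return 0;
}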
@@ -73,8 +73,9 @@ static int atomctrl_retrieve_ac_timing(
 			j++;
 		} else if ((table->mc_reg_address[i].uc_pre_reg_data &
 					LOW_NIBBLE_MASK) == DATA_EQU_PREV) {
-			table->mc_reg_table_entry[num_ranges].mc_data[i] =
-				table->mc_reg_table_entry[num_ranges].mc_data[i-1];
+			if (i)
+				table->mc_reg_table_entry[num_ranges].mc_data[i] =
+					table->mc_reg_table_entry[num_ranges].mc_data[i-1];
 		}
 	}
 	num_ranges++;
@@ -1036,7 +1036,9 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
 
 	switch (type) {
 	case PP_SCLK:
-		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &now);
+		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &now);
+		if (ret)
+			return ret;
 
 	/* driver only know min/max gfx_clk, Add level 1 for all other gfx clks */
 		if (now == data->gfx_max_freq_limit/100)
@@ -1057,7 +1059,9 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
 					i == 2 ? "*" : "");
 		break;
 	case PP_MCLK:
-		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &now);
+		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &now);
+		if (ret)
+			return ret;
 
 		for (i = 0; i < mclk_table->count; i++)
 			size += snprintf(buf + size, PAGE_SIZE - size, "%d: %uMhz %s\n",
@@ -1550,7 +1554,10 @@ static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr,
 	}
 
 	if (input[0] == 0) {
-		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
+		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
+		if (ret)
+			return ret;
+
 		if (input[1] < min_freq) {
 			pr_err("Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
 				input[1], min_freq);
@@ -1558,7 +1565,10 @@ static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr,
 		}
 		smu10_data->gfx_actual_soft_min_freq = input[1];
 	} else if (input[0] == 1) {
-		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+		if (ret)
+			return ret;
+
 		if (input[1] > max_freq) {
 			pr_err("Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
 				input[1], max_freq);
@@ -1573,10 +1583,15 @@ static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr,
 			pr_err("Input parameter number not correct\n");
 			return -EINVAL;
 		}
-		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
-		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
-
+		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
+		if (ret)
+			return ret;
 		smu10_data->gfx_actual_soft_min_freq = min_freq;
+
+		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+		if (ret)
+			return ret;
+
 		smu10_data->gfx_actual_soft_max_freq = max_freq;
 	} else if (type == PP_OD_COMMIT_DPM_TABLE) {
 		if (size != 0) {
@@ -5641,7 +5641,7 @@ static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint
 	mode = input[size];
 	switch (mode) {
 	case PP_SMC_POWER_PROFILE_CUSTOM:
-		if (size < 8 && size != 0)
+		if (size != 8 && size != 0)
 			return -EINVAL;
 		/* If only CUSTOM is passed in, use the saved values. Check
 		 * that we actually have a CUSTOM profile by ensuring that
@@ -584,6 +584,7 @@ static int smu8_init_uvd_limit(struct pp_hwmgr *hwmgr)
 			hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
 	unsigned long clock = 0;
 	uint32_t level;
+	int ret;
 
 	if (NULL == table || table->count <= 0)
 		return -EINVAL;
@@ -591,7 +592,9 @@ static int smu8_init_uvd_limit(struct pp_hwmgr *hwmgr)
 	data->uvd_dpm.soft_min_clk = 0;
 	data->uvd_dpm.hard_min_clk = 0;
 
-	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel, &level);
+	ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel, &level);
+	if (ret)
+		return ret;
 
 	if (level < table->count)
 		clock = table->entries[level].vclk;
@@ -611,6 +614,7 @@ static int smu8_init_vce_limit(struct pp_hwmgr *hwmgr)
 			hwmgr->dyn_state.vce_clock_voltage_dependency_table;
 	unsigned long clock = 0;
 	uint32_t level;
+	int ret;
 
 	if (NULL == table || table->count <= 0)
 		return -EINVAL;
@@ -618,7 +622,9 @@ static int smu8_init_vce_limit(struct pp_hwmgr *hwmgr)
 	data->vce_dpm.soft_min_clk = 0;
 	data->vce_dpm.hard_min_clk = 0;
 
-	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel, &level);
+	ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel, &level);
+	if (ret)
+		return ret;
 
 	if (level < table->count)
 		clock = table->entries[level].ecclk;
@@ -638,6 +644,7 @@ static int smu8_init_acp_limit(struct pp_hwmgr *hwmgr)
 			hwmgr->dyn_state.acp_clock_voltage_dependency_table;
 	unsigned long clock = 0;
 	uint32_t level;
+	int ret;
 
 	if (NULL == table || table->count <= 0)
 		return -EINVAL;
@@ -645,7 +652,9 @@ static int smu8_init_acp_limit(struct pp_hwmgr *hwmgr)
 	data->acp_dpm.soft_min_clk = 0;
 	data->acp_dpm.hard_min_clk = 0;
 
-	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel, &level);
+	ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel, &level);
+	if (ret)
+		return ret;
 
 	if (level < table->count)
 		clock = table->entries[level].acpclk;
@@ -354,13 +354,13 @@ static int vega10_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
-static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
+static int vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
 {
 	struct vega10_hwmgr *data = hwmgr->backend;
-	int i;
 	uint32_t sub_vendor_id, hw_revision;
 	uint32_t top32, bottom32;
 	struct amdgpu_device *adev = hwmgr->adev;
+	int ret, i;
 
 	vega10_initialize_power_tune_defaults(hwmgr);
 
@@ -485,9 +485,12 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
 	if (data->registry_data.vr0hot_enabled)
 		data->smu_features[GNLD_VR0HOT].supported = true;
 
-	smum_send_msg_to_smc(hwmgr,
+	ret = smum_send_msg_to_smc(hwmgr,
 			PPSMC_MSG_GetSmuVersion,
 			&hwmgr->smu_version);
+	if (ret)
+		return ret;
+
 	/* ACG firmware has major version 5 */
 	if ((hwmgr->smu_version & 0xff000000) == 0x5000000)
 		data->smu_features[GNLD_ACG].supported = true;
@@ -505,10 +508,16 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
 		data->smu_features[GNLD_PCC_LIMIT].supported = true;
 
 	/* Get the SN to turn into a Unique ID */
-	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
-	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
+	ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
+	if (ret)
+		return ret;
+
+	ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
+	if (ret)
+		return ret;
 
 	adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
+	return 0;
 }
 
 #ifdef PPLIB_VEGA10_EVV_SUPPORT
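Making vega10_init_dpm_defaults() return int turns previously ignored SMU message failures into errors the caller can propagate, which is what the backend_init hunk below checks. The pattern, stripped to a runnable sketch (the smc_send stand-in is invented, not the driver's API):

#include <stdio.h>

/* Stand-in for smum_send_msg_to_smc(): returns 0 on success. */
static int smc_send(const char *msg, unsigned int *out)
{
	*out = 0xdeadbeef;
	printf("sent %s\n", msg);
	return 0;
}

static int init_dpm_defaults(unsigned long long *unique_id)
{
	unsigned int top32, bottom32;
	int ret;

	ret = smc_send("ReadSerialNumTop32", &top32);
	if (ret)
		return ret; /* propagate instead of ignoring */
	ret = smc_send("ReadSerialNumBottom32", &bottom32);
	if (ret)
		return ret;

	*unique_id = ((unsigned long long)bottom32 << 32) | top32;
	return 0;
}

int main(void)
{
	unsigned long long id;
	if (init_dpm_defaults(&id) == 0)
		printf("unique_id = 0x%llx\n", id);
	return 0;
}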
@@ -882,7 +891,9 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 
 	vega10_set_features_platform_caps(hwmgr);
 
-	vega10_init_dpm_defaults(hwmgr);
+	result = vega10_init_dpm_defaults(hwmgr);
+	if (result)
+		return result;
 
 #ifdef PPLIB_VEGA10_EVV_SUPPORT
 	/* Get leakage voltage based on leakage ID. */
@@ -2350,15 +2361,20 @@ static int vega10_acg_enable(struct pp_hwmgr *hwmgr)
 {
 	struct vega10_hwmgr *data = hwmgr->backend;
 	uint32_t agc_btc_response;
+	int ret;
 
 	if (data->smu_features[GNLD_ACG].supported) {
 		if (0 == vega10_enable_smc_features(hwmgr, true,
 			data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_bitmap))
 			data->smu_features[GNLD_DPM_PREFETCHER].enabled = true;
 
-		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg, NULL);
+		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg, NULL);
+		if (ret)
+			return ret;
 
-		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc, &agc_btc_response);
+		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc, &agc_btc_response);
+		if (ret)
+			agc_btc_response = 0;
 
 		if (1 == agc_btc_response) {
 			if (1 == data->acg_loop_state)
@@ -2571,8 +2587,11 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
 		}
 	}
 
-	pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC,
+	result = pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC,
 			VOLTAGE_OBJ_SVID2,  &voltage_table);
+	PP_ASSERT_WITH_CODE(!result,
+			"Failed to get voltage table!",
+			return result);
 	pp_table->MaxVidStep = voltage_table.max_vid_step;
 
 	pp_table->GfxDpmVoltageMode =
@@ -3910,11 +3929,14 @@ static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr,
 		uint32_t *query)
 {
 	uint32_t value;
+	int ret;
 
 	if (!query)
 		return -EINVAL;
 
-	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr, &value);
+	ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr, &value);
+	if (ret)
+		return ret;
 
 	/* SMC returning actual watts, keep consistent with legacy asics, low 8 bit as 8 fractional bits */
 	*query = value << 8;
@@ -4810,14 +4832,16 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
 	uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width;
 	PPTable_t *pptable = &(data->smc_state_table.pp_table);
 
-	int i, now, size = 0, count = 0;
+	int i, ret, now, size = 0, count = 0;
 
 	switch (type) {
 	case PP_SCLK:
 		if (data->registry_data.sclk_dpm_key_disabled)
 			break;
 
-		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex, &now);
+		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex, &now);
+		if (ret)
+			break;
 
 		if (hwmgr->pp_one_vf &&
 		    (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK))
@@ -4833,7 +4857,9 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
 		if (data->registry_data.mclk_dpm_key_disabled)
 			break;
 
-		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &now);
+		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &now);
+		if (ret)
+			break;
 
 		for (i = 0; i < mclk_table->count; i++)
 			size += snprintf(buf + size, PAGE_SIZE - size, "%d: %uMhz %s\n",
@@ -4844,7 +4870,9 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
 		if (data->registry_data.socclk_dpm_key_disabled)
 			break;
 
-		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex, &now);
+		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex, &now);
+		if (ret)
+			break;
 
 		for (i = 0; i < soc_table->count; i++)
 			size += snprintf(buf + size, PAGE_SIZE - size, "%d: %uMhz %s\n",
@@ -4855,8 +4883,10 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
 		if (data->registry_data.dcefclk_dpm_key_disabled)
 			break;
 
-		smum_send_msg_to_smc_with_parameter(hwmgr,
+		ret = smum_send_msg_to_smc_with_parameter(hwmgr,
 				PPSMC_MSG_GetClockFreqMHz, CLK_DCEFCLK, &now);
+		if (ret)
+			break;
 
 		for (i = 0; i < dcef_table->count; i++)
 			size += snprintf(buf + size, PAGE_SIZE - size, "%d: %uMhz %s\n",
@@ -293,12 +293,12 @@ static int vega12_set_features_platform_caps(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
-static void vega12_init_dpm_defaults(struct pp_hwmgr *hwmgr)
+static int vega12_init_dpm_defaults(struct pp_hwmgr *hwmgr)
 {
 	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
 	struct amdgpu_device *adev = hwmgr->adev;
 	uint32_t top32, bottom32;
-	int i;
+	int i, ret;
 
 	data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
 			FEATURE_DPM_PREFETCHER_BIT;
@@ -364,10 +364,16 @@ static void vega12_init_dpm_defaults(struct pp_hwmgr *hwmgr)
 	}
 
 	/* Get the SN to turn into a Unique ID */
-	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
-	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
+	ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
+	if (ret)
+		return ret;
+	ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
+	if (ret)
+		return ret;
 
 	adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
+
+	return 0;
 }
 
 static int vega12_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
@@ -410,7 +416,11 @@ static int vega12_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 
 	vega12_set_features_platform_caps(hwmgr);
 
-	vega12_init_dpm_defaults(hwmgr);
+	result = vega12_init_dpm_defaults(hwmgr);
+	if (result) {
+		pr_err("%s failed\n", __func__);
+		return result;
+	}
 
 	/* Parse pptable data read from VBIOS */
 	vega12_set_private_data_based_on_pptable(hwmgr);
@@ -328,12 +328,12 @@ static int vega20_set_features_platform_caps(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
-static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr)
+static int vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr)
 {
 	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
 	struct amdgpu_device *adev = hwmgr->adev;
 	uint32_t top32, bottom32;
-	int i;
+	int i, ret;
 
 	data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
 			FEATURE_DPM_PREFETCHER_BIT;
@@ -404,10 +404,17 @@ static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr)
 	}
 
 	/* Get the SN to turn into a Unique ID */
-	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
-	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
+	ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
+	if (ret)
+		return ret;
+
+	ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
+	if (ret)
+		return ret;
 
 	adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
+
+	return 0;
 }
 
 static int vega20_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
@@ -427,6 +434,7 @@ static int vega20_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 {
 	struct vega20_hwmgr *data;
 	struct amdgpu_device *adev = hwmgr->adev;
+	int result;
 
 	data = kzalloc(sizeof(struct vega20_hwmgr), GFP_KERNEL);
 	if (data == NULL)
@@ -452,8 +460,11 @@ static int vega20_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 
 	vega20_set_features_platform_caps(hwmgr);
 
-	vega20_init_dpm_defaults(hwmgr);
-
+	result = vega20_init_dpm_defaults(hwmgr);
+	if (result) {
+		pr_err("%s failed\n", __func__);
+		return result;
+	}
 	/* Parse pptable data read from VBIOS */
 	vega20_set_private_data_based_on_pptable(hwmgr);
 
@@ -4091,9 +4102,11 @@ static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
 	if (power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
 		struct vega20_hwmgr *data =
 				(struct vega20_hwmgr *)(hwmgr->backend);
-		if (size == 0 && !data->is_custom_profile_set)
+
+		if (size != 10 && size != 0)
 			return -EINVAL;
-		if (size < 10 && size != 0)
+
+		if (size == 0 && !data->is_custom_profile_set)
 			return -EINVAL;
 
 		result = vega20_get_activity_monitor_coeff(hwmgr,
@@ -4155,6 +4168,8 @@ static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
 		activity_monitor.Fclk_PD_Data_error_coeff = input[8];
 		activity_monitor.Fclk_PD_Data_error_rate_coeff = input[9];
 		break;
+	default:
+		return -EINVAL;
 	}
 
 	result = vega20_set_activity_monitor_coeff(hwmgr,
@@ -130,13 +130,17 @@ int vega10_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
 		uint64_t *features_enabled)
 {
 	uint32_t enabled_features;
+	int ret;
 
 	if (features_enabled == NULL)
 		return -EINVAL;
 
-	smum_send_msg_to_smc(hwmgr,
+	ret = smum_send_msg_to_smc(hwmgr,
 			PPSMC_MSG_GetEnabledSmuFeatures,
 			&enabled_features);
+	if (ret)
+		return ret;
+
 	*features_enabled = enabled_features;
 
 	return 0;
@@ -1222,19 +1222,22 @@ static int navi10_get_current_clk_freq_by_table(struct smu_context *smu,
 				       value);
 }
 
-static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu_clk_type clk_type)
+static int navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu_clk_type clk_type)
 {
 	PPTable_t *pptable = smu->smu_table.driver_pptable;
 	DpmDescriptor_t *dpm_desc = NULL;
-	uint32_t clk_index = 0;
+	int clk_index = 0;
 
 	clk_index = smu_cmn_to_asic_specific_index(smu,
 						   CMN2ASIC_MAPPING_CLK,
 						   clk_type);
+	if (clk_index < 0)
+		return clk_index;
+
 	dpm_desc = &pptable->DpmDescriptor[clk_index];
 
 	/* 0 - Fine grained DPM, 1 - Discrete DPM */
-	return dpm_desc->SnapToDiscrete == 0;
+	return dpm_desc->SnapToDiscrete == 0 ? 1 : 0;
 }
 
 static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODFEATURE_CAP cap)
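The bool-to-int change gives the helper three distinct outcomes: a negative errno when the clock index can't be mapped, and 0/1 as the boolean answer. Callers must then test ret < 0 before treating the value as a flag, as the follow-on hunks do. A compact sketch of the convention (the mapping logic is invented):

#include <errno.h>
#include <stdio.h>

/* <0: error, 0: discrete DPM, 1: fine-grained DPM */
static int is_fine_grained(int clk_type)
{
	if (clk_type < 0 || clk_type > 3)
		return -EINVAL; /* mapping failed */
	return clk_type == 2 ? 1 : 0;
}

int main(void)
{
	int ret = is_fine_grained(2);
	if (ret < 0)
		fprintf(stderr, "error: %d\n", ret);
	else if (ret)
		puts("fine grained DPM");
	else
		puts("discrete DPM");
	return 0;
}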
@@ -1290,7 +1293,11 @@ static int navi10_emit_clk_levels(struct smu_context *smu,
 		if (ret)
 			return ret;
 
-		if (!navi10_is_support_fine_grained_dpm(smu, clk_type)) {
+		ret = navi10_is_support_fine_grained_dpm(smu, clk_type);
+		if (ret < 0)
+			return ret;
+
+		if (!ret) {
 			for (i = 0; i < count; i++) {
 				ret = smu_v11_0_get_dpm_freq_by_index(smu,
 								      clk_type, i, &value);
@@ -1499,7 +1506,11 @@ static int navi10_print_clk_levels(struct smu_context *smu,
 		if (ret)
 			return size;
 
-		if (!navi10_is_support_fine_grained_dpm(smu, clk_type)) {
+		ret = navi10_is_support_fine_grained_dpm(smu, clk_type);
+		if (ret < 0)
+			return ret;
+
+		if (!ret) {
 			for (i = 0; i < count; i++) {
 				ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, i, &value);
 				if (ret)
@@ -1668,7 +1679,11 @@ static int navi10_force_clk_levels(struct smu_context *smu,
 	case SMU_UCLK:
 	case SMU_FCLK:
 		/* There is only 2 levels for fine grained DPM */
-		if (navi10_is_support_fine_grained_dpm(smu, clk_type)) {
+		ret = navi10_is_support_fine_grained_dpm(smu, clk_type);
+		if (ret < 0)
+			return ret;
+
+		if (ret) {
 			soft_max_level = (soft_max_level >= 1 ? 1 : 0);
 			soft_min_level = (soft_min_level >= 1 ? 1 : 0);
 		}
@@ -1017,6 +1017,18 @@ static int vangogh_get_dpm_ultimate_freq(struct smu_context *smu,
 		}
 	}
 	if (min) {
+		ret = vangogh_get_profiling_clk_mask(smu,
+						     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK,
+						     NULL,
+						     NULL,
+						     &mclk_mask,
+						     &fclk_mask,
+						     &soc_mask);
+		if (ret)
+			goto failed;
+
+		vclk_mask = dclk_mask = 0;
+
 		switch (clk_type) {
 		case SMU_UCLK:
 		case SMU_MCLK:
@@ -2489,6 +2501,8 @@ static u32 vangogh_set_gfxoff_residency(struct smu_context *smu, bool start)
 
 	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_LogGfxOffResidency,
 					      start, &residency);
+	if (ret)
+		return ret;
 
 	if (!start)
 		adev->gfx.gfx_off_residency = residency;
@@ -1933,7 +1933,8 @@ static int aldebaran_mode2_reset(struct smu_context *smu)
 
 	index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
 						SMU_MSG_GfxDeviceDriverReset);
-
+	if (index < 0 )
+		return -EINVAL;
 	mutex_lock(&smu->message_lock);
 	if (smu_version >= 0x00441400) {
 		ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, SMU_RESET_MODE_2);
@@ -2063,6 +2063,8 @@ static int smu_v13_0_6_mode2_reset(struct smu_context *smu)
 
 	index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
 					       SMU_MSG_GfxDeviceDriverReset);
+	if (index < 0)
+		return index;
 
 	mutex_lock(&smu->message_lock);
 
@@ -647,6 +647,17 @@ static void drm_fb_helper_add_damage_clip(struct drm_fb_helper *helper, u32 x, u
 static void drm_fb_helper_damage(struct drm_fb_helper *helper, u32 x, u32 y,
 				 u32 width, u32 height)
 {
+	/*
+	 * This function may be invoked by panic() to flush the frame
+	 * buffer, where all CPUs except the panic CPU are stopped.
+	 * During the following schedule_work(), the panic CPU needs
+	 * the worker_pool lock, which might be held by a stopped CPU,
+	 * causing schedule_work() and panic() to block. Return early on
+	 * oops_in_progress to prevent this blocking.
+	 */
+	if (oops_in_progress)
+		return;
+
 	drm_fb_helper_add_damage_clip(helper, x, y, width, height);
 
 	schedule_work(&helper->damage_work);
@@ -420,6 +420,12 @@ static const struct dmi_system_id orientation_data[] = {
 		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ONE XPLAYER"),
 		},
 		.driver_data = (void *)&lcd1600x2560_leftside_up,
+	}, {	/* OrangePi Neo */
+		.matches = {
+		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "OrangePi"),
+		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "NEO-01"),
+		},
+		.driver_data = (void *)&lcd1200x1920_rightside_up,
 	}, {	/* Samsung GalaxyBook 10.6 */
 		.matches = {
 		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),