sync with OpenBSD -current

purplerain 2024-01-29 17:38:40 +00:00
parent 20629a8b0d
commit 604988d5d3
Signed by: purplerain
GPG key ID: F42C07F07E2E35B7
58 changed files with 592 additions and 377 deletions

View file

@@ -755,7 +755,7 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
 	int r;
 
 	if (!adev->smc_rreg)
-		return -EPERM;
+		return -EOPNOTSUPP;
 
 	if (size & 0x3 || *pos & 0x3)
 		return -EINVAL;
@@ -814,7 +814,7 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
 	int r;
 
 	if (!adev->smc_wreg)
-		return -EPERM;
+		return -EOPNOTSUPP;
 
 	if (size & 0x3 || *pos & 0x3)
 		return -EINVAL;
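
A note on the errno choice in the two hunks above: EPERM claims the caller lacks permission, while EOPNOTSUPP says the hardware simply has no SMC register interface, which is the true condition when the accessor is NULL. A minimal userspace sketch (the decode helper is invented for illustration) of why the distinction is actionable:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical decode of a kernel-style negative errno return. */
static void decode(int err)
{
	if (err == -EOPNOTSUPP)
		printf("not supported on this ASIC: %s\n", strerror(-err));
	else if (err == -EPERM)
		printf("permission problem, retry privileged: %s\n", strerror(-err));
}

int main(void)
{
	decode(-EOPNOTSUPP);	/* new return value */
	decode(-EPERM);		/* old, misleading return value */
	return 0;
}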

View file

@@ -1026,7 +1026,12 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		if (amdgpu_dpm_read_sensor(adev,
 					   AMDGPU_PP_SENSOR_GPU_AVG_POWER,
 					   (void *)&ui32, &ui32_size)) {
-			return -EINVAL;
+			/* fall back to input power for backwards compat */
+			if (amdgpu_dpm_read_sensor(adev,
+						   AMDGPU_PP_SENSOR_GPU_INPUT_POWER,
+						   (void *)&ui32, &ui32_size)) {
+				return -EINVAL;
+			}
 		}
 		ui32 >>= 8;
 		break;
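
The hunk above keeps the GPU_AVG_POWER query working on ASICs that only report input power, by retrying with the second sensor instead of failing. A self-contained sketch of the same fallback shape, with a stub standing in for amdgpu_dpm_read_sensor() and invented sensor names:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

enum sensor { SENSOR_AVG_POWER, SENSOR_INPUT_POWER };

/* Stub: pretend this ASIC only implements the input-power sensor. */
static int read_sensor(enum sensor s, uint32_t *val)
{
	if (s == SENSOR_AVG_POWER)
		return -EINVAL;
	*val = 42 << 8;		/* illustrative: watts in 8.8 fixed point */
	return 0;
}

static int query_power(uint32_t *watts)
{
	uint32_t v;

	if (read_sensor(SENSOR_AVG_POWER, &v) &&
	    read_sensor(SENSOR_INPUT_POWER, &v))	/* fallback */
		return -EINVAL;
	*watts = v >> 8;	/* same >>= 8 as the ioctl path */
	return 0;
}

int main(void)
{
	uint32_t w;

	if (!query_power(&w))
		printf("%u W\n", w);
	return 0;
}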

View file

@@ -330,12 +330,6 @@ static void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id)
 	pdd->gpuvm_limit =
 		pdd->dev->kfd->shared_resources.gpuvm_size - 1;
 
-	/* dGPUs: the reserved space for kernel
-	 * before SVM
-	 */
-	pdd->qpd.cwsr_base = SVM_CWSR_BASE;
-	pdd->qpd.ib_base = SVM_IB_BASE;
-
 	pdd->scratch_base = MAKE_SCRATCH_APP_BASE_VI();
 	pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base);
 }
@@ -345,18 +339,18 @@ static void kfd_init_apertures_v9(struct kfd_process_device *pdd, uint8_t id)
 	pdd->lds_base = MAKE_LDS_APP_BASE_V9();
 	pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base);
 
-	/* Raven needs SVM to support graphic handle, etc. Leave the small
-	 * reserved space before SVM on Raven as well, even though we don't
-	 * have to.
-	 * Set gpuvm_base and gpuvm_limit to CANONICAL addresses so that they
-	 * are used in Thunk to reserve SVM.
-	 */
-	pdd->gpuvm_base = SVM_USER_BASE;
+	pdd->gpuvm_base = PAGE_SIZE;
 	pdd->gpuvm_limit =
 		pdd->dev->kfd->shared_resources.gpuvm_size - 1;
 
 	pdd->scratch_base = MAKE_SCRATCH_APP_BASE_V9();
 	pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base);
+
+	/*
+	 * Place TBA/TMA on opposite side of VM hole to prevent
+	 * stray faults from triggering SVM on these pages.
+	 */
+	pdd->qpd.cwsr_base = pdd->dev->kfd->shared_resources.gpuvm_size;
 }
int kfd_init_apertures(struct kfd_process *process)
@@ -413,6 +407,12 @@ int kfd_init_apertures(struct kfd_process *process)
 				return -EINVAL;
 			}
 		}
+
+		/* dGPUs: the reserved space for kernel
+		 * before SVM
+		 */
+		pdd->qpd.cwsr_base = SVM_CWSR_BASE;
+		pdd->qpd.ib_base = SVM_IB_BASE;
 	}
 
 	dev_dbg(kfd_device, "node id %u\n", id);
dev_dbg(kfd_device, "node id %u\n", id);

View file

@@ -1021,7 +1021,7 @@ int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
 	} else {
 		res = devm_request_free_mem_region(adev->dev, &iomem_resource, size);
 		if (IS_ERR(res))
-			return -ENOMEM;
+			return PTR_ERR(res);
 		pgmap->range.start = res->start;
 		pgmap->range.end = res->end;
 		pgmap->type = MEMORY_DEVICE_PRIVATE;
@@ -1037,10 +1037,10 @@ int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
 	r = devm_memremap_pages(adev->dev, pgmap);
 	if (IS_ERR(r)) {
 		pr_err("failed to register HMM device memory\n");
-		/* Disable SVM support capability */
-		pgmap->type = 0;
 		if (pgmap->type == MEMORY_DEVICE_PRIVATE)
 			devm_release_mem_region(adev->dev, res->start, resource_size(res));
+		/* Disable SVM support capability */
+		pgmap->type = 0;
 		return PTR_ERR(r);
 	}
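
Two separate fixes in this file: the error from devm_request_free_mem_region() is now propagated via PTR_ERR() instead of being flattened to -ENOMEM, and pgmap->type is cleared only after it has been tested, so the MEMORY_DEVICE_PRIVATE region is actually released (the old order zeroed the field first, making the test always false). A user-space model of the ERR_PTR convention involved; the real macros live in the kernel's <linux/err.h>:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	/* errors are encoded as the top MAX_ERRNO addresses */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *res = ERR_PTR(-EBUSY);	/* e.g. region already claimed */

	if (IS_ERR(res))
		/* new code reports -EBUSY; old code said -ENOMEM */
		printf("request failed: %ld\n", PTR_ERR(res));
	return 0;
}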

View file

@@ -971,7 +971,7 @@ struct kfd_process {
 	struct work_struct debug_event_workarea;
 
 	/* Tracks debug per-vmid request for debug flags */
-	bool dbg_flags;
+	u32 dbg_flags;
 
 	atomic_t poison;
 	/* Queues are in paused stated because we are in the process of doing a CRIU checkpoint */
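
dbg_flags holds a bitmask, and a bool collapses any nonzero mask to 1, so tests for individual flags break. A small demonstration with invented flag names:

#include <stdbool.h>
#include <stdio.h>

#define DBG_FLAG_A (1u << 0)
#define DBG_FLAG_B (1u << 5)

int main(void)
{
	bool as_bool = DBG_FLAG_B;		/* collapses to 1 */
	unsigned int as_u32 = DBG_FLAG_B;	/* stays 0x20 */

	printf("bool: %d, u32: 0x%x\n", as_bool, as_u32);
	/* the bool can no longer answer "is flag B set?" */
	printf("B set? bool says %d, u32 says %d\n",
	       (as_bool & DBG_FLAG_B) != 0, (as_u32 & DBG_FLAG_B) != 0);
	return 0;
}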

View file

@@ -1342,10 +1342,11 @@ static int kfd_create_indirect_link_prop(struct kfd_topology_device *kdev, int g
 			num_cpu++;
 	}
 
+	if (list_empty(&kdev->io_link_props))
+		return -ENODATA;
+
 	gpu_link = list_first_entry(&kdev->io_link_props,
-					struct kfd_iolink_properties, list);
-	if (!gpu_link)
-		return -ENOMEM;
+				    struct kfd_iolink_properties, list);
 
 	for (i = 0; i < num_cpu; i++) {
 		/* CPU <--> GPU */
/* CPU <--> GPU */
@@ -1423,15 +1424,17 @@ static int kfd_add_peer_prop(struct kfd_topology_device *kdev,
 				peer->gpu->adev))
 		return ret;
 
+	if (list_empty(&kdev->io_link_props))
+		return -ENODATA;
+
 	iolink1 = list_first_entry(&kdev->io_link_props,
-					struct kfd_iolink_properties, list);
-	if (!iolink1)
-		return -ENOMEM;
+				   struct kfd_iolink_properties, list);
+
+	if (list_empty(&peer->io_link_props))
+		return -ENODATA;
 
 	iolink2 = list_first_entry(&peer->io_link_props,
-					struct kfd_iolink_properties, list);
-	if (!iolink2)
-		return -ENOMEM;
+				   struct kfd_iolink_properties, list);
 
 	props = kfd_alloc_struct(props);
 	if (!props)
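
Both hunks above drop NULL checks that could never fire: list_first_entry() is pointer arithmetic on head->next and never yields NULL; on an empty list it instead produces a garbage pointer. Hence the list_empty() guard must come first, and -ENODATA describes that case better than -ENOMEM. A toy model of the pattern:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_first_entry(head, type, member) \
	container_of((head)->next, type, member)

struct iolink { int id; struct list_head list; };

static int list_empty(const struct list_head *head)
{
	return head->next == head;
}

int main(void)
{
	struct list_head head = { &head, &head };	/* empty list */

	if (list_empty(&head)) {
		puts("empty: would return -ENODATA");
		return 0;
	}
	/* Not reached, but note: even on an empty list this expression
	 * is non-NULL (it points at garbage), so "if (!entry)" is dead. */
	printf("%p\n", (void *)list_first_entry(&head, struct iolink, list));
	return 0;
}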

View file

@@ -6886,7 +6886,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
 							max_bpc);
 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
 		clock = adjusted_mode->clock;
-		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
+		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp << 4);
 	}
 
 	dm_new_connector_state->vcpi_slots =

View file

@@ -1636,7 +1636,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
 	} else {
 		/* check if mode could be supported within full_pbn */
 		bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3;
-		pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp, false);
+		pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp << 4);
 		if (pbn > full_pbn)
 			return DC_FAIL_BANDWIDTH_VALIDATE;
 	}

View file

@@ -807,7 +807,7 @@ void dp_decide_lane_settings(
 	const struct link_training_settings *lt_settings,
 	const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
 	struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
-	union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX])
+	union dpcd_training_lane *dpcd_lane_settings)
 {
 	uint32_t lane;

View file

@@ -111,7 +111,7 @@ void dp_decide_lane_settings(
 	const struct link_training_settings *lt_settings,
 	const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
 	struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
-	union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]);
+	union dpcd_training_lane *dpcd_lane_settings);
 
 enum dc_dp_training_pattern decide_cr_training_pattern(
 	const struct dc_link_settings *link_settings);
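
The two hunks above change the last parameter from a sized array to a plain pointer. In C this is behavior-neutral: a parameter declared T a[N] is adjusted to T *a, so the declared bound was documentation only, and no call site needs to change. A short illustration:

#include <stdio.h>

static void f(int a[4]) { (void)a; }	/* adjusted to int *a */
static void g(int *a)   { (void)a; }

int main(void)
{
	/* Identical types: both are void (*)(int *). */
	void (*p)(int *) = f;

	p = g;
	p(NULL);
	printf("an 'int a[4]' parameter really has size %zu (a pointer)\n",
	       sizeof(int *));
	return 0;
}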

View file

@@ -2735,10 +2735,8 @@ static int kv_parse_power_table(struct amdgpu_device *adev)
 		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
 			&non_clock_info_array->nonClockInfo[non_clock_array_index];
 		ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
-		if (ps == NULL) {
-			kfree(adev->pm.dpm.ps);
+		if (ps == NULL)
 			return -ENOMEM;
-		}
 		adev->pm.dpm.ps[i].ps_priv = ps;
 		k = 0;
 		idx = (u8 *)&power_state->v2.clockInfoIndex[0];

View file

@@ -7379,10 +7379,9 @@ static int si_dpm_init(struct amdgpu_device *adev)
 		kcalloc(4,
 			sizeof(struct amdgpu_clock_voltage_dependency_entry),
 			GFP_KERNEL);
-	if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
-		amdgpu_free_extended_power_table(adev);
+	if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries)
 		return -ENOMEM;
-	}
 	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
 	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
 	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;

View file

@@ -272,10 +272,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
 			le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
 		ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
 							 dep_table);
-		if (ret) {
-			amdgpu_free_extended_power_table(adev);
+		if (ret)
 			return ret;
-		}
 	}
 	if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
 		dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
@@ -283,10 +281,8 @@
 			le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
 		ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
 							 dep_table);
-		if (ret) {
-			amdgpu_free_extended_power_table(adev);
+		if (ret)
 			return ret;
-		}
 	}
 	if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
 		dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
@@ -294,10 +290,8 @@
 			le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
 		ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
 							 dep_table);
-		if (ret) {
-			amdgpu_free_extended_power_table(adev);
+		if (ret)
 			return ret;
-		}
 	}
 	if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
 		dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
@@ -305,10 +299,8 @@
 			le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
 		ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
 							 dep_table);
-		if (ret) {
-			amdgpu_free_extended_power_table(adev);
+		if (ret)
 			return ret;
-		}
 	}
 	if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
 		ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
@@ -339,10 +331,8 @@
 			kcalloc(psl->ucNumEntries,
 				sizeof(struct amdgpu_phase_shedding_limits_entry),
 				GFP_KERNEL);
-		if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
-			amdgpu_free_extended_power_table(adev);
+		if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries)
 			return -ENOMEM;
-		}
 
 		entry = &psl->entries[0];
 		for (i = 0; i < psl->ucNumEntries; i++) {
@@ -383,10 +373,8 @@
 		ATOM_PPLIB_CAC_Leakage_Record *entry;
 		u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
 		adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
-		if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
-			amdgpu_free_extended_power_table(adev);
+		if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries)
 			return -ENOMEM;
-		}
 		entry = &cac_table->entries[0];
 		for (i = 0; i < cac_table->ucNumEntries; i++) {
 			if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
@@ -438,10 +426,8 @@
 			sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
 		adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
 			kzalloc(size, GFP_KERNEL);
-		if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
-			amdgpu_free_extended_power_table(adev);
+		if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries)
 			return -ENOMEM;
-		}
 		adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
 			limits->numEntries;
 		entry = &limits->entries[0];
@@ -493,10 +479,8 @@
 			sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
 		adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
 			kzalloc(size, GFP_KERNEL);
-		if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
-			amdgpu_free_extended_power_table(adev);
+		if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries)
 			return -ENOMEM;
-		}
 		adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
 			limits->numEntries;
 		entry = &limits->entries[0];
@@ -525,10 +509,8 @@
 			sizeof(struct amdgpu_clock_voltage_dependency_entry);
 		adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
 			kzalloc(size, GFP_KERNEL);
-		if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
-			amdgpu_free_extended_power_table(adev);
+		if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries)
 			return -ENOMEM;
-		}
 		adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
 			limits->numEntries;
 		entry = &limits->entries[0];
@@ -548,10 +530,8 @@
 			le16_to_cpu(ext_hdr->usPPMTableOffset));
 		adev->pm.dpm.dyn_state.ppm_table =
 			kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
-		if (!adev->pm.dpm.dyn_state.ppm_table) {
-			amdgpu_free_extended_power_table(adev);
+		if (!adev->pm.dpm.dyn_state.ppm_table)
 			return -ENOMEM;
-		}
 		adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
 		adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
 			le16_to_cpu(ppm->usCpuCoreNumber);
@@ -583,10 +563,8 @@
 			sizeof(struct amdgpu_clock_voltage_dependency_entry);
 		adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
 			kzalloc(size, GFP_KERNEL);
-		if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
-			amdgpu_free_extended_power_table(adev);
+		if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries)
 			return -ENOMEM;
-		}
 		adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
 			limits->numEntries;
 		entry = &limits->entries[0];
@@ -606,10 +584,8 @@
 		ATOM_PowerTune_Table *pt;
 		adev->pm.dpm.dyn_state.cac_tdp_table =
 			kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
-		if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
-			amdgpu_free_extended_power_table(adev);
+		if (!adev->pm.dpm.dyn_state.cac_tdp_table)
 			return -ENOMEM;
-		}
 		if (rev > 0) {
 			ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
 				(mode_info->atom_context->bios + data_offset +
@@ -645,10 +621,8 @@
 			ret = amdgpu_parse_clk_voltage_dep_table(
 					&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
 					dep_table);
-			if (ret) {
-				kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
+			if (ret)
 				return ret;
-			}
 		}
 	}
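
Every sub-hunk above removes a local call to amdgpu_free_extended_power_table() (plus one stray kfree) from an error path. The premise is single ownership of cleanup: the parser may now return early without freeing, because its caller runs the teardown exactly once on failure, which removes the double-free that occurred when both sides cleaned up. A minimal sketch of that ownership rule, with invented names:

#include <errno.h>
#include <stdlib.h>

struct tables { int *a; int *b; };

/* Single teardown site; safe on partially-built state because
 * free(NULL) is a no-op. */
static void free_tables(struct tables *t)
{
	free(t->a);
	free(t->b);
	t->a = t->b = NULL;
}

/* May return early without cleanup: the caller owns teardown. */
static int parse_tables(struct tables *t)
{
	t->a = malloc(16);
	if (!t->a)
		return -ENOMEM;
	t->b = malloc(16);
	if (!t->b)
		return -ENOMEM;
	return 0;
}

int main(void)
{
	struct tables t = { 0 };	/* zeroed, like kzalloc'd state */
	int ret = parse_tables(&t);

	if (ret)
		free_tables(&t);	/* the one cleanup on failure */
	else
		free_tables(&t);	/* normal teardown when done */
	return ret ? 1 : 0;
}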

View file

@@ -2974,6 +2974,8 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 		result = smu7_get_evv_voltages(hwmgr);
 		if (result) {
 			pr_info("Get EVV Voltage Failed. Abort Driver loading!\n");
+			kfree(hwmgr->backend);
+			hwmgr->backend = NULL;
 			return -EINVAL;
 		}
 	} else {
@@ -3019,8 +3021,10 @@
 	}
 
 	result = smu7_update_edc_leakage_table(hwmgr);
-	if (result)
+	if (result) {
+		smu7_hwmgr_backend_fini(hwmgr);
 		return result;
+	}
 
 	return 0;
 }

View file

@@ -4700,13 +4700,12 @@ EXPORT_SYMBOL(drm_dp_check_act_status);
 
 /**
  * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
- * @clock: dot clock for the mode
- * @bpp: bpp for the mode.
- * @dsc: DSC mode. If true, bpp has units of 1/16 of a bit per pixel
+ * @clock: dot clock
+ * @bpp: bpp as .4 binary fixed point
  *
  * This uses the formula in the spec to calculate the PBN value for a mode.
  */
-int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc)
+int drm_dp_calc_pbn_mode(int clock, int bpp)
 {
 	/*
 	 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
@@ -4717,18 +4716,9 @@ int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc)
 	 * peak_kbps *= (1006/1000)
 	 * peak_kbps *= (64/54)
 	 * peak_kbps *= 8    convert to bytes
-	 *
-	 * If the bpp is in units of 1/16, further divide by 16. Put this
-	 * factor in the numerator rather than the denominator to avoid
-	 * integer overflow
 	 */
-	if (dsc)
-		return DIV_ROUND_UP_ULL(mul_u32_u32(clock * (bpp / 16), 64 * 1006),
-					8 * 54 * 1000 * 1000);
-
-	return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006),
-				8 * 54 * 1000 * 1000);
+	return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006 >> 4),
+				1000 * 8 * 54 * 1000);
 }
 EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
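
With the dsc flag gone, callers pass bpp in .4 fixed point and the divide-by-16 moves into the constant: 64 * 1006 = 64384 is divisible by 16, so 64 * 1006 >> 4 = 4024 is exact, and (bpp << 4) * 4024 equals bpp * 64384. A worked example for a 148500 kHz dot clock (1920x1080@60) at 24 bpp, modeling DIV_ROUND_UP_ULL with plain 64-bit math:

#include <stdint.h>
#include <stdio.h>

/* Same formula as above, as a standalone function. */
static int calc_pbn(int clock, int bpp_x16)
{
	uint64_t num = (uint64_t)((uint32_t)clock * (uint32_t)bpp_x16) *
		       ((64 * 1006) >> 4);
	uint64_t den = 1000 * 8 * 54 * 1000;

	return (int)((num + den - 1) / den);	/* round up */
}

int main(void)
{
	/* 148500 * 384 * 4024 = 229464576000; / 432000000 = 531.1 -> 532 */
	printf("PBN = %d\n", calc_pbn(148500, 24 << 4));
	return 0;
}

The old formula gives the same 532 PBN for this mode, so for whole-number bpp the refactor is value-preserving.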

View file

@@ -1042,8 +1042,11 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
 			goto err_minors;
 	}
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		drm_modeset_register_all(dev);
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		ret = drm_modeset_register_all(dev);
+		if (ret)
+			goto err_unload;
+	}
 
 	DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
 		 driver->name, driver->major, driver->minor,
@@ -1053,6 +1056,9 @@
 	goto out_unlock;
 
+err_unload:
+	if (dev->driver->unload)
+		dev->driver->unload(dev);
 err_minors:
 	remove_compat_control_link(dev);
 	drm_minor_unregister(dev, DRM_MINOR_ACCEL);
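
The new err_unload label extends the function's existing unwind ladder: each setup step that can fail jumps to the label that tears down everything established before it, in reverse order. The idiom in isolation, with invented setup/teardown names:

#include <errno.h>

static int setup_a(void) { return 0; }
static void teardown_a(void) { }
static int setup_b(void) { return 0; }
static void teardown_b(void) { }
static int setup_c(void) { return -EIO; }	/* simulated failure */

static int register_all(void)
{
	int ret;

	ret = setup_a();
	if (ret)
		goto err_none;
	ret = setup_b();
	if (ret)
		goto err_a;
	ret = setup_c();
	if (ret)
		goto err_b;	/* unwinds b, then a */
	return 0;

err_b:
	teardown_b();
err_a:
	teardown_a();
err_none:
	return ret;
}

int main(void)
{
	return register_all() ? 1 : 0;
}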

View file

@@ -109,8 +109,7 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
 			continue;
 
 		crtc_state->pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock,
-						       dsc ? bpp << 4 : bpp,
-						       dsc);
+						       bpp << 4);
 
 		slots = drm_dp_atomic_find_time_slots(state, &intel_dp->mst_mgr,
 						      connector->port,
@@ -941,7 +940,7 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
 		return ret;
 
 	if (mode_rate > max_rate || mode->clock > max_dotclk ||
-	    drm_dp_calc_pbn_mode(mode->clock, min_bpp, false) > port->full_pbn) {
+	    drm_dp_calc_pbn_mode(mode->clock, min_bpp << 4) > port->full_pbn) {
 		*status = MODE_CLOCK_HIGH;
 		return 0;
 	}

View file

@@ -832,7 +832,7 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector,
 int drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
 			     int link_rate, int link_lane_count);
 
-int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc);
+int drm_dp_calc_pbn_mode(int clock, int bpp);
 
 void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap);

View file

@@ -192,7 +192,7 @@ struct drm_bridge_funcs {
 	 * or &drm_encoder_helper_funcs.dpms hook.
 	 *
	 * The bridge must assume that the display pipe (i.e. clocks and timing
-	 * singals) feeding it is no longer running when this callback is
+	 * signals) feeding it is no longer running when this callback is
 	 * called.
 	 *
 	 * The @post_disable callback is optional.

View file

@@ -2321,7 +2321,7 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
 	switch (prim_walk) {
 	case 1:
 		for (i = 0; i < track->num_arrays; i++) {
-			size = track->arrays[i].esize * track->max_indx * 4;
+			size = track->arrays[i].esize * track->max_indx * 4UL;
 			if (track->arrays[i].robj == NULL) {
 				DRM_ERROR("(PW %u) Vertex array %u no buffer "
 					  "bound\n", prim_walk, i);
@@ -2340,7 +2340,7 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
 		break;
 	case 2:
 		for (i = 0; i < track->num_arrays; i++) {
-			size = track->arrays[i].esize * (nverts - 1) * 4;
+			size = track->arrays[i].esize * (nverts - 1) * 4UL;
 			if (track->arrays[i].robj == NULL) {
 				DRM_ERROR("(PW %u) Vertex array %u no buffer "
 					  "bound\n", prim_walk, i);

View file

@@ -1275,7 +1275,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 			return -EINVAL;
 		}
 		tmp = (reg - CB_COLOR0_BASE) / 4;
-		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
+		track->cb_color_bo_offset[tmp] = (u64)radeon_get_ib_value(p, idx) << 8;
 		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
 		track->cb_color_base_last[tmp] = ib[idx];
 		track->cb_color_bo[tmp] = reloc->robj;
@@ -1302,7 +1302,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 				 "0x%04X\n", reg);
 			return -EINVAL;
 		}
-		track->htile_offset = radeon_get_ib_value(p, idx) << 8;
+		track->htile_offset = (u64)radeon_get_ib_value(p, idx) << 8;
 		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
 		track->htile_bo = reloc->robj;
 		track->db_dirty = true;
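
The same overflow class with a shift instead of a multiply: the register value is 32-bit, and << 8 discards its top byte unless the operand is widened first. The (u64) cast makes the shift happen in 64 bits:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t reg = 0x01000000;		/* offset in 256-byte units */
	uint64_t bad  = reg << 8;		/* shifted in 32 bits: 0 */
	uint64_t good = (uint64_t)reg << 8;	/* shifted in 64 bits */

	printf("bad=0x%" PRIx64 " good=0x%" PRIx64 "\n", bad, good);
	return 0;
}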

View file

@@ -687,11 +687,16 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
 	if (radeon_crtc == NULL)
 		return;
 
+	radeon_crtc->flip_queue = alloc_workqueue("radeon-crtc", WQ_HIGHPRI, 0);
+	if (!radeon_crtc->flip_queue) {
+		kfree(radeon_crtc);
+		return;
+	}
+
 	drm_crtc_init(dev, &radeon_crtc->base, &radeon_crtc_funcs);
 
 	drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256);
 	radeon_crtc->crtc_id = index;
-	radeon_crtc->flip_queue = alloc_workqueue("radeon-crtc", WQ_HIGHPRI, 0);
 	rdev->mode_info.crtcs[index] = radeon_crtc;
 
 	if (rdev->family >= CHIP_BONAIRE) {

View file

@@ -1204,13 +1204,17 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
 	r = radeon_bo_create(rdev, pd_size, align, true,
 			     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
 			     NULL, &vm->page_directory);
-	if (r)
+	if (r) {
+		kfree(vm->page_tables);
+		vm->page_tables = NULL;
 		return r;
+	}
 
 	r = radeon_vm_clear_bo(rdev, vm->page_directory);
 	if (r) {
 		radeon_bo_unref(&vm->page_directory);
 		vm->page_directory = NULL;
+		kfree(vm->page_tables);
+		vm->page_tables = NULL;
 		return r;
 	}

View file

@@ -3611,6 +3611,10 @@ static int si_cp_start(struct radeon_device *rdev)
 	for (i = RADEON_RING_TYPE_GFX_INDEX; i <= CAYMAN_RING_TYPE_CP2_INDEX; ++i) {
 		ring = &rdev->ring[i];
 		r = radeon_ring_lock(rdev, ring, 2);
+		if (r) {
+			DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+			return r;
+		}
 
 		/* clear the compute context state */
 		radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0));

View file

@@ -1493,8 +1493,10 @@ static int sumo_parse_power_table(struct radeon_device *rdev)
 		non_clock_array_index = power_state->v2.nonClockInfoIndex;
 		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
 			&non_clock_info_array->nonClockInfo[non_clock_array_index];
-		if (!rdev->pm.power_state[i].clock_info)
+		if (!rdev->pm.power_state[i].clock_info) {
+			kfree(rdev->pm.dpm.ps);
 			return -EINVAL;
+		}
 		ps = kzalloc(sizeof(struct sumo_ps), GFP_KERNEL);
 		if (ps == NULL) {
 			kfree(rdev->pm.dpm.ps);

View file

@@ -1726,8 +1726,10 @@ static int trinity_parse_power_table(struct radeon_device *rdev)
 		non_clock_array_index = power_state->v2.nonClockInfoIndex;
 		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
 			&non_clock_info_array->nonClockInfo[non_clock_array_index];
-		if (!rdev->pm.power_state[i].clock_info)
+		if (!rdev->pm.power_state[i].clock_info) {
+			kfree(rdev->pm.dpm.ps);
 			return -EINVAL;
+		}
 		ps = kzalloc(sizeof(struct sumo_ps), GFP_KERNEL);
 		if (ps == NULL) {
 			kfree(rdev->pm.dpm.ps);