sync with OpenBSD -current

This commit is contained in:
purplerain 2024-03-04 15:49:10 +00:00
parent 3c7ee3c11c
commit dd4d2242a5
Signed by: purplerain
GPG key ID: F42C07F07E2E35B7
44 changed files with 850 additions and 587 deletions

View file

@@ -1040,6 +1040,8 @@ struct amdgpu_device {
 	bool in_s3;
 	bool in_s4;
 	bool in_s0ix;
+	/* indicate amdgpu suspension status */
+	bool suspend_complete;
 
 	enum pp_mp1_state mp1_state;
 	struct amdgpu_doorbell_index doorbell_index;

View file

@@ -2416,6 +2416,7 @@ static int amdgpu_pmops_suspend(struct device *dev)
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
 
+	adev->suspend_complete = false;
 	if (amdgpu_acpi_is_s0ix_active(adev))
 		adev->in_s0ix = true;
 	else if (amdgpu_acpi_is_s3_active(adev))
@@ -2430,6 +2431,7 @@ static int amdgpu_pmops_suspend_noirq(struct device *dev)
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
 
+	adev->suspend_complete = true;
 	if (amdgpu_acpi_should_gpu_reset(adev))
 		return amdgpu_asic_reset(adev);
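Aside: the point of the new flag is ordering. amdgpu_pmops_suspend() runs first and clears suspend_complete; amdgpu_pmops_suspend_noirq() is the last suspend callback and sets it, so an aborted suspend leaves it false. A minimal sketch of the pattern (hypothetical my_dev driver, not the amdgpu code itself):

struct my_dev {
	bool suspend_complete;
};

static int my_suspend(struct my_dev *d)
{
	d->suspend_complete = false;	/* suspend sequence is starting */
	/* ... quiesce engines, save state ... */
	return 0;
}

static int my_suspend_noirq(struct my_dev *d)
{
	d->suspend_complete = true;	/* every earlier phase succeeded */
	return 0;
}

static int my_resume(struct my_dev *d)
{
	if (!d->suspend_complete) {
		/* suspend was aborted partway through: the hardware is in
		 * an unknown state, so recover by reset instead of a normal
		 * resume (which is what the gfx9/soc15 hunks below do). */
	}
	return 0;
}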

View file

@@ -362,7 +362,7 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size
 		}
 	}
 
-	if (copy_to_user((char *)buf, context->mem_context.shared_buf, shared_buf_len))
+	if (copy_to_user((char *)&buf[copy_pos], context->mem_context.shared_buf, shared_buf_len))
 		ret = -EFAULT;
 
 err_free_shared_buf:

View file

@@ -3033,6 +3033,14 @@ static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
 
 	gfx_v9_0_cp_gfx_enable(adev, true);
 
+	/* Now only limit the quirk on the APU gfx9 series and already
+	 * confirmed that the APU gfx10/gfx11 needn't such update.
+	 */
+	if (adev->flags & AMD_IS_APU &&
+	    adev->in_s3 && !adev->suspend_complete) {
+		DRM_INFO(" Will skip the CSB packet resubmit\n");
+		return 0;
+	}
 	r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
 	if (r) {
 		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);

View file

@@ -426,6 +426,12 @@ static void nbio_v7_9_init_registers(struct amdgpu_device *adev)
 	u32 inst_mask;
 	int i;
 
+	if (amdgpu_sriov_vf(adev))
+		adev->rmmio_remap.reg_offset =
+			SOC15_REG_OFFSET(
+				NBIO, 0,
+				regBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL)
+			<< 2;
 	WREG32_SOC15(NBIO, 0, regXCC_DOORBELL_FENCE,
 		0xff & ~(adev->gfx.xcc_mask));

View file

@@ -1296,10 +1296,32 @@ static int soc15_common_suspend(void *handle)
 	return soc15_common_hw_fini(adev);
 }
 
+static bool soc15_need_reset_on_resume(struct amdgpu_device *adev)
+{
+	u32 sol_reg;
+
+	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
+
+	/* Will reset for the following suspend abort cases.
+	 * 1) Only reset limit on APU side, dGPU hasn't checked yet.
+	 * 2) S3 suspend abort and TOS already launched.
+	 */
+	if (adev->flags & AMD_IS_APU && adev->in_s3 &&
+	    !adev->suspend_complete &&
+	    sol_reg)
+		return true;
+
+	return false;
+}
+
 static int soc15_common_resume(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	if (soc15_need_reset_on_resume(adev)) {
+		dev_info(adev->dev, "S3 suspend abort case, let's reset ASIC.\n");
+		soc15_asic_reset(adev);
+	}
+
 	return soc15_common_hw_init(adev);
 }

View file

@@ -1482,10 +1482,15 @@ void kfd_dec_compute_active(struct kfd_node *dev);
 
 /* Cgroup Support */
 /* Check with device cgroup if @kfd device is accessible */
-static inline int kfd_devcgroup_check_permission(struct kfd_node *kfd)
+static inline int kfd_devcgroup_check_permission(struct kfd_node *node)
 {
 #if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
-	struct drm_device *ddev = adev_to_drm(kfd->adev);
+	struct drm_device *ddev;
+
+	if (node->xcp)
+		ddev = node->xcp->ddev;
+	else
+		ddev = adev_to_drm(node->adev);
 
 	return devcgroup_check_permission(DEVCG_DEV_CHAR, DRM_MAJOR,
 					  ddev->render->index,

View file

@@ -1816,21 +1816,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 			DRM_ERROR("amdgpu: fail to register dmub aux callback");
 			goto error;
 		}
-		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
-			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
-			goto error;
-		}
-		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
-			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
-			goto error;
-		}
 	}
 
-	/* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
-	 * It is expected that DMUB will resend any pending notifications at this point, for
-	 * example HPD from DPIA.
-	 */
 	if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+		/* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
+		 * It is expected that DMUB will resend any pending notifications at this point. Note
+		 * that hpd and hpd_irq handler registration are deferred to register_hpd_handlers() to
+		 * align legacy interface initialization sequence. Connection status will be proactivly
+		 * detected once in the amdgpu_dm_initialize_drm_device.
+		 */
 		dc_enable_dmub_outbox(adev->dm.dc);
 
 		/* DPIA trace goes to dmesg logs only if outbox is enabled */
@@ -2260,6 +2251,7 @@ static int dm_sw_fini(void *handle)
 	if (adev->dm.dmub_srv) {
 		dmub_srv_destroy(adev->dm.dmub_srv);
 		kfree(adev->dm.dmub_srv);
+		adev->dm.dmub_srv = NULL;
 	}
 
@@ -3488,6 +3480,14 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
 
+	if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true))
+			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
+
+		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true))
+			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
+	}
+
 	list_for_each_entry(connector,
 			&dev->mode_config.connector_list, head) {
@@ -3513,10 +3513,6 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
 					handle_hpd_rx_irq,
 					(void *) aconnector);
 		}
-
-		if (adev->dm.hpd_rx_offload_wq)
-			adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
-				aconnector;
 	}
 }
@@ -4485,6 +4481,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 		link = dc_get_link_at_index(dm->dc, i);
 
+		if (dm->hpd_rx_offload_wq)
+			dm->hpd_rx_offload_wq[aconnector->base.index].aconnector =
+				aconnector;
+
 		if (!dc_link_detect_connection_type(link, &new_connection_type))
 			DRM_ERROR("KMS: Failed to detect connector\n");

View file

@@ -1860,19 +1860,21 @@ static enum bp_result get_firmware_info_v3_2(
 			/* Vega12 */
 			smu_info_v3_2 = GET_IMAGE(struct atom_smu_info_v3_2,
 							DATA_TABLES(smu_info));
-			DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_2->gpuclk_ss_percentage);
 			if (!smu_info_v3_2)
 				return BP_RESULT_BADBIOSTABLE;
 
+			DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_2->gpuclk_ss_percentage);
+
 			info->default_engine_clk = smu_info_v3_2->bootup_dcefclk_10khz * 10;
 		} else if (revision.minor == 3) {
 			/* Vega20 */
 			smu_info_v3_3 = GET_IMAGE(struct atom_smu_info_v3_3,
 							DATA_TABLES(smu_info));
-			DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_3->gpuclk_ss_percentage);
 			if (!smu_info_v3_3)
 				return BP_RESULT_BADBIOSTABLE;
 
+			DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_3->gpuclk_ss_percentage);
+
 			info->default_engine_clk = smu_info_v3_3->bootup_dcefclk_10khz * 10;
 		}
@@ -2435,10 +2437,11 @@ static enum bp_result get_integrated_info_v11(
 	info_v11 = GET_IMAGE(struct atom_integrated_system_info_v1_11,
 					DATA_TABLES(integratedsysteminfo));
-	DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v11->gpuclk_ss_percentage);
 	if (info_v11 == NULL)
 		return BP_RESULT_BADBIOSTABLE;
 
+	DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v11->gpuclk_ss_percentage);
+
 	info->gpu_cap_info =
 		le32_to_cpu(info_v11->gpucapinfo);
 	/*
@@ -2650,11 +2653,12 @@ static enum bp_result get_integrated_info_v2_1(
 	info_v2_1 = GET_IMAGE(struct atom_integrated_system_info_v2_1,
 					DATA_TABLES(integratedsysteminfo));
-	DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_1->gpuclk_ss_percentage);
 	if (info_v2_1 == NULL)
 		return BP_RESULT_BADBIOSTABLE;
 
+	DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_1->gpuclk_ss_percentage);
+
 	info->gpu_cap_info =
 		le32_to_cpu(info_v2_1->gpucapinfo);
 	/*
@@ -2812,11 +2816,11 @@ static enum bp_result get_integrated_info_v2_2(
 	info_v2_2 = GET_IMAGE(struct atom_integrated_system_info_v2_2,
 					DATA_TABLES(integratedsysteminfo));
-	DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_2->gpuclk_ss_percentage);
 	if (info_v2_2 == NULL)
 		return BP_RESULT_BADBIOSTABLE;
 
+	DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_2->gpuclk_ss_percentage);
+
 	info->gpu_cap_info =
 		le32_to_cpu(info_v2_2->gpucapinfo);
 	/*
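All four bios_parser2 hunks fix the same defect class: the DC_LOG_BIOS() call dereferenced the table pointer returned by GET_IMAGE() before the NULL check ran. A minimal standalone sketch of the rule (hypothetical names, not DC code):

#include <stdio.h>

struct tbl { int gpuclk_ss_percentage; };

static int use_table(const struct tbl *t)
{
	/* Broken ordering: t->gpuclk_ss_percentage faults when t == NULL.
	 *
	 *	printf("ss pct: %d\n", t->gpuclk_ss_percentage);
	 *	if (!t)
	 *		return -1;
	 */
	if (!t)
		return -1;	/* validate first ... */

	printf("ss pct: %d\n", t->gpuclk_ss_percentage);	/* ... then log */
	return 0;
}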

View file

@@ -497,7 +497,7 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)
 	link->dc->link_srv->enable_hpd_filter(link, enable);
 }
 
-bool dc_link_validate(struct dc *dc, const struct dc_stream_state *streams, const unsigned int count)
+bool dc_link_dp_dpia_validate(struct dc *dc, const struct dc_stream_state *streams, const unsigned int count)
 {
 	return dc->link_srv->validate_dpia_bandwidth(streams, count);
 }

View file

@@ -2116,11 +2116,11 @@ int dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(
  *
  * @dc: pointer to dc struct
  * @stream: pointer to all possible streams
- * @num_streams: number of valid DPIA streams
+ * @count: number of valid DPIA streams
  *
  * return: TRUE if bw used by DPIAs doesn't exceed available BW else return FALSE
  */
-bool dc_link_validate(struct dc *dc, const struct dc_stream_state *streams,
+bool dc_link_dp_dpia_validate(struct dc *dc, const struct dc_stream_state *streams,
 		const unsigned int count);
 
 /* Sink Interfaces - A sink corresponds to a display output device */
/* Sink Interfaces - A sink corresponds to a display output device */

View file

@@ -1433,6 +1433,12 @@ struct dp_trace {
 #ifndef DP_TUNNELING_STATUS
 #define DP_TUNNELING_STATUS 0xE0025 /* 1.4a */
 #endif
+#ifndef DP_TUNNELING_MAX_LINK_RATE
+#define DP_TUNNELING_MAX_LINK_RATE 0xE0028 /* 1.4a */
+#endif
+#ifndef DP_TUNNELING_MAX_LANE_COUNT
+#define DP_TUNNELING_MAX_LANE_COUNT 0xE0029 /* 1.4a */
+#endif
 #ifndef DPTX_BW_ALLOCATION_MODE_CONTROL
 #define DPTX_BW_ALLOCATION_MODE_CONTROL 0xE0030 /* 1.4a */
 #endif

View file

@@ -1110,21 +1110,25 @@ struct dc_panel_config {
 	} ilr;
 };
 
+#define MAX_SINKS_PER_LINK 4
 /*
  * USB4 DPIA BW ALLOCATION STRUCTS
 */
 struct dc_dpia_bw_alloc {
-	int sink_verified_bw;  // The Verified BW that sink can allocated and use that has been verified already
-	int sink_allocated_bw; // The Actual Allocated BW that sink currently allocated
-	int sink_max_bw;       // The Max BW that sink can require/support
+	int remote_sink_req_bw[MAX_SINKS_PER_LINK]; // BW requested by remote sinks
+	int link_verified_bw;  // The Verified BW that link can allocated and use that has been verified already
+	int link_max_bw;       // The Max BW that link can require/support
+	int allocated_bw;      // The Actual Allocated BW for this DPIA
 	int estimated_bw;      // The estimated available BW for this DPIA
 	int bw_granularity;    // BW Granularity
+	int dp_overhead;       // DP overhead in dp tunneling
 	bool bw_alloc_enabled; // The BW Alloc Mode Support is turned ON for all 3: DP-Tx & Dpia & CM
 	bool response_ready;   // Response ready from the CM side
+	uint8_t nrd_max_lane_count; // Non-reduced max lane count
+	uint8_t nrd_max_link_rate;  // Non-reduced max link rate
 };
 
-#define MAX_SINKS_PER_LINK 4
 enum dc_hpd_enable_select {
 	HPD_EN_FOR_ALL_EDP = 0,
 	HPD_EN_FOR_PRIMARY_EDP_ONLY,
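For readers tracking the rename through the dpia hunks below, the field mapping in struct dc_dpia_bw_alloc is:

/* old field         -> new field
 * sink_verified_bw  -> link_verified_bw
 * sink_max_bw       -> link_max_bw
 * sink_allocated_bw -> allocated_bw
 * (new)                remote_sink_req_bw[], dp_overhead,
 *                      nrd_max_lane_count, nrd_max_link_rate
 */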

View file

@@ -2071,17 +2071,11 @@ static enum dc_status enable_link_dp(struct dc_state *state,
 		}
 	}
 
-	/*
-	 * If the link is DP-over-USB4 do the following:
-	 * - Train with fallback when enabling DPIA link. Conventional links are
+	/* Train with fallback when enabling DPIA link. Conventional links are
 	 * trained with fallback during sink detection.
-	 * - Allocate only what the stream needs for bw in Gbps. Inform the CM
-	 * in case stream needs more or less bw from what has been allocated
-	 * earlier at plug time.
 	 */
-	if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+	if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
 		do_fallback = true;
-	}
 
 	/*
 	 * Temporary w/a to get DP2.0 link rates to work with SST.
@@ -2263,6 +2257,32 @@ static enum dc_status enable_link(
 	return status;
 }
 
+static bool allocate_usb4_bandwidth_for_stream(struct dc_stream_state *stream, int bw)
+{
+	return true;
+}
+
+static bool allocate_usb4_bandwidth(struct dc_stream_state *stream)
+{
+	bool ret;
+
+	int bw = dc_bandwidth_in_kbps_from_timing(&stream->timing,
+			dc_link_get_highest_encoding_format(stream->sink->link));
+
+	ret = allocate_usb4_bandwidth_for_stream(stream, bw);
+
+	return ret;
+}
+
+static bool deallocate_usb4_bandwidth(struct dc_stream_state *stream)
+{
+	bool ret;
+
+	ret = allocate_usb4_bandwidth_for_stream(stream, 0);
+
+	return ret;
+}
+
 void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
 {
 	struct dc *dc = pipe_ctx->stream->ctx->dc;
@@ -2299,6 +2319,9 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
 		update_psp_stream_config(pipe_ctx, true);
 	dc->hwss.blank_stream(pipe_ctx);
 
+	if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+		deallocate_usb4_bandwidth(pipe_ctx->stream);
+
 	if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
 		deallocate_mst_payload(pipe_ctx);
 	else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
@@ -2520,6 +2543,9 @@ void link_set_dpms_on(
 		}
 	}
 
+	if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+		allocate_usb4_bandwidth(pipe_ctx->stream);
+
 	if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
 		allocate_mst_payload(pipe_ctx);
 	else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&

View file

@@ -346,23 +346,61 @@ enum dc_status link_validate_mode_timing(
 	return DC_OK;
 }
 
+/*
+ * This function calculates the bandwidth required for the stream timing
+ * and aggregates the stream bandwidth for the respective dpia link
+ *
+ * @stream: pointer to the dc_stream_state struct instance
+ * @num_streams: number of streams to be validated
+ *
+ * return: true if validation is succeeded
+ */
 bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const unsigned int num_streams)
 {
-	bool ret = true;
-	int bw_needed[MAX_DPIA_NUM];
-	struct dc_link *link[MAX_DPIA_NUM];
-
-	if (!num_streams || num_streams > MAX_DPIA_NUM)
-		return ret;
+	int bw_needed[MAX_DPIA_NUM] = {0};
+	struct dc_link *dpia_link[MAX_DPIA_NUM] = {0};
+	int num_dpias = 0;
 
-	for (uint8_t i = 0; i < num_streams; ++i) {
+	for (unsigned int i = 0; i < num_streams; ++i) {
+		if (stream[i].signal == SIGNAL_TYPE_DISPLAY_PORT) {
+			/* new dpia sst stream, check whether it exceeds max dpia */
+			if (num_dpias >= MAX_DPIA_NUM)
+				return false;
 
-		link[i] = stream[i].link;
-		bw_needed[i] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing,
-				dc_link_get_highest_encoding_format(link[i]));
+			dpia_link[num_dpias] = stream[i].link;
+			bw_needed[num_dpias] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing,
+					dc_link_get_highest_encoding_format(dpia_link[num_dpias]));
+			num_dpias++;
+		} else if (stream[i].signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+			uint8_t j = 0;
+			/* check whether its a known dpia link */
+			for (; j < num_dpias; ++j) {
+				if (dpia_link[j] == stream[i].link)
+					break;
+			}
+
+			if (j == num_dpias) {
+				/* new dpia mst stream, check whether it exceeds max dpia */
+				if (num_dpias >= MAX_DPIA_NUM)
+					return false;
+				else {
+					dpia_link[j] = stream[i].link;
+					num_dpias++;
+				}
+			}
+
+			bw_needed[j] += dc_bandwidth_in_kbps_from_timing(&stream[i].timing,
+				dc_link_get_highest_encoding_format(dpia_link[j]));
+		}
 	}
 
-	ret = dpia_validate_usb4_bw(link, bw_needed, num_streams);
+	/* Include dp overheads */
+	for (uint8_t i = 0; i < num_dpias; ++i) {
+		int dp_overhead = 0;
 
-	return ret;
+		dp_overhead = link_dp_dpia_get_dp_overhead_in_dp_tunneling(dpia_link[i]);
+		bw_needed[i] += dp_overhead;
+	}
+
+	return dpia_validate_usb4_bw(dpia_link, bw_needed, num_dpias);
 }
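A toy, self-contained illustration of the aggregation rule introduced above (hypothetical types and numbers, not DC code): each SST stream claims its own DPIA slot, while MST streams sharing a link are summed into one slot before validation:

#include <stdio.h>

#define MAX_DPIA 2

struct toy_stream {
	int link_id;	/* which DPIA link the stream runs over */
	int bw_kbps;	/* bandwidth required by the stream timing */
};

int main(void)
{
	/* two MST streams share link 0; one SST stream owns link 1 */
	struct toy_stream s[] = {
		{ 0, 8100000 },
		{ 0, 4050000 },
		{ 1, 8100000 },
	};
	int bw_needed[MAX_DPIA] = { 0 };

	for (unsigned int i = 0; i < sizeof(s) / sizeof(s[0]); ++i)
		bw_needed[s[i].link_id] += s[i].bw_kbps;

	/* prints 12150000 kbps for dpia 0 and 8100000 kbps for dpia 1 */
	for (int i = 0; i < MAX_DPIA; ++i)
		printf("dpia %d needs %d kbps\n", i, bw_needed[i]);
	return 0;
}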

View file

@@ -54,11 +54,18 @@ static bool get_bw_alloc_proceed_flag(struct dc_link *tmp)
 static void reset_bw_alloc_struct(struct dc_link *link)
 {
 	link->dpia_bw_alloc_config.bw_alloc_enabled = false;
-	link->dpia_bw_alloc_config.sink_verified_bw = 0;
-	link->dpia_bw_alloc_config.sink_max_bw = 0;
+	link->dpia_bw_alloc_config.link_verified_bw = 0;
+	link->dpia_bw_alloc_config.link_max_bw = 0;
+	link->dpia_bw_alloc_config.allocated_bw = 0;
 	link->dpia_bw_alloc_config.estimated_bw = 0;
 	link->dpia_bw_alloc_config.bw_granularity = 0;
+	link->dpia_bw_alloc_config.dp_overhead = 0;
 	link->dpia_bw_alloc_config.response_ready = false;
+	link->dpia_bw_alloc_config.nrd_max_lane_count = 0;
+	link->dpia_bw_alloc_config.nrd_max_link_rate = 0;
+	for (int i = 0; i < MAX_SINKS_PER_LINK; i++)
+		link->dpia_bw_alloc_config.remote_sink_req_bw[i] = 0;
+	DC_LOG_DEBUG("reset usb4 bw alloc of link(%d)\n", link->link_index);
 }
 
 #define BW_GRANULARITY_0 4 // 0.25 Gbps
#define BW_GRANULARITY_0 4 // 0.25 Gbps
@@ -104,6 +111,32 @@ static int get_estimated_bw(struct dc_link *link)
 	return bw_estimated_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
 }
 
+static int get_non_reduced_max_link_rate(struct dc_link *link)
+{
+	uint8_t nrd_max_link_rate = 0;
+
+	core_link_read_dpcd(
+			link,
+			DP_TUNNELING_MAX_LINK_RATE,
+			&nrd_max_link_rate,
+			sizeof(uint8_t));
+
+	return nrd_max_link_rate;
+}
+
+static int get_non_reduced_max_lane_count(struct dc_link *link)
+{
+	uint8_t nrd_max_lane_count = 0;
+
+	core_link_read_dpcd(
+			link,
+			DP_TUNNELING_MAX_LANE_COUNT,
+			&nrd_max_lane_count,
+			sizeof(uint8_t));
+
+	return nrd_max_lane_count;
+}
+
 /*
  * Read all New BW alloc configuration ex: estimated_bw, allocated_bw,
  * granuality, Driver_ID, CM_Group, & populate the BW allocation structs
@@ -111,13 +144,20 @@ static int get_estimated_bw(struct dc_link *link)
 */
 static void init_usb4_bw_struct(struct dc_link *link)
 {
-	// Init the known values
+	reset_bw_alloc_struct(link);
+
+	/* init the known values */
 	link->dpia_bw_alloc_config.bw_granularity = get_bw_granularity(link);
 	link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link);
+	link->dpia_bw_alloc_config.nrd_max_link_rate = get_non_reduced_max_link_rate(link);
+	link->dpia_bw_alloc_config.nrd_max_lane_count = get_non_reduced_max_lane_count(link);
+
+	DC_LOG_DEBUG("%s: bw_granularity(%d), estimated_bw(%d)\n",
+		__func__, link->dpia_bw_alloc_config.bw_granularity,
+		link->dpia_bw_alloc_config.estimated_bw);
+	DC_LOG_DEBUG("%s: nrd_max_link_rate(%d), nrd_max_lane_count(%d)\n",
+		__func__, link->dpia_bw_alloc_config.nrd_max_link_rate,
+		link->dpia_bw_alloc_config.nrd_max_lane_count);
 }
 
 static uint8_t get_lowest_dpia_index(struct dc_link *link)
static uint8_t get_lowest_dpia_index(struct dc_link *link)
@@ -142,39 +182,50 @@ static uint8_t get_lowest_dpia_index(struct dc_link *link)
 }
 
 /*
- * Get the Max Available BW or Max Estimated BW for each Host Router
+ * Get the maximum dp tunnel banwidth of host router
 *
- * @link: pointer to the dc_link struct instance
- * @type: ESTIMATD BW or MAX AVAILABLE BW
+ * @dc: pointer to the dc struct instance
+ * @hr_index: host router index
 *
- * return: response_ready flag from dc_link struct
+ * return: host router maximum dp tunnel bandwidth
 */
-static int get_host_router_total_bw(struct dc_link *link, uint8_t type)
+static int get_host_router_total_dp_tunnel_bw(const struct dc *dc, uint8_t hr_index)
 {
-	const struct dc *dc_struct = link->dc;
-	uint8_t lowest_dpia_index = get_lowest_dpia_index(link);
-	uint8_t idx = (link->link_index - lowest_dpia_index) / 2, idx_temp = 0;
-	struct dc_link *link_temp;
+	uint8_t lowest_dpia_index = get_lowest_dpia_index(dc->links[0]);
+	uint8_t hr_index_temp = 0;
+	struct dc_link *link_dpia_primary, *link_dpia_secondary;
 	int total_bw = 0;
-	int i;
 
-	for (i = 0; i < MAX_PIPES * 2; ++i) {
+	for (uint8_t i = 0; i < (MAX_PIPES * 2) - 1; ++i) {
 
-		if (!dc_struct->links[i] || dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
+		if (!dc->links[i] || dc->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
 			continue;
 
-		link_temp = dc_struct->links[i];
-		if (!link_temp || !link_temp->hpd_status)
-			continue;
-
-		idx_temp = (link_temp->link_index - lowest_dpia_index) / 2;
+		hr_index_temp = (dc->links[i]->link_index - lowest_dpia_index) / 2;
 
-		if (idx_temp == idx) {
-			if (type == HOST_ROUTER_BW_ESTIMATED)
-				total_bw += link_temp->dpia_bw_alloc_config.estimated_bw;
-			else if (type == HOST_ROUTER_BW_ALLOCATED)
-				total_bw += link_temp->dpia_bw_alloc_config.sink_allocated_bw;
+		if (hr_index_temp == hr_index) {
+			link_dpia_primary = dc->links[i];
+			link_dpia_secondary = dc->links[i + 1];
+
+			/**
+			 * If BW allocation enabled on both DPIAs, then
+			 * HR BW = Estimated(dpia_primary) + Allocated(dpia_secondary)
+			 * otherwise HR BW = Estimated(bw alloc enabled dpia)
+			 */
+			if ((link_dpia_primary->hpd_status &&
+				link_dpia_primary->dpia_bw_alloc_config.bw_alloc_enabled) &&
+				(link_dpia_secondary->hpd_status &&
+				link_dpia_secondary->dpia_bw_alloc_config.bw_alloc_enabled)) {
+				total_bw += link_dpia_primary->dpia_bw_alloc_config.estimated_bw +
+					link_dpia_secondary->dpia_bw_alloc_config.allocated_bw;
+			} else if (link_dpia_primary->hpd_status &&
+					link_dpia_primary->dpia_bw_alloc_config.bw_alloc_enabled) {
+				total_bw = link_dpia_primary->dpia_bw_alloc_config.estimated_bw;
+			} else if (link_dpia_secondary->hpd_status &&
+				link_dpia_secondary->dpia_bw_alloc_config.bw_alloc_enabled) {
+				total_bw += link_dpia_secondary->dpia_bw_alloc_config.estimated_bw;
+			}
+			break;
 		}
 	}
@@ -194,7 +245,6 @@ static void dpia_bw_alloc_unplug(struct dc_link *link)
 	if (link) {
 		DC_LOG_DEBUG("%s: resetting bw alloc config for link(%d)\n",
 			__func__, link->link_index);
-		link->dpia_bw_alloc_config.sink_allocated_bw = 0;
 		reset_bw_alloc_struct(link);
 	}
 }
@@ -220,7 +270,7 @@ static void set_usb4_req_bw_req(struct dc_link *link, int req_bw)
 
 	/* Error check whether requested and allocated are equal */
 	req_bw = requested_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
-	if (req_bw == link->dpia_bw_alloc_config.sink_allocated_bw) {
+	if (req_bw == link->dpia_bw_alloc_config.allocated_bw) {
 		DC_LOG_ERROR("%s: Request bw equals to allocated bw for link(%d)\n",
 			__func__, link->link_index);
 	}
@@ -343,9 +393,9 @@ void dpia_handle_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t res
 		DC_LOG_DEBUG("%s: BW REQ SUCCESS for DP-TX Request for link(%d)\n",
 			__func__, link->link_index);
 		DC_LOG_DEBUG("%s: current allocated_bw(%d), new allocated_bw(%d)\n",
-			__func__, link->dpia_bw_alloc_config.sink_allocated_bw, bw_needed);
+			__func__, link->dpia_bw_alloc_config.allocated_bw, bw_needed);
 
-		link->dpia_bw_alloc_config.sink_allocated_bw = bw_needed;
+		link->dpia_bw_alloc_config.allocated_bw = bw_needed;
 
 		link->dpia_bw_alloc_config.response_ready = true;
 		break;
@@ -383,8 +433,8 @@ int dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int pea
 	if (link->hpd_status && peak_bw > 0) {
 
 		// If DP over USB4 then we need to check BW allocation
-		link->dpia_bw_alloc_config.sink_max_bw = peak_bw;
-		set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.sink_max_bw);
+		link->dpia_bw_alloc_config.link_max_bw = peak_bw;
+		set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.link_max_bw);
 
 		do {
 			if (timeout > 0)
@@ -396,8 +446,8 @@ int dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int pea
 		if (!timeout)
 			ret = 0;// ERROR TIMEOUT waiting for response for allocating bw
-		else if (link->dpia_bw_alloc_config.sink_allocated_bw > 0)
-			ret = get_host_router_total_bw(link, HOST_ROUTER_BW_ALLOCATED);
+		else if (link->dpia_bw_alloc_config.allocated_bw > 0)
+			ret = link->dpia_bw_alloc_config.allocated_bw;
 	}
 
 	//2. Cold Unplug
 	else if (!link->hpd_status)
@@ -406,7 +456,6 @@ int dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int pea
 out:
 	return ret;
 }
-
 bool link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw)
 {
 	bool ret = false;
@@ -414,7 +463,7 @@ bool link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int r
 
 	DC_LOG_DEBUG("%s: ENTER: link(%d), hpd_status(%d), current allocated_bw(%d), req_bw(%d)\n",
 		__func__, link->link_index, link->hpd_status,
-		link->dpia_bw_alloc_config.sink_allocated_bw, req_bw);
+		link->dpia_bw_alloc_config.allocated_bw, req_bw);
 
 	if (!get_bw_alloc_proceed_flag(link))
 		goto out;
@@ -439,31 +488,70 @@ out:
 bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed_per_dpia, const unsigned int num_dpias)
 {
 	bool ret = true;
-	int bw_needed_per_hr[MAX_HR_NUM] = { 0, 0 };
-	uint8_t lowest_dpia_index = 0, dpia_index = 0;
-	uint8_t i;
+	int bw_needed_per_hr[MAX_HR_NUM] = { 0, 0 }, host_router_total_dp_bw = 0;
+	uint8_t lowest_dpia_index, i, hr_index;
 
 	if (!num_dpias || num_dpias > MAX_DPIA_NUM)
 		return ret;
 
-	//Get total Host Router BW & Validate against each Host Router max BW
+	lowest_dpia_index = get_lowest_dpia_index(link[0]);
 
+	/* get total Host Router BW with granularity for the given modes */
 	for (i = 0; i < num_dpias; ++i) {
+		int granularity_Gbps = 0;
+		int bw_granularity = 0;
 
 		if (!link[i]->dpia_bw_alloc_config.bw_alloc_enabled)
 			continue;
 
-		lowest_dpia_index = get_lowest_dpia_index(link[i]);
 		if (link[i]->link_index < lowest_dpia_index)
 			continue;
 
-		dpia_index = (link[i]->link_index - lowest_dpia_index) / 2;
-		bw_needed_per_hr[dpia_index] += bw_needed_per_dpia[i];
-		if (bw_needed_per_hr[dpia_index] > get_host_router_total_bw(link[i], HOST_ROUTER_BW_ALLOCATED)) {
+		granularity_Gbps = (Kbps_TO_Gbps / link[i]->dpia_bw_alloc_config.bw_granularity);
+		bw_granularity = (bw_needed_per_dpia[i] / granularity_Gbps) * granularity_Gbps +
+				((bw_needed_per_dpia[i] % granularity_Gbps) ? granularity_Gbps : 0);
 
-			ret = false;
-			break;
+		hr_index = (link[i]->link_index - lowest_dpia_index) / 2;
+		bw_needed_per_hr[hr_index] += bw_granularity;
+	}
+
+	/* validate against each Host Router max BW */
+	for (hr_index = 0; hr_index < MAX_HR_NUM; ++hr_index) {
+		if (bw_needed_per_hr[hr_index]) {
+			host_router_total_dp_bw = get_host_router_total_dp_tunnel_bw(link[0]->dc, hr_index);
+			if (bw_needed_per_hr[hr_index] > host_router_total_dp_bw) {
+				ret = false;
+				break;
+			}
 		}
 	}
 
 	return ret;
 }
+
+int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link)
+{
+	int dp_overhead = 0, link_mst_overhead = 0;
+
+	if (!get_bw_alloc_proceed_flag((link)))
+		return dp_overhead;
+
+	/* if its mst link, add MTPH overhead */
+	if ((link->type == dc_connection_mst_branch) &&
+		!link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) {
+		/* For 8b/10b encoding: MTP is 64 time slots long, slot 0 is used for MTPH
+		 * MST overhead is 1/64 of link bandwidth (excluding any overhead)
+		 */
+		const struct dc_link_settings *link_cap =
+			dc_link_get_link_cap(link);
+		uint32_t link_bw_in_kbps = (uint32_t)link_cap->link_rate *
+					   (uint32_t)link_cap->lane_count *
+					   LINK_RATE_REF_FREQ_IN_KHZ * 8;
+		link_mst_overhead = (link_bw_in_kbps / 64) + ((link_bw_in_kbps % 64) ? 1 : 0);
+	}
+
+	/* add all the overheads */
+	dp_overhead = link_mst_overhead;
+	return dp_overhead;
+}
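Two of the formulas above, worked through with concrete numbers (assuming Kbps_TO_Gbps is 1000 * 1000 and LINK_RATE_REF_FREQ_IN_KHZ is 27000, as used elsewhere in DC):

/* Granularity rounding in dpia_validate_usb4_bw(), with
 * bw_granularity == 4 (i.e. 0.25 Gbps steps, per BW_GRANULARITY_0):
 *	granularity_Gbps = 1000000 / 4 = 250000 kbps
 *	a mode needing 8100000 kbps rounds up to
 *	(8100000 / 250000) * 250000 + 250000 = 8250000 kbps
 *
 * MST overhead in link_dp_dpia_get_dp_overhead_in_dp_tunneling(), for an
 * 8b/10b HBR2 x4 link (link_rate == 0x14, lane_count == 4):
 *	link_bw_in_kbps = 0x14 * 4 * 27000 * 8 = 17280000 kbps
 *	link_mst_overhead = 17280000 / 64 = 270000 kbps
 * i.e. one of the 64 MTP time slots is reserved for the MTP header.
 */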

View file

@@ -99,4 +99,13 @@ void dpia_handle_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t res
 */
 bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed, const unsigned int num_dpias);
 
+/*
+ * Obtain all the DP overheads in dp tunneling for the dpia link
+ *
+ * @link: pointer to the dc_link struct instance
+ *
+ * return: DP overheads in DP tunneling
+ */
+int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link);
+
 #endif /* DC_INC_LINK_DP_DPIA_BW_H_ */

View file

@@ -1096,7 +1096,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
 	uint64_t *points;
 	uint32_t signaled_count, i;
 
-	if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT)
+	if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
+		     DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE))
 		lockdep_assert_none_held_once();
 
 	points = kmalloc_array(count, sizeof(*points), GFP_KERNEL);
@@ -1169,7 +1170,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
 	 * fallthough and try a 0 timeout wait!
 	 */
 
-	if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
+	if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
+		     DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)) {
 		for (i = 0; i < count; ++i)
 			drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]);
 	}
@@ -1444,10 +1446,21 @@ syncobj_eventfd_entry_func(struct drm_syncobj *syncobj,
 	/* This happens inside the syncobj lock */
 	fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
+	if (!fence)
+		return;
+
 	ret = dma_fence_chain_find_seqno(&fence, entry->point);
-	if (ret != 0 || !fence) {
+	if (ret != 0) {
+		/* The given seqno has not been submitted yet. */
 		dma_fence_put(fence);
 		return;
+	} else if (!fence) {
+		/* If dma_fence_chain_find_seqno returns 0 but sets the fence
+		 * to NULL, it implies that the given seqno is signaled and a
+		 * later seqno has already been submitted. Assign a stub fence
+		 * so that the eventfd still gets signaled below.
+		 */
+		fence = dma_fence_get_stub();
 	}
 
 	list_del_init(&entry->node);
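The hunk above splits the old combined test into the three outcomes dma_fence_chain_find_seqno() can produce, summarized:

/* ret != 0                -> seqno not submitted yet: drop the reference
 *                            and keep waiting.
 * ret == 0, fence != NULL -> seqno submitted: signal the eventfd once
 *                            that fence signals.
 * ret == 0, fence == NULL -> seqno already signaled (and surpassed by a
 *                            later submission): substitute
 *                            dma_fence_get_stub(), an already-signaled
 *                            fence, so the eventfd still fires.
 */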

View file

@@ -1212,7 +1212,7 @@ static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo,
 	struct intel_sdvo_tv_format format;
 	u32 format_map;
 
-	format_map = 1 << conn_state->tv.mode;
+	format_map = 1 << conn_state->tv.legacy_mode;
 	memset(&format, 0, sizeof(format));
 	memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map)));
 
@@ -2295,7 +2295,7 @@ static int intel_sdvo_get_tv_modes(struct drm_connector *connector)
 	 * Read the list of supported input resolutions for the selected TV
 	 * format.
 	 */
-	format_map = 1 << conn_state->tv.mode;
+	format_map = 1 << conn_state->tv.legacy_mode;
 	memcpy(&tv_res, &format_map,
 	       min(sizeof(format_map), sizeof(struct intel_sdvo_sdtv_resolution_request)));
 
@@ -2360,7 +2360,7 @@ intel_sdvo_connector_atomic_get_property(struct drm_connector *connector,
 		int i;
 
 		for (i = 0; i < intel_sdvo_connector->format_supported_num; i++)
-			if (state->tv.mode == intel_sdvo_connector->tv_format_supported[i]) {
+			if (state->tv.legacy_mode == intel_sdvo_connector->tv_format_supported[i]) {
 				*val = i;
 
 				return 0;
@@ -2416,7 +2416,7 @@ intel_sdvo_connector_atomic_set_property(struct drm_connector *connector,
 	struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state(state);
 
 	if (property == intel_sdvo_connector->tv_format) {
-		state->tv.mode = intel_sdvo_connector->tv_format_supported[val];
+		state->tv.legacy_mode = intel_sdvo_connector->tv_format_supported[val];
 
 		if (state->crtc) {
 			struct drm_crtc_state *crtc_state =
@@ -3071,7 +3071,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
 		drm_property_add_enum(intel_sdvo_connector->tv_format, i,
 				      tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
 
-	intel_sdvo_connector->base.base.state->tv.mode = intel_sdvo_connector->tv_format_supported[0];
+	intel_sdvo_connector->base.base.state->tv.legacy_mode = intel_sdvo_connector->tv_format_supported[0];
 	drm_object_attach_property(&intel_sdvo_connector->base.base.base,
 				   intel_sdvo_connector->tv_format, 0);
 	return true;

View file

@@ -949,7 +949,7 @@ intel_disable_tv(struct intel_atomic_state *state,
 
 static const struct tv_mode *intel_tv_mode_find(const struct drm_connector_state *conn_state)
 {
-	int format = conn_state->tv.mode;
+	int format = conn_state->tv.legacy_mode;
 
 	return &tv_modes[format];
 }
@@ -1710,7 +1710,7 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
 			break;
 	}
 
-	connector->state->tv.mode = i;
+	connector->state->tv.legacy_mode = i;
 }
 
 static int
@@ -1865,7 +1865,7 @@ static int intel_tv_atomic_check(struct drm_connector *connector,
 	old_state = drm_atomic_get_old_connector_state(state, connector);
 	new_crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
 
-	if (old_state->tv.mode != new_state->tv.mode ||
+	if (old_state->tv.legacy_mode != new_state->tv.legacy_mode ||
 	    old_state->tv.margins.left != new_state->tv.margins.left ||
 	    old_state->tv.margins.right != new_state->tv.margins.right ||
 	    old_state->tv.margins.top != new_state->tv.margins.top ||
@@ -1902,7 +1902,7 @@ static void intel_tv_add_properties(struct drm_connector *connector)
 	conn_state->tv.margins.right = 46;
 	conn_state->tv.margins.bottom = 37;
 
-	conn_state->tv.mode = 0;
+	conn_state->tv.legacy_mode = 0;
 
 	/* Create TV properties then attach current values */
 	for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
@@ -1916,7 +1916,7 @@ static void intel_tv_add_properties(struct drm_connector *connector)
 	drm_object_attach_property(&connector->base,
 				   i915->drm.mode_config.legacy_tv_mode_property,
-				   conn_state->tv.mode);
+				   conn_state->tv.legacy_mode);
 
 	drm_object_attach_property(&connector->base,
 				   i915->drm.mode_config.tv_left_margin_property,
 				   conn_state->tv.margins.left);

View file

@@ -528,7 +528,7 @@ static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
 				enum ttm_caching caching,
 				pgoff_t start_page, pgoff_t end_page)
 {
-	struct vm_page **pages = tt->pages;
+	struct vm_page **pages = &tt->pages[start_page];
 	unsigned int order;
 	pgoff_t i, nr;
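The one-line change matters because callers pass absolute page indices. A note on what went wrong (the loop body itself is unchanged):

/* The loop below iterates as
 *	for (i = start_page; i < end_page; i += nr, pages += nr)
 * so pages must begin at &tt->pages[start_page]. With the old
 * initialization (pages = tt->pages), any call with start_page > 0
 * (for example, freeing only the tail of a partially populated tt on
 * an error path) walked entries from index 0 instead, freeing pages
 * that were never allocated or had already been freed. */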