sync code with last improvements from OpenBSD

purplerain 2023-09-19 17:34:32 +00:00
parent 0726fd4247
commit ba37adff3d
Signed by: purplerain
GPG key ID: F42C07F07E2E35B7
22 changed files with 251 additions and 150 deletions


@@ -38,6 +38,8 @@
 #include <linux/pci.h>
 #include <linux/pm_runtime.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_drv.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_fb_helper.h>
@@ -493,11 +495,29 @@ bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
 	return true;
 }
 
+static int amdgpu_dirtyfb(struct drm_framebuffer *fb, struct drm_file *file,
+			  unsigned int flags, unsigned int color,
+			  struct drm_clip_rect *clips, unsigned int num_clips)
+{
+
+	if (file)
+		return -ENOSYS;
+
+	return drm_atomic_helper_dirtyfb(fb, file, flags, color, clips,
+					 num_clips);
+}
+
 static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
 	.destroy = drm_gem_fb_destroy,
 	.create_handle = drm_gem_fb_create_handle,
 };
 
+static const struct drm_framebuffer_funcs amdgpu_fb_funcs_atomic = {
+	.destroy = drm_gem_fb_destroy,
+	.create_handle = drm_gem_fb_create_handle,
+	.dirty = amdgpu_dirtyfb
+};
+
 uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
 					  uint64_t bo_flags)
 {
@@ -1100,7 +1120,11 @@ static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev,
 	if (ret)
 		goto err;
 
-	ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+	if (drm_drv_uses_atomic_modeset(dev))
+		ret = drm_framebuffer_init(dev, &rfb->base,
+					   &amdgpu_fb_funcs_atomic);
+	else
+		ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
 
 	if (ret)
 		goto err;
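
Taken together, these hunks register a framebuffer dirty callback only when the driver runs with atomic modesetting, and amdgpu_dirtyfb() serves only in-kernel clients such as fbcon (which pass file == NULL), answering userspace with -ENOSYS. For context, a hedged userspace-side sketch, not taken from this commit: libdrm's drmModeDirtyFB() is the ioctl wrapper that ends up in the .dirty hook (fd and fb_id are assumed to be an open DRM device and a framebuffer id).

    #include <stdint.h>
    #include <xf86drmMode.h>

    /* Flush a damaged region of framebuffer fb_id on DRM device fd.
     * With the guard above, amdgpu rejects this userspace path with
     * -ENOSYS, while fbcon damage (file == NULL inside the kernel)
     * is forwarded to drm_atomic_helper_dirtyfb(). */
    static int flush_damage(int fd, uint32_t fb_id)
    {
        drmModeClip clip = { .x1 = 0, .y1 = 0, .x2 = 640, .y2 = 480 };

        return drmModeDirtyFB(fd, fb_id, &clip, 1);
    }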


@@ -1269,6 +1269,13 @@ void handle_cursor_update(struct drm_plane *plane,
 	attributes.rotation_angle = 0;
 	attributes.attribute_flags.value = 0;
 
+	/* Enable cursor degamma ROM on DCN3+ for implicit sRGB degamma in DRM
+	 * legacy gamma setup.
+	 */
+	if (crtc_state->cm_is_degamma_srgb &&
+	    adev->dm.dc->caps.color.dpp.gamma_corr)
+		attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1;
+
 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
 
 	if (crtc_state->stream) {


@@ -1977,12 +1977,12 @@ enum dc_status dc_commit_streams(struct dc *dc,
 		}
 	}
 
-	/* Check for case where we are going from odm 2:1 to max
-	 *  pipe scenario.  For these cases, we will call
-	 *  commit_minimal_transition_state() to exit out of odm 2:1
-	 *  first before processing new streams
+	/* ODM Combine 2:1 power optimization is only applied for single stream
+	 * scenario, it uses extra pipes than needed to reduce power consumption
+	 * We need to switch off this feature to make room for new streams.
 	 */
-	if (stream_count == dc->res_pool->pipe_count) {
+	if (stream_count > dc->current_state->stream_count &&
+			dc->current_state->stream_count == 1) {
 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
 			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
 			if (pipe->next_odm_pipe)
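
The rewritten guard is easier to read in isolation. A minimal sketch of the new trigger condition (the function name is mine, not in the source), with a worked scenario:

    #include <stdbool.h>

    /* Exit the single-stream ODM 2:1 power optimization only when streams
     * are being added on top of exactly one active stream. */
    static bool must_exit_odm_power_opt(int new_stream_count, int cur_stream_count)
    {
        return new_stream_count > cur_stream_count && cur_stream_count == 1;
    }

    /* Worked example: one eDP stream is active and ODM 2:1 borrows an
     * extra pipe for it (cur = 1). A second display is committed (new = 2):
     * must_exit_odm_power_opt(2, 1) == true, so the extra pipe is released
     * before the new stream is programmed. The old test, stream_count ==
     * pipe_count, only fired when the new stream count filled every pipe. */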
@@ -3361,6 +3361,45 @@ void dc_dmub_update_dirty_rect(struct dc *dc,
 	}
 }
 
+static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state *dc_context)
+{
+/*
+ * This function calls HWSS to wait for any potentially double buffered
+ * operations to complete. It should be invoked as a pre-amble prior
+ * to full update programming before asserting any HW locks.
+ */
+	int pipe_idx;
+	int opp_inst;
+	int opp_count = dc->res_pool->pipe_count;
+	struct hubp *hubp;
+	int mpcc_inst;
+	const struct pipe_ctx *pipe_ctx;
+
+	for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) {
+		pipe_ctx = &dc_context->res_ctx.pipe_ctx[pipe_idx];
+
+		if (!pipe_ctx->stream)
+			continue;
+
+		if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear)
+			pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg);
+
+		hubp = pipe_ctx->plane_res.hubp;
+		if (!hubp)
+			continue;
+
+		mpcc_inst = hubp->inst;
+		// MPCC inst is equal to pipe index in practice
+		for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
+			if (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) {
+				dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
+				dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
+				break;
+			}
+		}
+	}
+}
+
 static void commit_planes_for_stream(struct dc *dc,
 		struct dc_surface_update *srf_updates,
 		int surface_count,
@@ -3378,24 +3417,9 @@ static void commit_planes_for_stream(struct dc *dc,
 	// dc->current_state anymore, so we have to cache it before we apply
 	// the new SubVP context
 	subvp_prev_use = false;
 
 	dc_z10_restore(dc);
 
-	if (update_type == UPDATE_TYPE_FULL) {
-		/* wait for all double-buffer activity to clear on all pipes */
-		int pipe_idx;
-
-		for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) {
-			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];
-
-			if (!pipe_ctx->stream)
-				continue;
-
-			if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear)
-				pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg);
-		}
-	}
+	if (update_type == UPDATE_TYPE_FULL)
+		wait_for_outstanding_hw_updates(dc, context);
 
 	if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
 		/* Optimize seamless boot flag keeps clocks and watermarks high until


@@ -212,8 +212,9 @@ struct mpcc *mpc1_insert_plane(
 		/* check insert_above_mpcc exist in tree->opp_list */
 		struct mpcc *temp_mpcc = tree->opp_list;
 
-		while (temp_mpcc && temp_mpcc->mpcc_bot != insert_above_mpcc)
-			temp_mpcc = temp_mpcc->mpcc_bot;
+		if (temp_mpcc != insert_above_mpcc)
+			while (temp_mpcc && temp_mpcc->mpcc_bot != insert_above_mpcc)
+				temp_mpcc = temp_mpcc->mpcc_bot;
 		if (temp_mpcc == NULL)
 			return NULL;
 	}
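
The fixed walk is an instance of a classic predecessor-search pitfall: scanning for the node whose mpcc_bot points at insert_above_mpcc can never succeed when insert_above_mpcc is already the head of tree->opp_list, so the old code rejected a perfectly valid head insertion. A generic, hedged illustration (not driver code):

    #include <stddef.h>

    struct node {
        struct node *next;
    };

    /* Return target's predecessor in the list, or NULL when target is the
     * head (it has no predecessor) or is absent entirely. Callers must
     * treat "target == head" as success before walking, exactly as
     * mpc1_insert_plane() now checks temp_mpcc != insert_above_mpcc. */
    static struct node *find_prev(struct node *head, struct node *target)
    {
        struct node *n = head;

        while (n && n->next != target)
            n = n->next;
        return n;
    }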


@@ -1515,17 +1515,6 @@ static void dcn20_update_dchubp_dpp(
 		|| plane_state->update_flags.bits.global_alpha_change
 		|| plane_state->update_flags.bits.per_pixel_alpha_change) {
 		// MPCC inst is equal to pipe index in practice
-		int mpcc_inst = hubp->inst;
-		int opp_inst;
-		int opp_count = dc->res_pool->pipe_count;
-
-		for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
-			if (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) {
-				dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
-				dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
-				break;
-			}
-		}
 		hws->funcs.update_mpcc(dc, pipe_ctx);
 	}


@@ -338,7 +338,9 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
 		 * - Delta for CEIL: delta_from_mid_point_in_us_1
 		 * - Delta for FLOOR: delta_from_mid_point_in_us_2
 		 */
-		if ((last_render_time_in_us / mid_point_frames_ceil) < in_out_vrr->min_duration_in_us) {
+		if (mid_point_frames_ceil &&
+		    (last_render_time_in_us / mid_point_frames_ceil) <
+		    in_out_vrr->min_duration_in_us) {
 			/* Check for out of range.
 			 * If using CEIL produces a value that is out of range,
 			 * then we are forced to use FLOOR.
@@ -385,8 +387,9 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
 		/* Either we've calculated the number of frames to insert,
 		 * or we need to insert min duration frames
 		 */
-		if (last_render_time_in_us / frames_to_insert <
-				in_out_vrr->min_duration_in_us){
+		if (frames_to_insert &&
+		    (last_render_time_in_us / frames_to_insert) <
+		    in_out_vrr->min_duration_in_us){
 			frames_to_insert -= (frames_to_insert > 1) ?
 					1 : 0;
 		}
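
Both hunks guard the same hazard: the computed frame count can be zero, and an unguarded integer division by zero faults in the kernel. A small sketch of the guarded pattern, with illustrative numbers that are not from the source:

    #include <stdbool.h>

    /* Safe form of the check: short-circuit before dividing. */
    static bool below_min_duration(unsigned int last_render_time_in_us,
        unsigned int frames, unsigned int min_duration_in_us)
    {
        return frames &&
            (last_render_time_in_us / frames) < min_duration_in_us;
    }

    /* Example with made-up numbers, min_duration_in_us = 8000:
     *   frames = 4, last render 30000 us -> 30000 / 4 = 7500 < 8000 -> true
     *   frames = 0                       -> guard short-circuits   -> false
     * The unguarded original would divide by zero in the second case. */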


@@ -56,6 +56,7 @@ struct intel_breadcrumbs;
 
 typedef u32 intel_engine_mask_t;
 #define ALL_ENGINES ((intel_engine_mask_t)~0ul)
+#define VIRTUAL_ENGINES BIT(BITS_PER_TYPE(intel_engine_mask_t) - 1)
 
 struct intel_hw_status_page {
 	struct list_head timelines;

@@ -5121,6 +5121,11 @@ guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
 
 	ve->base.flags = I915_ENGINE_IS_VIRTUAL;
 
+#ifdef notyet
+	BUILD_BUG_ON(ilog2(VIRTUAL_ENGINES) < I915_NUM_ENGINES);
+#endif
+	ve->base.mask = VIRTUAL_ENGINES;
+
 	intel_context_init(&ve->context, &ve->base);
 
 	for (n = 0; n < count; n++) {
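
VIRTUAL_ENGINES reserves the most significant bit of intel_engine_mask_t (a u32, per the typedef above), so a GuC virtual engine carries a mask bit that can never collide with a physical engine's, and the BUILD_BUG_ON (wrapped in OpenBSD's #ifdef notyet) fails the build if a real engine index ever reaches that bit. The arithmetic, checked standalone in a userspace sketch with the kernel macros expanded by hand:

    #include <assert.h>
    #include <stdint.h>

    typedef uint32_t intel_engine_mask_t;

    #define BITS_PER_TYPE(t) (sizeof(t) * 8)
    #define BIT(n)           (1ul << (n))
    #define VIRTUAL_ENGINES  BIT(BITS_PER_TYPE(intel_engine_mask_t) - 1)

    int main(void)
    {
        /* Top bit of a 32-bit mask: bit 31. */
        assert(VIRTUAL_ENGINES == 0x80000000ul);
        /* Still a single-bit (power-of-two) mask. */
        assert((VIRTUAL_ENGINES & (VIRTUAL_ENGINES - 1)) == 0);
        return 0;
    }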


@@ -1179,6 +1179,7 @@ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
 {
 	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
 	kvm_pfn_t pfn;
+	int ret;
 
 	if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M))
 		return 0;
@@ -1188,7 +1189,13 @@ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
 	pfn = gfn_to_pfn(vgpu->vfio_device.kvm, ops->get_pfn(entry));
 	if (is_error_noslot_pfn(pfn))
 		return -EINVAL;
-	return PageTransHuge(pfn_to_page(pfn));
+
+	if (!pfn_valid(pfn))
+		return -EINVAL;
+
+	ret = PageTransHuge(pfn_to_page(pfn));
+	kvm_release_pfn_clean(pfn);
+	return ret;
 }
 
 static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
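
The underlying bug is a reference leak: a successful gfn_to_pfn() pins the backing page, and the old early return dropped that pin on the floor. A toy model of the invariant the fix restores (purely illustrative, no KVM code; get/put stand in for gfn_to_pfn() and the kvm_release_pfn_clean() the fix adds):

    #include <assert.h>

    struct pin { int refs; };

    static int lookup(struct pin *p)
    {
        int huge;

        p->refs++;   /* gfn_to_pfn() takes a reference */
        huge = 1;    /* stand-in for the PageTransHuge() query */
        p->refs--;   /* release after the result is consumed */
        return huge;
    }

    int main(void)
    {
        struct pin page = { 0 };

        (void)lookup(&page);
        assert(page.refs == 0); /* balanced on every path: no leak */
        return 0;
    }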
@@ -2880,24 +2887,6 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
 	ggtt_invalidate(gvt->gt);
 }
 
-/**
- * intel_vgpu_reset_gtt - reset the all GTT related status
- * @vgpu: a vGPU
- *
- * This function is called from vfio core to reset reset all
- * GTT related status, including GGTT, PPGTT, scratch page.
- *
- */
-void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
-{
-	/* Shadow pages are only created when there is no page
-	 * table tracking data, so remove page tracking data after
-	 * removing the shadow pages.
-	 */
-	intel_vgpu_destroy_all_ppgtt_mm(vgpu);
-	intel_vgpu_reset_ggtt(vgpu, true);
-}
-
 /**
  * intel_gvt_restore_ggtt - restore all vGPU's ggtt entries
  * @gvt: intel gvt device


@@ -224,7 +224,6 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old);
 void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu);
 
 int intel_gvt_init_gtt(struct intel_gvt *gvt);
-void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu);
 void intel_gvt_clean_gtt(struct intel_gvt *gvt);
 
 struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,


@@ -141,9 +141,7 @@ static void i915_fence_release(struct dma_fence *fence)
 	i915_sw_fence_fini(&rq->semaphore);
 
 	/*
-	 * Keep one request on each engine for reserved use under mempressure
-	 * do not use with virtual engines as this really is only needed for
-	 * kernel contexts.
+	 * Keep one request on each engine for reserved use under mempressure.
 	 *
 	 * We do not hold a reference to the engine here and so have to be
 	 * very careful in what rq->engine we poke. The virtual engine is
@@ -173,8 +171,7 @@ static void i915_fence_release(struct dma_fence *fence)
 	 * know that if the rq->execution_mask is a single bit, rq->engine
 	 * can be a physical engine with the exact corresponding mask.
 	 */
-	if (!intel_engine_is_virtual(rq->engine) &&
-	    is_power_of_2(rq->execution_mask) &&
+	if (is_power_of_2(rq->execution_mask) &&
 	    !cmpxchg(&rq->engine->request_pool, NULL, rq))
 		return;


@ -1,4 +1,4 @@
/* $OpenBSD: if_aq_pci.c,v 1.23 2023/08/15 08:27:30 miod Exp $ */
/* $OpenBSD: if_aq_pci.c,v 1.24 2023/09/19 14:14:35 jsg Exp $ */
/* $NetBSD: if_aq.c,v 1.27 2021/06/16 00:21:18 riastradh Exp $ */
/*
@@ -2606,7 +2606,7 @@ aq_hw_init_rx_path(struct aq_softc *sc)
 		    0, AQ2_RPF_TAG_VLAN_MASK | AQ2_RPF_TAG_UNTAG_MASK,
 		    AQ2_ART_ACTION_DROP);
 
-		for (int i = 0; i < 8; i++) {
+		for (i = 0; i < 8; i++) {
 			aq2_filter_art_set(sc, AQ2_RPF_INDEX_PCP_TO_TC + i,
 			    (i << AQ2_RPF_TAG_PCP_SHIFT), AQ2_RPF_TAG_PCP_MASK,
 			    AQ2_ART_ACTION_ASSIGN_TC(i % sc->sc_nqueues));
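
This change drops a C99-style redeclaration: the new loop reuses an i that aq_hw_init_rx_path() already declares at function scope, which the inner for (int i = ...) had been shadowing. A minimal illustration (not driver code) of the pattern being removed:

    static void example(void)
    {
        int i;

        /* Shadowing form (removed): the loop's own "int i" hides the
         * function-scope i, which a -Wshadow build flags. */
        for (int i = 0; i < 8; i++)
            ;

        /* Fixed form: reuse the function-scope variable. */
        for (i = 0; i < 8; i++)
            ;
    }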