sync with OpenBSD -current

parent 302c0be22f
commit ee61daa776

105 changed files with 1609 additions and 484 deletions
@@ -1059,3 +1059,33 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
	plane_config->fb = intel_fb;
}

bool i9xx_fixup_initial_plane_config(struct intel_crtc *crtc,
				     const struct intel_initial_plane_config *plane_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	const struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	u32 base;

	if (!plane_state->uapi.visible)
		return false;

	base = intel_plane_ggtt_offset(plane_state);

	/*
	 * We may have moved the surface to a different
	 * part of ggtt, make the plane aware of that.
	 */
	if (plane_config->base == base)
		return false;

	if (DISPLAY_VER(dev_priv) >= 4)
		intel_de_write(dev_priv, DSPSURF(i9xx_plane), base);
	else
		intel_de_write(dev_priv, DSPADDR(i9xx_plane), base);

	return true;
}
@@ -26,4 +26,6 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe);

void i9xx_get_initial_plane_config(struct intel_crtc *crtc,
				   struct intel_initial_plane_config *plane_config);
bool i9xx_fixup_initial_plane_config(struct intel_crtc *crtc,
				     const struct intel_initial_plane_config *plane_config);
#endif
@@ -7764,6 +7764,7 @@ static const struct intel_display_funcs skl_display_funcs = {
	.crtc_disable = hsw_crtc_disable,
	.commit_modeset_enables = skl_commit_modeset_enables,
	.get_initial_plane_config = skl_get_initial_plane_config,
	.fixup_initial_plane_config = skl_fixup_initial_plane_config,
};

static const struct intel_display_funcs ddi_display_funcs = {
@@ -7772,6 +7773,7 @@ static const struct intel_display_funcs ddi_display_funcs = {
	.crtc_disable = hsw_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
	.fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};

static const struct intel_display_funcs pch_split_display_funcs = {
@@ -7780,6 +7782,7 @@ static const struct intel_display_funcs pch_split_display_funcs = {
	.crtc_disable = ilk_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
	.fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};

static const struct intel_display_funcs vlv_display_funcs = {
@@ -7788,6 +7791,7 @@ static const struct intel_display_funcs vlv_display_funcs = {
	.crtc_disable = i9xx_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
	.fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};

static const struct intel_display_funcs i9xx_display_funcs = {
@@ -7796,6 +7800,7 @@ static const struct intel_display_funcs i9xx_display_funcs = {
	.crtc_disable = i9xx_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
	.fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};

/**
@@ -63,6 +63,8 @@ struct intel_display_funcs {
				 struct intel_crtc_state *);
	void (*get_initial_plane_config)(struct intel_crtc *,
					 struct intel_initial_plane_config *);
	bool (*fixup_initial_plane_config)(struct intel_crtc *crtc,
					   const struct intel_initial_plane_config *plane_config);
	void (*crtc_enable)(struct intel_atomic_state *state,
			    struct intel_crtc *crtc);
	void (*crtc_disable)(struct intel_atomic_state *state,
@@ -277,7 +277,6 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915)
{
	struct drm_device *dev = &i915->drm;
	enum pipe pipe;
	struct intel_crtc *crtc;
	int ret;

	if (!HAS_DISPLAY(i915))
@@ -327,11 +326,7 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915)
	intel_acpi_assign_connector_fwnodes(i915);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
			continue;
		intel_crtc_initial_plane_config(crtc);
	}
	intel_initial_plane_config(i915);

	/*
	 * Make sure hardware watermarks really match the state we read out.
@@ -770,6 +770,8 @@ struct intel_plane_state {

struct intel_initial_plane_config {
	struct intel_framebuffer *fb;
	struct intel_memory_region *mem;
	resource_size_t phys_base;
	struct i915_vma *vma;
	unsigned int tiling;
	int size;
@@ -1556,7 +1556,7 @@ static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
}

static int
skl_ddi_calculate_wrpll(int clock /* in Hz */,
skl_ddi_calculate_wrpll(int clock,
			int ref_clock,
			struct skl_wrpll_params *wrpll_params)
{
@@ -1581,7 +1581,7 @@ skl_ddi_calculate_wrpll(int clock /* in Hz */,
	};
	unsigned int dco, d, i;
	unsigned int p0, p1, p2;
	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
	u64 afe_clock = (u64)clock * 1000 * 5; /* AFE Clock is 5x Pixel clock, in Hz */

	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
@@ -1713,7 +1713,7 @@ static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)

	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);

	ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
	ret = skl_ddi_calculate_wrpll(crtc_state->port_clock,
				      i915->display.dpll.ref_clks.nssc, &wrpll_params);
	if (ret)
		return ret;
@@ -294,8 +294,9 @@ static int intelfb_create(struct drm_fb_helper *helper,

		/* Use fbdev's framebuffer from lmem for discrete */
		info->fix.smem_start =
			(unsigned long)(mem->io_start +
					i915_gem_object_get_dma_address(obj, 0));
			(unsigned long)(mem->io.start +
					i915_gem_object_get_dma_address(obj, 0) -
					mem->region.start);
		info->fix.smem_len = obj->base.size;
	} else {
		/* Our framebuffer is the entirety of fbdev's system memory */
@@ -249,7 +249,7 @@
#define HDCP2_STREAM_STATUS(dev_priv, trans, port) \
	(GRAPHICS_VER(dev_priv) >= 12 ? \
	 TRANS_HDCP2_STREAM_STATUS(trans) : \
	 PIPE_HDCP2_STREAM_STATUS(pipe))
	 PIPE_HDCP2_STREAM_STATUS(port))

#define _PORTA_HDCP2_AUTH_STREAM	0x66F00
#define _PORTB_HDCP2_AUTH_STREAM	0x66F04
@@ -3,29 +3,32 @@
 * Copyright © 2021 Intel Corporation
 */

#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "intel_atomic_plane.h"
#include "intel_crtc.h"
#include "intel_display.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_plane_initial.h"

static bool
intel_reuse_initial_plane_obj(struct drm_i915_private *i915,
			      const struct intel_initial_plane_config *plane_config,
intel_reuse_initial_plane_obj(struct intel_crtc *this,
			      const struct intel_initial_plane_config plane_configs[],
			      struct drm_framebuffer **fb,
			      struct i915_vma **vma)
{
	struct drm_i915_private *i915 = to_i915(this->base.dev);
	struct intel_crtc *crtc;

	for_each_intel_crtc(&i915->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane *plane =
			to_intel_plane(crtc->base.primary);
		struct intel_plane_state *plane_state =
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		if (!crtc_state->uapi.active)
			continue;
@@ -33,7 +36,7 @@ intel_reuse_initial_plane_obj(struct drm_i915_private *i915,
		if (!plane_state->ggtt_vma)
			continue;

		if (intel_plane_ggtt_offset(plane_state) == plane_config->base) {
		if (plane_configs[this->pipe].base == plane_configs[crtc->pipe].base) {
			*fb = plane_state->hw.fb;
			*vma = plane_state->ggtt_vma;
			return true;
@@ -43,12 +46,100 @@ intel_reuse_initial_plane_obj(struct drm_i915_private *i915,
	return false;
}

static bool
initial_plane_phys_lmem(struct drm_i915_private *i915,
			struct intel_initial_plane_config *plane_config)
{
	gen8_pte_t __iomem *gte = to_gt(i915)->ggtt->gsm;
	struct intel_memory_region *mem;
	dma_addr_t dma_addr;
	gen8_pte_t pte;
	u32 base;

	base = round_down(plane_config->base, I915_GTT_MIN_ALIGNMENT);

	gte += base / I915_GTT_PAGE_SIZE;

	pte = ioread64(gte);
	if (!(pte & GEN12_GGTT_PTE_LM)) {
		drm_err(&i915->drm,
			"Initial plane programming missing PTE_LM bit\n");
		return false;
	}

	dma_addr = pte & GEN12_GGTT_PTE_ADDR_MASK;

	if (IS_DGFX(i915))
		mem = i915->mm.regions[INTEL_REGION_LMEM_0];
	else
		mem = i915->mm.stolen_region;
	if (!mem) {
		drm_dbg_kms(&i915->drm,
			    "Initial plane memory region not initialized\n");
		return false;
	}

	/*
	 * On lmem we don't currently expect this to
	 * ever be placed in the stolen portion.
	 */
	if (dma_addr < mem->region.start || dma_addr > mem->region.end) {
		drm_err(&i915->drm,
			"Initial plane programming using invalid range, dma_addr=%pa (%s [%pa-%pa])\n",
			&dma_addr, mem->region.name, &mem->region.start, &mem->region.end);
		return false;
	}

	drm_dbg(&i915->drm,
		"Using dma_addr=%pa, based on initial plane programming\n",
		&dma_addr);

	plane_config->phys_base = dma_addr - mem->region.start;
	plane_config->mem = mem;

	return true;
}

static bool
initial_plane_phys_smem(struct drm_i915_private *i915,
			struct intel_initial_plane_config *plane_config)
{
	struct intel_memory_region *mem;
	u32 base;

	base = round_down(plane_config->base, I915_GTT_MIN_ALIGNMENT);

	mem = i915->mm.stolen_region;
	if (!mem) {
		drm_dbg_kms(&i915->drm,
			    "Initial plane memory region not initialized\n");
		return false;
	}

	/* FIXME get and validate the dma_addr from the PTE */
	plane_config->phys_base = base;
	plane_config->mem = mem;

	return true;
}

static bool
initial_plane_phys(struct drm_i915_private *i915,
		   struct intel_initial_plane_config *plane_config)
{
	if (IS_DGFX(i915) || HAS_LMEMBAR_SMEM_STOLEN(i915))
		return initial_plane_phys_lmem(i915, plane_config);
	else
		return initial_plane_phys_smem(i915, plane_config);
}

static struct i915_vma *
initial_plane_vma(struct drm_i915_private *i915,
		  struct intel_initial_plane_config *plane_config)
{
	struct intel_memory_region *mem;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node orig_mm = {};
	struct i915_vma *vma;
	resource_size_t phys_base;
	u32 base, size;
@@ -57,45 +148,13 @@ initial_plane_vma(struct drm_i915_private *i915,
	if (plane_config->size == 0)
		return NULL;

	base = round_down(plane_config->base, I915_GTT_MIN_ALIGNMENT);
	if (IS_DGFX(i915)) {
		gen8_pte_t __iomem *gte = to_gt(i915)->ggtt->gsm;
		gen8_pte_t pte;

		gte += base / I915_GTT_PAGE_SIZE;

		pte = ioread64(gte);
		if (!(pte & GEN12_GGTT_PTE_LM)) {
			drm_err(&i915->drm,
				"Initial plane programming missing PTE_LM bit\n");
			return NULL;
		}

		phys_base = pte & I915_GTT_PAGE_MASK;
		mem = i915->mm.regions[INTEL_REGION_LMEM_0];

		/*
		 * We don't currently expect this to ever be placed in the
		 * stolen portion.
		 */
		if (phys_base >= resource_size(&mem->region)) {
			drm_err(&i915->drm,
				"Initial plane programming using invalid range, phys_base=%pa\n",
				&phys_base);
			return NULL;
		}

		drm_dbg(&i915->drm,
			"Using phys_base=%pa, based on initial plane programming\n",
			&phys_base);
	} else {
		phys_base = base;
		mem = i915->mm.stolen_region;
	}

	if (!mem)
	if (!initial_plane_phys(i915, plane_config))
		return NULL;

	phys_base = plane_config->phys_base;
	mem = plane_config->mem;

	base = round_down(plane_config->base, I915_GTT_MIN_ALIGNMENT);
	size = round_up(plane_config->base + plane_config->size,
			mem->min_page_size);
	size -= base;
@@ -107,14 +166,19 @@ initial_plane_vma(struct drm_i915_private *i915,
	 */
	if (IS_ENABLED(CONFIG_FRAMEBUFFER_CONSOLE) &&
	    mem == i915->mm.stolen_region &&
	    size * 2 > i915->dsm.usable_size)
	    size * 2 > i915->dsm.usable_size) {
		drm_dbg_kms(&i915->drm, "Initial FB size exceeds half of stolen, discarding\n");
		return NULL;
	}

	obj = i915_gem_object_create_region_at(mem, phys_base, size,
					       I915_BO_ALLOC_USER |
					       I915_BO_PREALLOC);
	if (IS_ERR(obj))
	if (IS_ERR(obj)) {
		drm_dbg_kms(&i915->drm, "Failed to preallocate initial FB in %s\n",
			    mem->region.name);
		return NULL;
	}

	/*
	 * Mark it WT ahead of time to avoid changing the
@@ -138,23 +202,66 @@ initial_plane_vma(struct drm_i915_private *i915,
		goto err_obj;
	}

	/*
	 * MTL GOP likes to place the framebuffer high up in ggtt,
	 * which can cause problems for ggtt_reserve_guc_top().
	 * Try to pin it to a low ggtt address instead to avoid that.
	 */
	base = 0;

	if (base != plane_config->base) {
		struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
		int ret;

		/*
		 * Make sure the original and new locations
		 * can't overlap. That would corrupt the original
		 * PTEs which are still being used for scanout.
		 */
		ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, &orig_mm,
					   size, plane_config->base,
					   I915_COLOR_UNEVICTABLE, PIN_NOEVICT);
		if (ret)
			goto err_obj;
	}

	vma = i915_vma_instance(obj, &to_gt(i915)->ggtt->vm, NULL);
	if (IS_ERR(vma))
		goto err_obj;

retry:
	pinctl = PIN_GLOBAL | PIN_OFFSET_FIXED | base;
	if (HAS_GMCH(i915))
	if (!i915_gem_object_is_lmem(obj))
		pinctl |= PIN_MAPPABLE;
	if (i915_vma_pin(vma, 0, 0, pinctl))
	if (i915_vma_pin(vma, 0, 0, pinctl)) {
		if (drm_mm_node_allocated(&orig_mm)) {
			drm_mm_remove_node(&orig_mm);
			/*
			 * Try again, but this time pin
			 * it to its original location.
			 */
			base = plane_config->base;
			goto retry;
		}
		goto err_obj;
	}

	if (i915_gem_object_is_tiled(obj) &&
	    !i915_vma_is_map_and_fenceable(vma))
		goto err_obj;

	if (drm_mm_node_allocated(&orig_mm))
		drm_mm_remove_node(&orig_mm);

	drm_dbg_kms(&i915->drm,
		    "Initial plane fb bound to 0x%x in the ggtt (original 0x%x)\n",
		    i915_ggtt_offset(vma), plane_config->base);

	return vma;

err_obj:
	if (drm_mm_node_allocated(&orig_mm))
		drm_mm_remove_node(&orig_mm);
	i915_gem_object_put(obj);
	return NULL;
}
@@ -209,10 +316,11 @@ err_vma:

static void
intel_find_initial_plane_obj(struct intel_crtc *crtc,
			     struct intel_initial_plane_config *plane_config)
			     struct intel_initial_plane_config plane_configs[])
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_initial_plane_config *plane_config =
		&plane_configs[crtc->pipe];
	struct intel_plane *plane =
		to_intel_plane(crtc->base.primary);
	struct intel_plane_state *plane_state =
@@ -238,7 +346,7 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc,
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	if (intel_reuse_initial_plane_obj(dev_priv, plane_config, &fb, &vma))
	if (intel_reuse_initial_plane_obj(crtc, plane_configs, &fb, &vma))
		goto valid_fb;

	/*
@@ -301,25 +409,36 @@ static void plane_config_fini(struct intel_initial_plane_config *plane_config)
		i915_vma_put(plane_config->vma);
}

void intel_crtc_initial_plane_config(struct intel_crtc *crtc)
void intel_initial_plane_config(struct drm_i915_private *i915)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_initial_plane_config plane_config = {};
	struct intel_initial_plane_config plane_configs[I915_MAX_PIPES] = {};
	struct intel_crtc *crtc;

	/*
	 * Note that reserving the BIOS fb up front prevents us
	 * from stuffing other stolen allocations like the ring
	 * on top. This prevents some ugliness at boot time, and
	 * can even allow for smooth boot transitions if the BIOS
	 * fb is large enough for the active pipe configuration.
	 */
	dev_priv->display.funcs.display->get_initial_plane_config(crtc, &plane_config);
	for_each_intel_crtc(&i915->drm, crtc) {
		struct intel_initial_plane_config *plane_config =
			&plane_configs[crtc->pipe];

	/*
	 * If the fb is shared between multiple heads, we'll
	 * just get the first one.
	 */
	intel_find_initial_plane_obj(crtc, &plane_config);
		if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
			continue;

	plane_config_fini(&plane_config);
		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top. This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		i915->display.funcs.display->get_initial_plane_config(crtc, plane_config);

		/*
		 * If the fb is shared between multiple heads, we'll
		 * just get the first one.
		 */
		intel_find_initial_plane_obj(crtc, plane_configs);

		if (i915->display.funcs.display->fixup_initial_plane_config(crtc, plane_config))
			intel_crtc_wait_for_next_vblank(crtc);

		plane_config_fini(plane_config);
	}
}
@@ -6,8 +6,8 @@
#ifndef __INTEL_PLANE_INITIAL_H__
#define __INTEL_PLANE_INITIAL_H__

struct intel_crtc;
struct drm_i915_private;

void intel_crtc_initial_plane_config(struct intel_crtc *crtc);
void intel_initial_plane_config(struct drm_i915_private *i915);

#endif
@@ -2557,3 +2557,31 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
error:
	kfree(intel_fb);
}

bool skl_fixup_initial_plane_config(struct intel_crtc *crtc,
				    const struct intel_initial_plane_config *plane_config)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	const struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);
	enum plane_id plane_id = plane->id;
	enum pipe pipe = crtc->pipe;
	u32 base;

	if (!plane_state->uapi.visible)
		return false;

	base = intel_plane_ggtt_offset(plane_state);

	/*
	 * We may have moved the surface to a different
	 * part of ggtt, make the plane aware of that.
	 */
	if (plane_config->base == base)
		return false;

	intel_de_write(i915, PLANE_SURF(pipe, plane_id), base);

	return true;
}
@@ -22,6 +22,8 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe);

void skl_get_initial_plane_config(struct intel_crtc *crtc,
				  struct intel_initial_plane_config *plane_config);
bool skl_fixup_initial_plane_config(struct intel_crtc *crtc,
				    const struct intel_initial_plane_config *plane_config);

int skl_format_to_fourcc(int format, bool rgb_order, bool alpha);
@@ -129,7 +129,7 @@ i915_gem_object_create_region_at(struct intel_memory_region *mem,
		return ERR_PTR(-EINVAL);

	if (!(flags & I915_BO_ALLOC_GPU_ONLY) &&
	    offset + size > mem->io_size &&
	    offset + size > resource_size(&mem->io) &&
	    !i915_ggtt_has_aperture(to_gt(mem->i915)->ggtt))
		return ERR_PTR(-ENOSPC);
@@ -545,7 +545,9 @@ static int i915_gem_init_stolen(struct intel_memory_region *mem)

	/* Exclude the reserved region from driver use */
	mem->region.end = i915->dsm.reserved.start - 1;
	mem->io_size = min(mem->io_size, resource_size(&mem->region));
	mem->io = DEFINE_RES_MEM(mem->io.start,
				 min(resource_size(&mem->io),
				     resource_size(&mem->region)));

	i915->dsm.usable_size = resource_size(&mem->region);

@@ -756,7 +758,7 @@ static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
	 * With discrete devices, where we lack a mappable aperture there is no
	 * possible way to ever access this memory on the CPU side.
	 */
	if (mem->type == INTEL_MEMORY_STOLEN_LOCAL && !mem->io_size &&
	if (mem->type == INTEL_MEMORY_STOLEN_LOCAL && !resource_size(&mem->io) &&
	    !(flags & I915_BO_ALLOC_GPU_ONLY))
		return -ENOSPC;
@@ -830,7 +832,6 @@ static const struct intel_memory_region_ops i915_region_stolen_smem_ops = {

static int init_stolen_lmem(struct intel_memory_region *mem)
{
	struct drm_i915_private *i915 = mem->i915;
	int err;

	if (GEM_WARN_ON(resource_size(&mem->region) == 0))
@@ -843,38 +844,34 @@ static int init_stolen_lmem(struct intel_memory_region *mem)
	}

#ifdef __linux__
	if (mem->io_size &&
	    !io_mapping_init_wc(&mem->iomap, mem->io_start, mem->io_size))
	if (resource_size(&mem->io) &&
	    !io_mapping_init_wc(&mem->iomap, mem->io.start, resource_size(&mem->io)))
		goto err_cleanup;
#else
	if (mem->io_size) {
	if (resource_size(&mem->io)) {
		paddr_t start, end;
		struct vm_page *pgs;
		int i;
		bus_space_handle_t bsh;

		start = atop(mem->io_start);
		end = start + atop(mem->io_size);
		start = atop(mem->io.start);
		end = start + atop(resource_size(&mem->io));
		uvm_page_physload(start, end, start, end, PHYSLOAD_DEVICE);

		pgs = PHYS_TO_VM_PAGE(mem->io_start);
		for (i = 0; i < atop(mem->io_size); i++)
		pgs = PHYS_TO_VM_PAGE(mem->io.start);
		for (i = 0; i < atop(resource_size(&mem->io)); i++)
			atomic_setbits_int(&(pgs[i].pg_flags), PG_PMAP_WC);

		if (bus_space_map(i915->bst, mem->io_start, mem->io_size,
		if (bus_space_map(mem->i915->bst, mem->io.start, resource_size(&mem->io),
		    BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE, &bsh))
			panic("can't map stolen lmem");

		mem->iomap.base = mem->io_start;
		mem->iomap.size = mem->io_size;
		mem->iomap.iomem = bus_space_vaddr(i915->bst, bsh);
		mem->iomap.base = mem->io.start;
		mem->iomap.size = resource_size(&mem->io);
		mem->iomap.iomem = bus_space_vaddr(mem->i915->bst, bsh);
	}
#endif

	drm_dbg(&i915->drm, "Stolen Local memory IO start: %pa\n",
		&mem->io_start);
	drm_dbg(&i915->drm, "Stolen Local DSM base: %pa\n", &mem->region.start);

	return 0;
#ifdef __linux__
err_cleanup:
@@ -887,7 +884,7 @@ static int release_stolen_lmem(struct intel_memory_region *mem)
{
	STUB();
#ifdef notyet
	if (mem->io_size)
	if (resource_size(&mem->io))
		io_mapping_fini(&mem->iomap);
#endif
	i915_gem_cleanup_stolen(mem->i915);
@@ -987,14 +984,18 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
		GEM_BUG_ON((dsm_base + dsm_size) > lmem_size);
	} else {
		/* Use DSM base address instead for stolen memory */
		dsm_base = intel_uncore_read64(uncore, GEN12_DSMBASE) & GEN12_BDSM_MASK;
		dsm_base = intel_uncore_read64(uncore, GEN6_DSMBASE) & GEN11_BDSM_MASK;
		if (WARN_ON(lmem_size < dsm_base))
			return ERR_PTR(-ENODEV);
		dsm_size = ALIGN_DOWN(lmem_size - dsm_base, SZ_1M);
	}

	if (i915_direct_stolen_access(i915)) {
		drm_dbg(&i915->drm, "Using direct DSM access\n");
		io_start = intel_uncore_read64(uncore, GEN6_DSMBASE) & GEN11_BDSM_MASK;
		io_size = dsm_size;
#ifdef __linux__
	if (pci_resource_len(pdev, GEN12_LMEM_BAR) < lmem_size) {
	} else if (pci_resource_len(pdev, GEN12_LMEM_BAR) < lmem_size) {
		io_start = 0;
		io_size = 0;
	} else {
@@ -1002,7 +1003,7 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
		io_size = dsm_size;
	}
#else
	if (lmem_len < lmem_size) {
	} else if (lmem_len < lmem_size) {
		io_start = 0;
		io_size = 0;
	} else {
@@ -144,13 +144,13 @@ i915_ttm_place_from_region(const struct intel_memory_region *mr,
		place->fpfn = offset >> PAGE_SHIFT;
		WARN_ON(overflows_type(place->fpfn + (size >> PAGE_SHIFT), place->lpfn));
		place->lpfn = place->fpfn + (size >> PAGE_SHIFT);
	} else if (mr->io_size && mr->io_size < mr->total) {
	} else if (resource_size(&mr->io) && resource_size(&mr->io) < mr->total) {
		if (flags & I915_BO_ALLOC_GPU_ONLY) {
			place->flags |= TTM_PL_FLAG_TOPDOWN;
		} else {
			place->fpfn = 0;
			WARN_ON(overflows_type(mr->io_size >> PAGE_SHIFT, place->lpfn));
			place->lpfn = mr->io_size >> PAGE_SHIFT;
			WARN_ON(overflows_type(resource_size(&mr->io) >> PAGE_SHIFT, place->lpfn));
			place->lpfn = resource_size(&mr->io) >> PAGE_SHIFT;
		}
	}
}
@@ -1121,7 +1121,7 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
			struct intel_memory_region *mr = obj->mm.placements[i];
			unsigned int flags;

			if (!mr->io_size && mr->type != INTEL_MEMORY_SYSTEM)
			if (!resource_size(&mr->io) && mr->type != INTEL_MEMORY_SYSTEM)
				continue;

			flags = obj->flags;
@@ -1295,7 +1295,7 @@ vm_fault_ttm(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
			struct intel_memory_region *mr = obj->mm.placements[i];
			unsigned int flags;

			if (!mr->io_size && mr->type != INTEL_MEMORY_SYSTEM)
			if (!resource_size(&mr->io) && mr->type != INTEL_MEMORY_SYSTEM)
				continue;

			flags = obj->flags;
@@ -1052,7 +1052,7 @@ static int igt_fill_mappable(struct intel_memory_region *mr,
	int err;

	total = 0;
	size = mr->io_size;
	size = resource_size(&mr->io);
	do {
		struct drm_i915_gem_object *obj;

@@ -1313,28 +1313,28 @@ static int igt_mmap_migrate(void *arg)
		struct intel_memory_region *mixed[] = { mr, system };
		struct intel_memory_region *single[] = { mr };
		struct ttm_resource_manager *man = mr->region_private;
		resource_size_t saved_io_size;
		struct resource saved_io;
		int err;

		if (mr->private)
			continue;

		if (!mr->io_size)
		if (!resource_size(&mr->io))
			continue;

		/*
		 * For testing purposes let's force small BAR, if not already
		 * present.
		 */
		saved_io_size = mr->io_size;
		if (mr->io_size == mr->total) {
			resource_size_t io_size = mr->io_size;
		saved_io = mr->io;
		if (resource_size(&mr->io) == mr->total) {
			resource_size_t io_size = resource_size(&mr->io);

			io_size = rounddown_pow_of_two(io_size >> 1);
			if (io_size < PAGE_SIZE)
				continue;

			mr->io_size = io_size;
			mr->io = DEFINE_RES_MEM(mr->io.start, io_size);
			i915_ttm_buddy_man_force_visible_size(man,
							      io_size >> PAGE_SHIFT);
		}
@@ -1394,9 +1394,9 @@ static int igt_mmap_migrate(void *arg)
					   IGT_MMAP_MIGRATE_FAIL_GPU |
					   IGT_MMAP_MIGRATE_UNFAULTABLE);
out_io_size:
		mr->io_size = saved_io_size;
		mr->io = saved_io;
		i915_ttm_buddy_man_force_visible_size(man,
						      mr->io_size >> PAGE_SHIFT);
						      resource_size(&mr->io) >> PAGE_SHIFT);
		if (err)
			return err;
	}
@@ -278,7 +278,8 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
		 * deals with Protected Memory which is not needed for
		 * AUX CCS invalidation and lead to unwanted side effects.
		 */
		if (mode & EMIT_FLUSH)
		if ((mode & EMIT_FLUSH) &&
		    GRAPHICS_VER_FULL(rq->i915) < IP_VER(12, 70))
			bit_group_1 |= PIPE_CONTROL_FLUSH_L3;

		bit_group_1 |= PIPE_CONTROL_TILE_CACHE_FLUSH;
@@ -812,12 +813,14 @@ u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
	u32 flags = (PIPE_CONTROL_CS_STALL |
		     PIPE_CONTROL_TLB_INVALIDATE |
		     PIPE_CONTROL_TILE_CACHE_FLUSH |
		     PIPE_CONTROL_FLUSH_L3 |
		     PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
		     PIPE_CONTROL_DEPTH_CACHE_FLUSH |
		     PIPE_CONTROL_DC_FLUSH_ENABLE |
		     PIPE_CONTROL_FLUSH_ENABLE);

	if (GRAPHICS_VER_FULL(rq->i915) < IP_VER(12, 70))
		flags |= PIPE_CONTROL_FLUSH_L3;

	/* Wa_14016712196 */
	if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)) || IS_DG2(i915))
		/* dummy PIPE_CONTROL + depth flush */
@@ -170,6 +170,8 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
#define I915_GEM_HWS_SEQNO		0x40
#define I915_GEM_HWS_SEQNO_ADDR		(I915_GEM_HWS_SEQNO * sizeof(u32))
#define I915_GEM_HWS_MIGRATE		(0x42 * sizeof(u32))
#define I915_GEM_HWS_GGTT_BIND		0x46
#define I915_GEM_HWS_GGTT_BIND_ADDR	(I915_GEM_HWS_GGTT_BIND * sizeof(u32))
#define I915_GEM_HWS_PXP		0x60
#define I915_GEM_HWS_PXP_ADDR		(I915_GEM_HWS_PXP * sizeof(u32))
#define I915_GEM_HWS_GSC		0x62
@@ -1445,6 +1445,20 @@ void intel_engine_destroy_pinned_context(struct intel_context *ce)
	intel_context_put(ce);
}

static struct intel_context *
create_ggtt_bind_context(struct intel_engine_cs *engine)
{
	static struct lock_class_key kernel;

	/*
	 * MI_UPDATE_GTT can insert up to 511 PTE entries and there could be multiple
	 * bind requets at a time so get a bigger ring.
	 */
	return intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_512K,
						  I915_GEM_HWS_GGTT_BIND_ADDR,
						  &kernel, "ggtt_bind_context");
}

static struct intel_context *
create_kernel_context(struct intel_engine_cs *engine)
{
@@ -1468,7 +1482,7 @@ create_kernel_context(struct intel_engine_cs *engine)
 */
static int engine_init_common(struct intel_engine_cs *engine)
{
	struct intel_context *ce;
	struct intel_context *ce, *bce = NULL;
	int ret;

	engine->set_default_submission(engine);
@@ -1484,17 +1498,34 @@ static int engine_init_common(struct intel_engine_cs *engine)
	ce = create_kernel_context(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);
	/*
	 * Create a separate pinned context for GGTT update with blitter engine
	 * if a platform require such service. MI_UPDATE_GTT works on other
	 * engines as well but BCS should be less busy engine so pick that for
	 * GGTT updates.
	 */
	if (i915_ggtt_require_binder(engine->i915) && engine->id == BCS0) {
		bce = create_ggtt_bind_context(engine);
		if (IS_ERR(bce)) {
			ret = PTR_ERR(bce);
			goto err_ce_context;
		}
	}

	ret = measure_breadcrumb_dw(ce);
	if (ret < 0)
		goto err_context;
		goto err_bce_context;

	engine->emit_fini_breadcrumb_dw = ret;
	engine->kernel_context = ce;
	engine->bind_context = bce;

	return 0;

err_context:
err_bce_context:
	if (bce)
		intel_engine_destroy_pinned_context(bce);
err_ce_context:
	intel_engine_destroy_pinned_context(ce);
	return ret;
}
@@ -1564,6 +1595,10 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
	if (engine->kernel_context)
		intel_engine_destroy_pinned_context(engine->kernel_context);

	if (engine->bind_context)
		intel_engine_destroy_pinned_context(engine->bind_context);

	GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
	cleanup_status_page(engine);
@@ -416,6 +416,9 @@ struct intel_engine_cs {
	struct llist_head barrier_tasks;

	struct intel_context *kernel_context; /* pinned */
	struct intel_context *bind_context; /* pinned, only for BCS0 */
	/* mark the bind context's availability status */
	bool bind_context_ready;

	/**
	 * pinned_contexts_list: List of pinned contexts. This list is only
@@ -15,18 +15,24 @@
#include "display/intel_display.h"
#include "gem/i915_gem_lmem.h"

#include "intel_context.h"
#include "intel_ggtt_gmch.h"
#include "intel_gpu_commands.h"
#include "intel_gt.h"
#include "intel_gt_regs.h"
#include "intel_pci_config.h"
#include "intel_ring.h"
#include "i915_drv.h"
#include "i915_pci.h"
#include "i915_reg.h"
#include "i915_request.h"
#include "i915_scatterlist.h"
#include "i915_utils.h"
#include "i915_vgpu.h"

#include "intel_gtt.h"
#include "gen8_ppgtt.h"
#include "intel_engine_pm.h"

static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
				   unsigned long color,
@@ -301,6 +307,145 @@ u64 gen8_ggtt_pte_encode(dma_addr_t addr,
	return pte;
}

static bool should_update_ggtt_with_bind(struct i915_ggtt *ggtt)
{
	struct intel_gt *gt = ggtt->vm.gt;

	return intel_gt_is_bind_context_ready(gt);
}

static struct intel_context *gen8_ggtt_bind_get_ce(struct i915_ggtt *ggtt)
{
	struct intel_context *ce;
	struct intel_gt *gt = ggtt->vm.gt;

	if (intel_gt_is_wedged(gt))
		return NULL;

	ce = gt->engine[BCS0]->bind_context;
	GEM_BUG_ON(!ce);

	/*
	 * If the GT is not awake already at this stage then fallback
	 * to pci based GGTT update otherwise __intel_wakeref_get_first()
	 * would conflict with fs_reclaim trying to allocate memory while
	 * doing rpm_resume().
	 */
	if (!intel_gt_pm_get_if_awake(gt))
		return NULL;

	intel_engine_pm_get(ce->engine);

	return ce;
}

static void gen8_ggtt_bind_put_ce(struct intel_context *ce)
{
	intel_engine_pm_put(ce->engine);
	intel_gt_pm_put(ce->engine->gt);
}

static bool gen8_ggtt_bind_ptes(struct i915_ggtt *ggtt, u32 offset,
				struct sg_table *pages, u32 num_entries,
				const gen8_pte_t pte)
{
	struct i915_sched_attr attr = {};
	struct intel_gt *gt = ggtt->vm.gt;
	const gen8_pte_t scratch_pte = ggtt->vm.scratch[0]->encode;
	struct sgt_iter iter;
	struct i915_request *rq;
	struct intel_context *ce;
	u32 *cs;

	if (!num_entries)
		return true;

	ce = gen8_ggtt_bind_get_ce(ggtt);
	if (!ce)
		return false;

	if (pages)
		iter = __sgt_iter(pages->sgl, true);

	while (num_entries) {
		int count = 0;
		dma_addr_t addr;
		/*
		 * MI_UPDATE_GTT can update 512 entries in a single command but
		 * that end up with engine reset, 511 works.
		 */
		u32 n_ptes = min_t(u32, 511, num_entries);

		if (mutex_lock_interruptible(&ce->timeline->mutex))
			goto put_ce;

		intel_context_enter(ce);
		rq = __i915_request_create(ce, GFP_NOWAIT | GFP_ATOMIC);
		intel_context_exit(ce);
		if (IS_ERR(rq)) {
			GT_TRACE(gt, "Failed to get bind request\n");
			mutex_unlock(&ce->timeline->mutex);
			goto put_ce;
		}

		cs = intel_ring_begin(rq, 2 * n_ptes + 2);
		if (IS_ERR(cs)) {
			GT_TRACE(gt, "Failed to ring space for GGTT bind\n");
			i915_request_set_error_once(rq, PTR_ERR(cs));
			/* once a request is created, it must be queued */
			goto queue_err_rq;
		}

		*cs++ = MI_UPDATE_GTT | (2 * n_ptes);
		*cs++ = offset << 12;

		if (pages) {
			for_each_sgt_daddr_next(addr, iter) {
				if (count == n_ptes)
					break;
				*cs++ = lower_32_bits(pte | addr);
				*cs++ = upper_32_bits(pte | addr);
				count++;
			}
			/* fill remaining with scratch pte, if any */
			if (count < n_ptes) {
				memset64((u64 *)cs, scratch_pte,
					 n_ptes - count);
				cs += (n_ptes - count) * 2;
			}
		} else {
			memset64((u64 *)cs, pte, n_ptes);
			cs += n_ptes * 2;
		}

		intel_ring_advance(rq, cs);
queue_err_rq:
		i915_request_get(rq);
		__i915_request_commit(rq);
		__i915_request_queue(rq, &attr);

		mutex_unlock(&ce->timeline->mutex);
		/* This will break if the request is complete or after engine reset */
		i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
		if (rq->fence.error)
			goto err_rq;

		i915_request_put(rq);

		num_entries -= n_ptes;
		offset += n_ptes;
	}

	gen8_ggtt_bind_put_ce(ce);
	return true;

err_rq:
	i915_request_put(rq);
put_ce:
	gen8_ggtt_bind_put_ce(ce);
	return false;
}

static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
	writeq(pte, addr);
@@ -321,6 +466,21 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
	ggtt->invalidate(ggtt);
}

static void gen8_ggtt_insert_page_bind(struct i915_address_space *vm,
				       dma_addr_t addr, u64 offset,
				       unsigned int pat_index, u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen8_pte_t pte;

	pte = ggtt->vm.pte_encode(addr, pat_index, flags);
	if (should_update_ggtt_with_bind(i915_vm_to_ggtt(vm)) &&
	    gen8_ggtt_bind_ptes(ggtt, offset, NULL, 1, pte))
		return ggtt->invalidate(ggtt);

	gen8_ggtt_insert_page(vm, addr, offset, pat_index, flags);
}

static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
				     struct i915_vma_resource *vma_res,
				     unsigned int pat_index,
@@ -360,6 +520,50 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
	ggtt->invalidate(ggtt);
}

static bool __gen8_ggtt_insert_entries_bind(struct i915_address_space *vm,
					    struct i915_vma_resource *vma_res,
					    unsigned int pat_index, u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen8_pte_t scratch_pte = vm->scratch[0]->encode;
	gen8_pte_t pte_encode;
	u64 start, end;

	pte_encode = ggtt->vm.pte_encode(0, pat_index, flags);
	start = (vma_res->start - vma_res->guard) / I915_GTT_PAGE_SIZE;
	end = start + vma_res->guard / I915_GTT_PAGE_SIZE;
	if (!gen8_ggtt_bind_ptes(ggtt, start, NULL, end - start, scratch_pte))
		goto err;

	start = end;
	end += (vma_res->node_size + vma_res->guard) / I915_GTT_PAGE_SIZE;
	if (!gen8_ggtt_bind_ptes(ggtt, start, vma_res->bi.pages,
				 vma_res->node_size / I915_GTT_PAGE_SIZE, pte_encode))
		goto err;

	start += vma_res->node_size / I915_GTT_PAGE_SIZE;
	if (!gen8_ggtt_bind_ptes(ggtt, start, NULL, end - start, scratch_pte))
		goto err;

	return true;

err:
	return false;
}

static void gen8_ggtt_insert_entries_bind(struct i915_address_space *vm,
					  struct i915_vma_resource *vma_res,
					  unsigned int pat_index, u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);

	if (should_update_ggtt_with_bind(i915_vm_to_ggtt(vm)) &&
	    __gen8_ggtt_insert_entries_bind(vm, vma_res, pat_index, flags))
		return ggtt->invalidate(ggtt);

	gen8_ggtt_insert_entries(vm, vma_res, pat_index, flags);
}

static void gen8_ggtt_clear_range(struct i915_address_space *vm,
				  u64 start, u64 length)
{
@@ -381,6 +585,27 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
		gen8_set_pte(&gtt_base[i], scratch_pte);
}

static void gen8_ggtt_scratch_range_bind(struct i915_address_space *vm,
					 u64 start, u64 length)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
	unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
	const gen8_pte_t scratch_pte = vm->scratch[0]->encode;
	const int max_entries = ggtt_total_entries(ggtt) - first_entry;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	if (should_update_ggtt_with_bind(ggtt) && gen8_ggtt_bind_ptes(ggtt, first_entry,
	    NULL, num_entries, scratch_pte))
		return ggtt->invalidate(ggtt);

	gen8_ggtt_clear_range(vm, start, length);
}

static void gen6_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
@@ -947,13 +1172,20 @@ static unsigned int gen6_gttadr_offset(struct drm_i915_private *i915)
static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	phys_addr_t phys_addr;
	u32 pte_flags;
	int ret;

	GEM_WARN_ON(pci_resource_len(pdev, GEN4_GTTMMADR_BAR) != gen6_gttmmadr_size(i915));
	phys_addr = pci_resource_start(pdev, GEN4_GTTMMADR_BAR) + gen6_gttadr_offset(i915);

	if (i915_direct_stolen_access(i915)) {
		drm_dbg(&i915->drm, "Using direct GSM access\n");
		phys_addr = intel_uncore_read64(uncore, GEN6_GSMBASE) & GEN11_BDSM_MASK;
	} else {
		phys_addr = pci_resource_start(pdev, GEN4_GTTMMADR_BAR) + gen6_gttadr_offset(i915);
	}

	if (needs_wc_ggtt_mapping(i915))
		ggtt->gsm = ioremap_wc(phys_addr, size);
@@ -992,6 +1224,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
	struct pci_dev *pdev = i915->drm.pdev;
	phys_addr_t phys_addr;
	bus_addr_t addr;
@@ -1008,7 +1241,13 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
		return ret;

	GEM_WARN_ON(len != gen6_gttmmadr_size(i915));
	phys_addr = addr + gen6_gttadr_offset(i915);

	if (i915_direct_stolen_access(i915)) {
		drm_dbg(&i915->drm, "Using direct GSM access\n");
		phys_addr = intel_uncore_read64(uncore, GEN6_GSMBASE) & GEN11_BDSM_MASK;
	} else {
		phys_addr = addr + gen6_gttadr_offset(i915);
	}

	if (needs_wc_ggtt_mapping(i915))
		flags = BUS_SPACE_MAP_PREFETCHABLE;
@@ -1141,6 +1380,17 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
			I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
	}

	if (i915_ggtt_require_binder(i915)) {
		ggtt->vm.scratch_range = gen8_ggtt_scratch_range_bind;
		ggtt->vm.insert_page = gen8_ggtt_insert_page_bind;
		ggtt->vm.insert_entries = gen8_ggtt_insert_entries_bind;
		/*
		 * On GPU is hung, we might bind VMAs for error capture.
		 * Fallback to CPU GGTT updates in that case.
		 */
		ggtt->vm.raw_insert_page = gen8_ggtt_insert_page;
	}

	if (intel_uc_wants_guc(&ggtt->vm.gt->uc))
		ggtt->invalidate = guc_ggtt_invalidate;
	else
@@ -1073,6 +1073,55 @@ err:

#endif

static void __intel_gt_bind_context_set_ready(struct intel_gt *gt, bool ready)
{
	struct intel_engine_cs *engine = gt->engine[BCS0];

	if (engine && engine->bind_context)
		engine->bind_context_ready = ready;
}

/**
 * intel_gt_bind_context_set_ready - Set the context binding as ready
 *
 * @gt: GT structure
 *
 * This function marks the binder context as ready.
 */
void intel_gt_bind_context_set_ready(struct intel_gt *gt)
{
	__intel_gt_bind_context_set_ready(gt, true);
}

/**
 * intel_gt_bind_context_set_unready - Set the context binding as ready
 * @gt: GT structure
 *
 * This function marks the binder context as not ready.
 */

void intel_gt_bind_context_set_unready(struct intel_gt *gt)
{
	__intel_gt_bind_context_set_ready(gt, false);
}

/**
 * intel_gt_is_bind_context_ready - Check if context binding is ready
 *
 * @gt: GT structure
 *
 * This function returns binder context's ready status.
 */
bool intel_gt_is_bind_context_ready(struct intel_gt *gt)
{
	struct intel_engine_cs *engine = gt->engine[BCS0];

	if (engine)
		return engine->bind_context_ready;

	return false;
}

int intel_gt_tiles_init(struct drm_i915_private *i915)
{
	struct intel_gt *gt;
@@ -148,4 +148,7 @@ enum i915_map_type intel_gt_coherent_map_type(struct intel_gt *gt,
					      struct drm_i915_gem_object *obj,
					      bool always_coherent);

void intel_gt_bind_context_set_ready(struct intel_gt *gt);
void intel_gt_bind_context_set_unready(struct intel_gt *gt);
bool intel_gt_is_bind_context_ready(struct intel_gt *gt);
#endif /* __INTEL_GT_H__ */
@@ -296,6 +296,7 @@ int intel_gt_resume(struct intel_gt *gt)
out_fw:
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
	intel_gt_pm_put(gt);
	intel_gt_bind_context_set_ready(gt);
	return err;

err_wedged:
@@ -322,6 +323,7 @@ static void wait_for_suspend(struct intel_gt *gt)

void intel_gt_suspend_prepare(struct intel_gt *gt)
{
	intel_gt_bind_context_set_unready(gt);
	user_forcewake(gt, true);
	wait_for_suspend(gt);
}
@@ -375,6 +377,7 @@ void intel_gt_suspend_late(struct intel_gt *gt)

void intel_gt_runtime_suspend(struct intel_gt *gt)
{
	intel_gt_bind_context_set_unready(gt);
	intel_uc_runtime_suspend(&gt->uc);

	GT_TRACE(gt, "\n");
@@ -392,6 +395,7 @@ int intel_gt_runtime_resume(struct intel_gt *gt)
	if (ret)
		return ret;

	intel_gt_bind_context_set_ready(gt);
	return 0;
}
@@ -21,6 +21,12 @@
#include "intel_gt_regs.h"
#include "intel_gtt.h"

bool i915_ggtt_require_binder(struct drm_i915_private *i915)
{
	/* Wa_13010847436 & Wa_14019519902 */
	return !i915_direct_stolen_access(i915) &&
		MEDIA_VER_FULL(i915) == IP_VER(13, 0);
}

static bool intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *i915)
{
@@ -171,6 +171,9 @@ struct intel_gt;
#define for_each_sgt_daddr(__dp, __iter, __sgt) \
	__for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE)

#define for_each_sgt_daddr_next(__dp, __iter) \
	__for_each_daddr_next(__dp, __iter, I915_GTT_PAGE_SIZE)

struct i915_page_table {
	struct drm_i915_gem_object *base;
	union {
@@ -690,4 +693,6 @@ static inline struct sgt_dma {
	return (struct sgt_dma){ sg, addr, addr + sg_dma_len(sg) };
}

bool i915_ggtt_require_binder(struct drm_i915_private *i915);

#endif
@@ -157,8 +157,8 @@ region_lmem_init(struct intel_memory_region *mem)

#ifdef __linux__
	if (!io_mapping_init_wc(&mem->iomap,
				mem->io_start,
				mem->io_size))
				mem->io.start,
				resource_size(&mem->io)))
		return -EIO;
#else
	struct drm_i915_private *i915 = mem->i915;
@@ -167,20 +167,20 @@ region_lmem_init(struct intel_memory_region *mem)
	int i;
	bus_space_handle_t bsh;

	start = atop(mem->io_start);
	end = start + atop(mem->io_size);
	start = atop(mem->io.start);
	end = start + atop(resource_size(&mem->io));
	uvm_page_physload(start, end, start, end, PHYSLOAD_DEVICE);

	pgs = PHYS_TO_VM_PAGE(mem->io_start);
	for (i = 0; i < atop(mem->io_size); i++)
	pgs = PHYS_TO_VM_PAGE(mem->io.start);
	for (i = 0; i < atop(resource_size(&mem->io)); i++)
		atomic_setbits_int(&(pgs[i].pg_flags), PG_PMAP_WC);

	if (bus_space_map(i915->bst, mem->io_start, mem->io_size,
	if (bus_space_map(i915->bst, mem->io.start, resource_size(&mem->io),
	    BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE, &bsh))
		panic("can't map lmem");

	mem->iomap.base = mem->io_start;
	mem->iomap.size = mem->io_size;
	mem->iomap.base = mem->io.start;
	mem->iomap.size = resource_size(&mem->io);
	mem->iomap.iomem = bus_space_vaddr(i915->bst, bsh);
#endif

@@ -280,7 +280,7 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
		lmem_size -= tile_stolen;
	} else {
		/* Stolen starts from GSMBASE without CCS */
		lmem_size = intel_uncore_read64(&i915->uncore, GEN12_GSMBASE);
		lmem_size = intel_uncore_read64(&i915->uncore, GEN6_GSMBASE);
	}

	i915_resize_lmem_bar(i915, lmem_size);
@@ -326,14 +326,6 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
	if (err)
		goto err_region_put;

	drm_dbg(&i915->drm, "Local memory: %pR\n", &mem->region);
	drm_dbg(&i915->drm, "Local memory IO start: %pa\n",
		&mem->io_start);
	drm_info(&i915->drm, "Local memory IO size: %pa\n",
		 &mem->io_size);
	drm_info(&i915->drm, "Local memory available: %pa\n",
		 &lmem_size);

	if (io_size < lmem_size)
		drm_info(&i915->drm, "Using a reduced BAR size of %lluMiB. Consider enabling 'Resizable BAR' or similar, if available in the BIOS.\n",
			 (u64)io_size >> 20);
@@ -199,8 +199,8 @@ static struct drm_i915_gem_object *create_lmem(struct intel_gt *gt)
	 * of pages. To succeed with both allocations, especially in case of Small
	 * BAR, try to allocate no more than quarter of mappable memory.
	 */
	if (mr && size > mr->io_size / 4)
		size = mr->io_size / 4;
	if (mr && size > resource_size(&mr->io) / 4)
		size = resource_size(&mr->io) / 4;

	return i915_gem_object_create_lmem(gt->i915, size, I915_BO_ALLOC_CONTIGUOUS);
}
@@ -321,4 +321,9 @@ static const struct pci_matchid i915_devices[] = {
	{ 0x8086, 0xa7a8 },
	{ 0x8086, 0xa7aa },
	{ 0x8086, 0xa7ab },
	{ 0x8086, 0x7d40 },
	{ 0x8086, 0x7d60 },
	{ 0x8086, 0x7d45 },
	{ 0x8086, 0x7d55 },
	{ 0x8086, 0x7dd5 },
};
@@ -2263,6 +2263,11 @@ inteldrm_attach(struct device *parent, struct device *self, void *aux)
	dev_priv->memex = pa->pa_memex;
	dev_priv->vga_regs = &dev_priv->bar;

	id = drm_find_description(PCI_VENDOR(pa->pa_id),
	    PCI_PRODUCT(pa->pa_id), pciidlist);
	dev_priv->id = id;
	info = (struct intel_device_info *)id->driver_data;

	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_DISPLAY &&
	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_DISPLAY_VGA &&
	    (pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG)
@@ -2281,6 +2286,18 @@ inteldrm_attach(struct device *parent, struct device *self, void *aux)
	}
#endif

	/*
	 * Meteor Lake GOP framebuffer doesn't pass efifb pci bar tests
	 * too early for IS_METEORLAKE which uses runtime info
	 */
	if (info->platform == INTEL_METEORLAKE) {
		dev_priv->primary = 1;
		dev_priv->console = 1;
#if NEFIFB > 0
		efifb_detach();
#endif
	}

	printf("\n");

	dev = drm_attach_pci(&i915_drm_driver, pa, 0, dev_priv->primary,
@@ -2290,11 +2307,6 @@ inteldrm_attach(struct device *parent, struct device *self, void *aux)
		return;
	}

	id = drm_find_description(PCI_VENDOR(pa->pa_id),
	    PCI_PRODUCT(pa->pa_id), pciidlist);
	dev_priv->id = id;
	info = (struct intel_device_info *)id->driver_data;

	/* Device parameters start as a copy of module parameters. */
	i915_params_copy(&dev_priv->params, &i915_modparams);
	dev_priv->params.request_timeout_ms = 0;
@@ -1206,7 +1206,7 @@ i915_vma_coredump_create(const struct intel_gt *gt,
			dma_addr_t offset = dma - mem->region.start;
			void __iomem *s;

			if (offset + PAGE_SIZE > mem->io_size) {
			if (offset + PAGE_SIZE > resource_size(&mem->io)) {
				ret = -EINVAL;
				break;
			}
@@ -838,7 +838,6 @@ static const struct intel_device_info mtl_info = {
	.has_pxp = 1,
	.memory_regions = REGION_SMEM | REGION_STOLEN_LMEM,
	.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(CCS0),
	.require_force_probe = 1,
	MTL_CACHELEVEL,
};
@ -2803,26 +2803,6 @@ oa_configure_all_contexts(struct i915_perf_stream *stream,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
gen12_configure_all_contexts(struct i915_perf_stream *stream,
|
||||
const struct i915_oa_config *oa_config,
|
||||
struct i915_active *active)
|
||||
{
|
||||
struct flex regs[] = {
|
||||
{
|
||||
GEN8_R_PWR_CLK_STATE(RENDER_RING_BASE),
|
||||
CTX_R_PWR_CLK_STATE,
|
||||
},
|
||||
};
|
||||
|
||||
if (stream->engine->class != RENDER_CLASS)
|
||||
return 0;
|
||||
|
||||
return oa_configure_all_contexts(stream,
|
||||
regs, ARRAY_SIZE(regs),
|
||||
active);
|
||||
}
|
||||
|
||||
static int
|
||||
lrc_configure_all_contexts(struct i915_perf_stream *stream,
|
||||
const struct i915_oa_config *oa_config,
|
||||
|
@ -2929,7 +2909,6 @@ gen12_enable_metric_set(struct i915_perf_stream *stream,
{
struct drm_i915_private *i915 = stream->perf->i915;
struct intel_uncore *uncore = stream->uncore;
struct i915_oa_config *oa_config = stream->oa_config;
bool periodic = stream->periodic;
u32 period_exponent = stream->period_exponent;
u32 sqcnt1;

@ -2973,15 +2952,6 @@ gen12_enable_metric_set(struct i915_perf_stream *stream,

intel_uncore_rmw(uncore, GEN12_SQCNT1, 0, sqcnt1);

/*
* Update all contexts prior writing the mux configurations as we need
* to make sure all slices/subslices are ON before writing to NOA
* registers.
*/
ret = gen12_configure_all_contexts(stream, oa_config, active);
if (ret)
return ret;

/*
* For Gen12, performance counters are context
* saved/restored. Only enable it for the context that

@ -3036,9 +3006,6 @@ static void gen12_disable_metric_set(struct i915_perf_stream *stream)
_MASKED_BIT_DISABLE(GEN12_DISABLE_DOP_GATING));
}

/* Reset all contexts' slices/subslices configurations. */
gen12_configure_all_contexts(stream, NULL, NULL);

/* disable the context save/restore or OAR counters */
if (stream->ctx)
gen12_configure_oar_context(stream, NULL);

@ -513,7 +513,7 @@ static int query_memregion_info(struct drm_i915_private *i915,
info.probed_size = mr->total;

if (mr->type == INTEL_MEMORY_LOCAL)
info.probed_cpu_visible_size = mr->io_size;
info.probed_cpu_visible_size = resource_size(&mr->io);
else
info.probed_cpu_visible_size = mr->total;

@ -5452,6 +5452,9 @@
#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
#define GEN6_PCODE_DATA1 _MMIO(0x13812C)

#define MTL_PCODE_STOLEN_ACCESS _MMIO(0x138914)
#define STOLEN_ACCESS_ALLOWED 0x1

/* IVYBRIDGE DPF */
#define GEN7_L3CDERRST1(slice) _MMIO(0xB008 + (slice) * 0x200) /* L3CD Error Status 1 */
#define GEN7_L3CDERRST1_ROW_MASK (0x7ff << 14)

@ -6581,9 +6584,10 @@ enum skl_power_gate {
#define GMS_MASK REG_GENMASK(15, 8)
#define GGMS_MASK REG_GENMASK(7, 6)

#define GEN12_GSMBASE _MMIO(0x108100)
#define GEN12_DSMBASE _MMIO(0x1080C0)
#define GEN12_BDSM_MASK REG_GENMASK64(63, 20)
#define GEN6_GSMBASE _MMIO(0x108100)
#define GEN6_DSMBASE _MMIO(0x1080C0)
#define GEN6_BDSM_MASK REG_GENMASK64(31, 20)
#define GEN11_BDSM_MASK REG_GENMASK64(63, 20)

#define XEHP_CLOCK_GATE_DIS _MMIO(0x101014)
#define SGSI_SIDECLK_DIS REG_BIT(17)

@ -91,6 +91,16 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
((__dp) = (__iter).dma + (__iter).curr), (__iter).sgp; \
(((__iter).curr += (__step)) >= (__iter).max) ? \
(__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0)
/**
* __for_each_daddr_next - iterates over the device addresses with pre-initialized iterator.
* @__dp: Device address (output)
* @__iter: 'struct sgt_iter' (iterator state, external)
* @__step: step size
*/
#define __for_each_daddr_next(__dp, __iter, __step) \
for (; ((__dp) = (__iter).dma + (__iter).curr), (__iter).sgp; \
(((__iter).curr += (__step)) >= (__iter).max) ? \
(__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0)

/**
* for_each_sgt_page - iterate over the pages of the given sg_table

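The new __for_each_daddr_next() differs from the existing for_each_daddr() only in that it does not initialize the iterator itself. A minimal sketch of the intended usage, assuming the i915 sgt_iter helpers above; the surrounding function and the PAGE_SIZE step are illustrative, not taken from this commit:

/* Illustrative only: prime the iterator once, then walk device addresses
 * without reinitializing it, so a later loop could resume where this one
 * stopped. */
static void walk_device_addresses(struct sg_table *sgt)
{
	struct sgt_iter iter = __sgt_iter(sgt->sgl, true); /* dma addresses */
	dma_addr_t daddr;

	__for_each_daddr_next(daddr, iter, PAGE_SIZE) {
		/* use daddr, e.g. program it into a page-table entry */
	}
}
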
@ -8,6 +8,7 @@
#include <drm/drm_drv.h>

#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_utils.h"

#include <sys/syslog.h>

@ -138,3 +139,19 @@ bool i915_vtd_active(struct drm_i915_private *i915)
return i915_run_as_guest();
#endif
}

bool i915_direct_stolen_access(struct drm_i915_private *i915)
{
/*
* Wa_22018444074
*
* Access via BAR can hang MTL, go directly to GSM/DSM,
* except for VM guests which won't have access to it.
*
* Normally this would not work but on MTL the system firmware
* should have relaxed the access permissions sufficiently.
* 0x138914==0x1 indicates that the firmware has done its job.
*/
return IS_METEORLAKE(i915) && !i915_run_as_guest() &&
intel_uncore_read(&i915->uncore, MTL_PCODE_STOLEN_ACCESS) == STOLEN_ACCESS_ALLOWED;
}

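As a rough sketch of how i915_direct_stolen_access() is meant to be consumed: when it returns true, stolen-memory setup can read the GSM/DSM base registers added above instead of going through the BAR. The function below is a hypothetical caller for illustration, not an actual call site from this commit:

/* Hypothetical caller, assuming the register definitions added above. */
static u64 example_dsm_base(struct drm_i915_private *i915)
{
	/* Wa_22018444074: on MTL, prefer reading DSMBASE directly over the BAR. */
	if (i915_direct_stolen_access(i915))
		return intel_uncore_read64(&i915->uncore, GEN12_DSMBASE) &
		       GEN12_BDSM_MASK;

	return 0; /* fall back to the BAR-based path */
}
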
@ -401,4 +401,6 @@ static inline bool i915_run_as_guest(void)

bool i915_vtd_active(struct drm_i915_private *i915);

bool i915_direct_stolen_access(struct drm_i915_private *i915);

#endif /* !__I915_UTILS_H */

@ -50,7 +50,7 @@ static int __iopagetest(struct intel_memory_region *mem,
if (memchr_inv(result, value, sizeof(result))) {
dev_err(mem->i915->drm.dev,
"Failed to read back from memory region:%pR at [%pa + %pa] for %ps; wrote %x, read (%x, %x, %x)\n",
&mem->region, &mem->io_start, &offset, caller,
&mem->region, &mem->io.start, &offset, caller,
value, result[0], result[1], result[2]);
return -EINVAL;
}

@ -70,11 +70,11 @@ static int iopagetest(struct intel_memory_region *mem,
int err;
int i;

va = ioremap_wc(mem->io_start + offset, PAGE_SIZE);
va = ioremap_wc(mem->io.start + offset, PAGE_SIZE);
if (!va) {
dev_err(mem->i915->drm.dev,
"Failed to ioremap memory region [%pa + %pa] for %ps\n",
&mem->io_start, &offset, caller);
&mem->io.start, &offset, caller);
return -EFAULT;
}

@ -106,10 +106,10 @@ static int iomemtest(struct intel_memory_region *mem,
resource_size_t last, page;
int err;

if (mem->io_size < PAGE_SIZE)
if (resource_size(&mem->io) < PAGE_SIZE)
return 0;

last = mem->io_size - PAGE_SIZE;
last = resource_size(&mem->io) - PAGE_SIZE;

/*
* Quick test to check read/write access to the iomap (backing store).

@ -211,7 +211,7 @@ static int intel_memory_region_memtest(struct intel_memory_region *mem,
struct drm_i915_private *i915 = mem->i915;
int err = 0;

if (!mem->io_start)
if (!mem->io.start)
return 0;

if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) || i915->params.memtest)

@ -240,8 +240,7 @@ intel_memory_region_create(struct drm_i915_private *i915,

mem->i915 = i915;
mem->region = DEFINE_RES_MEM(start, size);
mem->io_start = io_start;
mem->io_size = io_size;
mem->io = DEFINE_RES_MEM(io_start, io_size);
mem->min_page_size = min_page_size;
mem->ops = ops;
mem->total = size;

@ -360,6 +359,24 @@ int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
i915->mm.regions[i] = mem;
}

for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
struct intel_memory_region *mem = i915->mm.regions[i];
u64 region_size, io_size;

if (!mem)
continue;

region_size = resource_size(&mem->region) >> 20;
io_size = resource_size(&mem->io) >> 20;

if (resource_size(&mem->io))
drm_dbg(&i915->drm, "Memory region(%d): %s: %llu MiB %pR, io: %llu MiB %pR\n",
mem->id, mem->name, region_size, &mem->region, io_size, &mem->io);
else
drm_dbg(&i915->drm, "Memory region(%d): %s: %llu MiB %pR, io: n/a\n",
mem->id, mem->name, region_size, &mem->region);
}

return 0;

out_cleanup:

@ -73,8 +73,7 @@ struct intel_memory_region {
struct io_mapping iomap;
struct resource region;

resource_size_t io_start;
resource_size_t io_size;
struct resource io;
resource_size_t min_page_size;
resource_size_t total;

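The struct change above is what drives the mechanical io_start/io_size substitutions throughout this sync: mem->io.start takes over for io_start and resource_size(&mem->io) for io_size. A minimal sketch of the equivalence, using only the generic Linux resource helpers (the function name is illustrative):

#include <linux/bug.h>    /* WARN_ON */
#include <linux/ioport.h> /* struct resource, DEFINE_RES_MEM, resource_size */

static void example_io_resource(resource_size_t io_start, resource_size_t io_size)
{
	struct resource io = DEFINE_RES_MEM(io_start, io_size);

	WARN_ON(io.start != io_start);          /* old mem->io_start */
	WARN_ON(resource_size(&io) != io_size); /* old mem->io_size  */
}
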
@ -93,7 +93,7 @@ int intel_region_ttm_init(struct intel_memory_region *mem)

ret = i915_ttm_buddy_man_init(bdev, mem_type, false,
resource_size(&mem->region),
mem->io_size,
resource_size(&mem->io),
mem->min_page_size, PAGE_SIZE);
if (ret)
return ret;

@ -225,16 +225,16 @@ intel_region_ttm_resource_alloc(struct intel_memory_region *mem,
goto out;
}
place.lpfn = place.fpfn + (size >> PAGE_SHIFT);
} else if (mem->io_size && mem->io_size < mem->total) {
} else if (resource_size(&mem->io) && resource_size(&mem->io) < mem->total) {
if (flags & I915_BO_ALLOC_GPU_ONLY) {
place.flags |= TTM_PL_FLAG_TOPDOWN;
} else {
place.fpfn = 0;
if (WARN_ON(overflows_type(mem->io_size >> PAGE_SHIFT, place.lpfn))) {
if (WARN_ON(overflows_type(resource_size(&mem->io) >> PAGE_SHIFT, place.lpfn))) {
ret = -E2BIG;
goto out;
}
place.lpfn = mem->io_size >> PAGE_SHIFT;
place.lpfn = resource_size(&mem->io) >> PAGE_SHIFT;
}
}

@ -1833,7 +1833,10 @@ static const struct intel_forcewake_range __mtl_fw_ranges[] = {
GEN_FW_RANGE(0x24000, 0x2ffff, 0), /*
0x24000 - 0x2407f: always on
0x24080 - 0x2ffff: reserved */
GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT)
GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),
GEN_FW_RANGE(0x40000, 0x1901ef, 0),
GEN_FW_RANGE(0x1901f0, 0x1901f3, FORCEWAKE_GT)
/* FIXME: WA to wake GT while triggering H2G */
};

/*

@ -10,21 +10,12 @@
#include "intel_wakeref.h"
#include "i915_drv.h"

static void rpm_get(struct intel_wakeref *wf)
{
wf->wakeref = intel_runtime_pm_get(&wf->i915->runtime_pm);
}

static void rpm_put(struct intel_wakeref *wf)
{
intel_wakeref_t wakeref = fetch_and_zero(&wf->wakeref);

intel_runtime_pm_put(&wf->i915->runtime_pm, wakeref);
INTEL_WAKEREF_BUG_ON(!wakeref);
}

int __intel_wakeref_get_first(struct intel_wakeref *wf)
{
intel_wakeref_t wakeref;
int ret = 0;

wakeref = intel_runtime_pm_get(&wf->i915->runtime_pm);
/*
* Treat get/put as different subclasses, as we may need to run
* the put callback from under the shrinker and do not want to

@ -32,41 +23,52 @@ int __intel_wakeref_get_first(struct intel_wakeref *wf)
* upon acquiring the wakeref.
*/
mutex_lock_nested(&wf->mutex, SINGLE_DEPTH_NESTING);

if (!atomic_read(&wf->count)) {
int err;
INTEL_WAKEREF_BUG_ON(wf->wakeref);
wf->wakeref = wakeref;
wakeref = 0;

rpm_get(wf);

err = wf->ops->get(wf);
if (unlikely(err)) {
rpm_put(wf);
mutex_unlock(&wf->mutex);
return err;
ret = wf->ops->get(wf);
if (ret) {
wakeref = xchg(&wf->wakeref, 0);
wake_up_var(&wf->wakeref);
goto unlock;
}

smp_mb__before_atomic(); /* release wf->count */
}
atomic_inc(&wf->count);
mutex_unlock(&wf->mutex);

atomic_inc(&wf->count);
INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
return 0;

unlock:
mutex_unlock(&wf->mutex);
if (unlikely(wakeref))
intel_runtime_pm_put(&wf->i915->runtime_pm, wakeref);

return ret;
}

static void ____intel_wakeref_put_last(struct intel_wakeref *wf)
{
intel_wakeref_t wakeref = 0;

INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
if (unlikely(!atomic_dec_and_test(&wf->count)))
goto unlock;

/* ops->put() must reschedule its own release on error/deferral */
if (likely(!wf->ops->put(wf))) {
rpm_put(wf);
INTEL_WAKEREF_BUG_ON(!wf->wakeref);
wakeref = xchg(&wf->wakeref, 0);
wake_up_var(&wf->wakeref);
}

unlock:
mutex_unlock(&wf->mutex);
if (wakeref)
intel_runtime_pm_put(&wf->i915->runtime_pm, wakeref);
}

void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags)

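Because the hunk above interleaves the removed rpm_get()/rpm_put() path with its replacement, the reworked acquire path is easier to follow in isolation. A condensed sketch of the new __intel_wakeref_get_first() flow (BUG_ON checks and unlikely() annotations elided), not a byte-for-byte copy of the code above:

int sketch_wakeref_get_first(struct intel_wakeref *wf)
{
	/* Take the runtime-PM reference before the mutex... */
	intel_wakeref_t wakeref = intel_runtime_pm_get(&wf->i915->runtime_pm);
	int ret = 0;

	mutex_lock_nested(&wf->mutex, SINGLE_DEPTH_NESTING);
	if (!atomic_read(&wf->count)) {
		/* ...and hand it over to wf only when we are the first user. */
		wf->wakeref = wakeref;
		wakeref = 0;

		ret = wf->ops->get(wf);
		if (ret) {
			wakeref = xchg(&wf->wakeref, 0); /* reclaim it on failure */
			wake_up_var(&wf->wakeref);
			goto unlock;
		}

		smp_mb__before_atomic(); /* release wf->count */
	}
	atomic_inc(&wf->count);

unlock:
	mutex_unlock(&wf->mutex);
	if (wakeref) /* not the first user, or ops->get() failed */
		intel_runtime_pm_put(&wf->i915->runtime_pm, wakeref);

	return ret;
}
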
@ -14,8 +14,8 @@
#define PXP43_CMDID_NEW_HUC_AUTH 0x0000003F /* MTL+ */
#define PXP43_CMDID_INIT_SESSION 0x00000036

/* PXP-Packet sizes for MTL's GSCCS-HECI instruction */
#define PXP43_MAX_HECI_INOUT_SIZE (SZ_32K)
/* PXP-Packet sizes for MTL's GSCCS-HECI instruction is spec'd at 65K before page alignment*/
#define PXP43_MAX_HECI_INOUT_SIZE (PAGE_ALIGN(SZ_64K + SZ_1K))

/* PXP-Packet size for MTL's NEW_HUC_AUTH instruction */
#define PXP43_HUC_AUTH_INOUT_SIZE (SZ_4K)

@ -544,8 +544,8 @@ static u64 igt_object_mappable_total(struct drm_i915_gem_object *obj)
u64 start = drm_buddy_block_offset(block);
u64 end = start + drm_buddy_block_size(mm, block);

if (start < mr->io_size)
total += min_t(u64, end, mr->io_size) - start;
if (start < resource_size(&mr->io))
total += min_t(u64, end, resource_size(&mr->io)) - start;
}

return total;