sync with OpenBSD -current

purplerain 2024-08-14 02:30:10 +00:00
parent 302c0be22f
commit ee61daa776
Signed by: purplerain
GPG key ID: F42C07F07E2E35B7
105 changed files with 1609 additions and 484 deletions

View file

@ -1,4 +1,4 @@
# $OpenBSD: RAMDISK,v 1.86 2023/07/20 02:26:24 yasuoka Exp $
# $OpenBSD: RAMDISK,v 1.87 2024/08/12 18:43:41 deraadt Exp $
machine amd64
maxusers 4
@ -82,7 +82,7 @@ wsdisplay0 at vga? console 1
com0 at isa? port 0x3f8 irq 4 # standard PC serial ports
com1 at isa? port 0x2f8 irq 3
mpi* at pci? # LSI Logic Message Passing Interface
#mpi* at pci? # LSI Logic Message Passing Interface
#mfi* at pci? # LSI MegaRAID SAS controllers
scsibus* at scsi?

View file

@ -1,4 +1,4 @@
/* $OpenBSD: acpi_x86.c,v 1.29 2024/08/11 17:30:28 deraadt Exp $ */
/* $OpenBSD: acpi_x86.c,v 1.30 2024/08/13 22:31:16 deraadt Exp $ */
/*
* Copyright (c) 2005 Thorsten Lockert <tholo@sigmasoft.com>
* Copyright (c) 2005 Jordan Hargrave <jordan@openbsd.org>
@ -126,6 +126,10 @@ gosleep(void *v)
sc->sc_pmc_resume(sc->sc_pmc_cookie);
acpi_indicator(sc, ACPI_SST_WAKING); /* blink */
/* 1st resume AML step: _WAK(fromstate) */
if (sc->sc_state != ACPI_STATE_S0)
aml_node_setval(sc, sc->sc_wak, sc->sc_state);
return ret;
}

View file

@ -1,4 +1,4 @@
/* $OpenBSD: acpipwrres.c,v 1.15 2024/08/02 09:28:35 kettenis Exp $ */
/* $OpenBSD: acpipwrres.c,v 1.16 2024/08/12 17:24:58 kettenis Exp $ */
/*
* Copyright (c) 2013 Martin Pieuchot <mpi@openbsd.org>
@ -33,7 +33,6 @@
int acpipwrres_match(struct device *, void *, void *);
void acpipwrres_attach(struct device *, struct device *, void *);
int acpipwrres_activate(struct device *, int);
#ifdef ACPIPWRRES_DEBUG
#define DPRINTF(x) printf x
@ -67,8 +66,7 @@ struct acpipwrres_consumer {
};
const struct cfattach acpipwrres_ca = {
sizeof(struct acpipwrres_softc), acpipwrres_match, acpipwrres_attach,
NULL, acpipwrres_activate
sizeof(struct acpipwrres_softc), acpipwrres_match, acpipwrres_attach
};
struct cfdriver acpipwrres_cd = {
@ -142,23 +140,6 @@ acpipwrres_attach(struct device *parent, struct device *self, void *aux)
printf("\n");
}
int
acpipwrres_activate(struct device *self, int act)
{
struct acpipwrres_softc *sc = (struct acpipwrres_softc *)self;
switch (act) {
case DVACT_POWERDOWN:
if (sc->sc_cons_ref == 0 && sc->sc_state != ACPIPWRRES_OFF) {
aml_evalname(sc->sc_acpi, sc->sc_devnode, "_OFF", 0,
NULL, NULL);
sc->sc_state = ACPIPWRRES_OFF;
}
break;
}
return 0;
}
int
acpipwrres_ref_incr(struct acpipwrres_softc *sc, struct aml_node *node)
{

View file

@ -1,4 +1,4 @@
/* $OpenBSD: ccp.c,v 1.5 2024/06/13 17:59:08 bluhm Exp $ */
/* $OpenBSD: ccp.c,v 1.6 2024/08/13 20:48:00 bluhm Exp $ */
/*
* Copyright (c) 2018 David Gwynne <dlg@openbsd.org>
@ -228,7 +228,7 @@ ccp_docmd(struct ccp_softc *sc, int cmd, uint64_t paddr)
plo = ((paddr >> 0) & 0xffffffff);
phi = ((paddr >> 32) & 0xffffffff);
cmdword = (cmd & 0x3f) << 16;
cmdword = (cmd & 0x3ff) << 16;
if (!cold)
cmdword |= PSP_CMDRESP_IOC;
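Note on the hunk above: the doorbell word packs the command id at bit 16, and the widened mask grows the usable id range from 6 to 10 bits. A standalone sketch of the difference (values illustrative, not from the driver):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cmd = 0x100;                 /* a command id above the old 6-bit range */
	uint32_t old = (cmd & 0x3f) << 16;    /* old mask drops the high bits -> 0 */
	uint32_t new = (cmd & 0x3ff) << 16;   /* 10-bit mask keeps them -> 0x1000000 */

	printf("old=0x%08x new=0x%08x\n", old, new);
	return 0;
}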

View file

@ -1,4 +1,4 @@
/* $OpenBSD: re.c,v 1.217 2024/01/19 03:46:14 dlg Exp $ */
/* $OpenBSD: re.c,v 1.218 2024/08/12 06:47:11 dlg Exp $ */
/* $FreeBSD: if_re.c,v 1.31 2004/09/04 07:54:05 ru Exp $ */
/*
* Copyright (c) 1997, 1998-2003
@ -1834,7 +1834,7 @@ re_start(struct ifqueue *ifq)
free -= idx;
for (;;) {
if (sc->rl_ldata.rl_tx_ndescs >= free + 2) {
if (free < sc->rl_ldata.rl_tx_ndescs + 2) {
ifq_set_oactive(ifq);
break;
}

View file

@ -1,4 +1,4 @@
/* $OpenBSD: azalia.c,v 1.287 2024/05/17 19:43:45 kettenis Exp $ */
/* $OpenBSD: azalia.c,v 1.288 2024/08/13 22:32:58 deraadt Exp $ */
/* $NetBSD: azalia.c,v 1.20 2006/05/07 08:31:44 kent Exp $ */
/*-
@ -599,6 +599,18 @@ azalia_pci_activate(struct device *self, int act)
int rv = 0;
switch (act) {
case DVACT_QUIESCE:
rv = config_activate_children(self, act);
if (sc->detached)
break;
/* stop interrupts and clear status registers */
AZ_WRITE_4(sc, INTCTL, 0);
AZ_WRITE_2(sc, STATESTS, HDA_STATESTS_SDIWAKE);
AZ_WRITE_1(sc, RIRBSTS, HDA_RIRBSTS_RINTFL | HDA_RIRBSTS_RIRBOIS);
(void) AZ_READ_4(sc, INTSTS);
break;
case DVACT_SUSPEND:
azalia_suspend(sc);
break;
@ -1388,6 +1400,12 @@ azalia_suspend(azalia_t *az)
if (az->detached)
return 0;
/* stop interrupts and clear status registers */
AZ_WRITE_4(az, INTCTL, 0);
AZ_WRITE_2(az, STATESTS, HDA_STATESTS_SDIWAKE);
AZ_WRITE_1(az, RIRBSTS, HDA_RIRBSTS_RINTFL | HDA_RIRBSTS_RIRBOIS);
(void) AZ_READ_4(az, INTSTS);
/* disable unsolicited responses */
AZ_WRITE_4(az, GCTL, AZ_READ_4(az, GCTL) & ~HDA_GCTL_UNSOL);

View file

@ -1059,3 +1059,33 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
plane_config->fb = intel_fb;
}
bool i9xx_fixup_initial_plane_config(struct intel_crtc *crtc,
const struct intel_initial_plane_config *plane_config)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_plane *plane = to_intel_plane(crtc->base.primary);
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
u32 base;
if (!plane_state->uapi.visible)
return false;
base = intel_plane_ggtt_offset(plane_state);
/*
* We may have moved the surface to a different
* part of ggtt, make the plane aware of that.
*/
if (plane_config->base == base)
return false;
if (DISPLAY_VER(dev_priv) >= 4)
intel_de_write(dev_priv, DSPSURF(i9xx_plane), base);
else
intel_de_write(dev_priv, DSPADDR(i9xx_plane), base);
return true;
}

View file

@ -26,4 +26,6 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe);
void i9xx_get_initial_plane_config(struct intel_crtc *crtc,
struct intel_initial_plane_config *plane_config);
bool i9xx_fixup_initial_plane_config(struct intel_crtc *crtc,
const struct intel_initial_plane_config *plane_config);
#endif

View file

@ -7764,6 +7764,7 @@ static const struct intel_display_funcs skl_display_funcs = {
.crtc_disable = hsw_crtc_disable,
.commit_modeset_enables = skl_commit_modeset_enables,
.get_initial_plane_config = skl_get_initial_plane_config,
.fixup_initial_plane_config = skl_fixup_initial_plane_config,
};
static const struct intel_display_funcs ddi_display_funcs = {
@ -7772,6 +7773,7 @@ static const struct intel_display_funcs ddi_display_funcs = {
.crtc_disable = hsw_crtc_disable,
.commit_modeset_enables = intel_commit_modeset_enables,
.get_initial_plane_config = i9xx_get_initial_plane_config,
.fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};
static const struct intel_display_funcs pch_split_display_funcs = {
@ -7780,6 +7782,7 @@ static const struct intel_display_funcs pch_split_display_funcs = {
.crtc_disable = ilk_crtc_disable,
.commit_modeset_enables = intel_commit_modeset_enables,
.get_initial_plane_config = i9xx_get_initial_plane_config,
.fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};
static const struct intel_display_funcs vlv_display_funcs = {
@ -7788,6 +7791,7 @@ static const struct intel_display_funcs vlv_display_funcs = {
.crtc_disable = i9xx_crtc_disable,
.commit_modeset_enables = intel_commit_modeset_enables,
.get_initial_plane_config = i9xx_get_initial_plane_config,
.fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};
static const struct intel_display_funcs i9xx_display_funcs = {
@ -7796,6 +7800,7 @@ static const struct intel_display_funcs i9xx_display_funcs = {
.crtc_disable = i9xx_crtc_disable,
.commit_modeset_enables = intel_commit_modeset_enables,
.get_initial_plane_config = i9xx_get_initial_plane_config,
.fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};
/**

View file

@ -63,6 +63,8 @@ struct intel_display_funcs {
struct intel_crtc_state *);
void (*get_initial_plane_config)(struct intel_crtc *,
struct intel_initial_plane_config *);
bool (*fixup_initial_plane_config)(struct intel_crtc *crtc,
const struct intel_initial_plane_config *plane_config);
void (*crtc_enable)(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void (*crtc_disable)(struct intel_atomic_state *state,

View file

@ -277,7 +277,6 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915)
{
struct drm_device *dev = &i915->drm;
enum pipe pipe;
struct intel_crtc *crtc;
int ret;
if (!HAS_DISPLAY(i915))
@ -327,11 +326,7 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915)
intel_acpi_assign_connector_fwnodes(i915);
drm_modeset_unlock_all(dev);
for_each_intel_crtc(dev, crtc) {
if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
continue;
intel_crtc_initial_plane_config(crtc);
}
intel_initial_plane_config(i915);
/*
* Make sure hardware watermarks really match the state we read out.

View file

@ -770,6 +770,8 @@ struct intel_plane_state {
struct intel_initial_plane_config {
struct intel_framebuffer *fb;
struct intel_memory_region *mem;
resource_size_t phys_base;
struct i915_vma *vma;
unsigned int tiling;
int size;

View file

@ -1556,7 +1556,7 @@ static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
}
static int
skl_ddi_calculate_wrpll(int clock /* in Hz */,
skl_ddi_calculate_wrpll(int clock,
int ref_clock,
struct skl_wrpll_params *wrpll_params)
{
@ -1581,7 +1581,7 @@ skl_ddi_calculate_wrpll(int clock /* in Hz */,
};
unsigned int dco, d, i;
unsigned int p0, p1, p2;
u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
u64 afe_clock = (u64)clock * 1000 * 5; /* AFE Clock is 5x Pixel clock, in Hz */
for (d = 0; d < ARRAY_SIZE(dividers); d++) {
for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
@ -1713,7 +1713,7 @@ static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
ret = skl_ddi_calculate_wrpll(crtc_state->port_clock,
i915->display.dpll.ref_clks.nssc, &wrpll_params);
if (ret)
return ret;
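Note on the hunk above: skl_ddi_calculate_wrpll() now takes the port clock in kHz (the caller stops multiplying by 1000) and widens to u64 before converting to Hz, since the old int expression clock * 5 on a Hz value could overflow for high port clocks. A standalone sketch with an assumed example clock:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int clock = 810000;	/* assumed example: 810 MHz port clock, in kHz */

	/* AFE clock is 5x pixel clock; widen before multiplying, as
	 * 810000 * 1000 * 5 = 4050000000 no longer fits in a 32-bit int */
	uint64_t afe_clock = (uint64_t)clock * 1000 * 5;

	printf("afe_clock = %llu Hz\n", (unsigned long long)afe_clock);
	return 0;
}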

View file

@ -294,8 +294,9 @@ static int intelfb_create(struct drm_fb_helper *helper,
/* Use fbdev's framebuffer from lmem for discrete */
info->fix.smem_start =
(unsigned long)(mem->io_start +
i915_gem_object_get_dma_address(obj, 0));
(unsigned long)(mem->io.start +
i915_gem_object_get_dma_address(obj, 0) -
mem->region.start);
info->fix.smem_len = obj->base.size;
} else {
/* Our framebuffer is the entirety of fbdev's system memory */
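Note on the hunk above: the object's DMA address is device-local, so it is rebased from the region's device-local start onto the region's CPU-visible io range. A sketch of the arithmetic (addresses illustrative, not real hardware values):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t io_start = 0x6000000000;	/* CPU (BAR) base of the region */
	uint64_t region_start = 0x800000;	/* device-local base of the region */
	uint64_t dma_addr = 0x1800000;		/* object's device-local address */

	/* offset within the region, relocated onto the CPU-visible BAR */
	uint64_t smem_start = io_start + dma_addr - region_start;

	printf("smem_start = 0x%llx\n", (unsigned long long)smem_start);
	return 0;
}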

View file

@ -249,7 +249,7 @@
#define HDCP2_STREAM_STATUS(dev_priv, trans, port) \
(GRAPHICS_VER(dev_priv) >= 12 ? \
TRANS_HDCP2_STREAM_STATUS(trans) : \
PIPE_HDCP2_STREAM_STATUS(pipe))
PIPE_HDCP2_STREAM_STATUS(port))
#define _PORTA_HDCP2_AUTH_STREAM 0x66F00
#define _PORTB_HDCP2_AUTH_STREAM 0x66F04

View file

@ -3,29 +3,32 @@
* Copyright © 2021 Intel Corporation
*/
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "intel_atomic_plane.h"
#include "intel_crtc.h"
#include "intel_display.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_plane_initial.h"
static bool
intel_reuse_initial_plane_obj(struct drm_i915_private *i915,
const struct intel_initial_plane_config *plane_config,
intel_reuse_initial_plane_obj(struct intel_crtc *this,
const struct intel_initial_plane_config plane_configs[],
struct drm_framebuffer **fb,
struct i915_vma **vma)
{
struct drm_i915_private *i915 = to_i915(this->base.dev);
struct intel_crtc *crtc;
for_each_intel_crtc(&i915->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane *plane =
to_intel_plane(crtc->base.primary);
struct intel_plane_state *plane_state =
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
const struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
if (!crtc_state->uapi.active)
continue;
@ -33,7 +36,7 @@ intel_reuse_initial_plane_obj(struct drm_i915_private *i915,
if (!plane_state->ggtt_vma)
continue;
if (intel_plane_ggtt_offset(plane_state) == plane_config->base) {
if (plane_configs[this->pipe].base == plane_configs[crtc->pipe].base) {
*fb = plane_state->hw.fb;
*vma = plane_state->ggtt_vma;
return true;
@ -43,12 +46,100 @@ intel_reuse_initial_plane_obj(struct drm_i915_private *i915,
return false;
}
static bool
initial_plane_phys_lmem(struct drm_i915_private *i915,
struct intel_initial_plane_config *plane_config)
{
gen8_pte_t __iomem *gte = to_gt(i915)->ggtt->gsm;
struct intel_memory_region *mem;
dma_addr_t dma_addr;
gen8_pte_t pte;
u32 base;
base = round_down(plane_config->base, I915_GTT_MIN_ALIGNMENT);
gte += base / I915_GTT_PAGE_SIZE;
pte = ioread64(gte);
if (!(pte & GEN12_GGTT_PTE_LM)) {
drm_err(&i915->drm,
"Initial plane programming missing PTE_LM bit\n");
return false;
}
dma_addr = pte & GEN12_GGTT_PTE_ADDR_MASK;
if (IS_DGFX(i915))
mem = i915->mm.regions[INTEL_REGION_LMEM_0];
else
mem = i915->mm.stolen_region;
if (!mem) {
drm_dbg_kms(&i915->drm,
"Initial plane memory region not initialized\n");
return false;
}
/*
* On lmem we don't currently expect this to
* ever be placed in the stolen portion.
*/
if (dma_addr < mem->region.start || dma_addr > mem->region.end) {
drm_err(&i915->drm,
"Initial plane programming using invalid range, dma_addr=%pa (%s [%pa-%pa])\n",
&dma_addr, mem->region.name, &mem->region.start, &mem->region.end);
return false;
}
drm_dbg(&i915->drm,
"Using dma_addr=%pa, based on initial plane programming\n",
&dma_addr);
plane_config->phys_base = dma_addr - mem->region.start;
plane_config->mem = mem;
return true;
}
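Note on initial_plane_phys_lmem() above: the GGTT PTE read back from the GSM carries both a local-memory flag and the DMA address bits. A minimal decode sketch (the bit position and address mask here are assumptions for illustration; the driver uses GEN12_GGTT_PTE_LM and GEN12_GGTT_PTE_ADDR_MASK):

#include <stdint.h>
#include <stdio.h>

#define PTE_LM		(1ULL << 1)		/* assumed LM bit, illustrative */
#define PTE_ADDR_MASK	0xffffffffff000ULL	/* assumed address bits, illustrative */

int main(void)
{
	uint64_t pte = 0x1f0000000ULL | PTE_LM | 1;	/* address | LM | valid */

	if (!(pte & PTE_LM)) {
		printf("missing PTE_LM bit\n");
		return 1;
	}
	printf("dma_addr = 0x%llx\n", (unsigned long long)(pte & PTE_ADDR_MASK));
	return 0;
}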
static bool
initial_plane_phys_smem(struct drm_i915_private *i915,
struct intel_initial_plane_config *plane_config)
{
struct intel_memory_region *mem;
u32 base;
base = round_down(plane_config->base, I915_GTT_MIN_ALIGNMENT);
mem = i915->mm.stolen_region;
if (!mem) {
drm_dbg_kms(&i915->drm,
"Initial plane memory region not initialized\n");
return false;
}
/* FIXME get and validate the dma_addr from the PTE */
plane_config->phys_base = base;
plane_config->mem = mem;
return true;
}
static bool
initial_plane_phys(struct drm_i915_private *i915,
struct intel_initial_plane_config *plane_config)
{
if (IS_DGFX(i915) || HAS_LMEMBAR_SMEM_STOLEN(i915))
return initial_plane_phys_lmem(i915, plane_config);
else
return initial_plane_phys_smem(i915, plane_config);
}
static struct i915_vma *
initial_plane_vma(struct drm_i915_private *i915,
struct intel_initial_plane_config *plane_config)
{
struct intel_memory_region *mem;
struct drm_i915_gem_object *obj;
struct drm_mm_node orig_mm = {};
struct i915_vma *vma;
resource_size_t phys_base;
u32 base, size;
@ -57,45 +148,13 @@ initial_plane_vma(struct drm_i915_private *i915,
if (plane_config->size == 0)
return NULL;
base = round_down(plane_config->base, I915_GTT_MIN_ALIGNMENT);
if (IS_DGFX(i915)) {
gen8_pte_t __iomem *gte = to_gt(i915)->ggtt->gsm;
gen8_pte_t pte;
gte += base / I915_GTT_PAGE_SIZE;
pte = ioread64(gte);
if (!(pte & GEN12_GGTT_PTE_LM)) {
drm_err(&i915->drm,
"Initial plane programming missing PTE_LM bit\n");
return NULL;
}
phys_base = pte & I915_GTT_PAGE_MASK;
mem = i915->mm.regions[INTEL_REGION_LMEM_0];
/*
* We don't currently expect this to ever be placed in the
* stolen portion.
*/
if (phys_base >= resource_size(&mem->region)) {
drm_err(&i915->drm,
"Initial plane programming using invalid range, phys_base=%pa\n",
&phys_base);
return NULL;
}
drm_dbg(&i915->drm,
"Using phys_base=%pa, based on initial plane programming\n",
&phys_base);
} else {
phys_base = base;
mem = i915->mm.stolen_region;
}
if (!mem)
if (!initial_plane_phys(i915, plane_config))
return NULL;
phys_base = plane_config->phys_base;
mem = plane_config->mem;
base = round_down(plane_config->base, I915_GTT_MIN_ALIGNMENT);
size = round_up(plane_config->base + plane_config->size,
mem->min_page_size);
size -= base;
@ -107,14 +166,19 @@ initial_plane_vma(struct drm_i915_private *i915,
*/
if (IS_ENABLED(CONFIG_FRAMEBUFFER_CONSOLE) &&
mem == i915->mm.stolen_region &&
size * 2 > i915->dsm.usable_size)
size * 2 > i915->dsm.usable_size) {
drm_dbg_kms(&i915->drm, "Initial FB size exceeds half of stolen, discarding\n");
return NULL;
}
obj = i915_gem_object_create_region_at(mem, phys_base, size,
I915_BO_ALLOC_USER |
I915_BO_PREALLOC);
if (IS_ERR(obj))
if (IS_ERR(obj)) {
drm_dbg_kms(&i915->drm, "Failed to preallocate initial FB in %s\n",
mem->region.name);
return NULL;
}
/*
* Mark it WT ahead of time to avoid changing the
@ -138,23 +202,66 @@ initial_plane_vma(struct drm_i915_private *i915,
goto err_obj;
}
/*
* MTL GOP likes to place the framebuffer high up in ggtt,
* which can cause problems for ggtt_reserve_guc_top().
* Try to pin it to a low ggtt address instead to avoid that.
*/
base = 0;
if (base != plane_config->base) {
struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
int ret;
/*
* Make sure the original and new locations
* can't overlap. That would corrupt the original
* PTEs which are still being used for scanout.
*/
ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, &orig_mm,
size, plane_config->base,
I915_COLOR_UNEVICTABLE, PIN_NOEVICT);
if (ret)
goto err_obj;
}
vma = i915_vma_instance(obj, &to_gt(i915)->ggtt->vm, NULL);
if (IS_ERR(vma))
goto err_obj;
retry:
pinctl = PIN_GLOBAL | PIN_OFFSET_FIXED | base;
if (HAS_GMCH(i915))
if (!i915_gem_object_is_lmem(obj))
pinctl |= PIN_MAPPABLE;
if (i915_vma_pin(vma, 0, 0, pinctl))
if (i915_vma_pin(vma, 0, 0, pinctl)) {
if (drm_mm_node_allocated(&orig_mm)) {
drm_mm_remove_node(&orig_mm);
/*
* Try again, but this time pin
* it to its original location.
*/
base = plane_config->base;
goto retry;
}
goto err_obj;
}
if (i915_gem_object_is_tiled(obj) &&
!i915_vma_is_map_and_fenceable(vma))
goto err_obj;
if (drm_mm_node_allocated(&orig_mm))
drm_mm_remove_node(&orig_mm);
drm_dbg_kms(&i915->drm,
"Initial plane fb bound to 0x%x in the ggtt (original 0x%x)\n",
i915_ggtt_offset(vma), plane_config->base);
return vma;
err_obj:
if (drm_mm_node_allocated(&orig_mm))
drm_mm_remove_node(&orig_mm);
i915_gem_object_put(obj);
return NULL;
}
@ -209,10 +316,11 @@ err_vma:
static void
intel_find_initial_plane_obj(struct intel_crtc *crtc,
struct intel_initial_plane_config *plane_config)
struct intel_initial_plane_config plane_configs[])
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_initial_plane_config *plane_config =
&plane_configs[crtc->pipe];
struct intel_plane *plane =
to_intel_plane(crtc->base.primary);
struct intel_plane_state *plane_state =
@ -238,7 +346,7 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc,
* Failed to alloc the obj, check to see if we should share
* an fb with another CRTC instead
*/
if (intel_reuse_initial_plane_obj(dev_priv, plane_config, &fb, &vma))
if (intel_reuse_initial_plane_obj(crtc, plane_configs, &fb, &vma))
goto valid_fb;
/*
@ -301,25 +409,36 @@ static void plane_config_fini(struct intel_initial_plane_config *plane_config)
i915_vma_put(plane_config->vma);
}
void intel_crtc_initial_plane_config(struct intel_crtc *crtc)
void intel_initial_plane_config(struct drm_i915_private *i915)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_initial_plane_config plane_config = {};
struct intel_initial_plane_config plane_configs[I915_MAX_PIPES] = {};
struct intel_crtc *crtc;
/*
* Note that reserving the BIOS fb up front prevents us
* from stuffing other stolen allocations like the ring
* on top. This prevents some ugliness at boot time, and
* can even allow for smooth boot transitions if the BIOS
* fb is large enough for the active pipe configuration.
*/
dev_priv->display.funcs.display->get_initial_plane_config(crtc, &plane_config);
for_each_intel_crtc(&i915->drm, crtc) {
struct intel_initial_plane_config *plane_config =
&plane_configs[crtc->pipe];
/*
* If the fb is shared between multiple heads, we'll
* just get the first one.
*/
intel_find_initial_plane_obj(crtc, &plane_config);
if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
continue;
plane_config_fini(&plane_config);
/*
* Note that reserving the BIOS fb up front prevents us
* from stuffing other stolen allocations like the ring
* on top. This prevents some ugliness at boot time, and
* can even allow for smooth boot transitions if the BIOS
* fb is large enough for the active pipe configuration.
*/
i915->display.funcs.display->get_initial_plane_config(crtc, plane_config);
/*
* If the fb is shared between multiple heads, we'll
* just get the first one.
*/
intel_find_initial_plane_obj(crtc, plane_configs);
if (i915->display.funcs.display->fixup_initial_plane_config(crtc, plane_config))
intel_crtc_wait_for_next_vblank(crtc);
plane_config_fini(plane_config);
}
}

View file

@ -6,8 +6,8 @@
#ifndef __INTEL_PLANE_INITIAL_H__
#define __INTEL_PLANE_INITIAL_H__
struct intel_crtc;
struct drm_i915_private;
void intel_crtc_initial_plane_config(struct intel_crtc *crtc);
void intel_initial_plane_config(struct drm_i915_private *i915);
#endif

View file

@ -2557,3 +2557,31 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
error:
kfree(intel_fb);
}
bool skl_fixup_initial_plane_config(struct intel_crtc *crtc,
const struct intel_initial_plane_config *plane_config)
{
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_plane *plane = to_intel_plane(crtc->base.primary);
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
enum plane_id plane_id = plane->id;
enum pipe pipe = crtc->pipe;
u32 base;
if (!plane_state->uapi.visible)
return false;
base = intel_plane_ggtt_offset(plane_state);
/*
* We may have moved the surface to a different
* part of ggtt, make the plane aware of that.
*/
if (plane_config->base == base)
return false;
intel_de_write(i915, PLANE_SURF(pipe, plane_id), base);
return true;
}

View file

@ -22,6 +22,8 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
void skl_get_initial_plane_config(struct intel_crtc *crtc,
struct intel_initial_plane_config *plane_config);
bool skl_fixup_initial_plane_config(struct intel_crtc *crtc,
const struct intel_initial_plane_config *plane_config);
int skl_format_to_fourcc(int format, bool rgb_order, bool alpha);

View file

@ -129,7 +129,7 @@ i915_gem_object_create_region_at(struct intel_memory_region *mem,
return ERR_PTR(-EINVAL);
if (!(flags & I915_BO_ALLOC_GPU_ONLY) &&
offset + size > mem->io_size &&
offset + size > resource_size(&mem->io) &&
!i915_ggtt_has_aperture(to_gt(mem->i915)->ggtt))
return ERR_PTR(-ENOSPC);

View file

@ -545,7 +545,9 @@ static int i915_gem_init_stolen(struct intel_memory_region *mem)
/* Exclude the reserved region from driver use */
mem->region.end = i915->dsm.reserved.start - 1;
mem->io_size = min(mem->io_size, resource_size(&mem->region));
mem->io = DEFINE_RES_MEM(mem->io.start,
min(resource_size(&mem->io),
resource_size(&mem->region)));
i915->dsm.usable_size = resource_size(&mem->region);
@ -756,7 +758,7 @@ static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
* With discrete devices, where we lack a mappable aperture there is no
* possible way to ever access this memory on the CPU side.
*/
if (mem->type == INTEL_MEMORY_STOLEN_LOCAL && !mem->io_size &&
if (mem->type == INTEL_MEMORY_STOLEN_LOCAL && !resource_size(&mem->io) &&
!(flags & I915_BO_ALLOC_GPU_ONLY))
return -ENOSPC;
@ -830,7 +832,6 @@ static const struct intel_memory_region_ops i915_region_stolen_smem_ops = {
static int init_stolen_lmem(struct intel_memory_region *mem)
{
struct drm_i915_private *i915 = mem->i915;
int err;
if (GEM_WARN_ON(resource_size(&mem->region) == 0))
@ -843,38 +844,34 @@ static int init_stolen_lmem(struct intel_memory_region *mem)
}
#ifdef __linux__
if (mem->io_size &&
!io_mapping_init_wc(&mem->iomap, mem->io_start, mem->io_size))
if (resource_size(&mem->io) &&
!io_mapping_init_wc(&mem->iomap, mem->io.start, resource_size(&mem->io)))
goto err_cleanup;
#else
if (mem->io_size) {
if (resource_size(&mem->io)) {
paddr_t start, end;
struct vm_page *pgs;
int i;
bus_space_handle_t bsh;
start = atop(mem->io_start);
end = start + atop(mem->io_size);
start = atop(mem->io.start);
end = start + atop(resource_size(&mem->io));
uvm_page_physload(start, end, start, end, PHYSLOAD_DEVICE);
pgs = PHYS_TO_VM_PAGE(mem->io_start);
for (i = 0; i < atop(mem->io_size); i++)
pgs = PHYS_TO_VM_PAGE(mem->io.start);
for (i = 0; i < atop(resource_size(&mem->io)); i++)
atomic_setbits_int(&(pgs[i].pg_flags), PG_PMAP_WC);
if (bus_space_map(i915->bst, mem->io_start, mem->io_size,
if (bus_space_map(mem->i915->bst, mem->io.start, resource_size(&mem->io),
BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE, &bsh))
panic("can't map stolen lmem");
mem->iomap.base = mem->io_start;
mem->iomap.size = mem->io_size;
mem->iomap.iomem = bus_space_vaddr(i915->bst, bsh);
mem->iomap.base = mem->io.start;
mem->iomap.size = resource_size(&mem->io);
mem->iomap.iomem = bus_space_vaddr(mem->i915->bst, bsh);
}
#endif
drm_dbg(&i915->drm, "Stolen Local memory IO start: %pa\n",
&mem->io_start);
drm_dbg(&i915->drm, "Stolen Local DSM base: %pa\n", &mem->region.start);
return 0;
#ifdef __linux__
err_cleanup:
@ -887,7 +884,7 @@ static int release_stolen_lmem(struct intel_memory_region *mem)
{
STUB();
#ifdef notyet
if (mem->io_size)
if (resource_size(&mem->io))
io_mapping_fini(&mem->iomap);
#endif
i915_gem_cleanup_stolen(mem->i915);
@ -987,14 +984,18 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
GEM_BUG_ON((dsm_base + dsm_size) > lmem_size);
} else {
/* Use DSM base address instead for stolen memory */
dsm_base = intel_uncore_read64(uncore, GEN12_DSMBASE) & GEN12_BDSM_MASK;
dsm_base = intel_uncore_read64(uncore, GEN6_DSMBASE) & GEN11_BDSM_MASK;
if (WARN_ON(lmem_size < dsm_base))
return ERR_PTR(-ENODEV);
dsm_size = ALIGN_DOWN(lmem_size - dsm_base, SZ_1M);
}
if (i915_direct_stolen_access(i915)) {
drm_dbg(&i915->drm, "Using direct DSM access\n");
io_start = intel_uncore_read64(uncore, GEN6_DSMBASE) & GEN11_BDSM_MASK;
io_size = dsm_size;
#ifdef __linux__
if (pci_resource_len(pdev, GEN12_LMEM_BAR) < lmem_size) {
} else if (pci_resource_len(pdev, GEN12_LMEM_BAR) < lmem_size) {
io_start = 0;
io_size = 0;
} else {
@ -1002,7 +1003,7 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
io_size = dsm_size;
}
#else
if (lmem_len < lmem_size) {
} else if (lmem_len < lmem_size) {
io_start = 0;
io_size = 0;
} else {

View file

@ -144,13 +144,13 @@ i915_ttm_place_from_region(const struct intel_memory_region *mr,
place->fpfn = offset >> PAGE_SHIFT;
WARN_ON(overflows_type(place->fpfn + (size >> PAGE_SHIFT), place->lpfn));
place->lpfn = place->fpfn + (size >> PAGE_SHIFT);
} else if (mr->io_size && mr->io_size < mr->total) {
} else if (resource_size(&mr->io) && resource_size(&mr->io) < mr->total) {
if (flags & I915_BO_ALLOC_GPU_ONLY) {
place->flags |= TTM_PL_FLAG_TOPDOWN;
} else {
place->fpfn = 0;
WARN_ON(overflows_type(mr->io_size >> PAGE_SHIFT, place->lpfn));
place->lpfn = mr->io_size >> PAGE_SHIFT;
WARN_ON(overflows_type(resource_size(&mr->io) >> PAGE_SHIFT, place->lpfn));
place->lpfn = resource_size(&mr->io) >> PAGE_SHIFT;
}
}
}
@ -1121,7 +1121,7 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
struct intel_memory_region *mr = obj->mm.placements[i];
unsigned int flags;
if (!mr->io_size && mr->type != INTEL_MEMORY_SYSTEM)
if (!resource_size(&mr->io) && mr->type != INTEL_MEMORY_SYSTEM)
continue;
flags = obj->flags;
@ -1295,7 +1295,7 @@ vm_fault_ttm(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
struct intel_memory_region *mr = obj->mm.placements[i];
unsigned int flags;
if (!mr->io_size && mr->type != INTEL_MEMORY_SYSTEM)
if (!resource_size(&mr->io) && mr->type != INTEL_MEMORY_SYSTEM)
continue;
flags = obj->flags;

View file

@ -1052,7 +1052,7 @@ static int igt_fill_mappable(struct intel_memory_region *mr,
int err;
total = 0;
size = mr->io_size;
size = resource_size(&mr->io);
do {
struct drm_i915_gem_object *obj;
@ -1313,28 +1313,28 @@ static int igt_mmap_migrate(void *arg)
struct intel_memory_region *mixed[] = { mr, system };
struct intel_memory_region *single[] = { mr };
struct ttm_resource_manager *man = mr->region_private;
resource_size_t saved_io_size;
struct resource saved_io;
int err;
if (mr->private)
continue;
if (!mr->io_size)
if (!resource_size(&mr->io))
continue;
/*
* For testing purposes let's force small BAR, if not already
* present.
*/
saved_io_size = mr->io_size;
if (mr->io_size == mr->total) {
resource_size_t io_size = mr->io_size;
saved_io = mr->io;
if (resource_size(&mr->io) == mr->total) {
resource_size_t io_size = resource_size(&mr->io);
io_size = rounddown_pow_of_two(io_size >> 1);
if (io_size < PAGE_SIZE)
continue;
mr->io_size = io_size;
mr->io = DEFINE_RES_MEM(mr->io.start, io_size);
i915_ttm_buddy_man_force_visible_size(man,
io_size >> PAGE_SHIFT);
}
@ -1394,9 +1394,9 @@ static int igt_mmap_migrate(void *arg)
IGT_MMAP_MIGRATE_FAIL_GPU |
IGT_MMAP_MIGRATE_UNFAULTABLE);
out_io_size:
mr->io_size = saved_io_size;
mr->io = saved_io;
i915_ttm_buddy_man_force_visible_size(man,
mr->io_size >> PAGE_SHIFT);
resource_size(&mr->io) >> PAGE_SHIFT);
if (err)
return err;
}

View file

@ -278,7 +278,8 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
* deals with Protected Memory which is not needed for
* AUX CCS invalidation and lead to unwanted side effects.
*/
if (mode & EMIT_FLUSH)
if ((mode & EMIT_FLUSH) &&
GRAPHICS_VER_FULL(rq->i915) < IP_VER(12, 70))
bit_group_1 |= PIPE_CONTROL_FLUSH_L3;
bit_group_1 |= PIPE_CONTROL_TILE_CACHE_FLUSH;
@ -812,12 +813,14 @@ u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
u32 flags = (PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_TLB_INVALIDATE |
PIPE_CONTROL_TILE_CACHE_FLUSH |
PIPE_CONTROL_FLUSH_L3 |
PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
PIPE_CONTROL_DEPTH_CACHE_FLUSH |
PIPE_CONTROL_DC_FLUSH_ENABLE |
PIPE_CONTROL_FLUSH_ENABLE);
if (GRAPHICS_VER_FULL(rq->i915) < IP_VER(12, 70))
flags |= PIPE_CONTROL_FLUSH_L3;
/* Wa_14016712196 */
if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)) || IS_DG2(i915))
/* dummy PIPE_CONTROL + depth flush */
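Note on the hunks above: the GRAPHICS_VER_FULL() checks compare packed version numbers, so 12.70 (Xe_LPG) sorts after every earlier 12.x part. A sketch assuming i915's IP_VER packing (major version in the high byte, release in the low byte):

#include <stdio.h>

#define IP_VER(ver, rel)	((ver) << 8 | (rel))

int main(void)
{
	printf("IP_VER(12, 70) = 0x%x\n", IP_VER(12, 70));		/* 0xc46 */
	printf("12.55 < 12.70: %d\n", IP_VER(12, 55) < IP_VER(12, 70));	/* 1 */
	return 0;
}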

View file

@ -170,6 +170,8 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
#define I915_GEM_HWS_SEQNO 0x40
#define I915_GEM_HWS_SEQNO_ADDR (I915_GEM_HWS_SEQNO * sizeof(u32))
#define I915_GEM_HWS_MIGRATE (0x42 * sizeof(u32))
#define I915_GEM_HWS_GGTT_BIND 0x46
#define I915_GEM_HWS_GGTT_BIND_ADDR (I915_GEM_HWS_GGTT_BIND * sizeof(u32))
#define I915_GEM_HWS_PXP 0x60
#define I915_GEM_HWS_PXP_ADDR (I915_GEM_HWS_PXP * sizeof(u32))
#define I915_GEM_HWS_GSC 0x62
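Note on the hunk above: following the existing pattern, the _ADDR companion scales a dword index within the hardware status page to a byte offset. For the new GGTT bind slot:

#include <stdint.h>
#include <stdio.h>

#define I915_GEM_HWS_GGTT_BIND		0x46
#define I915_GEM_HWS_GGTT_BIND_ADDR	(I915_GEM_HWS_GGTT_BIND * sizeof(uint32_t))

int main(void)
{
	/* dword index 0x46 -> byte offset 0x118 in the status page */
	printf("0x%zx\n", (size_t)I915_GEM_HWS_GGTT_BIND_ADDR);
	return 0;
}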

View file

@ -1445,6 +1445,20 @@ void intel_engine_destroy_pinned_context(struct intel_context *ce)
intel_context_put(ce);
}
static struct intel_context *
create_ggtt_bind_context(struct intel_engine_cs *engine)
{
static struct lock_class_key kernel;
/*
* MI_UPDATE_GTT can insert up to 511 PTE entries and there could be multiple
* bind requests at a time, so get a bigger ring.
*/
return intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_512K,
I915_GEM_HWS_GGTT_BIND_ADDR,
&kernel, "ggtt_bind_context");
}
static struct intel_context *
create_kernel_context(struct intel_engine_cs *engine)
{
@ -1468,7 +1482,7 @@ create_kernel_context(struct intel_engine_cs *engine)
*/
static int engine_init_common(struct intel_engine_cs *engine)
{
struct intel_context *ce;
struct intel_context *ce, *bce = NULL;
int ret;
engine->set_default_submission(engine);
@ -1484,17 +1498,34 @@ static int engine_init_common(struct intel_engine_cs *engine)
ce = create_kernel_context(engine);
if (IS_ERR(ce))
return PTR_ERR(ce);
/*
* Create a separate pinned context for GGTT updates with the blitter
* engine if the platform requires such a service. MI_UPDATE_GTT works on
* other engines as well, but BCS should be the less busy engine, so pick
* that for GGTT updates.
*/
if (i915_ggtt_require_binder(engine->i915) && engine->id == BCS0) {
bce = create_ggtt_bind_context(engine);
if (IS_ERR(bce)) {
ret = PTR_ERR(bce);
goto err_ce_context;
}
}
ret = measure_breadcrumb_dw(ce);
if (ret < 0)
goto err_context;
goto err_bce_context;
engine->emit_fini_breadcrumb_dw = ret;
engine->kernel_context = ce;
engine->bind_context = bce;
return 0;
err_context:
err_bce_context:
if (bce)
intel_engine_destroy_pinned_context(bce);
err_ce_context:
intel_engine_destroy_pinned_context(ce);
return ret;
}
@ -1564,6 +1595,10 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
if (engine->kernel_context)
intel_engine_destroy_pinned_context(engine->kernel_context);
if (engine->bind_context)
intel_engine_destroy_pinned_context(engine->bind_context);
GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
cleanup_status_page(engine);

View file

@ -416,6 +416,9 @@ struct intel_engine_cs {
struct llist_head barrier_tasks;
struct intel_context *kernel_context; /* pinned */
struct intel_context *bind_context; /* pinned, only for BCS0 */
/* mark the bind context's availability status */
bool bind_context_ready;
/**
* pinned_contexts_list: List of pinned contexts. This list is only

View file

@ -15,18 +15,24 @@
#include "display/intel_display.h"
#include "gem/i915_gem_lmem.h"
#include "intel_context.h"
#include "intel_ggtt_gmch.h"
#include "intel_gpu_commands.h"
#include "intel_gt.h"
#include "intel_gt_regs.h"
#include "intel_pci_config.h"
#include "intel_ring.h"
#include "i915_drv.h"
#include "i915_pci.h"
#include "i915_reg.h"
#include "i915_request.h"
#include "i915_scatterlist.h"
#include "i915_utils.h"
#include "i915_vgpu.h"
#include "intel_gtt.h"
#include "gen8_ppgtt.h"
#include "intel_engine_pm.h"
static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
unsigned long color,
@ -301,6 +307,145 @@ u64 gen8_ggtt_pte_encode(dma_addr_t addr,
return pte;
}
static bool should_update_ggtt_with_bind(struct i915_ggtt *ggtt)
{
struct intel_gt *gt = ggtt->vm.gt;
return intel_gt_is_bind_context_ready(gt);
}
static struct intel_context *gen8_ggtt_bind_get_ce(struct i915_ggtt *ggtt)
{
struct intel_context *ce;
struct intel_gt *gt = ggtt->vm.gt;
if (intel_gt_is_wedged(gt))
return NULL;
ce = gt->engine[BCS0]->bind_context;
GEM_BUG_ON(!ce);
/*
* If the GT is not already awake at this stage, then fall back
* to the PCI-based GGTT update; otherwise __intel_wakeref_get_first()
* would conflict with fs_reclaim trying to allocate memory while
* doing rpm_resume().
*/
if (!intel_gt_pm_get_if_awake(gt))
return NULL;
intel_engine_pm_get(ce->engine);
return ce;
}
static void gen8_ggtt_bind_put_ce(struct intel_context *ce)
{
intel_engine_pm_put(ce->engine);
intel_gt_pm_put(ce->engine->gt);
}
static bool gen8_ggtt_bind_ptes(struct i915_ggtt *ggtt, u32 offset,
struct sg_table *pages, u32 num_entries,
const gen8_pte_t pte)
{
struct i915_sched_attr attr = {};
struct intel_gt *gt = ggtt->vm.gt;
const gen8_pte_t scratch_pte = ggtt->vm.scratch[0]->encode;
struct sgt_iter iter;
struct i915_request *rq;
struct intel_context *ce;
u32 *cs;
if (!num_entries)
return true;
ce = gen8_ggtt_bind_get_ce(ggtt);
if (!ce)
return false;
if (pages)
iter = __sgt_iter(pages->sgl, true);
while (num_entries) {
int count = 0;
dma_addr_t addr;
/*
* MI_UPDATE_GTT can update 512 entries in a single command, but
* that ends up with an engine reset; 511 works.
*/
u32 n_ptes = min_t(u32, 511, num_entries);
if (mutex_lock_interruptible(&ce->timeline->mutex))
goto put_ce;
intel_context_enter(ce);
rq = __i915_request_create(ce, GFP_NOWAIT | GFP_ATOMIC);
intel_context_exit(ce);
if (IS_ERR(rq)) {
GT_TRACE(gt, "Failed to get bind request\n");
mutex_unlock(&ce->timeline->mutex);
goto put_ce;
}
cs = intel_ring_begin(rq, 2 * n_ptes + 2);
if (IS_ERR(cs)) {
GT_TRACE(gt, "Failed to ring space for GGTT bind\n");
i915_request_set_error_once(rq, PTR_ERR(cs));
/* once a request is created, it must be queued */
goto queue_err_rq;
}
*cs++ = MI_UPDATE_GTT | (2 * n_ptes);
*cs++ = offset << 12;
if (pages) {
for_each_sgt_daddr_next(addr, iter) {
if (count == n_ptes)
break;
*cs++ = lower_32_bits(pte | addr);
*cs++ = upper_32_bits(pte | addr);
count++;
}
/* fill remaining with scratch pte, if any */
if (count < n_ptes) {
memset64((u64 *)cs, scratch_pte,
n_ptes - count);
cs += (n_ptes - count) * 2;
}
} else {
memset64((u64 *)cs, pte, n_ptes);
cs += n_ptes * 2;
}
intel_ring_advance(rq, cs);
queue_err_rq:
i915_request_get(rq);
__i915_request_commit(rq);
__i915_request_queue(rq, &attr);
mutex_unlock(&ce->timeline->mutex);
/* This will break if the request is complete or after engine reset */
i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
if (rq->fence.error)
goto err_rq;
i915_request_put(rq);
num_entries -= n_ptes;
offset += n_ptes;
}
gen8_ggtt_bind_put_ce(ce);
return true;
err_rq:
i915_request_put(rq);
put_ce:
gen8_ggtt_bind_put_ce(ce);
return false;
}
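Note on gen8_ggtt_bind_ptes() above: the update is split into requests of at most 511 PTEs, and each request reserves two ring dwords per PTE plus the two-dword MI_UPDATE_GTT header. A standalone sketch of that batching arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t num_entries = 1300, offset = 0;	/* assumed example workload */

	while (num_entries) {
		uint32_t n_ptes = num_entries < 511 ? num_entries : 511;

		/* MI_UPDATE_GTT header (2 dwords) + lo/hi dword per PTE */
		printf("offset %u: %u PTEs, %u ring dwords\n",
		    offset, n_ptes, 2 * n_ptes + 2);

		num_entries -= n_ptes;
		offset += n_ptes;
	}
	return 0;
}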
static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
writeq(pte, addr);
@ -321,6 +466,21 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
ggtt->invalidate(ggtt);
}
static void gen8_ggtt_insert_page_bind(struct i915_address_space *vm,
dma_addr_t addr, u64 offset,
unsigned int pat_index, u32 flags)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
gen8_pte_t pte;
pte = ggtt->vm.pte_encode(addr, pat_index, flags);
if (should_update_ggtt_with_bind(i915_vm_to_ggtt(vm)) &&
gen8_ggtt_bind_ptes(ggtt, offset, NULL, 1, pte))
return ggtt->invalidate(ggtt);
gen8_ggtt_insert_page(vm, addr, offset, pat_index, flags);
}
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
struct i915_vma_resource *vma_res,
unsigned int pat_index,
@ -360,6 +520,50 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
ggtt->invalidate(ggtt);
}
static bool __gen8_ggtt_insert_entries_bind(struct i915_address_space *vm,
struct i915_vma_resource *vma_res,
unsigned int pat_index, u32 flags)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
gen8_pte_t scratch_pte = vm->scratch[0]->encode;
gen8_pte_t pte_encode;
u64 start, end;
pte_encode = ggtt->vm.pte_encode(0, pat_index, flags);
start = (vma_res->start - vma_res->guard) / I915_GTT_PAGE_SIZE;
end = start + vma_res->guard / I915_GTT_PAGE_SIZE;
if (!gen8_ggtt_bind_ptes(ggtt, start, NULL, end - start, scratch_pte))
goto err;
start = end;
end += (vma_res->node_size + vma_res->guard) / I915_GTT_PAGE_SIZE;
if (!gen8_ggtt_bind_ptes(ggtt, start, vma_res->bi.pages,
vma_res->node_size / I915_GTT_PAGE_SIZE, pte_encode))
goto err;
start += vma_res->node_size / I915_GTT_PAGE_SIZE;
if (!gen8_ggtt_bind_ptes(ggtt, start, NULL, end - start, scratch_pte))
goto err;
return true;
err:
return false;
}
static void gen8_ggtt_insert_entries_bind(struct i915_address_space *vm,
struct i915_vma_resource *vma_res,
unsigned int pat_index, u32 flags)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
if (should_update_ggtt_with_bind(i915_vm_to_ggtt(vm)) &&
__gen8_ggtt_insert_entries_bind(vm, vma_res, pat_index, flags))
return ggtt->invalidate(ggtt);
gen8_ggtt_insert_entries(vm, vma_res, pat_index, flags);
}
static void gen8_ggtt_clear_range(struct i915_address_space *vm,
u64 start, u64 length)
{
@ -381,6 +585,27 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
gen8_set_pte(&gtt_base[i], scratch_pte);
}
static void gen8_ggtt_scratch_range_bind(struct i915_address_space *vm,
u64 start, u64 length)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
const gen8_pte_t scratch_pte = vm->scratch[0]->encode;
const int max_entries = ggtt_total_entries(ggtt) - first_entry;
if (WARN(num_entries > max_entries,
"First entry = %d; Num entries = %d (max=%d)\n",
first_entry, num_entries, max_entries))
num_entries = max_entries;
if (should_update_ggtt_with_bind(ggtt) && gen8_ggtt_bind_ptes(ggtt, first_entry,
NULL, num_entries, scratch_pte))
return ggtt->invalidate(ggtt);
gen8_ggtt_clear_range(vm, start, length);
}
static void gen6_ggtt_insert_page(struct i915_address_space *vm,
dma_addr_t addr,
u64 offset,
@ -947,13 +1172,20 @@ static unsigned int gen6_gttadr_offset(struct drm_i915_private *i915)
static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
struct drm_i915_private *i915 = ggtt->vm.i915;
struct intel_uncore *uncore = ggtt->vm.gt->uncore;
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
phys_addr_t phys_addr;
u32 pte_flags;
int ret;
GEM_WARN_ON(pci_resource_len(pdev, GEN4_GTTMMADR_BAR) != gen6_gttmmadr_size(i915));
phys_addr = pci_resource_start(pdev, GEN4_GTTMMADR_BAR) + gen6_gttadr_offset(i915);
if (i915_direct_stolen_access(i915)) {
drm_dbg(&i915->drm, "Using direct GSM access\n");
phys_addr = intel_uncore_read64(uncore, GEN6_GSMBASE) & GEN11_BDSM_MASK;
} else {
phys_addr = pci_resource_start(pdev, GEN4_GTTMMADR_BAR) + gen6_gttadr_offset(i915);
}
if (needs_wc_ggtt_mapping(i915))
ggtt->gsm = ioremap_wc(phys_addr, size);
@ -992,6 +1224,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
struct drm_i915_private *i915 = ggtt->vm.i915;
struct intel_uncore *uncore = ggtt->vm.gt->uncore;
struct pci_dev *pdev = i915->drm.pdev;
phys_addr_t phys_addr;
bus_addr_t addr;
@ -1008,7 +1241,13 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
return ret;
GEM_WARN_ON(len != gen6_gttmmadr_size(i915));
phys_addr = addr + gen6_gttadr_offset(i915);
if (i915_direct_stolen_access(i915)) {
drm_dbg(&i915->drm, "Using direct GSM access\n");
phys_addr = intel_uncore_read64(uncore, GEN6_GSMBASE) & GEN11_BDSM_MASK;
} else {
phys_addr = addr + gen6_gttadr_offset(i915);
}
if (needs_wc_ggtt_mapping(i915))
flags = BUS_SPACE_MAP_PREFETCHABLE;
@ -1141,6 +1380,17 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
}
if (i915_ggtt_require_binder(i915)) {
ggtt->vm.scratch_range = gen8_ggtt_scratch_range_bind;
ggtt->vm.insert_page = gen8_ggtt_insert_page_bind;
ggtt->vm.insert_entries = gen8_ggtt_insert_entries_bind;
/*
* If the GPU is hung, we might bind VMAs for error capture.
* Fall back to CPU GGTT updates in that case.
*/
ggtt->vm.raw_insert_page = gen8_ggtt_insert_page;
}
if (intel_uc_wants_guc(&ggtt->vm.gt->uc))
ggtt->invalidate = guc_ggtt_invalidate;
else

View file

@ -1073,6 +1073,55 @@ err:
#endif
static void __intel_gt_bind_context_set_ready(struct intel_gt *gt, bool ready)
{
struct intel_engine_cs *engine = gt->engine[BCS0];
if (engine && engine->bind_context)
engine->bind_context_ready = ready;
}
/**
* intel_gt_bind_context_set_ready - Set the context binding as ready
*
* @gt: GT structure
*
* This function marks the binder context as ready.
*/
void intel_gt_bind_context_set_ready(struct intel_gt *gt)
{
__intel_gt_bind_context_set_ready(gt, true);
}
/**
* intel_gt_bind_context_set_unready - Set the context binding as not ready
* @gt: GT structure
*
* This function marks the binder context as not ready.
*/
void intel_gt_bind_context_set_unready(struct intel_gt *gt)
{
__intel_gt_bind_context_set_ready(gt, false);
}
/**
* intel_gt_is_bind_context_ready - Check if context binding is ready
*
* @gt: GT structure
*
* This function returns binder context's ready status.
*/
bool intel_gt_is_bind_context_ready(struct intel_gt *gt)
{
struct intel_engine_cs *engine = gt->engine[BCS0];
if (engine)
return engine->bind_context_ready;
return false;
}
int intel_gt_tiles_init(struct drm_i915_private *i915)
{
struct intel_gt *gt;

View file

@ -148,4 +148,7 @@ enum i915_map_type intel_gt_coherent_map_type(struct intel_gt *gt,
struct drm_i915_gem_object *obj,
bool always_coherent);
void intel_gt_bind_context_set_ready(struct intel_gt *gt);
void intel_gt_bind_context_set_unready(struct intel_gt *gt);
bool intel_gt_is_bind_context_ready(struct intel_gt *gt);
#endif /* __INTEL_GT_H__ */

View file

@ -296,6 +296,7 @@ int intel_gt_resume(struct intel_gt *gt)
out_fw:
intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
intel_gt_pm_put(gt);
intel_gt_bind_context_set_ready(gt);
return err;
err_wedged:
@ -322,6 +323,7 @@ static void wait_for_suspend(struct intel_gt *gt)
void intel_gt_suspend_prepare(struct intel_gt *gt)
{
intel_gt_bind_context_set_unready(gt);
user_forcewake(gt, true);
wait_for_suspend(gt);
}
@ -375,6 +377,7 @@ void intel_gt_suspend_late(struct intel_gt *gt)
void intel_gt_runtime_suspend(struct intel_gt *gt)
{
intel_gt_bind_context_set_unready(gt);
intel_uc_runtime_suspend(&gt->uc);
GT_TRACE(gt, "\n");
@ -392,6 +395,7 @@ int intel_gt_runtime_resume(struct intel_gt *gt)
if (ret)
return ret;
intel_gt_bind_context_set_ready(gt);
return 0;
}

View file

@ -21,6 +21,12 @@
#include "intel_gt_regs.h"
#include "intel_gtt.h"
bool i915_ggtt_require_binder(struct drm_i915_private *i915)
{
/* Wa_13010847436 & Wa_14019519902 */
return !i915_direct_stolen_access(i915) &&
MEDIA_VER_FULL(i915) == IP_VER(13, 0);
}
static bool intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *i915)
{

View file

@ -171,6 +171,9 @@ struct intel_gt;
#define for_each_sgt_daddr(__dp, __iter, __sgt) \
__for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE)
#define for_each_sgt_daddr_next(__dp, __iter) \
__for_each_daddr_next(__dp, __iter, I915_GTT_PAGE_SIZE)
struct i915_page_table {
struct drm_i915_gem_object *base;
union {
@ -690,4 +693,6 @@ static inline struct sgt_dma {
return (struct sgt_dma){ sg, addr, addr + sg_dma_len(sg) };
}
bool i915_ggtt_require_binder(struct drm_i915_private *i915);
#endif

View file

@ -157,8 +157,8 @@ region_lmem_init(struct intel_memory_region *mem)
#ifdef __linux__
if (!io_mapping_init_wc(&mem->iomap,
mem->io_start,
mem->io_size))
mem->io.start,
resource_size(&mem->io)))
return -EIO;
#else
struct drm_i915_private *i915 = mem->i915;
@ -167,20 +167,20 @@ region_lmem_init(struct intel_memory_region *mem)
int i;
bus_space_handle_t bsh;
start = atop(mem->io_start);
end = start + atop(mem->io_size);
start = atop(mem->io.start);
end = start + atop(resource_size(&mem->io));
uvm_page_physload(start, end, start, end, PHYSLOAD_DEVICE);
pgs = PHYS_TO_VM_PAGE(mem->io_start);
for (i = 0; i < atop(mem->io_size); i++)
pgs = PHYS_TO_VM_PAGE(mem->io.start);
for (i = 0; i < atop(resource_size(&mem->io)); i++)
atomic_setbits_int(&(pgs[i].pg_flags), PG_PMAP_WC);
if (bus_space_map(i915->bst, mem->io_start, mem->io_size,
if (bus_space_map(i915->bst, mem->io.start, resource_size(&mem->io),
BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE, &bsh))
panic("can't map lmem");
mem->iomap.base = mem->io_start;
mem->iomap.size = mem->io_size;
mem->iomap.base = mem->io.start;
mem->iomap.size = resource_size(&mem->io);
mem->iomap.iomem = bus_space_vaddr(i915->bst, bsh);
#endif
@ -280,7 +280,7 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
lmem_size -= tile_stolen;
} else {
/* Stolen starts from GSMBASE without CCS */
lmem_size = intel_uncore_read64(&i915->uncore, GEN12_GSMBASE);
lmem_size = intel_uncore_read64(&i915->uncore, GEN6_GSMBASE);
}
i915_resize_lmem_bar(i915, lmem_size);
@ -326,14 +326,6 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
if (err)
goto err_region_put;
drm_dbg(&i915->drm, "Local memory: %pR\n", &mem->region);
drm_dbg(&i915->drm, "Local memory IO start: %pa\n",
&mem->io_start);
drm_info(&i915->drm, "Local memory IO size: %pa\n",
&mem->io_size);
drm_info(&i915->drm, "Local memory available: %pa\n",
&lmem_size);
if (io_size < lmem_size)
drm_info(&i915->drm, "Using a reduced BAR size of %lluMiB. Consider enabling 'Resizable BAR' or similar, if available in the BIOS.\n",
(u64)io_size >> 20);

View file

@ -199,8 +199,8 @@ static struct drm_i915_gem_object *create_lmem(struct intel_gt *gt)
* of pages. To succeed with both allocations, especially in case of Small
* BAR, try to allocate no more than quarter of mappable memory.
*/
if (mr && size > mr->io_size / 4)
size = mr->io_size / 4;
if (mr && size > resource_size(&mr->io) / 4)
size = resource_size(&mr->io) / 4;
return i915_gem_object_create_lmem(gt->i915, size, I915_BO_ALLOC_CONTIGUOUS);
}

View file

@ -321,4 +321,9 @@ static const struct pci_matchid i915_devices[] = {
{ 0x8086, 0xa7a8 },
{ 0x8086, 0xa7aa },
{ 0x8086, 0xa7ab },
{ 0x8086, 0x7d40 },
{ 0x8086, 0x7d60 },
{ 0x8086, 0x7d45 },
{ 0x8086, 0x7d55 },
{ 0x8086, 0x7dd5 },
};

View file

@ -2263,6 +2263,11 @@ inteldrm_attach(struct device *parent, struct device *self, void *aux)
dev_priv->memex = pa->pa_memex;
dev_priv->vga_regs = &dev_priv->bar;
id = drm_find_description(PCI_VENDOR(pa->pa_id),
PCI_PRODUCT(pa->pa_id), pciidlist);
dev_priv->id = id;
info = (struct intel_device_info *)id->driver_data;
if (PCI_CLASS(pa->pa_class) == PCI_CLASS_DISPLAY &&
PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_DISPLAY_VGA &&
(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG)
@ -2281,6 +2286,18 @@ inteldrm_attach(struct device *parent, struct device *self, void *aux)
}
#endif
/*
* The Meteor Lake GOP framebuffer doesn't pass the efifb pci bar tests;
* this runs too early for IS_METEORLAKE, which uses runtime info
*/
if (info->platform == INTEL_METEORLAKE) {
dev_priv->primary = 1;
dev_priv->console = 1;
#if NEFIFB > 0
efifb_detach();
#endif
}
printf("\n");
dev = drm_attach_pci(&i915_drm_driver, pa, 0, dev_priv->primary,
@ -2290,11 +2307,6 @@ inteldrm_attach(struct device *parent, struct device *self, void *aux)
return;
}
id = drm_find_description(PCI_VENDOR(pa->pa_id),
PCI_PRODUCT(pa->pa_id), pciidlist);
dev_priv->id = id;
info = (struct intel_device_info *)id->driver_data;
/* Device parameters start as a copy of module parameters. */
i915_params_copy(&dev_priv->params, &i915_modparams);
dev_priv->params.request_timeout_ms = 0;

View file

@ -1206,7 +1206,7 @@ i915_vma_coredump_create(const struct intel_gt *gt,
dma_addr_t offset = dma - mem->region.start;
void __iomem *s;
if (offset + PAGE_SIZE > mem->io_size) {
if (offset + PAGE_SIZE > resource_size(&mem->io)) {
ret = -EINVAL;
break;
}

View file

@ -838,7 +838,6 @@ static const struct intel_device_info mtl_info = {
.has_pxp = 1,
.memory_regions = REGION_SMEM | REGION_STOLEN_LMEM,
.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(CCS0),
.require_force_probe = 1,
MTL_CACHELEVEL,
};

View file

@ -2803,26 +2803,6 @@ oa_configure_all_contexts(struct i915_perf_stream *stream,
return 0;
}
static int
gen12_configure_all_contexts(struct i915_perf_stream *stream,
const struct i915_oa_config *oa_config,
struct i915_active *active)
{
struct flex regs[] = {
{
GEN8_R_PWR_CLK_STATE(RENDER_RING_BASE),
CTX_R_PWR_CLK_STATE,
},
};
if (stream->engine->class != RENDER_CLASS)
return 0;
return oa_configure_all_contexts(stream,
regs, ARRAY_SIZE(regs),
active);
}
static int
lrc_configure_all_contexts(struct i915_perf_stream *stream,
const struct i915_oa_config *oa_config,
@ -2929,7 +2909,6 @@ gen12_enable_metric_set(struct i915_perf_stream *stream,
{
struct drm_i915_private *i915 = stream->perf->i915;
struct intel_uncore *uncore = stream->uncore;
struct i915_oa_config *oa_config = stream->oa_config;
bool periodic = stream->periodic;
u32 period_exponent = stream->period_exponent;
u32 sqcnt1;
@ -2973,15 +2952,6 @@ gen12_enable_metric_set(struct i915_perf_stream *stream,
intel_uncore_rmw(uncore, GEN12_SQCNT1, 0, sqcnt1);
/*
* Update all contexts prior writing the mux configurations as we need
* to make sure all slices/subslices are ON before writing to NOA
* registers.
*/
ret = gen12_configure_all_contexts(stream, oa_config, active);
if (ret)
return ret;
/*
* For Gen12, performance counters are context
* saved/restored. Only enable it for the context that
@ -3036,9 +3006,6 @@ static void gen12_disable_metric_set(struct i915_perf_stream *stream)
_MASKED_BIT_DISABLE(GEN12_DISABLE_DOP_GATING));
}
/* Reset all contexts' slices/subslices configurations. */
gen12_configure_all_contexts(stream, NULL, NULL);
/* disable the context save/restore or OAR counters */
if (stream->ctx)
gen12_configure_oar_context(stream, NULL);

View file

@ -513,7 +513,7 @@ static int query_memregion_info(struct drm_i915_private *i915,
info.probed_size = mr->total;
if (mr->type == INTEL_MEMORY_LOCAL)
info.probed_cpu_visible_size = mr->io_size;
info.probed_cpu_visible_size = resource_size(&mr->io);
else
info.probed_cpu_visible_size = mr->total;

View file

@ -5452,6 +5452,9 @@
#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
#define GEN6_PCODE_DATA1 _MMIO(0x13812C)
#define MTL_PCODE_STOLEN_ACCESS _MMIO(0x138914)
#define STOLEN_ACCESS_ALLOWED 0x1
/* IVYBRIDGE DPF */
#define GEN7_L3CDERRST1(slice) _MMIO(0xB008 + (slice) * 0x200) /* L3CD Error Status 1 */
#define GEN7_L3CDERRST1_ROW_MASK (0x7ff << 14)
@ -6581,9 +6584,10 @@ enum skl_power_gate {
#define GMS_MASK REG_GENMASK(15, 8)
#define GGMS_MASK REG_GENMASK(7, 6)
#define GEN12_GSMBASE _MMIO(0x108100)
#define GEN12_DSMBASE _MMIO(0x1080C0)
#define GEN12_BDSM_MASK REG_GENMASK64(63, 20)
#define GEN6_GSMBASE _MMIO(0x108100)
#define GEN6_DSMBASE _MMIO(0x1080C0)
#define GEN6_BDSM_MASK REG_GENMASK64(31, 20)
#define GEN11_BDSM_MASK REG_GENMASK64(63, 20)
#define XEHP_CLOCK_GATE_DIS _MMIO(0x101014)
#define SGSI_SIDECLK_DIS REG_BIT(17)

View file

@ -91,6 +91,16 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
((__dp) = (__iter).dma + (__iter).curr), (__iter).sgp; \
(((__iter).curr += (__step)) >= (__iter).max) ? \
(__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0)
/**
* __for_each_daddr_next - iterates over the device addresses with pre-initialized iterator.
* @__dp: Device address (output)
* @__iter: 'struct sgt_iter' (iterator state, external)
* @__step: step size
*/
#define __for_each_daddr_next(__dp, __iter, __step) \
for (; ((__dp) = (__iter).dma + (__iter).curr), (__iter).sgp; \
(((__iter).curr += (__step)) >= (__iter).max) ? \
(__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0)
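Unlike for_each_sgt_daddr, the _next variant does not re-initialize the iterator, so a walk can resume across calls; this is what lets gen8_ggtt_bind_ptes() continue one sg walk across several 511-entry batches. A generic sketch of the pattern (plain arrays standing in for the sg list):

#include <stdio.h>

struct iter {
	const int *p, *end;
};

static int next(struct iter *it, int *out)
{
	if (it->p == it->end)
		return 0;
	*out = *it->p++;
	return 1;
}

int main(void)
{
	int addrs[] = { 10, 20, 30, 40, 50 };
	struct iter it = { addrs, addrs + 5 };	/* initialized once, like __sgt_iter */
	int v, batch;

	for (batch = 0; batch < 2; batch++) {
		int count = 0;

		/* resumes where the previous batch stopped */
		while (count < 3 && next(&it, &v)) {
			printf("batch %d: %d\n", batch, v);
			count++;
		}
	}
	return 0;
}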
/**
* for_each_sgt_page - iterate over the pages of the given sg_table

View file

@ -8,6 +8,7 @@
#include <drm/drm_drv.h>
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_utils.h"
#include <sys/syslog.h>
@ -138,3 +139,19 @@ bool i915_vtd_active(struct drm_i915_private *i915)
return i915_run_as_guest();
#endif
}
bool i915_direct_stolen_access(struct drm_i915_private *i915)
{
/*
* Wa_22018444074
*
* Access via BAR can hang MTL, go directly to GSM/DSM,
* except for VM guests which won't have access to it.
*
* Normally this would not work but on MTL the system firmware
* should have relaxed the access permissions sufficiently.
* 0x138914==0x1 indicates that the firmware has done its job.
*/
return IS_METEORLAKE(i915) && !i915_run_as_guest() &&
intel_uncore_read(&i915->uncore, MTL_PCODE_STOLEN_ACCESS) == STOLEN_ACCESS_ALLOWED;
}

View file

@ -401,4 +401,6 @@ static inline bool i915_run_as_guest(void)
bool i915_vtd_active(struct drm_i915_private *i915);
bool i915_direct_stolen_access(struct drm_i915_private *i915);
#endif /* !__I915_UTILS_H */

View file

@ -50,7 +50,7 @@ static int __iopagetest(struct intel_memory_region *mem,
if (memchr_inv(result, value, sizeof(result))) {
dev_err(mem->i915->drm.dev,
"Failed to read back from memory region:%pR at [%pa + %pa] for %ps; wrote %x, read (%x, %x, %x)\n",
&mem->region, &mem->io_start, &offset, caller,
&mem->region, &mem->io.start, &offset, caller,
value, result[0], result[1], result[2]);
return -EINVAL;
}
@ -70,11 +70,11 @@ static int iopagetest(struct intel_memory_region *mem,
int err;
int i;
va = ioremap_wc(mem->io_start + offset, PAGE_SIZE);
va = ioremap_wc(mem->io.start + offset, PAGE_SIZE);
if (!va) {
dev_err(mem->i915->drm.dev,
"Failed to ioremap memory region [%pa + %pa] for %ps\n",
&mem->io_start, &offset, caller);
&mem->io.start, &offset, caller);
return -EFAULT;
}
@ -106,10 +106,10 @@ static int iomemtest(struct intel_memory_region *mem,
resource_size_t last, page;
int err;
if (mem->io_size < PAGE_SIZE)
if (resource_size(&mem->io) < PAGE_SIZE)
return 0;
last = mem->io_size - PAGE_SIZE;
last = resource_size(&mem->io) - PAGE_SIZE;
/*
* Quick test to check read/write access to the iomap (backing store).
@ -211,7 +211,7 @@ static int intel_memory_region_memtest(struct intel_memory_region *mem,
struct drm_i915_private *i915 = mem->i915;
int err = 0;
if (!mem->io_start)
if (!mem->io.start)
return 0;
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) || i915->params.memtest)
@ -240,8 +240,7 @@ intel_memory_region_create(struct drm_i915_private *i915,
mem->i915 = i915;
mem->region = DEFINE_RES_MEM(start, size);
mem->io_start = io_start;
mem->io_size = io_size;
mem->io = DEFINE_RES_MEM(io_start, io_size);
mem->min_page_size = min_page_size;
mem->ops = ops;
mem->total = size;
@ -360,6 +359,24 @@ int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
i915->mm.regions[i] = mem;
}
for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
struct intel_memory_region *mem = i915->mm.regions[i];
u64 region_size, io_size;
if (!mem)
continue;
region_size = resource_size(&mem->region) >> 20;
io_size = resource_size(&mem->io) >> 20;
if (resource_size(&mem->io))
drm_dbg(&i915->drm, "Memory region(%d): %s: %llu MiB %pR, io: %llu MiB %pR\n",
mem->id, mem->name, region_size, &mem->region, io_size, &mem->io);
else
drm_dbg(&i915->drm, "Memory region(%d): %s: %llu MiB %pR, io: n/a\n",
mem->id, mem->name, region_size, &mem->region);
}
return 0;
out_cleanup:

View file

@ -73,8 +73,7 @@ struct intel_memory_region {
struct io_mapping iomap;
struct resource region;
resource_size_t io_start;
resource_size_t io_size;
struct resource io;
resource_size_t min_page_size;
resource_size_t total;

View file

@ -93,7 +93,7 @@ int intel_region_ttm_init(struct intel_memory_region *mem)
ret = i915_ttm_buddy_man_init(bdev, mem_type, false,
resource_size(&mem->region),
mem->io_size,
resource_size(&mem->io),
mem->min_page_size, PAGE_SIZE);
if (ret)
return ret;
@ -225,16 +225,16 @@ intel_region_ttm_resource_alloc(struct intel_memory_region *mem,
goto out;
}
place.lpfn = place.fpfn + (size >> PAGE_SHIFT);
} else if (mem->io_size && mem->io_size < mem->total) {
} else if (resource_size(&mem->io) && resource_size(&mem->io) < mem->total) {
if (flags & I915_BO_ALLOC_GPU_ONLY) {
place.flags |= TTM_PL_FLAG_TOPDOWN;
} else {
place.fpfn = 0;
if (WARN_ON(overflows_type(mem->io_size >> PAGE_SHIFT, place.lpfn))) {
if (WARN_ON(overflows_type(resource_size(&mem->io) >> PAGE_SHIFT, place.lpfn))) {
ret = -E2BIG;
goto out;
}
place.lpfn = mem->io_size >> PAGE_SHIFT;
place.lpfn = resource_size(&mem->io) >> PAGE_SHIFT;
}
}

View file

@ -1833,7 +1833,10 @@ static const struct intel_forcewake_range __mtl_fw_ranges[] = {
GEN_FW_RANGE(0x24000, 0x2ffff, 0), /*
0x24000 - 0x2407f: always on
0x24080 - 0x2ffff: reserved */
GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT)
GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),
GEN_FW_RANGE(0x40000, 0x1901ef, 0),
GEN_FW_RANGE(0x1901f0, 0x1901f3, FORCEWAKE_GT)
/* FIXME: WA to wake GT while triggering H2G */
};
/*

View file

@ -10,21 +10,12 @@
#include "intel_wakeref.h"
#include "i915_drv.h"
static void rpm_get(struct intel_wakeref *wf)
{
wf->wakeref = intel_runtime_pm_get(&wf->i915->runtime_pm);
}
static void rpm_put(struct intel_wakeref *wf)
{
intel_wakeref_t wakeref = fetch_and_zero(&wf->wakeref);
intel_runtime_pm_put(&wf->i915->runtime_pm, wakeref);
INTEL_WAKEREF_BUG_ON(!wakeref);
}
int __intel_wakeref_get_first(struct intel_wakeref *wf)
{
intel_wakeref_t wakeref;
int ret = 0;
wakeref = intel_runtime_pm_get(&wf->i915->runtime_pm);
/*
* Treat get/put as different subclasses, as we may need to run
* the put callback from under the shrinker and do not want to
@ -32,41 +23,52 @@ int __intel_wakeref_get_first(struct intel_wakeref *wf)
* upon acquiring the wakeref.
*/
mutex_lock_nested(&wf->mutex, SINGLE_DEPTH_NESTING);
if (!atomic_read(&wf->count)) {
int err;
INTEL_WAKEREF_BUG_ON(wf->wakeref);
wf->wakeref = wakeref;
wakeref = 0;
rpm_get(wf);
err = wf->ops->get(wf);
if (unlikely(err)) {
rpm_put(wf);
mutex_unlock(&wf->mutex);
return err;
ret = wf->ops->get(wf);
if (ret) {
wakeref = xchg(&wf->wakeref, 0);
wake_up_var(&wf->wakeref);
goto unlock;
}
smp_mb__before_atomic(); /* release wf->count */
}
atomic_inc(&wf->count);
mutex_unlock(&wf->mutex);
atomic_inc(&wf->count);
INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
return 0;
unlock:
mutex_unlock(&wf->mutex);
if (unlikely(wakeref))
intel_runtime_pm_put(&wf->i915->runtime_pm, wakeref);
return ret;
}
static void ____intel_wakeref_put_last(struct intel_wakeref *wf)
{
intel_wakeref_t wakeref = 0;
INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
if (unlikely(!atomic_dec_and_test(&wf->count)))
goto unlock;
/* ops->put() must reschedule its own release on error/deferral */
if (likely(!wf->ops->put(wf))) {
rpm_put(wf);
INTEL_WAKEREF_BUG_ON(!wf->wakeref);
wakeref = xchg(&wf->wakeref, 0);
wake_up_var(&wf->wakeref);
}
unlock:
mutex_unlock(&wf->mutex);
if (wakeref)
intel_runtime_pm_put(&wf->i915->runtime_pm, wakeref);
}
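
Both paths now hand the runtime-pm reference around with xchg(): whoever swaps a non-zero value out of wf->wakeref owns it, and the release happens only after the mutex is dropped. A userspace model of that take-ownership-then-release-outside-the-lock idiom (names hypothetical):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic long wakeref;	/* 0 means "not held" */

/* Take sole ownership of whatever is stored, leaving 0 behind; at
 * most one caller gets a non-zero value, so it is released once. */
static long
take_wakeref(void)
{
	return atomic_exchange(&wakeref, 0);	/* xchg() */
}

int
main(void)
{
	atomic_store(&wakeref, 42);

	long ref = take_wakeref();
	if (ref)
		printf("release %ld outside the lock\n", ref);
	if (take_wakeref() == 0)
		printf("second taker sees nothing to release\n");
	return 0;
}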
void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags)

View file

@ -14,8 +14,8 @@
#define PXP43_CMDID_NEW_HUC_AUTH 0x0000003F /* MTL+ */
#define PXP43_CMDID_INIT_SESSION 0x00000036
/* PXP-Packet sizes for MTL's GSCCS-HECI instruction */
#define PXP43_MAX_HECI_INOUT_SIZE (SZ_32K)
/* PXP-Packet size for MTL's GSCCS-HECI instruction is spec'd at 65K before page alignment */
#define PXP43_MAX_HECI_INOUT_SIZE (PAGE_ALIGN(SZ_64K + SZ_1K))
/* PXP-Packet size for MTL's NEW_HUC_AUTH instruction */
#define PXP43_HUC_AUTH_INOUT_SIZE (SZ_4K)
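
The new bound is the spec'd 65K payload rounded up to whole pages: with 4 KiB pages, SZ_64K + SZ_1K = 66560 bytes aligns up to 17 pages, i.e. 69632 bytes. A tiny self-check of that arithmetic, approximating the kernel macros and assuming a 4 KiB page size:

#include <assert.h>

/* Approximations of the kernel macros, assuming 4 KiB pages. */
#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define SZ_1K		1024UL
#define SZ_64K		(64UL * SZ_1K)

int
main(void)
{
	/* 64K + 1K = 66560 bytes -> rounds up to 17 pages = 69632. */
	assert(PAGE_ALIGN(SZ_64K + SZ_1K) == 17 * PAGE_SIZE);
	return 0;
}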

View file

@ -544,8 +544,8 @@ static u64 igt_object_mappable_total(struct drm_i915_gem_object *obj)
u64 start = drm_buddy_block_offset(block);
u64 end = start + drm_buddy_block_size(mm, block);
if (start < mr->io_size)
total += min_t(u64, end, mr->io_size) - start;
if (start < resource_size(&mr->io))
total += min_t(u64, end, resource_size(&mr->io)) - start;
}
return total;

View file

@ -1,4 +1,4 @@
/* $OpenBSD: if_rge.c,v 1.28 2024/08/10 21:53:06 patrick Exp $ */
/* $OpenBSD: if_rge.c,v 1.29 2024/08/12 06:47:11 dlg Exp $ */
/*
* Copyright (c) 2019, 2020, 2023, 2024
@ -581,7 +581,7 @@ rge_start(struct ifqueue *ifq)
free -= idx;
for (;;) {
if (RGE_TX_NSEGS >= free + 2) {
if (free < RGE_TX_NSEGS + 2) {
ifq_set_oactive(&ifp->if_snd);
break;
}
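
The old test compared in the wrong direction: it set oactive only once very few descriptors were left, so the loop could keep queueing when there was no longer room for a maximally fragmented packet. The fixed test stops as soon as fewer than RGE_TX_NSEGS + 2 descriptors are free. A standalone sketch contrasting the two predicates (the RGE_TX_NSEGS value is assumed for illustration):

#include <stdbool.h>
#include <stdio.h>

#define RGE_TX_NSEGS	8	/* illustrative; see the driver header */

/* Old, inverted test: stops only once free <= RGE_TX_NSEGS - 2. */
static bool old_full(int free) { return RGE_TX_NSEGS >= free + 2; }

/* Fixed test: stop while an RGE_TX_NSEGS-segment packet plus two
 * descriptors of slack no longer fits. */
static bool new_full(int free) { return free < RGE_TX_NSEGS + 2; }

int
main(void)
{
	/* At free = 7 an 8-segment packet cannot fit, yet the old
	 * test kept queueing; the fixed one sets oactive. */
	for (int free = 5; free <= 11; free++)
		printf("free=%2d old=%d new=%d\n",
		    free, old_full(free), new_full(free));
	return 0;
}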

View file

@ -1,4 +1,4 @@
/* $OpenBSD: virtio.c,v 1.29 2024/08/01 11:13:19 sf Exp $ */
/* $OpenBSD: virtio.c,v 1.30 2024/08/13 08:47:28 sf Exp $ */
/* $NetBSD: virtio.c,v 1.3 2011/11/02 23:05:52 njoly Exp $ */
/*
@ -311,10 +311,11 @@ virtio_init_vq(struct virtio_softc *sc, struct virtqueue *vq)
vq->vq_entries[i].qe_index = i;
}
bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap, 0, vq->vq_bytesize,
BUS_DMASYNC_PREWRITE);
/* enqueue/dequeue status */
vq->vq_avail_idx = 0;
vq->vq_used_idx = 0;
vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
vq_sync_uring(sc, vq, BUS_DMASYNC_PREREAD);
vq->vq_queued = 1;
}

View file

@ -1,4 +1,4 @@
/* $OpenBSD: sys_generic.c,v 1.157 2024/04/10 10:05:26 claudio Exp $ */
/* $OpenBSD: sys_generic.c,v 1.158 2024/08/12 19:32:05 anton Exp $ */
/* $NetBSD: sys_generic.c,v 1.24 1996/03/29 00:25:32 cgd Exp $ */
/*
@ -68,11 +68,16 @@
* 2 - print ppoll(2) information, somewhat verbose
* 3 - print pselect(2) and ppoll(2) information, very verbose
*/
int kqpoll_debug = 0;
/* #define KQPOLL_DEBUG */
#ifdef KQPOLL_DEBUG
int kqpoll_debug = 1;
#define DPRINTFN(v, x...) do { \
	if (kqpoll_debug > (v)) { \
		printf("%s(%d): ", curproc->p_p->ps_comm, curproc->p_tid); \
		printf(x); \
	} \
} while (0)
#else
#define DPRINTFN(v, x...) do {} while (0)
#endif
int pselregister(struct proc *, fd_set **, fd_set **, int, int *, int *);
int pselcollect(struct proc *, struct kevent *, fd_set **, int *);
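
The debug printfs are now compiled out entirely unless KQPOLL_DEBUG is defined, so the fast path carries no run-time check at all. A self-contained sketch of the same compile-time switch:

#include <stdio.h>

/* Uncomment to compile the debug output in. */
/* #define KQPOLL_DEBUG */

#ifdef KQPOLL_DEBUG
int kqpoll_debug = 1;
#define DPRINTFN(v, x...) do { \
	if (kqpoll_debug > (v)) \
		printf(x); \
} while (0)
#else
#define DPRINTFN(v, x...) do {} while (0)
#endif

int
main(void)
{
	DPRINTFN(0, "printed only when KQPOLL_DEBUG is defined\n");
	return 0;
}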

View file

@ -1,4 +1,4 @@
/* $OpenBSD: uipc_domain.c,v 1.65 2024/01/11 14:15:11 bluhm Exp $ */
/* $OpenBSD: uipc_domain.c,v 1.66 2024/08/12 11:25:27 bluhm Exp $ */
/* $NetBSD: uipc_domain.c,v 1.14 1996/02/09 19:00:44 christos Exp $ */
/*
@ -90,8 +90,10 @@ domaininit(void)
max_linkhdr = 64;
max_hdr = max_linkhdr + max_protohdr;
timeout_set_proc(&pffast_timeout, pffasttimo, &pffast_timeout);
timeout_set_proc(&pfslow_timeout, pfslowtimo, &pfslow_timeout);
timeout_set_flags(&pffast_timeout, pffasttimo, &pffast_timeout,
KCLOCK_NONE, TIMEOUT_PROC | TIMEOUT_MPSAFE);
timeout_set_flags(&pfslow_timeout, pfslowtimo, &pfslow_timeout,
KCLOCK_NONE, TIMEOUT_PROC | TIMEOUT_MPSAFE);
timeout_add(&pffast_timeout, 1);
timeout_add(&pfslow_timeout, 1);
}

View file

@ -1,4 +1,4 @@
/* $OpenBSD: bpf.c,v 1.223 2024/08/05 23:56:10 dlg Exp $ */
/* $OpenBSD: bpf.c,v 1.224 2024/08/12 17:02:58 mvs Exp $ */
/* $NetBSD: bpf.c,v 1.33 1997/02/21 23:59:35 thorpej Exp $ */
/*
@ -83,11 +83,16 @@
#define PRINET 26 /* interruptible */
/*
* Locks used to protect data:
* a atomic
*/
/*
* The default read buffer size is patchable.
*/
int bpf_bufsize = BPF_BUFSIZE;
int bpf_maxbufsize = BPF_MAXBUFSIZE;
int bpf_bufsize = BPF_BUFSIZE; /* [a] */
int bpf_maxbufsize = BPF_MAXBUFSIZE; /* [a] */
/*
* bpf_iflist is the list of interfaces; each corresponds to an ifnet
@ -117,8 +122,6 @@ int filt_bpfread(struct knote *, long);
int filt_bpfreadmodify(struct kevent *, struct knote *);
int filt_bpfreadprocess(struct knote *, struct kevent *);
int bpf_sysctl_locked(int *, u_int, void *, size_t *, void *, size_t);
struct bpf_d *bpfilter_lookup(int);
/*
@ -137,9 +140,6 @@ void bpf_d_smr(void *);
void bpf_get(struct bpf_d *);
void bpf_put(struct bpf_d *);
struct rwlock bpf_sysctl_lk = RWLOCK_INITIALIZER("bpfsz");
int
bpf_movein(struct uio *uio, struct bpf_d *d, struct mbuf **mp,
struct sockaddr *sockp)
@ -393,7 +393,7 @@ bpfopen(dev_t dev, int flag, int mode, struct proc *p)
/* Mark "free" and do most initialization. */
bd->bd_unit = unit;
bd->bd_bufsize = bpf_bufsize;
bd->bd_bufsize = atomic_load_int(&bpf_bufsize);
bd->bd_sig = SIGIO;
mtx_init(&bd->bd_mtx, IPL_NET);
task_set(&bd->bd_wake_task, bpf_wakeup_cb, bd);
@ -853,9 +853,11 @@ bpfioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
error = EINVAL;
else {
u_int size = *(u_int *)addr;
int bpf_maxbufsize_local =
atomic_load_int(&bpf_maxbufsize);
if (size > bpf_maxbufsize)
*(u_int *)addr = size = bpf_maxbufsize;
if (size > bpf_maxbufsize_local)
*(u_int *)addr = size = bpf_maxbufsize_local;
else if (size < BPF_MINBUFSIZE)
*(u_int *)addr = size = BPF_MINBUFSIZE;
mtx_enter(&d->bd_mtx);
@ -1815,42 +1817,25 @@ bpfsdetach(void *p)
}
int
bpf_sysctl_locked(int *name, u_int namelen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
bpf_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
size_t newlen)
{
if (namelen != 1)
return (ENOTDIR);
switch (name[0]) {
case NET_BPF_BUFSIZE:
return sysctl_int_bounded(oldp, oldlenp, newp, newlen,
&bpf_bufsize, BPF_MINBUFSIZE, bpf_maxbufsize);
&bpf_bufsize, BPF_MINBUFSIZE,
atomic_load_int(&bpf_maxbufsize));
case NET_BPF_MAXBUFSIZE:
return sysctl_int_bounded(oldp, oldlenp, newp, newlen,
&bpf_maxbufsize, BPF_MINBUFSIZE, INT_MAX);
default:
return (EOPNOTSUPP);
}
}
int
bpf_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
size_t newlen)
{
int flags = RW_INTR;
int error;
if (namelen != 1)
return (ENOTDIR);
flags |= (newp == NULL) ? RW_READ : RW_WRITE;
error = rw_enter(&bpf_sysctl_lk, flags);
if (error != 0)
return (error);
error = bpf_sysctl_locked(name, namelen, oldp, oldlenp, newp, newlen);
rw_exit(&bpf_sysctl_lk);
return (error);
/* NOTREACHED */
}
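
With the rwlock gone, correctness rests on each reader taking a single atomic snapshot of the tunable and using it for every comparison, as the BIOCSBLEN path above does with bpf_maxbufsize_local. A userspace model of that snapshot-then-clamp pattern (names and bounds hypothetical):

#include <stdatomic.h>
#include <stdio.h>

#define MINBUFSIZE	32

/* Tunable updated from elsewhere (the sysctl path in the kernel). */
static _Atomic int maxbufsize = 2 * 1024 * 1024;

/* Clamp against one snapshot of the bound so both comparisons see
 * the same value even if the tunable changes mid-call. */
static int
clamp_bufsize(int size)
{
	int max = atomic_load(&maxbufsize);	/* read once */

	if (size > max)
		size = max;
	else if (size < MINBUFSIZE)
		size = MINBUFSIZE;
	return size;
}

int
main(void)
{
	printf("%d\n", clamp_bufsize(1 << 30));	/* clamped down */
	printf("%d\n", clamp_bufsize(1));	/* raised to minimum */
	return 0;
}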
struct bpf_d *

View file

@ -1,4 +1,4 @@
/* $OpenBSD: igmp.c,v 1.83 2023/09/16 09:33:27 mpi Exp $ */
/* $OpenBSD: igmp.c,v 1.84 2024/08/12 11:25:27 bluhm Exp $ */
/* $NetBSD: igmp.c,v 1.15 1996/02/13 23:41:25 christos Exp $ */
/*
@ -96,12 +96,12 @@
#define IP_MULTICASTOPTS 0
int igmp_timers_are_running; /* [N] shortcut for fast timer */
int igmp_timers_are_running; /* [a] shortcut for fast timer */
static LIST_HEAD(, router_info) rti_head;
static struct mbuf *router_alert;
struct cpumem *igmpcounters;
void igmp_checktimer(struct ifnet *);
int igmp_checktimer(struct ifnet *);
void igmp_sendpkt(struct ifnet *, struct in_multi *, int, in_addr_t);
int rti_fill(struct in_multi *);
struct router_info * rti_find(struct ifnet *);
@ -228,7 +228,7 @@ igmp_input_if(struct ifnet *ifp, struct mbuf **mp, int *offp, int proto, int af)
struct in_multi *inm;
struct router_info *rti;
struct in_ifaddr *ia;
int timer;
int timer, running = 0;
igmplen = ntohs(ip->ip_len) - iphlen;
@ -300,7 +300,7 @@ igmp_input_if(struct ifnet *ifp, struct mbuf **mp, int *offp, int proto, int af)
inm->inm_state = IGMP_DELAYING_MEMBER;
inm->inm_timer = IGMP_RANDOM_DELAY(
IGMP_MAX_HOST_REPORT_DELAY * PR_FASTHZ);
igmp_timers_are_running = 1;
running = 1;
}
}
} else {
@ -341,7 +341,7 @@ igmp_input_if(struct ifnet *ifp, struct mbuf **mp, int *offp, int proto, int af)
IGMP_DELAYING_MEMBER;
inm->inm_timer =
IGMP_RANDOM_DELAY(timer);
igmp_timers_are_running = 1;
running = 1;
break;
case IGMP_SLEEPING_MEMBER:
inm->inm_state =
@ -475,6 +475,11 @@ igmp_input_if(struct ifnet *ifp, struct mbuf **mp, int *offp, int proto, int af)
}
if (running) {
membar_producer();
atomic_store_int(&igmp_timers_are_running, running);
}
/*
* Pass all valid IGMP packets up to any process(es) listening
* on a raw IGMP socket.
@ -485,7 +490,7 @@ igmp_input_if(struct ifnet *ifp, struct mbuf **mp, int *offp, int proto, int af)
void
igmp_joingroup(struct in_multi *inm, struct ifnet *ifp)
{
int i;
int i, running = 0;
inm->inm_state = IGMP_IDLE_MEMBER;
@ -496,9 +501,14 @@ igmp_joingroup(struct in_multi *inm, struct ifnet *ifp)
inm->inm_state = IGMP_DELAYING_MEMBER;
inm->inm_timer = IGMP_RANDOM_DELAY(
IGMP_MAX_HOST_REPORT_DELAY * PR_FASTHZ);
igmp_timers_are_running = 1;
running = 1;
} else
inm->inm_timer = 0;
if (running) {
membar_producer();
atomic_store_int(&igmp_timers_are_running, running);
}
}
void
@ -525,6 +535,7 @@ void
igmp_fasttimo(void)
{
struct ifnet *ifp;
int running = 0;
/*
* Quick check to see if any work needs to be done, in order
@ -533,23 +544,29 @@ igmp_fasttimo(void)
* lock intentionally. If it is not observed as set due to MP races,
* we may fail to check the timers; the loop will simply run at the
* next fast timeout.
*/
if (!igmp_timers_are_running)
if (!atomic_load_int(&igmp_timers_are_running))
return;
membar_consumer();
NET_LOCK();
igmp_timers_are_running = 0;
TAILQ_FOREACH(ifp, &ifnetlist, if_list)
igmp_checktimer(ifp);
TAILQ_FOREACH(ifp, &ifnetlist, if_list) {
if (igmp_checktimer(ifp))
running = 1;
}
membar_producer();
atomic_store_int(&igmp_timers_are_running, running);
NET_UNLOCK();
}
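
The handoff depends on paired barriers: the writer publishes its timer state, issues membar_producer(), then sets the flag; the reader checks the flag without the net lock and only trusts the timer state after membar_consumer(). A single-threaded userspace model using C11 fences (the kernel primitives correspond roughly to release/acquire fences; names hypothetical):

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic int timers_are_running;
static int timer_state;		/* stands in for the per-group timers */

/* Writer: publish the state, then raise the flag. */
static void
arm_timer(int delay)
{
	timer_state = delay;
	atomic_thread_fence(memory_order_release);  /* membar_producer() */
	atomic_store_explicit(&timers_are_running, 1, memory_order_relaxed);
}

/* Reader: cheap unlocked check; a flag missed due to a race only
 * defers the scan to the next fast timeout. */
static bool
should_scan(void)
{
	if (!atomic_load_explicit(&timers_are_running, memory_order_relaxed))
		return false;
	atomic_thread_fence(memory_order_acquire);  /* membar_consumer() */
	return timer_state > 0;
}

int
main(void)
{
	arm_timer(5);
	return should_scan() ? 0 : 1;
}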
void
int
igmp_checktimer(struct ifnet *ifp)
{
struct in_multi *inm;
struct ifmaddr *ifma;
int running = 0;
NET_ASSERT_LOCKED();
@ -570,9 +587,11 @@ igmp_checktimer(struct ifnet *ifp)
inm->inm_state = IGMP_IDLE_MEMBER;
}
} else {
igmp_timers_are_running = 1;
running = 1;
}
}
return (running);
}
void

View file

@ -1,4 +1,4 @@
/* $OpenBSD: icmp6.c,v 1.254 2024/07/14 18:53:39 bluhm Exp $ */
/* $OpenBSD: icmp6.c,v 1.255 2024/08/12 11:25:27 bluhm Exp $ */
/* $KAME: icmp6.c,v 1.217 2001/06/20 15:03:29 jinmei Exp $ */
/*
@ -1198,7 +1198,6 @@ icmp6_reflect(struct mbuf **mp, size_t off, struct sockaddr *sa)
void
icmp6_fasttimo(void)
{
mld6_fasttimeo();
}

View file

@ -1,4 +1,4 @@
/* $OpenBSD: mld6.c,v 1.62 2024/02/13 12:22:09 bluhm Exp $ */
/* $OpenBSD: mld6.c,v 1.63 2024/08/12 11:25:27 bluhm Exp $ */
/* $KAME: mld6.c,v 1.26 2001/02/16 14:50:35 itojun Exp $ */
/*
@ -85,9 +85,9 @@
#include <netinet6/mld6_var.h>
static struct ip6_pktopts ip6_opts;
int mld6_timers_are_running; /* [N] shortcut for fast timer */
int mld6_timers_are_running; /* [a] shortcut for fast timer */
void mld6_checktimer(struct ifnet *);
int mld6_checktimer(struct ifnet *);
static void mld6_sendpkt(struct in6_multi *, int, const struct in6_addr *);
void
@ -118,6 +118,7 @@ mld6_start_listening(struct in6_multi *in6m)
{
/* XXX: These are necessary for KAME's link-local hack */
struct in6_addr all_nodes = IN6ADDR_LINKLOCAL_ALLNODES_INIT;
int running = 0;
/*
* RFC2710 page 10:
@ -138,7 +139,12 @@ mld6_start_listening(struct in6_multi *in6m)
MLD_RANDOM_DELAY(MLD_V1_MAX_RI *
PR_FASTHZ);
in6m->in6m_state = MLD_IREPORTEDLAST;
mld6_timers_are_running = 1;
running = 1;
}
if (running) {
membar_producer();
atomic_store_int(&mld6_timers_are_running, running);
}
}
@ -169,6 +175,7 @@ mld6_input(struct mbuf *m, int off)
struct in6_multi *in6m;
struct ifmaddr *ifma;
int timer; /* timer value in the MLD query header */
int running = 0;
/* XXX: These are necessary for KAME's link-local hack */
struct in6_addr all_nodes = IN6ADDR_LINKLOCAL_ALLNODES_INIT;
@ -272,7 +279,7 @@ mld6_input(struct mbuf *m, int off)
in6m->in6m_timer > timer) {
in6m->in6m_timer =
MLD_RANDOM_DELAY(timer);
mld6_timers_are_running = 1;
running = 1;
}
}
}
@ -323,8 +330,13 @@ mld6_input(struct mbuf *m, int off)
#endif
break;
}
if_put(ifp);
if (running) {
membar_producer();
atomic_store_int(&mld6_timers_are_running, running);
}
if_put(ifp);
m_freem(m);
}
@ -332,6 +344,7 @@ void
mld6_fasttimeo(void)
{
struct ifnet *ifp;
int running = 0;
/*
* Quick check to see if any work needs to be done, in order
@ -340,23 +353,29 @@ mld6_fasttimeo(void)
* lock intentionally. If it is not observed as set due to MP races,
* we may fail to check the timers; the loop will simply run at the
* next fast timeout.
*/
if (!mld6_timers_are_running)
if (!atomic_load_int(&mld6_timers_are_running))
return;
membar_consumer();
NET_LOCK();
mld6_timers_are_running = 0;
TAILQ_FOREACH(ifp, &ifnetlist, if_list)
mld6_checktimer(ifp);
TAILQ_FOREACH(ifp, &ifnetlist, if_list) {
if (mld6_checktimer(ifp))
running = 1;
}
membar_producer();
atomic_store_int(&mld6_timers_are_running, running);
NET_UNLOCK();
}
void
int
mld6_checktimer(struct ifnet *ifp)
{
struct in6_multi *in6m;
struct ifmaddr *ifma;
int running = 0;
NET_ASSERT_LOCKED();
@ -370,9 +389,11 @@ mld6_checktimer(struct ifnet *ifp)
mld6_sendpkt(in6m, MLD_LISTENER_REPORT, NULL);
in6m->in6m_state = MLD_IREPORTEDLAST;
} else {
mld6_timers_are_running = 1;
running = 1;
}
}
return (running);
}
static void