sync code with last fixes and improvements from OpenBSD
This commit is contained in:
parent 691f97cc10
commit 371ae113c6
175 changed files with 2932 additions and 1512 deletions
@@ -1,4 +1,4 @@
/* $OpenBSD: acpi_apm.c,v 1.2 2023/07/08 14:44:43 tobhe Exp $ */
/* $OpenBSD: acpi_apm.c,v 1.3 2023/08/06 14:30:08 tobhe Exp $ */
/*
* Copyright (c) 2005 Thorsten Lockert <tholo@sigmasoft.com>
* Copyright (c) 2005 Jordan Hargrave <jordan@openbsd.org>
@@ -47,6 +47,9 @@ acpiopen(dev_t dev, int flag, int mode, struct proc *p)
struct acpi_softc *sc = acpi_softc;
int s;

if (sc == NULL)
return (ENXIO);

s = splbio();
switch (APMDEV(dev)) {
case APMDEV_CTL:
@@ -82,6 +85,9 @@ acpiclose(dev_t dev, int flag, int mode, struct proc *p)
struct acpi_softc *sc = acpi_softc;
int s;

if (sc == NULL)
return (ENXIO);

s = splbio();
switch (APMDEV(dev)) {
case APMDEV_CTL:
@@ -106,6 +112,9 @@ acpiioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
struct apm_power_info *pi = (struct apm_power_info *)data;
int s;

if (sc == NULL)
return (ENXIO);

s = splbio();
/* fake APM */
switch (cmd) {
@@ -168,6 +177,9 @@ acpikqfilter(dev_t dev, struct knote *kn)
struct acpi_softc *sc = acpi_softc;
int s;

if (sc == NULL)
return (ENXIO);

switch (kn->kn_filter) {
case EVFILT_READ:
kn->kn_fop = &acpiread_filtops;

@@ -1,4 +1,4 @@
|
|||
/* $OpenBSD: axppmic.c,v 1.17 2023/07/06 20:02:36 uaa Exp $ */
|
||||
/* $OpenBSD: axppmic.c,v 1.20 2023/08/02 11:52:18 uaa Exp $ */
|
||||
/*
|
||||
* Copyright (c) 2017 Mark Kettenis <kettenis@openbsd.org>
|
||||
*
|
||||
|
@@ -126,6 +126,21 @@ const struct axppmic_regdata axp221_regdata[] = {
|
|||
{ NULL }
|
||||
};
|
||||
|
||||
const struct axppmic_regdata axp313a_regdata[] = {
|
||||
/* dcdc1: 1.6-3.4V (100mV step) not supported */
|
||||
{ "dcdc1", 0x10, (1 << 0), (1 << 0), (0 << 0),
|
||||
0x13, 0x7f, 500000, 10000, 71, 122000, 20000, 17 },
|
||||
{ "dcdc2", 0x10, (1 << 1), (1 << 1), (0 << 1),
|
||||
0x14, 0x7f, 500000, 10000, 71, 122000, 20000, 17 },
|
||||
{ "dcdc3", 0x10, (1 << 2), (1 << 2), (0 << 2),
|
||||
0x15, 0x7f, 500000, 10000, 71, 122000, 20000, 32 },
|
||||
{ "aldo1", 0x10, (1 << 3), (1 << 3), (0 << 3),
|
||||
0x16, 0x1f, 500000, 100000, 31 },
|
||||
{ "dldo1", 0x10, (1 << 4), (1 << 4), (0 << 4),
|
||||
0x17, 0x1f, 500000, 100000, 31 },
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
const struct axppmic_regdata axp803_regdata[] = {
|
||||
{ "dcdc1", 0x10, (1 << 0), (1 << 0), (0 << 0),
|
||||
0x20, 0x1f, 1600000, 100000, 19 },
|
||||
|
@@ -242,6 +257,53 @@ const struct axppmic_regdata axp809_regdata[] = {
|
|||
{ NULL }
|
||||
};
|
||||
|
||||
const struct axppmic_regdata axp15060_regdata[] = {
|
||||
{ "dcdc1", 0x10, (1 << 0), (1 << 0), (0 << 0),
|
||||
0x13, 0x1f, 15000000, 100000, 20 },
|
||||
{ "dcdc2", 0x10, (1 << 1), (1 << 1), (0 << 1),
|
||||
0x14, 0x7f, 500000, 10000, 71, 1220000, 20000, 17 },
|
||||
{ "dcdc3", 0x10, (1 << 2), (1 << 2), (0 << 2),
|
||||
0x15, 0x7f, 500000, 10000, 71, 1220000, 20000, 17 },
|
||||
{ "dcdc4", 0x10, (1 << 3), (1 << 3), (0 << 3),
|
||||
0x16, 0x7f, 500000, 10000, 71, 1220000, 20000, 17 },
|
||||
{ "dcdc5", 0x10, (1 << 4), (1 << 4), (0 << 4),
|
||||
0x17, 0x7f, 800000, 10000, 33, 1140000, 20000, 36 },
|
||||
{ "dcdc6", 0x10, (1 << 5), (1 << 5), (0 << 5),
|
||||
0x18, 0x1f, 500000, 100000, 30 },
|
||||
{ "aldo1", 0x11, (1 << 0), (1 << 0), (0 << 0),
|
||||
0x19, 0x1f, 700000, 100000, 27 },
|
||||
{ "aldo2", 0x11, (1 << 1), (1 << 1), (0 << 1),
|
||||
0x20, 0x1f, 700000, 100000, 27 },
|
||||
{ "aldo3", 0x11, (1 << 2), (1 << 2), (0 << 2),
|
||||
0x21, 0x1f, 700000, 100000, 27 },
|
||||
{ "aldo4", 0x11, (1 << 3), (1 << 3), (0 << 3),
|
||||
0x22, 0x1f, 700000, 100000, 27 },
|
||||
{ "aldo5", 0x11, (1 << 4), (1 << 4), (0 << 4),
|
||||
0x23, 0x1f, 700000, 100000, 27 },
|
||||
{ "bldo1", 0x11, (1 << 5), (1 << 5), (0 << 5),
|
||||
0x24, 0x1f, 700000, 100000, 27 },
|
||||
{ "bldo2", 0x11, (1 << 6), (1 << 6), (0 << 6),
|
||||
0x25, 0x1f, 700000, 100000, 27 },
|
||||
{ "bldo3", 0x11, (1 << 7), (1 << 7), (0 << 7),
|
||||
0x26, 0x1f, 700000, 100000, 27 },
|
||||
{ "bldo4", 0x12, (1 << 0), (1 << 0), (0 << 0),
|
||||
0x27, 0x1f, 700000, 100000, 27 },
|
||||
{ "bldo5", 0x12, (1 << 1), (1 << 1), (0 << 1),
|
||||
0x28, 0x1f, 700000, 100000, 27 },
|
||||
{ "cldo1", 0x12, (1 << 2), (1 << 2), (0 << 2),
|
||||
0x29, 0x1f, 700000, 100000, 27 },
|
||||
{ "cldo2", 0x12, (1 << 3), (1 << 3), (0 << 3),
|
||||
0x2a, 0x1f, 700000, 100000, 27 },
|
||||
{ "cldo3", 0x12, (1 << 4), (1 << 4), (0 << 4),
|
||||
0x2b, 0x1f, 700000, 100000, 27 },
|
||||
{ "cldo4", 0x12, (1 << 5), (1 << 5), (0 << 5),
|
||||
0x2d, 0x3f, 700000, 100000, 36 },
|
||||
{ "cpusldo", 0x12, (1 << 6), (1 << 6), (0 << 6),
|
||||
0x2e, 0x0f, 700000, 50000, 15 },
|
||||
{ "sw", 0x12, (1 << 7), (1 << 7), (0 << 7) },
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
/* Sensors for AXP209 and AXP221/AXP809. */
|
||||
|
||||
#define AXPPMIC_NSENSORS 12
|
||||
|
@@ -307,10 +369,12 @@ const struct axppmic_device axppmic_devices[] = {
|
|||
{ "x-powers,axp221", "AXP221", axp221_regdata, axp221_sensdata },
|
||||
{ "x-powers,axp223", "AXP223", axp221_regdata, axp221_sensdata },
|
||||
{ "x-powers,axp305", "AXP305", axp806_regdata },
|
||||
{ "x-powers,axp313a", "AXP313A", axp313a_regdata },
|
||||
{ "x-powers,axp803", "AXP803", axp803_regdata, axp803_sensdata },
|
||||
{ "x-powers,axp805", "AXP805", axp806_regdata },
|
||||
{ "x-powers,axp806", "AXP806", axp806_regdata },
|
||||
{ "x-powers,axp809", "AXP809", axp809_regdata, axp221_sensdata }
|
||||
{ "x-powers,axp809", "AXP809", axp809_regdata, axp221_sensdata },
|
||||
{ "x-powers,axp15060", "AXP15060", axp15060_regdata },
|
||||
};
|
||||
|
||||
const struct axppmic_device *
|
||||
|
@@ -438,6 +502,10 @@ axppmic_i2c_write(struct axppmic_softc *sc, uint8_t reg, uint8_t value)
|
|||
|
||||
/* RSB interface */
|
||||
|
||||
#include "sxirsb.h"
|
||||
|
||||
#if NSXIRSB > 0
|
||||
|
||||
int axppmic_rsb_match(struct device *, void *, void *);
|
||||
void axppmic_rsb_attach(struct device *, struct device *, void *);
|
||||
|
||||
|
@@ -489,6 +557,8 @@ axppmic_rsb_write(struct axppmic_softc *sc, uint8_t reg, uint8_t value)
|
|||
rsb_write_1(sc->sc_cookie, sc->sc_addr, reg, value);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
/* Common code */
|
||||
|
||||
void axppmic_attach_node(struct axppmic_softc *, int);
|
||||
|
|
|
@@ -1,4 +1,4 @@
# $OpenBSD: files.fdt,v 1.196 2023/07/22 22:43:53 patrick Exp $
# $OpenBSD: files.fdt,v 1.197 2023/07/31 09:00:43 kettenis Exp $
#
# Config file and device description for machine-independent FDT code.
# Included by ports that need it.
@@ -48,7 +48,7 @@ file dev/fdt/sxipio.c sxipio
define rsb {}
device sxirsb: rsb
attach sxirsb at fdt
file dev/fdt/sxirsb.c sxirsb
file dev/fdt/sxirsb.c sxirsb needs-flag

device sxipwm
attach sxipwm at fdt

@@ -1,4 +1,4 @@
/* $OpenBSD: dwqe.c,v 1.10 2023/07/04 12:48:42 kettenis Exp $ */
/* $OpenBSD: dwqe.c,v 1.11 2023/08/07 20:28:47 kettenis Exp $ */
/*
* Copyright (c) 2008, 2019 Mark Kettenis <kettenis@openbsd.org>
* Copyright (c) 2017, 2022 Patrick Wildt <patrick@blueri.se>
@@ -608,6 +608,9 @@ dwqe_tx_proc(struct dwqe_softc *sc)
if (txd->sd_tdes3 & TDES3_OWN)
break;

if (txd->sd_tdes3 & TDES3_ES)
ifp->if_oerrors++;

txb = &sc->sc_txbuf[idx];
if (txb->tb_m) {
bus_dmamap_sync(sc->sc_dmat, txb->tb_map, 0,
@@ -808,7 +811,7 @@ dwqe_up(struct dwqe_softc *sc)
if (sc->sc_force_thresh_dma_mode) {
mode &= ~GMAC_MTL_CHAN_TX_OP_MODE_TSF;
mode &= ~GMAC_MTL_CHAN_TX_OP_MODE_TTC_MASK;
mode |= GMAC_MTL_CHAN_TX_OP_MODE_TTC_128;
mode |= GMAC_MTL_CHAN_TX_OP_MODE_TTC_512;
} else {
mode |= GMAC_MTL_CHAN_TX_OP_MODE_TSF;
}

@@ -1,4 +1,4 @@
/* $OpenBSD: azalia.c,v 1.283 2023/02/21 13:42:59 bcallah Exp $ */
/* $OpenBSD: azalia.c,v 1.284 2023/07/30 08:46:03 yasuoka Exp $ */
/* $NetBSD: azalia.c,v 1.20 2006/05/07 08:31:44 kent Exp $ */

/*-
@@ -463,6 +463,7 @@ azalia_configure_pci(azalia_t *az)
case PCI_PRODUCT_INTEL_600SERIES_HDA:
case PCI_PRODUCT_INTEL_600SERIES_LP_HDA:
case PCI_PRODUCT_INTEL_700SERIES_HDA:
case PCI_PRODUCT_INTEL_700SERIES_LP_HDA:
case PCI_PRODUCT_INTEL_C600_HDA:
case PCI_PRODUCT_INTEL_C610_HDA_1:
case PCI_PRODUCT_INTEL_C610_HDA_2:
@@ -492,6 +493,7 @@ const struct pci_matchid azalia_pci_devices[] = {
{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_500SERIES_HDA },
{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_500SERIES_LP_HDA },
{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_600SERIES_LP_HDA },
{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_700SERIES_LP_HDA },
{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_APOLLOLAKE_HDA },
{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_GLK_HDA },
{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_JSL_HDA },

@@ -1291,6 +1291,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
int amdgpu_device_pci_reset(struct amdgpu_device *adev);
bool amdgpu_device_need_post(struct amdgpu_device *adev);
bool amdgpu_device_pcie_dynamic_switching_supported(void);
bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);
bool amdgpu_device_aspm_support_quirk(void);

@@ -1351,6 +1351,29 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
return true;
}

/*
* Intel hosts such as Raptor Lake and Sapphire Rapids don't support dynamic
* speed switching. Until we have confirmation from Intel that a specific host
* supports it, it's safer that we keep it disabled for all.
*
* https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
* https://gitlab.freedesktop.org/drm/amd/-/issues/2663
*/
bool amdgpu_device_pcie_dynamic_switching_supported(void)
{
#if IS_ENABLED(CONFIG_X86)
#ifdef __linux__
struct cpuinfo_x86 *c = &cpu_data(0);

if (c->x86_vendor == X86_VENDOR_INTEL)
#else
if (strcmp(cpu_vendor, "GenuineIntel") == 0)
#endif
return false;
#endif
return true;
}

/**
* amdgpu_device_should_use_aspm - check if the device should program ASPM
*

@@ -203,16 +203,18 @@ static const struct pci_matchid amdgpu_devices[] = {
{0x1002, 0x1506 },

/* GC 11.0.0, DCN 3.2.0, dGPU, "Navi 31" */
{0x1002, 0x744c }, /* Radeon RX 7900 XT/XTX */
{0x1002, 0x7448 }, /* Radeon Pro W7900 */
{0x1002, 0x744c }, /* Radeon RX 7900 XT/XTX/GRE */
{0x1002, 0x745e }, /* Radeon Pro W7800 */

/* GC 11.0.1, DCN 3.1.4, APU, Ryzen 7040 "Phoenix" */
{0x1002, 0x15bf },

/* GC 11.0.2, DCN 3.2.1, dGPU, "Navi 33" */
{0x1002, 0x7480 }, /* Radeon RX 7600S, 7700S, 7600M XT, 7600 */
{0x1002, 0x7480 }, /* Radeon RX 7600S, 7700S, 7600M XT,
7600, Pro W7600 */
{0x1002, 0x7483 }, /* Radeon RX 7600M */
{0x1002, 0x7489 }, /* Radeon Pro W7500 */

/* GC 11.0.3, DCN 3.2.0, dGPU */
/* GC 11.0.4, DCN 3.1.4, APU */

@@ -472,11 +472,11 @@ static int psp_sw_init(void *handle)
return 0;

failed2:
amdgpu_bo_free_kernel(&psp->fw_pri_bo,
&psp->fw_pri_mc_addr, &psp->fw_pri_buf);
failed1:
amdgpu_bo_free_kernel(&psp->fence_buf_bo,
&psp->fence_buf_mc_addr, &psp->fence_buf);
failed1:
amdgpu_bo_free_kernel(&psp->fw_pri_bo,
&psp->fw_pri_mc_addr, &psp->fw_pri_buf);
return ret;
}

@@ -4950,6 +4950,30 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static inline void fill_dc_dirty_rect(struct drm_plane *plane,
|
||||
struct rect *dirty_rect, int32_t x,
|
||||
int32_t y, int32_t width, int32_t height,
|
||||
int *i, bool ffu)
|
||||
{
|
||||
WARN_ON(*i >= DC_MAX_DIRTY_RECTS);
|
||||
|
||||
dirty_rect->x = x;
|
||||
dirty_rect->y = y;
|
||||
dirty_rect->width = width;
|
||||
dirty_rect->height = height;
|
||||
|
||||
if (ffu)
|
||||
drm_dbg(plane->dev,
|
||||
"[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
|
||||
plane->base.id, width, height);
|
||||
else
|
||||
drm_dbg(plane->dev,
|
||||
"[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)",
|
||||
plane->base.id, x, y, width, height);
|
||||
|
||||
(*i)++;
|
||||
}
|
||||
|
||||
/**
|
||||
* fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
|
||||
*
|
||||
|
@@ -4970,10 +4994,6 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
|
|||
* addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
|
||||
* implicitly provide damage clips without any client support via the plane
|
||||
* bounds.
|
||||
*
|
||||
* Today, amdgpu_dm only supports the MPO and cursor usecase.
|
||||
*
|
||||
* TODO: Also enable for FB_DAMAGE_CLIPS
|
||||
*/
|
||||
static void fill_dc_dirty_rects(struct drm_plane *plane,
|
||||
struct drm_plane_state *old_plane_state,
|
||||
|
@@ -4984,12 +5004,11 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
|
|||
struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
|
||||
struct rect *dirty_rects = flip_addrs->dirty_rects;
|
||||
uint32_t num_clips;
|
||||
struct drm_mode_rect *clips;
|
||||
bool bb_changed;
|
||||
bool fb_changed;
|
||||
u32 i = 0;
|
||||
|
||||
flip_addrs->dirty_rect_count = 0;
|
||||
|
||||
/*
|
||||
* Cursor plane has it's own dirty rect update interface. See
|
||||
* dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data
|
||||
|
@@ -4997,20 +5016,20 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
|
|||
if (plane->type == DRM_PLANE_TYPE_CURSOR)
|
||||
return;
|
||||
|
||||
/*
|
||||
* Today, we only consider MPO use-case for PSR SU. If MPO not
|
||||
* requested, and there is a plane update, do FFU.
|
||||
*/
|
||||
num_clips = drm_plane_get_damage_clips_count(new_plane_state);
|
||||
clips = drm_plane_get_damage_clips(new_plane_state);
|
||||
|
||||
if (!dm_crtc_state->mpo_requested) {
|
||||
dirty_rects[0].x = 0;
|
||||
dirty_rects[0].y = 0;
|
||||
dirty_rects[0].width = dm_crtc_state->base.mode.crtc_hdisplay;
|
||||
dirty_rects[0].height = dm_crtc_state->base.mode.crtc_vdisplay;
|
||||
flip_addrs->dirty_rect_count = 1;
|
||||
DRM_DEBUG_DRIVER("[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
|
||||
new_plane_state->plane->base.id,
|
||||
dm_crtc_state->base.mode.crtc_hdisplay,
|
||||
dm_crtc_state->base.mode.crtc_vdisplay);
|
||||
if (!num_clips || num_clips > DC_MAX_DIRTY_RECTS)
|
||||
goto ffu;
|
||||
|
||||
for (; flip_addrs->dirty_rect_count < num_clips; clips++)
|
||||
fill_dc_dirty_rect(new_plane_state->plane,
|
||||
&dirty_rects[flip_addrs->dirty_rect_count],
|
||||
clips->x1, clips->y1,
|
||||
clips->x2 - clips->x1, clips->y2 - clips->y1,
|
||||
&flip_addrs->dirty_rect_count,
|
||||
false);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@@ -5021,7 +5040,6 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
|
|||
* If plane is moved or resized, also add old bounding box to dirty
|
||||
* rects.
|
||||
*/
|
||||
num_clips = drm_plane_get_damage_clips_count(new_plane_state);
|
||||
fb_changed = old_plane_state->fb->base.id !=
|
||||
new_plane_state->fb->base.id;
|
||||
bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
|
||||
|
@@ -5029,36 +5047,51 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
|
|||
old_plane_state->crtc_w != new_plane_state->crtc_w ||
|
||||
old_plane_state->crtc_h != new_plane_state->crtc_h);
|
||||
|
||||
DRM_DEBUG_DRIVER("[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
|
||||
new_plane_state->plane->base.id,
|
||||
bb_changed, fb_changed, num_clips);
|
||||
drm_dbg(plane->dev,
|
||||
"[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
|
||||
new_plane_state->plane->base.id,
|
||||
bb_changed, fb_changed, num_clips);
|
||||
|
||||
if (num_clips || fb_changed || bb_changed) {
|
||||
dirty_rects[i].x = new_plane_state->crtc_x;
|
||||
dirty_rects[i].y = new_plane_state->crtc_y;
|
||||
dirty_rects[i].width = new_plane_state->crtc_w;
|
||||
dirty_rects[i].height = new_plane_state->crtc_h;
|
||||
DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
|
||||
new_plane_state->plane->base.id,
|
||||
dirty_rects[i].x, dirty_rects[i].y,
|
||||
dirty_rects[i].width, dirty_rects[i].height);
|
||||
i += 1;
|
||||
if ((num_clips + (bb_changed ? 2 : 0)) > DC_MAX_DIRTY_RECTS)
|
||||
goto ffu;
|
||||
|
||||
if (bb_changed) {
|
||||
fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
|
||||
new_plane_state->crtc_x,
|
||||
new_plane_state->crtc_y,
|
||||
new_plane_state->crtc_w,
|
||||
new_plane_state->crtc_h, &i, false);
|
||||
|
||||
/* Add old plane bounding-box if plane is moved or resized */
|
||||
fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
|
||||
old_plane_state->crtc_x,
|
||||
old_plane_state->crtc_y,
|
||||
old_plane_state->crtc_w,
|
||||
old_plane_state->crtc_h, &i, false);
|
||||
}
|
||||
|
||||
/* Add old plane bounding-box if plane is moved or resized */
|
||||
if (bb_changed) {
|
||||
dirty_rects[i].x = old_plane_state->crtc_x;
|
||||
dirty_rects[i].y = old_plane_state->crtc_y;
|
||||
dirty_rects[i].width = old_plane_state->crtc_w;
|
||||
dirty_rects[i].height = old_plane_state->crtc_h;
|
||||
DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
|
||||
old_plane_state->plane->base.id,
|
||||
dirty_rects[i].x, dirty_rects[i].y,
|
||||
dirty_rects[i].width, dirty_rects[i].height);
|
||||
i += 1;
|
||||
if (num_clips) {
|
||||
for (; i < num_clips; clips++)
|
||||
fill_dc_dirty_rect(new_plane_state->plane,
|
||||
&dirty_rects[i], clips->x1,
|
||||
clips->y1, clips->x2 - clips->x1,
|
||||
clips->y2 - clips->y1, &i, false);
|
||||
} else if (fb_changed && !bb_changed) {
|
||||
fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
|
||||
new_plane_state->crtc_x,
|
||||
new_plane_state->crtc_y,
|
||||
new_plane_state->crtc_w,
|
||||
new_plane_state->crtc_h, &i, false);
|
||||
}
|
||||
|
||||
flip_addrs->dirty_rect_count = i;
|
||||
return;
|
||||
|
||||
ffu:
|
||||
fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[0], 0, 0,
|
||||
dm_crtc_state->base.mode.crtc_hdisplay,
|
||||
dm_crtc_state->base.mode.crtc_vdisplay,
|
||||
&flip_addrs->dirty_rect_count, true);
|
||||
}
|
||||
|
||||
static void update_stream_scaling_settings(const struct drm_display_mode *mode,
|
||||
|
|
|
@@ -677,7 +677,7 @@ void dm_handle_mst_sideband_msg_ready_event(

if (retry == 3) {
DRM_ERROR("Failed to ack MST event.\n");
return;
break;
}

drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr);

@@ -1600,6 +1600,10 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
supported_rotations);

if (dm->adev->ip_versions[DCE_HWIP][0] > IP_VERSION(3, 0, 1) &&
plane->type != DRM_PLANE_TYPE_CURSOR)
drm_plane_enable_fb_damage_clips(plane);

drm_plane_helper_add(plane, &dm_plane_helper_funcs);

#ifdef CONFIG_DRM_AMD_DC_HDR

@@ -24,6 +24,7 @@
*/

#include "amdgpu_dm_psr.h"
#include "dc_dmub_srv.h"
#include "dc.h"
#include "dm_helpers.h"
#include "amdgpu_dm.h"
@@ -50,7 +51,7 @@ static bool link_supports_psrsu(struct dc_link *link)
!link->dpcd_caps.psr_info.psr2_su_y_granularity_cap)
return false;

return true;
return dc_dmub_check_min_version(dc->ctx->dmub_srv->dmub);
}

/*

@@ -108,6 +108,11 @@ static int dcn314_get_active_display_cnt_wa(
stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
tmds_present = true;

/* Checking stream / link detection ensuring that PHY is active*/
if (dc_is_dp_signal(stream->signal) && !stream->dpms_off)
display_count++;

}

for (i = 0; i < dc->link_count; i++) {

@@ -135,9 +135,7 @@ static const char DC_BUILD_ID[] = "production-build";
|
|||
* one or two (in the pipe-split case).
|
||||
*/
|
||||
|
||||
/*******************************************************************************
|
||||
* Private functions
|
||||
******************************************************************************/
|
||||
/* Private functions */
|
||||
|
||||
static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
|
||||
{
|
||||
|
@@ -384,16 +382,18 @@ static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
|
|||
}
|
||||
|
||||
/**
|
||||
* dc_stream_adjust_vmin_vmax:
|
||||
* dc_stream_adjust_vmin_vmax - look up pipe context & update parts of DRR
|
||||
* @dc: dc reference
|
||||
* @stream: Initial dc stream state
|
||||
* @adjust: Updated parameters for vertical_total_min and vertical_total_max
|
||||
*
|
||||
* Looks up the pipe context of dc_stream_state and updates the
|
||||
* vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh
|
||||
* Rate, which is a power-saving feature that targets reducing panel
|
||||
* refresh rate while the screen is static
|
||||
*
|
||||
* @dc: dc reference
|
||||
* @stream: Initial dc stream state
|
||||
* @adjust: Updated parameters for vertical_total_min and vertical_total_max
|
||||
* Return: %true if the pipe context is found and adjusted;
|
||||
* %false if the pipe context is not found.
|
||||
*/
|
||||
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
|
||||
struct dc_stream_state *stream,
|
||||
|
@@ -429,18 +429,17 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
|
|||
}
|
||||
|
||||
/**
|
||||
*****************************************************************************
|
||||
* Function: dc_stream_get_last_vrr_vtotal
|
||||
* dc_stream_get_last_used_drr_vtotal - Looks up the pipe context of
|
||||
* dc_stream_state and gets the last VTOTAL used by DRR (Dynamic Refresh Rate)
|
||||
*
|
||||
* @brief
|
||||
* Looks up the pipe context of dc_stream_state and gets the
|
||||
* last VTOTAL used by DRR (Dynamic Refresh Rate)
|
||||
* @dc: [in] dc reference
|
||||
* @stream: [in] Initial dc stream state
|
||||
* @refresh_rate: [in] new refresh_rate
|
||||
*
|
||||
* @param [in] dc: dc reference
|
||||
* @param [in] stream: Initial dc stream state
|
||||
* @param [in] adjust: Updated parameters for vertical_total_min and
|
||||
* vertical_total_max
|
||||
*****************************************************************************
|
||||
* Return: %true if the pipe context is found and there is an associated
|
||||
* timing_generator for the DC;
|
||||
* %false if the pipe context is not found or there is no
|
||||
* timing_generator for the DC.
|
||||
*/
|
||||
bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
|
||||
struct dc_stream_state *stream,
|
||||
|
@@ -587,7 +586,10 @@ bool dc_stream_stop_dmcu_crc_win_update(struct dc *dc, struct dc_stream_state *s
|
|||
* once.
|
||||
*
|
||||
* By default, only CRC0 is configured, and the entire frame is used to
|
||||
* calculate the crc.
|
||||
* calculate the CRC.
|
||||
*
|
||||
* Return: %false if the stream is not found or CRC capture is not supported;
|
||||
* %true if the stream has been configured.
|
||||
*/
|
||||
bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
|
||||
struct crc_params *crc_window, bool enable, bool continuous)
|
||||
|
@@ -656,7 +658,7 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
|
|||
* dc_stream_configure_crc needs to be called beforehand to enable CRCs.
|
||||
*
|
||||
* Return:
|
||||
* false if stream is not found, or if CRCs are not enabled.
|
||||
* %false if stream is not found, or if CRCs are not enabled.
|
||||
*/
|
||||
bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
|
||||
uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
|
||||
|
@@ -1236,9 +1238,7 @@ static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
|
|||
PERF_TRACE();
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
* Public functions
|
||||
******************************************************************************/
|
||||
/* Public functions */
|
||||
|
||||
struct dc *dc_create(const struct dc_init_data *init_params)
|
||||
{
|
||||
|
@@ -1505,17 +1505,19 @@ static void program_timing_sync(
|
|||
}
|
||||
}
|
||||
|
||||
static bool context_changed(
|
||||
struct dc *dc,
|
||||
struct dc_state *context)
|
||||
static bool streams_changed(struct dc *dc,
|
||||
struct dc_stream_state *streams[],
|
||||
uint8_t stream_count)
|
||||
{
|
||||
uint8_t i;
|
||||
|
||||
if (context->stream_count != dc->current_state->stream_count)
|
||||
if (stream_count != dc->current_state->stream_count)
|
||||
return true;
|
||||
|
||||
for (i = 0; i < dc->current_state->stream_count; i++) {
|
||||
if (dc->current_state->streams[i] != context->streams[i])
|
||||
if (dc->current_state->streams[i] != streams[i])
|
||||
return true;
|
||||
if (!streams[i]->link->link_state_valid)
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@@ -1745,6 +1747,8 @@ void dc_z10_save_init(struct dc *dc)
|
|||
/*
|
||||
* Applies given context to HW and copy it into current context.
|
||||
* It's up to the user to release the src context afterwards.
|
||||
*
|
||||
* Return: an enum dc_status result code for the operation
|
||||
*/
|
||||
static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
|
||||
{
|
||||
|
@@ -1911,12 +1915,114 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
|
|||
return result;
|
||||
}
|
||||
|
||||
static bool commit_minimal_transition_state(struct dc *dc,
|
||||
struct dc_state *transition_base_context);
|
||||
|
||||
/**
|
||||
* dc_commit_streams - Commit current stream state
|
||||
*
|
||||
* @dc: DC object with the commit state to be configured in the hardware
|
||||
* @streams: Array with a list of stream state
|
||||
* @stream_count: Total of streams
|
||||
*
|
||||
* Function responsible for commit streams change to the hardware.
|
||||
*
|
||||
* Return:
|
||||
* Return DC_OK if everything work as expected, otherwise, return a dc_status
|
||||
* code.
|
||||
*/
|
||||
enum dc_status dc_commit_streams(struct dc *dc,
|
||||
struct dc_stream_state *streams[],
|
||||
uint8_t stream_count)
|
||||
{
|
||||
int i, j;
|
||||
struct dc_state *context;
|
||||
enum dc_status res = DC_OK;
|
||||
struct dc_validation_set set[MAX_STREAMS] = {0};
|
||||
struct pipe_ctx *pipe;
|
||||
bool handle_exit_odm2to1 = false;
|
||||
|
||||
if (!streams_changed(dc, streams, stream_count))
|
||||
return res;
|
||||
|
||||
DC_LOG_DC("%s: %d streams\n", __func__, stream_count);
|
||||
|
||||
for (i = 0; i < stream_count; i++) {
|
||||
struct dc_stream_state *stream = streams[i];
|
||||
struct dc_stream_status *status = dc_stream_get_status(stream);
|
||||
|
||||
dc_stream_log(dc, stream);
|
||||
|
||||
set[i].stream = stream;
|
||||
|
||||
if (status) {
|
||||
set[i].plane_count = status->plane_count;
|
||||
for (j = 0; j < status->plane_count; j++)
|
||||
set[i].plane_states[j] = status->plane_states[j];
|
||||
}
|
||||
}
|
||||
|
||||
/* Check for case where we are going from odm 2:1 to max
|
||||
* pipe scenario. For these cases, we will call
|
||||
* commit_minimal_transition_state() to exit out of odm 2:1
|
||||
* first before processing new streams
|
||||
*/
|
||||
if (stream_count == dc->res_pool->pipe_count) {
|
||||
for (i = 0; i < dc->res_pool->pipe_count; i++) {
|
||||
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
|
||||
if (pipe->next_odm_pipe)
|
||||
handle_exit_odm2to1 = true;
|
||||
}
|
||||
}
|
||||
|
||||
if (handle_exit_odm2to1)
|
||||
res = commit_minimal_transition_state(dc, dc->current_state);
|
||||
|
||||
context = dc_create_state(dc);
|
||||
if (!context)
|
||||
goto context_alloc_fail;
|
||||
|
||||
dc_resource_state_copy_construct_current(dc, context);
|
||||
|
||||
/*
|
||||
* Previous validation was perfomred with fast_validation = true and
|
||||
* the full DML state required for hardware programming was skipped.
|
||||
*
|
||||
* Re-validate here to calculate these parameters / watermarks.
|
||||
*/
|
||||
res = dc_validate_global_state(dc, context, false);
|
||||
if (res != DC_OK) {
|
||||
DC_LOG_ERROR("DC commit global validation failure: %s (%d)",
|
||||
dc_status_to_str(res), res);
|
||||
return res;
|
||||
}
|
||||
|
||||
res = dc_commit_state_no_check(dc, context);
|
||||
|
||||
context_alloc_fail:
|
||||
|
||||
DC_LOG_DC("%s Finished.\n", __func__);
|
||||
|
||||
return (res == DC_OK);
|
||||
}
|
||||
|
||||
/* TODO: When the transition to the new commit sequence is done, remove this
|
||||
* function in favor of dc_commit_streams. */
|
||||
bool dc_commit_state(struct dc *dc, struct dc_state *context)
|
||||
{
|
||||
enum dc_status result = DC_ERROR_UNEXPECTED;
|
||||
int i;
|
||||
|
||||
if (!context_changed(dc, context))
|
||||
/* TODO: Since change commit sequence can have a huge impact,
|
||||
* we decided to only enable it for DCN3x. However, as soon as
|
||||
* we get more confident about this change we'll need to enable
|
||||
* the new sequence for all ASICs. */
|
||||
if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
|
||||
result = dc_commit_streams(dc, context->streams, context->stream_count);
|
||||
return result == DC_OK;
|
||||
}
|
||||
|
||||
if (!streams_changed(dc, context->streams, context->stream_count))
|
||||
return DC_OK;
|
||||
|
||||
DC_LOG_DC("%s: %d streams\n",
|
||||
|
@@ -2482,8 +2588,11 @@ static enum surface_update_type check_update_surfaces_for_stream(
|
|||
|
||||
if (stream_update->mst_bw_update)
|
||||
su_flags->bits.mst_bw = 1;
|
||||
if (stream_update->crtc_timing_adjust && dc_extended_blank_supported(dc))
|
||||
su_flags->bits.crtc_timing_adjust = 1;
|
||||
|
||||
if (stream_update->stream && stream_update->stream->freesync_on_desktop &&
|
||||
(stream_update->vrr_infopacket || stream_update->allow_freesync ||
|
||||
stream_update->vrr_active_variable))
|
||||
su_flags->bits.fams_changed = 1;
|
||||
|
||||
if (su_flags->raw != 0)
|
||||
overall_type = UPDATE_TYPE_FULL;
|
||||
|
@@ -3648,17 +3757,17 @@ static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
|
|||
}
|
||||
}
|
||||
|
||||
/* For SubVP when adding MPO video we need to add a minimal transition.
|
||||
/* For SubVP when adding or removing planes we need to add a minimal transition
|
||||
* (even when disabling all planes). Whenever disabling a phantom pipe, we
|
||||
* must use the minimal transition path to disable the pipe correctly.
|
||||
*/
|
||||
if (cur_stream_status && stream->mall_stream_config.type == SUBVP_MAIN) {
|
||||
/* determine if minimal transition is required due to SubVP*/
|
||||
if (surface_count > 0) {
|
||||
if (cur_stream_status->plane_count > surface_count) {
|
||||
force_minimal_pipe_splitting = true;
|
||||
} else if (cur_stream_status->plane_count < surface_count) {
|
||||
force_minimal_pipe_splitting = true;
|
||||
*is_plane_addition = true;
|
||||
}
|
||||
if (cur_stream_status->plane_count > surface_count) {
|
||||
force_minimal_pipe_splitting = true;
|
||||
} else if (cur_stream_status->plane_count < surface_count) {
|
||||
force_minimal_pipe_splitting = true;
|
||||
*is_plane_addition = true;
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -3675,6 +3784,8 @@ static bool commit_minimal_transition_state(struct dc *dc,
|
|||
enum dc_status ret = DC_ERROR_UNEXPECTED;
|
||||
unsigned int i, j;
|
||||
unsigned int pipe_in_use = 0;
|
||||
bool subvp_in_use = false;
|
||||
bool odm_in_use = false;
|
||||
|
||||
if (!transition_context)
|
||||
return false;
|
||||
|
@@ -3687,6 +3798,30 @@ static bool commit_minimal_transition_state(struct dc *dc,
|
|||
pipe_in_use++;
|
||||
}
|
||||
|
||||
/* If SubVP is enabled and we are adding or removing planes from any main subvp
|
||||
* pipe, we must use the minimal transition.
|
||||
*/
|
||||
for (i = 0; i < dc->res_pool->pipe_count; i++) {
|
||||
struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
|
||||
|
||||
if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
|
||||
subvp_in_use = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* If ODM is enabled and we are adding or removing planes from any ODM
|
||||
* pipe, we must use the minimal transition.
|
||||
*/
|
||||
for (i = 0; i < dc->res_pool->pipe_count; i++) {
|
||||
struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
|
||||
|
||||
if (pipe->stream && pipe->next_odm_pipe) {
|
||||
odm_in_use = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* When the OS add a new surface if we have been used all of pipes with odm combine
|
||||
* and mpc split feature, it need use commit_minimal_transition_state to transition safely.
|
||||
* After OS exit MPO, it will back to use odm and mpc split with all of pipes, we need
|
||||
|
@@ -3695,7 +3830,7 @@ static bool commit_minimal_transition_state(struct dc *dc,
|
|||
* Reduce the scenarios to use dc_commit_state_no_check in the stage of flip. Especially
|
||||
* enter/exit MPO when DCN still have enough resources.
|
||||
*/
|
||||
if (pipe_in_use != dc->res_pool->pipe_count) {
|
||||
if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use && !odm_in_use) {
|
||||
dc_release_state(transition_context);
|
||||
return true;
|
||||
}
|
||||
|
@@ -4430,21 +4565,17 @@ void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc)
|
|||
dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching_shut_down = true;
|
||||
}
|
||||
|
||||
/*
|
||||
*****************************************************************************
|
||||
* Function: dc_is_dmub_outbox_supported -
|
||||
/**
|
||||
* dc_is_dmub_outbox_supported - Check if DMUB firmware support outbox notification
|
||||
*
|
||||
* @brief
|
||||
* Checks whether DMUB FW supports outbox notifications, if supported
|
||||
* DM should register outbox interrupt prior to actually enabling interrupts
|
||||
* via dc_enable_dmub_outbox
|
||||
* @dc: [in] dc structure
|
||||
*
|
||||
* @param
|
||||
* [in] dc: dc structure
|
||||
* Checks whether DMUB FW supports outbox notifications, if supported DM
|
||||
* should register outbox interrupt prior to actually enabling interrupts
|
||||
* via dc_enable_dmub_outbox
|
||||
*
|
||||
* @return
|
||||
* True if DMUB FW supports outbox notifications, False otherwise
|
||||
*****************************************************************************
|
||||
* Return:
|
||||
* True if DMUB FW supports outbox notifications, False otherwise
|
||||
*/
|
||||
bool dc_is_dmub_outbox_supported(struct dc *dc)
|
||||
{
|
||||
|
@@ -4462,21 +4593,17 @@ bool dc_is_dmub_outbox_supported(struct dc *dc)
|
|||
return dc->debug.enable_dmub_aux_for_legacy_ddc;
|
||||
}
|
||||
|
||||
/*
|
||||
*****************************************************************************
|
||||
* Function: dc_enable_dmub_notifications
|
||||
/**
|
||||
* dc_enable_dmub_notifications - Check if dmub fw supports outbox
|
||||
*
|
||||
* @brief
|
||||
* Calls dc_is_dmub_outbox_supported to check if dmub fw supports outbox
|
||||
* notifications. All DMs shall switch to dc_is_dmub_outbox_supported.
|
||||
* This API shall be removed after switching.
|
||||
* @dc: [in] dc structure
|
||||
*
|
||||
* @param
|
||||
* [in] dc: dc structure
|
||||
* Calls dc_is_dmub_outbox_supported to check if dmub fw supports outbox
|
||||
* notifications. All DMs shall switch to dc_is_dmub_outbox_supported. This
|
||||
* API shall be removed after switching.
|
||||
*
|
||||
* @return
|
||||
* True if DMUB FW supports outbox notifications, False otherwise
|
||||
*****************************************************************************
|
||||
* Return:
|
||||
* True if DMUB FW supports outbox notifications, False otherwise
|
||||
*/
|
||||
bool dc_enable_dmub_notifications(struct dc *dc)
|
||||
{
|
||||
|
@@ -4484,18 +4611,11 @@ bool dc_enable_dmub_notifications(struct dc *dc)
|
|||
}
|
||||
|
||||
/**
|
||||
*****************************************************************************
|
||||
* Function: dc_enable_dmub_outbox
|
||||
* dc_enable_dmub_outbox - Enables DMUB unsolicited notification
|
||||
*
|
||||
* @brief
|
||||
* Enables DMUB unsolicited notifications to x86 via outbox
|
||||
* @dc: [in] dc structure
|
||||
*
|
||||
* @param
|
||||
* [in] dc: dc structure
|
||||
*
|
||||
* @return
|
||||
* None
|
||||
*****************************************************************************
|
||||
* Enables DMUB unsolicited notifications to x86 via outbox.
|
||||
*/
|
||||
void dc_enable_dmub_outbox(struct dc *dc)
|
||||
{
|
||||
|
@@ -4596,21 +4716,17 @@ uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
|
|||
}
|
||||
|
||||
/**
|
||||
*****************************************************************************
|
||||
* Function: dc_process_dmub_set_config_async
|
||||
* dc_process_dmub_set_config_async - Submits set_config command
|
||||
*
|
||||
* @brief
|
||||
* Submits set_config command to dmub via inbox message
|
||||
* @dc: [in] dc structure
|
||||
* @link_index: [in] link_index: link index
|
||||
* @payload: [in] aux payload
|
||||
* @notify: [out] set_config immediate reply
|
||||
*
|
||||
* @param
|
||||
* [in] dc: dc structure
|
||||
* [in] link_index: link index
|
||||
* [in] payload: aux payload
|
||||
* [out] notify: set_config immediate reply
|
||||
* Submits set_config command to dmub via inbox message.
|
||||
*
|
||||
* @return
|
||||
* True if successful, False if failure
|
||||
*****************************************************************************
|
||||
* Return:
|
||||
* True if successful, False if failure
|
||||
*/
|
||||
bool dc_process_dmub_set_config_async(struct dc *dc,
|
||||
uint32_t link_index,
|
||||
|
@@ -4646,21 +4762,17 @@ bool dc_process_dmub_set_config_async(struct dc *dc,
|
|||
}
|
||||
|
||||
/**
|
||||
*****************************************************************************
|
||||
* Function: dc_process_dmub_set_mst_slots
|
||||
* dc_process_dmub_set_mst_slots - Submits MST solt allocation
|
||||
*
|
||||
* @brief
|
||||
* Submits mst slot allocation command to dmub via inbox message
|
||||
* @dc: [in] dc structure
|
||||
* @link_index: [in] link index
|
||||
* @mst_alloc_slots: [in] mst slots to be allotted
|
||||
* @mst_slots_in_use: [out] mst slots in use returned in failure case
|
||||
*
|
||||
* @param
|
||||
* [in] dc: dc structure
|
||||
* [in] link_index: link index
|
||||
* [in] mst_alloc_slots: mst slots to be allotted
|
||||
* [out] mst_slots_in_use: mst slots in use returned in failure case
|
||||
* Submits mst slot allocation command to dmub via inbox message
|
||||
*
|
||||
* @return
|
||||
* DC_OK if successful, DC_ERROR if failure
|
||||
*****************************************************************************
|
||||
* Return:
|
||||
* DC_OK if successful, DC_ERROR if failure
|
||||
*/
|
||||
enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
|
||||
uint32_t link_index,
|
||||
|
@@ -4700,19 +4812,12 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
|
|||
}
|
||||
|
||||
/**
|
||||
*****************************************************************************
|
||||
* Function: dc_process_dmub_dpia_hpd_int_enable
|
||||
* dc_process_dmub_dpia_hpd_int_enable - Submits DPIA DPD interruption
|
||||
*
|
||||
* @brief
|
||||
* Submits dpia hpd int enable command to dmub via inbox message
|
||||
* @dc: [in] dc structure
|
||||
* @hpd_int_enable: [in] 1 for hpd int enable, 0 to disable
|
||||
*
|
||||
* @param
|
||||
* [in] dc: dc structure
|
||||
* [in] hpd_int_enable: 1 for hpd int enable, 0 to disable
|
||||
*
|
||||
* @return
|
||||
* None
|
||||
*****************************************************************************
|
||||
* Submits dpia hpd int enable command to dmub via inbox message
|
||||
*/
|
||||
void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
|
||||
uint32_t hpd_int_enable)
|
||||
|
@@ -4741,16 +4846,13 @@ void dc_disable_accelerated_mode(struct dc *dc)
|
|||
|
||||
|
||||
/**
|
||||
*****************************************************************************
|
||||
* dc_notify_vsync_int_state() - notifies vsync enable/disable state
|
||||
* dc_notify_vsync_int_state - notifies vsync enable/disable state
|
||||
* @dc: dc structure
|
||||
* @stream: stream where vsync int state changed
|
||||
* @enable: whether vsync is enabled or disabled
|
||||
* @stream: stream where vsync int state changed
|
||||
* @enable: whether vsync is enabled or disabled
|
||||
*
|
||||
* Called when vsync is enabled/disabled
|
||||
* Will notify DMUB to start/stop ABM interrupts after steady state is reached
|
||||
*
|
||||
*****************************************************************************
|
||||
* Called when vsync is enabled/disabled Will notify DMUB to start/stop ABM
|
||||
* interrupts after steady state is reached.
|
||||
*/
|
||||
void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable)
|
||||
{
|
||||
|
@@ -4792,17 +4894,3 @@ void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bo
|
|||
if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause)
|
||||
pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst);
|
||||
}
|
||||
/*
|
||||
* dc_extended_blank_supported: Decide whether extended blank is supported
|
||||
*
|
||||
* Extended blank is a freesync optimization feature to be enabled in the future.
|
||||
* During the extra vblank period gained from freesync, we have the ability to enter z9/z10.
|
||||
*
|
||||
* @param [in] dc: Current DC state
|
||||
* @return: Indicate whether extended blank is supported (true or false)
|
||||
*/
|
||||
bool dc_extended_blank_supported(struct dc *dc)
|
||||
{
|
||||
return dc->debug.extended_blank_optimization && !dc->debug.disable_z10
|
||||
&& dc->caps.zstate_support && dc->caps.is_apu;
|
||||
}
|
||||
|
|
|
@@ -1444,6 +1444,26 @@ static int acquire_first_split_pipe(
split_pipe->plane_res.mpcc_inst = pool->dpps[i]->inst;
split_pipe->pipe_idx = i;

split_pipe->stream = stream;
return i;
} else if (split_pipe->prev_odm_pipe &&
split_pipe->prev_odm_pipe->plane_state == split_pipe->plane_state) {
split_pipe->prev_odm_pipe->next_odm_pipe = split_pipe->next_odm_pipe;
if (split_pipe->next_odm_pipe)
split_pipe->next_odm_pipe->prev_odm_pipe = split_pipe->prev_odm_pipe;

if (split_pipe->prev_odm_pipe->plane_state)
resource_build_scaling_params(split_pipe->prev_odm_pipe);

memset(split_pipe, 0, sizeof(*split_pipe));
split_pipe->stream_res.tg = pool->timing_generators[i];
split_pipe->plane_res.hubp = pool->hubps[i];
split_pipe->plane_res.ipp = pool->ipps[i];
split_pipe->plane_res.dpp = pool->dpps[i];
split_pipe->stream_res.opp = pool->opps[i];
split_pipe->plane_res.mpcc_inst = pool->dpps[i]->inst;
split_pipe->pipe_idx = i;

split_pipe->stream = stream;
return i;
}

@@ -56,9 +56,7 @@ struct dmub_notification;
|
|||
#define MIN_VIEWPORT_SIZE 12
|
||||
#define MAX_NUM_EDP 2
|
||||
|
||||
/*******************************************************************************
|
||||
* Display Core Interfaces
|
||||
******************************************************************************/
|
||||
/* Display Core Interfaces */
|
||||
struct dc_versions {
|
||||
const char *dc_ver;
|
||||
struct dmcu_version dmcu_version;
|
||||
|
@@ -993,9 +991,7 @@ void dc_init_callbacks(struct dc *dc,
|
|||
void dc_deinit_callbacks(struct dc *dc);
|
||||
void dc_destroy(struct dc **dc);
|
||||
|
||||
/*******************************************************************************
|
||||
* Surface Interfaces
|
||||
******************************************************************************/
|
||||
/* Surface Interfaces */
|
||||
|
||||
enum {
|
||||
TRANSFER_FUNC_POINTS = 1025
|
||||
|
@@ -1274,12 +1270,23 @@ void dc_post_update_surfaces_to_stream(
|
|||
|
||||
#include "dc_stream.h"
|
||||
|
||||
/*
|
||||
* Structure to store surface/stream associations for validation
|
||||
/**
|
||||
* struct dc_validation_set - Struct to store surface/stream associations for validation
|
||||
*/
|
||||
struct dc_validation_set {
|
||||
/**
|
||||
* @stream: Stream state properties
|
||||
*/
|
||||
struct dc_stream_state *stream;
|
||||
|
||||
/**
|
||||
* @plane_state: Surface state
|
||||
*/
|
||||
struct dc_plane_state *plane_states[MAX_SURFACES];
|
||||
|
||||
/**
|
||||
* @plane_count: Total of active planes
|
||||
*/
|
||||
uint8_t plane_count;
|
||||
};
|
||||
|
||||
|
@@ -1326,15 +1333,12 @@ void dc_resource_state_destruct(struct dc_state *context);
|
|||
|
||||
bool dc_resource_is_dsc_encoding_supported(const struct dc *dc);
|
||||
|
||||
/*
|
||||
* TODO update to make it about validation sets
|
||||
* Set up streams and links associated to drive sinks
|
||||
* The streams parameter is an absolute set of all active streams.
|
||||
*
|
||||
* After this call:
|
||||
* Phy, Encoder, Timing Generator are programmed and enabled.
|
||||
* New streams are enabled with blank stream; no memory read.
|
||||
*/
|
||||
enum dc_status dc_commit_streams(struct dc *dc,
|
||||
struct dc_stream_state *streams[],
|
||||
uint8_t stream_count);
|
||||
|
||||
/* TODO: When the transition to the new commit sequence is done, remove this
|
||||
* function in favor of dc_commit_streams. */
|
||||
bool dc_commit_state(struct dc *dc, struct dc_state *context);
|
||||
|
||||
struct dc_state *dc_create_state(struct dc *dc);
|
||||
|
@@ -1342,9 +1346,7 @@ struct dc_state *dc_copy_state(struct dc_state *src_ctx);
|
|||
void dc_retain_state(struct dc_state *context);
|
||||
void dc_release_state(struct dc_state *context);
|
||||
|
||||
/*******************************************************************************
|
||||
* Link Interfaces
|
||||
******************************************************************************/
|
||||
/* Link Interfaces */
|
||||
|
||||
struct dpcd_caps {
|
||||
union dpcd_rev dpcd_rev;
|
||||
|
@@ -1446,9 +1448,7 @@ struct hdcp_caps {
|
|||
|
||||
uint32_t dc_get_opp_for_plane(struct dc *dc, struct dc_plane_state *plane);
|
||||
|
||||
/*******************************************************************************
|
||||
* Sink Interfaces - A sink corresponds to a display output device
|
||||
******************************************************************************/
|
||||
/* Sink Interfaces - A sink corresponds to a display output device */
|
||||
|
||||
struct dc_container_id {
|
||||
// 128bit GUID in binary form
|
||||
|
@@ -1520,8 +1520,6 @@ struct dc_sink_init_data {
|
|||
bool converter_disable_audio;
|
||||
};
|
||||
|
||||
bool dc_extended_blank_supported(struct dc *dc);
|
||||
|
||||
struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params);
|
||||
|
||||
/* Newer interfaces */
|
||||
|
@@ -1531,9 +1529,7 @@ struct dc_cursor {
|
|||
};
|
||||
|
||||
|
||||
/*******************************************************************************
|
||||
* Interrupt interfaces
|
||||
******************************************************************************/
|
||||
/* Interrupt interfaces */
|
||||
enum dc_irq_source dc_interrupt_to_irq_source(
|
||||
struct dc *dc,
|
||||
uint32_t src_id,
|
||||
|
@@ -1545,9 +1541,7 @@ enum dc_irq_source dc_get_hpd_irq_source_at_index(
|
|||
|
||||
void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable);
|
||||
|
||||
/*******************************************************************************
|
||||
* Power Interfaces
|
||||
******************************************************************************/
|
||||
/* Power Interfaces */
|
||||
|
||||
void dc_set_power_state(
|
||||
struct dc *dc,
|
||||
|
@@ -1620,14 +1614,10 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
|
|||
void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
|
||||
uint32_t hpd_int_enable);
|
||||
|
||||
/*******************************************************************************
|
||||
* DSC Interfaces
|
||||
******************************************************************************/
|
||||
/* DSC Interfaces */
|
||||
#include "dc_dsc.h"
|
||||
|
||||
/*******************************************************************************
|
||||
* Disable acc mode Interfaces
|
||||
******************************************************************************/
|
||||
/* Disable acc mode Interfaces */
|
||||
void dc_disable_accelerated_mode(struct dc *dc);
|
||||
|
||||
#endif /* DC_INTERFACE_H_ */
|
||||
|
|
|
@@ -1026,3 +1026,10 @@ void dc_send_update_cursor_info_to_dmu(
dc_send_cmd_to_dmu(pCtx->stream->ctx->dmub_srv, &cmd);
}
}

bool dc_dmub_check_min_version(struct dmub_srv *srv)
{
if (!srv->hw_funcs.is_psrsu_supported)
return true;
return srv->hw_funcs.is_psrsu_supported(srv);
}

@@ -89,4 +89,5 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc, struct dc_state *context, b
void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv);

void dc_send_update_cursor_info_to_dmu(struct pipe_ctx *pCtx, uint8_t pipe_idx);
bool dc_dmub_check_min_version(struct dmub_srv *srv);
#endif /* _DMUB_DC_SRV_H_ */

@@ -41,6 +41,10 @@ struct timing_sync_info {
struct dc_stream_status {
int primary_otg_inst;
int stream_enc_inst;

/**
* @plane_count: Total of planes attached to a single stream
*/
int plane_count;
int audio_inst;
struct timing_sync_info timing_sync_info;
@@ -127,6 +131,7 @@ union stream_update_flags {
uint32_t dsc_changed : 1;
uint32_t mst_bw : 1;
uint32_t crtc_timing_adjust : 1;
uint32_t fams_changed : 1;
} bits;

uint32_t raw;

@@ -2036,7 +2036,7 @@ void dcn20_optimize_bandwidth(
dc->clk_mgr,
context,
true);
if (dc_extended_blank_supported(dc) && context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW) {
if (context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW) {
for (i = 0; i < dc->res_pool->pipe_count; ++i) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

@@ -2044,7 +2044,7 @@ void dcn20_optimize_bandwidth(
&& pipe_ctx->stream->adjust.v_total_min == pipe_ctx->stream->adjust.v_total_max
&& pipe_ctx->stream->adjust.v_total_max > pipe_ctx->stream->timing.v_total)
pipe_ctx->plane_res.hubp->funcs->program_extended_blank(pipe_ctx->plane_res.hubp,
pipe_ctx->dlg_regs.optimized_min_dst_y_next_start);
pipe_ctx->dlg_regs.min_dst_y_next_start);
}
}
}

@@ -292,7 +292,12 @@ void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc)

void optc3_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max)
{
optc1_set_vtotal_min_max(optc, vtotal_min, vtotal_max);
struct dc *dc = optc->ctx->dc;

if (dc->caps.dmub_caps.mclk_sw && !dc->debug.disable_fams)
dc_dmub_srv_drr_update_cmd(dc, optc->inst, vtotal_min, vtotal_max);
else
optc1_set_vtotal_min_max(optc, vtotal_min, vtotal_max);
}

void optc3_tg_init(struct timing_generator *optc)

@@ -103,6 +103,7 @@ static void dcn31_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigne
default:
break;
}
DC_LOG_DEBUG("Set DET%d to %d segments\n", hubp_inst, det_size_segments);
/* Should never be hit, if it is we have an erroneous hw config*/
ASSERT(hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size
+ hubbub2->det3_size + hubbub2->compbuf_size_segments <= hubbub2->crb_size_segs);

@@ -136,6 +136,9 @@
|
|||
|
||||
#define DCN3_15_MAX_DET_SIZE 384
|
||||
#define DCN3_15_CRB_SEGMENT_SIZE_KB 64
|
||||
#define DCN3_15_MAX_DET_SEGS (DCN3_15_MAX_DET_SIZE / DCN3_15_CRB_SEGMENT_SIZE_KB)
|
||||
/* Minimum 2 extra segments need to be in compbuf and claimable to guarantee seamless mpo transitions */
|
||||
#define MIN_RESERVED_DET_SEGS 2
|
||||
|
||||
enum dcn31_clk_src_array_id {
|
||||
DCN31_CLK_SRC_PLL0,
|
||||
|
@@ -1636,21 +1639,61 @@ static bool is_dual_plane(enum surface_pixel_format format)
|
|||
return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
|
||||
}
|
||||
|
||||
static int source_format_to_bpp (enum source_format_class SourcePixelFormat)
|
||||
{
|
||||
if (SourcePixelFormat == dm_444_64)
|
||||
return 8;
|
||||
else if (SourcePixelFormat == dm_444_16 || SourcePixelFormat == dm_444_16)
|
||||
return 2;
|
||||
else if (SourcePixelFormat == dm_444_8)
|
||||
return 1;
|
||||
else if (SourcePixelFormat == dm_rgbe_alpha)
|
||||
return 5;
|
||||
else if (SourcePixelFormat == dm_420_8)
|
||||
return 3;
|
||||
else if (SourcePixelFormat == dm_420_12)
|
||||
return 6;
|
||||
else
|
||||
return 4;
|
||||
}
|
||||
|
||||
static bool allow_pixel_rate_crb(struct dc *dc, struct dc_state *context)
|
||||
{
|
||||
int i;
|
||||
struct resource_context *res_ctx = &context->res_ctx;
|
||||
|
||||
/*Don't apply for single stream*/
|
||||
if (context->stream_count < 2)
|
||||
return false;
|
||||
|
||||
for (i = 0; i < dc->res_pool->pipe_count; i++) {
|
||||
if (!res_ctx->pipe_ctx[i].stream)
|
||||
continue;
|
||||
|
||||
/*Don't apply if MPO to avoid transition issues*/
|
||||
if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state != res_ctx->pipe_ctx[i].plane_state)
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
static int dcn315_populate_dml_pipes_from_context(
|
||||
struct dc *dc, struct dc_state *context,
|
||||
display_e2e_pipe_params_st *pipes,
|
||||
bool fast_validate)
|
||||
{
|
||||
int i, pipe_cnt;
|
||||
int i, pipe_cnt, crb_idx, crb_pipes;
|
||||
struct resource_context *res_ctx = &context->res_ctx;
|
||||
struct pipe_ctx *pipe;
|
||||
const int max_usable_det = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB;
|
||||
int remaining_det_segs = max_usable_det / DCN3_15_CRB_SEGMENT_SIZE_KB;
|
||||
bool pixel_rate_crb = allow_pixel_rate_crb(dc, context);
|
||||
|
||||
DC_FP_START();
|
||||
dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
|
||||
DC_FP_END();
|
||||
|
||||
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
|
||||
for (i = 0, pipe_cnt = 0, crb_pipes = 0; i < dc->res_pool->pipe_count; i++) {
|
||||
struct dc_crtc_timing *timing;
|
||||
|
||||
if (!res_ctx->pipe_ctx[i].stream)
|
||||
|
@@ -1672,6 +1715,23 @@ static int dcn315_populate_dml_pipes_from_context(
|
|||
pipes[pipe_cnt].dout.dsc_input_bpc = 0;
|
||||
DC_FP_START();
|
||||
dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt);
|
||||
if (pixel_rate_crb && !pipe->top_pipe && !pipe->prev_odm_pipe) {
|
||||
int bpp = source_format_to_bpp(pipes[pipe_cnt].pipe.src.source_format);
|
||||
/* Ceil to crb segment size */
|
||||
int approx_det_segs_required_for_pstate = dcn_get_approx_det_segs_required_for_pstate(
|
||||
&context->bw_ctx.dml.soc, timing->pix_clk_100hz, bpp, DCN3_15_CRB_SEGMENT_SIZE_KB);
|
||||
if (approx_det_segs_required_for_pstate <= 2 * DCN3_15_MAX_DET_SEGS) {
|
||||
bool split_required = approx_det_segs_required_for_pstate > DCN3_15_MAX_DET_SEGS;
|
||||
split_required = split_required || timing->pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc);
|
||||
split_required = split_required || (pipe->plane_state && pipe->plane_state->src_rect.width > 5120);
|
||||
if (split_required)
|
||||
approx_det_segs_required_for_pstate += approx_det_segs_required_for_pstate % 2;
|
||||
pipes[pipe_cnt].pipe.src.det_size_override = approx_det_segs_required_for_pstate;
|
||||
remaining_det_segs -= approx_det_segs_required_for_pstate;
|
||||
} else
|
||||
remaining_det_segs = -1;
|
||||
crb_pipes++;
|
||||
}
|
||||
DC_FP_END();
|
||||
|
||||
if (pipes[pipe_cnt].dout.dsc_enable) {
|
||||
|
@ -1690,16 +1750,54 @@ static int dcn315_populate_dml_pipes_from_context(
|
|||
break;
|
||||
}
|
||||
}
|
||||
|
||||
pipe_cnt++;
|
||||
}
|
||||
|
||||
/* Spread remaining unreserved crb evenly among all pipes*/
|
||||
if (pixel_rate_crb) {
|
||||
for (i = 0, pipe_cnt = 0, crb_idx = 0; i < dc->res_pool->pipe_count; i++) {
|
||||
pipe = &res_ctx->pipe_ctx[i];
|
||||
if (!pipe->stream)
|
||||
continue;
|
||||
|
||||
/* Do not use asymmetric crb if not enough for pstate support */
|
||||
if (remaining_det_segs < 0) {
|
||||
pipes[pipe_cnt].pipe.src.det_size_override = 0;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!pipe->top_pipe && !pipe->prev_odm_pipe) {
|
||||
bool split_required = pipe->stream->timing.pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc)
|
||||
|| (pipe->plane_state && pipe->plane_state->src_rect.width > 5120);
|
||||
|
||||
if (remaining_det_segs > MIN_RESERVED_DET_SEGS)
|
||||
pipes[pipe_cnt].pipe.src.det_size_override += (remaining_det_segs - MIN_RESERVED_DET_SEGS) / crb_pipes +
|
||||
(crb_idx < (remaining_det_segs - MIN_RESERVED_DET_SEGS) % crb_pipes ? 1 : 0);
|
||||
if (pipes[pipe_cnt].pipe.src.det_size_override > 2 * DCN3_15_MAX_DET_SEGS) {
|
||||
/* Clamp to 2 pipe split max det segments */
|
||||
remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override - 2 * (DCN3_15_MAX_DET_SEGS);
|
||||
pipes[pipe_cnt].pipe.src.det_size_override = 2 * DCN3_15_MAX_DET_SEGS;
|
||||
}
|
||||
if (pipes[pipe_cnt].pipe.src.det_size_override > DCN3_15_MAX_DET_SEGS || split_required) {
|
||||
/* If we are splitting we must have an even number of segments */
|
||||
remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override % 2;
|
||||
pipes[pipe_cnt].pipe.src.det_size_override -= pipes[pipe_cnt].pipe.src.det_size_override % 2;
|
||||
}
|
||||
/* Convert segments into size for DML use */
|
||||
pipes[pipe_cnt].pipe.src.det_size_override *= DCN3_15_CRB_SEGMENT_SIZE_KB;
|
||||
|
||||
crb_idx++;
|
||||
}
|
||||
pipe_cnt++;
|
||||
}
|
||||
}
|
||||
|
||||
if (pipe_cnt)
|
||||
context->bw_ctx.dml.ip.det_buffer_size_kbytes =
|
||||
(max_usable_det / DCN3_15_CRB_SEGMENT_SIZE_KB / pipe_cnt) * DCN3_15_CRB_SEGMENT_SIZE_KB;
|
||||
if (context->bw_ctx.dml.ip.det_buffer_size_kbytes > DCN3_15_MAX_DET_SIZE)
|
||||
context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_15_MAX_DET_SIZE;
|
||||
ASSERT(context->bw_ctx.dml.ip.det_buffer_size_kbytes >= DCN3_15_DEFAULT_DET_SIZE);
|
||||
|
||||
dc->config.enable_4to1MPC = false;
|
||||
if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) {
|
||||
if (is_dual_plane(pipe->plane_state->format)
|
||||
|
@ -1707,7 +1805,9 @@ static int dcn315_populate_dml_pipes_from_context(
|
|||
dc->config.enable_4to1MPC = true;
|
||||
context->bw_ctx.dml.ip.det_buffer_size_kbytes =
|
||||
(max_usable_det / DCN3_15_CRB_SEGMENT_SIZE_KB / 4) * DCN3_15_CRB_SEGMENT_SIZE_KB;
|
||||
} else if (!is_dual_plane(pipe->plane_state->format) && pipe->plane_state->src_rect.width <= 5120) {
|
||||
} else if (!is_dual_plane(pipe->plane_state->format)
|
||||
&& pipe->plane_state->src_rect.width <= 5120
|
||||
&& pipe->stream->timing.pix_clk_100hz < dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc)) {
|
||||
/* Limit to 5k max to avoid forced pipe split when there is not enough detile for swath */
|
||||
context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
|
||||
pipes[0].pipe.src.unbounded_req_mode = true;
|
||||
|
|
|
@ -948,10 +948,10 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
|
|||
{
|
||||
int plane_count;
|
||||
int i;
|
||||
unsigned int optimized_min_dst_y_next_start_us;
|
||||
unsigned int min_dst_y_next_start_us;
|
||||
|
||||
plane_count = 0;
|
||||
optimized_min_dst_y_next_start_us = 0;
|
||||
min_dst_y_next_start_us = 0;
|
||||
for (i = 0; i < dc->res_pool->pipe_count; i++) {
|
||||
if (context->res_ctx.pipe_ctx[i].plane_state)
|
||||
plane_count++;
|
||||
|
@ -973,19 +973,18 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
|
|||
else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {
|
||||
struct dc_link *link = context->streams[0]->sink->link;
|
||||
struct dc_stream_status *stream_status = &context->stream_status[0];
|
||||
struct dc_stream_state *current_stream = context->streams[0];
|
||||
int minmum_z8_residency = dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000;
|
||||
bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency;
|
||||
bool is_pwrseq0 = link->link_index == 0;
|
||||
bool isFreesyncVideo;
|
||||
|
||||
if (dc_extended_blank_supported(dc)) {
|
||||
for (i = 0; i < dc->res_pool->pipe_count; i++) {
|
||||
if (context->res_ctx.pipe_ctx[i].stream == context->streams[0]
|
||||
&& context->res_ctx.pipe_ctx[i].stream->adjust.v_total_min == context->res_ctx.pipe_ctx[i].stream->adjust.v_total_max
|
||||
&& context->res_ctx.pipe_ctx[i].stream->adjust.v_total_min > context->res_ctx.pipe_ctx[i].stream->timing.v_total) {
|
||||
optimized_min_dst_y_next_start_us =
|
||||
context->res_ctx.pipe_ctx[i].dlg_regs.optimized_min_dst_y_next_start_us;
|
||||
break;
|
||||
}
|
||||
isFreesyncVideo = current_stream->adjust.v_total_min == current_stream->adjust.v_total_max;
|
||||
isFreesyncVideo = isFreesyncVideo && current_stream->timing.v_total < current_stream->adjust.v_total_min;
|
||||
for (i = 0; i < dc->res_pool->pipe_count; i++) {
|
||||
if (context->res_ctx.pipe_ctx[i].stream == current_stream && isFreesyncVideo) {
|
||||
min_dst_y_next_start_us = context->res_ctx.pipe_ctx[i].dlg_regs.min_dst_y_next_start_us;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -993,7 +992,7 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
|
|||
if (stream_status->plane_count > 1)
|
||||
return DCN_ZSTATE_SUPPORT_DISALLOW;
|
||||
|
||||
if (is_pwrseq0 && (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || optimized_min_dst_y_next_start_us > 5000))
|
||||
if (is_pwrseq0 && (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || min_dst_y_next_start_us > 5000))
|
||||
return DCN_ZSTATE_SUPPORT_ALLOW;
|
||||
else if (is_pwrseq0 && link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr)
|
||||
return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
|
||||
|
|
|
@ -483,7 +483,7 @@ void dcn31_calculate_wm_and_dlg_fp(
|
|||
int pipe_cnt,
|
||||
int vlevel)
|
||||
{
|
||||
int i, pipe_idx, active_dpp_count = 0;
|
||||
int i, pipe_idx, total_det = 0, active_hubp_count = 0;
|
||||
double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
|
||||
|
||||
dc_assert_fp_enabled();
|
||||
|
@ -529,7 +529,7 @@ void dcn31_calculate_wm_and_dlg_fp(
|
|||
continue;
|
||||
|
||||
if (context->res_ctx.pipe_ctx[i].plane_state)
|
||||
active_dpp_count++;
|
||||
active_hubp_count++;
|
||||
|
||||
pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
|
||||
pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
|
||||
|
@ -547,9 +547,34 @@ void dcn31_calculate_wm_and_dlg_fp(
|
|||
}
|
||||
|
||||
dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
|
||||
/* For 31x apu pstate change is only supported if possible in vactive or if there are no active dpps */
|
||||
/* For 31x apu pstate change is only supported if possible in vactive*/
|
||||
context->bw_ctx.bw.dcn.clk.p_state_change_support =
|
||||
context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] == dm_dram_clock_change_vactive || !active_dpp_count;
|
||||
context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] == dm_dram_clock_change_vactive;
|
||||
/* If DCN isn't making memory requests we can allow pstate change and lower clocks */
|
||||
if (!active_hubp_count) {
|
||||
context->bw_ctx.bw.dcn.clk.socclk_khz = 0;
|
||||
context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
|
||||
context->bw_ctx.bw.dcn.clk.dcfclk_khz = 0;
|
||||
context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = 0;
|
||||
context->bw_ctx.bw.dcn.clk.dramclk_khz = 0;
|
||||
context->bw_ctx.bw.dcn.clk.fclk_khz = 0;
|
||||
context->bw_ctx.bw.dcn.clk.p_state_change_support = true;
|
||||
for (i = 0; i < dc->res_pool->pipe_count; i++)
|
||||
if (context->res_ctx.pipe_ctx[i].stream)
|
||||
context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = 0;
|
||||
}
|
||||
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
|
||||
if (!context->res_ctx.pipe_ctx[i].stream)
|
||||
continue;
|
||||
|
||||
context->res_ctx.pipe_ctx[i].det_buffer_size_kb =
|
||||
get_det_buffer_size_kbytes(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
|
||||
if (context->res_ctx.pipe_ctx[i].det_buffer_size_kb > 384)
|
||||
context->res_ctx.pipe_ctx[i].det_buffer_size_kb /= 2;
|
||||
total_det += context->res_ctx.pipe_ctx[i].det_buffer_size_kb;
|
||||
pipe_idx++;
|
||||
}
|
||||
context->bw_ctx.bw.dcn.compbuf_size_kb = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - total_det;
|
||||
}
|
||||
|
||||
void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
|
||||
|
@ -797,3 +822,19 @@ void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
|
|||
else
|
||||
dml_init_instance(&dc->dml, &dcn3_16_soc, &dcn3_16_ip, DML_PROJECT_DCN31_FPGA);
|
||||
}
|
||||
|
||||
int dcn_get_max_non_odm_pix_rate_100hz(struct _vcs_dpi_soc_bounding_box_st *soc)
|
||||
{
|
||||
return soc->clock_limits[0].dispclk_mhz * 10000.0 / (1.0 + soc->dcn_downspread_percent / 100.0);
|
||||
}
|
||||
|
||||
int dcn_get_approx_det_segs_required_for_pstate(
|
||||
struct _vcs_dpi_soc_bounding_box_st *soc,
|
||||
int pix_clk_100hz, int bpp, int seg_size_kb)
|
||||
{
|
||||
/* Roughly calculate required crb to hide latency. In practice there is slightly
|
||||
* more buffer available for latency hiding
|
||||
*/
|
||||
return (int)(soc->dram_clock_change_latency_us * pix_clk_100hz * bpp
|
||||
/ 10240000 + seg_size_kb - 1) / seg_size_kb;
|
||||
}
|
||||
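
The two helpers added in this hunk are plain unit conversions, so a rough worked example may help when reviewing the math. The sketch below is illustrative only and not part of the commit: the 600 MHz dispclk, 0.5% downspread, 100 us DRAM clock-change latency, 300 MHz pixel clock, 4 bytes per pixel and 64 KB CRB segment size are made-up round numbers.

	#include <assert.h>

	int
	main(void)
	{
		/* dcn_get_max_non_odm_pix_rate_100hz(): dispclk derated by downspread */
		double dispclk_mhz = 600.0, downspread_pct = 0.5;
		int max_pix_rate_100hz = dispclk_mhz * 10000.0 / (1.0 + downspread_pct / 100.0);
		assert(max_pix_rate_100hz == 5970149);	/* ~597 MHz, in 100 Hz units */

		/*
		 * dcn_get_approx_det_segs_required_for_pstate(): KB of CRB needed to
		 * hide the latency, rounded up to whole segments.  The 10240000
		 * divisor folds together the 100 Hz pixel-clock units (1e4) and
		 * the bytes-to-KB conversion (1024).
		 */
		double latency_us = 100.0;
		int pix_clk_100hz = 3000000, bpp = 4, seg_size_kb = 64;
		int segs = (int)(latency_us * pix_clk_100hz * bpp
		    / 10240000 + seg_size_kb - 1) / seg_size_kb;
		assert(segs == 2);			/* ceil(117.2 KB / 64 KB) */

		return 0;
	}
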
|
|
|
@ -46,5 +46,9 @@ void dcn31_calculate_wm_and_dlg_fp(
|
|||
void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
|
||||
void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
|
||||
void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
|
||||
int dcn_get_max_non_odm_pix_rate_100hz(struct _vcs_dpi_soc_bounding_box_st *soc);
|
||||
int dcn_get_approx_det_segs_required_for_pstate(
|
||||
struct _vcs_dpi_soc_bounding_box_st *soc,
|
||||
int pix_clk_100hz, int bpp, int seg_size_kb);
|
||||
|
||||
#endif /* __DCN31_FPU_H__*/
|
||||
|
|
|
@ -533,7 +533,8 @@ static void CalculateStutterEfficiency(
|
|||
static void CalculateSwathAndDETConfiguration(
|
||||
bool ForceSingleDPP,
|
||||
int NumberOfActivePlanes,
|
||||
unsigned int DETBufferSizeInKByte,
|
||||
bool DETSharedByAllDPP,
|
||||
unsigned int DETBufferSizeInKByte[],
|
||||
double MaximumSwathWidthLuma[],
|
||||
double MaximumSwathWidthChroma[],
|
||||
enum scan_direction_class SourceScan[],
|
||||
|
@ -3116,7 +3117,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
|
|||
v->SurfaceWidthC[k],
|
||||
v->SurfaceHeightY[k],
|
||||
v->SurfaceHeightC[k],
|
||||
v->DETBufferSizeInKByte[0] * 1024,
|
||||
v->DETBufferSizeInKByte[k] * 1024,
|
||||
v->BlockHeight256BytesY[k],
|
||||
v->BlockHeight256BytesC[k],
|
||||
v->SurfaceTiling[k],
|
||||
|
@ -3311,7 +3312,8 @@ static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib)
|
|||
CalculateSwathAndDETConfiguration(
|
||||
false,
|
||||
v->NumberOfActivePlanes,
|
||||
v->DETBufferSizeInKByte[0],
|
||||
mode_lib->project == DML_PROJECT_DCN315 && v->DETSizeOverride[0],
|
||||
v->DETBufferSizeInKByte,
|
||||
dummy1,
|
||||
dummy2,
|
||||
v->SourceScan,
|
||||
|
@ -3777,14 +3779,16 @@ static noinline void CalculatePrefetchSchedulePerPlane(
|
|||
&v->VReadyOffsetPix[k]);
|
||||
}
|
||||
|
||||
static void PatchDETBufferSizeInKByte(unsigned int NumberOfActivePlanes, int NoOfDPPThisState[], unsigned int config_return_buffer_size_in_kbytes, unsigned int *DETBufferSizeInKByte)
|
||||
static void PatchDETBufferSizeInKByte(unsigned int NumberOfActivePlanes, int NoOfDPPThisState[], unsigned int config_return_buffer_size_in_kbytes, unsigned int DETBufferSizeInKByte[])
|
||||
{
|
||||
int i, total_pipes = 0;
|
||||
for (i = 0; i < NumberOfActivePlanes; i++)
|
||||
total_pipes += NoOfDPPThisState[i];
|
||||
*DETBufferSizeInKByte = ((config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB) / 64 / total_pipes) * 64;
|
||||
if (*DETBufferSizeInKByte > DCN3_15_MAX_DET_SIZE)
|
||||
*DETBufferSizeInKByte = DCN3_15_MAX_DET_SIZE;
|
||||
DETBufferSizeInKByte[0] = ((config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB) / 64 / total_pipes) * 64;
|
||||
if (DETBufferSizeInKByte[0] > DCN3_15_MAX_DET_SIZE)
|
||||
DETBufferSizeInKByte[0] = DCN3_15_MAX_DET_SIZE;
|
||||
for (i = 1; i < NumberOfActivePlanes; i++)
|
||||
DETBufferSizeInKByte[i] = DETBufferSizeInKByte[0];
|
||||
}
|
||||
|
||||
|
||||
|
@ -4024,7 +4028,8 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
|
|||
CalculateSwathAndDETConfiguration(
|
||||
true,
|
||||
v->NumberOfActivePlanes,
|
||||
v->DETBufferSizeInKByte[0],
|
||||
mode_lib->project == DML_PROJECT_DCN315 && v->DETSizeOverride[0],
|
||||
v->DETBufferSizeInKByte,
|
||||
v->MaximumSwathWidthLuma,
|
||||
v->MaximumSwathWidthChroma,
|
||||
v->SourceScan,
|
||||
|
@ -4164,6 +4169,10 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
|
|||
|| (v->PlaneRequiredDISPCLK > v->MaxDispclkRoundedDownToDFSGranularity)) {
|
||||
v->DISPCLK_DPPCLK_Support[i][j] = false;
|
||||
}
|
||||
if (mode_lib->project == DML_PROJECT_DCN315 && v->DETSizeOverride[k] > DCN3_15_MAX_DET_SIZE && v->NoOfDPP[i][j][k] < 2) {
|
||||
v->MPCCombine[i][j][k] = true;
|
||||
v->NoOfDPP[i][j][k] = 2;
|
||||
}
|
||||
}
|
||||
v->TotalNumberOfActiveDPP[i][j] = 0;
|
||||
v->TotalNumberOfSingleDPPPlanes[i][j] = 0;
|
||||
|
@ -4640,12 +4649,13 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
|
|||
v->ODMCombineEnableThisState[k] = v->ODMCombineEnablePerState[i][k];
|
||||
}
|
||||
|
||||
if (v->NumberOfActivePlanes > 1 && mode_lib->project == DML_PROJECT_DCN315)
|
||||
PatchDETBufferSizeInKByte(v->NumberOfActivePlanes, v->NoOfDPPThisState, v->ip.config_return_buffer_size_in_kbytes, &v->DETBufferSizeInKByte[0]);
|
||||
if (v->NumberOfActivePlanes > 1 && mode_lib->project == DML_PROJECT_DCN315 && !v->DETSizeOverride[0])
|
||||
PatchDETBufferSizeInKByte(v->NumberOfActivePlanes, v->NoOfDPPThisState, v->ip.config_return_buffer_size_in_kbytes, v->DETBufferSizeInKByte);
|
||||
CalculateSwathAndDETConfiguration(
|
||||
false,
|
||||
v->NumberOfActivePlanes,
|
||||
v->DETBufferSizeInKByte[0],
|
||||
mode_lib->project == DML_PROJECT_DCN315 && v->DETSizeOverride[0],
|
||||
v->DETBufferSizeInKByte,
|
||||
v->MaximumSwathWidthLuma,
|
||||
v->MaximumSwathWidthChroma,
|
||||
v->SourceScan,
|
||||
|
@ -6557,7 +6567,8 @@ static void CalculateStutterEfficiency(
|
|||
static void CalculateSwathAndDETConfiguration(
|
||||
bool ForceSingleDPP,
|
||||
int NumberOfActivePlanes,
|
||||
unsigned int DETBufferSizeInKByte,
|
||||
bool DETSharedByAllDPP,
|
||||
unsigned int DETBufferSizeInKByteA[],
|
||||
double MaximumSwathWidthLuma[],
|
||||
double MaximumSwathWidthChroma[],
|
||||
enum scan_direction_class SourceScan[],
|
||||
|
@ -6641,6 +6652,10 @@ static void CalculateSwathAndDETConfiguration(
|
|||
|
||||
*ViewportSizeSupport = true;
|
||||
for (k = 0; k < NumberOfActivePlanes; ++k) {
|
||||
unsigned int DETBufferSizeInKByte = DETBufferSizeInKByteA[k];
|
||||
|
||||
if (DETSharedByAllDPP && DPPPerPlane[k])
|
||||
DETBufferSizeInKByte /= DPPPerPlane[k];
|
||||
if ((SourcePixelFormat[k] == dm_444_64 || SourcePixelFormat[k] == dm_444_32 || SourcePixelFormat[k] == dm_444_16 || SourcePixelFormat[k] == dm_mono_16
|
||||
|| SourcePixelFormat[k] == dm_mono_8 || SourcePixelFormat[k] == dm_rgbe)) {
|
||||
if (SurfaceTiling[k] == dm_sw_linear
|
||||
|
|
|
@ -988,8 +988,7 @@ static void dml_rq_dlg_get_dlg_params(
|
|||
|
||||
dlg_vblank_start = interlaced ? (vblank_start / 2) : vblank_start;
|
||||
disp_dlg_regs->min_dst_y_next_start = (unsigned int) (((double) dlg_vblank_start) * dml_pow(2, 2));
|
||||
disp_dlg_regs->optimized_min_dst_y_next_start_us = 0;
|
||||
disp_dlg_regs->optimized_min_dst_y_next_start = disp_dlg_regs->min_dst_y_next_start;
|
||||
disp_dlg_regs->min_dst_y_next_start_us = 0;
|
||||
ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int)dml_pow(2, 18));
|
||||
|
||||
dml_print("DML_DLG: %s: min_ttu_vblank (us) = %3.2f\n", __func__, min_ttu_vblank);
|
||||
|
|
|
@ -32,7 +32,7 @@
|
|||
#include "dml/display_mode_vba.h"
|
||||
|
||||
struct _vcs_dpi_ip_params_st dcn3_14_ip = {
|
||||
.VBlankNomDefaultUS = 668,
|
||||
.VBlankNomDefaultUS = 800,
|
||||
.gpuvm_enable = 1,
|
||||
.gpuvm_max_page_table_levels = 1,
|
||||
.hostvm_enable = 1,
|
||||
|
@ -288,6 +288,7 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
|
|||
struct resource_context *res_ctx = &context->res_ctx;
|
||||
struct pipe_ctx *pipe;
|
||||
bool upscaled = false;
|
||||
const unsigned int max_allowed_vblank_nom = 1023;
|
||||
|
||||
dc_assert_fp_enabled();
|
||||
|
||||
|
@ -301,9 +302,15 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
|
|||
pipe = &res_ctx->pipe_ctx[i];
|
||||
timing = &pipe->stream->timing;
|
||||
|
||||
if (dc_extended_blank_supported(dc) && pipe->stream->adjust.v_total_max == pipe->stream->adjust.v_total_min
|
||||
&& pipe->stream->adjust.v_total_min > timing->v_total)
|
||||
if (pipe->stream->adjust.v_total_min != 0)
|
||||
pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min;
|
||||
else
|
||||
pipes[pipe_cnt].pipe.dest.vtotal = timing->v_total;
|
||||
|
||||
pipes[pipe_cnt].pipe.dest.vblank_nom = timing->v_total - pipes[pipe_cnt].pipe.dest.vactive;
|
||||
pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, dcn3_14_ip.VBlankNomDefaultUS);
|
||||
pipes[pipe_cnt].pipe.dest.vblank_nom = max(pipes[pipe_cnt].pipe.dest.vblank_nom, timing->v_sync_width);
|
||||
pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, max_allowed_vblank_nom);
|
||||
|
||||
if (pipe->plane_state &&
|
||||
(pipe->plane_state->src_rect.height < pipe->plane_state->dst_rect.height ||
|
||||
|
@ -327,8 +334,6 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
|
|||
pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
|
||||
pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
|
||||
pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
|
||||
pipes[pipe_cnt].pipe.dest.vblank_nom =
|
||||
dcn3_14_ip.VBlankNomDefaultUS / (timing->h_total / (timing->pix_clk_100hz / 10000.0));
|
||||
pipes[pipe_cnt].pipe.src.dcc_rate = 3;
|
||||
pipes[pipe_cnt].dout.dsc_input_bpc = 0;
|
||||
|
||||
|
|
|
@ -1053,7 +1053,6 @@ static void dml_rq_dlg_get_dlg_params(
|
|||
|
||||
float vba__refcyc_per_req_delivery_pre_l = get_refcyc_per_req_delivery_pre_l_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
|
||||
float vba__refcyc_per_req_delivery_l = get_refcyc_per_req_delivery_l_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
|
||||
int blank_lines = 0;
|
||||
|
||||
memset(disp_dlg_regs, 0, sizeof(*disp_dlg_regs));
|
||||
memset(disp_ttu_regs, 0, sizeof(*disp_ttu_regs));
|
||||
|
@ -1077,17 +1076,10 @@ static void dml_rq_dlg_get_dlg_params(
|
|||
min_ttu_vblank = get_min_ttu_vblank_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); // From VBA
|
||||
|
||||
dlg_vblank_start = interlaced ? (vblank_start / 2) : vblank_start;
|
||||
disp_dlg_regs->optimized_min_dst_y_next_start = disp_dlg_regs->min_dst_y_next_start;
|
||||
disp_dlg_regs->optimized_min_dst_y_next_start_us = 0;
|
||||
disp_dlg_regs->min_dst_y_next_start = (unsigned int) (((double) dlg_vblank_start) * dml_pow(2, 2));
|
||||
blank_lines = (dst->vblank_end + dst->vtotal_min - dst->vblank_start - dst->vstartup_start - 1);
|
||||
if (blank_lines < 0)
|
||||
blank_lines = 0;
|
||||
if (blank_lines != 0) {
|
||||
disp_dlg_regs->optimized_min_dst_y_next_start = vba__min_dst_y_next_start;
|
||||
disp_dlg_regs->optimized_min_dst_y_next_start_us = (disp_dlg_regs->optimized_min_dst_y_next_start * dst->hactive) / (unsigned int) dst->pixel_rate_mhz;
|
||||
disp_dlg_regs->min_dst_y_next_start = disp_dlg_regs->optimized_min_dst_y_next_start;
|
||||
}
|
||||
disp_dlg_regs->min_dst_y_next_start_us =
|
||||
(vba__min_dst_y_next_start * dst->hactive) / (unsigned int) dst->pixel_rate_mhz;
|
||||
disp_dlg_regs->min_dst_y_next_start = vba__min_dst_y_next_start * dml_pow(2, 2);
|
||||
|
||||
ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int)dml_pow(2, 18));
|
||||
|
||||
dml_print("DML_DLG: %s: min_ttu_vblank (us) = %3.2f\n", __func__, min_ttu_vblank);
|
||||
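
A minimal sketch of the unit handling in the replacement lines above, using invented numbers (100 lines from VBA, 3840 active pixels, a 600 MHz pixel clock). Reading the register value as a fixed-point line count with two fractional bits is an inference from the dml_pow(2, 2) scaling, not something stated in the diff.

	#include <assert.h>

	int
	main(void)
	{
		unsigned int vba_min_dst_y_next_start = 100;	/* lines, per VBA */
		unsigned int hactive = 3840;			/* pixels per line */
		unsigned int pixel_rate_mhz = 600;		/* pixels per microsecond */

		/* min_dst_y_next_start_us: pixel count divided by pixel rate */
		unsigned int us = (vba_min_dst_y_next_start * hactive) / pixel_rate_mhz;
		assert(us == 640);

		/* min_dst_y_next_start: the same line count scaled by 2^2 for the
		 * register, which must stay below 2^18 per the ASSERT above */
		unsigned int reg = vba_min_dst_y_next_start * 4;
		assert(reg == 400 && reg < (1U << 18));

		return 0;
	}
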
|
|
|
@ -1237,7 +1237,7 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
|
|||
display_e2e_pipe_params_st *pipes,
|
||||
int pipe_cnt, int vlevel)
|
||||
{
|
||||
int i, pipe_idx;
|
||||
int i, pipe_idx, active_hubp_count = 0;
|
||||
bool usr_retraining_support = false;
|
||||
bool unbounded_req_enabled = false;
|
||||
|
||||
|
@ -1282,6 +1282,8 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
|
|||
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
|
||||
if (!context->res_ctx.pipe_ctx[i].stream)
|
||||
continue;
|
||||
if (context->res_ctx.pipe_ctx[i].plane_state)
|
||||
active_hubp_count++;
|
||||
pipes[pipe_idx].pipe.dest.vstartup_start = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt,
|
||||
pipe_idx);
|
||||
pipes[pipe_idx].pipe.dest.vupdate_offset = get_vupdate_offset(&context->bw_ctx.dml, pipes, pipe_cnt,
|
||||
|
@ -1303,10 +1305,23 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
|
|||
|
||||
if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
|
||||
context->bw_ctx.bw.dcn.clk.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
|
||||
context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
|
||||
if (context->res_ctx.pipe_ctx[i].plane_state)
|
||||
context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
|
||||
else
|
||||
context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = 0;
|
||||
context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest;
|
||||
pipe_idx++;
|
||||
}
|
||||
/* If DCN isn't making memory requests we can allow pstate change and lower clocks */
|
||||
if (!active_hubp_count) {
|
||||
context->bw_ctx.bw.dcn.clk.socclk_khz = 0;
|
||||
context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
|
||||
context->bw_ctx.bw.dcn.clk.dcfclk_khz = 0;
|
||||
context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = 0;
|
||||
context->bw_ctx.bw.dcn.clk.dramclk_khz = 0;
|
||||
context->bw_ctx.bw.dcn.clk.fclk_khz = 0;
|
||||
context->bw_ctx.bw.dcn.clk.p_state_change_support = true;
|
||||
}
|
||||
/*save a original dppclock copy*/
|
||||
context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz;
|
||||
context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz;
|
||||
|
|
|
@ -618,8 +618,7 @@ struct _vcs_dpi_display_dlg_regs_st {
|
|||
unsigned int refcyc_h_blank_end;
|
||||
unsigned int dlg_vblank_end;
|
||||
unsigned int min_dst_y_next_start;
|
||||
unsigned int optimized_min_dst_y_next_start;
|
||||
unsigned int optimized_min_dst_y_next_start_us;
|
||||
unsigned int min_dst_y_next_start_us;
|
||||
unsigned int refcyc_per_htotal;
|
||||
unsigned int refcyc_x_after_scaler;
|
||||
unsigned int dst_y_after_scaler;
|
||||
|
|
|
@ -569,6 +569,10 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
|
|||
mode_lib->vba.OutputLinkDPRate[mode_lib->vba.NumberOfActivePlanes] = dout->dp_rate;
|
||||
mode_lib->vba.ODMUse[mode_lib->vba.NumberOfActivePlanes] = dst->odm_combine_policy;
|
||||
mode_lib->vba.DETSizeOverride[mode_lib->vba.NumberOfActivePlanes] = src->det_size_override;
|
||||
if (src->det_size_override)
|
||||
mode_lib->vba.DETBufferSizeInKByte[mode_lib->vba.NumberOfActivePlanes] = src->det_size_override;
|
||||
else
|
||||
mode_lib->vba.DETBufferSizeInKByte[mode_lib->vba.NumberOfActivePlanes] = ip->det_buffer_size_kbytes;
|
||||
//TODO: Need to assign correct values to dp_multistream vars
|
||||
mode_lib->vba.OutputMultistreamEn[mode_lib->vba.NumberOfActiveSurfaces] = dout->dp_multistream_en;
|
||||
mode_lib->vba.OutputMultistreamId[mode_lib->vba.NumberOfActiveSurfaces] = dout->dp_multistream_id;
|
||||
|
@ -783,6 +787,8 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
|
|||
mode_lib->vba.pipe_plane[k] =
|
||||
mode_lib->vba.NumberOfActivePlanes;
|
||||
mode_lib->vba.DPPPerPlane[mode_lib->vba.NumberOfActivePlanes]++;
|
||||
if (src_k->det_size_override)
|
||||
mode_lib->vba.DETBufferSizeInKByte[mode_lib->vba.NumberOfActivePlanes] = src_k->det_size_override;
|
||||
if (mode_lib->vba.SourceScan[mode_lib->vba.NumberOfActivePlanes]
|
||||
== dm_horz) {
|
||||
mode_lib->vba.ViewportWidth[mode_lib->vba.NumberOfActivePlanes] +=
|
||||
|
|
|
@ -350,6 +350,8 @@ struct dmub_srv_hw_funcs {
|
|||
|
||||
bool (*is_supported)(struct dmub_srv *dmub);
|
||||
|
||||
bool (*is_psrsu_supported)(struct dmub_srv *dmub);
|
||||
|
||||
bool (*is_hw_init)(struct dmub_srv *dmub);
|
||||
|
||||
bool (*is_phy_init)(struct dmub_srv *dmub);
|
||||
|
|
|
@ -347,7 +347,7 @@ union dmub_fw_boot_status {
|
|||
uint32_t optimized_init_done : 1; /**< 1 if optimized init done */
|
||||
uint32_t restore_required : 1; /**< 1 if driver should call restore */
|
||||
uint32_t defer_load : 1; /**< 1 if VBIOS data is deferred programmed */
|
||||
uint32_t reserved : 1;
|
||||
uint32_t fams_enabled : 1; /**< 1 if VBIOS data is deferred programmed */
|
||||
uint32_t detection_required: 1; /**< if detection need to be triggered by driver */
|
||||
|
||||
} bits; /**< status bits */
|
||||
|
|
|
@ -297,6 +297,11 @@ bool dmub_dcn31_is_supported(struct dmub_srv *dmub)
|
|||
return supported;
|
||||
}
|
||||
|
||||
bool dmub_dcn31_is_psrsu_supported(struct dmub_srv *dmub)
|
||||
{
|
||||
return dmub->fw_version >= DMUB_FW_VERSION(4, 0, 59);
|
||||
}
|
||||
|
||||
void dmub_dcn31_set_gpint(struct dmub_srv *dmub,
|
||||
union dmub_gpint_data_register reg)
|
||||
{
|
||||
|
|
|
@ -219,6 +219,8 @@ bool dmub_dcn31_is_hw_init(struct dmub_srv *dmub);
|
|||
|
||||
bool dmub_dcn31_is_supported(struct dmub_srv *dmub);
|
||||
|
||||
bool dmub_dcn31_is_psrsu_supported(struct dmub_srv *dmub);
|
||||
|
||||
void dmub_dcn31_set_gpint(struct dmub_srv *dmub,
|
||||
union dmub_gpint_data_register reg);
|
||||
|
||||
|
|
67
sys/dev/pci/drm/amd/display/dmub/src/dmub_dcn314.c
Normal file
|
@ -0,0 +1,67 @@
|
|||
/*
|
||||
* Copyright 2021 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: AMD
|
||||
*
|
||||
*/
|
||||
|
||||
#include "../dmub_srv.h"
|
||||
#include "dmub_reg.h"
|
||||
#include "dmub_dcn314.h"
|
||||
|
||||
#include "dcn/dcn_3_1_4_offset.h"
|
||||
#include "dcn/dcn_3_1_4_sh_mask.h"
|
||||
|
||||
#define DCN_BASE__INST0_SEG0 0x00000012
|
||||
#define DCN_BASE__INST0_SEG1 0x000000C0
|
||||
#define DCN_BASE__INST0_SEG2 0x000034C0
|
||||
#define DCN_BASE__INST0_SEG3 0x00009000
|
||||
#define DCN_BASE__INST0_SEG4 0x02403C00
|
||||
#define DCN_BASE__INST0_SEG5 0
|
||||
|
||||
#define BASE_INNER(seg) DCN_BASE__INST0_SEG##seg
|
||||
#define CTX dmub
|
||||
#define REGS dmub->regs_dcn31
|
||||
#define REG_OFFSET_EXP(reg_name) (BASE(reg##reg_name##_BASE_IDX) + reg##reg_name)
|
||||
|
||||
/* Registers. */
|
||||
|
||||
const struct dmub_srv_dcn31_regs dmub_srv_dcn314_regs = {
|
||||
#define DMUB_SR(reg) REG_OFFSET_EXP(reg),
|
||||
{
|
||||
DMUB_DCN31_REGS()
|
||||
DMCUB_INTERNAL_REGS()
|
||||
},
|
||||
#undef DMUB_SR
|
||||
|
||||
#define DMUB_SF(reg, field) FD_MASK(reg, field),
|
||||
{ DMUB_DCN31_FIELDS() },
|
||||
#undef DMUB_SF
|
||||
|
||||
#define DMUB_SF(reg, field) FD_SHIFT(reg, field),
|
||||
{ DMUB_DCN31_FIELDS() },
|
||||
#undef DMUB_SF
|
||||
};
|
||||
|
||||
bool dmub_dcn314_is_psrsu_supported(struct dmub_srv *dmub)
|
||||
{
|
||||
return dmub->fw_version >= DMUB_FW_VERSION(8, 0, 16);
|
||||
}
|
35
sys/dev/pci/drm/amd/display/dmub/src/dmub_dcn314.h
Normal file
|
@ -0,0 +1,35 @@
|
|||
/*
|
||||
* Copyright 2021 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: AMD
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef _DMUB_DCN314_H_
|
||||
#define _DMUB_DCN314_H_
|
||||
|
||||
#include "dmub_dcn31.h"
|
||||
|
||||
extern const struct dmub_srv_dcn31_regs dmub_srv_dcn314_regs;
|
||||
|
||||
bool dmub_dcn314_is_psrsu_supported(struct dmub_srv *dmub);
|
||||
|
||||
#endif /* _DMUB_DCN314_H_ */
|
|
@ -32,6 +32,7 @@
|
|||
#include "dmub_dcn302.h"
|
||||
#include "dmub_dcn303.h"
|
||||
#include "dmub_dcn31.h"
|
||||
#include "dmub_dcn314.h"
|
||||
#include "dmub_dcn315.h"
|
||||
#include "dmub_dcn316.h"
|
||||
#include "dmub_dcn32.h"
|
||||
|
@ -226,12 +227,17 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
|
|||
case DMUB_ASIC_DCN314:
|
||||
case DMUB_ASIC_DCN315:
|
||||
case DMUB_ASIC_DCN316:
|
||||
if (asic == DMUB_ASIC_DCN315)
|
||||
if (asic == DMUB_ASIC_DCN314) {
|
||||
dmub->regs_dcn31 = &dmub_srv_dcn314_regs;
|
||||
funcs->is_psrsu_supported = dmub_dcn314_is_psrsu_supported;
|
||||
} else if (asic == DMUB_ASIC_DCN315) {
|
||||
dmub->regs_dcn31 = &dmub_srv_dcn315_regs;
|
||||
else if (asic == DMUB_ASIC_DCN316)
|
||||
} else if (asic == DMUB_ASIC_DCN316) {
|
||||
dmub->regs_dcn31 = &dmub_srv_dcn316_regs;
|
||||
else
|
||||
} else {
|
||||
dmub->regs_dcn31 = &dmub_srv_dcn31_regs;
|
||||
funcs->is_psrsu_supported = dmub_dcn31_is_psrsu_supported;
|
||||
}
|
||||
funcs->reset = dmub_dcn31_reset;
|
||||
funcs->reset_release = dmub_dcn31_reset_release;
|
||||
funcs->backdoor_load = dmub_dcn31_backdoor_load;
|
||||
|
|
|
@ -2081,91 +2081,36 @@ static int sienna_cichlid_display_disable_memory_clock_switch(struct smu_context
|
|||
return ret;
|
||||
}
|
||||
|
||||
static void sienna_cichlid_get_override_pcie_settings(struct smu_context *smu,
|
||||
uint32_t *gen_speed_override,
|
||||
uint32_t *lane_width_override)
|
||||
{
|
||||
struct amdgpu_device *adev = smu->adev;
|
||||
|
||||
*gen_speed_override = 0xff;
|
||||
*lane_width_override = 0xff;
|
||||
|
||||
switch (adev->pdev->device) {
|
||||
case 0x73A0:
|
||||
case 0x73A1:
|
||||
case 0x73A2:
|
||||
case 0x73A3:
|
||||
case 0x73AB:
|
||||
case 0x73AE:
|
||||
/* Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32 */
|
||||
*lane_width_override = 6;
|
||||
break;
|
||||
case 0x73E0:
|
||||
case 0x73E1:
|
||||
case 0x73E3:
|
||||
*lane_width_override = 4;
|
||||
break;
|
||||
case 0x7420:
|
||||
case 0x7421:
|
||||
case 0x7422:
|
||||
case 0x7423:
|
||||
case 0x7424:
|
||||
*lane_width_override = 3;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
#ifndef MAX
|
||||
#define MAX(a, b) ((a) > (b) ? (a) : (b))
|
||||
#endif
|
||||
|
||||
static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
|
||||
uint32_t pcie_gen_cap,
|
||||
uint32_t pcie_width_cap)
|
||||
{
|
||||
struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
|
||||
struct smu_11_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table;
|
||||
uint32_t gen_speed_override, lane_width_override;
|
||||
uint8_t *table_member1, *table_member2;
|
||||
uint32_t min_gen_speed, max_gen_speed;
|
||||
uint32_t min_lane_width, max_lane_width;
|
||||
uint32_t smu_pcie_arg;
|
||||
u32 smu_pcie_arg;
|
||||
int ret, i;
|
||||
|
||||
GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1);
|
||||
GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2);
|
||||
/* PCIE gen speed and lane width override */
|
||||
if (!amdgpu_device_pcie_dynamic_switching_supported()) {
|
||||
if (pcie_table->pcie_gen[NUM_LINK_LEVELS - 1] < pcie_gen_cap)
|
||||
pcie_gen_cap = pcie_table->pcie_gen[NUM_LINK_LEVELS - 1];
|
||||
|
||||
sienna_cichlid_get_override_pcie_settings(smu,
|
||||
&gen_speed_override,
|
||||
&lane_width_override);
|
||||
if (pcie_table->pcie_lane[NUM_LINK_LEVELS - 1] < pcie_width_cap)
|
||||
pcie_width_cap = pcie_table->pcie_lane[NUM_LINK_LEVELS - 1];
|
||||
|
||||
/* PCIE gen speed override */
|
||||
if (gen_speed_override != 0xff) {
|
||||
min_gen_speed = MIN(pcie_gen_cap, gen_speed_override);
|
||||
max_gen_speed = MIN(pcie_gen_cap, gen_speed_override);
|
||||
/* Force all levels to use the same settings */
|
||||
for (i = 0; i < NUM_LINK_LEVELS; i++) {
|
||||
pcie_table->pcie_gen[i] = pcie_gen_cap;
|
||||
pcie_table->pcie_lane[i] = pcie_width_cap;
|
||||
}
|
||||
} else {
|
||||
min_gen_speed = MAX(0, table_member1[0]);
|
||||
max_gen_speed = MIN(pcie_gen_cap, table_member1[1]);
|
||||
min_gen_speed = min_gen_speed > max_gen_speed ?
|
||||
max_gen_speed : min_gen_speed;
|
||||
for (i = 0; i < NUM_LINK_LEVELS; i++) {
|
||||
if (pcie_table->pcie_gen[i] > pcie_gen_cap)
|
||||
pcie_table->pcie_gen[i] = pcie_gen_cap;
|
||||
if (pcie_table->pcie_lane[i] > pcie_width_cap)
|
||||
pcie_table->pcie_lane[i] = pcie_width_cap;
|
||||
}
|
||||
}
|
||||
pcie_table->pcie_gen[0] = min_gen_speed;
|
||||
pcie_table->pcie_gen[1] = max_gen_speed;
|
||||
|
||||
/* PCIE lane width override */
|
||||
if (lane_width_override != 0xff) {
|
||||
min_lane_width = MIN(pcie_width_cap, lane_width_override);
|
||||
max_lane_width = MIN(pcie_width_cap, lane_width_override);
|
||||
} else {
|
||||
min_lane_width = MAX(1, table_member2[0]);
|
||||
max_lane_width = MIN(pcie_width_cap, table_member2[1]);
|
||||
min_lane_width = min_lane_width > max_lane_width ?
|
||||
max_lane_width : min_lane_width;
|
||||
}
|
||||
pcie_table->pcie_lane[0] = min_lane_width;
|
||||
pcie_table->pcie_lane[1] = max_lane_width;
|
||||
|
||||
for (i = 0; i < NUM_LINK_LEVELS; i++) {
|
||||
smu_pcie_arg = (i << 16 |
|
||||
|
|
|
@ -2490,29 +2490,6 @@ int smu_v13_0_mode1_reset(struct smu_context *smu)
|
|||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Intel hosts such as Raptor Lake and Sapphire Rapids don't support dynamic
|
||||
* speed switching. Until we have confirmation from Intel that a specific host
|
||||
* supports it, it's safer that we keep it disabled for all.
|
||||
*
|
||||
* https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
|
||||
* https://gitlab.freedesktop.org/drm/amd/-/issues/2663
|
||||
*/
|
||||
static bool smu_v13_0_is_pcie_dynamic_switching_supported(void)
|
||||
{
|
||||
#if IS_ENABLED(CONFIG_X86)
|
||||
#ifdef __linux__
|
||||
struct cpuinfo_x86 *c = &cpu_data(0);
|
||||
|
||||
if (c->x86_vendor == X86_VENDOR_INTEL)
|
||||
#else
|
||||
if (strcmp(cpu_vendor, "GenuineIntel") == 0)
|
||||
#endif
|
||||
return false;
|
||||
#endif
|
||||
return true;
|
||||
}
|
||||
|
||||
int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
|
||||
uint32_t pcie_gen_cap,
|
||||
uint32_t pcie_width_cap)
|
||||
|
@ -2524,7 +2501,7 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
|
|||
uint32_t smu_pcie_arg;
|
||||
int ret, i;
|
||||
|
||||
if (!smu_v13_0_is_pcie_dynamic_switching_supported()) {
|
||||
if (!amdgpu_device_pcie_dynamic_switching_supported()) {
|
||||
if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap)
|
||||
pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1];
|
||||
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/* $OpenBSD: drm_linux.c,v 1.101 2023/07/18 06:58:59 claudio Exp $ */
|
||||
/* $OpenBSD: drm_linux.c,v 1.103 2023/08/04 09:36:28 jsg Exp $ */
|
||||
/*
|
||||
* Copyright (c) 2013 Jonathan Gray <jsg@openbsd.org>
|
||||
* Copyright (c) 2015, 2016 Mark Kettenis <kettenis@openbsd.org>
|
||||
|
@ -2007,14 +2007,14 @@ dma_fence_get_stub(void)
|
|||
}
|
||||
|
||||
struct dma_fence *
|
||||
dma_fence_allocate_private_stub(void)
|
||||
dma_fence_allocate_private_stub(ktime_t ts)
|
||||
{
|
||||
struct dma_fence *f = malloc(sizeof(*f), M_DRM,
|
||||
M_ZERO | M_WAITOK | M_CANFAIL);
|
||||
if (f == NULL)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
return NULL;
|
||||
dma_fence_init(f, &dma_fence_stub_ops, &dma_fence_stub_mtx, 0, 0);
|
||||
dma_fence_signal(f);
|
||||
dma_fence_signal_timestamp(f, ts);
|
||||
return f;
|
||||
}
|
||||
|
||||
|
|
|
@ -356,10 +356,10 @@ EXPORT_SYMBOL(drm_syncobj_replace_fence);
|
|||
*/
|
||||
static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
|
||||
{
|
||||
struct dma_fence *fence = dma_fence_allocate_private_stub();
|
||||
struct dma_fence *fence = dma_fence_allocate_private_stub(ktime_get());
|
||||
|
||||
if (IS_ERR(fence))
|
||||
return PTR_ERR(fence);
|
||||
if (!fence)
|
||||
return -ENOMEM;
|
||||
|
||||
drm_syncobj_replace_fence(syncobj, fence);
|
||||
dma_fence_put(fence);
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
# $OpenBSD: files.drm,v 1.59 2023/01/01 01:34:34 jsg Exp $
|
||||
# $OpenBSD: files.drm,v 1.60 2023/08/04 08:49:46 jsg Exp $
|
||||
|
||||
#file dev/pci/drm/aperture.c drm
|
||||
file dev/pci/drm/dma-resv.c drm
|
||||
|
@ -1182,6 +1182,7 @@ file dev/pci/drm/amd/display/dmub/src/dmub_dcn301.c amdgpu
|
|||
file dev/pci/drm/amd/display/dmub/src/dmub_dcn302.c amdgpu
|
||||
file dev/pci/drm/amd/display/dmub/src/dmub_dcn303.c amdgpu
|
||||
file dev/pci/drm/amd/display/dmub/src/dmub_dcn31.c amdgpu
|
||||
file dev/pci/drm/amd/display/dmub/src/dmub_dcn314.c amdgpu
|
||||
file dev/pci/drm/amd/display/dmub/src/dmub_dcn315.c amdgpu
|
||||
file dev/pci/drm/amd/display/dmub/src/dmub_dcn316.c amdgpu
|
||||
file dev/pci/drm/amd/display/dmub/src/dmub_dcn32.c amdgpu
|
||||
|
|
|
@ -163,6 +163,8 @@ struct i915_vma *intel_dpt_pin(struct i915_address_space *vm)
|
|||
i915_vma_get(vma);
|
||||
}
|
||||
|
||||
dpt->obj->mm.dirty = true;
|
||||
|
||||
atomic_dec(&i915->gpu_error.pending_fb_pin);
|
||||
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
|
||||
|
||||
|
@ -258,7 +260,7 @@ intel_dpt_create(struct intel_framebuffer *fb)
|
|||
dpt_obj = i915_gem_object_create_stolen(i915, size);
|
||||
if (IS_ERR(dpt_obj) && !HAS_LMEM(i915)) {
|
||||
drm_dbg_kms(&i915->drm, "Allocating dpt from smem\n");
|
||||
dpt_obj = i915_gem_object_create_internal(i915, size);
|
||||
dpt_obj = i915_gem_object_create_shmem(i915, size);
|
||||
}
|
||||
if (IS_ERR(dpt_obj))
|
||||
return ERR_CAST(dpt_obj);
|
||||
|
|
|
@ -1185,8 +1185,10 @@ static int igt_write_huge(struct drm_i915_private *i915,
|
|||
* times in succession a possibility by enlarging the permutation array.
|
||||
*/
|
||||
order = i915_random_order(count * count, &prng);
|
||||
if (!order)
|
||||
return -ENOMEM;
|
||||
if (!order) {
|
||||
err = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
|
||||
max = div_u64(max - size, max_page_size);
|
||||
|
|
|
@ -78,7 +78,7 @@ bool dma_fence_remove_callback(struct dma_fence *, struct dma_fence_cb *);
|
|||
bool dma_fence_is_container(struct dma_fence *);
|
||||
|
||||
struct dma_fence *dma_fence_get_stub(void);
|
||||
struct dma_fence *dma_fence_allocate_private_stub(void);
|
||||
struct dma_fence *dma_fence_allocate_private_stub(ktime_t);
|
||||
|
||||
static inline void
|
||||
dma_fence_free(struct dma_fence *fence)
|
||||
|
|
|
@ -16,7 +16,7 @@
|
|||
#define __GFP_RETRY_MAYFAIL 0
|
||||
#define __GFP_MOVABLE 0
|
||||
#define __GFP_COMP 0
|
||||
#define __GFP_KSWAPD_RECLAIM 0
|
||||
#define __GFP_KSWAPD_RECLAIM M_NOWAIT
|
||||
#define __GFP_HIGHMEM 0
|
||||
#define __GFP_RECLAIMABLE 0
|
||||
#define __GFP_NOMEMALLOC 0
|
||||
|
|
|
@ -41,9 +41,20 @@ jiffies_to_nsecs(const unsigned long x)
|
|||
#define nsecs_to_jiffies(x) (((uint64_t)(x)) * hz / 1000000000)
|
||||
#define nsecs_to_jiffies64(x) (((uint64_t)(x)) * hz / 1000000000)
|
||||
#define get_jiffies_64() jiffies
|
||||
#define time_after(a,b) ((long)(b) - (long)(a) < 0)
|
||||
|
||||
static inline int
|
||||
time_after(const unsigned long a, const unsigned long b)
|
||||
{
|
||||
return((long)(b - a) < 0);
|
||||
}
|
||||
#define time_before(a,b) time_after(b,a)
|
||||
|
||||
static inline int
|
||||
time_after_eq(const unsigned long a, const unsigned long b)
|
||||
{
|
||||
return((long)(b - a) <= 0);
|
||||
}
|
||||
|
||||
#define time_after32(a,b) ((int32_t)((uint32_t)(b) - (uint32_t)(a)) < 0)
|
||||
#define time_after_eq(a,b) ((long)(b) - (long)(a) <= 0)
|
||||
#define time_before(a,b) ((long)(a) - (long)(b) < 0)
|
||||
|
||||
#endif
|
||||
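
The old time_after()/time_after_eq() macros cast each operand to long before subtracting; the inline replacements above subtract as unsigned first and cast the difference, presumably to keep the comparison well defined across a jiffies wrap and to gain argument type checking. The standalone check below copies the new time_after() body verbatim from the hunk; everything else is illustrative and shows why the signed cast on the difference gives the right answer where a plain > comparison would not.

	#include <assert.h>
	#include <limits.h>

	static inline int
	time_after(const unsigned long a, const unsigned long b)
	{
		return((long)(b - a) < 0);
	}

	int
	main(void)
	{
		/* no wrap: tick 1005 is after tick 1000 */
		assert(time_after(1005UL, 1000UL));

		/* across a wrap: the counter ran from near ULONG_MAX around to 3,
		 * so 3 is still "after" ULONG_MAX - 10 */
		assert(time_after(3UL, ULONG_MAX - 10UL));

		/* a naive comparison gets the wrapped case backwards */
		assert(!(3UL > ULONG_MAX - 10UL));

		return 0;
	}
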
|
|
|
@ -1,4 +1,4 @@
|
|||
/* $OpenBSD: scatterlist.h,v 1.5 2023/01/01 01:34:58 jsg Exp $ */
|
||||
/* $OpenBSD: scatterlist.h,v 1.6 2023/08/02 11:03:17 jsg Exp $ */
|
||||
/*
|
||||
* Copyright (c) 2013, 2014, 2015 Mark Kettenis
|
||||
*
|
||||
|
@ -119,7 +119,6 @@ sg_set_page(struct scatterlist *sgl, struct vm_page *page,
|
|||
sgl->dma_address = page ? VM_PAGE_TO_PHYS(page) : 0;
|
||||
sgl->offset = offset;
|
||||
sgl->length = length;
|
||||
sgl->end = false;
|
||||
}
|
||||
|
||||
#define sg_dma_address(sg) ((sg)->dma_address)
|
||||
|
|
|
@ -499,17 +499,18 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
|
|||
goto out;
|
||||
}
|
||||
|
||||
bounce:
|
||||
ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
|
||||
if (ret == -EMULTIHOP) {
|
||||
do {
|
||||
ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
|
||||
if (ret != -EMULTIHOP)
|
||||
break;
|
||||
|
||||
ret = ttm_bo_bounce_temp_buffer(bo, &evict_mem, ctx, &hop);
|
||||
if (ret) {
|
||||
} while (!ret);
|
||||
|
||||
if (ret) {
|
||||
ttm_resource_free(bo, &evict_mem);
|
||||
if (ret != -ERESTARTSYS && ret != -EINTR)
|
||||
pr_err("Buffer eviction failed\n");
|
||||
ttm_resource_free(bo, &evict_mem);
|
||||
goto out;
|
||||
}
|
||||
/* try and move to final place now. */
|
||||
goto bounce;
|
||||
}
|
||||
out:
|
||||
return ret;
|
||||
|
@ -549,6 +550,12 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
|
|||
{
|
||||
bool ret = false;
|
||||
|
||||
if (bo->pin_count) {
|
||||
*locked = false;
|
||||
*busy = false;
|
||||
return false;
|
||||
}
|
||||
|
||||
if (bo->base.resv == ctx->resv) {
|
||||
dma_resv_assert_held(bo->base.resv);
|
||||
if (ctx->allow_res_evict)
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/* $OpenBSD: if_ix.c,v 1.202 2023/07/28 20:25:08 bluhm Exp $ */
|
||||
/* $OpenBSD: if_ix.c,v 1.203 2023/08/03 18:56:32 jan Exp $ */
|
||||
|
||||
/******************************************************************************
|
||||
|
||||
|
@ -3231,7 +3231,7 @@ ixgbe_rxeof(struct rx_ring *rxr)
|
|||
sendmp->m_pkthdr.len = 0;
|
||||
sendmp->m_pkthdr.ph_mss = 0;
|
||||
#if NVLAN > 0
|
||||
if (sc->vlan_stripping && staterr & IXGBE_RXD_STAT_VP) {
|
||||
if (staterr & IXGBE_RXD_STAT_VP) {
|
||||
sendmp->m_pkthdr.ether_vtag = vtag;
|
||||
SET(sendmp->m_flags, M_VLANTAG);
|
||||
}
|
||||
|
@ -3273,7 +3273,8 @@ ixgbe_rxeof(struct rx_ring *rxr)
|
|||
ether_extract_headers(sendmp, &ext);
|
||||
hdrlen = sizeof(*ext.eh);
|
||||
#if NVLAN > 0
|
||||
if (ext.evh)
|
||||
if (ISSET(sendmp->m_flags, M_VLANTAG) ||
|
||||
ext.evh)
|
||||
hdrlen += ETHER_VLAN_ENCAP_LEN;
|
||||
#endif
|
||||
if (ext.ip4)
|
||||
|
@ -3361,20 +3362,8 @@ ixgbe_rx_checksum(uint32_t staterr, struct mbuf * mp)
|
|||
void
|
||||
ixgbe_setup_vlan_hw_support(struct ix_softc *sc)
|
||||
{
|
||||
struct ifnet *ifp = &sc->arpcom.ac_if;
|
||||
uint32_t ctrl;
|
||||
int i;
|
||||
|
||||
/*
|
||||
* We have to disable VLAN striping when using TCP offloading, due to a
|
||||
* firmware bug.
|
||||
*/
|
||||
if (ISSET(ifp->if_xflags, IFXF_LRO)) {
|
||||
sc->vlan_stripping = 0;
|
||||
return;
|
||||
}
|
||||
|
||||
sc->vlan_stripping = 1;
|
||||
uint32_t ctrl;
|
||||
int i;
|
||||
|
||||
/*
|
||||
* A soft reset zero's out the VFTA, so
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/* $OpenBSD: if_ix.h,v 1.45 2022/06/27 15:11:23 jan Exp $ */
|
||||
/* $OpenBSD: if_ix.h,v 1.46 2023/08/04 10:58:27 jan Exp $ */
|
||||
|
||||
/******************************************************************************
|
||||
|
||||
|
@ -225,7 +225,6 @@ struct ix_softc {
|
|||
struct ifmedia media;
|
||||
struct intrmap *sc_intrmap;
|
||||
int if_flags;
|
||||
int vlan_stripping;
|
||||
|
||||
uint16_t num_vlans;
|
||||
uint16_t num_queues;
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/* $OpenBSD: if_vmx.c,v 1.70 2022/09/11 08:38:39 yasuoka Exp $ */
|
||||
/* $OpenBSD: if_vmx.c,v 1.78 2023/07/30 04:27:01 dlg Exp $ */
|
||||
|
||||
/*
|
||||
* Copyright (c) 2013 Tsubai Masanari
|
||||
|
@ -61,31 +61,49 @@
|
|||
#define VMX_RX_GEN htole32(VMXNET3_RX_GEN_M << VMXNET3_RX_GEN_S)
|
||||
#define VMX_RXC_GEN htole32(VMXNET3_RXC_GEN_M << VMXNET3_RXC_GEN_S)
|
||||
|
||||
struct vmx_dmamem {
|
||||
bus_dmamap_t vdm_map;
|
||||
bus_dma_segment_t vdm_seg;
|
||||
int vdm_nsegs;
|
||||
size_t vdm_size;
|
||||
caddr_t vdm_kva;
|
||||
};
|
||||
|
||||
#define VMX_DMA_MAP(_vdm) ((_vdm)->vdm_map)
|
||||
#define VMX_DMA_DVA(_vdm) ((_vdm)->vdm_map->dm_segs[0].ds_addr)
|
||||
#define VMX_DMA_KVA(_vdm) ((void *)(_vdm)->vdm_kva)
|
||||
#define VMX_DMA_LEN(_vdm) ((_vdm)->vdm_size)
|
||||
|
||||
struct vmxnet3_softc;
|
||||
|
||||
struct vmxnet3_txring {
|
||||
struct vmx_dmamem dmamem;
|
||||
struct mbuf *m[NTXDESC];
|
||||
bus_dmamap_t dmap[NTXDESC];
|
||||
struct vmxnet3_txdesc *txd;
|
||||
u_int32_t gen;
|
||||
u_int prod;
|
||||
u_int cons;
|
||||
volatile u_int prod;
|
||||
volatile u_int cons;
|
||||
};
|
||||
|
||||
struct vmxnet3_rxring {
|
||||
struct vmxnet3_softc *sc;
|
||||
struct vmxnet3_rxq_shared *rs; /* copy of the rxqueue rs */
|
||||
struct vmx_dmamem dmamem;
|
||||
struct mbuf *m[NRXDESC];
|
||||
bus_dmamap_t dmap[NRXDESC];
|
||||
struct mutex mtx;
|
||||
struct if_rxring rxr;
|
||||
struct timeout refill;
|
||||
struct vmxnet3_rxdesc *rxd;
|
||||
bus_size_t rxh;
|
||||
u_int fill;
|
||||
u_int32_t gen;
|
||||
u_int8_t rid;
|
||||
};
|
||||
|
||||
struct vmxnet3_comp_ring {
|
||||
struct vmx_dmamem dmamem;
|
||||
union {
|
||||
struct vmxnet3_txcompdesc *txcd;
|
||||
struct vmxnet3_rxcompdesc *rxcd;
|
||||
|
@ -101,6 +119,7 @@ struct vmxnet3_txqueue {
|
|||
struct vmxnet3_txq_shared *ts;
|
||||
struct ifqueue *ifq;
|
||||
struct kstat *txkstat;
|
||||
unsigned int queue;
|
||||
} __aligned(64);
|
||||
|
||||
struct vmxnet3_rxqueue {
|
||||
|
@ -193,6 +212,12 @@ void vmxnet3_media_status(struct ifnet *, struct ifmediareq *);
|
|||
int vmxnet3_media_change(struct ifnet *);
|
||||
void *vmxnet3_dma_allocmem(struct vmxnet3_softc *, u_int, u_int, bus_addr_t *);
|
||||
|
||||
static int vmx_dmamem_alloc(struct vmxnet3_softc *, struct vmx_dmamem *,
|
||||
bus_size_t, u_int);
|
||||
#ifdef notyet
|
||||
static void vmx_dmamem_free(struct vmxnet3_softc *, struct vmx_dmamem *);
|
||||
#endif
|
||||
|
||||
#if NKSTAT > 0
|
||||
static void vmx_kstat_init(struct vmxnet3_softc *);
|
||||
static void vmx_kstat_txstats(struct vmxnet3_softc *,
|
||||
|
@ -532,16 +557,18 @@ vmxnet3_alloc_txring(struct vmxnet3_softc *sc, int queue, int intr)
|
|||
struct vmxnet3_txq_shared *ts;
|
||||
struct vmxnet3_txring *ring = &tq->cmd_ring;
|
||||
struct vmxnet3_comp_ring *comp_ring = &tq->comp_ring;
|
||||
bus_addr_t pa, comp_pa;
|
||||
int idx;
|
||||
|
||||
ring->txd = vmxnet3_dma_allocmem(sc, NTXDESC * sizeof ring->txd[0], 512, &pa);
|
||||
if (ring->txd == NULL)
|
||||
tq->queue = queue;
|
||||
|
||||
if (vmx_dmamem_alloc(sc, &ring->dmamem,
|
||||
NTXDESC * sizeof(struct vmxnet3_txdesc), 512) != 0)
|
||||
return -1;
|
||||
comp_ring->txcd = vmxnet3_dma_allocmem(sc,
|
||||
NTXCOMPDESC * sizeof comp_ring->txcd[0], 512, &comp_pa);
|
||||
if (comp_ring->txcd == NULL)
|
||||
ring->txd = VMX_DMA_KVA(&ring->dmamem);
|
||||
if (vmx_dmamem_alloc(sc, &comp_ring->dmamem,
|
||||
NTXCOMPDESC * sizeof(comp_ring->txcd[0]), 512) != 0)
|
||||
return -1;
|
||||
comp_ring->txcd = VMX_DMA_KVA(&comp_ring->dmamem);
|
||||
|
||||
for (idx = 0; idx < NTXDESC; idx++) {
|
||||
if (bus_dmamap_create(sc->sc_dmat, JUMBO_LEN, NTXSEGS,
|
||||
|
@ -553,9 +580,9 @@ vmxnet3_alloc_txring(struct vmxnet3_softc *sc, int queue, int intr)
|
|||
bzero(ts, sizeof *ts);
|
||||
ts->npending = 0;
|
||||
ts->intr_threshold = 1;
|
||||
ts->cmd_ring = pa;
|
||||
ts->cmd_ring = VMX_DMA_DVA(&ring->dmamem);
|
||||
ts->cmd_ring_len = NTXDESC;
|
||||
ts->comp_ring = comp_pa;
|
||||
ts->comp_ring = VMX_DMA_DVA(&comp_ring->dmamem);
|
||||
ts->comp_ring_len = NTXCOMPDESC;
|
||||
ts->driver_data = ~0ULL;
|
||||
ts->driver_data_len = 0;
|
||||
|
@ -572,21 +599,20 @@ vmxnet3_alloc_rxring(struct vmxnet3_softc *sc, int queue, int intr)
|
|||
struct vmxnet3_rxq_shared *rs;
|
||||
struct vmxnet3_rxring *ring;
|
||||
struct vmxnet3_comp_ring *comp_ring;
|
||||
bus_addr_t pa[2], comp_pa;
|
||||
int i, idx;
|
||||
|
||||
for (i = 0; i < 2; i++) {
|
||||
ring = &rq->cmd_ring[i];
|
||||
ring->rxd = vmxnet3_dma_allocmem(sc, NRXDESC * sizeof ring->rxd[0],
|
||||
512, &pa[i]);
|
||||
if (ring->rxd == NULL)
|
||||
if (vmx_dmamem_alloc(sc, &ring->dmamem,
|
||||
NRXDESC * sizeof(struct vmxnet3_rxdesc), 512) != 0)
|
||||
return -1;
|
||||
ring->rxd = VMX_DMA_KVA(&ring->dmamem);
|
||||
}
|
||||
comp_ring = &rq->comp_ring;
|
||||
comp_ring->rxcd = vmxnet3_dma_allocmem(sc,
|
||||
NRXCOMPDESC * sizeof comp_ring->rxcd[0], 512, &comp_pa);
|
||||
if (comp_ring->rxcd == NULL)
|
||||
if (vmx_dmamem_alloc(sc, &comp_ring->dmamem,
|
||||
NRXCOMPDESC * sizeof(comp_ring->rxcd[0]), 512) != 0)
|
||||
return -1;
|
||||
comp_ring->rxcd = VMX_DMA_KVA(&comp_ring->dmamem);
|
||||
|
||||
for (i = 0; i < 2; i++) {
|
||||
ring = &rq->cmd_ring[i];
|
||||
|
@ -599,15 +625,19 @@ vmxnet3_alloc_rxring(struct vmxnet3_softc *sc, int queue, int intr)
|
|||
JUMBO_LEN, 0, BUS_DMA_NOWAIT, &ring->dmap[idx]))
|
||||
return -1;
|
||||
}
|
||||
|
||||
ring->rs = rq->rs;
|
||||
ring->rxh = (i == 0) ?
|
||||
VMXNET3_BAR0_RXH1(queue) : VMXNET3_BAR0_RXH2(queue);
|
||||
}
|
||||
|
||||
rs = rq->rs;
|
||||
bzero(rs, sizeof *rs);
|
||||
rs->cmd_ring[0] = pa[0];
|
||||
rs->cmd_ring[1] = pa[1];
|
||||
rs->cmd_ring[0] = VMX_DMA_DVA(&rq->cmd_ring[0].dmamem);
|
||||
rs->cmd_ring[1] = VMX_DMA_DVA(&rq->cmd_ring[1].dmamem);
|
||||
rs->cmd_ring_len[0] = NRXDESC;
|
||||
rs->cmd_ring_len[1] = NRXDESC;
|
||||
rs->comp_ring = comp_pa;
|
||||
rs->comp_ring = VMX_DMA_DVA(&comp_ring->dmamem);
|
||||
rs->comp_ring_len = NRXCOMPDESC;
|
||||
rs->driver_data = ~0ULL;
|
||||
rs->driver_data_len = 0;
|
||||
|
@ -627,8 +657,16 @@ vmxnet3_txinit(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *tq)
|
|||
ring->gen = VMX_TX_GEN;
|
||||
comp_ring->next = 0;
|
||||
comp_ring->gen = VMX_TXC_GEN;
|
||||
bzero(ring->txd, NTXDESC * sizeof ring->txd[0]);
|
||||
bzero(comp_ring->txcd, NTXCOMPDESC * sizeof comp_ring->txcd[0]);
|
||||
memset(VMX_DMA_KVA(&ring->dmamem), 0,
|
||||
VMX_DMA_LEN(&ring->dmamem));
|
||||
bus_dmamap_sync(sc->sc_dmat, VMX_DMA_MAP(&ring->dmamem),
|
||||
0, VMX_DMA_LEN(&ring->dmamem), BUS_DMASYNC_PREWRITE);
|
||||
memset(VMX_DMA_KVA(&comp_ring->dmamem), 0,
|
||||
VMX_DMA_LEN(&comp_ring->dmamem));
|
||||
bus_dmamap_sync(sc->sc_dmat, VMX_DMA_MAP(&comp_ring->dmamem),
|
||||
0, VMX_DMA_LEN(&comp_ring->dmamem), BUS_DMASYNC_PREREAD);
|
||||
|
||||
ifq_clr_oactive(tq->ifq);
|
||||
}
|
||||
|
||||
void
|
||||
|
@ -657,10 +695,17 @@ vmxnet3_rxfill(struct vmxnet3_rxring *ring)
|
|||
|
||||
MUTEX_ASSERT_LOCKED(&ring->mtx);
|
||||
|
||||
slots = if_rxr_get(&ring->rxr, NRXDESC);
|
||||
if (slots == 0)
|
||||
return;
|
||||
|
||||
prod = ring->fill;
|
||||
rgen = ring->gen;
|
||||
|
||||
for (slots = if_rxr_get(&ring->rxr, NRXDESC); slots > 0; slots--) {
|
||||
bus_dmamap_sync(sc->sc_dmat, VMX_DMA_MAP(&ring->dmamem),
|
||||
0, VMX_DMA_LEN(&ring->dmamem), BUS_DMASYNC_POSTWRITE);
|
||||
|
||||
do {
|
||||
KASSERT(ring->m[prod] == NULL);
|
||||
|
||||
m = MCLGETL(NULL, M_DONTWAIT, JUMBO_LEN);
|
||||
|
@ -681,7 +726,9 @@ vmxnet3_rxfill(struct vmxnet3_rxring *ring)
|
|||
|
||||
rxd = &ring->rxd[prod];
|
||||
rxd->rx_addr = htole64(DMAADDR(map));
|
||||
membar_producer();
|
||||
bus_dmamap_sync(sc->sc_dmat, VMX_DMA_MAP(&ring->dmamem),
|
||||
0, VMX_DMA_LEN(&ring->dmamem),
|
||||
BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTWRITE);
|
||||
rxd->rx_word2 = (htole32(m->m_pkthdr.len & VMXNET3_RX_LEN_M) <<
|
||||
VMXNET3_RX_LEN_S) | type | rgen;
|
||||
|
||||
|
@ -689,7 +736,11 @@ vmxnet3_rxfill(struct vmxnet3_rxring *ring)
|
|||
prod = 0;
|
||||
rgen ^= VMX_RX_GEN;
|
||||
}
|
||||
}
|
||||
} while (--slots > 0);
|
||||
|
||||
bus_dmamap_sync(sc->sc_dmat, VMX_DMA_MAP(&ring->dmamem),
|
||||
0, VMX_DMA_LEN(&ring->dmamem), BUS_DMASYNC_PREWRITE);
|
||||
|
||||
if_rxr_put(&ring->rxr, slots);
|
||||
|
||||
ring->fill = prod;
|
||||
|
@ -697,6 +748,9 @@ vmxnet3_rxfill(struct vmxnet3_rxring *ring)
|
|||
|
||||
if (if_rxr_inuse(&ring->rxr) == 0)
|
||||
timeout_add(&ring->refill, 1);
|
||||
|
||||
if (ring->rs->update_rxhead)
|
||||
WRITE_BAR0(sc, ring->rxh, prod);
|
||||
}
|
||||
|
||||
void
|
||||
|
@ -708,10 +762,14 @@ vmxnet3_rxinit(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rq)
|
|||
|
||||
for (i = 0; i < 2; i++) {
|
||||
ring = &rq->cmd_ring[i];
|
||||
if_rxr_init(&ring->rxr, 2, NRXDESC - 1);
|
||||
ring->fill = 0;
|
||||
ring->gen = VMX_RX_GEN;
|
||||
bzero(ring->rxd, NRXDESC * sizeof ring->rxd[0]);
|
||||
if_rxr_init(&ring->rxr, 2, NRXDESC - 1);
|
||||
|
||||
memset(VMX_DMA_KVA(&ring->dmamem), 0,
|
||||
VMX_DMA_LEN(&ring->dmamem));
|
||||
bus_dmamap_sync(sc->sc_dmat, VMX_DMA_MAP(&ring->dmamem),
|
||||
0, VMX_DMA_LEN(&ring->dmamem), BUS_DMASYNC_PREWRITE);
|
||||
}
|
||||
|
||||
/* XXX only fill ring 0 */
|
||||
|

@@ -723,15 +781,26 @@ vmxnet3_rxinit(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rq)
	comp_ring = &rq->comp_ring;
	comp_ring->next = 0;
	comp_ring->gen = VMX_RXC_GEN;
	bzero(comp_ring->rxcd, NRXCOMPDESC * sizeof comp_ring->rxcd[0]);

	memset(VMX_DMA_KVA(&comp_ring->dmamem), 0,
	    VMX_DMA_LEN(&comp_ring->dmamem));
	bus_dmamap_sync(sc->sc_dmat, VMX_DMA_MAP(&comp_ring->dmamem),
	    0, VMX_DMA_LEN(&comp_ring->dmamem), BUS_DMASYNC_PREREAD);
}

void
vmxnet3_txstop(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *tq)
{
	struct vmxnet3_txring *ring = &tq->cmd_ring;
	struct vmxnet3_comp_ring *comp_ring = &tq->comp_ring;
	struct ifqueue *ifq = tq->ifq;
	int idx;

	bus_dmamap_sync(sc->sc_dmat, VMX_DMA_MAP(&comp_ring->dmamem),
	    0, VMX_DMA_LEN(&comp_ring->dmamem), BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->sc_dmat, VMX_DMA_MAP(&ring->dmamem),
	    0, VMX_DMA_LEN(&ring->dmamem), BUS_DMASYNC_POSTWRITE);

	for (idx = 0; idx < NTXDESC; idx++) {
		if (ring->m[idx]) {
			bus_dmamap_unload(sc->sc_dmat, ring->dmap[idx]);

@@ -739,16 +808,25 @@ vmxnet3_txstop(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *tq)
			ring->m[idx] = NULL;
		}
	}

	ifq_purge(ifq);
	ifq_clr_oactive(ifq);
}

void
vmxnet3_rxstop(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rq)
{
	struct vmxnet3_rxring *ring;
	struct vmxnet3_comp_ring *comp_ring = &rq->comp_ring;
	int i, idx;

	bus_dmamap_sync(sc->sc_dmat, VMX_DMA_MAP(&comp_ring->dmamem),
	    0, VMX_DMA_LEN(&comp_ring->dmamem), BUS_DMASYNC_POSTREAD);

	for (i = 0; i < 2; i++) {
		ring = &rq->cmd_ring[i];
		bus_dmamap_sync(sc->sc_dmat, VMX_DMA_MAP(&ring->dmamem),
		    0, VMX_DMA_LEN(&ring->dmamem), BUS_DMASYNC_POSTWRITE);
		timeout_del(&ring->refill);
		for (idx = 0; idx < NRXDESC; idx++) {
			struct mbuf *m = ring->m[idx];

@@ -924,18 +1002,22 @@ vmxnet3_txintr(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *tq)
	struct vmxnet3_txcompdesc *txcd;
	bus_dmamap_t map;
	struct mbuf *m;
	u_int cons, next;
	u_int prod, cons, next;
	uint32_t rgen;

	prod = ring->prod;
	cons = ring->cons;
	if (cons == ring->prod)

	if (cons == prod)
		return;

	next = comp_ring->next;
	rgen = comp_ring->gen;

	/* postread */
	for (;;) {
	bus_dmamap_sync(sc->sc_dmat, VMX_DMA_MAP(&comp_ring->dmamem),
	    0, VMX_DMA_LEN(&comp_ring->dmamem), BUS_DMASYNC_POSTREAD);

	do {
		txcd = &comp_ring->txcd[next];
		if ((txcd->txc_word3 & VMX_TXC_GEN) != rgen)
			break;

@@ -958,8 +1040,10 @@ vmxnet3_txintr(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *tq)
		    VMXNET3_TXC_EOPIDX_M;
		cons++;
		cons %= NTXDESC;
	}
	/* preread */
	} while (cons != prod);

	bus_dmamap_sync(sc->sc_dmat, VMX_DMA_MAP(&comp_ring->dmamem),
	    0, VMX_DMA_LEN(&comp_ring->dmamem), BUS_DMASYNC_PREREAD);

	comp_ring->next = next;
	comp_ring->gen = rgen;

@@ -986,6 +1070,9 @@ vmxnet3_rxintr(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rq)
	next = comp_ring->next;
	rgen = comp_ring->gen;

	bus_dmamap_sync(sc->sc_dmat, VMX_DMA_MAP(&comp_ring->dmamem),
	    0, VMX_DMA_LEN(&comp_ring->dmamem), BUS_DMASYNC_POSTREAD);

	for (;;) {
		rxcd = &comp_ring->rxcd[next];
		if ((rxcd->rxc_word3 & VMX_RXC_GEN) != rgen)

@@ -1018,14 +1105,14 @@ vmxnet3_rxintr(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rq)
		if (letoh32(rxcd->rxc_word2 & VMXNET3_RXC_ERROR)) {
			ifp->if_ierrors++;
			m_freem(m);
			goto skip_buffer;
			continue;
		}

		len = letoh32((rxcd->rxc_word2 >> VMXNET3_RXC_LEN_S) &
		    VMXNET3_RXC_LEN_M);
		if (len < VMXNET3_MIN_MTU) {
			m_freem(m);
			goto skip_buffer;
			continue;
		}
		m->m_pkthdr.len = m->m_len = len;

@@ -1042,22 +1129,11 @@ vmxnet3_rxintr(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rq)
		}

		ml_enqueue(&ml, m);

skip_buffer:
		if (rq->rs->update_rxhead) {
			u_int qid = letoh32((rxcd->rxc_word0 >>
			    VMXNET3_RXC_QID_S) & VMXNET3_RXC_QID_M);

			idx = (idx + 1) % NRXDESC;
			if (qid < sc->sc_nqueues) {
				WRITE_BAR0(sc, VMXNET3_BAR0_RXH1(qid), idx);
			} else {
				qid -= sc->sc_nqueues;
				WRITE_BAR0(sc, VMXNET3_BAR0_RXH2(qid), idx);
			}
		}
	}

	bus_dmamap_sync(sc->sc_dmat, VMX_DMA_MAP(&comp_ring->dmamem),
	    0, VMX_DMA_LEN(&comp_ring->dmamem), BUS_DMASYNC_PREREAD);

	comp_ring->next = next;
	comp_ring->gen = rgen;

@@ -1152,7 +1228,6 @@ vmxnet3_stop(struct ifnet *ifp)
	int queue;

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	vmxnet3_disable_all_intrs(sc);

@@ -1218,7 +1293,6 @@ vmxnet3_init(struct vmxnet3_softc *sc)
	vmxnet3_link_state(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	return 0;
}

@@ -1325,7 +1399,7 @@ vmxnet3_start(struct ifqueue *ifq)
	struct vmxnet3_txring *ring = &tq->cmd_ring;
	struct vmxnet3_txdesc *txd, *sop;
	bus_dmamap_t map;
	unsigned int prod, free, i;
	unsigned int prod, free, i;
	unsigned int post = 0;
	uint32_t rgen, gen;

@@ -1337,6 +1411,9 @@ vmxnet3_start(struct ifqueue *ifq)
		free += NTXDESC;
	free -= prod;

	bus_dmamap_sync(sc->sc_dmat, VMX_DMA_MAP(&ring->dmamem),
	    0, VMX_DMA_LEN(&ring->dmamem), BUS_DMASYNC_POSTWRITE);

	rgen = ring->gen;

	for (;;) {

@@ -1391,21 +1468,26 @@ vmxnet3_start(struct ifqueue *ifq)
			    VMXNET3_TX_VLANTAG_M) << VMXNET3_TX_VLANTAG_S);
		}

		ring->prod = prod;
		/* Change the ownership by flipping the "generation" bit */
		membar_producer();
		bus_dmamap_sync(sc->sc_dmat, VMX_DMA_MAP(&ring->dmamem),
		    0, VMX_DMA_LEN(&ring->dmamem),
		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTWRITE);
		sop->tx_word2 ^= VMX_TX_GEN;

		free -= i;
		post = 1;
	}

	bus_dmamap_sync(sc->sc_dmat, VMX_DMA_MAP(&ring->dmamem),
	    0, VMX_DMA_LEN(&ring->dmamem), BUS_DMASYNC_PREWRITE);

	if (!post)
		return;

	ring->prod = prod;
	ring->gen = rgen;

	WRITE_BAR0(sc, VMXNET3_BAR0_TXH(0), prod);
	WRITE_BAR0(sc, VMXNET3_BAR0_TXH(tq->queue), prod);
}

void

@@ -1469,6 +1551,49 @@ vmxnet3_dma_allocmem(struct vmxnet3_softc *sc, u_int size, u_int align, bus_addr
	return va;
}

static int
vmx_dmamem_alloc(struct vmxnet3_softc *sc, struct vmx_dmamem *vdm,
    bus_size_t size, u_int align)
{
	vdm->vdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, vdm->vdm_size, 1,
	    vdm->vdm_size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
	    &vdm->vdm_map) != 0)
		return (1);
	if (bus_dmamem_alloc(sc->sc_dmat, vdm->vdm_size,
	    align, 0, &vdm->vdm_seg, 1, &vdm->vdm_nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
		goto destroy;
	if (bus_dmamem_map(sc->sc_dmat, &vdm->vdm_seg, vdm->vdm_nsegs,
	    vdm->vdm_size, &vdm->vdm_kva, BUS_DMA_WAITOK) != 0)
		goto free;
	if (bus_dmamap_load(sc->sc_dmat, vdm->vdm_map, vdm->vdm_kva,
	    vdm->vdm_size, NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	return (0);
unmap:
	bus_dmamem_unmap(sc->sc_dmat, vdm->vdm_kva, vdm->vdm_size);
free:
	bus_dmamem_free(sc->sc_dmat, &vdm->vdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, vdm->vdm_map);
	return (1);
}

#ifdef notyet
static void
vmx_dmamem_free(struct vmxnet3_softc *sc, struct vmx_dmamem *vdm)
{
	bus_dmamap_unload(sc->sc_dmat, vdm->vdm_map);
	bus_dmamem_unmap(sc->sc_dmat, vdm->vdm_kva, vdm->vdm_size);
	bus_dmamem_free(sc->sc_dmat, &vdm->vdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, vdm->vdm_map);
}
#endif

#if NKSTAT > 0
/*
 * "hardware" counters are exported as separate kstats for each tx

@@ -1536,7 +1661,7 @@ vmx_kstat_read(struct kstat *ks)
	for (i = 0; i < n; i++)
		kstat_kv_u64(&kvs[i]) = lemtoh64(&vs[i]);

	TIMEVAL_TO_TIMESPEC(&sc->sc_kstat_updated, &ks->ks_updated);
	TIMEVAL_TO_TIMESPEC(&sc->sc_kstat_updated, &ks->ks_updated);

	return (0);
}

@@ -1,4 +1,4 @@
$OpenBSD: pcidevs,v 1.2043 2023/07/08 09:11:51 kettenis Exp $
$OpenBSD: pcidevs,v 1.2044 2023/08/06 14:40:25 jsg Exp $
/*	$NetBSD: pcidevs,v 1.30 1997/06/24 06:20:24 thorpej Exp $ */

/*

@@ -2103,6 +2103,7 @@ product ATI NAVI31_1	0x744c	Navi 31
product ATI NAVI31_3	0x745e	Navi 31
product ATI NAVI33_1	0x7480	Navi 33
product ATI NAVI33_2	0x7483	Navi 33
product ATI NAVI33_3	0x7489	Navi 33
product ATI RADEON_9000IGP	0x7834	Radeon 9000/9100 IGP
product ATI RADEON_RS350IGP	0x7835	Radeon RS350IGP
product ATI RS690_HB	0x7910	RS690 Host

@@ -2,7 +2,7 @@
 * THIS FILE AUTOMATICALLY GENERATED. DO NOT EDIT.
 *
 * generated from:
 *	OpenBSD: pcidevs,v 1.2043 2023/07/08 09:11:51 kettenis Exp
 *	OpenBSD: pcidevs,v 1.2044 2023/08/06 14:40:25 jsg Exp
 */
/* $NetBSD: pcidevs,v 1.30 1997/06/24 06:20:24 thorpej Exp $ */

@@ -2108,6 +2108,7 @@
#define PCI_PRODUCT_ATI_NAVI31_3	0x745e		/* Navi 31 */
#define PCI_PRODUCT_ATI_NAVI33_1	0x7480		/* Navi 33 */
#define PCI_PRODUCT_ATI_NAVI33_2	0x7483		/* Navi 33 */
#define PCI_PRODUCT_ATI_NAVI33_3	0x7489		/* Navi 33 */
#define PCI_PRODUCT_ATI_RADEON_9000IGP	0x7834		/* Radeon 9000/9100 IGP */
#define PCI_PRODUCT_ATI_RADEON_RS350IGP	0x7835		/* Radeon RS350IGP */
#define PCI_PRODUCT_ATI_RS690_HB	0x7910		/* RS690 Host */

@@ -2,7 +2,7 @@
 * THIS FILE AUTOMATICALLY GENERATED. DO NOT EDIT.
 *
 * generated from:
 *	OpenBSD: pcidevs,v 1.2043 2023/07/08 09:11:51 kettenis Exp
 *	OpenBSD: pcidevs,v 1.2044 2023/08/06 14:40:25 jsg Exp
 */

/* $NetBSD: pcidevs,v 1.30 1997/06/24 06:20:24 thorpej Exp $ */

@@ -6555,6 +6555,10 @@ static const struct pci_known_product pci_known_products[] = {
	    PCI_VENDOR_ATI, PCI_PRODUCT_ATI_NAVI33_2,
	    "Navi 33",
	},
	{
	    PCI_VENDOR_ATI, PCI_PRODUCT_ATI_NAVI33_3,
	    "Navi 33",
	},
	{
	    PCI_VENDOR_ATI, PCI_PRODUCT_ATI_RADEON_9000IGP,
	    "Radeon 9000/9100 IGP",

@@ -1,4 +1,4 @@
/* $OpenBSD: wsemul_vt100.c,v 1.46 2023/07/24 17:03:32 miod Exp $ */
/* $OpenBSD: wsemul_vt100.c,v 1.47 2023/08/02 19:20:19 miod Exp $ */
/* $NetBSD: wsemul_vt100.c,v 1.13 2000/04/28 21:56:16 mycroft Exp $ */

/*

@@ -862,7 +862,7 @@ wsemul_vt100_output_dcs(struct wsemul_vt100_emuldata *edp,
		case '0': case '1': case '2': case '3': case '4':
		case '5': case '6': case '7': case '8': case '9':
			/* argument digit */
			if (edp->nargs > VT100_EMUL_NARGS - 1)
			if (edp->nargs >= VT100_EMUL_NARGS)
				break;
			edp->args[edp->nargs] = (edp->args[edp->nargs] * 10) +
			    (instate->inchar - '0');

@@ -1084,6 +1084,7 @@ wsemul_vt100_output_csi(struct wsemul_vt100_emuldata *edp,
			edp->nargs++;
		rc = wsemul_vt100_handle_csi(edp, instate);
		if (rc != 0) {
			/* undo nargs progress */
			edp->nargs = oargs;
			return rc;
		}