sync with OpenBSD -current

purplerain 2024-08-11 00:21:13 +00:00
parent 32e75f5310
commit b321f55ead
Signed by: purplerain
GPG key ID: F42C07F07E2E35B7
23 changed files with 206 additions and 161 deletions


@@ -1001,25 +1001,21 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
u32 pte_flags;
int ret;
/* For Modern GENs the PTEs and register space are split in the BAR */
type = pci_mapreg_type(i915->pc, i915->tag, 0x10);
ret = -pci_mapreg_info(i915->pc, i915->tag, 0x10, type,
&addr, &len, NULL);
if (ret)
return ret;
/*
* On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range
* will be dropped. For WC mappings in general we have 64 byte burst
* writes when the WC buffer is flushed, so we can't use it, but have to
* resort to an uncached mapping. The WC issue is easily caught by the
* readback check when writing GTT PTE entries.
*/
if (IS_GEN9_LP(i915) || GRAPHICS_VER(i915) >= 11)
flags = 0;
else
GEM_WARN_ON(len != gen6_gttmmadr_size(i915));
phys_addr = addr + gen6_gttadr_offset(i915);
if (needs_wc_ggtt_mapping(i915))
flags = BUS_SPACE_MAP_PREFETCHABLE;
ret = -bus_space_map(i915->bst, addr + len / 2, size,
else
flags = 0;
ret = -bus_space_map(i915->bst, phys_addr, size,
flags | BUS_SPACE_MAP_LINEAR, &ggtt->gsm_bsh);
if (ret) {
drm_err(&i915->drm, "Failed to map the ggtt page table\n");
@@ -1028,7 +1024,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
ggtt->gsm = bus_space_vaddr(i915->bst, ggtt->gsm_bsh);
ggtt->gsm_size = size;
if (!ggtt->gsm) {
DRM_ERROR("Failed to map the ggtt page table\n");
drm_err(&i915->drm, "Failed to map the ggtt page table\n");
return -ENOMEM;
}
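
The new needs_wc_ggtt_mapping() call presumably absorbs the inline BXT+/ICL+ check removed above; the helper's body is not part of this diff. A minimal sketch, reconstructed only from the removed condition and comment (not the verbatim i915 helper):

static bool
needs_wc_ggtt_mapping(struct drm_i915_private *i915)
{
	/*
	 * Sketch: on BXT+/ICL+ writes larger than 64 bit to the GTT
	 * pagetable range are dropped, so a WC (prefetchable) mapping
	 * is only usable on older platforms.
	 */
	if (IS_GEN9_LP(i915) || GRAPHICS_VER(i915) >= 11)
		return false;
	return true;
}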


@@ -184,7 +184,7 @@ static int guc_wait_ucode(struct intel_guc *guc)
* in the seconds range. However, there is a limit on how long an
* individual wait_for() can wait. So wrap it in a loop.
*/
before_freq = intel_rps_read_actual_frequency(&uncore->gt->rps);
before_freq = intel_rps_read_actual_frequency(&gt->rps);
before = ktime_get();
for (count = 0; count < GUC_LOAD_RETRY_LIMIT; count++) {
ret = wait_for(guc_load_done(uncore, &status, &success), 1000);
@@ -192,7 +192,7 @@ static int guc_wait_ucode(struct intel_guc *guc)
break;
guc_dbg(guc, "load still in progress, count = %d, freq = %dMHz, status = 0x%08X [0x%02X/%02X]\n",
count, intel_rps_read_actual_frequency(&uncore->gt->rps), status,
count, intel_rps_read_actual_frequency(&gt->rps), status,
REG_FIELD_GET(GS_BOOTROM_MASK, status),
REG_FIELD_GET(GS_UKERNEL_MASK, status));
}
@@ -204,7 +204,7 @@ static int guc_wait_ucode(struct intel_guc *guc)
u32 bootrom = REG_FIELD_GET(GS_BOOTROM_MASK, status);
guc_info(guc, "load failed: status = 0x%08X, time = %lldms, freq = %dMHz, ret = %d\n",
status, delta_ms, intel_rps_read_actual_frequency(&uncore->gt->rps), ret);
status, delta_ms, intel_rps_read_actual_frequency(&gt->rps), ret);
guc_info(guc, "load failed: status: Reset = %d, BootROM = 0x%02X, UKernel = 0x%02X, MIA = 0x%02X, Auth = 0x%02X\n",
REG_FIELD_GET(GS_MIA_IN_RESET, status),
bootrom, ukernel,
@@ -254,11 +254,11 @@ static int guc_wait_ucode(struct intel_guc *guc)
guc_warn(guc, "excessive init time: %lldms! [status = 0x%08X, count = %d, ret = %d]\n",
delta_ms, status, count, ret);
guc_warn(guc, "excessive init time: [freq = %dMHz, before = %dMHz, perf_limit_reasons = 0x%08X]\n",
intel_rps_read_actual_frequency(&uncore->gt->rps), before_freq,
intel_rps_read_actual_frequency(&gt->rps), before_freq,
intel_uncore_read(uncore, intel_gt_perf_limit_reasons_reg(gt)));
} else {
guc_dbg(guc, "init took %lldms, freq = %dMHz, before = %dMHz, status = 0x%08X, count = %d, ret = %d\n",
delta_ms, intel_rps_read_actual_frequency(&uncore->gt->rps),
delta_ms, intel_rps_read_actual_frequency(&gt->rps),
before_freq, status, count, ret);
}


@@ -6,6 +6,7 @@
#include <linux/types.h>
#include "gt/intel_gt.h"
#include "gt/intel_rps.h"
#include "intel_guc_reg.h"
#include "intel_huc.h"
#include "intel_huc_print.h"
@@ -462,17 +463,68 @@ static const char *auth_mode_string(struct intel_huc *huc,
return partial ? "clear media" : "all workloads";
}
/*
* Use a longer timeout for debug builds so that problems can be detected
and analysed. But a shorter timeout for releases so that users don't
* wait forever to find out there is a problem. Note that the only reason
* an end user should hit the timeout is in case of extreme thermal throttling.
* And a system that is that hot during boot is probably dead anyway!
*/
#if defined(CONFIG_DRM_I915_DEBUG_GEM)
#define HUC_LOAD_RETRY_LIMIT 20
#else
#define HUC_LOAD_RETRY_LIMIT 3
#endif
int intel_huc_wait_for_auth_complete(struct intel_huc *huc,
enum intel_huc_authentication_type type)
{
struct intel_gt *gt = huc_to_gt(huc);
int ret;
struct intel_uncore *uncore = gt->uncore;
ktime_t before, after, delta;
int ret, count;
u64 delta_ms;
u32 before_freq;
ret = __intel_wait_for_register(gt->uncore,
huc->status[type].reg,
huc->status[type].mask,
huc->status[type].value,
2, 50, NULL);
/*
* The KMD requests maximum frequency during driver load, however thermal
* throttling can force the frequency down to minimum (although the board
* really should never get that hot in real life!). IFWI issues have been
* seen to cause sporadic failures to grant the higher frequency. And at
* minimum frequency, the authentication time can be in the seconds range.
* Note that there is a limit on how long an individual wait_for() can wait.
* So wrap it in a loop.
*/
before_freq = intel_rps_read_actual_frequency(&gt->rps);
before = ktime_get();
for (count = 0; count < HUC_LOAD_RETRY_LIMIT; count++) {
ret = __intel_wait_for_register(gt->uncore,
huc->status[type].reg,
huc->status[type].mask,
huc->status[type].value,
2, 1000, NULL);
if (!ret)
break;
huc_dbg(huc, "auth still in progress, count = %d, freq = %dMHz, status = 0x%08X\n",
count, intel_rps_read_actual_frequency(&gt->rps),
huc->status[type].reg.reg);
}
after = ktime_get();
delta = ktime_sub(after, before);
delta_ms = ktime_to_ms(delta);
if (delta_ms > 50) {
huc_warn(huc, "excessive auth time: %lldms! [status = 0x%08X, count = %d, ret = %d]\n",
delta_ms, huc->status[type].reg.reg, count, ret);
huc_warn(huc, "excessive auth time: [freq = %dMHz, before = %dMHz, perf_limit_reasons = 0x%08X]\n",
intel_rps_read_actual_frequency(&gt->rps), before_freq,
intel_uncore_read(uncore, intel_gt_perf_limit_reasons_reg(gt)));
} else {
huc_dbg(huc, "auth took %lldms, freq = %dMHz, before = %dMHz, status = 0x%08X, count = %d, ret = %d\n",
delta_ms, intel_rps_read_actual_frequency(&gt->rps),
before_freq, huc->status[type].reg.reg, count, ret);
}
/* mark the load process as complete even if the wait failed */
delayed_huc_load_complete(huc);
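
Both this HuC change and the GuC change above replace a single bounded register wait with a retry loop that samples the actual GPU frequency and the elapsed time, so slow loads under thermal throttling get logged instead of timing out silently. A condensed sketch of the shared pattern (illustration only, not part of the commit; my_fw_ready() is a hypothetical stand-in for the per-firmware completion check):

static int
wait_for_fw_with_retries(struct intel_gt *gt)
{
	u32 before_freq = intel_rps_read_actual_frequency(&gt->rps);
	ktime_t before = ktime_get();
	u64 delta_ms;
	int count, ret = -ETIMEDOUT;

	for (count = 0; count < 3; count++) {
		/* each individual wait is bounded; the loop bounds the total */
		ret = wait_for(my_fw_ready(gt), 1000);
		if (!ret)
			break;
	}

	delta_ms = ktime_to_ms(ktime_sub(ktime_get(), before));
	if (delta_ms > 200)
		drm_warn(&gt->i915->drm,
		    "slow firmware wait: %lldms, freq %dMHz -> %dMHz\n",
		    delta_ms, before_freq,
		    intel_rps_read_actual_frequency(&gt->rps));
	return ret;
}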


@@ -1,4 +1,4 @@
/* $OpenBSD: if_rge.c,v 1.27 2024/06/30 08:13:02 kevlo Exp $ */
/* $OpenBSD: if_rge.c,v 1.28 2024/08/10 21:53:06 patrick Exp $ */
/*
* Copyright (c) 2019, 2020, 2023, 2024
@@ -480,24 +480,27 @@ rge_encap(struct rge_queues *q, struct mbuf *m, int idx)
if (cur == RGE_TX_LIST_CNT - 1)
cmdsts |= RGE_TDCMDSTS_EOR;
if (i == (txmap->dm_nsegs - 1))
cmdsts |= RGE_TDCMDSTS_EOF;
d->rge_cmdsts = htole32(cmdsts);
bus_dmamap_sync(sc->sc_dmat, q->q_tx.rge_tx_list_map,
cur * sizeof(struct rge_tx_desc), sizeof(struct rge_tx_desc),
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
last = cur;
cmdsts = RGE_TDCMDSTS_OWN;
cur = RGE_NEXT_TX_DESC(cur);
}
/* Set EOF on the last descriptor. */
d->rge_cmdsts |= htole32(RGE_TDCMDSTS_EOF);
/* Transfer ownership of packet to the chip. */
d = &q->q_tx.rge_tx_list[idx];
d->rge_cmdsts |= htole32(RGE_TDCMDSTS_OWN);
bus_dmamap_sync(sc->sc_dmat, q->q_tx.rge_tx_list_map,
cur * sizeof(struct rge_tx_desc), sizeof(struct rge_tx_desc),
idx * sizeof(struct rge_tx_desc), sizeof(struct rge_tx_desc),
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* Update info of TX queue and descriptors. */
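
The reordering matters because the OWN bit is what hands a descriptor to the chip: the descriptors after the first are filled and synced first, EOF is then set on the last one, and only after that is OWN set on the packet's first descriptor and that descriptor synced, so the hardware never sees a half-built chain. A compact sketch of the hand-off step (illustration only; the names mirror the driver, but this is not the committed code):

static void
rge_tx_handoff(struct rge_softc *sc, struct rge_queues *q, int idx, int last)
{
	struct rge_tx_desc *d;

	/* EOF marks the final descriptor of the packet. */
	d = &q->q_tx.rge_tx_list[last];
	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_EOF);

	/* OWN on the first descriptor goes last, then that slot is synced. */
	d = &q->q_tx.rge_tx_list[idx];
	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_OWN);
	bus_dmamap_sync(sc->sc_dmat, q->q_tx.rge_tx_list_map,
	    idx * sizeof(struct rge_tx_desc), sizeof(struct rge_tx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}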


@@ -1,4 +1,4 @@
/* $OpenBSD: pci.c,v 1.128 2024/03/18 21:20:46 kettenis Exp $ */
/* $OpenBSD: pci.c,v 1.129 2024/08/10 20:20:50 kettenis Exp $ */
/* $NetBSD: pci.c,v 1.31 1997/06/06 23:48:04 thorpej Exp $ */
/*
@@ -752,8 +752,22 @@ pci_get_powerstate(pci_chipset_tag_t pc, pcitag_t tag)
int
pci_set_powerstate(pci_chipset_tag_t pc, pcitag_t tag, int state)
{
pcireg_t reg;
pcireg_t id, reg;
int offset, ostate = state;
int d3_delay = 10 * 1000;
/* Some AMD Ryzen xHCI controllers need a bit more time to wake up. */
id = pci_conf_read(pc, tag, PCI_ID_REG);
if (PCI_VENDOR(id) == PCI_VENDOR_AMD) {
switch (PCI_PRODUCT(id)) {
case PCI_PRODUCT_AMD_17_1X_XHCI_1:
case PCI_PRODUCT_AMD_17_1X_XHCI_2:
case PCI_PRODUCT_AMD_17_6X_XHCI:
d3_delay = 20 * 1000;
default:
break;
}
}
/*
* Warn the firmware that we are going to put the device
@@ -783,7 +797,7 @@ pci_set_powerstate(pci_chipset_tag_t pc, pcitag_t tag, int state)
(reg & ~PCI_PMCSR_STATE_MASK) | state);
if (state == PCI_PMCSR_STATE_D3 ||
ostate == PCI_PMCSR_STATE_D3)
delay(10 * 1000);
delay(d3_delay);
}
}
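
For context (illustration only, not part of this commit): pci_set_powerstate() is what drivers and the PCI layer call when moving a device between power states, so any such caller picks up the longer D3 delay added above transparently. A hypothetical suspend/resume hook using it might look like this (mydrv_* names are made up):

int
mydrv_activate(struct device *self, int act)
{
	struct mydrv_softc *sc = (struct mydrv_softc *)self;

	switch (act) {
	case DVACT_SUSPEND:
		pci_set_powerstate(sc->sc_pc, sc->sc_tag, PCI_PMCSR_STATE_D3);
		break;
	case DVACT_RESUME:
		pci_set_powerstate(sc->sc_pc, sc->sc_tag, PCI_PMCSR_STATE_D0);
		break;
	}
	return (0);
}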


@@ -1,4 +1,4 @@
$OpenBSD: pcidevs,v 1.2082 2024/08/09 01:50:16 jsg Exp $
$OpenBSD: pcidevs,v 1.2083 2024/08/10 11:00:14 jsg Exp $
/* $NetBSD: pcidevs,v 1.30 1997/06/24 06:20:24 thorpej Exp $ */
/*
@@ -8964,10 +8964,18 @@ product SANDISK WDSXXXG1X0C 0x5001 WD Black NVMe
product SANDISK WDSXXXG2X0C 0x5002 WD Black NVMe
product SANDISK PCSN520_1 0x5003 PC SN520
product SANDISK PCSN520_2 0x5004 PC SN520
product SANDISK PCSN520_3 0x5005 PC SN520
product SANDISK WDSXXXG3X0C 0x5006 WD Black NVMe
product SANDISK PCSN530 0x5008 PC SN530
product SANDISK NVME_1 0x5009 NVMe
product SANDISK SN850 0x5011 SN850
product SANDISK PCSN740 0x5015 PC SN740
product SANDISK NVME_2 0x5014 NVMe
product SANDISK PCSN740_1 0x5015 PC SN740
product SANDISK PCSN740_2 0x5016 PC SN740
product SANDISK NVME_3 0x5017 NVMe
product SANDISK SN750 0x501a SN750
product SANDISK SN850X 0x5030 SN850X
product SANDISK SN580 0x5041 SN580
/* Sangoma products */
product SANGOMA A10X 0x0300 A10x


@@ -2,7 +2,7 @@
* THIS FILE AUTOMATICALLY GENERATED. DO NOT EDIT.
*
* generated from:
* OpenBSD: pcidevs,v 1.2082 2024/08/09 01:50:16 jsg Exp
* OpenBSD: pcidevs,v 1.2083 2024/08/10 11:00:14 jsg Exp
*/
/* $NetBSD: pcidevs,v 1.30 1997/06/24 06:20:24 thorpej Exp $ */
@@ -8969,10 +8969,18 @@
#define PCI_PRODUCT_SANDISK_WDSXXXG2X0C 0x5002 /* WD Black NVMe */
#define PCI_PRODUCT_SANDISK_PCSN520_1 0x5003 /* PC SN520 */
#define PCI_PRODUCT_SANDISK_PCSN520_2 0x5004 /* PC SN520 */
#define PCI_PRODUCT_SANDISK_PCSN520_3 0x5005 /* PC SN520 */
#define PCI_PRODUCT_SANDISK_WDSXXXG3X0C 0x5006 /* WD Black NVMe */
#define PCI_PRODUCT_SANDISK_PCSN530 0x5008 /* PC SN530 */
#define PCI_PRODUCT_SANDISK_NVME_1 0x5009 /* NVMe */
#define PCI_PRODUCT_SANDISK_SN850 0x5011 /* SN850 */
#define PCI_PRODUCT_SANDISK_PCSN740 0x5015 /* PC SN740 */
#define PCI_PRODUCT_SANDISK_NVME_2 0x5014 /* NVMe */
#define PCI_PRODUCT_SANDISK_PCSN740_1 0x5015 /* PC SN740 */
#define PCI_PRODUCT_SANDISK_PCSN740_2 0x5016 /* PC SN740 */
#define PCI_PRODUCT_SANDISK_NVME_3 0x5017 /* NVMe */
#define PCI_PRODUCT_SANDISK_SN750 0x501a /* SN750 */
#define PCI_PRODUCT_SANDISK_SN850X 0x5030 /* SN850X */
#define PCI_PRODUCT_SANDISK_SN580 0x5041 /* SN580 */
/* Sangoma products */
#define PCI_PRODUCT_SANGOMA_A10X 0x0300 /* A10x */


@@ -2,7 +2,7 @@
* THIS FILE AUTOMATICALLY GENERATED. DO NOT EDIT.
*
* generated from:
* OpenBSD: pcidevs,v 1.2082 2024/08/09 01:50:16 jsg Exp
* OpenBSD: pcidevs,v 1.2083 2024/08/10 11:00:14 jsg Exp
*/
/* $NetBSD: pcidevs,v 1.30 1997/06/24 06:20:24 thorpej Exp $ */
@@ -32331,6 +32331,10 @@ static const struct pci_known_product pci_known_products[] = {
PCI_VENDOR_SANDISK, PCI_PRODUCT_SANDISK_PCSN520_2,
"PC SN520",
},
{
PCI_VENDOR_SANDISK, PCI_PRODUCT_SANDISK_PCSN520_3,
"PC SN520",
},
{
PCI_VENDOR_SANDISK, PCI_PRODUCT_SANDISK_WDSXXXG3X0C,
"WD Black NVMe",
@@ -32339,14 +32343,42 @@ static const struct pci_known_product pci_known_products[] = {
PCI_VENDOR_SANDISK, PCI_PRODUCT_SANDISK_PCSN530,
"PC SN530",
},
{
PCI_VENDOR_SANDISK, PCI_PRODUCT_SANDISK_NVME_1,
"NVMe",
},
{
PCI_VENDOR_SANDISK, PCI_PRODUCT_SANDISK_SN850,
"SN850",
},
{
PCI_VENDOR_SANDISK, PCI_PRODUCT_SANDISK_PCSN740,
PCI_VENDOR_SANDISK, PCI_PRODUCT_SANDISK_NVME_2,
"NVMe",
},
{
PCI_VENDOR_SANDISK, PCI_PRODUCT_SANDISK_PCSN740_1,
"PC SN740",
},
{
PCI_VENDOR_SANDISK, PCI_PRODUCT_SANDISK_PCSN740_2,
"PC SN740",
},
{
PCI_VENDOR_SANDISK, PCI_PRODUCT_SANDISK_NVME_3,
"NVMe",
},
{
PCI_VENDOR_SANDISK, PCI_PRODUCT_SANDISK_SN750,
"SN750",
},
{
PCI_VENDOR_SANDISK, PCI_PRODUCT_SANDISK_SN850X,
"SN850X",
},
{
PCI_VENDOR_SANDISK, PCI_PRODUCT_SANDISK_SN580,
"SN580",
},
{
PCI_VENDOR_SANGOMA, PCI_PRODUCT_SANGOMA_A10X,
"A10x",


@@ -1,4 +1,4 @@
/* $OpenBSD: kern_sig.c,v 1.337 2024/08/06 08:44:54 claudio Exp $ */
/* $OpenBSD: kern_sig.c,v 1.338 2024/08/10 09:18:09 jsg Exp $ */
/* $NetBSD: kern_sig.c,v 1.54 1996/04/22 01:38:32 christos Exp $ */
/*
@@ -2146,7 +2146,7 @@ single_thread_set(struct proc *p, int flags)
SCHED_UNLOCK();
}
/* count ourselfs out */
/* count ourself out */
--pr->ps_singlecnt;
mtx_leave(&pr->ps_mtx);