sync code with last fixes and improvements from OpenBSD

This commit is contained in:
purplerain 2023-08-08 00:42:18 +00:00
parent 691f97cc10
commit 371ae113c6
Signed by: purplerain
GPG key ID: F42C07F07E2E35B7
175 changed files with 2932 additions and 1512 deletions

View file

@ -1,4 +1,4 @@
/* $OpenBSD: codepatch.c,v 1.9 2020/09/11 09:27:09 mpi Exp $ */
/* $OpenBSD: codepatch.c,v 1.10 2023/07/31 01:33:57 guenther Exp $ */
/*
* Copyright (c) 2014-2015 Stefan Fritsch <sf@sfritsch.de>
*
@ -126,7 +126,7 @@ codepatch_nop(uint16_t tag)
/* Patch with alternative code */
void
codepatch_replace(uint16_t tag, void *code, size_t len)
codepatch_replace(uint16_t tag, const void *code, size_t len)
{
struct codepatch *patch;
unsigned char *rwaddr;

View file

@ -1,4 +1,4 @@
/* $OpenBSD: cpu.c,v 1.174 2023/07/28 06:36:16 guenther Exp $ */
/* $OpenBSD: cpu.c,v 1.175 2023/07/31 04:01:07 guenther Exp $ */
/* $NetBSD: cpu.c,v 1.1 2003/04/26 18:39:26 fvdl Exp $ */
/*-
@ -187,11 +187,7 @@ replacemeltdown(void)
{
static int replacedone = 0;
struct cpu_info *ci = &cpu_info_primary;
int swapgs_vuln = 0, s;
if (replacedone)
return;
replacedone = 1;
int swapgs_vuln = 0, ibrs = 0, s;
if (strcmp(cpu_vendor, "GenuineIntel") == 0) {
int family = ci->ci_family;
@ -208,9 +204,39 @@ replacemeltdown(void)
/* KnightsLanding */
swapgs_vuln = 0;
}
if ((ci->ci_feature_sefflags_edx & SEFF0EDX_ARCH_CAP) &&
(rdmsr(MSR_ARCH_CAPABILITIES) & ARCH_CAP_IBRS_ALL)) {
ibrs = 2;
} else if (ci->ci_feature_sefflags_edx & SEFF0EDX_IBRS) {
ibrs = 1;
}
} else if (strcmp(cpu_vendor, "AuthenticAMD") == 0 &&
ci->ci_pnfeatset >= 0x80000008) {
if (ci->ci_feature_amdspec_ebx & CPUIDEBX_IBRS_ALWAYSON) {
ibrs = 2;
} else if ((ci->ci_feature_amdspec_ebx & CPUIDEBX_IBRS) &&
(ci->ci_feature_amdspec_ebx & CPUIDEBX_IBRS_PREF)) {
ibrs = 1;
}
}
/* Enhanced IBRS: turn it on once on each CPU and don't touch again */
if (ibrs == 2)
wrmsr(MSR_SPEC_CTRL, SPEC_CTRL_IBRS);
if (replacedone)
return;
replacedone = 1;
s = splhigh();
if (ibrs == 2 || (ci->ci_feature_sefflags_edx & SEFF0EDX_IBT)) {
extern const char _jmprax, _jmpr11, _jmpr13;
extern const short _jmprax_len, _jmpr11_len, _jmpr13_len;
codepatch_replace(CPTAG_RETPOLINE_RAX, &_jmprax, _jmprax_len);
codepatch_replace(CPTAG_RETPOLINE_R11, &_jmpr11, _jmpr11_len);
codepatch_replace(CPTAG_RETPOLINE_R13, &_jmpr13, _jmpr13_len);
}
if (!cpu_meltdown)
codepatch_nop(CPTAG_MELTDOWN_NOP);
else {

View file

@ -1,4 +1,4 @@
/* $OpenBSD: locore.S,v 1.139 2023/07/28 06:18:35 guenther Exp $ */
/* $OpenBSD: locore.S,v 1.140 2023/07/31 04:01:07 guenther Exp $ */
/* $NetBSD: locore.S,v 1.13 2004/03/25 18:33:17 drochner Exp $ */
/*
@ -474,7 +474,16 @@ bogus_proc_pmap:
END(cpu_switchto)
NENTRY(retpoline_rax)
CODEPATCH_START
JMP_RETPOLINE(rax)
CODEPATCH_END(CPTAG_RETPOLINE_RAX)
END(retpoline_rax)
NENTRY(__x86_indirect_thunk_r11)
CODEPATCH_START
JMP_RETPOLINE(r11)
CODEPATCH_END(CPTAG_RETPOLINE_R11)
END(__x86_indirect_thunk_r11)
ENTRY(cpu_idle_cycle_hlt)
RETGUARD_SETUP(cpu_idle_cycle_hlt, r11)

View file

@ -1,4 +1,4 @@
/* $OpenBSD: vector.S,v 1.93 2023/07/27 00:30:07 guenther Exp $ */
/* $OpenBSD: vector.S,v 1.94 2023/07/31 04:01:07 guenther Exp $ */
/* $NetBSD: vector.S,v 1.5 2004/06/28 09:13:11 fvdl Exp $ */
/*
@ -1269,7 +1269,9 @@ END(ioapic_level_stubs)
* Soft interrupt handlers
*/
NENTRY(retpoline_r13)
CODEPATCH_START
JMP_RETPOLINE(r13)
CODEPATCH_END(CPTAG_RETPOLINE_R13)
END(retpoline_r13)
KIDTVEC(softtty)
@ -1280,7 +1282,9 @@ KIDTVEC(softtty)
movl $X86_SOFTINTR_SOFTTTY,%edi
call softintr_dispatch
decl CPUVAR(IDEPTH)
CODEPATCH_START
jmp retpoline_r13
CODEPATCH_END(CPTAG_RETPOLINE_R13)
END(Xsofttty)
KIDTVEC(softnet)
@ -1291,7 +1295,9 @@ KIDTVEC(softnet)
movl $X86_SOFTINTR_SOFTNET,%edi
call softintr_dispatch
decl CPUVAR(IDEPTH)
CODEPATCH_START
jmp retpoline_r13
CODEPATCH_END(CPTAG_RETPOLINE_R13)
END(Xsoftnet)
KIDTVEC(softclock)
@ -1302,5 +1308,7 @@ KIDTVEC(softclock)
movl $X86_SOFTINTR_SOFTCLOCK,%edi
call softintr_dispatch
decl CPUVAR(IDEPTH)
CODEPATCH_START
jmp retpoline_r13
CODEPATCH_END(CPTAG_RETPOLINE_R13)
END(Xsoftclock)

View file

@ -1,4 +1,4 @@
# $OpenBSD: Makefile.amd64,v 1.132 2023/04/21 13:24:20 bluhm Exp $
# $OpenBSD: Makefile.amd64,v 1.133 2023/07/31 04:01:07 guenther Exp $
# For instructions on building kernels consult the config(8) and options(4)
# manual pages.
@ -72,7 +72,7 @@ COPTIMIZE= -Oz
CMACHFLAGS+= -mno-retpoline -fcf-protection=none
.endif
.else
CMACHFLAGS+= -fcf-protection=branch
CMACHFLAGS+= -mretpoline-external-thunk -fcf-protection=branch
.endif
.if ${COMPILER_VERSION:Mclang}
NO_INTEGR_AS= -no-integrated-as

View file

@ -1,4 +1,4 @@
/* $OpenBSD: codepatch.h,v 1.16 2023/07/28 06:18:35 guenther Exp $ */
/* $OpenBSD: codepatch.h,v 1.18 2023/07/31 04:01:07 guenther Exp $ */
/*
* Copyright (c) 2014-2015 Stefan Fritsch <sf@sfritsch.de>
*
@ -29,7 +29,7 @@ __cptext void *codepatch_maprw(vaddr_t *nva, vaddr_t dest);
__cptext void codepatch_unmaprw(vaddr_t nva);
__cptext void codepatch_fill_nop(void *caddr, uint16_t len);
__cptext void codepatch_nop(uint16_t tag);
__cptext void codepatch_replace(uint16_t tag, void *code, size_t len);
__cptext void codepatch_replace(uint16_t tag, const void *code, size_t len);
__cptext void codepatch_call(uint16_t _tag, void *_func);
__cptext void codepatch_jmp(uint16_t _tag, void *_func);
void codepatch_disable(void);
@ -66,6 +66,9 @@ void codepatch_disable(void);
#define CPTAG_FENCE_SWAPGS_MIS_TAKEN 11
#define CPTAG_FENCE_NO_SAFE_SMAP 12
#define CPTAG_XRSTORS 13
#define CPTAG_RETPOLINE_RAX 14
#define CPTAG_RETPOLINE_R11 15
#define CPTAG_RETPOLINE_R13 16
/*
* stac/clac SMAP instructions have lfence like semantics. Let's

View file

@ -1,4 +1,4 @@
/* $OpenBSD: codepatch.c,v 1.5 2020/09/11 09:27:10 mpi Exp $ */
/* $OpenBSD: codepatch.c,v 1.6 2023/07/31 17:10:31 bluhm Exp $ */
/*
* Copyright (c) 2014-2015 Stefan Fritsch <sf@sfritsch.de>
*
@ -147,7 +147,7 @@ codepatch_nop(uint16_t tag)
/* Patch with alternative code */
void
codepatch_replace(uint16_t tag, void *code, size_t len)
codepatch_replace(uint16_t tag, const void *code, size_t len)
{
struct codepatch *patch;
unsigned char *rwaddr;

View file

@ -1,4 +1,4 @@
/* $OpenBSD: codepatch.h,v 1.2 2017/07/01 19:42:59 sf Exp $ */
/* $OpenBSD: codepatch.h,v 1.3 2023/07/31 17:10:31 bluhm Exp $ */
/*
* Copyright (c) 2014-2015 Stefan Fritsch <sf@sfritsch.de>
*
@ -26,7 +26,7 @@ void *codepatch_maprw(vaddr_t *nva, vaddr_t dest);
void codepatch_unmaprw(vaddr_t nva);
void codepatch_fill_nop(void *caddr, uint16_t len);
void codepatch_nop(uint16_t tag);
void codepatch_replace(uint16_t tag, void *code, size_t len);
void codepatch_replace(uint16_t tag, const void *code, size_t len);
void codepatch_call(uint16_t tag, void *func);
#endif /* !_LOCORE */

View file

@ -1,4 +1,4 @@
# $OpenBSD: files,v 1.724 2023/04/23 00:20:26 dlg Exp $
# $OpenBSD: files,v 1.725 2023/08/07 01:59:38 dlg Exp $
# $NetBSD: files,v 1.87 1996/05/19 17:17:50 jonathan Exp $
# @(#)files.newconf 7.5 (Berkeley) 5/10/93
@ -572,6 +572,7 @@ pseudo-device vlan: ifnet, ether
pseudo-device carp: ifnet, ether
pseudo-device sppp: ifnet
pseudo-device gif: ifnet
pseudo-device sec: ifnet
pseudo-device gre: ifnet, ether, etherbridge
pseudo-device crypto: ifnet
pseudo-device trunk: ifnet, ether, ifmedia
@ -1006,6 +1007,7 @@ file uvm/uvm_vnode.c
# IPv6
file net/if_gif.c gif needs-count
file net/if_sec.c sec needs-count
file netinet/ip_ecn.c
file netinet6/in6_pcb.c inet6
file netinet6/in6.c inet6

View file

@ -1,4 +1,4 @@
/* $OpenBSD: acpi_apm.c,v 1.2 2023/07/08 14:44:43 tobhe Exp $ */
/* $OpenBSD: acpi_apm.c,v 1.3 2023/08/06 14:30:08 tobhe Exp $ */
/*
* Copyright (c) 2005 Thorsten Lockert <tholo@sigmasoft.com>
* Copyright (c) 2005 Jordan Hargrave <jordan@openbsd.org>
@ -47,6 +47,9 @@ acpiopen(dev_t dev, int flag, int mode, struct proc *p)
struct acpi_softc *sc = acpi_softc;
int s;
if (sc == NULL)
return (ENXIO);
s = splbio();
switch (APMDEV(dev)) {
case APMDEV_CTL:
@ -82,6 +85,9 @@ acpiclose(dev_t dev, int flag, int mode, struct proc *p)
struct acpi_softc *sc = acpi_softc;
int s;
if (sc == NULL)
return (ENXIO);
s = splbio();
switch (APMDEV(dev)) {
case APMDEV_CTL:
@ -106,6 +112,9 @@ acpiioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
struct apm_power_info *pi = (struct apm_power_info *)data;
int s;
if (sc == NULL)
return (ENXIO);
s = splbio();
/* fake APM */
switch (cmd) {
@ -168,6 +177,9 @@ acpikqfilter(dev_t dev, struct knote *kn)
struct acpi_softc *sc = acpi_softc;
int s;
if (sc == NULL)
return (ENXIO);
switch (kn->kn_filter) {
case EVFILT_READ:
kn->kn_fop = &acpiread_filtops;

View file

@ -1,4 +1,4 @@
/* $OpenBSD: axppmic.c,v 1.17 2023/07/06 20:02:36 uaa Exp $ */
/* $OpenBSD: axppmic.c,v 1.20 2023/08/02 11:52:18 uaa Exp $ */
/*
* Copyright (c) 2017 Mark Kettenis <kettenis@openbsd.org>
*
@ -126,6 +126,21 @@ const struct axppmic_regdata axp221_regdata[] = {
{ NULL }
};
const struct axppmic_regdata axp313a_regdata[] = {
/* dcdc1: 1.6-3.4V (100mV step) not supported */
{ "dcdc1", 0x10, (1 << 0), (1 << 0), (0 << 0),
0x13, 0x7f, 500000, 10000, 71, 1220000, 20000, 17 },
{ "dcdc2", 0x10, (1 << 1), (1 << 1), (0 << 1),
0x14, 0x7f, 500000, 10000, 71, 1220000, 20000, 17 },
{ "dcdc3", 0x10, (1 << 2), (1 << 2), (0 << 2),
0x15, 0x7f, 500000, 10000, 71, 1220000, 20000, 32 },
{ "aldo1", 0x10, (1 << 3), (1 << 3), (0 << 3),
0x16, 0x1f, 500000, 100000, 31 },
{ "dldo1", 0x10, (1 << 4), (1 << 4), (0 << 4),
0x17, 0x1f, 500000, 100000, 31 },
{ NULL }
};
const struct axppmic_regdata axp803_regdata[] = {
{ "dcdc1", 0x10, (1 << 0), (1 << 0), (0 << 0),
0x20, 0x1f, 1600000, 100000, 19 },
@ -242,6 +257,53 @@ const struct axppmic_regdata axp809_regdata[] = {
{ NULL }
};
const struct axppmic_regdata axp15060_regdata[] = {
{ "dcdc1", 0x10, (1 << 0), (1 << 0), (0 << 0),
0x13, 0x1f, 1500000, 100000, 20 },
{ "dcdc2", 0x10, (1 << 1), (1 << 1), (0 << 1),
0x14, 0x7f, 500000, 10000, 71, 1220000, 20000, 17 },
{ "dcdc3", 0x10, (1 << 2), (1 << 2), (0 << 2),
0x15, 0x7f, 500000, 10000, 71, 1220000, 20000, 17 },
{ "dcdc4", 0x10, (1 << 3), (1 << 3), (0 << 3),
0x16, 0x7f, 500000, 10000, 71, 1220000, 20000, 17 },
{ "dcdc5", 0x10, (1 << 4), (1 << 4), (0 << 4),
0x17, 0x7f, 800000, 10000, 33, 1140000, 20000, 36 },
{ "dcdc6", 0x10, (1 << 5), (1 << 5), (0 << 5),
0x18, 0x1f, 500000, 100000, 30 },
{ "aldo1", 0x11, (1 << 0), (1 << 0), (0 << 0),
0x19, 0x1f, 700000, 100000, 27 },
{ "aldo2", 0x11, (1 << 1), (1 << 1), (0 << 1),
0x20, 0x1f, 700000, 100000, 27 },
{ "aldo3", 0x11, (1 << 2), (1 << 2), (0 << 2),
0x21, 0x1f, 700000, 100000, 27 },
{ "aldo4", 0x11, (1 << 3), (1 << 3), (0 << 3),
0x22, 0x1f, 700000, 100000, 27 },
{ "aldo5", 0x11, (1 << 4), (1 << 4), (0 << 4),
0x23, 0x1f, 700000, 100000, 27 },
{ "bldo1", 0x11, (1 << 5), (1 << 5), (0 << 5),
0x24, 0x1f, 700000, 100000, 27 },
{ "bldo2", 0x11, (1 << 6), (1 << 6), (0 << 6),
0x25, 0x1f, 700000, 100000, 27 },
{ "bldo3", 0x11, (1 << 7), (1 << 7), (0 << 7),
0x26, 0x1f, 700000, 100000, 27 },
{ "bldo4", 0x12, (1 << 0), (1 << 0), (0 << 0),
0x27, 0x1f, 700000, 100000, 27 },
{ "bldo5", 0x12, (1 << 1), (1 << 1), (0 << 1),
0x28, 0x1f, 700000, 100000, 27 },
{ "cldo1", 0x12, (1 << 2), (1 << 2), (0 << 2),
0x29, 0x1f, 700000, 100000, 27 },
{ "cldo2", 0x12, (1 << 3), (1 << 3), (0 << 3),
0x2a, 0x1f, 700000, 100000, 27 },
{ "cldo3", 0x12, (1 << 4), (1 << 4), (0 << 4),
0x2b, 0x1f, 700000, 100000, 27 },
{ "cldo4", 0x12, (1 << 5), (1 << 5), (0 << 5),
0x2d, 0x3f, 700000, 100000, 36 },
{ "cpusldo", 0x12, (1 << 6), (1 << 6), (0 << 6),
0x2e, 0x0f, 700000, 50000, 15 },
{ "sw", 0x12, (1 << 7), (1 << 7), (0 << 7) },
{ NULL }
};
/* Sensors for AXP209 and AXP221/AXP809. */
#define AXPPMIC_NSENSORS 12
@ -307,10 +369,12 @@ const struct axppmic_device axppmic_devices[] = {
{ "x-powers,axp221", "AXP221", axp221_regdata, axp221_sensdata },
{ "x-powers,axp223", "AXP223", axp221_regdata, axp221_sensdata },
{ "x-powers,axp305", "AXP305", axp806_regdata },
{ "x-powers,axp313a", "AXP313A", axp313a_regdata },
{ "x-powers,axp803", "AXP803", axp803_regdata, axp803_sensdata },
{ "x-powers,axp805", "AXP805", axp806_regdata },
{ "x-powers,axp806", "AXP806", axp806_regdata },
{ "x-powers,axp809", "AXP809", axp809_regdata, axp221_sensdata }
{ "x-powers,axp809", "AXP809", axp809_regdata, axp221_sensdata },
{ "x-powers,axp15060", "AXP15060", axp15060_regdata },
};
const struct axppmic_device *
@ -438,6 +502,10 @@ axppmic_i2c_write(struct axppmic_softc *sc, uint8_t reg, uint8_t value)
/* RSB interface */
#include "sxirsb.h"
#if NSXIRSB > 0
int axppmic_rsb_match(struct device *, void *, void *);
void axppmic_rsb_attach(struct device *, struct device *, void *);
@ -489,6 +557,8 @@ axppmic_rsb_write(struct axppmic_softc *sc, uint8_t reg, uint8_t value)
rsb_write_1(sc->sc_cookie, sc->sc_addr, reg, value);
}
#endif
/* Common code */
void axppmic_attach_node(struct axppmic_softc *, int);

View file

@ -1,4 +1,4 @@
# $OpenBSD: files.fdt,v 1.196 2023/07/22 22:43:53 patrick Exp $
# $OpenBSD: files.fdt,v 1.197 2023/07/31 09:00:43 kettenis Exp $
#
# Config file and device description for machine-independent FDT code.
# Included by ports that need it.
@ -48,7 +48,7 @@ file dev/fdt/sxipio.c sxipio
define rsb {}
device sxirsb: rsb
attach sxirsb at fdt
file dev/fdt/sxirsb.c sxirsb
file dev/fdt/sxirsb.c sxirsb needs-flag
device sxipwm
attach sxipwm at fdt

View file

@ -1,4 +1,4 @@
/* $OpenBSD: dwqe.c,v 1.10 2023/07/04 12:48:42 kettenis Exp $ */
/* $OpenBSD: dwqe.c,v 1.11 2023/08/07 20:28:47 kettenis Exp $ */
/*
* Copyright (c) 2008, 2019 Mark Kettenis <kettenis@openbsd.org>
* Copyright (c) 2017, 2022 Patrick Wildt <patrick@blueri.se>
@ -608,6 +608,9 @@ dwqe_tx_proc(struct dwqe_softc *sc)
if (txd->sd_tdes3 & TDES3_OWN)
break;
if (txd->sd_tdes3 & TDES3_ES)
ifp->if_oerrors++;
txb = &sc->sc_txbuf[idx];
if (txb->tb_m) {
bus_dmamap_sync(sc->sc_dmat, txb->tb_map, 0,
@ -808,7 +811,7 @@ dwqe_up(struct dwqe_softc *sc)
if (sc->sc_force_thresh_dma_mode) {
mode &= ~GMAC_MTL_CHAN_TX_OP_MODE_TSF;
mode &= ~GMAC_MTL_CHAN_TX_OP_MODE_TTC_MASK;
mode |= GMAC_MTL_CHAN_TX_OP_MODE_TTC_128;
mode |= GMAC_MTL_CHAN_TX_OP_MODE_TTC_512;
} else {
mode |= GMAC_MTL_CHAN_TX_OP_MODE_TSF;
}

View file

@ -1,4 +1,4 @@
/* $OpenBSD: azalia.c,v 1.283 2023/02/21 13:42:59 bcallah Exp $ */
/* $OpenBSD: azalia.c,v 1.284 2023/07/30 08:46:03 yasuoka Exp $ */
/* $NetBSD: azalia.c,v 1.20 2006/05/07 08:31:44 kent Exp $ */
/*-
@ -463,6 +463,7 @@ azalia_configure_pci(azalia_t *az)
case PCI_PRODUCT_INTEL_600SERIES_HDA:
case PCI_PRODUCT_INTEL_600SERIES_LP_HDA:
case PCI_PRODUCT_INTEL_700SERIES_HDA:
case PCI_PRODUCT_INTEL_700SERIES_LP_HDA:
case PCI_PRODUCT_INTEL_C600_HDA:
case PCI_PRODUCT_INTEL_C610_HDA_1:
case PCI_PRODUCT_INTEL_C610_HDA_2:
@ -492,6 +493,7 @@ const struct pci_matchid azalia_pci_devices[] = {
{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_500SERIES_HDA },
{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_500SERIES_LP_HDA },
{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_600SERIES_LP_HDA },
{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_700SERIES_LP_HDA },
{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_APOLLOLAKE_HDA },
{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_GLK_HDA },
{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_JSL_HDA },

View file

@ -1291,6 +1291,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
int amdgpu_device_pci_reset(struct amdgpu_device *adev);
bool amdgpu_device_need_post(struct amdgpu_device *adev);
bool amdgpu_device_pcie_dynamic_switching_supported(void);
bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);
bool amdgpu_device_aspm_support_quirk(void);

View file

@ -1351,6 +1351,29 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
return true;
}
/*
* Intel hosts such as Raptor Lake and Sapphire Rapids don't support dynamic
* speed switching. Until we have confirmation from Intel that a specific host
* supports it, it's safer that we keep it disabled for all.
*
* https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
* https://gitlab.freedesktop.org/drm/amd/-/issues/2663
*/
/*
 * Report whether the host supports dynamic PCIe link-speed switching.
 * Per the rationale in the comment block above, any x86 Intel host is
 * treated as unsupported; all other hosts are assumed to support it.
 */
bool amdgpu_device_pcie_dynamic_switching_supported(void)
{
#if IS_ENABLED(CONFIG_X86)
#ifdef __linux__
/* Linux build: check the boot CPU's vendor via cpuinfo. */
struct cpuinfo_x86 *c = &cpu_data(0);
if (c->x86_vendor == X86_VENDOR_INTEL)
#else
/* OpenBSD build: cpu_vendor holds the CPUID vendor string. */
if (strcmp(cpu_vendor, "GenuineIntel") == 0)
#endif
return false;
#endif
/* Non-x86 hosts, and non-Intel x86 hosts: assume supported. */
return true;
}
/**
* amdgpu_device_should_use_aspm - check if the device should program ASPM
*

View file

@ -203,16 +203,18 @@ static const struct pci_matchid amdgpu_devices[] = {
{0x1002, 0x1506 },
/* GC 11.0.0, DCN 3.2.0, dGPU, "Navi 31" */
{0x1002, 0x744c }, /* Radeon RX 7900 XT/XTX */
{0x1002, 0x7448 }, /* Radeon Pro W7900 */
{0x1002, 0x744c }, /* Radeon RX 7900 XT/XTX/GRE */
{0x1002, 0x745e }, /* Radeon Pro W7800 */
/* GC 11.0.1, DCN 3.1.4, APU, Ryzen 7040 "Phoenix" */
{0x1002, 0x15bf },
/* GC 11.0.2, DCN 3.2.1, dGPU, "Navi 33" */
{0x1002, 0x7480 }, /* Radeon RX 7600S, 7700S, 7600M XT, 7600 */
{0x1002, 0x7480 }, /* Radeon RX 7600S, 7700S, 7600M XT,
7600, Pro W7600 */
{0x1002, 0x7483 }, /* Radeon RX 7600M */
{0x1002, 0x7489 }, /* Radeon Pro W7500 */
/* GC 11.0.3, DCN 3.2.0, dGPU */
/* GC 11.0.4, DCN 3.1.4, APU */

View file

@ -472,11 +472,11 @@ static int psp_sw_init(void *handle)
return 0;
failed2:
amdgpu_bo_free_kernel(&psp->fw_pri_bo,
&psp->fw_pri_mc_addr, &psp->fw_pri_buf);
failed1:
amdgpu_bo_free_kernel(&psp->fence_buf_bo,
&psp->fence_buf_mc_addr, &psp->fence_buf);
failed1:
amdgpu_bo_free_kernel(&psp->fw_pri_bo,
&psp->fw_pri_mc_addr, &psp->fw_pri_buf);
return ret;
}

View file

@ -4950,6 +4950,30 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
return 0;
}
/*
 * Fill one PSR dirty rectangle and advance the caller's rect counter *i.
 * Logs the rect either as a full-frame update (ffu == true) or as a
 * selective-update rect. WARN_ON only flags an attempt to write at or
 * past DC_MAX_DIRTY_RECTS -- the store still happens, so callers must
 * bound *i themselves before calling.
 */
static inline void fill_dc_dirty_rect(struct drm_plane *plane,
struct rect *dirty_rect, int32_t x,
int32_t y, int32_t width, int32_t height,
int *i, bool ffu)
{
WARN_ON(*i >= DC_MAX_DIRTY_RECTS);
dirty_rect->x = x;
dirty_rect->y = y;
dirty_rect->width = width;
dirty_rect->height = height;
if (ffu)
drm_dbg(plane->dev,
"[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
plane->base.id, width, height);
else
drm_dbg(plane->dev,
"[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)",
plane->base.id, x, y, width, height);
(*i)++;
}
/**
* fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
*
@ -4970,10 +4994,6 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
* addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
* implicitly provide damage clips without any client support via the plane
* bounds.
*
* Today, amdgpu_dm only supports the MPO and cursor usecase.
*
* TODO: Also enable for FB_DAMAGE_CLIPS
*/
static void fill_dc_dirty_rects(struct drm_plane *plane,
struct drm_plane_state *old_plane_state,
@ -4984,12 +5004,11 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
struct rect *dirty_rects = flip_addrs->dirty_rects;
uint32_t num_clips;
struct drm_mode_rect *clips;
bool bb_changed;
bool fb_changed;
u32 i = 0;
flip_addrs->dirty_rect_count = 0;
/*
* Cursor plane has it's own dirty rect update interface. See
* dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data
@ -4997,20 +5016,20 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
if (plane->type == DRM_PLANE_TYPE_CURSOR)
return;
/*
* Today, we only consider MPO use-case for PSR SU. If MPO not
* requested, and there is a plane update, do FFU.
*/
num_clips = drm_plane_get_damage_clips_count(new_plane_state);
clips = drm_plane_get_damage_clips(new_plane_state);
if (!dm_crtc_state->mpo_requested) {
dirty_rects[0].x = 0;
dirty_rects[0].y = 0;
dirty_rects[0].width = dm_crtc_state->base.mode.crtc_hdisplay;
dirty_rects[0].height = dm_crtc_state->base.mode.crtc_vdisplay;
flip_addrs->dirty_rect_count = 1;
DRM_DEBUG_DRIVER("[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
new_plane_state->plane->base.id,
dm_crtc_state->base.mode.crtc_hdisplay,
dm_crtc_state->base.mode.crtc_vdisplay);
if (!num_clips || num_clips > DC_MAX_DIRTY_RECTS)
goto ffu;
for (; flip_addrs->dirty_rect_count < num_clips; clips++)
fill_dc_dirty_rect(new_plane_state->plane,
&dirty_rects[flip_addrs->dirty_rect_count],
clips->x1, clips->y1,
clips->x2 - clips->x1, clips->y2 - clips->y1,
&flip_addrs->dirty_rect_count,
false);
return;
}
@ -5021,7 +5040,6 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
* If plane is moved or resized, also add old bounding box to dirty
* rects.
*/
num_clips = drm_plane_get_damage_clips_count(new_plane_state);
fb_changed = old_plane_state->fb->base.id !=
new_plane_state->fb->base.id;
bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
@ -5029,36 +5047,51 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
old_plane_state->crtc_w != new_plane_state->crtc_w ||
old_plane_state->crtc_h != new_plane_state->crtc_h);
DRM_DEBUG_DRIVER("[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
new_plane_state->plane->base.id,
bb_changed, fb_changed, num_clips);
drm_dbg(plane->dev,
"[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
new_plane_state->plane->base.id,
bb_changed, fb_changed, num_clips);
if (num_clips || fb_changed || bb_changed) {
dirty_rects[i].x = new_plane_state->crtc_x;
dirty_rects[i].y = new_plane_state->crtc_y;
dirty_rects[i].width = new_plane_state->crtc_w;
dirty_rects[i].height = new_plane_state->crtc_h;
DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
new_plane_state->plane->base.id,
dirty_rects[i].x, dirty_rects[i].y,
dirty_rects[i].width, dirty_rects[i].height);
i += 1;
if ((num_clips + (bb_changed ? 2 : 0)) > DC_MAX_DIRTY_RECTS)
goto ffu;
if (bb_changed) {
fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
new_plane_state->crtc_x,
new_plane_state->crtc_y,
new_plane_state->crtc_w,
new_plane_state->crtc_h, &i, false);
/* Add old plane bounding-box if plane is moved or resized */
fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
old_plane_state->crtc_x,
old_plane_state->crtc_y,
old_plane_state->crtc_w,
old_plane_state->crtc_h, &i, false);
}
/* Add old plane bounding-box if plane is moved or resized */
if (bb_changed) {
dirty_rects[i].x = old_plane_state->crtc_x;
dirty_rects[i].y = old_plane_state->crtc_y;
dirty_rects[i].width = old_plane_state->crtc_w;
dirty_rects[i].height = old_plane_state->crtc_h;
DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
old_plane_state->plane->base.id,
dirty_rects[i].x, dirty_rects[i].y,
dirty_rects[i].width, dirty_rects[i].height);
i += 1;
if (num_clips) {
for (; i < num_clips; clips++)
fill_dc_dirty_rect(new_plane_state->plane,
&dirty_rects[i], clips->x1,
clips->y1, clips->x2 - clips->x1,
clips->y2 - clips->y1, &i, false);
} else if (fb_changed && !bb_changed) {
fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
new_plane_state->crtc_x,
new_plane_state->crtc_y,
new_plane_state->crtc_w,
new_plane_state->crtc_h, &i, false);
}
flip_addrs->dirty_rect_count = i;
return;
ffu:
fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[0], 0, 0,
dm_crtc_state->base.mode.crtc_hdisplay,
dm_crtc_state->base.mode.crtc_vdisplay,
&flip_addrs->dirty_rect_count, true);
}
static void update_stream_scaling_settings(const struct drm_display_mode *mode,

View file

@ -677,7 +677,7 @@ void dm_handle_mst_sideband_msg_ready_event(
if (retry == 3) {
DRM_ERROR("Failed to ack MST event.\n");
return;
break;
}
drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr);

View file

@ -1600,6 +1600,10 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
supported_rotations);
if (dm->adev->ip_versions[DCE_HWIP][0] > IP_VERSION(3, 0, 1) &&
plane->type != DRM_PLANE_TYPE_CURSOR)
drm_plane_enable_fb_damage_clips(plane);
drm_plane_helper_add(plane, &dm_plane_helper_funcs);
#ifdef CONFIG_DRM_AMD_DC_HDR

View file

@ -24,6 +24,7 @@
*/
#include "amdgpu_dm_psr.h"
#include "dc_dmub_srv.h"
#include "dc.h"
#include "dm_helpers.h"
#include "amdgpu_dm.h"
@ -50,7 +51,7 @@ static bool link_supports_psrsu(struct dc_link *link)
!link->dpcd_caps.psr_info.psr2_su_y_granularity_cap)
return false;
return true;
return dc_dmub_check_min_version(dc->ctx->dmub_srv->dmub);
}
/*

View file

@ -108,6 +108,11 @@ static int dcn314_get_active_display_cnt_wa(
stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
tmds_present = true;
/* Checking stream / link detection ensuring that PHY is active*/
if (dc_is_dp_signal(stream->signal) && !stream->dpms_off)
display_count++;
}
for (i = 0; i < dc->link_count; i++) {

View file

@ -135,9 +135,7 @@ static const char DC_BUILD_ID[] = "production-build";
* one or two (in the pipe-split case).
*/
/*******************************************************************************
* Private functions
******************************************************************************/
/* Private functions */
static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
{
@ -384,16 +382,18 @@ static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
}
/**
* dc_stream_adjust_vmin_vmax:
* dc_stream_adjust_vmin_vmax - look up pipe context & update parts of DRR
* @dc: dc reference
* @stream: Initial dc stream state
* @adjust: Updated parameters for vertical_total_min and vertical_total_max
*
* Looks up the pipe context of dc_stream_state and updates the
* vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh
* Rate, which is a power-saving feature that targets reducing panel
* refresh rate while the screen is static
*
* @dc: dc reference
* @stream: Initial dc stream state
* @adjust: Updated parameters for vertical_total_min and vertical_total_max
* Return: %true if the pipe context is found and adjusted;
* %false if the pipe context is not found.
*/
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
struct dc_stream_state *stream,
@ -429,18 +429,17 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
}
/**
*****************************************************************************
* Function: dc_stream_get_last_vrr_vtotal
* dc_stream_get_last_used_drr_vtotal - Looks up the pipe context of
* dc_stream_state and gets the last VTOTAL used by DRR (Dynamic Refresh Rate)
*
* @brief
* Looks up the pipe context of dc_stream_state and gets the
* last VTOTAL used by DRR (Dynamic Refresh Rate)
* @dc: [in] dc reference
* @stream: [in] Initial dc stream state
* @refresh_rate: [in] new refresh_rate
*
* @param [in] dc: dc reference
* @param [in] stream: Initial dc stream state
* @param [in] adjust: Updated parameters for vertical_total_min and
* vertical_total_max
*****************************************************************************
* Return: %true if the pipe context is found and there is an associated
* timing_generator for the DC;
* %false if the pipe context is not found or there is no
* timing_generator for the DC.
*/
bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
struct dc_stream_state *stream,
@ -587,7 +586,10 @@ bool dc_stream_stop_dmcu_crc_win_update(struct dc *dc, struct dc_stream_state *s
* once.
*
* By default, only CRC0 is configured, and the entire frame is used to
* calculate the crc.
* calculate the CRC.
*
* Return: %false if the stream is not found or CRC capture is not supported;
* %true if the stream has been configured.
*/
bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
struct crc_params *crc_window, bool enable, bool continuous)
@ -656,7 +658,7 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
* dc_stream_configure_crc needs to be called beforehand to enable CRCs.
*
* Return:
* false if stream is not found, or if CRCs are not enabled.
* %false if stream is not found, or if CRCs are not enabled.
*/
bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
@ -1236,9 +1238,7 @@ static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
PERF_TRACE();
}
/*******************************************************************************
* Public functions
******************************************************************************/
/* Public functions */
struct dc *dc_create(const struct dc_init_data *init_params)
{
@ -1505,17 +1505,19 @@ static void program_timing_sync(
}
}
static bool context_changed(
struct dc *dc,
struct dc_state *context)
static bool streams_changed(struct dc *dc,
struct dc_stream_state *streams[],
uint8_t stream_count)
{
uint8_t i;
if (context->stream_count != dc->current_state->stream_count)
if (stream_count != dc->current_state->stream_count)
return true;
for (i = 0; i < dc->current_state->stream_count; i++) {
if (dc->current_state->streams[i] != context->streams[i])
if (dc->current_state->streams[i] != streams[i])
return true;
if (!streams[i]->link->link_state_valid)
return true;
}
@ -1745,6 +1747,8 @@ void dc_z10_save_init(struct dc *dc)
/*
* Applies given context to HW and copy it into current context.
* It's up to the user to release the src context afterwards.
*
* Return: an enum dc_status result code for the operation
*/
static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
{
@ -1911,12 +1915,114 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
return result;
}
static bool commit_minimal_transition_state(struct dc *dc,
struct dc_state *transition_base_context);
/**
* dc_commit_streams - Commit current stream state
*
* @dc: DC object with the commit state to be configured in the hardware
* @streams: Array with a list of stream state
* @stream_count: Total of streams
*
* Function responsible for commit streams change to the hardware.
*
* Return:
* Return DC_OK if everything work as expected, otherwise, return a dc_status
* code.
*/
enum dc_status dc_commit_streams(struct dc *dc,
struct dc_stream_state *streams[],
uint8_t stream_count)
{
int i, j;
struct dc_state *context;
enum dc_status res = DC_OK;
struct dc_validation_set set[MAX_STREAMS] = {0};
struct pipe_ctx *pipe;
bool handle_exit_odm2to1 = false;
/* Nothing to do if the stream set is unchanged and links are valid. */
if (!streams_changed(dc, streams, stream_count))
return res;
DC_LOG_DC("%s: %d streams\n", __func__, stream_count);
/* Build a validation set from the streams and their current planes. */
for (i = 0; i < stream_count; i++) {
struct dc_stream_state *stream = streams[i];
struct dc_stream_status *status = dc_stream_get_status(stream);
dc_stream_log(dc, stream);
set[i].stream = stream;
if (status) {
set[i].plane_count = status->plane_count;
for (j = 0; j < status->plane_count; j++)
set[i].plane_states[j] = status->plane_states[j];
}
}
/* Check for case where we are going from odm 2:1 to max
 * pipe scenario. For these cases, we will call
 * commit_minimal_transition_state() to exit out of odm 2:1
 * first before processing new streams
 */
if (stream_count == dc->res_pool->pipe_count) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->next_odm_pipe)
handle_exit_odm2to1 = true;
}
}
if (handle_exit_odm2to1)
res = commit_minimal_transition_state(dc, dc->current_state);
context = dc_create_state(dc);
if (!context)
goto context_alloc_fail;
dc_resource_state_copy_construct_current(dc, context);
/*
 * Previous validation was performed with fast_validation = true and
 * the full DML state required for hardware programming was skipped.
 *
 * Re-validate here to calculate these parameters / watermarks.
 */
res = dc_validate_global_state(dc, context, false);
if (res != DC_OK) {
DC_LOG_ERROR("DC commit global validation failure: %s (%d)",
dc_status_to_str(res), res);
/* NOTE(review): this early return appears to leak `context`
 * (no dc_release_state on this path) -- confirm against upstream. */
return res;
}
res = dc_commit_state_no_check(dc, context);
context_alloc_fail:
DC_LOG_DC("%s Finished.\n", __func__);
/* NOTE(review): returns a boolean expression as enum dc_status and
 * relies on DC_OK == 1; a failed dc_create_state also lands here with
 * res still DC_OK -- verify this is the intended contract. */
return (res == DC_OK);
}
/* TODO: When the transition to the new commit sequence is done, remove this
* function in favor of dc_commit_streams. */
bool dc_commit_state(struct dc *dc, struct dc_state *context)
{
enum dc_status result = DC_ERROR_UNEXPECTED;
int i;
if (!context_changed(dc, context))
/* TODO: Since change commit sequence can have a huge impact,
* we decided to only enable it for DCN3x. However, as soon as
* we get more confident about this change we'll need to enable
* the new sequence for all ASICs. */
if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
result = dc_commit_streams(dc, context->streams, context->stream_count);
return result == DC_OK;
}
if (!streams_changed(dc, context->streams, context->stream_count))
return DC_OK;
DC_LOG_DC("%s: %d streams\n",
@ -2482,8 +2588,11 @@ static enum surface_update_type check_update_surfaces_for_stream(
if (stream_update->mst_bw_update)
su_flags->bits.mst_bw = 1;
if (stream_update->crtc_timing_adjust && dc_extended_blank_supported(dc))
su_flags->bits.crtc_timing_adjust = 1;
if (stream_update->stream && stream_update->stream->freesync_on_desktop &&
(stream_update->vrr_infopacket || stream_update->allow_freesync ||
stream_update->vrr_active_variable))
su_flags->bits.fams_changed = 1;
if (su_flags->raw != 0)
overall_type = UPDATE_TYPE_FULL;
@ -3648,17 +3757,17 @@ static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
}
}
/* For SubVP when adding MPO video we need to add a minimal transition.
/* For SubVP when adding or removing planes we need to add a minimal transition
* (even when disabling all planes). Whenever disabling a phantom pipe, we
* must use the minimal transition path to disable the pipe correctly.
*/
if (cur_stream_status && stream->mall_stream_config.type == SUBVP_MAIN) {
/* determine if minimal transition is required due to SubVP*/
if (surface_count > 0) {
if (cur_stream_status->plane_count > surface_count) {
force_minimal_pipe_splitting = true;
} else if (cur_stream_status->plane_count < surface_count) {
force_minimal_pipe_splitting = true;
*is_plane_addition = true;
}
if (cur_stream_status->plane_count > surface_count) {
force_minimal_pipe_splitting = true;
} else if (cur_stream_status->plane_count < surface_count) {
force_minimal_pipe_splitting = true;
*is_plane_addition = true;
}
}
@ -3675,6 +3784,8 @@ static bool commit_minimal_transition_state(struct dc *dc,
enum dc_status ret = DC_ERROR_UNEXPECTED;
unsigned int i, j;
unsigned int pipe_in_use = 0;
bool subvp_in_use = false;
bool odm_in_use = false;
if (!transition_context)
return false;
@ -3687,6 +3798,30 @@ static bool commit_minimal_transition_state(struct dc *dc,
pipe_in_use++;
}
/* If SubVP is enabled and we are adding or removing planes from any main subvp
* pipe, we must use the minimal transition.
*/
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
subvp_in_use = true;
break;
}
}
/* If ODM is enabled and we are adding or removing planes from any ODM
* pipe, we must use the minimal transition.
*/
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->stream && pipe->next_odm_pipe) {
odm_in_use = true;
break;
}
}
/* When the OS add a new surface if we have been used all of pipes with odm combine
* and mpc split feature, it need use commit_minimal_transition_state to transition safely.
* After OS exit MPO, it will back to use odm and mpc split with all of pipes, we need
@ -3695,7 +3830,7 @@ static bool commit_minimal_transition_state(struct dc *dc,
* Reduce the scenarios to use dc_commit_state_no_check in the stage of flip. Especially
* enter/exit MPO when DCN still have enough resources.
*/
if (pipe_in_use != dc->res_pool->pipe_count) {
if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use && !odm_in_use) {
dc_release_state(transition_context);
return true;
}
@ -4430,21 +4565,17 @@ void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc)
dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching_shut_down = true;
}
/*
*****************************************************************************
* Function: dc_is_dmub_outbox_supported -
/**
* dc_is_dmub_outbox_supported - Check if DMUB firmware support outbox notification
*
* @brief
* Checks whether DMUB FW supports outbox notifications, if supported
* DM should register outbox interrupt prior to actually enabling interrupts
* via dc_enable_dmub_outbox
* @dc: [in] dc structure
*
* @param
* [in] dc: dc structure
* Checks whether DMUB FW supports outbox notifications, if supported DM
* should register outbox interrupt prior to actually enabling interrupts
* via dc_enable_dmub_outbox
*
* @return
* True if DMUB FW supports outbox notifications, False otherwise
*****************************************************************************
* Return:
* True if DMUB FW supports outbox notifications, False otherwise
*/
bool dc_is_dmub_outbox_supported(struct dc *dc)
{
@ -4462,21 +4593,17 @@ bool dc_is_dmub_outbox_supported(struct dc *dc)
return dc->debug.enable_dmub_aux_for_legacy_ddc;
}
/*
*****************************************************************************
* Function: dc_enable_dmub_notifications
/**
* dc_enable_dmub_notifications - Check if dmub fw supports outbox
*
* @brief
* Calls dc_is_dmub_outbox_supported to check if dmub fw supports outbox
* notifications. All DMs shall switch to dc_is_dmub_outbox_supported.
* This API shall be removed after switching.
* @dc: [in] dc structure
*
* @param
* [in] dc: dc structure
* Calls dc_is_dmub_outbox_supported to check if dmub fw supports outbox
* notifications. All DMs shall switch to dc_is_dmub_outbox_supported. This
* API shall be removed after switching.
*
* @return
* True if DMUB FW supports outbox notifications, False otherwise
*****************************************************************************
* Return:
* True if DMUB FW supports outbox notifications, False otherwise
*/
bool dc_enable_dmub_notifications(struct dc *dc)
{
@ -4484,18 +4611,11 @@ bool dc_enable_dmub_notifications(struct dc *dc)
}
/**
*****************************************************************************
* Function: dc_enable_dmub_outbox
* dc_enable_dmub_outbox - Enables DMUB unsolicited notification
*
* @brief
* Enables DMUB unsolicited notifications to x86 via outbox
* @dc: [in] dc structure
*
* @param
* [in] dc: dc structure
*
* @return
* None
*****************************************************************************
* Enables DMUB unsolicited notifications to x86 via outbox.
*/
void dc_enable_dmub_outbox(struct dc *dc)
{
@ -4596,21 +4716,17 @@ uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
}
/**
*****************************************************************************
* Function: dc_process_dmub_set_config_async
* dc_process_dmub_set_config_async - Submits set_config command
*
* @brief
* Submits set_config command to dmub via inbox message
* @dc: [in] dc structure
* @link_index: [in] link_index: link index
* @payload: [in] aux payload
* @notify: [out] set_config immediate reply
*
* @param
* [in] dc: dc structure
* [in] link_index: link index
* [in] payload: aux payload
* [out] notify: set_config immediate reply
* Submits set_config command to dmub via inbox message.
*
* @return
* True if successful, False if failure
*****************************************************************************
* Return:
* True if successful, False if failure
*/
bool dc_process_dmub_set_config_async(struct dc *dc,
uint32_t link_index,
@ -4646,21 +4762,17 @@ bool dc_process_dmub_set_config_async(struct dc *dc,
}
/**
*****************************************************************************
* Function: dc_process_dmub_set_mst_slots
* dc_process_dmub_set_mst_slots - Submits MST solt allocation
*
* @brief
* Submits mst slot allocation command to dmub via inbox message
* @dc: [in] dc structure
* @link_index: [in] link index
* @mst_alloc_slots: [in] mst slots to be allotted
* @mst_slots_in_use: [out] mst slots in use returned in failure case
*
* @param
* [in] dc: dc structure
* [in] link_index: link index
* [in] mst_alloc_slots: mst slots to be allotted
* [out] mst_slots_in_use: mst slots in use returned in failure case
* Submits mst slot allocation command to dmub via inbox message
*
* @return
* DC_OK if successful, DC_ERROR if failure
*****************************************************************************
* Return:
* DC_OK if successful, DC_ERROR if failure
*/
enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
uint32_t link_index,
@ -4700,19 +4812,12 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
}
/**
*****************************************************************************
* Function: dc_process_dmub_dpia_hpd_int_enable
* dc_process_dmub_dpia_hpd_int_enable - Submits DPIA DPD interruption
*
* @brief
* Submits dpia hpd int enable command to dmub via inbox message
* @dc: [in] dc structure
* @hpd_int_enable: [in] 1 for hpd int enable, 0 to disable
*
* @param
* [in] dc: dc structure
* [in] hpd_int_enable: 1 for hpd int enable, 0 to disable
*
* @return
* None
*****************************************************************************
* Submits dpia hpd int enable command to dmub via inbox message
*/
void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
uint32_t hpd_int_enable)
@ -4741,16 +4846,13 @@ void dc_disable_accelerated_mode(struct dc *dc)
/**
*****************************************************************************
* dc_notify_vsync_int_state() - notifies vsync enable/disable state
* dc_notify_vsync_int_state - notifies vsync enable/disable state
* @dc: dc structure
* @stream: stream where vsync int state changed
* @enable: whether vsync is enabled or disabled
* @stream: stream where vsync int state changed
* @enable: whether vsync is enabled or disabled
*
* Called when vsync is enabled/disabled
* Will notify DMUB to start/stop ABM interrupts after steady state is reached
*
*****************************************************************************
* Called when vsync is enabled/disabled Will notify DMUB to start/stop ABM
* interrupts after steady state is reached.
*/
void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable)
{
@ -4792,17 +4894,3 @@ void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bo
if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause)
pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst);
}
/*
* dc_extended_blank_supported: Decide whether extended blank is supported
*
* Extended blank is a freesync optimization feature to be enabled in the future.
* During the extra vblank period gained from freesync, we have the ability to enter z9/z10.
*
* @param [in] dc: Current DC state
* @return: Indicate whether extended blank is supported (true or false)
*/
bool dc_extended_blank_supported(struct dc *dc)
{
return dc->debug.extended_blank_optimization && !dc->debug.disable_z10
&& dc->caps.zstate_support && dc->caps.is_apu;
}

View file

@ -1444,6 +1444,26 @@ static int acquire_first_split_pipe(
split_pipe->plane_res.mpcc_inst = pool->dpps[i]->inst;
split_pipe->pipe_idx = i;
split_pipe->stream = stream;
return i;
} else if (split_pipe->prev_odm_pipe &&
split_pipe->prev_odm_pipe->plane_state == split_pipe->plane_state) {
split_pipe->prev_odm_pipe->next_odm_pipe = split_pipe->next_odm_pipe;
if (split_pipe->next_odm_pipe)
split_pipe->next_odm_pipe->prev_odm_pipe = split_pipe->prev_odm_pipe;
if (split_pipe->prev_odm_pipe->plane_state)
resource_build_scaling_params(split_pipe->prev_odm_pipe);
memset(split_pipe, 0, sizeof(*split_pipe));
split_pipe->stream_res.tg = pool->timing_generators[i];
split_pipe->plane_res.hubp = pool->hubps[i];
split_pipe->plane_res.ipp = pool->ipps[i];
split_pipe->plane_res.dpp = pool->dpps[i];
split_pipe->stream_res.opp = pool->opps[i];
split_pipe->plane_res.mpcc_inst = pool->dpps[i]->inst;
split_pipe->pipe_idx = i;
split_pipe->stream = stream;
return i;
}

View file

@ -56,9 +56,7 @@ struct dmub_notification;
#define MIN_VIEWPORT_SIZE 12
#define MAX_NUM_EDP 2
/*******************************************************************************
* Display Core Interfaces
******************************************************************************/
/* Display Core Interfaces */
struct dc_versions {
const char *dc_ver;
struct dmcu_version dmcu_version;
@ -993,9 +991,7 @@ void dc_init_callbacks(struct dc *dc,
void dc_deinit_callbacks(struct dc *dc);
void dc_destroy(struct dc **dc);
/*******************************************************************************
* Surface Interfaces
******************************************************************************/
/* Surface Interfaces */
enum {
TRANSFER_FUNC_POINTS = 1025
@ -1274,12 +1270,23 @@ void dc_post_update_surfaces_to_stream(
#include "dc_stream.h"
/*
* Structure to store surface/stream associations for validation
/**
* struct dc_validation_set - Struct to store surface/stream associations for validation
*/
struct dc_validation_set {
/**
* @stream: Stream state properties
*/
struct dc_stream_state *stream;
/**
* @plane_state: Surface state
*/
struct dc_plane_state *plane_states[MAX_SURFACES];
/**
* @plane_count: Total of active planes
*/
uint8_t plane_count;
};
@ -1326,15 +1333,12 @@ void dc_resource_state_destruct(struct dc_state *context);
bool dc_resource_is_dsc_encoding_supported(const struct dc *dc);
/*
* TODO update to make it about validation sets
* Set up streams and links associated to drive sinks
* The streams parameter is an absolute set of all active streams.
*
* After this call:
* Phy, Encoder, Timing Generator are programmed and enabled.
* New streams are enabled with blank stream; no memory read.
*/
enum dc_status dc_commit_streams(struct dc *dc,
struct dc_stream_state *streams[],
uint8_t stream_count);
/* TODO: When the transition to the new commit sequence is done, remove this
* function in favor of dc_commit_streams. */
bool dc_commit_state(struct dc *dc, struct dc_state *context);
struct dc_state *dc_create_state(struct dc *dc);
@ -1342,9 +1346,7 @@ struct dc_state *dc_copy_state(struct dc_state *src_ctx);
void dc_retain_state(struct dc_state *context);
void dc_release_state(struct dc_state *context);
/*******************************************************************************
* Link Interfaces
******************************************************************************/
/* Link Interfaces */
struct dpcd_caps {
union dpcd_rev dpcd_rev;
@ -1446,9 +1448,7 @@ struct hdcp_caps {
uint32_t dc_get_opp_for_plane(struct dc *dc, struct dc_plane_state *plane);
/*******************************************************************************
* Sink Interfaces - A sink corresponds to a display output device
******************************************************************************/
/* Sink Interfaces - A sink corresponds to a display output device */
struct dc_container_id {
// 128bit GUID in binary form
@ -1520,8 +1520,6 @@ struct dc_sink_init_data {
bool converter_disable_audio;
};
bool dc_extended_blank_supported(struct dc *dc);
struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params);
/* Newer interfaces */
@ -1531,9 +1529,7 @@ struct dc_cursor {
};
/*******************************************************************************
* Interrupt interfaces
******************************************************************************/
/* Interrupt interfaces */
enum dc_irq_source dc_interrupt_to_irq_source(
struct dc *dc,
uint32_t src_id,
@ -1545,9 +1541,7 @@ enum dc_irq_source dc_get_hpd_irq_source_at_index(
void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable);
/*******************************************************************************
* Power Interfaces
******************************************************************************/
/* Power Interfaces */
void dc_set_power_state(
struct dc *dc,
@ -1620,14 +1614,10 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
uint32_t hpd_int_enable);
/*******************************************************************************
* DSC Interfaces
******************************************************************************/
/* DSC Interfaces */
#include "dc_dsc.h"
/*******************************************************************************
* Disable acc mode Interfaces
******************************************************************************/
/* Disable acc mode Interfaces */
void dc_disable_accelerated_mode(struct dc *dc);
#endif /* DC_INTERFACE_H_ */

View file

@ -1026,3 +1026,10 @@ void dc_send_update_cursor_info_to_dmu(
dc_send_cmd_to_dmu(pCtx->stream->ctx->dmub_srv, &cmd);
}
}
/*
 * dc_dmub_check_min_version - Report whether the DMUB firmware version is
 * acceptable, via the hardware-specific is_psrsu_supported hook.
 *
 * When no hook is installed for this ASIC, the version check is considered
 * to pass.
 */
bool dc_dmub_check_min_version(struct dmub_srv *srv)
{
	if (srv->hw_funcs.is_psrsu_supported)
		return srv->hw_funcs.is_psrsu_supported(srv);

	/* No per-ASIC check available: assume the firmware is new enough. */
	return true;
}

View file

@ -89,4 +89,5 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc, struct dc_state *context, b
void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv);
void dc_send_update_cursor_info_to_dmu(struct pipe_ctx *pCtx, uint8_t pipe_idx);
bool dc_dmub_check_min_version(struct dmub_srv *srv);
#endif /* _DMUB_DC_SRV_H_ */

View file

@ -41,6 +41,10 @@ struct timing_sync_info {
struct dc_stream_status {
int primary_otg_inst;
int stream_enc_inst;
/**
* @plane_count: Total of planes attached to a single stream
*/
int plane_count;
int audio_inst;
struct timing_sync_info timing_sync_info;
@ -127,6 +131,7 @@ union stream_update_flags {
uint32_t dsc_changed : 1;
uint32_t mst_bw : 1;
uint32_t crtc_timing_adjust : 1;
uint32_t fams_changed : 1;
} bits;
uint32_t raw;

View file

@ -2036,7 +2036,7 @@ void dcn20_optimize_bandwidth(
dc->clk_mgr,
context,
true);
if (dc_extended_blank_supported(dc) && context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW) {
if (context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW) {
for (i = 0; i < dc->res_pool->pipe_count; ++i) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
@ -2044,7 +2044,7 @@ void dcn20_optimize_bandwidth(
&& pipe_ctx->stream->adjust.v_total_min == pipe_ctx->stream->adjust.v_total_max
&& pipe_ctx->stream->adjust.v_total_max > pipe_ctx->stream->timing.v_total)
pipe_ctx->plane_res.hubp->funcs->program_extended_blank(pipe_ctx->plane_res.hubp,
pipe_ctx->dlg_regs.optimized_min_dst_y_next_start);
pipe_ctx->dlg_regs.min_dst_y_next_start);
}
}
}

View file

@ -292,7 +292,12 @@ void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc)
/*
 * optc3_set_vtotal_min_max - Program the OTG VTOTAL min/max range.
 *
 * When FAMS is usable (DMUB reports mclk switch capability and FAMS is not
 * disabled via debug option), the DRR update is routed through a DMUB
 * command; otherwise the range is programmed directly via the OPTC1 path.
 *
 * The original text contained a leftover unconditional
 * optc1_set_vtotal_min_max() call ahead of the conditional (a diff-merge
 * artifact), which programmed the range twice in the fallback path and
 * placed a declaration after a statement; only the conditional dispatch
 * remains here.
 */
void optc3_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max)
{
	struct dc *dc = optc->ctx->dc;

	if (dc->caps.dmub_caps.mclk_sw && !dc->debug.disable_fams)
		dc_dmub_srv_drr_update_cmd(dc, optc->inst, vtotal_min, vtotal_max);
	else
		optc1_set_vtotal_min_max(optc, vtotal_min, vtotal_max);
}
void optc3_tg_init(struct timing_generator *optc)

View file

@ -103,6 +103,7 @@ static void dcn31_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigne
default:
break;
}
DC_LOG_DEBUG("Set DET%d to %d segments\n", hubp_inst, det_size_segments);
/* Should never be hit, if it is we have an erroneous hw config*/
ASSERT(hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size
+ hubbub2->det3_size + hubbub2->compbuf_size_segments <= hubbub2->crb_size_segs);

View file

@ -136,6 +136,9 @@
#define DCN3_15_MAX_DET_SIZE 384
#define DCN3_15_CRB_SEGMENT_SIZE_KB 64
#define DCN3_15_MAX_DET_SEGS (DCN3_15_MAX_DET_SIZE / DCN3_15_CRB_SEGMENT_SIZE_KB)
/* Minimum 2 extra segments need to be in compbuf and claimable to guarantee seamless mpo transitions */
#define MIN_RESERVED_DET_SEGS 2
enum dcn31_clk_src_array_id {
DCN31_CLK_SRC_PLL0,
@ -1636,21 +1639,61 @@ static bool is_dual_plane(enum surface_pixel_format format)
return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
}
/*
 * source_format_to_bpp - Map a DML source pixel format to the per-pixel
 * byte cost used for DET/CRB sizing.
 *
 * The original if/else chain compared against dm_444_16 twice in the same
 * condition (SourcePixelFormat == dm_444_16 || SourcePixelFormat == dm_444_16);
 * the redundant duplicate is removed here. Behavior is unchanged.
 *
 * Return: bytes per pixel for the given format; 4 for any format not
 * explicitly listed.
 */
static int source_format_to_bpp (enum source_format_class SourcePixelFormat)
{
	switch (SourcePixelFormat) {
	case dm_444_64:
		return 8;
	case dm_444_16:
		return 2;
	case dm_444_8:
		return 1;
	case dm_rgbe_alpha:
		return 5;
	case dm_420_8:
		return 3;
	case dm_420_12:
		return 6;
	default:
		return 4;
	}
}
static bool allow_pixel_rate_crb(struct dc *dc, struct dc_state *context)
{
int i;
struct resource_context *res_ctx = &context->res_ctx;
/*Don't apply for single stream*/
if (context->stream_count < 2)
return false;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (!res_ctx->pipe_ctx[i].stream)
continue;
/*Don't apply if MPO to avoid transition issues*/
if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state != res_ctx->pipe_ctx[i].plane_state)
return false;
}
return true;
}
static int dcn315_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
bool fast_validate)
{
int i, pipe_cnt;
int i, pipe_cnt, crb_idx, crb_pipes;
struct resource_context *res_ctx = &context->res_ctx;
struct pipe_ctx *pipe;
const int max_usable_det = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB;
int remaining_det_segs = max_usable_det / DCN3_15_CRB_SEGMENT_SIZE_KB;
bool pixel_rate_crb = allow_pixel_rate_crb(dc, context);
DC_FP_START();
dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
DC_FP_END();
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
for (i = 0, pipe_cnt = 0, crb_pipes = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_crtc_timing *timing;
if (!res_ctx->pipe_ctx[i].stream)
@ -1672,6 +1715,23 @@ static int dcn315_populate_dml_pipes_from_context(
pipes[pipe_cnt].dout.dsc_input_bpc = 0;
DC_FP_START();
dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt);
if (pixel_rate_crb && !pipe->top_pipe && !pipe->prev_odm_pipe) {
int bpp = source_format_to_bpp(pipes[pipe_cnt].pipe.src.source_format);
/* Ceil to crb segment size */
int approx_det_segs_required_for_pstate = dcn_get_approx_det_segs_required_for_pstate(
&context->bw_ctx.dml.soc, timing->pix_clk_100hz, bpp, DCN3_15_CRB_SEGMENT_SIZE_KB);
if (approx_det_segs_required_for_pstate <= 2 * DCN3_15_MAX_DET_SEGS) {
bool split_required = approx_det_segs_required_for_pstate > DCN3_15_MAX_DET_SEGS;
split_required = split_required || timing->pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc);
split_required = split_required || (pipe->plane_state && pipe->plane_state->src_rect.width > 5120);
if (split_required)
approx_det_segs_required_for_pstate += approx_det_segs_required_for_pstate % 2;
pipes[pipe_cnt].pipe.src.det_size_override = approx_det_segs_required_for_pstate;
remaining_det_segs -= approx_det_segs_required_for_pstate;
} else
remaining_det_segs = -1;
crb_pipes++;
}
DC_FP_END();
if (pipes[pipe_cnt].dout.dsc_enable) {
@ -1690,16 +1750,54 @@ static int dcn315_populate_dml_pipes_from_context(
break;
}
}
pipe_cnt++;
}
/* Spread remaining unreserved crb evenly among all pipes*/
if (pixel_rate_crb) {
for (i = 0, pipe_cnt = 0, crb_idx = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &res_ctx->pipe_ctx[i];
if (!pipe->stream)
continue;
/* Do not use asymetric crb if not enough for pstate support */
if (remaining_det_segs < 0) {
pipes[pipe_cnt].pipe.src.det_size_override = 0;
continue;
}
if (!pipe->top_pipe && !pipe->prev_odm_pipe) {
bool split_required = pipe->stream->timing.pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc)
|| (pipe->plane_state && pipe->plane_state->src_rect.width > 5120);
if (remaining_det_segs > MIN_RESERVED_DET_SEGS)
pipes[pipe_cnt].pipe.src.det_size_override += (remaining_det_segs - MIN_RESERVED_DET_SEGS) / crb_pipes +
(crb_idx < (remaining_det_segs - MIN_RESERVED_DET_SEGS) % crb_pipes ? 1 : 0);
if (pipes[pipe_cnt].pipe.src.det_size_override > 2 * DCN3_15_MAX_DET_SEGS) {
/* Clamp to 2 pipe split max det segments */
remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override - 2 * (DCN3_15_MAX_DET_SEGS);
pipes[pipe_cnt].pipe.src.det_size_override = 2 * DCN3_15_MAX_DET_SEGS;
}
if (pipes[pipe_cnt].pipe.src.det_size_override > DCN3_15_MAX_DET_SEGS || split_required) {
/* If we are splitting we must have an even number of segments */
remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override % 2;
pipes[pipe_cnt].pipe.src.det_size_override -= pipes[pipe_cnt].pipe.src.det_size_override % 2;
}
/* Convert segments into size for DML use */
pipes[pipe_cnt].pipe.src.det_size_override *= DCN3_15_CRB_SEGMENT_SIZE_KB;
crb_idx++;
}
pipe_cnt++;
}
}
if (pipe_cnt)
context->bw_ctx.dml.ip.det_buffer_size_kbytes =
(max_usable_det / DCN3_15_CRB_SEGMENT_SIZE_KB / pipe_cnt) * DCN3_15_CRB_SEGMENT_SIZE_KB;
if (context->bw_ctx.dml.ip.det_buffer_size_kbytes > DCN3_15_MAX_DET_SIZE)
context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_15_MAX_DET_SIZE;
ASSERT(context->bw_ctx.dml.ip.det_buffer_size_kbytes >= DCN3_15_DEFAULT_DET_SIZE);
dc->config.enable_4to1MPC = false;
if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) {
if (is_dual_plane(pipe->plane_state->format)
@ -1707,7 +1805,9 @@ static int dcn315_populate_dml_pipes_from_context(
dc->config.enable_4to1MPC = true;
context->bw_ctx.dml.ip.det_buffer_size_kbytes =
(max_usable_det / DCN3_15_CRB_SEGMENT_SIZE_KB / 4) * DCN3_15_CRB_SEGMENT_SIZE_KB;
} else if (!is_dual_plane(pipe->plane_state->format) && pipe->plane_state->src_rect.width <= 5120) {
} else if (!is_dual_plane(pipe->plane_state->format)
&& pipe->plane_state->src_rect.width <= 5120
&& pipe->stream->timing.pix_clk_100hz < dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc)) {
/* Limit to 5k max to avoid forced pipe split when there is not enough detile for swath */
context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
pipes[0].pipe.src.unbounded_req_mode = true;

View file

@ -948,10 +948,10 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
{
int plane_count;
int i;
unsigned int optimized_min_dst_y_next_start_us;
unsigned int min_dst_y_next_start_us;
plane_count = 0;
optimized_min_dst_y_next_start_us = 0;
min_dst_y_next_start_us = 0;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (context->res_ctx.pipe_ctx[i].plane_state)
plane_count++;
@ -973,19 +973,18 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {
struct dc_link *link = context->streams[0]->sink->link;
struct dc_stream_status *stream_status = &context->stream_status[0];
struct dc_stream_state *current_stream = context->streams[0];
int minmum_z8_residency = dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000;
bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency;
bool is_pwrseq0 = link->link_index == 0;
bool isFreesyncVideo;
if (dc_extended_blank_supported(dc)) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (context->res_ctx.pipe_ctx[i].stream == context->streams[0]
&& context->res_ctx.pipe_ctx[i].stream->adjust.v_total_min == context->res_ctx.pipe_ctx[i].stream->adjust.v_total_max
&& context->res_ctx.pipe_ctx[i].stream->adjust.v_total_min > context->res_ctx.pipe_ctx[i].stream->timing.v_total) {
optimized_min_dst_y_next_start_us =
context->res_ctx.pipe_ctx[i].dlg_regs.optimized_min_dst_y_next_start_us;
break;
}
isFreesyncVideo = current_stream->adjust.v_total_min == current_stream->adjust.v_total_max;
isFreesyncVideo = isFreesyncVideo && current_stream->timing.v_total < current_stream->adjust.v_total_min;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (context->res_ctx.pipe_ctx[i].stream == current_stream && isFreesyncVideo) {
min_dst_y_next_start_us = context->res_ctx.pipe_ctx[i].dlg_regs.min_dst_y_next_start_us;
break;
}
}
@ -993,7 +992,7 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
if (stream_status->plane_count > 1)
return DCN_ZSTATE_SUPPORT_DISALLOW;
if (is_pwrseq0 && (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || optimized_min_dst_y_next_start_us > 5000))
if (is_pwrseq0 && (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || min_dst_y_next_start_us > 5000))
return DCN_ZSTATE_SUPPORT_ALLOW;
else if (is_pwrseq0 && link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr)
return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;

View file

@ -483,7 +483,7 @@ void dcn31_calculate_wm_and_dlg_fp(
int pipe_cnt,
int vlevel)
{
int i, pipe_idx, active_dpp_count = 0;
int i, pipe_idx, total_det = 0, active_hubp_count = 0;
double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
dc_assert_fp_enabled();
@ -529,7 +529,7 @@ void dcn31_calculate_wm_and_dlg_fp(
continue;
if (context->res_ctx.pipe_ctx[i].plane_state)
active_dpp_count++;
active_hubp_count++;
pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
@ -547,9 +547,34 @@ void dcn31_calculate_wm_and_dlg_fp(
}
dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
/* For 31x apu pstate change is only supported if possible in vactive or if there are no active dpps */
/* For 31x apu pstate change is only supported if possible in vactive*/
context->bw_ctx.bw.dcn.clk.p_state_change_support =
context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] == dm_dram_clock_change_vactive || !active_dpp_count;
context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] == dm_dram_clock_change_vactive;
/* If DCN isn't making memory requests we can allow pstate change and lower clocks */
if (!active_hubp_count) {
context->bw_ctx.bw.dcn.clk.socclk_khz = 0;
context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
context->bw_ctx.bw.dcn.clk.dcfclk_khz = 0;
context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = 0;
context->bw_ctx.bw.dcn.clk.dramclk_khz = 0;
context->bw_ctx.bw.dcn.clk.fclk_khz = 0;
context->bw_ctx.bw.dcn.clk.p_state_change_support = true;
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].stream)
context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = 0;
}
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
context->res_ctx.pipe_ctx[i].det_buffer_size_kb =
get_det_buffer_size_kbytes(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
if (context->res_ctx.pipe_ctx[i].det_buffer_size_kb > 384)
context->res_ctx.pipe_ctx[i].det_buffer_size_kb /= 2;
total_det += context->res_ctx.pipe_ctx[i].det_buffer_size_kb;
pipe_idx++;
}
context->bw_ctx.bw.dcn.compbuf_size_kb = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - total_det;
}
void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
@ -797,3 +822,19 @@ void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
else
dml_init_instance(&dc->dml, &dcn3_16_soc, &dcn3_16_ip, DML_PROJECT_DCN31_FPGA);
}
/*
 * dcn_get_max_non_odm_pix_rate_100hz - Highest pixel rate (in 100 Hz units)
 * a single pipe can drive without ODM, derived from the lowest dispclk state
 * after accounting for clock downspread.
 */
int dcn_get_max_non_odm_pix_rate_100hz(struct _vcs_dpi_soc_bounding_box_st *soc)
{
	double dispclk_100hz = soc->clock_limits[0].dispclk_mhz * 10000.0;
	double downspread_factor = 1.0 + soc->dcn_downspread_percent / 100.0;

	return dispclk_100hz / downspread_factor;
}
/*
 * dcn_get_approx_det_segs_required_for_pstate - Estimate the number of CRB
 * segments needed to hide the DRAM clock-change latency for one stream.
 *
 * Roughly calculate required crb to hide latency. In practice there is
 * slightly more buffer available for latency hiding.
 */
int dcn_get_approx_det_segs_required_for_pstate(
		struct _vcs_dpi_soc_bounding_box_st *soc,
		int pix_clk_100hz, int bpp, int seg_size_kb)
{
	/* Data produced during the latency window, scaled to KB. */
	double latency_kb = soc->dram_clock_change_latency_us * pix_clk_100hz * bpp
			/ 10240000;

	/* Round up to a whole number of segments. */
	return (int)(latency_kb + seg_size_kb - 1) / seg_size_kb;
}

View file

@ -46,5 +46,9 @@ void dcn31_calculate_wm_and_dlg_fp(
void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
int dcn_get_max_non_odm_pix_rate_100hz(struct _vcs_dpi_soc_bounding_box_st *soc);
int dcn_get_approx_det_segs_required_for_pstate(
struct _vcs_dpi_soc_bounding_box_st *soc,
int pix_clk_100hz, int bpp, int seg_size_kb);
#endif /* __DCN31_FPU_H__*/

View file

@ -533,7 +533,8 @@ static void CalculateStutterEfficiency(
static void CalculateSwathAndDETConfiguration(
bool ForceSingleDPP,
int NumberOfActivePlanes,
unsigned int DETBufferSizeInKByte,
bool DETSharedByAllDPP,
unsigned int DETBufferSizeInKByte[],
double MaximumSwathWidthLuma[],
double MaximumSwathWidthChroma[],
enum scan_direction_class SourceScan[],
@ -3116,7 +3117,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->SurfaceWidthC[k],
v->SurfaceHeightY[k],
v->SurfaceHeightC[k],
v->DETBufferSizeInKByte[0] * 1024,
v->DETBufferSizeInKByte[k] * 1024,
v->BlockHeight256BytesY[k],
v->BlockHeight256BytesC[k],
v->SurfaceTiling[k],
@ -3311,7 +3312,8 @@ static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib)
CalculateSwathAndDETConfiguration(
false,
v->NumberOfActivePlanes,
v->DETBufferSizeInKByte[0],
mode_lib->project == DML_PROJECT_DCN315 && v->DETSizeOverride[0],
v->DETBufferSizeInKByte,
dummy1,
dummy2,
v->SourceScan,
@ -3777,14 +3779,16 @@ static noinline void CalculatePrefetchSchedulePerPlane(
&v->VReadyOffsetPix[k]);
}
static void PatchDETBufferSizeInKByte(unsigned int NumberOfActivePlanes, int NoOfDPPThisState[], unsigned int config_return_buffer_size_in_kbytes, unsigned int *DETBufferSizeInKByte)
static void PatchDETBufferSizeInKByte(unsigned int NumberOfActivePlanes, int NoOfDPPThisState[], unsigned int config_return_buffer_size_in_kbytes, unsigned int DETBufferSizeInKByte[])
{
int i, total_pipes = 0;
for (i = 0; i < NumberOfActivePlanes; i++)
total_pipes += NoOfDPPThisState[i];
*DETBufferSizeInKByte = ((config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB) / 64 / total_pipes) * 64;
if (*DETBufferSizeInKByte > DCN3_15_MAX_DET_SIZE)
*DETBufferSizeInKByte = DCN3_15_MAX_DET_SIZE;
DETBufferSizeInKByte[0] = ((config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB) / 64 / total_pipes) * 64;
if (DETBufferSizeInKByte[0] > DCN3_15_MAX_DET_SIZE)
DETBufferSizeInKByte[0] = DCN3_15_MAX_DET_SIZE;
for (i = 1; i < NumberOfActivePlanes; i++)
DETBufferSizeInKByte[i] = DETBufferSizeInKByte[0];
}
@ -4024,7 +4028,8 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
CalculateSwathAndDETConfiguration(
true,
v->NumberOfActivePlanes,
v->DETBufferSizeInKByte[0],
mode_lib->project == DML_PROJECT_DCN315 && v->DETSizeOverride[0],
v->DETBufferSizeInKByte,
v->MaximumSwathWidthLuma,
v->MaximumSwathWidthChroma,
v->SourceScan,
@ -4164,6 +4169,10 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
|| (v->PlaneRequiredDISPCLK > v->MaxDispclkRoundedDownToDFSGranularity)) {
v->DISPCLK_DPPCLK_Support[i][j] = false;
}
if (mode_lib->project == DML_PROJECT_DCN315 && v->DETSizeOverride[k] > DCN3_15_MAX_DET_SIZE && v->NoOfDPP[i][j][k] < 2) {
v->MPCCombine[i][j][k] = true;
v->NoOfDPP[i][j][k] = 2;
}
}
v->TotalNumberOfActiveDPP[i][j] = 0;
v->TotalNumberOfSingleDPPPlanes[i][j] = 0;
@ -4640,12 +4649,13 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->ODMCombineEnableThisState[k] = v->ODMCombineEnablePerState[i][k];
}
if (v->NumberOfActivePlanes > 1 && mode_lib->project == DML_PROJECT_DCN315)
PatchDETBufferSizeInKByte(v->NumberOfActivePlanes, v->NoOfDPPThisState, v->ip.config_return_buffer_size_in_kbytes, &v->DETBufferSizeInKByte[0]);
if (v->NumberOfActivePlanes > 1 && mode_lib->project == DML_PROJECT_DCN315 && !v->DETSizeOverride[0])
PatchDETBufferSizeInKByte(v->NumberOfActivePlanes, v->NoOfDPPThisState, v->ip.config_return_buffer_size_in_kbytes, v->DETBufferSizeInKByte);
CalculateSwathAndDETConfiguration(
false,
v->NumberOfActivePlanes,
v->DETBufferSizeInKByte[0],
mode_lib->project == DML_PROJECT_DCN315 && v->DETSizeOverride[0],
v->DETBufferSizeInKByte,
v->MaximumSwathWidthLuma,
v->MaximumSwathWidthChroma,
v->SourceScan,
@ -6557,7 +6567,8 @@ static void CalculateStutterEfficiency(
static void CalculateSwathAndDETConfiguration(
bool ForceSingleDPP,
int NumberOfActivePlanes,
unsigned int DETBufferSizeInKByte,
bool DETSharedByAllDPP,
unsigned int DETBufferSizeInKByteA[],
double MaximumSwathWidthLuma[],
double MaximumSwathWidthChroma[],
enum scan_direction_class SourceScan[],
@ -6641,6 +6652,10 @@ static void CalculateSwathAndDETConfiguration(
*ViewportSizeSupport = true;
for (k = 0; k < NumberOfActivePlanes; ++k) {
unsigned int DETBufferSizeInKByte = DETBufferSizeInKByteA[k];
if (DETSharedByAllDPP && DPPPerPlane[k])
DETBufferSizeInKByte /= DPPPerPlane[k];
if ((SourcePixelFormat[k] == dm_444_64 || SourcePixelFormat[k] == dm_444_32 || SourcePixelFormat[k] == dm_444_16 || SourcePixelFormat[k] == dm_mono_16
|| SourcePixelFormat[k] == dm_mono_8 || SourcePixelFormat[k] == dm_rgbe)) {
if (SurfaceTiling[k] == dm_sw_linear

View file

@ -988,8 +988,7 @@ static void dml_rq_dlg_get_dlg_params(
dlg_vblank_start = interlaced ? (vblank_start / 2) : vblank_start;
disp_dlg_regs->min_dst_y_next_start = (unsigned int) (((double) dlg_vblank_start) * dml_pow(2, 2));
disp_dlg_regs->optimized_min_dst_y_next_start_us = 0;
disp_dlg_regs->optimized_min_dst_y_next_start = disp_dlg_regs->min_dst_y_next_start;
disp_dlg_regs->min_dst_y_next_start_us = 0;
ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int)dml_pow(2, 18));
dml_print("DML_DLG: %s: min_ttu_vblank (us) = %3.2f\n", __func__, min_ttu_vblank);

View file

@ -32,7 +32,7 @@
#include "dml/display_mode_vba.h"
struct _vcs_dpi_ip_params_st dcn3_14_ip = {
.VBlankNomDefaultUS = 668,
.VBlankNomDefaultUS = 800,
.gpuvm_enable = 1,
.gpuvm_max_page_table_levels = 1,
.hostvm_enable = 1,
@ -288,6 +288,7 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
struct resource_context *res_ctx = &context->res_ctx;
struct pipe_ctx *pipe;
bool upscaled = false;
const unsigned int max_allowed_vblank_nom = 1023;
dc_assert_fp_enabled();
@ -301,9 +302,15 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
pipe = &res_ctx->pipe_ctx[i];
timing = &pipe->stream->timing;
if (dc_extended_blank_supported(dc) && pipe->stream->adjust.v_total_max == pipe->stream->adjust.v_total_min
&& pipe->stream->adjust.v_total_min > timing->v_total)
if (pipe->stream->adjust.v_total_min != 0)
pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min;
else
pipes[pipe_cnt].pipe.dest.vtotal = timing->v_total;
pipes[pipe_cnt].pipe.dest.vblank_nom = timing->v_total - pipes[pipe_cnt].pipe.dest.vactive;
pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, dcn3_14_ip.VBlankNomDefaultUS);
pipes[pipe_cnt].pipe.dest.vblank_nom = max(pipes[pipe_cnt].pipe.dest.vblank_nom, timing->v_sync_width);
pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, max_allowed_vblank_nom);
if (pipe->plane_state &&
(pipe->plane_state->src_rect.height < pipe->plane_state->dst_rect.height ||
@ -327,8 +334,6 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
pipes[pipe_cnt].pipe.dest.vblank_nom =
dcn3_14_ip.VBlankNomDefaultUS / (timing->h_total / (timing->pix_clk_100hz / 10000.0));
pipes[pipe_cnt].pipe.src.dcc_rate = 3;
pipes[pipe_cnt].dout.dsc_input_bpc = 0;

View file

@ -1053,7 +1053,6 @@ static void dml_rq_dlg_get_dlg_params(
float vba__refcyc_per_req_delivery_pre_l = get_refcyc_per_req_delivery_pre_l_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
float vba__refcyc_per_req_delivery_l = get_refcyc_per_req_delivery_l_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
int blank_lines = 0;
memset(disp_dlg_regs, 0, sizeof(*disp_dlg_regs));
memset(disp_ttu_regs, 0, sizeof(*disp_ttu_regs));
@ -1077,17 +1076,10 @@ static void dml_rq_dlg_get_dlg_params(
min_ttu_vblank = get_min_ttu_vblank_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); // From VBA
dlg_vblank_start = interlaced ? (vblank_start / 2) : vblank_start;
disp_dlg_regs->optimized_min_dst_y_next_start = disp_dlg_regs->min_dst_y_next_start;
disp_dlg_regs->optimized_min_dst_y_next_start_us = 0;
disp_dlg_regs->min_dst_y_next_start = (unsigned int) (((double) dlg_vblank_start) * dml_pow(2, 2));
blank_lines = (dst->vblank_end + dst->vtotal_min - dst->vblank_start - dst->vstartup_start - 1);
if (blank_lines < 0)
blank_lines = 0;
if (blank_lines != 0) {
disp_dlg_regs->optimized_min_dst_y_next_start = vba__min_dst_y_next_start;
disp_dlg_regs->optimized_min_dst_y_next_start_us = (disp_dlg_regs->optimized_min_dst_y_next_start * dst->hactive) / (unsigned int) dst->pixel_rate_mhz;
disp_dlg_regs->min_dst_y_next_start = disp_dlg_regs->optimized_min_dst_y_next_start;
}
disp_dlg_regs->min_dst_y_next_start_us =
(vba__min_dst_y_next_start * dst->hactive) / (unsigned int) dst->pixel_rate_mhz;
disp_dlg_regs->min_dst_y_next_start = vba__min_dst_y_next_start * dml_pow(2, 2);
ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int)dml_pow(2, 18));
dml_print("DML_DLG: %s: min_ttu_vblank (us) = %3.2f\n", __func__, min_ttu_vblank);

View file

@ -1237,7 +1237,7 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt, int vlevel)
{
int i, pipe_idx;
int i, pipe_idx, active_hubp_count = 0;
bool usr_retraining_support = false;
bool unbounded_req_enabled = false;
@ -1282,6 +1282,8 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
if (context->res_ctx.pipe_ctx[i].plane_state)
active_hubp_count++;
pipes[pipe_idx].pipe.dest.vstartup_start = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt,
pipe_idx);
pipes[pipe_idx].pipe.dest.vupdate_offset = get_vupdate_offset(&context->bw_ctx.dml, pipes, pipe_cnt,
@ -1303,10 +1305,23 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
context->bw_ctx.bw.dcn.clk.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
if (context->res_ctx.pipe_ctx[i].plane_state)
context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
else
context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = 0;
context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest;
pipe_idx++;
}
/* If DCN isn't making memory requests we can allow pstate change and lower clocks */
if (!active_hubp_count) {
context->bw_ctx.bw.dcn.clk.socclk_khz = 0;
context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
context->bw_ctx.bw.dcn.clk.dcfclk_khz = 0;
context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = 0;
context->bw_ctx.bw.dcn.clk.dramclk_khz = 0;
context->bw_ctx.bw.dcn.clk.fclk_khz = 0;
context->bw_ctx.bw.dcn.clk.p_state_change_support = true;
}
/*save a original dppclock copy*/
context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz;
context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz;

View file

@ -618,8 +618,7 @@ struct _vcs_dpi_display_dlg_regs_st {
unsigned int refcyc_h_blank_end;
unsigned int dlg_vblank_end;
unsigned int min_dst_y_next_start;
unsigned int optimized_min_dst_y_next_start;
unsigned int optimized_min_dst_y_next_start_us;
unsigned int min_dst_y_next_start_us;
unsigned int refcyc_per_htotal;
unsigned int refcyc_x_after_scaler;
unsigned int dst_y_after_scaler;

View file

@ -569,6 +569,10 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
mode_lib->vba.OutputLinkDPRate[mode_lib->vba.NumberOfActivePlanes] = dout->dp_rate;
mode_lib->vba.ODMUse[mode_lib->vba.NumberOfActivePlanes] = dst->odm_combine_policy;
mode_lib->vba.DETSizeOverride[mode_lib->vba.NumberOfActivePlanes] = src->det_size_override;
if (src->det_size_override)
mode_lib->vba.DETBufferSizeInKByte[mode_lib->vba.NumberOfActivePlanes] = src->det_size_override;
else
mode_lib->vba.DETBufferSizeInKByte[mode_lib->vba.NumberOfActivePlanes] = ip->det_buffer_size_kbytes;
//TODO: Need to assign correct values to dp_multistream vars
mode_lib->vba.OutputMultistreamEn[mode_lib->vba.NumberOfActiveSurfaces] = dout->dp_multistream_en;
mode_lib->vba.OutputMultistreamId[mode_lib->vba.NumberOfActiveSurfaces] = dout->dp_multistream_id;
@ -783,6 +787,8 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
mode_lib->vba.pipe_plane[k] =
mode_lib->vba.NumberOfActivePlanes;
mode_lib->vba.DPPPerPlane[mode_lib->vba.NumberOfActivePlanes]++;
if (src_k->det_size_override)
mode_lib->vba.DETBufferSizeInKByte[mode_lib->vba.NumberOfActivePlanes] = src_k->det_size_override;
if (mode_lib->vba.SourceScan[mode_lib->vba.NumberOfActivePlanes]
== dm_horz) {
mode_lib->vba.ViewportWidth[mode_lib->vba.NumberOfActivePlanes] +=

View file

@ -350,6 +350,8 @@ struct dmub_srv_hw_funcs {
bool (*is_supported)(struct dmub_srv *dmub);
bool (*is_psrsu_supported)(struct dmub_srv *dmub);
bool (*is_hw_init)(struct dmub_srv *dmub);
bool (*is_phy_init)(struct dmub_srv *dmub);

View file

@ -347,7 +347,7 @@ union dmub_fw_boot_status {
uint32_t optimized_init_done : 1; /**< 1 if optimized init done */
uint32_t restore_required : 1; /**< 1 if driver should call restore */
uint32_t defer_load : 1; /**< 1 if VBIOS data is deferred programmed */
uint32_t reserved : 1;
uint32_t fams_enabled : 1; /**< 1 if VBIOS data is deferred programmed */
uint32_t detection_required: 1; /**< if detection need to be triggered by driver */
} bits; /**< status bits */

View file

@ -297,6 +297,11 @@ bool dmub_dcn31_is_supported(struct dmub_srv *dmub)
return supported;
}
bool dmub_dcn31_is_psrsu_supported(struct dmub_srv *dmub)
{
return dmub->fw_version >= DMUB_FW_VERSION(4, 0, 59);
}
void dmub_dcn31_set_gpint(struct dmub_srv *dmub,
union dmub_gpint_data_register reg)
{

View file

@ -219,6 +219,8 @@ bool dmub_dcn31_is_hw_init(struct dmub_srv *dmub);
bool dmub_dcn31_is_supported(struct dmub_srv *dmub);
bool dmub_dcn31_is_psrsu_supported(struct dmub_srv *dmub);
void dmub_dcn31_set_gpint(struct dmub_srv *dmub,
union dmub_gpint_data_register reg);

View file

@ -0,0 +1,67 @@
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "../dmub_srv.h"
#include "dmub_reg.h"
#include "dmub_dcn314.h"
#include "dcn/dcn_3_1_4_offset.h"
#include "dcn/dcn_3_1_4_sh_mask.h"
#define DCN_BASE__INST0_SEG0 0x00000012
#define DCN_BASE__INST0_SEG1 0x000000C0
#define DCN_BASE__INST0_SEG2 0x000034C0
#define DCN_BASE__INST0_SEG3 0x00009000
#define DCN_BASE__INST0_SEG4 0x02403C00
#define DCN_BASE__INST0_SEG5 0
#define BASE_INNER(seg) DCN_BASE__INST0_SEG##seg
#define CTX dmub
#define REGS dmub->regs_dcn31
#define REG_OFFSET_EXP(reg_name) (BASE(reg##reg_name##_BASE_IDX) + reg##reg_name)
/* Registers. */
const struct dmub_srv_dcn31_regs dmub_srv_dcn314_regs = {
#define DMUB_SR(reg) REG_OFFSET_EXP(reg),
{
DMUB_DCN31_REGS()
DMCUB_INTERNAL_REGS()
},
#undef DMUB_SR
#define DMUB_SF(reg, field) FD_MASK(reg, field),
{ DMUB_DCN31_FIELDS() },
#undef DMUB_SF
#define DMUB_SF(reg, field) FD_SHIFT(reg, field),
{ DMUB_DCN31_FIELDS() },
#undef DMUB_SF
};
bool dmub_dcn314_is_psrsu_supported(struct dmub_srv *dmub)
{
return dmub->fw_version >= DMUB_FW_VERSION(8, 0, 16);
}

View file

@ -0,0 +1,35 @@
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef _DMUB_DCN314_H_
#define _DMUB_DCN314_H_
#include "dmub_dcn31.h"
extern const struct dmub_srv_dcn31_regs dmub_srv_dcn314_regs;
bool dmub_dcn314_is_psrsu_supported(struct dmub_srv *dmub);
#endif /* _DMUB_DCN314_H_ */

View file

@ -32,6 +32,7 @@
#include "dmub_dcn302.h"
#include "dmub_dcn303.h"
#include "dmub_dcn31.h"
#include "dmub_dcn314.h"
#include "dmub_dcn315.h"
#include "dmub_dcn316.h"
#include "dmub_dcn32.h"
@ -226,12 +227,17 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
case DMUB_ASIC_DCN314:
case DMUB_ASIC_DCN315:
case DMUB_ASIC_DCN316:
if (asic == DMUB_ASIC_DCN315)
if (asic == DMUB_ASIC_DCN314) {
dmub->regs_dcn31 = &dmub_srv_dcn314_regs;
funcs->is_psrsu_supported = dmub_dcn314_is_psrsu_supported;
} else if (asic == DMUB_ASIC_DCN315) {
dmub->regs_dcn31 = &dmub_srv_dcn315_regs;
else if (asic == DMUB_ASIC_DCN316)
} else if (asic == DMUB_ASIC_DCN316) {
dmub->regs_dcn31 = &dmub_srv_dcn316_regs;
else
} else {
dmub->regs_dcn31 = &dmub_srv_dcn31_regs;
funcs->is_psrsu_supported = dmub_dcn31_is_psrsu_supported;
}
funcs->reset = dmub_dcn31_reset;
funcs->reset_release = dmub_dcn31_reset_release;
funcs->backdoor_load = dmub_dcn31_backdoor_load;

View file

@ -2081,91 +2081,36 @@ static int sienna_cichlid_display_disable_memory_clock_switch(struct smu_context
return ret;
}
static void sienna_cichlid_get_override_pcie_settings(struct smu_context *smu,
uint32_t *gen_speed_override,
uint32_t *lane_width_override)
{
struct amdgpu_device *adev = smu->adev;
*gen_speed_override = 0xff;
*lane_width_override = 0xff;
switch (adev->pdev->device) {
case 0x73A0:
case 0x73A1:
case 0x73A2:
case 0x73A3:
case 0x73AB:
case 0x73AE:
/* Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32 */
*lane_width_override = 6;
break;
case 0x73E0:
case 0x73E1:
case 0x73E3:
*lane_width_override = 4;
break;
case 0x7420:
case 0x7421:
case 0x7422:
case 0x7423:
case 0x7424:
*lane_width_override = 3;
break;
default:
break;
}
}
#ifndef MAX
#define MAX(a, b) ((a) > (b) ? (a) : (b))
#endif
static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
uint32_t pcie_gen_cap,
uint32_t pcie_width_cap)
{
struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
struct smu_11_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table;
uint32_t gen_speed_override, lane_width_override;
uint8_t *table_member1, *table_member2;
uint32_t min_gen_speed, max_gen_speed;
uint32_t min_lane_width, max_lane_width;
uint32_t smu_pcie_arg;
u32 smu_pcie_arg;
int ret, i;
GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1);
GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2);
/* PCIE gen speed and lane width override */
if (!amdgpu_device_pcie_dynamic_switching_supported()) {
if (pcie_table->pcie_gen[NUM_LINK_LEVELS - 1] < pcie_gen_cap)
pcie_gen_cap = pcie_table->pcie_gen[NUM_LINK_LEVELS - 1];
sienna_cichlid_get_override_pcie_settings(smu,
&gen_speed_override,
&lane_width_override);
if (pcie_table->pcie_lane[NUM_LINK_LEVELS - 1] < pcie_width_cap)
pcie_width_cap = pcie_table->pcie_lane[NUM_LINK_LEVELS - 1];
/* PCIE gen speed override */
if (gen_speed_override != 0xff) {
min_gen_speed = MIN(pcie_gen_cap, gen_speed_override);
max_gen_speed = MIN(pcie_gen_cap, gen_speed_override);
/* Force all levels to use the same settings */
for (i = 0; i < NUM_LINK_LEVELS; i++) {
pcie_table->pcie_gen[i] = pcie_gen_cap;
pcie_table->pcie_lane[i] = pcie_width_cap;
}
} else {
min_gen_speed = MAX(0, table_member1[0]);
max_gen_speed = MIN(pcie_gen_cap, table_member1[1]);
min_gen_speed = min_gen_speed > max_gen_speed ?
max_gen_speed : min_gen_speed;
for (i = 0; i < NUM_LINK_LEVELS; i++) {
if (pcie_table->pcie_gen[i] > pcie_gen_cap)
pcie_table->pcie_gen[i] = pcie_gen_cap;
if (pcie_table->pcie_lane[i] > pcie_width_cap)
pcie_table->pcie_lane[i] = pcie_width_cap;
}
}
pcie_table->pcie_gen[0] = min_gen_speed;
pcie_table->pcie_gen[1] = max_gen_speed;
/* PCIE lane width override */
if (lane_width_override != 0xff) {
min_lane_width = MIN(pcie_width_cap, lane_width_override);
max_lane_width = MIN(pcie_width_cap, lane_width_override);
} else {
min_lane_width = MAX(1, table_member2[0]);
max_lane_width = MIN(pcie_width_cap, table_member2[1]);
min_lane_width = min_lane_width > max_lane_width ?
max_lane_width : min_lane_width;
}
pcie_table->pcie_lane[0] = min_lane_width;
pcie_table->pcie_lane[1] = max_lane_width;
for (i = 0; i < NUM_LINK_LEVELS; i++) {
smu_pcie_arg = (i << 16 |

View file

@ -2490,29 +2490,6 @@ int smu_v13_0_mode1_reset(struct smu_context *smu)
return ret;
}
/*
* Intel hosts such as Raptor Lake and Sapphire Rapids don't support dynamic
* speed switching. Until we have confirmation from Intel that a specific host
* supports it, it's safer that we keep it disabled for all.
*
* https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
* https://gitlab.freedesktop.org/drm/amd/-/issues/2663
*/
static bool smu_v13_0_is_pcie_dynamic_switching_supported(void)
{
#if IS_ENABLED(CONFIG_X86)
#ifdef __linux__
struct cpuinfo_x86 *c = &cpu_data(0);
if (c->x86_vendor == X86_VENDOR_INTEL)
#else
if (strcmp(cpu_vendor, "GenuineIntel") == 0)
#endif
return false;
#endif
return true;
}
int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
uint32_t pcie_gen_cap,
uint32_t pcie_width_cap)
@ -2524,7 +2501,7 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
uint32_t smu_pcie_arg;
int ret, i;
if (!smu_v13_0_is_pcie_dynamic_switching_supported()) {
if (!amdgpu_device_pcie_dynamic_switching_supported()) {
if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap)
pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1];

View file

@ -1,4 +1,4 @@
/* $OpenBSD: drm_linux.c,v 1.101 2023/07/18 06:58:59 claudio Exp $ */
/* $OpenBSD: drm_linux.c,v 1.103 2023/08/04 09:36:28 jsg Exp $ */
/*
* Copyright (c) 2013 Jonathan Gray <jsg@openbsd.org>
* Copyright (c) 2015, 2016 Mark Kettenis <kettenis@openbsd.org>
@ -2007,14 +2007,14 @@ dma_fence_get_stub(void)
}
struct dma_fence *
dma_fence_allocate_private_stub(void)
dma_fence_allocate_private_stub(ktime_t ts)
{
struct dma_fence *f = malloc(sizeof(*f), M_DRM,
M_ZERO | M_WAITOK | M_CANFAIL);
if (f == NULL)
return ERR_PTR(-ENOMEM);
return NULL;
dma_fence_init(f, &dma_fence_stub_ops, &dma_fence_stub_mtx, 0, 0);
dma_fence_signal(f);
dma_fence_signal_timestamp(f, ts);
return f;
}

View file

@ -356,10 +356,10 @@ EXPORT_SYMBOL(drm_syncobj_replace_fence);
*/
static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
{
struct dma_fence *fence = dma_fence_allocate_private_stub();
struct dma_fence *fence = dma_fence_allocate_private_stub(ktime_get());
if (IS_ERR(fence))
return PTR_ERR(fence);
if (!fence)
return -ENOMEM;
drm_syncobj_replace_fence(syncobj, fence);
dma_fence_put(fence);

View file

@ -1,4 +1,4 @@
# $OpenBSD: files.drm,v 1.59 2023/01/01 01:34:34 jsg Exp $
# $OpenBSD: files.drm,v 1.60 2023/08/04 08:49:46 jsg Exp $
#file dev/pci/drm/aperture.c drm
file dev/pci/drm/dma-resv.c drm
@ -1182,6 +1182,7 @@ file dev/pci/drm/amd/display/dmub/src/dmub_dcn301.c amdgpu
file dev/pci/drm/amd/display/dmub/src/dmub_dcn302.c amdgpu
file dev/pci/drm/amd/display/dmub/src/dmub_dcn303.c amdgpu
file dev/pci/drm/amd/display/dmub/src/dmub_dcn31.c amdgpu
file dev/pci/drm/amd/display/dmub/src/dmub_dcn314.c amdgpu
file dev/pci/drm/amd/display/dmub/src/dmub_dcn315.c amdgpu
file dev/pci/drm/amd/display/dmub/src/dmub_dcn316.c amdgpu
file dev/pci/drm/amd/display/dmub/src/dmub_dcn32.c amdgpu

View file

@ -163,6 +163,8 @@ struct i915_vma *intel_dpt_pin(struct i915_address_space *vm)
i915_vma_get(vma);
}
dpt->obj->mm.dirty = true;
atomic_dec(&i915->gpu_error.pending_fb_pin);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
@ -258,7 +260,7 @@ intel_dpt_create(struct intel_framebuffer *fb)
dpt_obj = i915_gem_object_create_stolen(i915, size);
if (IS_ERR(dpt_obj) && !HAS_LMEM(i915)) {
drm_dbg_kms(&i915->drm, "Allocating dpt from smem\n");
dpt_obj = i915_gem_object_create_internal(i915, size);
dpt_obj = i915_gem_object_create_shmem(i915, size);
}
if (IS_ERR(dpt_obj))
return ERR_CAST(dpt_obj);

View file

@ -1185,8 +1185,10 @@ static int igt_write_huge(struct drm_i915_private *i915,
* times in succession a possibility by enlarging the permutation array.
*/
order = i915_random_order(count * count, &prng);
if (!order)
return -ENOMEM;
if (!order) {
err = -ENOMEM;
goto out;
}
max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
max = div_u64(max - size, max_page_size);

View file

@ -78,7 +78,7 @@ bool dma_fence_remove_callback(struct dma_fence *, struct dma_fence_cb *);
bool dma_fence_is_container(struct dma_fence *);
struct dma_fence *dma_fence_get_stub(void);
struct dma_fence *dma_fence_allocate_private_stub(void);
struct dma_fence *dma_fence_allocate_private_stub(ktime_t);
static inline void
dma_fence_free(struct dma_fence *fence)

View file

@ -16,7 +16,7 @@
#define __GFP_RETRY_MAYFAIL 0
#define __GFP_MOVABLE 0
#define __GFP_COMP 0
#define __GFP_KSWAPD_RECLAIM 0
#define __GFP_KSWAPD_RECLAIM M_NOWAIT
#define __GFP_HIGHMEM 0
#define __GFP_RECLAIMABLE 0
#define __GFP_NOMEMALLOC 0

View file

@ -41,9 +41,20 @@ jiffies_to_nsecs(const unsigned long x)
#define nsecs_to_jiffies(x) (((uint64_t)(x)) * hz / 1000000000)
#define nsecs_to_jiffies64(x) (((uint64_t)(x)) * hz / 1000000000)
#define get_jiffies_64() jiffies
#define time_after(a,b) ((long)(b) - (long)(a) < 0)
static inline int
time_after(const unsigned long a, const unsigned long b)
{
return((long)(b - a) < 0);
}
#define time_before(a,b) time_after(b,a)
static inline int
time_after_eq(const unsigned long a, const unsigned long b)
{
return((long)(b - a) <= 0);
}
#define time_after32(a,b) ((int32_t)((uint32_t)(b) - (uint32_t)(a)) < 0)
#define time_after_eq(a,b) ((long)(b) - (long)(a) <= 0)
#define time_before(a,b) ((long)(a) - (long)(b) < 0)
#endif

View file

@ -1,4 +1,4 @@
/* $OpenBSD: scatterlist.h,v 1.5 2023/01/01 01:34:58 jsg Exp $ */
/* $OpenBSD: scatterlist.h,v 1.6 2023/08/02 11:03:17 jsg Exp $ */
/*
* Copyright (c) 2013, 2014, 2015 Mark Kettenis
*
@ -119,7 +119,6 @@ sg_set_page(struct scatterlist *sgl, struct vm_page *page,
sgl->dma_address = page ? VM_PAGE_TO_PHYS(page) : 0;
sgl->offset = offset;
sgl->length = length;
sgl->end = false;
}
#define sg_dma_address(sg) ((sg)->dma_address)

View file

@ -499,17 +499,18 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
goto out;
}
bounce:
ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
if (ret == -EMULTIHOP) {
do {
ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
if (ret != -EMULTIHOP)
break;
ret = ttm_bo_bounce_temp_buffer(bo, &evict_mem, ctx, &hop);
if (ret) {
} while (!ret);
if (ret) {
ttm_resource_free(bo, &evict_mem);
if (ret != -ERESTARTSYS && ret != -EINTR)
pr_err("Buffer eviction failed\n");
ttm_resource_free(bo, &evict_mem);
goto out;
}
/* try and move to final place now. */
goto bounce;
}
out:
return ret;
@ -549,6 +550,12 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
{
bool ret = false;
if (bo->pin_count) {
*locked = false;
*busy = false;
return false;
}
if (bo->base.resv == ctx->resv) {
dma_resv_assert_held(bo->base.resv);
if (ctx->allow_res_evict)

View file

@ -1,4 +1,4 @@
/* $OpenBSD: if_ix.c,v 1.202 2023/07/28 20:25:08 bluhm Exp $ */
/* $OpenBSD: if_ix.c,v 1.203 2023/08/03 18:56:32 jan Exp $ */
/******************************************************************************
@ -3231,7 +3231,7 @@ ixgbe_rxeof(struct rx_ring *rxr)
sendmp->m_pkthdr.len = 0;
sendmp->m_pkthdr.ph_mss = 0;
#if NVLAN > 0
if (sc->vlan_stripping && staterr & IXGBE_RXD_STAT_VP) {
if (staterr & IXGBE_RXD_STAT_VP) {
sendmp->m_pkthdr.ether_vtag = vtag;
SET(sendmp->m_flags, M_VLANTAG);
}
@ -3273,7 +3273,8 @@ ixgbe_rxeof(struct rx_ring *rxr)
ether_extract_headers(sendmp, &ext);
hdrlen = sizeof(*ext.eh);
#if NVLAN > 0
if (ext.evh)
if (ISSET(sendmp->m_flags, M_VLANTAG) ||
ext.evh)
hdrlen += ETHER_VLAN_ENCAP_LEN;
#endif
if (ext.ip4)
@ -3361,20 +3362,8 @@ ixgbe_rx_checksum(uint32_t staterr, struct mbuf * mp)
void
ixgbe_setup_vlan_hw_support(struct ix_softc *sc)
{
struct ifnet *ifp = &sc->arpcom.ac_if;
uint32_t ctrl;
int i;
/*
* We have to disable VLAN striping when using TCP offloading, due to a
* firmware bug.
*/
if (ISSET(ifp->if_xflags, IFXF_LRO)) {
sc->vlan_stripping = 0;
return;
}
sc->vlan_stripping = 1;
uint32_t ctrl;
int i;
/*
* A soft reset zero's out the VFTA, so

View file

@ -1,4 +1,4 @@
/* $OpenBSD: if_ix.h,v 1.45 2022/06/27 15:11:23 jan Exp $ */
/* $OpenBSD: if_ix.h,v 1.46 2023/08/04 10:58:27 jan Exp $ */
/******************************************************************************
@ -225,7 +225,6 @@ struct ix_softc {
struct ifmedia media;
struct intrmap *sc_intrmap;
int if_flags;
int vlan_stripping;
uint16_t num_vlans;
uint16_t num_queues;

View file

@ -1,4 +1,4 @@
/* $OpenBSD: if_vmx.c,v 1.70 2022/09/11 08:38:39 yasuoka Exp $ */
/* $OpenBSD: if_vmx.c,v 1.78 2023/07/30 04:27:01 dlg Exp $ */
/*
* Copyright (c) 2013 Tsubai Masanari
@ -61,31 +61,49 @@
#define VMX_RX_GEN htole32(VMXNET3_RX_GEN_M << VMXNET3_RX_GEN_S)
#define VMX_RXC_GEN htole32(VMXNET3_RXC_GEN_M << VMXNET3_RXC_GEN_S)
struct vmx_dmamem {
bus_dmamap_t vdm_map;
bus_dma_segment_t vdm_seg;
int vdm_nsegs;
size_t vdm_size;
caddr_t vdm_kva;
};
#define VMX_DMA_MAP(_vdm) ((_vdm)->vdm_map)
#define VMX_DMA_DVA(_vdm) ((_vdm)->vdm_map->dm_segs[0].ds_addr)
#define VMX_DMA_KVA(_vdm) ((void *)(_vdm)->vdm_kva)
#define VMX_DMA_LEN(_vdm) ((_vdm)->vdm_size)
struct vmxnet3_softc;
struct vmxnet3_txring {
struct vmx_dmamem dmamem;
struct mbuf *m[NTXDESC];
bus_dmamap_t dmap[NTXDESC];
struct vmxnet3_txdesc *txd;
u_int32_t gen;
u_int prod;
u_int cons;
volatile u_int prod;
volatile u_int cons;
};
struct vmxnet3_rxring {
struct vmxnet3_softc *sc;
struct vmxnet3_rxq_shared *rs; /* copy of the rxqueue rs */
struct vmx_dmamem dmamem;
struct mbuf *m[NRXDESC];
bus_dmamap_t dmap[NRXDESC];
struct mutex mtx;
struct if_rxring rxr;
struct timeout refill;
struct vmxnet3_rxdesc *rxd;
bus_size_t rxh;
u_int fill;
u_int32_t gen;
u_int8_t rid;
};
struct vmxnet3_comp_ring {
struct vmx_dmamem dmamem;
union {
struct vmxnet3_txcompdesc *txcd;
struct vmxnet3_rxcompdesc *rxcd;
@ -101,6 +119,7 @@ struct vmxnet3_txqueue {
struct vmxnet3_txq_shared *ts;
struct ifqueue *ifq;
struct kstat *txkstat;
unsigned int queue;
} __aligned(64);
struct vmxnet3_rxqueue {
@ -193,6 +212,12 @@ void vmxnet3_media_status(struct ifnet *, struct ifmediareq *);
int vmxnet3_media_change(struct ifnet *);
void *vmxnet3_dma_allocmem(struct vmxnet3_softc *, u_int, u_int, bus_addr_t *);
static int vmx_dmamem_alloc(struct vmxnet3_softc *, struct vmx_dmamem *,
bus_size_t, u_int);
#ifdef notyet
static void vmx_dmamem_free(struct vmxnet3_softc *, struct vmx_dmamem *);
#endif
#if NKSTAT > 0
static void vmx_kstat_init(struct vmxnet3_softc *);
static void vmx_kstat_txstats(struct vmxnet3_softc *,
@ -532,16 +557,18 @@ vmxnet3_alloc_txring(struct vmxnet3_softc *sc, int queue, int intr)
struct vmxnet3_txq_shared *ts;
struct vmxnet3_txring *ring = &tq->cmd_ring;
struct vmxnet3_comp_ring *comp_ring = &tq->comp_ring;
bus_addr_t pa, comp_pa;
int idx;
ring->txd = vmxnet3_dma_allocmem(sc, NTXDESC * sizeof ring->txd[0], 512, &pa);
if (ring->txd == NULL)
tq->queue = queue;
if (vmx_dmamem_alloc(sc, &ring->dmamem,
NTXDESC * sizeof(struct vmxnet3_txdesc), 512) != 0)
return -1;
comp_ring->txcd = vmxnet3_dma_allocmem(sc,
NTXCOMPDESC * sizeof comp_ring->txcd[0], 512, &comp_pa);
if (comp_ring->txcd == NULL)
ring->txd = VMX_DMA_KVA(&ring->dmamem);
if (vmx_dmamem_alloc(sc, &comp_ring->dmamem,
NTXCOMPDESC * sizeof(comp_ring->txcd[0]), 512) != 0)
return -1;
comp_ring->txcd = VMX_DMA_KVA(&comp_ring->dmamem);
for (idx = 0; idx < NTXDESC; idx++) {
if (bus_dmamap_create(sc->sc_dmat, JUMBO_LEN, NTXSEGS,
@ -553,9 +580,9 @@ vmxnet3_alloc_txring(struct vmxnet3_softc *sc, int queue, int intr)
bzero(ts, sizeof *ts);
ts->npending = 0;
ts->intr_threshold = 1;
ts->cmd_ring = pa;
ts->cmd_ring = VMX_DMA_DVA(&ring->dmamem);
ts->cmd_ring_len = NTXDESC;
ts->comp_ring = comp_pa;
ts->comp_ring = VMX_DMA_DVA(&comp_ring->dmamem);
ts->comp_ring_len = NTXCOMPDESC;
ts->driver_data = ~0ULL;
ts->driver_data_len = 0;
@ -572,21 +599,20 @@ vmxnet3_alloc_rxring(struct vmxnet3_softc *sc, int queue, int intr)
struct vmxnet3_rxq_shared *rs;
struct vmxnet3_rxring *ring;
struct vmxnet3_comp_ring *comp_ring;
bus_addr_t pa[2], comp_pa;
int i, idx;
for (i = 0; i < 2; i++) {
ring = &rq->cmd_ring[i];
ring->rxd = vmxnet3_dma_allocmem(sc, NRXDESC * sizeof ring->rxd[0],
512, &pa[i]);
if (ring->rxd == NULL)
if (vmx_dmamem_alloc(sc, &ring->dmamem,
NRXDESC * sizeof(struct vmxnet3_rxdesc), 512) != 0)
return -1;
ring->rxd = VMX_DMA_KVA(&ring->dmamem);
}
comp_ring = &rq->comp_ring;
comp_ring->rxcd = vmxnet3_dma_allocmem(sc,
NRXCOMPDESC * sizeof comp_ring->rxcd[0], 512, &comp_pa);
if (comp_ring->rxcd == NULL)
if (vmx_dmamem_alloc(sc, &comp_ring->dmamem,
NRXCOMPDESC * sizeof(comp_ring->rxcd[0]), 512) != 0)
return -1;
comp_ring->rxcd = VMX_DMA_KVA(&comp_ring->dmamem);
for (i = 0; i < 2; i++) {
ring = &rq->cmd_ring[i];
@ -599,15 +625,19 @@ vmxnet3_alloc_rxring(struct vmxnet3_softc *sc, int queue, int intr)
JUMBO_LEN, 0, BUS_DMA_NOWAIT, &ring->dmap[idx]))
return -1;
}
ring->rs = rq->rs;
ring->rxh = (i == 0) ?
VMXNET3_BAR0_RXH1(queue) : VMXNET3_BAR0_RXH2(queue);
}
rs = rq->rs;
bzero(rs, sizeof *rs);
rs->cmd_ring[0] = pa[0];
rs->cmd_ring[1] = pa[1];
rs->cmd_ring[0] = VMX_DMA_DVA(&rq->cmd_ring[0].dmamem);
rs->cmd_ring[1] = VMX_DMA_DVA(&rq->cmd_ring[1].dmamem);
rs->cmd_ring_len[0] = NRXDESC;
rs->cmd_ring_len[1] = NRXDESC;
rs->comp_ring = comp_pa;
rs->comp_ring = VMX_DMA_DVA(&comp_ring->dmamem);
rs->comp_ring_len = NRXCOMPDESC;
rs->driver_data = ~0ULL;
rs->driver_data_len = 0;
@ -627,8 +657,16 @@ vmxnet3_txinit(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *tq)
ring->gen = VMX_TX_GEN;
comp_ring->next = 0;
comp_ring->gen = VMX_TXC_GEN;
bzero(ring->txd, NTXDESC * sizeof ring->txd[0]);
bzero(comp_ring->txcd, NTXCOMPDESC * sizeof comp_ring->txcd[0]);
memset(VMX_DMA_KVA(&ring->dmamem), 0,
VMX_DMA_LEN(&ring->dmamem));
bus_dmamap_sync(sc->sc_dmat, VMX_DMA_MAP(&ring->dmamem),
0, VMX_DMA_LEN(&ring->dmamem), BUS_DMASYNC_PREWRITE);
memset(VMX_DMA_KVA(&comp_ring->dmamem), 0,
VMX_DMA_LEN(&comp_ring->dmamem));
bus_dmamap_sync(sc->sc_dmat, VMX_DMA_MAP(&comp_ring->dmamem),
0, VMX_DMA_LEN(&comp_ring->dmamem), BUS_DMASYNC_PREREAD);
ifq_clr_oactive(tq->ifq);
}
void
@ -657,10 +695,17 @@ vmxnet3_rxfill(struct vmxnet3_rxring *ring)
MUTEX_ASSERT_LOCKED(&ring->mtx);
slots = if_rxr_get(&ring->rxr, NRXDESC);
if (slots == 0)
return;
prod = ring->fill;
rgen = ring->gen;
for (slots = if_rxr_get(&ring->rxr, NRXDESC); slots > 0; slots--) {
bus_dmamap_sync(sc->sc_dmat, VMX_DMA_MAP(&ring->dmamem),
0, VMX_DMA_LEN(&ring->dmamem), BUS_DMASYNC_POSTWRITE);
do {
KASSERT(ring->m[prod] == NULL);
m = MCLGETL(NULL, M_DONTWAIT, JUMBO_LEN);
@ -681,7 +726,9 @@ vmxnet3_rxfill(struct vmxnet3_rxring *ring)
rxd = &ring->rxd[prod];
rxd->rx_addr = htole64(DMAADDR(map));
membar_producer();
bus_dmamap_sync(sc->sc_dmat, VMX_DMA_MAP(&ring->dmamem),
0, VMX_DMA_LEN(&ring->dmamem),
BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTWRITE);
rxd->rx_word2 = (htole32(m->m_pkthdr.len & VMXNET3_RX_LEN_M) <<
VMXNET3_RX_LEN_S) | type | rgen;
@ -689,7 +736,11 @@ vmxnet3_rxfill(struct vmxnet3_rxring *ring)
prod = 0;
rgen ^= VMX_RX_GEN;
}
}
} while (--slots > 0);
bus_dmamap_sync(sc->sc_dmat, VMX_DMA_MAP(&ring->dmamem),
0, VMX_DMA_LEN(&ring->dmamem), BUS_DMASYNC_PREWRITE);
if_rxr_put(&ring->rxr, slots);
ring->fill = prod;
@ -697,6 +748,9 @@ vmxnet3_rxfill(struct vmxnet3_rxring *ring)
if (if_rxr_inuse(&ring->rxr) == 0)
timeout_add(&ring->refill, 1);
if (ring->rs->update_rxhead)
WRITE_BAR0(sc, ring->rxh, prod);
}
void
@ -708,10 +762,14 @@ vmxnet3_rxinit(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rq)
for (i = 0; i < 2; i++) {
ring = &rq->cmd_ring[i];
if_rxr_init(&ring->rxr, 2, NRXDESC - 1);
ring->fill = 0;
ring->gen = VMX_RX_GEN;
bzero(ring->rxd, NRXDESC * sizeof ring->rxd[0]);
if_rxr_init(&ring->rxr, 2, NRXDESC - 1);
memset(VMX_DMA_KVA(&ring->dmamem), 0,
VMX_DMA_LEN(&ring->dmamem));
bus_dmamap_sync(sc->sc_dmat, VMX_DMA_MAP(&ring->dmamem),
0, VMX_DMA_LEN(&ring->dmamem), BUS_DMASYNC_PREWRITE);
}
/* XXX only fill ring 0 */
@ -723,15 +781,26 @@ vmxnet3_rxinit(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rq)
comp_ring = &rq->comp_ring;
comp_ring->next = 0;
comp_ring->gen = VMX_RXC_GEN;
bzero(comp_ring->rxcd, NRXCOMPDESC * sizeof comp_ring->rxcd[0]);
memset(VMX_DMA_KVA(&comp_ring->dmamem), 0,
VMX_DMA_LEN(&comp_ring->dmamem));
bus_dmamap_sync(sc->sc_dmat, VMX_DMA_MAP(&comp_ring->dmamem),
0, VMX_DMA_LEN(&comp_ring->dmamem), BUS_DMASYNC_PREREAD);
}
void
vmxnet3_txstop(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *tq)
{
struct vmxnet3_txring *ring = &tq->cmd_ring;
struct vmxnet3_comp_ring *comp_ring = &tq->comp_ring;
struct ifqueue *ifq = tq->ifq;
int idx;
bus_dmamap_sync(sc->sc_dmat, VMX_DMA_MAP(&comp_ring->dmamem),
0, VMX_DMA_LEN(&comp_ring->dmamem), BUS_DMASYNC_POSTREAD);
bus_dmamap_sync(sc->sc_dmat, VMX_DMA_MAP(&ring->dmamem),
0, VMX_DMA_LEN(&ring->dmamem), BUS_DMASYNC_POSTWRITE);
for (idx = 0; idx < NTXDESC; idx++) {
if (ring->m[idx]) {
bus_dmamap_unload(sc->sc_dmat, ring->dmap[idx]);
@ -739,16 +808,25 @@ vmxnet3_txstop(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *tq)
ring->m[idx] = NULL;
}
}
ifq_purge(ifq);
ifq_clr_oactive(ifq);
}
void
vmxnet3_rxstop(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rq)
{
struct vmxnet3_rxring *ring;
struct vmxnet3_comp_ring *comp_ring = &rq->comp_ring;
int i, idx;
bus_dmamap_sync(sc->sc_dmat, VMX_DMA_MAP(&comp_ring->dmamem),
0, VMX_DMA_LEN(&comp_ring->dmamem), BUS_DMASYNC_POSTREAD);
for (i = 0; i < 2; i++) {
ring = &rq->cmd_ring[i];
bus_dmamap_sync(sc->sc_dmat, VMX_DMA_MAP(&ring->dmamem),
0, VMX_DMA_LEN(&ring->dmamem), BUS_DMASYNC_POSTWRITE);
timeout_del(&ring->refill);
for (idx = 0; idx < NRXDESC; idx++) {
struct mbuf *m = ring->m[idx];
@ -924,18 +1002,22 @@ vmxnet3_txintr(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *tq)
struct vmxnet3_txcompdesc *txcd;
bus_dmamap_t map;
struct mbuf *m;
u_int cons, next;
u_int prod, cons, next;
uint32_t rgen;
prod = ring->prod;
cons = ring->cons;
if (cons == ring->prod)
if (cons == prod)
return;
next = comp_ring->next;
rgen = comp_ring->gen;
/* postread */
for (;;) {
bus_dmamap_sync(sc->sc_dmat, VMX_DMA_MAP(&comp_ring->dmamem),
0, VMX_DMA_LEN(&comp_ring->dmamem), BUS_DMASYNC_POSTREAD);
do {
txcd = &comp_ring->txcd[next];
if ((txcd->txc_word3 & VMX_TXC_GEN) != rgen)
break;
@ -958,8 +1040,10 @@ vmxnet3_txintr(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *tq)
VMXNET3_TXC_EOPIDX_M;
cons++;
cons %= NTXDESC;
}
/* preread */
} while (cons != prod);
bus_dmamap_sync(sc->sc_dmat, VMX_DMA_MAP(&comp_ring->dmamem),
0, VMX_DMA_LEN(&comp_ring->dmamem), BUS_DMASYNC_PREREAD);
comp_ring->next = next;
comp_ring->gen = rgen;
@ -986,6 +1070,9 @@ vmxnet3_rxintr(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rq)
next = comp_ring->next;
rgen = comp_ring->gen;
bus_dmamap_sync(sc->sc_dmat, VMX_DMA_MAP(&comp_ring->dmamem),
0, VMX_DMA_LEN(&comp_ring->dmamem), BUS_DMASYNC_POSTREAD);
for (;;) {
rxcd = &comp_ring->rxcd[next];
if ((rxcd->rxc_word3 & VMX_RXC_GEN) != rgen)
@ -1018,14 +1105,14 @@ vmxnet3_rxintr(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rq)
if (letoh32(rxcd->rxc_word2 & VMXNET3_RXC_ERROR)) {
ifp->if_ierrors++;
m_freem(m);
goto skip_buffer;
continue;
}
len = letoh32((rxcd->rxc_word2 >> VMXNET3_RXC_LEN_S) &
VMXNET3_RXC_LEN_M);
if (len < VMXNET3_MIN_MTU) {
m_freem(m);
goto skip_buffer;
continue;
}
m->m_pkthdr.len = m->m_len = len;
@ -1042,22 +1129,11 @@ vmxnet3_rxintr(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rq)
}
ml_enqueue(&ml, m);
skip_buffer:
if (rq->rs->update_rxhead) {
u_int qid = letoh32((rxcd->rxc_word0 >>
VMXNET3_RXC_QID_S) & VMXNET3_RXC_QID_M);
idx = (idx + 1) % NRXDESC;
if (qid < sc->sc_nqueues) {
WRITE_BAR0(sc, VMXNET3_BAR0_RXH1(qid), idx);
} else {
qid -= sc->sc_nqueues;
WRITE_BAR0(sc, VMXNET3_BAR0_RXH2(qid), idx);
}
}
}
bus_dmamap_sync(sc->sc_dmat, VMX_DMA_MAP(&comp_ring->dmamem),
0, VMX_DMA_LEN(&comp_ring->dmamem), BUS_DMASYNC_PREREAD);
comp_ring->next = next;
comp_ring->gen = rgen;
@ -1152,7 +1228,6 @@ vmxnet3_stop(struct ifnet *ifp)
int queue;
ifp->if_flags &= ~IFF_RUNNING;
ifq_clr_oactive(&ifp->if_snd);
ifp->if_timer = 0;
vmxnet3_disable_all_intrs(sc);
@ -1218,7 +1293,6 @@ vmxnet3_init(struct vmxnet3_softc *sc)
vmxnet3_link_state(sc);
ifp->if_flags |= IFF_RUNNING;
ifq_clr_oactive(&ifp->if_snd);
return 0;
}
@ -1325,7 +1399,7 @@ vmxnet3_start(struct ifqueue *ifq)
struct vmxnet3_txring *ring = &tq->cmd_ring;
struct vmxnet3_txdesc *txd, *sop;
bus_dmamap_t map;
unsigned int prod, free, i;
unsigned int prod, free, i;
unsigned int post = 0;
uint32_t rgen, gen;
@ -1337,6 +1411,9 @@ vmxnet3_start(struct ifqueue *ifq)
free += NTXDESC;
free -= prod;
bus_dmamap_sync(sc->sc_dmat, VMX_DMA_MAP(&ring->dmamem),
0, VMX_DMA_LEN(&ring->dmamem), BUS_DMASYNC_POSTWRITE);
rgen = ring->gen;
for (;;) {
@ -1391,21 +1468,26 @@ vmxnet3_start(struct ifqueue *ifq)
VMXNET3_TX_VLANTAG_M) << VMXNET3_TX_VLANTAG_S);
}
ring->prod = prod;
/* Change the ownership by flipping the "generation" bit */
membar_producer();
bus_dmamap_sync(sc->sc_dmat, VMX_DMA_MAP(&ring->dmamem),
0, VMX_DMA_LEN(&ring->dmamem),
BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTWRITE);
sop->tx_word2 ^= VMX_TX_GEN;
free -= i;
post = 1;
}
bus_dmamap_sync(sc->sc_dmat, VMX_DMA_MAP(&ring->dmamem),
0, VMX_DMA_LEN(&ring->dmamem), BUS_DMASYNC_PREWRITE);
if (!post)
return;
ring->prod = prod;
ring->gen = rgen;
WRITE_BAR0(sc, VMXNET3_BAR0_TXH(0), prod);
WRITE_BAR0(sc, VMXNET3_BAR0_TXH(tq->queue), prod);
}
void
@ -1469,6 +1551,49 @@ vmxnet3_dma_allocmem(struct vmxnet3_softc *sc, u_int size, u_int align, bus_addr
return va;
}
/*
 * Allocate a single physically contiguous DMA region of "size" bytes,
 * aligned to "align", and leave it created, allocated, kva-mapped and
 * loaded in *vdm.  The memory is zeroed (BUS_DMA_ZERO) and usable with
 * 64-bit DMA addressing.  Returns 0 on success, 1 on failure with all
 * partially acquired resources released.
 */
static int
vmx_dmamem_alloc(struct vmxnet3_softc *sc, struct vmx_dmamem *vdm,
    bus_size_t size, u_int align)
{
	bus_dma_tag_t t = sc->sc_dmat;

	vdm->vdm_size = size;

	/* One map with a single segment covering the whole region. */
	if (bus_dmamap_create(t, vdm->vdm_size, 1, vdm->vdm_size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
	    &vdm->vdm_map) != 0)
		return (1);

	if (bus_dmamem_alloc(t, vdm->vdm_size, align, 0, &vdm->vdm_seg, 1,
	    &vdm->vdm_nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
		goto fail_destroy;

	if (bus_dmamem_map(t, &vdm->vdm_seg, vdm->vdm_nsegs, vdm->vdm_size,
	    &vdm->vdm_kva, BUS_DMA_WAITOK) != 0)
		goto fail_free;

	if (bus_dmamap_load(t, vdm->vdm_map, vdm->vdm_kva, vdm->vdm_size,
	    NULL, BUS_DMA_WAITOK) != 0)
		goto fail_unmap;

	return (0);

	/* Unwind in the reverse order of acquisition. */
fail_unmap:
	bus_dmamem_unmap(t, vdm->vdm_kva, vdm->vdm_size);
fail_free:
	bus_dmamem_free(t, &vdm->vdm_seg, 1);
fail_destroy:
	bus_dmamap_destroy(t, vdm->vdm_map);
	return (1);
}
#ifdef notyet
/*
 * Release a region set up by vmx_dmamem_alloc(): unload the DMA map,
 * tear down the kernel mapping, free the backing segment and destroy
 * the map — the exact reverse of the allocation order.
 * Compiled out (notyet) because no caller frees these rings at present.
 */
static void
vmx_dmamem_free(struct vmxnet3_softc *sc, struct vmx_dmamem *vdm)
{
	bus_dmamap_unload(sc->sc_dmat, vdm->vdm_map);
	bus_dmamem_unmap(sc->sc_dmat, vdm->vdm_kva, vdm->vdm_size);
	bus_dmamem_free(sc->sc_dmat, &vdm->vdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, vdm->vdm_map);
}
#endif
#if NKSTAT > 0
/*
* "hardware" counters are exported as separate kstats for each tx
@ -1536,7 +1661,7 @@ vmx_kstat_read(struct kstat *ks)
for (i = 0; i < n; i++)
kstat_kv_u64(&kvs[i]) = lemtoh64(&vs[i]);
TIMEVAL_TO_TIMESPEC(&sc->sc_kstat_updated, &ks->ks_updated);
TIMEVAL_TO_TIMESPEC(&sc->sc_kstat_updated, &ks->ks_updated);
return (0);
}

View file

@ -1,4 +1,4 @@
$OpenBSD: pcidevs,v 1.2043 2023/07/08 09:11:51 kettenis Exp $
$OpenBSD: pcidevs,v 1.2044 2023/08/06 14:40:25 jsg Exp $
/* $NetBSD: pcidevs,v 1.30 1997/06/24 06:20:24 thorpej Exp $ */
/*
@ -2103,6 +2103,7 @@ product ATI NAVI31_1 0x744c Navi 31
product ATI NAVI31_3 0x745e Navi 31
product ATI NAVI33_1 0x7480 Navi 33
product ATI NAVI33_2 0x7483 Navi 33
product ATI NAVI33_3 0x7489 Navi 33
product ATI RADEON_9000IGP 0x7834 Radeon 9000/9100 IGP
product ATI RADEON_RS350IGP 0x7835 Radeon RS350IGP
product ATI RS690_HB 0x7910 RS690 Host

View file

@ -2,7 +2,7 @@
* THIS FILE AUTOMATICALLY GENERATED. DO NOT EDIT.
*
* generated from:
* OpenBSD: pcidevs,v 1.2043 2023/07/08 09:11:51 kettenis Exp
* OpenBSD: pcidevs,v 1.2044 2023/08/06 14:40:25 jsg Exp
*/
/* $NetBSD: pcidevs,v 1.30 1997/06/24 06:20:24 thorpej Exp $ */
@ -2108,6 +2108,7 @@
#define PCI_PRODUCT_ATI_NAVI31_3 0x745e /* Navi 31 */
#define PCI_PRODUCT_ATI_NAVI33_1 0x7480 /* Navi 33 */
#define PCI_PRODUCT_ATI_NAVI33_2 0x7483 /* Navi 33 */
#define PCI_PRODUCT_ATI_NAVI33_3 0x7489 /* Navi 33 */
#define PCI_PRODUCT_ATI_RADEON_9000IGP 0x7834 /* Radeon 9000/9100 IGP */
#define PCI_PRODUCT_ATI_RADEON_RS350IGP 0x7835 /* Radeon RS350IGP */
#define PCI_PRODUCT_ATI_RS690_HB 0x7910 /* RS690 Host */

View file

@ -2,7 +2,7 @@
* THIS FILE AUTOMATICALLY GENERATED. DO NOT EDIT.
*
* generated from:
* OpenBSD: pcidevs,v 1.2043 2023/07/08 09:11:51 kettenis Exp
* OpenBSD: pcidevs,v 1.2044 2023/08/06 14:40:25 jsg Exp
*/
/* $NetBSD: pcidevs,v 1.30 1997/06/24 06:20:24 thorpej Exp $ */
@ -6555,6 +6555,10 @@ static const struct pci_known_product pci_known_products[] = {
PCI_VENDOR_ATI, PCI_PRODUCT_ATI_NAVI33_2,
"Navi 33",
},
{
PCI_VENDOR_ATI, PCI_PRODUCT_ATI_NAVI33_3,
"Navi 33",
},
{
PCI_VENDOR_ATI, PCI_PRODUCT_ATI_RADEON_9000IGP,
"Radeon 9000/9100 IGP",

View file

@ -1,4 +1,4 @@
/* $OpenBSD: wsemul_vt100.c,v 1.46 2023/07/24 17:03:32 miod Exp $ */
/* $OpenBSD: wsemul_vt100.c,v 1.47 2023/08/02 19:20:19 miod Exp $ */
/* $NetBSD: wsemul_vt100.c,v 1.13 2000/04/28 21:56:16 mycroft Exp $ */
/*
@ -862,7 +862,7 @@ wsemul_vt100_output_dcs(struct wsemul_vt100_emuldata *edp,
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9':
/* argument digit */
if (edp->nargs > VT100_EMUL_NARGS - 1)
if (edp->nargs >= VT100_EMUL_NARGS)
break;
edp->args[edp->nargs] = (edp->args[edp->nargs] * 10) +
(instate->inchar - '0');
@ -1084,6 +1084,7 @@ wsemul_vt100_output_csi(struct wsemul_vt100_emuldata *edp,
edp->nargs++;
rc = wsemul_vt100_handle_csi(edp, instate);
if (rc != 0) {
/* undo nargs progress */
edp->nargs = oargs;
return rc;
}

View file

@ -1,4 +1,4 @@
/* $OpenBSD: kern_clock.c,v 1.109 2023/07/25 18:16:19 cheloha Exp $ */
/* $OpenBSD: kern_clock.c,v 1.111 2023/08/05 20:07:55 cheloha Exp $ */
/* $NetBSD: kern_clock.c,v 1.34 1996/06/09 04:51:03 briggs Exp $ */
/*-
@ -84,7 +84,8 @@ int profhz;
int profprocs;
int ticks = INT_MAX - (15 * 60 * HZ);
volatile unsigned long jiffies = ULONG_MAX - (10 * 60 * HZ);
/* Don't force early wrap around, triggers bug in inteldrm */
volatile unsigned long jiffies;
/*
* Initialize clock frequencies and start both clocks running.
@ -104,43 +105,14 @@ initclocks(void)
inittimecounter();
}
/*
* hardclock does the accounting needed for ITIMER_PROF and ITIMER_VIRTUAL.
* We don't want to send signals with psignal from hardclock because it makes
* MULTIPROCESSOR locking very complicated. Instead, to use an idea from
* FreeBSD, we set a flag on the thread and when it goes to return to
* userspace it signals itself.
*/
/*
* The real-time timer, interrupting hz times per second.
*/
void
hardclock(struct clockframe *frame)
{
struct proc *p;
struct cpu_info *ci = curcpu();
p = curproc;
if (p && ((p->p_flag & (P_SYSTEM | P_WEXIT)) == 0)) {
struct process *pr = p->p_p;
/*
* Run current process's virtual and profile time, as needed.
*/
if (CLKF_USERMODE(frame) &&
timespecisset(&pr->ps_timer[ITIMER_VIRTUAL].it_value) &&
itimerdecr(&pr->ps_timer[ITIMER_VIRTUAL], tick_nsec) == 0) {
atomic_setbits_int(&p->p_flag, P_ALRMPEND);
need_proftick(p);
}
if (timespecisset(&pr->ps_timer[ITIMER_PROF].it_value) &&
itimerdecr(&pr->ps_timer[ITIMER_PROF], tick_nsec) == 0) {
atomic_setbits_int(&p->p_flag, P_PROFPEND);
need_proftick(p);
}
}
if (--ci->ci_schedstate.spc_rrticks <= 0)
roundrobin(ci);

View file

@ -1,4 +1,4 @@
/* $OpenBSD: kern_clockintr.c,v 1.29 2023/07/27 17:52:53 cheloha Exp $ */
/* $OpenBSD: kern_clockintr.c,v 1.30 2023/08/05 20:07:55 cheloha Exp $ */
/*
* Copyright (c) 2003 Dale Rahn <drahn@openbsd.org>
* Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
@ -196,6 +196,10 @@ clockintr_cpu_init(const struct intrclock *ic)
* XXX Need to find a better place to do this. We can't do it in
* sched_init_cpu() because initclocks() runs after it.
*/
if (spc->spc_itimer->cl_expiration == 0) {
clockintr_stagger(spc->spc_itimer, hardclock_period,
multiplier, MAXCPUS);
}
if (spc->spc_profclock->cl_expiration == 0) {
clockintr_stagger(spc->spc_profclock, profclock_period,
multiplier, MAXCPUS);

View file

@ -1,4 +1,4 @@
/* $OpenBSD: kern_sched.c,v 1.81 2023/07/27 17:52:53 cheloha Exp $ */
/* $OpenBSD: kern_sched.c,v 1.84 2023/08/05 20:07:55 cheloha Exp $ */
/*
* Copyright (c) 2007, 2008 Artur Grabowski <art@openbsd.org>
*
@ -24,6 +24,7 @@
#include <sys/clockintr.h>
#include <sys/resourcevar.h>
#include <sys/task.h>
#include <sys/time.h>
#include <sys/smr.h>
#include <sys/tracepoint.h>
@ -87,6 +88,14 @@ sched_init_cpu(struct cpu_info *ci)
spc->spc_idleproc = NULL;
if (spc->spc_itimer == NULL) {
spc->spc_itimer = clockintr_establish(&ci->ci_queue,
itimer_update);
if (spc->spc_itimer == NULL) {
panic("%s: clockintr_establish itimer_update",
__func__);
}
}
if (spc->spc_profclock == NULL) {
spc->spc_profclock = clockintr_establish(&ci->ci_queue,
profclock);
@ -223,6 +232,10 @@ sched_exit(struct proc *p)
timespecsub(&ts, &spc->spc_runtime, &ts);
timespecadd(&p->p_rtime, &ts, &p->p_rtime);
if (ISSET(spc->spc_schedflags, SPCF_ITIMER)) {
atomic_clearbits_int(&spc->spc_schedflags, SPCF_ITIMER);
clockintr_cancel(spc->spc_itimer);
}
if (ISSET(spc->spc_schedflags, SPCF_PROFCLOCK)) {
atomic_clearbits_int(&spc->spc_schedflags, SPCF_PROFCLOCK);
clockintr_cancel(spc->spc_profclock);
@ -262,7 +275,6 @@ setrunqueue(struct cpu_info *ci, struct proc *p, uint8_t prio)
KASSERT(ci != NULL);
SCHED_ASSERT_LOCKED();
KASSERT(!ISSET(p->p_flag, P_WSLEEP) || p->p_stat == SSTOP);
p->p_cpu = ci;
p->p_stat = SRUN;
@ -373,7 +385,6 @@ sched_choosecpu_fork(struct proc *parent, int flags)
{
#ifdef MULTIPROCESSOR
struct cpu_info *choice = NULL;
fixpt_t load, best_load = ~0;
int run, best_run = INT_MAX;
struct cpu_info *ci;
struct cpuset set;
@ -407,13 +418,10 @@ sched_choosecpu_fork(struct proc *parent, int flags)
while ((ci = cpuset_first(&set)) != NULL) {
cpuset_del(&set, ci);
load = ci->ci_schedstate.spc_ldavg;
run = ci->ci_schedstate.spc_nrun;
if (choice == NULL || run < best_run ||
(run == best_run &&load < best_load)) {
if (choice == NULL || run < best_run) {
choice = ci;
best_load = load;
best_run = run;
}
}
@ -606,11 +614,6 @@ sched_proc_to_cpu_cost(struct cpu_info *ci, struct proc *p)
if (CPU_IS_PRIMARY(ci))
cost += sched_cost_runnable;
/*
* Higher load on the destination means we don't want to go there.
*/
cost += ((sched_cost_load * spc->spc_ldavg) >> FSHIFT);
/*
* If the proc is on this cpu already, lower the cost by how much
* it has been running and an estimate of its footprint.

View file

@ -1,4 +1,4 @@
/* $OpenBSD: kern_time.c,v 1.163 2023/02/15 10:07:50 claudio Exp $ */
/* $OpenBSD: kern_time.c,v 1.164 2023/08/05 20:07:55 cheloha Exp $ */
/* $NetBSD: kern_time.c,v 1.20 1996/02/18 11:57:06 fvdl Exp $ */
/*
@ -35,6 +35,7 @@
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/clockintr.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/proc.h>
@ -43,6 +44,7 @@
#include <sys/stdint.h>
#include <sys/pledge.h>
#include <sys/task.h>
#include <sys/time.h>
#include <sys/timeout.h>
#include <sys/timetc.h>
@ -52,6 +54,7 @@
#include <dev/clock_subr.h>
int itimerfix(struct itimerval *);
void process_reset_itimer_flag(struct process *);
/*
* Time of day and interval timer support.
@ -551,6 +554,10 @@ setitimer(int which, const struct itimerval *itv, struct itimerval *olditv)
timeout_del(&pr->ps_realit_to);
}
*itimer = its;
if (which == ITIMER_VIRTUAL || which == ITIMER_PROF) {
process_reset_itimer_flag(pr);
need_resched(curcpu());
}
}
if (which == ITIMER_REAL)
@ -729,49 +736,72 @@ itimerfix(struct itimerval *itv)
}
/*
* Decrement an interval timer by the given number of nanoseconds.
* Decrement an interval timer by the given duration.
* If the timer expires and it is periodic then reload it. When reloading
* the timer we subtract any overrun from the next period so that the timer
* does not drift.
*/
int
itimerdecr(struct itimerspec *itp, long nsec)
itimerdecr(struct itimerspec *itp, const struct timespec *decrement)
{
struct timespec decrement;
NSEC_TO_TIMESPEC(nsec, &decrement);
mtx_enter(&itimer_mtx);
/*
* Double-check that the timer is enabled. A different thread
* in setitimer(2) may have disabled it while we were entering
* the mutex.
*/
if (!timespecisset(&itp->it_value)) {
mtx_leave(&itimer_mtx);
timespecsub(&itp->it_value, decrement, &itp->it_value);
if (itp->it_value.tv_sec >= 0 && timespecisset(&itp->it_value))
return (1);
}
/*
* The timer is enabled. Update and reload it as needed.
*/
timespecsub(&itp->it_value, &decrement, &itp->it_value);
if (itp->it_value.tv_sec >= 0 && timespecisset(&itp->it_value)) {
mtx_leave(&itimer_mtx);
return (1);
}
if (!timespecisset(&itp->it_interval)) {
timespecclear(&itp->it_value);
mtx_leave(&itimer_mtx);
return (0);
}
while (itp->it_value.tv_sec < 0 || !timespecisset(&itp->it_value))
timespecadd(&itp->it_value, &itp->it_interval, &itp->it_value);
mtx_leave(&itimer_mtx);
return (0);
}
/*
 * Clock interrupt handler that drives the ITIMER_VIRTUAL and
 * ITIMER_PROF interval timers of the process running on this CPU.
 * Invoked from clockintr context; cf is the interrupted clockframe.
 */
void
itimer_update(struct clockintr *cl, void *cf)
{
	struct timespec elapsed;
	uint64_t nsecs;
	struct clockframe *frame = cf;
	struct proc *p = curproc;
	struct process *pr;

	/* Nothing to charge without a live, non-system current thread. */
	if (p == NULL || ISSET(p->p_flag, P_SYSTEM | P_WEXIT))
		return;

	pr = p->p_p;
	/* PS_ITIMER is clear when neither timer is armed; bail early. */
	if (!ISSET(pr->ps_flags, PS_ITIMER))
		return;

	/*
	 * Advance the clock interrupt in whole hardclock periods;
	 * presumably clockintr_advance() returns the number of periods
	 * consumed, so nsecs is the elapsed time to charge — TODO
	 * confirm against clockintr(9).
	 */
	nsecs = clockintr_advance(cl, hardclock_period) * hardclock_period;
	NSEC_TO_TIMESPEC(nsecs, &elapsed);

	/* NOTE(review): itimer_mtx appears to guard ps_timer[] — confirm. */
	mtx_enter(&itimer_mtx);
	/* ITIMER_VIRTUAL is charged only for time spent in user mode. */
	if (CLKF_USERMODE(frame) &&
	    timespecisset(&pr->ps_timer[ITIMER_VIRTUAL].it_value) &&
	    itimerdecr(&pr->ps_timer[ITIMER_VIRTUAL], &elapsed) == 0) {
		/* Timer fired: recompute PS_ITIMER and post SIGVTALRM. */
		process_reset_itimer_flag(pr);
		atomic_setbits_int(&p->p_flag, P_ALRMPEND);
		need_proftick(p);
	}
	/* ITIMER_PROF is charged regardless of user/kernel mode. */
	if (timespecisset(&pr->ps_timer[ITIMER_PROF].it_value) &&
	    itimerdecr(&pr->ps_timer[ITIMER_PROF], &elapsed) == 0) {
		process_reset_itimer_flag(pr);
		atomic_setbits_int(&p->p_flag, P_PROFPEND);
		need_proftick(p);
	}
	mtx_leave(&itimer_mtx);
}
/*
 * Recompute the process's PS_ITIMER flag: set it while either the
 * virtual or the profiling interval timer is armed, clear it when
 * both are disabled.
 */
void
process_reset_itimer_flag(struct process *ps)
{
	int armed;

	armed = timespecisset(&ps->ps_timer[ITIMER_VIRTUAL].it_value) ||
	    timespecisset(&ps->ps_timer[ITIMER_PROF].it_value);

	if (armed)
		atomic_setbits_int(&ps->ps_flags, PS_ITIMER);
	else
		atomic_clearbits_int(&ps->ps_flags, PS_ITIMER);
}
struct mutex ratecheck_mtx = MUTEX_INITIALIZER(IPL_HIGH);
/*

View file

@ -1,4 +1,4 @@
/* $OpenBSD: sched_bsd.c,v 1.78 2023/07/25 18:16:19 cheloha Exp $ */
/* $OpenBSD: sched_bsd.c,v 1.79 2023/08/05 20:07:55 cheloha Exp $ */
/* $NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $ */
/*-
@ -350,7 +350,11 @@ mi_switch(void)
/* add the time counts for this thread to the process's total */
tuagg_unlocked(pr, p);
/* Stop the profclock if it's running. */
/* Stop any optional clock interrupts. */
if (ISSET(spc->spc_schedflags, SPCF_ITIMER)) {
atomic_clearbits_int(&spc->spc_schedflags, SPCF_ITIMER);
clockintr_cancel(spc->spc_itimer);
}
if (ISSET(spc->spc_schedflags, SPCF_PROFCLOCK)) {
atomic_clearbits_int(&spc->spc_schedflags, SPCF_PROFCLOCK);
clockintr_cancel(spc->spc_profclock);
@ -400,7 +404,13 @@ mi_switch(void)
*/
KASSERT(p->p_cpu == curcpu());
/* Start the profclock if profil(2) is enabled. */
/* Start any optional clock interrupts needed by the thread. */
if (ISSET(p->p_p->ps_flags, PS_ITIMER)) {
atomic_setbits_int(&p->p_cpu->ci_schedstate.spc_schedflags,
SPCF_ITIMER);
clockintr_advance(p->p_cpu->ci_schedstate.spc_itimer,
hardclock_period);
}
if (ISSET(p->p_p->ps_flags, PS_PROFIL)) {
atomic_setbits_int(&p->p_cpu->ci_schedstate.spc_schedflags,
SPCF_PROFCLOCK);

View file

@ -1,4 +1,4 @@
/* $OpenBSD: uipc_socket.c,v 1.306 2023/07/22 14:30:39 mvs Exp $ */
/* $OpenBSD: uipc_socket.c,v 1.307 2023/08/03 09:49:08 mvs Exp $ */
/* $NetBSD: uipc_socket.c,v 1.21 1996/02/04 02:17:52 christos Exp $ */
/*
@ -1789,12 +1789,12 @@ sosetopt(struct socket *so, int level, int optname, struct mbuf *m)
{
int error = 0;
soassertlocked(so);
if (level != SOL_SOCKET) {
if (so->so_proto->pr_ctloutput) {
solock(so);
error = (*so->so_proto->pr_ctloutput)(PRCO_SETOPT, so,
level, optname, m);
sounlock(so);
return (error);
}
error = ENOPROTOOPT;
@ -1813,9 +1813,16 @@ sosetopt(struct socket *so, int level, int optname, struct mbuf *m)
mtod(m, struct linger *)->l_linger < 0 ||
mtod(m, struct linger *)->l_linger > SHRT_MAX)
return (EINVAL);
so->so_linger = mtod(m, struct linger *)->l_linger;
/* FALLTHROUGH */
solock(so);
so->so_linger = mtod(m, struct linger *)->l_linger;
if (*mtod(m, int *))
so->so_options |= optname;
else
so->so_options &= ~optname;
sounlock(so);
break;
case SO_BINDANY:
case SO_DEBUG:
case SO_KEEPALIVE:
@ -1828,12 +1835,15 @@ sosetopt(struct socket *so, int level, int optname, struct mbuf *m)
case SO_ZEROIZE:
if (m == NULL || m->m_len < sizeof (int))
return (EINVAL);
solock(so);
if (*mtod(m, int *))
so->so_options |= optname;
else
so->so_options &= ~optname;
break;
sounlock(so);
break;
case SO_DONTROUTE:
if (m == NULL || m->m_len < sizeof (int))
return (EINVAL);
@ -1853,23 +1863,32 @@ sosetopt(struct socket *so, int level, int optname, struct mbuf *m)
cnt = *mtod(m, int *);
if ((long)cnt <= 0)
cnt = 1;
switch (optname) {
solock(so);
switch (optname) {
case SO_SNDBUF:
if (so->so_snd.sb_state & SS_CANTSENDMORE)
return (EINVAL);
if (so->so_snd.sb_state & SS_CANTSENDMORE) {
error = EINVAL;
break;
}
if (sbcheckreserve(cnt, so->so_snd.sb_wat) ||
sbreserve(so, &so->so_snd, cnt))
return (ENOBUFS);
sbreserve(so, &so->so_snd, cnt)) {
error = ENOBUFS;
break;
}
so->so_snd.sb_wat = cnt;
break;
case SO_RCVBUF:
if (so->so_rcv.sb_state & SS_CANTRCVMORE)
return (EINVAL);
if (so->so_rcv.sb_state & SS_CANTRCVMORE) {
error = EINVAL;
break;
}
if (sbcheckreserve(cnt, so->so_rcv.sb_wat) ||
sbreserve(so, &so->so_rcv, cnt))
return (ENOBUFS);
sbreserve(so, &so->so_rcv, cnt)) {
error = ENOBUFS;
break;
}
so->so_rcv.sb_wat = cnt;
break;
@ -1884,6 +1903,7 @@ sosetopt(struct socket *so, int level, int optname, struct mbuf *m)
so->so_rcv.sb_hiwat : cnt;
break;
}
sounlock(so);
break;
}
@ -1903,8 +1923,9 @@ sosetopt(struct socket *so, int level, int optname, struct mbuf *m)
return (EDOM);
if (nsecs == 0)
nsecs = INFSLP;
switch (optname) {
solock(so);
switch (optname) {
case SO_SNDTIMEO:
so->so_snd.sb_timeo_nsecs = nsecs;
break;
@ -1912,6 +1933,7 @@ sosetopt(struct socket *so, int level, int optname, struct mbuf *m)
so->so_rcv.sb_timeo_nsecs = nsecs;
break;
}
sounlock(so);
break;
}
@ -1923,19 +1945,20 @@ sosetopt(struct socket *so, int level, int optname, struct mbuf *m)
so->so_proto->pr_domain;
level = dom->dom_protosw->pr_protocol;
solock(so);
error = (*so->so_proto->pr_ctloutput)
(PRCO_SETOPT, so, level, optname, m);
return (error);
}
error = ENOPROTOOPT;
sounlock(so);
} else
error = ENOPROTOOPT;
break;
#ifdef SOCKET_SPLICE
case SO_SPLICE:
solock(so);
if (m == NULL) {
error = sosplice(so, -1, 0, NULL);
} else if (m->m_len < sizeof(int)) {
return (EINVAL);
error = EINVAL;
} else if (m->m_len < sizeof(struct splice)) {
error = sosplice(so, *mtod(m, int *), 0, NULL);
} else {
@ -1944,6 +1967,7 @@ sosetopt(struct socket *so, int level, int optname, struct mbuf *m)
mtod(m, struct splice *)->sp_max,
&mtod(m, struct splice *)->sp_idle);
}
sounlock(so);
break;
#endif /* SOCKET_SPLICE */
@ -1951,10 +1975,6 @@ sosetopt(struct socket *so, int level, int optname, struct mbuf *m)
error = ENOPROTOOPT;
break;
}
if (error == 0 && so->so_proto->pr_ctloutput) {
(*so->so_proto->pr_ctloutput)(PRCO_SETOPT, so,
level, optname, m);
}
}
return (error);

View file

@ -1,4 +1,4 @@
/* $OpenBSD: uipc_syscalls.c,v 1.212 2023/02/10 14:34:17 visa Exp $ */
/* $OpenBSD: uipc_syscalls.c,v 1.213 2023/08/03 09:49:08 mvs Exp $ */
/* $NetBSD: uipc_syscalls.c,v 1.19 1996/02/09 19:00:48 christos Exp $ */
/*
@ -1232,9 +1232,7 @@ sys_setsockopt(struct proc *p, void *v, register_t *retval)
m->m_len = SCARG(uap, valsize);
}
so = fp->f_data;
solock(so);
error = sosetopt(so, SCARG(uap, level), SCARG(uap, name), m);
sounlock(so);
bad:
m_freem(m);
FRELE(fp, p);

View file

@ -722,8 +722,8 @@ unsigned long ZEXPORT crc32_z(unsigned long crc, const unsigned char FAR *buf,
words = (z_word_t const *)buf;
/* Do endian check at execution time instead of compile time, since ARM
processors can change the endianess at execution time. If the
compiler knows what the endianess will be, it can optimize out the
processors can change the endianness at execution time. If the
compiler knows what the endianness will be, it can optimize out the
check and the unused branch. */
endian = 1;
if (*(unsigned char *)&endian) {

View file

@ -230,7 +230,7 @@ ZEXTERN int ZEXPORT deflateInit(z_streamp strm, int level);
Initializes the internal stream state for compression. The fields
zalloc, zfree and opaque must be initialized before by the caller. If
zalloc and zfree are set to Z_NULL, deflateInit updates them to use default
allocation functions.
allocation functions. total_in, total_out, adler, and msg are initialized.
The compression level must be Z_DEFAULT_COMPRESSION, or between 0 and 9:
1 gives best speed, 9 gives best compression, 0 gives no compression at all
@ -383,7 +383,8 @@ ZEXTERN int ZEXPORT inflateInit(z_streamp strm);
read or consumed. The allocation of a sliding window will be deferred to
the first call of inflate (if the decompression does not complete on the
first call). If zalloc and zfree are set to Z_NULL, inflateInit updates
them to use default allocation functions.
them to use default allocation functions. total_in, total_out, adler, and
msg are initialized.
inflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough
memory, Z_VERSION_ERROR if the zlib library version is incompatible with the
@ -696,7 +697,7 @@ ZEXTERN int ZEXPORT deflateReset(z_streamp strm);
This function is equivalent to deflateEnd followed by deflateInit, but
does not free and reallocate the internal compression state. The stream
will leave the compression level and any other attributes that may have been
set unchanged.
set unchanged. total_in, total_out, adler, and msg are initialized.
deflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source
stream state was inconsistent (such as zalloc or state being Z_NULL).
@ -821,8 +822,9 @@ ZEXTERN int ZEXPORT deflateSetHeader(z_streamp strm,
gzip file" and give up.
If deflateSetHeader is not used, the default gzip header has text false,
the time set to zero, and os set to 255, with no extra, name, or comment
fields. The gzip header is returned to the default state by deflateReset().
the time set to zero, and os set to the current operating system, with no
extra, name, or comment fields. The gzip header is returned to the default
state by deflateReset().
deflateSetHeader returns Z_OK if success, or Z_STREAM_ERROR if the source
stream state was inconsistent.
@ -961,6 +963,7 @@ ZEXTERN int ZEXPORT inflateReset(z_streamp strm);
This function is equivalent to inflateEnd followed by inflateInit,
but does not free and reallocate the internal decompression state. The
stream will keep attributes that may have been set by inflateInit2.
total_in, total_out, adler, and msg are initialized.
inflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source
stream state was inconsistent (such as zalloc or state being Z_NULL).

View file

@ -1,4 +1,4 @@
/* $OpenBSD: bfd.c,v 1.79 2023/07/12 16:10:45 mvs Exp $ */
/* $OpenBSD: bfd.c,v 1.80 2023/08/03 09:49:08 mvs Exp $ */
/*
* Copyright (c) 2016-2018 Peter Hessler <phessler@openbsd.org>
@ -452,9 +452,7 @@ bfd_listener(struct bfd_config *bfd, unsigned int port)
mopt->m_len = sizeof(int);
ip = mtod(mopt, int *);
*ip = MAXTTL;
solock(so);
error = sosetopt(so, IPPROTO_IP, IP_MINTTL, mopt);
sounlock(so);
m_freem(mopt);
if (error) {
printf("%s: sosetopt error %d\n",
@ -531,9 +529,7 @@ bfd_sender(struct bfd_config *bfd, unsigned int port)
mopt->m_len = sizeof(int);
ip = mtod(mopt, int *);
*ip = IP_PORTRANGE_HIGH;
solock(so);
error = sosetopt(so, IPPROTO_IP, IP_PORTRANGE, mopt);
sounlock(so);
m_freem(mopt);
if (error) {
printf("%s: sosetopt error %d\n",
@ -545,9 +541,7 @@ bfd_sender(struct bfd_config *bfd, unsigned int port)
mopt->m_len = sizeof(int);
ip = mtod(mopt, int *);
*ip = MAXTTL;
solock(so);
error = sosetopt(so, IPPROTO_IP, IP_TTL, mopt);
sounlock(so);
m_freem(mopt);
if (error) {
printf("%s: sosetopt error %d\n",
@ -559,9 +553,7 @@ bfd_sender(struct bfd_config *bfd, unsigned int port)
mopt->m_len = sizeof(int);
ip = mtod(mopt, int *);
*ip = IPTOS_PREC_INTERNETCONTROL;
solock(so);
error = sosetopt(so, IPPROTO_IP, IP_TOS, mopt);
sounlock(so);
m_freem(mopt);
if (error) {
printf("%s: sosetopt error %d\n",

View file

@ -1,4 +1,4 @@
/* $OpenBSD: if_pfsync.c,v 1.318 2023/07/06 04:55:05 dlg Exp $ */
/* $OpenBSD: if_pfsync.c,v 1.319 2023/07/31 11:13:09 dlg Exp $ */
/*
* Copyright (c) 2002 Michael Shalayeff
@ -1676,11 +1676,14 @@ pfsync_init_state(struct pf_state *st, const struct pf_state_key *skw,
}
/* state came off the wire */
if (ISSET(st->state_flags, PFSTATE_ACK)) {
CLR(st->state_flags, PFSTATE_ACK);
if (ISSET(flags, PFSYNC_SI_PFSYNC)) {
if (ISSET(st->state_flags, PFSTATE_ACK)) {
CLR(st->state_flags, PFSTATE_ACK);
/* peer wants an iack, not an insert */
st->sync_state = PFSYNC_S_SYNC;
/* peer wants an iack, not an insert */
st->sync_state = PFSYNC_S_SYNC;
} else
st->sync_state = PFSYNC_S_PFSYNC;
}
}
@ -1713,6 +1716,10 @@ pfsync_insert_state(struct pf_state *st)
pfsync_q_ins(s, st, PFSYNC_S_IACK);
pfsync_slice_sched(s); /* the peer is waiting */
break;
case PFSYNC_S_PFSYNC:
/* state was just inserted by pfsync */
st->sync_state = PFSYNC_S_NONE;
break;
default:
panic("%s: state %p unexpected sync_state %d",
__func__, st, st->sync_state);
@ -2829,7 +2836,7 @@ pfsync_in_ins(struct pfsync_softc *sc,
continue;
}
if (pf_state_import(sp, 0) == ENOMEM) {
if (pf_state_import(sp, PFSYNC_SI_PFSYNC) == ENOMEM) {
/* drop out, but process the rest of the actions */
break;
}
@ -3009,7 +3016,7 @@ pfsync_in_upd(struct pfsync_softc *sc,
if (st == NULL) {
/* insert the update */
PF_LOCK();
error = pf_state_import(sp, 0);
error = pf_state_import(sp, PFSYNC_SI_PFSYNC);
if (error)
pfsyncstat_inc(pfsyncs_badstate);
PF_UNLOCK();

View file

@ -1,4 +1,4 @@
/* $OpenBSD: if_pfsync.h,v 1.60 2023/07/06 04:55:05 dlg Exp $ */
/* $OpenBSD: if_pfsync.h,v 1.61 2023/07/31 11:13:10 dlg Exp $ */
/*
* Copyright (c) 2001 Michael Shalayeff
@ -307,6 +307,7 @@ enum pfsync_counters {
#define PFSYNC_S_NONE 0xd0
#define PFSYNC_S_SYNC 0xd1
#define PFSYNC_S_PFSYNC 0xd2
#define PFSYNC_S_DEAD 0xde
int pfsync_input4(struct mbuf **, int *, int, int);
@ -316,6 +317,7 @@ int pfsync_sysctl(int *, u_int, void *, size_t *,
#define PFSYNC_SI_IOCTL 0x01
#define PFSYNC_SI_CKSUM 0x02
#define PFSYNC_SI_ACK 0x04
#define PFSYNC_SI_PFSYNC 0x08
int pfsync_state_import(struct pfsync_state *, int);
void pfsync_state_export(struct pfsync_state *,
struct pf_state *);

578
sys/net/if_sec.c Normal file
View file

@ -0,0 +1,578 @@
/* $OpenBSD: if_sec.c,v 1.1 2023/08/07 01:57:33 dlg Exp $ */
/*
* Copyright (c) 2022 The University of Queensland
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This code was written by David Gwynne <dlg@uq.edu.au> as part
* of the Information Technology Infrastructure Group (ITIG) in the
* Faculty of Engineering, Architecture and Information Technology
* (EAIT).
*/
#ifndef IPSEC
#error sec enabled without IPSEC defined
#endif
#include "bpfilter.h"
#include "pf.h"
#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/timeout.h>
#include <sys/queue.h>
#include <sys/tree.h>
#include <sys/pool.h>
#include <sys/smr.h>
#include <sys/refcnt.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/route.h>
#include <net/toeplitz.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_ecn.h>
#include <netinet/ip_ipsp.h>
#ifdef INET6
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_var.h>
#endif
#ifdef MPLS
#include <netmpls/mpls.h>
#endif /* MPLS */
#if NBPFILTER > 0
#include <net/bpf.h>
#endif
#if NPF > 0
#include <net/pfvar.h>
#endif
#define SEC_MTU 1280
#define SEC_MTU_MIN 1280
#define SEC_MTU_MAX 32768 /* could get closer to 64k... */
struct sec_softc {
struct ifnet sc_if;
struct task sc_send;
unsigned int sc_unit;
SMR_SLIST_ENTRY(sec_softc) sc_entry;
struct refcnt sc_refs;
};
SMR_SLIST_HEAD(sec_bucket, sec_softc);
static int sec_output(struct ifnet *, struct mbuf *, struct sockaddr *,
struct rtentry *);
static int sec_enqueue(struct ifnet *, struct mbuf *);
static void sec_send(void *);
static void sec_start(struct ifnet *);
static int sec_ioctl(struct ifnet *, u_long, caddr_t);
static int sec_up(struct sec_softc *);
static int sec_down(struct sec_softc *);
static int sec_clone_create(struct if_clone *, int);
static int sec_clone_destroy(struct ifnet *);
static struct tdb *
sec_tdb_get(unsigned int);
static void sec_tdb_gc(void *);
static struct if_clone sec_cloner =
IF_CLONE_INITIALIZER("sec", sec_clone_create, sec_clone_destroy);
static struct sec_bucket sec_map[256] __aligned(CACHELINESIZE);
static struct tdb *sec_tdbh[256] __aligned(CACHELINESIZE);
static struct tdb *sec_tdb_gc_list;
static struct task sec_tdb_gc_task =
TASK_INITIALIZER(sec_tdb_gc, NULL);
static struct mutex sec_tdb_gc_mtx =
MUTEX_INITIALIZER(IPL_MPFLOOR);
/*
 * secattach: pseudo-device attach hook; registers the sec(4) interface
 * cloner so "ifconfig sec0 create" works.  The count argument is unused.
 */
void
secattach(int n)
{
	if_clone_attach(&sec_cloner);
}
/*
 * sec_clone_create: create a sec(4) interface instance for the given
 * cloner unit.  Allocates the softc, wires up the p2p input/output
 * handlers, and attaches the interface (plus a bpf DLT_LOOP tap).
 * Always returns 0.
 */
static int
sec_clone_create(struct if_clone *ifc, int unit)
{
	struct sec_softc *sc;
	struct ifnet *ifp;

	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO);

	/* the unit number keys lookups in sec_map/sec_tdbh */
	sc->sc_unit = unit;

	/* transmit is deferred to a systq task, see sec_enqueue/sec_send */
	task_set(&sc->sc_send, sec_send, sc);

	snprintf(sc->sc_if.if_xname, sizeof sc->sc_if.if_xname, "%s%d",
	    ifc->ifc_name, unit);

	ifp = &sc->sc_if;
	ifp->if_softc = sc;
	ifp->if_type = IFT_TUNNEL;
	ifp->if_mtu = SEC_MTU;
	ifp->if_flags = IFF_POINTOPOINT|IFF_MULTICAST;
	ifp->if_xflags = IFXF_CLONED;
	ifp->if_bpf_mtap = p2p_bpf_mtap;
	ifp->if_input = p2p_input;
	ifp->if_output = sec_output;
	ifp->if_enqueue = sec_enqueue;
	ifp->if_start = sec_start;
	ifp->if_ioctl = sec_ioctl;
	ifp->if_rtrequest = p2p_rtrequest;

	if_counters_alloc(ifp);
	if_attach(ifp);
	if_alloc_sadl(ifp);

#if NBPFILTER > 0
	bpfattach(&ifp->if_bpf, ifp, DLT_LOOP, sizeof(uint32_t));
#endif

	return (0);
}
/*
 * sec_clone_destroy: tear down a sec(4) interface.  Brings the
 * interface down first (under NET_LOCK) so no SMR readers or pending
 * send tasks can still reference the softc when it is freed.
 */
static int
sec_clone_destroy(struct ifnet *ifp)
{
	struct sec_softc *sc = ifp->if_softc;

	NET_LOCK();
	if (ISSET(ifp->if_flags, IFF_RUNNING))
		sec_down(sc);
	NET_UNLOCK();

	if_detach(ifp);

	free(sc, M_DEVBUF, sizeof(*sc));

	return (0);
}
/*
 * sec_ioctl: interface ioctl handler for sec(4).
 *
 * Handles bring-up/tear-down via SIOCSIFFLAGS and MTU changes within
 * [SEC_MTU_MIN, SEC_MTU_MAX]; address and multicast requests are
 * accepted as no-ops.  Anything else is rejected with ENOTTY.
 */
static int
sec_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct sec_softc *sc = ifp->if_softc;
	struct ifreq *req = (struct ifreq *)data;
	int error = 0;

	switch (cmd) {
	case SIOCSIFADDR:
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* nothing to do, but not an error */
		break;

	case SIOCSIFFLAGS:
		/* reconcile IFF_RUNNING with the requested IFF_UP state */
		if (ISSET(ifp->if_flags, IFF_UP)) {
			if (!ISSET(ifp->if_flags, IFF_RUNNING))
				error = sec_up(sc);
		} else if (ISSET(ifp->if_flags, IFF_RUNNING))
			error = sec_down(sc);
		break;

	case SIOCSIFMTU:
		if (req->ifr_mtu < SEC_MTU_MIN ||
		    req->ifr_mtu > SEC_MTU_MAX) {
			error = EINVAL;
			break;
		}
		ifp->if_mtu = req->ifr_mtu;
		break;

	default:
		error = ENOTTY;
		break;
	}

	return (error);
}
/*
 * sec_up: bring the interface up.  Initialises the softc refcnt and
 * publishes the softc into its sec_map hash bucket so ipsec_input can
 * find it via sec_get().  Insertion is serialised by the net lock;
 * IFF_RUNNING is set before the softc becomes visible to readers.
 */
static int
sec_up(struct sec_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;
	unsigned int idx = stoeplitz_h32(sc->sc_unit) % nitems(sec_map);

	NET_ASSERT_LOCKED();
	SET(ifp->if_flags, IFF_RUNNING);

	refcnt_init(&sc->sc_refs);

	SMR_SLIST_INSERT_HEAD_LOCKED(&sec_map[idx], sc, sc_entry);

	return (0);
}
/*
 * sec_down: bring the interface down.  The teardown order matters:
 * clear IFF_RUNNING, unpublish the softc from the SMR list, then
 * smr_barrier() waits for any readers still inside a read section,
 * taskq_del_barrier() drains a pending/running send task, and finally
 * refcnt_finalize() sleeps until every reference taken by sec_get()
 * has been released.
 */
static int
sec_down(struct sec_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;
	unsigned int idx = stoeplitz_h32(sc->sc_unit) % nitems(sec_map);

	NET_ASSERT_LOCKED();
	CLR(ifp->if_flags, IFF_RUNNING);

	SMR_SLIST_REMOVE_LOCKED(&sec_map[idx], sc, sec_softc, sc_entry);
	smr_barrier();

	taskq_del_barrier(systq, &sc->sc_send);

	refcnt_finalize(&sc->sc_refs, "secdown");

	return (0);
}
/*
 * sec_output: if_output handler for sec(4).
 *
 * Validates that the interface is running and the address family is
 * one we can carry (INET, and INET6/MPLS when configured), rejects
 * packets that have already looped through this interface (via the
 * PACKET_TAG_GRE tags), records the family in the packet header, and
 * hands the mbuf to the interface send queue.  On failure the mbuf is
 * freed and an error returned; enqueue failures bump ifc_oerrors.
 */
static int
sec_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
    struct rtentry *rt)
{
	struct m_tag *mt = NULL;
	int error;

	if (!ISSET(ifp->if_flags, IFF_RUNNING)) {
		error = ENETDOWN;
		goto drop;
	}

	switch (dst->sa_family) {
	case AF_INET:
		break;
#ifdef INET6
	case AF_INET6:
		break;
#endif
#ifdef MPLS
	case AF_MPLS:
		break;
#endif
	default:
		error = EAFNOSUPPORT;
		goto drop;
	}

	/* refuse packets that have already been through this interface */
	while ((mt = m_tag_find(m, PACKET_TAG_GRE, mt)) != NULL) {
		if (*(int *)(mt + 1) == ifp->if_index) {
			error = EIO;
			goto drop;
		}
	}

	m->m_pkthdr.ph_family = dst->sa_family;

	error = if_enqueue(ifp, m);
	if (error != 0)
		counters_inc(ifp->if_counters, ifc_oerrors);

	return (error);

drop:
	m_freem(m);
	return (error);
}
static int
sec_enqueue(struct ifnet *ifp, struct mbuf *m)
{
struct sec_softc *sc = ifp->if_softc;
struct ifqueue *ifq = &ifp->if_snd;
int error;
error = ifq_enqueue(ifq, m);
if (error)
return (error);
task_add(systq, &sc->sc_send);
return (0);
}
/*
 * sec_send: deferred transmit task (runs on systq).  Looks up the
 * output tdb for this interface unit and pushes every queued packet
 * through ipsp_process_packet() under NET_LOCK.  If no usable tdb
 * exists the queue is purged and the drops counted as oerrors.
 *
 * The IFF_RUNNING check is unlocked; sec_down() removes this task with
 * taskq_del_barrier() before the softc goes away, so the flag only
 * gates a benign race with teardown.
 */
static void
sec_send(void *arg)
{
	struct sec_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_if;
	struct ifqueue *ifq = &ifp->if_snd;
	struct tdb *tdb;
	struct mbuf *m;
	int error;

	if (!ISSET(ifp->if_flags, IFF_RUNNING))
		return;

	tdb = sec_tdb_get(sc->sc_unit);
	if (tdb == NULL)
		goto purge;

	NET_LOCK();
	while ((m = ifq_dequeue(ifq)) != NULL) {
		/* point-to-point: packets are never broadcast/multicast */
		CLR(m->m_flags, M_BCAST|M_MCAST);

#if NPF > 0
		pf_pkt_addr_changed(m);
#endif

		error = ipsp_process_packet(m, tdb,
		    m->m_pkthdr.ph_family, /* already tunnelled? */ 0);
		if (error != 0)
			counters_inc(ifp->if_counters, ifc_oerrors);
	}
	NET_UNLOCK();

	tdb_unref(tdb);

	return;

purge:
	counters_add(ifp->if_counters, ifc_oerrors, ifq_purge(ifq));
}
/*
 * sec_start: if_start handler.  sec(4) transmits from the sec_send
 * task, never from start, so anything that lands here is dropped and
 * accounted as output errors.
 */
static void
sec_start(struct ifnet *ifp)
{
	unsigned int drops;

	drops = ifq_purge(&ifp->if_snd);
	counters_add(ifp->if_counters, ifc_oerrors, drops);
}
/*
* ipsec_input handling
*/
/*
 * sec_get: look up a running sec(4) interface by unit number.
 *
 * Walks the SMR hash bucket inside an SMR read section and takes a
 * reference on a match before leaving the section, so the softc stays
 * valid after return.  Returns NULL if no interface with that unit is
 * up.  The caller must release the reference with sec_put().
 */
struct sec_softc *
sec_get(unsigned int unit)
{
	unsigned int idx = stoeplitz_h32(unit) % nitems(sec_map);
	struct sec_bucket *sb = &sec_map[idx];
	struct sec_softc *sc;

	smr_read_enter();
	SMR_SLIST_FOREACH(sc, sb, sc_entry) {
		if (sc->sc_unit == unit) {
			refcnt_take(&sc->sc_refs);
			break;
		}
	}
	smr_read_leave();

	return (sc);
}
/*
 * sec_input: hand a decapsulated IPsec packet to the sec(4) interface.
 *
 * Strips the outer IP/IPv6 header (af describes the outer header at
 * the front of the mbuf), maps the inner protocol to an address
 * family for the packet header, and injects the packet into the
 * network stack via if_vinput().
 *
 * NOTE(review): the AF_INET case reads the header with mtod() —
 * presumably the caller guarantees the outer header is contiguous in
 * the first mbuf; confirm against ipsec_common_input_cb().  In the
 * default case unhandled_af() is expected not to return, otherwise
 * hlen would be used uninitialised.
 */
void
sec_input(struct sec_softc *sc, int af, int proto, struct mbuf *m)
{
	struct ip *iph;
	int hlen;

	switch (af) {
	case AF_INET:
		iph = mtod(m, struct ip *);
		hlen = iph->ip_hl << 2;
		break;
#ifdef INET6
	case AF_INET6:
		hlen = sizeof(struct ip6_hdr);
		break;
#endif
	default:
		unhandled_af(af);
	}

	/* drop the outer header */
	m_adj(m, hlen);

	/* map the inner protocol to the family seen by the stack */
	switch (proto) {
	case IPPROTO_IPV4:
		af = AF_INET;
		break;
	case IPPROTO_IPV6:
		af = AF_INET6;
		break;
	case IPPROTO_MPLS:
		af = AF_MPLS;
		break;
	default:
		af = AF_UNSPEC;
		break;
	}

	m->m_pkthdr.ph_family = af;

	if_vinput(&sc->sc_if, m);
}
/*
 * sec_put: release a softc reference taken by sec_get(), waking a
 * sec_down() that may be waiting in refcnt_finalize().
 */
void
sec_put(struct sec_softc *sc)
{
	refcnt_rele_wake(&sc->sc_refs);
}
/*
* tdb handling
*/
/*
 * sec_tdb_valid: does this tdb belong in the sec(4) output table?
 * Only tunnel-mode, non-invalid, output-direction SAs qualify.  The
 * caller must only pass tdbs that are marked TDBF_IFACE.
 */
static int
sec_tdb_valid(struct tdb *tdb)
{
	KASSERT(ISSET(tdb->tdb_flags, TDBF_IFACE));

	return (ISSET(tdb->tdb_flags, TDBF_TUNNELING) &&
	    !ISSET(tdb->tdb_flags, TDBF_INVALID) &&
	    tdb->tdb_iface_dir == IPSP_DIRECTION_OUT);
}
/*
* these are called from netinet/ip_ipsp.c with tdb_sadb_mtx held,
* which we rely on to serialise modifications to the sec_tdbh.
*/
/*
 * sec_tdb_insert: publish a tdb into the sec_tdbh hash for lookup by
 * sec_tdb_get().  Called with tdb_sadb_mtx held (see the comment
 * above), which serialises writers; readers use SMR.  A reference is
 * taken for the list pointer.  The tdb's dnext pointer is set before
 * the bucket head is swung so concurrent SMR readers always see a
 * consistent list.
 */
void
sec_tdb_insert(struct tdb *tdb)
{
	unsigned int idx;
	struct tdb **tdbp;
	struct tdb *ltdb;

	if (!sec_tdb_valid(tdb))
		return;

	idx = stoeplitz_h32(tdb->tdb_iface) % nitems(sec_tdbh);
	tdbp = &sec_tdbh[idx];

	tdb_ref(tdb); /* take a ref for the SMR pointer */

	/* wire the tdb into the head of the list */
	ltdb = SMR_PTR_GET_LOCKED(tdbp);
	SMR_PTR_SET_LOCKED(&tdb->tdb_dnext, ltdb);
	SMR_PTR_SET_LOCKED(tdbp, tdb);
}
/*
 * sec_tdb_remove: unlink a tdb from the sec_tdbh hash.  Called with
 * tdb_sadb_mtx held (serialises against sec_tdb_insert).  The list's
 * reference is not dropped here — the tdb is handed to the gc task,
 * which waits for SMR readers to drain before tdb_unref().  Panics if
 * the tdb is not found, since insert/remove must be balanced.
 */
void
sec_tdb_remove(struct tdb *tdb)
{
	struct tdb **tdbp;
	struct tdb *ltdb;
	unsigned int idx;

	if (!sec_tdb_valid(tdb))
		return;

	idx = stoeplitz_h32(tdb->tdb_iface) % nitems(sec_tdbh);
	tdbp = &sec_tdbh[idx];

	while ((ltdb = SMR_PTR_GET_LOCKED(tdbp)) != NULL) {
		if (ltdb == tdb) {
			/* take the tdb out of the list */
			ltdb = SMR_PTR_GET_LOCKED(&tdb->tdb_dnext);
			SMR_PTR_SET_LOCKED(tdbp, ltdb);

			/* move the ref to the gc */
			mtx_enter(&sec_tdb_gc_mtx);
			tdb->tdb_dnext = sec_tdb_gc_list;
			sec_tdb_gc_list = tdb;
			mtx_leave(&sec_tdb_gc_mtx);
			task_add(systq, &sec_tdb_gc_task);

			return;
		}

		tdbp = &ltdb->tdb_dnext;
	}

	panic("%s: unable to find tdb %p", __func__, tdb);
}
/*
 * sec_tdb_gc: garbage-collect tdbs removed by sec_tdb_remove() (runs
 * on systq).  Detaches the whole pending list under the gc mutex,
 * then issues a single smr_barrier() so any sec_tdb_get() readers
 * still traversing the old list pointers finish before the refs are
 * dropped.  tdb_unref() is called under NET_LOCK.
 */
static void
sec_tdb_gc(void *null)
{
	struct tdb *tdb, *ntdb;

	mtx_enter(&sec_tdb_gc_mtx);
	tdb = sec_tdb_gc_list;
	sec_tdb_gc_list = NULL;
	mtx_leave(&sec_tdb_gc_mtx);

	if (tdb == NULL)
		return;

	smr_barrier();

	NET_LOCK();
	do {
		/* tdb_dnext was repurposed as the gc list link */
		ntdb = tdb->tdb_dnext;
		tdb_unref(tdb);
		tdb = ntdb;
	} while (tdb != NULL);
	NET_UNLOCK();
}
/*
 * sec_tdb_get: look up the output tdb for a sec(4) interface unit.
 *
 * Walks the sec_tdbh SMR hash bucket inside an SMR read section,
 * skipping deleted entries, and takes a reference on a match before
 * leaving the section so the tdb stays valid for the caller.  Returns
 * NULL if no live tdb is bound to the unit.  The caller must release
 * the reference with tdb_unref().
 *
 * Fix: the bucket index was computed with nitems(sec_map) but used to
 * index sec_tdbh.  Both tables happen to have 256 entries today, so
 * behavior is unchanged, but the modulus must match the table being
 * indexed or resizing either array would cause out-of-bounds access
 * and missed lookups.
 */
struct tdb *
sec_tdb_get(unsigned int unit)
{
	unsigned int idx;
	struct tdb **tdbp;
	struct tdb *tdb;

	idx = stoeplitz_h32(unit) % nitems(sec_tdbh);
	tdbp = &sec_tdbh[idx];

	smr_read_enter();
	while ((tdb = SMR_PTR_GET(tdbp)) != NULL) {
		KASSERT(ISSET(tdb->tdb_flags, TDBF_IFACE));
		if (!ISSET(tdb->tdb_flags, TDBF_DELETED) &&
		    tdb->tdb_iface == unit) {
			tdb_ref(tdb);
			break;
		}
		tdbp = &tdb->tdb_dnext;
	}
	smr_read_leave();

	return (tdb);
}

44
sys/net/if_sec.h Normal file
View file

@ -0,0 +1,44 @@
/* $OpenBSD: if_sec.h,v 1.1 2023/08/07 01:57:33 dlg Exp $ */
/*
* Copyright (c) 2023 David Gwynne <dlg@openbsd.org>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _NET_IF_SEC_H
#define _NET_IF_SEC_H
#ifdef _KERNEL
struct sec_softc;
struct tdb;
/*
* let the IPsec stack hand packets to sec(4) for input
*/
struct sec_softc *sec_get(unsigned int);
void sec_input(struct sec_softc * , int, int,
struct mbuf *);
void sec_put(struct sec_softc *);
/*
* let the IPsec stack give tdbs to sec(4) for output
*/
void sec_tdb_insert(struct tdb *);
void sec_tdb_remove(struct tdb *);
#endif /* _KERNEL */
#endif /* _NET_IF_SEC_H */

View file

@ -1,4 +1,4 @@
/* $OpenBSD: if_vxlan.c,v 1.92 2023/04/13 02:19:05 jsg Exp $ */
/* $OpenBSD: if_vxlan.c,v 1.93 2023/08/03 09:49:08 mvs Exp $ */
/*
* Copyright (c) 2021 David Gwynne <dlg@openbsd.org>
@ -934,9 +934,9 @@ vxlan_tep_add_addr(struct vxlan_softc *sc, const union vxlan_addr *addr,
goto free;
solock(so);
sotoinpcb(so)->inp_upcall = vxlan_input;
sotoinpcb(so)->inp_upcall_arg = vt;
sounlock(so);
m_inithdr(&m);
m.m_len = sizeof(vt->vt_rdomain);
@ -973,12 +973,12 @@ vxlan_tep_add_addr(struct vxlan_softc *sc, const union vxlan_addr *addr,
unhandled_af(vt->vt_af);
}
solock(so);
error = sobind(so, &m, curproc);
sounlock(so);
if (error != 0)
goto close;
sounlock(so);
rw_assert_wrlock(&vxlan_lock);
TAILQ_INSERT_TAIL(&vxlan_teps, vt, vt_entry);
@ -987,7 +987,6 @@ vxlan_tep_add_addr(struct vxlan_softc *sc, const union vxlan_addr *addr,
return (0);
close:
sounlock(so);
soclose(so, MSG_DONTWAIT);
free:
free(vt, M_DEVBUF, sizeof(*vt));

View file

@ -1,4 +1,4 @@
/* $OpenBSD: if_wg.c,v 1.28 2023/06/01 18:57:53 kn Exp $ */
/* $OpenBSD: if_wg.c,v 1.29 2023/08/03 09:49:08 mvs Exp $ */
/*
* Copyright (C) 2015-2020 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
@ -720,14 +720,16 @@ wg_socket_open(struct socket **so, int af, in_port_t *port,
solock(*so);
sotoinpcb(*so)->inp_upcall = wg_input;
sotoinpcb(*so)->inp_upcall_arg = upcall_arg;
sounlock(*so);
if ((ret = sosetopt(*so, SOL_SOCKET, SO_RTABLE, &mrtable)) == 0) {
solock(*so);
if ((ret = sobind(*so, &mhostnam, curproc)) == 0) {
*port = sotoinpcb(*so)->inp_lport;
*rtable = sotoinpcb(*so)->inp_rtableid;
}
sounlock(*so);
}
sounlock(*so);
if (ret != 0)
wg_socket_close(so);

View file

@ -1,4 +1,4 @@
/* $OpenBSD: ifq.c,v 1.49 2023/01/09 03:39:14 dlg Exp $ */
/* $OpenBSD: ifq.c,v 1.50 2023/07/30 05:39:52 dlg Exp $ */
/*
* Copyright (c) 2015 David Gwynne <dlg@openbsd.org>
@ -147,6 +147,20 @@ ifq_start_task(void *p)
ifp->if_qstart(ifq);
}
void
ifq_set_oactive(struct ifqueue *ifq)
{
if (ifq->ifq_oactive)
return;
mtx_enter(&ifq->ifq_mtx);
if (!ifq->ifq_oactive) {
ifq->ifq_oactive = 1;
ifq->ifq_oactives++;
}
mtx_leave(&ifq->ifq_mtx);
}
void
ifq_restart_task(void *p)
{
@ -202,6 +216,7 @@ struct ifq_kstat_data {
struct kstat_kv kd_qlen;
struct kstat_kv kd_maxqlen;
struct kstat_kv kd_oactive;
struct kstat_kv kd_oactives;
};
static const struct ifq_kstat_data ifq_kstat_tpl = {
@ -218,6 +233,7 @@ static const struct ifq_kstat_data ifq_kstat_tpl = {
KSTAT_KV_UNIT_INITIALIZER("maxqlen",
KSTAT_KV_T_UINT32, KSTAT_KV_U_PACKETS),
KSTAT_KV_INITIALIZER("oactive", KSTAT_KV_T_BOOL),
KSTAT_KV_INITIALIZER("oactives", KSTAT_KV_T_COUNTER32),
};
int
@ -234,6 +250,7 @@ ifq_kstat_copy(struct kstat *ks, void *dst)
kstat_kv_u32(&kd->kd_qlen) = ifq->ifq_len;
kstat_kv_u32(&kd->kd_maxqlen) = ifq->ifq_maxlen;
kstat_kv_bool(&kd->kd_oactive) = ifq->ifq_oactive;
kstat_kv_u32(&kd->kd_oactives) = ifq->ifq_oactives;
return (0);
}
@ -243,7 +260,7 @@ void
ifq_init(struct ifqueue *ifq, struct ifnet *ifp, unsigned int idx)
{
ifq->ifq_if = ifp;
ifq->ifq_softnet = net_tq(ifp->if_index + idx);
ifq->ifq_softnet = net_tq(idx);
ifq->ifq_softc = NULL;
mtx_init(&ifq->ifq_mtx, IPL_NET);
@ -635,7 +652,7 @@ void
ifiq_init(struct ifiqueue *ifiq, struct ifnet *ifp, unsigned int idx)
{
ifiq->ifiq_if = ifp;
ifiq->ifiq_softnet = net_tq(ifp->if_index + idx);
ifiq->ifiq_softnet = net_tq(idx);
ifiq->ifiq_softc = NULL;
mtx_init(&ifiq->ifiq_mtx, IPL_NET);

View file

@ -1,4 +1,4 @@
/* $OpenBSD: ifq.h,v 1.37 2023/01/09 03:37:44 dlg Exp $ */
/* $OpenBSD: ifq.h,v 1.38 2023/07/30 05:39:52 dlg Exp $ */
/*
* Copyright (c) 2015 David Gwynne <dlg@openbsd.org>
@ -54,6 +54,7 @@ struct ifqueue {
uint64_t ifq_qdrops;
uint64_t ifq_errors;
uint64_t ifq_mcasts;
uint32_t ifq_oactives;
struct kstat *ifq_kstat;
@ -441,7 +442,7 @@ void *ifq_q_enter(struct ifqueue *, const struct ifq_ops *);
void ifq_q_leave(struct ifqueue *, void *);
void ifq_serialize(struct ifqueue *, struct task *);
void ifq_barrier(struct ifqueue *);
void ifq_set_oactive(struct ifqueue *);
int ifq_deq_sleep(struct ifqueue *, struct mbuf **, int, int,
const char *, volatile unsigned int *,
@ -457,12 +458,6 @@ ifq_is_priq(struct ifqueue *ifq)
return (ifq->ifq_ops == ifq_priq_ops);
}
static inline void
ifq_set_oactive(struct ifqueue *ifq)
{
ifq->ifq_oactive = 1;
}
static inline void
ifq_clr_oactive(struct ifqueue *ifq)
{

View file

@ -1,4 +1,4 @@
/* $OpenBSD: pf.c,v 1.1183 2023/07/07 08:05:02 bluhm Exp $ */
/* $OpenBSD: pf.c,v 1.1184 2023/07/31 11:13:09 dlg Exp $ */
/*
* Copyright (c) 2001 Daniel Hartmeier
@ -4698,6 +4698,10 @@ pf_create_state(struct pf_pdesc *pd, struct pf_rule *r, struct pf_rule *a,
sni->sn->states++;
}
#if NPFSYNC > 0
pfsync_init_state(st, *skw, *sks, 0);
#endif
if (pf_state_insert(BOUND_IFACE(r, pd->kif), skw, sks, st)) {
*sks = *skw = NULL;
REASON_SET(&reason, PFRES_STATEINS);

View file

@ -1,4 +1,4 @@
/* $OpenBSD: pfkeyv2.c,v 1.256 2023/04/22 20:51:56 mvs Exp $ */
/* $OpenBSD: pfkeyv2.c,v 1.257 2023/08/07 03:35:06 dlg Exp $ */
/*
* @(#)COPYRIGHT 1.1 (NRL) 17 January 1995
@ -868,6 +868,9 @@ pfkeyv2_get(struct tdb *tdb, void **headers, void **buffer, int *lenp,
i += sizeof(struct sadb_x_tap);
#endif
if (ISSET(tdb->tdb_flags, TDBF_IFACE))
i += sizeof(struct sadb_x_iface);
if (lenp)
*lenp = i;
@ -979,6 +982,12 @@ pfkeyv2_get(struct tdb *tdb, void **headers, void **buffer, int *lenp,
}
#endif
/* Export sec(4) interface information, if present */
if (ISSET(tdb->tdb_flags, TDBF_IFACE)) {
headers[SADB_X_EXT_IFACE] = p;
export_iface(&p, tdb);
}
headers[SADB_X_EXT_COUNTER] = p;
export_counter(&p, tdb);
@ -1360,6 +1369,7 @@ pfkeyv2_dosend(struct socket *so, void *message, int len)
import_tag(newsa, headers[SADB_X_EXT_TAG]);
import_tap(newsa, headers[SADB_X_EXT_TAP]);
#endif
import_iface(newsa, headers[SADB_X_EXT_IFACE]);
/* Exclude sensitive data from reply message. */
headers[SADB_EXT_KEY_AUTH] = NULL;
@ -1411,6 +1421,8 @@ pfkeyv2_dosend(struct socket *so, void *message, int len)
import_tag(sa2, headers[SADB_X_EXT_TAG]);
import_tap(sa2, headers[SADB_X_EXT_TAP]);
#endif
import_iface(sa2, headers[SADB_X_EXT_IFACE]);
if (headers[SADB_EXT_ADDRESS_SRC] ||
headers[SADB_EXT_ADDRESS_PROXY]) {
mtx_enter(&tdb_sadb_mtx);
@ -1535,6 +1547,7 @@ pfkeyv2_dosend(struct socket *so, void *message, int len)
import_tag(newsa, headers[SADB_X_EXT_TAG]);
import_tap(newsa, headers[SADB_X_EXT_TAP]);
#endif
import_iface(newsa, headers[SADB_X_EXT_IFACE]);
/* Exclude sensitive data from reply message. */
headers[SADB_EXT_KEY_AUTH] = NULL;

View file

@ -1,4 +1,4 @@
/* $OpenBSD: pfkeyv2.h,v 1.93 2022/08/27 20:28:01 mvs Exp $ */
/* $OpenBSD: pfkeyv2.h,v 1.94 2023/08/07 03:35:06 dlg Exp $ */
/*
* @(#)COPYRIGHT 1.1 (NRL) January 1998
*
@ -252,6 +252,14 @@ struct sadb_x_mtu {
uint32_t sadb_x_mtu_mtu;
};
struct sadb_x_iface {
uint16_t sadb_x_iface_len;
uint16_t sadb_x_iface_exttype;
uint32_t sadb_x_iface_unit;
uint8_t sadb_x_iface_direction;
uint8_t sadb_x_iface_reserved[7];
};
#ifdef _KERNEL
#define SADB_X_GETSPROTO(x) \
( (x) == SADB_SATYPE_AH ? IPPROTO_AH :\
@ -300,7 +308,8 @@ struct sadb_x_mtu {
#define SADB_X_EXT_RDOMAIN 37
#define SADB_X_EXT_MTU 38
#define SADB_X_EXT_REPLAY 39
#define SADB_EXT_MAX 39
#define SADB_X_EXT_IFACE 40
#define SADB_EXT_MAX 40
/* Fix pfkeyv2.c struct pfkeyv2_socket if SATYPE_MAX > 31 */
#define SADB_SATYPE_UNSPEC 0
@ -438,6 +447,7 @@ void export_mtu(void **, struct tdb *);
void export_tap(void **, struct tdb *);
void export_satype(void **, struct tdb *);
void export_counter(void **, struct tdb *);
void export_iface(void **, struct tdb *);
void import_address(struct sockaddr *, struct sadb_address *);
void import_identities(struct ipsec_ids **, int, struct sadb_ident *,
@ -452,6 +462,7 @@ void import_udpencap(struct tdb *, struct sadb_x_udpencap *);
void import_tag(struct tdb *, struct sadb_x_tag *);
void import_rdomain(struct tdb *, struct sadb_x_rdomain *);
void import_tap(struct tdb *, struct sadb_x_tap *);
void import_iface(struct tdb *, struct sadb_x_iface *);
extern const uint64_t sadb_exts_allowed_out[SADB_MAX+1];
extern const uint64_t sadb_exts_required_out[SADB_MAX+1];

View file

@ -1,4 +1,4 @@
/* $OpenBSD: pfkeyv2_convert.c,v 1.79 2022/01/20 17:13:12 bluhm Exp $ */
/* $OpenBSD: pfkeyv2_convert.c,v 1.80 2023/08/07 03:35:06 dlg Exp $ */
/*
* The author of this code is Angelos D. Keromytis (angelos@keromytis.org)
*
@ -951,6 +951,30 @@ export_tap(void **p, struct tdb *tdb)
}
#endif
/* Import interface information for SA */
void
import_iface(struct tdb *tdb, struct sadb_x_iface *siface)
{
if (siface != NULL) {
SET(tdb->tdb_flags, TDBF_IFACE);
tdb->tdb_iface = siface->sadb_x_iface_unit;
tdb->tdb_iface_dir = siface->sadb_x_iface_direction;
}
}
/* Export interface information for SA */
void
export_iface(void **p, struct tdb *tdb)
{
struct sadb_x_iface *siface = (struct sadb_x_iface *)*p;
siface->sadb_x_iface_len = sizeof(*siface) / sizeof(uint64_t);
siface->sadb_x_iface_unit = tdb->tdb_iface;
siface->sadb_x_iface_direction = tdb->tdb_iface_dir;
*p += sizeof(*siface);
}
void
export_satype(void **p, struct tdb *tdb)
{

View file

@ -1,4 +1,4 @@
/* $OpenBSD: pfkeyv2_parsemessage.c,v 1.60 2021/07/14 22:39:26 tobhe Exp $ */
/* $OpenBSD: pfkeyv2_parsemessage.c,v 1.61 2023/08/07 03:35:06 dlg Exp $ */
/*
* @(#)COPYRIGHT 1.1 (NRL) 17 January 1995
@ -135,6 +135,7 @@
#define BITMAP_X_COUNTER (1LL << SADB_X_EXT_COUNTER)
#define BITMAP_X_MTU (1LL << SADB_X_EXT_MTU)
#define BITMAP_X_REPLAY (1LL << SADB_X_EXT_REPLAY)
#define BITMAP_X_IFACE (1LL << SADB_X_EXT_IFACE)
uint64_t sadb_exts_allowed_in[SADB_MAX+1] =
{
@ -143,9 +144,9 @@ uint64_t sadb_exts_allowed_in[SADB_MAX+1] =
/* GETSPI */
BITMAP_ADDRESS_SRC | BITMAP_ADDRESS_DST | BITMAP_SPIRANGE,
/* UPDATE */
BITMAP_SA | BITMAP_LIFETIME | BITMAP_ADDRESS | BITMAP_ADDRESS_PROXY | BITMAP_KEY | BITMAP_IDENTITY | BITMAP_X_FLOW | BITMAP_X_UDPENCAP | BITMAP_X_TAG | BITMAP_X_TAP | BITMAP_X_RDOMAIN,
BITMAP_SA | BITMAP_LIFETIME | BITMAP_ADDRESS | BITMAP_ADDRESS_PROXY | BITMAP_KEY | BITMAP_IDENTITY | BITMAP_X_FLOW | BITMAP_X_UDPENCAP | BITMAP_X_TAG | BITMAP_X_TAP | BITMAP_X_RDOMAIN | BITMAP_X_IFACE,
/* ADD */
BITMAP_SA | BITMAP_LIFETIME | BITMAP_ADDRESS | BITMAP_KEY | BITMAP_IDENTITY | BITMAP_X_FLOW | BITMAP_X_UDPENCAP | BITMAP_X_LIFETIME_LASTUSE | BITMAP_X_TAG | BITMAP_X_TAP | BITMAP_X_RDOMAIN,
BITMAP_SA | BITMAP_LIFETIME | BITMAP_ADDRESS | BITMAP_KEY | BITMAP_IDENTITY | BITMAP_X_FLOW | BITMAP_X_UDPENCAP | BITMAP_X_LIFETIME_LASTUSE | BITMAP_X_TAG | BITMAP_X_TAP | BITMAP_X_RDOMAIN | BITMAP_X_IFACE,
/* DELETE */
BITMAP_SA | BITMAP_ADDRESS_SRC | BITMAP_ADDRESS_DST | BITMAP_X_RDOMAIN,
/* GET */
@ -215,13 +216,13 @@ const uint64_t sadb_exts_allowed_out[SADB_MAX+1] =
/* GETSPI */
BITMAP_SA | BITMAP_ADDRESS_SRC | BITMAP_ADDRESS_DST,
/* UPDATE */
BITMAP_SA | BITMAP_LIFETIME | BITMAP_ADDRESS | BITMAP_ADDRESS_PROXY | BITMAP_IDENTITY | BITMAP_X_FLOW | BITMAP_X_UDPENCAP | BITMAP_X_TAG | BITMAP_X_TAP | BITMAP_X_RDOMAIN,
BITMAP_SA | BITMAP_LIFETIME | BITMAP_ADDRESS | BITMAP_ADDRESS_PROXY | BITMAP_IDENTITY | BITMAP_X_FLOW | BITMAP_X_UDPENCAP | BITMAP_X_TAG | BITMAP_X_TAP | BITMAP_X_RDOMAIN | BITMAP_X_IFACE,
/* ADD */
BITMAP_SA | BITMAP_LIFETIME | BITMAP_ADDRESS | BITMAP_IDENTITY | BITMAP_X_FLOW | BITMAP_X_UDPENCAP | BITMAP_X_TAG | BITMAP_X_TAP | BITMAP_X_RDOMAIN,
BITMAP_SA | BITMAP_LIFETIME | BITMAP_ADDRESS | BITMAP_IDENTITY | BITMAP_X_FLOW | BITMAP_X_UDPENCAP | BITMAP_X_TAG | BITMAP_X_TAP | BITMAP_X_RDOMAIN | BITMAP_X_IFACE,
/* DELETE */
BITMAP_SA | BITMAP_ADDRESS_SRC | BITMAP_ADDRESS_DST | BITMAP_X_RDOMAIN,
/* GET */
BITMAP_SA | BITMAP_LIFETIME | BITMAP_ADDRESS | BITMAP_KEY | BITMAP_IDENTITY | BITMAP_X_UDPENCAP | BITMAP_X_LIFETIME_LASTUSE | BITMAP_X_SRC_MASK | BITMAP_X_DST_MASK | BITMAP_X_PROTOCOL | BITMAP_X_FLOW_TYPE | BITMAP_X_SRC_FLOW | BITMAP_X_DST_FLOW | BITMAP_X_TAG | BITMAP_X_TAP | BITMAP_X_COUNTER | BITMAP_X_RDOMAIN | BITMAP_X_MTU | BITMAP_X_REPLAY,
BITMAP_SA | BITMAP_LIFETIME | BITMAP_ADDRESS | BITMAP_KEY | BITMAP_IDENTITY | BITMAP_X_UDPENCAP | BITMAP_X_LIFETIME_LASTUSE | BITMAP_X_SRC_MASK | BITMAP_X_DST_MASK | BITMAP_X_PROTOCOL | BITMAP_X_FLOW_TYPE | BITMAP_X_SRC_FLOW | BITMAP_X_DST_FLOW | BITMAP_X_TAG | BITMAP_X_TAP | BITMAP_X_COUNTER | BITMAP_X_RDOMAIN | BITMAP_X_MTU | BITMAP_X_REPLAY | BITMAP_X_IFACE,
/* ACQUIRE */
BITMAP_ADDRESS_SRC | BITMAP_ADDRESS_DST | BITMAP_IDENTITY | BITMAP_PROPOSAL,
/* REGISTER */
@ -881,6 +882,12 @@ pfkeyv2_parsemessage(void *p, int len, void **headers)
}
break;
#endif
case SADB_X_EXT_IFACE:
if (i != sizeof(struct sadb_x_iface)) {
DPRINTF("bad IFACE header length");
return (EINVAL);
}
break;
default:
DPRINTF("unknown extension header type %d",
sadb_ext->sadb_ext_type);

View file

@ -1,4 +1,4 @@
/* $OpenBSD: ip_ipsp.c,v 1.275 2022/11/11 18:09:58 cheloha Exp $ */
/* $OpenBSD: ip_ipsp.c,v 1.276 2023/08/07 03:43:57 dlg Exp $ */
/*
* The authors of this code are John Ioannidis (ji@tla.org),
* Angelos D. Keromytis (kermit@csd.uch.gr),
@ -39,6 +39,7 @@
#include "pf.h"
#include "pfsync.h"
#include "sec.h"
#include <sys/param.h>
#include <sys/systm.h>
@ -67,6 +68,10 @@
#include <net/if_pfsync.h>
#endif
#if NSEC > 0
#include <net/if_sec.h>
#endif
#include <netinet/ip_ipsp.h>
#include <net/pfkeyv2.h>
@ -852,14 +857,6 @@ puttdb_locked(struct tdb *tdbp)
tdbp->tdb_hnext = tdbh[hashval];
tdbh[hashval] = tdbp;
hashval = tdb_hash(0, &tdbp->tdb_dst, tdbp->tdb_sproto);
tdbp->tdb_dnext = tdbdst[hashval];
tdbdst[hashval] = tdbp;
hashval = tdb_hash(0, &tdbp->tdb_src, tdbp->tdb_sproto);
tdbp->tdb_snext = tdbsrc[hashval];
tdbsrc[hashval] = tdbp;
tdb_count++;
#ifdef IPSEC
if ((tdbp->tdb_flags & (TDBF_INVALID|TDBF_TUNNELING)) == TDBF_TUNNELING)
@ -867,6 +864,21 @@ puttdb_locked(struct tdb *tdbp)
#endif /* IPSEC */
ipsec_last_added = getuptime();
if (ISSET(tdbp->tdb_flags, TDBF_IFACE)) {
#if NSEC > 0
sec_tdb_insert(tdbp);
#endif
return;
}
hashval = tdb_hash(0, &tdbp->tdb_dst, tdbp->tdb_sproto);
tdbp->tdb_dnext = tdbdst[hashval];
tdbdst[hashval] = tdbp;
hashval = tdb_hash(0, &tdbp->tdb_src, tdbp->tdb_sproto);
tdbp->tdb_snext = tdbsrc[hashval];
tdbsrc[hashval] = tdbp;
}
void
@ -901,6 +913,22 @@ tdb_unlink_locked(struct tdb *tdbp)
tdbp->tdb_hnext = NULL;
tdb_count--;
#ifdef IPSEC
if ((tdbp->tdb_flags & (TDBF_INVALID|TDBF_TUNNELING)) ==
TDBF_TUNNELING) {
ipsecstat_dec(ipsec_tunnels);
ipsecstat_inc(ipsec_prevtunnels);
}
#endif /* IPSEC */
if (ISSET(tdbp->tdb_flags, TDBF_IFACE)) {
#if NSEC > 0
sec_tdb_remove(tdbp);
#endif
return;
}
hashval = tdb_hash(0, &tdbp->tdb_dst, tdbp->tdb_sproto);
if (tdbdst[hashval] == tdbp) {
@ -932,14 +960,6 @@ tdb_unlink_locked(struct tdb *tdbp)
}
tdbp->tdb_snext = NULL;
tdb_count--;
#ifdef IPSEC
if ((tdbp->tdb_flags & (TDBF_INVALID|TDBF_TUNNELING)) ==
TDBF_TUNNELING) {
ipsecstat_dec(ipsec_tunnels);
ipsecstat_inc(ipsec_prevtunnels);
}
#endif /* IPSEC */
}
void

View file

@ -1,4 +1,4 @@
/* $OpenBSD: ip_ipsp.h,v 1.241 2023/07/06 04:55:05 dlg Exp $ */
/* $OpenBSD: ip_ipsp.h,v 1.242 2023/08/07 01:44:51 dlg Exp $ */
/*
* The authors of this code are John Ioannidis (ji@tla.org),
* Angelos D. Keromytis (kermit@csd.uch.gr),
@ -357,6 +357,7 @@ struct tdb { /* tunnel descriptor block */
#define TDBF_PFSYNC_RPL 0x80000 /* Replay counter should be bumped */
#define TDBF_ESN 0x100000 /* 64-bit sequence numbers (ESN) */
#define TDBF_PFSYNC_SNAPPED 0x200000 /* entry is being dispatched to peer */
#define TDBF_IFACE 0x400000 /* entry policy is via sec(4) */
#define TDBF_BITS ("\20" \
"\1UNIQUE\2TIMER\3BYTES\4ALLOCATIONS" \
@ -364,7 +365,7 @@ struct tdb { /* tunnel descriptor block */
"\11SOFT_BYTES\12SOFT_ALLOCATIONS\13SOFT_FIRSTUSE\14PFS" \
"\15TUNNELING" \
"\21USEDTUNNEL\22UDPENCAP\23PFSYNC\24PFSYNC_RPL" \
"\25ESN")
"\25ESN" "\26IFACE")
u_int32_t tdb_flags; /* [m] Flags related to this TDB */
@ -406,6 +407,7 @@ struct tdb { /* tunnel descriptor block */
u_int8_t tdb_sproto; /* [I] IPsec protocol */
u_int8_t tdb_wnd; /* Replay window */
u_int8_t tdb_satype; /* SA type (RFC2367, PF_KEY) */
u_int8_t tdb_iface_dir; /* [I] sec(4) iface direction */
union sockaddr_union tdb_dst; /* [N] Destination address */
union sockaddr_union tdb_src; /* [N] Source address */
@ -431,6 +433,7 @@ struct tdb { /* tunnel descriptor block */
u_int16_t tdb_tag; /* Packet filter tag */
u_int32_t tdb_tap; /* Alternate enc(4) interface */
unsigned int tdb_iface; /* [I] sec(4) iface */
u_int tdb_rdomain; /* [I] Routing domain */
u_int tdb_rdomain_post; /* [I] Change domain */

View file

@ -1,4 +1,4 @@
/* $OpenBSD: ipsec_input.c,v 1.204 2023/05/13 13:35:17 bluhm Exp $ */
/* $OpenBSD: ipsec_input.c,v 1.205 2023/08/07 03:43:57 dlg Exp $ */
/*
* The authors of this code are John Ioannidis (ji@tla.org),
* Angelos D. Keromytis (kermit@csd.uch.gr) and
@ -36,6 +36,7 @@
*/
#include "pf.h"
#include "sec.h"
#include <sys/param.h>
#include <sys/systm.h>
@ -63,6 +64,10 @@
#include <net/pfvar.h>
#endif
#if NSEC > 0
#include <net/if_sec.h>
#endif
#ifdef INET6
#include <netinet6/in6_var.h>
#include <netinet/ip6.h>
@ -545,6 +550,22 @@ ipsec_common_input_cb(struct mbuf **mp, struct tdb *tdbp, int skip, int protoff)
}
#endif
if (ISSET(tdbp->tdb_flags, TDBF_IFACE)) {
#if NSEC > 0
if (ISSET(tdbp->tdb_flags, TDBF_TUNNELING) &&
tdbp->tdb_iface_dir == IPSP_DIRECTION_IN) {
struct sec_softc *sc = sec_get(tdbp->tdb_iface);
if (sc == NULL)
goto baddone;
sec_input(sc, af, prot, m);
sec_put(sc);
return IPPROTO_DONE;
}
#endif /* NSEC > 0 */
goto baddone;
}
#if NPF > 0
/*
* The ip_deliver() shortcut avoids running through ip_input() with the

View file

@ -1,4 +1,4 @@
/* $OpenBSD: nd6_nbr.c,v 1.150 2023/07/29 15:59:27 krw Exp $ */
/* $OpenBSD: nd6_nbr.c,v 1.151 2023/07/30 12:52:03 krw Exp $ */
/* $KAME: nd6_nbr.c,v 1.61 2001/02/10 16:06:14 jinmei Exp $ */
/*
@ -1158,21 +1158,24 @@ nd6_dad_stop(struct ifaddr *ifa)
void
nd6_dad_timer(void *xifa)
{
struct ifaddr *ifa = xifa;
struct in6_ifaddr *ia6 = ifatoia6(ifa);
struct ifaddr *ifa;
struct in6_ifaddr *ia6;
struct in6_addr daddr6, taddr6;
struct ifnet *ifp = ifa->ifa_ifp;
struct ifnet *ifp;
struct dadq *dp;
char addr[INET6_ADDRSTRLEN];
NET_LOCK();
/* Sanity check */
if (ia6 == NULL) {
if (xifa == NULL) {
log(LOG_ERR, "%s: called with null parameter\n", __func__);
goto done;
}
ifa = xifa;
ia6 = ifatoia6(ifa);
taddr6 = ia6->ia_addr.sin6_addr;
ifp = ifa->ifa_ifp;
dp = nd6_dad_find(ifa);
if (dp == NULL) {
log(LOG_ERR, "%s: DAD structure not found\n", __func__);

View file

@ -1,4 +1,4 @@
/* $OpenBSD: krpc_subr.c,v 1.37 2022/06/06 14:45:41 claudio Exp $ */
/* $OpenBSD: krpc_subr.c,v 1.38 2023/08/03 09:49:09 mvs Exp $ */
/* $NetBSD: krpc_subr.c,v 1.12.4.1 1996/06/07 00:52:26 cgd Exp $ */
/*
@ -239,9 +239,7 @@ krpc_call(struct sockaddr_in *sa, u_int prog, u_int vers, u_int func,
tv.tv_usec = 0;
memcpy(mtod(m, struct timeval *), &tv, sizeof tv);
m->m_len = sizeof(tv);
solock(so);
error = sosetopt(so, SOL_SOCKET, SO_RCVTIMEO, m);
sounlock(so);
m_freem(m);
if (error)
goto out;
@ -255,9 +253,7 @@ krpc_call(struct sockaddr_in *sa, u_int prog, u_int vers, u_int func,
on = mtod(m, int32_t *);
m->m_len = sizeof(*on);
*on = 1;
solock(so);
error = sosetopt(so, SOL_SOCKET, SO_BROADCAST, m);
sounlock(so);
m_freem(m);
if (error)
goto out;
@ -272,9 +268,7 @@ krpc_call(struct sockaddr_in *sa, u_int prog, u_int vers, u_int func,
mopt->m_len = sizeof(int);
ip = mtod(mopt, int *);
*ip = IP_PORTRANGE_LOW;
solock(so);
error = sosetopt(so, IPPROTO_IP, IP_PORTRANGE, mopt);
sounlock(so);
m_freem(mopt);
if (error)
goto out;
@ -299,9 +293,7 @@ krpc_call(struct sockaddr_in *sa, u_int prog, u_int vers, u_int func,
mopt->m_len = sizeof(int);
ip = mtod(mopt, int *);
*ip = IP_PORTRANGE_DEFAULT;
solock(so);
error = sosetopt(so, IPPROTO_IP, IP_PORTRANGE, mopt);
sounlock(so);
m_freem(mopt);
if (error)
goto out;

View file

@ -1,4 +1,4 @@
/* $OpenBSD: nfs_socket.c,v 1.143 2022/08/13 21:01:46 mvs Exp $ */
/* $OpenBSD: nfs_socket.c,v 1.144 2023/08/03 09:49:09 mvs Exp $ */
/* $NetBSD: nfs_socket.c,v 1.27 1996/04/15 20:20:00 thorpej Exp $ */
/*
@ -258,7 +258,6 @@ nfs_connect(struct nfsmount *nmp, struct nfsreq *rep)
MGET(nam, M_WAIT, MT_SONAME);
so = nmp->nm_so;
solock(so);
nmp->nm_soflags = so->so_proto->pr_flags;
/*
@ -282,7 +281,9 @@ nfs_connect(struct nfsmount *nmp, struct nfsreq *rep)
sin->sin_family = AF_INET;
sin->sin_addr.s_addr = INADDR_ANY;
sin->sin_port = htons(0);
solock(so);
error = sobind(so, nam, &proc0);
sounlock(so);
if (error)
goto bad;
@ -294,6 +295,7 @@ nfs_connect(struct nfsmount *nmp, struct nfsreq *rep)
goto bad;
}
solock(so);
/*
* Protocols that do not require connections may be optionally left
* unconnected for servers that reply from a port other than NFS_PORT.
@ -301,12 +303,12 @@ nfs_connect(struct nfsmount *nmp, struct nfsreq *rep)
if (nmp->nm_flag & NFSMNT_NOCONN) {
if (nmp->nm_soflags & PR_CONNREQUIRED) {
error = ENOTCONN;
goto bad;
goto bad_locked;
}
} else {
error = soconnect(so, nmp->nm_nam);
if (error)
goto bad;
goto bad_locked;
/*
* Wait for the connection to complete. Cribbed from the
@ -320,13 +322,13 @@ nfs_connect(struct nfsmount *nmp, struct nfsreq *rep)
so->so_error == 0 && rep &&
(error = nfs_sigintr(nmp, rep, rep->r_procp)) != 0){
so->so_state &= ~SS_ISCONNECTING;
goto bad;
goto bad_locked;
}
}
if (so->so_error) {
error = so->so_error;
so->so_error = 0;
goto bad;
goto bad_locked;
}
}
/*
@ -338,6 +340,7 @@ nfs_connect(struct nfsmount *nmp, struct nfsreq *rep)
so->so_snd.sb_timeo_nsecs = SEC_TO_NSEC(5);
else
so->so_snd.sb_timeo_nsecs = INFSLP;
sounlock(so);
if (nmp->nm_sotype == SOCK_DGRAM) {
sndreserve = nmp->nm_wsize + NFS_MAXPKTHDR;
rcvreserve = (max(nmp->nm_rsize, nmp->nm_readdirsize) +
@ -360,9 +363,10 @@ nfs_connect(struct nfsmount *nmp, struct nfsreq *rep)
} else {
panic("%s: nm_sotype %d", __func__, nmp->nm_sotype);
}
solock(so);
error = soreserve(so, sndreserve, rcvreserve);
if (error)
goto bad;
goto bad_locked;
so->so_rcv.sb_flags |= SB_NOINTR;
so->so_snd.sb_flags |= SB_NOINTR;
sounlock(so);
@ -377,8 +381,9 @@ nfs_connect(struct nfsmount *nmp, struct nfsreq *rep)
nmp->nm_timeouts = 0;
return (0);
bad:
bad_locked:
sounlock(so);
bad:
m_freem(mopt);
m_freem(nam);

View file

@ -1,4 +1,4 @@
/* $OpenBSD: nfs_syscalls.c,v 1.118 2022/06/06 14:45:41 claudio Exp $ */
/* $OpenBSD: nfs_syscalls.c,v 1.119 2023/08/03 09:49:09 mvs Exp $ */
/* $NetBSD: nfs_syscalls.c,v 1.19 1996/02/18 11:53:52 fvdl Exp $ */
/*
@ -249,8 +249,8 @@ nfssvc_addsock(struct file *fp, struct mbuf *mynam)
siz = NFS_MAXPACKET;
solock(so);
error = soreserve(so, siz, siz);
sounlock(so);
if (error) {
sounlock(so);
m_freem(mynam);
return (error);
}
@ -275,6 +275,7 @@ nfssvc_addsock(struct file *fp, struct mbuf *mynam)
sosetopt(so, IPPROTO_TCP, TCP_NODELAY, m);
m_freem(m);
}
solock(so);
so->so_rcv.sb_flags &= ~SB_NOINTR;
so->so_rcv.sb_timeo_nsecs = INFSLP;
so->so_snd.sb_flags &= ~SB_NOINTR;

View file

@ -1,4 +1,4 @@
/* $OpenBSD: scsi_base.c,v 1.282 2023/07/06 10:17:43 visa Exp $ */
/* $OpenBSD: scsi_base.c,v 1.283 2023/08/02 19:58:52 kettenis Exp $ */
/* $NetBSD: scsi_base.c,v 1.43 1997/04/02 02:29:36 mycroft Exp $ */
/*
@ -138,6 +138,8 @@ scsi_init(void)
/* Initialize the scsi_xfer pool. */
pool_init(&scsi_xfer_pool, sizeof(struct scsi_xfer), 0, IPL_BIO, 0,
"scxspl", NULL);
pool_setlowat(&scsi_xfer_pool, 8);
pool_prime(&scsi_xfer_pool, 8);
pool_init(&scsi_plug_pool, sizeof(struct scsi_plug), 0, IPL_BIO, 0,
"scsiplug", NULL);
}

Some files were not shown because too many files have changed in this diff Show more