This commit is contained in:
purplerain 2023-07-06 21:55:14 +00:00
parent f1b2576417
commit 2a351e0cdc
Signed by: purplerain
GPG key ID: F42C07F07E2E35B7
347 changed files with 9596 additions and 5486 deletions

View file

@ -1,4 +1,4 @@
/* $OpenBSD: locore.S,v 1.134 2023/04/17 00:14:59 deraadt Exp $ */
/* $OpenBSD: locore.S,v 1.135 2023/07/05 18:23:10 anton Exp $ */
/* $NetBSD: locore.S,v 1.13 2004/03/25 18:33:17 drochner Exp $ */
/*
@ -1149,6 +1149,23 @@ NENTRY(rdmsr_resume)
lfence
END(rdmsr_safe)
#if NHYPERV > 0
/* uint64_t hv_hypercall_trampoline(uint64_t control, paddr_t input, paddr_t output) */
/*
 * Adapt the SysV AMD64 calling convention used by the kernel to the
 * register layout expected by the Hyper-V hypercall page:
 *   %rdi (control)   -> %rcx
 *   %rsi (input PA)  -> %rdx
 *   %rdx (output PA) -> %r8
 * %rdx is copied into %r8 first, before %rdx is overwritten.
 * NOTE(review): target registers follow the Microsoft hypercall ABI —
 * confirm against the Hyper-V TLFS.
 */
NENTRY(hv_hypercall_trampoline)
	endbr64
	mov	%rdx, %r8
	mov	%rsi, %rdx
	mov	%rdi, %rcx
	/* Tail-jump: the hypercall page's RET returns to our caller. */
	jmp	hv_hypercall_page
END(hv_hypercall_trampoline)
/* Hypercall page needs to be page aligned */
.text
.align NBPG, 0xcc
.globl hv_hypercall_page
hv_hypercall_page:
.skip 0x1000, 0xcc
#endif /* NHYPERV > 0 */
#if NXEN > 0
/* Hypercall page needs to be page aligned */
.text
@ -1157,12 +1174,3 @@ END(rdmsr_safe)
xen_hypercall_page:
.skip 0x1000, 0xcc
#endif /* NXEN > 0 */
#if NHYPERV > 0
/* Hypercall page needs to be page aligned */
.text
.align NBPG, 0xcc
.globl hv_hypercall_page
hv_hypercall_page:
.skip 0x1000, 0xcc
#endif /* NXEN > 0 */

View file

@ -1,4 +1,4 @@
/* $OpenBSD: trap.c,v 1.100 2023/04/16 06:43:49 jsg Exp $ */
/* $OpenBSD: trap.c,v 1.101 2023/07/05 12:58:55 kn Exp $ */
/* $NetBSD: trap.c,v 1.2 2003/05/04 23:51:56 fvdl Exp $ */
/*-
@ -216,9 +216,8 @@ upageflttrap(struct trapframe *frame, uint64_t cr2)
/*
* kpageflttrap(frame, usermode): page fault handler
* Returns non-zero if the fault was handled (possibly by generating
* a signal). Returns zero, possibly still holding the kernel lock,
* if something was so broken that we should panic.
* Returns non-zero if the fault was handled (possibly by generating a signal).
* Returns zero if something was so broken that we should panic.
*/
int
kpageflttrap(struct trapframe *frame, uint64_t cr2)
@ -240,11 +239,9 @@ kpageflttrap(struct trapframe *frame, uint64_t cr2)
caddr_t *nf = __nofault_start;
while (*nf++ != pcb->pcb_onfault) {
if (nf >= __nofault_end) {
KERNEL_LOCK();
fault("invalid pcb_nofault=%lx",
(long)pcb->pcb_onfault);
return 0;
/* retain kernel lock */
}
}
}
@ -252,19 +249,15 @@ kpageflttrap(struct trapframe *frame, uint64_t cr2)
/* This will only trigger if SMEP is enabled */
if (pcb->pcb_onfault == NULL && cr2 <= VM_MAXUSER_ADDRESS &&
frame->tf_err & PGEX_I) {
KERNEL_LOCK();
fault("attempt to execute user address %p "
"in supervisor mode", (void *)cr2);
/* retain kernel lock */
return 0;
}
/* This will only trigger if SMAP is enabled */
if (pcb->pcb_onfault == NULL && cr2 <= VM_MAXUSER_ADDRESS &&
frame->tf_err & PGEX_P) {
KERNEL_LOCK();
fault("attempt to access user address %p "
"in supervisor mode", (void *)cr2);
/* retain kernel lock */
return 0;
}
@ -294,10 +287,8 @@ kpageflttrap(struct trapframe *frame, uint64_t cr2)
if (error) {
if (pcb->pcb_onfault == NULL) {
/* bad memory access in the kernel */
KERNEL_LOCK();
fault("uvm_fault(%p, 0x%llx, 0, %d) -> %x",
map, cr2, access_type, error);
/* retain kernel lock */
return 0;
}
frame->tf_rip = (u_int64_t)pcb->pcb_onfault;

View file

@ -1,4 +1,4 @@
/* $OpenBSD: _types.h,v 1.18 2022/11/08 17:34:13 cheloha Exp $ */
/* $OpenBSD: _types.h,v 1.19 2023/07/02 19:02:27 cheloha Exp $ */
/*-
* Copyright (c) 1990, 1993
@ -35,8 +35,6 @@
#ifndef _MACHINE__TYPES_H_
#define _MACHINE__TYPES_H_
#define __HAVE_CLOCKINTR
/*
* _ALIGN(p) rounds p (pointer or byte index) up to a correctly-aligned
* value for all data types (int, long, ...). The result is an

View file

@ -1,4 +1,4 @@
/* $OpenBSD: cpu.h,v 1.154 2022/11/29 21:41:39 guenther Exp $ */
/* $OpenBSD: cpu.h,v 1.155 2023/07/04 17:29:32 cheloha Exp $ */
/* $NetBSD: cpu.h,v 1.1 2003/04/26 18:39:39 fvdl Exp $ */
/*-
@ -112,10 +112,8 @@ struct cpu_info {
#define ci_PAGEALIGN ci_dev
struct device *ci_dev; /* [I] */
struct cpu_info *ci_self; /* [I] */
struct schedstate_percpu ci_schedstate; /* scheduler state */
struct cpu_info *ci_next; /* [I] */
struct proc *ci_curproc; /* [o] */
u_int ci_cpuid; /* [I] */
u_int ci_apicid; /* [I] */
u_int ci_acpi_proc_id; /* [I] */
@ -129,6 +127,9 @@ struct cpu_info {
char ci_mds_tmp[32]; /* [o] 32byte aligned */
void *ci_mds_buf; /* [I] */
struct proc *ci_curproc; /* [o] */
struct schedstate_percpu ci_schedstate; /* scheduler state */
struct pmap *ci_proc_pmap; /* last userspace pmap */
struct pcb *ci_curpcb; /* [o] */
struct pcb *ci_idle_pcb; /* [o] */

View file

@ -1,4 +1,4 @@
/* $OpenBSD: _types.h,v 1.20 2023/01/17 02:27:14 cheloha Exp $ */
/* $OpenBSD: _types.h,v 1.21 2023/07/02 19:02:27 cheloha Exp $ */
/*-
* Copyright (c) 1990, 1993
@ -35,8 +35,6 @@
#ifndef _ARM__TYPES_H_
#define _ARM__TYPES_H_
#define __HAVE_CLOCKINTR
#if defined(_KERNEL)
typedef struct label_t {
long val[11];

View file

@ -1,4 +1,4 @@
# $OpenBSD: GENERIC,v 1.274 2023/06/27 22:38:46 patrick Exp $
# $OpenBSD: GENERIC,v 1.275 2023/07/01 16:34:29 drahn Exp $
#
# GENERIC machine description file
#
@ -324,6 +324,7 @@ sdmmc* at dwmshc?
# Qualcomm SoCs
qcaoss* at fdt?
qccpu* at fdt?
qcdwusb* at fdt?
qcgpio* at acpi?
qcgpio* at fdt? early 1

View file

@ -1,4 +1,4 @@
/* $OpenBSD: agintc.c,v 1.50 2023/06/18 16:25:21 kettenis Exp $ */
/* $OpenBSD: agintc.c,v 1.51 2023/07/06 09:40:36 patrick Exp $ */
/*
* Copyright (c) 2007, 2009, 2011, 2017 Dale Rahn <drahn@dalerahn.com>
* Copyright (c) 2018 Mark Kettenis <kettenis@openbsd.org>
@ -339,7 +339,7 @@ agintc_attach(struct device *parent, struct device *self, void *aux)
}
sc->sc_pend = agintc_dmamem_alloc(sc->sc_dmat,
GICR_PEND_SIZE, GICR_PEND_SIZE);
if (sc->sc_prop == NULL) {
if (sc->sc_pend == NULL) {
printf(": can't alloc LPI pending table\n");
goto unmap;
}

View file

@ -1,4 +1,4 @@
/* $OpenBSD: apldc.c,v 1.8 2023/05/02 19:39:10 kettenis Exp $ */
/* $OpenBSD: apldc.c,v 1.9 2023/07/03 15:54:07 tobhe Exp $ */
/*
* Copyright (c) 2022 Mark Kettenis <kettenis@openbsd.org>
*
@ -1289,6 +1289,10 @@ int apldcms_enable(void *);
void apldcms_disable(void *);
int apldcms_ioctl(void *, u_long, caddr_t, int, struct proc *);
static struct wsmouse_param apldcms_wsmousecfg[] = {
{ WSMOUSECFG_MTBTN_MAXDIST, 0 }, /* 0: Compute a default value. */
};
const struct wsmouse_accessops apldcms_accessops = {
.enable = apldcms_enable,
.disable = apldcms_disable,
@ -1350,7 +1354,8 @@ apldcms_configure(struct apldcms_softc *sc)
hw->mt_slots = UBCMTP_MAX_FINGERS;
hw->flags = WSMOUSEHW_MT_TRACKING;
return wsmouse_configure(sc->sc_wsmousedev, NULL, 0);
return wsmouse_configure(sc->sc_wsmousedev, apldcms_wsmousecfg,
nitems(apldcms_wsmousecfg));
}
void

View file

@ -1,4 +1,4 @@
/* $OpenBSD: aplhidev.c,v 1.11 2023/04/10 15:14:04 tobhe Exp $ */
/* $OpenBSD: aplhidev.c,v 1.12 2023/07/02 21:44:04 bru Exp $ */
/*
* Copyright (c) 2021 Mark Kettenis <kettenis@openbsd.org>
* Copyright (c) 2013-2014 joshua stein <jcs@openbsd.org>
@ -683,6 +683,10 @@ struct ubcmtp_finger {
/* Use a constant, synaptics-compatible pressure value for now. */
#define DEFAULT_PRESSURE 40
static struct wsmouse_param aplms_wsmousecfg[] = {
{ WSMOUSECFG_MTBTN_MAXDIST, 0 }, /* 0: Compute a default value. */
};
struct aplms_softc {
struct device sc_dev;
struct device *sc_wsmousedev;
@ -762,7 +766,8 @@ aplms_configure(struct aplms_softc *sc)
hw->mt_slots = UBCMTP_MAX_FINGERS;
hw->flags = WSMOUSEHW_MT_TRACKING;
return wsmouse_configure(sc->sc_wsmousedev, NULL, 0);
return wsmouse_configure(sc->sc_wsmousedev,
aplms_wsmousecfg, nitems(aplms_wsmousecfg));
}
void

View file

@ -1,4 +1,4 @@
/* $OpenBSD: apm.c,v 1.22 2023/02/10 14:34:16 visa Exp $ */
/* $OpenBSD: apm.c,v 1.23 2023/07/05 08:26:56 tobhe Exp $ */
/*-
* Copyright (c) 2001 Alexander Guy. All rights reserved.
@ -60,6 +60,7 @@
struct taskq *suspend_taskq;
struct task suspend_task;
void do_suspend(void *);
void suspend(void);
#endif
struct apm_softc {
@ -223,7 +224,7 @@ apmioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
error = EBADF;
break;
}
sleep_state(NULL, SLEEP_SUSPEND);
suspend();
break;
#ifdef HIBERNATE
case APM_IOC_HIBERNATE:

View file

@ -1,4 +1,4 @@
/* $OpenBSD: _types.h,v 1.5 2022/11/08 17:56:38 cheloha Exp $ */
/* $OpenBSD: _types.h,v 1.6 2023/07/02 19:02:27 cheloha Exp $ */
/*-
* Copyright (c) 1990, 1993
* The Regents of the University of California. All rights reserved.
@ -34,8 +34,6 @@
#ifndef _MACHINE__TYPES_H_
#define _MACHINE__TYPES_H_
#define __HAVE_CLOCKINTR
#if defined(_KERNEL)
typedef struct label_t {
long val[13];

View file

@ -1,4 +1,4 @@
/* $OpenBSD: armv7_machdep.c,v 1.65 2022/10/03 19:32:22 kettenis Exp $ */
/* $OpenBSD: armv7_machdep.c,v 1.66 2023/07/05 08:15:34 jsg Exp $ */
/* $NetBSD: lubbock_machdep.c,v 1.2 2003/07/15 00:25:06 lukem Exp $ */
/*
@ -154,11 +154,6 @@ u_int cpu_reset_address = 0;
vaddr_t physical_freestart;
int physmem;
/*int debug_flags;*/
#ifndef PMAP_STATIC_L1S
int max_processes = 64; /* Default number */
#endif /* !PMAP_STATIC_L1S */
/* Physical and virtual addresses for some global pages */
pv_addr_t systempage;
pv_addr_t irqstack;

View file

@ -1,4 +1,4 @@
/* $OpenBSD: apm.c,v 1.131 2023/06/22 13:18:02 claudio Exp $ */
/* $OpenBSD: apm.c,v 1.132 2023/07/02 19:02:27 cheloha Exp $ */
/*-
* Copyright (c) 1998-2001 Michael Shalayeff. All rights reserved.
@ -265,10 +265,8 @@ apm_suspend(int state)
rtcstart(); /* in i8254 mode, rtc is profclock */
inittodr(gettime());
#ifdef __HAVE_CLOCKINTR
clockintr_cpu_init(NULL);
clockintr_trigger();
#endif
config_suspend_all(DVACT_RESUME);
cold = 0;

View file

@ -1,4 +1,4 @@
/* $OpenBSD: _types.h,v 1.24 2022/12/06 01:56:44 cheloha Exp $ */
/* $OpenBSD: _types.h,v 1.25 2023/07/02 19:02:27 cheloha Exp $ */
/*-
* Copyright (c) 1990, 1993
@ -35,8 +35,6 @@
#ifndef _MACHINE__TYPES_H_
#define _MACHINE__TYPES_H_
#define __HAVE_CLOCKINTR
/*
* _ALIGN(p) rounds p (pointer or byte index) up to a correctly-aligned
* value for all data types (int, long, ...). The result is an

View file

@ -1,4 +1,4 @@
/* $OpenBSD: param.c,v 1.48 2023/03/03 20:16:44 cheloha Exp $ */
/* $OpenBSD: param.c,v 1.49 2023/07/04 09:47:51 jsg Exp $ */
/* $NetBSD: param.c,v 1.16 1996/03/12 03:08:40 mrg Exp $ */
/*
@ -82,16 +82,6 @@ int maxthread = 2 * NPROCESS;
int maxfiles = 5 * (NPROCESS + MAXUSERS) + 80;
long nmbclust = NMBCLUSTERS;
#ifndef MBLOWAT
#define MBLOWAT 16
#endif
int mblowat = MBLOWAT;
#ifndef MCLLOWAT
#define MCLLOWAT 8
#endif
int mcllowat = MCLLOWAT;
#ifndef BUFCACHEPERCENT
#define BUFCACHEPERCENT 20
#endif

View file

@ -1,4 +1,4 @@
/* $OpenBSD: db_command.c,v 1.98 2023/03/08 04:43:07 guenther Exp $ */
/* $OpenBSD: db_command.c,v 1.99 2023/07/02 19:02:27 cheloha Exp $ */
/* $NetBSD: db_command.c,v 1.20 1996/03/30 22:30:05 christos Exp $ */
/*
@ -579,9 +579,7 @@ db_bcstats_print_cmd(db_expr_t addr, int have_addr, db_expr_t count, char *modif
const struct db_command db_show_all_cmds[] = {
{ "procs", db_show_all_procs, 0, NULL },
{ "callout", db_show_callout, 0, NULL },
#ifdef __HAVE_CLOCKINTR
{ "clockintr", db_show_all_clockintr, 0, NULL },
#endif
{ "pools", db_show_all_pools, 0, NULL },
{ "mounts", db_show_all_mounts, 0, NULL },
{ "vnodes", db_show_all_vnodes, 0, NULL },

View file

@ -1,4 +1,4 @@
/* $OpenBSD: db_interface.h,v 1.25 2022/11/05 19:29:45 cheloha Exp $ */
/* $OpenBSD: db_interface.h,v 1.26 2023/07/02 19:02:27 cheloha Exp $ */
/* $NetBSD: db_interface.h,v 1.1 1996/02/05 01:57:03 christos Exp $ */
/*
@ -44,9 +44,7 @@ void db_kill_cmd(db_expr_t, int, db_expr_t, char *);
void db_show_all_procs(db_expr_t, int, db_expr_t, char *);
/* kern/kern_clockintr.c */
#ifdef __HAVE_CLOCKINTR
void db_show_all_clockintr(db_expr_t, int, db_expr_t, char *);
#endif
/* kern/kern_timeout.c */
void db_show_callout(db_expr_t, int, db_expr_t, char *);

View file

@ -1,4 +1,4 @@
/* $OpenBSD: acpi.c,v 1.421 2023/06/29 20:58:08 dv Exp $ */
/* $OpenBSD: acpi.c,v 1.423 2023/07/06 06:58:07 deraadt Exp $ */
/*
* Copyright (c) 2005 Thorsten Lockert <tholo@sigmasoft.com>
* Copyright (c) 2005 Jordan Hargrave <jordan@openbsd.org>

View file

@ -1,4 +1,4 @@
/* $OpenBSD: acpi_x86.c,v 1.15 2022/03/06 15:12:00 deraadt Exp $ */
/* $OpenBSD: acpi_x86.c,v 1.17 2023/07/06 06:58:07 deraadt Exp $ */
/*
* Copyright (c) 2005 Thorsten Lockert <tholo@sigmasoft.com>
* Copyright (c) 2005 Jordan Hargrave <jordan@openbsd.org>

View file

@ -1,4 +1,4 @@
/* $OpenBSD: dt_prov_kprobe.c,v 1.4 2021/10/28 08:47:40 jasper Exp $ */
/* $OpenBSD: dt_prov_kprobe.c,v 1.7 2023/07/06 10:53:11 jasper Exp $ */
/*
* Copyright (c) 2020 Tom Rollet <tom.rollet@epita.fr>
@ -76,6 +76,12 @@ int nb_probes_return = 0;
#define KPROBE_RETURN "return"
#if defined(__amd64__)
#define KPROBE_IBT_1 0xf3
#define KPROBE_IBT_2 0x0f
#define KPROBE_IBT_3 0x1e
#define KPROBE_IBT_4 0xfa
#define KPROBE_IBT_SIZE 4
#define KPROBE_RETGUARD_MOV_1 0x4c
#define KPROBE_RETGUARD_MOV_2 0x8b
#define KPROBE_RETGUARD_MOV_3 0x1d
@ -88,10 +94,10 @@ int nb_probes_return = 0;
#define KPROBE_RETGUARD_XOR_SIZE 4
#define RET 0xc3
#define RET_INST 0xc3
#define RET_SIZE 1
#elif defined(__i386__)
#define POP_RBP 0x5d
#define POP_RBP_INST 0x5d
#define POP_RBP_SIZE 1
#endif
@ -154,20 +160,29 @@ dt_prov_kprobe_init(void)
continue;
#if defined(__amd64__)
/* Find if there is a retguard, if so move the inst pointer to the later 'push rbp' */
/*
* Find the IBT target and the retguard which follows it.
* Move the instruction pointer down to the 'push rbp' as needed.
*/
if (*((uint8_t *)inst) != SSF_INST) {
/* No retguards in i386 */
if (((uint8_t *)inst)[0] != KPROBE_RETGUARD_MOV_1 ||
((uint8_t *)inst)[1] != KPROBE_RETGUARD_MOV_2 ||
((uint8_t *)inst)[2] != KPROBE_RETGUARD_MOV_3 ||
((uint8_t *)inst)[KPROBE_RETGUARD_MOV_SIZE] != KPROBE_RETGUARD_XOR_1 ||
((uint8_t *)inst)[KPROBE_RETGUARD_MOV_SIZE + 1] != KPROBE_RETGUARD_XOR_2 ||
((uint8_t *)inst)[KPROBE_RETGUARD_MOV_SIZE + 2] != KPROBE_RETGUARD_XOR_3 ||
((uint8_t *)inst)[KPROBE_RETGUARD_MOV_SIZE + KPROBE_RETGUARD_XOR_SIZE] != SSF_INST)
if (((uint8_t *)inst)[0] != KPROBE_IBT_1 ||
((uint8_t *)inst)[1] != KPROBE_IBT_2 ||
((uint8_t *)inst)[2] != KPROBE_IBT_3 ||
((uint8_t *)inst)[3] != KPROBE_IBT_4)
continue;
inst = (vaddr_t)&(((uint8_t *)inst)[KPROBE_RETGUARD_MOV_SIZE + KPROBE_RETGUARD_XOR_SIZE]);
if (((uint8_t *)inst)[KPROBE_IBT_SIZE] != KPROBE_RETGUARD_MOV_1 ||
((uint8_t *)inst)[KPROBE_IBT_SIZE + 1] != KPROBE_RETGUARD_MOV_2 ||
((uint8_t *)inst)[KPROBE_IBT_SIZE + 2] != KPROBE_RETGUARD_MOV_3 ||
((uint8_t *)inst)[KPROBE_IBT_SIZE + KPROBE_RETGUARD_MOV_SIZE] != KPROBE_RETGUARD_XOR_1 ||
((uint8_t *)inst)[KPROBE_IBT_SIZE + KPROBE_RETGUARD_MOV_SIZE + 1] != KPROBE_RETGUARD_XOR_2 ||
((uint8_t *)inst)[KPROBE_IBT_SIZE + KPROBE_RETGUARD_MOV_SIZE + 2] != KPROBE_RETGUARD_XOR_3 ||
((uint8_t *)inst)[KPROBE_IBT_SIZE + KPROBE_RETGUARD_MOV_SIZE + KPROBE_RETGUARD_XOR_SIZE] != SSF_INST)
continue;
inst = (vaddr_t)&(((uint8_t *)inst)[KPROBE_IBT_SIZE + KPROBE_RETGUARD_MOV_SIZE + KPROBE_RETGUARD_XOR_SIZE]);
}
#elif defined(__i386__)
/* No retguard or IBT on i386 */
if (*((uint8_t *)inst) != SSF_INST)
continue;
#endif
@ -190,14 +205,9 @@ dt_prov_kprobe_init(void)
nb_probes++;
nb_probes_entry++;
/*
* Poor method to find the return point
* => we would need a disassembler to find all return points
* For now we start from the end of the function, iterate on
* int3 inserted for retguard until we find a ret
*/
#if defined(__amd64__)
if (*(uint8_t *)(limit - 1) != RET)
/* If the last instruction isn't a ret, just bail. */
if (*(uint8_t *)(limit - 1) != RET_INST)
continue;
inst = limit - 1;
#elif defined(__i386__)
@ -272,14 +282,14 @@ dt_prov_kprobe_dealloc(struct dt_probe *dtp, struct dt_softc *sc,
size = SSF_SIZE;
} else if (strcmp(dtp->dtp_name, KPROBE_RETURN) == 0) {
#if defined(__amd64__)
patch = RET;
patch = RET_INST;
size = RET_SIZE;
#elif defined(__i386__)
patch = POP_RBP;
patch = POP_RBP_INST;
size = POP_RBP_SIZE;
#endif
} else
KASSERT(0 && "Trying to dealloc not yet implemented probe type");
panic("Trying to dealloc not yet implemented probe type");
dtp->dtp_ref--;

View file

@ -1,4 +1,4 @@
/* $OpenBSD: dwmmc.c,v 1.27 2022/06/09 14:43:28 kettenis Exp $ */
/* $OpenBSD: dwmmc.c,v 1.29 2023/07/01 08:27:26 jsing Exp $ */
/*
* Copyright (c) 2017 Mark Kettenis
*
@ -275,7 +275,8 @@ dwmmc_match(struct device *parent, void *match, void *aux)
OF_is_compatible(faa->fa_node, "hisilicon,hi3670-dw-mshc") ||
OF_is_compatible(faa->fa_node, "rockchip,rk3288-dw-mshc") ||
OF_is_compatible(faa->fa_node, "samsung,exynos5420-dw-mshc") ||
OF_is_compatible(faa->fa_node, "snps,dw-mshc"));
OF_is_compatible(faa->fa_node, "snps,dw-mshc") ||
OF_is_compatible(faa->fa_node, "starfive,jh7110-mmc"));
}
void
@ -358,6 +359,10 @@ dwmmc_attach(struct device *parent, struct device *self, void *aux)
/* if ciu clock is missing the rate is clock-frequency */
if (sc->sc_clkbase == 0)
sc->sc_clkbase = freq;
if (sc->sc_clkbase == 0) {
printf(": no clock base\n");
return;
}
div = OF_getpropint(faa->fa_node, "samsung,dw-mshc-ciu-div", div);
sc->sc_clkbase /= (div + 1);

View file

@ -1,4 +1,4 @@
# $OpenBSD: files.fdt,v 1.194 2023/06/27 22:38:46 patrick Exp $
# $OpenBSD: files.fdt,v 1.195 2023/07/01 16:34:30 drahn Exp $
#
# Config file and device description for machine-independent FDT code.
# Included by ports that need it.
@ -669,6 +669,11 @@ device qcaoss
attach qcaoss at fdt
file dev/fdt/qcaoss.c qcaoss
# Qualcomm CPU Clock
device qccpu
attach qccpu at fdt
file dev/fdt/qccpu.c qccpu
device qcdwusb: fdt
attach qcdwusb at fdt
file dev/fdt/qcdwusb.c qcdwusb

View file

@ -1,4 +1,4 @@
/* $OpenBSD: if_dwge.c,v 1.16 2023/06/25 22:36:09 jmatthew Exp $ */
/* $OpenBSD: if_dwge.c,v 1.18 2023/07/06 08:32:37 jmatthew Exp $ */
/*
* Copyright (c) 2008, 2019 Mark Kettenis <kettenis@openbsd.org>
* Copyright (c) 2017 Patrick Wildt <patrick@blueri.se>
@ -267,10 +267,13 @@ struct dwge_softc {
bus_dma_tag_t sc_dmat;
void *sc_ih;
struct if_device sc_ifd;
struct arpcom sc_ac;
#define sc_lladdr sc_ac.ac_enaddr
struct mii_data sc_mii;
#define sc_media sc_mii.mii_media
uint64_t sc_fixed_media;
int sc_link;
int sc_phyloc;
int sc_force_thresh_dma_mode;
@ -386,7 +389,7 @@ dwge_attach(struct device *parent, struct device *self, void *aux)
{
struct dwge_softc *sc = (void *)self;
struct fdt_attach_args *faa = aux;
struct ifnet *ifp;
struct ifnet *ifp = &sc->sc_ac.ac_if;
uint32_t phy, phy_supply;
uint32_t axi_config;
uint32_t mode, pbl;
@ -457,6 +460,30 @@ dwge_attach(struct device *parent, struct device *self, void *aux)
/* Reset PHY */
dwge_reset_phy(sc);
node = OF_getnodebyname(faa->fa_node, "fixed-link");
if (node) {
ifp->if_baudrate = IF_Mbps(OF_getpropint(node, "speed", 0));
switch (OF_getpropint(node, "speed", 0)) {
case 1000:
sc->sc_fixed_media = IFM_ETHER | IFM_1000_T;
break;
case 100:
sc->sc_fixed_media = IFM_ETHER | IFM_100_TX;
break;
default:
sc->sc_fixed_media = IFM_ETHER | IFM_AUTO;
break;
}
if (OF_getpropbool(node, "full-duplex")) {
ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
sc->sc_fixed_media |= IFM_FDX;
} else {
ifp->if_link_state = LINK_STATE_UP;
}
}
sc->sc_clk = clock_get_frequency(faa->fa_node, "stmmaceth");
if (sc->sc_clk > 250000000)
sc->sc_clk = GMAC_GMII_ADDR_CR_DIV_124;
@ -479,7 +506,6 @@ dwge_attach(struct device *parent, struct device *self, void *aux)
timeout_set(&sc->sc_tick, dwge_tick, sc);
timeout_set(&sc->sc_rxto, dwge_rxtick, sc);
ifp = &sc->sc_ac.ac_if;
ifp->if_softc = sc;
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
ifp->if_xflags = IFXF_MPSAFE;
@ -576,14 +602,23 @@ dwge_attach(struct device *parent, struct device *self, void *aux)
dwge_write(sc, GMAC_AXI_BUS_MODE, mode);
}
mii_attach(self, &sc->sc_mii, 0xffffffff, sc->sc_phyloc,
(sc->sc_phyloc == MII_PHY_ANY) ? 0 : MII_OFFSET_ANY, 0);
if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
} else
ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
if (sc->sc_fixed_media == 0) {
mii_attach(self, &sc->sc_mii, 0xffffffff, sc->sc_phyloc,
(sc->sc_phyloc == MII_PHY_ANY) ? 0 : MII_OFFSET_ANY, 0);
if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0,
NULL);
ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
} else
ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
} else {
ifmedia_add(&sc->sc_media, sc->sc_fixed_media, 0, NULL);
ifmedia_set(&sc->sc_media, sc->sc_fixed_media);
/* force a configuration of the clocks/mac */
sc->sc_mii.mii_statchg(self);
}
if_attach(ifp);
ether_ifattach(ifp);
@ -601,6 +636,10 @@ dwge_attach(struct device *parent, struct device *self, void *aux)
dwge_intr, sc, sc->sc_dev.dv_xname);
if (sc->sc_ih == NULL)
printf("%s: can't establish interrupt\n", sc->sc_dev.dv_xname);
sc->sc_ifd.if_node = faa->fa_node;
sc->sc_ifd.if_ifp = ifp;
if_register(&sc->sc_ifd);
}
void
@ -759,7 +798,10 @@ dwge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
case SIOCGIFMEDIA:
case SIOCSIFMEDIA:
error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
if (sc->sc_fixed_media != 0)
error = ENOTTY;
else
error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
break;
case SIOCGIFRXR:
@ -858,11 +900,16 @@ dwge_mii_statchg(struct device *self)
{
struct dwge_softc *sc = (void *)self;
uint32_t conf;
uint64_t media_active;
conf = dwge_read(sc, GMAC_MAC_CONF);
conf &= ~(GMAC_MAC_CONF_PS | GMAC_MAC_CONF_FES);
switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
media_active = sc->sc_fixed_media;
if (media_active == 0)
media_active = sc->sc_mii.mii_media_active;
switch (IFM_SUBTYPE(media_active)) {
case IFM_1000_SX:
case IFM_1000_LX:
case IFM_1000_CX:
@ -886,7 +933,7 @@ dwge_mii_statchg(struct device *self)
return;
conf &= ~GMAC_MAC_CONF_DM;
if ((sc->sc_mii.mii_media_active & IFM_GMASK) == IFM_FDX)
if ((media_active & IFM_GMASK) == IFM_FDX)
conf |= GMAC_MAC_CONF_DM;
/* XXX: RX/TX flow control? */
@ -1178,7 +1225,8 @@ dwge_up(struct dwge_softc *sc)
dwge_write(sc, GMAC_MAC_CONF, dwge_read(sc, GMAC_MAC_CONF) |
GMAC_MAC_CONF_TE | GMAC_MAC_CONF_RE);
timeout_add_sec(&sc->sc_tick, 1);
if (sc->sc_fixed_media == 0)
timeout_add_sec(&sc->sc_tick, 1);
}
void
@ -1190,7 +1238,8 @@ dwge_down(struct dwge_softc *sc)
int i;
timeout_del(&sc->sc_rxto);
timeout_del(&sc->sc_tick);
if (sc->sc_fixed_media == 0)
timeout_del(&sc->sc_tick);
ifp->if_flags &= ~IFF_RUNNING;
ifq_clr_oactive(&ifp->if_snd);
@ -1679,6 +1728,7 @@ dwge_mii_statchg_rockchip(struct device *self)
struct regmap *rm;
uint32_t grf;
uint32_t gmac_clk_sel = 0;
uint64_t media_active;
dwge_mii_statchg(self);
@ -1687,7 +1737,11 @@ dwge_mii_statchg_rockchip(struct device *self)
if (rm == NULL)
return;
switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
media_active = sc->sc_fixed_media;
if (media_active == 0)
media_active = sc->sc_mii.mii_media_active;
switch (IFM_SUBTYPE(media_active)) {
case IFM_10_T:
gmac_clk_sel = sc->sc_clk_sel_2_5;
break;

View file

@ -1,4 +1,4 @@
/* $OpenBSD: if_dwqe_fdt.c,v 1.12 2023/05/30 08:30:01 jsg Exp $ */
/* $OpenBSD: if_dwqe_fdt.c,v 1.13 2023/07/04 12:58:42 kettenis Exp $ */
/*
* Copyright (c) 2008, 2019 Mark Kettenis <kettenis@openbsd.org>
* Copyright (c) 2017, 2022 Patrick Wildt <patrick@blueri.se>
@ -63,6 +63,7 @@
int dwqe_fdt_match(struct device *, void *, void *);
void dwqe_fdt_attach(struct device *, struct device *, void *);
void dwqe_setup_jh7110(struct dwqe_softc *);
void dwqe_setup_rk3568(struct dwqe_softc *);
void dwqe_mii_statchg_rk3568(struct device *);
void dwqe_mii_statchg_rk3588(struct device *);
@ -78,7 +79,8 @@ dwqe_fdt_match(struct device *parent, void *cfdata, void *aux)
{
struct fdt_attach_args *faa = aux;
return OF_is_compatible(faa->fa_node, "snps,dwmac-4.20a");
return OF_is_compatible(faa->fa_node, "snps,dwmac-4.20a") ||
OF_is_compatible(faa->fa_node, "snps,dwmac-5.20");
}
void
@ -103,14 +105,16 @@ dwqe_fdt_attach(struct device *parent, struct device *self, void *aux)
/* Decide GMAC id through address */
switch (faa->fa_reg[0].addr) {
case 0xfe2a0000:
case 0xfe2a0000: /* RK3568 */
case 0x16030000: /* JH7110 */
sc->sc_gmac_id = 0;
break;
case 0xfe010000:
case 0xfe010000: /* RK3568 */
case 0x16040000: /* JH7110 */
sc->sc_gmac_id = 1;
break;
default:
printf(": unknown controller\n");
printf(": unknown controller at 0x%llx\n", faa->fa_reg[0].addr);
return;
}
@ -143,8 +147,13 @@ dwqe_fdt_attach(struct device *parent, struct device *self, void *aux)
/* Enable clocks. */
clock_set_assigned(faa->fa_node);
clock_enable(faa->fa_node, "stmmaceth");
clock_enable(faa->fa_node, "pclk");
reset_deassert(faa->fa_node, "stmmaceth");
if (OF_is_compatible(faa->fa_node, "rockchip,rk3568-gmac")) {
reset_deassert(faa->fa_node, "ahb");
if (OF_is_compatible(faa->fa_node, "starfive,jh7110-dwmac")) {
clock_enable(faa->fa_node, "tx");
clock_enable(faa->fa_node, "gtx");
} else if (OF_is_compatible(faa->fa_node, "rockchip,rk3568-gmac")) {
clock_enable(faa->fa_node, "mac_clk_rx");
clock_enable(faa->fa_node, "mac_clk_tx");
clock_enable(faa->fa_node, "aclk_mac");
@ -153,7 +162,9 @@ dwqe_fdt_attach(struct device *parent, struct device *self, void *aux)
delay(5000);
/* Do hardware specific initializations. */
if (OF_is_compatible(faa->fa_node, "rockchip,rk3568-gmac"))
if (OF_is_compatible(faa->fa_node, "starfive,jh7110-dwmac"))
dwqe_setup_jh7110(sc);
else if (OF_is_compatible(faa->fa_node, "rockchip,rk3568-gmac"))
dwqe_setup_rk3568(sc);
/* Power up PHY. */
@ -292,6 +303,10 @@ dwqe_reset_phy(struct dwqe_softc *sc, uint32_t phy)
free(gpio, M_TEMP, len);
}
/* JH7110 registers */
#define JH7110_PHY_INTF_RGMII 1
#define JH7110_PHY_INTF_RMII 4
/* RK3568 registers */
#define RK3568_GRF_GMACx_CON0(x) (0x0380 + (x) * 0x8)
#define RK3568_GMAC_CLK_RX_DL_CFG(val) ((0x7f << 8) << 16 | ((val) << 8))
@ -304,6 +319,48 @@ dwqe_reset_phy(struct dwqe_softc *sc, uint32_t phy)
void dwqe_mii_statchg_rk3568_task(void *);
/*
 * JH7110-specific setup: program the SoC syscon so the GMAC uses the
 * PHY interface mode (RGMII or RMII) given by the "phy-mode" property.
 *
 * The "starfive,syscon" property carries three cells: the syscon regmap
 * phandle, the register offset within it, and the bit shift of the
 * interface-select field at that offset.
 */
void
dwqe_setup_jh7110(struct dwqe_softc *sc)
{
	struct regmap *rm;
	uint32_t cells[3];
	uint32_t phandle, offset, reg, shift;
	char phy_mode[32];
	uint32_t iface;

	if (OF_getpropintarray(sc->sc_node, "starfive,syscon", cells,
	    sizeof(cells)) != sizeof(cells)) {
		printf("%s: failed to get starfive,syscon\n", __func__);
		return;
	}
	phandle = cells[0];
	offset = cells[1];
	shift = cells[2];
	rm = regmap_byphandle(phandle);
	if (rm == NULL) {
		printf("%s: failed to get regmap\n", __func__);
		return;
	}

	if (OF_getprop(sc->sc_node, "phy-mode", phy_mode,
	    sizeof(phy_mode)) <= 0)
		return;
	/* Only RGMII (incl. internal-delay variant) and RMII are handled. */
	if (strcmp(phy_mode, "rgmii") == 0 ||
	    strcmp(phy_mode, "rgmii-id") == 0) {
		iface = JH7110_PHY_INTF_RGMII;
	} else if (strcmp(phy_mode, "rmii") == 0) {
		iface = JH7110_PHY_INTF_RMII;
	} else
		return;

	/* Read-modify-write the 3-bit interface-select field. */
	reg = regmap_read_4(rm, offset);
	reg &= ~(((1U << 3) - 1) << shift);
	reg |= iface << shift;
	regmap_write_4(rm, offset, reg);
}
void
dwqe_setup_rk3568(struct dwqe_softc *sc)
{

287
sys/dev/fdt/qccpu.c Normal file
View file

@ -0,0 +1,287 @@
/* $OpenBSD: qccpu.c,v 1.2 2023/07/01 18:59:11 drahn Exp $ */
/*
* Copyright (c) 2023 Dale Rahn <drahn@openbsd.org>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/sensors.h>
#include <machine/intr.h>
#include <machine/bus.h>
#include <machine/fdt.h>
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_clock.h>
#include <dev/ofw/fdt.h>
#define CPUF_ENABLE 0x000
#define CPUF_DOMAIN_STATE 0x020
#define CPUF_DOMAIN_STATE_LVAL_M 0xff
#define CPUF_DOMAIN_STATE_LVAL_S 0
#define CPUF_DVCS_CTRL 0x0b0
#define CPUF_DVCS_CTRL_PER_CORE 0x1
#define CPUF_FREQ_LUT 0x100
#define CPUF_FREQ_LUT_SRC_M 0x1
#define CPUF_FREQ_LUT_SRC_S 30
#define CPUF_FREQ_LUT_CORES_M 0x7
#define CPUF_FREQ_LUT_CORES_S 16
#define CPUF_FREQ_LUT_LVAL_M 0xff
#define CPUF_FREQ_LUT_LVAL_S 0
#define CPUF_VOLT_LUT 0x200
#define CPUF_VOLT_LUT_IDX_M 0x2f
#define CPUF_VOLT_LUT_IDX_S 16
#define CPUF_VOLT_LUT_VOLT_M 0xfff
#define CPUF_VOLT_LUT_VOLT_S 0
#define CPUF_PERF_STATE 0x320
#define LUT_ROW_SIZE 4
struct cpu_freq_tbl {
uint32_t driver_data;
uint32_t frequency;
};
#define NUM_GROUP 2
#define MAX_LUT 40
#define XO_FREQ_HZ 19200000
struct qccpu_softc {
struct device sc_dev;
bus_space_tag_t sc_iot;
bus_space_handle_t sc_ioh[NUM_GROUP];
int sc_node;
struct clock_device sc_cd;
uint32_t sc_freq[NUM_GROUP][MAX_LUT];
int sc_num_lut[NUM_GROUP];
struct ksensordev sc_sensordev;
struct ksensor sc_hz_sensor[NUM_GROUP];
};
#define DEVNAME(sc) (sc)->sc_dev.dv_xname
int qccpu_match(struct device *, void *, void *);
void qccpu_attach(struct device *, struct device *, void *);
void qccpu_enable(void *, uint32_t *, int);
int qccpu_set_frequency(void *, uint32_t *, uint32_t);
uint32_t qccpu_get_frequency(void *, uint32_t *);
uint32_t qccpu_lut_to_freq(struct qccpu_softc *, int, uint32_t);
uint32_t qccpu_lut_to_cores(struct qccpu_softc *, int, uint32_t);
void qccpu_refresh_sensor(void *arg);
void qccpu_collect_lut(struct qccpu_softc *sc, int);
const struct cfattach qccpu_ca = {
sizeof (struct qccpu_softc), qccpu_match, qccpu_attach
};
struct cfdriver qccpu_cd = {
NULL, "qccpu", DV_DULL
};
/*
 * Autoconf match: accept only Qualcomm EPSS cpufreq nodes.
 */
int
qccpu_match(struct device *parent, void *match, void *aux)
{
	struct fdt_attach_args *fa = aux;

	if (OF_is_compatible(fa->fa_node, "qcom,cpufreq-epss"))
		return 1;
	return 0;
}
/*
 * Attach: map the per-cluster register windows, cache the hardware
 * frequency LUTs, register as an FDT clock device, and attach one
 * frequency sensor per cluster.
 */
void
qccpu_attach(struct device *parent, struct device *self, void *aux)
{
	struct qccpu_softc *sc = (struct qccpu_softc *)self;
	struct fdt_attach_args *faa = aux;

	/* One register window per cluster (NUM_GROUP == 2) is required. */
	if (faa->fa_nreg < 2) {
		printf(": no registers\n");
		return;
	}

	sc->sc_iot = faa->fa_iot;

	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh[0])) {
		printf(": can't map registers (cluster0)\n");
		return;
	}

	if (bus_space_map(sc->sc_iot, faa->fa_reg[1].addr,
	    faa->fa_reg[1].size, 0, &sc->sc_ioh[1])) {
		printf(": can't map registers (cluster1)\n");
		return;
	}

	sc->sc_node = faa->fa_node;
	printf("\n");

	/* Read and cache each cluster's frequency lookup table. */
	qccpu_collect_lut(sc, 0);
	qccpu_collect_lut(sc, 1);

	/* Expose get/set frequency through the FDT clock interface. */
	sc->sc_cd.cd_node = faa->fa_node;
	sc->sc_cd.cd_cookie = sc;
	sc->sc_cd.cd_get_frequency = qccpu_get_frequency;
	sc->sc_cd.cd_set_frequency = qccpu_set_frequency;
	clock_register(&sc->sc_cd);

	/* One SENSOR_FREQ per cluster; periodic refresh (period 1). */
	strlcpy(sc->sc_sensordev.xname, sc->sc_dev.dv_xname,
	    sizeof(sc->sc_sensordev.xname));
	sc->sc_hz_sensor[0].type = SENSOR_FREQ;
	sensor_attach(&sc->sc_sensordev, &sc->sc_hz_sensor[0]);
	sc->sc_hz_sensor[1].type = SENSOR_FREQ;
	sensor_attach(&sc->sc_sensordev, &sc->sc_hz_sensor[1]);
	sensordev_install(&sc->sc_sensordev);
	sensor_task_register(sc, qccpu_refresh_sensor, 1);
}
/*
 * Read cluster `group`'s frequency lookup table out of the EPSS register
 * block and cache it in sc->sc_freq[group].
 *
 * The hardware marks the end of the table by repeating the previous row,
 * so scan until two consecutive rows match or the table is full.
 * sc->sc_num_lut[group] is now set on every exit path; the previous code
 * left it at 0 when all MAX_LUT rows were distinct, which made the
 * (full) table unusable by qccpu_set_frequency().
 */
void
qccpu_collect_lut(struct qccpu_softc *sc, int group)
{
	uint32_t prev_freq = 0;	/* unsigned, to match the register value */
	uint32_t freq;
	int idx;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh[group];

	for (idx = 0; ; idx++) {
		freq = bus_space_read_4(iot, ioh,
		    CPUF_FREQ_LUT + idx * LUT_ROW_SIZE);

		/* A repeated row terminates the table. */
		if (idx != 0 && prev_freq == freq) {
			sc->sc_num_lut[group] = idx;
			break;
		}

		sc->sc_freq[group][idx] = freq;

#ifdef DEBUG
		printf("%s: %d: %x %u\n", DEVNAME(sc), idx, freq,
		    qccpu_lut_to_freq(sc, idx, group));
#endif /* DEBUG */

		prev_freq = freq;
		if (idx >= MAX_LUT - 1) {
			/* Full table: record the entry count here too. */
			sc->sc_num_lut[group] = MAX_LUT;
			break;
		}
	}
}
/*
 * Clock-device cd_get_frequency hook.  cells[0] selects the cluster;
 * the current frequency is the DOMAIN_STATE LVAL field times the XO
 * crystal frequency.  Returns 0 on a bad cluster index.
 */
uint32_t
qccpu_get_frequency(void *cookie, uint32_t *cells)
{
	struct qccpu_softc *sc = cookie;
	uint32_t group = cells[0];
	uint32_t state;

	if (group >= NUM_GROUP) {
		printf("%s: bad cell %d\n", __func__, cells[0]);
		return 0;
	}

	state = bus_space_read_4(sc->sc_iot, sc->sc_ioh[group],
	    CPUF_DOMAIN_STATE);
	return ((state >> CPUF_DOMAIN_STATE_LVAL_S) &
	    CPUF_DOMAIN_STATE_LVAL_M) * XO_FREQ_HZ;
}
/*
 * Clock-device cd_set_frequency hook: pick the LUT index whose
 * frequency best matches `freq` and write it to the perf-state
 * register(s) of cluster cells[0].  Returns 0 on success, 1 on a bad
 * cluster index.
 */
int
qccpu_set_frequency(void *cookie, uint32_t *cells, uint32_t freq)
{
	struct qccpu_softc *sc = cookie;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh;
	int index = 0;
	int numcores, i;
	uint32_t group;

	/* cells[0] selects the CPU cluster. */
	if (cells[0] >= NUM_GROUP) {
		printf("%s: bad cell %d\n", __func__, cells[0]);
		return 1;
	}
	group = cells[0];
	ioh = sc->sc_ioh[group];

	/*
	 * Walk the cached LUT (sorted scan): stop on an exact match, or
	 * back off one entry when we pass `freq` so we never exceed it.
	 */
	while (index < sc->sc_num_lut[group]) {
		if (freq == qccpu_lut_to_freq(sc, index, group))
			break;

		if (freq < qccpu_lut_to_freq(sc, index, group)) {
			/* select next slower if not match, not zero */
			if (index != 0)
				index = index - 1;
			break;
		}
		index++;
	}

#ifdef DEBUG
	printf("%s called freq %u index %d\n", __func__, freq, index);
#endif /* DEBUG */

	/*
	 * In per-core DVCS mode the perf state must be written for each
	 * core counted in the LUT entry; otherwise one write suffices.
	 */
	if ((bus_space_read_4(iot, ioh, CPUF_DVCS_CTRL) &
	    CPUF_DVCS_CTRL_PER_CORE) != 0)
		numcores = qccpu_lut_to_cores(sc, index, group);
	else
		numcores = 1;
	for (i = 0; i < numcores; i++)
		bus_space_write_4(iot, ioh, CPUF_PERF_STATE + i * 4, index);

	return 0;
}
/*
 * Convert a LUT row to a frequency in Hz: the LVAL field of the row is a
 * multiplier of the XO reference clock.
 */
uint32_t
qccpu_lut_to_freq(struct qccpu_softc *sc, int index, uint32_t group)
{
	uint32_t lval;

	lval = (sc->sc_freq[group][index] >> CPUF_FREQ_LUT_LVAL_S) &
	    CPUF_FREQ_LUT_LVAL_M;
	return lval * XO_FREQ_HZ;
}
/* Extract the core-count field from a cached LUT row. */
uint32_t
qccpu_lut_to_cores(struct qccpu_softc *sc, int index, uint32_t group)
{
	uint32_t row;

	row = sc->sc_freq[group][index];
	return (row >> CPUF_FREQ_LUT_CORES_S) & CPUF_FREQ_LUT_CORES_M;
}
/*
 * Sensor-task callback: refresh the frequency sensor of every cluster
 * from the hardware DOMAIN_STATE register.
 */
void
qccpu_refresh_sensor(void *arg)
{
	struct qccpu_softc *sc = arg;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh;
	uint32_t lval;
	int group;

	for (group = 0; group < NUM_GROUP; group++) {
		ioh = sc->sc_ioh[group];

		/* Current LVAL multiplier of this cluster. */
		lval = (bus_space_read_4(iot, ioh, CPUF_DOMAIN_STATE)
		    >> CPUF_DOMAIN_STATE_LVAL_S) & CPUF_DOMAIN_STATE_LVAL_M;

		/* Sensor value is in micro-Hz, hence the 10^6 scaling. */
		sc->sc_hz_sensor[group].value = 1000000ULL * lval *
		    XO_FREQ_HZ;
	}
}

View file

@ -1,4 +1,4 @@
/* $OpenBSD: qcpas.c,v 1.1 2023/06/10 18:31:38 patrick Exp $ */
/* $OpenBSD: qcpas.c,v 1.2 2023/07/01 15:50:18 drahn Exp $ */
/*
* Copyright (c) 2023 Patrick Wildt <patrick@blueri.se>
*
@ -1111,6 +1111,7 @@ struct battmgr_bat_status {
uint32_t rate;
uint32_t battery_voltage;
uint32_t power_state;
#define BATTMGR_PWR_STATE_AC_ON (1 << 0)
uint32_t charging_source;
#define BATTMGR_CHARGING_SOURCE_AC 1
#define BATTMGR_CHARGING_SOURCE_USB 2
@ -1175,6 +1176,7 @@ qcpas_pmic_rtr_recv(void *cookie, uint8_t *buf, int len)
{
struct pmic_glink_hdr hdr;
uint32_t notification;
extern int hw_power;
if (len < sizeof(hdr)) {
printf("%s: pmic glink message too small\n",
@ -1256,6 +1258,14 @@ qcpas_pmic_rtr_recv(void *cookie, uint8_t *buf, int len)
info->battery_state = APM_BATT_CHARGING;
else if (bat->battery_state & BATTMGR_BAT_STATE_CRITICAL_LOW)
info->battery_state = APM_BATT_CRITICAL;
if (bat->power_state & BATTMGR_PWR_STATE_AC_ON) {
info->ac_state = APM_AC_ON;
hw_power = 1;
} else {
info->ac_state = APM_AC_OFF;
hw_power = 0;
}
#endif
free(bat, M_TEMP, sizeof(*bat));
break;

View file

@ -1,4 +1,4 @@
/* $OpenBSD: qcsmptp.c,v 1.1 2023/05/19 21:26:10 patrick Exp $ */
/* $OpenBSD: qcsmptp.c,v 1.2 2023/07/04 14:32:21 patrick Exp $ */
/*
* Copyright (c) 2023 Patrick Wildt <patrick@blueri.se>
*
@ -182,6 +182,18 @@ qcsmptp_deferred(struct device *self)
return;
}
if (qcsmem_alloc(sc->sc_remote_pid, sc->sc_smem_id[0],
sizeof(*sc->sc_in)) != 0) {
printf(": can't alloc smp2p item\n");
return;
}
sc->sc_in = qcsmem_get(sc->sc_remote_pid, sc->sc_smem_id[0], NULL);
if (sc->sc_in == NULL) {
printf(": can't get smp2p item\n");
return;
}
if (qcsmem_alloc(sc->sc_remote_pid, sc->sc_smem_id[1],
sizeof(*sc->sc_out)) != 0) {
printf(": can't alloc smp2p item\n");
@ -254,15 +266,6 @@ qcsmptp_intr(void *arg)
uint32_t changed, val;
int do_ack = 0, i;
/* Inbound item exists as soon as remoteproc is up. */
if (sc->sc_in == NULL)
sc->sc_in = qcsmem_get(sc->sc_remote_pid,
sc->sc_smem_id[0], NULL);
if (sc->sc_in == NULL) {
printf("%s: can't get smp2p item\n", sc->sc_dev.dv_xname);
return 1;
}
/* Do initial feature negotiation if inbound is new. */
if (!sc->sc_negotiated) {
if (sc->sc_in->version != sc->sc_out->version)

View file

@ -1,4 +1,4 @@
/* $OpenBSD: dwqe.c,v 1.8 2023/04/24 01:33:32 dlg Exp $ */
/* $OpenBSD: dwqe.c,v 1.10 2023/07/04 12:48:42 kettenis Exp $ */
/*
* Copyright (c) 2008, 2019 Mark Kettenis <kettenis@openbsd.org>
* Copyright (c) 2017, 2022 Patrick Wildt <patrick@blueri.se>
@ -444,7 +444,7 @@ dwqe_mii_readreg(struct device *self, int phy, int reg)
int n;
dwqe_write(sc, GMAC_MAC_MDIO_ADDR,
sc->sc_clk << GMAC_MAC_MDIO_ADDR_CR_SHIFT |
(sc->sc_clk << GMAC_MAC_MDIO_ADDR_CR_SHIFT) |
(phy << GMAC_MAC_MDIO_ADDR_PA_SHIFT) |
(reg << GMAC_MAC_MDIO_ADDR_RDA_SHIFT) |
GMAC_MAC_MDIO_ADDR_GOC_READ |
@ -468,7 +468,7 @@ dwqe_mii_writereg(struct device *self, int phy, int reg, int val)
dwqe_write(sc, GMAC_MAC_MDIO_DATA, val);
dwqe_write(sc, GMAC_MAC_MDIO_ADDR,
sc->sc_clk << GMAC_MAC_MDIO_ADDR_CR_SHIFT |
(sc->sc_clk << GMAC_MAC_MDIO_ADDR_CR_SHIFT) |
(phy << GMAC_MAC_MDIO_ADDR_PA_SHIFT) |
(reg << GMAC_MAC_MDIO_ADDR_RDA_SHIFT) |
GMAC_MAC_MDIO_ADDR_GOC_WRITE |
@ -672,15 +672,21 @@ dwqe_rx_proc(struct dwqe_softc *sc)
len, BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(sc->sc_dmat, rxb->tb_map);
/* Strip off CRC. */
len -= ETHER_CRC_LEN;
KASSERT(len > 0);
m = rxb->tb_m;
rxb->tb_m = NULL;
m->m_pkthdr.len = m->m_len = len;
ml_enqueue(&ml, m);
if (rxd->sd_tdes3 & RDES3_ES) {
ifp->if_ierrors++;
m_freem(m);
} else {
/* Strip off CRC. */
len -= ETHER_CRC_LEN;
KASSERT(len > 0);
m->m_pkthdr.len = m->m_len = len;
ml_enqueue(&ml, m);
}
put++;
if (sc->sc_rx_cons == (DWQE_NRXDESC - 1))
@ -698,7 +704,6 @@ dwqe_rx_proc(struct dwqe_softc *sc)
bus_dmamap_sync(sc->sc_dmat, DWQE_DMA_MAP(sc->sc_rxring), 0,
DWQE_DMA_LEN(sc->sc_rxring),
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
void

View file

@ -1,4 +1,4 @@
/* $OpenBSD: mfi.c,v 1.189 2023/05/25 19:35:58 kurt Exp $ */
/* $OpenBSD: mfi.c,v 1.190 2023/07/06 10:17:43 visa Exp $ */
/*
* Copyright (c) 2006 Marco Peereboom <marco@peereboom.us>
*
@ -925,8 +925,9 @@ mfi_poll(struct mfi_softc *sc, struct mfi_ccb *ccb)
void
mfi_exec(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
struct mutex m = MUTEX_INITIALIZER_FLAGS(IPL_BIO, __MTX_NAME,
MTX_NOWITNESS);
struct mutex m;
mtx_init(&m, IPL_BIO);
#ifdef DIAGNOSTIC
if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)

View file

@ -1,4 +1,4 @@
/* $OpenBSD: mpi.c,v 1.225 2023/05/25 19:35:58 kurt Exp $ */
/* $OpenBSD: mpi.c,v 1.226 2023/07/06 10:17:43 visa Exp $ */
/*
* Copyright (c) 2005, 2006, 2009 David Gwynne <dlg@openbsd.org>
@ -1263,10 +1263,11 @@ mpi_poll_done(struct mpi_ccb *ccb)
void
mpi_wait(struct mpi_softc *sc, struct mpi_ccb *ccb)
{
struct mutex cookie = MUTEX_INITIALIZER_FLAGS(
IPL_BIO, __MTX_NAME, MTX_NOWITNESS);
struct mutex cookie;
void (*done)(struct mpi_ccb *);
mtx_init(&cookie, IPL_BIO);
done = ccb->ccb_done;
ccb->ccb_done = mpi_wait_done;
ccb->ccb_cookie = &cookie;

View file

@ -2436,6 +2436,10 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
long timeout = msecs_to_jiffies(2000);
int r;
/* No valid flags defined yet */
if (args->in.flags)
return -EINVAL;
switch (args->in.op) {
case AMDGPU_VM_OP_RESERVE_VMID:
/* We only have requirement to reserve vmid from gfxhub */

View file

@ -348,6 +348,35 @@ static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
return false;
}
/**
* update_planes_and_stream_adapter() - Send planes to be updated in DC
*
* DC has a generic way to update planes and stream via
* dc_update_planes_and_stream function; however, DM might need some
* adjustments and preparation before calling it. This function is a wrapper
* for the dc_update_planes_and_stream that does any required configuration
* before passing control to DC.
*/
static inline bool update_planes_and_stream_adapter(struct dc *dc,
						    int update_type,
						    int planes_count,
						    struct dc_stream_state *stream,
						    struct dc_stream_update *stream_update,
						    struct dc_surface_update *array_of_surface_update)
{
	/*
	 * Previous frame finished and HW is ready for optimization.
	 */
	if (update_type == UPDATE_TYPE_FAST)
		dc_post_update_surfaces_to_stream(dc);

	/* Hand the plane/stream updates to DC proper. */
	return dc_update_planes_and_stream(dc,
					   array_of_surface_update,
					   planes_count,
					   stream,
					   stream_update);
}
/**
* dm_pflip_high_irq() - Handle pageflip interrupt
* @interrupt_params: ignored
@ -2634,10 +2663,13 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
bundle->surface_updates[m].surface->force_full_update =
true;
}
dc_commit_updates_for_stream(
dm->dc, bundle->surface_updates,
dc_state->stream_status->plane_count,
dc_state->streams[k], &bundle->stream_update, dc_state);
update_planes_and_stream_adapter(dm->dc,
UPDATE_TYPE_FULL,
dc_state->stream_status->plane_count,
dc_state->streams[k],
&bundle->stream_update,
bundle->surface_updates);
}
cleanup:
@ -7874,6 +7906,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
bundle->stream_update.abm_level = &acrtc_state->abm_level;
mutex_lock(&dm->dc_lock);
if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
acrtc_state->stream->link->psr_settings.psr_allow_active)
amdgpu_dm_psr_disable(acrtc_state->stream);
mutex_unlock(&dm->dc_lock);
/*
* If FreeSync state on the stream has changed then we need to
* re-adjust the min/max bounds now that DC doesn't handle this
@ -7887,16 +7925,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
}
mutex_lock(&dm->dc_lock);
if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
acrtc_state->stream->link->psr_settings.psr_allow_active)
amdgpu_dm_psr_disable(acrtc_state->stream);
dc_commit_updates_for_stream(dm->dc,
bundle->surface_updates,
planes_count,
acrtc_state->stream,
&bundle->stream_update,
dc_state);
update_planes_and_stream_adapter(dm->dc,
acrtc_state->update_type,
planes_count,
acrtc_state->stream,
&bundle->stream_update,
bundle->surface_updates);
/**
* Enable or disable the interrupts on the backend.
@ -8338,12 +8372,11 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
mutex_lock(&dm->dc_lock);
dc_commit_updates_for_stream(dm->dc,
dummy_updates,
status->plane_count,
dm_new_crtc_state->stream,
&stream_update,
dc_state);
dc_update_planes_and_stream(dm->dc,
dummy_updates,
status->plane_count,
dm_new_crtc_state->stream,
&stream_update);
mutex_unlock(&dm->dc_lock);
}

View file

@ -401,8 +401,13 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
{
int i;
if (memcmp(adjust, &stream->adjust, sizeof(struct dc_crtc_timing_adjust)) == 0)
return true;
/*
* Don't adjust DRR while there's bandwidth optimizations pending to
* avoid conflicting with firmware updates.
*/
if (dc->ctx->dce_version > DCE_VERSION_MAX)
if (dc->optimized_required || dc->wm_optimized_required)
return false;
stream->adjust.v_total_max = adjust->v_total_max;
stream->adjust.v_total_mid = adjust->v_total_mid;
@ -2024,27 +2029,33 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
post_surface_trace(dc);
if (dc->ctx->dce_version >= DCE_VERSION_MAX)
TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
else
/*
* Only relevant for DCN behavior where we can guarantee the optimization
* is safe to apply - retain the legacy behavior for DCE.
*/
if (dc->ctx->dce_version < DCE_VERSION_MAX)
TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
else {
TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
if (is_flip_pending_in_pipes(dc, context))
return;
if (is_flip_pending_in_pipes(dc, context))
return;
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].stream == NULL ||
context->res_ctx.pipe_ctx[i].plane_state == NULL) {
context->res_ctx.pipe_ctx[i].pipe_idx = i;
dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
}
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].stream == NULL ||
context->res_ctx.pipe_ctx[i].plane_state == NULL) {
context->res_ctx.pipe_ctx[i].pipe_idx = i;
dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
}
process_deferred_updates(dc);
process_deferred_updates(dc);
dc->hwss.optimize_bandwidth(dc, context);
dc->hwss.optimize_bandwidth(dc, context);
if (dc->debug.enable_double_buffered_dsc_pg_support)
dc->hwss.update_dsc_pg(dc, context, true);
if (dc->debug.enable_double_buffered_dsc_pg_support)
dc->hwss.update_dsc_pg(dc, context, true);
}
dc->optimized_required = false;
dc->wm_optimized_required = false;
@ -3869,12 +3880,9 @@ void dc_commit_updates_for_stream(struct dc *dc,
if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
new_pipe->plane_state->force_full_update = true;
}
} else if (update_type == UPDATE_TYPE_FAST && dc_ctx->dce_version >= DCE_VERSION_MAX) {
} else if (update_type == UPDATE_TYPE_FAST) {
/*
* Previous frame finished and HW is ready for optimization.
*
* Only relevant for DCN behavior where we can guarantee the optimization
* is safe to apply - retain the legacy behavior for DCE.
*/
dc_post_update_surfaces_to_stream(dc);
}

View file

@ -552,7 +552,6 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_set_domain *args = data;
struct drm_gem_object *gobj;
struct radeon_bo *robj;
int r;
/* for now if someone requests domain CPU -
@ -565,13 +564,12 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
up_read(&rdev->exclusive_lock);
return -ENOENT;
}
robj = gem_to_radeon_bo(gobj);
r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
drm_gem_object_put(gobj);
up_read(&rdev->exclusive_lock);
r = radeon_gem_handle_lockup(robj->rdev, r);
r = radeon_gem_handle_lockup(rdev, r);
return r;
}

View file

@ -1,4 +1,4 @@
/* $OpenBSD: if_bge.c,v 1.400 2023/01/18 23:31:37 kettenis Exp $ */
/* $OpenBSD: if_bge.c,v 1.401 2023/07/04 10:22:39 jmatthew Exp $ */
/*
* Copyright (c) 2001 Wind River Systems
@ -74,6 +74,7 @@
#include "bpfilter.h"
#include "vlan.h"
#include "kstat.h"
#include <sys/param.h>
#include <sys/systm.h>
@ -85,6 +86,7 @@
#include <sys/timeout.h>
#include <sys/socket.h>
#include <sys/atomic.h>
#include <sys/kstat.h>
#include <net/if.h>
#include <net/if_media.h>
@ -203,6 +205,58 @@ void bge_ape_unlock(struct bge_softc *, int);
void bge_ape_send_event(struct bge_softc *, uint32_t);
void bge_ape_driver_state_change(struct bge_softc *, int);
#if NKSTAT > 0
void bge_kstat_attach(struct bge_softc *);
enum {
bge_stat_out_octets = 0,
bge_stat_collisions,
bge_stat_xon_sent,
bge_stat_xoff_sent,
bge_stat_xmit_errors,
bge_stat_coll_frames,
bge_stat_multicoll_frames,
bge_stat_deferred_xmit,
bge_stat_excess_coll,
bge_stat_late_coll,
bge_stat_out_ucast_pkt,
bge_stat_out_mcast_pkt,
bge_stat_out_bcast_pkt,
bge_stat_in_octets,
bge_stat_fragments,
bge_stat_in_ucast_pkt,
bge_stat_in_mcast_pkt,
bge_stat_in_bcast_pkt,
bge_stat_fcs_errors,
bge_stat_align_errors,
bge_stat_xon_rcvd,
bge_stat_xoff_rcvd,
bge_stat_ctrl_frame_rcvd,
bge_stat_xoff_entered,
bge_stat_too_long_frames,
bge_stat_jabbers,
bge_stat_too_short_pkts,
bge_stat_dma_rq_full,
bge_stat_dma_hprq_full,
bge_stat_sdc_queue_full,
bge_stat_nic_sendprod_set,
bge_stat_status_updated,
bge_stat_irqs,
bge_stat_avoided_irqs,
bge_stat_tx_thresh_hit,
bge_stat_filtdrop,
bge_stat_dma_wrq_full,
bge_stat_dma_hpwrq_full,
bge_stat_out_of_bds,
bge_stat_if_in_drops,
bge_stat_if_in_errors,
bge_stat_rx_thresh_hit,
};
#endif
#ifdef BGE_DEBUG
#define DPRINTF(x) do { if (bgedebug) printf x; } while (0)
#define DPRINTFN(n,x) do { if (bgedebug >= (n)) printf x; } while (0)
@ -2993,6 +3047,12 @@ bge_attach(struct device *parent, struct device *self, void *aux)
else
sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
mtx_init(&sc->bge_kstat_mtx, IPL_SOFTCLOCK);
#if NKSTAT > 0
if (BGE_IS_5705_PLUS(sc))
bge_kstat_attach(sc);
#endif
/* Set up ifnet structure */
ifp = &sc->arpcom.ac_if;
ifp->if_softc = sc;
@ -3767,9 +3827,11 @@ bge_tick(void *xsc)
s = splnet();
if (BGE_IS_5705_PLUS(sc))
if (BGE_IS_5705_PLUS(sc)) {
mtx_enter(&sc->bge_kstat_mtx);
bge_stats_update_regs(sc);
else
mtx_leave(&sc->bge_kstat_mtx);
} else
bge_stats_update(sc);
if (sc->bge_flags & BGE_FIBER_TBI) {
@ -3799,12 +3861,16 @@ void
bge_stats_update_regs(struct bge_softc *sc)
{
struct ifnet *ifp = &sc->arpcom.ac_if;
uint32_t collisions, discards, inerrors;
uint32_t ucast, mcast, bcast;
u_int32_t val;
#if NKSTAT > 0
struct kstat_kv *kvs = sc->bge_kstat->ks_data;
#endif
sc->bge_tx_collisions += CSR_READ_4(sc, BGE_MAC_STATS +
collisions = CSR_READ_4(sc, BGE_MAC_STATS +
offsetof(struct bge_mac_stats_regs, etherStatsCollisions));
sc->bge_rx_overruns += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
/*
* XXX
* Unlike other controllers, the BGE_RXLP_LOCSTAT_IFIN_DROPS counter
@ -3826,23 +3892,22 @@ bge_stats_update_regs(struct bge_softc *sc)
BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5762 &&
sc->bge_chipid != BGE_CHIPID_BCM5719_A0 &&
sc->bge_chipid != BGE_CHIPID_BCM5720_A0)
sc->bge_rx_discards += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
discards = CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
else
discards = 0;
sc->bge_rx_inerrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
inerrors = CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
ifp->if_collisions = sc->bge_tx_collisions;
ifp->if_ierrors = sc->bge_rx_discards + sc->bge_rx_inerrors;
ifp->if_collisions += collisions;
ifp->if_ierrors += discards + inerrors;
ucast = CSR_READ_4(sc, BGE_MAC_STATS +
offsetof(struct bge_mac_stats_regs, ifHCOutUcastPkts));
mcast = CSR_READ_4(sc, BGE_MAC_STATS +
offsetof(struct bge_mac_stats_regs, ifHCOutMulticastPkts));
bcast = CSR_READ_4(sc, BGE_MAC_STATS +
offsetof(struct bge_mac_stats_regs, ifHCOutBroadcastPkts));
if (sc->bge_flags & BGE_RDMA_BUG) {
u_int32_t val, ucast, mcast, bcast;
ucast = CSR_READ_4(sc, BGE_MAC_STATS +
offsetof(struct bge_mac_stats_regs, ifHCOutUcastPkts));
mcast = CSR_READ_4(sc, BGE_MAC_STATS +
offsetof(struct bge_mac_stats_regs, ifHCOutMulticastPkts));
bcast = CSR_READ_4(sc, BGE_MAC_STATS +
offsetof(struct bge_mac_stats_regs, ifHCOutBroadcastPkts));
/*
* If controller transmitted more than BGE_NUM_RDMA_CHANNELS
* frames, it's safe to disable workaround for DMA engine's
@ -3858,6 +3923,15 @@ bge_stats_update_regs(struct bge_softc *sc)
sc->bge_flags &= ~BGE_RDMA_BUG;
}
}
#if NKSTAT > 0
kstat_kv_u32(&kvs[bge_stat_out_ucast_pkt]) += ucast;
kstat_kv_u32(&kvs[bge_stat_out_mcast_pkt]) += mcast;
kstat_kv_u32(&kvs[bge_stat_out_bcast_pkt]) += bcast;
kstat_kv_u32(&kvs[bge_stat_collisions]) += collisions;
kstat_kv_u32(&kvs[bge_stat_if_in_drops]) += discards;
kstat_kv_u32(&kvs[bge_stat_if_in_errors]) += inerrors;
#endif
}
void
@ -4814,3 +4888,151 @@ bge_link_upd(struct bge_softc *sc)
BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
BGE_MACSTAT_LINK_CHANGED);
}
#if NKSTAT > 0
struct bge_stat {
char name[KSTAT_KV_NAMELEN];
enum kstat_kv_unit unit;
bus_size_t reg;
};
#define MACREG(_f) \
BGE_MAC_STATS + offsetof(struct bge_mac_stats_regs, _f)
static const struct bge_stat bge_kstat_tpl[] = {
/* MAC stats */
[bge_stat_out_octets] = { "out octets", KSTAT_KV_U_BYTES,
MACREG(ifHCOutOctets) },
[bge_stat_collisions] = { "collisions", KSTAT_KV_U_NONE, 0 },
[bge_stat_xon_sent] = { "xon sent", KSTAT_KV_U_NONE,
MACREG(outXonSent) },
[bge_stat_xoff_sent] = { "xoff sent", KSTAT_KV_U_NONE,
MACREG(outXonSent) },
[bge_stat_xmit_errors] = { "xmit errors", KSTAT_KV_U_NONE,
MACREG(dot3StatsInternalMacTransmitErrors) },
[bge_stat_coll_frames] = { "coll frames", KSTAT_KV_U_PACKETS,
MACREG(dot3StatsSingleCollisionFrames) },
[bge_stat_multicoll_frames] = { "multicoll frames", KSTAT_KV_U_PACKETS,
MACREG(dot3StatsMultipleCollisionFrames) },
[bge_stat_deferred_xmit] = { "deferred xmit", KSTAT_KV_U_NONE,
MACREG(dot3StatsDeferredTransmissions) },
[bge_stat_excess_coll] = { "excess coll", KSTAT_KV_U_NONE,
MACREG(dot3StatsExcessiveCollisions) },
[bge_stat_late_coll] = { "late coll", KSTAT_KV_U_NONE,
MACREG(dot3StatsLateCollisions) },
[bge_stat_out_ucast_pkt] = { "out ucast pkts", KSTAT_KV_U_PACKETS, 0 },
[bge_stat_out_mcast_pkt] = { "out mcast pkts", KSTAT_KV_U_PACKETS, 0 },
[bge_stat_out_bcast_pkt] = { "out bcast pkts", KSTAT_KV_U_PACKETS, 0 },
[bge_stat_in_octets] = { "in octets", KSTAT_KV_U_BYTES,
MACREG(ifHCInOctets) },
[bge_stat_fragments] = { "fragments", KSTAT_KV_U_NONE,
MACREG(etherStatsFragments) },
[bge_stat_in_ucast_pkt] = { "in ucast pkts", KSTAT_KV_U_PACKETS,
MACREG(ifHCInUcastPkts) },
[bge_stat_in_mcast_pkt] = { "in mcast pkts", KSTAT_KV_U_PACKETS,
MACREG(ifHCInMulticastPkts) },
[bge_stat_in_bcast_pkt] = { "in bcast pkts", KSTAT_KV_U_PACKETS,
MACREG(ifHCInBroadcastPkts) },
[bge_stat_fcs_errors] = { "FCS errors", KSTAT_KV_U_NONE,
MACREG(dot3StatsFCSErrors) },
[bge_stat_align_errors] = { "align errors", KSTAT_KV_U_NONE,
MACREG(dot3StatsAlignmentErrors) },
[bge_stat_xon_rcvd] = { "xon rcvd", KSTAT_KV_U_NONE,
MACREG(xonPauseFramesReceived) },
[bge_stat_xoff_rcvd] = { "xoff rcvd", KSTAT_KV_U_NONE,
MACREG(xoffPauseFramesReceived) },
[bge_stat_ctrl_frame_rcvd] = { "ctrlframes rcvd", KSTAT_KV_U_NONE,
MACREG(macControlFramesReceived) },
[bge_stat_xoff_entered] = { "xoff entered", KSTAT_KV_U_NONE,
MACREG(xoffStateEntered) },
[bge_stat_too_long_frames] = { "too long frames", KSTAT_KV_U_NONE,
MACREG(dot3StatsFramesTooLong) },
[bge_stat_jabbers] = { "jabbers", KSTAT_KV_U_NONE,
MACREG(etherStatsJabbers) },
[bge_stat_too_short_pkts] = { "too short pkts", KSTAT_KV_U_NONE,
MACREG(etherStatsUndersizePkts) },
/* Send Data Initiator stats */
[bge_stat_dma_rq_full] = { "DMA RQ full", KSTAT_KV_U_NONE,
BGE_LOCSTATS_DMA_RQ_FULL },
[bge_stat_dma_hprq_full] = { "DMA HPRQ full", KSTAT_KV_U_NONE,
BGE_LOCSTATS_DMA_HIPRIO_RQ_FULL },
[bge_stat_sdc_queue_full] = { "SDC queue full", KSTAT_KV_U_NONE,
BGE_LOCSTATS_SDC_QUEUE_FULL },
[bge_stat_nic_sendprod_set] = { "sendprod set", KSTAT_KV_U_NONE,
BGE_LOCSTATS_NIC_SENDPROD_SET },
[bge_stat_status_updated] = { "stats updated", KSTAT_KV_U_NONE,
BGE_LOCSTATS_STATS_UPDATED },
[bge_stat_irqs] = { "irqs", KSTAT_KV_U_NONE, BGE_LOCSTATS_IRQS },
[bge_stat_avoided_irqs] = { "avoided irqs", KSTAT_KV_U_NONE,
BGE_LOCSTATS_AVOIDED_IRQS },
[bge_stat_tx_thresh_hit] = { "tx thresh hit", KSTAT_KV_U_NONE,
BGE_LOCSTATS_TX_THRESH_HIT },
/* Receive List Placement stats */
[bge_stat_filtdrop] = { "filtdrop", KSTAT_KV_U_NONE,
BGE_RXLP_LOCSTAT_FILTDROP },
[bge_stat_dma_wrq_full] = { "DMA WRQ full", KSTAT_KV_U_NONE,
BGE_RXLP_LOCSTAT_DMA_WRQ_FULL },
[bge_stat_dma_hpwrq_full] = { "DMA HPWRQ full", KSTAT_KV_U_NONE,
BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL },
[bge_stat_out_of_bds] = { "out of BDs", KSTAT_KV_U_NONE,
BGE_RXLP_LOCSTAT_OUT_OF_BDS },
[bge_stat_if_in_drops] = { "if in drops", KSTAT_KV_U_NONE, 0 },
[bge_stat_if_in_errors] = { "if in errors", KSTAT_KV_U_NONE, 0 },
[bge_stat_rx_thresh_hit] = { "rx thresh hit", KSTAT_KV_U_NONE,
BGE_RXLP_LOCSTAT_RXTHRESH_HIT },
};
/*
 * kstat read callback: refresh the driver's statistics snapshot.
 * Runs with sc->bge_kstat_mtx held (set via kstat_set_mutex in
 * bge_kstat_attach).
 */
int
bge_kstat_read(struct kstat *ks)
{
	struct bge_softc *sc = ks->ks_softc;
	struct kstat_kv *kvs = ks->ks_data;
	int i;

	/* Fold in the counters that are maintained in software. */
	bge_stats_update_regs(sc);
	for (i = 0; i < nitems(bge_kstat_tpl); i++) {
		/*
		 * Entries with reg == 0 have no hardware register and are
		 * accumulated elsewhere (bge_stats_update_regs); the rest
		 * are read from their statistics register and accumulated
		 * here.
		 */
		if (bge_kstat_tpl[i].reg != 0)
			kstat_kv_u32(kvs) += CSR_READ_4(sc,
			    bge_kstat_tpl[i].reg);
		kvs++;
	}

	getnanouptime(&ks->ks_updated);
	return 0;
}
/*
 * Create and install the "bge-stats" kstat for this device.  Allocates
 * one key/value slot per entry in bge_kstat_tpl; the kvs array is owned
 * by the kstat for the lifetime of the device.  Fails silently if the
 * kstat cannot be created.
 */
void
bge_kstat_attach(struct bge_softc *sc)
{
	struct kstat *ks;
	struct kstat_kv *kvs;
	int i;

	ks = kstat_create(sc->bge_dev.dv_xname, 0, "bge-stats", 0,
	    KSTAT_T_KV, 0);
	if (ks == NULL)
		return;

	kvs = mallocarray(nitems(bge_kstat_tpl), sizeof(*kvs), M_DEVBUF,
	    M_ZERO | M_WAITOK);
	/* Initialize each slot with the name/unit from the template. */
	for (i = 0; i < nitems(bge_kstat_tpl); i++) {
		const struct bge_stat *tpl = &bge_kstat_tpl[i];
		kstat_kv_unit_init(&kvs[i], tpl->name, KSTAT_KV_T_UINT32,
		    tpl->unit);
	}

	/* Serialize reads against bge_tick()'s register updates. */
	kstat_set_mutex(ks, &sc->bge_kstat_mtx);
	ks->ks_softc = sc;
	ks->ks_data = kvs;
	ks->ks_datalen = nitems(bge_kstat_tpl) * sizeof(*kvs);
	ks->ks_read = bge_kstat_read;

	sc->bge_kstat = ks;
	kstat_install(ks);
}
#endif /* NKSTAT > 0 */

View file

@ -1,4 +1,4 @@
/* $OpenBSD: if_bgereg.h,v 1.135 2022/01/09 05:42:46 jsg Exp $ */
/* $OpenBSD: if_bgereg.h,v 1.136 2023/07/04 10:22:39 jmatthew Exp $ */
/*
* Copyright (c) 2001 Wind River Systems
@ -2942,4 +2942,7 @@ struct bge_softc {
u_int32_t bge_rx_overruns;
u_int32_t bge_tx_collisions;
bus_dmamap_t bge_txdma[BGE_TX_RING_CNT];
struct mutex bge_kstat_mtx;
struct kstat *bge_kstat;
};

View file

@ -1,4 +1,4 @@
/* $OpenBSD: if_iwm.c,v 1.407 2023/04/14 12:45:10 stsp Exp $ */
/* $OpenBSD: if_iwm.c,v 1.408 2023/07/05 15:07:28 stsp Exp $ */
/*
* Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
@ -8574,7 +8574,7 @@ iwm_bgscan_done(struct ieee80211com *ic,
free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
sc->bgscan_unref_arg = arg;
sc->bgscan_unref_arg_size = arg_size;
iwm_add_task(sc, sc->sc_nswq, &sc->bgscan_done_task);
iwm_add_task(sc, systq, &sc->bgscan_done_task);
}
void

View file

@ -1,4 +1,4 @@
/* $OpenBSD: if_iwx.c,v 1.173 2023/06/27 15:31:27 stsp Exp $ */
/* $OpenBSD: if_iwx.c,v 1.175 2023/07/05 15:07:28 stsp Exp $ */
/*
* Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
@ -2925,7 +2925,7 @@ iwx_disable_txq(struct iwx_softc *sc, int sta_id, int qid, uint8_t tid)
cmd_v0.cb_size = htole32(0);
cmd_v0.byte_cnt_addr = htole64(0);
cmd_v0.tfdq_addr = htole64(0);
hcmd.id = IWX_SCD_QUEUE_CFG,
hcmd.id = IWX_SCD_QUEUE_CFG;
hcmd.data[0] = &cmd_v0;
hcmd.len[0] = sizeof(cmd_v0);
} else if (cmd_ver == 3) {
@ -7607,7 +7607,7 @@ iwx_bgscan_done(struct ieee80211com *ic,
free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
sc->bgscan_unref_arg = arg;
sc->bgscan_unref_arg_size = arg_size;
iwx_add_task(sc, sc->sc_nswq, &sc->bgscan_done_task);
iwx_add_task(sc, systq, &sc->bgscan_done_task);
}
void
@ -8048,7 +8048,7 @@ iwx_phy_send_rlc(struct iwx_softc *sc, struct iwx_phy_ctxt *phyctxt,
idle_cnt = chains_static;
active_cnt = chains_dynamic;
cmd.phy_id = htole32(phyctxt->id),
cmd.phy_id = htole32(phyctxt->id);
cmd.rlc.rx_chain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
IWX_PHY_RX_CHAIN_VALID_POS);
cmd.rlc.rx_chain_info |= htole32(idle_cnt << IWX_PHY_RX_CHAIN_CNT_POS);

View file

@ -1,4 +1,4 @@
/* $OpenBSD: mfii.c,v 1.88 2023/05/25 19:35:58 kurt Exp $ */
/* $OpenBSD: mfii.c,v 1.89 2023/07/06 10:17:43 visa Exp $ */
/*
* Copyright (c) 2012 David Gwynne <dlg@openbsd.org>
@ -1764,8 +1764,9 @@ mfii_poll_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
int
mfii_exec(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
struct mutex m = MUTEX_INITIALIZER_FLAGS(IPL_BIO, __MTX_NAME,
MTX_NOWITNESS);
struct mutex m;
mtx_init(&m, IPL_BIO);
#ifdef DIAGNOSTIC
if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)

View file

@ -1,4 +1,4 @@
/* $OpenBSD: mpii.c,v 1.145 2023/05/25 19:35:58 kurt Exp $ */
/* $OpenBSD: mpii.c,v 1.146 2023/07/06 10:17:43 visa Exp $ */
/*
* Copyright (c) 2010, 2012 Mike Belopuhov
* Copyright (c) 2009 James Giannoules
@ -2857,11 +2857,12 @@ mpii_init_queues(struct mpii_softc *sc)
void
mpii_wait(struct mpii_softc *sc, struct mpii_ccb *ccb)
{
struct mutex mtx = MUTEX_INITIALIZER_FLAGS(IPL_BIO,
__MTX_NAME, MTX_NOWITNESS);
struct mutex mtx;
void (*done)(struct mpii_ccb *);
void *cookie;
mtx_init(&mtx, IPL_BIO);
done = ccb->ccb_done;
cookie = ccb->ccb_cookie;

View file

@ -1,4 +1,4 @@
/* $OpenBSD: virtio_pci.c,v 1.33 2023/05/29 08:13:35 sf Exp $ */
/* $OpenBSD: virtio_pci.c,v 1.34 2023/07/05 18:11:08 patrick Exp $ */
/* $NetBSD: virtio.c,v 1.3 2011/11/02 23:05:52 njoly Exp $ */
/*
@ -976,7 +976,7 @@ virtio_pci_setup_msix(struct virtio_pci_softc *sc, struct pci_attach_args *pa,
for (i = 0; i < vsc->sc_nvqs; i++)
virtio_pci_set_msix_queue_vector(sc, i, 1);
} else {
for (i = 0; i <= vsc->sc_nvqs; i++) {
for (i = 0; i < vsc->sc_nvqs; i++) {
if (virtio_pci_msix_establish(sc, pa, i + 1,
virtio_pci_queue_intr, &vsc->sc_vqs[i])) {
goto fail;

View file

@ -407,9 +407,8 @@ hv_hypercall(struct hv_softc *sc, uint64_t control, void *input,
}
#ifdef __amd64__
__asm__ volatile ("mov %0, %%r8" : : "r" (output_pa) : "r8");
__asm__ volatile ("call *%3" : "=a" (status) : "c" (control),
"d" (input_pa), "m" (sc->sc_hc));
extern uint64_t hv_hypercall_trampoline(uint64_t, paddr_t, paddr_t);
status = hv_hypercall_trampoline(control, input_pa, output_pa);
#else /* __i386__ */
{
uint32_t control_hi = control >> 32;

View file

@ -1,4 +1,4 @@
/* $OpenBSD: if_vio.c,v 1.23 2023/05/29 08:13:35 sf Exp $ */
/* $OpenBSD: if_vio.c,v 1.24 2023/07/03 07:40:52 kn Exp $ */
/*
* Copyright (c) 2012 Stefan Fritsch, Alexander Fiveg.
@ -265,8 +265,8 @@ int vio_init(struct ifnet *);
void vio_stop(struct ifnet *, int);
void vio_start(struct ifnet *);
int vio_ioctl(struct ifnet *, u_long, caddr_t);
void vio_get_lladr(struct arpcom *ac, struct virtio_softc *vsc);
void vio_put_lladr(struct arpcom *ac, struct virtio_softc *vsc);
void vio_get_lladdr(struct arpcom *ac, struct virtio_softc *vsc);
void vio_put_lladdr(struct arpcom *ac, struct virtio_softc *vsc);
/* rx */
int vio_add_rx_mbuf(struct vio_softc *, int);
@ -491,7 +491,7 @@ err_hdr:
}
void
vio_get_lladr(struct arpcom *ac, struct virtio_softc *vsc)
vio_get_lladdr(struct arpcom *ac, struct virtio_softc *vsc)
{
int i;
for (i = 0; i < ETHER_ADDR_LEN; i++) {
@ -501,7 +501,7 @@ vio_get_lladr(struct arpcom *ac, struct virtio_softc *vsc)
}
void
vio_put_lladr(struct arpcom *ac, struct virtio_softc *vsc)
vio_put_lladdr(struct arpcom *ac, struct virtio_softc *vsc)
{
int i;
for (i = 0; i < ETHER_ADDR_LEN; i++) {
@ -537,10 +537,10 @@ vio_attach(struct device *parent, struct device *self, void *aux)
virtio_negotiate_features(vsc, virtio_net_feature_names);
if (virtio_has_feature(vsc, VIRTIO_NET_F_MAC)) {
vio_get_lladr(&sc->sc_ac, vsc);
vio_get_lladdr(&sc->sc_ac, vsc);
} else {
ether_fakeaddr(ifp);
vio_put_lladr(&sc->sc_ac, vsc);
vio_put_lladdr(&sc->sc_ac, vsc);
}
printf(": address %s\n", ether_sprintf(sc->sc_ac.ac_enaddr));

View file

@ -1,4 +1,4 @@
/* $OpenBSD: ubcmtp.c,v 1.24 2022/10/26 16:07:28 kn Exp $ */
/* $OpenBSD: ubcmtp.c,v 1.25 2023/07/02 21:44:04 bru Exp $ */
/*
* Copyright (c) 2013-2014, joshua stein <jcs@openbsd.org>
@ -309,6 +309,10 @@ static const struct ubcmtp_dev ubcmtp_devices[] = {
},
};
static struct wsmouse_param ubcmtp_wsmousecfg[] = {
{ WSMOUSECFG_MTBTN_MAXDIST, 0 }, /* 0: Compute a default value. */
};
struct ubcmtp_softc {
struct device sc_dev; /* base device */
@ -529,7 +533,8 @@ ubcmtp_configure(struct ubcmtp_softc *sc)
hw->mt_slots = UBCMTP_MAX_FINGERS;
hw->flags = WSMOUSEHW_MT_TRACKING;
return wsmouse_configure(sc->sc_wsmousedev, NULL, 0);
return wsmouse_configure(sc->sc_wsmousedev,
ubcmtp_wsmousecfg, nitems(ubcmtp_wsmousecfg));
}
int

View file

@ -1,4 +1,4 @@
/* $OpenBSD: wsconsio.h,v 1.99 2023/04/20 19:28:31 jcs Exp $ */
/* $OpenBSD: wsconsio.h,v 1.100 2023/07/02 21:44:04 bru Exp $ */
/* $NetBSD: wsconsio.h,v 1.74 2005/04/28 07:15:44 martin Exp $ */
/*
@ -279,6 +279,9 @@ struct wsmouse_calibcoords {
* WSMOUSEIO_SETPARAMS calls. Arbitrary subsets can be passed, provided
* that all keys are valid and that the number of key/value pairs doesn't
* exceed WSMOUSECFG_MAX.
*
* The keys are divided into various groups, which end with marker entries
* of the form WSMOUSECFG__*.
*/
enum wsmousecfg {
/*
@ -295,6 +298,8 @@ enum wsmousecfg {
WSMOUSECFG_REVERSE_SCROLLING,
/* reverse scroll directions */
WSMOUSECFG__FILTERS,
/*
* Coordinate handling, applying only in WSMOUSE_COMPAT mode.
*/
@ -307,6 +312,8 @@ enum wsmousecfg {
ture is not supported anymore. */
WSMOUSECFG_SMOOTHING, /* smoothing factor (0-7) */
WSMOUSECFG__TPFILTERS,
/*
* Touchpad features
*/
@ -319,6 +326,9 @@ enum wsmousecfg {
WSMOUSECFG_SWAPSIDES, /* invert soft-button/scroll areas */
WSMOUSECFG_DISABLE, /* disable all output except for
clicks in the top-button area */
WSMOUSECFG_MTBUTTONS, /* multi-touch buttons */
WSMOUSECFG__TPFEATURES,
/*
* Touchpad options
@ -340,14 +350,25 @@ enum wsmousecfg {
WSMOUSECFG_TAP_ONE_BTNMAP, /* one-finger tap button mapping */
WSMOUSECFG_TAP_TWO_BTNMAP, /* two-finger tap button mapping */
WSMOUSECFG_TAP_THREE_BTNMAP, /* three-finger tap button mapping */
WSMOUSECFG_MTBTN_MAXDIST, /* MTBUTTONS: distance limit for
two-finger clicks */
WSMOUSECFG__TPSETUP,
/*
* Enable/Disable debug output.
*/
WSMOUSECFG_LOG_INPUT = 256,
WSMOUSECFG_LOG_EVENTS,
WSMOUSECFG__DEBUG,
};
#define WSMOUSECFG_MAX 41 /* max size of param array per ioctl */
#define WSMOUSECFG_MAX ((WSMOUSECFG__FILTERS - WSMOUSECFG_DX_SCALE) \
+ (WSMOUSECFG__TPFILTERS - WSMOUSECFG_DX_MAX) \
+ (WSMOUSECFG__TPFEATURES - WSMOUSECFG_SOFTBUTTONS) \
+ (WSMOUSECFG__TPSETUP - WSMOUSECFG_LEFT_EDGE) \
+ (WSMOUSECFG__DEBUG - WSMOUSECFG_LOG_INPUT))
struct wsmouse_param {
enum wsmousecfg key;

View file

@ -1,4 +1,4 @@
/* $OpenBSD: wsevent.c,v 1.26 2022/07/02 08:50:42 visa Exp $ */
/* $OpenBSD: wsevent.c,v 1.27 2023/07/06 10:16:58 visa Exp $ */
/* $NetBSD: wsevent.c,v 1.16 2003/08/07 16:31:29 agc Exp $ */
/*
@ -134,6 +134,8 @@ wsevent_fini(struct wseventvar *ev)
free(ev->q, M_DEVBUF, WSEVENT_QSIZE * sizeof(struct wscons_event));
ev->q = NULL;
klist_invalidate(&ev->sel.si_note);
sigio_free(&ev->sigio);
}

View file

@ -1,4 +1,4 @@
/* $OpenBSD: wstpad.c,v 1.31 2022/06/09 22:17:18 bru Exp $ */
/* $OpenBSD: wstpad.c,v 1.32 2023/07/02 21:44:04 bru Exp $ */
/*
* Copyright (c) 2015, 2016 Ulf Brosziewski
@ -149,6 +149,7 @@ struct tpad_touch {
#define WSTPAD_HORIZSCROLL (1 << 5)
#define WSTPAD_SWAPSIDES (1 << 6)
#define WSTPAD_DISABLE (1 << 7)
#define WSTPAD_MTBUTTONS (1 << 8)
#define WSTPAD_MT (1 << 31)
@ -201,6 +202,8 @@ struct wstpad {
/* two-finger contacts */
int f2pressure;
int f2width;
/* MTBUTTONS: distance limit for two-finger clicks */
int mtbtn_maxdist;
} params;
/* handler state and configuration: */
@ -634,6 +637,37 @@ wstpad_get_sbtn(struct wsmouseinput *input, int top)
return (btn != PRIMARYBTN ? btn : 0);
}
int
wstpad_mtbtn_contacts(struct wsmouseinput *input)
{
struct wstpad *tp = input->tp;
struct tpad_touch *t;
int dx, dy, dist, limit;
if (tp->ignore != 0)
return (tp->contacts - 1);
if (tp->contacts == 2 && (t = get_2nd_touch(input)) != NULL) {
dx = abs(t->x - tp->t->x) << 12;
dy = abs(t->y - tp->t->y) * tp->ratio;
dist = (dx >= dy ? dx + 3 * dy / 8 : dy + 3 * dx / 8);
limit = tp->params.mtbtn_maxdist << 12;
if (input->mt.ptr_mask != 0)
limit = limit * 2 / 3;
if (dist > limit)
return (1);
}
return (tp->contacts);
}
u_int
wstpad_get_mtbtn(struct wsmouseinput *input)
{
int contacts = wstpad_mtbtn_contacts(input);
return (contacts == 2 ? RIGHTBTN : (contacts == 3 ? MIDDLEBTN : 0));
}
void
wstpad_softbuttons(struct wsmouseinput *input, u_int *cmds, int hdlr)
{
@ -646,7 +680,8 @@ wstpad_softbuttons(struct wsmouseinput *input, u_int *cmds, int hdlr)
}
if (tp->softbutton == 0 && PRIMARYBTN_CLICKED(tp)) {
tp->softbutton = wstpad_get_sbtn(input, top);
tp->softbutton = ((tp->features & WSTPAD_MTBUTTONS)
? wstpad_get_mtbtn(input) : wstpad_get_sbtn(input, top));
if (tp->softbutton)
*cmds |= 1 << SOFTBUTTON_DOWN;
}
@ -1599,6 +1634,15 @@ wstpad_configure(struct wsmouseinput *input)
tp->scroll.hdist = 4 * h_unit;
tp->scroll.vdist = 4 * v_unit;
tp->tap.maxdist = 4 * h_unit;
if (IS_MT(tp) && h_res > 1 && v_res > 1 &&
input->hw.hw_type == WSMOUSEHW_CLICKPAD &&
(width + h_res / 2) / h_res > 100 &&
(height + v_res / 2) / v_res > 60) {
tp->params.mtbtn_maxdist = h_res * 35;
} else {
tp->params.mtbtn_maxdist = -1; /* not available */
}
}
/* A touch with a flag set in this mask does not move the pointer. */
@ -1619,13 +1663,24 @@ wstpad_configure(struct wsmouseinput *input)
tp->edge.center_left = tp->edge.center - offset;
tp->edge.center_right = tp->edge.center + offset;
/*
* Make the MTBUTTONS configuration consistent. A non-negative 'maxdist'
* value makes the feature visible in wsconsctl. 0-values are replaced
* by a default (one fourth of the length of the touchpad diagonal).
*/
if (tp->params.mtbtn_maxdist < 0) {
tp->features &= ~WSTPAD_MTBUTTONS;
} else if (tp->params.mtbtn_maxdist == 0) {
diag = isqrt(width * width + height * height);
tp->params.mtbtn_maxdist = diag / 4;
}
tp->handlers = 0;
if (tp->features & WSTPAD_SOFTBUTTONS)
if (tp->features & (WSTPAD_SOFTBUTTONS | WSTPAD_MTBUTTONS))
tp->handlers |= 1 << SOFTBUTTON_HDLR;
if (tp->features & WSTPAD_TOPBUTTONS)
tp->handlers |= 1 << TOPBUTTON_HDLR;
if (tp->features & WSTPAD_TWOFINGERSCROLL)
tp->handlers |= 1 << F2SCROLL_HDLR;
else if (tp->features & WSTPAD_EDGESCROLL)
@ -1691,7 +1746,7 @@ wstpad_set_param(struct wsmouseinput *input, int key, int val)
return (EINVAL);
switch (key) {
case WSMOUSECFG_SOFTBUTTONS ... WSMOUSECFG_DISABLE:
case WSMOUSECFG_SOFTBUTTONS ... WSMOUSECFG_MTBUTTONS:
switch (key) {
case WSMOUSECFG_SOFTBUTTONS:
flag = WSTPAD_SOFTBUTTONS;
@ -1717,6 +1772,9 @@ wstpad_set_param(struct wsmouseinput *input, int key, int val)
case WSMOUSECFG_DISABLE:
flag = WSTPAD_DISABLE;
break;
case WSMOUSECFG_MTBUTTONS:
flag = WSTPAD_MTBUTTONS;
break;
}
if (val)
tp->features |= flag;
@ -1768,6 +1826,10 @@ wstpad_set_param(struct wsmouseinput *input, int key, int val)
case WSMOUSECFG_TAP_THREE_BTNMAP:
tp->tap.btnmap[2] = BTNMASK(val);
break;
case WSMOUSECFG_MTBTN_MAXDIST:
if (IS_MT(tp))
tp->params.mtbtn_maxdist = val;
break;
default:
return (ENOTSUP);
}
@ -1785,7 +1847,7 @@ wstpad_get_param(struct wsmouseinput *input, int key, int *pval)
return (EINVAL);
switch (key) {
case WSMOUSECFG_SOFTBUTTONS ... WSMOUSECFG_DISABLE:
case WSMOUSECFG_SOFTBUTTONS ... WSMOUSECFG_MTBUTTONS:
switch (key) {
case WSMOUSECFG_SOFTBUTTONS:
flag = WSTPAD_SOFTBUTTONS;
@ -1811,6 +1873,9 @@ wstpad_get_param(struct wsmouseinput *input, int key, int *pval)
case WSMOUSECFG_DISABLE:
flag = WSTPAD_DISABLE;
break;
case WSMOUSECFG_MTBUTTONS:
flag = WSTPAD_MTBUTTONS;
break;
}
*pval = !!(tp->features & flag);
break;
@ -1859,6 +1924,9 @@ wstpad_get_param(struct wsmouseinput *input, int key, int *pval)
case WSMOUSECFG_TAP_THREE_BTNMAP:
*pval = ffs(tp->tap.btnmap[2]);
break;
case WSMOUSECFG_MTBTN_MAXDIST:
*pval = tp->params.mtbtn_maxdist;
break;
default:
return (ENOTSUP);
}

View file

@ -1,4 +1,4 @@
/* $OpenBSD: kern_clockintr.c,v 1.25 2023/06/22 16:23:50 cheloha Exp $ */
/* $OpenBSD: kern_clockintr.c,v 1.27 2023/07/02 19:02:27 cheloha Exp $ */
/*
* Copyright (c) 2003 Dale Rahn <drahn@openbsd.org>
* Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
@ -29,8 +29,6 @@
#include <sys/sysctl.h>
#include <sys/time.h>
#ifdef __HAVE_CLOCKINTR
/*
* Protection for global variables in this file:
*
@ -107,7 +105,7 @@ clockintr_init(u_int flags)
void
clockintr_cpu_init(const struct intrclock *ic)
{
uint64_t multiplier = 0, offset;
uint64_t multiplier = 0;
struct cpu_info *ci = curcpu();
struct clockintr_queue *cq = &ci->ci_queue;
int reset_cq_intrclock = 0;
@ -170,21 +168,28 @@ clockintr_cpu_init(const struct intrclock *ic)
clockintr_advance(cq->cq_hardclock, hardclock_period);
} else {
if (cq->cq_hardclock->cl_expiration == 0) {
offset = hardclock_period / ncpus * multiplier;
cq->cq_hardclock->cl_expiration = offset;
clockintr_stagger(cq->cq_hardclock, hardclock_period,
multiplier, MAXCPUS);
}
clockintr_advance(cq->cq_hardclock, hardclock_period);
}
/*
* We can always advance the statclock and schedclock.
* There is no reason to stagger a randomized statclock.
*/
offset = statclock_avg / ncpus * multiplier;
clockintr_schedule(cq->cq_statclock, offset);
if (!ISSET(clockintr_flags, CL_RNDSTAT)) {
if (cq->cq_statclock->cl_expiration == 0) {
clockintr_stagger(cq->cq_statclock, statclock_avg,
multiplier, MAXCPUS);
}
}
clockintr_advance(cq->cq_statclock, statclock_avg);
if (schedhz != 0) {
offset = schedclock_period / ncpus * multiplier;
clockintr_schedule(cq->cq_schedclock, offset);
if (cq->cq_schedclock->cl_expiration == 0) {
clockintr_stagger(cq->cq_schedclock, schedclock_period,
multiplier, MAXCPUS);
}
clockintr_advance(cq->cq_schedclock, schedclock_period);
}
@ -766,4 +771,3 @@ db_show_clockintr(const struct clockintr *cl, const char *state, u_int cpu)
}
#endif /* DDB */
#endif /*__HAVE_CLOCKINTR */

View file

@ -1,4 +1,4 @@
/* $OpenBSD: kern_exec.c,v 1.248 2023/05/30 08:30:01 jsg Exp $ */
/* $OpenBSD: kern_exec.c,v 1.249 2023/07/06 07:49:52 deraadt Exp $ */
/* $NetBSD: kern_exec.c,v 1.75 1996/02/09 18:59:28 christos Exp $ */
/*-
@ -531,18 +531,6 @@ sys_execve(struct proc *p, void *v, register_t *retval)
if (otvp)
vrele(otvp);
/*
* XXX As a transition mechanism, we don't enforce branch
* target control flow integrity on partitions mounted with
* the wxallowed flag.
*/
if (pr->ps_textvp->v_mount &&
(pr->ps_textvp->v_mount->mnt_flag & MNT_WXALLOWED))
pack.ep_flags |= EXEC_NOBTCFI;
/* XXX XXX But enable it for chrome. */
if (strcmp(p->p_p->ps_comm, "chrome") == 0)
pack.ep_flags &= ~EXEC_NOBTCFI;
atomic_setbits_int(&pr->ps_flags, PS_EXEC);
if (pr->ps_flags & PS_PPWAIT) {
atomic_clearbits_int(&pr->ps_flags, PS_PPWAIT);

View file

@ -1,4 +1,4 @@
/* $OpenBSD: kern_fork.c,v 1.247 2023/04/25 18:14:06 claudio Exp $ */
/* $OpenBSD: kern_fork.c,v 1.248 2023/07/02 11:16:03 deraadt Exp $ */
/* $NetBSD: kern_fork.c,v 1.29 1996/02/09 18:59:34 christos Exp $ */
/*
@ -241,7 +241,8 @@ process_new(struct proc *p, struct process *parent, int flags)
unveil_copy(parent, pr);
pr->ps_flags = parent->ps_flags &
(PS_SUGID | PS_SUGIDEXEC | PS_PLEDGE | PS_EXECPLEDGE | PS_WXNEEDED);
(PS_SUGID | PS_SUGIDEXEC | PS_PLEDGE | PS_EXECPLEDGE |
PS_WXNEEDED | PS_CHROOT);
if (parent->ps_session->s_ttyvp != NULL)
pr->ps_flags |= parent->ps_flags & PS_CONTROLT;

View file

@ -1,4 +1,4 @@
/* $OpenBSD: kern_sysctl.c,v 1.415 2023/05/21 12:47:54 claudio Exp $ */
/* $OpenBSD: kern_sysctl.c,v 1.416 2023/07/02 19:02:27 cheloha Exp $ */
/* $NetBSD: kern_sysctl.c,v 1.17 1996/05/20 17:49:05 mrg Exp $ */
/*-
@ -430,11 +430,9 @@ kern_sysctl_dirs(int top_name, int *name, u_int namelen,
case KERN_CPUSTATS:
return (sysctl_cpustats(name, namelen, oldp, oldlenp,
newp, newlen));
#ifdef __HAVE_CLOCKINTR
case KERN_CLOCKINTR:
return sysctl_clockintr(name, namelen, oldp, oldlenp, newp,
newlen);
#endif
default:
return (ENOTDIR); /* overloaded */
}

View file

@ -1,4 +1,4 @@
/* $OpenBSD: subr_suspend.c,v 1.14 2022/11/10 10:37:40 kettenis Exp $ */
/* $OpenBSD: subr_suspend.c,v 1.15 2023/07/02 19:02:27 cheloha Exp $ */
/*
* Copyright (c) 2005 Thorsten Lockert <tholo@sigmasoft.com>
* Copyright (c) 2005 Jordan Hargrave <jordan@openbsd.org>
@ -165,10 +165,9 @@ fail_suspend:
splx(s);
inittodr(gettime());
#ifdef __HAVE_CLOCKINTR
clockintr_cpu_init(NULL);
clockintr_trigger();
#endif
sleep_resume(v);
resume_randomness(rndbuf, rndbuflen);
#ifdef MULTIPROCESSOR

View file

@ -1,4 +1,4 @@
/* $OpenBSD: uipc_socket.c,v 1.304 2023/06/30 11:52:11 mvs Exp $ */
/* $OpenBSD: uipc_socket.c,v 1.305 2023/07/04 22:28:24 mvs Exp $ */
/* $NetBSD: uipc_socket.c,v 1.21 1996/02/04 02:17:52 christos Exp $ */
/*
@ -524,7 +524,7 @@ sodisconnect(struct socket *so)
int m_getuio(struct mbuf **, int, long, struct uio *);
#define SBLOCKWAIT(f) (((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
#define SBLOCKWAIT(f) (((f) & MSG_DONTWAIT) ? 0 : SBL_WAIT)
/*
* Send on a socket.
* If send must go all at once and message is larger than
@ -1219,9 +1219,8 @@ sorflush(struct socket *so)
const struct protosw *pr = so->so_proto;
int error;
sb->sb_flags |= SB_NOINTR;
error = sblock(so, sb, M_WAITOK);
/* with SB_NOINTR and M_WAITOK sblock() must not fail */
error = sblock(so, sb, SBL_WAIT | SBL_NOINTR);
/* with SBL_WAIT and SLB_NOINTR sblock() must not fail */
KASSERT(error == 0);
socantrcvmore(so);
m = sb->sb_mb;
@ -1290,7 +1289,7 @@ sosplice(struct socket *so, int fd, off_t max, struct timeval *tv)
/* If no fd is given, unsplice by removing existing link. */
if (fd < 0) {
/* Lock receive buffer. */
if ((error = sblock(so, &so->so_rcv, M_WAITOK)) != 0) {
if ((error = sblock(so, &so->so_rcv, SBL_WAIT)) != 0) {
return (error);
}
if (so->so_sp->ssp_socket)
@ -1323,10 +1322,10 @@ sosplice(struct socket *so, int fd, off_t max, struct timeval *tv)
}
/* Lock both receive and send buffer. */
if ((error = sblock(so, &so->so_rcv, M_WAITOK)) != 0) {
if ((error = sblock(so, &so->so_rcv, SBL_WAIT)) != 0) {
goto frele;
}
if ((error = sblock(so, &sosp->so_snd, M_WAITOK)) != 0) {
if ((error = sblock(so, &sosp->so_snd, SBL_WAIT)) != 0) {
sbunlock(so, &so->so_rcv);
goto frele;
}

View file

@ -1,4 +1,4 @@
/* $OpenBSD: uipc_socket2.c,v 1.136 2023/02/10 14:34:17 visa Exp $ */
/* $OpenBSD: uipc_socket2.c,v 1.137 2023/07/04 22:28:24 mvs Exp $ */
/* $NetBSD: uipc_socket2.c,v 1.11 1996/02/04 02:17:55 christos Exp $ */
/*
@ -494,9 +494,9 @@ sbwait(struct socket *so, struct sockbuf *sb)
}
int
sblock(struct socket *so, struct sockbuf *sb, int wait)
sblock(struct socket *so, struct sockbuf *sb, int flags)
{
int error, prio = (sb->sb_flags & SB_NOINTR) ? PSOCK : PSOCK | PCATCH;
int error, prio = PSOCK;
soassertlocked(so);
@ -504,8 +504,10 @@ sblock(struct socket *so, struct sockbuf *sb, int wait)
sb->sb_flags |= SB_LOCK;
return (0);
}
if (wait & M_NOWAIT)
if ((flags & SBL_WAIT) == 0)
return (EWOULDBLOCK);
if (!(flags & SBL_NOINTR || sb->sb_flags & SB_NOINTR))
prio |= PCATCH;
while (sb->sb_flags & SB_LOCK) {
sb->sb_flags |= SB_WANT;

View file

@ -1,4 +1,4 @@
/* $OpenBSD: vfs_syscalls.c,v 1.361 2023/02/11 23:22:17 deraadt Exp $ */
/* $OpenBSD: vfs_syscalls.c,v 1.362 2023/07/05 15:13:28 beck Exp $ */
/* $NetBSD: vfs_syscalls.c,v 1.71 1996/04/23 10:29:02 mycroft Exp $ */
/*
@ -239,11 +239,10 @@ update:
else if (mp->mnt_flag & MNT_RDONLY)
mp->mnt_flag |= MNT_WANTRDWR;
mp->mnt_flag &=~ (MNT_NOSUID | MNT_NOEXEC | MNT_WXALLOWED | MNT_NODEV |
MNT_SYNCHRONOUS | MNT_ASYNC | MNT_SOFTDEP | MNT_NOATIME |
MNT_NOPERM | MNT_FORCE);
MNT_SYNCHRONOUS | MNT_ASYNC | MNT_NOATIME | MNT_NOPERM | MNT_FORCE);
mp->mnt_flag |= flags & (MNT_NOSUID | MNT_NOEXEC | MNT_WXALLOWED |
MNT_NODEV | MNT_SYNCHRONOUS | MNT_ASYNC | MNT_SOFTDEP |
MNT_NOATIME | MNT_NOPERM | MNT_FORCE);
MNT_NODEV | MNT_SYNCHRONOUS | MNT_ASYNC | MNT_NOATIME | MNT_NOPERM |
MNT_FORCE);
/*
* Mount the filesystem.
*/

View file

@ -1,4 +1,4 @@
/* $OpenBSD: if.c,v 1.701 2023/06/27 21:02:13 mvs Exp $ */
/* $OpenBSD: if.c,v 1.704 2023/07/06 04:55:04 dlg Exp $ */
/* $NetBSD: if.c,v 1.35 1996/05/07 05:26:04 thorpej Exp $ */
/*
@ -106,6 +106,9 @@
#ifdef MROUTING
#include <netinet/ip_mroute.h>
#endif
#include <netinet/tcp.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#ifdef INET6
#include <netinet6/in6_var.h>
@ -802,13 +805,30 @@ if_input_local(struct ifnet *ifp, struct mbuf *m, sa_family_t af)
* is now incorrect, will be calculated before sending.
*/
keepcksum = m->m_pkthdr.csum_flags & (M_IPV4_CSUM_OUT |
M_TCP_CSUM_OUT | M_UDP_CSUM_OUT | M_ICMP_CSUM_OUT);
M_TCP_CSUM_OUT | M_UDP_CSUM_OUT | M_ICMP_CSUM_OUT |
M_TCP_TSO);
m_resethdr(m);
m->m_flags |= M_LOOP | keepflags;
m->m_pkthdr.csum_flags = keepcksum;
m->m_pkthdr.ph_ifidx = ifp->if_index;
m->m_pkthdr.ph_rtableid = ifp->if_rdomain;
if (ISSET(keepcksum, M_TCP_TSO) && m->m_pkthdr.len > ifp->if_mtu) {
if (ifp->if_mtu > 0 &&
((af == AF_INET &&
ISSET(ifp->if_capabilities, IFCAP_TSOv4)) ||
(af == AF_INET6 &&
ISSET(ifp->if_capabilities, IFCAP_TSOv6)))) {
tcpstat_inc(tcps_inswlro);
tcpstat_add(tcps_inpktlro,
(m->m_pkthdr.len + ifp->if_mtu - 1) / ifp->if_mtu);
} else {
tcpstat_inc(tcps_inbadlro);
m_freem(m);
return (EPROTONOSUPPORT);
}
}
if (ISSET(keepcksum, M_TCP_CSUM_OUT))
m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
if (ISSET(keepcksum, M_UDP_CSUM_OUT))
@ -1014,14 +1034,6 @@ if_netisr(void *unused)
t |= n;
}
#if NPFSYNC > 0
if (t & (1 << NETISR_PFSYNC)) {
KERNEL_LOCK();
pfsyncintr();
KERNEL_UNLOCK();
}
#endif
NET_UNLOCK();
}
@ -1100,7 +1112,7 @@ if_detachhook_del(struct ifnet *ifp, struct task *t)
}
/*
* Detach an interface from everything in the kernel. Also deallocate
* Detach an interface from everything in the kernel. Also deallocate
* private resources.
*/
void
@ -3186,7 +3198,7 @@ ifsetlro(struct ifnet *ifp, int on)
KERNEL_ASSERT_LOCKED(); /* for if_flags */
if (on && !ISSET(ifp->if_xflags, IFXF_LRO)) {
if (ether_brport_isset(ifp)) {
if (ifp->if_type == IFT_ETHER && ether_brport_isset(ifp)) {
error = EBUSY;
goto out;
}

View file

@ -1,4 +1,4 @@
/* $OpenBSD: if_ethersubr.c,v 1.288 2023/04/05 23:01:03 kn Exp $ */
/* $OpenBSD: if_ethersubr.c,v 1.289 2023/07/03 15:52:51 kn Exp $ */
/* $NetBSD: if_ethersubr.c,v 1.19 1996/05/07 02:40:30 thorpej Exp $ */
/*
@ -709,9 +709,8 @@ ether_ifdetach(struct ifnet *ifp)
/* Undo pseudo-driver changes. */
if_deactivate(ifp);
for (enm = LIST_FIRST(&ac->ac_multiaddrs);
enm != NULL;
enm = LIST_FIRST(&ac->ac_multiaddrs)) {
while (!LIST_EMPTY(&ac->ac_multiaddrs)) {
enm = LIST_FIRST(&ac->ac_multiaddrs);
LIST_REMOVE(enm, enm_list);
free(enm, M_IFMADDR, sizeof *enm);
}

View file

@ -1,4 +1,4 @@
/* $OpenBSD: if_loop.c,v 1.94 2023/06/05 11:35:46 bluhm Exp $ */
/* $OpenBSD: if_loop.c,v 1.95 2023/07/02 19:59:15 bluhm Exp $ */
/* $NetBSD: if_loop.c,v 1.15 1996/05/07 02:40:33 thorpej Exp $ */
/*
@ -175,7 +175,8 @@ loop_clone_create(struct if_clone *ifc, int unit)
ifp->if_xflags = IFXF_CLONED;
ifp->if_capabilities = IFCAP_CSUM_IPv4 |
IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4 |
IFCAP_CSUM_TCPv6 | IFCAP_CSUM_UDPv6;
IFCAP_CSUM_TCPv6 | IFCAP_CSUM_UDPv6 |
IFCAP_LRO;
ifp->if_rtrequest = lortrequest;
ifp->if_ioctl = loioctl;
ifp->if_input = loinput;
@ -281,6 +282,10 @@ loioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
switch (cmd) {
case SIOCSIFFLAGS:
if (ISSET(ifp->if_xflags, IFXF_LRO))
SET(ifp->if_capabilities, IFCAP_TSOv4 | IFCAP_TSOv6);
else
CLR(ifp->if_capabilities, IFCAP_TSOv4 | IFCAP_TSOv6);
break;
case SIOCSIFADDR:

File diff suppressed because it is too large Load diff

View file

@ -1,4 +1,4 @@
/* $OpenBSD: if_pfsync.h,v 1.59 2022/11/11 11:47:13 dlg Exp $ */
/* $OpenBSD: if_pfsync.h,v 1.60 2023/07/06 04:55:05 dlg Exp $ */
/*
* Copyright (c) 2001 Michael Shalayeff
@ -177,7 +177,7 @@ struct pfsync_upd_c {
struct pfsync_upd_req {
u_int64_t id;
u_int32_t creatorid;
} __packed;
} __packed __aligned(4);
/*
* DEL_C
@ -295,16 +295,6 @@ enum pfsync_counters {
pfsyncs_ncounters,
};
extern struct cpumem *pfsynccounters;
struct pfsync_deferral;
static inline void
pfsyncstat_inc(enum pfsync_counters c)
{
counters_inc(pfsynccounters, c);
}
/*
* this shows where a pf state is with respect to the syncing.
*/
@ -315,10 +305,11 @@ pfsyncstat_inc(enum pfsync_counters c)
#define PFSYNC_S_UPD 0x04
#define PFSYNC_S_COUNT 0x05
#define PFSYNC_S_DEFER 0xfe
#define PFSYNC_S_NONE 0xff
#define PFSYNC_S_NONE 0xd0
#define PFSYNC_S_SYNC 0xd1
#define PFSYNC_S_DEAD 0xde
int pfsync_input(struct mbuf **, int *, int, int);
int pfsync_input4(struct mbuf **, int *, int, int);
int pfsync_sysctl(int *, u_int, void *, size_t *,
void *, size_t);
@ -329,6 +320,9 @@ int pfsync_state_import(struct pfsync_state *, int);
void pfsync_state_export(struct pfsync_state *,
struct pf_state *);
void pfsync_init_state(struct pf_state *,
const struct pf_state_key *,
const struct pf_state_key *, int);
void pfsync_insert_state(struct pf_state *);
void pfsync_update_state(struct pf_state *);
void pfsync_delete_state(struct pf_state *);
@ -337,14 +331,10 @@ void pfsync_clear_states(u_int32_t, const char *);
void pfsync_update_tdb(struct tdb *, int);
void pfsync_delete_tdb(struct tdb *);
int pfsync_defer(struct pf_state *, struct mbuf *,
struct pfsync_deferral **);
void pfsync_undefer(struct pfsync_deferral *, int);
int pfsync_defer(struct pf_state *, struct mbuf *);
int pfsync_is_up(void);
int pfsync_state_in_use(struct pf_state *);
void pfsync_iack(struct pf_state *);
#endif /* _KERNEL */
#endif /* _NET_IF_PFSYNC_H_ */

View file

@ -1,4 +1,4 @@
/* $OpenBSD: netisr.h,v 1.60 2022/07/14 10:52:21 mvs Exp $ */
/* $OpenBSD: netisr.h,v 1.61 2023/07/06 04:55:05 dlg Exp $ */
/* $NetBSD: netisr.h,v 1.12 1995/08/12 23:59:24 mycroft Exp $ */
/*
@ -42,7 +42,6 @@
* on the lowest level routine of each protocol.
*/
#define NETISR_IP 2 /* same as AF_INET */
#define NETISR_PFSYNC 5 /* for pfsync "immediate" tx */
#define NETISR_ARP 18 /* same as AF_LINK */
#define NETISR_IPV6 24 /* same as AF_INET6 */
#define NETISR_PIPEX 27 /* for pipex processing */
@ -64,7 +63,6 @@ void ipintr(void);
void ip6intr(void);
void pppintr(void);
void bridgeintr(void);
void pfsyncintr(void);
void pipexintr(void);
void pppoeintr(void);

View file

@ -1,4 +1,4 @@
/* $OpenBSD: pf.c,v 1.1181 2023/06/05 08:37:27 sashan Exp $ */
/* $OpenBSD: pf.c,v 1.1182 2023/07/06 04:55:05 dlg Exp $ */
/*
* Copyright (c) 2001 Daniel Hartmeier
@ -100,8 +100,6 @@
#if NPFSYNC > 0
#include <net/if_pfsync.h>
#else
struct pfsync_deferral;
#endif /* NPFSYNC > 0 */
/*
@ -121,10 +119,6 @@ u_char pf_tcp_secret[16];
int pf_tcp_secret_init;
int pf_tcp_iss_off;
int pf_npurge;
struct task pf_purge_task = TASK_INITIALIZER(pf_purge, &pf_npurge);
struct timeout pf_purge_to = TIMEOUT_INITIALIZER(pf_purge_timeout, NULL);
enum pf_test_status {
PF_TEST_FAIL = -1,
PF_TEST_OK,
@ -190,8 +184,7 @@ void pf_rule_to_actions(struct pf_rule *,
struct pf_rule_actions *);
int pf_test_rule(struct pf_pdesc *, struct pf_rule **,
struct pf_state **, struct pf_rule **,
struct pf_ruleset **, u_short *,
struct pfsync_deferral **);
struct pf_ruleset **, u_short *);
static __inline int pf_create_state(struct pf_pdesc *, struct pf_rule *,
struct pf_rule *, struct pf_rule *,
struct pf_state_key **, struct pf_state_key **,
@ -250,6 +243,10 @@ void pf_counters_inc(int, struct pf_pdesc *,
struct pf_state *, struct pf_rule *,
struct pf_rule *);
int pf_state_insert(struct pfi_kif *,
struct pf_state_key **, struct pf_state_key **,
struct pf_state *);
int pf_state_key_isvalid(struct pf_state_key *);
struct pf_state_key *pf_state_key_ref(struct pf_state_key *);
void pf_state_key_unref(struct pf_state_key *);
@ -1064,10 +1061,11 @@ pf_state_insert(struct pfi_kif *kif, struct pf_state_key **skwp,
pf_status.fcounters[FCNT_STATE_INSERT]++;
pf_status.states++;
pfi_kif_ref(kif, PFI_KIF_REF_STATE);
PF_STATE_EXIT_WRITE();
#if NPFSYNC > 0
pfsync_insert_state(st);
#endif /* NPFSYNC > 0 */
PF_STATE_EXIT_WRITE();
*skwp = skw;
*sksp = sks;
@ -1318,6 +1316,8 @@ pf_state_export(struct pfsync_state *sp, struct pf_state *st)
#endif /* NPFLOG > 0 */
sp->timeout = st->timeout;
sp->state_flags = htons(st->state_flags);
if (READ_ONCE(st->sync_defer) != NULL)
sp->state_flags |= htons(PFSTATE_ACK);
if (!SLIST_EMPTY(&st->src_nodes))
sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
@ -1519,9 +1519,6 @@ pf_state_import(const struct pfsync_state *sp, int flags)
st->rule.ptr = r;
st->anchor.ptr = NULL;
st->pfsync_time = getuptime();
st->sync_state = PFSYNC_S_NONE;
PF_REF_INIT(st->refcnt);
mtx_init(&st->mtx, IPL_NET);
@ -1529,15 +1526,12 @@ pf_state_import(const struct pfsync_state *sp, int flags)
r->states_cur++;
r->states_tot++;
st->sync_state = PFSYNC_S_NONE;
st->pfsync_time = getuptime();
#if NPFSYNC > 0
if (!ISSET(flags, PFSYNC_SI_IOCTL))
SET(st->state_flags, PFSTATE_NOSYNC);
pfsync_init_state(st, skw, sks, flags);
#endif
/*
* We just set PFSTATE_NOSYNC bit, which prevents
* pfsync_insert_state() to insert state to pfsync.
*/
if (pf_state_insert(kif, &skw, &sks, st) != 0) {
/* XXX when we have anchors, use STATE_DEC_COUNTERS */
r->states_cur--;
@ -1545,15 +1539,6 @@ pf_state_import(const struct pfsync_state *sp, int flags)
goto cleanup_state;
}
#if NPFSYNC > 0
if (!ISSET(flags, PFSYNC_SI_IOCTL)) {
CLR(st->state_flags, PFSTATE_NOSYNC);
if (ISSET(st->state_flags, PFSTATE_ACK))
pfsync_iack(st);
}
CLR(st->state_flags, PFSTATE_ACK);
#endif
return (0);
cleanup:
@ -1576,47 +1561,106 @@ pf_state_import(const struct pfsync_state *sp, int flags)
/* END state table stuff */
void
pf_purge_timeout(void *unused)
{
/* XXX move to systqmp to avoid KERNEL_LOCK */
task_add(systq, &pf_purge_task);
void pf_purge_states(void *);
struct task pf_purge_states_task =
TASK_INITIALIZER(pf_purge_states, NULL);
void pf_purge_states_tick(void *);
struct timeout pf_purge_states_to =
TIMEOUT_INITIALIZER(pf_purge_states_tick, NULL);
unsigned int pf_purge_expired_states(unsigned int, unsigned int);
/*
* how many states to scan this interval.
*
* this is set when the timeout fires, and reduced by the task. the
* task will reschedule itself until the limit is reduced to zero,
* and then it adds the timeout again.
*/
unsigned int pf_purge_states_limit;
/*
* limit how many states are processed with locks held per run of
* the state purge task.
*/
unsigned int pf_purge_states_collect = 64;
void
pf_purge_states_tick(void *null)
{
unsigned int limit = pf_status.states;
unsigned int interval = pf_default_rule.timeout[PFTM_INTERVAL];
if (limit == 0) {
timeout_add_sec(&pf_purge_states_to, 1);
return;
}
/*
* process a fraction of the state table every second
*/
if (interval > 1)
limit /= interval;
pf_purge_states_limit = limit;
task_add(systqmp, &pf_purge_states_task);
}
void
pf_purge(void *xnloops)
pf_purge_states(void *null)
{
int *nloops = xnloops;
unsigned int limit;
unsigned int scanned;
/*
* process a fraction of the state table every second
* Note:
* we no longer need PF_LOCK() here, because
* pf_purge_expired_states() uses pf_state_lock to maintain
* consistency.
*/
if (pf_default_rule.timeout[PFTM_INTERVAL] > 0)
pf_purge_expired_states(1 + (pf_status.states
/ pf_default_rule.timeout[PFTM_INTERVAL]));
limit = pf_purge_states_limit;
if (limit < pf_purge_states_collect)
limit = pf_purge_states_collect;
NET_LOCK();
scanned = pf_purge_expired_states(limit, pf_purge_states_collect);
if (scanned >= pf_purge_states_limit) {
/* we've run out of states to scan this "interval" */
timeout_add_sec(&pf_purge_states_to, 1);
return;
}
pf_purge_states_limit -= scanned;
task_add(systqmp, &pf_purge_states_task);
}
void pf_purge_tick(void *);
struct timeout pf_purge_to =
TIMEOUT_INITIALIZER(pf_purge_tick, NULL);
void pf_purge(void *);
struct task pf_purge_task =
TASK_INITIALIZER(pf_purge, NULL);
void
pf_purge_tick(void *null)
{
task_add(systqmp, &pf_purge_task);
}
void
pf_purge(void *null)
{
unsigned int interval = max(1, pf_default_rule.timeout[PFTM_INTERVAL]);
PF_LOCK();
/* purge other expired types every PFTM_INTERVAL seconds */
if (++(*nloops) >= pf_default_rule.timeout[PFTM_INTERVAL])
pf_purge_expired_src_nodes();
PF_UNLOCK();
pf_purge_expired_src_nodes();
PF_UNLOCK();
/*
* Fragments don't require PF_LOCK(), they use their own lock.
*/
if ((*nloops) >= pf_default_rule.timeout[PFTM_INTERVAL]) {
pf_purge_expired_fragments();
*nloops = 0;
}
NET_UNLOCK();
timeout_add_sec(&pf_purge_to, 1);
pf_purge_expired_fragments();
/* interpret the interval as idle time between runs */
timeout_add_sec(&pf_purge_to, interval);
}
int32_t
@ -1717,6 +1761,8 @@ pf_remove_state(struct pf_state *st)
if (st->timeout == PFTM_UNLINKED)
return;
st->timeout = PFTM_UNLINKED;
/* handle load balancing related tasks */
pf_postprocess_addr(st);
@ -1741,7 +1787,6 @@ pf_remove_state(struct pf_state *st)
#if NPFSYNC > 0
pfsync_delete_state(st);
#endif /* NPFSYNC > 0 */
st->timeout = PFTM_UNLINKED;
pf_src_tree_remove_state(st);
pf_detach_state(st);
}
@ -1795,6 +1840,7 @@ pf_free_state(struct pf_state *st)
if (pfsync_state_in_use(st))
return;
#endif /* NPFSYNC > 0 */
KASSERT(st->timeout == PFTM_UNLINKED);
if (--st->rule.ptr->states_cur == 0 &&
st->rule.ptr->src_nodes == 0)
@ -1819,8 +1865,8 @@ pf_free_state(struct pf_state *st)
pf_status.states--;
}
void
pf_purge_expired_states(u_int32_t maxcheck)
unsigned int
pf_purge_expired_states(const unsigned int limit, const unsigned int collect)
{
/*
* this task/thread/context/whatever is the only thing that
@ -1834,6 +1880,8 @@ pf_purge_expired_states(u_int32_t maxcheck)
struct pf_state *st;
SLIST_HEAD(pf_state_gcl, pf_state) gcl = SLIST_HEAD_INITIALIZER(gcl);
time_t now;
unsigned int scanned;
unsigned int collected = 0;
PF_ASSERT_UNLOCKED();
@ -1847,7 +1895,7 @@ pf_purge_expired_states(u_int32_t maxcheck)
if (head == NULL) {
/* the list is empty */
rw_exit_read(&pf_state_list.pfs_rwl);
return;
return (limit);
}
/* (re)start at the front of the list */
@ -1856,13 +1904,17 @@ pf_purge_expired_states(u_int32_t maxcheck)
now = getuptime();
do {
for (scanned = 0; scanned < limit; scanned++) {
uint8_t stimeout = cur->timeout;
unsigned int limited = 0;
if ((stimeout == PFTM_UNLINKED) ||
(pf_state_expires(cur, stimeout) <= now)) {
st = pf_state_ref(cur);
SLIST_INSERT_HEAD(&gcl, st, gc_list);
if (++collected >= collect)
limited = 1;
}
/* don't iterate past the end of our view of the list */
@ -1872,14 +1924,18 @@ pf_purge_expired_states(u_int32_t maxcheck)
}
cur = TAILQ_NEXT(cur, entry_list);
} while (maxcheck--);
/* don't spend too much time here. */
if (ISSET(READ_ONCE(curcpu()->ci_schedstate.spc_schedflags),
SPCF_SHOULDYIELD) || limited)
break;
}
rw_exit_read(&pf_state_list.pfs_rwl);
if (SLIST_EMPTY(&gcl))
return;
return (scanned);
NET_LOCK();
rw_enter_write(&pf_state_list.pfs_rwl);
PF_LOCK();
PF_STATE_ENTER_WRITE();
@ -1892,12 +1948,13 @@ pf_purge_expired_states(u_int32_t maxcheck)
PF_STATE_EXIT_WRITE();
PF_UNLOCK();
rw_exit_write(&pf_state_list.pfs_rwl);
NET_UNLOCK();
while ((st = SLIST_FIRST(&gcl)) != NULL) {
SLIST_REMOVE_HEAD(&gcl, gc_list);
pf_state_unref(st);
}
return (scanned);
}
int
@ -4262,8 +4319,7 @@ next_rule:
int
pf_test_rule(struct pf_pdesc *pd, struct pf_rule **rm, struct pf_state **sm,
struct pf_rule **am, struct pf_ruleset **rsm, u_short *reason,
struct pfsync_deferral **pdeferral)
struct pf_rule **am, struct pf_ruleset **rsm, u_short *reason)
{
struct pf_rule *r = NULL;
struct pf_rule *a = NULL;
@ -4475,7 +4531,7 @@ pf_test_rule(struct pf_pdesc *pd, struct pf_rule **rm, struct pf_state **sm,
* firewall has to know about it to allow
* replies through it.
*/
if (pfsync_defer(*sm, pd->m, pdeferral))
if (pfsync_defer(*sm, pd->m))
return (PF_DEFER);
}
#endif /* NPFSYNC > 0 */
@ -4517,6 +4573,8 @@ pf_create_state(struct pf_pdesc *pd, struct pf_rule *r, struct pf_rule *a,
st->state_flags |= PFSTATE_SLOPPY;
if (r->rule_flag & PFRULE_PFLOW)
st->state_flags |= PFSTATE_PFLOW;
if (r->rule_flag & PFRULE_NOSYNC)
st->state_flags |= PFSTATE_NOSYNC;
#if NPFLOG > 0
st->log = act->log & PF_LOG_ALL;
#endif /* NPFLOG > 0 */
@ -4535,6 +4593,7 @@ pf_create_state(struct pf_pdesc *pd, struct pf_rule *r, struct pf_rule *a,
st->set_prio[1] = act->set_prio[1];
st->delay = act->delay;
SLIST_INIT(&st->src_nodes);
/*
* must initialize refcnt, before pf_state_insert() gets called.
* pf_state_inserts() grabs reference for pfsync!
@ -7462,7 +7521,6 @@ pf_test(sa_family_t af, int fwdir, struct ifnet *ifp, struct mbuf **m0)
int dir = (fwdir == PF_FWD) ? PF_OUT : fwdir;
u_int32_t qid, pqid = 0;
int have_pf_lock = 0;
struct pfsync_deferral *deferral = NULL;
if (!pf_status.running)
return (PF_PASS);
@ -7565,8 +7623,7 @@ pf_test(sa_family_t af, int fwdir, struct ifnet *ifp, struct mbuf **m0)
*/
PF_LOCK();
have_pf_lock = 1;
action = pf_test_rule(&pd, &r, &st, &a, &ruleset, &reason,
&deferral);
action = pf_test_rule(&pd, &r, &st, &a, &ruleset, &reason);
st = pf_state_ref(st);
if (action != PF_PASS)
REASON_SET(&reason, PFRES_FRAG);
@ -7598,7 +7655,7 @@ pf_test(sa_family_t af, int fwdir, struct ifnet *ifp, struct mbuf **m0)
PF_LOCK();
have_pf_lock = 1;
action = pf_test_rule(&pd, &r, &st, &a, &ruleset,
&reason, &deferral);
&reason);
st = pf_state_ref(st);
}
break;
@ -7630,7 +7687,7 @@ pf_test(sa_family_t af, int fwdir, struct ifnet *ifp, struct mbuf **m0)
PF_LOCK();
have_pf_lock = 1;
action = pf_test_rule(&pd, &r, &st, &a, &ruleset,
&reason, &deferral);
&reason);
st = pf_state_ref(st);
}
break;
@ -7714,7 +7771,7 @@ pf_test(sa_family_t af, int fwdir, struct ifnet *ifp, struct mbuf **m0)
PF_LOCK();
have_pf_lock = 1;
action = pf_test_rule(&pd, &r, &st, &a, &ruleset,
&reason, &deferral);
&reason);
st = pf_state_ref(st);
}
@ -7854,14 +7911,6 @@ done:
m_freem(pd.m);
/* FALLTHROUGH */
case PF_DEFER:
#if NPFSYNC > 0
/*
* We no longer hold PF_LOCK() here, so we can dispatch
* deferral if we are asked to do so.
*/
if (deferral != NULL)
pfsync_undefer(deferral, 0);
#endif /* NPFSYNC > 0 */
pd.m = NULL;
action = PF_PASS;
break;
@ -8210,7 +8259,7 @@ pf_state_unref(struct pf_state *st)
#if NPFSYNC > 0
KASSERT((TAILQ_NEXT(st, sync_list) == NULL) ||
((TAILQ_NEXT(st, sync_list) == _Q_INVALID) &&
(st->sync_state == PFSYNC_S_NONE)));
(st->sync_state >= PFSYNC_S_NONE)));
#endif /* NPFSYNC */
KASSERT((TAILQ_NEXT(st, entry_list) == NULL) ||
(TAILQ_NEXT(st, entry_list) == _Q_INVALID));

View file

@ -1,4 +1,4 @@
/* $OpenBSD: pf_ioctl.c,v 1.411 2023/06/30 09:58:30 mvs Exp $ */
/* $OpenBSD: pf_ioctl.c,v 1.415 2023/07/06 04:55:05 dlg Exp $ */
/*
* Copyright (c) 2001 Daniel Hartmeier
@ -177,9 +177,12 @@ int pf_rtlabel_add(struct pf_addr_wrap *);
void pf_rtlabel_remove(struct pf_addr_wrap *);
void pf_rtlabel_copyout(struct pf_addr_wrap *);
uint64_t trans_ticket = 1;
LIST_HEAD(, pf_trans) pf_ioctl_trans = LIST_HEAD_INITIALIZER(pf_trans);
/* counts transactions opened by a device */
unsigned int pf_tcount[CLONE_MAPSZ * NBBY];
#define pf_unit2idx(_unit_) ((_unit_) >> CLONE_SHIFT)
void
pfattach(int num)
{
@ -997,13 +1000,14 @@ pf_states_clr(struct pfioc_state_kill *psk)
}
PF_STATE_EXIT_WRITE();
#if NPFSYNC > 0
pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
#endif /* NPFSYNC > 0 */
PF_UNLOCK();
rw_exit(&pf_state_list.pfs_rwl);
psk->psk_killed = killed;
#if NPFSYNC > 0
pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
#endif /* NPFSYNC > 0 */
unlock:
NET_UNLOCK();
@ -1145,6 +1149,7 @@ pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
case DIOCGETSRCNODES:
case DIOCIGETIFACES:
case DIOCGETSYNFLWATS:
case DIOCXEND:
break;
case DIOCRCLRTABLES:
case DIOCRADDTABLES:
@ -1186,6 +1191,7 @@ pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
pf_status.stateid = gettime();
pf_status.stateid = pf_status.stateid << 32;
}
timeout_add_sec(&pf_purge_states_to, 1);
timeout_add_sec(&pf_purge_to, 1);
pf_create_queues();
DPFPRINTF(LOG_NOTICE, "pf: started");
@ -1492,6 +1498,10 @@ pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
NET_UNLOCK();
t = pf_open_trans(minor(dev));
if (t == NULL) {
error = EBUSY;
goto fail;
}
pf_init_tgetrule(t, ruleset->anchor, ruleset_version, rule);
pr->ticket = t->pft_ticket;
@ -2775,8 +2785,9 @@ pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
pf_default_rule.timeout[i] =
pf_default_rule_new.timeout[i];
if (pf_default_rule.timeout[i] == PFTM_INTERVAL &&
pf_default_rule.timeout[i] < old)
task_add(net_tq(0), &pf_purge_task);
pf_default_rule.timeout[i] < old &&
timeout_del(&pf_purge_to))
task_add(systqmp, &pf_purge_task);
}
pfi_xcommit();
pf_trans_set_commit();
@ -2787,6 +2798,18 @@ pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
break;
}
case DIOCXEND: {
u_int32_t *ticket = (u_int32_t *)addr;
struct pf_trans *t;
t = pf_find_trans(minor(dev), *ticket);
if (t != NULL)
pf_rollback_trans(t);
else
error = ENXIO;
break;
}
case DIOCGETSRCNODES: {
struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
struct pf_src_node *n, *p, *pstore;
@ -3264,9 +3287,14 @@ pf_open_trans(uint32_t unit)
rw_assert_wrlock(&pfioctl_rw);
KASSERT(pf_unit2idx(unit) < nitems(pf_tcount));
if (pf_tcount[pf_unit2idx(unit)] >= (PF_ANCHOR_STACK_MAX * 8))
return (NULL);
t = malloc(sizeof(*t), M_PF, M_WAITOK|M_ZERO);
t->pft_unit = unit;
t->pft_ticket = ticket++;
pf_tcount[pf_unit2idx(unit)]++;
LIST_INSERT_HEAD(&pf_ioctl_trans, t, pft_entry);
@ -3320,6 +3348,11 @@ pf_free_trans(struct pf_trans *t)
log(LOG_ERR, "%s unknown transaction type: %d\n",
__func__, t->pft_type);
}
KASSERT(pf_unit2idx(t->pft_unit) < nitems(pf_tcount));
KASSERT(pf_tcount[pf_unit2idx(t->pft_unit)] >= 1);
pf_tcount[pf_unit2idx(t->pft_unit)]--;
free(t, M_PF, sizeof(*t));
}

View file

@ -1,4 +1,4 @@
/* $OpenBSD: pf_norm.c,v 1.227 2023/05/07 16:23:23 bluhm Exp $ */
/* $OpenBSD: pf_norm.c,v 1.228 2023/07/06 04:55:05 dlg Exp $ */
/*
* Copyright 2001 Niels Provos <provos@citi.umich.edu>
@ -1098,10 +1098,22 @@ no_fragment:
}
#endif /* INET6 */
struct pf_state_scrub *
pf_state_scrub_get(void)
{
return (pool_get(&pf_state_scrub_pl, PR_NOWAIT | PR_ZERO));
}
void
pf_state_scrub_put(struct pf_state_scrub *scrub)
{
pool_put(&pf_state_scrub_pl, scrub);
}
int
pf_normalize_tcp_alloc(struct pf_state_peer *src)
{
src->scrub = pool_get(&pf_state_scrub_pl, PR_NOWAIT | PR_ZERO);
src->scrub = pf_state_scrub_get();
if (src->scrub == NULL)
return (ENOMEM);

View file

@ -1,4 +1,4 @@
/* $OpenBSD: pfvar.h,v 1.531 2023/05/26 12:13:26 kn Exp $ */
/* $OpenBSD: pfvar.h,v 1.533 2023/07/06 04:55:05 dlg Exp $ */
/*
* Copyright (c) 2001 Daniel Hartmeier
@ -1573,6 +1573,7 @@ struct pfioc_synflwats {
#define DIOCSETSYNFLWATS _IOWR('D', 97, struct pfioc_synflwats)
#define DIOCSETSYNCOOKIES _IOWR('D', 98, u_int8_t)
#define DIOCGETSYNFLWATS _IOWR('D', 99, struct pfioc_synflwats)
#define DIOCXEND _IOWR('D', 100, u_int32_t)
#ifdef _KERNEL
@ -1603,15 +1604,10 @@ extern void pf_tbladdr_remove(struct pf_addr_wrap *);
extern void pf_tbladdr_copyout(struct pf_addr_wrap *);
extern void pf_calc_skip_steps(struct pf_rulequeue *);
extern void pf_purge_expired_src_nodes(void);
extern void pf_purge_expired_states(u_int32_t);
extern void pf_purge_expired_rules(void);
extern void pf_remove_state(struct pf_state *);
extern void pf_remove_divert_state(struct pf_state_key *);
extern void pf_free_state(struct pf_state *);
extern int pf_state_insert(struct pfi_kif *,
struct pf_state_key **,
struct pf_state_key **,
struct pf_state *);
int pf_insert_src_node(struct pf_src_node **,
struct pf_rule *, enum pf_sn_types,
sa_family_t, struct pf_addr *,
@ -1675,6 +1671,10 @@ int pf_match_port(u_int8_t, u_int16_t, u_int16_t, u_int16_t);
int pf_match_uid(u_int8_t, uid_t, uid_t, uid_t);
int pf_match_gid(u_int8_t, gid_t, gid_t, gid_t);
struct pf_state_scrub *
pf_state_scrub_get(void);
void pf_state_scrub_put(struct pf_state_scrub *);
int pf_refragment6(struct mbuf **, struct m_tag *mtag,
struct sockaddr_in6 *, struct ifnet *, struct rtentry *);
void pf_normalize_init(void);

View file

@ -1,4 +1,4 @@
/* $OpenBSD: pfvar_priv.h,v 1.33 2023/05/10 22:42:51 sashan Exp $ */
/* $OpenBSD: pfvar_priv.h,v 1.34 2023/07/06 04:55:05 dlg Exp $ */
/*
* Copyright (c) 2001 Daniel Hartmeier
@ -41,6 +41,12 @@
#include <sys/mutex.h>
#include <sys/percpu.h>
struct pfsync_deferral;
/*
* pf state items - links from pf_state_key to pf_states
*/
struct pf_state_item {
TAILQ_ENTRY(pf_state_item)
si_entry;
@ -49,6 +55,10 @@ struct pf_state_item {
TAILQ_HEAD(pf_statelisthead, pf_state_item);
/*
* pf state keys - look up states by address
*/
struct pf_state_key {
struct pf_addr addr[2];
u_int16_t port[2];
@ -73,11 +83,13 @@ RBT_PROTOTYPE(pf_state_tree, pf_state_key, sk_entry, pf_state_compare_key);
(key[PF_SK_WIRE]->af != (family)))
/*
* pf state
*
* Protection/ownership of pf_state members:
* I immutable after pf_state_insert()
* M pf_state mtx
* P PF_STATE_LOCK
* S pfsync mutex
* S pfsync
* L pf_state_list
* g pf_purge gc
*/
@ -89,7 +101,7 @@ struct pf_state {
u_int8_t pad[3];
TAILQ_ENTRY(pf_state) sync_list; /* [S] */
TAILQ_ENTRY(pf_state) sync_snap; /* [S] */
struct pfsync_deferral *sync_defer; /* [S] */
TAILQ_ENTRY(pf_state) entry_list; /* [L] */
SLIST_ENTRY(pf_state) gc_list; /* [g] */
RB_ENTRY(pf_state) entry_id; /* [P] */
@ -101,7 +113,7 @@ struct pf_state {
union pf_rule_ptr natrule; /* [I] */
struct pf_addr rt_addr; /* [I] */
struct pf_sn_head src_nodes; /* [I] */
struct pf_state_key *key[2]; /* [I] stack and wire */
struct pf_state_key *key[2]; /* [I] stack and wire */
struct pfi_kif *kif; /* [I] */
struct mutex mtx;
pf_refcnt_t refcnt;
@ -109,16 +121,16 @@ struct pf_state {
u_int64_t bytes[2];
int32_t creation; /* [I] */
int32_t expire;
int32_t pfsync_time;
int rtableid[2]; /* [I] rtables stack and wire */
int32_t pfsync_time; /* [S] */
int rtableid[2]; /* [I] stack and wire */
u_int16_t qid; /* [I] */
u_int16_t pqid; /* [I] */
u_int16_t tag; /* [I] */
u_int16_t state_flags;
u_int16_t state_flags; /* [M] */
u_int8_t log; /* [I] */
u_int8_t timeout;
u_int8_t sync_state; /* PFSYNC_S_x */
u_int8_t sync_updates;
u_int8_t sync_state; /* [S] PFSYNC_S_x */
u_int8_t sync_updates; /* [S] */
u_int8_t min_ttl; /* [I] */
u_int8_t set_tos; /* [I] */
u_int8_t set_prio[2]; /* [I] */
@ -127,7 +139,6 @@ struct pf_state {
u_int16_t if_index_out; /* [I] */
u_int16_t delay; /* [I] */
u_int8_t rt; /* [I] */
u_int8_t snapped; /* [S] */
};
RBT_HEAD(pf_state_tree_id, pf_state);
@ -345,6 +356,7 @@ struct pf_trans {
#define pftgr_anchor u.u_getrule.gr_anchor
#define pftgr_rule u.u_getrule.gr_rule
extern struct timeout pf_purge_states_to;
extern struct task pf_purge_task;
extern struct timeout pf_purge_to;
@ -397,9 +409,6 @@ extern struct rwlock pf_state_lock;
rw_status(&pf_state_lock), __func__);\
} while (0)
extern void pf_purge_timeout(void *);
extern void pf_purge(void *);
/* for copies to/from network byte order */
void pf_state_peer_hton(const struct pf_state_peer *,
struct pfsync_state_peer *);

View file

@ -1,4 +1,4 @@
/* $OpenBSD: in_proto.c,v 1.101 2023/05/18 09:59:43 mvs Exp $ */
/* $OpenBSD: in_proto.c,v 1.102 2023/07/06 04:55:05 dlg Exp $ */
/* $NetBSD: in_proto.c,v 1.14 1996/02/18 18:58:32 christos Exp $ */
/*
@ -343,7 +343,7 @@ const struct protosw inetsw[] = {
.pr_domain = &inetdomain,
.pr_protocol = IPPROTO_PFSYNC,
.pr_flags = PR_ATOMIC|PR_ADDR,
.pr_input = pfsync_input,
.pr_input = pfsync_input4,
.pr_ctloutput = rip_ctloutput,
.pr_usrreqs = &rip_usrreqs,
.pr_sysctl = pfsync_sysctl

View file

@ -1,4 +1,4 @@
/* $OpenBSD: ip_ipsp.h,v 1.240 2022/07/14 13:52:10 mvs Exp $ */
/* $OpenBSD: ip_ipsp.h,v 1.241 2023/07/06 04:55:05 dlg Exp $ */
/*
* The authors of this code are John Ioannidis (ji@tla.org),
* Angelos D. Keromytis (kermit@csd.uch.gr),
@ -50,6 +50,7 @@
* P ipo_tdb_mtx link policy to TDB global mutex
* D tdb_sadb_mtx SA database global mutex
* m tdb_mtx fields of struct tdb
* S pfsync fields of struct tdb
*/
/* IPSP global definitions. */
@ -405,7 +406,6 @@ struct tdb { /* tunnel descriptor block */
u_int8_t tdb_sproto; /* [I] IPsec protocol */
u_int8_t tdb_wnd; /* Replay window */
u_int8_t tdb_satype; /* SA type (RFC2367, PF_KEY) */
u_int8_t tdb_updates; /* pfsync update counter */
union sockaddr_union tdb_dst; /* [N] Destination address */
union sockaddr_union tdb_src; /* [N] Source address */
@ -439,8 +439,8 @@ struct tdb { /* tunnel descriptor block */
struct sockaddr_encap tdb_filtermask; /* And the mask */
TAILQ_HEAD(tdb_policy_head, ipsec_policy) tdb_policy_head; /* [P] */
TAILQ_ENTRY(tdb) tdb_sync_entry;
TAILQ_ENTRY(tdb) tdb_sync_snap;
TAILQ_ENTRY(tdb) tdb_sync_entry; /* [S] pfsync tdb queue */
u_int32_t tdb_updates; /* [S] pfsync update counter */
};
enum tdb_counters {

View file

@ -1,4 +1,4 @@
/* $OpenBSD: ip_output.c,v 1.388 2023/05/22 16:08:34 bluhm Exp $ */
/* $OpenBSD: ip_output.c,v 1.389 2023/07/04 10:48:19 bluhm Exp $ */
/* $NetBSD: ip_output.c,v 1.28 1996/02/13 23:43:07 christos Exp $ */
/*
@ -1801,7 +1801,7 @@ in_hdr_cksum_out(struct mbuf *m, struct ifnet *ifp)
struct ip *ip = mtod(m, struct ip *);
ip->ip_sum = 0;
if (ifp && in_ifcap_cksum(m, ifp, IFCAP_CSUM_IPv4)) {
if (in_ifcap_cksum(m, ifp, IFCAP_CSUM_IPv4)) {
SET(m->m_pkthdr.csum_flags, M_IPV4_CSUM_OUT);
} else {
ipstat_inc(ips_outswcsum);

View file

@ -1,4 +1,4 @@
/* $OpenBSD: tcp_input.c,v 1.388 2023/05/30 19:32:57 bluhm Exp $ */
/* $OpenBSD: tcp_input.c,v 1.389 2023/07/06 09:15:23 bluhm Exp $ */
/* $NetBSD: tcp_input.c,v 1.23 1996/02/13 23:43:44 christos Exp $ */
/*
@ -130,8 +130,8 @@ struct timeval tcp_ackdrop_ppslim_last;
#define TCP_PAWS_IDLE TCP_TIME(24 * 24 * 60 * 60)
/* for modulo comparisons of timestamps */
#define TSTMP_LT(a,b) ((int)((a)-(b)) < 0)
#define TSTMP_GEQ(a,b) ((int)((a)-(b)) >= 0)
#define TSTMP_LT(a,b) ((int32_t)((a)-(b)) < 0)
#define TSTMP_GEQ(a,b) ((int32_t)((a)-(b)) >= 0)
/* for TCP SACK comparisons */
#define SEQ_MIN(a,b) (SEQ_LT(a,b) ? (a) : (b))
@ -190,7 +190,7 @@ void tcp_newreno_partialack(struct tcpcb *, struct tcphdr *);
void syn_cache_put(struct syn_cache *);
void syn_cache_rm(struct syn_cache *);
int syn_cache_respond(struct syn_cache *, struct mbuf *, uint32_t);
int syn_cache_respond(struct syn_cache *, struct mbuf *, uint64_t);
void syn_cache_timer(void *);
void syn_cache_reaper(void *);
void syn_cache_insert(struct syn_cache *, struct tcpcb *);
@ -198,10 +198,10 @@ void syn_cache_reset(struct sockaddr *, struct sockaddr *,
struct tcphdr *, u_int);
int syn_cache_add(struct sockaddr *, struct sockaddr *, struct tcphdr *,
unsigned int, struct socket *, struct mbuf *, u_char *, int,
struct tcp_opt_info *, tcp_seq *, uint32_t);
struct tcp_opt_info *, tcp_seq *, uint64_t);
struct socket *syn_cache_get(struct sockaddr *, struct sockaddr *,
struct tcphdr *, unsigned int, unsigned int, struct socket *,
struct mbuf *, uint32_t);
struct mbuf *, uint64_t);
struct syn_cache *syn_cache_lookup(struct sockaddr *, struct sockaddr *,
struct syn_cache_head **, u_int);
@ -375,7 +375,7 @@ tcp_input(struct mbuf **mp, int *offp, int proto, int af)
short ostate;
caddr_t saveti;
tcp_seq iss, *reuse = NULL;
uint32_t now;
uint64_t now;
u_long tiwin;
struct tcp_opt_info opti;
struct tcphdr *th;
@ -885,7 +885,7 @@ findpcb:
goto drop;
if (opti.ts_present && opti.ts_ecr) {
int rtt_test;
int32_t rtt_test;
/* subtract out the tcp timestamp modulator */
opti.ts_ecr -= tp->ts_modulate;
@ -1272,7 +1272,7 @@ trimthenstep6:
TSTMP_LT(opti.ts_val, tp->ts_recent)) {
/* Check to see if ts_recent is over 24 days old. */
if ((int)(now - tp->ts_recent_age) > TCP_PAWS_IDLE) {
if (now - tp->ts_recent_age > TCP_PAWS_IDLE) {
/*
* Invalidate ts_recent. If this segment updates
* ts_recent, the age will be reset later and ts_recent
@ -2120,7 +2120,7 @@ drop:
int
tcp_dooptions(struct tcpcb *tp, u_char *cp, int cnt, struct tcphdr *th,
struct mbuf *m, int iphlen, struct tcp_opt_info *oi,
u_int rtableid, uint32_t now)
u_int rtableid, uint64_t now)
{
u_int16_t mss = 0;
int opt, optlen;
@ -2686,7 +2686,7 @@ tcp_pulloutofband(struct socket *so, u_int urgent, struct mbuf *m, int off)
* and update averages and current timeout.
*/
void
tcp_xmit_timer(struct tcpcb *tp, int rtt)
tcp_xmit_timer(struct tcpcb *tp, int32_t rtt)
{
int delta, rttmin;
@ -3335,7 +3335,7 @@ void
syn_cache_timer(void *arg)
{
struct syn_cache *sc = arg;
uint32_t now;
uint64_t now;
NET_LOCK();
if (sc->sc_flags & SCF_DEAD)
@ -3469,7 +3469,7 @@ syn_cache_lookup(struct sockaddr *src, struct sockaddr *dst,
*/
struct socket *
syn_cache_get(struct sockaddr *src, struct sockaddr *dst, struct tcphdr *th,
u_int hlen, u_int tlen, struct socket *so, struct mbuf *m, uint32_t now)
u_int hlen, u_int tlen, struct socket *so, struct mbuf *m, uint64_t now)
{
struct syn_cache *sc;
struct syn_cache_head *scp;
@ -3744,7 +3744,7 @@ syn_cache_unreach(struct sockaddr *src, struct sockaddr *dst, struct tcphdr *th,
int
syn_cache_add(struct sockaddr *src, struct sockaddr *dst, struct tcphdr *th,
u_int iphlen, struct socket *so, struct mbuf *m, u_char *optp, int optlen,
struct tcp_opt_info *oi, tcp_seq *issp, uint32_t now)
struct tcp_opt_info *oi, tcp_seq *issp, uint64_t now)
{
struct tcpcb tb, *tp;
long win;
@ -3911,7 +3911,7 @@ syn_cache_add(struct sockaddr *src, struct sockaddr *dst, struct tcphdr *th,
}
int
syn_cache_respond(struct syn_cache *sc, struct mbuf *m, uint32_t now)
syn_cache_respond(struct syn_cache *sc, struct mbuf *m, uint64_t now)
{
u_int8_t *optp;
int optlen, error;

View file

@ -1,4 +1,4 @@
/* $OpenBSD: tcp_output.c,v 1.138 2023/05/15 16:34:56 bluhm Exp $ */
/* $OpenBSD: tcp_output.c,v 1.140 2023/07/06 09:15:24 bluhm Exp $ */
/* $NetBSD: tcp_output.c,v 1.16 1997/06/03 16:17:09 kml Exp $ */
/*
@ -204,7 +204,7 @@ tcp_output(struct tcpcb *tp)
int idle, sendalot = 0;
int i, sack_rxmit = 0;
struct sackhole *p;
uint32_t now;
uint64_t now;
#ifdef TCP_SIGNATURE
unsigned int sigoff;
#endif /* TCP_SIGNATURE */
@ -1295,7 +1295,6 @@ tcp_chopper(struct mbuf *m0, struct mbuf_list *ml, struct ifnet *ifp,
/* copy and adjust IP header, calculate checksum */
SET(m->m_pkthdr.csum_flags, M_TCP_CSUM_OUT);
mhth->th_sum = 0;
if (ip) {
struct ip *mhip;
@ -1328,10 +1327,8 @@ tcp_chopper(struct mbuf *m0, struct mbuf_list *ml, struct ifnet *ifp,
}
/* adjust IP header, calculate checksum */
SET(m0->m_pkthdr.csum_flags, M_TCP_CSUM_OUT);
th->th_sum = 0;
if (ip) {
ip->ip_len = htons(m0->m_pkthdr.len);
ip->ip_sum = 0;
in_hdr_cksum_out(m0, ifp);
in_proto_cksum_out(m0, ifp);
}

View file

@ -1,4 +1,4 @@
/* $OpenBSD: tcp_subr.c,v 1.191 2023/05/10 12:07:16 bluhm Exp $ */
/* $OpenBSD: tcp_subr.c,v 1.192 2023/07/06 09:15:24 bluhm Exp $ */
/* $NetBSD: tcp_subr.c,v 1.22 1996/02/13 23:44:00 christos Exp $ */
/*
@ -137,6 +137,7 @@ struct cpumem *tcpcounters; /* tcp statistics */
u_char tcp_secret[16]; /* [I] */
SHA2_CTX tcp_secret_ctx; /* [I] */
tcp_seq tcp_iss; /* [T] updated by timer and connection */
uint64_t tcp_starttime; /* [I] random offset for tcp_now() */
/*
* Tcp initialization
@ -145,6 +146,9 @@ void
tcp_init(void)
{
tcp_iss = 1; /* wrong */
/* 0 is treated special so add 1, 63 bits to count is enough */
arc4random_buf(&tcp_starttime, sizeof(tcp_starttime));
tcp_starttime = 1ULL + (tcp_starttime / 2);
pool_init(&tcpcb_pool, sizeof(struct tcpcb), 0, IPL_SOFTNET, 0,
"tcpcb", NULL);
pool_init(&tcpqe_pool, sizeof(struct tcpqent), 0, IPL_SOFTNET, 0,
@ -289,7 +293,7 @@ tcp_template(struct tcpcb *tp)
*/
void
tcp_respond(struct tcpcb *tp, caddr_t template, struct tcphdr *th0,
tcp_seq ack, tcp_seq seq, int flags, u_int rtableid, uint32_t now)
tcp_seq ack, tcp_seq seq, int flags, u_int rtableid, uint64_t now)
{
int tlen;
int win = 0;

View file

@ -1,4 +1,4 @@
/* $OpenBSD: tcp_timer.c,v 1.72 2023/03/14 00:24:05 yasuoka Exp $ */
/* $OpenBSD: tcp_timer.c,v 1.73 2023/07/06 09:15:24 bluhm Exp $ */
/* $NetBSD: tcp_timer.c,v 1.14 1996/02/13 23:44:09 christos Exp $ */
/*
@ -394,7 +394,7 @@ tcp_timer_persist(void *arg)
struct tcpcb *otp = NULL, *tp = arg;
uint32_t rto;
short ostate;
uint32_t now;
uint64_t now;
NET_LOCK();
/* Ignore canceled timeouts or timeouts that have been rescheduled. */
@ -463,7 +463,7 @@ tcp_timer_keep(void *arg)
tp->t_inpcb->inp_socket->so_options & SO_KEEPALIVE) &&
tp->t_state <= TCPS_CLOSING) {
int maxidle;
uint32_t now;
uint64_t now;
maxidle = READ_ONCE(tcp_maxidle);
now = tcp_now();
@ -506,7 +506,7 @@ tcp_timer_2msl(void *arg)
struct tcpcb *otp = NULL, *tp = arg;
short ostate;
int maxidle;
uint32_t now;
uint64_t now;
NET_LOCK();
/* Ignore canceled timeouts or timeouts that have been rescheduled. */

View file

@ -1,4 +1,4 @@
/* $OpenBSD: tcp_usrreq.c,v 1.219 2023/05/23 09:16:16 jan Exp $ */
/* $OpenBSD: tcp_usrreq.c,v 1.221 2023/07/06 09:15:24 bluhm Exp $ */
/* $NetBSD: tcp_usrreq.c,v 1.20 1996/02/13 23:44:16 christos Exp $ */
/*
@ -211,7 +211,7 @@ tcp_fill_info(struct tcpcb *tp, struct socket *so, struct mbuf *m)
struct proc *p = curproc;
struct tcp_info *ti;
u_int t = 1000; /* msec => usec */
uint32_t now;
uint64_t now;
if (sizeof(*ti) > MLEN) {
MCLGETL(m, M_WAITOK, sizeof(*ti));
@ -1340,6 +1340,7 @@ tcp_sysctl_tcpstat(void *oldp, size_t *oldlenp, void *newp)
ASSIGN(tcps_outhwtso);
ASSIGN(tcps_outpkttso);
ASSIGN(tcps_outbadtso);
ASSIGN(tcps_inswlro);
ASSIGN(tcps_inhwlro);
ASSIGN(tcps_inpktlro);
ASSIGN(tcps_inbadlro);

View file

@ -1,4 +1,4 @@
/* $OpenBSD: tcp_var.h,v 1.167 2023/05/23 09:16:16 jan Exp $ */
/* $OpenBSD: tcp_var.h,v 1.169 2023/07/06 09:15:24 bluhm Exp $ */
/* $NetBSD: tcp_var.h,v 1.17 1996/02/13 23:44:24 christos Exp $ */
/*
@ -150,8 +150,8 @@ struct tcpcb {
*/
/* auto-sizing variables */
uint64_t rfbuf_ts; /* recv buffer autoscaling time stamp */
u_int rfbuf_cnt; /* recv buffer autoscaling byte count */
u_int32_t rfbuf_ts; /* recv buffer autoscaling time stamp */
u_short t_maxopd; /* mss plus options */
u_short t_peermss; /* peer's maximum segment size */
@ -160,11 +160,11 @@ struct tcpcb {
* transmit timing stuff. See below for scale of srtt and rttvar.
* "Variance" is actually smoothed difference.
*/
uint32_t t_rcvtime; /* time last segment received */
uint32_t t_rcvacktime; /* time last ack received */
uint32_t t_sndtime; /* time last segment sent */
uint32_t t_sndacktime; /* time last ack sent */
uint32_t t_rtttime; /* time we started measuring rtt */
uint64_t t_rcvtime; /* time last segment received */
uint64_t t_rcvacktime; /* time last ack received */
uint64_t t_sndtime; /* time last segment sent */
uint64_t t_sndacktime; /* time last ack sent */
uint64_t t_rtttime; /* time we started measuring rtt */
tcp_seq t_rtseq; /* sequence number being timed */
int t_srtt; /* smoothed round-trip time */
int t_rttvar; /* variance in round-trip time */
@ -183,9 +183,9 @@ struct tcpcb {
u_char rcv_scale; /* window scaling for recv window */
u_char request_r_scale; /* pending window scaling */
u_char requested_s_scale;
u_int32_t ts_recent; /* timestamp echo data */
u_int32_t ts_modulate; /* modulation on timestamp */
u_int32_t ts_recent_age; /* when last updated */
uint32_t ts_recent; /* timestamp echo data */
uint32_t ts_modulate; /* modulation on timestamp */
uint64_t ts_recent_age; /* when last updated */
tcp_seq last_ack_sent;
/* pointer for syn cache entries*/
@ -250,12 +250,9 @@ struct syn_cache {
long sc_win; /* advertised window */
struct syn_cache_head *sc_buckethead; /* our bucket index */
struct syn_cache_set *sc_set; /* our syn cache set */
u_int64_t sc_timestamp; /* timestamp from SYN */
u_int32_t sc_hash;
u_int32_t sc_timestamp; /* timestamp from SYN */
u_int32_t sc_modulate; /* our timestamp modulator */
#if 0
u_int32_t sc_timebase; /* our local timebase */
#endif
union syn_cache_sa sc_src;
union syn_cache_sa sc_dst;
tcp_seq sc_irs;
@ -447,6 +444,7 @@ struct tcpstat {
u_int32_t tcps_outhwtso; /* output tso processed by hardware */
u_int32_t tcps_outpkttso; /* packets generated by tso */
u_int32_t tcps_outbadtso; /* output tso failed, packet dropped */
u_int32_t tcps_inswlro; /* input lro on pseudo device */
u_int32_t tcps_inhwlro; /* input lro from hardware */
u_int32_t tcps_inpktlro; /* packets coalesced by hardware lro */
u_int32_t tcps_inbadlro; /* input bad lro packets */
@ -628,6 +626,7 @@ enum tcpstat_counters {
tcps_outhwtso,
tcps_outpkttso,
tcps_outbadtso,
tcps_inswlro,
tcps_inhwlro,
tcps_inpktlro,
tcps_inbadlro,
@ -655,10 +654,13 @@ tcpstat_pkt(enum tcpstat_counters pcounter, enum tcpstat_counters bcounter,
counters_pkt(tcpcounters, pcounter, bcounter, v);
}
static inline uint32_t
extern uint64_t tcp_starttime;
static inline uint64_t
tcp_now(void)
{
return (getnsecruntime() / 1000000);
/* TCP time ticks in 63 bit milliseconds with 63 bit random offset. */
return tcp_starttime + (getnsecruntime() / 1000000ULL);
}
#define TCP_TIME(_sec) ((_sec) * 1000) /* tcp_now() is in milliseconds */
@ -710,7 +712,7 @@ struct tcpcb *
struct tcpcb *
tcp_drop(struct tcpcb *, int);
int tcp_dooptions(struct tcpcb *, u_char *, int, struct tcphdr *,
struct mbuf *, int, struct tcp_opt_info *, u_int, uint32_t);
struct mbuf *, int, struct tcp_opt_info *, u_int, uint64_t);
void tcp_init(void);
int tcp_input(struct mbuf **, int *, int, int);
int tcp_mss(struct tcpcb *, int);
@ -733,7 +735,7 @@ void tcp_pulloutofband(struct socket *, u_int, struct mbuf *, int);
int tcp_reass(struct tcpcb *, struct tcphdr *, struct mbuf *, int *);
void tcp_rscale(struct tcpcb *, u_long);
void tcp_respond(struct tcpcb *, caddr_t, struct tcphdr *, tcp_seq,
tcp_seq, int, u_int, uint32_t);
tcp_seq, int, u_int, uint64_t);
void tcp_setpersist(struct tcpcb *);
void tcp_update_sndspace(struct tcpcb *);
void tcp_update_rcvspace(struct tcpcb *);
@ -765,7 +767,7 @@ int tcp_sense(struct socket *, struct stat *);
int tcp_rcvoob(struct socket *, struct mbuf *, int);
int tcp_sendoob(struct socket *, struct mbuf *, struct mbuf *,
struct mbuf *);
void tcp_xmit_timer(struct tcpcb *, int);
void tcp_xmit_timer(struct tcpcb *, int32_t);
void tcpdropoldhalfopen(struct tcpcb *, u_int16_t);
void tcp_sack_option(struct tcpcb *,struct tcphdr *,u_char *,int);
void tcp_update_sack_list(struct tcpcb *tp, tcp_seq, tcp_seq);

View file

@ -1,4 +1,4 @@
/* $OpenBSD: scsi_base.c,v 1.281 2023/05/25 19:35:58 kurt Exp $ */
/* $OpenBSD: scsi_base.c,v 1.282 2023/07/06 10:17:43 visa Exp $ */
/* $NetBSD: scsi_base.c,v 1.43 1997/04/02 02:29:36 mycroft Exp $ */
/*
@ -1497,10 +1497,11 @@ scsi_done(struct scsi_xfer *xs)
int
scsi_xs_sync(struct scsi_xfer *xs)
{
struct mutex cookie = MUTEX_INITIALIZER_FLAGS(IPL_BIO, __MTX_NAME,
MTX_NOWITNESS);
struct mutex cookie;
int error;
mtx_init(&cookie, IPL_BIO);
#ifdef DIAGNOSTIC
if (xs->cookie != NULL)
panic("xs->cookie != NULL in scsi_xs_sync");

View file

@ -1,4 +1,4 @@
/* $OpenBSD: malloc.h,v 1.124 2023/06/30 09:58:30 mvs Exp $ */
/* $OpenBSD: malloc.h,v 1.125 2023/07/03 06:45:44 guenther Exp $ */
/* $NetBSD: malloc.h,v 1.39 1998/07/12 19:52:01 augustss Exp $ */
/*
@ -66,116 +66,116 @@
/* 1 - free */
#define M_DEVBUF 2 /* device driver memory */
/* 3 - free */
#define M_PCB 4 /* protocol control block */
#define M_PCB 4 /* protocol control blocks */
#define M_RTABLE 5 /* routing tables */
#define M_PF 6 /* packet filter structures */
#define M_PF 6 /* packet filter structures */
/* 7 - free */
/* 8 - free */
#define M_IFADDR 9 /* interface address */
#define M_IFGROUP 10 /* interface group */
#define M_SYSCTL 11 /* sysctl buffers (persistent storage) */
#define M_COUNTERS 12 /* per CPU counters */
#define M_IFADDR 9 /* interface addresses */
#define M_IFGROUP 10 /* interface groups */
#define M_SYSCTL 11 /* sysctl persistent buffers */
#define M_COUNTERS 12 /* per-CPU counters via counters_alloc(9) */
/* 13 - free */
#define M_IOCTLOPS 14 /* ioctl data buffer */
#define M_IOCTLOPS 14 /* ioctl data buffers */
/* 15-18 - free */
#define M_IOV 19 /* large iov's */
#define M_MOUNT 20 /* vfs mount struct */
#define M_IOV 19 /* large IOVs */
#define M_MOUNT 20 /* VFS mount structs */
/* 21 - free */
#define M_NFSREQ 22 /* NFS request header */
#define M_NFSMNT 23 /* NFS mount structure */
#define M_LOG 24 /* Messages in kernel log stash */
#define M_NFSREQ 22 /* NFS request headers */
#define M_NFSMNT 23 /* NFS mount structures */
#define M_LOG 24 /* messages in kernel log stash */
#define M_VNODE 25 /* Dynamically allocated vnodes */
/* 26 - free */
#define M_DQUOT 27 /* UFS quota entries */
#define M_UFSMNT 28 /* UFS mount structure */
#define M_UFSMNT 28 /* UFS mount structures */
#define M_SHM 29 /* SVID compatible shared memory segments */
#define M_VMMAP 30 /* VM map structures */
#define M_SEM 31 /* SVID compatible semaphores */
#define M_DIRHASH 32 /* UFS dirhash */
#define M_ACPI 33 /* ACPI */
#define M_VMPMAP 34 /* VM pmap */
#define M_DIRHASH 32 /* UFS directory hash structures */
#define M_ACPI 33 /* ACPI structures */
#define M_VMPMAP 34 /* VM pmap data */
/* 35-38 - free */
#define M_FILEDESC 39 /* Open file descriptor table */
#define M_SIGIO 40 /* Sigio structures */
#define M_PROC 41 /* Proc structures */
#define M_SUBPROC 42 /* Proc sub-structures */
#define M_FILEDESC 39 /* open file descriptor tables */
#define M_SIGIO 40 /* sigio structures */
#define M_PROC 41 /* proc structures */
#define M_SUBPROC 42 /* proc sub-structures */
/* 43-45 - free */
#define M_MFSNODE 46 /* MFS vnode private part */
/* 47-48 - free */
#define M_NETADDR 49 /* Export host address structure */
#define M_NFSSVC 50 /* Nfs server structure */
#define M_NETADDR 49 /* export host address structures */
#define M_NFSSVC 50 /* NFS server structures */
/* 51 - free */
#define M_NFSD 52 /* Nfs server daemon structure */
#define M_NFSD 52 /* NFS server daemon structures */
#define M_IPMOPTS 53 /* internet multicast options */
#define M_IPMADDR 54 /* internet multicast address */
#define M_IFMADDR 55 /* link-level multicast address */
#define M_IPMADDR 54 /* internet multicast addresses */
#define M_IFMADDR 55 /* link-level multicast addresses */
#define M_MRTABLE 56 /* multicast routing tables */
#define M_ISOFSMNT 57 /* ISOFS mount structure */
#define M_ISOFSMNT 57 /* ISOFS mount structures */
#define M_ISOFSNODE 58 /* ISOFS vnode private part */
#define M_MSDOSFSMNT 59 /* MSDOS FS mount structure */
#define M_MSDOSFSFAT 60 /* MSDOS FS fat table */
#define M_MSDOSFSMNT 59 /* MSDOS FS mount structures */
#define M_MSDOSFSFAT 60 /* MSDOS FS FAT tables */
#define M_MSDOSFSNODE 61 /* MSDOS FS vnode private part */
#define M_TTYS 62 /* allocated tty structures */
#define M_EXEC 63 /* argument lists & other mem used by exec */
#define M_MISCFSMNT 64 /* miscfs mount structures */
#define M_FUSEFS 65 /* fusefs mount structures */
#define M_MISCFSMNT 64 /* miscellaneous FS mount structures */
#define M_FUSEFS 65 /* FUSE FS mount structures */
/* 66-73 - free */
#define M_PFKEY 74 /* pfkey data */
#define M_TDB 75 /* Transforms database */
#define M_TDB 75 /* transforms database */
#define M_XDATA 76 /* IPsec data */
/* 77 - free */
#define M_PAGEDEP 78 /* File page dependencies */
#define M_INODEDEP 79 /* Inode dependencies */
#define M_NEWBLK 80 /* New block allocation */
#define M_PAGEDEP 78 /* file page dependencies */
#define M_INODEDEP 79 /* inode dependencies */
#define M_NEWBLK 80 /* new block allocation */
/* 81-82 - free */
#define M_INDIRDEP 83 /* Indirect block dependencies */
#define M_INDIRDEP 83 /* indirect block dependencies */
/* 84-91 - free */
#define M_VMSWAP 92 /* VM swap structures */
#define M_VMSWAP 92 /* VM swap structures */
/* 93-97 - free */
#define M_UVMAMAP 98 /* UVM amap and related */
#define M_UVMAOBJ 99 /* UVM aobj and related */
#define M_UVMAMAP 98 /* UVM amap and related */
#define M_UVMAOBJ 99 /* UVM aobj and related */
/* 100 - free */
#define M_USB 101 /* USB general */
#define M_USBDEV 102 /* USB device driver */
#define M_USBHC 103 /* USB host controller */
#define M_WITNESS 104 /* witness data */
#define M_MEMDESC 105 /* Memory range */
#define M_WITNESS 104 /* witness(4) memory */
#define M_MEMDESC 105 /* memory range */
/* 106-107 - free */
#define M_CRYPTO_DATA 108 /* Crypto framework data buffers (keys etc.) */
#define M_CRYPTO_DATA 108 /* crypto(9) data buffers */
/* 109 - free */
#define M_CREDENTIALS 110 /* IPsec-related credentials and ID info */
#define M_CREDENTIALS 110 /* ipsec(4) related credentials */
/* 111-122 - free */
/* KAME IPv6 */
#define M_IP6OPT 123 /* IPv6 options */
#define M_IP6NDP 124 /* IPv6 Neighbour Discovery */
#define M_IP6NDP 124 /* IPv6 Neighbor Discovery structures */
/* 125-126 - free */
#define M_TEMP 127 /* misc temporary data buffers */
#define M_TEMP 127 /* miscellaneous temporary data buffers */
#define M_NTFSMNT 128 /* NTFS mount structure */
#define M_NTFSMNT 128 /* NTFS mount structures */
#define M_NTFSNTNODE 129 /* NTFS ntnode information */
#define M_NTFSFNODE 130 /* NTFS fnode information */
#define M_NTFSDIR 131 /* NTFS dir buffer */
#define M_NTFSDIR 131 /* NTFS directory buffers */
#define M_NTFSNTHASH 132 /* NTFS ntnode hash tables */
#define M_NTFSNTVATTR 133 /* NTFS file attribute information */
#define M_NTFSRDATA 134 /* NTFS resident data */
#define M_NTFSDECOMP 135 /* NTFS decompression temporary */
#define M_NTFSDECOMP 135 /* NTFS decompression temporary storage */
#define M_NTFSRUN 136 /* NTFS vrun storage */
#define M_KEVENT 137 /* kqueue related */
#define M_KEVENT 137 /* kqueue(2) data structures */
/* 138 free */
#define M_SYNCACHE 139 /* syn cache hash array */
#define M_SYNCACHE 139 /* SYN cache hash array */
#define M_UDFMOUNT 140 /* UDF mount */
#define M_UDFFENTRY 141 /* UDF file entry */
#define M_UDFFID 142 /* UDF file id */
#define M_UDFMOUNT 140 /* UDF mount structures */
#define M_UDFFENTRY 141 /* UDF file entries */
#define M_UDFFID 142 /* UDF file IDs */
/* 143 free */
#define M_AGP 144 /* AGP Memory */
#define M_AGP 144 /* AGP memory */
#define M_DRM 145 /* Direct Rendering Manager */
#define M_DRM 145 /* Direct Rendering Manager */
#define M_LAST 146 /* Must be last type + 1 */

View file

@ -1,4 +1,4 @@
/* $OpenBSD: mbuf.h,v 1.257 2023/05/10 12:07:17 bluhm Exp $ */
/* $OpenBSD: mbuf.h,v 1.259 2023/07/04 09:47:51 jsg Exp $ */
/* $NetBSD: mbuf.h,v 1.19 1996/02/09 18:25:14 christos Exp $ */
/*
@ -400,8 +400,6 @@ struct mbuf_queue {
struct pool;
extern long nmbclust; /* limit on the # of clusters */
extern int mblowat; /* mbuf low water mark */
extern int mcllowat; /* mbuf cluster low water mark */
extern int max_linkhdr; /* largest link-level header */
extern int max_protohdr; /* largest protocol header */
extern int max_hdr; /* largest link+protocol header */
@ -437,7 +435,6 @@ void m_adj(struct mbuf *, int);
int m_copyback(struct mbuf *, int, int, const void *, int);
struct mbuf *m_freem(struct mbuf *);
void m_purge(struct mbuf *);
void m_reclaim(void *, int);
void m_copydata(struct mbuf *, int, int, void *);
void m_cat(struct mbuf *, struct mbuf *);
struct mbuf *m_devget(char *, int, int);

View file

@ -1,4 +1,4 @@
/* $OpenBSD: mount.h,v 1.148 2021/04/06 14:17:35 kn Exp $ */
/* $OpenBSD: mount.h,v 1.150 2023/07/05 15:13:28 beck Exp $ */
/* $NetBSD: mount.h,v 1.48 1996/02/18 11:55:47 fvdl Exp $ */
/*
@ -401,7 +401,7 @@ struct mount {
#define MNT_STALLED 0x00100000 /* filesystem stalled */
#define MNT_SWAPPABLE 0x00200000 /* filesystem can be used for swap */
#define MNT_WANTRDWR 0x02000000 /* want upgrade to read/write */
#define MNT_SOFTDEP 0x04000000 /* soft dependencies being done */
#define MNT_SOFTDEP 0x04000000 /* soft dependencies being done - now ignored */
#define MNT_DOOMED 0x08000000 /* device behind filesystem is gone */
#ifdef _KERNEL
@ -604,7 +604,6 @@ extern TAILQ_HEAD(mntlist, mount) mountlist;
int vfs_stall(struct proc *, int);
void vfs_stall_barrier(void);
struct mount *getvfs(fsid_t *); /* return vfs given fsid */
/* process mount export info */
int vfs_export(struct mount *, struct netexport *, struct export_args *);
/* lookup host in fs export list */

View file

@ -1,4 +1,4 @@
/* $OpenBSD: proc.h,v 1.342 2023/06/28 08:23:25 claudio Exp $ */
/* $OpenBSD: proc.h,v 1.343 2023/07/04 11:14:00 jsg Exp $ */
/* $NetBSD: proc.h,v 1.44 1996/04/22 01:23:21 christos Exp $ */
/*-
@ -547,8 +547,6 @@ void unsleep(struct proc *);
void reaper(void *);
__dead void exit1(struct proc *, int, int, int);
void exit2(struct proc *);
int dowait4(struct proc *, pid_t, int *, int, struct rusage *,
register_t *);
void cpu_fork(struct proc *_curp, struct proc *_child, void *_stack,
void *_tcb, void (*_func)(void *), void *_arg);
void cpu_exit(struct proc *);

View file

@ -1,4 +1,4 @@
/* $OpenBSD: socketvar.h,v 1.119 2023/01/27 18:46:34 mvs Exp $ */
/* $OpenBSD: socketvar.h,v 1.120 2023/07/04 22:28:24 mvs Exp $ */
/* $NetBSD: socketvar.h,v 1.18 1996/02/09 18:25:38 christos Exp $ */
/*-
@ -272,10 +272,17 @@ sbfree(struct socket *so, struct sockbuf *sb, struct mbuf *m)
sb->sb_mbcnt -= m->m_ext.ext_size;
}
/*
* Flags to sblock()
*/
#define SBL_WAIT 0x01 /* Wait if lock not immediately available. */
#define SBL_NOINTR 0x02 /* Enforce non-interruptible sleep. */
/*
* Set lock on sockbuf sb; sleep if lock is already held.
* Unless SB_NOINTR is set on sockbuf, sleep is interruptible.
* Returns error without lock if sleep is interrupted.
* Unless SB_NOINTR is set on sockbuf or SBL_NOINTR passed,
* sleep is interruptible. Returns error without lock if
* sleep is interrupted.
*/
int sblock(struct socket *, struct sockbuf *, int);

View file

@ -1,4 +1,4 @@
/* $OpenBSD: sysctl.h,v 1.233 2023/05/17 22:12:51 kettenis Exp $ */
/* $OpenBSD: sysctl.h,v 1.234 2023/07/04 11:14:00 jsg Exp $ */
/* $NetBSD: sysctl.h,v 1.16 1996/04/09 20:55:36 cgd Exp $ */
/*
@ -1077,7 +1077,6 @@ struct walkarg;
int sysctl_dumpentry(struct rtentry *, void *, unsigned int);
int sysctl_rtable(int *, u_int, void *, size_t *, void *, size_t);
int sysctl_clockrate(char *, size_t *, void *);
int sysctl_vnode(char *, size_t *, struct proc *);
#if defined(GPROF) || defined(DDBPROF)
int sysctl_doprof(int *, u_int, void *, size_t *, void *, size_t);
#endif
@ -1091,8 +1090,6 @@ int hw_sysctl(int *, u_int, void *, size_t *, void *, size_t,
int debug_sysctl(int *, u_int, void *, size_t *, void *, size_t,
struct proc *);
#endif
int vm_sysctl(int *, u_int, void *, size_t *, void *, size_t,
struct proc *);
int fs_sysctl(int *, u_int, void *, size_t *, void *, size_t,
struct proc *);
int fs_posix_sysctl(int *, u_int, void *, size_t *, void *, size_t,

View file

@ -1,4 +1,4 @@
/* $OpenBSD: tty.h,v 1.41 2022/07/02 08:50:42 visa Exp $ */
/* $OpenBSD: tty.h,v 1.42 2023/07/04 11:14:00 jsg Exp $ */
/* $NetBSD: tty.h,v 1.30.4.1 1996/06/02 09:08:13 mrg Exp $ */
/*-
@ -256,7 +256,6 @@ int sysctl_pty(int *, u_int, void *, size_t *, void *, size_t);
int b_to_q(u_char *cp, int cc, struct clist *q);
void catq(struct clist *from, struct clist *to);
void clist_init(void);
int getc(struct clist *q);
void ndflush(struct clist *q, int cc);
int ndqb(struct clist *q, int flag);

View file

@ -1,4 +1,4 @@
/* $OpenBSD: ffs_softdep.c,v 1.151 2023/04/11 00:45:09 jsg Exp $ */
/* $OpenBSD: ffs_softdep.c,v 1.152 2023/07/05 15:13:28 beck Exp $ */
/*
* Copyright 1998, 2000 Marshall Kirk McKusick. All Rights Reserved.
@ -1224,6 +1224,8 @@ softdep_mount(struct vnode *devvp, struct mount *mp, struct fs *fs,
struct buf *bp;
int error, cyl;
KASSERTMSG(1, "softdep_mount should not have been called");
/*
* When doing soft updates, the counters in the
* superblock may have gotten out of sync, so we have

View file

@ -1,4 +1,4 @@
/* $OpenBSD: ffs_vfsops.c,v 1.194 2023/04/14 22:41:28 mbuhl Exp $ */
/* $OpenBSD: ffs_vfsops.c,v 1.195 2023/07/05 15:13:28 beck Exp $ */
/* $NetBSD: ffs_vfsops.c,v 1.19 1996/02/09 22:22:26 christos Exp $ */
/*
@ -213,12 +213,10 @@ ffs_mount(struct mount *mp, const char *path, void *data,
int error = 0, flags;
int ronly;
#ifndef FFS_SOFTUPDATES
/* Ask not for whom the bell tolls */
if (mp->mnt_flag & MNT_SOFTDEP) {
printf("WARNING: soft updates isn't compiled in\n");
mp->mnt_flag &= ~MNT_SOFTDEP;
}
#endif
/*
* Soft updates is incompatible with "async",
@ -284,8 +282,6 @@ ffs_mount(struct mount *mp, const char *path, void *data,
if (mp->mnt_flag & MNT_FORCE)
flags |= FORCECLOSE;
error = softdep_flushfiles(mp, flags, p);
#elif FFS_SOFTUPDATES
mp->mnt_flag |= MNT_SOFTDEP;
#endif
}
/*
@ -459,10 +455,7 @@ success:
free(fs->fs_contigdirs, M_UFSMNT, fs->fs_ncg);
}
if (!ronly) {
if (mp->mnt_flag & MNT_SOFTDEP)
fs->fs_flags |= FS_DOSOFTDEP;
else
fs->fs_flags &= ~FS_DOSOFTDEP;
fs->fs_flags &= ~FS_DOSOFTDEP;
}
ffs_sbupdate(ump, MNT_WAIT);
#if 0
@ -923,10 +916,7 @@ ffs_mountfs(struct vnode *devvp, struct mount *mp, struct proc *p)
}
fs->fs_fmod = 1;
fs->fs_clean = 0;
if (mp->mnt_flag & MNT_SOFTDEP)
fs->fs_flags |= FS_DOSOFTDEP;
else
fs->fs_flags &= ~FS_DOSOFTDEP;
fs->fs_flags &= ~FS_DOSOFTDEP;
error = ffs_sbupdate(ump, MNT_WAIT);
if (error == EROFS)
goto out;