sync with OpenBSD -current

purplerain 2024-07-24 20:05:56 +00:00
parent e0e35f76e8
commit acf2ed1690
Signed by: purplerain
GPG key ID: F42C07F07E2E35B7
32 changed files with 354 additions and 212 deletions

View file

@ -1,4 +1,4 @@
/* $OpenBSD: virtio.c,v 1.25 2024/05/24 10:05:55 jsg Exp $ */
/* $OpenBSD: virtio.c,v 1.26 2024/07/23 19:14:05 sf Exp $ */
/* $NetBSD: virtio.c,v 1.3 2011/11/02 23:05:52 njoly Exp $ */
/*
@ -807,7 +807,7 @@ virtio_dequeue(struct virtio_softc *sc, struct virtqueue *vq,
* if you forget to call this the slot will be leaked.
*
* Don't call this if you use statically allocated slots
* and virtio_dequeue_trim().
* and virtio_enqueue_trim().
*/
int
virtio_dequeue_commit(struct virtqueue *vq, int slot)
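The comment in the hunk above states the slot lifecycle contract: every slot handed back by virtio_dequeue() must be released with virtio_dequeue_commit(), unless the driver manages statically allocated slots with virtio_enqueue_trim(). A minimal sketch of a completion handler honouring that contract, assuming the NetBSD-derived signatures visible in this diff; the handler name and the processing step are hypothetical.

/*
 * Hypothetical completion handler, for illustration only.
 */
void
example_vq_done(struct virtio_softc *sc, struct virtqueue *vq)
{
        int slot, len;

        /* Drain all completed requests from the used ring. */
        while (virtio_dequeue(sc, vq, &slot, &len) == 0) {
                /* ... complete the request queued in `slot' ... */
                virtio_dequeue_commit(vq, slot);        /* hand the slot back */
        }
}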

View file

@ -1,4 +1,4 @@
/* $OpenBSD: uaudio.c,v 1.174 2023/12/10 06:32:14 ratchov Exp $ */
/* $OpenBSD: uaudio.c,v 1.175 2024/07/23 08:59:21 ratchov Exp $ */
/*
* Copyright (c) 2018 Alexandre Ratchov <alex@caoua.org>
*
@ -2702,6 +2702,22 @@ uaudio_fixup_params(struct uaudio_softc *sc)
}
}
int
uaudio_iface_index(struct uaudio_softc *sc, int ifnum)
{
int i, nifaces;
nifaces = sc->udev->cdesc->bNumInterfaces;
for (i = 0; i < nifaces; i++) {
if (sc->udev->ifaces[i].idesc->bInterfaceNumber == ifnum)
return i;
}
printf("%s: %d: invalid interface number\n", __func__, ifnum);
return -1;
}
/*
* Parse all descriptors and build configuration of the device.
*/
@ -2711,6 +2727,7 @@ uaudio_process_conf(struct uaudio_softc *sc, struct uaudio_blob *p)
struct uaudio_blob dp;
struct uaudio_alt *a;
unsigned int type, ifnum, altnum, nep, class, subclass;
int i;
while (p->rptr != p->wptr) {
if (!uaudio_getdesc(p, &dp))
@ -2736,7 +2753,8 @@ uaudio_process_conf(struct uaudio_softc *sc, struct uaudio_blob *p)
switch (subclass) {
case UISUBCLASS_AUDIOCONTROL:
if (usbd_iface_claimed(sc->udev, ifnum)) {
i = uaudio_iface_index(sc, ifnum);
if (i != -1 && usbd_iface_claimed(sc->udev, i)) {
DPRINTF("%s: %d: AC already claimed\n", __func__, ifnum);
break;
}
@ -2748,7 +2766,8 @@ uaudio_process_conf(struct uaudio_softc *sc, struct uaudio_blob *p)
return 0;
break;
case UISUBCLASS_AUDIOSTREAM:
if (usbd_iface_claimed(sc->udev, ifnum)) {
i = uaudio_iface_index(sc, ifnum);
if (i != -1 && usbd_iface_claimed(sc->udev, i)) {
DPRINTF("%s: %d: AS already claimed\n", __func__, ifnum);
break;
}
@ -2768,10 +2787,19 @@ done:
* Claim all interfaces we use. This prevents other uaudio(4)
* devices from trying to use them.
*/
for (a = sc->alts; a != NULL; a = a->next)
usbd_claim_iface(sc->udev, a->ifnum);
for (a = sc->alts; a != NULL; a = a->next) {
i = uaudio_iface_index(sc, a->ifnum);
if (i != -1) {
DPRINTF("%s: claim: %d at %d\n", __func__, a->ifnum, i);
usbd_claim_iface(sc->udev, i);
}
}
usbd_claim_iface(sc->udev, sc->ctl_ifnum);
i = uaudio_iface_index(sc, sc->ctl_ifnum);
if (i != -1) {
DPRINTF("%s: claim: ac %d at %d\n", __func__, sc->ctl_ifnum, i);
usbd_claim_iface(sc->udev, i);
}
return 1;
}

View file

@ -1,4 +1,4 @@
/* $OpenBSD: kern_exit.c,v 1.225 2024/07/22 08:18:53 claudio Exp $ */
/* $OpenBSD: kern_exit.c,v 1.227 2024/07/24 15:30:17 claudio Exp $ */
/* $NetBSD: kern_exit.c,v 1.39 1996/04/22 01:38:25 christos Exp $ */
/*
@ -458,8 +458,6 @@ reaper(void *arg)
WITNESS_THREAD_EXIT(p);
KERNEL_LOCK();
/*
* Free the VM resources we're still holding on to.
* We must do this from a valid thread because doing
@ -470,13 +468,16 @@ reaper(void *arg)
if (p->p_flag & P_THREAD) {
/* Just a thread */
KERNEL_LOCK();
proc_free(p);
KERNEL_UNLOCK();
} else {
struct process *pr = p->p_p;
/* Release the rest of the process's vmspace */
uvm_exit(pr);
KERNEL_LOCK();
if ((pr->ps_flags & PS_NOZOMBIE) == 0) {
/* Process is now a true zombie. */
atomic_setbits_int(&pr->ps_flags, PS_ZOMBIE);
@ -493,9 +494,8 @@ reaper(void *arg)
/* No one will wait for us, just zap it. */
process_zap(pr);
}
KERNEL_UNLOCK();
}
KERNEL_UNLOCK();
}
}
@ -550,10 +550,9 @@ loop:
return (0);
}
if ((options & WTRAPPED) &&
pr->ps_flags & PS_TRACED &&
(pr->ps_flags & PS_TRACED) &&
(pr->ps_flags & PS_WAITED) == 0 && pr->ps_single &&
pr->ps_single->p_stat == SSTOP &&
(pr->ps_single->p_flag & P_SUSPSINGLE) == 0) {
pr->ps_single->p_stat == SSTOP) {
if (single_thread_wait(pr, 0))
goto loop;
@ -578,8 +577,8 @@ loop:
if (p->p_stat == SSTOP &&
(pr->ps_flags & PS_WAITED) == 0 &&
(p->p_flag & P_SUSPSINGLE) == 0 &&
(pr->ps_flags & PS_TRACED ||
options & WUNTRACED)) {
((pr->ps_flags & PS_TRACED) ||
(options & WUNTRACED))) {
if ((options & WNOWAIT) == 0)
atomic_setbits_int(&pr->ps_flags, PS_WAITED);

View file

@ -1,4 +1,4 @@
/* $OpenBSD: kern_sig.c,v 1.333 2024/07/22 09:43:47 claudio Exp $ */
/* $OpenBSD: kern_sig.c,v 1.334 2024/07/24 15:31:08 claudio Exp $ */
/* $NetBSD: kern_sig.c,v 1.54 1996/04/22 01:38:32 christos Exp $ */
/*
@ -2164,6 +2164,7 @@ single_thread_set(struct proc *p, int flags)
panic("single_thread_mode = %d", mode);
#endif
}
KASSERT((p->p_flag & P_SUSPSINGLE) == 0);
pr->ps_single = p;
pr->ps_singlecnt = pr->ps_threadcnt;
@ -2233,6 +2234,7 @@ single_thread_wait(struct process *pr, int recheck)
if (!recheck)
break;
}
KASSERT((pr->ps_single->p_flag & P_SUSPSINGLE) == 0);
mtx_leave(&pr->ps_mtx);
return wait;

View file

@ -1,4 +1,4 @@
/* $OpenBSD: kern_synch.c,v 1.205 2024/06/03 12:48:25 claudio Exp $ */
/* $OpenBSD: kern_synch.c,v 1.206 2024/07/23 08:38:02 claudio Exp $ */
/* $NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $ */
/*
@ -62,7 +62,7 @@
#include <sys/ktrace.h>
#endif
int sleep_signal_check(void);
int sleep_signal_check(struct proc *);
int thrsleep(struct proc *, struct sys___thrsleep_args *);
int thrsleep_unlock(void *);
@ -385,7 +385,7 @@ sleep_finish(int timo, int do_sleep)
* we must be ready for sleep when sleep_signal_check() is
* called.
*/
if ((error = sleep_signal_check()) != 0) {
if ((error = sleep_signal_check(p)) != 0) {
catch = 0;
do_sleep = 0;
}
@ -438,7 +438,7 @@ sleep_finish(int timo, int do_sleep)
/* Check if thread was woken up because of a unwind or signal */
if (catch != 0)
error = sleep_signal_check();
error = sleep_signal_check(p);
/* Signal errors are higher priority than timeouts. */
if (error == 0 && error1 != 0)
@ -451,9 +451,8 @@ sleep_finish(int timo, int do_sleep)
* Check and handle signals and suspensions around a sleep cycle.
*/
int
sleep_signal_check(void)
sleep_signal_check(struct proc *p)
{
struct proc *p = curproc;
struct sigctx ctx;
int err, sig;

View file

@ -1,4 +1,4 @@
/* $OpenBSD: subr_log.c,v 1.78 2023/09/22 20:03:05 mvs Exp $ */
/* $OpenBSD: subr_log.c,v 1.79 2024/07/24 13:37:05 claudio Exp $ */
/* $NetBSD: subr_log.c,v 1.11 1996/03/30 22:24:44 christos Exp $ */
/*
@ -73,10 +73,11 @@
/*
* Locking:
* L log_mtx
* Q log_kq_mtx
*/
struct logsoftc {
int sc_state; /* [L] see above for possibilities */
struct klist sc_klist; /* process waiting on kevent call */
struct klist sc_klist; /* [Q] process waiting on kevent call */
struct sigio_ref sc_sigio; /* async I/O registration */
int sc_need_wakeup; /* if set, wake up waiters */
struct timeout sc_tick; /* wakeup poll timeout */
@ -97,6 +98,8 @@ struct rwlock syslogf_rwlock = RWLOCK_INITIALIZER("syslogf");
*/
struct mutex log_mtx =
MUTEX_INITIALIZER_FLAGS(IPL_HIGH, "logmtx", MTX_NOWITNESS);
struct mutex log_kq_mtx =
MUTEX_INITIALIZER_FLAGS(IPL_HIGH, "logkqmtx", MTX_NOWITNESS);
void filt_logrdetach(struct knote *kn);
int filt_logread(struct knote *kn, long hint);
@ -208,7 +211,7 @@ logopen(dev_t dev, int flags, int mode, struct proc *p)
if (log_open)
return (EBUSY);
log_open = 1;
klist_init_mutex(&logsoftc.sc_klist, &log_mtx);
klist_init_mutex(&logsoftc.sc_klist, &log_kq_mtx);
sigio_init(&logsoftc.sc_sigio);
timeout_set(&logsoftc.sc_tick, logtick, NULL);
timeout_add_msec(&logsoftc.sc_tick, LOG_TICK);
@ -336,7 +339,9 @@ filt_logread(struct knote *kn, long hint)
{
struct msgbuf *mbp = kn->kn_hook;
mtx_enter(&log_mtx);
kn->kn_data = msgbuf_getlen(mbp);
mtx_leave(&log_mtx);
return (kn->kn_data != 0);
}
@ -345,9 +350,9 @@ filt_logmodify(struct kevent *kev, struct knote *kn)
{
int active;
mtx_enter(&log_mtx);
mtx_enter(&log_kq_mtx);
active = knote_modify(kev, kn);
mtx_leave(&log_mtx);
mtx_leave(&log_kq_mtx);
return (active);
}
@ -357,9 +362,9 @@ filt_logprocess(struct knote *kn, struct kevent *kev)
{
int active;
mtx_enter(&log_mtx);
mtx_enter(&log_kq_mtx);
active = knote_process(kn, kev);
mtx_leave(&log_mtx);
mtx_leave(&log_kq_mtx);
return (active);
}
@ -404,9 +409,10 @@ logtick(void *arg)
state = logsoftc.sc_state;
if (logsoftc.sc_state & LOG_RDWAIT)
logsoftc.sc_state &= ~LOG_RDWAIT;
knote_locked(&logsoftc.sc_klist, 0);
mtx_leave(&log_mtx);
knote(&logsoftc.sc_klist, 0);
if (state & LOG_ASYNC)
pgsigio(&logsoftc.sc_sigio, SIGIO, 0);
if (state & LOG_RDWAIT)
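A note on the pattern this file moves to: once the klist is backed by its own mutex via klist_init_mutex(), knote() takes that mutex itself, so the wakeup can be issued after log_mtx has been dropped, while knote_locked() remains for callers that already hold the klist's lock. A sketch of that shape, using only calls that appear in the hunks above; the example_* names are invented.

/* Hypothetical driver state, only to illustrate the split locking. */
struct mutex example_kq_mtx =
    MUTEX_INITIALIZER_FLAGS(IPL_HIGH, "examplekq", MTX_NOWITNESS);
struct mutex example_data_mtx =
    MUTEX_INITIALIZER_FLAGS(IPL_HIGH, "exampledata", MTX_NOWITNESS);
struct klist example_klist;             /* [example_kq_mtx] kevent(2) waiters */

void
example_attach(void)
{
        /* Tie the klist to its own mutex, as logopen() now does. */
        klist_init_mutex(&example_klist, &example_kq_mtx);
}

void
example_event(void)
{
        mtx_enter(&example_data_mtx);
        /* ... update state protected by the data mutex ... */
        mtx_leave(&example_data_mtx);

        /* knote() acquires example_kq_mtx itself; no other lock is held. */
        knote(&example_klist, 0);
}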

View file

@ -1,4 +1,4 @@
/* $OpenBSD: pfkeyv2_parsemessage.c,v 1.62 2023/09/29 18:45:42 tobhe Exp $ */
/* $OpenBSD: pfkeyv2_parsemessage.c,v 1.63 2024/07/23 20:04:51 tobhe Exp $ */
/*
* @(#)COPYRIGHT 1.1 (NRL) 17 January 1995
@ -144,9 +144,9 @@ uint64_t sadb_exts_allowed_in[SADB_MAX+1] =
/* GETSPI */
BITMAP_ADDRESS_SRC | BITMAP_ADDRESS_DST | BITMAP_SPIRANGE,
/* UPDATE */
BITMAP_SA | BITMAP_LIFETIME | BITMAP_ADDRESS | BITMAP_ADDRESS_PROXY | BITMAP_KEY | BITMAP_IDENTITY | BITMAP_X_FLOW | BITMAP_X_UDPENCAP | BITMAP_X_TAG | BITMAP_X_TAP | BITMAP_X_RDOMAIN | BITMAP_X_IFACE,
BITMAP_SA | BITMAP_LIFETIME | BITMAP_ADDRESS | BITMAP_ADDRESS_PROXY | BITMAP_KEY | BITMAP_IDENTITY | BITMAP_X_FLOW | BITMAP_X_UDPENCAP | BITMAP_X_TAG | BITMAP_X_TAP | BITMAP_X_RDOMAIN | BITMAP_X_COUNTER | BITMAP_X_REPLAY | BITMAP_X_IFACE,
/* ADD */
BITMAP_SA | BITMAP_LIFETIME | BITMAP_ADDRESS | BITMAP_KEY | BITMAP_IDENTITY | BITMAP_X_FLOW | BITMAP_X_UDPENCAP | BITMAP_X_LIFETIME_LASTUSE | BITMAP_X_TAG | BITMAP_X_TAP | BITMAP_X_RDOMAIN | BITMAP_X_IFACE,
BITMAP_SA | BITMAP_LIFETIME | BITMAP_ADDRESS | BITMAP_KEY | BITMAP_IDENTITY | BITMAP_X_FLOW | BITMAP_X_UDPENCAP | BITMAP_X_LIFETIME_LASTUSE | BITMAP_X_TAG | BITMAP_X_TAP | BITMAP_X_RDOMAIN | BITMAP_X_COUNTER | BITMAP_X_REPLAY | BITMAP_X_IFACE,
/* DELETE */
BITMAP_SA | BITMAP_ADDRESS_SRC | BITMAP_ADDRESS_DST | BITMAP_X_RDOMAIN,
/* GET */
@ -851,6 +851,19 @@ pfkeyv2_parsemessage(void *p, int len, void **headers)
return (EINVAL);
}
break;
case SADB_X_EXT_REPLAY:
if (i != sizeof(struct sadb_x_replay)) {
DPRINTF("bad REPLAY header length");
return (EINVAL);
}
break;
case SADB_X_EXT_COUNTER:
if (i != sizeof(struct sadb_x_counter)) {
DPRINTF("bad COUNTER header length");
return (EINVAL);
}
break;
#if NPF > 0
case SADB_X_EXT_TAG:
if (i < sizeof(struct sadb_x_tag)) {

View file

@ -1,4 +1,4 @@
/* $OpenBSD: uvm_device.c,v 1.66 2021/12/15 12:53:53 mpi Exp $ */
/* $OpenBSD: uvm_device.c,v 1.67 2024/07/24 12:15:55 mpi Exp $ */
/* $NetBSD: uvm_device.c,v 1.30 2000/11/25 06:27:59 chs Exp $ */
/*
@ -245,8 +245,6 @@ udv_detach(struct uvm_object *uobj)
{
struct uvm_device *udv = (struct uvm_device *)uobj;
KERNEL_ASSERT_LOCKED();
/*
* loop until done
*/

View file

@ -1,4 +1,4 @@
/* $OpenBSD: uvm_extern.h,v 1.174 2024/04/02 08:39:17 deraadt Exp $ */
/* $OpenBSD: uvm_extern.h,v 1.175 2024/07/24 12:17:31 mpi Exp $ */
/* $NetBSD: uvm_extern.h,v 1.57 2001/03/09 01:02:12 chs Exp $ */
/*
@ -195,11 +195,12 @@ struct pmap;
* Locks used to protect struct members in this file:
* K kernel lock
* I immutable after creation
* a atomic operations
* v vm_map's lock
*/
struct vmspace {
struct vm_map vm_map; /* VM address map */
int vm_refcnt; /* [K] number of references */
int vm_refcnt; /* [a] number of references */
caddr_t vm_shm; /* SYS5 shared memory private data XXX */
/* we copy from vm_startcopy to the end of the structure on fork */
#define vm_startcopy vm_rssize

View file

@ -1,4 +1,4 @@
/* $OpenBSD: uvm_map.c,v 1.329 2024/06/02 15:31:57 deraadt Exp $ */
/* $OpenBSD: uvm_map.c,v 1.330 2024/07/24 12:17:31 mpi Exp $ */
/* $NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $ */
/*
@ -1346,7 +1346,6 @@ void
uvm_unmap_detach(struct uvm_map_deadq *deadq, int flags)
{
struct vm_map_entry *entry, *tmp;
int waitok = flags & UVM_PLA_WAITOK;
TAILQ_FOREACH_SAFE(entry, deadq, dfree.deadq, tmp) {
/* Drop reference to amap, if we've got one. */
@ -1356,21 +1355,6 @@ uvm_unmap_detach(struct uvm_map_deadq *deadq, int flags)
atop(entry->end - entry->start),
flags & AMAP_REFALL);
/* Skip entries for which we have to grab the kernel lock. */
if (UVM_ET_ISSUBMAP(entry) || UVM_ET_ISOBJ(entry))
continue;
TAILQ_REMOVE(deadq, entry, dfree.deadq);
uvm_mapent_free(entry);
}
if (TAILQ_EMPTY(deadq))
return;
KERNEL_LOCK();
while ((entry = TAILQ_FIRST(deadq)) != NULL) {
if (waitok)
uvm_pause();
/* Drop reference to our backing object, if we've got one. */
if (UVM_ET_ISSUBMAP(entry)) {
/* ... unlikely to happen, but play it safe */
@ -1381,11 +1365,9 @@ uvm_unmap_detach(struct uvm_map_deadq *deadq, int flags)
entry->object.uvm_obj);
}
/* Step to next. */
TAILQ_REMOVE(deadq, entry, dfree.deadq);
uvm_mapent_free(entry);
}
KERNEL_UNLOCK();
}
void
@ -2476,10 +2458,6 @@ uvm_map_teardown(struct vm_map *map)
#endif
int i;
KERNEL_ASSERT_LOCKED();
KERNEL_UNLOCK();
KERNEL_ASSERT_UNLOCKED();
KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
vm_map_lock(map);
@ -2535,9 +2513,7 @@ uvm_map_teardown(struct vm_map *map)
numq++;
KASSERT(numt == numq);
#endif
uvm_unmap_detach(&dead_entries, UVM_PLA_WAITOK);
KERNEL_LOCK();
uvm_unmap_detach(&dead_entries, 0);
pmap_destroy(map->pmap);
map->pmap = NULL;
@ -3417,10 +3393,8 @@ uvmspace_exec(struct proc *p, vaddr_t start, vaddr_t end)
void
uvmspace_addref(struct vmspace *vm)
{
KERNEL_ASSERT_LOCKED();
KASSERT(vm->vm_refcnt > 0);
vm->vm_refcnt++;
atomic_inc_int(&vm->vm_refcnt);
}
/*
@ -3429,9 +3403,7 @@ uvmspace_addref(struct vmspace *vm)
void
uvmspace_free(struct vmspace *vm)
{
KERNEL_ASSERT_LOCKED();
if (--vm->vm_refcnt == 0) {
if (atomic_dec_int_nv(&vm->vm_refcnt) == 0) {
/*
* lock the map, to wait out all other references to it. delete
* all of the mappings and pages they hold, then call the pmap
@ -3439,8 +3411,11 @@ uvmspace_free(struct vmspace *vm)
*/
#ifdef SYSVSHM
/* Get rid of any SYSV shared memory segments. */
if (vm->vm_shm != NULL)
if (vm->vm_shm != NULL) {
KERNEL_LOCK();
shmexit(vm);
KERNEL_UNLOCK();
}
#endif
uvm_map_teardown(&vm->vm_map);
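The vmspace reference count is now handled with atomic(9) primitives instead of under the kernel lock, matching the new [a] annotation in uvm_extern.h. A minimal sketch of that pattern, reusing the same atomic_inc_int()/atomic_dec_int_nv() calls as the hunk above; the example_ref structure and function names are invented for illustration.

/* Hypothetical refcounted object, mirroring the new vm_refcnt handling. */
struct example_ref {
        volatile unsigned int   er_refcnt;      /* [a] reference count */
};

void
example_addref(struct example_ref *er)
{
        /* No kernel lock required; the increment is atomic. */
        atomic_inc_int(&er->er_refcnt);
}

void
example_rele(struct example_ref *er)
{
        /*
         * atomic_dec_int_nv() returns the new value, so only the
         * holder of the last reference runs the teardown, exactly once.
         */
        if (atomic_dec_int_nv(&er->er_refcnt) == 0) {
                /* ... final teardown (e.g. uvm_map_teardown()) ... */
        }
}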

View file

@ -1,4 +1,4 @@
/* $OpenBSD: uvm_pager.c,v 1.91 2023/08/11 17:53:22 mpi Exp $ */
/* $OpenBSD: uvm_pager.c,v 1.92 2024/07/24 12:18:10 mpi Exp $ */
/* $NetBSD: uvm_pager.c,v 1.36 2000/11/27 18:26:41 chs Exp $ */
/*
@ -134,24 +134,6 @@ uvm_pseg_get(int flags)
int i;
struct uvm_pseg *pseg;
/*
* XXX Prevent lock ordering issue in uvm_unmap_detach(). A real
* fix would be to move the KERNEL_LOCK() out of uvm_unmap_detach().
*
* witness_checkorder() at witness_checkorder+0xba0
* __mp_lock() at __mp_lock+0x5f
* uvm_unmap_detach() at uvm_unmap_detach+0xc5
* uvm_map() at uvm_map+0x857
* uvm_km_valloc_try() at uvm_km_valloc_try+0x65
* uvm_pseg_get() at uvm_pseg_get+0x6f
* uvm_pagermapin() at uvm_pagermapin+0x45
* uvn_io() at uvn_io+0xcf
* uvn_get() at uvn_get+0x156
* uvm_fault_lower() at uvm_fault_lower+0x28a
* uvm_fault() at uvm_fault+0x1b3
* upageflttrap() at upageflttrap+0x62
*/
KERNEL_LOCK();
mtx_enter(&uvm_pseg_lck);
pager_seg_restart:
@ -178,7 +160,6 @@ pager_seg_restart:
if (!UVM_PSEG_INUSE(pseg, i)) {
pseg->use |= 1 << i;
mtx_leave(&uvm_pseg_lck);
KERNEL_UNLOCK();
return pseg->start + i * MAXBSIZE;
}
}
@ -191,7 +172,6 @@ pager_seg_fail:
}
mtx_leave(&uvm_pseg_lck);
KERNEL_UNLOCK();
return 0;
}

View file

@ -1,4 +1,4 @@
/* $OpenBSD: uvm_vnode.c,v 1.132 2023/04/10 04:21:20 jsg Exp $ */
/* $OpenBSD: uvm_vnode.c,v 1.133 2024/07/24 12:16:21 mpi Exp $ */
/* $NetBSD: uvm_vnode.c,v 1.36 2000/11/24 20:34:01 chs Exp $ */
/*
@ -306,10 +306,12 @@ uvn_detach(struct uvm_object *uobj)
struct vnode *vp;
int oldflags;
KERNEL_LOCK();
rw_enter(uobj->vmobjlock, RW_WRITE);
uobj->uo_refs--; /* drop ref! */
if (uobj->uo_refs) { /* still more refs */
rw_exit(uobj->vmobjlock);
KERNEL_UNLOCK();
return;
}
@ -365,6 +367,7 @@ uvn_detach(struct uvm_object *uobj)
if ((uvn->u_flags & UVM_VNODE_RELKILL) == 0) {
rw_exit(uobj->vmobjlock);
KERNEL_UNLOCK();
return;
}
@ -387,8 +390,7 @@ out:
/* drop our reference to the vnode. */
vrele(vp);
return;
KERNEL_UNLOCK();
}
/*