sync with OpenBSD -current

purplerain 2024-02-04 06:16:28 +00:00
parent 7d66fd8cb0
commit 3f3212838f
Signed by: purplerain
GPG key ID: F42C07F07E2E35B7
122 changed files with 1363 additions and 8580 deletions

kern_physio.c

@@ -1,4 +1,4 @@
-/*	$OpenBSD: kern_physio.c,v 1.48 2023/11/24 00:15:42 asou Exp $	*/
+/*	$OpenBSD: kern_physio.c,v 1.49 2024/02/03 18:51:58 beck Exp $	*/
 /*	$NetBSD: kern_physio.c,v 1.28 1997/05/19 10:43:28 pk Exp $	*/
 
 /*-
@@ -84,7 +84,6 @@ physio(void (*strategy)(struct buf *), dev_t dev, int flags,
 	bp->b_error = 0;
 	bp->b_proc = p;
 	bp->b_flags = B_BUSY;
-	LIST_INIT(&bp->b_dep);
 	splx(s);
 
 	/*

spec_vnops.c

@@ -1,4 +1,4 @@
-/*	$OpenBSD: spec_vnops.c,v 1.111 2022/12/05 23:18:37 deraadt Exp $	*/
+/*	$OpenBSD: spec_vnops.c,v 1.112 2024/02/03 18:51:58 beck Exp $	*/
 /*	$NetBSD: spec_vnops.c,v 1.29 1996/04/22 01:42:38 christos Exp $	*/
 
 /*
@@ -444,9 +444,6 @@ spec_strategy(void *v)
 	struct buf *bp = ap->a_bp;
 	int maj = major(bp->b_dev);
 
-	if (LIST_FIRST(&bp->b_dep) != NULL)
-		buf_start(bp);
-
 	(*bdevsw[maj].d_strategy)(bp);
 	return (0);
 }

uipc_socket.c

@@ -1,4 +1,4 @@
-/*	$OpenBSD: uipc_socket.c,v 1.315 2024/01/26 18:24:23 mvs Exp $	*/
+/*	$OpenBSD: uipc_socket.c,v 1.316 2024/02/03 22:50:08 mvs Exp $	*/
 /*	$NetBSD: uipc_socket.c,v 1.21 1996/02/04 02:17:52 christos Exp $	*/
 
 /*
@@ -72,26 +72,20 @@ int	filt_soread(struct knote *kn, long hint);
 void	filt_sowdetach(struct knote *kn);
 int	filt_sowrite(struct knote *kn, long hint);
 int	filt_soexcept(struct knote *kn, long hint);
-int	filt_solisten(struct knote *kn, long hint);
-int	filt_somodify(struct kevent *kev, struct knote *kn);
-int	filt_soprocess(struct knote *kn, struct kevent *kev);
 
-const struct filterops solisten_filtops = {
-	.f_flags	= FILTEROP_ISFD | FILTEROP_MPSAFE,
-	.f_attach	= NULL,
-	.f_detach	= filt_sordetach,
-	.f_event	= filt_solisten,
-	.f_modify	= filt_somodify,
-	.f_process	= filt_soprocess,
-};
+int	filt_sowmodify(struct kevent *kev, struct knote *kn);
+int	filt_sowprocess(struct knote *kn, struct kevent *kev);
+int	filt_sormodify(struct kevent *kev, struct knote *kn);
+int	filt_sorprocess(struct knote *kn, struct kevent *kev);
 
 const struct filterops soread_filtops = {
 	.f_flags	= FILTEROP_ISFD | FILTEROP_MPSAFE,
 	.f_attach	= NULL,
 	.f_detach	= filt_sordetach,
 	.f_event	= filt_soread,
-	.f_modify	= filt_somodify,
-	.f_process	= filt_soprocess,
+	.f_modify	= filt_sormodify,
+	.f_process	= filt_sorprocess,
 };
 
 const struct filterops sowrite_filtops = {
@@ -99,8 +93,8 @@ const struct filterops sowrite_filtops = {
 	.f_attach	= NULL,
 	.f_detach	= filt_sowdetach,
 	.f_event	= filt_sowrite,
-	.f_modify	= filt_somodify,
-	.f_process	= filt_soprocess,
+	.f_modify	= filt_sowmodify,
+	.f_process	= filt_sowprocess,
 };
 
 const struct filterops soexcept_filtops = {
@@ -108,18 +102,8 @@ const struct filterops soexcept_filtops = {
 	.f_attach	= NULL,
 	.f_detach	= filt_sordetach,
 	.f_event	= filt_soexcept,
-	.f_modify	= filt_somodify,
-	.f_process	= filt_soprocess,
-};
-
-void	klist_soassertlk(void *);
-int	klist_solock(void *);
-void	klist_sounlock(void *, int);
-const struct klistops socket_klistops = {
-	.klo_assertlk	= klist_soassertlk,
-	.klo_lock	= klist_solock,
-	.klo_unlock	= klist_sounlock,
+	.f_modify	= filt_sormodify,
+	.f_process	= filt_sorprocess,
 };
 
 #ifndef SOMINCONN
@@ -158,8 +142,10 @@ soalloc(const struct domain *dp, int wait)
 		return (NULL);
 	rw_init_flags(&so->so_lock, dp->dom_name, RWL_DUPOK);
 	refcnt_init(&so->so_refcnt);
-	klist_init(&so->so_rcv.sb_klist, &socket_klistops, so);
-	klist_init(&so->so_snd.sb_klist, &socket_klistops, so);
+	mtx_init(&so->so_rcv.sb_mtx, IPL_MPFLOOR);
+	mtx_init(&so->so_snd.sb_mtx, IPL_MPFLOOR);
+	klist_init_mutex(&so->so_rcv.sb_klist, &so->so_rcv.sb_mtx);
+	klist_init_mutex(&so->so_snd.sb_klist, &so->so_snd.sb_mtx);
 	sigio_init(&so->so_sigio);
 	TAILQ_INIT(&so->so_q0);
 	TAILQ_INIT(&so->so_q);
@@ -1757,7 +1743,7 @@ somove(struct socket *so, int wait)
 void
 sorwakeup(struct socket *so)
 {
-	soassertlocked(so);
+	soassertlocked_readonly(so);
 
 #ifdef SOCKET_SPLICE
 	if (so->so_rcv.sb_flags & SB_SPLICE) {
@@ -1785,7 +1771,7 @@ sorwakeup(struct socket *so)
 void
 sowwakeup(struct socket *so)
 {
-	soassertlocked(so);
+	soassertlocked_readonly(so);
 
 #ifdef SOCKET_SPLICE
 	if (so->so_snd.sb_flags & SB_SPLICE)
@@ -2134,7 +2120,46 @@ void
 sohasoutofband(struct socket *so)
 {
 	pgsigio(&so->so_sigio, SIGURG, 0);
-	knote_locked(&so->so_rcv.sb_klist, 0);
+	knote(&so->so_rcv.sb_klist, 0);
 }
 
+void
+sofilt_lock(struct socket *so, struct sockbuf *sb)
+{
+	switch (so->so_proto->pr_domain->dom_family) {
+	case PF_INET:
+	case PF_INET6:
+		NET_LOCK_SHARED();
+		break;
+	default:
+		rw_enter_write(&so->so_lock);
+		break;
+	}
+
+	mtx_enter(&sb->sb_mtx);
+}
+
+void
+sofilt_unlock(struct socket *so, struct sockbuf *sb)
+{
+	mtx_leave(&sb->sb_mtx);
+
+	switch (so->so_proto->pr_domain->dom_family) {
+	case PF_INET:
+	case PF_INET6:
+		NET_UNLOCK_SHARED();
+		break;
+	default:
+		rw_exit_write(&so->so_lock);
+		break;
+	}
+}
+
+static inline void
+sofilt_assert_locked(struct socket *so, struct sockbuf *sb)
+{
+	MUTEX_ASSERT_LOCKED(&sb->sb_mtx);
+	soassertlocked_readonly(so);
+}
+
 int
@@ -2143,13 +2168,9 @@ soo_kqfilter(struct file *fp, struct knote *kn)
 	struct socket *so = kn->kn_fp->f_data;
 	struct sockbuf *sb;
 
-	solock(so);
 	switch (kn->kn_filter) {
 	case EVFILT_READ:
-		if (so->so_options & SO_ACCEPTCONN)
-			kn->kn_fop = &solisten_filtops;
-		else
-			kn->kn_fop = &soread_filtops;
+		kn->kn_fop = &soread_filtops;
 		sb = &so->so_rcv;
 		break;
 	case EVFILT_WRITE:
@@ -2161,12 +2182,10 @@ soo_kqfilter(struct file *fp, struct knote *kn)
 		sb = &so->so_rcv;
 		break;
 	default:
-		sounlock(so);
 		return (EINVAL);
 	}
 
-	klist_insert_locked(&sb->sb_klist, kn);
-	sounlock(so);
+	klist_insert(&sb->sb_klist, kn);
 
 	return (0);
 }
@@ -2185,7 +2204,23 @@ filt_soread(struct knote *kn, long hint)
 	struct socket *so = kn->kn_fp->f_data;
 	int rv = 0;
 
-	soassertlocked(so);
+	sofilt_assert_locked(so, &so->so_rcv);
+
+	if (so->so_options & SO_ACCEPTCONN) {
+		kn->kn_data = so->so_qlen;
+		rv = (kn->kn_data != 0);
+
+		if (kn->kn_flags & (__EV_POLL | __EV_SELECT)) {
+			if (so->so_state & SS_ISDISCONNECTED) {
+				kn->kn_flags |= __EV_HUP;
+				rv = 1;
+			} else {
+				rv = soreadable(so);
+			}
+		}
+
+		return rv;
+	}
 
 	kn->kn_data = so->so_rcv.sb_cc;
 #ifdef SOCKET_SPLICE
@@ -2226,7 +2261,7 @@ filt_sowrite(struct knote *kn, long hint)
 	struct socket *so = kn->kn_fp->f_data;
 	int rv;
 
-	soassertlocked(so);
+	sofilt_assert_locked(so, &so->so_snd);
 
 	kn->kn_data = sbspace(so, &so->so_snd);
 	if (so->so_snd.sb_state & SS_CANTSENDMORE) {
@@ -2257,7 +2292,7 @@ filt_soexcept(struct knote *kn, long hint)
 	struct socket *so = kn->kn_fp->f_data;
 	int rv = 0;
 
-	soassertlocked(so);
+	sofilt_assert_locked(so, &so->so_rcv);
 
 #ifdef SOCKET_SPLICE
 	if (isspliced(so)) {
@@ -2283,77 +2318,55 @@ filt_soexcept(struct knote *kn, long hint)
 }
 
 int
-filt_solisten(struct knote *kn, long hint)
-{
-	struct socket *so = kn->kn_fp->f_data;
-	int active;
-
-	soassertlocked(so);
-
-	kn->kn_data = so->so_qlen;
-	active = (kn->kn_data != 0);
-
-	if (kn->kn_flags & (__EV_POLL | __EV_SELECT)) {
-		if (so->so_state & SS_ISDISCONNECTED) {
-			kn->kn_flags |= __EV_HUP;
-			active = 1;
-		} else {
-			active = soreadable(so);
-		}
-	}
-
-	return (active);
-}
-
-int
-filt_somodify(struct kevent *kev, struct knote *kn)
+filt_sowmodify(struct kevent *kev, struct knote *kn)
 {
 	struct socket *so = kn->kn_fp->f_data;
 	int rv;
 
-	solock(so);
+	sofilt_lock(so, &so->so_snd);
 	rv = knote_modify(kev, kn);
-	sounlock(so);
+	sofilt_unlock(so, &so->so_snd);
 
 	return (rv);
 }
 
 int
-filt_soprocess(struct knote *kn, struct kevent *kev)
+filt_sowprocess(struct knote *kn, struct kevent *kev)
 {
 	struct socket *so = kn->kn_fp->f_data;
 	int rv;
 
-	solock(so);
+	sofilt_lock(so, &so->so_snd);
 	rv = knote_process(kn, kev);
-	sounlock(so);
+	sofilt_unlock(so, &so->so_snd);
 
 	return (rv);
 }
 
-void
-klist_soassertlk(void *arg)
+int
+filt_sormodify(struct kevent *kev, struct knote *kn)
 {
-	struct socket *so = arg;
+	struct socket *so = kn->kn_fp->f_data;
+	int rv;
 
-	soassertlocked(so);
+	sofilt_lock(so, &so->so_rcv);
+	rv = knote_modify(kev, kn);
+	sofilt_unlock(so, &so->so_rcv);
+
+	return (rv);
 }
 
 int
-klist_solock(void *arg)
+filt_sorprocess(struct knote *kn, struct kevent *kev)
 {
-	struct socket *so = arg;
+	struct socket *so = kn->kn_fp->f_data;
+	int rv;
 
-	solock(so);
-	return (1);
-}
-
-void
-klist_sounlock(void *arg, int ls)
-{
-	struct socket *so = arg;
-
-	sounlock(so);
+	sofilt_lock(so, &so->so_rcv);
+	rv = knote_process(kn, kev);
+	sofilt_unlock(so, &so->so_rcv);
+
+	return (rv);
 }
 
 #ifdef DDB
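
Note: with this change each socket buffer's klist is backed by its own
sb_mtx mutex (klist_init_mutex() in soalloc() above) rather than the removed
socket_klistops, so posting an event needs only the buffer mutex, while the
kqueue modify/process callbacks take the socket lock or the shared net lock
first and then sb_mtx via sofilt_lock(). A minimal sketch of the resulting
pattern for a hypothetical producer (illustrative only, not part of this
commit; example_post_rdevent() is an invented name):

	/*
	 * Hypothetical helper: post a read event while holding
	 * sb_mtx, which now protects so_rcv.sb_klist.
	 */
	void
	example_post_rdevent(struct socket *so)
	{
		mtx_enter(&so->so_rcv.sb_mtx);
		knote_locked(&so->so_rcv.sb_klist, 0);
		mtx_leave(&so->so_rcv.sb_mtx);
	}

A caller that does not hold sb_mtx can call knote(), which locks the klist
itself; that is why sohasoutofband() above switches from knote_locked() to
knote().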

uipc_socket2.c

@@ -1,4 +1,4 @@
-/*	$OpenBSD: uipc_socket2.c,v 1.140 2024/01/11 14:15:11 bluhm Exp $	*/
+/*	$OpenBSD: uipc_socket2.c,v 1.141 2024/02/03 22:50:08 mvs Exp $	*/
 /*	$NetBSD: uipc_socket2.c,v 1.11 1996/02/04 02:17:55 christos Exp $	*/
 
 /*
@@ -439,7 +439,7 @@ sounlock_shared(struct socket *so)
 }
 
 void
-soassertlocked(struct socket *so)
+soassertlocked_readonly(struct socket *so)
 {
 	switch (so->so_proto->pr_domain->dom_family) {
 	case PF_INET:
@@ -452,6 +452,27 @@ soassertlocked(struct socket *so)
 	}
 }
 
+void
+soassertlocked(struct socket *so)
+{
+	switch (so->so_proto->pr_domain->dom_family) {
+	case PF_INET:
+	case PF_INET6:
+		if (rw_status(&netlock) == RW_READ) {
+			NET_ASSERT_LOCKED();
+
+			if (splassert_ctl > 0 && pru_locked(so) == 0 &&
+			    rw_status(&so->so_lock) != RW_WRITE)
+				splassert_fail(0, RW_WRITE, __func__);
+		} else
+			NET_ASSERT_LOCKED_EXCLUSIVE();
+		break;
+	default:
+		rw_assert_wrlock(&so->so_lock);
+		break;
+	}
+}
+
 int
 sosleep_nsec(struct socket *so, void *ident, int prio, const char *wmesg,
     uint64_t nsecs)
@@ -489,46 +510,62 @@ sbwait(struct socket *so, struct sockbuf *sb)
 	soassertlocked(so);
 
+	mtx_enter(&sb->sb_mtx);
 	sb->sb_flags |= SB_WAIT;
+	mtx_leave(&sb->sb_mtx);
+
 	return sosleep_nsec(so, &sb->sb_cc, prio, "netio", sb->sb_timeo_nsecs);
 }
 
 int
 sblock(struct socket *so, struct sockbuf *sb, int flags)
 {
-	int error, prio = PSOCK;
+	int error = 0, prio = PSOCK;
 
 	soassertlocked(so);
 
+	mtx_enter(&sb->sb_mtx);
 	if ((sb->sb_flags & SB_LOCK) == 0) {
 		sb->sb_flags |= SB_LOCK;
-		return (0);
+		goto out;
 	}
-	if ((flags & SBL_WAIT) == 0)
-		return (EWOULDBLOCK);
+
+	if ((flags & SBL_WAIT) == 0) {
+		error = EWOULDBLOCK;
+		goto out;
+	}
 
 	if (!(flags & SBL_NOINTR || sb->sb_flags & SB_NOINTR))
 		prio |= PCATCH;
 
 	while (sb->sb_flags & SB_LOCK) {
 		sb->sb_flags |= SB_WANT;
+		mtx_leave(&sb->sb_mtx);
 		error = sosleep_nsec(so, &sb->sb_flags, prio, "netlck", INFSLP);
 		if (error)
 			return (error);
+		mtx_enter(&sb->sb_mtx);
 	}
 	sb->sb_flags |= SB_LOCK;
-	return (0);
+out:
+	mtx_leave(&sb->sb_mtx);
+	return (error);
 }
 
 void
 sbunlock(struct socket *so, struct sockbuf *sb)
 {
-	soassertlocked(so);
+	int dowakeup = 0;
 
+	mtx_enter(&sb->sb_mtx);
 	sb->sb_flags &= ~SB_LOCK;
 	if (sb->sb_flags & SB_WANT) {
 		sb->sb_flags &= ~SB_WANT;
-		wakeup(&sb->sb_flags);
+		dowakeup = 1;
 	}
+	mtx_leave(&sb->sb_mtx);
+
+	if (dowakeup)
+		wakeup(&sb->sb_flags);
 }
 
 /*
@@ -539,15 +576,24 @@ sbunlock(struct socket *so, struct sockbuf *sb)
 void
 sowakeup(struct socket *so, struct sockbuf *sb)
 {
-	soassertlocked(so);
+	int dowakeup = 0, dopgsigio = 0;
 
+	mtx_enter(&sb->sb_mtx);
 	if (sb->sb_flags & SB_WAIT) {
 		sb->sb_flags &= ~SB_WAIT;
-		wakeup(&sb->sb_cc);
+		dowakeup = 1;
 	}
+
 	if (sb->sb_flags & SB_ASYNC)
-		pgsigio(&so->so_sigio, SIGIO, 0);
+		dopgsigio = 1;
+
 	knote_locked(&sb->sb_klist, 0);
+	mtx_leave(&sb->sb_mtx);
+
+	if (dowakeup)
+		wakeup(&sb->sb_cc);
+
+	if (dopgsigio)
+		pgsigio(&so->so_sigio, SIGIO, 0);
 }
 
 /*
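
Note: sbunlock() and sowakeup() now update the sockbuf flags under sb_mtx
and record any pending wakeup, issuing it only after the mutex is released.
A condensed sketch of this deferred-wakeup pattern (illustrative only; the
real code is in the hunks above):

	int dowakeup = 0;

	mtx_enter(&sb->sb_mtx);
	if (sb->sb_flags & SB_WANT) {	/* inspect and clear under the mutex */
		sb->sb_flags &= ~SB_WANT;
		dowakeup = 1;		/* remember the wakeup, don't issue it yet */
	}
	mtx_leave(&sb->sb_mtx);

	if (dowakeup)
		wakeup(&sb->sb_flags);	/* wake only after sb_mtx is dropped */

Deferring wakeup() until after mtx_leave() keeps a woken thread from
immediately blocking on the still-held sb_mtx.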

uipc_syscalls.c

@@ -1,4 +1,4 @@
-/*	$OpenBSD: uipc_syscalls.c,v 1.216 2024/01/03 11:07:04 bluhm Exp $	*/
+/*	$OpenBSD: uipc_syscalls.c,v 1.217 2024/02/03 22:50:09 mvs Exp $	*/
 /*	$NetBSD: uipc_syscalls.c,v 1.19 1996/02/09 19:00:48 christos Exp $	*/
 
 /*
@@ -326,7 +326,7 @@ doaccept(struct proc *p, int sock, struct sockaddr *name, socklen_t *anamelen,
 	    : (flags & SOCK_NONBLOCK ? FNONBLOCK : 0);
 
 	/* connection has been removed from the listen queue */
-	knote_locked(&head->so_rcv.sb_klist, 0);
+	knote(&head->so_rcv.sb_klist, 0);
 
 	if (persocket)
 		sounlock(head);

vfs_bio.c

@@ -1,4 +1,4 @@
-/*	$OpenBSD: vfs_bio.c,v 1.212 2023/04/26 15:13:52 beck Exp $	*/
+/*	$OpenBSD: vfs_bio.c,v 1.213 2024/02/03 18:51:58 beck Exp $	*/
 /*	$NetBSD: vfs_bio.c,v 1.44 1996/06/11 11:15:36 pk Exp $	*/
 
 /*
@@ -65,7 +65,6 @@ int fliphigh;
 
 int nobuffers;
 int needbuffer;
-struct bio_ops bioops;
 
 /* private bufcache functions */
 void bufcache_init(void);
@@ -120,8 +119,6 @@ buf_put(struct buf *bp)
 	if (bp->b_vnbufs.le_next != NOLIST &&
 	    bp->b_vnbufs.le_next != (void *)-1)
 		panic("buf_put: still on the vnode list");
-	if (!LIST_EMPTY(&bp->b_dep))
-		panic("buf_put: b_dep is not empty");
 #endif
 
 	LIST_REMOVE(bp, b_list);
@@ -879,13 +876,6 @@ brelse(struct buf *bp)
 	if (bp->b_data != NULL)
 		KASSERT(bp->b_bufsize > 0);
 
-	/*
-	 * softdep is basically incompatible with not caching buffers
-	 * that have dependencies, so this buffer must be cached
-	 */
-	if (LIST_FIRST(&bp->b_dep) != NULL)
-		CLR(bp->b_flags, B_NOCACHE);
-
 	/*
 	 * Determine which queue the buffer should be on, then put it there.
 	 */
@@ -904,9 +894,6 @@ brelse(struct buf *bp)
 		 * If the buffer is invalid, free it now rather than leaving
 		 * it in a queue and wasting memory.
 		 */
-		if (LIST_FIRST(&bp->b_dep) != NULL)
-			buf_deallocate(bp);
-
 		if (ISSET(bp->b_flags, B_DELWRI)) {
 			CLR(bp->b_flags, B_DELWRI);
 		}
@@ -1150,7 +1137,6 @@ buf_get(struct vnode *vp, daddr_t blkno, size_t size)
 	bp->b_freelist.tqe_next = NOLIST;
 	bp->b_dev = NODEV;
-	LIST_INIT(&bp->b_dep);
 	bp->b_bcount = size;
 
 	buf_acquire_nomap(bp);
@@ -1243,16 +1229,6 @@ buf_daemon(void *arg)
 			if (!ISSET(bp->b_flags, B_DELWRI))
 				panic("Clean buffer on dirty queue");
 #endif
-			if (LIST_FIRST(&bp->b_dep) != NULL &&
-			    !ISSET(bp->b_flags, B_DEFERRED) &&
-			    buf_countdeps(bp, 0, 0)) {
-				SET(bp->b_flags, B_DEFERRED);
-				s = splbio();
-				bufcache_release(bp);
-				buf_release(bp);
-				continue;
-			}
-
 			bawrite(bp);
 			pushed++;
@@ -1321,9 +1297,6 @@ biodone(struct buf *bp)
 	if (bp->b_bq)
 		bufq_done(bp->b_bq, bp);
 
-	if (LIST_FIRST(&bp->b_dep) != NULL)
-		buf_complete(bp);
-
 	if (!ISSET(bp->b_flags, B_READ)) {
 		CLR(bp->b_flags, B_WRITEINPROG);
 		vwakeup(bp->b_vp);

vfs_subr.c

@@ -1,4 +1,4 @@
-/*	$OpenBSD: vfs_subr.c,v 1.318 2022/12/26 19:25:49 miod Exp $	*/
+/*	$OpenBSD: vfs_subr.c,v 1.319 2024/02/03 18:51:58 beck Exp $	*/
 /*	$NetBSD: vfs_subr.c,v 1.53 1996/04/22 01:39:13 christos Exp $	*/
 
 /*
@@ -1819,6 +1819,10 @@ vfs_syncwait(struct proc *p, int verbose)
 		 * With soft updates, some buffers that are
 		 * written will be remarked as dirty until other
 		 * buffers are written.
+		 *
+		 * XXX here be dragons. this should really go away
+		 * but should be carefully made to go away on it's
+		 * own with testing.. XXX
 		 */
 		if (bp->b_flags & B_DELWRI) {
 			s = splbio();
@@ -2249,18 +2253,14 @@ vfs_buf_print(void *b, int full,
 	    bp->b_proc, bp->b_error, bp->b_flags, B_BITS);
 
 	(*pr)("  bufsize 0x%lx bcount 0x%lx resid 0x%lx\n"
-	    "  data %p saveaddr %p dep %p iodone %p\n",
+	    "  data %p saveaddr %p iodone %p\n",
 	    bp->b_bufsize, bp->b_bcount, (long)bp->b_resid,
-	    bp->b_data, bp->b_saveaddr,
-	    LIST_FIRST(&bp->b_dep), bp->b_iodone);
+	    bp->b_data, bp->b_saveaddr, bp->b_iodone);
 	(*pr)("  dirty {off 0x%x end 0x%x} valid {off 0x%x end 0x%x}\n",
 	    bp->b_dirtyoff, bp->b_dirtyend, bp->b_validoff, bp->b_validend);
-
-#ifdef FFS_SOFTUPDATES
-	if (full)
-		softdep_print(bp, full, pr);
-#endif
 }
 
 const char *vtypes[] = { VTYPE_NAMES };

vfs_sync.c

@@ -1,4 +1,4 @@
-/*	$OpenBSD: vfs_sync.c,v 1.69 2023/05/25 07:45:33 claudio Exp $	*/
+/*	$OpenBSD: vfs_sync.c,v 1.70 2024/02/03 18:51:58 beck Exp $	*/
 
 /*
  * Portions of this code are:
@@ -50,10 +50,6 @@
 #include <sys/malloc.h>
 #include <sys/time.h>
 
-#ifdef FFS_SOFTUPDATES
-int softdep_process_worklist(struct mount *);
-#endif
-
 /*
  * The workitem queue.
  */
@@ -62,9 +58,6 @@ int softdep_process_worklist(struct mount *);
 int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
 int syncdelay = SYNCER_DEFAULT;		/* time to delay syncing vnodes */
 
-int rushjob = 0;			/* number of slots to run ASAP */
-int stat_rush_requests = 0;		/* number of rush requests */
-
 int syncer_delayno = 0;
 long syncer_mask;
 LIST_HEAD(synclist, vnode);
@@ -198,28 +191,6 @@ syncer_thread(void *arg)
 		splx(s);
 
-#ifdef FFS_SOFTUPDATES
-		/*
-		 * Do soft update processing.
-		 */
-		softdep_process_worklist(NULL);
-#endif
-
-		/*
-		 * The variable rushjob allows the kernel to speed up the
-		 * processing of the filesystem syncer process. A rushjob
-		 * value of N tells the filesystem syncer to process the next
-		 * N seconds worth of work on its queue ASAP. Currently rushjob
-		 * is used by the soft update code to speed up the filesystem
-		 * syncer process when the incore state is getting so far
-		 * ahead of the disk that the kernel memory pool is being
-		 * threatened with exhaustion.
-		 */
-		if (rushjob > 0) {
-			rushjob -= 1;
-			continue;
-		}
-
 		/*
 		 * If it has taken us less than a second to process the
 		 * current work, then wait. Otherwise start right over
@@ -236,24 +207,6 @@ syncer_thread(void *arg)
 	}
 }
 
-/*
- * Request the syncer daemon to speed up its work.
- * We never push it to speed up more than half of its
- * normal turn time, otherwise it could take over the cpu.
- */
-int
-speedup_syncer(void)
-{
-	if (syncerproc)
-		wakeup_one(&syncer_chan);
-	if (rushjob < syncdelay / 2) {
-		rushjob += 1;
-		stat_rush_requests += 1;
-		return 1;
-	}
-	return 0;
-}
-
 /* Routine to create and manage a filesystem syncer vnode. */
 int sync_fsync(void *);
 int sync_inactive(void *);

vfs_syscalls.c

@@ -1,4 +1,4 @@
-/*	$OpenBSD: vfs_syscalls.c,v 1.362 2023/07/05 15:13:28 beck Exp $	*/
+/*	$OpenBSD: vfs_syscalls.c,v 1.363 2024/02/03 18:51:58 beck Exp $	*/
 /*	$NetBSD: vfs_syscalls.c,v 1.71 1996/04/23 10:29:02 mycroft Exp $	*/
 
 /*
@@ -2894,10 +2894,6 @@ sys_fsync(struct proc *p, void *v, register_t *retval)
 	vp = fp->f_data;
 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 	error = VOP_FSYNC(vp, fp->f_cred, MNT_WAIT, p);
-#ifdef FFS_SOFTUPDATES
-	if (error == 0 && vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP))
-		error = softdep_fsync(vp);
-#endif
 
 	VOP_UNLOCK(vp);
 	FRELE(fp, p);