sync code with the latest fixes and improvements from OpenBSD

purplerain 2023-07-20 23:56:46 +00:00
parent f57be82572
commit 58b04bcee7
Signed by: purplerain
GPG key ID: F42C07F07E2E35B7
468 changed files with 9958 additions and 7882 deletions

exec_elf.c

@@ -1,4 +1,4 @@
-/* $OpenBSD: exec_elf.c,v 1.182 2023/06/10 19:30:48 kettenis Exp $ */
+/* $OpenBSD: exec_elf.c,v 1.183 2023/07/12 19:34:14 jasper Exp $ */
/*
* Copyright (c) 1996 Per Fogelstrom
@@ -325,6 +325,11 @@ elf_load_file(struct proc *p, char *path, struct exec_package *epp,
goto bad1;
for (i = 0; i < eh.e_phnum; i++) {
+if ((ph[i].p_align > 1) && !powerof2(ph[i].p_align)) {
+error = EINVAL;
+goto bad1;
+}
if (ph[i].p_type == PT_LOAD) {
if (ph[i].p_filesz > ph[i].p_memsz ||
ph[i].p_memsz == 0) {
@@ -526,6 +531,11 @@ exec_elf_makecmds(struct proc *p, struct exec_package *epp)
epp->ep_dsize = ELF_NO_ADDR;
for (i = 0, pp = ph; i < eh->e_phnum; i++, pp++) {
+if ((pp->p_align > 1) && !powerof2(pp->p_align)) {
+error = EINVAL;
+goto bad;
+}
if (pp->p_type == PT_INTERP && !interp) {
if (pp->p_filesz < 2 || pp->p_filesz > MAXPATHLEN)
goto bad;
@@ -560,7 +570,7 @@ exec_elf_makecmds(struct proc *p, struct exec_package *epp)
if (eh->e_type == ET_DYN) {
/* need phdr and load sections for PIE */
-if (!has_phdr || base_ph == NULL) {
+if (!has_phdr || base_ph == NULL || base_ph->p_vaddr != 0) {
error = EINVAL;
goto bad;
}

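Both hunks above add the same validation: a program-header alignment greater than 1 must be a power of two, otherwise the ELF image is rejected with EINVAL. powerof2() comes from <sys/param.h> and is the usual bit trick. The stand-alone sketch below only illustrates that check; the p_align values in it are made up, not taken from any real binary.

#include <stdint.h>
#include <stdio.h>

/* same bit trick as powerof2() in <sys/param.h> */
#define powerof2(x) ((((x) - 1) & (x)) == 0)

int
main(void)
{
	/* hypothetical p_align values as they might appear in program headers */
	uint64_t aligns[] = { 0, 1, 4096, 16384, 12288, 65535 };
	size_t i;

	for (i = 0; i < sizeof(aligns) / sizeof(aligns[0]); i++) {
		/* mirrors the new check: only p_align > 1 must be a power of two */
		if (aligns[i] > 1 && !powerof2(aligns[i]))
			printf("p_align %llu -> EINVAL\n",
			    (unsigned long long)aligns[i]);
		else
			printf("p_align %llu -> accepted\n",
			    (unsigned long long)aligns[i]);
	}
	return 0;
}
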
kern_exec.c

@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_exec.c,v 1.249 2023/07/06 07:49:52 deraadt Exp $ */
+/* $OpenBSD: kern_exec.c,v 1.250 2023/07/10 03:31:57 guenther Exp $ */
/* $NetBSD: kern_exec.c,v 1.75 1996/02/09 18:59:28 christos Exp $ */
/*-
@@ -531,6 +531,11 @@ sys_execve(struct proc *p, void *v, register_t *retval)
if (otvp)
vrele(otvp);
+if (pack.ep_flags & EXEC_NOBTCFI)
+atomic_setbits_int(&p->p_p->ps_flags, PS_NOBTCFI);
+else
+atomic_clearbits_int(&p->p_p->ps_flags, PS_NOBTCFI);
atomic_setbits_int(&pr->ps_flags, PS_EXEC);
if (pr->ps_flags & PS_PPWAIT) {
atomic_clearbits_int(&pr->ps_flags, PS_PPWAIT);

kern_rwlock.c

@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_rwlock.c,v 1.49 2023/06/28 08:23:25 claudio Exp $ */
+/* $OpenBSD: kern_rwlock.c,v 1.50 2023/07/14 07:07:08 claudio Exp $ */
/*
* Copyright (c) 2002, 2003 Artur Grabowski <art@openbsd.org>
@@ -224,7 +224,6 @@ int
rw_enter(struct rwlock *rwl, int flags)
{
const struct rwlock_op *op;
-struct sleep_state sls;
unsigned long inc, o;
#ifdef MULTIPROCESSOR
/*
@@ -279,11 +278,11 @@ retry:
prio = op->wait_prio;
if (flags & RW_INTR)
prio |= PCATCH;
-sleep_setup(&sls, rwl, prio, rwl->rwl_name);
+sleep_setup(rwl, prio, rwl->rwl_name);
do_sleep = !rw_cas(&rwl->rwl_owner, o, set);
-error = sleep_finish(&sls, prio, 0, do_sleep);
+error = sleep_finish(0, do_sleep);
if ((flags & RW_INTR) &&
(error != 0))
return (error);

kern_sched.c

@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_sched.c,v 1.77 2023/06/28 08:23:25 claudio Exp $ */
+/* $OpenBSD: kern_sched.c,v 1.79 2023/07/14 07:07:08 claudio Exp $ */
/*
* Copyright (c) 2007, 2008 Artur Grabowski <art@openbsd.org>
*
@@ -248,6 +248,7 @@ setrunqueue(struct cpu_info *ci, struct proc *p, uint8_t prio)
KASSERT(ci != NULL);
SCHED_ASSERT_LOCKED();
+KASSERT(!ISSET(p->p_flag, P_WSLEEP) || p->p_stat == SSTOP);
p->p_cpu = ci;
p->p_stat = SRUN;
@@ -668,13 +669,12 @@ sched_stop_secondary_cpus(void)
}
CPU_INFO_FOREACH(cii, ci) {
struct schedstate_percpu *spc = &ci->ci_schedstate;
-struct sleep_state sls;
if (CPU_IS_PRIMARY(ci) || !CPU_IS_RUNNING(ci))
continue;
while ((spc->spc_schedflags & SPCF_HALTED) == 0) {
-sleep_setup(&sls, spc, PZERO, "schedstate");
-sleep_finish(&sls, PZERO, 0,
+sleep_setup(spc, PZERO, "schedstate");
+sleep_finish(0,
(spc->spc_schedflags & SPCF_HALTED) == 0);
}
}

kern_sig.c

@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_sig.c,v 1.307 2023/06/28 08:23:25 claudio Exp $ */
+/* $OpenBSD: kern_sig.c,v 1.310 2023/07/14 07:07:08 claudio Exp $ */
/* $NetBSD: kern_sig.c,v 1.54 1996/04/22 01:38:32 christos Exp $ */
/*
@@ -1151,7 +1151,7 @@ ptsignal(struct proc *p, int signum, enum signal_type type)
atomic_clearbits_int(siglist, mask);
if (action == SIG_CATCH)
goto runfast;
-if (p->p_wchan == NULL)
+if (p->p_wchan == NULL || p->p_flag & P_WSLEEP)
goto run;
p->p_stat = SSLEEP;
goto out;
@@ -1699,7 +1699,7 @@ coredump(struct proc *p)
}
/* incrash should be 0 or KERNELPATH only */
-NDINIT(&nd, 0, incrash, UIO_SYSSPACE, name, p);
+NDINIT(&nd, 0, BYPASSUNVEIL | incrash, UIO_SYSSPACE, name, p);
error = vn_open(&nd, O_CREAT | FWRITE | O_NOFOLLOW | O_NONBLOCK,
S_IRUSR | S_IWUSR);
@@ -2164,15 +2164,14 @@ single_thread_set(struct proc *p, enum single_thread_mode mode, int wait)
int
single_thread_wait(struct process *pr, int recheck)
{
-struct sleep_state sls;
int wait;
/* wait until they're all suspended */
wait = pr->ps_singlecount > 0;
while (wait) {
-sleep_setup(&sls, &pr->ps_singlecount, PWAIT, "suspend");
+sleep_setup(&pr->ps_singlecount, PWAIT, "suspend");
wait = pr->ps_singlecount > 0;
-sleep_finish(&sls, PWAIT, 0, wait);
+sleep_finish(0, wait);
if (!recheck)
break;
}
@@ -2204,7 +2203,7 @@ single_thread_clear(struct proc *p, int flag)
* it back into some sleep queue
*/
if (q->p_stat == SSTOP && (q->p_flag & flag) == 0) {
-if (q->p_wchan == NULL)
+if (p->p_wchan == NULL || p->p_flag & P_WSLEEP)
setrunnable(q);
else
q->p_stat = SSLEEP;

kern_synch.c

@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_synch.c,v 1.193 2023/06/28 08:23:25 claudio Exp $ */
+/* $OpenBSD: kern_synch.c,v 1.195 2023/07/14 07:07:08 claudio Exp $ */
/* $NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $ */
/*
@@ -115,7 +115,6 @@ extern int safepri;
int
tsleep(const volatile void *ident, int priority, const char *wmesg, int timo)
{
-struct sleep_state sls;
#ifdef MULTIPROCESSOR
int hold_count;
#endif
@@ -151,8 +150,8 @@ tsleep(const volatile void *ident, int priority, const char *wmesg, int timo)
return (0);
}
-sleep_setup(&sls, ident, priority, wmesg);
-return sleep_finish(&sls, priority, timo, 1);
+sleep_setup(ident, priority, wmesg);
+return sleep_finish(timo, 1);
}
int
@@ -206,7 +205,6 @@ int
msleep(const volatile void *ident, struct mutex *mtx, int priority,
const char *wmesg, int timo)
{
-struct sleep_state sls;
int error, spl;
#ifdef MULTIPROCESSOR
int hold_count;
@@ -244,23 +242,14 @@ msleep(const volatile void *ident, struct mutex *mtx, int priority,
return (0);
}
-sleep_setup(&sls, ident, priority, wmesg);
+sleep_setup(ident, priority, wmesg);
-/* XXX - We need to make sure that the mutex doesn't
-* unblock splsched. This can be made a bit more
-* correct when the sched_lock is a mutex.
-*/
-spl = MUTEX_OLDIPL(mtx);
-MUTEX_OLDIPL(mtx) = splsched();
mtx_leave(mtx);
/* signal may stop the process, release mutex before that */
-error = sleep_finish(&sls, priority, timo, 1);
+error = sleep_finish(timo, 1);
-if ((priority & PNORELOCK) == 0) {
+if ((priority & PNORELOCK) == 0)
mtx_enter(mtx);
-MUTEX_OLDIPL(mtx) = spl; /* put the ipl back */
-} else
-splx(spl);
return error;
}
@@ -296,19 +285,19 @@ int
rwsleep(const volatile void *ident, struct rwlock *rwl, int priority,
const char *wmesg, int timo)
{
-struct sleep_state sls;
int error, status;
KASSERT((priority & ~(PRIMASK | PCATCH | PNORELOCK)) == 0);
KASSERT(ident != &nowake || ISSET(priority, PCATCH) || timo != 0);
KASSERT(ident != rwl);
rw_assert_anylock(rwl);
status = rw_status(rwl);
-sleep_setup(&sls, ident, priority, wmesg);
+sleep_setup(ident, priority, wmesg);
rw_exit(rwl);
/* signal may stop the process, release rwlock before that */
-error = sleep_finish(&sls, priority, timo, 1);
+error = sleep_finish(timo, 1);
if ((priority & PNORELOCK) == 0)
rw_enter(rwl, status);
@@ -340,10 +329,10 @@ rwsleep_nsec(const volatile void *ident, struct rwlock *rwl, int priority,
}
void
-sleep_setup(struct sleep_state *sls, const volatile void *ident, int prio,
-const char *wmesg)
+sleep_setup(const volatile void *ident, int prio, const char *wmesg)
{
struct proc *p = curproc;
+int s;
#ifdef DIAGNOSTIC
if (p->p_flag & P_CANTSLEEP)
@@ -354,7 +343,7 @@ sleep_setup(struct sleep_state *sls, const volatile void *ident, int prio,
panic("tsleep: not SONPROC");
#endif
-SCHED_LOCK(sls->sls_s);
+SCHED_LOCK(s);
TRACEPOINT(sched, sleep, NULL);
@@ -362,23 +351,29 @@ sleep_setup(struct sleep_state *sls, const volatile void *ident, int prio,
p->p_wmesg = wmesg;
p->p_slptime = 0;
p->p_slppri = prio & PRIMASK;
+atomic_setbits_int(&p->p_flag, P_WSLEEP);
TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], p, p_runq);
+if (prio & PCATCH)
+atomic_setbits_int(&p->p_flag, P_SINTR);
+p->p_stat = SSLEEP;
+SCHED_UNLOCK(s);
}
int
-sleep_finish(struct sleep_state *sls, int prio, int timo, int do_sleep)
+sleep_finish(int timo, int do_sleep)
{
struct proc *p = curproc;
-int catch, error = 0, error1 = 0;
+int s, catch, error = 0, error1 = 0;
-catch = prio & PCATCH;
+catch = p->p_flag & P_SINTR;
if (timo != 0) {
KASSERT((p->p_flag & P_TIMEOUT) == 0);
timeout_add(&p->p_sleep_to, timo);
}
+SCHED_LOCK(s);
if (catch != 0) {
/*
* We put ourselves on the sleep queue and start our
@@ -388,28 +383,28 @@ sleep_finish(struct sleep_state *sls, int prio, int timo, int do_sleep)
* us to be marked as SSLEEP without resuming us, thus
* we must be ready for sleep when sleep_signal_check() is
* called.
-* If the wakeup happens while we're stopped, p->p_wchan
-* will be NULL upon return from sleep_signal_check(). In
-* that case we need to unwind immediately.
*/
-atomic_setbits_int(&p->p_flag, P_SINTR);
if ((error = sleep_signal_check()) != 0) {
p->p_stat = SONPROC;
catch = 0;
do_sleep = 0;
-} else if (p->p_wchan == NULL) {
-catch = 0;
-do_sleep = 0;
-}
}
+/*
+* If the wakeup happens while going to sleep, p->p_wchan
+* will be NULL. In that case unwind immediately but still
+* check for possible signals and timeouts.
+*/
+if (p->p_wchan == NULL)
+do_sleep = 0;
+atomic_clearbits_int(&p->p_flag, P_WSLEEP);
if (do_sleep) {
p->p_stat = SSLEEP;
p->p_ru.ru_nvcsw++;
SCHED_ASSERT_LOCKED();
mi_switch();
} else {
unsleep(p);
p->p_stat = SONPROC;
}
#ifdef DIAGNOSTIC
@@ -418,7 +413,7 @@ sleep_finish(struct sleep_state *sls, int prio, int timo, int do_sleep)
#endif
p->p_cpu->ci_schedstate.spc_curpriority = p->p_usrpri;
-SCHED_UNLOCK(sls->sls_s);
+SCHED_UNLOCK(s);
/*
* Even though this belongs to the signal handling part of sleep,
@@ -482,8 +477,12 @@ wakeup_proc(struct proc *p, const volatile void *chan, int flags)
atomic_setbits_int(&p->p_flag, flags);
if (p->p_stat == SSLEEP)
setrunnable(p);
-else
+else if (p->p_stat == SSTOP)
unsleep(p);
+#ifdef DIAGNOSTIC
+else
+panic("wakeup: p_stat is %d", (int)p->p_stat);
+#endif
}
return awakened;
@@ -538,12 +537,6 @@ wakeup_n(const volatile void *ident, int n)
qp = &slpque[LOOKUP(ident)];
for (p = TAILQ_FIRST(qp); p != NULL && n != 0; p = pnext) {
pnext = TAILQ_NEXT(p, p_runq);
-/*
-* This happens if wakeup(9) is called after enqueuing
-* itself on the sleep queue and both `ident' collide.
-*/
-if (p == curproc)
-continue;
#ifdef DIAGNOSTIC
if (p->p_stat != SSLEEP && p->p_stat != SSTOP)
panic("wakeup: p_stat is %d", (int)p->p_stat);
@@ -828,7 +821,6 @@ refcnt_rele_wake(struct refcnt *r)
void
refcnt_finalize(struct refcnt *r, const char *wmesg)
{
-struct sleep_state sls;
u_int refs;
membar_exit_before_atomic();
@@ -836,9 +828,9 @@ refcnt_finalize(struct refcnt *r, const char *wmesg)
KASSERT(refs != ~0);
TRACEINDEX(refcnt, r->r_traceidx, r, refs + 1, -1);
while (refs) {
-sleep_setup(&sls, r, PWAIT, wmesg);
+sleep_setup(r, PWAIT, wmesg);
refs = atomic_load_int(&r->r_refs);
-sleep_finish(&sls, PWAIT, 0, refs);
+sleep_finish(0, refs);
}
TRACEINDEX(refcnt, r->r_traceidx, r, refs, 0);
/* Order subsequent loads and stores after refs == 0 load. */
@@ -882,13 +874,12 @@ cond_signal(struct cond *c)
void
cond_wait(struct cond *c, const char *wmesg)
{
-struct sleep_state sls;
unsigned int wait;
wait = atomic_load_int(&c->c_wait);
while (wait) {
-sleep_setup(&sls, c, PWAIT, wmesg);
+sleep_setup(c, PWAIT, wmesg);
wait = atomic_load_int(&c->c_wait);
-sleep_finish(&sls, PWAIT, 0, wait);
+sleep_finish(0, wait);
}
}

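Every converted caller in this file follows the same two-step pattern: sleep_setup() puts the thread on the sleep queue and marks it with the new P_WSLEEP flag, the caller then re-checks its wakeup condition, and sleep_finish() either really blocks or unwinds based on that recheck, so a wakeup that slips in between the two calls is not lost. A schematic caller, modeled on refcnt_finalize() and cond_wait() above; it is a fragment of kernel code, not something that compiles on its own, and my_ident/my_condition/recheck_condition are placeholder names, not real kernel symbols.

	while (my_condition) {
		/* enqueue on the sleep queue; P_WSLEEP marks "not fully asleep yet" */
		sleep_setup(&my_ident, PWAIT, "wmesg");
		/* re-evaluate after enqueueing so a concurrent wakeup is caught */
		my_condition = recheck_condition();
		/* block only if the condition still holds, otherwise unwind */
		sleep_finish(0, my_condition);
	}
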
kern_sysctl.c

@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_sysctl.c,v 1.417 2023/07/07 16:27:46 bluhm Exp $ */
+/* $OpenBSD: kern_sysctl.c,v 1.418 2023/07/16 03:01:31 yasuoka Exp $ */
/* $NetBSD: kern_sysctl.c,v 1.17 1996/05/20 17:49:05 mrg Exp $ */
/*-
@@ -515,22 +515,20 @@ kern_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
case KERN_MBSTAT: {
extern struct cpumem *mbstat;
uint64_t counters[MBSTAT_COUNT];
-struct mbstat *mbs;
+struct mbstat mbs;
unsigned int i;
-int ret;
-mbs = malloc(sizeof(*mbs), M_TEMP, M_WAITOK | M_ZERO);
+memset(&mbs, 0, sizeof(mbs));
counters_read(mbstat, counters, MBSTAT_COUNT);
for (i = 0; i < MBSTAT_TYPES; i++)
-mbs->m_mtypes[i] = counters[i];
+mbs.m_mtypes[i] = counters[i];
-mbs->m_drops = counters[MBSTAT_DROPS];
-mbs->m_wait = counters[MBSTAT_WAIT];
-mbs->m_drain = counters[MBSTAT_DRAIN];
+mbs.m_drops = counters[MBSTAT_DROPS];
+mbs.m_wait = counters[MBSTAT_WAIT];
+mbs.m_drain = counters[MBSTAT_DRAIN];
-ret = sysctl_rdstruct(oldp, oldlenp, newp, mbs, sizeof(*mbs));
-free(mbs, M_TEMP, sizeof(*mbs));
-return (ret);
+return (sysctl_rdstruct(oldp, oldlenp, newp,
+&mbs, sizeof(mbs)));
}
case KERN_MSGBUFSIZE:
case KERN_CONSBUFSIZE: {

kern_timeout.c

@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_timeout.c,v 1.93 2023/07/06 23:24:37 cheloha Exp $ */
+/* $OpenBSD: kern_timeout.c,v 1.94 2023/07/14 07:07:08 claudio Exp $ */
/*
* Copyright (c) 2001 Thomas Nordin <nordin@openbsd.org>
* Copyright (c) 2000-2001 Artur Grabowski <art@openbsd.org>
@@ -735,7 +735,6 @@ softclock_thread(void *arg)
{
CPU_INFO_ITERATOR cii;
struct cpu_info *ci;
-struct sleep_state sls;
struct timeout *to;
int s;
@@ -751,8 +750,8 @@
s = splsoftclock();
for (;;) {
-sleep_setup(&sls, &timeout_proc, PSWP, "bored");
-sleep_finish(&sls, PSWP, 0, CIRCQ_EMPTY(&timeout_proc));
+sleep_setup(&timeout_proc, PSWP, "bored");
+sleep_finish(0, CIRCQ_EMPTY(&timeout_proc));
mtx_enter(&timeout_mutex);
while (!CIRCQ_EMPTY(&timeout_proc)) {

sched_bsd.c

@@ -1,4 +1,4 @@
-/* $OpenBSD: sched_bsd.c,v 1.76 2023/06/21 21:16:21 cheloha Exp $ */
+/* $OpenBSD: sched_bsd.c,v 1.77 2023/07/11 07:02:43 claudio Exp $ */
/* $NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $ */
/*-
@@ -440,6 +440,10 @@ setrunnable(struct proc *p)
case SSLEEP:
prio = p->p_slppri;
unsleep(p); /* e.g. when sending signals */
+/* if not yet asleep, don't add to runqueue */
+if (ISSET(p->p_flag, P_WSLEEP))
+return;
break;
}
setrunqueue(NULL, p, prio);

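This is the counterpart to the P_WSLEEP handling in kern_synch.c: between sleep_setup() and sleep_finish() the thread already sits on the sleep queue but is still running on its CPU. A wakeup or signal arriving in that window takes it off the sleep queue via unsleep(), but it must not be placed on a run queue (the new KASSERT in setrunqueue() enforces this); sleep_finish() will see the cleared p_wchan and unwind on its own. The annotated copy of the new case below is for reading convenience only; the comment is not part of the diff.

	case SSLEEP:
		prio = p->p_slppri;
		unsleep(p);		/* e.g. when sending signals */
		/*
		 * P_WSLEEP is set by sleep_setup() and cleared by
		 * sleep_finish(); while it is set the thread is still
		 * on a CPU, so do not put it on a run queue here.
		 */
		if (ISSET(p->p_flag, P_WSLEEP))
			return;
		break;
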
subr_log.c

@@ -1,4 +1,4 @@
-/* $OpenBSD: subr_log.c,v 1.76 2023/06/28 08:23:25 claudio Exp $ */
+/* $OpenBSD: subr_log.c,v 1.77 2023/07/14 07:07:08 claudio Exp $ */
/* $NetBSD: subr_log.c,v 1.11 1996/03/30 22:24:44 christos Exp $ */
/*
@@ -233,7 +233,6 @@ logclose(dev_t dev, int flag, int mode, struct proc *p)
int
logread(dev_t dev, struct uio *uio, int flag)
{
-struct sleep_state sls;
struct msgbuf *mbp = msgbufp;
size_t l, rpos;
int error = 0;
@@ -250,9 +249,8 @@ logread(dev_t dev, struct uio *uio, int flag)
* Set up and enter sleep manually instead of using msleep()
* to keep log_mtx as a leaf lock.
*/
-sleep_setup(&sls, mbp, LOG_RDPRI | PCATCH, "klog");
-error = sleep_finish(&sls, LOG_RDPRI | PCATCH, 0,
-logsoftc.sc_state & LOG_RDWAIT);
+sleep_setup(mbp, LOG_RDPRI | PCATCH, "klog");
+error = sleep_finish(0, logsoftc.sc_state & LOG_RDWAIT);
mtx_enter(&log_mtx);
if (error)
goto out;

subr_suspend.c

@@ -1,4 +1,4 @@
-/* $OpenBSD: subr_suspend.c,v 1.15 2023/07/02 19:02:27 cheloha Exp $ */
+/* $OpenBSD: subr_suspend.c,v 1.16 2023/07/12 18:40:06 cheloha Exp $ */
/*
* Copyright (c) 2005 Thorsten Lockert <tholo@sigmasoft.com>
* Copyright (c) 2005 Jordan Hargrave <jordan@openbsd.org>
@@ -26,6 +26,9 @@
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <dev/wscons/wsdisplayvar.h>
+#ifdef GPROF
+#include <sys/gmon.h>
+#endif
#ifdef HIBERNATE
#include <sys/hibernate.h>
#endif
@@ -49,6 +52,9 @@ sleep_state(void *v, int sleepmode)
extern int perflevel;
size_t rndbuflen;
char *rndbuf;
+#ifdef GPROF
+int gmon_state;
+#endif
#if NSOFTRAID > 0
extern void sr_quiesce(void);
#endif
@@ -100,6 +106,12 @@ top:
#ifdef MULTIPROCESSOR
sched_stop_secondary_cpus();
KASSERT(CPU_IS_PRIMARY(curcpu()));
#endif
+#ifdef GPROF
+gmon_state = gmoninit;
+gmoninit = 0;
+#endif
#ifdef MULTIPROCESSOR
sleep_mp();
#endif
@@ -172,6 +184,11 @@ fail_suspend:
resume_randomness(rndbuf, rndbuflen);
#ifdef MULTIPROCESSOR
resume_mp();
#endif
+#ifdef GPROF
+gmoninit = gmon_state;
+#endif
#ifdef MULTIPROCESSOR
sched_start_secondary_cpus();
#endif
vfs_stall(curproc, 0);

vfs_vnops.c

@@ -1,4 +1,4 @@
-/* $OpenBSD: vfs_vnops.c,v 1.121 2022/08/14 01:58:28 jsg Exp $ */
+/* $OpenBSD: vfs_vnops.c,v 1.122 2023/07/10 22:54:40 deraadt Exp $ */
/* $NetBSD: vfs_vnops.c,v 1.20 1996/02/04 02:18:41 christos Exp $ */
/*
@@ -86,13 +86,13 @@ vn_open(struct nameidata *ndp, int fmode, int cmode)
int error;
/*
-* The only valid flag to pass in here from NDINIT is
-* KERNELPATH, This function will override the nameiop based
-* on the fmode and cmode flags, So validate that our caller
-* has not set other flags or operations in the nameidata
+* The only valid flags to pass in here from NDINIT are
+* KERNELPATH or BYPASSUNVEIL. This function will override the
+* nameiop based on the fmode and cmode flags, so validate that
+* our caller has not set other flags or operations in the nameidata
* structure.
*/
-KASSERT(ndp->ni_cnd.cn_flags == 0 || ndp->ni_cnd.cn_flags == KERNELPATH);
+KASSERT((ndp->ni_cnd.cn_flags & ~(KERNELPATH|BYPASSUNVEIL)) == 0);
KASSERT(ndp->ni_cnd.cn_nameiop == 0);
if ((fmode & (FREAD|FWRITE)) == 0)
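
The reworked assertion accepts any combination of the two permitted namei flags instead of only "no flags" or exactly KERNELPATH, which the coredump() change in kern_sig.c relies on, since it now passes BYPASSUNVEIL, possibly together with KERNELPATH. A stand-alone illustration of the difference between the equality form and the mask form; the flag values below are arbitrary stand-ins, not the real <sys/namei.h> bits.

#include <stdio.h>

/* arbitrary stand-in values, not the real <sys/namei.h> definitions */
#define KERNELPATH	0x1
#define BYPASSUNVEIL	0x2
#define OTHERFLAG	0x4

static int
old_check(unsigned int flags)
{
	/* old assertion: nothing, or exactly KERNELPATH */
	return flags == 0 || flags == KERNELPATH;
}

static int
new_check(unsigned int flags)
{
	/* new assertion: anything outside the two permitted flags is rejected */
	return (flags & ~(KERNELPATH | BYPASSUNVEIL)) == 0;
}

int
main(void)
{
	unsigned int cases[] = {
		0, KERNELPATH, BYPASSUNVEIL,
		KERNELPATH | BYPASSUNVEIL, OTHERFLAG
	};
	size_t i;

	for (i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
		printf("flags 0x%x: old %s, new %s\n", cases[i],
		    old_check(cases[i]) ? "pass" : "fail",
		    new_check(cases[i]) ? "pass" : "fail");
	return 0;
}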