sync code with the latest improvements from OpenBSD

commit 27298272ec (parent 454dab66ed)
Author: purplerain
Date: 2023-08-17 07:36:55 +00:00
Signed by: purplerain, GPG key ID: F42C07F07E2E35B7
237 changed files with 4666 additions and 2149 deletions

sys/kern/kern_clock.c

@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_clock.c,v 1.111 2023/08/05 20:07:55 cheloha Exp $ */
+/* $OpenBSD: kern_clock.c,v 1.113 2023/08/12 13:19:28 miod Exp $ */
/* $NetBSD: kern_clock.c,v 1.34 1996/06/09 04:51:03 briggs Exp $ */
/*-
@@ -111,10 +111,9 @@ initclocks(void)
void
hardclock(struct clockframe *frame)
{
-#if defined(MULTIPROCESSOR) || defined(__hppa__) /* XXX */
-struct cpu_info *ci = curcpu();
-if (--ci->ci_schedstate.spc_rrticks <= 0)
-roundrobin(ci);
-#endif
#if NDT > 0
DT_ENTER(profile, NULL);
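
The block removed above is the last remnant of the old tick-countdown idiom: hardclock() decremented the per-CPU spc_rrticks counter on every tick and called roundrobin() when it reached zero. Roundrobin scheduling is now driven by its own clock interrupt (see kern_clockintr.c and sched_bsd.c below). A minimal userspace sketch of the retired idiom, with illustrative names rather than the kernel's:

    #include <stdio.h>

    #define HZ 100                          /* illustrative tick rate */
    #define RRTICKS_INIT (HZ / 10)          /* one roundrobin per ~100ms */

    static int rrticks = RRTICKS_INIT;

    /* Old pattern: each clock tick counts down toward the next roundrobin. */
    static void
    hardclock_tick(void)
    {
        if (--rrticks <= 0) {
            rrticks = RRTICKS_INIT;
            printf("roundrobin fires\n");
        }
    }

    int
    main(void)
    {
        for (int i = 0; i < 3 * HZ; i++)    /* simulate 3 seconds of ticks */
            hardclock_tick();
        return 0;
    }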

sys/kern/kern_clockintr.c

@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_clockintr.c,v 1.30 2023/08/05 20:07:55 cheloha Exp $ */
+/* $OpenBSD: kern_clockintr.c,v 1.31 2023/08/11 22:02:50 cheloha Exp $ */
/*
* Copyright (c) 2003 Dale Rahn <drahn@openbsd.org>
* Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
@@ -69,6 +69,7 @@ clockintr_init(u_int flags)
KASSERT(hz > 0 && hz <= 1000000000);
hardclock_period = 1000000000 / hz;
+roundrobin_period = hardclock_period * 10;
KASSERT(stathz >= 1 && stathz <= 1000000000);
@@ -204,6 +205,11 @@ clockintr_cpu_init(const struct intrclock *ic)
clockintr_stagger(spc->spc_profclock, profclock_period,
multiplier, MAXCPUS);
}
+if (spc->spc_roundrobin->cl_expiration == 0) {
+clockintr_stagger(spc->spc_roundrobin, hardclock_period,
+multiplier, MAXCPUS);
+}
+clockintr_advance(spc->spc_roundrobin, roundrobin_period);
if (reset_cq_intrclock)
SET(cq->cq_flags, CQ_INTRCLOCK);
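
The stagger calls above offset each CPU's profclock and roundrobin expirations so the interrupts do not fire on every CPU at the same instant. Assuming clockintr_stagger(cl, period, n, count) arms entry n at an initial offset of n * period / count, which is inferred from the call sites here rather than from documentation, the spacing works out as in this sketch:

    #include <stdint.h>
    #include <stdio.h>

    #define MAXCPUS 64          /* placeholder; the real value is per-arch */

    int
    main(void)
    {
        uint64_t hardclock_period = 1000000000ULL / 100;   /* 10ms at hz=100 */

        /* CPU n takes its first roundrobin n/MAXCPUS of a period later. */
        for (unsigned n = 0; n < 4; n++) {
            uint64_t offset = n * hardclock_period / MAXCPUS;
            printf("cpu%u: first expiration at +%llu ns\n",
                n, (unsigned long long)offset);
        }
        return 0;
    }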

sys/kern/kern_event.c

@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_event.c,v 1.196 2023/04/11 00:45:09 jsg Exp $ */
+/* $OpenBSD: kern_event.c,v 1.197 2023/08/13 08:29:28 visa Exp $ */
/*-
* Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
@@ -449,17 +449,61 @@ filt_proc(struct knote *kn, long hint)
return (kn->kn_fflags != 0);
}
-static void
-filt_timer_timeout_add(struct knote *kn)
+#define NOTE_TIMER_UNITMASK \
+(NOTE_SECONDS|NOTE_MSECONDS|NOTE_USECONDS|NOTE_NSECONDS)
+static int
+filt_timervalidate(int sfflags, int64_t sdata, struct timespec *ts)
{
-struct timeval tv;
+if (sfflags & ~(NOTE_TIMER_UNITMASK | NOTE_ABSTIME))
+return (EINVAL);
+switch (sfflags & NOTE_TIMER_UNITMASK) {
+case NOTE_SECONDS:
+ts->tv_sec = sdata;
+ts->tv_nsec = 0;
+break;
+case NOTE_MSECONDS:
+ts->tv_sec = sdata / 1000;
+ts->tv_nsec = (sdata % 1000) * 1000000;
+break;
+case NOTE_USECONDS:
+ts->tv_sec = sdata / 1000000;
+ts->tv_nsec = (sdata % 1000000) * 1000;
+break;
+case NOTE_NSECONDS:
+ts->tv_sec = sdata / 1000000000;
+ts->tv_nsec = sdata % 1000000000;
+break;
+default:
+return (EINVAL);
+}
+return (0);
+}
+static void
+filt_timeradd(struct knote *kn, struct timespec *ts)
+{
+struct timespec expiry, now;
struct timeout *to = kn->kn_hook;
int tticks;
-tv.tv_sec = kn->kn_sdata / 1000;
-tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
-tticks = tvtohz(&tv);
-/* Remove extra tick from tvtohz() if timeout has fired before. */
+if (kn->kn_sfflags & NOTE_ABSTIME) {
+nanotime(&now);
+if (timespeccmp(ts, &now, >)) {
+timespecsub(ts, &now, &expiry);
+/* XXX timeout_abs_ts with CLOCK_REALTIME */
+timeout_add(to, tstohz(&expiry));
+} else {
+/* Expire immediately. */
+filt_timerexpire(kn);
+}
+return;
+}
+tticks = tstohz(ts);
+/* Remove extra tick from tstohz() if timeout has fired before. */
if (timeout_triggered(to))
tticks--;
timeout_add(to, (tticks > 0) ? tticks : 1);
@@ -468,6 +512,7 @@ filt_timer_timeout_add(struct knote *kn)
void
filt_timerexpire(void *knx)
{
+struct timespec ts;
struct knote *kn = knx;
struct kqueue *kq = kn->kn_kq;
@@ -476,28 +521,37 @@ filt_timerexpire(void *knx)
knote_activate(kn);
mtx_leave(&kq->kq_lock);
-if ((kn->kn_flags & EV_ONESHOT) == 0)
-filt_timer_timeout_add(kn);
+if ((kn->kn_flags & EV_ONESHOT) == 0 &&
+(kn->kn_sfflags & NOTE_ABSTIME) == 0) {
+(void)filt_timervalidate(kn->kn_sfflags, kn->kn_sdata, &ts);
+filt_timeradd(kn, &ts);
+}
}
/*
-* data contains amount of time to sleep, in milliseconds
+* data contains amount of time to sleep
*/
int
filt_timerattach(struct knote *kn)
{
+struct timespec ts;
struct timeout *to;
+int error;
+error = filt_timervalidate(kn->kn_sfflags, kn->kn_sdata, &ts);
+if (error != 0)
+return (error);
if (kq_ntimeouts > kq_timeoutmax)
return (ENOMEM);
kq_ntimeouts++;
-kn->kn_flags |= EV_CLEAR; /* automatically set */
+if ((kn->kn_sfflags & NOTE_ABSTIME) == 0)
+kn->kn_flags |= EV_CLEAR; /* automatically set */
to = malloc(sizeof(*to), M_KEVENT, M_WAITOK);
timeout_set(to, filt_timerexpire, kn);
kn->kn_hook = to;
-filt_timer_timeout_add(kn);
+filt_timeradd(kn, &ts);
return (0);
}
@@ -516,8 +570,17 @@ filt_timerdetach(struct knote *kn)
int
filt_timermodify(struct kevent *kev, struct knote *kn)
{
+struct timespec ts;
struct kqueue *kq = kn->kn_kq;
struct timeout *to = kn->kn_hook;
+int error;
+error = filt_timervalidate(kev->fflags, kev->data, &ts);
+if (error != 0) {
+kev->flags |= EV_ERROR;
+kev->data = error;
+return (0);
+}
/* Reset the timer. Any pending events are discarded. */
@@ -533,7 +596,7 @@ filt_timermodify(struct kevent *kev, struct knote *kn)
knote_assign(kev, kn);
/* Reinit timeout to invoke tick adjustment again. */
timeout_set(to, filt_timerexpire, kn);
-filt_timer_timeout_add(kn);
+filt_timeradd(kn, &ts);
return (0);
}
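
The user-visible effect of this kern_event.c change is that EVFILT_TIMER now accepts a time unit in fflags (NOTE_SECONDS, NOTE_MSECONDS, NOTE_USECONDS, NOTE_NSECONDS) and, with NOTE_ABSTIME, an absolute expiry; data is interpreted in the chosen unit, and filt_timervalidate() above does the unit-to-timespec conversion. A sketch of the new usage from userspace, assuming a kernel with this commit and keeping error handling minimal:

    #include <sys/types.h>
    #include <sys/event.h>
    #include <err.h>
    #include <stdio.h>
    #include <time.h>

    int
    main(void)
    {
        struct kevent kev[2], ev;
        struct timespec now;
        int kq, i;

        if ((kq = kqueue()) == -1)
            err(1, "kqueue");

        /* Periodic timer: fires every 500 milliseconds. */
        EV_SET(&kev[0], 1, EVFILT_TIMER, EV_ADD, NOTE_MSECONDS, 500, NULL);

        /* One-shot absolute timer: fires two seconds from now. */
        clock_gettime(CLOCK_REALTIME, &now);
        EV_SET(&kev[1], 2, EVFILT_TIMER, EV_ADD | EV_ONESHOT,
            NOTE_SECONDS | NOTE_ABSTIME, now.tv_sec + 2, NULL);

        if (kevent(kq, kev, 2, NULL, 0, NULL) == -1)
            err(1, "kevent: register");

        for (i = 0; i < 5; i++) {
            if (kevent(kq, NULL, 0, &ev, 1, NULL) == -1)
                err(1, "kevent: wait");
            printf("timer %lu fired\n", (unsigned long)ev.ident);
        }
        return 0;
    }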

sys/kern/kern_fork.c

@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_fork.c,v 1.248 2023/07/02 11:16:03 deraadt Exp $ */
+/* $OpenBSD: kern_fork.c,v 1.249 2023/08/14 08:33:24 mpi Exp $ */
/* $NetBSD: kern_fork.c,v 1.29 1996/02/09 18:59:34 christos Exp $ */
/*
@@ -56,6 +56,7 @@
#include <sys/ptrace.h>
#include <sys/atomic.h>
#include <sys/unistd.h>
+#include <sys/tracepoint.h>
#include <sys/syscallargs.h>
@@ -316,6 +317,8 @@ fork_thread_start(struct proc *p, struct proc *parent, int flags)
SCHED_LOCK(s);
ci = sched_choosecpu_fork(parent, flags);
+TRACEPOINT(sched, fork, p->p_tid + THREAD_PID_OFFSET,
+p->p_p->ps_pid, CPU_INFO_UNIT(ci));
setrunqueue(ci, p, p->p_usrpri);
SCHED_UNLOCK(s);
}

sys/kern/kern_sched.c

@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_sched.c,v 1.84 2023/08/05 20:07:55 cheloha Exp $ */
+/* $OpenBSD: kern_sched.c,v 1.86 2023/08/14 08:33:24 mpi Exp $ */
/*
* Copyright (c) 2007, 2008 Artur Grabowski <art@openbsd.org>
*
@@ -102,6 +102,12 @@ sched_init_cpu(struct cpu_info *ci)
if (spc->spc_profclock == NULL)
panic("%s: clockintr_establish profclock", __func__);
}
+if (spc->spc_roundrobin == NULL) {
+spc->spc_roundrobin = clockintr_establish(&ci->ci_queue,
+roundrobin);
+if (spc->spc_roundrobin == NULL)
+panic("%s: clockintr_establish roundrobin", __func__);
+}
kthread_create_deferred(sched_kthreads_create, ci);
@@ -542,6 +548,9 @@ sched_steal_proc(struct cpu_info *self)
if (best == NULL)
return (NULL);
+TRACEPOINT(sched, steal, best->p_tid + THREAD_PID_OFFSET,
+best->p_p->ps_pid, CPU_INFO_UNIT(self));
remrunqueue(best);
best->p_cpu = self;

sys/kern/kern_sig.c

@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_sig.c,v 1.310 2023/07/14 07:07:08 claudio Exp $ */
+/* $OpenBSD: kern_sig.c,v 1.313 2023/08/16 07:55:52 claudio Exp $ */
/* $NetBSD: kern_sig.c,v 1.54 1996/04/22 01:38:32 christos Exp $ */
/*
@@ -1151,8 +1151,9 @@ ptsignal(struct proc *p, int signum, enum signal_type type)
atomic_clearbits_int(siglist, mask);
if (action == SIG_CATCH)
goto runfast;
-if (p->p_wchan == NULL || p->p_flag & P_WSLEEP)
+if (p->p_wchan == NULL)
goto run;
+atomic_clearbits_int(&p->p_flag, P_WSLEEP);
p->p_stat = SSLEEP;
goto out;
}
@@ -1250,7 +1251,6 @@ cursig(struct proc *p, struct sigctx *sctx)
{
struct process *pr = p->p_p;
int signum, mask, prop;
-int dolock = (p->p_flag & P_SINTR) == 0;
sigset_t ps_siglist;
int s;
@@ -1293,11 +1293,9 @@ cursig(struct proc *p, struct sigctx *sctx)
single_thread_set(p, SINGLE_SUSPEND, 0);
pr->ps_xsig = signum;
-if (dolock)
-SCHED_LOCK(s);
+SCHED_LOCK(s);
proc_stop(p, 1);
-if (dolock)
-SCHED_UNLOCK(s);
+SCHED_UNLOCK(s);
/*
* re-take the signal before releasing
@@ -1370,11 +1368,9 @@ cursig(struct proc *p, struct sigctx *sctx)
prop & SA_TTYSTOP))
break; /* == ignore */
pr->ps_xsig = signum;
-if (dolock)
-SCHED_LOCK(s);
+SCHED_LOCK(s);
proc_stop(p, 1);
-if (dolock)
-SCHED_UNLOCK(s);
+SCHED_UNLOCK(s);
break;
} else if (prop & SA_IGNORE) {
/*
@@ -1970,6 +1966,9 @@ userret(struct proc *p)
struct sigctx ctx;
int signum;
+if (p->p_flag & P_SUSPSINGLE)
+single_thread_check(p, 0);
/* send SIGPROF or SIGVTALRM if their timers interrupted this thread */
if (p->p_flag & P_PROFPEND) {
atomic_clearbits_int(&p->p_flag, P_PROFPEND);
@@ -2003,9 +2002,6 @@ userret(struct proc *p)
postsig(p, signum, &ctx);
}
-if (p->p_flag & P_SUSPSINGLE)
-single_thread_check(p, 0);
WITNESS_WARN(WARN_PANIC, NULL, "userret: returning");
p->p_cpu->ci_schedstate.spc_curpriority = p->p_usrpri;
@@ -2203,10 +2199,12 @@ single_thread_clear(struct proc *p, int flag)
* it back into some sleep queue
*/
if (q->p_stat == SSTOP && (q->p_flag & flag) == 0) {
-if (p->p_wchan == NULL || p->p_flag & P_WSLEEP)
+if (q->p_wchan == NULL)
setrunnable(q);
-else
+else {
+atomic_clearbits_int(&q->p_flag, P_WSLEEP);
q->p_stat = SSLEEP;
+}
}
}
SCHED_UNLOCK(s);

sys/kern/kern_synch.c

@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_synch.c,v 1.196 2023/08/10 20:44:52 claudio Exp $ */
+/* $OpenBSD: kern_synch.c,v 1.198 2023/08/16 07:55:52 claudio Exp $ */
/* $NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $ */
/*
@@ -373,7 +373,6 @@ sleep_finish(int timo, int do_sleep)
timeout_add(&p->p_sleep_to, timo);
}
-SCHED_LOCK(s);
if (catch != 0) {
/*
* We put ourselves on the sleep queue and start our
@@ -390,6 +389,7 @@ sleep_finish(int timo, int do_sleep)
}
}
+SCHED_LOCK(s);
/*
* If the wakeup happens while going to sleep, p->p_wchan
* will be NULL. In that case unwind immediately but still
@@ -520,7 +520,7 @@ unsleep(struct proc *p)
if (p->p_wchan != NULL) {
TAILQ_REMOVE(&slpque[LOOKUP(p->p_wchan)], p, p_runq);
p->p_wchan = NULL;
-TRACEPOINT(sched, wakeup, p->p_tid + THREAD_PID_OFFSET,
+TRACEPOINT(sched, unsleep, p->p_tid + THREAD_PID_OFFSET,
p->p_p->ps_pid);
}
}

sys/kern/sched_bsd.c

@@ -1,4 +1,4 @@
-/* $OpenBSD: sched_bsd.c,v 1.79 2023/08/05 20:07:55 cheloha Exp $ */
+/* $OpenBSD: sched_bsd.c,v 1.81 2023/08/14 08:33:24 mpi Exp $ */
/* $NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $ */
/*-
@@ -54,9 +54,8 @@
#include <sys/ktrace.h>
#endif
+uint32_t roundrobin_period; /* [I] roundrobin period (ns) */
int lbolt; /* once a second sleep address */
-int rrticks_init; /* # of hardclock ticks per roundrobin() */
#ifdef MULTIPROCESSOR
struct __mp_lock sched_lock;
@@ -69,21 +68,23 @@ uint32_t decay_aftersleep(uint32_t, uint32_t)
* Force switch among equal priority processes every 100ms.
*/
void
-roundrobin(struct cpu_info *ci)
+roundrobin(struct clockintr *cl, void *cf)
{
+uint64_t count;
+struct cpu_info *ci = curcpu();
struct schedstate_percpu *spc = &ci->ci_schedstate;
-spc->spc_rrticks = rrticks_init;
+count = clockintr_advance(cl, roundrobin_period);
if (ci->ci_curproc != NULL) {
-if (spc->spc_schedflags & SPCF_SEENRR) {
+if (spc->spc_schedflags & SPCF_SEENRR || count >= 2) {
/*
* The process has already been through a roundrobin
* without switching and may be hogging the CPU.
* Indicate that the process should yield.
*/
atomic_setbits_int(&spc->spc_schedflags,
-SPCF_SHOULDYIELD);
+SPCF_SEENRR | SPCF_SHOULDYIELD);
} else {
atomic_setbits_int(&spc->spc_schedflags,
SPCF_SEENRR);
@@ -461,6 +462,7 @@ setrunnable(struct proc *p)
atomic_setbits_int(&p->p_siglist, sigmask(pr->ps_xsig));
prio = p->p_usrpri;
unsleep(p);
+setrunqueue(NULL, p, prio);
break;
case SSLEEP:
prio = p->p_slppri;
@@ -469,9 +471,11 @@ setrunnable(struct proc *p)
/* if not yet asleep, don't add to runqueue */
if (ISSET(p->p_flag, P_WSLEEP))
return;
+setrunqueue(NULL, p, prio);
+TRACEPOINT(sched, wakeup, p->p_tid + THREAD_PID_OFFSET,
+p->p_p->ps_pid, CPU_INFO_UNIT(p->p_cpu));
break;
}
-setrunqueue(NULL, p, prio);
if (p->p_slptime > 1) {
uint32_t newcpu;
@@ -695,8 +699,6 @@ scheduler_start(void)
* its job.
*/
timeout_set(&schedcpu_to, schedcpu, &schedcpu_to);
-rrticks_init = hz / 10;
schedcpu(&schedcpu_to);
#ifndef SMALL_KERNEL
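
In the rewritten roundrobin(), clockintr_advance() both re-arms the handler to fire one roundrobin_period from now and returns how many periods have expired since the previous call. A count of 2 or more therefore means an expiration was missed, so the running process has already had a full quantum and SPCF_SEENRR and SPCF_SHOULDYIELD are set in one step. A userspace analogue of that missed-period accounting, as a sketch rather than the kernel logic:

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    #define PERIOD_NS 100000000ULL      /* 100ms, matching the comment above */

    static uint64_t
    now_ns(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
    }

    int
    main(void)
    {
        uint64_t next = now_ns() + PERIOD_NS;

        for (int i = 0; i < 5; i++) {
            usleep(i == 2 ? 250000 : 100000);   /* oversleep once */

            /* Count expired periods and re-arm, like clockintr_advance(). */
            uint64_t count = 0;
            while (next <= now_ns()) {
                next += PERIOD_NS;
                count++;
            }
            if (count >= 2)
                printf("missed a period: yield immediately\n");
            else
                printf("on schedule (count=%llu)\n",
                    (unsigned long long)count);
        }
        return 0;
    }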

sys/kern/sys_futex.c

@@ -1,4 +1,4 @@
-/* $OpenBSD: sys_futex.c,v 1.21 2022/08/14 01:58:28 jsg Exp $ */
+/* $OpenBSD: sys_futex.c,v 1.22 2023/08/14 07:42:34 miod Exp $ */
/*
* Copyright (c) 2016-2017 Martin Pieuchot
@@ -32,15 +32,6 @@
#include <uvm/uvm.h>
-/*
-* Atomicity is only needed on MULTIPROCESSOR kernels. Fall back on
-* copyin(9) until non-MULTIPROCESSOR architectures have a copyin32(9)
-* implementation.
-*/
-#ifndef MULTIPROCESSOR
-#define copyin32(uaddr, kaddr) copyin((uaddr), (kaddr), sizeof(uint32_t))
-#endif
/*
* Kernel representation of a futex.
*/
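
The deleted fallback defined copyin32(9) as a plain 4-byte copyin(9), which is only safe when no other CPU can write the futex word mid-copy, hence the MULTIPROCESSOR guard; it is gone because every architecture now provides a native copyin32(9). The point of the dedicated primitive is that the word is fetched with a single aligned 32-bit load. A userspace illustration of the distinction, with hypothetical helper names:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Like copyin(): byte-wise copy, no promise of a single 32-bit load. */
    static uint32_t
    read_bytewise(const uint32_t *uaddr)
    {
        uint32_t v;

        memcpy(&v, uaddr, sizeof(v));   /* may tear under concurrent writes */
        return v;
    }

    /* Like a native copyin32(): one aligned, untearable 32-bit load. */
    static uint32_t
    read_word(const _Atomic uint32_t *uaddr)
    {
        return atomic_load_explicit(uaddr, memory_order_relaxed);
    }

    int
    main(void)
    {
        uint32_t plain = 42;
        static _Atomic uint32_t shared = 42;

        printf("%u %u\n", read_bytewise(&plain), read_word(&shared));
        return 0;
    }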