sync code with latest improvements from OpenBSD

purplerain 2023-11-11 01:29:48 +00:00
parent 5903cbe575
commit 62d64fa864
Signed by: purplerain
GPG key ID: F42C07F07E2E35B7
841 changed files with 83929 additions and 40755 deletions

sys/kern/kern_clock.c

@@ -1,4 +1,4 @@
/* $OpenBSD: kern_clock.c,v 1.120 2023/10/11 15:42:44 cheloha Exp $ */
/* $OpenBSD: kern_clock.c,v 1.121 2023/10/17 00:04:02 cheloha Exp $ */
/* $NetBSD: kern_clock.c,v 1.34 1996/06/09 04:51:03 briggs Exp $ */
/*-
@@ -277,7 +277,7 @@ stopprofclock(struct process *pr)
* do process and kernel statistics.
*/
void
statclock(struct clockintr *cl, void *cf, void *arg)
statclock(struct clockrequest *cr, void *cf, void *arg)
{
uint64_t count, i;
struct clockframe *frame = cf;
@@ -287,10 +287,10 @@ statclock(struct clockintr *cl, void *cf, void *arg)
struct process *pr;
if (statclock_is_randomized) {
count = clockintr_advance_random(cl, statclock_min,
count = clockrequest_advance_random(cr, statclock_min,
statclock_mask);
} else {
count = clockintr_advance(cl, statclock_avg);
count = clockrequest_advance(cr, statclock_avg);
}
if (CLKF_USERMODE(frame)) {

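Every handler touched by this sync follows the migration shown here for statclock(): the callback now receives the per-CPU clockrequest instead of its clockintr, and it reschedules itself through clockrequest_advance() (or clockrequest_advance_random() when jitter is wanted). A minimal sketch of that handler shape, assuming a hypothetical example_period and example_tick() that are not part of this commit:

void
example_clock(struct clockrequest *cr, void *cf, void *arg)
{
        uint64_t count, i;

        /*
         * Push the expiration past the current uptime in example_period
         * steps; the return value is how many periods have elapsed.
         */
        count = clockrequest_advance(cr, example_period);

        /* Catch up: run the periodic work once per elapsed period. */
        for (i = 0; i < count; i++)
                example_tick(cf);
}
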
sys/kern/kern_clockintr.c

@@ -1,4 +1,4 @@
/* $OpenBSD: kern_clockintr.c,v 1.61 2023/10/11 15:07:04 cheloha Exp $ */
/* $OpenBSD: kern_clockintr.c,v 1.62 2023/10/17 00:04:02 cheloha Exp $ */
/*
* Copyright (c) 2003 Dale Rahn <drahn@openbsd.org>
* Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
@@ -31,7 +31,7 @@
#include <sys/sysctl.h>
#include <sys/time.h>
void clockintr_hardclock(struct clockintr *, void *, void *);
void clockintr_hardclock(struct clockrequest *, void *, void *);
void clockintr_schedule_locked(struct clockintr *, uint64_t);
void clockqueue_intrclock_install(struct clockintr_queue *,
const struct intrclock *);
@@ -164,8 +164,11 @@ clockintr_dispatch(void *frame)
{
uint64_t lateness, run = 0, start;
struct cpu_info *ci = curcpu();
struct clockintr *cl, *shadow;
struct clockintr *cl;
struct clockintr_queue *cq = &ci->ci_queue;
struct clockrequest *request = &cq->cq_request;
void *arg;
void (*func)(struct clockrequest *, void *, void *);
uint32_t ogen;
if (cq->cq_dispatch != 0)
@@ -204,28 +207,26 @@ clockintr_dispatch(void *frame)
}
/*
* This clockintr has expired. Initialize a shadow copy
* and execute it.
* This clockintr has expired. Execute it.
*/
clockqueue_pend_delete(cq, cl);
shadow = &cq->cq_shadow;
shadow->cl_expiration = cl->cl_expiration;
shadow->cl_arg = cl->cl_arg;
shadow->cl_func = cl->cl_func;
request->cr_expiration = cl->cl_expiration;
arg = cl->cl_arg;
func = cl->cl_func;
cq->cq_running = cl;
mtx_leave(&cq->cq_mtx);
shadow->cl_func(shadow, frame, shadow->cl_arg);
func(request, frame, arg);
mtx_enter(&cq->cq_mtx);
cq->cq_running = NULL;
if (ISSET(cl->cl_flags, CLST_IGNORE_SHADOW)) {
CLR(cl->cl_flags, CLST_IGNORE_SHADOW);
CLR(shadow->cl_flags, CLST_SHADOW_PENDING);
if (ISSET(cl->cl_flags, CLST_IGNORE_REQUEST)) {
CLR(cl->cl_flags, CLST_IGNORE_REQUEST);
CLR(request->cr_flags, CR_RESCHEDULE);
}
if (ISSET(shadow->cl_flags, CLST_SHADOW_PENDING)) {
CLR(shadow->cl_flags, CLST_SHADOW_PENDING);
clockqueue_pend_insert(cq, cl, shadow->cl_expiration);
if (ISSET(request->cr_flags, CR_RESCHEDULE)) {
CLR(request->cr_flags, CR_RESCHEDULE);
clockqueue_pend_insert(cq, cl, request->cr_expiration);
}
run++;
}
@@ -274,35 +275,43 @@ clockintr_advance(struct clockintr *cl, uint64_t period)
uint64_t count, expiration;
struct clockintr_queue *cq = cl->cl_queue;
if (cl == &cq->cq_shadow) {
count = nsec_advance(&cl->cl_expiration, period, cq->cq_uptime);
SET(cl->cl_flags, CLST_SHADOW_PENDING);
} else {
mtx_enter(&cq->cq_mtx);
expiration = cl->cl_expiration;
count = nsec_advance(&expiration, period, nsecuptime());
clockintr_schedule_locked(cl, expiration);
mtx_leave(&cq->cq_mtx);
}
mtx_enter(&cq->cq_mtx);
expiration = cl->cl_expiration;
count = nsec_advance(&expiration, period, nsecuptime());
clockintr_schedule_locked(cl, expiration);
mtx_leave(&cq->cq_mtx);
return count;
}
uint64_t
clockintr_advance_random(struct clockintr *cl, uint64_t min, uint32_t mask)
clockrequest_advance(struct clockrequest *cr, uint64_t period)
{
struct clockintr_queue *cq = cr->cr_queue;
KASSERT(cr == &cq->cq_request);
SET(cr->cr_flags, CR_RESCHEDULE);
return nsec_advance(&cr->cr_expiration, period, cq->cq_uptime);
}
uint64_t
clockrequest_advance_random(struct clockrequest *cr, uint64_t min,
uint32_t mask)
{
uint64_t count = 0;
struct clockintr_queue *cq = cl->cl_queue;
struct clockintr_queue *cq = cr->cr_queue;
uint32_t off;
KASSERT(cl == &cq->cq_shadow);
KASSERT(cr == &cq->cq_request);
while (cl->cl_expiration <= cq->cq_uptime) {
while (cr->cr_expiration <= cq->cq_uptime) {
while ((off = (random() & mask)) == 0)
continue;
cl->cl_expiration += min + off;
cr->cr_expiration += min + off;
count++;
}
SET(cl->cl_flags, CLST_SHADOW_PENDING);
SET(cr->cr_flags, CR_RESCHEDULE);
return count;
}
@@ -312,11 +321,6 @@ clockintr_cancel(struct clockintr *cl)
struct clockintr_queue *cq = cl->cl_queue;
int was_next;
if (cl == &cq->cq_shadow) {
CLR(cl->cl_flags, CLST_SHADOW_PENDING);
return;
}
mtx_enter(&cq->cq_mtx);
if (ISSET(cl->cl_flags, CLST_PENDING)) {
was_next = cl == TAILQ_FIRST(&cq->cq_pend);
@@ -329,13 +333,13 @@ clockintr_cancel(struct clockintr *cl)
}
}
if (cl == cq->cq_running)
SET(cl->cl_flags, CLST_IGNORE_SHADOW);
SET(cl->cl_flags, CLST_IGNORE_REQUEST);
mtx_leave(&cq->cq_mtx);
}
struct clockintr *
clockintr_establish(struct cpu_info *ci,
void (*func)(struct clockintr *, void *, void *), void *arg)
void (*func)(struct clockrequest *, void *, void *), void *arg)
{
struct clockintr *cl;
struct clockintr_queue *cq = &ci->ci_queue;
@@ -358,14 +362,9 @@ clockintr_schedule(struct clockintr *cl, uint64_t expiration)
{
struct clockintr_queue *cq = cl->cl_queue;
if (cl == &cq->cq_shadow) {
cl->cl_expiration = expiration;
SET(cl->cl_flags, CLST_SHADOW_PENDING);
} else {
mtx_enter(&cq->cq_mtx);
clockintr_schedule_locked(cl, expiration);
mtx_leave(&cq->cq_mtx);
}
mtx_enter(&cq->cq_mtx);
clockintr_schedule_locked(cl, expiration);
mtx_leave(&cq->cq_mtx);
}
void
@@ -385,7 +384,7 @@ clockintr_schedule_locked(struct clockintr *cl, uint64_t expiration)
}
}
if (cl == cq->cq_running)
SET(cl->cl_flags, CLST_IGNORE_SHADOW);
SET(cl->cl_flags, CLST_IGNORE_REQUEST);
}
void
@@ -404,11 +403,11 @@ clockintr_stagger(struct clockintr *cl, uint64_t period, uint32_t numer,
}
void
clockintr_hardclock(struct clockintr *cl, void *frame, void *arg)
clockintr_hardclock(struct clockrequest *cr, void *frame, void *arg)
{
uint64_t count, i;
count = clockintr_advance(cl, hardclock_period);
count = clockrequest_advance(cr, hardclock_period);
for (i = 0; i < count; i++)
hardclock(frame);
}
@@ -419,7 +418,7 @@ clockqueue_init(struct clockintr_queue *cq)
if (ISSET(cq->cq_flags, CQ_INIT))
return;
cq->cq_shadow.cl_queue = cq;
cq->cq_request.cr_queue = cq;
mtx_init(&cq->cq_mtx, IPL_CLOCK);
TAILQ_INIT(&cq->cq_all);
TAILQ_INIT(&cq->cq_pend);

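For the owner of a clockintr only the callback type changes: clockintr_establish() now takes a function with the clockrequest signature, and arming still goes through clockintr_advance() or clockintr_schedule(). The dispatch loop above copies the expiration into the per-queue clockrequest, latches the function and argument, drops cq_mtx for the call, and re-queues the clockintr only if the handler left CR_RESCHEDULE set. A hedged caller-side sketch, reusing the hypothetical example_clock() from the kern_clock.c note above and assuming ci is the target CPU's struct cpu_info:

        struct clockintr *cl;

        /*
         * Register the callback on this CPU; the last argument is
         * handed back to the handler on every dispatch.
         */
        cl = clockintr_establish(ci, example_clock, NULL);
        if (cl == NULL)
                panic("clockintr_establish failed");

        /*
         * Arm it one period from now; later expirations come from the
         * handler's own clockrequest_advance() call.
         */
        clockintr_advance(cl, example_period);
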
sys/kern/kern_exec.c

@@ -1,4 +1,4 @@
/* $OpenBSD: kern_exec.c,v 1.251 2023/09/29 12:47:34 claudio Exp $ */
/* $OpenBSD: kern_exec.c,v 1.252 2023/10/30 07:13:10 claudio Exp $ */
/* $NetBSD: kern_exec.c,v 1.75 1996/02/09 18:59:28 christos Exp $ */
/*-
@@ -283,9 +283,12 @@ sys_execve(struct proc *p, void *v, register_t *retval)
return (0);
}
/* get other threads to stop */
if ((error = single_thread_set(p, SINGLE_UNWIND | SINGLE_DEEP)))
return (error);
/*
* Get other threads to stop, if contested return ERESTART,
* so the syscall is restarted after halting in userret.
*/
if (single_thread_set(p, SINGLE_UNWIND | SINGLE_DEEP))
return (ERESTART);
/*
* Cheap solution to complicated problems.

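The reworded comment makes the contract explicit: if the single-thread request is contested, sys_execve() no longer propagates the error from single_thread_set() but returns ERESTART, so after the thread halts in userret() the syscall is re-issued transparently. A hedged sketch of the surrounding bracket (the later release via single_thread_clear() is summarized in a comment, not quoted from this hunk):

        /* Stop the other threads; if contested, let userret() retry us. */
        if (single_thread_set(p, SINGLE_UNWIND | SINGLE_DEEP))
                return (ERESTART);

        /* ... tear down the old image and map the new one ... */

        /*
         * Later in sys_execve(): single_thread_clear() drops the
         * single-thread state once the new image is committed.
         */
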
sys/kern/kern_fork.c

@@ -1,4 +1,4 @@
/* $OpenBSD: kern_fork.c,v 1.252 2023/09/13 14:25:49 claudio Exp $ */
/* $OpenBSD: kern_fork.c,v 1.253 2023/10/24 13:20:11 claudio Exp $ */
/* $NetBSD: kern_fork.c,v 1.29 1996/02/09 18:59:34 christos Exp $ */
/*
@@ -50,6 +50,7 @@
#include <sys/acct.h>
#include <sys/ktrace.h>
#include <sys/sched.h>
#include <sys/smr.h>
#include <sys/sysctl.h>
#include <sys/pool.h>
#include <sys/mman.h>
@@ -664,20 +665,37 @@ freepid(pid_t pid)
oldpids[idx++ % nitems(oldpids)] = pid;
}
#if defined(MULTIPROCESSOR)
/*
* XXX This is a slight hack to get newly-formed processes to
* XXX acquire the kernel lock as soon as they run.
*/
/* Do machine independent parts of switching to a new process */
void
proc_trampoline_mp(void)
proc_trampoline_mi(void)
{
struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
struct proc *p = curproc;
SCHED_ASSERT_LOCKED();
clear_resched(curcpu());
#if defined(MULTIPROCESSOR)
__mp_unlock(&sched_lock);
#endif
spl0();
SCHED_ASSERT_UNLOCKED();
KERNEL_ASSERT_UNLOCKED();
assertwaitok();
smr_idle();
/* Start any optional clock interrupts needed by the thread. */
if (ISSET(p->p_p->ps_flags, PS_ITIMER)) {
atomic_setbits_int(&spc->spc_schedflags, SPCF_ITIMER);
clockintr_advance(spc->spc_itimer, hardclock_period);
}
if (ISSET(p->p_p->ps_flags, PS_PROFIL)) {
atomic_setbits_int(&spc->spc_schedflags, SPCF_PROFCLOCK);
clockintr_advance(spc->spc_profclock, profclock_period);
}
nanouptime(&spc->spc_runtime);
KERNEL_LOCK();
}
#endif

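proc_trampoline_mp() is generalized into proc_trampoline_mi(), which now gathers the machine-independent work a newly forked thread must do before its start routine runs: release sched_lock, drop to spl0(), start the optional ITIMER/PROFCLOCK clock interrupts, record spc_runtime and take the kernel lock. A hedged sketch of how an architecture's fork trampoline is expected to use it after this change; proc_trampoline_md(), entry and arg are illustrative names, not code from this commit:

void
proc_trampoline_md(void (*entry)(void *), void *arg)
{
        /*
         * MI half: sched_lock release, spl0(), per-thread clock
         * interrupts, KERNEL_LOCK(); see proc_trampoline_mi() above.
         */
        proc_trampoline_mi();

        /* Jump to the start routine the thread was forked with. */
        (*entry)(arg);
}
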
sys/kern/kern_sched.c

@@ -1,4 +1,4 @@
/* $OpenBSD: kern_sched.c,v 1.92 2023/09/19 11:31:51 claudio Exp $ */
/* $OpenBSD: kern_sched.c,v 1.93 2023/10/24 13:20:11 claudio Exp $ */
/*
* Copyright (c) 2007, 2008 Artur Grabowski <art@openbsd.org>
*
@@ -220,11 +220,28 @@ sched_idle(void *v)
*/
void
sched_exit(struct proc *p)
{
struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
LIST_INSERT_HEAD(&spc->spc_deadproc, p, p_hash);
KERNEL_ASSERT_LOCKED();
sched_toidle();
}
void
sched_toidle(void)
{
struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
struct proc *idle;
int s;
#ifdef MULTIPROCESSOR
/* This process no longer needs to hold the kernel lock. */
if (_kernel_lock_held())
__mp_release_all(&kernel_lock);
#endif
if (ISSET(spc->spc_schedflags, SPCF_ITIMER)) {
atomic_clearbits_int(&spc->spc_schedflags, SPCF_ITIMER);
clockintr_cancel(spc->spc_itimer);
@@ -234,17 +251,16 @@ sched_exit(struct proc *p)
clockintr_cancel(spc->spc_profclock);
}
LIST_INSERT_HEAD(&spc->spc_deadproc, p, p_hash);
#ifdef MULTIPROCESSOR
/* This process no longer needs to hold the kernel lock. */
KERNEL_ASSERT_LOCKED();
__mp_release_all(&kernel_lock);
#endif
atomic_clearbits_int(&spc->spc_schedflags, SPCF_SWITCHCLEAR);
SCHED_LOCK(s);
idle = spc->spc_idleproc;
idle->p_stat = SRUN;
uvmexp.swtch++;
TRACEPOINT(sched, off__cpu, idle->p_tid + THREAD_PID_OFFSET,
idle->p_p->ps_pid);
cpu_switchto(NULL, idle);
panic("cpu_switchto returned");
}
@@ -334,14 +350,16 @@ sched_chooseproc(void)
}
}
p = spc->spc_idleproc;
KASSERT(p);
KASSERT(p->p_wchan == NULL);
if (p == NULL)
panic("no idleproc set on CPU%d",
CPU_INFO_UNIT(curcpu()));
p->p_stat = SRUN;
KASSERT(p->p_wchan == NULL);
return (p);
}
again:
#endif
again:
if (spc->spc_whichqs) {
queue = ffs(spc->spc_whichqs) - 1;
p = TAILQ_FIRST(&spc->spc_qs[queue]);
@@ -351,22 +369,9 @@ again:
panic("thread %d not in SRUN: %d", p->p_tid, p->p_stat);
} else if ((p = sched_steal_proc(curcpu())) == NULL) {
p = spc->spc_idleproc;
if (p == NULL) {
int s;
/*
* We get here if someone decides to switch during
* boot before forking kthreads, bleh.
* This is kind of like a stupid idle loop.
*/
#ifdef MULTIPROCESSOR
__mp_unlock(&sched_lock);
#endif
spl0();
delay(10);
SCHED_LOCK(s);
goto again;
}
KASSERT(p);
if (p == NULL)
panic("no idleproc set on CPU%d",
CPU_INFO_UNIT(curcpu()));
p->p_stat = SRUN;
}

sys/kern/kern_time.c

@@ -1,4 +1,4 @@
/* $OpenBSD: kern_time.c,v 1.166 2023/09/10 03:08:05 cheloha Exp $ */
/* $OpenBSD: kern_time.c,v 1.167 2023/10/17 00:04:02 cheloha Exp $ */
/* $NetBSD: kern_time.c,v 1.20 1996/02/18 11:57:06 fvdl Exp $ */
/*
@@ -755,7 +755,7 @@ itimerdecr(struct itimerspec *itp, const struct timespec *decrement)
}
void
itimer_update(struct clockintr *cl, void *cf, void *arg)
itimer_update(struct clockrequest *cr, void *cf, void *arg)
{
struct timespec elapsed;
uint64_t nsecs;
@@ -770,7 +770,7 @@ itimer_update(struct clockintr *cl, void *cf, void *arg)
if (!ISSET(pr->ps_flags, PS_ITIMER))
return;
nsecs = clockintr_advance(cl, hardclock_period) * hardclock_period;
nsecs = clockrequest_advance(cr, hardclock_period) * hardclock_period;
NSEC_TO_TIMESPEC(nsecs, &elapsed);
mtx_enter(&itimer_mtx);

sys/kern/sched_bsd.c

@@ -1,4 +1,4 @@
/* $OpenBSD: sched_bsd.c,v 1.88 2023/10/11 15:42:44 cheloha Exp $ */
/* $OpenBSD: sched_bsd.c,v 1.89 2023/10/17 00:04:02 cheloha Exp $ */
/* $NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $ */
/*-
@@ -83,13 +83,13 @@ struct loadavg averunnable;
* Force switch among equal priority processes every 100ms.
*/
void
roundrobin(struct clockintr *cl, void *cf, void *arg)
roundrobin(struct clockrequest *cr, void *cf, void *arg)
{
uint64_t count;
struct cpu_info *ci = curcpu();
struct schedstate_percpu *spc = &ci->ci_schedstate;
count = clockintr_advance(cl, roundrobin_period);
count = clockrequest_advance(cr, roundrobin_period);
if (ci->ci_curproc != NULL) {
if (spc->spc_schedflags & SPCF_SEENRR || count >= 2) {

sys/kern/subr_prof.c

@@ -1,4 +1,4 @@
/* $OpenBSD: subr_prof.c,v 1.39 2023/10/11 15:42:44 cheloha Exp $ */
/* $OpenBSD: subr_prof.c,v 1.40 2023/10/17 00:04:02 cheloha Exp $ */
/* $NetBSD: subr_prof.c,v 1.12 1996/04/22 01:38:50 christos Exp $ */
/*-
@@ -64,7 +64,7 @@ u_int gmon_cpu_count; /* [K] number of CPUs with profiling enabled */
extern char etext[];
void gmonclock(struct clockintr *, void *, void *);
void gmonclock(struct clockrequest *, void *, void *);
void
prof_init(void)
@@ -236,14 +236,14 @@ sysctl_doprof(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
}
void
gmonclock(struct clockintr *cl, void *cf, void *arg)
gmonclock(struct clockrequest *cr, void *cf, void *arg)
{
uint64_t count;
struct clockframe *frame = cf;
struct gmonparam *g = curcpu()->ci_gmon;
u_long i;
count = clockintr_advance(cl, profclock_period);
count = clockrequest_advance(cr, profclock_period);
if (count > ULONG_MAX)
count = ULONG_MAX;
@@ -307,13 +307,13 @@ sys_profil(struct proc *p, void *v, register_t *retval)
}
void
profclock(struct clockintr *cl, void *cf, void *arg)
profclock(struct clockrequest *cr, void *cf, void *arg)
{
uint64_t count;
struct clockframe *frame = cf;
struct proc *p = curproc;
count = clockintr_advance(cl, profclock_period);
count = clockrequest_advance(cr, profclock_period);
if (count > ULONG_MAX)
count = ULONG_MAX;

sys/kern/uipc_mbuf.c

@@ -1,4 +1,4 @@
/* $OpenBSD: uipc_mbuf.c,v 1.287 2023/06/23 04:36:49 gnezdo Exp $ */
/* $OpenBSD: uipc_mbuf.c,v 1.288 2023/10/20 16:25:15 bluhm Exp $ */
/* $NetBSD: uipc_mbuf.c,v 1.15.4.1 1996/06/13 17:11:44 cgd Exp $ */
/*
@@ -1080,9 +1080,7 @@ m_split(struct mbuf *m0, int len0, int wait)
n->m_len = 0;
return (n);
}
if (m->m_flags & M_EXT)
goto extpacket;
if (remain > MHLEN) {
if ((m->m_flags & M_EXT) == 0 && remain > MHLEN) {
/* m can't be the lead packet */
m_align(n, 0);
n->m_next = m_split(m, len, wait);
@@ -1094,8 +1092,7 @@ m_split(struct mbuf *m0, int len0, int wait)
n->m_len = 0;
return (n);
}
} else
m_align(n, remain);
}
} else if (remain == 0) {
n = m->m_next;
m->m_next = NULL;
@@ -1104,14 +1101,13 @@ m_split(struct mbuf *m0, int len0, int wait)
MGET(n, wait, m->m_type);
if (n == NULL)
return (NULL);
m_align(n, remain);
}
extpacket:
if (m->m_flags & M_EXT) {
n->m_ext = m->m_ext;
MCLADDREFERENCE(m, n);
n->m_data = m->m_data + len;
} else {
m_align(n, remain);
memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + len, remain);
}
n->m_len = remain;

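The m_split() rework folds the early M_EXT goto into the main flow: m_align() and the data copy now happen only in the non-cluster branch under the extpacket label, while cluster-backed mbufs keep sharing their storage through MCLADDREFERENCE(). The split interface itself stays m_split(m0, len0, wait); a hedged usage sketch, where hdrlen is an illustrative offset:

        struct mbuf *tail;

        /*
         * Split the chain after the first hdrlen bytes; m0 keeps the
         * head, tail gets the rest, NULL is returned if allocation fails.
         */
        tail = m_split(m0, hdrlen, M_DONTWAIT);
        if (tail == NULL)
                return (ENOBUFS);
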
sys/kern/uipc_socket2.c

@@ -1,4 +1,4 @@
/* $OpenBSD: uipc_socket2.c,v 1.137 2023/07/04 22:28:24 mvs Exp $ */
/* $OpenBSD: uipc_socket2.c,v 1.138 2023/10/30 13:27:53 bluhm Exp $ */
/* $NetBSD: uipc_socket2.c,v 1.11 1996/02/04 02:17:55 christos Exp $ */
/*
@@ -920,7 +920,7 @@ sbappendcontrol(struct socket *so, struct sockbuf *sb, struct mbuf *m0,
struct mbuf *control)
{
struct mbuf *m, *mlast, *n;
int space = 0;
int eor = 0, space = 0;
if (control == NULL)
panic("sbappendcontrol");
@@ -930,8 +930,16 @@ sbappendcontrol(struct socket *so, struct sockbuf *sb, struct mbuf *m0,
break;
}
n = m; /* save pointer to last control buffer */
for (m = m0; m; m = m->m_next)
for (m = m0; m; m = m->m_next) {
space += m->m_len;
eor |= m->m_flags & M_EOR;
if (eor) {
if (m->m_next == NULL)
m->m_flags |= M_EOR;
else
m->m_flags &= ~M_EOR;
}
}
if (space > sbspace(so, sb))
return (0);
n->m_next = m0; /* concatenate data to control */