sync code with the latest improvements from OpenBSD

Author: purplerain  2023-09-17 17:16:41 +00:00
parent f0c5a45f3a
commit 6dffc8ab2a
Signed by: purplerain (GPG key ID: F42C07F07E2E35B7)

28 changed files with 2476 additions and 1648 deletions

--- a/sys/kern/kern_clockintr.c
+++ b/sys/kern/kern_clockintr.c

@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_clockintr.c,v 1.53 2023/09/15 11:48:49 deraadt Exp $ */
+/* $OpenBSD: kern_clockintr.c,v 1.56 2023/09/17 15:24:35 cheloha Exp $ */
 /*
  * Copyright (c) 2003 Dale Rahn <drahn@openbsd.org>
  * Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
@@ -31,13 +31,6 @@
 #include <sys/sysctl.h>
 #include <sys/time.h>
 
-/*
- * Protection for global variables in this file:
- *
- *	I	Immutable after initialization.
- */
-uint32_t clockintr_flags;		/* [I] global state + behavior flags */
-
 void clockintr_hardclock(struct clockintr *, void *, void *);
 void clockintr_schedule(struct clockintr *, uint64_t);
 void clockintr_schedule_locked(struct clockintr *, uint64_t);
@@ -50,19 +43,6 @@ void clockqueue_pend_insert(struct clockintr_queue *, struct clockintr *,
 void clockqueue_reset_intrclock(struct clockintr_queue *);
 uint64_t nsec_advance(uint64_t *, uint64_t, uint64_t);
 
-/*
- * Initialize global state.  Set flags and compute intervals.
- */
-void
-clockintr_init(uint32_t flags)
-{
-	KASSERT(CPU_IS_PRIMARY(curcpu()));
-	KASSERT(clockintr_flags == 0);
-	KASSERT(!ISSET(flags, ~CL_FLAG_MASK));
-
-	SET(clockintr_flags, flags | CL_INIT);
-}
-
 /*
  * Ready the calling CPU for clockintr_dispatch().  If this is our
  * first time here, install the intrclock, if any, and set necessary
@@ -77,8 +57,6 @@ clockintr_cpu_init(const struct intrclock *ic)
 	struct schedstate_percpu *spc = &ci->ci_schedstate;
 	int reset_cq_intrclock = 0;
 
-	KASSERT(ISSET(clockintr_flags, CL_INIT));
-
 	if (ic != NULL)
 		clockqueue_intrclock_install(cq, ic);
 
@@ -355,10 +333,9 @@ clockintr_cancel(struct clockintr *cl)
 }
 
 struct clockintr *
-clockintr_establish(void *vci,
+clockintr_establish(struct cpu_info *ci,
     void (*func)(struct clockintr *, void *, void *), void *arg)
 {
-	struct cpu_info *ci = vci;
 	struct clockintr *cl;
 	struct clockintr_queue *cq = &ci->ci_queue;
 
@@ -370,7 +347,7 @@ clockintr_establish(void *vci,
 	cl->cl_queue = cq;
 
 	mtx_enter(&cq->cq_mtx);
-	TAILQ_INSERT_TAIL(&cq->cq_est, cl, cl_elink);
+	TAILQ_INSERT_TAIL(&cq->cq_all, cl, cl_alink);
 	mtx_leave(&cq->cq_mtx);
 	return cl;
 }
@@ -443,7 +420,7 @@ clockqueue_init(struct clockintr_queue *cq)
 	cq->cq_shadow.cl_queue = cq;
 	mtx_init(&cq->cq_mtx, IPL_CLOCK);
-	TAILQ_INIT(&cq->cq_est);
+	TAILQ_INIT(&cq->cq_all);
 	TAILQ_INIT(&cq->cq_pend);
 	cq->cq_gen = 1;
 	SET(cq->cq_flags, CQ_INIT);
@@ -623,7 +600,7 @@ db_show_clockintr_cpu(struct cpu_info *ci)
 	db_show_clockintr(cq->cq_running, "run", cpu);
 	TAILQ_FOREACH(elm, &cq->cq_pend, cl_plink)
 		db_show_clockintr(elm, "pend", cpu);
-	TAILQ_FOREACH(elm, &cq->cq_est, cl_elink) {
+	TAILQ_FOREACH(elm, &cq->cq_all, cl_alink) {
 		if (!ISSET(elm->cl_flags, CLST_PENDING))
 			db_show_clockintr(elm, "idle", cpu);
 	}
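
For orientation, a minimal caller-side sketch of the clockintr_establish() change above: the function now takes a typed struct cpu_info * instead of a void *, so the "struct cpu_info *ci = vci;" cast inside the function disappears and callers get compile-time type checking. The example_* names below are hypothetical illustrations, not part of the commit.

	/* Hypothetical callback; the signature is the one in the diff above. */
	void
	example_intr(struct clockintr *cl, void *frame, void *arg)
	{
		/* periodic per-CPU work would go here */
	}

	/* Establish the callback on one CPU's clock-interrupt queue. */
	struct clockintr *
	example_attach(struct cpu_info *ci)
	{
		return clockintr_establish(ci, example_intr, NULL);
	}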

--- a/sys/kern/sched_bsd.c
+++ b/sys/kern/sched_bsd.c

@@ -1,4 +1,4 @@
-/* $OpenBSD: sched_bsd.c,v 1.86 2023/09/10 03:08:05 cheloha Exp $ */
+/* $OpenBSD: sched_bsd.c,v 1.87 2023/09/17 13:02:24 cheloha Exp $ */
 /* $NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $ */
 
 /*-
@@ -117,9 +117,9 @@ roundrobin(struct clockintr *cl, void *cf, void *arg)
  * 1, 5, and 15 minute intervals.
  */
 void
-update_loadavg(void *arg)
+update_loadavg(void *unused)
 {
-	struct timeout *to = (struct timeout *)arg;
+	static struct timeout to = TIMEOUT_INITIALIZER(update_loadavg, NULL);
 	CPU_INFO_ITERATOR cii;
 	struct cpu_info *ci;
 	u_int i, nrun = 0;
@@ -135,7 +135,7 @@ update_loadavg(void *arg)
 		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
 	}
 
-	timeout_add_sec(to, 5);
+	timeout_add_sec(&to, 5);
 }
 
 /*
@@ -227,9 +227,9 @@ fixpt_t ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
  * Recompute process priorities, every second.
  */
 void
-schedcpu(void *arg)
+schedcpu(void *unused)
 {
-	struct timeout *to = (struct timeout *)arg;
+	static struct timeout to = TIMEOUT_INITIALIZER(schedcpu, NULL);
 	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
 	struct proc *p;
 	int s;
@@ -280,7 +280,7 @@ schedcpu(void *arg)
 		SCHED_UNLOCK(s);
 	}
 	wakeup(&lbolt);
-	timeout_add_sec(to, 1);
+	timeout_add_sec(&to, 1);
 }
 
 /*
@@ -726,23 +726,14 @@ sysctl_hwperfpolicy(void *oldp, size_t *oldlenp, void *newp, size_t newlen)
 }
 #endif
 
 /*
  * Start the scheduler's periodic timeouts.
  */
 void
 scheduler_start(void)
 {
-	static struct timeout schedcpu_to;
-	static struct timeout loadavg_to;
-
-	/*
-	 * We avoid polluting the global namespace by keeping the scheduler
-	 * timeouts static in this function.
-	 * We setup the timeout here and kick schedcpu once to make it do
-	 * its job.
-	 */
-	timeout_set(&schedcpu_to, schedcpu, &schedcpu_to);
-	timeout_set(&loadavg_to, update_loadavg, &loadavg_to);
-	schedcpu(&schedcpu_to);
-	update_loadavg(&loadavg_to);
+	schedcpu(NULL);
+	update_loadavg(NULL);
 
 #ifndef SMALL_KERNEL
 	if (perfpolicy == PERFPOL_AUTO)
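
To make the sched_bsd.c refactor above easier to follow, here is the self-rearming timeout pattern it adopts, reduced to a minimal sketch. example_tick is a hypothetical name; TIMEOUT_INITIALIZER() and timeout_add_sec() are the ones visible in the diff. Because the timeout is initialized statically at compile time, scheduler_start() no longer needs any timeout_set() boilerplate: it just calls each handler once with NULL, and every invocation rearms itself.

	/* Hypothetical periodic handler using the pattern adopted above. */
	void
	example_tick(void *unused)
	{
		/* Static initialization replaces the old timeout_set() call;
		 * the timeout stays private to this function. */
		static struct timeout to = TIMEOUT_INITIALIZER(example_tick, NULL);

		/* ... periodic work would go here ... */

		timeout_add_sec(&to, 1);	/* self-rearm: run again in one second */
	}

A single direct call, example_tick(NULL), starts the cycle, which is exactly what the simplified scheduler_start() now does for schedcpu() and update_loadavg().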