sync code with last improvements from OpenBSD

purplerain 2023-09-17 17:16:41 +00:00
parent f0c5a45f3a
commit 6dffc8ab2a
Signed by: purplerain
GPG key ID: F42C07F07E2E35B7
28 changed files with 2476 additions and 1648 deletions


@@ -1,4 +1,4 @@
-/* $OpenBSD: lapic.c,v 1.70 2023/09/14 19:39:47 cheloha Exp $ */
+/* $OpenBSD: lapic.c,v 1.71 2023/09/17 14:50:50 cheloha Exp $ */
 /* $NetBSD: lapic.c,v 1.2 2003/05/08 01:04:35 fvdl Exp $ */
 /*-
@@ -499,7 +499,6 @@ lapic_initclocks(void)
 	stathz = hz;
 	profhz = stathz * 10;
 	statclock_is_randomized = 1;
-	clockintr_init(0);
 }


@@ -1,4 +1,4 @@
-/* $OpenBSD: clock.c,v 1.41 2023/08/23 01:55:46 cheloha Exp $ */
+/* $OpenBSD: clock.c,v 1.42 2023/09/17 14:50:50 cheloha Exp $ */
 /* $NetBSD: clock.c,v 1.1 2003/04/26 18:39:50 fvdl Exp $ */
 /*-
@@ -283,7 +283,6 @@ i8254_initclocks(void)
 	stathz = 128;
 	profhz = 1024;	/* XXX does not divide into 1 billion */
-	clockintr_init(0);
 }

 void


@@ -1,4 +1,4 @@
-/* $OpenBSD: agtimer.c,v 1.20 2023/09/14 19:39:47 cheloha Exp $ */
+/* $OpenBSD: agtimer.c,v 1.21 2023/09/17 14:50:51 cheloha Exp $ */
 /*
  * Copyright (c) 2011 Dale Rahn <drahn@openbsd.org>
  * Copyright (c) 2013 Patrick Wildt <patrick@blueri.se>
@@ -231,7 +231,6 @@ agtimer_cpu_initclocks(void)
 	stathz = hz;
 	profhz = stathz * 10;
 	statclock_is_randomized = 1;
-	clockintr_init(0);

 	if (sc->sc_ticks_per_second != agtimer_frequency) {
 		agtimer_set_clockrate(agtimer_frequency);


@@ -1,4 +1,4 @@
-/* $OpenBSD: amptimer.c,v 1.19 2023/09/14 19:39:47 cheloha Exp $ */
+/* $OpenBSD: amptimer.c,v 1.20 2023/09/17 14:50:51 cheloha Exp $ */
 /*
  * Copyright (c) 2011 Dale Rahn <drahn@openbsd.org>
  *
@@ -288,7 +288,6 @@ amptimer_cpu_initclocks(void)
 	stathz = hz;
 	profhz = hz * 10;
 	statclock_is_randomized = 1;
-	clockintr_init(0);

 	if (sc->sc_ticks_per_second != amptimer_frequency) {
 		amptimer_set_clockrate(amptimer_frequency);


@@ -1,4 +1,4 @@
-/* $OpenBSD: acpipci.c,v 1.40 2023/09/12 08:32:58 jmatthew Exp $ */
+/* $OpenBSD: acpipci.c,v 1.41 2023/09/16 23:25:16 jmatthew Exp $ */
 /*
  * Copyright (c) 2018 Mark Kettenis
  *
@@ -844,7 +844,8 @@ acpipci_iort_map(struct acpi_iort *iort, uint32_t offset, uint32_t id,
 		itsn = (struct acpi_iort_its_node *)&node[1];
 		LIST_FOREACH(icl, &interrupt_controllers, ic_list) {
 			for (i = 0; i < itsn->number_of_itss; i++) {
-				if (icl->ic_gic_its_id == itsn->its_ids[i]) {
+				if (icl->ic_establish_msi != NULL &&
+				    icl->ic_gic_its_id == itsn->its_ids[i]) {
 					*ic = icl;
 					break;
 				}


@@ -1,4 +1,4 @@
-/* $OpenBSD: agtimer.c,v 1.27 2023/09/14 19:39:47 cheloha Exp $ */
+/* $OpenBSD: agtimer.c,v 1.28 2023/09/17 14:50:51 cheloha Exp $ */
 /*
  * Copyright (c) 2011 Dale Rahn <drahn@openbsd.org>
  * Copyright (c) 2013 Patrick Wildt <patrick@blueri.se>
@@ -294,7 +294,6 @@ agtimer_cpu_initclocks(void)
 	stathz = hz;
 	profhz = stathz * 10;
 	statclock_is_randomized = 1;
-	clockintr_init(0);

 	if (sc->sc_ticks_per_second != agtimer_frequency) {
 		agtimer_set_clockrate(agtimer_frequency);


@@ -1,4 +1,4 @@
-/* $OpenBSD: dmtimer.c,v 1.21 2023/09/14 19:39:47 cheloha Exp $ */
+/* $OpenBSD: dmtimer.c,v 1.22 2023/09/17 14:50:51 cheloha Exp $ */
 /*
  * Copyright (c) 2007,2009 Dale Rahn <drahn@openbsd.org>
  * Copyright (c) 2013 Raphael Graf <r@undefined.ch>
@@ -233,7 +233,6 @@ dmtimer_cpu_initclocks(void)
 	stathz = hz;
 	profhz = stathz * 10;
 	statclock_is_randomized = 1;
-	clockintr_init(0);

 	sc->sc_ticks_per_second = TIMER_FREQUENCY;	/* 32768 */
 	sc->sc_nsec_cycle_ratio =


@@ -1,4 +1,4 @@
-/* $OpenBSD: gptimer.c,v 1.22 2023/09/14 19:39:47 cheloha Exp $ */
+/* $OpenBSD: gptimer.c,v 1.23 2023/09/17 14:50:51 cheloha Exp $ */
 /*
  * Copyright (c) 2007,2009 Dale Rahn <drahn@openbsd.org>
  *
@@ -199,7 +199,6 @@ gptimer_cpu_initclocks(void)
 	stathz = hz;
 	profhz = stathz * 10;
 	statclock_is_randomized = 1;
-	clockintr_init(0);

 	gptimer_nsec_cycle_ratio = TIMER_FREQUENCY * (1ULL << 32) / 1000000000;
 	gptimer_nsec_max = UINT64_MAX / gptimer_nsec_cycle_ratio;


@@ -1,4 +1,4 @@
-/* $OpenBSD: sxitimer.c,v 1.23 2023/09/14 19:39:47 cheloha Exp $ */
+/* $OpenBSD: sxitimer.c,v 1.24 2023/09/17 14:50:51 cheloha Exp $ */
 /*
  * Copyright (c) 2007,2009 Dale Rahn <drahn@openbsd.org>
  * Copyright (c) 2013 Raphael Graf <r@undefined.ch>
@@ -181,7 +181,6 @@ sxitimer_attach(struct device *parent, struct device *self, void *aux)
 	stathz = hz;
 	profhz = stathz * 10;
 	statclock_is_randomized = 1;
-	clockintr_init(0);

 	/* stop timer, and set clk src */
 	bus_space_write_4(sxitimer_iot, sxitimer_ioh,


@@ -1,4 +1,4 @@
-/* $OpenBSD: lapic.c,v 1.57 2023/09/14 19:39:48 cheloha Exp $ */
+/* $OpenBSD: lapic.c,v 1.58 2023/09/17 14:50:51 cheloha Exp $ */
 /* $NetBSD: lapic.c,v 1.1.2.8 2000/02/23 06:10:50 sommerfeld Exp $ */
 /*-
@@ -327,7 +327,6 @@ lapic_initclocks(void)
 	stathz = hz;
 	profhz = stathz * 10;
 	statclock_is_randomized = 1;
-	clockintr_init(0);
 }

 extern int gettick(void);	/* XXX put in header file */


@@ -1,4 +1,4 @@
-/* $OpenBSD: clock.c,v 1.67 2023/08/23 01:55:46 cheloha Exp $ */
+/* $OpenBSD: clock.c,v 1.68 2023/09/17 14:50:51 cheloha Exp $ */
 /* $NetBSD: clock.c,v 1.39 1996/05/12 23:11:54 mycroft Exp $ */
 /*-
@@ -426,7 +426,6 @@ i8254_initclocks(void)
 	stathz = 128;
 	profhz = 1024;	/* XXX does not divide into 1 billion */
-	clockintr_init(0);
 }

 void


@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_clockintr.c,v 1.53 2023/09/15 11:48:49 deraadt Exp $ */
+/* $OpenBSD: kern_clockintr.c,v 1.56 2023/09/17 15:24:35 cheloha Exp $ */
 /*
  * Copyright (c) 2003 Dale Rahn <drahn@openbsd.org>
  * Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
@@ -31,13 +31,6 @@
 #include <sys/sysctl.h>
 #include <sys/time.h>

-/*
- * Protection for global variables in this file:
- *
- *	I	Immutable after initialization.
- */
-uint32_t clockintr_flags;	/* [I] global state + behavior flags */
-
 void clockintr_hardclock(struct clockintr *, void *, void *);
 void clockintr_schedule(struct clockintr *, uint64_t);
 void clockintr_schedule_locked(struct clockintr *, uint64_t);
@@ -50,19 +43,6 @@ void clockqueue_pend_insert(struct clockintr_queue *, struct clockintr *,
 void clockqueue_reset_intrclock(struct clockintr_queue *);
 uint64_t nsec_advance(uint64_t *, uint64_t, uint64_t);

-/*
- * Initialize global state. Set flags and compute intervals.
- */
-void
-clockintr_init(uint32_t flags)
-{
-	KASSERT(CPU_IS_PRIMARY(curcpu()));
-	KASSERT(clockintr_flags == 0);
-	KASSERT(!ISSET(flags, ~CL_FLAG_MASK));
-
-	SET(clockintr_flags, flags | CL_INIT);
-}
-
 /*
  * Ready the calling CPU for clockintr_dispatch(). If this is our
  * first time here, install the intrclock, if any, and set necessary
@@ -77,8 +57,6 @@ clockintr_cpu_init(const struct intrclock *ic)
 	struct schedstate_percpu *spc = &ci->ci_schedstate;
 	int reset_cq_intrclock = 0;

-	KASSERT(ISSET(clockintr_flags, CL_INIT));
-
 	if (ic != NULL)
 		clockqueue_intrclock_install(cq, ic);
@@ -355,10 +333,9 @@ clockintr_cancel(struct clockintr *cl)
 }

 struct clockintr *
-clockintr_establish(void *vci,
+clockintr_establish(struct cpu_info *ci,
     void (*func)(struct clockintr *, void *, void *), void *arg)
 {
-	struct cpu_info *ci = vci;
 	struct clockintr *cl;
 	struct clockintr_queue *cq = &ci->ci_queue;
@@ -370,7 +347,7 @@ clockintr_establish(void *vci,
 	cl->cl_queue = cq;

 	mtx_enter(&cq->cq_mtx);
-	TAILQ_INSERT_TAIL(&cq->cq_est, cl, cl_elink);
+	TAILQ_INSERT_TAIL(&cq->cq_all, cl, cl_alink);
 	mtx_leave(&cq->cq_mtx);

 	return cl;
 }
@@ -443,7 +420,7 @@ clockqueue_init(struct clockintr_queue *cq)
 	cq->cq_shadow.cl_queue = cq;
 	mtx_init(&cq->cq_mtx, IPL_CLOCK);
-	TAILQ_INIT(&cq->cq_est);
+	TAILQ_INIT(&cq->cq_all);
 	TAILQ_INIT(&cq->cq_pend);
 	cq->cq_gen = 1;
 	SET(cq->cq_flags, CQ_INIT);
@@ -623,7 +600,7 @@ db_show_clockintr_cpu(struct cpu_info *ci)
 	db_show_clockintr(cq->cq_running, "run", cpu);
 	TAILQ_FOREACH(elm, &cq->cq_pend, cl_plink)
 		db_show_clockintr(elm, "pend", cpu);
-	TAILQ_FOREACH(elm, &cq->cq_est, cl_elink) {
+	TAILQ_FOREACH(elm, &cq->cq_all, cl_alink) {
 		if (!ISSET(elm->cl_flags, CLST_PENDING))
 			db_show_clockintr(elm, "idle", cpu);
 	}


@@ -1,4 +1,4 @@
-/* $OpenBSD: sched_bsd.c,v 1.86 2023/09/10 03:08:05 cheloha Exp $ */
+/* $OpenBSD: sched_bsd.c,v 1.87 2023/09/17 13:02:24 cheloha Exp $ */
 /* $NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $ */
 /*-
@@ -117,9 +117,9 @@ roundrobin(struct clockintr *cl, void *cf, void *arg)
  * 1, 5, and 15 minute intervals.
  */
 void
-update_loadavg(void *arg)
+update_loadavg(void *unused)
 {
-	struct timeout *to = (struct timeout *)arg;
+	static struct timeout to = TIMEOUT_INITIALIZER(update_loadavg, NULL);
 	CPU_INFO_ITERATOR cii;
 	struct cpu_info *ci;
 	u_int i, nrun = 0;
@@ -135,7 +135,7 @@ update_loadavg(void *arg)
 		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
 	}

-	timeout_add_sec(to, 5);
+	timeout_add_sec(&to, 5);
 }

 /*
@@ -227,9 +227,9 @@ fixpt_t ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
  * Recompute process priorities, every second.
  */
 void
-schedcpu(void *arg)
+schedcpu(void *unused)
 {
-	struct timeout *to = (struct timeout *)arg;
+	static struct timeout to = TIMEOUT_INITIALIZER(schedcpu, NULL);
 	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
 	struct proc *p;
 	int s;
@@ -280,7 +280,7 @@ schedcpu(void *arg)
 		SCHED_UNLOCK(s);
 	}
 	wakeup(&lbolt);
-	timeout_add_sec(to, 1);
+	timeout_add_sec(&to, 1);
 }

 /*
@@ -726,23 +726,14 @@ sysctl_hwperfpolicy(void *oldp, size_t *oldlenp, void *newp, size_t newlen)
 }
 #endif

 /*
  * Start the scheduler's periodic timeouts.
  */
 void
 scheduler_start(void)
 {
-	static struct timeout schedcpu_to;
-	static struct timeout loadavg_to;
-
-	/*
-	 * We avoid polluting the global namespace by keeping the scheduler
-	 * timeouts static in this function.
-	 * We setup the timeout here and kick schedcpu once to make it do
-	 * its job.
-	 */
-	timeout_set(&schedcpu_to, schedcpu, &schedcpu_to);
-	timeout_set(&loadavg_to, update_loadavg, &loadavg_to);
-	schedcpu(&schedcpu_to);
-	update_loadavg(&loadavg_to);
+	schedcpu(NULL);
+	update_loadavg(NULL);

 #ifndef SMALL_KERNEL
 	if (perfpolicy == PERFPOL_AUTO)
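
The sched_bsd.c hunks above drop the caller-supplied timeout argument and instead give each periodic callback its own statically initialized, self-rearming timeout. A minimal sketch of that pattern follows; the callback name example_tick and the one-second period are hypothetical, only TIMEOUT_INITIALIZER() and timeout_add_sec() come from the change itself:

void
example_tick(void *unused)
{
	/* The timeout lives inside the callback and points back at it. */
	static struct timeout to = TIMEOUT_INITIALIZER(example_tick, NULL);

	/* ... periodic work would go here ... */

	timeout_add_sec(&to, 1);	/* re-arm one second from now */
}

A single kick such as example_tick(NULL) from a start routine both does the first round of work and arms the timeout, which is why scheduler_start() above shrinks to two plain calls with NULL arguments.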


@@ -1,4 +1,4 @@
-/* $OpenBSD: clockintr.h,v 1.17 2023/09/15 11:48:48 deraadt Exp $ */
+/* $OpenBSD: clockintr.h,v 1.20 2023/09/17 15:24:35 cheloha Exp $ */
 /*
  * Copyright (c) 2020-2022 Scott Cheloha <cheloha@openbsd.org>
  *
@@ -35,6 +35,8 @@ struct clockintr_stat {
 #include <sys/mutex.h>
 #include <sys/queue.h>

+struct cpu_info;
+
 /*
  * Platform API
  */
@@ -68,7 +70,7 @@ intrclock_trigger(struct intrclock *ic)
 struct clockintr_queue;
 struct clockintr {
 	uint64_t cl_expiration;			/* [m] dispatch time */
-	TAILQ_ENTRY(clockintr) cl_elink;	/* [m] cq_est glue */
+	TAILQ_ENTRY(clockintr) cl_alink;	/* [m] cq_all glue */
 	TAILQ_ENTRY(clockintr) cl_plink;	/* [m] cq_pend glue */
 	void *cl_arg;				/* [I] argument */
 	void (*cl_func)(struct clockintr *, void *, void *); /* [I] callback */
@@ -94,7 +96,7 @@
 	struct clockintr cq_shadow;	/* [o] copy of running clockintr */
 	struct mutex cq_mtx;		/* [a] per-queue mutex */
 	uint64_t cq_uptime;		/* [o] cached uptime */
-	TAILQ_HEAD(, clockintr) cq_est;	/* [m] established clockintr list */
+	TAILQ_HEAD(, clockintr) cq_all;	/* [m] established clockintr list */
 	TAILQ_HEAD(, clockintr) cq_pend;/* [m] pending clockintr list */
 	struct clockintr *cq_running;	/* [m] running clockintr */
 	struct clockintr *cq_hardclock;	/* [o] hardclock handle */
@@ -109,16 +111,8 @@
 #define CQ_INTRCLOCK	0x00000002	/* intrclock installed */
 #define CQ_STATE_MASK	0x00000003

-/* Global state flags. */
-#define CL_INIT		0x00000001	/* global init done */
-#define CL_STATE_MASK	0x00000001
-
-/* Global behavior flags. */
-#define CL_FLAG_MASK	0x00000000
-
 void clockintr_cpu_init(const struct intrclock *);
 int clockintr_dispatch(void *);
-void clockintr_init(uint32_t);
 void clockintr_trigger(void);

 /*
@@ -128,7 +122,7 @@ void clockintr_trigger(void);
 uint64_t clockintr_advance(struct clockintr *, uint64_t);
 uint64_t clockintr_advance_random(struct clockintr *, uint64_t, uint32_t);
 void clockintr_cancel(struct clockintr *);
-struct clockintr *clockintr_establish(void *,
+struct clockintr *clockintr_establish(struct cpu_info *,
     void (*)(struct clockintr *, void *, void *), void *);
 void clockintr_stagger(struct clockintr *, uint64_t, uint32_t, uint32_t);
 void clockqueue_init(struct clockintr_queue *);
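
Taken together with the kern_clockintr.c change, the prototype above means callers now hand clockintr_establish() a struct cpu_info * directly rather than an opaque void *. A minimal caller sketch under the new prototype; the example_ names are hypothetical, the clockintr_* calls are from the header above, and the use of clockintr_advance() to arm the handle is illustrative:

void	example_intr(struct clockintr *, void *, void *);	/* callback */

struct clockintr *example_handle;

void
example_attach_clock(struct cpu_info *ci)
{
	/* Establish a clock interrupt on ci's queue; no void * cast needed. */
	example_handle = clockintr_establish(ci, example_intr, NULL);

	/* Schedule it with a period of one tick, expressed in nanoseconds. */
	clockintr_advance(example_handle, 1000000000ULL / hz);
}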