sync code with the latest improvements from OpenBSD

purplerain 2023-09-15 02:35:30 +00:00
parent aec8820748
commit 66d94126c9
Signed by: purplerain
GPG key ID: F42C07F07E2E35B7
18 changed files with 112 additions and 108 deletions

sys/kern/kern_clockintr.c

@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_clockintr.c,v 1.47 2023/09/10 03:08:05 cheloha Exp $ */
+/* $OpenBSD: kern_clockintr.c,v 1.52 2023/09/14 22:27:09 cheloha Exp $ */
 /*
  * Copyright (c) 2003 Dale Rahn <drahn@openbsd.org>
  * Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
@@ -37,16 +37,10 @@
  * I	Immutable after initialization.
  */
 uint32_t clockintr_flags;		/* [I] global state + behavior flags */
-uint32_t hardclock_period;		/* [I] hardclock period (ns) */
-uint32_t statclock_avg;			/* [I] average statclock period (ns) */
-uint32_t statclock_min;			/* [I] minimum statclock period (ns) */
-uint32_t statclock_mask;		/* [I] set of allowed offsets */
 
 uint64_t clockintr_advance_random(struct clockintr *, uint64_t, uint32_t);
-void clockintr_hardclock(struct clockintr *, void *, void *);
 void clockintr_schedule(struct clockintr *, uint64_t);
 void clockintr_schedule_locked(struct clockintr *, uint64_t);
-void clockintr_statclock(struct clockintr *, void *, void *);
 void clockqueue_intrclock_install(struct clockintr_queue *,
     const struct intrclock *);
 uint64_t clockqueue_next(const struct clockintr_queue *);
@@ -62,34 +56,10 @@ uint64_t nsec_advance(uint64_t *, uint64_t, uint64_t);
 
 void
 clockintr_init(uint32_t flags)
 {
-	uint32_t half_avg, var;
-
 	KASSERT(CPU_IS_PRIMARY(curcpu()));
 	KASSERT(clockintr_flags == 0);
 	KASSERT(!ISSET(flags, ~CL_FLAG_MASK));
 
-	KASSERT(hz > 0 && hz <= 1000000000);
-	hardclock_period = 1000000000 / hz;
-	roundrobin_period = hardclock_period * 10;
-
-	KASSERT(stathz >= 1 && stathz <= 1000000000);
-
-	/*
-	 * Compute the average statclock() period.  Then find var, the
-	 * largest power of two such that var <= statclock_avg / 2.
-	 */
-	statclock_avg = 1000000000 / stathz;
-	half_avg = statclock_avg / 2;
-	for (var = 1U << 31; var > half_avg; var /= 2)
-		continue;
-
-	/*
-	 * Set a lower bound for the range using statclock_avg and var.
-	 * The mask for that range is just (var - 1).
-	 */
-	statclock_min = statclock_avg - (var / 2);
-	statclock_mask = var - 1;
-
 	SET(clockintr_flags, flags | CL_INIT);
 }
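
The block deleted above derives the statclock randomization parameters from stathz. As a standalone sketch of the same arithmetic, here is the computation in ordinary user-space C, with stathz = 128 as an assumed example value (variable names mirror the kernel's):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t stathz = 128;	/* assumed example statclock frequency */
	uint32_t statclock_avg, statclock_min, statclock_mask;
	uint32_t half_avg, var;

	/* Average statclock period, in nanoseconds. */
	statclock_avg = 1000000000 / stathz;	/* 7812500 */

	/* Largest power of two such that var <= statclock_avg / 2. */
	half_avg = statclock_avg / 2;
	for (var = 1U << 31; var > half_avg; var /= 2)
		continue;			/* var = 2097152 (2^21) */

	/* Lower bound and offset mask for the randomized period. */
	statclock_min = statclock_avg - (var / 2);	/* 6763924 */
	statclock_mask = var - 1;			/* 0x1fffff */

	printf("avg %u min %u mask %#x\n",
	    statclock_avg, statclock_min, statclock_mask);
	return 0;
}

Randomized periods are drawn from [statclock_min, statclock_min + statclock_mask], a range centered on statclock_avg, so randomization leaves the long-run statclock rate unchanged.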
@@ -112,19 +82,13 @@ clockintr_cpu_init(const struct intrclock *ic)
 	if (ic != NULL)
 		clockqueue_intrclock_install(cq, ic);
 
-	/* TODO: Remove these from struct clockintr_queue. */
+	/* TODO: Remove this from struct clockintr_queue. */
 	if (cq->cq_hardclock == NULL) {
 		cq->cq_hardclock = clockintr_establish(ci, clockintr_hardclock,
 		    NULL);
 		if (cq->cq_hardclock == NULL)
 			panic("%s: failed to establish hardclock", __func__);
 	}
-	if (cq->cq_statclock == NULL) {
-		cq->cq_statclock = clockintr_establish(ci, clockintr_statclock,
-		    NULL);
-		if (cq->cq_statclock == NULL)
-			panic("%s: failed to establish statclock", __func__);
-	}
 
 	/*
 	 * Mask CQ_INTRCLOCK while we're advancing the internal clock
@@ -169,13 +133,13 @@ clockintr_cpu_init(const struct intrclock *ic)
 	 * We can always advance the statclock. There is no reason to
 	 * stagger a randomized statclock.
 	 */
-	if (!ISSET(clockintr_flags, CL_RNDSTAT)) {
-		if (cq->cq_statclock->cl_expiration == 0) {
-			clockintr_stagger(cq->cq_statclock, statclock_avg,
+	if (!statclock_is_randomized) {
+		if (spc->spc_statclock->cl_expiration == 0) {
+			clockintr_stagger(spc->spc_statclock, statclock_avg,
 			    multiplier, MAXCPUS);
 		}
 	}
-	clockintr_advance(cq->cq_statclock, statclock_avg);
+	clockintr_advance(spc->spc_statclock, statclock_avg);
 
 	/*
 	 * XXX Need to find a better place to do this. We can't do it in
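
This hunk, together with the one above that drops the cq_statclock establishment, reflects two moves visible in the new code: the statclock handle leaves struct clockintr_queue (cq_statclock) for per-CPU scheduler state (spc->spc_statclock), and the CL_RNDSTAT flag gives way to a statclock_is_randomized global. The surviving clockintr_stagger() call offsets each CPU's first statclock expiration so the interrupts do not fire in lockstep; here is a minimal sketch of that offset arithmetic, assuming the offset works out to period / MAXCPUS * multiplier (an inference from the argument order, not something this diff shows):

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_MAXCPUS	8	/* stand-in for the kernel's MAXCPUS */

int
main(void)
{
	uint64_t statclock_avg = 7812500;	/* ns; stathz = 128 */
	uint32_t multiplier;

	/* Spread first expirations evenly across one average period. */
	for (multiplier = 0; multiplier < EXAMPLE_MAXCPUS; multiplier++) {
		uint64_t offset = statclock_avg / EXAMPLE_MAXCPUS * multiplier;
		printf("cpu%u: first statclock at +%llu ns\n",
		    multiplier, (unsigned long long)offset);
	}
	return 0;
}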
@@ -470,21 +434,6 @@ clockintr_hardclock(struct clockintr *cl, void *frame, void *arg)
 	hardclock(frame);
 }
 
-void
-clockintr_statclock(struct clockintr *cl, void *frame, void *arg)
-{
-	uint64_t count, i;
-
-	if (ISSET(clockintr_flags, CL_RNDSTAT)) {
-		count = clockintr_advance_random(cl, statclock_min,
-		    statclock_mask);
-	} else {
-		count = clockintr_advance(cl, statclock_avg);
-	}
-	for (i = 0; i < count; i++)
-		statclock(frame);
-}
-
 void
 clockqueue_init(struct clockintr_queue *cq)
 {
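
clockintr_statclock() leaves this file entirely, presumably for one of the other 17 files in the commit (only this file's diff is shown here). The removed body drew each statclock period uniformly from [statclock_min, statclock_min + statclock_mask] when randomization was enabled. A user-space model of that draw, reusing the stathz = 128 example values computed above, with libc's random() standing in for the kernel's entropy source:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	uint64_t statclock_min = 6763924;	/* ns; stathz = 128 */
	uint32_t statclock_mask = 0x1fffff;
	uint64_t i, sum = 0, trials = 1000000;

	srandom(12345);
	for (i = 0; i < trials; i++) {
		/* Uniform offset in [0, statclock_mask]. */
		sum += statclock_min + (random() & statclock_mask);
	}

	/* The mean lands near statclock_avg (7812500 ns). */
	printf("mean randomized period: %llu ns\n",
	    (unsigned long long)(sum / trials));
	return 0;
}

Because statclock_mask + 1 is a power of two, the bitwise AND yields an unbiased offset, and the long-run average period stays at statclock_avg.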