sync code with the latest improvements from OpenBSD

purplerain 2023-09-10 18:41:05 +00:00
parent 887dd091b7
commit 010ec4e74c
Signed by: purplerain
GPG key ID: F42C07F07E2E35B7
33 changed files with 409 additions and 201 deletions

@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_clockintr.c,v 1.43 2023/09/09 16:34:39 cheloha Exp $ */
+/* $OpenBSD: kern_clockintr.c,v 1.47 2023/09/10 03:08:05 cheloha Exp $ */
 /*
  * Copyright (c) 2003 Dale Rahn <drahn@openbsd.org>
  * Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
@@ -43,10 +43,10 @@ uint32_t statclock_min;    /* [I] minimum statclock period (ns) */
 uint32_t statclock_mask;    /* [I] set of allowed offsets */
 
 uint64_t clockintr_advance_random(struct clockintr *, uint64_t, uint32_t);
-void clockintr_hardclock(struct clockintr *, void *);
+void clockintr_hardclock(struct clockintr *, void *, void *);
 void clockintr_schedule(struct clockintr *, uint64_t);
 void clockintr_schedule_locked(struct clockintr *, uint64_t);
-void clockintr_statclock(struct clockintr *, void *);
+void clockintr_statclock(struct clockintr *, void *, void *);
 void clockqueue_intrclock_install(struct clockintr_queue *,
     const struct intrclock *);
 uint64_t clockqueue_next(const struct clockintr_queue *);
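
The prototype changes above are the core of the sync: clock interrupt callbacks gain a third void * argument, supplied when the handler is established via clockintr_establish(). A minimal sketch of how a consumer would use the new API; all mydev_* names are invented for illustration and are not part of this commit:

    struct mydev_softc {                    /* invented example state */
        struct clockintr *sc_clockintr;
    };

    void
    mydev_tick(struct clockintr *cl, void *frame, void *arg)
    {
        struct mydev_softc *sc = arg;       /* private state arrives as arg */

        /* ... periodic work using sc ... */
    }

    void
    mydev_attach_clock(struct cpu_info *ci, struct mydev_softc *sc)
    {
        /* The third argument is stored in cl_arg at establish time
         * and handed back to mydev_tick() on every dispatch. */
        sc->sc_clockintr = clockintr_establish(ci, mydev_tick, sc);
        if (sc->sc_clockintr == NULL)
            panic("mydev: failed to establish clock interrupt");
    }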
@@ -114,12 +114,14 @@ clockintr_cpu_init(const struct intrclock *ic)
 
     /* TODO: Remove these from struct clockintr_queue. */
     if (cq->cq_hardclock == NULL) {
-        cq->cq_hardclock = clockintr_establish(ci, clockintr_hardclock);
+        cq->cq_hardclock = clockintr_establish(ci, clockintr_hardclock,
+            NULL);
         if (cq->cq_hardclock == NULL)
             panic("%s: failed to establish hardclock", __func__);
     }
     if (cq->cq_statclock == NULL) {
-        cq->cq_statclock = clockintr_establish(ci, clockintr_statclock);
+        cq->cq_statclock = clockintr_establish(ci, clockintr_statclock,
+            NULL);
         if (cq->cq_statclock == NULL)
             panic("%s: failed to establish statclock", __func__);
     }
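
Both in-kernel consumers pass NULL for the new argument here: clockintr_hardclock() and clockintr_statclock() carry no per-handler state, so there is nothing to thread through yet.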
@@ -219,7 +221,7 @@ clockintr_dispatch(void *frame)
 {
     uint64_t lateness, run = 0, start;
     struct cpu_info *ci = curcpu();
-    struct clockintr *cl;
+    struct clockintr *cl, *shadow;
     struct clockintr_queue *cq = &ci->ci_queue;
     uint32_t ogen;
@@ -257,24 +259,30 @@ clockintr_dispatch(void *frame)
             if (cq->cq_uptime < cl->cl_expiration)
                 break;
         }
+
+        /*
+         * This clockintr has expired.  Initialize a shadow copy
+         * and execute it.
+         */
         clockqueue_pend_delete(cq, cl);
-        cq->cq_shadow.cl_expiration = cl->cl_expiration;
-        cq->cq_shadow.cl_func = cl->cl_func;
+        shadow = &cq->cq_shadow;
+        shadow->cl_expiration = cl->cl_expiration;
+        shadow->cl_arg = cl->cl_arg;
+        shadow->cl_func = cl->cl_func;
         cq->cq_running = cl;
         mtx_leave(&cq->cq_mtx);
 
-        cq->cq_shadow.cl_func(&cq->cq_shadow, frame);
+        shadow->cl_func(shadow, frame, shadow->cl_arg);
 
         mtx_enter(&cq->cq_mtx);
         cq->cq_running = NULL;
         if (ISSET(cl->cl_flags, CLST_IGNORE_SHADOW)) {
             CLR(cl->cl_flags, CLST_IGNORE_SHADOW);
-            CLR(cq->cq_shadow.cl_flags, CLST_SHADOW_PENDING);
+            CLR(shadow->cl_flags, CLST_SHADOW_PENDING);
         }
-        if (ISSET(cq->cq_shadow.cl_flags, CLST_SHADOW_PENDING)) {
-            CLR(cq->cq_shadow.cl_flags, CLST_SHADOW_PENDING);
-            clockqueue_pend_insert(cq, cl,
-                cq->cq_shadow.cl_expiration);
+        if (ISSET(shadow->cl_flags, CLST_SHADOW_PENDING)) {
+            CLR(shadow->cl_flags, CLST_SHADOW_PENDING);
+            clockqueue_pend_insert(cq, cl, shadow->cl_expiration);
         }
         run++;
     }
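
The dispatch loop above now copies cl_arg into the per-CPU shadow along with cl_func and cl_expiration, so the handler runs with cq_mtx released while still seeing a stable argument. A handler that wants to stay periodic re-arms through the shadow: as the clockintr_advance() hunk below shows, advancing the shadow only sets CLST_SHADOW_PENDING, and the loop re-inserts the real clockintr once the handler returns. A hypothetical self-rearming handler; mydev_poll, MYDEV_PERIOD_NSEC, and struct mydev_softc are invented:

    #define MYDEV_PERIOD_NSEC 10000000      /* invented: 10 ms period */

    void
    mydev_poll(struct clockintr *cl, void *frame, void *arg)
    {
        struct mydev_softc *sc = arg;

        /* cl points at cq_shadow here; clockintr_advance() on the
         * shadow just sets CLST_SHADOW_PENDING, and the dispatch
         * loop requeues the real clockintr after we return. */
        clockintr_advance(cl, MYDEV_PERIOD_NSEC);

        /* ... poll the device state in sc ... */
    }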
@@ -326,14 +334,13 @@ clockintr_advance(struct clockintr *cl, uint64_t period)
     if (cl == &cq->cq_shadow) {
         count = nsec_advance(&cl->cl_expiration, period, cq->cq_uptime);
         SET(cl->cl_flags, CLST_SHADOW_PENDING);
-    } else {
-        mtx_enter(&cq->cq_mtx);
-        expiration = cl->cl_expiration;
-        count = nsec_advance(&expiration, period, nsecuptime());
-        clockintr_schedule_locked(cl, expiration);
-        mtx_leave(&cq->cq_mtx);
+        return count;
     }
+
+    mtx_enter(&cq->cq_mtx);
+    expiration = cl->cl_expiration;
+    count = nsec_advance(&expiration, period, nsecuptime());
+    clockintr_schedule_locked(cl, expiration);
+    mtx_leave(&cq->cq_mtx);
     return count;
 }
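
Above, the shadow fast path in clockintr_advance() now returns early instead of wrapping the locked path in an else; behavior is unchanged. clockintr_schedule() below gets the same simplification.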
@@ -385,7 +392,7 @@ clockintr_cancel(struct clockintr *cl)
 
 struct clockintr *
 clockintr_establish(struct cpu_info *ci,
-    void (*func)(struct clockintr *, void *))
+    void (*func)(struct clockintr *, void *, void *), void *arg)
 {
     struct clockintr *cl;
     struct clockintr_queue *cq = &ci->ci_queue;
@@ -393,6 +400,7 @@ clockintr_establish(struct cpu_info *ci,
     cl = malloc(sizeof *cl, M_DEVBUF, M_NOWAIT | M_ZERO);
     if (cl == NULL)
         return NULL;
+    cl->cl_arg = arg;
     cl->cl_func = func;
     cl->cl_queue = cq;
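
cl_arg is set exactly once, here at establish time; nothing else in this file writes it. clockintr_dispatch() snapshots it into the shadow before each call, so the handler always sees the value passed to clockintr_establish().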
@@ -410,12 +418,11 @@ clockintr_schedule(struct clockintr *cl, uint64_t expiration)
     if (cl == &cq->cq_shadow) {
         cl->cl_expiration = expiration;
         SET(cl->cl_flags, CLST_SHADOW_PENDING);
-    } else {
-        mtx_enter(&cq->cq_mtx);
-        clockintr_schedule_locked(cl, expiration);
-        mtx_leave(&cq->cq_mtx);
+        return;
     }
+
+    mtx_enter(&cq->cq_mtx);
+    clockintr_schedule_locked(cl, expiration);
+    mtx_leave(&cq->cq_mtx);
 }
 
 void
@@ -454,7 +461,7 @@ clockintr_stagger(struct clockintr *cl, uint64_t period, uint32_t n,
 }
 
 void
-clockintr_hardclock(struct clockintr *cl, void *frame)
+clockintr_hardclock(struct clockintr *cl, void *frame, void *arg)
 {
     uint64_t count, i;
@@ -464,7 +471,7 @@ clockintr_hardclock(struct clockintr *cl, void *frame)
 }
 
 void
-clockintr_statclock(struct clockintr *cl, void *frame)
+clockintr_statclock(struct clockintr *cl, void *frame, void *arg)
 {
     uint64_t count, i;