sync with OpenBSD -current

This commit is contained in:
purplerain 2024-02-09 22:30:40 +00:00
parent d68c02c8a8
commit 550dd57439
Signed by: purplerain
GPG key ID: F42C07F07E2E35B7
39 changed files with 1599 additions and 402 deletions

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: kern_clock.c,v 1.121 2023/10/17 00:04:02 cheloha Exp $ */
/* $OpenBSD: kern_clock.c,v 1.122 2024/02/09 17:42:18 cheloha Exp $ */
/* $NetBSD: kern_clock.c,v 1.34 1996/06/09 04:51:03 briggs Exp $ */
/*-
@@ -50,11 +50,6 @@
#include <sys/sched.h>
#include <sys/timetc.h>
#include "dt.h"
#if NDT > 0
#include <dev/dt/dtvar.h>
#endif
/*
* Clock handling routines.
*
@@ -145,12 +140,6 @@ initclocks(void)
void
hardclock(struct clockframe *frame)
{
#if NDT > 0
DT_ENTER(profile, NULL);
if (CPU_IS_PRIMARY(curcpu()))
DT_ENTER(interval, NULL);
#endif
/*
* If we are not the primary CPU, we're not allowed to do
* any more work.

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: kern_clockintr.c,v 1.64 2024/01/24 19:23:38 cheloha Exp $ */
/* $OpenBSD: kern_clockintr.c,v 1.66 2024/02/09 16:52:58 cheloha Exp $ */
/*
* Copyright (c) 2003 Dale Rahn <drahn@openbsd.org>
* Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
@@ -31,6 +31,7 @@
#include <sys/sysctl.h>
#include <sys/time.h>
void clockintr_cancel_locked(struct clockintr *);
void clockintr_hardclock(struct clockrequest *, void *, void *);
void clockintr_schedule_locked(struct clockintr *, uint64_t);
void clockqueue_intrclock_install(struct clockintr_queue *,
@@ -226,6 +227,12 @@ clockintr_dispatch(void *frame)
CLR(request->cr_flags, CR_RESCHEDULE);
clockqueue_pend_insert(cq, cl, request->cr_expiration);
}
if (ISSET(cq->cq_flags, CQ_NEED_WAKEUP)) {
CLR(cq->cq_flags, CQ_NEED_WAKEUP);
mtx_leave(&cq->cq_mtx);
wakeup(&cq->cq_running);
mtx_enter(&cq->cq_mtx);
}
run++;
}
@@ -317,9 +324,20 @@ void
clockintr_cancel(struct clockintr *cl)
{
	struct clockintr_queue *cq = cl->cl_queue;

	/*
	 * Thin locked wrapper: take the queue mutex and defer to the
	 * _locked variant, which handles the pending/rescheduling
	 * state.  The "was_next" local was dead here — the head-of-
	 * queue bookkeeping that uses it lives in
	 * clockintr_cancel_locked() — so it is removed.
	 */
	mtx_enter(&cq->cq_mtx);
	clockintr_cancel_locked(cl);
	mtx_leave(&cq->cq_mtx);
}
void
clockintr_cancel_locked(struct clockintr *cl)
{
struct clockintr_queue *cq = cl->cl_queue;
int was_next;
MUTEX_ASSERT_LOCKED(&cq->cq_mtx);
if (ISSET(cl->cl_flags, CLST_PENDING)) {
was_next = cl == TAILQ_FIRST(&cq->cq_pend);
clockqueue_pend_delete(cq, cl);
@@ -332,7 +350,6 @@ clockintr_cancel(struct clockintr *cl)
}
if (cl == cq->cq_running)
SET(cq->cq_flags, CQ_IGNORE_REQUEST);
mtx_leave(&cq->cq_mtx);
}
void
@@ -341,15 +358,41 @@ clockintr_bind(struct clockintr *cl, struct cpu_info *ci,
{
struct clockintr_queue *cq = &ci->ci_queue;
splassert(IPL_NONE);
KASSERT(cl->cl_queue == NULL);
mtx_enter(&cq->cq_mtx);
cl->cl_arg = arg;
cl->cl_func = func;
cl->cl_queue = cq;
mtx_enter(&cq->cq_mtx);
TAILQ_INSERT_TAIL(&cq->cq_all, cl, cl_alink);
mtx_leave(&cq->cq_mtx);
}
/*
 * Detach cl from its clock interrupt queue and reset it so it can be
 * rebound (clockintr_bind() asserts cl_queue == NULL).  If CL_BARRIER
 * is set and cl's handler is currently running on its CPU, sleep until
 * the dispatcher signals completion before returning.
 */
void
clockintr_unbind(struct clockintr *cl, uint32_t flags)
{
struct clockintr_queue *cq = cl->cl_queue;
/* Only bits within the public CL_* flag mask are accepted. */
KASSERT(!ISSET(flags, ~CL_FLAG_MASK));
mtx_enter(&cq->cq_mtx);
/* Remove any pending expiration while we hold cq_mtx. */
clockintr_cancel_locked(cl);
/* Reset the clockintr so it may be bound to a queue again. */
cl->cl_arg = NULL;
cl->cl_func = NULL;
cl->cl_queue = NULL;
TAILQ_REMOVE(&cq->cq_all, cl, cl_alink);
if (ISSET(flags, CL_BARRIER) && cl == cq->cq_running) {
/*
 * Barrier: the dispatcher is mid-callback for cl.  Ask it
 * to wake us (it clears CQ_NEED_WAKEUP and wakeup()s
 * cq_running when the callback finishes) and sleep.
 * PNORELOCK: msleep_nsec() drops cq_mtx and does NOT
 * reacquire it on wakeup, so no mtx_leave() on this path.
 */
SET(cq->cq_flags, CQ_NEED_WAKEUP);
msleep_nsec(&cq->cq_running, &cq->cq_mtx, PWAIT | PNORELOCK,
"clkbar", INFSLP);
} else
mtx_leave(&cq->cq_mtx);
}
void
clockintr_schedule(struct clockintr *cl, uint64_t expiration)
{