sync with OpenBSD -current

purplerain 2024-01-25 02:58:42 +00:00
parent 029e4bda60
commit 125fcc4eee
Signed by: purplerain
GPG key ID: F42C07F07E2E35B7
28 changed files with 244 additions and 238 deletions
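
The payload of this sync, visible in every hunk below, is OpenBSD's switch from callee-allocated to caller-allocated clock interrupt handles: clockintr_establish(), which malloc'd a struct clockintr and could return NULL, is replaced by clockintr_bind(), which initializes a struct clockintr embedded in a caller-owned structure (cq_hardclock, spc_itimer, spc_profclock, spc_roundrobin, spc_statclock, ci_gmonclock). Callers drop the pointer indirection, pass &obj->member instead, and lose their NULL checks and panic calls, since binding pre-existing storage cannot fail.

A minimal userland sketch of the before/after pattern (all names here are illustrative, not kernel API):

    #include <stdio.h>
    #include <stdlib.h>

    struct handle {
        void (*func)(void *);
        void *arg;
    };

    /* Old style: the callee allocates the handle and may fail. */
    static struct handle *
    handle_establish(void (*func)(void *), void *arg)
    {
        struct handle *h = calloc(1, sizeof(*h));

        if (h == NULL)
            return NULL;
        h->func = func;
        h->arg = arg;
        return h;
    }

    /* New style: the caller owns the storage; binding cannot fail. */
    static void
    handle_bind(struct handle *h, void (*func)(void *), void *arg)
    {
        h->func = func;
        h->arg = arg;
    }

    /* The handle is embedded in a larger structure, as spc_itimer
     * and friends are embedded after this sync. */
    struct percpu_state {
        struct handle timer;
    };

    static void
    tick(void *arg)
    {
        (void)arg;
        puts("tick");
    }

    int
    main(void)
    {
        /* Before: allocate, check for failure, remember to free. */
        struct handle *old = handle_establish(tick, NULL);
        if (old == NULL)
            return 1;
        old->func(old->arg);
        free(old);

        /* After: no allocation and no failure path. */
        struct percpu_state pcs = {0};
        handle_bind(&pcs.timer, tick, NULL);
        pcs.timer.func(pcs.timer.arg);
        return 0;
    }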

sys/kern/kern_clockintr.c

@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_clockintr.c,v 1.63 2024/01/15 01:15:37 cheloha Exp $ */
+/* $OpenBSD: kern_clockintr.c,v 1.64 2024/01/24 19:23:38 cheloha Exp $ */
 /*
  * Copyright (c) 2003 Dale Rahn <drahn@openbsd.org>
  * Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
@@ -62,11 +62,9 @@ clockintr_cpu_init(const struct intrclock *ic)
         clockqueue_intrclock_install(cq, ic);
 
     /* TODO: Remove this from struct clockintr_queue. */
-    if (cq->cq_hardclock == NULL) {
-        cq->cq_hardclock = clockintr_establish(ci, clockintr_hardclock,
+    if (cq->cq_hardclock.cl_expiration == 0) {
+        clockintr_bind(&cq->cq_hardclock, ci, clockintr_hardclock,
             NULL);
-        if (cq->cq_hardclock == NULL)
-            panic("%s: failed to establish hardclock", __func__);
     }
 
     /*
/*
@@ -96,16 +94,16 @@ clockintr_cpu_init(const struct intrclock *ic)
      * behalf.
      */
     if (CPU_IS_PRIMARY(ci)) {
-        if (cq->cq_hardclock->cl_expiration == 0)
-            clockintr_schedule(cq->cq_hardclock, 0);
+        if (cq->cq_hardclock.cl_expiration == 0)
+            clockintr_schedule(&cq->cq_hardclock, 0);
         else
-            clockintr_advance(cq->cq_hardclock, hardclock_period);
+            clockintr_advance(&cq->cq_hardclock, hardclock_period);
     } else {
-        if (cq->cq_hardclock->cl_expiration == 0) {
-            clockintr_stagger(cq->cq_hardclock, hardclock_period,
+        if (cq->cq_hardclock.cl_expiration == 0) {
+            clockintr_stagger(&cq->cq_hardclock, hardclock_period,
                 multiplier, MAXCPUS);
         }
-        clockintr_advance(cq->cq_hardclock, hardclock_period);
+        clockintr_advance(&cq->cq_hardclock, hardclock_period);
     }
 
     /*
@@ -113,30 +111,30 @@ clockintr_cpu_init(const struct intrclock *ic)
      * stagger a randomized statclock.
      */
     if (!statclock_is_randomized) {
-        if (spc->spc_statclock->cl_expiration == 0) {
-            clockintr_stagger(spc->spc_statclock, statclock_avg,
+        if (spc->spc_statclock.cl_expiration == 0) {
+            clockintr_stagger(&spc->spc_statclock, statclock_avg,
                 multiplier, MAXCPUS);
         }
     }
-    clockintr_advance(spc->spc_statclock, statclock_avg);
+    clockintr_advance(&spc->spc_statclock, statclock_avg);
 
     /*
      * XXX Need to find a better place to do this. We can't do it in
      * sched_init_cpu() because initclocks() runs after it.
      */
-    if (spc->spc_itimer->cl_expiration == 0) {
-        clockintr_stagger(spc->spc_itimer, hardclock_period,
+    if (spc->spc_itimer.cl_expiration == 0) {
+        clockintr_stagger(&spc->spc_itimer, hardclock_period,
             multiplier, MAXCPUS);
     }
-    if (spc->spc_profclock->cl_expiration == 0) {
-        clockintr_stagger(spc->spc_profclock, profclock_period,
+    if (spc->spc_profclock.cl_expiration == 0) {
+        clockintr_stagger(&spc->spc_profclock, profclock_period,
             multiplier, MAXCPUS);
     }
-    if (spc->spc_roundrobin->cl_expiration == 0) {
-        clockintr_stagger(spc->spc_roundrobin, hardclock_period,
+    if (spc->spc_roundrobin.cl_expiration == 0) {
+        clockintr_stagger(&spc->spc_roundrobin, hardclock_period,
             multiplier, MAXCPUS);
     }
-    clockintr_advance(spc->spc_roundrobin, roundrobin_period);
+    clockintr_advance(&spc->spc_roundrobin, roundrobin_period);
 
     if (reset_cq_intrclock)
         SET(cq->cq_flags, CQ_INTRCLOCK);
@@ -337,16 +335,12 @@ clockintr_cancel(struct clockintr *cl)
     mtx_leave(&cq->cq_mtx);
 }
 
-struct clockintr *
-clockintr_establish(struct cpu_info *ci,
+void
+clockintr_bind(struct clockintr *cl, struct cpu_info *ci,
     void (*func)(struct clockrequest *, void *, void *), void *arg)
 {
-    struct clockintr *cl;
     struct clockintr_queue *cq = &ci->ci_queue;
 
-    cl = malloc(sizeof *cl, M_DEVBUF, M_NOWAIT | M_ZERO);
-    if (cl == NULL)
-        return NULL;
     cl->cl_arg = arg;
     cl->cl_func = func;
     cl->cl_queue = cq;
@@ -354,7 +348,6 @@ clockintr_establish(struct cpu_info *ci,
     mtx_enter(&cq->cq_mtx);
     TAILQ_INSERT_TAIL(&cq->cq_all, cl, cl_alink);
     mtx_leave(&cq->cq_mtx);
-    return cl;
 }
 
 void

sys/kern/kern_fork.c

@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_fork.c,v 1.256 2024/01/19 01:43:26 bluhm Exp $ */
+/* $OpenBSD: kern_fork.c,v 1.257 2024/01/24 19:23:38 cheloha Exp $ */
 /* $NetBSD: kern_fork.c,v 1.29 1996/02/09 18:59:34 christos Exp $ */
 
 /*
@@ -704,11 +704,11 @@ proc_trampoline_mi(void)
     /* Start any optional clock interrupts needed by the thread. */
     if (ISSET(p->p_p->ps_flags, PS_ITIMER)) {
         atomic_setbits_int(&spc->spc_schedflags, SPCF_ITIMER);
-        clockintr_advance(spc->spc_itimer, hardclock_period);
+        clockintr_advance(&spc->spc_itimer, hardclock_period);
     }
     if (ISSET(p->p_p->ps_flags, PS_PROFIL)) {
         atomic_setbits_int(&spc->spc_schedflags, SPCF_PROFCLOCK);
-        clockintr_advance(spc->spc_profclock, profclock_period);
+        clockintr_advance(&spc->spc_profclock, profclock_period);
     }
 
     nanouptime(&spc->spc_runtime);

sys/kern/kern_sched.c

@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_sched.c,v 1.93 2023/10/24 13:20:11 claudio Exp $ */
+/* $OpenBSD: kern_sched.c,v 1.94 2024/01/24 19:23:38 cheloha Exp $ */
 /*
  * Copyright (c) 2007, 2008 Artur Grabowski <art@openbsd.org>
  *
@@ -88,18 +88,10 @@ sched_init_cpu(struct cpu_info *ci)
 
     spc->spc_idleproc = NULL;
 
-    spc->spc_itimer = clockintr_establish(ci, itimer_update, NULL);
-    if (spc->spc_itimer == NULL)
-        panic("%s: clockintr_establish itimer_update", __func__);
-    spc->spc_profclock = clockintr_establish(ci, profclock, NULL);
-    if (spc->spc_profclock == NULL)
-        panic("%s: clockintr_establish profclock", __func__);
-    spc->spc_roundrobin = clockintr_establish(ci, roundrobin, NULL);
-    if (spc->spc_roundrobin == NULL)
-        panic("%s: clockintr_establish roundrobin", __func__);
-    spc->spc_statclock = clockintr_establish(ci, statclock, NULL);
-    if (spc->spc_statclock == NULL)
-        panic("%s: clockintr_establish statclock", __func__);
+    clockintr_bind(&spc->spc_itimer, ci, itimer_update, NULL);
+    clockintr_bind(&spc->spc_profclock, ci, profclock, NULL);
+    clockintr_bind(&spc->spc_roundrobin, ci, roundrobin, NULL);
+    clockintr_bind(&spc->spc_statclock, ci, statclock, NULL);
 
     kthread_create_deferred(sched_kthreads_create, ci);
 
@@ -244,11 +236,11 @@ sched_toidle(void)
 
     if (ISSET(spc->spc_schedflags, SPCF_ITIMER)) {
         atomic_clearbits_int(&spc->spc_schedflags, SPCF_ITIMER);
-        clockintr_cancel(spc->spc_itimer);
+        clockintr_cancel(&spc->spc_itimer);
     }
     if (ISSET(spc->spc_schedflags, SPCF_PROFCLOCK)) {
         atomic_clearbits_int(&spc->spc_schedflags, SPCF_PROFCLOCK);
-        clockintr_cancel(spc->spc_profclock);
+        clockintr_cancel(&spc->spc_profclock);
     }
 
     atomic_clearbits_int(&spc->spc_schedflags, SPCF_SWITCHCLEAR);

sys/kern/sched_bsd.c

@@ -1,4 +1,4 @@
-/* $OpenBSD: sched_bsd.c,v 1.89 2023/10/17 00:04:02 cheloha Exp $ */
+/* $OpenBSD: sched_bsd.c,v 1.90 2024/01/24 19:23:38 cheloha Exp $ */
 /* $NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $ */
 
 /*-
@@ -396,11 +396,11 @@ mi_switch(void)
     /* Stop any optional clock interrupts. */
     if (ISSET(spc->spc_schedflags, SPCF_ITIMER)) {
         atomic_clearbits_int(&spc->spc_schedflags, SPCF_ITIMER);
-        clockintr_cancel(spc->spc_itimer);
+        clockintr_cancel(&spc->spc_itimer);
     }
     if (ISSET(spc->spc_schedflags, SPCF_PROFCLOCK)) {
         atomic_clearbits_int(&spc->spc_schedflags, SPCF_PROFCLOCK);
-        clockintr_cancel(spc->spc_profclock);
+        clockintr_cancel(&spc->spc_profclock);
     }
 
     /*
@@ -451,11 +451,11 @@ mi_switch(void)
     /* Start any optional clock interrupts needed by the thread. */
     if (ISSET(p->p_p->ps_flags, PS_ITIMER)) {
         atomic_setbits_int(&spc->spc_schedflags, SPCF_ITIMER);
-        clockintr_advance(spc->spc_itimer, hardclock_period);
+        clockintr_advance(&spc->spc_itimer, hardclock_period);
     }
     if (ISSET(p->p_p->ps_flags, PS_PROFIL)) {
         atomic_setbits_int(&spc->spc_schedflags, SPCF_PROFCLOCK);
-        clockintr_advance(spc->spc_profclock, profclock_period);
+        clockintr_advance(&spc->spc_profclock, profclock_period);
     }
 
     nanouptime(&spc->spc_runtime);

sys/kern/subr_prof.c

@@ -1,4 +1,4 @@
-/* $OpenBSD: subr_prof.c,v 1.40 2023/10/17 00:04:02 cheloha Exp $ */
+/* $OpenBSD: subr_prof.c,v 1.41 2024/01/24 19:23:38 cheloha Exp $ */
 /* $NetBSD: subr_prof.c,v 1.12 1996/04/22 01:38:50 christos Exp $ */
 
 /*-
@@ -101,19 +101,16 @@ prof_init(void)
 
     /* Allocate and initialize one profiling buffer per CPU. */
     CPU_INFO_FOREACH(cii, ci) {
-        ci->ci_gmonclock = clockintr_establish(ci, gmonclock, NULL);
-        if (ci->ci_gmonclock == NULL) {
-            printf("%s: clockintr_establish gmonclock\n", __func__);
-            return;
-        }
-        clockintr_stagger(ci->ci_gmonclock, profclock_period,
-            CPU_INFO_UNIT(ci), MAXCPUS);
         cp = km_alloc(round_page(size), &kv_any, &kp_zero, &kd_nowait);
         if (cp == NULL) {
            printf("No memory for profiling.\n");
            return;
         }
+
+        clockintr_bind(&ci->ci_gmonclock, ci, gmonclock, NULL);
+        clockintr_stagger(&ci->ci_gmonclock, profclock_period,
+            CPU_INFO_UNIT(ci), MAXCPUS);
 
         p = (struct gmonparam *)cp;
         cp += sizeof(*p);
         p->tos = (struct tostruct *)cp;
@@ -159,7 +156,7 @@ prof_state_toggle(struct cpu_info *ci, int oldstate)
         if (error == 0) {
             if (++gmon_cpu_count == 1)
                 startprofclock(&process0);
-            clockintr_advance(ci->ci_gmonclock, profclock_period);
+            clockintr_advance(&ci->ci_gmonclock, profclock_period);
         }
         break;
     default:
@@ -167,7 +164,7 @@ prof_state_toggle(struct cpu_info *ci, int oldstate)
         gp->state = GMON_PROF_OFF;
         /* FALLTHROUGH */
     case GMON_PROF_OFF:
-        clockintr_cancel(ci->ci_gmonclock);
+        clockintr_cancel(&ci->ci_gmonclock);
         if (--gmon_cpu_count == 0)
             stopprofclock(&process0);
 #if !defined(GPROF)