sync with OpenBSD -current

This commit is contained in:
purplerain 2024-07-08 18:11:41 +00:00
parent b97c2ce374
commit d93a7459f8
Signed by: purplerain
GPG key ID: F42C07F07E2E35B7
97 changed files with 717 additions and 833 deletions

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: kern_acct.c,v 1.48 2024/04/13 23:44:11 jsg Exp $ */
/* $OpenBSD: kern_acct.c,v 1.49 2024/07/08 13:17:11 claudio Exp $ */
/* $NetBSD: kern_acct.c,v 1.42 1996/02/04 02:15:12 christos Exp $ */
/*-
@@ -169,6 +169,7 @@ acct_process(struct proc *p)
struct acct acct;
struct process *pr = p->p_p;
struct rusage *r;
struct tusage tu;
struct timespec booted, elapsed, realstart, st, tmp, uptime, ut;
int t;
struct vnode *vp;
@@ -196,7 +197,8 @@ acct_process(struct proc *p)
memcpy(acct.ac_comm, pr->ps_comm, sizeof acct.ac_comm);
/* (2) The amount of user and system time that was used */
calctsru(&pr->ps_tu, &ut, &st, NULL);
tuagg_get_process(&tu, pr);
calctsru(&tu, &ut, &st, NULL);
acct.ac_utime = encode_comp_t(ut.tv_sec, ut.tv_nsec);
acct.ac_stime = encode_comp_t(st.tv_sec, st.tv_nsec);
@@ -231,7 +233,7 @@ acct_process(struct proc *p)
else
acct.ac_tty = -1;
/* (8) The boolean flags that tell how process terminated or misbehaved. */
/* (8) The flags that tell how process terminated or misbehaved. */
acct.ac_flag = pr->ps_acflag;
/* Extensions */

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: kern_clock.c,v 1.123 2024/02/12 22:07:33 cheloha Exp $ */
/* $OpenBSD: kern_clock.c,v 1.124 2024/07/08 13:17:11 claudio Exp $ */
/* $NetBSD: kern_clock.c,v 1.34 1996/06/09 04:51:03 briggs Exp $ */
/*-
@@ -281,7 +281,9 @@ statclock(struct clockrequest *cr, void *cf, void *arg)
* Came from user mode; CPU was in user state.
* If this process is being profiled record the tick.
*/
p->p_uticks += count;
tu_enter(&p->p_tu);
p->p_tu.tu_uticks += count;
tu_leave(&p->p_tu);
if (pr->ps_nice > NZERO)
spc->spc_cp_time[CP_NICE] += count;
else
@@ -301,12 +303,17 @@ statclock(struct clockrequest *cr, void *cf, void *arg)
* in ``non-process'' (i.e., interrupt) work.
*/
if (CLKF_INTR(frame)) {
if (p != NULL)
p->p_iticks += count;
if (p != NULL) {
tu_enter(&p->p_tu);
p->p_tu.tu_iticks += count;
tu_leave(&p->p_tu);
}
spc->spc_cp_time[spc->spc_spinning ?
CP_SPIN : CP_INTR] += count;
} else if (p != NULL && p != spc->spc_idleproc) {
p->p_sticks += count;
tu_enter(&p->p_tu);
p->p_tu.tu_sticks += count;
tu_leave(&p->p_tu);
spc->spc_cp_time[spc->spc_spinning ?
CP_SPIN : CP_SYS] += count;
} else

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: kern_exec.c,v 1.255 2024/04/02 08:39:16 deraadt Exp $ */
/* $OpenBSD: kern_exec.c,v 1.256 2024/07/08 13:17:12 claudio Exp $ */
/* $NetBSD: kern_exec.c,v 1.75 1996/02/09 18:59:28 christos Exp $ */
/*-
@@ -699,6 +699,7 @@ sys_execve(struct proc *p, void *v, register_t *retval)
/* reset CPU time usage for the thread, but not the process */
timespecclear(&p->p_tu.tu_runtime);
p->p_tu.tu_uticks = p->p_tu.tu_sticks = p->p_tu.tu_iticks = 0;
p->p_tu.tu_gen = 0;
memset(p->p_name, 0, sizeof p->p_name);

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: kern_exit.c,v 1.222 2024/06/03 12:48:25 claudio Exp $ */
/* $OpenBSD: kern_exit.c,v 1.224 2024/07/08 13:17:12 claudio Exp $ */
/* $NetBSD: kern_exit.c,v 1.39 1996/04/22 01:38:25 christos Exp $ */
/*
@@ -118,7 +118,7 @@ exit1(struct proc *p, int xexit, int xsig, int flags)
{
struct process *pr, *qr, *nqr;
struct rusage *rup;
struct timespec ts;
struct timespec ts, pts;
atomic_setbits_int(&p->p_flag, P_WEXIT);
@@ -168,6 +168,19 @@ exit1(struct proc *p, int xexit, int xsig, int flags)
wakeup(&pr->ps_singlecnt);
}
/* proc is off ps_threads list so update accounting of process now */
nanouptime(&ts);
if (timespeccmp(&ts, &curcpu()->ci_schedstate.spc_runtime, <))
timespecclear(&pts);
else
timespecsub(&ts, &curcpu()->ci_schedstate.spc_runtime, &pts);
tu_enter(&p->p_tu);
timespecadd(&p->p_tu.tu_runtime, &pts, &p->p_tu.tu_runtime);
tu_leave(&p->p_tu);
/* adjust spc_runtime to not double account the runtime from above */
curcpu()->ci_schedstate.spc_runtime = ts;
tuagg_add_process(p->p_p, p);
if ((p->p_flag & P_THREAD) == 0) {
/* main thread gotta wait because it has the pid, et al */
while (pr->ps_threadcnt + pr->ps_exitcnt > 1)
@@ -323,14 +336,6 @@ exit1(struct proc *p, int xexit, int xsig, int flags)
/* add thread's accumulated rusage into the process's total */
ruadd(rup, &p->p_ru);
nanouptime(&ts);
if (timespeccmp(&ts, &curcpu()->ci_schedstate.spc_runtime, <))
timespecclear(&ts);
else
timespecsub(&ts, &curcpu()->ci_schedstate.spc_runtime, &ts);
SCHED_LOCK();
tuagg_locked(pr, p, &ts);
SCHED_UNLOCK();
/*
* clear %cpu usage during swap
@@ -340,7 +345,7 @@ exit1(struct proc *p, int xexit, int xsig, int flags)
if ((p->p_flag & P_THREAD) == 0) {
/*
* Final thread has died, so add on our children's rusage
* and calculate the total times
* and calculate the total times.
*/
calcru(&pr->ps_tu, &rup->ru_utime, &rup->ru_stime, NULL);
ruadd(rup, &pr->ps_cru);
@@ -358,7 +363,7 @@ exit1(struct proc *p, int xexit, int xsig, int flags)
}
}
/* just a thread? detach it from its process */
/* just a thread? check if last one standing. */
if (p->p_flag & P_THREAD) {
/* scheduler_wait_hook(pr->ps_mainproc, p); XXX */
mtx_enter(&pr->ps_mtx);
@@ -398,12 +403,8 @@ struct mutex deadproc_mutex =
struct proclist deadproc = LIST_HEAD_INITIALIZER(deadproc);
/*
* We are called from cpu_exit() once it is safe to schedule the
* dead process's resources to be freed.
*
* NOTE: One must be careful with locking in this routine. It's
* called from a critical section in machine-dependent code, so
* we should refrain from changing any interrupt state.
* We are called from sched_idle() once it is safe to schedule the
* dead process's resources to be freed. So this is not allowed to sleep.
*
* We lock the deadproc list, place the proc on that list (using
* the p_hash member), and wake up the reaper.
@@ -411,6 +412,11 @@ struct proclist deadproc = LIST_HEAD_INITIALIZER(deadproc);
void
exit2(struct proc *p)
{
/* account the remainder of time spent in exit1() */
mtx_enter(&p->p_p->ps_mtx);
tuagg_add_process(p->p_p, p);
mtx_leave(&p->p_p->ps_mtx);
mtx_enter(&deadproc_mutex);
LIST_INSERT_HEAD(&deadproc, p, p_hash);
mtx_leave(&deadproc_mutex);

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: kern_proc.c,v 1.98 2024/05/20 10:32:20 claudio Exp $ */
/* $OpenBSD: kern_proc.c,v 1.99 2024/07/08 13:17:12 claudio Exp $ */
/* $NetBSD: kern_proc.c,v 1.14 1996/02/09 18:59:41 christos Exp $ */
/*
@@ -503,9 +503,9 @@ proc_printit(struct proc *p, const char *modif,
(*pr)(" process=%p user=%p, vmspace=%p\n",
p->p_p, p->p_addr, p->p_vmspace);
(*pr)(" estcpu=%u, cpticks=%d, pctcpu=%u.%u, "
"user=%u, sys=%u, intr=%u\n",
"user=%llu, sys=%llu, intr=%llu\n",
p->p_estcpu, p->p_cpticks, p->p_pctcpu / 100, p->p_pctcpu % 100,
p->p_uticks, p->p_sticks, p->p_iticks);
p->p_tu.tu_uticks, p->p_tu.tu_sticks, p->p_tu.tu_iticks);
}
#include <machine/db_machdep.h>

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: kern_resource.c,v 1.84 2024/06/03 12:48:25 claudio Exp $ */
/* $OpenBSD: kern_resource.c,v 1.85 2024/07/08 13:17:12 claudio Exp $ */
/* $NetBSD: kern_resource.c,v 1.38 1996/10/23 07:19:38 matthias Exp $ */
/*-
@@ -64,7 +64,7 @@ struct plimit *lim_copy(struct plimit *);
struct plimit *lim_write_begin(void);
void lim_write_commit(struct plimit *);
void tuagg_sub(struct tusage *, struct proc *, const struct timespec *);
void tuagg_sumup(struct tusage *, const struct tusage *);
/*
* Patchable maximum data and stack limits.
@@ -368,36 +368,80 @@ sys_getrlimit(struct proc *p, void *v, register_t *retval)
return (error);
}
/* Add the counts from *from to *tu, ensuring a consistent read of *from. */
void
tuagg_sub(struct tusage *tup, struct proc *p, const struct timespec *ts)
tuagg_sumup(struct tusage *tu, const struct tusage *from)
{
if (ts != NULL)
timespecadd(&tup->tu_runtime, ts, &tup->tu_runtime);
tup->tu_uticks += p->p_uticks;
tup->tu_sticks += p->p_sticks;
tup->tu_iticks += p->p_iticks;
struct tusage tmp;
uint64_t enter, leave;
enter = from->tu_gen;
for (;;) {
/* the generation number is odd during an update */
while (enter & 1) {
CPU_BUSY_CYCLE();
enter = from->tu_gen;
}
membar_consumer();
tmp = *from;
membar_consumer();
leave = from->tu_gen;
if (enter == leave)
break;
enter = leave;
}
tu->tu_uticks += tmp.tu_uticks;
tu->tu_sticks += tmp.tu_sticks;
tu->tu_iticks += tmp.tu_iticks;
timespecadd(&tu->tu_runtime, &tmp.tu_runtime, &tu->tu_runtime);
}
void
tuagg_get_proc(struct tusage *tu, struct proc *p)
{
memset(tu, 0, sizeof(*tu));
tuagg_sumup(tu, &p->p_tu);
}
void
tuagg_get_process(struct tusage *tu, struct process *pr)
{
struct proc *q;
memset(tu, 0, sizeof(*tu));
mtx_enter(&pr->ps_mtx);
tuagg_sumup(tu, &pr->ps_tu);
/* add on all living threads */
TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link)
tuagg_sumup(tu, &q->p_tu);
mtx_leave(&pr->ps_mtx);
}
/*
* Aggregate a single thread's immediate time counts into the running
* totals for the thread and process
* Update the process ps_tu usage with the values from proc p while
* doing so the times for proc p are reset.
* This requires that p is either curproc or SDEAD and that the
* IPL is higher than IPL_STATCLOCK. ps_mtx uses IPL_HIGH so
* this should always be the case.
*/
void
tuagg_locked(struct process *pr, struct proc *p, const struct timespec *ts)
tuagg_add_process(struct process *pr, struct proc *p)
{
tuagg_sub(&pr->ps_tu, p, ts);
tuagg_sub(&p->p_tu, p, ts);
p->p_uticks = 0;
p->p_sticks = 0;
p->p_iticks = 0;
}
MUTEX_ASSERT_LOCKED(&pr->ps_mtx);
splassert(IPL_STATCLOCK);
KASSERT(curproc == p || p->p_stat == SDEAD);
void
tuagg(struct process *pr, struct proc *p)
{
SCHED_LOCK();
tuagg_locked(pr, p, NULL);
SCHED_UNLOCK();
tu_enter(&pr->ps_tu);
tuagg_sumup(&pr->ps_tu, &p->p_tu);
tu_leave(&pr->ps_tu);
/* Now reset CPU time usage for the thread. */
timespecclear(&p->p_tu.tu_runtime);
p->p_tu.tu_uticks = p->p_tu.tu_sticks = p->p_tu.tu_iticks = 0;
}
/*
@@ -474,6 +518,7 @@ dogetrusage(struct proc *p, int who, struct rusage *rup)
{
struct process *pr = p->p_p;
struct proc *q;
struct tusage tu = { 0 };
KERNEL_ASSERT_LOCKED();
@@ -484,14 +529,15 @@ dogetrusage(struct proc *p, int who, struct rusage *rup)
*rup = *pr->ps_ru;
else
memset(rup, 0, sizeof(*rup));
tuagg_sumup(&tu, &pr->ps_tu);
/* add on all living threads */
TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
ruadd(rup, &q->p_ru);
tuagg(pr, q);
tuagg_sumup(&tu, &q->p_tu);
}
calcru(&pr->ps_tu, &rup->ru_utime, &rup->ru_stime, NULL);
calcru(&tu, &rup->ru_utime, &rup->ru_stime, NULL);
break;
case RUSAGE_THREAD:

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: kern_sched.c,v 1.96 2024/06/03 12:48:25 claudio Exp $ */
/* $OpenBSD: kern_sched.c,v 1.99 2024/07/08 16:15:42 mpi Exp $ */
/*
* Copyright (c) 2007, 2008 Artur Grabowski <art@openbsd.org>
*
@@ -213,9 +213,22 @@ void
sched_exit(struct proc *p)
{
struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
struct timespec ts;
LIST_INSERT_HEAD(&spc->spc_deadproc, p, p_hash);
/* update the tu_runtime one last time */
nanouptime(&ts);
if (timespeccmp(&ts, &spc->spc_runtime, <))
timespecclear(&ts);
else
timespecsub(&ts, &spc->spc_runtime, &ts);
/* add the time counts for this thread */
tu_enter(&p->p_tu);
timespecadd(&p->p_tu.tu_runtime, &ts, &p->p_tu.tu_runtime);
tu_leave(&p->p_tu);
KERNEL_ASSERT_LOCKED();
sched_toidle();
}
@@ -633,6 +646,14 @@ sched_peg_curproc(struct cpu_info *ci)
SCHED_UNLOCK();
}
void
sched_unpeg_curproc(void)
{
struct proc *p = curproc;
atomic_clearbits_int(&p->p_flag, P_CPUPEG);
}
#ifdef MULTIPROCESSOR
void
@@ -699,7 +720,7 @@ sched_barrier_task(void *arg)
sched_peg_curproc(ci);
cond_signal(&sb->cond);
atomic_clearbits_int(&curproc->p_flag, P_CPUPEG);
sched_unpeg_curproc();
}
void

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: kern_smr.c,v 1.16 2022/08/14 01:58:27 jsg Exp $ */
/* $OpenBSD: kern_smr.c,v 1.17 2024/07/08 14:46:47 mpi Exp $ */
/*
* Copyright (c) 2019-2020 Visa Hankala
@@ -163,7 +163,7 @@ smr_grace_wait(void)
sched_peg_curproc(ci);
KASSERT(ci->ci_schedstate.spc_smrgp == smrgp);
}
atomic_clearbits_int(&curproc->p_flag, P_CPUPEG);
sched_unpeg_curproc();
#endif /* MULTIPROCESSOR */
}

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: kern_sysctl.c,v 1.427 2024/04/12 16:07:09 bluhm Exp $ */
/* $OpenBSD: kern_sysctl.c,v 1.428 2024/07/08 13:17:12 claudio Exp $ */
/* $NetBSD: kern_sysctl.c,v 1.17 1996/05/20 17:49:05 mrg Exp $ */
/*-
@@ -1773,14 +1773,18 @@ fill_kproc(struct process *pr, struct kinfo_proc *ki, struct proc *p,
struct tty *tp;
struct vmspace *vm = pr->ps_vmspace;
struct timespec booted, st, ut, utc;
struct tusage tu;
int isthread;
isthread = p != NULL;
if (!isthread)
if (!isthread) {
p = pr->ps_mainproc; /* XXX */
tuagg_get_process(&tu, pr);
} else
tuagg_get_proc(&tu, p);
FILL_KPROC(ki, strlcpy, p, pr, pr->ps_ucred, pr->ps_pgrp,
p, pr, s, vm, pr->ps_limit, pr->ps_sigacts, isthread,
p, pr, s, vm, pr->ps_limit, pr->ps_sigacts, &tu, isthread,
show_pointers);
/* stuff that's too painful to generalize into the macros */
@@ -1803,7 +1807,7 @@ fill_kproc(struct process *pr, struct kinfo_proc *ki, struct proc *p,
if ((pr->ps_flags & PS_ZOMBIE) == 0) {
if ((pr->ps_flags & PS_EMBRYO) == 0 && vm != NULL)
ki->p_vm_rssize = vm_resident_count(vm);
calctsru(isthread ? &p->p_tu : &pr->ps_tu, &ut, &st, NULL);
calctsru(&tu, &ut, &st, NULL);
ki->p_uutime_sec = ut.tv_sec;
ki->p_uutime_usec = ut.tv_nsec/1000;
ki->p_ustime_sec = st.tv_sec;

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: kern_time.c,v 1.167 2023/10/17 00:04:02 cheloha Exp $ */
/* $OpenBSD: kern_time.c,v 1.168 2024/07/08 13:17:12 claudio Exp $ */
/* $NetBSD: kern_time.c,v 1.20 1996/02/18 11:57:06 fvdl Exp $ */
/*
@@ -40,6 +40,7 @@
#include <sys/rwlock.h>
#include <sys/proc.h>
#include <sys/ktrace.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/stdint.h>
#include <sys/pledge.h>
@@ -112,6 +113,7 @@ settime(const struct timespec *ts)
int
clock_gettime(struct proc *p, clockid_t clock_id, struct timespec *tp)
{
struct tusage tu;
struct proc *q;
int error = 0;
@@ -128,13 +130,15 @@ clock_gettime(struct proc *p, clockid_t clock_id, struct timespec *tp)
break;
case CLOCK_PROCESS_CPUTIME_ID:
nanouptime(tp);
tuagg_get_process(&tu, p->p_p);
timespecsub(tp, &curcpu()->ci_schedstate.spc_runtime, tp);
timespecadd(tp, &p->p_p->ps_tu.tu_runtime, tp);
timespecadd(tp, &tu.tu_runtime, tp);
break;
case CLOCK_THREAD_CPUTIME_ID:
nanouptime(tp);
tuagg_get_proc(&tu, p);
timespecsub(tp, &curcpu()->ci_schedstate.spc_runtime, tp);
timespecadd(tp, &p->p_tu.tu_runtime, tp);
timespecadd(tp, &tu.tu_runtime, tp);
break;
default:
/* check for clock from pthread_getcpuclockid() */

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: sched_bsd.c,v 1.93 2024/06/03 12:48:25 claudio Exp $ */
/* $OpenBSD: sched_bsd.c,v 1.94 2024/07/08 13:17:12 claudio Exp $ */
/* $NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $ */
/*-
@@ -344,7 +344,6 @@ mi_switch(void)
struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
struct proc *p = curproc;
struct proc *nextproc;
struct process *pr = p->p_p;
struct timespec ts;
int oldipl;
#ifdef MULTIPROCESSOR
@@ -382,9 +381,9 @@ mi_switch(void)
} else {
timespecsub(&ts, &spc->spc_runtime, &ts);
}
/* add the time counts for this thread to the process's total */
tuagg_locked(pr, p, &ts);
tu_enter(&p->p_tu);
timespecadd(&p->p_tu.tu_runtime, &ts, &p->p_tu.tu_runtime);
tu_leave(&p->p_tu);
/* Stop any optional clock interrupts. */
if (ISSET(spc->spc_schedflags, SPCF_ITIMER)) {

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: tty.c,v 1.176 2022/08/14 01:58:28 jsg Exp $ */
/* $OpenBSD: tty.c,v 1.177 2024/07/08 13:17:12 claudio Exp $ */
/* $NetBSD: tty.c,v 1.68.4.2 1996/06/06 16:04:52 thorpej Exp $ */
/*-
@@ -2152,6 +2152,7 @@ ttyinfo(struct tty *tp)
{
struct process *pr, *pickpr;
struct proc *p, *pick;
struct tusage tu;
struct timespec utime, stime;
int tmp;
@@ -2214,7 +2215,8 @@ update_pickpr:
pickpr->ps_vmspace != NULL)
rss = vm_resident_count(pickpr->ps_vmspace);
calctsru(&pickpr->ps_tu, &utime, &stime, NULL);
tuagg_get_process(&tu, pickpr);
calctsru(&tu, &utime, &stime, NULL);
/* Round up and print user time. */
utime.tv_nsec += 5000000;