sync code with the latest fixes and improvements from OpenBSD

purplerain 2023-06-07 21:20:56 +00:00
parent 4b78db449c
commit bf0676207f
Signed by: purplerain
GPG key ID: F42C07F07E2E35B7
2406 changed files with 6353 additions and 434004 deletions

sys/kern/kern_exec.c

@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_exec.c,v 1.247 2023/04/24 10:22:06 kettenis Exp $ */
+/* $OpenBSD: kern_exec.c,v 1.248 2023/05/30 08:30:01 jsg Exp $ */
 /* $NetBSD: kern_exec.c,v 1.75 1996/02/09 18:59:28 christos Exp $ */
 /*-
@@ -533,7 +533,7 @@ sys_execve(struct proc *p, void *v, register_t *retval)
     /*
      * XXX As a transition mechanism, we don't enforce branch
-     * target control floe integrety on partitions mounted with
+     * target control flow integrity on partitions mounted with
      * the wxallowed flag.
      */
     if (pr->ps_textvp->v_mount &&

sys/kern/kern_pledge.c

@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_pledge.c,v 1.304 2023/02/19 18:46:46 anton Exp $ */
+/* $OpenBSD: kern_pledge.c,v 1.306 2023/06/02 17:44:29 cheloha Exp $ */
 /*
  * Copyright (c) 2015 Nicholas Marriott <nicm@openbsd.org>
@@ -145,6 +145,9 @@ const uint64_t pledge_syscalls[SYS_MAXSYSCALL] = {
      */
     [SYS_sysctl] = PLEDGE_STDIO,
+    /* For moncontrol(3). Only allowed to disable profiling. */
+    [SYS_profil] = PLEDGE_STDIO,
     /* Support for malloc(3) family of operations */
     [SYS_getentropy] = PLEDGE_STDIO,
     [SYS_madvise] = PLEDGE_STDIO,
@@ -231,6 +234,7 @@ const uint64_t pledge_syscalls[SYS_MAXSYSCALL] = {
     [SYS_socketpair] = PLEDGE_STDIO,
     [SYS_wait4] = PLEDGE_STDIO,
+    [SYS_waitid] = PLEDGE_STDIO,
     /*
      * Can kill self with "stdio". Killing another pid
@@ -1585,6 +1589,16 @@ pledge_kill(struct proc *p, pid_t pid)
         return pledge_fail(p, EPERM, PLEDGE_PROC);
 }
+int
+pledge_profil(struct proc *p, u_int scale)
+{
+    if ((p->p_p->ps_flags & PS_PLEDGE) == 0)
+        return 0;
+    if (scale != 0)
+        return pledge_fail(p, EPERM, PLEDGE_STDIO);
+    return 0;
+}
 int
 pledge_protexec(struct proc *p, int prot)
 {

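The new pledge_profil() hook means a "stdio"-pledged process may still turn profiling off, but not on. A minimal userland sketch of the resulting behavior (a hypothetical program, not part of this commit; it assumes the usual <unistd.h> declarations for pledge(2) and profil(2)):

    #include <unistd.h>

    int
    main(void)
    {
        if (pledge("stdio", NULL) == -1)
            return 1;
        /* A scale of 0 disables profiling; allowed under "stdio". */
        profil(NULL, 0, 0, 0);
        /*
         * Any nonzero scale would trip pledge_fail(): the process is
         * killed with SIGABRT, or profil(2) fails with EPERM if the
         * "error" promise was also pledged.
         */
        return 0;
    }

This matches the comment added to the syscall table: the hook exists so moncontrol(3), which stops profiling by calling profil(2) with a zero scale, keeps working under pledge.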
sys/kern/kern_synch.c

@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_synch.c,v 1.191 2023/02/15 20:43:41 mvs Exp $ */
+/* $OpenBSD: kern_synch.c,v 1.192 2023/06/01 10:21:26 claudio Exp $ */
 /* $NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $ */
 /*
@@ -470,20 +470,22 @@ sleep_signal_check(void)
 }
 int
-wakeup_proc(struct proc *p, const volatile void *chan)
+wakeup_proc(struct proc *p, const volatile void *chan, int flags)
 {
-    int s, awakened = 0;
+    int awakened = 0;
-    SCHED_LOCK(s);
+    SCHED_ASSERT_LOCKED();
     if (p->p_wchan != NULL &&
         ((chan == NULL) || (p->p_wchan == chan))) {
         awakened = 1;
+        if (flags)
+            atomic_setbits_int(&p->p_flag, flags);
         if (p->p_stat == SSLEEP)
             setrunnable(p);
         else
             unsleep(p);
     }
-    SCHED_UNLOCK(s);
     return awakened;
 }
@@ -502,8 +504,7 @@ endtsleep(void *arg)
     int s;
     SCHED_LOCK(s);
-    if (wakeup_proc(p, NULL))
-        atomic_setbits_int(&p->p_flag, P_TIMEOUT);
+    wakeup_proc(p, NULL, P_TIMEOUT);
     SCHED_UNLOCK(s);
 }
@@ -548,7 +549,7 @@ wakeup_n(const volatile void *ident, int n)
         if (p->p_stat != SSLEEP && p->p_stat != SSTOP)
             panic("wakeup: p_stat is %d", (int)p->p_stat);
 #endif
-        if (wakeup_proc(p, ident))
+        if (wakeup_proc(p, ident, 0))
             --n;
     }
     SCHED_UNLOCK(s);

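wakeup_proc() now asserts that the caller holds the scheduler lock and takes the p_flag bits to set as a third argument, instead of taking the lock itself and leaving the flag update to the caller. A sketch of the resulting caller pattern (illustrative; p is a struct proc * the caller already holds):

        int s;

        SCHED_LOCK(s);
        /*
         * The scheduler lock is held across the call, which
         * wakeup_proc() now asserts; P_TIMEOUT is set on the proc in
         * the same critical section in which it is woken, so no
         * separate atomic_setbits_int() step is needed afterwards.
         */
        wakeup_proc(p, NULL, P_TIMEOUT);
        SCHED_UNLOCK(s);

Callers with no flags to set, like wakeup_n() above, simply pass 0.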
sys/kern/kern_sysctl.c

@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_sysctl.c,v 1.414 2023/05/18 10:23:19 mvs Exp $ */
+/* $OpenBSD: kern_sysctl.c,v 1.415 2023/05/21 12:47:54 claudio Exp $ */
 /* $NetBSD: kern_sysctl.c,v 1.17 1996/05/20 17:49:05 mrg Exp $ */
 /*-
@@ -841,7 +841,7 @@ sysctl_hwchargestop(void *oldp, size_t *oldlenp, void *newp, size_t newlen)
     int stop = hw_battery_chargestop;
     int error;
-    if (!hw_battery_setchargestart)
+    if (!hw_battery_setchargestop)
         return EOPNOTSUPP;
     error = sysctl_int_bounded(oldp, oldlenp, newp, newlen,

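The kern_sysctl.c change fixes a copy-paste bug: the chargestop handler gated on the chargestart hook, so hw.battery.chargestop could report EOPNOTSUPP based on the wrong driver capability. The intended pairing, sketched (the sysctl_hwchargestart() half is inferred by symmetry, not shown in this diff):

        /* in sysctl_hwchargestart(): */
        if (!hw_battery_setchargestart)
            return EOPNOTSUPP;

        /* in sysctl_hwchargestop(): */
        if (!hw_battery_setchargestop)
            return EOPNOTSUPP;

Each handler must test the hook it actually calls.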
sys/kern/subr_blist.c

@@ -1,4 +1,4 @@
-/* $OpenBSD: subr_blist.c,v 1.3 2022/08/13 16:02:15 semarie Exp $ */
+/* $OpenBSD: subr_blist.c,v 1.4 2023/05/30 08:30:01 jsg Exp $ */
 /* DragonFlyBSD:7b80531f545c7d3c51c1660130c71d01f6bccbe0:/sys/kern/subr_blist.c */
 /*
  * BLIST.C - Bitmap allocator/deallocator, using a radix tree with hinting
@@ -70,8 +70,8 @@
  * due to swap interleaving not all that much less), but the blist code
  * scales much, much better.
  *
- * LAYOUT: The radix tree is layed out recursively using a
- * linear array. Each meta node is immediately followed (layed out
+ * LAYOUT: The radix tree is laid out recursively using a
+ * linear array. Each meta node is immediately followed (laid out
  * sequentially in memory) by BLIST_META_RADIX lower level nodes. This
  * is a recursive structure but one that can be easily scanned through
  * a very simple 'skip' calculation. In order to support large radixes,
@@ -90,7 +90,7 @@
  * ranges.
  *
  * NOTE: The radix may exceed BLIST_BMAP_RADIX bits in order to support
- * up to 2^(BLIST_BMAP_RADIX-1) blocks. The first divison will
+ * up to 2^(BLIST_BMAP_RADIX-1) blocks. The first division will
  * drop the radix down and fit it within a signed BLIST_BMAP_RADIX
  * bit integer.
  *
@@ -285,7 +285,7 @@ blist_allocat(blist_t bl, swblk_t count, swblk_t blkat)
 /*
  * blist_free() - free up space in the block bitmap. Return the base
- *     of a contiguous region. Panic if an inconsistancy is
+ *     of a contiguous region. Panic if an inconsistency is
  *     found.
  */
@@ -418,7 +418,7 @@ blst_radix_gapfind(blmeta_t *scan, swblk_t blk, swblk_t radix, swblk_t skip,
     swblk_t next_skip;
     if (radix == BLIST_BMAP_RADIX) {
-        /* leaf node: we considere only completely free bitmap as free */
+        /* leaf node: we consider only completely free bitmaps as free */
         if (state == GAPFIND_FIRSTFREE) {
             if (scan->u.bmu_bitmap == (u_swblk_t)-1) {
                 /* node is fully free */
@@ -729,7 +729,7 @@ blst_leaf_free(blmeta_t *scan, swblk_t blk, swblk_t count)
      * We could probably do a better job here. We are required to make
      * bighint at least as large as the biggest contiguous block of
      * data. If we just shoehorn it, a little extra overhead will
-     * be incured on the next allocation (but only that one typically).
+     * be incurred on the next allocation (but only that one typically).
      */
     scan->bm_bighint = BLIST_BMAP_RADIX;
 }

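The LAYOUT comment corrected above is the key to reading this file: the whole radix tree is packed into one linear array with no child pointers. A worked illustration of the skip arithmetic (a sketch following the scan loops in this file; indices are illustrative):

        /*
         * A meta node at index i whose subtree occupies `skip' slots:
         *
         *   next_skip = skip / BLIST_META_RADIX
         *   subtree 0 starts at i + 1
         *   subtree 1 starts at i + 1 + next_skip
         *   subtree j starts at i + 1 + j * next_skip
         *
         * so a loop of the form
         *   for (i = 1; i <= skip; i += next_skip)
         * visits all BLIST_META_RADIX children, which sit
         * sequentially in memory right after their parent.
         */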
sys/kern/subr_prof.c

@@ -1,4 +1,4 @@
-/* $OpenBSD: subr_prof.c,v 1.33 2023/04/25 01:32:36 cheloha Exp $ */
+/* $OpenBSD: subr_prof.c,v 1.35 2023/06/02 17:44:29 cheloha Exp $ */
 /* $NetBSD: subr_prof.c,v 1.12 1996/04/22 01:38:50 christos Exp $ */
 /*-
@@ -34,6 +34,7 @@
 #include <sys/param.h>
 #include <sys/systm.h>
+#include <sys/pledge.h>
 #include <sys/proc.h>
 #include <sys/resourcevar.h>
 #include <sys/mount.h>
@@ -137,7 +138,7 @@ prof_state_toggle(struct gmonparam *gp, int oldstate)
 #if !defined(GPROF)
         /*
          * If this is not a profiling kernel, we need to patch
-         * all symbols that can be instrummented.
+         * all symbols that can be instrumented.
          */
         error = db_prof_enable();
 #endif
@@ -236,7 +237,11 @@ sys_profil(struct proc *p, void *v, register_t *retval)
     } */ *uap = v;
     struct process *pr = p->p_p;
     struct uprof *upp;
-    int s;
+    int error, s;
+    error = pledge_profil(p, SCARG(uap, scale));
+    if (error)
+        return error;
     if (SCARG(uap, scale) > (1 << 16))
         return (EINVAL);

sys/kern/subr_witness.c

@ -1,4 +1,4 @@
/* $OpenBSD: subr_witness.c,v 1.48 2022/02/21 14:16:49 jsg Exp $ */
/* $OpenBSD: subr_witness.c,v 1.50 2023/05/30 08:30:01 jsg Exp $ */
/*-
* Copyright (c) 2008 Isilon Systems, Inc.
@@ -413,11 +413,11 @@ static int w_free_cnt, w_spin_cnt, w_sleep_cnt;
 static struct witness *w_data;
 static uint8_t **w_rmatrix;
-static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT];
+static struct lock_list_entry *w_locklistdata;
 static struct witness_hash w_hash; /* The witness hash table. */
 /* The lock order data hash */
-static struct witness_lock_order_data w_lodata[WITNESS_LO_DATA_COUNT];
+static struct witness_lock_order_data *w_lodata;
 static struct witness_lock_order_data *w_lofree = NULL;
 static struct witness_lock_order_hash w_lohash;
 static int w_max_used_index = 0;
@@ -523,6 +523,11 @@ witness_initialize(void)
             w_lock_stack_num);
     }
+    w_locklistdata = (void *)uvm_pageboot_alloc(
+        sizeof(struct lock_list_entry) * LOCK_CHILDCOUNT);
+    memset(w_locklistdata, 0, sizeof(struct lock_list_entry) *
+        LOCK_CHILDCOUNT);
     s = splhigh();
     for (i = 0; i < w_lock_stack_num; i++)
         witness_lock_stack_free(&stacks[i]);
@@ -965,7 +970,7 @@ witness_checkorder(struct lock_object *lock, int flags,
         /*
          * If we are locking a sleepable lock and this lock
          * isn't sleepable, we want to treat it as a lock
-         * order violation to enfore a general lock order of
+         * order violation to enforce a general lock order of
          * sleepable locks before non-sleepable locks.
          */
         if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
@@ -2339,9 +2344,12 @@ witness_init_hash_tables(void)
     w_hash.wh_count = 0;
     /* Initialize the lock order data hash. */
+    w_lodata = (void *)uvm_pageboot_alloc(
+        sizeof(struct witness_lock_order_data) * WITNESS_LO_DATA_COUNT);
+    memset(w_lodata, 0, sizeof(struct witness_lock_order_data) *
+        WITNESS_LO_DATA_COUNT);
     w_lofree = NULL;
     for (i = 0; i < WITNESS_LO_DATA_COUNT; i++) {
-        memset(&w_lodata[i], 0, sizeof(w_lodata[i]));
         w_lodata[i].wlod_next = w_lofree;
         w_lofree = &w_lodata[i];
     }

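Both subr_witness.c hunks apply the same space-saving pattern: a large static array shrinks to a pointer, and the storage is carved out of the boot-time page allocator during initialization instead of being baked into the kernel image. The generic shape (a sketch; struct foo, table, N, and foo_init() are placeholders, while uvm_pageboot_alloc() is the real allocator used above):

        static struct foo *table;    /* was: static struct foo table[N]; */

        void
        foo_init(void)
        {
            /*
             * uvm_pageboot_alloc() hands back permanent memory during
             * early boot; zero it explicitly, since the table no
             * longer gets the implicit zeroing that BSS provided.
             */
            table = (void *)uvm_pageboot_alloc(sizeof(struct foo) * N);
            memset(table, 0, sizeof(struct foo) * N);
        }

This keeps the LOCK_CHILDCOUNT- and WITNESS_LO_DATA_COUNT-sized tables out of the static kernel footprint, which is also why the lock order hash setup now allocates before threading the entries onto the free list.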
sys/kern/vfs_sync.c

@ -1,4 +1,4 @@
/* $OpenBSD: vfs_sync.c,v 1.68 2022/08/14 01:58:28 jsg Exp $ */
/* $OpenBSD: vfs_sync.c,v 1.69 2023/05/25 07:45:33 claudio Exp $ */
/*
* Portions of this code are:
@@ -245,7 +245,7 @@ int
 speedup_syncer(void)
 {
     if (syncerproc)
-        wakeup_proc(syncerproc, &syncer_chan);
+        wakeup_one(&syncer_chan);
     if (rushjob < syncdelay / 2) {
         rushjob += 1;
         stat_rush_requests += 1;