sync code with latest improvements from OpenBSD

commit 7116a5838e
parent 27298272ec
Author: purplerain
Date:   2023-08-18 19:17:13 +00:00
Signed by: purplerain (GPG key ID: F42C07F07E2E35B7)

35 changed files with 509 additions and 254 deletions

Index: sched_bsd.c

@@ -1,4 +1,4 @@
-/* $OpenBSD: sched_bsd.c,v 1.81 2023/08/14 08:33:24 mpi Exp $ */
+/* $OpenBSD: sched_bsd.c,v 1.82 2023/08/18 09:18:52 claudio Exp $ */
 /* $NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $ */
 /*-
@@ -61,9 +61,24 @@ int lbolt; /* once a second sleep address */
 struct __mp_lock sched_lock;
 #endif

+void update_loadavg(void *);
 void schedcpu(void *);
 uint32_t decay_aftersleep(uint32_t, uint32_t);
+extern struct cpuset sched_idle_cpus;
+
+/*
+ * constants for averages over 1, 5, and 15 minutes when sampling at
+ * 5 second intervals.
+ */
+static const fixpt_t cexp[3] = {
+	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
+	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
+	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
+};
+
+struct loadavg averunnable;
+
 /*
  * Force switch among equal priority processes every 100ms.
  */
@@ -95,6 +110,34 @@ roundrobin(struct clockintr *cl, void *cf)
 		need_resched(ci);
 }

+/*
+ * update_loadav: compute a tenex style load average of a quantity on
+ * 1, 5, and 15 minute intervals.
+ */
+void
+update_loadavg(void *arg)
+{
+	struct timeout *to = (struct timeout *)arg;
+	CPU_INFO_ITERATOR cii;
+	struct cpu_info *ci;
+	u_int i, nrun = 0;
+
+	CPU_INFO_FOREACH(cii, ci) {
+		if (!cpuset_isset(&sched_idle_cpus, ci))
+			nrun++;
+		nrun += ci->ci_schedstate.spc_nrun;
+	}
+
+	for (i = 0; i < 3; i++) {
+		averunnable.ldavg[i] = (cexp[i] * averunnable.ldavg[i] +
+		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
+	}
+
+	timeout_add_sec(to, 5);
+}
+
 /*
  * Constants for digital decay and forget:
  *	90% of (p_estcpu) usage in 5 * loadav time
@@ -236,7 +279,6 @@ schedcpu(void *arg)
 		}
 		SCHED_UNLOCK(s);
 	}
-	uvm_meter();
 	wakeup(&lbolt);
 	timeout_add_sec(to, 1);
 }
@@ -691,6 +733,7 @@ void
 scheduler_start(void)
 {
 	static struct timeout schedcpu_to;
+	static struct timeout loadavg_to;

 	/*
 	 * We avoid polluting the global namespace by keeping the scheduler
@@ -699,7 +742,10 @@ scheduler_start(void)
 	 * its job.
 	 */
 	timeout_set(&schedcpu_to, schedcpu, &schedcpu_to);
+	timeout_set(&loadavg_to, update_loadavg, &loadavg_to);
 	schedcpu(&schedcpu_to);
+	update_loadavg(&loadavg_to);
+
 #ifndef SMALL_KERNEL
 	if (perfpolicy == PERFPOL_AUTO)
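The update_loadavg() added above recomputes the 1, 5 and 15 minute load averages every 5 seconds as an exponentially decaying average in fixed-point arithmetic, re-armed through its own timeout. The following standalone userland sketch replays the same update so the decay behaviour is easy to see; the FSHIFT/FSCALE values mirror sys/param.h and the nrun samples are made-up inputs, not taken from the diff:

	#include <stdio.h>
	#include <stdint.h>

	#define FSHIFT	11		/* fixed-point shift, as in sys/param.h */
	#define FSCALE	(1 << FSHIFT)

	typedef uint64_t fixpt_t;	/* wide enough for the intermediate product */

	/* decay factors from the diff: exp(-5s/60s), exp(-5s/300s), exp(-5s/900s) */
	static const fixpt_t cexp[3] = {
		(fixpt_t)(0.9200444146293232 * FSCALE),	/* exp(-1/12) */
		(fixpt_t)(0.9834714538216174 * FSCALE),	/* exp(-1/60) */
		(fixpt_t)(0.9944598480048967 * FSCALE),	/* exp(-1/180) */
	};

	int
	main(void)
	{
		fixpt_t ldavg[3] = { 0, 0, 0 };
		unsigned nrun[] = { 4, 4, 4, 4, 0, 0, 0, 0 };	/* fake 5-second samples */
		unsigned i, t;

		for (t = 0; t < 8; t++) {
			for (i = 0; i < 3; i++)	/* same formula as update_loadavg() */
				ldavg[i] = (cexp[i] * ldavg[i] +
				    nrun[t] * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
			printf("t=%2us  load: %.2f %.2f %.2f\n", (t + 1) * 5,
			    (double)ldavg[0] / FSCALE, (double)ldavg[1] / FSCALE,
			    (double)ldavg[2] / FSCALE);
		}
		return 0;
	}

After four busy samples the 1-minute average has climbed fastest, and it also decays fastest once nrun drops back to zero, which is exactly the behaviour the three cexp constants encode.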

Index: zlib.h

@@ -320,8 +320,8 @@ ZEXTERN int ZEXPORT deflate(z_streamp strm, int flush);
     with the same value of the flush parameter and more output space (updated
     avail_out), until the flush is complete (deflate returns with non-zero
     avail_out).  In the case of a Z_FULL_FLUSH or Z_SYNC_FLUSH, make sure that
-    avail_out is greater than six to avoid repeated flush markers due to
-    avail_out == 0 on return.
+    avail_out is greater than six when the flush marker begins, in order to avoid
+    repeated flush markers upon calling deflate() again when avail_out == 0.

     If the parameter flush is set to Z_FINISH, pending input is processed,
     pending output is flushed and deflate returns with Z_STREAM_END if there was
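The reworded documentation describes a practical rule: give deflate() enough output room (more than six bytes) at the moment the flush marker is emitted, and keep calling it while avail_out comes back as 0. A minimal sketch of that loop, assuming a caller-supplied, already-initialized z_stream and output file (sync_flush() itself is illustrative, not part of the zlib API):

	#include <stdio.h>
	#include <zlib.h>

	/* Drain a sync flush: repeat deflate(Z_SYNC_FLUSH) while it fills the
	 * whole output buffer.  With outsize well above 6, the flush marker is
	 * never emitted right at avail_out == 0, so it is not repeated. */
	int
	sync_flush(z_stream *strm, unsigned char *out, unsigned outsize, FILE *fp)
	{
		int ret;

		do {
			strm->next_out = out;
			strm->avail_out = outsize;
			ret = deflate(strm, Z_SYNC_FLUSH);
			if (ret == Z_STREAM_ERROR)
				return ret;
			fwrite(out, 1, outsize - strm->avail_out, fp);
		} while (strm->avail_out == 0);	/* flush done once space remains */
		return Z_OK;
	}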

Index: if.c

@@ -1,4 +1,4 @@
-/* $OpenBSD: if.c,v 1.706 2023/07/07 19:45:26 bluhm Exp $ */
+/* $OpenBSD: if.c,v 1.707 2023/08/18 08:10:16 jsg Exp $ */
 /* $NetBSD: if.c,v 1.35 1996/05/07 05:26:04 thorpej Exp $ */

 /*
@@ -910,7 +910,7 @@ if_output_tso(struct ifnet *ifp, struct mbuf **mp, struct sockaddr *dst,
 	/*
 	 * Try to send with TSO first.  When forwarding LRO may set
-	 * maximium segment size in mbuf header.  Chop TCP segment
+	 * maximum segment size in mbuf header.  Chop TCP segment
 	 * even if it would fit interface MTU to preserve maximum
 	 * path MTU.
 	 */

Index: if_pfsync.c

@@ -1,4 +1,4 @@
-/* $OpenBSD: if_pfsync.c,v 1.319 2023/07/31 11:13:09 dlg Exp $ */
+/* $OpenBSD: if_pfsync.c,v 1.320 2023/08/18 08:03:57 jsg Exp $ */

 /*
  * Copyright (c) 2002 Michael Shalayeff
@@ -1528,7 +1528,7 @@ pfsync_sendout(struct pfsync_softc *sc, struct mbuf *m)
 {
 	struct ip_moptions imo;
 	unsigned int len = m->m_pkthdr.len;
-#if NBPF > 0
+#if NBPFILTER > 0
 	caddr_t if_bpf = sc->sc_if.if_bpf;
 	if (if_bpf)
 		bpf_mtap(if_bpf, m, BPF_DIRECTION_OUT);
@@ -2628,9 +2628,6 @@ pfsync_input(struct mbuf *m, uint8_t ttl, unsigned int hlen)
 	unsigned int len;
 	void (*in)(struct pfsync_softc *,
 	    const caddr_t, unsigned int, unsigned int);
-#if NBPF > 0
-	caddr_t if_bpf;
-#endif

 	pfsyncstat_inc(pfsyncs_ipackets);
@@ -2650,9 +2647,6 @@ pfsync_input(struct mbuf *m, uint8_t ttl, unsigned int hlen)
 		goto leave;
 	}

-#if NBPF > 0
-#endif

 	/* verify that the IP TTL is 255. */
 	if (ttl != PFSYNC_DFLTTL) {
 		pfsyncstat_inc(pfsyncs_badttl);
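The NBPF to NBPFILTER rename matters because an identifier that is undefined at preprocessing time evaluates to 0 inside #if, so the '#if NBPF > 0' blocks were silently compiled out; in the OpenBSD kernel the config(8)-generated header bpfilter.h defines NBPFILTER as the number of configured bpfilter pseudo-devices. A standalone sketch of that pitfall, with a stub macro standing in for the generated header:

	#include <stdio.h>

	#define NBPFILTER 1	/* stand-in for the config(8)-generated bpfilter.h */

	#if NBPF > 0		/* NBPF is undefined, so this is always false */
	#define OLD_GUARD_ACTIVE 1
	#else
	#define OLD_GUARD_ACTIVE 0
	#endif

	#if NBPFILTER > 0	/* the correct macro enables the bpf hooks */
	#define NEW_GUARD_ACTIVE 1
	#else
	#define NEW_GUARD_ACTIVE 0
	#endif

	int
	main(void)
	{
		printf("old guard: %d, new guard: %d\n",
		    OLD_GUARD_ACTIVE, NEW_GUARD_ACTIVE);	/* prints 0 and 1 */
		return 0;
	}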

Index: wg_cookie.c

@@ -1,4 +1,4 @@
-/* $OpenBSD: wg_cookie.c,v 1.4 2022/03/17 18:51:56 tb Exp $ */
+/* $OpenBSD: wg_cookie.c,v 1.5 2023/08/18 08:11:47 jsg Exp $ */
 /*
  * Copyright (C) 2015-2020 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
  * Copyright (C) 2019-2020 Matt Dunwoodie <ncon@noconroy.net>
@@ -385,7 +385,7 @@ ratelimit_allow(struct ratelimit *rl, struct sockaddr *sa)
 		 * lapsed since our last_time, adding that, ensuring that we
 		 * cap the tokens at TOKEN_MAX. If the endpoint has no tokens
 		 * left (that is tokens <= INITIATION_COST) then we block the
-		 * request, otherwise we subtract the INITITIATION_COST and
+		 * request, otherwise we subtract the INITIATION_COST and
 		 * return OK. */
 		diff = r->r_last_time;
 		getnanouptime(&r->r_last_time);
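The comment fixed here describes a per-endpoint token bucket whose credit is measured in elapsed nanoseconds. A self-contained userland sketch of the same scheme follows; TOKEN_MAX and INITIATION_COST are illustrative values (the real constants in wg_cookie.c are defined differently):

	#include <stdint.h>
	#include <stdbool.h>
	#include <time.h>

	#define TOKEN_MAX	(5ULL * 1000000000ULL)	/* burst of ~5 initiations */
	#define INITIATION_COST	(1000000000ULL / 20ULL)	/* ~20 initiations/second */

	struct bucket {
		uint64_t	tokens;		/* accumulated credit, in ns */
		struct timespec	last_time;
	};

	/* Refill by the time elapsed since last_time, cap at TOKEN_MAX, then
	 * charge INITIATION_COST — blocking the request if no credit is left. */
	static bool
	ratelimit_allow_sketch(struct bucket *b)
	{
		struct timespec now, diff;
		uint64_t elapsed_ns;

		clock_gettime(CLOCK_MONOTONIC, &now);
		diff.tv_sec = now.tv_sec - b->last_time.tv_sec;
		diff.tv_nsec = now.tv_nsec - b->last_time.tv_nsec;
		if (diff.tv_nsec < 0) {
			diff.tv_sec--;
			diff.tv_nsec += 1000000000L;
		}
		b->last_time = now;

		elapsed_ns = (uint64_t)diff.tv_sec * 1000000000ULL + diff.tv_nsec;
		b->tokens += elapsed_ns;
		if (b->tokens > TOKEN_MAX)
			b->tokens = TOKEN_MAX;

		if (b->tokens <= INITIATION_COST)
			return false;		/* blocked: out of tokens */
		b->tokens -= INITIATION_COST;
		return true;			/* allowed */
	}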

Index: uvm_extern.h

@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_extern.h,v 1.170 2023/06/21 21:16:21 cheloha Exp $ */
+/* $OpenBSD: uvm_extern.h,v 1.171 2023/08/18 09:18:52 claudio Exp $ */
 /* $NetBSD: uvm_extern.h,v 1.57 2001/03/09 01:02:12 chs Exp $ */

 /*
@@ -414,7 +414,6 @@ void uvmspace_free(struct vmspace *);
 struct vmspace *uvmspace_share(struct process *);
 int uvm_share(vm_map_t, vaddr_t, vm_prot_t,
     vm_map_t, vaddr_t, vsize_t);
-void uvm_meter(void);
 int uvm_sysctl(int *, u_int, void *, size_t *,
     void *, size_t, struct proc *);
 struct vm_page *uvm_pagealloc(struct uvm_object *,

Index: uvm_meter.c

@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_meter.c,v 1.48 2023/08/03 16:12:08 claudio Exp $ */
+/* $OpenBSD: uvm_meter.c,v 1.49 2023/08/18 09:18:52 claudio Exp $ */
 /* $NetBSD: uvm_meter.c,v 1.21 2001/07/14 06:36:03 matt Exp $ */

 /*
@@ -63,58 +63,12 @@
 #define MAXSLP 20
 int maxslp = MAXSLP; /* patchable ... */

-struct loadavg averunnable;
-
-/*
- * constants for averages over 1, 5, and 15 minutes when sampling at
- * 5 second intervals.
- */
+extern struct loadavg averunnable;
-static const fixpt_t cexp[3] = {
-	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
-	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
-	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
-};
-
-static void uvm_loadav(struct loadavg *);
 void uvm_total(struct vmtotal *);
 void uvmexp_read(struct uvmexp *);

-/*
- * uvm_meter: calculate load average
- */
-void
-uvm_meter(void)
-{
-	if ((gettime() % 5) == 0)
-		uvm_loadav(&averunnable);
-}
-
-/*
- * uvm_loadav: compute a tenex style load average of a quantity on
- * 1, 5, and 15 minute intervals.
- */
-static void
-uvm_loadav(struct loadavg *avg)
-{
-	extern struct cpuset sched_idle_cpus;
-	CPU_INFO_ITERATOR cii;
-	struct cpu_info *ci;
-	u_int i, nrun = 0;
-
-	CPU_INFO_FOREACH(cii, ci) {
-		if (!cpuset_isset(&sched_idle_cpus, ci))
-			nrun++;
-		nrun += ci->ci_schedstate.spc_nrun;
-	}
-
-	for (i = 0; i < 3; i++) {
-		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
-		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
-	}
-}
-
 char malloc_conf[16];

 /*