sync code with the latest improvements from OpenBSD

purplerain 2023-09-10 18:41:05 +00:00
parent 887dd091b7
commit 010ec4e74c
Signed by: purplerain
GPG key ID: F42C07F07E2E35B7
33 changed files with 409 additions and 201 deletions

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: ucode.c,v 1.7 2023/08/09 02:59:41 jsg Exp $ */
/* $OpenBSD: ucode.c,v 1.8 2023/09/10 09:32:31 jsg Exp $ */
/*
* Copyright (c) 2018 Stefan Fritsch <fritsch@genua.de>
* Copyright (c) 2018 Patrick Wildt <patrick@blueri.se>
@@ -141,6 +141,7 @@ cpu_ucode_amd_apply(struct cpu_info *ci)
uint16_t eid = 0;
uint32_t sig, ebx, ecx, edx;
uint64_t start = 0;
uint32_t patch_len = 0;
if (cpu_ucode_data == NULL || cpu_ucode_size == 0) {
DPRINTF(("%s: no microcode provided\n", __func__));
@@ -187,8 +188,10 @@ cpu_ucode_amd_apply(struct cpu_info *ci)
goto out;
}
memcpy(&ap, &cpu_ucode_data[i], sizeof(ap));
if (ap.type == 1 && ap.eid == eid && ap.level > level)
if (ap.type == 1 && ap.eid == eid && ap.level > level) {
start = (uint64_t)&cpu_ucode_data[i + 8];
patch_len = ap.len;
}
if (i + ap.len + 8 > cpu_ucode_size) {
DPRINTF(("%s: truncated patch\n", __func__));
goto out;
@@ -197,9 +200,16 @@ cpu_ucode_amd_apply(struct cpu_info *ci)
}
if (start != 0) {
/* alignment required on fam 15h */
uint8_t *p = malloc(patch_len, M_TEMP, M_NOWAIT);
if (p == NULL)
goto out;
memcpy(p, (uint8_t *)start, patch_len);
start = (uint64_t)p;
wrmsr(MSR_PATCH_LOADER, start);
level = rdmsr(MSR_PATCH_LEVEL);
DPRINTF(("%s: new patch level 0x%llx\n", __func__, level));
free(p, M_TEMP, patch_len);
}
out:
mtx_leave(&cpu_ucode_mtx);
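In short, the apply path no longer feeds MSR_PATCH_LOADER an address inside the raw firmware blob: the matching patch is remembered together with its length, copied into a freshly malloc'd buffer (which provides the alignment family 15h parts require), and the copy is what gets loaded and then freed. A condensed sketch of the resulting block, assembled from the hunks above with the scan loop and error paths omitted:

	if (start != 0) {
		/* alignment required on fam 15h */
		uint8_t *p = malloc(patch_len, M_TEMP, M_NOWAIT);
		if (p == NULL)
			goto out;
		memcpy(p, (uint8_t *)start, patch_len);	/* copy the patch out of the image */
		wrmsr(MSR_PATCH_LOADER, (uint64_t)p);	/* load from the aligned copy */
		level = rdmsr(MSR_PATCH_LEVEL);		/* confirm the new patch level */
		free(p, M_TEMP, patch_len);
	}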

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: ucode.c,v 1.5 2023/08/09 02:59:41 jsg Exp $ */
/* $OpenBSD: ucode.c,v 1.6 2023/09/10 09:32:31 jsg Exp $ */
/*
* Copyright (c) 2018 Stefan Fritsch <fritsch@genua.de>
* Copyright (c) 2018 Patrick Wildt <patrick@blueri.se>
@@ -164,6 +164,7 @@ cpu_ucode_amd_apply(struct cpu_info *ci)
uint16_t eid = 0;
uint32_t sig, ebx, ecx, edx;
uint64_t start = 0;
uint32_t patch_len = 0;
if (cpu_ucode_data == NULL || cpu_ucode_size == 0) {
DPRINTF(("%s: no microcode provided\n", __func__));
@@ -210,8 +211,10 @@ cpu_ucode_amd_apply(struct cpu_info *ci)
goto out;
}
memcpy(&ap, &cpu_ucode_data[i], sizeof(ap));
if (ap.type == 1 && ap.eid == eid && ap.level > level)
if (ap.type == 1 && ap.eid == eid && ap.level > level) {
start = (uint64_t)&cpu_ucode_data[i + 8];
patch_len = ap.len;
}
if (i + ap.len + 8 > cpu_ucode_size) {
DPRINTF(("%s: truncated patch\n", __func__));
goto out;
@@ -220,9 +223,16 @@ cpu_ucode_amd_apply(struct cpu_info *ci)
}
if (start != 0) {
/* alignment required on fam 15h */
uint8_t *p = malloc(patch_len, M_TEMP, M_NOWAIT);
if (p == NULL)
goto out;
memcpy(p, (uint8_t *)start, patch_len);
start = (uint64_t)p;
wrmsr(MSR_PATCH_LOADER, start);
level = rdmsr(MSR_PATCH_LEVEL);
DPRINTF(("%s: new patch level 0x%llx\n", __func__, level));
free(p, M_TEMP, patch_len);
}
out:
mtx_leave(&cpu_ucode_mtx);

View file

@@ -36,7 +36,7 @@ umask 007
if [ ! -r version -o ! -s version ]
then
echo 0 > version
echo 1337 > version
fi
touch version

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: rkdrm.c,v 1.15 2023/01/01 01:34:33 jsg Exp $ */
/* $OpenBSD: rkdrm.c,v 1.16 2023/09/10 06:25:09 jsg Exp $ */
/* $NetBSD: rk_drm.c,v 1.3 2019/12/15 01:00:58 mrg Exp $ */
/*-
* Copyright (c) 2019 Jared D. McNeill <jmcneill@invisible.ca>
@@ -283,6 +283,9 @@ rkdrm_wsioctl(void *v, u_long cmd, caddr_t data, int flag, struct proc *p)
case WSDISPLAYIO_LINEBYTES:
*(u_int *)data = ri->ri_stride;
return 0;
case WSDISPLAYIO_SVIDEO:
case WSDISPLAYIO_GVIDEO:
return 0;
}
return (-1);

View file

@@ -3349,6 +3349,9 @@ amdgpu_wsioctl(void *v, u_long cmd, caddr_t data, int flag, struct proc *p)
return 0;
}
break;
case WSDISPLAYIO_SVIDEO:
case WSDISPLAYIO_GVIDEO:
return 0;
}
return (-1);

View file

@@ -2256,6 +2256,9 @@ inteldrm_wsioctl(void *v, u_long cmd, caddr_t data, int flag, struct proc *p)
return 0;
}
break;
case WSDISPLAYIO_SVIDEO:
case WSDISPLAYIO_GVIDEO:
return 0;
}
return (-1);

View file

@@ -843,6 +843,9 @@ radeondrm_wsioctl(void *v, u_long cmd, caddr_t data, int flag, struct proc *p)
if (ws_set_param == NULL)
return 0;
return ws_set_param(dp);
case WSDISPLAYIO_SVIDEO:
case WSDISPLAYIO_GVIDEO:
return 0;
default:
return -1;
}
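The same two cases are added to each of the four DRM drivers above (rkdrm, amdgpu, inteldrm, radeondrm): their wsioctl handlers now recognize WSDISPLAYIO_SVIDEO and WSDISPLAYIO_GVIDEO and report success instead of falling through to the unsupported-ioctl return. Sketched in isolation, the shared pattern is roughly:

	switch (cmd) {
	/* ... existing WSDISPLAYIO_* cases ... */
	case WSDISPLAYIO_SVIDEO:
	case WSDISPLAYIO_GVIDEO:
		return 0;	/* accepted, but otherwise a no-op */
	}
	return (-1);		/* everything else remains unsupported */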

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: kern_clock.c,v 1.115 2023/08/23 01:55:45 cheloha Exp $ */
/* $OpenBSD: kern_clock.c,v 1.116 2023/09/09 18:19:03 cheloha Exp $ */
/* $NetBSD: kern_clock.c,v 1.34 1996/06/09 04:51:03 briggs Exp $ */
/*-
@@ -79,7 +79,6 @@
*/
int stathz;
int schedhz;
int profhz;
int profprocs;
int ticks = INT_MAX - (15 * 60 * HZ);
@@ -295,13 +294,10 @@ statclock(struct clockframe *frame)
if (p != NULL) {
p->p_cpticks++;
/*
* If no schedclock is provided, call it here at ~12-25 Hz;
* ~16 Hz is best
* schedclock() runs every fourth statclock().
*/
if (schedhz == 0) {
if ((++spc->spc_schedticks & 3) == 0)
schedclock(p);
}
if ((++spc->spc_schedticks & 3) == 0)
schedclock(p);
}
}

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: kern_clockintr.c,v 1.43 2023/09/09 16:34:39 cheloha Exp $ */
/* $OpenBSD: kern_clockintr.c,v 1.47 2023/09/10 03:08:05 cheloha Exp $ */
/*
* Copyright (c) 2003 Dale Rahn <drahn@openbsd.org>
* Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
@@ -43,10 +43,10 @@ uint32_t statclock_min; /* [I] minimum statclock period (ns) */
uint32_t statclock_mask; /* [I] set of allowed offsets */
uint64_t clockintr_advance_random(struct clockintr *, uint64_t, uint32_t);
void clockintr_hardclock(struct clockintr *, void *);
void clockintr_hardclock(struct clockintr *, void *, void *);
void clockintr_schedule(struct clockintr *, uint64_t);
void clockintr_schedule_locked(struct clockintr *, uint64_t);
void clockintr_statclock(struct clockintr *, void *);
void clockintr_statclock(struct clockintr *, void *, void *);
void clockqueue_intrclock_install(struct clockintr_queue *,
const struct intrclock *);
uint64_t clockqueue_next(const struct clockintr_queue *);
@@ -114,12 +114,14 @@ clockintr_cpu_init(const struct intrclock *ic)
/* TODO: Remove these from struct clockintr_queue. */
if (cq->cq_hardclock == NULL) {
cq->cq_hardclock = clockintr_establish(ci, clockintr_hardclock);
cq->cq_hardclock = clockintr_establish(ci, clockintr_hardclock,
NULL);
if (cq->cq_hardclock == NULL)
panic("%s: failed to establish hardclock", __func__);
}
if (cq->cq_statclock == NULL) {
cq->cq_statclock = clockintr_establish(ci, clockintr_statclock);
cq->cq_statclock = clockintr_establish(ci, clockintr_statclock,
NULL);
if (cq->cq_statclock == NULL)
panic("%s: failed to establish statclock", __func__);
}
@@ -219,7 +221,7 @@ clockintr_dispatch(void *frame)
{
uint64_t lateness, run = 0, start;
struct cpu_info *ci = curcpu();
struct clockintr *cl;
struct clockintr *cl, *shadow;
struct clockintr_queue *cq = &ci->ci_queue;
uint32_t ogen;
@@ -257,24 +259,30 @@ clockintr_dispatch(void *frame)
if (cq->cq_uptime < cl->cl_expiration)
break;
}
/*
* This clockintr has expired. Initialize a shadow copy
* and execute it.
*/
clockqueue_pend_delete(cq, cl);
cq->cq_shadow.cl_expiration = cl->cl_expiration;
cq->cq_shadow.cl_func = cl->cl_func;
shadow = &cq->cq_shadow;
shadow->cl_expiration = cl->cl_expiration;
shadow->cl_arg = cl->cl_arg;
shadow->cl_func = cl->cl_func;
cq->cq_running = cl;
mtx_leave(&cq->cq_mtx);
cq->cq_shadow.cl_func(&cq->cq_shadow, frame);
shadow->cl_func(shadow, frame, shadow->cl_arg);
mtx_enter(&cq->cq_mtx);
cq->cq_running = NULL;
if (ISSET(cl->cl_flags, CLST_IGNORE_SHADOW)) {
CLR(cl->cl_flags, CLST_IGNORE_SHADOW);
CLR(cq->cq_shadow.cl_flags, CLST_SHADOW_PENDING);
CLR(shadow->cl_flags, CLST_SHADOW_PENDING);
}
if (ISSET(cq->cq_shadow.cl_flags, CLST_SHADOW_PENDING)) {
CLR(cq->cq_shadow.cl_flags, CLST_SHADOW_PENDING);
clockqueue_pend_insert(cq, cl,
cq->cq_shadow.cl_expiration);
if (ISSET(shadow->cl_flags, CLST_SHADOW_PENDING)) {
CLR(shadow->cl_flags, CLST_SHADOW_PENDING);
clockqueue_pend_insert(cq, cl, shadow->cl_expiration);
}
run++;
}
@@ -326,14 +334,13 @@ clockintr_advance(struct clockintr *cl, uint64_t period)
if (cl == &cq->cq_shadow) {
count = nsec_advance(&cl->cl_expiration, period, cq->cq_uptime);
SET(cl->cl_flags, CLST_SHADOW_PENDING);
return count;
} else {
mtx_enter(&cq->cq_mtx);
expiration = cl->cl_expiration;
count = nsec_advance(&expiration, period, nsecuptime());
clockintr_schedule_locked(cl, expiration);
mtx_leave(&cq->cq_mtx);
}
mtx_enter(&cq->cq_mtx);
expiration = cl->cl_expiration;
count = nsec_advance(&expiration, period, nsecuptime());
clockintr_schedule_locked(cl, expiration);
mtx_leave(&cq->cq_mtx);
return count;
}
@@ -385,7 +392,7 @@ clockintr_cancel(struct clockintr *cl)
struct clockintr *
clockintr_establish(struct cpu_info *ci,
void (*func)(struct clockintr *, void *))
void (*func)(struct clockintr *, void *, void *), void *arg)
{
struct clockintr *cl;
struct clockintr_queue *cq = &ci->ci_queue;
@@ -393,6 +400,7 @@ clockintr_establish(struct cpu_info *ci,
cl = malloc(sizeof *cl, M_DEVBUF, M_NOWAIT | M_ZERO);
if (cl == NULL)
return NULL;
cl->cl_arg = arg;
cl->cl_func = func;
cl->cl_queue = cq;
@@ -410,12 +418,11 @@ clockintr_schedule(struct clockintr *cl, uint64_t expiration)
if (cl == &cq->cq_shadow) {
cl->cl_expiration = expiration;
SET(cl->cl_flags, CLST_SHADOW_PENDING);
return;
} else {
mtx_enter(&cq->cq_mtx);
clockintr_schedule_locked(cl, expiration);
mtx_leave(&cq->cq_mtx);
}
mtx_enter(&cq->cq_mtx);
clockintr_schedule_locked(cl, expiration);
mtx_leave(&cq->cq_mtx);
}
void
@@ -454,7 +461,7 @@ clockintr_stagger(struct clockintr *cl, uint64_t period, uint32_t n,
}
void
clockintr_hardclock(struct clockintr *cl, void *frame)
clockintr_hardclock(struct clockintr *cl, void *frame, void *arg)
{
uint64_t count, i;
@@ -464,7 +471,7 @@ clockintr_hardclock(struct clockintr *cl, void *frame)
}
void
clockintr_statclock(struct clockintr *cl, void *frame)
clockintr_statclock(struct clockintr *cl, void *frame, void *arg)
{
uint64_t count, i;
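The thread running through this file and the headers further down is a new third parameter for clock interrupt callbacks: clockintr_establish() now takes a void *arg, stores it in cl_arg, and clockintr_dispatch() hands it back through the shadow copy when the callback runs. Every caller touched by this commit still passes NULL, so behaviour is unchanged for now. A minimal sketch of a consumer under the new signature (myclock and myarg are hypothetical names used for illustration, not part of the commit):

	void	myclock(struct clockintr *, void *, void *);

	void
	myclock(struct clockintr *cl, void *frame, void *arg)
	{
		/* arg is whatever was passed to clockintr_establish() */
	}

	/* establishment, typically once per CPU during init */
	cl = clockintr_establish(ci, myclock, myarg);
	if (cl == NULL)
		panic("%s: clockintr_establish myclock", __func__);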

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: kern_sched.c,v 1.89 2023/09/06 02:09:58 cheloha Exp $ */
/* $OpenBSD: kern_sched.c,v 1.90 2023/09/10 03:08:05 cheloha Exp $ */
/*
* Copyright (c) 2007, 2008 Artur Grabowski <art@openbsd.org>
*
@@ -88,13 +88,13 @@ sched_init_cpu(struct cpu_info *ci)
spc->spc_idleproc = NULL;
spc->spc_itimer = clockintr_establish(ci, itimer_update);
spc->spc_itimer = clockintr_establish(ci, itimer_update, NULL);
if (spc->spc_itimer == NULL)
panic("%s: clockintr_establish itimer_update", __func__);
spc->spc_profclock = clockintr_establish(ci, profclock);
spc->spc_profclock = clockintr_establish(ci, profclock, NULL);
if (spc->spc_profclock == NULL)
panic("%s: clockintr_establish profclock", __func__);
spc->spc_roundrobin = clockintr_establish(ci, roundrobin);
spc->spc_roundrobin = clockintr_establish(ci, roundrobin, NULL);
if (spc->spc_roundrobin == NULL)
panic("%s: clockintr_establish roundrobin", __func__);

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: kern_time.c,v 1.165 2023/08/29 16:19:34 claudio Exp $ */
/* $OpenBSD: kern_time.c,v 1.166 2023/09/10 03:08:05 cheloha Exp $ */
/* $NetBSD: kern_time.c,v 1.20 1996/02/18 11:57:06 fvdl Exp $ */
/*
@@ -755,7 +755,7 @@ itimerdecr(struct itimerspec *itp, const struct timespec *decrement)
}
void
itimer_update(struct clockintr *cl, void *cf)
itimer_update(struct clockintr *cl, void *cf, void *arg)
{
struct timespec elapsed;
uint64_t nsecs;

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: sched_bsd.c,v 1.85 2023/08/30 09:02:38 claudio Exp $ */
/* $OpenBSD: sched_bsd.c,v 1.86 2023/09/10 03:08:05 cheloha Exp $ */
/* $NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $ */
/*-
@@ -83,7 +83,7 @@ struct loadavg averunnable;
* Force switch among equal priority processes every 100ms.
*/
void
roundrobin(struct clockintr *cl, void *cf)
roundrobin(struct clockintr *cl, void *cf, void *arg)
{
uint64_t count;
struct cpu_info *ci = curcpu();

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: subr_prof.c,v 1.37 2023/09/06 02:09:58 cheloha Exp $ */
/* $OpenBSD: subr_prof.c,v 1.38 2023/09/10 03:08:05 cheloha Exp $ */
/* $NetBSD: subr_prof.c,v 1.12 1996/04/22 01:38:50 christos Exp $ */
/*-
@@ -64,7 +64,7 @@ u_int gmon_cpu_count; /* [K] number of CPUs with profiling enabled */
extern char etext[];
void gmonclock(struct clockintr *, void *);
void gmonclock(struct clockintr *, void *, void *);
void
prof_init(void)
@@ -101,7 +101,7 @@ prof_init(void)
/* Allocate and initialize one profiling buffer per CPU. */
CPU_INFO_FOREACH(cii, ci) {
ci->ci_gmonclock = clockintr_establish(ci, gmonclock);
ci->ci_gmonclock = clockintr_establish(ci, gmonclock, NULL);
if (ci->ci_gmonclock == NULL) {
printf("%s: clockintr_establish gmonclock\n", __func__);
return;
@@ -236,7 +236,7 @@ sysctl_doprof(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
}
void
gmonclock(struct clockintr *cl, void *cf)
gmonclock(struct clockintr *cl, void *cf, void *arg)
{
uint64_t count;
struct clockframe *frame = cf;
@@ -307,7 +307,7 @@ sys_profil(struct proc *p, void *v, register_t *retval)
}
void
profclock(struct clockintr *cl, void *cf)
profclock(struct clockintr *cl, void *cf, void *arg)
{
uint64_t count;
struct clockframe *frame = cf;

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: clockintr.h,v 1.12 2023/09/06 02:33:18 cheloha Exp $ */
/* $OpenBSD: clockintr.h,v 1.13 2023/09/10 03:08:05 cheloha Exp $ */
/*
* Copyright (c) 2020-2022 Scott Cheloha <cheloha@openbsd.org>
*
@@ -70,7 +70,8 @@ struct clockintr {
uint64_t cl_expiration; /* [m] dispatch time */
TAILQ_ENTRY(clockintr) cl_elink; /* [m] cq_est glue */
TAILQ_ENTRY(clockintr) cl_plink; /* [m] cq_pend glue */
void (*cl_func)(struct clockintr *, void *); /* [I] callback */
void *cl_arg; /* [I] argument */
void (*cl_func)(struct clockintr *, void *, void *); /* [I] callback */
struct clockintr_queue *cl_queue; /* [I] parent queue */
uint32_t cl_flags; /* [m] CLST_* flags */
};
@@ -129,7 +130,7 @@ void clockintr_trigger(void);
uint64_t clockintr_advance(struct clockintr *, uint64_t);
void clockintr_cancel(struct clockintr *);
struct clockintr *clockintr_establish(struct cpu_info *,
void (*)(struct clockintr *, void *));
void (*)(struct clockintr *, void *, void *), void *);
void clockintr_stagger(struct clockintr *, uint64_t, uint32_t, uint32_t);
void clockqueue_init(struct clockintr_queue *);
int sysctl_clockintr(int *, u_int, void *, size_t *, void *, size_t);

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: resourcevar.h,v 1.28 2023/08/29 16:19:34 claudio Exp $ */
/* $OpenBSD: resourcevar.h,v 1.29 2023/09/10 03:08:05 cheloha Exp $ */
/* $NetBSD: resourcevar.h,v 1.12 1995/11/22 23:01:53 cgd Exp $ */
/*
@@ -66,7 +66,7 @@ extern uint32_t profclock_period;
void addupc_intr(struct proc *, u_long, u_long);
void addupc_task(struct proc *, u_long, u_int);
void profclock(struct clockintr *, void *);
void profclock(struct clockintr *, void *, void *);
void tuagg_locked(struct process *, struct proc *, const struct timespec *);
void tuagg(struct process *, struct proc *);
struct tusage;

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: sched.h,v 1.61 2023/08/11 22:02:50 cheloha Exp $ */
/* $OpenBSD: sched.h,v 1.63 2023/09/10 03:08:05 cheloha Exp $ */
/* $NetBSD: sched.h,v 1.2 1999/02/28 18:14:58 ross Exp $ */
/*-
@@ -146,11 +146,10 @@ struct cpustats {
#define ESTCPULIM(e) min((e), NICE_WEIGHT * PRIO_MAX - SCHED_PPQ)
extern uint32_t roundrobin_period;
extern int schedhz; /* ideally: 16 */
struct proc;
void schedclock(struct proc *);
void roundrobin(struct clockintr *, void *);
void roundrobin(struct clockintr *, void *, void *);
void scheduler_start(void);
void userret(struct proc *p);

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: time.h,v 1.64 2023/08/05 20:07:56 cheloha Exp $ */
/* $OpenBSD: time.h,v 1.65 2023/09/10 03:08:05 cheloha Exp $ */
/* $NetBSD: time.h,v 1.18 1996/04/23 10:29:33 mycroft Exp $ */
/*
@@ -331,7 +331,7 @@ struct proc;
int clock_gettime(struct proc *, clockid_t, struct timespec *);
struct clockintr;
void itimer_update(struct clockintr *, void *);
void itimer_update(struct clockintr *, void *, void *);
void cancel_all_itimers(void);
int settime(const struct timespec *);