Sync code with the latest improvements from OpenBSD
parent 30061c429a
commit c346c8d04f
5 changed files with 41 additions and 54 deletions
if_ix.c

@@ -1,4 +1,4 @@
-/* $OpenBSD: if_ix.c,v 1.203 2023/08/03 18:56:32 jan Exp $ */
+/* $OpenBSD: if_ix.c,v 1.204 2023/08/21 21:45:18 bluhm Exp $ */
 
 /******************************************************************************
 
@@ -3230,12 +3230,6 @@ ixgbe_rxeof(struct rx_ring *rxr)
 			sendmp = mp;
 			sendmp->m_pkthdr.len = 0;
 			sendmp->m_pkthdr.ph_mss = 0;
-#if NVLAN > 0
-			if (staterr & IXGBE_RXD_STAT_VP) {
-				sendmp->m_pkthdr.ether_vtag = vtag;
-				SET(sendmp->m_flags, M_VLANTAG);
-			}
-#endif
 		}
 		sendmp->m_pkthdr.len += mp->m_len;
 		/*
@@ -3256,7 +3250,12 @@ ixgbe_rxeof(struct rx_ring *rxr)
 			uint16_t pkts;
 
 			ixgbe_rx_checksum(staterr, sendmp);
-
+#if NVLAN > 0
+			if (staterr & IXGBE_RXD_STAT_VP) {
+				sendmp->m_pkthdr.ether_vtag = vtag;
+				SET(sendmp->m_flags, M_VLANTAG);
+			}
+#endif
 			if (hashtype != IXGBE_RXDADV_RSSTYPE_NONE) {
 				sendmp->m_pkthdr.ph_flowid = hash;
 				SET(sendmp->m_pkthdr.csum_flags, M_FLOWID);
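
Taken together, the two ixgbe_rxeof() hunks above move the #if NVLAN > 0 block below the ixgbe_rx_checksum() call, so the checksum-offload flags are recorded on the completed mbuf before the VLAN tag is attached to it. The following is a minimal standalone sketch of that ordering; the pkt struct, flag values, and helper names are hypothetical stand-ins, not the kernel's mbuf or ix(4) definitions.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the driver's descriptor status bits and mbuf flags. */
#define STAT_VP      0x08	/* descriptor reports a VLAN tag is present */
#define STAT_IPCS_OK 0x40	/* descriptor reports the IP checksum verified */
#define F_IPV4_CSUM  0x01	/* packet flag: IP checksum good */
#define F_VLANTAG    0x02	/* packet flag: vtag field is valid */

struct pkt {
	uint32_t flags;		/* offload flags, akin to csum_flags/m_flags */
	uint16_t vtag;		/* VLAN tag, akin to ether_vtag */
};

/* Record checksum-offload results first, as ixgbe_rx_checksum() does. */
static void
rx_checksum(uint32_t staterr, struct pkt *p)
{
	if (staterr & STAT_IPCS_OK)
		p->flags |= F_IPV4_CSUM;
}

/* Then record the VLAN tag, mirroring the relocated #if NVLAN block. */
static void
rx_vlan(uint32_t staterr, uint16_t vtag, struct pkt *p)
{
	if (staterr & STAT_VP) {
		p->vtag = vtag;
		p->flags |= F_VLANTAG;
	}
}

int
main(void)
{
	struct pkt p = { 0, 0 };
	uint32_t staterr = STAT_VP | STAT_IPCS_OK;	/* pretend descriptor status */

	rx_checksum(staterr, &p);	/* checksum flags before... */
	rx_vlan(staterr, 7, &p);	/* ...the VLAN tag, as in the new ordering */
	printf("flags=0x%x vtag=%u\n", (unsigned)p.flags, (unsigned)p.vtag);
	return 0;
}

Compiled on its own, the sketch shows the mbuf-like structure fully annotated (checksum flags, then VLAN tag) before it would be handed up the stack, which is the ordering the reshuffled driver code establishes.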
kern_clockintr.c

@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_clockintr.c,v 1.31 2023/08/11 22:02:50 cheloha Exp $ */
+/* $OpenBSD: kern_clockintr.c,v 1.32 2023/08/21 17:22:04 cheloha Exp $ */
 /*
  * Copyright (c) 2003 Dale Rahn <drahn@openbsd.org>
  * Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
@@ -38,7 +38,6 @@
  */
 u_int clockintr_flags;		/* [I] global state + behavior flags */
 uint32_t hardclock_period;	/* [I] hardclock period (ns) */
-uint32_t schedclock_period;	/* [I] schedclock period (ns) */
 uint32_t statclock_avg;		/* [I] average statclock period (ns) */
 uint32_t statclock_min;		/* [I] minimum statclock period (ns) */
 uint32_t statclock_mask;	/* [I] set of allowed offsets */
@@ -47,7 +46,6 @@ void clockintr_cancel_locked(struct clockintr *);
 uint64_t clockintr_expiration(const struct clockintr *);
 void clockintr_hardclock(struct clockintr *, void *);
 uint64_t clockintr_nsecuptime(const struct clockintr *);
-void clockintr_schedclock(struct clockintr *, void *);
 void clockintr_schedule(struct clockintr *, uint64_t);
 void clockintr_schedule_locked(struct clockintr *, uint64_t);
 void clockintr_statclock(struct clockintr *, void *);
@@ -89,10 +87,6 @@ clockintr_init(u_int flags)
 	statclock_min = statclock_avg - (var / 2);
 	statclock_mask = var - 1;
 
-	KASSERT(schedhz >= 0 && schedhz <= 1000000000);
-	if (schedhz != 0)
-		schedclock_period = 1000000000 / schedhz;
-
 	SET(clockintr_flags, flags | CL_INIT);
 }
 
@@ -128,12 +122,6 @@ clockintr_cpu_init(const struct intrclock *ic)
 		if (cq->cq_statclock == NULL)
 			panic("%s: failed to establish statclock", __func__);
 	}
-	if (schedhz != 0 && cq->cq_schedclock == NULL) {
-		cq->cq_schedclock = clockintr_establish(cq,
-		    clockintr_schedclock);
-		if (cq->cq_schedclock == NULL)
-			panic("%s: failed to establish schedclock", __func__);
-	}
 
 	/*
 	 * Mask CQ_INTRCLOCK while we're advancing the internal clock
@@ -175,8 +163,8 @@ clockintr_cpu_init(const struct intrclock *ic)
 	}
 
 	/*
-	 * We can always advance the statclock and schedclock.
-	 * There is no reason to stagger a randomized statclock.
+	 * We can always advance the statclock. There is no reason to
+	 * stagger a randomized statclock.
 	 */
 	if (!ISSET(clockintr_flags, CL_RNDSTAT)) {
 		if (cq->cq_statclock->cl_expiration == 0) {
@@ -185,13 +173,6 @@ clockintr_cpu_init(const struct intrclock *ic)
 		}
 	}
 	clockintr_advance(cq->cq_statclock, statclock_avg);
-	if (schedhz != 0) {
-		if (cq->cq_schedclock->cl_expiration == 0) {
-			clockintr_stagger(cq->cq_schedclock, schedclock_period,
-			    multiplier, MAXCPUS);
-		}
-		clockintr_advance(cq->cq_schedclock, schedclock_period);
-	}
 
 	/*
 	 * XXX Need to find a better place to do this. We can't do it in
@@ -514,19 +495,6 @@ clockintr_hardclock(struct clockintr *cl, void *frame)
 	hardclock(frame);
 }
 
-void
-clockintr_schedclock(struct clockintr *cl, void *unused)
-{
-	uint64_t count, i;
-	struct proc *p = curproc;
-
-	count = clockintr_advance(cl, schedclock_period);
-	if (p != NULL) {
-		for (i = 0; i < count; i++)
-			schedclock(p);
-	}
-}
-
 void
 clockintr_statclock(struct clockintr *cl, void *frame)
 {
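
The deleted clockintr_schedclock() above also documents the catch-up pattern used by these handlers: the return value of clockintr_advance() is used as the number of periods that have expired since the handler last ran, and the payload (schedclock(p) in the removed code) is invoked once per elapsed period. Below is a minimal userspace sketch of that loop, assuming a hypothetical timer struct and advance()/tick() helpers rather than the kernel's clockintr API.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical periodic-timer state (not the kernel's struct clockintr). */
struct timer {
	uint64_t expiration;	/* next expiration, in ns of "uptime" */
	uint64_t period;	/* interval between expirations, in ns */
};

/*
 * Push the timer's expiration past `now` and return how many whole
 * periods elapsed, mirroring how the removed handler used the value
 * returned by clockintr_advance().
 */
static uint64_t
advance(struct timer *t, uint64_t now)
{
	uint64_t count = 0;

	while (t->expiration <= now) {
		t->expiration += t->period;
		count++;
	}
	return count;
}

static void
tick(void)
{
	printf("payload ran\n");	/* stand-in for schedclock(p) */
}

int
main(void)
{
	struct timer t = { .expiration = 0, .period = 100 };
	uint64_t i, count;

	/* Pretend the interrupt fires late, at "time" 350. */
	count = advance(&t, 350);
	for (i = 0; i < count; i++)	/* run the payload once per period */
		tick();
	return 0;
}

With expiration 0, period 100, and a now of 350, advance() returns 4 and the payload runs four times, the same shape as the removed for (i = 0; i < count; i++) schedclock(p) loop.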
clockintr.h

@@ -1,4 +1,4 @@
-/* $OpenBSD: clockintr.h,v 1.9 2023/07/25 18:16:19 cheloha Exp $ */
+/* $OpenBSD: clockintr.h,v 1.10 2023/08/21 17:22:04 cheloha Exp $ */
 /*
  * Copyright (c) 2020-2022 Scott Cheloha <cheloha@openbsd.org>
  *
@@ -97,7 +97,6 @@ struct clockintr_queue {
 	TAILQ_HEAD(, clockintr) cq_pend;/* [m] pending clockintr list */
 	struct clockintr *cq_running;	/* [m] running clockintr */
 	struct clockintr *cq_hardclock;	/* [o] hardclock handle */
-	struct clockintr *cq_schedclock;/* [o] schedclock handle, if any */
 	struct clockintr *cq_statclock;	/* [o] statclock handle */
 	struct intrclock cq_intrclock;	/* [I] local interrupt clock */
 	struct clockintr_stat cq_stat;	/* [o] dispatch statistics */