commit 2a351e0cdc
parent f1b2576417

    sync

347 changed files with 9596 additions and 5486 deletions
@@ -2436,6 +2436,10 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
     long timeout = msecs_to_jiffies(2000);
     int r;

+    /* No valid flags defined yet */
+    if (args->in.flags)
+        return -EINVAL;
+
     switch (args->in.op) {
     case AMDGPU_VM_OP_RESERVE_VMID:
         /* We only have requirement to reserve vmid from gfxhub */
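For orientation (not part of the commit): userspace reaches this check through the AMDGPU_VM ioctl. A minimal sketch of a caller, assuming an open render-node fd and the drm uapi header; error handling is elided:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/amdgpu_drm.h>    /* union drm_amdgpu_vm, DRM_IOCTL_AMDGPU_VM */

    /* Ask the kernel to reserve a VMID for the calling process. */
    static int reserve_vmid(int fd)
    {
        union drm_amdgpu_vm vm;

        memset(&vm, 0, sizeof(vm));
        vm.in.op = AMDGPU_VM_OP_RESERVE_VMID;
        vm.in.flags = 0;    /* with this hunk, any nonzero flags fail with EINVAL */

        return ioctl(fd, DRM_IOCTL_AMDGPU_VM, &vm);
    }
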
@@ -348,6 +348,35 @@ static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
     return false;
 }

+/**
+ * update_planes_and_stream_adapter() - Send planes to be updated in DC
+ *
+ * DC has a generic way to update planes and stream via
+ * dc_update_planes_and_stream function; however, DM might need some
+ * adjustments and preparation before calling it. This function is a wrapper
+ * for the dc_update_planes_and_stream that does any required configuration
+ * before passing control to DC.
+ */
+static inline bool update_planes_and_stream_adapter(struct dc *dc,
+                                                    int update_type,
+                                                    int planes_count,
+                                                    struct dc_stream_state *stream,
+                                                    struct dc_stream_update *stream_update,
+                                                    struct dc_surface_update *array_of_surface_update)
+{
+    /*
+     * Previous frame finished and HW is ready for optimization.
+     */
+    if (update_type == UPDATE_TYPE_FAST)
+        dc_post_update_surfaces_to_stream(dc);
+
+    return dc_update_planes_and_stream(dc,
+                                       array_of_surface_update,
+                                       planes_count,
+                                       stream,
+                                       stream_update);
+}
+
 /**
  * dm_pflip_high_irq() - Handle pageflip interrupt
  * @interrupt_params: ignored
@@ -2634,10 +2663,13 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
             bundle->surface_updates[m].surface->force_full_update =
                 true;
         }
-        dc_commit_updates_for_stream(
-            dm->dc, bundle->surface_updates,
-            dc_state->stream_status->plane_count,
-            dc_state->streams[k], &bundle->stream_update, dc_state);
+
+        update_planes_and_stream_adapter(dm->dc,
+                                         UPDATE_TYPE_FULL,
+                                         dc_state->stream_status->plane_count,
+                                         dc_state->streams[k],
+                                         &bundle->stream_update,
+                                         bundle->surface_updates);
     }

 cleanup:
@@ -7874,6 +7906,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
     if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
         bundle->stream_update.abm_level = &acrtc_state->abm_level;

+    mutex_lock(&dm->dc_lock);
+    if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
+        acrtc_state->stream->link->psr_settings.psr_allow_active)
+        amdgpu_dm_psr_disable(acrtc_state->stream);
+    mutex_unlock(&dm->dc_lock);
+
     /*
      * If FreeSync state on the stream has changed then we need to
      * re-adjust the min/max bounds now that DC doesn't handle this
@@ -7887,16 +7925,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
     }
     mutex_lock(&dm->dc_lock);
-    if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
-        acrtc_state->stream->link->psr_settings.psr_allow_active)
-        amdgpu_dm_psr_disable(acrtc_state->stream);
-
-    dc_commit_updates_for_stream(dm->dc,
-                                 bundle->surface_updates,
-                                 planes_count,
-                                 acrtc_state->stream,
-                                 &bundle->stream_update,
-                                 dc_state);
+    update_planes_and_stream_adapter(dm->dc,
+                                     acrtc_state->update_type,
+                                     planes_count,
+                                     acrtc_state->stream,
+                                     &bundle->stream_update,
+                                     bundle->surface_updates);

     /**
      * Enable or disable the interrupts on the backend.
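Taken together with the previous hunk, the PSR-disable step moves from just before the stream commit to earlier in amdgpu_dm_commit_planes(), now in a short dc_lock critical section of its own, and the commit itself goes through the new update_planes_and_stream_adapter() with the actual update_type rather than calling dc_commit_updates_for_stream() directly.
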
@@ -8338,12 +8372,11 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)


         mutex_lock(&dm->dc_lock);
-        dc_commit_updates_for_stream(dm->dc,
-                                     dummy_updates,
-                                     status->plane_count,
-                                     dm_new_crtc_state->stream,
-                                     &stream_update,
-                                     dc_state);
+        dc_update_planes_and_stream(dm->dc,
+                                    dummy_updates,
+                                    status->plane_count,
+                                    dm_new_crtc_state->stream,
+                                    &stream_update);
         mutex_unlock(&dm->dc_lock);
     }

@@ -401,8 +401,13 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
 {
     int i;

-    if (memcmp(adjust, &stream->adjust, sizeof(struct dc_crtc_timing_adjust)) == 0)
-        return true;
+    /*
+     * Don't adjust DRR while there's bandwidth optimizations pending to
+     * avoid conflicting with firmware updates.
+     */
+    if (dc->ctx->dce_version > DCE_VERSION_MAX)
+        if (dc->optimized_required || dc->wm_optimized_required)
+            return false;

     stream->adjust.v_total_max = adjust->v_total_max;
     stream->adjust.v_total_mid = adjust->v_total_mid;
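The nested ifs added above are equivalent to one combined condition; purely as an illustration (this form is not in the commit):

    if (dc->ctx->dce_version > DCE_VERSION_MAX &&
        (dc->optimized_required || dc->wm_optimized_required))
        return false;

DCN values of dce_version sort above DCE_VERSION_MAX, so the guard only takes effect on DCN hardware.
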
@@ -2024,27 +2029,33 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)

     post_surface_trace(dc);

-    if (dc->ctx->dce_version >= DCE_VERSION_MAX)
-        TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
-    else
+    /*
+     * Only relevant for DCN behavior where we can guarantee the optimization
+     * is safe to apply - retain the legacy behavior for DCE.
+     */
+
+    if (dc->ctx->dce_version < DCE_VERSION_MAX)
         TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
+    else {
+        TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);

-    if (is_flip_pending_in_pipes(dc, context))
-        return;
+        if (is_flip_pending_in_pipes(dc, context))
+            return;

-    for (i = 0; i < dc->res_pool->pipe_count; i++)
-        if (context->res_ctx.pipe_ctx[i].stream == NULL ||
-            context->res_ctx.pipe_ctx[i].plane_state == NULL) {
-            context->res_ctx.pipe_ctx[i].pipe_idx = i;
-            dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
-        }
+        for (i = 0; i < dc->res_pool->pipe_count; i++)
+            if (context->res_ctx.pipe_ctx[i].stream == NULL ||
+                context->res_ctx.pipe_ctx[i].plane_state == NULL) {
+                context->res_ctx.pipe_ctx[i].pipe_idx = i;
+                dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
+            }

-    process_deferred_updates(dc);
+        process_deferred_updates(dc);

-    dc->hwss.optimize_bandwidth(dc, context);
+        dc->hwss.optimize_bandwidth(dc, context);

-    if (dc->debug.enable_double_buffered_dsc_pg_support)
-        dc->hwss.update_dsc_pg(dc, context, true);
+        if (dc->debug.enable_double_buffered_dsc_pg_support)
+            dc->hwss.update_dsc_pg(dc, context, true);
+    }

     dc->optimized_required = false;
     dc->wm_optimized_required = false;
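Net effect: on DCE hardware (dce_version < DCE_VERSION_MAX) this function now only traces clock state, while the flip-pending early return, the disable_plane loop over unused pipes, process_deferred_updates(), optimize_bandwidth(), and the DSC power-gating update are reached only on DCN, matching the new comment about the optimization being guaranteed safe there.
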
@@ -3869,12 +3880,9 @@ void dc_commit_updates_for_stream(struct dc *dc,
             if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
                 new_pipe->plane_state->force_full_update = true;
         }
-    } else if (update_type == UPDATE_TYPE_FAST && dc_ctx->dce_version >= DCE_VERSION_MAX) {
+    } else if (update_type == UPDATE_TYPE_FAST) {
         /*
          * Previous frame finished and HW is ready for optimization.
-         *
-         * Only relevant for DCN behavior where we can guarantee the optimization
-         * is safe to apply - retain the legacy behavior for DCE.
          */
         dc_post_update_surfaces_to_stream(dc);
     }

@@ -552,7 +552,6 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
     struct radeon_device *rdev = dev->dev_private;
     struct drm_radeon_gem_set_domain *args = data;
     struct drm_gem_object *gobj;
-    struct radeon_bo *robj;
     int r;

     /* for now if someone requests domain CPU -
@@ -565,13 +564,12 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
         up_read(&rdev->exclusive_lock);
         return -ENOENT;
     }
-    robj = gem_to_radeon_bo(gobj);

     r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

     drm_gem_object_put(gobj);
     up_read(&rdev->exclusive_lock);
-    r = radeon_gem_handle_lockup(robj->rdev, r);
+    r = radeon_gem_handle_lockup(rdev, r);
     return r;
 }

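The dropped robj local existed only to reach robj->rdev after drm_gem_object_put() had already released the reference; reading rdev once from dev->dev_private avoids dereferencing the object past the put.
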
@@ -1,4 +1,4 @@
-/* $OpenBSD: if_bge.c,v 1.400 2023/01/18 23:31:37 kettenis Exp $ */
+/* $OpenBSD: if_bge.c,v 1.401 2023/07/04 10:22:39 jmatthew Exp $ */

 /*
  * Copyright (c) 2001 Wind River Systems
@@ -74,6 +74,7 @@

 #include "bpfilter.h"
 #include "vlan.h"
+#include "kstat.h"

 #include <sys/param.h>
 #include <sys/systm.h>
@@ -85,6 +86,7 @@
 #include <sys/timeout.h>
 #include <sys/socket.h>
 #include <sys/atomic.h>
+#include <sys/kstat.h>

 #include <net/if.h>
 #include <net/if_media.h>
@@ -203,6 +205,58 @@ void bge_ape_unlock(struct bge_softc *, int);
 void bge_ape_send_event(struct bge_softc *, uint32_t);
 void bge_ape_driver_state_change(struct bge_softc *, int);

+#if NKSTAT > 0
+void bge_kstat_attach(struct bge_softc *);
+
+enum {
+    bge_stat_out_octets = 0,
+    bge_stat_collisions,
+    bge_stat_xon_sent,
+    bge_stat_xoff_sent,
+    bge_stat_xmit_errors,
+    bge_stat_coll_frames,
+    bge_stat_multicoll_frames,
+    bge_stat_deferred_xmit,
+    bge_stat_excess_coll,
+    bge_stat_late_coll,
+    bge_stat_out_ucast_pkt,
+    bge_stat_out_mcast_pkt,
+    bge_stat_out_bcast_pkt,
+    bge_stat_in_octets,
+    bge_stat_fragments,
+    bge_stat_in_ucast_pkt,
+    bge_stat_in_mcast_pkt,
+    bge_stat_in_bcast_pkt,
+    bge_stat_fcs_errors,
+    bge_stat_align_errors,
+    bge_stat_xon_rcvd,
+    bge_stat_xoff_rcvd,
+    bge_stat_ctrl_frame_rcvd,
+    bge_stat_xoff_entered,
+    bge_stat_too_long_frames,
+    bge_stat_jabbers,
+    bge_stat_too_short_pkts,
+
+    bge_stat_dma_rq_full,
+    bge_stat_dma_hprq_full,
+    bge_stat_sdc_queue_full,
+    bge_stat_nic_sendprod_set,
+    bge_stat_status_updated,
+    bge_stat_irqs,
+    bge_stat_avoided_irqs,
+    bge_stat_tx_thresh_hit,
+
+    bge_stat_filtdrop,
+    bge_stat_dma_wrq_full,
+    bge_stat_dma_hpwrq_full,
+    bge_stat_out_of_bds,
+    bge_stat_if_in_drops,
+    bge_stat_if_in_errors,
+    bge_stat_rx_thresh_hit,
+};
+
+#endif
+
 #ifdef BGE_DEBUG
 #define DPRINTF(x)    do { if (bgedebug) printf x; } while (0)
 #define DPRINTFN(n,x) do { if (bgedebug >= (n)) printf x; } while (0)
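A note on the structure: bge_kstat_tpl[] (added near the end of this file) is declared without an explicit size and is filled via designated initializers indexed by these enumerators, so the array automatically sizes itself to the highest enum value. That keeps the enum and the template table in step without a separate count macro.
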
@@ -2993,6 +3047,12 @@ bge_attach(struct device *parent, struct device *self, void *aux)
     else
         sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;

+    mtx_init(&sc->bge_kstat_mtx, IPL_SOFTCLOCK);
+#if NKSTAT > 0
+    if (BGE_IS_5705_PLUS(sc))
+        bge_kstat_attach(sc);
+#endif
+
     /* Set up ifnet structure */
     ifp = &sc->arpcom.ac_if;
     ifp->if_softc = sc;
@@ -3767,9 +3827,11 @@ bge_tick(void *xsc)

     s = splnet();

-    if (BGE_IS_5705_PLUS(sc))
+    if (BGE_IS_5705_PLUS(sc)) {
+        mtx_enter(&sc->bge_kstat_mtx);
         bge_stats_update_regs(sc);
-    else
+        mtx_leave(&sc->bge_kstat_mtx);
+    } else
         bge_stats_update(sc);

     if (sc->bge_flags & BGE_FIBER_TBI) {
@@ -3799,12 +3861,16 @@ void
 bge_stats_update_regs(struct bge_softc *sc)
 {
     struct ifnet *ifp = &sc->arpcom.ac_if;
+    uint32_t collisions, discards, inerrors;
+    uint32_t ucast, mcast, bcast;
+    u_int32_t val;
+#if NKSTAT > 0
+    struct kstat_kv *kvs = sc->bge_kstat->ks_data;
+#endif

-    sc->bge_tx_collisions += CSR_READ_4(sc, BGE_MAC_STATS +
+    collisions = CSR_READ_4(sc, BGE_MAC_STATS +
         offsetof(struct bge_mac_stats_regs, etherStatsCollisions));
-
-    sc->bge_rx_overruns += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);

     /*
      * XXX
      * Unlike other controllers, the BGE_RXLP_LOCSTAT_IFIN_DROPS counter
@@ -3826,23 +3892,22 @@ bge_stats_update_regs(struct bge_softc *sc)
         BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5762 &&
         sc->bge_chipid != BGE_CHIPID_BCM5719_A0 &&
         sc->bge_chipid != BGE_CHIPID_BCM5720_A0)
-        sc->bge_rx_discards += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
+        discards = CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
+    else
+        discards = 0;

-    sc->bge_rx_inerrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
+    inerrors = CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);

-    ifp->if_collisions = sc->bge_tx_collisions;
-    ifp->if_ierrors = sc->bge_rx_discards + sc->bge_rx_inerrors;
+    ifp->if_collisions += collisions;
+    ifp->if_ierrors += discards + inerrors;

+    ucast = CSR_READ_4(sc, BGE_MAC_STATS +
+        offsetof(struct bge_mac_stats_regs, ifHCOutUcastPkts));
+    mcast = CSR_READ_4(sc, BGE_MAC_STATS +
+        offsetof(struct bge_mac_stats_regs, ifHCOutMulticastPkts));
+    bcast = CSR_READ_4(sc, BGE_MAC_STATS +
+        offsetof(struct bge_mac_stats_regs, ifHCOutBroadcastPkts));
     if (sc->bge_flags & BGE_RDMA_BUG) {
-        u_int32_t val, ucast, mcast, bcast;
-
-        ucast = CSR_READ_4(sc, BGE_MAC_STATS +
-            offsetof(struct bge_mac_stats_regs, ifHCOutUcastPkts));
-        mcast = CSR_READ_4(sc, BGE_MAC_STATS +
-            offsetof(struct bge_mac_stats_regs, ifHCOutMulticastPkts));
-        bcast = CSR_READ_4(sc, BGE_MAC_STATS +
-            offsetof(struct bge_mac_stats_regs, ifHCOutBroadcastPkts));
-
         /*
          * If controller transmitted more than BGE_NUM_RDMA_CHANNELS
          * frames, it's safe to disable workaround for DMA engine's
@@ -3858,6 +3923,15 @@ bge_stats_update_regs(struct bge_softc *sc)
             sc->bge_flags &= ~BGE_RDMA_BUG;
         }
     }
+
+#if NKSTAT > 0
+    kstat_kv_u32(&kvs[bge_stat_out_ucast_pkt]) += ucast;
+    kstat_kv_u32(&kvs[bge_stat_out_mcast_pkt]) += mcast;
+    kstat_kv_u32(&kvs[bge_stat_out_bcast_pkt]) += bcast;
+    kstat_kv_u32(&kvs[bge_stat_collisions]) += collisions;
+    kstat_kv_u32(&kvs[bge_stat_if_in_drops]) += discards;
+    kstat_kv_u32(&kvs[bge_stat_if_in_errors]) += inerrors;
+#endif
 }

 void
@@ -4814,3 +4888,151 @@ bge_link_upd(struct bge_softc *sc)
         BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
         BGE_MACSTAT_LINK_CHANGED);
 }
+
+#if NKSTAT > 0
+
+struct bge_stat {
+    char               name[KSTAT_KV_NAMELEN];
+    enum kstat_kv_unit unit;
+    bus_size_t         reg;
+};
+
+#define MACREG(_f) \
+    BGE_MAC_STATS + offsetof(struct bge_mac_stats_regs, _f)
+
+static const struct bge_stat bge_kstat_tpl[] = {
+    /* MAC stats */
+    [bge_stat_out_octets] = { "out octets", KSTAT_KV_U_BYTES,
+        MACREG(ifHCOutOctets) },
+    [bge_stat_collisions] = { "collisions", KSTAT_KV_U_NONE, 0 },
+    [bge_stat_xon_sent] = { "xon sent", KSTAT_KV_U_NONE,
+        MACREG(outXonSent) },
+    [bge_stat_xoff_sent] = { "xoff sent", KSTAT_KV_U_NONE,
+        MACREG(outXonSent) },
+    [bge_stat_xmit_errors] = { "xmit errors", KSTAT_KV_U_NONE,
+        MACREG(dot3StatsInternalMacTransmitErrors) },
+    [bge_stat_coll_frames] = { "coll frames", KSTAT_KV_U_PACKETS,
+        MACREG(dot3StatsSingleCollisionFrames) },
+    [bge_stat_multicoll_frames] = { "multicoll frames", KSTAT_KV_U_PACKETS,
+        MACREG(dot3StatsMultipleCollisionFrames) },
+    [bge_stat_deferred_xmit] = { "deferred xmit", KSTAT_KV_U_NONE,
+        MACREG(dot3StatsDeferredTransmissions) },
+    [bge_stat_excess_coll] = { "excess coll", KSTAT_KV_U_NONE,
+        MACREG(dot3StatsExcessiveCollisions) },
+    [bge_stat_late_coll] = { "late coll", KSTAT_KV_U_NONE,
+        MACREG(dot3StatsLateCollisions) },
+    [bge_stat_out_ucast_pkt] = { "out ucast pkts", KSTAT_KV_U_PACKETS, 0 },
+    [bge_stat_out_mcast_pkt] = { "out mcast pkts", KSTAT_KV_U_PACKETS, 0 },
+    [bge_stat_out_bcast_pkt] = { "out bcast pkts", KSTAT_KV_U_PACKETS, 0 },
+    [bge_stat_in_octets] = { "in octets", KSTAT_KV_U_BYTES,
+        MACREG(ifHCInOctets) },
+    [bge_stat_fragments] = { "fragments", KSTAT_KV_U_NONE,
+        MACREG(etherStatsFragments) },
+    [bge_stat_in_ucast_pkt] = { "in ucast pkts", KSTAT_KV_U_PACKETS,
+        MACREG(ifHCInUcastPkts) },
+    [bge_stat_in_mcast_pkt] = { "in mcast pkts", KSTAT_KV_U_PACKETS,
+        MACREG(ifHCInMulticastPkts) },
+    [bge_stat_in_bcast_pkt] = { "in bcast pkts", KSTAT_KV_U_PACKETS,
+        MACREG(ifHCInBroadcastPkts) },
+    [bge_stat_fcs_errors] = { "FCS errors", KSTAT_KV_U_NONE,
+        MACREG(dot3StatsFCSErrors) },
+    [bge_stat_align_errors] = { "align errors", KSTAT_KV_U_NONE,
+        MACREG(dot3StatsAlignmentErrors) },
+    [bge_stat_xon_rcvd] = { "xon rcvd", KSTAT_KV_U_NONE,
+        MACREG(xonPauseFramesReceived) },
+    [bge_stat_xoff_rcvd] = { "xoff rcvd", KSTAT_KV_U_NONE,
+        MACREG(xoffPauseFramesReceived) },
+    [bge_stat_ctrl_frame_rcvd] = { "ctrlframes rcvd", KSTAT_KV_U_NONE,
+        MACREG(macControlFramesReceived) },
+    [bge_stat_xoff_entered] = { "xoff entered", KSTAT_KV_U_NONE,
+        MACREG(xoffStateEntered) },
+    [bge_stat_too_long_frames] = { "too long frames", KSTAT_KV_U_NONE,
+        MACREG(dot3StatsFramesTooLong) },
+    [bge_stat_jabbers] = { "jabbers", KSTAT_KV_U_NONE,
+        MACREG(etherStatsJabbers) },
+    [bge_stat_too_short_pkts] = { "too short pkts", KSTAT_KV_U_NONE,
+        MACREG(etherStatsUndersizePkts) },
+
+    /* Send Data Initiator stats */
+    [bge_stat_dma_rq_full] = { "DMA RQ full", KSTAT_KV_U_NONE,
+        BGE_LOCSTATS_DMA_RQ_FULL },
+    [bge_stat_dma_hprq_full] = { "DMA HPRQ full", KSTAT_KV_U_NONE,
+        BGE_LOCSTATS_DMA_HIPRIO_RQ_FULL },
+    [bge_stat_sdc_queue_full] = { "SDC queue full", KSTAT_KV_U_NONE,
+        BGE_LOCSTATS_SDC_QUEUE_FULL },
+    [bge_stat_nic_sendprod_set] = { "sendprod set", KSTAT_KV_U_NONE,
+        BGE_LOCSTATS_NIC_SENDPROD_SET },
+    [bge_stat_status_updated] = { "stats updated", KSTAT_KV_U_NONE,
+        BGE_LOCSTATS_STATS_UPDATED },
+    [bge_stat_irqs] = { "irqs", KSTAT_KV_U_NONE, BGE_LOCSTATS_IRQS },
+    [bge_stat_avoided_irqs] = { "avoided irqs", KSTAT_KV_U_NONE,
+        BGE_LOCSTATS_AVOIDED_IRQS },
+    [bge_stat_tx_thresh_hit] = { "tx thresh hit", KSTAT_KV_U_NONE,
+        BGE_LOCSTATS_TX_THRESH_HIT },
+
+    /* Receive List Placement stats */
+    [bge_stat_filtdrop] = { "filtdrop", KSTAT_KV_U_NONE,
+        BGE_RXLP_LOCSTAT_FILTDROP },
+    [bge_stat_dma_wrq_full] = { "DMA WRQ full", KSTAT_KV_U_NONE,
+        BGE_RXLP_LOCSTAT_DMA_WRQ_FULL },
+    [bge_stat_dma_hpwrq_full] = { "DMA HPWRQ full", KSTAT_KV_U_NONE,
+        BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL },
+    [bge_stat_out_of_bds] = { "out of BDs", KSTAT_KV_U_NONE,
+        BGE_RXLP_LOCSTAT_OUT_OF_BDS },
+    [bge_stat_if_in_drops] = { "if in drops", KSTAT_KV_U_NONE, 0 },
+    [bge_stat_if_in_errors] = { "if in errors", KSTAT_KV_U_NONE, 0 },
+    [bge_stat_rx_thresh_hit] = { "rx thresh hit", KSTAT_KV_U_NONE,
+        BGE_RXLP_LOCSTAT_RXTHRESH_HIT },
+};
+
+int
+bge_kstat_read(struct kstat *ks)
+{
+    struct bge_softc *sc = ks->ks_softc;
+    struct kstat_kv *kvs = ks->ks_data;
+    int i;
+
+    bge_stats_update_regs(sc);
+
+    for (i = 0; i < nitems(bge_kstat_tpl); i++) {
+        if (bge_kstat_tpl[i].reg != 0)
+            kstat_kv_u32(kvs) += CSR_READ_4(sc,
+                bge_kstat_tpl[i].reg);
+        kvs++;
+    }
+
+    getnanouptime(&ks->ks_updated);
+    return 0;
+}
+
+void
+bge_kstat_attach(struct bge_softc *sc)
+{
+    struct kstat *ks;
+    struct kstat_kv *kvs;
+    int i;
+
+    ks = kstat_create(sc->bge_dev.dv_xname, 0, "bge-stats", 0,
+        KSTAT_T_KV, 0);
+    if (ks == NULL)
+        return;
+
+    kvs = mallocarray(nitems(bge_kstat_tpl), sizeof(*kvs), M_DEVBUF,
+        M_ZERO | M_WAITOK);
+    for (i = 0; i < nitems(bge_kstat_tpl); i++) {
+        const struct bge_stat *tpl = &bge_kstat_tpl[i];
+        kstat_kv_unit_init(&kvs[i], tpl->name, KSTAT_KV_T_UINT32,
+            tpl->unit);
+    }
+
+    kstat_set_mutex(ks, &sc->bge_kstat_mtx);
+    ks->ks_softc = sc;
+    ks->ks_data = kvs;
+    ks->ks_datalen = nitems(bge_kstat_tpl) * sizeof(*kvs);
+    ks->ks_read = bge_kstat_read;
+
+    sc->bge_kstat = ks;
+    kstat_install(ks);
+}
+#endif /* NKSTAT > 0 */

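Once attached, the counters should be readable from userland with kstat(1), presumably as bge0:0:bge-stats:0 (the name combines dv_xname with the "bge-stats" string passed to kstat_create()); each read calls bge_kstat_read(), which folds the live MAC, Send Data Initiator, and Receive List Placement registers into the accumulated values under bge_kstat_mtx.
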
@@ -1,4 +1,4 @@
-/* $OpenBSD: if_bgereg.h,v 1.135 2022/01/09 05:42:46 jsg Exp $ */
+/* $OpenBSD: if_bgereg.h,v 1.136 2023/07/04 10:22:39 jmatthew Exp $ */

 /*
  * Copyright (c) 2001 Wind River Systems
@@ -2942,4 +2942,7 @@ struct bge_softc {
     u_int32_t      bge_rx_overruns;
     u_int32_t      bge_tx_collisions;
     bus_dmamap_t   bge_txdma[BGE_TX_RING_CNT];
+
+    struct mutex   bge_kstat_mtx;
+    struct kstat   *bge_kstat;
 };

@@ -1,4 +1,4 @@
-/* $OpenBSD: if_iwm.c,v 1.407 2023/04/14 12:45:10 stsp Exp $ */
+/* $OpenBSD: if_iwm.c,v 1.408 2023/07/05 15:07:28 stsp Exp $ */

 /*
  * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
@@ -8574,7 +8574,7 @@ iwm_bgscan_done(struct ieee80211com *ic,
     free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
     sc->bgscan_unref_arg = arg;
     sc->bgscan_unref_arg_size = arg_size;
-    iwm_add_task(sc, sc->sc_nswq, &sc->bgscan_done_task);
+    iwm_add_task(sc, systq, &sc->bgscan_done_task);
 }

 void
@@ -1,4 +1,4 @@
-/* $OpenBSD: if_iwx.c,v 1.173 2023/06/27 15:31:27 stsp Exp $ */
+/* $OpenBSD: if_iwx.c,v 1.175 2023/07/05 15:07:28 stsp Exp $ */

 /*
  * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
@@ -2925,7 +2925,7 @@ iwx_disable_txq(struct iwx_softc *sc, int sta_id, int qid, uint8_t tid)
         cmd_v0.cb_size = htole32(0);
         cmd_v0.byte_cnt_addr = htole64(0);
         cmd_v0.tfdq_addr = htole64(0);
-        hcmd.id = IWX_SCD_QUEUE_CFG,
+        hcmd.id = IWX_SCD_QUEUE_CFG;
         hcmd.data[0] = &cmd_v0;
         hcmd.len[0] = sizeof(cmd_v0);
     } else if (cmd_ver == 3) {
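The old line was not a behavioral bug, only a stylistic one: the trailing comma made the two assignments a single comma-operator expression, i.e.

    hcmd.id = IWX_SCD_QUEUE_CFG,    /* comma operator: both sides evaluated */
    hcmd.data[0] = &cmd_v0;

which is semantically identical to writing them as two statements, so this change (and the matching one in iwx_phy_send_rlc() below) is cosmetic.
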
@@ -7607,7 +7607,7 @@ iwx_bgscan_done(struct ieee80211com *ic,
     free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
     sc->bgscan_unref_arg = arg;
     sc->bgscan_unref_arg_size = arg_size;
-    iwx_add_task(sc, sc->sc_nswq, &sc->bgscan_done_task);
+    iwx_add_task(sc, systq, &sc->bgscan_done_task);
 }

 void
@@ -8048,7 +8048,7 @@ iwx_phy_send_rlc(struct iwx_softc *sc, struct iwx_phy_ctxt *phyctxt,
     idle_cnt = chains_static;
     active_cnt = chains_dynamic;

-    cmd.phy_id = htole32(phyctxt->id),
+    cmd.phy_id = htole32(phyctxt->id);
     cmd.rlc.rx_chain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
         IWX_PHY_RX_CHAIN_VALID_POS);
     cmd.rlc.rx_chain_info |= htole32(idle_cnt << IWX_PHY_RX_CHAIN_CNT_POS);

@@ -1,4 +1,4 @@
-/* $OpenBSD: mfii.c,v 1.88 2023/05/25 19:35:58 kurt Exp $ */
+/* $OpenBSD: mfii.c,v 1.89 2023/07/06 10:17:43 visa Exp $ */

 /*
  * Copyright (c) 2012 David Gwynne <dlg@openbsd.org>
@@ -1764,8 +1764,9 @@ mfii_poll_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
 int
 mfii_exec(struct mfii_softc *sc, struct mfii_ccb *ccb)
 {
-    struct mutex m = MUTEX_INITIALIZER_FLAGS(IPL_BIO, __MTX_NAME,
-        MTX_NOWITNESS);
+    struct mutex m;
+
+    mtx_init(&m, IPL_BIO);

 #ifdef DIAGNOSTIC
     if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
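The static initializer form with MTX_NOWITNESS kept WITNESS away from these on-stack mutexes; switching to mtx_init(9) presumably lets them be initialized like any other runtime mutex, lock checking included. The same conversion is applied to mpii(4) below.
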
@@ -1,4 +1,4 @@
-/* $OpenBSD: mpii.c,v 1.145 2023/05/25 19:35:58 kurt Exp $ */
+/* $OpenBSD: mpii.c,v 1.146 2023/07/06 10:17:43 visa Exp $ */
 /*
  * Copyright (c) 2010, 2012 Mike Belopuhov
  * Copyright (c) 2009 James Giannoules
@@ -2857,11 +2857,12 @@ mpii_init_queues(struct mpii_softc *sc)
 void
 mpii_wait(struct mpii_softc *sc, struct mpii_ccb *ccb)
 {
-    struct mutex mtx = MUTEX_INITIALIZER_FLAGS(IPL_BIO,
-        __MTX_NAME, MTX_NOWITNESS);
+    struct mutex mtx;
     void (*done)(struct mpii_ccb *);
     void *cookie;
+
+    mtx_init(&mtx, IPL_BIO);

     done = ccb->ccb_done;
     cookie = ccb->ccb_cookie;

@@ -1,4 +1,4 @@
-/* $OpenBSD: virtio_pci.c,v 1.33 2023/05/29 08:13:35 sf Exp $ */
+/* $OpenBSD: virtio_pci.c,v 1.34 2023/07/05 18:11:08 patrick Exp $ */
 /* $NetBSD: virtio.c,v 1.3 2011/11/02 23:05:52 njoly Exp $ */

 /*
@@ -976,7 +976,7 @@ virtio_pci_setup_msix(struct virtio_pci_softc *sc, struct pci_attach_args *pa,
         for (i = 0; i < vsc->sc_nvqs; i++)
             virtio_pci_set_msix_queue_vector(sc, i, 1);
     } else {
-        for (i = 0; i <= vsc->sc_nvqs; i++) {
+        for (i = 0; i < vsc->sc_nvqs; i++) {
             if (virtio_pci_msix_establish(sc, pa, i + 1,
                 virtio_pci_queue_intr, &vsc->sc_vqs[i])) {
                 goto fail;
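The old bound was an off-by-one: on its final pass (i == sc_nvqs) the loop would establish one MSI-X vector too many and hand virtio_pci_queue_intr a pointer one element past the end of vsc->sc_vqs. Queue vectors start at i + 1 because vector 0 is apparently reserved for config interrupts, and the shared-vector branch above parks all queues on vector 1.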