sync with OpenBSD -current

This commit is contained in:
purplerain 2024-08-01 19:31:36 +00:00
parent 6c1791ec4e
commit d56bde2791
Signed by: purplerain
GPG key ID: F42C07F07E2E35B7
27 changed files with 162 additions and 54 deletions

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: if_vio.c,v 1.44 2024/07/26 07:55:23 sf Exp $ */
/* $OpenBSD: if_vio.c,v 1.45 2024/08/01 11:13:19 sf Exp $ */
/*
* Copyright (c) 2012 Stefan Fritsch, Alexander Fiveg.
@@ -1303,7 +1303,7 @@ vio_txtick(void *arg)
{
struct virtqueue *vq = arg;
int s = splnet();
vio_tx_intr(vq);
virtio_check_vq(vq->vq_owner, vq);
splx(s);
}

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: vioblk.c,v 1.40 2024/07/26 07:55:23 sf Exp $ */
/* $OpenBSD: vioblk.c,v 1.41 2024/08/01 11:13:19 sf Exp $ */
/*
* Copyright (c) 2012 Stefan Fritsch.
@@ -375,7 +375,7 @@ vioblk_reset(struct vioblk_softc *sc)
virtio_reset(sc->sc_virtio);
/* finish requests that have been completed */
vioblk_vq_done(&sc->sc_vq[0]);
virtio_check_vq(sc->sc_virtio, &sc->sc_vq[0]);
/* abort all remaining requests */
for (i = 0; i < sc->sc_nreqs; i++) {
@@ -535,7 +535,7 @@ vioblk_scsi_cmd(struct scsi_xfer *xs)
if (!ISSET(xs->flags, SCSI_POLL)) {
/* check if some xfers are done: */
if (sc->sc_queued > 1)
vioblk_vq_done(vq);
virtio_check_vq(sc->sc_virtio, vq);
splx(s);
return;
}

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: viocon.c,v 1.12 2024/06/26 01:40:49 jsg Exp $ */
/* $OpenBSD: viocon.c,v 1.13 2024/08/01 11:13:19 sf Exp $ */
/*
* Copyright (c) 2013-2015 Stefan Fritsch <sf@sfritsch.de>
@@ -463,7 +463,7 @@ vioconhwiflow(struct tty *tp, int stop)
virtio_stop_vq_intr(vp->vp_sc->sc_virtio, vp->vp_rx);
} else {
virtio_start_vq_intr(vp->vp_sc->sc_virtio, vp->vp_rx);
softintr_schedule(vp->vp_si);
virtio_check_vq(vp->vp_sc->sc_virtio, vp->vp_rx);
}
splx(s);
return 1;

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: viogpu.c,v 1.6 2024/05/24 10:05:55 jsg Exp $ */
/* $OpenBSD: viogpu.c,v 1.7 2024/08/01 11:13:19 sf Exp $ */
/*
* Copyright (c) 2021-2023 joshua stein <jcs@openbsd.org>
@@ -42,7 +42,7 @@ struct viogpu_softc;
int viogpu_match(struct device *, void *, void *);
void viogpu_attach(struct device *, struct device *, void *);
int viogpu_send_cmd(struct viogpu_softc *, void *, size_t, void *, size_t);
int viogpu_vq_wait(struct virtqueue *vq);
int viogpu_vq_done(struct virtqueue *vq);
void viogpu_rx_soft(void *arg);
int viogpu_get_display_info(struct viogpu_softc *);
@@ -178,7 +178,7 @@ viogpu_attach(struct device *parent, struct device *self, void *aux)
printf(": alloc_vq failed\n");
return;
}
sc->sc_vqs[VQCTRL].vq_done = viogpu_vq_wait;
sc->sc_vqs[VQCTRL].vq_done = viogpu_vq_done;
if (virtio_alloc_vq(vsc, &sc->sc_vqs[VQCURS], VQCURS, NBPG, 1,
"cursor")) {
@@ -211,6 +211,8 @@ viogpu_attach(struct device *parent, struct device *self, void *aux)
goto unmap;
}
virtio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
if (viogpu_get_display_info(sc) != 0)
goto unmap;
@@ -230,8 +232,6 @@ viogpu_attach(struct device *parent, struct device *self, void *aux)
sc->sc_fb_dma_kva, sc->sc_fb_dma_size, NULL, BUS_DMA_NOWAIT) != 0)
goto fb_unmap;
virtio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
if (viogpu_create_2d(sc, 1, sc->sc_fb_width, sc->sc_fb_height) != 0)
goto fb_unmap;
@@ -325,14 +325,14 @@ viogpu_repaint(void *arg)
}
int
viogpu_vq_wait(struct virtqueue *vq)
viogpu_vq_done(struct virtqueue *vq)
{
struct virtio_softc *vsc = vq->vq_owner;
struct viogpu_softc *sc = (struct viogpu_softc *)vsc->sc_child;
int slot, len;
while (virtio_dequeue(vsc, vq, &slot, &len) != 0)
;
if (virtio_dequeue(vsc, vq, &slot, &len) != 0)
return 0;
bus_dmamap_sync(vsc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
BUS_DMASYNC_POSTREAD);
@@ -402,7 +402,8 @@ viogpu_send_cmd(struct viogpu_softc *sc, void *cmd, size_t cmd_size, void *ret,
virtio_enqueue_p(vq, slot, sc->sc_dma_map, cmd_size, ret_size, 0);
virtio_enqueue_commit(vsc, vq, slot, 1);
viogpu_vq_wait(vq);
while (virtio_check_vq(vsc, vq) == 0)
;
bus_dmamap_sync(vsc->sc_dmat, sc->sc_dma_map, 0, cmd_size,
BUS_DMASYNC_POSTWRITE);

View file

@@ -1,4 +1,4 @@
/* $OpenBSD: virtio.c,v 1.28 2024/07/26 07:55:23 sf Exp $ */
/* $OpenBSD: virtio.c,v 1.29 2024/08/01 11:13:19 sf Exp $ */
/* $NetBSD: virtio.c,v 1.3 2011/11/02 23:05:52 njoly Exp $ */
/*
@@ -185,6 +185,9 @@ virtio_reinit_end(struct virtio_softc *sc)
/*
* dmamap sync operations for a virtqueue.
*
* XXX These should be more fine grained. Syncing the whole ring if we
* XXX only need a few bytes is inefficient if we use bounce buffers.
*/
static inline void
vq_sync_descs(struct virtio_softc *sc, struct virtqueue *vq, int ops)
@@ -202,6 +205,15 @@ vq_sync_aring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
ops);
}
static inline void
vq_sync_aring_used_event(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap, vq->vq_availoffset +
offsetof(struct vring_avail, ring) + vq->vq_num * sizeof(uint16_t),
sizeof(uint16_t), ops);
}
static inline void
vq_sync_uring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
@@ -210,6 +222,16 @@ vq_sync_uring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
sizeof(struct vring_used_elem), ops);
}
static inline void
vq_sync_uring_avail_event(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
vq->vq_usedoffset + offsetof(struct vring_used, ring) +
vq->vq_num * sizeof(struct vring_used_elem), sizeof(uint16_t),
ops);
}
static inline void
vq_sync_indirect(struct virtio_softc *sc, struct virtqueue *vq, int slot,
int ops)
@@ -672,11 +694,13 @@ virtio_enqueue_p(struct virtqueue *vq, int slot, bus_dmamap_t dmamap,
static void
publish_avail_idx(struct virtio_softc *sc, struct virtqueue *vq)
{
/* first make sure the avail ring entries are visible to the device */
vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
virtio_membar_producer();
vq->vq_avail->idx = vq->vq_avail_idx;
vq_sync_aring(sc, vq, BUS_DMASYNC_POSTWRITE);
/* make the avail idx visible to the device */
vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
vq->vq_queued = 1;
}
@@ -706,6 +730,7 @@ notify:
publish_avail_idx(sc, vq);
virtio_membar_sync();
vq_sync_uring_avail_event(sc, vq, BUS_DMASYNC_POSTREAD);
t = VQ_AVAIL_EVENT(vq) + 1;
if ((uint16_t)(n - t) < (uint16_t)(n - o))
sc->sc_ops->kick(sc, vq->vq_index);
@@ -713,6 +738,7 @@ notify:
publish_avail_idx(sc, vq);
virtio_membar_sync();
vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
if (!(vq->vq_used->flags & VRING_USED_F_NO_NOTIFY))
sc->sc_ops->kick(sc, vq->vq_index);
}
@@ -781,8 +807,10 @@ virtio_enqueue_trim(struct virtqueue *vq, int slot, int nsegs)
* Dequeue a request.
*/
/*
* dequeue: dequeue a request from uring; dmamap_sync for uring is
* already done in the interrupt handler.
* dequeue: dequeue a request from uring; bus_dmamap_sync for uring must
* already have been done, usually by virtio_check_vq()
* in the interrupt handler. This means that polling virtio_dequeue()
* repeatedly until it returns 0 does not work.
*/
int
virtio_dequeue(struct virtio_softc *sc, struct virtqueue *vq,
@@ -797,6 +825,7 @@ virtio_dequeue(struct virtio_softc *sc, struct virtqueue *vq,
usedidx &= vq->vq_mask;
virtio_membar_consumer();
vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
slot = vq->vq_used->ring[usedidx].id;
qe = &vq->vq_entries[slot];
@@ -852,7 +881,7 @@ virtio_postpone_intr(struct virtqueue *vq, uint16_t nslots)
VQ_USED_EVENT(vq) = idx;
virtio_membar_sync();
vq_sync_aring(vq->vq_owner, vq, BUS_DMASYNC_PREWRITE);
vq_sync_aring_used_event(vq->vq_owner, vq, BUS_DMASYNC_PREWRITE);
vq->vq_queued++;
if (nslots < virtio_nused(vq))
@@ -905,6 +934,7 @@ virtio_stop_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
* interrupt.
*/
VQ_USED_EVENT(vq) = vq->vq_used_idx + 0x8000;
vq_sync_aring_used_event(sc, vq, BUS_DMASYNC_PREWRITE);
} else {
vq->vq_avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
@@ -920,16 +950,19 @@ virtio_start_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
* interrupts is done through setting the latest
* consumed index in the used_event field
*/
if (virtio_has_feature(sc, VIRTIO_F_RING_EVENT_IDX))
if (virtio_has_feature(sc, VIRTIO_F_RING_EVENT_IDX)) {
VQ_USED_EVENT(vq) = vq->vq_used_idx;
else
vq_sync_aring_used_event(sc, vq, BUS_DMASYNC_PREWRITE);
} else {
vq->vq_avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
}
virtio_membar_sync();
vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
vq->vq_queued++;
vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
if (vq->vq_used_idx != vq->vq_used->idx)
return 1;
@@ -945,6 +978,7 @@ virtio_nused(struct virtqueue *vq)
{
uint16_t n;
vq_sync_uring(vq->vq_owner, vq, BUS_DMASYNC_POSTREAD);
n = (uint16_t)(vq->vq_used->idx - vq->vq_used_idx);
VIRTIO_ASSERT(n <= vq->vq_num);