diff --git a/devicemodel/hw/pci/virtio/virtio.c b/devicemodel/hw/pci/virtio/virtio.c
index 19458fe68..d4b5ff412 100644
--- a/devicemodel/hw/pci/virtio/virtio.c
+++ b/devicemodel/hw/pci/virtio/virtio.c
@@ -193,7 +193,7 @@ virtio_vq_init(struct virtio_base *base, uint32_t pfn)
 	vq = &base->queues[base->curq];
 	vq->pfn = pfn;
 	phys = (uint64_t)pfn << VRING_PAGE_BITS;
-	size = vring_size(vq->qsize);
+	size = virtio_vring_size(vq->qsize);
 	vb = paddr_guest2host(base->dev->vmctx, phys, size);
 	/* First page(s) are descriptors... */
@@ -201,14 +201,14 @@ virtio_vq_init(struct virtio_base *base, uint32_t pfn)
 	vb += vq->qsize * sizeof(struct virtio_desc);
 	/* ... immediately followed by "avail" ring (entirely uint16_t's) */
-	vq->avail = (struct vring_avail *)vb;
+	vq->avail = (struct virtio_vring_avail *)vb;
 	vb += (2 + vq->qsize + 1) * sizeof(uint16_t);
 	/* Then it's rounded up to the next page... */
 	vb = (char *)roundup2((uintptr_t)vb, VRING_ALIGN);
 	/* ... and the last page(s) are the used ring. */
-	vq->used = (struct vring_used *)vb;
+	vq->used = (struct virtio_vring_used *)vb;
 	/* Mark queue as allocated, and start at 0 when we use it. */
 	vq->flags = VQ_ALLOC;
@@ -244,13 +244,13 @@ virtio_vq_enable(struct virtio_base *base)
 	phys = (((uint64_t)vq->gpa_avail[1]) << 32) | vq->gpa_avail[0];
 	size = (2 + qsz + 1) * sizeof(uint16_t);
 	vb = paddr_guest2host(base->dev->vmctx, phys, size);
-	vq->avail = (struct vring_avail *)vb;
+	vq->avail = (struct virtio_vring_avail *)vb;
 	/* used ring */
 	phys = (((uint64_t)vq->gpa_used[1]) << 32) | vq->gpa_used[0];
 	size = sizeof(uint16_t) * 3 + sizeof(struct virtio_used) * qsz;
 	vb = paddr_guest2host(base->dev->vmctx, phys, size);
-	vq->used = (struct vring_used *)vb;
+	vq->used = (struct virtio_vring_used *)vb;
 	/* Mark queue as allocated, and start at 0 when we use it. */
 	vq->flags = VQ_ALLOC;
@@ -378,11 +378,11 @@ vq_getchain(struct virtio_vq_info *vq, uint16_t *pidx,
 			return -1;
 		}
 		vdir = &vq->desc[next];
-		if ((vdir->flags & VRING_DESC_F_INDIRECT) == 0) {
+		if ((vdir->flags & ACRN_VRING_DESC_F_INDIRECT) == 0) {
 			_vq_record(i, vdir, ctx, iov, n_iov, flags);
 			i++;
 		} else if ((base->device_caps &
-			VIRTIO_RING_F_INDIRECT_DESC) == 0) {
+			ACRN_VIRTIO_RING_F_INDIRECT_DESC) == 0) {
 			fprintf(stderr,
 				"%s: descriptor has forbidden INDIRECT flag, "
 				"driver confused?\r\n",
@@ -409,7 +409,7 @@ vq_getchain(struct virtio_vq_info *vq, uint16_t *pidx,
 			next = 0;
 			for (;;) {
 				vp = &vindir[next];
-				if (vp->flags & VRING_DESC_F_INDIRECT) {
+				if (vp->flags & ACRN_VRING_DESC_F_INDIRECT) {
 					fprintf(stderr,
 						"%s: indirect desc has INDIR flag,"
 						" driver confused?\r\n",
@@ -419,7 +419,7 @@ vq_getchain(struct virtio_vq_info *vq, uint16_t *pidx,
 				_vq_record(i, vp, ctx, iov, n_iov, flags);
 				if (++i > VQ_MAX_DESCRIPTORS)
 					goto loopy;
-				if ((vp->flags & VRING_DESC_F_NEXT) == 0)
+				if ((vp->flags & ACRN_VRING_DESC_F_NEXT) == 0)
 					break;
 				next = vp->next;
 				if (next >= n_indir) {
@@ -431,7 +431,7 @@ vq_getchain(struct virtio_vq_info *vq, uint16_t *pidx,
 				}
 			}
 		}
-		if ((vdir->flags & VRING_DESC_F_NEXT) == 0)
+		if ((vdir->flags & ACRN_VRING_DESC_F_NEXT) == 0)
 			return i;
 	}
 loopy:
@@ -464,7 +464,7 @@ void
 vq_relchain(struct virtio_vq_info *vq, uint16_t idx, uint32_t iolen)
 {
 	uint16_t uidx, mask;
-	volatile struct vring_used *vuh;
+	volatile struct virtio_vring_used *vuh;
 	volatile struct virtio_used *vue;
 	/*
@@ -513,7 +513,7 @@ vq_endchains(struct virtio_vq_info *vq, int used_all_avail)
 	 * Interrupt generation: if we're using EVENT_IDX,
 	 * interrupt if we've crossed the event threshold.
 	 * Otherwise interrupt is generated if we added "used" entries,
-	 * but suppressed by VRING_AVAIL_F_NO_INTERRUPT.
+	 * but suppressed by ACRN_VRING_AVAIL_F_NO_INTERRUPT.
 	 *
 	 * In any case, though, if NOTIFY_ON_EMPTY is set and the
 	 * entire avail was processed, we need to interrupt always.
@@ -522,9 +522,9 @@ vq_endchains(struct virtio_vq_info *vq, int used_all_avail)
 	old_idx = vq->save_used;
 	vq->save_used = new_idx = vq->used->idx;
 	if (used_all_avail &&
-	    (base->negotiated_caps & VIRTIO_F_NOTIFY_ON_EMPTY))
+	    (base->negotiated_caps & ACRN_VIRTIO_F_NOTIFY_ON_EMPTY))
 		intr = 1;
-	else if (base->negotiated_caps & VIRTIO_RING_F_EVENT_IDX) {
+	else if (base->negotiated_caps & ACRN_VIRTIO_RING_F_EVENT_IDX) {
 		event_idx = VQ_USED_EVENT_IDX(vq);
 		/*
 		 * This calculation is per docs and the kernel
@@ -534,7 +534,7 @@ vq_endchains(struct virtio_vq_info *vq, int used_all_avail)
 			(uint16_t)(new_idx - old_idx);
 	} else {
 		intr = new_idx != old_idx &&
-		    !(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT);
+		    !(vq->avail->flags & ACRN_VRING_AVAIL_F_NO_INTERRUPT);
 	}
 	if (intr)
 		vq_interrupt(base, vq);
@@ -1020,7 +1020,7 @@ virtio_set_modern_bar(struct virtio_base *base, bool use_notify_pio)
 	vops = base->vops;
-	if (!vops || (base->device_caps & VIRTIO_F_VERSION_1) == 0)
+	if (!vops || (base->device_caps & ACRN_VIRTIO_F_VERSION_1) == 0)
 		return -1;
 	if (use_notify_pio)
@@ -1036,7 +1036,7 @@ virtio_set_modern_bar(struct virtio_base *base, bool use_notify_pio)
 void
 virtio_dev_error(struct virtio_base *base)
 {
-	if (base->negotiated_caps & VIRTIO_F_VERSION_1) {
+	if (base->negotiated_caps & ACRN_VIRTIO_F_VERSION_1) {
 		/* see 2.1.2. if DRIVER_OK is set, need to send
 		 * a device configuration change notification to the driver
 		 */
diff --git a/devicemodel/hw/pci/virtio/virtio_block.c b/devicemodel/hw/pci/virtio/virtio_block.c
index 0eed07260..f5502c7bb 100644
--- a/devicemodel/hw/pci/virtio/virtio_block.c
+++ b/devicemodel/hw/pci/virtio/virtio_block.c
@@ -64,7 +64,7 @@
 	(VIRTIO_BLK_F_SEG_MAX | \
 	VIRTIO_BLK_F_BLK_SIZE | \
 	VIRTIO_BLK_F_TOPOLOGY | \
-	VIRTIO_RING_F_INDIRECT_DESC)	/* indirect descriptors */
+	ACRN_VIRTIO_RING_F_INDIRECT_DESC)	/* indirect descriptors */
 /*
  * Writeback cache bits
  */
@@ -214,7 +214,7 @@ virtio_blk_proc(struct virtio_blk *blk, struct virtio_vq_info *vq)
 	assert(n >= 2 && n <= BLOCKIF_IOV_MAX + 2);
 	io = &blk->ios[idx];
-	assert((flags[0] & VRING_DESC_F_WRITE) == 0);
+	assert((flags[0] & ACRN_VRING_DESC_F_WRITE) == 0);
 	assert(iov[0].iov_len == sizeof(struct virtio_blk_hdr));
 	vbh = iov[0].iov_base;
 	memcpy(&io->req.iov, &iov[1], sizeof(struct iovec) * (n - 2));
@@ -222,7 +222,7 @@ virtio_blk_proc(struct virtio_blk *blk, struct virtio_vq_info *vq)
 	io->req.offset = vbh->sector * DEV_BSIZE;
 	io->status = iov[--n].iov_base;
 	assert(iov[n].iov_len == 1);
-	assert(flags[n] & VRING_DESC_F_WRITE);
+	assert(flags[n] & ACRN_VRING_DESC_F_WRITE);
 	/*
 	 * XXX
@@ -240,7 +240,7 @@ virtio_blk_proc(struct virtio_blk *blk, struct virtio_vq_info *vq)
 		 * therefore test the inverse of the descriptor bit
 		 * to the op.
 		 */
-		assert(((flags[i] & VRING_DESC_F_WRITE) == 0) == writeop);
+		assert(((flags[i] & ACRN_VRING_DESC_F_WRITE) == 0) == writeop);
 		iolen += iov[i].iov_len;
 	}
 	io->req.resid = iolen;
diff --git a/devicemodel/hw/pci/virtio/virtio_console.c b/devicemodel/hw/pci/virtio/virtio_console.c
index 52f77de32..64452dcdb 100644
--- a/devicemodel/hw/pci/virtio/virtio_console.c
+++ b/devicemodel/hw/pci/virtio/virtio_console.c
@@ -409,7 +409,7 @@ virtio_console_notify_rx(void *vdev, struct virtio_vq_info *vq)
 	if (!port->rx_ready) {
 		port->rx_ready = 1;
-		vq->used->flags |= VRING_USED_F_NO_NOTIFY;
+		vq->used->flags |= ACRN_VRING_USED_F_NO_NOTIFY;
 	}
 }
diff --git a/devicemodel/hw/pci/virtio/virtio_heci.c b/devicemodel/hw/pci/virtio/virtio_heci.c
index e5af6fd5d..53cb9006b 100644
--- a/devicemodel/hw/pci/virtio/virtio_heci.c
+++ b/devicemodel/hw/pci/virtio/virtio_heci.c
@@ -972,7 +972,7 @@ static void *virtio_heci_tx_thread(void *param)
 	while (vheci->status != VHECI_DEINIT) {
 		/* note - tx mutex is locked here */
 		while (!vq_has_descs(vq)) {
-			vq->used->flags &= ~VRING_USED_F_NO_NOTIFY;
+			vq->used->flags &= ~ACRN_VRING_USED_F_NO_NOTIFY;
 			mb();
 			if (vq_has_descs(vq) &&
 			    vheci->status != VHECI_RESET)
@@ -984,7 +984,7 @@ static void *virtio_heci_tx_thread(void *param)
 			if (vheci->status == VHECI_DEINIT)
 				goto out;
 		}
-		vq->used->flags |= VRING_USED_F_NO_NOTIFY;
+		vq->used->flags |= ACRN_VRING_USED_F_NO_NOTIFY;
 		pthread_mutex_unlock(&vheci->tx_mutex);
 		do {
@@ -1125,7 +1125,7 @@ static void *virtio_heci_rx_thread(void *param)
 	while (vheci->status != VHECI_DEINIT) {
 		/* note - rx mutex is locked here */
 		while (vq_ring_ready(vq)) {
-			vq->used->flags &= ~VRING_USED_F_NO_NOTIFY;
+			vq->used->flags &= ~ACRN_VRING_USED_F_NO_NOTIFY;
 			mb();
 			if (vq_has_descs(vq) &&
 			    vheci->rx_need_sched &&
@@ -1138,7 +1138,7 @@ static void *virtio_heci_rx_thread(void *param)
 			if (vheci->status == VHECI_DEINIT)
 				goto out;
 		}
-		vq->used->flags |= VRING_USED_F_NO_NOTIFY;
+		vq->used->flags |= ACRN_VRING_USED_F_NO_NOTIFY;
 		do {
 			if (virtio_heci_proc_rx(vheci, vq))
@@ -1166,7 +1166,7 @@ virtio_heci_notify_rx(void *heci, struct virtio_vq_info *vq)
 	/* Signal the rx thread for processing */
 	pthread_mutex_lock(&vheci->rx_mutex);
 	DPRINTF(("vheci: RX: New IN buffer available!\n\r"));
-	vq->used->flags |= VRING_USED_F_NO_NOTIFY;
+	vq->used->flags |= ACRN_VRING_USED_F_NO_NOTIFY;
 	pthread_cond_signal(&vheci->rx_cond);
 	pthread_mutex_unlock(&vheci->rx_mutex);
 }
@@ -1184,7 +1184,7 @@ virtio_heci_notify_tx(void *heci, struct virtio_vq_info *vq)
 	/* Signal the tx thread for processing */
 	pthread_mutex_lock(&vheci->tx_mutex);
 	DPRINTF(("vheci: TX: New OUT buffer available!\n\r"));
-	vq->used->flags |= VRING_USED_F_NO_NOTIFY;
+	vq->used->flags |= ACRN_VRING_USED_F_NO_NOTIFY;
 	pthread_cond_signal(&vheci->tx_cond);
 	pthread_mutex_unlock(&vheci->tx_mutex);
 }
diff --git a/devicemodel/hw/pci/virtio/virtio_input.c b/devicemodel/hw/pci/virtio/virtio_input.c
index fa8476da7..f0579292b 100644
--- a/devicemodel/hw/pci/virtio/virtio_input.c
+++ b/devicemodel/hw/pci/virtio/virtio_input.c
@@ -47,7 +47,7 @@ static int virtio_input_debug;
 /*
  * Host capabilities
  */
-#define VIRTIO_INPUT_S_HOSTCAPS (VIRTIO_F_VERSION_1)
+#define VIRTIO_INPUT_S_HOSTCAPS (ACRN_VIRTIO_F_VERSION_1)
 enum virtio_input_config_select {
 	VIRTIO_INPUT_CFG_UNSET = 0x00,
diff --git a/devicemodel/hw/pci/virtio/virtio_net.c b/devicemodel/hw/pci/virtio/virtio_net.c
index 0863c701e..73708d28e 100644
--- a/devicemodel/hw/pci/virtio/virtio_net.c
+++ b/devicemodel/hw/pci/virtio/virtio_net.c
@@ -74,7 +74,7 @@
 #define VIRTIO_NET_S_HOSTCAPS \
 	(VIRTIO_NET_F_MAC | VIRTIO_NET_F_MRG_RXBUF | VIRTIO_NET_F_STATUS | \
-	VIRTIO_F_NOTIFY_ON_EMPTY | VIRTIO_RING_F_INDIRECT_DESC)
+	ACRN_VIRTIO_F_NOTIFY_ON_EMPTY | ACRN_VIRTIO_RING_F_INDIRECT_DESC)
 /* is address mcast/bcast? */
 #define ETHER_IS_MULTICAST(addr) (*(addr) & 0x01)
@@ -432,7 +432,7 @@ virtio_net_ping_rxq(void *vdev, struct virtio_vq_info *vq)
 	 */
 	if (net->rx_ready == 0) {
 		net->rx_ready = 1;
-		vq->used->flags |= VRING_USED_F_NO_NOTIFY;
+		vq->used->flags |= ACRN_VRING_USED_F_NO_NOTIFY;
 	}
 }
@@ -478,7 +478,7 @@ virtio_net_ping_txq(void *vdev, struct virtio_vq_info *vq)
 	/* Signal the tx thread for processing */
 	pthread_mutex_lock(&net->tx_mtx);
-	vq->used->flags |= VRING_USED_F_NO_NOTIFY;
+	vq->used->flags |= ACRN_VRING_USED_F_NO_NOTIFY;
 	if (net->tx_in_progress == 0)
 		pthread_cond_signal(&net->tx_cond);
 	pthread_mutex_unlock(&net->tx_mtx);
@@ -512,7 +512,7 @@ virtio_net_tx_thread(void *param)
 	for (;;) {
 		/* note - tx mutex is locked here */
 		while (net->resetting || !vq_has_descs(vq)) {
-			vq->used->flags &= ~VRING_USED_F_NO_NOTIFY;
+			vq->used->flags &= ~ACRN_VRING_USED_F_NO_NOTIFY;
 			/* memory barrier */
 			mb();
 			if (!net->resetting && vq_has_descs(vq))
@@ -527,7 +527,7 @@ virtio_net_tx_thread(void *param)
 				return NULL;
 			}
 		}
-		vq->used->flags |= VRING_USED_F_NO_NOTIFY;
+		vq->used->flags |= ACRN_VRING_USED_F_NO_NOTIFY;
 		net->tx_in_progress = 1;
 		pthread_mutex_unlock(&net->tx_mtx);
diff --git a/devicemodel/include/virtio.h b/devicemodel/include/virtio.h
index ad0266663..60f06c4fb 100644
--- a/devicemodel/include/virtio.h
+++ b/devicemodel/include/virtio.h
@@ -116,9 +116,9 @@
  * The two event fields, <used_event> and <avail_event>, in the
  * avail and used rings (respectively -- note the reversal!), are
  * always provided, but are used only if the virtual device
- * negotiates the VIRTIO_RING_F_EVENT_IDX feature during feature
+ * negotiates the ACRN_VIRTIO_RING_F_EVENT_IDX feature during feature
  * negotiation. Similarly, both rings provide a flag --
- * VRING_AVAIL_F_NO_INTERRUPT and VRING_USED_F_NO_NOTIFY -- in
+ * ACRN_VRING_AVAIL_F_NO_INTERRUPT and ACRN_VRING_USED_F_NO_NOTIFY -- in
  * their <flags> field, indicating that the guest does not need an
  * interrupt, or that the hypervisor driver does not need a
  * notify, when descriptors are added to the corresponding ring.
@@ -137,9 +137,9 @@
 #define VRING_ALIGN 4096
-#define VRING_DESC_F_NEXT (1 << 0)
-#define VRING_DESC_F_WRITE (1 << 1)
-#define VRING_DESC_F_INDIRECT (1 << 2)
+#define ACRN_VRING_DESC_F_NEXT (1 << 0)
+#define ACRN_VRING_DESC_F_WRITE (1 << 1)
+#define ACRN_VRING_DESC_F_INDIRECT (1 << 2)
 struct virtio_desc {	/* AKA vring_desc */
 	uint64_t addr;	/* guest physical address */
@@ -153,17 +153,17 @@ struct virtio_used { /* AKA vring_used_elem */
 	uint32_t tlen;	/* length written-to */
 } __attribute__((packed));
-#define VRING_AVAIL_F_NO_INTERRUPT 1
+#define ACRN_VRING_AVAIL_F_NO_INTERRUPT 1
-struct vring_avail {
+struct virtio_vring_avail {
 	uint16_t flags;		/* VRING_AVAIL_F_* */
 	uint16_t idx;		/* counts to 65535, then cycles */
 	uint16_t ring[];	/* size N, reported in QNUM value */
 /*	uint16_t used_event;	-- after N ring entries */
 } __attribute__((packed));
-#define VRING_USED_F_NO_NOTIFY 1
-struct vring_used {
+#define ACRN_VRING_USED_F_NO_NOTIFY 1
+struct virtio_vring_used {
 	uint16_t flags;		/* VRING_USED_F_* */
 	uint16_t idx;		/* counts to 65535, then cycles */
 	struct virtio_used ring[];
@@ -310,12 +310,12 @@ struct vring_used {
  * Feature flags.
  * Note: bits 0 through 23 are reserved to each device type.
  */
-#define VIRTIO_F_NOTIFY_ON_EMPTY (1 << 24)
-#define VIRTIO_RING_F_INDIRECT_DESC (1 << 28)
-#define VIRTIO_RING_F_EVENT_IDX (1 << 29)
+#define ACRN_VIRTIO_F_NOTIFY_ON_EMPTY (1 << 24)
+#define ACRN_VIRTIO_RING_F_INDIRECT_DESC (1 << 28)
+#define ACRN_VIRTIO_RING_F_EVENT_IDX (1 << 29)
 /* v1.0 compliant. */
-#define VIRTIO_F_VERSION_1 (1UL << 32)
+#define ACRN_VIRTIO_F_VERSION_1 (1UL << 32)
 /* From section 2.3, "Virtqueue Configuration", of the virtio specification */
 /**
@@ -327,7 +327,7 @@ struct vring_used {
  * @return size of a certain virtqueue, in bytes.
  */
 static inline size_t
-vring_size(u_int qsz)
+virtio_vring_size(u_int qsz)
 {
 	size_t size;
@@ -594,9 +594,9 @@ struct virtio_vq_info {
 	volatile struct virtio_desc *desc;
 				/**< descriptor array */
-	volatile struct vring_avail *avail;
+	volatile struct virtio_vring_avail *avail;
 				/**< the "avail" ring */
-	volatile struct vring_used *used;
+	volatile struct virtio_vring_used *used;
 				/**< the "used" ring */
 	uint32_t gpa_desc[2];	/**< gpa of descriptors */
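
Reviewer note, not part of the patch: the split-ring size arithmetic performed by the renamed virtio_vring_size() (descriptor table, then the avail ring including its trailing used_event slot, page-aligned, then the used ring) can be reproduced standalone. The sketch below is not the device model's code; the structs are trimmed stand-ins and an inline rounding expression replaces roundup2(), but the byte counts follow the same formula as the header.

/* Standalone sketch of the split-ring size math behind virtio_vring_size();
 * structs here are trimmed stand-ins, not the device model's headers. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define VRING_ALIGN 4096

struct virtio_desc { uint64_t addr; uint32_t len; uint16_t flags; uint16_t next; } __attribute__((packed));
struct virtio_used { uint32_t idx; uint32_t tlen; } __attribute__((packed));

static size_t
sketch_vring_size(unsigned int qsz)
{
	size_t size;

	/* descriptor table, then avail ring: flags, idx, qsz slots, used_event */
	size = sizeof(struct virtio_desc) * qsz + (2 + qsz + 1) * sizeof(uint16_t);
	size = (size + VRING_ALIGN - 1) & ~(size_t)(VRING_ALIGN - 1);
	/* used ring: flags, idx, avail_event, plus qsz used elements */
	size += sizeof(uint16_t) * 3 + sizeof(struct virtio_used) * qsz;
	size = (size + VRING_ALIGN - 1) & ~(size_t)(VRING_ALIGN - 1);
	return size;
}

int main(void)
{
	unsigned int qsz = 256;

	printf("qsize %u -> vring spans %zu bytes\n", qsz, sketch_vring_size(qsz));
	return 0;
}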
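Several backends touched here (virtio_net.c, virtio_heci.c, virtio_console.c) follow the same pattern with the renamed ACRN_VRING_USED_F_NO_NOTIFY bit: set it while a worker is draining the queue so the guest stops kicking, clear it before going idle, then re-check for late buffers. The sketch below shows only that handshake; fake_vq and vq_has_descs_stub() are placeholders, and the real threads issue a memory barrier and block on a condition variable under a mutex instead of returning.

/* Minimal sketch of the notify-suppression handshake; types are stand-ins. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ACRN_VRING_USED_F_NO_NOTIFY 1	/* value as renamed by this patch */

struct fake_used_ring { uint16_t flags; };
struct fake_vq { struct fake_used_ring *used; int pending; };

static bool vq_has_descs_stub(struct fake_vq *vq) { return vq->pending > 0; }

static void drain_queue(struct fake_vq *vq)
{
	for (;;) {
		/* While actively draining, ask the guest not to kick us. */
		vq->used->flags |= ACRN_VRING_USED_F_NO_NOTIFY;

		while (vq_has_descs_stub(vq))
			vq->pending--;	/* stand-in for handling one chain */

		/* Going idle: re-enable notifications first, then re-check so
		 * a buffer posted in the window is not missed; the real code
		 * adds mb() here and then waits on a condition variable. */
		vq->used->flags &= ~ACRN_VRING_USED_F_NO_NOTIFY;
		if (!vq_has_descs_stub(vq))
			return;
	}
}

int main(void)
{
	struct fake_used_ring used = { .flags = 0 };
	struct fake_vq vq = { .used = &used, .pending = 3 };

	drain_queue(&vq);
	printf("drained, used.flags=0x%x\n", used.flags);
	return 0;
}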
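The interrupt decision at the end of vq_endchains() has three branches: always interrupt when the avail ring was fully consumed and NOTIFY_ON_EMPTY was negotiated, the wrap-safe EVENT_IDX window test, or the plain ACRN_VRING_AVAIL_F_NO_INTERRUPT check. The feature-bit values below are copied from the renamed definitions in virtio.h; the ring indices are passed in as plain integers rather than read from live rings, so this is an illustration of the predicate, not the device model's function.

/* Sketch of the interrupt-suppression predicate in vq_endchains(). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ACRN_VIRTIO_F_NOTIFY_ON_EMPTY	(1 << 24)
#define ACRN_VIRTIO_RING_F_EVENT_IDX	(1 << 29)
#define ACRN_VRING_AVAIL_F_NO_INTERRUPT	1

static bool
should_interrupt(uint64_t negotiated_caps, bool used_all_avail,
		 uint16_t old_idx, uint16_t new_idx,
		 uint16_t used_event_idx, uint16_t avail_flags)
{
	if (used_all_avail && (negotiated_caps & ACRN_VIRTIO_F_NOTIFY_ON_EMPTY))
		return true;
	if (negotiated_caps & ACRN_VIRTIO_RING_F_EVENT_IDX)
		/* wrap-safe window test, as in the virtio spec */
		return (uint16_t)(new_idx - used_event_idx - 1) <
		       (uint16_t)(new_idx - old_idx);
	return new_idx != old_idx &&
	       !(avail_flags & ACRN_VRING_AVAIL_F_NO_INTERRUPT);
}

int main(void)
{
	/* EVENT_IDX negotiated: guest asked to be told once used->idx passes 5 */
	bool intr = should_interrupt(ACRN_VIRTIO_RING_F_EVENT_IDX, false,
				     4, 6, 5, 0);
	printf("interrupt needed: %s\n", intr ? "yes" : "no");
	return 0;
}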