dm: virtio: rename virtio ring structures and feature bits

Some virtio ring structures and virtio feature bits use the
same names/definitions as those in the kernel header files
(linux/virtio_ring.h, linux/virtio_config.h). Those kernel
headers must be included to perform the ioctls needed for vhost
support, which causes compilation errors due to the duplicated
definitions. This patch performs the following renamings:

VRING_DESC_F_NEXT -> ACRN_VRING_DESC_F_NEXT
VRING_DESC_F_WRITE -> ACRN_VRING_DESC_F_WRITE
VRING_DESC_F_INDIRECT -> ACRN_VRING_DESC_F_INDIRECT

VRING_AVAIL_F_NO_INTERRUPT -> ACRN_VRING_AVAIL_F_NO_INTERRUPT
VRING_USED_F_NO_NOTIFY -> ACRN_VRING_USED_F_NO_NOTIFY

VIRTIO_F_NOTIFY_ON_EMPTY -> ACRN_VIRTIO_F_NOTIFY_ON_EMPTY
VIRTIO_RING_F_INDIRECT_DESC -> ACRN_VIRTIO_RING_F_INDIRECT_DESC
VIRTIO_RING_F_EVENT_IDX -> ACRN_VIRTIO_RING_F_EVENT_IDX
VIRTIO_F_VERSION_1 -> ACRN_VIRTIO_F_VERSION_1

vring_avail -> virtio_vring_avail
vring_used -> virtio_vring_used
vring_size -> virtio_vring_size

Tracked-On: #1329
Signed-off-by: Jian Jun Chen <jian.jun.chen@intel.com>
Acked-by: Yu Wang <yu1.wang@intel.com>
This commit is contained in:
Jian Jun Chen 2018-08-10 14:46:39 +08:00 committed by lijinxia
parent dd6a5fbe95
commit 781e7dfb29
7 changed files with 50 additions and 50 deletions

View File

@ -193,7 +193,7 @@ virtio_vq_init(struct virtio_base *base, uint32_t pfn)
vq = &base->queues[base->curq]; vq = &base->queues[base->curq];
vq->pfn = pfn; vq->pfn = pfn;
phys = (uint64_t)pfn << VRING_PAGE_BITS; phys = (uint64_t)pfn << VRING_PAGE_BITS;
size = vring_size(vq->qsize); size = virtio_vring_size(vq->qsize);
vb = paddr_guest2host(base->dev->vmctx, phys, size); vb = paddr_guest2host(base->dev->vmctx, phys, size);
/* First page(s) are descriptors... */ /* First page(s) are descriptors... */
@ -201,14 +201,14 @@ virtio_vq_init(struct virtio_base *base, uint32_t pfn)
vb += vq->qsize * sizeof(struct virtio_desc); vb += vq->qsize * sizeof(struct virtio_desc);
/* ... immediately followed by "avail" ring (entirely uint16_t's) */ /* ... immediately followed by "avail" ring (entirely uint16_t's) */
vq->avail = (struct vring_avail *)vb; vq->avail = (struct virtio_vring_avail *)vb;
vb += (2 + vq->qsize + 1) * sizeof(uint16_t); vb += (2 + vq->qsize + 1) * sizeof(uint16_t);
/* Then it's rounded up to the next page... */ /* Then it's rounded up to the next page... */
vb = (char *)roundup2((uintptr_t)vb, VRING_ALIGN); vb = (char *)roundup2((uintptr_t)vb, VRING_ALIGN);
/* ... and the last page(s) are the used ring. */ /* ... and the last page(s) are the used ring. */
vq->used = (struct vring_used *)vb; vq->used = (struct virtio_vring_used *)vb;
/* Mark queue as allocated, and start at 0 when we use it. */ /* Mark queue as allocated, and start at 0 when we use it. */
vq->flags = VQ_ALLOC; vq->flags = VQ_ALLOC;
@ -244,13 +244,13 @@ virtio_vq_enable(struct virtio_base *base)
phys = (((uint64_t)vq->gpa_avail[1]) << 32) | vq->gpa_avail[0]; phys = (((uint64_t)vq->gpa_avail[1]) << 32) | vq->gpa_avail[0];
size = (2 + qsz + 1) * sizeof(uint16_t); size = (2 + qsz + 1) * sizeof(uint16_t);
vb = paddr_guest2host(base->dev->vmctx, phys, size); vb = paddr_guest2host(base->dev->vmctx, phys, size);
vq->avail = (struct vring_avail *)vb; vq->avail = (struct virtio_vring_avail *)vb;
/* used ring */ /* used ring */
phys = (((uint64_t)vq->gpa_used[1]) << 32) | vq->gpa_used[0]; phys = (((uint64_t)vq->gpa_used[1]) << 32) | vq->gpa_used[0];
size = sizeof(uint16_t) * 3 + sizeof(struct virtio_used) * qsz; size = sizeof(uint16_t) * 3 + sizeof(struct virtio_used) * qsz;
vb = paddr_guest2host(base->dev->vmctx, phys, size); vb = paddr_guest2host(base->dev->vmctx, phys, size);
vq->used = (struct vring_used *)vb; vq->used = (struct virtio_vring_used *)vb;
/* Mark queue as allocated, and start at 0 when we use it. */ /* Mark queue as allocated, and start at 0 when we use it. */
vq->flags = VQ_ALLOC; vq->flags = VQ_ALLOC;
@ -378,11 +378,11 @@ vq_getchain(struct virtio_vq_info *vq, uint16_t *pidx,
return -1; return -1;
} }
vdir = &vq->desc[next]; vdir = &vq->desc[next];
if ((vdir->flags & VRING_DESC_F_INDIRECT) == 0) { if ((vdir->flags & ACRN_VRING_DESC_F_INDIRECT) == 0) {
_vq_record(i, vdir, ctx, iov, n_iov, flags); _vq_record(i, vdir, ctx, iov, n_iov, flags);
i++; i++;
} else if ((base->device_caps & } else if ((base->device_caps &
VIRTIO_RING_F_INDIRECT_DESC) == 0) { ACRN_VIRTIO_RING_F_INDIRECT_DESC) == 0) {
fprintf(stderr, fprintf(stderr,
"%s: descriptor has forbidden INDIRECT flag, " "%s: descriptor has forbidden INDIRECT flag, "
"driver confused?\r\n", "driver confused?\r\n",
@ -409,7 +409,7 @@ vq_getchain(struct virtio_vq_info *vq, uint16_t *pidx,
next = 0; next = 0;
for (;;) { for (;;) {
vp = &vindir[next]; vp = &vindir[next];
if (vp->flags & VRING_DESC_F_INDIRECT) { if (vp->flags & ACRN_VRING_DESC_F_INDIRECT) {
fprintf(stderr, fprintf(stderr,
"%s: indirect desc has INDIR flag," "%s: indirect desc has INDIR flag,"
" driver confused?\r\n", " driver confused?\r\n",
@ -419,7 +419,7 @@ vq_getchain(struct virtio_vq_info *vq, uint16_t *pidx,
_vq_record(i, vp, ctx, iov, n_iov, flags); _vq_record(i, vp, ctx, iov, n_iov, flags);
if (++i > VQ_MAX_DESCRIPTORS) if (++i > VQ_MAX_DESCRIPTORS)
goto loopy; goto loopy;
if ((vp->flags & VRING_DESC_F_NEXT) == 0) if ((vp->flags & ACRN_VRING_DESC_F_NEXT) == 0)
break; break;
next = vp->next; next = vp->next;
if (next >= n_indir) { if (next >= n_indir) {
@ -431,7 +431,7 @@ vq_getchain(struct virtio_vq_info *vq, uint16_t *pidx,
} }
} }
} }
if ((vdir->flags & VRING_DESC_F_NEXT) == 0) if ((vdir->flags & ACRN_VRING_DESC_F_NEXT) == 0)
return i; return i;
} }
loopy: loopy:
@ -464,7 +464,7 @@ void
vq_relchain(struct virtio_vq_info *vq, uint16_t idx, uint32_t iolen) vq_relchain(struct virtio_vq_info *vq, uint16_t idx, uint32_t iolen)
{ {
uint16_t uidx, mask; uint16_t uidx, mask;
volatile struct vring_used *vuh; volatile struct virtio_vring_used *vuh;
volatile struct virtio_used *vue; volatile struct virtio_used *vue;
/* /*
@ -513,7 +513,7 @@ vq_endchains(struct virtio_vq_info *vq, int used_all_avail)
* Interrupt generation: if we're using EVENT_IDX, * Interrupt generation: if we're using EVENT_IDX,
* interrupt if we've crossed the event threshold. * interrupt if we've crossed the event threshold.
* Otherwise interrupt is generated if we added "used" entries, * Otherwise interrupt is generated if we added "used" entries,
* but suppressed by VRING_AVAIL_F_NO_INTERRUPT. * but suppressed by ACRN_VRING_AVAIL_F_NO_INTERRUPT.
* *
* In any case, though, if NOTIFY_ON_EMPTY is set and the * In any case, though, if NOTIFY_ON_EMPTY is set and the
* entire avail was processed, we need to interrupt always. * entire avail was processed, we need to interrupt always.
@ -522,9 +522,9 @@ vq_endchains(struct virtio_vq_info *vq, int used_all_avail)
old_idx = vq->save_used; old_idx = vq->save_used;
vq->save_used = new_idx = vq->used->idx; vq->save_used = new_idx = vq->used->idx;
if (used_all_avail && if (used_all_avail &&
(base->negotiated_caps & VIRTIO_F_NOTIFY_ON_EMPTY)) (base->negotiated_caps & ACRN_VIRTIO_F_NOTIFY_ON_EMPTY))
intr = 1; intr = 1;
else if (base->negotiated_caps & VIRTIO_RING_F_EVENT_IDX) { else if (base->negotiated_caps & ACRN_VIRTIO_RING_F_EVENT_IDX) {
event_idx = VQ_USED_EVENT_IDX(vq); event_idx = VQ_USED_EVENT_IDX(vq);
/* /*
* This calculation is per docs and the kernel * This calculation is per docs and the kernel
@ -534,7 +534,7 @@ vq_endchains(struct virtio_vq_info *vq, int used_all_avail)
(uint16_t)(new_idx - old_idx); (uint16_t)(new_idx - old_idx);
} else { } else {
intr = new_idx != old_idx && intr = new_idx != old_idx &&
!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT); !(vq->avail->flags & ACRN_VRING_AVAIL_F_NO_INTERRUPT);
} }
if (intr) if (intr)
vq_interrupt(base, vq); vq_interrupt(base, vq);
@ -1020,7 +1020,7 @@ virtio_set_modern_bar(struct virtio_base *base, bool use_notify_pio)
vops = base->vops; vops = base->vops;
if (!vops || (base->device_caps & VIRTIO_F_VERSION_1) == 0) if (!vops || (base->device_caps & ACRN_VIRTIO_F_VERSION_1) == 0)
return -1; return -1;
if (use_notify_pio) if (use_notify_pio)
@ -1036,7 +1036,7 @@ virtio_set_modern_bar(struct virtio_base *base, bool use_notify_pio)
void void
virtio_dev_error(struct virtio_base *base) virtio_dev_error(struct virtio_base *base)
{ {
if (base->negotiated_caps & VIRTIO_F_VERSION_1) { if (base->negotiated_caps & ACRN_VIRTIO_F_VERSION_1) {
/* see 2.1.2. if DRIVER_OK is set, need to send /* see 2.1.2. if DRIVER_OK is set, need to send
* a device configuration change notification to the driver * a device configuration change notification to the driver
*/ */

View File

@ -64,7 +64,7 @@
(VIRTIO_BLK_F_SEG_MAX | \ (VIRTIO_BLK_F_SEG_MAX | \
VIRTIO_BLK_F_BLK_SIZE | \ VIRTIO_BLK_F_BLK_SIZE | \
VIRTIO_BLK_F_TOPOLOGY | \ VIRTIO_BLK_F_TOPOLOGY | \
VIRTIO_RING_F_INDIRECT_DESC) /* indirect descriptors */ ACRN_VIRTIO_RING_F_INDIRECT_DESC) /* indirect descriptors */
/* /*
* Writeback cache bits * Writeback cache bits
@ -214,7 +214,7 @@ virtio_blk_proc(struct virtio_blk *blk, struct virtio_vq_info *vq)
assert(n >= 2 && n <= BLOCKIF_IOV_MAX + 2); assert(n >= 2 && n <= BLOCKIF_IOV_MAX + 2);
io = &blk->ios[idx]; io = &blk->ios[idx];
assert((flags[0] & VRING_DESC_F_WRITE) == 0); assert((flags[0] & ACRN_VRING_DESC_F_WRITE) == 0);
assert(iov[0].iov_len == sizeof(struct virtio_blk_hdr)); assert(iov[0].iov_len == sizeof(struct virtio_blk_hdr));
vbh = iov[0].iov_base; vbh = iov[0].iov_base;
memcpy(&io->req.iov, &iov[1], sizeof(struct iovec) * (n - 2)); memcpy(&io->req.iov, &iov[1], sizeof(struct iovec) * (n - 2));
@ -222,7 +222,7 @@ virtio_blk_proc(struct virtio_blk *blk, struct virtio_vq_info *vq)
io->req.offset = vbh->sector * DEV_BSIZE; io->req.offset = vbh->sector * DEV_BSIZE;
io->status = iov[--n].iov_base; io->status = iov[--n].iov_base;
assert(iov[n].iov_len == 1); assert(iov[n].iov_len == 1);
assert(flags[n] & VRING_DESC_F_WRITE); assert(flags[n] & ACRN_VRING_DESC_F_WRITE);
/* /*
* XXX * XXX
@ -240,7 +240,7 @@ virtio_blk_proc(struct virtio_blk *blk, struct virtio_vq_info *vq)
* therefore test the inverse of the descriptor bit * therefore test the inverse of the descriptor bit
* to the op. * to the op.
*/ */
assert(((flags[i] & VRING_DESC_F_WRITE) == 0) == writeop); assert(((flags[i] & ACRN_VRING_DESC_F_WRITE) == 0) == writeop);
iolen += iov[i].iov_len; iolen += iov[i].iov_len;
} }
io->req.resid = iolen; io->req.resid = iolen;

View File

@ -409,7 +409,7 @@ virtio_console_notify_rx(void *vdev, struct virtio_vq_info *vq)
if (!port->rx_ready) { if (!port->rx_ready) {
port->rx_ready = 1; port->rx_ready = 1;
vq->used->flags |= VRING_USED_F_NO_NOTIFY; vq->used->flags |= ACRN_VRING_USED_F_NO_NOTIFY;
} }
} }

View File

@ -972,7 +972,7 @@ static void *virtio_heci_tx_thread(void *param)
while (vheci->status != VHECI_DEINIT) { while (vheci->status != VHECI_DEINIT) {
/* note - tx mutex is locked here */ /* note - tx mutex is locked here */
while (!vq_has_descs(vq)) { while (!vq_has_descs(vq)) {
vq->used->flags &= ~VRING_USED_F_NO_NOTIFY; vq->used->flags &= ~ACRN_VRING_USED_F_NO_NOTIFY;
mb(); mb();
if (vq_has_descs(vq) && if (vq_has_descs(vq) &&
vheci->status != VHECI_RESET) vheci->status != VHECI_RESET)
@ -984,7 +984,7 @@ static void *virtio_heci_tx_thread(void *param)
if (vheci->status == VHECI_DEINIT) if (vheci->status == VHECI_DEINIT)
goto out; goto out;
} }
vq->used->flags |= VRING_USED_F_NO_NOTIFY; vq->used->flags |= ACRN_VRING_USED_F_NO_NOTIFY;
pthread_mutex_unlock(&vheci->tx_mutex); pthread_mutex_unlock(&vheci->tx_mutex);
do { do {
@ -1125,7 +1125,7 @@ static void *virtio_heci_rx_thread(void *param)
while (vheci->status != VHECI_DEINIT) { while (vheci->status != VHECI_DEINIT) {
/* note - rx mutex is locked here */ /* note - rx mutex is locked here */
while (vq_ring_ready(vq)) { while (vq_ring_ready(vq)) {
vq->used->flags &= ~VRING_USED_F_NO_NOTIFY; vq->used->flags &= ~ACRN_VRING_USED_F_NO_NOTIFY;
mb(); mb();
if (vq_has_descs(vq) && if (vq_has_descs(vq) &&
vheci->rx_need_sched && vheci->rx_need_sched &&
@ -1138,7 +1138,7 @@ static void *virtio_heci_rx_thread(void *param)
if (vheci->status == VHECI_DEINIT) if (vheci->status == VHECI_DEINIT)
goto out; goto out;
} }
vq->used->flags |= VRING_USED_F_NO_NOTIFY; vq->used->flags |= ACRN_VRING_USED_F_NO_NOTIFY;
do { do {
if (virtio_heci_proc_rx(vheci, vq)) if (virtio_heci_proc_rx(vheci, vq))
@ -1166,7 +1166,7 @@ virtio_heci_notify_rx(void *heci, struct virtio_vq_info *vq)
/* Signal the rx thread for processing */ /* Signal the rx thread for processing */
pthread_mutex_lock(&vheci->rx_mutex); pthread_mutex_lock(&vheci->rx_mutex);
DPRINTF(("vheci: RX: New IN buffer available!\n\r")); DPRINTF(("vheci: RX: New IN buffer available!\n\r"));
vq->used->flags |= VRING_USED_F_NO_NOTIFY; vq->used->flags |= ACRN_VRING_USED_F_NO_NOTIFY;
pthread_cond_signal(&vheci->rx_cond); pthread_cond_signal(&vheci->rx_cond);
pthread_mutex_unlock(&vheci->rx_mutex); pthread_mutex_unlock(&vheci->rx_mutex);
} }
@ -1184,7 +1184,7 @@ virtio_heci_notify_tx(void *heci, struct virtio_vq_info *vq)
/* Signal the tx thread for processing */ /* Signal the tx thread for processing */
pthread_mutex_lock(&vheci->tx_mutex); pthread_mutex_lock(&vheci->tx_mutex);
DPRINTF(("vheci: TX: New OUT buffer available!\n\r")); DPRINTF(("vheci: TX: New OUT buffer available!\n\r"));
vq->used->flags |= VRING_USED_F_NO_NOTIFY; vq->used->flags |= ACRN_VRING_USED_F_NO_NOTIFY;
pthread_cond_signal(&vheci->tx_cond); pthread_cond_signal(&vheci->tx_cond);
pthread_mutex_unlock(&vheci->tx_mutex); pthread_mutex_unlock(&vheci->tx_mutex);
} }

View File

@ -47,7 +47,7 @@ static int virtio_input_debug;
/* /*
* Host capabilities * Host capabilities
*/ */
#define VIRTIO_INPUT_S_HOSTCAPS (VIRTIO_F_VERSION_1) #define VIRTIO_INPUT_S_HOSTCAPS (ACRN_VIRTIO_F_VERSION_1)
enum virtio_input_config_select { enum virtio_input_config_select {
VIRTIO_INPUT_CFG_UNSET = 0x00, VIRTIO_INPUT_CFG_UNSET = 0x00,

View File

@ -74,7 +74,7 @@
#define VIRTIO_NET_S_HOSTCAPS \ #define VIRTIO_NET_S_HOSTCAPS \
(VIRTIO_NET_F_MAC | VIRTIO_NET_F_MRG_RXBUF | VIRTIO_NET_F_STATUS | \ (VIRTIO_NET_F_MAC | VIRTIO_NET_F_MRG_RXBUF | VIRTIO_NET_F_STATUS | \
VIRTIO_F_NOTIFY_ON_EMPTY | VIRTIO_RING_F_INDIRECT_DESC) ACRN_VIRTIO_F_NOTIFY_ON_EMPTY | ACRN_VIRTIO_RING_F_INDIRECT_DESC)
/* is address mcast/bcast? */ /* is address mcast/bcast? */
#define ETHER_IS_MULTICAST(addr) (*(addr) & 0x01) #define ETHER_IS_MULTICAST(addr) (*(addr) & 0x01)
@ -432,7 +432,7 @@ virtio_net_ping_rxq(void *vdev, struct virtio_vq_info *vq)
*/ */
if (net->rx_ready == 0) { if (net->rx_ready == 0) {
net->rx_ready = 1; net->rx_ready = 1;
vq->used->flags |= VRING_USED_F_NO_NOTIFY; vq->used->flags |= ACRN_VRING_USED_F_NO_NOTIFY;
} }
} }
@ -478,7 +478,7 @@ virtio_net_ping_txq(void *vdev, struct virtio_vq_info *vq)
/* Signal the tx thread for processing */ /* Signal the tx thread for processing */
pthread_mutex_lock(&net->tx_mtx); pthread_mutex_lock(&net->tx_mtx);
vq->used->flags |= VRING_USED_F_NO_NOTIFY; vq->used->flags |= ACRN_VRING_USED_F_NO_NOTIFY;
if (net->tx_in_progress == 0) if (net->tx_in_progress == 0)
pthread_cond_signal(&net->tx_cond); pthread_cond_signal(&net->tx_cond);
pthread_mutex_unlock(&net->tx_mtx); pthread_mutex_unlock(&net->tx_mtx);
@ -512,7 +512,7 @@ virtio_net_tx_thread(void *param)
for (;;) { for (;;) {
/* note - tx mutex is locked here */ /* note - tx mutex is locked here */
while (net->resetting || !vq_has_descs(vq)) { while (net->resetting || !vq_has_descs(vq)) {
vq->used->flags &= ~VRING_USED_F_NO_NOTIFY; vq->used->flags &= ~ACRN_VRING_USED_F_NO_NOTIFY;
/* memory barrier */ /* memory barrier */
mb(); mb();
if (!net->resetting && vq_has_descs(vq)) if (!net->resetting && vq_has_descs(vq))
@ -527,7 +527,7 @@ virtio_net_tx_thread(void *param)
return NULL; return NULL;
} }
} }
vq->used->flags |= VRING_USED_F_NO_NOTIFY; vq->used->flags |= ACRN_VRING_USED_F_NO_NOTIFY;
net->tx_in_progress = 1; net->tx_in_progress = 1;
pthread_mutex_unlock(&net->tx_mtx); pthread_mutex_unlock(&net->tx_mtx);

View File

@ -116,9 +116,9 @@
* The two event fields, <used_event> and <avail_event>, in the * The two event fields, <used_event> and <avail_event>, in the
* avail and used rings (respectively -- note the reversal!), are * avail and used rings (respectively -- note the reversal!), are
* always provided, but are used only if the virtual device * always provided, but are used only if the virtual device
* negotiates the VIRTIO_RING_F_EVENT_IDX feature during feature * negotiates the ACRN_VIRTIO_RING_F_EVENT_IDX feature during feature
* negotiation. Similarly, both rings provide a flag -- * negotiation. Similarly, both rings provide a flag --
* VRING_AVAIL_F_NO_INTERRUPT and VRING_USED_F_NO_NOTIFY -- in * ACRN_VRING_AVAIL_F_NO_INTERRUPT and ACRN_VRING_USED_F_NO_NOTIFY -- in
* their <flags> field, indicating that the guest does not need an * their <flags> field, indicating that the guest does not need an
* interrupt, or that the hypervisor driver does not need a * interrupt, or that the hypervisor driver does not need a
* notify, when descriptors are added to the corresponding ring. * notify, when descriptors are added to the corresponding ring.
@ -137,9 +137,9 @@
#define VRING_ALIGN 4096 #define VRING_ALIGN 4096
#define VRING_DESC_F_NEXT (1 << 0) #define ACRN_VRING_DESC_F_NEXT (1 << 0)
#define VRING_DESC_F_WRITE (1 << 1) #define ACRN_VRING_DESC_F_WRITE (1 << 1)
#define VRING_DESC_F_INDIRECT (1 << 2) #define ACRN_VRING_DESC_F_INDIRECT (1 << 2)
struct virtio_desc { /* AKA vring_desc */ struct virtio_desc { /* AKA vring_desc */
uint64_t addr; /* guest physical address */ uint64_t addr; /* guest physical address */
@ -153,17 +153,17 @@ struct virtio_used { /* AKA vring_used_elem */
uint32_t tlen; /* length written-to */ uint32_t tlen; /* length written-to */
} __attribute__((packed)); } __attribute__((packed));
#define VRING_AVAIL_F_NO_INTERRUPT 1 #define ACRN_VRING_AVAIL_F_NO_INTERRUPT 1
struct vring_avail { struct virtio_vring_avail {
uint16_t flags; /* VRING_AVAIL_F_* */ uint16_t flags; /* VRING_AVAIL_F_* */
uint16_t idx; /* counts to 65535, then cycles */ uint16_t idx; /* counts to 65535, then cycles */
uint16_t ring[]; /* size N, reported in QNUM value */ uint16_t ring[]; /* size N, reported in QNUM value */
/* uint16_t used_event; -- after N ring entries */ /* uint16_t used_event; -- after N ring entries */
} __attribute__((packed)); } __attribute__((packed));
#define VRING_USED_F_NO_NOTIFY 1 #define ACRN_VRING_USED_F_NO_NOTIFY 1
struct vring_used { struct virtio_vring_used {
uint16_t flags; /* VRING_USED_F_* */ uint16_t flags; /* VRING_USED_F_* */
uint16_t idx; /* counts to 65535, then cycles */ uint16_t idx; /* counts to 65535, then cycles */
struct virtio_used ring[]; struct virtio_used ring[];
@ -310,12 +310,12 @@ struct vring_used {
* Feature flags. * Feature flags.
* Note: bits 0 through 23 are reserved to each device type. * Note: bits 0 through 23 are reserved to each device type.
*/ */
#define VIRTIO_F_NOTIFY_ON_EMPTY (1 << 24) #define ACRN_VIRTIO_F_NOTIFY_ON_EMPTY (1 << 24)
#define VIRTIO_RING_F_INDIRECT_DESC (1 << 28) #define ACRN_VIRTIO_RING_F_INDIRECT_DESC (1 << 28)
#define VIRTIO_RING_F_EVENT_IDX (1 << 29) #define ACRN_VIRTIO_RING_F_EVENT_IDX (1 << 29)
/* v1.0 compliant. */ /* v1.0 compliant. */
#define VIRTIO_F_VERSION_1 (1UL << 32) #define ACRN_VIRTIO_F_VERSION_1 (1UL << 32)
/* From section 2.3, "Virtqueue Configuration", of the virtio specification */ /* From section 2.3, "Virtqueue Configuration", of the virtio specification */
/** /**
@ -327,7 +327,7 @@ struct vring_used {
* @return size of a certain virtqueue, in bytes. * @return size of a certain virtqueue, in bytes.
*/ */
static inline size_t static inline size_t
vring_size(u_int qsz) virtio_vring_size(u_int qsz)
{ {
size_t size; size_t size;
@ -594,9 +594,9 @@ struct virtio_vq_info {
volatile struct virtio_desc *desc; volatile struct virtio_desc *desc;
/**< descriptor array */ /**< descriptor array */
volatile struct vring_avail *avail; volatile struct virtio_vring_avail *avail;
/**< the "avail" ring */ /**< the "avail" ring */
volatile struct vring_used *used; volatile struct virtio_vring_used *used;
/**< the "used" ring */ /**< the "used" ring */
uint32_t gpa_desc[2]; /**< gpa of descriptors */ uint32_t gpa_desc[2]; /**< gpa of descriptors */