HV: io: refine state transitions of VHM requests

Instead of using two members to maintain the state of a VHM request, this
patch tracks the state in a single member. The lifecycle of a VHM request
shall be:

    FREE -> PENDING -> PROCESSING -> COMPLETE -> FREE -> ...

The header comment of struct vhm_request details the state transitions and
the access limitations under each state.
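
As a rough sketch only (the enum and its comments below are illustrative,
not the actual header; the real constants live in the VHM request
definitions shared with SOS), the states and their ownership rules are:

    /* Illustrative sketch of the request states; each comment names the
     * component allowed to act on the request slot in that state. */
    enum vhm_request_state_sketch {
        REQ_STATE_FREE,        /* slot unused; only the hypervisor may allocate it */
        REQ_STATE_PENDING,     /* posted by the hypervisor, waiting for SOS/DM */
        REQ_STATE_PROCESSING,  /* owned by the DM while it emulates the access */
        REQ_STATE_COMPLETE,    /* results ready; hypervisor does post-work, then frees */
        REQ_STATE_FAILED,      /* emulation failed; the hypervisor handles the error */
    };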

Also drop the set but unused member vcpu.ioreq_pending.

For backward compatibility, the obsolete 'valid' member is still kept and
maintained until SOS and DM adapt to the new state transitions.
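
A minimal sketch of that maintenance (the helper name below is assumed for
illustration, not taken from the patch):

    /* Hypothetical helper: mirror the legacy 'valid' flag from the new
     * single-state 'processed' member until SOS/DM are updated. */
    static void set_ioreq_state(struct vhm_request *vhm_req, uint32_t state)
    {
        /* old SOS/DM builds treat a slot as live whenever 'valid' != 0 */
        vhm_req->valid = (state != REQ_STATE_FREE) ? 1 : 0;
        atomic_store32(&vhm_req->processed, state);
    }

The complete_ioreq() helper introduced below is the REQ_STATE_FREE case of
this pattern.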

v2 -> v3:

    * Use complete_ioreq to mark an I/O request finished in
      dm_emulate_(pio|mmio)_post.

Signed-off-by: Junjie Mao <junjie.mao@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
commit 17771c0ac2 (parent 941eb9db02)
Author: Junjie Mao, 2018-07-27 21:24:39 +08:00; committed by lijinxia
8 changed files with 144 additions and 51 deletions


@@ -117,7 +117,6 @@ int create_vcpu(uint16_t pcpu_id, struct vm *vm, struct vcpu **rtn_vcpu_handle)
     vcpu->launched = false;
     vcpu->paused_cnt = 0U;
     vcpu->running = 0;
-    vcpu->ioreq_pending = 0;
     vcpu->arch_vcpu.nr_sipi = 0;
     vcpu->pending_pre_work = 0U;
     vcpu->state = VCPU_INIT;
@@ -277,7 +276,6 @@ void reset_vcpu(struct vcpu *vcpu)
     vcpu->launched = false;
     vcpu->paused_cnt = 0U;
     vcpu->running = 0;
-    vcpu->ioreq_pending = 0;
     vcpu->arch_vcpu.nr_sipi = 0;
     vcpu->pending_pre_work = 0U;


@@ -613,12 +613,10 @@ int vioapic_mmio_access_handler(struct vcpu *vcpu, struct io_request *io_req,
                 gpa,
                 &data);
             mmio->value = (uint64_t)data;
-            io_req->processed = REQ_STATE_SUCCESS;
         } else if (mmio->direction == REQUEST_WRITE) {
             vioapic_mmio_write(vm,
                 gpa,
                 data);
-            io_req->processed = REQ_STATE_SUCCESS;
         } else {
             /* Can never happen due to the range of direction. */
         }
@@ -627,6 +625,7 @@ int vioapic_mmio_access_handler(struct vcpu *vcpu, struct io_request *io_req,
         ret = -EINVAL;
     }
 
+    io_req->processed = REQ_STATE_COMPLETE;
     return ret;
 }


@@ -2069,17 +2069,16 @@ int vlapic_mmio_access_handler(struct vcpu *vcpu, struct io_request *io_req,
             gpa,
             &mmio_req->value,
             mmio_req->size);
-        io_req->processed = REQ_STATE_SUCCESS;
     } else if (mmio_req->direction == REQUEST_WRITE) {
         ret = vlapic_write_mmio_reg(vcpu,
             gpa,
             mmio_req->value,
             mmio_req->size);
-        io_req->processed = REQ_STATE_SUCCESS;
     } else {
         /* Can never happen due to the range of mmio_req->direction. */
     }
 
+    io_req->processed = REQ_STATE_COMPLETE;
     return ret;
 }


@@ -9,6 +9,12 @@
 #include "guest/instr_emul_wrapper.h"
 #include "guest/instr_emul.h"
 
+static void complete_ioreq(struct vhm_request *vhm_req)
+{
+    vhm_req->valid = 0;
+    atomic_store32(&vhm_req->processed, REQ_STATE_FREE);
+}
+
 /**
  * @pre io_req->type == REQ_PORTIO
  */
@@ -19,7 +25,7 @@ emulate_pio_post(struct vcpu *vcpu, struct io_request *io_req)
     struct pio_request *pio_req = &io_req->reqs.pio;
     uint64_t mask = 0xFFFFFFFFUL >> (32UL - 8UL * pio_req->size);
 
-    if (io_req->processed == REQ_STATE_SUCCESS) {
+    if (io_req->processed == REQ_STATE_COMPLETE) {
         if (pio_req->direction == REQUEST_READ) {
             uint64_t value = (uint64_t)pio_req->value;
             int32_t context_idx = vcpu->arch_vcpu.cur_context;
@@ -53,11 +59,10 @@ int32_t dm_emulate_pio_post(struct vcpu *vcpu)
     req_buf = (union vhm_request_buffer *)(vcpu->vm->sw.io_shared_page);
     vhm_req = &req_buf->req_queue[cur];
 
     io_req->processed = vhm_req->processed;
     pio_req->value = vhm_req->reqs.pio.value;
 
-    /* VHM emulation data already copy to req, mark to free slot now */
-    vhm_req->valid = 0;
+    complete_ioreq(vhm_req);
 
     return emulate_pio_post(vcpu, io_req);
 }
@@ -70,7 +75,7 @@ int32_t emulate_mmio_post(struct vcpu *vcpu, struct io_request *io_req)
     int32_t ret;
     struct mmio_request *mmio_req = &io_req->reqs.mmio;
 
-    if (io_req->processed == REQ_STATE_SUCCESS) {
+    if (io_req->processed == REQ_STATE_COMPLETE) {
         if (mmio_req->direction == REQUEST_READ) {
             /* Emulate instruction and update vcpu register set */
             ret = emulate_instruction(vcpu);
@@ -99,38 +104,26 @@ int32_t dm_emulate_mmio_post(struct vcpu *vcpu)
     vhm_req = &req_buf->req_queue[cur];
 
     mmio_req->value = vhm_req->reqs.mmio.value;
     io_req->processed = vhm_req->processed;
 
-    /* VHM emulation data already copy to req, mark to free slot now */
-    vhm_req->valid = 0;
+    complete_ioreq(vhm_req);
 
     return emulate_mmio_post(vcpu, io_req);
 }
 
-static void complete_ioreq(struct vcpu *vcpu)
-{
-    union vhm_request_buffer *req_buf;
-    struct vhm_request *vhm_req;
-
-    req_buf = (union vhm_request_buffer *)
-            vcpu->vm->sw.io_shared_page;
-    vhm_req = &req_buf->req_queue[vcpu->vcpu_id];
-
-    vhm_req->valid = 0;
-    atomic_store32(&vcpu->ioreq_pending, 0U);
-}
-
 void emulate_io_post(struct vcpu *vcpu)
 {
     union vhm_request_buffer *req_buf;
     struct vhm_request *vhm_req;
     struct io_request *io_req = &vcpu->req;
 
     req_buf = (union vhm_request_buffer *)vcpu->vm->sw.io_shared_page;
     vhm_req = &req_buf->req_queue[vcpu->vcpu_id];
+    io_req->processed = atomic_load32(&vhm_req->processed);
 
     if ((vhm_req->valid == 0) ||
-        ((vhm_req->processed != REQ_STATE_SUCCESS) &&
-         (vhm_req->processed != REQ_STATE_FAILED))) {
+        ((io_req->processed != REQ_STATE_COMPLETE) &&
+         (io_req->processed != REQ_STATE_FAILED))) {
         return;
     }
@@ -139,7 +132,7 @@ void emulate_io_post(struct vcpu *vcpu)
      * mark ioreq done and don't resume vcpu.
      */
     if (vcpu->state == VCPU_ZOMBIE) {
-        complete_ioreq(vcpu);
+        complete_ioreq(vhm_req);
         return;
     }
@@ -162,7 +155,7 @@ void emulate_io_post(struct vcpu *vcpu)
     default:
         /* REQ_WP can only be triggered on writes which do not need
          * post-work. Just mark the ioreq done. */
-        complete_ioreq(vcpu);
+        complete_ioreq(vhm_req);
         break;
     }
@@ -222,7 +215,7 @@ hv_emulate_pio(struct vcpu *vcpu, struct io_request *io_req)
         }
 
         /* TODO: failures in the handlers should be reflected
         * here. */
-        io_req->processed = REQ_STATE_SUCCESS;
+        io_req->processed = REQ_STATE_COMPLETE;
         status = 0;
         break;
     }