HV: io: add post-work for PCICFG and WP requests

Currently no post-work is done for I/O requests of type PCICFG or WP. The
impacts include:

    1. "valid" in VHM request buffers is left as 1 even after the I/O request
       completes. This violates the pre-condition of acrn_insert_request_wait()
       but does not cause failures since a new I/O request can never happen
       before the previous one completes.

    2. Values read from PCI configuration spaces are never passed to UOS.

This patch adds the post-work for these two kinds of I/O requests. The post-work
for port I/O is invoked for PCICFG since it is essentially a port I/O and the
request structure is compatible. No post-work is needed for WP as it is only
triggered for EPT violations on writes, while post-work is mainly for reads.

v2 -> v3:

    * Consistently use 0/1 (not false/true) for the "valid" member.
    * Add comments to suggest when the hypervisor can see REQ_PCICFG and why
      dm_emulate_pio_post also works in such cases.
    * Rename: mark_ioreq_done -> complete_ioreq
    * Rename: complete_request -> emulate_io_post
    * Rename: hcall_notify_req_finish -> hcall_notify_ioreq_finish

Signed-off-by: Junjie Mao <junjie.mao@intel.com>
Reviewed-by: Jason Chen CJ <jason.cj.chen@intel.com>
This commit is contained in:
Junjie Mao 2018-07-27 18:15:24 +08:00 committed by lijinxia
parent 26ab2c9146
commit d8179519b9
3 changed files with 29 additions and 12 deletions

View File

@@ -103,7 +103,7 @@ int vmcall_vmexit_handler(struct vcpu *vcpu)
case HC_NOTIFY_REQUEST_FINISH:
/* param1: vmid
* param2: vcpu_id */
ret = hcall_notify_req_finish((uint16_t)param1,
ret = hcall_notify_ioreq_finish((uint16_t)param1,
(uint16_t)param2);
break;

View File

@@ -361,20 +361,27 @@ int32_t hcall_set_ioreq_buffer(struct vm *vm, uint16_t vmid, uint64_t param)
return ret;
}
static void complete_request(struct vcpu *vcpu)
static void complete_ioreq(struct vcpu *vcpu)
{
union vhm_request_buffer *req_buf;
struct vhm_request *vhm_req;
req_buf = (union vhm_request_buffer *)
vcpu->vm->sw.io_shared_page;
vhm_req = &req_buf->req_queue[vcpu->vcpu_id];
vhm_req->valid = 0;
atomic_store32(&vcpu->ioreq_pending, 0U);
}
static void emulate_io_post(struct vcpu *vcpu)
{
/*
* If vcpu is in Zombie state and will be destroyed soon. Just
* mark ioreq done and don't resume vcpu.
*/
if (vcpu->state == VCPU_ZOMBIE) {
union vhm_request_buffer *req_buf;
req_buf = (union vhm_request_buffer *)
vcpu->vm->sw.io_shared_page;
req_buf->req_queue[vcpu->vcpu_id].valid = false;
atomic_store32(&vcpu->ioreq_pending, 0U);
complete_ioreq(vcpu);
return;
}
@@ -384,17 +391,27 @@ static void complete_request(struct vcpu *vcpu)
break;
case REQ_PORTIO:
case REQ_PCICFG:
/* REQ_PORTIO on 0xcf8 & 0xcfc may switch to REQ_PCICFG in some
* cases. It works to apply the post-work for REQ_PORTIO on
* REQ_PCICFG because the format of the first 28 bytes of
* REQ_PORTIO & REQ_PCICFG requests are exactly the same and
* post-work is mainly interested in the read value.
*/
dm_emulate_pio_post(vcpu);
break;
default:
/* REQ_WP can only be triggered on writes which do not need
* post-work. Just mark the ioreq done. */
complete_ioreq(vcpu);
break;
}
resume_vcpu(vcpu);
}
int32_t hcall_notify_req_finish(uint16_t vmid, uint16_t vcpu_id)
int32_t hcall_notify_ioreq_finish(uint16_t vmid, uint16_t vcpu_id)
{
union vhm_request_buffer *req_buf;
struct vhm_request *req;
@@ -423,7 +440,7 @@ int32_t hcall_notify_req_finish(uint16_t vmid, uint16_t vcpu_id)
if ((req->valid != 0) &&
((req->processed == REQ_STATE_SUCCESS) ||
(req->processed == REQ_STATE_FAILED))) {
complete_request(vcpu);
emulate_io_post(vcpu);
}
return 0;

View File

@@ -203,7 +203,7 @@ int32_t hcall_set_ioreq_buffer(struct vm *vm, uint16_t vmid, uint64_t param);
*
* @return 0 on success, non-zero on error.
*/
int32_t hcall_notify_req_finish(uint16_t vmid, uint16_t vcpu_id);
int32_t hcall_notify_ioreq_finish(uint16_t vmid, uint16_t vcpu_id);
/**
* @brief setup ept memory mapping