hv: extend struct pi_desc to support VT-d posted interrupts

For CPU-side posted interrupts, only bit 0 (ON) of the PI descriptor's
64-bit control word is used; the other bits are don't-cares. This is not
the case for VT-d posted interrupts, so define additional bit fields for
the 64-bit control word.
Use bitmap functions to manipulate these bit fields atomically.
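
As an illustration only (not part of this commit), a minimal sketch of how
the ON bit in control.value can be driven atomically with these bitmap
helpers; the wrapper functions below are hypothetical, while the helper
calls and the POSTED_INTR_ON bit position come from the hunks that follow:

	/* Hypothetical wrappers around the locked bitmap helpers used in the diff.
	 * POSTED_INTR_ON (bit 0 of control.value) is the Outstanding Notification bit.
	 */
	static inline bool pi_test_and_set_on(struct pi_desc *pid)
	{
		/* returns true if ON was already set, i.e. a notification is pending */
		return bitmap_test_and_set_lock(POSTED_INTR_ON, &pid->control.value);
	}

	static inline bool pi_test_and_clear_on(struct pi_desc *pid)
	{
		/* returns true if ON was set and has now been consumed */
		return bitmap_test_and_clear_lock(POSTED_INTR_ON, &pid->control.value);
	}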

Also fix some MISRA-C violations and coding-style issues.

Tracked-On: #4506
Signed-off-by: dongshen <dongsheng.x.zhang@intel.com>
Reviewed-by: Eddie Dong <eddie.dong@Intel.com>
dongshen 2020-03-18 14:31:18 -07:00 committed by wenlingz
parent 8f732f2809
commit 0f3c876a91
3 changed files with 44 additions and 15 deletions


@@ -270,7 +270,8 @@ static void init_xsave(struct acrn_vcpu *vcpu)
 	(void)memset((void *)area, 0U, XSAVE_STATE_AREA_SIZE);
-	/* xsaves only support compacted format, so set it in xcomp_bv[63],
-	 * keep the reset area in header area as zero. */
+	/* xsaves only support compacted format, so set it in xcomp_bv[63],
+	 * keep the reset area in header area as zero.
+	 */
 	ectx->xs_area.xsave_hdr.hdr.xcomp_bv |= XSAVE_COMPACTED_FORMAT;
 }
@ -402,7 +403,7 @@ void init_vcpu_protect_mode_regs(struct acrn_vcpu *vcpu, uint64_t vgdt_base_gpa)
{ {
struct acrn_vcpu_regs vcpu_regs; struct acrn_vcpu_regs vcpu_regs;
(void)memcpy_s((void*)&vcpu_regs, sizeof(struct acrn_vcpu_regs), (void)memcpy_s((void *)&vcpu_regs, sizeof(struct acrn_vcpu_regs),
(void *)&protect_mode_init_vregs, sizeof(struct acrn_vcpu_regs)); (void *)&protect_mode_init_vregs, sizeof(struct acrn_vcpu_regs));
vcpu_regs.gdt.base = vgdt_base_gpa; vcpu_regs.gdt.base = vgdt_base_gpa;
@@ -472,6 +473,12 @@ int32_t create_vcpu(uint16_t pcpu_id, struct acrn_vm *vm, struct acrn_vcpu **rtn
 	 */
 	vcpu->arch.vpid = 1U + (vm->vm_id * MAX_VCPUS_PER_VM) + vcpu->vcpu_id;
 
+	vcpu->arch.pid.control.bits.nv = POSTED_INTR_VECTOR;
+	/* ACRN does not support vCPU migration, one vCPU always runs on
+	 * the same pCPU, so PI's ndst is never changed after startup.
+	 */
+	vcpu->arch.pid.control.bits.ndst = per_cpu(lapic_id, pcpu_id);
+
 	/* Create per vcpu vlapic */
 	vlapic_create(vcpu);
@ -644,8 +651,8 @@ void kick_vcpu(const struct acrn_vcpu *vcpu)
} }
/* /*
* @pre (&vcpu->stack[CONFIG_STACK_SIZE] & (CPU_STACK_ALIGN - 1UL)) == 0 * @pre (&vcpu->stack[CONFIG_STACK_SIZE] & (CPU_STACK_ALIGN - 1UL)) == 0
*/ */
static uint64_t build_stack_frame(struct acrn_vcpu *vcpu) static uint64_t build_stack_frame(struct acrn_vcpu *vcpu)
{ {
uint64_t stacktop = (uint64_t)&vcpu->stack[CONFIG_STACK_SIZE]; uint64_t stacktop = (uint64_t)&vcpu->stack[CONFIG_STACK_SIZE];


@@ -1957,8 +1957,8 @@ vlapic_intr_msi(struct acrn_vm *vm, uint64_t addr, uint64_t msg)
 	phys = (address.bits.dest_mode == MSI_ADDR_DESTMODE_PHYS);
 	rh = (address.bits.rh == MSI_ADDR_RH);
-	delmode = data.bits.delivery_mode;
-	vec = data.bits.vector;
+	delmode = (uint32_t)(data.bits.delivery_mode);
+	vec = (uint32_t)(data.bits.vector);
 
 	dev_dbg(DBG_LEVEL_VLAPIC, "lapic MSI %s dest %#x, vec %u",
 		phys ? "physical" : "logical", dest, vec);
@@ -2214,11 +2214,9 @@ apicv_set_intr_ready(struct acrn_vlapic *vlapic, uint32_t vector)
 	bool notify = false;
 
 	pid = get_pi_desc(vlapic->vcpu);
 	idx = vector >> 6U;
 	if (!bitmap_test_and_set_lock((uint16_t)(vector & 0x3fU), &pid->pir[idx])) {
-		notify = (atomic_cmpxchg64(&pid->pending, 0UL, 1UL) == 0UL);
+		notify = (bitmap_test_and_set_lock(POSTED_INTR_ON, &pid->control.value) == false);
 	}
 
 	return notify;
 }
@ -2278,7 +2276,7 @@ static void vlapic_apicv_inject_pir(struct acrn_vlapic *vlapic)
struct lapic_reg *irr = NULL; struct lapic_reg *irr = NULL;
pid = get_pi_desc(vlapic->vcpu); pid = get_pi_desc(vlapic->vcpu);
if (atomic_cmpxchg64(&pid->pending, 1UL, 0UL) == 1UL) { if (bitmap_test_and_clear_lock(POSTED_INTR_ON, &pid->control.value)) {
pirval = 0UL; pirval = 0UL;
lapic = &(vlapic->apic_page); lapic = &(vlapic->apic_page);
irr = &lapic->irr[0]; irr = &lapic->irr[0];
@@ -2445,7 +2443,7 @@ int32_t apic_access_vmexit_handler(struct acrn_vcpu *vcpu)
 	 *    2 = linear access for an instruction fetch
 	 * c) we suppose the guest goes wrong when it will access the APIC-access page
 	 *    when process event-delivery. According chap 26.5.1.2 VM Exits During Event Injection,
-	 *    vol 3, sdm: If the "virtualize APIC accesses"<EFBFBD> VM-execution control is 1 and
+	 *    vol 3, sdm: If the "virtualize APIC accesses" VM-execution control is 1 and
 	 *    event delivery generates an access to the APIC-access page, that access is treated as
 	 *    described in Section 29.4 and may cause a VM exit.
 	 *    3 = linear access (read or write) during event delivery


@@ -378,10 +378,33 @@
 struct pi_desc {
 	/* Posted Interrupt Requests, one bit per requested vector */
 	uint64_t pir[4];
 
-	uint64_t pending;
-	uint32_t unused[3];
+	union {
+		struct {
+			/* Outstanding Notification */
+			uint16_t on:1;
+			/* Suppress Notification, of non-urgent interrupts */
+			uint16_t sn:1;
+			uint16_t rsvd_1:14;
+			/* Notification Vector */
+			uint8_t nv;
+			uint8_t rsvd_2;
+			/* Notification destination, a physical LAPIC ID */
+			uint32_t ndst;
+		} bits;
+		uint64_t value;
+	} control;
+
+	uint32_t rsvd[6];
 } __aligned(64);
 
 /* External Interfaces */
 void vmx_on(void);
 void vmx_off(void);
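
For reference only (not part of the commit): a sketch of how the bit fields
above map onto control.value, assuming the LSB-first bit-field layout GCC uses
on x86 (ON at bit 0, SN at bit 1, NV at bits 16-23, NDST at bits 32-63,
matching the VT-d posted interrupt descriptor format). The helper name
pi_control_pack is hypothetical:

	/* Hypothetical helper: pack the same fields by hand to show which bits
	 * of the 64-bit control word they occupy.
	 */
	static inline uint64_t pi_control_pack(bool on, bool sn, uint8_t nv, uint32_t ndst)
	{
		return ((uint64_t)on)              /* ON,   bit 0       */
			| ((uint64_t)sn << 1U)     /* SN,   bit 1       */
			| ((uint64_t)nv << 16U)    /* NV,   bits 16-23  */
			| ((uint64_t)ndst << 32U); /* NDST, bits 32-63  */
	}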
@@ -408,4 +431,5 @@ void exec_vmwrite64(uint32_t field_full, uint64_t value);
 void exec_vmclear(void *addr);
 void exec_vmptrld(void *addr);
 
+#define POSTED_INTR_ON 0U
+
 #endif /* VMX_H_ */