hv: tee: Handling x86_tee secure interrupts corner cases

Previous upstreamed patches handle the secure/non-secure interrupts in
handle_x86_tee_int. However, there is a corner case in which there might
be unhandled secure interrupts (in a very short time window) when the TEE
yields its vCPU. For this case we always make sure that no secure interrupts
are pending in the TEE's vlapic before scheduling the REE.

Also, in previous patches, if a non-secure interrupt arrives while the TEE
is handling its secure interrupts, the hypervisor injects a predefined
vector into the TEE's vlapic. The TEE does not consume this vector in its
secure interrupt handling routine, so it stays in the vIRR; it should be
cleared, however, because the actual interrupt will be consumed in the REE
after VM entry.

v3:
    Fix comments on interrupt priority

v2:
    Add comments explaining the priority of secure/non-secure interrupts

Tracked-On: #6571
Signed-off-by: Yifan Liu <yifan1.liu@intel.com>
Reviewed-by: Wang, Yu1 <yu1.wang@intel.com>
Acked-by: Anthony Xu <anthony.xu@intel.com>
This commit is contained in:
Yifan Liu 2021-12-02 11:05:11 +00:00 committed by wenlingz
parent fd7ab300a8
commit fa6b55db68

View File

@ -10,6 +10,9 @@
#include <asm/mmu.h> #include <asm/mmu.h>
#include <asm/guest/optee.h> #include <asm/guest/optee.h>
#include <asm/trampoline.h> #include <asm/trampoline.h>
#include <asm/guest/vlapic.h>
#include <asm/guest/virq.h>
#include <asm/lapic.h>
#include <reloc.h> #include <reloc.h>
#include <hypercall.h> #include <hypercall.h>
#include <logmsg.h> #include <logmsg.h>
@ -56,6 +59,7 @@ static int32_t tee_switch_to_ree(struct acrn_vcpu *vcpu)
uint64_t rdi, rsi, rdx, rbx; uint64_t rdi, rsi, rdx, rbx;
struct acrn_vm *ree_vm; struct acrn_vm *ree_vm;
struct acrn_vcpu *ree_vcpu; struct acrn_vcpu *ree_vcpu;
uint32_t pending_intr;
int32_t ret = -EINVAL; int32_t ret = -EINVAL;
rdi = vcpu_get_gpreg(vcpu, CPU_REG_RDI); rdi = vcpu_get_gpreg(vcpu, CPU_REG_RDI);
@ -78,6 +82,28 @@ static int32_t tee_switch_to_ree(struct acrn_vcpu *vcpu)
vcpu_set_gpreg(ree_vcpu, CPU_REG_RBX, rbx); vcpu_set_gpreg(ree_vcpu, CPU_REG_RBX, rbx);
} }
pending_intr = vlapic_get_next_pending_intr(vcpu);
if (prio(pending_intr) > prio(TEE_FIXED_NONSECURE_VECTOR)) {
/* For TEE, all non-secure interrupts are represented as
* TEE_FIXED_NONSECURE_VECTOR that has lower priority than all
* secure interrupts.
*
	 * If there are secure interrupts pending, we inject TEE's PI
	 * ANV and schedule REE. This way REE gets trapped immediately
* after VM Entry and will go through the secure interrupt handling
* flow in handle_x86_tee_int.
*/
send_single_ipi(pcpuid_from_vcpu(ree_vcpu),
(uint32_t)(vcpu->arch.pid.control.bits.nv));
} else if (prio(pending_intr) == prio(TEE_FIXED_NONSECURE_VECTOR)) {
/* The TEE_FIXED_NONSECURE_VECTOR needs to be cleared as the
* pending non-secure interrupts will be handled immediately
* after resuming to REE. On ARM this is automatically done
* by hardware and ACRN emulates this behavior.
*/
vlapic_clear_pending_intr(vcpu, TEE_FIXED_NONSECURE_VECTOR);
}
sleep_thread(&vcpu->thread_obj); sleep_thread(&vcpu->thread_obj);
ret = 0; ret = 0;
} else { } else {