mirror of
				https://github.com/projectacrn/acrn-hypervisor.git
				synced 2025-10-31 17:34:34 +00:00 
			
		
		
		
- move functions related to vmexit from `guest.h` to `vmexit.h` - move functions related to msr from `guest.h` to `msr.h` - move functions related to vm_sw_loader from `guest.h` to `vm.h` - move function `vmx_vmrun` from `guest.h` to `vcpu.h` - move MACROs related to vcpu from `guest.h` to `vcpu.h` - move MACRO `E820_MAX_ENTRIES` from `guest.h` to `e820.h` - move MACROs related to irq from `guest.h` to `irq.h` - rename `guest.h` to `guest_memory.h` Tracked-On: #2503 Signed-off-by: Shiqing Gao <shiqing.gao@intel.com> Acked-by: Eddie Dong <eddie.dong@intel.com>
		
			
				
	
	
		
			90 lines
		
	
	
		
			2.4 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
			
		
		
	
	
			90 lines
		
	
	
		
			2.4 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
/*
 * Copyright (C) 2018 Intel Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

/*
 * VM-exit dispatch table type, exit handler prototypes, and static inline
 * accessors for decoding the VMCS exit qualification field.
 */

#ifndef VMEXIT_H_
#define VMEXIT_H_

/*
 * One dispatch-table entry per VM-exit reason.
 *
 * handler                  - callback invoked for this exit reason.
 * need_exit_qualification  - NOTE(review): non-zero presumably tells the
 *                            dispatcher to fetch the exit qualification
 *                            from the VMCS before calling the handler —
 *                            confirm against the dispatch loop in vmexit.c.
 */
struct vm_exit_dispatch {
	int32_t (*handler)(struct acrn_vcpu *);
	uint32_t need_exit_qualification;
};
| 
 | |
/* Top-level VM-exit dispatcher for the given vCPU. */
int32_t vmexit_handler(struct acrn_vcpu *vcpu);
/* Per-reason exit handlers (VMCALL, CPUID, RDMSR, WRMSR). */
int32_t vmcall_vmexit_handler(struct acrn_vcpu *vcpu);
int32_t cpuid_vmexit_handler(struct acrn_vcpu *vcpu);
int32_t rdmsr_vmexit_handler(struct acrn_vcpu *vcpu);
int32_t wrmsr_vmexit_handler(struct acrn_vcpu *vcpu);

/* NOTE(review): looks like the assembly VM-exit entry point — confirm
 * where it is defined; not implemented in this header. */
extern void vm_exit(void);
/*
 * Extract bits [msb:lsb] (inclusive) of an exit qualification value,
 * leaving them at their original bit positions; callers shift the result
 * down by lsb themselves.
 *
 * Fix: the original computed the high mask as ((1UL << (msb + 1U)) - 1UL),
 * which is undefined behavior when msb == 63U (shift count equal to the
 * type width, C11 6.5.7).  Building the mask by right-shifting all-ones
 * is defined for every msb in 0..63 and produces identical results for
 * the 0..62 range the old code handled.
 */
static inline uint64_t
vm_exit_qualification_bit_mask(uint64_t exit_qual, uint32_t msb, uint32_t lsb)
{
	/* bits 0..msb set, then clear the bits below lsb */
	return (exit_qual &
			((~0UL >> (63U - msb)) & ~((1UL << lsb) - 1UL)));
}
| 
 | |
/* access Control-Register Info using exit qualification field */

/* Control register number involved in the access: bits 3:0. */
static inline uint64_t vm_exit_cr_access_cr_num(uint64_t exit_qual)
{
	return (exit_qual & 0xFUL);
}
| 
 | |
/* Access type: bits 5:4 (e.g. MOV to/from CR, CLTS, LMSW per Intel SDM). */
static inline uint64_t vm_exit_cr_access_type(uint64_t exit_qual)
{
	return ((exit_qual >> 4U) & 0x3UL);
}
| 
 | |
/* LMSW operand type flag: bit 6. */
static inline uint64_t vm_exit_cr_access_lmsw_op(uint64_t exit_qual)
{
	return ((exit_qual >> 6U) & 0x1UL);
}
| 
 | |
/* General-purpose register index for MOV CR accesses: bits 11:8. */
static inline uint64_t vm_exit_cr_access_reg_idx(uint64_t exit_qual)
{
	return ((exit_qual >> 8U) & 0xFUL);
}
| 
 | |
/*
 * LMSW source data: bits 31:16.
 * NOTE(review): "date" in the name looks like a typo for "data"; kept
 * as-is since renaming would break external callers.
 */
static inline uint64_t vm_exit_cr_access_lmsw_src_date(uint64_t exit_qual)
{
	return ((exit_qual >> 16U) & 0xFFFFUL);
}
| 
 | |
/* access IO Access Info using exit qualification field */

/* Size of access: bits 2:0 (0 = 1 byte, 1 = 2 bytes, 3 = 4 bytes). */
static inline uint64_t vm_exit_io_instruction_size(uint64_t exit_qual)
{
	return (exit_qual & 0x7UL);
}
| 
 | |
/* Direction of attempted access: bit 3 (0 = OUT, 1 = IN). */
static inline uint64_t
vm_exit_io_instruction_access_direction(uint64_t exit_qual)
{
	return ((exit_qual >> 3U) & 0x1UL);
}
| 
 | |
/* String instruction flag (INS/OUTS): bit 4. */
static inline uint64_t vm_exit_io_instruction_is_string(uint64_t exit_qual)
{
	return ((exit_qual >> 4U) & 0x1UL);
}
| 
 | |
/* REP-prefixed flag: bit 5. */
static inline uint64_t
vm_exit_io_instruction_is_rep_prefixed(uint64_t exit_qual)
{
	return ((exit_qual >> 5U) & 0x1UL);
}
| 
 | |
/* Operand encoding flag: bit 6 (0 = DX register, 1 = immediate). */
static inline uint64_t
vm_exit_io_instruction_is_operand_encoding(uint64_t exit_qual)
{
	return ((exit_qual >> 6U) & 0x1UL);
}
| 
 | |
/* I/O port number: bits 31:16. */
static inline uint64_t vm_exit_io_instruction_port_number(uint64_t exit_qual)
{
	return ((exit_qual >> 16U) & 0xFFFFUL);
}
| 
 | |
#endif /* VMEXIT_H_ */