Mirror of https://github.com/projectacrn/acrn-hypervisor.git (synced 2025-09-22 09:17:58 +00:00)
initial import

internal commit: 14ac2bc2299032fa6714d1fefa7cf0987b3e3085

Signed-off-by: Eddie Dong <eddie.dong@intel.com>
524
hypervisor/include/arch/x86/apicreg.h
Normal file
@@ -0,0 +1,524 @@
/*-
 * Copyright (c) 1996, by Peter Wemm and Steve Passe
 * Copyright (c) 2017 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _APICREG_H_
#define _APICREG_H_

/*
 * Local && I/O APIC definitions.
 */

/*
 * Pentium P54C+ Built-in APIC
 * (Advanced programmable Interrupt Controller)
 *
 * Base Address of Built-in APIC in memory location
 * is 0xfee00000.
 *
 * Map of APIC Registers:
 *
 * Offset (hex)    Description                      Read/Write state
 * 000             Reserved
 * 010             Reserved
 * 020 ID          Local APIC ID                    R/W
 * 030 VER         Local APIC Version               R
 * 040             Reserved
 * 050             Reserved
 * 060             Reserved
 * 070             Reserved
 * 080             Task Priority Register           R/W
 * 090             Arbitration Priority Register    R
 * 0A0             Processor Priority Register      R
 * 0B0             EOI Register                     W
 * 0C0 RRR         Remote read                      R
 * 0D0             Logical Destination              R/W
 * 0E0             Destination Format Register      0..27 R; 28..31 R/W
 * 0F0 SVR         Spurious Interrupt Vector Reg.   0..3 R; 4..9 R/W
 * 100             ISR  000-031                     R
 * 110             ISR  032-063                     R
 * 120             ISR  064-095                     R
 * 130             ISR  095-128                     R
 * 140             ISR  128-159                     R
 * 150             ISR  160-191                     R
 * 160             ISR  192-223                     R
 * 170             ISR  224-255                     R
 * 180             TMR  000-031                     R
 * 190             TMR  032-063                     R
 * 1A0             TMR  064-095                     R
 * 1B0             TMR  095-128                     R
 * 1C0             TMR  128-159                     R
 * 1D0             TMR  160-191                     R
 * 1E0             TMR  192-223                     R
 * 1F0             TMR  224-255                     R
 * 200             IRR  000-031                     R
 * 210             IRR  032-063                     R
 * 220             IRR  064-095                     R
 * 230             IRR  095-128                     R
 * 240             IRR  128-159                     R
 * 250             IRR  160-191                     R
 * 260             IRR  192-223                     R
 * 270             IRR  224-255                     R
 * 280             Error Status Register            R
 * 290             Reserved
 * 2A0             Reserved
 * 2B0             Reserved
 * 2C0             Reserved
 * 2D0             Reserved
 * 2E0             Reserved
 * 2F0             Local Vector Table (CMCI)        R/W
 * 300 ICR_LOW     Interrupt Command Reg. (0-31)    R/W
 * 310 ICR_HI      Interrupt Command Reg. (32-63)   R/W
 * 320             Local Vector Table (Timer)       R/W
 * 330             Local Vector Table (Thermal)     R/W (PIV+)
 * 340             Local Vector Table (Performance) R/W (P6+)
 * 350 LVT1        Local Vector Table (LINT0)       R/W
 * 360 LVT2        Local Vector Table (LINT1)       R/W
 * 370 LVT3        Local Vector Table (ERROR)       R/W
 * 380             Initial Count Reg. for Timer     R/W
 * 390             Current Count of Timer           R
 * 3A0             Reserved
 * 3B0             Reserved
 * 3C0             Reserved
 * 3D0             Reserved
 * 3E0             Timer Divide Configuration Reg.  R/W
 * 3F0             Reserved
 */


/******************************************************************************
 * global defines, etc.
 */


/******************************************************************************
 * LOCAL APIC structure
 */

#ifndef LOCORE

#define PAD3	int: 32; int: 32; int: 32
#define PAD4	int: 32; int: 32; int: 32; int: 32

struct lapic_reg {
	uint32_t val;	PAD3;
};

struct lapic {
	/* reserved */	PAD4;
	/* reserved */	PAD4;
	uint32_t id;	PAD3;
	uint32_t version;	PAD3;
	/* reserved */	PAD4;
	/* reserved */	PAD4;
	/* reserved */	PAD4;
	/* reserved */	PAD4;
	uint32_t tpr;	PAD3;
	uint32_t apr;	PAD3;
	uint32_t ppr;	PAD3;
	uint32_t eoi;	PAD3;
	/* reserved */	PAD4;
	uint32_t ldr;	PAD3;
	uint32_t dfr;	PAD3;
	uint32_t svr;	PAD3;
	struct lapic_reg isr[8];
	struct lapic_reg tmr[8];
	struct lapic_reg irr[8];
	uint32_t esr;	PAD3;
	/* reserved */	PAD4;
	/* reserved */	PAD4;
	/* reserved */	PAD4;
	/* reserved */	PAD4;
	/* reserved */	PAD4;
	/* reserved */	PAD4;
	uint32_t lvt_cmci;	PAD3;
	uint32_t icr_lo;	PAD3;
	uint32_t icr_hi;	PAD3;
	uint32_t lvt_timer;	PAD3;
	uint32_t lvt_thermal;	PAD3;
	uint32_t lvt_pcint;	PAD3;
	uint32_t lvt_lint0;	PAD3;
	uint32_t lvt_lint1;	PAD3;
	uint32_t lvt_error;	PAD3;
	uint32_t icr_timer;	PAD3;
	uint32_t ccr_timer;	PAD3;
	/* reserved */	PAD4;
	/* reserved */	PAD4;
	/* reserved */	PAD4;
	/* reserved */	PAD4;
	uint32_t dcr_timer;	PAD3;
	/* reserved */	PAD4;
};

enum LAPIC_REGISTERS {
	LAPIC_ID	= 0x2,
	LAPIC_VERSION	= 0x3,
	LAPIC_TPR	= 0x8,
	LAPIC_APR	= 0x9,
	LAPIC_PPR	= 0xa,
	LAPIC_EOI	= 0xb,
	LAPIC_LDR	= 0xd,
	LAPIC_DFR	= 0xe, /* Not in x2APIC */
	LAPIC_SVR	= 0xf,
	LAPIC_ISR0	= 0x10,
	LAPIC_ISR1	= 0x11,
	LAPIC_ISR2	= 0x12,
	LAPIC_ISR3	= 0x13,
	LAPIC_ISR4	= 0x14,
	LAPIC_ISR5	= 0x15,
	LAPIC_ISR6	= 0x16,
	LAPIC_ISR7	= 0x17,
	LAPIC_TMR0	= 0x18,
	LAPIC_TMR1	= 0x19,
	LAPIC_TMR2	= 0x1a,
	LAPIC_TMR3	= 0x1b,
	LAPIC_TMR4	= 0x1c,
	LAPIC_TMR5	= 0x1d,
	LAPIC_TMR6	= 0x1e,
	LAPIC_TMR7	= 0x1f,
	LAPIC_IRR0	= 0x20,
	LAPIC_IRR1	= 0x21,
	LAPIC_IRR2	= 0x22,
	LAPIC_IRR3	= 0x23,
	LAPIC_IRR4	= 0x24,
	LAPIC_IRR5	= 0x25,
	LAPIC_IRR6	= 0x26,
	LAPIC_IRR7	= 0x27,
	LAPIC_ESR	= 0x28,
	LAPIC_LVT_CMCI	= 0x2f,
	LAPIC_ICR_LO	= 0x30,
	LAPIC_ICR_HI	= 0x31, /* Not in x2APIC */
	LAPIC_LVT_TIMER	= 0x32,
	LAPIC_LVT_THERMAL = 0x33,
	LAPIC_LVT_PCINT	= 0x34,
	LAPIC_LVT_LINT0	= 0x35,
	LAPIC_LVT_LINT1	= 0x36,
	LAPIC_LVT_ERROR	= 0x37,
	LAPIC_ICR_TIMER	= 0x38,
	LAPIC_CCR_TIMER	= 0x39,
	LAPIC_DCR_TIMER	= 0x3e,
	LAPIC_SELF_IPI	= 0x3f, /* Only in x2APIC */
	LAPIC_EXT_FEATURES = 0x40, /* AMD */
	LAPIC_EXT_CTRL	= 0x41, /* AMD */
	LAPIC_EXT_SEOI	= 0x42, /* AMD */
	LAPIC_EXT_IER0	= 0x48, /* AMD */
	LAPIC_EXT_IER1	= 0x49, /* AMD */
	LAPIC_EXT_IER2	= 0x4a, /* AMD */
	LAPIC_EXT_IER3	= 0x4b, /* AMD */
	LAPIC_EXT_IER4	= 0x4c, /* AMD */
	LAPIC_EXT_IER5	= 0x4d, /* AMD */
	LAPIC_EXT_IER6	= 0x4e, /* AMD */
	LAPIC_EXT_IER7	= 0x4f, /* AMD */
	LAPIC_EXT_LVT0	= 0x50, /* AMD */
	LAPIC_EXT_LVT1	= 0x51, /* AMD */
	LAPIC_EXT_LVT2	= 0x52, /* AMD */
	LAPIC_EXT_LVT3	= 0x53, /* AMD */
};

#define LAPIC_MEM_MUL	0x10

/*
 * Although some registers are available on AMD processors only,
 * it's not a big waste to reserve them on all platforms.
 * However, we need to watch out for this space being assigned for
 * non-APIC purposes in the future processor models.
 */
#define LAPIC_MEM_REGION	((LAPIC_EXT_LVT3 + 1) * LAPIC_MEM_MUL)

/******************************************************************************
 * I/O APIC structure
 */

struct ioapic {
	uint32_t ioregsel;	PAD3;
	uint32_t iowin;	PAD3;
};

#undef PAD4
#undef PAD3

#endif /* !LOCORE */


/******************************************************************************
 * various code 'logical' values
 */

/******************************************************************************
 * LOCAL APIC defines
 */

/* default physical locations of LOCAL (CPU) APICs */
#define DEFAULT_APIC_BASE	0xfee00000

/* constants relating to APIC ID registers */
#define APIC_ID_MASK		0xff000000
#define APIC_ID_SHIFT		24
#define APIC_ID_CLUSTER		0xf0
#define APIC_ID_CLUSTER_ID	0x0f
#define APIC_MAX_CLUSTER	0xe
#define APIC_MAX_INTRACLUSTER_ID 3
#define APIC_ID_CLUSTER_SHIFT	4

/* fields in VER */
#define APIC_VER_VERSION	0x000000ff
#define APIC_VER_MAXLVT		0x00ff0000
#define MAXLVTSHIFT		16
#define APIC_VER_EOI_SUPPRESSION 0x01000000
#define APIC_VER_AMD_EXT_SPACE	0x80000000

/* fields in LDR */
#define APIC_LDR_RESERVED	0x00ffffff

/* fields in DFR */
#define APIC_DFR_RESERVED	0x0fffffff
#define APIC_DFR_MODEL_MASK	0xf0000000
#define APIC_DFR_MODEL_FLAT	0xf0000000
#define APIC_DFR_MODEL_CLUSTER	0x00000000

/* fields in SVR */
#define APIC_SVR_VECTOR		0x000000ff
#define APIC_SVR_VEC_PROG	0x000000f0
#define APIC_SVR_VEC_FIX	0x0000000f
#define APIC_SVR_ENABLE		0x00000100
#define APIC_SVR_SWDIS		0x00000000
#define APIC_SVR_SWEN		0x00000100
#define APIC_SVR_FOCUS		0x00000200
#define APIC_SVR_FEN		0x00000000
#define APIC_SVR_FDIS		0x00000200
#define APIC_SVR_EOI_SUPPRESSION 0x00001000

/* fields in TPR */
#define APIC_TPR_PRIO		0x000000ff
#define APIC_TPR_INT		0x000000f0
#define APIC_TPR_SUB		0x0000000f

/* fields in ESR */
#define APIC_ESR_SEND_CS_ERROR		0x00000001
#define APIC_ESR_RECEIVE_CS_ERROR	0x00000002
#define APIC_ESR_SEND_ACCEPT		0x00000004
#define APIC_ESR_RECEIVE_ACCEPT		0x00000008
#define APIC_ESR_SEND_ILLEGAL_VECTOR	0x00000020
#define APIC_ESR_RECEIVE_ILLEGAL_VECTOR	0x00000040
#define APIC_ESR_ILLEGAL_REGISTER	0x00000080

/* fields in ICR_LOW */
#define APIC_VECTOR_MASK	0x000000ff

#define APIC_DELMODE_MASK	0x00000700
#define APIC_DELMODE_FIXED	0x00000000
#define APIC_DELMODE_LOWPRIO	0x00000100
#define APIC_DELMODE_SMI	0x00000200
#define APIC_DELMODE_RR		0x00000300
#define APIC_DELMODE_NMI	0x00000400
#define APIC_DELMODE_INIT	0x00000500
#define APIC_DELMODE_STARTUP	0x00000600
#define APIC_DELMODE_RESV	0x00000700

#define APIC_DESTMODE_MASK	0x00000800
#define APIC_DESTMODE_PHY	0x00000000
#define APIC_DESTMODE_LOG	0x00000800

#define APIC_DELSTAT_MASK	0x00001000
#define APIC_DELSTAT_IDLE	0x00000000
#define APIC_DELSTAT_PEND	0x00001000

#define APIC_RESV1_MASK		0x00002000

#define APIC_LEVEL_MASK		0x00004000
#define APIC_LEVEL_DEASSERT	0x00000000
#define APIC_LEVEL_ASSERT	0x00004000

#define APIC_TRIGMOD_MASK	0x00008000
#define APIC_TRIGMOD_EDGE	0x00000000
#define APIC_TRIGMOD_LEVEL	0x00008000

#define APIC_RRSTAT_MASK	0x00030000
#define APIC_RRSTAT_INVALID	0x00000000
#define APIC_RRSTAT_INPROG	0x00010000
#define APIC_RRSTAT_VALID	0x00020000
#define APIC_RRSTAT_RESV	0x00030000

#define APIC_DEST_MASK		0x000c0000
#define APIC_DEST_DESTFLD	0x00000000
#define APIC_DEST_SELF		0x00040000
#define APIC_DEST_ALLISELF	0x00080000
#define APIC_DEST_ALLESELF	0x000c0000

#define APIC_RESV2_MASK		0xfff00000

#define APIC_ICRLO_RESV_MASK	(APIC_RESV1_MASK | APIC_RESV2_MASK)

/* fields in LVT1/2 */
#define APIC_LVT_VECTOR		0x000000ff
#define APIC_LVT_DM		0x00000700
#define APIC_LVT_DM_FIXED	0x00000000
#define APIC_LVT_DM_SMI		0x00000200
#define APIC_LVT_DM_NMI		0x00000400
#define APIC_LVT_DM_INIT	0x00000500
#define APIC_LVT_DM_EXTINT	0x00000700
#define APIC_LVT_DS		0x00001000
#define APIC_LVT_IIPP		0x00002000
#define APIC_LVT_IIPP_INTALO	0x00002000
#define APIC_LVT_IIPP_INTAHI	0x00000000
#define APIC_LVT_RIRR		0x00004000
#define APIC_LVT_TM		0x00008000
#define APIC_LVT_M		0x00010000


/* fields in LVT Timer */
#define APIC_LVTT_VECTOR	0x000000ff
#define APIC_LVTT_DS		0x00001000
#define APIC_LVTT_M		0x00010000
#define APIC_LVTT_TM		0x00060000
#define APIC_LVTT_TM_ONE_SHOT	0x00000000
#define APIC_LVTT_TM_PERIODIC	0x00020000
#define APIC_LVTT_TM_TSCDLT	0x00040000
#define APIC_LVTT_TM_RSRV	0x00060000

/* APIC timer current count */
#define APIC_TIMER_MAX_COUNT	0xffffffff

/* fields in TDCR */
#define APIC_TDCR_2		0x00
#define APIC_TDCR_4		0x01
#define APIC_TDCR_8		0x02
#define APIC_TDCR_16		0x03
#define APIC_TDCR_32		0x08
#define APIC_TDCR_64		0x09
#define APIC_TDCR_128		0x0a
#define APIC_TDCR_1		0x0b

/* Constants related to AMD Extended APIC Features Register */
#define APIC_EXTF_ELVT_MASK	0x00ff0000
#define APIC_EXTF_ELVT_SHIFT	16
#define APIC_EXTF_EXTID_CAP	0x00000004
#define APIC_EXTF_SEIO_CAP	0x00000002
#define APIC_EXTF_IER_CAP	0x00000001

/* LVT table indices */
#define APIC_LVT_LINT0		0
#define APIC_LVT_LINT1		1
#define APIC_LVT_TIMER		2
#define APIC_LVT_ERROR		3
#define APIC_LVT_PMC		4
#define APIC_LVT_THERMAL	5
#define APIC_LVT_CMCI		6
#define APIC_LVT_MAX		APIC_LVT_CMCI

/* AMD extended LVT constants, seem to be assigned by fiat */
#define APIC_ELVT_IBS		0 /* Instruction based sampling */
#define APIC_ELVT_MCA		1 /* MCE thresholding */
#define APIC_ELVT_DEI		2 /* Deferred error interrupt */
#define APIC_ELVT_SBI		3 /* Sideband interface */
#define APIC_ELVT_MAX		APIC_ELVT_SBI

/******************************************************************************
 * I/O APIC defines
 */

/* default physical locations of an IO APIC */
#define DEFAULT_IO_APIC_BASE	0xfec00000

/* window register offset */
#define IOAPIC_WINDOW		0x10
#define IOAPIC_EOIR		0x40

/* indexes into IO APIC */
#define IOAPIC_ID		0x00
#define IOAPIC_VER		0x01
#define IOAPIC_ARB		0x02
#define IOAPIC_REDTBL		0x10
#define IOAPIC_REDTBL0		IOAPIC_REDTBL
#define IOAPIC_REDTBL1		(IOAPIC_REDTBL+0x02)
#define IOAPIC_REDTBL2		(IOAPIC_REDTBL+0x04)
#define IOAPIC_REDTBL3		(IOAPIC_REDTBL+0x06)
#define IOAPIC_REDTBL4		(IOAPIC_REDTBL+0x08)
#define IOAPIC_REDTBL5		(IOAPIC_REDTBL+0x0a)
#define IOAPIC_REDTBL6		(IOAPIC_REDTBL+0x0c)
#define IOAPIC_REDTBL7		(IOAPIC_REDTBL+0x0e)
#define IOAPIC_REDTBL8		(IOAPIC_REDTBL+0x10)
#define IOAPIC_REDTBL9		(IOAPIC_REDTBL+0x12)
#define IOAPIC_REDTBL10		(IOAPIC_REDTBL+0x14)
#define IOAPIC_REDTBL11		(IOAPIC_REDTBL+0x16)
#define IOAPIC_REDTBL12		(IOAPIC_REDTBL+0x18)
#define IOAPIC_REDTBL13		(IOAPIC_REDTBL+0x1a)
#define IOAPIC_REDTBL14		(IOAPIC_REDTBL+0x1c)
#define IOAPIC_REDTBL15		(IOAPIC_REDTBL+0x1e)
#define IOAPIC_REDTBL16		(IOAPIC_REDTBL+0x20)
#define IOAPIC_REDTBL17		(IOAPIC_REDTBL+0x22)
#define IOAPIC_REDTBL18		(IOAPIC_REDTBL+0x24)
#define IOAPIC_REDTBL19		(IOAPIC_REDTBL+0x26)
#define IOAPIC_REDTBL20		(IOAPIC_REDTBL+0x28)
#define IOAPIC_REDTBL21		(IOAPIC_REDTBL+0x2a)
#define IOAPIC_REDTBL22		(IOAPIC_REDTBL+0x2c)
#define IOAPIC_REDTBL23		(IOAPIC_REDTBL+0x2e)

/* fields in VER, for redirection entry */
#define IOAPIC_MAX_RTE_MASK	0x00ff0000
#define MAX_RTE_SHIFT		16

/*
 * fields in the IO APIC's redirection table entries
 */
#define IOAPIC_RTE_DEST		APIC_ID_MASK	/* broadcast addr: all APICs */

#define IOAPIC_RTE_RESV		0x00fe0000	/* reserved */

#define IOAPIC_RTE_INTMASK	0x00010000	/* R/W: INTerrupt mask */
#define IOAPIC_RTE_INTMCLR	0x00000000	/*      clear, allow INTs */
#define IOAPIC_RTE_INTMSET	0x00010000	/*      set, inhibit INTs */

#define IOAPIC_RTE_TRGRMOD	0x00008000	/* R/W: trigger mode */
#define IOAPIC_RTE_TRGREDG	0x00000000	/*      edge */
#define IOAPIC_RTE_TRGRLVL	0x00008000	/*      level */

#define IOAPIC_RTE_REM_IRR	0x00004000	/* RO: remote IRR */

#define IOAPIC_RTE_INTPOL	0x00002000	/* R/W: INT input pin polarity */
#define IOAPIC_RTE_INTAHI	0x00000000	/*      active high */
#define IOAPIC_RTE_INTALO	0x00002000	/*      active low */

#define IOAPIC_RTE_DELIVS	0x00001000	/* RO: delivery status */

#define IOAPIC_RTE_DESTMOD	0x00000800	/* R/W: destination mode */
#define IOAPIC_RTE_DESTPHY	0x00000000	/*      physical */
#define IOAPIC_RTE_DESTLOG	0x00000800	/*      logical */

#define IOAPIC_RTE_DELMOD	0x00000700	/* R/W: delivery mode */
#define IOAPIC_RTE_DELFIXED	0x00000000	/*      fixed */
#define IOAPIC_RTE_DELLOPRI	0x00000100	/*      lowest priority */
#define IOAPIC_RTE_DELSMI	0x00000200	/*      System Management INT */
#define IOAPIC_RTE_DELRSV1	0x00000300	/*      reserved */
#define IOAPIC_RTE_DELNMI	0x00000400	/*      NMI signal */
#define IOAPIC_RTE_DELINIT	0x00000500	/*      INIT signal */
#define IOAPIC_RTE_DELRSV2	0x00000600	/*      reserved */
#define IOAPIC_RTE_DELEXINT	0x00000700	/*      External INTerrupt */

#define IOAPIC_RTE_INTVEC	0x000000ff	/* R/W: INT vector field */

#endif /* _APICREG_H_ */
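The register map comment and LAPIC_MEM_MUL imply a simple offset rule for the xAPIC MMIO page: register index times 0x10 from the 0xfee00000 base. A minimal sketch of that arithmetic, assuming only the defines above (lapic_mmio_read() is a hypothetical helper, not part of this commit or of ACRN's real accessors):

static inline uint32_t lapic_mmio_read(uint32_t reg_idx)
{
	/* e.g. LAPIC_ID (0x2) -> offset 0x20, LAPIC_SVR (0xf) -> offset 0xf0 */
	volatile uint32_t *reg = (volatile uint32_t *)
		(uint64_t)(DEFAULT_APIC_BASE + reg_idx * LAPIC_MEM_MUL);
	return *reg;
}

/* usage: uint32_t apic_id = lapic_mmio_read(LAPIC_ID) >> APIC_ID_SHIFT; */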
99
hypervisor/include/arch/x86/assign.h
Normal file
@@ -0,0 +1,99 @@
/*
 * Copyright (C) 2018 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef ASSIGN_H
#define ASSIGN_H

enum ptdev_intr_type {
	PTDEV_INTR_MSI,
	PTDEV_INTR_INTX
};

enum ptdev_vpin_source {
	PTDEV_VPIN_IOAPIC,
	PTDEV_VPIN_PIC,
};

/* entry per guest virt vector */
struct ptdev_msi_info {
	uint32_t vmsi_addr;	/* virt msi_addr */
	uint32_t vmsi_data;	/* virt msi_data */
	uint16_t vmsi_ctl;	/* virt msi_ctl */
	uint32_t pmsi_addr;	/* phys msi_addr */
	uint32_t pmsi_data;	/* phys msi_data */
	int msix;		/* 0-MSI, 1-MSIX */
	int msix_entry_index;	/* MSI: 0, MSIX: index of vector table */
	int virt_vector;
	int phys_vector;
};

/* entry per guest vioapic pin */
struct ptdev_intx_info {
	enum ptdev_vpin_source vpin_src;
	uint8_t virt_pin;
	uint8_t phys_pin;
};

/* entry per each allocated irq/vector */
struct ptdev_remapping_info {
	struct vm *vm;
	uint16_t virt_bdf;	/* PCI bus:slot.func */
	uint16_t phys_bdf;	/* PCI bus:slot.func */
	uint32_t active;	/* 1=active, 0=inactive and to free */
	enum ptdev_intr_type type;
	struct dev_handler_node *node;
	struct list_head softirq_node;
	struct list_head entry_node;

	union {
		struct ptdev_msi_info msi;
		struct ptdev_intx_info intx;
	};
};

void ptdev_intx_ack(struct vm *vm, int virt_pin,
		enum ptdev_vpin_source vpin_src);
int ptdev_msix_remap(struct vm *vm, uint16_t virt_bdf,
		struct ptdev_msi_info *info);
int ptdev_intx_pin_remap(struct vm *vm, struct ptdev_intx_info *info);
void ptdev_softirq(int cpu);
void ptdev_init(void);
void ptdev_vm_init(struct vm *vm);
void ptdev_vm_deinit(struct vm *vm);
void ptdev_add_intx_remapping(struct vm *vm, uint16_t virt_bdf,
	uint16_t phys_bdf, uint8_t virt_pin, uint8_t phys_pin, bool pic_pin);
void ptdev_remove_intx_remapping(struct vm *vm, uint8_t virt_pin, bool pic_pin);
void ptdev_add_msix_remapping(struct vm *vm, uint16_t virt_bdf,
	uint16_t phys_bdf, int vector_count);
void ptdev_remove_msix_remapping(struct vm *vm, uint16_t virt_bdf,
	int vector_count);
int get_ptdev_info(char *str, int str_max);

#endif /* ASSIGN_H */
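A minimal sketch (not part of this commit) of how a caller might use the INTx remapping interface above: fill a ptdev_intx_info with the guest's vIOAPIC pin and the physical pin, then ask the hypervisor to remap it. The pin numbers here are made up for illustration.

void example_remap_intx(struct vm *vm)
{
	struct ptdev_intx_info intx;

	intx.vpin_src = PTDEV_VPIN_IOAPIC;
	intx.virt_pin = 4;	/* guest vIOAPIC pin (hypothetical) */
	intx.phys_pin = 20;	/* physical IOAPIC pin (hypothetical) */
	(void)ptdev_intx_pin_remap(vm, &intx);
}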
412
hypervisor/include/arch/x86/cpu.h
Normal file
@@ -0,0 +1,412 @@
/*-
 * Copyright (c) 1989, 1990 William F. Jolitz
 * Copyright (c) 1990 The Regents of the University of California.
 * Copyright (c) 2017 Intel Corporation
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)segments.h	7.1 (Berkeley) 5/9/91
 * $FreeBSD$
 */

#ifndef CPU_H
#define CPU_H

/* Define page size */
#define CPU_PAGE_SHIFT	12
#define CPU_PAGE_SIZE	0x1000

/* Define CPU stack alignment */
#define CPU_STACK_ALIGN	16

/* CR0 register definitions */
#define CR0_PG	(1<<31)	/* paging enable */
#define CR0_CD	(1<<30)	/* cache disable */
#define CR0_NW	(1<<29)	/* not write through */
#define CR0_AM	(1<<18)	/* alignment mask */
#define CR0_WP	(1<<16)	/* write protect */
#define CR0_NE	(1<<5)	/* numeric error */
#define CR0_ET	(1<<4)	/* extension type */
#define CR0_TS	(1<<3)	/* task switched */
#define CR0_EM	(1<<2)	/* emulation */
#define CR0_MP	(1<<1)	/* monitor coprocessor */
#define CR0_PE	(1<<0)	/* protected mode enabled */

/* CR3 register definitions */
#define CR3_PWT	(1<<3)	/* page-level write through */
#define CR3_PCD	(1<<4)	/* page-level cache disable */

/* CR4 register definitions */
#define CR4_VME	(1<<0)	/* virtual 8086 mode extensions */
#define CR4_PVI	(1<<1)	/* protected mode virtual interrupts */
#define CR4_TSD	(1<<2)	/* time stamp disable */
#define CR4_DE	(1<<3)	/* debugging extensions */
#define CR4_PSE	(1<<4)	/* page size extensions */
#define CR4_PAE	(1<<5)	/* physical address extensions */
#define CR4_MCE	(1<<6)	/* machine check enable */
#define CR4_PGE	(1<<7)	/* page global enable */
#define CR4_PCE	(1<<8)
/* performance monitoring counter enable */
#define CR4_OSFXSR	(1<<9)	/* OS support for FXSAVE/FXRSTOR */
#define CR4_OSXMMEXCPT	(1<<10)
/* OS support for unmasked SIMD floating point exceptions */
#define CR4_VMXE	(1<<13)	/* VMX enable */
#define CR4_SMXE	(1<<14)	/* SMX enable */
#define CR4_PCIDE	(1<<17)	/* PCID enable */
#define CR4_OSXSAVE	(1<<18)
/* XSAVE and Processor Extended States enable bit */


/*
 * Entries in the Interrupt Descriptor Table (IDT)
 */
#define IDT_DE		0	/* #DE: Divide Error */
#define IDT_DB		1	/* #DB: Debug */
#define IDT_NMI		2	/* Nonmaskable External Interrupt */
#define IDT_BP		3	/* #BP: Breakpoint */
#define IDT_OF		4	/* #OF: Overflow */
#define IDT_BR		5	/* #BR: Bound Range Exceeded */
#define IDT_UD		6	/* #UD: Undefined/Invalid Opcode */
#define IDT_NM		7	/* #NM: No Math Coprocessor */
#define IDT_DF		8	/* #DF: Double Fault */
#define IDT_FPUGP	9	/* Coprocessor Segment Overrun */
#define IDT_TS		10	/* #TS: Invalid TSS */
#define IDT_NP		11	/* #NP: Segment Not Present */
#define IDT_SS		12	/* #SS: Stack Segment Fault */
#define IDT_GP		13	/* #GP: General Protection Fault */
#define IDT_PF		14	/* #PF: Page Fault */
#define IDT_MF		16	/* #MF: FPU Floating-Point Error */
#define IDT_AC		17	/* #AC: Alignment Check */
#define IDT_MC		18	/* #MC: Machine Check */
#define IDT_XF		19	/* #XF: SIMD Floating-Point Exception */

/* Bits in EFER special registers */
#define EFER_LMA 0x000000400	/* Long mode active (R) */

/* CPU clock frequencies (FSB) */
#define CPU_FSB_83KHZ		83200
#define CPU_FSB_100KHZ		99840
#define CPU_FSB_133KHZ		133200
#define CPU_FSB_166KHZ		166400

/* Time conversions */
#define CPU_GHZ_TO_HZ		1000000000
#define CPU_GHZ_TO_KHZ		1000000
#define CPU_GHZ_TO_MHZ		1000
#define CPU_MHZ_TO_HZ		1000000
#define CPU_MHZ_TO_KHZ		1000

/* Boot CPU ID */
#define CPU_BOOT_ID		0

/* CPU states defined */
#define CPU_STATE_RESET		0
#define CPU_STATE_INITIALIZING	1
#define CPU_STATE_RUNNING	2
#define CPU_STATE_HALTED	3
#define CPU_STATE_DEAD		4

/* hypervisor stack bottom magic('intl') */
#define SP_BOTTOM_MAGIC		0x696e746c

/* type of speculation control
 * 0 - no speculation control support
 * 1 - raw IBRS + IPBP support
 * 2 - with STIBP optimization support
 */
#define IBRS_NONE	0
#define IBRS_RAW	1
#define IBRS_OPT	2

#ifndef ASSEMBLER

/**********************************/
/* EXTERNAL VARIABLES             */
/**********************************/
extern const uint8_t _ld_cpu_secondary_reset_load[];
extern uint8_t _ld_cpu_secondary_reset_start[];
extern const uint64_t _ld_cpu_secondary_reset_size;
extern uint8_t _ld_bss_start[];
extern uint8_t _ld_bss_end[];
extern uint8_t _ld_cpu_data_start[];
extern uint8_t _ld_cpu_data_end[];

extern int ibrs_type;

/*
 * To support per_cpu access, we use a special section ".cpu_data" to define
 * the pattern of per CPU data. And we allocate memory for per CPU data
 * according to multiple this section size and pcpu number.
 *
 *   +------------------+------------------+---+------------------+
 *   | percpu for pcpu0 | percpu for pcpu1 |...| percpu for pcpuX |
 *   +------------------+------------------+---+------------------+
 *   ^                  ^
 *   |                  |
 *   --.cpu_data size--
 *
 * To access per cpu data, we use:
 *   per_cpu_data_base_ptr + curr_pcpu_id * cpu_data_section_size +
 *   offset_of_symbol_in_cpu_data_section
 * to locate the per cpu data.
 */

/* declare per cpu data */
#define EXTERN_CPU_DATA(type, name)	\
	extern __typeof__(type) cpu_data_##name

EXTERN_CPU_DATA(uint8_t, lapic_id);
EXTERN_CPU_DATA(void *, vcpu);
EXTERN_CPU_DATA(uint8_t[STACK_SIZE], stack) __aligned(16);

/* define per cpu data */
#define DEFINE_CPU_DATA(type, name)	\
	__typeof__(type) cpu_data_##name	\
	__attribute__((__section__(".cpu_data")))

extern void *per_cpu_data_base_ptr;
extern int phy_cpu_num;

#define PER_CPU_DATA_OFFSET(sym_addr)	\
	((uint64_t)(sym_addr) - (uint64_t)(_ld_cpu_data_start))

#define PER_CPU_DATA_SIZE	\
	((uint64_t)_ld_cpu_data_end - (uint64_t)(_ld_cpu_data_start))

/*
 * get percpu data for pcpu_id.
 *
 * It returns:
 *   per_cpu_data_##name[pcpu_id];
 */
#define per_cpu(name, pcpu_id)	\
	(*({ uint64_t base = (uint64_t)per_cpu_data_base_ptr;	\
		uint64_t off = PER_CPU_DATA_OFFSET(&cpu_data_##name);	\
		((typeof(&cpu_data_##name))(base +	\
			(pcpu_id) * PER_CPU_DATA_SIZE + off));	\
	}))

/* get percpu data for current pcpu */
#define get_cpu_var(name)	per_cpu(name, get_cpu_id())

/* Function prototypes */
void cpu_halt(uint32_t logical_id);
uint64_t cpu_cycles_per_second(void);
uint64_t tsc_cycles_in_period(uint16_t timer_period_in_us);
void cpu_secondary_reset(void);
int hv_main(int cpu_id);
bool check_tsc_adjust_support(void);
bool check_ibrs_ibpb_support(void);
bool check_stibp_support(void);
bool is_apicv_enabled(void);

/* Read control register */
#define CPU_CR_READ(cr, result_ptr)				\
{								\
	asm volatile ("mov %%" __CPP_STRING(cr) ", %0"		\
			: "=r"(*result_ptr));			\
}

/* Write control register */
#define CPU_CR_WRITE(cr, value)					\
{								\
	asm volatile ("mov %0, %%" __CPP_STRING(cr)		\
			: /* No output */			\
			: "r"(value));				\
}

/* Read MSR */
#define CPU_MSR_READ(reg, msr_val_ptr)				\
{								\
	uint32_t msrl, msrh;					\
	asm volatile (" rdmsr ":"=a"(msrl),			\
		"=d"(msrh) : "c" (reg));			\
	*msr_val_ptr = ((uint64_t)msrh<<32) | msrl;		\
}

/* Write MSR */
#define CPU_MSR_WRITE(reg, msr_val)				\
{								\
	uint32_t msrl, msrh;					\
	msrl = (uint32_t)msr_val;				\
	msrh = (uint32_t)(msr_val >> 32);			\
	asm volatile (" wrmsr " : : "c" (reg),			\
		"a" (msrl), "d" (msrh));			\
}

/* Disables interrupts on the current CPU */
#define CPU_IRQ_DISABLE()					\
{								\
	asm volatile ("cli\n" : : : "cc");			\
}

/* Enables interrupts on the current CPU */
#define CPU_IRQ_ENABLE()					\
{								\
	asm volatile ("sti\n" : : : "cc");			\
}

/* This macro writes the stack pointer. */
#define CPU_SP_WRITE(stack_ptr)					\
{								\
	uint64_t rsp = (uint64_t)stack_ptr & ~(CPU_STACK_ALIGN - 1);	\
	asm volatile ("movq %0, %%rsp" : : "r"(rsp));		\
}

/* Synchronizes all read accesses from memory */
#define CPU_MEMORY_READ_BARRIER()				\
{								\
	asm volatile ("lfence\n" : : : "memory");		\
}

/* Synchronizes all write accesses to memory */
#define CPU_MEMORY_WRITE_BARRIER()				\
{								\
	asm volatile ("sfence\n" : : : "memory");		\
}

/* Synchronizes all read and write accesses to/from memory */
#define CPU_MEMORY_BARRIER()					\
{								\
	asm volatile ("mfence\n" : : : "memory");		\
}

/* Write the task register */
#define CPU_LTR_EXECUTE(ltr_ptr)				\
{								\
	asm volatile ("ltr %%ax\n" : : "a"(ltr_ptr));		\
}

/* Read time-stamp counter / processor ID */
#define CPU_RDTSCP_EXECUTE(timestamp_ptr, cpu_id_ptr)		\
{								\
	uint32_t tsl, tsh;					\
	asm volatile ("rdtscp":"=a"(tsl), "=d"(tsh),		\
			"=c"(*cpu_id_ptr));			\
	*timestamp_ptr = ((uint64_t)tsh << 32) | tsl;		\
}

/* Define variable(s) required to save / restore architecture interrupt state.
 * These variable(s) are used in conjunction with the ESAL_AR_INT_ALL_DISABLE()
 * and ESAL_AR_INT_ALL_RESTORE() macros to hold any data that must be preserved
 * in order to allow these macros to function correctly.
 */
#define CPU_INT_CONTROL_VARS	uint64_t cpu_int_value

/* Macro to save rflags register */
#define CPU_RFLAGS_SAVE(rflags_ptr)			\
{							\
	asm volatile (" pushf");			\
	asm volatile (" pop %0"				\
			: "=r" (*(rflags_ptr))		\
			: /* No inputs */);		\
}

/* Macro to restore rflags register */
#define CPU_RFLAGS_RESTORE(rflags)			\
{							\
	asm volatile (" push %0" : : "r" (rflags));	\
	asm volatile (" popf");				\
}

/* This macro locks out interrupts and saves the current architecture status
 * register / state register to the specified address. This function does not
 * attempt to mask any bits in the return register value and can be used as a
 * quick method to guard a critical section.
 * NOTE: This macro is used in conjunction with CPU_INT_ALL_RESTORE
 *       defined below and CPU_INT_CONTROL_VARS defined above.
 */
#define CPU_INT_ALL_DISABLE()				\
{							\
	CPU_RFLAGS_SAVE(&cpu_int_value);		\
	CPU_IRQ_DISABLE();				\
}

/* This macro restores the architecture status / state register used to lockout
 * interrupts to the value provided. The intent of this function is to be a
 * fast mechanism to restore the interrupt level at the end of a critical
 * section to its original level.
 * NOTE: This macro is used in conjunction with CPU_INT_ALL_DISABLE
 *       and CPU_INT_CONTROL_VARS defined above.
 */
#define CPU_INT_ALL_RESTORE()				\
{							\
	CPU_RFLAGS_RESTORE(cpu_int_value);		\
}

/* Macro to get CPU ID */
static inline uint32_t get_cpu_id(void)
{
	uint32_t tsl, tsh, cpu_id;

	asm volatile ("rdtscp":"=a" (tsl), "=d"(tsh), "=c"(cpu_id)::);
	return cpu_id;
}

static inline uint64_t cpu_rsp_get(void)
{
	uint64_t ret;

	asm volatile("movq %%rsp, %0"
			: "=r"(ret));
	return ret;
}

static inline uint64_t cpu_rbp_get(void)
{
	uint64_t ret;

	asm volatile("movq %%rbp, %0"
			: "=r"(ret));
	return ret;
}


static inline uint64_t
msr_read(uint32_t reg_num)
{
	uint64_t msr_val;

	CPU_MSR_READ(reg_num, &msr_val);
	return msr_val;
}

static inline void
msr_write(uint32_t reg_num, uint64_t value64)
{
	CPU_MSR_WRITE(reg_num, value64);
}

#else /* ASSEMBLER defined */

#endif /* ASSEMBLER defined */

#endif /* CPU_H */
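A minimal usage sketch (not part of this commit) of the per-CPU accessors and MSR helpers declared above. The MSR index 0x6e0 (IA32_TSC_DEADLINE) is written literally here only because cpu.h itself defines no MSR numbers; in the real tree those constants live elsewhere.

static inline void example_cpu_h_usage(void)
{
	uint8_t my_lapic_id = get_cpu_var(lapic_id);	/* current pcpu's copy */
	uint8_t other_lapic_id = per_cpu(lapic_id, 1);	/* pcpu 1's copy */
	uint64_t deadline = msr_read(0x6e0U);		/* IA32_TSC_DEADLINE */

	/* arm the deadline timer roughly one second from now */
	msr_write(0x6e0U, deadline + cpu_cycles_per_second());
	(void)my_lapic_id;
	(void)other_lapic_id;
}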
152
hypervisor/include/arch/x86/cpuid.h
Normal file
@@ -0,0 +1,152 @@
/*
 * Copyright (C) 2018 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * cpuid.h
 *
 *  Created on: Jan 4, 2018
 *      Author: don
 */

#ifndef CPUID_H_
#define CPUID_H_

/* CPUID bit definitions */
#define CPUID_ECX_SSE3		(1<<0)
#define CPUID_ECX_PCLMUL	(1<<1)
#define CPUID_ECX_DTES64	(1<<2)
#define CPUID_ECX_MONITOR	(1<<3)
#define CPUID_ECX_DS_CPL	(1<<4)
#define CPUID_ECX_VMX		(1<<5)
#define CPUID_ECX_SMX		(1<<6)
#define CPUID_ECX_EST		(1<<7)
#define CPUID_ECX_TM2		(1<<8)
#define CPUID_ECX_SSSE3		(1<<9)
#define CPUID_ECX_CID		(1<<10)
#define CPUID_ECX_FMA		(1<<12)
#define CPUID_ECX_CX16		(1<<13)
#define CPUID_ECX_ETPRD		(1<<14)
#define CPUID_ECX_PDCM		(1<<15)
#define CPUID_ECX_DCA		(1<<18)
#define CPUID_ECX_SSE4_1	(1<<19)
#define CPUID_ECX_SSE4_2	(1<<20)
#define CPUID_ECX_x2APIC	(1<<21)
#define CPUID_ECX_MOVBE		(1<<22)
#define CPUID_ECX_POPCNT	(1<<23)
#define CPUID_ECX_AES		(1<<25)
#define CPUID_ECX_XSAVE		(1<<26)
#define CPUID_ECX_OSXSAVE	(1<<27)
#define CPUID_ECX_AVX		(1<<28)
#define CPUID_EDX_FPU		(1<<0)
#define CPUID_EDX_VME		(1<<1)
#define CPUID_EDX_DE		(1<<2)
#define CPUID_EDX_PSE		(1<<3)
#define CPUID_EDX_TSC		(1<<4)
#define CPUID_EDX_MSR		(1<<5)
#define CPUID_EDX_PAE		(1<<6)
#define CPUID_EDX_MCE		(1<<7)
#define CPUID_EDX_CX8		(1<<8)
#define CPUID_EDX_APIC		(1<<9)
#define CPUID_EDX_SEP		(1<<11)
#define CPUID_EDX_MTRR		(1<<12)
#define CPUID_EDX_PGE		(1<<13)
#define CPUID_EDX_MCA		(1<<14)
#define CPUID_EDX_CMOV		(1<<15)
#define CPUID_EDX_PAT		(1<<16)
#define CPUID_EDX_PSE36		(1<<17)
#define CPUID_EDX_PSN		(1<<18)
#define CPUID_EDX_CLF		(1<<19)
#define CPUID_EDX_DTES		(1<<21)
#define CPUID_EDX_ACPI		(1<<22)
#define CPUID_EDX_MMX		(1<<23)
#define CPUID_EDX_FXSR		(1<<24)
#define CPUID_EDX_SSE		(1<<25)
#define CPUID_EDX_SSE2		(1<<26)
#define CPUID_EDX_SS		(1<<27)
#define CPUID_EDX_HTT		(1<<28)
#define CPUID_EDX_TM1		(1<<29)
#define CPUID_EDX_IA64		(1<<30)
#define CPUID_EDX_PBE		(1<<31)
/* CPUID.07H:EBX.TSC_ADJUST */
#define CPUID_EBX_TSC_ADJ	(1<<1)
/* CPUID.07H:EDX.IBRS_IBPB */
#define CPUID_EDX_IBRS_IBPB	(1<<26)
/* CPUID.07H:EDX.STIBP */
#define CPUID_EDX_STIBP		(1<<27)
/* CPUID.80000001H:EDX.Page1GB */
#define CPUID_EDX_PAGE1GB	(1<<26)
/* CPUID.07H:EBX.INVPCID */
#define CPUID_EBX_INVPCID	(1<<10)
/* CPUID.01H:ECX.PCID */
#define CPUID_ECX_PCID		(1<<17)

/* CPUID source operands */
#define CPUID_VENDORSTRING	0
#define CPUID_FEATURES		1
#define CPUID_TLB		2
#define CPUID_SERIALNUM		3
#define CPUID_EXTEND_FEATURE	7
#define CPUID_EXTEND_FUNCTION_1	0x80000001


enum cpuid_cache_idx {
	CPUID_VENDORSTRING_CACHE_IDX = 0,
	CPUID_FEATURES_CACHE_IDX,
	CPUID_EXTEND_FEATURE_CACHE_IDX,
	CPUID_EXTEND_FEATURE_CACHE_MAX
};

struct cpuid_cache_entry {
	uint32_t a;
	uint32_t b;
	uint32_t c;
	uint32_t d;
	uint32_t inited;
	uint32_t reserved;
};

static inline void native_cpuid_count(uint32_t op, uint32_t count,
	uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
{
	/* Execute CPUID instruction and save results */
	asm volatile("cpuid":"=a"(*a), "=b"(*b),
		"=c"(*c), "=d"(*d)
		: "a"(op), "c" (count));
}

void cpuid_count(uint32_t op, uint32_t count,
	uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d);

#define cpuid(op, a, b, c, d) cpuid_count(op, 0, a, b, c, d)

void emulate_cpuid(struct vcpu *vcpu, uint32_t src_op, uint32_t *eax_ptr,
	uint32_t *ebx_ptr, uint32_t *ecx_ptr, uint32_t *edx_ptr);

#endif /* CPUID_H_ */
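A small sketch (not part of this commit) showing the intended use of the raw CPUID helper together with the feature bit masks above, here to test whether the current CPU reports VMX support in CPUID leaf 01H ECX:

static inline bool example_cpu_has_vmx(void)
{
	uint32_t eax, ebx, ecx, edx;

	native_cpuid_count(CPUID_FEATURES, 0, &eax, &ebx, &ecx, &edx);
	return (ecx & CPUID_ECX_VMX) != 0;
}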
314
hypervisor/include/arch/x86/gdt.h
Normal file
@@ -0,0 +1,314 @@
/*
 * Copyright (C) 2018 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef GDT_H
#define GDT_H

/* The GDT is defined in assembly so it can be used to switch modes before long
 * mode is established.
 * With 64-bit EFI this is not required, since we are already in long mode when
 * EFI transfers control to the hypervisor. However, for any instantiation of
 * the ACRN Hypervisor that requires a boot from reset, the GDT will be used as
 * mode transitions are being made to ultimately end up in long mode.
 * For this reason we establish the GDT in assembly.
 * This should not affect usage and convenience of interacting with the GDT in C
 * as the complete definition of the GDT is driven by the defines in this file.
 *
 * Unless it proves to be not viable we will use a single GDT for all hypervisor
 * CPUs, with space for per CPU LDT and TSS.
 */

/*
 * Segment descriptors in x86-64 and i386 are the same size, 8 bytes.
 * Local Descriptor Table (LDT) descriptors are 16 bytes on x86-64 instead of 8
 * bytes.
 * Task State Segment (TSS) descriptors are 16 bytes on x86-64 instead of 8 bytes.
 */
#define X64_SEG_DESC_SIZE	(0x8)	/* In long mode SEG Descriptors are 8 bytes */
#define X64_LDT_DESC_SIZE	(0x10)	/* In long mode LDT Descriptors are 16 bytes */
#define X64_TSS_DESC_SIZE	(0x10)	/* In long mode TSS Descriptors are 16 bytes */

/*****************************************************************************
 *
 * BEGIN: Definition of the GDT.
 *
 * NOTE:
 * If you change the size of the GDT or rearrange the location of descriptors
 * within the GDT you must change both the defines and the C structure header.
 *
 *****************************************************************************/
/* Number of global 8 byte segment descriptor(s) */
#define HOST_GDT_RING0_SEG_SELECTORS	(0x3)	/* rsvd, code, data */
/* Offsets of global 8 byte segment descriptors */
#define HOST_GDT_RING0_RSVD_SEL		(0x0000)
#define HOST_GDT_RING0_CODE_SEL		(0x0008)
#define HOST_GDT_RING0_DATA_SEL		(0x0010)
/* Number of global 16 byte LDT descriptor(s) */
#define HOST_GDT_RING0_TSS_SELECTORS	(0x1)
/* One for each CPU in the hypervisor. */

/*****************************************************************************
 *
 * END: Definition of the GDT.
 *
 *****************************************************************************/

/* Offset to start of LDT Descriptors */
#define HOST_GDT_RING0_LDT_SEL	\
	(HOST_GDT_RING0_SEG_SELECTORS * X64_SEG_DESC_SIZE)
/* Offset to start of TSS Descriptors */
#define HOST_GDT_RING0_CPU_TSS_SEL	(HOST_GDT_RING0_LDT_SEL)
/* Size of the GDT */
#define HOST_GDT_SIZE	\
	(HOST_GDT_RING0_CPU_TSS_SEL +	\
	(HOST_GDT_RING0_TSS_SELECTORS * X64_TSS_DESC_SIZE))

/* Defined position of Interrupt Stack Tables */
#define MACHINE_CHECK_IST	(0x1)
#define DOUBLE_FAULT_IST	(0x2)
#define STACK_FAULT_IST		(0x3)

#ifndef ASSEMBLER

#include <types.h>
#include <cpu.h>

#define TSS_AVAIL	(9)

/*
 * Definition of an 8 byte code segment descriptor.
 */
union code_segment_descriptor {
	uint64_t value;
	struct {
		union {
			uint32_t value;
			struct {
				uint32_t limit_15_0:16;
				uint32_t base_15_0:16;
			} bits;
		} low32;
		union {
			uint32_t value;
			struct {
				uint32_t base_23_16:8;
				uint32_t accessed:1;
				uint32_t readeable:1;
				uint32_t conforming:1;
				uint32_t bit11_set:1;
				uint32_t bit12_set:1;
				uint32_t dpl:2;
				uint32_t present:1;
				uint32_t limit_19_16:4;
				uint32_t avl:1;
				uint32_t x64flag:1;
				uint32_t dflt:1;
				uint32_t granularity:1;
				uint32_t base_31_24:8;
			} bits;
		} high32;
	};
} __aligned(8);

/*
 * Definition of an 8 byte data segment descriptor.
 */
union data_segment_descriptor {
	uint64_t value;
	struct {
		union {
			uint32_t value;
			struct {
				uint32_t limit_15_0:16;
				uint32_t base_15_0:16;
			} bits;
		} low32;
		union {
			uint32_t value;
			struct {
				uint32_t base_23_16:8;
				uint32_t accessed:1;
				uint32_t writeable:1;
				uint32_t expansion:1;
				uint32_t bit11_clr:1;
				uint32_t bit12_set:1;
				uint32_t dpl:2;
				uint32_t present:1;
				uint32_t limit_19_16:4;
				uint32_t avl:1;
				uint32_t rsvd_clr:1;
				uint32_t big:1;
				uint32_t granularity:1;
				uint32_t base_31_24:8;
			} bits;
		} high32;
	};
} __aligned(8);

/*
 * Definition of an 8 byte system segment descriptor.
 */
union system_segment_descriptor {
	uint64_t value;
	struct {
		union {
			uint32_t value;
			struct {
				uint32_t limit_15_0:16;
				uint32_t base_15_0:16;
			} bits;
		} low32;
		union {
			uint32_t value;
			struct {
				uint32_t base_23_16:8;
				uint32_t type:4;
				uint32_t bit12_clr:1;
				uint32_t dpl:2;
				uint32_t present:1;
				uint32_t limit_19_16:4;
				uint32_t rsvd_1:1;
				uint32_t rsvd_2_clr:1;
				uint32_t rsvd_3:1;
				uint32_t granularity:1;
				uint32_t base_31_24:8;
			} bits;
		} high32;
	};
} __aligned(8);

/*
 * Definition of 16 byte TSS and LDT descriptors.
 */
union tss_64_descriptor {
	uint64_t value;
	struct {
		union {
			uint32_t value;
			struct {
				uint32_t limit_15_0:16;
				uint32_t base_15_0:16;
			} bits;
		} low32;
		union {
			uint32_t value;
			struct {
				uint32_t base_23_16:8;
				uint32_t type:4;
				uint32_t bit12_clr:1;
				uint32_t dpl:2;
				uint32_t present:1;
				uint32_t limit_19_16:4;
				uint32_t rsvd_1:1;
				uint32_t rsvd_2_clr:1;
				uint32_t rsvd_3:1;
				uint32_t granularity:1;
				uint32_t base_31_24:8;
			} bits;
		} high32;
		uint32_t base_addr_63_32;
		union {
			uint32_t value;
			struct {
				uint32_t rsvd_7_0:8;
				uint32_t bits_12_8_clr:4;
				uint32_t rsvd_31_13:20;
			} bits;
		} offset_12;
	};
} __aligned(8);

/*****************************************************************************
 *
 * BEGIN: Definition of the GDT.
 *
 * NOTE:
 * If you change the size of the GDT or rearrange the location of descriptors
 * within the GDT you must change both the defines and the C structure header.
 *
 *****************************************************************************/
struct host_gdt {
	uint64_t rsvd;

	union code_segment_descriptor host_gdt_code_descriptor;
	union data_segment_descriptor host_gdt_data_descriptor;
	union tss_64_descriptor host_gdt_tss_descriptors;
} __aligned(8);

/*****************************************************************************
 *
 * END: Definition of the GDT.
 *
 *****************************************************************************/

/*
 * x86-64 Task State Segment (TSS) definition.
 */
struct tss_64 {
	uint32_t rsvd1;
	uint64_t rsp0;
	uint64_t rsp1;
	uint64_t rsp2;
	uint32_t rsvd2;
	uint32_t rsvd3;
	uint64_t ist1;
	uint64_t ist2;
	uint64_t ist3;
	uint64_t ist4;
	uint64_t ist5;
	uint64_t ist6;
	uint64_t ist7;
	uint32_t rsvd4;
	uint32_t rsvd5;
	uint16_t rsvd6;
	uint16_t io_map_base_addr;
} __packed __aligned(16);

/*
 * Definition of the GDT descriptor.
 */
struct host_gdt_descriptor {
	unsigned short len;
	struct host_gdt *gdt;
} __packed;

extern struct host_gdt HOST_GDT;
extern struct host_gdt_descriptor HOST_GDTR;
void load_gdtr_and_tr(void);

EXTERN_CPU_DATA(struct tss_64, tss);
EXTERN_CPU_DATA(struct host_gdt, gdt);
EXTERN_CPU_DATA(uint8_t[STACK_SIZE], mc_stack) __aligned(16);
EXTERN_CPU_DATA(uint8_t[STACK_SIZE], df_stack) __aligned(16);
EXTERN_CPU_DATA(uint8_t[STACK_SIZE], sf_stack) __aligned(16);

#endif /* end #ifndef ASSEMBLER */

#endif /* GDT_H */
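The selector and size defines above imply a fixed layout: three 8-byte ring-0 descriptors (reserved, code, data) followed by one 16-byte TSS descriptor, so HOST_GDT_RING0_CPU_TSS_SEL is 0x18 and HOST_GDT_SIZE is 0x28. A minimal sketch (not part of this commit) of how a GDTR descriptor for that table could be filled in, using the standard x86 rule that the GDTR limit is the table size minus one:

static inline void example_fill_gdtr(struct host_gdt_descriptor *gdtr)
{
	gdtr->len = HOST_GDT_SIZE - 1;	/* GDTR limit = table size - 1 */
	gdtr->gdt = &HOST_GDT;
}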
115
hypervisor/include/arch/x86/guest/guest.h
Normal file
115
hypervisor/include/arch/x86/guest/guest.h
Normal file
@@ -0,0 +1,115 @@
|
||||
/*
|
||||
* Copyright (C) 2018 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Intel Corporation nor the names of its
|
||||
* contributors may be used to endorse or promote products derived
|
||||
* from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef GUEST_H
|
||||
#define GUEST_H
|
||||
|
||||
/* Defines for VM Launch and Resume */
|
||||
#define VM_RESUME 0
|
||||
#define VM_LAUNCH 1
|
||||
|
||||
#define ACRN_DBG_PTIRQ 6
|
||||
#define ACRN_DBG_IRQ 6
|
||||
|
||||
#ifndef ASSEMBLER
|
||||
|
||||
#define foreach_vcpu(idx, vm, vcpu) \
	for (idx = 0, vcpu = vm->hw.vcpu_array[idx]; \
		(idx < vm->hw.num_vcpus) && (vcpu != NULL); \
		idx++, vcpu = vm->hw.vcpu_array[idx])
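/*
 * Illustrative usage sketch (not part of the original header): walk every
 * vcpu of a VM with foreach_vcpu().  The helper name below is hypothetical
 * and assumes pause_vcpu()/VCPU_ZOMBIE from vcpu.h are visible.
 */
static inline void demo_pause_all_vcpus(struct vm *vm)
{
	int idx;
	struct vcpu *vcpu;

	foreach_vcpu(idx, vm, vcpu)
		pause_vcpu(vcpu, VCPU_ZOMBIE);
}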
|
||||
|
||||
struct vhm_request;
|
||||
|
||||
/*
|
||||
* VCPU related APIs
|
||||
*/
|
||||
#define ACRN_REQUEST_EVENT 0
|
||||
#define ACRN_REQUEST_EXTINT 1
|
||||
#define ACRN_REQUEST_NMI 2
|
||||
#define ACRN_REQUEST_GP 3
|
||||
#define ACRN_REQUEST_TMR_UPDATE 4
|
||||
#define ACRN_REQUEST_TLB_FLUSH 5
|
||||
|
||||
#define E820_MAX_ENTRIES 32
|
||||
|
||||
struct e820_mem_params {
|
||||
uint64_t mem_bottom;
|
||||
uint64_t mem_top;
|
||||
uint64_t max_ram_blk_base; /* used for the start address of UOS */
|
||||
uint64_t max_ram_blk_size;
|
||||
};
|
||||
|
||||
int prepare_vm0_memmap_and_e820(struct vm *vm);
|
||||
|
||||
/* Definition for a mem map lookup */
|
||||
struct vm_lu_mem_map {
|
||||
struct list_head list; /* EPT mem map lookup list*/
|
||||
void *hpa; /* Host physical start address of the map*/
|
||||
void *gpa; /* Guest physical start address of the map */
|
||||
uint64_t size; /* Size of map */
|
||||
};
|
||||
|
||||
/*
|
||||
* VM related APIs
|
||||
*/
|
||||
bool is_vm0(struct vm *vm);
|
||||
bool vm_lapic_disabled(struct vm *vm);
|
||||
uint64_t vcpumask2pcpumask(struct vm *vm, uint64_t vdmask);
|
||||
int init_vm0_boot_info(struct vm *vm);
|
||||
|
||||
uint64_t gva2gpa(struct vm *vm, uint64_t cr3, uint64_t gva);
|
||||
|
||||
struct vcpu *get_primary_vcpu(struct vm *vm);
|
||||
struct vcpu *vcpu_from_vid(struct vm *vm, int vcpu_id);
|
||||
struct vcpu *vcpu_from_pid(struct vm *vm, int pcpu_id);
|
||||
|
||||
void init_e820(void);
|
||||
void obtain_e820_mem_info(void);
|
||||
extern uint32_t e820_entries;
|
||||
extern struct e820_entry e820[E820_MAX_ENTRIES];
|
||||
extern uint32_t boot_regs[];
|
||||
extern struct e820_mem_params e820_mem;
|
||||
|
||||
int rdmsr_handler(struct vcpu *vcpu);
|
||||
int wrmsr_handler(struct vcpu *vcpu);
|
||||
void init_msr_emulation(struct vcpu *vcpu);
|
||||
|
||||
extern const char vm_exit[];
|
||||
int vmx_vmrun(struct run_context *context, int ops, int ibrs);
|
||||
|
||||
int load_guest(struct vm *vm, struct vcpu *vcpu);
|
||||
int general_sw_loader(struct vm *vm, struct vcpu *vcpu);
|
||||
|
||||
typedef int (*vm_sw_loader_t)(struct vm *, struct vcpu *);
|
||||
extern vm_sw_loader_t vm_sw_loader;
|
||||
|
||||
#endif /* !ASSEMBLER */
|
||||
|
||||
#endif /* GUEST_H*/
|
288
hypervisor/include/arch/x86/guest/vcpu.h
Normal file
@@ -0,0 +1,288 @@
|
||||
/*
|
||||
* Copyright (C) 2018 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Intel Corporation nor the names of its
|
||||
* contributors may be used to endorse or promote products derived
|
||||
* from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef _VCPU_H_
|
||||
#define _VCPU_H_
|
||||
|
||||
#define ACRN_VCPU_MMIO_COMPLETE (0)
|
||||
|
||||
/* Size of various elements within the VCPU structure */
|
||||
#define REG_SIZE 8
|
||||
|
||||
/* Number of GPRs saved / restored for guest in VCPU structure */
|
||||
#define NUM_GPRS 15
|
||||
#define GUEST_STATE_AREA_SIZE 512
|
||||
|
||||
/* Indexes of GPRs saved / restored for guest */
|
||||
#define VMX_MACHINE_T_GUEST_RAX_INDEX 0
|
||||
#define VMX_MACHINE_T_GUEST_RBX_INDEX 1
|
||||
#define VMX_MACHINE_T_GUEST_RCX_INDEX 2
|
||||
#define VMX_MACHINE_T_GUEST_RDX_INDEX 3
|
||||
#define VMX_MACHINE_T_GUEST_RBP_INDEX 4
|
||||
#define VMX_MACHINE_T_GUEST_RSI_INDEX 5
|
||||
#define VMX_MACHINE_T_GUEST_R8_INDEX 6
|
||||
#define VMX_MACHINE_T_GUEST_R9_INDEX 7
|
||||
#define VMX_MACHINE_T_GUEST_R10_INDEX 8
|
||||
#define VMX_MACHINE_T_GUEST_R11_INDEX 9
|
||||
#define VMX_MACHINE_T_GUEST_R12_INDEX 10
|
||||
#define VMX_MACHINE_T_GUEST_R13_INDEX 11
|
||||
#define VMX_MACHINE_T_GUEST_R14_INDEX 12
|
||||
#define VMX_MACHINE_T_GUEST_R15_INDEX 13
|
||||
#define VMX_MACHINE_T_GUEST_RDI_INDEX 14
|
||||
|
||||
/* Offsets of GPRs for guest within the VCPU data structure */
|
||||
#define VMX_MACHINE_T_GUEST_RAX_OFFSET (VMX_MACHINE_T_GUEST_RAX_INDEX*REG_SIZE)
|
||||
#define VMX_MACHINE_T_GUEST_RBX_OFFSET (VMX_MACHINE_T_GUEST_RBX_INDEX*REG_SIZE)
|
||||
#define VMX_MACHINE_T_GUEST_RCX_OFFSET (VMX_MACHINE_T_GUEST_RCX_INDEX*REG_SIZE)
|
||||
#define VMX_MACHINE_T_GUEST_RDX_OFFSET (VMX_MACHINE_T_GUEST_RDX_INDEX*REG_SIZE)
|
||||
#define VMX_MACHINE_T_GUEST_RBP_OFFSET (VMX_MACHINE_T_GUEST_RBP_INDEX*REG_SIZE)
|
||||
#define VMX_MACHINE_T_GUEST_RSI_OFFSET (VMX_MACHINE_T_GUEST_RSI_INDEX*REG_SIZE)
|
||||
#define VMX_MACHINE_T_GUEST_RDI_OFFSET (VMX_MACHINE_T_GUEST_RDI_INDEX*REG_SIZE)
|
||||
#define VMX_MACHINE_T_GUEST_R8_OFFSET (VMX_MACHINE_T_GUEST_R8_INDEX*REG_SIZE)
|
||||
#define VMX_MACHINE_T_GUEST_R9_OFFSET (VMX_MACHINE_T_GUEST_R9_INDEX*REG_SIZE)
|
||||
#define VMX_MACHINE_T_GUEST_R10_OFFSET (VMX_MACHINE_T_GUEST_R10_INDEX*REG_SIZE)
|
||||
#define VMX_MACHINE_T_GUEST_R11_OFFSET (VMX_MACHINE_T_GUEST_R11_INDEX*REG_SIZE)
|
||||
#define VMX_MACHINE_T_GUEST_R12_OFFSET (VMX_MACHINE_T_GUEST_R12_INDEX*REG_SIZE)
|
||||
#define VMX_MACHINE_T_GUEST_R13_OFFSET (VMX_MACHINE_T_GUEST_R13_INDEX*REG_SIZE)
|
||||
#define VMX_MACHINE_T_GUEST_R14_OFFSET (VMX_MACHINE_T_GUEST_R14_INDEX*REG_SIZE)
|
||||
#define VMX_MACHINE_T_GUEST_R15_OFFSET (VMX_MACHINE_T_GUEST_R15_INDEX*REG_SIZE)
|
||||
|
||||
/* Hard-coded offset of cr2 in struct run_context!! */
#define VMX_MACHINE_T_GUEST_CR2_OFFSET (128)

/* Hard-coded offset of ia32_spec_ctrl in struct run_context!! */
#define VMX_MACHINE_T_GUEST_SPEC_CTRL_OFFSET (192)

/* Sizes of various registers within the VCPU data structure */
#define VMX_CPU_S_FXSAVE_GUEST_AREA_SIZE GUEST_STATE_AREA_SIZE
|
||||
|
||||
#ifndef ASSEMBLER
|
||||
|
||||
enum vcpu_state {
|
||||
VCPU_INIT,
|
||||
VCPU_RUNNING,
|
||||
VCPU_PAUSED,
|
||||
VCPU_ZOMBIE,
|
||||
VCPU_UNKNOWN_STATE,
|
||||
};
|
||||
|
||||
struct cpu_regs {
|
||||
uint64_t rax;
|
||||
uint64_t rbx;
|
||||
uint64_t rcx;
|
||||
uint64_t rdx;
|
||||
uint64_t rbp;
|
||||
uint64_t rsi;
|
||||
uint64_t r8;
|
||||
uint64_t r9;
|
||||
uint64_t r10;
|
||||
uint64_t r11;
|
||||
uint64_t r12;
|
||||
uint64_t r13;
|
||||
uint64_t r14;
|
||||
uint64_t r15;
|
||||
uint64_t rdi;
|
||||
};
|
||||
|
||||
struct segment {
|
||||
uint64_t selector;
|
||||
uint64_t base;
|
||||
uint64_t limit;
|
||||
uint64_t attr;
|
||||
};
|
||||
|
||||
struct run_context {
|
||||
/* Contains the guest register set.
|
||||
* NOTE: This must be the first element in the structure, so that the offsets
|
||||
* in vmx_asm.S match
|
||||
*/
|
||||
union {
|
||||
struct cpu_regs regs;
|
||||
uint64_t longs[NUM_GPRS];
|
||||
} guest_cpu_regs;
|
||||
|
||||
/** The guest's CR registers 0, 2, 3 and 4. */
|
||||
uint64_t cr0;
|
||||
|
||||
/* VMX_MACHINE_T_GUEST_CR2_OFFSET =
|
||||
* offsetof(struct run_context, cr2) = 128
|
||||
*/
|
||||
uint64_t cr2;
|
||||
uint64_t cr3;
|
||||
uint64_t cr4;
|
||||
|
||||
uint64_t rip;
|
||||
uint64_t rsp;
|
||||
uint64_t rflags;
|
||||
|
||||
uint64_t dr7;
|
||||
uint64_t tsc_offset;
|
||||
|
||||
/* MSRs */
|
||||
/* VMX_MACHINE_T_GUEST_SPEC_CTRL_OFFSET =
|
||||
* offsetof(struct run_context, ia32_spec_ctrl) = 192
|
||||
*/
|
||||
uint64_t ia32_spec_ctrl;
|
||||
uint64_t ia32_star;
|
||||
uint64_t ia32_lstar;
|
||||
uint64_t ia32_fmask;
|
||||
uint64_t ia32_kernel_gs_base;
|
||||
|
||||
uint64_t ia32_pat;
|
||||
uint64_t ia32_efer;
|
||||
uint64_t ia32_sysenter_cs;
|
||||
uint64_t ia32_sysenter_esp;
|
||||
uint64_t ia32_sysenter_eip;
|
||||
uint64_t ia32_debugctl;
|
||||
|
||||
/* segment registers */
|
||||
struct segment cs;
|
||||
struct segment ss;
|
||||
struct segment ds;
|
||||
struct segment es;
|
||||
struct segment fs;
|
||||
struct segment gs;
|
||||
struct segment tr;
|
||||
struct segment idtr;
|
||||
struct segment ldtr;
|
||||
struct segment gdtr;
|
||||
|
||||
/* The 512 bytes area to save the FPU/MMX/SSE states for the guest */
|
||||
uint64_t
|
||||
fxstore_guest_area[VMX_CPU_S_FXSAVE_GUEST_AREA_SIZE / sizeof(uint64_t)]
|
||||
__aligned(16);
|
||||
};
|
||||
|
||||
/* 2 worlds: 0 for Normal World, 1 for Secure World */
|
||||
#define NR_WORLD 2
|
||||
#define NORMAL_WORLD 0
|
||||
#define SECURE_WORLD 1
|
||||
|
||||
struct vcpu_arch {
|
||||
int cur_context;
|
||||
struct run_context contexts[NR_WORLD];
|
||||
|
||||
/* A pointer to the VMCS for this CPU. */
|
||||
void *vmcs;
|
||||
|
||||
/* Holds the information needed for IRQ/exception handling. */
|
||||
struct {
|
||||
/* The number of the exception to raise. */
|
||||
int exception;
|
||||
|
||||
/* The error number for the exception. */
|
||||
int error;
|
||||
} exception_info;
|
||||
|
||||
uint8_t lapic_mask;
|
||||
uint32_t irq_window_enabled;
|
||||
uint32_t nrexits;
|
||||
|
||||
/* Auxiliary TSC value */
|
||||
uint64_t msr_tsc_aux;
|
||||
|
||||
/* VCPU context state information */
|
||||
uint64_t exit_reason;
|
||||
uint64_t exit_interrupt_info;
|
||||
uint64_t exit_qualification;
|
||||
uint8_t inst_len;
|
||||
|
||||
/* Information related to secondary / AP VCPU start-up */
|
||||
uint8_t cpu_mode;
|
||||
uint8_t nr_sipi;
|
||||
uint32_t sipi_vector;
|
||||
|
||||
/* interrupt injection information */
|
||||
uint64_t pending_intr;
|
||||
|
||||
/* per vcpu lapic */
|
||||
void *vlapic;
|
||||
};
|
||||
|
||||
struct vm;
|
||||
struct vcpu {
|
||||
int pcpu_id; /* Physical CPU ID of this VCPU */
|
||||
int vcpu_id; /* virtual identifier for VCPU */
|
||||
struct vcpu_arch arch_vcpu;
|
||||
/* Architecture specific definitions for this VCPU */
|
||||
struct vm *vm; /* Reference to the VM this VCPU belongs to */
|
||||
void *entry_addr; /* Entry address for this VCPU when first started */
|
||||
|
||||
/* State of this VCPU before suspend */
|
||||
volatile enum vcpu_state prev_state;
|
||||
volatile enum vcpu_state state; /* State of this VCPU */
|
||||
/* State of debug request for this VCPU */
|
||||
volatile enum vcpu_state dbg_req_state;
|
||||
unsigned long sync; /*hold the bit events*/
|
||||
struct vlapic *vlapic; /* per vCPU virtualized LAPIC */
|
||||
|
||||
struct list_head run_list; /* inserted to schedule runqueue */
|
||||
unsigned long pending_pre_work; /* any pre work pending? */
|
||||
bool launched; /* Whether the vcpu is launched on target pcpu */
|
||||
unsigned int paused_cnt; /* how many times vcpu is paused */
|
||||
unsigned int running; /* vcpu is picked up and run? */
|
||||
unsigned int ioreq_pending; /* ioreq is ongoing or not? */
|
||||
|
||||
struct vhm_request req; /* used by io/ept emulation */
|
||||
struct mem_io mmio; /* used by io/ept emulation */
|
||||
|
||||
/* Save guest MSR_TSC_AUX register.
 * Before VMENTRY, save guest MSR_TSC_AUX into this field.
 * After VMEXIT, restore this field to guest MSR_TSC_AUX.
 * This is only a temporary workaround. Once MSR emulation
 * is enabled, this field and the related code should be
 * removed.
 */
|
||||
uint64_t msr_tsc_aux_guest;
|
||||
uint64_t *guest_msrs;
|
||||
};
|
||||
|
||||
#define is_vcpu_bsp(vcpu) ((vcpu)->vcpu_id == 0)
|
||||
/* do not update Guest RIP for next VM Enter */
|
||||
#define VCPU_RETAIN_RIP(vcpu) ((vcpu)->arch_vcpu.inst_len = 0)
|
||||
|
||||
/* External Interfaces */
|
||||
int create_vcpu(int cpu_id, struct vm *vm, struct vcpu **rtn_vcpu_handle);
|
||||
int start_vcpu(struct vcpu *vcpu);
|
||||
int shutdown_vcpu(struct vcpu *vcpu);
|
||||
int destroy_vcpu(struct vcpu *vcpu);
|
||||
|
||||
void reset_vcpu(struct vcpu *vcpu);
|
||||
void init_vcpu(struct vcpu *vcpu);
|
||||
void pause_vcpu(struct vcpu *vcpu, enum vcpu_state new_state);
|
||||
void resume_vcpu(struct vcpu *vcpu);
|
||||
void schedule_vcpu(struct vcpu *vcpu);
|
||||
int prepare_vcpu(struct vm *vm, int pcpu_id);
|
||||
|
||||
void request_vcpu_pre_work(struct vcpu *vcpu, int pre_work_id);
|
||||
|
||||
#endif
|
||||
|
||||
#endif
|
57
hypervisor/include/arch/x86/guest/vioapic.h
Normal file
@@ -0,0 +1,57 @@
|
||||
/*-
|
||||
* Copyright (c) 2013 Tycho Nightingale <tycho.nightingale@pluribusnetworks.com>
|
||||
* Copyright (c) 2013 Neel Natu <neel@freebsd.org>
|
||||
* Copyright (c) 2017 Intel Corporation
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* $FreeBSD$
|
||||
*/
|
||||
|
||||
#ifndef _VIOAPIC_H_
|
||||
#define _VIOAPIC_H_
|
||||
|
||||
#define VIOAPIC_BASE 0xFEC00000UL
|
||||
#define VIOAPIC_SIZE 4096UL
|
||||
|
||||
struct vioapic *vioapic_init(struct vm *vm);
|
||||
void vioapic_cleanup(struct vioapic *vioapic);
|
||||
|
||||
int vioapic_assert_irq(struct vm *vm, int irq);
|
||||
int vioapic_deassert_irq(struct vm *vm, int irq);
|
||||
int vioapic_pulse_irq(struct vm *vm, int irq);
|
||||
void vioapic_update_tmr(struct vcpu *vcpu);
|
||||
|
||||
int vioapic_mmio_write(void *vm, uint64_t gpa,
|
||||
uint64_t wval, int size);
|
||||
int vioapic_mmio_read(void *vm, uint64_t gpa,
|
||||
uint64_t *rval, int size);
|
||||
|
||||
int vioapic_pincount(struct vm *vm);
|
||||
void vioapic_process_eoi(struct vm *vm, int vector);
|
||||
bool vioapic_get_rte(struct vm *vm, int pin, void *rte);
|
||||
int vioapic_mmio_access_handler(struct vcpu *vcpu, struct mem_io *mmio,
|
||||
void *handler_private_data);
|
||||
|
||||
int get_vioapic_info(char *str, int str_max, int vmid);
|
||||
#endif
|
132
hypervisor/include/arch/x86/guest/vlapic.h
Normal file
@@ -0,0 +1,132 @@
|
||||
/*-
|
||||
* Copyright (c) 2011 NetApp, Inc.
|
||||
* Copyright (c) 2017 Intel Corporation
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* $FreeBSD$
|
||||
*/
|
||||
|
||||
#ifndef _VLAPIC_H_
#define _VLAPIC_H_

struct vlapic;

/* APIC write handlers */
void vlapic_set_cr8(struct vlapic *vlapic, uint64_t val);
uint64_t vlapic_get_cr8(struct vlapic *vlapic);

/*
 * Returns 0 if there is no eligible vector that can be delivered to the
 * guest at this time and non-zero otherwise.
 *
 * If an eligible vector number is found and 'vecptr' is not NULL then it will
 * be stored in the location pointed to by 'vecptr'.
 *
 * Note that the vector does not automatically transition to the ISR as a
 * result of calling this function.
 */
int vlapic_pending_intr(struct vlapic *vlapic, int *vecptr);

/*
 * Transition 'vector' from IRR to ISR. This function is called with the
 * vector returned by 'vlapic_pending_intr()' when the guest is able to
 * accept this interrupt (i.e. RFLAGS.IF = 1 and no conditions exist that
 * block interrupt delivery).
 */
void vlapic_intr_accepted(struct vlapic *vlapic, int vector);
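/*
 * Illustrative sketch (not part of the original header) of the query/accept
 * protocol described above: ask the vLAPIC for a pending vector and, once
 * the guest can take it, mark it accepted.  The helper name and the
 * injection step are hypothetical placeholders.
 */
static inline void demo_deliver_pending_intr(struct vlapic *vlapic)
{
	int vector;

	if (vlapic_pending_intr(vlapic, &vector) != 0) {
		/* ... inject 'vector' into the guest here ... */
		vlapic_intr_accepted(vlapic, vector);
	}
}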
|
||||
|
||||
struct vlapic *vm_lapic_from_vcpuid(struct vm *vm, int vcpu_id);
|
||||
struct vlapic *vm_lapic_from_pcpuid(struct vm *vm, int pcpu_id);
|
||||
bool vlapic_msr(uint32_t num);
|
||||
int vlapic_rdmsr(struct vcpu *vcpu, uint32_t msr, uint64_t *rval, bool *retu);
|
||||
int vlapic_wrmsr(struct vcpu *vcpu, uint32_t msr, uint64_t wval, bool *retu);
|
||||
|
||||
int vlapic_mmio_read(struct vcpu *vcpu, uint64_t gpa, uint64_t *rval, int size);
|
||||
int vlapic_mmio_write(struct vcpu *vcpu, uint64_t gpa, uint64_t wval, int size);
|
||||
|
||||
/*
 * Signals to the LAPIC that an interrupt at 'vector' needs to be generated
 * to the 'cpu'; the state is recorded in the IRR.
 */
|
||||
int vlapic_set_intr(struct vcpu *vcpu, int vector, bool trig);
|
||||
|
||||
#define LAPIC_TRIG_LEVEL true
|
||||
#define LAPIC_TRIG_EDGE false
|
||||
static inline int
|
||||
vlapic_intr_level(struct vcpu *vcpu, int vector)
|
||||
{
|
||||
return vlapic_set_intr(vcpu, vector, LAPIC_TRIG_LEVEL);
|
||||
}
|
||||
|
||||
static inline int
|
||||
vlapic_intr_edge(struct vcpu *vcpu, int vector)
|
||||
{
|
||||
return vlapic_set_intr(vcpu, vector, LAPIC_TRIG_EDGE);
|
||||
}
|
||||
|
||||
/*
|
||||
* Triggers the LAPIC local interrupt (LVT) 'vector' on 'cpu'. 'cpu' can
|
||||
* be set to -1 to trigger the interrupt on all CPUs.
|
||||
*/
|
||||
int vlapic_set_local_intr(struct vm *vm, int cpu, int vector);
|
||||
|
||||
int vlapic_intr_msi(struct vm *vm, uint64_t addr, uint64_t msg);
|
||||
|
||||
void vlapic_deliver_intr(struct vm *vm, bool level, uint32_t dest,
|
||||
bool phys, int delmode, int vec);
|
||||
|
||||
/* Reset the trigger-mode bits for all vectors to be edge-triggered */
|
||||
void vlapic_reset_tmr(struct vlapic *vlapic);
|
||||
|
||||
/*
|
||||
* Set the trigger-mode bit associated with 'vector' to level-triggered if
|
||||
* the (dest,phys,delmode) tuple resolves to an interrupt being delivered to
|
||||
* this 'vlapic'.
|
||||
*/
|
||||
void vlapic_set_tmr_one_vec(struct vlapic *vlapic, int delmode,
|
||||
int vector, bool level);
|
||||
|
||||
void
|
||||
vlapic_apicv_batch_set_tmr(struct vlapic *vlapic);
|
||||
|
||||
int vlapic_mmio_access_handler(struct vcpu *vcpu, struct mem_io *mmio,
|
||||
void *handler_private_data);
|
||||
|
||||
uint32_t vlapic_get_id(struct vlapic *vlapic);
|
||||
uint8_t vlapic_get_apicid(struct vlapic *vlapic);
|
||||
|
||||
int vlapic_create(struct vcpu *vcpu);
|
||||
void vlapic_free(struct vcpu *vcpu);
|
||||
void vlapic_init(struct vlapic *vlapic);
|
||||
bool vlapic_enabled(struct vlapic *vlapic);
|
||||
uint64_t apicv_get_apic_access_addr(struct vm *vm);
|
||||
uint64_t apicv_get_apic_page_addr(struct vlapic *vlapic);
|
||||
bool vlapic_apicv_enabled(struct vcpu *vcpu);
|
||||
void apicv_inject_pir(struct vlapic *vlapic);
|
||||
int apicv_access_exit_handler(struct vcpu *vcpu);
|
||||
int apicv_write_exit_handler(struct vcpu *vcpu);
|
||||
int apicv_virtualized_eoi_exit_handler(struct vcpu *vcpu);
|
||||
|
||||
void calcvdest(struct vm *vm, uint64_t *dmask, uint32_t dest, bool phys);
|
||||
#endif /* _VLAPIC_H_ */
|
202
hypervisor/include/arch/x86/guest/vm.h
Normal file
@@ -0,0 +1,202 @@
|
||||
/*
|
||||
* Copyright (C) 2018 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Intel Corporation nor the names of its
|
||||
* contributors may be used to endorse or promote products derived
|
||||
* from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef VM_H_
|
||||
#define VM_H_
|
||||
|
||||
enum vm_privilege_level {
|
||||
VM_PRIVILEGE_LEVEL_HIGH = 0,
|
||||
VM_PRIVILEGE_LEVEL_MEDIUM,
|
||||
VM_PRIVILEGE_LEVEL_LOW
|
||||
};
|
||||
|
||||
#define MAX_VM_NAME_LEN 16
|
||||
struct vm_attr {
|
||||
char name[16]; /* Virtual machine name string */
|
||||
int id; /* Virtual machine identifier */
|
||||
int boot_idx; /* Index indicating the boot sequence for this VM */
|
||||
};
|
||||
|
||||
struct vm_hw_info {
|
||||
int num_vcpus; /* Number of total virtual cores */
|
||||
uint32_t created_vcpus; /* Number of created vcpus */
|
||||
struct vcpu **vcpu_array; /* vcpu array of this VM */
|
||||
uint64_t gpa_lowtop; /* top lowmem gpa of this VM */
|
||||
};
|
||||
|
||||
struct sw_linux {
|
||||
void *ramdisk_src_addr;
|
||||
void *ramdisk_load_addr;
|
||||
uint32_t ramdisk_size;
|
||||
void *bootargs_src_addr;
|
||||
void *bootargs_load_addr;
|
||||
uint32_t bootargs_size;
|
||||
void *dtb_src_addr;
|
||||
void *dtb_load_addr;
|
||||
uint32_t dtb_size;
|
||||
};
|
||||
|
||||
struct sw_kernel_info {
|
||||
void *kernel_src_addr;
|
||||
void *kernel_load_addr;
|
||||
void *kernel_entry_addr;
|
||||
uint32_t kernel_size;
|
||||
};
|
||||
|
||||
struct vm_sw_info {
|
||||
int kernel_type; /* Guest kernel type */
|
||||
/* Kernel information (common for all guest types) */
|
||||
struct sw_kernel_info kernel_info;
|
||||
/* Additional information specific to Linux guests */
|
||||
struct sw_linux linux_info;
|
||||
/* GPA Address of guest OS's request buffer */
|
||||
uint64_t req_buf;
|
||||
};
|
||||
|
||||
/* VM guest types */
|
||||
#define VM_LINUX_GUEST 0x02
|
||||
#define VM_MONO_GUEST 0x01
|
||||
|
||||
enum vpic_wire_mode {
|
||||
VPIC_WIRE_INTR = 0,
|
||||
VPIC_WIRE_LAPIC,
|
||||
VPIC_WIRE_IOAPIC,
|
||||
VPIC_WIRE_NULL
|
||||
};
|
||||
|
||||
/* Enumerated type for VM states */
|
||||
enum vm_state {
|
||||
VM_CREATED = 0, /* VM created / awaiting start (boot) */
|
||||
VM_STARTED, /* VM started (booted) */
|
||||
VM_PAUSED, /* VM paused */
|
||||
VM_STATE_UNKNOWN
|
||||
};
|
||||
|
||||
/* Structure for VM state information */
|
||||
struct vm_state_info {
|
||||
enum vm_state state; /* State of the VM */
|
||||
unsigned int privilege; /* Privilege level of the VM */
|
||||
unsigned int boot_count;/* Number of times the VM has booted */
|
||||
|
||||
};
|
||||
|
||||
struct vm_arch {
|
||||
void *guest_pml4; /* Guest pml4 */
|
||||
void *ept; /* EPT hierarchy */
|
||||
void *m2p; /* machine address to guest physical address */
|
||||
void *tmp_pg_array; /* Page array for tmp guest paging struct */
|
||||
void *iobitmap[2];/* IO bitmap page array base address for this VM */
|
||||
void *msr_bitmap; /* MSR bitmap page base address for this VM */
|
||||
void *virt_ioapic; /* Virtual IOAPIC base address */
|
||||
/**
 * A link to the IO handlers of this VM.
 * IO handlers are only registered to this list while the VM is being
 * created and are unregistered when the VM is destroyed, so no lock is
 * needed to protect against preemption. Besides, there are only a few
 * IO handlers for now, so a binary search is not needed yet.
 */
|
||||
struct vm_io_handler *io_handler;
|
||||
|
||||
/* reference to virtual platform to come here (as needed) */
|
||||
};
|
||||
|
||||
struct vpic;
|
||||
struct vm {
|
||||
struct vm_attr attr; /* Reference to this VM's attributes */
|
||||
struct vm_hw_info hw; /* Reference to this VM's HW information */
|
||||
struct vm_sw_info sw; /* Reference to SW associated with this VM */
|
||||
struct vm_arch arch_vm; /* Reference to this VM's arch information */
|
||||
struct vm_state_info state_info;/* State info of this VM */
|
||||
enum vm_state state; /* VM state */
|
||||
struct vcpu *current_vcpu; /* VCPU that caused vm exit */
|
||||
void *vuart; /* Virtual UART */
|
||||
struct vpic *vpic; /* Virtual PIC */
|
||||
uint32_t vpic_wire_mode;
|
||||
struct iommu_domain *iommu_domain; /* iommu domain of this VM */
|
||||
struct list_head list; /* list of VM */
|
||||
spinlock_t spinlock; /* Spin-lock used to protect VM modifications */
|
||||
|
||||
struct list_head mmio_list; /* list for mmio. This list is not updated
|
||||
* when vm is active. So no lock needed
|
||||
*/
|
||||
|
||||
struct _vm_shared_memory *shared_memory_area;
|
||||
|
||||
struct {
|
||||
struct _vm_virtual_device_node *head;
|
||||
struct _vm_virtual_device_node *tail;
|
||||
} virtual_device_list;
|
||||
|
||||
/* passthrough device link */
|
||||
struct list_head ptdev_list;
|
||||
spinlock_t ptdev_lock;
|
||||
|
||||
unsigned char GUID[16];
|
||||
unsigned int secure_world_enabled;
|
||||
};
|
||||
|
||||
struct vm_description {
|
||||
/* Virtual machine identifier, assigned by the system */
|
||||
char *vm_attr_name;
|
||||
/* The logical CPU IDs associated with this VM - The first CPU listed
|
||||
* will be the VM's BSP
|
||||
*/
|
||||
int *vm_hw_logical_core_ids;
|
||||
unsigned char GUID[16]; /* GUID of the VM to be created */
|
||||
int vm_hw_num_cores; /* Number of virtual cores */
|
||||
/* Indicates to APs that the BSP has created a VM for this
|
||||
* description
|
||||
*/
|
||||
bool vm_created;
|
||||
/* Index indicating VM's privilege level */
|
||||
unsigned int vm_state_info_privilege;
|
||||
unsigned int secure_world_enabled; /* secure_world enabled? */
|
||||
};
|
||||
|
||||
struct vm_description_array {
|
||||
int num_vm_desc;
|
||||
struct vm_description vm_desc_array[];
|
||||
};
|
||||
|
||||
int shutdown_vm(struct vm *vm);
int pause_vm(struct vm *vm);
int start_vm(struct vm *vm);
int create_vm(struct vm_description *vm_desc, struct vm **vm);
int prepare_vm0(void);
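/*
 * Illustrative lifecycle sketch (not part of the original header): create a
 * VM from a description and boot it.  'demo_launch_vm' is a hypothetical
 * helper with error handling reduced to the bare minimum.
 */
static inline int demo_launch_vm(struct vm_description *vm_desc)
{
	struct vm *vm = NULL;
	int ret = create_vm(vm_desc, &vm);

	if (ret != 0)
		return ret;
	return start_vm(vm);
}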
|
||||
|
||||
struct vm *get_vm_from_vmid(int vm_id);
|
||||
struct vm_description *get_vm_desc(int idx);
|
||||
|
||||
extern struct list_head vm_list;
|
||||
extern spinlock_t vm_list_lock;
|
||||
extern bool x2apic_enabled;
|
||||
|
||||
#endif /* VM_H_ */
|
110
hypervisor/include/arch/x86/guest/vpic.h
Normal file
@@ -0,0 +1,110 @@
|
||||
/*-
|
||||
* Copyright (c) 2014 Tycho Nightingale <tycho.nightingale@pluribusnetworks.com>
|
||||
* Copyright (c) 2017 Intel Corporation
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* $FreeBSD$
|
||||
*/
|
||||
|
||||
#ifndef _VPIC_H_
|
||||
#define _VPIC_H_
|
||||
|
||||
#define ICU_IMR_OFFSET 1
|
||||
|
||||
/* Initialization control word 1. Written to even address. */
|
||||
#define ICW1_IC4 0x01 /* ICW4 present */
|
||||
#define ICW1_SNGL 0x02 /* 1 = single, 0 = cascaded */
|
||||
#define ICW1_ADI 0x04 /* 1 = 4, 0 = 8 byte vectors */
|
||||
#define ICW1_LTIM 0x08 /* 1 = level trigger, 0 = edge */
|
||||
#define ICW1_RESET 0x10 /* must be 1 */
|
||||
/* 0x20 - 0x80 - in 8080/8085 mode only */
|
||||
|
||||
/* Initialization control word 2. Written to the odd address. */
|
||||
/* No definitions, it is the base vector of the IDT for 8086 mode */
|
||||
|
||||
/* Initialization control word 3. Written to the odd address. */
|
||||
/* For a master PIC, bitfield indicating a slave 8259 on given input */
|
||||
/* For a slave, the lower 3 bits are the slave's binary ID on the master */
|
||||
|
||||
/* Initialization control word 4. Written to the odd address. */
|
||||
#define ICW4_8086 0x01 /* 1 = 8086, 0 = 8080 */
|
||||
#define ICW4_AEOI 0x02 /* 1 = Auto EOI */
|
||||
#define ICW4_MS 0x04 /* 1 = buffered master, 0 = slave */
|
||||
#define ICW4_BUF 0x08 /* 1 = enable buffer mode */
|
||||
#define ICW4_SFNM 0x10 /* 1 = special fully nested mode */
|
||||
|
||||
/* Operation control words. Written after initialization. */

/* Operation control word type 1 */
/*
 * No definitions. Written to the odd address. Bitmask for interrupts.
 * 1 = disabled.
 */

/* Operation control word type 2. Bit 3 (0x08) must be zero. Even address. */
#define OCW2_L0 0x01 /* Level */
#define OCW2_L1 0x02
#define OCW2_L2 0x04
/* 0x08 must be 0 to select OCW2 vs OCW3 */
/* 0x10 must be 0 to select OCW2 vs ICW1 */
#define OCW2_EOI 0x20 /* 1 = EOI */
#define OCW2_SL 0x40 /* EOI mode */
#define OCW2_R 0x80 /* EOI mode */
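/*
 * Illustrative sketch (not part of the original header): the OCW2 byte for a
 * specific EOI combines the bits above with the IRQ level, e.g. 0x60 | irq.
 * The macro name is hypothetical.
 */
#define DEMO_OCW2_SPECIFIC_EOI(irq)	(OCW2_EOI | OCW2_SL | ((irq) & 0x7))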
|
||||
|
||||
/* Operation control word type 3. Bit 3 (0x08) must be set. Even address. */
|
||||
#define OCW3_RIS 0x01 /* 1 = read IS, 0 = read IR */
|
||||
#define OCW3_RR 0x02 /* register read */
|
||||
#define OCW3_P 0x04 /* poll mode command */
|
||||
/* 0x08 must be 1 to select OCW3 vs OCW2 */
|
||||
#define OCW3_SEL 0x08 /* must be 1 */
|
||||
/* 0x10 must be 0 to select OCW3 vs ICW1 */
|
||||
#define OCW3_SMM 0x20 /* special mode mask */
|
||||
#define OCW3_ESMM 0x40 /* enable SMM */
|
||||
|
||||
#define IO_ELCR1 0x4d0
|
||||
#define IO_ELCR2 0x4d1
|
||||
|
||||
enum vpic_trigger {
|
||||
EDGE_TRIGGER,
|
||||
LEVEL_TRIGGER
|
||||
};
|
||||
|
||||
void *vpic_init(struct vm *vm);
|
||||
void vpic_cleanup(struct vm *vm);
|
||||
|
||||
int vpic_assert_irq(struct vm *vm, int irq);
|
||||
int vpic_deassert_irq(struct vm *vm, int irq);
|
||||
int vpic_pulse_irq(struct vm *vm, int irq);
|
||||
|
||||
void vpic_pending_intr(struct vm *vm, int *vecptr);
|
||||
void vpic_intr_accepted(struct vm *vm, int vector);
|
||||
int vpic_set_irq_trigger(struct vm *vm, int irq, enum vpic_trigger trigger);
|
||||
int vpic_get_irq_trigger(struct vm *vm, int irq, enum vpic_trigger *trigger);
|
||||
|
||||
struct vm_io_handler *vpic_create_io_handler(int flags, uint32_t port,
|
||||
uint32_t len);
|
||||
|
||||
bool vpic_is_pin_mask(struct vpic *vpic, uint8_t virt_pin);
|
||||
|
||||
#endif /* _VPIC_H_ */
|
60
hypervisor/include/arch/x86/hv_arch.h
Normal file
@@ -0,0 +1,60 @@
|
||||
/*
|
||||
* Copyright (C) 2018 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Intel Corporation nor the names of its
|
||||
* contributors may be used to endorse or promote products derived
|
||||
* from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef HV_ARCH_H
|
||||
#define HV_ARCH_H
|
||||
|
||||
#include <cpu.h>
|
||||
#include <gdt.h>
|
||||
#include <idt.h>
|
||||
#include <apicreg.h>
|
||||
#include <ioapic.h>
|
||||
#include <lapic.h>
|
||||
#include <msr.h>
|
||||
#include <io.h>
|
||||
#include <vcpu.h>
|
||||
#include <vm.h>
|
||||
#include <cpuid.h>
|
||||
#include <mmu.h>
|
||||
#include <intr_ctx.h>
|
||||
#include <irq.h>
|
||||
#include <timer.h>
|
||||
#include <softirq.h>
|
||||
#include <vmx.h>
|
||||
#include <assign.h>
|
||||
#include <vtd.h>
|
||||
|
||||
#include <vpic.h>
|
||||
#include <vlapic.h>
|
||||
#include <vioapic.h>
|
||||
#include <guest.h>
|
||||
#include <vmexit.h>
|
||||
|
||||
#endif /* HV_ARCH_H */
|
111
hypervisor/include/arch/x86/idt.h
Normal file
@@ -0,0 +1,111 @@
|
||||
/*
|
||||
* Copyright (C) 2018 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Intel Corporation nor the names of its
|
||||
* contributors may be used to endorse or promote products derived
|
||||
* from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef IDT_H
|
||||
#define IDT_H
|
||||
|
||||
/*
|
||||
* IDT is defined in assembly so we handle exceptions as early as possible.
|
||||
*/
|
||||
|
||||
/* Interrupt Descriptor Table (IDT) descriptors are 16 bytes on x86-64 instead
 * of 8 bytes.
 */
|
||||
#define X64_IDT_DESC_SIZE (0x10)
|
||||
/* Number of the HOST IDT entries */
|
||||
#define HOST_IDT_ENTRIES (0x100)
|
||||
/* Size of the IDT */
|
||||
#define HOST_IDT_SIZE (HOST_IDT_ENTRIES * X64_IDT_DESC_SIZE)
|
||||
|
||||
#ifndef ASSEMBLER
|
||||
|
||||
/*
 * Definition of a 16-byte IDT descriptor.
 */
|
||||
union idt_64_descriptor {
|
||||
uint64_t value;
|
||||
struct {
|
||||
union {
|
||||
uint32_t value;
|
||||
struct {
|
||||
uint32_t offset_15_0:16;
|
||||
uint32_t segment_sel:16;
|
||||
} bits;
|
||||
} low32;
|
||||
union {
|
||||
uint32_t value;
|
||||
struct {
|
||||
uint32_t ist:3;
|
||||
uint32_t bit_3_clr:1;
|
||||
uint32_t bit_4_clr:1;
|
||||
uint32_t bits_5_7_clr:3;
|
||||
uint32_t type:4;
|
||||
uint32_t bit_12_clr:1;
|
||||
uint32_t dpl:2;
|
||||
uint32_t present:1;
|
||||
uint32_t offset_31_16:16;
|
||||
} bits;
|
||||
} high32;
|
||||
uint32_t offset_63_32;
|
||||
uint32_t rsvd;
|
||||
};
|
||||
} __aligned(8);
|
||||
|
||||
/*****************************************************************************
|
||||
*
|
||||
* Definition of the IDT.
|
||||
*
|
||||
*****************************************************************************/
|
||||
struct host_idt {
|
||||
union idt_64_descriptor host_idt_descriptors[HOST_IDT_ENTRIES];
|
||||
} __aligned(8);
|
||||
|
||||
/*
|
||||
* Definition of the IDT descriptor.
|
||||
*/
|
||||
struct host_idt_descriptor {
|
||||
unsigned short len;
|
||||
struct host_idt *idt;
|
||||
} __packed;
|
||||
|
||||
extern struct host_idt HOST_IDT;
|
||||
extern struct host_idt_descriptor HOST_IDTR;
|
||||
|
||||
static inline void set_idt(struct host_idt_descriptor *idtd)
{
	asm volatile (" lidtq %[idtd]\n" : /* no output parameters */
		: /* input parameters */
		[idtd] "m"(*idtd));
}
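/*
 * Illustrative sketch (not part of the original header): point the CPU at
 * the statically defined host IDT.  The wrapper name is hypothetical; it
 * simply executes LIDT on the HOST_IDTR descriptor declared above.
 */
static inline void demo_load_host_idt(void)
{
	set_idt(&HOST_IDTR);
}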
|
||||
|
||||
#endif /* end #ifndef ASSEMBLER */
|
||||
|
||||
#endif /* IDT_H */
|
64
hypervisor/include/arch/x86/intr_ctx.h
Normal file
@@ -0,0 +1,64 @@
|
||||
/*
|
||||
* Copyright (C) 2018 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Intel Corporation nor the names of its
|
||||
* contributors may be used to endorse or promote products derived
|
||||
* from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef INTR_CTX_H
|
||||
#define INTR_CTX_H
|
||||
|
||||
/*
|
||||
* Definition of the stack frame layout
|
||||
*/
|
||||
struct intr_ctx {
|
||||
uint64_t r12;
|
||||
uint64_t r13;
|
||||
uint64_t r14;
|
||||
uint64_t r15;
|
||||
uint64_t rbx;
|
||||
uint64_t rbp;
|
||||
|
||||
uint64_t rax;
|
||||
uint64_t rcx;
|
||||
uint64_t rdx;
|
||||
uint64_t rsi;
|
||||
uint64_t rdi;
|
||||
uint64_t r8;
|
||||
uint64_t r9;
|
||||
uint64_t r10;
|
||||
uint64_t r11;
|
||||
|
||||
uint64_t vector;
|
||||
uint64_t error_code;
|
||||
uint64_t rip;
|
||||
uint64_t cs;
|
||||
uint64_t rflags;
|
||||
uint64_t rsp;
|
||||
uint64_t ss;
|
||||
};
|
||||
|
||||
#endif /* INTR_CTX_H */
|
622
hypervisor/include/arch/x86/io.h
Normal file
@@ -0,0 +1,622 @@
|
||||
/*
|
||||
* Copyright (C) 2018 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Intel Corporation nor the names of its
|
||||
* contributors may be used to endorse or promote products derived
|
||||
* from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef IO_H
|
||||
#define IO_H
|
||||
|
||||
/* Definition of an IO port range */
|
||||
struct vm_io_range {
|
||||
uint16_t base; /* IO port base */
|
||||
uint16_t len; /* IO port range */
|
||||
int flags; /* IO port attributes */
|
||||
};
|
||||
|
||||
/* Write 1 byte to specified I/O port */
|
||||
static inline void io_write_byte(uint8_t value, uint16_t port)
|
||||
{
|
||||
asm volatile ("outb %0,%1"::"a" (value), "dN"(port));
|
||||
}
|
||||
|
||||
/* Read 1 byte from specified I/O port */
|
||||
static inline uint8_t io_read_byte(uint16_t port)
|
||||
{
|
||||
uint8_t value;
|
||||
|
||||
asm volatile ("inb %1,%0":"=a" (value):"dN"(port));
|
||||
return value;
|
||||
}
|
||||
|
||||
/* Write 2 bytes to specified I/O port */
|
||||
static inline void io_write_word(uint16_t value, uint16_t port)
|
||||
{
|
||||
asm volatile ("outw %0,%1"::"a" (value), "dN"(port));
|
||||
}
|
||||
|
||||
/* Read 2 bytes from specified I/O port */
|
||||
static inline uint16_t io_read_word(uint16_t port)
|
||||
{
|
||||
uint16_t value;
|
||||
|
||||
asm volatile ("inw %1,%0":"=a" (value):"dN"(port));
|
||||
return value;
|
||||
}
|
||||
|
||||
/* Write 4 bytes to specified I/O port */
|
||||
static inline void io_write_long(uint32_t value, uint16_t port)
|
||||
{
|
||||
asm volatile ("outl %0,%1"::"a" (value), "dN"(port));
|
||||
}
|
||||
|
||||
/* Read 4 bytes from specified I/O port */
|
||||
static inline uint32_t io_read_long(uint16_t port)
|
||||
{
|
||||
uint32_t value;
|
||||
|
||||
asm volatile ("inl %1,%0":"=a" (value):"dN"(port));
|
||||
return value;
|
||||
}
|
||||
|
||||
static inline void io_write(uint32_t v, ioport_t addr, size_t sz)
{
	if (sz == 1)
		io_write_byte(v, addr);
	else if (sz == 2)
		io_write_word(v, addr);
	else
		io_write_long(v, addr);
}

static inline uint32_t io_read(ioport_t addr, size_t sz)
{
	if (sz == 1)
		return io_read_byte(addr);
	if (sz == 2)
		return io_read_word(addr);
	return io_read_long(addr);
}
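/*
 * Illustrative sketch (not part of the original header): a classic use of
 * the port helpers above, reading a CMOS/RTC register through the standard
 * index/data port pair at 0x70/0x71.  The helper name is hypothetical.
 */
static inline uint8_t demo_cmos_read(uint8_t reg)
{
	io_write_byte(reg, 0x70);	/* select the CMOS register */
	return io_read_byte(0x71);	/* read its contents */
}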
|
||||
|
||||
struct vm_io_handler;
|
||||
struct vm;
|
||||
struct vcpu;
|
||||
|
||||
typedef
|
||||
uint32_t (*io_read_fn_t)(struct vm_io_handler *, struct vm *,
|
||||
ioport_t, size_t);
|
||||
|
||||
typedef
|
||||
void (*io_write_fn_t)(struct vm_io_handler *, struct vm *,
|
||||
ioport_t, size_t, uint32_t);
|
||||
|
||||
/* Describes a single IO handler description entry. */
|
||||
struct vm_io_handler_desc {
|
||||
|
||||
/** The base address of the IO range for this description. */
|
||||
ioport_t addr;
|
||||
/** The number of bytes covered by this description. */
|
||||
size_t len;
|
||||
|
||||
/** A pointer to the "read" function.
|
||||
*
|
||||
* The read function is called from the hypervisor whenever
|
||||
* a read access to a range described in "ranges" occurs.
|
||||
* The arguments to the callback are:
|
||||
*
|
||||
* - The address of the port to read from.
|
||||
* - The width of the read operation (1,2 or 4).
|
||||
*
|
||||
* The implementation must return the port's content as
|
||||
* byte, word or doubleword (depending on the width).
|
||||
*
|
||||
* If the pointer is null, a read of 1's is assumed.
|
||||
*/
|
||||
|
||||
io_read_fn_t io_read;
|
||||
/** A pointer to the "write" function.
|
||||
*
|
||||
* The write function is called from the hypervisor code
|
||||
* whenever a write access to a range described in "ranges"
|
||||
* occurs. The arguments to the callback are:
|
||||
*
|
||||
* - The address of the port to write to.
|
||||
* - The width of the write operation (1,2 or 4).
|
||||
* - The value to write as byte, word or doubleword
|
||||
* (depending on the width)
|
||||
*
|
||||
* The implementation must write the value to the port.
|
||||
*
|
||||
* If the pointer is null, the write access is ignored.
|
||||
*/
|
||||
|
||||
io_write_fn_t io_write;
|
||||
};
|
||||
|
||||
struct vm_io_handler {
|
||||
struct vm_io_handler *next;
|
||||
struct vm_io_handler_desc desc;
|
||||
};
|
||||
|
||||
#define IO_ATTR_R 0
|
||||
#define IO_ATTR_RW 1
|
||||
#define IO_ATTR_NO_ACCESS 2
|
||||
|
||||
/* External Interfaces */
int io_instr_handler(struct vcpu *vcpu);
void setup_io_bitmap(struct vm *vm);
void free_io_emulation_resource(struct vm *vm);
void register_io_emulation_handler(struct vm *vm, struct vm_io_range *range,
	io_read_fn_t io_read_fn_ptr,
	io_write_fn_t io_write_fn_ptr);
int dm_emulate_pio_post(struct vcpu *vcpu);
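/*
 * Illustrative sketch (not part of the original header): wire a trivial
 * read/write pair to a port range via register_io_emulation_handler().
 * The port number, handler names and behavior are hypothetical.
 */
static uint32_t demo_port_read(struct vm_io_handler *hdlr, struct vm *vm,
	ioport_t port, size_t bytes)
{
	return 0xFFFFFFFFU;	/* behave like an unconnected port */
}

static void demo_port_write(struct vm_io_handler *hdlr, struct vm *vm,
	ioport_t port, size_t bytes, uint32_t val)
{
	/* ignore guest writes */
}

static void demo_register_port(struct vm *vm)
{
	struct vm_io_range range = {
		.base = 0x510,	/* hypothetical port base */
		.len = 4,
		.flags = IO_ATTR_RW,
	};

	register_io_emulation_handler(vm, &range, demo_port_read,
		demo_port_write);
}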
|
||||
|
||||
/** Writes a 32 bit value to a memory mapped IO device.
|
||||
*
|
||||
* @param value The 32 bit value to write.
|
||||
* @param addr The memory address to write to.
|
||||
*/
|
||||
static inline void mmio_write_long(uint32_t value, mmio_addr_t addr)
|
||||
{
|
||||
*((uint32_t *)addr) = value;
|
||||
}
|
||||
|
||||
/** Writes a 16 bit value to a memory mapped IO device.
|
||||
*
|
||||
* @param value The 16 bit value to write.
|
||||
* @param addr The memory address to write to.
|
||||
*/
|
||||
static inline void mmio_write_word(uint32_t value, mmio_addr_t addr)
|
||||
{
|
||||
*((uint16_t *)addr) = value;
|
||||
}
|
||||
|
||||
/** Writes an 8 bit value to a memory mapped IO device.
|
||||
*
|
||||
* @param value The 8 bit value to write.
|
||||
* @param addr The memory address to write to.
|
||||
*/
|
||||
static inline void mmio_write_byte(uint32_t value, mmio_addr_t addr)
|
||||
{
|
||||
*((uint8_t *)addr) = value;
|
||||
}
|
||||
|
||||
/** Reads a 32 bit value from a memory mapped IO device.
|
||||
*
|
||||
* @param addr The memory address to read from.
|
||||
*
|
||||
* @return The 32 bit value read from the given address.
|
||||
*/
|
||||
static inline uint32_t mmio_read_long(mmio_addr_t addr)
|
||||
{
|
||||
return *((uint32_t *)addr);
|
||||
}
|
||||
|
||||
/** Reads a 16 bit value from a memory mapped IO device.
|
||||
*
|
||||
* @param addr The memory address to read from.
|
||||
*
|
||||
* @return The 16 bit value read from the given address.
|
||||
*/
|
||||
static inline uint16_t mmio_read_word(mmio_addr_t addr)
|
||||
{
|
||||
return *((uint16_t *)addr);
|
||||
}
|
||||
|
||||
/** Reads an 8 bit value from a memory mapped IO device.
|
||||
*
|
||||
* @param addr The memory address to read from.
|
||||
*
|
||||
* @return The 8 bit value read from the given address.
|
||||
*/
|
||||
static inline uint8_t mmio_read_byte(mmio_addr_t addr)
|
||||
{
|
||||
return *((uint8_t *)addr);
|
||||
}
|
||||
|
||||
/** Sets bits in a 32 bit value from a memory mapped IO device.
|
||||
*
|
||||
* @param mask Contains the bits to set at the memory address.
|
||||
* Bits set in this mask are set in the memory
|
||||
* location.
|
||||
* @param addr The memory address to read from/write to.
|
||||
*/
|
||||
static inline void mmio_or_long(uint32_t mask, mmio_addr_t addr)
|
||||
{
|
||||
*((uint32_t *)addr) |= mask;
|
||||
}
|
||||
|
||||
/** Sets bits in a 16 bit value from a memory mapped IO device.
|
||||
*
|
||||
* @param mask Contains the bits to set at the memory address.
|
||||
* Bits set in this mask are set in the memory
|
||||
* location.
|
||||
* @param addr The memory address to read from/write to.
|
||||
*/
|
||||
static inline void mmio_or_word(uint32_t mask, mmio_addr_t addr)
|
||||
{
|
||||
*((uint16_t *)addr) |= mask;
|
||||
}
|
||||
|
||||
/** Sets bits in an 8 bit value from a memory mapped IO device.
|
||||
*
|
||||
* @param mask Contains the bits to set at the memory address.
|
||||
* Bits set in this mask are set in the memory
|
||||
* location.
|
||||
* @param addr The memory address to read from/write to.
|
||||
*/
|
||||
static inline void mmio_or_byte(uint32_t mask, mmio_addr_t addr)
|
||||
{
|
||||
*((uint8_t *)addr) |= mask;
|
||||
}
|
||||
|
||||
/** Clears bits in a 32 bit value from a memory mapped IO device.
|
||||
*
|
||||
* @param mask Contains the bits to clear at the memory address.
|
||||
* Bits set in this mask are cleared in the memory
|
||||
* location.
|
||||
* @param addr The memory address to read from/write to.
|
||||
*/
|
||||
static inline void mmio_and_long(uint32_t mask, mmio_addr_t addr)
|
||||
{
|
||||
*((uint32_t *)addr) &= ~mask;
|
||||
}
|
||||
|
||||
/** Clears bits in a 16 bit value from a memory mapped IO device.
|
||||
*
|
||||
* @param mask Contains the bits to clear at the memory address.
|
||||
* Bits set in this mask are cleared in the memory
|
||||
* location.
|
||||
* @param addr The memory address to read from/write to.
|
||||
*/
|
||||
static inline void mmio_and_word(uint32_t mask, mmio_addr_t addr)
|
||||
{
|
||||
*((uint16_t *)addr) &= ~mask;
|
||||
}
|
||||
|
||||
/** Clears bits in an 8 bit value from a memory mapped IO device.
|
||||
*
|
||||
* @param mask Contains the bits to clear at the memory address.
|
||||
* Bits set in this mask are cleared in the memory
|
||||
* location.
|
||||
* @param addr The memory address to read from/write to.
|
||||
*/
|
||||
static inline void mmio_and_byte(uint32_t mask, mmio_addr_t addr)
|
||||
{
|
||||
*((uint8_t *)addr) &= ~mask;
|
||||
}
|
||||
|
||||
/** Performs a read-modify-write cycle for a 32 bit value from a MMIO device.
|
||||
*
|
||||
* Reads a 32 bit value from a memory mapped IO device, sets and clears
|
||||
* bits and writes the value back. If a bit is specified in both, the 'set'
|
||||
* and in the 'clear' mask, it is undefined whether the resulting bit is set
|
||||
* or cleared.
|
||||
*
|
||||
* @param set Contains the bits to set. Bits set in this mask
|
||||
* are set at the memory address.
|
||||
* @param clear Contains the bits to clear. Bits set in this
|
||||
* mask are cleared at the memory address.
|
||||
* @param addr The memory address to read from/write to.
|
||||
*/
|
||||
static inline void mmio_rmw_long(uint32_t set, uint32_t clear, mmio_addr_t addr)
{
	*((uint32_t *)addr) = (*((uint32_t *)addr) & ~clear) | set;
}
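/*
 * Illustrative sketch (not part of the original header): update a 4 bit
 * field in a 32 bit device register with the read-modify-write helper
 * above.  The register address, mask and encoding are hypothetical.
 */
static inline void demo_set_ring_size(mmio_addr_t ring_ctrl_reg)
{
	/* clear bits [3:0], then set the encoding for a 256-entry ring */
	mmio_rmw_long(0x4U, 0xFU, ring_ctrl_reg);
}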
|
||||
|
||||
/** Performs a read-modify-write cycle for a 16 bit value from a MMIO device.
|
||||
*
|
||||
* Reads a 16 bit value from a memory mapped IO device, sets and clears
|
||||
* bits and writes the value back. If a bit is specified in both, the 'set'
|
||||
* and in the 'clear' mask, it is undefined whether the resulting bit is set
|
||||
* or cleared.
|
||||
*
|
||||
* @param set Contains the bits to set. Bits set in this mask
|
||||
* are set at the memory address.
|
||||
* @param clear Contains the bits to clear. Bits set in this
|
||||
* mask are cleared at the memory address.
|
||||
* @param addr The memory address to read from/write to.
|
||||
*/
|
||||
static inline void mmio_rmw_word(uint32_t set, uint32_t clear, mmio_addr_t addr)
|
||||
{
|
||||
*((uint16_t *)addr) =
|
||||
(*((uint16_t *)addr) & ~clear) | set;
|
||||
}
|
||||
|
||||
/** Performs a read-modify-write cycle for an 8 bit value from a MMIO device.
|
||||
*
|
||||
* Reads an 8 bit value from a memory mapped IO device, sets and clears
|
||||
* bits and writes the value back. If a bit is specified in both the 'set'
|
||||
* and the 'clear' mask, it is undefined whether the resulting bit is set
|
||||
* or cleared.
|
||||
*
|
||||
* @param set Contains the bits to set. Bits set in this mask
|
||||
* are set at the memory address.
|
||||
* @param clear Contains the bits to clear. Bits set in this
|
||||
* mask are cleared at the memory address.
|
||||
* @param addr The memory address to read from/write to.
|
||||
*/
|
||||
static inline void mmio_rmw_byte(uint32_t set, uint32_t clear, mmio_addr_t addr)
|
||||
{
|
||||
*((uint8_t *)addr) = (*((uint8_t *)addr) & ~clear) | set;
|
||||
}
|
||||
|
||||
/** Writes a 32 bit value to a memory mapped IO device (ROM code version).
|
||||
*
|
||||
* @param value The 32 bit value to write.
|
||||
* @param addr The memory address to write to.
|
||||
*/
|
||||
static inline void __mmio_write_long(uint32_t value, mmio_addr_t addr)
|
||||
{
|
||||
*((uint32_t *)addr) = value;
|
||||
}
|
||||
|
||||
/** Writes a 16 bit value to a memory mapped IO device (ROM code version).
|
||||
*
|
||||
* @param value The 16 bit value to write.
|
||||
* @param addr The memory address to write to.
|
||||
*/
|
||||
static inline void __mmio_write_word(uint32_t value, mmio_addr_t addr)
|
||||
{
|
||||
*((uint16_t *)addr) = value;
|
||||
}
|
||||
|
||||
/** Writes an 8 bit value to a memory mapped IO device (ROM code version).
|
||||
*
|
||||
* @param value The 8 bit value to write.
|
||||
* @param addr The memory address to write to.
|
||||
*/
|
||||
static inline void __mmio_write_byte(uint32_t value, mmio_addr_t addr)
|
||||
{
|
||||
*((uint8_t *)addr) = value;
|
||||
}
|
||||
|
||||
/** Reads a 32 bit value from a memory mapped IO device (ROM code version).
|
||||
*
|
||||
* @param addr The memory address to read from.
|
||||
*
|
||||
* @return The 32 bit value read from the given address.
|
||||
*/
|
||||
static inline uint32_t __mmio_read_long(mmio_addr_t addr)
|
||||
{
|
||||
return *((uint32_t *)addr);
|
||||
}
|
||||
|
||||
/** Reads a 16 bit value from a memory mapped IO device (ROM code version).
|
||||
*
|
||||
* @param addr The memory address to read from.
|
||||
*
|
||||
* @return The 16 bit value read from the given address.
|
||||
*/
|
||||
static inline uint16_t __mmio_read_word(mmio_addr_t addr)
|
||||
{
|
||||
return *((uint16_t *)addr);
|
||||
}
|
||||
|
||||
/** Reads an 8 bit value from a memory mapped IO device (ROM code version).
|
||||
*
|
||||
* @param addr The memory address to read from.
|
||||
*
|
||||
* @return The 8 bit value read from the given address.
|
||||
*/
|
||||
static inline uint8_t __mmio_read_byte(mmio_addr_t addr)
|
||||
{
|
||||
return *((uint8_t *)addr);
|
||||
}
|
||||
|
||||
/** Sets bits in a 32 bit value from a MMIO device (ROM code version).
|
||||
*
|
||||
* @param mask Contains the bits to set at the memory address.
|
||||
* Bits set in this mask are set in the memory
|
||||
* location.
|
||||
* @param addr The memory address to read from/write to.
|
||||
*/
|
||||
static inline void __mmio_or_long(uint32_t mask, mmio_addr_t addr)
|
||||
{
|
||||
*((uint32_t *)addr) |= mask;
|
||||
}
|
||||
|
||||
/** Sets bits in a 16 bit value from a MMIO device (ROM code version).
|
||||
*
|
||||
* @param mask Contains the bits to set at the memory address.
|
||||
* Bits set in this mask are set in the memory
|
||||
* location.
|
||||
* @param addr The memory address to read from/write to.
|
||||
*/
|
||||
static inline void __mmio_or_word(uint32_t mask, mmio_addr_t addr)
|
||||
{
|
||||
*((uint16_t *)addr) |= mask;
|
||||
}
|
||||
|
||||
/** Sets bits in an 8 bit value from a MMIO device (ROM code version).
|
||||
*
|
||||
* @param mask Contains the bits to set at the memory address.
|
||||
* Bits set in this mask are set in the memory
|
||||
* location.
|
||||
* @param addr The memory address to read from/write to.
|
||||
*/
|
||||
static inline void __mmio_or_byte(uint32_t mask, mmio_addr_t addr)
|
||||
{
|
||||
*((uint8_t *)addr) |= mask;
|
||||
}
|
||||
|
||||
/** Clears bits in a 32 bit value from a MMIO device (ROM code version).
|
||||
*
|
||||
* @param mask Contains the bits to clear at the memory address.
|
||||
* Bits set in this mask are cleared in the memory
|
||||
* location.
|
||||
* @param addr The memory address to read from/write to.
|
||||
*/
|
||||
static inline void __mmio_and_long(uint32_t mask, mmio_addr_t addr)
|
||||
{
|
||||
*((uint32_t *)addr) &= ~mask;
|
||||
}
|
||||
|
||||
/** Clears bits in a 16 bit value from a MMIO device (ROM code version).
|
||||
*
|
||||
* @param mask Contains the bits to clear at the memory address.
|
||||
* Bits set in this mask are cleared in the memory
|
||||
* location.
|
||||
* @param addr The memory address to read from/write to.
|
||||
*/
|
||||
static inline void __mmio_and_word(uint32_t mask, mmio_addr_t addr)
|
||||
{
|
||||
*((uint16_t *)addr) &= ~mask;
|
||||
}
|
||||
|
||||
/** Clears bits in an 8 bit value from a MMIO device (ROM code version).
|
||||
*
|
||||
* @param mask Contains the bits to clear at the memory address.
|
||||
* Bits set in this mask are cleared in the memory
|
||||
* location.
|
||||
* @param addr The memory address to read from/write to.
|
||||
*/
|
||||
static inline void __mmio_and_byte(uint32_t mask, mmio_addr_t addr)
|
||||
{
|
||||
*((uint8_t *)addr) &= ~mask;
|
||||
}
|
||||
|
||||
/** Performs a read-modify-write cycle for a 32 bit value from a MMIO device
|
||||
* (ROM code version).
|
||||
*
|
||||
* Reads a 32 bit value from a memory mapped IO device, sets and clears
|
||||
* bits and writes the value back. If a bit is specified in both the 'set'
|
||||
* and the 'clear' mask, it is undefined whether the resulting bit is set
|
||||
* or cleared.
|
||||
*
|
||||
* @param set Contains the bits to set. Bits set in this mask
|
||||
* are set at the memory address.
|
||||
* @param clear Contains the bits to clear. Bits set in this
|
||||
* mask are cleared at the memory address.
|
||||
* @param addr The memory address to read from/write to.
|
||||
*/
|
||||
static inline void
|
||||
__mmio_rmw_long(uint32_t set, uint32_t clear, mmio_addr_t addr)
|
||||
{
|
||||
*((uint32_t *)addr) =
|
||||
(*((uint32_t *)addr) & ~clear) | set;
|
||||
}
|
||||
|
||||
/** Performs a read-modify-write cycle for a 16 bit value from a MMIO device
|
||||
* (ROM code version).
|
||||
*
|
||||
* Reads a 16 bit value from a memory mapped IO device, sets and clears
|
||||
* bits and writes the value back. If a bit is specified in both the 'set'
|
||||
* and the 'clear' mask, it is undefined whether the resulting bit is set
|
||||
* or cleared.
|
||||
*
|
||||
* @param set Contains the bits to set. Bits set in this mask
|
||||
* are set at the memory address.
|
||||
* @param clear Contains the bits to clear. Bits set in this
|
||||
* mask are cleared at the memory address.
|
||||
* @param addr The memory address to read from/write to.
|
||||
*/
|
||||
static inline void
|
||||
__mmio_rmw_word(uint32_t set, uint32_t clear, mmio_addr_t addr)
|
||||
{
|
||||
*((uint16_t *)addr) =
|
||||
(*((uint16_t *)addr) & ~clear) | set;
|
||||
}
|
||||
|
||||
/** Performs a read-modify-write cycle for an 8 bit value from a MMIO device
|
||||
* (ROM code version).
|
||||
*
|
||||
* Reads an 8 bit value from a memory mapped IO device, sets and clears
|
||||
* bits and writes the value back. If a bit is specified in both the 'set'
|
||||
* and the 'clear' mask, it is undefined whether the resulting bit is set
|
||||
* or cleared.
|
||||
*
|
||||
* @param set Contains the bits to set. Bits set in this mask
|
||||
* are set at the memory address.
|
||||
* @param clear Contains the bits to clear. Bits set in this
|
||||
* mask are cleared at the memory address.
|
||||
* @param addr The memory address to read from/write to.
|
||||
*/
|
||||
static inline void
|
||||
__mmio_rmw_byte(uint32_t set, uint32_t clear, mmio_addr_t addr)
|
||||
{
|
||||
*((uint8_t *)addr) = (*((uint8_t *)addr) & ~clear) | set;
|
||||
}
|
||||
|
||||
/** Reads a 32 bit memory mapped IO register, masks it, and writes the result
|
||||
* back to the memory mapped IO register.
|
||||
*
|
||||
* @param addr The address of the memory mapped IO register.
|
||||
* @param mask Bits set in this mask are cleared in the value read before 'value' is OR'd in.
|
||||
* @param value The 32 bit value to write.
|
||||
*/
|
||||
static inline void setl(mmio_addr_t addr, uint32_t mask, uint32_t value)
|
||||
{
|
||||
mmio_write_long((mmio_read_long(addr) & ~mask) | value, addr);
|
||||
}
|
||||
|
||||
/** Reads a 16 bit memory mapped IO register, masks it, and writes the result
|
||||
* back to the memory mapped IO register.
|
||||
*
|
||||
* @param addr The address of the memory mapped IO register.
|
||||
* @param mask Bits set in this mask are cleared in the value read before 'value' is OR'd in.
|
||||
* @param value The 16 bit value to write.
|
||||
*/
|
||||
static inline void setw(mmio_addr_t addr, uint32_t mask, uint32_t value)
|
||||
{
|
||||
mmio_write_word((mmio_read_word(addr) & ~mask) | value, addr);
|
||||
}
|
||||
|
||||
/** Reads an 8 bit memory mapped IO register, masks it, and writes the result
|
||||
* back to the memory mapped IO register.
|
||||
*
|
||||
* @param addr The address of the memory mapped IO register.
|
||||
* @param mask Bits set in this mask are cleared in the value read before 'value' is OR'd in.
|
||||
* @param value The 8 bit value to write.
|
||||
*/
|
||||
static inline void setb(mmio_addr_t addr, uint32_t mask, uint32_t value)
|
||||
{
|
||||
mmio_write_byte((mmio_read_byte(addr) & ~mask) | value, addr);
|
||||
}
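/* Illustrative example (not part of the original header): setl() clears the
* bits in 'mask' and then ORs in 'value', so updating a hypothetical 4 bit
* field stored in bits 7..4 of a register could look like
*
*   setl(reg_addr, 0x000000F0, (new_field & 0xF) << 4);
*
* which behaves the same as mmio_rmw_long((new_field & 0xF) << 4,
* 0x000000F0, reg_addr), only with the address passed first.
*/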
|
||||
|
||||
/* MMIO memory access types */
|
||||
enum mem_io_type {
|
||||
HV_MEM_IO_READ = 0,
|
||||
HV_MEM_IO_WRITE,
|
||||
};
|
||||
|
||||
/* MMIO emulation related structures */
|
||||
#define MMIO_TRANS_VALID 1
|
||||
#define MMIO_TRANS_INVALID 0
|
||||
struct mem_io {
|
||||
uint64_t paddr; /* Physical address being accessed */
|
||||
enum mem_io_type read_write; /* 0 = read / 1 = write operation */
|
||||
uint8_t access_size; /* Access size being emulated */
|
||||
uint8_t sign_extend_read; /* 1 if sign extension required for read */
|
||||
uint64_t value; /* Value read or value to write */
|
||||
uint8_t mmio_status; /* Indicates if this MMIO transaction is valid */
|
||||
/* Used to store emulation context for this mmio transaction */
|
||||
void *private_data;
|
||||
};
|
||||
|
||||
#endif /* _IO_H defined */
|
57
hypervisor/include/arch/x86/ioapic.h
Normal file
@@ -0,0 +1,57 @@
|
||||
/*
|
||||
* Copyright (C) 2018 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Intel Corporation nor the names of its
|
||||
* contributors may be used to endorse or promote products derived
|
||||
* from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef IOAPIC_H
|
||||
#define IOAPIC_H
|
||||
|
||||
/* IOAPIC_MAX_LINES is architecturally defined.
|
||||
* The usable RTEs may be a subset of the total on a per IO APIC basis.
|
||||
*/
|
||||
#define IOAPIC_MAX_LINES 120
|
||||
#define NR_LEGACY_IRQ 16
|
||||
#define NR_MAX_GSI (NR_IOAPICS*IOAPIC_MAX_LINES)
|
||||
|
||||
#define GSI_MASK_IRQ(irq) irq_gsi_mask_unmask((irq), true)
|
||||
#define GSI_UNMASK_IRQ(irq) irq_gsi_mask_unmask((irq), false)
|
||||
#define GSI_SET_RTE(irq, rte) ioapic_set_rte((irq), (rte))
|
||||
|
||||
void setup_ioapic_irq(void);
|
||||
int get_ioapic_info(char *str, int str_max_len);
|
||||
|
||||
bool irq_is_gsi(int irq);
|
||||
int irq_gsi_num(void);
|
||||
int irq_to_pin(int irq);
|
||||
int pin_to_irq(int pin);
|
||||
void irq_gsi_mask_unmask(int irq, bool mask);
|
||||
void ioapic_set_rte(int irq, uint64_t rte);
|
||||
void ioapic_get_rte(int irq, uint64_t *rte);
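/* Illustrative sketch (not part of the original header): a typical RTE update
* masks the GSI first, rewrites the redirection entry, and then unmasks it.
*
*   uint64_t rte;
*
*   GSI_MASK_IRQ(irq);
*   ioapic_get_rte(irq, &rte);
*   ...modify fields of rte, e.g. the vector...
*   GSI_SET_RTE(irq, rte);
*   GSI_UNMASK_IRQ(irq);
*/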
|
||||
|
||||
extern uint16_t legacy_irq_to_pin[];
|
||||
#endif /* IOAPIC_H */
|
164
hypervisor/include/arch/x86/irq.h
Normal file
@@ -0,0 +1,164 @@
|
||||
/*
|
||||
* Copyright (C) 2018 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Intel Corporation nor the names of its
|
||||
* contributors may be used to endorse or promote products derived
|
||||
* from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef IRQ_H
|
||||
#define IRQ_H
|
||||
|
||||
/* vectors for normal, usually for devices */
|
||||
#define VECTOR_FOR_NOR_LOWPRI_START 0x20
|
||||
#define VECTOR_FOR_NOR_LOWPRI_END 0x7F
|
||||
#define VECTOR_FOR_NOR_HIGHPRI_START 0x80
|
||||
#define VECTOR_FOR_NOR_HIGHPRI_END 0xDF
|
||||
#define VECTOR_FOR_NOR_END VECTOR_FOR_NOR_HIGHPRI_END
|
||||
|
||||
#define VECTOR_FOR_INTR_START VECTOR_FOR_NOR_LOWPRI_START
|
||||
|
||||
/* vectors for priority, usually for HV service */
|
||||
#define VECTOR_FOR_PRI_START 0xE0
|
||||
#define VECTOR_FOR_PRI_END 0xFF
|
||||
#define VECTOR_TIMER 0xEF
|
||||
#define VECTOR_NOTIFY_VCPU 0xF0
|
||||
#define VECTOR_VIRT_IRQ_VHM 0xF7
|
||||
#define VECTOR_SPURIOUS 0xFF
|
||||
|
||||
#define NR_MAX_VECTOR 0xFF
|
||||
#define VECTOR_INVALID (NR_MAX_VECTOR + 1)
|
||||
#define IRQ_INVALID (NR_MAX_IRQS+1)
|
||||
|
||||
#define NR_MAX_IRQS (256+16)
|
||||
#define DEFAULT_DEST_MODE IOAPIC_RTE_DESTLOG
|
||||
#define DEFAULT_DELIVERY_MODE IOAPIC_RTE_DELLOPRI
|
||||
#define ALL_CPUS_MASK ((1 << phy_cpu_num) - 1)
|
||||
|
||||
struct irq_desc;
|
||||
|
||||
enum irq_mode {
|
||||
IRQ_PULSE,
|
||||
IRQ_ASSERT,
|
||||
IRQ_DEASSERT,
|
||||
};
|
||||
|
||||
enum irq_state {
|
||||
IRQ_NOT_ASSIGNED = 0,
|
||||
IRQ_ASSIGNED_SHARED,
|
||||
IRQ_ASSIGNED_NOSHARE,
|
||||
};
|
||||
|
||||
enum irq_desc_state {
|
||||
IRQ_DESC_PENDING,
|
||||
IRQ_DESC_IN_PROCESS,
|
||||
};
|
||||
|
||||
typedef int (*dev_handler_t)(int irq, void*);
|
||||
struct dev_handler_node {
|
||||
char name[32];
|
||||
void *dev_data;
|
||||
dev_handler_t dev_handler;
|
||||
struct dev_handler_node *next;
|
||||
struct irq_desc *desc;
|
||||
};
|
||||
|
||||
struct irq_routing_entry {
|
||||
unsigned short bdf; /* BDF */
|
||||
int irq; /* PCI cfg offset 0x3C: IRQ pin */
|
||||
int intx; /* PCI cfg offset 0x3D: 0-3 = INTA,INTB,INTC,INTD*/
|
||||
};
|
||||
|
||||
int irq_mark_used(int irq);
|
||||
int irq_alloc(void);
|
||||
|
||||
int irq_desc_alloc_vector(int irq, bool lowpri);
|
||||
void irq_desc_try_free_vector(int irq);
|
||||
|
||||
int irq_to_vector(int irq);
|
||||
int dev_to_irq(struct dev_handler_node *node);
|
||||
int dev_to_vector(struct dev_handler_node *node);
|
||||
|
||||
int handle_level_interrupt_common(struct irq_desc *desc, void *handler_data);
|
||||
int common_handler_edge(struct irq_desc *desc, void *handler_data);
|
||||
int common_dev_handler_level(struct irq_desc *desc, void *handler_data);
|
||||
int quick_handler_nolock(struct irq_desc *desc, void *handler_data);
|
||||
|
||||
typedef int (*irq_handler_t)(struct irq_desc*, void*);
|
||||
void update_irq_handler(int irq, irq_handler_t func);
|
||||
|
||||
int init_default_irqs(unsigned int cpu);
|
||||
|
||||
int dispatch_interrupt(struct intr_ctx *ctx);
|
||||
|
||||
struct dev_handler_node*
|
||||
pri_register_handler(int irq,
|
||||
int vector,
|
||||
dev_handler_t func,
|
||||
void *dev_data,
|
||||
const char *name);
|
||||
|
||||
struct dev_handler_node*
|
||||
normal_register_handler(int irq,
|
||||
dev_handler_t func,
|
||||
void *dev_data,
|
||||
bool share,
|
||||
bool lowpri,
|
||||
const char *name);
|
||||
void unregister_handler_common(struct dev_handler_node *node);
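/* Illustrative sketch (not part of the original header): a device interrupt
* handler matches dev_handler_t and is attached with normal_register_handler().
* 'my_uart_isr', 'my_uart' and the flag choices are hypothetical.
*
*   static int my_uart_isr(int irq, void *data)
*   {
*           struct my_uart *uart = data;
*           ...acknowledge and service the device...
*           return 0;
*   }
*
*   node = normal_register_handler(irq, my_uart_isr, my_uart,
*                                  true, true, "my_uart");
*   ...
*   unregister_handler_common(node);
*/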
|
||||
|
||||
int get_cpu_interrupt_info(char *str, int str_max);
|
||||
|
||||
void setup_notification(void);
|
||||
|
||||
typedef int (*spurious_handler_t)(int);
|
||||
extern spurious_handler_t spurious_handler;
|
||||
|
||||
/*
|
||||
* Some MSI message definitions
|
||||
*/
|
||||
#define MSI_ADDR_MASK 0xfff00000
|
||||
#define MSI_ADDR_BASE 0xfee00000
|
||||
#define MSI_ADDR_RH 0x00000008 /* Redirection Hint */
|
||||
#define MSI_ADDR_LOG 0x00000004 /* Destination Mode */
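/* Illustrative sketch (not part of the original header): per the Intel SDM the
* destination APIC ID occupies bits 19:12 of the MSI address, so a physical
* mode, directed delivery address could be composed as
*
*   uint32_t msi_addr = MSI_ADDR_BASE | (dest_apic_id << 12);
*
* with MSI_ADDR_RH and MSI_ADDR_LOG OR'd in only when the redirection hint or
* logical destination mode is wanted.
*/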
|
||||
|
||||
/* RFLAGS */
|
||||
#define HV_ARCH_VCPU_RFLAGS_IF (1<<9)
|
||||
|
||||
/* Interruptability State info */
|
||||
#define HV_ARCH_VCPU_BLOCKED_BY_MOVSS (1<<1)
|
||||
#define HV_ARCH_VCPU_BLOCKED_BY_STI (1<<0)
|
||||
|
||||
int vcpu_inject_extint(struct vcpu *vcpu);
|
||||
int vcpu_inject_nmi(struct vcpu *vcpu);
|
||||
int vcpu_inject_gp(struct vcpu *vcpu);
|
||||
int vcpu_make_request(struct vcpu *vcpu, int eventid);
|
||||
|
||||
int exception_handler(struct vcpu *vcpu);
|
||||
int interrupt_win_exiting_handler(struct vcpu *vcpu);
|
||||
int external_interrupt_handler(struct vcpu *vcpu);
|
||||
int acrn_do_intr_process(struct vcpu *vcpu);
|
||||
int interrupt_init(uint32_t logical_id);
|
||||
#endif /* IRQ_H */
|
174
hypervisor/include/arch/x86/lapic.h
Normal file
@@ -0,0 +1,174 @@
|
||||
/*
|
||||
* Copyright (C) 2018 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Intel Corporation nor the names of its
|
||||
* contributors may be used to endorse or promote products derived
|
||||
* from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef INTR_LAPIC_H
|
||||
#define INTR_LAPIC_H
|
||||
|
||||
#define DEBUG_LAPIC 0
|
||||
|
||||
enum intr_lapic_icr_delivery_mode {
|
||||
INTR_LAPIC_ICR_FIXED = 0x0,
|
||||
INTR_LAPIC_ICR_LP = 0x1,
|
||||
INTR_LAPIC_ICR_SMI = 0x2,
|
||||
INTR_LAPIC_ICR_NMI = 0x4,
|
||||
INTR_LAPIC_ICR_INIT = 0x5,
|
||||
INTR_LAPIC_ICR_STARTUP = 0x6,
|
||||
};
|
||||
|
||||
enum intr_lapic_icr_dest_mode {
|
||||
INTR_LAPIC_ICR_PHYSICAL = 0x0,
|
||||
INTR_LAPIC_ICR_LOGICAL = 0x1
|
||||
};
|
||||
|
||||
enum intr_lapic_icr_level {
|
||||
INTR_LAPIC_ICR_DEASSERT = 0x0,
|
||||
INTR_LAPIC_ICR_ASSERT = 0x1,
|
||||
};
|
||||
|
||||
enum intr_lapic_icr_trigger {
|
||||
INTR_LAPIC_ICR_EDGE = 0x0,
|
||||
INTR_LAPIC_ICR_LEVEL = 0x1,
|
||||
};
|
||||
|
||||
enum intr_lapic_icr_shorthand {
|
||||
INTR_LAPIC_ICR_USE_DEST_ARRAY = 0x0,
|
||||
INTR_LAPIC_ICR_SELF = 0x1,
|
||||
INTR_LAPIC_ICR_ALL_INC_SELF = 0x2,
|
||||
INTR_LAPIC_ICR_ALL_EX_SELF = 0x3,
|
||||
};
|
||||
|
||||
/* Default LAPIC base */
|
||||
#define LAPIC_BASE 0xFEE00000
|
||||
|
||||
/* LAPIC register offset for memory mapped IO access */
|
||||
#define LAPIC_ID_REGISTER 0x00000020
|
||||
#define LAPIC_VERSION_REGISTER 0x00000030
|
||||
#define LAPIC_TASK_PRIORITY_REGISTER 0x00000080
|
||||
#define LAPIC_ARBITRATION_PRIORITY_REGISTER 0x00000090
|
||||
#define LAPIC_PROCESSOR_PRIORITY_REGISTER 0x000000A0
|
||||
#define LAPIC_EOI_REGISTER 0x000000B0
|
||||
#define LAPIC_REMOTE_READ_REGISTER 0x000000C0
|
||||
#define LAPIC_LOGICAL_DESTINATION_REGISTER 0x000000D0
|
||||
#define LAPIC_DESTINATION_FORMAT_REGISTER 0x000000E0
|
||||
#define LAPIC_SPURIOUS_VECTOR_REGISTER 0x000000F0
|
||||
#define LAPIC_IN_SERVICE_REGISTER_0 0x00000100
|
||||
#define LAPIC_IN_SERVICE_REGISTER_1 0x00000110
|
||||
#define LAPIC_IN_SERVICE_REGISTER_2 0x00000120
|
||||
#define LAPIC_IN_SERVICE_REGISTER_3 0x00000130
|
||||
#define LAPIC_IN_SERVICE_REGISTER_4 0x00000140
|
||||
#define LAPIC_IN_SERVICE_REGISTER_5 0x00000150
|
||||
#define LAPIC_IN_SERVICE_REGISTER_6 0x00000160
|
||||
#define LAPIC_IN_SERVICE_REGISTER_7 0x00000170
|
||||
#define LAPIC_TRIGGER_MODE_REGISTER_0 0x00000180
|
||||
#define LAPIC_TRIGGER_MODE_REGISTER_1 0x00000190
|
||||
#define LAPIC_TRIGGER_MODE_REGISTER_2 0x000001A0
|
||||
#define LAPIC_TRIGGER_MODE_REGISTER_3 0x000001B0
|
||||
#define LAPIC_TRIGGER_MODE_REGISTER_4 0x000001C0
|
||||
#define LAPIC_TRIGGER_MODE_REGISTER_5 0x000001D0
|
||||
#define LAPIC_TRIGGER_MODE_REGISTER_6 0x000001E0
|
||||
#define LAPIC_TRIGGER_MODE_REGISTER_7 0x000001F0
|
||||
#define LAPIC_INT_REQUEST_REGISTER_0 0x00000200
|
||||
#define LAPIC_INT_REQUEST_REGISTER_1 0x00000210
|
||||
#define LAPIC_INT_REQUEST_REGISTER_2 0x00000220
|
||||
#define LAPIC_INT_REQUEST_REGISTER_3 0x00000230
|
||||
#define LAPIC_INT_REQUEST_REGISTER_4 0x00000240
|
||||
#define LAPIC_INT_REQUEST_REGISTER_5 0x00000250
|
||||
#define LAPIC_INT_REQUEST_REGISTER_6 0x00000260
|
||||
#define LAPIC_INT_REQUEST_REGISTER_7 0x00000270
|
||||
#define LAPIC_ERROR_STATUS_REGISTER 0x00000280
|
||||
#define LAPIC_LVT_CMCI_REGISTER 0x000002F0
|
||||
#define LAPIC_INT_COMMAND_REGISTER_0 0x00000300
|
||||
#define LAPIC_INT_COMMAND_REGISTER_1 0x00000310
|
||||
#define LAPIC_LVT_TIMER_REGISTER 0x00000320
|
||||
#define LAPIC_LVT_THERMAL_SENSOR_REGISTER 0x00000330
|
||||
#define LAPIC_LVT_PMC_REGISTER 0x00000340
|
||||
#define LAPIC_LVT_LINT0_REGISTER 0x00000350
|
||||
#define LAPIC_LVT_LINT1_REGISTER 0x00000360
|
||||
#define LAPIC_LVT_ERROR_REGISTER 0x00000370
|
||||
#define LAPIC_INITIAL_COUNT_REGISTER 0x00000380
|
||||
#define LAPIC_CURRENT_COUNT_REGISTER 0x00000390
|
||||
#define LAPIC_DIVIDE_CONFIGURATION_REGISTER 0x000003E0
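/* Illustrative example (not part of the original header): in xAPIC mode each
* offset above is added to LAPIC_BASE to form the register's MMIO address,
* e.g. an EOI is signalled by writing 0 to the EOI register:
*
*   mmio_write_long(0, (mmio_addr_t)(LAPIC_BASE + LAPIC_EOI_REGISTER));
*/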
|
||||
|
||||
/* LAPIC CPUID bit and bitmask definitions */
|
||||
#define CPUID_OUT_RDX_APIC_PRESENT ((uint64_t) 1 << 9)
|
||||
#define CPUID_OUT_RCX_X2APIC_PRESENT ((uint64_t) 1 << 21)
|
||||
|
||||
/* LAPIC MSR bit and bitmask definitions */
|
||||
#define MSR_01B_XAPIC_GLOBAL_ENABLE ((uint64_t) 1 << 11)
|
||||
|
||||
/* LAPIC register bit and bitmask definitions */
|
||||
#define LAPIC_SVR_VECTOR 0x000000FF
|
||||
#define LAPIC_SVR_APIC_ENABLE_MASK 0x00000100
|
||||
|
||||
#define LAPIC_LVT_MASK 0x00010000
|
||||
#define LAPIC_DELIVERY_MODE_EXTINT_MASK 0x00000700
|
||||
|
||||
/* LAPIC Timer bit and bitmask definitions */
|
||||
#define LAPIC_TMR_ONESHOT ((uint32_t) 0x0 << 17)
|
||||
#define LAPIC_TMR_PERIODIC ((uint32_t) 0x1 << 17)
|
||||
#define LAPIC_TMR_TSC_DEADLINE ((uint32_t) 0x2 << 17)
|
||||
|
||||
enum intr_cpu_startup_shorthand {
|
||||
INTR_CPU_STARTUP_USE_DEST,
|
||||
INTR_CPU_STARTUP_ALL_EX_SELF,
|
||||
INTR_CPU_STARTUP_UNKNOWN,
|
||||
};
|
||||
|
||||
union lapic_id {
|
||||
uint32_t value;
|
||||
struct {
|
||||
uint8_t xapic_id;
|
||||
uint8_t rsvd[3];
|
||||
} xapic;
|
||||
union {
|
||||
uint32_t value;
|
||||
struct {
|
||||
uint8_t xapic_id;
|
||||
uint8_t xapic_edid;
|
||||
uint8_t rsvd[2];
|
||||
} ioxapic_view;
|
||||
struct {
|
||||
uint32_t x2apic_id:4;
|
||||
uint32_t x2apic_cluster:28;
|
||||
} ldr_view;
|
||||
} x2apic;
|
||||
};
|
||||
|
||||
int early_init_lapic(void);
|
||||
int init_lapic(uint32_t cpu_id);
|
||||
int send_lapic_eoi(void);
|
||||
uint32_t get_cur_lapic_id(void);
|
||||
int send_startup_ipi(enum intr_cpu_startup_shorthand cpu_startup_shorthand,
|
||||
uint32_t cpu_startup_dest,
|
||||
paddr_t cpu_startup_start_address);
|
||||
/* API to send an IPI to a single pCPU */
|
||||
void send_single_ipi(uint32_t pcpu_id, uint32_t vector);
|
||||
|
||||
#endif /* INTR_LAPIC_H */
|
394
hypervisor/include/arch/x86/mmu.h
Normal file
@@ -0,0 +1,394 @@
|
||||
/*
|
||||
* Copyright (C) 2018 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Intel Corporation nor the names of its
|
||||
* contributors may be used to endorse or promote products derived
|
||||
* from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef MMU_H
|
||||
#define MMU_H
|
||||
|
||||
/* Size of all page-table entries (in bytes) */
|
||||
#define IA32E_COMM_ENTRY_SIZE 8
|
||||
|
||||
/* Definitions common for all IA-32e related paging entries */
|
||||
#define IA32E_COMM_P_BIT 0x0000000000000001
|
||||
#define IA32E_COMM_RW_BIT 0x0000000000000002
|
||||
#define IA32E_COMM_US_BIT 0x0000000000000004
|
||||
#define IA32E_COMM_PWT_BIT 0x0000000000000008
|
||||
#define IA32E_COMM_PCD_BIT 0x0000000000000010
|
||||
#define IA32E_COMM_A_BIT 0x0000000000000020
|
||||
#define IA32E_COMM_XD_BIT 0x8000000000000000
|
||||
|
||||
/* Defines for EPT paging entries */
|
||||
#define IA32E_EPT_R_BIT 0x0000000000000001
|
||||
#define IA32E_EPT_W_BIT 0x0000000000000002
|
||||
#define IA32E_EPT_X_BIT 0x0000000000000004
|
||||
#define IA32E_EPT_UNCACHED (0<<3)
|
||||
#define IA32E_EPT_WC (1<<3)
|
||||
#define IA32E_EPT_WT (4<<3)
|
||||
#define IA32E_EPT_WP (5<<3)
|
||||
#define IA32E_EPT_WB (6<<3)
|
||||
#define IA32E_EPT_PAT_IGNORE 0x0000000000000040
|
||||
#define IA32E_EPT_ACCESS_FLAG 0x0000000000000100
|
||||
#define IA32E_EPT_DIRTY_FLAG 0x0000000000000200
|
||||
#define IA32E_EPT_SNOOP_CTRL 0x0000000000000800
|
||||
#define IA32E_EPT_SUPPRESS_VE 0x8000000000000000
|
||||
|
||||
/* Definitions common or ignored for all IA-32e related paging entries */
|
||||
#define IA32E_COMM_D_BIT 0x0000000000000040
|
||||
#define IA32E_COMM_G_BIT 0x0000000000000100
|
||||
|
||||
/* Definitions exclusive to a Page Map Level 4 Entry (PML4E) */
|
||||
#define IA32E_PML4E_INDEX_MASK_START 39
|
||||
#define IA32E_PML4E_ADDR_MASK 0x0000FF8000000000
|
||||
|
||||
/* Definitions exclusive to a Page Directory Pointer Table Entry (PDPTE) */
|
||||
#define IA32E_PDPTE_D_BIT 0x0000000000000040
|
||||
#define IA32E_PDPTE_PS_BIT 0x0000000000000080
|
||||
#define IA32E_PDPTE_PAT_BIT 0x0000000000001000
|
||||
#define IA32E_PDPTE_ADDR_MASK 0x0000FFFFC0000000
|
||||
#define IA32E_PDPTE_INDEX_MASK_START \
|
||||
(IA32E_PML4E_INDEX_MASK_START - IA32E_INDEX_MASK_BITS)
|
||||
|
||||
/* Definitions exclusive to a Page Directory Entry (PDE) 1G or 2M */
|
||||
#define IA32E_PDE_D_BIT 0x0000000000000040
|
||||
#define IA32E_PDE_PS_BIT 0x0000000000000080
|
||||
#define IA32E_PDE_PAT_BIT 0x0000000000001000
|
||||
#define IA32E_PDE_ADDR_MASK 0x0000FFFFFFE00000
|
||||
#define IA32E_PDE_INDEX_MASK_START \
|
||||
(IA32E_PDPTE_INDEX_MASK_START - IA32E_INDEX_MASK_BITS)
|
||||
|
||||
/* Definitions exclusive to Page Table Entries (PTE) */
|
||||
#define IA32E_PTE_D_BIT 0x0000000000000040
|
||||
#define IA32E_PTE_PAT_BIT 0x0000000000000080
|
||||
#define IA32E_PTE_G_BIT 0x0000000000000100
|
||||
#define IA32E_PTE_ADDR_MASK 0x0000FFFFFFFFF000
|
||||
#define IA32E_PTE_INDEX_MASK_START \
|
||||
(IA32E_PDE_INDEX_MASK_START - IA32E_INDEX_MASK_BITS)
|
||||
|
||||
/** The 'Present' bit in a 32 bit paging page directory entry */
|
||||
#define MMU_32BIT_PDE_P 0x00000001
|
||||
/** The 'Read/Write' bit in a 32 bit paging page directory entry */
|
||||
#define MMU_32BIT_PDE_RW 0x00000002
|
||||
/** The 'User/Supervisor' bit in a 32 bit paging page directory entry */
|
||||
#define MMU_32BIT_PDE_US 0x00000004
|
||||
/** The 'Page Write Through' bit in a 32 bit paging page directory entry */
|
||||
#define MMU_32BIT_PDE_PWT 0x00000008
|
||||
/** The 'Page Cache Disable' bit in a 32 bit paging page directory entry */
|
||||
#define MMU_32BIT_PDE_PCD 0x00000010
|
||||
/** The 'Accessed' bit in a 32 bit paging page directory entry */
|
||||
#define MMU_32BIT_PDE_A 0x00000020
|
||||
/** The 'Dirty' bit in a 32 bit paging page directory entry */
|
||||
#define MMU_32BIT_PDE_D 0x00000040
|
||||
/** The 'Page Size' bit in a 32 bit paging page directory entry */
|
||||
#define MMU_32BIT_PDE_PS 0x00000080
|
||||
/** The 'Global' bit in a 32 bit paging page directory entry */
|
||||
#define MMU_32BIT_PDE_G 0x00000100
|
||||
/** The 'PAT' bit in a page 32 bit paging directory entry */
|
||||
#define MMU_32BIT_PDE_PAT 0x00001000
|
||||
/** The flag that indicates that the page fault was caused by a non present
|
||||
* page.
|
||||
*/
|
||||
#define PAGE_FAULT_P_FLAG 0x00000001
|
||||
/** The flag that indicates that the page fault was caused by a write access. */
|
||||
#define PAGE_FAULT_WR_FLAG 0x00000002
|
||||
/** The flag that indicates that the page fault was caused in user mode. */
|
||||
#define PAGE_FAULT_US_FLAG 0x00000004
|
||||
/** The flag that indicates that the page fault was caused by a reserved bit
|
||||
* violation.
|
||||
*/
|
||||
#define PAGE_FAULT_RSVD_FLAG 0x00000008
|
||||
/** The flag that indicates that the page fault was caused by an instruction
|
||||
* fetch.
|
||||
*/
|
||||
#define PAGE_FAULT_ID_FLAG 0x00000010
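/* Illustrative sketch (not part of the original header): the flags above decode
* the error code pushed by the CPU on a page fault, e.g.
*
*   if ((err_code & PAGE_FAULT_P_FLAG) == 0)
*           ...fault caused by a non present page...
*   else if (err_code & PAGE_FAULT_WR_FLAG)
*           ...write access to a present, read-only page...
*/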
|
||||
|
||||
/* Defines used for common memory sizes */
|
||||
#define MEM_1K 1024UL
|
||||
#define MEM_2K (MEM_1K * 2UL)
|
||||
#define MEM_4K (MEM_1K * 4UL)
|
||||
#define MEM_8K (MEM_1K * 8UL)
|
||||
#define MEM_16K (MEM_1K * 16UL)
|
||||
#define MEM_32K (MEM_1K * 32UL)
|
||||
#define MEM_64K (MEM_1K * 64UL)
|
||||
#define MEM_128K (MEM_1K * 128UL)
|
||||
#define MEM_256K (MEM_1K * 256UL)
|
||||
#define MEM_512K (MEM_1K * 512UL)
|
||||
#define MEM_1M (MEM_1K * 1024UL)
|
||||
#define MEM_2M (MEM_1M * 2UL)
|
||||
#define MEM_4M (MEM_1M * 4UL)
|
||||
#define MEM_8M (MEM_1M * 8UL)
|
||||
#define MEM_16M (MEM_1M * 16UL)
|
||||
#define MEM_32M (MEM_1M * 32UL)
|
||||
#define MEM_64M (MEM_1M * 64UL)
|
||||
#define MEM_128M (MEM_1M * 128UL)
|
||||
#define MEM_256M (MEM_1M * 256UL)
|
||||
#define MEM_512M (MEM_1M * 512UL)
|
||||
#define MEM_1G (MEM_1M * 1024UL)
|
||||
#define MEM_2G (MEM_1G * 2UL)
|
||||
#define MEM_3G (MEM_1G * 3UL)
|
||||
#define MEM_4G (MEM_1G * 4UL)
|
||||
#define MEM_5G (MEM_1G * 5UL)
|
||||
#define MEM_6G (MEM_1G * 6UL)
|
||||
|
||||
#ifndef ASSEMBLER
|
||||
|
||||
/* Define cache line size (in bytes) */
|
||||
#define CACHE_LINE_SIZE 64
|
||||
|
||||
/* Size of all page structures for IA-32e */
|
||||
#define IA32E_STRUCT_SIZE MEM_4K
|
||||
|
||||
/* IA32E Paging constants */
|
||||
#define IA32E_INDEX_MASK_BITS 9
|
||||
#define IA32E_NUM_ENTRIES 512
|
||||
#define IA32E_INDEX_MASK (uint64_t)(IA32E_NUM_ENTRIES - 1)
|
||||
#define IA32E_REF_MASK 0x7FFFFFFFFFFFF000
|
||||
#define IA32E_FIRST_BLOCK_INDEX 1
|
||||
|
||||
/* Macro to get PML4 index given an address */
|
||||
#define IA32E_PML4E_INDEX_CALC(address) \
|
||||
(uint32_t)((((uint64_t)address >> IA32E_PML4E_INDEX_MASK_START) & \
|
||||
IA32E_INDEX_MASK) * sizeof(uint64_t))
|
||||
|
||||
/* Macro to get PDPT index given an address */
|
||||
#define IA32E_PDPTE_INDEX_CALC(address) \
|
||||
(uint32_t)((((uint64_t)address >> IA32E_PDPTE_INDEX_MASK_START) & \
|
||||
IA32E_INDEX_MASK) * sizeof(uint64_t))
|
||||
|
||||
/* Macro to get PD index given an address */
|
||||
#define IA32E_PDE_INDEX_CALC(address) \
|
||||
(uint32_t)((((uint64_t)address >> IA32E_PDE_INDEX_MASK_START) & \
|
||||
IA32E_INDEX_MASK) * sizeof(uint64_t))
|
||||
|
||||
/* Macro to get PT index given an address */
|
||||
#define IA32E_PTE_INDEX_CALC(address) \
|
||||
(uint32_t)((((uint64_t)address >> IA32E_PTE_INDEX_MASK_START) & \
|
||||
IA32E_INDEX_MASK) * sizeof(uint64_t))
|
||||
|
||||
/* Macro to obtain a 2 MB page offset from given linear address */
|
||||
#define IA32E_GET_2MB_PG_OFFSET(address) \
|
||||
(address & 0x001FFFFF)
|
||||
|
||||
/* Macro to obtain a 4KB page offset from given linear address */
|
||||
#define IA32E_GET_4KB_PG_OFFSET(address) \
|
||||
(address & 0x00000FFF)
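/* Illustrative example (not part of the original header): for a linear address
* such as 0x0000008040201000, IA32E_PML4E_INDEX_CALC() shifts right by 39 and
* keeps the low 9 bits, giving index 1, then multiplies by sizeof(uint64_t),
* so the result (8) is already a byte offset into the PML4 page.
*/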
|
||||
|
||||
/*
|
||||
* The following generic attributes MMU_MEM_ATTR_xxx may be OR'd with one
|
||||
* and only one of the MMU_MEM_ATTR_TYPE_xxx definitions
|
||||
*/
|
||||
|
||||
/* Generic memory attributes */
|
||||
#define MMU_MEM_ATTR_READ 0x00000001
|
||||
#define MMU_MEM_ATTR_WRITE 0x00000002
|
||||
#define MMU_MEM_ATTR_EXECUTE 0x00000004
|
||||
#define MMU_MEM_ATTR_USER 0x00000008
|
||||
#define MMU_MEM_ATTR_WB_CACHE 0x00000040
|
||||
#define MMU_MEM_ATTR_WT_CACHE 0x00000080
|
||||
#define MMU_MEM_ATTR_UNCACHED 0x00000100
|
||||
#define MMU_MEM_ATTR_WC 0x00000200
|
||||
#define MMU_MEM_ATTR_WP 0x00000400
|
||||
|
||||
/* Definitions for memory types related to x64 */
|
||||
#define MMU_MEM_ATTR_BIT_READ_WRITE IA32E_COMM_RW_BIT
|
||||
#define MMU_MEM_ATTR_BIT_USER_ACCESSIBLE IA32E_COMM_US_BIT
|
||||
#define MMU_MEM_ATTR_BIT_EXECUTE_DISABLE IA32E_COMM_XD_BIT
|
||||
|
||||
/* Selection of Page Attribute Table (PAT) entries with PAT, PCD and PWT
|
||||
* encoding. See also pat.h
|
||||
*/
|
||||
/* Selects PAT0 WB */
|
||||
#define MMU_MEM_ATTR_TYPE_CACHED_WB (0x0000000000000000)
|
||||
/* Selects PAT1 WT */
|
||||
#define MMU_MEM_ATTR_TYPE_CACHED_WT (IA32E_COMM_PWT_BIT)
|
||||
/* Selects PAT2 UCM */
|
||||
#define MMU_MEM_ATTR_TYPE_UNCACHED_MINUS (IA32E_COMM_PCD_BIT)
|
||||
/* Selects PAT3 UC */
|
||||
#define MMU_MEM_ATTR_TYPE_UNCACHED \
|
||||
(IA32E_COMM_PCD_BIT | IA32E_COMM_PWT_BIT)
|
||||
/* Selects PAT6 WC */
|
||||
#define MMU_MEM_ATTR_TYPE_WRITE_COMBINED \
|
||||
(IA32E_PDPTE_PAT_BIT | IA32E_COMM_PCD_BIT)
|
||||
/* Selects PAT7 WP */
|
||||
#define MMU_MEM_ATTR_TYPE_WRITE_PROTECTED \
|
||||
(IA32E_PDPTE_PAT_BIT | IA32E_COMM_PCD_BIT | IA32E_COMM_PWT_BIT)
|
||||
|
||||
#define ROUND_PAGE_UP(addr) (((addr) + CPU_PAGE_SIZE - 1) & IA32E_REF_MASK)
|
||||
#define ROUND_PAGE_DOWN(addr) ((addr) & IA32E_REF_MASK)
|
||||
|
||||
struct map_params {
|
||||
/* enum _page_table_type: HOST or EPT*/
|
||||
int page_table_type;
|
||||
/* used HVA->HPA for HOST, used GPA->HPA for EPT */
|
||||
void *pml4_base;
|
||||
/* used HPA->HVA for HOST, used HPA->GPA for EPT */
|
||||
void *pml4_inverted;
|
||||
};
|
||||
struct entry_params {
|
||||
uint32_t entry_level;
|
||||
uint32_t entry_present;
|
||||
uint64_t entry_base;
|
||||
uint64_t entry_off;
|
||||
uint64_t entry_val;
|
||||
uint64_t page_size;
|
||||
};
|
||||
|
||||
enum _page_table_type {
|
||||
PT_HOST = 0, /* Mapping for hypervisor */
|
||||
PT_EPT = 1,
|
||||
PAGETABLE_TYPE_UNKNOWN,
|
||||
};
|
||||
|
||||
/* Represent the 4 levels of translation tables in IA-32e paging mode */
|
||||
enum _page_table_level {
|
||||
IA32E_PML4 = 0,
|
||||
IA32E_PDPT = 1,
|
||||
IA32E_PD = 2,
|
||||
IA32E_PT = 3,
|
||||
IA32E_UNKNOWN,
|
||||
};
|
||||
|
||||
/* Page table entry present */
|
||||
enum _page_table_present {
|
||||
PT_NOT_PRESENT = 0,
|
||||
PT_PRESENT = 1,
|
||||
};
|
||||
|
||||
/* Page size */
|
||||
#define PAGE_SIZE_4K MEM_4K
|
||||
#define PAGE_SIZE_2M MEM_2M
|
||||
#define PAGE_SIZE_1G MEM_1G
|
||||
|
||||
/* Macros for reading/writing memory */
|
||||
#define MEM_READ8(addr) (*(volatile uint8_t *)(addr))
|
||||
#define MEM_WRITE8(addr, data) \
|
||||
(*(volatile uint8_t *)(addr) = (uint8_t)(data))
|
||||
#define MEM_READ16(addr) (*(volatile uint16_t *)(addr))
|
||||
#define MEM_WRITE16(addr, data) \
|
||||
(*(volatile uint16_t *)(addr) = (uint16_t)(data))
|
||||
#define MEM_READ32(addr) (*(volatile uint32_t *)(addr))
|
||||
#define MEM_WRITE32(addr, data) \
|
||||
(*(volatile uint32_t *)(addr) = (uint32_t)(data))
|
||||
#define MEM_READ64(addr) (*(volatile uint64_t *)(addr))
|
||||
#define MEM_WRITE64(addr, data) \
|
||||
(*(volatile uint64_t *)(addr) = (uint64_t)(data))
|
||||
|
||||
/* Typedef for MMIO handler and range check routine */
|
||||
typedef int(*hv_mem_io_handler_t)(struct vcpu *, struct mem_io *, void *);
|
||||
|
||||
/* Structure for MMIO handler node */
|
||||
struct mem_io_node {
|
||||
hv_mem_io_handler_t read_write;
|
||||
void *handler_private_data;
|
||||
struct list_head list;
|
||||
uint64_t range_start;
|
||||
uint64_t range_end;
|
||||
};
|
||||
|
||||
void *get_paging_pml4(void);
|
||||
void *alloc_paging_struct();
|
||||
void enable_paging(void *pml4_base_addr);
|
||||
void init_paging(void);
|
||||
void map_mem(struct map_params *map_params, void *paddr, void *vaddr,
|
||||
uint64_t size, uint32_t flags);
|
||||
void unmap_mem(struct map_params *map_params, void *paddr, void *vaddr,
|
||||
uint64_t size, uint32_t flags);
|
||||
void modify_mem(struct map_params *map_params, void *paddr, void *vaddr,
|
||||
uint64_t size, uint32_t flags);
|
||||
void mmu_invept(struct vcpu *vcpu);
|
||||
void obtain_last_page_table_entry(struct map_params *map_params,
|
||||
struct entry_params *entry, void *addr, bool direct);
|
||||
|
||||
int register_mmio_emulation_handler(struct vm *vm,
|
||||
hv_mem_io_handler_t read_write, uint64_t start,
|
||||
uint64_t end, void *handler_private_data);
|
||||
|
||||
void unregister_mmio_emulation_handler(struct vm *vm, uint64_t start,
|
||||
uint64_t end);
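/* Illustrative sketch (not part of the original header): an emulation handler
* matches hv_mem_io_handler_t and fills in mmio->value on reads. The guest
* physical range and 'my_dev' below are hypothetical.
*
*   static int my_dev_mmio(struct vcpu *vcpu, struct mem_io *mmio, void *data)
*   {
*           if (mmio->read_write == HV_MEM_IO_READ)
*                   mmio->value = 0;   (dummy value for this sketch)
*           return 0;
*   }
*
*   register_mmio_emulation_handler(vm, my_dev_mmio,
*                                   0xD0000000, 0xD0001000, my_dev);
*/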
|
||||
|
||||
#pragma pack(1)
|
||||
|
||||
/** Defines a single entry in an E820 memory map. */
|
||||
struct e820_entry {
|
||||
/** The base address of the memory range. */
|
||||
uint64_t baseaddr;
|
||||
/** The length of the memory range. */
|
||||
uint64_t length;
|
||||
/** The type of memory region. */
|
||||
uint32_t type;
|
||||
};
|
||||
|
||||
#pragma pack()
|
||||
|
||||
/* E820 memory types */
|
||||
#define E820_TYPE_RAM 1 /* EFI 1, 2, 3, 4, 5, 6, 7 */
|
||||
#define E820_TYPE_RESERVED 2
|
||||
/* EFI 0, 11, 12, 13 (everything not used elsewhere) */
|
||||
#define E820_TYPE_ACPI_RECLAIM 3 /* EFI 9 */
|
||||
#define E820_TYPE_ACPI_NVS 4 /* EFI 10 */
|
||||
#define E820_TYPE_UNUSABLE 5 /* EFI 8 */
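/* Illustrative sketch (not part of the original header): walking a hypothetical
* E820 map to total the usable RAM.
*
*   uint64_t ram = 0;
*   for (i = 0; i < nr_entries; i++) {
*           if (e820_map[i].type == E820_TYPE_RAM)
*                   ram += e820_map[i].length;
*   }
*/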
|
||||
|
||||
/** Calculates the page table address for a given address.
|
||||
*
|
||||
* @param pd The base address of the page directory.
|
||||
* @param vaddr The virtual address to calculate the page table address for.
|
||||
*
|
||||
* @return A pointer to the page table for the specified virtual address.
|
||||
*
|
||||
*/
|
||||
static inline void *mmu_pt_for_pde(uint32_t *pd, uint32_t vaddr)
|
||||
{
|
||||
return pd + ((vaddr >> 22) + 1) * 1024;
|
||||
}
|
||||
|
||||
#define CACHE_FLUSH_INVALIDATE_ALL() \
|
||||
{ \
|
||||
asm volatile (" wbinvd\n" : : : "memory"); \
|
||||
}
|
||||
|
||||
/* External variable declarations */
|
||||
extern uint8_t CPU_Boot_Page_Tables_Start_VM[];
|
||||
|
||||
/* External Interfaces */
|
||||
int is_ept_supported(void);
|
||||
void *create_guest_paging(struct vm *vm);
|
||||
void destroy_ept(struct vm *vm);
|
||||
uint64_t gpa2hpa(struct vm *vm, uint64_t gpa);
|
||||
uint64_t gpa2hpa_check(struct vm *vm, uint64_t gpa,
|
||||
uint64_t size, int *found, bool assert);
|
||||
uint64_t hpa2gpa(struct vm *vm, uint64_t hpa);
|
||||
int ept_mmap(struct vm *vm, uint64_t hpa,
|
||||
uint64_t gpa, uint64_t size, uint32_t type, uint32_t prot);
|
||||
|
||||
int ept_violation_handler(struct vcpu *vcpu);
|
||||
int ept_misconfig_handler(struct vcpu *vcpu);
|
||||
int dm_emulate_mmio_post(struct vcpu *vcpu);
|
||||
|
||||
#endif /* ASSEMBLER not defined */
|
||||
|
||||
#endif /* MMU_H */
|
563
hypervisor/include/arch/x86/msr.h
Normal file
@@ -0,0 +1,563 @@
|
||||
/*
|
||||
* Copyright (C) 2018 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Intel Corporation nor the names of its
|
||||
* contributors may be used to endorse or promote products derived
|
||||
* from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef MSR_H
|
||||
#define MSR_H
|
||||
|
||||
/* architectural (common) MSRs */
|
||||
#define MSR_IA32_P5_MC_ADDR 0x00000000
|
||||
/* Machine check address for MC exception handler */
|
||||
#define MSR_IA32_P5_MC_TYPE 0x00000001
|
||||
/* Machine check error type for MC exception handler */
|
||||
#define MSR_IA32_MONITOR_FILTER_SIZE 0x00000006
|
||||
/* System coherence line size for MWAIT/MONITOR */
|
||||
#define MSR_IA32_TIME_STAMP_COUNTER 0x00000010 /* TSC as MSR */
|
||||
#define MSR_IA32_PLATFORM_ID 0x00000017 /* Platform ID */
|
||||
#define MSR_IA32_APIC_BASE 0x0000001B
|
||||
/* Information about LAPIC */
|
||||
#define MSR_IA32_FEATURE_CONTROL 0x0000003A
|
||||
/* Control Features in Intel 64 processor */
|
||||
#define MSR_IA32_SPEC_CTRL 0x00000048
|
||||
/* Speculation Control */
|
||||
#define MSR_IA32_PRED_CMD 0x00000049
|
||||
/* Prediction Command */
|
||||
#define MSR_IA32_ADJUST_TSC 0x0000003B /* Adjust TSC value */
|
||||
#define MSR_IA32_BIOS_UPDT_TRIG 0x00000079
|
||||
/* BIOS update trigger */
|
||||
#define MSR_IA32_BIOS_SIGN_ID 0x0000008B
|
||||
/* BIOS update signature */
|
||||
#define MSR_IA32_SMM_MONITOR_CTL 0x0000009B
|
||||
/* SMM monitor configuration */
|
||||
#define MSR_IA32_PMC0 0x000000C1
|
||||
/* General performance counter 0 */
|
||||
#define MSR_IA32_PMC1 0x000000C2
|
||||
/* General performance counter 1 */
|
||||
#define MSR_IA32_PMC2 0x000000C3
|
||||
/* General performance counter 2 */
|
||||
#define MSR_IA32_PMC3 0x000000C4
|
||||
/* General performance counter 3 */
|
||||
#define MSR_IA32_MPERF 0x000000E7
|
||||
/* Max. qualified performance clock counter */
|
||||
#define MSR_IA32_APERF 0x000000E8
|
||||
/* Actual performance clock counter */
|
||||
#define MSR_IA32_MTRR_CAP 0x000000FE /* MTRR capability */
|
||||
#define MSR_IA32_SYSENTER_CS 0x00000174 /* CS for sysenter */
|
||||
#define MSR_IA32_SYSENTER_ESP 0x00000175 /* ESP for sysenter */
|
||||
#define MSR_IA32_SYSENTER_EIP 0x00000176 /* EIP for sysenter */
|
||||
#define MSR_IA32_MCG_CAP 0x00000179
|
||||
/* Global machine check capability */
|
||||
#define MSR_IA32_MCG_STATUS 0x0000017A
|
||||
/* Global machine check status */
|
||||
#define MSR_IA32_MCG_CTL 0x0000017B
|
||||
/* Global machine check control */
|
||||
#define MSR_IA32_PERFEVTSEL0 0x00000186
|
||||
/* Performance Event Select Register 0 */
|
||||
#define MSR_IA32_PERFEVTSEL1 0x00000187
|
||||
/* Performance Event Select Register 1 */
|
||||
#define MSR_IA32_PERFEVTSEL2 0x00000188
|
||||
/* Performance Event Select Register 2 */
|
||||
#define MSR_IA32_PERFEVTSEL3 0x00000189
|
||||
/* Performance Event Select Register 3 */
|
||||
#define MSR_IA32_PERF_STATUS 0x00000198
|
||||
/* Current performance state */
|
||||
#define MSR_IA32_PERF_CTL 0x00000199
|
||||
/* Performance control */
|
||||
#define MSR_IA32_CLOCK_MODULATION 0x0000019A
|
||||
/* Clock modulation control */
|
||||
#define MSR_IA32_THERM_INTERRUPT 0x0000019B
|
||||
/* Thermal interrupt control */
|
||||
#define MSR_IA32_THERM_STATUS 0x0000019C
|
||||
/* Thermal status information */
|
||||
#define MSR_IA32_MISC_ENABLE 0x000001A0
|
||||
/* Enable misc. processor features */
|
||||
#define MSR_IA32_ENERGY_PERF_BIAS 0x000001B0
|
||||
/* Performance energy bias hint */
|
||||
#define MSR_IA32_DEBUGCTL 0x000001D9
|
||||
/* Trace/Profile resource control */
|
||||
#define MSR_IA32_SMRR_PHYSBASE 0x000001F2 /* SMRR base address */
|
||||
#define MSR_IA32_SMRR_PHYSMASK 0x000001F3 /* SMRR range mask */
|
||||
#define MSR_IA32_PLATFORM_DCA_CAP 0x000001F8 /* DCA capability */
|
||||
#define MSR_IA32_CPU_DCA_CAP 0x000001F9
|
||||
/* Prefetch hint type capability */
|
||||
#define MSR_IA32_DCA_0_CAP 0x000001FA
|
||||
/* DCA type 0 status/control */
|
||||
#define MSR_IA32_MTRR_PHYSBASE_0 0x00000200
|
||||
/* variable range MTRR base 0 */
|
||||
#define MSR_IA32_MTRR_PHYSMASK_0 0x00000201
|
||||
/* variable range MTRR mask 0 */
|
||||
#define MSR_IA32_MTRR_PHYSBASE_1 0x00000202
|
||||
/* variable range MTRR base 1 */
|
||||
#define MSR_IA32_MTRR_PHYSMASK_1 0x00000203
|
||||
/* variable range MTRR mask 1 */
|
||||
#define MSR_IA32_MTRR_PHYSBASE_2 0x00000204
|
||||
/* variable range MTRR base 2 */
|
||||
#define MSR_IA32_MTRR_PHYSMASK_2 0x00000205
|
||||
/* variable range MTRR mask 2 */
|
||||
#define MSR_IA32_MTRR_PHYSBASE_3 0x00000206
|
||||
/* variable range MTRR base 3 */
|
||||
#define MSR_IA32_MTRR_PHYSMASK_3 0x00000207
|
||||
/* variable range MTRR mask 3 */
|
||||
#define MSR_IA32_MTRR_PHYSBASE_4 0x00000208
|
||||
/* variable range MTRR base 4 */
|
||||
#define MSR_IA32_MTRR_PHYSMASK_4 0x00000209
|
||||
/* variable range MTRR mask 4 */
|
||||
#define MSR_IA32_MTRR_PHYSBASE_5 0x0000020A
|
||||
/* variable range MTRR base 5 */
|
||||
#define MSR_IA32_MTRR_PHYSMASK_5 0x0000020B
|
||||
/* variable range MTRR mask 5 */
|
||||
#define MSR_IA32_MTRR_PHYSBASE_6 0x0000020C
|
||||
/* variable range MTRR base 6 */
|
||||
#define MSR_IA32_MTRR_PHYSMASK_6 0x0000020D
|
||||
/* variable range MTRR mask 6 */
|
||||
#define MSR_IA32_MTRR_PHYSBASE_7 0x0000020E
|
||||
/* variable range MTRR base 7 */
|
||||
#define MSR_IA32_MTRR_PHYSMASK_7 0x0000020F
|
||||
/* variable range MTRR mask 7 */
|
||||
#define MSR_IA32_MTRR_PHYSBASE_8 0x00000210
|
||||
/* variable range MTRR base 8 */
|
||||
#define MSR_IA32_MTRR_PHYSMASK_8 0x00000211
|
||||
/* variable range MTRR mask 8 */
|
||||
#define MSR_IA32_MTRR_PHYSBASE_9 0x00000212
|
||||
/* variable range MTRR base 9 */
|
||||
#define MSR_IA32_MTRR_PHYSMASK_9 0x00000213
|
||||
/* variable range MTRR mask 9 */
|
||||
#define MSR_IA32_MTRR_FIX64K_00000 0x00000250
|
||||
/* fixed range MTRR 64K/0x00000 */
|
||||
#define MSR_IA32_MTRR_FIX16K_80000 0x00000258
|
||||
/* fixed range MTRR 16K/0x80000 */
|
||||
#define MSR_IA32_MTRR_FIX16K_A0000 0x00000259
|
||||
/* fixed range MTRR 16K/0xA0000 */
|
||||
#define MSR_IA32_MTRR_FIX4K_C0000 0x00000268
|
||||
/* fixed range MTRR 4K/0xC0000 */
|
||||
#define MSR_IA32_MTRR_FIX4K_C8000 0x00000269
|
||||
/* fixed range MTRR 4K/0xC8000 */
|
||||
#define MSR_IA32_MTRR_FIX4K_D0000 0x0000026A
|
||||
/* fixed range MTRR 4K/0xD0000 */
|
||||
#define MSR_IA32_MTRR_FIX4K_D8000 0x0000026B
|
||||
/* fixed range MTRR 4K/0xD8000 */
|
||||
#define MSR_IA32_MTRR_FIX4K_E0000 0x0000026C
|
||||
/* fixed range MTRR 4K/0xE0000 */
|
||||
#define MSR_IA32_MTRR_FIX4K_E8000 0x0000026D
|
||||
/* fixed range MTRR 4K/0xE8000 */
|
||||
#define MSR_IA32_MTRR_FIX4K_F0000 0x0000026E
|
||||
/* fixed range MTRR 4K/0xF0000 */
|
||||
#define MSR_IA32_MTRR_FIX4K_F8000 0x0000026F
|
||||
/* fixed range MTRR 4K/0xF8000 */
|
||||
#define MSR_IA32_PAT 0x00000277 /* PAT */
|
||||
#define MSR_IA32_MC0_CTL2 0x00000280
|
||||
/* Corrected error count threshold 0 */
|
||||
#define MSR_IA32_MC1_CTL2 0x00000281
|
||||
/* Corrected error count threshold 1 */
|
||||
#define MSR_IA32_MC2_CTL2 0x00000282
|
||||
/* Corrected error count threshold 2 */
|
||||
#define MSR_IA32_MC3_CTL2 0x00000283
|
||||
/* Corrected error count threshold 3 */
|
||||
#define MSR_IA32_MC4_CTL2 0x00000284
|
||||
/* Corrected error count threshold 4 */
|
||||
#define MSR_IA32_MC5_CTL2 0x00000285
|
||||
/* Corrected error count threshold 5 */
|
||||
#define MSR_IA32_MC6_CTL2 0x00000286
|
||||
/* Corrected error count threshold 6 */
|
||||
#define MSR_IA32_MC7_CTL2 0x00000287
|
||||
/* Corrected error count threshold 7 */
|
||||
#define MSR_IA32_MC8_CTL2 0x00000288
|
||||
/* Corrected error count threshold 8 */
|
||||
#define MSR_IA32_MC9_CTL2 0x00000289
|
||||
/* Corrected error count threshold 9 */
|
||||
#define MSR_IA32_MC10_CTL2 0x0000028A
|
||||
/* Corrected error count threshold 10 */
|
||||
#define MSR_IA32_MC11_CTL2 0x0000028B
|
||||
/* Corrected error count threshold 11 */
|
||||
#define MSR_IA32_MC12_CTL2 0x0000028C
|
||||
/* Corrected error count threshold 12 */
|
||||
#define MSR_IA32_MC13_CTL2 0x0000028D
|
||||
/* Corrected error count threshold 13 */
|
||||
#define MSR_IA32_MC14_CTL2 0x0000028E
|
||||
/* Corrected error count threshold 14 */
|
||||
#define MSR_IA32_MC15_CTL2 0x0000028F
|
||||
/* Corrected error count threshold 15 */
|
||||
#define MSR_IA32_MC16_CTL2 0x00000290
|
||||
/* Corrected error count threshold 16 */
|
||||
#define MSR_IA32_MC17_CTL2 0x00000291
|
||||
/* Corrected error count threshold 17 */
|
||||
#define MSR_IA32_MC18_CTL2 0x00000292
|
||||
/* Corrected error count threshold 18 */
|
||||
#define MSR_IA32_MC19_CTL2 0x00000293
|
||||
/* Corrected error count threshold 19 */
|
||||
#define MSR_IA32_MC20_CTL2 0x00000294
|
||||
/* Corrected error count threshold 20 */
|
||||
#define MSR_IA32_MC21_CTL2 0x00000295
|
||||
/* Corrected error count threshold 21 */
|
||||
#define MSR_IA32_MTRR_DEF_TYPE 0x000002FF
|
||||
/* Default memory type/MTRR control */
|
||||
#define MSR_IA32_FIXED_CTR0 0x00000309
|
||||
/* Fixed-function performance counter 0 */
|
||||
#define MSR_IA32_FIXED_CTR1 0x0000030A
|
||||
/* Fixed-function performance counter 1 */
|
||||
#define MSR_IA32_FIXED_CTR2 0x0000030B
|
||||
/* Fixed-function performance counter 2 */
|
||||
#define MSR_IA32_PERF_CAPABILITIES 0x00000345
|
||||
/* Performance capability */
|
||||
#define MSR_IA32_FIXED_CTR_CTL 0x0000038D
|
||||
/* Fixed-function performance counter control */
|
||||
#define MSR_IA32_PERF_GLOBAL_STATUS 0x0000038E
|
||||
/* Global performance counter status */
|
||||
#define MSR_IA32_PERF_GLOBAL_CTRL 0x0000038F
|
||||
/* Global performance counter control */
|
||||
#define MSR_IA32_PERF_GLOBAL_OVF_CTRL 0x00000390
|
||||
/* Global performance counter overflow control */
|
||||
#define MSR_IA32_PEBS_ENABLE 0x000003F1 /* PEBS control */
|
||||
#define MSR_IA32_MC0_CTL 0x00000400 /* MC 0 control */
|
||||
#define MSR_IA32_MC0_STATUS 0x00000401 /* MC 0 status */
|
||||
#define MSR_IA32_MC0_ADDR 0x00000402 /* MC 0 address */
|
||||
#define MSR_IA32_MC0_MISC 0x00000403 /* MC 0 misc. */
|
||||
#define MSR_IA32_MC1_CTL 0x00000404 /* MC 1 control */
|
||||
#define MSR_IA32_MC1_STATUS 0x00000405 /* MC 1 status */
|
||||
#define MSR_IA32_MC1_ADDR 0x00000406 /* MC 1 address */
|
||||
#define MSR_IA32_MC1_MISC 0x00000407 /* MC 1 misc. */
|
||||
#define MSR_IA32_MC2_CTL 0x00000408 /* MC 2 control */
|
||||
#define MSR_IA32_MC2_STATUS 0x00000409 /* MC 2 status */
|
||||
#define MSR_IA32_MC2_ADDR 0x0000040A /* MC 2 address */
|
||||
#define MSR_IA32_MC2_MISC 0x0000040B /* MC 2 misc. */
|
||||
#define MSR_IA32_MC3_CTL 0x0000040C /* MC 3 control */
|
||||
#define MSR_IA32_MC3_STATUS 0x0000040D /* MC 3 status */
|
||||
#define MSR_IA32_MC3_ADDR 0x0000040E /* MC 3 address */
|
||||
#define MSR_IA32_MC3_MISC 0x0000040F /* MC 3 misc. */
|
||||
#define MSR_IA32_MC4_CTL 0x00000410 /* MC 4 control */
|
||||
#define MSR_IA32_MC4_STATUS 0x00000411 /* MC 4 status */
|
||||
#define MSR_IA32_MC4_ADDR 0x00000412 /* MC 4 address */
|
||||
#define MSR_IA32_MC4_MISC 0x00000413 /* MC 4 misc. */
|
||||
#define MSR_IA32_MC5_CTL 0x00000414 /* MC 5 control */
|
||||
#define MSR_IA32_MC5_STATUS 0x00000415 /* MC 5 status */
|
||||
#define MSR_IA32_MC5_ADDR 0x00000416 /* MC 5 address */
|
||||
#define MSR_IA32_MC5_MISC 0x00000417 /* MC 5 misc. */
|
||||
#define MSR_IA32_MC6_CTL 0x00000418 /* MC 6 control */
|
||||
#define MSR_IA32_MC6_STATUS 0x00000419 /* MC 6 status */
|
||||
#define MSR_IA32_MC6_ADDR 0x0000041A /* MC 6 address */
|
||||
#define MSR_IA32_MC6_MISC 0x0000041B /* MC 6 misc. */
|
||||
#define MSR_IA32_MC7_CTL 0x0000041C /* MC 7 control */
|
||||
#define MSR_IA32_MC7_STATUS 0x0000041D /* MC 7 status */
|
||||
#define MSR_IA32_MC7_ADDR 0x0000041E /* MC 7 address */
|
||||
#define MSR_IA32_MC7_MISC 0x0000041F /* MC 7 misc. */
|
||||
#define MSR_IA32_MC8_CTL 0x00000420 /* MC 8 control */
|
||||
#define MSR_IA32_MC8_STATUS 0x00000421 /* MC 8 status */
|
||||
#define MSR_IA32_MC8_ADDR 0x00000422 /* MC 8 address */
|
||||
#define MSR_IA32_MC8_MISC 0x00000423 /* MC 8 misc. */
|
||||
#define MSR_IA32_MC9_CTL 0x00000424 /* MC 9 control */
|
||||
#define MSR_IA32_MC9_STATUS 0x00000425 /* MC 9 status */
|
||||
#define MSR_IA32_MC9_ADDR 0x00000426 /* MC 9 address */
|
||||
#define MSR_IA32_MC9_MISC 0x00000427 /* MC 9 misc. */
|
||||
#define MSR_IA32_MC10_CTL 0x00000428 /* MC 10 control */
|
||||
#define MSR_IA32_MC10_STATUS 0x00000429 /* MC 10 status */
|
||||
#define MSR_IA32_MC10_ADDR 0x0000042A /* MC 10 address */
|
||||
#define MSR_IA32_MC10_MISC 0x0000042B /* MC 10 misc. */
|
||||
#define MSR_IA32_MC11_CTL 0x0000042C /* MC 11 control */
|
||||
#define MSR_IA32_MC11_STATUS 0x0000042D /* MC 11 status */
|
||||
#define MSR_IA32_MC11_ADDR 0x0000042E /* MC 11 address */
|
||||
#define MSR_IA32_MC11_MISC 0x0000042F /* MC 11 misc. */
|
||||
#define MSR_IA32_MC12_CTL 0x00000430 /* MC 12 control */
|
||||
#define MSR_IA32_MC12_STATUS 0x00000431 /* MC 12 status */
|
||||
#define MSR_IA32_MC12_ADDR 0x00000432 /* MC 12 address */
|
||||
#define MSR_IA32_MC12_MISC 0x00000433 /* MC 12 misc. */
|
||||
#define MSR_IA32_MC13_CTL 0x00000434 /* MC 13 control */
|
||||
#define MSR_IA32_MC13_STATUS 0x00000435 /* MC 13 status */
|
||||
#define MSR_IA32_MC13_ADDR 0x00000436 /* MC 13 address */
|
||||
#define MSR_IA32_MC13_MISC 0x00000437 /* MC 13 misc. */
|
||||
#define MSR_IA32_MC14_CTL 0x00000438 /* MC 14 control */
|
||||
#define MSR_IA32_MC14_STATUS 0x00000439 /* MC 14 status */
|
||||
#define MSR_IA32_MC14_ADDR 0x0000043A /* MC 14 address */
|
||||
#define MSR_IA32_MC14_MISC 0x0000043B /* MC 14 misc. */
|
||||
#define MSR_IA32_MC15_CTL 0x0000043C /* MC 15 control */
|
||||
#define MSR_IA32_MC15_STATUS 0x0000043D /* MC 15 status */
|
||||
#define MSR_IA32_MC15_ADDR 0x0000043E /* MC 15 address */
|
||||
#define MSR_IA32_MC15_MISC 0x0000043F /* MC 15 misc. */
|
||||
#define MSR_IA32_MC16_CTL 0x00000440 /* MC 16 control */
|
||||
#define MSR_IA32_MC16_STATUS 0x00000441 /* MC 16 status */
|
||||
#define MSR_IA32_MC16_ADDR 0x00000442 /* MC 16 address */
|
||||
#define MSR_IA32_MC16_MISC 0x00000443 /* MC 16 misc. */
|
||||
#define MSR_IA32_MC17_CTL 0x00000444 /* MC 17 control */
|
||||
#define MSR_IA32_MC17_STATUS 0x00000445 /* MC 17 status */
|
||||
#define MSR_IA32_MC17_ADDR 0x00000446 /* MC 17 address */
|
||||
#define MSR_IA32_MC17_MISC 0x00000447 /* MC 17 misc. */
|
||||
#define MSR_IA32_MC18_CTL 0x00000448 /* MC 18 control */
|
||||
#define MSR_IA32_MC18_STATUS 0x00000449 /* MC 18 status */
|
||||
#define MSR_IA32_MC18_ADDR 0x0000044A /* MC 18 address */
|
||||
#define MSR_IA32_MC18_MISC 0x0000044B /* MC 18 misc. */
|
||||
#define MSR_IA32_MC19_CTL 0x0000044C /* MC 19 control */
|
||||
#define MSR_IA32_MC19_STATUS 0x0000044D /* MC 19 status */
|
||||
#define MSR_IA32_MC19_ADDR 0x0000044E /* MC 19 address */
|
||||
#define MSR_IA32_MC19_MISC 0x0000044F /* MC 19 misc. */
|
||||
#define MSR_IA32_MC20_CTL 0x00000450 /* MC 20 control */
|
||||
#define MSR_IA32_MC20_STATUS 0x00000451 /* MC 20 status */
|
||||
#define MSR_IA32_MC20_ADDR 0x00000452 /* MC 20 address */
|
||||
#define MSR_IA32_MC20_MISC 0x00000453 /* MC 20 misc. */
|
||||
#define MSR_IA32_MC21_CTL 0x00000454 /* MC 21 control */
|
||||
#define MSR_IA32_MC21_STATUS 0x00000455 /* MC 21 status */
|
||||
#define MSR_IA32_MC21_ADDR 0x00000456 /* MC 21 address */
|
||||
#define MSR_IA32_MC21_MISC 0x00000457 /* MC 21 misc. */
|
||||
#define MSR_IA32_VMX_BASIC 0x00000480
/* Capability reporting register basic VMX capabilities */
#define MSR_IA32_VMX_PINBASED_CTLS 0x00000481
/* Capability reporting register pin based VM execution controls */
#define MSR_IA32_VMX_PROCBASED_CTLS 0x00000482
/* Capability reporting register primary processor based VM execution controls */
#define MSR_IA32_VMX_EXIT_CTLS 0x00000483
/* Capability reporting register VM exit controls */
#define MSR_IA32_VMX_ENTRY_CTLS 0x00000484
/* Capability reporting register VM entry controls */
#define MSR_IA32_VMX_MISC 0x00000485
/* Capability reporting register of misc. VMX capabilities */
#define MSR_IA32_VMX_CR0_FIXED0 0x00000486
/* Capability reporting register of CR0 bits fixed to 0 */
#define MSR_IA32_VMX_CR0_FIXED1 0x00000487
/* Capability reporting register of CR0 bits fixed to 1 */
#define MSR_IA32_VMX_CR4_FIXED0 0x00000488
/* Capability reporting register of CR4 bits fixed to 0 */
#define MSR_IA32_VMX_CR4_FIXED1 0x00000489
/* Capability reporting register of CR4 bits fixed to 1 */
#define MSR_IA32_VMX_VMCS_ENUM 0x0000048A
/* Capability reporting register of VMCS field enumeration */
#define MSR_IA32_VMX_PROCBASED_CTLS2 0x0000048B
/* Capability reporting register of secondary processor based VM execution
 * controls
 */
#define MSR_IA32_VMX_EPT_VPID_CAP 0x0000048C
/* Capability reporting register of EPT and VPID */
#define MSR_IA32_VMX_TRUE_PINBASED_CTLS 0x0000048D
/* Capability reporting register of pin based VM execution flex controls */
#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS 0x0000048E
/* Capability reporting register of primary processor based VM execution flex
 * controls
 */
#define MSR_IA32_VMX_TRUE_EXIT_CTLS 0x0000048F
/* Capability reporting register of VM exit flex controls */
#define MSR_IA32_VMX_TRUE_ENTRY_CTLS 0x00000490
/* Capability reporting register of VM entry flex controls */
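/*
 * Illustrative sketch (not part of the original header): each VMX capability
 * MSR above reports allowed-0 settings in its low 32 bits and allowed-1
 * settings in its high 32 bits.  Given the raw 64-bit value of, for example,
 * MSR_IA32_VMX_PROCBASED_CTLS, a requested control word can be adjusted so
 * that it only contains settings the processor actually supports:
 */
static inline uint32_t vmx_adjust_controls(uint64_t cap_msr_val, uint32_t request)
{
	uint32_t must_be_one = (uint32_t)cap_msr_val;		/* allowed-0 settings */
	uint32_t may_be_one = (uint32_t)(cap_msr_val >> 32);	/* allowed-1 settings */

	return (request | must_be_one) & may_be_one;
}
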
#define MSR_IA32_DS_AREA 0x00000600 /* DS save area */
|
||||
/* APIC TSC deadline MSR */
|
||||
#define MSR_IA32_TSC_DEADLINE 0x000006E0
|
||||
#define MSR_IA32_EXT_XAPICID 0x00000802 /* x2APIC ID */
|
||||
#define MSR_IA32_EXT_APIC_VERSION 0x00000803 /* x2APIC version */
|
||||
#define MSR_IA32_EXT_APIC_TPR 0x00000808
|
||||
/* x2APIC task priority */
|
||||
#define MSR_IA32_EXT_APIC_PPR 0x0000080A
|
||||
/* x2APIC processor priority */
|
||||
#define MSR_IA32_EXT_APIC_EOI 0x0000080B /* x2APIC EOI */
|
||||
#define MSR_IA32_EXT_APIC_LDR 0x0000080D
|
||||
/* x2APIC logical destination */
|
||||
#define MSR_IA32_EXT_APIC_SIVR 0x0000080F
|
||||
/* x2APIC spurious interrupt vector */
|
||||
#define MSR_IA32_EXT_APIC_ISR0 0x00000810
|
||||
/* x2APIC in-service register 0 */
|
||||
#define MSR_IA32_EXT_APIC_ISR1 0x00000811
|
||||
/* x2APIC in-service register 1 */
|
||||
#define MSR_IA32_EXT_APIC_ISR2 0x00000812
|
||||
/* x2APIC in-service register 2 */
|
||||
#define MSR_IA32_EXT_APIC_ISR3 0x00000813
|
||||
/* x2APIC in-service register 3 */
|
||||
#define MSR_IA32_EXT_APIC_ISR4 0x00000814
|
||||
/* x2APIC in-service register 4 */
|
||||
#define MSR_IA32_EXT_APIC_ISR5 0x00000815
|
||||
/* x2APIC in-service register 5 */
|
||||
#define MSR_IA32_EXT_APIC_ISR6 0x00000816
|
||||
/* x2APIC in-service register 6 */
|
||||
#define MSR_IA32_EXT_APIC_ISR7 0x00000817
|
||||
/* x2APIC in-service register 7 */
|
||||
#define MSR_IA32_EXT_APIC_TMR0 0x00000818
|
||||
/* x2APIC trigger mode register 0 */
|
||||
#define MSR_IA32_EXT_APIC_TMR1 0x00000819
|
||||
/* x2APIC trigger mode register 1 */
|
||||
#define MSR_IA32_EXT_APIC_TMR2 0x0000081A
|
||||
/* x2APIC trigger mode register 2 */
|
||||
#define MSR_IA32_EXT_APIC_TMR3 0x0000081B
|
||||
/* x2APIC trigger mode register 3 */
|
||||
#define MSR_IA32_EXT_APIC_TMR4 0x0000081C
|
||||
/* x2APIC trigger mode register 4 */
|
||||
#define MSR_IA32_EXT_APIC_TMR5 0x0000081D
|
||||
/* x2APIC trigger mode register 5 */
|
||||
#define MSR_IA32_EXT_APIC_TMR6 0x0000081E
|
||||
/* x2APIC trigger mode register 6 */
|
||||
#define MSR_IA32_EXT_APIC_TMR7 0x0000081F
|
||||
/* x2APIC trigger mode register 7 */
|
||||
#define MSR_IA32_EXT_APIC_IRR0 0x00000820
|
||||
/* x2APIC interrupt request register 0 */
|
||||
#define MSR_IA32_EXT_APIC_IRR1 0x00000821
|
||||
/* x2APIC interrupt request register 1 */
|
||||
#define MSR_IA32_EXT_APIC_IRR2 0x00000822
|
||||
/* x2APIC interrupt request register 2 */
|
||||
#define MSR_IA32_EXT_APIC_IRR3 0x00000823
|
||||
/* x2APIC interrupt request register 3 */
|
||||
#define MSR_IA32_EXT_APIC_IRR4 0x00000824
|
||||
/* x2APIC interrupt request register 4 */
|
||||
#define MSR_IA32_EXT_APIC_IRR5 0x00000825
|
||||
/* x2APIC interrupt request register 5 */
|
||||
#define MSR_IA32_EXT_APIC_IRR6 0x00000826
|
||||
/* x2APIC interrupt request register 6 */
|
||||
#define MSR_IA32_EXT_APIC_IRR7 0x00000827
|
||||
/* x2APIC interrupt request register 7 */
|
||||
#define MSR_IA32_EXT_APIC_ESR 0x00000828
|
||||
/* x2APIC error status */
|
||||
#define MSR_IA32_EXT_APIC_LVT_CMCI 0x0000082F
|
||||
/* x2APIC LVT corrected machine check interrupt register */
|
||||
#define MSR_IA32_EXT_APIC_ICR 0x00000830
|
||||
/* x2APIC interrupt command register */
|
||||
#define MSR_IA32_EXT_APIC_LVT_TIMER 0x00000832
|
||||
/* x2APIC LVT timer interrupt register */
|
||||
#define MSR_IA32_EXT_APIC_LVT_THERMAL 0x00000833
|
||||
/* x2APIC LVT thermal sensor interrupt register */
|
||||
#define MSR_IA32_EXT_APIC_LVT_PMI 0x00000834
|
||||
/* x2APIC LVT performance monitor interrupt register */
|
||||
#define MSR_IA32_EXT_APIC_LVT_LINT0 0x00000835
|
||||
/* x2APIC LVT LINT0 register */
|
||||
#define MSR_IA32_EXT_APIC_LVT_LINT1 0x00000836
|
||||
/* x2APIC LVT LINT1 register */
|
||||
#define MSR_IA32_EXT_APIC_LVT_ERROR 0x00000837
|
||||
/* x2APIC LVT error register */
|
||||
#define MSR_IA32_EXT_APIC_INIT_COUNT 0x00000838
|
||||
/* x2APIC initial count register */
|
||||
#define MSR_IA32_EXT_APIC_CUR_COUNT 0x00000839
|
||||
/* x2APIC current count register */
|
||||
#define MSR_IA32_EXT_APIC_DIV_CONF 0x0000083E
|
||||
/* x2APIC divide configuration register */
|
||||
#define MSR_IA32_EXT_APIC_SELF_IPI 0x0000083F
|
||||
/* x2APIC self IPI register */
|
||||
#define MSR_IA32_EFER 0xC0000080
|
||||
/* Extended feature enables */
|
||||
#define MSR_IA32_STAR 0xC0000081
|
||||
/* System call target address */
|
||||
#define MSR_IA32_LSTAR 0xC0000082
|
||||
/* IA-32e mode system call target address */
|
||||
#define MSR_IA32_FMASK 0xC0000084
|
||||
/* System call flag mask */
|
||||
#define MSR_IA32_FS_BASE 0xC0000100
|
||||
/* Map of BASE address of FS */
|
||||
#define MSR_IA32_GS_BASE 0xC0000101
|
||||
/* Map of BASE address of GS */
|
||||
#define MSR_IA32_KERNEL_GS_BASE 0xC0000102
|
||||
/* Swap target of BASE address of GS */
|
||||
#define MSR_IA32_TSC_AUX 0xC0000103 /* Auxiliary TSC */
|
||||
|
||||
/* ATOM specific MSRs */
|
||||
#define MSR_ATOM_EBL_CR_POWERON 0x0000002A
|
||||
/* Processor hard power-on configuration */
|
||||
#define MSR_ATOM_LASTBRANCH_0_FROM_IP 0x00000040
|
||||
/* Last branch record 0 from IP */
|
||||
#define MSR_ATOM_LASTBRANCH_1_FROM_IP 0x00000041
|
||||
/* Last branch record 1 from IP */
|
||||
#define MSR_ATOM_LASTBRANCH_2_FROM_IP 0x00000042
|
||||
/* Last branch record 2 from IP */
|
||||
#define MSR_ATOM_LASTBRANCH_3_FROM_IP 0x00000043
|
||||
/* Last branch record 3 from IP */
|
||||
#define MSR_ATOM_LASTBRANCH_4_FROM_IP 0x00000044
|
||||
/* Last branch record 4 from IP */
|
||||
#define MSR_ATOM_LASTBRANCH_5_FROM_IP 0x00000045
|
||||
/* Last branch record 5 from IP */
|
||||
#define MSR_ATOM_LASTBRANCH_6_FROM_IP 0x00000046
|
||||
/* Last branch record 6 from IP */
|
||||
#define MSR_ATOM_LASTBRANCH_7_FROM_IP 0x00000047
|
||||
/* Last branch record 7 from IP */
|
||||
#define MSR_ATOM_LASTBRANCH_0_TO_LIP 0x00000060
|
||||
/* Last branch record 0 to IP */
|
||||
#define MSR_ATOM_LASTBRANCH_1_TO_LIP 0x00000061
|
||||
/* Last branch record 1 to IP */
|
||||
#define MSR_ATOM_LASTBRANCH_2_TO_LIP 0x00000062
|
||||
/* Last branch record 2 to IP */
|
||||
#define MSR_ATOM_LASTBRANCH_3_TO_LIP 0x00000063
|
||||
/* Last branch record 3 to IP */
|
||||
#define MSR_ATOM_LASTBRANCH_4_TO_LIP 0x00000064
|
||||
/* Last branch record 4 to IP */
|
||||
#define MSR_ATOM_LASTBRANCH_5_TO_LIP 0x00000065
|
||||
/* Last branch record 5 to IP */
|
||||
#define MSR_ATOM_LASTBRANCH_6_TO_LIP 0x00000066
|
||||
/* Last branch record 6 to IP */
|
||||
#define MSR_ATOM_LASTBRANCH_7_TO_LIP 0x00000067
|
||||
/* Last branch record 7 to IP */
|
||||
#define MSR_ATOM_FSB_FREQ 0x000000CD /* Scalable bus speed */
|
||||
#define MSR_PLATFORM_INFO 0x000000CE
|
||||
/* Maximum resolved bus ratio */
|
||||
#define MSR_ATOM_BBL_CR_CTL3 0x0000011E /* L2 hardware enabled */
|
||||
#define MSR_ATOM_THERM2_CTL 0x0000019D
|
||||
/* Mode of automatic thermal monitor */
|
||||
#define MSR_ATOM_LASTBRANCH_TOS 0x000001C9
|
||||
/* Last branch record stack TOS */
|
||||
#define MSR_ATOM_LER_FROM_LIP 0x000001DD
|
||||
/* Last exception record from linear IP */
|
||||
#define MSR_ATOM_LER_TO_LIP 0x000001DE
|
||||
/* Last exception record to linear IP */
|
||||
|
||||
/* LINCROFT specific MSRs */
|
||||
#define MSR_LNC_BIOS_CACHE_AS_RAM 0x000002E0 /* Configure CAR */
|
||||
|
||||
/* MSR_IA32_VMX_EPT_VPID_CAP: EPT and VPID capability bits */
#define MSR_VMX_EPT_VPID_CAP_1GB (1UL << 17) /* EPT 1GB page */
#define MSR_VMX_INVEPT (1UL << 20) /* INVEPT */
#define MSR_VMX_INVEPT_SINGLE_CONTEXT (1UL << 25) /* INVEPT Single */
#define MSR_VMX_INVEPT_GLOBAL_CONTEXT (1UL << 26) /* INVEPT Global */
#define MSR_VMX_INVVPID (1UL << 32) /* INVVPID */
#define MSR_VMX_INVVPID_SINGLE_CONTEXT (1UL << 41) /* INVVPID Single */
#define MSR_VMX_INVVPID_GLOBAL_CONTEXT (1UL << 42) /* INVVPID Global */

/* EFER bits */
#define MSR_IA32_EFER_SCE_BIT (1<<0) /* SYSCALL/SYSRET enable */
#define MSR_IA32_EFER_LME_BIT (1<<8) /* IA32e mode enable */
#define MSR_IA32_EFER_LMA_BIT (1<<10) /* IA32e mode active */
#define MSR_IA32_EFER_NXE_BIT (1<<11) /* Execute-disable enable */
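/*
 * Illustrative sketch (not part of the original header): a guest runs in
 * 64-bit (IA-32e) mode only when long mode is both enabled and active, i.e.
 * when EFER.LME and EFER.LMA are set in its (virtualized) EFER value:
 */
static inline bool efer_long_mode_active(uint64_t efer)
{
	return ((efer & MSR_IA32_EFER_LME_BIT) != 0) &&
		((efer & MSR_IA32_EFER_LMA_BIT) != 0);
}
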
/* FEATURE CONTROL bits */
#define MSR_IA32_FEATURE_CONTROL_LOCK (1<<0)
#define MSR_IA32_FEATURE_CONTROL_VMX_SMX (1<<1)
#define MSR_IA32_FEATURE_CONTROL_VMX_NO_SMX (1<<2)

/* PAT memory type definitions */
#define PAT_MEM_TYPE_UC 0x00 /* uncached */
#define PAT_MEM_TYPE_WC 0x01 /* write combining */
#define PAT_MEM_TYPE_WT 0x04 /* write through */
#define PAT_MEM_TYPE_WP 0x05 /* write protected */
#define PAT_MEM_TYPE_WB 0x06 /* writeback */
#define PAT_MEM_TYPE_UCM 0x07 /* uncached minus */
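/*
 * Illustrative sketch (not part of the original header): IA32_PAT holds eight
 * page attribute entries, one per byte (PA0 in bits 2:0, PA1 in bits 10:8 and
 * so on), so a complete PAT value can be composed from the types above.  The
 * example below reproduces the architectural power-on layout
 * (WB, WT, UC-, UC, WB, WT, UC-, UC).
 */
#define PAT_ENTRY(i, type) (((uint64_t)(type)) << ((i) * 8))
#define PAT_POWER_ON_VALUE \
	(PAT_ENTRY(0, PAT_MEM_TYPE_WB) | PAT_ENTRY(1, PAT_MEM_TYPE_WT) | \
	 PAT_ENTRY(2, PAT_MEM_TYPE_UCM) | PAT_ENTRY(3, PAT_MEM_TYPE_UC) | \
	 PAT_ENTRY(4, PAT_MEM_TYPE_WB) | PAT_ENTRY(5, PAT_MEM_TYPE_WT) | \
	 PAT_ENTRY(6, PAT_MEM_TYPE_UCM) | PAT_ENTRY(7, PAT_MEM_TYPE_UC))
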
/* MTRR memory type definitions */
#define MTRR_MEM_TYPE_UC 0x00 /* uncached */
#define MTRR_MEM_TYPE_WC 0x01 /* write combining */
#define MTRR_MEM_TYPE_WT 0x04 /* write through */
#define MTRR_MEM_TYPE_WP 0x05 /* write protected */
#define MTRR_MEM_TYPE_WB 0x06 /* writeback */

/* misc. MTRR flag definitions */
#define MTRR_ENABLE 0x800 /* MTRR enable */
#define MTRR_FIX_ENABLE 0x400 /* fixed range MTRR enable */
#define MTRR_VALID 0x800 /* MTRR setting is valid */
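/*
 * Illustrative sketch (not part of the original header): MTRR_ENABLE is bit 11
 * (E) and MTRR_FIX_ENABLE is bit 10 (FE) of MSR_IA32_MTRR_DEF_TYPE, so a
 * typical configuration enabling variable and fixed-range MTRRs with a
 * write-back default memory type would be:
 */
#define MTRR_DEF_TYPE_WB_EXAMPLE \
	(MTRR_ENABLE | MTRR_FIX_ENABLE | MTRR_MEM_TYPE_WB)
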
/* SPEC & PRED bits */
#define SPEC_ENABLE_IBRS (1<<0)
#define SPEC_ENABLE_STIBP (1<<1)
#define PRED_SET_IBPB (1<<0)

#endif /* MSR_H */
105
hypervisor/include/arch/x86/multiboot.h
Normal file
@@ -0,0 +1,105 @@
|
||||
/*
|
||||
* Copyright (C) 2018 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Intel Corporation nor the names of its
|
||||
* contributors may be used to endorse or promote products derived
|
||||
* from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef MULTIBOOT_H
#define MULTIBOOT_H

#define MULTIBOOT_INFO_MAGIC 0x2BADB002
#define MULTIBOOT_INFO_HAS_CMDLINE 0x00000004
#define MULTIBOOT_INFO_HAS_MODS 0x00000008

struct multiboot_info {
	uint32_t mi_flags;

	/* Valid if mi_flags sets MULTIBOOT_INFO_HAS_MEMORY. */
	uint32_t mi_mem_lower;
	uint32_t mi_mem_upper;

	/* Valid if mi_flags sets MULTIBOOT_INFO_HAS_BOOT_DEVICE. */
	uint8_t mi_boot_device_part3;
	uint8_t mi_boot_device_part2;
	uint8_t mi_boot_device_part1;
	uint8_t mi_boot_device_drive;

	/* Valid if mi_flags sets MULTIBOOT_INFO_HAS_CMDLINE. */
	uint32_t mi_cmdline;

	/* Valid if mi_flags sets MULTIBOOT_INFO_HAS_MODS. */
	uint32_t mi_mods_count;
	uint32_t mi_mods_addr;

	/* Valid if mi_flags sets MULTIBOOT_INFO_HAS_{AOUT,ELF}_SYMS. */
	uint32_t mi_elfshdr_num;
	uint32_t mi_elfshdr_size;
	uint32_t mi_elfshdr_addr;
	uint32_t mi_elfshdr_shndx;

	/* Valid if mi_flags sets MULTIBOOT_INFO_HAS_MMAP. */
	uint32_t mi_mmap_length;
	uint32_t mi_mmap_addr;

	/* Valid if mi_flags sets MULTIBOOT_INFO_HAS_DRIVES. */
	uint32_t mi_drives_length;
	uint32_t mi_drives_addr;

	/* Valid if mi_flags sets MULTIBOOT_INFO_HAS_CONFIG_TABLE. */
	uint32_t unused_mi_config_table;

	/* Valid if mi_flags sets MULTIBOOT_INFO_HAS_LOADER_NAME. */
	uint32_t mi_loader_name;

	/* Valid if mi_flags sets MULTIBOOT_INFO_HAS_APM. */
	uint32_t unused_mi_apm_table;

	/* Valid if mi_flags sets MULTIBOOT_INFO_HAS_VBE. */
	uint32_t unused_mi_vbe_control_info;
	uint32_t unused_mi_vbe_mode_info;
	uint32_t unused_mi_vbe_interface_seg;
	uint32_t unused_mi_vbe_interface_off;
	uint32_t unused_mi_vbe_interface_len;
};

struct multiboot_mmap {
	uint32_t size;
	uint64_t baseaddr;
	uint64_t length;
	uint32_t type;
} __packed;

struct multiboot_module {
	uint32_t mm_mod_start;
	uint32_t mm_mod_end;
	uint32_t mm_string;
	uint32_t mm_reserved;
};

int parse_hv_cmdline(void);

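/*
 * Illustrative sketch (not part of the original header): walking the module
 * list handed over by the bootloader.  mi_mods_addr is a physical address;
 * the flat cast below assumes the early identity-mapped boot environment.
 */
static inline void walk_multiboot_modules(const struct multiboot_info *mbi)
{
	uint32_t i;
	const struct multiboot_module *mods;

	if ((mbi->mi_flags & MULTIBOOT_INFO_HAS_MODS) == 0)
		return;

	mods = (const struct multiboot_module *)(uint64_t)mbi->mi_mods_addr;
	for (i = 0; i < mbi->mi_mods_count; i++) {
		/* mods[i].mm_mod_start .. mods[i].mm_mod_end bound the image */
		(void)mods[i];
	}
}
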
#endif
47
hypervisor/include/arch/x86/softirq.h
Normal file
@@ -0,0 +1,47 @@
|
||||
/*
|
||||
* Copyright (C) 2018 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Intel Corporation nor the names of its
|
||||
* contributors may be used to endorse or promote products derived
|
||||
* from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef SOFTIRQ_H
#define SOFTIRQ_H

#define SOFTIRQ_TIMER 0
#define SOFTIRQ_DEV_ASSIGN 1
#define SOFTIRQ_MAX 2
#define SOFTIRQ_MASK ((1UL<<SOFTIRQ_MAX)-1)

/* atomic bit used to prevent recursive softirq execution */
#define SOFTIRQ_ATOMIC 63

void enable_softirq(int cpu_id);
void disable_softirq(int cpu_id);
void init_softirq(void);
void raise_softirq(int softirq_id);
void exec_softirq(void);

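/*
 * Illustrative sketch (not part of the original header): the intended split is
 * that interrupt handlers only mark work as pending and the interrupt return
 * path drains it, e.g. (assuming a hypothetical timer interrupt handler):
 */
static inline void timer_interrupt_bottom_half_example(void)
{
	/* defer the heavy lifting out of the hard interrupt context */
	raise_softirq(SOFTIRQ_TIMER);

	/* later, on the exit path, pending softirqs are executed */
	exec_softirq();
}
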
#endif /* SOFTIRQ_H */
47
hypervisor/include/arch/x86/timer.h
Normal file
@@ -0,0 +1,47 @@
|
||||
/*
|
||||
* Copyright (C) 2018 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Intel Corporation nor the names of its
|
||||
* contributors may be used to endorse or promote products derived
|
||||
* from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef TIMER_H
#define TIMER_H

typedef int (*timer_handle_t)(uint64_t);

long add_timer(timer_handle_t func, uint64_t data, uint64_t deadline);
bool cancel_timer(long handle, int cpu_id);
long update_timer(long handle, timer_handle_t func, uint64_t data,
		uint64_t deadline);

int timer_softirq(int cpu_id);
void timer_init(void);
void timer_cleanup(void);
void dump_timer_pool_info(int cpu_id);
void check_tsc(void);

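/*
 * Illustrative sketch (not part of the original header): arming a one-shot
 * timer with a callback.  Treating the deadline as an absolute TSC value is
 * an assumption about the time base behind add_timer().
 */
static int example_timer_expired(uint64_t data)
{
	(void)data;	/* opaque cookie passed to add_timer() */
	return 0;
}

static inline long arm_example_timer(uint64_t now_tsc, uint64_t delta_tsc)
{
	return add_timer(example_timer_expired, 0, now_tsc + delta_tsc);
}
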
#endif /* TIMER_H */
75
hypervisor/include/arch/x86/vmexit.h
Normal file
@@ -0,0 +1,75 @@
|
||||
/*
|
||||
* Copyright (C) 2018 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Intel Corporation nor the names of its
|
||||
* contributors may be used to endorse or promote products derived
|
||||
* from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef VMEXIT_H_
#define VMEXIT_H_

struct vm_exit_dispatch {
	int (*handler)(struct vcpu *);
	uint32_t need_exit_qualification;
};

struct vm_exit_dispatch *vmexit_handler(struct vcpu *vcpu);
int vmcall_handler(struct vcpu *vcpu);
int cpuid_handler(struct vcpu *vcpu);
int cr_access_handler(struct vcpu *vcpu);
int get_vmexit_profile(char *str, int str_max);

#define VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, MSB, LSB) \
	(exit_qual & (((1UL << (MSB+1))-1) - ((1UL << (LSB))-1)))

/* MACROs to access Control-Register Info using exit qualification field */
#define VM_EXIT_CR_ACCESS_CR_NUM(exit_qual) \
	(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 3, 0) >> 0)
#define VM_EXIT_CR_ACCESS_ACCESS_TYPE(exit_qual) \
	(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 5, 4) >> 4)
#define VM_EXIT_CR_ACCESS_LMSW_OP(exit_qual) \
	(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 6, 6) >> 6)
#define VM_EXIT_CR_ACCESS_REG_IDX(exit_qual) \
	(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 11, 8) >> 8)
#define VM_EXIT_CR_ACCESS_LMSW_SRC_DATE(exit_qual) \
	(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 31, 16) >> 16)

/* MACROs to access IO Access Info using exit qualification field */
#define VM_EXIT_IO_INSTRUCTION_SIZE(exit_qual) \
	(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 2, 0) >> 0)
#define VM_EXIT_IO_INSTRUCTION_ACCESS_DIRECTION(exit_qual) \
	(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 3, 3) >> 3)
#define VM_EXIT_IO_INSTRUCTION_IS_STRING(exit_qual) \
	(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 4, 4) >> 4)
#define VM_EXIT_IO_INSTRUCTION_IS_REP_PREFIXED(exit_qual) \
	(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 5, 5) >> 5)
#define VM_EXIT_IO_INSTRUCTION_IS_OPERAND_ENCODING(exit_qual) \
	(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 6, 6) >> 6)
#define VM_EXIT_IO_INSTRUCTION_PORT_NUMBER(exit_qual) \
	(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 31, 16) >> 16)

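/*
 * Illustrative sketch (not part of the original header): decoding the exit
 * qualification of an I/O instruction VM exit with the accessors above.  The
 * size field encodes the access width minus one (0 = 1 byte, 1 = 2 bytes,
 * 3 = 4 bytes) and direction 0 means OUT while 1 means IN.
 */
static inline void decode_io_exit_example(uint64_t exit_qual,
		uint16_t *port, uint32_t *bytes, int *is_in)
{
	*port = (uint16_t)VM_EXIT_IO_INSTRUCTION_PORT_NUMBER(exit_qual);
	*bytes = (uint32_t)VM_EXIT_IO_INSTRUCTION_SIZE(exit_qual) + 1;
	*is_in = (VM_EXIT_IO_INSTRUCTION_ACCESS_DIRECTION(exit_qual) != 0);
}
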
#endif /* VMEXIT_H_ */
433
hypervisor/include/arch/x86/vmx.h
Normal file
@@ -0,0 +1,433 @@
|
||||
/*
|
||||
* Copyright (C) 2018 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Intel Corporation nor the names of its
|
||||
* contributors may be used to endorse or promote products derived
|
||||
* from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef VMX_H_
|
||||
#define VMX_H_
|
||||
|
||||
/* 16-bit control fields */
|
||||
#define VMX_VPID 0x00000000
|
||||
/* 16-bit guest-state fields */
|
||||
#define VMX_GUEST_ES_SEL 0x00000800
|
||||
#define VMX_GUEST_CS_SEL 0x00000802
|
||||
#define VMX_GUEST_SS_SEL 0x00000804
|
||||
#define VMX_GUEST_DS_SEL 0x00000806
|
||||
#define VMX_GUEST_FS_SEL 0x00000808
|
||||
#define VMX_GUEST_GS_SEL 0x0000080a
|
||||
#define VMX_GUEST_LDTR_SEL 0x0000080c
|
||||
#define VMX_GUEST_TR_SEL 0x0000080e
|
||||
#define VMX_GUEST_INTR_STATUS 0x00000810
|
||||
/* 16-bit host-state fields */
|
||||
#define VMX_HOST_ES_SEL 0x00000c00
|
||||
#define VMX_HOST_CS_SEL 0x00000c02
|
||||
#define VMX_HOST_SS_SEL 0x00000c04
|
||||
#define VMX_HOST_DS_SEL 0x00000c06
|
||||
#define VMX_HOST_FS_SEL 0x00000c08
|
||||
#define VMX_HOST_GS_SEL 0x00000c0a
|
||||
#define VMX_HOST_TR_SEL 0x00000c0c
|
||||
/* 64-bit control fields */
|
||||
#define VMX_IO_BITMAP_A_FULL 0x00002000
|
||||
#define VMX_IO_BITMAP_A_HIGH 0x00002001
|
||||
#define VMX_IO_BITMAP_B_FULL 0x00002002
|
||||
#define VMX_IO_BITMAP_B_HIGH 0x00002003
|
||||
#define VMX_MSR_BITMAP_FULL 0x00002004
|
||||
#define VMX_MSR_BITMAP_HIGH 0x00002005
|
||||
#define VMX_EXIT_MSR_STORE_ADDR_FULL 0x00002006
|
||||
#define VMX_EXIT_MSR_STORE_ADDR_HIGH 0x00002007
|
||||
#define VMX_EXIT_MSR_LOAD_ADDR_FULL 0x00002008
|
||||
#define VMX_EXIT_MSR_LOAD_ADDR_HIGH 0x00002009
|
||||
#define VMX_ENTRY_MSR_LOAD_ADDR_FULL 0x0000200a
|
||||
#define VMX_ENTRY_MSR_LOAD_ADDR_HIGH 0x0000200b
|
||||
#define VMX_EXECUTIVE_VMCS_PTR_FULL 0x0000200c
|
||||
#define VMX_EXECUTIVE_VMCS_PTR_HIGH 0x0000200d
|
||||
#define VMX_TSC_OFFSET_FULL 0x00002010
|
||||
#define VMX_TSC_OFFSET_HIGH 0x00002011
|
||||
#define VMX_VIRTUAL_APIC_PAGE_ADDR_FULL 0x00002012
|
||||
#define VMX_VIRTUAL_APIC_PAGE_ADDR_HIGH 0x00002013
|
||||
#define VMX_APIC_ACCESS_ADDR_FULL 0x00002014
|
||||
#define VMX_APIC_ACCESS_ADDR_HIGH 0x00002015
|
||||
#define VMX_EPT_POINTER_FULL 0x0000201A
|
||||
#define VMX_EPT_POINTER_HIGH 0x0000201B
|
||||
#define VMX_EOI_EXIT0_FULL 0x0000201C
|
||||
#define VMX_EOI_EXIT0_HIGH 0x0000201D
|
||||
#define VMX_EOI_EXIT1_FULL 0x0000201E
|
||||
#define VMX_EOI_EXIT1_HIGH 0x0000201F
|
||||
#define VMX_EOI_EXIT2_FULL 0x00002020
|
||||
#define VMX_EOI_EXIT2_HIGH 0x00002021
|
||||
#define VMX_EOI_EXIT3_FULL 0x00002022
|
||||
#define VMX_EOI_EXIT3_HIGH 0x00002023
|
||||
#define VMX_EOI_EXIT(vector) (VMX_EOI_EXIT0_FULL + ((vector) / 64) * 2)
|
||||
/* 64-bit read-only data fields */
|
||||
#define VMX_GUEST_PHYSICAL_ADDR_FULL 0x00002400
|
||||
#define VMX_GUEST_PHYSICAL_ADDR_HIGH 0x00002401
|
||||
/* 64-bit guest-state fields */
|
||||
#define VMX_VMS_LINK_PTR_FULL 0x00002800
|
||||
#define VMX_VMS_LINK_PTR_HIGH 0x00002801
|
||||
#define VMX_GUEST_IA32_DEBUGCTL_FULL 0x00002802
|
||||
#define VMX_GUEST_IA32_DEBUGCTL_HIGH 0x00002803
|
||||
#define VMX_GUEST_IA32_PAT_FULL 0x00002804
|
||||
#define VMX_GUEST_IA32_PAT_HIGH 0x00002805
|
||||
#define VMX_GUEST_IA32_EFER_FULL 0x00002806
|
||||
#define VMX_GUEST_IA32_EFER_HIGH 0x00002807
|
||||
#define VMX_GUEST_IA32_PERF_CTL_FULL 0x00002808
|
||||
#define VMX_GUEST_IA32_PERF_CTL_HIGH 0x00002809
|
||||
#define VMX_GUEST_PDPTE0_FULL 0x0000280A
|
||||
#define VMX_GUEST_PDPTE0_HIGH 0x0000280B
|
||||
#define VMX_GUEST_PDPTE1_FULL 0x0000280C
|
||||
#define VMX_GUEST_PDPTE1_HIGH 0x0000280D
|
||||
#define VMX_GUEST_PDPTE2_FULL 0x0000280E
|
||||
#define VMX_GUEST_PDPTE2_HIGH 0x0000280F
|
||||
#define VMX_GUEST_PDPTE3_FULL 0x00002810
|
||||
#define VMX_GUEST_PDPTE3_HIGH 0x00002811
|
||||
/* 64-bit host-state fields */
|
||||
#define VMX_HOST_IA32_PAT_FULL 0x00002C00
|
||||
#define VMX_HOST_IA32_PAT_HIGH 0x00002C01
|
||||
#define VMX_HOST_IA32_EFER_FULL 0x00002C02
|
||||
#define VMX_HOST_IA32_EFER_HIGH 0x00002C03
|
||||
#define VMX_HOST_IA32_PERF_CTL_FULL 0x00002C04
|
||||
#define VMX_HOST_IA32_PERF_CTL_HIGH 0x00002C05
|
||||
/* 32-bit control fields */
|
||||
#define VMX_PIN_VM_EXEC_CONTROLS 0x00004000
|
||||
#define VMX_PROC_VM_EXEC_CONTROLS 0x00004002
|
||||
#define VMX_EXCEPTION_BITMAP 0x00004004
|
||||
#define VMX_PF_EC_MASK 0x00004006
|
||||
#define VMX_PF_EC_MATCH 0x00004008
|
||||
#define VMX_CR3_TARGET_COUNT 0x0000400a
|
||||
#define VMX_EXIT_CONTROLS 0x0000400c
|
||||
#define VMX_EXIT_MSR_STORE_COUNT 0x0000400e
|
||||
#define VMX_EXIT_MSR_LOAD_COUNT 0x00004010
|
||||
#define VMX_ENTRY_CONTROLS 0x00004012
|
||||
#define VMX_ENTRY_MSR_LOAD_COUNT 0x00004014
|
||||
#define VMX_ENTRY_INT_INFO_FIELD 0x00004016
|
||||
#define VMX_ENTRY_EXCEPTION_EC 0x00004018
|
||||
#define VMX_ENTRY_INSTR_LENGTH 0x0000401a
|
||||
#define VMX_TPR_THRESHOLD 0x0000401c
|
||||
#define VMX_PROC_VM_EXEC_CONTROLS2 0x0000401E
|
||||
#define VMX_PLE_GAP 0x00004020
|
||||
#define VMX_PLE_WINDOW 0x00004022
|
||||
/* 32-bit read-only data fields */
|
||||
#define VMX_INSTR_ERROR 0x00004400
|
||||
#define VMX_EXIT_REASON 0x00004402
|
||||
#define VMX_EXIT_INT_INFO 0x00004404
|
||||
#define VMX_EXIT_INT_EC 0x00004406
|
||||
#define VMX_IDT_VEC_INFO_FIELD 0x00004408
|
||||
#define VMX_IDT_VEC_EC 0x0000440a
|
||||
#define VMX_EXIT_INSTR_LEN 0x0000440c
|
||||
#define VMX_INSTR_INFO 0x0000440e
|
||||
/* 32-bit guest-state fields */
|
||||
#define VMX_GUEST_ES_LIMIT 0x00004800
|
||||
#define VMX_GUEST_CS_LIMIT 0x00004802
|
||||
#define VMX_GUEST_SS_LIMIT 0x00004804
|
||||
#define VMX_GUEST_DS_LIMIT 0x00004806
|
||||
#define VMX_GUEST_FS_LIMIT 0x00004808
|
||||
#define VMX_GUEST_GS_LIMIT 0x0000480a
|
||||
#define VMX_GUEST_LDTR_LIMIT 0x0000480c
|
||||
#define VMX_GUEST_TR_LIMIT 0x0000480e
|
||||
#define VMX_GUEST_GDTR_LIMIT 0x00004810
|
||||
#define VMX_GUEST_IDTR_LIMIT 0x00004812
|
||||
#define VMX_GUEST_ES_ATTR 0x00004814
|
||||
#define VMX_GUEST_CS_ATTR 0x00004816
|
||||
#define VMX_GUEST_SS_ATTR 0x00004818
|
||||
#define VMX_GUEST_DS_ATTR 0x0000481a
|
||||
#define VMX_GUEST_FS_ATTR 0x0000481c
|
||||
#define VMX_GUEST_GS_ATTR 0x0000481e
|
||||
#define VMX_GUEST_LDTR_ATTR 0x00004820
|
||||
#define VMX_GUEST_TR_ATTR 0x00004822
|
||||
#define VMX_GUEST_INTERRUPTIBILITY_INFO 0x00004824
|
||||
#define VMX_GUEST_ACTIVITY_STATE 0x00004826
|
||||
#define VMX_GUEST_SMBASE 0x00004828
|
||||
#define VMX_GUEST_IA32_SYSENTER_CS 0x0000482a
|
||||
#define VMX_GUEST_TIMER 0x0000482E
|
||||
/* 32-bit host-state fields */
|
||||
#define VMX_HOST_IA32_SYSENTER_CS 0x00004c00
|
||||
/* natural-width control fields */
|
||||
#define VMX_CR0_MASK 0x00006000
|
||||
#define VMX_CR4_MASK 0x00006002
|
||||
#define VMX_CR0_READ_SHADOW 0x00006004
|
||||
#define VMX_CR4_READ_SHADOW 0x00006006
|
||||
#define VMX_CR3_TARGET_0 0x00006008
|
||||
#define VMX_CR3_TARGET_1 0x0000600a
|
||||
#define VMX_CR3_TARGET_2 0x0000600c
|
||||
#define VMX_CR3_TARGET_3 0x0000600e
|
||||
/* natural-width read-only data fields */
|
||||
#define VMX_EXIT_QUALIFICATION 0x00006400
|
||||
#define VMX_IO_RCX 0x00006402
|
||||
#define VMX_IO_RDI 0x00006406
|
||||
#define VMX_GUEST_LINEAR_ADDR 0x0000640a
|
||||
/* natural-width guest-state fields */
|
||||
#define VMX_GUEST_CR0 0x00006800
|
||||
#define VMX_GUEST_CR3 0x00006802
|
||||
#define VMX_GUEST_CR4 0x00006804
|
||||
#define VMX_GUEST_ES_BASE 0x00006806
|
||||
#define VMX_GUEST_CS_BASE 0x00006808
|
||||
#define VMX_GUEST_SS_BASE 0x0000680a
|
||||
#define VMX_GUEST_DS_BASE 0x0000680c
|
||||
#define VMX_GUEST_FS_BASE 0x0000680e
|
||||
#define VMX_GUEST_GS_BASE 0x00006810
|
||||
#define VMX_GUEST_LDTR_BASE 0x00006812
|
||||
#define VMX_GUEST_TR_BASE 0x00006814
|
||||
#define VMX_GUEST_GDTR_BASE 0x00006816
|
||||
#define VMX_GUEST_IDTR_BASE 0x00006818
|
||||
#define VMX_GUEST_DR7 0x0000681a
|
||||
#define VMX_GUEST_RSP 0x0000681c
|
||||
#define VMX_GUEST_RIP 0x0000681e
|
||||
#define VMX_GUEST_RFLAGS 0x00006820
|
||||
#define VMX_GUEST_PENDING_DEBUG_EXCEPT 0x00006822
|
||||
#define VMX_GUEST_IA32_SYSENTER_ESP 0x00006824
|
||||
#define VMX_GUEST_IA32_SYSENTER_EIP 0x00006826
|
||||
/* natural-width host-state fields */
|
||||
#define VMX_HOST_CR0 0x00006c00
|
||||
#define VMX_HOST_CR3 0x00006c02
|
||||
#define VMX_HOST_CR4 0x00006c04
|
||||
#define VMX_HOST_FS_BASE 0x00006c06
|
||||
#define VMX_HOST_GS_BASE 0x00006c08
|
||||
#define VMX_HOST_TR_BASE 0x00006c0a
|
||||
#define VMX_HOST_GDTR_BASE 0x00006c0c
|
||||
#define VMX_HOST_IDTR_BASE 0x00006c0e
|
||||
#define VMX_HOST_IA32_SYSENTER_ESP 0x00006c10
|
||||
#define VMX_HOST_IA32_SYSENTER_EIP 0x00006c12
|
||||
#define VMX_HOST_RSP 0x00006c14
|
||||
#define VMX_HOST_RIP 0x00006c16
|
||||
/*
|
||||
* Basic VM exit reasons
|
||||
*/
|
||||
#define VMX_EXIT_REASON_EXCEPTION_OR_NMI 0x00000000
|
||||
#define VMX_EXIT_REASON_EXTERNAL_INTERRUPT 0x00000001
|
||||
#define VMX_EXIT_REASON_TRIPLE_FAULT 0x00000002
|
||||
#define VMX_EXIT_REASON_INIT_SIGNAL 0x00000003
|
||||
#define VMX_EXIT_REASON_STARTUP_IPI 0x00000004
|
||||
#define VMX_EXIT_REASON_IO_SMI 0x00000005
|
||||
#define VMX_EXIT_REASON_OTHER_SMI 0x00000006
|
||||
#define VMX_EXIT_REASON_INTERRUPT_WINDOW 0x00000007
|
||||
#define VMX_EXIT_REASON_NMI_WINDOW 0x00000008
|
||||
#define VMX_EXIT_REASON_TASK_SWITCH 0x00000009
|
||||
#define VMX_EXIT_REASON_CPUID 0x0000000A
|
||||
#define VMX_EXIT_REASON_GETSEC 0x0000000B
|
||||
#define VMX_EXIT_REASON_HLT 0x0000000C
|
||||
#define VMX_EXIT_REASON_INVD 0x0000000D
|
||||
#define VMX_EXIT_REASON_INVLPG 0x0000000E
|
||||
#define VMX_EXIT_REASON_RDPMC 0x0000000F
|
||||
#define VMX_EXIT_REASON_RDTSC 0x00000010
|
||||
#define VMX_EXIT_REASON_RSM 0x00000011
|
||||
#define VMX_EXIT_REASON_VMCALL 0x00000012
|
||||
#define VMX_EXIT_REASON_VMCLEAR 0x00000013
|
||||
#define VMX_EXIT_REASON_VMLAUNCH 0x00000014
|
||||
#define VMX_EXIT_REASON_VMPTRLD 0x00000015
|
||||
#define VMX_EXIT_REASON_VMPTRST 0x00000016
|
||||
#define VMX_EXIT_REASON_VMREAD 0x00000017
|
||||
#define VMX_EXIT_REASON_VMRESUME 0x00000018
|
||||
#define VMX_EXIT_REASON_VMWRITE 0x00000019
|
||||
#define VMX_EXIT_REASON_VMXOFF 0x0000001A
|
||||
#define VMX_EXIT_REASON_VMXON 0x0000001B
|
||||
#define VMX_EXIT_REASON_CR_ACCESS 0x0000001C
|
||||
#define VMX_EXIT_REASON_DR_ACCESS 0x0000001D
|
||||
#define VMX_EXIT_REASON_IO_INSTRUCTION 0x0000001E
|
||||
#define VMX_EXIT_REASON_RDMSR 0x0000001F
|
||||
#define VMX_EXIT_REASON_WRMSR 0x00000020
|
||||
#define VMX_EXIT_REASON_ENTRY_FAILURE_INVALID_GUEST_STATE 0x00000021
|
||||
#define VMX_EXIT_REASON_ENTRY_FAILURE_MSR_LOADING 0x00000022
|
||||
/* entry 0x23 (35) is missing */
|
||||
#define VMX_EXIT_REASON_MWAIT 0x00000024
|
||||
#define VMX_EXIT_REASON_MONITOR_TRAP 0x00000025
|
||||
/* entry 0x26 (38) is missing */
|
||||
#define VMX_EXIT_REASON_MONITOR 0x00000027
|
||||
#define VMX_EXIT_REASON_PAUSE 0x00000028
|
||||
#define VMX_EXIT_REASON_ENTRY_FAILURE_MACHINE_CHECK 0x00000029
|
||||
/* entry 0x2A (42) is missing */
|
||||
#define VMX_EXIT_REASON_TPR_BELOW_THRESHOLD 0x0000002B
|
||||
#define VMX_EXIT_REASON_APIC_ACCESS 0x0000002C
|
||||
#define VMX_EXIT_REASON_VIRTUALIZED_EOI 0x0000002D
|
||||
#define VMX_EXIT_REASON_GDTR_IDTR_ACCESS 0x0000002E
|
||||
#define VMX_EXIT_REASON_LDTR_TR_ACCESS 0x0000002F
|
||||
#define VMX_EXIT_REASON_EPT_VIOLATION 0x00000030
|
||||
#define VMX_EXIT_REASON_EPT_MISCONFIGURATION 0x00000031
|
||||
#define VMX_EXIT_REASON_INVEPT 0x00000032
|
||||
#define VMX_EXIT_REASON_RDTSCP 0x00000033
|
||||
#define VMX_EXIT_REASON_VMX_PREEMPTION_TIMER_EXPIRED 0x00000034
|
||||
#define VMX_EXIT_REASON_INVVPID 0x00000035
|
||||
#define VMX_EXIT_REASON_WBINVD 0x00000036
|
||||
#define VMX_EXIT_REASON_XSETBV 0x00000037
|
||||
#define VMX_EXIT_REASON_APIC_WRITE 0x00000038
|
||||
|
||||
/* VMX execution control bits (pin based) */
|
||||
#define VMX_PINBASED_CTLS_IRQ_EXIT (1<<0)
|
||||
#define VMX_PINBASED_CTLS_NMI_EXIT (1<<3)
|
||||
#define VMX_PINBASED_CTLS_VIRT_NMI (1<<5)
|
||||
#define VMX_PINBASED_CTLS_ENABLE_PTMR (1<<6)
|
||||
#define VMX_PINBASED_CTLS_POST_IRQ (1<<7)
|
||||
|
||||
/* VMX execution control bits (processor based) */
|
||||
#define VMX_PROCBASED_CTLS_IRQ_WIN (1<<2)
|
||||
#define VMX_PROCBASED_CTLS_TSC_OFF (1<<3)
|
||||
#define VMX_PROCBASED_CTLS_HLT (1<<7)
|
||||
#define VMX_PROCBASED_CTLS_INVLPG (1<<9)
|
||||
#define VMX_PROCBASED_CTLS_MWAIT (1<<10)
|
||||
#define VMX_PROCBASED_CTLS_RDPMC (1<<11)
|
||||
#define VMX_PROCBASED_CTLS_RDTSC (1<<12)
|
||||
#define VMX_PROCBASED_CTLS_CR3_LOAD (1<<15)
|
||||
#define VMX_PROCBASED_CTLS_CR3_STORE (1<<16)
|
||||
#define VMX_PROCBASED_CTLS_CR8_LOAD (1<<19)
|
||||
#define VMX_PROCBASED_CTLS_CR8_STORE (1<<20)
|
||||
#define VMX_PROCBASED_CTLS_TPR_SHADOW (1<<21)
|
||||
#define VMX_PROCBASED_CTLS_NMI_WINEXIT (1<<22)
|
||||
#define VMX_PROCBASED_CTLS_MOV_DR (1<<23)
|
||||
#define VMX_PROCBASED_CTLS_UNCOND_IO (1<<24)
|
||||
#define VMX_PROCBASED_CTLS_IO_BITMAP (1<<25)
|
||||
#define VMX_PROCBASED_CTLS_MON_TRAP (1<<27)
|
||||
#define VMX_PROCBASED_CTLS_MSR_BITMAP (1<<28)
|
||||
#define VMX_PROCBASED_CTLS_MONITOR (1<<29)
|
||||
#define VMX_PROCBASED_CTLS_PAUSE (1<<30)
|
||||
#define VMX_PROCBASED_CTLS_SECONDARY (1<<31)
|
||||
#define VMX_PROCBASED_CTLS2_VAPIC (1<<0)
|
||||
#define VMX_PROCBASED_CTLS2_EPT (1<<1)
|
||||
#define VMX_PROCBASED_CTLS2_DESC_TABLE (1<<2)
|
||||
#define VMX_PROCBASED_CTLS2_RDTSCP (1<<3)
|
||||
#define VMX_PROCBASED_CTLS2_VX2APIC (1<<4)
|
||||
#define VMX_PROCBASED_CTLS2_VPID (1<<5)
|
||||
#define VMX_PROCBASED_CTLS2_WBINVD (1<<6)
|
||||
#define VMX_PROCBASED_CTLS2_UNRESTRICT (1<<7)
|
||||
#define VMX_PROCBASED_CTLS2_VAPIC_REGS (1<<8)
|
||||
#define VMX_PROCBASED_CTLS2_VIRQ (1<<9)
|
||||
#define VMX_PROCBASED_CTLS2_PAUSE_LOOP (1<<10)
|
||||
#define VMX_PROCBASED_CTLS2_RDRAND (1<<11)
|
||||
#define VMX_PROCBASED_CTLS2_INVPCID (1<<12)
|
||||
#define VMX_PROCBASED_CTLS2_VM_FUNCS (1<<13)
|
||||
#define VMX_PROCBASED_CTLS2_VMCS_SHADW (1<<14)
|
||||
#define VMX_PROCBASED_CTLS2_RDSEED (1<<16)
|
||||
#define VMX_PROCBASED_CTLS2_EPT_VE (1<<18)
|
||||
#define VMX_PROCBASED_CTLS2_XSVE_XRSTR (1<<20)
|
||||
|
||||
/* VMX exit control bits */
|
||||
#define VMX_EXIT_CTLS_SAVE_DBG (1<<2)
|
||||
#define VMX_EXIT_CTLS_HOST_ADDR64 (1<<9)
|
||||
#define VMX_EXIT_CTLS_LOAD_PERF (1<<12)
|
||||
#define VMX_EXIT_CTLS_ACK_IRQ (1<<15)
|
||||
#define VMX_EXIT_CTLS_SAVE_PAT (1<<18)
|
||||
#define VMX_EXIT_CTLS_LOAD_PAT (1<<19)
|
||||
#define VMX_EXIT_CTLS_SAVE_EFER (1<<20)
|
||||
#define VMX_EXIT_CTLS_LOAD_EFER (1<<21)
|
||||
#define VMX_EXIT_CTLS_SAVE_PTMR (1<<22)
|
||||
|
||||
/* VMX entry control bits */
|
||||
#define VMX_ENTRY_CTLS_LOAD_DBG (1<<2)
|
||||
#define VMX_ENTRY_CTLS_IA32E_MODE (1<<9)
|
||||
#define VMX_ENTRY_CTLS_ENTRY_SMM (1<<10)
|
||||
#define VMX_ENTRY_CTLS_DEACT_DUAL (1<<11)
|
||||
#define VMX_ENTRY_CTLS_LOAD_PERF (1<<13)
|
||||
#define VMX_ENTRY_CTLS_LOAD_PAT (1<<14)
|
||||
#define VMX_ENTRY_CTLS_LOAD_EFER (1<<15)
|
||||
|
||||
/* VMX entry/exit Interrupt info */
|
||||
#define VMX_INT_INFO_ERR_CODE_VALID (1<<11)
|
||||
#define VMX_INT_INFO_VALID (1<<31)
|
||||
#define VMX_INT_TYPE_EXT_INT 0
|
||||
#define VMX_INT_TYPE_NMI 2
|
||||
#define VMX_INT_TYPE_HW_EXP 3
|
||||
#define VMX_INT_TYPE_SW_EXP 6
|
||||
|
||||
#define VM_SUCCESS 0
|
||||
#define VM_FAIL -1
|
||||
|
||||
#define VMX_VMENTRY_FAIL 0x80000000
|
||||
|
||||
#ifndef ASSEMBLER

#define RFLAGS_C (1<<0)
#define RFLAGS_Z (1<<6)

/*
 * Handling of CR0:
 *
 * - PE (0) must always be 1; an attempt to write to it must lead to a VM exit.
 * - MP (1) coprocessor related => no action needed
 * - EM (2) coprocessor related => no action needed
 * - TS (3) no action needed
 * - ET (4) typically hardcoded to 1 => no action needed
 * - NE (5) coprocessor related => no action needed
 * - WP (16) prevents supervisor-level code from writing into read-only pages
 *           => no action needed
 * - AM (18) alignment mask => no action needed
 * - NW (29) not write through => no action needed
 * - CD (30) cache disable => no action needed
 * - PG (31) paging => must always be 1; an attempt to write to it must lead
 *           to a VM exit.
 */

/* we must guard protected mode and paging */
#define CR0_GUEST_HOST_MASK (CR0_PE | CR0_PG | CR0_WP)
/* initially, the guest runs in protected mode enabled, but with no paging */
#define CR0_READ_SHADOW CR0_PE

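/*
 * Illustrative sketch (not part of the original header): for every bit set in
 * CR0_GUEST_HOST_MASK a guest read of CR0 returns the bit from the read
 * shadow, while unmasked bits come straight from the guest CR0 in the VMCS:
 */
static inline uint64_t guest_visible_cr0(uint64_t vmcs_cr0, uint64_t mask,
		uint64_t shadow)
{
	return (vmcs_cr0 & ~mask) | (shadow & mask);
}
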
/*
 * Handling of CR4:
 *
 * - VME (0) must always be 0 => must lead to a VM exit
 * - PVI (1) must always be 0 => must lead to a VM exit
 * - TSD (2) don't care
 * - DE (3) don't care
 * - PSE (4) must always be 1 => must lead to a VM exit
 * - PAE (5) must always be 0 => must lead to a VM exit
 * - MCE (6) don't care
 * - PGE (7) => important for TLB flush
 * - PCE (8) don't care
 * - OSFXSR (9) don't care
 * - OSXMMEXCPT (10) don't care
 * - VMXE (13) must always be 1 => must lead to a VM exit
 * - SMXE (14) must always be 0 => must lead to a VM exit
 * - PCIDE (17) => important for TLB flush
 * - OSXSAVE (18) don't care
 */

#define CR4_GUEST_HOST_MASK (CR4_VME | CR4_PVI | CR4_PSE | CR4_PAE | \
		CR4_VMXE | CR4_SMXE | CR4_PGE | CR4_PCIDE)
#define CR4_READ_SHADOW (CR4_PGE | CR4_PSE)

/* VCPU config definitions */
#define REAL_MODE 1
#define PAGE_PROTECTED_MODE 2

/* External Interfaces */
int check_vmx_support(void);
int exec_vmxon_instr(void);
uint64_t exec_vmread(uint32_t field);
uint64_t exec_vmread64(uint32_t field_full);
void exec_vmwrite(uint32_t field, uint64_t value);
void exec_vmwrite64(uint32_t field_full, uint64_t value);
int init_vmcs(struct vcpu *vcpu);

int exec_vmclear(void *addr);
int exec_vmptrld(void *addr);

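/*
 * Illustrative sketch (not part of the original header): a common use of the
 * accessors above is to advance the guest RIP past the instruction that
 * caused a VM exit, using the field encodings defined earlier in this header:
 */
static inline void skip_guest_instruction_example(void)
{
	uint64_t rip = exec_vmread(VMX_GUEST_RIP);
	uint64_t len = exec_vmread(VMX_EXIT_INSTR_LEN);

	exec_vmwrite(VMX_GUEST_RIP, rip + len);
}
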
static inline uint8_t get_vcpu_mode(struct vcpu *vcpu)
{
	return vcpu->arch_vcpu.cpu_mode;
}
#endif /* ASSEMBLER */

#endif /* VMX_H_ */
258
hypervisor/include/arch/x86/vtd.h
Normal file
@@ -0,0 +1,258 @@
|
||||
/*
|
||||
* Copyright (C) 2018 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Intel Corporation nor the names of its
|
||||
* contributors may be used to endorse or promote products derived
|
||||
* from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef VTD_H
|
||||
#define VTD_H
|
||||
/*
|
||||
* Intel IOMMU register specification per version 1.0 public spec.
|
||||
*/
|
||||
|
||||
#define DMAR_VER_REG 0x0 /* Arch version supported by this IOMMU */
|
||||
#define DMAR_CAP_REG 0x8 /* Hardware supported capabilities */
|
||||
#define DMAR_ECAP_REG 0x10 /* Extended capabilities supported */
|
||||
#define DMAR_GCMD_REG 0x18 /* Global command register */
|
||||
#define DMAR_GSTS_REG 0x1c /* Global status register */
|
||||
#define DMAR_RTADDR_REG 0x20 /* Root entry table */
|
||||
#define DMAR_CCMD_REG 0x28 /* Context command reg */
|
||||
#define DMAR_FSTS_REG 0x34 /* Fault Status register */
|
||||
#define DMAR_FECTL_REG 0x38 /* Fault control register */
|
||||
#define DMAR_FEDATA_REG 0x3c /* Fault event interrupt data register */
|
||||
#define DMAR_FEADDR_REG 0x40 /* Fault event interrupt addr register */
|
||||
#define DMAR_FEUADDR_REG 0x44 /* Upper address register */
|
||||
#define DMAR_AFLOG_REG 0x58 /* Advanced Fault control */
|
||||
#define DMAR_PMEN_REG 0x64 /* Enable Protected Memory Region */
|
||||
#define DMAR_PLMBASE_REG 0x68 /* PMRR Low addr */
|
||||
#define DMAR_PLMLIMIT_REG 0x6c /* PMRR low limit */
|
||||
#define DMAR_PHMBASE_REG 0x70 /* pmrr high base addr */
|
||||
#define DMAR_PHMLIMIT_REG 0x78 /* pmrr high limit */
|
||||
#define DMAR_IQH_REG 0x80 /* Invalidation queue head register */
|
||||
#define DMAR_IQT_REG 0x88 /* Invalidation queue tail register */
|
||||
#define DMAR_IQ_SHIFT 4 /* Invalidation queue head/tail shift */
|
||||
#define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */
|
||||
#define DMAR_ICS_REG 0x9c /* Invalidation complete status register */
|
||||
#define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */
|
||||
|
||||
#define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4)
|
||||
#define DMAR_VER_MINOR(v) ((v) & 0x0f)
|
||||
|
||||
/*
|
||||
* Decoding Capability Register
|
||||
*/
|
||||
#define iommu_cap_pi(c) (((c) >> 59) & 1)
|
||||
#define iommu_cap_read_drain(c) (((c) >> 55) & 1)
|
||||
#define iommu_cap_write_drain(c) (((c) >> 54) & 1)
|
||||
#define iommu_cap_max_amask_val(c) (((c) >> 48) & 0x3f)
|
||||
#define iommu_cap_num_fault_regs(c) ((((c) >> 40) & 0xff) + 1)
|
||||
#define iommu_cap_pgsel_inv(c) (((c) >> 39) & 1)
|
||||
|
||||
#define iommu_cap_super_page_val(c) (((c) >> 34) & 0xf)
|
||||
#define iommu_cap_super_offset(c) \
|
||||
(((find_first_bit(&iommu_cap_super_page_val(c), 4)) \
|
||||
* OFFSET_STRIDE) + 21)
|
||||
|
||||
#define iommu_cap_fault_reg_offset(c) ((((c) >> 24) & 0x3ff) * 16)
|
||||
#define iommu_cap_max_fault_reg_offset(c) \
|
||||
(iommu_cap_fault_reg_offset(c) + iommu_cap_num_fault_regs(c) * 16)
|
||||
|
||||
#define iommu_cap_zlr(c) (((c) >> 22) & 1)
|
||||
#define iommu_cap_isoch(c) (((c) >> 23) & 1)
|
||||
#define iommu_cap_mgaw(c) ((((c) >> 16) & 0x3f) + 1)
|
||||
#define iommu_cap_sagaw(c) (((c) >> 8) & 0x1f)
|
||||
#define iommu_cap_caching_mode(c) (((c) >> 7) & 1)
|
||||
#define iommu_cap_phmr(c) (((c) >> 6) & 1)
|
||||
#define iommu_cap_plmr(c) (((c) >> 5) & 1)
|
||||
#define iommu_cap_rwbf(c) (((c) >> 4) & 1)
|
||||
#define iommu_cap_afl(c) (((c) >> 3) & 1)
|
||||
#define iommu_cap_ndoms(c) (((unsigned long)1) << (4 + 2 * ((c) & 0x7)))
|
||||
|
||||
/*
|
||||
* Decoding Extended Capability Register
|
||||
*/
|
||||
#define iommu_ecap_c(c) (((c) >> 0) & 1)
|
||||
#define iommu_ecap_qi(c) (((c) >> 1) & 1)
|
||||
#define iommu_ecap_dt(c) (((c) >> 2) & 1)
|
||||
#define iommu_ecap_ir(c) (((c) >> 3) & 1)
|
||||
#define iommu_ecap_eim(c) (((c) >> 4) & 1)
|
||||
#define iommu_ecap_pt(c) (((c) >> 6) & 1)
|
||||
#define iommu_ecap_sc(c) (((c) >> 7) & 1)
|
||||
#define iommu_ecap_iro(c) (((c) >> 8) & 0x3ff)
|
||||
#define iommu_ecap_mhmv(c) (((c) >> 20) & 0xf)
|
||||
#define iommu_ecap_ecs(c) (((c) >> 24) & 1)
|
||||
#define iommu_ecap_mts(c) (((c) >> 25) & 1)
|
||||
#define iommu_ecap_nest(c) (((c) >> 26) & 1)
|
||||
#define iommu_ecap_dis(c) (((c) >> 27) & 1)
|
||||
#define iommu_ecap_prs(c) (((c) >> 29) & 1)
|
||||
#define iommu_ecap_ers(c) (((c) >> 30) & 1)
|
||||
#define iommu_ecap_srs(c) (((c) >> 31) & 1)
|
||||
#define iommu_ecap_nwfs(c) (((c) >> 33) & 1)
|
||||
#define iommu_ecap_eafs(c) (((c) >> 34) & 1)
|
||||
#define iommu_ecap_pss(c) (((c) >> 35) & 0x1f)
|
||||
#define iommu_ecap_pasid(c) (((c) >> 40) & 1)
|
||||
#define iommu_ecap_dit(c) (((c) >> 41) & 1)
|
||||
#define iommu_ecap_pds(c) (((c) >> 42) & 1)
|
||||
|
||||
/* PMEN_REG */
|
||||
#define DMA_PMEN_EPM (((uint32_t)1)<<31)
|
||||
#define DMA_PMEN_PRS (((uint32_t)1)<<0)
|
||||
|
||||
/* GCMD_REG */
|
||||
#define DMA_GCMD_TE (((uint32_t)1) << 31)
|
||||
#define DMA_GCMD_SRTP (((uint32_t)1) << 30)
|
||||
#define DMA_GCMD_SFL (((uint32_t)1) << 29)
|
||||
#define DMA_GCMD_EAFL (((uint32_t)1) << 28)
|
||||
#define DMA_GCMD_WBF (((uint32_t)1) << 27)
|
||||
#define DMA_GCMD_QIE (((uint32_t)1) << 26)
|
||||
#define DMA_GCMD_SIRTP (((uint32_t)1) << 24)
|
||||
#define DMA_GCMD_IRE (((uint32_t) 1) << 25)
|
||||
#define DMA_GCMD_CFI (((uint32_t) 1) << 23)
|
||||
|
||||
/* GSTS_REG */
|
||||
#define DMA_GSTS_TES (((uint32_t)1) << 31)
|
||||
#define DMA_GSTS_RTPS (((uint32_t)1) << 30)
|
||||
#define DMA_GSTS_FLS (((uint32_t)1) << 29)
|
||||
#define DMA_GSTS_AFLS (((uint32_t)1) << 28)
|
||||
#define DMA_GSTS_WBFS (((uint32_t)1) << 27)
|
||||
#define DMA_GSTS_QIES (((uint32_t)1) << 26)
|
||||
#define DMA_GSTS_IRTPS (((uint32_t)1) << 24)
|
||||
#define DMA_GSTS_IRES (((uint32_t)1) << 25)
|
||||
#define DMA_GSTS_CFIS (((uint32_t)1) << 23)
|
||||
|
||||
/* CCMD_REG */
|
||||
#define DMA_CCMD_ICC (((uint64_t)1) << 63)
|
||||
#define DMA_CCMD_ICC_32 (((uint32_t)1) << 31)
|
||||
#define DMA_CCMD_GLOBAL_INVL (((uint64_t)1) << 61)
|
||||
#define DMA_CCMD_DOMAIN_INVL (((uint64_t)2) << 61)
|
||||
#define DMA_CCMD_DEVICE_INVL (((uint64_t)3) << 61)
|
||||
#define DMA_CCMD_FM(m) (((uint64_t)((m) & 0x3)) << 32)
|
||||
#define DMA_CCMD_MASK_NOBIT 0
|
||||
#define DMA_CCMD_MASK_1BIT 1
|
||||
#define DMA_CCMD_MASK_2BIT 2
|
||||
#define DMA_CCMD_MASK_3BIT 3
|
||||
#define DMA_CCMD_SID(s) (((uint64_t)((s) & 0xffff)) << 16)
|
||||
#define DMA_CCMD_DID(d) ((uint64_t)((d) & 0xffff))
|
||||
#define DMA_CCMD_GET_CAIG_32(v) (((uint32_t)(v) >> 27) & 0x3)
|
||||
|
||||
/* IOTLB_REG */
|
||||
#define DMA_IOTLB_IVT (((uint64_t)1) << 63)
|
||||
#define DMA_IOTLB_IVT_32 (((uint32_t)1) << 31)
|
||||
#define DMA_IOTLB_GLOBAL_INVL (((uint64_t)1) << 60)
|
||||
#define DMA_IOTLB_DOMAIN_INVL (((uint64_t)2) << 60)
|
||||
#define DMA_IOTLB_PAGE_INVL (((uint64_t)3) << 60)
|
||||
#define DMA_IOTLB_DR (((uint64_t)1) << 49)
|
||||
#define DMA_IOTLB_DW (((uint64_t)1) << 48)
|
||||
#define DMA_IOTLB_DID(d) \
|
||||
(((uint64_t)((d) & 0xffff)) << 32)
|
||||
#define DMA_IOTLB_GET_IAIG_32(v) (((uint32_t)(v) >> 25) & 0x3)
|
||||
|
||||
/* INVALIDATE_ADDRESS_REG */
|
||||
#define DMA_IOTLB_INVL_ADDR_AM(m) ((uint64_t)((m) & 0x3f))
|
||||
#define DMA_IOTLB_INVL_ADDR_IH_UNMODIFIED (((uint64_t)1) << 6)
|
||||
|
||||
/* FECTL_REG */
|
||||
#define DMA_FECTL_IM (((uint32_t)1) << 31)
|
||||
|
||||
/* FSTS_REG */
|
||||
#define DMA_FSTS_PFO(s) (((s) >> 0) & 1)
|
||||
#define DMA_FSTS_PPF(s) (((s) >> 1) & 1)
|
||||
#define DMA_FSTS_AFO(s) (((s) >> 2) & 1)
|
||||
#define DMA_FSTS_APF(s) (((s) >> 3) & 1)
|
||||
#define DMA_FSTS_IQE(s) (((s) >> 4) & 1)
|
||||
#define DMA_FSTS_ICE(s) (((s) >> 5) & 1)
|
||||
#define DMA_FSTS_ITE(s) (((s) >> 6) & 1)
|
||||
#define DMA_FSTS_PRO(s) (((s) >> 7) & 1)
|
||||
#define DMA_FSTS_FRI(s) (((s) >> 8) & 0xFF)
|
||||
|
||||
/* FRCD_REGs: upper 64 bits*/
|
||||
#define DMA_FRCD_UP_F(r) (((r) >> 63) & 1)
|
||||
#define DMA_FRCD_UP_T(r) (((r) >> 62) & 1)
|
||||
#define DMA_FRCD_UP_AT(r) (((r) >> 60) & 3)
|
||||
#define DMA_FRCD_UP_PASID(r) (((r) >> 40) & 0xfffff)
|
||||
#define DMA_FRCD_UP_FR(r) (((r) >> 32) & 0xff)
|
||||
#define DMA_FRCD_UP_PP(r) (((r) >> 31) & 1)
|
||||
#define DMA_FRCD_UP_EXE(r) (((r) >> 30) & 1)
|
||||
#define DMA_FRCD_UP_PRIV(r) (((r) >> 29) & 1)
|
||||
#define DMA_FRCD_UP_SID(r) (((r) >> 0) & 0xffff)
|
||||
|
||||
#define DMAR_CONTEXT_TRANSLATION_TYPE_TRANSLATED 0x00
|
||||
#define DMAR_CONTEXT_TRANSLATION_TYPE_RESERVED 0x01
|
||||
#define DMAR_CONTEXT_TRANSLATION_TYPE_PASSED_THROUGH 0x02
|
||||
|
||||
#define DRHD_FLAG_INCLUDE_PCI_ALL_MASK (1)

#define DEVFUN(dev, fun) (((dev & 0x1F) << 3) | ((fun & 0x7)))

struct dmar_dev_scope {
	uint8_t bus;
	uint8_t devfun;
};

struct dmar_drhd {
	uint32_t dev_cnt;
	uint16_t segment;
	uint8_t flags;
	bool ignore;
	uint64_t reg_base_addr;
	/* assume no pci device hotplug support */
	struct dmar_dev_scope *devices;
};

struct dmar_info {
	uint32_t drhd_count;
	struct dmar_drhd *drhd_units;
};

extern struct dmar_info *get_dmar_info(void);

struct iommu_domain;

/* Assign a device specified by bus & devfun to an IOMMU domain */
int assign_iommu_device(struct iommu_domain *domain,
	uint8_t bus, uint8_t devfun);

/* Unassign a device specified by bus & devfun from an IOMMU domain */
int unassign_iommu_device(struct iommu_domain *domain,
	uint8_t bus, uint8_t devfun);

/* Create an IOMMU domain for the VM specified by vm_id */
struct iommu_domain *create_iommu_domain(int vm_id,
	void *translation_table, int addr_width);

/* Destroy the IOMMU domain */
int destroy_iommu_domain(struct iommu_domain *domain);

/* Enable IOMMU translation */
void enable_iommu(void);

/* Disable IOMMU translation */
void disable_iommu(void);

/* IOMMU initialization */
int init_iommu(void);

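/*
 * Illustrative sketch (not part of the original header): a possible sequence
 * for passing a PCI device (bus 0, device 3, function 0) through to a VM.
 * Reusing the VM's EPT root as the translation table and a 48-bit address
 * width are assumptions, not requirements stated by this interface.
 */
static inline int passthrough_device_example(int vm_id, void *ept_root)
{
	struct iommu_domain *domain;

	domain = create_iommu_domain(vm_id, ept_root, 48);
	if (domain == NULL)
		return -1;

	return assign_iommu_device(domain, 0, DEVFUN(3, 0));
}
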
#endif