initial import

internal commit: 14ac2bc2299032fa6714d1fefa7cf0987b3e3085

Signed-off-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
Eddie Dong
2018-03-07 20:57:14 +08:00
committed by lijinxia
parent bd31b1c53e
commit 7a3a539b17
156 changed files with 41265 additions and 0 deletions

View File

@@ -0,0 +1,524 @@
/*-
* Copyright (c) 1996, by Peter Wemm and Steve Passe
* Copyright (c) 2017 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. The name of the developer may NOT be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _APICREG_H_
#define _APICREG_H_
/*
* Local && I/O APIC definitions.
*/
/*
* Pentium P54C+ Built-in APIC
* (Advanced programmable Interrupt Controller)
*
* Base Address of Built-in APIC in memory location
* is 0xfee00000.
*
* Map of APIC Registers:
*
* Offset (hex) Description Read/Write state
* 000 Reserved
* 010 Reserved
* 020 ID Local APIC ID R/W
* 030 VER Local APIC Version R
* 040 Reserved
* 050 Reserved
* 060 Reserved
* 070 Reserved
* 080 Task Priority Register R/W
* 090 Arbitration Priority Register R
* 0A0 Processor Priority Register R
* 0B0 EOI Register W
* 0C0 RRR Remote read R
* 0D0 Logical Destination R/W
* 0E0 Destination Format Register 0..27 R; 28..31 R/W
* 0F0 SVR Spurious Interrupt Vector Reg. 0..3 R; 4..9 R/W
* 100 ISR 000-031 R
* 110 ISR 032-063 R
* 120 ISR 064-095 R
* 130 ISR 096-127 R
* 140 ISR 128-159 R
* 150 ISR 160-191 R
* 160 ISR 192-223 R
* 170 ISR 224-255 R
* 180 TMR 000-031 R
* 190 TMR 032-063 R
* 1A0 TMR 064-095 R
* 1B0 TMR 096-127 R
* 1C0 TMR 128-159 R
* 1D0 TMR 160-191 R
* 1E0 TMR 192-223 R
* 1F0 TMR 224-255 R
* 200 IRR 000-031 R
* 210 IRR 032-063 R
* 220 IRR 064-095 R
* 230 IRR 096-127 R
* 240 IRR 128-159 R
* 250 IRR 160-191 R
* 260 IRR 192-223 R
* 270 IRR 224-255 R
* 280 Error Status Register R
* 290 Reserved
* 2A0 Reserved
* 2B0 Reserved
* 2C0 Reserved
* 2D0 Reserved
* 2E0 Reserved
* 2F0 Local Vector Table (CMCI) R/W
* 300 ICR_LOW Interrupt Command Reg. (0-31) R/W
* 310 ICR_HI Interrupt Command Reg. (32-63) R/W
* 320 Local Vector Table (Timer) R/W
* 330 Local Vector Table (Thermal) R/W (PIV+)
* 340 Local Vector Table (Performance) R/W (P6+)
* 350 LVT1 Local Vector Table (LINT0) R/W
* 360 LVT2 Local Vector Table (LINT1) R/W
* 370 LVT3 Local Vector Table (ERROR) R/W
* 380 Initial Count Reg. for Timer R/W
* 390 Current Count of Timer R
* 3A0 Reserved
* 3B0 Reserved
* 3C0 Reserved
* 3D0 Reserved
* 3E0 Timer Divide Configuration Reg. R/W
* 3F0 Reserved
*/
/******************************************************************************
* global defines, etc.
*/
/******************************************************************************
* LOCAL APIC structure
*/
#ifndef LOCORE
#define PAD3 int: 32; int: 32; int: 32
#define PAD4 int: 32; int: 32; int: 32; int: 32
/*
 * One 32-bit local APIC register plus 12 bytes of padding: every xAPIC
 * register occupies the low 4 bytes of its own 16-byte-aligned MMIO slot.
 */
struct lapic_reg {
uint32_t val; PAD3;
};
/*
 * Memory layout of the local APIC MMIO register page (default base
 * 0xfee00000). Each field sits in the low 4 bytes of a 16-byte-aligned
 * slot; PAD3/PAD4 supply the intervening reserved bytes so that the
 * struct offsets match the register offset map documented above
 * (e.g. id at 0x020, svr at 0x0F0, icr_lo at 0x300).
 */
struct lapic {
/* reserved */ PAD4;
/* reserved */ PAD4;
uint32_t id; PAD3;
uint32_t version; PAD3;
/* reserved */ PAD4;
/* reserved */ PAD4;
/* reserved */ PAD4;
/* reserved */ PAD4;
uint32_t tpr; PAD3;
uint32_t apr; PAD3;
uint32_t ppr; PAD3;
uint32_t eoi; PAD3;
/* reserved */ PAD4;
uint32_t ldr; PAD3;
uint32_t dfr; PAD3;
uint32_t svr; PAD3;
struct lapic_reg isr[8];
struct lapic_reg tmr[8];
struct lapic_reg irr[8];
uint32_t esr; PAD3;
/* reserved */ PAD4;
/* reserved */ PAD4;
/* reserved */ PAD4;
/* reserved */ PAD4;
/* reserved */ PAD4;
/* reserved */ PAD4;
uint32_t lvt_cmci; PAD3;
uint32_t icr_lo; PAD3;
uint32_t icr_hi; PAD3;
uint32_t lvt_timer; PAD3;
uint32_t lvt_thermal; PAD3;
uint32_t lvt_pcint; PAD3;
uint32_t lvt_lint0; PAD3;
uint32_t lvt_lint1; PAD3;
uint32_t lvt_error; PAD3;
uint32_t icr_timer; PAD3;
uint32_t ccr_timer; PAD3;
/* reserved */ PAD4;
/* reserved */ PAD4;
/* reserved */ PAD4;
/* reserved */ PAD4;
uint32_t dcr_timer; PAD3;
/* reserved */ PAD4;
};
/*
 * Local APIC register indices: the MMIO byte offset divided by
 * LAPIC_MEM_MUL (0x10), so e.g. LAPIC_ID (0x2) corresponds to MMIO
 * offset 0x020 in the map above. Registers from 0x40 up are AMD
 * extended-APIC space; see the note at LAPIC_MEM_REGION below.
 */
enum LAPIC_REGISTERS {
LAPIC_ID = 0x2,
LAPIC_VERSION = 0x3,
LAPIC_TPR = 0x8,
LAPIC_APR = 0x9,
LAPIC_PPR = 0xa,
LAPIC_EOI = 0xb,
LAPIC_LDR = 0xd,
LAPIC_DFR = 0xe, /* Not in x2APIC */
LAPIC_SVR = 0xf,
LAPIC_ISR0 = 0x10,
LAPIC_ISR1 = 0x11,
LAPIC_ISR2 = 0x12,
LAPIC_ISR3 = 0x13,
LAPIC_ISR4 = 0x14,
LAPIC_ISR5 = 0x15,
LAPIC_ISR6 = 0x16,
LAPIC_ISR7 = 0x17,
LAPIC_TMR0 = 0x18,
LAPIC_TMR1 = 0x19,
LAPIC_TMR2 = 0x1a,
LAPIC_TMR3 = 0x1b,
LAPIC_TMR4 = 0x1c,
LAPIC_TMR5 = 0x1d,
LAPIC_TMR6 = 0x1e,
LAPIC_TMR7 = 0x1f,
LAPIC_IRR0 = 0x20,
LAPIC_IRR1 = 0x21,
LAPIC_IRR2 = 0x22,
LAPIC_IRR3 = 0x23,
LAPIC_IRR4 = 0x24,
LAPIC_IRR5 = 0x25,
LAPIC_IRR6 = 0x26,
LAPIC_IRR7 = 0x27,
LAPIC_ESR = 0x28,
LAPIC_LVT_CMCI = 0x2f,
LAPIC_ICR_LO = 0x30,
LAPIC_ICR_HI = 0x31, /* Not in x2APIC */
LAPIC_LVT_TIMER = 0x32,
LAPIC_LVT_THERMAL = 0x33,
LAPIC_LVT_PCINT = 0x34,
LAPIC_LVT_LINT0 = 0x35,
LAPIC_LVT_LINT1 = 0x36,
LAPIC_LVT_ERROR = 0x37,
LAPIC_ICR_TIMER = 0x38,
LAPIC_CCR_TIMER = 0x39,
LAPIC_DCR_TIMER = 0x3e,
LAPIC_SELF_IPI = 0x3f, /* Only in x2APIC */
LAPIC_EXT_FEATURES = 0x40, /* AMD */
LAPIC_EXT_CTRL = 0x41, /* AMD */
LAPIC_EXT_SEOI = 0x42, /* AMD */
LAPIC_EXT_IER0 = 0x48, /* AMD */
LAPIC_EXT_IER1 = 0x49, /* AMD */
LAPIC_EXT_IER2 = 0x4a, /* AMD */
LAPIC_EXT_IER3 = 0x4b, /* AMD */
LAPIC_EXT_IER4 = 0x4c, /* AMD */
LAPIC_EXT_IER5 = 0x4d, /* AMD */
LAPIC_EXT_IER6 = 0x4e, /* AMD */
LAPIC_EXT_IER7 = 0x4f, /* AMD */
LAPIC_EXT_LVT0 = 0x50, /* AMD */
LAPIC_EXT_LVT1 = 0x51, /* AMD */
LAPIC_EXT_LVT2 = 0x52, /* AMD */
LAPIC_EXT_LVT3 = 0x53, /* AMD */
};
#define LAPIC_MEM_MUL 0x10
/*
* Although some registers are available on AMD processors only,
* it's not a big waste to reserve them on all platforms.
* However, we need to watch out for this space being assigned for
* non-APIC purposes in the future processor models.
*/
#define LAPIC_MEM_REGION ((LAPIC_EXT_LVT3 + 1) * LAPIC_MEM_MUL)
/******************************************************************************
* I/O APIC structure
*/
/*
 * I/O APIC MMIO layout: an index register (ioregsel) at offset 0x00
 * selects which internal register appears in the 32-bit data window
 * (iowin) at offset 0x10; PAD3 provides the 16-byte slot spacing.
 */
struct ioapic {
uint32_t ioregsel; PAD3;
uint32_t iowin; PAD3;
};
#undef PAD4
#undef PAD3
#endif /* !LOCORE */
/******************************************************************************
* various code 'logical' values
*/
/******************************************************************************
* LOCAL APIC defines
*/
/* default physical locations of LOCAL (CPU) APICs */
#define DEFAULT_APIC_BASE 0xfee00000
/* constants relating to APIC ID registers */
#define APIC_ID_MASK 0xff000000
#define APIC_ID_SHIFT 24
#define APIC_ID_CLUSTER 0xf0
#define APIC_ID_CLUSTER_ID 0x0f
#define APIC_MAX_CLUSTER 0xe
#define APIC_MAX_INTRACLUSTER_ID 3
#define APIC_ID_CLUSTER_SHIFT 4
/* fields in VER */
#define APIC_VER_VERSION 0x000000ff
#define APIC_VER_MAXLVT 0x00ff0000
#define MAXLVTSHIFT 16
#define APIC_VER_EOI_SUPPRESSION 0x01000000
#define APIC_VER_AMD_EXT_SPACE 0x80000000
/* fields in LDR */
#define APIC_LDR_RESERVED 0x00ffffff
/* fields in DFR */
#define APIC_DFR_RESERVED 0x0fffffff
#define APIC_DFR_MODEL_MASK 0xf0000000
#define APIC_DFR_MODEL_FLAT 0xf0000000
#define APIC_DFR_MODEL_CLUSTER 0x00000000
/* fields in SVR */
#define APIC_SVR_VECTOR 0x000000ff
#define APIC_SVR_VEC_PROG 0x000000f0
#define APIC_SVR_VEC_FIX 0x0000000f
#define APIC_SVR_ENABLE 0x00000100
#define APIC_SVR_SWDIS 0x00000000
#define APIC_SVR_SWEN 0x00000100
#define APIC_SVR_FOCUS 0x00000200
#define APIC_SVR_FEN 0x00000000
#define APIC_SVR_FDIS 0x00000200
#define APIC_SVR_EOI_SUPPRESSION 0x00001000
/* fields in TPR */
#define APIC_TPR_PRIO 0x000000ff
#define APIC_TPR_INT 0x000000f0
#define APIC_TPR_SUB 0x0000000f
/* fields in ESR */
#define APIC_ESR_SEND_CS_ERROR 0x00000001
#define APIC_ESR_RECEIVE_CS_ERROR 0x00000002
#define APIC_ESR_SEND_ACCEPT 0x00000004
#define APIC_ESR_RECEIVE_ACCEPT 0x00000008
#define APIC_ESR_SEND_ILLEGAL_VECTOR 0x00000020
#define APIC_ESR_RECEIVE_ILLEGAL_VECTOR 0x00000040
#define APIC_ESR_ILLEGAL_REGISTER 0x00000080
/* fields in ICR_LOW */
#define APIC_VECTOR_MASK 0x000000ff
#define APIC_DELMODE_MASK 0x00000700
#define APIC_DELMODE_FIXED 0x00000000
#define APIC_DELMODE_LOWPRIO 0x00000100
#define APIC_DELMODE_SMI 0x00000200
#define APIC_DELMODE_RR 0x00000300
#define APIC_DELMODE_NMI 0x00000400
#define APIC_DELMODE_INIT 0x00000500
#define APIC_DELMODE_STARTUP 0x00000600
#define APIC_DELMODE_RESV 0x00000700
#define APIC_DESTMODE_MASK 0x00000800
#define APIC_DESTMODE_PHY 0x00000000
#define APIC_DESTMODE_LOG 0x00000800
#define APIC_DELSTAT_MASK 0x00001000
#define APIC_DELSTAT_IDLE 0x00000000
#define APIC_DELSTAT_PEND 0x00001000
#define APIC_RESV1_MASK 0x00002000
#define APIC_LEVEL_MASK 0x00004000
#define APIC_LEVEL_DEASSERT 0x00000000
#define APIC_LEVEL_ASSERT 0x00004000
#define APIC_TRIGMOD_MASK 0x00008000
#define APIC_TRIGMOD_EDGE 0x00000000
#define APIC_TRIGMOD_LEVEL 0x00008000
#define APIC_RRSTAT_MASK 0x00030000
#define APIC_RRSTAT_INVALID 0x00000000
#define APIC_RRSTAT_INPROG 0x00010000
#define APIC_RRSTAT_VALID 0x00020000
#define APIC_RRSTAT_RESV 0x00030000
#define APIC_DEST_MASK 0x000c0000
#define APIC_DEST_DESTFLD 0x00000000
#define APIC_DEST_SELF 0x00040000
#define APIC_DEST_ALLISELF 0x00080000
#define APIC_DEST_ALLESELF 0x000c0000
#define APIC_RESV2_MASK 0xfff00000
#define APIC_ICRLO_RESV_MASK (APIC_RESV1_MASK | APIC_RESV2_MASK)
/* fields in LVT1/2 */
#define APIC_LVT_VECTOR 0x000000ff
#define APIC_LVT_DM 0x00000700
#define APIC_LVT_DM_FIXED 0x00000000
#define APIC_LVT_DM_SMI 0x00000200
#define APIC_LVT_DM_NMI 0x00000400
#define APIC_LVT_DM_INIT 0x00000500
#define APIC_LVT_DM_EXTINT 0x00000700
#define APIC_LVT_DS 0x00001000
#define APIC_LVT_IIPP 0x00002000
#define APIC_LVT_IIPP_INTALO 0x00002000
#define APIC_LVT_IIPP_INTAHI 0x00000000
#define APIC_LVT_RIRR 0x00004000
#define APIC_LVT_TM 0x00008000
#define APIC_LVT_M 0x00010000
/* fields in LVT Timer */
#define APIC_LVTT_VECTOR 0x000000ff
#define APIC_LVTT_DS 0x00001000
#define APIC_LVTT_M 0x00010000
#define APIC_LVTT_TM 0x00060000
#define APIC_LVTT_TM_ONE_SHOT 0x00000000
#define APIC_LVTT_TM_PERIODIC 0x00020000
#define APIC_LVTT_TM_TSCDLT 0x00040000
#define APIC_LVTT_TM_RSRV 0x00060000
/* APIC timer current count */
#define APIC_TIMER_MAX_COUNT 0xffffffff
/* fields in TDCR */
#define APIC_TDCR_2 0x00
#define APIC_TDCR_4 0x01
#define APIC_TDCR_8 0x02
#define APIC_TDCR_16 0x03
#define APIC_TDCR_32 0x08
#define APIC_TDCR_64 0x09
#define APIC_TDCR_128 0x0a
#define APIC_TDCR_1 0x0b
/* Constants related to AMD Extended APIC Features Register */
#define APIC_EXTF_ELVT_MASK 0x00ff0000
#define APIC_EXTF_ELVT_SHIFT 16
#define APIC_EXTF_EXTID_CAP 0x00000004
#define APIC_EXTF_SEIO_CAP 0x00000002
#define APIC_EXTF_IER_CAP 0x00000001
/* LVT table indices */
#define APIC_LVT_LINT0 0
#define APIC_LVT_LINT1 1
#define APIC_LVT_TIMER 2
#define APIC_LVT_ERROR 3
#define APIC_LVT_PMC 4
#define APIC_LVT_THERMAL 5
#define APIC_LVT_CMCI 6
#define APIC_LVT_MAX APIC_LVT_CMCI
/* AMD extended LVT constants, seem to be assigned by fiat */
#define APIC_ELVT_IBS 0 /* Instruction based sampling */
#define APIC_ELVT_MCA 1 /* MCE thresholding */
#define APIC_ELVT_DEI 2 /* Deferred error interrupt */
#define APIC_ELVT_SBI 3 /* Sideband interface */
#define APIC_ELVT_MAX APIC_ELVT_SBI
/******************************************************************************
* I/O APIC defines
*/
/* default physical locations of an IO APIC */
#define DEFAULT_IO_APIC_BASE 0xfec00000
/* window register offset */
#define IOAPIC_WINDOW 0x10
#define IOAPIC_EOIR 0x40
/* indexes into IO APIC */
#define IOAPIC_ID 0x00
#define IOAPIC_VER 0x01
#define IOAPIC_ARB 0x02
#define IOAPIC_REDTBL 0x10
#define IOAPIC_REDTBL0 IOAPIC_REDTBL
#define IOAPIC_REDTBL1 (IOAPIC_REDTBL+0x02)
#define IOAPIC_REDTBL2 (IOAPIC_REDTBL+0x04)
#define IOAPIC_REDTBL3 (IOAPIC_REDTBL+0x06)
#define IOAPIC_REDTBL4 (IOAPIC_REDTBL+0x08)
#define IOAPIC_REDTBL5 (IOAPIC_REDTBL+0x0a)
#define IOAPIC_REDTBL6 (IOAPIC_REDTBL+0x0c)
#define IOAPIC_REDTBL7 (IOAPIC_REDTBL+0x0e)
#define IOAPIC_REDTBL8 (IOAPIC_REDTBL+0x10)
#define IOAPIC_REDTBL9 (IOAPIC_REDTBL+0x12)
#define IOAPIC_REDTBL10 (IOAPIC_REDTBL+0x14)
#define IOAPIC_REDTBL11 (IOAPIC_REDTBL+0x16)
#define IOAPIC_REDTBL12 (IOAPIC_REDTBL+0x18)
#define IOAPIC_REDTBL13 (IOAPIC_REDTBL+0x1a)
#define IOAPIC_REDTBL14 (IOAPIC_REDTBL+0x1c)
#define IOAPIC_REDTBL15 (IOAPIC_REDTBL+0x1e)
#define IOAPIC_REDTBL16 (IOAPIC_REDTBL+0x20)
#define IOAPIC_REDTBL17 (IOAPIC_REDTBL+0x22)
#define IOAPIC_REDTBL18 (IOAPIC_REDTBL+0x24)
#define IOAPIC_REDTBL19 (IOAPIC_REDTBL+0x26)
#define IOAPIC_REDTBL20 (IOAPIC_REDTBL+0x28)
#define IOAPIC_REDTBL21 (IOAPIC_REDTBL+0x2a)
#define IOAPIC_REDTBL22 (IOAPIC_REDTBL+0x2c)
#define IOAPIC_REDTBL23 (IOAPIC_REDTBL+0x2e)
/* fields in VER, for redirection entry */
#define IOAPIC_MAX_RTE_MASK 0x00ff0000
#define MAX_RTE_SHIFT 16
/*
* fields in the IO APIC's redirection table entries
*/
#define IOAPIC_RTE_DEST APIC_ID_MASK /* broadcast addr: all APICs */
#define IOAPIC_RTE_RESV 0x00fe0000 /* reserved */
#define IOAPIC_RTE_INTMASK 0x00010000 /* R/W: INTerrupt mask */
#define IOAPIC_RTE_INTMCLR 0x00000000 /* clear, allow INTs */
#define IOAPIC_RTE_INTMSET 0x00010000 /* set, inhibit INTs */
#define IOAPIC_RTE_TRGRMOD 0x00008000 /* R/W: trigger mode */
#define IOAPIC_RTE_TRGREDG 0x00000000 /* edge */
#define IOAPIC_RTE_TRGRLVL 0x00008000 /* level */
#define IOAPIC_RTE_REM_IRR 0x00004000 /* RO: remote IRR */
#define IOAPIC_RTE_INTPOL 0x00002000 /*R/W:INT input pin polarity*/
#define IOAPIC_RTE_INTAHI 0x00000000 /* active high */
#define IOAPIC_RTE_INTALO 0x00002000 /* active low */
#define IOAPIC_RTE_DELIVS 0x00001000 /* RO: delivery status */
#define IOAPIC_RTE_DESTMOD 0x00000800 /*R/W:destination mode*/
#define IOAPIC_RTE_DESTPHY 0x00000000 /* physical */
#define IOAPIC_RTE_DESTLOG 0x00000800 /* logical */
#define IOAPIC_RTE_DELMOD 0x00000700 /* R/W: delivery mode */
#define IOAPIC_RTE_DELFIXED 0x00000000 /* fixed */
#define IOAPIC_RTE_DELLOPRI 0x00000100 /* lowest priority */
#define IOAPIC_RTE_DELSMI 0x00000200 /*System Management INT*/
#define IOAPIC_RTE_DELRSV1 0x00000300 /* reserved */
#define IOAPIC_RTE_DELNMI 0x00000400 /* NMI signal */
#define IOAPIC_RTE_DELINIT 0x00000500 /* INIT signal */
#define IOAPIC_RTE_DELRSV2 0x00000600 /* reserved */
#define IOAPIC_RTE_DELEXINT 0x00000700 /* External INTerrupt */
#define IOAPIC_RTE_INTVEC 0x000000ff /*R/W: INT vector field*/
#endif /* _APICREG_H_ */

View File

@@ -0,0 +1,99 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef ASSIGN_H
#define ASSIGN_H
/* Interrupt delivery flavor used by a passthrough device entry. */
enum ptdev_intr_type {
PTDEV_INTR_MSI,
PTDEV_INTR_INTX
};
/* Which virtual interrupt controller a guest INTx pin belongs to. */
enum ptdev_vpin_source {
PTDEV_VPIN_IOAPIC,
PTDEV_VPIN_PIC,
};
/* entry per guest virt vector */
/*
 * entry per guest virt vector
 *
 * Holds both the guest-programmed (vmsi_*/virt_*) and the physically
 * programmed (pmsi_*/phys_*) MSI address/data/vector pair for one
 * passthrough MSI/MSI-X vector.
 */
struct ptdev_msi_info {
uint32_t vmsi_addr; /* virt msi_addr */
uint32_t vmsi_data; /* virt msi_data */
uint16_t vmsi_ctl; /* virt msi_ctl */
uint32_t pmsi_addr; /* phys msi_addr */
uint32_t pmsi_data; /* phys msi_data */
int msix; /* 0-MSI, 1-MSIX */
int msix_entry_index; /* MSI: 0, MSIX: index of vector table*/
int virt_vector; /* vector as seen by the guest */
int phys_vector; /* vector allocated on the host */
};
/* entry per guest vioapic pin */
/* Maps one guest interrupt pin (IOAPIC or PIC) to a physical pin. */
struct ptdev_intx_info {
enum ptdev_vpin_source vpin_src;
uint8_t virt_pin;
uint8_t phys_pin;
};
/* entry per each allocated irq/vector */
/*
 * entry per each allocated irq/vector
 *
 * One remapping record per passthrough interrupt source. The anonymous
 * union (C11) at the end is interpreted according to 'type':
 * PTDEV_INTR_MSI selects 'msi', PTDEV_INTR_INTX selects 'intx'.
 */
struct ptdev_remapping_info {
struct vm *vm;
uint16_t virt_bdf; /* PCI bus:slot.func*/
uint16_t phys_bdf; /* PCI bus:slot.func*/
uint32_t active; /* 1=active, 0=inactive and to free*/
enum ptdev_intr_type type;
struct dev_handler_node *node;
struct list_head softirq_node; /* linkage for softirq processing list */
struct list_head entry_node; /* linkage for the per-VM entry list */
union {
struct ptdev_msi_info msi;
struct ptdev_intx_info intx;
};
};
void ptdev_intx_ack(struct vm *vm, int virt_pin,
enum ptdev_vpin_source vpin_src);
int ptdev_msix_remap(struct vm *vm, uint16_t virt_bdf,
struct ptdev_msi_info *info);
int ptdev_intx_pin_remap(struct vm *vm, struct ptdev_intx_info *info);
void ptdev_softirq(int cpu);
void ptdev_init(void);
void ptdev_vm_init(struct vm *vm);
void ptdev_vm_deinit(struct vm *vm);
void ptdev_add_intx_remapping(struct vm *vm, uint16_t virt_bdf,
uint16_t phys_bdf, uint8_t virt_pin, uint8_t phys_pin, bool pic_pin);
void ptdev_remove_intx_remapping(struct vm *vm, uint8_t virt_pin, bool pic_pin);
void ptdev_add_msix_remapping(struct vm *vm, uint16_t virt_bdf,
uint16_t phys_bdf, int vector_count);
void ptdev_remove_msix_remapping(struct vm *vm, uint16_t virt_bdf,
int vector_count);
int get_ptdev_info(char *str, int str_max);
#endif /* ASSIGN_H */

View File

@@ -0,0 +1,412 @@
/*-
* Copyright (c) 1989, 1990 William F. Jolitz
* Copyright (c) 1990 The Regents of the University of California.
* Copyright (c) 2017 Intel Corporation
* All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* William Jolitz.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: @(#)segments.h 7.1 (Berkeley) 5/9/91
* $FreeBSD$
*/
#ifndef CPU_H
#define CPU_H
/* Define page size */
#define CPU_PAGE_SHIFT 12
#define CPU_PAGE_SIZE 0x1000
/* Define CPU stack alignment */
#define CPU_STACK_ALIGN 16
/*
 * CR0 register bit definitions.
 * Unsigned literals are required: (1<<31) would left-shift into the sign
 * bit of a signed int, which is undefined behavior (C11 6.5.7p4).
 */
#define CR0_PG (1U<<31) /* paging enable */
#define CR0_CD (1U<<30) /* cache disable */
#define CR0_NW (1U<<29) /* not write through */
#define CR0_AM (1U<<18) /* alignment mask */
#define CR0_WP (1U<<16) /* write protect */
#define CR0_NE (1U<<5) /* numeric error */
#define CR0_ET (1U<<4) /* extension type */
#define CR0_TS (1U<<3) /* task switched */
#define CR0_EM (1U<<2) /* emulation */
#define CR0_MP (1U<<1) /* monitor coprocessor */
#define CR0_PE (1U<<0) /* protected mode enabled */
/* CR3 register definitions */
#define CR3_PWT (1<<3) /* page-level write through */
#define CR3_PCD (1<<4) /* page-level cache disable */
/* CR4 register definitions */
#define CR4_VME (1<<0) /* virtual 8086 mode extensions */
#define CR4_PVI (1<<1) /* protected mode virtual interrupts */
#define CR4_TSD (1<<2) /* time stamp disable */
#define CR4_DE (1<<3) /* debugging extensions */
#define CR4_PSE (1<<4) /* page size extensions */
#define CR4_PAE (1<<5) /* physical address extensions */
#define CR4_MCE (1<<6) /* machine check enable */
#define CR4_PGE (1<<7) /* page global enable */
#define CR4_PCE (1<<8)
/* performance monitoring counter enable */
#define CR4_OSFXSR (1<<9) /* OS support for FXSAVE/FXRSTOR */
#define CR4_OSXMMEXCPT (1<<10)
/* OS support for unmasked SIMD floating point exceptions */
#define CR4_VMXE (1<<13) /* VMX enable */
#define CR4_SMXE (1<<14) /* SMX enable */
#define CR4_PCIDE (1<<17) /* PCID enable */
#define CR4_OSXSAVE (1<<18)
/* XSAVE and Processor Extended States enable bit */
/*
* Entries in the Interrupt Descriptor Table (IDT)
*/
#define IDT_DE 0 /* #DE: Divide Error */
#define IDT_DB 1 /* #DB: Debug */
#define IDT_NMI 2 /* Nonmaskable External Interrupt */
#define IDT_BP 3 /* #BP: Breakpoint */
#define IDT_OF 4 /* #OF: Overflow */
#define IDT_BR 5 /* #BR: Bound Range Exceeded */
#define IDT_UD 6 /* #UD: Undefined/Invalid Opcode */
#define IDT_NM 7 /* #NM: No Math Coprocessor */
#define IDT_DF 8 /* #DF: Double Fault */
#define IDT_FPUGP 9 /* Coprocessor Segment Overrun */
#define IDT_TS 10 /* #TS: Invalid TSS */
#define IDT_NP 11 /* #NP: Segment Not Present */
#define IDT_SS 12 /* #SS: Stack Segment Fault */
#define IDT_GP 13 /* #GP: General Protection Fault */
#define IDT_PF 14 /* #PF: Page Fault */
#define IDT_MF 16 /* #MF: FPU Floating-Point Error */
#define IDT_AC 17 /* #AC: Alignment Check */
#define IDT_MC 18 /* #MC: Machine Check */
#define IDT_XF 19 /* #XF: SIMD Floating-Point Exception */
/*Bits in EFER special registers */
#define EFER_LMA 0x000000400 /* Long mode active (R) */
/* CPU clock frequencies (FSB) */
#define CPU_FSB_83KHZ 83200
#define CPU_FSB_100KHZ 99840
#define CPU_FSB_133KHZ 133200
#define CPU_FSB_166KHZ 166400
/* Time conversions */
#define CPU_GHZ_TO_HZ 1000000000
#define CPU_GHZ_TO_KHZ 1000000
#define CPU_GHZ_TO_MHZ 1000
#define CPU_MHZ_TO_HZ 1000000
#define CPU_MHZ_TO_KHZ 1000
/* Boot CPU ID */
#define CPU_BOOT_ID 0
/* CPU states defined */
#define CPU_STATE_RESET 0
#define CPU_STATE_INITIALIZING 1
#define CPU_STATE_RUNNING 2
#define CPU_STATE_HALTED 3
#define CPU_STATE_DEAD 4
/* hypervisor stack bottom magic('intl') */
#define SP_BOTTOM_MAGIC 0x696e746c
/* type of speculation control
* 0 - no speculation control support
* 1 - raw IBRS + IPBP support
* 2 - with STIBP optimization support
*/
#define IBRS_NONE 0
#define IBRS_RAW 1
#define IBRS_OPT 2
#ifndef ASSEMBLER
/**********************************/
/* EXTERNAL VARIABLES */
/**********************************/
extern const uint8_t _ld_cpu_secondary_reset_load[];
extern uint8_t _ld_cpu_secondary_reset_start[];
extern const uint64_t _ld_cpu_secondary_reset_size;
extern uint8_t _ld_bss_start[];
extern uint8_t _ld_bss_end[];
extern uint8_t _ld_cpu_data_start[];
extern uint8_t _ld_cpu_data_end[];
extern int ibrs_type;
/*
* To support per_cpu access, we use a special section ".cpu_data" to define
* the pattern of per CPU data. Memory for per CPU data is allocated as
* this section's size multiplied by the number of physical CPUs.
*
* +------------------+------------------+---+------------------+
* | percpu for pcpu0 | percpu for pcpu1 |...| percpu for pcpuX |
* +------------------+------------------+---+------------------+
* ^ ^
* | |
* --.cpu_data size--
*
* To access per cpu data, we use:
* per_cpu_data_base_ptr + curr_pcpu_id * cpu_data_section_size +
* offset_of_symbol_in_cpu_data_section
* to locate the per cpu data.
*/
/* declare per cpu data */
#define EXTERN_CPU_DATA(type, name) \
extern __typeof__(type) cpu_data_##name
EXTERN_CPU_DATA(uint8_t, lapic_id);
EXTERN_CPU_DATA(void *, vcpu);
EXTERN_CPU_DATA(uint8_t[STACK_SIZE], stack) __aligned(16);
/* define per cpu data */
#define DEFINE_CPU_DATA(type, name) \
__typeof__(type) cpu_data_##name \
__attribute__((__section__(".cpu_data")))
extern void *per_cpu_data_base_ptr;
extern int phy_cpu_num;
#define PER_CPU_DATA_OFFSET(sym_addr) \
((uint64_t)(sym_addr) - (uint64_t)(_ld_cpu_data_start))
#define PER_CPU_DATA_SIZE \
((uint64_t)_ld_cpu_data_end - (uint64_t)(_ld_cpu_data_start))
/*
* get percpu data for pcpu_id.
*
* It returns:
* per_cpu_data_##name[pcpu_id];
*/
#define per_cpu(name, pcpu_id) \
(*({ uint64_t base = (uint64_t)per_cpu_data_base_ptr; \
uint64_t off = PER_CPU_DATA_OFFSET(&cpu_data_##name); \
((typeof(&cpu_data_##name))(base + \
(pcpu_id) * PER_CPU_DATA_SIZE + off)); \
}))
/* get percpu data for current pcpu */
#define get_cpu_var(name) per_cpu(name, get_cpu_id())
/* Function prototypes */
void cpu_halt(uint32_t logical_id);
uint64_t cpu_cycles_per_second(void);
uint64_t tsc_cycles_in_period(uint16_t timer_period_in_us);
void cpu_secondary_reset(void);
int hv_main(int cpu_id);
bool check_tsc_adjust_support(void);
bool check_ibrs_ibpb_support(void);
bool check_stibp_support(void);
bool is_apicv_enabled(void);
/* Read control register */
#define CPU_CR_READ(cr, result_ptr) \
{ \
asm volatile ("mov %%" __CPP_STRING(cr) ", %0" \
: "=r"(*result_ptr)); \
}
/* Write control register */
#define CPU_CR_WRITE(cr, value) \
{ \
asm volatile ("mov %0, %%" __CPP_STRING(cr) \
: /* No output */ \
: "r"(value)); \
}
/* Read MSR */
#define CPU_MSR_READ(reg, msr_val_ptr) \
{ \
uint32_t msrl, msrh; \
asm volatile (" rdmsr ":"=a"(msrl), \
"=d"(msrh) : "c" (reg)); \
*msr_val_ptr = ((uint64_t)msrh<<32) | msrl; \
}
/* Write MSR */
#define CPU_MSR_WRITE(reg, msr_val) \
{ \
uint32_t msrl, msrh; \
msrl = (uint32_t)msr_val; \
msrh = (uint32_t)(msr_val >> 32); \
asm volatile (" wrmsr " : : "c" (reg), \
"a" (msrl), "d" (msrh)); \
}
/* Disables interrupts on the current CPU */
#define CPU_IRQ_DISABLE() \
{ \
asm volatile ("cli\n" : : : "cc"); \
}
/* Enables interrupts on the current CPU */
#define CPU_IRQ_ENABLE() \
{ \
asm volatile ("sti\n" : : : "cc"); \
}
/* This macro writes the stack pointer. */
#define CPU_SP_WRITE(stack_ptr) \
{ \
uint64_t rsp = (uint64_t)stack_ptr & ~(CPU_STACK_ALIGN - 1); \
asm volatile ("movq %0, %%rsp" : : "r"(rsp)); \
}
/* Synchronizes all read accesses from memory */
#define CPU_MEMORY_READ_BARRIER() \
{ \
asm volatile ("lfence\n" : : : "memory"); \
}
/* Synchronizes all write accesses to memory */
#define CPU_MEMORY_WRITE_BARRIER() \
{ \
asm volatile ("sfence\n" : : : "memory"); \
}
/* Synchronizes all read and write accesses to/from memory */
#define CPU_MEMORY_BARRIER() \
{ \
asm volatile ("mfence\n" : : : "memory"); \
}
/* Write the task register */
#define CPU_LTR_EXECUTE(ltr_ptr) \
{ \
asm volatile ("ltr %%ax\n" : : "a"(ltr_ptr)); \
}
/* Read time-stamp counter / processor ID */
#define CPU_RDTSCP_EXECUTE(timestamp_ptr, cpu_id_ptr) \
{ \
uint32_t tsl, tsh; \
asm volatile ("rdtscp":"=a"(tsl), "=d"(tsh), \
"=c"(*cpu_id_ptr)); \
*timestamp_ptr = ((uint64_t)tsh << 32) | tsl; \
}
/* Define variable(s) required to save / restore architecture interrupt state.
* These variable(s) are used in conjunction with the ESAL_AR_INT_ALL_DISABLE()
* and ESAL_AR_INT_ALL_RESTORE() macros to hold any data that must be preserved
* in order to allow these macros to function correctly.
*/
#define CPU_INT_CONTROL_VARS uint64_t cpu_int_value
/* Macro to save rflags register */
#define CPU_RFLAGS_SAVE(rflags_ptr) \
{ \
asm volatile (" pushf"); \
asm volatile (" pop %0" \
: "=r" (*(rflags_ptr)) \
: /* No inputs */); \
}
/* Macro to restore rflags register */
#define CPU_RFLAGS_RESTORE(rflags) \
{ \
asm volatile (" push %0" : : "r" (rflags)); \
asm volatile (" popf"); \
}
/* This macro locks out interrupts and saves the current architecture status
* register / state register to the specified address. This function does not
* attempt to mask any bits in the return register value and can be used as a
* quick method to guard a critical section.
* NOTE: This macro is used in conjunction with CPU_INT_ALL_RESTORE
* defined below and CPU_INT_CONTROL_VARS defined above.
*/
#define CPU_INT_ALL_DISABLE() \
{ \
CPU_RFLAGS_SAVE(&cpu_int_value); \
CPU_IRQ_DISABLE(); \
}
/* This macro restores the architecture status / state register used to lockout
* interrupts to the value provided. The intent of this function is to be a
* fast mechanism to restore the interrupt level at the end of a critical
* section to its original level.
* NOTE: This macro is used in conjunction with CPU_INT_ALL_DISABLE
* and CPU_INT_CONTROL_VARS defined above.
*/
#define CPU_INT_ALL_RESTORE() \
{ \
CPU_RFLAGS_RESTORE(cpu_int_value); \
}
/* Macro to get CPU ID */
static inline uint32_t get_cpu_id(void)
{
	uint32_t tsc_lo, tsc_hi, id;

	/* rdtscp also yields the current processor id in ECX; the
	 * timestamp halves in EAX/EDX are discarded here. */
	asm volatile ("rdtscp" : "=a" (tsc_lo), "=d" (tsc_hi), "=c" (id));
	return id;
}
static inline uint64_t cpu_rsp_get(void)
{
	uint64_t sp_val;

	/* Snapshot the current stack pointer. */
	asm volatile ("movq %%rsp, %0" : "=r" (sp_val));
	return sp_val;
}
static inline uint64_t cpu_rbp_get(void)
{
	uint64_t bp_val;

	/* Snapshot the current frame pointer. */
	asm volatile ("movq %%rbp, %0" : "=r" (bp_val));
	return bp_val;
}
/*
 * Read the 64-bit value of MSR 'reg_num'.
 * Equivalent to CPU_MSR_READ with the asm written inline: rdmsr takes
 * the MSR index in ECX and returns the value split across EDX:EAX.
 */
static inline uint64_t
msr_read(uint32_t reg_num)
{
	uint32_t lo, hi;

	asm volatile (" rdmsr " : "=a" (lo), "=d" (hi) : "c" (reg_num));
	return ((uint64_t)hi << 32) | lo;
}
/*
 * Write 'value64' to MSR 'reg_num'.
 * Equivalent to CPU_MSR_WRITE with the asm written inline: wrmsr takes
 * the MSR index in ECX and the value in EDX:EAX.
 */
static inline void
msr_write(uint32_t reg_num, uint64_t value64)
{
	uint32_t lo = (uint32_t)value64;
	uint32_t hi = (uint32_t)(value64 >> 32);

	asm volatile (" wrmsr " : : "c" (reg_num), "a" (lo), "d" (hi));
}
#else /* ASSEMBLER defined */
#endif /* ASSEMBLER defined */
#endif /* CPU_H */

View File

@@ -0,0 +1,152 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* cpuid.h
*
* Created on: Jan 4, 2018
* Author: don
*/
#ifndef CPUID_H_
#define CPUID_H_
/* CPUID bit definitions.
 * All masks are built from an unsigned 1U so that bit 31
 * (CPUID_EDX_PBE) is formed without the undefined behavior of
 * left-shifting a signed int into its sign bit (C11 6.5.7).
 */
/* CPUID.01H:ECX feature flags */
#define CPUID_ECX_SSE3		(1U<<0)
#define CPUID_ECX_PCLMUL	(1U<<1)
#define CPUID_ECX_DTES64	(1U<<2)
#define CPUID_ECX_MONITOR	(1U<<3)
#define CPUID_ECX_DS_CPL	(1U<<4)
#define CPUID_ECX_VMX		(1U<<5)
#define CPUID_ECX_SMX		(1U<<6)
#define CPUID_ECX_EST		(1U<<7)
#define CPUID_ECX_TM2		(1U<<8)
#define CPUID_ECX_SSSE3		(1U<<9)
#define CPUID_ECX_CID		(1U<<10)
#define CPUID_ECX_FMA		(1U<<12)
#define CPUID_ECX_CX16		(1U<<13)
#define CPUID_ECX_ETPRD		(1U<<14)
#define CPUID_ECX_PDCM		(1U<<15)
#define CPUID_ECX_DCA		(1U<<18)
#define CPUID_ECX_SSE4_1	(1U<<19)
#define CPUID_ECX_SSE4_2	(1U<<20)
#define CPUID_ECX_x2APIC	(1U<<21)
#define CPUID_ECX_MOVBE		(1U<<22)
#define CPUID_ECX_POPCNT	(1U<<23)
#define CPUID_ECX_AES		(1U<<25)
#define CPUID_ECX_XSAVE		(1U<<26)
#define CPUID_ECX_OSXSAVE	(1U<<27)
#define CPUID_ECX_AVX		(1U<<28)
/* CPUID.01H:EDX feature flags */
#define CPUID_EDX_FPU		(1U<<0)
#define CPUID_EDX_VME		(1U<<1)
#define CPUID_EDX_DE		(1U<<2)
#define CPUID_EDX_PSE		(1U<<3)
#define CPUID_EDX_TSC		(1U<<4)
#define CPUID_EDX_MSR		(1U<<5)
#define CPUID_EDX_PAE		(1U<<6)
#define CPUID_EDX_MCE		(1U<<7)
#define CPUID_EDX_CX8		(1U<<8)
#define CPUID_EDX_APIC		(1U<<9)
#define CPUID_EDX_SEP		(1U<<11)
#define CPUID_EDX_MTRR		(1U<<12)
#define CPUID_EDX_PGE		(1U<<13)
#define CPUID_EDX_MCA		(1U<<14)
#define CPUID_EDX_CMOV		(1U<<15)
#define CPUID_EDX_PAT		(1U<<16)
#define CPUID_EDX_PSE36		(1U<<17)
#define CPUID_EDX_PSN		(1U<<18)
#define CPUID_EDX_CLF		(1U<<19)
#define CPUID_EDX_DTES		(1U<<21)
#define CPUID_EDX_ACPI		(1U<<22)
#define CPUID_EDX_MMX		(1U<<23)
#define CPUID_EDX_FXSR		(1U<<24)
#define CPUID_EDX_SSE		(1U<<25)
#define CPUID_EDX_SSE2		(1U<<26)
#define CPUID_EDX_SS		(1U<<27)
#define CPUID_EDX_HTT		(1U<<28)
#define CPUID_EDX_TM1		(1U<<29)
#define CPUID_EDX_IA64		(1U<<30)
#define CPUID_EDX_PBE		(1U<<31)
/* CPUID.07H:EBX.TSC_ADJUST */
#define CPUID_EBX_TSC_ADJ	(1U<<1)
/* CPUID.07H:EDX.IBRS_IBPB */
#define CPUID_EDX_IBRS_IBPB	(1U<<26)
/* CPUID.07H:EDX.STIBP */
#define CPUID_EDX_STIBP		(1U<<27)
/* CPUID.80000001H:EDX.Page1GB */
#define CPUID_EDX_PAGE1GB	(1U<<26)
/* CPUID.07H:EBX.INVPCID */
#define CPUID_EBX_INVPCID	(1U<<10)
/* CPUID.01H:ECX.PCID */
#define CPUID_ECX_PCID		(1U<<17)
/* CPUID source operands (leaf selectors, loaded into EAX) */
#define CPUID_VENDORSTRING	0
#define CPUID_FEATURES		1
#define CPUID_TLB		2
#define CPUID_SERIALNUM		3
#define CPUID_EXTEND_FEATURE	7
#define CPUID_EXTEND_FUNCTION_1	0x80000001
/* Index of each cached CPUID leaf; the trailing _MAX enumerator is a
 * sentinel (count of cached leaves), not a real cache index.
 */
enum cpuid_cache_idx {
	CPUID_VENDORSTRING_CACHE_IDX = 0,
	CPUID_FEATURES_CACHE_IDX,
	CPUID_EXTEND_FEATURE_CACHE_IDX,
	CPUID_EXTEND_FEATURE_CACHE_MAX
};
/* One cached CPUID result: the four CPUID output registers plus a
 * validity flag.
 */
struct cpuid_cache_entry {
	uint32_t a;		/* EAX result */
	uint32_t b;		/* EBX result */
	uint32_t c;		/* ECX result */
	uint32_t d;		/* EDX result */
	uint32_t inited;	/* presumably non-zero once populated — confirm in cpuid.c */
	uint32_t reserved;	/* padding */
};
/**
 * Execute the CPUID instruction on the current processor.
 *
 * @param op     primary leaf, loaded into EAX.
 * @param count  sub-leaf selector, loaded into ECX.
 * @param a      out: EAX result.
 * @param b      out: EBX result.
 * @param c      out: ECX result.
 * @param d      out: EDX result.
 */
static inline void native_cpuid_count(uint32_t op, uint32_t count,
	uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
{
	/* CPUID clobbers EAX/EBX/ECX/EDX; all four are declared as
	 * outputs, with EAX/ECX doubling as the leaf/sub-leaf inputs.
	 */
	asm volatile ("cpuid"
		      : "=a" (*a),
			"=b" (*b),
			"=c" (*c),
			"=d" (*d)
		      : "a" (op), "c" (count));
}
void cpuid_count(uint32_t op, uint32_t count,
uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d);
#define cpuid(op, a, b, c, d) cpuid_count(op, 0, a, b, c, d)
void emulate_cpuid(struct vcpu *vcpu, uint32_t src_op, uint32_t *eax_ptr,
uint32_t *ebx_ptr, uint32_t *ecx_ptr, uint32_t *edx_ptr);
#endif /* CPUID_H_ */

View File

@@ -0,0 +1,314 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef GDT_H
#define GDT_H
/* GDT is defined in assembly so it can be used to switch modes before long mode
* is established.
* With 64-bit EFI this is not required since are already in long mode when EFI
* transfers control to the hypervisor. However, for any instantiation of the
* ACRN Hypervisor that requires a boot from reset the GDT will be
* used as mode transitions are being made to ultimately end up in long mode.
* For this reason we establish the GDT in assembly.
* This should not affect usage and convenience of interacting with the GDT in C
* as the complete definition of the GDT is driven by the defines in this file.
*
* Unless it proves to be not viable we will use a single GDT for all hypervisor
* CPUs, with space for per CPU LDT and TSS.
*/
/*
* Segment selectors in x86-64 and i386 are the same size, 8 bytes.
* Local Descriptor Table (LDT) selectors are 16 bytes on x86-64 instead of 8
* bytes.
* Task State Segment (TSS) selectors are 16 bytes on x86-64 instead of 8 bytes.
*/
#define X64_SEG_DESC_SIZE (0x8) /* In long mode SEG Descriptors are 8 bytes */
#define X64_LDT_DESC_SIZE (0x10)/* In long mode LDT Descriptors are 16 bytes */
#define X64_TSS_DESC_SIZE (0x10)/* In long mode TSS Descriptors are 16 bytes */
/*****************************************************************************
*
* BEGIN: Definition of the GDT.
*
* NOTE:
* If you change the size of the GDT or rearrange the location of descriptors
* within the GDT you must change both the defines and the C structure header.
*
*****************************************************************************/
/* Number of global 8 byte segment descriptor(s) */
#define HOST_GDT_RING0_SEG_SELECTORS (0x3) /* rsvd, code, data */
/* Offsets of global 8 byte segment descriptors */
#define HOST_GDT_RING0_RSVD_SEL (0x0000)
#define HOST_GDT_RING0_CODE_SEL (0x0008)
#define HOST_GDT_RING0_DATA_SEL (0x0010)
/* Number of global 16 byte TSS descriptor(s) */
#define HOST_GDT_RING0_TSS_SELECTORS (0x1)
/* One for each CPU in the hypervisor. */
/*****************************************************************************
 *
 * END: Definition of the GDT.
 *
 *****************************************************************************/
/* Offset to start of LDT Descriptors */
#define HOST_GDT_RING0_LDT_SEL \
	(HOST_GDT_RING0_SEG_SELECTORS * X64_SEG_DESC_SIZE)
/* Offset to start of TSS Descriptors (no LDT descriptors are allocated,
 * so this coincides with the LDT offset above)
 */
#define HOST_GDT_RING0_CPU_TSS_SEL (HOST_GDT_RING0_LDT_SEL)
/* Size of the GDT */
#define HOST_GDT_SIZE \
	(HOST_GDT_RING0_CPU_TSS_SEL + \
	(HOST_GDT_RING0_TSS_SELECTORS * X64_TSS_DESC_SIZE))
/* Defined position of Interrupt Stack Tables */
#define MACHINE_CHECK_IST (0x1)
#define DOUBLE_FAULT_IST (0x2)
#define STACK_FAULT_IST (0x3)
#ifndef ASSEMBLER
#include <types.h>
#include <cpu.h>
#define TSS_AVAIL (9)
/*
* Definition of an 8 byte code segment descriptor.
*/
/* 8-byte code segment descriptor, viewable either as a raw 64-bit value
 * or as low/high dword bit-fields.  Field order is the hardware layout;
 * do not reorder.  ('readeable' is a historical misspelling of
 * "readable" kept because it is part of the public field name.)
 */
union code_segment_descriptor {
	uint64_t value;
	struct {
		union {
			uint32_t value;
			struct {
				uint32_t limit_15_0:16;
				uint32_t base_15_0:16;
			} bits;
		} low32;
		union {
			uint32_t value;
			struct {
				uint32_t base_23_16:8;
				uint32_t accessed:1;
				uint32_t readeable:1;
				uint32_t conforming:1;
				uint32_t bit11_set:1;	/* code/data: 1 = code */
				uint32_t bit12_set:1;	/* S flag: 1 = code/data segment */
				uint32_t dpl:2;
				uint32_t present:1;
				uint32_t limit_19_16:4;
				uint32_t avl:1;
				uint32_t x64flag:1;	/* L: 64-bit code segment */
				uint32_t dflt:1;	/* D: default operand size */
				uint32_t granularity:1;
				uint32_t base_31_24:8;
			} bits;
		} high32;
	};
} __aligned(8);
/*
* Definition of an 8 byte data segment descriptor.
*/
/* 8-byte data segment descriptor, viewable either as a raw 64-bit value
 * or as low/high dword bit-fields.  Field order is the hardware layout;
 * do not reorder.
 */
union data_segment_descriptor {
	uint64_t value;
	struct {
		union {
			uint32_t value;
			struct {
				uint32_t limit_15_0:16;
				uint32_t base_15_0:16;
			} bits;
		} low32;
		union {
			uint32_t value;
			struct {
				uint32_t base_23_16:8;
				uint32_t accessed:1;
				uint32_t writeable:1;
				uint32_t expansion:1;	/* expand-down flag */
				uint32_t bit11_clr:1;	/* code/data: 0 = data */
				uint32_t bit12_set:1;	/* S flag: 1 = code/data segment */
				uint32_t dpl:2;
				uint32_t present:1;
				uint32_t limit_19_16:4;
				uint32_t avl:1;
				uint32_t rsvd_clr:1;
				uint32_t big:1;		/* B: 32-bit stack/operands */
				uint32_t granularity:1;
				uint32_t base_31_24:8;
			} bits;
		} high32;
	};
} __aligned(8);
/*
* Definition of an 8 byte system segment descriptor.
*/
/* 8-byte system segment descriptor (S flag clear); 'type' selects the
 * descriptor kind, e.g. TSS_AVAIL (9) defined above.
 */
union system_segment_descriptor {
	uint64_t value;
	struct {
		union {
			uint32_t value;
			struct {
				uint32_t limit_15_0:16;
				uint32_t base_15_0:16;
			} bits;
		} low32;
		union {
			uint32_t value;
			struct {
				uint32_t base_23_16:8;
				uint32_t type:4;
				uint32_t bit12_clr:1;	/* S flag: 0 = system segment */
				uint32_t dpl:2;
				uint32_t present:1;
				uint32_t limit_19_16:4;
				uint32_t rsvd_1:1;
				uint32_t rsvd_2_clr:1;
				uint32_t rsvd_3:1;
				uint32_t granularity:1;
				uint32_t base_31_24:8;
			} bits;
		} high32;
	};
} __aligned(8);
/*
* Definition of 16 byte TSS and LDT selectors.
*/
/* 16-byte long-mode TSS/LDT descriptor.  The first 8 bytes mirror
 * system_segment_descriptor; the upper 8 bytes carry base bits 63:32
 * plus reserved/must-be-zero fields.  Note the 'value' member aliases
 * only the low 8 bytes of the descriptor.
 */
union tss_64_descriptor {
	uint64_t value;
	struct {
		union {
			uint32_t value;
			struct {
				uint32_t limit_15_0:16;
				uint32_t base_15_0:16;
			} bits;
		} low32;
		union {
			uint32_t value;
			struct {
				uint32_t base_23_16:8;
				uint32_t type:4;
				uint32_t bit12_clr:1;	/* S flag: 0 = system segment */
				uint32_t dpl:2;
				uint32_t present:1;
				uint32_t limit_19_16:4;
				uint32_t rsvd_1:1;
				uint32_t rsvd_2_clr:1;
				uint32_t rsvd_3:1;
				uint32_t granularity:1;
				uint32_t base_31_24:8;
			} bits;
		} high32;
		uint32_t base_addr_63_32;	/* base address bits 63:32 */
		union {
			uint32_t value;
			struct {
				uint32_t rsvd_7_0:8;
				uint32_t bits_12_8_clr:4;	/* must be zero */
				uint32_t rsvd_31_13:20;
			} bits;
		} offset_12;	/* bytes 12..15 of the descriptor */
	};
} __aligned(8);
/*****************************************************************************
*
* BEGIN: Definition of the GDT.
*
* NOTE:
* If you change the size of the GDT or rearrange the location of descriptors
* within the GDT you must change both the defines and the C structure header.
*
*****************************************************************************/
/* In-memory image of the hypervisor GDT.  Member order must match the
 * selector offsets above: rsvd at 0x0, code at HOST_GDT_RING0_CODE_SEL
 * (0x8), data at HOST_GDT_RING0_DATA_SEL (0x10), then the per-CPU TSS
 * descriptor.
 */
struct host_gdt {
	uint64_t rsvd;
	union code_segment_descriptor host_gdt_code_descriptor;
	union data_segment_descriptor host_gdt_data_descriptor;
	union tss_64_descriptor host_gdt_tss_descriptors;
} __aligned(8);
/*****************************************************************************
*
* END: Definition of the GDT.
*
*****************************************************************************/
/*
* x86-64 Task State Segment (TSS) definition.
*/
/*
 * x86-64 Task State Segment (TSS).  Layout is fixed by hardware and the
 * struct is packed to prevent the compiler from padding it: rsp0..rsp2
 * are the per-privilege-level stack pointers, ist1..ist7 the Interrupt
 * Stack Table entries (see MACHINE_CHECK_IST etc. above).
 */
struct tss_64 {
	uint32_t rsvd1;
	uint64_t rsp0;
	uint64_t rsp1;
	uint64_t rsp2;
	uint32_t rsvd2;
	uint32_t rsvd3;
	uint64_t ist1;
	uint64_t ist2;
	uint64_t ist3;
	uint64_t ist4;
	uint64_t ist5;
	uint64_t ist6;
	uint64_t ist7;
	uint32_t rsvd4;
	uint32_t rsvd5;
	uint16_t rsvd6;
	uint16_t io_map_base_addr;	/* offset of the I/O permission bitmap */
} __packed __aligned(16);
/*
* Definition of the GDT descriptor.
*/
/* Pseudo-descriptor operand for LGDT: 16-bit limit followed by the GDT
 * base address; packed so the two fields are contiguous.
 * NOTE(review): hardware expects 'len' to be the GDT limit, i.e.
 * size - 1 — confirm at the initialisation site.
 */
struct host_gdt_descriptor {
	unsigned short len;
	struct host_gdt *gdt;
} __packed;
extern struct host_gdt HOST_GDT;
extern struct host_gdt_descriptor HOST_GDTR;
void load_gdtr_and_tr(void);
EXTERN_CPU_DATA(struct tss_64, tss);
EXTERN_CPU_DATA(struct host_gdt, gdt);
EXTERN_CPU_DATA(uint8_t[STACK_SIZE], mc_stack) __aligned(16);
EXTERN_CPU_DATA(uint8_t[STACK_SIZE], df_stack) __aligned(16);
EXTERN_CPU_DATA(uint8_t[STACK_SIZE], sf_stack) __aligned(16);
#endif /* end #ifndef ASSEMBLER */
#endif /* GDT_H */

View File

@@ -0,0 +1,115 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef GUEST_H
#define GUEST_H
/* Defines for VM Launch and Resume */
#define VM_RESUME 0
#define VM_LAUNCH 1
#define ACRN_DBG_PTIRQ 6
#define ACRN_DBG_IRQ 6
#ifndef ASSEMBLER
/*
 * Iterate 'vcpu' over the entries of vm->hw.vcpu_array, stopping at
 * vm->hw.num_vcpus or at the first NULL slot, whichever comes first.
 * Fix: use logical '&&' instead of bitwise '&' so the bounds check is a
 * proper short-circuiting boolean condition.
 * NOTE(review): the increment clause still reads vcpu_array[num_vcpus]
 * on the final step — assumes the array has at least num_vcpus + 1
 * slots (or is NULL-terminated); confirm at the allocation site.
 */
#define foreach_vcpu(idx, vm, vcpu) \
	for (idx = 0, vcpu = vm->hw.vcpu_array[idx]; \
		(idx < vm->hw.num_vcpus) && (vcpu != NULL); \
		idx++, vcpu = vm->hw.vcpu_array[idx])
struct vhm_request;
/*
* VCPU related APIs
*/
#define ACRN_REQUEST_EVENT 0
#define ACRN_REQUEST_EXTINT 1
#define ACRN_REQUEST_NMI 2
#define ACRN_REQUEST_GP 3
#define ACRN_REQUEST_TMR_UPDATE 4
#define ACRN_REQUEST_TLB_FLUSH 5
#define E820_MAX_ENTRIES 32
/* Summary of the physical memory map derived from the E820 table. */
struct e820_mem_params {
	uint64_t mem_bottom;		/* lowest RAM address seen in E820 */
	uint64_t mem_top;		/* highest RAM address seen in E820 */
	uint64_t max_ram_blk_base; /* used for the start address of UOS */
	uint64_t max_ram_blk_size;	/* size of that largest RAM block */
};
int prepare_vm0_memmap_and_e820(struct vm *vm);
/* Definition for a mem map lookup */
struct vm_lu_mem_map {
	struct list_head list; /* EPT mem map lookup list*/
	void *hpa;	/* Host physical start address of the map*/
	void *gpa;	/* Guest physical start address of the map */
	uint64_t size;	/* Size of map (presumably in bytes — confirm at callers) */
};
/*
* VM related APIs
*/
bool is_vm0(struct vm *vm);
bool vm_lapic_disabled(struct vm *vm);
uint64_t vcpumask2pcpumask(struct vm *vm, uint64_t vdmask);
int init_vm0_boot_info(struct vm *vm);
uint64_t gva2gpa(struct vm *vm, uint64_t cr3, uint64_t gva);
struct vcpu *get_primary_vcpu(struct vm *vm);
struct vcpu *vcpu_from_vid(struct vm *vm, int vcpu_id);
struct vcpu *vcpu_from_pid(struct vm *vm, int pcpu_id);
void init_e820(void);
void obtain_e820_mem_info(void);
extern uint32_t e820_entries;
extern struct e820_entry e820[E820_MAX_ENTRIES];
extern uint32_t boot_regs[];
extern struct e820_mem_params e820_mem;
int rdmsr_handler(struct vcpu *vcpu);
int wrmsr_handler(struct vcpu *vcpu);
void init_msr_emulation(struct vcpu *vcpu);
extern const char vm_exit[];
int vmx_vmrun(struct run_context *context, int ops, int ibrs);
int load_guest(struct vm *vm, struct vcpu *vcpu);
int general_sw_loader(struct vm *vm, struct vcpu *vcpu);
typedef int (*vm_sw_loader_t)(struct vm *, struct vcpu *);
extern vm_sw_loader_t vm_sw_loader;
#endif /* !ASSEMBLER */
#endif /* GUEST_H*/

View File

@@ -0,0 +1,288 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _VCPU_H_
#define _VCPU_H_
#define ACRN_VCPU_MMIO_COMPLETE (0)
/* Size of various elements within the VCPU structure */
#define REG_SIZE 8
/* Number of GPRs saved / restored for guest in VCPU structure */
#define NUM_GPRS 15
#define GUEST_STATE_AREA_SIZE 512
/* Indexes of GPRs saved / restored for guest */
#define VMX_MACHINE_T_GUEST_RAX_INDEX 0
#define VMX_MACHINE_T_GUEST_RBX_INDEX 1
#define VMX_MACHINE_T_GUEST_RCX_INDEX 2
#define VMX_MACHINE_T_GUEST_RDX_INDEX 3
#define VMX_MACHINE_T_GUEST_RBP_INDEX 4
#define VMX_MACHINE_T_GUEST_RSI_INDEX 5
#define VMX_MACHINE_T_GUEST_R8_INDEX 6
#define VMX_MACHINE_T_GUEST_R9_INDEX 7
#define VMX_MACHINE_T_GUEST_R10_INDEX 8
#define VMX_MACHINE_T_GUEST_R11_INDEX 9
#define VMX_MACHINE_T_GUEST_R12_INDEX 10
#define VMX_MACHINE_T_GUEST_R13_INDEX 11
#define VMX_MACHINE_T_GUEST_R14_INDEX 12
#define VMX_MACHINE_T_GUEST_R15_INDEX 13
#define VMX_MACHINE_T_GUEST_RDI_INDEX 14
/* Offsets of GPRs for guest within the VCPU data structure */
#define VMX_MACHINE_T_GUEST_RAX_OFFSET (VMX_MACHINE_T_GUEST_RAX_INDEX*REG_SIZE)
#define VMX_MACHINE_T_GUEST_RBX_OFFSET (VMX_MACHINE_T_GUEST_RBX_INDEX*REG_SIZE)
#define VMX_MACHINE_T_GUEST_RCX_OFFSET (VMX_MACHINE_T_GUEST_RCX_INDEX*REG_SIZE)
#define VMX_MACHINE_T_GUEST_RDX_OFFSET (VMX_MACHINE_T_GUEST_RDX_INDEX*REG_SIZE)
#define VMX_MACHINE_T_GUEST_RBP_OFFSET (VMX_MACHINE_T_GUEST_RBP_INDEX*REG_SIZE)
#define VMX_MACHINE_T_GUEST_RSI_OFFSET (VMX_MACHINE_T_GUEST_RSI_INDEX*REG_SIZE)
#define VMX_MACHINE_T_GUEST_RDI_OFFSET (VMX_MACHINE_T_GUEST_RDI_INDEX*REG_SIZE)
#define VMX_MACHINE_T_GUEST_R8_OFFSET (VMX_MACHINE_T_GUEST_R8_INDEX*REG_SIZE)
#define VMX_MACHINE_T_GUEST_R9_OFFSET (VMX_MACHINE_T_GUEST_R9_INDEX*REG_SIZE)
#define VMX_MACHINE_T_GUEST_R10_OFFSET (VMX_MACHINE_T_GUEST_R10_INDEX*REG_SIZE)
#define VMX_MACHINE_T_GUEST_R11_OFFSET (VMX_MACHINE_T_GUEST_R11_INDEX*REG_SIZE)
#define VMX_MACHINE_T_GUEST_R12_OFFSET (VMX_MACHINE_T_GUEST_R12_INDEX*REG_SIZE)
#define VMX_MACHINE_T_GUEST_R13_OFFSET (VMX_MACHINE_T_GUEST_R13_INDEX*REG_SIZE)
#define VMX_MACHINE_T_GUEST_R14_OFFSET (VMX_MACHINE_T_GUEST_R14_INDEX*REG_SIZE)
#define VMX_MACHINE_T_GUEST_R15_OFFSET (VMX_MACHINE_T_GUEST_R15_INDEX*REG_SIZE)
/* Hard-coded offset of cr2 in struct run_context!! */
#define VMX_MACHINE_T_GUEST_CR2_OFFSET (128)
/* Hard-coded offset of ia32_spec_ctrl in struct run_context!! */
#define VMX_MACHINE_T_GUEST_SPEC_CTRL_OFFSET (192)
/*sizes of various registers within the VCPU data structure */
#define VMX_CPU_S_FXSAVE_GUEST_AREA_SIZE GUEST_STATE_AREA_SIZE
#ifndef ASSEMBLER
/* Life-cycle states of a virtual CPU. */
enum vcpu_state {
	VCPU_INIT,		/* created, not yet started */
	VCPU_RUNNING,		/* scheduled and executing */
	VCPU_PAUSED,		/* temporarily stopped */
	VCPU_ZOMBIE,		/* shut down, awaiting destruction */
	VCPU_UNKNOWN_STATE,
};
/* Guest general-purpose register file saved/restored across VM exits.
 * Field order is ABI: it must match the VMX_MACHINE_T_GUEST_*_INDEX
 * defines above (note rdi is intentionally last, index 14).  rsp is not
 * here — it lives in struct run_context alongside rip/rflags.
 */
struct cpu_regs {
	uint64_t rax;
	uint64_t rbx;
	uint64_t rcx;
	uint64_t rdx;
	uint64_t rbp;
	uint64_t rsi;
	uint64_t r8;
	uint64_t r9;
	uint64_t r10;
	uint64_t r11;
	uint64_t r12;
	uint64_t r13;
	uint64_t r14;
	uint64_t r15;
	uint64_t rdi;
};
/* Cached guest segment-register state (selector, base, limit,
 * access-rights attributes); presumably mirrors the VMCS guest segment
 * fields — confirm against the VMCS read/write code.
 */
struct segment {
	uint64_t selector;
	uint64_t base;
	uint64_t limit;
	uint64_t attr;
};
/* Per-world guest execution context saved/restored around VM entry and
 * exit.  Offsets of guest_cpu_regs (0), cr2 (128) and ia32_spec_ctrl
 * (192) are hard-coded in assembly (VMX_MACHINE_T_GUEST_*_OFFSET); do
 * not reorder or resize fields without updating those defines.
 */
struct run_context {
/* Contains the guest register set.
 * NOTE: This must be the first element in the structure, so that the offsets
 * in vmx_asm.S match
 */
	union {
		struct cpu_regs regs;
		uint64_t longs[NUM_GPRS];
	} guest_cpu_regs;

	/** The guests CR registers 0, 2, 3 and 4. */
	uint64_t cr0;

	/* VMX_MACHINE_T_GUEST_CR2_OFFSET =
	*  offsetof(struct run_context, cr2) = 128
	*/
	uint64_t cr2;
	uint64_t cr3;
	uint64_t cr4;

	uint64_t rip;
	uint64_t rsp;
	uint64_t rflags;

	uint64_t dr7;
	uint64_t tsc_offset;

	/* MSRs */
	/* VMX_MACHINE_T_GUEST_SPEC_CTRL_OFFSET =
	*  offsetof(struct run_context, ia32_spec_ctrl) = 192
	*/
	uint64_t ia32_spec_ctrl;
	uint64_t ia32_star;
	uint64_t ia32_lstar;
	uint64_t ia32_fmask;
	uint64_t ia32_kernel_gs_base;

	uint64_t ia32_pat;
	uint64_t ia32_efer;
	uint64_t ia32_sysenter_cs;
	uint64_t ia32_sysenter_esp;
	uint64_t ia32_sysenter_eip;
	uint64_t ia32_debugctl;

	/* segment registers */
	struct segment cs;
	struct segment ss;
	struct segment ds;
	struct segment es;
	struct segment fs;
	struct segment gs;
	struct segment tr;
	struct segment idtr;
	struct segment ldtr;
	struct segment gdtr;

	/* The 512 bytes area to save the FPU/MMX/SSE states for the guest */
	/* 16-byte alignment is required by FXSAVE/FXRSTOR */
	uint64_t
	fxstore_guest_area[VMX_CPU_S_FXSAVE_GUEST_AREA_SIZE / sizeof(uint64_t)]
	__aligned(16);
};
/* 2 worlds: 0 for Normal World, 1 for Secure World */
#define NR_WORLD 2
#define NORMAL_WORLD 0
#define SECURE_WORLD 1
/* Architecture-specific (VMX) per-vCPU state. */
struct vcpu_arch {
	/* Index of the active world in contexts[] (NORMAL_WORLD or
	 * SECURE_WORLD).
	 */
	int cur_context;
	struct run_context contexts[NR_WORLD];

	/* A pointer to the VMCS for this CPU. */
	void *vmcs;

	/* Holds the information needed for IRQ/exception handling. */
	struct {
		/* The number of the exception to raise. */
		int exception;

		/* The error number for the exception. */
		int error;
	} exception_info;

	uint8_t lapic_mask;
	uint32_t irq_window_enabled;	/* interrupt-window exiting armed */
	uint32_t nrexits;		/* VM-exit counter */

	/* Auxiliary TSC value */
	uint64_t msr_tsc_aux;

	/* VCPU context state information */
	uint64_t exit_reason;
	uint64_t exit_interrupt_info;
	uint64_t exit_qualification;
	uint8_t inst_len;	/* length of the instruction that caused the exit */

	/* Information related to secondary / AP VCPU start-up */
	uint8_t cpu_mode;
	uint8_t nr_sipi;
	uint32_t sipi_vector;

	/* interrupt injection information */
	uint64_t pending_intr;

	/* per vcpu lapic */
	void *vlapic;
};
struct vm;
/* A virtual CPU: scheduling state, architecture state and the I/O
 * request bookkeeping used for io/ept emulation.
 */
struct vcpu {
	int pcpu_id;	/* Physical CPU ID of this VCPU */
	int vcpu_id;	/* virtual identifier for VCPU */
	struct vcpu_arch arch_vcpu;
		/* Architecture specific definitions for this VCPU */
	struct vm *vm;		/* Reference to the VM this VCPU belongs to */
	void *entry_addr;  /* Entry address for this VCPU when first started */

	/* State of this VCPU before suspend */
	volatile enum vcpu_state prev_state;
	volatile enum vcpu_state state;	/* State of this VCPU */
	/* State of debug request for this VCPU */
	volatile enum vcpu_state dbg_req_state;
	unsigned long sync;	/*hold the bit events*/
	struct vlapic *vlapic;	/* per vCPU virtualized LAPIC */

	struct list_head run_list; /* inserted to schedule runqueue */
	unsigned long pending_pre_work; /* any pre work pending? */
	bool launched; /* Whether the vcpu is launched on target pcpu */
	unsigned int paused_cnt; /* how many times vcpu is paused */
	unsigned int running; /* vcpu is picked up and run? */
	unsigned int ioreq_pending; /* ioreq is ongoing or not? */

	struct vhm_request req; /* used by io/ept emulation */
	struct mem_io mmio; /* used by io/ept emulation */

	/* Saved guest MSR_TSC_AUX value.
	 * Before VM entry, the guest MSR_TSC_AUX is saved into this field.
	 * After VM exit, this field is restored to the guest MSR_TSC_AUX.
	 * This is only a temporary workaround: once MSR emulation is
	 * enabled, this field and its related code should be removed.
	 */
	uint64_t msr_tsc_aux_guest;
	uint64_t *guest_msrs;
};
#define is_vcpu_bsp(vcpu) ((vcpu)->vcpu_id == 0)
/* do not update Guest RIP for next VM Enter */
#define VCPU_RETAIN_RIP(vcpu) ((vcpu)->arch_vcpu.inst_len = 0)
/* External Interfaces */
int create_vcpu(int cpu_id, struct vm *vm, struct vcpu **rtn_vcpu_handle);
int start_vcpu(struct vcpu *vcpu);
int shutdown_vcpu(struct vcpu *vcpu);
int destroy_vcpu(struct vcpu *vcpu);
void reset_vcpu(struct vcpu *vcpu);
void init_vcpu(struct vcpu *vcpu);
void pause_vcpu(struct vcpu *vcpu, enum vcpu_state new_state);
void resume_vcpu(struct vcpu *vcpu);
void schedule_vcpu(struct vcpu *vcpu);
int prepare_vcpu(struct vm *vm, int pcpu_id);
void request_vcpu_pre_work(struct vcpu *vcpu, int pre_work_id);
#endif
#endif

View File

@@ -0,0 +1,57 @@
/*-
* Copyright (c) 2013 Tycho Nightingale <tycho.nightingale@pluribusnetworks.com>
* Copyright (c) 2013 Neel Natu <neel@freebsd.org>
* Copyright (c) 2017 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _VIOAPIC_H_
#define _VIOAPIC_H_
#define VIOAPIC_BASE 0xFEC00000UL
#define VIOAPIC_SIZE 4096UL
struct vioapic *vioapic_init(struct vm *vm);
void vioapic_cleanup(struct vioapic *vioapic);
int vioapic_assert_irq(struct vm *vm, int irq);
int vioapic_deassert_irq(struct vm *vm, int irq);
int vioapic_pulse_irq(struct vm *vm, int irq);
void vioapic_update_tmr(struct vcpu *vcpu);
int vioapic_mmio_write(void *vm, uint64_t gpa,
uint64_t wval, int size);
int vioapic_mmio_read(void *vm, uint64_t gpa,
uint64_t *rval, int size);
int vioapic_pincount(struct vm *vm);
void vioapic_process_eoi(struct vm *vm, int vector);
bool vioapic_get_rte(struct vm *vm, int pin, void *rte);
int vioapic_mmio_access_handler(struct vcpu *vcpu, struct mem_io *mmio,
void *handler_private_data);
int get_vioapic_info(char *str, int str_max, int vmid);
#endif

View File

@@ -0,0 +1,132 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* Copyright (c) 2017 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _VLAPIC_H_
#define _VLAPIC_H_
struct vlapic;
/* APIC write handlers */
void vlapic_set_cr8(struct vlapic *vlapic, uint64_t val);
uint64_t vlapic_get_cr8(struct vlapic *vlapic);
/*
* Returns 0 if there is no eligible vector that can be delivered to the
* guest at this time and non-zero otherwise.
*
* If an eligible vector number is found and 'vecptr' is not NULL then it will
* be stored in the location pointed to by 'vecptr'.
*
* Note that the vector does not automatically transition to the ISR as a
* result of calling this function.
*/
int vlapic_pending_intr(struct vlapic *vlapic, int *vecptr);
/*
* Transition 'vector' from IRR to ISR. This function is called with the
* vector returned by 'vlapic_pending_intr()' when the guest is able to
* accept this interrupt (i.e. RFLAGS.IF = 1 and no conditions exist that
* block interrupt delivery).
*/
void vlapic_intr_accepted(struct vlapic *vlapic, int vector);
struct vlapic *vm_lapic_from_vcpuid(struct vm *vm, int vcpu_id);
struct vlapic *vm_lapic_from_pcpuid(struct vm *vm, int pcpu_id);
bool vlapic_msr(uint32_t num);
int vlapic_rdmsr(struct vcpu *vcpu, uint32_t msr, uint64_t *rval, bool *retu);
int vlapic_wrmsr(struct vcpu *vcpu, uint32_t msr, uint64_t wval, bool *retu);
int vlapic_mmio_read(struct vcpu *vcpu, uint64_t gpa, uint64_t *rval, int size);
int vlapic_mmio_write(struct vcpu *vcpu, uint64_t gpa, uint64_t wval, int size);
/*
* Signals to the LAPIC that an interrupt at 'vector' needs to be generated
* to the 'cpu', the state is recorded in IRR.
*/
int vlapic_set_intr(struct vcpu *vcpu, int vector, bool trig);
#define LAPIC_TRIG_LEVEL true
#define LAPIC_TRIG_EDGE false
/*
 * Record a level-triggered interrupt 'vector' in the vCPU's vLAPIC IRR.
 * Thin wrapper around vlapic_set_intr() with trig = LAPIC_TRIG_LEVEL;
 * returns its result.
 */
static inline int
vlapic_intr_level(struct vcpu *vcpu, int vector)
{
	return vlapic_set_intr(vcpu, vector, LAPIC_TRIG_LEVEL);
}
/*
 * Record an edge-triggered interrupt 'vector' in the vCPU's vLAPIC IRR.
 * Thin wrapper around vlapic_set_intr() with trig = LAPIC_TRIG_EDGE;
 * returns its result.
 */
static inline int
vlapic_intr_edge(struct vcpu *vcpu, int vector)
{
	return vlapic_set_intr(vcpu, vector, LAPIC_TRIG_EDGE);
}
/*
* Triggers the LAPIC local interrupt (LVT) 'vector' on 'cpu'. 'cpu' can
* be set to -1 to trigger the interrupt on all CPUs.
*/
int vlapic_set_local_intr(struct vm *vm, int cpu, int vector);
int vlapic_intr_msi(struct vm *vm, uint64_t addr, uint64_t msg);
void vlapic_deliver_intr(struct vm *vm, bool level, uint32_t dest,
bool phys, int delmode, int vec);
/* Reset the trigger-mode bits for all vectors to be edge-triggered */
void vlapic_reset_tmr(struct vlapic *vlapic);
/*
* Set the trigger-mode bit associated with 'vector' to level-triggered if
* the (dest,phys,delmode) tuple resolves to an interrupt being delivered to
* this 'vlapic'.
*/
void vlapic_set_tmr_one_vec(struct vlapic *vlapic, int delmode,
int vector, bool level);
void
vlapic_apicv_batch_set_tmr(struct vlapic *vlapic);
int vlapic_mmio_access_handler(struct vcpu *vcpu, struct mem_io *mmio,
void *handler_private_data);
uint32_t vlapic_get_id(struct vlapic *vlapic);
uint8_t vlapic_get_apicid(struct vlapic *vlapic);
int vlapic_create(struct vcpu *vcpu);
void vlapic_free(struct vcpu *vcpu);
void vlapic_init(struct vlapic *vlapic);
bool vlapic_enabled(struct vlapic *vlapic);
uint64_t apicv_get_apic_access_addr(struct vm *vm);
uint64_t apicv_get_apic_page_addr(struct vlapic *vlapic);
bool vlapic_apicv_enabled(struct vcpu *vcpu);
void apicv_inject_pir(struct vlapic *vlapic);
int apicv_access_exit_handler(struct vcpu *vcpu);
int apicv_write_exit_handler(struct vcpu *vcpu);
int apicv_virtualized_eoi_exit_handler(struct vcpu *vcpu);
void calcvdest(struct vm *vm, uint64_t *dmask, uint32_t dest, bool phys);
#endif /* _VLAPIC_H_ */

View File

@@ -0,0 +1,202 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef VM_H_
#define VM_H_
/* Relative privilege level of a VM; a lower numeric value means a more
 * privileged VM (HIGH == 0).
 */
enum vm_privilege_level {
	VM_PRIVILEGE_LEVEL_HIGH = 0,
	VM_PRIVILEGE_LEVEL_MEDIUM,
	VM_PRIVILEGE_LEVEL_LOW
};
#define MAX_VM_NAME_LEN 16

/* Static identification attributes of a virtual machine.
 * Fix: size the name buffer with MAX_VM_NAME_LEN instead of repeating the
 * magic constant 16 (layout is unchanged: MAX_VM_NAME_LEN == 16).
 */
struct vm_attr {
	char name[MAX_VM_NAME_LEN];	/* Virtual machine name string */
	int id;			/* Virtual machine identifier */
	int boot_idx;		/* Index indicating the boot sequence for this VM */
};
/* Hardware (vcpu/topology/memory) information for a VM. */
struct vm_hw_info {
	int num_vcpus;		/* Number of total virtual cores */
	uint32_t created_vcpus;	/* Number of created vcpus */
	struct vcpu **vcpu_array;	/* vcpu array of this VM */
	uint64_t gpa_lowtop;	/* top lowmem gpa of this VM */
};
/* Linux-guest boot material: source images and their load locations.
 * NOTE(review): whether *_src_addr/*_load_addr are hypervisor-virtual or
 * guest-physical addresses is not visible here -- confirm against the
 * guest loader code.
 */
struct sw_linux {
	void *ramdisk_src_addr;		/* ramdisk image source address */
	void *ramdisk_load_addr;	/* address the ramdisk is loaded at */
	uint32_t ramdisk_size;		/* ramdisk image size in bytes */
	void *bootargs_src_addr;	/* kernel command line source address */
	void *bootargs_load_addr;	/* address the bootargs are loaded at */
	uint32_t bootargs_size;		/* bootargs size in bytes */
	void *dtb_src_addr;		/* device-tree blob source address */
	void *dtb_load_addr;		/* address the DTB is loaded at */
	uint32_t dtb_size;		/* DTB size in bytes */
};
/* Guest kernel image information (common to all guest types). */
struct sw_kernel_info {
	void *kernel_src_addr;		/* kernel image source address */
	void *kernel_load_addr;		/* address the kernel is loaded at */
	void *kernel_entry_addr;	/* guest kernel entry point */
	uint32_t kernel_size;		/* kernel image size in bytes */
};
/* Software (guest OS) information associated with a VM. */
struct vm_sw_info {
	int kernel_type;	/* Guest kernel type (VM_LINUX_GUEST/VM_MONO_GUEST) */
	/* Kernel information (common for all guest types) */
	struct sw_kernel_info kernel_info;
	/* Additional information specific to Linux guests */
	struct sw_linux linux_info;
	/* GPA Address of guest OS's request buffer */
	uint64_t req_buf;
};
/* VM guest types (values stored in vm_sw_info.kernel_type) */
#define VM_LINUX_GUEST 0x02
#define VM_MONO_GUEST 0x01
/* How the virtual PIC's INTR output line is wired for the guest. */
enum vpic_wire_mode {
	VPIC_WIRE_INTR = 0,
	VPIC_WIRE_LAPIC,
	VPIC_WIRE_IOAPIC,
	VPIC_WIRE_NULL
};
/* Enumerated type for VM states */
enum vm_state {
	VM_CREATED = 0,	/* VM created / awaiting start (boot) */
	VM_STARTED,	/* VM started (booted) */
	VM_PAUSED,	/* VM paused */
	VM_STATE_UNKNOWN
};
/* Structure for VM state information */
struct vm_state_info {
	enum vm_state state;	/* State of the VM */
	/* Privilege level of the VM; presumably holds an
	 * enum vm_privilege_level value -- confirm at the assignment site.
	 */
	unsigned int privilege;
	unsigned int boot_count;/* Number of times the VM has booted */
};
/* Architecture (x86/VT-x) specific state of a VM. */
struct vm_arch {
	void *guest_pml4;	/* Guest pml4 */
	void *ept;	/* EPT hierarchy */
	void *m2p;	/* machine address to guest physical address */
	void *tmp_pg_array;	/* Page array for tmp guest paging struct */
	void *iobitmap[2];/* IO bitmap page array base address for this VM */
	void *msr_bitmap;	/* MSR bitmap page base address for this VM */
	void *virt_ioapic;	/* Virtual IOAPIC base address */
	/**
	 * A link to the IO handler list of this VM.
	 * IO handlers are only registered on the VM-creation path and
	 * unregistered when the VM is destroyed, so no lock is needed
	 * to guard against preemption.
	 * Besides, there are only a few IO handlers now, so a linear
	 * list (no binary search) is sufficient for the time being.
	 */
	struct vm_io_handler *io_handler;
	/* reference to virtual platform to come here (as needed) */
};
struct vpic;	/* opaque; defined by the virtual PIC implementation */
/* Top-level per-VM descriptor: ties together the attributes, hardware,
 * software and architectural state of one guest.
 */
struct vm {
	struct vm_attr attr;	/* Reference to this VM's attributes */
	struct vm_hw_info hw;	/* Reference to this VM's HW information */
	struct vm_sw_info sw;	/* Reference to SW associated with this VM */
	struct vm_arch arch_vm;	/* Reference to this VM's arch information */
	struct vm_state_info state_info;/* State info of this VM */
	enum vm_state state;	/* VM state */
	struct vcpu *current_vcpu;	/* VCPU that caused vm exit */
	void *vuart;		/* Virtual UART */
	struct vpic *vpic;	/* Virtual PIC */
	/* presumably holds an enum vpic_wire_mode value -- confirm callers */
	uint32_t vpic_wire_mode;
	struct iommu_domain *iommu_domain;	/* iommu domain of this VM */
	struct list_head list;	/* list of VM */
	spinlock_t spinlock;	/* Spin-lock used to protect VM modifications */
	struct list_head mmio_list;	/* list for mmio. This list is not updated
					 * when vm is active. So no lock needed
					 */
	struct _vm_shared_memory *shared_memory_area;
	/* head/tail of the virtual device list for this VM */
	struct {
		struct _vm_virtual_device_node *head;
		struct _vm_virtual_device_node *tail;
	} virtual_device_list;
	/* passthrough device link */
	struct list_head ptdev_list;
	spinlock_t ptdev_lock;	/* presumably protects ptdev_list */
	unsigned char GUID[16];	/* GUID of this VM (see vm_description.GUID) */
	unsigned int secure_world_enabled;	/* non-zero if secure world is on */
};
/* Static description used as input when creating a VM (see create_vm()). */
struct vm_description {
	/* Virtual machine identifier, assigned by the system */
	char *vm_attr_name;
	/* The logical CPU IDs associated with this VM - The first CPU listed
	 * will be the VM's BSP
	 */
	int *vm_hw_logical_core_ids;
	unsigned char GUID[16]; /* GUID of the vm will be created */
	int vm_hw_num_cores;	/* Number of virtual cores */
	/* Indicates to APs that the BSP has created a VM for this
	 * description
	 */
	bool vm_created;
	/* Index indicating VM's privilege level */
	unsigned int vm_state_info_privilege;
	unsigned int secure_world_enabled; /* secure_world enabled? */
};
/* A counted, variable-length table of VM descriptions (flexible array
 * member; the allocation must provide num_vm_desc trailing entries).
 */
struct vm_description_array {
	int num_vm_desc;
	struct vm_description vm_desc_array[];
};
/* VM lifecycle operations; return 0 on success (error conventions are
 * defined by the implementations in vm.c).
 */
int shutdown_vm(struct vm *vm);
int pause_vm(struct vm *vm);
int start_vm(struct vm *vm);
int create_vm(struct vm_description *vm_desc, struct vm **vm);
int prepare_vm0(void);	/* set up VM0 -- presumably the first/privileged VM */
/* Lookup helpers. */
struct vm *get_vm_from_vmid(int vm_id);
struct vm_description *get_vm_desc(int idx);
/* Global list of all VMs and the lock protecting it. */
extern struct list_head vm_list;
extern spinlock_t vm_list_lock;
extern bool x2apic_enabled;
#endif /* VM_H_ */

View File

@@ -0,0 +1,110 @@
/*-
* Copyright (c) 2014 Tycho Nightingale <tycho.nightingale@pluribusnetworks.com>
* Copyright (c) 2017 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _VPIC_H_
#define _VPIC_H_
/* Intel 8259A PIC programming constants: initialization control words
 * (ICW1-4), operation control words (OCW2/OCW3) and the ELCR ports.
 */
#define ICU_IMR_OFFSET 1
/* Initialization control word 1. Written to even address. */
#define ICW1_IC4 0x01 /* ICW4 present */
#define ICW1_SNGL 0x02 /* 1 = single, 0 = cascaded */
#define ICW1_ADI 0x04 /* 1 = 4, 0 = 8 byte vectors */
#define ICW1_LTIM 0x08 /* 1 = level trigger, 0 = edge */
#define ICW1_RESET 0x10 /* must be 1 */
/* 0x20 - 0x80 - in 8080/8085 mode only */
/* Initialization control word 2. Written to the odd address. */
/* No definitions, it is the base vector of the IDT for 8086 mode */
/* Initialization control word 3. Written to the odd address. */
/* For a master PIC, bitfield indicating a slave 8259 on given input */
/* For slave, lower 3 bits are the slave's ID binary id on master */
/* Initialization control word 4. Written to the odd address. */
#define ICW4_8086 0x01 /* 1 = 8086, 0 = 8080 */
#define ICW4_AEOI 0x02 /* 1 = Auto EOI */
#define ICW4_MS 0x04 /* 1 = buffered master, 0 = slave */
#define ICW4_BUF 0x08 /* 1 = enable buffer mode */
#define ICW4_SFNM 0x10 /* 1 = special fully nested mode */
/* Operation control words. Written after initialization. */
/* Operation control word type 1 */
/*
 * No definitions. Written to the odd address. Bitmask for interrupts.
 * 1 = disabled.
 */
/* Operation control word type 2. Bit 3 (0x08) must be zero. Even address. */
#define OCW2_L0 0x01 /* Level */
#define OCW2_L1 0x02
#define OCW2_L2 0x04
/* 0x08 must be 0 to select OCW2 vs OCW3 */
/* 0x10 must be 0 to select OCW2 vs ICW1 */
#define OCW2_EOI 0x20 /* 1 = EOI */
#define OCW2_SL 0x40 /* EOI mode */
#define OCW2_R 0x80 /* EOI mode */
/* Operation control word type 3. Bit 3 (0x08) must be set. Even address. */
#define OCW3_RIS 0x01 /* 1 = read IS, 0 = read IR */
#define OCW3_RR 0x02 /* register read */
#define OCW3_P 0x04 /* poll mode command */
/* 0x08 must be 1 to select OCW3 vs OCW2 */
#define OCW3_SEL 0x08 /* must be 1 */
/* 0x10 must be 0 to select OCW3 vs ICW1 */
#define OCW3_SMM 0x20 /* special mode mask */
#define OCW3_ESMM 0x40 /* enable SMM */
/* Edge/Level Control Register ports (one bit per IRQ; 1 = level). */
#define IO_ELCR1 0x4d0
#define IO_ELCR2 0x4d1
/* Trigger mode of a vPIC interrupt pin. */
enum vpic_trigger {
	EDGE_TRIGGER,
	LEVEL_TRIGGER
};
/* Lifecycle: create / destroy the virtual PIC of a VM. */
void *vpic_init(struct vm *vm);
void vpic_cleanup(struct vm *vm);
/* IRQ line manipulation: level assert/deassert and edge pulse. */
int vpic_assert_irq(struct vm *vm, int irq);
int vpic_deassert_irq(struct vm *vm, int irq);
int vpic_pulse_irq(struct vm *vm, int irq);
/* Query the highest-priority pending vector / acknowledge its acceptance. */
void vpic_pending_intr(struct vm *vm, int *vecptr);
void vpic_intr_accepted(struct vm *vm, int vector);
/* Per-IRQ trigger mode (edge/level) accessors. */
int vpic_set_irq_trigger(struct vm *vm, int irq, enum vpic_trigger trigger);
int vpic_get_irq_trigger(struct vm *vm, int irq, enum vpic_trigger *trigger);
struct vm_io_handler *vpic_create_io_handler(int flags, uint32_t port,
	uint32_t len);
/* True if the given virtual pin is currently masked. */
bool vpic_is_pin_mask(struct vpic *vpic, uint8_t virt_pin);
#endif /* _VPIC_H_ */

View File

@@ -0,0 +1,60 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef HV_ARCH_H
#define HV_ARCH_H
#include <cpu.h>
#include <gdt.h>
#include <idt.h>
#include <apicreg.h>
#include <ioapic.h>
#include <lapic.h>
#include <msr.h>
#include <io.h>
#include <vcpu.h>
#include <vm.h>
#include <cpuid.h>
#include <mmu.h>
#include <intr_ctx.h>
#include <irq.h>
#include <timer.h>
#include <softirq.h>
#include <vmx.h>
#include <assign.h>
#include <vtd.h>
#include <vpic.h>
#include <vlapic.h>
#include <vioapic.h>
#include <guest.h>
#include <vmexit.h>
#endif /* HV_ARCH_H */

View File

@@ -0,0 +1,111 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef IDT_H
#define IDT_H
/*
 * IDT is defined in assembly so we handle exceptions as early as possible.
 */
/* Interrupt Descriptor Table (IDT) gate descriptors are 16 bytes on x86-64
 * instead of 8 bytes.  (Comment fixed: this is the IDT, not the LDT.)
 */
#define X64_IDT_DESC_SIZE (0x10)
/* Number of the HOST IDT entries */
#define HOST_IDT_ENTRIES (0x100)
/* Size of the IDT */
#define HOST_IDT_SIZE (HOST_IDT_ENTRIES * X64_IDT_DESC_SIZE)
#ifndef ASSEMBLER
/*
 * Definition of a 16-byte x86-64 IDT gate descriptor.
 *
 * Note: the 64-bit 'value' member aliases only the first 8 bytes of the
 * descriptor (low32 + high32); offset_63_32 and rsvd form the second
 * 8 bytes.
 */
union idt_64_descriptor {
	uint64_t value;
	struct {
		union {
			uint32_t value;
			struct {
				uint32_t offset_15_0:16;	/* handler offset, bits 0..15 */
				uint32_t segment_sel:16;	/* code segment selector */
			} bits;
		} low32;
		union {
			uint32_t value;
			struct {
				uint32_t ist:3;		/* interrupt stack table index */
				uint32_t bit_3_clr:1;	/* must be 0 */
				uint32_t bit_4_clr:1;	/* must be 0 */
				uint32_t bits_5_7_clr:3;	/* must be 0 */
				uint32_t type:4;	/* gate type */
				uint32_t bit_12_clr:1;	/* must be 0 */
				uint32_t dpl:2;		/* descriptor privilege level */
				uint32_t present:1;	/* present flag */
				uint32_t offset_31_16:16;	/* handler offset, bits 16..31 */
			} bits;
		} high32;
		uint32_t offset_63_32;	/* handler offset, bits 32..63 */
		uint32_t rsvd;		/* reserved */
	};
} __aligned(8);
/*****************************************************************************
 *
 * Definition of the IDT: HOST_IDT_ENTRIES (256) 16-byte gate descriptors.
 *
 *****************************************************************************/
struct host_idt {
	union idt_64_descriptor host_idt_descriptors[HOST_IDT_ENTRIES];
} __aligned(8);
/*
 * Definition of the IDT descriptor: the LIDT operand format
 * (16-bit limit followed by the base address), hence __packed.
 */
struct host_idt_descriptor {
	/* IDT limit; NOTE(review): LIDT expects size-1 -- confirm where
	 * HOST_IDTR is initialized (outside this file).
	 */
	unsigned short len;
	struct host_idt *idt;	/* base address of the IDT */
} __packed;
/* The host IDT and its descriptor; defined in assembly (see note above). */
extern struct host_idt HOST_IDT;
extern struct host_idt_descriptor HOST_IDTR;
/* Load the given IDT descriptor into the CPU's IDTR via the lidtq
 * instruction ("m" constraint: operand taken directly from memory).
 */
static inline void set_idt(struct host_idt_descriptor *idtd)
{
	asm volatile (" lidtq %[idtd]\n" : /* no output parameters */
		      : /* input parameters */
		      [idtd] "m"(*idtd));
}
#endif /* end #ifndef ASSEMBLER */
#endif /* IDT_H */

View File

@@ -0,0 +1,64 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef INTR_CTX_H
#define INTR_CTX_H
/*
 * Definition of the stack frame layout seen by interrupt/exception handlers.
 *
 * NOTE(review): the field order must mirror the register save order of the
 * assembly entry stubs (r12..r11 saved by software, vector/error_code pushed
 * by the stub, rip..ss being the hardware exception frame) -- the stubs are
 * outside this file, so do not reorder fields without checking them.
 */
struct intr_ctx {
	uint64_t r12;
	uint64_t r13;
	uint64_t r14;
	uint64_t r15;
	uint64_t rbx;
	uint64_t rbp;
	uint64_t rax;
	uint64_t rcx;
	uint64_t rdx;
	uint64_t rsi;
	uint64_t rdi;
	uint64_t r8;
	uint64_t r9;
	uint64_t r10;
	uint64_t r11;
	uint64_t vector;	/* interrupt/exception vector number */
	uint64_t error_code;	/* hardware error code, or a filler value */
	uint64_t rip;		/* hardware exception frame starts here */
	uint64_t cs;
	uint64_t rflags;
	uint64_t rsp;
	uint64_t ss;
};
#endif /* INTR_CTX_H */

View File

@@ -0,0 +1,622 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef IO_H
#define IO_H
/* Definition of an IO port range to be emulated for a VM. */
struct vm_io_range {
	uint16_t base;	/* IO port base */
	uint16_t len;	/* IO port range */
	int flags;	/* IO port attributes (IO_ATTR_* values) */
};
/* Port I/O accessors.  Constraint "a" places the value in al/ax/eax as
 * required by in/out; "dN" allows the port in dx or as an 8-bit immediate.
 */
/* Write 1 byte to specified I/O port */
static inline void io_write_byte(uint8_t value, uint16_t port)
{
	asm volatile ("outb %0,%1"::"a" (value), "dN"(port));
}
/* Read 1 byte from specified I/O port */
static inline uint8_t io_read_byte(uint16_t port)
{
	uint8_t value;
	asm volatile ("inb %1,%0":"=a" (value):"dN"(port));
	return value;
}
/* Write 2 bytes to specified I/O port */
static inline void io_write_word(uint16_t value, uint16_t port)
{
	asm volatile ("outw %0,%1"::"a" (value), "dN"(port));
}
/* Read 2 bytes from specified I/O port */
static inline uint16_t io_read_word(uint16_t port)
{
	uint16_t value;
	asm volatile ("inw %1,%0":"=a" (value):"dN"(port));
	return value;
}
/* Write 4 bytes to specified I/O port */
static inline void io_write_long(uint32_t value, uint16_t port)
{
	asm volatile ("outl %0,%1"::"a" (value), "dN"(port));
}
/* Read 4 bytes from specified I/O port */
static inline uint32_t io_read_long(uint16_t port)
{
	uint32_t value;
	asm volatile ("inl %1,%0":"=a" (value):"dN"(port));
	return value;
}
/* Dispatch a port I/O write by access width: 1 or 2 bytes explicitly,
 * anything else is treated as a 4-byte access.
 */
static inline void io_write(uint32_t v, ioport_t addr, size_t sz)
{
	switch (sz) {
	case 1:
		io_write_byte(v, addr);
		break;
	case 2:
		io_write_word(v, addr);
		break;
	default:
		io_write_long(v, addr);
		break;
	}
}
/* Dispatch a port I/O read by access width: 1 or 2 bytes explicitly,
 * anything else is treated as a 4-byte access.
 */
static inline uint32_t io_read(ioport_t addr, size_t sz)
{
	switch (sz) {
	case 1:
		return io_read_byte(addr);
	case 2:
		return io_read_word(addr);
	default:
		return io_read_long(addr);
	}
}
/* Forward declarations used by the I/O emulation callback signatures. */
struct vm_io_handler;
struct vm;
struct vcpu;
/* Port-read emulation callback: returns the value "read" from the port
 * (args: handler, vm, port address, access width).
 */
typedef
uint32_t (*io_read_fn_t)(struct vm_io_handler *, struct vm *,
	ioport_t, size_t);
/* Port-write emulation callback: consumes the value "written" to the port
 * (args: handler, vm, port address, access width, value).
 */
typedef
void (*io_write_fn_t)(struct vm_io_handler *, struct vm *,
	ioport_t, size_t, uint32_t);
/* Describes a single IO handler description entry. */
struct vm_io_handler_desc {
	/** The base address of the IO range for this description. */
	ioport_t addr;
	/** The number of bytes covered by this description. */
	size_t len;
	/** A pointer to the "read" function.
	 *
	 * The read function is called from the hypervisor whenever
	 * a read access to a range described in "ranges" occurs.
	 * The arguments to the callback are:
	 *
	 * - The address of the port to read from.
	 * - The width of the read operation (1,2 or 4).
	 *
	 * The implementation must return the port's content as
	 * byte, word or doubleword (depending on the width).
	 *
	 * If the pointer is null, a read of 1's is assumed.
	 */
	io_read_fn_t io_read;
	/** A pointer to the "write" function.
	 *
	 * The write function is called from the hypervisor code
	 * whenever a write access to a range described in "ranges"
	 * occurs. The arguments to the callback are:
	 *
	 * - The address of the port to write to.
	 * - The width of the write operation (1,2 or 4).
	 * - The value to write as byte, word or doubleword
	 *   (depending on the width)
	 *
	 * The implementation must write the value to the port.
	 *
	 * If the pointer is null, the write access is ignored.
	 */
	io_write_fn_t io_write;
};
/* A registered IO handler: a singly-linked list node plus its description
 * (see struct vm_arch.io_handler for the list head).
 */
struct vm_io_handler {
	struct vm_io_handler *next;	/* next handler in the VM's list */
	struct vm_io_handler_desc desc;
};
/* IO port attribute flags (used in struct vm_io_range.flags). */
#define IO_ATTR_R 0		/* read-only */
#define IO_ATTR_RW 1		/* read/write */
#define IO_ATTR_NO_ACCESS 2	/* no access */
/* External Interfaces */
/* Handle a port IO instruction VM exit for the given vcpu. */
int io_instr_handler(struct vcpu *vcpu);
/* Set up the VM's IO bitmap pages (see struct vm_arch.iobitmap). */
void setup_io_bitmap(struct vm *vm);
/* Release all IO handlers registered for the VM. */
void free_io_emulation_resource(struct vm *vm);
/* Register read/write emulation callbacks for a port range. */
void register_io_emulation_handler(struct vm *vm, struct vm_io_range *range,
	io_read_fn_t io_read_fn_ptr,
	io_write_fn_t io_write_fn_ptr);
/* Post-processing after a port IO request was emulated by the device
 * model -- presumably consumes the result; confirm in the implementation.
 */
int dm_emulate_pio_post(struct vcpu *vcpu);
/*
 * Plain MMIO register accessors: each helper casts the address to a
 * pointer of the access width and performs a single load or store.
 */

/** Store the 32-bit @p value at MMIO address @p addr. */
static inline void mmio_write_long(uint32_t value, mmio_addr_t addr)
{
	uint32_t *reg = (uint32_t *)addr;

	*reg = value;
}

/** Store the low 16 bits of @p value at MMIO address @p addr. */
static inline void mmio_write_word(uint32_t value, mmio_addr_t addr)
{
	uint16_t *reg = (uint16_t *)addr;

	*reg = value;
}

/** Store the low 8 bits of @p value at MMIO address @p addr. */
static inline void mmio_write_byte(uint32_t value, mmio_addr_t addr)
{
	uint8_t *reg = (uint8_t *)addr;

	*reg = value;
}

/** Load and return the 32-bit value at MMIO address @p addr. */
static inline uint32_t mmio_read_long(mmio_addr_t addr)
{
	uint32_t *reg = (uint32_t *)addr;

	return *reg;
}

/** Load and return the 16-bit value at MMIO address @p addr. */
static inline uint16_t mmio_read_word(mmio_addr_t addr)
{
	uint16_t *reg = (uint16_t *)addr;

	return *reg;
}

/** Load and return the 8-bit value at MMIO address @p addr. */
static inline uint8_t mmio_read_byte(mmio_addr_t addr)
{
	uint8_t *reg = (uint8_t *)addr;

	return *reg;
}
/** Set the bits of @p mask in the 32-bit MMIO register at @p addr. */
static inline void mmio_or_long(uint32_t mask, mmio_addr_t addr)
{
	uint32_t *reg = (uint32_t *)addr;

	*reg |= mask;
}

/** Set the bits of @p mask in the 16-bit MMIO register at @p addr. */
static inline void mmio_or_word(uint32_t mask, mmio_addr_t addr)
{
	uint16_t *reg = (uint16_t *)addr;

	*reg |= mask;
}

/** Set the bits of @p mask in the 8-bit MMIO register at @p addr. */
static inline void mmio_or_byte(uint32_t mask, mmio_addr_t addr)
{
	uint8_t *reg = (uint8_t *)addr;

	*reg |= mask;
}

/** Clear the bits of @p mask in the 32-bit MMIO register at @p addr. */
static inline void mmio_and_long(uint32_t mask, mmio_addr_t addr)
{
	uint32_t *reg = (uint32_t *)addr;

	*reg &= ~mask;
}

/** Clear the bits of @p mask in the 16-bit MMIO register at @p addr. */
static inline void mmio_and_word(uint32_t mask, mmio_addr_t addr)
{
	uint16_t *reg = (uint16_t *)addr;

	*reg &= ~mask;
}

/** Clear the bits of @p mask in the 8-bit MMIO register at @p addr. */
static inline void mmio_and_byte(uint32_t mask, mmio_addr_t addr)
{
	uint8_t *reg = (uint8_t *)addr;

	*reg &= ~mask;
}
/** Read-modify-write the 32-bit MMIO register at @p addr.
 *
 * Bits in @p clear are cleared, then bits in @p set are set.  A bit
 * present in both masks ends up set here (clear is applied first), but
 * callers should treat that combination as unspecified.
 */
static inline void mmio_rmw_long(uint32_t set, uint32_t clear, mmio_addr_t addr)
{
	uint32_t *reg = (uint32_t *)addr;
	uint32_t val = *reg;

	val &= ~clear;
	val |= set;
	*reg = val;
}

/** Read-modify-write the 16-bit MMIO register at @p addr.
 *
 * Bits in @p clear are cleared, then bits in @p set are set; a bit in
 * both masks should be treated as unspecified by callers.
 */
static inline void mmio_rmw_word(uint32_t set, uint32_t clear, mmio_addr_t addr)
{
	uint16_t *reg = (uint16_t *)addr;
	uint16_t val = *reg;

	val &= ~clear;
	val |= set;
	*reg = val;
}

/** Read-modify-write the 8-bit MMIO register at @p addr.
 *
 * Bits in @p clear are cleared, then bits in @p set are set; a bit in
 * both masks should be treated as unspecified by callers.
 */
static inline void mmio_rmw_byte(uint32_t set, uint32_t clear, mmio_addr_t addr)
{
	uint8_t *reg = (uint8_t *)addr;
	uint8_t val = *reg;

	val &= ~clear;
	val |= set;
	*reg = val;
}
/*
 * ROM-code versions of the plain MMIO accessors; behavior is identical
 * to the mmio_* helpers above.
 */

/** Store the 32-bit @p value at MMIO address @p addr (ROM code version). */
static inline void __mmio_write_long(uint32_t value, mmio_addr_t addr)
{
	uint32_t *reg = (uint32_t *)addr;

	*reg = value;
}

/** Store the low 16 bits of @p value at @p addr (ROM code version). */
static inline void __mmio_write_word(uint32_t value, mmio_addr_t addr)
{
	uint16_t *reg = (uint16_t *)addr;

	*reg = value;
}

/** Store the low 8 bits of @p value at @p addr (ROM code version). */
static inline void __mmio_write_byte(uint32_t value, mmio_addr_t addr)
{
	uint8_t *reg = (uint8_t *)addr;

	*reg = value;
}

/** Load and return the 32-bit value at @p addr (ROM code version). */
static inline uint32_t __mmio_read_long(mmio_addr_t addr)
{
	uint32_t *reg = (uint32_t *)addr;

	return *reg;
}

/** Load and return the 16-bit value at @p addr (ROM code version). */
static inline uint16_t __mmio_read_word(mmio_addr_t addr)
{
	uint16_t *reg = (uint16_t *)addr;

	return *reg;
}

/** Load and return the 8-bit value at @p addr (ROM code version). */
static inline uint8_t __mmio_read_byte(mmio_addr_t addr)
{
	uint8_t *reg = (uint8_t *)addr;

	return *reg;
}
/** Set @p mask bits in the 32-bit register at @p addr (ROM code version). */
static inline void __mmio_or_long(uint32_t mask, mmio_addr_t addr)
{
	uint32_t *reg = (uint32_t *)addr;

	*reg |= mask;
}

/** Set @p mask bits in the 16-bit register at @p addr (ROM code version). */
static inline void __mmio_or_word(uint32_t mask, mmio_addr_t addr)
{
	uint16_t *reg = (uint16_t *)addr;

	*reg |= mask;
}

/** Set @p mask bits in the 8-bit register at @p addr (ROM code version). */
static inline void __mmio_or_byte(uint32_t mask, mmio_addr_t addr)
{
	uint8_t *reg = (uint8_t *)addr;

	*reg |= mask;
}

/** Clear @p mask bits in the 32-bit register at @p addr (ROM code version). */
static inline void __mmio_and_long(uint32_t mask, mmio_addr_t addr)
{
	uint32_t *reg = (uint32_t *)addr;

	*reg &= ~mask;
}

/** Clear @p mask bits in the 16-bit register at @p addr (ROM code version). */
static inline void __mmio_and_word(uint32_t mask, mmio_addr_t addr)
{
	uint16_t *reg = (uint16_t *)addr;

	*reg &= ~mask;
}

/** Clear @p mask bits in the 8-bit register at @p addr (ROM code version). */
static inline void __mmio_and_byte(uint32_t mask, mmio_addr_t addr)
{
	uint8_t *reg = (uint8_t *)addr;

	*reg &= ~mask;
}
/** Performs a read-modify-write cycle for a 32 bit value from a MMIO device
* (ROM code version).
*
* Reads a 32 bit value from a memory mapped IO device, sets and clears
* bits and writes the value back. If a bit is specified in both, the 'set'
* and in the 'clear' mask, it is undefined whether the resulting bit is set
* or cleared.
*
* @param set Contains the bits to set. Bits set in this mask
* are set at the memory address.
* @param clear Contains the bits to clear. Bits set in this
* mask are cleared at the memory address.
* @param addr The memory address to read from/write to.
*/
static inline void
__mmio_rmw_long(uint32_t set, uint32_t clear, mmio_addr_t addr)
{
*((uint32_t *)addr) =
(*((uint32_t *)addr) & ~clear) | set;
}
/** Performs a read-modify-write cycle for a 16 bit value from a MMIO device
 * (ROM code version).
 *
 * Reads a 16 bit value from a memory mapped IO device, sets and clears
 * bits and writes the value back. If a bit is specified in both, the 'set'
 * and in the 'clear' mask, it is undefined whether the resulting bit is set
 * or cleared. The register is accessed through a volatile-qualified
 * pointer so the compiler cannot elide, merge or reorder the two device
 * accesses.
 *
 * @param set Contains the bits to set. Bits set in this mask
 *            are set at the memory address.
 * @param clear Contains the bits to clear. Bits set in this
 *              mask are cleared at the memory address.
 * @param addr The memory address to read from/write to.
 */
static inline void
__mmio_rmw_word(uint32_t set, uint32_t clear, mmio_addr_t addr)
{
	volatile uint16_t *reg = (volatile uint16_t *)addr;

	*reg = (*reg & ~clear) | set;
}
/** Performs a read-modify-write cycle for an 8 bit value from a MMIO device
 * (ROM code version).
 *
 * Reads an 8 bit value from a memory mapped IO device, sets and clears
 * bits and writes the value back. If a bit is specified in both, the 'set'
 * and in the 'clear' mask, it is undefined whether the resulting bit is set
 * or cleared. The register is accessed through a volatile-qualified
 * pointer so the compiler cannot elide, merge or reorder the two device
 * accesses.
 *
 * @param set Contains the bits to set. Bits set in this mask
 *            are set at the memory address.
 * @param clear Contains the bits to clear. Bits set in this
 *              mask are cleared at the memory address.
 * @param addr The memory address to read from/write to.
 */
static inline void
__mmio_rmw_byte(uint32_t set, uint32_t clear, mmio_addr_t addr)
{
	volatile uint8_t *reg = (volatile uint8_t *)addr;

	*reg = (*reg & ~clear) | set;
}
/** Updates selected bits of a 32 bit memory mapped IO register.
 *
 * Reads the register, clears every bit that is set in @p mask, OR's in
 * @p value and writes the result back.
 *
 * @param addr The address of the memory mapped IO register.
 * @param mask The mask to apply to the value read.
 * @param value The 32 bit value to write.
 */
static inline void setl(mmio_addr_t addr, uint32_t mask, uint32_t value)
{
	uint32_t reg_val = mmio_read_long(addr);

	reg_val &= ~mask;
	reg_val |= value;
	mmio_write_long(reg_val, addr);
}
/** Updates selected bits of a 16 bit memory mapped IO register.
 *
 * Reads the register, clears every bit that is set in @p mask, OR's in
 * @p value and writes the result back.
 *
 * @param addr The address of the memory mapped IO register.
 * @param mask The mask to apply to the value read.
 * @param value The 16 bit value to write.
 */
static inline void setw(mmio_addr_t addr, uint32_t mask, uint32_t value)
{
	uint32_t reg_val = mmio_read_word(addr);

	reg_val &= ~mask;
	reg_val |= value;
	mmio_write_word(reg_val, addr);
}
/** Updates selected bits of an 8 bit memory mapped IO register.
 *
 * Reads the register, clears every bit that is set in @p mask, OR's in
 * @p value and writes the result back.
 *
 * @param addr The address of the memory mapped IO register.
 * @param mask The mask to apply to the value read.
 * @param value The 8 bit value to write.
 */
static inline void setb(mmio_addr_t addr, uint32_t mask, uint32_t value)
{
	uint32_t reg_val = mmio_read_byte(addr);

	reg_val &= ~mask;
	reg_val |= value;
	mmio_write_byte(reg_val, addr);
}
/* MMIO memory access types (direction of the emulated access) */
enum mem_io_type {
	HV_MEM_IO_READ = 0,
	HV_MEM_IO_WRITE,
};
/* MMIO emulation related structures */
#define MMIO_TRANS_VALID		1
#define MMIO_TRANS_INVALID		0
/* Describes one MMIO access being emulated on behalf of a guest. */
struct mem_io {
	uint64_t paddr;	/* Physical address being accessed */
	enum mem_io_type read_write;	/* 0 = read / 1 = write operation */
	uint8_t access_size;	/* Access size being emulated */
	uint8_t sign_extend_read;	/* 1 if sign extension required for read */
	uint64_t value;	/* Value read or value to write */
	uint8_t mmio_status;	/* MMIO_TRANS_VALID / MMIO_TRANS_INVALID */
	/* Used to store emulation context for this mmio transaction */
	void *private_data;
};
#endif /* _IO_H defined */

View File

@@ -0,0 +1,57 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef IOAPIC_H
#define IOAPIC_H
/* IOAPIC_MAX_LINES is architecturally defined.
 * The usable RTEs may be a subset of the total on a per IO APIC basis.
 */
#define IOAPIC_MAX_LINES	120
/* Number of legacy (8259-era) interrupt lines, IRQ0..IRQ15 */
#define NR_LEGACY_IRQ		16
/* Upper bound on global system interrupts across all IO APICs */
#define NR_MAX_GSI		(NR_IOAPICS*IOAPIC_MAX_LINES)
/* Convenience wrappers over irq_gsi_mask_unmask()/ioapic_set_rte() */
#define GSI_MASK_IRQ(irq) irq_gsi_mask_unmask((irq), true)
#define GSI_UNMASK_IRQ(irq) irq_gsi_mask_unmask((irq), false)
#define GSI_SET_RTE(irq, rte) ioapic_set_rte((irq), (rte))
/* Prototypes below are implemented in the IO APIC driver; exact
 * return-value conventions are defined there.
 */
void setup_ioapic_irq(void);
/* Formats IO APIC state into str (at most str_max_len bytes) */
int get_ioapic_info(char *str, int str_max_len);
/* true if irq is routed through an IO APIC (a GSI) */
bool irq_is_gsi(int irq);
int irq_gsi_num(void);
/* IRQ <-> IO APIC pin translation */
int irq_to_pin(int irq);
int pin_to_irq(int pin);
/* mask == true masks the RTE for irq, false unmasks it */
void irq_gsi_mask_unmask(int irq, bool mask);
/* Program / read back the 64-bit redirection table entry for irq */
void ioapic_set_rte(int irq, uint64_t rte);
void ioapic_get_rte(int irq, uint64_t *rte);
/* Fixed legacy IRQ -> pin mapping table, defined in the driver */
extern uint16_t legacy_irq_to_pin[];
#endif /* IOAPIC_H */

View File

@@ -0,0 +1,164 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef IRQ_H
#define IRQ_H
/* vectors for normal, usually for devices */
#define VECTOR_FOR_NOR_LOWPRI_START 0x20
#define VECTOR_FOR_NOR_LOWPRI_END 0x7F
#define VECTOR_FOR_NOR_HIGHPRI_START 0x80
#define VECTOR_FOR_NOR_HIGHPRI_END 0xDF
#define VECTOR_FOR_NOR_END VECTOR_FOR_NOR_HIGHPRI_END
#define VECTOR_FOR_INTR_START VECTOR_FOR_NOR_LOWPRI_START
/* vectors for priority, usually for HV service */
#define VECTOR_FOR_PRI_START 0xE0
#define VECTOR_FOR_PRI_END 0xFF
#define VECTOR_TIMER 0xEF
#define VECTOR_NOTIFY_VCPU 0xF0
#define VECTOR_VIRT_IRQ_VHM 0xF7
#define VECTOR_SPURIOUS 0xFF
#define NR_MAX_VECTOR 0xFF
#define VECTOR_INVALID (NR_MAX_VECTOR + 1)
#define IRQ_INVALID (NR_MAX_IRQS+1)
#define NR_MAX_IRQS (256+16)
#define DEFAULT_DEST_MODE IOAPIC_RTE_DESTLOG
#define DEFAULT_DELIVERY_MODE IOAPIC_RTE_DELLOPRI
#define ALL_CPUS_MASK ((1 << phy_cpu_num) - 1)
/* Forward declaration; full definition lives in the interrupt core */
struct irq_desc;
/* How an interrupt line is driven when injected */
enum irq_mode {
	IRQ_PULSE,
	IRQ_ASSERT,
	IRQ_DEASSERT,
};
/* Allocation state of a physical IRQ */
enum irq_state {
	IRQ_NOT_ASSIGNED = 0,
	IRQ_ASSIGNED_SHARED,	/* may host multiple device handlers */
	IRQ_ASSIGNED_NOSHARE,	/* owned by exactly one handler */
};
/* Processing state of an IRQ descriptor */
enum irq_desc_state {
	IRQ_DESC_PENDING,
	IRQ_DESC_IN_PROCESS,
};
/* Device-level handler callback: receives the irq number and the
 * dev_data pointer supplied at registration time.
 */
typedef int (*dev_handler_t)(int irq, void*);
/* One registered device handler; presumably chained via 'next' when an
 * IRQ is shared -- confirm against the interrupt core.
 */
struct dev_handler_node {
	char name[32];	/* human-readable name for diagnostics */
	void *dev_data;	/* opaque pointer passed back to dev_handler */
	dev_handler_t dev_handler;
	struct dev_handler_node *next;
	struct irq_desc *desc;	/* owning IRQ descriptor */
};
/* PCI interrupt routing information for one device */
struct irq_routing_entry {
	unsigned short bdf; /* BDF */
	int irq; /* PCI cfg offset 0x3C: IRQ pin */
	int intx; /* PCI cfg offset 0x3D: 0-3 = INTA,INTB,INTC,INTD*/
};
int irq_mark_used(int irq);
int irq_alloc(void);
int irq_desc_alloc_vector(int irq, bool lowpri);
void irq_desc_try_free_vector(int irq);
int irq_to_vector(int irq);
int dev_to_irq(struct dev_handler_node *node);
int dev_to_vector(struct dev_handler_node *node);
int handle_level_interrupt_common(struct irq_desc *desc, void *handler_data);
int common_handler_edge(struct irq_desc *desc, void *handler_data);
int common_dev_handler_level(struct irq_desc *desc, void *handler_data);
int quick_handler_nolock(struct irq_desc *desc, void *handler_data);
typedef int (*irq_handler_t)(struct irq_desc*, void*);
void update_irq_handler(int irq, irq_handler_t func);
int init_default_irqs(unsigned int cpu);
int dispatch_interrupt(struct intr_ctx *ctx);
struct dev_handler_node*
pri_register_handler(int irq,
int vector,
dev_handler_t func,
void *dev_data,
const char *name);
struct dev_handler_node*
normal_register_handler(int irq,
dev_handler_t func,
void *dev_data,
bool share,
bool lowpri,
const char *name);
void unregister_handler_common(struct dev_handler_node *node);
int get_cpu_interrupt_info(char *str, int str_max);
void setup_notification(void);
typedef int (*spurious_handler_t)(int);
extern spurious_handler_t spurious_handler;
/*
* Some MSI message definitions
*/
#define MSI_ADDR_MASK 0xfff00000
#define MSI_ADDR_BASE 0xfee00000
#define MSI_ADDR_RH 0x00000008 /* Redirection Hint */
#define MSI_ADDR_LOG 0x00000004 /* Destination Mode */
/* RFLAGS */
#define HV_ARCH_VCPU_RFLAGS_IF (1<<9)
/* Interruptability State info */
#define HV_ARCH_VCPU_BLOCKED_BY_MOVSS (1<<1)
#define HV_ARCH_VCPU_BLOCKED_BY_STI (1<<0)
int vcpu_inject_extint(struct vcpu *vcpu);
int vcpu_inject_nmi(struct vcpu *vcpu);
int vcpu_inject_gp(struct vcpu *vcpu);
int vcpu_make_request(struct vcpu *vcpu, int eventid);
int exception_handler(struct vcpu *vcpu);
int interrupt_win_exiting_handler(struct vcpu *vcpu);
int external_interrupt_handler(struct vcpu *vcpu);
int acrn_do_intr_process(struct vcpu *vcpu);
int interrupt_init(uint32_t logical_id);
#endif /* IRQ_H */

View File

@@ -0,0 +1,174 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef INTR_LAPIC_H
#define INTR_LAPIC_H
#define DEBUG_LAPIC 0
/* Delivery mode field values for the LAPIC Interrupt Command Register */
enum intr_lapic_icr_delivery_mode {
	INTR_LAPIC_ICR_FIXED = 0x0,
	INTR_LAPIC_ICR_LP = 0x1,	/* lowest priority */
	INTR_LAPIC_ICR_SMI = 0x2,
	INTR_LAPIC_ICR_NMI = 0x4,
	INTR_LAPIC_ICR_INIT = 0x5,
	INTR_LAPIC_ICR_STARTUP = 0x6,
};
/* ICR destination mode: physical or logical APIC addressing */
enum intr_lapic_icr_dest_mode {
	INTR_LAPIC_ICR_PHYSICAL = 0x0,
	INTR_LAPIC_ICR_LOGICAL = 0x1
};
/* ICR level field (used with INIT level de-assert) */
enum intr_lapic_icr_level {
	INTR_LAPIC_ICR_DEASSERT = 0x0,
	INTR_LAPIC_ICR_ASSERT = 0x1,
};
/* ICR trigger mode field */
enum intr_lapic_icr_trigger {
	INTR_LAPIC_ICR_EDGE = 0x0,
	INTR_LAPIC_ICR_LEVEL = 0x1,
};
/* ICR destination shorthand field */
enum intr_lapic_icr_shorthand {
	INTR_LAPIC_ICR_USE_DEST_ARRAY = 0x0,	/* no shorthand: use dest field */
	INTR_LAPIC_ICR_SELF = 0x1,
	INTR_LAPIC_ICR_ALL_INC_SELF = 0x2,
	INTR_LAPIC_ICR_ALL_EX_SELF = 0x3,
};
/* Default LAPIC base */
#define LAPIC_BASE 0xFEE00000
/* LAPIC register offset for memory mapped IO access */
#define LAPIC_ID_REGISTER 0x00000020
#define LAPIC_VERSION_REGISTER 0x00000030
#define LAPIC_TASK_PRIORITY_REGISTER 0x00000080
#define LAPIC_ARBITRATION_PRIORITY_REGISTER 0x00000090
#define LAPIC_PROCESSOR_PRIORITY_REGISTER 0x000000A0
#define LAPIC_EOI_REGISTER 0x000000B0
#define LAPIC_REMOTE_READ_REGISTER 0x000000C0
#define LAPIC_LOGICAL_DESTINATION_REGISTER 0x000000D0
#define LAPIC_DESTINATION_FORMAT_REGISTER 0x000000E0
#define LAPIC_SPURIOUS_VECTOR_REGISTER 0x000000F0
#define LAPIC_IN_SERVICE_REGISTER_0 0x00000100
#define LAPIC_IN_SERVICE_REGISTER_1 0x00000110
#define LAPIC_IN_SERVICE_REGISTER_2 0x00000120
#define LAPIC_IN_SERVICE_REGISTER_3 0x00000130
#define LAPIC_IN_SERVICE_REGISTER_4 0x00000140
#define LAPIC_IN_SERVICE_REGISTER_5 0x00000150
#define LAPIC_IN_SERVICE_REGISTER_6 0x00000160
#define LAPIC_IN_SERVICE_REGISTER_7 0x00000170
#define LAPIC_TRIGGER_MODE_REGISTER_0 0x00000180
#define LAPIC_TRIGGER_MODE_REGISTER_1 0x00000190
#define LAPIC_TRIGGER_MODE_REGISTER_2 0x000001A0
#define LAPIC_TRIGGER_MODE_REGISTER_3 0x000001B0
#define LAPIC_TRIGGER_MODE_REGISTER_4 0x000001C0
#define LAPIC_TRIGGER_MODE_REGISTER_5 0x000001D0
#define LAPIC_TRIGGER_MODE_REGISTER_6 0x000001E0
#define LAPIC_TRIGGER_MODE_REGISTER_7 0x000001F0
#define LAPIC_INT_REQUEST_REGISTER_0 0x00000200
#define LAPIC_INT_REQUEST_REGISTER_1 0x00000210
#define LAPIC_INT_REQUEST_REGISTER_2 0x00000220
#define LAPIC_INT_REQUEST_REGISTER_3 0x00000230
#define LAPIC_INT_REQUEST_REGISTER_4 0x00000240
#define LAPIC_INT_REQUEST_REGISTER_5 0x00000250
#define LAPIC_INT_REQUEST_REGISTER_6 0x00000260
#define LAPIC_INT_REQUEST_REGISTER_7 0x00000270
#define LAPIC_ERROR_STATUS_REGISTER 0x00000280
#define LAPIC_LVT_CMCI_REGISTER 0x000002F0
#define LAPIC_INT_COMMAND_REGISTER_0 0x00000300
#define LAPIC_INT_COMMAND_REGISTER_1 0x00000310
#define LAPIC_LVT_TIMER_REGISTER 0x00000320
#define LAPIC_LVT_THERMAL_SENSOR_REGISTER 0x00000330
#define LAPIC_LVT_PMC_REGISTER 0x00000340
#define LAPIC_LVT_LINT0_REGISTER 0x00000350
#define LAPIC_LVT_LINT1_REGISTER 0x00000360
#define LAPIC_LVT_ERROR_REGISTER 0x00000370
#define LAPIC_INITIAL_COUNT_REGISTER 0x00000380
#define LAPIC_CURRENT_COUNT_REGISTER 0x00000390
#define LAPIC_DIVIDE_CONFIGURATION_REGISTER 0x000003E0
/* LAPIC CPUID bit and bitmask definitions */
#define CPUID_OUT_RDX_APIC_PRESENT ((uint64_t) 1 << 9)
#define CPUID_OUT_RCX_X2APIC_PRESENT ((uint64_t) 1 << 21)
/* LAPIC MSR bit and bitmask definitions */
#define MSR_01B_XAPIC_GLOBAL_ENABLE ((uint64_t) 1 << 11)
/* LAPIC register bit and bitmask definitions */
#define LAPIC_SVR_VECTOR 0x000000FF
#define LAPIC_SVR_APIC_ENABLE_MASK 0x00000100
#define LAPIC_LVT_MASK 0x00010000
#define LAPIC_DELIVERY_MODE_EXTINT_MASK 0x00000700
/* LAPIC Timer bit and bitmask definitions */
#define LAPIC_TMR_ONESHOT ((uint32_t) 0x0 << 17)
#define LAPIC_TMR_PERIODIC ((uint32_t) 0x1 << 17)
#define LAPIC_TMR_TSC_DEADLINE ((uint32_t) 0x2 << 17)
/* Destination selection when sending the CPU startup (INIT/SIPI) sequence */
enum intr_cpu_startup_shorthand {
	INTR_CPU_STARTUP_USE_DEST,	/* target the CPU given by the caller */
	INTR_CPU_STARTUP_ALL_EX_SELF,	/* all CPUs except the sender */
	INTR_CPU_STARTUP_UNKNOWN,
};
/* Overlay of the Local APIC ID register in xAPIC and x2APIC modes. */
union lapic_id {
	uint32_t value;
	struct {
		uint8_t xapic_id;	/* 8-bit APIC ID in xAPIC mode */
		uint8_t rsvd[3];
	} xapic;
	union {
		uint32_t value;
		struct {
			uint8_t xapic_id;
			uint8_t xapic_edid;	/* presumably extended dest ID;
						 * confirm against SDM */
			uint8_t rsvd[2];
		} ioxapic_view;
		struct {
			uint32_t x2apic_id:4;
			uint32_t x2apic_cluster:28;
		} ldr_view;
	} x2apic;
};
int early_init_lapic(void);
int init_lapic(uint32_t cpu_id);
int send_lapic_eoi(void);
uint32_t get_cur_lapic_id(void);
int send_startup_ipi(enum intr_cpu_startup_shorthand cpu_startup_shorthand,
uint32_t cpu_startup_dest,
paddr_t cpu_startup_start_address);
/* API to send an IPI to a single guest */
void send_single_ipi(uint32_t pcpu_id, uint32_t vector);
#endif /* INTR_LAPIC_H */

View File

@@ -0,0 +1,394 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef MMU_H
#define MMU_H
/* Size of all page-table entries (in bytes) */
#define IA32E_COMM_ENTRY_SIZE 8
/* Definitions common for all IA-32e related paging entries */
#define IA32E_COMM_P_BIT 0x0000000000000001
#define IA32E_COMM_RW_BIT 0x0000000000000002
#define IA32E_COMM_US_BIT 0x0000000000000004
#define IA32E_COMM_PWT_BIT 0x0000000000000008
#define IA32E_COMM_PCD_BIT 0x0000000000000010
#define IA32E_COMM_A_BIT 0x0000000000000020
#define IA32E_COMM_XD_BIT 0x8000000000000000
/* Defines for EPT paging entries */
#define IA32E_EPT_R_BIT 0x0000000000000001
#define IA32E_EPT_W_BIT 0x0000000000000002
#define IA32E_EPT_X_BIT 0x0000000000000004
#define IA32E_EPT_UNCACHED (0<<3)
#define IA32E_EPT_WC (1<<3)
#define IA32E_EPT_WT (4<<3)
#define IA32E_EPT_WP (5<<3)
#define IA32E_EPT_WB (6<<3)
#define IA32E_EPT_PAT_IGNORE 0x0000000000000040
#define IA32E_EPT_ACCESS_FLAG 0x0000000000000100
#define IA32E_EPT_DIRTY_FLAG 0x0000000000000200
#define IA32E_EPT_SNOOP_CTRL 0x0000000000000800
#define IA32E_EPT_SUPPRESS_VE 0x8000000000000000
/* Definitions common or ignored for all IA-32e related paging entries */
#define IA32E_COMM_D_BIT 0x0000000000000040
#define IA32E_COMM_G_BIT 0x0000000000000100
/* Definitions exclusive to a Page Map Level 4 Entry (PML4E) */
#define IA32E_PML4E_INDEX_MASK_START 39
#define IA32E_PML4E_ADDR_MASK 0x0000FF8000000000
/* Definitions exclusive to a Page Directory Pointer Table Entry (PDPTE) */
#define IA32E_PDPTE_D_BIT 0x0000000000000040
#define IA32E_PDPTE_PS_BIT 0x0000000000000080
#define IA32E_PDPTE_PAT_BIT 0x0000000000001000
#define IA32E_PDPTE_ADDR_MASK 0x0000FFFFC0000000
#define IA32E_PDPTE_INDEX_MASK_START \
(IA32E_PML4E_INDEX_MASK_START - IA32E_INDEX_MASK_BITS)
/* Definitions exclusive to a Page Directory Entry (PDE) 1G or 2M */
#define IA32E_PDE_D_BIT 0x0000000000000040
#define IA32E_PDE_PS_BIT 0x0000000000000080
#define IA32E_PDE_PAT_BIT 0x0000000000001000
#define IA32E_PDE_ADDR_MASK 0x0000FFFFFFE00000
#define IA32E_PDE_INDEX_MASK_START \
(IA32E_PDPTE_INDEX_MASK_START - IA32E_INDEX_MASK_BITS)
/* Definitions exclusive to Page Table Entries (PTE) */
#define IA32E_PTE_D_BIT 0x0000000000000040
#define IA32E_PTE_PAT_BIT 0x0000000000000080
#define IA32E_PTE_G_BIT 0x0000000000000100
#define IA32E_PTE_ADDR_MASK 0x0000FFFFFFFFF000
#define IA32E_PTE_INDEX_MASK_START \
(IA32E_PDE_INDEX_MASK_START - IA32E_INDEX_MASK_BITS)
/** The 'Present' bit in a 32 bit paging page directory entry */
#define MMU_32BIT_PDE_P 0x00000001
/** The 'Read/Write' bit in a 32 bit paging page directory entry */
#define MMU_32BIT_PDE_RW 0x00000002
/** The 'User/Supervisor' bit in a 32 bit paging page directory entry */
#define MMU_32BIT_PDE_US 0x00000004
/** The 'Page Write Through' bit in a 32 bit paging page directory entry */
#define MMU_32BIT_PDE_PWT 0x00000008
/** The 'Page Cache Disable' bit in a 32 bit paging page directory entry */
#define MMU_32BIT_PDE_PCD 0x00000010
/** The 'Accessed' bit in a 32 bit paging page directory entry */
#define MMU_32BIT_PDE_A 0x00000020
/** The 'Dirty' bit in a 32 bit paging page directory entry */
#define MMU_32BIT_PDE_D 0x00000040
/** The 'Page Size' bit in a 32 bit paging page directory entry */
#define MMU_32BIT_PDE_PS 0x00000080
/** The 'Global' bit in a 32 bit paging page directory entry */
#define MMU_32BIT_PDE_G 0x00000100
/** The 'PAT' bit in a page 32 bit paging directory entry */
#define MMU_32BIT_PDE_PAT 0x00001000
/** The flag that indicates that the page fault was caused by a non present
* page.
*/
#define PAGE_FAULT_P_FLAG 0x00000001
/** The flag that indicates that the page fault was caused by a write access. */
#define PAGE_FAULT_WR_FLAG 0x00000002
/** The flag that indicates that the page fault was caused in user mode. */
#define PAGE_FAULT_US_FLAG 0x00000004
/** The flag that indicates that the page fault was caused by a reserved bit
* violation.
*/
#define PAGE_FAULT_RSVD_FLAG 0x00000008
/** The flag that indicates that the page fault was caused by an instruction
* fetch.
*/
#define PAGE_FAULT_ID_FLAG 0x00000010
/* Defines used for common memory sizes */
#define MEM_1K 1024UL
#define MEM_2K (MEM_1K * 2UL)
#define MEM_4K (MEM_1K * 4UL)
#define MEM_8K (MEM_1K * 8UL)
#define MEM_16K (MEM_1K * 16UL)
#define MEM_32K (MEM_1K * 32UL)
#define MEM_64K (MEM_1K * 64UL)
#define MEM_128K (MEM_1K * 128UL)
#define MEM_256K (MEM_1K * 256UL)
#define MEM_512K (MEM_1K * 512UL)
#define MEM_1M (MEM_1K * 1024UL)
#define MEM_2M (MEM_1M * 2UL)
#define MEM_4M (MEM_1M * 4UL)
#define MEM_8M (MEM_1M * 8UL)
#define MEM_16M (MEM_1M * 16UL)
#define MEM_32M (MEM_1M * 32UL)
#define MEM_64M (MEM_1M * 64UL)
#define MEM_128M (MEM_1M * 128UL)
#define MEM_256M (MEM_1M * 256UL)
#define MEM_512M (MEM_1M * 512UL)
#define MEM_1G (MEM_1M * 1024UL)
#define MEM_2G (MEM_1G * 2UL)
#define MEM_3G (MEM_1G * 3UL)
#define MEM_4G (MEM_1G * 4UL)
#define MEM_5G (MEM_1G * 5UL)
#define MEM_6G (MEM_1G * 6UL)
#ifndef ASSEMBLER
/* Define cache line size (in bytes) */
#define CACHE_LINE_SIZE 64
/* Size of all page structures for IA-32e */
#define IA32E_STRUCT_SIZE MEM_4K
/* IA32E Paging constants */
#define IA32E_INDEX_MASK_BITS 9
#define IA32E_NUM_ENTRIES 512
#define IA32E_INDEX_MASK (uint64_t)(IA32E_NUM_ENTRIES - 1)
#define IA32E_REF_MASK 0x7FFFFFFFFFFFF000
#define IA32E_FIRST_BLOCK_INDEX 1
/* Macro to get PML4 index given an address */
#define IA32E_PML4E_INDEX_CALC(address) \
(uint32_t)((((uint64_t)address >> IA32E_PML4E_INDEX_MASK_START) & \
IA32E_INDEX_MASK) * sizeof(uint64_t))
/* Macro to get PDPT index given an address */
#define IA32E_PDPTE_INDEX_CALC(address) \
(uint32_t)((((uint64_t)address >> IA32E_PDPTE_INDEX_MASK_START) & \
IA32E_INDEX_MASK) * sizeof(uint64_t))
/* Macro to get PD index given an address */
#define IA32E_PDE_INDEX_CALC(address) \
(uint32_t)((((uint64_t)address >> IA32E_PDE_INDEX_MASK_START) & \
IA32E_INDEX_MASK) * sizeof(uint64_t))
/* Macro to get PT index given an address */
#define IA32E_PTE_INDEX_CALC(address) \
(uint32_t)((((uint64_t)address >> IA32E_PTE_INDEX_MASK_START) & \
IA32E_INDEX_MASK) * sizeof(uint64_t))
/* Macro to obtain a 2 MB page offset from given linear address */
#define IA32E_GET_2MB_PG_OFFSET(address) \
(address & 0x001FFFFF)
/* Macro to obtain a 4KB page offset from given linear address */
#define IA32E_GET_4KB_PG_OFFSET(address) \
(address & 0x00000FFF)
/*
* The following generic attributes MMU_MEM_ATTR_FLAG_xxx may be OR'd with one
* and only one of the MMU_MEM_ATTR_TYPE_xxx definitions
*/
/* Generic memory attributes */
#define MMU_MEM_ATTR_READ 0x00000001
#define MMU_MEM_ATTR_WRITE 0x00000002
#define MMU_MEM_ATTR_EXECUTE 0x00000004
#define MMU_MEM_ATTR_USER 0x00000008
#define MMU_MEM_ATTR_WB_CACHE 0x00000040
#define MMU_MEM_ATTR_WT_CACHE 0x00000080
#define MMU_MEM_ATTR_UNCACHED 0x00000100
#define MMU_MEM_ATTR_WC 0x00000200
#define MMU_MEM_ATTR_WP 0x00000400
/* Definitions for memory types related to x64 */
#define MMU_MEM_ATTR_BIT_READ_WRITE IA32E_COMM_RW_BIT
#define MMU_MEM_ATTR_BIT_USER_ACCESSIBLE IA32E_COMM_US_BIT
#define MMU_MEM_ATTR_BIT_EXECUTE_DISABLE IA32E_COMM_XD_BIT
/* Selection of Page Attribute Table (PAT) entries with PAT, PCD and PWT
* encoding. See also pat.h
*/
/* Selects PAT0 WB */
#define MMU_MEM_ATTR_TYPE_CACHED_WB (0x0000000000000000)
/* Selects PAT1 WT */
#define MMU_MEM_ATTR_TYPE_CACHED_WT (IA32E_COMM_PWT_BIT)
/* Selects PAT2 UCM */
#define MMU_MEM_ATTR_TYPE_UNCACHED_MINUS (IA32E_COMM_PCD_BIT)
/* Selects PAT3 UC */
#define MMU_MEM_ATTR_TYPE_UNCACHED \
(IA32E_COMM_PCD_BIT | IA32E_COMM_PWT_BIT)
/* Selects PAT6 WC */
#define MMU_MEM_ATTR_TYPE_WRITE_COMBINED \
(IA32E_PDPTE_PAT_BIT | IA32E_COMM_PCD_BIT)
/* Selects PAT7 WP */
#define MMU_MEM_ATTR_TYPE_WRITE_PROTECTED \
(IA32E_PDPTE_PAT_BIT | IA32E_COMM_PCD_BIT | IA32E_COMM_PWT_BIT)
#define ROUND_PAGE_UP(addr) (((addr) + CPU_PAGE_SIZE - 1) & IA32E_REF_MASK)
#define ROUND_PAGE_DOWN(addr) ((addr) & IA32E_REF_MASK)
/* Parameters handed to the map_mem/unmap_mem/modify_mem page-table walkers */
struct map_params {
	/* enum _page_table_type: HOST or EPT*/
	int page_table_type;
	/* used HVA->HPA for HOST, used GPA->HPA for EPT */
	void *pml4_base;
	/* used HPA->HVA for HOST, used HPA->GPA for EPT */
	void *pml4_inverted;
};
/* Result of a page-table walk, filled in by
 * obtain_last_page_table_entry().
 */
struct entry_params {
	uint32_t entry_level;	/* enum _page_table_level of the entry */
	uint32_t entry_present;	/* enum _page_table_present */
	uint64_t entry_base;	/* NOTE(review): presumably base of the table
				 * holding the entry -- confirm in mmu.c */
	uint64_t entry_off;	/* offset of the entry within its table */
	uint64_t entry_val;	/* raw 64-bit entry value */
	uint64_t page_size;	/* mapping granule: PAGE_SIZE_4K/2M/1G */
};
/* Which translation hierarchy a map_params describes */
enum _page_table_type {
	PT_HOST = 0,  /* Mapping for hypervisor */
	PT_EPT = 1,
	PAGETABLE_TYPE_UNKNOWN,
};
/* Represent the 4 levels of translation tables in IA-32e paging mode */
enum _page_table_level {
	IA32E_PML4 = 0,
	IA32E_PDPT = 1,
	IA32E_PD = 2,
	IA32E_PT = 3,
	IA32E_UNKNOWN,
};
/* Page table entry present */
enum _page_table_present {
	PT_NOT_PRESENT = 0,
	PT_PRESENT = 1,
};
/* Page size */
#define PAGE_SIZE_4K MEM_4K
#define PAGE_SIZE_2M MEM_2M
#define PAGE_SIZE_1G MEM_1G
/* Macros for reading/writing memory */
#define MEM_READ8(addr) (*(volatile uint8_t *)(addr))
#define MEM_WRITE8(addr, data) \
(*(volatile uint8_t *)(addr) = (uint8_t)(data))
#define MEM_READ16(addr) (*(volatile uint16_t *)(addr))
#define MEM_WRITE16(addr, data) \
(*(volatile uint16_t *)(addr) = (uint16_t)(data))
#define MEM_READ32(addr) (*(volatile uint32_t *)(addr))
#define MEM_WRITE32(addr, data) \
(*(volatile uint32_t *)(addr) = (uint32_t)(data))
#define MEM_READ64(addr) (*(volatile uint64_t *)(addr))
#define MEM_WRITE64(addr, data) \
(*(volatile uint64_t *)(addr) = (uint64_t)(data))
/* Typedef for MMIO handler and range check routine */
typedef int(*hv_mem_io_handler_t)(struct vcpu *, struct mem_io *, void *);
/* Structure for MMIO handler node: one registered emulation range */
struct mem_io_node {
	hv_mem_io_handler_t read_write;	/* emulation callback for this range */
	void *handler_private_data;	/* passed through to the callback */
	struct list_head list;	/* linkage in the VM's handler list */
	uint64_t range_start;	/* first guest-physical address handled */
	uint64_t range_end;	/* end of range; inclusive vs. exclusive not
				 * visible here -- confirm in register code */
};
void *get_paging_pml4(void);
void *alloc_paging_struct();
void enable_paging(void *pml4_base_addr);
void init_paging(void);
void map_mem(struct map_params *map_params, void *paddr, void *vaddr,
uint64_t size, uint32_t flags);
void unmap_mem(struct map_params *map_params, void *paddr, void *vaddr,
uint64_t size, uint32_t flags);
void modify_mem(struct map_params *map_params, void *paddr, void *vaddr,
uint64_t size, uint32_t flags);
void mmu_invept(struct vcpu *vcpu);
void obtain_last_page_table_entry(struct map_params *map_params,
struct entry_params *entry, void *addr, bool direct);
int register_mmio_emulation_handler(struct vm *vm,
hv_mem_io_handler_t read_write, uint64_t start,
uint64_t end, void *handler_private_data);
void unregister_mmio_emulation_handler(struct vm *vm, uint64_t start,
uint64_t end);
#pragma pack(1)
/** Defines a single entry in an E820 memory map.
 *
 * Packed (no padding) so the in-memory layout matches the 20-byte
 * firmware E820 record.
 */
struct e820_entry {
	/** The base address of the memory range. */
	uint64_t baseaddr;
	/** The length of the memory range. */
	uint64_t length;
	/** The type of memory region (one of the E820_TYPE_* values). */
	uint32_t type;
};
#pragma pack()
/* E820 memory types */
#define E820_TYPE_RAM 1 /* EFI 1, 2, 3, 4, 5, 6, 7 */
#define E820_TYPE_RESERVED 2
/* EFI 0, 11, 12, 13 (everything not used elsewhere) */
#define E820_TYPE_ACPI_RECLAIM 3 /* EFI 9 */
#define E820_TYPE_ACPI_NVS 4 /* EFI 10 */
#define E820_TYPE_UNUSABLE 5 /* EFI 8 */
/** Calculates the page table address for a given address.
 *
 * The page tables are laid out contiguously after the 1024-entry page
 * directory: the table for directory entry n starts (n + 1) * 1024
 * 32-bit entries past the directory base.
 *
 * @param pd The base address of the page directory.
 * @param vaddr The virtual address to calculate the page table address for.
 *
 * @return A pointer to the page table for the specified virtual address.
 *
 */
static inline void *mmu_pt_for_pde(uint32_t *pd, uint32_t vaddr)
{
	uint32_t pde_idx = vaddr >> 22;	/* top 10 bits select the PDE */

	return &pd[(pde_idx + 1) * 1024];
}
#define CACHE_FLUSH_INVALIDATE_ALL() \
{ \
asm volatile (" wbinvd\n" : : : "memory"); \
}
/* External variable declarations */
extern uint8_t CPU_Boot_Page_Tables_Start_VM[];
/* External Interfaces */
int is_ept_supported(void);
void *create_guest_paging(struct vm *vm);
void destroy_ept(struct vm *vm);
uint64_t gpa2hpa(struct vm *vm, uint64_t gpa);
uint64_t gpa2hpa_check(struct vm *vm, uint64_t gpa,
uint64_t size, int *found, bool assert);
uint64_t hpa2gpa(struct vm *vm, uint64_t hpa);
int ept_mmap(struct vm *vm, uint64_t hpa,
uint64_t gpa, uint64_t size, uint32_t type, uint32_t prot);
int ept_violation_handler(struct vcpu *vcpu);
int ept_misconfig_handler(struct vcpu *vcpu);
int dm_emulate_mmio_post(struct vcpu *vcpu);
#endif /* ASSEMBLER not defined */
#endif /* MMU_H */

View File

@@ -0,0 +1,563 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef MSR_H
#define MSR_H
/* architectural (common) MSRs */
#define MSR_IA32_P5_MC_ADDR 0x00000000
/* Machine check address for MC exception handler */
#define MSR_IA32_P5_MC_TYPE 0x00000001
/* Machine check error type for MC exception handler */
#define MSR_IA32_MONITOR_FILTER_SIZE 0x00000006
/* System coherence line size for MWAIT/MONITOR */
#define MSR_IA32_TIME_STAMP_COUNTER 0x00000010 /* TSC as MSR */
#define MSR_IA32_PLATFORM_ID 0x00000017 /* Platform ID */
#define MSR_IA32_APIC_BASE 0x0000001B
/* Information about LAPIC */
#define MSR_IA32_FEATURE_CONTROL 0x0000003A
/* Control Features in Intel 64 processor */
#define MSR_IA32_SPEC_CTRL 0x00000048
/* Speculation Control */
#define MSR_IA32_PRED_CMD 0x00000049
/* Prediction Command */
#define MSR_IA32_ADJUST_TSC 0x0000003B /* Adjust TSC value */
#define MSR_IA32_BIOS_UPDT_TRIG 0x00000079
/* BIOS update trigger */
#define MSR_IA32_BIOS_SIGN_ID 0x0000008B
/* BIOS update signature */
#define MSR_IA32_SMM_MONITOR_CTL 0x0000009B
/* SMM monitor configuration */
#define MSR_IA32_PMC0 0x000000C1
/* General performance counter 0 */
#define MSR_IA32_PMC1 0x000000C2
/* General performance counter 1 */
#define MSR_IA32_PMC2 0x000000C3
/* General performance counter 2 */
#define MSR_IA32_PMC3 0x000000C4
/* General performance counter 3 */
#define MSR_IA32_MPERF 0x000000E7
/* Max. qualified performance clock counter */
#define MSR_IA32_APERF 0x000000E8
/* Actual performance clock counter */
#define MSR_IA32_MTRR_CAP 0x000000FE /* MTRR capability */
#define MSR_IA32_SYSENTER_CS 0x00000174 /* CS for sysenter */
#define MSR_IA32_SYSENTER_ESP 0x00000175 /* ESP for sysenter */
#define MSR_IA32_SYSENTER_EIP 0x00000176 /* EIP for sysenter */
#define MSR_IA32_MCG_CAP 0x00000179
/* Global machine check capability */
#define MSR_IA32_MCG_STATUS 0x0000017A
/* Global machine check status */
#define MSR_IA32_MCG_CTL 0x0000017B
/* Global machine check control */
#define MSR_IA32_PERFEVTSEL0 0x00000186
/* Performance Event Select Register 0 */
#define MSR_IA32_PERFEVTSEL1 0x00000187
/* Performance Event Select Register 1 */
#define MSR_IA32_PERFEVTSEL2 0x00000188
/* Performance Event Select Register 2 */
#define MSR_IA32_PERFEVTSEL3 0x00000189
/* Performance Event Select Register 3 */
#define MSR_IA32_PERF_STATUS 0x00000198
/* Current performance state */
#define MSR_IA32_PERF_CTL 0x00000199
/* Performance control */
#define MSR_IA32_CLOCK_MODULATION 0x0000019A
/* Clock modulation control */
#define MSR_IA32_THERM_INTERRUPT 0x0000019B
/* Thermal interrupt control */
#define MSR_IA32_THERM_STATUS 0x0000019C
/* Thermal status information */
#define MSR_IA32_MISC_ENABLE 0x000001A0
/* Enable misc. processor features */
#define MSR_IA32_ENERGY_PERF_BIAS 0x000001B0
/* Performance energy bias hint */
#define MSR_IA32_DEBUGCTL 0x000001D9
/* Trace/Profile resource control */
#define MSR_IA32_SMRR_PHYSBASE 0x000001F2 /* SMRR base address */
#define MSR_IA32_SMRR_PHYSMASK 0x000001F3 /* SMRR range mask */
#define MSR_IA32_PLATFORM_DCA_CAP 0x000001F8 /* DCA capability */
#define MSR_IA32_CPU_DCA_CAP 0x000001F9
/* Prefetch hint type capability */
#define MSR_IA32_DCA_0_CAP 0x000001FA
/* DCA type 0 status/control */
#define MSR_IA32_MTRR_PHYSBASE_0 0x00000200
/* variable range MTRR base 0 */
#define MSR_IA32_MTRR_PHYSMASK_0 0x00000201
/* variable range MTRR mask 0 */
#define MSR_IA32_MTRR_PHYSBASE_1 0x00000202
/* variable range MTRR base 1 */
#define MSR_IA32_MTRR_PHYSMASK_1 0x00000203
/* variable range MTRR mask 1 */
#define MSR_IA32_MTRR_PHYSBASE_2 0x00000204
/* variable range MTRR base 2 */
#define MSR_IA32_MTRR_PHYSMASK_2 0x00000205
/* variable range MTRR mask 2 */
#define MSR_IA32_MTRR_PHYSBASE_3 0x00000206
/* variable range MTRR base 3 */
#define MSR_IA32_MTRR_PHYSMASK_3 0x00000207
/* variable range MTRR mask 3 */
#define MSR_IA32_MTRR_PHYSBASE_4 0x00000208
/* variable range MTRR base 4 */
#define MSR_IA32_MTRR_PHYSMASK_4 0x00000209
/* variable range MTRR mask 4 */
#define MSR_IA32_MTRR_PHYSBASE_5 0x0000020A
/* variable range MTRR base 5 */
#define MSR_IA32_MTRR_PHYSMASK_5 0x0000020B
/* variable range MTRR mask 5 */
#define MSR_IA32_MTRR_PHYSBASE_6 0x0000020C
/* variable range MTRR base 6 */
#define MSR_IA32_MTRR_PHYSMASK_6 0x0000020D
/* variable range MTRR mask 6 */
#define MSR_IA32_MTRR_PHYSBASE_7 0x0000020E
/* variable range MTRR base 7 */
#define MSR_IA32_MTRR_PHYSMASK_7 0x0000020F
/* variable range MTRR mask 7 */
#define MSR_IA32_MTRR_PHYSBASE_8 0x00000210
/* variable range MTRR base 8 */
#define MSR_IA32_MTRR_PHYSMASK_8 0x00000211
/* variable range MTRR mask 8 */
#define MSR_IA32_MTRR_PHYSBASE_9 0x00000212
/* variable range MTRR base 9 */
#define MSR_IA32_MTRR_PHYSMASK_9 0x00000213
/* variable range MTRR mask 9 */
#define MSR_IA32_MTRR_FIX64K_00000 0x00000250
/* fixed range MTRR 64K/0x00000 */
#define MSR_IA32_MTRR_FIX16K_80000 0x00000258
/* fixed range MTRR 16K/0x80000 */
#define MSR_IA32_MTRR_FIX16K_A0000 0x00000259
/* fixed range MTRR 16K/0xA0000 */
#define MSR_IA32_MTRR_FIX4K_C0000 0x00000268
/* fixed range MTRR 4K/0xC0000 */
#define MSR_IA32_MTRR_FIX4K_C8000 0x00000269
/* fixed range MTRR 4K/0xC8000 */
#define MSR_IA32_MTRR_FIX4K_D0000 0x0000026A
/* fixed range MTRR 4K/0xD0000 */
#define MSR_IA32_MTRR_FIX4K_D8000 0x0000026B
/* fixed range MTRR 4K/0xD8000 */
#define MSR_IA32_MTRR_FIX4K_E0000 0x0000026C
/* fixed range MTRR 4K/0xE0000 */
#define MSR_IA32_MTRR_FIX4K_E8000 0x0000026D
/* fixed range MTRR 4K/0xE8000 */
#define MSR_IA32_MTRR_FIX4K_F0000 0x0000026E
/* fixed range MTRR 4K/0xF0000 */
#define MSR_IA32_MTRR_FIX4K_F8000 0x0000026F
/* fixed range MTRR 4K/0xF8000 */
#define MSR_IA32_PAT 0x00000277 /* PAT */
#define MSR_IA32_MC0_CTL2 0x00000280
/* Corrected error count threshold 0 */
#define MSR_IA32_MC1_CTL2 0x00000281
/* Corrected error count threshold 1 */
#define MSR_IA32_MC2_CTL2 0x00000282
/* Corrected error count threshold 2 */
#define MSR_IA32_MC3_CTL2 0x00000283
/* Corrected error count threshold 3 */
#define MSR_IA32_MC4_CTL2 0x00000284
/* Corrected error count threshold 4 */
#define MSR_IA32_MC5_CTL2 0x00000285
/* Corrected error count threshold 5 */
#define MSR_IA32_MC6_CTL2 0x00000286
/* Corrected error count threshold 6 */
#define MSR_IA32_MC7_CTL2 0x00000287
/* Corrected error count threshold 7 */
#define MSR_IA32_MC8_CTL2 0x00000288
/* Corrected error count threshold 8 */
#define MSR_IA32_MC9_CTL2 0x00000289
/* Corrected error count threshold 9 */
#define MSR_IA32_MC10_CTL2 0x0000028A
/* Corrected error count threshold 10 */
#define MSR_IA32_MC11_CTL2 0x0000028B
/* Corrected error count threshold 11 */
#define MSR_IA32_MC12_CTL2 0x0000028C
/* Corrected error count threshold 12 */
#define MSR_IA32_MC13_CTL2 0x0000028D
/* Corrected error count threshold 13 */
#define MSR_IA32_MC14_CTL2 0x0000028E
/* Corrected error count threshold 14 */
#define MSR_IA32_MC15_CTL2 0x0000028F
/* Corrected error count threshold 15 */
#define MSR_IA32_MC16_CTL2 0x00000290
/* Corrected error count threshold 16 */
#define MSR_IA32_MC17_CTL2 0x00000291
/* Corrected error count threshold 17 */
#define MSR_IA32_MC18_CTL2 0x00000292
/* Corrected error count threshold 18 */
#define MSR_IA32_MC19_CTL2 0x00000293
/* Corrected error count threshold 19 */
#define MSR_IA32_MC20_CTL2 0x00000294
/* Corrected error count threshold 20 */
#define MSR_IA32_MC21_CTL2 0x00000295
/* Corrected error count threshold 21 */
#define MSR_IA32_MTRR_DEF_TYPE 0x000002FF
/* Default memory type/MTRR control */
#define MSR_IA32_FIXED_CTR0 0x00000309
/* Fixed-function performance counter 0 */
#define MSR_IA32_FIXED_CTR1 0x0000030A
/* Fixed-function performance counter 1 */
#define MSR_IA32_FIXED_CTR2 0x0000030B
/* Fixed-function performance counter 2 */
#define MSR_IA32_PERF_CAPABILITIES 0x00000345
/* Performance capability */
#define MSR_IA32_FIXED_CTR_CTL 0x0000038D
/* Fixed-function performance counter control */
#define MSR_IA32_PERF_GLOBAL_STATUS 0x0000038E
/* Global performance counter status */
#define MSR_IA32_PERF_GLOBAL_CTRL 0x0000038F
/* Global performance counter control */
#define MSR_IA32_PERF_GLOBAL_OVF_CTRL 0x00000390
/* Global performance counter overflow control */
#define MSR_IA32_PEBS_ENABLE 0x000003F1 /* PEBS control */
#define MSR_IA32_MC0_CTL 0x00000400 /* MC 0 control */
#define MSR_IA32_MC0_STATUS 0x00000401 /* MC 0 status */
#define MSR_IA32_MC0_ADDR 0x00000402 /* MC 0 address */
#define MSR_IA32_MC0_MISC 0x00000403 /* MC 0 misc. */
#define MSR_IA32_MC1_CTL 0x00000404 /* MC 1 control */
#define MSR_IA32_MC1_STATUS 0x00000405 /* MC 1 status */
#define MSR_IA32_MC1_ADDR 0x00000406 /* MC 1 address */
#define MSR_IA32_MC1_MISC 0x00000407 /* MC 1 misc. */
#define MSR_IA32_MC2_CTL 0x00000408 /* MC 2 control */
#define MSR_IA32_MC2_STATUS 0x00000409 /* MC 2 status */
#define MSR_IA32_MC2_ADDR 0x0000040A /* MC 2 address */
#define MSR_IA32_MC2_MISC 0x0000040B /* MC 2 misc. */
#define MSR_IA32_MC3_CTL 0x0000040C /* MC 3 control */
#define MSR_IA32_MC3_STATUS 0x0000040D /* MC 3 status */
#define MSR_IA32_MC3_ADDR 0x0000040E /* MC 3 address */
#define MSR_IA32_MC3_MISC 0x0000040F /* MC 3 misc. */
#define MSR_IA32_MC4_CTL 0x00000410 /* MC 4 control */
#define MSR_IA32_MC4_STATUS 0x00000411 /* MC 4 status */
#define MSR_IA32_MC4_ADDR 0x00000412 /* MC 4 address */
#define MSR_IA32_MC4_MISC 0x00000413 /* MC 4 misc. */
#define MSR_IA32_MC5_CTL 0x00000414 /* MC 5 control */
#define MSR_IA32_MC5_STATUS 0x00000415 /* MC 5 status */
#define MSR_IA32_MC5_ADDR 0x00000416 /* MC 5 address */
#define MSR_IA32_MC5_MISC 0x00000417 /* MC 5 misc. */
#define MSR_IA32_MC6_CTL 0x00000418 /* MC 6 control */
#define MSR_IA32_MC6_STATUS 0x00000419 /* MC 6 status */
#define MSR_IA32_MC6_ADDR 0x0000041A /* MC 6 address */
#define MSR_IA32_MC6_MISC 0x0000041B /* MC 6 misc. */
#define MSR_IA32_MC7_CTL 0x0000041C /* MC 7 control */
#define MSR_IA32_MC7_STATUS 0x0000041D /* MC 7 status */
#define MSR_IA32_MC7_ADDR 0x0000041E /* MC 7 address */
#define MSR_IA32_MC7_MISC 0x0000041F /* MC 7 misc. */
#define MSR_IA32_MC8_CTL 0x00000420 /* MC 8 control */
#define MSR_IA32_MC8_STATUS 0x00000421 /* MC 8 status */
#define MSR_IA32_MC8_ADDR 0x00000422 /* MC 8 address */
#define MSR_IA32_MC8_MISC 0x00000423 /* MC 8 misc. */
#define MSR_IA32_MC9_CTL 0x00000424 /* MC 9 control */
#define MSR_IA32_MC9_STATUS 0x00000425 /* MC 9 status */
#define MSR_IA32_MC9_ADDR 0x00000426 /* MC 9 address */
#define MSR_IA32_MC9_MISC 0x00000427 /* MC 9 misc. */
#define MSR_IA32_MC10_CTL 0x00000428 /* MC 10 control */
#define MSR_IA32_MC10_STATUS 0x00000429 /* MC 10 status */
#define MSR_IA32_MC10_ADDR 0x0000042A /* MC 10 address */
#define MSR_IA32_MC10_MISC 0x0000042B /* MC 10 misc. */
#define MSR_IA32_MC11_CTL 0x0000042C /* MC 11 control */
#define MSR_IA32_MC11_STATUS 0x0000042D /* MC 11 status */
#define MSR_IA32_MC11_ADDR 0x0000042E /* MC 11 address */
#define MSR_IA32_MC11_MISC 0x0000042F /* MC 11 misc. */
#define MSR_IA32_MC12_CTL 0x00000430 /* MC 12 control */
#define MSR_IA32_MC12_STATUS 0x00000431 /* MC 12 status */
#define MSR_IA32_MC12_ADDR 0x00000432 /* MC 12 address */
#define MSR_IA32_MC12_MISC 0x00000433 /* MC 12 misc. */
#define MSR_IA32_MC13_CTL 0x00000434 /* MC 13 control */
#define MSR_IA32_MC13_STATUS 0x00000435 /* MC 13 status */
#define MSR_IA32_MC13_ADDR 0x00000436 /* MC 13 address */
#define MSR_IA32_MC13_MISC 0x00000437 /* MC 13 misc. */
#define MSR_IA32_MC14_CTL 0x00000438 /* MC 14 control */
#define MSR_IA32_MC14_STATUS 0x00000439 /* MC 14 status */
#define MSR_IA32_MC14_ADDR 0x0000043A /* MC 14 address */
#define MSR_IA32_MC14_MISC 0x0000043B /* MC 14 misc. */
#define MSR_IA32_MC15_CTL 0x0000043C /* MC 15 control */
#define MSR_IA32_MC15_STATUS 0x0000043D /* MC 15 status */
#define MSR_IA32_MC15_ADDR 0x0000043E /* MC 15 address */
#define MSR_IA32_MC15_MISC 0x0000043F /* MC 15 misc. */
#define MSR_IA32_MC16_CTL 0x00000440 /* MC 16 control */
#define MSR_IA32_MC16_STATUS 0x00000441 /* MC 16 status */
#define MSR_IA32_MC16_ADDR 0x00000442 /* MC 16 address */
#define MSR_IA32_MC16_MISC 0x00000443 /* MC 16 misc. */
#define MSR_IA32_MC17_CTL 0x00000444 /* MC 17 control */
#define MSR_IA32_MC17_STATUS 0x00000445 /* MC 17 status */
#define MSR_IA32_MC17_ADDR 0x00000446 /* MC 17 address */
#define MSR_IA32_MC17_MISC 0x00000447 /* MC 17 misc. */
#define MSR_IA32_MC18_CTL 0x00000448 /* MC 18 control */
#define MSR_IA32_MC18_STATUS 0x00000449 /* MC 18 status */
#define MSR_IA32_MC18_ADDR 0x0000044A /* MC 18 address */
#define MSR_IA32_MC18_MISC 0x0000044B /* MC 18 misc. */
#define MSR_IA32_MC19_CTL 0x0000044C /* MC 19 control */
#define MSR_IA32_MC19_STATUS 0x0000044D /* MC 19 status */
#define MSR_IA32_MC19_ADDR 0x0000044E /* MC 19 address */
#define MSR_IA32_MC19_MISC 0x0000044F /* MC 19 misc. */
#define MSR_IA32_MC20_CTL 0x00000450 /* MC 20 control */
#define MSR_IA32_MC20_STATUS 0x00000451 /* MC 20 status */
#define MSR_IA32_MC20_ADDR 0x00000452 /* MC 20 address */
#define MSR_IA32_MC20_MISC 0x00000453 /* MC 20 misc. */
#define MSR_IA32_MC21_CTL 0x00000454 /* MC 21 control */
#define MSR_IA32_MC21_STATUS 0x00000455 /* MC 21 status */
#define MSR_IA32_MC21_ADDR 0x00000456 /* MC 21 address */
#define MSR_IA32_MC21_MISC 0x00000457 /* MC 21 misc. */
#define MSR_IA32_VMX_BASIC 0x00000480
/* Capability reporting register basic VMX capabilities */
#define MSR_IA32_VMX_PINBASED_CTLS 0x00000481
/* Capability reporting register pin based VM execution controls */
#define MSR_IA32_VMX_PROCBASED_CTLS 0x00000482
/* Capability reporting register primary processor based VM execution controls*/
#define MSR_IA32_VMX_EXIT_CTLS 0x00000483
/* Capability reporting register VM exit controls */
#define MSR_IA32_VMX_ENTRY_CTLS 0x00000484
/* Capability reporting register VM entry controls */
#define MSR_IA32_VMX_MISC 0x00000485
/* Reporting register misc. VMX capabilities */
#define MSR_IA32_VMX_CR0_FIXED0 0x00000486
/* Capability reporting register of CR0 bits fixed to 0 */
#define MSR_IA32_VMX_CR0_FIXED1 0x00000487
/* Capability reporting register of CR0 bits fixed to 1 */
#define MSR_IA32_VMX_CR4_FIXED0 0x00000488
/* Capability reporting register of CR4 bits fixed to 0 */
#define MSR_IA32_VMX_CR4_FIXED1 0x00000489
/* Capability reporting register of CR4 bits fixed to 1 */
#define MSR_IA32_VMX_VMCS_ENUM 0x0000048A
/* Capability reporting register of VMCS field enumeration */
#define MSR_IA32_VMX_PROCBASED_CTLS2 0x0000048B
/* Capability reporting register of secondary processor based VM execution
* controls
*/
#define MSR_IA32_VMX_EPT_VPID_CAP 0x0000048C
/* Capability reporting register of EPT and VPID */
#define MSR_IA32_VMX_TRUE_PINBASED_CTLS 0x0000048D
/* Capability reporting register of pin based VM execution flex controls */
#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS 0x0000048E
/* Capability reporting register of primary processor based VM execution flex
* controls
*/
#define MSR_IA32_VMX_TRUE_EXIT_CTLS 0x0000048F
/* Capability reporting register of VM exit flex controls */
#define MSR_IA32_VMX_TRUE_ENTRY_CTLS 0x00000490
/* Capability reporting register of VM entry flex controls */
#define MSR_IA32_DS_AREA 0x00000600 /* DS save area */
/* APIC TSC deadline MSR */
#define MSR_IA32_TSC_DEADLINE 0x000006E0
#define MSR_IA32_EXT_XAPICID 0x00000802 /* x2APIC ID */
#define MSR_IA32_EXT_APIC_VERSION 0x00000803 /* x2APIC version */
#define MSR_IA32_EXT_APIC_TPR 0x00000808
/* x2APIC task priority */
#define MSR_IA32_EXT_APIC_PPR 0x0000080A
/* x2APIC processor priority */
#define MSR_IA32_EXT_APIC_EOI 0x0000080B /* x2APIC EOI */
#define MSR_IA32_EXT_APIC_LDR 0x0000080D
/* x2APIC logical destination */
#define MSR_IA32_EXT_APIC_SIVR 0x0000080F
/* x2APIC spurious interrupt vector */
#define MSR_IA32_EXT_APIC_ISR0 0x00000810
/* x2APIC in-service register 0 */
#define MSR_IA32_EXT_APIC_ISR1 0x00000811
/* x2APIC in-service register 1 */
#define MSR_IA32_EXT_APIC_ISR2 0x00000812
/* x2APIC in-service register 2 */
#define MSR_IA32_EXT_APIC_ISR3 0x00000813
/* x2APIC in-service register 3 */
#define MSR_IA32_EXT_APIC_ISR4 0x00000814
/* x2APIC in-service register 4 */
#define MSR_IA32_EXT_APIC_ISR5 0x00000815
/* x2APIC in-service register 5 */
#define MSR_IA32_EXT_APIC_ISR6 0x00000816
/* x2APIC in-service register 6 */
#define MSR_IA32_EXT_APIC_ISR7 0x00000817
/* x2APIC in-service register 7 */
#define MSR_IA32_EXT_APIC_TMR0 0x00000818
/* x2APIC trigger mode register 0 */
#define MSR_IA32_EXT_APIC_TMR1 0x00000819
/* x2APIC trigger mode register 1 */
#define MSR_IA32_EXT_APIC_TMR2 0x0000081A
/* x2APIC trigger mode register 2 */
#define MSR_IA32_EXT_APIC_TMR3 0x0000081B
/* x2APIC trigger mode register 3 */
#define MSR_IA32_EXT_APIC_TMR4 0x0000081C
/* x2APIC trigger mode register 4 */
#define MSR_IA32_EXT_APIC_TMR5 0x0000081D
/* x2APIC trigger mode register 5 */
#define MSR_IA32_EXT_APIC_TMR6 0x0000081E
/* x2APIC trigger mode register 6 */
#define MSR_IA32_EXT_APIC_TMR7 0x0000081F
/* x2APIC trigger mode register 7 */
#define MSR_IA32_EXT_APIC_IRR0 0x00000820
/* x2APIC interrupt request register 0 */
#define MSR_IA32_EXT_APIC_IRR1 0x00000821
/* x2APIC interrupt request register 1 */
#define MSR_IA32_EXT_APIC_IRR2 0x00000822
/* x2APIC interrupt request register 2 */
#define MSR_IA32_EXT_APIC_IRR3 0x00000823
/* x2APIC interrupt request register 3 */
#define MSR_IA32_EXT_APIC_IRR4 0x00000824
/* x2APIC interrupt request register 4 */
#define MSR_IA32_EXT_APIC_IRR5 0x00000825
/* x2APIC interrupt request register 5 */
#define MSR_IA32_EXT_APIC_IRR6 0x00000826
/* x2APIC interrupt request register 6 */
#define MSR_IA32_EXT_APIC_IRR7 0x00000827
/* x2APIC interrupt request register 7 */
#define MSR_IA32_EXT_APIC_ESR 0x00000828
/* x2APIC error status */
#define MSR_IA32_EXT_APIC_LVT_CMCI 0x0000082F
/* x2APIC LVT corrected machine check interrupt register */
#define MSR_IA32_EXT_APIC_ICR 0x00000830
/* x2APIC interrupt command register */
#define MSR_IA32_EXT_APIC_LVT_TIMER 0x00000832
/* x2APIC LVT timer interrupt register */
#define MSR_IA32_EXT_APIC_LVT_THERMAL 0x00000833
/* x2APIC LVT thermal sensor interrupt register */
#define MSR_IA32_EXT_APIC_LVT_PMI 0x00000834
/* x2APIC LVT performance monitor interrupt register */
#define MSR_IA32_EXT_APIC_LVT_LINT0 0x00000835
/* x2APIC LVT LINT0 register */
#define MSR_IA32_EXT_APIC_LVT_LINT1 0x00000836
/* x2APIC LVT LINT1 register */
#define MSR_IA32_EXT_APIC_LVT_ERROR 0x00000837
/* x2APIC LVT error register */
#define MSR_IA32_EXT_APIC_INIT_COUNT 0x00000838
/* x2APIC initial count register */
#define MSR_IA32_EXT_APIC_CUR_COUNT 0x00000839
/* x2APIC current count register */
#define MSR_IA32_EXT_APIC_DIV_CONF 0x0000083E
/* x2APIC divide configuration register */
#define MSR_IA32_EXT_APIC_SELF_IPI 0x0000083F
/* x2APIC self IPI register */
#define MSR_IA32_EFER 0xC0000080
/* Extended feature enables */
#define MSR_IA32_STAR 0xC0000081
/* System call target address */
#define MSR_IA32_LSTAR 0xC0000082
/* IA-32e mode system call target address */
#define MSR_IA32_FMASK 0xC0000084
/* System call flag mask */
#define MSR_IA32_FS_BASE 0xC0000100
/* Map of BASE address of FS */
#define MSR_IA32_GS_BASE 0xC0000101
/* Map of BASE address of GS */
#define MSR_IA32_KERNEL_GS_BASE 0xC0000102
/* Swap target of BASE address of GS */
#define MSR_IA32_TSC_AUX 0xC0000103 /* Auxiliary TSC */
/* ATOM specific MSRs */
#define MSR_ATOM_EBL_CR_POWERON 0x0000002A
/* Processor hard power-on configuration */
#define MSR_ATOM_LASTBRANCH_0_FROM_IP 0x00000040
/* Last branch record 0 from IP */
#define MSR_ATOM_LASTBRANCH_1_FROM_IP 0x00000041
/* Last branch record 1 from IP */
#define MSR_ATOM_LASTBRANCH_2_FROM_IP 0x00000042
/* Last branch record 2 from IP */
#define MSR_ATOM_LASTBRANCH_3_FROM_IP 0x00000043
/* Last branch record 3 from IP */
#define MSR_ATOM_LASTBRANCH_4_FROM_IP 0x00000044
/* Last branch record 4 from IP */
#define MSR_ATOM_LASTBRANCH_5_FROM_IP 0x00000045
/* Last branch record 5 from IP */
#define MSR_ATOM_LASTBRANCH_6_FROM_IP 0x00000046
/* Last branch record 6 from IP */
#define MSR_ATOM_LASTBRANCH_7_FROM_IP 0x00000047
/* Last branch record 7 from IP */
#define MSR_ATOM_LASTBRANCH_0_TO_LIP 0x00000060
/* Last branch record 0 to IP */
#define MSR_ATOM_LASTBRANCH_1_TO_LIP 0x00000061
/* Last branch record 1 to IP */
#define MSR_ATOM_LASTBRANCH_2_TO_LIP 0x00000062
/* Last branch record 2 to IP */
#define MSR_ATOM_LASTBRANCH_3_TO_LIP 0x00000063
/* Last branch record 3 to IP */
#define MSR_ATOM_LASTBRANCH_4_TO_LIP 0x00000064
/* Last branch record 4 to IP */
#define MSR_ATOM_LASTBRANCH_5_TO_LIP 0x00000065
/* Last branch record 5 to IP */
#define MSR_ATOM_LASTBRANCH_6_TO_LIP 0x00000066
/* Last branch record 6 to IP */
#define MSR_ATOM_LASTBRANCH_7_TO_LIP 0x00000067
/* Last branch record 7 to IP */
#define MSR_ATOM_FSB_FREQ 0x000000CD /* Scalable bus speed */
#define MSR_PLATFORM_INFO 0x000000CE
/* Maximum resolved bus ratio */
#define MSR_ATOM_BBL_CR_CTL3 0x0000011E /* L2 hardware enabled */
#define MSR_ATOM_THERM2_CTL 0x0000019D
/* Mode of automatic thermal monitor */
#define MSR_ATOM_LASTBRANCH_TOS 0x000001C9
/* Last branch record stack TOS */
#define MSR_ATOM_LER_FROM_LIP 0x000001DD
/* Last exception record from linear IP */
#define MSR_ATOM_LER_TO_LIP 0x000001DE
/* Last exception record to linear IP */
/* LINCROFT specific MSRs */
#define MSR_LNC_BIOS_CACHE_AS_RAM 0x000002E0 /* Configure CAR */
/* MSR_IA32_VMX_EPT_VPID_CAP: EPT and VPID capability bits */
#define MSR_VMX_EPT_VPID_CAP_1GB (1UL << 17)/* EPT 1GB page */
#define MSR_VMX_INVEPT (1UL << 20)/* INVEPT */
#define MSR_VMX_INVEPT_SINGLE_CONTEXT (1UL << 25)/* INVEPT Single */
#define MSR_VMX_INVEPT_GLOBAL_CONTEXT (1UL << 26)/* INVEPT Global */
#define MSR_VMX_INVVPID (1UL << 32)/* INVVPID */
#define MSR_VMX_INVVPID_SINGLE_CONTEXT (1UL << 41)/* INVVPID Single */
#define MSR_VMX_INVVPID_GLOBAL_CONTEXT (1UL << 42)/* INVVPID Global */
/* EFER bits */
#define MSR_IA32_EFER_SCE_BIT (1<<0)
#define MSR_IA32_EFER_LME_BIT (1<<8) /* IA32e mode enable */
#define MSR_IA32_EFER_LMA_BIT (1<<10) /* IA32e mode active */
#define MSR_IA32_EFER_NXE_BIT (1<<11)
/* FEATURE CONTROL bits */
#define MSR_IA32_FEATURE_CONTROL_LOCK (1<<0)
#define MSR_IA32_FEATURE_CONTROL_VMX_SMX (1<<1)
#define MSR_IA32_FEATURE_CONTROL_VMX_NO_SMX (1<<2)
/* PAT memory type definitions */
#define PAT_MEM_TYPE_UC 0x00 /* uncached */
#define PAT_MEM_TYPE_WC 0x01 /* write combining */
#define PAT_MEM_TYPE_WT 0x04 /* write through */
#define PAT_MEM_TYPE_WP 0x05 /* write protected */
#define PAT_MEM_TYPE_WB 0x06 /* writeback */
#define PAT_MEM_TYPE_UCM 0x07 /* uncached minus */
/* MTRR memory type definitions */
#define MTRR_MEM_TYPE_UC 0x00 /* uncached */
#define MTRR_MEM_TYPE_WC 0x01 /* write combining */
#define MTRR_MEM_TYPE_WT 0x04 /* write through */
#define MTRR_MEM_TYPE_WP 0x05 /* write protected */
#define MTRR_MEM_TYPE_WB 0x06 /* writeback */
/* misc. MTRR flag definitions */
#define MTRR_ENABLE 0x800 /* MTRR enable (bit 11 of IA32_MTRR_DEF_TYPE) */
#define MTRR_FIX_ENABLE 0x400 /* fixed range MTRR enable (bit 10 of IA32_MTRR_DEF_TYPE) */
#define MTRR_VALID 0x800 /* MTRR setting is valid (bit 11 of IA32_MTRR_PHYSMASKn);
			  * numerically equal to MTRR_ENABLE but applies to a
			  * different register */
/* SPEC & PRED bit */
#define SPEC_ENABLE_IBRS (1<<0)
#define SPEC_ENABLE_STIBP (1<<1)
#define PRED_SET_IBPB (1<<0)
#endif /* MSR_H */

View File

@@ -0,0 +1,105 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef MULTIBOOT_H
#define MULTIBOOT_H
#define MULTIBOOT_INFO_MAGIC 0x2BADB002
#define MULTIBOOT_INFO_HAS_CMDLINE 0x00000004
#define MULTIBOOT_INFO_HAS_MODS 0x00000008
/*
 * Multiboot information structure handed over by the boot loader when
 * EAX == MULTIBOOT_INFO_MAGIC.  Layout follows the Multiboot 0.6.96
 * specification; each group of fields is meaningful only when the
 * corresponding MULTIBOOT_INFO_HAS_* bit is set in mi_flags.  All
 * mi_*_addr / mi_cmdline style fields hold 32-bit physical addresses.
 */
struct multiboot_info {
	uint32_t mi_flags;	/* bitmap of which fields below are valid */
	/* Valid if mi_flags sets MULTIBOOT_INFO_HAS_MEMORY. */
	uint32_t mi_mem_lower;	/* KB of lower memory (below 1 MB) */
	uint32_t mi_mem_upper;	/* KB of upper memory (above 1 MB) */
	/* Valid if mi_flags sets MULTIBOOT_INFO_HAS_BOOT_DEVICE. */
	uint8_t mi_boot_device_part3;
	uint8_t mi_boot_device_part2;
	uint8_t mi_boot_device_part1;
	uint8_t mi_boot_device_drive;	/* BIOS drive number */
	/* Valid if mi_flags sets MULTIBOOT_INFO_HAS_CMDLINE. */
	uint32_t mi_cmdline;	/* physical addr of NUL-terminated command line */
	/* Valid if mi_flags sets MULTIBOOT_INFO_HAS_MODS. */
	uint32_t mi_mods_count;	/* number of struct multiboot_module entries */
	uint32_t mi_mods_addr;	/* physical addr of the module array */
	/* Valid if mi_flags sets MULTIBOOT_INFO_HAS_{AOUT,ELF}_SYMS. */
	uint32_t mi_elfshdr_num;
	uint32_t mi_elfshdr_size;
	uint32_t mi_elfshdr_addr;
	uint32_t mi_elfshdr_shndx;
	/* Valid if mi_flags sets MULTIBOOT_INFO_HAS_MMAP. */
	uint32_t mi_mmap_length;	/* total bytes of the memory map buffer */
	uint32_t mi_mmap_addr;	/* physical addr of struct multiboot_mmap entries */
	/* Valid if mi_flags sets MULTIBOOT_INFO_HAS_DRIVES. */
	uint32_t mi_drives_length;
	uint32_t mi_drives_addr;
	/* Valid if mi_flags sets MULTIBOOT_INFO_HAS_CONFIG_TABLE. */
	uint32_t unused_mi_config_table;
	/* Valid if mi_flags sets MULTIBOOT_INFO_HAS_LOADER_NAME. */
	uint32_t mi_loader_name;	/* physical addr of boot loader name string */
	/* Valid if mi_flags sets MULTIBOOT_INFO_HAS_APM. */
	uint32_t unused_mi_apm_table;
	/* Valid if mi_flags sets MULTIBOOT_INFO_HAS_VBE. */
	uint32_t unused_mi_vbe_control_info;
	uint32_t unused_mi_vbe_mode_info;
	uint32_t unused_mi_vbe_interface_seg;
	uint32_t unused_mi_vbe_interface_off;
	uint32_t unused_mi_vbe_interface_len;
};
/*
 * One entry of the boot loader supplied memory map (at mi_mmap_addr).
 * Per the Multiboot spec 'size' does NOT include itself: the next entry
 * starts at (entry_addr + size + 4).  Must be packed because the 64-bit
 * fields follow a lone 32-bit field without padding.
 */
struct multiboot_mmap {
	uint32_t size;		/* entry size in bytes, excluding this field */
	uint64_t baseaddr;	/* start physical address of the region */
	uint64_t length;	/* length of the region in bytes */
	uint32_t type;		/* region type; 1 = available RAM (Multiboot) */
} __packed;
/* Boot module descriptor; mi_mods_addr points at an array of these. */
struct multiboot_module {
	uint32_t mm_mod_start;	/* physical start address of the module */
	uint32_t mm_mod_end;	/* physical end address of the module */
	uint32_t mm_string;	/* physical addr of NUL-terminated argument string */
	uint32_t mm_reserved;	/* must be zero per the Multiboot spec */
};
int parse_hv_cmdline(void);
#endif

View File

@@ -0,0 +1,47 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SOFTIRQ_H
#define SOFTIRQ_H
/* Softirq (deferred bottom-half work) identifiers. */
#define SOFTIRQ_TIMER 0		/* timer expiry processing */
#define SOFTIRQ_DEV_ASSIGN 1	/* device assignment handling */
#define SOFTIRQ_MAX 2		/* number of softirq ids defined above */
#define SOFTIRQ_MASK ((1UL<<SOFTIRQ_MAX)-1)	/* mask of all valid softirq bits */
/* bit used as an atomic flag, presumably to prevent recursive softirq
 * execution -- confirm against the implementation */
#define SOFTIRQ_ATOMIC 63
void enable_softirq(int cpu_id);	/* allow softirq handling on cpu_id */
void disable_softirq(int cpu_id);	/* block softirq handling on cpu_id */
void init_softirq(void);		/* one-time softirq subsystem setup */
void raise_softirq(int softirq_id);	/* mark the given softirq pending */
void exec_softirq(void);		/* run pending softirqs on this cpu */
#endif /* SOFTIRQ_H */

View File

@@ -0,0 +1,47 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TIMER_H
#define TIMER_H
/* Timer callback; invoked with the opaque 'data' value registered via
 * add_timer()/update_timer().  Return-value semantics are defined by the
 * implementation (not visible in this header). */
typedef int (*timer_handle_t)(uint64_t);
/* Register func(data) to fire at 'deadline'; returns a timer handle
 * (presumably negative on failure -- confirm against implementation). */
long add_timer(timer_handle_t func, uint64_t data, uint64_t deadline);
/* Cancel the timer identified by 'handle' on cpu_id; true on success. */
bool cancel_timer(long handle, int cpu_id);
/* Re-arm an existing timer with a new callback/data/deadline; returns
 * the (possibly new) handle. */
long update_timer(long handle, timer_handle_t func, uint64_t data,
		uint64_t deadline);
/* SOFTIRQ_TIMER bottom-half entry point for cpu_id. */
int timer_softirq(int cpu_id);
void timer_init(void);		/* initialize the timer subsystem */
void timer_cleanup(void);	/* tear down the timer subsystem */
void dump_timer_pool_info(int cpu_id);	/* debug dump of cpu_id's timer pool */
void check_tsc(void);		/* sanity-check TSC configuration */
#endif /* TIMER_H */

View File

@@ -0,0 +1,75 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef VMEXIT_H_
#define VMEXIT_H_
/* Per-exit-reason dispatch entry used by vmexit_handler(). */
struct vm_exit_dispatch {
	int (*handler)(struct vcpu *);	/* handler invoked for this exit reason */
	uint32_t need_exit_qualification;	/* non-zero: presumably read the
						 * exit qualification field before
						 * calling handler -- confirm in
						 * vmexit.c */
};
struct vm_exit_dispatch *vmexit_handler(struct vcpu *vcpu);
int vmcall_handler(struct vcpu *vcpu);
int cpuid_handler(struct vcpu *vcpu);
int cr_access_handler(struct vcpu *vcpu);
int get_vmexit_profile(char *str, int str_max);
/*
 * Extract the bit field [MSB:LSB] from a VM-exit qualification word.
 * The field is returned still at its original bit position; the accessor
 * macros below shift it down to bit 0.
 *
 * Fix: 'exit_qual' and 'MSB' are now parenthesized.  The previous form
 * expanded a compound argument such as (a | b) into "a | b & mask",
 * which C operator precedence re-associates as "a | (b & mask)" and
 * yields the wrong value.
 */
#define VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, MSB, LSB) \
	((exit_qual) & (((1UL << ((MSB)+1))-1) - ((1UL << (LSB))-1)))
/* MACROs to access Control-Register Info using exit qualification field */
#define VM_EXIT_CR_ACCESS_CR_NUM(exit_qual) \
	(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 3, 0) >> 0)
#define VM_EXIT_CR_ACCESS_ACCESS_TYPE(exit_qual) \
	(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 5, 4) >> 4)
#define VM_EXIT_CR_ACCESS_LMSW_OP(exit_qual) \
	(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 6, 6) >> 6)
#define VM_EXIT_CR_ACCESS_REG_IDX(exit_qual) \
	(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 11, 8) >> 8)
#define VM_EXIT_CR_ACCESS_LMSW_SRC_DATE(exit_qual) \
	(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 31, 16) >> 16)
/* MACROs to access IO Access Info using exit qualification field */
#define VM_EXIT_IO_INSTRUCTION_SIZE(exit_qual) \
	(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 2, 0) >> 0)
#define VM_EXIT_IO_INSTRUCTION_ACCESS_DIRECTION(exit_qual) \
	(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 3, 3) >> 3)
#define VM_EXIT_IO_INSTRUCTION_IS_STRING(exit_qual) \
	(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 4, 4) >> 4)
#define VM_EXIT_IO_INSTRUCTION_IS_REP_PREFIXED(exit_qual) \
	(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 5, 5) >> 5)
#define VM_EXIT_IO_INSTRUCTION_IS_OPERAND_ENCODING(exit_qual) \
	(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 6, 6) >> 6)
#define VM_EXIT_IO_INSTRUCTION_PORT_NUMBER(exit_qual) \
	(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 31, 16) >> 16)
#endif /* VMEXIT_H_ */

View File

@@ -0,0 +1,433 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef VMX_H_
#define VMX_H_
/* 16-bit control fields */
#define VMX_VPID 0x00000000
/* 16-bit guest-state fields */
#define VMX_GUEST_ES_SEL 0x00000800
#define VMX_GUEST_CS_SEL 0x00000802
#define VMX_GUEST_SS_SEL 0x00000804
#define VMX_GUEST_DS_SEL 0x00000806
#define VMX_GUEST_FS_SEL 0x00000808
#define VMX_GUEST_GS_SEL 0x0000080a
#define VMX_GUEST_LDTR_SEL 0x0000080c
#define VMX_GUEST_TR_SEL 0x0000080e
#define VMX_GUEST_INTR_STATUS 0x00000810
/* 16-bit host-state fields */
#define VMX_HOST_ES_SEL 0x00000c00
#define VMX_HOST_CS_SEL 0x00000c02
#define VMX_HOST_SS_SEL 0x00000c04
#define VMX_HOST_DS_SEL 0x00000c06
#define VMX_HOST_FS_SEL 0x00000c08
#define VMX_HOST_GS_SEL 0x00000c0a
#define VMX_HOST_TR_SEL 0x00000c0c
/* 64-bit control fields */
#define VMX_IO_BITMAP_A_FULL 0x00002000
#define VMX_IO_BITMAP_A_HIGH 0x00002001
#define VMX_IO_BITMAP_B_FULL 0x00002002
#define VMX_IO_BITMAP_B_HIGH 0x00002003
#define VMX_MSR_BITMAP_FULL 0x00002004
#define VMX_MSR_BITMAP_HIGH 0x00002005
#define VMX_EXIT_MSR_STORE_ADDR_FULL 0x00002006
#define VMX_EXIT_MSR_STORE_ADDR_HIGH 0x00002007
#define VMX_EXIT_MSR_LOAD_ADDR_FULL 0x00002008
#define VMX_EXIT_MSR_LOAD_ADDR_HIGH 0x00002009
#define VMX_ENTRY_MSR_LOAD_ADDR_FULL 0x0000200a
#define VMX_ENTRY_MSR_LOAD_ADDR_HIGH 0x0000200b
#define VMX_EXECUTIVE_VMCS_PTR_FULL 0x0000200c
#define VMX_EXECUTIVE_VMCS_PTR_HIGH 0x0000200d
#define VMX_TSC_OFFSET_FULL 0x00002010
#define VMX_TSC_OFFSET_HIGH 0x00002011
#define VMX_VIRTUAL_APIC_PAGE_ADDR_FULL 0x00002012
#define VMX_VIRTUAL_APIC_PAGE_ADDR_HIGH 0x00002013
#define VMX_APIC_ACCESS_ADDR_FULL 0x00002014
#define VMX_APIC_ACCESS_ADDR_HIGH 0x00002015
#define VMX_EPT_POINTER_FULL 0x0000201A
#define VMX_EPT_POINTER_HIGH 0x0000201B
#define VMX_EOI_EXIT0_FULL 0x0000201C
#define VMX_EOI_EXIT0_HIGH 0x0000201D
#define VMX_EOI_EXIT1_FULL 0x0000201E
#define VMX_EOI_EXIT1_HIGH 0x0000201F
#define VMX_EOI_EXIT2_FULL 0x00002020
#define VMX_EOI_EXIT2_HIGH 0x00002021
#define VMX_EOI_EXIT3_FULL 0x00002022
#define VMX_EOI_EXIT3_HIGH 0x00002023
#define VMX_EOI_EXIT(vector) (VMX_EOI_EXIT0_FULL + ((vector) / 64) * 2)
/* 64-bit read-only data fields */
#define VMX_GUEST_PHYSICAL_ADDR_FULL 0x00002400
#define VMX_GUEST_PHYSICAL_ADDR_HIGH 0x00002401
/* 64-bit guest-state fields */
#define VMX_VMS_LINK_PTR_FULL 0x00002800
#define VMX_VMS_LINK_PTR_HIGH 0x00002801
#define VMX_GUEST_IA32_DEBUGCTL_FULL 0x00002802
#define VMX_GUEST_IA32_DEBUGCTL_HIGH 0x00002803
#define VMX_GUEST_IA32_PAT_FULL 0x00002804
#define VMX_GUEST_IA32_PAT_HIGH 0x00002805
#define VMX_GUEST_IA32_EFER_FULL 0x00002806
#define VMX_GUEST_IA32_EFER_HIGH 0x00002807
#define VMX_GUEST_IA32_PERF_CTL_FULL 0x00002808
#define VMX_GUEST_IA32_PERF_CTL_HIGH 0x00002809
#define VMX_GUEST_PDPTE0_FULL 0x0000280A
#define VMX_GUEST_PDPTE0_HIGH 0x0000280B
#define VMX_GUEST_PDPTE1_FULL 0x0000280C
#define VMX_GUEST_PDPTE1_HIGH 0x0000280D
#define VMX_GUEST_PDPTE2_FULL 0x0000280E
#define VMX_GUEST_PDPTE2_HIGH 0x0000280F
#define VMX_GUEST_PDPTE3_FULL 0x00002810
#define VMX_GUEST_PDPTE3_HIGH 0x00002811
/* 64-bit host-state fields */
#define VMX_HOST_IA32_PAT_FULL 0x00002C00
#define VMX_HOST_IA32_PAT_HIGH 0x00002C01
#define VMX_HOST_IA32_EFER_FULL 0x00002C02
#define VMX_HOST_IA32_EFER_HIGH 0x00002C03
#define VMX_HOST_IA32_PERF_CTL_FULL 0x00002C04
#define VMX_HOST_IA32_PERF_CTL_HIGH 0x00002C05
/* 32-bit control fields */
#define VMX_PIN_VM_EXEC_CONTROLS 0x00004000
#define VMX_PROC_VM_EXEC_CONTROLS 0x00004002
#define VMX_EXCEPTION_BITMAP 0x00004004
#define VMX_PF_EC_MASK 0x00004006
#define VMX_PF_EC_MATCH 0x00004008
#define VMX_CR3_TARGET_COUNT 0x0000400a
#define VMX_EXIT_CONTROLS 0x0000400c
#define VMX_EXIT_MSR_STORE_COUNT 0x0000400e
#define VMX_EXIT_MSR_LOAD_COUNT 0x00004010
#define VMX_ENTRY_CONTROLS 0x00004012
#define VMX_ENTRY_MSR_LOAD_COUNT 0x00004014
#define VMX_ENTRY_INT_INFO_FIELD 0x00004016
#define VMX_ENTRY_EXCEPTION_EC 0x00004018
#define VMX_ENTRY_INSTR_LENGTH 0x0000401a
#define VMX_TPR_THRESHOLD 0x0000401c
#define VMX_PROC_VM_EXEC_CONTROLS2 0x0000401E
#define VMX_PLE_GAP 0x00004020
#define VMX_PLE_WINDOW 0x00004022
/* 32-bit read-only data fields */
#define VMX_INSTR_ERROR 0x00004400
#define VMX_EXIT_REASON 0x00004402
#define VMX_EXIT_INT_INFO 0x00004404
#define VMX_EXIT_INT_EC 0x00004406
#define VMX_IDT_VEC_INFO_FIELD 0x00004408
#define VMX_IDT_VEC_EC 0x0000440a
#define VMX_EXIT_INSTR_LEN 0x0000440c
#define VMX_INSTR_INFO 0x0000440e
/* 32-bit guest-state fields */
#define VMX_GUEST_ES_LIMIT 0x00004800
#define VMX_GUEST_CS_LIMIT 0x00004802
#define VMX_GUEST_SS_LIMIT 0x00004804
#define VMX_GUEST_DS_LIMIT 0x00004806
#define VMX_GUEST_FS_LIMIT 0x00004808
#define VMX_GUEST_GS_LIMIT 0x0000480a
#define VMX_GUEST_LDTR_LIMIT 0x0000480c
#define VMX_GUEST_TR_LIMIT 0x0000480e
#define VMX_GUEST_GDTR_LIMIT 0x00004810
#define VMX_GUEST_IDTR_LIMIT 0x00004812
#define VMX_GUEST_ES_ATTR 0x00004814
#define VMX_GUEST_CS_ATTR 0x00004816
#define VMX_GUEST_SS_ATTR 0x00004818
#define VMX_GUEST_DS_ATTR 0x0000481a
#define VMX_GUEST_FS_ATTR 0x0000481c
#define VMX_GUEST_GS_ATTR 0x0000481e
#define VMX_GUEST_LDTR_ATTR 0x00004820
#define VMX_GUEST_TR_ATTR 0x00004822
#define VMX_GUEST_INTERRUPTIBILITY_INFO 0x00004824
#define VMX_GUEST_ACTIVITY_STATE 0x00004826
#define VMX_GUEST_SMBASE 0x00004828
#define VMX_GUEST_IA32_SYSENTER_CS 0x0000482a
#define VMX_GUEST_TIMER 0x0000482E
/* 32-bit host-state fields */
#define VMX_HOST_IA32_SYSENTER_CS 0x00004c00
/* natural-width control fields */
#define VMX_CR0_MASK 0x00006000
#define VMX_CR4_MASK 0x00006002
#define VMX_CR0_READ_SHADOW 0x00006004
#define VMX_CR4_READ_SHADOW 0x00006006
#define VMX_CR3_TARGET_0 0x00006008
#define VMX_CR3_TARGET_1 0x0000600a
#define VMX_CR3_TARGET_2 0x0000600c
#define VMX_CR3_TARGET_3 0x0000600e
/* natural-width read-only data fields */
#define VMX_EXIT_QUALIFICATION 0x00006400
#define VMX_IO_RCX 0x00006402
#define VMX_IO_RDI 0x00006406
#define VMX_GUEST_LINEAR_ADDR 0x0000640a
/* natural-width guest-state fields */
#define VMX_GUEST_CR0 0x00006800
#define VMX_GUEST_CR3 0x00006802
#define VMX_GUEST_CR4 0x00006804
#define VMX_GUEST_ES_BASE 0x00006806
#define VMX_GUEST_CS_BASE 0x00006808
#define VMX_GUEST_SS_BASE 0x0000680a
#define VMX_GUEST_DS_BASE 0x0000680c
#define VMX_GUEST_FS_BASE 0x0000680e
#define VMX_GUEST_GS_BASE 0x00006810
#define VMX_GUEST_LDTR_BASE 0x00006812
#define VMX_GUEST_TR_BASE 0x00006814
#define VMX_GUEST_GDTR_BASE 0x00006816
#define VMX_GUEST_IDTR_BASE 0x00006818
#define VMX_GUEST_DR7 0x0000681a
#define VMX_GUEST_RSP 0x0000681c
#define VMX_GUEST_RIP 0x0000681e
#define VMX_GUEST_RFLAGS 0x00006820
#define VMX_GUEST_PENDING_DEBUG_EXCEPT 0x00006822
#define VMX_GUEST_IA32_SYSENTER_ESP 0x00006824
#define VMX_GUEST_IA32_SYSENTER_EIP 0x00006826
/* natural-width host-state fields */
#define VMX_HOST_CR0 0x00006c00
#define VMX_HOST_CR3 0x00006c02
#define VMX_HOST_CR4 0x00006c04
#define VMX_HOST_FS_BASE 0x00006c06
#define VMX_HOST_GS_BASE 0x00006c08
#define VMX_HOST_TR_BASE 0x00006c0a
#define VMX_HOST_GDTR_BASE 0x00006c0c
#define VMX_HOST_IDTR_BASE 0x00006c0e
#define VMX_HOST_IA32_SYSENTER_ESP 0x00006c10
#define VMX_HOST_IA32_SYSENTER_EIP 0x00006c12
#define VMX_HOST_RSP 0x00006c14
#define VMX_HOST_RIP 0x00006c16
/*
* Basic VM exit reasons
*/
#define VMX_EXIT_REASON_EXCEPTION_OR_NMI 0x00000000
#define VMX_EXIT_REASON_EXTERNAL_INTERRUPT 0x00000001
#define VMX_EXIT_REASON_TRIPLE_FAULT 0x00000002
#define VMX_EXIT_REASON_INIT_SIGNAL 0x00000003
#define VMX_EXIT_REASON_STARTUP_IPI 0x00000004
#define VMX_EXIT_REASON_IO_SMI 0x00000005
#define VMX_EXIT_REASON_OTHER_SMI 0x00000006
#define VMX_EXIT_REASON_INTERRUPT_WINDOW 0x00000007
#define VMX_EXIT_REASON_NMI_WINDOW 0x00000008
#define VMX_EXIT_REASON_TASK_SWITCH 0x00000009
#define VMX_EXIT_REASON_CPUID 0x0000000A
#define VMX_EXIT_REASON_GETSEC 0x0000000B
#define VMX_EXIT_REASON_HLT 0x0000000C
#define VMX_EXIT_REASON_INVD 0x0000000D
#define VMX_EXIT_REASON_INVLPG 0x0000000E
#define VMX_EXIT_REASON_RDPMC 0x0000000F
#define VMX_EXIT_REASON_RDTSC 0x00000010
#define VMX_EXIT_REASON_RSM 0x00000011
#define VMX_EXIT_REASON_VMCALL 0x00000012
#define VMX_EXIT_REASON_VMCLEAR 0x00000013
#define VMX_EXIT_REASON_VMLAUNCH 0x00000014
#define VMX_EXIT_REASON_VMPTRLD 0x00000015
#define VMX_EXIT_REASON_VMPTRST 0x00000016
#define VMX_EXIT_REASON_VMREAD 0x00000017
#define VMX_EXIT_REASON_VMRESUME 0x00000018
#define VMX_EXIT_REASON_VMWRITE 0x00000019
#define VMX_EXIT_REASON_VMXOFF 0x0000001A
#define VMX_EXIT_REASON_VMXON 0x0000001B
#define VMX_EXIT_REASON_CR_ACCESS 0x0000001C
#define VMX_EXIT_REASON_DR_ACCESS 0x0000001D
#define VMX_EXIT_REASON_IO_INSTRUCTION 0x0000001E
#define VMX_EXIT_REASON_RDMSR 0x0000001F
#define VMX_EXIT_REASON_WRMSR 0x00000020
#define VMX_EXIT_REASON_ENTRY_FAILURE_INVALID_GUEST_STATE 0x00000021
#define VMX_EXIT_REASON_ENTRY_FAILURE_MSR_LOADING 0x00000022
/* entry 0x23 (35) is missing */
#define VMX_EXIT_REASON_MWAIT 0x00000024
#define VMX_EXIT_REASON_MONITOR_TRAP 0x00000025
/* entry 0x26 (38) is missing */
#define VMX_EXIT_REASON_MONITOR 0x00000027
#define VMX_EXIT_REASON_PAUSE 0x00000028
#define VMX_EXIT_REASON_ENTRY_FAILURE_MACHINE_CHECK 0x00000029
/* entry 0x2A (42) is missing */
#define VMX_EXIT_REASON_TPR_BELOW_THRESHOLD 0x0000002B
#define VMX_EXIT_REASON_APIC_ACCESS 0x0000002C
#define VMX_EXIT_REASON_VIRTUALIZED_EOI 0x0000002D
#define VMX_EXIT_REASON_GDTR_IDTR_ACCESS 0x0000002E
#define VMX_EXIT_REASON_LDTR_TR_ACCESS 0x0000002F
#define VMX_EXIT_REASON_EPT_VIOLATION 0x00000030
#define VMX_EXIT_REASON_EPT_MISCONFIGURATION 0x00000031
#define VMX_EXIT_REASON_INVEPT 0x00000032
#define VMX_EXIT_REASON_RDTSCP 0x00000033
#define VMX_EXIT_REASON_VMX_PREEMPTION_TIMER_EXPIRED 0x00000034
#define VMX_EXIT_REASON_INVVPID 0x00000035
#define VMX_EXIT_REASON_WBINVD 0x00000036
#define VMX_EXIT_REASON_XSETBV 0x00000037
#define VMX_EXIT_REASON_APIC_WRITE 0x00000038
/* VMX execution control bits (pin based) */
#define VMX_PINBASED_CTLS_IRQ_EXIT (1<<0)
#define VMX_PINBASED_CTLS_NMI_EXIT (1<<3)
#define VMX_PINBASED_CTLS_VIRT_NMI (1<<5)
#define VMX_PINBASED_CTLS_ENABLE_PTMR (1<<6)
#define VMX_PINBASED_CTLS_POST_IRQ (1<<7)
/* VMX execution control bits (processor based) */
#define VMX_PROCBASED_CTLS_IRQ_WIN (1<<2)
#define VMX_PROCBASED_CTLS_TSC_OFF (1<<3)
#define VMX_PROCBASED_CTLS_HLT (1<<7)
#define VMX_PROCBASED_CTLS_INVLPG (1<<9)
#define VMX_PROCBASED_CTLS_MWAIT (1<<10)
#define VMX_PROCBASED_CTLS_RDPMC (1<<11)
#define VMX_PROCBASED_CTLS_RDTSC (1<<12)
#define VMX_PROCBASED_CTLS_CR3_LOAD (1<<15)
#define VMX_PROCBASED_CTLS_CR3_STORE (1<<16)
#define VMX_PROCBASED_CTLS_CR8_LOAD (1<<19)
#define VMX_PROCBASED_CTLS_CR8_STORE (1<<20)
#define VMX_PROCBASED_CTLS_TPR_SHADOW (1<<21)
#define VMX_PROCBASED_CTLS_NMI_WINEXIT (1<<22)
#define VMX_PROCBASED_CTLS_MOV_DR (1<<23)
#define VMX_PROCBASED_CTLS_UNCOND_IO (1<<24)
#define VMX_PROCBASED_CTLS_IO_BITMAP (1<<25)
#define VMX_PROCBASED_CTLS_MON_TRAP (1<<27)
#define VMX_PROCBASED_CTLS_MSR_BITMAP (1<<28)
#define VMX_PROCBASED_CTLS_MONITOR (1<<29)
#define VMX_PROCBASED_CTLS_PAUSE (1<<30)
/* 1U: left-shifting a signed 1 into bit 31 is undefined behavior in C. */
#define VMX_PROCBASED_CTLS_SECONDARY (1U<<31)
/* VMX secondary processor-based execution controls */
#define VMX_PROCBASED_CTLS2_VAPIC (1<<0)
#define VMX_PROCBASED_CTLS2_EPT (1<<1)
#define VMX_PROCBASED_CTLS2_DESC_TABLE (1<<2)
#define VMX_PROCBASED_CTLS2_RDTSCP (1<<3)
#define VMX_PROCBASED_CTLS2_VX2APIC (1<<4)
#define VMX_PROCBASED_CTLS2_VPID (1<<5)
#define VMX_PROCBASED_CTLS2_WBINVD (1<<6)
#define VMX_PROCBASED_CTLS2_UNRESTRICT (1<<7)
#define VMX_PROCBASED_CTLS2_VAPIC_REGS (1<<8)
#define VMX_PROCBASED_CTLS2_VIRQ (1<<9)
#define VMX_PROCBASED_CTLS2_PAUSE_LOOP (1<<10)
#define VMX_PROCBASED_CTLS2_RDRAND (1<<11)
#define VMX_PROCBASED_CTLS2_INVPCID (1<<12)
#define VMX_PROCBASED_CTLS2_VM_FUNCS (1<<13)
#define VMX_PROCBASED_CTLS2_VMCS_SHADW (1<<14)
#define VMX_PROCBASED_CTLS2_RDSEED (1<<16)
#define VMX_PROCBASED_CTLS2_EPT_VE (1<<18)
#define VMX_PROCBASED_CTLS2_XSVE_XRSTR (1<<20)
/* VMX exit control bits */
#define VMX_EXIT_CTLS_SAVE_DBG (1<<2)
#define VMX_EXIT_CTLS_HOST_ADDR64 (1<<9)
#define VMX_EXIT_CTLS_LOAD_PERF (1<<12)
#define VMX_EXIT_CTLS_ACK_IRQ (1<<15)
#define VMX_EXIT_CTLS_SAVE_PAT (1<<18)
#define VMX_EXIT_CTLS_LOAD_PAT (1<<19)
#define VMX_EXIT_CTLS_SAVE_EFER (1<<20)
#define VMX_EXIT_CTLS_LOAD_EFER (1<<21)
#define VMX_EXIT_CTLS_SAVE_PTMR (1<<22)
/* VMX entry control bits */
#define VMX_ENTRY_CTLS_LOAD_DBG (1<<2)
#define VMX_ENTRY_CTLS_IA32E_MODE (1<<9)
#define VMX_ENTRY_CTLS_ENTRY_SMM (1<<10)
#define VMX_ENTRY_CTLS_DEACT_DUAL (1<<11)
#define VMX_ENTRY_CTLS_LOAD_PERF (1<<13)
#define VMX_ENTRY_CTLS_LOAD_PAT (1<<14)
#define VMX_ENTRY_CTLS_LOAD_EFER (1<<15)
/* VMX entry/exit Interrupt info */
#define VMX_INT_INFO_ERR_CODE_VALID (1<<11)
/* 1U: left-shifting a signed 1 into bit 31 is undefined behavior in C. */
#define VMX_INT_INFO_VALID (1U<<31)
#define VMX_INT_TYPE_EXT_INT 0
#define VMX_INT_TYPE_NMI 2
#define VMX_INT_TYPE_HW_EXP 3
#define VMX_INT_TYPE_SW_EXP 6
/* Generic success/failure return codes for VMX operations. */
#define VM_SUCCESS 0
#define VM_FAIL (-1)	/* parenthesized so the macro expands safely in expressions */
#define VMX_VMENTRY_FAIL 0x80000000
#ifndef ASSEMBLER
/* RFLAGS bits checked after VMX instructions (VMfailInvalid/VMfailValid). */
#define RFLAGS_C (1<<0)
#define RFLAGS_Z (1<<6)
/*
 * Handling of CR0:
 *
 * - PE (0) Must always be 1. Attempt to write to it must lead to a VM exit.
 * - MP (1) coprocessor related => no action needed
 * - EM (2) coprocessor related => no action needed
 * - TS (3) no action needed
 * - ET (4) typically hardcoded to 1. => no action needed
 * - NE (5) coprocessor related => no action needed
 * - WP (16) inhibits supervisor level procedures to write into ro-pages
 * => no action needed
 * - AM (18) alignment mask => no action needed
 * - NW (29) not write through => no action
 * - CD (30) cache disable => no action
 * - PG (31) paging => must always be 1. Attempt to write to it must lead to
 * a VM exit.
 */
/* we must guard protected mode and paging */
/* CR0_* bit constants are defined elsewhere in the project. */
#define CR0_GUEST_HOST_MASK (CR0_PE | CR0_PG | CR0_WP)
/* initially, the guest runs in protected mode enabled, but with no paging */
#define CR0_READ_SHADOW CR0_PE
/*
 * Handling of CR4:
 *
 * - VME (0) must always be 0 => must lead to a VM exit
 * - PVI (1) must always be 0 => must lead to a VM exit
 * - TSD (2) don't care
 * - DE (3) don't care
 * - PSE (4) must always be 1 => must lead to a VM exit
 * - PAE (5) must always be 0 => must lead to a VM exit
 * - MCE (6) don't care
 * - PGE (7) => important for TLB flush
 * - PCE (8) don't care
 * - OSFXSR (9) don't care
 * - OSXMMEXCPT (10) don't care
 * - VMXE (13) must always be 1 => must lead to a VM exit
 * - SMXE (14) must always be 0 => must lead to a VM exit
 * - PCIDE (17) => important for TLB flush
 * - OSXSAVE (18) don't care
 */
#define CR4_GUEST_HOST_MASK (CR4_VME | CR4_PVI | CR4_PSE | CR4_PAE | \
CR4_VMXE | CR4_SMXE | CR4_PGE | CR4_PCIDE)
#define CR4_READ_SHADOW (CR4_PGE | CR4_PSE)
/* VCPU config definitions */
#define REAL_MODE 1
#define PAGE_PROTECTED_MODE 2
/* External Interfaces */
/* Probe CPUID for VMX capability. */
int check_vmx_support(void);
/* Enter VMX root operation on the current CPU. */
int exec_vmxon_instr(void);
/* VMREAD/VMWRITE wrappers; *64 variants take the _FULL field encoding. */
uint64_t exec_vmread(uint32_t field);
uint64_t exec_vmread64(uint32_t field_full);
void exec_vmwrite(uint32_t field, uint64_t value);
void exec_vmwrite64(uint32_t field_full, uint64_t value);
/* Initialize the VMCS for a vcpu. */
int init_vmcs(struct vcpu *vcpu);
/* VMCLEAR / VMPTRLD wrappers on a VMCS region address. */
int exec_vmclear(void *addr);
int exec_vmptrld(void *addr);
/* Return the execution mode recorded in vcpu->arch_vcpu.cpu_mode
 * (presumably one of the VCPU config values above — REAL_MODE /
 * PAGE_PROTECTED_MODE). */
static inline uint8_t get_vcpu_mode(struct vcpu *vcpu)
{
	uint8_t cur_mode = vcpu->arch_vcpu.cpu_mode;

	return cur_mode;
}
#endif /* ASSEMBLER */
#endif /* VMX_H_ */

View File

@@ -0,0 +1,258 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef VTD_H
#define VTD_H
/*
 * Intel IOMMU register specification per version 1.0 public spec.
 * Offsets below are byte offsets into an IOMMU's MMIO register page.
 */
#define DMAR_VER_REG 0x0 /* Arch version supported by this IOMMU */
#define DMAR_CAP_REG 0x8 /* Hardware supported capabilities */
#define DMAR_ECAP_REG 0x10 /* Extended capabilities supported */
#define DMAR_GCMD_REG 0x18 /* Global command register */
#define DMAR_GSTS_REG 0x1c /* Global status register */
#define DMAR_RTADDR_REG 0x20 /* Root entry table */
#define DMAR_CCMD_REG 0x28 /* Context command reg */
#define DMAR_FSTS_REG 0x34 /* Fault Status register */
#define DMAR_FECTL_REG 0x38 /* Fault control register */
#define DMAR_FEDATA_REG 0x3c /* Fault event interrupt data register */
#define DMAR_FEADDR_REG 0x40 /* Fault event interrupt addr register */
#define DMAR_FEUADDR_REG 0x44 /* Upper address register */
#define DMAR_AFLOG_REG 0x58 /* Advanced Fault control */
#define DMAR_PMEN_REG 0x64 /* Enable Protected Memory Region */
#define DMAR_PLMBASE_REG 0x68 /* PMRR Low addr */
#define DMAR_PLMLIMIT_REG 0x6c /* PMRR low limit */
#define DMAR_PHMBASE_REG 0x70 /* pmrr high base addr */
#define DMAR_PHMLIMIT_REG 0x78 /* pmrr high limit */
#define DMAR_IQH_REG 0x80 /* Invalidation queue head register */
#define DMAR_IQT_REG 0x88 /* Invalidation queue tail register */
#define DMAR_IQ_SHIFT 4 /* Invalidation queue head/tail shift */
#define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */
#define DMAR_ICS_REG 0x9c /* Invalidation complete status register */
#define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */
/* Split the DMAR_VER_REG value into major/minor version numbers. */
#define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4)
#define DMAR_VER_MINOR(v) ((v) & 0x0f)
/*
 * Decoding Capability Register (64-bit value read from DMAR_CAP_REG).
 */
#define iommu_cap_pi(c) (((c) >> 59) & 1)
#define iommu_cap_read_drain(c) (((c) >> 55) & 1)
#define iommu_cap_write_drain(c) (((c) >> 54) & 1)
#define iommu_cap_max_amask_val(c) (((c) >> 48) & 0x3f)
#define iommu_cap_num_fault_regs(c) ((((c) >> 40) & 0xff) + 1)
#define iommu_cap_pgsel_inv(c) (((c) >> 39) & 1)
#define iommu_cap_super_page_val(c) (((c) >> 34) & 0xf)
/*
 * FIX: the original expanded to find_first_bit(&iommu_cap_super_page_val(c), ...)
 * which takes the address of an rvalue and cannot compile when used.
 * Materialize the value in a temporary via a GNU statement expression.
 * NOTE(review): assumes find_first_bit() takes a pointer to a 64-bit
 * bitmap word — confirm against its definition.
 */
#define iommu_cap_super_offset(c) \
	({ uint64_t ffb_tmp_ = iommu_cap_super_page_val(c); \
	   ((find_first_bit(&ffb_tmp_, 4)) * OFFSET_STRIDE) + 21; })
#define iommu_cap_fault_reg_offset(c) ((((c) >> 24) & 0x3ff) * 16)
#define iommu_cap_max_fault_reg_offset(c) \
	(iommu_cap_fault_reg_offset(c) + iommu_cap_num_fault_regs(c) * 16)
#define iommu_cap_zlr(c) (((c) >> 22) & 1)
#define iommu_cap_isoch(c) (((c) >> 23) & 1)
#define iommu_cap_mgaw(c) ((((c) >> 16) & 0x3f) + 1)
#define iommu_cap_sagaw(c) (((c) >> 8) & 0x1f)
#define iommu_cap_caching_mode(c) (((c) >> 7) & 1)
#define iommu_cap_phmr(c) (((c) >> 6) & 1)
#define iommu_cap_plmr(c) (((c) >> 5) & 1)
#define iommu_cap_rwbf(c) (((c) >> 4) & 1)
#define iommu_cap_afl(c) (((c) >> 3) & 1)
/* Number of domains supported: 2^(4 + 2*ND), ND in bits 2:0. */
#define iommu_cap_ndoms(c) (((unsigned long)1) << (4 + 2 * ((c) & 0x7)))
/*
 * Decoding Extended Capability Register (64-bit value from DMAR_ECAP_REG).
 */
#define iommu_ecap_c(c) (((c) >> 0) & 1)
#define iommu_ecap_qi(c) (((c) >> 1) & 1)
#define iommu_ecap_dt(c) (((c) >> 2) & 1)
#define iommu_ecap_ir(c) (((c) >> 3) & 1)
#define iommu_ecap_eim(c) (((c) >> 4) & 1)
#define iommu_ecap_pt(c) (((c) >> 6) & 1)
#define iommu_ecap_sc(c) (((c) >> 7) & 1)
#define iommu_ecap_iro(c) (((c) >> 8) & 0x3ff)
#define iommu_ecap_mhmv(c) (((c) >> 20) & 0xf)
#define iommu_ecap_ecs(c) (((c) >> 24) & 1)
#define iommu_ecap_mts(c) (((c) >> 25) & 1)
#define iommu_ecap_nest(c) (((c) >> 26) & 1)
#define iommu_ecap_dis(c) (((c) >> 27) & 1)
#define iommu_ecap_prs(c) (((c) >> 29) & 1)
#define iommu_ecap_ers(c) (((c) >> 30) & 1)
#define iommu_ecap_srs(c) (((c) >> 31) & 1)
#define iommu_ecap_nwfs(c) (((c) >> 33) & 1)
#define iommu_ecap_eafs(c) (((c) >> 34) & 1)
#define iommu_ecap_pss(c) (((c) >> 35) & 0x1f)
#define iommu_ecap_pasid(c) (((c) >> 40) & 1)
#define iommu_ecap_dit(c) (((c) >> 41) & 1)
#define iommu_ecap_pds(c) (((c) >> 42) & 1)
/* PMEN_REG: protected-memory enable/status bits. */
#define DMA_PMEN_EPM (((uint32_t)1)<<31)
#define DMA_PMEN_PRS (((uint32_t)1)<<0)
/* GCMD_REG: global command bits (write-only trigger bits). */
#define DMA_GCMD_TE (((uint32_t)1) << 31)
#define DMA_GCMD_SRTP (((uint32_t)1) << 30)
#define DMA_GCMD_SFL (((uint32_t)1) << 29)
#define DMA_GCMD_EAFL (((uint32_t)1) << 28)
#define DMA_GCMD_WBF (((uint32_t)1) << 27)
#define DMA_GCMD_QIE (((uint32_t)1) << 26)
#define DMA_GCMD_SIRTP (((uint32_t)1) << 24)
#define DMA_GCMD_IRE (((uint32_t) 1) << 25)
#define DMA_GCMD_CFI (((uint32_t) 1) << 23)
/* GSTS_REG: global status bits mirroring the GCMD bits above. */
#define DMA_GSTS_TES (((uint32_t)1) << 31)
#define DMA_GSTS_RTPS (((uint32_t)1) << 30)
#define DMA_GSTS_FLS (((uint32_t)1) << 29)
#define DMA_GSTS_AFLS (((uint32_t)1) << 28)
#define DMA_GSTS_WBFS (((uint32_t)1) << 27)
#define DMA_GSTS_QIES (((uint32_t)1) << 26)
#define DMA_GSTS_IRTPS (((uint32_t)1) << 24)
#define DMA_GSTS_IRES (((uint32_t)1) << 25)
#define DMA_GSTS_CFIS (((uint32_t)1) << 23)
/* CCMD_REG: context-cache invalidation command encodings. */
#define DMA_CCMD_ICC (((uint64_t)1) << 63)
#define DMA_CCMD_ICC_32 (((uint32_t)1) << 31)
#define DMA_CCMD_GLOBAL_INVL (((uint64_t)1) << 61)
#define DMA_CCMD_DOMAIN_INVL (((uint64_t)2) << 61)
#define DMA_CCMD_DEVICE_INVL (((uint64_t)3) << 61)
#define DMA_CCMD_FM(m) (((uint64_t)((m) & 0x3)) << 32)
#define DMA_CCMD_MASK_NOBIT 0
#define DMA_CCMD_MASK_1BIT 1
#define DMA_CCMD_MASK_2BIT 2
#define DMA_CCMD_MASK_3BIT 3
#define DMA_CCMD_SID(s) (((uint64_t)((s) & 0xffff)) << 16)
#define DMA_CCMD_DID(d) ((uint64_t)((d) & 0xffff))
#define DMA_CCMD_GET_CAIG_32(v) (((uint32_t)(v) >> 27) & 0x3)
/* IOTLB_REG: IOTLB invalidation command encodings. */
#define DMA_IOTLB_IVT (((uint64_t)1) << 63)
#define DMA_IOTLB_IVT_32 (((uint32_t)1) << 31)
#define DMA_IOTLB_GLOBAL_INVL (((uint64_t)1) << 60)
#define DMA_IOTLB_DOMAIN_INVL (((uint64_t)2) << 60)
#define DMA_IOTLB_PAGE_INVL (((uint64_t)3) << 60)
#define DMA_IOTLB_DR (((uint64_t)1) << 49)
#define DMA_IOTLB_DW (((uint64_t)1) << 48)
#define DMA_IOTLB_DID(d) \
(((uint64_t)((d) & 0xffff)) << 32)
#define DMA_IOTLB_GET_IAIG_32(v) (((uint32_t)(v) >> 25) & 0x3)
/* INVALIDATE_ADDRESS_REG: page-selective invalidation address fields. */
#define DMA_IOTLB_INVL_ADDR_AM(m) ((uint64_t)((m) & 0x3f))
#define DMA_IOTLB_INVL_ADDR_IH_UNMODIFIED (((uint64_t)1) << 6)
/* FECTL_REG: fault-event interrupt mask. */
#define DMA_FECTL_IM (((uint32_t)1) << 31)
/* FSTS_REG: fault-status field extractors (take the 32-bit register value). */
#define DMA_FSTS_PFO(s) (((s) >> 0) & 1)
#define DMA_FSTS_PPF(s) (((s) >> 1) & 1)
#define DMA_FSTS_AFO(s) (((s) >> 2) & 1)
#define DMA_FSTS_APF(s) (((s) >> 3) & 1)
#define DMA_FSTS_IQE(s) (((s) >> 4) & 1)
#define DMA_FSTS_ICE(s) (((s) >> 5) & 1)
#define DMA_FSTS_ITE(s) (((s) >> 6) & 1)
#define DMA_FSTS_PRO(s) (((s) >> 7) & 1)
#define DMA_FSTS_FRI(s) (((s) >> 8) & 0xFF)
/* FRCD_REGs: upper 64 bits*/
#define DMA_FRCD_UP_F(r) (((r) >> 63) & 1)
#define DMA_FRCD_UP_T(r) (((r) >> 62) & 1)
#define DMA_FRCD_UP_AT(r) (((r) >> 60) & 3)
#define DMA_FRCD_UP_PASID(r) (((r) >> 40) & 0xfffff)
#define DMA_FRCD_UP_FR(r) (((r) >> 32) & 0xff)
#define DMA_FRCD_UP_PP(r) (((r) >> 31) & 1)
#define DMA_FRCD_UP_EXE(r) (((r) >> 30) & 1)
#define DMA_FRCD_UP_PRIV(r) (((r) >> 29) & 1)
#define DMA_FRCD_UP_SID(r) (((r) >> 0) & 0xffff)
/* Context-entry translation types (context command/entry TT field). */
#define DMAR_CONTEXT_TRANSLATION_TYPE_TRANSLATED 0x00
#define DMAR_CONTEXT_TRANSLATION_TYPE_RESERVED 0x01
#define DMAR_CONTEXT_TRANSLATION_TYPE_PASSED_THROUGH 0x02
/* DRHD structure flag: this unit covers all remaining PCI devices. */
#define DRHD_FLAG_INCLUDE_PCI_ALL_MASK (1)
/*
 * Compose a PCI devfn byte: device in bits 7:3, function in bits 2:0.
 * Fix: parenthesize the macro arguments so expression arguments
 * (e.g. DEVFUN(d + 1, f)) expand correctly.
 */
#define DEVFUN(dev, fun) ((((dev) & 0x1F) << 3) | ((fun) & 0x7))
/* One PCI device covered by a DRHD unit's device scope. */
struct dmar_dev_scope {
uint8_t bus;		/* PCI bus number */
uint8_t devfun;		/* PCI device/function byte (see DEVFUN()) */
};
/* One DMA-remapping hardware unit (DRHD) parsed from the ACPI DMAR table. */
struct dmar_drhd {
uint32_t dev_cnt;	/* number of entries in devices[] */
uint16_t segment;	/* PCI segment this unit belongs to */
uint8_t flags;		/* e.g. DRHD_FLAG_INCLUDE_PCI_ALL_MASK */
bool ignore;		/* skip this unit during setup */
uint64_t reg_base_addr;	/* physical base of the unit's MMIO registers */
/* assume no pci device hotplug support */
struct dmar_dev_scope *devices;
};
/* Platform view of all DRHD units. */
struct dmar_info {
uint32_t drhd_count;
struct dmar_drhd *drhd_units;
};
extern struct dmar_info *get_dmar_info(void);
struct iommu_domain;
/* Assign a device specified by bus & devfun to a iommu domain */
int assign_iommu_device(struct iommu_domain *domain,
uint8_t bus, uint8_t devfun);
/* Unassign a device specified by bus & devfun to a iommu domain */
int unassign_iommu_device(struct iommu_domain *domain,
uint8_t bus, uint8_t devfun);
/* Create a iommu domain for a VM specified by vm_id */
struct iommu_domain *create_iommu_domain(int vm_id,
void *translation_table, int addr_width);
/* Destroy the iommu domain */
int destroy_iommu_domain(struct iommu_domain *domain);
/* Enable translation of iommu*/
void enable_iommu(void);
/* Disable translation of iommu*/
void disable_iommu(void);
/* iommu initialization */
int init_iommu(void);
#endif

View File

@@ -0,0 +1,62 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef UEFI_H
#define UEFI_H
/*
 * Pseudo-descriptor in the format expected by LGDT/LIDT (SGDT/SIDT):
 * 16-bit table limit followed by the linear base address. Packed so the
 * in-memory layout matches the hardware operand.
 */
typedef struct {
uint16_t limit;
uint64_t *base;
} __attribute__((packed)) dt_addr_t;
/*
 * CPU/firmware state captured at hand-off — presumably the UEFI boot
 * context saved so it can be restored or launched later (TODO confirm
 * against the code that fills this in). Packed: layout is shared with
 * assembly/boot code.
 */
struct efi_ctx {
void* entry;		/* entry point to return/jump to */
void* handle;		/* EFI image handle */
void* table;		/* EFI system table pointer */
dt_addr_t gdt;		/* saved GDTR */
dt_addr_t idt;		/* saved IDTR */
uint16_t tr_sel;	/* task register selector */
uint16_t ldt_sel;	/* LDT selector */
uint64_t cr0;
uint64_t cr3;
uint64_t cr4;
uint64_t rflags;
uint16_t cs_sel;	/* code segment selector */
uint32_t cs_ar;		/* code segment access rights */
uint16_t es_sel;
uint16_t ss_sel;
uint16_t ds_sel;
uint16_t fs_sel;
uint16_t gs_sel;
uint64_t rsp;
uint64_t efer;
}__attribute__((packed));
#endif /* UEFI_H*/

View File

@@ -0,0 +1,361 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file hypercall.h
*
* @brief public APIs for hypercall
*/
#ifndef HYPERCALL_H
#define HYPERCALL_H
/* Opaque I/O request type shared with the VHM (virtio and hypervisor
 * service module); defined elsewhere. */
struct vhm_request;
/* Queue an I/O request to the service VM; _wait blocks the vcpu until the
 * request completes, _nowait returns immediately. */
int acrn_insert_request_wait(struct vcpu *vcpu, struct vhm_request *req);
int acrn_insert_request_nowait(struct vcpu *vcpu, struct vhm_request *req);
/* Format pending-request debug info into str (at most str_max bytes). */
int get_req_info(char *str, int str_max);
/* Inject an interrupt through the virtual PIC of the given VM. */
int acrn_vpic_inject_irq(struct vm *vm, int irq, enum irq_mode mode);
/**
* @brief Hypercall
*
* @addtogroup acrn_hypercall ACRN Hypercall
* @{
*/
/**
 * @brief Get hypervisor api version
 *
 * The function only returns api version information when VM is VM0.
 *
 * @param vm Pointer to VM data structure
 * @param param guest physical memory address. The api version returned
 *	will be copied to this gpa
 *
 * @return 0 on success, non-zero on error.
 */
int64_t hcall_get_api_version(struct vm *vm, uint64_t param);
/**
 * @brief create virtual machine
 *
 * Create a virtual machine based on parameter, currently there is no
 * limitation for calling times of this function, will add MAX_VM_NUM
 * support later.
 *
 * @param vm Pointer to VM data structure
 * @param param guest physical memory address. This gpa points to
 *	struct acrn_create_vm
 *
 * @return 0 on success, non-zero on error.
 */
int64_t hcall_create_vm(struct vm *vm, uint64_t param);
/**
* @brief destroy virtual machine
*
* Destroy a virtual machine, it will pause target VM then shutdown it.
* The function will return -1 if the target VM does not exist.
*
* @param vmid ID of the VM
*
* @return 0 on success, non-zero on error.
*/
int64_t hcall_destroy_vm(uint64_t vmid);
/**
* @brief resume virtual machine
*
* Resume a virtual machine, it will schedule target VM's vcpu to run.
* The function will return -1 if the target VM does not exist or the
* IOReq buffer page for the VM is not ready.
*
* @param vmid ID of the VM
*
* @return 0 on success, non-zero on error.
*/
int64_t hcall_resume_vm(uint64_t vmid);
/**
* @brief pause virtual machine
*
* Pause a virtual machine, if the VM is already paused, the function
* will return 0 directly for success.
* The function will return -1 if the target VM does not exist.
*
* @param vmid ID of the VM
*
* @return 0 on success, non-zero on error.
*/
int64_t hcall_pause_vm(uint64_t vmid);
/**
 * @brief create vcpu
 *
 * Create a vcpu based on parameter for a VM, it will allocate vcpu from
 * freed physical cpus, if there is no available pcpu, the function will
 * return -1.
 *
 * @param vm Pointer to VM data structure
 * @param vmid ID of the VM
 * @param param guest physical address. This gpa points to
 *	struct acrn_create_vcpu
 *
 * @return 0 on success, non-zero on error.
 */
int64_t hcall_create_vcpu(struct vm *vm, uint64_t vmid, uint64_t param);
/**
 * @brief assert IRQ line
 *
 * Assert a virtual IRQ line for a VM, which could be from ISA or IOAPIC,
 * normally it will activate a level IRQ.
 * The function will return -1 if the target VM does not exist.
 *
 * @param vm Pointer to VM data structure
 * @param vmid ID of the VM
 * @param param guest physical address. This gpa points to struct acrn_irqline
 *
 * @return 0 on success, non-zero on error.
 */
int64_t hcall_assert_irqline(struct vm *vm, uint64_t vmid, uint64_t param);
/**
 * @brief deassert IRQ line
 *
 * Deassert a virtual IRQ line for a VM, which could be from ISA or IOAPIC,
 * normally it will deactivate a level IRQ.
 * The function will return -1 if the target VM does not exist.
 *
 * @param vm Pointer to VM data structure
 * @param vmid ID of the VM
 * @param param guest physical address. This gpa points to struct acrn_irqline
 *
 * @return 0 on success, non-zero on error.
 */
int64_t hcall_deassert_irqline(struct vm *vm, uint64_t vmid, uint64_t param);
/**
 * @brief trigger a pulse on IRQ line
 *
 * Trigger a pulse on a virtual IRQ line for a VM, which could be from ISA
 * or IOAPIC, normally it triggers an edge IRQ.
 * The function will return -1 if the target VM does not exist.
 *
 * @param vm Pointer to VM data structure
 * @param vmid ID of the VM
 * @param param guest physical address. This gpa points to struct acrn_irqline
 *
 * @return 0 on success, non-zero on error.
 */
int64_t hcall_pulse_irqline(struct vm *vm, uint64_t vmid, uint64_t param);
/**
 * @brief inject MSI interrupt
 *
 * Inject a MSI interrupt for a VM.
 * The function will return -1 if the target VM does not exist.
 *
 * @param vm Pointer to VM data structure
 * @param vmid ID of the VM
 * @param param guest physical address. This gpa points to struct acrn_msi_entry
 *
 * @return 0 on success, non-zero on error.
 */
int64_t hcall_inject_msi(struct vm *vm, uint64_t vmid, uint64_t param);
/**
 * @brief set ioreq shared buffer
 *
 * Set the ioreq share buffer for a VM.
 * The function will return -1 if the target VM does not exist.
 *
 * @param vm Pointer to VM data structure
 * @param vmid ID of the VM
 * @param param guest physical address. This gpa points to
 *	struct acrn_set_ioreq_buffer
 *
 * @return 0 on success, non-zero on error.
 */
int64_t hcall_set_ioreq_buffer(struct vm *vm, uint64_t vmid, uint64_t param);
/**
* @brief notify request done
*
* Notify the requestor VCPU for the completion of an ioreq.
* The function will return -1 if the target VM does not exist.
*
* @param vmid ID of the VM
* @param param vcpu ID of the requestor
*
* @return 0 on success, non-zero on error.
*/
int64_t hcall_notify_req_finish(uint64_t vmid, uint64_t param);
/**
* @brief setup ept memory mapping
*
* Set the ept memory mapping for a VM.
* The function will return -1 if the target VM does not exist.
*
* @param VM Pointer to VM data structure
* @param vmid ID of the VM
* @param param guest physical address. This gpa points to
* struct vm_set_memmap
*
* @return 0 on success, non-zero on error.
*/
int64_t hcall_set_vm_memmap(struct vm *vm, uint64_t vmid, uint64_t param);
/**
* @brief remap PCI MSI interrupt
*
* Remap a PCI MSI interrupt from a VM's virtual vector to native vector.
* The function will return -1 if the target VM does not exist.
*
* @param VM Pointer to VM data structure
* @param vmid ID of the VM
* @param param guest physical address. This gpa points to
* struct acrn_vm_pci_msix_remap
*
* @return 0 on success, non-zero on error.
*/
int64_t hcall_remap_pci_msix(struct vm *vm, uint64_t vmid, uint64_t param);
/**
 * @brief translate guest physical address to host physical address
 *
 * Translate guest physical address to host physical address for a VM.
 * The function will return -1 if the target VM does not exist.
 *
 * @param vm Pointer to VM data structure
 * @param vmid ID of the VM
 * @param param guest physical address. This gpa points to struct vm_gpa2hpa
 *
 * @return 0 on success, non-zero on error.
 */
int64_t hcall_gpa_to_hpa(struct vm *vm, uint64_t vmid, uint64_t param);
/**
* @brief Assign one passthrough dev to VM.
*
* @param VM Pointer to VM data structure
* @param vmid ID of the VM
* @param param guest physical address. This gpa points to
* physical BDF of the assigning ptdev
*
* @return 0 on success, non-zero on error.
*/
int64_t hcall_assign_ptdev(struct vm *vm, uint64_t vmid, uint64_t param);
/**
* @brief Deassign one passthrough dev from VM.
*
* @param VM Pointer to VM data structure
* @param vmid ID of the VM
* @param param guest physical address. This gpa points to
* physical BDF of the deassigning ptdev
*
* @return 0 on success, non-zero on error.
*/
int64_t hcall_deassign_ptdev(struct vm *vm, uint64_t vmid, uint64_t param);
/**
* @brief Set interrupt mapping info of ptdev.
*
* @param VM Pointer to VM data structure
* @param vmid ID of the VM
* @param param guest physical address. This gpa points to data structure of
* hc_ptdev_irq including intr remapping info
*
* @return 0 on success, non-zero on error.
*/
int64_t hcall_set_ptdev_intr_info(struct vm *vm, uint64_t vmid, uint64_t param);
/**
* @brief Clear interrupt mapping info of ptdev.
*
* @param VM Pointer to VM data structure
* @param vmid ID of the VM
* @param param guest physical address. This gpa points to data structure of
* hc_ptdev_irq including intr remapping info
*
* @return 0 on success, non-zero on error.
*/
int64_t hcall_reset_ptdev_intr_info(struct vm *vm, uint64_t vmid,
uint64_t param);
/**
* @brief Setup a share buffer for a VM.
*
* @param VM Pointer to VM data structure
* @param param guest physical address. This gpa points to
* struct sbuf_setup_param
*
* @return 0 on success, non-zero on error.
*/
int64_t hcall_setup_sbuf(struct vm *vm, uint64_t param);
/**
* @}
*/
/* Map a "found" flag to a hypercall status: 0 when found, -1 otherwise. */
static inline int check_result(int found)
{
	if (found != 0) {
		return 0;
	}
	return -1;
}
/*
 * Copy one object of type *(ptr) from guest memory into the hypervisor.
 *
 * @param vm  Pointer to the VM owning the guest address space.
 * @param ptr Host pointer that receives the data; its type determines the
 *            number of bytes copied.
 * @param gpa Guest physical address to read from.
 *
 * Evaluates to 0 on success, -1 when the gpa cannot be translated.
 *
 * Fix: the vm/gpa macro arguments are now parenthesized in the expansion
 * (CERT PRE01-C) so callers may pass compound expressions such as
 * "base + offset" without operator-precedence surprises.
 */
#define copy_from_vm(vm, ptr, gpa) ({ \
	int found = 0; \
	typeof(*(ptr)) *h_ptr = (ptr); \
	typeof(*(ptr)) *g_ptr = \
		(void *)gpa2hpa_check((vm), (gpa), \
			sizeof(*h_ptr), &found, true); \
	if (found) { \
		*h_ptr = *g_ptr; \
	} \
	check_result(found); \
})
/*
 * Copy one object of type *(ptr) from the hypervisor into guest memory.
 *
 * @param vm  Pointer to the VM owning the guest address space.
 * @param ptr Host pointer supplying the data; its type determines the
 *            number of bytes copied.
 * @param gpa Guest physical address to write to.
 *
 * Evaluates to 0 on success, -1 when the gpa cannot be translated.
 *
 * Fix: the vm/gpa macro arguments are now parenthesized in the expansion
 * (CERT PRE01-C) so callers may pass compound expressions such as
 * "base + offset" without operator-precedence surprises.
 */
#define copy_to_vm(vm, ptr, gpa) ({ \
	int found = 0; \
	typeof(*(ptr)) *h_ptr = (ptr); \
	typeof(*(ptr)) *g_ptr = \
		(void *)gpa2hpa_check((vm), (gpa), \
			sizeof(*h_ptr), &found, true); \
	if (found) { \
		*g_ptr = *h_ptr; \
	} \
	check_result(found); \
})
#endif /* HYPERCALL_H*/

View File

@@ -0,0 +1,55 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _HV_CORE_SCHEDULE_
#define _HV_CORE_SCHEDULE_
/* Flag used with make_reschedule_request()/need_rescheduled(). */
#define NEED_RESCHEDULED (1)
/* One-time initialization of scheduler state for all physical cpus. */
void init_scheduler(void);
/* Acquire/release the scheduling lock of the given physical cpu. */
void get_schedule_lock(int pcpu_id);
void release_schedule_lock(int pcpu_id);
/* Mark a physical cpu as in use so allocate_pcpu() will not hand it out. */
void set_pcpu_used(int pcpu_id);
/* Reserve a free physical cpu; see schedule.c for the failure value. */
int allocate_pcpu(void);
/* Return a physical cpu to the free pool. */
void free_pcpu(int pcpu_id);
/* Add/remove a vcpu to/from its runqueue. */
void add_vcpu_to_runqueue(struct vcpu *vcpu);
void remove_vcpu_from_runqueue(struct vcpu *vcpu);
/* Loop executed when a pcpu has no runnable vcpu. */
void default_idle(void);
/* Request that the pcpu running this vcpu reschedule at the next chance. */
void make_reschedule_request(struct vcpu *vcpu);
/* Non-zero when a reschedule has been requested for pcpu_id. */
int need_rescheduled(int pcpu_id);
/* Pick and switch to the next vcpu on the current pcpu. */
void schedule(void);
/* Execution loop driving a vcpu. */
void vcpu_thread(struct vcpu *vcpu);
#endif

View File

@@ -0,0 +1,54 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef ASSERT_H
#define ASSERT_H
#ifdef HV_DEBUG
void __assert(uint32_t line, const char *file, char *txt);
#define ASSERT(x, ...) \
if (!(x)) {\
pr_fatal(__VA_ARGS__);\
__assert(__LINE__, __FILE__, "fatal error");\
}
#else
#define ASSERT(x, ...) \
if (!(x)) { \
do { \
asm volatile ("pause" ::: "memory"); \
} while (1); \
}
#endif
/* Force a compilation error if condition is false */
#define STATIC_ASSERT(condition) ((void)sizeof(char[(condition) ? 1 : -1]))
#endif /* ASSERT_H */

View File

@@ -0,0 +1,115 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CONSOLE_H
#define CONSOLE_H
#ifdef HV_DEBUG
/** Initializes the console module.
 *
 * The backing character device is selected internally; console_init()
 * takes no parameters.
 *
 * @return '0' on success. Any other value indicates an error.
 */
int console_init(void);
/** Writes a NUL terminated string to the console.
*
* @param str A pointer to the NUL terminated string to write.
*
* @return The number of characters written or -1 if an error occurred
* and no character was written.
*/
int console_puts(const char *str);
/** Writes a given number of characters to the console.
*
* @param str A pointer to character array to write.
* @param len The number of characters to write.
*
* @return The number of characters written or -1 if an error occurred
* and no character was written.
*/
int console_write(const char *str, size_t len);
/** Writes a single character to the console.
*
* @param ch The character to write.
*
 * @return The number of characters written or -1 if an error
* occurred before any character was written.
*/
int console_putc(int ch);
/** Dumps an array to the console.
*
* This function dumps an array of bytes to the console
* in a hexadecimal format.
*
* @param p A pointer to the byte array to dump.
* @param len The number of bytes to dump.
*/
void console_dump_bytes(const void *p, unsigned int len);
void console_setup_timer(void);
uint32_t get_serial_handle(void);
#else
/* HV_DEBUG disabled: the console API compiles to no-op stubs. */
static inline int console_init(void)
{
	return 0;
}
static inline int console_puts(__unused const char *str)
{
	return 0;
}
static inline int console_write(__unused const char *str,
		__unused size_t len)
{
	return 0;
}
static inline int console_putc(__unused int ch)
{
	return 0;
}
static inline void console_dump_bytes(__unused const void *p,
		__unused unsigned int len)
{
}
static inline void console_setup_timer(void) {}
static inline uint32_t get_serial_handle(void) { return 0; }
#endif
#endif /* CONSOLE_H */

View File

@@ -0,0 +1,55 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef DUMP_H
#define DUMP_H
/* Forward declaration: interrupt/exception register context, defined elsewhere. */
struct intr_ctx;
#ifdef HV_DEBUG
/* Maximum number of call-trace frames followed when dumping. */
#define CALL_TRACE_HIERARCHY_MAX 20
/* Amount of stack (bytes) included in a dump — see dump.c for exact use. */
#define DUMP_STACK_SIZE 0x200
/* Dump diagnostic state for a CPU exception on the given cpu. */
void dump_exception(struct intr_ctx *ctx, uint32_t cpu_id);
/* Dump diagnostic state for an interrupt. */
void dump_interrupt(struct intr_ctx *ctx);
#else
/* HV_DEBUG disabled: dumping compiles away to no-ops. */
static inline void dump_exception(__unused struct intr_ctx *ctx,
		__unused uint32_t cpu_id)
{
}
static inline void dump_interrupt(__unused struct intr_ctx *ctx)
{
}
#endif
#endif /* DUMP_H */

View File

@@ -0,0 +1,109 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef LOGMSG_H
#define LOGMSG_H
/* Logging severity levels (lower value = more severe) */
#define LOG_FATAL 1
#define LOG_ERROR 2
#define LOG_WARNING 3
#define LOG_INFO 4
#define LOG_DEBUG 5
/* Logging flags: output destinations (per flag names; see logmsg.c) */
#define LOG_FLAG_STDOUT 0x00000001
#define LOG_FLAG_MEMORY 0x00000002
#if defined(HV_DEBUG)
/* Current verbosity thresholds for the two destinations — see logmsg.c. */
extern uint32_t console_loglevel;
extern uint32_t mem_loglevel;
/* Set up logging with an in-memory buffer of mem_size bytes and LOG_FLAG_* flags. */
void init_logmsg(uint32_t mem_size, uint32_t flags);
/* Output the buffered log messages produced by the given cpu. */
void print_logmsg_buffer(uint32_t cpu_id);
/* Core printf-style logging entry point; severity is one of LOG_*. */
void do_logmsg(uint32_t severity, const char *fmt, ...);
#else /* HV_DEBUG */
/* Release builds: logging compiles away to no-ops. */
static inline void init_logmsg(__unused uint32_t mem_size,
		__unused uint32_t flags)
{
}
static inline void do_logmsg(__unused uint32_t severity,
		__unused const char *fmt, ...)
{
}
static inline void print_logmsg_buffer(__unused uint32_t cpu_id)
{
}
#endif /* HV_DEBUG */
#ifndef pr_fmt
/* Modules may predefine pr_fmt to prefix their log messages. */
#define pr_fmt(fmt) fmt
#endif
/* Severity-specific helpers; all route through do_logmsg(). */
#define pr_fatal(fmt, ...) \
	do { \
		do_logmsg(LOG_FATAL, pr_fmt(fmt), ##__VA_ARGS__); \
	} while (0)
#define pr_err(fmt, ...) \
	do { \
		do_logmsg(LOG_ERROR, pr_fmt(fmt), ##__VA_ARGS__); \
	} while (0)
#define pr_warn(fmt, ...) \
	do { \
		do_logmsg(LOG_WARNING, pr_fmt(fmt), ##__VA_ARGS__); \
	} while (0)
#define pr_info(fmt, ...) \
	do { \
		do_logmsg(LOG_INFO, pr_fmt(fmt), ##__VA_ARGS__); \
	} while (0)
#define pr_dbg(fmt, ...) \
	do { \
		do_logmsg(LOG_DEBUG, pr_fmt(fmt), ##__VA_ARGS__); \
	} while (0)
/* Log at a caller-supplied severity level. */
#define dev_dbg(lvl, fmt, ...) \
	do { \
		do_logmsg(lvl, pr_fmt(fmt), ##__VA_ARGS__); \
	} while (0)
/* Unrecoverable error: log fatally, then spin forever.  The first argument
 * must be a string literal (it is concatenated with the prefix). */
#define panic(...) \
	do { pr_fatal("Instruction Decode PANIC: " __VA_ARGS__); \
		while (1) { asm volatile ("pause" ::: "memory"); }; } while (0)
#endif /* LOGMSG_H */

View File

@@ -0,0 +1,88 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef PRINTF_H
#define PRINTF_H
/** The well known printf() function.
 *
 * Formats a string and writes it to the console output.
 *
 * @param fmt A pointer to the NUL terminated format string.
 *
 * @return The number of characters actually written or a negative
 *	number if an error occurred.
 */
int printf(const char *fmt, ...);
/** The well known vprintf() function.
 *
 * Formats a string and writes it to the console output.
 *
 * @param fmt A pointer to the NUL terminated format string.
 * @param args The variable long argument list as va_list.
 * @return The number of characters actually written or a negative
 *	number if an error occurred.
 */
int vprintf(const char *fmt, va_list args);
/** The well known vsnprintf() function.
 *
 * Formats and writes a string with a max. size to memory.
 *
 * @param dst A pointer to the destination memory.
 * @param sz The size of the destination memory.
 * @param fmt A pointer to the NUL terminated format string.
 * @param args The variable long argument list as va_list.
 * @return The number of bytes which would be written, even if the destination
 *	is smaller. On error a negative number is returned.
 */
int vsnprintf(char *dst, int sz, const char *fmt, va_list args);
/** The well known snprintf() function.
 *
 * Formats a string and writes it to the destination memory, truncating
 * the output at sz bytes.
 *
 * @param dest Pointer to the destination memory.
 * @param sz Max. size of dest.
 * @param fmt A pointer to the NUL terminated format string.
 *
 * @return The number of characters that would be written or a negative
 *	number if an error occurred.
 *
 * @bug sz == 0 doesn't work
 */
int snprintf(char *dest, int sz, const char *fmt, ...);
#endif /* PRINTF_H */

View File

@@ -0,0 +1,155 @@
/*
* SHARED BUFFER
*
* Copyright (C) 2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Li Fei <fei1.li@intel.com>
*
*/
#ifndef SHARED_BUFFER_H
#define SHARED_BUFFER_H
#define SBUF_MAGIC 0x5aa57aa71aa13aa3
#define SBUF_MAX_SIZE (1 << 22)
#define SBUF_HEAD_SIZE 64
/* sbuf flags */
#define OVERRUN_CNT_EN (1 << 0) /* whether overrun counting is enabled */
#define OVERWRITE_EN (1 << 1) /* whether overwrite is enabled */
/**
* (sbuf) head + buf (store (ele_num - 1) elements at most)
* buffer empty: tail == head
* buffer full: (tail + ele_size) % size == head
*
* Base of memory for elements
* |
* |
* ----------------------------------------------------------------------
* | struct shared_buf | raw data (ele_size)| ... | raw data (ele_size) |
* ----------------------------------------------------------------------
* |
* |
* struct shared_buf *buf
*/
/* IDs of the shared buffers exported per cpu. */
enum {
	ACRN_TRACE,
	ACRN_HVLOG,
	ACRN_SBUF_ID_MAX,
};
/* Make sure sizeof(struct shared_buf) == SBUF_HEAD_SIZE */
struct shared_buf {
	uint64_t magic;		/* must equal SBUF_MAGIC for a valid buffer */
	uint32_t ele_num;	/* number of elements */
	uint32_t ele_size;	/* sizeof of elements */
	uint32_t head;		/* offset from base, to read */
	uint32_t tail;		/* offset from base, to write */
	uint64_t flags;		/* OVERRUN_CNT_EN / OVERWRITE_EN */
	uint32_t overrun_cnt;	/* count of overrun */
	uint32_t size;		/* ele_num * ele_size */
	uint32_t padding[6];	/* pad header up to SBUF_HEAD_SIZE (64 bytes) */
};
#ifdef HV_DEBUG
EXTERN_CPU_DATA(uint64_t * [ACRN_SBUF_ID_MAX], sbuf);
/* Drop the given flag bits from the buffer's flag word. */
static inline void sbuf_clear_flags(struct shared_buf *sbuf, uint64_t flags)
{
	uint64_t remaining = sbuf->flags & ~flags;

	sbuf->flags = remaining;
}
/* Replace the buffer's entire flag word with the given value.
 * NOTE(review): unlike add/clear this overwrites all bits — presumably
 * intentional ("set" = assign); confirm against callers. */
static inline void sbuf_set_flags(struct shared_buf *sbuf, uint64_t flags)
{
	sbuf->flags = flags;
}
/* Raise the given flag bits in the buffer's flag word. */
static inline void sbuf_add_flags(struct shared_buf *sbuf, uint64_t flags)
{
	uint64_t combined = sbuf->flags | flags;

	sbuf->flags = combined;
}
/* Allocate a shared buffer holding ele_num elements of ele_size bytes;
 * NULL on failure. */
struct shared_buf *sbuf_allocate(uint32_t ele_num, uint32_t ele_size);
/* Release a buffer previously returned by sbuf_allocate(). */
void sbuf_free(struct shared_buf *sbuf);
/* Read one element into data; see sbuf.c for the return convention. */
int sbuf_get(struct shared_buf *sbuf, uint8_t *data);
/* Write one element from data; see sbuf.c for the return convention. */
int sbuf_put(struct shared_buf *sbuf, uint8_t *data);
/* Register the buffer at hva as pcpu_id's sbuf for the given sbuf_id. */
int sbuf_share_setup(uint32_t pcpu_id, uint32_t sbuf_id, uint64_t *hva);
#else /* HV_DEBUG */
static inline void sbuf_clear_flags(
__unused struct shared_buf *sbuf,
__unused uint64_t flags)
{
}
static inline void sbuf_set_flags(
__unused struct shared_buf *sbuf,
__unused uint64_t flags)
{
}
static inline void sbuf_add_flags(
__unused struct shared_buf *sbuf,
__unused uint64_t flags)
{
}
static inline struct shared_buf *sbuf_allocate(
__unused uint32_t ele_num,
__unused uint32_t ele_size)
{
return NULL;
}
static inline void sbuf_free(
__unused struct shared_buf *sbuf)
{
}
static inline int sbuf_get(
__unused struct shared_buf *sbuf,
__unused uint8_t *data)
{
return 0;
}
static inline int sbuf_put(
__unused struct shared_buf *sbuf,
__unused uint8_t *data)
{
return 0;
}
#endif /* HV_DEBUG */
#endif /* SHARED_BUFFER_H */

View File

@@ -0,0 +1,42 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SERIAL_H
#define SERIAL_H
#ifdef HV_DEBUG
/* Initialize the serial driver (debug builds only). */
int serial_init(void);
#else
/* Serial support compiled out in release builds. */
static inline int serial_init(void) { return 0; }
#endif
/* Configure the uart16550: enable flag, port-mapped (PIO) vs MMIO, base address. */
void uart16550_set_property(int enabled, int port_mapped, uint64_t base_addr);
#endif

View File

@@ -0,0 +1,47 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SHELL_H
#define SHELL_H

/* Switching key combinations for shell and uart console */
#define GUEST_CONSOLE_TO_HV_SWITCH_KEY 0 /* CTRL + SPACE */

/* Hypervisor debug shell; real implementation only in HV_DEBUG builds. */
#ifdef HV_DEBUG
/* Initialize the shell subsystem; returns 0 on success. */
int shell_init(void);
/* Wake the shell session (e.g. on new console input). */
void shell_kick_session(void);
/* Switch the console between guest and hypervisor shell; returns 0 on success. */
int shell_switch_console(void);
#else
/* Release-build stubs: all no-ops reporting success. */
static inline int shell_init(void) { return 0; }
static inline void shell_kick_session(void) {}
static inline int shell_switch_console(void) { return 0; }
#endif
#endif /* SHELL_H */

View File

@@ -0,0 +1,280 @@
/*
* ACRN TRACE
*
* Copyright (C) 2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Li Fei <fei1.li@intel.com>
*
*/
#ifndef TRACE_H
#define TRACE_H

/* TIMER EVENT */
#define TRACE_TIMER_ACTION_ADDED 0x1
#define TRACE_TIMER_ACTION_PCKUP 0x2
#define TRACE_TIMER_ACTION_UPDAT 0x3
#define TRACE_TIMER_IRQ 0x4

/* VM lifecycle events. */
#define TRACE_VM_EXIT 0x10
/* Was "0X11": normalized hex prefix to lowercase for consistency with
 * every other constant in this header; value unchanged. */
#define TRACE_VM_ENTER 0x11

/* VM-exit trace IDs: TRC_VMEXIT_ENTRY + the hardware exit reason, so the
 * low bits of each ID match the VMX basic exit reason number. */
#define TRC_VMEXIT_ENTRY 0x10000
#define TRC_VMEXIT_EXCEPTION_OR_NMI	(TRC_VMEXIT_ENTRY + 0x00000000)
#define TRC_VMEXIT_EXTERNAL_INTERRUPT	(TRC_VMEXIT_ENTRY + 0x00000001)
#define TRC_VMEXIT_INTERRUPT_WINDOW	(TRC_VMEXIT_ENTRY + 0x00000002)
#define TRC_VMEXIT_CPUID		(TRC_VMEXIT_ENTRY + 0x00000004)
#define TRC_VMEXIT_RDTSC		(TRC_VMEXIT_ENTRY + 0x00000010)
#define TRC_VMEXIT_VMCALL		(TRC_VMEXIT_ENTRY + 0x00000012)
#define TRC_VMEXIT_CR_ACCESS		(TRC_VMEXIT_ENTRY + 0x0000001C)
#define TRC_VMEXIT_IO_INSTRUCTION	(TRC_VMEXIT_ENTRY + 0x0000001E)
#define TRC_VMEXIT_RDMSR		(TRC_VMEXIT_ENTRY + 0x0000001F)
#define TRC_VMEXIT_WRMSR		(TRC_VMEXIT_ENTRY + 0x00000020)
#define TRC_VMEXIT_EPT_VIOLATION	(TRC_VMEXIT_ENTRY + 0x00000030)
#define TRC_VMEXIT_EPT_MISCONFIGURATION	(TRC_VMEXIT_ENTRY + 0x00000031)
#define TRC_VMEXIT_RDTSCP		(TRC_VMEXIT_ENTRY + 0x00000033)
#define TRC_VMEXIT_APICV_WRITE		(TRC_VMEXIT_ENTRY + 0x00000038)
#define TRC_VMEXIT_APICV_ACCESS		(TRC_VMEXIT_ENTRY + 0x00000039)
#define TRC_VMEXIT_APICV_VIRT_EOI	(TRC_VMEXIT_ENTRY + 0x0000003A)
#define TRC_VMEXIT_UNHANDLED 0x20000
#ifdef HV_DEBUG
#include <sbuf.h>

/* Expand to a switch-case that pretty-prints trace event 'id' by pasting
 * the matching <id>_FMT macro into the case body. */
#define GEN_CASE(id) case (id): { id##_FMT; break; }

/* Generic (non-VM-exit) trace event IDs. */
#define TRACE_CUSTOM 0xFC
#define TRACE_FUNC_ENTER 0xFD
#define TRACE_FUNC_EXIT 0xFE
#define TRACE_STR 0xFF
/* Pretty-print formats for each event; each expects a trace_entry pointer
 * named 'p' and a printf-like macro PR in the expansion context.  The
 * timer formats rebuild a 64-bit deadline from the c (high) and b (low)
 * payload words. */
#define TRACE_TIMER_ACTION_ADDED_FMT \
{PR("TIMER_ACTION ADDED: ID %d, deadline %llx total: %d\n", \
	(p)->a, ((uint64_t)((p)->c)<<32)|(p)->b, (p)->d); }

#define TRACE_TIMER_ACTION_PCKUP_FMT \
{PR("TIMER_ACTION PCKUP: ID %d, deadline %llx total: %d\n", \
	(p)->a, ((uint64_t)((p)->c)<<32)|(p)->b, (p)->d); }

/* Was "(unsigned long)": use uint64_t like the sibling ADDED/PCKUP
 * formats so all three build the deadline with the same fixed-width cast. */
#define TRACE_TIMER_ACTION_UPDAT_FMT \
{PR("TIMER_ACTION UPDAT: ID %d, deadline %llx total: %d\n", \
	(p)->a, ((uint64_t)((p)->c)<<32)|(p)->b, (p)->d); }

#define TRACE_TIMER_IRQ_FMT \
PR("TIMER_IRQ total: %llx\n", (p)->e)

#define TRACE_CUSTOM_FMT \
PR("CUSTOM: 0x%llx 0x%llx\n", (p)->e, (p)->f)

#define TRACE_FUNC_ENTER_FMT \
PR("ENTER: %s\n", (p)->str)

#define TRACE_FUNC_EXIT_FMT \
PR("EXIT : %s\n", (p)->str)

#define TRACE_STR_FMT \
PR("STR: %s\n", (p)->str)
/* All generic events a trace pretty-printer should handle; drop this
 * inside a switch on the event ID (see GEN_CASE). */
#define ALL_CASES \
	GEN_CASE(TRACE_TIMER_ACTION_ADDED); \
	GEN_CASE(TRACE_TIMER_ACTION_PCKUP); \
	GEN_CASE(TRACE_TIMER_ACTION_UPDAT); \
	GEN_CASE(TRACE_TIMER_IRQ); \
	GEN_CASE(TRACE_CUSTOM); \
	GEN_CASE(TRACE_STR); \
	GEN_CASE(TRACE_FUNC_ENTER); \
	GEN_CASE(TRACE_FUNC_EXIT);
/* One fixed-size trace record: tsc(8) + id(8) + 16-byte payload
 * = 32 bytes, i.e. sizeof(trace_entry) == 4 x 64bit. */
struct trace_entry {
	uint64_t tsc; /* TSC */
	uint64_t id;  /* event ID (TRACE_*/TRC_* constant) */
	/* 16-byte payload, viewable as four 32-bit words, sixteen bytes,
	 * two 64-bit words, or a short string -- whichever the event uses. */
	union {
		struct {
			uint32_t a, b, c, d;
		};
		struct {
			uint8_t a1, a2, a3, a4;
			uint8_t b1, b2, b3, b4;
			uint8_t c1, c2, c3, c4;
			uint8_t d1, d2, d3, d4;
		};
		struct {
			uint64_t e;
			uint64_t f;
		};
		char str[16];
	};
} __attribute__((aligned(8)));
/* Return true only when cpu_id is in range and that CPU's trace shared
 * buffer has been allocated; evid is currently unused. */
static inline bool
trace_check(int cpu_id, __unused int evid)
{
	bool cpu_valid = (cpu_id < phy_cpu_num);

	/* Short-circuit keeps the per-CPU lookup off the invalid-CPU path. */
	return cpu_valid && (per_cpu(sbuf, cpu_id)[ACRN_TRACE] != 0);
}
/* Stamp 'entry' with the current TSC and event ID, then push it into this
 * CPU's trace shared buffer.  Caller must have passed trace_check() first
 * (the sbuf pointer is used unchecked). */
static inline void
_trace_put(int cpu_id, int evid, struct trace_entry *entry)
{
	struct shared_buf *sbuf = (struct shared_buf *)
		per_cpu(sbuf, cpu_id)[ACRN_TRACE];

	entry->tsc = rdtsc();
	entry->id = evid;
	/* sbuf_put copies the whole 32-byte record into the ring. */
	sbuf_put(sbuf, (uint8_t *)entry);
}
/* Record a trace event carrying two 64-bit payload words. */
static inline void
TRACE_2L(int evid, uint64_t e, uint64_t f)
{
	int cid = get_cpu_id();
	struct trace_entry ent;

	if (!trace_check(cid, evid))
		return;

	ent.e = e;
	ent.f = f;
	_trace_put(cid, evid, &ent);
}
/* Record a trace event carrying four 32-bit payload words. */
static inline void
TRACE_4I(int evid, uint32_t a, uint32_t b, uint32_t c,
	uint32_t d)
{
	int cid = get_cpu_id();
	struct trace_entry ent;

	if (!trace_check(cid, evid))
		return;

	ent.a = a;
	ent.b = b;
	ent.c = c;
	ent.d = d;
	_trace_put(cid, evid, &ent);
}
/* Record a trace event carrying six byte-sized payload values
 * (the remaining ten payload bytes are left unwritten). */
static inline void
TRACE_6C(int evid, uint8_t a1, uint8_t a2, uint8_t a3,
	uint8_t a4, uint8_t b1, uint8_t b2)
{
	int cid = get_cpu_id();
	struct trace_entry ent;

	if (!trace_check(cid, evid))
		return;

	ent.a1 = a1;
	ent.a2 = a2;
	ent.a3 = a3;
	ent.a4 = a4;
	ent.b1 = b1;
	ent.b2 = b2;
	_trace_put(cid, evid, &ent);
}
/* Function-boundary tracing: record the current function's name. */
#define TRACE_ENTER TRACE_16STR(TRACE_FUNC_ENTER, __func__)
#define TRACE_EXIT TRACE_16STR(TRACE_FUNC_EXIT, __func__)

/* Record a trace event whose payload is a short string (at most 15
 * characters plus a forced NUL terminator). */
static inline void
TRACE_16STR(int evid, const char name[])
{
	struct trace_entry entry;
	int cpu_id = get_cpu_id();
	int len;
	int i;

	if (!trace_check(cpu_id, evid))
		return;
	/* Zeroing e/f clears the whole 16-byte union, so any tail of str
	 * not overwritten below is already NUL. */
	entry.e = 0;
	entry.f = 0;
	/* Bound 20 is arbitrary (> buffer size); the result is clamped to
	 * the 16-byte payload right after. */
	len = strnlen_s(name, 20);
	len = (len > 16) ? 16 : len;
	for (i = 0; i < len; i++)
		entry.str[i] = name[i];
	/* Force termination: a 16-character name loses its last char. */
	entry.str[15] = 0;
	_trace_put(cpu_id, evid, &entry);
}
#else /* HV_DEBUG */

/* Release builds: formats expand to nothing and the TRACE_* helpers are
 * empty inlines, so call sites compile away without #ifdef guards. */
#define TRACE_TIMER_ACTION_ADDED_FMT
#define TRACE_TIMER_ACTION_PCKUP_FMT
#define TRACE_TIMER_ACTION_UPDAT_FMT
#define TRACE_TIMER_IRQ_FMT
#define TRACE_CUSTOM_FMT
#define TRACE_FUNC_ENTER_FMT
#define TRACE_FUNC_EXIT_FMT
#define TRACE_STR_FMT
#define TRACE_ENTER
#define TRACE_EXIT

static inline void
TRACE_2L(__unused int evid,
	__unused uint64_t e,
	__unused uint64_t f)
{
}

static inline void
TRACE_4I(__unused int evid,
	__unused uint32_t a,
	__unused uint32_t b,
	__unused uint32_t c,
	__unused uint32_t d)
{
}

static inline void
TRACE_6C(__unused int evid,
	__unused uint8_t a1,
	__unused uint8_t a2,
	__unused uint8_t a3,
	__unused uint8_t a4,
	__unused uint8_t b1,
	__unused uint8_t b2)
{
}
#endif /* HV_DEBUG */
#endif /* TRACE_H */

View File

@@ -0,0 +1,84 @@
/*-
* Copyright (c) 2013 Neel Natu <neel@freebsd.org>
* Copyright (c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _VUART_H_
#define _VUART_H_

/* Simple byte ring buffer used for the virtual UART RX/TX paths. */
struct fifo {
	char *buf;
	int rindex;	/* index to read from */
	int windex;	/* index to write to */
	int num;	/* number of characters in the fifo */
	int size;	/* size of the fifo */
};

/* State of one emulated 16550-style UART exposed to a guest. */
struct vuart {
	char data;	/* Data register (R/W) */
	char ier;	/* Interrupt enable register (R/W) */
	char lcr;	/* Line control register (R/W) */
	char mcr;	/* Modem control register (R/W) */
	char lsr;	/* Line status register (R/W) */
	char msr;	/* Modem status register (R/W) */
	char fcr;	/* FIFO control register (W) */
	char scr;	/* Scratch register (R/W) */
	char dll;	/* Baudrate divisor latch LSB */
	char dlh;	/* Baudrate divisor latch MSB */

	struct fifo rxfifo;
	struct fifo txfifo;
	int base;	/* I/O port base of the emulated UART */

	bool thre_int_pending;	/* THRE interrupt pending */
	bool active;
	struct vm *vm;	/* owning guest */
	spinlock_t lock;	/* protects all softc elements */
};

/* Virtual UART console exists only in HV_DEBUG builds. */
#ifdef HV_DEBUG
/* Create and attach a vuart to 'vm'; returns the device handle. */
void *vuart_init(struct vm *vm);
/* Return the vuart currently owning the console, or NULL. */
struct vuart *vuart_console_active(void);
void vuart_console_tx_chars(void);
void vuart_console_rx_chars(uint32_t serial_handle);
#else
/* Release-build stubs: no virtual UART is ever created. */
static inline void *vuart_init(__unused struct vm *vm)
{
	return NULL;
}
static inline struct vuart *vuart_console_active(void)
{
	return NULL;
}
static inline void vuart_console_tx_chars(void) {}
static inline void vuart_console_rx_chars(
	__unused uint32_t serial_handle)
{
}
#endif /*HV_DEBUG*/
#endif

View File

@@ -0,0 +1,45 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef HV_DEBUG_H
#define HV_DEBUG_H
#include <logmsg.h>
#include <serial.h>
#include <vuart.h>
#include <console.h>
#include <dump.h>
#include <trace.h>
#include <shell.h>
#include <sbuf.h>
#include <assert.h>
#include <printf.h>
#endif /* HV_DEBUG_H */

View File

@@ -0,0 +1,46 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef HV_LIB_H
#define HV_LIB_H
#include <types.h>
#include <errno.h>
#include <macros.h>
#include <rtl.h>
#include <spinlock.h>
#include <mem_mgt.h>
#include <stdarg.h>
#include <util.h>
#include <list.h>
#include <bits.h>
#include <strtol.h>
#endif /* HV_LIB_H */

View File

@@ -0,0 +1,59 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/************************************************************************
*
* FILE NAME
*
* hypervisor.h
*
* DESCRIPTION
*
* This file includes config header file "bsp_cfg.h" and other
* hypervisor used header files.
* It should be included in all the source files.
*
*
************************************************************************/
#ifndef HYPERVISOR_H
#define HYPERVISOR_H

/* Include config header file containing config options */
#include "bsp_cfg.h"

#ifndef ASSEMBLER
/* hpa <--> hva, now it is 1:1 mapping: the hypervisor identity-maps
 * physical memory, so the conversions are plain casts. */
#define HPA2HVA(x) ((void *)(x))
#define HVA2HPA(x) ((uint64_t)(x))
/* gpa --> hpa -->hva: translate a guest-physical address via gpa2hpa(),
 * then rely on the 1:1 map for the hva. */
#define GPA2HVA(vm, x) HPA2HVA(gpa2hpa(vm, x))
#endif /* !ASSEMBLER */
#endif /* HYPERVISOR_H */

View File

@@ -0,0 +1,575 @@
/*-
* Copyright (c) 1998 Doug Rabson
* Copyright (c) 2017 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef BITS_H
#define BITS_H

/* Instruction prefix that makes the following read-modify-write atomic
 * with respect to other CPUs. */
#define BUS_LOCK "lock ; "

/*
 * LOCK-prefixed RMW helpers on 8-bit operands.  In each one, "+m" marks
 * the target as read-write memory, "iq" allows an immediate or a
 * byte-addressable register, and the "cc"/"memory" clobbers tell the
 * compiler flags change and cached values must be reloaded.
 */

/*
 * #define atomic_set_char(P, V) (*(unsigned char *)(P) |= (V))
 */
static inline void atomic_set_char(unsigned char *p, unsigned char v)
{
	__asm __volatile(BUS_LOCK "orb %b1,%0"
	: "+m" (*p)
	: "iq" (v)
	: "cc", "memory");
}

/*
 * #define atomic_clear_char(P, V) (*(unsigned char *)(P) &= ~(V))
 */
static inline void atomic_clear_char(unsigned char *p, unsigned char v)
{
	/* Complement is computed in C; the locked op is a plain AND. */
	__asm __volatile(BUS_LOCK "andb %b1,%0"
	: "+m" (*p)
	: "iq" (~v)
	: "cc", "memory");
}

/*
 * #define atomic_add_char(P, V) (*(unsigned char *)(P) += (V))
 */
static inline void atomic_add_char(unsigned char *p, unsigned char v)
{
	__asm __volatile(BUS_LOCK "addb %b1,%0"
	: "+m" (*p)
	: "iq" (v)
	: "cc", "memory");
}

/*
 * #define atomic_subtract_char(P, V) (*(unsigned char *)(P) -= (V))
 */
static inline void atomic_subtract_char(unsigned char *p, unsigned char v)
{
	__asm __volatile(BUS_LOCK "subb %b1,%0"
	: "+m" (*p)
	: "iq" (v)
	: "cc", "memory");
}
/*
 * LOCK-prefixed RMW helpers on 16-bit operands ("%w" selects the word
 * form of the register; "ir" allows an immediate or any register).
 */

/*
 * #define atomic_set_short(P, V) (*(unsigned short *)(P) |= (V))
 */
static inline void atomic_set_short(unsigned short *p, unsigned short v)
{
	__asm __volatile(BUS_LOCK "orw %w1,%0"
	: "+m" (*p)
	: "ir" (v)
	: "cc", "memory");
}

/*
 * #define atomic_clear_short(P, V) (*(unsigned short *)(P) &= ~(V))
 */
static inline void atomic_clear_short(unsigned short *p, unsigned short v)
{
	__asm __volatile(BUS_LOCK "andw %w1,%0"
	: "+m" (*p)
	: "ir" (~v)
	: "cc", "memory");
}

/*
 * #define atomic_add_short(P, V) (*(unsigned short *)(P) += (V))
 */
static inline void atomic_add_short(unsigned short *p, unsigned short v)
{
	__asm __volatile(BUS_LOCK "addw %w1,%0"
	: "+m" (*p)
	: "ir" (v)
	: "cc", "memory");
}

/*
 * #define atomic_subtract_short(P, V) (*(unsigned short *)(P) -= (V))
 */
static inline void atomic_subtract_short(unsigned short *p, unsigned short v)
{
	__asm __volatile(BUS_LOCK "subw %w1,%0"
	: "+m" (*p)
	: "ir" (v)
	: "cc", "memory");
}
/*
 * LOCK-prefixed RMW helpers on 32-bit operands.
 */

/*
 * #define atomic_set_int(P, V) (*(unsigned int *)(P) |= (V))
 */
static inline void atomic_set_int(unsigned int *p, unsigned int v)
{
	__asm __volatile(BUS_LOCK "orl %1,%0"
	: "+m" (*p)
	: "ir" (v)
	: "cc", "memory");
}

/*
 * #define atomic_clear_int(P, V) (*(unsigned int *)(P) &= ~(V))
 */
static inline void atomic_clear_int(unsigned int *p, unsigned int v)
{
	__asm __volatile(BUS_LOCK "andl %1,%0"
	: "+m" (*p)
	: "ir" (~v)
	: "cc", "memory");
}

/*
 * #define atomic_add_int(P, V) (*(unsigned int *)(P) += (V))
 */
static inline void atomic_add_int(unsigned int *p, unsigned int v)
{
	__asm __volatile(BUS_LOCK "addl %1,%0"
	: "+m" (*p)
	: "ir" (v)
	: "cc", "memory");
}

/*
 * #define atomic_subtract_int(P, V) (*(unsigned int *)(P) -= (V))
 */
static inline void atomic_subtract_int(unsigned int *p, unsigned int v)
{
	__asm __volatile(BUS_LOCK "subl %1,%0"
	: "+m" (*p)
	: "ir" (v)
	: "cc", "memory");
}
/*
 * #define atomic_swap_int(P, V) \
 * (return (*(unsigned int *)(P)); *(unsigned int *)(P) = (V);)
 *
 * Atomically store v into *p and return the previous value (it lands in
 * the "+r" operand).  NOTE(review): x86 xchg with a memory operand locks
 * implicitly, so BUS_LOCK here is redundant but harmless -- confirm
 * against the SDM before removing.
 */
static inline int atomic_swap_int(unsigned int *p, unsigned int v)
{
	__asm __volatile(BUS_LOCK "xchgl %1,%0"
	: "+m" (*p), "+r" (v)
	:
	: "cc", "memory");
	return v;
}

/*
 * #define atomic_readandclear_int(P) \
 * (return (*(unsigned int *)(P)); *(unsigned int *)(P) = 0;)
 */
#define atomic_readandclear_int(p) atomic_swap_int(p, 0)
/*
 * LOCK-prefixed RMW helpers on 64-bit operands.  Note "ir" permits an
 * immediate; the assembler rejects 64-bit immediates that do not fit a
 * sign-extended 32-bit field.
 */

/*
 * #define atomic_set_long(P, V) (*(unsigned long *)(P) |= (V))
 */
static inline void atomic_set_long(unsigned long *p, unsigned long v)
{
	__asm __volatile(BUS_LOCK "orq %1,%0"
	: "+m" (*p)
	: "ir" (v)
	: "cc", "memory");
}

/*
 * #define atomic_clear_long(P, V) (*(u_long *)(P) &= ~(V))
 */
static inline void atomic_clear_long(unsigned long *p, unsigned long v)
{
	__asm __volatile(BUS_LOCK "andq %1,%0"
	: "+m" (*p)
	: "ir" (~v)
	: "cc", "memory");
}

/*
 * #define atomic_add_long(P, V) (*(unsigned long *)(P) += (V))
 */
static inline void atomic_add_long(unsigned long *p, unsigned long v)
{
	__asm __volatile(BUS_LOCK "addq %1,%0"
	: "+m" (*p)
	: "ir" (v)
	: "cc", "memory");
}

/*
 * #define atomic_subtract_long(P, V) (*(unsigned long *)(P) -= (V))
 */
static inline void atomic_subtract_long(unsigned long *p, unsigned long v)
{
	__asm __volatile(BUS_LOCK "subq %1,%0"
	: "+m" (*p)
	: "ir" (v)
	: "cc", "memory");
}

/*
 * #define atomic_swap_long(P, V) \
 * (return (*(unsigned long *)(P)); *(unsigned long *)(P) = (V);)
 *
 * Atomically store v into *p and return the previous value.
 */
static inline long atomic_swap_long(unsigned long *p, unsigned long v)
{
	__asm __volatile(BUS_LOCK "xchgq %1,%0"
	: "+m" (*p), "+r" (v)
	:
	: "cc", "memory");
	return v;
}

/*
 * #define atomic_readandclear_long(P) \
 * (return (*(unsigned long *)(P)); *(unsigned long *)(P) = 0;)
 */
#define atomic_readandclear_long(p) atomic_swap_long(p, 0)
/*
 * Acquire-load / release-store helpers.  A plain mov suffices on x86
 * for these orderings; the "memory" clobber provides the compiler
 * barrier that keeps other accesses from moving across the operation.
 */

/*
 * #define atomic_load_acq_int(P) (*(unsigned int*)(P))
 */
static inline int atomic_load_acq_int(unsigned int *p)
{
	int ret;

	__asm __volatile("movl %1,%0"
	: "=r"(ret)
	: "m" (*p)
	: "cc", "memory");
	return ret;
}

/*
 * #define atomic_store_rel_int(P, V) (*(unsigned int *)(P) = (V))
 */
static inline void atomic_store_rel_int(unsigned int *p, unsigned int v)
{
	__asm __volatile("movl %1,%0"
	: "=m" (*p)
	: "r" (v)
	: "cc", "memory");
}

/*
 * #define atomic_load_acq_long(P) (*(unsigned long*)(P))
 */
static inline long atomic_load_acq_long(unsigned long *p)
{
	long ret;

	__asm __volatile("movq %1,%0"
	: "=r"(ret)
	: "m" (*p)
	: "cc", "memory");
	return ret;
}

/*
 * #define atomic_store_rel_long(P, V) (*(unsigned long *)(P) = (V))
 */
static inline void atomic_store_rel_long(unsigned long *p, unsigned long v)
{
	__asm __volatile("movq %1,%0"
	: "=m" (*p)
	: "r" (v)
	: "cc", "memory");
}
/*
 * Atomic compare-and-exchange: if *p == old, store new; either way,
 * return the value *p held before the operation (callers detect success
 * by comparing the return against old).  "0" ties old to the eax
 * register that cmpxchg implicitly uses and that also carries ret out.
 * NOTE(review): cmpxchg sets ZF, yet no "cc" clobber is listed -- the
 * sibling helpers list it; confirm whether this relies on the x86
 * implicit-flags assumption.
 */
static inline int atomic_cmpxchg_int(unsigned int *p,
			int old, int new)
{
	int ret;

	__asm __volatile(BUS_LOCK "cmpxchgl %2,%1"
	: "=a" (ret), "+m" (*p)
	: "r" (new), "0" (old)
	: "memory");
	return ret;
}

/* Fixed-width aliases for the load/store helpers above. */
#define atomic_load_acq_32 atomic_load_acq_int
#define atomic_store_rel_32 atomic_store_rel_int
#define atomic_load_acq_64 atomic_load_acq_long
#define atomic_store_rel_64 atomic_store_rel_long
/*
 * #define atomic_xadd_int(P, V) \
 * (return (*(unsigned long *)(P)); *(unsigned long *)(P) += (V);)
 *
 * Atomic fetch-and-add: *p += v; the old value of *p is exchanged into
 * the "+r" operand and returned.
 */
static inline int atomic_xadd_int(unsigned int *p, unsigned int v)
{
	__asm __volatile(BUS_LOCK "xaddl %0,%1"
	: "+r" (v), "+m" (*p)
	:
	: "cc", "memory");
	return v;
}

/* Add v to *p and return the NEW value (old value + v). */
static inline int atomic_add_return(int v, unsigned int *p)
{
	return v + atomic_xadd_int(p, v);
}

/* Subtract v from *p and return the NEW value. */
static inline int atomic_sub_return(int v, unsigned int *p)
{
	return atomic_xadd_int(p, -v) - v;
}

#define atomic_inc_return(v) atomic_add_return(1, (v))
#define atomic_dec_return(v) atomic_sub_return(1, (v))
/* Bit-scan-reverse, 32-bit: index of the highest set bit.  The hardware
 * result is undefined for mask == 0; callers must guard. */
static inline unsigned int
bsrl(unsigned int mask)
{
	unsigned int result;

	__asm __volatile("bsrl %1,%0"
	: "=r" (result)
	: "rm" (mask));
	return result;
}

/* Bit-scan-reverse, 64-bit counterpart of bsrl(). */
static inline unsigned long
bsrq(unsigned long mask)
{
	unsigned long result;

	__asm __volatile("bsrq %1,%0"
	: "=r" (result)
	: "rm" (mask));
	return result;
}

/**
 * fls - Find the Last (most significant) bit Set and return its
 * zero-based index, or -1 when mask is zero.
 *
 * Examples: fls(0x0) = -1, fls(0x01) = 0, fls(0xf0) = 7,
 * fls(0x80000001) = 31.
 *
 * @param mask: 'int' type value
 * @return zero-based bit index, or -1 for a zero mask.
 */
static inline int
fls(int mask)
{
	if (mask == 0)
		return -1;

	return (int)bsrl((unsigned int)mask);
}

/* 64bit version of fls(). */
static inline int
flsl(long mask)
{
	if (mask == 0)
		return -1;

	return (int)bsrq((unsigned long)mask);
}
/* Bit-scan-forward, 64-bit: index of the lowest set bit.  The hardware
 * result is undefined for mask == 0; callers must guard. */
static inline unsigned long
bsfq(unsigned long mask)
{
	unsigned long result;

	__asm __volatile("bsfq %1,%0"
	: "=r" (result)
	: "rm" (mask));
	return result;
}

/**
 * ffsl - Find the First (least significant) bit Set in a 'long' value
 * and return its zero-based index, or -1 when mask is zero.
 *
 * Examples: ffsl(0x0) = -1, ffsl(0x01) = 0, ffsl(0xf0) = 4,
 * ffsl(0xf00) = 8, ffsl(0x8000000000000001) = 0,
 * ffsl(0xf000000000000000) = 60.
 *
 * @param mask: 'long' type value
 * @return zero-based bit index, or -1 for a zero mask.
 */
static inline int
ffsl(long mask)
{
	if (mask == 0)
		return -1;

	return (int)bsfq((unsigned long)mask);
}
/* Atomically set bit 'mask' in the 64-bit word *bits.
 * NOTE(review): unlike the btq-based helpers below there is no & 0x3f
 * masking here, so mask must be 0..63 (1UL<<mask is UB otherwise). */
static inline void
bitmap_set(int mask, unsigned long *bits)
{
	/* (*bits) |= (1UL<<mask); */
	__asm __volatile(BUS_LOCK "orq %1,%0"
	: "+m" (*bits)
	: "ir" (1UL<<mask)
	: "cc", "memory");
}

/* Atomically clear bit 'mask' in *bits; same 0..63 range caveat. */
static inline void
bitmap_clr(int mask, unsigned long *bits)
{
	/* (*bits) &= ~(1UL<<mask); */
	__asm __volatile(BUS_LOCK "andq %1,%0"
	: "+m" (*bits)
	: "ir" (~(1UL<<mask))
	: "cc", "memory");
}
/* Test whether bit (mask mod 64) is set in *bits; returns 1 or 0.
 * Non-atomic read (no LOCK needed for a pure test). */
static inline int
bitmap_isset(int mask, unsigned long *bits)
{
	/*
	 * return (*bits) & (1UL<<mask);
	 */
	int ret;

	/*
	 * btq copies the selected bit into CF and only READS the bitmap,
	 * so *bits belongs in the input list ("m"), not in a write-only
	 * "=m" output as before -- the old constraint told the compiler
	 * the asm overwrites the bitmap.  sbbl then smears CF across ret.
	 */
	__asm __volatile("btq %2,%1\n\tsbbl %0, %0"
	: "=r" (ret)
	: "m" (*bits), "ir" ((long)(mask) & 0x3f)
	: "cc", "memory");
	return (!!ret);
}
/* Atomically set bit (mask mod 64) in *bits; return its PREVIOUS value. */
static inline int
bitmap_test_and_set(int mask, unsigned long *bits)
{
	int ret;

	/*
	 * btsq reads, tests and then sets the bit, so *bits is both read
	 * and written: the constraint must be "+m" (read-write), not the
	 * original write-only "=m", which let the compiler treat the
	 * prior bitmap contents as dead.
	 */
	__asm __volatile(BUS_LOCK "btsq %2,%1\n\tsbbl %0,%0"
	: "=r" (ret), "+m" (*bits)
	: "ir" ((long)(mask & 0x3f))
	: "cc", "memory");
	return (!!ret);
}

/* Atomically clear bit (mask mod 64) in *bits; return its PREVIOUS value. */
static inline int
bitmap_test_and_clear(int mask, unsigned long *bits)
{
	/*
	 * bool ret = (*bits) & (1UL<<mask);
	 * (*bits) &= ~(1UL<<mask);
	 * return ret;
	 */
	int ret;

	/* Same read-modify-write fix ("+m") as bitmap_test_and_set; the
	 * mask expression is also normalized to match it. */
	__asm __volatile(BUS_LOCK "btrq %2,%1\n\tsbbl %0,%0"
	: "=r" (ret), "+m" (*bits)
	: "ir" ((long)(mask & 0x3f))
	: "cc", "memory");
	return (!!ret);
}
/* Atomically replace the whole bitmap with only bit 'mask' set
 * (mask must be 0..63: 1UL<<mask is UB otherwise). */
static inline void
bitmap_setof(int mask, unsigned long *bits)
{
	/*
	 * *bits = 0;
	 * (*bits) |= (1UL<<mask);
	 */
	unsigned long val = (1UL << mask);

	/*
	 * xchg has no immediate form, and it also writes the old memory
	 * value back into its register operand, so the source must be a
	 * register the compiler knows gets clobbered ("+r" on a local),
	 * not the original "ir" input, which could pick an immediate and
	 * fail to assemble.
	 */
	__asm __volatile(BUS_LOCK "xchgq %0,%1"
	: "+r" (val), "+m" (*bits)
	:
	: "cc", "memory");
}
/* Index of the lowest set bit in *bits, or -1 if the bitmap is empty. */
static inline int
bitmap_ffs(unsigned long *bits)
{
	unsigned long snapshot = *bits;

	return ffsl(snapshot);
}
/* 64-bit compare-and-set: if *dst == expect, store src and return 1
 * (via sete); otherwise leave *dst alone and return 0.  "+a" pins
 * expect to rax as cmpxchg requires. */
static inline int
atomic_cmpset_long(unsigned long *dst, unsigned long expect, unsigned long src)
{
	unsigned char res;

	__asm __volatile(BUS_LOCK "cmpxchg %3,%1\n\tsete %0"
	: "=q" (res), "+m" (*dst), "+a" (expect)
	: "r" (src)
	: "memory", "cc");
	return res;
}
/* Index of the least significant clear ('0') bit in value, or -1 when
 * value is all-ones (the inverted word is then zero). */
static inline int
get_first_zero_bit(unsigned long value)
{
	unsigned long inverted = ~value;

	return ffsl(inverted);
}
/**
 * Count leading zeros in a 32-bit value: the number of most-significant
 * bits above the highest '1'.
 *
 * clz(0x80000000)==0, clz(0x40000000)==1, ..., clz(0x00000001)==31,
 * clz(0x00000000)==32 (fls returns -1 for zero).
 *
 * @param mask: The 32 bit value to count the number of leading zeros.
 * @return The number of leading zeros in 'mask'.
 */
static inline int
clz(int mask)
{
	return 31 - fls(mask);
}

/**
 * Count leading zeros, 64-bit version; clz64(0) == 64.
 *
 * @param mask: The 64 bit value to count the number of leading zeros.
 * @return The number of leading zeros in 'mask'.
 */
static inline int
clz64(long mask)
{
	return 63 - flsl(mask);
}
#endif /* BITS_H*/

View File

@@ -0,0 +1,68 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef HKDF_H
#define HKDF_H

#include <types.h>

/*
 * hkdf_sha256 - HMAC-based Extract-and-Expand Key Derivation Function
 * over SHA-256.
 *
 * Parameters:
 *	out_key		Pointer to key buffer which is used to save
 *			the hkdf_sha256 result
 *	out_len		The length of out_key
 *	secret		Pointer to input keying material
 *	secret_len	The length of secret
 *	salt		Pointer to salt buffer; optional.  If not provided
 *			(salt == NULL) it is set internally to a string of
 *			hashlen (32) zeros
 *	salt_len	The length of the salt value; ignored if salt is NULL
 *	info		Pointer to application-specific information; optional.
 *			Ignored if info == NULL or a zero-length string
 *	info_len	The length of info; ignored if info is NULL
 *
 * Returns:
 *	1 - Success
 *	0 - Failure
 */
int hkdf_sha256(uint8_t *out_key, size_t out_len,
		const uint8_t *secret, size_t secret_len,
		const uint8_t *salt, size_t salt_len,
		const uint8_t *info, size_t info_len);
#endif /* HKDF_H */

View File

@@ -0,0 +1,45 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef ERRNO_H
#define ERRNO_H
/* Hypervisor-internal error codes.
 * NOTE(review): these numeric values are local to this project and do NOT
 * match POSIX/Linux errno values (e.g. Linux ENOMEM is 12); do not pass
 * them to interfaces that expect standard errno numbers.
 */
/** Indicates that there is not enough memory. */
#define ENOMEM 1
/** Indicates that the argument is not valid. */
#define EINVAL 2
/** Indicates that there is no such device. */
#define ENODEV 3
/** Indicates that there is an IO error. */
#define EIO 4
/** Indicates that the target is busy. */
#define EBUSY 5
#endif /* ERRNO_H */

View File

@@ -0,0 +1,123 @@
/*-
* Copyright (C) 2005-2011 HighPoint Technologies, Inc.
* Copyright (c) 2017 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef LIST_H_
#define LIST_H_
/* Minimal circular doubly-linked list, following the well-known Linux
 * kernel list idiom: a list head is itself a node, and an empty list is
 * a node whose next and prev links both point at the node itself.
 */
struct list_head {
	struct list_head *next, *prev;
};

/* Turn *ptr into an empty list (self-referencing links). */
#define INIT_LIST_HEAD(ptr) do { (ptr)->next = (ptr); (ptr)->prev = (ptr); } \
	while (0)

/* Link @node in between the two known-adjacent nodes @before and @after. */
static inline void __list_add(struct list_head *node, struct list_head *before,
	struct list_head *after)
{
	node->next = after;
	node->prev = before;
	after->prev = node;
	before->next = node;
}

/* Insert @node right after @head (LIFO/stack-style insertion). */
static inline void list_add(struct list_head *node, struct list_head *head)
{
	__list_add(node, head, head->next);
}

/* Insert @node right before @head, i.e. at the tail (FIFO/queue-style). */
static inline void list_add_tail(struct list_head *node,
	struct list_head *head)
{
	__list_add(node, head->prev, head);
}

/* Close the gap between @before and @after, unlinking whatever sat
 * between them.
 */
static inline void __list_del(struct list_head *before,
	struct list_head *after)
{
	after->prev = before;
	before->next = after;
}

/* Remove @entry from its list; @entry's own links are left dangling. */
static inline void list_del(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
}

/* Remove @entry from its list and reinitialize it as an empty list so it
 * can be reused or safely tested with list_empty().
 */
static inline void list_del_init(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	INIT_LIST_HEAD(entry);
}

/* Return non-zero when @head has no elements. */
static inline int list_empty(struct list_head *head)
{
	return (head->next == head);
}
/* Graft the elements of @list (excluding its head node) into @head,
 * right after @head.  Callers must guarantee @list is non-empty.
 */
static inline void __list_splice(struct list_head *list,
	struct list_head *head)
{
	struct list_head *first_node = list->next;
	struct list_head *last_node = list->prev;
	struct list_head *anchor = head->next;

	head->next = first_node;
	first_node->prev = head;

	last_node->next = anchor;
	anchor->prev = last_node;
}

/* Splice @list into @head; a no-op when @list is empty.  @list's own
 * links are left stale -- use list_splice_init() to reuse it.
 */
static inline void list_splice(struct list_head *list, struct list_head *head)
{
	if (list_empty(list))
		return;
	__list_splice(list, head);
}

/* Splice @list into @head, then reinitialize @list as an empty list. */
static inline void list_splice_init(struct list_head *list,
	struct list_head *head)
{
	if (list_empty(list))
		return;
	__list_splice(list, head);
	INIT_LIST_HEAD(list);
}
/* Convert a pointer to an embedded list_head member back into a pointer
 * to the containing structure.  Uses the classic null-pointer offsetof
 * trick; the offset is cast to uint64_t (64-bit pointers assumed).
 */
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr)-(uint64_t)(&((type *)0)->member)))
/* Iterate over the list; @pos must NOT be removed inside the loop body. */
#define list_for_each(pos, head) \
	for (pos = (head)->next; pos != (head); pos = pos->next)
/* Iteration variant that is safe against removal of @pos: @n caches the
 * successor before the body runs.
 */
#define list_for_each_safe(pos, n, head) \
	for (pos = (head)->next, n = pos->next; pos != (head); \
		pos = n, n = pos->next)
/* list_entry() applied to the first element of @attached; the list must
 * not be empty.
 */
#define get_first_item(attached, type, member) \
	((type *)((char *)((attached)->next)-(uint64_t)(&((type *)0)->member)))
#endif /* LIST_H_ */

View File

@@ -0,0 +1,73 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef MACROS_H
#define MACROS_H
/** Replaces 'x' by the string "x". */
#define __CPP_STRING(x) #x
/** Replaces 'x' by its value. */
#define CPP_STRING(x) __CPP_STRING(x)
/** Creates a bitfield mask.
 *
 * @param pos The position of the LSB within the mask.
 * @param width The width of the bitfield in bits (must be < 32).
 *
 * @return The bitfield mask.
 *
 * NOTE: unsigned literals are used so that masks touching bit 31
 * (pos + width == 32) do not left-shift into the sign bit of a signed
 * int, which is undefined behavior.
 */
#define BITFIELD_MASK(pos, width) (((1U<<(width))-1U)<<(pos))
/** Shifts value 'v' to bit position 'pos' and masks off everything
 * outside the 'width'-bit field.
 */
#define BITFIELD_VALUE(v, pos, width) \
	(((v)<<(pos)) & (((1U<<(width))-1U)<<(pos)))
/** BITFIELD_MASK() driven by <id>_POS / <id>_WIDTH macro pairs. */
#define MAKE_BITFIELD_MASK(id) BITFIELD_MASK(id ## _POS, id ## _WIDTH)
/** BITFIELD_VALUE() driven by <id>_POS / <id>_WIDTH macro pairs. */
#define MAKE_BITFIELD_VALUE(v, id) BITFIELD_VALUE(v, id ## _POS, id ## _WIDTH)
/** Defines a register within a register block. */
#define REGISTER(base, off) (base ## _BASE + (off))
/** Token-pastes chip/module/register names into the MMIO address
 * <chip>_<module>_BASE + <chip>_<module>_<register>_REGISTER.
 */
#define MAKE_MMIO_REGISTER_ADDRESS(chip, module, register) \
	(chip ## _ ## module ## _BASE + \
	(chip ## _ ## module ## _ ## register ## _REGISTER))
/* Macro used to check if a value is aligned to the required boundary.
 * Returns TRUE if aligned; FALSE if not aligned
 * NOTE: The required alignment must be a power of 2 (2, 4, 8, 16, 32, etc)
 */
#define MEM_ALIGNED_CHECK(value, req_align) \
	(((uint64_t)(value) & ((uint64_t)(req_align) - (uint64_t)1)) == 0)
#if !defined(ASSEMBLER) && !defined(LINKER_SCRIPT)
/* Element count of a true array.  Do NOT use on a pointer or on an array
 * function parameter (which decays to a pointer).
 */
#define ARRAY_LENGTH(x) (sizeof(x)/sizeof((x)[0]))
#endif
#endif /* MACROS_H */

View File

@@ -0,0 +1,55 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __MEM_MGT_H__
#define __MEM_MGT_H__
/* NOTE(review): guard name uses a leading double underscore, which is
 * reserved for the implementation; consider renaming to MEM_MGT_H.
 */
/* Macros */
#define BITMAP_WORD_SIZE 32
/* Bitmap-managed memory pool descriptor. */
struct mem_pool {
	void *start_addr;	/* Start Address of Memory Pool */
	spinlock_t spinlock;	/* To protect Memory Allocation */
	uint32_t size;		/* Size of Memory Pool in Bytes */
	uint32_t buff_size;	/* Size of one Buffer in Bytes */
	uint32_t total_buffs;	/* Total Buffers in Memory Pool */
	uint32_t bmp_size;	/* Size of Bitmap Array */
	uint32_t *bitmap;	/* Pointer to allocation bitmap */
	uint32_t *contiguity_bitmap;	/* Pointer to contiguity bitmap */
};
/* APIs exposing memory allocation/deallocation abstractions */
void *malloc(unsigned int num_bytes);
void *calloc(unsigned int num_elements, unsigned int element_size);
/* '(void)' rather than '()': in C an empty parameter list declares an
 * unspecified-arguments function and defeats type checking.
 */
void *alloc_page(void);
void *alloc_pages(unsigned int page_num);
void free(void *ptr);
#endif /* __MEM_MGT_H__ */

View File

@@ -0,0 +1,82 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef RTL_H
#define RTL_H
#include <types.h>
/* 64-bit value overlaid with its two 32-bit halves; on this x86
 * (little-endian) target 'low' is the least-significant dword.
 */
union u_qword {
	struct {
		uint32_t low;
		uint32_t high;
	} dwords;
	uint64_t qword;
};
/* Quotient/remainder pair filled in by udiv32()/udiv64(). */
struct udiv_result {
	union u_qword q;
	union u_qword r;
};
/* Function prototypes */
/* Busy-wait for 'us' microseconds (TSC-based; see implementation). */
void udelay(int us);
void *memchr(const void *void_s, int c, size_t n);
void *memmove(void *s1, const void *s2, size_t n);
int strcmp(const char *s1, const char *s2);
int strncmp(const char *s1, const char *s2, size_t n);
/* NOTE(review): these *_s variants return pointers rather than errno_t,
 * so despite the names they are NOT the C11 Annex K functions -- check
 * the implementations for the exact failure contract.
 */
char *strcpy_s(char *d, size_t dmax, const char *s);
char *strncpy_s(char *d, size_t dmax, const char *s, size_t slen);
char *strchr(const char *s, int ch);
/* Busy-wait for 'ms' milliseconds. */
void mdelay(unsigned int ms);
size_t strnlen_s(const char *str, size_t maxlen);
void *memset(void *base, uint8_t v, size_t n);
void *memcpy_s(void *d, size_t dmax, const void *s, size_t slen);
/* Unsigned division helpers filling a udiv_result; the int return is a
 * status code (see implementation).
 */
int udiv64(uint64_t dividend, uint64_t divisor, struct udiv_result *res);
int udiv32(uint32_t dividend, uint32_t divisor, struct udiv_result *res);
/* TSC frequency in ticks per second; defined and initialized elsewhere. */
extern uint64_t tsc_clock_freq;
#define US_TO_TICKS(x) ((x)*tsc_clock_freq/1000000UL)
#define TIME_MS_DELTA US_TO_TICKS(1000UL)
/* Pre-shifts the 1e6 scale factor down by 8 bits before multiplying and
 * shifts the result back, trading a little precision (3906 vs 3906.25)
 * for 64-bit overflow headroom on large tick counts.
 */
#define TICKS_TO_US(x) ((((x) * (1000000UL >> 8)) / tsc_clock_freq) << 8)
#define TICKS_TO_MS(x) (((x) * 1000UL) / tsc_clock_freq)
/* Read the CPU time-stamp counter; RDTSC returns the 64-bit count split
 * across EDX:EAX.
 */
static inline uint64_t rdtsc(void)
{
	uint32_t eax, edx;

	asm volatile ("rdtsc" : "=a" (eax), "=d" (edx));
	return (((uint64_t)edx << 32) | (uint64_t)eax);
}
#endif /* RTL_H */

View File

@@ -0,0 +1,110 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SPINLOCK_H
#define SPINLOCK_H
#include <hypervisor.h>
#ifndef ASSEMBLER
#include <types.h>
/** The architecture dependent spinlock type (ticket lock: 'head' is the
 * next ticket handed out by the acquire path, 'tail' the ticket currently
 * being served -- see the assembler macros below).
 */
typedef struct _spinlock {
	uint32_t head;
	uint32_t tail;
} spinlock_t;
/* Function prototypes */
int spinlock_init(spinlock_t *lock);
int spinlock_obtain(spinlock_t *lock);
/** Release a ticket spinlock by advancing the tail of the queue.
 *
 * @param lock The lock to release.
 * @return Always 0.
 */
static inline int spinlock_release(spinlock_t *lock)
{
	/* Increment tail of queue.  The counter is modified by the
	 * instruction, so it must be a read-write output operand ("+m"),
	 * not an input; the "memory" clobber keeps critical-section
	 * stores from being reordered past the unlock.
	 */
	asm volatile ("   lock incl %[tail]\n"
			: [tail] "+m" (lock->tail)
			:
			: "cc", "memory");
	return 0;
}
#else /* ASSEMBLER */
/** The offset of the head element. */
#define SYNC_SPINLOCK_HEAD_OFFSET 0
/** The offset of the tail element. */
#define SYNC_SPINLOCK_TAIL_OFFSET 4
/* Ticket-lock acquire: atomically take a ticket (xadd on 'head'), then
 * spin with PAUSE until 'tail' reaches that ticket.  Clobbers eax/ebx.
 * NOTE(review): the "% eax" spacing (blank after '%') and "2 :" label
 * spacing look unusual for GAS syntax -- verify this assembles as
 * intended.
 */
.macro spinlock_obtain lock
	movl $1, % eax
	lea \lock, % ebx
	lock xaddl % eax, SYNC_SPINLOCK_HEAD_OFFSET(%ebx)
	cmpl % eax, SYNC_SPINLOCK_TAIL_OFFSET(%ebx)
	jz 1f
2 :
	pause
	cmpl % eax, SYNC_SPINLOCK_TAIL_OFFSET(%ebx)
	jnz 2b
1 :
.endm
#define spinlock_obtain(x) spinlock_obtain lock = (x)
/* Ticket-lock release: advance 'tail' so the next waiter may proceed. */
.macro spinlock_release lock
	lea \lock, % ebx
	lock incl SYNC_SPINLOCK_TAIL_OFFSET(%ebx)
.endm
#define spinlock_release(x) spinlock_release lock = (x)
#endif /* ASSEMBLER */
/* Declares the local variable that the CPU_INT_ALL_* macros use to hold
 * the saved interrupt state for the irqsave/irqrestore pairs below.
 */
#define spinlock_rflags unsigned long cpu_int_value
/* Disable IRQs, then take the lock; pair with spinlock_irq_release(). */
#define spinlock_irq_obtain(l) (CPU_IRQ_DISABLE(), spinlock_obtain((l)))
/* Disable all interrupts (saving prior state), then take the lock;
 * pair with spinlock_irqrestore_release().
 */
#define spinlock_irqsave_obtain(l) \
	do { \
		CPU_INT_ALL_DISABLE(); \
		spinlock_obtain((l)); \
	} while (0)
/* Release the lock, then re-enable IRQs. */
#define spinlock_irq_release(l) (spinlock_release((l)), CPU_IRQ_ENABLE())
/* Release the lock, then restore the previously saved interrupt state. */
#define spinlock_irqrestore_release(l) \
	do { \
		spinlock_release((l)); \
		CPU_INT_ALL_RESTORE(); \
	} while (0)
#endif /* SPINLOCK_H */

View File

@@ -0,0 +1,39 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef STDARG_H
#define STDARG_H
#include <types.h>
/* Map the standard varargs macros onto the compiler built-ins (va_list
 * itself is typedef'ed in types.h).
 */
#define va_start(x, y) __builtin_va_start((x), (y))
/* Fetch the next variadic argument of type 'y' from list 'x'.  This was
 * missing: without it a va_list could be started and ended but its
 * arguments could never be read (e.g. by a printf implementation).
 */
#define va_arg(x, y) __builtin_va_arg(x, y)
#define va_end(x) __builtin_va_end(x)
#endif /* STDARG_H */

View File

@@ -0,0 +1,38 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef STRTOL_H
#define STRTOL_H
/* NOTE(review): uint64_t below requires the includer to provide
 * <types.h> first; consider making this header self-contained.
 */
/** Parse the leading integer portion of 'str', atoi-style (see the
 * implementation for exact semantics).
 */
int atoi(const char *str);
/** Convert the initial part of 'nptr' in the given 'base' to a long.
 * ('register' dropped from these prototypes: a storage-class specifier
 * on a declaration's parameters is meaningless to callers.)
 */
long strtol(const char *nptr, char **endptr, int base);
/** Unsigned variant; note the non-standard uint64_t return type. */
uint64_t strtoul(const char *nptr, char **endptr, int base);
#endif /* !STRTOL_H */

View File

@@ -0,0 +1,84 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TYPES_H
#define TYPES_H
/* Define NULL value */
#define HV_NULL 0
/* Defines for TRUE / FALSE conditions */
#define HV_FALSE 0
#define HV_TRUE 1
/** Element count of a true array (do not use on pointers). */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
/* Attribute shorthands */
#define __aligned(x) __attribute__((aligned(x)))
#define __packed __attribute__((packed))
#define __unused __attribute__((unused))
#ifndef ASSEMBLER
/* Define standard data types. These definitions allow software components
 * to perform in the same manner on different target platforms.
 * Widths assume an LP64 target (x86-64): long and pointers are 64-bit.
 * (The duplicate 'typedef unsigned int uint32_t;' that appeared twice in
 * this list has been removed.)
 */
typedef signed char int8_t;
typedef unsigned char uint8_t;
typedef signed short int16_t;
typedef unsigned short uint16_t;
typedef signed int int32_t;
typedef unsigned int uint32_t;
typedef signed long int64_t;
typedef unsigned long uint64_t;
/* size_t must match the compiler's own __SIZE_TYPE__ (the type sizeof
 * yields), which is unsigned long on LP64 -- 'unsigned int' here would
 * mismatch the built-in type and truncate large sizes.
 */
typedef unsigned long size_t;
typedef unsigned long mmio_addr_t;
typedef unsigned long vaddr_t;
typedef unsigned long paddr_t;
typedef unsigned long ioport_t;
typedef __builtin_va_list va_list;
typedef uint8_t bool;
#ifndef NULL
#define NULL ((void *) 0)
#endif
#ifndef true
#define true 1
#define false 0
#endif
#ifndef UINT64_MAX
#define UINT64_MAX (-1UL)
#endif
#endif /* ASSEMBLER */
#endif /* TYPES_H */

View File

@@ -0,0 +1,59 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef UTIL_H
#define UTIL_H
/** Add an offset (in bytes) to a (base)address.
 *
 * @param addr Baseaddress
 * @param off Offset
 * @return Returns baseaddress + offset in bytes (as void *).
 */
#define ADD_OFFSET(addr, off) (void *)(((uint8_t *)(addr))+(off))
/** Byte offset of member 'm' within struct type 'st'. */
#define offsetof(st, m) __builtin_offsetof(st, m)
/** Round an integer (x) up to a multiple of y.
 * NOTE: the (x + (y-1)) & -y bit trick only works when y is a power of
 * two; use INT_DIV_ROUNDUP(x, y) * y for arbitrary y.
 */
#define INT_ROUNDUP(x, y) (((x)+((y)-1))&-(y))
/** Round an integer up to a multiple of 4 */
#define INT_ROUNDUP4(x) INT_ROUNDUP(x, 4)
/** Round an integer up to a multiple of 8 */
#define INT_ROUNDUP8(x) INT_ROUNDUP(x, 8)
/** Round an integer up to a multiple of 16 */
#define INT_ROUNDUP16(x) INT_ROUNDUP(x, 16)
/** Round up the quotient: x/y, plus one if there is a remainder. */
#define INT_DIV_ROUNDUP(x, y) (((x)+(y)-1)/(y))
#endif /* UTIL_H */

View File

@@ -0,0 +1,289 @@
/*
* common definition
*
* Copyright (C) 2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file acrn_common.h
*
* @brief acrn common data structure for hypercall or ioctl
*/
#ifndef ACRN_COMMON_H
#define ACRN_COMMON_H
#include <types.h>
/*
* Common structures for ACRN/VHM/DM
*/
/*
* IO request
*/
/* Number of request slots in a vhm_request_buffer page. */
#define VHM_REQUEST_MAX 16
/* Request processing states (see vhm_request.processed). */
#define REQ_STATE_PENDING 0
#define REQ_STATE_SUCCESS 1
#define REQ_STATE_PROCESSING 2
#define REQ_STATE_FAILED -1
/* Request types (see vhm_request.type). */
#define REQ_PORTIO 0
#define REQ_MMIO 1
#define REQ_PCICFG 2
#define REQ_WP 3
/* Transfer direction of a request (see the 'direction' fields below). */
#define REQUEST_READ 0
#define REQUEST_WRITE 1
/**
* @brief Hypercall
*
* @addtogroup acrn_hypercall ACRN Hypercall
* @{
*/
/* MMIO access request; 'direction' is REQUEST_READ or REQUEST_WRITE. */
struct mmio_request {
	uint32_t direction;
	uint32_t reserved;
	int64_t address;
	int64_t size;
	int64_t value;
} __aligned(8);
/* Port I/O access request. */
struct pio_request {
	uint32_t direction;
	uint32_t reserved;
	int64_t address;
	int64_t size;
	int32_t value;
} __aligned(8);
/* PCI configuration-space access request. */
struct pci_request {
	uint32_t direction;
	uint32_t reserved[3];/* need keep same header fields with pio_request */
	int64_t size;
	int32_t value;
	int32_t bus;
	int32_t dev;
	int32_t func;
	int32_t reg;
} __aligned(8);
/* vhm_request are 256Bytes aligned */
struct vhm_request {
	/* offset: 0bytes - 63bytes */
	union {
		uint32_t type;	/* one of the REQ_* type defines above */
		int32_t reserved0[16];
	};
	/* offset: 64bytes-127bytes */
	union {
		struct pio_request pio_request;
		struct pci_request pci_request;
		struct mmio_request mmio_request;
		int64_t reserved1[8];
	} reqs;
	/* True: valid req which need VHM to process.
	 * ACRN write, VHM read only
	 **/
	int32_t valid;
	/* the client which is distributed to handle this request */
	int32_t client;
	/* 1: VHM had processed and success
	 * 0: VHM had not yet processed
	 * -1: VHM failed to process. Invalid request
	 * VHM write, ACRN read only
	 */
	int32_t processed;
} __aligned(256);
/* Shared buffer holding VHM_REQUEST_MAX request slots; the union pads
 * the structure to exactly one 4 KiB page.
 */
struct vhm_request_buffer {
	union {
		struct vhm_request req_queue[VHM_REQUEST_MAX];
		int8_t reserved[4096];
	};
} __aligned(4096);
/**
 * @brief Info to create a VM, the parameter for the HC_CREATE_VM hypercall
 */
struct acrn_create_vm {
	/** created vmid returned to VHM. Keep it the first field */
	int32_t vmid;
	/** number of VCPUs this VM wants to create */
	uint32_t vcpu_num;
	/** the GUID of this VM */
	uint8_t GUID[16];
	/** whether Secure World is enabled for this VM */
	uint8_t secure_world_enabled;
	/** Reserved for future use */
	uint8_t reserved[31];
} __aligned(8);
/**
 * @brief Info to create a VCPU
 *
 * the parameter for the HC_CREATE_VCPU hypercall
 */
struct acrn_create_vcpu {
	/** the virtual CPU ID for the VCPU created */
	uint32_t vcpu_id;
	/** the physical CPU ID for the VCPU created */
	uint32_t pcpu_id;
} __aligned(8);
/**
 * @brief Info to set the ioreq buffer for a created VM
 *
 * the parameter for the HC_SET_IOREQ_BUFFER hypercall
 */
struct acrn_set_ioreq_buffer {
	/** guest physical address of the VM's vhm_request_buffer page */
	uint64_t req_buf;
} __aligned(8);
/** Interrupt type for acrn_irqline: inject interrupt to IOAPIC */
#define ACRN_INTR_TYPE_ISA 0
/** Interrupt type for acrn_irqline: inject interrupt to both PIC and IOAPIC */
#define ACRN_INTR_TYPE_IOAPIC 1
/* NOTE(review): the two comments above look swapped relative to the
 * field comments below (pic_irq is documented as "for ISA type", and
 * ioapic_irq as "for IOAPIC & ISA TYPE") -- verify against the
 * hypercall handler.
 */
/**
 * @brief Info to assert/deassert/pulse a virtual IRQ line for a VM
 *
 * the parameter for HC_ASSERT_IRQLINE/HC_DEASSERT_IRQLINE/HC_PULSE_IRQLINE
 * hypercall
 */
struct acrn_irqline {
	/** interrupt type which could be IOAPIC or ISA */
	uint32_t intr_type;
	/** reserved for alignment padding */
	uint32_t reserved;
	/** pic IRQ for ISA type */
	uint64_t pic_irq;
	/** ioapic IRQ for IOAPIC & ISA TYPE,
	 * if -1 then this IRQ will not be injected
	 */
	uint64_t ioapic_irq;
} __aligned(8);
/**
 * @brief Info to inject an MSI interrupt to a VM
 *
 * the parameter for the HC_INJECT_MSI hypercall
 */
struct acrn_msi_entry {
	/** MSI addr[19:12] with dest VCPU ID */
	uint64_t msi_addr;
	/** MSI data[7:0] with vector */
	uint64_t msi_data;
} __aligned(8);
/**
 * @brief Info to inject an NMI interrupt for a VM
 */
struct acrn_nmi_entry {
	/** virtual CPU ID to inject */
	int64_t vcpu_id;
} __aligned(8);
/**
 * @brief Info to remap pass-through PCI MSI for a VM
 *
 * the parameter for the HC_VM_PCI_MSIX_REMAP hypercall
 */
struct acrn_vm_pci_msix_remap {
	/** pass-through PCI device virtual BDF# */
	uint16_t virt_bdf;
	/** pass-through PCI device physical BDF# */
	uint16_t phys_bdf;
	/** pass-through PCI device MSI/MSI-X cap control data */
	uint16_t msi_ctl;
	/** reserved for alignment padding */
	uint16_t reserved;
	/** pass-through PCI device MSI address to remap, which is
	 * returned to the caller after remapping
	 */
	uint64_t msi_addr; /* IN/OUT: msi address to fix */
	/** pass-through PCI device MSI data to remap, which is
	 * returned to the caller after remapping
	 */
	uint32_t msi_data;
	/** pass-through PCI device is MSI or MSI-X
	 * 0 - MSI, 1 - MSI-X
	 */
	int32_t msix;
	/** if the pass-through PCI device is MSI-X, this field contains
	 * the MSI-X entry table index
	 */
	int32_t msix_entry_index;
	/** if the pass-through PCI device is MSI-X, this field contains
	 * Vector Control for MSI-X Entry, field defined in MSI-X spec
	 */
	uint32_t vector_ctl;
} __aligned(8);
/**
 * @brief The guest config pointer offset.
 *
 * Designed to support passing a DM config data pointer: based on it the
 * hypervisor parses and then passes the DM-defined configuration to the
 * guest VCPU when booting the guest VM.
 * The address 0xd0000 is chosen by the DM, which arranges the whole
 * memory layout below 1M; the DM must make sure nothing else overlaps
 * the 0xd0000 area.
 */
#define GUEST_CFG_OFFSET 0xd0000
/**
 * @}
 */
#endif /* ACRN_COMMON_H */

View File

@@ -0,0 +1,232 @@
/*
* hypercall definition
*
* Copyright (C) 2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file acrn_hv_defs.h
*
* @brief acrn data structure for hypercall
*/
#ifndef ACRN_HV_DEFS_H
#define ACRN_HV_DEFS_H
/*
* Common structures for ACRN/VHM/DM
*/
#include "acrn_common.h"
/*
* Common structures for HV/VHM
*/
/*
 * Hypercall ID encoding: bits [31:24] carry the class prefix (x),
 * bits [23:0] carry the function index (y) within that class.
 */
#define _HC_ID(x, y) (((x)<<24)|(y))
/* Class prefix shared by every ACRN hypercall ID below */
#define HC_ID 0x80UL
/* general */
#define HC_ID_GEN_BASE 0x0UL
#define HC_GET_API_VERSION _HC_ID(HC_ID, HC_ID_GEN_BASE + 0x00)
/* VM management */
#define HC_ID_VM_BASE 0x10UL
#define HC_CREATE_VM _HC_ID(HC_ID, HC_ID_VM_BASE + 0x00)
#define HC_DESTROY_VM _HC_ID(HC_ID, HC_ID_VM_BASE + 0x01)
#define HC_START_VM _HC_ID(HC_ID, HC_ID_VM_BASE + 0x02)
#define HC_PAUSE_VM _HC_ID(HC_ID, HC_ID_VM_BASE + 0x03)
#define HC_CREATE_VCPU _HC_ID(HC_ID, HC_ID_VM_BASE + 0x04)
/* IRQ and Interrupts */
#define HC_ID_IRQ_BASE 0x20UL
#define HC_ASSERT_IRQLINE _HC_ID(HC_ID, HC_ID_IRQ_BASE + 0x00)
#define HC_DEASSERT_IRQLINE _HC_ID(HC_ID, HC_ID_IRQ_BASE + 0x01)
#define HC_PULSE_IRQLINE _HC_ID(HC_ID, HC_ID_IRQ_BASE + 0x02)
#define HC_INJECT_MSI _HC_ID(HC_ID, HC_ID_IRQ_BASE + 0x03)
/* DM ioreq management */
#define HC_ID_IOREQ_BASE 0x30UL
#define HC_SET_IOREQ_BUFFER _HC_ID(HC_ID, HC_ID_IOREQ_BASE + 0x00)
#define HC_NOTIFY_REQUEST_FINISH _HC_ID(HC_ID, HC_ID_IOREQ_BASE + 0x01)
/* Guest memory management */
#define HC_ID_MEM_BASE 0x40UL
#define HC_VM_SET_MEMMAP _HC_ID(HC_ID, HC_ID_MEM_BASE + 0x00)
#define HC_VM_GPA2HPA _HC_ID(HC_ID, HC_ID_MEM_BASE + 0x01)
/* PCI assignment*/
#define HC_ID_PCI_BASE 0x50UL
#define HC_ASSIGN_PTDEV _HC_ID(HC_ID, HC_ID_PCI_BASE + 0x00)
#define HC_DEASSIGN_PTDEV _HC_ID(HC_ID, HC_ID_PCI_BASE + 0x01)
#define HC_VM_PCI_MSIX_REMAP _HC_ID(HC_ID, HC_ID_PCI_BASE + 0x02)
#define HC_SET_PTDEV_INTR_INFO _HC_ID(HC_ID, HC_ID_PCI_BASE + 0x03)
#define HC_RESET_PTDEV_INTR_INFO _HC_ID(HC_ID, HC_ID_PCI_BASE + 0x04)
/* DEBUG */
#define HC_ID_DBG_BASE 0x60UL
#define HC_SETUP_SBUF _HC_ID(HC_ID, HC_ID_DBG_BASE + 0x00)
/* Trusty */
#define HC_ID_TRUSTY_BASE 0x70UL
#define HC_LAUNCH_TRUSTY _HC_ID(HC_ID, HC_ID_TRUSTY_BASE + 0x00)
#define HC_WORLD_SWITCH _HC_ID(HC_ID, HC_ID_TRUSTY_BASE + 0x01)
#define HC_GET_SEC_INFO _HC_ID(HC_ID, HC_ID_TRUSTY_BASE + 0x02)
/* VM id of the privileged VM0 (Dom0) */
#define ACRN_DOM0_VMID (0UL)
/* Sentinel returned when no valid VM id exists */
#define ACRN_INVALID_VMID (-1)
/* Sentinel host physical address; no valid translation exists */
#define ACRN_INVALID_HPA (-1UL)
/* Generic memory attributes */
/* access rights, bits [2:0] (see MEM_ACCESS_RIGHT_MASK) */
#define MEM_ACCESS_READ 0x00000001
#define MEM_ACCESS_WRITE 0x00000002
#define MEM_ACCESS_EXEC 0x00000004
#define MEM_ACCESS_RWX (MEM_ACCESS_READ | MEM_ACCESS_WRITE | \
MEM_ACCESS_EXEC)
#define MEM_ACCESS_RIGHT_MASK 0x00000007
/* memory (cache) types, bits [10:6] (see MEM_TYPE_MASK) */
#define MEM_TYPE_WB 0x00000040
#define MEM_TYPE_WT 0x00000080
#define MEM_TYPE_UC 0x00000100
#define MEM_TYPE_WC 0x00000200
#define MEM_TYPE_WP 0x00000400
#define MEM_TYPE_MASK 0x000007C0
/**
* @brief Hypercall
*
* @defgroup acrn_hypercall ACRN Hypercall
* @{
*/
/**
 * @brief Info to set ept mapping
 *
 * the parameter for HC_VM_SET_MEMMAP hypercall
 */
struct vm_set_memmap {
#define MAP_MEM 0
#define MAP_MMIO 1
#define MAP_UNMAP 2
/** map type: MAP_MEM, MAP_MMIO or MAP_UNMAP */
uint32_t type;
/** reserved for alignment padding */
uint32_t reserved;
/** guest physical address (in the target VM) to map */
uint64_t remote_gpa;
/** VM0's guest physical address which remote gpa will be mapped to */
uint64_t vm0_gpa;
/** length of the map range, in bytes */
uint64_t length;
/** memory attributes: MEM_TYPE_* memory type + MEM_ACCESS_* rights */
uint32_t prot;
} __aligned(8);
/**
 * Setup parameter for shared buffer, used for HC_SETUP_SBUF hypercall
 */
struct sbuf_setup_param {
/** physical cpu id the shared buffer belongs to */
uint32_t pcpu_id;
/** shared buffer id */
uint32_t sbuf_id;
/** shared buffer's guest physical address */
uint64_t gpa;
} __aligned(8);
/**
 * Gpa to hpa translation parameter, used for HC_VM_GPA2HPA hypercall
 */
struct vm_gpa2hpa {
/** in: guest physical address to translate */
uint64_t gpa;
/** out: host physical address filled in after translation */
uint64_t hpa;
} __aligned(8);
/**
 * Intr mapping info per ptdev, the parameter for HC_SET_PTDEV_INTR_INFO
 * hypercall
 */
struct hc_ptdev_irq {
#define IRQ_INTX 0
#define IRQ_MSI 1
#define IRQ_MSIX 2
/** irq mapping type: IRQ_INTX, IRQ_MSI or IRQ_MSIX */
uint32_t type;
/** virtual BDF (bus/device/function) of the ptdev */
uint16_t virt_bdf;
/** physical BDF (bus/device/function) of the ptdev */
uint16_t phys_bdf;
union {
/** INTX remapping info (valid when type == IRQ_INTX) */
struct {
/** virtual IOAPIC/PIC pin */
uint32_t virt_pin;
/** physical IOAPIC pin */
uint32_t phys_pin;
/** non-zero if the virtual pin is a PIC pin */
uint32_t pic_pin;
} intx;
/** MSI/MSI-X remapping info (valid when type is IRQ_MSI/IRQ_MSIX) */
struct {
/** vector count of MSI/MSI-X */
uint32_t vector_cnt;
} msix;
} is; /* irq source */
} __aligned(8);
/**
 * Hypervisor api version info, returned for HC_GET_API_VERSION hypercall
 */
struct hc_api_version {
/** hypervisor api major version */
uint32_t major_version;
/** hypervisor api minor version */
uint32_t minor_version;
} __aligned(8);
/**
* @}
*/
#endif /* ACRN_HV_DEFS_H */