Compare commits
29 Commits
01e0ff077b
7e28a17a53
6ad053fc88
cee2579e35
a9860fad05
b38003b870
3a001f9be6
1c8396abef
ce3f31dcb6
b6cea37b49
31fb783879
d98901a890
1334349f89
feb1afbc3c
080f43216c
56446fe366
0016a64655
b1b4bc98af
2cf158994b
151305e8c9
ca99bb58a2
de188258f6
d83d0fed47
31c0362ac4
7315ff3cf9
5d3702af77
c34649aafa
0bde54613b
dde388d82c
.gitignore (vendored), 1 changed line

@@ -31,7 +31,6 @@ debian/acrn-lifemngr
 debian/acrn-system
 debian/acrn-tools
 debian/acrnd
-debian/changelog
 debian/debhelper-build-stamp
 debian/files
 debian/grub-acrn
@@ -13,9 +13,9 @@
 * @dongyaozu @NanlinXie

 Makefile @terryzouhao @NanlinXie
-/hypervisor/ @dongyaozu @lifeix @junjiemao1
+/hypervisor/ @dongyaozu @lifeix
 /devicemodel/ @ywan170 @chejianj
-/doc/ @NanlinXie
+/doc/ @dbkinder @NanlinXie
 /misc/debug_tools/acrn_crashlog/ @ywan170 @lifeix
 /misc/debug_tools/acrn_log/ @ywan170 @lifeix
 /misc/debug_tools/acrn_trace/ @ywan170 @lifeix
@@ -27,4 +27,4 @@ Makefile @terryzouhao @NanlinXie
 /misc/packaging/ @terryzouhao @NanlinXie
 /misc/hv_prebuild/ @terryzouhao @NanlinXie

-*.rst @NanlinXie
+*.rst @dbkinder @NanlinXie
@@ -48,7 +48,7 @@ the TSC and its membership, are described in the project's `technical-charter`_.

 These are the current TSC voting members and chair person:

-- Yu Wang (chair): yu1.wang@intel.com
+- Junjie Mao (chair): junjie.mao@intel.com
 - Helmut Buchsbaum: helmut.buchsbaum@tttech-industrial.com
 - Thomas Gleixner: thomas.gleixner@intel.com
VERSION, 4 changed lines

@@ -1,3 +1,3 @@
 MAJOR_VERSION=3
-MINOR_VERSION=4
-EXTRA_VERSION=-unstable
+MINOR_VERSION=2
+EXTRA_VERSION=
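Read together, the three fields form the release string. A minimal shell sketch of how they compose (the build system's exact logic is not part of this diff, so treat it as an illustration only):

    . ./VERSION
    echo "${MAJOR_VERSION}.${MINOR_VERSION}${EXTRA_VERSION}"
    # prints 3.4-unstable for the "-" side of the hunk, 3.2 for the "+" side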
debian/changelog (vendored), new file, 53 lines

@@ -0,0 +1,53 @@
+acrn-hypervisor (3.0~3.gbp9074bb) UNRELEASED; urgency=medium
+
+  ** SNAPSHOT build @9074bb42223cf0a18a01ca7f5b481834ba79fc6a **
+
+  * Initial native packaging release for ACRN.
+  * d/rules: Update regarding config detection
+  * d/control: Pin elementpath and xmlschema
+  * debian: Add docker build helpers
+  * debian: Create local apt repo after build
+  * debian: Update and rename README.md
+  * debian: Add nuc7i7dnh/shared+initrd
+  * debian: Add kontron-COMe-mAL10/shared+initrd
+  * debian: Update changelog for UNRELEASED package build
+  * debian: Preinstall build requirements in docker helper
+  * debian: Silence build user creation
+  * life_mngr: Makefile: Use sysconfdir
+  * life_mngr: Makefile: Fix install path creation of CONF_DIR
+  * life_mngr: Makefile: Fix permission of life_mngr.conf
+  * debian: Fixup ACRN lifemngr package build
+  * debian/rules: Store board and scenario xml
+  * debian/grub: Adapt Linux commandline to ACRN scenario settings
+  * debian/grub: Remove override variable support
+  * debian: Use original acrnd.service
+  * acrn_log: Fix log path to comply with Linux FSSTD
+  * doc: Adapt documentation to change of acrnlog log path
+  * debian: Install acrnprobe.xml
+  * debian: linitian suppression rule update
+  * debian/control: Use compat level 12
+  * debian/rules: override_dh_strip: Fix wrong parameter
+  * debian: Trigger grub-acrn on acrn-hypervisor install
+  * debian/rules: Do not start services on install
+  * debian: acrn-tools: Add helper scripts
+  * debian: acrn-hypervisor: Refactor debconf
+  * debian: nuc7i7dnh/shared+initrd: Add boot parameters
+  * debian/configs: Add simatic-ipc227g/shared+initrd
+  * debian/rules: Add simatic-ipc227g
+  * debian: Add default configuration for acrnlog
+  * debian/configs: Add kontron-COMe-mAL10:shared
+  * debian/configs: Add nuc7i7dnh:shared
+  * debian/configs: Add simatic-ipc227g:shared
+  * debian: Separate build config items to acrn-hypervisor.conf.mk
+  * debian/rules: Generate launch scripts
+  * debian: Switch to elementpath 2.5.0-1 and xmlschema 1.10.0-1
+  * debian/docker: Add source package handling for local apt repository
+  * debian: Add build and installation description
+  * debian: Update README.md
+  * debian: Update changelog for UNRELEASED package build
+  * debian: Convert README.md to README.rst
+  * debian: Convert INSTALL.md to INSTALL.rst
+  * debian/acrn-doc.doc-base: Fix typo
+  * debian/configs: Remove proprietary configs, just provide a hook directory
+
+ -- Helmut Buchsbaum <helmut.buchsbaum@opensource.tttech-industrial.com>  Mon, 09 May 2022 14:00:35 +0200
debian/debian_build.sh (vendored), 8 changed lines

@@ -8,7 +8,6 @@ usage() {
 echo "Usage: $0 [--board_list ACRN_BOARDLIST] [--scenario_list ACRN_SCENARIOLIST] [--config_path CONFIGDIRS] [--release n|y] [acrn | board_inspector | clean]"
 echo "Optional arguments:"
 echo " -h, --help show this help message and exit"
-echo " -v, --verbose show verbose output"
 echo " -b, --board_list list the boards to build, seperated by blank; build all scanned boards in the config path if specified as \"\"; build the default boards in debian rules if not specified"
 echo " -s, --scenario_list list the scenarios to build, seperated by blank; build all scanned scenarios in the config path if specified as \"\"; build the default scenarios in debian rules if not specified"
 echo " -c, --config_path specify the config path for the board and scenario configuration files, default use misc/config_tools/data if not specified"
@@ -60,10 +59,6 @@ while [[ $# -gt 0 ]]; do
 release="$2"
 shift 2
 ;;
--v|--verbose)
-verbose=1
-shift
-;;
 -h|--help)
 usage
 exit 0
@@ -81,9 +76,6 @@ done
 set -- "${POSITIONAL_ARGS[@]}"

 cmd="debuild"
-if [ -n "$verbose" ]; then
-cmd="$cmd -eDH_VERBOSE=1"
-fi
 if [ "$board_list" != "default" ]; then
 echo "ACRN_BOARDLIST = ${board_list@Q}"
 cmd="$cmd -eACRN_BOARDLIST=${board_list@Q}"
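For reference, a hedged example of how the script might be invoked based on the usage text above; the board and scenario names are illustrative, any pair known to the config path works:

    # build the acrn packages for one board/scenario combination as a release build
    ./debian/debian_build.sh --board_list "nuc7i7dnh" --scenario_list "shared" --release y acrn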
debian/grub/20_acrn-board-inspector (vendored), 2 changed lines

@@ -181,7 +181,7 @@ prepare_root_cache=
 boot_device_id=

 while [ "x$list" != "x" ] ; do
-linux=$(echo ${list} | tr ' ' '\n' | sed -e 's/\.old$/ 1/; / 1$/! s/$/ 2/' | LC_ALL=C sort -V -r | sed -e 's/ 1$/.old/; s/ 2$//' | head -n 1)
+linux=`version_find_latest $list`
 case $linux in
 *.efi.signed)
 # We handle these in linux_entry.
debian/grub/25_linux_acrn (vendored), 4 changed lines

@@ -311,7 +311,7 @@ boot_device_id=
 acrn_first_entry=

 while [ "x${acrn_list}" != "x" ] ; do
-current_acrn=$(echo ${acrn_list} | tr ' ' '\n' | sed -e 's/\.old$/ 1/; / 1$/! s/$/ 2/' | LC_ALL=C sort -V -r | sed -e 's/ 1$/.old/; s/ 2$//' | head -n 1)
+current_acrn=$(version_find_latest $acrn_list)
 acrn_basename=$(basename ${current_acrn})
 acrn_dirname=$(dirname ${current_acrn})
 rel_acrn_dirname=$(make_system_path_relative_to_its_root $acrn_dirname)
@@ -342,7 +342,7 @@ EOF

 # only if we have at least one ACRN capable kernel and a Service VM entry defined
 while [ "x$list" != "x" ] && [ "x${ACRN_SERVICE_VM_ID}" != "x" ] ; do
-linux=$(echo ${list} | tr ' ' '\n' | sed -e 's/\.old$/ 1/; / 1$/! s/$/ 2/' | LC_ALL=C sort -V -r | sed -e 's/ 1$/.old/; s/ 2$//' | head -n 1)
+linux=$(version_find_latest $list)
 gettext_printf "Found ACRN linux image: %s\n" "$linux" >&2
 basename=$(basename $linux)
 dirname=$(dirname $linux)
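The inline pipeline on the "-" side of these hunks effectively expands what grub-mkconfig's version_find_latest helper does: pick the highest version-sorted candidate while ranking ".old" images below their non-.old counterparts. A standalone sketch with made-up kernel paths, for illustration only:

    list="/boot/vmlinuz-6.1.0 /boot/vmlinuz-6.1.0.old /boot/vmlinuz-5.15.0"
    latest=$(echo ${list} | tr ' ' '\n' \
        | sed -e 's/\.old$/ 1/; / 1$/! s/$/ 2/' \
        | LC_ALL=C sort -V -r \
        | sed -e 's/ 1$/.old/; s/ 2$//' \
        | head -n 1)
    echo "$latest"   # -> /boot/vmlinuz-6.1.0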
@@ -80,7 +80,6 @@ LDFLAGS += -L$(TOOLS_OUT)/services
 LIBS = -lrt
 LIBS += -lpthread
 LIBS += -lcrypto
-LIBS += -luring
 LIBS += -lpciaccess
 LIBS += -lusb-1.0
 LIBS += -lacrn-mngr
@@ -188,8 +187,6 @@ SRCS += core/cmd_monitor/socket.c
 SRCS += core/cmd_monitor/command.c
 SRCS += core/cmd_monitor/command_handler.c
 SRCS += core/cmd_monitor/cmd_monitor.c
-SRCS += core/sbuf.c
-SRCS += core/vm_event.c

 # arch
 SRCS += arch/x86/pm.c
@@ -42,7 +42,6 @@
 #include "lpc.h"
 #include "monitor.h"
 #include "log.h"
-#include "vm_event.h"

 static pthread_mutex_t pm_lock = PTHREAD_MUTEX_INITIALIZER;
 static struct mevent *power_button;
@@ -193,7 +192,7 @@ void
 pm_backto_wakeup(struct vmctx *ctx)
 {
 	/* According to ACPI 5.0 Table 4-16: bit 15, WAK_STS should be
-	 * set when system transition to the working state
+	 * set when system trasition to the working state
 	 */
 	pm1_status |= PM1_WAK_STS;
 }
@@ -243,14 +242,6 @@ power_button_handler(int signal, enum ev_type type, void *arg)
 	inject_power_button_event(arg);
 }

-static void
-send_poweroff_event(void)
-{
-	struct vm_event event;
-	event.type = VM_EVENT_POWEROFF;
-	dm_send_vm_event(&event);
-}
-
 static int
 pm1_control_handler(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
 		uint32_t *eax, void *arg)
@@ -274,7 +265,6 @@ pm1_control_handler(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
 	 */
 	if (*eax & VIRTUAL_PM1A_SLP_EN) {
 		if ((pm1_control & VIRTUAL_PM1A_SLP_TYP) >> 10 == 5) {
-			send_poweroff_event();
 			vm_suspend(ctx, VM_SUSPEND_POWEROFF);
 		}

@@ -1,5 +1,5 @@
-16c698cb5e6354f514e307eb016237dd OVMF_CODE_debug.fd
-e4b15beab40a0482bbe821e151c96d75 OVMF_CODE.fd
-0166b812b014a4c1809d22199823132a OVMF_debug.fd
-bf8d9d3c85ba06ac940ee9d6d6f9f811 OVMF.fd
+3102701dfdaeab846577e139398af5b8 OVMF.fd
+75f5f310da41cd7bd1dd96ddcbcff8cf OVMF_debug.fd
+dca1ed6265062454a9c445d6e6e33d05 OVMF_CODE.fd
+c2e1d7a8e1e4036a1d3f203dfc66bac0 OVMF_CODE_debug.fd
 aa9703e68b787f1a391bcbf201c84e02 OVMF_VARS.fd
@@ -1,5 +1,5 @@
-ed00f44b07d375d036b67d9b50b878c9c5b1bee59082b08f9e0126c06dd91914f6e9e2dc248fb1bc61350c377da284fa4150ffd2028a4daa34a6c3217047e32e OVMF_CODE_debug.fd
-a958f4f5dbe8ad98565e3ac4883fcac40a67bbdd184b2e3d7c1a7974181628c20498159dec64596879af12e0710e8e70748c27337060aab7da164a83b0124f08 OVMF_CODE.fd
-05e4996dd3d7a1e83aca8176ea0da4a92ea704116ea21f9b2df59f386b8bdbdb9e697731a111eaa31d994c66b66b732c105b979e4c5a7e8b6f7960145ebdf898 OVMF_debug.fd
-4647bfe6a4bc0430e0c751d51d06665b5500eeaf0170826abbc6f38f94c19946a6683ce4fc2f9c2e09753546e2f4f30d65643d5b511ecd28a9e73a362db133cc OVMF.fd
+6a29eba62832a4e94aec79fb726cfd507b019e5c9039e0abf2efa023fd21373f911e8e7ba0ca1e20c495409b738d513ac9ac7912c05ef7db7159569af3cb3da8 OVMF.fd
+91bdb40d49a994a4b45e25144917fbedefa0710d9d95aedf633197ab3cd7d6ca9c3a075bf120c15667f863840f48612297bd48275080a31d84217da076f2b629 OVMF_debug.fd
+ffbce1003d166751f842c34cdde6ae67df02a27f4388c58707d8ec3ca6da692cdeb6bae9b5eae8a8f0c55709be1852bfc84af8a2c6a8ab8e45fb0b84c3c383fd OVMF_CODE.fd
+24e927b075be8071efcb5ca988537bd6593205bee5a5750e6036370a08add296d4e4966e2e326700a096167d6a8a55e0078936ae3dedd3492c9d46cc9bb0ac4a OVMF_CODE_debug.fd
 6c5f0629cda9a950dd76ea4162c3e7d3483615fa6e9918da365900dbdcb84681192df70c4629237f0db19d0a27fbc574bb3d9f38a522246e5b91356cd5e5a1e5 OVMF_VARS.fd
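Either list can be checked with the usual coreutils verifiers. A sketch, assuming the checksum lists are saved next to the .fd images under placeholder names (the real file names are not visible in this page):

    md5sum -c ovmf-md5sums.txt        # verifies the OVMF*.fd images against the md5 list
    sha512sum -c ovmf-sha512sums.txt  # verifies them against the sha512 list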
@@ -1,12 +1,3 @@
-OVMF release v3.3
-
-- OvmfPkg: resolve AcrnS3Lib
-- OvmfPkg: add AcrnS3Lib to support S3
-- OvmfPkg: introduce AcrnS3Lib class
-- OVMF:ACRN:PCI: Try to load ROM image for the PCI device with PCI_ROM
-- OVMF:ACRN:PCI: Add LoadOpRomImageLight to Load the PCI Rom
-- OVMF:ACRN:PCI: Write back the original value of PCI ROM
-
 OVMF release v3.0

 - VGA interface of virtio-gpu adapter support
@@ -99,7 +99,6 @@ static void register_socket_message_handlers(struct vmctx *ctx)
 	arg.ctx_arg = ctx;
 	register_command_handler(user_vm_destroy_handler, &arg, DESTROY);
 	register_command_handler(user_vm_blkrescan_handler, &arg, BLKRESCAN);
-	register_command_handler(user_vm_register_vm_event_client_handler, &arg, REGISTER_VM_EVENT_CLIENT);
 }

 int init_cmd_monitor(struct vmctx *ctx)
@@ -26,7 +26,6 @@
 #define CMD_OBJS \
 	GEN_CMD_OBJ(DESTROY), \
 	GEN_CMD_OBJ(BLKRESCAN), \
-	GEN_CMD_OBJ(REGISTER_VM_EVENT_CLIENT), \

 struct command dm_command_list[CMDS_NUM] = {CMD_OBJS};

@@ -10,9 +10,8 @@

 #define DESTROY "destroy"
 #define BLKRESCAN "blkrescan"
-#define REGISTER_VM_EVENT_CLIENT "register_vm_event_client"

-#define CMDS_NUM 3U
+#define CMDS_NUM 2U
 #define CMD_NAME_MAX 32U
 #define CMD_ARG_MAX 320U

@@ -67,78 +67,6 @@ static int send_socket_ack(struct socket_dev *sock, int fd, bool normal)
 	return ret;
 }

-static struct socket_client *vm_event_client = NULL;
-static pthread_mutex_t vm_event_client_mutex = PTHREAD_MUTEX_INITIALIZER;
-
-static void vm_event_free_cb(struct socket_client *self)
-{
-	vm_event_client = NULL;
-}
-
-static int set_vm_event_client(struct socket_client *client)
-{
-	if (vm_event_client != NULL) {
-		pr_err("vm event client already registerred.\n");
-		return -1;
-	} else {
-		vm_event_client = client;
-		client->per_client_mutex = &vm_event_client_mutex;
-		client->free_client_cb = vm_event_free_cb;
-		return 0;
-	}
-}
-
-int vm_monitor_send_vm_event(const char *msg)
-{
-	int ret = -1;
-	struct socket_client *client;
-	pthread_mutex_t *per_client_mutex = &vm_event_client_mutex;
-
-	pthread_mutex_lock(per_client_mutex);
-	client = vm_event_client;
-	if (msg == NULL || client == NULL) {
-		pthread_mutex_unlock(per_client_mutex);
-		return -1;
-	}
-	memset(client->buf, 0, CLIENT_BUF_LEN);
-	memcpy(client->buf, msg, strlen(msg));
-	client->len = strlen(msg);
-	ret = write_socket_char(client);
-	pthread_mutex_unlock(per_client_mutex);
-	return ret;
-}
-
-/* When a client issues the REGISTER_VM_EVENT_CLIENT command,
- * this handler will register that client as this VM's only vm_event receiver,
- * and keeps the socket connection. Then vm events will be sent to
- * the client through this connection.
- */
-int user_vm_register_vm_event_client_handler(void *arg, void *command_para)
-{
-	int ret;
-	struct command_parameters *cmd_para = (struct command_parameters *)command_para;
-	struct handler_args *hdl_arg = (struct handler_args *)arg;
-	struct socket_dev *sock = (struct socket_dev *)hdl_arg->channel_arg;
-	struct socket_client *client = NULL;
-	bool cmd_completed = false;
-
-	client = find_socket_client(sock, cmd_para->fd);
-	if (client == NULL)
-		return -1;
-
-	if (set_vm_event_client(client) == 0) {
-		cmd_completed = true;
-	}
-
-	pr_dbg("%s: client with fd %d registerred\n", __func__, client->fd);
-
-	ret = send_socket_ack(sock, cmd_para->fd, cmd_completed);
-	if (ret < 0) {
-		pr_err("%s: Failed to send ACK message by socket.\n", __func__);
-	}
-	return ret;
-}
-
 int user_vm_destroy_handler(void *arg, void *command_para)
 {
 	int ret;
@@ -9,6 +9,4 @@ extern struct socket_dev *sock_server;

 int user_vm_destroy_handler(void *arg, void *command_para);
 int user_vm_blkrescan_handler(void *arg, void *command_para);
-int user_vm_register_vm_event_client_handler(void *arg, void *command_para);
-
 #endif
@@ -47,23 +47,13 @@ err:
 }
 static void free_socket_client(struct socket_dev *sock, struct socket_client *client)
 {
-	pthread_mutex_t *per_client_mutex = client->per_client_mutex;
 	pthread_mutex_lock(&sock->client_mtx);
 	LIST_REMOVE(client, list);
 	pthread_mutex_unlock(&sock->client_mtx);

-	if (per_client_mutex) {
-		pthread_mutex_lock(per_client_mutex);
-	}
-	if (client->free_client_cb) {
-		client->free_client_cb(client);
-	}
 	close(client->fd);
 	client->fd = -1;
 	free(client);
-	if (per_client_mutex) {
-		pthread_mutex_unlock(per_client_mutex);
-	}
 }

 int write_socket_char(struct socket_client *client)
@@ -152,8 +142,7 @@ static struct socket_client *new_socket_client(struct socket_dev *sock)
 			__func__);
 		goto alloc_client;
 	}
-	/* If per client mutex is needed, init in callback */
-	client->per_client_mutex = NULL;
 	client->addr_len = sizeof(client->addr);
 	client->fd =
 		accept(sock->sock_fd, (struct sockaddr *)&client->addr,
@@ -164,6 +153,7 @@ static struct socket_client *new_socket_client(struct socket_dev *sock)
 			__func__, sock->sock_fd, strerror(errno));
 		goto accept_con;
 	}
+
 	pthread_mutex_lock(&sock->client_mtx);
 	LIST_INSERT_HEAD(&sock->client_head, client, list);
 	pthread_mutex_unlock(&sock->client_mtx);
@@ -21,11 +21,7 @@ struct socket_client {
 	socklen_t addr_len;
 	char buf[CLIENT_BUF_LEN];
 	int len; /* buf len */
-	/* When a client is registered as vm_event receiver, we need this per_client_mutex
-	 * to make sure it is safe to free the client when client disconnects.
-	 */
-	pthread_mutex_t *per_client_mutex;
-	void (*free_client_cb)(struct socket_client *self);
 	LIST_ENTRY(socket_client) list;
 };

@@ -446,7 +446,7 @@ static bool release_larger_freepage(int level_limit)
 		if (hugetlb_priv[level].pages_delta >= 0)
 			continue;

-		/* free one un-used larger page */
+		/* free one unsed larger page */
 		orig_pages = read_sys_info(hugetlb_priv[level].nr_pages_path);
 		total_pages = orig_pages - 1;
 		snprintf(cmd_buf, MAX_PATH_LEN, "echo %d > %s",
@@ -483,7 +483,7 @@ static bool release_larger_freepage(int level_limit)
 *.D.enough higher level free pages, but not enough free memory for
 * lower level gap pages, so release some higher level free pages for that.
 * other info:
-*. even enough free memory, it is easier to reserve smaller pages than
+*. even enough free memory, it is eaiser to reserve smaller pages than
 * lager ones, for example:2MB easier than 1GB. One flow of current solution:
 *.it could leave Service VM very small free memory.
 *.return value: true: success; false: failure
@@ -507,8 +507,8 @@ static bool hugetlb_reserve_pages(void)

 	/* probably system allocates fewer pages than needed
 	 * especially for larger page like 1GB, even there is enough
-	 * free memory, it still can fail to allocate 1GB huge page.
-	 * so if that, it needs the next level to handle it.
+	 * free memory, it stil can fail to allocate 1GB huge page.
+	 * so if that,it needs the next level to handle it.
 	 */
 	if (level > HUGETLB_LV1) {
 		left_gap = hugetlb_priv[level].pages_delta;
@@ -555,13 +555,8 @@ bool init_hugetlb(void)
 		path[i] = 0;
 		if (access(path, F_OK) != 0) {
 			if (mkdir(path, 0755) < 0) {
-				/* We might have multiple acrn-dm instances booting VMs at
-				 * the same time
-				 */
-				if (errno != EEXIST) {
-					pr_err("mkdir %s failed: %s\n", path, errormsg(errno));
-					return false;
-				}
+				pr_err("mkdir %s failed.\n", path);
+				return -1;
 			}
 		}
 		path[i] = '/';
@@ -906,7 +901,7 @@ bool vm_allow_dmabuf(struct vmctx *ctx)
 	}

 	if (ctx->lowmem) {
-		/* Check the lowmem is used by HUGETLB_LV1/HUGETLB_LV2 */
+		/* Check the lowhmem is used by HUGETLB_LV1/HUGETLB_LV2 */
 		mem_flags = 0;
 		if ((hugetlb_priv[HUGETLB_LV1].fd > 0) &&
 			(hugetlb_priv[HUGETLB_LV1].lowmem))
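The cmd_buf built in release_larger_freepage() writes the new page count through the kernel's hugetlb sysfs interface. A hedged shell equivalent with illustrative numbers (the exact nr_pages_path depends on the huge page size in use):

    # give back one 2 MB huge page to the kernel, as the "echo %d > %s" above does
    echo 511 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages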
@ -14,45 +14,46 @@
|
|||||||
#include <sys/queue.h>
|
#include <sys/queue.h>
|
||||||
#include <pthread.h>
|
#include <pthread.h>
|
||||||
#include <signal.h>
|
#include <signal.h>
|
||||||
#include <string.h>
|
|
||||||
|
|
||||||
#include "iothread.h"
|
#include "iothread.h"
|
||||||
#include "log.h"
|
#include "log.h"
|
||||||
#include "mevent.h"
|
#include "mevent.h"
|
||||||
#include "dm.h"
|
|
||||||
|
|
||||||
|
|
||||||
#define MEVENT_MAX 64
|
#define MEVENT_MAX 64
|
||||||
|
#define MAX_EVENT_NUM 64
|
||||||
static struct iothread_ctx ioctxes[IOTHREAD_NUM];
|
struct iothread_ctx {
|
||||||
static int ioctx_active_cnt;
|
pthread_t tid;
|
||||||
/* mutex to protect the free ioctx slot allocation */
|
int epfd;
|
||||||
static pthread_mutex_t ioctxes_mutex = PTHREAD_MUTEX_INITIALIZER;
|
bool started;
|
||||||
|
pthread_mutex_t mtx;
|
||||||
|
};
|
||||||
|
static struct iothread_ctx ioctx;
|
||||||
|
|
||||||
static void *
|
static void *
|
||||||
io_thread(void *arg)
|
io_thread(void *arg)
|
||||||
{
|
{
|
||||||
struct epoll_event eventlist[MEVENT_MAX];
|
struct epoll_event eventlist[MEVENT_MAX];
|
||||||
struct iothread_mevent *aevp;
|
struct iothread_mevent *aevp;
|
||||||
int i, n;
|
int i, n, status;
|
||||||
struct iothread_ctx *ioctx_x = (struct iothread_ctx *)arg;
|
char buf[MAX_EVENT_NUM];
|
||||||
|
|
||||||
set_thread_priority(PRIO_IOTHREAD, true);
|
while(ioctx.started) {
|
||||||
|
n = epoll_wait(ioctx.epfd, eventlist, MEVENT_MAX, -1);
|
||||||
while(ioctx_x->started) {
|
|
||||||
n = epoll_wait(ioctx_x->epfd, eventlist, MEVENT_MAX, -1);
|
|
||||||
if (n < 0) {
|
if (n < 0) {
|
||||||
if (errno == EINTR) {
|
if (errno == EINTR)
|
||||||
/* EINTR may happen when io_uring fd is monitored, it is harmless. */
|
pr_info("%s: exit from epoll_wait\n", __func__);
|
||||||
continue;
|
else
|
||||||
} else {
|
|
||||||
pr_err("%s: return from epoll wait with errno %d\r\n", __func__, errno);
|
pr_err("%s: return from epoll wait with errno %d\r\n", __func__, errno);
|
||||||
break;
|
break;
|
||||||
}
|
|
||||||
}
|
}
|
||||||
for (i = 0; i < n; i++) {
|
for (i = 0; i < n; i++) {
|
||||||
aevp = eventlist[i].data.ptr;
|
aevp = eventlist[i].data.ptr;
|
||||||
if (aevp && aevp->run) {
|
if (aevp && aevp->run) {
|
||||||
|
/* Mitigate the epoll_wait repeat cycles by reading out the events as more as possile.*/
|
||||||
|
do {
|
||||||
|
status = read(aevp->fd, buf, sizeof(buf));
|
||||||
|
} while (status == MAX_EVENT_NUM);
|
||||||
(*aevp->run)(aevp->arg);
|
(*aevp->run)(aevp->arg);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -62,54 +63,36 @@ io_thread(void *arg)
|
|||||||
}
|
}
|
||||||
|
|
||||||
static int
|
static int
|
||||||
iothread_start(struct iothread_ctx *ioctx_x)
|
iothread_start(void)
|
||||||
{
|
{
|
||||||
int ret;
|
pthread_mutex_lock(&ioctx.mtx);
|
||||||
|
|
||||||
pthread_mutex_lock(&ioctx_x->mtx);
|
if (ioctx.started) {
|
||||||
|
pthread_mutex_unlock(&ioctx.mtx);
|
||||||
if (ioctx_x->started) {
|
|
||||||
pthread_mutex_unlock(&ioctx_x->mtx);
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (pthread_create(&ioctx_x->tid, NULL, io_thread, ioctx_x) != 0) {
|
if (pthread_create(&ioctx.tid, NULL, io_thread, NULL) != 0) {
|
||||||
pthread_mutex_unlock(&ioctx_x->mtx);
|
pthread_mutex_unlock(&ioctx.mtx);
|
||||||
pr_err("%s", "iothread create failed\r\n");
|
pr_err("%s", "iothread create failed\r\n");
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
ioctx.started = true;
|
||||||
ioctx_x->started = true;
|
pthread_setname_np(ioctx.tid, "iothread");
|
||||||
pthread_setname_np(ioctx_x->tid, ioctx_x->name);
|
pthread_mutex_unlock(&ioctx.mtx);
|
||||||
|
pr_info("iothread started\n");
|
||||||
if (CPU_COUNT(&(ioctx_x->cpuset)) != 0) {
|
|
||||||
ret = pthread_setaffinity_np(ioctx_x->tid, sizeof(cpuset_t), &(ioctx_x->cpuset));
|
|
||||||
if (ret != 0) {
|
|
||||||
pr_err("pthread_setaffinity_np fails %d \n", ret);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pthread_mutex_unlock(&ioctx_x->mtx);
|
|
||||||
pr_info("%s started\n", ioctx_x->name);
|
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
int
|
int
|
||||||
iothread_add(struct iothread_ctx *ioctx_x, int fd, struct iothread_mevent *aevt)
|
iothread_add(int fd, struct iothread_mevent *aevt)
|
||||||
{
|
{
|
||||||
struct epoll_event ee;
|
struct epoll_event ee;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
if (ioctx_x == NULL) {
|
|
||||||
pr_err("%s: ioctx_x is NULL \n", __func__);
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Create a epoll instance before the first fd is added.*/
|
/* Create a epoll instance before the first fd is added.*/
|
||||||
ee.events = EPOLLIN;
|
ee.events = EPOLLIN;
|
||||||
ee.data.ptr = aevt;
|
ee.data.ptr = aevt;
|
||||||
ret = epoll_ctl(ioctx_x->epfd, EPOLL_CTL_ADD, fd, &ee);
|
ret = epoll_ctl(ioctx.epfd, EPOLL_CTL_ADD, fd, &ee);
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
pr_err("%s: failed to add fd, error is %d\n",
|
pr_err("%s: failed to add fd, error is %d\n",
|
||||||
__func__, errno);
|
__func__, errno);
|
||||||
@ -117,7 +100,7 @@ iothread_add(struct iothread_ctx *ioctx_x, int fd, struct iothread_mevent *aevt)
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* Start the iothread after the first fd is added.*/
|
/* Start the iothread after the first fd is added.*/
|
||||||
ret = iothread_start(ioctx_x);
|
ret = iothread_start();
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
pr_err("%s: failed to start iothread thread\n",
|
pr_err("%s: failed to start iothread thread\n",
|
||||||
__func__);
|
__func__);
|
||||||
@ -126,17 +109,12 @@ iothread_add(struct iothread_ctx *ioctx_x, int fd, struct iothread_mevent *aevt)
|
|||||||
}
|
}
|
||||||
|
|
||||||
int
|
int
|
||||||
iothread_del(struct iothread_ctx *ioctx_x, int fd)
|
iothread_del(int fd)
|
||||||
{
|
{
|
||||||
int ret = 0;
|
int ret = 0;
|
||||||
|
|
||||||
if (ioctx_x == NULL) {
|
if (ioctx.epfd) {
|
||||||
pr_err("%s: ioctx_x is NULL \n", __func__);
|
ret = epoll_ctl(ioctx.epfd, EPOLL_CTL_DEL, fd, NULL);
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (ioctx_x->epfd) {
|
|
||||||
ret = epoll_ctl(ioctx_x->epfd, EPOLL_CTL_DEL, fd, NULL);
|
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
pr_err("%s: failed to delete fd from epoll fd, error is %d\n",
|
pr_err("%s: failed to delete fd from epoll fd, error is %d\n",
|
||||||
__func__, errno);
|
__func__, errno);
|
||||||
@ -148,215 +126,40 @@ void
|
|||||||
iothread_deinit(void)
|
iothread_deinit(void)
|
||||||
{
|
{
|
||||||
void *jval;
|
void *jval;
|
||||||
int i;
|
|
||||||
struct iothread_ctx *ioctx_x;
|
|
||||||
|
|
||||||
pthread_mutex_lock(&ioctxes_mutex);
|
if (ioctx.tid > 0) {
|
||||||
for (i = 0; i < ioctx_active_cnt; i++) {
|
pthread_mutex_lock(&ioctx.mtx);
|
||||||
ioctx_x = &ioctxes[i];
|
ioctx.started = false;
|
||||||
|
pthread_mutex_unlock(&ioctx.mtx);
|
||||||
if (ioctx_x->tid > 0) {
|
pthread_kill(ioctx.tid, SIGCONT);
|
||||||
pthread_mutex_lock(&ioctx_x->mtx);
|
pthread_join(ioctx.tid, &jval);
|
||||||
ioctx_x->started = false;
|
|
||||||
pthread_mutex_unlock(&ioctx_x->mtx);
|
|
||||||
pthread_kill(ioctx_x->tid, SIGCONT);
|
|
||||||
pthread_join(ioctx_x->tid, &jval);
|
|
||||||
}
|
|
||||||
if (ioctx_x->epfd > 0) {
|
|
||||||
close(ioctx_x->epfd);
|
|
||||||
ioctx_x->epfd = -1;
|
|
||||||
}
|
|
||||||
pthread_mutex_destroy(&ioctx_x->mtx);
|
|
||||||
pr_info("%s stop \n", ioctx_x->name);
|
|
||||||
}
|
}
|
||||||
ioctx_active_cnt = 0;
|
if (ioctx.epfd > 0) {
|
||||||
pthread_mutex_unlock(&ioctxes_mutex);
|
close(ioctx.epfd);
|
||||||
|
ioctx.epfd = -1;
|
||||||
|
}
|
||||||
|
pthread_mutex_destroy(&ioctx.mtx);
|
||||||
|
pr_info("iothread stop\n");
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
int
|
||||||
* Create @ioctx_num iothread context instances
|
iothread_init(void)
|
||||||
* Return NULL if fails. Otherwise, return the base of those iothread context instances.
|
|
||||||
*
|
|
||||||
* Notes:
|
|
||||||
* The caller of iothread_create() shall call iothread_free_options() afterwards to free the resources that
|
|
||||||
* are dynamically allocated during iothread_parse_options(), such as iothr_opt->cpusets.
|
|
||||||
*
|
|
||||||
* A general calling sequence from the virtual device owner is like:
|
|
||||||
* 1. Call iothread_parse_options() to parse the options from the user.
|
|
||||||
* 2. Call iothread_create() to create the iothread instances.
|
|
||||||
* 3. Call iothread_free_options() to free the dynamic resources.
|
|
||||||
*/
|
|
||||||
struct iothread_ctx *
|
|
||||||
iothread_create(struct iothreads_option *iothr_opt)
|
|
||||||
{
|
{
|
||||||
pthread_mutexattr_t attr;
|
pthread_mutexattr_t attr;
|
||||||
int i, ret, base, end;
|
|
||||||
struct iothread_ctx *ioctx_x;
|
|
||||||
struct iothread_ctx *ioctx_base = NULL;
|
|
||||||
ret = 0;
|
|
||||||
|
|
||||||
if (iothr_opt == NULL) {
|
pthread_mutexattr_init(&attr);
|
||||||
pr_err("%s: iothr_opt is NULL \n", __func__);
|
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
|
||||||
return ioctx_base;
|
pthread_mutex_init(&ioctx.mtx, &attr);
|
||||||
|
pthread_mutexattr_destroy(&attr);
|
||||||
|
|
||||||
|
ioctx.tid = 0;
|
||||||
|
ioctx.started = false;
|
||||||
|
ioctx.epfd = epoll_create1(0);
|
||||||
|
|
||||||
|
if (ioctx.epfd < 0) {
|
||||||
|
pr_err("%s: failed to create epoll fd, error is %d\r\n",
|
||||||
|
__func__, errno);
|
||||||
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
pthread_mutex_lock(&ioctxes_mutex);
|
|
||||||
base = ioctx_active_cnt;
|
|
||||||
end = base + iothr_opt->num;
|
|
||||||
|
|
||||||
if (end > IOTHREAD_NUM) {
|
|
||||||
ret = -1;
|
|
||||||
pr_err("%s: fails to create new iothread context, max number of instances is %d \n",
|
|
||||||
__func__, IOTHREAD_NUM);
|
|
||||||
} else {
|
|
||||||
for (i = base; i < end; i++) {
|
|
||||||
ioctx_x = &ioctxes[i];
|
|
||||||
|
|
||||||
pthread_mutexattr_init(&attr);
|
|
||||||
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
|
|
||||||
pthread_mutex_init(&(ioctx_x->mtx), &attr);
|
|
||||||
pthread_mutexattr_destroy(&attr);
|
|
||||||
|
|
||||||
ioctx_x->idx = i;
|
|
||||||
ioctx_x->tid = 0;
|
|
||||||
ioctx_x->started = false;
|
|
||||||
ioctx_x->epfd = epoll_create1(0);
|
|
||||||
|
|
||||||
CPU_ZERO(&(ioctx_x->cpuset));
|
|
||||||
if (iothr_opt->cpusets != NULL) {
|
|
||||||
memcpy(&(ioctx_x->cpuset), iothr_opt->cpusets + (i - base), sizeof(cpu_set_t));
|
|
||||||
}
|
|
||||||
|
|
||||||
if (snprintf(ioctx_x->name, PTHREAD_NAME_MAX_LEN,
|
|
||||||
"iothr-%d-%s", ioctx_x->idx, iothr_opt->tag) >= PTHREAD_NAME_MAX_LEN) {
|
|
||||||
pr_err("%s: iothread name too long \n", __func__);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (ioctx_x->epfd < 0) {
|
|
||||||
ret = -1;
|
|
||||||
pr_err("%s: failed to create epoll fd, error is %d\r\n",
|
|
||||||
__func__, errno);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if (ret == 0) {
|
|
||||||
ioctx_base = &ioctxes[base];
|
|
||||||
ioctx_active_cnt = end;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
pthread_mutex_unlock(&ioctxes_mutex);
|
|
||||||
|
|
||||||
return ioctx_base;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Parse the iothread options from @str and fill the options in @iothr_opt if successes.
|
|
||||||
* Return -1 if fails to parse. Otherwise, return 0.
|
|
||||||
*/
|
|
||||||
int
|
|
||||||
iothread_parse_options(char *str, struct iothreads_option *iothr_opt)
|
|
||||||
{
|
|
||||||
char *tmp_num = NULL;
|
|
||||||
char *tmp_cpusets = NULL;
|
|
||||||
char *tmp_cpux = NULL;
|
|
||||||
int service_vm_cpuid, iothread_sub_idx, num;
|
|
||||||
cpu_set_t *cpuset_list = NULL;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Create one iothread instance if DM parameters contain 'iothread', but the number is not specified.
|
|
||||||
*/
|
|
||||||
num = 1;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Valid 'iothread' setting examples:
|
|
||||||
* - create 1 iothread instance for virtio-blk
|
|
||||||
* ... virtio-blk iothread,...
|
|
||||||
*
|
|
||||||
* - create 1 iothread instance for virtio-blk
|
|
||||||
* ... virtio-blk iothread=1,...
|
|
||||||
*
|
|
||||||
* - create 3 iothread instances for virtio-blk
|
|
||||||
* ... virtio-blk iothread=3,...
|
|
||||||
*
|
|
||||||
* - create 3 iothread instances for virtio-blk with CPU affinity settings
|
|
||||||
* ... virtio-blk iothread=3@0:1:2/0:1,...
|
|
||||||
* CPU affinity of iothread instances for this virtio-blk device:
|
|
||||||
* - 1st iothread instance <-> Service VM CPU 0,1,2
|
|
||||||
* - 2nd iothread instance <-> Service VM CPU 0,1
|
|
||||||
* - 3rd iothread instance <-> No CPU affinity settings
|
|
||||||
*
|
|
||||||
*/
|
|
||||||
if (str != NULL) {
|
|
||||||
/*
|
|
||||||
* "@" is used to separate the following two settings:
|
|
||||||
* - the number of iothread instances
|
|
||||||
* - the CPU affinity settings for each iothread instance.
|
|
||||||
*/
|
|
||||||
tmp_num = strsep(&str, "@");
|
|
||||||
|
|
||||||
if (tmp_num != NULL) {
|
|
||||||
if (dm_strtoi(tmp_num, &tmp_num, 10, &num) || (num <= 0)) {
|
|
||||||
pr_err("%s: invalid iothread number %s \n", __func__, tmp_num);
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
|
|
||||||
cpuset_list = calloc(num, sizeof(cpu_set_t));
|
|
||||||
if (cpuset_list == NULL) {
|
|
||||||
pr_err("%s: calloc cpuset_list returns NULL \n", __func__);
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
|
|
||||||
iothread_sub_idx = 0;
|
|
||||||
while ((str != NULL) && (*str !='\0') && (iothread_sub_idx < num)) {
|
|
||||||
/* "/" is used to separate the CPU affinity setting for each iothread instance. */
|
|
||||||
tmp_cpusets = strsep(&str, "/");
|
|
||||||
|
|
||||||
CPU_ZERO(cpuset_list + iothread_sub_idx);
|
|
||||||
while ((tmp_cpusets != NULL) && (*tmp_cpusets !='\0')) {
|
|
||||||
/* ":" is used to separate different CPU cores. */
|
|
||||||
tmp_cpux = strsep(&tmp_cpusets, ":");
|
|
||||||
|
|
||||||
/*
|
|
||||||
* char '*' can be used to skip the setting for the
|
|
||||||
* specific iothread instance.
|
|
||||||
*/
|
|
||||||
if (*tmp_cpux == '*') {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (dm_strtoi(tmp_cpux, &tmp_cpux, 10, &service_vm_cpuid) ||
|
|
||||||
(service_vm_cpuid < 0)) {
|
|
||||||
pr_err("%s: invalid CPU affinity setting %s \n",
|
|
||||||
__func__, tmp_cpux);
|
|
||||||
|
|
||||||
free(cpuset_list);
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
|
|
||||||
CPU_SET(service_vm_cpuid, cpuset_list + iothread_sub_idx);
|
|
||||||
pr_err("%s: iothread[%d]: set service_vm_cpuid %d \n",
|
|
||||||
__func__, iothread_sub_idx, service_vm_cpuid);
|
|
||||||
}
|
|
||||||
iothread_sub_idx++;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
iothr_opt->num = num;
|
|
||||||
iothr_opt->cpusets = cpuset_list;
|
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
|
||||||
* This interface is used to free the elements that are allocated dynamically in iothread_parse_options(),
|
|
||||||
* such as iothr_opt->cpusets.
|
|
||||||
*/
|
|
||||||
void iothread_free_options(struct iothreads_option *iothr_opt)
|
|
||||||
{
|
|
||||||
if ((iothr_opt != NULL) && (iothr_opt->cpusets != NULL)) {
|
|
||||||
free(iothr_opt->cpusets);
|
|
||||||
iothr_opt->cpusets = NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
@ -71,7 +71,6 @@
|
|||||||
#include "cmd_monitor.h"
|
#include "cmd_monitor.h"
|
||||||
#include "vdisplay.h"
|
#include "vdisplay.h"
|
||||||
#include "iothread.h"
|
#include "iothread.h"
|
||||||
#include "vm_event.h"
|
|
||||||
|
|
||||||
#define VM_MAXCPU 16 /* maximum virtual cpus */
|
#define VM_MAXCPU 16 /* maximum virtual cpus */
|
||||||
|
|
||||||
@ -102,7 +101,6 @@ bool vtpm2;
|
|||||||
bool is_winvm;
|
bool is_winvm;
|
||||||
bool skip_pci_mem64bar_workaround = false;
|
bool skip_pci_mem64bar_workaround = false;
|
||||||
bool gfx_ui = false;
|
bool gfx_ui = false;
|
||||||
bool ovmf_loaded = false;
|
|
||||||
|
|
||||||
static int guest_ncpus;
|
static int guest_ncpus;
|
||||||
static int virtio_msix = 1;
|
static int virtio_msix = 1;
|
||||||
@ -168,7 +166,7 @@ usage(int code)
|
|||||||
" -v: version\n"
|
" -v: version\n"
|
||||||
" --ovmf: ovmf file path\n"
|
" --ovmf: ovmf file path\n"
|
||||||
" --iasl: iasl compiler path\n"
|
" --iasl: iasl compiler path\n"
|
||||||
" --ssram: Configure Software SRAM parameters\n"
|
" --ssram: Congfiure Software SRAM parameters\n"
|
||||||
" --cpu_affinity: list of Service VM vCPUs assigned to this User VM, the vCPUs are"
|
" --cpu_affinity: list of Service VM vCPUs assigned to this User VM, the vCPUs are"
|
||||||
" identified by their local APIC IDs.\n"
|
" identified by their local APIC IDs.\n"
|
||||||
" --enable_trusty: enable trusty for guest\n"
|
" --enable_trusty: enable trusty for guest\n"
|
||||||
@ -178,7 +176,7 @@ usage(int code)
|
|||||||
" --cmd_monitor: enable command monitor\n"
|
" --cmd_monitor: enable command monitor\n"
|
||||||
" its params: unix domain socket path\n"
|
" its params: unix domain socket path\n"
|
||||||
" --virtio_poll: enable virtio poll mode with poll interval with ns\n"
|
" --virtio_poll: enable virtio poll mode with poll interval with ns\n"
|
||||||
" --acpidev_pt: ACPI device ID args: HID in ACPI Table\n"
|
" --acpidev_pt: acpi device ID args: HID in ACPI Table\n"
|
||||||
" --mmiodev_pt: MMIO resources args: physical MMIO regions\n"
|
" --mmiodev_pt: MMIO resources args: physical MMIO regions\n"
|
||||||
" --vtpm2: Virtual TPM2 args: sock_path=$PATH_OF_SWTPM_SOCKET\n"
|
" --vtpm2: Virtual TPM2 args: sock_path=$PATH_OF_SWTPM_SOCKET\n"
|
||||||
" --lapic_pt: enable local apic passthrough\n"
|
" --lapic_pt: enable local apic passthrough\n"
|
||||||
@ -234,12 +232,6 @@ virtio_uses_msix(void)
|
|||||||
return virtio_msix;
|
return virtio_msix;
|
||||||
}
|
}
|
||||||
|
|
||||||
int
|
|
||||||
guest_cpu_num(void)
|
|
||||||
{
|
|
||||||
return guest_ncpus;
|
|
||||||
}
|
|
||||||
|
|
||||||
size_t
|
size_t
|
||||||
high_bios_size(void)
|
high_bios_size(void)
|
||||||
{
|
{
|
||||||
@ -248,64 +240,6 @@ high_bios_size(void)
|
|||||||
return roundup2(size, 2 * MB);
|
return roundup2(size, 2 * MB);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
|
||||||
* set nice value of current pthread
|
|
||||||
* input range: [-20, 19]
|
|
||||||
* Lower priorities cause more favorable scheduling.
|
|
||||||
*/
|
|
||||||
void
|
|
||||||
set_thread_priority(int priority, bool reset_on_fork)
|
|
||||||
{
|
|
||||||
int ret, policy;
|
|
||||||
char tname[MAXCOMLEN + 1];
|
|
||||||
struct sched_param sp = { .sched_priority = 0 };
|
|
||||||
|
|
||||||
memset(tname, 0, sizeof(tname));
|
|
||||||
pthread_getname_np(pthread_self(), tname, sizeof(tname));
|
|
||||||
|
|
||||||
policy = sched_getscheduler(0);
|
|
||||||
if (policy == -1) {
|
|
||||||
pr_err("%s(%s), sched_getscheduler failed, errno = %d\n",
|
|
||||||
__func__, tname, errno);
|
|
||||||
}
|
|
||||||
|
|
||||||
if ((policy & SCHED_RESET_ON_FORK) && !reset_on_fork)
|
|
||||||
policy &= ~SCHED_RESET_ON_FORK;
|
|
||||||
else if (((policy & SCHED_RESET_ON_FORK) == 0) && reset_on_fork)
|
|
||||||
policy |= SCHED_RESET_ON_FORK;
|
|
||||||
|
|
||||||
ret = sched_setscheduler(0, policy, &sp);
|
|
||||||
if (ret == -1) {
|
|
||||||
pr_err("%s(%s), sched_setscheduler failed, errno = %d\n",
|
|
||||||
__func__, tname, errno);
|
|
||||||
}
|
|
||||||
|
|
||||||
errno = 0;
|
|
||||||
ret = getpriority(PRIO_PROCESS, 0);
|
|
||||||
if (errno && (ret == -1)) {
|
|
||||||
pr_err("%s(%s), getpriority failed, errno = %d\n",
|
|
||||||
__func__, tname, errno);
|
|
||||||
} else {
|
|
||||||
pr_info("%s(%s), orig prio = %d\n",
|
|
||||||
__func__, tname, ret);
|
|
||||||
}
|
|
||||||
|
|
||||||
ret = setpriority(PRIO_PROCESS, 0, priority);
|
|
||||||
if (ret) {
|
|
||||||
pr_err("%s(%s), setpriority failed, errno = %d\n",
|
|
||||||
__func__, tname, errno);
|
|
||||||
}
|
|
||||||
|
|
||||||
ret = getpriority(PRIO_PROCESS, 0);
|
|
||||||
if (ret != priority) {
|
|
||||||
pr_err("%s(%s), getpriority(%d) != setpriority(%d)\n",
|
|
||||||
__func__, tname, ret, priority);
|
|
||||||
} else {
|
|
||||||
pr_info("%s(%s), new priority = %d\n",
|
|
||||||
__func__, tname, ret);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
static void *
|
static void *
|
||||||
start_thread(void *param)
|
start_thread(void *param)
|
||||||
{
|
{
|
||||||
@ -675,8 +609,6 @@ vm_reset_vdevs(struct vmctx *ctx)
|
|||||||
pci_irq_deinit(ctx);
|
pci_irq_deinit(ctx);
|
||||||
ioapic_deinit();
|
ioapic_deinit();
|
||||||
|
|
||||||
iothread_deinit();
|
|
||||||
|
|
||||||
pci_irq_init(ctx);
|
pci_irq_init(ctx);
|
||||||
atkbdc_init(ctx);
|
atkbdc_init(ctx);
|
||||||
vrtc_init(ctx);
|
vrtc_init(ctx);
|
||||||
@ -742,7 +674,6 @@ vm_system_reset(struct vmctx *ctx)
|
|||||||
static void
|
static void
|
||||||
vm_suspend_resume(struct vmctx *ctx)
|
vm_suspend_resume(struct vmctx *ctx)
|
||||||
{
|
{
|
||||||
struct acrn_vcpu_regs bsp_regs;
|
|
||||||
/*
|
/*
|
||||||
* If we get warm reboot request, we don't want to exit the
|
* If we get warm reboot request, we don't want to exit the
|
||||||
* vcpu_loop/vm_loop/mevent_loop. So we do:
|
* vcpu_loop/vm_loop/mevent_loop. So we do:
|
||||||
@ -754,8 +685,6 @@ vm_suspend_resume(struct vmctx *ctx)
|
|||||||
* 6. hypercall restart vm
|
* 6. hypercall restart vm
|
||||||
*/
|
*/
|
||||||
vm_pause(ctx);
|
vm_pause(ctx);
|
||||||
if (ovmf_loaded)
|
|
||||||
vrtc_suspend(ctx);
|
|
||||||
|
|
||||||
vm_clear_ioreq(ctx);
|
vm_clear_ioreq(ctx);
|
||||||
vm_stop_watchdog(ctx);
|
vm_stop_watchdog(ctx);
|
||||||
@ -765,32 +694,8 @@ vm_suspend_resume(struct vmctx *ctx)
|
|||||||
vm_reset_watchdog(ctx);
|
vm_reset_watchdog(ctx);
|
||||||
vm_reset(ctx);
|
vm_reset(ctx);
|
||||||
|
|
||||||
bsp_regs = ctx->bsp_regs;
|
|
||||||
/* for bzImage or elf */
|
|
||||||
if (!ovmf_loaded) {
|
|
||||||
uint32_t *guest_wakeup_vec32;
|
|
||||||
/* 64BIT_WAKE_SUPPORTED_F is not set */
|
|
||||||
guest_wakeup_vec32 = paddr_guest2host(ctx,
|
|
||||||
get_acpi_wakingvector_offset(),
|
|
||||||
get_acpi_wakingvector_length());
|
|
||||||
/* set the BSP waking vector */
|
|
||||||
bsp_regs.vcpu_regs.cs_sel = (uint16_t)((*guest_wakeup_vec32 >> 4U) & 0xFFFFU);
|
|
||||||
bsp_regs.vcpu_regs.cs_base = bsp_regs.vcpu_regs.cs_sel << 4U;
|
|
||||||
/* real mode code segment */
|
|
||||||
bsp_regs.vcpu_regs.cs_ar = 0x009FU;
|
|
||||||
bsp_regs.vcpu_regs.cs_limit = 0xFFFFU;
|
|
||||||
bsp_regs.vcpu_regs.rip = 0x0U;
|
|
||||||
/* CR0_ET | CR0_NE */
|
|
||||||
bsp_regs.vcpu_regs.cr0 = 0x30;
|
|
||||||
/* real mode gdt */
|
|
||||||
bsp_regs.vcpu_regs.gdt.limit = 0xFFFFU;
|
|
||||||
bsp_regs.vcpu_regs.gdt.base = 0UL;
|
|
||||||
/* real mode idt */
|
|
||||||
bsp_regs.vcpu_regs.idt.limit = 0xFFFFU;
|
|
||||||
bsp_regs.vcpu_regs.idt.base = 0UL;
|
|
||||||
}
|
|
||||||
/* set the BSP init state */
|
/* set the BSP init state */
|
||||||
vm_set_vcpu_regs(ctx, &bsp_regs);
|
vm_set_vcpu_regs(ctx, &ctx->bsp_regs);
|
||||||
vm_run(ctx);
|
vm_run(ctx);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -830,7 +735,8 @@ vm_loop(struct vmctx *ctx)
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (VM_SUSPEND_SYSTEM_RESET == vm_get_suspend_mode()) {
|
/* RTVM can't be reset */
|
||||||
|
if ((VM_SUSPEND_SYSTEM_RESET == vm_get_suspend_mode()) && (!is_rtvm)) {
|
||||||
vm_system_reset(ctx);
|
vm_system_reset(ctx);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -946,7 +852,7 @@ vm_init_asyncio(struct vmctx *ctx, uint64_t base)
|
|||||||
sbuf->overrun_cnt = 0;
|
sbuf->overrun_cnt = 0;
|
||||||
sbuf->head = 0;
|
sbuf->head = 0;
|
||||||
sbuf->tail = 0;
|
sbuf->tail = 0;
|
||||||
return vm_setup_asyncio(ctx, base);
|
return vm_setup_sbuf(ctx, ACRN_ASYNCIO, base);
|
||||||
}
|
}
|
||||||
|
|
||||||
int
|
int
|
||||||
@ -1043,7 +949,6 @@ main(int argc, char *argv[])
|
|||||||
case CMD_OPT_OVMF:
|
case CMD_OPT_OVMF:
|
||||||
if (!vsbl_file_name && acrn_parse_ovmf(optarg) != 0)
|
if (!vsbl_file_name && acrn_parse_ovmf(optarg) != 0)
|
||||||
errx(EX_USAGE, "invalid ovmf param %s", optarg);
|
errx(EX_USAGE, "invalid ovmf param %s", optarg);
|
||||||
ovmf_loaded = true;
|
|
||||||
skip_pci_mem64bar_workaround = true;
|
skip_pci_mem64bar_workaround = true;
|
||||||
break;
|
break;
|
||||||
case CMD_OPT_IASL:
|
case CMD_OPT_IASL:
|
||||||
@ -1227,18 +1132,18 @@ main(int argc, char *argv[])
|
|||||||
goto mevent_fail;
|
goto mevent_fail;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
error = iothread_init();
|
||||||
|
if (error) {
|
||||||
|
pr_err("Unable to initialize iothread (%d)\n", errno);
|
||||||
|
goto iothread_fail;
|
||||||
|
}
|
||||||
|
|
||||||
pr_notice("vm_init_vdevs\n");
|
pr_notice("vm_init_vdevs\n");
|
||||||
if (vm_init_vdevs(ctx) < 0) {
|
if (vm_init_vdevs(ctx) < 0) {
|
||||||
pr_err("Unable to init vdev (%d)\n", errno);
|
pr_err("Unable to init vdev (%d)\n", errno);
|
||||||
goto dev_fail;
|
goto dev_fail;
|
||||||
}
|
}
|
||||||
|
|
||||||
pr_notice("vm setup vm event\n");
|
|
||||||
error = vm_event_init(ctx);
|
|
||||||
if (error) {
|
|
||||||
pr_warn("VM_EVENT is not supported by kernel or hyperviosr!\n");
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* build the guest tables, MP etc.
|
* build the guest tables, MP etc.
|
||||||
*/
|
*/
|
||||||
@ -1293,7 +1198,6 @@ main(int argc, char *argv[])
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
vm_event_deinit();
|
|
||||||
vm_deinit_vdevs(ctx);
|
vm_deinit_vdevs(ctx);
|
||||||
mevent_deinit();
|
mevent_deinit();
|
||||||
iothread_deinit();
|
iothread_deinit();
|
||||||
@ -1306,14 +1210,13 @@ main(int argc, char *argv[])
|
|||||||
}
|
}
|
||||||
|
|
||||||
vm_fail:
|
vm_fail:
|
||||||
vm_event_deinit();
|
|
||||||
|
|
||||||
vm_deinit_vdevs(ctx);
|
vm_deinit_vdevs(ctx);
|
||||||
if (ssram)
|
if (ssram)
|
||||||
clean_vssram_configs();
|
clean_vssram_configs();
|
||||||
|
|
||||||
dev_fail:
|
dev_fail:
|
||||||
iothread_deinit();
|
iothread_deinit();
|
||||||
|
iothread_fail:
|
||||||
mevent_deinit();
|
mevent_deinit();
|
||||||
mevent_fail:
|
mevent_fail:
|
||||||
vm_unsetup_memory(ctx);
|
vm_unsetup_memory(ctx);
|
||||||
|
@@ -72,7 +72,7 @@ struct mevent {
 };

 static LIST_HEAD(listhead, mevent) global_head;
-/* List holds the mevent node which is requested to be deleted */
+/* List holds the mevent node which is requested to deleted */
 static LIST_HEAD(del_listhead, mevent) del_head;

 static void
@@ -124,10 +124,10 @@ static void *intr_storm_monitor_thread(void *arg)
 	}

 	/*
-	 * calculate the delta of the two times count of interrupt;
-	 * compare the IRQ number first, if not same just drop it,
-	 * for it just happens rarely when devices dynamically
-	 * allocation in Service VM or User VM, it can be calculated next time
+	 * calc the delta of the two times count of interrupt;
+	 * compare the IRQ num first, if not same just drop it,
+	 * for it just happens rarelly when devices dynamically
+	 * allocation in Service VM or User VM, it can be calc next time
 	 */
 	for (i = 0; i < hdr->buf_cnt; i += 2) {
 		if (hdr->buffer[i] != intr_cnt_buf[i])
@@ -350,7 +350,7 @@ static void handle_stop(struct mngr_msg *msg, int client_fd, void *param)
 	ack.msgid = msg->msgid;
 	ack.timestamp = msg->timestamp;

-	if (msg->data.acrnd_stop.force) {
+	if (msg->data.acrnd_stop.force && !is_rtvm) {
 		pr_info("%s: setting VM state to %s\n", __func__, vm_state_to_str(VM_SUSPEND_POWEROFF));
 		vm_set_suspend_mode(VM_SUSPEND_POWEROFF);
 		ack.data.err = 0;
@@ -1,125 +0,0 @@
-/*
- * Copyright (C) 2018-2023 Intel Corporation.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <asm/errno.h>
-#include <string.h>
-#include <unistd.h>
-#include <stdio.h>
-#include <stdbool.h>
-#include "sbuf.h"
-#include <errno.h>
-
-static inline uint32_t sbuf_next_ptr(uint32_t pos_arg,
-		uint32_t span, uint32_t scope)
-{
-	uint32_t pos = pos_arg;
-	pos += span;
-	pos = (pos >= scope) ? (pos - scope) : pos;
-	return pos;
-}
-
-uint32_t sbuf_get(struct shared_buf *sbuf, uint8_t *data)
-{
-	const void *from;
-
-	if ((sbuf == NULL) || (data == NULL))
-		return -EINVAL;
-
-	if (sbuf_is_empty(sbuf)) {
-		/* no data available */
-		return 0;
-	}
-
-	from = (void *)sbuf + SBUF_HEAD_SIZE + sbuf->head;
-
-	memcpy(data, from, sbuf->ele_size);
-
-	mb();
-
-	sbuf->head = sbuf_next_ptr(sbuf->head, sbuf->ele_size, sbuf->size);
-
-	return sbuf->ele_size;
-}
-
-int sbuf_clear_buffered(struct shared_buf *sbuf)
-{
-	if (sbuf == NULL)
-		return -EINVAL;
-
-	sbuf->head = sbuf->tail;
-
-	return 0;
-}
-
-/**
- * The high caller should guarantee each time there must have
- * sbuf->ele_size data can be write form data.
- * Caller should provide the max length of the data for safety reason.
- *
- * And this function should guarantee execution atomically.
- *
- * flag:
- * If OVERWRITE_EN set, buf can store (ele_num - 1) elements at most.
- * Should use lock to guarantee that only one read or write at
- * the same time.
- * if OVERWRITE_EN not set, buf can store (ele_num - 1) elements
- * at most. Shouldn't modify the sbuf->head.
- *
- * return:
- * ele_size: write succeeded.
- * 0: no write, buf is full
- * UINT32_MAX: failed, sbuf corrupted.
- */
-uint32_t sbuf_put(struct shared_buf *sbuf, uint8_t *data, uint32_t max_len)
-{
-	uint32_t ele_size = sbuf->ele_size;
-	void *to;
-	uint32_t next_tail;
-	uint32_t ret;
-	bool trigger_overwrite = false;
-
-	next_tail = sbuf_next_ptr(sbuf->tail, ele_size, sbuf->size);
-
-	if ((next_tail == sbuf->head) && ((sbuf->flags & OVERWRITE_EN) == 0U)) {
-		/* if overrun is not enabled, return 0 directly */
-		ret = 0U;
-	} else if (ele_size <= max_len) {
-		if (next_tail == sbuf->head) {
-			/* accumulate overrun count if necessary */
-			sbuf->overrun_cnt += sbuf->flags & OVERRUN_CNT_EN;
-			trigger_overwrite = true;
-		}
-		to = (void *)sbuf + SBUF_HEAD_SIZE + sbuf->tail;
-
-		memcpy(to, data, ele_size);
-		/* make sure write data before update head */
-		mb();
-
-		if (trigger_overwrite) {
-			sbuf->head = sbuf_next_ptr(sbuf->head,
-					ele_size, sbuf->size);
-		}
-		sbuf->tail = next_tail;
-		ret = ele_size;
-	} else {
-		/* there must be something wrong */
-		ret = UINT32_MAX;
-	}
-
-	return ret;
-}
-
-void sbuf_init(struct shared_buf *sbuf, uint32_t total_size, uint32_t ele_size)
-{
-	sbuf->magic = SBUF_MAGIC;
-	sbuf->ele_size = ele_size;
-	sbuf->ele_num = (total_size - SBUF_HEAD_SIZE) / sbuf->ele_size;
-	sbuf->size = sbuf->ele_size * sbuf->ele_num;
-	sbuf->flags = 0;
-	sbuf->overrun_cnt = 0;
-	sbuf->head = 0;
-	sbuf->tail = 0;
-}
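For readers unfamiliar with the API the removed sbuf.c provided, here is a minimal, illustrative sketch (not taken from the tree) of how the producer/consumer calls shown above fit together. The page buffer and element size are arbitrary example values; only sbuf_init(), sbuf_put(), and sbuf_get() from the deleted file are assumed.

/* sbuf_demo.c -- illustrative only; relies on the sbuf.h API removed above. */
#include <stdint.h>
#include "sbuf.h"    /* struct shared_buf, sbuf_init/sbuf_put/sbuf_get */

static char page[4096] __attribute__((aligned(4096)));

void sbuf_demo(void)
{
	struct shared_buf *sb = (struct shared_buf *)page;
	uint8_t rec[32] = { 0 };   /* one fixed-size element */
	uint8_t out[32];

	/* carve the page into fixed-size slots; head == tail means empty */
	sbuf_init(sb, sizeof(page), sizeof(rec));

	/* producer side: returns ele_size on success, 0 when the ring is full */
	(void)sbuf_put(sb, rec, sizeof(rec));

	/* consumer side: returns ele_size, or 0 when there is nothing to read */
	(void)sbuf_get(sb, out);
}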
@@ -297,7 +297,7 @@ acrn_sw_load(struct vmctx *ctx)
 {
 	if (vsbl_file_name)
 		return acrn_sw_load_vsbl(ctx);
-	else if (ovmf_loaded)
+	else if ((ovmf_file_name != NULL) ^ (ovmf_code_file_name && ovmf_vars_file_name))
 		return acrn_sw_load_ovmf(ctx);
 	else if (kernel_file_name)
 		return acrn_sw_load_bzimage(ctx);
||||||
|
@ -1,510 +0,0 @@
|
|||||||
/*
|
|
||||||
* Copyright (C) 2018-2023 Intel Corporation.
|
|
||||||
* SPDX-License-Identifier: BSD-3-Clause
|
|
||||||
*/
|
|
||||||
|
|
||||||
#include <errno.h>
|
|
||||||
#include <stdlib.h>
|
|
||||||
#include <stdio.h>
|
|
||||||
#include <stdbool.h>
|
|
||||||
#include <fcntl.h>
|
|
||||||
#include <unistd.h>
|
|
||||||
#include <sys/epoll.h>
|
|
||||||
#include <sys/queue.h>
|
|
||||||
#include <pthread.h>
|
|
||||||
#include <sys/ioctl.h>
|
|
||||||
#include <sys/eventfd.h>
|
|
||||||
#include <acrn_common.h>
|
|
||||||
|
|
||||||
#include "vm_event.h"
|
|
||||||
#include "hsm_ioctl_defs.h"
|
|
||||||
#include "sbuf.h"
|
|
||||||
#include "log.h"
|
|
||||||
#include <cjson/cJSON.h>
|
|
||||||
#include "monitor.h"
|
|
||||||
#include "timer.h"
|
|
||||||
|
|
||||||
#define VM_EVENT_ELE_SIZE (sizeof(struct vm_event))
|
|
||||||
|
|
||||||
#define HV_VM_EVENT_TUNNEL 0
|
|
||||||
#define DM_VM_EVENT_TUNNEL 1
|
|
||||||
#define MAX_VM_EVENT_TUNNELS 2
|
|
||||||
#define MAX_EPOLL_EVENTS MAX_VM_EVENT_TUNNELS
|
|
||||||
|
|
||||||
#define THROTTLE_WINDOW 1U /* time window for throttle counter, in secs*/
|
|
||||||
|
|
||||||
#define BROKEN_TIME ((time_t)-1)
|
|
||||||
|
|
||||||
typedef void (*vm_event_handler)(struct vmctx *ctx, struct vm_event *event);
|
|
||||||
typedef void (*vm_event_generate_jdata)(cJSON *event_obj, struct vm_event *event);
|
|
||||||
|
|
||||||
static int epoll_fd;
|
|
||||||
static bool started = false;
|
|
||||||
static char hv_vm_event_page[4096] __aligned(4096);
|
|
||||||
static char dm_vm_event_page[4096] __aligned(4096);
|
|
||||||
static pthread_t vm_event_tid;
|
|
||||||
|
|
||||||
static void general_event_handler(struct vmctx *ctx, struct vm_event *event);
|
|
||||||
static void rtc_chg_event_handler(struct vmctx *ctx, struct vm_event *event);
|
|
||||||
|
|
||||||
static void gen_rtc_chg_jdata(cJSON *event_obj, struct vm_event *event);
|
|
||||||
|
|
||||||
enum event_source_type {
|
|
||||||
EVENT_SOURCE_TYPE_HV,
|
|
||||||
EVENT_SOURCE_TYPE_DM,
|
|
||||||
};
|
|
||||||
|
|
||||||
struct vm_event_tunnel {
|
|
||||||
enum event_source_type type;
|
|
||||||
struct shared_buf *sbuf;
|
|
||||||
uint32_t sbuf_size;
|
|
||||||
int kick_fd;
|
|
||||||
pthread_mutex_t mtx;
|
|
||||||
bool enabled;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct event_throttle_ctl {
|
|
||||||
struct acrn_timer timer;
|
|
||||||
pthread_mutex_t mtx;
|
|
||||||
uint32_t event_counter;
|
|
||||||
uint32_t throttle_count; /* how many events has been throttled(dropped) */
|
|
||||||
bool is_up;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct vm_event_proc {
|
|
||||||
vm_event_handler ve_handler;
|
|
||||||
uint32_t throttle_rate; /* how many events allowed per sec */
|
|
||||||
struct event_throttle_ctl throttle_ctl;
|
|
||||||
vm_event_generate_jdata gen_jdata_handler; /* how to transtfer vm_event data to json txt */
|
|
||||||
};
|
|
||||||
|
|
||||||
static struct vm_event_proc ve_proc[VM_EVENT_COUNT] = {
|
|
||||||
[VM_EVENT_RTC_CHG] = {
|
|
||||||
.ve_handler = rtc_chg_event_handler,
|
|
||||||
.gen_jdata_handler = gen_rtc_chg_jdata,
|
|
||||||
.throttle_rate = 1,
|
|
||||||
},
|
|
||||||
[VM_EVENT_POWEROFF] = {
|
|
||||||
.ve_handler = general_event_handler,
|
|
||||||
.gen_jdata_handler = NULL,
|
|
||||||
.throttle_rate = 1,
|
|
||||||
},
|
|
||||||
[VM_EVENT_TRIPLE_FAULT] = {
|
|
||||||
.ve_handler = general_event_handler,
|
|
||||||
.gen_jdata_handler = NULL,
|
|
||||||
.throttle_rate = 1,
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
static inline struct vm_event_proc *get_vm_event_proc(struct vm_event *event)
|
|
||||||
{
|
|
||||||
struct vm_event_proc *proc = NULL;
|
|
||||||
if (event->type < VM_EVENT_COUNT) {
|
|
||||||
proc = &ve_proc[event->type];
|
|
||||||
}
|
|
||||||
return proc;
|
|
||||||
}
|
|
||||||
|
|
||||||
static bool event_throttle(struct vm_event *event)
|
|
||||||
{
|
|
||||||
struct vm_event_proc *proc;
|
|
||||||
struct event_throttle_ctl *ctl;
|
|
||||||
uint32_t current_rate;
|
|
||||||
bool ret = false;
|
|
||||||
|
|
||||||
proc = get_vm_event_proc(event);
|
|
||||||
if (proc) {
|
|
||||||
ctl = &proc->throttle_ctl;
|
|
||||||
if (ctl->is_up) {
|
|
||||||
pthread_mutex_lock(&ctl->mtx);
|
|
||||||
current_rate = ctl->event_counter / THROTTLE_WINDOW;
|
|
||||||
if (current_rate < proc->throttle_rate) {
|
|
||||||
ctl->event_counter++;
|
|
||||||
ret = false;
|
|
||||||
} else {
|
|
||||||
ret = true;
|
|
||||||
ctl->throttle_count++;
|
|
||||||
pr_notice("event %d throttle: %d dropped\n",
|
|
||||||
event->type, ctl->throttle_count);
|
|
||||||
}
|
|
||||||
pthread_mutex_unlock(&ctl->mtx);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
void throttle_timer_cb(void *arg, uint64_t nexp)
|
|
||||||
{
|
|
||||||
struct event_throttle_ctl *ctl = (struct event_throttle_ctl *)arg;
|
|
||||||
pthread_mutex_lock(&ctl->mtx);
|
|
||||||
ctl->event_counter = 0;
|
|
||||||
pthread_mutex_unlock(&ctl->mtx);
|
|
||||||
}
|
|
||||||
|
|
||||||
static void vm_event_throttle_init(struct vmctx *ctx)
|
|
||||||
{
|
|
||||||
int i;
|
|
||||||
struct event_throttle_ctl *ctl;
|
|
||||||
int ret = 0;
|
|
||||||
struct itimerspec timer_spec;
|
|
||||||
|
|
||||||
for (i = 0; i < ARRAY_SIZE(ve_proc); i++) {
|
|
||||||
ctl = &ve_proc[i].throttle_ctl;
|
|
||||||
ctl->event_counter = 0U;
|
|
||||||
ctl->throttle_count = 0U;
|
|
||||||
ctl->is_up = false;
|
|
||||||
pthread_mutex_init(&ctl->mtx, NULL);
|
|
||||||
ctl->timer.clockid = CLOCK_MONOTONIC;
|
|
||||||
ret = acrn_timer_init(&ctl->timer, throttle_timer_cb, ctl);
|
|
||||||
if (ret < 0) {
|
|
||||||
pr_warn("failed to create timer for vm_event %d, throttle disabled\n", i);
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
timer_spec.it_value.tv_sec = THROTTLE_WINDOW;
|
|
||||||
timer_spec.it_value.tv_nsec = 0;
|
|
||||||
timer_spec.it_interval.tv_sec = THROTTLE_WINDOW;
|
|
||||||
timer_spec.it_interval.tv_nsec = 0;
|
|
||||||
ret = acrn_timer_settime(&ctl->timer, &timer_spec);
|
|
||||||
if (ret < 0) {
|
|
||||||
pr_warn("failed to set timer for vm_event %d, throttle disabled\n", i);
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
ctl->is_up = true;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
static void vm_event_throttle_deinit(void)
|
|
||||||
{
|
|
||||||
int i;
|
|
||||||
struct event_throttle_ctl *ctl;
|
|
||||||
|
|
||||||
for (i = 0; i < ARRAY_SIZE(ve_proc); i++) {
|
|
||||||
ctl = &ve_proc[i].throttle_ctl;
|
|
||||||
if (ctl->timer.fd != -1) {
|
|
||||||
acrn_timer_deinit(&ctl->timer);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
static char *generate_vm_event_message(struct vm_event *event)
|
|
||||||
{
|
|
||||||
char *event_msg = NULL;
|
|
||||||
cJSON *val;
|
|
||||||
cJSON *event_obj = cJSON_CreateObject();
|
|
||||||
struct vm_event_proc *proc;
|
|
||||||
|
|
||||||
if (event_obj == NULL)
|
|
||||||
return NULL;
|
|
||||||
val = cJSON_CreateNumber(event->type);
|
|
||||||
if (val == NULL)
|
|
||||||
return NULL;
|
|
||||||
cJSON_AddItemToObject(event_obj, "vm_event", val);
|
|
||||||
|
|
||||||
proc = get_vm_event_proc(event);
|
|
||||||
if (proc && proc->gen_jdata_handler) {
|
|
||||||
(proc->gen_jdata_handler)(event_obj, event);
|
|
||||||
}
|
|
||||||
|
|
||||||
event_msg = cJSON_Print(event_obj);
|
|
||||||
if (event_msg == NULL)
|
|
||||||
fprintf(stderr, "Failed to generate vm_event message.\n");
|
|
||||||
|
|
||||||
cJSON_Delete(event_obj);
|
|
||||||
|
|
||||||
return event_msg;
|
|
||||||
}
|
|
||||||
|
|
||||||
static void emit_vm_event(struct vmctx *ctx, struct vm_event *event)
|
|
||||||
{
|
|
||||||
if (!event_throttle(event)) {
|
|
||||||
char *msg = generate_vm_event_message(event);
|
|
||||||
if (msg != NULL) {
|
|
||||||
vm_monitor_send_vm_event(msg);
|
|
||||||
free(msg);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
static void general_event_handler(struct vmctx *ctx, struct vm_event *event)
|
|
||||||
{
|
|
||||||
emit_vm_event(ctx, event);
|
|
||||||
}
|
|
||||||
|
|
||||||
static void gen_rtc_chg_jdata(cJSON *event_obj, struct vm_event *event)
|
|
||||||
{
|
|
||||||
struct rtc_change_event_data *data = (struct rtc_change_event_data *)event->event_data;
|
|
||||||
cJSON *val;
|
|
||||||
|
|
||||||
val = cJSON_CreateNumber(data->delta_time);
|
|
||||||
if (val != NULL) {
|
|
||||||
cJSON_AddItemToObject(event_obj, "delta_time", val);
|
|
||||||
}
|
|
||||||
val = cJSON_CreateNumber(data->last_time);
|
|
||||||
if (val != NULL) {
|
|
||||||
cJSON_AddItemToObject(event_obj, "last_time", val);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/* assume we only have one unique rtc source */
|
|
||||||
|
|
||||||
static struct acrn_timer rtc_chg_event_timer = {
|
|
||||||
.clockid = CLOCK_MONOTONIC,
|
|
||||||
};
|
|
||||||
static pthread_mutex_t rtc_chg_mutex = PTHREAD_MUTEX_INITIALIZER;
|
|
||||||
static struct timespec time_window_start;
|
|
||||||
static time_t last_time_cached = BROKEN_TIME;
|
|
||||||
static time_t delta_time_sum = 0;
|
|
||||||
#define RTC_CHG_WAIT_TIME 1 /* 1 second */
|
|
||||||
static void rtc_chg_event_handler(struct vmctx *ctx, struct vm_event *event)
|
|
||||||
{
|
|
||||||
struct itimerspec timer_spec;
|
|
||||||
struct rtc_change_event_data *data = (struct rtc_change_event_data *)event->event_data;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* RTC time is not reliable until guest finishes updating all RTC date/time regs.
|
|
||||||
* So wait for some time, if no more change happens, we can conclude that the RTC
|
|
||||||
* change has been done.
|
|
||||||
*/
|
|
||||||
timer_spec.it_value.tv_sec = RTC_CHG_WAIT_TIME;
|
|
||||||
timer_spec.it_value.tv_nsec = 0;
|
|
||||||
timer_spec.it_interval.tv_sec = 0;
|
|
||||||
timer_spec.it_interval.tv_nsec = 0;
|
|
||||||
pthread_mutex_lock(&rtc_chg_mutex);
|
|
||||||
if (last_time_cached == BROKEN_TIME) {
|
|
||||||
last_time_cached = data->last_time;
|
|
||||||
}
|
|
||||||
delta_time_sum += data->delta_time;
|
|
||||||
/* The last timer will be overwriten if it is not triggered yet. */
|
|
||||||
acrn_timer_settime(&rtc_chg_event_timer, &timer_spec);
|
|
||||||
clock_gettime(CLOCK_MONOTONIC, &time_window_start);
|
|
||||||
pthread_mutex_unlock(&rtc_chg_mutex);
|
|
||||||
}
|
|
||||||
|
|
||||||
static void rtc_chg_timer_cb(void *arg, uint64_t nexp)
|
|
||||||
{
|
|
||||||
struct timespec now, delta;
|
|
||||||
struct timespec time_window_size = {RTC_CHG_WAIT_TIME, 0};
|
|
||||||
struct vmctx *ctx = arg;
|
|
||||||
struct vm_event send_event;
|
|
||||||
struct rtc_change_event_data *data = (struct rtc_change_event_data *)send_event.event_data;
|
|
||||||
|
|
||||||
pthread_mutex_lock(&rtc_chg_mutex);
|
|
||||||
clock_gettime(CLOCK_MONOTONIC, &now);
|
|
||||||
delta = now;
|
|
||||||
timespecsub(&delta, &time_window_start);
|
|
||||||
/* possible racing problem here. make sure this is the right timer cb for the vm_event */
|
|
||||||
if (timespeccmp(&delta, &time_window_size, >=)) {
|
|
||||||
data->delta_time = delta_time_sum;
|
|
||||||
data->last_time = last_time_cached;
|
|
||||||
emit_vm_event(ctx, &send_event);
|
|
||||||
last_time_cached = BROKEN_TIME;
|
|
||||||
delta_time_sum = 0;
|
|
||||||
}
|
|
||||||
pthread_mutex_unlock(&rtc_chg_mutex);
|
|
||||||
}
|
|
||||||
|
|
||||||
static void *vm_event_thread(void *param)
|
|
||||||
{
|
|
||||||
int n, i;
|
|
||||||
struct vm_event ve;
|
|
||||||
eventfd_t val;
|
|
||||||
struct vm_event_tunnel *tunnel;
|
|
||||||
struct vmctx *ctx = param;
|
|
||||||
|
|
||||||
struct epoll_event eventlist[MAX_EPOLL_EVENTS];
|
|
||||||
|
|
||||||
while (started) {
|
|
||||||
n = epoll_wait(epoll_fd, eventlist, MAX_EPOLL_EVENTS, -1);
|
|
||||||
if (n < 0) {
|
|
||||||
if (errno != EINTR) {
|
|
||||||
pr_err("%s: epoll failed %d\n", __func__, errno);
|
|
||||||
}
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
for (i = 0; i < n; i++) {
|
|
||||||
if (i < MAX_EPOLL_EVENTS) {
|
|
||||||
tunnel = eventlist[i].data.ptr;
|
|
||||||
eventfd_read(tunnel->kick_fd, &val);
|
|
||||||
if (tunnel && tunnel->enabled) {
|
|
||||||
while (!sbuf_is_empty(tunnel->sbuf)) {
|
|
||||||
struct vm_event_proc *proc;
|
|
||||||
sbuf_get(tunnel->sbuf, (uint8_t*)&ve);
|
|
||||||
pr_dbg("%ld vm event from%d %d\n", val, tunnel->type, ve.type);
|
|
||||||
proc = get_vm_event_proc(&ve);
|
|
||||||
if (proc && proc->ve_handler) {
|
|
||||||
(proc->ve_handler)(ctx, &ve);
|
|
||||||
} else {
|
|
||||||
pr_warn("%s: unhandled vm event type %d\n", __func__, ve.type);
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
static struct vm_event_tunnel ve_tunnel[MAX_VM_EVENT_TUNNELS] = {
|
|
||||||
{
|
|
||||||
.type = EVENT_SOURCE_TYPE_HV,
|
|
||||||
.sbuf = (struct shared_buf *)hv_vm_event_page,
|
|
||||||
.sbuf_size = 4096,
|
|
||||||
.enabled = false,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
.type = EVENT_SOURCE_TYPE_DM,
|
|
||||||
.sbuf = (struct shared_buf *)dm_vm_event_page,
|
|
||||||
.sbuf_size = 4096,
|
|
||||||
.enabled = false,
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
static int create_event_tunnel(struct vmctx *ctx, struct vm_event_tunnel *tunnel, int epoll_fd)
|
|
||||||
{
|
|
||||||
struct epoll_event ev;
|
|
||||||
enum event_source_type type = tunnel->type;
|
|
||||||
struct shared_buf *sbuf = tunnel->sbuf;
|
|
||||||
int kick_fd = -1;
|
|
||||||
int error;
|
|
||||||
|
|
||||||
sbuf_init(sbuf, tunnel->sbuf_size, VM_EVENT_ELE_SIZE);
|
|
||||||
|
|
||||||
if (type == EVENT_SOURCE_TYPE_HV) {
|
|
||||||
error = ioctl(ctx->fd, ACRN_IOCTL_SETUP_VM_EVENT_RING, sbuf);
|
|
||||||
if (error) {
|
|
||||||
pr_err("%s: Setting vm_event ring failed %d\n", __func__, error);
|
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
kick_fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
|
|
||||||
if (kick_fd < 0) {
|
|
||||||
pr_err("%s: eventfd failed %d\n", __func__, errno);
|
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (type == EVENT_SOURCE_TYPE_HV) {
|
|
||||||
error = ioctl(ctx->fd, ACRN_IOCTL_SETUP_VM_EVENT_FD, kick_fd);
|
|
||||||
if (error) {
|
|
||||||
pr_err("%s: Setting vm_event fd failed %d\n", __func__, error);
|
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
ev.events = EPOLLIN;
|
|
||||||
ev.data.ptr = tunnel;
|
|
||||||
error = epoll_ctl(epoll_fd, EPOLL_CTL_ADD, kick_fd, &ev);
|
|
||||||
if (error < 0) {
|
|
||||||
pr_err("%s: failed to add fd, error is %d\n", __func__, errno);
|
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
|
|
||||||
tunnel->kick_fd = kick_fd;
|
|
||||||
pthread_mutex_init(&tunnel->mtx, NULL);
|
|
||||||
tunnel->enabled = true;
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
|
|
||||||
out:
|
|
||||||
if (kick_fd >= 0) {
|
|
||||||
close(kick_fd);
|
|
||||||
}
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
|
|
||||||
void destory_event_tunnel(struct vm_event_tunnel *tunnel)
|
|
||||||
{
|
|
||||||
if (tunnel->enabled) {
|
|
||||||
close(tunnel->kick_fd);
|
|
||||||
tunnel->enabled = false;
|
|
||||||
pthread_mutex_destroy(&tunnel->mtx);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
int vm_event_init(struct vmctx *ctx)
|
|
||||||
{
|
|
||||||
int error;
|
|
||||||
|
|
||||||
epoll_fd = epoll_create1(0);
|
|
||||||
if (epoll_fd < 0) {
|
|
||||||
pr_err("%s: failed to create epoll %d\n", __func__, errno);
|
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
|
|
||||||
error = create_event_tunnel(ctx, &ve_tunnel[HV_VM_EVENT_TUNNEL], epoll_fd);
|
|
||||||
if (error) {
|
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
|
|
||||||
error = create_event_tunnel(ctx, &ve_tunnel[DM_VM_EVENT_TUNNEL], epoll_fd);
|
|
||||||
if (error) {
|
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
|
|
||||||
vm_event_throttle_init(ctx);
|
|
||||||
|
|
||||||
error = pthread_create(&vm_event_tid, NULL, vm_event_thread, ctx);
|
|
||||||
if (error) {
|
|
||||||
pr_err("%s: vm_event create failed %d\n", __func__, errno);
|
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
|
|
||||||
acrn_timer_init(&rtc_chg_event_timer, rtc_chg_timer_cb, ctx);
|
|
||||||
|
|
||||||
started = true;
|
|
||||||
return 0;
|
|
||||||
|
|
||||||
out:
|
|
||||||
if (epoll_fd >= 0) {
|
|
||||||
close(epoll_fd);
|
|
||||||
}
|
|
||||||
destory_event_tunnel(&ve_tunnel[HV_VM_EVENT_TUNNEL]);
|
|
||||||
destory_event_tunnel(&ve_tunnel[DM_VM_EVENT_TUNNEL]);
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
|
|
||||||
int vm_event_deinit(void)
|
|
||||||
{
|
|
||||||
void *jval;
|
|
||||||
|
|
||||||
if (started) {
|
|
||||||
started = false;
|
|
||||||
vm_event_throttle_deinit();
|
|
||||||
pthread_kill(vm_event_tid, SIGCONT);
|
|
||||||
pthread_join(vm_event_tid, &jval);
|
|
||||||
close(epoll_fd);
|
|
||||||
destory_event_tunnel(&ve_tunnel[HV_VM_EVENT_TUNNEL]);
|
|
||||||
destory_event_tunnel(&ve_tunnel[DM_VM_EVENT_TUNNEL]);
|
|
||||||
}
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Send a dm generated vm_event by putting it to sbuf.
|
|
||||||
* A thread will receive and process those events.
|
|
||||||
* Events will be dropped if sbuf is full.
|
|
||||||
* They also maight be dropped due to event throttle control in receive thread.
|
|
||||||
*/
|
|
||||||
int dm_send_vm_event(struct vm_event *event)
|
|
||||||
{
|
|
||||||
struct vm_event_tunnel *tunnel = &ve_tunnel[DM_VM_EVENT_TUNNEL];
|
|
||||||
struct shared_buf *sbuf;
|
|
||||||
int32_t ret = -1;
|
|
||||||
uint32_t size_sent;
|
|
||||||
|
|
||||||
if (!tunnel->enabled) {
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
sbuf = tunnel->sbuf;
|
|
||||||
|
|
||||||
if (sbuf != NULL) {
|
|
||||||
pthread_mutex_lock(&tunnel->mtx);
|
|
||||||
size_sent = sbuf_put(sbuf, (uint8_t *)event, sizeof(*event));
|
|
||||||
pthread_mutex_unlock(&tunnel->mtx);
|
|
||||||
if (size_sent == VM_EVENT_ELE_SIZE) {
|
|
||||||
eventfd_write(tunnel->kick_fd, 1UL);
|
|
||||||
ret = 0;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return ret;
|
|
||||||
}
|
|
@@ -295,14 +295,19 @@ vm_destroy(struct vmctx *ctx)
 }

 int
-vm_setup_asyncio(struct vmctx *ctx, uint64_t base)
+vm_setup_sbuf(struct vmctx *ctx, uint32_t sbuf_id, uint64_t base)
 {
 	int error;
+	struct acrn_sbuf sbuf_param;

-	error = ioctl(ctx->fd, ACRN_IOCTL_SETUP_ASYNCIO, base);
+	bzero(&sbuf_param, sizeof(sbuf_param));
+	sbuf_param.sbuf_id = sbuf_id;
+	sbuf_param.base = base;
+
+	error = ioctl(ctx->fd, ACRN_IOCTL_SETUP_SBUF, &sbuf_param);

 	if (error) {
-		pr_err("ACRN_IOCTL_SETUP_ASYNCIO ioctl() returned an error: %s\n", errormsg(errno));
+		pr_err("ACRN_IOCTL_SBUF_PAGE ioctl() returned an error: %s\n", errormsg(errno));
 	}

 	return error;
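A hedged sketch of what the hunk above implies for callers: where the device model previously registered a shared page with vm_setup_asyncio(), it now also passes an explicit buffer ID. EXAMPLE_SBUF_ID below is a made-up placeholder, not an identifier from acrn_common.h.

/* Illustrative only -- the ID constant is hypothetical. */
#define EXAMPLE_SBUF_ID 0U

static int setup_shared_ring(struct vmctx *ctx, uint64_t ring_gpa)
{
	/* old interface: return vm_setup_asyncio(ctx, ring_gpa); */
	return vm_setup_sbuf(ctx, EXAMPLE_SBUF_ID, ring_gpa);
}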
@@ -2405,7 +2405,7 @@ pci_ahci_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts, int atapi)
 		 */
 		snprintf(bident, sizeof(bident), "%02x:%02x:%02x", dev->slot,
			 dev->func, p);
-		bctxt = blockif_open(opts, bident, 1, NULL);
+		bctxt = blockif_open(opts, bident);
 		if (bctxt == NULL) {
 			ahci_dev->ports = p;
 			ret = 1;
@@ -1918,29 +1918,31 @@ pci_bus_write_dsdt(int bus)
 	dsdt_line(" ,, , AddressRangeMemory, TypeStatic)");
 	dsdt_line(" })");

-	count = pci_count_lintr(bus);
-	if (count != 0) {
-		dsdt_indent(2);
-		dsdt_line("Name (PPRT, Package ()");
-		dsdt_line("{");
-		pci_walk_lintr(bus, pci_pirq_prt_entry, NULL);
-		dsdt_line("})");
-		dsdt_line("Name (APRT, Package ()");
-		dsdt_line("{");
-		pci_walk_lintr(bus, pci_apic_prt_entry, NULL);
-		dsdt_line("})");
-		dsdt_line("Method (_PRT, 0, NotSerialized)");
-		dsdt_line("{");
-		dsdt_line(" If (PICM)");
-		dsdt_line(" {");
-		dsdt_line(" Return (APRT)");
-		dsdt_line(" }");
-		dsdt_line(" Else");
-		dsdt_line(" {");
-		dsdt_line(" Return (PPRT)");
-		dsdt_line(" }");
-		dsdt_line("}");
-		dsdt_unindent(2);
-	}
+	if (!is_rtvm) {
+		count = pci_count_lintr(bus);
+		if (count != 0) {
+			dsdt_indent(2);
+			dsdt_line("Name (PPRT, Package ()");
+			dsdt_line("{");
+			pci_walk_lintr(bus, pci_pirq_prt_entry, NULL);
+			dsdt_line("})");
+			dsdt_line("Name (APRT, Package ()");
+			dsdt_line("{");
+			pci_walk_lintr(bus, pci_apic_prt_entry, NULL);
+			dsdt_line("})");
+			dsdt_line("Method (_PRT, 0, NotSerialized)");
+			dsdt_line("{");
+			dsdt_line(" If (PICM)");
+			dsdt_line(" {");
+			dsdt_line(" Return (APRT)");
+			dsdt_line(" }");
+			dsdt_line(" Else");
+			dsdt_line(" {");
+			dsdt_line(" Return (PPRT)");
+			dsdt_line(" }");
+			dsdt_line("}");
+			dsdt_unindent(2);
+		}
+	}

 	dsdt_indent(2);
@@ -2010,6 +2012,8 @@ pci_msix_enabled(struct pci_vdev *dev)
  *
  * @param dev Pointer to struct pci_vdev representing virtual PCI device.
  * @param index MSIx table entry index.
+ *
+ * @return None
  */
 void
 pci_generate_msix(struct pci_vdev *dev, int index)
@@ -2037,6 +2041,8 @@ pci_generate_msix(struct pci_vdev *dev, int index)
  *
  * @param dev Pointer to struct pci_vdev representing virtual PCI device.
  * @param index Message data index.
+ *
+ * @return None
  */
 void
 pci_generate_msi(struct pci_vdev *dev, int index)
@@ -2157,6 +2163,8 @@ pci_lintr_route(struct pci_vdev *dev)
 * @brief Assert INTx pin of virtual PCI device
 *
 * @param dev Pointer to struct pci_vdev representing virtual PCI device.
+ *
+ * @return None
 */
 void
 pci_lintr_assert(struct pci_vdev *dev)
@@ -2181,6 +2189,8 @@ pci_lintr_assert(struct pci_vdev *dev)
 * @brief Deassert INTx pin of virtual PCI device
 *
 * @param dev Pointer to struct pci_vdev representing virtual PCI device.
+ *
+ * @return None
 */
 void
 pci_lintr_deassert(struct pci_vdev *dev)
@@ -53,7 +53,6 @@
 #define IVSHMEM_DEVICE_ID 0x1110
 #define IVSHMEM_CLASS 0x05
 #define IVSHMEM_REV 0x01
-#define IVSHMEM_INTEL_SUBVENDOR_ID 0x8086U


 /* IVSHMEM MMIO Registers */
@@ -250,13 +249,13 @@ pci_ivshmem_read(struct vmctx *ctx, int vcpu, struct pci_vdev *dev,
 static int
 pci_ivshmem_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
 {
-	uint32_t size, region_id = 0;
-	char *tmp, *name, *size_str, *orig;
+	uint32_t size;
+	char *tmp, *name, *orig;
 	struct pci_ivshmem_vdev *ivshmem_vdev = NULL;
 	bool is_hv_land;
 	int rc;

-	/* ivshmem device usage: "-s N,ivshmem,shm_name,shm_size,region_id" */
+	/* ivshmem device usage: "-s N,ivshmem,shm_name,shm_size" */
 	tmp = orig = strdup(opts);
 	if (!orig) {
 		pr_warn("No memory for strdup\n");
@@ -278,9 +277,8 @@ pci_ivshmem_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
 		goto err;
 	}

-	size_str = strsep(&tmp, ",");
-	if (dm_strtoui(size_str, &size_str, 10, &size) != 0) {
-		pr_warn("the shared memory size is incorrect, %s\n", size_str);
+	if (dm_strtoui(tmp, &tmp, 10, &size) != 0) {
+		pr_warn("the shared memory size is incorrect, %s\n", tmp);
 		goto err;
 	}
 	size *= 0x100000; /* convert to megabytes */
@@ -291,13 +289,6 @@ pci_ivshmem_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
 		goto err;
 	}

-	if (tmp) {
-		if (dm_strtoui(tmp, &tmp, 10, &region_id) != 0) {
-			pr_warn("shared memory region ID is incorrect, %s, 0 will used.\n", tmp);
-			region_id = 0;
-		}
-	}
-
 	ivshmem_vdev = calloc(1, sizeof(struct pci_ivshmem_vdev));
 	if (!ivshmem_vdev) {
 		pr_warn("failed to allocate ivshmem device\n");
@@ -313,8 +304,6 @@ pci_ivshmem_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
 	pci_set_cfgdata16(dev, PCIR_DEVICE, IVSHMEM_DEVICE_ID);
 	pci_set_cfgdata16(dev, PCIR_REVID, IVSHMEM_REV);
 	pci_set_cfgdata8(dev, PCIR_CLASS, IVSHMEM_CLASS);
-	pci_set_cfgdata16(dev, PCIR_SUBDEV_0, (uint16_t)region_id);
-	pci_set_cfgdata16(dev, PCIR_SUBVEND_0, IVSHMEM_INTEL_SUBVENDOR_ID);

 	pci_emul_alloc_bar(dev, IVSHMEM_MMIO_BAR, PCIBAR_MEM32, IVSHMEM_REG_SIZE);
 	pci_emul_alloc_bar(dev, IVSHMEM_MSIX_BAR, PCIBAR_MEM32, IVSHMEM_MSIX_PBA_SIZE);
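In user-visible terms, the usage comments in the ivshmem hunks above mean that a device which used to be added with a trailing region ID, for example "-s 3,ivshmem,shm_region_0,2,1" (name, size in MB, region ID), is now specified without it: "-s 3,ivshmem,shm_region_0,2". The slot number, name, and size here are illustrative values only, not taken from the tree.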
@@ -59,31 +59,27 @@ SYSRES_IO(NMISC_PORT, 1);

 static struct pci_vdev *lpc_bridge;

-#define LPC_UART_NUM 5
+#define LPC_UART_NUM 2
 static struct lpc_uart_vdev {
 	struct uart_vdev *uart;
 	const char *opts;
 	int iobase;
 	int irq;
-	int enabled; /* enabled/configured by user */
+	int enabled;
 } lpc_uart_vdev[LPC_UART_NUM];
-#define LPC_S5_UART_NAME "COM5"

-static const char *lpc_uart_names[LPC_UART_NUM] = { "COM1", "COM2", "COM3", "COM4", LPC_S5_UART_NAME};
+static const char *lpc_uart_names[LPC_UART_NUM] = { "COM1", "COM2" };

 /*
  * LPC device configuration is in the following form:
  * <lpc_device_name>[,<options>]
  * For e.g. "com1,stdio"
- * For S5 e.g. "com5,/dev/pts/0,0x9000,5"
  */
 int
 lpc_device_parse(const char *opts)
 {
 	int unit, error;
 	char *str, *cpy, *lpcdev;
-	char *lpcopt, *lpcport, *endptr;
-	int s5_port = 0, s5_irq = 0;

 	error = -1;
 	str = cpy = strdup(opts);
@@ -91,26 +87,7 @@ lpc_device_parse(const char *opts)
 	if (lpcdev != NULL) {
 		for (unit = 0; unit < LPC_UART_NUM; unit++) {
 			if (strcasecmp(lpcdev, lpc_uart_names[unit]) == 0) {
-				lpc_uart_vdev[unit].enabled = 1;
-				if(strcasecmp(lpcdev,LPC_S5_UART_NAME) == 0){
-					lpcopt = strsep(&str,",");
-					if(lpcopt != NULL){
-						lpc_uart_vdev[unit].opts = lpcopt;
-					}
-					lpcport = strsep(&str, ",");
-					if(lpcport != NULL){
-						if(dm_strtoul(lpcport, &endptr, 0, (long unsigned int*)&s5_port))
-							goto done;
-						if(dm_strtoul(str, &endptr, 0, (long unsigned int*)&s5_irq))
-							goto done;
-					}
-					if((s5_port != 0) && (s5_irq != 0)){
-						uart_legacy_reinit_res(unit, s5_port, s5_irq);
-					}
-				}
-				else{
-					lpc_uart_vdev[unit].opts = str;
-				}
+				lpc_uart_vdev[unit].opts = str;
 				error = 0;
 				goto done;
 			}
@@ -207,6 +184,7 @@ lpc_deinit(struct vmctx *ctx)
 		uart_release_backend(lpc_uart->uart, lpc_uart->opts);
 		uart_legacy_dealloc(unit);
 		lpc_uart->uart = NULL;
+		lpc_uart->enabled = 0;
 	}
 }

@@ -224,9 +202,6 @@ lpc_init(struct vmctx *ctx)
 		lpc_uart = &lpc_uart_vdev[unit];
 		name = lpc_uart_names[unit];

-		if (lpc_uart->enabled == 0)
-			continue;
-
 		if (uart_legacy_alloc(unit,
				      &lpc_uart->iobase,
				      &lpc_uart->irq) != 0) {
@@ -254,6 +229,7 @@ lpc_init(struct vmctx *ctx)
 		error = register_inout(&iop);
 		if (error)
 			goto init_failed;
+		lpc_uart->enabled = 1;
 	}

 	return 0;
@@ -48,7 +48,6 @@
 #include "dm.h"
 #include "passthru.h"
 #include "ptm.h"
-#include "igd_pciids.h"

 /* Some audio drivers get topology data from ACPI NHLT table.
  * For such drivers, we need to copy the host NHLT table to make it
@@ -61,9 +60,8 @@

 extern uint64_t audio_nhlt_len;

-uint64_t gpu_dsm_hpa = 0;
-uint64_t gpu_dsm_gpa = 0;
-uint32_t gpu_dsm_size = 0;
+uint32_t gpu_dsm_hpa = 0;
+uint32_t gpu_dsm_gpa = 0;
 uint32_t gpu_opregion_hpa = 0;
 uint32_t gpu_opregion_gpa = 0;

|
||||||
@ -554,62 +552,7 @@ get_gpu_rsvmem_base_gpa()
|
|||||||
uint32_t
|
uint32_t
|
||||||
get_gpu_rsvmem_size()
|
get_gpu_rsvmem_size()
|
||||||
{
|
{
|
||||||
return GPU_OPREGION_SIZE + gpu_dsm_size;
|
return GPU_OPREGION_SIZE + GPU_DSM_SIZE;
|
||||||
}
|
|
||||||
|
|
||||||
static const struct igd_device igd_device_tbl[] = {
|
|
||||||
IGD_RPLP_DEVICE_IDS,
|
|
||||||
IGD_RPLS_DEVICE_IDS,
|
|
||||||
IGD_ADLN_DEVICE_IDS,
|
|
||||||
IGD_ADLP_DEVICE_IDS,
|
|
||||||
IGD_ADLS_DEVICE_IDS,
|
|
||||||
IGD_RKL_DEVICE_IDS,
|
|
||||||
IGD_TGL_DEVICE_IDS,
|
|
||||||
IGD_JSL_DEVICE_IDS,
|
|
||||||
IGD_EHL_DEVICE_IDS,
|
|
||||||
IGD_ICL_DEVICE_IDS,
|
|
||||||
IGD_CFL_DEVICE_IDS,
|
|
||||||
IGD_KBL_DEVICE_IDS,
|
|
||||||
IGD_GLK_DEVICE_IDS,
|
|
||||||
IGD_BXT_DEVICE_IDS,
|
|
||||||
IGD_SKL_DEVICE_IDS,
|
|
||||||
{ 0 }
|
|
||||||
};
|
|
||||||
|
|
||||||
int igd_gen(uint16_t device) {
|
|
||||||
const struct igd_device *entry;
|
|
||||||
|
|
||||||
for (entry = igd_device_tbl; entry->device != 0; entry++) {
|
|
||||||
if (entry->device == device) {
|
|
||||||
return entry->gen;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
uint32_t igd_dsm_region_size(struct pci_device *igddev)
|
|
||||||
{
|
|
||||||
uint16_t ggc;
|
|
||||||
uint8_t gms;
|
|
||||||
|
|
||||||
ggc = read_config(igddev, PCIR_GGC, 2);
|
|
||||||
gms = ggc >> PCIR_GGC_GMS_SHIFT;
|
|
||||||
|
|
||||||
switch (gms) {
|
|
||||||
case 0x00 ... 0x10:
|
|
||||||
return gms * 32 * MB;
|
|
||||||
case 0x20:
|
|
||||||
return 1024 * MB;
|
|
||||||
case 0x30:
|
|
||||||
return 1536 * MB;
|
|
||||||
case 0x40:
|
|
||||||
return 2048 * MB;
|
|
||||||
case 0xf0 ... 0xfe:
|
|
||||||
return (gms - 0xf0 + 1) * 4 * MB;
|
|
||||||
}
|
|
||||||
|
|
||||||
pr_err("%s: Invalid GMS value in GGC register. GGC = %04x\n", __func__, ggc);
|
|
||||||
return 0; /* Should never reach here */
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -620,26 +563,80 @@ passthru_gpu_dsm_opregion(struct vmctx *ctx, struct passthru_dev *ptdev,
|
|||||||
struct acrn_pcidev *pcidev, uint16_t device)
|
struct acrn_pcidev *pcidev, uint16_t device)
|
||||||
{
|
{
|
||||||
uint32_t opregion_phys, dsm_mask_val;
|
uint32_t opregion_phys, dsm_mask_val;
|
||||||
int gen;
|
|
||||||
|
|
||||||
/* get opregion hpa */
|
/* get opregion hpa */
|
||||||
opregion_phys = read_config(ptdev->phys_dev, PCIR_ASLS_CTL, 4);
|
opregion_phys = read_config(ptdev->phys_dev, PCIR_ASLS_CTL, 4);
|
||||||
gpu_opregion_hpa = opregion_phys & PCIM_ASLS_OPREGION_MASK;
|
gpu_opregion_hpa = opregion_phys & PCIM_ASLS_OPREGION_MASK;
|
||||||
|
|
||||||
gen = igd_gen(device);
|
switch (device) {
|
||||||
if (!gen) {
|
/* ElkhartLake */
|
||||||
pr_warn("Device 8086:%04x is not an igd device in allowlist, assuming it is gen 11+. " \
|
case 0x4500:
|
||||||
"GVT-d may not working properly\n", device);
|
case 0x4541:
|
||||||
gen = 11;
|
case 0x4551:
|
||||||
}
|
case 0x4571:
|
||||||
|
/* TigerLake */
|
||||||
gpu_dsm_size = igd_dsm_region_size(ptdev->phys_dev);
|
case 0x9a40:
|
||||||
if (!gpu_dsm_size) {
|
case 0x9a49:
|
||||||
pr_err("Invalid igd dsm region size, check DVMT Pre-Allocated option in BIOS\n");
|
case 0x9a59:
|
||||||
return;
|
case 0x9a60:
|
||||||
}
|
case 0x9a68:
|
||||||
|
case 0x9a70:
|
||||||
if (gen >= 11) {
|
case 0x9a78:
|
||||||
|
case 0x9ac0:
|
||||||
|
case 0x9ac9:
|
||||||
|
case 0x9ad9:
|
||||||
|
case 0x9af8:
|
||||||
|
/* AlderLake */
|
||||||
|
case 0x4680:
|
||||||
|
case 0x4681:
|
||||||
|
case 0x4682:
|
||||||
|
case 0x4683:
|
||||||
|
case 0x4690:
|
||||||
|
case 0x4691:
|
||||||
|
case 0x4692:
|
||||||
|
case 0x4693:
|
||||||
|
case 0x4698:
|
||||||
|
case 0x4699:
|
||||||
|
/* ADL-P GT graphics */
|
||||||
|
case 0x4626:
|
||||||
|
case 0x4628:
|
||||||
|
case 0x462a:
|
||||||
|
case 0x46a0:
|
||||||
|
case 0x46a1:
|
||||||
|
case 0x46a2:
|
||||||
|
case 0x46a3:
|
||||||
|
case 0x46a6:
|
||||||
|
case 0x46a8:
|
||||||
|
case 0x46aa:
|
||||||
|
case 0x46b0:
|
||||||
|
case 0x46b1:
|
||||||
|
case 0x46b2:
|
||||||
|
case 0x46b3:
|
||||||
|
case 0x46c0:
|
||||||
|
case 0x46c1:
|
||||||
|
case 0x46c2:
|
||||||
|
case 0x46c3:
|
||||||
|
/* Alder Lake-N */
|
||||||
|
case 0x46d0:
|
||||||
|
case 0x46d1:
|
||||||
|
case 0x46d2:
|
||||||
|
/* Raptor Lake-S */
|
||||||
|
case 0xa780:
|
||||||
|
case 0xa781:
|
||||||
|
case 0xa782:
|
||||||
|
case 0xa783:
|
||||||
|
case 0xa788:
|
||||||
|
case 0xa789:
|
||||||
|
case 0xa78a:
|
||||||
|
case 0xa78b:
|
||||||
|
/* Raptor Lake-U */
|
||||||
|
case 0xa721:
|
||||||
|
case 0xa7a1:
|
||||||
|
case 0xa7a9:
|
||||||
|
/* Raptor Lake-P */
|
||||||
|
case 0xa720:
|
||||||
|
case 0xa7a0:
|
||||||
|
case 0xa7a8:
|
||||||
/* BDSM register has 64 bits.
|
/* BDSM register has 64 bits.
|
||||||
* bits 63:20 contains the base address of stolen memory
|
* bits 63:20 contains the base address of stolen memory
|
||||||
*/
|
*/
|
||||||
@ -658,7 +655,9 @@ passthru_gpu_dsm_opregion(struct vmctx *ctx, struct passthru_dev *ptdev,
|
|||||||
pci_set_cfgdata32(ptdev->dev, PCIR_GEN11_BDSM_DW1, 0);
|
pci_set_cfgdata32(ptdev->dev, PCIR_GEN11_BDSM_DW1, 0);
|
||||||
|
|
||||||
ptdev->has_virt_pcicfg_regs = &has_virt_pcicfg_regs_on_ehl_gpu;
|
ptdev->has_virt_pcicfg_regs = &has_virt_pcicfg_regs_on_ehl_gpu;
|
||||||
} else {
|
break;
|
||||||
|
/* If on default platforms, such as KBL,WHL */
|
||||||
|
default:
|
||||||
/* bits 31:20 contains the base address of stolen memory */
|
/* bits 31:20 contains the base address of stolen memory */
|
||||||
gpu_dsm_hpa = read_config(ptdev->phys_dev, PCIR_BDSM, 4);
|
gpu_dsm_hpa = read_config(ptdev->phys_dev, PCIR_BDSM, 4);
|
||||||
dsm_mask_val = gpu_dsm_hpa & ~PCIM_BDSM_MASK;
|
dsm_mask_val = gpu_dsm_hpa & ~PCIM_BDSM_MASK;
|
||||||
@ -668,14 +667,15 @@ passthru_gpu_dsm_opregion(struct vmctx *ctx, struct passthru_dev *ptdev,
|
|||||||
pci_set_cfgdata32(ptdev->dev, PCIR_BDSM, gpu_dsm_gpa | dsm_mask_val);
|
pci_set_cfgdata32(ptdev->dev, PCIR_BDSM, gpu_dsm_gpa | dsm_mask_val);
|
||||||
|
|
||||||
ptdev->has_virt_pcicfg_regs = &has_virt_pcicfg_regs_on_def_gpu;
|
ptdev->has_virt_pcicfg_regs = &has_virt_pcicfg_regs_on_def_gpu;
|
||||||
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
gpu_opregion_gpa = gpu_dsm_gpa - GPU_OPREGION_SIZE;
|
gpu_opregion_gpa = gpu_dsm_gpa - GPU_OPREGION_SIZE;
|
||||||
pci_set_cfgdata32(ptdev->dev, PCIR_ASLS_CTL, gpu_opregion_gpa | (opregion_phys & ~PCIM_ASLS_OPREGION_MASK));
|
pci_set_cfgdata32(ptdev->dev, PCIR_ASLS_CTL, gpu_opregion_gpa | (opregion_phys & ~PCIM_ASLS_OPREGION_MASK));
|
||||||
|
|
||||||
/* initialize the EPT mapping for passthrough GPU dsm region */
|
/* initialize the EPT mapping for passthrough GPU dsm region */
|
||||||
vm_unmap_ptdev_mmio(ctx, 0, 2, 0, gpu_dsm_gpa, gpu_dsm_size, gpu_dsm_hpa);
|
vm_unmap_ptdev_mmio(ctx, 0, 2, 0, gpu_dsm_gpa, GPU_DSM_SIZE, gpu_dsm_hpa);
|
||||||
vm_map_ptdev_mmio(ctx, 0, 2, 0, gpu_dsm_gpa, gpu_dsm_size, gpu_dsm_hpa);
|
vm_map_ptdev_mmio(ctx, 0, 2, 0, gpu_dsm_gpa, GPU_DSM_SIZE, gpu_dsm_hpa);
|
||||||
|
|
||||||
/* initialize the EPT mapping for passthrough GPU opregion */
|
/* initialize the EPT mapping for passthrough GPU opregion */
|
||||||
vm_unmap_ptdev_mmio(ctx, 0, 2, 0, gpu_opregion_gpa, GPU_OPREGION_SIZE, gpu_opregion_hpa);
|
vm_unmap_ptdev_mmio(ctx, 0, 2, 0, gpu_opregion_gpa, GPU_OPREGION_SIZE, gpu_opregion_hpa);
|
||||||
@ -714,25 +714,22 @@ parse_vmsix_on_msi_bar_id(char *s, int *id, int base)
|
|||||||
static int
|
static int
|
||||||
passthru_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
|
passthru_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
|
||||||
{
|
{
|
||||||
int bus, slot, func, idx, irq, error;
|
int bus, slot, func, idx, error;
|
||||||
struct passthru_dev *ptdev;
|
struct passthru_dev *ptdev;
|
||||||
struct pci_device_iterator *iter;
|
struct pci_device_iterator *iter;
|
||||||
struct pci_device *phys_dev;
|
struct pci_device *phys_dev;
|
||||||
char *opt, *s_irq;
|
char *opt;
|
||||||
bool keep_gsi = false;
|
bool keep_gsi = false;
|
||||||
bool need_reset = true;
|
bool need_reset = true;
|
||||||
bool d3hot_reset = false;
|
bool d3hot_reset = false;
|
||||||
bool enable_ptm = false;
|
bool enable_ptm = false;
|
||||||
bool enable_irq = false;
|
|
||||||
int vrp_sec_bus = 0;
|
int vrp_sec_bus = 0;
|
||||||
int vmsix_on_msi_bar_id = -1;
|
int vmsix_on_msi_bar_id = -1;
|
||||||
struct acrn_pcidev pcidev = {};
|
struct acrn_pcidev pcidev = {};
|
||||||
uint16_t vendor = 0, device = 0;
|
uint16_t vendor = 0, device = 0;
|
||||||
uint8_t class = 0;
|
uint8_t class = 0;
|
||||||
char rom_file[256];
|
char rom_file[256];
|
||||||
char dsdt_path[256];
|
|
||||||
bool need_rombar = false;
|
bool need_rombar = false;
|
||||||
bool need_dsdt = false;
|
|
||||||
|
|
||||||
ptdev = NULL;
|
ptdev = NULL;
|
||||||
error = -EINVAL;
|
error = -EINVAL;
|
||||||
@ -749,7 +746,6 @@ passthru_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
|
|||||||
}
|
}
|
||||||
|
|
||||||
memset(rom_file, 0, sizeof(rom_file));
|
memset(rom_file, 0, sizeof(rom_file));
|
||||||
memset(dsdt_path, 0, sizeof(dsdt_path));
|
|
||||||
while ((opt = strsep(&opts, ",")) != NULL) {
|
while ((opt = strsep(&opts, ",")) != NULL) {
|
||||||
if (!strncmp(opt, "keep_gsi", 8))
|
if (!strncmp(opt, "keep_gsi", 8))
|
||||||
keep_gsi = true;
|
keep_gsi = true;
|
||||||
@ -774,25 +770,6 @@ passthru_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
|
|||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
strncpy(rom_file, opt, sizeof(rom_file));
|
strncpy(rom_file, opt, sizeof(rom_file));
|
||||||
} else if (!strncmp(opt, "irq=", 4)) {
|
|
||||||
if(dm_strtoi(opt + 4, &s_irq, 10, &irq) == 0) {
|
|
||||||
enable_irq = true;
|
|
||||||
pr_warn("IRQ %d might be shared by multiple devices!\n", irq);
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
pr_err("Input IRQ number cannnot be recognized.\n");
|
|
||||||
return -EINVAL;
|
|
||||||
}
|
|
||||||
} else if (!strncmp(opt, "dsdt=", 5)) {
|
|
||||||
need_dsdt = true;
|
|
||||||
opt += 5;
|
|
||||||
if (strlen(opt) >= sizeof(dsdt_path)) {
|
|
||||||
pr_err("dsdt file path too long, max supported path length is %d\n",
|
|
||||||
sizeof(dsdt_path)-1);
|
|
||||||
return -EINVAL;
|
|
||||||
}
|
|
||||||
strncpy(dsdt_path, opt, sizeof(dsdt_path) - 1);
|
|
||||||
pr_info("dsdt file path is %s\n", dsdt_path);
|
|
||||||
} else
|
} else
|
||||||
pr_warn("Invalid passthru options:%s", opt);
|
pr_warn("Invalid passthru options:%s", opt);
|
||||||
}
|
}
|
||||||
@ -916,12 +893,7 @@ passthru_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
|
|||||||
/* Allocates the virq if ptdev only support INTx */
|
/* Allocates the virq if ptdev only support INTx */
|
||||||
pci_lintr_request(dev);
|
pci_lintr_request(dev);
|
||||||
|
|
||||||
if(enable_irq) {
|
ptdev->phys_pin = read_config(ptdev->phys_dev, PCIR_INTLINE, 1);
|
||||||
ptdev->phys_pin = irq;
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
ptdev->phys_pin = read_config(ptdev->phys_dev, PCIR_INTLINE, 1);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (ptdev->phys_pin == -1 || ptdev->phys_pin > 256) {
|
if (ptdev->phys_pin == -1 || ptdev->phys_pin > 256) {
|
||||||
pr_err("ptdev %x/%x/%x has wrong phys_pin %d, likely fail!",
|
pr_err("ptdev %x/%x/%x has wrong phys_pin %d, likely fail!",
|
||||||
@ -931,10 +903,6 @@ passthru_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
ptdev->need_dsdt = need_dsdt;
|
|
||||||
memset(ptdev->dsdt_path, 0, sizeof(ptdev->dsdt_path));
|
|
||||||
strncpy(ptdev->dsdt_path, dsdt_path, sizeof(ptdev->dsdt_path) - 1);
|
|
||||||
|
|
||||||
if (enable_ptm) {
|
if (enable_ptm) {
|
||||||
error = ptm_probe(ctx, ptdev, &vrp_sec_bus);
|
error = ptm_probe(ctx, ptdev, &vrp_sec_bus);
|
||||||
|
|
||||||
@ -1003,7 +971,7 @@ passthru_deinit(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
|
|||||||
phys_bdf = ptdev->phys_bdf;
|
phys_bdf = ptdev->phys_bdf;
|
||||||
|
|
||||||
if (is_intel_graphics_dev(dev)) {
|
if (is_intel_graphics_dev(dev)) {
|
||||||
vm_unmap_ptdev_mmio(ctx, 0, 2, 0, gpu_dsm_gpa, gpu_dsm_size, gpu_dsm_hpa);
|
vm_unmap_ptdev_mmio(ctx, 0, 2, 0, gpu_dsm_gpa, GPU_DSM_SIZE, gpu_dsm_hpa);
|
||||||
vm_unmap_ptdev_mmio(ctx, 0, 2, 0, gpu_opregion_gpa, GPU_OPREGION_SIZE, gpu_opregion_hpa);
|
vm_unmap_ptdev_mmio(ctx, 0, 2, 0, gpu_opregion_gpa, GPU_OPREGION_SIZE, gpu_opregion_hpa);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1012,10 +980,13 @@ passthru_deinit(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
|
|||||||
pciaccess_cleanup();
|
pciaccess_cleanup();
|
||||||
free(ptdev);
|
free(ptdev);
|
||||||
|
|
||||||
/*Let device model to deassign pt device for all VMs, including RTVM if the Service VM plays
|
if (!is_rtvm) {
|
||||||
*supervisor role.*/
|
/* Let the HV to deassign the pt device for RTVM, In this case, the RTVM
|
||||||
vm_deassign_pcidev(ctx, &pcidev);
|
* could still be alive if DM died.
|
||||||
if (phys_bdf) {
|
*/
|
||||||
|
vm_deassign_pcidev(ctx, &pcidev);
|
||||||
|
}
|
||||||
|
if (!is_rtvm && phys_bdf) {
|
||||||
memset(reset_path, 0, sizeof(reset_path));
|
memset(reset_path, 0, sizeof(reset_path));
|
||||||
snprintf(reset_path, 40,
|
snprintf(reset_path, 40,
|
||||||
"/sys/bus/pci/devices/0000:%02x:%02x.%x/reset",
|
"/sys/bus/pci/devices/0000:%02x:%02x.%x/reset",
|
||||||
@ -1838,38 +1809,9 @@ write_dsdt_tsn(struct pci_vdev *dev, uint16_t device)
|
|||||||
dsdt_line("");
|
dsdt_line("");
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
|
||||||
write_dsdt_file(struct pci_vdev *dev)
|
|
||||||
{
|
|
||||||
struct passthru_dev *ptdev = NULL;
|
|
||||||
FILE *fp;
|
|
||||||
char *line = NULL;
|
|
||||||
char *dsdt_path = NULL;
|
|
||||||
size_t len = 0;
|
|
||||||
ssize_t read;
|
|
||||||
|
|
||||||
ptdev = (struct passthru_dev *) dev->arg;
|
|
||||||
dsdt_path = ptdev->dsdt_path;
|
|
||||||
fp = fopen(dsdt_path, "r");
|
|
||||||
if (fp == NULL) {
|
|
||||||
pr_err("Cannot open dsdt file %s", dsdt_path);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
dsdt_line("");
|
|
||||||
/* Read each line of dsdt file */
|
|
||||||
while ((read = getline(&line, &len, fp)) != -1) {
|
|
||||||
dsdt_line(line);
|
|
||||||
}
|
|
||||||
if (line)
|
|
||||||
free(line);
|
|
||||||
fclose(fp);
|
|
||||||
}
|
|
||||||
|
|
||||||
static void
|
static void
|
||||||
passthru_write_dsdt(struct pci_vdev *dev)
|
passthru_write_dsdt(struct pci_vdev *dev)
|
||||||
{
|
{
|
||||||
struct passthru_dev *ptdev = NULL;
|
|
||||||
uint16_t vendor = 0, device = 0;
|
uint16_t vendor = 0, device = 0;
|
||||||
|
|
||||||
vendor = pci_get_cfgdata16(dev, PCIR_VENDOR);
|
vendor = pci_get_cfgdata16(dev, PCIR_VENDOR);
|
||||||
@ -1878,7 +1820,6 @@ passthru_write_dsdt(struct pci_vdev *dev)
|
|||||||
return;
|
return;
|
||||||
|
|
||||||
device = pci_get_cfgdata16(dev, PCIR_DEVICE);
|
device = pci_get_cfgdata16(dev, PCIR_DEVICE);
|
||||||
ptdev = (struct passthru_dev *) dev->arg;
|
|
||||||
|
|
||||||
/* Provides ACPI extra info */
|
/* Provides ACPI extra info */
|
||||||
if (device == 0x5aaa)
|
if (device == 0x5aaa)
|
||||||
@ -1901,9 +1842,6 @@ passthru_write_dsdt(struct pci_vdev *dev)
|
|||||||
write_dsdt_sdc(dev);
|
write_dsdt_sdc(dev);
|
||||||
else if ((device == 0x4b32) || (device == 0x4ba0) || (device == 0x4bb0))
|
else if ((device == 0x4b32) || (device == 0x4ba0) || (device == 0x4bb0))
|
||||||
write_dsdt_tsn(dev, device);
|
write_dsdt_tsn(dev, device);
|
||||||
else if (ptdev->need_dsdt)
|
|
||||||
/* load DSDT by input file */
|
|
||||||
write_dsdt_file(dev);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
struct pci_vdev_ops passthru = {
|
struct pci_vdev_ops passthru = {
|
||||||
|
@@ -66,19 +66,13 @@ void iothread_handler(void *arg)
 	struct virtio_base *base = viothrd->base;
 	int idx = viothrd->idx;
 	struct virtio_vq_info *vq = &base->queues[idx];
-	eventfd_t val;

-	/* Mitigate the epoll_wait repeat cycles by reading out the event */
-	if (eventfd_read(vq->viothrd.iomvt.fd, &val) == -1) {
-		pr_err("%s: eventfd_read fails \r\n", __func__);
-		return;
-	}
-
 	if (viothrd->iothread_run) {
-		pthread_mutex_lock(&vq->mtx);
-		/* only vq specific data can be accessed in qnotify callback */
+		if (base->mtx)
+			pthread_mutex_lock(base->mtx);
 		(*viothrd->iothread_run)(base, vq);
-		pthread_mutex_unlock(&vq->mtx);
+		if (base->mtx)
+			pthread_mutex_unlock(base->mtx);
 	}
 }

@@ -112,12 +106,12 @@ virtio_set_iothread(struct virtio_base *base,
 		vq->viothrd.iomvt.run = iothread_handler;
 		vq->viothrd.iomvt.fd = vq->viothrd.kick_fd;

-		if (!iothread_add(vq->viothrd.ioctx, vq->viothrd.kick_fd, &vq->viothrd.iomvt))
+		if (!iothread_add(vq->viothrd.kick_fd, &vq->viothrd.iomvt))
 			if (!virtio_register_ioeventfd(base, idx, true, vq->viothrd.kick_fd))
 				vq->viothrd.ioevent_started = true;
 	} else {
 		if (!virtio_register_ioeventfd(base, idx, false, vq->viothrd.kick_fd))
-			if (!iothread_del(vq->viothrd.ioctx, vq->viothrd.kick_fd)) {
+			if (!iothread_del(vq->viothrd.kick_fd)) {
 				vq->viothrd.ioevent_started = false;
 				if (vq->viothrd.kick_fd) {
 					close(vq->viothrd.kick_fd);
@@ -193,6 +187,8 @@ virtio_poll_timer(void *arg, uint64_t nexp)
 * @param pci_virtio_dev Pointer to instance of certain virtio device.
 * @param dev Pointer to struct pci_vdev which emulates a PCI device.
 * @param queues Pointer to struct virtio_vq_info, normally an array.
+ *
+ * @return None
 */
 void
 virtio_linkup(struct virtio_base *base, struct virtio_ops *vops,
@@ -200,27 +196,13 @@ virtio_linkup(struct virtio_base *base, struct virtio_ops *vops,
 	       struct virtio_vq_info *queues,
 	       int backend_type)
 {
-	int i, rc;
-	pthread_mutexattr_t attr;
+	int i;

 	/* base and pci_virtio_dev addresses must match */
 	if ((void *)base != pci_virtio_dev) {
 		pr_err("virtio_base and pci_virtio_dev addresses don't match!\n");
 		return;
 	}

-	rc = pthread_mutexattr_init(&attr);
-	if (rc) {
-		pr_err("%s, pthread_mutexattr_init failed\n", __func__);
-		return;
-	}
-
-	rc = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
-	if (rc) {
-		pr_err("%s, pthread_mutexattr_settype failed\n", __func__);
-		return;
-	}
-
 	base->vops = vops;
 	base->dev = dev;
 	dev->arg = base;
@@ -230,11 +212,6 @@ virtio_linkup(struct virtio_base *base, struct virtio_ops *vops,
 	for (i = 0; i < vops->nvq; i++) {
 		queues[i].base = base;
 		queues[i].num = i;
-		rc = pthread_mutex_init(&queues[i].mtx, &attr);
-		if (rc) {
-			pr_err("%s, pthread_mutex_init failed\n", __func__);
-			return;
-		}
 	}
 }

@@ -249,6 +226,8 @@ virtio_linkup(struct virtio_base *base, struct virtio_ops *vops,
 * If MSI-X is enabled, this also resets all the vectors to NO_VECTOR.
 *
 * @param base Pointer to struct virtio_base.
+ *
+ * @return None
 */
 void
 virtio_reset_dev(struct virtio_base *base)
@@ -296,6 +275,8 @@ virtio_reset_dev(struct virtio_base *base)
 *
 * @param base Pointer to struct virtio_base.
 * @param barnum Which BAR[0..5] to use.
+ *
+ * @return None
 */
 void
 virtio_set_io_bar(struct virtio_base *base, int barnum)
@@ -784,6 +765,8 @@ vq_endchains(struct virtio_vq_info *vq, int used_all_avail)
 *
 * @param base Pointer to struct virtio_base.
 * @param vq Pointer to struct virtio_vq_info.
+ *
+ * @return None
 */
 void vq_clear_used_ring_flags(struct virtio_base *base, struct virtio_vq_info *vq)
 {
@@ -1946,6 +1929,8 @@ virtio_pci_read(struct vmctx *ctx, int vcpu, struct pci_vdev *dev,
 * @param offset Register offset in bytes within a BAR region.
 * @param size Access range in bytes.
 * @param value Data value to be written into register.
+ *
+ * @return None
 */
 void
 virtio_pci_write(struct vmctx *ctx, int vcpu, struct pci_vdev *dev,
@ -2013,12 +1998,12 @@ int virtio_register_ioeventfd(struct virtio_base *base, int idx, bool is_registe
|
|||||||
int rc = 0;
|
int rc = 0;
|
||||||
|
|
||||||
if (!is_register)
|
if (!is_register)
|
||||||
ioeventfd.flags |= ACRN_IOEVENTFD_FLAG_DEASSIGN;
|
ioeventfd.flags = ACRN_IOEVENTFD_FLAG_DEASSIGN;
|
||||||
if (base->iothread)
|
else if (base->iothread)
|
||||||
/* Enable ASYNCIO by default. If ASYNCIO is not supported by kernel
|
/* Enable ASYNCIO by default. If ASYNCIO is not supported by kernel
|
||||||
* or hyperviosr, this flag will be ignored.
|
* or hyperviosr, this flag will be ignored.
|
||||||
*/
|
*/
|
||||||
ioeventfd.flags |= ACRN_IOEVENTFD_FLAG_ASYNCIO;
|
ioeventfd.flags = ACRN_IOEVENTFD_FLAG_ASYNCIO;
|
||||||
/* register ioeventfd for kick */
|
/* register ioeventfd for kick */
|
||||||
if (base->device_caps & (1UL << VIRTIO_F_VERSION_1)) {
|
if (base->device_caps & (1UL << VIRTIO_F_VERSION_1)) {
|
||||||
/*
|
/*
|
||||||
|
@@ -61,7 +61,7 @@

 /* Device can toggle its cache between writeback and writethrough modes */
 #define VIRTIO_BLK_F_CONFIG_WCE (1 << 11)
-#define VIRTIO_BLK_F_MQ (1 << 12) /* support more than one vq */
 #define VIRTIO_BLK_F_DISCARD (1 << 13)

 /*
@@ -101,8 +101,8 @@ struct virtio_blk_config {
     } topology;
     uint8_t writeback;
     uint8_t unused;
-    /* num_queues when VIRTIO_BLK_F_MQ is support*/
-    uint16_t num_queues;
+    /* Reserve for num_queues when VIRTIO_BLK_F_MQ is support*/
+    uint16_t reserve;
     /* The maximum discard sectors (in 512-byte sectors) for one segment */
     uint32_t max_discard_sectors;
     /* The maximum number of discard segments */
@@ -156,16 +156,13 @@ struct virtio_blk_ioreq {
 struct virtio_blk {
     struct virtio_base base;
     pthread_mutex_t mtx;
-    struct virtio_vq_info *vqs;
+    struct virtio_vq_info vq;
     struct virtio_blk_config cfg;
     bool dummy_bctxt; /* Used in blockrescan. Indicate if the bctxt can be used */
     struct blockif_ctxt *bc;
     char ident[VIRTIO_BLK_BLK_ID_BYTES + 1];
-    struct virtio_blk_ioreq *ios;
+    struct virtio_blk_ioreq ios[VIRTIO_BLK_RINGSZ];
     uint8_t original_wce;
-    int num_vqs;
-    struct iothreads_info iothrds_info;
-    struct virtio_ops ops;
 };

 static void virtio_blk_reset(void *);
@@ -173,6 +170,18 @@ static void virtio_blk_notify(void *, struct virtio_vq_info *);
 static int virtio_blk_cfgread(void *, int, int, uint32_t *);
 static int virtio_blk_cfgwrite(void *, int, int, uint32_t);

+static struct virtio_ops virtio_blk_ops = {
+    "virtio_blk", /* our name */
+    1, /* we support 1 virtqueue */
+    sizeof(struct virtio_blk_config), /* config reg size */
+    virtio_blk_reset, /* reset */
+    virtio_blk_notify, /* device-wide qnotify */
+    virtio_blk_cfgread, /* read PCI config */
+    virtio_blk_cfgwrite, /* write PCI config */
+    NULL, /* apply negotiated features */
+    NULL, /* called on guest set status */
+};
+
 static void
 virtio_blk_reset(void *vdev)
 {
@@ -190,7 +199,6 @@ virtio_blk_done(struct blockif_req *br, int err)
 {
     struct virtio_blk_ioreq *io = br->param;
     struct virtio_blk *blk = io->blk;
-    struct virtio_vq_info *vq = blk->vqs + br->qidx;

     if (err)
         DPRINTF(("virtio_blk: done with error = %d\n\r", err));
@@ -207,10 +215,10 @@ virtio_blk_done(struct blockif_req *br, int err)
      * Return the descriptor back to the host.
      * We wrote 1 byte (our status) to host.
      */
-    pthread_mutex_lock(&vq->mtx);
-    vq_relchain(vq, io->idx, 1);
-    vq_endchains(vq, !vq_has_descs(vq));
-    pthread_mutex_unlock(&vq->mtx);
+    pthread_mutex_lock(&blk->mtx);
+    vq_relchain(&blk->vq, io->idx, 1);
+    vq_endchains(&blk->vq, !vq_has_descs(&blk->vq));
+    pthread_mutex_unlock(&blk->mtx);
 }

 static void
@@ -227,14 +235,13 @@ virtio_blk_proc(struct virtio_blk *blk, struct virtio_vq_info *vq)
 {
     struct virtio_blk_hdr *vbh;
     struct virtio_blk_ioreq *io;
-    int i, n, qidx;
+    int i, n;
     int err;
     ssize_t iolen;
     int writeop, type;
     struct iovec iov[BLOCKIF_IOV_MAX + 2];
     uint16_t idx, flags[BLOCKIF_IOV_MAX + 2];

-    qidx = vq - blk->vqs;
     idx = vq->qsize;
     n = vq_getchain(vq, &idx, iov, BLOCKIF_IOV_MAX + 2, flags);

@@ -252,7 +259,7 @@ virtio_blk_proc(struct virtio_blk *blk, struct virtio_vq_info *vq)
         return;
     }

-    io = &blk->ios[qidx * VIRTIO_BLK_RINGSZ + idx];
+    io = &blk->ios[idx];
     if ((flags[0] & VRING_DESC_F_WRITE) != 0) {
         WPRINTF(("%s: the type for hdr should not be VRING_DESC_F_WRITE\n", __func__));
         virtio_blk_abort(vq, idx);
@@ -413,9 +420,6 @@ virtio_blk_get_caps(struct virtio_blk *blk, bool wb)
     if (blockif_is_ro(blk->bc))
         caps |= VIRTIO_BLK_F_RO;

-    if (blk->num_vqs > 1)
-        caps |= VIRTIO_BLK_F_MQ;
-
     return caps;
 }

@@ -443,7 +447,6 @@ virtio_blk_update_config_space(struct virtio_blk *blk)
         (sto != 0) ? ((sts - sto) / sectsz) : 0;
     blk->cfg.topology.min_io_size = 0;
     blk->cfg.writeback = blockif_get_wce(blk->bc);
-    blk->cfg.num_queues = (uint16_t)blk->num_vqs;
     blk->original_wce = blk->cfg.writeback; /* save for reset */
     if (blockif_candiscard(blk->bc)) {
         blk->cfg.max_discard_sectors = blockif_max_discard_sectors(blk->bc);
@@ -453,18 +456,6 @@ virtio_blk_update_config_space(struct virtio_blk *blk)
     blk->base.device_caps =
         virtio_blk_get_caps(blk, !!blk->cfg.writeback);
 }

-static void
-virtio_blk_init_ops(struct virtio_blk *blk, int num_vqs)
-{
-    blk->ops.name = "virtio_blk";
-    blk->ops.nvq = num_vqs;
-    blk->ops.cfgsize = sizeof(struct virtio_blk_config);
-    blk->ops.reset = virtio_blk_reset;
-    blk->ops.cfgread = virtio_blk_cfgread;
-    blk->ops.cfgwrite = virtio_blk_cfgwrite;
-}
-
 static int
 virtio_blk_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
 {
@@ -477,21 +468,14 @@ virtio_blk_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
     u_char digest[16];
     struct virtio_blk *blk;
     bool use_iothread;
-    struct iothread_ctx *ioctx_base = NULL;
-    struct iothreads_info iothrds_info;
-    int num_vqs;
-    int i, j;
+    int i;
     pthread_mutexattr_t attr;
     int rc;
-    struct iothreads_option iot_opt;

-    memset(&iot_opt, 0, sizeof(iot_opt));
-
     bctxt = NULL;
     /* Assume the bctxt is valid, until identified otherwise */
     dummy_bctxt = false;
     use_iothread = false;
-    num_vqs = 1;

     if (opts == NULL) {
         pr_err("virtio_blk: backing device required\n");
@@ -517,75 +501,17 @@ virtio_blk_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
         return -1;
     }
     if (strstr(opts, "nodisk") == NULL) {
-        /*
-         * both ",iothread" and ",mq=int" are consumed by virtio-blk
-         * and must be specified before any other opts which will
-         * be used by blockif_open.
-         */
-        char *p = opts_start;
-        while (opts_tmp != NULL) {
-            opt = strsep(&opts_tmp, ",");
-
-            if (!strncmp(opt, "iothread", strlen("iothread"))) {
-                use_iothread = true;
-                strsep(&opt, "=");
-
-                if (iothread_parse_options(opt, &iot_opt) < 0) {
-                    free(opts_start);
-                    return -1;
-                }
-
-                p = opts_tmp;
-            } else if (!strncmp(opt, "mq", strlen("mq"))) {
-                strsep(&opt, "=");
-                if (opt != NULL) {
-                    if (dm_strtoi(opt, &opt, 10, &num_vqs) ||
-                        (num_vqs <= 0)) {
-                        WPRINTF(("%s: incorrect num queues %s\n",
-                            __func__, opt));
-                        free(opts_start);
-                        return -1;
-                    }
-                    /* the max vq number allowed by FE is guest cpu num */
-                    if (num_vqs > guest_cpu_num())
-                        num_vqs = guest_cpu_num();
-                }
-                p = opts_tmp;
-            } else {
-                /* The opts_start is truncated by strsep, opts_tmp is also
-                 * changed by strsetp, so use opts which points to the
-                 * original parameter string
-                 */
-                p = opts + (p - opts_start);
-                break;
-            }
-        }
-
-        if (use_iothread) {
-            /*
-             * Creating more iothread instances than the number of virtqueues is not necessary.
-             * - One or more vqs can be handled in one iothread.
-             * - The mapping between virtqueues and iothreads is based on round robin.
-             */
-            if (iot_opt.num > num_vqs) {
-                iot_opt.num = num_vqs;
-            }
-
-            if (snprintf(iot_opt.tag, sizeof(iot_opt.tag), "blk%s", bident) >= sizeof(iot_opt.tag)) {
-                pr_err("%s: virtio-blk ioctx_tag too long \n", __func__);
-            }
-
-            ioctx_base = iothread_create(&iot_opt);
-            iothread_free_options(&iot_opt);
-            if (ioctx_base == NULL) {
-                pr_err("%s: Fails to create iothread context instance \n", __func__);
-                return -1;
-            }
-        }
-        iothrds_info.ioctx_base = ioctx_base;
-        iothrds_info.num = iot_opt.num;
-
-        bctxt = blockif_open(p, bident, num_vqs, &iothrds_info);
+        opt = strsep(&opts_tmp, ",");
+        if (strcmp("iothread", opt) == 0) {
+            use_iothread = true;
+        } else {
+            /* The opts_start is truncated by strsep, opts_tmp is also
+             * changed by strsetp, so use opts which points to the
+             * original parameter string
+             */
+            opts_tmp = opts;
+        }
+        bctxt = blockif_open(opts_tmp, bident);
         if (bctxt == NULL) {
             pr_err("Could not open backing file");
             free(opts_start);
@@ -604,39 +530,17 @@ virtio_blk_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
         return -1;
     }

-    blk->iothrds_info.ioctx_base = ioctx_base;
-    blk->iothrds_info.num = iot_opt.num;
-
     blk->bc = bctxt;
     /* Update virtio-blk device struct of dummy ctxt*/
     blk->dummy_bctxt = dummy_bctxt;

-    blk->num_vqs = num_vqs;
-    blk->vqs = calloc(blk->num_vqs, sizeof(struct virtio_vq_info));
-    if (!blk->vqs) {
-        WPRINTF(("virtio_blk: calloc vqs returns NULL\n"));
-        free(blk);
-        return -1;
-    }
-    blk->ios = calloc(blk->num_vqs * VIRTIO_BLK_RINGSZ,
-        sizeof(struct virtio_blk_ioreq));
-    if (!blk->ios) {
-        WPRINTF(("virtio_blk: calloc ios returns NULL\n"));
-        free(blk->vqs);
-        free(blk);
-        return -1;
-    }
-
-    for (j = 0; j < num_vqs; j++) {
-        for (i = 0; i < VIRTIO_BLK_RINGSZ; i++) {
-            struct virtio_blk_ioreq *io = &blk->ios[j * VIRTIO_BLK_RINGSZ + i];
-
-            io->req.callback = virtio_blk_done;
-            io->req.param = io;
-            io->req.qidx = j;
-            io->blk = blk;
-            io->idx = i;
-        }
+    for (i = 0; i < VIRTIO_BLK_RINGSZ; i++) {
+        struct virtio_blk_ioreq *io = &blk->ios[i];
+
+        io->req.callback = virtio_blk_done;
+        io->req.param = io;
+        io->blk = blk;
+        io->idx = i;
     }

     /* init mutex attribute properly to avoid deadlock */
@@ -653,20 +557,13 @@ virtio_blk_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
         DPRINTF(("virtio_blk: pthread_mutex_init failed with "
             "error %d!\n", rc));

-    virtio_blk_init_ops(blk, num_vqs);
-
     /* init virtio struct and virtqueues */
-    virtio_linkup(&blk->base, &(blk->ops), blk, dev, blk->vqs, BACKEND_VBSU);
+    virtio_linkup(&blk->base, &virtio_blk_ops, blk, dev, &blk->vq, BACKEND_VBSU);
     blk->base.iothread = use_iothread;
     blk->base.mtx = &blk->mtx;

-    for (j = 0; j < num_vqs; j++) {
-        blk->vqs[j].qsize = VIRTIO_BLK_RINGSZ;
-        blk->vqs[j].notify = virtio_blk_notify;
-        if (use_iothread) {
-            blk->vqs[j].viothrd.ioctx = ioctx_base + j % (iot_opt.num);
-        }
-    }
+    blk->vq.qsize = VIRTIO_BLK_RINGSZ;
+    /* blk->vq.vq_notify = we have no per-queue notify */

     /*
      * Create an identifier for the backing file. Use parts of the
@@ -748,10 +645,6 @@ virtio_blk_deinit(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
             blockif_close(bctxt);
         }
         virtio_reset_dev(&blk->base);
-        if (blk->ios)
-            free(blk->ios);
-        if (blk->vqs)
-            free(blk->vqs);
         free(blk);
     }
 }
@@ -851,7 +744,7 @@ virtio_blk_rescan(struct vmctx *ctx, struct pci_vdev *dev, char *newpath)

     pr_err("name=%s, Path=%s, ident=%s\n", dev->name, newpath, bident);
     /* update the bctxt for the virtio-blk device */
-    bctxt = blockif_open(newpath, bident, blk->num_vqs, &blk->iothrds_info);
+    bctxt = blockif_open(newpath, bident);
     if (bctxt == NULL) {
         pr_err("Error opening backing file\n");
         goto end;
@@ -384,12 +384,6 @@ struct pci_xhci_vbdp_dev_state {
     uint8_t state;
 };

-struct pci_xhci_async_request_node {
-    STAILQ_ENTRY(pci_xhci_async_request_node) link;
-    uint64_t offset;
-    uint64_t value;
-};
-
 struct pci_xhci_vdev {
     struct pci_vdev *dev;
     pthread_mutex_t mtx;
@@ -430,12 +424,6 @@ struct pci_xhci_vdev {
     int vbdp_dev_num;
     struct pci_xhci_vbdp_dev_state vbdp_devs[XHCI_MAX_VIRT_PORTS];

-    pthread_t async_thread;
-    bool async_transfer;
-    pthread_cond_t async_cond;
-    pthread_mutex_t async_tmx;
-    STAILQ_HEAD(, pci_xhci_async_request_node) async_head;
-
     /*
      * native_ports uses for record the command line assigned native root
      * hub ports and its child external hub ports.
@@ -497,7 +485,6 @@ static int pci_xhci_parse_extcap(struct pci_xhci_vdev *xdev, char *opts);
 static int pci_xhci_convert_speed(int lspeed);
 static void pci_xhci_free_usb_xfer(struct pci_xhci_dev_emu *dev, struct usb_xfer *xfer);
 static void pci_xhci_isoc_handler(void *arg, uint64_t param);
-static void pci_xhci_async_enqueue(struct pci_xhci_vdev *xdev, uint64_t offset, uint64_t value);

 #define XHCI_OPT_MAX_LEN 32
 static struct pci_xhci_option_elem xhci_option_table[] = {
@@ -1995,9 +1982,7 @@ pci_xhci_cmd_disable_slot(struct pci_xhci_vdev *xdev, uint32_t slot)
             slot, di->path.bus, usb_dev_path(&di->path));

         /* release all the resource allocated for virtual device */
-        pthread_mutex_unlock(&xdev->mtx);
         pci_xhci_dev_destroy(dev);
-        pthread_mutex_lock(&xdev->mtx);
     } else
         UPRINTF(LWRN, "invalid slot %d\r\n", slot);

@@ -2071,7 +2056,6 @@ pci_xhci_cmd_address_device(struct pci_xhci_vdev *xdev,
     struct usb_native_devinfo *di;
     uint32_t cmderr;
     uint8_t rh_port;
-    int ret;

     input_ctx = XHCI_GADDR(xdev, trb->qwTrb0 & ~0xFUL);
     if (!input_ctx) {
@@ -2126,10 +2110,7 @@ pci_xhci_cmd_address_device(struct pci_xhci_vdev *xdev,
             "port %d\r\n", di->path.bus,
             usb_dev_path(&di->path), rh_port);

-        pthread_mutex_unlock(&xdev->mtx);
         dev = pci_xhci_dev_create(xdev, di);
-        pthread_mutex_lock(&xdev->mtx);
-
         if (!dev) {
             UPRINTF(LFTL, "fail to create device for %d-%s\r\n",
                 di->path.bus,
@@ -2161,15 +2142,8 @@ pci_xhci_cmd_address_device(struct pci_xhci_vdev *xdev,
     dev->hci.hci_address = slot;
     dev->dev_ctx = dev_ctx;

-    if (dev->dev_ue->ue_reset == NULL) {
-        cmderr = XHCI_TRB_ERROR_ENDP_NOT_ON;
-        goto done;
-    }
-
-    pthread_mutex_unlock(&xdev->mtx);
-    ret = dev->dev_ue->ue_reset(dev->dev_instance);
-    pthread_mutex_lock(&xdev->mtx);
-    if (ret < 0) {
+    if (dev->dev_ue->ue_reset == NULL ||
+            dev->dev_ue->ue_reset(dev->dev_instance) < 0) {
         cmderr = XHCI_TRB_ERROR_ENDP_NOT_ON;
         goto done;
     }
@@ -2417,7 +2391,6 @@ pci_xhci_cmd_reset_ep(struct pci_xhci_vdev *xdev,

     devep = &dev->eps[epid];
     pthread_mutex_lock(&devep->mtx);
-    pthread_mutex_unlock(&xdev->mtx);

     xfer = devep->ep_xfer;
     for (i = 0; i < xfer->max_blk_cnt; ++i) {
@@ -2440,7 +2413,6 @@ pci_xhci_cmd_reset_ep(struct pci_xhci_vdev *xdev,
     UPRINTF(LDBG, "reset ep[%u] %08x %08x %016lx %08x\r\n",
         epid, ep_ctx->dwEpCtx0, ep_ctx->dwEpCtx1, ep_ctx->qwEpCtx2,
         ep_ctx->dwEpCtx4);
-    pthread_mutex_lock(&xdev->mtx);
     pthread_mutex_unlock(&devep->mtx);

 done:
@@ -3050,10 +3022,8 @@ pci_xhci_try_usb_xfer(struct pci_xhci_vdev *xdev,

     /* outstanding requests queued up */
     if (dev->dev_ue->ue_data != NULL) {
-        pthread_mutex_unlock(&xdev->mtx);
         err = dev->dev_ue->ue_data(dev->dev_instance, xfer, epid & 0x1 ?
             USB_XFER_IN : USB_XFER_OUT, epid/2);
-        pthread_mutex_lock(&xdev->mtx);
         if (err == USB_ERR_CANCELLED) {
             if (USB_DATA_GET_ERRCODE(&xfer->data[xfer->head]) ==
                 USB_NAK)
@@ -3316,11 +3286,8 @@ retry:

     if (epid == 1) {
         err = USB_ERR_NOT_STARTED;
-        if (dev->dev_ue->ue_request != NULL) {
-            pthread_mutex_unlock(&xdev->mtx);
+        if (dev->dev_ue->ue_request != NULL)
             err = dev->dev_ue->ue_request(dev->dev_instance, xfer);
-            pthread_mutex_lock(&xdev->mtx);
-        }
         setup_trb = NULL;
     } else {
         /* handle data transfer */
@@ -3691,24 +3658,21 @@ pci_xhci_write(struct vmctx *ctx,

     xdev = dev->arg;

+    pthread_mutex_lock(&xdev->mtx);
     if (offset < XHCI_CAPLEN) /* read only registers */
         UPRINTF(LWRN, "write RO-CAPs offset %ld\r\n", offset);
-    else if (offset < xdev->dboff) {
-        pthread_mutex_lock(&xdev->mtx);
+    else if (offset < xdev->dboff)
         pci_xhci_hostop_write(xdev, offset, value);
-        pthread_mutex_unlock(&xdev->mtx);
-    } else if (offset < xdev->rtsoff) {
-        pci_xhci_async_enqueue(xdev, offset, value);
-    } else if (offset < xdev->rtsend) {
-        pthread_mutex_lock(&xdev->mtx);
+    else if (offset < xdev->rtsoff)
+        pci_xhci_dbregs_write(xdev, offset, value);
+    else if (offset < xdev->rtsend)
         pci_xhci_rtsregs_write(xdev, offset, value);
-        pthread_mutex_unlock(&xdev->mtx);
-    } else if (offset < xdev->regsend) {
-        pthread_mutex_lock(&xdev->mtx);
+    else if (offset < xdev->regsend)
         pci_xhci_excap_write(xdev, offset, value);
-        pthread_mutex_unlock(&xdev->mtx);
-    } else
+    else
         UPRINTF(LWRN, "write invalid offset %ld\r\n", offset);

+    pthread_mutex_unlock(&xdev->mtx);
 }

 static uint64_t
@@ -3937,29 +3901,25 @@ pci_xhci_read(struct vmctx *ctx,
     uint32_t value;

     xdev = dev->arg;
-    if (offset < XHCI_CAPLEN) {
     pthread_mutex_lock(&xdev->mtx);
+    if (offset < XHCI_CAPLEN)
         value = pci_xhci_hostcap_read(xdev, offset);
-        pthread_mutex_unlock(&xdev->mtx);
-    } else if (offset < xdev->dboff) {
-        pthread_mutex_lock(&xdev->mtx);
+    else if (offset < xdev->dboff)
         value = pci_xhci_hostop_read(xdev, offset);
-        pthread_mutex_unlock(&xdev->mtx);
-    } else if (offset < xdev->rtsoff) {
+    else if (offset < xdev->rtsoff)
         value = pci_xhci_dbregs_read(xdev, offset);
-    } else if (offset < xdev->rtsend) {
-        pthread_mutex_lock(&xdev->mtx);
+    else if (offset < xdev->rtsend)
         value = pci_xhci_rtsregs_read(xdev, offset);
-        pthread_mutex_unlock(&xdev->mtx);
-    } else if (offset < xdev->regsend) {
-        pthread_mutex_lock(&xdev->mtx);
+    else if (offset < xdev->regsend)
         value = pci_xhci_excap_read(xdev, offset);
-        pthread_mutex_unlock(&xdev->mtx);
-    } else {
+    else {
         value = 0;
         UPRINTF(LDBG, "read invalid offset %ld\r\n", offset);
     }

+    pthread_mutex_unlock(&xdev->mtx);
+
     switch (size) {
     case 1:
         value &= 0xFF;
@@ -4045,7 +4005,6 @@ pci_xhci_dev_intr(struct usb_hci *hci, int epctx)
     struct xhci_endp_ctx *ep_ctx;
     int dir_in;
     int epid;
-    int ret = 0;

     dir_in = epctx & 0x80;
     epid = epctx & ~0x80;
@@ -4059,11 +4018,10 @@ pci_xhci_dev_intr(struct usb_hci *hci, int epctx)
     xdev = dev->xdev;

     /* check if device is ready; OS has to initialise it */
-    pthread_mutex_lock(&xdev->mtx);
     if (xdev->rtsregs.erstba_p == NULL ||
         (xdev->opregs.usbcmd & XHCI_CMD_RS) == 0 ||
         dev->dev_ctx == NULL)
-        goto out;
+        return 0;

     p = XHCI_PORTREG_PTR(xdev, hci->hci_port);

@@ -4072,18 +4030,16 @@ pci_xhci_dev_intr(struct usb_hci *hci, int epctx)
         p->portsc &= ~XHCI_PS_PLS_MASK;
         p->portsc |= XHCI_PS_PLS_SET(UPS_PORT_LS_RESUME);
         if ((p->portsc & XHCI_PS_PLC) != 0)
-            goto out;
+            return 0;

         p->portsc |= XHCI_PS_PLC;

         pci_xhci_set_evtrb(&evtrb, hci->hci_port,
             XHCI_TRB_ERROR_SUCCESS,
             XHCI_TRB_EVENT_PORT_STS_CHANGE);

         if (pci_xhci_insert_event(xdev, &evtrb, 0) != 0) {
             UPRINTF(LFTL, "Failed to inject port status change event!\r\n");
-            ret = -ENAVAIL;
-            goto out;
+            return -ENAVAIL;
         }
     }

@@ -4092,15 +4048,14 @@ pci_xhci_dev_intr(struct usb_hci *hci, int epctx)
     if ((ep_ctx->dwEpCtx0 & 0x7) == XHCI_ST_EPCTX_DISABLED) {
         UPRINTF(LWRN, "device interrupt on disabled endpoint %d\r\n",
             epid);
-        goto out;
+        return 0;
     }

     UPRINTF(LDBG, "device interrupt on endpoint %d\r\n", epid);

     pci_xhci_device_doorbell(xdev, hci->hci_port, epid, 0);

-out:
-    pthread_mutex_unlock(&xdev->mtx);
-    return ret;
+    return 0;
 }

 static int
@@ -4172,6 +4127,11 @@ pci_xhci_parse_bus_port(struct pci_xhci_vdev *xdev, char *opts)
         goto errout;
     }

+    if (bus >= USB_NATIVE_NUM_BUS || port >= USB_NATIVE_NUM_PORT) {
+        rc = -1;
+        goto errout;
+    }
+
     if (!usb_native_is_bus_existed(bus) ||
         !usb_native_is_port_existed(bus, port)) {
         rc = -2;
@@ -4458,55 +4418,6 @@ pci_xhci_isoc_handler(void *arg, uint64_t param)
             ? "under" : "over", pdata->slot, pdata->epnum);
 }

-static void
-pci_xhci_async_enqueue(struct pci_xhci_vdev *xdev, uint64_t offset, uint64_t value)
-{
-    struct pci_xhci_async_request_node *request;
-
-    request = malloc(sizeof(struct pci_xhci_async_request_node));
-    if (request == NULL) {
-        UPRINTF(LFTL, "%s: malloc memory fail\r\n", __func__);
-        return;
-    }
-    request->offset = offset;
-    request->value = value;
-
-    pthread_mutex_lock(&xdev->async_tmx);
-    if (STAILQ_EMPTY(&xdev->async_head)) {
-        STAILQ_INSERT_HEAD(&xdev->async_head, request, link);
-    } else {
-        STAILQ_INSERT_TAIL(&xdev->async_head, request, link);
-    }
-    pthread_cond_signal(&xdev->async_cond);
-    pthread_mutex_unlock(&xdev->async_tmx);
-}
-
-static void *
-pci_xhci_ansyc_thread(void *data)
-{
-    struct pci_xhci_vdev *xdev;
-    struct pci_xhci_async_request_node *request;
-
-    xdev = data;
-    pthread_mutex_lock(&xdev->async_tmx);
-    while (xdev->async_transfer || !STAILQ_EMPTY(&xdev->async_head)) {
-        if(STAILQ_EMPTY(&xdev->async_head))
-            pthread_cond_wait(&xdev->async_cond, &xdev->async_tmx);
-        if ((request = STAILQ_FIRST(&xdev->async_head)) == NULL) {
-            continue;
-        }
-        pthread_mutex_unlock(&xdev->async_tmx);
-        pthread_mutex_lock(&xdev->mtx);
-        pci_xhci_dbregs_write(xdev, request->offset, request->value);
-        pthread_mutex_unlock(&xdev->mtx);
-        pthread_mutex_lock(&xdev->async_tmx);
-        STAILQ_REMOVE_HEAD(&xdev->async_head, link);
-        free(request);
-    }
-    pthread_mutex_unlock(&xdev->async_tmx);
-    return NULL;
-}
-
 static int
 pci_xhci_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
 {
@@ -4637,15 +4548,6 @@ pci_xhci_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
     if (error)
         goto done;

-    xdev->async_transfer = true;
-    pthread_cond_init(&xdev->async_cond, NULL);
-    pthread_mutex_init(&xdev->async_tmx, NULL);
-    STAILQ_INIT(&xdev->async_head);
-    error = pthread_create(&xdev->async_thread, NULL, pci_xhci_ansyc_thread,
-        (void *)xdev);
-    if (error)
-        goto done;
-
     xhci_in_use = 1;
 done:
     if (error) {
@@ -4701,14 +4603,6 @@ pci_xhci_deinit(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
     pthread_join(xdev->vbdp_thread, NULL);
     sem_close(&xdev->vbdp_sem);

-    pthread_mutex_lock(&xdev->async_tmx);
-    xdev->async_transfer = false;
-    pthread_cond_signal(&xdev->async_cond);
-    pthread_mutex_unlock(&xdev->async_tmx);
-    pthread_join(xdev->async_thread, NULL);
-    pthread_cond_destroy(&xdev->async_cond);
-    pthread_mutex_destroy(&xdev->async_tmx);
-
     pthread_mutex_destroy(&xdev->mtx);
     free(xdev);
     xhci_in_use = 0;
@@ -93,10 +93,6 @@
 #define RTCT_OFFSET 0xF00
 #define DSDT_OFFSET 0x1100

-/* Define the byte offset and byte length in FACS table */
-#define WAKING_VECTOR_OFFSET 12
-#define WAKING_VECTOR_LEN 4
-
 #define ASL_TEMPLATE "dm.XXXXXXX"
 #define ASL_SUFFIX ".aml"

@@ -883,7 +879,6 @@ basl_fwrite_dsdt(FILE *fp, struct vmctx *ctx)

     acpi_dev_write_dsdt(ctx);

-    osc_write_ospm_dsdt(ctx, basl_ncpu);
     pm_write_dsdt(ctx, basl_ncpu);

     dsdt_line("}");
@@ -1117,18 +1112,6 @@ get_acpi_table_length(void)
     return ACPI_LENGTH;
 }

-uint32_t
-get_acpi_wakingvector_offset(void)
-{
-    return basl_acpi_base + FACS_OFFSET + WAKING_VECTOR_OFFSET;
-}
-
-uint32_t
-get_acpi_wakingvector_length(void)
-{
-    return WAKING_VECTOR_LEN;
-}
-
 int
 get_default_iasl_compiler(void)
 {
@@ -22,15 +22,15 @@ static inline int get_vcpu_pm_info(struct vmctx *ctx, int vcpu_id,
     return vm_get_cpu_state(ctx, pm_info);
 }

-static inline int get_vcpu_px_cnt(struct vmctx *ctx, int vcpu_id, uint8_t *px_cnt)
+static inline uint8_t get_vcpu_px_cnt(struct vmctx *ctx, int vcpu_id)
 {
-    uint64_t px_cnt_u64;
-    int ret;
+    uint64_t px_cnt;

-    ret = get_vcpu_pm_info(ctx, vcpu_id, ACRN_PMCMD_GET_PX_CNT, &px_cnt_u64);
-    *px_cnt = (uint8_t)px_cnt_u64;
+    if (get_vcpu_pm_info(ctx, vcpu_id, ACRN_PMCMD_GET_PX_CNT, &px_cnt)) {
+        return 0;
+    }

-    return ret;
+    return (uint8_t)px_cnt;
 }

 uint8_t get_vcpu_cx_cnt(struct vmctx *ctx, int vcpu_id)
@@ -259,13 +259,9 @@ static int dsdt_write_pss(struct vmctx *ctx, int vcpu_id)
     uint8_t vcpu_px_cnt;
     int i;
     struct acrn_pstate_data *vcpu_px_data;
-    int ret;

-    ret = get_vcpu_px_cnt(ctx, vcpu_id, &vcpu_px_cnt);
-    /* vcpu_px_cnt = 0 Indicates vcpu supports continuous pstate.
-     * Then we should write _CPC instate of _PSS
-     */
-    if (ret || !vcpu_px_cnt) {
+    vcpu_px_cnt = get_vcpu_px_cnt(ctx, vcpu_id);
+    if (!vcpu_px_cnt) {
         return -1;
     }

@@ -320,49 +316,10 @@ static int dsdt_write_pss(struct vmctx *ctx, int vcpu_id)
     return 0;
 }

-/* _CPC: Continuous Performance Control
- * Hard code a V3 CPC table, describing HWP register interface.
- */
-static void dsdt_write_cpc(void)
-{
-    dsdt_line("");
-    dsdt_line(" Method (_CPC, 0, NotSerialized)");
-    dsdt_line(" {");
-    dsdt_line(" Return (Package (0x17)");
-    dsdt_line(" {");
-    dsdt_line(" 0x17,");
-    dsdt_line(" 0x03,");
-    dsdt_line(" ResourceTemplate() {Register(FFixedHW, 0x08, 0x00, 0x0000000000000771, 0x04, )},");
-    dsdt_line(" ResourceTemplate() {Register(FFixedHW, 0x08, 0x08, 0x00000000000000CE, 0x04, )},");
-    dsdt_line(" ResourceTemplate() {Register(FFixedHW, 0x08, 0x10, 0x0000000000000771, 0x04, )},");
-    dsdt_line(" ResourceTemplate() {Register(FFixedHW, 0x08, 0x18, 0x0000000000000771, 0x04, )},");
-    dsdt_line(" ResourceTemplate() {Register(FFixedHW, 0x08, 0x08, 0x0000000000000771, 0x04, )},");
-    dsdt_line(" ResourceTemplate() {Register(FFixedHW, 0x08, 0x10, 0x0000000000000774, 0x04, )},");
-    dsdt_line(" ResourceTemplate() {Register(FFixedHW, 0x08, 0x00, 0x0000000000000774, 0x04, )},");
-    dsdt_line(" ResourceTemplate() {Register(FFixedHW, 0x08, 0x08, 0x0000000000000774, 0x04, )},");
-    dsdt_line(" ResourceTemplate() {Register(SystemMemory, 0x00, 0x00, 0x0000000000000000, , )},");
-    dsdt_line(" ResourceTemplate() {Register(SystemMemory, 0x00, 0x00, 0x0000000000000000, , )},");
-    dsdt_line(" ResourceTemplate() {Register(SystemMemory, 0x00, 0x00, 0x0000000000000000, , )},");
-    dsdt_line(" ResourceTemplate() {Register(FFixedHW, 0x40, 0x00, 0x00000000000000E7, 0x04, )},");
-    dsdt_line(" ResourceTemplate() {Register(FFixedHW, 0x40, 0x00, 0x00000000000000E8, 0x04, )},");
-    dsdt_line(" ResourceTemplate() {Register(FFixedHW, 0x02, 0x01, 0x0000000000000777, 0x04, )},");
-    dsdt_line(" ResourceTemplate() {Register(FFixedHW, 0x01, 0x00, 0x0000000000000770, 0x04, )},");
-    dsdt_line(" One,");
-    dsdt_line(" ResourceTemplate() {Register(FFixedHW, 0x0A, 0x20, 0x0000000000000774, 0x04, )},");
-    dsdt_line(" ResourceTemplate() {Register(FFixedHW, 0x08, 0x18, 0x0000000000000774, 0x04, )},");
-    dsdt_line(" Zero,");
-    dsdt_line(" Zero,");
-    dsdt_line(" Zero");
-    dsdt_line(" })");
-    dsdt_line(" }");
-}
-
 void pm_write_dsdt(struct vmctx *ctx, int ncpu)
 {
     int i;
     int ret;
-    bool is_cpc = false;
-    uint8_t px_cnt;

     /* Scope (_PR) */
     dsdt_line("");
@@ -407,85 +364,6 @@ void pm_write_dsdt(struct vmctx *ctx, int ncpu)
         }
     }

-        ret = get_vcpu_px_cnt(ctx, i, &px_cnt);
-        if (ret == 0 && px_cnt == 0) {
-            /* px_cnt = 0 Indicates vcpu supports continuous pstate.
-             * Then we can write _CPC
-             */
-            is_cpc = true;
-        }
-
-        if (is_cpc) {
-            if (i == 0) {
-                dsdt_write_cpc();
-            } else {
-                dsdt_line(" Method (_CPC, 0, NotSerialized)");
-                dsdt_line(" {");
-                dsdt_line(" Return (^^PR00._CPC)");
-                dsdt_line(" }");
-                dsdt_line("");
-            }
-        }
-
         dsdt_line(" }");
     }
 }

-/* _OSC: Operating System Capabilities
- * Currently only support CPPC v2 capability.
- * CPPC v2 capability: revision 2 of the _CPC object.
- * If all vcpus don't support _CPC object, no need to add _OSC in DSDT.
- */
-void osc_write_ospm_dsdt(struct vmctx *ctx, int ncpu)
-{
-    int ret;
-    bool support_cpc = false;
-    uint8_t px_cnt;
-
-    /* check px_cnt on vBSP */
-    ret = get_vcpu_px_cnt(ctx, 0, &px_cnt);
-    if (ret == 0 && px_cnt == 0) {
-        /* px_cnt = 0 Indicates vcpu supports continuous pstate.
-         */
-        support_cpc = true;
-    }
-    if (support_cpc) {
-        /* Scope (_SB._OSC) */
-        dsdt_line("");
-        dsdt_line(" Scope (_SB)");
-        dsdt_line(" {");
-        dsdt_line(" Method (_OSC, 4, NotSerialized) // _OSC: Operating System Capabilities");
-        dsdt_line(" {");
-        dsdt_line(" CreateDWordField (Arg3, 0x00, STS0)");
-        dsdt_line(" CreateDWordField (Arg3, 0x04, CAP0)");
-        dsdt_line(" If ((Arg0 == ToUUID (\"0811b06e-4a27-44f9-8d60-3cbbc22e7b48\") /* Platform-wide OSPM Capabilities */))");
-        dsdt_line(" {");
-        dsdt_line(" If ((Arg1 == One))");
-        dsdt_line(" {");
-        dsdt_line(" If ((CAP0 & 0x40))");
-        dsdt_line(" {");
-        dsdt_line(" CAP0 &= 0x00000040");
-        dsdt_line(" }");
-        dsdt_line(" Else");
-        dsdt_line(" {");
-        dsdt_line(" STS0 &= 0xFFFFFF00");
-        dsdt_line(" STS0 |= 0x02");
-        dsdt_line(" }");
-        dsdt_line(" }");
-        dsdt_line(" Else");
-        dsdt_line(" {");
-        dsdt_line(" STS0 &= 0xFFFFFF00");
-        dsdt_line(" STS0 |= 0x0A");
-        dsdt_line(" }");
-        dsdt_line(" }");
-        dsdt_line(" Else");
-        dsdt_line(" {");
-        dsdt_line(" STS0 &= 0xFFFFFF00");
-        dsdt_line(" STS0 |= 0x06");
-        dsdt_line(" }");
-        dsdt_line(" Return (Arg3)");
-        dsdt_line(" }");
-        dsdt_line(" }");
-        dsdt_line("");
-    }
-}
@@ -202,7 +202,7 @@ vhpet_counter(struct vhpet *vhpet, struct timespec *nowptr)
     val = vhpet->countbase;

     if (vhpet_counter_enabled(vhpet)) {
-        if (clock_gettime(CLOCK_MONOTONIC, &now))
+        if (clock_gettime(CLOCK_REALTIME, &now))
             pr_dbg("clock_gettime returned: %s", strerror(errno));

         /* delta = now - countbase_ts */
@@ -225,7 +225,7 @@ vhpet_counter(struct vhpet *vhpet, struct timespec *nowptr)
          */
         if (nowptr) {
             pr_warn("vhpet unexpected nowptr");
-            if (clock_gettime(CLOCK_MONOTONIC, nowptr))
+            if (clock_gettime(CLOCK_REALTIME, nowptr))
                 pr_dbg("clock_gettime returned: %s", strerror(errno));
         }
     }
@@ -366,7 +366,7 @@ vhpet_timer_handler(void *a, uint64_t nexp)

     vhpet_timer_interrupt(vhpet, n);

-    if (clock_gettime(CLOCK_MONOTONIC, &now))
+    if (clock_gettime(CLOCK_REALTIME, &now))
         pr_dbg("clock_gettime returned: %s", strerror(errno));

     if (acrn_timer_gettime(vhpet_tmr(vhpet, n), &tmrts))
@@ -548,7 +548,7 @@ vhpet_start_counting(struct vhpet *vhpet)
 {
     int i;

-    if (clock_gettime(CLOCK_MONOTONIC, &vhpet->countbase_ts))
+    if (clock_gettime(CLOCK_REALTIME, &vhpet->countbase_ts))
         pr_dbg("clock_gettime returned: %s", strerror(errno));

     /* Restart the timers based on the main counter base value */
@@ -639,7 +639,7 @@ vhpet_timer_update_config(struct vhpet *vhpet, int n, uint64_t data,
      * - Timer remains in periodic mode
      */
     if (!vhpet_timer_enabled(vhpet, n)) {
-        if (clock_gettime(CLOCK_MONOTONIC, &now))
+        if (clock_gettime(CLOCK_REALTIME, &now))
             pr_dbg("clock_gettime returned: %s", strerror(errno));
         vhpet_stop_timer(vhpet, n, &now, true);
     } else if (!(oldval & (HPET_TCNF_TYPE | HPET_TCNF_INT_ENB)) ||
@@ -998,7 +998,7 @@ vhpet_init(struct vmctx *ctx)
             arg->timer_num = i;

             tmr = &vhpet->timer[i].tmrlst[j].t;
-            tmr->clockid = CLOCK_MONOTONIC;
+            tmr->clockid = CLOCK_REALTIME;
             error = acrn_timer_init(tmr, vhpet_timer_handler, arg);

             if (error) {
@@ -107,7 +107,7 @@ ticks_elapsed_since(const struct timespec *since)
 {
     struct timespec ts;

-    if (clock_gettime(CLOCK_MONOTONIC, &ts))
+    if (clock_gettime(CLOCK_REALTIME, &ts))
         pr_dbg("clock_gettime returned: %s", strerror(errno));

     if (timespeccmp(&ts, since, <=))
@@ -192,7 +192,7 @@ pit_load_ce(struct channel *c)
     c->nullcnt = false;
     c->crbyte = 0;

-    if (clock_gettime(CLOCK_MONOTONIC, &c->start_ts))
+    if (clock_gettime(CLOCK_REALTIME, &c->start_ts))
         pr_dbg("clock_gettime returned: %s", strerror(errno));

     if (c->initial == 0 || c->initial > 0x10000) {
@@ -330,7 +330,7 @@ pit_timer_start_cntr0(struct vpit *vpit)
     sigevt.sigev_notify = SIGEV_THREAD;
     sigevt.sigev_notify_function = vpit_timer_handler;

-    if (timer_create(CLOCK_MONOTONIC, &sigevt, &c->timer_id))
+    if (timer_create(CLOCK_REALTIME, &sigevt, &c->timer_id))
         pr_dbg("timer_create returned: %s", strerror(errno));

     vpit_timer_arg[c->timer_idx].active = true;
@@ -360,7 +360,7 @@ pit_update_counter(struct vpit *vpit, struct channel *c, bool latch,

         c->initial = PIT_HZ_TO_TICKS(100);
         delta_ticks = 0;
-        if (clock_gettime(CLOCK_MONOTONIC, &c->start_ts))
+        if (clock_gettime(CLOCK_REALTIME, &c->start_ts))
             pr_dbg("clock_gettime returned: %s", strerror(errno));
     } else
         delta_ticks = ticks_elapsed_since(&c->start_ts);
@ -41,7 +41,6 @@
|
|||||||
#include "timer.h"
|
#include "timer.h"
|
||||||
#include "acpi.h"
|
#include "acpi.h"
|
||||||
#include "lpc.h"
|
#include "lpc.h"
|
||||||
#include "vm_event.h"
|
|
||||||
|
|
||||||
#include "log.h"
|
#include "log.h"
|
||||||
|
|
||||||
@ -81,7 +80,6 @@ struct vrtc {
|
|||||||
u_int addr; /* RTC register to read or write */
|
u_int addr; /* RTC register to read or write */
|
||||||
time_t base_uptime;
|
time_t base_uptime;
|
||||||
time_t base_rtctime;
|
time_t base_rtctime;
|
||||||
time_t halted_rtctime;
|
|
||||||
struct rtcdev rtcdev;
|
struct rtcdev rtcdev;
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -222,17 +220,6 @@ update_enabled(struct vrtc *vrtc)
|
|||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* monotonic time is number of seconds that the system has been running
|
|
||||||
* since it was booted. It is none setable. It is more suitable to be used
|
|
||||||
* as base time.
|
|
||||||
*/
|
|
||||||
static time_t monotonic_time(void)
|
|
||||||
{
|
|
||||||
struct timespec ts;
|
|
||||||
clock_gettime(CLOCK_MONOTONIC, &ts);
|
|
||||||
return ts.tv_sec;
|
|
||||||
}
|
|
||||||
|
|
||||||
static time_t
|
static time_t
|
||||||
vrtc_curtime(struct vrtc *vrtc, time_t *basetime)
|
vrtc_curtime(struct vrtc *vrtc, time_t *basetime)
|
||||||
{
|
{
|
||||||
@ -242,7 +229,7 @@ vrtc_curtime(struct vrtc *vrtc, time_t *basetime)
|
|||||||
t = vrtc->base_rtctime;
|
t = vrtc->base_rtctime;
|
||||||
*basetime = vrtc->base_uptime;
|
*basetime = vrtc->base_uptime;
|
||||||
if (update_enabled(vrtc)) {
|
if (update_enabled(vrtc)) {
|
||||||
now = monotonic_time();
|
now = time(NULL);
|
||||||
delta = now - vrtc->base_uptime;
|
delta = now - vrtc->base_uptime;
|
||||||
secs = delta;
|
secs = delta;
|
||||||
t += secs;
|
t += secs;
|
||||||
@ -727,18 +714,6 @@ vrtc_set_reg_c(struct vrtc *vrtc, uint8_t newval)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
|
||||||
send_rtc_chg_event(time_t newtime, time_t lasttime)
|
|
||||||
{
|
|
||||||
struct vm_event event;
|
|
||||||
struct rtc_change_event_data *data = (struct rtc_change_event_data *)event.event_data;
|
|
||||||
|
|
||||||
event.type = VM_EVENT_RTC_CHG;
|
|
||||||
data->delta_time = newtime - lasttime;
|
|
||||||
data->last_time = lasttime;
|
|
||||||
dm_send_vm_event(&event);
|
|
||||||
}
|
|
||||||
|
|
||||||
static int
|
static int
|
||||||
vrtc_set_reg_b(struct vrtc *vrtc, uint8_t newval)
|
vrtc_set_reg_b(struct vrtc *vrtc, uint8_t newval)
|
||||||
{
|
{
|
||||||
@ -761,23 +736,16 @@ vrtc_set_reg_b(struct vrtc *vrtc, uint8_t newval)
|
|||||||
if (changed & RTCSB_HALT) {
|
if (changed & RTCSB_HALT) {
|
||||||
if ((newval & RTCSB_HALT) == 0) {
|
if ((newval & RTCSB_HALT) == 0) {
|
||||||
rtctime = rtc_to_secs(vrtc);
|
rtctime = rtc_to_secs(vrtc);
|
||||||
basetime = monotonic_time();
|
basetime = time(NULL);
|
||||||
if (rtctime == VRTC_BROKEN_TIME) {
|
if (rtctime == VRTC_BROKEN_TIME) {
|
||||||
if (rtc_flag_broken_time)
|
if (rtc_flag_broken_time)
|
||||||
return -1;
|
return -1;
|
||||||
} else {
|
|
||||||
/* send rtc change event if rtc time changed during halt */
|
|
||||||
if (vrtc->halted_rtctime != VRTC_BROKEN_TIME &&
|
|
||||||
rtctime != vrtc->halted_rtctime) {
|
|
||||||
send_rtc_chg_event(rtctime, vrtc->halted_rtctime);
|
|
||||||
vrtc->halted_rtctime = VRTC_BROKEN_TIME;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
curtime = vrtc_curtime(vrtc, &basetime);
|
curtime = vrtc_curtime(vrtc, &basetime);
|
||||||
if (curtime != vrtc->base_rtctime)
|
if (curtime != vrtc->base_rtctime)
|
||||||
return -1;
|
return -1;
|
||||||
vrtc->halted_rtctime = curtime;
|
|
||||||
/*
|
/*
|
||||||
* Force a refresh of the RTC date/time fields so
|
* Force a refresh of the RTC date/time fields so
|
||||||
* they reflect the time right before the guest set
|
- * they reflect the time right before the guest set
@@ -842,7 +810,7 @@ vrtc_set_reg_a(struct vrtc *vrtc, uint8_t newval)
 * maintain the illusion that the RTC date/time was frozen
 * while the dividers were disabled.
 */
-vrtc->base_uptime = monotonic_time();
+vrtc->base_uptime = time(NULL);
 RTC_DEBUG("RTC divider out of reset at %#lx/%#lx\n",
 vrtc->base_rtctime, vrtc->base_uptime);
 } else {
@@ -914,12 +882,6 @@ vrtc_addr_handler(struct vmctx *ctx, int vcpu, int in, int port,
 return 0;
 }

-static inline bool vrtc_is_time_register(uint32_t offset)
-{
-return ((offset == RTC_SEC) || (offset == RTC_MIN) || (offset == RTC_HRS) || (offset == RTC_DAY)
-|| (offset == RTC_MONTH) || (offset == RTC_YEAR) || (offset == RTC_CENTURY));
-}
-
 int
 vrtc_data_handler(struct vmctx *ctx, int vcpu, int in, int port,
 int bytes, uint32_t *eax, void *arg)
@@ -998,23 +960,14 @@ vrtc_data_handler(struct vmctx *ctx, int vcpu, int in, int port,
 }

 /*
- * Some guests (e.g. OpenBSD) write the century byte outside of RTCSB_HALT,
- * and some guests (e.g. WaaG) write all date/time outside of RTCSB_HALT,
- * so re-calculate the RTC date/time.
+ * XXX some guests (e.g. OpenBSD) write the century byte
+ * outside of RTCSB_HALT so re-calculate the RTC date/time.
 */
-if (vrtc_is_time_register(offset) && !rtc_halted(vrtc)) {
-time_t last_time = curtime;
+if (offset == RTC_CENTURY && !rtc_halted(vrtc)) {
 curtime = rtc_to_secs(vrtc);
-error = vrtc_time_update(vrtc, curtime, monotonic_time());
-if ((error != 0) || (curtime == VRTC_BROKEN_TIME && rtc_flag_broken_time)) {
+error = vrtc_time_update(vrtc, curtime, time(NULL));
+if ((error != 0) || (curtime == VRTC_BROKEN_TIME && rtc_flag_broken_time))
 error = -1;
-} else {
-/* We don't know when the Guest has finished the RTC change action.
- * So send an event each time the date/time regs has been updated.
- * The event handler will process those events.
- */
-send_rtc_chg_event(rtc_to_secs(vrtc), last_time);
-}
 }
 }

@@ -1029,7 +982,7 @@ vrtc_set_time(struct vrtc *vrtc, time_t secs)
 int error;

 pthread_mutex_lock(&vrtc->mtx);
-error = vrtc_time_update(vrtc, secs, monotonic_time());
+error = vrtc_time_update(vrtc, secs, time(NULL));
 pthread_mutex_unlock(&vrtc->mtx);

 if (error)
@@ -1040,17 +993,6 @@ vrtc_set_time(struct vrtc *vrtc, time_t secs)
 return error;
 }

-/* set CMOS shutdown status register (index 0xF) as S3_resume(0xFE)
- * BIOS will read it and start S3 resume at POST Entry
- */
-void vrtc_suspend(struct vmctx *ctx)
-{
-struct vrtc *vrtc = ctx->vrtc;
-struct rtcdev *rtc = &vrtc->rtcdev;
-
-*((uint8_t *)rtc + 0xF) = 0xFE;
-}
-
 int
 vrtc_init(struct vmctx *ctx)
 {
@@ -1145,7 +1087,6 @@ vrtc_init(struct vmctx *ctx)
 /* Reset the index register to a safe value. */
 vrtc->addr = RTC_STATUSD;

-vrtc->halted_rtctime = VRTC_BROKEN_TIME;
 /*
 * Initialize RTC time to 00:00:00 Jan 1, 1970 if curtime = 0
 */
@@ -1154,16 +1095,16 @@ vrtc_init(struct vmctx *ctx)

 pthread_mutex_lock(&vrtc->mtx);
 vrtc->base_rtctime = VRTC_BROKEN_TIME;
-vrtc_time_update(vrtc, curtime, monotonic_time());
+vrtc_time_update(vrtc, curtime, time(NULL));
 secs_to_rtc(curtime, vrtc, 0);
 pthread_mutex_unlock(&vrtc->mtx);

 /* init periodic interrupt timer */
-vrtc->periodic_timer.clockid = CLOCK_MONOTONIC;
+vrtc->periodic_timer.clockid = CLOCK_REALTIME;
 acrn_timer_init(&vrtc->periodic_timer, vrtc_periodic_timer, vrtc);

 /* init update interrupt timer(1s)*/
-vrtc->update_timer.clockid = CLOCK_MONOTONIC;
+vrtc->update_timer.clockid = CLOCK_REALTIME;
 acrn_timer_init(&vrtc->update_timer, vrtc_update_timer, vrtc);
 vrtc_start_timer(&vrtc->update_timer, 1, 0);

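The recurring change in these vRTC hunks swaps `monotonic_time()` for `time(NULL)` and `CLOCK_MONOTONIC` for `CLOCK_REALTIME` when tracking `base_uptime`. Below is a hedged sketch of the kind of helper the `monotonic_time()` side implies and how the base values could be combined; the helper body and the `vrtc_current_secs()` arithmetic are assumptions drawn from the diff context, not quotes of the ACRN sources.

```c
#include <time.h>

/* Hypothetical helper: seconds from a clock that never jumps backwards.
 * Unlike time(NULL), CLOCK_MONOTONIC is immune to host wall-clock steps
 * (NTP, manual changes), so the guest-visible RTC derived from it cannot
 * move backwards either. */
static time_t monotonic_time(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec;
}

/* Usage sketch: deriving the guest RTC time from the saved base pair,
 * mirroring the base_rtctime/base_uptime fields seen in the hunk above. */
static time_t vrtc_current_secs(time_t base_rtctime, time_t base_uptime)
{
	return base_rtctime + (monotonic_time() - base_uptime);
}
```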
@@ -29,7 +29,6 @@

 #include <stdio.h>
 #include <stdlib.h>
-#include <signal.h>
 #include <fcntl.h>
 #include <termios.h>
 #include <unistd.h>
@@ -53,12 +52,6 @@
 #define COM1_IRQ 4
 #define COM2_BASE 0x2F8
 #define COM2_IRQ 3
-#define COM3_BASE 0x3E8
-#define COM3_IRQ 6
-#define COM4_BASE 0x2E8
-#define COM4_IRQ 7
-#define COM5_BASE 0x9000 /*for S5 connection*/
-#define COM5_IRQ 10

 #define DEFAULT_RCLK 1843200
 #define DEFAULT_BAUD 9600
@@ -90,13 +83,8 @@ static struct {
 } uart_lres[] = {
 { COM1_BASE, COM1_IRQ, false},
 { COM2_BASE, COM2_IRQ, false},
-{ COM3_BASE, COM3_IRQ, false},
-{ COM4_BASE, COM4_IRQ, false},
-{ COM5_BASE, COM5_IRQ, false},
 };

-static bool stdio_ctrl_a_pressed = false;
-
 #define UART_NLDEVS (ARRAY_SIZE(uart_lres))

 enum uart_be_type {
@@ -630,18 +618,6 @@ uart_legacy_alloc(int which, int *baseaddr, int *irq)
 return 0;
 }

-int
-uart_legacy_reinit_res(int which, int baseaddr, int irq)
-{
-if (which < 0 || which >= UART_NLDEVS || uart_lres[which].inuse)
-return -1;
-
-uart_lres[which].baseaddr = baseaddr;
-uart_lres[which].irq = irq;
-
-return 0;
-}
-
 void
 uart_legacy_dealloc(int which)
 {
@@ -720,18 +696,6 @@ uart_backend_read(struct uart_backend *be)

 switch (be->be_type) {
 case UART_BE_STDIO:
-rc = read(be->fd, &rb, 1);
-if (rb == 0x01) { // Ctrl-a
-DPRINTF(("%s: Got Ctrl-a\n", __func__));
-stdio_ctrl_a_pressed = true;
-} else if (stdio_ctrl_a_pressed) {
-if (rb == 'x') {
-DPRINTF(("%s: Got Ctrl-a x\n", __func__));
-kill(getpid(), SIGINT);
-}
-stdio_ctrl_a_pressed = false;
-}
-break;
 case UART_BE_TTY:
 /* fd is used to read */
 rc = read(be->fd, &rb, 1);
@@ -99,8 +99,6 @@ struct acpi_madt_local_apic {
 void acpi_table_enable(int num);
 uint32_t get_acpi_base(void);
 uint32_t get_acpi_table_length(void);
-uint32_t get_acpi_wakingvector_offset(void);
-uint32_t get_acpi_wakingvector_length(void);

 struct vmctx;

@@ -127,6 +125,4 @@ int acrn_parse_iasl(char *arg);
 int get_iasl_compiler(void);
 int check_iasl_version(void);

-void osc_write_ospm_dsdt(struct vmctx *ctx, int ncpu);
-
 #endif /* _ACPI_H_ */
@@ -39,68 +39,19 @@
 #include <sys/uio.h>
 #include <sys/unistd.h>

-#include "iothread.h"
-
 #define BLOCKIF_IOV_MAX 256 /* not practical to be IOV_MAX */

-/*
- * |<------------------------------------- bounced_size --------------------------------->|
- * |<-------- alignment ------->| |<-------- alignment ------->|
- * |<--- head --->|<------------------------ org_size ---------------------->|<-- tail -->|
- * | | | | | |
- * *--------------$-------------*----------- ... ------------*---------------$------------*
- * | | | | | |
- * | start end |
- * aligned_dn_start aligned_dn_end
- * |__________head_area_________| |__________tail_area_________|
- * |<--- head --->| | |<-- end_rmd -->|<-- tail -->|
- * |<-------- alignment ------->| |<-------- alignment ------->|
- *
- */
-struct br_align_info {
-uint32_t alignment;
-
-bool is_iov_base_aligned;
-bool is_iov_len_aligned;
-bool is_offset_aligned;
-
-/*
- * Needs to convert the misaligned request to an aligned one when
- * O_DIRECT is used, but the request (either buffer address/length, or offset) is not aligned.
- */
-bool need_conversion;
-
-uint32_t head;
-uint32_t tail;
-uint32_t org_size;
-uint32_t bounced_size;
-
-off_t aligned_dn_start;
-off_t aligned_dn_end;
-
-/*
- * A bounce_iov for aligned read/write access.
- * bounce_iov.iov_base is aligned to @alignment
- * bounce_iov.iov_len is @bounced_size (@head + @org_size + @tail)
- */
-struct iovec bounce_iov;
-};
-
 struct blockif_req {
 struct iovec iov[BLOCKIF_IOV_MAX];
 int iovcnt;
 off_t offset;
 ssize_t resid;
 void (*callback)(struct blockif_req *req, int err);
 void *param;
-int qidx;
-
-struct br_align_info align_info;
 };

 struct blockif_ctxt;
-struct blockif_ctxt *blockif_open(const char *optstr, const char *ident, int queue_num,
-struct iothreads_info *iothrds_info);
+struct blockif_ctxt *blockif_open(const char *optstr, const char *ident);
 off_t blockif_size(struct blockif_ctxt *bc);
 void blockif_chs(struct blockif_ctxt *bc, uint16_t *c, uint8_t *h,
 uint8_t *s);
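The removed `br_align_info` block describes how a misaligned O_DIRECT request is widened into an aligned bounce buffer. As a rough illustration of the arithmetic the diagram implies, here is a hedged sketch that derives `head`, `tail`, and the aligned boundaries from an arbitrary offset and length; the field names follow the struct above, but the exact ACRN computation may differ.

```c
#include <stdint.h>
#include <sys/types.h>

/* Sketch: widen [offset, offset + org_size) out to alignment boundaries,
 * as the bounced_size/head/tail diagram above suggests. */
struct align_calc {
	off_t aligned_dn_start;   /* offset rounded down to the alignment */
	off_t aligned_dn_end;     /* request end rounded up to the alignment */
	uint32_t head;            /* padding bytes before the original start */
	uint32_t tail;            /* padding bytes after the original end */
	uint32_t bounced_size;    /* head + org_size + tail */
};

static void compute_alignment(off_t offset, uint32_t org_size,
			      uint32_t alignment, struct align_calc *c)
{
	off_t end = offset + org_size;

	c->aligned_dn_start = (offset / alignment) * alignment;
	c->aligned_dn_end = ((end + alignment - 1) / alignment) * alignment;
	c->head = (uint32_t)(offset - c->aligned_dn_start);
	c->tail = (uint32_t)(c->aligned_dn_end - end);
	c->bounced_size = c->head + org_size + c->tail;
}
```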
@@ -30,8 +30,6 @@
 #define _DM_H_

 #include <stdbool.h>
-#include <sys/resource.h>

 #include "types.h"
 #include "dm_string.h"
 #include "acrn_common.h"
@@ -53,15 +51,6 @@ extern bool pt_tpm2;
 extern bool ssram;
 extern bool vtpm2;
 extern bool is_winvm;
-extern bool ovmf_loaded;
-
-enum acrn_thread_prio {
-PRIO_VCPU = PRIO_MIN,
-PRIO_IOTHREAD = PRIO_MIN,
-PRIO_VIRTIO_SND,
-PRIO_VIRTIO_IPU,
-PRIO_VIRTIO_GPU
-};

 /**
 * @brief Convert guest physical address to host virtual address
@@ -74,9 +63,7 @@ enum acrn_thread_prio {
 */
 void *paddr_guest2host(struct vmctx *ctx, uintptr_t gaddr, size_t len);
 int virtio_uses_msix(void);
-int guest_cpu_num(void);
 size_t high_bios_size(void);
 void init_debugexit(void);
 void deinit_debugexit(void);
-void set_thread_priority(int priority, bool reset_on_fork);
 #endif
@@ -1,273 +0,0 @@
-/*
- * Copyright (C) 2023 Intel Corporation.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Device ids are available in linux kernel source tree and Intel website.
- * https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/include/drm/i915_pciids.h
- * https://dgpu-docs.intel.com/devices/hardware-table.html
- */
-
-#ifndef _IGD_PCIIDS_H_
-#define _IGD_PCIIDS_H_
-
-#include <stdint.h>
-
-struct igd_device {
-uint16_t device;
-int gen;
-};
-
-#define IGD_DEVICE_ENTRY(id, gen) \
-{ id, gen }
-
-
-/* Skylake, Gen 9 */
-#define IGD_SKL_DEVICE_IDS \
-IGD_DEVICE_ENTRY(0x1906, 9), \
-IGD_DEVICE_ENTRY(0x1913, 9), \
-IGD_DEVICE_ENTRY(0x190E, 9), \
-IGD_DEVICE_ENTRY(0x1915, 9), \
-IGD_DEVICE_ENTRY(0x1902, 9), \
-IGD_DEVICE_ENTRY(0x190A, 9), \
-IGD_DEVICE_ENTRY(0x190B, 9), \
-IGD_DEVICE_ENTRY(0x1917, 9), \
-IGD_DEVICE_ENTRY(0x1916, 9), \
-IGD_DEVICE_ENTRY(0x1921, 9), \
-IGD_DEVICE_ENTRY(0x191E, 9), \
-IGD_DEVICE_ENTRY(0x1912, 9), \
-IGD_DEVICE_ENTRY(0x191A, 9), \
-IGD_DEVICE_ENTRY(0x191B, 9), \
-IGD_DEVICE_ENTRY(0x191D, 9), \
-IGD_DEVICE_ENTRY(0x1923, 9), \
-IGD_DEVICE_ENTRY(0x1926, 9), \
-IGD_DEVICE_ENTRY(0x1927, 9), \
-IGD_DEVICE_ENTRY(0x192A, 9), \
-IGD_DEVICE_ENTRY(0x192B, 9), \
-IGD_DEVICE_ENTRY(0x192D, 9), \
-IGD_DEVICE_ENTRY(0x1932, 9), \
-IGD_DEVICE_ENTRY(0x193A, 9), \
-IGD_DEVICE_ENTRY(0x193B, 9), \
-IGD_DEVICE_ENTRY(0x193D, 9)
-
-/* Apollo Lake, Gen 9 */
-#define IGD_BXT_DEVICE_IDS \
-IGD_DEVICE_ENTRY(0x0A84, 9), \
-IGD_DEVICE_ENTRY(0x1A84, 9), \
-IGD_DEVICE_ENTRY(0x1A85, 9), \
-IGD_DEVICE_ENTRY(0x5A84, 9), \
-IGD_DEVICE_ENTRY(0x5A85, 9)
-
-/* Gemini Lake, Gen 9 */
-#define IGD_GLK_DEVICE_IDS \
-IGD_DEVICE_ENTRY(0x3184, 9), \
-IGD_DEVICE_ENTRY(0x3185, 9)
-
-/* Kaby Lake, Gen 9 */
-#define IGD_KBL_DEVICE_IDS \
-IGD_DEVICE_ENTRY(0x5906, 9), \
-IGD_DEVICE_ENTRY(0x5913, 9), \
-IGD_DEVICE_ENTRY(0x590E, 9), \
-IGD_DEVICE_ENTRY(0x5915, 9), \
-IGD_DEVICE_ENTRY(0x5902, 9), \
-IGD_DEVICE_ENTRY(0x5908, 9), \
-IGD_DEVICE_ENTRY(0x590A, 9), \
-IGD_DEVICE_ENTRY(0x590B, 9), \
-IGD_DEVICE_ENTRY(0x5916, 9), \
-IGD_DEVICE_ENTRY(0x5921, 9), \
-IGD_DEVICE_ENTRY(0x591E, 9), \
-IGD_DEVICE_ENTRY(0x5912, 9), \
-IGD_DEVICE_ENTRY(0x5917, 9), \
-IGD_DEVICE_ENTRY(0x591A, 9), \
-IGD_DEVICE_ENTRY(0x591B, 9), \
-IGD_DEVICE_ENTRY(0x591D, 9), \
-IGD_DEVICE_ENTRY(0x5926, 9), \
-IGD_DEVICE_ENTRY(0x5923, 9), \
-IGD_DEVICE_ENTRY(0x5927, 9), \
-IGD_DEVICE_ENTRY(0x593B, 9), \
-IGD_DEVICE_ENTRY(0x591C, 9), \
-IGD_DEVICE_ENTRY(0x87C0, 9)
-
-/* Coffee Lake/Comet Lake, Gen 9 */
-#define IGD_CFL_DEVICE_IDS \
-IGD_DEVICE_ENTRY(0x87CA, 9), \
-IGD_DEVICE_ENTRY(0x9BA2, 9), \
-IGD_DEVICE_ENTRY(0x9BA4, 9), \
-IGD_DEVICE_ENTRY(0x9BA5, 9), \
-IGD_DEVICE_ENTRY(0x9BA8, 9), \
-IGD_DEVICE_ENTRY(0x9B21, 9), \
-IGD_DEVICE_ENTRY(0x9BAA, 9), \
-IGD_DEVICE_ENTRY(0x9BAC, 9), \
-IGD_DEVICE_ENTRY(0x9BC2, 9), \
-IGD_DEVICE_ENTRY(0x9BC4, 9), \
-IGD_DEVICE_ENTRY(0x9BC5, 9), \
-IGD_DEVICE_ENTRY(0x9BC6, 9), \
-IGD_DEVICE_ENTRY(0x9BC8, 9), \
-IGD_DEVICE_ENTRY(0x9BE6, 9), \
-IGD_DEVICE_ENTRY(0x9BF6, 9), \
-IGD_DEVICE_ENTRY(0x9B41, 9), \
-IGD_DEVICE_ENTRY(0x9BCA, 9), \
-IGD_DEVICE_ENTRY(0x9BCC, 9), \
-IGD_DEVICE_ENTRY(0x3E90, 9), \
-IGD_DEVICE_ENTRY(0x3E93, 9), \
-IGD_DEVICE_ENTRY(0x3E99, 9), \
-IGD_DEVICE_ENTRY(0x3E91, 9), \
-IGD_DEVICE_ENTRY(0x3E92, 9), \
-IGD_DEVICE_ENTRY(0x3E96, 9), \
-IGD_DEVICE_ENTRY(0x3E98, 9), \
-IGD_DEVICE_ENTRY(0x3E9A, 9), \
-IGD_DEVICE_ENTRY(0x3E9C, 9), \
-IGD_DEVICE_ENTRY(0x3E94, 9), \
-IGD_DEVICE_ENTRY(0x3E9B, 9), \
-IGD_DEVICE_ENTRY(0x3EA9, 9), \
-IGD_DEVICE_ENTRY(0x3EA5, 9), \
-IGD_DEVICE_ENTRY(0x3EA6, 9), \
-IGD_DEVICE_ENTRY(0x3EA7, 9), \
-IGD_DEVICE_ENTRY(0x3EA8, 9), \
-IGD_DEVICE_ENTRY(0x3EA1, 9), \
-IGD_DEVICE_ENTRY(0x3EA4, 9), \
-IGD_DEVICE_ENTRY(0x3EA0, 9), \
-IGD_DEVICE_ENTRY(0x3EA3, 9), \
-IGD_DEVICE_ENTRY(0x3EA2, 9)
-
-/* Ice Lake, Gen 11 */
-#define IGD_ICL_DEVICE_IDS \
-IGD_DEVICE_ENTRY(0x8A50, 11), \
-IGD_DEVICE_ENTRY(0x8A52, 11), \
-IGD_DEVICE_ENTRY(0x8A53, 11), \
-IGD_DEVICE_ENTRY(0x8A54, 11), \
-IGD_DEVICE_ENTRY(0x8A56, 11), \
-IGD_DEVICE_ENTRY(0x8A57, 11), \
-IGD_DEVICE_ENTRY(0x8A58, 11), \
-IGD_DEVICE_ENTRY(0x8A59, 11), \
-IGD_DEVICE_ENTRY(0x8A5A, 11), \
-IGD_DEVICE_ENTRY(0x8A5B, 11), \
-IGD_DEVICE_ENTRY(0x8A5C, 11), \
-IGD_DEVICE_ENTRY(0x8A70, 11), \
-IGD_DEVICE_ENTRY(0x8A71, 11), \
-IGD_DEVICE_ENTRY(0x8A51, 11), \
-IGD_DEVICE_ENTRY(0x8A5D, 11)
-
-/* Elkhart Lake, Gen 12 */
-#define IGD_EHL_DEVICE_IDS \
-IGD_DEVICE_ENTRY(0x4541, 12), \
-IGD_DEVICE_ENTRY(0x4551, 12), \
-IGD_DEVICE_ENTRY(0x4555, 12), \
-IGD_DEVICE_ENTRY(0x4557, 12), \
-IGD_DEVICE_ENTRY(0x4570, 12), \
-IGD_DEVICE_ENTRY(0x4571, 12)
-
-/* Jasper Lake, Gen 12 */
-#define IGD_JSL_DEVICE_IDS \
-IGD_DEVICE_ENTRY(0x4E51, 12), \
-IGD_DEVICE_ENTRY(0x4E55, 12), \
-IGD_DEVICE_ENTRY(0x4E57, 12), \
-IGD_DEVICE_ENTRY(0x4E61, 12), \
-IGD_DEVICE_ENTRY(0x4E71, 12)
-
-/* Tiger Lake, Gen 12 */
-#define IGD_TGL_DEVICE_IDS \
-IGD_DEVICE_ENTRY(0x9A60, 12), \
-IGD_DEVICE_ENTRY(0x9A68, 12), \
-IGD_DEVICE_ENTRY(0x9A70, 12), \
-IGD_DEVICE_ENTRY(0x9A40, 12), \
-IGD_DEVICE_ENTRY(0x9A49, 12), \
-IGD_DEVICE_ENTRY(0x9A59, 12), \
-IGD_DEVICE_ENTRY(0x9A78, 12), \
-IGD_DEVICE_ENTRY(0x9AC0, 12), \
-IGD_DEVICE_ENTRY(0x9AC9, 12), \
-IGD_DEVICE_ENTRY(0x9AD9, 12), \
-IGD_DEVICE_ENTRY(0x9AF8, 12)
-
-/* Rocket Lake, Gen 12 */
-#define IGD_RKL_DEVICE_IDS \
-IGD_DEVICE_ENTRY(0x4C80, 12), \
-IGD_DEVICE_ENTRY(0x4C8A, 12), \
-IGD_DEVICE_ENTRY(0x4C8B, 12), \
-IGD_DEVICE_ENTRY(0x4C8C, 12), \
-IGD_DEVICE_ENTRY(0x4C90, 12), \
-IGD_DEVICE_ENTRY(0x4C9A, 12)
-
-/* Alder Lake-S, Gen 12 */
-#define IGD_ADLS_DEVICE_IDS \
-IGD_DEVICE_ENTRY(0x4680, 12), \
-IGD_DEVICE_ENTRY(0x4682, 12), \
-IGD_DEVICE_ENTRY(0x4688, 12), \
-IGD_DEVICE_ENTRY(0x468A, 12), \
-IGD_DEVICE_ENTRY(0x468B, 12), \
-IGD_DEVICE_ENTRY(0x4690, 12), \
-IGD_DEVICE_ENTRY(0x4692, 12), \
-IGD_DEVICE_ENTRY(0x4693, 12)
-
-/* Alder Lake-P, Gen 12 */
-#define IGD_ADLP_DEVICE_IDS \
-IGD_DEVICE_ENTRY(0x46A0, 12), \
-IGD_DEVICE_ENTRY(0x46A1, 12), \
-IGD_DEVICE_ENTRY(0x46A2, 12), \
-IGD_DEVICE_ENTRY(0x46A3, 12), \
-IGD_DEVICE_ENTRY(0x46A6, 12), \
-IGD_DEVICE_ENTRY(0x46A8, 12), \
-IGD_DEVICE_ENTRY(0x46AA, 12), \
-IGD_DEVICE_ENTRY(0x462A, 12), \
-IGD_DEVICE_ENTRY(0x4626, 12), \
-IGD_DEVICE_ENTRY(0x4628, 12), \
-IGD_DEVICE_ENTRY(0x46B0, 12), \
-IGD_DEVICE_ENTRY(0x46B1, 12), \
-IGD_DEVICE_ENTRY(0x46B2, 12), \
-IGD_DEVICE_ENTRY(0x46B3, 12), \
-IGD_DEVICE_ENTRY(0x46C0, 12), \
-IGD_DEVICE_ENTRY(0x46C1, 12), \
-IGD_DEVICE_ENTRY(0x46C2, 12), \
-IGD_DEVICE_ENTRY(0x46C3, 12)
-
-/* Alder Lake-N, Gen 12 */
-#define IGD_ADLN_DEVICE_IDS \
-IGD_DEVICE_ENTRY(0x46D0, 12), \
-IGD_DEVICE_ENTRY(0x46D1, 12), \
-IGD_DEVICE_ENTRY(0x46D2, 12), \
-IGD_DEVICE_ENTRY(0x46D3, 12), \
-IGD_DEVICE_ENTRY(0x46D4, 12)
-
-/* Raptor Lake-S, Gen 12 */
-#define IGD_RPLS_DEVICE_IDS \
-IGD_DEVICE_ENTRY(0xA780, 12), \
-IGD_DEVICE_ENTRY(0xA781, 12), \
-IGD_DEVICE_ENTRY(0xA782, 12), \
-IGD_DEVICE_ENTRY(0xA783, 12), \
-IGD_DEVICE_ENTRY(0xA788, 12), \
-IGD_DEVICE_ENTRY(0xA789, 12), \
-IGD_DEVICE_ENTRY(0xA78A, 12), \
-IGD_DEVICE_ENTRY(0xA78B, 12)
-
-/* Raptor Lake-P, Gen 12 */
-#define IGD_RPLP_DEVICE_IDS \
-IGD_DEVICE_ENTRY(0xA721, 12), \
-IGD_DEVICE_ENTRY(0xA7A1, 12), \
-IGD_DEVICE_ENTRY(0xA7A9, 12), \
-IGD_DEVICE_ENTRY(0xA7AC, 12), \
-IGD_DEVICE_ENTRY(0xA7AD, 12), \
-IGD_DEVICE_ENTRY(0xA720, 12), \
-IGD_DEVICE_ENTRY(0xA7A0, 12), \
-IGD_DEVICE_ENTRY(0xA7A8, 12), \
-IGD_DEVICE_ENTRY(0xA7AA, 12), \
-IGD_DEVICE_ENTRY(0xA7AB, 12)
-
-#endif
@@ -7,48 +7,14 @@
 #ifndef _iothread_CTX_H_
 #define _iothread_CTX_H_

-#define IOTHREAD_NUM 40
-
-/*
- * The pthread_setname_np() function can be used to set a unique name for a thread,
- * which can be useful for debugging multithreaded applications.
- * The thread name is a meaningful C language string,
- * whose length is restricted to 16 characters, including the terminating null byte ('\0').
- */
-#define PTHREAD_NAME_MAX_LEN 16
-
 struct iothread_mevent {
 void (*run)(void *);
 void *arg;
 int fd;
 };
-
-struct iothread_ctx {
-pthread_t tid;
-int epfd;
-bool started;
-pthread_mutex_t mtx;
-int idx;
-cpu_set_t cpuset;
-char name[PTHREAD_NAME_MAX_LEN];
-};
-
-struct iothreads_option {
-char tag[PTHREAD_NAME_MAX_LEN];
-int num;
-cpu_set_t *cpusets;
-};
-
-struct iothreads_info {
-struct iothread_ctx *ioctx_base;
-int num;
-};
-
-int iothread_add(struct iothread_ctx *ioctx_x, int fd, struct iothread_mevent *aevt);
-int iothread_del(struct iothread_ctx *ioctx_x, int fd);
+int iothread_add(int fd, struct iothread_mevent *aevt);
+int iothread_del(int fd);
+int iothread_init(void);
 void iothread_deinit(void);
-struct iothread_ctx *iothread_create(struct iothreads_option *iothr_opt);
-int iothread_parse_options(char *str, struct iothreads_option *iothr_opt);
-void iothread_free_options(struct iothreads_option *iothr_opt);

 #endif
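The two sides of this hunk expose different iothread APIs: a single shared pool (`iothread_init`/`iothread_add(fd, ...)`) versus per-context pools created from parsed options. A hedged usage sketch of the simpler interface follows; the handler body and the `watch_fd()` wrapper are invented for illustration.

```c
/* Sketch against the simpler API on the "+" side of this hunk; assumes the
 * device model's iothread.h (shown above) is on the include path. */
#include <stdio.h>
#include <unistd.h>
#include "iothread.h"

/* Hypothetical handler: drain one byte from the fd that became readable. */
static void on_readable(void *arg)
{
	int fd = *(int *)arg;
	char c;

	if (read(fd, &c, 1) == 1)
		printf("iothread consumed 0x%02x\n", (unsigned char)c);
}

static struct iothread_mevent evt;
static int watched_fd;

static int watch_fd(int fd)
{
	watched_fd = fd;
	evt.run = on_readable;
	evt.arg = &watched_fd;
	evt.fd = fd;

	if (iothread_init() < 0)	/* start the shared iothread once */
		return -1;
	return iothread_add(fd, &evt);	/* handler then runs on that thread */
}
```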
@@ -36,7 +36,4 @@ unsigned get_wakeup_reason(void);
 int set_wakeup_timer(time_t t);
 int acrn_parse_intr_monitor(const char *opt);
 int vm_monitor_blkrescan(void *arg, char *devargs);
-
-int vm_monitor_send_vm_event(const char *msg);
-
 #endif
@@ -9,7 +9,6 @@
 #define __PASSTHRU_H__

 #include <types.h>
-#include <limits.h>

 #include "pciaccess.h"
 #include "pci_core.h"
@@ -38,9 +37,7 @@ struct passthru_dev {
 bool need_reset;
 bool d3hot_reset;
 bool need_rombar;
-bool need_dsdt;
 char *rom_buffer;
-char dsdt_path[256];
 bool (*has_virt_pcicfg_regs)(int offset);
 };

@@ -296,6 +296,7 @@ void destory_io_rsvd_rgns(struct pci_vdev *vdev);
 * For OpRegion 2.0: ASLE.rvda = physical address, not support currently
 */
 #define GPU_DSM_GPA 0x7C000000
+#define GPU_DSM_SIZE 0x4000000
 #define GPU_OPREGION_SIZE 0x5000
 /*
 * TODO: Forced DSM/OPREGION size requires native BIOS configuration.
@@ -349,6 +350,8 @@ int pci_emul_add_pciecap(struct pci_vdev *pi, int pcie_device_type);
 *
 * @param dev Pointer to struct pci_vdev representing virtual PCI device.
 * @param index Message data index.
+ *
+ * @return None
 */
 void pci_generate_msi(struct pci_vdev *dev, int index);

@@ -357,6 +360,8 @@ void pci_generate_msi(struct pci_vdev *dev, int index);
 *
 * @param dev Pointer to struct pci_vdev representing virtual PCI device.
 * @param index MSIs table entry index.
+ *
+ * @return None
 */
 void pci_generate_msix(struct pci_vdev *dev, int index);

@@ -364,6 +369,8 @@ void pci_generate_msix(struct pci_vdev *dev, int index);
 * @brief Assert INTx pin of virtual PCI device
 *
 * @param dev Pointer to struct pci_vdev representing virtual PCI device.
+ *
+ * @return None
 */
 void pci_lintr_assert(struct pci_vdev *dev);

@@ -371,6 +378,8 @@ void pci_lintr_assert(struct pci_vdev *dev);
 * @brief Deassert INTx pin of virtual PCI device
 *
 * @param dev Pointer to struct pci_vdev representing virtual PCI device.
+ *
+ * @return None
 */
 void pci_lintr_deassert(struct pci_vdev *dev);

@@ -408,6 +417,8 @@ struct pci_vdev *pci_get_vdev_info(int slot);
 * @param dev Pointer to struct pci_vdev representing virtual PCI device.
 * @param offset Offset in configuration space.
 * @param val Value in 1 byte.
+ *
+ * @return None
 */
 static inline void
 pci_set_cfgdata8(struct pci_vdev *dev, int offset, uint8_t val)
@@ -425,6 +436,8 @@ pci_set_cfgdata8(struct pci_vdev *dev, int offset, uint8_t val)
 * @param dev Pointer to struct pci_vdev representing virtual PCI device.
 * @param offset Offset in configuration space.
 * @param val Value in 2 bytes.
+ *
+ * @return None
 */
 static inline void
 pci_set_cfgdata16(struct pci_vdev *dev, int offset, uint16_t val)
@@ -442,6 +455,8 @@ pci_set_cfgdata16(struct pci_vdev *dev, int offset, uint16_t val)
 * @param dev Pointer to struct pci_vdev representing virtual PCI device.
 * @param offset Offset in configuration space.
 * @param val Value in 4 bytes.
+ *
+ * @return None
 */
 static inline void
 pci_set_cfgdata32(struct pci_vdev *dev, int offset, uint32_t val)
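The doc-comment-only changes above cover the `pci_set_cfgdata8/16/32` helpers. For context, here is a hedged sketch of how a virtual device model typically seeds its PCI configuration space with these helpers; the offsets are standard PCI config-space locations, while the surrounding init function and the ID values are invented for illustration.

```c
/* Sketch; assumes it is built inside the ACRN device model tree where
 * pci_core.h provides pci_set_cfgdata8/16() and struct pci_vdev. */
#include "pci_core.h"

static void example_vdev_init_cfg(struct pci_vdev *dev)
{
	/* Standard PCI config-space offsets: 0x00 vendor ID, 0x02 device ID,
	 * 0x0B base class code. The values below are illustrative only. */
	pci_set_cfgdata16(dev, 0x00, 0x8086);
	pci_set_cfgdata16(dev, 0x02, 0x1234);
	pci_set_cfgdata8(dev, 0x0B, 0xff);
}
```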
@@ -1066,8 +1066,6 @@
 #define PCIM_OSC_CTL_PCIE_CAP_STRUCT 0x10 /* Various Capability Structures */

 /* Graphics definitions */
-#define PCIR_GGC 0x50 /* GMCH Graphics Control */
-#define PCIR_GGC_GMS_SHIFT 8 /* Bit 15:8 Graphics Memory Size (GMS) */
 #define PCIR_BDSM 0x5C /* BDSM graphics base data of stolen memory register */
 #define PCIR_GEN11_BDSM_DW0 0xC0
 #define PCIR_GEN11_BDSM_DW1 0xC4
@@ -107,6 +107,8 @@
 _IOW(ACRN_IOCTL_TYPE, 0x41, struct acrn_vm_memmap)
 #define ACRN_IOCTL_UNSET_MEMSEG \
 _IOW(ACRN_IOCTL_TYPE, 0x42, struct acrn_vm_memmap)
+#define ACRN_IOCTL_SETUP_SBUF \
+_IOW(ACRN_IOCTL_TYPE, 0x43, struct acrn_sbuf)

 /* PCI assignment*/
 #define ACRN_IOCTL_SET_PTDEV_INTR \
@@ -136,15 +138,6 @@
 #define ACRN_IOCTL_IRQFD \
 _IOW(ACRN_IOCTL_TYPE, 0x71, struct acrn_irqfd)

-/* Asynchronous IO */
-#define ACRN_IOCTL_SETUP_ASYNCIO \
-_IOW(ACRN_IOCTL_TYPE, 0x90, __u64)
-
-/* VM EVENT */
-#define ACRN_IOCTL_SETUP_VM_EVENT_RING \
-_IOW(ACRN_IOCTL_TYPE, 0xa0, __u64)
-#define ACRN_IOCTL_SETUP_VM_EVENT_FD \
-_IOW(ACRN_IOCTL_TYPE, 0xa1, int)

 #define ACRN_MEM_ACCESS_RIGHT_MASK 0x00000007U
 #define ACRN_MEM_ACCESS_READ 0x00000001U
@@ -257,4 +250,13 @@ struct acrn_irqfd {
 struct acrn_msi_entry msi;
 };

+/**
+ * @brief data structure to register a share buffer by ioctl
+ */
+struct acrn_sbuf {
+/** Type of the sbuf. */
+uint32_t sbuf_id;
+/** Base address of the sbuf. */
+uint64_t base;
+};
 #endif /* VHM_IOCTL_DEFS_H */
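The `+` side introduces `ACRN_IOCTL_SETUP_SBUF` together with `struct acrn_sbuf`. Below is a hedged sketch of how a user-space service could register a shared buffer through that ioctl; the device node path and the way the request code and the buffer's base address are obtained are assumptions, not taken from this diff.

```c
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Mirrors the structure added in this hunk. */
struct acrn_sbuf {
	uint32_t sbuf_id;	/* type of the sbuf */
	uint64_t base;		/* base address of the sbuf */
};

/* Assumed device node for the ACRN HSM driver. */
#define ACRN_HSM_NODE "/dev/acrn_hsm"

/* The ioctl request code is passed in so this sketch does not hard-code
 * the encoding of ACRN_IOCTL_SETUP_SBUF. */
static int setup_sbuf(unsigned long setup_sbuf_req, uint32_t id, uint64_t base_addr)
{
	struct acrn_sbuf sbuf = { .sbuf_id = id, .base = base_addr };
	int fd, ret;

	fd = open(ACRN_HSM_NODE, O_RDWR);
	if (fd < 0) {
		perror("open");
		return -1;
	}
	ret = ioctl(fd, setup_sbuf_req, &sbuf);
	close(fd);
	return ret;
}
```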
@@ -43,7 +43,6 @@ struct vrtc;
 struct vmctx;

 int vrtc_init(struct vmctx *ctx);
-void vrtc_suspend(struct vmctx *ctx);
 void vrtc_enable_localtime(int l_time);
 void vrtc_deinit(struct vmctx *ctx);
 int vrtc_set_time(struct vrtc *vrtc, time_t secs);
@@ -1,39 +0,0 @@
-/*
- * Copyright (C) 2018-2023 Intel Corporation.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef SHARED_BUF_H
-#define SHARED_BUF_H
-
-#include <linux/types.h>
-#include "acrn_common.h"
-
-
-static inline bool sbuf_is_empty(struct shared_buf *sbuf)
-{
-return (sbuf->head == sbuf->tail);
-}
-
-static inline void sbuf_clear_flags(struct shared_buf *sbuf, uint64_t flags)
-{
-sbuf->flags &= ~flags;
-}
-
-static inline void sbuf_set_flags(struct shared_buf *sbuf, uint64_t flags)
-{
-sbuf->flags = flags;
-}
-
-static inline void sbuf_add_flags(struct shared_buf *sbuf, uint64_t flags)
-{
-sbuf->flags |= flags;
-}
-
-uint32_t sbuf_get(struct shared_buf *sbuf, uint8_t *data);
-uint32_t sbuf_put(struct shared_buf *sbuf, uint8_t *data, uint32_t max_len);
-int sbuf_clear_buffered(struct shared_buf *sbuf);
-void sbuf_init(struct shared_buf *sbuf, uint32_t total_size, uint32_t ele_size);
-
-#endif /* SHARED_BUF_H */
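The removed `sbuf.h` wraps a shared ring (`head`/`tail` in `struct shared_buf`) used between the hypervisor and the device model. A hedged consumer sketch using the helpers declared above follows; the element size, the callback, and the meaning of a zero return from `sbuf_get()` are illustrative assumptions.

```c
/* Consumer sketch; assumes the sbuf.h shown above (and acrn_common.h, which
 * defines struct shared_buf) are included. */
#include <stdint.h>
#include "sbuf.h"

#define SBUF_ELEM_SIZE 32	/* illustrative element size */

static void drain_sbuf(struct shared_buf *sbuf,
			void (*handle)(const uint8_t *elem))
{
	uint8_t elem[SBUF_ELEM_SIZE];

	/* head == tail means empty; keep pulling elements until then. */
	while (!sbuf_is_empty(sbuf)) {
		if (sbuf_get(sbuf, elem) == 0)
			break;	/* assumed: nothing copied out, stop */
		handle(elem);
	}
}
```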
@@ -36,7 +36,6 @@ struct uart_vdev;

 typedef void (*uart_intr_func_t)(void *arg);
 int uart_legacy_alloc(int unit, int *ioaddr, int *irq);
-int uart_legacy_reinit_res(int which, int baseaddr, int irq);
 void uart_legacy_dealloc(int which);
 uint8_t uart_read(struct uart_vdev *uart, int offset);
 void uart_write(struct uart_vdev *uart, int offset, uint8_t value);
@@ -221,6 +221,12 @@ enum USB_ERRCODE {
 do { if (lvl <= usb_log_level) pr_dbg(LOG_TAG fmt, ##args); } while (0)

 #define NATIVE_USBSYS_DEVDIR "/sys/bus/usb/devices"
+#define NATIVE_USB2_SPEED "480"
+#define NATIVE_USB3_SPEED "5000"
+#define USB_NATIVE_NUM_PORT 20
+#define USB_NATIVE_NUM_BUS 5
+
+#define USB_DROPPED_XFER_MAGIC 0xaaaaaaaa55555555

 inline bool
 index_valid(int head, int tail, int maxcnt, int idx) {
@@ -427,7 +427,6 @@ struct virtio_iothread {
 int idx;
 int kick_fd;
 bool ioevent_started;
-struct iothread_ctx *ioctx;
 struct iothread_mevent iomvt;
 void (*iothread_run)(void *, struct virtio_vq_info *);
 };
@@ -440,7 +439,6 @@ struct virtio_vq_info {
 struct virtio_base *base;
 /**< backpointer to virtio_base */
 uint16_t num; /**< the num'th queue in the virtio_base */
-pthread_mutex_t mtx; /**< per queue mutex */

 uint16_t flags; /**< flags (see above) */
 uint16_t last_avail; /**< a recent value of avail->idx */
@@ -512,6 +510,8 @@ vq_has_descs(struct virtio_vq_info *vq)
 *
 * @param vb Pointer to struct virtio_base.
 * @param vq Pointer to struct virtio_vq_info.
+ *
+ * @return None
 */
 static inline void
 vq_interrupt(struct virtio_base *vb, struct virtio_vq_info *vq)
@@ -533,6 +533,8 @@ vq_interrupt(struct virtio_base *vb, struct virtio_vq_info *vq)
 * MSI-X or a generic MSI interrupt with config changed event.
 *
 * @param vb Pointer to struct virtio_base.
+ *
+ * @return None
 */
 static inline void
 virtio_config_changed(struct virtio_base *vb)
@@ -565,6 +567,8 @@ struct iovec;
 * @param dev Pointer to struct pci_vdev which emulates a PCI device.
 * @param queues Pointer to struct virtio_vq_info, normally an array.
 * @param backend_type can be VBSU, VBSK or VHOST
+ *
+ * @return None
 */
 void virtio_linkup(struct virtio_base *base, struct virtio_ops *vops,
 void *pci_virtio_dev, struct pci_vdev *dev,
@@ -620,6 +624,8 @@ int virtio_intr_init(struct virtio_base *base, int barnum, int use_msix);
 * If MSI-X is enabled, this also resets all the vectors to NO_VECTOR.
 *
 * @param base Pointer to struct virtio_base.
+ *
+ * @return None
 */
 void virtio_reset_dev(struct virtio_base *base);

@@ -628,6 +634,8 @@ void virtio_reset_dev(struct virtio_base *base);
 *
 * @param base Pointer to struct virtio_base.
 * @param barnum Which BAR[0..5] to use.
+ *
+ * @return None
 */
 void virtio_set_io_bar(struct virtio_base *base, int barnum);

@@ -652,6 +660,8 @@ int vq_getchain(struct virtio_vq_info *vq, uint16_t *pidx,
 * available ring.
 *
 * @param vq Pointer to struct virtio_vq_info.
+ *
+ * @return None
 */
 void vq_retchain(struct virtio_vq_info *vq);

@@ -662,6 +672,8 @@ void vq_retchain(struct virtio_vq_info *vq);
 * @param vq Pointer to struct virtio_vq_info.
 * @param idx Pointer to available ring position, returned by vq_getchain().
 * @param iolen Number of data bytes to be returned to frontend.
+ *
+ * @return None
 */
 void vq_relchain(struct virtio_vq_info *vq, uint16_t idx, uint32_t iolen);

@@ -673,6 +685,8 @@ void vq_relchain(struct virtio_vq_info *vq, uint16_t idx, uint32_t iolen);
 *
 * @param vq Pointer to struct virtio_vq_info.
 * @param used_all_avail Flag indicating if driver used all available chains.
+ *
+ * @return None
 */
 void vq_endchains(struct virtio_vq_info *vq, int used_all_avail);

@@ -685,6 +699,8 @@ void vq_endchains(struct virtio_vq_info *vq, int used_all_avail);
 *
 * @param base Pointer to struct virtio_base.
 * @param vq Pointer to struct virtio_vq_info.
+ *
+ * @return None
 */
 void vq_clear_used_ring_flags(struct virtio_base *base, struct virtio_vq_info *vq);

@@ -719,6 +735,8 @@ uint64_t virtio_pci_read(struct vmctx *ctx, int vcpu, struct pci_vdev *dev,
 * @param offset Register offset in bytes within a BAR region.
 * @param size Access range in bytes.
 * @param value Data value to be written into register.
+ *
+ * @return None
 */
 void virtio_pci_write(struct vmctx *ctx, int vcpu, struct pci_vdev *dev,
 int baridx, uint64_t offset, int size, uint64_t value);
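The API documented above (`vq_getchain`, `vq_relchain`, `vq_endchains`) is the usual descriptor-chain processing path for a virtio backend. Below is a hedged sketch of a minimal queue-drain loop; it assumes inclusion of virtio.h, the argument list of `vq_getchain()` beyond `(vq, pidx, ...)` is assumed, and the per-request handling is invented.

```c
/* Sketch; assumes virtio.h (as documented above) is included. */
#include <stdint.h>
#include <sys/uio.h>

#define EX_MAX_SEGS 8	/* illustrative per-request segment cap */

static void example_notify_cb(void *vdev, struct virtio_vq_info *vq)
{
	struct iovec iov[EX_MAX_SEGS];
	uint16_t idx;
	int n;

	/* Pull descriptor chains off the available ring until it is empty. */
	while (vq_has_descs(vq)) {
		n = vq_getchain(vq, &idx, iov, EX_MAX_SEGS, NULL);
		if (n <= 0)
			break;

		/* ... process iov[0..n-1] for this request here ... */

		/* Return the chain to the used ring; 0 bytes written back. */
		vq_relchain(vq, idx, 0);
	}
	/* Generate an interrupt to the frontend if needed. */
	vq_endchains(vq, 1);
}
```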
@@ -1,18 +0,0 @@
-/*
- * Copyright (C) 2019-2023 Intel Corporation.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef VM_EVENT_H
-#define VM_EVENT_H
-
-#include <types.h>
-#include "vmmapi.h"
-
-int vm_event_init(struct vmctx *ctx);
-int vm_event_deinit(void);
-int dm_send_vm_event(struct vm_event *event);
-uint32_t get_dm_vm_event_overrun_count(void);
-
-#endif /* VM_EVENT_H */
@@ -107,7 +107,7 @@ int vm_create_ioreq_client(struct vmctx *ctx);
 int vm_destroy_ioreq_client(struct vmctx *ctx);
 int vm_attach_ioreq_client(struct vmctx *ctx);
 int vm_notify_request_done(struct vmctx *ctx, int vcpu);
-int vm_setup_asyncio(struct vmctx *ctx, uint64_t base);
+int vm_setup_sbuf(struct vmctx *ctx, uint32_t sbuf_type, uint64_t base);
 void vm_clear_ioreq(struct vmctx *ctx);
 const char *vm_state_to_str(enum vm_suspend_how idx);
 void vm_set_suspend_mode(enum vm_suspend_how how);
doc/_templates/layout.html
@@ -5,7 +5,7 @@
 <p class="admonition-title">Important</p>
 <p>This is the latest documentation for the unstable development branch of
 Project ACRN (master).<br/>Use the drop-down menu on the left to select
-documentation for a stable release such as <a href="/3.2/">v3.2</a> or
+documentation for a stable release such as <a href="/3.1/">v3.1</a> or
 <a href="/3.0/">v3.0</a>.</p>
 </div>
 {% endif %}
@@ -241,8 +241,6 @@ TAB_SIZE = 4
 # Allow for rst directives and advanced functions e.g. grid tables
 ALIASES = "rst=\verbatim embed:rst:leading-asterisk"
 ALIASES += "endrst=\endverbatim"
-ALIASES += consistency="\par consistency:^^"
-ALIASES += alignment="\par alignment:^^"

 # This tag can be used to specify a number of word-keyword mappings (TCL only).
 # A mapping has the form "name=value". For example adding "class=itcl::class"
@@ -199,7 +199,6 @@ html_context = {
 'docs_title': docs_title,
 'is_release': is_release,
 'versions': ( ("latest", "/latest/"),
-("3.2", "/3.2/"),
 ("3.1", "/3.1/"),
 ("3.0", "/3.0/"),
 ("2.7", "/2.7/"),
@@ -29,7 +29,6 @@ User VM Tutorials
 tutorials/using_xenomai_as_user_vm
 tutorials/using_vxworks_as_user_vm
 tutorials/using_zephyr_as_user_vm
-tutorials/using_celadon_as_user_vm

 Configuration Tutorials
 ***********************
@@ -70,24 +70,10 @@ Specifically:
 the hypervisor shell. Inputs to the physical UART will be
 redirected to the vUART starting from the next timer event.

-- The vUART enters escaping mode after a BREAK character is received from
-the physical UART. When in escaping mode, next received character will
-be a command. After processing this command, the vUART exits the escaping
-mode. So far, following escaping commands are supported:
+- The vUART is deactivated after a :kbd:`Ctrl` + :kbd:`Space` hotkey is received
+from the physical UART. Inputs to the physical UART will be
+handled by the hypervisor shell starting from the next timer
+event.

-- BREAK charater. If user sends break charater again in escaping mode,
-one break charater will be sent to vUART.
-
-- Character "e". This will deactive vUART. Inputs to the physical UART will
-be handled by the hypervisor shell starting from the next timer event.
-
-Other characters are not supported. The physical UART will prompt out an
-"Unknown escaping key" message and the active vUART exits escaping mode.
-
-Note that the BREAK character is a control character and different serial
-terminals have different ways to send it, for example, `<Ctrl-A> + F`
-in minicom, `<Ctrl-a> + <Ctrl-\>` in picocom, right click -> special
-command -> break in putty serial terminal.
-
 The workflows are described as follows:

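The master-side text describes a small escape protocol for the active vUART: a BREAK enters escaping mode, and the next character is interpreted as a command. The following is a hedged, standalone C sketch of that state machine for illustration only; it does not reproduce the hypervisor's actual shell code, and the action names are invented.

```c
#include <stdbool.h>
#include <stdio.h>

enum vuart_rx_action {
	VUART_RX_FORWARD,	/* pass the byte to the vUART as usual */
	VUART_RX_SEND_BREAK,	/* forward one BREAK to the vUART */
	VUART_RX_DEACTIVATE,	/* hand input back to the hypervisor shell */
	VUART_RX_DISCARD	/* escape entry or unknown command; drop it */
};

/* is_break marks a BREAK condition reported by the physical UART. */
static enum vuart_rx_action vuart_escape_input(bool *escaping, bool is_break, char ch)
{
	if (!*escaping) {
		if (is_break) {
			*escaping = true;	/* next byte is a command */
			return VUART_RX_DISCARD;
		}
		return VUART_RX_FORWARD;
	}

	*escaping = false;			/* one command, then exit */
	if (is_break)
		return VUART_RX_SEND_BREAK;	/* BREAK twice -> send BREAK */
	if (ch == 'e')
		return VUART_RX_DEACTIVATE;	/* 'e' deactivates the vUART */

	printf("Unknown escaping key\n");
	return VUART_RX_DISCARD;
}
```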
@@ -106,12 +106,12 @@ MMIO Registers Definition
 * - IVSHMEM\_IRQ\_MASK\_REG
 - 0x0
 - R/W
-- Interrupt Mask register is used for legacy interrupt.
+- Interrupt Status register is used for legacy interrupt.
 ivshmem doesn't support interrupts, so this register is reserved.
 * - IVSHMEM\_IRQ\_STA\_REG
 - 0x4
 - R/W
-- Interrupt Status register is used for legacy interrupt.
+- Interrupt Mask register is used for legacy interrupt.
 ivshmem doesn't support interrupts, so this register is reserved.
 * - IVSHMEM\_IV\_POS\_REG
 - 0x8
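The table above lays out the ivshmem MMIO registers at the start of BAR0. As a hedged illustration, the sketch below reads the interrupt-vector-position register once BAR0 has been memory-mapped; the mapping step itself is assumed and is not part of this diff.

```c
#include <stdint.h>

/* Register offsets from the table above. */
#define IVSHMEM_IRQ_MASK_REG	0x0
#define IVSHMEM_IRQ_STA_REG	0x4
#define IVSHMEM_IV_POS_REG	0x8

/* bar0 is assumed to point at the memory-mapped BAR0 region. */
static uint32_t ivshmem_read_iv_pos(volatile uint32_t *bar0)
{
	return bar0[IVSHMEM_IV_POS_REG / sizeof(uint32_t)];
}
```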
@ -117,11 +117,10 @@ To set up the ACRN build environment on the development computer:
|
|||||||
python3 python3-pip libblkid-dev e2fslibs-dev \
|
python3 python3-pip libblkid-dev e2fslibs-dev \
|
||||||
pkg-config libnuma-dev libcjson-dev liblz4-tool flex bison \
|
pkg-config libnuma-dev libcjson-dev liblz4-tool flex bison \
|
||||||
xsltproc clang-format bc libpixman-1-dev libsdl2-dev libegl-dev \
|
xsltproc clang-format bc libpixman-1-dev libsdl2-dev libegl-dev \
|
||||||
libgles-dev libdrm-dev gnu-efi libelf-dev liburing-dev \
|
libgles-dev libdrm-dev gnu-efi libelf-dev \
|
||||||
build-essential git-buildpackage devscripts dpkg-dev equivs lintian \
|
build-essential git-buildpackage devscripts dpkg-dev equivs lintian \
|
||||||
apt-utils pristine-tar dh-python acpica-tools python3-tqdm \
|
apt-utils pristine-tar dh-python python3-lxml python3-defusedxml \
|
||||||
python3-elementpath python3-lxml python3-xmlschema python3-defusedxml
|
python3-tqdm python3-xmlschema python3-elementpath acpica-tools
|
||||||
|
|
||||||
|
|
||||||
#. Get the ACRN hypervisor and ACRN kernel source code, and check out the
|
#. Get the ACRN hypervisor and ACRN kernel source code, and check out the
|
||||||
current release branch.
|
current release branch.
|
||||||
@ -131,12 +130,12 @@ To set up the ACRN build environment on the development computer:
|
|||||||
cd ~/acrn-work
|
cd ~/acrn-work
|
||||||
git clone https://github.com/projectacrn/acrn-hypervisor.git
|
git clone https://github.com/projectacrn/acrn-hypervisor.git
|
||||||
cd acrn-hypervisor
|
cd acrn-hypervisor
|
||||||
git checkout release_3.3
|
git checkout release_3.2
|
||||||
|
|
||||||
cd ..
|
cd ..
|
||||||
git clone https://github.com/projectacrn/acrn-kernel.git
|
git clone https://github.com/projectacrn/acrn-kernel.git
|
||||||
cd acrn-kernel
|
cd acrn-kernel
|
||||||
git checkout release_3.3
|
git checkout release_3.2
|
||||||
|
|
||||||
.. _gsg-board-setup:
|
.. _gsg-board-setup:
|
||||||
|
|
||||||
@ -172,11 +171,11 @@ To set up the target hardware environment:
|
|||||||
|
|
||||||
#. Connect the monitor and power supply cable.
|
#. Connect the monitor and power supply cable.
|
||||||
|
|
||||||
#. Connect the target system to the LAN with the Ethernet cable or wifi.
|
#. Connect the target system to the LAN with the Ethernet cable.
|
||||||
|
|
||||||
Example of a target system with cables connected:
|
Example of a target system with cables connected:
|
||||||
|
|
||||||
.. image:: ./images/gsg_asus_minipc64.png
|
.. image:: ./images/gsg_vecow.png
|
||||||
:align: center
|
:align: center
|
||||||
|
|
||||||
Install OS on the Target
|
Install OS on the Target
|
||||||
@ -248,10 +247,9 @@ Configure Target BIOS Settings
|
|||||||
#. Boot your target and enter the BIOS configuration editor.
|
#. Boot your target and enter the BIOS configuration editor.
|
||||||
|
|
||||||
Tip: When you are booting your target, you'll see an option (quickly) to
|
Tip: When you are booting your target, you'll see an option (quickly) to
|
||||||
enter the BIOS configuration editor, typically by pressing :kbd:`F2`
|
enter the BIOS configuration editor, typically by pressing :kbd:`F2` during
|
||||||
or :kbd:`DEL` during the boot and before the GRUB menu (or Ubuntu login
|
the boot and before the GRUB menu (or Ubuntu login screen) appears. If you
|
||||||
screen) appears. If you are not quick enough, you can still choose
|
are not quick enough, you can reboot the system to try again.
|
||||||
``UEFI settings`` in the GRUB menu or just reboot the system to try again.
|
|
||||||
|
|
||||||
#. Configure these BIOS settings:
|
#. Configure these BIOS settings:
|
||||||
|
|
||||||
@ -341,7 +339,7 @@ Generate a Scenario Configuration File and Launch Script
|
|||||||
********************************************************
|
********************************************************
|
||||||
|
|
||||||
In this step, you will download, install, and use the `ACRN Configurator
|
In this step, you will download, install, and use the `ACRN Configurator
|
||||||
<https://github.com/projectacrn/acrn-hypervisor/releases/download/v3.3/acrn-configurator-3.3.deb>`__
|
<https://github.com/projectacrn/acrn-hypervisor/releases/download/v3.2/acrn-configurator-3.2.deb>`__
|
||||||
to generate a scenario configuration file and launch script.
|
to generate a scenario configuration file and launch script.
|
||||||
|
|
||||||
A **scenario configuration file** is an XML file that holds the parameters of
|
A **scenario configuration file** is an XML file that holds the parameters of
|
||||||
@ -357,7 +355,7 @@ post-launched User VM. Each User VM has its own launch script.
|
|||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
cd ~/acrn-work
|
cd ~/acrn-work
|
||||||
wget https://github.com/projectacrn/acrn-hypervisor/releases/download/v3.3/acrn-configurator-3.3.deb -P /tmp
|
wget https://github.com/projectacrn/acrn-hypervisor/releases/download/v3.2/acrn-configurator-3.2.deb -P /tmp
|
||||||
|
|
||||||
If you already have a previous version of the acrn-configurator installed,
|
If you already have a previous version of the acrn-configurator installed,
|
||||||
you should first remove it:
|
you should first remove it:
|
||||||
@ -370,7 +368,7 @@ post-launched User VM. Each User VM has its own launch script.
|
|||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
sudo apt install -y /tmp/acrn-configurator-3.3.deb
|
sudo apt install -y /tmp/acrn-configurator-3.2.deb
|
||||||
|
|
||||||
#. Launch the ACRN Configurator:
|
#. Launch the ACRN Configurator:
|
||||||
|
|
||||||
@ -435,7 +433,7 @@ post-launched User VM. Each User VM has its own launch script.
|
|||||||
:class: drop-shadow
|
:class: drop-shadow
|
||||||
|
|
||||||
The Configurator does consistency and validation checks when you load or save
|
The Configurator does consistency and validation checks when you load or save
|
||||||
a scenario. Notice the Hypervisor and VM1 tabs both have an error icon,
|
a scenario. Notice the Hypervisor and VM1 tabs both have an error icon,
|
||||||
meaning there are issues with configuration options in two areas. Since the
|
meaning there are issues with configuration options in two areas. Since the
|
||||||
Hypervisor tab is currently highlighted, we're seeing an issue we can resolve
|
Hypervisor tab is currently highlighted, we're seeing an issue we can resolve
|
||||||
on the Hypervisor settings. Once we resolve all the errors and save the
|
on the Hypervisor settings. Once we resolve all the errors and save the
|
||||||
@ -477,10 +475,11 @@ post-launched User VM. Each User VM has its own launch script.
|
|||||||
resolve the missing physical CPU affinity assignment error.)
|
resolve the missing physical CPU affinity assignment error.)
|
||||||
|
|
||||||
#. For **Virtio console device**, click **+** to add a device and keep the
|
#. For **Virtio console device**, click **+** to add a device and keep the
|
||||||
default options.
|
default options. This parameter specifies the console that you will use to
|
||||||
|
log in to the User VM later in this guide.
|
||||||
|
|
||||||
#. For **Virtio block device**, click **+** and enter
   ``/home/acrn/acrn-work/ubuntu-22.04.4-desktop-amd64.iso``. This parameter
   ``/home/acrn/acrn-work/ubuntu-22.04.2-desktop-amd64.iso``. This parameter
   specifies the VM's OS image and its location on the target system. Later
   in this guide, you will save the ISO file to that directory. (If you used
   a different username when installing Ubuntu on the target system, here's

@ -490,7 +489,7 @@ post-launched User VM. Each User VM has its own launch script.

   :align: center
   :class: drop-shadow

.. image:: images/configurator_postvm02.png
.. image:: images/configurator-postvm02.png
   :align: center
   :class: drop-shadow
@ -573,9 +572,10 @@ Build ACRN

   cd ..
   ls *acrn-service-vm*.deb
   linux-headers-6.1.80-acrn-service-vm_6.1.80-acrn-service-vm-1_amd64.deb
   linux-image-6.1.80-acrn-service-vm_6.1.80-acrn-service-vm-1_amd64.deb
   linux-libc-dev_6.1.80-acrn-service-vm-1_amd64.deb
   linux-headers-5.15.71-acrn-service-vm_5.15.71-acrn-service-vm-1_amd64.deb
   linux-image-5.15.71-acrn-service-vm_5.15.71-acrn-service-vm-1_amd64.deb
   linux-image-5.15.71-acrn-service-vm-dbg_5.15.71-acrn-service-vm-1_amd64.deb
   linux-libc-dev_5.15.71-acrn-service-vm-1_amd64.deb

#. Use the ``scp`` command to copy files from your development computer to the
   target system. Replace ``10.0.0.200`` with the target system's IP address
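For illustration only, the copy can be a single ``scp`` invocation; the user name,
IP address, and glob pattern below are placeholders for your own build output and
target system:

.. code-block:: bash

   # Copy the freshly built Service VM kernel packages to the target's ~/acrn-work
   # directory (acrn@10.0.0.200 is a placeholder login on the target system).
   scp ./*acrn-service-vm*.deb acrn@10.0.0.200:~/acrn-work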
@ -617,13 +617,13 @@ Install ACRN

.. code-block:: bash

   sudo reboot
   reboot

The target system will reboot into the ACRN hypervisor and
start the Ubuntu Service VM.

#. Confirm that you see the GRUB menu with the "Ubuntu with ACRN hypervisor, with Linux 6.1.80-acrn-service-vm (ACRN 3.3)"
   entry. Select it and proceed to booting ACRN. (It may be auto-selected, in which case it
#. Confirm that you see the GRUB menu with the "Ubuntu-ACRN Board Inspector, with 5.15.0-56-generic" entry.
   Select it and proceed to booting ACRN. (It may be auto-selected, in which case it
   will boot with this option automatically in 5 seconds.)

An example GRUB menu is shown below:
@ -634,12 +634,10 @@ Install ACRN

   ────────────────────────────────────────────────────────────────────────────────
   Ubuntu
   Advanced options for Ubuntu
   Ubuntu-ACRN Board Inspector, with Linux 6.5.0-18-generic
   Ubuntu-ACRN Board Inspector, with Linux 5.15.71-acrn-service-vm
   Ubuntu-ACRN Board Inspector, with Linux 6.1.80-acrn-service-vm
   *Ubuntu-ACRN Board Inspector, with Linux 5.15.0-56-generic
   Memory test (memtest86+x64.efi)
   Ubuntu with ACRN hypervisor, with Linux 5.15.71-acrn-service-vm (ACRN 3.2)
   Memory test (memtest86+x64.efi, serial console)
   Ubuntu with ACRN hypervisor, with Linux 5.15.0-56-generic (ACRN 3.2)
   Ubuntu with ACRN hypervisor, with Linux 6.5.0-18-generic (ACRN 3.3)
   *Ubuntu with ACRN hypervisor, with Linux 6.1.80-acrn-service-vm (ACRN 3.3)
   UEFI Firmware Settings

.. _gsg-run-acrn:
@ -678,7 +676,7 @@ The ACRN hypervisor boots the Ubuntu Service VM automatically.

.. code-block:: bash

   sudo cp /usr/share/doc/acrnd/examples/* /etc/systemd/network
   cp /usr/share/doc/acrnd/examples/* /etc/systemd/network
   sudo systemctl enable --now systemd-networkd
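To sanity-check the Service VM network setup after these commands, the standard
systemd and iproute2 tools are enough; this is only a suggested spot check, not
part of the official steps:

.. code-block:: bash

   # Confirm systemd-networkd picked up the copied configuration files.
   systemctl status systemd-networkd --no-pager
   networkctl list
   ip addr show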
.. _gsg-user-vm:

@ -690,7 +688,7 @@ Launch the User VM

#. On the target system, use the web browser to visit the `official Ubuntu website <https://releases.ubuntu.com/jammy/>`__ and
   get the Ubuntu Desktop 22.04 LTS ISO image
   ``ubuntu-22.04.4-desktop-amd64.iso`` for the User VM. (The same image you
   ``ubuntu-22.04.2-desktop-amd64.iso`` for the User VM. (The same image you
   specified earlier in the ACRN Configurator UI.) Alternatively, instead of
   downloading it again, you could use ``scp`` to copy the ISO
   image file from the development system to the ``~/acrn-work`` directory on the target system.
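If you take the ``scp`` route instead of downloading again, the copy looks roughly
like this; the IP address is a placeholder for your target system, and the ISO file
name should match the release you actually downloaded:

.. code-block:: bash

   # Run on the development computer; 10.0.0.200 stands in for the target's IP address.
   scp ~/Downloads/ubuntu-22.04.4-desktop-amd64.iso acrn@10.0.0.200:~/acrn-work/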
@ -702,7 +700,7 @@ Launch the User VM

.. code-block:: bash

   cp ~/Downloads/ubuntu-22.04.4-desktop-amd64.iso ~/acrn-work
   cp ~/Downloads/ubuntu-22.04.2-desktop-amd64.iso ~/acrn-work

#. Launch the User VM:

@ -712,12 +710,23 @@ Launch the User VM

   sudo ~/acrn-work/launch_user_vm_id1.sh
#. It may take about a minute for the User VM to boot and start running the
   Ubuntu image. You will see a lot of output, then the console of the User VM
   will appear as follows:

   .. code-block:: console

      Welcome to Ubuntu 22.04.4 LTS (GNU/Linux 6.5.0-18-generic x86_64)
      Ubuntu 22.04.2 LTS ubuntu hvc0

      ubuntu login:

#. Log in to the User VM. For the Ubuntu 22.04 ISO, the user is ``ubuntu``, and
   there's no password.

#. Confirm that you see output similar to this example:

   .. code-block:: console

      Welcome to Ubuntu 22.04.2 LTS (GNU/Linux 5.19.0-32-generic x86_64)

      * Documentation: https://help.ubuntu.com
      * Management: https://landscape.canonical.com

@ -747,20 +756,20 @@ Launch the User VM

      ubuntu@ubuntu:~$

#. This User VM and the Service VM are running different Ubuntu images. Use this
   command to see that the User VM is running the downloaded Ubuntu image:
   command to see that the User VM is running the downloaded Ubuntu ISO image:

   .. code-block:: console

      acrn@ubuntu:~$ uname -r
      ubuntu@ubuntu:~$ uname -r
      6.5.0-18-generic
      5.19.0-32-generic

   Then open a new terminal window and use the command to see that the Service
   VM is running the ``acrn-kernel`` Service VM image:

   .. code-block:: console

      acrn@asus-MINIPC-PN64:~$ uname -r
      acrn@vecow:~$ uname -r
      6.1.80-acrn-service-vm
      5.15.71-acrn-service-vm

The User VM has launched successfully. You have completed this ACRN setup.
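As an optional follow-up (not part of the guide above), the Service VM's ``acrnctl``
tool can list and stop running User VMs; the VM name below is a placeholder for
whatever ``acrnctl list`` reports on your system:

.. code-block:: bash

   # Run in the Service VM. Replace POST_STD_VM1 with the name shown by `acrnctl list`.
   sudo acrnctl list
   sudo acrnctl stop POST_STD_VM1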
BIN  doc/getting-started/images/configurator-postvm02.png  (new file)
@ -117,7 +117,7 @@ As a normal (e.g., **acrn**) user, follow these steps:

   cd ~/acrn-work/acrn-hypervisor
   git fetch --all
   git checkout release_3.3
   git checkout release_3.2

#. Build the ACRN sample application source code::

@ -522,7 +522,6 @@ Install and Run ACRN on the Target System

and then the ``histapp.py`` application::

   pip install "numpy<2"
   sudo python3 /root/histapp.py

At this point, the HMI_VM is running and we've started the HMI parts of
@ -78,7 +78,6 @@ license.

   contribute
   release_notes/index
   asa
   projects/index
   glossary
   genindex
@ -1,13 +0,0 @@

.. _projects:

Projects
########

Here is documentation for projects that build on the initial and continuing work
from the ACRN development team at Intel.

.. toctree::
   :maxdepth: 1

   multi-arch-support
@ -1,38 +0,0 @@

.. _multi-arch-support:

Hypervisor Multi-Architecture and RISC-V Support
################################################

.. note:: This is a preliminary draft of a planned and as yet unreleased effort
   to port the ACRN Hypervisor to non-Intel architectures.

From its first release in July 2018, the ACRN Hypervisor was designed for and
targeted to Intel platforms and relied on Intel Virtualization Technology (Intel
VT). From that base, we're expanding support to bring the ACRN hypervisor to the
RISC-V64 architecture with a Hypervisor Extension.

RISC-V Support
**************

Adding multi-architecture support begins by refining the current architecture
abstraction layer and defining architecture-neutral APIs covering the management
of cores, caches, memory, interrupts, timers, and hardware virtualization
facilities. Then an implementation of those APIs for RISC-V will be introduced.

Based on its wide availability and flexibility, QEMU is the first RISC-V
(virtual) platform this project targets. Real platforms may be selected later
based on business and community interests.

Current State
=============

This project is currently under development and is not yet ready for production.
Once this support is implemented and has sufficient quality, this port will
become a part of the upstream ACRN project and we'll continue development there
and encourage contributions by the ACRN community.

License
=======

This project will be released under the BSD-3-Clause license, the same as the
rest of project ACRN.
@ -74,52 +74,52 @@ level includes the activities described in the lower levels.
|
|||||||
|
|
||||||
.. # Note For easier editing, I'm using unicode non-printing spaces in this table to help force the width of the first two columns to help prevent wrapping (using isn't compact enough)
|
.. # Note For easier editing, I'm using unicode non-printing spaces in this table to help force the width of the first two columns to help prevent wrapping (using isn't compact enough)
|
||||||
|
|
||||||
+------------------------+---------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
|
+------------------------+---------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
|
||||||
| | | .. rst-class:: |
|
| | | .. rst-class:: |
|
||||||
| | | centered |
|
| | | centered |
|
||||||
| | | |
|
| | | |
|
||||||
| | | ACRN Version |
|
| | | ACRN Version |
|
||||||
| | +-------------------+-------------------+-------------------+-------------------+-------------------+-------------------+-------------------+-------------------+-------------------+-------------------+
|
| | +-------------------+-------------------+-------------------+-------------------+-------------------+-------------------+-------------------+-------------------+-------------------+
|
||||||
| Intel Processor Family | Tested Products | .. rst-class:: | .. rst-class:: | .. rst-class:: | .. rst-class:: | .. rst-class:: | .. rst-class:: | .. rst-class:: | .. rst-class:: | .. rst-class:: | .. rst-class:: |
|
| Intel Processor Family | Tested Products | .. rst-class:: | .. rst-class:: | .. rst-class:: | .. rst-class:: | .. rst-class:: | .. rst-class:: | .. rst-class:: | .. rst-class:: | .. rst-class:: |
|
||||||
| Code Name | | centered | centered | centered | centered | centered | centered | centered | centered | centered | centered |
|
| Code Name | | centered | centered | centered | centered | centered | centered | centered | centered | centered |
|
||||||
| | | | | | | | | | | | |
|
| | | | | | | | | | | |
|
||||||
| | | v1.0 | v1.6.1 | v2.0 | v2.5 | v2.6 | v2.7 | v3.0 | v3.1 | v3.2 | v3.3 |
|
| | | v1.0 | v1.6.1 | v2.0 | v2.5 | v2.6 | v2.7 | v3.0 | v3.1 | v3.2 |
|
||||||
+========================+=================================+===================+===================+===================+===================+===================+===================+===================+===================+===================+===================+
|
+========================+=================================+===================+===================+===================+===================+===================+===================+===================+===================+===================+
|
||||||
| Raptor Lake | `ASUS PN64-E1`_ | | .. rst-class:: | .. rst-class:: |
|
| Raptor Lake | `ASUS PN64-E1`_ | | .. rst-class:: |
|
||||||
| | | | centered | centered |
|
| | | | centered |
|
||||||
| | | | | |
|
| | | | |
|
||||||
| | | | Community | Maintenance |
|
| | | | Community |
|
||||||
+------------------------+---------------------------------+-----------------------------------------------------------------------------------------------------------------------+-------------------+-------------------+-------------------+-------------------+
|
+------------------------+---------------------------------+-----------------------------------------------------------------------------------------------------------------------+-------------------+-------------------+-------------------+
|
||||||
| Alder Lake | | `ASRock iEPF-9010S-EY4`_, | | .. rst-class:: | .. rst-class:: |
|
| Alder Lake | | `ASRock iEPF-9010S-EY4`_, | | .. rst-class:: | .. rst-class:: |
|
||||||
| | | `ASRock iEP-9010E`_ | | centered | centered |
|
| | | `ASRock iEP-9010E`_ | | centered | centered |
|
||||||
| | | | | |
|
| | | | | |
|
||||||
| | | | Release | Community |
|
| | | | Release | Community |
|
||||||
+------------------------+---------------------------------+-----------------------------------------------------------------------------------------------------------------------+-------------------+---------------------------------------+-------------------+
|
+------------------------+---------------------------------+-----------------------------------------------------------------------------------------------------------------------+-------------------+---------------------------------------+
|
||||||
| Tiger Lake | `Vecow SPC-7100`_ | | .. rst-class:: | .. rst-class:: |
|
| Tiger Lake | `Vecow SPC-7100`_ | | .. rst-class:: |
|
||||||
| | | | centered | centered |
|
| | | | centered |
|
||||||
| | | | | |
|
| | | | |
|
||||||
| | | | Maintenance | Community |
|
| | | | Maintenance |
|
||||||
+------------------------+---------------------------------+-----------------------------------------------------------+-------------------+---------------------------------------+-----------------------------------------------------------+-------------------+
|
+------------------------+---------------------------------+-----------------------------------------------------------+-------------------+---------------------------------------+-----------------------------------------------------------+
|
||||||
| Tiger Lake | `NUC11TNHi5`_ | | .. rst-class:: | .. rst-class:: | .. rst-class:: |
|
| Tiger Lake | `NUC11TNHi5`_ | | .. rst-class:: | .. rst-class:: | .. rst-class:: |
|
||||||
| | | | centered | centered | centered |
|
| | | | centered | centered | centered |
|
||||||
| | | | | | |
|
| | | | | | |
|
||||||
| | | | Release | Maintenance | Community |
|
| | | | Release | Maintenance | Community |
|
||||||
+------------------------+---------------------------------+---------------------------------------+-------------------+-------------------+-------------------+-------------------+-------------------------------------------------------------------------------+
|
+------------------------+---------------------------------+---------------------------------------+-------------------+-------------------+-------------------+-------------------+-----------------------------------------------------------+
|
||||||
| Whiskey Lake | `WHL-IPC-I5`_ | | .. rst-class:: | .. rst-class:: | .. rst-class:: |
|
| Whiskey Lake | `WHL-IPC-I5`_ | | .. rst-class:: | .. rst-class:: | .. rst-class:: |
|
||||||
| | | | centered | centered | centered |
|
| | | | centered | centered | centered |
|
||||||
| | | | | | |
|
| | | | | | |
|
||||||
| | | | Release | Maintenance | Community |
|
| | | | Release | Maintenance | Community |
|
||||||
+------------------------+---------------------------------+-------------------+-------------------+-------------------+-------------------+-------------------+---------------------------------------------------------------------------------------------------+
|
+------------------------+---------------------------------+-------------------+-------------------+-------------------+-------------------+-------------------+-------------------------------------------------------------------------------+
|
||||||
| Kaby Lake | `NUC7i7DNHE`_ | | .. rst-class:: | .. rst-class:: | .. rst-class:: |
|
| Kaby Lake | `NUC7i7DNHE`_ | | .. rst-class:: | .. rst-class:: | .. rst-class:: |
|
||||||
| | | | centered | centered | centered |
|
| | | | centered | centered | centered |
|
||||||
| | | | | | |
|
| | | | | | |
|
||||||
| | | | Release | Maintenance | Community |
|
| | | | Release | Maintenance | Community |
|
||||||
+------------------------+---------------------------------+-------------------+-------------------+---------------------------------------+-----------------------------------------------------------------------------------------------------------------------+
|
+------------------------+---------------------------------+-------------------+-------------------+---------------------------------------+---------------------------------------------------------------------------------------------------+
|
||||||
| Apollo Lake | | `NUC6CAYH`_, | .. rst-class:: | .. rst-class:: | .. rst-class:: |
|
| Apollo Lake | | `NUC6CAYH`_, | .. rst-class:: | .. rst-class:: | .. rst-class:: |
|
||||||
| | | `UP2-N3350`_, | centered | centered | centered |
|
| | | `UP2-N3350`_, | centered | centered | centered |
|
||||||
| | | `UP2-N4200`_, | | | |
|
| | | `UP2-N4200`_, | | | |
|
||||||
| | | `UP2-x5-E3940`_ | Release | Maintenance | Community |
|
| | | `UP2-x5-E3940`_ | Release | Maintenance | Community |
|
||||||
+------------------------+---------------------------------+-------------------+-------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------+
|
+------------------------+---------------------------------+-------------------+-------------------+-------------------------------------------------------------------------------------------------------------------------------------------+
|
||||||
|
|
||||||
* **Release**: New ACRN features are complete and tested for the listed product.
|
* **Release**: New ACRN features are complete and tested for the listed product.
|
||||||
This product is recommended for this ACRN version. Support for older products
|
This product is recommended for this ACRN version. Support for older products
|
||||||
|
@ -79,7 +79,7 @@ to upgrade from prior ACRN versions:

#. Manually edit your previous older scenario XML and launch XML files to make them
   compatible with v3.2. This is not our recommended approach.

Here are some additional details about upgrading to the v3.2 release.
Here are some additional details about upgrading to the v3.22 release.

Generate New Board XML
======================
@ -1,199 +0,0 @@
|
|||||||
.. _release_notes_3.3:
|
|
||||||
|
|
||||||
ACRN v3.3 (Aug 2024)
|
|
||||||
####################
|
|
||||||
|
|
||||||
We are pleased to announce the release of the Project ACRN hypervisor
|
|
||||||
version 3.3.
|
|
||||||
|
|
||||||
ACRN is a flexible, lightweight reference hypervisor that is built with
|
|
||||||
real-time and safety-criticality in mind. It is optimized to streamline
|
|
||||||
embedded development through an open-source platform. See the
|
|
||||||
:ref:`introduction` introduction for more information.
|
|
||||||
|
|
||||||
All project ACRN source code is maintained in the
|
|
||||||
https://github.com/projectacrn/acrn-hypervisor repository and includes
|
|
||||||
folders for the ACRN hypervisor, the ACRN device model, tools, and
|
|
||||||
documentation. You can download this source code either as a zip or
|
|
||||||
tar.gz file (see the `ACRN v3.3 GitHub release page
|
|
||||||
<https://github.com/projectacrn/acrn-hypervisor/releases/tag/v3.3>`_) or
|
|
||||||
use Git ``clone`` and ``checkout`` commands::
|
|
||||||
|
|
||||||
git clone https://github.com/projectacrn/acrn-hypervisor
|
|
||||||
cd acrn-hypervisor
|
|
||||||
git checkout v3.3
|
|
||||||
|
|
||||||
The project's online technical documentation is also tagged to
|
|
||||||
correspond with a specific release: generated v3.3 documents can be
|
|
||||||
found at https://projectacrn.github.io/3.3/. Documentation for the
|
|
||||||
latest development branch is found at https://projectacrn.github.io/latest/.
|
|
||||||
|
|
||||||
ACRN v3.3 requires Ubuntu 22.04. Follow the instructions in the
|
|
||||||
:ref:`gsg` to get started with ACRN.
|
|
||||||
|
|
||||||
|
|
||||||
What's New in v3.3
|
|
||||||
******************
|
|
||||||
|
|
||||||
Generic Main VM Support
|
|
||||||
The v3.3 release now supports a new scenario called "Main VM". A "Service VM"
|
|
||||||
has two characteristics: (1) it is the default owner of physical resources
|
|
||||||
and (2) it can invoke VM management hypercalls. This release adds support
|
|
||||||
to configure a VM with only the physical resource ownership characteristic
|
|
||||||
and calling this a "Main VM". An example scenario is a pre-launched TEE
|
|
||||||
(Trusted Execution Environment) VM and a main REE (Rich Execution Environment)
|
|
||||||
VM.
|
|
||||||
|
|
||||||
Enabling Celadon as User VM
|
|
||||||
The ACRN hypervisor now supports Celadon as the User VM OS. Celadon is an
|
|
||||||
open-source project by Intel that provides a reference software stack for Android
|
|
||||||
on Intel architecture platforms, aiming to enable developers to optimize and test
|
|
||||||
Android on Intel-based devices.
|
|
||||||
|
|
||||||
Virtual Processor Performance Controls (vHWP)
|
|
||||||
The v3.3 release provides virtual HWP feature to a VM so that the VM can check
|
|
||||||
hardware performance ranges and adjust performance levels for performance or
|
|
||||||
power consumption.
|
|
||||||
|
|
||||||
Virtual Thermal Monitor and Software Controlled Clock Facilities
|
|
||||||
This release is able to virtualize processor thermal sensors and
|
|
||||||
controls for thermal management in VMs.
|
|
||||||
|
|
||||||
Hypervisor Runtime Core PM
|
|
||||||
The v3.3 release enhances processor power management in the hypervisor
|
|
||||||
at runtime to reduce power consumption when a core is idle.
|
|
||||||
|
|
||||||
Guest S3 Support
|
|
||||||
The v3.3 release supports suspend-to-RAM of post-launched VMs running
|
|
||||||
with OVMF.
|
|
||||||
|
|
||||||
System Performance Optimization - Virtio-blk Multi-Virtqueue Support
|
|
||||||
This release optimizes the virtio-block backend performance by allowing
|
|
||||||
multiple virtqueues between a frontend driver and the backend.
|
|
||||||
|
|
||||||
Notification of VM Events
|
|
||||||
Emit events (such as RTC changes and power cycles) to the monitor socket for
|
|
||||||
customizing further actions upon such events.
|
|
||||||
|
|
||||||
Enhance device model passthrough
|
|
||||||
This release supports passthrough of PCI devices with legacy interrupts, and of some ACPI devices such as a
GPIO controller or a legacy UART.
|
|
||||||
|
|
||||||
ServiceVM supervisor role
|
|
||||||
Users can configure the Service VM in a supervisor role so that it can manage the power status of
any guest VM.
|
|
||||||
|
|
||||||
|
|
||||||
Upgrading to v3.3 from Previous Releases
|
|
||||||
****************************************
|
|
||||||
|
|
||||||
We recommend you generate a new board XML for your target system with the v3.3
|
|
||||||
Board Inspector. You should also use the v3.3 Configurator to generate a new
|
|
||||||
scenario XML file and launch scripts. Scenario XML files and launch scripts
|
|
||||||
created by previous ACRN versions will not work with the v3.3 ACRN hypervisor
|
|
||||||
build process and could produce unexpected errors during the build.
|
|
||||||
|
|
||||||
Given the scope of changes for the v3.3 release, we have recommendations for how
|
|
||||||
to upgrade from prior ACRN versions:
|
|
||||||
|
|
||||||
1. Start fresh from our :ref:`gsg`. This is the best way to ensure you have a
|
|
||||||
v3.3-ready board XML file from your target system and generate a new scenario
|
|
||||||
XML and launch scripts from the new ACRN Configurator that are consistent and
|
|
||||||
will work for the v3.3 build system.
|
|
||||||
#. Use the :ref:`upgrader tool <upgrading_configuration>` to attempt upgrading
|
|
||||||
your configuration files that worked with prior releases. You'll need the
|
|
||||||
matched pair of scenario XML and launch XML files from a prior configuration,
|
|
||||||
and use them to create a new merged scenario XML file. See
|
|
||||||
:ref:`upgrading_configuration` for details.
|
|
||||||
#. Manually edit your previous older scenario XML and launch XML files to make them
|
|
||||||
compatible with v3.3. This is not our recommended approach.
|
|
||||||
|
|
||||||
Here are some additional details about upgrading to the v3.3 release.
|
|
||||||
|
|
||||||
Generate New Board XML
|
|
||||||
======================
|
|
||||||
|
|
||||||
Board XML files, generated by ACRN Board Inspector, contain board information
|
|
||||||
that is essential for building the ACRN hypervisor and setting up User VMs.
|
|
||||||
Compared to previous versions, ACRN v3.3 adds the following information to the
|
|
||||||
board XML file for supporting new features and fixes:
|
|
||||||
|
|
||||||
* Fix typo in PCIe PTM Capability name (See :acrn-pr:`8607`)
|
|
||||||
* Support motherboards that expose MCFG1/MCFG2 instead of one ACPI MCFG
  table. (See :acrn-pr:`8513`)
|
|
||||||
|
|
||||||
See the :ref:`board_inspector_tool` documentation for a complete list of steps
|
|
||||||
to install and run the tool.
|
|
||||||
|
|
||||||
Update Configuration Options
|
|
||||||
============================
|
|
||||||
|
|
||||||
As explained in this :ref:`upgrading_configuration` document, we do provide a
|
|
||||||
tool that can assist upgrading your existing pre-v3.3 scenario XML files in the
|
|
||||||
new merged v3.3 format. From there, you can use the v3.3 ACRN Configurator UI to
|
|
||||||
open the upgraded scenario file for viewing and further editing if the upgrader
|
|
||||||
tool lost meaningful data during the conversion.
|
|
||||||
|
|
||||||
The ACRN Configurator adds the following features and fixes to improve the user
|
|
||||||
experience:
|
|
||||||
|
|
||||||
* Support Main VM configuration. (See :acrn-pr:`8658`)
|
|
||||||
* Change Service VM to supervisor role. (See :acrn-pr:`8630`)
|
|
||||||
* Fix Vue3 version and update braces version (See :acrn-pr:`8627`)
|
|
||||||
* Fix openssl's vulnerability for tauri (See :acrn-pr:`8670`)
|
|
||||||
* Fix v-model used on props for Vue3 making strictly checking (See :acrn-pr:`8597`)
|
|
||||||
* Support vUART two options in configurator (See :acrn-pr:`8649`)
|
|
||||||
* Add checks of CPU affinity and serial port settings for post-launched VMs and the hypervisor
  when the user clicks to append a new VM. (See :acrn-pr:`8602`)
|
|
||||||
|
|
||||||
See the :ref:`scenario-config-options` documentation for details about all the
|
|
||||||
available configuration options in the new Configurator.
|
|
||||||
|
|
||||||
|
|
||||||
Document Updates
|
|
||||||
****************
|
|
||||||
|
|
||||||
Here are some of the more significant documentation updates from the v3.2 release:
|
|
||||||
|
|
||||||
.. rst-class:: rst-columns2
|
|
||||||
|
|
||||||
* :ref:`gsg`
|
|
||||||
* :ref:`using_celadon_as_user_vm`
|
|
||||||
* :ref:`release_notes_3.3`
|
|
||||||
* :ref:`hv-config`
|
|
||||||
* :ref:`acrn_configurator_tool`
|
|
||||||
* :ref:`GSG_sample_app`
|
|
||||||
* :ref:`acrnshell`
|
|
||||||
|
|
||||||
|
|
||||||
Fixed Issues Details
|
|
||||||
********************
|
|
||||||
|
|
||||||
.. comment example item
|
|
||||||
- :acrn-issue:`5626` - Host Call Trace once detected
|
|
||||||
|
|
||||||
- :acrn-issue:`8608` - hybrid vcpuid support
|
|
||||||
- :acrn-issue:`8590` - Hypervisor crashes after rebooting post-launched VM with passthrough device for lots of times
|
|
||||||
- :acrn-issue:`8599` - Should clear pcpu_active_bitmap in start_pcpu
|
|
||||||
|
|
||||||
- :acrn-issue:`8576` - Update-grub failed with GRUB 2.12
|
|
||||||
- :acrn-issue:`8518` - Initial boot log is lost in vm_console
|
|
||||||
- :acrn-issue:`8509` - S3 feature of Service VM OS is not available
|
|
||||||
- :acrn-issue:`8506` - Unable to passthrough USB device on bus 5 to guest
|
|
||||||
- :acrn-issue:`8500` - Add weight support for BVT scheduler
|
|
||||||
- :acrn-issue:`8495` - Service VM dead loops when booting up on platform with reserved memory as the last e820 entry
|
|
||||||
- :acrn-issue:`8492` - passthru multifunction device at function 0 will cause sub-function devices lost
|
|
||||||
- :acrn-issue:`8537` - Emulate COM3/4 in devicemodel
|
|
||||||
- :acrn-issue:`8491` - need to expose service vm config pointer
|
|
||||||
- :acrn-issue:`8579` - debian: fix broken grub config with grub 2.12
|
|
||||||
- :acrn-issue:`6631` - Fix Kata support with modify network configuration
|
|
||||||
|
|
||||||
Known Issues
|
|
||||||
************
|
|
||||||
|
|
||||||
- :acrn-issue:`6978` - openstack failed since ACRN v2.7
|
|
||||||
- :acrn-issue:`7827` - Pre_launched standard VMs cannot share CPU with Service VM in configurator
|
|
||||||
- :acrn-issue:`8471` - PTM enabling failure on i225 NIC
|
|
||||||
- :acrn-issue:`8472` - Failed to clear memory for post-launched standard VM
|
|
||||||
|
|
||||||
|
|
@ -444,18 +444,16 @@ how to build the Debian package from source code.

.. code-block:: bash

   sudo apt install -y build-essential \
   sudo apt install -y libwebkit2gtk-4.0-dev \
       build-essential \
       curl \
       wget \
       libssl-dev \
       libgtk-3-dev \
       libappindicator3-dev \
       librsvg2-dev \
       python3-venv

   cd /tmp/
   wget http://security.ubuntu.com/ubuntu/pool/main/w/webkit2gtk/libwebkit2gtk-4.0-37_2.44.2-0ubuntu0.22.04.1_amd64.deb http://mirrors.kernel.org/ubuntu/pool/main/i/icu/libicu70_70.1-2_amd64.deb http://security.ubuntu.com/ubuntu/pool/main/w/webkit2gtk/libjavascriptcoregtk-4.0-18_2.44.2-0ubuntu0.22.04.1_amd64.deb
   sudo apt install ./libwebkit2gtk-4.0-37_2.44.2-0ubuntu0.22.04.1_amd64.deb ./libicu70_70.1-2_amd64.deb ./libjavascriptcoregtk-4.0-18_2.44.2-0ubuntu0.22.04.1_amd64.deb

#. Install Node.js (npm included) as follows:

   a. We recommend using nvm to manage your Node.js runtime. It allows you to
@ -67,9 +67,6 @@ For the shared memory region:

#. Enter a name for the shared memory region.
#. Select the source of the emulation, either Hypervisor or Device Model.
#. Select the size of the shared memory region.
#. **Enter shared memory region ID, which can be in hexadecimal or decimal format**.

   .. note::
      The default value is 0. IDs in 0x001 ~ 0xFFF are reserved; 0x1000 ~ 0xFFFF are available.

#. Select at least two VMs that can use the shared memory region.
#. Enter a virtual Board:Device.Function (BDF) address for each VM or leave it
   blank. If the field is blank, the tool provides an address when the

@ -91,4 +88,4 @@ Learn More

ACRN supports multiple inter-VM communication methods. For a comparison, see
:ref:`inter-vm_communication`.

For details on ACRN IVSHMEM high-level design, see :ref:`ivshmem-hld`.
@ -168,7 +168,6 @@ when using one of these OSs:

* :ref:`using_ubuntu_as_user_vm`
* :ref:`using_windows_as_user_vm`
* :ref:`using_celadon_as_user_vm`

Real-time VM OS Considerations
******************************
@ -1,197 +0,0 @@
|
|||||||
.. _using_celadon_as_user_vm:
|
|
||||||
|
|
||||||
Run Celadon as the User VM OS
|
|
||||||
#############################
|
|
||||||
|
|
||||||
Introduction to Celadon
|
|
||||||
***********************
|
|
||||||
`Celadon`_ ---- An open source Android software reference stack for Intel architecture.
|
|
||||||
|
|
||||||
This tutorial describes how to run Celadon Android as the User VM OS on ACRN hypervisor.
|
|
||||||
|
|
||||||
If you want to learn more about Celadon, refer to the
|
|
||||||
official `Celadon documentation <https://projectceladon.github.io>`__.
|
|
||||||
|
|
||||||
.. _Celadon:
|
|
||||||
http://github.com/projectceladon
|
|
||||||
|
|
||||||
|
|
||||||
Build Celadon Image from Source
|
|
||||||
*******************************
|
|
||||||
|
|
||||||
Before building the Celadon image, please make sure your development workstation
|
|
||||||
meets the following requirements: a 64-bit workstation running Ubuntu with **64GB memory** and
**350GB of free disk space**. If your workstation does not meet these requirements,
you may encounter unexpected errors.
|
|
||||||
|
|
||||||
Follow these instructions to build the Celadon images:
|
|
||||||
|
|
||||||
#. Install the repo tools:
|
|
||||||
|
|
||||||
.. code-block:: none
|
|
||||||
|
|
||||||
mkdir -p ~/bin
|
|
||||||
curl https://storage.googleapis.com/git-repo-downloads/repo > ~/bin/repo
|
|
||||||
chmod a+x ~/bin/repo
|
|
||||||
export PATH=~/bin:$PATH
|
|
||||||
|
|
||||||
#. Install the required building packages:
|
|
||||||
|
|
||||||
.. code-block:: none
|
|
||||||
|
|
||||||
sudo apt update
|
|
||||||
sudo apt install openjdk-8-jdk git ccache automake \
|
|
||||||
lzop bison gperf build-essential zip curl \
|
|
||||||
zlib1g-dev g++-multilib python3-networkx \
|
|
||||||
libxml2-utils bzip2 libbz2-dev libbz2-1.0 \
|
|
||||||
libghc-bzlib-dev squashfs-tools pngcrush \
|
|
||||||
schedtool dpkg-dev liblz4-tool make optipng maven \
|
|
||||||
libssl-dev bc bsdmainutils gettext python3-mako \
|
|
||||||
libelf-dev sbsigntool dosfstools mtools efitools \
|
|
||||||
python3-pystache git-lfs python3 flex clang libncurses5 \
|
|
||||||
fakeroot ncurses-dev xz-utils python3-pip ninja-build \
|
|
||||||
cryptsetup-bin cutils cmake pkg-config xorriso mtools
|
|
||||||
sudo pip3 install mako==1.1.0 meson==0.60.0 dataclasses
|
|
||||||
sudo su
|
|
||||||
cd /usr/local/
|
|
||||||
wget https://github.com/KhronosGroup/glslang/releases/download/SDK-candidate-26-Jul-2020/glslang-master-linux-Release.zip && \
|
|
||||||
unzip glslang-master-linux-Release.zip bin/glslangValidator
|
|
||||||
|
|
||||||
#. Download the source code
|
|
||||||
|
|
||||||
.. code-block:: none
|
|
||||||
|
|
||||||
mkdir ~/civ
|
|
||||||
cd ~/civ
|
|
||||||
|
|
||||||
We choose Celadon Android 14 Base Releases `CIV_00.23.04.51_A14 <https://projectceladon.github.io/celadon-documentation/release-notes/base-releases-A14.html#civ-00-23-04-51-a14>`__.
|
|
||||||
The repo tool will download the entire source code to your local environment; this can take several hours, depending on your network.
|
|
||||||
|
|
||||||
.. code-block:: none
|
|
||||||
|
|
||||||
repo init -u https://github.com/projectceladon/manifest -b master -m stable-build/CIV_00.23.04.51_A14.xml
|
|
||||||
repo sync -c -q -j5
|
|
||||||
|
|
||||||
#. Disable Trusty:
|
|
||||||
|
|
||||||
Trusty is a mandatory component since Android Oreo Desert onwards. However, it's easier to boot Celadon as a VM without the Trusty feature.
So we recommend **disabling Trusty**. To disable it, set the ``trusty`` and ``tpm`` options to false in the mixins config file
``civ/device/intel/projectceladon/caas/mixins.spec`` as follows:
|
|
||||||
|
|
||||||
.. code-block:: none
|
|
||||||
|
|
||||||
[groups]
|
|
||||||
device-specific: celadon
|
|
||||||
treble: true
|
|
||||||
....
|
|
||||||
tpm: false
|
|
||||||
....
|
|
||||||
trusty: false
|
|
||||||
|
|
||||||
After modifying ``mixins.spec``, you must run the ``civ/device/intel/mixins/mixin-update`` script to apply the changes.
|
|
||||||
|
|
||||||
Enter the following commands to initialize your build variables and specify your Celadon lunch target using the ``lunch`` command:
|
|
||||||
|
|
||||||
.. code-block:: none
|
|
||||||
|
|
||||||
source build/envsetup.sh
|
|
||||||
lunch caas
|
|
||||||
|
|
||||||
Meanwhile, the following trusty related configs in **Android kernel** at
|
|
||||||
``device/intel/mixins/groups/kernel/gmin64/config-lts/linux-intel-lts2022/x86_64_defconfig``
|
|
||||||
should be disabled as:
|
|
||||||
|
|
||||||
.. code-block:: none
|
|
||||||
|
|
||||||
# CONFIG_TCG_TPM is not set
|
|
||||||
# CONFIG_HW_RANDOM_TPM is not set
|
|
||||||
# CONFIG_TRUSTY is not set
|
|
||||||
# CONFIG_TRUSTY_LOG is not set
|
|
||||||
# CONFIG_TRUSTY_VIRTIO is not set
|
|
||||||
# CONFIG_TRUSTY_VIRTIO_IPC is not set
|
|
||||||
# CONFIG_TRUSTY_X86_64 is not set
|
|
||||||
# CONFIG_TRUSTY_BACKUP_TIMER is not set
|
|
||||||
|
|
||||||
Run the ``mixin`` command to apply these changes:
|
|
||||||
|
|
||||||
.. code-block:: none
|
|
||||||
|
|
||||||
cd ~/civ
|
|
||||||
mixin
|
|
||||||
|
|
||||||
#. Build Celadon flash image:
|
|
||||||
|
|
||||||
Then you are ready to build the Celadon images. The build may take several hours or more, depending on your build system:
|
|
||||||
|
|
||||||
.. code-block:: none
|
|
||||||
|
|
||||||
make flashfiles -j $(nproc)
|
|
||||||
|
|
||||||
|
|
||||||
#. Flash Celadon image into disk:
|
|
||||||
|
|
||||||
Caution: Please **keep only one hard disk** (the disk will be entirely erased and flashed) in your destination platform, otherwise
Celadon may be flashed to the wrong disk and cause data loss. There are two ways to do this: i. Physically
remove the other hard disk. ii. Disable the SATA (or NVMe) slot in the BIOS settings.
|
|
||||||
|
|
||||||
We test this VM in an ASUS MiniPC with two disk slots: one is a m.2 nvme slot and one is a sata slot. We run service OS
|
|
||||||
(Ubuntu) on a sata disk and run guest OS(Celadon Android) on a nvme disk.
|
|
||||||
|
|
||||||
Prepare an empty USB disk and plug it into your **development platform**, run ``lsblk`` command to find it. Assume it's ``/dev/sdc`` here.
|
|
||||||
|
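A minimal sketch of that check, assuming the USB stick enumerates as ``/dev/sdc``:

.. code-block:: bash

   # List block devices with size, model, and transport so the USB stick is easy to spot.
   lsblk -o NAME,SIZE,MODEL,TRAN
   # Make absolutely sure of the device name before running dd below; writing to the
   # wrong device destroys its contents.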
|
||||||
.. code-block:: none
|
|
||||||
|
|
||||||
cd ~/civ/out/target/product/caas
|
|
||||||
unzip caas-flashfile-eng.dot.iso.zip
|
|
||||||
sudo dd if=~/civ/caas-flashfile-eng.dot.iso of=/dev/sdc status=progress
|
|
||||||
sudo eject /dev/sdc
|
|
||||||
|
|
||||||
Unplug the USB disk and plug it into your **destination platform**. Power on your destination platform and boot into this USB disk via BIOS settings. The flash progress
|
|
||||||
will require you to press :kbd:`UP` or :kbd:`PgUp` to continue. When the flash is done, you can boot into Celadon Android.
|
|
||||||
|
|
||||||
#. ACRN Service VM Setup
|
|
||||||
|
|
||||||
Follow the steps in this :ref:`gsg` to set up ACRN based Ubuntu and launch the Service VM.
|
|
||||||
Modify the ACRN device model parameters in ``launch_user_vm_id1.sh`` as follows:
|
|
||||||
|
|
||||||
.. code-block:: none
|
|
||||||
|
|
||||||
dm_params=(
|
|
||||||
`add_cpus 8 9 16 17`
|
|
||||||
-m 8192M
|
|
||||||
--ovmf /usr/share/acrn/bios/OVMF.fd
|
|
||||||
`add_virtual_device 1:0 lpc`
|
|
||||||
`add_virtual_device 0:0 hostbridge`
|
|
||||||
`add_virtual_device 3 virtio-console @stdio:stdio_port`
|
|
||||||
`add_passthrough_device 2 0000:00:02.0`
|
|
||||||
`add_passthrough_device 4 0000:00:14.0`
|
|
||||||
`add_interrupt_storm_monitor 10000 10 1 100`
|
|
||||||
`add_passthrough_device 5 0000:01:00.0`
|
|
||||||
`add_logger_settings console=4 kmsg=3 disk=5`
|
|
||||||
VM1
|
|
||||||
)
|
|
||||||
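Before launching, it can be worth confirming that the passthrough devices referenced
in the script above actually exist on the target; the BDFs come from this example
script, while the device descriptions in the comments are assumptions about a typical
Intel platform:

.. code-block:: bash

   # Verify the passthrough devices listed in launch_user_vm_id1.sh are present.
   lspci -s 00:02.0    # typically the integrated GPU on Intel platforms
   lspci -s 00:14.0    # typically the USB controller
   lspci -s 01:00.0    # the extra device passed through as slot 5 in this example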
|
|
||||||
#. Boot Celadon VM
|
|
||||||
|
|
||||||
Remotely connect to the target system via SSH and boot the Celadon VM via the launch script ``launch_user_vm_id1``:
|
|
||||||
|
|
||||||
.. code-block:: none
|
|
||||||
|
|
||||||
sudo chmod +x ./launch_user_vm_id1.sh
|
|
||||||
sudo ./launch_user_vm_id1.sh
|
|
||||||
|
|
||||||
Then the screen will temporarily go dark. Wait about one minute and the Android UI will appear as shown:
|
|
||||||
|
|
||||||
.. figure:: images/celadon_uservm_01.png
|
|
||||||
:align: center
|
|
||||||
:name: Android-screenlock
|
|
||||||
:class: drop-shadow
|
|
||||||
|
|
||||||
.. figure:: images/celadon_uservm_02.png
|
|
||||||
:align: center
|
|
||||||
:name: Android-desktop
|
|
||||||
:class: drop-shadow
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
@ -167,9 +167,7 @@ vm_console

The ``vm_console <vm_id>`` command switches the ACRN's console to become the
VM's console.
Send a BREAK character to enter escaping mode and a character e to return to
the ACRN shell console. For details on how the hypervisor console works,
refer to :ref:`hv-console`.
Press :kbd:`Ctrl` + :kbd:`Alt` + :kbd:`Space` to return to the ACRN shell console.
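For context, when the hypervisor console is reached over a serial connection, the
BREAK can be sent from a terminal program such as ``screen``; this is a sketch
assuming a USB serial adapter at ``/dev/ttyUSB0`` and a 115200 baud console:

.. code-block:: bash

   # Attach to the hypervisor serial console (device path and baud rate are assumptions).
   sudo screen /dev/ttyUSB0 115200
   # Inside screen, press Ctrl-a then b to send a BREAK (entering escaping mode),
   # then press e to switch back to the ACRN shell from the VM console.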
vioapic
|
vioapic
|
||||||
=======
|
=======
|
||||||
|
@ -219,7 +219,6 @@ HW_C_SRCS += arch/x86/exception.c
|
|||||||
HW_C_SRCS += arch/x86/irq.c
|
HW_C_SRCS += arch/x86/irq.c
|
||||||
HW_C_SRCS += arch/x86/tsc.c
|
HW_C_SRCS += arch/x86/tsc.c
|
||||||
HW_C_SRCS += arch/x86/tsc_deadline_timer.c
|
HW_C_SRCS += arch/x86/tsc_deadline_timer.c
|
||||||
HW_C_SRCS += arch/x86/hw_thermal.c
|
|
||||||
HW_C_SRCS += arch/x86/vmx.c
|
HW_C_SRCS += arch/x86/vmx.c
|
||||||
HW_C_SRCS += arch/x86/cpu_state_tbl.c
|
HW_C_SRCS += arch/x86/cpu_state_tbl.c
|
||||||
HW_C_SRCS += arch/x86/pm.c
|
HW_C_SRCS += arch/x86/pm.c
|
||||||
@ -231,14 +230,12 @@ HW_C_SRCS += arch/x86/sgx.c
|
|||||||
HW_C_SRCS += common/ticks.c
|
HW_C_SRCS += common/ticks.c
|
||||||
HW_C_SRCS += common/delay.c
|
HW_C_SRCS += common/delay.c
|
||||||
HW_C_SRCS += common/timer.c
|
HW_C_SRCS += common/timer.c
|
||||||
HW_C_SRCS += common/thermal.c
|
|
||||||
HW_C_SRCS += common/irq.c
|
HW_C_SRCS += common/irq.c
|
||||||
HW_C_SRCS += common/softirq.c
|
HW_C_SRCS += common/softirq.c
|
||||||
HW_C_SRCS += common/schedule.c
|
HW_C_SRCS += common/schedule.c
|
||||||
HW_C_SRCS += common/event.c
|
HW_C_SRCS += common/event.c
|
||||||
HW_C_SRCS += common/efi_mmap.c
|
HW_C_SRCS += common/efi_mmap.c
|
||||||
HW_C_SRCS += common/sbuf.c
|
HW_C_SRCS += common/sbuf.c
|
||||||
HW_C_SRCS += common/vm_event.c
|
|
||||||
ifeq ($(CONFIG_SCHED_NOOP),y)
|
ifeq ($(CONFIG_SCHED_NOOP),y)
|
||||||
HW_C_SRCS += common/sched_noop.c
|
HW_C_SRCS += common/sched_noop.c
|
||||||
endif
|
endif
|
||||||
@ -318,7 +315,6 @@ VP_DM_C_SRCS += dm/vpci/vpci.c
|
|||||||
VP_DM_C_SRCS += dm/vpci/vhostbridge.c
|
VP_DM_C_SRCS += dm/vpci/vhostbridge.c
|
||||||
VP_DM_C_SRCS += dm/vpci/vroot_port.c
|
VP_DM_C_SRCS += dm/vpci/vroot_port.c
|
||||||
VP_DM_C_SRCS += dm/vpci/vpci_bridge.c
|
VP_DM_C_SRCS += dm/vpci/vpci_bridge.c
|
||||||
VP_DM_C_SRCS += dm/vpci/vpci_mf_dev.c
|
|
||||||
VP_DM_C_SRCS += dm/vpci/ivshmem.c
|
VP_DM_C_SRCS += dm/vpci/ivshmem.c
|
||||||
VP_DM_C_SRCS += dm/vpci/pci_pt.c
|
VP_DM_C_SRCS += dm/vpci/pci_pt.c
|
||||||
VP_DM_C_SRCS += dm/vpci/vmsi.c
|
VP_DM_C_SRCS += dm/vpci/vmsi.c
|
||||||
|
@ -12,7 +12,7 @@
|
|||||||
/*
|
/*
|
||||||
* @pre pdev != NULL;
|
* @pre pdev != NULL;
|
||||||
*/
|
*/
|
||||||
static bool allocate_to_prelaunched_vm(struct pci_pdev *pdev)
|
static bool is_allocated_to_prelaunched_vm(struct pci_pdev *pdev)
|
||||||
{
|
{
|
||||||
bool found = false;
|
bool found = false;
|
||||||
uint16_t vmid;
|
uint16_t vmid;
|
||||||
@ -52,46 +52,38 @@ static bool allocate_to_prelaunched_vm(struct pci_pdev *pdev)
|
|||||||
*/
|
*/
|
||||||
struct acrn_vm_pci_dev_config *init_one_dev_config(struct pci_pdev *pdev)
|
struct acrn_vm_pci_dev_config *init_one_dev_config(struct pci_pdev *pdev)
|
||||||
{
|
{
|
||||||
|
uint16_t vmid;
|
||||||
|
struct acrn_vm_config *vm_config;
|
||||||
struct acrn_vm_pci_dev_config *dev_config = NULL;
|
struct acrn_vm_pci_dev_config *dev_config = NULL;
|
||||||
bool is_allocated_to_prelaunched_vm = allocate_to_prelaunched_vm(pdev);
|
|
||||||
bool is_allocated_to_hv = is_hv_owned_pdev(pdev->bdf);
|
|
||||||
|
|
||||||
if (service_vm_config != NULL) {
|
if (!is_allocated_to_prelaunched_vm(pdev)) {
|
||||||
dev_config = &service_vm_config->pci_devs[service_vm_config->pci_dev_num];
|
for (vmid = 0U; vmid < CONFIG_MAX_VM_NUM; vmid++) {
|
||||||
|
vm_config = get_vm_config(vmid);
|
||||||
if (is_allocated_to_hv) {
|
if (vm_config->load_order != SERVICE_VM) {
|
||||||
/* Service VM need to emulate the type1 pdevs owned by HV */
|
continue;
|
||||||
dev_config->emu_type = PCI_DEV_TYPE_SERVICE_VM_EMUL;
|
|
||||||
if (is_bridge(pdev)) {
|
|
||||||
dev_config->vdev_ops = &vpci_bridge_ops;
|
|
||||||
} else if (is_host_bridge(pdev)) {
|
|
||||||
dev_config->vdev_ops = &vhostbridge_ops;
|
|
||||||
} else {
|
|
||||||
/* May have type0 device, E.g. debug pci uart */
|
|
||||||
dev_config = NULL;
|
|
||||||
}
|
}
|
||||||
} else if (is_allocated_to_prelaunched_vm) {
|
|
||||||
dev_config = NULL;
|
|
||||||
} else {
|
|
||||||
dev_config->emu_type = PCI_DEV_TYPE_PTDEV;
|
|
||||||
}
|
|
||||||
|
|
||||||
if ((is_allocated_to_hv || is_allocated_to_prelaunched_vm)
|
dev_config = &vm_config->pci_devs[vm_config->pci_dev_num];
|
||||||
&& (dev_config == NULL)
|
if (is_hv_owned_pdev(pdev->bdf)) {
|
||||||
&& is_pci_cfg_multifunction(pdev->hdr_type)
|
/* Service VM need to emulate the type1 pdevs owned by HV */
|
||||||
&& (pdev->bdf.bits.f == 0U))
|
dev_config->emu_type = PCI_DEV_TYPE_SERVICE_VM_EMUL;
|
||||||
{
|
if (is_bridge(pdev)) {
|
||||||
dev_config = &service_vm_config->pci_devs[service_vm_config->pci_dev_num];
|
dev_config->vdev_ops = &vpci_bridge_ops;
|
||||||
dev_config->emu_type = PCI_DEV_TYPE_DUMMY_MF_EMUL;
|
} else if (is_host_bridge(pdev)) {
|
||||||
dev_config->vdev_ops = &vpci_mf_dev_ops;
|
dev_config->vdev_ops = &vhostbridge_ops;
|
||||||
}
|
} else {
|
||||||
}
|
/* May have type0 device, E.g. debug pci uart */
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
dev_config->emu_type = PCI_DEV_TYPE_PTDEV;
|
||||||
|
}
|
||||||
|
|
||||||
if (dev_config != NULL) {
|
dev_config->vbdf.value = pdev->bdf.value;
|
||||||
dev_config->vbdf.value = pdev->bdf.value;
|
dev_config->pbdf.value = pdev->bdf.value;
|
||||||
dev_config->pbdf.value = pdev->bdf.value;
|
dev_config->pdev = pdev;
|
||||||
dev_config->pdev = pdev;
|
vm_config->pci_dev_num++;
|
||||||
service_vm_config->pci_dev_num++;
|
}
|
||||||
}
|
}
|
||||||
return dev_config;
|
return dev_config;
|
||||||
}
|
}
|
||||||
|
@ -37,7 +37,6 @@
|
|||||||
#include <asm/tsc.h>
|
#include <asm/tsc.h>
|
||||||
#include <ticks.h>
|
#include <ticks.h>
|
||||||
#include <delay.h>
|
#include <delay.h>
|
||||||
#include <thermal.h>
|
|
||||||
|
|
||||||
#define CPU_UP_TIMEOUT 100U /* millisecond */
|
#define CPU_UP_TIMEOUT 100U /* millisecond */
|
||||||
#define CPU_DOWN_TIMEOUT 100U /* millisecond */
|
#define CPU_DOWN_TIMEOUT 100U /* millisecond */
|
||||||
@ -269,7 +268,6 @@ void init_pcpu_post(uint16_t pcpu_id)
|
|||||||
init_interrupt(BSP_CPU_ID);
|
init_interrupt(BSP_CPU_ID);
|
||||||
|
|
||||||
timer_init();
|
timer_init();
|
||||||
thermal_init();
|
|
||||||
setup_notification();
|
setup_notification();
|
||||||
setup_pi_notification();
|
setup_pi_notification();
|
||||||
|
|
||||||
@ -311,7 +309,6 @@ void init_pcpu_post(uint16_t pcpu_id)
|
|||||||
init_interrupt(pcpu_id);
|
init_interrupt(pcpu_id);
|
||||||
|
|
||||||
timer_init();
|
timer_init();
|
||||||
thermal_init();
|
|
||||||
ptdev_init();
|
ptdev_init();
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -465,25 +462,7 @@ void stop_pcpus(void)
|
|||||||
|
|
||||||
void cpu_do_idle(void)
|
void cpu_do_idle(void)
|
||||||
{
|
{
|
||||||
#ifdef CONFIG_KEEP_IRQ_DISABLED
|
|
||||||
asm_pause();
|
asm_pause();
|
||||||
#else
|
|
||||||
uint16_t pcpu_id = get_pcpu_id();
|
|
||||||
|
|
||||||
if (per_cpu(mode_to_idle, pcpu_id) == IDLE_MODE_HLT) {
|
|
||||||
asm_safe_hlt();
|
|
||||||
} else {
|
|
||||||
struct acrn_vcpu *vcpu = get_ever_run_vcpu(pcpu_id);
|
|
||||||
|
|
||||||
if ((vcpu != NULL) && !is_lapic_pt_enabled(vcpu)) {
|
|
||||||
CPU_IRQ_ENABLE_ON_CONFIG();
|
|
||||||
}
|
|
||||||
asm_pause();
|
|
||||||
if ((vcpu != NULL) && !is_lapic_pt_enabled(vcpu)) {
|
|
||||||
CPU_IRQ_DISABLE_ON_CONFIG();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -119,6 +119,43 @@ static void insert_e820_entry(uint32_t index, uint64_t addr, uint64_t length, ui
|
|||||||
hv_e820[index].type = type;
|
hv_e820[index].type = type;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static uint64_t e820_alloc_region(uint64_t addr, uint64_t size)
|
||||||
|
{
|
||||||
|
uint32_t i;
|
||||||
|
uint64_t entry_start;
|
||||||
|
uint64_t entry_end;
|
||||||
|
uint64_t start_pa = round_page_down(addr);
|
||||||
|
uint64_t end_pa = round_page_up(addr + size);
|
||||||
|
struct e820_entry *entry;
|
||||||
|
|
||||||
|
for (i = 0U; i < hv_e820_entries_nr; i++) {
|
||||||
|
entry = &hv_e820[i];
|
||||||
|
entry_start = entry->baseaddr;
|
||||||
|
entry_end = entry->baseaddr + entry->length;
|
||||||
|
|
||||||
|
/* No need handle in these cases*/
|
||||||
|
if ((entry->type != E820_TYPE_RAM) || (entry_end <= start_pa) || (entry_start >= end_pa)) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
if ((entry_start <= start_pa) && (entry_end >= end_pa)) {
|
||||||
|
entry->length = start_pa - entry_start;
|
||||||
|
/*
|
||||||
|
* .......|start_pa... ....................End_pa|.....
|
||||||
|
* |entry_start..............................entry_end|
|
||||||
|
*/
|
||||||
|
if (end_pa < entry_end) {
|
||||||
|
insert_e820_entry(i + 1, end_pa, entry_end - end_pa, E820_TYPE_RAM);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
pr_err("This region not in one entry!");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return addr;
|
||||||
|
}
|
||||||
|
|
||||||
 static void init_e820_from_efi_mmap(void)
 {
     uint32_t i, e820_idx = 0U;
@@ -234,57 +271,14 @@ static void calculate_e820_ram_size(void)

 static void alloc_mods_memory(void)
 {
-    uint32_t mod_index, e820_index, target_index;
-    uint64_t mod_start, mod_end;
-    uint64_t entry_start, entry_end;
+    uint32_t i;
+    int64_t mod_start = 0UL;
     struct acrn_boot_info *abi = get_acrn_boot_info();

-    /* 1st pass: remove the exact region */
-    for (mod_index = 0; mod_index < abi->mods_count; mod_index++) {
-        mod_start = hva2hpa(abi->mods[mod_index].start);
-        mod_end = hva2hpa(abi->mods[mod_index].start) + abi->mods[mod_index].size;
-
-        for (e820_index = 0; e820_index < hv_e820_entries_nr; e820_index++) {
-            entry_start = hv_e820[e820_index].baseaddr;
-            entry_end = hv_e820[e820_index].baseaddr + hv_e820[e820_index].length;
-
-            /* No need handle in these cases*/
-            if ((hv_e820[e820_index].type != E820_TYPE_RAM) || (entry_end <= mod_start) || (entry_start >= mod_end)) {
-                continue;
-            }
-
-            if ((entry_start <= mod_start) && (entry_end >= mod_end)) {
-                hv_e820[e820_index].length = mod_start - entry_start;
-
-                if (mod_end < entry_end) {
-                    /*
-                     * .......|start_pa... ....................end_pa|.....
-                     * |entry_start..............................entry_end|
-                     */
-                    insert_e820_entry(e820_index + 1, mod_end, entry_end - mod_end, E820_TYPE_RAM);
-                }
-            } else {
-                panic("%s: region 0x%016x-0x%016x crosses multiple e820 entries, check your bootloader!",
-                    __func__, entry_start, entry_end);
-            }
-        }
+    for (i = 0; i < abi->mods_count; i++) {
+        mod_start = hva2hpa(abi->mods[i].start);
+        e820_alloc_region(mod_start, abi->mods[i].size);
     }
-
-    /* 2nd pass: shrink the entries to page boundary */
-    target_index = 0;
-    for (e820_index = 0; e820_index < hv_e820_entries_nr; e820_index++) {
-        entry_start = round_page_up(hv_e820[e820_index].baseaddr);
-        entry_end = round_page_down(hv_e820[e820_index].baseaddr + hv_e820[e820_index].length);
-
-        if (entry_start < entry_end) {
-            hv_e820[target_index].baseaddr = entry_start;
-            hv_e820[target_index].length = entry_end - entry_start;
-            hv_e820[target_index].type = hv_e820[e820_index].type;
-            target_index++;
-        }
-    }
-    memset(&hv_e820[target_index], 0, (hv_e820_entries_nr - target_index) * sizeof(struct e820_entry));
-    hv_e820_entries_nr = target_index;
 }
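
The removed second pass above trims every entry to page granularity and compacts the table in place. A self-contained sketch of that pass, assuming a 4 KiB page size and simplified stand-in types:

#include <stdint.h>
#include <string.h>

#define PAGE_SZ 4096UL  /* assumed page size */

struct e820_entry {
    uint64_t baseaddr;
    uint64_t length;
    uint32_t type;
};

static uint64_t page_up(uint64_t x)   { return (x + PAGE_SZ - 1UL) & ~(PAGE_SZ - 1UL); }
static uint64_t page_down(uint64_t x) { return x & ~(PAGE_SZ - 1UL); }

/* Keep only entries that still cover at least one whole page; zero the rest
 * and return the new entry count. */
static uint32_t shrink_to_pages(struct e820_entry *e, uint32_t nr)
{
    uint32_t i, target = 0U;

    for (i = 0U; i < nr; i++) {
        uint64_t start = page_up(e[i].baseaddr);
        uint64_t end = page_down(e[i].baseaddr + e[i].length);

        if (start < end) {
            e[target].baseaddr = start;
            e[target].length = end - start;
            e[target].type = e[i].type;
            target++;
        }
    }
    memset(&e[target], 0, (nr - target) * sizeof(*e));
    return target;
}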
@@ -400,9 +400,15 @@ static struct ptirq_remapping_info *add_intx_remapping(struct acrn_vm *vm, uint3
             pr_err("INTX re-add vpin %d", virt_gsi);
         }
     } else if (entry->vm != vm) {
-        pr_err("INTX gsi%d already in vm%d with vgsi%d, not able to add into vm%d with vgsi%d",
-            phys_gsi, entry->vm->vm_id, entry->virt_sid.intx_id.gsi, vm->vm_id, virt_gsi);
-        entry = NULL;
+        if (is_service_vm(entry->vm)) {
+            entry->vm = vm;
+            entry->virt_sid.value = virt_sid.value;
+            entry->polarity = 0U;
+        } else {
+            pr_err("INTX gsi%d already in vm%d with vgsi%d, not able to add into vm%d with vgsi%d",
+                phys_gsi, entry->vm->vm_id, entry->virt_sid.intx_id.gsi, vm->vm_id, virt_gsi);
+            entry = NULL;
+        }
     } else {
         /* The mapping has already been added to the VM. No action
          * required.
@@ -422,24 +428,15 @@ static struct ptirq_remapping_info *add_intx_remapping(struct acrn_vm *vm, uint3
     return entry;
 }

-/* deactivate & remove mapping entry of vpin for vm */
-static void remove_intx_remapping(const struct acrn_vm *vm, uint32_t gsi, enum intx_ctlr gsi_ctlr, bool is_phy_gsi)
+/* deactive & remove mapping entry of vpin for vm */
+static void remove_intx_remapping(const struct acrn_vm *vm, uint32_t virt_gsi, enum intx_ctlr vgsi_ctlr)
 {
     uint32_t phys_irq;
     struct ptirq_remapping_info *entry;
     struct intr_source intr_src;
+    DEFINE_INTX_SID(virt_sid, virt_gsi, vgsi_ctlr);

-    if (is_phy_gsi) {
-        DEFINE_INTX_SID(sid, gsi, INTX_CTLR_IOAPIC);
-        entry = find_ptirq_entry(PTDEV_INTR_INTX, &sid, NULL);
-        if ((entry != NULL) && (entry->vm != vm)) {
-            entry = NULL;
-        }
-    } else {
-        DEFINE_INTX_SID(sid, gsi, gsi_ctlr);
-        entry = find_ptirq_entry(PTDEV_INTR_INTX, &sid, vm);
-    }
+    entry = find_ptirq_entry(PTDEV_INTR_INTX, &virt_sid, vm);

     if (entry != NULL) {
         if (is_entry_active(entry)) {
             phys_irq = entry->allocated_pirq;
@@ -452,11 +449,11 @@ static void remove_intx_remapping(const struct acrn_vm *vm, uint32_t gsi, enum i

             dmar_free_irte(&intr_src, entry->irte_idx);
             dev_dbg(DBG_LEVEL_IRQ,
-                "deactivate %s intx entry:pgsi=%d, pirq=%d ",
-                (entry->virt_sid.intx_id.ctlr == INTX_CTLR_PIC) ? "vPIC" : "vIOAPIC",
+                "deactive %s intx entry:pgsi=%d, pirq=%d ",
+                (vgsi_ctlr == INTX_CTLR_PIC) ? "vPIC" : "vIOAPIC",
                 entry->phys_sid.intx_id.gsi, phys_irq);
             dev_dbg(DBG_LEVEL_IRQ, "from vm%d vgsi=%d\n",
-                entry->vm->vm_id, entry->virt_sid.intx_id.gsi);
+                entry->vm->vm_id, virt_gsi);
         }

         ptirq_release_entry(entry);
@@ -756,7 +753,7 @@ int32_t ptirq_intx_pin_remap(struct acrn_vm *vm, uint32_t virt_gsi, enum intx_ct
     uint32_t phys_gsi = virt_gsi;

     remove_intx_remapping(vm, alt_virt_sid.intx_id.gsi,
-        alt_virt_sid.intx_id.ctlr, false);
+        alt_virt_sid.intx_id.ctlr);
     entry = add_intx_remapping(vm, virt_gsi, phys_gsi, vgsi_ctlr);
     if (entry == NULL) {
         pr_err("%s, add intx remapping failed", __func__);
@@ -827,12 +824,12 @@ int32_t ptirq_add_intx_remapping(struct acrn_vm *vm, uint32_t virt_gsi, uint32_t
 /*
  * @pre vm != NULL
  */
-void ptirq_remove_intx_remapping(const struct acrn_vm *vm, uint32_t gsi, bool pic_pin, bool is_phy_gsi)
+void ptirq_remove_intx_remapping(const struct acrn_vm *vm, uint32_t virt_gsi, bool pic_pin)
 {
     enum intx_ctlr vgsi_ctlr = pic_pin ? INTX_CTLR_PIC : INTX_CTLR_IOAPIC;

     spinlock_obtain(&ptdev_lock);
-    remove_intx_remapping(vm, gsi, vgsi_ctlr, is_phy_gsi);
+    remove_intx_remapping(vm, virt_gsi, vgsi_ctlr);
     spinlock_release(&ptdev_lock);
 }

@@ -860,6 +857,6 @@ void ptirq_remove_configured_intx_remappings(const struct acrn_vm *vm)
     uint16_t i;

     for (i = 0; i < vm_config->pt_intx_num; i++) {
-        ptirq_remove_intx_remapping(vm, vm_config->pt_intx[i].virt_gsi, false, false);
+        ptirq_remove_intx_remapping(vm, vm_config->pt_intx[i].virt_gsi, false);
     }
 }
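
The reworked remove path looks an entry up either by its physical GSI (owner checked afterwards) or by a virtual GSI scoped to one VM, selected by the is_phy_gsi flag. A toy illustration of those two lookup paths, with structures that only loosely mirror ACRN's ptirq entry:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct vm;                        /* opaque, illustrative */

struct intx_entry {
    const struct vm *vm;
    uint32_t phys_gsi;
    uint32_t virt_gsi;
    bool used;
};

static struct intx_entry entries[64];

/* Look an entry up either by physical GSI (then verify the owner) or by the
 * virtual GSI of one VM -- the two paths the is_phy_gsi flag selects. */
static struct intx_entry *find_intx_entry(const struct vm *vm, uint32_t gsi, bool is_phy_gsi)
{
    size_t i;

    for (i = 0; i < sizeof(entries) / sizeof(entries[0]); i++) {
        struct intx_entry *e = &entries[i];

        if (!e->used) {
            continue;
        }
        if (is_phy_gsi && (e->phys_gsi == gsi)) {
            return (e->vm == vm) ? e : NULL; /* owned by another VM: treat as not found */
        }
        if (!is_phy_gsi && (e->vm == vm) && (e->virt_gsi == gsi)) {
            return e;
        }
    }
    return NULL;
}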
|
@ -86,7 +86,7 @@ static void reserve_ept_bitmap(void)
|
|||||||
bitmap_size = (get_ept_page_num() * CONFIG_MAX_VM_NUM) / 8;
|
bitmap_size = (get_ept_page_num() * CONFIG_MAX_VM_NUM) / 8;
|
||||||
bitmap_offset = get_ept_page_num() / 8;
|
bitmap_offset = get_ept_page_num() / 8;
|
||||||
|
|
||||||
bitmap_base = e820_alloc_memory(bitmap_size, MEM_SIZE_MAX);
|
bitmap_base = e820_alloc_memory(bitmap_size, ~0UL);
|
||||||
set_paging_supervisor(bitmap_base, bitmap_size);
|
set_paging_supervisor(bitmap_base, bitmap_size);
|
||||||
|
|
||||||
for(i = 0; i < CONFIG_MAX_VM_NUM; i++){
|
for(i = 0; i < CONFIG_MAX_VM_NUM; i++){
|
||||||
@ -103,7 +103,7 @@ void reserve_buffer_for_ept_pages(void)
|
|||||||
uint16_t vm_id;
|
uint16_t vm_id;
|
||||||
uint32_t offset = 0U;
|
uint32_t offset = 0U;
|
||||||
|
|
||||||
page_base = e820_alloc_memory(get_total_ept_4k_pages_size(), MEM_SIZE_MAX);
|
page_base = e820_alloc_memory(get_total_ept_4k_pages_size(), ~0UL);
|
||||||
set_paging_supervisor(page_base, get_total_ept_4k_pages_size());
|
set_paging_supervisor(page_base, get_total_ept_4k_pages_size());
|
||||||
for (vm_id = 0U; vm_id < CONFIG_MAX_VM_NUM; vm_id++) {
|
for (vm_id = 0U; vm_id < CONFIG_MAX_VM_NUM; vm_id++) {
|
||||||
ept_pages[vm_id] = (struct page *)(void *)(page_base + offset);
|
ept_pages[vm_id] = (struct page *)(void *)(page_base + offset);
|
||||||
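
The only change in both hunks is the upper search bound passed to e820_alloc_memory (MEM_SIZE_MAX versus ~0UL). The bitmap sizing itself is plain arithmetic — one bit per EPT page per VM — shown here with made-up numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Hypothetical figures, not a real board configuration. */
    uint64_t ept_pages_per_vm = 262144UL;
    uint64_t max_vm_num = 8UL;

    uint64_t bitmap_size = (ept_pages_per_vm * max_vm_num) / 8UL; /* bytes for all VMs */
    uint64_t bitmap_offset = ept_pages_per_vm / 8UL;              /* per-VM stride in bytes */

    printf("total bitmap: %llu bytes, per-VM stride: %llu bytes\n",
           (unsigned long long)bitmap_size, (unsigned long long)bitmap_offset);
    return 0;
}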
|
@ -65,6 +65,7 @@ static int32_t local_gva2gpa_common(struct acrn_vcpu *vcpu, const struct page_wa
|
|||||||
int32_t ret = 0;
|
int32_t ret = 0;
|
||||||
int32_t fault = 0;
|
int32_t fault = 0;
|
||||||
bool is_user_mode_addr = true;
|
bool is_user_mode_addr = true;
|
||||||
|
bool is_page_rw_flags_on = true;
|
||||||
|
|
||||||
if (pw_info->level < 1U) {
|
if (pw_info->level < 1U) {
|
||||||
ret = -EINVAL;
|
ret = -EINVAL;
|
||||||
@ -107,6 +108,7 @@ static int32_t local_gva2gpa_common(struct acrn_vcpu *vcpu, const struct page_wa
|
|||||||
* Case2: Usermode */
|
* Case2: Usermode */
|
||||||
fault = 1;
|
fault = 1;
|
||||||
}
|
}
|
||||||
|
is_page_rw_flags_on = false;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -140,8 +142,34 @@ static int32_t local_gva2gpa_common(struct acrn_vcpu *vcpu, const struct page_wa
|
|||||||
*/
|
*/
|
||||||
/* if smap is enabled and supervisor-mode access */
|
/* if smap is enabled and supervisor-mode access */
|
||||||
if ((fault == 0) && pw_info->is_smap_on && (!pw_info->is_user_mode_access) &&
|
if ((fault == 0) && pw_info->is_smap_on && (!pw_info->is_user_mode_access) &&
|
||||||
is_user_mode_addr && ((vcpu_get_rflags(vcpu) & RFLAGS_AC) == 0UL)) {
|
is_user_mode_addr) {
|
||||||
fault = 1;
|
bool acflag = ((vcpu_get_rflags(vcpu) & RFLAGS_AC) != 0UL);
|
||||||
|
|
||||||
|
/* read from user mode address, eflags.ac = 0 */
|
||||||
|
if ((!pw_info->is_write_access) && (!acflag)) {
|
||||||
|
fault = 1;
|
||||||
|
} else if (pw_info->is_write_access) {
|
||||||
|
/* write to user mode address */
|
||||||
|
|
||||||
|
/* cr0.wp = 0, eflags.ac = 0 */
|
||||||
|
if ((!pw_info->wp) && (!acflag)) {
|
||||||
|
fault = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* cr0.wp = 1, eflags.ac = 1, r/w flag is 0
|
||||||
|
* on any paging structure entry
|
||||||
|
*/
|
||||||
|
if (pw_info->wp && acflag && (!is_page_rw_flags_on)) {
|
||||||
|
fault = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* cr0.wp = 1, eflags.ac = 0 */
|
||||||
|
if (pw_info->wp && (!acflag)) {
|
||||||
|
fault = 1;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
/* do nothing */
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* instruction fetch from user-mode address, smep on */
|
/* instruction fetch from user-mode address, smep on */
|
||||||
|
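
The added branch replaces the single "AC flag clear" check with the full SMAP decision for supervisor accesses that target a user-mode address. A compact restatement of that decision as a standalone predicate (parameter names are illustrative, not ACRN's):

#include <stdbool.h>

/* Does a supervisor-mode access to a user-mode linear address fault when
 * SMAP is enabled? Mirrors the four cases in the hunk above. */
static bool smap_faults(bool is_write, bool cr0_wp, bool rflags_ac, bool rw_set_on_all_levels)
{
    bool fault = false;

    if (!is_write) {
        fault = !rflags_ac;                  /* read: AC=1 is the only override */
    } else {
        if (!cr0_wp && !rflags_ac) {
            fault = true;                    /* CR0.WP=0 still needs AC=1 */
        }
        if (cr0_wp && rflags_ac && !rw_set_on_all_levels) {
            fault = true;                    /* AC=1 cannot override a read-only mapping */
        }
        if (cr0_wp && !rflags_ac) {
            fault = true;                    /* CR0.WP=1 and AC=0 always faults */
        }
    }
    return fault;
}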
@@ -309,14 +309,3 @@ hyperv_init_vcpuid_entry(uint32_t leaf, uint32_t subleaf, uint32_t flags,
     dev_dbg(DBG_LEVEL_HYPERV, "hv: %s: leaf=%x subleaf=%x flags=%x eax=%x ebx=%x ecx=%x edx=%x",
         __func__, leaf, subleaf, flags, entry->eax, entry->ebx, entry->ecx, entry->edx);
 }
-
-void
-hyperv_page_destory(struct acrn_vm *vm)
-{
-    /* Reset the hypercall page */
-    vm->arch_vm.hyperv.hypercall_page.enabled = 0U;
-    /* Reset OS id */
-    vm->arch_vm.hyperv.guest_os_id.val64 = 0UL;
-    /* Reset the TSC page */
-    vm->arch_vm.hyperv.ref_tsc_page.enabled = 0UL;
-}
|
@ -528,22 +528,9 @@ int32_t create_vcpu(uint16_t pcpu_id, struct acrn_vm *vm, struct acrn_vcpu **rtn
|
|||||||
per_cpu(ever_run_vcpu, pcpu_id) = vcpu;
|
per_cpu(ever_run_vcpu, pcpu_id) = vcpu;
|
||||||
|
|
||||||
if (is_lapic_pt_configured(vm) || is_using_init_ipi()) {
|
if (is_lapic_pt_configured(vm) || is_using_init_ipi()) {
|
||||||
/* Lapic_pt pCPU does not enable irq in root mode. So it
|
|
||||||
* should be set to PAUSE idle mode.
|
|
||||||
* At this point the pCPU is possibly in HLT idle. And the
|
|
||||||
* kick mode is to be set to INIT kick, which will not be
|
|
||||||
* able to wake root mode HLT. So a kick(if pCPU is in HLT
|
|
||||||
* idle, the kick mode is certainly ipi kick) will change
|
|
||||||
* it to PAUSE idle right away.
|
|
||||||
*/
|
|
||||||
if (per_cpu(mode_to_idle, pcpu_id) == IDLE_MODE_HLT) {
|
|
||||||
per_cpu(mode_to_idle, pcpu_id) = IDLE_MODE_PAUSE;
|
|
||||||
kick_pcpu(pcpu_id);
|
|
||||||
}
|
|
||||||
per_cpu(mode_to_kick_pcpu, pcpu_id) = DEL_MODE_INIT;
|
per_cpu(mode_to_kick_pcpu, pcpu_id) = DEL_MODE_INIT;
|
||||||
} else {
|
} else {
|
||||||
per_cpu(mode_to_kick_pcpu, pcpu_id) = DEL_MODE_IPI;
|
per_cpu(mode_to_kick_pcpu, pcpu_id) = DEL_MODE_IPI;
|
||||||
per_cpu(mode_to_idle, pcpu_id) = IDLE_MODE_HLT;
|
|
||||||
}
|
}
|
||||||
pr_info("pcpu=%d, kick-mode=%d, use_init_flag=%d", pcpu_id,
|
pr_info("pcpu=%d, kick-mode=%d, use_init_flag=%d", pcpu_id,
|
||||||
per_cpu(mode_to_kick_pcpu, pcpu_id), is_using_init_ipi());
|
per_cpu(mode_to_kick_pcpu, pcpu_id), is_using_init_ipi());
|
||||||
@ -572,16 +559,6 @@ int32_t create_vcpu(uint16_t pcpu_id, struct acrn_vm *vm, struct acrn_vcpu **rtn
|
|||||||
*/
|
*/
|
||||||
vcpu->arch.vpid = ALLOCATED_MIN_L1_VPID + (vm->vm_id * MAX_VCPUS_PER_VM) + vcpu->vcpu_id;
|
vcpu->arch.vpid = ALLOCATED_MIN_L1_VPID + (vm->vm_id * MAX_VCPUS_PER_VM) + vcpu->vcpu_id;
|
||||||
|
|
||||||
/*
|
|
||||||
* There are two locally independent writing operations, namely the
|
|
||||||
* assignment of vcpu->vm and vcpu_array[]. Compilers may optimize
|
|
||||||
* and reorder writing operations while users of vcpu_array[] may
|
|
||||||
* assume the presence of vcpu->vm. A compiler barrier is added here
|
|
||||||
* to prevent compiler reordering, ensuring that assignments to
|
|
||||||
* vcpu->vm precede vcpu_array[].
|
|
||||||
*/
|
|
||||||
cpu_compiler_barrier();
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* ACRN uses the following approach to manage VT-d PI notification vectors:
|
* ACRN uses the following approach to manage VT-d PI notification vectors:
|
||||||
* Allocate unique Activation Notification Vectors (ANV) for each vCPU that
|
* Allocate unique Activation Notification Vectors (ANV) for each vCPU that
|
||||||
@ -996,7 +973,8 @@ int32_t prepare_vcpu(struct acrn_vm *vm, uint16_t pcpu_id)
|
|||||||
vcpu->thread_obj.host_sp = build_stack_frame(vcpu);
|
vcpu->thread_obj.host_sp = build_stack_frame(vcpu);
|
||||||
vcpu->thread_obj.switch_out = context_switch_out;
|
vcpu->thread_obj.switch_out = context_switch_out;
|
||||||
vcpu->thread_obj.switch_in = context_switch_in;
|
vcpu->thread_obj.switch_in = context_switch_in;
|
||||||
init_thread_data(&vcpu->thread_obj, &get_vm_config(vm->vm_id)->sched_params);
|
vcpu->thread_obj.priority = get_vm_config(vm->vm_id)->vm_prio;
|
||||||
|
init_thread_data(&vcpu->thread_obj);
|
||||||
for (i = 0; i < VCPU_EVENT_NUM; i++) {
|
for (i = 0; i < VCPU_EVENT_NUM; i++) {
|
||||||
init_event(&vcpu->events[i]);
|
init_event(&vcpu->events[i]);
|
||||||
}
|
}
|
||||||
@ -1058,8 +1036,8 @@ void vcpu_handle_pi_notification(uint32_t vcpu_index)
|
|||||||
* Record this request as ACRN_REQUEST_EVENT,
|
* Record this request as ACRN_REQUEST_EVENT,
|
||||||
* so that vlapic_inject_intr() will sync PIR to vIRR
|
* so that vlapic_inject_intr() will sync PIR to vIRR
|
||||||
*/
|
*/
|
||||||
vcpu_make_request(vcpu, ACRN_REQUEST_EVENT);
|
|
||||||
signal_event(&vcpu->events[VCPU_EVENT_VIRTUAL_INTERRUPT]);
|
signal_event(&vcpu->events[VCPU_EVENT_VIRTUAL_INTERRUPT]);
|
||||||
|
vcpu_make_request(vcpu, ACRN_REQUEST_EVENT);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
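
The removed comment block documents why a compiler barrier sat between the assignment of vcpu->vm and the publication of the vCPU pointer. A minimal sketch of that publish pattern, using stand-in types and a GCC-style barrier (assumptions, not ACRN's real definitions):

struct vm;
struct vcpu {
    struct vm *vm;
    int id;
};
struct vm {
    struct vcpu *vcpu_array[8];
};

/* GCC/Clang-style compiler barrier (assumption; ACRN has its own macro). */
#define compiler_barrier() __asm__ __volatile__("" ::: "memory")

/* Make vcpu->vm visible before the vCPU becomes reachable through
 * vcpu_array[], so readers of the array never see a half-initialized object
 * because the compiler reordered the two stores. */
static void publish_vcpu(struct vm *vm, struct vcpu *v, int idx)
{
    v->vm = vm;
    compiler_barrier();
    vm->vcpu_array[idx] = v;
}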
@@ -18,11 +18,6 @@
 #include <asm/rdt.h>
 #include <asm/guest/vcat.h>

-static struct percpu_cpuids {
-    uint32_t leaf_nr;
-    uint32_t leaves[MAX_VM_VCPUID_ENTRIES];
-} pcpu_cpuids;
-
 static inline const struct vcpuid_entry *local_find_vcpuid_entry(const struct acrn_vcpu *vcpu,
         uint32_t leaf, uint32_t subleaf)
 {
@@ -120,6 +115,73 @@ static void init_vcpuid_entry(uint32_t leaf, uint32_t subleaf,
     entry->flags = flags;

     switch (leaf) {
+
+    case 0x06U:
+        cpuid_subleaf(leaf, subleaf, &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
+        entry->eax &= ~(CPUID_EAX_HWP | CPUID_EAX_HWP_N | CPUID_EAX_HWP_AW | CPUID_EAX_HWP_EPP | CPUID_EAX_HWP_PLR);
+        entry->ecx &= ~CPUID_ECX_HCFC;
+        break;
+
+    case 0x07U:
+        if (subleaf == 0U) {
+            uint64_t cr4_reserved_mask = get_cr4_reserved_bits();
+
+            cpuid_subleaf(leaf, subleaf, &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
+
+            entry->ebx &= ~(CPUID_EBX_PQM | CPUID_EBX_PQE);
+
+            /* mask LA57 */
+            entry->ecx &= ~CPUID_ECX_LA57;
+
+            /* mask SGX and SGX_LC */
+            entry->ebx &= ~CPUID_EBX_SGX;
+            entry->ecx &= ~CPUID_ECX_SGX_LC;
+
+            /* mask MPX */
+            entry->ebx &= ~CPUID_EBX_MPX;
+
+            /* mask Intel Processor Trace, since 14h is disabled */
+            entry->ebx &= ~CPUID_EBX_PROC_TRC;
+
+            /* mask CET shadow stack and indirect branch tracking */
+            entry->ecx &= ~CPUID_ECX_CET_SS;
+            entry->edx &= ~CPUID_EDX_CET_IBT;
+
+            if ((cr4_reserved_mask & CR4_FSGSBASE) != 0UL) {
+                entry->ebx &= ~CPUID_EBX_FSGSBASE;
+            }
+
+            if ((cr4_reserved_mask & CR4_SMEP) != 0UL) {
+                entry->ebx &= ~CPUID_EBX_SMEP;
+            }
+
+            if ((cr4_reserved_mask & CR4_SMAP) != 0UL) {
+                entry->ebx &= ~CPUID_EBX_SMAP;
+            }
+
+            if ((cr4_reserved_mask & CR4_UMIP) != 0UL) {
+                entry->ecx &= ~CPUID_ECX_UMIP;
+            }
+
+            if ((cr4_reserved_mask & CR4_PKE) != 0UL) {
+                entry->ecx &= ~CPUID_ECX_PKE;
+            }
+
+            if ((cr4_reserved_mask & CR4_LA57) != 0UL) {
+                entry->ecx &= ~CPUID_ECX_LA57;
+            }
+
+            if ((cr4_reserved_mask & CR4_PKS) != 0UL) {
+                entry->ecx &= ~CPUID_ECX_PKS;
+            }
+        } else {
+            entry->eax = 0U;
+            entry->ebx = 0U;
+            entry->ecx = 0U;
+            entry->edx = 0U;
+        }
+        break;
+
     case 0x16U:
         cpu_info = get_pcpu_info();
         if (cpu_info->cpuid_level >= 0x16U) {
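
The added leaf 07h handling hides a CPUID feature whenever the matching CR4 control is reserved for the guest. A small sketch of that rule for two of the bits, with assumed bit positions clearly marked:

#include <stdint.h>

/* Assumed, illustrative bit positions (the real ones come from the SDM and
 * from ACRN's cpuid/cpu headers). */
#define MY_CR4_SMEP          (1UL << 20)
#define MY_CR4_SMAP          (1UL << 21)
#define MY_CPUID07_EBX_SMEP  (1U << 7)
#define MY_CPUID07_EBX_SMAP  (1U << 20)

/* If a CR4 control is reserved for the guest, hide the matching CPUID bit so
 * the guest never tries to turn the feature on. */
static uint32_t mask_leaf07_ebx(uint32_t ebx, uint64_t cr4_reserved_mask)
{
    if ((cr4_reserved_mask & MY_CR4_SMEP) != 0UL) {
        ebx &= ~MY_CPUID07_EBX_SMEP;
    }
    if ((cr4_reserved_mask & MY_CR4_SMAP) != 0UL) {
        ebx &= ~MY_CPUID07_EBX_SMAP;
    }
    return ebx;
}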
@@ -376,165 +438,6 @@ static int32_t set_vcpuid_vcat_10h(struct acrn_vm *vm)
 }
 #endif

-static void guest_cpuid_04h(__unused struct acrn_vm *vm, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
-{
-    struct vcpuid_entry entry;
-
-    cpuid_subleaf(CPUID_CACHE, *ecx, &entry.eax, &entry.ebx, &entry.ecx, &entry.edx);
-    if (entry.eax != 0U) {
-#ifdef CONFIG_VCAT_ENABLED
-        if (is_vcat_configured(vm)) {
-            /* set_vcpuid_vcat_04h will not change entry.eax */
-            result = set_vcpuid_vcat_04h(vm, &entry);
-        }
-#endif
-    }
-    *eax = entry.eax;
-    *ebx = entry.ebx;
-    *ecx = entry.ecx;
-    *edx = entry.edx;
-}
-
-static int32_t set_vcpuid_cache(struct acrn_vm *vm)
-{
-    int32_t result = 0;
-    struct vcpuid_entry entry;
-    uint32_t i;
-
-    entry.leaf = CPUID_CACHE;
-    entry.flags = CPUID_CHECK_SUBLEAF;
-    for (i = 0U; ; i++) {
-        entry.subleaf = i;
-        entry.ecx = i;
-        guest_cpuid_04h(vm, &entry.eax, &entry.ebx, &entry.ecx, &entry.edx);
-        if (entry.eax == 0U) {
-            break;
-        }
-
-        result = set_vcpuid_entry(vm, &entry);
-        if (result != 0) {
-            /* wants to break out of switch */
-            break;
-        }
-    }
-    return result;
-}
-
-static int32_t set_vcpuid_extfeat(struct acrn_vm *vm)
-{
-    uint64_t cr4_reserved_mask = get_cr4_reserved_bits();
-    int32_t result = 0;
-    struct vcpuid_entry entry;
-    uint32_t i, sub_leaves;
-
-    /* cpuid.07h.0h */
-    cpuid_subleaf(CPUID_EXTEND_FEATURE, 0U, &entry.eax, &entry.ebx, &entry.ecx, &entry.edx);
-
-    entry.ebx &= ~(CPUID_EBX_PQM | CPUID_EBX_PQE);
-    if (is_vsgx_supported(vm->vm_id)) {
-        entry.ebx |= CPUID_EBX_SGX;
-    }
-
-#ifdef CONFIG_VCAT_ENABLED
-    if (is_vcat_configured(vm)) {
-        /* Bit 15: Supports Intel Resource Director Technology (Intel RDT) Allocation capability if 1 */
-        entry.ebx |= CPUID_EBX_PQE;
-    }
-#endif
-    /* mask LA57 */
-    entry.ecx &= ~CPUID_ECX_LA57;
-
-    /* mask SGX and SGX_LC */
-    entry.ebx &= ~CPUID_EBX_SGX;
-    entry.ecx &= ~CPUID_ECX_SGX_LC;
-
-    /* mask MPX */
-    entry.ebx &= ~CPUID_EBX_MPX;
-
-    /* mask Intel Processor Trace, since 14h is disabled */
-    entry.ebx &= ~CPUID_EBX_PROC_TRC;
-
-    /* mask CET shadow stack and indirect branch tracking */
-    entry.ecx &= ~CPUID_ECX_CET_SS;
-    entry.edx &= ~CPUID_EDX_CET_IBT;
-
-    /* mask WAITPKG */
-    entry.ecx &= ~CPUID_ECX_WAITPKG;
-
-    if ((cr4_reserved_mask & CR4_FSGSBASE) != 0UL) {
-        entry.ebx &= ~CPUID_EBX_FSGSBASE;
-    }
-
-    if ((cr4_reserved_mask & CR4_SMEP) != 0UL) {
-        entry.ebx &= ~CPUID_EBX_SMEP;
-    }
-
-    if ((cr4_reserved_mask & CR4_SMAP) != 0UL) {
-        entry.ebx &= ~CPUID_EBX_SMAP;
-    }
-
-    if ((cr4_reserved_mask & CR4_UMIP) != 0UL) {
-        entry.ecx &= ~CPUID_ECX_UMIP;
-    }
-
-    if ((cr4_reserved_mask & CR4_PKE) != 0UL) {
-        entry.ecx &= ~CPUID_ECX_PKE;
-    }
-
-    if ((cr4_reserved_mask & CR4_LA57) != 0UL) {
-        entry.ecx &= ~CPUID_ECX_LA57;
-    }
-
-    if ((cr4_reserved_mask & CR4_PKS) != 0UL) {
-        entry.ecx &= ~CPUID_ECX_PKS;
-    }
-
-    entry.leaf = CPUID_EXTEND_FEATURE;
-    entry.subleaf = 0U;
-    entry.flags = CPUID_CHECK_SUBLEAF;
-    result = set_vcpuid_entry(vm, &entry);
-    if (result == 0) {
-        sub_leaves = entry.eax;
-        for (i = 1U; i <= sub_leaves; i++) {
-            cpuid_subleaf(CPUID_EXTEND_FEATURE, i, &entry.eax, &entry.ebx, &entry.ecx, &entry.edx);
-            entry.subleaf = i;
-            result = set_vcpuid_entry(vm, &entry);
-            if (result != 0) {
-                break;
-            }
-        }
-    }
-    return result;
-}
-
-static void guest_cpuid_06h(struct acrn_vm *vm, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
-{
-    cpuid_subleaf(CPUID_THERMAL_POWER, *ecx, eax, ebx, ecx, edx);
-
-    /* Always hide package level HWP controls and HWP interrupt*/
-    *eax &= ~(CPUID_EAX_HWP_CTL | CPUID_EAX_HWP_PLR | CPUID_EAX_HWP_N);
-    *eax &= ~(CPUID_EAX_HFI | CPUID_EAX_ITD);
-    /* Since HFI is hidden, hide the edx too */
-    *edx = 0U;
-    if (!is_vhwp_configured(vm)) {
-        *eax &= ~(CPUID_EAX_HWP | CPUID_EAX_HWP_AW | CPUID_EAX_HWP_EPP);
-        *ecx &= ~CPUID_ECX_HCFC;
-    }
-}
-
-static int32_t set_vcpuid_thermal_power(struct acrn_vm *vm)
-{
-    struct vcpuid_entry entry;
-
-    entry.leaf = CPUID_THERMAL_POWER;
-    entry.subleaf = 0;
-    entry.ecx = 0;
-    entry.flags = CPUID_CHECK_SUBLEAF;
-    guest_cpuid_06h(vm, &entry.eax, &entry.ebx, &entry.ecx, &entry.edx);
-
-    return set_vcpuid_entry(vm, &entry);
-}
-
 static int32_t set_vcpuid_extended_function(struct acrn_vm *vm)
 {
     uint32_t i, limit;
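
The removed guest_cpuid_06h() always hides the package-level HWP controls and the hardware feedback interface, and additionally hides the per-VM HWP bits when virtual HWP is not configured. A hedged sketch of that filter — the mask values below are assumptions for illustration, not ACRN's macros:

#include <stdbool.h>
#include <stdint.h>

/* Assumed mask values for illustration only. */
#define ALWAYS_HIDE_EAX 0x00880900U /* package-level HWP request, HWP interrupt, HFI, ITD */
#define VHWP_ONLY_EAX   0x00000680U /* HWP, activity window, energy/performance preference */
#define HCFC_ECX        0x00000001U /* hardware coordination feedback */

/* Filter CPUID.06H for a guest: package-wide controls and the feedback
 * interface are always hidden; base HWP bits survive only with virtual HWP. */
static void filter_leaf_06h(uint32_t *eax, uint32_t *ecx, uint32_t *edx, bool vhwp_enabled)
{
    *eax &= ~ALWAYS_HIDE_EAX;
    *edx = 0U;                  /* EDX only describes the hidden feedback interface */
    if (!vhwp_enabled) {
        *eax &= ~VHWP_ONLY_EAX;
        *ecx &= ~HCFC_ECX;
    }
}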
@@ -596,40 +499,7 @@ static int32_t set_vcpuid_extended_function(struct acrn_vm *vm)

 static inline bool is_percpu_related(uint32_t leaf)
 {
-    uint32_t i;
-    bool ret = false;
-
-    for (i = 0; i < pcpu_cpuids.leaf_nr; i++) {
-        if (leaf == pcpu_cpuids.leaves[i]) {
-            ret = true;
-            break;
-        }
-    }
-    return ret;
-}
-
-static inline void percpu_cpuid_init(void)
-{
-    /* 0x1U, 0xBU, 0xDU, 0x19U, 0x1FU, 0x80000001U */
-    uint32_t percpu_leaves[] = {CPUID_FEATURES, CPUID_EXTEND_TOPOLOGY,
-        CPUID_XSAVE_FEATURES, CPUID_KEY_LOCKER,
-        CPUID_V2_EXTEND_TOPOLOGY, CPUID_EXTEND_FUNCTION_1};
-
-    pcpu_cpuids.leaf_nr = sizeof(percpu_leaves)/sizeof(uint32_t);
-    memcpy_s(pcpu_cpuids.leaves, sizeof(percpu_leaves),
-        percpu_leaves, sizeof(percpu_leaves));
-
-    /* hybrid related percpu leaves*/
-    if (pcpu_has_cap(X86_FEATURE_HYBRID)) {
-        /* 0x2U, 0x4U, 0x6U, 0x14U, 0x16U, 0x18U, 0x1A, 0x1C, 0x80000006U */
-        uint32_t hybrid_leaves[] = {CPUID_TLB, CPUID_CACHE,
-            CPUID_THERMAL_POWER, CPUID_TRACE, CPUID_FREQ,
-            CPUID_ADDR_TRANS, CPUID_MODEL_ID, CPUID_LAST_BRANCH_RECORD,
-            CPUID_EXTEND_CACHE};
-        memcpy_s(pcpu_cpuids.leaves + pcpu_cpuids.leaf_nr,
-            sizeof(hybrid_leaves), hybrid_leaves, sizeof(hybrid_leaves));
-        pcpu_cpuids.leaf_nr += sizeof(hybrid_leaves)/sizeof(uint32_t);
-    }
+    return ((leaf == 0x1U) || (leaf == 0xbU) || (leaf == 0xdU) || (leaf == 0x19U) || (leaf == 0x80000001U) || (leaf == 0x2U) || (leaf == 0x1aU));
 }

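
The removed helpers keep a per-physical-CPU list of leaves (plus extra leaves on hybrid parts) and answer is_percpu_related() by scanning it; the replacement collapses the check into one expression. A standalone sketch of the table-based variant, with an assumed table size:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define MAX_PERCPU_LEAVES 32U   /* assumed capacity */

static struct {
    uint32_t nr;
    uint32_t leaves[MAX_PERCPU_LEAVES];
} percpu_leaves;

static void percpu_leaves_init(bool cpu_is_hybrid)
{
    static const uint32_t base[] = { 0x01U, 0x0bU, 0x0dU, 0x19U, 0x1fU, 0x80000001U };
    static const uint32_t hybrid[] = { 0x02U, 0x04U, 0x06U, 0x14U, 0x16U,
                                       0x18U, 0x1aU, 0x1cU, 0x80000006U };

    memcpy(percpu_leaves.leaves, base, sizeof(base));
    percpu_leaves.nr = sizeof(base) / sizeof(base[0]);

    if (cpu_is_hybrid) {
        memcpy(&percpu_leaves.leaves[percpu_leaves.nr], hybrid, sizeof(hybrid));
        percpu_leaves.nr += sizeof(hybrid) / sizeof(hybrid[0]);
    }
}

static bool leaf_is_percpu(uint32_t leaf)
{
    uint32_t i;

    for (i = 0U; i < percpu_leaves.nr; i++) {
        if (percpu_leaves.leaves[i] == leaf) {
            return true;
        }
    }
    return false;
}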
 int32_t set_vcpuid_entries(struct acrn_vm *vm)
@@ -637,7 +507,7 @@ int32_t set_vcpuid_entries(struct acrn_vm *vm)
     int32_t result;
     struct vcpuid_entry entry;
     uint32_t limit;
-    uint32_t i;
+    uint32_t i, j;
     struct cpuinfo_x86 *cpu_info = get_pcpu_info();

     init_vcpuid_entry(0U, 0U, 0U, &entry);
@@ -647,8 +517,6 @@ int32_t set_vcpuid_entries(struct acrn_vm *vm)
     }
     result = set_vcpuid_entry(vm, &entry);
     if (result == 0) {
-        percpu_cpuid_init();
-
         limit = entry.eax;
         vm->vcpuid_level = limit;

@@ -658,40 +526,64 @@ int32_t set_vcpuid_entries(struct acrn_vm *vm)
         }

         switch (i) {
-        /* 0x4U */
-        case CPUID_CACHE:
-            result = set_vcpuid_cache(vm);
+        case 0x04U:
+            for (j = 0U; ; j++) {
+                init_vcpuid_entry(i, j, CPUID_CHECK_SUBLEAF, &entry);
+                if (entry.eax == 0U) {
+                    break;
+                }
+
+#ifdef CONFIG_VCAT_ENABLED
+                if (is_vcat_configured(vm)) {
+                    result = set_vcpuid_vcat_04h(vm, &entry);
+                }
+#endif
+                result = set_vcpuid_entry(vm, &entry);
+                if (result != 0) {
+                    /* wants to break out of switch */
+                    break;
+                }
+            }
             break;
         /* MONITOR/MWAIT */
         case 0x05U:
             break;
-        /* 0x06U */
-        case CPUID_THERMAL_POWER:
-            result = set_vcpuid_thermal_power(vm);
+        case 0x07U:
+            init_vcpuid_entry(i, 0U, CPUID_CHECK_SUBLEAF, &entry);
+            if (entry.eax != 0U) {
+                pr_warn("vcpuid: only support subleaf 0 for cpu leaf 07h");
+                entry.eax = 0U;
+            }
+            if (is_vsgx_supported(vm->vm_id)) {
+                entry.ebx |= CPUID_EBX_SGX;
+            }
+            entry.ecx &= ~CPUID_ECX_WAITPKG;
+
+#ifdef CONFIG_VCAT_ENABLED
+            if (is_vcat_configured(vm)) {
+                /* Bit 15: Supports Intel Resource Director Technology (Intel RDT) Allocation capability if 1 */
+                entry.ebx |= CPUID_EBX_PQE;
+            }
+#endif
+            result = set_vcpuid_entry(vm, &entry);
             break;
-        /* 0x07U */
-        case CPUID_EXTEND_FEATURE:
-            result = set_vcpuid_extfeat(vm);
-            break;
-        /* 0x12U */
-        case CPUID_SGX_CAP:
+        case 0x12U:
             result = set_vcpuid_sgx(vm);
             break;
         /* These features are disabled */
         /* PMU is not supported except for core partition VM, like RTVM */
-        /* 0x0aU */
-        case CPUID_ARCH_PERF_MON:
+        case 0x0aU:
             if (is_pmu_pt_configured(vm)) {
                 init_vcpuid_entry(i, 0U, 0U, &entry);
                 result = set_vcpuid_entry(vm, &entry);
             }
             break;

-        /* 0xFU, Intel RDT */
-        case CPUID_RDT_MONITOR:
+        /* Intel RDT */
+        case 0x0fU:
             break;
-        /* 0x10U, Intel RDT */
-        case CPUID_RDT_ALLOCATION:
+        /* Intel RDT */
+        case 0x10U:
 #ifdef CONFIG_VCAT_ENABLED
             if (is_vcat_configured(vm)) {
                 result = set_vcpuid_vcat_10h(vm);
@@ -699,10 +591,12 @@ int32_t set_vcpuid_entries(struct acrn_vm *vm)
 #endif
             break;

-        /* 0x14U, Intel Processor Trace */
-        case CPUID_TRACE:
-        /* 0x1BU, PCONFIG */
-        case CPUID_PCONFIG:
+        /* Intel Processor Trace */
+        case 0x14U:
+        /* PCONFIG */
+        case 0x1bU:
+        /* V2 Extended Topology Enumeration Leaf */
+        case 0x1fU:
             break;
         default:
             init_vcpuid_entry(i, 0U, 0U, &entry);
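
The new case 0x04U loop enumerates cache subleaves until EAX reports a null level. A sketch of that termination pattern; raw_cpuid() and publish_to_guest() are hypothetical helpers standing in for cpuid_subleaf() and set_vcpuid_entry():

#include <stdint.h>

struct cpuid_values { uint32_t eax, ebx, ecx, edx; };

/* Hypothetical helpers for this sketch only. */
extern void raw_cpuid(uint32_t leaf, uint32_t subleaf, struct cpuid_values *v);
extern int publish_to_guest(uint32_t leaf, uint32_t subleaf, const struct cpuid_values *v);

/* Enumerate cache-descriptor subleaves of leaf 04H until EAX reports a null
 * level, publishing each one to the guest's vCPUID table. */
static int copy_cache_leaves(void)
{
    uint32_t subleaf;
    struct cpuid_values v;

    for (subleaf = 0U; ; subleaf++) {
        raw_cpuid(0x04U, subleaf, &v);
        if (v.eax == 0U) {
            break;              /* no more cache levels */
        }
        if (publish_to_guest(0x04U, subleaf, &v) != 0) {
            return -1;
        }
    }
    return 0;
}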
@@ -873,14 +767,6 @@ static void guest_cpuid_19h(struct acrn_vcpu *vcpu, uint32_t *eax, uint32_t *ebx
     }
 }

-static void guest_cpuid_1fh(struct acrn_vcpu *vcpu, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
-{
-    cpuid_subleaf(0x1fU, *ecx, eax, ebx, ecx, edx);
-
-    /* Patching X2APIC */
-    *edx = vlapic_get_apicid(vcpu_vlapic(vcpu));
-}
-
 static void guest_cpuid_80000001h(const struct acrn_vcpu *vcpu,
     uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
 {
@@ -948,56 +834,29 @@ void guest_cpuid(struct acrn_vcpu *vcpu, uint32_t *eax, uint32_t *ebx, uint32_t
     } else {
         /* percpu related */
         switch (leaf) {
-        /* 0x01U */
-        case CPUID_FEATURES:
+        case 0x01U:
             guest_cpuid_01h(vcpu, eax, ebx, ecx, edx);
             break;

-        /* 0x04U for hybrid arch */
-        case CPUID_CACHE:
-            guest_cpuid_04h(vcpu->vm, eax, ebx, ecx, edx);
-            break;
-
-        /* 0x06U for hybrid arch */
-        case CPUID_THERMAL_POWER:
-            guest_cpuid_06h(vcpu->vm, eax, ebx, ecx, edx);
-            break;
-
-        /* 0x0BU */
-        case CPUID_EXTEND_TOPOLOGY:
+        case 0x0bU:
             guest_cpuid_0bh(vcpu, eax, ebx, ecx, edx);
             break;

-        /* 0x0dU */
-        case CPUID_XSAVE_FEATURES:
+        case 0x0dU:
             guest_cpuid_0dh(vcpu, eax, ebx, ecx, edx);
             break;

-        /* 0x14U for hybrid arch */
-        case CPUID_TRACE:
-            *eax = 0U;
-            *ebx = 0U;
-            *ecx = 0U;
-            *edx = 0U;
-            break;
-        /* 0x19U */
-        case CPUID_KEY_LOCKER:
+        case 0x19U:
             guest_cpuid_19h(vcpu, eax, ebx, ecx, edx);
             break;

-        /* 0x1fU */
-        case CPUID_V2_EXTEND_TOPOLOGY:
-            guest_cpuid_1fh(vcpu, eax, ebx, ecx, edx);
-            break;
-
-        /* 0x80000001U */
-        case CPUID_EXTEND_FUNCTION_1:
+        case 0x80000001U:
             guest_cpuid_80000001h(vcpu, eax, ebx, ecx, edx);
             break;

         default:
             /*
-             * In this switch statement, leaf 0x01/0x04/0x06/0x0b/0x0d/0x19/0x1f/0x80000001
+             * In this switch statement, leaf 0x01/0x0b/0x0d/0x19/0x80000001
              * shall be handled specifically. All the other cases
              * just return physical value.
              */