doc: add Technical intro doc

Add the "Introduction to Project ACRN" doc. Also improve the doc
generation process and content styles, and remove the doxygen-generated
API material.

Signed-off-by: David B. Kinder <david.b.kinder@intel.com>
Makefile
@@ -1,8 +1,14 @@
 # Minimal makefile for Sphinx documentation
 #
 
+ifeq ($(VERBOSE),1)
+Q =
+else
+Q = @
+endif
+
 # You can set these variables from the command line.
-SPHINXOPTS = 
+SPHINXOPTS = -q
 SPHINXBUILD = sphinx-build
 SPHINXPROJ = "Project ACRN"
 SOURCEDIR = .
@@ -17,7 +23,7 @@ help:
 .PHONY: help Makefile
 
 pullsource:
-	$(Q)scripts/pullsource.sh
+	scripts/pullsource.sh
 
 
 # Generate the doxygen xml (for Sphinx) and copy the doxygen html to the
@@ -25,21 +31,19 @@ pullsource:
 
 doxy: pullsource
 	$(Q)(cat acrn.doxyfile) | doxygen - 2>&1
-	$(Q)mkdir -p _build/html/api/doxygen
-	$(Q)cp -r doxygen/html/* _build/html/api/doxygen
 
 # Remove generated content (Sphinx and doxygen)
 
 clean:
-	$(Q)rm -fr $(BUILDDIR) doxygen _source
+	rm -fr $(BUILDDIR) doxygen
 
 # Copy material over to the GitHub pages staging repo
+# along with a README
 
 publish:
-	$(Q)mv $(PUBLISHDIR)/README.md $(PUBLISHDIR)/.README.md
-	$(Q)rm -fr $(PUBLISHDIR)/*
-	$(Q)mv $(PUBLISHDIR)/.README.md $(PUBLISHDIR)/README.md
-	$(Q)cp -r _build/html/* $(PUBLISHDIR)
+	rm -fr $(PUBLISHDIR)/*
+	cp -r $(BUILDDIR)/html/* $(PUBLISHDIR)
+	cp scripts/publish-README.md $(PUBLISHDIR)/README.md
 
 
 # Catch-all target: route all unknown targets to Sphinx using the new
@ -724,7 +724,7 @@ CITE_BIB_FILES =
|
|||||||
# messages are off.
|
# messages are off.
|
||||||
# The default value is: NO.
|
# The default value is: NO.
|
||||||
|
|
||||||
QUIET = NO
|
QUIET = YES
|
||||||
|
|
||||||
# The WARNINGS tag can be used to turn on/off the warning messages that are
|
# The WARNINGS tag can be used to turn on/off the warning messages that are
|
||||||
# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES
|
# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES
|
||||||
@ -791,7 +791,11 @@ WARN_LOGFILE =
|
|||||||
# Note: If this tag is empty the current directory is searched.
|
# Note: If this tag is empty the current directory is searched.
|
||||||
|
|
||||||
INPUT = custom-doxygen/mainpage.md \
|
INPUT = custom-doxygen/mainpage.md \
|
||||||
_source/
|
../acrn-hypervisor/include/common/hypercall.h \
|
||||||
|
../acrn-hypervisor/include/public/acrn_common.h \
|
||||||
|
../acrn-hypervisor/include/public/acrn_hv_defs.h \
|
||||||
|
../acrn-devicemodel/include/virtio.h
|
||||||
|
|
||||||
|
|
||||||
# This tag can be used to specify the character encoding of the source files
|
# This tag can be used to specify the character encoding of the source files
|
||||||
# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
|
# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
|
||||||
|
api/index.rst
@@ -11,9 +11,6 @@ the code. If you are looking for a specific API, enter it on the search
 box. The search results display all sections containing information
 about that API.
 
-As a convenience, we've also published the `doxygen-generated API
-<doxygen>`_ files as an alternate view of the Project ACRN APIs.
-
 .. toctree::
    :maxdepth: 1
conf.py
@@ -52,11 +52,37 @@ author = u'Project ARCN developers'
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
 # built documents.
 
+# The following code tries to extract the information by reading the
+# Makefile from the acrn-hypervisor repo by finding these lines:
+#   MAJOR_VERSION=0
+#   MINOR_VERSION=1
+
+try:
+    version_major = None
+    version_minor = None
+    for line in open(os.path.normpath("../acrn-hypervisor/Makefile")):
+        if line.count("="):
+            key, val = [x.strip() for x in line.split('=', 1)]
+            if key == 'MAJOR_VERSION':
+                version_major = val
+            if key == 'MINOR_VERSION':
+                version_minor = val
+            if version_major and version_minor:
+                break
+except:
+    pass
+finally:
+    if version_major and version_minor:
+        version = release = "v " + version_major + '.' + version_minor
+    else:
+        sys.stderr.write('Warning: Could not extract hypervisor version from Makefile\n')
+        version = release = "unknown"
+
 #
 # The short X.Y version.
-version = u'0.1'
+# version = u'0.1'
 # The full version, including alpha/beta/rc tags.
-release = u'0.1'
+# release = u'0.1'
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
@@ -118,6 +144,10 @@ else:
 html_logo = 'images/ACRN_Logo_300w.png'
 html_favicon = 'images/ACRN-favicon-32x32.png'
 
+numfig = True
+#numfig_secnum_depth = (2)
+numfig_format = {'figure': 'Figure %s', 'table': 'Table %s', 'code-block': 'Code Block %s'}
+
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
@@ -136,6 +166,10 @@ html_show_sphinx = False
 # If true, links to the reST sources are added to the pages.
 html_show_sourcelink = False
 
+# If not '', a 'Last updated on:' timestamp is inserted at every page
+# bottom, using the given strftime format.
+html_last_updated_fmt = '%b %d, %Y'
+
 # -- Options for HTMLHelp output ------------------------------------------
getting_started/index.rst
@@ -6,5 +6,5 @@ Getting Started Guide
 This Getting Started Guide (GSG) provides information for setting up
 your host development computer with the needed software and tools for
 developing with Project ACRN. We'll also provide setup
-instructions for the targetted hardware platform, and running a sample
+instructions for the targeted hardware platform, and running a sample
 application on this platform.
glossary.rst (new file)
@@ -0,0 +1,114 @@

:orphan:

.. _glossary:

Glossary of Terms
#################

.. glossary::
   :sorted:

   API
      Application Program Interface: A defined set of routines and
      protocols for building application software.

   ACPI
      Advanced Configuration and Power Interface

   BIOS
      Basic Input/Output System

   GPU
      Graphics Processing Unit

   I2C
      Inter-Integrated Circuit

   IC
      Instrument Cluster

   IVE
      In-Vehicle Experience

   IVI
      In-Vehicle Infotainment

   OS
      Operating System

   OSPM
      Operating System Power Management

   PCI
      Peripheral Component Interconnect

   PM
      Power Management

   Pass-Through Devices
      Physical devices (typically PCI) exclusively assigned to a guest.
      In the Project ACRN architecture, pass-through devices are owned
      by the foreground OS.

   PV
      Para-virtualization (see
      https://en.wikipedia.org/wiki/Paravirtualization)

   RSE
      Rear Seat Entertainment

   SDC
      Software Defined Cockpit

   SOS
      Service OS

   UEFI
      Unified Extensible Firmware Interface. UEFI replaces the
      traditional BIOS on PCs, while also providing BIOS emulation for
      backward compatibility. UEFI can run in 32-bit or 64-bit mode
      and, more importantly, supports Secure Boot, checking the OS
      validity to ensure no malware has tampered with the boot process.

   UOS
      User OS (also known as Guest OS)

   VHM
      Virtio and Hypervisor Service Module

   VM
      Virtual Machine

   VMM
      Virtual Machine Monitor

   VMX
      Virtual Machine Extension

   Virtio-BE
      Back-End. The VirtIO framework provides front-end and back-end
      drivers for I/O mediators; Virtio-BE and Virtio-FE are commonly
      used shorthand for these.

   Virtio-FE
      Front-End. The VirtIO framework provides front-end and back-end
      drivers for I/O mediators; Virtio-BE and Virtio-FE are commonly
      used shorthand for these.

   VT
      Intel Virtualization Technology

   VT-d
      Virtualization Technology for Directed I/O

   IDT
      Interrupt Descriptor Table: a data structure used by the x86
      architecture to implement an interrupt vector table. The IDT is
      used to determine the correct response to interrupts and
      exceptions.

   ISR
      Interrupt Service Routine: Also known as an interrupt handler, an
      ISR is a callback function whose execution is triggered by a
      hardware interrupt (or software interrupt instructions) and is
      used to handle high-priority conditions that require interrupting
      the current code executing on the processor.
index.rst
@@ -31,3 +31,9 @@ Sections
    release_notes.rst
    contribute.rst
    api/index.rst
+
+Indices and Tables
+******************
+
+* :ref:`glossary`
+* :ref:`genindex`
New binary files:

  introduction/images/IVI-block.png                 (67 KiB)
  introduction/images/VMX-brief.png                 (14 KiB)
  introduction/images/architecture.png              (61 KiB)
  introduction/images/boot-flow.png                 (18 KiB)
  introduction/images/device-model.png              (48 KiB)
  introduction/images/device-passthrough.png        (16 KiB)
  introduction/images/io-emulation-path.png         (56 KiB)
  introduction/images/virtio-architecture.png       (38 KiB)
  introduction/images/virtio-framework-kernel.png   (68 KiB)
  introduction/images/virtio-framework-userland.png (66 KiB)
introduction/index.rst
@@ -1,32 +1,570 @@

Removed (old introduction):

-Introducing Project ACRN
-########################
-
-The Project ACRN Embedded Hypervisor is a flexible and lightweight bare
-metal hypervisor, built with real-time, functional safety, and security
-in mind. It streamlines embedded development through a scalable open
-source reference platform that addresses embedded developers' needs.
-
-This open source embedded hypervisor defines a software architecture for
-running multiple software subsystems managed securely on a consolidated
-system (by means of a virtual machine manager), and defines a reference
-framework Device Model implementation for device emulation.
-
-This embedded hypervisor is a type-1 reference hypervisor, running
-directly on the system hardware. It can be used for building Software
-Defined Cockpit (SDC) or In-Vehicle Experience (IVE) solutions running
-on Intel Architecture Apollo Lake platforms. As a reference
-implementation, it provides the basis for embedded hypervisor vendors
-to build solutions with an open source reference I/O mediation
-solution, and provides auto makers a reference software stack for SDC
-usage.
-
-This embedded hypervisor is a partitioning hypervisor reference stack,
-also suitable for non-automotive IoT and embedded device solutions. It
-will be addressing the gap that currently exists between datacenter
-hypervisors, hard partitioning hypervisors, and select industrial
-applications. Extending the scope of this open source embedded
-hypervisor relies on the involvement of community developers like you!
-
-This embedded hypervisor is able to support both Linux* and Android* as
-a Guest OS, managed by the hypervisor, where applications can run.

New introduction:

.. _introduction:

Introduction to Project ACRN
############################

The open source project ACRN defines a device hypervisor reference stack
and an architecture for running multiple software subsystems, managed
securely, on a consolidated system by means of a virtual machine
manager. It also defines a reference framework implementation for
virtual device emulation, called the "ACRN Device Model".

The ACRN Hypervisor is a Type 1 reference hypervisor stack, running
directly on the bare-metal hardware, and is suitable for a variety of
IoT and embedded device solutions. The ACRN hypervisor addresses the gap
that currently exists between datacenter hypervisors and hard
partitioning hypervisors. The ACRN hypervisor architecture partitions
the system into different functional domains, with carefully selected
guest OS sharing optimizations for IoT and embedded devices.

An interesting use case example for the ACRN hypervisor is in an
automotive scenario. The ACRN hypervisor can be used for building a
Software Defined Cockpit (SDC) or an In-Vehicle Experience (IVE)
solution. As a reference implementation, ACRN provides the basis for
embedded hypervisor vendors to build solutions with a reference I/O
mediation solution.

In this scenario, an automotive SDC system consists of the Instrument
Cluster (IC) system, the In-Vehicle Infotainment (IVI) system, and one
or more Rear Seat Entertainment (RSE) systems. Each system runs as an
isolated Virtual Machine (VM) for overall system safety considerations.

An **Instrument Cluster (IC)** system is used to show the driver
operational information about the vehicle, such as:

- the speed, fuel level, trip mileage, and other driving information
  of the car;
- projecting heads-up images on the windshield, with alerts for low
  fuel or tire pressure;
- showing rear-view camera, and surround-view for parking assistance.

An **In-Vehicle Infotainment (IVI)** system's capabilities can include:

- navigation systems, radios, and other entertainment systems;
- connection to mobile devices for phone calls, music, and applications
  via voice recognition;
- control interaction by gesture recognition or touch.

A **Rear Seat Entertainment (RSE)** system could run:

- entertainment systems;
- virtual office;
- connection to the front-seat IVI system and mobile devices (cloud
  connectivity);
- connection to mobile devices for phone calls, music, and
  applications via voice recognition;
- control interaction by gesture recognition or touch.

The ACRN hypervisor can support both Linux\* VMs and Android\* VMs as a
User OS, with the User OS managed by the ACRN hypervisor. Developers and
OEMs can use this reference stack to run their own VMs, together with
the IC, IVI, and RSE VMs. The Service OS runs as VM0 (also known as Dom0
in other hypervisors) and the User OS runs as VM1 (also known as DomU).

:numref:`ivi-block` shows an example block diagram of using the ACRN
hypervisor.

.. figure:: images/IVI-block.png
   :align: center
   :name: ivi-block

   Service OS and User OS on top of ACRN hypervisor

This ACRN hypervisor block diagram shows:

- The ACRN hypervisor sits right on top of the bootloader for fast
  booting capabilities.
- Partitioning of resources ensures that safety-critical and
  non-safety-critical domains are able to coexist on one platform.
- Rich I/O mediators allow various I/O devices to be shared across VMs,
  and thus deliver a comprehensive user experience.
- Multiple operating systems are supported by one SoC through efficient
  virtualization.

.. note::
   The yellow parts in :numref:`ivi-block` are part of the Project
   ACRN software stack. This is a reference architecture diagram, and
   not all features mentioned are fully functional. Other blocks will
   come from other (open source) projects and are listed here for
   reference only.

   For example, the Service OS and Linux Guest can come from the Clear
   Linux project at https://clearlinux.org and (in later updates)
   Android-as-a-Guest support can come from https://01.org/android-ia.

   For the current ACRN-supported feature list, please see
   :ref:`release_notes`.

Licensing
*********
.. _BSD-3-Clause: https://opensource.org/licenses/BSD-3-Clause

Both the ACRN hypervisor and ACRN Device Model software are provided
under the permissive `BSD-3-Clause`_ license, which allows
*"redistribution and use in source and binary forms, with or without
modification"* together with the intact copyright notice and
disclaimers noted in the license.

ACRN Device Model, Service OS, and User OS
******************************************

To keep the hypervisor code base as small and efficient as possible, the
bulk of the device model implementation resides in the Service OS to
provide sharing and other capabilities. The details of which devices are
shared and the mechanism used for their sharing are described in the
`pass-through`_ section below.

The Service OS runs with the system's highest virtual machine priority
to meet the time-sensitive requirements of devices and system quality
of service (QoS). Service OS tasks run with mixed priority. Upon a
callback servicing a particular User OS request, the corresponding
software (or mediator) in the Service OS inherits the User OS priority.
There may also be additional low-priority background tasks within the
Service OS.

In the automotive example we described above, the User OS is the central
hub of vehicle control and in-vehicle entertainment. It provides support
for radio and entertainment options, control of the vehicle climate
control, and vehicle navigation displays. It also provides connectivity
options for using USB, Bluetooth, and WiFi for third-party device
interaction with the vehicle, such as Android Auto\* or Apple CarPlay\*,
and many other features.

Boot Sequence
*************

In :numref:`boot-flow` we show a verified boot sequence with UEFI
on an Intel |reg| Architecture NUC platform (see :ref:`hardware`).

.. figure:: images/boot-flow.png
   :align: center
   :name: boot-flow

   ACRN Hypervisor Boot Flow

The boot process proceeds as follows:

1. UEFI verifies and boots the ACRN hypervisor and Service OS
   bootloader.
2. UEFI (or the Service OS bootloader) verifies and boots the Service
   OS kernel.
3. The Service OS kernel verifies and loads the ACRN Device Model and
   virtual bootloader through dm-verity.
4. The virtual bootloader starts the User-side verified boot process.

ACRN Hypervisor Architecture
****************************

The ACRN hypervisor is a Type 1 hypervisor, running directly on
bare-metal hardware. It implements a hybrid VMM architecture, using a
privileged service VM running the Service OS that manages the I/O
devices and provides I/O mediation. Multiple User VMs are supported,
with each of them running Linux\* or Android\* OS as the User OS.

Running systems in separate VMs provides isolation from other VMs and
their applications, reducing potential attack surfaces and minimizing
safety interference. However, running the systems in separate VMs may
introduce additional latency for applications.

:numref:`ACRN-architecture` shows the ACRN hypervisor architecture, with
the automotive example IC VM and service VM together. The Service OS
(SOS) owns most of the devices, including the platform devices, and
provides I/O mediation. Some of the PCIe devices may be passed through
to the User OSes via the VM configuration. The SOS runs the IC
applications and hypervisor-specific applications together, such as the
ACRN device model and ACRN VM manager.

The ACRN hypervisor also runs the ACRN VM manager to collect run-time
information about the User OS, and to control the User VM: starting,
stopping, and pausing a VM, or pausing and resuming a virtual CPU.

.. figure:: images/architecture.png
   :align: center
   :name: ACRN-architecture

   ACRN Hypervisor Architecture

The ACRN hypervisor takes advantage of Intel Virtualization Technology
(Intel VT). The ACRN hypervisor runs in Virtual Machine Extension (VMX)
root operation, also called host mode or VMM mode. All the guests,
including the UOS and SOS, run in VMX non-root operation, or guest
mode. (Hereafter, we use the terms VMM mode and guest mode for
simplicity.)

The VMM mode has 4 protection rings, but runs the ACRN hypervisor in
ring 0 privilege only, leaving rings 1-3 unused. The guest (including
SOS and UOS), running in guest mode, also has its own four protection
rings (ring 0 to 3). The User kernel runs in ring 0 of guest mode, and
user-land applications run in ring 3 of guest mode (rings 1 and 2 are
usually not used by commercial OSes).

.. figure:: images/VMX-brief.png
   :align: center
   :name: VMX-brief

   VMX Brief

As shown in :numref:`VMX-brief`, VMM mode and guest mode are switched
through VM Exit and VM Entry. When the bootloader hands off control to
the ACRN hypervisor, the processor hasn't enabled VMX operation yet. The
ACRN hypervisor needs to enable VMX operation through a VMXON
instruction first. Initially, the processor stays in VMM mode when VMX
operation is enabled. It enters guest mode through a VM resume
instruction (or a first-time VM launch), and returns to VMM mode through
a VM exit event. VM exit occurs in response to certain instructions and
events.

The behavior of processor execution in guest mode is controlled by a
virtual machine control structure (VMCS). The VMCS contains the guest
state (loaded at VM Entry, and saved at VM Exit), the host state (loaded
at the time of VM exit), and the guest execution controls. The ACRN
hypervisor creates a VMCS data structure for each virtual CPU, and uses
the VMCS to configure the behavior of the processor running in guest
mode.

When the execution of the guest hits a sensitive instruction, a VM exit
event may happen as defined in the VMCS configuration. Control goes back
to the ACRN hypervisor when the VM exit happens. The ACRN hypervisor
emulates the guest instruction (if the exit was due to a privilege
issue) and resumes the guest at its next instruction, or fixes the VM
exit reason (for example, if a guest memory page is not mapped yet) and
resumes the guest to re-execute the instruction.
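To make the exit-handling flow concrete, here is a minimal sketch (in
C, and not taken from the ACRN sources) of the run loop the preceding
paragraphs describe: enter guest mode, field the VM exit back in VMM
mode, dispatch on the exit reason, and resume. The structure and
function names are illustrative; only the two exit-reason encodings are
real (they match the Intel SDM).

.. code-block:: c

   #include <stdint.h>
   #include <stdio.h>

   #define EXIT_REASON_IO_INSTRUCTION 30  /* Intel SDM encodings */
   #define EXIT_REASON_EPT_VIOLATION  48

   struct vcpu {
       uint64_t rip;          /* guest instruction pointer */
       uint32_t exit_reason;  /* recorded by hardware at VM exit */
   };

   /* Stand-in for the VMLAUNCH/VMRESUME transition into guest mode;
    * a real VMM does this in assembly against the vCPU's VMCS. */
   static void vm_enter(struct vcpu *vcpu)
   {
       vcpu->exit_reason = EXIT_REASON_IO_INSTRUCTION; /* pretend exit */
   }

   static void emulate_io(struct vcpu *vcpu)
   {
       puts("emulate the I/O access, then resume at the next insn");
       vcpu->rip += 2;  /* e.g. past a two-byte "in AL, 20h" */
   }

   static void fix_ept_fault(struct vcpu *vcpu)
   {
       (void)vcpu;
       puts("map the faulting page, then re-execute the same insn");
   }

   void vcpu_run(struct vcpu *vcpu, int iterations)
   {
       while (iterations-- > 0) {       /* a real VMM loops forever */
           vm_enter(vcpu);              /* guest runs until a VM exit */
           switch (vcpu->exit_reason) { /* now back in VMM mode */
           case EXIT_REASON_IO_INSTRUCTION:
               emulate_io(vcpu);
               break;
           case EXIT_REASON_EPT_VIOLATION:
               fix_ept_fault(vcpu);
               break;
           }
       }
   }

   int main(void)
   {
       struct vcpu v = { .rip = 0x1000 };
       vcpu_run(&v, 1);
       return 0;
   }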
Note that the address space used in VMM mode is different from that in
guest mode. Guest mode and VMM mode use different memory-mapping
tables, and therefore the ACRN hypervisor is protected from guest
access. The ACRN hypervisor uses EPT to map the guest address: the
guest page table maps from a guest linear address to a guest physical
address, and the EPT table maps from a guest physical address to a
machine physical address, or host physical address (HPA).
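The two-stage translation can be read as a simple composition of
lookups. The sketch below is purely illustrative (real walks are
multi-level page-table walks performed by hardware): a toy guest page
table maps one guest-virtual page to a guest-physical page, and a toy
EPT maps that guest-physical page to a host-physical page.

.. code-block:: c

   #include <stdint.h>
   #include <stdio.h>

   /* Toy one-entry "tables": GVA page 0x1000 -> GPA page 0x5000
    * (guest page table), GPA page 0x5000 -> HPA page 0x9000 (EPT). */
   static uint64_t guest_pt_lookup(uint64_t gva)
   {
       return 0x5000 | (gva & 0xFFF);   /* keep the page offset */
   }

   static uint64_t ept_lookup(uint64_t gpa)
   {
       return 0x9000 | (gpa & 0xFFF);
   }

   /* GVA -> GPA via the guest page table, then GPA -> HPA via EPT. */
   static uint64_t translate(uint64_t gva)
   {
       return ept_lookup(guest_pt_lookup(gva));
   }

   int main(void)
   {
       printf("GVA 0x1234 -> HPA %#llx\n",
              (unsigned long long)translate(0x1234));
       return 0;
   }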
ACRN Device Model Architecture
******************************

Because devices may need to be shared between VMs, device emulation is
used to give VM applications (and OSes) access to these shared devices.
Traditionally there are three architectural approaches to device
emulation:

* The first architecture is device emulation within the hypervisor,
  which is a common method implemented within the VMware\* Workstation
  product (an operating system-based hypervisor). In this method, the
  hypervisor includes emulations of common devices that the various
  guest operating systems can share, including virtual disks, virtual
  network adapters, and other necessary platform elements.

* The second architecture is called user space device emulation. As the
  name implies, rather than the device emulation being embedded within
  the hypervisor, it is instead implemented in a separate user space
  application. QEMU, for example, provides this kind of device
  emulation, also used by a large number of independent hypervisors.
  This model is advantageous because the device emulation is
  independent of the hypervisor and can therefore be shared between
  hypervisors. It also permits arbitrary device emulation without
  having to burden the hypervisor (which operates in a privileged
  state) with this functionality.

* The third variation on hypervisor-based device emulation is
  paravirtualized (PV) drivers. In this model, introduced by the `XEN
  project`_, the hypervisor includes the physical drivers, and each
  guest operating system includes a hypervisor-aware driver that works
  in concert with the hypervisor drivers.

.. _XEN project:
   https://wiki.xenproject.org/wiki/Understanding_the_Virtualization_Spectrum

In the device emulation models discussed above, there's a price to pay
for sharing devices. Whether device emulation is performed in the
hypervisor, or in user space within an independent VM, overhead exists.
This overhead is worthwhile as long as the devices need to be shared by
multiple guest operating systems. If sharing is not necessary, there
are more efficient methods for accessing devices, for example
"pass-through".

The ACRN device model is a placeholder for the UOS. It allocates memory
for the User OS, configures and initializes the devices used by the
UOS, loads the virtual firmware, initializes the virtual CPU state, and
invokes the ACRN hypervisor service to execute the guest instructions.
The ACRN Device Model is an application running in the Service OS that
emulates devices based on command-line configuration, as shown in
the architecture diagram :numref:`device-model` below:

.. figure:: images/device-model.png
   :align: center
   :name: device-model

   ACRN Device Model

The ACRN Device Model incorporates these three aspects:

**Device Emulation**:
   The ACRN Device Model provides device emulation routines that
   register their I/O handlers to the I/O dispatcher. When there is an
   I/O request from the User OS device, the I/O dispatcher sends this
   request to the corresponding device emulation routine.

**I/O Path**:
   See the `ACRN-io-mediator`_ section below.

**VHM**:
   The Virtio and Hypervisor Service Module is a kernel module in the
   Service OS acting as a middle layer to support the device model. The
   VHM and its client handling flow is described below (a sketch of a
   user-land client follows this list):

   #. An ACRN hypervisor IOREQ is forwarded to the VHM by an upcall
      notification to the SOS.
   #. The VHM marks the IOREQ as "in process" so that the same IOREQ is
      not picked up again. The IOREQ is sent to the client for
      handling. Meanwhile, the VHM is ready for another IOREQ.
   #. IOREQ clients are either an SOS user-land application or a
      Service OS kernel-space module. Once the IOREQ is processed and
      completed, the client issues an IOCTL call to the VHM to notify
      an IOREQ state change. The VHM then checks and makes a hypercall
      to the ACRN hypervisor, notifying it that the IOREQ has
      completed.

   .. note::
      User-land client: dm, as the ACRN Device Model.

      Kernel-space clients: VBS-K, MPT Service, and the VHM itself.
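As a rough sketch of what an SOS user-land IOREQ client's main loop
could look like, following the three steps above: attach to the VHM,
emulate each request, and notify the state change. Everything here is
hypothetical (the device node name, the ioctl numbers, and the request
layout are invented for illustration); the real interface is defined by
the VHM kernel module.

.. code-block:: c

   #include <stdint.h>
   #include <fcntl.h>
   #include <unistd.h>
   #include <sys/ioctl.h>

   #define VHM_GET_IOREQ   0x8001  /* invented ioctl numbers */
   #define VHM_IOREQ_DONE  0x8002

   struct ioreq {            /* simplified request record */
       uint16_t port;        /* PIO address */
       uint8_t  size;        /* access width in bytes */
       uint8_t  is_read;     /* direction */
       uint32_t value;       /* result for reads, data for writes */
   };

   int main(void)
   {
       int fd = open("/dev/vhm", O_RDWR);   /* invented node name */
       struct ioreq req;

       if (fd < 0)
           return 1;
       /* Block until the VHM hands us an IOREQ marked "in process". */
       while (ioctl(fd, VHM_GET_IOREQ, &req) == 0) {
           if (req.is_read)
               req.value = 0x20;            /* emulate the device */
           /* Step 3: IOCTL back to the VHM, which hypercalls the
            * hypervisor to mark the IOREQ complete. */
           ioctl(fd, VHM_IOREQ_DONE, &req);
       }
       close(fd);
       return 0;
   }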
.. _pass-through:

Device pass-through
*******************

At the highest level, device pass-through is about providing isolation
of a device to a given guest operating system so that the device can be
used exclusively by that guest.

.. figure:: images/device-passthrough.png
   :align: center
   :name: device-passthrough

   Device Pass-Through

Near-native performance can be achieved by using device pass-through.
This is ideal for networking applications (or those with high disk I/O
needs) that have not adopted virtualization because of contention and
performance degradation through the hypervisor (using a driver in the
hypervisor, or through the hypervisor to a user space emulation).
Assigning devices to specific guests is also useful when those devices
inherently wouldn't be shared. For example, if a system includes
multiple video adapters, those adapters could be passed through to
unique guest domains.

Finally, there may be specialized PCI devices that only one guest
domain uses, so they should be passed through to the guest. Individual
USB ports could be isolated to a given domain too, or a serial port
(which is itself not shareable) could be isolated to a particular
guest. In the ACRN hypervisor, we support USB controller pass-through
only, and we don't support pass-through for a legacy serial port (for
example, 0x3f8).

Hardware support for device pass-through
========================================

Intel's current processor architectures provide support for device
pass-through with VT-d. VT-d maps a guest physical address to a machine
physical address, so a device can use guest physical addresses
directly. When this mapping occurs, the hardware takes care of access
(and protection), and the guest operating system can use the device as
if it were a non-virtualized system. In addition to mapping guest to
physical memory, isolation prevents this device from accessing memory
belonging to other guests or the hypervisor.

Another innovation that helps interrupts scale to large numbers of VMs
is called Message Signaled Interrupts (MSI). Rather than relying on
physical interrupt pins to be associated with a guest, MSI transforms
interrupts into messages that are more easily virtualized (scaling to
thousands of individual interrupts). MSI has been available since PCI
version 2.2, and is also available in PCI Express (PCIe), where it
allows fabrics to scale to many devices. MSI is ideal for I/O
virtualization, as it allows isolation of interrupt sources (as opposed
to physical pins that must be multiplexed or routed through software).
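An MSI is, concretely, just a memory write the device performs: a small
data value written to a special address, rather than a level change on
a physical pin. This is what makes it easy to virtualize, since the
hypervisor can remap the address/data pair per guest. The sketch below
shows the x86 message layout; treat the specific vector and APIC ID as
illustrative.

.. code-block:: c

   #include <stdint.h>
   #include <stdio.h>

   /* x86 MSI: the address selects the 0xFEExxxxx window and the
    * destination APIC ID; the data carries the vector. */
   struct msi_msg {
       uint64_t address;  /* 0xFEE00000 | (dest APIC ID << 12) */
       uint16_t data;     /* vector in the low byte */
   };

   int main(void)
   {
       struct msi_msg msg = {
           .address = 0xFEE00000u | (2u << 12),  /* APIC ID 2 */
           .data    = 0x41,                      /* vector 0x41 */
       };
       printf("MSI write: *(%#llx) = %#x\n",
              (unsigned long long)msg.address, msg.data);
       return 0;
   }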
Hypervisor support for device pass-through
==========================================

By using the latest virtualization-enhanced processor architectures,
hypervisors and virtualization solutions can support device
pass-through (using VT-d), including Xen, KVM, and the ACRN hypervisor.
In most cases, the guest operating system (User OS) must be compiled to
support pass-through, by using kernel build-time options. Hiding the
devices from the host VM may also be required (as is done with Xen
using pciback). Some restrictions apply in PCI; for example, PCI
devices behind a PCIe-to-PCI bridge must be assigned to the same guest
OS. PCIe does not have this restriction.

.. _ACRN-io-mediator:

ACRN I/O mediator
*****************

:numref:`io-emulation-path` shows the flow of an example I/O emulation
path.

.. figure:: images/io-emulation-path.png
   :align: center
   :name: io-emulation-path

   I/O Emulation Path

Following along with the numbered items in :numref:`io-emulation-path`
(a sketch tying several of these steps together follows this section):

1. When a guest executes an I/O instruction (PIO or MMIO), a VM exit
   happens. The ACRN hypervisor takes control and analyzes the VM exit
   reason, which is VMX_EXIT_REASON_IO_INSTRUCTION for PIO access.
2. The ACRN hypervisor fetches and analyzes the guest instruction,
   notices it is a PIO instruction (``in AL, 20h`` in this example),
   puts the decoded information (including the PIO address, size of
   access, read/write, and target register) into the shared page, and
   notifies/interrupts the SOS to process it.
3. The Virtio and Hypervisor service Module (VHM) in the SOS receives
   the interrupt, and queries the IO request ring to get the PIO
   instruction details.
4. It checks to see if any kernel device claims ownership of the IO
   port: if a kernel module claimed it, the kernel module is activated
   to execute its processing APIs. Otherwise, the VHM module leaves the
   IO request in the shared page and wakes up the device model thread
   to process it.
5. The ACRN Device Model follows the same mechanism as the VHM. The I/O
   processing thread of the device model queries the IO request ring to
   get the PIO instruction details and checks to see if any (guest)
   device emulation module claims ownership of the IO port: if a module
   claimed it, the module is invoked to execute its processing APIs.
6. After the ACRN device module (say uDev1 here) completes the
   emulation (port IO 20h access in this example), uDev1 puts the
   result into the shared page (in register AL in this example).
7. The ACRN Device Model then returns control to the ACRN hypervisor to
   indicate the completion of an IO instruction emulation, typically
   through VHM/hypercall.
8. The ACRN hypervisor then knows the IO emulation is complete, and
   copies the result to the guest register context.
9. The ACRN hypervisor finally advances the guest IP to indicate
   completion of the instruction execution, and resumes the guest.

The MMIO path is very similar, except the VM exit reason is different.
MMIO access is usually trapped through a VMX_EXIT_REASON_EPT_VIOLATION
in the hypervisor.
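A sketch may help tie steps 2, 4-6, and 8-9 together. The record below
stands in for the decoded request the hypervisor places in the shared
page, and the dispatcher stands in for the port-ownership check that
the VHM and the device model each perform. All names and field layouts
are illustrative, not ACRN's.

.. code-block:: c

   #include <stdint.h>
   #include <stddef.h>
   #include <stdio.h>

   struct io_request {       /* decoded by the hypervisor (step 2) */
       uint16_t addr;        /* PIO address, 0x20 in the example */
       uint8_t  size;        /* access width in bytes */
       uint8_t  is_read;     /* 1 for "in AL, 20h" */
       uint32_t value;       /* filled in by the emulator on reads */
   };

   typedef void (*pio_handler_t)(struct io_request *req);

   struct pio_range { uint16_t base, len; pio_handler_t handler; };
   static struct pio_range owners[16];
   static size_t nr_owners;

   static void claim_pio(uint16_t base, uint16_t len, pio_handler_t h)
   {
       owners[nr_owners++] = (struct pio_range){ base, len, h };
   }

   /* Steps 4/5: find the module that claimed this port, invoke it. */
   static void dispatch(struct io_request *req)
   {
       for (size_t i = 0; i < nr_owners; i++)
           if (req->addr >= owners[i].base &&
               req->addr < owners[i].base + owners[i].len)
               return owners[i].handler(req);
       req->value = ~0u;  /* unclaimed port reads as all-ones */
   }

   static void udev1(struct io_request *req)   /* step 6 */
   {
       if (req->is_read)
           req->value = 0x02;  /* result goes to the shared page */
   }

   int main(void)
   {
       struct io_request req = { .addr = 0x20, .size = 1, .is_read = 1 };
       uint64_t guest_rax = 0, guest_rip = 0x1000;

       claim_pio(0x20, 2, udev1);
       dispatch(&req);
       /* Steps 8/9: the hypervisor writes AL and advances RIP. */
       guest_rax = (guest_rax & ~0xFFull) | (req.value & 0xFF);
       guest_rip += 2;  /* "in AL, 20h" is two bytes */
       printf("AL=%#llx RIP=%#llx\n",
              (unsigned long long)guest_rax,
              (unsigned long long)guest_rip);
       return 0;
   }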
Virtio framework architecture
*****************************

.. _Virtio spec:
   http://docs.oasis-open.org/virtio/virtio/v1.0/virtio-v1.0.html

Virtio is an abstraction for a set of common emulated devices in any
type of hypervisor. In the ACRN reference stack, our implementation is
compatible with `Virtio spec`_ 0.9 and 1.0. By following this spec,
virtual environments and guests have a straightforward, efficient,
standard, and extensible mechanism for virtual devices, rather than
boutique per-environment or per-OS mechanisms.

Virtio provides a common frontend driver framework that not only
standardizes device interfaces, but also increases code reuse across
different virtualization platforms.

.. figure:: images/virtio-architecture.png
   :align: center
   :name: virtio-architecture

   Virtio Architecture

To better understand Virtio, especially its usage in the ACRN project,
several key concepts of Virtio are highlighted here:

**Front-End Virtio driver** (a.k.a. frontend driver, or FE driver in this document)
   Virtio adopts a frontend-backend architecture that enables a simple
   but flexible framework for both frontend and backend Virtio drivers.
   The FE driver provides APIs to configure the interface, pass
   messages, produce requests, and notify the backend Virtio driver. As
   a result, the FE driver is easy to implement, and the performance
   overhead of emulating a device is eliminated.

**Back-End Virtio driver** (a.k.a. backend driver, or BE driver in this document)
   Similar to the FE driver, the BE driver runs either in user-land or
   kernel-land of the host OS. The BE driver consumes requests from the
   FE driver and sends them to the host's native device driver. Once
   the requests are done by the host native device driver, the BE
   driver notifies the FE driver about the completion of the requests.

**Straightforward**: Virtio devices as standard devices on existing buses
   Instead of creating new device buses from scratch, Virtio devices
   are built on existing buses. This gives a straightforward way for
   both FE and BE drivers to interact with each other. For example, the
   FE driver can read/write registers of the device, and the virtual
   device can interrupt the FE driver, on behalf of the BE driver, when
   something happens. Currently Virtio supports a PCI/PCIe bus and an
   MMIO bus. In the ACRN project, only the PCI/PCIe bus is supported,
   and all the Virtio devices share the same vendor ID, 0x1AF4.

**Efficient**: batching operation is encouraged
   Batching operation and deferred notification are important to
   achieve high-performance I/O, since notification between the FE and
   BE drivers usually involves an expensive exit of the guest.
   Therefore, batching operations and notification suppression are
   highly encouraged where possible. This gives an efficient
   implementation for performance-critical devices.

**Standard**: virtqueue
   All the Virtio devices share a standard ring buffer and descriptor
   mechanism, called a virtqueue, shown in
   :numref:`virtio-architecture`. A virtqueue is a queue of
   scatter-gather buffers. There are three important methods on
   virtqueues (a runnable toy version of the three follows below):

   * ``add_buf`` is for adding a request/response buffer to a
     virtqueue,
   * ``get_buf`` is for getting a response/request from a virtqueue,
     and
   * ``kick`` is for notifying the other side to consume buffers from a
     virtqueue.

   The virtqueues are created in guest physical memory by the FE
   drivers. The BE drivers only need to parse the virtqueue structures
   to obtain the requests and get them done. How a virtqueue is
   organized is specific to the User OS. In the Linux implementation of
   Virtio, the virtqueue is implemented as a ring buffer structure
   called vring.

   In ACRN, the virtqueue APIs can be leveraged directly, so users
   don't need to worry about the details of the virtqueue. Refer to the
   User OS for more details about the virtqueue implementations.
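The following toy implementation shows how these three methods fit
together from the FE driver's perspective. It is deliberately
simplified to a single-slot "queue" so it runs as-is; a real virtqueue
is a ring of scatter-gather descriptors in guest memory, and a real
``kick`` traps to the BE side rather than completing in place.

.. code-block:: c

   #include <stdio.h>

   /* Toy single-slot "virtqueue". */
   struct virtqueue {
       void *pending;        /* buffer exposed by the FE driver */
       unsigned int len;
       int used;             /* set once the "BE" has consumed it */
   };

   static int add_buf(struct virtqueue *vq, void *buf, unsigned int len)
   {
       vq->pending = buf;    /* expose a request buffer */
       vq->len = len;
       vq->used = 0;
       return 0;
   }

   static void *get_buf(struct virtqueue *vq, unsigned int *len)
   {
       if (!vq->used)
           return NULL;      /* nothing completed yet */
       *len = vq->len;
       return vq->pending;
   }

   static void kick(struct virtqueue *vq)
   {
       /* In a real FE driver this notifies the BE side, which is an
        * expensive guest exit; hence the batching advice above. */
       vq->used = 1;
   }

   int main(void)
   {
       struct virtqueue vq = {0};
       char req[] = "read sector 0";
       unsigned int len;

       add_buf(&vq, req, sizeof(req));  /* queue one request */
       kick(&vq);                       /* notify the backend */
       if (get_buf(&vq, &len))          /* reap the completion */
           printf("completed a %u-byte request\n", len);
       return 0;
   }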
**Extensible**: feature bits
   A simple, extensible feature negotiation mechanism exists for each
   virtual device and its driver. Each virtual device can claim its
   device-specific features, while the corresponding driver can respond
   to the device with the subset of features the driver understands.
   The feature mechanism enables forward and backward compatibility for
   the virtual device and driver.
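Negotiation reduces to a bitwise AND of what the device offers and what
the driver understands, which is why older drivers keep working against
newer devices and vice versa. The bit assignments below are
illustrative, not the Virtio spec's.

.. code-block:: c

   #include <stdint.h>
   #include <stdio.h>

   #define F_EVENT_IDX  (1u << 0)   /* illustrative bit positions */
   #define F_INDIRECT   (1u << 1)
   #define F_SHINY_NEW  (1u << 2)   /* hypothetical future feature */

   int main(void)
   {
       uint32_t device_offers   = F_EVENT_IDX | F_INDIRECT | F_SHINY_NEW;
       uint32_t driver_supports = F_EVENT_IDX | F_INDIRECT;

       /* Both sides proceed with the common subset. */
       uint32_t negotiated = device_offers & driver_supports;
       printf("negotiated features: %#x\n", negotiated);
       return 0;
   }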
In the ACRN reference stack, we implement our user-land and kernel
space as shown in :numref:`virtio-framework-userland`:

.. figure:: images/virtio-framework-userland.png
   :align: center
   :name: virtio-framework-userland

   Virtio Framework - User Land

In the Virtio user-land framework, the implementation is compatible
with Virtio Spec 0.9/1.0. The VBS-U is statically linked with the
Device Model, and communicates with the Device Model through the PCIe
interface: PIO/MMIO or MSI/MSI-X. VBS-U accesses Virtio APIs through
the user space vring service API helpers, which access the shared ring
through a remote memory map (mmap). The VHM maps UOS memory with the
help of the ACRN hypervisor.

.. figure:: images/virtio-framework-kernel.png
   :align: center
   :name: virtio-framework-kernel

   Virtio Framework - Kernel Space

VBS-U offloads data plane processing to VBS-K. VBS-U initializes VBS-K
at the right time (for example, when the FE driver sets
VIRTIO_CONFIG_S_DRIVER_OK) to avoid unnecessary device configuration
changes while running. VBS-K can access shared rings through the VBS-K
virtqueue APIs, which are similar to the VBS-U virtqueue APIs. VBS-K
registers as a VHM client to handle a continuous range of registers.

There may be one or more VHM clients for each VBS-K, and there can be a
single VHM client for all VBS-K instances as well. VBS-K notifies the
FE driver through the VHM interrupt APIs.
introduction/index.rst.sav (new file)
@@ -0,0 +1,66 @@

.. _introduction:

Introducing Project ACRN
########################

The open source project ACRN defines a device hypervisor reference
stack and an architecture for running multiple software subsystems,
managed securely, on a consolidated system by means of a virtual
machine manager. It also defines a reference framework implementation
for virtual device emulation, called the "ACRN Device Model".

The ACRN Hypervisor is a Type 1 reference hypervisor stack, running
directly on the bare-metal hardware, and is suitable for a variety of
IoT and embedded device solutions. The ACRN hypervisor addresses the
gap that currently exists between datacenter hypervisors and hard
partitioning hypervisors. The ACRN hypervisor architecture partitions
the system into different functional domains, with carefully selected
guest OS sharing optimizations for IoT and embedded devices.

Automotive use case scenario
****************************

A good use case example for the ACRN Hypervisor is in an automotive
scenario. The ACRN hypervisor can be used for building a Software
Defined Cockpit (SDC) or an In-Vehicle Experience (IVE) solution. As a
reference implementation, Project ACRN provides the basis for embedded
hypervisor vendors to build solutions with a reference I/O mediation
solution.

For example, an automotive SDC system consists of the Instrument
Cluster (IC) system, the In-Vehicle Infotainment (IVI) system, and one
or more Rear Seat Entertainment (RSE) systems. Each system can run on
the same hardware as isolated Virtual Machines (VM), for overall system
safety considerations.

An **Instrument Cluster (IC)** system is used to show the driver
operational information about the vehicle, such as:

* the speed, fuel level, trip mileage, and other driving information
  of the car;
* projecting heads-up images on the windshield, with alerts for low
  fuel or tire pressure;
* showing rear-view camera, and surround-view for parking assistance.

An **In-Vehicle Infotainment (IVI)** system's capabilities can include:

* navigation systems, radios, and other entertainment systems;
* connection to mobile devices for phone calls, music, and
  applications via voice recognition;
* control interaction by gesture recognition or touch.

A **Rear Seat Entertainment (RSE)** system could run:

* entertainment system;
* virtual office;
* connection to the front-seat IVI system and mobile devices (cloud
  connectivity);
* connection to mobile devices for phone calls, music, and
  applications via voice recognition;
* control interaction by gesture recognition or touch.

The ACRN hypervisor supports both Linux* VM and Android* VM as a User
OS, with the User OS managed by the ACRN hypervisor. Developers and
OEMs can use this reference stack to run their own VMs, together with
the IC, IVI, and RSE VMs. The Service OS runs as VM0 (also known as
Dom0 in other hypervisors), and the User OS runs as VM1 (also known as
DomU).
scripts/publish-README.md (new file)
@@ -0,0 +1,5 @@

# projectacrn.github.io

This is the Project ACRN Documentation Publishing site for GitHub Pages.
Content changes are not made directly in this repo. Instead, edit content
in the acrn-documentation repo, re-generate the HTML with Sphinx, and push
the updated content here for publishing.
scripts/pullsource.sh
@@ -1,7 +1,6 @@
 #!/bin/bash
 
-# pull fresh copies of the ACRN source and copy public API headers
-# over to the documentation tree
+# pull fresh copies of the ACRN source for use by doxygen
 
 if [ ! -d "../acrn-hypervisor" ]; then
    echo Repo for acrn-hypervisor is missing.
@@ -13,15 +12,4 @@ if [ ! -d "../acrn-devicemodel" ]; then
 fi
 
 cd ../acrn-hypervisor;git pull
-
-mkdir -p ../acrn-documentation/_source/hypervisor/include/common
-cp include/common/hypercall.h ../acrn-documentation/_source/hypervisor/include/common
-
-mkdir -p ../acrn-documentation/_source/hypervisor/include/public
-cp include/public/acrn_common.h ../acrn-documentation/_source/hypervisor/include/public
-cp include/public/acrn_hv_defs.h ../acrn-documentation/_source/hypervisor/include/public
-
 cd ../acrn-devicemodel;git pull
-
-mkdir -p ../acrn-documentation/_source/devicemodel/include
-cp include/virtio.h ../acrn-documentation/_source/devicemodel/include
scripts/requirements.txt (new file)
@@ -0,0 +1,4 @@

breathe==4.7.3
sphinx==1.6.5
docutils==0.14
sphinx_rtd_theme
(custom Sphinx stylesheet)
@@ -5,6 +5,7 @@
   max-width: none;
 }
 
+/* (temporarily) add an under development tagline to the bread crumb */
 .wy-breadcrumbs::after {
   content: " (Content under development)";
   background-color: #FFFACD;
@@ -12,6 +13,16 @@
   font-weight: bold;
 }
 
+/* Make the version number more visible */
+.wy-side-nav-search>div.version {
+  color: rgba(255,255,255,1);
+}
+
+p.caption {
+  /* border-top: 1px solid; */
+  margin-top: 1em;
+}
+
 /* make .. hlist:: tables fill the page */
 table.hlist {
   width: 95% !important;