commit 0859836756 (parent d57a213242)

doc: update release branch with new docs

Signed-off-by: David B. Kinder <david.b.kinder@intel.com>
@@ -6,5 +6,9 @@
 # Sphinx 2.0
 ^(?P<filename>[-._/\w]+/hld/[-._/\w]+.rst):(?P<lineno>[0-9]+): WARNING: Duplicate declaration, .*
 #
+^(?P<filename>[-._/\w]+/api/[-._/\w]+.rst):(?P<lineno>[0-9]+): WARNING: duplicate C object description.*
+#
+^(?P<filename>[-._/\w]+/hld/[-._/\w]+.rst):(?P<lineno>[0-9]+): WARNING: duplicate C object description.*
+#
 ^(?P<filename>[-._/\w]+/hld/[-._/\w]+.rst):(?P<lineno>[0-9]+): WARNING: Duplicate C\+\+ declaration, .*
 ^Declaration is .*
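These known-issues patterns are matched against the Sphinx build output to filter out warnings that are already understood. As a quick check (a minimal sketch; the sample warning line below is made up for the demo), the new ``duplicate C object description`` pattern can be exercised with Python's ``re`` module:

   import re

   # One of the patterns added above, applied to a sample warning line.
   pattern = re.compile(
       r'^(?P<filename>[-._/\w]+/api/[-._/\w]+.rst):(?P<lineno>[0-9]+): '
       r'WARNING: duplicate C object description.*')

   line = ("doc/api/devicemodel_api.rst:42: "
           "WARNING: duplicate C object description of vm_create")
   match = pattern.match(line)
   if match:
       print("known issue:", match.group("filename"), "line", match.group("lineno"))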
doc/asa.rst | 16
@@ -3,6 +3,22 @@
 Security Advisory
 #################
 
+Addressed in ACRN v2.3
+************************
+
+We recommend that all developers upgrade to this v2.3 release (or later), which
+addresses the following security issue that was discovered in previous releases:
+
+------
+
+- NULL Pointer Dereference in ``devicemodel\hw\pci\virtio\virtio_mei.c``
+  The ``vmei_proc_tx()`` function tries to find the ``iov_base`` by calling
+  the ``paddr_guest2host()`` function, which may return NULL (the ``vd``
+  struct is controlled by the User VM OS). There is a use of ``iov_base``
+  afterward that can cause a NULL pointer dereference (CVE-2020-28346).
+
+  **Affected Release:** v2.2 and earlier.
+
 Addressed in ACRN v2.1
 ************************
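The flaw class here is simple to state: a helper that can return NULL is used without a check. A hedged sketch of the fixed control flow, written in Python purely for illustration (the real code is C in ``devicemodel/hw/pci/virtio/virtio_mei.c``, and the stub mapping below is invented for the demo):

   # paddr_guest2host() can fail for a guest-controlled address, so its
   # result must be checked before use.
   def paddr_guest2host(gpa, size, mapped={0x1000: bytearray(64)}):
       # Illustrative stand-in: pretend only one guest page is mapped.
       return mapped.get(gpa)

   def vmei_proc_tx(gpa, size):
       iov_base = paddr_guest2host(gpa, size)
       if iov_base is None:     # the guest can force this; bail out early
           return -1
       iov_base[0] = 0xAB       # safe to touch only after the check
       return 0

   assert vmei_proc_tx(0x1000, 64) == 0
   assert vmei_proc_tx(0xBEEF, 64) == -1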
doc/conf.py | 20
@@ -37,7 +37,7 @@ if "RELEASE" in os.environ:
 sys.path.insert(0, os.path.join(os.path.abspath('.'), 'extensions'))
 extensions = [
     'breathe', 'sphinx.ext.graphviz', 'sphinx.ext.extlinks',
-    'kerneldoc', 'eager_only', 'html_redirects',
+    'kerneldoc', 'eager_only', 'html_redirects', 'link_roles',
     'sphinx_tabs.tabs'
 ]
 
@@ -166,7 +166,7 @@ else:
     'analytics_id': '',
     'logo_only': False,
     'display_version': True,
-    'prev_next_buttons_location': 'None',
+    #'prev_next_buttons_location': 'None',
     # Toc options
     'collapse_navigation': False,
     'sticky_navigation': True,
@@ -318,7 +318,21 @@ breathe_projects = {
     "Project ACRN" : "doxygen/xml",
 }
 breathe_default_project = "Project ACRN"
-breathe_default_members = ('members', 'undoc-members', 'content-only')
+# breathe_default_members = ('members', 'undoc-members', 'content-only')
+breathe_domain_by_extension = {
+    "h": "c",
+    "c": "c",
+}
+
+cpp_id_attributes = [
+    '__syscall', '__deprecated', '__may_alias',
+    '__used', '__unused', '__weak',
+    '__DEPRECATED_MACRO', 'FUNC_NORETURN',
+    '__subsystem',
+]
+c_id_attributes = cpp_id_attributes
+
+
 
 # Custom added feature to allow redirecting old URLs (caused by
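The new ``breathe_domain_by_extension`` setting maps header and C files to Sphinx's C domain, and ``c_id_attributes``/``cpp_id_attributes`` list attribute macros that the C/C++ domains should skip while parsing declarations. A rough, self-contained sketch of the effect (the regex stripping below is an analogy, not Sphinx's actual parser):

   import re

   ID_ATTRIBUTES = ['__syscall', '__deprecated', '__may_alias',
                    '__used', '__unused', '__weak',
                    '__DEPRECATED_MACRO', 'FUNC_NORETURN', '__subsystem']

   def strip_attrs(decl):
       # Erase each attribute macro, then normalize whitespace.
       for attr in ID_ATTRIBUTES:
           decl = re.sub(r'\b%s\b' % re.escape(attr), '', decl)
       return ' '.join(decl.split())

   print(strip_attrs('__unused static int32_t foo(void *arg)'))
   # -> static int32_t foo(void *arg)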
@@ -72,6 +72,7 @@ Enable ACRN Features
    tutorials/setup_openstack_libvirt
    tutorials/acrn_on_qemu
    tutorials/using_grub
+   tutorials/acrn-secure-boot-with-grub
    tutorials/pre-launched-rt
    tutorials/enable_ivshmem
 
@@ -20,7 +20,7 @@ software continues to be available under the terms that the author
 desired.
 
 Project ACRN uses a BSD-3-Clause license, as found in the
-`LICENSE <https://github.com/projectacrn/acrn-hypervisor/blob/master/LICENSE>`__
+:acrn_file:`LICENSE <LICENSE>`
 in the project's GitHub repo.
 
 A license tells you what rights you have as a developer, as provided by
@@ -221,14 +221,12 @@ following exceptions:
 8-column wide.
 
 You can use *checkpatch* from Linux kernel to check the compliance. ACRN
-maintains a `checkpatch conf`_ which customizes the script to stop warning on
+maintains a :acrn_file:`.checkpatch.conf <.checkpatch.conf>` file
+that customizes the script to stop warnings on
 the exceptions above. Invoke *checkpatch* with the root of ``acrn-hypervisor``
 repository as the current working directory to make the configurations
 effective.
 
-.. _checkpatch conf:
-   https://github.com/projectacrn/acrn-hypervisor/blob/master/.checkpatch.conf
-
 .. _Contribution workflow:
 
 Contribution Workflow
@@ -297,6 +297,17 @@ markup (double backticks) to indicate a ``filename``.
 
 Don't use items within a single backtick, for example ```word```.
 
+For references to files that are in the ACRN Hypervisor GitHub tree, a special
+role can be used that creates a hyperlink to that file. For example, a
+GitHub link to the reST file used to create this document can be generated
+using ``:acrn_file:`doc/developer-guides/doc_guidelines.rst``` that will show
+up as :acrn_file:`doc/developer-guides/doc_guidelines.rst`, a link to
+the "blob" file in the GitHub repo as displayed by GitHub. There's also an
+``:acrn_raw:`doc/developer-guides/doc_guidelines.rst``` role that will link
+to the "raw" uninterpreted file,
+:acrn_raw:`doc/developer-guides/doc_guidelines.rst`. (Click
+on these links to see the difference.)
+
 .. _internal-linking:
 
 Internal Cross-Reference Linking
@@ -37,7 +37,7 @@ may only be done between VMs using the same solution.
 ivshmem hv:
   The **ivshmem hv** implements register virtualization
   and shared memory mapping in the ACRN hypervisor.
-  It will support notification/interrupt mechanism in the future.
+  The notification/interrupt mechanism is supported.
 
 ivshmem dm:
   The **ivshmem dm** implements register virtualization
@@ -45,16 +45,32 @@ ivshmem dm:
   It will support notification/interrupt mechanism in the future.
 
 ivshmem server:
-  A daemon for inter-VM notification capability that will work with **ivshmem
-  dm**. This is currently **not implemented**, so the inter-VM communication
-  doesn't support a notification mechanism.
+  With **ivshmem server** support, VMs with ivshmem devices enabled can send
+  notifications (interrupts) to each other by writing the target peer ID (VM ID)
+  and vector index to the doorbell register of the ivshmem device; the
+  **ivshmem server** forwards this notification event to the target VM.
+
+  Two types of **ivshmem server** are defined in ACRN:
+
+  The user-land **ivshmem server** is a daemon in user space that forwards
+  notifications for **dm-land** ivshmem devices only, by cooperating with
+  **ivshmem dm**. The user-land **ivshmem server** is not implemented.
+
+  The HV-land **ivshmem server** plays a similar role to the user-land
+  **ivshmem server**, but it is a hypervisor module and forwards notifications
+  (virtual interrupts) to target VMs with **hv-land** ivshmem devices enabled.
 
 Ivshmem Device Introduction
 ***************************
 
 The ``ivshmem`` device is a virtual standard PCI device consisting of
-two Base Address Registers (BARs): BAR0 is used for emulating interrupt
-related registers, and BAR2 is used for exposing shared memory region. The ``ivshmem`` device doesn't support any extra capabilities.
+three Base Address Registers (BARs):
+
+* BAR0 is used for emulating interrupt related registers,
+* BAR1 is used for emulating the MSI-X entry table, and
+* BAR2 is used for exposing a shared memory region.
+
+The ``ivshmem`` device supports no extra capabilities.
 
 Configuration Space Definition
 
@@ -426,10 +426,11 @@ our case, we use systemd to automatically create the network by default.
 You can check the files with prefix 50- in the Service VM
 ``/usr/lib/systemd/network/``:
 
-- `50-acrn.netdev <https://raw.githubusercontent.com/projectacrn/acrn-hypervisor/master/misc/acrnbridge/acrn.netdev>`__
-- `50-acrn.network <https://raw.githubusercontent.com/projectacrn/acrn-hypervisor/master/misc/acrnbridge/acrn.network>`__
-- `50-tap0.netdev <https://raw.githubusercontent.com/projectacrn/acrn-hypervisor/master/misc/acrnbridge/tap0.netdev>`__
-- `50-eth.network <https://raw.githubusercontent.com/projectacrn/acrn-hypervisor/master/misc/acrnbridge/eth.network>`__
+- :acrn_raw:`50-acrn.netdev <misc/acrnbridge/acrn.netdev>`
+- :acrn_raw:`50-acrn.network <misc/acrnbridge/acrn.network>`
+- :acrn_raw:`50-tap0.netdev <misc/acrnbridge/tap0.netdev>`
+- :acrn_raw:`50-eth.network <misc/acrnbridge/eth.network>`
 
 When the Service VM is started, run ``ifconfig`` to show the devices created by
 this systemd configuration:
@@ -30,7 +30,7 @@ class EagerOnly(sphinx.directives.other.Only):
         # Evaluate the condition eagerly, and if false return no nodes right away
         env = self.state.document.settings.env
         env.app.builder.tags.add('TRUE')
-        #print(repr(self.arguments[0]))
+
         if not env.app.builder.tags.eval_condition(self.arguments[0]):
             return []
 
@@ -43,3 +43,13 @@ class EagerOnly(sphinx.directives.other.Only):
 
 def setup(app):
     directives.register_directive('only', EagerOnly)
+
+    # The tags.add call above is setting tags.tags['TRUE'] = True.
+    # The operation is idempotent and will have taken effect before
+    # the next eval_condition() which may rely on it, so this is thread
+    # safe for both reads and writes (all other operations are local to
+    # the nodes variable).
+    return {
+        'parallel_read_safe': True,
+        'parallel_write_safe': True,
+    }
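The returned metadata declares the extension parallel-safe, which the comment justifies by the idempotence of ``tags.add('TRUE')``. The tag logic can be poked at directly with Sphinx's ``Tags`` helper; a minimal sketch (assuming a Sphinx installation is available):

   from sphinx.util.tags import Tags

   tags = Tags()
   tags.add('TRUE')
   print(tags.eval_condition('TRUE'))                 # True
   print(tags.eval_condition('TRUE and not FOO'))     # True; FOO is unset
   print(tags.eval_condition('FOO'))                  # False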
doc/extensions/link_roles.py | 62 (new file)
@@ -0,0 +1,62 @@
+# Copyright (c) 2019 Intel Corporation
+#
+# SPDX-License-Identifier: Apache-2.0
+
+# based on http://protips.readthedocs.io/link-roles.html
+
+from __future__ import print_function
+from __future__ import unicode_literals
+import re
+import os
+import os.path
+from os import path
+import subprocess
+from docutils import nodes
+
+
+def run_cmd_get_output(cmd):
+    try:
+        with open(os.devnull, 'w') as devnull:
+            output = subprocess.check_output(cmd, stderr=devnull, shell=True).strip()
+    except subprocess.CalledProcessError as e:
+        output = e.output.decode('ascii')
+
+    return output
+
+def get_github_rev():
+    tag = run_cmd_get_output('git describe --exact-match')
+    if tag:
+        return tag.decode("utf-8")
+    else:
+        return 'master'
+
+
+def setup(app):
+    rev = get_github_rev()
+
+    baseurl = 'https://github.com/projectacrn/acrn-hypervisor'
+
+    app.add_role('acrn_file', autolink('{}/blob/{}/%s'.format(baseurl, rev)))
+    app.add_role('acrn_raw', autolink('{}/raw/{}/%s'.format(baseurl, rev)))
+
+    # The role just creates new nodes based on information in the
+    # arguments; its behavior doesn't depend on any other documents.
+    return {
+        'parallel_read_safe': True,
+        'parallel_write_safe': True,
+    }
+
+
+def autolink(pattern):
+    def role(name, rawtext, text, lineno, inliner, options={}, content=[]):
+        m = re.search(r'(.*)\s*<(.*)>', text)
+        if m:
+            link_text = m.group(1)
+            link = m.group(2)
+        else:
+            link_text = text
+            link = text
+        url = pattern % (link,)
+        node = nodes.reference(rawtext, link_text, refuri=url, **options)
+        return [node], []
+    return role
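The two roles differ only in the URL pattern they are given ("blob" vs. "raw"). The parsing and expansion can be exercised standalone; a small sketch using the same regular expression as ``autolink()``:

   import re

   def expand(pattern, text):
       # Same "link text <target>" parsing as the role() closure above.
       m = re.search(r'(.*)\s*<(.*)>', text)
       link_text, link = (m.group(1), m.group(2)) if m else (text, text)
       return link_text, pattern % (link,)

   blob = 'https://github.com/projectacrn/acrn-hypervisor/blob/master/%s'
   print(expand(blob, 'LICENSE <LICENSE>'))
   print(expand(blob, 'doc/developer-guides/doc_guidelines.rst'))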
doc/genindex.rst | 8 (new file)
@@ -0,0 +1,8 @@
+.. Place holder Index. The sphinx-generated genindex.html will overwrite
+   this placeholder when the website is created, but this file will give us
+   something we can add to the TOC. Technically this isn't advised, but
+   works.
+
+Index
+#####
+
@@ -3,9 +3,6 @@
 Build ACRN from Source
 ######################
 
-Introduction
-************
-
 Following a general embedded-system programming model, the ACRN
 hypervisor is designed to be customized at build time per hardware
 platform and per usage scenario, rather than one binary for all
@@ -40,6 +37,10 @@ the ACRN hypervisor for the following reasons:
 Build the ACRN hypervisor, device model, and tools from source by following
 these steps.
 
+.. contents::
+   :local:
+   :depth: 1
+
 .. _install-build-tools-dependencies:
 
 .. rst-class:: numbered-step
@@ -66,7 +67,6 @@ Install the necessary tools for the following systems:
    $ sudo apt install gcc \
         git \
         make \
-        gnu-efi \
         libssl-dev \
         libpciaccess-dev \
         uuid-dev \
@@ -106,7 +106,6 @@ The `acrn-hypervisor <https://github.com/projectacrn/acrn-hypervisor/>`_
 repository contains four main components:
 
 1. The ACRN hypervisor code, located in the ``hypervisor`` directory.
-#. The EFI stub code, located in the ``misc/efi-stub`` directory.
 #. The ACRN device model code, located in the ``devicemodel`` directory.
 #. The ACRN tools source code, located in the ``misc/tools`` directory.
 
@@ -251,29 +250,18 @@ Now you can build all these components at once as follows:
 
 The build results are found in the ``build`` directory. You can specify
 a different Output folder by setting the ``O`` ``make`` parameter,
-for example: ``make O=build-nuc BOARD=nuc7i7dnb``.
+for example: ``make O=build-nuc``.
 
-If you only need the hypervisor, use this command:
-
 .. code-block:: none
 
-   $ make clean                      # Remove files previously built
-   $ make -C hypervisor
-   $ make -C misc/efi-stub HV_OBJDIR=$PWD/hypervisor/build EFI_OBJDIR=$PWD/hypervisor/build
-
-The ``acrn.efi`` will be generated in the ``./hypervisor/build/acrn.efi`` directory hypervisor.
-
-As mentioned in :ref:`ACRN Configuration Tool <vm_config_workflow>`, the
-Board configuration and VM configuration can be imported from XML files.
-If you want to build the hypervisor with XML configuration files,
-specify the file location as follows (assuming you're at the top level
-of the acrn-hypervisor directory):
-
-.. code-block:: none
-
-   $ make BOARD_FILE=$PWD/misc/vm_configs/xmls/board-xmls/nuc7i7dnb.xml \
+   $ make all BOARD_FILE=$PWD/misc/vm_configs/xmls/board-xmls/nuc7i7dnb.xml \
      SCENARIO_FILE=$PWD/misc/vm_configs/xmls/config-xmls/nuc7i7dnb/industry.xml TARGET_DIR=xxx
 
+The build results are found in the ``build`` directory. You can specify
+a different build folder by setting the ``O`` ``make`` parameter,
+for example: ``make O=build-nuc``.
+
 .. note::
    The ``BOARD`` and ``SCENARIO`` parameters are not needed because the
@@ -3,13 +3,17 @@
 Getting Started Guide for ACRN Industry Scenario with Ubuntu Service VM
 #######################################################################
 
+.. contents::
+   :local:
+   :depth: 1
+
 Verified version
 ****************
 
 - Ubuntu version: **18.04**
-- GCC version: **7.4**
-- ACRN-hypervisor branch: **release_2.2 (acrn-2020w40.1-180000p)**
-- ACRN-Kernel (Service VM kernel): **release_2.2 (5.4.43-PKT-200203T060100Z)**
+- GCC version: **7.5**
+- ACRN-hypervisor branch: **release_2.3 (v2.3)**
+- ACRN-Kernel (Service VM kernel): **release_2.3 (v2.3)**
 - RT kernel for Ubuntu User OS: **4.19/preempt-rt (4.19.72-rt25)**
 - HW: Maxtang Intel WHL-U i7-8665U (`AX8665U-A2 <http://www.maxtangpc.com/fanlessembeddedcomputers/140.html>`_)
 
@@ -23,13 +27,13 @@ Prerequisites
 - Ethernet cables
 - A grub-2.04-7 bootloader with the following patch:
 
-  http://git.savannah.gnu.org/cgit/grub.git/commit/?id=0f3f5b7c13fa9b677a64cf11f20eca0f850a2b20: multiboot2: Set min address for mbi allocation to 0x1000
+  http://git.savannah.gnu.org/cgit/grub.git/commit/?id=0f3f5b7c13fa9b677a64cf11f20eca0f850a2b20:
+  multiboot2: Set min address for mbi allocation to 0x1000
 
-Install Ubuntu for the Service and User VMs
-*******************************************
+.. rst-class:: numbered-step
 
 Hardware Connection
-===================
+*******************
 
 Connect the WHL Maxtang with the appropriate external devices.
 
@@ -42,11 +46,13 @@ Connect the WHL Maxtang with the appropriate external devices.
 
 .. figure:: images/rt-ind-ubun-hw-2.png
 
-Install the Ubuntu User VM (RTVM) on the SATA disk
-==================================================
+.. rst-class:: numbered-step
 
-Install Ubuntu on the SATA disk
--------------------------------
+.. _install-ubuntu-rtvm-sata:
+
+Install the Ubuntu User VM (RTVM) on the SATA disk
+**************************************************
 
 .. note:: The WHL Maxtang machine contains both an NVMe and SATA disk.
    Before you install the Ubuntu User VM on the SATA disk, either
@@ -74,11 +80,12 @@ Install Ubuntu on the SATA disk
 This Ubuntu installation will be modified later (see `Build and Install the RT kernel for the Ubuntu User VM`_)
 to turn it into a real-time User VM (RTVM).
 
-Install the Ubuntu Service VM on the NVMe disk
-==============================================
+.. rst-class:: numbered-step
 
-Install Ubuntu on the NVMe disk
--------------------------------
+.. _install-ubuntu-Service VM-NVMe:
+
+Install the Ubuntu Service VM on the NVMe disk
+**********************************************
 
 .. note:: Before you install the Ubuntu Service VM on the NVMe disk, either
    remove the SATA disk or disable it in the BIOS. Disable it by going to:
@@ -106,6 +113,10 @@ Install Ubuntu on the NVMe disk
 .. note:: Set ``acrn`` as the username for the Ubuntu Service VM.
 
 
+.. rst-class:: numbered-step
+
+.. _build-and-install-acrn-on-ubuntu:
+
 Build and Install ACRN on Ubuntu
 ********************************
 
@@ -135,7 +146,6 @@ Build the ACRN Hypervisor on Ubuntu
    $ sudo -E apt install gcc \
         git \
         make \
-        gnu-efi \
        libssl-dev \
         libpciaccess-dev \
         uuid-dev \
@@ -165,7 +175,6 @@ Build the ACRN Hypervisor on Ubuntu
 
 .. code-block:: none
 
-   $ sudo -E apt-get install iasl
    $ cd /home/acrn/work
    $ wget https://acpica.org/sites/acpica/files/acpica-unix-20191018.tar.gz
    $ tar zxvf acpica-unix-20191018.tar.gz
@@ -185,11 +194,11 @@ Build the ACRN Hypervisor on Ubuntu
    $ git clone https://github.com/projectacrn/acrn-hypervisor
    $ cd acrn-hypervisor
 
-#. Switch to the v2.2 version:
+#. Switch to the v2.3 version:
 
    .. code-block:: none
 
-      $ git checkout -b v2.2 remotes/origin/release_2.2
+      $ git checkout v2.3
 
 #. Build ACRN:
 
@@ -214,7 +223,7 @@ Build and install the ACRN kernel
 
    .. code-block:: none
 
-      $ git checkout -b v2.2 remotes/origin/release_2.2
+      $ git checkout v2.3
       $ cp kernel_config_uefi_sos .config
       $ make olddefconfig
       $ make all
@@ -334,32 +343,6 @@ The User VM will be launched by OVMF, so copy it to the specific folder:
    $ sudo mkdir -p /usr/share/acrn/bios
    $ sudo cp /home/acrn/work/acrn-hypervisor/devicemodel/bios/OVMF.fd /usr/share/acrn/bios
 
-Install IASL in Ubuntu for User VM launch
------------------------------------------
-
-Starting with the ACRN v2.2 release, we use the ``iasl`` tool to
-compile an offline ACPI binary for pre-launched VMs while building ACRN,
-so we need to install the ``iasl`` tool in the ACRN build environment.
-
-Follow these steps to install ``iasl`` (and its dependencies) and
-then update the ``iasl`` binary with a newer version not available
-in Ubuntu 18.04:
-
-.. code-block:: none
-
-   $ sudo -E apt-get install iasl
-   $ cd /home/acrn/work
-   $ wget https://acpica.org/sites/acpica/files/acpica-unix-20191018.tar.gz
-   $ tar zxvf acpica-unix-20191018.tar.gz
-   $ cd acpica-unix-20191018
-   $ make clean && make iasl
-   $ sudo cp ./generate/unix/bin/iasl /usr/sbin/
-
-.. note:: While there are newer versions of software available from
-   the `ACPICA downloads site <https://acpica.org/downloads>`_, this
-   20191018 version has been verified to work.
-
 Build and Install the RT kernel for the Ubuntu User VM
 ------------------------------------------------------
 
@@ -402,6 +385,8 @@ Follow these instructions to build the RT kernel.
    $ sudo cp -r /mnt/lib/modules/lib/modules/4.19.72-rt25 /mnt/lib/modules/
    $ sudo cd ~ && sudo umount /mnt && sync
 
+.. rst-class:: numbered-step
+
 Launch the RTVM
 ***************
 
@@ -466,7 +451,8 @@ Launch the RTVM
 
 .. code-block:: none
 
-   $ sudo /usr/share/acrn/samples/nuc/launch_hard_rt_vm.sh
+   $ sudo cp /home/acrn/work/acrn-hypervisor/misc/vm_configs/sample_launch_scripts/nuc/launch_hard_rt_vm.sh /usr/share/acrn/
+   $ sudo /usr/share/acrn/launch_hard_rt_vm.sh
 
 Recommended BIOS settings for RTVM
 ----------------------------------
@@ -530,13 +516,13 @@ this, follow the below steps to allocate all housekeeping tasks to core 0:
 #. Prepare the RTVM launch script
 
    Follow the `Passthrough a hard disk to RTVM`_ section to make adjustments to
-   the ``/usr/share/acrn/samples/nuc/launch_hard_rt_vm.sh`` launch script.
+   the ``/usr/share/acrn/launch_hard_rt_vm.sh`` launch script.
 
 #. Launch the RTVM:
 
    .. code-block:: none
 
-      $ sudo /usr/share/acrn/samples/nuc/launch_hard_rt_vm.sh
+      $ sudo /usr/share/acrn/launch_hard_rt_vm.sh
 
 #. Log in to the RTVM as root and run the script as below:
 
@@ -601,6 +587,8 @@ Run cyclictest
 :-q: quiet mode; print a summary only on exit
 :-H 30000 --histfile=test.log: dump the latency histogram to a local file
 
+.. rst-class:: numbered-step
+
 Launch the Windows VM
 *********************
 
@@ -664,7 +652,7 @@ Passthrough a hard disk to RTVM
 
 .. code-block:: none
 
-   # vim /usr/share/acrn/samples/nuc/launch_hard_rt_vm.sh
+   # vim /usr/share/acrn/launch_hard_rt_vm.sh
 
    passthru_vpid=(
    ["eth"]="8086 156f"
@@ -706,4 +694,4 @@ Passthrough a hard disk to RTVM
 
 .. code-block:: none
 
-   $ sudo /usr/share/acrn/samples/nuc/launch_hard_rt_vm.sh
+   $ sudo /usr/share/acrn/launch_hard_rt_vm.sh
@@ -84,8 +84,6 @@ license.
    asa
    FAQ <faq>
    glossary
+   genindex
 
-.. _BSD 3-clause license:
-   https://github.com/projectacrn/acrn-hypervisor/blob/master/LICENSE
-
 .. _Project ACRN GitHub repo: https://github.com/projectacrn
@@ -93,10 +93,10 @@ above, i.e. the *logical partitioning*, *sharing*, and *hybrid* modes. They
 further specify the number of VMs that can be run, their attributes and the
 resources they have access to, either shared with other VMs or exclusively.
 
-The predefined scenarios are in the `misc/vm_configs/scenarios
-<https://github.com/projectacrn/acrn-hypervisor/tree/master/misc/vm_configs/scenarios>`_
-folder in the source code. XML examples for some platforms can also be found under
-`misc/vm_configs/xmls/config-xmls <https://github.com/projectacrn/acrn-hypervisor/tree/master/misc/vm_configs/xmls/config-xmls/>`_.
+The predefined scenarios are in the
+:acrn_file:`misc/vm_configs/scenarios` folder
+in the source code. XML examples for some platforms can also be found under
+:acrn_file:`misc/vm_configs/xmls/config-xmls`.
 
 The :ref:`acrn_configuration_tool` tutorial explains how to use the ACRN
 Configuration tool to create your own scenario or modify an existing one.
@@ -1,5 +1,7 @@
 :orphan:
 
+.. _nocl:
+
 .. comment This page is a common place holder for references to /latest/
    documentation that was removed from the 2.2 release but there are
    lingering references to these docs out in the wild and in the Google
doc/release_notes/release_notes_2.3.rst | 149 (new file)
@@ -0,0 +1,149 @@
+.. _release_notes_2.3:
+
+ACRN v2.3 (Dec 2020)
+####################
+
+We are pleased to announce the release of the Project ACRN
+hypervisor version 2.3.
+
+ACRN is a flexible, lightweight reference hypervisor that is built with
+real-time and safety-criticality in mind. It is optimized to streamline
+embedded development through an open source platform. Check out the
+:ref:`introduction` for more information. All project ACRN
+source code is maintained in the
+https://github.com/projectacrn/acrn-hypervisor repository and includes
+folders for the ACRN hypervisor, the ACRN device model, tools, and
+documentation. You can either download this source code as a zip or
+tar.gz file (see the `ACRN v2.3 GitHub release page
+<https://github.com/projectacrn/acrn-hypervisor/releases/tag/v2.3>`_) or
+use Git clone and checkout commands::
+
+   git clone https://github.com/projectacrn/acrn-hypervisor
+   cd acrn-hypervisor
+   git checkout v2.3
+
+The project's online technical documentation is also tagged to
+correspond with a specific release: generated v2.3 documents can be
+found at https://projectacrn.github.io/2.3/. Documentation for the
+latest under-development branch is found at
+https://projectacrn.github.io/latest/.
+
+ACRN v2.3 requires Ubuntu 18.04. Follow the instructions in the
+:ref:`rt_industry_ubuntu_setup` to get started with ACRN.
+
+
+What's New in v2.3
+******************
+
+Enhanced GPU passthru (GVT-d)
+   GPU passthru (GVT-d) to Windows as a guest is now enabled for 11th Gen
+   Intel® Core™ processors (codenamed Tiger Lake-UP3).
+
+Shared memory based inter-VM communication (ivshmem) is extended
+   ivshmem now supports interrupts (see :ref:`ivshmem-hld`).
+
+Enhanced vUART support
+   Added PCI vUART (up to 8) for VM-to-VM communication. Legacy vUART
+   (0x3F8-like) is available for console (debugging) support.
+
+End-to-end secure boot improvement
+   OVMF can now be loaded as two blobs, one for code and the other for data.
+   The code blob can be verified by the Service VM's ``dm-verity`` as
+   a step in the end-to-end secure boot.
+
+Enhanced system shutdown
+   The pre-launched VM may now initiate a system shutdown or reset.
+
+Removed deprivileged boot mode support
+   ACRN has supported deprivileged boot mode to ease the integration of
+   Linux distributions such as Clear Linux. Unfortunately, deprivileged boot
+   mode limits ACRN's scalability and is unsuitable for ACRN's hybrid
+   hypervisor mode. In ACRN v2.2, deprivileged boot mode was no longer the
+   default, and it was completely removed in ACRN v2.3. We're focusing instead
+   on using the simpler and more scalable multiboot2 boot (via Grub).
+   Multiboot2 is not supported in Clear Linux, so we have chosen Ubuntu
+   (and Yocto Project) as the preferred Service VM OSs moving forward.
+
+Document updates
+****************
+
+New and updated reference documents are available, including:
+
+.. rst-class:: rst-columns2
+
+* :ref:`asa`
+* :ref:`GVT-g-porting`
+* :ref:`vbsk-overhead`
+* :ref:`asm_coding_guidelines`
+* :ref:`c_coding_guidelines`
+* :ref:`contribute_guidelines`
+* :ref:`doc_guidelines`
+* :ref:`hld-devicemodel`
+* :ref:`hld-overview`
+* :ref:`hld-power-management`
+* :ref:`hld-security`
+* :ref:`hld-trace-log`
+* :ref:`hld-virtio-devices`
+* :ref:`ivshmem-hld`
+* :ref:`l1tf`
+* :ref:`modularity`
+* :ref:`sw_design_guidelines`
+* :ref:`rt_industry_ubuntu_setup`
+* :ref:`introduction`
+* :ref:`release_notes_2.3`
+* :ref:`acrn_configuration_tool`
+* :ref:`acrn_on_qemu`
+* :ref:`acrn-debug`
+* :ref:`acrn_doc`
+* :ref:`enable_ivshmem`
+* :ref:`enable-s5`
+* :ref:`rdt_configuration`
+* :ref:`rt_performance_tuning`
+* :ref:`rt_perf_tips_rtvm`
+* :ref:`run-kata-containers`
+* :ref:`running_deb_as_serv_vm`
+* :ref:`running_deb_as_user_vm`
+* :ref:`running_ubun_as_user_vm`
+* :ref:`setup_openstack_libvirt`
+* :ref:`sgx_virt`
+* :ref:`sriov_virtualization`
+* :ref:`using_grub`
+* :ref:`using_hybrid_mode_on_nuc`
+* :ref:`using_partition_mode_on_nuc`
+* :ref:`using_windows_as_uos`
+* :ref:`using_zephyr_as_uos`
+* :ref:`vuart_config`
+* :ref:`how-to-enable-secure-boot-for-windows`
+* :ref:`acrn-dm_parameters`
+
+Because we've dropped deprivileged boot mode support, we're also
+switching our Service VM of choice away from Clear Linux and have
+removed Clear Linux-specific tutorials. Deleted documents are still
+available in the `version-specific v2.1 documentation
+<https://projectacrn.github.io/v2.1/>`_.
+
+
+Fixed Issues Details
+********************
+
+- :acrn-issue:`4958` - clean up spin lock for hypervisor
+- :acrn-issue:`5316` - add default BAR GPA for vmsi over msi
+- :acrn-issue:`5346` - Hide PCI bridge 00:1c.0 from SOS
+- :acrn-issue:`5411` - Supporting power off for pre-launched VMs
+- :acrn-issue:`5461` - DM: gvt: Identical mapping for GPU DSM
+- :acrn-issue:`5463` - WaaG hang in boot loading screen in cold boot test.
+- :acrn-issue:`5482` - acrn-config: insert vbdf in hex format and vuart vbdf logic
+- :acrn-issue:`5490` - Unhandled Exception happened in HV console when shutdown YaaG with shm device enabled
+- :acrn-issue:`5491` - TGL can't setup 6 pci-vuarts
+- :acrn-issue:`5498` - Hide IO 0x3f8 for SOS
+- :acrn-issue:`5501` - [WHL][Yocto][Hybrid] in hybrid mode ACRN HV env, can not shutdown pre-launched RTVM
+- :acrn-issue:`5506` - [EHL][SBL][hybrid_rt] in hybrid_rt, TPM can not work in pre-launched RTVM
+- :acrn-issue:`5508` - hv-bugfix-for-hv-emulated-device-de-init_v2.3
+- :acrn-issue:`5514` - Coding style issue for xhci.c
+
+Known Issues
+************
+
+- :acrn-issue:`5151` - [WHL][VxWorks] Launch VxWorks fails due to no suitable video mode found
+- :acrn-issue:`5369` - [TGL][qemu] Cannot launch qemu on TGL
+- :acrn-issue:`5572` - [WHL][Hybrid_rt] build hybrid_rt hv from source, boot RTVM fail
+- :acrn-issue:`5573` - [EHL][logical_partition] build EHL logical_partition hv from source Fail
@@ -8,8 +8,6 @@ using ACRN in a reference setup. We'll show how to set up your
 development and target hardware, and then how to boot the ACRN
 hypervisor, the Service VM, and a User VM on the Intel platform.
 
-.. _Clear Linux: https://clearlinux.org
-
 ACRN is supported on Apollo Lake and Kaby Lake Intel platforms,
 as described in :ref:`hardware`.
 
doc/tutorials/acrn-secure-boot-with-grub.rst | 259 (new file)
@@ -0,0 +1,259 @@
+.. _how-to-enable-acrn-secure-boot-with-grub:
+
+Enable ACRN Secure Boot with GRUB
+#################################
+
+This document shows how to enable ACRN secure boot with GRUB, including:
+
+- ACRN Secure Boot Sequence
+- Generate GPG Key
+- Setup Standalone GRUB EFI Binary
+- Enable UEFI Secure Boot
+
+**Validation Environment:**
+
+- Hardware Platform: TGL-I7, supported hardware described in
+  :ref:`hardware`.
+- ACRN Scenario: Industry
+- Service VM: Yocto & Ubuntu
+- GRUB: 2.04
+
+.. note::
+   GRUB may stop booting in case of problems; make sure you
+   know how to recover a bootloader on your platform.
+
+ACRN Secure Boot Sequence
+*************************
+
+ACRN can be booted by a Multiboot-compatible bootloader. The following diagram
+illustrates the boot sequence of ACRN with GRUB:
+
+.. image:: images/acrn_secureboot_flow.png
+   :align: center
+   :width: 800px
+
+For details on enabling GRUB on ACRN, see :ref:`using_grub`.
+
+From a secure boot point of view:
+
+- UEFI firmware verifies shim/GRUB
+- GRUB verifies ACRN, Service VM kernel, and pre-launched User VM kernel
+- Service VM OS kernel verifies the Device Model (``acrn-dm``) and User
+  VM OVMF bootloader (with the help of ``acrn-dm``)
+- User VM virtual bootloader (e.g. OVMF) starts the guest side verified boot process
+
+This document shows you how to enable GRUB to
+verify ACRN binaries such as ``acrn.bin``, the Service VM kernel (``bzImage``), and,
+if present, a pre-launched User VM kernel image.
+
+.. rst-class:: numbered-step
+
+Generate GPG Key
+****************
+
+GRUB supports loading GPG signed files only if digital signatures are
+enabled. Here's an example of generating a GPG signing key::
+
+   mkdir --mode 0700 keys
+   gpg --homedir keys --gen-key
+   gpg --homedir keys --export > boot.key
+
+The :command:`gpg --gen-key` generates a public and private key pair.
+The private key is used to sign GRUB configuration files and ACRN
+binaries. The public key will be embedded in GRUB and is used to verify
+GRUB configuration files or binaries GRUB tries to load.
+
+.. rst-class:: numbered-step
+
+Setup Standalone GRUB EFI Binary
+********************************
+
+Prepare Initial GRUB Configuration grub.init.cfg
+================================================
+
+Create file ``grub.init.cfg`` to store the following minimal GRUB
+configuration. The environment variable ``check_signatures=enforce``
+tells GRUB to enable digital signatures::
+
+   set check_signatures=enforce
+   export check_signatures
+
+   search --no-floppy --fs-uuid --set=root ESP_UUID
+   configfile /grub.cfg
+   echo /grub.cfg did not boot the system, rebooting in 10 seconds.
+   sleep 10
+   reboot
+
+Replace the ESP_UUID with the UUID of your EFI system partition (found
+by running :command:`lsblk -f`). In the example output below,
+the UUID is ``24FC-BE7A``:
+
+.. code-block:: console
+   :emphasize-lines: 2
+
+   sda
+   ├─sda1 vfat   ESP    24FC-BE7A                            /boot/efi
+   ├─sda2 vfat   OS     7015-557F
+   ├─sda3 ext4   UBUNTU e8640994-b2a3-45ad-9b72-e68960fb22f0 /
+   └─sda4 swap          262d1113-64be-4910-a700-670b9d2277cc [SWAP]
+
+Enable Authentication in GRUB
+=============================
+
+With authentication enabled, a user/password is required to restrict
+access to the GRUB shell, where arbitrary commands could be run.
+A typical GRUB configuration fragment (added to ``grub.init.cfg``) might
+look like this::
+
+   set superusers="root"
+   export superusers
+   password_pbkdf2 root GRUB_PASSWORD_HASH
+
+Replace ``GRUB_PASSWORD_HASH`` with the result of running
+:command:`grub-mkpasswd-pbkdf2` with your custom passphrase.
+
+Use this command to sign the :file:`grub.init.cfg` file with your private
+GPG key and create the :file:`grub.init.cfg.sig`::
+
+   gpg --homedir keys --detach-sign grub.init.cfg
+
+Create Standalone GRUB EFI Binary
+=================================
+
+Use the ``grub-mkstandalone`` tool to create a standalone GRUB EFI binary
+file with the built-in modules and the signed ``grub.init.cfg`` file.
+The ``--pubkey`` option adds a GPG public key that will be used for
+verification. The public key ``boot.key`` is no longer required.
+
+.. note::
+   You should make a backup copy of your current GRUB image
+   (:file:`grubx64.efi`) before replacing it with the new signed GRUB image.
+   This would allow you to restore GRUB in case of errors updating it.
+
+Here's an example sequence to do this build::
+
+   #!/bin/bash
+   #
+
+   TARGET_EFI='path/to/grubx64.efi'
+
+   # GRUB doesn't allow loading new modules from disk when secure boot is in
+   # effect, therefore pre-load the required modules.
+
+   MODULES="all_video archelp boot bufio configfile crypto echo efi_gop efi_uga ext2 extcmd \
+            fat font fshelp gcry_dsa gcry_rsa gcry_sha1 gcry_sha512 gettext gfxterm linux linuxefi ls \
+            memdisk minicmd mmap mpi normal part_gpt part_msdos password_pbkdf2 pbkdf2 reboot relocator \
+            search search_fs_file search_fs_uuid search_label sleep tar terminal verifiers video_fb"
+
+   grub-mkstandalone \
+       --directory /usr/lib/grub/x86_64-efi \
+       --format x86_64-efi \
+       --modules "$MODULES" \
+       --pubkey ./boot.key \
+       --output ./grubx64.efi \
+       "boot/grub/grub.cfg=./grub.init.cfg" \
+       "boot/grub/grub.cfg.sig=./grub.init.cfg.sig"
+
+   echo "writing signed grub.efi to '$TARGET_EFI'"
+   sudo cp ./grubx64.efi "$TARGET_EFI"
+
+.. rst-class:: numbered-step
+
+Prepare grub.cfg
+****************
+
+Define the menu entry for your system in a new GRUB configuration :file:`grub.cfg`.
+For example::
+
+   # @/boot/efi/grub.cfg for grub secure boot
+   set timeout_style=menu
+   set timeout=5
+   set gfxmode=auto
+   set gfxpayload=keep
+   terminal_output gfxterm
+
+   menuentry "ACRN Multiboot Ubuntu Service VM" --users "" --id ubuntu-service-vm {
+
+      search --no-floppy --fs-uuid --set 3df12ea1-ef12-426b-be98-774665c7483a
+
+      echo 'loading ACRN...'
+      multiboot2 /boot/acrn/acrn.bin root=PARTUUID="c8ee7d92-8935-4e86-9e12-05dbeb412ad6"
+      module2 /boot/bzImage Linux_bzImage
+   }
+
+Use the output of :command:`blkid` to find the right values for the
+UUID (``--set``) and PARTUUID (``root=PARTUUID=`` parameter) of the root
+partition (e.g. ``/dev/nvme0n1p2``) according to your hardware.
+
+Copy this new :file:`grub.cfg` to your ESP (e.g. ``/boot/efi/EFI/``).
+
+.. rst-class:: numbered-step
+
+Sign grub.cfg and ACRN Binaries
+*******************************
+
+The :file:`grub.cfg` and all ACRN binaries that will be loaded by GRUB
+**must** be signed with the same GPG key.
+
+Here's an example sequence for signing the individual binaries::
+
+   gpg --homedir keys --detach-sign path/to/grub.cfg
+   gpg --homedir keys --detach-sign path/to/acrn.bin
+   gpg --homedir keys --detach-sign path/to/sos_kernel/bzImage
+
+Now you can reboot and the system will boot with the signed GRUB EFI binary.
+GRUB will refuse to boot if any files it attempts to load have been tampered
+with.
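If these artifacts need re-signing after every rebuild, the ``gpg`` invocations can be scripted. A small convenience sketch (assuming ``gpg`` is on the PATH and the ``keys`` homedir created earlier; the paths are the same placeholders used above):

   import subprocess

   ARTIFACTS = [
       "path/to/grub.cfg",            # placeholder paths from the steps above
       "path/to/acrn.bin",
       "path/to/sos_kernel/bzImage",
   ]

   for artifact in ARTIFACTS:
       # Produces <artifact>.sig next to each file, overwriting stale ones.
       subprocess.run(
           ["gpg", "--homedir", "keys", "--batch", "--yes",
            "--detach-sign", artifact],
           check=True)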
+
+.. rst-class:: numbered-step
+
+Enable UEFI Secure Boot
+***********************
+
+Creating UEFI Secure Boot Key
+=============================
+
+- Generate your own keys for Secure Boot::
+
+     openssl req -new -x509 -newkey rsa:2048 -subj "/CN=PK/" -keyout PK.key -out PK.crt -days 7300 -nodes -sha256
+     openssl req -new -x509 -newkey rsa:2048 -subj "/CN=KEK/" -keyout KEK.key -out KEK.crt -days 7300 -nodes -sha256
+     openssl req -new -x509 -newkey rsa:2048 -subj "/CN=db/" -keyout db.key -out db.crt -days 7300 -nodes -sha256
+
+- Convert ``*.crt`` keys to the ESL format understood for UEFI::
+
+     cert-to-efi-sig-list PK.crt PK.esl
+     cert-to-efi-sig-list KEK.crt KEK.esl
+     cert-to-efi-sig-list db.crt db.esl
+
+- Sign ESL files::
+
+     sign-efi-sig-list -k PK.key -c PK.crt PK PK.esl PK.auth
+     sign-efi-sig-list -k PK.key -c PK.crt KEK KEK.esl KEK.auth
+     sign-efi-sig-list -k KEK.key -c KEK.crt db db.esl db.auth
+
+The keys to be enrolled in UEFI firmware: :file:`PK.der`, :file:`KEK.der`, :file:`db.der`.
+The keys to sign the bootloader image (:file:`grubx64.efi`): :file:`db.key`, :file:`db.crt`.
+
+Sign GRUB Image With ``db`` Key
+===============================
+
+::
+
+   sbsign --key db.key --cert db.crt path/to/grubx64.efi
+
+:file:`grubx64.efi.signed` will be created; it will be your bootloader.
+
+Enroll UEFI Keys To UEFI Firmware
+=================================
+
+Enroll ``PK`` (:file:`PK.der`), ``KEK`` (:file:`KEK.der`), and ``db``
+(:file:`db.der`) in the Secure Boot Configuration UI, which depends on your
+platform UEFI firmware. In the UEFI configuration menu UI, follow the steps
+in :ref:`this section <qemu_inject_boot_keys>` that show how to enroll UEFI
+keys, using your own key files. From now on, only EFI binaries
+signed with any ``db`` key (:file:`grubx64.efi.signed` in this case) can
+be loaded by UEFI firmware.
@@ -110,6 +110,9 @@ Additional scenario XML elements:
 
 ``SERIAL_CONSOLE`` (a child node of ``DEBUG_OPTIONS``):
    Specify the host serial device is used for hypervisor debugging.
+   This configuration is valid only if the Service VM ``legacy_vuart0``
+   is enabled. Leave this field empty if the Service VM ``console_vuart``
+   is enabled; use ``bootargs`` for the ``console_vuart`` configuration.
 
 ``MEM_LOGLEVEL`` (a child node of ``DEBUG_OPTIONS``):
    Specify the default log level in memory.
@@ -294,7 +297,7 @@ Additional scenario XML elements:
 
 ``bootargs`` (a child node of ``os_config``):
    For internal use only and is not configurable. Specify the kernel boot arguments
-   in bootargs under the parent of board_private.
+   in ``bootargs`` under the parent of ``board_private``.
 
 ``kern_load_addr`` (a child node of ``os_config``):
    The loading address in host memory for the VM kernel.
@@ -302,27 +305,45 @@ Additional scenario XML elements:
 ``kern_entry_addr`` (a child node of ``os_config``):
    The entry address in host memory for the VM kernel.
 
-``vuart``:
+``legacy_vuart``:
    Specify the vUART (aka COM) with the vUART ID by its ``id`` attribute.
    Refer to :ref:`vuart_config` for detailed vUART settings.
 
-``type`` (a child node of ``vuart``):
+``console_vuart``:
+   Specify the console vUART (aka PCI based vUART) with the vUART ID by
+   its ``id`` attribute.
+   Refer to :ref:`vuart_config` for detailed vUART settings.
+
+``communication_vuart``:
+   Specify the communication vUART (aka PCI based vUART) with the vUART ID by
+   its ``id`` attribute.
+   Refer to :ref:`vuart_config` for detailed vUART settings.
+
+``type`` (a child node of ``legacy_vuart``):
    vUART (aka COM) type; currently only supports the legacy PIO mode.
 
-``base`` (a child node of ``vuart``):
+``base`` (a child node of ``legacy_vuart``, ``console_vuart``, and ``communication_vuart``):
    vUART (A.K.A COM) enabling switch. Enable by exposing its COM_BASE
    (SOS_COM_BASE for Service VM); disable by returning INVALID_COM_BASE.
 
-``irq`` (a child node of ``vuart``):
+   Console and communication vUART (A.K.A PCI based vUART) enabling switch.
+   Enable by specifying PCI_VUART; disable by returning INVALID_PCI_BASE.
+
+``irq`` (a child node of ``legacy_vuart``):
    vCOM IRQ.
 
-``target_vm_id`` (a child node of ``vuart1``):
+``target_vm_id`` (a child node of ``legacy_vuart1``, ``communication_vuart``):
    COM2 is used for VM communications. When it is enabled, specify which
    target VM the current VM connects to.
 
-``target_uart_id`` (a child node of ``vuart1``):
+   ``communication_vuart`` is used for VM communications. When it is enabled, specify
+   which target VM the current VM connects to.
+
+``target_uart_id`` (a child node of ``legacy_vuart1`` and ``communication_vuart``):
    Target vUART ID to which the vCOM2 connects.
 
+   Target vUART ID to which the ``communication_vuart`` connects.
+
 ``pci_dev_num``:
    PCI devices number of the VM; it is hard-coded for each scenario so it
    is not configurable for now.
@ -486,7 +507,7 @@ Here is the offline configuration tool workflow:
|
|||||||
specified board name.
|
specified board name.
|
||||||
|
|
||||||
| **Native Linux requirement:**
|
| **Native Linux requirement:**
|
||||||
| **Release:** Ubuntu 18.04+ or Clear Linux 30210+
|
| **Release:** Ubuntu 18.04+
|
||||||
| **Tools:** cpuid, rdmsr, lspci, dmidecode (optional)
|
| **Tools:** cpuid, rdmsr, lspci, dmidecode (optional)
|
||||||
| **Kernel cmdline:** "idle=nomwait intel_idle.max_cstate=0 intel_pstate=disable"
|
| **Kernel cmdline:** "idle=nomwait intel_idle.max_cstate=0 intel_pstate=disable"
|
||||||
|
|
||||||
@@ -66,6 +66,32 @@ enable it using the :ref:`acrn_configuration_tool` with these steps:

- Build the XML configuration; refer to :ref:`getting-started-building`

ivshmem notification mechanism
******************************

The notification (doorbell) mechanism of the ivshmem device allows VMs with
ivshmem devices enabled to notify (interrupt) each other following this flow:

Notification Sender (VM):
   The VM triggers the notification to the target VM by writing the target
   Peer ID (equal to the VM ID of the target VM) and the vector index to the
   doorbell register of the ivshmem device. The layout of the doorbell
   register is described in :ref:`ivshmem-hld`.

Hypervisor:
   When the doorbell register is programmed, the hypervisor searches for the
   target VM by the target Peer ID and injects an MSI interrupt into the
   target VM.

Notification Receiver (VM):
   The VM receives the MSI interrupt and forwards it to the related
   application.

ACRN supports up to 8 (MSI-X) interrupt vectors per ivshmem device.
Guest VMs shall implement their own mechanism to forward MSI interrupts
to applications.
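
To make the sender's side concrete, here is a minimal, hedged sketch of the
doorbell write from a guest shell. It is not part of the official tooling:
the BAR0 address (``0xa1000000``), the ``0x0C`` doorbell offset, and the
field layout (target Peer ID in the high 16 bits, vector index in the low
16 bits) are assumptions based on the conventional ivshmem register layout;
confirm them against :ref:`ivshmem-hld` and your device's ``lspci -vv`` output.

.. code-block:: bash

   # Hypothetical doorbell poke from the sender VM (all values are assumptions).
   BAR0=0xa1000000   # MMIO base of the ivshmem device's BAR0 (check lspci -vv)
   PEER_ID=2         # VM ID of the notification receiver
   VECTOR=0          # MSI-X vector index (0..7)
   # Doorbell register assumed at BAR0 + 0x0C: (peer_id << 16) | vector
   sudo busybox devmem $((BAR0 + 0x0C)) 32 $(( (PEER_ID << 16) | VECTOR ))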

.. note:: Notification is supported only for HV-land ivshmem devices. (Future
   support may include notification for DM-land ivshmem devices.)

Inter-VM Communication Examples
*******************************

@@ -27,6 +27,7 @@ The diagram below shows the overall architecture:

.. figure:: images/s5_overall_architecture.png
   :align: center
   :name: s5-architecture

   S5 overall architecture

@@ -160,22 +161,20 @@ The procedure for enabling S5 is specific to the particular OS:

How to test
***********
As described in :ref:`vuart_config`, two vUARTs are defined in
pre-defined ACRN scenarios: vUART0/ttyS0 for the console and
vUART1/ttyS1 for S5-related communication (as shown in :ref:`s5-architecture`).

For a Yocto Project (Poky) or Ubuntu rootfs, the ``serial-getty``
service for ``ttyS1`` conflicts with the S5-related communication
use of ``vUART1``. We can eliminate the conflict by preventing
that service from being started, either automatically or manually,
by masking it with this command::

   systemctl mask serial-getty@ttyS1.service

#. Refer to the :ref:`enable_s5` section to set up the S5 environment for the User VMs.

@@ -206,3 +205,45 @@ How to test

   # acrnctl list
   vm1 stopped

System Shutdown
***************

A graceful system shutdown can be performed by using a coordinating script,
``misc/life_mngr/s5_trigger.sh``, in conjunction with the lifecycle manager
in each VM.

.. note:: Install ``s5_trigger.sh`` manually in root's home directory:

.. code-block:: none

   $ sudo install -p -m 0755 -t ~root misc/life_mngr/s5_trigger.sh

In the ``hybrid_rt`` scenario, the script can send a shutdown command via ``ttyS1``
in the Service VM, which is connected to ``ttyS1`` in the pre-launched VM. The
lifecycle manager in the pre-launched VM receives the shutdown command, sends an
ack message, and proceeds to shut itself down accordingly.

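For a quick manual check of that channel (independent of the script), you can
exercise the vUART from a shell. This is a hedged sketch: the request and ack
strings (``shutdown`` / ``acked``) are assumptions about the lifecycle
manager's protocol; verify them against the ``misc/life_mngr`` sources.

.. code-block:: bash

   # In the pre-launched VM: watch the S5 channel for traffic.
   sudo cat /dev/ttyS1 &

   # In the Service VM: send a shutdown request over its side of the channel.
   sudo bash -c 'echo "shutdown" > /dev/ttyS1'

   # The lifecycle manager in the pre-launched VM should answer with an
   # ack ("acked") and begin shutting that VM down.
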
.. figure:: images/system_shutdown.png
   :align: center

   Graceful system shutdown flow

#. The HMI Windows Guest uses the lifecycle manager to send a shutdown request to
   the Service VM
#. The lifecycle manager in the Service VM responds with an ack message and
   executes ``s5_trigger.sh``
#. After receiving the ack message, the lifecycle manager in the HMI Windows Guest
   shuts down the guest
#. The ``s5_trigger.sh`` script in the Service VM shuts down the Linux Guest by
   using ``acrnctl`` to send a shutdown request
#. After receiving the shutdown request, the lifecycle manager in the Linux Guest
   responds with an ack message and shuts down the guest
#. The ``s5_trigger.sh`` script in the Service VM shuts down the Pre-launched RTVM
   by sending a shutdown request to its ``ttyS1``
#. After receiving the shutdown request, the lifecycle manager in the Pre-launched
   RTVM responds with an ack message
#. The lifecycle manager in the Pre-launched RTVM shuts down the guest using
   standard PM registers
#. After receiving the ack message, the ``s5_trigger.sh`` script in the Service VM
   shuts down the Service VM
#. The hypervisor shuts down the system after all of its guests have shut down

BIN doc/tutorials/images/acrn_secureboot_flow.png (new binary file, 10 KiB; not shown)
BIN doc/tutorials/images/system_shutdown.png (new binary file, 15 KiB; not shown)
@@ -50,8 +50,7 @@ install Ubuntu on the NVMe drive, and use grub to launch the Service VM.
Install Pre-Launched RT Filesystem on SATA and Kernel Image on NVMe
===================================================================

Follow the :ref:`install-ubuntu-rtvm-sata` guide to install the RT rootfs on the SATA drive.

The Kernel should
be on the NVMe drive along with GRUB. You'll need to copy the RT kernel
@@ -94,6 +93,7 @@ like this:
   multiboot2 /EFI/BOOT/acrn.bin
   module2 /EFI/BOOT/bzImage_RT RT_bzImage
   module2 /EFI/BOOT/bzImage Linux_bzImage
   module2 /boot/ACPI_VM0.bin ACPI_VM0
}

Reboot the system, and it will boot into Pre-Launched RT Mode
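
After the reboot, a hedged way to confirm that both the pre-launched RTVM and
the Service VM came up is to list the VMs from the ACRN hypervisor shell on
the serial console (``vm_list`` is the shell's VM listing command; the exact
output format varies by release):

.. code-block:: none

   ACRN:\>vm_list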

@@ -9,9 +9,8 @@ individuals who have made common cause to create a `free
stable Debian release <https://www.debian.org/releases/stable/>`_ is
10.0.

This tutorial describes how to use Debian 10.0 as the Service VM OS with
the ACRN hypervisor.

Prerequisites
*************
@@ -24,10 +23,10 @@ Use the following instructions to install Debian.
  the bottom of the page).
- Follow the `Debian installation guide
  <https://www.debian.org/releases/stable/amd64/index.en.html>`_ to
  install it on your board; we are using a Kaby Lake Intel NUC (NUC7i7DNHE)
  in this tutorial.
- :ref:`install-build-tools-dependencies` for ACRN.
- Update to the newer iASL:

  .. code-block:: bash

@@ -43,90 +42,95 @@ Use the following instructions to install Debian.
Validated Versions
******************

- **Debian version:** 10.1 (buster)
- **ACRN hypervisor tag:** acrn-2020w40.1-180000p
- **Debian Service VM Kernel version:** release_2.2

Install ACRN on the Debian VM
*****************************

#. Clone the `Project ACRN <https://github.com/projectacrn/acrn-hypervisor>`_ code repository:

   .. code-block:: bash

      $ cd ~
      $ git clone https://github.com/projectacrn/acrn-hypervisor
      $ cd acrn-hypervisor
      $ git checkout acrn-2020w40.1-180000p

#. Build and install ACRN:

   .. code-block:: bash

      $ make all BOARD_FILE=misc/vm_configs/xmls/board-xmls/nuc7i7dnb.xml SCENARIO_FILE=misc/vm_configs/xmls/config-xmls/nuc7i7dnb/industry.xml RELEASE=0
      $ sudo make install
      $ sudo mkdir /boot/acrn/
      $ sudo cp ~/acrn-hypervisor/build/hypervisor/acrn.bin /boot/acrn/

#. Build and install the Service VM kernel:

   .. code-block:: bash

      $ mkdir ~/sos-kernel && cd ~/sos-kernel
      $ git clone https://github.com/projectacrn/acrn-kernel
      $ cd acrn-kernel
      $ git checkout release_2.2
      $ cp kernel_config_uefi_sos .config
      $ make olddefconfig
      $ make all
      $ sudo make modules_install
      $ sudo cp arch/x86/boot/bzImage /boot/bzImage

#. Update Grub for the Debian Service VM.

   Update the ``/etc/grub.d/40_custom`` file as shown below.

   .. note::
      Enter the command line for the kernel in ``/etc/grub.d/40_custom`` as
      a single line and not as multiple lines. Otherwise, the kernel will
      fail to boot.

   .. code-block:: none

      menuentry "ACRN Multiboot Debian Service VM" --id debian-service-vm {
        recordfail
        load_video
        insmod gzio
        insmod part_gpt
        insmod ext2

        search --no-floppy --fs-uuid --set 9bd58889-add7-410c-bdb7-1fbc2af9b0e1
        echo 'loading ACRN...'
        multiboot2 /boot/acrn/acrn.bin root=PARTUUID="e515916d-aac4-4439-aaa0-33231a9f4d83"
        module2 /boot/bzImage Linux_bzImage
      }

   .. note::
      Update this entry to use the UUID (``--set``) and PARTUUID (``root=``
      parameter) (or use the device node directly) of the root partition
      (e.g. ``/dev/nvme0n1p2``). Hint: use ``sudo blkid <device node>``.

      Update the kernel name if you used a different name as the source
      for your Service VM kernel.

#. Modify the ``/etc/default/grub`` file to make the Grub menu visible when
   booting and make it load the Service VM kernel by default. Modify the
   lines shown below:

   .. code-block:: none

      GRUB_DEFAULT=debian-service-vm
      #GRUB_TIMEOUT_STYLE=hidden
      GRUB_TIMEOUT=5
      GRUB_CMDLINE_LINUX="text"

#. Update Grub on your system:

   .. code-block:: none

      $ sudo update-grub
      $ sudo reboot

#. Log in to the Debian Service VM and check the ACRN status:

   .. code-block:: bash
@@ -137,16 +141,10 @@ Install ACRN on the Debian VM
      [ 0.982837] ACRN HVLog: Failed to init last hvlog devs, errno -19
      [ 0.983023] ACRN HVLog: Initialized hvlog module with 4 cp

Enable network sharing to give network access to the User VM
************************************************************

.. code-block:: bash

   $ sudo systemctl enable systemd-networkd
   $ sudo systemctl start systemd-networkd

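A quick, hedged way to confirm the Service VM networking came up (the bridge
name ``acrn-br0`` is the one ACRN's network setup conventionally creates;
adjust if yours differs):

.. code-block:: bash

   networkctl list          # systemd-networkd's view of the links
   ip addr show acrn-br0    # the bridge should be up and hold an address
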

@@ -9,13 +9,10 @@ Prerequisites
This tutorial assumes you have already set up the ACRN Service VM on an
Intel NUC Kit. If you have not, refer to the following instructions:

- Install a `Ubuntu 18.04 desktop ISO
  <http://releases.ubuntu.com/18.04.3/ubuntu-18.04.3-desktop-amd64.iso?_ga=2.160010942.221344839.1566963570-491064742.1554370503>`_
  on your board.
- Follow the :ref:`install-ubuntu-Service VM-NVMe` guide to set up the Service VM.

We are using a Kaby Lake Intel NUC (NUC7i7DNHE) and Debian 10 as the User VM in this tutorial.

@@ -63,9 +60,9 @@ Hardware Configurations
Validated Versions
==================

- **Ubuntu version:** 18.04
- **ACRN hypervisor tag:** v2.2
- **Service VM Kernel version:** v2.2

Build the Debian KVM Image
**************************

@@ -9,14 +9,11 @@ Prerequisites
This tutorial assumes you have already set up the ACRN Service VM on an
Intel NUC Kit. If you have not, refer to the following instructions:

- Install a `Ubuntu 18.04 desktop ISO
  <http://releases.ubuntu.com/18.04.3/ubuntu-18.04.3-desktop-amd64.iso?_ga=2.160010942.221344839.1566963570-491064742.1554370503>`_
  on your board.
- Follow the :ref:`install-ubuntu-Service VM-NVMe` instructions to set up the Service VM.

Before you start this tutorial, make sure the KVM tools are installed on the
development machine and set **IGD Aperture Size to 512** in the BIOS
@@ -62,9 +59,9 @@ Hardware Configurations
Validated Versions
==================

- **Ubuntu version:** 18.04
- **ACRN hypervisor tag:** v2.2
- **Service VM Kernel version:** v2.2

.. _build-the-ubuntu-kvm-image:


@@ -12,25 +12,23 @@ to avoid crashing your system and to take advantage of easy
snapshots/restores so that you can quickly roll back your system in the
event of setup failure. (You should only install OpenStack directly on Ubuntu if
you have a dedicated testing machine.) This setup utilizes LXC/LXD on
Ubuntu 18.04.

Install ACRN
************

#. Install ACRN using Ubuntu 18.04 as its Service VM. Refer to
   :ref:`Build and Install ACRN on Ubuntu <build-and-install-acrn-on-ubuntu>`.

#. Make the acrn-kernel using the `kernel_config_uefi_sos
   <https://raw.githubusercontent.com/projectacrn/acrn-kernel/master/kernel_config_uefi_sos>`_
   configuration file (from the ``acrn-kernel`` repo).

#. Add the following kernel boot arg to give the Service VM more memory
   and more loop devices. Refer to the `Kernel Boot Parameters
   <https://wiki.ubuntu.com/Kernel/KernelBootParameters>`_ documentation::

      hugepagesz=1G hugepages=10 max_loop=16

#. Boot the Service VM with this new ``acrn-kernel`` using the ACRN
   hypervisor.
@@ -40,17 +38,15 @@ Install ACRN
   <https://maslosoft.com/kb/how-to-clean-old-snaps/>`_ to clean up old
   snap revisions if you're running out of loop devices.
#. Make sure the networking bridge ``acrn-br0`` is created. If not,
   create it using the instructions in
   :ref:`Build and Install ACRN on Ubuntu <build-and-install-acrn-on-ubuntu>`.

Set up and launch LXC/LXD
*************************

1. Set up the LXC/LXD Linux container engine using these `instructions
   <https://ubuntu.com/tutorials/tutorial-setting-up-lxd-1604>`_ provided
   by Ubuntu.

   Refer to the following additional information for the setup
   procedure:
@@ -59,8 +55,10 @@ Set up and launch LXC/LXD
     backend).
   - Answer ``dir`` (and not ``zfs``) when prompted for the name of the storage backend to use.
   - Set up ``lxdbr0`` as instructed.
   - Before launching a container, install lxc-utils with ``apt-get install lxc-utils`` and
     make sure ``lxc-checkconfig | grep missing`` does not show any missing kernel features
     except ``CONFIG_NF_NAT_IPV4`` and ``CONFIG_NF_NAT_IPV6``, which were renamed in recent
     kernels (see the sketch after this list).

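A minimal sketch of that pre-flight check, using exactly the package and
command named above:

.. code-block:: bash

   sudo apt-get install -y lxc-utils
   # Only the renamed CONFIG_NF_NAT_IPV4/IPV6 entries may show as missing:
   lxc-checkconfig | grep missing
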
2. Create an Ubuntu 18.04 container named ``openstack``::

@@ -128,7 +126,7 @@ Set up and launch LXC/LXD

8. Log in to the ``openstack`` container again::

      $ lxc exec openstack -- su -l

9. If needed, set up the proxy inside the ``openstack`` container via
   ``/etc/environment`` and make sure ``no_proxy`` is properly set up.
@@ -139,7 +137,7 @@ Set up and launch LXC/LXD

10. Add a new user named **stack** and set permissions::

       $ useradd -s /bin/bash -d /opt/stack -m stack
       $ echo "stack ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers

11. Log off and restart the ``openstack`` container::
@@ -166,17 +164,15 @@ Set up ACRN prerequisites inside the container

      $ git clone https://github.com/projectacrn/acrn-hypervisor
      $ cd acrn-hypervisor
      $ git checkout v2.3
      $ make
      $ cd misc/acrn-manager/; make

   Install only the user-space components: ``acrn-dm``, ``acrnctl``, and
   ``acrnd``.

3. Download, compile, and install ``iasl``. Refer to
   :ref:`Build and Install ACRN on Ubuntu <build-and-install-acrn-on-ubuntu>`.

Set up libvirt
**************
@@ -185,7 +181,7 @@ Set up libvirt

      $ sudo apt install libdevmapper-dev libnl-route-3-dev libnl-3-dev python \
        automake autoconf autopoint libtool xsltproc libxml2-utils gettext \
        libxml2-dev libpciaccess-dev gnutls-dev python3-docutils

2. Download libvirt/ACRN::
@@ -195,7 +191,9 @@ Set up libvirt

3. Build and install libvirt::

      $ cd acrn-libvirt
      $ mkdir build
      $ cd build
      $ ../autogen.sh --prefix=/usr --disable-werror --with-test-suite=no \
        --with-qemu=no --with-openvz=no --with-vmware=no --with-phyp=no \
        --with-vbox=no --with-lxc=no --with-uml=no --with-esx=no

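The remaining build-and-install steps are elided in this hunk; as a hedged
reminder, the usual autotools pair from inside the ``build`` directory is:

.. code-block:: bash

   make -j"$(nproc)"
   sudo make install
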
@ -223,11 +221,12 @@ Use DevStack to install OpenStack. Refer to the `DevStack instructions <https://
|
|||||||
|
|
||||||
$ git clone https://opendev.org/openstack/devstack.git -b stable/train
|
$ git clone https://opendev.org/openstack/devstack.git -b stable/train
|
||||||
|
|
||||||
2. Go into the ``devstack`` directory and apply an ACRN patch::
|
2. Go into the ``devstack`` directory, download an ACRN patch from
|
||||||
|
:acrn_raw:`doc/tutorials/0001-devstack-installation-for-acrn.patch`,
|
||||||
|
and apply it ::
|
||||||
|
|
||||||
$ cd devstack
|
$ cd devstack
|
||||||
$ curl https://raw.githubusercontent.com/projectacrn/acrn-hypervisor/master/doc/tutorials/0001-devstack-installation-for-acrn.patch \
|
$ git apply 0001-devstack-installation-for-acrn.patch
|
||||||
| git apply
|
|
||||||
|
|
||||||
3. Edit ``lib/nova_plugins/hypervisor-libvirt``:
|
3. Edit ``lib/nova_plugins/hypervisor-libvirt``:
|
||||||
|
|
||||||
@@ -2,11 +2,10 @@

Getting Started Guide for ACRN hybrid mode
##########################################

ACRN hypervisor supports a hybrid scenario where the User VM (such as Zephyr
or Ubuntu) runs in a pre-launched VM or in a post-launched VM that is
launched by a Device model in the Service VM.

.. figure:: images/hybrid_scenario_on_nuc.png
   :align: center
@@ -15,6 +14,14 @@ as shown in :numref:`hybrid_scenario_on_nuc`.

   The Hybrid scenario on the Intel NUC

The following guidelines
describe how to set up the ACRN hypervisor hybrid scenario on the Intel NUC,
as shown in :numref:`hybrid_scenario_on_nuc`.

.. contents::
   :local:
   :depth: 1

Prerequisites
*************
- Use the `Intel NUC Kit NUC7i7DNHE <https://www.intel.com/content/www/us/en/products/boards-kits/nuc/kits/nuc7i7dnhe.html>`_.
@@ -22,6 +29,8 @@ Prerequisites
- Install Ubuntu 18.04 on your SATA device or on the NVME disk of your
  Intel NUC.

.. rst-class:: numbered-step

Update Ubuntu GRUB
******************

@ -78,8 +87,10 @@ Perform the following to update Ubuntu GRUB so it can boot the hypervisor and lo
|
|||||||
the ACRN hypervisor on the Intel NUC's display. The GRUB loader will boot the
|
the ACRN hypervisor on the Intel NUC's display. The GRUB loader will boot the
|
||||||
hypervisor, and the hypervisor will start the VMs automatically.
|
hypervisor, and the hypervisor will start the VMs automatically.
|
||||||
|
|
||||||
Hybrid Scenario Startup Checking
|
.. rst-class:: numbered-step
|
||||||
********************************
|
|
||||||
|
Hybrid Scenario Startup Check
|
||||||
|
*****************************
|
||||||
#. Use these steps to verify that the hypervisor is properly running:
|
#. Use these steps to verify that the hypervisor is properly running:
|
||||||
|
|
||||||
a. Log in to the ACRN hypervisor shell from the serial console.
|
a. Log in to the ACRN hypervisor shell from the serial console.
|
||||||

@@ -10,12 +10,16 @@ guidelines provide step-by-step instructions on how to set up the ACRN
hypervisor logical partition scenario on Intel NUC while running two
pre-launched VMs.

.. contents::
   :local:
   :depth: 1

Validated Versions
******************

- Ubuntu version: **18.04**
- ACRN hypervisor tag: **v2.3**
- ACRN kernel tag: **v2.3**

Prerequisites
*************
@@ -35,6 +39,8 @@ Prerequisites
The two pre-launched VMs will mount the root file systems via the SATA controller and
the USB controller respectively.

.. rst-class:: numbered-step

Update kernel image and modules of pre-launched VM
**************************************************
#. On your development workstation, clone the ACRN kernel source tree, and
@@ -97,6 +103,8 @@ Update kernel image and modules of pre-launched VM

      $ sudo cp <path-to-kernel-image-built-in-step1>/bzImage /boot/

.. rst-class:: numbered-step

Update ACRN hypervisor image
****************************

@@ -137,13 +145,13 @@ Update ACRN hypervisor image
   Refer to :ref:`getting-started-building` to set up the ACRN build
   environment on your development workstation.

   Clone the ACRN source code and check out the v2.3 tag:

   .. code-block:: none

      $ git clone https://github.com/projectacrn/acrn-hypervisor.git
      $ cd acrn-hypervisor
      $ git checkout v2.3

   Build the ACRN hypervisor and ACPI binaries for pre-launched VMs with the default XMLs:

@@ -179,6 +187,8 @@ Update ACRN hypervisor image
#. Copy the ``acrn.bin``, ``ACPI_VM0.bin``, and ``ACPI_VM1.bin`` from the removable disk to the ``/boot``
   directory.

.. rst-class:: numbered-step

Update Ubuntu GRUB to boot hypervisor and load kernel image
***********************************************************

@@ -237,8 +247,10 @@ Update Ubuntu GRUB to boot hypervisor and load kernel image
the Intel NUC's display. The GRUB loader will boot the hypervisor, and the
hypervisor will automatically start the two pre-launched VMs.

.. rst-class:: numbered-step

Logical partition scenario startup check
****************************************

#. Use these steps to verify that the hypervisor is properly running:


@@ -5,7 +5,7 @@ Run VxWorks as the User VM

`VxWorks`_\* is a real-time proprietary OS designed for use in embedded systems requiring real-time, deterministic
performance. This tutorial describes how to run VxWorks as the User VM on the ACRN hypervisor
based on an Ubuntu Service VM (ACRN tag v2.0).

.. note:: You'll need to be a Wind River* customer and have purchased VxWorks to follow this tutorial.

@@ -92,10 +92,8 @@ Steps for Using VxWorks as User VM

You now have a virtual disk image with bootable VxWorks in ``VxWorks.img``.

#. Follow :ref:`install-ubuntu-Service VM-NVMe` to boot the ACRN Service VM.

#. Boot VxWorks as User VM.

@@ -92,11 +92,9 @@ Steps for Using Zephyr as User VM
   the ACRN Service VM, then you will need to transfer this image to the
   ACRN Service VM (via, e.g., a USB drive or network)

#. Follow :ref:`install-ubuntu-Service VM-NVMe`
   to boot "The ACRN Service OS" based on Ubuntu OS (ACRN tag: v2.2)

#. Boot Zephyr as User VM

@@ -48,6 +48,8 @@ Console enable list
| | (vUART enable) | (vUART enable) | RTVM | |
+-----------------+-----------------------+--------------------+----------------+----------------+

.. _how-to-configure-a-console-port:

How to configure a console port
===============================

@@ -71,6 +73,8 @@ Example:
      .irq = COM1_IRQ,
   },

.. _how-to-configure-a-communication-port:

How to configure a communication port
=====================================

@@ -139,7 +143,7 @@ Test the communication port
===========================

After you have configured the communication port in the hypervisor, you can
access the corresponding port. For example, in a Linux OS:

1. With ``echo`` and ``cat``

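The details of that step are elided in this hunk; as a hedged sketch, under
the assumption that the connected communication port shows up as
``/dev/ttyS1`` in both VMs (check ``dmesg | grep tty`` first):

.. code-block:: bash

   # On the receiving VM: read from the communication port.
   sudo cat /dev/ttyS1

   # On the sending VM: write a test string to its connected port.
   sudo bash -c 'echo "hello from the other VM" > /dev/ttyS1'
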
@ -214,3 +218,191 @@ started, as shown in the diagram below:
|
|||||||
the hypervisor is not sufficient. Currently, we recommend that you use
|
the hypervisor is not sufficient. Currently, we recommend that you use
|
||||||
the configuration in the figure 3 data flow. This may be refined in the
|
the configuration in the figure 3 data flow. This may be refined in the
|
||||||
future.
|
future.
|
||||||
|
|
||||||
|
Use PCI-vUART
|
||||||
|
#############
|
||||||
|
|
||||||
|
PCI Interface of ACRN vUART
|
||||||
|
===========================
|
||||||
|
|
||||||
|
When you set :ref:`vuart[0] and vuart[1] <vuart_config>`, the ACRN
|
||||||
|
hypervisor emulates virtual legacy serial devices (I/O port and IRQ) for
|
||||||
|
VMs. So ``vuart[0]`` and ``vuart[1]`` are legacy vUARTs. ACRN
|
||||||
|
hypervisor can also emulate virtual PCI serial devices (BDF, MMIO
|
||||||
|
registers and MSIX capability). These virtual PCI serial devices are
|
||||||
|
called PCI-vUART, and have an advantage in device enumeration for the
|
||||||
|
guest OS. It is easy to add new PCI-vUART ports to a VM.
|
||||||
|
|
||||||
|
.. _index-of-vuart:
|
||||||
|
|
||||||
|
Index of vUART
|
||||||
|
==============
|
||||||
|
|
||||||
|
ACRN hypervisor supports PCI-vUARTs and legacy vUARTs as ACRN vUARTs.
|
||||||
|
Each vUART port has its own ``vuart_idx``. ACRN hypervisor supports up
|
||||||
|
to 8 vUART for each VM, from ``vuart_idx=0`` to ``vuart_idx=7``.
|
||||||
|
Suppose we use vUART0 for a port with ``vuart_idx=0``, vUART1 for
|
||||||
|
``vuart_idx=1``, and so on.
|
||||||
|
|
||||||
|
Please pay attention to these points:
|
||||||
|
|
||||||
|
* vUART0 is the console port, vUART1-vUART7 are inter-VM communication ports.
|
||||||
|
* Each communication port must set the connection to another communication vUART port of another VM.
|
||||||
|
* When legacy ``vuart[0]`` is available, it is vUART0. A PCI-vUART can't
|
||||||
|
be vUART0 unless ``vuart[0]`` is not set.
|
||||||
|
* When legacy ``vuart[1]`` is available, it is vUART1. PCI-vUART can't
|
||||||
|
be vUART1 unless ``vuart[1]`` is not set.
|
||||||
|
|
||||||
|
Setup ACRN vUART Using Configuration Tools
|
||||||
|
==========================================
|
||||||
|
|
||||||
|
When you set up ACRN VM configurations with PCI-vUART, it is better to
|
||||||
|
use the ACRN configuration tools because of all the PCI resources required: BDF number,
|
||||||
|
address and size of mmio registers, and address and size of MSIX entry
|
||||||
|
tables. These settings can't conflict with another PCI device. Furthermore,
|
||||||
|
whether PCI-vUART can use ``vuart_idx=0`` and ``vuart_idx=1`` depends on legacy
|
||||||
|
vUART settings. Configuration tools will override your settings in
|
||||||
|
:ref:`How to Configure a Console Port <how-to-configure-a-console-port>`
|
||||||
|
and :ref:`How to Configure a Communication Port
|
||||||
|
<how-to-configure-a-communication-port>`.
|
||||||
|
|
||||||
|
You can configure both Legacy vUART and PCI-vUART in
|
||||||
|
``./misc/vm_configs/xmls/config-xmls/<board>/<scenario>.xml``. For
|
||||||
|
example, if VM0 has a legacy vUART0 and a PCI-vUART1, VM1 has no legacy
|
||||||
|
vUART but has a PCI-vUART0 and a PCI-vUART1, VM0's PCI-vUART1 and VM1's
|
||||||
|
PCI-vUART1 are connected to each other. You should configure then like this:
|
||||||
|
|
||||||
|
.. code-block:: none
|
||||||
|
|
||||||
|
<vm id="0">
|
||||||
|
<legacy_vuart id="0">
|
||||||
|
<type>VUART_LEGACY_PIO</type> /* vuart[0] is console port */
|
||||||
|
<base>COM1_BASE</base> /* vuart[0] is used */
|
||||||
|
<irq>COM1_IRQ</irq>
|
||||||
|
</legacy_vuart>
|
||||||
|
<legacy_vuart id="1">
|
||||||
|
<type>VUART_LEGACY_PIO</type>
|
||||||
|
<base>INVALID_COM_BASE</base> /* vuart[1] is not used */
|
||||||
|
</legacy_vuart>
|
||||||
|
<console_vuart id="0">
|
||||||
|
<base>INVALID_PCI_BASE</base> /* PCI-vUART0 can't be used, because vuart[0] */
|
||||||
|
</console_vuart>
|
||||||
|
<communication_vuart id="1">
|
||||||
|
<base>PCI_VUART</base> /* PCI-vUART1 is communication port, connect to vUART1 of VM1 */
|
||||||
|
<target_vm_id>1</target_vm_id>
|
||||||
|
<target_uart_id>1</target_uart_id>
|
||||||
|
</communication_vuart>
|
||||||
|
</vm>
|
||||||
|
|
||||||
|
<vm id="1">
|
||||||
|
<legacy_vuart id="0">
|
||||||
|
<type>VUART_LEGACY_PIO</type>
|
||||||
|
<base>INVALID_COM_BASE</base> /* vuart[0] is not used */
|
||||||
|
</legacy_vuart>
|
||||||
|
<legacy_vuart id="1">
|
||||||
|
<type>VUART_LEGACY_PIO</type>
|
||||||
|
<base>INVALID_COM_BASE</base> /* vuart[1] is not used */
|
||||||
|
</legacy_vuart>
|
||||||
|
<console_vuart id="0">
|
||||||
|
<base>PCI_VUART</base> /* PCI-vUART0 is console port */
|
||||||
|
</console_vuart>
|
||||||
|
<communication_vuart id="1">
|
||||||
|
<base>PCI_VUART</base> /* PCI-vUART1 is communication port, connect to vUART1 of VM0 */
|
||||||
|
<target_vm_id>0</target_vm_id>
|
||||||
|
<target_uart_id>1</target_uart_id>
|
||||||
|
</communication_vuart>
|
||||||
|
</vm>
|
||||||
|
|
||||||
|
The ACRN vUART related XML fields:
|
||||||
|
|
||||||
|
- ``id`` in ``<legacy_vuart>``, value of ``vuart_idx``, ``id=0`` is for
|
||||||
|
legacy ``vuart[0]`` configuration, ``id=1`` is for ``vuart[1]``.
|
||||||
|
- ``type`` in ``<legacy_vuart>``, type is always ``VUART_LEGACY_PIO``
|
||||||
|
for legacy vUART.
|
||||||
|
- ``base`` in ``<legacy_vuart>``, if use the legacy vUART port, set
|
||||||
|
COM1_BASE for ``vuart[0]``, set ``COM2_BASE`` for ``vuart[1]``.
|
||||||
|
``INVALID_COM_BASE`` means do not use the legacy vUART port.
|
||||||
|
- ``irq`` in ``<legacy_vuart>``, if you use the legacy vUART port, set
|
||||||
|
``COM1_IRQ`` for ``vuart[0]``, set ``COM2_IRQ`` for ``vuart[1]``.
|
||||||
|
- ``id`` in ``<console_vuart>`` and ``<communication_vuart>``,
|
||||||
|
``vuart_idx`` for PCI-vUART
|
||||||
|
- ``base`` in ``<console_vuart>`` and ``<communication_vuart>``,
|
||||||
|
``PCI_VUART`` means use this PCI-vUART, ``INVALID_PCI_BASE`` means do
|
||||||
|
not use this PCI-VUART.
|
||||||
|
- ``target_vm_id`` and ``target_uart_id``, connection settings for this
|
||||||
|
vUART port.
|
||||||
|
|
||||||
|
Run the command to build ACRN with this XML configuration file::
|
||||||
|
|
||||||
|
make BOARD_FILE=$PWD/misc/acrn-config/xmls/board-xmls/<board>.xml \
|
||||||
|
SCENARIO_FILE=$PWD/misc/acrn-config/xmls/config-xmls/<board>/<scenario>.xml
|
||||||
|
|
||||||
|
The configuration tools will test your settings, and check :ref:`vUART
|
||||||
|
Rules <index-of-vuart>` for compilation issue. After compiling, you can find
|
||||||
|
``./misc/vm_configs/scenarios/<scenario>/<board>/pci_dev.c`` has been
|
||||||
|
changed by the configuration tools based on the XML settings, something like:
|
||||||
|
|
||||||
|
.. code-block:: none
|
||||||
|
|
||||||
|
struct acrn_vm_pci_dev_config vm0_pci_devs[] = {
|
||||||
|
{
|
||||||
|
.emu_type = PCI_DEV_TYPE_HVEMUL,
|
||||||
|
.vbdf.bits = {.b = 0x00U, .d = 0x05U, .f = 0x00U},
|
||||||
|
.vdev_ops = &vmcs9900_ops,
|
||||||
|
.vbar_base[0] = 0x80003000,
|
||||||
|
.vbar_base[1] = 0x80004000,
|
||||||
|
.vuart_idx = 1, /* PCI-vUART1 of VM0 */
|
||||||
|
.t_vuart.vm_id = 1U, /* connected to VM1's vUART1 */
|
||||||
|
.t_vuart.vuart_id = 1U,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
This struct shows a PCI-vUART with ``vuart_idx=1``, ``BDF 00:05.0``, its
|
||||||
|
a PCI-vUART1 of
|
||||||
|
VM0, and it is connected to VM1's vUART1 port. When VM0 wants to communicate
|
||||||
|
with VM1, it can use ``/dev/ttyS*``, the character device file of
|
||||||
|
VM0's PCI-vUART1. Usually, legacy ``vuart[0]`` is ``ttyS0`` in VM, and
|
||||||
|
``vuart[1]`` is ``ttyS1``. So we hope PCI-vUART0 is ``ttyS0``,
|
||||||
|
PCI-VUART1 is ``ttyS1`` and so on through
|
||||||
|
PCI-vUART7 is ``ttyS7``, but that is not true. We can use BDF to identify
|
||||||
|
PCI-vUART in VM.
|
||||||
|
|
||||||
|
If you run ``dmesg | grep tty`` at a VM shell, you may see:
|
||||||
|
|
||||||
|
.. code-block:: none
|
||||||
|
|
||||||
|
[ 1.276891] 0000:00:05.0: ttyS4 at MMIO 0xa1414000 (irq = 124, base_baud = 115200) is a 16550A
|
||||||
|
|
||||||
|
We know for VM0 guest OS, ``ttyS4`` has BDF 00:05.0 and is PCI-vUART1.
|
||||||
|
VM0 can communicate with VM1 by reading from or writing to ``/dev/ttyS4``.
|
||||||
|
|
||||||
|
If VM0 and VM1 are pre-launched VMs, or Service VM, ACRN hypervisor will
|
||||||
|
create PCI-vUART virtual devices automatically. For post-launched VMs,
|
||||||
|
created by ``acrn-dm``, an additional ``acrn-dm`` option is needed
|
||||||
|
to create a PCI-vUART virtual device:
|
||||||
|
|
||||||
|
.. code-block:: none
|
||||||
|
|
||||||
|
-s <slot>,uart,vuart_idx:<val>
|
||||||
|
|
||||||
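For instance, a hedged example of launching a post-launched VM with a
PCI-vUART2 at virtual slot 10 (the slot number and index are illustrative,
and the surrounding ``acrn-dm`` arguments are elided):

.. code-block:: bash

   acrn-dm ... -s 10,uart,vuart_idx:2 ...
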
Kernel Config for Legacy vUART
==============================

When the ACRN hypervisor passes a local APIC through to a VM, there is an
IRQ injection issue for the legacy vUART. The kernel driver must work in
polling mode to avoid the problem. The VM kernel should have these config
symbols set:

.. code-block:: none

   CONFIG_SERIAL_8250_EXTENDED=y
   CONFIG_SERIAL_8250_DETECT_IRQ=y

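A quick, hedged way to verify that a built guest kernel has both options
(the path to the kernel config is an assumption; adjust for your build tree):

.. code-block:: bash

   grep -E 'CONFIG_SERIAL_8250_(EXTENDED|DETECT_IRQ)=y' acrn-kernel/.config
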
Kernel Cmdline for PCI-vUART console
====================================

When an ACRN VM does not have a legacy ``vuart[0]`` but has a
PCI-vUART0, you can use the PCI-vUART0 for VM serial input/output. Check
which tty has the BDF of PCI-vUART0; usually it is not ``/dev/ttyS0``.
For example, if ``/dev/ttyS4`` is PCI-vUART0, you must set
``console=ttyS4`` in the kernel cmdline.
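
A hedged example of where that setting lands in a GRUB entry (the kernel
path, root device, and ``ttyS4`` name are illustrative; match the tty name
to your own ``dmesg | grep tty`` output):

.. code-block:: none

   linux /boot/bzImage root=/dev/sda2 rw console=ttyS4,115200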

@@ -473,6 +473,8 @@ Notes:

- Make sure your GCC is 5.X. GCC 6 and above is NOT supported.

.. _qemu_inject_boot_keys:

Use QEMU to inject secure boot keys into OVMF
*********************************************

@@ -55,13 +55,13 @@ Here are descriptions for each of these ``acrn-dm`` command line parameters:
- ACRN implements GVT-g for graphics virtualization (aka AcrnGT). This
  option allows you to set some of its parameters.

  GVT_args format: ``low_gm_sz high_gm_sz fence_sz``

  Where:

  - ``low_gm_sz``: GVT-g aperture size, unit is MB
  - ``high_gm_sz``: GVT-g hidden gfx memory size, unit is MB
  - ``fence_sz``: the number of fence registers

  Example::

@@ -54,8 +54,8 @@ container::

   # acrnctl add launch_uos.sh -C

.. note:: You can download an :acrn_raw:`example launch_uos.sh script
   <devicemodel/samples/nuc/launch_uos.sh>`
   that supports the ``-C`` (``run_container`` function) option.

Note that the launch script must only launch one User VM instance.

@@ -179,7 +179,6 @@ This time when you boot your target system you'll see some new options:
   Advanced options for Ubuntu
   System setup
   *ACRN multiboot2

If your target system has a serial port active, you can simply hit
:kbd:`return` (or wait for the timeout) to boot with this