ACRN: misc: Unify terminology for sos/uos in macros

Rename SOS_VM_NUM to SERVICE_VM_NUM.
rename SOS_SOCKET_PORT to SERVICE_VM_SOCKET_PORT.
rename PROCESS_RUN_IN_SOS to PROCESS_RUN_IN_SERVICE_VM.
rename PCI_DEV_TYPE_SOSEMUL to PCI_DEV_TYPE_SERVICE_VM_EMUL.
rename SHUTDOWN_REQ_FROM_SOS to SHUTDOWN_REQ_FROM_SERVICE_VM.
rename SHUTDOWN_REQ_FROM_UOS to SHUTDOWN_REQ_FROM_USER_VM.
rename UOS_SOCKET_PORT to USER_VM_SOCKET_PORT.
rename SOS_CONSOLE to SERVICE_VM_OS_CONSOLE.
rename SOS_LCS_SOCK to SERVICE_VM_LCS_SOCK.
rename SOS_VM_BOOTARGS to SERVICE_VM_OS_BOOTARGS.
rename SOS_ROOTFS to SERVICE_VM_ROOTFS.
rename SOS_IDLE to SERVICE_VM_IDLE.
rename SEVERITY_SOS to SEVERITY_SERVICE_VM.
rename SOS_VM_UUID to SERVICE_VM_UUID.
rename SOS_REQ to SERVICE_VM_REQ.
rename RTCT_NATIVE_FILE_PATH_IN_SOS to RTCT_NATIVE_FILE_PATH_IN_SERVICE_VM.
rename CBC_REQ_T_UOS_ACTIVE to CBC_REQ_T_USER_VM_ACTIVE.
rename CBC_REQ_T_UOS_INACTIVE to CBC_REQ_T_USER_VM_INACTIVE.
rename uos_active to user_vm_active.

Tracked-On: #6744
Signed-off-by: Liu Long <long.liu@linux.intel.com>
Reviewed-by: Geoffroy Van Cutsem <geoffroy.vancutsem@intel.com>
Liu Long 2021-10-29 15:06:51 +08:00 committed by wenlingz
parent e9c4ced460
commit 14c6e21efa
50 changed files with 231 additions and 226 deletions

View File

@ -67,8 +67,8 @@ static int
vm_suspend_handler(void *arg)
{
/*
* Invoke vm_stop_handler directly in here since suspend of UOS is
* set by UOS power button setting.
* Invoke vm_stop_handler directly in here since suspend of User VM is
* set by User VM power button setting.
*/
return vm_stop_handler(arg);
}
@ -211,7 +211,7 @@ power_button_init(struct vmctx *ctx)
}
/*
* Suspend or shutdown UOS by acrnctl suspend and
* Suspend or shutdown User VM by acrnctl suspend and
* stop command.
*/
if (monitor_run == false) {
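For reference, the acrnctl commands referred to in the comment above are run from the Service VM shell and take the VM name, e.g. "acrnctl suspend <vm_name>" and "acrnctl stop <vm_name>".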

View File

@ -491,7 +491,7 @@ static int read_sys_info(const char *sys_path)
return pages;
}
/* check if enough free huge pages for the UOS */
/* check if enough free huge pages for the User VM */
static bool hugetlb_check_memgap(void)
{
int lvl, free_pages, need_pages;
@ -590,7 +590,7 @@ static bool release_larger_freepage(int level_limit)
* other info:
*. even enough free memory, it is easier to reserve smaller pages than
* larger ones, for example: 2MB easier than 1GB. One flow of current solution:
*.it could leave SOS very small free memory.
*.it could leave Service VM very small free memory.
*.return value: true: success; false: failure
*/
static bool hugetlb_reserve_pages(void)
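The reservation itself goes through the kernel's hugetlb sysfs knob. A minimal sketch of the idea, not the dm implementation (the sysfs path shown is the standard kernel one for 2MB pages):

#include <stdbool.h>
#include <stdio.h>

/* Ask the kernel to reserve 'pages' huge pages of one size class, e.g.
 * path = "/sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages". */
static bool reserve_hugepages_sketch(const char *path, int pages)
{
	FILE *fp = fopen(path, "w");
	bool ok;

	if (fp == NULL)
		return false;
	ok = (fprintf(fp, "%d", pages) > 0); /* kernel reserves on this write */
	fclose(fp);
	return ok;
}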

View File

@ -411,7 +411,7 @@ handle_vmexit(struct vmctx *ctx, struct acrn_io_request *io_req, int vcpu)
(*handler[exitcode])(ctx, io_req, &vcpu);
/* We cannot notify the HSM/hypervisor on the request completion at this
* point if the UOS is in suspend or system reset mode, as the VM is
* point if the User VM is in suspend or system reset mode, as the VM is
* still not paused and a notification can kick off the vcpu to run
* again. Postpone the notification till vm_system_reset() or
* vm_suspend_resume() for resetting the ioreq states in the HSM and
@ -617,7 +617,7 @@ vm_system_reset(struct vmctx *ctx)
* 1. pause VM
* 2. flush and clear ioreqs
* 3. reset virtual devices
* 4. load software for UOS
* 4. load software for User VM
* 5. hypercall reset vm
* 6. reset suspend mode to VM_SUSPEND_NONE
*/
@ -635,7 +635,7 @@ vm_system_reset(struct vmctx *ctx)
* When handling emergency mode triggered by one vcpu without
* offlining any other vcpus, there can be multiple IO requests
* with various states. We should be careful on potential races
* when resetting especially in SMP SOS. vm_clear_ioreq can be used
* when resetting especially in SMP Service VM. vm_clear_ioreq can be used
* to clear all ioreq status in HSM after VM pause, then let VM
* reset in hypervisor reset all ioreqs.
*/
@ -835,7 +835,7 @@ main(int argc, char *argv[])
/*
* Ignore SIGPIPE signal and handle the error directly when write()
* function fails. this will help us to catch the write failure rather
* than crashing the UOS.
* than crashing the User VM.
*/
if (signal(SIGPIPE, SIG_IGN) == SIG_ERR)
fprintf(stderr, "cannot register handler for SIGPIPE\n");

View File

@ -127,7 +127,7 @@ static void *intr_storm_monitor_thread(void *arg)
* calc the delta of the two times count of interrupt;
* compare the IRQ num first, if not same just drop it,
* for it just happens rarely when devices are dynamically
* allocation in SOS or UOS, it can be calc next time
* allocated in Service VM or User VM, it can be calculated next time
*/
for (i = 0; i < hdr->buf_cnt; i += 2) {
if (hdr->buffer[i] != intr_cnt_buf[i])
@ -194,7 +194,7 @@ static void stop_intr_storm_monitor(void)
}
/*
.* interrupt monitor setting params, current interrupt mitigation will delay UOS's
.* interrupt monitor setting params, current interrupt mitigation will delay User VM's
.* pass-through devices' interrupt injection, the settings input from acrn-dm:
.* params:
.* threshold: each intr count/second when intr storm happens;
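A sketch of the delta check described above, assuming the shared buffer holds (irq, count) pairs as the i += 2 loop suggests; the mitigation hook is hypothetical:

#include <stdint.h>

extern void mitigate_intr_storm(uint64_t irq); /* hypothetical hook */

static void check_intr_delta_sketch(const uint64_t *now, const uint64_t *prev,
				    uint32_t cnt, uint64_t threshold)
{
	uint32_t i;

	for (i = 0; i < cnt; i += 2) {
		if (now[i] != prev[i])
			continue; /* IRQ set changed; compare next time */
		if (now[i + 1] - prev[i + 1] >= threshold)
			mitigate_intr_storm(now[i]); /* count delta too high */
	}
}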

View File

@ -8,7 +8,7 @@
*
*/
/* vuart can be used communication between SOS and UOS, here it is used as power manager control. */
/* vuart can be used communication between Service VM and User VM, here it is used as power manager control. */
#include <stdio.h>
#include <stdlib.h>
@ -33,7 +33,7 @@
#define SHUTDOWN_CMD "shutdown"
#define CMD_LEN 16
#define MAX_NODE_PATH 128
#define SOS_SOCKET_PORT 0x2000
#define SERVICE_VM_SOCKET_PORT 0x2000
static const char * const node_name[] = {
"pty",
@ -98,7 +98,7 @@ static int pm_setup_socket(void)
memset(&socket_addr, 0, sizeof(struct sockaddr_in));
socket_addr.sin_family = AF_INET;
socket_addr.sin_port = htons(SOS_SOCKET_PORT);
socket_addr.sin_port = htons(SERVICE_VM_SOCKET_PORT);
socket_addr.sin_addr.s_addr = inet_addr("127.0.0.1");
if (connect(socket_fd, (struct sockaddr *)&socket_addr, sizeof(socket_addr)) == -1) {
@ -132,7 +132,7 @@ static void *pm_monitor_loop(void *arg)
if (FD_ISSET(node_fd, &read_fd)) {
if (read_bytes(node_fd, (uint8_t *)buf_node, CMD_LEN,
&count_node, &eof)) {
pr_info("Received msg[%s] from UOS, count=%d\r\n",
pr_info("Received msg[%s] from User VM, count=%d\r\n",
buf_node, count_node);
rc = write(socket_fd, buf_node, count_node);
@ -147,7 +147,7 @@ static void *pm_monitor_loop(void *arg)
if (FD_ISSET(socket_fd, &read_fd)) {
if (read_bytes(socket_fd, (uint8_t *)buf_socket, CMD_LEN,
&count_socket, &eof)) {
pr_info("Received msg[%s] from life_mngr on SOS, count=%d\r\n",
pr_info("Received msg[%s] from life_mngr on Service VM, count=%d\r\n",
buf_socket, count_socket);
pthread_mutex_lock(&pm_vuart_lock);
rc = write(node_fd, buf_socket, count_socket);
@ -196,8 +196,8 @@ static int start_pm_monitor_thread(void)
/*
* --pm_vuart configuration is in the following 2 forms:
* A: pty-link, like: pty,/run/acrn/vuart-vm1, (also set it in -l com2,/run/acrn/vuart-vm1)
* the SOS and UOS will communicate by: SOS:pty-link-node <--> SOS:com2 <--> UOS: /dev/ttyS1
* B: tty-node, like: tty,/dev/ttyS1, SOS and UOS communicate by: SOS:ttyS1 <--> HV <-->UOS:ttySn
* the Service VM and User VM will communicate by: (Service VM):pty-link-node <--> (Service VM):com2 <--> (User VM): /dev/ttyS1
* B: tty-node, like: tty,/dev/ttyS1, (Service VM) and (User VM) communicate by: (Service VM):ttyS1 <--> HV <-->(User VM):ttySn
*/
int parse_pm_by_vuart(const char *opts)
{
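The option string thus starts with the backend type ("pty" or "tty") followed by the node path. A self-contained sketch of the parse under that assumption (not the dm code):

#include <stdio.h>
#include <string.h>

/* Accepts "pty,/run/acrn/vuart-vm1" or "tty,/dev/ttyS1". */
static int parse_pm_vuart_sketch(const char *opts)
{
	char buf[128];
	char *saveptr, *type, *node;

	strncpy(buf, opts, sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = '\0';
	type = strtok_r(buf, ",", &saveptr);
	node = strtok_r(NULL, ",", &saveptr);
	if (type == NULL || node == NULL)
		return -1;
	printf("backend: %s, node: %s\n", type, node);
	return 0;
}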

View File

@ -37,7 +37,7 @@
#define SETUP_SIG 0x5a5aaa55
/* If we load kernel/ramdisk/bootargs directly, the UOS
/* If we load kernel/ramdisk/bootargs directly, the User VM
* memory layout will be like:
*
* | ... |

View File

@ -37,7 +37,7 @@
#include "log.h"
/* If the vsbl is loaded by DM, the UOS memory layout will be like:
/* If the vsbl is loaded by DM, the User VM memory layout will be like:
*
* | ... |
* +--------------------------------------------------+

View File

@ -407,13 +407,13 @@ vm_unsetup_memory(struct vmctx *ctx)
/*
* For security reason, clean the VM's memory region
* to avoid secret information leaking in below case:
* After a UOS is destroyed, the memory will be reclaimed,
* then if the new UOS starts, that memory region may be
* allocated the new UOS, the previous UOS sensitive data
* may be leaked to the new UOS if the memory is not cleared.
* After a User VM is destroyed, the memory will be reclaimed,
* then if the new User VM starts, that memory region may be
* allocated to the new User VM, and the previous User VM's sensitive data
* may be leaked to the new User VM if the memory is not cleared.
*
* For rtvm, we can't clean VM's memory as RTVM may still
* run. But we need to return the memory to SOS here.
* run. But we need to return the memory to Service VM here.
* Otherwise, the VM can't be restarted again.
*/
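A minimal sketch of that policy; the helper that hands the pages back is hypothetical, the point is the scrub-before-release ordering for non-RT VMs:

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

extern void return_region_to_service_vm(void *base, size_t len); /* hypothetical */

static void unsetup_region_sketch(void *base, size_t len, bool is_rtvm)
{
	if (!is_rtvm)
		memset(base, 0, len); /* scrub previous User VM contents */
	/* an RTVM may still be running, so its memory is returned unscrubbed */
	return_region_to_service_vm(base, len);
}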

View File

@ -313,8 +313,8 @@ pci_ivshmem_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
rc = create_ivshmem_from_hv(ctx, dev, name, size);
} else {
/*
* TODO: If UOS reprograms ivshmem BAR2, the shared memory will be
* unavailable for UOS, so we need to remap GPA and HPA of shared
* TODO: If User VM reprograms ivshmem BAR2, the shared memory will be
* unavailable for User VM, so we need to remap GPA and HPA of shared
* memory in this case.
*/
rc = create_ivshmem_from_dm(ctx, dev, name, size);

View File

@ -6,7 +6,7 @@
/*
* The Intel Trace Hub (aka. North Peak, NPK) is a trace aggregator for
* Software, Firmware, and Hardware. On the virtualization platform, it
* can be used to output the traces from SOS/UOS/Hypervisor/FW together
* can be used to output the traces from Service VM/User VM/Hypervisor/FW together
* with unified timestamps.
*
* There are 2 software visible MMIO spaces in the npk pci device. One is
@ -39,50 +39,50 @@
*
* CSR and STMR are treated differently in npk virtualization because:
* 1. CSR configuration should come from just one OS, instead of each OS.
* In our case, it should come from SOS.
* In our case, it should come from Service VM.
* 2. For performance and timing concern, the traces from each OS should
* be written to STMR directly.
*
* Based on these, the npk virtualization is implemented in this way:
* 1. The physical CSR is owned by SOS, and dm/npk emulates a software
* one for the UOS, to keep the npk driver on UOS unchanged. Some CSR
* initial values are configured to make the UOS npk driver think it
* is working on a real npk. The CSR configuration from UOS is ignored
* 1. The physical CSR is owned by Service VM, and dm/npk emulates a software
* one for the User VM, to keep the npk driver on User VM unchanged. Some CSR
* initial values are configured to make the User VM npk driver think it
* is working on a real npk. The CSR configuration from User VM is ignored
* by dm, and it will not bring any side-effect. Because traces are the
* only things needed from UOS, the location to send traces to and the
* only things needed from User VM, the location to send traces to and the
* trace format are not affected by the CSR configuration.
* 2. Part of the physical STMR will be reserved for the SOS, and the
* others will be passed through to the UOS, so that the UOS can write
* 2. Part of the physical STMR will be reserved for the Service VM, and the
* others will be passed through to the User VM, so that the User VM can write
* the traces to the MMIO space directly.
*
* A parameter is needed to indicate the offset and size of the Masters
* to pass through to the UOS. For example, "-s 0:2,npk,512/256", there
* to pass through to the User VM. For example, "-s 0:2,npk,512/256", there
* are 256 Masters from #768 (256+512, #256 is the starting Master for
* software tracing) passed through to the UOS.
* software tracing) passed through to the User VM.
*
* CSR STMR
* SOS: +--------------+ +----------------------------------+
* | physical CSR | | Reserved for SOS | |
* Service VM: +--------------+ +----------------------------------+
* | physical CSR | | Reserved for Service VM | |
* +--------------+ +----------------------------------+
* UOS: +--------------+ +---------------+
* | sw CSR by dm | | mapped to UOS |
* +--------------+ +---------------+
* User VM: +--------------+ +------------------+
* | sw CSR by dm | | mapped to User VM|
* +--------------+ +------------------+
*
* Here is an overall flow about how it works.
* 1. System boots up, and the npk driver on SOS is loaded.
* 1. System boots up, and the npk driver on Service VM is loaded.
* 2. The dm is launched with parameters to enable npk virtualization.
* 3. The dm/npk sets up a bar for CSR, and some values are initialized
* based on the parameters, for example, the total number of Masters for
* the UOS.
* the User VM.
* 4. The dm/npk sets up a bar for STMR, and maps part of the physical
* STMR to it with an offset, according to the parameters.
* 5. The UOS boots up, and the native npk driver on the UOS is loaded.
* 6. Enable the traces from UOS, and the traces are written directly to
* 5. The User VM boots up, and the native npk driver on the User VM is loaded.
* 6. Enable the traces from User VM, and the traces are written directly to
* STMR, but not output by npk for now.
* 7. Enable the npk output on SOS, and now the traces are output by npk
* 7. Enable the npk output on Service VM, and now the traces are output by npk
* to the selected target.
* 8. If the memory is the selected target, the traces can be retrieved
* from memory on SOS, after stopping the traces.
* from memory on Service VM, after stopping the traces.
*/
#include <stdio.h>
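A sketch of parsing the Masters range from the option described above ("512/256" means 256 Masters starting 512 past the first software Master):

#include <inttypes.h>
#include <stdio.h>

static int parse_npk_masters_sketch(const char *opt, uint32_t *m_off, uint32_t *m_num)
{
	/* "-s 0:2,npk,512/256" -> m_off = 512, m_num = 256 */
	if (sscanf(opt, "%" SCNu32 "/%" SCNu32, m_off, m_num) != 2)
		return -1;
	return 0;
}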
@ -173,7 +173,7 @@ static inline int valid_param(uint32_t m_off, uint32_t m_num)
/*
* Set up a bar for CSR, and some values are initialized based on the
* parameters, for example, the total number of Masters for the UOS.
* parameters, for example, the total number of Masters for the User VM.
* Set up a bar for STMR, and map part of the physical STMR to it with
* an offset, according to the parameters.
*/
@ -204,12 +204,13 @@ static int pci_npk_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
* v v v v
* +--------------------+--------------------+-------------------+
* | | | |
* | Reserved for SOS | Mapped for UOS#x | |
* |Reserved for | Mapped for | |
* | Service VM | User VM#x | |
* | | | |
* +--------------------+--------------------+-------------------+
* ^ ^
* | |
* +--sw_bar for host +--sw_bar for UOS#x
* +--sw_bar for host +--sw_bar for User VM#x
*/
/* get the host offset and the number for this guest */
@ -285,7 +286,7 @@ static int pci_npk_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
}
/*
* map this part of STMR to the guest so that the traces from UOS are
* map this part of STMR to the guest so that the traces from User VM are
* written directly to it.
*/
error = vm_map_ptdev_mmio(ctx, dev->bus, dev->slot, dev->func,
@ -300,7 +301,7 @@ static int pci_npk_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
d = &regs_default_val[i];
npk_csr[d->csr].data.u32[d->offset >> 2] = d->default_val;
}
/* setup the SW Master Start/Stop and Channels per Master for UOS */
/* setup the SW Master Start/Stop and Channels per Master for User VM */
npk_sth_reg32(NPK_CSR_STHCAP0) = NPK_SW_MSTR_STRT |
((m_num + NPK_SW_MSTR_STRT - 1) << 16);
npk_sth_reg32(NPK_CSR_STHCAP1) = ((NPK_SW_MSTR_STRT - 1) << 24) |
@ -325,7 +326,7 @@ static void pci_npk_deinit(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
npk_in_use = 0;
}
/* the CSR configuration from UOS will not take effect on the physical NPK */
/* the CSR configuration from User VM will not take effect on the physical NPK */
static void pci_npk_write(struct vmctx *ctx, int vcpu, struct pci_vdev *dev,
int baridx, uint64_t offset, int size, uint64_t value)
{

View File

@ -318,10 +318,10 @@ cfginit(struct vmctx *ctx, struct passthru_dev *ptdev, int bus,
irq_type = ACRN_PTDEV_IRQ_INTX;
}
/* If SOS kernel provides 'reset' entry in sysfs, related dev has some
/* If Service VM kernel provides 'reset' entry in sysfs, related dev has some
* reset capability, e.g. FLR, or secondary bus reset. We do 2 things:
* - reset each dev before passthrough to achieve valid dev state after
* UOS reboot
* User VM reboot
* - refuse to passthrough PCIe dev without any reset capability
*/
if (ptdev->need_reset) {
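The 'reset' entry referred to above is the standard PCI sysfs node; a minimal sketch of triggering it before passthrough (not the dm implementation):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Returns 0 after a successful reset, -1 if no reset capability is exposed. */
static int reset_pdev_sketch(int domain, int bus, int slot, int func)
{
	char path[80];
	ssize_t n;
	int fd;

	snprintf(path, sizeof(path),
		 "/sys/bus/pci/devices/%04x:%02x:%02x.%x/reset",
		 domain, bus, slot, func);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	n = write(fd, "1", 1);
	close(fd);
	return (n == 1) ? 0 : -1;
}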

View File

@ -26,14 +26,15 @@
* | | +------------------+
* | | |
* +------------------+ | +------------------+
* | CoreU SOS Daemon | | | CoreU UOS Daemon |
* | CoreU Service VM | | | CoreU User VM |
* | Daemon | | | Daemon |
* +------------------+ | +------------------+
* |
* Service OS User Space | User OS User Space
* Service VM User Space | User VM User Space
* |
* -------------------------- | ---------------------------
* |
* Service OS Kernel Space | User OS Kernel Space
* Service VM Kernel Space | User VM Kernel Space
* |
* | +------------------+
* | | CoreU Frontend |
@ -41,12 +42,12 @@
* | |
* +-------------+
*
* Above diagram illustrates the CoreU architecture in ACRN. In SOS, CoreU
* daemon starts upon the system boots. In UOS, CoreU daemon gets the PAVP
* Above diagram illustrates the CoreU architecture in ACRN. In Service VM, CoreU
* daemon starts upon system boot. In the User VM, the CoreU daemon gets the PAVP
* session status by open/read/write /dev/coreu0 which is created by CoreU
* frontend, instead of accessing GPU. Then the CoreU frontend sends the
* requests to the CoreU backend thru virtio mechanism. CoreU backend talks to
* CoreU SOS daemon to get the PAVP session status.
* CoreU Service VM daemon to get the PAVP session status.
*
*/
@ -324,7 +325,7 @@ virtio_coreu_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
* connect to coreu daemon in init phase
*
* @FIXME if failed connecting to CoreU daemon, the return value should
* be set appropriately for SOS not exposing the CoreU PCI device to UOS
* be set appropriately for Service VM not exposing the CoreU PCI device to User VM
*/
vcoreu->fd = connect_coreu_daemon();
if (vcoreu->fd < 0) {

View File

@ -39,17 +39,17 @@
* +---------+ | | |
* v v v |
* +----------------+ +-----+ +----------------+ | +---------------+
* -+ /dev/gpiochip0 +---+ ... +---+ /dev/gpiochipN +-----+ UOS +-
* -+ /dev/gpiochip0 +---+ ... +---+ /dev/gpiochipN +-----+ User VM +-
* + + + + + + | +/dev/gpiochip0 +
* +------------+---+ +--+--+ +-------------+--+ | +------+--------+
* Kernel space | +--------------+ | | |
* +--------------------+ | | | |
* v v v | v
* +---------------------+ +---------------------+ | +-------------+
* | | | | | |UOS Virtio |
* +---------------------+ +---------------------+ | +--------------+
* | | | | | |User VM Virtio|
* | pinctrl subsystem |<---+ gpiolib subsystem | +->+GPIO Driver |
* | | | | | |
* +--------+------------+ +----------+----------+ +-------------+
* +--------+------------+ +----------+----------+ +--------------+
* | +------------------------+
* | |
* ----------|---|----------------------------------------------------------
@ -65,7 +65,7 @@
/*
* GPIO IRQ virtualization architecture
*
* SOS UOS
* Service VM User VM
* +-------------------------------+
* | virtio GPIO mediator |
* | +-------------------------+ |
@ -85,10 +85,10 @@
* | gpiolib framework| | | |IRQ consumer| |IRQ consumer|
* +------------------+ | | +------------+ +------------+
* | | +----------------------------+
* | | | UOS gpiolib framework |
* | | | User VM gpiolib framework|
* | | +----------------------------+
* | | +----------------------+
* | +-+ UOS virtio GPIO |
* | +-+ User VM virtio GPIO|
* +--->| IRQ chip |
* +----------------------+
*/
@ -640,8 +640,8 @@ virtio_gpio_proc(struct virtio_gpio *gpio, struct iovec *iov, int n)
/*
* if the user provides the name of gpios in the
* command line paremeter, then provide it to UOS,
* otherwise provide the physical name of gpio to UOS.
* command line parameter, then provide it to User VM,
* otherwise provide the physical name of gpio to User VM.
*/
if (strnlen(line->vname, sizeof(line->vname)))
strncpy(data[i].name, line->vname,

View File

@ -20,14 +20,15 @@
* | | +------------------+
* | | |
* +------------------+ | +------------------+
* | HDCP SOS Daemon | | | HDCP UOS Daemon |
* | HDCP Service VM | | | HDCP User VM |
* | Daemon | | | Daemon |
* +------------------+ | +------------------+
* |
* Service OS User Space | User OS User Space
* Service VM User Space | User VM User Space
* |
* -------------------------- | ---------------------------
* |
* Service OS Kernel Space | User OS Kernel Space
* Service VM Kernel Space | User VM Kernel Space
* |
* +------------------+ | +------------------+
* | i915 HDCP Driver | | | HDCP Front End |
@ -35,12 +36,12 @@
* | |
* +-------------+
*
* Above diagram illustrates the HDCP architecture in ACRN. In SOS, HDCP
* library being used by media app. In UOS, HDCP Daemon gets the HDCP
* Above diagram illustrates the HDCP architecture in ACRN. In Service VM, HDCP
* library is used by the media app. In the User VM, the HDCP Daemon gets the HDCP
* request by open/read/write /dev/hdcp0 which is created by HDCP
* frontend, instead of accessing GPU. Then the HDCP frontend sends the
* requests to the HDCP backend thru virtio mechanism. HDCP backend talks to
* HDCP SOS daemon that will ask HDCP Kernel Driver to execute the requsted
* HDCP Service VM daemon that will ask HDCP Kernel Driver to execute the requested
* operation.
*
*/
@ -453,7 +454,7 @@ virtio_hdcp_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
* connect to hdcp daemon in init phase
*
* @FIXME if failed connecting to HDCP daemon, the return value should
* be set appropriately for SOS not exposing the HDCP PCI device to UOS
* be set appropriately for Service VM not exposing the HDCP PCI device to User VM
*/
vhdcp->fd = connect_hdcp_daemon();
if (vhdcp->fd < 0) {

View File

@ -40,12 +40,12 @@
* User space +-------+ | +-----------+ |
* v v v |
* +---------+----+ +-----+--------+ +-----+------+ | +-----------+
* ---+ /dev/i2c-0 +--+ /dev/i2c-1 +--+ /dev/i2c-n +----+--+UOS: |
* ---+ /dev/i2c-0 +--+ /dev/i2c-1 +--+ /dev/i2c-n +----+--+User VM: |
* | | | | | | |/dev/i2c-n |
* +----------+---+ +-------+------+ +-----+------+ | +-----+-----+
* Kernel space v v v | v
* +-----+-------+ +----+--------+ +----+--------+ | +-----+------------+
* |i2c adapter 0| |i2c adapter 1| |i2c adapter n| +->|UOS: |
* |i2c adapter 0| |i2c adapter 1| |i2c adapter n| +->|User VM: |
* | | | | | |virtio i2c adapter|
* +-----+-------+ +-------------+ +-------------+ +------------------+
* --------------+-----------------------------------------

View File

@ -1086,7 +1086,7 @@ vmei_hbm_disconnect_client(struct vmei_host_client *hclient)
disconnect_req.me_addr = hclient->me_addr;
disconnect_req.host_addr = hclient->host_addr;
HCL_DBG(hclient, "DM->UOS: Disconnect Client\n");
HCL_DBG(hclient, "DM->User_VM: Disconnect Client\n");
return vmei_hbm_response(vmei, &disconnect_req, sizeof(disconnect_req));
}
@ -1115,7 +1115,7 @@ vmei_hbm_flow_ctl_req(struct vmei_host_client *hclient)
flow_ctl_req.me_addr = hclient->me_addr;
flow_ctl_req.host_addr = hclient->host_addr;
HCL_DBG(hclient, "DM->UOS: Flow Control\n");
HCL_DBG(hclient, "DM->User_VM: Flow Control\n");
return vmei_hbm_response(vmei, &flow_ctl_req, sizeof(flow_ctl_req));
}
@ -1535,9 +1535,9 @@ vmei_proc_tx(struct virtio_mei *vmei, struct virtio_vq_info *vq)
data_len = iov[1].iov_len;
DPRINTF("TX: UOS->DM, hdr[h=%02d me=%02d comp=%1d] length[%d]\n",
DPRINTF("TX: User_VM->DM, hdr[h=%02d me=%02d comp=%1d] length[%d]\n",
hdr->host_addr, hdr->me_addr, hdr->msg_complete, hdr->length);
vmei_dbg_print_hex("TX: UOS->DM", data, data_len);
vmei_dbg_print_hex("TX: User_VM->DM", data, data_len);
if (hdr->length < data_len) {
pr_err("%s: supplied buffer has invalid size");
@ -1783,7 +1783,7 @@ vmei_rx_callback(int fd, enum ev_type type, void *param)
if (hclient->recv_offset) {
/* still has data in recv_buf, wait guest reading */
HCL_DBG(hclient, "data in recv_buf, wait for UOS reading.\n");
HCL_DBG(hclient, "data in recv_buf, wait for User VM reading.\n");
goto out;
}
@ -1833,7 +1833,7 @@ vmei_proc_vclient_rx(struct vmei_host_client *hclient,
}
len = hclient->recv_offset - hclient->recv_handled;
HCL_DBG(hclient, "RX: DM->UOS: off=%d len=%d\n",
HCL_DBG(hclient, "RX: DM->User_VM: off=%d len=%d\n",
hclient->recv_handled, len);
buf_len = VMEI_BUF_SZ - sizeof(*hdr);
@ -1849,7 +1849,7 @@ vmei_proc_vclient_rx(struct vmei_host_client *hclient,
memcpy(buf, hclient->recv_buf + hclient->recv_handled, len);
hclient->recv_handled += len;
HCL_DBG(hclient, "RX: complete = %d DM->UOS:off=%d len=%d\n",
HCL_DBG(hclient, "RX: complete = %d DM->User_VM:off=%d len=%d\n",
complete, hclient->recv_handled, len);
len += sizeof(struct mei_msg_hdr);
@ -1866,12 +1866,12 @@ vmei_proc_vclient_rx(struct vmei_host_client *hclient,
}
/**
* vmei_proc_rx() process rx UOS
* vmei_proc_rx() processes rx for the User VM
* @vmei: virtio mei device
* @vq: virtio queue
*
* Function looks for client with pending buffer and sends
* it to the UOS.
* it to the User VM.
*
* Locking: Must run under rx mutex
* Return:

View File

@ -29,9 +29,9 @@
* SUCH DAMAGE.
*
* Create virtio rpmb backend VBS-U. This component will work with RPMB FE
* driver to provide one communication channel between UOS and SOS.
* driver to provide one communication channel between User VM and Service VM.
* The message from RPMB daemon in Android will be transferred over the
* channel and finally arrived RPMB physical driver on SOS kernel.
* channel and finally arrives at the RPMB physical driver in the Service VM kernel.
*
*/
@ -75,7 +75,7 @@ struct virtio_rpmb {
struct virtio_vq_info vq;
pthread_mutex_t mtx;
/*
* Different UOS (with vmid) will access physical rpmb area
* Different User VMs (with vmid) will access the physical rpmb area
* with different offsets.
*/
int vmid;

View File

@ -43,9 +43,11 @@
* | | +---------------+ | | | | app |
* | +---------|-----------+ | | +----------------------+
* +------------|--------------+ | echo H or D |
* | SOS USER SPACE | | UOS USER SPACE
* | Service VM | | v User VM
* | User Space | | | User Space
* -------------|--------------------|-------------|-----------------
* v SOS KERNEL SPACE | v UOS KERNEL SPACE
* v SERVICE VM | v User VM
* | Kernel Space | | Kernel Space
* +------------------------------+ | +--------------------------+
* | native drd sysfs interface | | |native drd sysfs interface|
* +------------------------------+ | +--------------------------+

View File

@ -1102,8 +1102,8 @@ static struct {
*/
int create_and_inject_vrtct(struct vmctx *ctx)
{
#define RTCT_NATIVE_FILE_PATH_IN_SOS "/sys/firmware/acpi/tables/PTCT"
#define RTCT_V2_NATIVE_FILE_PATH_IN_SOS "/sys/firmware/acpi/tables/RTCT"
#define RTCT_NATIVE_FILE_PATH_IN_SERVICE_VM "/sys/firmware/acpi/tables/PTCT"
#define RTCT_V2_NATIVE_FILE_PATH_IN_SERVICE_VM "/sys/firmware/acpi/tables/RTCT"
#define RTCT_BUF_LEN 0x200 /* Otherwise, need to modify DSDT_OFFSET corresponding */
@ -1122,9 +1122,9 @@ int create_and_inject_vrtct(struct vmctx *ctx)
};
/* Name of native RTCT table is "PTCT"(v1) or "RTCT"(v2) */
native_rtct_fd = open(RTCT_NATIVE_FILE_PATH_IN_SOS, O_RDONLY);
native_rtct_fd = open(RTCT_NATIVE_FILE_PATH_IN_SERVICE_VM, O_RDONLY);
if (native_rtct_fd < 0) {
native_rtct_fd = open(RTCT_V2_NATIVE_FILE_PATH_IN_SOS, O_RDONLY);
native_rtct_fd = open(RTCT_V2_NATIVE_FILE_PATH_IN_SERVICE_VM, O_RDONLY);
if (native_rtct_fd < 0) {
pr_err("RTCT file is NOT detected.\n");
return -1;

View File

@ -98,8 +98,8 @@ static char virtual_uart_path[32 + MAX_VMNAME_LEN];
* Need to send open channel command to CBC signal char device before receive
* signal data.
* NOTE: Only send open channel command, no need to send close channel since
* close channel command would deactivate the signal channel for all UOS, so
* there will be a SOS service to deactivate signal channel in the future.
* close channel command would deactivate the signal channel for all User VMs, so
* there will be a Service VM service to deactivate signal channel in the future.
*/
static uint8_t cbc_open_channel_command[] = {0xFD, 0x00, 0x00, 0x00};
@ -119,9 +119,9 @@ static int dummy2_sfd = -1;
/*
* VM Manager interfaces description.
*
* +---------+ +---------+ +---------+
* |IOC | VM stop |VM | |SOS |
* |Mediator |<----------------+Manager | |Lifecycle|
* +---------+ +---------+ +----------+
* |IOC | VM stop |VM | |Service VM|
* |Mediator |<----------------+Manager | |Lifecycle |
* | | | | | |
* | | VM suspend | | | |
* | |<----------------+ | | |
@ -131,11 +131,11 @@ static int dummy2_sfd = -1;
* | |get_wakeup_reason| |get wakeup reason| |
* | |for resume flow | |via unix socket | |
* | +---------------->| +---------------->| |
* +---------+ +---------+ +---------+
* +---------+ +---------+ +----------+
*
* Only support stop/resume/suspend in IOC mediator currently.
* For resume request, IOC mediator will get the wakeup reason from SOS
* lifecycle service, then pass to UOS once received HB INIT from UOS.
* For resume request, IOC mediator will get the wakeup reason from Service VM
* lifecycle service, then pass it to the User VM once HB INIT is received from the User VM.
* For stop and suspend requests, they are implemented as wakeup reason of
* ignition button.
*/
@ -166,11 +166,11 @@ static struct monitor_vm_ops vm_ops = {
* +-----------+ +-----------+
*
* INIT state: The state after the IOC mediator has initialized the IOC; all CBC
* protocol packats are handler normally. In this state, UOS has
* protocol packets are handled normally. In this state, the User VM has
* not yet sent active heartbeat.
*
* ACTIVE state: Enter this state if HB ACTIVE event is triggered that indicates
* UOS state has been active and need to set the bit 23(SoC active
* the User VM is active and we need to set bit 23 (SoC active
* bit) in the wakeup reason.
*
* SUSPENDING state: Enter this state if RAM REFRESH event or HB INACTIVE event
@ -898,8 +898,8 @@ send_tx_request(struct ioc_dev *ioc, enum cbc_request_type type)
static int
process_hb_active_event(struct ioc_dev *ioc)
{
/* Enable wakeup reason bit 23 that indicating UOS is active */
return send_tx_request(ioc, CBC_REQ_T_UOS_ACTIVE);
/* Enable wakeup reason bit 23, indicating the User VM is active */
return send_tx_request(ioc, CBC_REQ_T_USER_VM_ACTIVE);
}
/*
@ -917,7 +917,7 @@ process_ram_refresh_event(struct ioc_dev *ioc)
* Tx handler sends the shutdown wakeup reason,
* Then enter suspended state.
*/
rc = send_tx_request(ioc, CBC_REQ_T_UOS_INACTIVE);
rc = send_tx_request(ioc, CBC_REQ_T_USER_VM_INACTIVE);
/*
* TODO: set suspend to PM DM
@ -941,7 +941,7 @@ process_hb_inactive_event(struct ioc_dev *ioc)
* Tx sends the shutdown wakeup reason,
* Then enter shutdown state.
*/
rc = send_tx_request(ioc, CBC_REQ_T_UOS_INACTIVE);
rc = send_tx_request(ioc, CBC_REQ_T_USER_VM_INACTIVE);
/*
* TODO: set shutdown to PM DM
@ -1010,9 +1010,9 @@ process_resume_event(struct ioc_dev *ioc)
}
/*
* The signal channel is inactive after SOS resumed, need to send
* The signal channel is inactive after the Service VM resumes; we need to send
* open channel command again to activate the signal channel.
* And it would not impact to UOS itself enter/exit S3.
* It would not impact the User VM's own S3 entry/exit.
*/
if (ioc_ch_xmit(IOC_NATIVE_SIGNAL, cbc_open_channel_command,
sizeof(cbc_open_channel_command)) <= 0)

View File

@ -590,7 +590,7 @@ cbc_update_heartbeat(struct cbc_pkt *pkt, uint8_t cmd, uint8_t sus_action)
}
/*
* Update wakeup reason value and notify UOS immediately.
* Update wakeup reason value and notify User VM immediately.
* Events that can change the wakeup reason include the periodic wakeup reason
* from IOC firmware, the IOC boot/resume reason, and heartbeat state changes.
*/
@ -621,7 +621,7 @@ cbc_update_wakeup_reason(struct cbc_pkt *pkt, uint32_t reason)
}
/*
* CBC wakeup reason processing is main entry for Tx(IOC->UOS) lifecycle
* CBC wakeup reason processing is main entry for Tx(IOC->User VM) lifecycle
* service.
*/
static void
@ -640,20 +640,20 @@ cbc_process_wakeup_reason(struct cbc_pkt *pkt)
reason = payload[0] | (payload[1] << 8) | (payload[2] << 16);
/*
* Save the reason for UOS status switching from inactive to active,
* Save the reason for User VM status switching from inactive to active,
* since we need to send a wakeup reason immediately after the switch.
*/
pkt->reason = reason;
if (pkt->uos_active) {
if (pkt->user_vm_active) {
reason |= CBC_WK_RSN_SOC;
/* Unset RTC bit if UOS sends active heartbeat */
/* Unset RTC bit if User VM sends active heartbeat */
reason &= ~CBC_WK_RSN_RTC;
} else {
/*
* If UOS is inactive, indicate the acrnd boot reason
* as UOS periodic wakeup reason.
* If User VM is inactive, indicate the acrnd boot reason
* as User VM periodic wakeup reason.
*/
reason = pkt->ioc->boot_reason;
@ -708,7 +708,7 @@ cbc_update_rtc_timer(uint16_t value, uint8_t unit)
}
/*
* CBC heartbeat processing is main entry for Rx(UOS->IOC) lifecycle service.
* CBC heartbeat processing is main entry for Rx(User_VM->IOC) lifecycle service.
*/
static void
cbc_process_heartbeat(struct cbc_pkt *pkt)
@ -753,7 +753,7 @@ cbc_process_signal(struct cbc_pkt *pkt)
* link_len is 0 means the packet is transmitted to PTY(UART DM)
* if the signal channel is not active, do not transmit it to PTY
* to CBC cdevs, always forward the signals because signal channel
* status only for UOS
* status only applies to the User VM
*/
if (pkt->req->link_len == 0 && is_active == false &&
(cmd == CBC_SD_SINGLE_SIGNAL ||
@ -945,22 +945,22 @@ cbc_tx_handler(struct cbc_pkt *pkt)
CBC_WK_RSN_SOC);
cbc_send_pkt(pkt);
/* Heartbeat init also indicates UOS enter active state */
pkt->uos_active = true;
} else if (pkt->req->rtype == CBC_REQ_T_UOS_ACTIVE) {
/* Heartbeat init also indicates the User VM entered the active state */
pkt->user_vm_active = true;
} else if (pkt->req->rtype == CBC_REQ_T_USER_VM_ACTIVE) {
cbc_update_wakeup_reason(pkt, pkt->ioc->boot_reason |
CBC_WK_RSN_SOC);
cbc_send_pkt(pkt);
/* Enable UOS active flag */
pkt->uos_active = true;
} else if (pkt->req->rtype == CBC_REQ_T_UOS_INACTIVE) {
/* Enable User VM active flag */
pkt->user_vm_active = true;
} else if (pkt->req->rtype == CBC_REQ_T_USER_VM_INACTIVE) {
cbc_update_wakeup_reason(pkt, CBC_WK_RSN_SHUTDOWN);
cbc_send_pkt(pkt);
/* Disable UOS active flag */
pkt->uos_active = false;
/* Disable User VM active flag */
pkt->user_vm_active = false;
/*
* After sending shutdown wakeup reason, then trigger shutdown

View File

@ -66,7 +66,7 @@ static uint16_t get_rpmb_blocks(void)
}
/* Common area of RPMB refers to the start area of RPMB
* shared among all UOS with RO access.
* shared among all User VMs with RO access.
* It's predefined to 32KB in size which contains:
* AttKB(up to 16KB), RPMB info header (256B)
* and the remaining size for future usage.
@ -84,7 +84,7 @@ static uint16_t get_accessible_blocks(void)
}
/* Todo: To get the uos number, e.g. No.0 or No.1, which is
used for calculating UOS RPMB range address.
used for calculating User VM RPMB range address.
But this will be removed after config file is supported.
We plan to predefine such info and save to config file.
*/

View File

@ -122,7 +122,7 @@ typedef struct {
/* This is the main data structure for tpm emulator,
* it will work with one SWTPM instance to
* provide TPM functionlity to UOS.
* provide TPM functionality to the User VM.
*
* ctrl_chan_fd: fd to communicate with SWTPM ctrl channel
* cmd_chan_fd: fd to communicate with SWTPM cmd channel

View File

@ -1055,7 +1055,7 @@ uart_release_backend(struct uart_vdev *uart, const char *opts)
/*
* By current design, for the invalid PTY parameters, the virtual uarts
* are still expose to UOS but all data be dropped by backend service.
* are still exposed to the User VM but all data is dropped by the backend service.
* The uart backend is not setup for this case, so don't try to release
* the uart backend in here.
* TODO: need re-visit the whole policy for such scenario in future.

View File

@ -642,8 +642,8 @@ enum cbc_request_type {
CBC_REQ_T_SUSPEND, /* CBC suspend request */
CBC_REQ_T_SHUTDOWN, /* CBC shutdown request */
CBC_REQ_T_HB_INIT, /* CBC Heartbeat init request */
CBC_REQ_T_UOS_ACTIVE, /* CBC UOS active request */
CBC_REQ_T_UOS_INACTIVE /* CBC UOS inactive request */
CBC_REQ_T_USER_VM_ACTIVE, /* CBC User VM active request */
CBC_REQ_T_USER_VM_INACTIVE /* CBC User VM inactive request */
};
/*
@ -759,7 +759,7 @@ enum vm_request_type {
* CBC packet is mainly structure for CBC protocol process.
*/
struct cbc_pkt {
bool uos_active; /* Mark UOS active status */
bool user_vm_active; /* Mark User VM active status */
uint32_t reason; /* Record current wakeup reason */
struct cbc_request *req; /* CBC packet data */
struct cbc_config *cfg; /* CBC and whitelist configurations */

View File

@ -66,7 +66,7 @@ struct acrn_vm_pci_dev_config *init_one_dev_config(struct pci_pdev *pdev)
dev_config = &vm_config->pci_devs[vm_config->pci_dev_num];
if (is_hv_owned_pdev(pdev->bdf)) {
/* Service VM needs to emulate the type1 pdevs owned by HV */
dev_config->emu_type = PCI_DEV_TYPE_SOSEMUL;
dev_config->emu_type = PCI_DEV_TYPE_SERVICE_VM_EMUL;
if (is_bridge(pdev)) {
dev_config->vdev_ops = &vpci_bridge_ops;
} else if (is_host_bridge(pdev)) {

View File

@ -312,7 +312,7 @@ void init_paging(void)
pgtable_modify_or_del_map((uint64_t *)ppt_mmu_pml4_addr, round_pde_down(hv_hva),
round_pde_up((uint64_t)&ld_text_end) - round_pde_down(hv_hva), 0UL,
PAGE_NX, &ppt_pgtable, MR_MODIFY);
#if (SOS_VM_NUM == 1)
#if (SERVICE_VM_NUM == 1)
pgtable_modify_or_del_map((uint64_t *)ppt_mmu_pml4_addr, (uint64_t)get_sworld_memory_base(),
TRUSTY_RAM_SIZE * MAX_POST_VM_NUM, PAGE_USER, 0UL, &ppt_pgtable, MR_MODIFY);
#endif

View File

@ -72,7 +72,7 @@ static int32_t init_vm_kernel_info(struct acrn_vm *vm, const struct abi_module *
}
/* cmdline parsed from abi module string, for pre-launched VMs and Service VM only. */
static char mod_cmdline[PRE_VM_NUM + SOS_VM_NUM][MAX_BOOTARGS_SIZE] = { '\0' };
static char mod_cmdline[PRE_VM_NUM + SERVICE_VM_NUM][MAX_BOOTARGS_SIZE] = { '\0' };
/**
* @pre vm != NULL && abi != NULL

View File

@ -20,7 +20,7 @@
#define MAX_MOD_STRING_SIZE 2048U
/* The modules in multiboot are: Pre-launched VM: kernel/ramdisk/acpi; Service VM: kernel/ramdisk */
#define MAX_MODULE_NUM (3U * PRE_VM_NUM + 2U * SOS_VM_NUM)
#define MAX_MODULE_NUM (3U * PRE_VM_NUM + 2U * SERVICE_VM_NUM)
/* The vACPI module size is fixed to 1MB */
#define ACPI_MODULE_SIZE MEM_1M
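For example, with PRE_VM_NUM == 1U and SERVICE_VM_NUM == 1U, MAX_MODULE_NUM evaluates to (3U * 1U + 2U * 1U) == 5U.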

View File

@ -71,7 +71,7 @@ bool is_hypercall_from_ring0(void)
inline static bool is_severity_pass(uint16_t target_vmid)
{
return SEVERITY_SOS >= get_vm_severity(target_vmid);
return SEVERITY_SERVICE_VM >= get_vm_severity(target_vmid);
}
/**

View File

@ -17,7 +17,7 @@
#include <asm/sgx.h>
#include <acrn_hv_defs.h>
#define CONFIG_MAX_VM_NUM (PRE_VM_NUM + SOS_VM_NUM + MAX_POST_VM_NUM)
#define CONFIG_MAX_VM_NUM (PRE_VM_NUM + SERVICE_VM_NUM + MAX_POST_VM_NUM)
#define AFFINITY_CPU(n) (1UL << (n))
#define MAX_VCPUS_PER_VM MAX_PCPU_NUM
@ -26,20 +26,20 @@
#define MAX_MOD_TAG_LEN 32U
#ifdef CONFIG_SCHED_NOOP
#define SOS_IDLE ""
#define SERVICE_VM_IDLE ""
#else
#define SOS_IDLE "idle=halt "
#define SERVICE_VM_IDLE "idle=halt "
#endif
#define PCI_DEV_TYPE_PTDEV (1U << 0U)
#define PCI_DEV_TYPE_HVEMUL (1U << 1U)
#define PCI_DEV_TYPE_SOSEMUL (1U << 2U)
#define PCI_DEV_TYPE_SERVICE_VM_EMUL (1U << 2U)
#define MAX_MMIO_DEV_NUM 2U
#define CONFIG_SOS_VM .load_order = SOS_VM, \
.uuid = SOS_VM_UUID, \
.severity = SEVERITY_SOS
.uuid = SERVICE_VM_UUID, \
.severity = SEVERITY_SERVICE_VM
#define CONFIG_SAFETY_VM(idx) .load_order = PRE_LAUNCHED_VM, \
.uuid = SAFETY_VM_UUID##idx, \
@ -69,7 +69,7 @@
enum acrn_vm_severity {
SEVERITY_SAFETY_VM = 0x40U,
SEVERITY_RTVM = 0x30U,
SEVERITY_SOS = 0x20U,
SEVERITY_SERVICE_VM = 0x20U,
SEVERITY_STANDARD_VM = 0x10U,
};

View File

@ -8,7 +8,7 @@
#define VM_UUIDS_H
/* dbbbd434-7a57-4216-a12c-2201f1ab0240 */
#define SOS_VM_UUID {0xdbU, 0xbbU, 0xd4U, 0x34U, 0x7aU, 0x57U, 0x42U, 0x16U, \
#define SERVICE_VM_UUID {0xdbU, 0xbbU, 0xd4U, 0x34U, 0x7aU, 0x57U, 0x42U, 0x16U, \
0xa1U, 0x2cU, 0x22U, 0x01U, 0xf1U, 0xabU, 0x02U, 0x40U}
/* fc836901-8685-4bc0-8b71-6e31dc36fa47 */

View File

@ -144,7 +144,7 @@
<kern_type>KERNEL_BZIMAGE</kern_type>
<kern_mod>Linux_bzImage</kern_mod>
<ramdisk_mod/>
<bootargs>SOS_VM_BOOTARGS</bootargs>
<bootargs>SERVICE_VM_OS_BOOTARGS</bootargs>
</os_config>
<cpu_affinity>
<pcpu_id>0</pcpu_id>

View File

@ -77,7 +77,7 @@
<kern_type>KERNEL_BZIMAGE</kern_type>
<kern_mod>Linux_bzImage</kern_mod>
<ramdisk_mod/>
<bootargs>SOS_VM_BOOTARGS</bootargs>
<bootargs>SERVICE_VM_OS_BOOTARGS</bootargs>
</os_config>
<legacy_vuart id="0">
<type>VUART_LEGACY_PIO</type>

View File

@ -142,7 +142,7 @@
<kern_type>KERNEL_BZIMAGE</kern_type>
<kern_mod>Linux_bzImage</kern_mod>
<ramdisk_mod></ramdisk_mod>
<bootargs>SOS_VM_BOOTARGS</bootargs>
<bootargs>SERVICE_VM_OS_BOOTARGS</bootargs>
</os_config>
<legacy_vuart id="0">
<type>VUART_LEGACY_PIO</type>

View File

@ -78,7 +78,7 @@
<kern_type>KERNEL_BZIMAGE</kern_type>
<kern_mod>Linux_bzImage</kern_mod>
<ramdisk_mod></ramdisk_mod>
<bootargs>SOS_VM_BOOTARGS</bootargs>
<bootargs>SERVICE_VM_OS_BOOTARGS</bootargs>
</os_config>
<legacy_vuart id="0">
<type>VUART_LEGACY_PIO</type>

View File

@ -76,7 +76,7 @@
<kern_type>KERNEL_BZIMAGE</kern_type>
<kern_mod>Linux_bzImage</kern_mod>
<ramdisk_mod/>
<bootargs>SOS_VM_BOOTARGS</bootargs>
<bootargs>SERVICE_VM_OS_BOOTARGS</bootargs>
</os_config>
<legacy_vuart id="0">
<type>VUART_LEGACY_PIO</type>

View File

@ -138,7 +138,7 @@
<kern_type>KERNEL_BZIMAGE</kern_type>
<kern_mod>Linux_bzImage</kern_mod>
<ramdisk_mod/>
<bootargs>SOS_VM_BOOTARGS</bootargs>
<bootargs>SERVICE_VM_OS_BOOTARGS</bootargs>
</os_config>
<legacy_vuart id="0">
<type>VUART_LEGACY_PIO</type>

View File

@ -143,7 +143,7 @@
<kern_type>KERNEL_BZIMAGE</kern_type>
<kern_mod>Linux_bzImage</kern_mod>
<ramdisk_mod/>
<bootargs>SOS_VM_BOOTARGS</bootargs>
<bootargs>SERVICE_VM_OS_BOOTARGS</bootargs>
</os_config>
<legacy_vuart id="0">
<type>VUART_LEGACY_PIO</type>

View File

@ -75,7 +75,7 @@
<kern_type>KERNEL_BZIMAGE</kern_type>
<kern_mod>Linux_bzImage</kern_mod>
<ramdisk_mod/>
<bootargs>SOS_VM_BOOTARGS</bootargs>
<bootargs>SERVICE_VM_OS_BOOTARGS</bootargs>
</os_config>
<legacy_vuart id="0">
<type>VUART_LEGACY_PIO</type>

View File

@ -76,7 +76,7 @@
<kern_type>KERNEL_BZIMAGE</kern_type>
<kern_mod>Linux_bzImage</kern_mod>
<ramdisk_mod/>
<bootargs>SOS_VM_BOOTARGS</bootargs>
<bootargs>SERVICE_VM_OS_BOOTARGS</bootargs>
</os_config>
<legacy_vuart id="0">
<type>VUART_LEGACY_PIO</type>

View File

@ -57,7 +57,7 @@ UUID_DB = {
}
VM_DB = {
'SOS_VM':{'load_type':'SOS_VM', 'severity':'SEVERITY_SOS', 'uuid':UUID_DB['SOS_VM']},
'SOS_VM':{'load_type':'SOS_VM', 'severity':'SEVERITY_SERVICE_VM', 'uuid':UUID_DB['SOS_VM']},
'SAFETY_VM':{'load_type':'PRE_LAUNCHED_VM', 'severity':'SEVERITY_SAFETY_VM', 'uuid':UUID_DB['SAFETY_VM']},
'PRE_RT_VM':{'load_type':'PRE_LAUNCHED_VM', 'severity':'SEVERITY_RTVM', 'uuid':UUID_DB['PRE_RT_VM']},
'PRE_STD_VM':{'load_type':'PRE_LAUNCHED_VM', 'severity':'SEVERITY_STANDARD_VM', 'uuid':UUID_DB['PRE_STD_VM']},
@ -501,9 +501,9 @@ def os_kern_args_check(id_kern_args_dic, prime_item, item):
if vm_i not in id_kern_args_dic.keys():
continue
kern_args = id_kern_args_dic[vm_i]
if "SOS_" in vm_type and kern_args != "SOS_VM_BOOTARGS":
if "SOS_" in vm_type and kern_args != "SERVICE_VM_OS_BOOTARGS":
key = "vm:id={},{},{}".format(vm_i, prime_item, item)
ERR_LIST[key] = "VM os config kernel service os should be SOS_VM_BOOTARGS"
ERR_LIST[key] = "VM os config kernel service os should be SERVICE_VM_OS_BOOTARGS"
def os_kern_load_addr_check(kern_type, id_kern_load_addr_dic, prime_item, item):

View File

@ -26,14 +26,14 @@ def gen_common_header(config):
def scenario_vm_num(scenario_items, config):
print("", file=config)
print("/* SOS_VM_NUM can only be 0U or 1U;", file=config)
print(" * When SOS_VM_NUM is 0U, MAX_POST_VM_NUM must be 0U too;", file=config)
print("/* SERVICE_VM_NUM can only be 0U or 1U;", file=config)
print(" * When SERVICE_VM_NUM is 0U, MAX_POST_VM_NUM must be 0U too;", file=config)
print(" * MAX_POST_VM_NUM must be bigger than CONFIG_MAX_KATA_VM_NUM;", file=config)
print(" */", file=config)
load_type_cnt = scenario_items['vm'].load_order_cnt
print("#define PRE_VM_NUM\t\t\t{}U".format(load_type_cnt.pre_vm), file=config)
print("#define SOS_VM_NUM\t\t\t{}U".format(load_type_cnt.sos_vm), file=config)
print("#define SERVICE_VM_NUM\t\t\t{}U".format(load_type_cnt.sos_vm), file=config)
print("#define MAX_POST_VM_NUM\t\t\t{}U".format(load_type_cnt.post_vm), file=config)
print("#define CONFIG_MAX_KATA_VM_NUM\t\t{}U".format(scenario_cfg_lib.KATA_VM_COUNT), file=config)
@ -70,9 +70,9 @@ def gen_sos_header(scenario_items, config):
if vm_type == 'SOS_VM':
print("/* SOS_VM == VM{0} */".format(vm_i), file=config)
print("#define SOS_VM_BOOTARGS\t\t\tSOS_ROOTFS\t\\", file=config)
print("\t\t\t\t\tSOS_CONSOLE\t\\", file=config)
print("\t\t\t\t\tSOS_IDLE\t\\", file=config)
print("#define SERVICE_VM_OS_BOOTARGS\t\t\tSERVICE_VM_ROOTFS\t\\", file=config)
print("\t\t\t\t\tSERVICE_VM_OS_CONSOLE\t\\", file=config)
print("\t\t\t\t\tSERVICE_VM_IDLE\t\\", file=config)
print("\t\t\t\t\tSOS_BOOTARGS_DIFF", file=config)
print("", file=config)

View File

@ -66,7 +66,7 @@
</xsl:template>
<xsl:template name="sos_rootfs">
<xsl:value-of select="acrn:define('SOS_ROOTFS', concat($quot, 'root=', vm/board_private/rootfs[text()], ' ', $quot), '')" />
<xsl:value-of select="acrn:define('SERVICE_VM_ROOTFS', concat($quot, 'root=', vm/board_private/rootfs[text()], ' ', $quot), '')" />
</xsl:template>
<xsl:template name="sos_serial_console">
@ -84,7 +84,7 @@
</xsl:if>
</xsl:if>
</xsl:variable>
<xsl:value-of select="acrn:define('SOS_CONSOLE', $sos_console, '')" />
<xsl:value-of select="acrn:define('SERVICE_VM_OS_CONSOLE', $sos_console, '')" />
</xsl:template>
<xsl:template name="sos_bootargs_diff">

View File

@ -212,7 +212,7 @@
<xsl:if test="normalize-space(bootargs)">
<xsl:choose>
<xsl:when test="acrn:is-sos-vm(../vm_type)">
<xsl:value-of select="acrn:initializer('bootargs', 'SOS_VM_BOOTARGS')" />
<xsl:value-of select="acrn:initializer('bootargs', 'SERVICE_VM_OS_BOOTARGS')" />
</xsl:when>
<xsl:when test="acrn:is-pre-launched-vm(../vm_type)">
<xsl:value-of select="acrn:initializer('bootargs', concat('VM', ../@id, '_BOOT_ARGS'))" />

View File

@ -37,10 +37,10 @@
</xsl:template>
<xsl:template name ="vm_count">
<xsl:value-of select="acrn:comment('SOS_VM_NUM can only be 0U or 1U; When SOS_VM_NUM is 0U, MAX_POST_VM_NUM must be 0U too; MAX_POST_VM_NUM must be bigger than CONFIG_MAX_KATA_VM_NUM.')" />
<xsl:value-of select="acrn:comment('SERVICE_VM_NUM can only be 0U or 1U; When SERVICE_VM_NUM is 0U, MAX_POST_VM_NUM must be 0U too; MAX_POST_VM_NUM must be bigger than CONFIG_MAX_KATA_VM_NUM.')" />
<xsl:value-of select="$newline" />
<xsl:value-of select="acrn:define('PRE_VM_NUM', count(vm[acrn:is-pre-launched-vm(vm_type)]), 'U')" />
<xsl:value-of select="acrn:define('SOS_VM_NUM', count(vm[acrn:is-sos-vm(vm_type)]), 'U')" />
<xsl:value-of select="acrn:define('SERVICE_VM_NUM', count(vm[acrn:is-sos-vm(vm_type)]), 'U')" />
<xsl:value-of select="acrn:define('MAX_POST_VM_NUM', count(vm[acrn:is-post-launched-vm(vm_type)]), 'U')" />
<xsl:value-of select="acrn:define('CONFIG_MAX_KATA_VM_NUM', count(vm[acrn:is-kata-vm(vm_type)]), 'U')" />
</xsl:template>
@ -62,7 +62,7 @@
<xsl:if test="count(vm[vm_type='SOS_VM'])">
<xsl:value-of select="acrn:comment(concat('SOS_VM == VM', vm[vm_type='SOS_VM']/@id))" />
<xsl:value-of select="$newline" />
<xsl:value-of select="acrn:define('SOS_VM_BOOTARGS', 'SOS_ROOTFS SOS_CONSOLE SOS_IDLE SOS_BOOTARGS_DIFF', '')" />
<xsl:value-of select="acrn:define('SERVICE_VM_OS_BOOTARGS', 'SERVICE_VM_ROOTFS SERVICE_VM_OS_CONSOLE SERVICE_VM_IDLE SOS_BOOTARGS_DIFF', '')" />
</xsl:if>
</xsl:template>

View File

@ -129,8 +129,8 @@ bool sanitize_vm_config(void)
} else if (is_safety_vm_uuid(vm_config->uuid) && (vm_config->severity != (uint8_t)SEVERITY_SAFETY_VM)) {
ret = false;
} else {
#if (SOS_VM_NUM == 1U)
if (vm_config->severity <= SEVERITY_SOS) {
#if (SERVICE_VM_NUM == 1U)
if (vm_config->severity <= SEVERITY_SERVICE_VM) {
/* If there are both SOS and Pre-launched VM, make sure pre-launched VM has higher severity than SOS */
printf("%s: pre-launched vm doesn't has higher severity than SOS \n", __func__);
ret = false;
@ -141,7 +141,7 @@ bool sanitize_vm_config(void)
case SOS_VM:
break;
case POST_LAUNCHED_VM:
if ((vm_config->severity == (uint8_t)SEVERITY_SAFETY_VM) || (vm_config->severity == (uint8_t)SEVERITY_SOS)) {
if ((vm_config->severity == (uint8_t)SEVERITY_SAFETY_VM) || (vm_config->severity == (uint8_t)SEVERITY_SERVICE_VM)) {
ret = false;
}
break;

View File

@ -22,7 +22,7 @@
#include "ioc.h"
#define ACRND_NAME "acrnd"
#define SOS_LCS_SOCK "sos-lcs"
#define SERVICE_VM_LCS_SOCK "service-vm-lcs"
#define HW_IOC_PATH "/dev/cbc-early-signals"
#define VMS_STOP_TIMEOUT 20U /* Time to wait VMs to stop */
#define SOCK_TIMEOUT 2U
@ -321,10 +321,10 @@ unsigned get_sos_wakeup_reason(void)
struct mngr_msg req;
struct mngr_msg ack;
client_fd = mngr_open_un(SOS_LCS_SOCK, MNGR_CLIENT);
client_fd = mngr_open_un(SERVICE_VM_LCS_SOCK, MNGR_CLIENT);
if (client_fd <= 0) {
fprintf(stderr, "Failed to open the socket(%s) to query the "
"reason for the wake-up", SOS_LCS_SOCK);
"reason for the wake-up", SERVICE_VM_LCS_SOCK);
goto EXIT;
}
@ -383,7 +383,7 @@ static int set_sos_timer(time_t due_time)
struct mngr_msg req;
struct mngr_msg ack;
client_fd = mngr_open_un(SOS_LCS_SOCK, MNGR_CLIENT);
client_fd = mngr_open_un(SERVICE_VM_LCS_SOCK, MNGR_CLIENT);
if (client_fd <= 0) {
perror("Failed to open sock for to req wkup_reason");
ret = client_fd;
@ -513,7 +513,7 @@ static void* notify_stop_state(void *arg)
store_timer_list();
lcs_fd = mngr_open_un(SOS_LCS_SOCK, MNGR_CLIENT);
lcs_fd = mngr_open_un(SERVICE_VM_LCS_SOCK, MNGR_CLIENT);
if (lcs_fd < 0) {
fprintf(stderr, "cannot open sos-lcs.socket\n");
goto exit;

View File

@ -24,22 +24,22 @@
#define MSG_SIZE 8U
#define NODE_SIZE 3U
#define TRY_SEND_CNT 3U
#define SOS_SOCKET_PORT (0x2000U)
#define UOS_SOCKET_PORT (SOS_SOCKET_PORT + 1U)
#define SERVICE_VM_SOCKET_PORT (0x2000U)
#define USER_VM_SOCKET_PORT (SERVICE_VM_SOCKET_PORT + 1U)
/* life_mngr process run in SOS or UOS */
/* The life_mngr process runs in the Service VM or a User VM */
enum process_env {
PROCESS_UNKNOWN = 0,
PROCESS_RUN_IN_SOS,
PROCESS_RUN_IN_UOS,
PROCESS_RUN_IN_SERVICE_VM,
PROCESS_RUN_IN_USER_VM,
};
/* Enumerated shutdown state machine only for UOS thread */
/* Enumerated shutdown state machine only for User VM thread */
enum shutdown_state {
SHUTDOWN_REQ_WAITING = 0, /* Can receive shutdown cmd in this state */
SHUTDOWN_ACK_WAITING, /* Wait acked message from SOS */
SHUTDOWN_REQ_FROM_SOS, /* Trigger shutdown by SOS */
SHUTDOWN_REQ_FROM_UOS, /* Trigger shutdown by UOS */
SHUTDOWN_ACK_WAITING, /* Wait acked message from Service VM */
SHUTDOWN_REQ_FROM_SERVICE_VM, /* Trigger shutdown by Service VM */
SHUTDOWN_REQ_FROM_USER_VM, /* Trigger shutdown by User VM */
};
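The handshake implied by these states is small; a sketch of the requesting side, using the "shutdown"/"acked" strings this commit names SERVICE_VM_REQ and ACK_CMD (socket setup omitted):

#include <string.h>
#include <sys/socket.h>

/* Send the shutdown request and wait for the peer's ack. */
static int request_shutdown_sketch(int conn_fd)
{
	char buf[16] = {0};
	ssize_t n;

	if (send(conn_fd, "shutdown", 8, 0) != 8) /* SERVICE_VM_REQ */
		return -1;
	n = recv(conn_fd, buf, sizeof(buf) - 1, 0);
	if (n <= 0)
		return -1;
	return (strncmp(buf, "acked", 5) == 0) ? 0 : -1; /* ACK_CMD */
}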
@ -169,8 +169,8 @@ static void *sos_socket_thread(void *arg)
int flags, nfds, num, ret;
char buf[BUFF_SIZE];
listen_fd = setup_socket_listen(SOS_SOCKET_PORT);
LOG_PRINTF("life_mngr:listen_fd=0x%x socket port is 0x%x\r\n", listen_fd, SOS_SOCKET_PORT);
listen_fd = setup_socket_listen(SERVICE_VM_SOCKET_PORT);
LOG_PRINTF("life_mngr:listen_fd=0x%x socket port is 0x%x\r\n", listen_fd, SERVICE_VM_SOCKET_PORT);
connect_fd = accept(listen_fd, (struct sockaddr *)&client, &len);
if (connect_fd == -1) {
@ -259,7 +259,7 @@ static void *listener_fn_to_sos(void *arg)
bool shutdown_self = false;
unsigned char buf[BUFF_SIZE];
/* UOS-server wait for message from SOS */
/* User VM server waits for a message from the Service VM */
do {
memset(buf, 0, sizeof(buf));
ret = receive_message(tty_dev_fd, buf, sizeof(buf));
@ -268,18 +268,18 @@ static void *listener_fn_to_sos(void *arg)
}
switch (shutdown_state) {
/* it can receive shutdown command from SOS */
/* it can receive shutdown command from Service VM */
case SHUTDOWN_REQ_WAITING:
case SHUTDOWN_REQ_FROM_SOS:
case SHUTDOWN_REQ_FROM_SERVICE_VM:
if ((ret > 0) && (strncmp(SHUTDOWN_CMD, (const char *)buf, strlen(SHUTDOWN_CMD)) == 0)) {
shutdown_state = SHUTDOWN_REQ_FROM_SOS;
shutdown_state = SHUTDOWN_REQ_FROM_SERVICE_VM;
ret = send_message(tty_dev_fd, ACK_CMD, sizeof(ACK_CMD));
if (ret != 0) {
LOG_WRITE("UOS send acked message failed!\n");
LOG_WRITE("User VM send acked message failed!\n");
} else {
shutdown_self = true;
}
LOG_WRITE("UOS start shutdown\n");
LOG_WRITE("User VM start shutdown\n");
}
break;
@ -295,7 +295,7 @@ static void *listener_fn_to_sos(void *arg)
}
retry--;
} else {
LOG_PRINTF("Cann't not receive acked message from SOS, have try %d times\r\n",
LOG_PRINTF("Cann't not receive acked message from Service VM, have try %d times\r\n",
TRY_SEND_CNT);
shutdown_state = SHUTDOWN_REQ_WAITING;
retry = TRY_SEND_CNT;
@ -333,9 +333,9 @@ static void *listener_fn_to_operator(void *arg)
int num, ret;
char buf[BUFF_SIZE];
listen_fd = setup_socket_listen(UOS_SOCKET_PORT);
listen_fd = setup_socket_listen(USER_VM_SOCKET_PORT);
LOG_PRINTF("listen_fd=0x%x socket port is 0x%x\r\n",
listen_fd, UOS_SOCKET_PORT);
listen_fd, USER_VM_SOCKET_PORT);
while (1) {
connect_fd = accept(listen_fd, (struct sockaddr *)&client, &len);
@ -364,7 +364,7 @@ static void *listener_fn_to_operator(void *arg)
}
continue;
}
shutdown_state = SHUTDOWN_REQ_FROM_UOS;
shutdown_state = SHUTDOWN_REQ_FROM_USER_VM;
/* send acked message to the caller */
LOG_WRITE("Send acked message to the caller\r\n");
ret = send_message(connect_fd, ACK_CMD, sizeof(ACK_CMD));
@ -415,7 +415,7 @@ int main(int argc, char *argv[])
}
if (strncmp("uos", argv[1], NODE_SIZE) == 0) {
env = PROCESS_RUN_IN_UOS;
env = PROCESS_RUN_IN_USER_VM;
devname_uos = argv[2];
tty_dev_fd = open(devname_uos, O_RDWR | O_NOCTTY | O_SYNC | O_NONBLOCK);
if (tty_dev_fd < 0) {
@ -431,7 +431,7 @@ int main(int argc, char *argv[])
ret = pthread_create(&uos_thread_pid_2, NULL, listener_fn_to_operator, NULL);
} else if (strncmp("sos", argv[1], NODE_SIZE) == 0) {
env = PROCESS_RUN_IN_SOS;
env = PROCESS_RUN_IN_SERVICE_VM;
ret = pthread_create(&sos_socket_pid, NULL, sos_socket_thread, NULL);
} else {
LOG_WRITE("Invalid param. Example: [./life_mngr uos /dev/ttyS1] or ./life_mngr sos /dev/ttyS1]\n");
@ -439,9 +439,9 @@ int main(int argc, char *argv[])
return -EINVAL;
}
if (env == PROCESS_RUN_IN_SOS) {
if (env == PROCESS_RUN_IN_SERVICE_VM) {
pthread_join(sos_socket_pid, NULL);
} else if (env == PROCESS_RUN_IN_UOS) {
} else if (env == PROCESS_RUN_IN_USER_VM) {
pthread_join(uos_thread_pid_1, NULL);
pthread_join(uos_thread_pid_2, NULL);
close(tty_dev_fd);

View File

@ -7,7 +7,7 @@
#include <stdio.h>
#define SOS_REQ "shutdown"
#define SERVICE_VM_REQ "shutdown"
#define UOS_ACK "acked"
#define BUFF_SIZE 16U
#define MSG_SIZE 8U
@ -69,7 +69,7 @@ int main()
continue;
}
if (strncmp(recvbuf, SOS_REQ, MSG_SIZE) == 0)
if (strncmp(recvbuf, SERVICE_VM_REQ, MSG_SIZE) == 0)
{
WriteFile(hCom, UOS_ACK, sizeof(UOS_ACK), NULL, NULL);
system("shutdown -s -t 0");