Merge pull request #159 from gvancuts/master

Merging acrn-devicemodel and acrn-documentation
Author: lijinxia
Date: 2018-05-11 17:00:46 +08:00
Committed by: GitHub
380 changed files with 58299 additions and 3 deletions

14
.gitignore vendored

@@ -1,3 +1,11 @@
build/
bsp/uefi/include/bsp/version.h
bsp/sbl/include/bsp/version.h
hypervisor/build/
hypervisor/bsp/uefi/include/bsp/version.h
hypervisor/bsp/sbl/include/bsp/version.h
devicemodel/build/
devicemodel/include/version.h
doc/doxygen
doc/_build
*.bak
*.sav
*.log
*.warnings

10
devicemodel/.travis-dockerfiles/Dockerfile.clearlinux Normal file

@@ -0,0 +1,10 @@
# Build container based on Clearlinux
FROM clearlinux:base
# python-basic-dev is only there because it gives us
# the openssl/md5.h header that we need
RUN swupd bundle-add os-core-dev dev-utils-dev
WORKDIR /root/acrn
CMD ["/bin/bash"]

25
devicemodel/.travis.yml Normal file

@@ -0,0 +1,25 @@
sudo: required
language: c
env:
global:
- OS_TESTED_CL="clearlinux"
- OS_TESTED_UBUNTU="ubuntu16.04"
- OS_TESTED_FEDORA="fedora26"
services:
- docker
before_install:
- docker build -t ${OS_TESTED_CL} -f .travis-dockerfiles/Dockerfile.${OS_TESTED_CL} .
- docker build -t ${OS_TESTED_UBUNTU} -f .travis-dockerfiles/Dockerfile.${OS_TESTED_UBUNTU} .
- docker build -t ${OS_TESTED_FEDORA} -f .travis-dockerfiles/Dockerfile.${OS_TESTED_FEDORA} .
- docker images
install: true
script:
- docker run -v $PWD:/root/acrn ${OS_TESTED_CL} /bin/bash -c "make clean && make"
- docker run -v $PWD:/root/acrn ${OS_TESTED_UBUNTU} /bin/bash -c "make clean && make"
- docker run -v $PWD:/root/acrn ${OS_TESTED_FEDORA} /bin/bash -c "make clean && make"

34
devicemodel/MAINTAINERS Normal file

@@ -0,0 +1,34 @@
ACRN Device Model Maintainers
=============================
This file provides information about the primary maintainers for the
ACRN Device Model.
In general, you should not privately email the maintainer. You should
email the acrn-dev list, but you can also Cc the maintainer.
Descriptions of section entries:
L: Mailing list that is relevant to this area (default is acrn-dev)
Patches and questions should be sent to the email list.
M: Cc address for patches and questions (i.e., the package maintainer)
W: Web-page with status/info
T: SCM tree type and location. Type is one of: git, svn.
S: Status, one of the following:
Supported: Someone is actually paid to look after this.
Maintained: Someone actually looks after it.
Odd Fixes: It has a maintainer but they don't have time to do
much other than throw the odd patch in. See below.
Orphan: No current maintainer [but maybe you could take the
role as you write your new code].
Obsolete: Old code. Something tagged obsolete generally means
it has been replaced by a better system and you
should be using that.
Maintainers List
----------------
W: https://projectacrn.org/ and https://projectacrn.github.io/
S: Supported
L: https://lists.projectacrn.org/g/acrn-dev
T: git - https://github.com/projectacrn/acrn-devicemodel.git
M: Anthony Xu <anthony.xu@intel.com>

161
devicemodel/Makefile Normal file

@@ -0,0 +1,161 @@
#
# ACRN-DM
#
MAJOR_VERSION=0
MINOR_VERSION=1
RC_VERSION=4
BASEDIR := $(shell pwd)
DM_OBJDIR ?= $(CURDIR)/build
CC ?= gcc
CFLAGS := -g -O0 -std=gnu11
CFLAGS += -D_GNU_SOURCE
CFLAGS += -DNO_OPENSSL
CFLAGS += -m64
CFLAGS += -Wall -ffunction-sections
CFLAGS += -Werror
CFLAGS += -O2 -D_FORTIFY_SOURCE=2
CFLAGS += -Wformat -Wformat-security -fno-strict-aliasing
CFLAGS += -I$(BASEDIR)/include
CFLAGS += -I$(BASEDIR)/include/public
GCC_MAJOR=$(shell echo __GNUC__ | $(CC) -E -x c - | tail -n 1)
GCC_MINOR=$(shell echo __GNUC_MINOR__ | $(CC) -E -x c - | tail -n 1)
#enable stack overflow check
STACK_PROTECTOR := 1
ifdef STACK_PROTECTOR
ifeq (true, $(shell [ $(GCC_MAJOR) -gt 4 ] && echo true))
CFLAGS += -fstack-protector-strong
else
ifeq (true, $(shell [ $(GCC_MAJOR) -eq 4 ] && [ $(GCC_MINOR) -ge 9 ] && echo true))
CFLAGS += -fstack-protector-strong
else
CFLAGS += -fstack-protector
endif
endif
endif
LDFLAGS += -Wl,-z,noexecstack
LDFLAGS += -Wl,-z,relro,-z,now
LIBS = -lrt
LIBS += -lpthread
LIBS += -lcrypto
LIBS += -lpciaccess
LIBS += -lz
LIBS += -luuid
# hw
SRCS += hw/block_if.c
SRCS += hw/usb_core.c
SRCS += hw/uart_core.c
SRCS += hw/pci/virtio/virtio.c
SRCS += hw/pci/virtio/virtio_kernel.c
SRCS += hw/platform/usb_mouse.c
SRCS += hw/platform/atkbdc.c
SRCS += hw/platform/ps2mouse.c
SRCS += hw/platform/rtc.c
SRCS += hw/platform/ps2kbd.c
SRCS += hw/platform/ioapic.c
SRCS += hw/platform/cmos_io.c
SRCS += hw/platform/ioc.c
SRCS += hw/platform/ioc_cbc.c
SRCS += hw/platform/acpi/acpi.c
SRCS += hw/platform/acpi/acpi_pm.c
SRCS += hw/pci/wdt_i6300esb.c
SRCS += hw/pci/lpc.c
SRCS += hw/pci/xhci.c
SRCS += hw/pci/core.c
SRCS += hw/pci/virtio/virtio_console.c
SRCS += hw/pci/virtio/virtio_block.c
SRCS += hw/pci/virtio/virtio_input.c
SRCS += hw/pci/ahci.c
SRCS += hw/pci/hostbridge.c
SRCS += hw/pci/passthrough.c
SRCS += hw/pci/virtio/virtio_net.c
SRCS += hw/pci/virtio/virtio_rnd.c
SRCS += hw/pci/virtio/virtio_hyper_dmabuf.c
SRCS += hw/pci/virtio/virtio_heci.c
SRCS += hw/pci/irq.c
SRCS += hw/pci/uart.c
# core
#SRCS += core/bootrom.c
SRCS += core/monitor.c
SRCS += core/sw_load_common.c
SRCS += core/sw_load_bzimage.c
SRCS += core/sw_load_vsbl.c
SRCS += core/smbiostbl.c
SRCS += core/mevent.c
SRCS += core/gc.c
SRCS += core/console.c
SRCS += core/inout.c
SRCS += core/mem.c
SRCS += core/post.c
SRCS += core/consport.c
SRCS += core/vmmapi.c
SRCS += core/mptbl.c
SRCS += core/main.c
SRCS += core/hugetlb.c
# arch
SRCS += arch/x86/pm.c
OBJS := $(patsubst %.c,$(DM_OBJDIR)/%.o,$(SRCS))
HEADERS := $(shell find $(BASEDIR) -name '*.h')
DISTCLEAN_OBJS := $(shell find $(BASEDIR) -name '*.o')
PROGRAM := acrn-dm
SAMPLES := $(wildcard samples/*)
all: include/version.h $(PROGRAM)
@echo -n ""
$(PROGRAM): $(OBJS)
$(CC) -o $(DM_OBJDIR)/$@ $(CFLAGS) $(LDFLAGS) $^ $(LIBS)
clean:
rm -f $(OBJS)
rm -f include/version.h
rm -f $(OBJS)
rm -rf $(DM_OBJDIR)
if test -f $(PROGRAM); then rm $(PROGRAM); fi
distclean:
rm -f $(DISTCLEAN_OBJS)
rm -f include/version.h
rm -f $(OBJS)
rm -rf $(DM_OBJDIR)
rm -f tags TAGS cscope.files cscope.in.out cscope.out cscope.po.out GTAGS GPATH GRTAGS GSYMS
include/version.h:
touch include/version.h
@COMMIT=`git rev-parse --verify --short HEAD 2>/dev/null`;\
DIRTY=`git diff-index --name-only HEAD`;\
if [ -n "$$DIRTY" ];then PATCH="$$COMMIT-dirty";else PATCH="$$COMMIT";fi;\
TIME=`date "+%Y-%m-%d %H:%M:%S"`;\
cat license_header > include/version.h;\
echo "#define DM_MAJOR_VERSION $(MAJOR_VERSION)" >> include/version.h;\
echo "#define DM_MINOR_VERSION $(MINOR_VERSION)" >> include/version.h;\
echo "#define DM_RC_VERSION $(RC_VERSION)" >> include/version.h;\
echo "#define DM_BUILD_VERSION "\""$$PATCH"\""" >> include/version.h;\
echo "#define DM_BUILD_TIME "\""$$TIME"\""" >> include/version.h;\
echo "#define DM_BUILD_USER "\""$(USER)"\""" >> include/version.h
$(DM_OBJDIR)/%.o: %.c $(HEADERS)
[ ! -e $@ ] && mkdir -p $(dir $@); \
$(CC) $(CFLAGS) -c $< -o $@
install: $(DM_OBJDIR)/$(PROGRAM) install-samples
install -D $(DM_OBJDIR)/$(PROGRAM) $(DESTDIR)/usr/bin/$(PROGRAM)
install-samples: $(SAMPLES)
install -d $(DESTDIR)/usr/share/acrn/demo
install -t $(DESTDIR)/usr/share/acrn/demo $^

76
devicemodel/README.rst Normal file

@@ -0,0 +1,76 @@
ACRN Device Model
#################
Introduction
============
The ACRN Device Model provides **device sharing** capabilities between the Service OS and Guest OSs. It is used in conjunction with the `ACRN Hypervisor`_ and is installed within the Service OS. You can find out more about Project ACRN on the `Project ACRN documentation`_ website.
Building the Device Model
=========================
Build dependencies
******************
* For Clear Linux
.. code-block:: console
sudo swupd bundle-add os-clr-on-clr \
os-utils-gui-dev
* For CentOS
.. code-block:: console
sudo yum install gcc \
libuuid-devel \
openssl-devel \
libpciaccess-devel
* For Fedora 27
.. code-block:: console
sudo dnf install gcc \
libuuid-devel \
openssl-devel \
libpciaccess-devel
Build
*****
To build the Device Model:
.. code-block:: console
make
To clean the build artefacts:
.. code-block:: console
make clean
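The Makefile in this tree also provides an ``install`` target, which copies ``acrn-dm`` to ``$(DESTDIR)/usr/bin`` and the sample files to ``$(DESTDIR)/usr/share/acrn/demo``; a minimal sketch of its use after a successful build:
.. code-block:: console
sudo make install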
Runtime dependencies
********************
* On CentOS
.. code-block:: console
sudo yum install openssl-libs \
zlib \
libpciaccess \
libuuid
* On Fedora 27
.. code-block:: console
sudo dnf install openssl-libs \
zlib \
libpciaccess \
libuuid
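With these dependencies installed, the Device Model can be launched from the Service OS. Below is an illustrative invocation using a few of the options listed in ``acrn-dm``'s help output (the VM name ``vm1`` and the option values are placeholders, not a tested configuration):
.. code-block:: console
sudo acrn-dm -A -T -c 2 -m 2048 vm1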
.. _`ACRN Hypervisor`: https://github.com/projectacrn/acrn-hypervisor
.. _`Project ACRN documentation`: https://projectacrn.github.io/

310
devicemodel/arch/x86/pm.c Normal file

@@ -0,0 +1,310 @@
/*-
* Copyright (c) 2013 Hudson River Trading LLC
* Written by: John H. Baldwin <jhb@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#include <sys/types.h>
#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <signal.h>
#include <stdbool.h>
#include "vmmapi.h"
#include "vmm.h"
#include "acpi.h"
#include "inout.h"
#include "mevent.h"
#include "irq.h"
#include "lpc.h"
static pthread_mutex_t pm_lock = PTHREAD_MUTEX_INITIALIZER;
static struct mevent *power_button;
static sig_t old_power_handler;
/*
* Reset Control register at I/O port 0xcf9. Bit 2 forces a system
* reset when it transitions from 0 to 1. Bit 1 selects the type of
* reset to attempt: 0 selects a "soft" reset, and 1 selects a "hard"
* reset.
*/
static int
reset_handler(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
uint32_t *eax, void *arg)
{
int error;
static uint8_t reset_control;
if (bytes != 1)
return -1;
if (in)
*eax = reset_control;
else {
reset_control = *eax;
/* Treat hard and soft resets the same. */
if (reset_control & 0x4) {
error = vm_suspend(ctx, VM_SUSPEND_RESET);
assert(error == 0 || errno == EALREADY);
}
/* cold reset should clear the value in 0xcf9 */
if (reset_control & 0x8) {
reset_control = 0;
}
}
return 0;
}
INOUT_PORT(reset_reg, 0xCF9, IOPORT_F_INOUT, reset_handler);
/*
* ACPI's SCI is a level-triggered interrupt.
*/
static int sci_active;
static void
sci_assert(struct vmctx *ctx)
{
if (sci_active)
return;
vm_isa_assert_irq(ctx, SCI_INT, SCI_INT);
sci_active = 1;
}
static void
sci_deassert(struct vmctx *ctx)
{
if (!sci_active)
return;
vm_isa_deassert_irq(ctx, SCI_INT, SCI_INT);
sci_active = 0;
}
/*
* Power Management 1 Event Registers
*
* The only power management event supported is a power button upon
* receiving SIGTERM.
*/
static uint16_t pm1_enable, pm1_status;
#define PM1_TMR_STS 0x0001
#define PM1_BM_STS 0x0010
#define PM1_GBL_STS 0x0020
#define PM1_PWRBTN_STS 0x0100
#define PM1_SLPBTN_STS 0x0200
#define PM1_RTC_STS 0x0400
#define PM1_WAK_STS 0x8000
#define PM1_TMR_EN 0x0001
#define PM1_GBL_EN 0x0020
#define PM1_PWRBTN_EN 0x0100
#define PM1_SLPBTN_EN 0x0200
#define PM1_RTC_EN 0x0400
static void
sci_update(struct vmctx *ctx)
{
int need_sci;
/* See if the SCI should be active or not. */
need_sci = 0;
if ((pm1_enable & PM1_TMR_EN) && (pm1_status & PM1_TMR_STS))
need_sci = 1;
if ((pm1_enable & PM1_GBL_EN) && (pm1_status & PM1_GBL_STS))
need_sci = 1;
if ((pm1_enable & PM1_PWRBTN_EN) && (pm1_status & PM1_PWRBTN_STS))
need_sci = 1;
if ((pm1_enable & PM1_SLPBTN_EN) && (pm1_status & PM1_SLPBTN_STS))
need_sci = 1;
if ((pm1_enable & PM1_RTC_EN) && (pm1_status & PM1_RTC_STS))
need_sci = 1;
if (need_sci)
sci_assert(ctx);
else
sci_deassert(ctx);
}
static int
pm1_status_handler(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
uint32_t *eax, void *arg)
{
if (bytes != 2)
return -1;
pthread_mutex_lock(&pm_lock);
if (in)
*eax = pm1_status;
else {
/*
* Writes are only permitted to clear certain bits by
* writing 1 to those flags.
*/
pm1_status &= ~(*eax & (PM1_WAK_STS | PM1_RTC_STS |
PM1_SLPBTN_STS | PM1_PWRBTN_STS | PM1_BM_STS));
sci_update(ctx);
}
pthread_mutex_unlock(&pm_lock);
return 0;
}
static int
pm1_enable_handler(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
uint32_t *eax, void *arg)
{
if (bytes != 2)
return -1;
pthread_mutex_lock(&pm_lock);
if (in)
*eax = pm1_enable;
else {
/*
* Only permit certain bits to be set. We never use
* the global lock, but ACPI-CA whines profusely if it
* can't set GBL_EN.
*/
pm1_enable = *eax & (PM1_PWRBTN_EN | PM1_GBL_EN);
sci_update(ctx);
}
pthread_mutex_unlock(&pm_lock);
return 0;
}
INOUT_PORT(pm1_status, PM1A_EVT_ADDR, IOPORT_F_INOUT, pm1_status_handler);
INOUT_PORT(pm1_enable, PM1A_EVT_ADDR + 2, IOPORT_F_INOUT, pm1_enable_handler);
static void
power_button_handler(int signal, enum ev_type type, void *arg)
{
struct vmctx *ctx;
ctx = arg;
pthread_mutex_lock(&pm_lock);
if (!(pm1_status & PM1_PWRBTN_STS)) {
pm1_status |= PM1_PWRBTN_STS;
sci_update(ctx);
}
pthread_mutex_unlock(&pm_lock);
}
/*
* Power Management 1 Control Register
*
* This is mostly unimplemented except that we wish to handle writes that
* set SLP_EN to handle S5 (soft power off).
*/
static uint16_t pm1_control;
#define PM1_SCI_EN 0x0001
#define PM1_SLP_TYP 0x1c00
#define PM1_SLP_EN 0x2000
#define PM1_ALWAYS_ZERO 0xc003
static int
pm1_control_handler(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
uint32_t *eax, void *arg)
{
int error;
if (bytes != 2)
return -1;
if (in)
*eax = pm1_control;
else {
/*
* Various bits are write-only or reserved, so force them
* to zero in pm1_control. Always preserve SCI_EN as OSPM
* can never change it.
*/
pm1_control = (pm1_control & PM1_SCI_EN) |
(*eax & ~(PM1_SLP_EN | PM1_ALWAYS_ZERO));
/*
* If SLP_EN is set, check for S5. ACRN-DM's _S5_ method
* says that '5' should be stored in SLP_TYP for S5.
*/
if (*eax & PM1_SLP_EN) {
if ((pm1_control & PM1_SLP_TYP) >> 10 == 5) {
error = vm_suspend(ctx, VM_SUSPEND_POWEROFF);
assert(error == 0 || errno == EALREADY);
}
}
}
return 0;
}
INOUT_PORT(pm1_control, PM1A_CNT_ADDR, IOPORT_F_INOUT, pm1_control_handler);
SYSRES_IO(PM1A_EVT_ADDR, 8);
/*
* ACPI SMI Command Register
*
* This write-only register is used to enable and disable ACPI.
*/
static int
smi_cmd_handler(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
uint32_t *eax, void *arg)
{
assert(!in);
if (bytes != 1)
return -1;
pthread_mutex_lock(&pm_lock);
switch (*eax) {
case ACPI_ENABLE:
pm1_control |= PM1_SCI_EN;
if (power_button == NULL) {
power_button = mevent_add(SIGTERM, EVF_SIGNAL,
power_button_handler, ctx);
old_power_handler = signal(SIGTERM, SIG_IGN);
}
break;
case ACPI_DISABLE:
pm1_control &= ~PM1_SCI_EN;
if (power_button != NULL) {
mevent_delete(power_button);
power_button = NULL;
signal(SIGTERM, old_power_handler);
}
break;
}
pthread_mutex_unlock(&pm_lock);
return 0;
}
INOUT_PORT(smi_cmd, SMI_CMD, IOPORT_F_OUT, smi_cmd_handler);
SYSRES_IO(SMI_CMD, 1);
void
sci_init(struct vmctx *ctx)
{
/*
* Mark ACPI's SCI as level trigger and bump its use count
* in the PIRQ router.
*/
pci_irq_use(SCI_INT);
}

133
devicemodel/core/console.c Normal file

@@ -0,0 +1,133 @@
/*-
* Copyright (c) 2015 Tycho Nightingale <tycho.nightingale@pluribusnetworks.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <stdio.h>
#include <sys/cdefs.h>
#include <sys/types.h>
#include "gc.h"
#include "console.h"
static struct {
struct gfx_ctx *gc;
fb_render_func_t fb_render_cb;
void *fb_arg;
kbd_event_func_t kbd_event_cb;
void *kbd_arg;
int kbd_priority;
ptr_event_func_t ptr_event_cb;
void *ptr_arg;
int ptr_priority;
} console;
void
console_init(int w, int h, void *fbaddr)
{
console.gc = gc_init(w, h, fbaddr);
}
void
console_set_fbaddr(void *fbaddr)
{
gc_set_fbaddr(console.gc, fbaddr);
}
struct gfx_ctx_image *
console_get_image(void)
{
struct gfx_ctx_image *image;
image = gc_get_image(console.gc);
return image;
}
void
console_fb_register(fb_render_func_t render_cb, void *arg)
{
console.fb_render_cb = render_cb;
console.fb_arg = arg;
}
void
console_refresh(void)
{
if (console.fb_render_cb)
(*console.fb_render_cb)(console.gc, console.fb_arg);
}
void
console_kbd_register(kbd_event_func_t event_cb, void *arg, int pri)
{
if (pri > console.kbd_priority) {
console.kbd_event_cb = event_cb;
console.kbd_arg = arg;
console.kbd_priority = pri;
}
}
void
console_kbd_unregister(void)
{
console.kbd_event_cb = NULL;
console.kbd_arg = NULL;
console.kbd_priority = 0;
}
void
console_ptr_register(ptr_event_func_t event_cb, void *arg, int pri)
{
if (pri > console.ptr_priority) {
console.ptr_event_cb = event_cb;
console.ptr_arg = arg;
console.ptr_priority = pri;
}
}
void
console_ptr_unregister()
{
console.ptr_event_cb = NULL;
console.ptr_arg = NULL;
console.ptr_priority = 0;
}
void
console_key_event(int down, uint32_t keysym)
{
if (console.kbd_event_cb)
(*console.kbd_event_cb)(down, keysym, console.kbd_arg);
}
void
console_ptr_event(uint8_t button, int x, int y)
{
if (console.ptr_event_cb)
(*console.ptr_event_cb)(button, x, y, console.ptr_arg);
}

170
devicemodel/core/consport.c Normal file

@@ -0,0 +1,170 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/select.h>
#include <err.h>
#include <stdio.h>
#include <stdlib.h>
#include <termios.h>
#include <unistd.h>
#include <stdbool.h>
#include <sysexits.h>
#include "inout.h"
#include "lpc.h"
#define BVM_CONSOLE_PORT 0x220
#define BVM_CONS_SIG ('b' << 8 | 'v')
static bool bvmcons_enabled = false;
static struct termios tio_orig, tio_new;
static void
ttyclose(void)
{
tcsetattr(STDIN_FILENO, TCSANOW, &tio_orig);
}
static void
ttyopen(void)
{
tcgetattr(STDIN_FILENO, &tio_orig);
cfmakeraw(&tio_new);
tcsetattr(STDIN_FILENO, TCSANOW, &tio_new);
atexit(ttyclose);
}
static bool
tty_char_available(void)
{
fd_set rfds;
struct timeval tv;
FD_ZERO(&rfds);
FD_SET(STDIN_FILENO, &rfds);
tv.tv_sec = 0;
tv.tv_usec = 0;
if (select(STDIN_FILENO + 1, &rfds, NULL, NULL, &tv) > 0)
return true;
else
return false;
}
static int
ttyread(void)
{
char rb;
if (tty_char_available()) {
if (read(STDIN_FILENO, &rb, 1) > 0)
return (rb & 0xff);
}
return -1;
}
static int
ttywrite(unsigned char wb)
{
if (write(STDOUT_FILENO, &wb, 1) > 0)
return 1;
return -1;
}
static int
console_handler(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
uint32_t *eax, void *arg)
{
static int opened;
if (bytes == 2 && in) {
*eax = BVM_CONS_SIG;
return 0;
}
/*
* Guests might probe this port to look for old ISA devices
* using single-byte reads. Return 0xff for those.
*/
if (bytes == 1 && in) {
*eax = 0xff;
return 0;
}
if (bytes != 4)
return -1;
if (!opened) {
ttyopen();
opened = 1;
}
if (in)
*eax = ttyread();
else
ttywrite(*eax);
return 0;
}
SYSRES_IO(BVM_CONSOLE_PORT, 4);
static struct inout_port consport = {
"bvmcons",
BVM_CONSOLE_PORT,
1,
IOPORT_F_INOUT,
console_handler
};
void
enable_bvmcons(void)
{
bvmcons_enabled = true;
}
int
init_bvmcons(void)
{
if (bvmcons_enabled)
register_inout(&consport);
return 0;
}
void
deinit_bvmcons(void)
{
if (bvmcons_enabled)
unregister_inout(&consport);
}

76
devicemodel/core/gc.c Normal file

@@ -0,0 +1,76 @@
#include <sys/cdefs.h>
#include <sys/types.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include "gc.h"
struct gfx_ctx {
struct gfx_ctx_image *gc_image;
int raw;
};
struct gfx_ctx *
gc_init(int width, int height, void *fbaddr)
{
struct gfx_ctx *gc;
struct gfx_ctx_image *gc_image;
gc = calloc(1, sizeof(struct gfx_ctx));
assert(gc != NULL);
gc_image = calloc(1, sizeof(struct gfx_ctx_image));
assert(gc_image != NULL);
gc_image->width = width;
gc_image->height = height;
if (fbaddr) {
gc_image->data = fbaddr;
gc->raw = 1;
} else {
gc_image->data = calloc(width * height, sizeof(uint32_t));
gc->raw = 0;
}
gc->gc_image = gc_image;
return gc;
}
void
gc_set_fbaddr(struct gfx_ctx *gc, void *fbaddr)
{
gc->raw = 1;
if (gc->gc_image->data && gc->gc_image->data != fbaddr)
free(gc->gc_image->data);
gc->gc_image->data = fbaddr;
}
void
gc_resize(struct gfx_ctx *gc, int width, int height)
{
struct gfx_ctx_image *gc_image;
gc_image = gc->gc_image;
gc_image->width = width;
gc_image->height = height;
if (!gc->raw) {
gc_image->data = realloc(gc_image->data,
width * height * sizeof(uint32_t));
if (gc_image->data != NULL)
memset(gc_image->data, 0, width * height *
sizeof(uint32_t));
}
}
struct gfx_ctx_image *
gc_get_image(struct gfx_ctx *gc)
{
if (gc == NULL)
return NULL;
return gc->gc_image;
}

530
devicemodel/core/hugetlb.c Normal file

@@ -0,0 +1,530 @@
/*-
* Copyright (c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <unistd.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/vfs.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <ctype.h>
#include <fcntl.h>
#include <errno.h>
#include <assert.h>
#include "vmm.h"
#include "vhm_ioctl_defs.h"
#include "vmmapi.h"
#define HUGETLB_LV1 0
#define HUGETLB_LV2 1
#define HUGETLB_LV_MAX 2
#define MAX_PATH_LEN 128
#define HUGETLBFS_MAGIC 0x958458f6
/* HugePage Level 1 for 2M page, Level 2 for 1G page */
#define PATH_HUGETLB_LV1 "/run/hugepage/acrn/huge_lv1/"
#define OPT_HUGETLB_LV1 "pagesize=2M"
#define PATH_HUGETLB_LV2 "/run/hugepage/acrn/huge_lv2/"
#define OPT_HUGETLB_LV2 "pagesize=1G"
/* hugetlb_info records private information for one specific hugetlbfs:
* - mounted: whether hugetlbfs is mounted at mount_path below
* - mount_path: hugetlbfs mount path
* - mount_opt: hugetlbfs mount option
* - node_path: path of the hugetlbfs node
* - pg_size: page size of this hugetlbfs
* - lowmem: amount of lowmem this hugetlbfs needs to allocate
* - highmem: amount of highmem this hugetlbfs needs to allocate
*/
struct hugetlb_info {
bool mounted;
char *mount_path;
char *mount_opt;
char node_path[MAX_PATH_LEN];
int fd;
int pg_size;
size_t lowmem;
size_t highmem;
};
static struct hugetlb_info hugetlb_priv[HUGETLB_LV_MAX] = {
{
.mounted = false,
.mount_path = PATH_HUGETLB_LV1,
.mount_opt = OPT_HUGETLB_LV1,
.fd = -1,
.pg_size = 0,
.lowmem = 0,
.highmem = 0,
},
{
.mounted = false,
.mount_path = PATH_HUGETLB_LV2,
.mount_opt = OPT_HUGETLB_LV2,
.fd = -1,
.pg_size = 0,
.lowmem = 0,
.highmem = 0,
},
};
static void *ptr;
static size_t total_size;
static int hugetlb_lv_max;
static int open_hugetlbfs(struct vmctx *ctx, int level)
{
char uuid_str[48];
uint8_t UUID[16];
char *path;
struct statfs fs;
if (level >= HUGETLB_LV_MAX) {
perror("exceed max hugetlb level");
return -EINVAL;
}
path = hugetlb_priv[level].node_path;
strncpy(path, hugetlb_priv[level].mount_path, MAX_PATH_LEN);
/* UUID will use 32 bytes */
if (strlen(path) + 32 > MAX_PATH_LEN) {
perror("PATH overflow");
return -ENOMEM;
}
uuid_copy(UUID, ctx->vm_uuid);
sprintf(uuid_str, "%02X%02X%02X%02X%02X%02X%02X%02X"
"%02X%02X%02X%02X%02X%02X%02X%02X\n",
UUID[0], UUID[1], UUID[2], UUID[3],
UUID[4], UUID[5], UUID[6], UUID[7],
UUID[8], UUID[9], UUID[10], UUID[11],
UUID[12], UUID[13], UUID[14], UUID[15]);
strncat(path, uuid_str, strlen(uuid_str));
printf("open hugetlbfs file %s\n", path);
hugetlb_priv[level].fd = open(path, O_CREAT | O_RDWR, 0644);
if (hugetlb_priv[level].fd < 0) {
perror("Open hugtlbfs failed");
return -EINVAL;
}
/* get the pagesize */
if (fstatfs(hugetlb_priv[level].fd, &fs) != 0) {
perror("Failed to get statfs fo hugetlbfs");
return -EINVAL;
}
if (fs.f_type == HUGETLBFS_MAGIC) {
/* get hugepage size from fstat*/
hugetlb_priv[level].pg_size = fs.f_bsize;
} else {
close(hugetlb_priv[level].fd);
unlink(hugetlb_priv[level].node_path);
hugetlb_priv[level].fd = -1;
return -EINVAL;
}
return 0;
}
static void close_hugetlbfs(int level)
{
if (level >= HUGETLB_LV_MAX) {
perror("exceed max hugetlb level");
return;
}
if (hugetlb_priv[level].fd >= 0) {
close(hugetlb_priv[level].fd);
hugetlb_priv[level].fd = -1;
unlink(hugetlb_priv[level].node_path);
hugetlb_priv[level].pg_size = 0;
}
}
static bool should_enable_hugetlb_level(int level)
{
if (level >= HUGETLB_LV_MAX) {
perror("exceed max hugetlb level");
return false;
}
return (hugetlb_priv[level].lowmem > 0 ||
hugetlb_priv[level].highmem > 0);
}
/*
* level : hugepage level
* len : region length for mmap
* offset : region start offset from ctx->baseaddr
* skip : skip offset in different level hugetlbfs fd
*/
static int mmap_hugetlbfs(struct vmctx *ctx, int level, size_t len,
size_t offset, size_t skip)
{
char *addr;
size_t pagesz = 0;
int fd, i;
if (level >= HUGETLB_LV_MAX) {
perror("exceed max hugetlb level");
return -EINVAL;
}
fd = hugetlb_priv[level].fd;
addr = mmap(ctx->baseaddr + offset, len, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_FIXED, fd, skip);
if (addr == MAP_FAILED)
return -ENOMEM;
printf("mmap 0x%lx@%p\n", len, addr);
/* pre-allocate hugepages by touching them */
pagesz = hugetlb_priv[level].pg_size;
printf("touch %ld pages with pagesz 0x%lx\n", len/pagesz, pagesz);
for (i = 0; i < len/pagesz; i++) {
*(volatile char *)addr = *addr;
addr += pagesz;
}
return 0;
}
static int mmap_hugetlbfs_lowmem(struct vmctx *ctx)
{
size_t len, offset, skip;
int level, ret = 0, pg_size;
offset = skip = 0;
for (level = hugetlb_lv_max - 1; level >= HUGETLB_LV1; level--) {
len = hugetlb_priv[level].lowmem;
pg_size = hugetlb_priv[level].pg_size;
while (len > 0) {
ret = mmap_hugetlbfs(ctx, level, len, offset, skip);
if (ret < 0 && level > HUGETLB_LV1) {
len -= pg_size;
hugetlb_priv[level].lowmem = len;
hugetlb_priv[level-1].lowmem += pg_size;
} else if (ret < 0 && level == HUGETLB_LV1)
return ret;
else {
offset += len;
break;
}
}
}
return 0;
}
static int mmap_hugetlbfs_highmem(struct vmctx *ctx)
{
size_t len, offset, skip;
int level, ret = 0, pg_size;
offset = 4 * GB;
for (level = hugetlb_lv_max - 1; level >= HUGETLB_LV1; level--) {
skip = hugetlb_priv[level].lowmem;
len = hugetlb_priv[level].highmem;
pg_size = hugetlb_priv[level].pg_size;
while (len > 0) {
ret = mmap_hugetlbfs(ctx, level, len, offset, skip);
if (ret < 0 && level > HUGETLB_LV1) {
len -= pg_size;
hugetlb_priv[level].highmem = len;
hugetlb_priv[level-1].highmem += pg_size;
} else if (ret < 0 && level == HUGETLB_LV1)
return ret;
else {
offset += len;
break;
}
}
}
return 0;
}
static int create_hugetlb_dirs(int level)
{
char tmp_path[MAX_PATH_LEN], *path;
int i, len;
if (level >= HUGETLB_LV_MAX) {
perror("exceed max hugetlb level");
return -EINVAL;
}
path = hugetlb_priv[level].mount_path;
len = strlen(path);
if (len >= MAX_PATH_LEN) {
perror("exceed max path len");
return -EINVAL;
}
strcpy(tmp_path, path);
if (tmp_path[len - 1] != '/')
strcat(tmp_path, "/");
len = strlen(tmp_path);
for (i = 1; i < len; i++) {
if (tmp_path[i] == '/') {
tmp_path[i] = 0;
if (access(tmp_path, F_OK) != 0) {
if (mkdir(tmp_path, 0755) < 0) {
perror("mkdir failed");
return -1;
}
}
tmp_path[i] = '/';
}
}
return 0;
}
static int mount_hugetlbfs(int level)
{
int ret;
if (level >= HUGETLB_LV_MAX) {
perror("exceed max hugetlb level");
return -EINVAL;
}
if (hugetlb_priv[level].mounted)
return 0;
/* only x86 is supported: HUGETLB level-1 is a 2M page, level-2 a 1G page */
ret = mount("none", hugetlb_priv[level].mount_path, "hugetlbfs",
0, hugetlb_priv[level].mount_opt);
if (ret == 0)
hugetlb_priv[level].mounted = true;
return ret;
}
static void umount_hugetlbfs(int level)
{
if (level >= HUGETLB_LV_MAX) {
perror("exceed max hugetlb level");
return;
}
if (hugetlb_priv[level].mounted) {
umount(hugetlb_priv[level].mount_path);
hugetlb_priv[level].mounted = false;
}
}
bool check_hugetlb_support(void)
{
int level;
for (level = HUGETLB_LV1; level < HUGETLB_LV_MAX; level++) {
if (create_hugetlb_dirs(level) < 0)
return false;
}
for (level = HUGETLB_LV1; level < HUGETLB_LV_MAX; level++) {
if (mount_hugetlbfs(level) < 0) {
level--;
break;
}
}
if (level < HUGETLB_LV1) /* mount fail for level 1 */
return false;
else if (level == HUGETLB_LV1) /* mount fail for level 2 */
printf("WARNING: only level 1 hugetlb supported");
hugetlb_lv_max = level;
return true;
}
int hugetlb_setup_memory(struct vmctx *ctx)
{
int level;
size_t lowmem, highmem;
/* The first time the DM starts a UOS, hugetlbfs is already mounted by
* check_hugetlb_support; on reboot it must be re-mounted here because
* it was unmounted by hugetlb_unsetup_memory.
* TODO: a correct reboot process should not change the memory layout,
* so setup_memory should be removed from the reboot path.
*/
for (level = HUGETLB_LV1; level < hugetlb_lv_max; level++)
mount_hugetlbfs(level);
/* open hugetlbfs and get pagesize for two level */
for (level = HUGETLB_LV1; level < hugetlb_lv_max; level++) {
if (open_hugetlbfs(ctx, level) < 0) {
perror("failed to open hugetlbfs");
goto err;
}
}
/* all memory should be at least aligned with
* hugetlb_priv[HUGETLB_LV1].pg_size */
ctx->lowmem =
ALIGN_DOWN(ctx->lowmem, hugetlb_priv[HUGETLB_LV1].pg_size);
ctx->highmem =
ALIGN_DOWN(ctx->highmem, hugetlb_priv[HUGETLB_LV1].pg_size);
if (ctx->highmem > 0)
total_size = 4 * GB + ctx->highmem;
else
total_size = ctx->lowmem;
if (total_size == 0) {
perror("vm request 0 memory");
goto err;
}
/* check & set hugetlb level memory size for lowmem & highmem */
highmem = ctx->highmem;
lowmem = ctx->lowmem;
for (level = hugetlb_lv_max - 1; level >= HUGETLB_LV1; level--) {
hugetlb_priv[level].lowmem =
ALIGN_DOWN(lowmem, hugetlb_priv[level].pg_size);
hugetlb_priv[level].highmem =
ALIGN_DOWN(highmem, hugetlb_priv[level].pg_size);
if (level > HUGETLB_LV1) {
hugetlb_priv[level-1].lowmem = lowmem =
lowmem - hugetlb_priv[level].lowmem;
hugetlb_priv[level-1].highmem = highmem =
highmem - hugetlb_priv[level].highmem;
}
}
/* align up total size with huge page size for vma alignment */
for (level = hugetlb_lv_max - 1; level >= HUGETLB_LV1; level--) {
if (should_enable_hugetlb_level(level)) {
total_size += hugetlb_priv[level].pg_size;
break;
}
}
/* dump the hugepage configuration we are trying to set up */
printf("\ntry to setup hugepage with:\n");
for (level = HUGETLB_LV1; level < hugetlb_lv_max; level++) {
printf("\tlevel %d - lowmem 0x%lx, highmem 0x%lx\n", level,
hugetlb_priv[level].lowmem,
hugetlb_priv[level].highmem);
}
printf("total_size 0x%lx\n\n", total_size);
/* basic overview vma */
ptr = mmap(NULL, total_size, PROT_NONE,
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
if (ptr == MAP_FAILED) {
perror("anony mmap fail");
goto err;
}
/* align up baseaddr according to hugepage level size */
for (level = hugetlb_lv_max - 1; level >= HUGETLB_LV1; level--) {
if (should_enable_hugetlb_level(level)) {
ctx->baseaddr = (void *)ALIGN_UP((size_t)ptr,
hugetlb_priv[level].pg_size);
break;
}
}
printf("mmap ptr 0x%p -> baseaddr 0x%p\n", ptr, ctx->baseaddr);
/* mmap lowmem */
if (mmap_hugetlbfs_lowmem(ctx) < 0)
goto err;
/* mmap highmem */
if (mmap_hugetlbfs_highmem(ctx) < 0)
goto err;
/* dump the hugepage configuration actually set up */
printf("\nreally setup hugepage with:\n");
for (level = HUGETLB_LV1; level < hugetlb_lv_max; level++) {
printf("\tlevel %d - lowmem 0x%lx, highmem 0x%lx\n", level,
hugetlb_priv[level].lowmem,
hugetlb_priv[level].highmem);
}
printf("total_size 0x%lx\n\n", total_size);
/* map ept for lowmem*/
if (vm_map_memseg_vma(ctx, ctx->lowmem, 0,
(uint64_t)ctx->baseaddr, PROT_ALL) < 0)
goto err;
/* map ept for highmem*/
if (ctx->highmem > 0) {
if (vm_map_memseg_vma(ctx, ctx->highmem, 4 * GB,
(uint64_t)(ctx->baseaddr + 4 * GB), PROT_ALL) < 0)
goto err;
}
return 0;
err:
if (ptr) {
munmap(ptr, total_size);
ptr = NULL;
}
for (level = HUGETLB_LV1; level < hugetlb_lv_max; level++) {
close_hugetlbfs(level);
umount_hugetlbfs(level);
}
return -ENOMEM;
}
void hugetlb_unsetup_memory(struct vmctx *ctx)
{
int level;
if (total_size > 0) {
munmap(ptr, total_size);
total_size = 0;
ptr = NULL;
}
for (level = HUGETLB_LV1; level < hugetlb_lv_max; level++) {
close_hugetlbfs(level);
umount_hugetlbfs(level);
}
}

192
devicemodel/core/inout.c Normal file

@@ -0,0 +1,192 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/mman.h>
#include <linux/uio.h>
#include <stdio.h>
#include <stdbool.h>
#include <string.h>
#include <assert.h>
#include "vmm.h"
#include "vmmapi.h"
#include "dm.h"
#include "inout.h"
SET_DECLARE(inout_port_set, struct inout_port);
#define MAX_IOPORTS (1 << 16)
#define VERIFY_IOPORT(port, size) \
assert((port) >= 0 && (size) > 0 && ((port) + (size)) <= MAX_IOPORTS)
static struct {
const char *name;
int flags;
inout_func_t handler;
void *arg;
} inout_handlers[MAX_IOPORTS];
static int
default_inout(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
uint32_t *eax, void *arg)
{
if (in) {
switch (bytes) {
case 4:
*eax = 0xffffffff;
break;
case 2:
*eax = 0xffff;
break;
case 1:
*eax = 0xff;
break;
}
}
return 0;
}
static void
register_default_iohandler(int start, int size)
{
struct inout_port iop;
VERIFY_IOPORT(start, size);
bzero(&iop, sizeof(iop));
iop.name = "default";
iop.port = start;
iop.size = size;
iop.flags = IOPORT_F_INOUT | IOPORT_F_DEFAULT;
iop.handler = default_inout;
register_inout(&iop);
}
int
emulate_inout(struct vmctx *ctx, int *pvcpu, struct pio_request *pio_request,
int strict)
{
int bytes, flags, in, port;
inout_func_t handler;
void *arg;
int retval;
bytes = pio_request->size;
in = (pio_request->direction == REQUEST_READ);
port = pio_request->address;
assert(port < MAX_IOPORTS);
assert(bytes == 1 || bytes == 2 || bytes == 4);
handler = inout_handlers[port].handler;
if (strict && handler == default_inout)
return -1;
flags = inout_handlers[port].flags;
arg = inout_handlers[port].arg;
if (pio_request->direction == REQUEST_READ) {
if (!(flags & IOPORT_F_IN))
return -1;
} else {
if (!(flags & IOPORT_F_OUT))
return -1;
}
retval = handler(ctx, *pvcpu, in, port, bytes,
(uint32_t *)&(pio_request->value), arg);
return retval;
}
void
init_inout(void)
{
struct inout_port **iopp, *iop;
/*
* Set up the default handler for all ports
*/
register_default_iohandler(0, MAX_IOPORTS);
/*
* Overwrite with specified handlers
*/
SET_FOREACH(iopp, inout_port_set) {
iop = *iopp;
assert(iop->port < MAX_IOPORTS);
inout_handlers[iop->port].name = iop->name;
inout_handlers[iop->port].flags = iop->flags;
inout_handlers[iop->port].handler = iop->handler;
inout_handlers[iop->port].arg = NULL;
}
}
int
register_inout(struct inout_port *iop)
{
int i;
VERIFY_IOPORT(iop->port, iop->size);
/*
* Verify that the new registration is not overwriting an already
* allocated i/o range.
*/
if ((iop->flags & IOPORT_F_DEFAULT) == 0) {
for (i = iop->port; i < iop->port + iop->size; i++) {
if ((inout_handlers[i].flags & IOPORT_F_DEFAULT) == 0)
return -1;
}
}
for (i = iop->port; i < iop->port + iop->size; i++) {
inout_handlers[i].name = iop->name;
inout_handlers[i].flags = iop->flags;
inout_handlers[i].handler = iop->handler;
inout_handlers[i].arg = iop->arg;
}
return 0;
}
int
unregister_inout(struct inout_port *iop)
{
VERIFY_IOPORT(iop->port, iop->size);
assert(inout_handlers[iop->port].name == iop->name);
register_default_iohandler(iop->port, iop->size);
return 0;
}

914
devicemodel/core/main.c Normal file

@@ -0,0 +1,914 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <err.h>
#include <errno.h>
#include <libgen.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <sysexits.h>
#include <stdbool.h>
#include <getopt.h>
#include "types.h"
#include "vmm.h"
#include "vmmapi.h"
#include "sw_load.h"
#include "cpuset.h"
#include "dm.h"
#include "acpi.h"
#include "atkbdc.h"
#include "inout.h"
#include "ioapic.h"
#include "mem.h"
#include "mevent.h"
#include "mptbl.h"
#include "pci_core.h"
#include "irq.h"
#include "lpc.h"
#include "smbiostbl.h"
#include "rtc.h"
#include "version.h"
#include "sw_load.h"
#include "monitor.h"
#include "ioc.h"
#define GUEST_NIO_PORT 0x488 /* guest upcalls via i/o port */
typedef int (*vmexit_handler_t)(struct vmctx *,
struct vhm_request *, int *vcpu);
char *vmname;
int guest_ncpus;
char *guest_uuid_str;
char *vsbl_file_name;
uint8_t trusty_enabled;
bool stdio_in_use;
bool hugetlb;
static int guest_vmexit_on_hlt, guest_vmexit_on_pause;
static int virtio_msix = 1;
static int x2apic_mode; /* default is xAPIC */
static int strictio;
static int strictmsr = 1;
static int acpi;
static char *progname;
static const int BSP;
static cpuset_t cpumask;
static void vm_loop(struct vmctx *ctx);
static int quit_vm_loop;
static char vhm_request_page[4096] __attribute__ ((aligned(4096)));
static struct vhm_request *vhm_req_buf =
(struct vhm_request *)&vhm_request_page;
struct dmstats {
uint64_t vmexit_bogus;
uint64_t vmexit_reqidle;
uint64_t vmexit_hlt;
uint64_t vmexit_pause;
uint64_t vmexit_mtrap;
uint64_t cpu_switch_rotate;
uint64_t cpu_switch_direct;
uint64_t vmexit_mmio_emul;
} stats;
struct mt_vmm_info {
pthread_t mt_thr;
struct vmctx *mt_ctx;
int mt_vcpu;
} mt_vmm_info[VM_MAXCPU];
static cpuset_t *vcpumap[VM_MAXCPU] = { NULL };
static struct vmctx *_ctx;
static void
usage(int code)
{
fprintf(stderr,
"Usage: %s [-abehuwxACHPSTWY] [-c vcpus] [-g <gdb port>] [-l <lpc>]\n"
" %*s [-m mem] [-p vcpu:hostcpu] [-s <pci>] [-U uuid] \n"
" %*s [--vsbl vsbl_file_name] [--part_info part_info_name]\n"
" %*s [--enable_trusty] <vm>\n"
" -a: local apic is in xAPIC mode (deprecated)\n"
" -A: create ACPI tables\n"
" -c: # cpus (default 1)\n"
" -C: include guest memory in core file\n"
" -e: exit on unhandled I/O access\n"
" -g: gdb port\n"
" -h: help\n"
" -H: vmexit from the guest on hlt\n"
" -l: LPC device configuration\n"
" -m: memory size in MB\n"
" -M: do not hide INTx link for MSI&INTx capable ptdev\n"
" -p: pin 'vcpu' to 'hostcpu'\n"
" -P: vmexit from the guest on pause\n"
" -s: <slot,driver,configinfo> PCI slot config\n"
" -S: guest memory cannot be swapped\n"
" -u: RTC keeps UTC time\n"
" -U: uuid\n"
" -w: ignore unimplemented MSRs\n"
" -W: force virtio to use single-vector MSI\n"
" -T: use hugetlb for memory allocation\n"
" -x: local apic is in x2APIC mode\n"
" -Y: disable MPtable generation\n"
" -k: kernel image path\n"
" -r: ramdisk image path\n"
" -B: bootargs for kernel\n"
" -v: version\n"
" -i: ioc boot parameters\n"
" --vsbl: vsbl file path\n"
" --part_info: guest partition info file path\n"
" --enable_trusty: enable trusty for guest\n",
progname, (int)strlen(progname), "", (int)strlen(progname), "",
(int)strlen(progname), "");
exit(code);
}
static void
print_version(void)
{
if (DM_RC_VERSION)
fprintf(stderr, "DM version is: %d.%d-%d-%s, build by %s@%s\n",
DM_MAJOR_VERSION, DM_MINOR_VERSION, DM_RC_VERSION,
DM_BUILD_VERSION, DM_BUILD_USER, DM_BUILD_TIME);
else
fprintf(stderr, "DM version is: %d.%d-%s, build by %s@%s\n",
DM_MAJOR_VERSION, DM_MINOR_VERSION, DM_BUILD_VERSION,
DM_BUILD_USER, DM_BUILD_TIME);
exit(0);
}
static int
pincpu_parse(const char *opt)
{
int vcpu, pcpu;
if (sscanf(opt, "%d:%d", &vcpu, &pcpu) != 2) {
fprintf(stderr, "invalid format: %s\n", opt);
return -1;
}
if (vcpu < 0 || vcpu >= VM_MAXCPU) {
fprintf(stderr, "vcpu '%d' outside valid range from 0 to %d\n",
vcpu, VM_MAXCPU - 1);
return -1;
}
if (pcpu < 0 || pcpu >= CPU_SETSIZE) {
fprintf(stderr,
"hostcpu '%d' outside valid range from 0 to %d\n",
pcpu, CPU_SETSIZE - 1);
return -1;
}
if (vcpumap[vcpu] == NULL) {
vcpumap[vcpu] = malloc(sizeof(cpuset_t));
if (vcpumap[vcpu] == NULL) {
perror("malloc");
return -1;
}
CPU_ZERO(vcpumap[vcpu]);
}
CPU_SET(pcpu, vcpumap[vcpu]);
return 0;
}
void *
paddr_guest2host(struct vmctx *ctx, uintptr_t gaddr, size_t len)
{
return vm_map_gpa(ctx, gaddr, len);
}
void *
dm_gpa2hva(uint64_t gpa, size_t size)
{
return vm_map_gpa(_ctx, gpa, size);
}
int
virtio_uses_msix(void)
{
return virtio_msix;
}
static void *
start_thread(void *param)
{
char tname[MAXCOMLEN + 1];
struct mt_vmm_info *mtp;
int vcpu;
mtp = param;
vcpu = mtp->mt_vcpu;
snprintf(tname, sizeof(tname), "vcpu %d", vcpu);
pthread_setname_np(mtp->mt_thr, tname);
vm_loop(mtp->mt_ctx);
/* reset or halt */
return NULL;
}
static void
add_cpu(struct vmctx *ctx, int guest_ncpus)
{
int i;
int error;
for (i = 0; i < guest_ncpus; i++) {
error = vm_create_vcpu(ctx, i);
if (error != 0)
err(EX_OSERR, "could not create CPU %d", i);
CPU_SET_ATOMIC(i, &cpumask);
mt_vmm_info[i].mt_ctx = ctx;
mt_vmm_info[i].mt_vcpu = i;
}
error = pthread_create(&mt_vmm_info[0].mt_thr, NULL,
start_thread, &mt_vmm_info[0]);
assert(error == 0);
}
static int
delete_cpu(struct vmctx *ctx, int vcpu)
{
if (!CPU_ISSET(vcpu, &cpumask)) {
fprintf(stderr, "Attempting to delete unknown cpu %d\n", vcpu);
exit(1);
}
/* wait for vm_loop cleanup */
quit_vm_loop = 1;
vm_destroy_ioreq_client(ctx);
while (quit_vm_loop)
usleep(10000);
CPU_CLR_ATOMIC(vcpu, &cpumask);
return CPU_EMPTY(&cpumask);
}
static int
vmexit_inout(struct vmctx *ctx, struct vhm_request *vhm_req, int *pvcpu)
{
int error;
int bytes, port, in;
port = vhm_req->reqs.pio_request.address;
bytes = vhm_req->reqs.pio_request.size;
in = (vhm_req->reqs.pio_request.direction == REQUEST_READ);
error = emulate_inout(ctx, pvcpu, &vhm_req->reqs.pio_request, strictio);
if (error) {
fprintf(stderr, "Unhandled %s%c 0x%04x\n",
in ? "in" : "out",
bytes == 1 ? 'b' : (bytes == 2 ? 'w' : 'l'),
port);
return VMEXIT_ABORT;
} else {
return VMEXIT_CONTINUE;
}
}
static int
vmexit_mmio_emul(struct vmctx *ctx, struct vhm_request *vhm_req, int *pvcpu)
{
int err;
stats.vmexit_mmio_emul++;
err = emulate_mem(ctx, &vhm_req->reqs.mmio_request);
if (err) {
if (err == -ESRCH)
fprintf(stderr, "Unhandled memory access to 0x%lx\n",
vhm_req->reqs.mmio_request.address);
fprintf(stderr, "Failed to emulate instruction [");
fprintf(stderr, "mmio address 0x%lx, size %ld",
vhm_req->reqs.mmio_request.address,
vhm_req->reqs.mmio_request.size);
vhm_req->processed = REQ_STATE_FAILED;
return VMEXIT_ABORT;
}
vhm_req->processed = REQ_STATE_SUCCESS;
return VMEXIT_CONTINUE;
}
static int
vmexit_pci_emul(struct vmctx *ctx, struct vhm_request *vhm_req, int *pvcpu)
{
int err, in = (vhm_req->reqs.pci_request.direction == REQUEST_READ);
err = emulate_pci_cfgrw(ctx, *pvcpu, in,
vhm_req->reqs.pci_request.bus,
vhm_req->reqs.pci_request.dev,
vhm_req->reqs.pci_request.func,
vhm_req->reqs.pci_request.reg,
vhm_req->reqs.pci_request.size,
&vhm_req->reqs.pci_request.value);
if (err) {
fprintf(stderr, "Unhandled pci cfg rw at %x:%x.%x reg 0x%x\n",
vhm_req->reqs.pci_request.bus,
vhm_req->reqs.pci_request.dev,
vhm_req->reqs.pci_request.func,
vhm_req->reqs.pci_request.reg);
return VMEXIT_ABORT;
}
vhm_req->processed = REQ_STATE_SUCCESS;
return VMEXIT_CONTINUE;
}
#define DEBUG_EPT_MISCONFIG
#ifdef DEBUG_EPT_MISCONFIG
#define EXIT_REASON_EPT_MISCONFIG 49
#define VMCS_GUEST_PHYSICAL_ADDRESS 0x00002400
#define VMCS_IDENT(x) ((x) | 0x80000000)
#endif /* #ifdef DEBUG_EPT_MISCONFIG */
static int
vmexit_bogus(struct vmctx *ctx, struct vhm_request *vhm_req, int *pvcpu)
{
stats.vmexit_bogus++;
return VMEXIT_CONTINUE;
}
static int
vmexit_reqidle(struct vmctx *ctx, struct vhm_request *vhm_req, int *pvcpu)
{
stats.vmexit_reqidle++;
return VMEXIT_CONTINUE;
}
static int
vmexit_hlt(struct vmctx *ctx, struct vhm_request *vhm_req, int *pvcpu)
{
stats.vmexit_hlt++;
/*
* Just continue execution with the next instruction. We use
* the HLT VM exit as a way to be friendly with the host
* scheduler.
*/
return VMEXIT_CONTINUE;
}
static int
vmexit_pause(struct vmctx *ctx, struct vhm_request *vhm_req, int *pvcpu)
{
stats.vmexit_pause++;
return VMEXIT_CONTINUE;
}
static int
vmexit_mtrap(struct vmctx *ctx, struct vhm_request *vhm_req, int *pvcpu)
{
stats.vmexit_mtrap++;
return VMEXIT_CONTINUE;
}
static vmexit_handler_t handler[VM_EXITCODE_MAX] = {
[VM_EXITCODE_INOUT] = vmexit_inout,
[VM_EXITCODE_MMIO_EMUL] = vmexit_mmio_emul,
[VM_EXITCODE_PCI_CFG] = vmexit_pci_emul,
[VM_EXITCODE_BOGUS] = vmexit_bogus,
[VM_EXITCODE_REQIDLE] = vmexit_reqidle,
[VM_EXITCODE_MTRAP] = vmexit_mtrap,
[VM_EXITCODE_HLT] = vmexit_hlt,
[VM_EXITCODE_PAUSE] = vmexit_pause,
};
static void
handle_vmexit(struct vmctx *ctx, struct vhm_request *vhm_req, int vcpu)
{
int rc;
enum vm_exitcode exitcode;
exitcode = vhm_req->type;
if (exitcode >= VM_EXITCODE_MAX || handler[exitcode] == NULL) {
fprintf(stderr, "handle vmexit: unexpected exitcode 0x%x\n",
exitcode);
exit(1);
}
rc = (*handler[exitcode])(ctx, vhm_req, &vcpu);
switch (rc) {
case VMEXIT_CONTINUE:
vhm_req->processed = REQ_STATE_SUCCESS;
break;
case VMEXIT_ABORT:
vhm_req->processed = REQ_STATE_FAILED;
abort();
default:
exit(1);
}
vm_notify_request_done(ctx, vcpu);
}
static int
vm_init_vdevs(struct vmctx *ctx)
{
int ret;
init_mem();
init_inout();
pci_irq_init(ctx);
atkbdc_init(ctx);
ioapic_init(ctx);
/*
* We don't care about the ioc_init return value for now.
* A return value check will be added once ioc is fully functional.
*/
ret = ioc_init(ctx);
ret = vrtc_init(ctx);
if (ret < 0)
goto vrtc_fail;
sci_init(ctx);
init_bvmcons();
ret = monitor_init(ctx);
if (ret < 0)
goto monitor_fail;
ret = init_pci(ctx);
if (ret < 0)
goto pci_fail;
return 0;
pci_fail:
monitor_close();
monitor_fail:
deinit_bvmcons();
vrtc_deinit(ctx);
vrtc_fail:
ioc_deinit(ctx);
atkbdc_deinit(ctx);
pci_irq_deinit(ctx);
return -1;
}
static void
vm_deinit_vdevs(struct vmctx *ctx)
{
deinit_pci(ctx);
monitor_close();
deinit_bvmcons();
vrtc_deinit(ctx);
ioc_deinit(ctx);
atkbdc_deinit(ctx);
pci_irq_deinit(ctx);
}
static void
vm_loop(struct vmctx *ctx)
{
int error;
ctx->ioreq_client = vm_create_ioreq_client(ctx);
assert(ctx->ioreq_client > 0);
error = vm_run(ctx);
assert(error == 0);
while (1) {
int vcpu;
struct vhm_request *vhm_req;
error = vm_attach_ioreq_client(ctx);
if (error)
break;
for (vcpu = 0; vcpu < 4; vcpu++) {
vhm_req = &vhm_req_buf[vcpu];
if (vhm_req->valid
&& (vhm_req->processed == REQ_STATE_PROCESSING)
&& (vhm_req->client == ctx->ioreq_client))
handle_vmexit(ctx, vhm_req, vcpu);
}
}
quit_vm_loop = 0;
printf("VM loop exit\n");
}
static int
num_vcpus_allowed(struct vmctx *ctx)
{
/* TODO: add an ioctl to get generic information, including the
* number of virtual cpus; hardcoded for now
*/
return VM_MAXCPU;
}
static struct vmctx *
do_open(const char *vmname)
{
struct vmctx *ctx;
int error;
error = vm_create(vmname);
if (error) {
perror("vm_create");
exit(1);
}
ctx = vm_open(vmname);
if (ctx == NULL) {
perror("vm_open");
exit(1);
}
return ctx;
}
static void
sig_handler_term(int signo)
{
printf("Receive SIGINT to terminate application...\n");
vm_set_suspend_mode(VM_SUSPEND_POWEROFF);
mevent_notify();
}
enum {
CMD_OPT_VSBL = 1000,
CMD_OPT_PART_INFO,
CMD_OPT_TRUSTY_ENABLE,
};
static struct option long_options[] = {
{"no_x2apic_mode", no_argument, 0, 'a' },
{"acpi", no_argument, 0, 'A' },
{"bvmcons", no_argument, 0, 'b' },
{"pincpu", required_argument, 0, 'p' },
{"ncpus", required_argument, 0, 'c' },
{"memflags_incore", no_argument, 0, 'C' },
{"gdb_port", required_argument, 0, 'g' },
{"lpc", required_argument, 0, 'l' },
{"pci_slot", required_argument, 0, 's' },
{"memflags_wired", no_argument, 0, 'S' },
{"memsize", required_argument, 0, 'm' },
{"ioapic", no_argument, 0, 'I' },
{"vmexit_pause", no_argument, 0, 'p' },
{"strictio", no_argument, 0, 'e' },
{"rtc_localtime", no_argument, 0, 'u' },
{"uuid", required_argument, 0, 'U' },
{"strictmsr", no_argument, 0, 'w' },
{"virtio_msix", no_argument, 0, 'W' },
{"x2apic_mode", no_argument, 0, 'x' },
{"mptgen", no_argument, 0, 'Y' },
{"kernel", required_argument, 0, 'k' },
{"ramdisk", required_argument, 0, 'r' },
{"bootargs", required_argument, 0, 'B' },
{"ptdev_msi", no_argument, 0, 'M' },
{"version", no_argument, 0, 'v' },
{"gvtargs", required_argument, 0, 'G' },
{"help", no_argument, 0, 'h' },
/* Following cmd option only has long option */
{"vsbl", required_argument, 0, CMD_OPT_VSBL},
{"part_info", required_argument, 0, CMD_OPT_PART_INFO},
{"enable_trusty", no_argument, 0,
CMD_OPT_TRUSTY_ENABLE},
{0, 0, 0, 0 },
};
int
main(int argc, char *argv[])
{
int c, error, gdb_port, err;
int max_vcpus, mptgen, memflags;
struct vmctx *ctx;
size_t memsize;
char *optstr;
int option_idx = 0;
progname = basename(argv[0]);
gdb_port = 0;
guest_ncpus = 1;
memsize = 256 * MB;
mptgen = 1;
memflags = 0;
quit_vm_loop = 0;
hugetlb = 0;
if (signal(SIGHUP, sig_handler_term) == SIG_ERR)
fprintf(stderr, "cannot register handler for SIGHUP\n");
if (signal(SIGINT, sig_handler_term) == SIG_ERR)
fprintf(stderr, "cannot register handler for SIGINT\n");
optstr = "abehuwxACHIMPSTWYvk:r:B:p:g:c:s:m:l:U:G:i:";
while ((c = getopt_long(argc, argv, optstr, long_options,
&option_idx)) != -1) {
switch (c) {
case 'a':
x2apic_mode = 0;
break;
case 'A':
acpi = 1;
break;
case 'b':
enable_bvmcons();
break;
case 'p':
if (pincpu_parse(optarg) != 0) {
errx(EX_USAGE,
"invalid vcpu pinning configuration '%s'",
optarg);
}
break;
case 'c':
guest_ncpus = atoi(optarg);
break;
case 'C':
memflags |= VM_MEM_F_INCORE;
break;
case 'g':
gdb_port = atoi(optarg);
break;
case 'i':
ioc_parse(optarg);
break;
case 'l':
if (lpc_device_parse(optarg) != 0) {
errx(EX_USAGE,
"invalid lpc device configuration '%s'",
optarg);
}
break;
case 's':
if (pci_parse_slot(optarg) != 0)
exit(1);
else
break;
case 'S':
memflags |= VM_MEM_F_WIRED;
break;
case 'm':
error = vm_parse_memsize(optarg, &memsize);
if (error)
errx(EX_USAGE, "invalid memsize '%s'", optarg);
break;
case 'H':
guest_vmexit_on_hlt = 1;
break;
case 'I':
/*
* The "-I" option was used to add an ioapic to the
* virtual machine.
*
* An ioapic is now provided unconditionally for each
* virtual machine and this option is now deprecated.
*/
break;
case 'P':
guest_vmexit_on_pause = 1;
break;
case 'e':
strictio = 1;
break;
case 'u':
vrtc_enable_localtime(0);
break;
case 'U':
guest_uuid_str = optarg;
break;
case 'w':
strictmsr = 0;
break;
case 'W':
virtio_msix = 0;
break;
case 'T':
if (check_hugetlb_support())
hugetlb = 1;
break;
case 'x':
x2apic_mode = 1;
break;
case 'Y':
mptgen = 0;
break;
case 'k':
if (acrn_parse_kernel(optarg) != 0)
exit(1);
else
break;
case 'r':
if (acrn_parse_ramdisk(optarg) != 0)
exit(1);
else
break;
case 'B':
if (acrn_parse_bootargs(optarg) != 0)
exit(1);
else
break;
case 'M':
ptdev_prefer_msi(false);
break;
case 'v':
print_version();
break;
case CMD_OPT_VSBL:
if (acrn_parse_vsbl(optarg) != 0) {
errx(EX_USAGE, "invalid vsbl param %s", optarg);
exit(1);
}
break;
case CMD_OPT_PART_INFO:
if (acrn_parse_guest_part_info(optarg) != 0) {
errx(EX_USAGE,
"invalid guest partition info param %s",
optarg);
exit(1);
}
break;
case CMD_OPT_TRUSTY_ENABLE:
trusty_enabled = 1;
break;
case 'h':
usage(0);
default:
usage(1);
}
}
argc -= optind;
argv += optind;
if (argc != 1)
usage(1);
vmname = argv[0];
for (;;) {
ctx = do_open(vmname);
/* set IOReq buffer page */
error = vm_set_shared_io_page(ctx, (unsigned long)vhm_req_buf);
if (error)
goto fail;
if (guest_ncpus < 1) {
fprintf(stderr, "Invalid guest vCPUs (%d)\n",
guest_ncpus);
goto fail;
}
max_vcpus = num_vcpus_allowed(ctx);
if (guest_ncpus > max_vcpus) {
fprintf(stderr, "%d vCPUs requested but %d available\n",
guest_ncpus, max_vcpus);
goto fail;
}
vm_set_memflags(ctx, memflags);
err = vm_setup_memory(ctx, memsize, VM_MMAP_ALL);
if (err) {
fprintf(stderr, "Unable to setup memory (%d)\n", errno);
goto fail;
}
err = mevent_init();
if (err) {
fprintf(stderr, "Unable to initialize mevent (%d)\n",
errno);
goto mevent_fail;
}
if (gdb_port != 0)
fprintf(stderr, "dbgport not supported\n");
if (vm_init_vdevs(ctx) < 0) {
fprintf(stderr, "Unable to init vdev (%d)\n", errno);
goto dev_fail;
}
/*
* build the guest tables, MP etc.
*/
if (mptgen) {
error = mptable_build(ctx, guest_ncpus);
if (error) {
goto vm_fail;
}
}
error = smbios_build(ctx);
if (error)
goto vm_fail;
if (acpi) {
error = acpi_build(ctx, guest_ncpus);
if (error)
goto vm_fail;
}
error = acrn_sw_load(ctx);
if (error)
goto vm_fail;
/*
* Change the proc title to include the VM name.
*/
/*setproctitle("%s", vmname);*/
/*
* Add guest vcpus
*/
add_cpu(ctx, guest_ncpus);
/* Make a copy for ctx */
_ctx = ctx;
/*
* Head off to the main event dispatch loop
*/
mevent_dispatch();
vm_pause(ctx);
delete_cpu(ctx, BSP);
if (vm_get_suspend_mode() != VM_SUSPEND_RESET)
break;
vm_deinit_vdevs(ctx);
mevent_deinit();
vm_unsetup_memory(ctx);
vm_destroy(ctx);
vm_close(ctx);
_ctx = 0;
vm_set_suspend_mode(VM_SUSPEND_NONE);
}
vm_fail:
vm_deinit_vdevs(ctx);
dev_fail:
mevent_deinit();
mevent_fail:
vm_unsetup_memory(ctx);
fail:
vm_destroy(ctx);
vm_close(ctx);
exit(0);
}

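Side note on the option table above: the long-only options (--vsbl, --part_info, --enable_trusty) are given getopt values starting at 1000 so they can never collide with a short-option character. A minimal, self-contained sketch of that getopt_long pattern, with hypothetical option names rather than acrn-dm's:

#include <getopt.h>
#include <stdio.h>
#include <stdlib.h>

enum { OPT_LONG_ONLY = 1000 };  /* values >= 1000 cannot clash with any short option */

int main(int argc, char *argv[])
{
    int c, idx = 0;
    static struct option opts[] = {
        {"verbose",   no_argument,       0, 'v'},
        {"long_only", required_argument, 0, OPT_LONG_ONLY},
        {0, 0, 0, 0},
    };

    while ((c = getopt_long(argc, argv, "v", opts, &idx)) != -1) {
        switch (c) {
        case 'v':
            printf("verbose\n");
            break;
        case OPT_LONG_ONLY:
            printf("long-only option: %s\n", optarg);
            break;
        default:
            exit(1);
        }
    }
    return 0;
}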
devicemodel/core/mem.c Normal file

@@ -0,0 +1,299 @@
/*-
* Copyright (c) 2012 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* Memory ranges are represented with an RB tree. On insertion, the range
* is checked for overlaps. On lookup, the key has the same base and limit
* so it can be searched within the range.
*/
#include <sys/cdefs.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <assert.h>
#include <pthread.h>
#include "vmm.h"
#include "types.h"
#include "mem.h"
#include "tree.h"
struct mmio_rb_range {
RB_ENTRY(mmio_rb_range) mr_link; /* RB tree links */
struct mem_range mr_param;
uint64_t mr_base;
uint64_t mr_end;
};
struct mmio_rb_tree;
RB_PROTOTYPE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);
RB_HEAD(mmio_rb_tree, mmio_rb_range) mmio_rb_root, mmio_rb_fallback;
/*
* Per-VM cache. Since most accesses from a vCPU will be to
* consecutive addresses in a range, it makes sense to cache the
* result of a lookup.
*/
static struct mmio_rb_range *mmio_hint;
static pthread_rwlock_t mmio_rwlock;
static int
mmio_rb_range_compare(struct mmio_rb_range *a, struct mmio_rb_range *b)
{
if (a->mr_end < b->mr_base)
return -1;
else if (a->mr_base > b->mr_end)
return 1;
return 0;
}
static int
mmio_rb_lookup(struct mmio_rb_tree *rbt, uint64_t addr,
struct mmio_rb_range **entry)
{
struct mmio_rb_range find, *res;
find.mr_base = find.mr_end = addr;
res = RB_FIND(mmio_rb_tree, rbt, &find);
if (res != NULL) {
*entry = res;
return 0;
}
return -1;
}
__attribute__((unused))
static int
mmio_rb_add(struct mmio_rb_tree *rbt, struct mmio_rb_range *new)
{
struct mmio_rb_range *overlap;
overlap = RB_INSERT(mmio_rb_tree, rbt, new);
if (overlap != NULL) {
#ifdef RB_DEBUG
printf("overlap detected: new %lx:%lx, tree %lx:%lx\n",
new->mr_base, new->mr_end,
overlap->mr_base, overlap->mr_end);
#endif
return -1;
}
return 0;
}
#if RB_DEBUG
static void
mmio_rb_dump(struct mmio_rb_tree *rbt)
{
struct mmio_rb_range *np;
pthread_rwlock_rdlock(&mmio_rwlock);
RB_FOREACH(np, mmio_rb_tree, rbt) {
printf(" %lx:%lx, %s\n", np->mr_base, np->mr_end,
np->mr_param.name);
}
pthread_rwlock_unlock(&mmio_rwlock);
}
#endif
RB_GENERATE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);
__attribute__((unused))
static int
mem_read(void *ctx, int vcpu, uint64_t gpa, uint64_t *rval, int size, void *arg)
{
int error;
struct mem_range *mr = arg;
error = (*mr->handler)(ctx, vcpu, MEM_F_READ, gpa, size,
rval, mr->arg1, mr->arg2);
return error;
}
__attribute__((unused))
static int
mem_write(void *ctx, int vcpu, uint64_t gpa, uint64_t wval, int size, void *arg)
{
int error;
struct mem_range *mr = arg;
error = (*mr->handler)(ctx, vcpu, MEM_F_WRITE, gpa, size,
&wval, mr->arg1, mr->arg2);
return error;
}
int
emulate_mem(struct vmctx *ctx, struct mmio_request *mmio_req)
{
uint64_t paddr = mmio_req->address;
int size = mmio_req->size;
struct mmio_rb_range *entry = NULL;
int err;
pthread_rwlock_rdlock(&mmio_rwlock);
/*
* First check the per-VM cache
*/
if (mmio_hint && paddr >= mmio_hint->mr_base &&
paddr <= mmio_hint->mr_end)
entry = mmio_hint;
if (entry == NULL) {
if (mmio_rb_lookup(&mmio_rb_root, paddr, &entry) == 0)
/* Update the per-VM cache */
mmio_hint = entry;
else if (mmio_rb_lookup(&mmio_rb_fallback, paddr, &entry)) {
pthread_rwlock_unlock(&mmio_rwlock);
return -ESRCH;
}
}
assert(entry != NULL);
if (mmio_req->direction == REQUEST_READ)
err = mem_read(ctx, 0, paddr, (uint64_t *)&mmio_req->value,
size, &entry->mr_param);
else
err = mem_write(ctx, 0, paddr, mmio_req->value,
size, &entry->mr_param);
pthread_rwlock_unlock(&mmio_rwlock);
return err;
}
static int
register_mem_int(struct mmio_rb_tree *rbt, struct mem_range *memp)
{
struct mmio_rb_range *entry, *mrp;
int err;
err = 0;
mrp = malloc(sizeof(struct mmio_rb_range));
if (mrp != NULL) {
mrp->mr_param = *memp;
mrp->mr_base = memp->base;
mrp->mr_end = memp->base + memp->size - 1;
pthread_rwlock_wrlock(&mmio_rwlock);
if (mmio_rb_lookup(rbt, memp->base, &entry) != 0)
err = mmio_rb_add(rbt, mrp);
pthread_rwlock_unlock(&mmio_rwlock);
if (err)
free(mrp);
} else
err = -1;
return err;
}
int
register_mem(struct mem_range *memp)
{
return register_mem_int(&mmio_rb_root, memp);
}
int
register_mem_fallback(struct mem_range *memp)
{
return register_mem_int(&mmio_rb_fallback, memp);
}
int
unregister_mem_fallback(struct mem_range *memp)
{
struct mem_range *mr;
struct mmio_rb_range *entry = NULL;
int err;
pthread_rwlock_wrlock(&mmio_rwlock);
err = mmio_rb_lookup(&mmio_rb_fallback, memp->base, &entry);
if (err == 0) {
mr = &entry->mr_param;
assert(mr->name == memp->name);
assert(mr->base == memp->base && mr->size == memp->size);
assert((mr->flags & MEM_F_IMMUTABLE) == 0);
RB_REMOVE(mmio_rb_tree, &mmio_rb_fallback, entry);
/* flush Per-VM cache */
if (mmio_hint == entry)
mmio_hint = NULL;
}
pthread_rwlock_unlock(&mmio_rwlock);
if (entry)
free(entry);
return err;
}
int
unregister_mem(struct mem_range *memp)
{
struct mem_range *mr;
struct mmio_rb_range *entry = NULL;
int err;
pthread_rwlock_wrlock(&mmio_rwlock);
err = mmio_rb_lookup(&mmio_rb_root, memp->base, &entry);
if (err == 0) {
mr = &entry->mr_param;
assert(mr->name == memp->name);
assert(mr->base == memp->base && mr->size == memp->size);
assert((mr->flags & MEM_F_IMMUTABLE) == 0);
RB_REMOVE(mmio_rb_tree, &mmio_rb_root, entry);
/* flush Per-VM cache */
if (mmio_hint == entry)
mmio_hint = NULL;
}
pthread_rwlock_unlock(&mmio_rwlock);
if (entry)
free(entry);
return err;
}
void
init_mem(void)
{
RB_INIT(&mmio_rb_root);
RB_INIT(&mmio_rb_fallback);
pthread_rwlock_init(&mmio_rwlock, NULL);
}

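mem.c above keys its RB tree on [mr_base, mr_end] intervals: the comparator treats any two overlapping ranges as equal, so a point lookup simply uses a key whose base and end are the same guest address. A minimal sketch of that comparison rule, using a flat array in place of the <tree.h> RB macros:

#include <stdint.h>
#include <stdio.h>

struct range { uint64_t base, end; };

/* Same ordering rule as mmio_rb_range_compare(): ranges compare equal iff they overlap. */
static int range_cmp(const struct range *a, const struct range *b)
{
    if (a->end < b->base)
        return -1;
    if (a->base > b->end)
        return 1;
    return 0;
}

int main(void)
{
    struct range regs[] = { { 0xfec00000, 0xfec00fff }, { 0xfee00000, 0xfee00fff } };
    struct range key = { 0xfee00030, 0xfee00030 };  /* point query: base == end */

    for (unsigned i = 0; i < sizeof(regs) / sizeof(regs[0]); i++)
        if (range_cmp(&key, &regs[i]) == 0)
            printf("address hits range %u\n", i);
    return 0;
}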
devicemodel/core/mevent.c Normal file

@@ -0,0 +1,341 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* Micro event library derived from FreeBSD's bhyve, designed for a
* single i/o thread using epoll, and having events be persistent by default.
*/
#include <sys/cdefs.h>
#include <assert.h>
#include <err.h>
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <string.h>
#include <sysexits.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/epoll.h>
#include <sys/queue.h>
#include <pthread.h>
#include "mevent.h"
#include "vmm.h"
#include "vmmapi.h"
#define MEVENT_MAX 64
#define MEV_ADD 1
#define MEV_ENABLE 2
#define MEV_DISABLE 3
#define MEV_DEL_PENDING 4
static int epoll_fd;
static pthread_t mevent_tid;
static int mevent_pipefd[2];
static pthread_mutex_t mevent_lmutex = PTHREAD_MUTEX_INITIALIZER;
struct mevent {
void (*me_func)(int, enum ev_type, void *);
int me_fd;
enum ev_type me_type;
void *me_param;
int me_cq;
int me_state;
int me_closefd;
LIST_ENTRY(mevent) me_list;
};
static LIST_HEAD(listhead, mevent) global_head;
static void
mevent_qlock(void)
{
pthread_mutex_lock(&mevent_lmutex);
}
static void
mevent_qunlock(void)
{
pthread_mutex_unlock(&mevent_lmutex);
}
static void
mevent_pipe_read(int fd, enum ev_type type, void *param)
{
char buf[MEVENT_MAX];
int status;
/*
* Drain the pipe read side. The fd is non-blocking so this is
* safe to do.
*/
do {
status = read(fd, buf, sizeof(buf));
} while (status == MEVENT_MAX);
}
/* On error, -1 is returned; otherwise zero is returned */
int
mevent_notify(void)
{
char c;
/*
* If calling from outside the i/o thread, write a byte on the
* pipe to force the i/o thread to exit the blocking epoll call.
*/
if (mevent_pipefd[1] != 0 && pthread_self() != mevent_tid)
if (write(mevent_pipefd[1], &c, 1) <= 0)
return -1;
return 0;
}
static int
mevent_kq_filter(struct mevent *mevp)
{
int retval;
retval = 0;
if (mevp->me_type == EVF_READ)
retval = EPOLLIN;
if (mevp->me_type == EVF_WRITE)
retval = EPOLLOUT;
return retval;
}
static void
mevent_destroy()
{
struct mevent *mevp, *tmpp;
struct epoll_event ee;
mevent_qlock();
list_foreach_safe(mevp, &global_head, me_list, tmpp) {
LIST_REMOVE(mevp, me_list);
ee.events = mevent_kq_filter(mevp);
ee.data.ptr = mevp;
epoll_ctl(epoll_fd, EPOLL_CTL_DEL, mevp->me_fd, &ee);
if ((mevp->me_type == EVF_READ ||
mevp->me_type == EVF_WRITE)
&& mevp->me_fd != STDIN_FILENO)
close(mevp->me_fd);
free(mevp);
}
mevent_qunlock();
}
static void
mevent_handle(struct epoll_event *kev, int numev)
{
int i;
struct mevent *mevp;
for (i = 0; i < numev; i++) {
mevp = kev[i].data.ptr;
/* XXX check for EV_ERROR ? */
(*mevp->me_func)(mevp->me_fd, mevp->me_type, mevp->me_param);
}
}
struct mevent *
mevent_add(int tfd, enum ev_type type,
void (*func)(int, enum ev_type, void *), void *param)
{
int ret;
struct epoll_event ee;
struct mevent *lp, *mevp;
if (tfd < 0 || func == NULL)
return NULL;
if (type == EVF_TIMER)
return NULL;
mevent_qlock();
/* Verify that the fd/type tuple is not present in the list */
LIST_FOREACH(lp, &global_head, me_list) {
if (lp->me_fd == tfd && lp->me_type == type) {
mevent_qunlock();
return lp;
}
}
mevent_qunlock();
/*
* Allocate an entry, populate it, and add it to the list.
*/
mevp = calloc(1, sizeof(struct mevent));
if (mevp == NULL)
return NULL;
mevp->me_fd = tfd;
mevp->me_type = type;
mevp->me_func = func;
mevp->me_param = param;
ee.events = mevent_kq_filter(mevp);
ee.data.ptr = mevp;
ret = epoll_ctl(epoll_fd, EPOLL_CTL_ADD, mevp->me_fd, &ee);
if (ret == 0) {
mevent_qlock();
LIST_INSERT_HEAD(&global_head, mevp, me_list);
mevent_qunlock();
return mevp;
} else {
free(mevp);
return NULL;
}
}
int
mevent_enable(struct mevent *evp)
{
return 0;
}
int
mevent_disable(struct mevent *evp)
{
return 0;
}
static int
mevent_delete_event(struct mevent *evp, int closefd)
{
struct epoll_event ee;
mevent_qlock();
LIST_REMOVE(evp, me_list);
mevent_qunlock();
ee.events = mevent_kq_filter(evp);
ee.data.ptr = evp;
epoll_ctl(epoll_fd, EPOLL_CTL_DEL, evp->me_fd, &ee);
if (closefd)
close(evp->me_fd);
free(evp);
return 0;
}
int
mevent_delete(struct mevent *evp)
{
return mevent_delete_event(evp, 0);
}
int
mevent_delete_close(struct mevent *evp)
{
return mevent_delete_event(evp, 1);
}
static void
mevent_set_name(void)
{
pthread_setname_np(mevent_tid, "mevent");
}
int
mevent_init(void)
{
epoll_fd = epoll_create1(0);
assert(epoll_fd >= 0);
if (epoll_fd >= 0)
return 0;
else
return -1;
}
void
mevent_deinit(void)
{
mevent_destroy();
close(epoll_fd);
}
void
mevent_dispatch(void)
{
struct epoll_event eventlist[MEVENT_MAX];
struct mevent *pipev;
int ret;
mevent_tid = pthread_self();
mevent_set_name();
/*
* Open the pipe that will be used for other threads to force
* the blocking epoll_wait call to exit by writing to it. Set the
* descriptor to non-blocking.
*/
ret = pipe(mevent_pipefd);
if (ret < 0) {
perror("pipe");
exit(0);
}
/*
* Add internal event handler for the pipe write fd
*/
pipev = mevent_add(mevent_pipefd[0], EVF_READ, mevent_pipe_read, NULL);
assert(pipev != NULL);
for (;;) {
/*
* Block awaiting events
*/
ret = epoll_wait(epoll_fd, eventlist, MEVENT_MAX, -1);
if (ret == -1 && errno != EINTR)
perror("Error return from epoll_wait");
/*
* Handle reported events
*/
mevent_handle(eventlist, ret);
if (vm_get_suspend_mode() != VM_SUSPEND_NONE)
break;
}
}

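mevent.c above is a thin wrapper around one epoll loop: registered fds stay armed until explicitly deleted, and mevent_notify() writes a byte into a pipe so a thread other than the dispatch thread can kick epoll_wait() out of its block. A stripped-down, self-contained sketch of that self-pipe wake-up, using plain epoll calls rather than the mevent API:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/epoll.h>

int main(void)
{
    int epfd, pipefd[2];
    struct epoll_event ev, out;

    epfd = epoll_create1(0);
    if (epfd < 0 || pipe(pipefd) < 0) {
        perror("setup");
        return 1;
    }

    /* Register the pipe's read end; level-triggered, stays armed until deleted. */
    memset(&ev, 0, sizeof(ev));
    ev.events = EPOLLIN;
    ev.data.fd = pipefd[0];
    if (epoll_ctl(epfd, EPOLL_CTL_ADD, pipefd[0], &ev) < 0) {
        perror("epoll_ctl");
        return 1;
    }

    /* Another thread would do this write to wake the loop; here it is done up front. */
    if (write(pipefd[1], "x", 1) != 1)
        return 1;

    if (epoll_wait(epfd, &out, 1, -1) == 1 && out.data.fd == pipefd[0]) {
        char c;
        if (read(pipefd[0], &c, 1) == 1)
            printf("woken by self-pipe\n");
    }
    return 0;
}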
devicemodel/core/monitor.c Normal file

@@ -0,0 +1,472 @@
/*
* Project Acrn
* Acrn-dm-monitor
*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*
* Author: TaoYuhong <yuhong.tao@intel.com>
*/
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <pthread.h>
#include <stdlib.h>
#include <sys/queue.h>
#include <errno.h>
#include <time.h>
#include "dm.h"
#include "vmmapi.h"
#include "mevent.h"
#include "monitor.h"
/* Data structure and functions for processing received messages */
struct monitor_msg_handle {
struct vmm_msg msg;
void (*callback) (struct vmm_msg * msg, struct msg_sender * sender,
void *priv);
void *priv;
LIST_ENTRY(monitor_msg_handle) list;
};
static LIST_HEAD(mmh_list_struct, monitor_msg_handle) mmh_head;
static pthread_mutex_t mmh_mutex = PTHREAD_MUTEX_INITIALIZER;
static int can_register_handler = 0; /* Do not allow anyone to add a handler
until we have added some reserved ones */
static int monitor_add_handler(struct monitor_msg_handle *handle)
{
struct monitor_msg_handle *hp;
pthread_mutex_lock(&mmh_mutex);
LIST_FOREACH(hp, &mmh_head, list)
if (hp->msg.msgid == handle->msg.msgid) {
fprintf(stderr, "%s %d\r\n", __FUNCTION__, __LINE__);
pthread_mutex_unlock(&mmh_mutex);
return -1;
}
LIST_INSERT_HEAD(&mmh_head, handle, list);
pthread_mutex_unlock(&mmh_mutex);
return 0;
}
int monitor_register_handler(struct vmm_msg *msg,
void (*callback) (struct vmm_msg * msg,
struct msg_sender * client,
void *priv), void *priv)
{
struct monitor_msg_handle *handle;
int ret;
if (!can_register_handler)
return -1;
handle = calloc(1, sizeof(struct monitor_msg_handle));
if (!handle) {
fprintf(stderr, "%s %d\r\n", __FUNCTION__, __LINE__);
return -1;
}
handle->msg.msgid = msg->msgid;
handle->callback = callback;
handle->priv = priv;
ret = monitor_add_handler(handle);
if (ret)
free(handle);
return ret;
}
/* messages handled by monitor */
static int write_msg_to(int fd, void *data, unsigned long timeout_usec)
{
struct vmm_msg *msg = data;
fd_set wfd;
struct timeval timeout;
int ret = 0;
if (msg->len < sizeof(struct vmm_msg)) {
fprintf(stderr, "%s %d\r\n", __FUNCTION__, __LINE__);
return -1;
}
if (msg->msgid > MSGID_MAX) {
fprintf(stderr, "%s %d\r\n", __FUNCTION__, __LINE__);
return -1;
}
if (msg->magic != VMM_MSG_MAGIC) {
fprintf(stderr, "%s %d\r\n", __FUNCTION__, __LINE__);
msg->magic = VMM_MSG_MAGIC;
}
msg->timestamp = time(NULL);
FD_ZERO(&wfd);
FD_SET(fd, &wfd);
timeout.tv_sec = 0;
timeout.tv_usec = timeout_usec;
select(fd + 1, NULL, &wfd, NULL, &timeout);
if (FD_ISSET(fd, &wfd))
ret = write(fd, msg, msg->len);
return ret;
}
/* MSG_HANDSHAKE, handshake message handler*/
#define TIMEOUT_USEC 100000
static VMM_MSG_STR(handshake_badname, "Error: bad name!");
static VMM_MSG_STR(handshake_ok, "acrn-dm received your request");
static void handshake_acrn_dm(struct vmm_msg *msg, struct msg_sender *sender,
void *priv)
{
struct vmm_msg_handshake *hsk = (void *)msg;
int ret;
ret = strnlen(hsk->name, CLIENT_NAME_LEN);
if (ret >= CLIENT_NAME_LEN) {
write_msg_to(sender->fd, &handshake_badname, TIMEOUT_USEC);
return;
}
strncpy(sender->name, hsk->name, CLIENT_NAME_LEN);
sender->broadcast = hsk->broadcast;
write_msg_to(sender->fd, &handshake_ok, TIMEOUT_USEC);
}
static struct monitor_msg_handle handle_handshake = {
.msg = {.msgid = MSG_HANDSHAKE},
.callback = handshake_acrn_dm,
};
/* The VM manager communicates with the dm-monitor over a unix socket.
* The monitor is the server and may have many clients; a client sends
* a message, which triggers the matching msg handler. A msg handler
* should only reply to the message sender.
*/
static struct sockaddr_un monitor_addr; /* one monitor */
static int monitor_fd;
struct vmm_client {
/* msg_sender will be seen/modify by msg handler */
struct msg_sender sender;
/* the rest should be invisible for msg_handler */
struct sockaddr_un addr;
int fd;
socklen_t addr_len;
void *buf;
int len; /* buf len */
struct mevent *mev;
LIST_ENTRY(vmm_client) list;
};
static LIST_HEAD(client_list_struct, vmm_client) client_head;
static int num_client = 0;
static pthread_mutex_t client_mutex = PTHREAD_MUTEX_INITIALIZER;
static void vmm_client_free_res(struct vmm_client *client)
{
mevent_delete(client->mev);
close(client->fd);
client->fd = -1;
free(client->buf);
client->buf = NULL;
free(client);
}
static void vmm_client_free(struct vmm_client *client)
{
pthread_mutex_lock(&client_mutex);
LIST_REMOVE(client, list);
num_client--;
pthread_mutex_unlock(&client_mutex);
vmm_client_free_res(client);
}
static VMM_MSG_STR(unsupported_msgid, "Error: unsupported msgid!");
static int monitor_parse_buf(struct vmm_client *client)
{
struct vmm_msg *msg;
struct monitor_msg_handle *handle;
size_t p = 0;
int handled = 0;
if (client->len < sizeof(struct vmm_msg))
return -1;
do {
msg = client->buf + p;
/* would this message run out of bounds? */
if (p + msg->len > client->len) {
fprintf(stderr, "%s %d\r\n", __FUNCTION__, __LINE__);
break;
}
LIST_FOREACH(handle, &mmh_head, list) {
if (msg->magic != VMM_MSG_MAGIC)
return -1;
if (handle->msg.msgid != msg->msgid)
continue;
client->sender.fd = client->fd;
handle->callback(msg, &client->sender, handle->priv);
handled = 1;
break;
}
p += msg->len;
} while (p < client->len);
if (!handled)
write_msg_to(client->fd, &unsupported_msgid, TIMEOUT_USEC);
return 0;
}
static void mevent_read_func(int fd, enum ev_type type, void *param)
{
struct vmm_client *client = param;
client->len = read(fd, client->buf, VMM_MSG_MAX_LEN);
if (client->len <= 0) {
fprintf(stderr, "Disconnect(%d)!\r\n", client->fd);
vmm_client_free(client);
return;
}
if (client->len == VMM_MSG_MAX_LEN) {
fprintf(stderr, "TODO: buf overflow!\r\n");
return;
}
monitor_parse_buf(client);
}
static struct vmm_client *vmm_client_new(void)
{
struct vmm_client *client;
client = calloc(1, sizeof(struct vmm_client));
if (!client) {
fprintf(stderr, "%s %d\r\n", __FUNCTION__, __LINE__);
goto alloc_client;
}
memset(client, 0, sizeof(struct vmm_client));
client->buf = calloc(1, VMM_MSG_MAX_LEN);
if (!client->buf) {
fprintf(stderr, "%s %d\r\n", __FUNCTION__, __LINE__);
goto alloc_buf;
}
client->addr_len = sizeof(client->addr);
client->fd =
accept(monitor_fd, (struct sockaddr *)&client->addr, &client->addr_len);
if (client->fd < 0) {
fprintf(stderr, "%s %d\r\n", __FUNCTION__, __LINE__);
goto accept_con;
}
client->mev =
mevent_add(client->fd, EVF_READ, mevent_read_func, client);
if (!client->mev) {
fprintf(stderr, "%s %d\r\n", __FUNCTION__, __LINE__);
goto add_mev;
}
pthread_mutex_lock(&client_mutex);
LIST_INSERT_HEAD(&client_head, client, list);
num_client++;
pthread_mutex_unlock(&client_mutex);
return client;
add_mev:
close(client->fd);
client->fd = -1;
accept_con:
free(client->buf);
client->buf = NULL;
alloc_buf:
free(client);
alloc_client:
return NULL;
}
int monitor_broadcast(struct vmm_msg *msg)
{
struct vmm_client *client;
fd_set wfd;
int max_fd = 0;
struct timeval timeout;
int ret = 0;
if (msg->len < sizeof(struct vmm_msg)) {
fprintf(stderr, "%s %d\r\n", __FUNCTION__, __LINE__);
return -1;
}
if (msg->msgid > MSGID_MAX) {
fprintf(stderr, "%s %d\r\n", __FUNCTION__, __LINE__);
return -1;
}
if (msg->magic != VMM_MSG_MAGIC) {
fprintf(stderr, "%s %d\r\n", __FUNCTION__, __LINE__);
msg->magic = VMM_MSG_MAGIC;
}
msg->timestamp = time(NULL);
pthread_mutex_lock(&client_mutex);
FD_ZERO(&wfd);
LIST_FOREACH(client, &client_head, list) {
if (!client->sender.broadcast)
continue;
FD_SET(client->fd, &wfd);
if (client->fd > max_fd)
max_fd = client->fd;
}
timeout.tv_sec = 0;
timeout.tv_usec = 10000;
select(max_fd + 1, NULL, &wfd, NULL, &timeout);
LIST_FOREACH(client, &client_head, list) {
if (!client->sender.broadcast)
continue;
if (FD_ISSET(client->fd, &wfd)) {
ret = write(client->fd, msg->payload,
msg->len - sizeof(struct vmm_msg));
if (ret < 0)
continue;
}
}
pthread_mutex_unlock(&client_mutex);
return 0;
}
/* monitor thread */
static int monitor_running = 1;
static pthread_t monitor_thread;
static void *monitor_server_func(void *arg)
{
struct vmm_client *client;
while (monitor_running) {
client = vmm_client_new();
if (!client) {
usleep(10000);
continue;
}
fprintf(stderr, "Connected:%d\r\n", client->fd);
}
fprintf(stderr, "%s quit!\r\n", __FUNCTION__);
return NULL;
}
int monitor_init(struct vmctx *ctx)
{
int ret;
char path[128] = { };
ret = system("mkdir -p /run/acrn/");
if (ret) {
fprintf(stderr, "%s %d\r\n", __FUNCTION__, __LINE__);
goto socket_err;
}
memset(&monitor_addr, 0, sizeof(monitor_addr));
snprintf(path, sizeof(path), "/run/acrn/%s-monitor.socket", vmname);
unlink(path);
monitor_fd = socket(AF_UNIX, SOCK_STREAM, 0);
if (monitor_fd < 0) {
fprintf(stderr, "%s %d\r\n", __FUNCTION__, __LINE__);
goto socket_err;
}
monitor_addr.sun_family = AF_UNIX;
strncpy(monitor_addr.sun_path, path, sizeof(monitor_addr.sun_path));
ret = bind(monitor_fd, (struct sockaddr *)&monitor_addr, sizeof(monitor_addr));
if (ret < 0) {
fprintf(stderr, "%s %d\r\n", __FUNCTION__, __LINE__);
goto bind_err;
}
listen(monitor_fd, 1);
ret = pthread_create(&monitor_thread, NULL, monitor_server_func, NULL);
if (ret) {
fprintf(stderr, "%s %d\r\n", __FUNCTION__, __LINE__);
goto thread_err;
}
/* Messages handled by monitor */
monitor_add_handler(&handle_handshake);
__sync_fetch_and_add(&can_register_handler, 1);
return 0;
thread_err:
monitor_thread = 0;
unlink(path);
bind_err:
close(monitor_fd);
socket_err:
return -1;
}
void monitor_close(void)
{
struct vmm_client *client;
if (!monitor_thread)
return;
shutdown(monitor_fd, SHUT_RDWR);
close(monitor_fd);
monitor_running = 0;
pthread_join(monitor_thread, NULL);
unlink(monitor_addr.sun_path);
/* A client's buffer and fd may still be in use by a msg handler, */
/* which is driven by mevent */
pthread_mutex_lock(&client_mutex);
LIST_FOREACH(client, &client_head, list) {
vmm_client_free_res(client);
}
pthread_mutex_unlock(&client_mutex);
}

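monitor.c above listens on an AF_UNIX stream socket named /run/acrn/<vmname>-monitor.socket; a manager process connects, writes a message, and reads the reply on the same fd. A minimal client-side connect sketch (the VM name below is a placeholder, and the actual message layout, struct vmm_msg, lives in a header not shown here):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/un.h>

int main(void)
{
    struct sockaddr_un addr;
    int fd;

    fd = socket(AF_UNIX, SOCK_STREAM, 0);
    if (fd < 0) {
        perror("socket");
        return 1;
    }

    memset(&addr, 0, sizeof(addr));
    addr.sun_family = AF_UNIX;
    /* "vm1" is a hypothetical VM name; acrn-dm derives the path from vmname. */
    snprintf(addr.sun_path, sizeof(addr.sun_path), "/run/acrn/%s-monitor.socket", "vm1");

    if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
        perror("connect");
        close(fd);
        return 1;
    }
    printf("connected to %s\n", addr.sun_path);
    /* A real client would now write its handshake message and read the reply. */
    close(fd);
    return 0;
}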
devicemodel/core/mptbl.c Normal file

@@ -0,0 +1,367 @@
/*-
* Copyright (c) 2012 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/cdefs.h>
#include <sys/types.h>
#include <stdio.h>
#include <string.h>
#include "types.h"
#include "mptable.h"
#include "acpi.h"
#include "dm.h"
#include "mptbl.h"
#include "pci_core.h"
#define MPTABLE_BASE 0xF0000
/* floating pointer length + maximum length of configuration table */
#define MPTABLE_MAX_LENGTH (65536 + 16)
#define LAPIC_PADDR 0xFEE00000
#define LAPIC_VERSION 16
#define IOAPIC_PADDR 0xFEC00000
#define IOAPIC_VERSION 0x11
#define MP_SPECREV 4
#define MPFP_SIG "_MP_"
/* Configuration header defines */
#define MPCH_SIG "PCMP"
#define MPCH_OEMID "BHyVe "
#define MPCH_OEMID_LEN 8
#define MPCH_PRODID "Hypervisor "
#define MPCH_PRODID_LEN 12
/* Processor entry defines */
#define MPEP_SIG_FAMILY 6 /* XXX acrn-dm should supply this */
#define MPEP_SIG_MODEL 26
#define MPEP_SIG_STEPPING 5
#define MPEP_SIG \
((MPEP_SIG_FAMILY << 8) | \
(MPEP_SIG_MODEL << 4) | \
(MPEP_SIG_STEPPING))
#define MPEP_FEATURES (0xBFEBFBFF) /* XXX Intel i7 */
/* Number of local intr entries */
#define MPEII_NUM_LOCAL_IRQ 2
/* Bus entry defines */
#define MPE_NUM_BUSES 2
#define MPE_BUSNAME_LEN 6
#define MPE_BUSNAME_ISA "ISA "
#define MPE_BUSNAME_PCI "PCI "
static void *oem_tbl_start;
static int oem_tbl_size;
static uint8_t
mpt_compute_checksum(void *base, size_t len)
{
uint8_t *bytes;
uint8_t sum;
for (bytes = base, sum = 0; len > 0; len--)
sum += *bytes++;
return (256 - sum);
}
static void
mpt_build_mpfp(mpfps_t mpfp, vm_paddr_t gpa)
{
memset(mpfp, 0, sizeof(*mpfp));
memcpy(mpfp->signature, MPFP_SIG, 4);
mpfp->pap = gpa + sizeof(*mpfp);
mpfp->length = 1;
mpfp->spec_rev = MP_SPECREV;
mpfp->checksum = mpt_compute_checksum(mpfp, sizeof(*mpfp));
}
static void
mpt_build_mpch(mpcth_t mpch)
{
memset(mpch, 0, sizeof(*mpch));
memcpy(mpch->signature, MPCH_SIG, 4);
mpch->spec_rev = MP_SPECREV;
memcpy(mpch->oem_id, MPCH_OEMID, MPCH_OEMID_LEN);
memcpy(mpch->product_id, MPCH_PRODID, MPCH_PRODID_LEN);
mpch->apic_address = LAPIC_PADDR;
}
static void
mpt_build_proc_entries(proc_entry_ptr mpep, int ncpu)
{
int i;
for (i = 0; i < ncpu; i++) {
memset(mpep, 0, sizeof(*mpep));
mpep->type = MPCT_ENTRY_PROCESSOR;
mpep->apic_id = i; /* XXX */
mpep->apic_version = LAPIC_VERSION;
mpep->cpu_flags = PROCENTRY_FLAG_EN;
if (i == 0)
mpep->cpu_flags |= PROCENTRY_FLAG_BP;
mpep->cpu_signature = MPEP_SIG;
mpep->feature_flags = MPEP_FEATURES;
mpep++;
}
}
static void
mpt_build_localint_entries(int_entry_ptr mpie)
{
/* Hardcode LINT0 as ExtINT on all CPUs. */
memset(mpie, 0, sizeof(*mpie));
mpie->type = MPCT_ENTRY_LOCAL_INT;
mpie->int_type = INTENTRY_TYPE_EXTINT;
mpie->int_flags = INTENTRY_FLAGS_POLARITY_CONFORM |
INTENTRY_FLAGS_TRIGGER_CONFORM;
mpie->dst_apic_id = 0xff;
mpie->dst_apic_int = 0;
mpie++;
/* Hardcode LINT1 as NMI on all CPUs. */
memset(mpie, 0, sizeof(*mpie));
mpie->type = MPCT_ENTRY_LOCAL_INT;
mpie->int_type = INTENTRY_TYPE_NMI;
mpie->int_flags = INTENTRY_FLAGS_POLARITY_CONFORM |
INTENTRY_FLAGS_TRIGGER_CONFORM;
mpie->dst_apic_id = 0xff;
mpie->dst_apic_int = 1;
}
static void
mpt_build_bus_entries(bus_entry_ptr mpeb)
{
memset(mpeb, 0, sizeof(*mpeb));
mpeb->type = MPCT_ENTRY_BUS;
mpeb->bus_id = 0;
memcpy(mpeb->bus_type, MPE_BUSNAME_PCI, MPE_BUSNAME_LEN);
mpeb++;
memset(mpeb, 0, sizeof(*mpeb));
mpeb->type = MPCT_ENTRY_BUS;
mpeb->bus_id = 1;
memcpy(mpeb->bus_type, MPE_BUSNAME_ISA, MPE_BUSNAME_LEN);
}
static void
mpt_build_ioapic_entries(io_apic_entry_ptr mpei, int id)
{
memset(mpei, 0, sizeof(*mpei));
mpei->type = MPCT_ENTRY_IOAPIC;
mpei->apic_id = id;
mpei->apic_version = IOAPIC_VERSION;
mpei->apic_flags = IOAPICENTRY_FLAG_EN;
mpei->apic_address = IOAPIC_PADDR;
}
static int
mpt_count_ioint_entries(void)
{
int bus, count;
count = 0;
for (bus = 0; bus <= PCI_BUSMAX; bus++)
count += pci_count_lintr(bus);
/*
* Always include entries for the first 16 pins along with an entry
* for each active PCI INTx pin.
*/
return (16 + count);
}
static void
mpt_generate_pci_int(int bus, int slot, int pin, int pirq_pin, int ioapic_irq,
void *arg)
{
int_entry_ptr *mpiep, mpie;
mpiep = arg;
mpie = *mpiep;
memset(mpie, 0, sizeof(*mpie));
/*
* This is always after another I/O interrupt entry, so cheat
* and fetch the I/O APIC ID from the prior entry.
*/
mpie->type = MPCT_ENTRY_INT;
mpie->int_type = INTENTRY_TYPE_INT;
mpie->src_bus_id = bus;
mpie->src_bus_irq = slot << 2 | (pin - 1);
mpie->dst_apic_id = mpie[-1].dst_apic_id;
mpie->dst_apic_int = ioapic_irq;
*mpiep = mpie + 1;
}
static void
mpt_build_ioint_entries(int_entry_ptr mpie, int id)
{
int pin, bus;
/*
* The following config is taken from kernel mptable.c
* mptable_parse_default_config_ints(...); for now
* just use the default config and tweak it later if needed.
*/
/* First, generate the first 16 pins. */
for (pin = 0; pin < 16; pin++) {
memset(mpie, 0, sizeof(*mpie));
mpie->type = MPCT_ENTRY_INT;
mpie->src_bus_id = 1;
mpie->dst_apic_id = id;
/*
* All default configs route IRQs from bus 0 to the first 16
* pins of the first I/O APIC with an APIC ID of 2.
*/
mpie->dst_apic_int = pin;
switch (pin) {
case 0:
/* Pin 0 is an ExtINT pin. */
mpie->int_type = INTENTRY_TYPE_EXTINT;
break;
case 2:
/* IRQ 0 is routed to pin 2. */
mpie->int_type = INTENTRY_TYPE_INT;
mpie->src_bus_irq = 0;
break;
case SCI_INT:
/* ACPI SCI is level triggered and active-lo. */
mpie->int_flags = INTENTRY_FLAGS_POLARITY_ACTIVELO |
INTENTRY_FLAGS_TRIGGER_LEVEL;
mpie->int_type = INTENTRY_TYPE_INT;
mpie->src_bus_irq = SCI_INT;
break;
default:
/* All other pins are identity mapped. */
mpie->int_type = INTENTRY_TYPE_INT;
mpie->src_bus_irq = pin;
break;
}
mpie++;
}
/* Next, generate entries for any PCI INTx interrupts. */
for (bus = 0; bus <= PCI_BUSMAX; bus++)
pci_walk_lintr(bus, mpt_generate_pci_int, &mpie);
}
void
mptable_add_oemtbl(void *tbl, int tblsz)
{
oem_tbl_start = tbl;
oem_tbl_size = tblsz;
}
int
mptable_build(struct vmctx *ctx, int ncpu)
{
mpcth_t mpch;
bus_entry_ptr mpeb;
io_apic_entry_ptr mpei;
proc_entry_ptr mpep;
mpfps_t mpfp;
int_entry_ptr mpie;
int ioints, bus;
char *curraddr;
char *startaddr;
startaddr = paddr_guest2host(ctx, MPTABLE_BASE, MPTABLE_MAX_LENGTH);
if (startaddr == NULL) {
fprintf(stderr, "mptable requires mapped mem\n");
return -1;
}
/*
* There is no way to advertise multiple PCI hierarchies via MPtable
* so require that there is no PCI hierarchy with a non-zero bus
* number.
*/
for (bus = 1; bus <= PCI_BUSMAX; bus++) {
if (pci_bus_configured(bus)) {
fprintf(stderr, "MPtable is incompatible with "
"multiple PCI hierarchies.\r\n");
fprintf(stderr, "MPtable generation can be disabled "
"by passing the -Y option to acrn-dm.\r\n");
return -1;
}
}
curraddr = startaddr;
mpfp = (mpfps_t)curraddr;
mpt_build_mpfp(mpfp, MPTABLE_BASE);
curraddr += sizeof(*mpfp);
mpch = (mpcth_t)curraddr;
mpt_build_mpch(mpch);
curraddr += sizeof(*mpch);
mpep = (proc_entry_ptr)curraddr;
mpt_build_proc_entries(mpep, ncpu);
curraddr += sizeof(*mpep) * ncpu;
mpch->entry_count += ncpu;
mpeb = (bus_entry_ptr) curraddr;
mpt_build_bus_entries(mpeb);
curraddr += sizeof(*mpeb) * MPE_NUM_BUSES;
mpch->entry_count += MPE_NUM_BUSES;
mpei = (io_apic_entry_ptr)curraddr;
mpt_build_ioapic_entries(mpei, 0);
curraddr += sizeof(*mpei);
mpch->entry_count++;
mpie = (int_entry_ptr) curraddr;
ioints = mpt_count_ioint_entries();
mpt_build_ioint_entries(mpie, 0);
curraddr += sizeof(*mpie) * ioints;
mpch->entry_count += ioints;
mpie = (int_entry_ptr)curraddr;
mpt_build_localint_entries(mpie);
curraddr += sizeof(*mpie) * MPEII_NUM_LOCAL_IRQ;
mpch->entry_count += MPEII_NUM_LOCAL_IRQ;
if (oem_tbl_start) {
mpch->oem_table_pointer = curraddr - startaddr + MPTABLE_BASE;
mpch->oem_table_size = oem_tbl_size;
memcpy(curraddr, oem_tbl_start, oem_tbl_size);
}
mpch->base_table_length = curraddr - (char *)mpch;
mpch->checksum = mpt_compute_checksum(mpch, mpch->base_table_length);
return 0;
}

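mptbl.c above validates every MP structure with the same 8-bit rule: mpt_compute_checksum() returns the byte that makes all bytes of the structure sum to zero modulo 256, which is how a guest verifies the floating pointer and configuration table. A small self-contained check of that property:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Same arithmetic as mpt_compute_checksum(): pick the byte that zeroes the sum. */
static uint8_t checksum(const void *base, size_t len)
{
    const uint8_t *bytes = base;
    uint8_t sum = 0;

    while (len--)
        sum += *bytes++;
    return 256 - sum;
}

int main(void)
{
    uint8_t blob[16];
    uint8_t sum = 0;

    memset(blob, 0xa5, sizeof(blob));
    blob[15] = 0;                             /* reserve the checksum slot */
    blob[15] = checksum(blob, sizeof(blob));  /* fill it in */

    for (size_t i = 0; i < sizeof(blob); i++)
        sum += blob[i];
    printf("sum of all bytes mod 256 = %u\n", sum);  /* prints 0 */
    return 0;
}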
devicemodel/core/post.c Normal file

@@ -0,0 +1,51 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/cdefs.h>
#include <sys/types.h>
#include <stdbool.h>
#include <assert.h>
#include "inout.h"
#include "lpc.h"
static int
post_data_handler(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
uint32_t *eax, void *arg)
{
assert(in == 1);
if (bytes != 1)
return -1;
*eax = 0xff; /* return some garbage */
return 0;
}
INOUT_PORT(post, 0x84, IOPORT_F_IN, post_data_handler);
SYSRES_IO(0x84, 1);


@@ -0,0 +1,818 @@
/*-
* Copyright (c) 2014 Tycho Nightingale <tycho.nightingale@pluribusnetworks.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#include <sys/param.h>
#include <assert.h>
#include <openssl/md5.h>
#include <stdio.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <uuid/uuid.h>
#include "vmm.h"
#include "vmmapi.h"
#include "dm.h"
#include "smbiostbl.h"
#define SMBIOS_BASE 0xF1000
/* (ACRN-DM_ACPI_BASE - SMBIOS_BASE) */
#define SMBIOS_MAX_LENGTH (0xF2400 - 0xF1000)
#define SMBIOS_TYPE_BIOS 0
#define SMBIOS_TYPE_SYSTEM 1
#define SMBIOS_TYPE_CHASSIS 3
#define SMBIOS_TYPE_PROCESSOR 4
#define SMBIOS_TYPE_MEMARRAY 16
#define SMBIOS_TYPE_MEMDEVICE 17
#define SMBIOS_TYPE_MEMARRAYMAP 19
#define SMBIOS_TYPE_BOOT 32
#define SMBIOS_TYPE_EOT 127
struct smbios_structure {
uint8_t type;
uint8_t length;
uint16_t handle;
} __attribute__((packed));
typedef int (*initializer_func_t)(struct smbios_structure *template_entry,
const char **template_strings, char *curaddr, char **endaddr,
uint16_t *n, uint16_t *size);
struct smbios_template_entry {
struct smbios_structure *entry;
const char **strings;
initializer_func_t initializer;
};
/*
* SMBIOS Structure Table Entry Point
*/
#define SMBIOS_ENTRY_EANCHOR "_SM_"
#define SMBIOS_ENTRY_EANCHORLEN 4
#define SMBIOS_ENTRY_IANCHOR "_DMI_"
#define SMBIOS_ENTRY_IANCHORLEN 5
struct smbios_entry_point {
char eanchor[4]; /* anchor tag */
uint8_t echecksum; /* checksum of entry point structure */
uint8_t eplen; /* length in bytes of entry point */
uint8_t major; /* major version of the SMBIOS spec */
uint8_t minor; /* minor version of the SMBIOS spec */
uint16_t maxssize; /* maximum size in bytes of a struct */
uint8_t revision; /* entry point structure revision */
uint8_t format[5]; /* entry point rev-specific data */
char ianchor[5]; /* intermediate anchor tag */
uint8_t ichecksum; /* intermediate checksum */
uint16_t stlen; /* len in bytes of structure table */
uint32_t staddr; /* physical addr of structure table */
uint16_t stnum; /* number of structure table entries */
uint8_t bcdrev; /* BCD value representing DMI ver */
} __attribute__((packed));
/*
* BIOS Information
*/
#define SMBIOS_FL_ISA 0x00000010 /* ISA is supported */
#define SMBIOS_FL_PCI 0x00000080 /* PCI is supported */
#define SMBIOS_FL_SHADOW 0x00001000 /* BIOS shadowing is allowed */
#define SMBIOS_FL_CDBOOT 0x00008000 /* Boot from CD is supported */
#define SMBIOS_FL_SELBOOT 0x00010000 /* Selectable Boot supported */
#define SMBIOS_FL_EDD 0x00080000 /* EDD Spec is supported */
#define SMBIOS_XB1_FL_ACPI 0x00000001 /* ACPI is supported */
#define SMBIOS_XB2_FL_BBS 0x00000001 /* BIOS Boot Specification */
#define SMBIOS_XB2_FL_VM 0x00000010 /* Virtual Machine */
struct smbios_table_type0 {
struct smbios_structure header;
uint8_t vendor; /* vendor string */
uint8_t version; /* version string */
uint16_t segment; /* address segment location */
uint8_t rel_date; /* release date */
uint8_t size; /* rom size */
uint64_t cflags; /* characteristics */
uint8_t xc_bytes[2]; /* characteristics ext bytes */
uint8_t sb_major_rel; /* system bios version */
uint8_t sb_minor_rele;
uint8_t ecfw_major_rel; /* embedded ctrl fw version */
uint8_t ecfw_minor_rel;
} __attribute__((packed));
/*
* System Information
*/
#define SMBIOS_WAKEUP_SWITCH 0x06 /* power switch */
struct smbios_table_type1 {
struct smbios_structure header;
uint8_t manufacturer; /* manufacturer string */
uint8_t product; /* product name string */
uint8_t version; /* version string */
uint8_t serial; /* serial number string */
uint8_t uuid[16]; /* uuid byte array */
uint8_t wakeup; /* wake-up event */
uint8_t sku; /* sku number string */
uint8_t family; /* family name string */
} __attribute__((packed));
/*
* System Enclosure or Chassis
*/
#define SMBIOS_CHT_UNKNOWN 0x02 /* unknown */
#define SMBIOS_CHST_SAFE 0x03 /* safe */
#define SMBIOS_CHSC_NONE 0x03 /* none */
struct smbios_table_type3 {
struct smbios_structure header;
uint8_t manufacturer; /* manufacturer string */
uint8_t type; /* type */
uint8_t version; /* version string */
uint8_t serial; /* serial number string */
uint8_t asset; /* asset tag string */
uint8_t bustate; /* boot-up state */
uint8_t psstate; /* power supply state */
uint8_t tstate; /* thermal state */
uint8_t security; /* security status */
uint8_t uheight; /* height in 'u's */
uint8_t cords; /* number of power cords */
uint8_t elems; /* number of element records */
uint8_t elemlen; /* length of records */
uint8_t sku; /* sku number string */
} __attribute__((packed));
/*
* Processor Information
*/
#define SMBIOS_PRT_CENTRAL 0x03 /* central processor */
#define SMBIOS_PRF_OTHER 0x01 /* other */
#define SMBIOS_PRS_PRESENT 0x40 /* socket is populated */
#define SMBIOS_PRS_ENABLED 0x1 /* enabled */
#define SMBIOS_PRU_NONE 0x06 /* none */
#define SMBIOS_PFL_64B 0x04 /* 64-bit capable */
struct smbios_table_type4 {
struct smbios_structure header;
uint8_t socket; /* socket designation string */
uint8_t type; /* processor type */
uint8_t family; /* processor family */
uint8_t manufacturer; /* manufacturer string */
uint64_t cpuid; /* processor cpuid */
uint8_t version; /* version string */
uint8_t voltage; /* voltage */
uint16_t clkspeed; /* ext clock speed in mhz */
uint16_t maxspeed; /* maximum speed in mhz */
uint16_t curspeed; /* current speed in mhz */
uint8_t status; /* status */
uint8_t upgrade; /* upgrade */
uint16_t l1handle; /* l1 cache handle */
uint16_t l2handle; /* l2 cache handle */
uint16_t l3handle; /* l3 cache handle */
uint8_t serial; /* serial number string */
uint8_t asset; /* asset tag string */
uint8_t part; /* part number string */
uint8_t cores; /* cores per socket */
uint8_t ecores; /* enabled cores */
uint8_t threads; /* threads per socket */
uint16_t cflags; /* processor characteristics */
uint16_t family2; /* processor family 2 */
} __attribute__((packed));
/*
* Physical Memory Array
*/
#define SMBIOS_MAL_SYSMB 0x03 /* system board or motherboard */
#define SMBIOS_MAU_SYSTEM 0x03 /* system memory */
#define SMBIOS_MAE_NONE 0x03 /* none */
struct smbios_table_type16 {
struct smbios_structure header;
uint8_t location; /* physical device location */
uint8_t use; /* device functional purpose */
uint8_t ecc; /* err detect/correct method */
uint32_t size; /* max mem capacity in kb */
uint16_t errhand; /* handle of error (if any) */
uint16_t ndevs; /* num of slots or sockets */
uint64_t xsize; /* max mem capacity in bytes */
} __attribute__((packed));
/*
* Memory Device
*/
#define SMBIOS_MDFF_UNKNOWN 0x02 /* unknown */
#define SMBIOS_MDT_UNKNOWN 0x02 /* unknown */
#define SMBIOS_MDF_UNKNOWN 0x0004 /* unknown */
struct smbios_table_type17 {
struct smbios_structure header;
uint16_t arrayhand; /* handle of physl mem array */
uint16_t errhand; /* handle of mem error data */
uint16_t twidth; /* total width in bits */
uint16_t dwidth; /* data width in bits */
uint16_t size; /* size in bytes */
uint8_t form; /* form factor */
uint8_t set; /* set */
uint8_t dloc; /* device locator string */
uint8_t bloc; /* phys bank locator string */
uint8_t type; /* memory type */
uint16_t flags; /* memory characteristics */
uint16_t maxspeed; /* maximum speed in mhz */
uint8_t manufacturer; /* manufacturer string */
uint8_t serial; /* serial number string */
uint8_t asset; /* asset tag string */
uint8_t part; /* part number string */
uint8_t attributes; /* attributes */
uint32_t xsize; /* extended size in mbs */
uint16_t curspeed; /* current speed in mhz */
uint16_t minvoltage; /* minimum voltage */
uint16_t maxvoltage; /* maximum voltage */
uint16_t curvoltage; /* configured voltage */
} __attribute__((packed));
/*
* Memory Array Mapped Address
*/
struct smbios_table_type19 {
struct smbios_structure header;
uint32_t saddr; /* start phys addr in kb */
uint32_t eaddr; /* end phys addr in kb */
uint16_t arrayhand; /* physical mem array handle */
uint8_t width; /* num of dev in row */
uint64_t xsaddr; /* start phys addr in bytes */
uint64_t xeaddr; /* end phys addr in bytes */
} __attribute__((packed));
/*
* System Boot Information
*/
#define SMBIOS_BOOT_NORMAL 0 /* no errors detected */
struct smbios_table_type32 {
struct smbios_structure header;
uint8_t reserved[6];
uint8_t status; /* boot status */
} __attribute__((packed));
/*
* End-of-Table
*/
struct smbios_table_type127 {
struct smbios_structure header;
} __attribute__((packed));
struct smbios_table_type0 smbios_type0_template = {
{ SMBIOS_TYPE_BIOS, sizeof(struct smbios_table_type0), 0 },
1, /* bios vendor string */
2, /* bios version string */
0xF000, /* bios address segment location */
3, /* bios release date */
0x0, /* bios size (64k * (n + 1) is the size in bytes) */
SMBIOS_FL_ISA | SMBIOS_FL_PCI | SMBIOS_FL_SHADOW |
SMBIOS_FL_CDBOOT | SMBIOS_FL_EDD,
{ SMBIOS_XB1_FL_ACPI, SMBIOS_XB2_FL_BBS | SMBIOS_XB2_FL_VM },
0x0, /* bios major release */
0x0, /* bios minor release */
0xff, /* embedded controller firmware major release */
0xff /* embedded controller firmware minor release */
};
const char *smbios_type0_strings[] = {
"ACRN-DM", /* vendor string */
"1.00", /* bios version string */
"03/14/2014", /* bios release date string */
NULL
};
struct smbios_table_type1 smbios_type1_template = {
{ SMBIOS_TYPE_SYSTEM, sizeof(struct smbios_table_type1), 0 },
1, /* manufacturer string */
2, /* product string */
3, /* version string */
4, /* serial number string */
{ 0 },
SMBIOS_WAKEUP_SWITCH,
5, /* sku string */
6 /* family string */
};
static int smbios_type1_initializer(struct smbios_structure *template_entry,
const char **template_strings, char *curaddr, char **endaddr,
uint16_t *n, uint16_t *size);
const char *smbios_type1_strings[] = {
" ", /* manufacturer string */
"ACRN-DM", /* product name string */
"1.0", /* version string */
"None", /* serial number string */
"None", /* sku string */
" ", /* family name string */
NULL
};
struct smbios_table_type3 smbios_type3_template = {
{ SMBIOS_TYPE_CHASSIS, sizeof(struct smbios_table_type3), 0 },
1, /* manufacturer string */
SMBIOS_CHT_UNKNOWN,
2, /* version string */
3, /* serial number string */
4, /* asset tag string */
SMBIOS_CHST_SAFE,
SMBIOS_CHST_SAFE,
SMBIOS_CHST_SAFE,
SMBIOS_CHSC_NONE,
0, /* height in 'u's (0=enclosure height unspecified) */
0, /* number of power cords (0=number unspecified) */
0, /* number of contained element records */
0, /* length of records */
5 /* sku number string */
};
const char *smbios_type3_strings[] = {
" ", /* manufacturer string */
"1.0", /* version string */
"None", /* serial number string */
"None", /* asset tag string */
"None", /* sku number string */
NULL
};
struct smbios_table_type4 smbios_type4_template = {
{ SMBIOS_TYPE_PROCESSOR, sizeof(struct smbios_table_type4), 0 },
1, /* socket designation string */
SMBIOS_PRT_CENTRAL,
SMBIOS_PRF_OTHER,
2, /* manufacturer string */
0, /* cpuid */
3, /* version string */
0, /* voltage */
0, /* external clock frequency in mhz (0=unknown) */
0, /* maximum frequency in mhz (0=unknown) */
0, /* current frequency in mhz (0=unknown) */
SMBIOS_PRS_PRESENT | SMBIOS_PRS_ENABLED,
SMBIOS_PRU_NONE,
-1, /* l1 cache handle */
-1, /* l2 cache handle */
-1, /* l3 cache handle */
4, /* serial number string */
5, /* asset tag string */
6, /* part number string */
0, /* cores per socket (0=unknown) */
0, /* enabled cores per socket (0=unknown) */
0, /* threads per socket (0=unknown) */
SMBIOS_PFL_64B,
SMBIOS_PRF_OTHER
};
const char *smbios_type4_strings[] = {
" ", /* socket designation string */
" ", /* manufacturer string */
" ", /* version string */
"None", /* serial number string */
"None", /* asset tag string */
"None", /* part number string */
NULL
};
static int smbios_type4_initializer(struct smbios_structure *template_entry,
const char **template_strings, char *curaddr, char **endaddr,
uint16_t *n, uint16_t *size);
struct smbios_table_type16 smbios_type16_template = {
{ SMBIOS_TYPE_MEMARRAY, sizeof(struct smbios_table_type16), 0 },
SMBIOS_MAL_SYSMB,
SMBIOS_MAU_SYSTEM,
SMBIOS_MAE_NONE,
0x80000000, /* max mem capacity in kb (0x80000000=use extended) */
-1, /* handle of error (if any) */
0, /* number of slots or sockets (TBD) */
0 /* extended maximum memory capacity in bytes (TBD) */
};
static int smbios_type16_initializer(struct smbios_structure *template_entry,
const char **template_strings, char *curaddr, char **endaddr,
uint16_t *n, uint16_t *size);
struct smbios_table_type17 smbios_type17_template = {
{ SMBIOS_TYPE_MEMDEVICE, sizeof(struct smbios_table_type17), 0 },
-1, /* handle of physical memory array */
-1, /* handle of memory error data */
64, /* total width in bits including ecc */
64, /* data width in bits */
0x7fff, /* size in bytes (0x7fff=use extended)*/
SMBIOS_MDFF_UNKNOWN,
0, /* set (0x00=none, 0xff=unknown) */
1, /* device locator string */
2, /* physical bank locator string */
SMBIOS_MDT_UNKNOWN,
SMBIOS_MDF_UNKNOWN,
0, /* maximum memory speed in mhz (0=unknown) */
3, /* manufacturer string */
4, /* serial number string */
5, /* asset tag string */
6, /* part number string */
0, /* attributes (0=unknown rank information) */
0, /* extended size in mb (TBD) */
0, /* current speed in mhz (0=unknown) */
0, /* minimum voltage in mv (0=unknown) */
0, /* maximum voltage in mv (0=unknown) */
0 /* configured voltage in mv (0=unknown) */
};
const char *smbios_type17_strings[] = {
" ", /* device locator string */
" ", /* physical bank locator string */
" ", /* manufacturer string */
"None", /* serial number string */
"None", /* asset tag string */
"None", /* part number string */
NULL
};
static int smbios_type17_initializer(struct smbios_structure *template_entry,
const char **template_strings, char *curaddr, char **endaddr,
uint16_t *n, uint16_t *size);
struct smbios_table_type19 smbios_type19_template = {
{ SMBIOS_TYPE_MEMARRAYMAP, sizeof(struct smbios_table_type19), 0 },
0xffffffff, /* starting phys addr in kb (0xffffffff=use ext) */
0xffffffff, /* ending phys addr in kb (0xffffffff=use ext) */
-1, /* physical memory array handle */
1, /* number of devices that form a row */
0, /* extended starting phys addr in bytes (TBD) */
0 /* extended ending phys addr in bytes (TBD) */
};
static int smbios_type19_initializer(struct smbios_structure *template_entry,
const char **template_strings, char *curaddr, char **endaddr,
uint16_t *n, uint16_t *size);
struct smbios_table_type32 smbios_type32_template = {
{ SMBIOS_TYPE_BOOT, sizeof(struct smbios_table_type32), 0 },
{ 0, 0, 0, 0, 0, 0 },
SMBIOS_BOOT_NORMAL
};
struct smbios_table_type127 smbios_type127_template = {
{ SMBIOS_TYPE_EOT, sizeof(struct smbios_table_type127), 0 }
};
static int smbios_generic_initializer(struct smbios_structure *template_entry,
const char **template_strings, char *curaddr, char **endaddr,
uint16_t *n, uint16_t *size);
static struct smbios_template_entry smbios_template[] = {
{ (struct smbios_structure *)&smbios_type0_template,
smbios_type0_strings,
smbios_generic_initializer },
{ (struct smbios_structure *)&smbios_type1_template,
smbios_type1_strings,
smbios_type1_initializer },
{ (struct smbios_structure *)&smbios_type3_template,
smbios_type3_strings,
smbios_generic_initializer },
{ (struct smbios_structure *)&smbios_type4_template,
smbios_type4_strings,
smbios_type4_initializer },
{ (struct smbios_structure *)&smbios_type16_template,
NULL,
smbios_type16_initializer },
{ (struct smbios_structure *)&smbios_type17_template,
smbios_type17_strings,
smbios_type17_initializer },
{ (struct smbios_structure *)&smbios_type19_template,
NULL,
smbios_type19_initializer },
{ (struct smbios_structure *)&smbios_type32_template,
NULL,
smbios_generic_initializer },
{ (struct smbios_structure *)&smbios_type127_template,
NULL,
smbios_generic_initializer },
{ NULL, NULL, NULL }
};
static uint64_t guest_lomem, guest_himem;
static uint16_t type16_handle;
static int
smbios_generic_initializer(struct smbios_structure *template_entry,
const char **template_strings, char *curaddr, char **endaddr,
uint16_t *n, uint16_t *size)
{
struct smbios_structure *entry;
memcpy(curaddr, template_entry, template_entry->length);
entry = (struct smbios_structure *)curaddr;
entry->handle = *n + 1;
curaddr += entry->length;
if (template_strings != NULL) {
int i;
for (i = 0; template_strings[i] != NULL; i++) {
const char *string;
int len;
string = template_strings[i];
len = strlen(string) + 1;
memcpy(curaddr, string, len);
curaddr += len;
}
*curaddr = '\0';
curaddr++;
} else {
/* Minimum string section is double nul */
*curaddr = '\0';
curaddr++;
*curaddr = '\0';
curaddr++;
}
(*n)++;
*endaddr = curaddr;
return 0;
}
static int
smbios_type1_initializer(struct smbios_structure *template_entry,
const char **template_strings, char *curaddr, char **endaddr,
uint16_t *n, uint16_t *size)
{
struct smbios_table_type1 *type1;
smbios_generic_initializer(template_entry, template_strings,
curaddr, endaddr, n, size);
type1 = (struct smbios_table_type1 *)curaddr;
if (guest_uuid_str != NULL) {
uuid_t uuid;
uint32_t status;
status = uuid_parse(guest_uuid_str, uuid);
if (status != 0)
return -1;
/* TODO */
/* uuid_enc_le(&type1->uuid, &uuid); */
} else {
MD5_CTX mdctx;
u_char digest[16];
char hostname[MAXHOSTNAMELEN];
/*
* Universally unique and yet reproducible is an
* oxymoron; however, reproducible is desirable in
* this case.
*/
if (gethostname(hostname, sizeof(hostname)))
return -1;
MD5_Init(&mdctx);
MD5_Update(&mdctx, vmname, strlen(vmname));
MD5_Update(&mdctx, hostname, sizeof(hostname));
MD5_Final(digest, &mdctx);
/*
* Set the variant and version number.
*/
digest[6] &= 0x0F;
digest[6] |= 0x30; /* version 3 */
digest[8] &= 0x3F;
digest[8] |= 0x80;
memcpy(&type1->uuid, digest, sizeof(digest));
}
return 0;
}
static int
smbios_type4_initializer(struct smbios_structure *template_entry,
const char **template_strings, char *curaddr, char **endaddr,
uint16_t *n, uint16_t *size)
{
int i;
for (i = 0; i < guest_ncpus; i++) {
struct smbios_table_type4 *type4;
char *p;
int nstrings, len;
smbios_generic_initializer(template_entry, template_strings,
curaddr, endaddr, n, size);
type4 = (struct smbios_table_type4 *)curaddr;
p = curaddr + sizeof(struct smbios_table_type4);
nstrings = 0;
while (p < *endaddr - 1) {
if (*p++ == '\0')
nstrings++;
}
len = sprintf(*endaddr - 1, "CPU #%d", i) + 1;
*endaddr += len - 1;
*(*endaddr) = '\0';
(*endaddr)++;
type4->socket = nstrings + 1;
curaddr = *endaddr;
}
return 0;
}
static int
smbios_type16_initializer(struct smbios_structure *template_entry,
const char **template_strings, char *curaddr, char **endaddr,
uint16_t *n, uint16_t *size)
{
struct smbios_table_type16 *type16;
type16_handle = *n;
smbios_generic_initializer(template_entry, template_strings,
curaddr, endaddr, n, size);
type16 = (struct smbios_table_type16 *)curaddr;
type16->xsize = guest_lomem + guest_himem;
type16->ndevs = guest_himem > 0 ? 2 : 1;
return 0;
}
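/*
* Emit a type 17 (memory device) structure for lowmem and, when the guest
* has memory above 4GB, a second one for highmem; both reference the
* type 16 physical memory array through type16_handle.
*/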
static int
smbios_type17_initializer(struct smbios_structure *template_entry,
const char **template_strings, char *curaddr, char **endaddr,
uint16_t *n, uint16_t *size)
{
struct smbios_table_type17 *type17;
smbios_generic_initializer(template_entry, template_strings,
curaddr, endaddr, n, size);
type17 = (struct smbios_table_type17 *)curaddr;
type17->arrayhand = type16_handle;
type17->xsize = guest_lomem;
if (guest_himem > 0) {
curaddr = *endaddr;
smbios_generic_initializer(template_entry, template_strings,
curaddr, endaddr, n, size);
type17 = (struct smbios_table_type17 *)curaddr;
type17->arrayhand = type16_handle;
type17->xsize = guest_himem;
}
return 0;
}
static int
smbios_type19_initializer(struct smbios_structure *template_entry,
const char **template_strings, char *curaddr, char **endaddr,
uint16_t *n, uint16_t *size)
{
struct smbios_table_type19 *type19;
smbios_generic_initializer(template_entry, template_strings,
curaddr, endaddr, n, size);
type19 = (struct smbios_table_type19 *)curaddr;
type19->arrayhand = type16_handle;
type19->xsaddr = 0;
type19->xeaddr = guest_lomem;
if (guest_himem > 0) {
curaddr = *endaddr;
smbios_generic_initializer(template_entry, template_strings,
curaddr, endaddr, n, size);
type19 = (struct smbios_table_type19 *)curaddr;
type19->arrayhand = type16_handle;
type19->xsaddr = 4*GB;
type19->xeaddr = guest_himem;
}
return 0;
}
static void
smbios_ep_initializer(struct smbios_entry_point *smbios_ep, uint32_t staddr)
{
memset(smbios_ep, 0, sizeof(*smbios_ep));
memcpy(smbios_ep->eanchor, SMBIOS_ENTRY_EANCHOR,
SMBIOS_ENTRY_EANCHORLEN);
smbios_ep->eplen = 0x1F;
assert(sizeof(struct smbios_entry_point) == smbios_ep->eplen);
smbios_ep->major = 2;
smbios_ep->minor = 6;
smbios_ep->revision = 0;
memcpy(smbios_ep->ianchor, SMBIOS_ENTRY_IANCHOR,
SMBIOS_ENTRY_IANCHORLEN);
smbios_ep->staddr = staddr;
smbios_ep->bcdrev = 0x24;
}
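/*
* Fill in the structure-table length, count and maximum structure size,
* then recompute the two entry point checksums: the intermediate checksum
* covers bytes 0x10..0x1e, the full checksum covers bytes 0x00..0x1e.
*/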
static void
smbios_ep_finalizer(struct smbios_entry_point *smbios_ep, uint16_t len,
uint16_t num, uint16_t maxssize)
{
uint8_t checksum;
int i;
smbios_ep->maxssize = maxssize;
smbios_ep->stlen = len;
smbios_ep->stnum = num;
checksum = 0;
for (i = 0x10; i < 0x1f; i++)
checksum -= ((uint8_t *)smbios_ep)[i];
smbios_ep->ichecksum = checksum;
checksum = 0;
for (i = 0; i < 0x1f; i++)
checksum -= ((uint8_t *)smbios_ep)[i];
smbios_ep->echecksum = checksum;
}
int
smbios_build(struct vmctx *ctx)
{
struct smbios_entry_point *smbios_ep;
uint16_t n;
uint16_t maxssize;
char *curaddr, *startaddr, *ststartaddr;
int i;
int err;
guest_lomem = vm_get_lowmem_size(ctx);
guest_himem = vm_get_highmem_size(ctx);
startaddr = paddr_guest2host(ctx, SMBIOS_BASE, SMBIOS_MAX_LENGTH);
if (startaddr == NULL) {
fprintf(stderr, "smbios table requires mapped mem\n");
return -1;
}
curaddr = startaddr;
smbios_ep = (struct smbios_entry_point *)curaddr;
smbios_ep_initializer(smbios_ep, SMBIOS_BASE +
sizeof(struct smbios_entry_point));
curaddr += sizeof(struct smbios_entry_point);
ststartaddr = curaddr;
n = 0;
maxssize = 0;
for (i = 0; smbios_template[i].entry != NULL; i++) {
struct smbios_structure *entry;
const char **strings;
initializer_func_t initializer;
char *endaddr;
uint16_t size;
entry = smbios_template[i].entry;
strings = smbios_template[i].strings;
initializer = smbios_template[i].initializer;
err = (*initializer)(entry, strings, curaddr, &endaddr,
&n, &size);
if (err != 0)
return err;
if (size > maxssize)
maxssize = size;
curaddr = endaddr;
}
assert(curaddr - startaddr < SMBIOS_MAX_LENGTH);
smbios_ep_finalizer(smbios_ep, curaddr - ststartaddr, n, maxssize);
return 0;
}

View File

@@ -0,0 +1,331 @@
/*-
* Copyright (c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include "acrn_common.h"
#include "vmmapi.h"
#include "sw_load.h"
#define SETUP_SIG 0x5a5aaa55
/* If we load kernel/ramdisk/bootargs directly, the UOS
* memory layout will be like:
*
* | ... |
* +-----------------------------------------------------+
* | offset: 0xf2400 (ACPI table) |
* +-----------------------------------------------------+
* | ... |
* +-----------------------------------------------------+
* | offset: 16MB (kernel image) |
* +-----------------------------------------------------+
* | ... |
* +-----------------------------------------------------+
* | offset: lowmem - 4MB (ramdisk image) |
* +-----------------------------------------------------+
* | offset: lowmem - 8K (bootargs) |
* +-----------------------------------------------------+
* | offset: lowmem - 6K (kernel entry address) |
* +-----------------------------------------------------+
* | offset: lowmem - 4K (zero_page include e820 table) |
* +-----------------------------------------------------+
*/
/* Check default e820 table in sw_load_common.c for info about ctx->lowmem */
#define RAMDISK_LOAD_OFF(ctx) (ctx->lowmem - 4*MB)
#define BOOTARGS_LOAD_OFF(ctx) (ctx->lowmem - 8*KB)
#define KERNEL_ENTRY_OFF(ctx) (ctx->lowmem - 6*KB)
#define ZEROPAGE_LOAD_OFF(ctx) (ctx->lowmem - 4*KB)
#define KERNEL_LOAD_OFF(ctx) (16*MB)
/* The real mode kernel header, refer to Documentation/x86/boot.txt */
struct _zeropage {
uint8_t pad1[0x1e8]; /* 0x000 */
uint8_t e820_nentries; /* 0x1e8 */
uint8_t pad2[0x8]; /* 0x1e9 */
struct {
uint8_t hdr_pad1[0x1f]; /* 0x1f1 */
uint8_t loader_type; /* 0x210 */
uint8_t load_flags; /* 0x211 */
uint8_t hdr_pad2[0x2]; /* 0x212 */
uint32_t code32_start; /* 0x214 */
uint32_t ramdisk_addr; /* 0x218 */
uint32_t ramdisk_size; /* 0x21c */
uint8_t hdr_pad3[0x8]; /* 0x220 */
uint32_t bootargs_addr; /* 0x228 */
uint8_t hdr_pad4[0x3c]; /* 0x22c */
} __attribute__((packed)) hdr;
uint8_t pad3[0x68]; /* 0x268 */
struct e820_entry e820[0x80]; /* 0x2d0 */
uint8_t pad4[0x330]; /* 0xcd0 */
} __attribute__((packed));
static char ramdisk_path[STR_LEN];
static char kernel_path[STR_LEN];
static int with_ramdisk;
static int with_kernel;
static int ramdisk_size;
static int kernel_size;
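/*
* Scan the loaded bzImage, starting 1KB into the image in 4-byte steps,
* for the SETUP_SIG marker; the offset found, rounded up to 512-byte
* sectors, is treated as the real-mode setup size. The kernel entry used
* later is KERNEL_LOAD_OFF(ctx) + setup_size + 0x200.
*/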
static int
acrn_get_bzimage_setup_size(struct vmctx *ctx)
{
uint32_t *tmp, location = 1024, setup_sectors;
int size = -1;
tmp = (uint32_t *)(ctx->baseaddr + KERNEL_LOAD_OFF(ctx)) + 1024/4;
while (*tmp != SETUP_SIG && location < 0x8000) {
tmp++;
location += 4;
}
/* setup size must be at least 1024 bytes and smaller than 0x8000 */
if (location < 0x8000 && location > 1024) {
setup_sectors = (location + 511) / 512;
size = setup_sectors*512;
printf("SW_LOAD: found setup sig @ 0x%08x, "
"setup_size is 0x%08x\n",
location, size);
} else
printf("SW_LOAD ERR: could not get setup "
"size in kernel %s\n",
kernel_path);
return size;
}
int
acrn_parse_kernel(char *arg)
{
int len = strlen(arg);
if (len < STR_LEN) {
strncpy(kernel_path, arg, len);
kernel_path[len] = '\0';
if (check_image(kernel_path) != 0) {
fprintf(stderr, "SW_LOAD: check_image failed for '%s'\n",
kernel_path);
exit(10); /* Non-zero */
}
with_kernel = 1;
printf("SW_LOAD: get kernel path %s\n", kernel_path);
return 0;
} else
return -1;
}
int
acrn_parse_ramdisk(char *arg)
{
int len = strlen(arg);
if (len < STR_LEN) {
strncpy(ramdisk_path, arg, len);
ramdisk_path[len] = '\0';
if (check_image(ramdisk_path) != 0) {
fprintf(stderr, "SW_LOAD: check_image failed for '%s'\n",
ramdisk_path);
exit(11); /* Non-zero */
}
with_ramdisk = 1;
printf("SW_LOAD: get ramdisk path %s\n", ramdisk_path);
return 0;
} else
return -1;
}
static int
acrn_prepare_ramdisk(struct vmctx *ctx)
{
FILE *fp;
int len, read;
fp = fopen(ramdisk_path, "r");
if (fp == NULL) {
printf("SW_LOAD ERR: could not open ramdisk file %s\n",
ramdisk_path);
return -1;
}
fseek(fp, 0, SEEK_END);
len = ftell(fp);
if (len > (BOOTARGS_LOAD_OFF(ctx) - RAMDISK_LOAD_OFF(ctx))) {
printf("SW_LOAD ERR: the size of ramdisk file is too big"
" file len=0x%x, limit is 0x%lx\n", len,
BOOTARGS_LOAD_OFF(ctx) - RAMDISK_LOAD_OFF(ctx));
fclose(fp);
return -1;
}
ramdisk_size = len;
fseek(fp, 0, SEEK_SET);
read = fread(ctx->baseaddr + RAMDISK_LOAD_OFF(ctx),
sizeof(char), len, fp);
if (read < len) {
printf("SW_LOAD ERR: could not read the whole ramdisk file,"
" file len=%d, read %d\n", len, read);
fclose(fp);
return -1;
}
fclose(fp);
printf("SW_LOAD: ramdisk %s size %d copied to guest 0x%lx\n",
ramdisk_path, ramdisk_size, RAMDISK_LOAD_OFF(ctx));
return 0;
}
static int
acrn_prepare_kernel(struct vmctx *ctx)
{
FILE *fp;
int len, read;
fp = fopen(kernel_path, "r");
if (fp == NULL) {
printf("SW_LOAD ERR: could not open kernel file %s\n",
kernel_path);
return -1;
}
fseek(fp, 0, SEEK_END);
len = ftell(fp);
if ((len + KERNEL_LOAD_OFF(ctx)) > RAMDISK_LOAD_OFF(ctx)) {
printf("SW_LOAD ERR: need big system memory to fit image\n");
fclose(fp);
return -1;
}
kernel_size = len;
fseek(fp, 0, SEEK_SET);
read = fread(ctx->baseaddr + KERNEL_LOAD_OFF(ctx),
sizeof(char), len, fp);
if (read < len) {
printf("SW_LOAD ERR: could not read the whole kernel file,"
" file len=%d, read %d\n", len, read);
fclose(fp);
return -1;
}
fclose(fp);
printf("SW_LOAD: kernel %s size %d copied to guest 0x%lx\n",
kernel_path, kernel_size, KERNEL_LOAD_OFF(ctx));
return 0;
}
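/*
* Build the boot_params ("zero page") in guest memory: copy the setup
* header from the loaded bzImage, patch in the ramdisk and bootargs
* addresses, and generate the e820 map the guest kernel will consume.
*/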
static int
acrn_prepare_zeropage(struct vmctx *ctx, int setup_size)
{
struct _zeropage *zeropage = (struct _zeropage *)
(ctx->baseaddr + ZEROPAGE_LOAD_OFF(ctx));
struct _zeropage *kernel_load = (struct _zeropage *)
(ctx->baseaddr + KERNEL_LOAD_OFF(ctx));
/* clear the zeropage */
memset(zeropage, 0, 2*KB);
/* copy part of the header into the zero page */
memcpy(&(zeropage->hdr), &(kernel_load->hdr), sizeof(zeropage->hdr));
if (with_ramdisk) {
/*Copy ramdisk load_addr and size in zeropage header structure*/
zeropage->hdr.ramdisk_addr = (uint32_t)
((uint64_t)RAMDISK_LOAD_OFF(ctx));
zeropage->hdr.ramdisk_size = (uint32_t)ramdisk_size;
printf("SW_LOAD: build zeropage for ramdisk addr: 0x%x,"
" size: %d\n", zeropage->hdr.ramdisk_addr,
zeropage->hdr.ramdisk_size);
}
/* Copy bootargs load_addr in zeropage header structure */
zeropage->hdr.bootargs_addr = (uint32_t)
((uint64_t)BOOTARGS_LOAD_OFF(ctx));
printf("SW_LOAD: build zeropage for bootargs addr: 0x%x\n",
zeropage->hdr.bootargs_addr);
/* set constant arguments in zero page */
zeropage->hdr.loader_type = 0xff;
zeropage->hdr.load_flags |= (1<<5); /* quiet */
/* Create/add e820 table entries in zeropage */
zeropage->e820_nentries = acrn_create_e820_table(ctx, zeropage->e820);
return 0;
}
int
acrn_sw_load_bzimage(struct vmctx *ctx)
{
int ret, setup_size;
uint64_t *cfg_offset = (uint64_t *)(ctx->baseaddr + GUEST_CFG_OFFSET);
*cfg_offset = ctx->lowmem;
if (with_bootargs) {
strcpy(ctx->baseaddr + BOOTARGS_LOAD_OFF(ctx), get_bootargs());
printf("SW_LOAD: bootargs copied to guest 0x%lx\n",
BOOTARGS_LOAD_OFF(ctx));
}
if (with_ramdisk) {
ret = acrn_prepare_ramdisk(ctx);
if (ret)
return ret;
}
if (with_kernel) {
uint64_t *kernel_entry_addr =
(uint64_t *)(ctx->baseaddr + KERNEL_ENTRY_OFF(ctx));
ret = acrn_prepare_kernel(ctx);
if (ret)
return ret;
setup_size = acrn_get_bzimage_setup_size(ctx);
if (setup_size <= 0)
return -1;
*kernel_entry_addr = (uint64_t)
(KERNEL_LOAD_OFF(ctx) + setup_size + 0x200);
ret = acrn_prepare_zeropage(ctx, setup_size);
if (ret)
return ret;
printf("SW_LOAD: zeropage prepared @ 0x%lx, "
"kernel_entry_addr=0x%lx\n",
ZEROPAGE_LOAD_OFF(ctx), *kernel_entry_addr);
}
return 0;
}

View File

@@ -0,0 +1,230 @@
/*-
* Copyright (c) 2017 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include "vmmapi.h"
#include "sw_load.h"
#include "dm.h"
int with_bootargs;
static char bootargs[STR_LEN];
/*
* Default e820 mem map:
*
* there is a reserved memory hole for the PCI hole, APIC, etc.,
* so the memory layout is separated into lowmem & highmem.
* - if the requested memory size <= ctx->lowmem_limit, then there is only
* map[0]:0~ctx->lowmem for RAM
* ctx->lowmem = request_memory_size
* - if the requested memory size > ctx->lowmem_limit, then there are
* map[0]:0~ctx->lowmem_limit & map[2]:4G~ctx->highmem for RAM
* ctx->highmem = request_memory_size - ctx->lowmem_limit
*
* Begin End Type Length
* 0: 0 - 0xF0000 RAM 0xF0000
* 1 0xf0000 - 0x100000 (reserved) 0x10000
* 2 0x100000 - lowmem RAM lowmem - 0x100000
* 3: lowmem - bff_fffff (reserved) 0xc00_00000-lowmem
* 4: 0xc00_00000 - dff_fffff PCI hole 512MB
* 5: 0xe00_00000 - fff_fffff (reserved) 512MB
* 6: 1_000_00000 - highmem RAM highmem-4G
*/
const struct e820_entry e820_default_entries[NUM_E820_ENTRIES] = {
{ /* 0 to mptable/smbios/acpi */
.baseaddr = 0x00000000,
.length = 0xF0000,
.type = E820_TYPE_RAM
},
{ /* mptable/smbios/acpi to lowmem */
.baseaddr = 0xF0000,
.length = 0x10000,
.type = E820_TYPE_RESERVED
},
{ /* 1MB to lowmem */
.baseaddr = 0x100000,
.length = 0x48f00000,
.type = E820_TYPE_RAM
},
{ /* lowmem to lowmem_limit (reserved) */
.baseaddr = 0x49000000,
.length = 0x77000000,
.type = E820_TYPE_RESERVED
},
{ /* 3.5GB to 4GB (reserved) */
.baseaddr = 0xe0000000,
.length = 0x20000000,
.type = E820_TYPE_RESERVED
},
{
.baseaddr = 0x100000000,
.length = 0x000100000,
.type = E820_TYPE_RESERVED
},
};
int
acrn_parse_bootargs(char *arg)
{
int len = strlen(arg);
if (len < STR_LEN) {
strncpy(bootargs, arg, len);
bootargs[len] = '\0';
with_bootargs = 1;
printf("SW_LOAD: get bootargs %s\n", bootargs);
return 0;
} else
return -1;
}
char*
get_bootargs(void)
{
return bootargs;
}
int
check_image(char *path)
{
FILE *fp;
fp = fopen(path, "r");
if (fp == NULL)
return -1;
fclose(fp);
return 0;
}
/* Assumption:
* the range [start, start + size] belongs to one entry of e820 table
*/
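/*
* Illustrative example: carving a reserved range out of a larger RAM
* entry splits it in place and shifts any later entries to make room:
*
* before: [0x100000, 0x49000000) RAM
* add_e820_entry(e820, len, 0x1000000, 0x100000, E820_TYPE_RESERVED)
* after: [0x100000, 0x1000000) RAM
* [0x1000000, 0x1100000) RESERVED
* [0x1100000, 0x49000000) RAM
*
* The returned table length grows by the number of extra entries (2 here).
*/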
int
add_e820_entry(struct e820_entry *e820, int len, uint64_t start,
uint64_t size, uint32_t type)
{
int i, length = len;
uint64_t e_s, e_e;
for (i = 0; i < len; i++) {
e_s = e820[i].baseaddr;
e_e = e820[i].baseaddr + e820[i].length;
if ((e_s <= start) && ((start + size) <= e_e)) {
int index_s = 0, index_e = 3;
uint64_t pt[4];
uint32_t pt_t[3];
pt[0] = e_s;
pt[1] = start;
pt[2] = start + size;
pt[3] = e_e;
pt_t[0] = e820[i].type;
pt_t[1] = type;
pt_t[2] = e820[i].type;
if (e_s == start) {
index_s = 1;
}
if (e_e == (start + size)) {
index_e = 2;
}
length += index_e - index_s - 1;
if ((i != (len - 1) && ((index_e - index_s) > 1))) {
memmove(&e820[i + index_e - index_s],
&e820[i + 1], (len - i - 1) *
sizeof(struct e820_entry));
}
for (; index_s < index_e; index_s++, i++) {
e820[i].baseaddr = pt[index_s];
e820[i].length = pt[index_s + 1] - pt[index_s];
e820[i].type = pt_t[index_s];
}
break;
}
}
return length;
}
uint32_t
acrn_create_e820_table(struct vmctx *ctx, struct e820_entry *e820)
{
uint32_t k;
memcpy(e820, e820_default_entries, sizeof(e820_default_entries));
if (ctx->lowmem > 0) {
e820[LOWRAM_E820_ENTRIES].length = ctx->lowmem -
e820[LOWRAM_E820_ENTRIES].baseaddr;
e820[LOWRAM_E820_ENTRIES+1].baseaddr = ctx->lowmem;
e820[LOWRAM_E820_ENTRIES+1].length =
ctx->lowmem_limit - ctx->lowmem;
}
if (ctx->highmem > 0) {
e820[HIGHRAM_E820_ENTRIES].type = E820_TYPE_RAM;
e820[HIGHRAM_E820_ENTRIES].length = ctx->highmem;
}
printf("SW_LOAD: build e820 %d entries to addr: %p\r\n",
NUM_E820_ENTRIES, (void *)e820);
for (k = 0; k < NUM_E820_ENTRIES; k++)
printf("SW_LOAD: entry[%d]: addr 0x%016lx, size 0x%016lx, "
" type 0x%x\r\n",
k, e820[k].baseaddr,
e820[k].length,
e820[k].type);
return NUM_E820_ENTRIES;
}
int
acrn_sw_load(struct vmctx *ctx)
{
if (vsbl_file_name)
return acrn_sw_load_vsbl(ctx);
else
return acrn_sw_load_bzimage(ctx);
}

View File

@@ -0,0 +1,292 @@
/*-
* Copyright (c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include "dm.h"
#include "acrn_common.h"
#include "vmmapi.h"
#include "sw_load.h"
#include "acpi.h"
/* If the vsbl is loaded by DM, the UOS memory layout will be like:
*
* | ... |
* +--------------------------------------------------+
* | offset: 0xf2400 (ACPI table) |
* +--------------------------------------------------+
* | ... |
* +--------------------------------------------------+
* | offset: 16MB (vsbl image) |
* +--------------------------------------------------+
* | ... |
* +--------------------------------------------------+
* | offset: lowmem - 16K (partition blob) |
* +--------------------------------------------------+
* | offset: lowmem - 12K (e820 table) |
* +--------------------------------------------------+
* | offset: lowmem - 8K (boot_args_address) |
* +--------------------------------------------------+
* | offset: lowmem - 6K (vsbl entry address) |
* +--------------------------------------------------+
* | offset: lowmem - 4K (config_page with e820 table)|
* +--------------------------------------------------+
*/
/* Check default e820 table in sw_load_common.c for info about ctx->lowmem */
#define CONFIGPAGE_OFF(ctx) ((ctx)->lowmem - 4*KB)
#define VSBL_ENTRY_OFF(ctx) ((ctx)->lowmem - 6*KB)
#define BOOTARGS_OFF(ctx) ((ctx)->lowmem - 8*KB)
#define E820_TABLE_OFF(ctx) ((ctx)->lowmem - 12*KB)
#define GUEST_PART_INFO_OFF(ctx) ((ctx)->lowmem - 16*KB)
/* vsbl real entry is saved in the first 4 bytes of vsbl image */
#define VSBL_OFF(ctx) (16*MB)
struct vsbl_para {
uint64_t e820_table_address;
uint64_t e820_entries;
uint64_t acpi_table_address;
uint64_t acpi_table_size;
uint64_t guest_part_info_address;
uint64_t guest_part_info_size;
uint64_t vsbl_address;
uint64_t vsbl_size;
uint64_t bootargs_address;
uint32_t trusty_enabled;
uint32_t key_info_lock;
uint32_t watchdog_reset;
uint32_t boot_device_address;
};
static char guest_part_info_path[STR_LEN];
static int guest_part_info_size;
static bool with_guest_part_info;
static char vsbl_path[STR_LEN];
static int vsbl_size;
static int boot_blk_bdf;
#define LOW_8BIT(x) ((x) & 0xFF)
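/*
* Pack the boot block device's bus/device/function as
* (bus << 16) | (dev << 8) | func. For example, vsbl_set_bdf(0, 3, 0)
* records 0x000300 for PCI device 00:03.0.
*/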
void
vsbl_set_bdf(int bnum, int snum, int fnum)
{
boot_blk_bdf = (LOW_8BIT(bnum) << 16) | (LOW_8BIT(snum) << 8) |
LOW_8BIT(fnum);
}
int
acrn_parse_guest_part_info(char *arg)
{
int len = strlen(arg);
if (len < STR_LEN) {
strncpy(guest_part_info_path, arg, len);
guest_part_info_path[len] = '\0';
assert(check_image(guest_part_info_path) == 0);
with_guest_part_info = true;
printf("SW_LOAD: get partition blob path %s\n",
guest_part_info_path);
return 0;
} else
return -1;
}
static int
acrn_prepare_guest_part_info(struct vmctx *ctx)
{
FILE *fp;
int len, read;
fp = fopen(guest_part_info_path, "r");
if (fp == NULL) {
fprintf(stderr,
"SW_LOAD ERR: could not open partition blob %s\n",
guest_part_info_path);
return -1;
}
fseek(fp, 0, SEEK_END);
len = ftell(fp);
if ((len + GUEST_PART_INFO_OFF(ctx)) > BOOTARGS_OFF(ctx)) {
fprintf(stderr,
"SW_LOAD ERR: too large partition blob\n");
fclose(fp);
return -1;
}
guest_part_info_size = len;
fseek(fp, 0, SEEK_SET);
read = fread(ctx->baseaddr + GUEST_PART_INFO_OFF(ctx),
sizeof(char), len, fp);
if (read < len) {
fprintf(stderr,
"SW_LOAD ERR: could not read whole partition blob\n");
fclose(fp);
return -1;
}
fclose(fp);
printf("SW_LOAD: partition blob %s size %d copy to guest 0x%lx\n",
guest_part_info_path, guest_part_info_size,
GUEST_PART_INFO_OFF(ctx));
return 0;
}
int
acrn_parse_vsbl(char *arg)
{
int len = strlen(arg);
if (len < STR_LEN) {
strncpy(vsbl_path, arg, len);
vsbl_path[len] = '\0';
assert(check_image(vsbl_path) == 0);
vsbl_file_name = vsbl_path;
printf("SW_LOAD: get vsbl path %s\n",
vsbl_path);
return 0;
} else
return -1;
}
static int
acrn_prepare_vsbl(struct vmctx *ctx)
{
FILE *fp;
int len, read;
fp = fopen(vsbl_path, "r");
if (fp == NULL) {
fprintf(stderr,
"SW_LOAD ERR: could not open vsbl file: %s\n",
vsbl_path);
return -1;
}
fseek(fp, 0, SEEK_END);
len = ftell(fp);
if ((len + VSBL_OFF(ctx)) > GUEST_PART_INFO_OFF(ctx)) {
fprintf(stderr,
"SW_LOAD ERR: too large vsbl file\n");
fclose(fp);
return -1;
}
vsbl_size = len;
fseek(fp, 0, SEEK_SET);
read = fread(ctx->baseaddr + VSBL_OFF(ctx),
sizeof(char), len, fp);
if (read < len) {
fprintf(stderr,
"SW_LOAD ERR: could not read whole partition blob\n");
fclose(fp);
return -1;
}
fclose(fp);
printf("SW_LOAD: partition blob %s size %d copy to guest 0x%lx\n",
vsbl_path, vsbl_size, VSBL_OFF(ctx));
return 0;
}
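/*
* Populate the vsbl_para config page with the e820 map, ACPI table
* location, bootargs and optional partition blob, load the vsbl image at
* 16MB, reserve its range in the e820 map, and store the 32-bit entry
* point read from the first 4 bytes of the image.
*/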
int
acrn_sw_load_vsbl(struct vmctx *ctx)
{
int ret;
struct e820_entry *e820;
struct vsbl_para *vsbl_para;
uint64_t vsbl_start_addr =
(uint64_t)ctx->baseaddr + VSBL_OFF(ctx);
uint64_t *vsbl_entry =
(uint64_t *)(ctx->baseaddr + VSBL_ENTRY_OFF(ctx));
uint64_t *cfg_offset =
(uint64_t *)(ctx->baseaddr + GUEST_CFG_OFFSET);
*cfg_offset = ctx->lowmem;
vsbl_para = (struct vsbl_para *)
(ctx->baseaddr + CONFIGPAGE_OFF(ctx));
memset(vsbl_para, 0x0, sizeof(struct vsbl_para));
e820 = (struct e820_entry *)
(ctx->baseaddr + E820_TABLE_OFF(ctx));
vsbl_para->e820_entries = acrn_create_e820_table(ctx, e820);
vsbl_para->e820_table_address = E820_TABLE_OFF(ctx);
vsbl_para->acpi_table_address = get_acpi_base();
vsbl_para->acpi_table_size = get_acpi_table_length();
if (with_bootargs) {
strcpy(ctx->baseaddr + BOOTARGS_OFF(ctx), get_bootargs());
vsbl_para->bootargs_address = BOOTARGS_OFF(ctx);
} else {
vsbl_para->bootargs_address = 0;
}
if (with_guest_part_info) {
ret = acrn_prepare_guest_part_info(ctx);
if (ret)
return ret;
vsbl_para->guest_part_info_address = GUEST_PART_INFO_OFF(ctx);
vsbl_para->guest_part_info_size = guest_part_info_size;
} else {
vsbl_para->guest_part_info_address = 0;
vsbl_para->guest_part_info_size = 0;
}
ret = acrn_prepare_vsbl(ctx);
if (ret)
return ret;
vsbl_para->vsbl_address = VSBL_OFF(ctx);
vsbl_para->vsbl_size = vsbl_size;
vsbl_para->e820_entries = add_e820_entry(e820, vsbl_para->e820_entries,
vsbl_para->vsbl_address, vsbl_size, E820_TYPE_RESERVED);
*vsbl_entry = *((uint32_t *) vsbl_start_addr);
vsbl_para->boot_device_address = boot_blk_bdf;
vsbl_para->trusty_enabled = trusty_enabled;
return 0;
}

751
devicemodel/core/vmmapi.c Normal file
View File

@@ -0,0 +1,751 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/uio.h>
#include <sys/user.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <assert.h>
#include <string.h>
#include <ctype.h>
#include <fcntl.h>
#include <unistd.h>
#include <uuid/uuid.h>
#include "types.h"
#include "cpuset.h"
#include "segments.h"
#include "specialreg.h"
#include "vmm.h"
#include "vhm_ioctl_defs.h"
#include "vmmapi.h"
#include "mevent.h"
#include "dm.h"
#define MAP_NOCORE 0
#define MAP_ALIGNED_SUPER 0
/*
* Size of the guard region before and after the virtual address space
* mapping the guest physical memory. This must be a multiple of the
* superpage size for performance reasons.
*/
#define VM_MMAP_GUARD_SIZE (4 * MB)
#define SUPPORT_VHM_API_VERSION_MAJOR 1
#define SUPPORT_VHM_API_VERSION_MINOR 0
int
vm_create(const char *name)
{
/* TODO: specific part for vm create */
return 0;
}
static int
check_api(int fd)
{
struct api_version api_version;
int error;
error = ioctl(fd, IC_GET_API_VERSION, &api_version);
if (error) {
fprintf(stderr, "failed to get vhm api version\n");
return -1;
}
if (api_version.major_version != SUPPORT_VHM_API_VERSION_MAJOR ||
api_version.minor_version != SUPPORT_VHM_API_VERSION_MINOR) {
fprintf(stderr, "not support vhm api version\n");
return -1;
}
printf("VHM api version %d.%d\n", api_version.major_version,
api_version.minor_version);
return 0;
}
static int devfd = -1;
struct vmctx *
vm_open(const char *name)
{
struct vmctx *ctx;
struct acrn_create_vm create_vm;
int error, retry = 10;
uuid_t vm_uuid;
ctx = calloc(1, sizeof(struct vmctx) + strlen(name) + 1);
assert(ctx != NULL);
assert(devfd == -1);
devfd = open("/dev/acrn_vhm", O_RDWR|O_CLOEXEC);
if (devfd == -1) {
fprintf(stderr, "Could not open /dev/acrn_vhm\n");
goto err;
}
if (check_api(devfd) < 0)
goto err;
if (guest_uuid_str == NULL)
guest_uuid_str = "d2795438-25d6-11e8-864e-cb7a18b34643";
error = uuid_parse(guest_uuid_str, vm_uuid);
if (error != 0)
goto err;
/* save vm uuid to ctx */
uuid_copy(ctx->vm_uuid, vm_uuid);
/* Pass uuid as parameter of create vm*/
uuid_copy(create_vm.GUID, vm_uuid);
ctx->fd = devfd;
ctx->memflags = 0;
ctx->lowmem_limit = 2 * GB;
ctx->name = (char *)(ctx + 1);
strcpy(ctx->name, name);
/* Set trusty enable flag */
if (trusty_enabled)
create_vm.vm_flag |= SECURE_WORLD_ENABLED;
else
create_vm.vm_flag &= (~SECURE_WORLD_ENABLED);
while (retry > 0) {
error = ioctl(ctx->fd, IC_CREATE_VM, &create_vm);
if (error == 0)
break;
usleep(500000);
retry--;
}
if (error) {
fprintf(stderr, "failed to create VM %s\n", ctx->name);
goto err;
}
ctx->vmid = create_vm.vmid;
return ctx;
err:
free(ctx);
return NULL;
}
void
vm_close(struct vmctx *ctx)
{
if (!ctx)
return;
close(ctx->fd);
free(ctx);
devfd = -1;
}
int
vm_set_shared_io_page(struct vmctx *ctx, uint64_t page_vma)
{
int error;
error = ioctl(ctx->fd, IC_SET_IOREQ_BUFFER, page_vma);
if (error) {
fprintf(stderr, "failed to setup shared io page create VM %s\n",
ctx->name);
return -1;
}
return 0;
}
int
vm_create_ioreq_client(struct vmctx *ctx)
{
return ioctl(ctx->fd, IC_CREATE_IOREQ_CLIENT, 0);
}
int
vm_destroy_ioreq_client(struct vmctx *ctx)
{
return ioctl(ctx->fd, IC_DESTROY_IOREQ_CLIENT, ctx->ioreq_client);
}
int
vm_attach_ioreq_client(struct vmctx *ctx)
{
int error;
error = ioctl(ctx->fd, IC_ATTACH_IOREQ_CLIENT, ctx->ioreq_client);
if (error) {
fprintf(stderr, "attach ioreq client return %d "
"(1 = destroying, could be triggered by Power State "
"change, others = error)\n", error);
return error;
}
return 0;
}
int
vm_notify_request_done(struct vmctx *ctx, int vcpu)
{
int error;
struct ioreq_notify notify;
bzero(&notify, sizeof(notify));
notify.client_id = ctx->ioreq_client;
notify.vcpu = vcpu;
error = ioctl(ctx->fd, IC_NOTIFY_REQUEST_FINISH, &notify);
if (error) {
fprintf(stderr, "failed: notify request finish\n");
return -1;
}
return 0;
}
void
vm_destroy(struct vmctx *ctx)
{
if (ctx)
ioctl(ctx->fd, IC_DESTROY_VM, NULL);
}
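/*
* Parse a memory-size string with an optional k/m/g/b suffix; for example
* "2048m" and "2g" both yield 2GB. Sizes below 128MB are rejected.
*/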
int
vm_parse_memsize(const char *optarg, size_t *ret_memsize)
{
char *endptr;
size_t optval;
int shift;
optval = strtoul(optarg, &endptr, 0);
switch (tolower((unsigned char)*endptr)) {
case 'g':
shift = 30;
break;
case 'm':
shift = 20;
break;
case 'k':
shift = 10;
break;
case 'b':
case '\0': /* No unit. */
shift = 0;
break;
default:
/* Unrecognized unit. */
return -1;
}
optval = optval << shift;
if (optval < 128 * MB)
return -1;
*ret_memsize = optval;
return 0;
}
uint32_t
vm_get_lowmem_limit(struct vmctx *ctx)
{
return ctx->lowmem_limit;
}
void
vm_set_lowmem_limit(struct vmctx *ctx, uint32_t limit)
{
ctx->lowmem_limit = limit;
}
void
vm_set_memflags(struct vmctx *ctx, int flags)
{
ctx->memflags = flags;
}
int
vm_get_memflags(struct vmctx *ctx)
{
return ctx->memflags;
}
int
vm_map_memseg_vma(struct vmctx *ctx, size_t len, vm_paddr_t gpa,
uint64_t vma, int prot)
{
struct vm_memmap memmap;
bzero(&memmap, sizeof(struct vm_memmap));
memmap.type = VM_MEMMAP_SYSMEM;
memmap.using_vma = 1;
memmap.vma_base = vma;
memmap.len = len;
memmap.gpa = gpa;
memmap.prot = prot;
return ioctl(ctx->fd, IC_SET_MEMSEG, &memmap);
}
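/*
* Allocate and register a system-memory segment with the VHM driver
* (IC_ALLOC_MEMSEG / IC_SET_MEMSEG), then mmap it at base + gpa so guest
* physical addresses map linearly into the device model's address space.
* Only VM_MEMMAP_SYSMEM segments are supported here.
*/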
static int
vm_alloc_set_memseg(struct vmctx *ctx, int segid, size_t len,
vm_paddr_t gpa, int prot, char *base, char **ptr)
{
struct vm_memseg memseg;
struct vm_memmap memmap;
int error, flags;
if (segid == VM_MEMMAP_SYSMEM) {
bzero(&memseg, sizeof(struct vm_memseg));
memseg.len = len;
memseg.gpa = gpa;
error = ioctl(ctx->fd, IC_ALLOC_MEMSEG, &memseg);
if (error)
return error;
bzero(&memmap, sizeof(struct vm_memmap));
memmap.type = segid;
memmap.len = len;
memmap.gpa = gpa;
memmap.prot = PROT_ALL;
error = ioctl(ctx->fd, IC_SET_MEMSEG, &memmap);
if (error)
return error;
flags = MAP_SHARED | MAP_FIXED;
if ((ctx->memflags & VM_MEM_F_INCORE) == 0)
flags |= MAP_NOCORE;
/* mmap into the process address space on the host */
*ptr = mmap(base + gpa, len, PROT_RW, flags, ctx->fd, gpa);
if (*ptr == MAP_FAILED) {
*ptr = NULL;
error = -1;
}
} else
/* XXX: no VM_BOOTROM/VM_FRAMEBUFFER support*/
error = -1;
return error;
}
int
vm_setup_memory(struct vmctx *ctx, size_t memsize, enum vm_mmap_style vms)
{
size_t objsize, len;
vm_paddr_t gpa;
int prot;
char *baseaddr, *ptr;
int error, flags;
assert(vms == VM_MMAP_ALL);
/*
* If 'memsize' cannot fit entirely in the 'lowmem' segment then
* create another 'highmem' segment above 4GB for the remainder.
*/
if (memsize > ctx->lowmem_limit) {
ctx->lowmem = ctx->lowmem_limit;
ctx->highmem = memsize - ctx->lowmem_limit;
objsize = 4*GB + ctx->highmem;
} else {
ctx->lowmem = memsize;
ctx->highmem = 0;
objsize = ctx->lowmem;
}
if (hugetlb)
return hugetlb_setup_memory(ctx);
/*
* Stake out a contiguous region covering the guest physical memory
* and the adjoining guard regions.
*/
len = VM_MMAP_GUARD_SIZE + objsize + VM_MMAP_GUARD_SIZE;
flags = MAP_PRIVATE | MAP_ANON | MAP_NOCORE | MAP_ALIGNED_SUPER;
ptr = mmap(NULL, len, PROT_NONE, flags, -1, 0);
if (ptr == MAP_FAILED)
return -1;
baseaddr = ptr + VM_MMAP_GUARD_SIZE;
/* TODO: need add error handling */
/* alloc & map for lowmem */
if (ctx->lowmem > 0) {
gpa = 0;
len = ctx->lowmem;
prot = PROT_ALL;
error = vm_alloc_set_memseg(ctx, VM_MEMMAP_SYSMEM, len, gpa,
prot, baseaddr, &ctx->mmap_lowmem);
if (error)
return error;
}
/* alloc & map for highmem */
if (ctx->highmem > 0) {
gpa = 4*GB;
len = ctx->highmem;
prot = PROT_ALL;
error = vm_alloc_set_memseg(ctx, VM_MEMMAP_SYSMEM, len, gpa,
prot, baseaddr, &ctx->mmap_highmem);
if (error)
return error;
}
ctx->baseaddr = baseaddr;
return 0;
}
void
vm_unsetup_memory(struct vmctx *ctx)
{
if (hugetlb) {
hugetlb_unsetup_memory(ctx);
return;
}
if (ctx->lowmem > 0)
munmap(ctx->mmap_lowmem, ctx->lowmem);
if (ctx->highmem > 0)
munmap(ctx->mmap_highmem, ctx->highmem);
}
/*
* Returns a non-NULL pointer if [gaddr, gaddr+len) is entirely contained in
* the lowmem or highmem regions.
*
* In particular return NULL if [gaddr, gaddr+len) falls in guest MMIO region.
* The instruction emulation code depends on this behavior.
*/
void *
vm_map_gpa(struct vmctx *ctx, vm_paddr_t gaddr, size_t len)
{
if (ctx->lowmem > 0) {
if (gaddr < ctx->lowmem && len <= ctx->lowmem &&
gaddr + len <= ctx->lowmem)
return (ctx->baseaddr + gaddr);
}
if (ctx->highmem > 0) {
if (gaddr >= 4*GB) {
if (gaddr < 4*GB + ctx->highmem &&
len <= ctx->highmem &&
gaddr + len <= 4*GB + ctx->highmem)
return (ctx->baseaddr + gaddr);
}
}
return NULL;
}
size_t
vm_get_lowmem_size(struct vmctx *ctx)
{
return ctx->lowmem;
}
size_t
vm_get_highmem_size(struct vmctx *ctx)
{
return ctx->highmem;
}
void *
vm_create_devmem(struct vmctx *ctx, int segid, const char *name, size_t len)
{
return MAP_FAILED;
}
int
vm_run(struct vmctx *ctx)
{
int error;
error = ioctl(ctx->fd, IC_START_VM, &ctx->vmid);
return error;
}
void
vm_pause(struct vmctx *ctx)
{
ioctl(ctx->fd, IC_PAUSE_VM, &ctx->vmid);
}
static int suspend_mode = VM_SUSPEND_NONE;
void
vm_set_suspend_mode(enum vm_suspend_how how)
{
suspend_mode = how;
}
int
vm_get_suspend_mode(void)
{
return suspend_mode;
}
int
vm_suspend(struct vmctx *ctx, enum vm_suspend_how how)
{
vm_set_suspend_mode(how);
mevent_notify();
return 0;
}
int
vm_apicid2vcpu(struct vmctx *ctx, int apicid)
{
/*
* The apic id associated with the 'vcpu' has the same numerical value
* as the 'vcpu' itself.
*/
return apicid;
}
int
vm_lapic_msi(struct vmctx *ctx, uint64_t addr, uint64_t msg)
{
struct acrn_msi_entry msi;
bzero(&msi, sizeof(msi));
msi.msi_addr = addr;
msi.msi_data = msg;
return ioctl(ctx->fd, IC_INJECT_MSI, &msi);
}
int
vm_ioapic_assert_irq(struct vmctx *ctx, int irq)
{
struct acrn_irqline ioapic_irq;
bzero(&ioapic_irq, sizeof(ioapic_irq));
ioapic_irq.intr_type = ACRN_INTR_TYPE_IOAPIC;
ioapic_irq.ioapic_irq = irq;
return ioctl(ctx->fd, IC_ASSERT_IRQLINE, &ioapic_irq);
}
int
vm_ioapic_deassert_irq(struct vmctx *ctx, int irq)
{
struct acrn_irqline ioapic_irq;
bzero(&ioapic_irq, sizeof(ioapic_irq));
ioapic_irq.intr_type = ACRN_INTR_TYPE_IOAPIC;
ioapic_irq.ioapic_irq = irq;
return ioctl(ctx->fd, IC_DEASSERT_IRQLINE, &ioapic_irq);
}
static int
vm_isa_irq(struct vmctx *ctx, int irq, int ioapic_irq, unsigned long call_id)
{
struct acrn_irqline isa_irq;
bzero(&isa_irq, sizeof(isa_irq));
isa_irq.intr_type = ACRN_INTR_TYPE_ISA;
isa_irq.pic_irq = irq;
isa_irq.ioapic_irq = ioapic_irq;
return ioctl(ctx->fd, call_id, &isa_irq);
}
int
vm_isa_assert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
return vm_isa_irq(ctx, atpic_irq, ioapic_irq, IC_ASSERT_IRQLINE);
}
int
vm_isa_deassert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
return vm_isa_irq(ctx, atpic_irq, ioapic_irq, IC_DEASSERT_IRQLINE);
}
int
vm_isa_pulse_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
return vm_isa_irq(ctx, atpic_irq, ioapic_irq, IC_PULSE_IRQLINE);
}
int
vm_ioapic_pincount(struct vmctx *ctx, int *pincount)
{
*pincount = 24;
return 0;
}
int
vm_assign_ptdev(struct vmctx *ctx, int bus, int slot, int func)
{
uint16_t bdf;
bdf = ((bus & 0xff) << 8) | ((slot & 0x1f) << 3) |
(func & 0x7);
return ioctl(ctx->fd, IC_ASSIGN_PTDEV, &bdf);
}
int
vm_unassign_ptdev(struct vmctx *ctx, int bus, int slot, int func)
{
uint16_t bdf;
bdf = ((bus & 0xff) << 8) | ((slot & 0x1f) << 3) |
(func & 0x7);
return ioctl(ctx->fd, IC_DEASSIGN_PTDEV, &bdf);
}
int
vm_map_ptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
struct vm_memmap memmap;
bzero(&memmap, sizeof(struct vm_memmap));
memmap.type = VM_MMIO;
memmap.len = len;
memmap.gpa = gpa;
memmap.hpa = hpa;
memmap.prot = PROT_ALL;
return ioctl(ctx->fd, IC_SET_MEMSEG, &memmap);
}
int
vm_setup_ptdev_msi(struct vmctx *ctx, struct acrn_vm_pci_msix_remap *msi_remap)
{
if (!msi_remap)
return -1;
return ioctl(ctx->fd, IC_VM_PCI_MSIX_REMAP, msi_remap);
}
int
vm_set_ptdev_msix_info(struct vmctx *ctx, struct ic_ptdev_irq *ptirq)
{
if (!ptirq)
return -1;
return ioctl(ctx->fd, IC_SET_PTDEV_INTR_INFO, ptirq);
}
int
vm_reset_ptdev_msix_info(struct vmctx *ctx, uint16_t virt_bdf,
int vector_count)
{
struct ic_ptdev_irq ptirq;
bzero(&ptirq, sizeof(ptirq));
ptirq.type = IRQ_MSIX;
ptirq.virt_bdf = virt_bdf;
ptirq.msix.vector_cnt = vector_count;
return ioctl(ctx->fd, IC_RESET_PTDEV_INTR_INFO, &ptirq);
}
int
vm_set_ptdev_intx_info(struct vmctx *ctx, uint16_t virt_bdf, uint16_t phys_bdf,
int virt_pin, int phys_pin, bool pic_pin)
{
struct ic_ptdev_irq ptirq;
bzero(&ptirq, sizeof(ptirq));
ptirq.type = IRQ_INTX;
ptirq.virt_bdf = virt_bdf;
ptirq.phys_bdf = phys_bdf;
ptirq.intx.virt_pin = virt_pin;
ptirq.intx.phys_pin = phys_pin;
ptirq.intx.is_pic_pin = pic_pin;
return ioctl(ctx->fd, IC_SET_PTDEV_INTR_INFO, &ptirq);
}
int
vm_reset_ptdev_intx_info(struct vmctx *ctx, int virt_pin, bool pic_pin)
{
struct ic_ptdev_irq ptirq;
bzero(&ptirq, sizeof(ptirq));
ptirq.type = IRQ_INTX;
ptirq.intx.virt_pin = virt_pin;
ptirq.intx.is_pic_pin = pic_pin;
return ioctl(ctx->fd, IC_RESET_PTDEV_INTR_INFO, &ptirq);
}
int
vm_create_vcpu(struct vmctx *ctx, int vcpu_id)
{
struct acrn_create_vcpu cv;
int error;
bzero(&cv, sizeof(struct acrn_create_vcpu));
cv.vcpu_id = vcpu_id;
error = ioctl(ctx->fd, IC_CREATE_VCPU, &cv);
return error;
}
int
vm_get_device_fd(struct vmctx *ctx)
{
return ctx->fd;
}
int
vm_get_cpu_state(struct vmctx *ctx, void *state_buf)
{
return ioctl(ctx->fd, IC_PM_GET_CPU_STATE, state_buf);
}

936
devicemodel/hw/block_if.c Normal file
View File

@@ -0,0 +1,936 @@
/*-
* Copyright (c) 2013 Peter Grehan <grehan@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <errno.h>
#include <assert.h>
#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include <signal.h>
#include <sysexits.h>
#include <unistd.h>
#include "dm.h"
#include "mevent.h"
#include "block_if.h"
#include "ahci.h"
/*
* Notes:
* F_OFD_SETLK support was introduced in glibc 2.20, and the glibc
* version on the target board is above 2.20.
* The following code temporarily fixes build issues on Ubuntu 14.04,
* where the default glibc version is 2.19.
* Ideally, a cross-compiling toolchain should be used to build applications.
*/
#ifndef F_OFD_SETLK
#define F_OFD_SETLK 37
#endif
#define BLOCKIF_SIG 0xb109b109
#define BLOCKIF_NUMTHR 8
#define BLOCKIF_MAXREQ (64 + BLOCKIF_NUMTHR)
/*
* Debug printf
*/
static int block_if_debug;
#define DPRINTF(params) do { if (block_if_debug) printf params; } while (0)
#define WPRINTF(params) (printf params)
enum blockop {
BOP_READ,
BOP_WRITE,
BOP_FLUSH,
BOP_DELETE
};
enum blockstat {
BST_FREE,
BST_BLOCK,
BST_PEND,
BST_BUSY,
BST_DONE
};
struct blockif_elem {
TAILQ_ENTRY(blockif_elem) link;
struct blockif_req *req;
enum blockop op;
enum blockstat status;
pthread_t tid;
off_t block;
};
struct blockif_ctxt {
int magic;
int fd;
int isblk;
int isgeom;
int candelete;
int rdonly;
off_t size;
int sub_file_assign;
off_t sub_file_start_lba;
struct flock fl;
int sectsz;
int psectsz;
int psectoff;
int closing;
pthread_t btid[BLOCKIF_NUMTHR];
pthread_mutex_t mtx;
pthread_cond_t cond;
/* Request elements and free/pending/busy queues */
TAILQ_HEAD(, blockif_elem) freeq;
TAILQ_HEAD(, blockif_elem) pendq;
TAILQ_HEAD(, blockif_elem) busyq;
struct blockif_elem reqs[BLOCKIF_MAXREQ];
};
static pthread_once_t blockif_once = PTHREAD_ONCE_INIT;
struct blockif_sig_elem {
pthread_mutex_t mtx;
pthread_cond_t cond;
int pending;
struct blockif_sig_elem *next;
};
static struct blockif_sig_elem *blockif_bse_head;
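/*
* Take a free request element and queue it. The element is marked
* BST_BLOCK (deferred) when another pending or in-flight request ends
* exactly where this one starts, so dependent requests stay ordered;
* otherwise it is BST_PEND and the function returns nonzero so the
* caller can wake a worker thread.
*/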
static int
blockif_enqueue(struct blockif_ctxt *bc, struct blockif_req *breq,
enum blockop op)
{
struct blockif_elem *be, *tbe;
off_t off;
int i;
be = TAILQ_FIRST(&bc->freeq);
assert(be != NULL);
assert(be->status == BST_FREE);
TAILQ_REMOVE(&bc->freeq, be, link);
be->req = breq;
be->op = op;
switch (op) {
case BOP_READ:
case BOP_WRITE:
case BOP_DELETE:
off = breq->offset;
for (i = 0; i < breq->iovcnt; i++)
off += breq->iov[i].iov_len;
break;
default:
/* BOP_FLUSH: use a sentinel offset larger than any real request
* (stands in for OFF_MAX).
*/
off = (off_t)1 << (sizeof(off_t) * 8 - 2);
}
be->block = off;
TAILQ_FOREACH(tbe, &bc->pendq, link) {
if (tbe->block == breq->offset)
break;
}
if (tbe == NULL) {
TAILQ_FOREACH(tbe, &bc->busyq, link) {
if (tbe->block == breq->offset)
break;
}
}
if (tbe == NULL)
be->status = BST_PEND;
else
be->status = BST_BLOCK;
TAILQ_INSERT_TAIL(&bc->pendq, be, link);
return (be->status == BST_PEND);
}
static int
blockif_dequeue(struct blockif_ctxt *bc, pthread_t t, struct blockif_elem **bep)
{
struct blockif_elem *be;
TAILQ_FOREACH(be, &bc->pendq, link) {
if (be->status == BST_PEND)
break;
assert(be->status == BST_BLOCK);
}
if (be == NULL)
return 0;
TAILQ_REMOVE(&bc->pendq, be, link);
be->status = BST_BUSY;
be->tid = t;
TAILQ_INSERT_TAIL(&bc->busyq, be, link);
*bep = be;
return 1;
}
static void
blockif_complete(struct blockif_ctxt *bc, struct blockif_elem *be)
{
struct blockif_elem *tbe;
if (be->status == BST_DONE || be->status == BST_BUSY)
TAILQ_REMOVE(&bc->busyq, be, link);
else
TAILQ_REMOVE(&bc->pendq, be, link);
TAILQ_FOREACH(tbe, &bc->pendq, link) {
if (tbe->req->offset == be->block)
tbe->status = BST_PEND;
}
be->tid = 0;
be->status = BST_FREE;
be->req = NULL;
TAILQ_INSERT_TAIL(&bc->freeq, be, link);
}
static void
blockif_proc(struct blockif_ctxt *bc, struct blockif_elem *be, uint8_t *buf)
{
struct blockif_req *br;
off_t arg[2];
ssize_t clen, len, off, boff, voff;
int i, err;
br = be->req;
if (br->iovcnt <= 1)
buf = NULL;
err = 0;
switch (be->op) {
case BOP_READ:
if (buf == NULL) {
len = preadv(bc->fd, br->iov, br->iovcnt,
br->offset + bc->sub_file_start_lba);
if (len < 0)
err = errno;
else
br->resid -= len;
break;
}
i = 0;
off = voff = 0;
while (br->resid > 0) {
len = MIN(br->resid, MAXPHYS);
if (pread(bc->fd, buf, len, br->offset +
off + bc->sub_file_start_lba) < 0) {
err = errno;
break;
}
boff = 0;
do {
clen = MIN(len - boff, br->iov[i].iov_len -
voff);
memcpy(br->iov[i].iov_base + voff,
buf + boff, clen);
if (clen < br->iov[i].iov_len - voff)
voff += clen;
else {
i++;
voff = 0;
}
boff += clen;
} while (boff < len);
off += len;
br->resid -= len;
}
break;
case BOP_WRITE:
if (bc->rdonly) {
err = EROFS;
break;
}
if (buf == NULL) {
len = pwritev(bc->fd, br->iov, br->iovcnt,
br->offset + bc->sub_file_start_lba);
if (len < 0)
err = errno;
else
br->resid -= len;
break;
}
i = 0;
off = voff = 0;
while (br->resid > 0) {
len = MIN(br->resid, MAXPHYS);
boff = 0;
do {
clen = MIN(len - boff, br->iov[i].iov_len -
voff);
memcpy(buf + boff,
br->iov[i].iov_base + voff, clen);
if (clen < br->iov[i].iov_len - voff)
voff += clen;
else {
i++;
voff = 0;
}
boff += clen;
} while (boff < len);
if (pwrite(bc->fd, buf, len, br->offset +
off + bc->sub_file_start_lba) < 0) {
err = errno;
break;
}
off += len;
br->resid -= len;
}
break;
case BOP_FLUSH:
if (fsync(bc->fd))
err = errno;
break;
case BOP_DELETE:
/* only used by AHCI */
if (!bc->candelete)
err = EOPNOTSUPP;
else if (bc->rdonly)
err = EROFS;
else if (bc->isblk) {
arg[0] = br->offset;
arg[1] = br->resid;
if (ioctl(bc->fd, BLKDISCARD, arg))
err = errno;
else
br->resid = 0;
}
else
err = EOPNOTSUPP;
break;
default:
err = EINVAL;
break;
}
be->status = BST_DONE;
(*br->callback)(br, err);
}
static void *
blockif_thr(void *arg)
{
struct blockif_ctxt *bc;
struct blockif_elem *be;
pthread_t t;
uint8_t *buf;
bc = arg;
if (bc->isgeom)
buf = malloc(MAXPHYS);
else
buf = NULL;
t = pthread_self();
pthread_mutex_lock(&bc->mtx);
for (;;) {
while (blockif_dequeue(bc, t, &be)) {
pthread_mutex_unlock(&bc->mtx);
blockif_proc(bc, be, buf);
pthread_mutex_lock(&bc->mtx);
blockif_complete(bc, be);
}
/* Check ctxt status here to see if exit requested */
if (bc->closing)
break;
pthread_cond_wait(&bc->cond, &bc->mtx);
}
pthread_mutex_unlock(&bc->mtx);
if (buf)
free(buf);
pthread_exit(NULL);
return NULL;
}
static void
blockif_sigcont_handler(int signal)
{
struct blockif_sig_elem *bse;
WPRINTF(("block_if sigcont handler!\n"));
for (;;) {
/*
* Process the entire list even if not intended for
* this thread.
*/
do {
bse = blockif_bse_head;
if (bse == NULL)
return;
} while (!__sync_bool_compare_and_swap(
(uintptr_t *)&blockif_bse_head,
(uintptr_t)bse,
(uintptr_t)bse->next));
pthread_mutex_lock(&bse->mtx);
bse->pending = 0;
pthread_cond_signal(&bse->cond);
pthread_mutex_unlock(&bse->mtx);
}
}
static void
blockif_init(void)
{
signal(SIGCONT, blockif_sigcont_handler);
}
/*
* This function checks if the sub file range, specified by sub_start and
* sub_size, has any overlap with other sub file ranges with write access.
*/
static int
sub_file_validate(struct blockif_ctxt *bc, int fd, int read_only,
off_t sub_start, off_t sub_size)
{
struct flock *fl = &bc->fl;
memset(fl, 0, sizeof(struct flock));
fl->l_whence = SEEK_SET; /* offset base is start of file */
if (read_only)
fl->l_type = F_RDLCK;
else
fl->l_type = F_WRLCK;
fl->l_start = sub_start;
fl->l_len = sub_size;
/* use "open file description locks" to validate */
if (fcntl(fd, F_OFD_SETLK, fl) == -1) {
DPRINTF(("failed to lock subfile!\n"));
return -1;
}
/* Keep file lock on to prevent other sub files, until DM exits */
return 0;
}
void
sub_file_unlock(struct blockif_ctxt *bc)
{
struct flock *fl;
if (bc->sub_file_assign) {
fl = &bc->fl;
DPRINTF(("blockif: release file lock...\n"));
fl->l_type = F_UNLCK;
if (fcntl(bc->fd, F_OFD_SETLK, fl) == -1) {
fprintf(stderr, "blockif: failed to unlock subfile!\n");
exit(1);
}
DPRINTF(("blockif: release done\n"));
}
}
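/*
* Open the backing file or block device described by an option string of
* the form "<path>[,nocache][,sync|direct][,ro][,sectorsize=logical[/physical]]
* [,range=start/size]", e.g. "/data/uos.img,ro,sectorsize=512".
* A write-through policy (O_DIRECT | O_SYNC) is enforced by default.
*/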
struct blockif_ctxt *
blockif_open(const char *optstr, const char *ident)
{
char tname[MAXCOMLEN + 1];
/* char name[MAXPATHLEN]; */
char *nopt, *xopts, *cp;
struct blockif_ctxt *bc;
struct stat sbuf;
/* struct diocgattr_arg arg; */
off_t size, psectsz, psectoff;
int extra, fd, i, sectsz;
int nocache, sync, ro, candelete, geom, ssopt, pssopt;
long sz;
long long b;
int err_code = -1;
off_t sub_file_start_lba, sub_file_size;
int sub_file_assign;
pthread_once(&blockif_once, blockif_init);
fd = -1;
ssopt = 0;
nocache = 0;
sync = 0;
ro = 0;
sub_file_assign = 0;
/*
* The first element in the optstring is always a pathname.
* Optional elements follow
*/
nopt = xopts = strdup(optstr);
if (!nopt) {
WPRINTF(("block_if.c: strdup retruns NULL\n"));
return NULL;
}
while (xopts != NULL) {
cp = strsep(&xopts, ",");
if (cp == nopt) /* file or device pathname */
continue;
else if (!strcmp(cp, "nocache"))
nocache = 1;
else if (!strcmp(cp, "sync") || !strcmp(cp, "direct"))
sync = 1;
else if (!strcmp(cp, "ro"))
ro = 1;
else if (sscanf(cp, "sectorsize=%d/%d", &ssopt, &pssopt) == 2)
;
else if (sscanf(cp, "sectorsize=%d", &ssopt) == 1)
pssopt = ssopt;
else if (sscanf(cp, "range=%ld/%ld", &sub_file_start_lba,
&sub_file_size) == 2)
sub_file_assign = 1;
else {
fprintf(stderr, "Invalid device option \"%s\"\n", cp);
goto err;
}
}
/* enforce a write-through policy by default */
nocache = 1;
sync = 1;
extra = 0;
if (nocache)
extra |= O_DIRECT;
if (sync)
extra |= O_SYNC;
fd = open(nopt, (ro ? O_RDONLY : O_RDWR) | extra);
if (fd < 0 && !ro) {
/* The r/w open failed; retry read-only */
fd = open(nopt, O_RDONLY | extra);
ro = 1;
}
if (fd < 0) {
warn("Could not open backing file: %s", nopt);
goto err;
}
if (fstat(fd, &sbuf) < 0) {
warn("Could not stat backing file %s", nopt);
goto err;
}
/*
* Deal with raw devices
*/
size = sbuf.st_size;
sectsz = DEV_BSIZE;
psectsz = psectoff = 0;
candelete = geom = 0;
if (S_ISBLK(sbuf.st_mode)) {
/* get size */
err_code = ioctl(fd, BLKGETSIZE, &sz);
if (err_code) {
fprintf(stderr, "error %d getting block size!\n",
err_code);
size = sbuf.st_size; /* set default value */
} else {
size = sz * DEV_BSIZE; /* DEV_BSIZE is 512 on Linux */
}
if (!err_code || err_code == EFBIG) {
err_code = ioctl(fd, BLKGETSIZE64, &b);
if (err_code || b == 0 || b == sz)
size = b * DEV_BSIZE;
else
size = b;
}
DPRINTF(("block partition size is 0x%lx\n", size));
/* get sector size, 512 on Linux */
sectsz = DEV_BSIZE;
DPRINTF(("block partition sector size is 0x%x\n", sectsz));
/* get physical sector size */
err_code = ioctl(fd, BLKPBSZGET, &psectsz);
if (err_code) {
fprintf(stderr, "error %d getting physical sectsz!\n",
err_code);
psectsz = DEV_BSIZE; /* set default physical size */
}
DPRINTF(("block partition physical sector size is 0x%lx\n",
psectsz));
} else
psectsz = sbuf.st_blksize;
if (ssopt != 0) {
if (!powerof2(ssopt) || !powerof2(pssopt) || ssopt < 512 ||
ssopt > pssopt) {
fprintf(stderr, "Invalid sector size %d/%d\n",
ssopt, pssopt);
goto err;
}
/*
* Some backend drivers (e.g. cd0, ada0) require that the I/O
* size be a multiple of the device's sector size.
*
* Validate that the emulated sector size complies with this
* requirement.
*/
if (S_ISCHR(sbuf.st_mode)) {
if (ssopt < sectsz || (ssopt % sectsz) != 0) {
fprintf(stderr,
"Sector size %d incompatible with underlying device sector size %d\n",
ssopt, sectsz);
goto err;
}
}
sectsz = ssopt;
psectsz = pssopt;
psectoff = 0;
}
bc = calloc(1, sizeof(struct blockif_ctxt));
if (bc == NULL) {
perror("calloc");
goto err;
}
if (sub_file_assign) {
DPRINTF(("sector size is %d\n", sectsz));
bc->sub_file_assign = 1;
bc->sub_file_start_lba = sub_file_start_lba * sectsz;
size = sub_file_size * sectsz;
DPRINTF(("Validating sub file...\n"));
err_code = sub_file_validate(bc, fd, ro, bc->sub_file_start_lba,
size);
if (err_code < 0) {
fprintf(stderr, "subfile range specified not valid!\n");
exit(1);
}
DPRINTF(("Validated done!\n"));
} else {
/* normal case */
bc->sub_file_assign = 0;
bc->sub_file_start_lba = 0;
}
bc->magic = BLOCKIF_SIG;
bc->fd = fd;
bc->isblk = S_ISBLK(sbuf.st_mode);
bc->isgeom = geom;
bc->candelete = candelete;
bc->rdonly = ro;
bc->size = size;
bc->sectsz = sectsz;
bc->psectsz = psectsz;
bc->psectoff = psectoff;
pthread_mutex_init(&bc->mtx, NULL);
pthread_cond_init(&bc->cond, NULL);
TAILQ_INIT(&bc->freeq);
TAILQ_INIT(&bc->pendq);
TAILQ_INIT(&bc->busyq);
for (i = 0; i < BLOCKIF_MAXREQ; i++) {
bc->reqs[i].status = BST_FREE;
TAILQ_INSERT_HEAD(&bc->freeq, &bc->reqs[i], link);
}
for (i = 0; i < BLOCKIF_NUMTHR; i++) {
pthread_create(&bc->btid[i], NULL, blockif_thr, bc);
snprintf(tname, sizeof(tname), "blk-%s-%d", ident, i);
pthread_setname_np(bc->btid[i], tname);
}
return bc;
err:
if (fd >= 0)
close(fd);
return NULL;
}
static int
blockif_request(struct blockif_ctxt *bc, struct blockif_req *breq,
enum blockop op)
{
int err;
err = 0;
pthread_mutex_lock(&bc->mtx);
if (!TAILQ_EMPTY(&bc->freeq)) {
/*
* Enqueue and inform the block i/o thread
* that there is work available
*/
if (blockif_enqueue(bc, breq, op))
pthread_cond_signal(&bc->cond);
} else {
/*
* Callers are not allowed to enqueue more than
* the specified blockif queue limit. Return an
* error to indicate that the queue length has been
* exceeded.
*/
err = E2BIG;
}
pthread_mutex_unlock(&bc->mtx);
return err;
}
int
blockif_read(struct blockif_ctxt *bc, struct blockif_req *breq)
{
assert(bc->magic == BLOCKIF_SIG);
return blockif_request(bc, breq, BOP_READ);
}
int
blockif_write(struct blockif_ctxt *bc, struct blockif_req *breq)
{
assert(bc->magic == BLOCKIF_SIG);
return blockif_request(bc, breq, BOP_WRITE);
}
int
blockif_flush(struct blockif_ctxt *bc, struct blockif_req *breq)
{
assert(bc->magic == BLOCKIF_SIG);
return blockif_request(bc, breq, BOP_FLUSH);
}
int
blockif_delete(struct blockif_ctxt *bc, struct blockif_req *breq)
{
assert(bc->magic == BLOCKIF_SIG);
return blockif_request(bc, breq, BOP_DELETE);
}
int
blockif_cancel(struct blockif_ctxt *bc, struct blockif_req *breq)
{
struct blockif_elem *be;
assert(bc->magic == BLOCKIF_SIG);
pthread_mutex_lock(&bc->mtx);
/*
* Check pending requests.
*/
TAILQ_FOREACH(be, &bc->pendq, link) {
if (be->req == breq)
break;
}
if (be != NULL) {
/*
* Found it.
*/
blockif_complete(bc, be);
pthread_mutex_unlock(&bc->mtx);
return 0;
}
/*
* Check in-flight requests.
*/
TAILQ_FOREACH(be, &bc->busyq, link) {
if (be->req == breq)
break;
}
if (be == NULL) {
/*
* Didn't find it.
*/
pthread_mutex_unlock(&bc->mtx);
return -1;
}
/*
* Interrupt the processing thread to force it to return
* prematurely via its normal callback path.
*/
while (be->status == BST_BUSY) {
struct blockif_sig_elem bse, *old_head;
pthread_mutex_init(&bse.mtx, NULL);
pthread_cond_init(&bse.cond, NULL);
bse.pending = 1;
do {
old_head = blockif_bse_head;
bse.next = old_head;
} while (!__sync_bool_compare_and_swap((uintptr_t *)&
blockif_bse_head,
(uintptr_t)old_head,
(uintptr_t)&bse));
pthread_kill(be->tid, SIGCONT);
pthread_mutex_lock(&bse.mtx);
while (bse.pending)
pthread_cond_wait(&bse.cond, &bse.mtx);
pthread_mutex_unlock(&bse.mtx);
}
pthread_mutex_unlock(&bc->mtx);
/*
* The processing thread has been interrupted. Since it's not
* clear if the callback has been invoked yet, return EBUSY.
*/
return -EBUSY;
}
int
blockif_close(struct blockif_ctxt *bc)
{
void *jval;
int i;
assert(bc->magic == BLOCKIF_SIG);
sub_file_unlock(bc);
/*
* Stop the block i/o thread
*/
pthread_mutex_lock(&bc->mtx);
bc->closing = 1;
pthread_mutex_unlock(&bc->mtx);
pthread_cond_broadcast(&bc->cond);
for (i = 0; i < BLOCKIF_NUMTHR; i++)
pthread_join(bc->btid[i], &jval);
/* XXX Cancel queued i/o's ??? */
/*
* Release resources
*/
bc->magic = 0;
close(bc->fd);
free(bc);
return 0;
}
/*
* Return virtual C/H/S values for a given block. Use the algorithm
* outlined in the VHD specification to calculate values.
*/
void
blockif_chs(struct blockif_ctxt *bc, uint16_t *c, uint8_t *h, uint8_t *s)
{
off_t sectors; /* total sectors of the block dev */
off_t hcyl; /* cylinders times heads */
uint16_t secpt; /* sectors per track */
uint8_t heads;
assert(bc->magic == BLOCKIF_SIG);
sectors = bc->size / bc->sectsz;
/* Clamp the size to the largest possible with CHS */
if (sectors > 65535UL*16*255)
sectors = 65535UL*16*255;
if (sectors >= 65536UL*16*63) {
secpt = 255;
heads = 16;
hcyl = sectors / secpt;
} else {
secpt = 17;
hcyl = sectors / secpt;
heads = (hcyl + 1023) / 1024;
if (heads < 4)
heads = 4;
if (hcyl >= (heads * 1024) || heads > 16) {
secpt = 31;
heads = 16;
hcyl = sectors / secpt;
}
if (hcyl >= (heads * 1024)) {
secpt = 63;
heads = 16;
hcyl = sectors / secpt;
}
}
*c = hcyl / heads;
*h = heads;
*s = secpt;
}
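/*
* Worked example (assuming a 4 GiB image with 512-byte sectors):
* sectors = 8388608, which is below 65536*16*63, so the calculation
* starts with secpt = 17 and heads = 482; both fallback branches are
* then taken, ending with secpt = 63, heads = 16, hcyl = 133152 and
* thus C/H/S = 8322/16/63.
*/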
/*
* Accessors
*/
off_t
blockif_size(struct blockif_ctxt *bc)
{
assert(bc->magic == BLOCKIF_SIG);
return bc->size;
}
int
blockif_sectsz(struct blockif_ctxt *bc)
{
assert(bc->magic == BLOCKIF_SIG);
return bc->sectsz;
}
void
blockif_psectsz(struct blockif_ctxt *bc, int *size, int *off)
{
assert(bc->magic == BLOCKIF_SIG);
*size = bc->psectsz;
*off = bc->psectoff;
}
int
blockif_queuesz(struct blockif_ctxt *bc)
{
assert(bc->magic == BLOCKIF_SIG);
return (BLOCKIF_MAXREQ - 1);
}
int
blockif_is_ro(struct blockif_ctxt *bc)
{
assert(bc->magic == BLOCKIF_SIG);
return bc->rdonly;
}
int
blockif_candelete(struct blockif_ctxt *bc)
{
assert(bc->magic == BLOCKIF_SIG);
return bc->candelete;
}
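/*
* Usage sketch (illustrative only; path, identifier and buffer below are
* made-up examples): a frontend opens a backing image, fills in a request
* and is notified through the callback, much like the virtio-block code
* later in this series does.
*
*	static void done_cb(struct blockif_req *br, int err)
*	{
*		// err is 0 on success, otherwise an errno value
*	}
*
*	struct blockif_ctxt *bc = blockif_open("/path/to/disk.img", "0:0");
*	struct blockif_req req = { 0 };
*
*	req.iov[0].iov_base = buf;	// buf: caller-supplied 512-byte buffer
*	req.iov[0].iov_len = 512;
*	req.iovcnt = 1;
*	req.offset = 0;			// byte offset into the image
*	req.resid = 512;
*	req.callback = done_cb;
*	req.param = NULL;
*
*	if (blockif_read(bc, &req) == E2BIG)
*		;	// queue full: at most blockif_queuesz() requests may be pending
*
*	// call blockif_close(bc) only after all outstanding callbacks have run
*/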

2482
devicemodel/hw/pci/ahci.c Normal file

File diff suppressed because it is too large

2214
devicemodel/hw/pci/core.c Normal file

File diff suppressed because it is too large

View File

@@ -0,0 +1,68 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/cdefs.h>
#include <pthread.h>
#include "pci_core.h"
static int
pci_hostbridge_init(struct vmctx *ctx, struct pci_vdev *pi, char *opts)
{
/* config space */
pci_set_cfgdata16(pi, PCIR_VENDOR, 0x1275); /* NetApp */
pci_set_cfgdata16(pi, PCIR_DEVICE, 0x1275); /* NetApp */
pci_set_cfgdata8(pi, PCIR_HDRTYPE, PCIM_HDRTYPE_NORMAL);
pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_BRIDGE);
pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_BRIDGE_HOST);
pci_emul_add_pciecap(pi, PCIEM_TYPE_ROOT_PORT);
return 0;
}
static int
pci_amd_hostbridge_init(struct vmctx *ctx, struct pci_vdev *pi, char *opts)
{
(void) pci_hostbridge_init(ctx, pi, opts);
pci_set_cfgdata16(pi, PCIR_VENDOR, 0x1022); /* AMD */
pci_set_cfgdata16(pi, PCIR_DEVICE, 0x7432); /* made up */
return 0;
}
struct pci_vdev_ops pci_ops_amd_hostbridge = {
.class_name = "amd_hostbridge",
.vdev_init = pci_amd_hostbridge_init,
};
DEFINE_PCI_DEVTYPE(pci_ops_amd_hostbridge);
struct pci_vdev_ops pci_ops_hostbridge = {
.class_name = "hostbridge",
.vdev_init = pci_hostbridge_init,
};
DEFINE_PCI_DEVTYPE(pci_ops_hostbridge);

357
devicemodel/hw/pci/irq.c Normal file
View File

@@ -0,0 +1,357 @@
/*-
* Copyright (c) 2014 Hudson River Trading LLC
* Written by: John H. Baldwin <jhb@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#include <sys/param.h>
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include "types.h"
#include "acpi.h"
#include "vmm.h"
#include "vmmapi.h"
#include "inout.h"
#include "pci_core.h"
#include "irq.h"
#include "lpc.h"
/*
* Implement an 8 pin PCI interrupt router compatible with the router
* present on Intel's ICH10 chip.
*/
/* Fields in each PIRQ register. */
#define PIRQ_DIS 0x80
#define PIRQ_IRQ 0x0f
/* Only IRQs 3-7, 9-12, and 14-15 are permitted. */
#define PERMITTED_IRQS 0xdef8
#define IRQ_PERMITTED(irq) (((1U << (irq)) & PERMITTED_IRQS) != 0)
/* IRQ count to disable an IRQ. */
#define IRQ_DISABLED 0xff
static struct pirq {
uint8_t reg;
int use_count;
int active_count;
pthread_mutex_t lock;
} pirqs[8];
static u_char irq_counts[16];
static int pirq_cold = 1;
/*
* Returns true if this pin is enabled with a valid IRQ. Setting the
* register to a reserved IRQ causes interrupts not to be asserted, just
* as if the pin were disabled.
*/
static bool
pirq_valid_irq(int reg)
{
if (reg & PIRQ_DIS)
return false;
return IRQ_PERMITTED(reg & PIRQ_IRQ);
}
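/*
* For example, a register value of 0x0b routes the pin to IRQ 11 and is
* valid; 0x8b is invalid because PIRQ_DIS is set; 0x02 is invalid because
* IRQ 2 is not in PERMITTED_IRQS.
*/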
uint8_t
pirq_read(int pin)
{
assert(pin > 0 && pin <= nitems(pirqs));
return pirqs[pin - 1].reg;
}
void
pirq_write(struct vmctx *ctx, int pin, uint8_t val)
{
struct pirq *pirq;
assert(pin > 0 && pin <= nitems(pirqs));
pirq = &pirqs[pin - 1];
pthread_mutex_lock(&pirq->lock);
if (pirq->reg != (val & (PIRQ_DIS | PIRQ_IRQ))) {
if (pirq->active_count != 0 && pirq_valid_irq(pirq->reg))
vm_isa_deassert_irq(ctx, pirq->reg & PIRQ_IRQ, -1);
pirq->reg = val & (PIRQ_DIS | PIRQ_IRQ);
if (pirq->active_count != 0 && pirq_valid_irq(pirq->reg))
vm_isa_assert_irq(ctx, pirq->reg & PIRQ_IRQ, -1);
}
pthread_mutex_unlock(&pirq->lock);
}
void
pci_irq_reserve(int irq)
{
assert(irq >= 0 && irq < nitems(irq_counts));
assert(pirq_cold);
assert(irq_counts[irq] == 0 || irq_counts[irq] == IRQ_DISABLED);
irq_counts[irq] = IRQ_DISABLED;
}
void
pci_irq_use(int irq)
{
assert(irq >= 0 && irq < nitems(irq_counts));
assert(pirq_cold);
assert(irq_counts[irq] != IRQ_DISABLED);
irq_counts[irq]++;
}
void
pci_irq_init(struct vmctx *ctx)
{
int i;
for (i = 0; i < nitems(pirqs); i++) {
pirqs[i].reg = PIRQ_DIS;
pirqs[i].use_count = 0;
pirqs[i].active_count = 0;
pthread_mutex_init(&pirqs[i].lock, NULL);
}
for (i = 0; i < nitems(irq_counts); i++) {
if (IRQ_PERMITTED(i))
irq_counts[i] = 0;
else
irq_counts[i] = IRQ_DISABLED;
}
}
void pci_irq_deinit(struct vmctx *ctx)
{
pirq_cold = 1;
}
void
pci_irq_assert(struct pci_vdev *dev)
{
struct pirq *pirq;
if (dev->lintr.pirq_pin > 0) {
assert(dev->lintr.pirq_pin <= nitems(pirqs));
pirq = &pirqs[dev->lintr.pirq_pin - 1];
pthread_mutex_lock(&pirq->lock);
pirq->active_count++;
if (pirq->active_count == 1 && pirq_valid_irq(pirq->reg)) {
vm_isa_assert_irq(dev->vmctx, pirq->reg & PIRQ_IRQ,
dev->lintr.ioapic_irq);
pthread_mutex_unlock(&pirq->lock);
return;
}
pthread_mutex_unlock(&pirq->lock);
}
vm_ioapic_assert_irq(dev->vmctx, dev->lintr.ioapic_irq);
}
void
pci_irq_deassert(struct pci_vdev *dev)
{
struct pirq *pirq;
if (dev->lintr.pirq_pin > 0) {
assert(dev->lintr.pirq_pin <= nitems(pirqs));
pirq = &pirqs[dev->lintr.pirq_pin - 1];
pthread_mutex_lock(&pirq->lock);
pirq->active_count--;
if (pirq->active_count == 0 && pirq_valid_irq(pirq->reg)) {
vm_isa_deassert_irq(dev->vmctx, pirq->reg & PIRQ_IRQ,
dev->lintr.ioapic_irq);
pthread_mutex_unlock(&pirq->lock);
return;
}
pthread_mutex_unlock(&pirq->lock);
}
vm_ioapic_deassert_irq(dev->vmctx, dev->lintr.ioapic_irq);
}
int
pirq_alloc_pin(struct pci_vdev *dev)
{
int best_count, best_irq, best_pin, irq, pin;
pirq_cold = 0;
/* Find the least-used PIRQ pin. */
best_pin = 0;
best_count = pirqs[0].use_count;
for (pin = 1; pin < nitems(pirqs); pin++) {
if (pirqs[pin].use_count < best_count) {
best_pin = pin;
best_count = pirqs[pin].use_count;
}
}
pirqs[best_pin].use_count++;
/* Second, route this pin to an IRQ. */
if (pirqs[best_pin].reg == PIRQ_DIS) {
best_irq = -1;
best_count = 0;
for (irq = 0; irq < nitems(irq_counts); irq++) {
if (irq_counts[irq] == IRQ_DISABLED)
continue;
if (best_irq == -1 || irq_counts[irq] < best_count) {
best_irq = irq;
best_count = irq_counts[irq];
}
}
assert(best_irq >= 0);
irq_counts[best_irq]++;
pirqs[best_pin].reg = best_irq;
}
return (best_pin + 1);
}
int
pirq_irq(int pin)
{
assert(pin > 0 && pin <= nitems(pirqs));
return (pirqs[pin - 1].reg & PIRQ_IRQ);
}
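/*
* Typical use (sketch): when a device's legacy interrupt is routed, the
* PCI emulation can allocate the least-used pin and then look up the IRQ
* it ended up on:
*
*	pin = pirq_alloc_pin(dev);	// 1-based pin number
*	irq = pirq_irq(pin);		// IRQ currently routed to that pin
*/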
/* XXX: Generate $PIR table. */
static void
pirq_dsdt(void)
{
char *irq_prs, *old;
int irq, pin;
irq_prs = NULL;
for (irq = 0; irq < nitems(irq_counts); irq++) {
if (!IRQ_PERMITTED(irq))
continue;
if (irq_prs == NULL) {
if (asprintf(&irq_prs, "%d", irq) < 0) {
/*error*/
if (irq_prs != NULL)
free(irq_prs);
return;
}
} else {
old = irq_prs;
if (asprintf(&irq_prs, "%s,%d", old, irq) < 0) {
/*error*/
if (irq_prs != NULL)
free(irq_prs);
free(old);
return;
}
free(old);
}
}
/*
* A helper method to validate a link register's value. This
* duplicates pirq_valid_irq().
*/
dsdt_line("");
dsdt_line("Method (PIRV, 1, NotSerialized)");
dsdt_line("{");
dsdt_line(" If (And (Arg0, 0x%02X))", PIRQ_DIS);
dsdt_line(" {");
dsdt_line(" Return (0x00)");
dsdt_line(" }");
dsdt_line(" And (Arg0, 0x%02X, Local0)", PIRQ_IRQ);
dsdt_line(" If (LLess (Local0, 0x03))");
dsdt_line(" {");
dsdt_line(" Return (0x00)");
dsdt_line(" }");
dsdt_line(" If (LEqual (Local0, 0x08))");
dsdt_line(" {");
dsdt_line(" Return (0x00)");
dsdt_line(" }");
dsdt_line(" If (LEqual (Local0, 0x0D))");
dsdt_line(" {");
dsdt_line(" Return (0x00)");
dsdt_line(" }");
dsdt_line(" Return (0x01)");
dsdt_line("}");
for (pin = 0; pin < nitems(pirqs); pin++) {
dsdt_line("");
dsdt_line("Device (LNK%c)", 'A' + pin);
dsdt_line("{");
dsdt_line(" Name (_HID, EisaId (\"PNP0C0F\"))");
dsdt_line(" Name (_UID, 0x%02X)", pin + 1);
dsdt_line(" Method (_STA, 0, NotSerialized)");
dsdt_line(" {");
dsdt_line(" If (PIRV (PIR%c))", 'A' + pin);
dsdt_line(" {");
dsdt_line(" Return (0x0B)");
dsdt_line(" }");
dsdt_line(" Else");
dsdt_line(" {");
dsdt_line(" Return (0x09)");
dsdt_line(" }");
dsdt_line(" }");
dsdt_line(" Name (_PRS, ResourceTemplate ()");
dsdt_line(" {");
dsdt_line(" IRQ (Level, ActiveLow, Shared, )");
dsdt_line(" {%s}", irq_prs);
dsdt_line(" })");
dsdt_line(" Name (CB%02X, ResourceTemplate ()", pin + 1);
dsdt_line(" {");
dsdt_line(" IRQ (Level, ActiveLow, Shared, )");
dsdt_line(" {}");
dsdt_line(" })");
dsdt_line(" CreateWordField (CB%02X, 0x01, CIR%c)",
pin + 1, 'A' + pin);
dsdt_line(" Method (_CRS, 0, NotSerialized)");
dsdt_line(" {");
dsdt_line(" And (PIR%c, 0x%02X, Local0)", 'A' + pin,
PIRQ_DIS | PIRQ_IRQ);
dsdt_line(" If (PIRV (Local0))");
dsdt_line(" {");
dsdt_line(" ShiftLeft (0x01, Local0, CIR%c)", 'A' + pin);
dsdt_line(" }");
dsdt_line(" Else");
dsdt_line(" {");
dsdt_line(" Store (0x00, CIR%c)", 'A' + pin);
dsdt_line(" }");
dsdt_line(" Return (CB%02X)", pin + 1);
dsdt_line(" }");
dsdt_line(" Method (_DIS, 0, NotSerialized)");
dsdt_line(" {");
dsdt_line(" Store (0x80, PIR%c)", 'A' + pin);
dsdt_line(" }");
dsdt_line(" Method (_SRS, 1, NotSerialized)");
dsdt_line(" {");
dsdt_line(" CreateWordField (Arg0, 0x01, SIR%c)", 'A' + pin);
dsdt_line(" FindSetRightBit (SIR%c, Local0)", 'A' + pin);
dsdt_line(" Store (Decrement (Local0), PIR%c)", 'A' + pin);
dsdt_line(" }");
dsdt_line("}");
}
free(irq_prs);
}
LPC_DSDT(pirq_dsdt);

487
devicemodel/hw/pci/lpc.c Normal file
View File

@@ -0,0 +1,487 @@
/*-
* Copyright (c) 2013 Neel Natu <neel@freebsd.org>
* Copyright (c) 2013 Tycho Nightingale <tycho.nightingale@pluribusnetworks.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/cdefs.h>
#include <sys/types.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include "vmm.h"
#include "vmmapi.h"
#include "acpi.h"
#include "inout.h"
#include "pci_core.h"
#include "irq.h"
#include "lpc.h"
#include "uart_core.h"
#define IO_ICU1 0x20
#define IO_ICU2 0xA0
SET_DECLARE(lpc_dsdt_set, struct lpc_dsdt);
SET_DECLARE(lpc_sysres_set, struct lpc_sysres);
#define ELCR_PORT 0x4d0
SYSRES_IO(ELCR_PORT, 2);
#define IO_TIMER1_PORT 0x40
#define NMISC_PORT 0x61
SYSRES_IO(NMISC_PORT, 1);
static struct pci_vdev *lpc_bridge;
#define LPC_UART_NUM 2
static struct lpc_uart_vdev {
struct uart_vdev *uart;
const char *opts;
int iobase;
int irq;
int enabled;
} lpc_uart_vdev[LPC_UART_NUM];
static const char *lpc_uart_names[LPC_UART_NUM] = { "COM1", "COM2" };
/*
* LPC device configuration is in the following form:
* <lpc_device_name>[,<options>]
* e.g. "com1,stdio"
*/
int
lpc_device_parse(const char *opts)
{
int unit, error;
char *str, *cpy, *lpcdev;
error = -1;
str = cpy = strdup(opts);
lpcdev = strsep(&str, ",");
if (lpcdev != NULL) {
for (unit = 0; unit < LPC_UART_NUM; unit++) {
if (strcasecmp(lpcdev, lpc_uart_names[unit]) == 0) {
lpc_uart_vdev[unit].opts = str;
error = 0;
goto done;
}
}
}
done:
if (error)
free(cpy);
return error;
}
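/*
* For example, lpc_device_parse("com1,stdio") enables COM1 and stores
* "stdio" as its UART backend options, while an unknown device name such
* as "com3,stdio" makes the function return -1.
*/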
static void
lpc_uart_intr_assert(void *arg)
{
struct lpc_uart_vdev *lpc_uart = arg;
assert(lpc_uart->irq >= 0);
if (lpc_bridge)
vm_isa_pulse_irq(lpc_bridge->vmctx,
lpc_uart->irq,
lpc_uart->irq);
}
static void
lpc_uart_intr_deassert(void *arg)
{
/*
* The COM devices on the LPC bus generate edge triggered interrupts,
* so nothing more to do here.
*/
}
static int
lpc_uart_io_handler(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
uint32_t *eax, void *arg)
{
int offset;
struct lpc_uart_vdev *lpc_uart = arg;
offset = port - lpc_uart->iobase;
switch (bytes) {
case 1:
if (in)
*eax = uart_read(lpc_uart->uart, offset);
else
uart_write(lpc_uart->uart, offset, *eax);
break;
case 2:
if (in) {
*eax = uart_read(lpc_uart->uart, offset);
*eax |= uart_read(lpc_uart->uart, offset + 1) << 8;
} else {
uart_write(lpc_uart->uart, offset, *eax);
uart_write(lpc_uart->uart, offset + 1, *eax >> 8);
}
break;
default:
return -1;
}
return 0;
}
static void
lpc_deinit(struct vmctx *ctx)
{
struct lpc_uart_vdev *lpc_uart;
struct inout_port iop;
const char *name;
int unit;
/* COM1 and COM2 */
for (unit = 0; unit < LPC_UART_NUM; unit++) {
name = lpc_uart_names[unit];
lpc_uart = &lpc_uart_vdev[unit];
if (lpc_uart->enabled == 0)
continue;
bzero(&iop, sizeof(struct inout_port));
iop.name = name;
iop.port = lpc_uart->iobase;
iop.size = UART_IO_BAR_SIZE;
iop.flags = IOPORT_F_INOUT;
unregister_inout(&iop);
uart_release_backend(lpc_uart->uart, lpc_uart->opts);
uart_deinit(lpc_uart->uart);
uart_legacy_dealloc(unit);
lpc_uart->uart = NULL;
lpc_uart->enabled = 0;
}
}
static int
lpc_init(struct vmctx *ctx)
{
struct lpc_uart_vdev *lpc_uart;
struct inout_port iop;
const char *name;
int unit, error;
/* COM1 and COM2 */
for (unit = 0; unit < LPC_UART_NUM; unit++) {
lpc_uart = &lpc_uart_vdev[unit];
name = lpc_uart_names[unit];
if (uart_legacy_alloc(unit,
&lpc_uart->iobase,
&lpc_uart->irq) != 0) {
fprintf(stderr, "Unable to allocate resources for "
"LPC device %s\n", name);
goto init_failed;
}
pci_irq_reserve(lpc_uart->irq);
lpc_uart->uart = uart_init(lpc_uart_intr_assert,
lpc_uart_intr_deassert, lpc_uart);
if (lpc_uart->uart == NULL) {
uart_legacy_dealloc(unit);
goto init_failed;
}
if (uart_set_backend(lpc_uart->uart, lpc_uart->opts) != 0) {
fprintf(stderr, "Unable to initialize backend '%s' "
"for LPC device %s\n", lpc_uart->opts, name);
uart_deinit(lpc_uart->uart);
uart_legacy_dealloc(unit);
goto init_failed;
}
bzero(&iop, sizeof(struct inout_port));
iop.name = name;
iop.port = lpc_uart->iobase;
iop.size = UART_IO_BAR_SIZE;
iop.flags = IOPORT_F_INOUT;
iop.handler = lpc_uart_io_handler;
iop.arg = lpc_uart;
error = register_inout(&iop);
assert(error == 0);
lpc_uart->enabled = 1;
}
return 0;
init_failed:
lpc_deinit(ctx);
return -1;
}
static void
pci_lpc_write_dsdt(struct pci_vdev *dev)
{
struct lpc_dsdt **ldpp, *ldp;
dsdt_line("");
dsdt_line("Device (ISA)");
dsdt_line("{");
dsdt_line(" Name (_ADR, 0x%04X%04X)", dev->slot, dev->func);
dsdt_line(" OperationRegion (LPCR, PCI_Config, 0x00, 0x100)");
dsdt_line(" Field (LPCR, AnyAcc, NoLock, Preserve)");
dsdt_line(" {");
dsdt_line(" Offset (0x60),");
dsdt_line(" PIRA, 8,");
dsdt_line(" PIRB, 8,");
dsdt_line(" PIRC, 8,");
dsdt_line(" PIRD, 8,");
dsdt_line(" Offset (0x68),");
dsdt_line(" PIRE, 8,");
dsdt_line(" PIRF, 8,");
dsdt_line(" PIRG, 8,");
dsdt_line(" PIRH, 8");
dsdt_line(" }");
dsdt_line("");
dsdt_indent(1);
SET_FOREACH(ldpp, lpc_dsdt_set) {
ldp = *ldpp;
ldp->handler();
}
dsdt_line("");
dsdt_line("Device (PIC)");
dsdt_line("{");
dsdt_line(" Name (_HID, EisaId (\"PNP0000\"))");
dsdt_line(" Name (_CRS, ResourceTemplate ()");
dsdt_line(" {");
dsdt_indent(2);
dsdt_fixed_ioport(IO_ICU1, 2);
dsdt_fixed_ioport(IO_ICU2, 2);
dsdt_fixed_irq(2);
dsdt_unindent(2);
dsdt_line(" })");
dsdt_line("}");
dsdt_line("");
dsdt_line("Device (TIMR)");
dsdt_line("{");
dsdt_line(" Name (_HID, EisaId (\"PNP0100\"))");
dsdt_line(" Name (_CRS, ResourceTemplate ()");
dsdt_line(" {");
dsdt_indent(2);
dsdt_fixed_ioport(IO_TIMER1_PORT, 4);
dsdt_fixed_irq(0);
dsdt_unindent(2);
dsdt_line(" })");
dsdt_line("}");
dsdt_unindent(1);
dsdt_line("}");
}
static void
pci_lpc_sysres_dsdt(void)
{
struct lpc_sysres **lspp, *lsp;
dsdt_line("");
dsdt_line("Device (SIO)");
dsdt_line("{");
dsdt_line(" Name (_HID, EisaId (\"PNP0C02\"))");
dsdt_line(" Name (_CRS, ResourceTemplate ()");
dsdt_line(" {");
dsdt_indent(2);
SET_FOREACH(lspp, lpc_sysres_set) {
lsp = *lspp;
switch (lsp->type) {
case LPC_SYSRES_IO:
dsdt_fixed_ioport(lsp->base, lsp->length);
break;
case LPC_SYSRES_MEM:
dsdt_fixed_mem32(lsp->base, lsp->length);
break;
}
}
dsdt_unindent(2);
dsdt_line(" })");
dsdt_line("}");
}
LPC_DSDT(pci_lpc_sysres_dsdt);
static void
pci_lpc_uart_dsdt(void)
{
struct lpc_uart_vdev *lpc_uart;
int unit;
for (unit = 0; unit < LPC_UART_NUM; unit++) {
lpc_uart = &lpc_uart_vdev[unit];
if (!lpc_uart->enabled)
continue;
dsdt_line("");
dsdt_line("Device (%s)", lpc_uart_names[unit]);
dsdt_line("{");
dsdt_line(" Name (_HID, EisaId (\"PNP0501\"))");
dsdt_line(" Name (_UID, %d)", unit + 1);
dsdt_line(" Name (_CRS, ResourceTemplate ()");
dsdt_line(" {");
dsdt_indent(2);
dsdt_fixed_ioport(lpc_uart->iobase, UART_IO_BAR_SIZE);
dsdt_fixed_irq(lpc_uart->irq);
dsdt_unindent(2);
dsdt_line(" })");
dsdt_line("}");
}
}
LPC_DSDT(pci_lpc_uart_dsdt);
static int
pci_lpc_cfgwrite(struct vmctx *ctx, int vcpu, struct pci_vdev *pi,
int coff, int bytes, uint32_t val)
{
int pirq_pin;
if (bytes == 1) {
pirq_pin = 0;
if (coff >= 0x60 && coff <= 0x63)
pirq_pin = coff - 0x60 + 1;
if (coff >= 0x68 && coff <= 0x6b)
pirq_pin = coff - 0x68 + 5;
if (pirq_pin != 0) {
pirq_write(ctx, pirq_pin, val);
pci_set_cfgdata8(pi, coff, pirq_read(pirq_pin));
return 0;
}
}
return -1;
}
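/*
* For example, a one-byte write of 0x0b to config offset 0x62 targets
* PIRQ pin 3 (PIRC): pirq_write() routes that pin to IRQ 11 and the value
* read back via pirq_read() is mirrored into the config space byte.
*/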
static void
pci_lpc_write(struct vmctx *ctx, int vcpu, struct pci_vdev *pi,
int baridx, uint64_t offset, int size, uint64_t value)
{
}
static uint64_t
pci_lpc_read(struct vmctx *ctx, int vcpu, struct pci_vdev *pi,
int baridx, uint64_t offset, int size)
{
return 0;
}
#define LPC_DEV 0x7000
#define LPC_VENDOR 0x8086
static int
pci_lpc_init(struct vmctx *ctx, struct pci_vdev *pi, char *opts)
{
/*
* Do not allow more than one LPC bridge to be configured.
*/
if (lpc_bridge != NULL) {
fprintf(stderr, "Only one LPC bridge is allowed.\n");
return -1;
}
/*
* Enforce that the LPC can only be configured on bus 0. This
* simplifies the ACPI DSDT because it can provide a decode for
* all legacy i/o ports behind bus 0.
*/
if (pi->bus != 0) {
fprintf(stderr, "LPC bridge can be present only on bus 0.\n");
return -1;
}
if (lpc_init(ctx) != 0)
return -1;
/* initialize config space */
pci_set_cfgdata16(pi, PCIR_DEVICE, LPC_DEV);
pci_set_cfgdata16(pi, PCIR_VENDOR, LPC_VENDOR);
pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_BRIDGE);
pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_BRIDGE_ISA);
lpc_bridge = pi;
return 0;
}
static void
pci_lpc_deinit(struct vmctx *ctx, struct pci_vdev *pi, char *opts)
{
lpc_bridge = NULL;
lpc_deinit(ctx);
}
char *
lpc_pirq_name(int pin)
{
char *name = NULL;
if (lpc_bridge == NULL)
return NULL;
if (asprintf(&name, "\\_SB.PCI0.ISA.LNK%c,", 'A' + pin - 1) < 0) {
if (name != NULL)
free(name);
return NULL;
}
return name;
}
void
lpc_pirq_routed(void)
{
int pin;
if (lpc_bridge == NULL)
return;
for (pin = 0; pin < 4; pin++)
pci_set_cfgdata8(lpc_bridge, 0x60 + pin, pirq_read(pin + 1));
for (pin = 0; pin < 4; pin++)
pci_set_cfgdata8(lpc_bridge, 0x68 + pin, pirq_read(pin + 5));
}
struct pci_vdev_ops pci_ops_lpc = {
.class_name = "lpc",
.vdev_init = pci_lpc_init,
.vdev_deinit = pci_lpc_deinit,
.vdev_write_dsdt = pci_lpc_write_dsdt,
.vdev_cfgwrite = pci_lpc_cfgwrite,
.vdev_barwrite = pci_lpc_write,
.vdev_barread = pci_lpc_read
};
DEFINE_PCI_DEVTYPE(pci_ops_lpc);

File diff suppressed because it is too large

128
devicemodel/hw/pci/uart.c Normal file
View File

@@ -0,0 +1,128 @@
/*-
* Copyright (c) 2012 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/cdefs.h>
#include <sys/types.h>
#include <stdio.h>
#include "dm.h"
#include "pci_core.h"
#include "uart_core.h"
/*
* Pick a PCI vid/did of a chip with a single uart at
* BAR0, that most versions of FreeBSD can understand:
* Siig CyberSerial 1-port.
*/
#define COM_VENDOR 0x131f
#define COM_DEV 0x2000
static void
pci_uart_intr_assert(void *arg)
{
struct pci_vdev *dev = arg;
pci_lintr_assert(dev);
}
static void
pci_uart_intr_deassert(void *arg)
{
struct pci_vdev *dev = arg;
pci_lintr_deassert(dev);
}
static void
pci_uart_write(struct vmctx *ctx, int vcpu, struct pci_vdev *dev,
int baridx, uint64_t offset, int size, uint64_t value)
{
assert(baridx == 0);
assert(size == 1);
uart_write(dev->arg, offset, value);
}
uint64_t
pci_uart_read(struct vmctx *ctx, int vcpu, struct pci_vdev *dev,
int baridx, uint64_t offset, int size)
{
uint8_t val;
assert(baridx == 0);
assert(size == 1);
val = uart_read(dev->arg, offset);
return val;
}
static int
pci_uart_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
{
struct uart_vdev *uart;
pci_emul_alloc_bar(dev, 0, PCIBAR_IO, UART_IO_BAR_SIZE);
pci_lintr_request(dev);
/* initialize config space */
pci_set_cfgdata16(dev, PCIR_DEVICE, COM_DEV);
pci_set_cfgdata16(dev, PCIR_VENDOR, COM_VENDOR);
pci_set_cfgdata8(dev, PCIR_CLASS, PCIC_SIMPLECOMM);
uart = uart_init(pci_uart_intr_assert, pci_uart_intr_deassert, dev);
dev->arg = uart;
if (uart_set_backend(uart, opts) != 0) {
fprintf(stderr, "Unable to initialize backend '%s' for "
"pci uart at %d:%d\n", opts, dev->slot, dev->func);
return -1;
}
return 0;
}
static void
pci_uart_deinit(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
{
struct uart_vdev *uart = (struct uart_vdev *)dev->arg;
if (uart == NULL)
return;
uart_release_backend(uart, opts);
uart_deinit(uart);
}
struct pci_vdev_ops pci_ops_com = {
.class_name = "uart",
.vdev_init = pci_uart_init,
.vdev_deinit = pci_uart_deinit,
.vdev_barwrite = pci_uart_write,
.vdev_barread = pci_uart_read
};
DEFINE_PCI_DEVTYPE(pci_ops_com);

File diff suppressed because it is too large

View File

@@ -0,0 +1,441 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/stat.h>
#include <sys/uio.h>
#include <sys/ioctl.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <openssl/md5.h>
#include "dm.h"
#include "pci_core.h"
#include "virtio.h"
#include "block_if.h"
#define VIRTIO_BLK_RINGSZ 64
#define VIRTIO_BLK_S_OK 0
#define VIRTIO_BLK_S_IOERR 1
#define VIRTIO_BLK_S_UNSUPP 2
#define VIRTIO_BLK_BLK_ID_BYTES 20
/* Capability bits */
#define VIRTIO_BLK_F_SEG_MAX (1 << 2) /* Maximum request segments */
#define VIRTIO_BLK_F_BLK_SIZE (1 << 6) /* cfg block size valid */
#define VIRTIO_BLK_F_FLUSH (1 << 9) /* Cache flush support */
#define VIRTIO_BLK_F_TOPOLOGY (1 << 10) /* Optimal I/O alignment */
/*
* Host capabilities
*/
#define VIRTIO_BLK_S_HOSTCAPS \
(VIRTIO_BLK_F_SEG_MAX | \
VIRTIO_BLK_F_BLK_SIZE | \
VIRTIO_BLK_F_FLUSH | \
VIRTIO_BLK_F_TOPOLOGY | \
VIRTIO_RING_F_INDIRECT_DESC) /* indirect descriptors */
/*
* Config space "registers"
*/
struct virtio_blk_config {
uint64_t capacity;
uint32_t size_max;
uint32_t seg_max;
struct {
uint16_t cylinders;
uint8_t heads;
uint8_t sectors;
} geometry;
uint32_t blk_size;
struct {
uint8_t physical_block_exp;
uint8_t alignment_offset;
uint16_t min_io_size;
uint32_t opt_io_size;
} topology;
uint8_t writeback;
} __attribute__((packed));
/*
* Fixed-size block header
*/
struct virtio_blk_hdr {
#define VBH_OP_READ 0
#define VBH_OP_WRITE 1
#define VBH_OP_FLUSH 4
#define VBH_OP_FLUSH_OUT 5
#define VBH_OP_IDENT 8
#define VBH_FLAG_BARRIER 0x80000000 /* OR'ed into type */
uint32_t type;
uint32_t ioprio;
uint64_t sector;
} __attribute__((packed));
/*
* Debug printf
*/
static int virtio_blk_debug;
#define DPRINTF(params) do { if (virtio_blk_debug) printf params; } while (0)
#define WPRINTF(params) (printf params)
struct virtio_blk_ioreq {
struct blockif_req req;
struct virtio_blk *blk;
uint8_t *status;
uint16_t idx;
};
/*
* Per-device struct
*/
struct virtio_blk {
struct virtio_base base;
pthread_mutex_t mtx;
struct virtio_vq_info vq;
struct virtio_blk_config cfg;
struct blockif_ctxt *bc;
char ident[VIRTIO_BLK_BLK_ID_BYTES + 1];
struct virtio_blk_ioreq ios[VIRTIO_BLK_RINGSZ];
};
static void virtio_blk_reset(void *);
static void virtio_blk_notify(void *, struct virtio_vq_info *);
static int virtio_blk_cfgread(void *, int, int, uint32_t *);
static int virtio_blk_cfgwrite(void *, int, int, uint32_t);
static struct virtio_ops virtio_blk_ops = {
"virtio_blk", /* our name */
1, /* we support 1 virtqueue */
sizeof(struct virtio_blk_config), /* config reg size */
virtio_blk_reset, /* reset */
virtio_blk_notify, /* device-wide qnotify */
virtio_blk_cfgread, /* read PCI config */
virtio_blk_cfgwrite, /* write PCI config */
NULL, /* apply negotiated features */
NULL, /* called on guest set status */
VIRTIO_BLK_S_HOSTCAPS, /* our capabilities */
};
static void
virtio_blk_reset(void *vdev)
{
struct virtio_blk *blk = vdev;
DPRINTF(("virtio_blk: device reset requested !\n"));
virtio_reset_dev(&blk->base);
}
static void
virtio_blk_done(struct blockif_req *br, int err)
{
struct virtio_blk_ioreq *io = br->param;
struct virtio_blk *blk = io->blk;
/* convert errno into a virtio block error return */
if (err == EOPNOTSUPP || err == ENOSYS)
*io->status = VIRTIO_BLK_S_UNSUPP;
else if (err != 0)
*io->status = VIRTIO_BLK_S_IOERR;
else
*io->status = VIRTIO_BLK_S_OK;
/*
* Return the descriptor back to the host.
* We wrote 1 byte (our status) to host.
*/
pthread_mutex_lock(&blk->mtx);
vq_relchain(&blk->vq, io->idx, 1);
vq_endchains(&blk->vq, 0);
pthread_mutex_unlock(&blk->mtx);
}
static void
virtio_blk_proc(struct virtio_blk *blk, struct virtio_vq_info *vq)
{
struct virtio_blk_hdr *vbh;
struct virtio_blk_ioreq *io;
int i, n;
int err;
ssize_t iolen;
int writeop, type;
struct iovec iov[BLOCKIF_IOV_MAX + 2];
uint16_t idx, flags[BLOCKIF_IOV_MAX + 2];
n = vq_getchain(vq, &idx, iov, BLOCKIF_IOV_MAX + 2, flags);
/*
* The first descriptor will be the read-only fixed header,
* and the last is for status (hence +2 above and below).
* The remaining iov's are the actual data I/O vectors.
*
* XXX - note - this fails on crash dump, which does a
* VIRTIO_BLK_T_FLUSH with a zero transfer length
*/
assert(n >= 2 && n <= BLOCKIF_IOV_MAX + 2);
io = &blk->ios[idx];
assert((flags[0] & VRING_DESC_F_WRITE) == 0);
assert(iov[0].iov_len == sizeof(struct virtio_blk_hdr));
vbh = iov[0].iov_base;
memcpy(&io->req.iov, &iov[1], sizeof(struct iovec) * (n - 2));
io->req.iovcnt = n - 2;
io->req.offset = vbh->sector * DEV_BSIZE;
io->status = iov[--n].iov_base;
assert(iov[n].iov_len == 1);
assert(flags[n] & VRING_DESC_F_WRITE);
/*
* XXX
* The guest should not be setting the BARRIER flag because
* we don't advertise the capability.
*/
type = vbh->type & ~VBH_FLAG_BARRIER;
writeop = (type == VBH_OP_WRITE);
iolen = 0;
for (i = 1; i < n; i++) {
/*
* - write op implies read-only descriptor,
* - read/ident op implies write-only descriptor,
* therefore test the inverse of the descriptor bit
* to the op.
*/
assert(((flags[i] & VRING_DESC_F_WRITE) == 0) == writeop);
iolen += iov[i].iov_len;
}
io->req.resid = iolen;
DPRINTF(("virtio-block: %s op, %zd bytes, %d segs, offset %ld\n\r",
writeop ? "write" : "read/ident", iolen, i - 1,
io->req.offset));
switch (type) {
case VBH_OP_READ:
err = blockif_read(blk->bc, &io->req);
break;
case VBH_OP_WRITE:
err = blockif_write(blk->bc, &io->req);
break;
case VBH_OP_FLUSH:
case VBH_OP_FLUSH_OUT:
err = blockif_flush(blk->bc, &io->req);
break;
case VBH_OP_IDENT:
/* Assume a single buffer */
/* The serial number copied into the buffer need not be zero-terminated. */
memset(iov[1].iov_base, 0, iov[1].iov_len);
strncpy(iov[1].iov_base, blk->ident,
MIN(iov[1].iov_len, sizeof(blk->ident)));
virtio_blk_done(&io->req, 0);
return;
default:
virtio_blk_done(&io->req, EOPNOTSUPP);
return;
}
assert(err == 0);
}
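/*
* Example chain layout for a two-segment read (n == 4):
*	iov[0]	16-byte virtio_blk_hdr, read-only for the device
*	iov[1]	first data buffer, write-only
*	iov[2]	second data buffer, write-only
*	iov[3]	1-byte status, write-only, filled in by virtio_blk_done()
*/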
static void
virtio_blk_notify(void *vdev, struct virtio_vq_info *vq)
{
struct virtio_blk *blk = vdev;
while (vq_has_descs(vq))
virtio_blk_proc(blk, vq);
}
static int
virtio_blk_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
{
char bident[16];
struct blockif_ctxt *bctxt;
MD5_CTX mdctx;
u_char digest[16];
struct virtio_blk *blk;
off_t size;
int i, sectsz, sts, sto;
pthread_mutexattr_t attr;
int rc;
if (opts == NULL) {
printf("virtio-block: backing device required\n");
return -1;
}
/*
* The supplied backing file has to exist
*/
snprintf(bident, sizeof(bident), "%d:%d", dev->slot, dev->func);
bctxt = blockif_open(opts, bident);
if (bctxt == NULL) {
perror("Could not open backing file");
return -1;
}
size = blockif_size(bctxt);
sectsz = blockif_sectsz(bctxt);
blockif_psectsz(bctxt, &sts, &sto);
blk = calloc(1, sizeof(struct virtio_blk));
if (!blk) {
WPRINTF(("virtio_blk: calloc returns NULL\n"));
return -1;
}
blk->bc = bctxt;
for (i = 0; i < VIRTIO_BLK_RINGSZ; i++) {
struct virtio_blk_ioreq *io = &blk->ios[i];
io->req.callback = virtio_blk_done;
io->req.param = io;
io->blk = blk;
io->idx = i;
}
/* init mutex attribute properly to avoid deadlock */
rc = pthread_mutexattr_init(&attr);
if (rc)
DPRINTF(("mutexattr init failed with erro %d!\n", rc));
rc = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
if (rc)
DPRINTF(("virtio_blk: mutexattr_settype failed with "
"error %d!\n", rc));
rc = pthread_mutex_init(&blk->mtx, &attr);
if (rc)
DPRINTF(("virtio_blk: pthread_mutex_init failed with "
"error %d!\n", rc));
/* init virtio struct and virtqueues */
virtio_linkup(&blk->base, &virtio_blk_ops, blk, dev, &blk->vq);
blk->base.mtx = &blk->mtx;
blk->vq.qsize = VIRTIO_BLK_RINGSZ;
/* blk->vq.vq_notify = we have no per-queue notify */
/*
* Create an identifier for the backing file. Use parts of the
* md5 sum of the filename
*/
MD5_Init(&mdctx);
MD5_Update(&mdctx, opts, strlen(opts));
MD5_Final(digest, &mdctx);
sprintf(blk->ident, "ACRN--%02X%02X-%02X%02X-%02X%02X",
digest[0], digest[1], digest[2], digest[3], digest[4], digest[5]);
/* setup virtio block config space */
blk->cfg.capacity = size / DEV_BSIZE; /* 512-byte units */
blk->cfg.size_max = 0; /* not negotiated */
blk->cfg.seg_max = BLOCKIF_IOV_MAX;
blk->cfg.geometry.cylinders = 0; /* no geometry */
blk->cfg.geometry.heads = 0;
blk->cfg.geometry.sectors = 0;
blk->cfg.blk_size = sectsz;
blk->cfg.topology.physical_block_exp =
(sts > sectsz) ? (ffsll(sts / sectsz) - 1) : 0;
blk->cfg.topology.alignment_offset =
(sto != 0) ? ((sts - sto) / sectsz) : 0;
blk->cfg.topology.min_io_size = 0;
blk->cfg.topology.opt_io_size = 0;
blk->cfg.writeback = 0;
/*
* Should we move some of this into virtio.c? Could
* have the device, class, and subdev_0 as fields in
* the virtio constants structure.
*/
pci_set_cfgdata16(dev, PCIR_DEVICE, VIRTIO_DEV_BLOCK);
pci_set_cfgdata16(dev, PCIR_VENDOR, VIRTIO_VENDOR);
pci_set_cfgdata8(dev, PCIR_CLASS, PCIC_STORAGE);
pci_set_cfgdata16(dev, PCIR_SUBDEV_0, VIRTIO_TYPE_BLOCK);
pci_set_cfgdata16(dev, PCIR_SUBVEND_0, VIRTIO_VENDOR);
if (virtio_interrupt_init(&blk->base, virtio_uses_msix())) {
blockif_close(blk->bc);
free(blk);
return -1;
}
virtio_set_io_bar(&blk->base, 0);
return 0;
}
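/*
* Worked example for the topology fields set above: with a backing device
* reporting a 512-byte logical and a 4096-byte physical sector size and no
* offset, physical_block_exp = ffsll(4096 / 512) - 1 = 3 (2^3 logical
* blocks per physical block) and alignment_offset = 0.
*/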
static void
virtio_blk_deinit(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
{
struct blockif_ctxt *bctxt;
struct virtio_blk *blk;
if (dev->arg) {
DPRINTF(("virtio_blk: deinit\n"));
blk = (struct virtio_blk *) dev->arg;
bctxt = blk->bc;
blockif_close(bctxt);
free(blk);
}
}
static int
virtio_blk_cfgwrite(void *vdev, int offset, int size, uint32_t value)
{
DPRINTF(("virtio_blk: write to readonly reg %d\n\r", offset));
return -1;
}
static int
virtio_blk_cfgread(void *vdev, int offset, int size, uint32_t *retval)
{
struct virtio_blk *blk = vdev;
void *ptr;
/* our caller has already verified offset and size */
ptr = (uint8_t *)&blk->cfg + offset;
memcpy(retval, ptr, size);
return 0;
}
struct pci_vdev_ops pci_ops_virtio_blk = {
.class_name = "virtio-blk",
.vdev_init = virtio_blk_init,
.vdev_deinit = virtio_blk_deinit,
.vdev_barwrite = virtio_pci_write,
.vdev_barread = virtio_pci_read
};
DEFINE_PCI_DEVTYPE(pci_ops_virtio_blk);

View File

@@ -0,0 +1,947 @@
/*-
* Copyright (c) 2016 iXsystems Inc.
* All rights reserved.
*
* This software was developed by Jakub Klama <jceel@FreeBSD.org>
* under sponsorship from iXsystems Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer
* in this position and unchanged.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/uio.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <libgen.h>
#include <sysexits.h>
#include <termios.h>
#include "dm.h"
#include "pci_core.h"
#include "virtio.h"
#include "mevent.h"
#define VIRTIO_CONSOLE_RINGSZ 64
#define VIRTIO_CONSOLE_MAXPORTS 16
#define VIRTIO_CONSOLE_MAXQ (VIRTIO_CONSOLE_MAXPORTS * 2 + 2)
#define VIRTIO_CONSOLE_DEVICE_READY 0
#define VIRTIO_CONSOLE_DEVICE_ADD 1
#define VIRTIO_CONSOLE_DEVICE_REMOVE 2
#define VIRTIO_CONSOLE_PORT_READY 3
#define VIRTIO_CONSOLE_CONSOLE_PORT 4
#define VIRTIO_CONSOLE_CONSOLE_RESIZE 5
#define VIRTIO_CONSOLE_PORT_OPEN 6
#define VIRTIO_CONSOLE_PORT_NAME 7
#define VIRTIO_CONSOLE_F_SIZE 0
#define VIRTIO_CONSOLE_F_MULTIPORT 1
#define VIRTIO_CONSOLE_F_EMERG_WRITE 2
#define VIRTIO_CONSOLE_S_HOSTCAPS \
(VIRTIO_CONSOLE_F_SIZE | \
VIRTIO_CONSOLE_F_MULTIPORT | \
VIRTIO_CONSOLE_F_EMERG_WRITE)
static int virtio_console_debug;
#define DPRINTF(params) do { \
if (virtio_console_debug) \
printf params; \
} while (0)
#define WPRINTF(params) (printf params)
struct virtio_console;
struct virtio_console_port;
struct virtio_console_config;
typedef void (virtio_console_cb_t)(struct virtio_console_port *, void *,
struct iovec *, int);
enum virtio_console_be_type {
VIRTIO_CONSOLE_BE_STDIO = 0,
VIRTIO_CONSOLE_BE_TTY,
VIRTIO_CONSOLE_BE_PTY,
VIRTIO_CONSOLE_BE_FILE,
VIRTIO_CONSOLE_BE_MAX,
VIRTIO_CONSOLE_BE_INVALID = VIRTIO_CONSOLE_BE_MAX
};
struct virtio_console_port {
struct virtio_console *console;
int id;
const char *name;
bool enabled;
bool is_console;
bool rx_ready;
bool open;
int rxq;
int txq;
void *arg;
virtio_console_cb_t *cb;
};
struct virtio_console_backend {
struct virtio_console_port *port;
struct mevent *evp;
int fd;
bool open;
enum virtio_console_be_type be_type;
int pts_fd; /* only valid for PTY */
};
struct virtio_console {
struct virtio_base base;
struct virtio_vq_info queues[VIRTIO_CONSOLE_MAXQ];
pthread_mutex_t mtx;
uint64_t cfg;
uint64_t features;
int nports;
bool ready;
struct virtio_console_port control_port;
struct virtio_console_port ports[VIRTIO_CONSOLE_MAXPORTS];
struct virtio_console_config *config;
};
struct virtio_console_config {
uint16_t cols;
uint16_t rows;
uint32_t max_nr_ports;
uint32_t emerg_wr;
} __attribute__((packed));
struct virtio_console_control {
uint32_t id;
uint16_t event;
uint16_t value;
} __attribute__((packed));
struct virtio_console_console_resize {
uint16_t cols;
uint16_t rows;
} __attribute__((packed));
static void virtio_console_reset(void *);
static void virtio_console_notify_rx(void *, struct virtio_vq_info *);
static void virtio_console_notify_tx(void *, struct virtio_vq_info *);
static int virtio_console_cfgread(void *, int, int, uint32_t *);
static int virtio_console_cfgwrite(void *, int, int, uint32_t);
static void virtio_console_neg_features(void *, uint64_t);
static void virtio_console_control_send(struct virtio_console *,
struct virtio_console_control *, const void *, size_t);
static void virtio_console_announce_port(struct virtio_console_port *);
static void virtio_console_open_port(struct virtio_console_port *, bool);
static struct virtio_ops virtio_console_ops = {
"vtcon", /* our name */
VIRTIO_CONSOLE_MAXQ, /* we support VTCON_MAXQ virtqueues */
sizeof(struct virtio_console_config), /* config reg size */
virtio_console_reset, /* reset */
NULL, /* device-wide qnotify */
virtio_console_cfgread, /* read virtio config */
virtio_console_cfgwrite, /* write virtio config */
virtio_console_neg_features, /* apply negotiated features */
NULL, /* called on guest set status */
VIRTIO_CONSOLE_S_HOSTCAPS, /* our capabilities */
};
static const char *virtio_console_be_table[VIRTIO_CONSOLE_BE_MAX] = {
[VIRTIO_CONSOLE_BE_STDIO] = "stdio",
[VIRTIO_CONSOLE_BE_TTY] = "tty",
[VIRTIO_CONSOLE_BE_PTY] = "pty",
[VIRTIO_CONSOLE_BE_FILE] = "file"
};
static struct termios virtio_console_saved_tio;
static int virtio_console_saved_flags;
static void
virtio_console_reset(void *vdev)
{
struct virtio_console *console;
console = vdev;
DPRINTF(("vtcon: device reset requested!\n"));
virtio_reset_dev(&console->base);
}
static void
virtio_console_neg_features(void *vdev, uint64_t negotiated_features)
{
struct virtio_console *console = vdev;
console->features = negotiated_features;
}
static int
virtio_console_cfgread(void *vdev, int offset, int size, uint32_t *retval)
{
struct virtio_console *console = vdev;
void *ptr;
ptr = (uint8_t *)console->config + offset;
memcpy(retval, ptr, size);
return 0;
}
static int
virtio_console_cfgwrite(void *vdev, int offset, int size, uint32_t val)
{
return 0;
}
static inline struct virtio_console_port *
virtio_console_vq_to_port(struct virtio_console *console,
struct virtio_vq_info *vq)
{
uint16_t num = vq->num;
if (num == 0 || num == 1)
return &console->ports[0];
if (num == 2 || num == 3)
return &console->control_port;
return &console->ports[(num / 2) - 1];
}
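/*
* Queue index mapping, for example: vq 0/1 belong to port 0, vq 2/3 to
* the control port, vq 4/5 to port 1, and in general vq 2n/2n+1 (n >= 2)
* map to port n - 1.
*/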
static inline struct virtio_vq_info *
virtio_console_port_to_vq(struct virtio_console_port *port, bool tx_queue)
{
int qnum;
qnum = tx_queue ? port->txq : port->rxq;
return &port->console->queues[qnum];
}
static struct virtio_console_port *
virtio_console_add_port(struct virtio_console *console, const char *name,
virtio_console_cb_t *cb, void *arg, bool is_console)
{
struct virtio_console_port *port;
if (console->nports == VIRTIO_CONSOLE_MAXPORTS) {
errno = EBUSY;
return NULL;
}
port = &console->ports[console->nports++];
port->id = console->nports - 1;
port->console = console;
port->name = name;
port->cb = cb;
port->arg = arg;
port->is_console = is_console;
if (port->id == 0) {
/* port0 */
port->txq = 0;
port->rxq = 1;
} else {
port->txq = console->nports * 2;
port->rxq = port->txq + 1;
}
port->enabled = true;
return port;
}
static void
virtio_console_control_tx(struct virtio_console_port *port, void *arg,
struct iovec *iov, int niov)
{
struct virtio_console *console;
struct virtio_console_port *tmp;
struct virtio_console_control resp, *ctrl;
int i;
assert(niov == 1);
console = port->console;
ctrl = (struct virtio_console_control *)iov->iov_base;
switch (ctrl->event) {
case VIRTIO_CONSOLE_DEVICE_READY:
console->ready = true;
/* set port ready events for registered ports */
for (i = 0; i < VIRTIO_CONSOLE_MAXPORTS; i++) {
tmp = &console->ports[i];
if (tmp->enabled)
virtio_console_announce_port(tmp);
if (tmp->open)
virtio_console_open_port(tmp, true);
}
break;
case VIRTIO_CONSOLE_PORT_READY:
if (ctrl->id >= console->nports) {
WPRINTF(("VTCONSOLE_PORT_READY for unknown port %d\n",
ctrl->id));
return;
}
tmp = &console->ports[ctrl->id];
if (tmp->is_console) {
resp.event = VIRTIO_CONSOLE_CONSOLE_PORT;
resp.id = ctrl->id;
resp.value = 1;
virtio_console_control_send(console, &resp, NULL, 0);
}
break;
}
}
static void
virtio_console_announce_port(struct virtio_console_port *port)
{
struct virtio_console_control event;
event.id = port->id;
event.event = VIRTIO_CONSOLE_DEVICE_ADD;
event.value = 1;
virtio_console_control_send(port->console, &event, NULL, 0);
event.event = VIRTIO_CONSOLE_PORT_NAME;
virtio_console_control_send(port->console, &event, port->name,
strlen(port->name));
}
static void
virtio_console_open_port(struct virtio_console_port *port, bool open)
{
struct virtio_console_control event;
if (!port->console->ready) {
port->open = true;
return;
}
event.id = port->id;
event.event = VIRTIO_CONSOLE_PORT_OPEN;
event.value = (int)open;
virtio_console_control_send(port->console, &event, NULL, 0);
}
static void
virtio_console_control_send(struct virtio_console *console,
struct virtio_console_control *ctrl,
const void *payload, size_t len)
{
struct virtio_vq_info *vq;
struct iovec iov;
uint16_t idx;
int n;
vq = virtio_console_port_to_vq(&console->control_port, true);
if (!vq_has_descs(vq))
return;
n = vq_getchain(vq, &idx, &iov, 1, NULL);
assert(n == 1);
memcpy(iov.iov_base, ctrl, sizeof(struct virtio_console_control));
if (payload != NULL && len > 0)
memcpy(iov.iov_base + sizeof(struct virtio_console_control),
payload, len);
vq_relchain(vq, idx, sizeof(struct virtio_console_control) + len);
vq_endchains(vq, 1);
}
static void
virtio_console_notify_tx(void *vdev, struct virtio_vq_info *vq)
{
struct virtio_console *console;
struct virtio_console_port *port;
struct iovec iov[1];
uint16_t idx;
uint16_t flags[8];
console = vdev;
port = virtio_console_vq_to_port(console, vq);
while (vq_has_descs(vq)) {
vq_getchain(vq, &idx, iov, 1, flags);
if (port != NULL)
port->cb(port, port->arg, iov, 1);
/*
* Release this chain and handle more
*/
vq_relchain(vq, idx, 0);
}
vq_endchains(vq, 1); /* Generate interrupt if appropriate. */
}
static void
virtio_console_notify_rx(void *vdev, struct virtio_vq_info *vq)
{
struct virtio_console *console;
struct virtio_console_port *port;
console = vdev;
port = virtio_console_vq_to_port(console, vq);
if (!port->rx_ready) {
port->rx_ready = 1;
vq->used->flags |= VRING_USED_F_NO_NOTIFY;
}
}
static void
virtio_console_reset_backend(struct virtio_console_backend *be)
{
if (!be)
return;
if (be->fd != STDIN_FILENO)
mevent_delete_close(be->evp);
else
mevent_delete(be->evp);
if (be->be_type == VIRTIO_CONSOLE_BE_PTY && be->pts_fd > 0) {
close(be->pts_fd);
be->pts_fd = -1;
}
be->evp = NULL;
be->fd = -1;
be->open = false;
}
static void
virtio_console_backend_read(int fd __attribute__((unused)),
enum ev_type t __attribute__((unused)),
void *arg)
{
struct virtio_console_port *port;
struct virtio_console_backend *be = arg;
struct virtio_vq_info *vq;
struct iovec iov;
static char dummybuf[2048];
int len, n;
uint16_t idx;
port = be->port;
vq = virtio_console_port_to_vq(port, true);
if (!be->open || !port->rx_ready) {
len = read(be->fd, dummybuf, sizeof(dummybuf));
if (len == 0)
goto close;
return;
}
if (!vq_has_descs(vq)) {
len = read(be->fd, dummybuf, sizeof(dummybuf));
vq_endchains(vq, 1);
if (len == 0)
goto close;
return;
}
do {
n = vq_getchain(vq, &idx, &iov, 1, NULL);
len = readv(be->fd, &iov, n);
if (len <= 0) {
vq_retchain(vq);
vq_endchains(vq, 0);
/* no data available */
if (len == -1 && errno == EAGAIN)
return;
/* any other errors */
goto close;
}
vq_relchain(vq, idx, len);
} while (vq_has_descs(vq));
vq_endchains(vq, 1);
close:
virtio_console_reset_backend(be);
WPRINTF(("vtcon: be read failed and close! len = %d, errno = %d\n",
len, errno));
}
static void
virtio_console_backend_write(struct virtio_console_port *port, void *arg,
struct iovec *iov, int niov)
{
struct virtio_console_backend *be;
int ret;
be = arg;
if (be->fd == -1)
return;
ret = writev(be->fd, iov, niov);
if (ret <= 0) {
/* The backend cannot receive more data. For example, when the pts is
 * not connected to any client, its tty buffer will become full.
 * In this case we just drop the data from the guest hvc console.
*/
if (ret == -1 && errno == EAGAIN)
return;
virtio_console_reset_backend(be);
WPRINTF(("vtcon: be write failed! errno = %d\n", errno));
}
}
static void
virtio_console_restore_stdio(void)
{
tcsetattr(STDIN_FILENO, TCSANOW, &virtio_console_saved_tio);
fcntl(STDIN_FILENO, F_SETFL, virtio_console_saved_flags);
stdio_in_use = false;
}
static bool
virtio_console_backend_can_read(enum virtio_console_be_type be_type)
{
return (be_type == VIRTIO_CONSOLE_BE_FILE) ? false : true;
}
static int
virtio_console_open_backend(const char *path,
enum virtio_console_be_type be_type)
{
int fd = -1;
switch (be_type) {
case VIRTIO_CONSOLE_BE_PTY:
fd = posix_openpt(O_RDWR | O_NOCTTY);
if (fd == -1)
WPRINTF(("vtcon: posix_openpt failed, errno = %d\n",
errno));
else if (grantpt(fd) == -1 || unlockpt(fd) == -1) {
WPRINTF(("vtcon: grant/unlock failed, errno = %d\n",
errno));
close(fd);
fd = -1;
}
break;
case VIRTIO_CONSOLE_BE_STDIO:
if (stdio_in_use) {
WPRINTF(("vtcon: stdio is used by other device\n"));
break;
}
fd = STDIN_FILENO;
stdio_in_use = true;
break;
case VIRTIO_CONSOLE_BE_TTY:
fd = open(path, O_RDWR | O_NONBLOCK);
if (fd < 0)
WPRINTF(("vtcon: open failed: %s\n", path));
else if (!isatty(fd)) {
WPRINTF(("vtcon: not a tty: %s\n", path));
close(fd);
fd = -1;
}
break;
case VIRTIO_CONSOLE_BE_FILE:
fd = open(path, O_WRONLY|O_CREAT|O_APPEND|O_NONBLOCK, 0666);
if (fd < 0)
WPRINTF(("vtcon: open failed: %s\n", path));
break;
default:
WPRINTF(("not supported backend %d!\n", be_type));
}
return fd;
}
static int
virtio_console_config_backend(struct virtio_console_backend *be)
{
int fd, flags;
char *pts_name = NULL;
int slave_fd = -1;
struct termios tio, saved_tio;
if (!be || be->fd == -1)
return -1;
fd = be->fd;
switch (be->be_type) {
case VIRTIO_CONSOLE_BE_PTY:
pts_name = ptsname(fd);
if (pts_name == NULL) {
WPRINTF(("vtcon: ptsname return NULL, errno = %d\n",
errno));
return -1;
}
slave_fd = open(pts_name, O_RDWR);
if (slave_fd == -1) {
WPRINTF(("vtcon: slave_fd open failed, errno = %d\n",
errno));
return -1;
}
tcgetattr(slave_fd, &tio);
cfmakeraw(&tio);
tcsetattr(slave_fd, TCSAFLUSH, &tio);
be->pts_fd = slave_fd;
WPRINTF(("***********************************************\n"));
WPRINTF(("virt-console backend redirected to %s\n", pts_name));
WPRINTF(("***********************************************\n"));
flags = fcntl(fd, F_GETFL);
fcntl(fd, F_SETFL, flags | O_NONBLOCK);
break;
case VIRTIO_CONSOLE_BE_TTY:
case VIRTIO_CONSOLE_BE_STDIO:
tcgetattr(fd, &tio);
saved_tio = tio;
cfmakeraw(&tio);
tio.c_cflag |= CLOCAL;
tcsetattr(fd, TCSANOW, &tio);
if (be->be_type == VIRTIO_CONSOLE_BE_STDIO) {
flags = fcntl(fd, F_GETFL);
fcntl(fd, F_SETFL, flags | O_NONBLOCK);
virtio_console_saved_flags = flags;
virtio_console_saved_tio = saved_tio;
atexit(virtio_console_restore_stdio);
}
break;
default:
break; /* nothing to do */
}
return 0;
}
static int
virtio_console_add_backend(struct virtio_console *console,
const char *name, const char *path,
enum virtio_console_be_type be_type,
bool is_console)
{
struct virtio_console_backend *be;
int error = 0, fd = -1;
be = calloc(1, sizeof(struct virtio_console_backend));
if (be == NULL) {
error = -1;
goto out;
}
fd = virtio_console_open_backend(path, be_type);
if (fd < 0) {
error = -1;
goto out;
}
be->fd = fd;
be->be_type = be_type;
if (virtio_console_config_backend(be) < 0) {
WPRINTF(("vtcon: virtio_console_config_backend failed\n"));
error = -1;
goto out;
}
be->port = virtio_console_add_port(console, name,
virtio_console_backend_write, be, is_console);
if (be->port == NULL) {
WPRINTF(("vtcon: virtio_console_add_port failed\n"));
error = -1;
goto out;
}
if (virtio_console_backend_can_read(be_type)) {
if (isatty(fd)) {
be->evp = mevent_add(fd, EVF_READ,
virtio_console_backend_read, be);
if (be->evp == NULL) {
WPRINTF(("vtcon: mevent_add failed\n"));
error = -1;
goto out;
}
}
}
virtio_console_open_port(be->port, true);
be->open = true;
out:
if (error != 0) {
if (be) {
if (be->evp)
mevent_delete(be->evp);
if (be->port) {
be->port->enabled = false;
be->port->arg = NULL;
}
if (be->be_type == VIRTIO_CONSOLE_BE_PTY &&
be->pts_fd > 0)
close(be->pts_fd);
free(be);
}
if (fd != -1 && fd != STDIN_FILENO)
close(fd);
}
return error;
}
static void
virtio_console_close_backend(struct virtio_console_backend *be)
{
if (!be)
return;
switch (be->be_type) {
case VIRTIO_CONSOLE_BE_PTY:
if (be->pts_fd > 0) {
close(be->pts_fd);
be->pts_fd = -1;
}
break;
case VIRTIO_CONSOLE_BE_STDIO:
virtio_console_restore_stdio();
break;
default:
break;
}
be->fd = -1;
be->open = false;
memset(be->port, 0, sizeof(*be->port));
}
static void
virtio_console_close_all(struct virtio_console *console)
{
int i;
struct virtio_console_port *port;
struct virtio_console_backend *be;
for (i = 0; i < console->nports; i++) {
port = &console->ports[i];
if (!port->enabled)
continue;
be = (struct virtio_console_backend *)port->arg;
if (be) {
if (be->evp) {
if (be->fd != STDIN_FILENO)
mevent_delete_close(be->evp);
else
mevent_delete(be->evp);
}
virtio_console_close_backend(be);
free(be);
}
}
}
static enum virtio_console_be_type
virtio_console_get_be_type(const char *backend)
{
int i;
for (i = 0; i < VIRTIO_CONSOLE_BE_MAX; i++)
if (strcasecmp(backend, virtio_console_be_table[i]) == 0)
return i;
return VIRTIO_CONSOLE_BE_INVALID;
}
static int
virtio_console_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
{
struct virtio_console *console;
char *backend = NULL;
char *portname = NULL;
char *portpath = NULL;
char *opt;
int i;
pthread_mutexattr_t attr;
enum virtio_console_be_type be_type;
bool is_console = false;
int rc;
if (!opts) {
WPRINTF(("vtcon: invalid opts\n"));
return -1;
}
console = calloc(1, sizeof(struct virtio_console));
if (!console) {
WPRINTF(("vtcon: calloc returns NULL\n"));
return -1;
}
console->config = calloc(1, sizeof(struct virtio_console_config));
if (!console->config) {
WPRINTF(("vtcon->config: calloc returns NULL\n"));
free(console);
return -1;
}
console->config->max_nr_ports = VIRTIO_CONSOLE_MAXPORTS;
console->config->cols = 80;
console->config->rows = 25;
/* init mutex attribute properly to avoid deadlock */
rc = pthread_mutexattr_init(&attr);
if (rc)
DPRINTF(("mutexattr init failed with erro %d!\n", rc));
rc = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
if (rc)
DPRINTF(("virtio_console: mutexattr_settype failed with "
"error %d!\n", rc));
rc = pthread_mutex_init(&console->mtx, &attr);
if (rc)
DPRINTF(("virtio_console: pthread_mutex_init failed with "
"error %d!\n", rc));
virtio_linkup(&console->base, &virtio_console_ops, console, dev,
console->queues);
console->base.mtx = &console->mtx;
for (i = 0; i < VIRTIO_CONSOLE_MAXQ; i++) {
console->queues[i].qsize = VIRTIO_CONSOLE_RINGSZ;
console->queues[i].notify = i % 2 == 0
? virtio_console_notify_rx
: virtio_console_notify_tx;
}
/* initialize config space */
pci_set_cfgdata16(dev, PCIR_DEVICE, VIRTIO_DEV_CONSOLE);
pci_set_cfgdata16(dev, PCIR_VENDOR, VIRTIO_VENDOR);
pci_set_cfgdata8(dev, PCIR_CLASS, PCIC_SIMPLECOMM);
pci_set_cfgdata16(dev, PCIR_SUBDEV_0, VIRTIO_TYPE_CONSOLE);
pci_set_cfgdata16(dev, PCIR_SUBVEND_0, VIRTIO_VENDOR);
if (virtio_interrupt_init(&console->base, virtio_uses_msix())) {
if (console) {
if (console->config)
free(console->config);
free(console);
}
return -1;
}
virtio_set_io_bar(&console->base, 0);
/* create control port */
console->control_port.console = console;
console->control_port.txq = 2;
console->control_port.rxq = 3;
console->control_port.cb = virtio_console_control_tx;
console->control_port.enabled = true;
/* virtio-console,[@]stdio|tty|pty|file:portname[=portpath]
* [,[@]stdio|tty|pty|file:portname[=portpath]]
*/
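/*
 * For example (illustrative values only): "@stdio:console_port" attaches a
 * single console port to stdio, and "file:log_port=/tmp/uos.log" would back
 * a port named log_port with a hypothetical file path.
 */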
while ((opt = strsep(&opts, ",")) != NULL) {
backend = strsep(&opt, ":");
if (backend == NULL) {
WPRINTF(("vtcon: no backend is specified!\n"));
return -1;
}
if (backend[0] == '@') {
is_console = true;
backend++;
} else
is_console = false;
be_type = virtio_console_get_be_type(backend);
if (be_type == VIRTIO_CONSOLE_BE_INVALID) {
WPRINTF(("vtcon: invalid backend %s!\n",
backend));
return -1;
}
if (opt != NULL) {
portname = strsep(&opt, "=");
portpath = opt;
if (portpath == NULL
&& be_type != VIRTIO_CONSOLE_BE_STDIO
&& be_type != VIRTIO_CONSOLE_BE_PTY) {
WPRINTF(("vtcon: portpath missing for %s\n",
portname));
return -1;
}
if (virtio_console_add_backend(console, portname,
portpath, be_type, is_console) < 0) {
WPRINTF(("vtcon: add port failed %s\n",
portname));
return -1;
}
}
}
return 0;
}
static void
virtio_console_deinit(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
{
struct virtio_console *console;
console = (struct virtio_console *)dev->arg;
if (console) {
virtio_console_close_all(console);
if (console->config)
free(console->config);
free(console);
}
}
struct pci_vdev_ops pci_ops_virtio_console = {
.class_name = "virtio-console",
.vdev_init = virtio_console_init,
.vdev_deinit = virtio_console_deinit,
.vdev_barwrite = virtio_pci_write,
.vdev_barread = virtio_pci_read
};
DEFINE_PCI_DEVTYPE(pci_ops_virtio_console);

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,385 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
* virtio hyper dmabuf
* Allows to share data buffers between VMs using dmabuf like interface
*/
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include "dm.h"
#include "pci_core.h"
#include "virtio.h"
#include "virtio_kernel.h"
#include "vmmapi.h"
/*
 * The queue size was chosen experimentally so that ~20 shared surfaces
 * can run without delays on the hyper dmabuf driver side caused by a
 * lack of free buffers in the queue.
*/
#define HYPER_DMABUF_RINGSZ 128
/* Hyper dmabuf uses two queues: one for Rx and one for Tx */
#define HYPER_DMABUF_VQ_NUM 2
const char *hyper_dmabuf_vbs_dev_path = "/dev/vbs_hyper_dmabuf";
static int virtio_hyper_dmabuf_debug;
#define DPRINTF(...)\
do {\
if (virtio_hyper_dmabuf_debug)\
printf(__VA_ARGS__);\
} while (0)
#define WPRINTF(...) printf(__VA_ARGS__)
static enum VBS_K_STATUS kstatus = VIRTIO_DEV_INITIAL;
static int vbs_k_hyper_dmabuf_fd = -1;
static struct vbs_dev_info kdev;
static struct vbs_vqs_info kvqs;
struct virtio_hyper_dmabuf {
struct virtio_base base;
struct virtio_vq_info vq[HYPER_DMABUF_VQ_NUM];
pthread_mutex_t mtx;
};
static int virtio_hyper_dmabuf_k_init(void);
static int virtio_hyper_dmabuf_k_start(void);
static int virtio_hyper_dmabuf_k_stop(void);
static int virtio_hyper_dmabuf_k_reset(void);
static int virtio_hyper_dmabuf_k_dev_set(const char *name, int vmid,
int nvq, uint32_t feature,
uint64_t pio_start, uint64_t pio_len);
static int virtio_hyper_dmabuf_k_vq_set(unsigned int nvq, unsigned int idx,
uint16_t qsize,
uint32_t pfn, uint16_t msix_idx,
uint64_t msix_addr, uint32_t msix_data);
static void virtio_hyper_dmabuf_no_notify(void *, struct virtio_vq_info *);
static void virtio_hyper_dmabuf_set_status(void *, uint64_t);
static void virtio_hyper_dmabuf_reset(void *);
static struct virtio_ops virtio_hyper_dmabuf_ops_k = {
"virtio_hyper_dmabuf", /* our name */
HYPER_DMABUF_VQ_NUM, /* we support 2 virtqueues */
0, /* config reg size */
virtio_hyper_dmabuf_reset, /* reset */
virtio_hyper_dmabuf_no_notify, /* device-wide qnotify */
NULL, /* read virtio config */
NULL, /* write virtio config */
NULL, /* apply negotiated features */
virtio_hyper_dmabuf_set_status, /* called on guest set status */
0, /* our capabilities */
};
static int
virtio_hyper_dmabuf_k_init()
{
if (vbs_k_hyper_dmabuf_fd != -1) {
WPRINTF("virtio_hyper_dmabuf: Ooops! Re-entered!!\n");
return -VIRTIO_ERROR_REENTER;
}
vbs_k_hyper_dmabuf_fd = open(hyper_dmabuf_vbs_dev_path, O_RDWR);
if (vbs_k_hyper_dmabuf_fd < 0) {
WPRINTF("virtio_hyper_dmabuf: Failed to open %s!\n",
hyper_dmabuf_vbs_dev_path);
return -VIRTIO_ERROR_FD_OPEN_FAILED;
}
DPRINTF("virtio_hyper_dmabuf: Open %s success!\n",
hyper_dmabuf_vbs_dev_path);
memset(&kdev, 0, sizeof(kdev));
memset(&kvqs, 0, sizeof(kvqs));
return VIRTIO_SUCCESS;
}
static int
virtio_hyper_dmabuf_k_dev_set(const char *name, int vmid, int nvq,
uint32_t feature, uint64_t pio_start,
uint64_t pio_len)
{
/* init kdev */
strncpy(kdev.name, name, VBS_NAME_LEN);
kdev.vmid = vmid;
kdev.nvq = nvq;
kdev.negotiated_features = feature;
kdev.pio_range_start = pio_start;
kdev.pio_range_len = pio_len;
return VIRTIO_SUCCESS;
}
static int
virtio_hyper_dmabuf_k_vq_set(unsigned int nvq, unsigned int idx,
uint16_t qsize, uint32_t pfn,
uint16_t msix_idx, uint64_t msix_addr,
uint32_t msix_data)
{
if (nvq <= idx) {
WPRINTF("virtio_hyper_dmabuf: wrong idx for vq_set!\n");
return -VIRTIO_ERROR_GENERAL;
}
/* init kvqs */
kvqs.nvq = nvq;
kvqs.vqs[idx].qsize = qsize;
kvqs.vqs[idx].pfn = pfn;
kvqs.vqs[idx].msix_idx = msix_idx;
kvqs.vqs[idx].msix_addr = msix_addr;
kvqs.vqs[idx].msix_data = msix_data;
return VIRTIO_SUCCESS;
}
static int
virtio_hyper_dmabuf_k_start(void)
{
if (vbs_kernel_start(vbs_k_hyper_dmabuf_fd, &kdev, &kvqs) < 0) {
WPRINTF("virtio_hyper_dmabuf: Failed in vbs_kernel_start!\n");
return -VIRTIO_ERROR_START;
}
DPRINTF("virtio_hyper_dmabuf: vbs_kernel_started!\n");
return VIRTIO_SUCCESS;
}
static int
virtio_hyper_dmabuf_k_stop(void)
{
return vbs_kernel_stop(vbs_k_hyper_dmabuf_fd);
}
static int
virtio_hyper_dmabuf_k_reset(void)
{
memset(&kdev, 0, sizeof(kdev));
memset(&kvqs, 0, sizeof(kvqs));
return vbs_kernel_reset(vbs_k_hyper_dmabuf_fd);
}
static void
virtio_hyper_dmabuf_reset(void *base)
{
struct virtio_hyper_dmabuf *hyper_dmabuf;
hyper_dmabuf = (struct virtio_hyper_dmabuf *)base;
DPRINTF("virtio_hyper_dmabuf: device reset requested !\n");
virtio_reset_dev(&hyper_dmabuf->base);
if (kstatus == VIRTIO_DEV_STARTED) {
virtio_hyper_dmabuf_k_stop();
virtio_hyper_dmabuf_k_reset();
kstatus = VIRTIO_DEV_INITIAL;
}
}
static void
virtio_hyper_dmabuf_no_notify(void *base, struct virtio_vq_info *vq)
{
}
/*
 * This callback gives us a chance to determine the right time to
 * kick off the VBS-K initialization
*/
static void
virtio_hyper_dmabuf_set_status(void *base, uint64_t status)
{
struct virtio_hyper_dmabuf *hyper_dmabuf;
int nvq;
struct msix_table_entry *mte;
uint64_t msix_addr = 0;
uint32_t msix_data = 0;
int rc, i, j;
hyper_dmabuf = (struct virtio_hyper_dmabuf *) base;
nvq = hyper_dmabuf->base.vops->nvq;
if (kstatus == VIRTIO_DEV_INIT_SUCCESS &&
(status & VIRTIO_CR_STATUS_DRIVER_OK)) {
/* time to kickoff VBS-K side */
/* init vdev first */
rc = virtio_hyper_dmabuf_k_dev_set(
hyper_dmabuf->base.vops->name,
hyper_dmabuf->base.dev->vmctx->vmid,
nvq,
hyper_dmabuf->base.negotiated_caps,
/* currently we let VBS-K handle
* kick register
*/
hyper_dmabuf->base.dev->bar[0].addr + 16,
2);
for (i = 0; i < nvq; i++) {
if (hyper_dmabuf->vq[i].msix_idx !=
VIRTIO_MSI_NO_VECTOR) {
j = hyper_dmabuf->vq[i].msix_idx;
mte = &hyper_dmabuf->base.dev->msix.table[j];
msix_addr = mte->addr;
msix_data = mte->msg_data;
}
rc = virtio_hyper_dmabuf_k_vq_set(
nvq, i,
hyper_dmabuf->vq[i].qsize,
hyper_dmabuf->vq[i].pfn,
hyper_dmabuf->vq[i].msix_idx,
msix_addr,
msix_data);
if (rc < 0) {
WPRINTF("virtio_hyper_dmabuf:");
WPRINTF("kernel_set_vq");
WPRINTF("failed, i %d ret %d\n", i, rc);
return;
}
}
rc = virtio_hyper_dmabuf_k_start();
if (rc < 0) {
WPRINTF("virtio_hyper_dmabuf:");
WPRINTF("kernel_start() failed\n");
kstatus = VIRTIO_DEV_START_FAILED;
} else {
kstatus = VIRTIO_DEV_STARTED;
}
}
}
static int
virtio_hyper_dmabuf_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
{
struct virtio_hyper_dmabuf *hyper_dmabuf;
kstatus = VIRTIO_DEV_PRE_INIT;
pthread_mutexattr_t attr;
int rc;
hyper_dmabuf = calloc(1, sizeof(struct virtio_hyper_dmabuf));
if (!hyper_dmabuf) {
WPRINTF(("virtio_hdma: calloc returns NULL\n"));
return -1;
}
/* init mutex attribute properly */
rc = pthread_mutexattr_init(&attr);
if (rc)
DPRINTF("mutexattr init failed with error %d!\n", rc);
if (virtio_uses_msix()) {
rc = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_DEFAULT);
if (rc)
DPRINTF("virtio_msix: mutexattr_settype "
"failed with error %d!\n", rc);
} else {
rc = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
if (rc)
DPRINTF("virtio_intx: mutexattr_settype "
"failed with error %d!\n", rc);
}
rc = pthread_mutex_init(&hyper_dmabuf->mtx, &attr);
if (rc)
DPRINTF("mutex init failed with error %d!\n", rc);
virtio_linkup(&hyper_dmabuf->base,
&virtio_hyper_dmabuf_ops_k,
hyper_dmabuf,
dev,
hyper_dmabuf->vq);
rc = virtio_hyper_dmabuf_k_init();
if (rc < 0) {
WPRINTF("virtio_hyper_dmabuf: VBS-K ");
WPRINTF("init failed with error %d!\n", rc);
kstatus = VIRTIO_DEV_INIT_FAILED;
} else {
kstatus = VIRTIO_DEV_INIT_SUCCESS;
}
hyper_dmabuf->base.mtx = &hyper_dmabuf->mtx;
hyper_dmabuf->vq[0].qsize = HYPER_DMABUF_RINGSZ;
hyper_dmabuf->vq[1].qsize = HYPER_DMABUF_RINGSZ;
/* initialize config space */
pci_set_cfgdata16(dev, PCIR_DEVICE, VIRTIO_DEV_HYPERDMABUF);
pci_set_cfgdata16(dev, PCIR_VENDOR, INTEL_VENDOR_ID);
pci_set_cfgdata8(dev, PCIR_CLASS, PCIC_MEMORY);
pci_set_cfgdata16(dev, PCIR_SUBDEV_0, VIRTIO_TYPE_HYPERDMABUF);
pci_set_cfgdata16(dev, PCIR_SUBVEND_0, INTEL_VENDOR_ID);
if (virtio_interrupt_init(&hyper_dmabuf->base, virtio_uses_msix())) {
if (hyper_dmabuf)
free(hyper_dmabuf);
return -1;
}
virtio_set_io_bar(&hyper_dmabuf->base, 0);
return 0;
}
static void
virtio_hyper_dmabuf_deinit(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
{
if (kstatus == VIRTIO_DEV_STARTED) {
DPRINTF("virtio_hyper_dmabuf: deinitializing\n");
virtio_hyper_dmabuf_k_stop();
virtio_hyper_dmabuf_k_reset();
kstatus = VIRTIO_DEV_INITIAL;
assert(vbs_k_hyper_dmabuf_fd >= 0);
close(vbs_k_hyper_dmabuf_fd);
vbs_k_hyper_dmabuf_fd = -1;
}
if (dev->arg)
free((struct virtio_hyper_dmabuf *)dev->arg);
}
struct pci_vdev_ops pci_ops_virtio_hyper_dmabuf = {
.class_name = "virtio-hyper_dmabuf",
.vdev_init = virtio_hyper_dmabuf_init,
.vdev_deinit = virtio_hyper_dmabuf_deinit,
.vdev_barwrite = virtio_pci_write,
.vdev_barread = virtio_pci_read
};
DEFINE_PCI_DEVTYPE(pci_ops_virtio_hyper_dmabuf);

View File

@@ -0,0 +1,721 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/types.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include "dm.h"
#include "pci_core.h"
#include "virtio.h"
#include "mevent.h"
#include <linux/input.h>
static int virtio_input_debug;
#define DPRINTF(params) do { if (virtio_input_debug) printf params; } while (0)
#define WPRINTF(params) (printf params)
/*
* Queue definitions.
*/
#define VIRTIO_INPUT_EVENT_QUEUE 0
#define VIRTIO_INPUT_STATUS_QUEUE 1
#define VIRTIO_INPUT_MAXQ 2
/*
* Virtqueue size.
*/
#define VIRTIO_INPUT_RINGSZ 64
/*
* Default size of the buffer used to hold events between SYN
*/
#define VIRTIO_INPUT_PACKET_SIZE 10
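/*
 * Events are staged in a per-device queue until an EV_SYN/SYN_REPORT event
 * arrives and the whole packet is pushed to the guest (see
 * virtio_input_send_event); the queue grows on demand if a packet exceeds
 * this default size.
 */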
/*
* Host capabilities
*/
#define VIRTIO_INPUT_S_HOSTCAPS (VIRTIO_F_VERSION_1)
enum virtio_input_config_select {
VIRTIO_INPUT_CFG_UNSET = 0x00,
VIRTIO_INPUT_CFG_ID_NAME = 0x01,
VIRTIO_INPUT_CFG_ID_SERIAL = 0x02,
VIRTIO_INPUT_CFG_ID_DEVIDS = 0x03,
VIRTIO_INPUT_CFG_PROP_BITS = 0x10,
VIRTIO_INPUT_CFG_EV_BITS = 0x11,
VIRTIO_INPUT_CFG_ABS_INFO = 0x12,
};
struct virtio_input_absinfo {
uint32_t min;
uint32_t max;
uint32_t fuzz;
uint32_t flat;
uint32_t res;
};
struct virtio_input_devids {
uint16_t bustype;
uint16_t vendor;
uint16_t product;
uint16_t version;
};
struct virtio_input_event {
uint16_t type;
uint16_t code;
uint32_t value;
};
/*
* Device-specific configuration registers
* To query a specific piece of configuration information FE driver sets
* "select" and "subsel" accordingly, information size is returned in "size"
* and information data is returned in union "u"
*/
struct virtio_input_config {
uint8_t select;
uint8_t subsel;
uint8_t size;
uint8_t reserved[5];
union {
char string[128];
uint8_t bitmap[128];
struct virtio_input_absinfo abs;
struct virtio_input_devids ids;
} u;
};
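/*
 * Illustrative example of the config handshake: to fetch the key bitmap,
 * the FE driver writes select = VIRTIO_INPUT_CFG_EV_BITS and
 * subsel = EV_KEY, then reads "size" bytes back from u.bitmap (see
 * virtio_input_cfgwrite and virtio_input_get_config below).
 */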
struct virtio_input_event_elem {
struct virtio_input_event event;
struct iovec iov;
uint16_t idx;
};
/*
* Per-device struct
*/
struct virtio_input {
struct virtio_base base;
struct virtio_vq_info queues[VIRTIO_INPUT_MAXQ];
pthread_mutex_t mtx;
struct mevent *mevp;
uint64_t features;
struct virtio_input_config cfg;
char *evdev;
char *serial;
int fd;
bool ready;
struct virtio_input_event_elem *event_queue;
uint32_t event_qsize;
uint32_t event_qindex;
};
static void virtio_input_reset(void *);
static void virtio_input_neg_features(void *, uint64_t);
static void virtio_input_set_status(void *, uint64_t);
static int virtio_input_cfgread(void *, int, int, uint32_t *);
static int virtio_input_cfgwrite(void *, int, int, uint32_t);
static bool virtio_input_get_config(struct virtio_input *, uint8_t, uint8_t,
struct virtio_input_config *);
static struct virtio_ops virtio_input_ops = {
"virtio_input", /* our name */
VIRTIO_INPUT_MAXQ, /* we support VIRTIO_INPUT_MAXQ virtqueues */
sizeof(struct virtio_input_config), /* config reg size */
virtio_input_reset, /* reset */
NULL, /* device-wide qnotify */
virtio_input_cfgread, /* read virtio config */
virtio_input_cfgwrite, /* write virtio config */
virtio_input_neg_features, /* apply negotiated features */
virtio_input_set_status, /* called on guest set status */
VIRTIO_INPUT_S_HOSTCAPS, /* our capabilities */
};
static void
virtio_input_reset(void *vdev)
{
struct virtio_input *vi;
vi = vdev;
DPRINTF(("vtinput: device reset requested!\n"));
vi->ready = false;
virtio_reset_dev(&vi->base);
}
static void
virtio_input_neg_features(void *vdev, uint64_t negotiated_features)
{
struct virtio_input *vi = vdev;
vi->features = negotiated_features;
}
static void
virtio_input_set_status(void *vdev, uint64_t status)
{
struct virtio_input *vi = vdev;
if (status & VIRTIO_CR_STATUS_DRIVER_OK) {
if (!vi->ready)
vi->ready = true;
}
}
static int
virtio_input_cfgread(void *vdev, int offset, int size, uint32_t *retval)
{
struct virtio_input *vi = vdev;
struct virtio_input_config cfg;
bool rc;
rc = virtio_input_get_config(vi, vi->cfg.select,
vi->cfg.subsel, &cfg);
if (rc)
memcpy(retval, (uint8_t *)&cfg + offset, size);
else
memset(retval, 0, size);
return 0;
}
static int
virtio_input_cfgwrite(void *vdev, int offset, int size, uint32_t val)
{
struct virtio_input *vi = vdev;
if (offset == offsetof(struct virtio_input_config, select))
vi->cfg.select = (uint8_t)val;
else if (offset == offsetof(struct virtio_input_config, subsel))
vi->cfg.subsel = (uint8_t)val;
else
DPRINTF(("vtinput: write to readonly reg %d\n", offset));
return 0;
}
static void
virtio_input_notify_event_vq(void *vdev, struct virtio_vq_info *vq)
{
DPRINTF(("%s\n", __func__));
}
static void
virtio_input_notify_status_vq(void *vdev, struct virtio_vq_info *vq)
{
struct virtio_input *vi;
struct virtio_input_event event;
struct input_event host_event;
struct iovec iov;
int n, len;
uint16_t idx;
vi = vdev;
while (vq_has_descs(vq)) {
n = vq_getchain(vq, &idx, &iov, 1, NULL);
assert(n == 1);
memcpy(&event, iov.iov_base, sizeof(event));
host_event.type = event.type;
host_event.code = event.code;
host_event.value = event.value;
len = write(vi->fd, &host_event, sizeof(host_event));
if (len == -1)
WPRINTF(("%s: write failed, len = %d, errno = %d\n",
__func__, len, errno));
vq_relchain(vq, idx, sizeof(event)); /* Release the chain */
}
vq_endchains(vq, 1); /* Generate interrupt if appropriate. */
}
static void
virtio_input_send_event(struct virtio_input *vi,
struct virtio_input_event *event)
{
struct virtio_vq_info *vq;
struct iovec iov;
int n, i;
uint16_t idx;
if (!vi->ready)
return;
if (vi->event_qindex == vi->event_qsize) {
vi->event_qsize++;
vi->event_queue = realloc(vi->event_queue,
vi->event_qsize *
sizeof(struct virtio_input_event_elem));
assert(vi->event_queue);
}
vi->event_queue[vi->event_qindex].event = *event;
vi->event_qindex++;
if (event->type != EV_SYN || event->code != SYN_REPORT)
return;
vq = &vi->queues[VIRTIO_INPUT_EVENT_QUEUE];
for (i = 0; i < vi->event_qindex; i++) {
if (!vq_has_descs(vq)) {
while (i-- > 0)
vq_retchain(vq);
WPRINTF(("%s: not enough avail descs, dropped:%d\n",
__func__, vi->event_qindex));
goto out;
}
n = vq_getchain(vq, &idx, &iov, 1, NULL);
assert(n == 1);
vi->event_queue[i].iov = iov;
vi->event_queue[i].idx = idx;
}
for (i = 0; i < vi->event_qindex; i++) {
memcpy(vi->event_queue[i].iov.iov_base,
&vi->event_queue[i].event,
sizeof(struct virtio_input_event));
vq_relchain(vq, vi->event_queue[i].idx,
sizeof(struct virtio_input_event));
}
out:
vi->event_qindex = 0;
vq_endchains(vq, 1);
}
static void
virtio_input_read_event(int fd __attribute__((unused)),
enum ev_type t __attribute__((unused)),
void *arg)
{
struct virtio_input *vi = arg;
struct virtio_input_event event;
struct input_event host_event;
int len;
while (1) {
len = read(vi->fd, &host_event, sizeof(host_event));
if (len != sizeof(host_event)) {
if (len == -1 && errno != EAGAIN)
WPRINTF(("vtinput: host read failed! "
"len = %d, errno = %d\n",
len, errno));
break;
}
event.type = host_event.type;
event.code = host_event.code;
event.value = host_event.value;
virtio_input_send_event(vi, &event);
}
}
static int
virtio_input_get_bitmap(struct virtio_input *vi, unsigned int cmd, int count,
struct virtio_input_config *cfg)
{
int i, size = -1;
int rc;
if (count <= 0)
return -1;
if (!cfg)
return -1;
memset(cfg, 0, sizeof(*cfg));
rc = ioctl(vi->fd, cmd, cfg->u.bitmap);
if (rc < 0)
return -1;
count = count / 8;
for (i = count - 1; i >= 0; i--) {
if (cfg->u.bitmap[i]) {
size = i + 1;
break;
}
}
return size;
}
static bool
virtio_input_get_propbits(struct virtio_input *vi,
struct virtio_input_config *cfg)
{
unsigned int cmd;
int size;
if (!cfg)
return false;
cmd = EVIOCGPROP(INPUT_PROP_CNT / 8);
size = virtio_input_get_bitmap(vi, cmd, INPUT_PROP_CNT, cfg);
if (size > 0) {
cfg->select = VIRTIO_INPUT_CFG_PROP_BITS;
cfg->subsel = 0;
cfg->size = size;
return true;
}
return false;
}
static bool
virtio_input_get_evbits(struct virtio_input *vi, int type,
struct virtio_input_config *cfg)
{
unsigned int cmd;
int count, size;
if (!cfg)
return false;
switch (type) {
case EV_KEY:
count = KEY_CNT;
break;
case EV_REL:
count = REL_CNT;
break;
case EV_ABS:
count = ABS_CNT;
break;
case EV_MSC:
count = MSC_CNT;
break;
case EV_SW:
count = SW_CNT;
break;
case EV_LED:
count = LED_CNT;
break;
default:
return false;
}
cmd = EVIOCGBIT(type, count / 8);
size = virtio_input_get_bitmap(vi, cmd, count, cfg);
if (size > 0) {
cfg->select = VIRTIO_INPUT_CFG_EV_BITS;
cfg->subsel = type;
cfg->size = size;
return true;
}
return false;
}
static bool
virtio_input_get_absinfo(struct virtio_input *vi, int axis,
struct virtio_input_config *cfg)
{
struct virtio_input_config ev_cfg;
struct input_absinfo abs;
bool has_ev_abs;
int rc;
if (!cfg)
return false;
has_ev_abs = virtio_input_get_evbits(vi, EV_ABS, &ev_cfg);
if (!has_ev_abs)
return false;
rc = ioctl(vi->fd, EVIOCGABS(axis), &abs);
if (rc < 0)
return false;
cfg->u.abs.min = abs.minimum;
cfg->u.abs.max = abs.maximum;
cfg->u.abs.fuzz = abs.fuzz;
cfg->u.abs.flat = abs.flat;
cfg->u.abs.res = abs.resolution;
cfg->select = VIRTIO_INPUT_CFG_ABS_INFO;
cfg->subsel = axis;
cfg->size = sizeof(struct virtio_input_absinfo);
return true;
}
static bool
virtio_input_get_config(struct virtio_input *vi, uint8_t select,
uint8_t subsel, struct virtio_input_config *cfg)
{
struct input_id dev_ids;
bool found = false;
int rc;
if (!cfg)
return false;
memset(cfg, 0, sizeof(*cfg));
switch (select) {
case VIRTIO_INPUT_CFG_ID_NAME:
rc = ioctl(vi->fd, EVIOCGNAME(sizeof(cfg->u.string) - 1),
cfg->u.string);
if (rc >= 0) {
cfg->select = VIRTIO_INPUT_CFG_ID_NAME;
cfg->size = strlen(cfg->u.string);
found = true;
}
break;
case VIRTIO_INPUT_CFG_ID_SERIAL:
if (vi->serial) {
cfg->select = VIRTIO_INPUT_CFG_ID_SERIAL;
cfg->size = snprintf(cfg->u.string,
sizeof(cfg->u.string), "%s", vi->serial);
found = true;
}
break;
case VIRTIO_INPUT_CFG_ID_DEVIDS:
rc = ioctl(vi->fd, EVIOCGID, &dev_ids);
if (!rc) {
cfg->u.ids.bustype = dev_ids.bustype;
cfg->u.ids.vendor = dev_ids.vendor;
cfg->u.ids.product = dev_ids.product;
cfg->u.ids.version = dev_ids.version;
cfg->select = VIRTIO_INPUT_CFG_ID_DEVIDS;
cfg->size = sizeof(struct virtio_input_devids);
found = true;
}
break;
case VIRTIO_INPUT_CFG_PROP_BITS:
found = virtio_input_get_propbits(vi, cfg);
break;
case VIRTIO_INPUT_CFG_EV_BITS:
found = virtio_input_get_evbits(vi, subsel, cfg);
break;
case VIRTIO_INPUT_CFG_ABS_INFO:
found = virtio_input_get_absinfo(vi, subsel, cfg);
break;
default:
break;
}
return found;
}
static int
virtio_input_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
{
struct virtio_input *vi;
pthread_mutexattr_t attr;
bool mutex_initialized = false;
char *opt;
int flags, ver;
int rc;
/* get evdev path from opts
* -s n,virtio-input,/dev/input/eventX[,serial]
*/
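/*
 * Illustrative invocation (slot number, event node and serial are
 * examples only): -s 5,virtio-input,/dev/input/event2,SERIAL123
 */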
if (!opts) {
WPRINTF(("%s: evdev path is NULL\n", __func__));
return -1;
}
vi = calloc(1, sizeof(struct virtio_input));
if (!vi) {
WPRINTF(("%s: out of memory\n", __func__));
return -1;
}
opt = strsep(&opts, ",");
if (!opt) {
WPRINTF(("%s: evdev path is NULL\n", __func__));
goto fail;
}
vi->evdev = strdup(opt);
if (!vi->evdev) {
WPRINTF(("%s: strdup failed\n", __func__));
goto fail;
}
if (opts) {
vi->serial = strdup(opts);
if (!vi->serial) {
WPRINTF(("%s: strdup serial failed\n", __func__));
goto fail;
}
}
vi->fd = open(vi->evdev, O_RDWR);
if (vi->fd < 0) {
WPRINTF(("open %s failed %d\n", vi->evdev, errno));
goto fail;
}
flags = fcntl(vi->fd, F_GETFL);
fcntl(vi->fd, F_SETFL, flags | O_NONBLOCK);
rc = ioctl(vi->fd, EVIOCGVERSION, &ver); /* is it an evdev device? */
if (rc < 0) {
WPRINTF(("%s: get version failed\n", vi->evdev));
goto fail;
}
rc = ioctl(vi->fd, EVIOCGRAB, 1); /* exclusive access */
if (rc < 0) {
WPRINTF(("%s: grab device failed %d\n", vi->evdev, errno));
goto fail;
}
/* init mutex attribute properly to avoid deadlock */
rc = pthread_mutexattr_init(&attr);
if (rc)
DPRINTF(("mutexattr init failed with erro %d!\n", rc));
rc = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
if (rc)
DPRINTF(("vtinput: mutexattr_settype failed with "
"error %d!\n", rc));
rc = pthread_mutex_init(&vi->mtx, &attr);
if (rc)
DPRINTF(("vtinput: pthread_mutex_init failed with "
"error %d!\n", rc));
mutex_initialized = (rc == 0);
vi->event_qsize = VIRTIO_INPUT_PACKET_SIZE;
vi->event_qindex = 0;
vi->event_queue = calloc(vi->event_qsize,
sizeof(struct virtio_input_event_elem));
if (!vi->event_queue) {
WPRINTF(("vtinput: could not alloc event queue buf\n"));
goto fail;
}
vi->mevp = mevent_add(vi->fd, EVF_READ, virtio_input_read_event, vi);
if (vi->mevp == NULL) {
WPRINTF(("vtinput: could not register event\n"));
goto fail;
}
virtio_linkup(&vi->base, &virtio_input_ops, vi, dev, vi->queues);
vi->base.mtx = &vi->mtx;
vi->queues[VIRTIO_INPUT_EVENT_QUEUE].qsize = VIRTIO_INPUT_RINGSZ;
vi->queues[VIRTIO_INPUT_EVENT_QUEUE].notify =
virtio_input_notify_event_vq;
vi->queues[VIRTIO_INPUT_STATUS_QUEUE].qsize = VIRTIO_INPUT_RINGSZ;
vi->queues[VIRTIO_INPUT_STATUS_QUEUE].notify =
virtio_input_notify_status_vq;
/* initialize config space */
pci_set_cfgdata16(dev, PCIR_DEVICE, 0x1040 + VIRTIO_TYPE_INPUT);
pci_set_cfgdata16(dev, PCIR_VENDOR, VIRTIO_VENDOR);
pci_set_cfgdata8(dev, PCIR_CLASS, PCIC_INPUTDEV);
pci_set_cfgdata8(dev, PCIR_SUBCLASS, PCIS_INPUTDEV_OTHER);
pci_set_cfgdata16(dev, PCIR_SUBDEV_0, 0x1040 + VIRTIO_TYPE_INPUT);
pci_set_cfgdata16(dev, PCIR_SUBVEND_0, VIRTIO_VENDOR);
if (virtio_interrupt_init(&vi->base, virtio_uses_msix())) {
DPRINTF(("%s, interrupt_init failed!\n", __func__));
goto fail;
}
rc = virtio_set_modern_bar(&vi->base, true);
return rc;
fail:
if (vi) {
if (mutex_initialized)
pthread_mutex_destroy(&vi->mtx);
if (vi->event_queue) {
free(vi->event_queue);
vi->event_queue = NULL;
}
if (vi->mevp) {
mevent_delete(vi->mevp);
vi->mevp = NULL;
}
if (vi->fd > 0) {
close(vi->fd);
vi->fd = -1;
}
if (vi->serial) {
free(vi->serial);
vi->serial = NULL;
}
if (vi->evdev) {
free(vi->evdev);
vi->evdev = NULL;
}
free(vi);
vi = NULL;
}
return -1;
}
static void
virtio_input_deinit(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
{
struct virtio_input *vi;
vi = (struct virtio_input *)dev->arg;
if (vi) {
pthread_mutex_destroy(&vi->mtx);
if (vi->event_queue)
free(vi->event_queue);
if (vi->mevp)
mevent_delete(vi->mevp);
if (vi->fd > 0)
close(vi->fd);
if (vi->evdev)
free(vi->evdev);
if (vi->serial)
free(vi->serial);
free(vi);
vi = NULL;
}
}
struct pci_vdev_ops pci_ops_virtio_input = {
.class_name = "virtio-input",
.vdev_init = virtio_input_init,
.vdev_deinit = virtio_input_deinit,
.vdev_barwrite = virtio_pci_write,
.vdev_barread = virtio_pci_read
};
DEFINE_PCI_DEVTYPE(pci_ops_virtio_input);

View File

@@ -0,0 +1,104 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/* Routines to notify the VBS-K in kernel */
#include <stdio.h>
#include <sys/ioctl.h>
#include "virtio_kernel.h"
static int virtio_kernel_debug;
#define DPRINTF(params) do { if (virtio_kernel_debug) printf params; } while (0)
#define WPRINTF(params) (printf params)
static int
vbs_dev_info_set(int fd, void *arg)
{
return ioctl(fd, VBS_K_SET_DEV, arg);
}
static int
vbs_vqs_info_set(int fd, void *arg)
{
return ioctl(fd, VBS_K_SET_VQ, arg);
}
/* VBS-K common ops */
/* VBS-K init/reset */
int
vbs_kernel_init(int fd)
{
return VIRTIO_SUCCESS;
}
int
vbs_kernel_reset(int fd)
{
return VIRTIO_SUCCESS;
}
/*
 * We need a way to start/stop vbs_k execution since the guest might want to
* change the configuration of the virtio device after VBS-K has been
* initialized.
*/
/* VBS-K start/stop */
int
vbs_kernel_start(int fd, struct vbs_dev_info *dev, struct vbs_vqs_info *vqs)
{
int ret;
if (fd < 0) {
WPRINTF(("%s: fd < 0\n", __func__));
return -VIRTIO_ERROR_FD_OPEN_FAILED;
}
ret = vbs_dev_info_set(fd, dev);
if (ret < 0) {
WPRINTF(("vbs_kernel_set_dev failed: ret %d\n", ret));
return ret;
}
ret = vbs_vqs_info_set(fd, vqs);
if (ret < 0) {
WPRINTF(("vbs_kernel_set_vqs failed: ret %d\n", ret));
return ret;
}
return VIRTIO_SUCCESS;
}
int
vbs_kernel_stop(int fd)
{
DPRINTF(("%s\n", __func__));
return VIRTIO_SUCCESS;
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,482 @@
/*-
* Copyright (c) 2014 Nahanni Systems Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer
* in this position and unchanged.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* virtio entropy device emulation.
* Randomness is sourced from /dev/random which does not block
* once it has been seeded at bootup.
*/
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/uio.h>
#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <sysexits.h>
#include "dm.h"
#include "pci_core.h"
#include "virtio.h"
#include "virtio_kernel.h"
#include "vmmapi.h" /* for vmctx */
#define VIRTIO_RND_RINGSZ 64
/*
* Per-device struct
*/
struct virtio_rnd {
/* VBS-U variables */
struct virtio_base base;
struct virtio_vq_info vq;
pthread_mutex_t mtx;
uint64_t cfg;
int fd;
/* VBS-K variables */
struct {
enum VBS_K_STATUS status;
int fd;
struct vbs_dev_info dev;
struct vbs_vqs_info vqs;
} vbs_k;
};
static int virtio_rnd_debug;
#define DPRINTF(params) do { if (virtio_rnd_debug) printf params; } while (0)
#define WPRINTF(params) (printf params)
/* VBS-K interface functions */
static int virtio_rnd_kernel_init(struct virtio_rnd *); /* open VBS-K chardev */
static int virtio_rnd_kernel_start(struct virtio_rnd *);
static int virtio_rnd_kernel_stop(struct virtio_rnd *);
static int virtio_rnd_kernel_reset(struct virtio_rnd *);
static int virtio_rnd_kernel_dev_set(struct vbs_dev_info *kdev,
const char *name, int vmid, int nvq,
uint32_t feature, uint64_t pio_start,
uint64_t pio_len);
static int virtio_rnd_kernel_vq_set(struct vbs_vqs_info *kvqs, unsigned int nvq,
unsigned int idx, uint16_t qsize,
uint32_t pfn, uint16_t msix_idx,
uint64_t msix_addr, uint32_t msix_data);
/* VBS-U virtio_ops */
static void virtio_rnd_reset(void *);
static void virtio_rnd_notify(void *, struct virtio_vq_info *);
static struct virtio_ops virtio_rnd_ops = {
"virtio_rnd", /* our name */
1, /* we support 1 virtqueue */
0, /* config reg size */
virtio_rnd_reset, /* reset */
virtio_rnd_notify, /* device-wide qnotify */
NULL, /* read virtio config */
NULL, /* write virtio config */
NULL, /* apply negotiated features */
NULL, /* called on guest set status */
0, /* our capabilities */
};
/* VBS-K virtio_ops */
static void virtio_rnd_k_no_notify(void *, struct virtio_vq_info *);
static void virtio_rnd_k_set_status(void *, uint64_t);
static struct virtio_ops virtio_rnd_ops_k = {
"virtio_rnd", /* our name */
1, /* we support 1 virtqueue */
0, /* config reg size */
virtio_rnd_reset, /* reset */
virtio_rnd_k_no_notify, /* device-wide qnotify */
NULL, /* read virtio config */
NULL, /* write virtio config */
NULL, /* apply negotiated features */
virtio_rnd_k_set_status,/* called on guest set status */
0, /* our capabilities */
};
/* VBS-K interface function implementations */
static void
virtio_rnd_k_no_notify(void *base, struct virtio_vq_info *vq)
{
WPRINTF(("virtio_rnd: VBS-K mode! Should not reach here!!\n"));
}
/*
 * This callback gives us a chance to determine the right time to
 * kick off the VBS-K initialization
*/
static void
virtio_rnd_k_set_status(void *base, uint64_t status)
{
struct virtio_rnd *rnd;
int nvq;
struct msix_table_entry *mte;
uint64_t msix_addr = 0;
uint32_t msix_data = 0;
int rc, i, j;
rnd = base;
nvq = rnd->base.vops->nvq;
if (rnd->vbs_k.status == VIRTIO_DEV_INIT_SUCCESS &&
(status & VIRTIO_CR_STATUS_DRIVER_OK)) {
/* time to kickoff VBS-K side */
/* init vdev first */
rc = virtio_rnd_kernel_dev_set(&rnd->vbs_k.dev,
rnd->base.vops->name,
rnd->base.dev->vmctx->vmid,
nvq,
rnd->base.negotiated_caps,
/*
* currently we let VBS-K handle
* kick register
*/
rnd->base.dev->bar[0].addr + 16,
2);
for (i = 0; i < nvq; i++) {
if (rnd->vq.msix_idx != VIRTIO_MSI_NO_VECTOR) {
j = rnd->vq.msix_idx;
mte = &rnd->base.dev->msix.table[j];
msix_addr = mte->addr;
msix_data = mte->msg_data;
}
rc = virtio_rnd_kernel_vq_set(&rnd->vbs_k.vqs,
nvq, i,
rnd->vq.qsize,
rnd->vq.pfn,
rnd->vq.msix_idx,
msix_addr,
msix_data);
if (rc < 0) {
WPRINTF(("rnd_kernel_set_vq fail,i %d ret %d\n",
i, rc));
return;
}
}
rc = virtio_rnd_kernel_start(rnd);
if (rc < 0) {
WPRINTF(("virtio_rnd_kernel_start() failed\n"));
rnd->vbs_k.status = VIRTIO_DEV_START_FAILED;
} else {
rnd->vbs_k.status = VIRTIO_DEV_STARTED;
}
}
}
/*
 * Called from virtio_rnd_init(), while the device model is still
 * initializing the PCIe device emulation.
*/
static int
virtio_rnd_kernel_init(struct virtio_rnd *rnd)
{
assert(rnd->vbs_k.fd == 0);
rnd->vbs_k.fd = open("/dev/vbs_rng", O_RDWR);
if (rnd->vbs_k.fd < 0) {
WPRINTF(("Failed to open /dev/vbs_k_rng!\n"));
return -VIRTIO_ERROR_FD_OPEN_FAILED;
}
DPRINTF(("Open /dev/vbs_rng success!\n"));
memset(&rnd->vbs_k.dev, 0, sizeof(struct vbs_dev_info));
memset(&rnd->vbs_k.vqs, 0, sizeof(struct vbs_vqs_info));
return VIRTIO_SUCCESS;
}
static int
virtio_rnd_kernel_dev_set(struct vbs_dev_info *kdev, const char *name,
int vmid, int nvq, uint32_t feature,
uint64_t pio_start, uint64_t pio_len)
{
/* FE driver has set VIRTIO_CONFIG_S_DRIVER_OK */
/* init kdev */
strncpy(kdev->name, name, VBS_NAME_LEN);
kdev->vmid = vmid;
kdev->nvq = nvq;
kdev->negotiated_features = feature;
kdev->pio_range_start = pio_start;
kdev->pio_range_len = pio_len;
return VIRTIO_SUCCESS;
}
static int
virtio_rnd_kernel_vq_set(struct vbs_vqs_info *kvqs, unsigned int nvq,
unsigned int idx, uint16_t qsize, uint32_t pfn,
uint16_t msix_idx, uint64_t msix_addr,
uint32_t msix_data)
{
/* FE driver has set VIRTIO_CONFIG_S_DRIVER_OK */
if (nvq <= idx) {
WPRINTF(("%s: wrong idx!\n", __func__));
return -VIRTIO_ERROR_GENERAL;
}
/* init kvqs */
kvqs->nvq = nvq;
kvqs->vqs[idx].qsize = qsize;
kvqs->vqs[idx].pfn = pfn;
kvqs->vqs[idx].msix_idx = msix_idx;
kvqs->vqs[idx].msix_addr = msix_addr;
kvqs->vqs[idx].msix_data = msix_data;
return VIRTIO_SUCCESS;
}
static int
virtio_rnd_kernel_start(struct virtio_rnd *rnd)
{
if (vbs_kernel_start(rnd->vbs_k.fd,
&rnd->vbs_k.dev,
&rnd->vbs_k.vqs) < 0) {
WPRINTF(("Failed in vbs_k_start!\n"));
return -VIRTIO_ERROR_START;
}
DPRINTF(("vbs_k_started!\n"));
return VIRTIO_SUCCESS;
}
static int
virtio_rnd_kernel_stop(struct virtio_rnd *rnd)
{
/* device specific cleanups here */
return vbs_kernel_stop(rnd->vbs_k.fd);
}
static int
virtio_rnd_kernel_reset(struct virtio_rnd *rnd)
{
memset(&rnd->vbs_k.dev, 0, sizeof(struct vbs_dev_info));
memset(&rnd->vbs_k.vqs, 0, sizeof(struct vbs_vqs_info));
return vbs_kernel_reset(rnd->vbs_k.fd);
}
static void
virtio_rnd_reset(void *base)
{
struct virtio_rnd *rnd;
rnd = base;
DPRINTF(("virtio_rnd: device reset requested !\n"));
virtio_reset_dev(&rnd->base);
DPRINTF(("virtio_rnd: kstatus %d\n", rnd->vbs_k.status));
if (rnd->vbs_k.status == VIRTIO_DEV_STARTED) {
DPRINTF(("virtio_rnd: VBS-K reset requested!\n"));
virtio_rnd_kernel_stop(rnd);
virtio_rnd_kernel_reset(rnd);
rnd->vbs_k.status = VIRTIO_DEV_INITIAL;
}
}
static void
virtio_rnd_notify(void *base, struct virtio_vq_info *vq)
{
struct iovec iov;
struct virtio_rnd *rnd;
int len;
uint16_t idx;
rnd = base;
if (rnd->fd < 0) {
vq_endchains(vq, 0);
return;
}
while (vq_has_descs(vq)) {
vq_getchain(vq, &idx, &iov, 1, NULL);
len = read(rnd->fd, iov.iov_base, iov.iov_len);
DPRINTF(("%s: %d\r\n", __func__, len));
/* Catastrophe if unable to read from /dev/random */
assert(len > 0);
/*
* Release this chain and handle more
*/
vq_relchain(vq, idx, len);
}
vq_endchains(vq, 1); /* Generate interrupt if appropriate. */
}
static int
virtio_rnd_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
{
struct virtio_rnd *rnd;
int fd;
int len;
uint8_t v;
pthread_mutexattr_t attr;
int rc;
char *opt;
char *vbs_k_opt = NULL;
enum VBS_K_STATUS kstat = VIRTIO_DEV_INITIAL;
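/*
 * Parse the option string.  Illustrative example: "virtio-rnd,kernel=on"
 * on the acrn-dm command line requests the VBS-K (kernel) backend; with
 * no option the VBS-U (user-space) backend is used.
 */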
while ((opt = strsep(&opts, ",")) != NULL) {
/* vbs_k_opt should be kernel=on */
vbs_k_opt = strsep(&opt, "=");
DPRINTF(("vbs_k_opt is %s\n", vbs_k_opt));
if (opt != NULL) {
if (strncmp(opt, "on", 2) == 0)
kstat = VIRTIO_DEV_PRE_INIT;
WPRINTF(("virtio_rnd: VBS-K initializing..."));
}
}
/*
* Should always be able to open /dev/random.
*/
fd = open("/dev/random", O_RDONLY | O_NONBLOCK);
assert(fd >= 0);
/*
* Check that device is seeded and non-blocking.
*/
len = read(fd, &v, sizeof(v));
if (len <= 0) {
WPRINTF(("virtio_rnd: /dev/random not ready, read(): %d", len));
return -1;
}
rnd = calloc(1, sizeof(struct virtio_rnd));
if (!rnd) {
WPRINTF(("virtio_rnd: calloc returns NULL\n"));
return -1;
}
rnd->vbs_k.status = kstat;
/* init mutex attribute properly */
rc = pthread_mutexattr_init(&attr);
if (rc)
DPRINTF(("mutexattr init failed with erro %d!\n", rc));
if (virtio_uses_msix()) {
rc = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_DEFAULT);
if (rc)
DPRINTF(("virtio_msix: mutexattr_settype failed with "
"error %d!\n", rc));
} else {
rc = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
if (rc)
DPRINTF(("virtio_intx: mutexattr_settype failed with "
"error %d!\n", rc));
}
rc = pthread_mutex_init(&rnd->mtx, &attr);
if (rc)
DPRINTF(("mutex init failed with error %d!\n", rc));
if (rnd->vbs_k.status == VIRTIO_DEV_PRE_INIT) {
DPRINTF(("%s: VBS-K option detected!\n", __func__));
virtio_linkup(&rnd->base, &virtio_rnd_ops_k,
rnd, dev, &rnd->vq);
rc = virtio_rnd_kernel_init(rnd);
if (rc < 0) {
WPRINTF(("virtio_rnd: VBS-K init failed,error %d!\n",
rc));
rnd->vbs_k.status = VIRTIO_DEV_INIT_FAILED;
} else {
rnd->vbs_k.status = VIRTIO_DEV_INIT_SUCCESS;
}
}
if (rnd->vbs_k.status == VIRTIO_DEV_INITIAL ||
rnd->vbs_k.status != VIRTIO_DEV_INIT_SUCCESS) {
DPRINTF(("%s: fallback to VBS-U...\n", __func__));
virtio_linkup(&rnd->base, &virtio_rnd_ops, rnd, dev, &rnd->vq);
}
rnd->base.mtx = &rnd->mtx;
rnd->vq.qsize = VIRTIO_RND_RINGSZ;
/* keep /dev/random opened while emulating */
rnd->fd = fd;
/* initialize config space */
pci_set_cfgdata16(dev, PCIR_DEVICE, VIRTIO_DEV_RANDOM);
pci_set_cfgdata16(dev, PCIR_VENDOR, VIRTIO_VENDOR);
pci_set_cfgdata8(dev, PCIR_CLASS, PCIC_CRYPTO);
pci_set_cfgdata16(dev, PCIR_SUBDEV_0, VIRTIO_TYPE_ENTROPY);
pci_set_cfgdata16(dev, PCIR_SUBVEND_0, VIRTIO_VENDOR);
if (virtio_interrupt_init(&rnd->base, virtio_uses_msix())) {
if (rnd)
free(rnd);
return -1;
}
virtio_set_io_bar(&rnd->base, 0);
return 0;
}
static void
virtio_rnd_deinit(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
{
struct virtio_rnd *rnd;
rnd = dev->arg;
if (rnd == NULL) {
DPRINTF(("%s: rnd is NULL\n", __func__));
return;
}
if (rnd->vbs_k.status == VIRTIO_DEV_STARTED) {
DPRINTF(("%s: deinit virtio_rnd_k!\n", __func__));
virtio_rnd_kernel_stop(rnd);
virtio_rnd_kernel_reset(rnd);
rnd->vbs_k.status = VIRTIO_DEV_INITIAL;
assert(rnd->vbs_k.fd >= 0);
close(rnd->vbs_k.fd);
rnd->vbs_k.fd = -1;
}
DPRINTF(("%s: free struct virtio_rnd!\n", __func__));
free(rnd);
}
struct pci_vdev_ops pci_ops_virtio_rnd = {
.class_name = "virtio-rnd",
.vdev_init = virtio_rnd_init,
.vdev_deinit = virtio_rnd_deinit,
.vdev_barwrite = virtio_pci_write,
.vdev_barread = virtio_pci_read
};
DEFINE_PCI_DEVTYPE(pci_ops_virtio_rnd);

View File

@@ -0,0 +1,373 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
* WatchDog Timer (WDT): emulate i6300esb PCI wdt Intel SOC devices,
* used to monitor guest OS
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>
#include <time.h>
#include <assert.h>
#include <stdbool.h>
#include "vmm.h"
#include "vmmapi.h"
#include "mevent.h"
#include "pci_core.h"
#define WDT_REG_BAR_SIZE 0x10
#define PCI_VENDOR_ID_INTEL 0x8086
#define PCI_DEVICE_ID_INTEL_ESB 0x25ab
#define ESB_CONFIG_REG 0x60 /* Config register*/
#define ESB_LOCK_REG 0x68 /* WDT lock register*/
/* Memory mapped registers */
#define ESB_TIMER1_REG 0x00 /* Timer1 value after each reset */
#define ESB_TIMER2_REG 0x04 /* Timer2 value after each reset */
#define ESB_RELOAD_REG 0x0c /* Reload register */
#define ESB_WDT_ENABLE (0x01 << 1) /* Enable WDT */
#define ESB_WDT_LOCK (0x01 << 0) /* Lock (nowayout) */
#define ESB_WDT_REBOOT (0x01 << 5) /* Enable reboot on timeout */
#define ESB_WDT_RELOAD (0x01 << 8) /* Ping/kick dog */
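/*
 * The emulated timer value counts in units of 1/512 second, so shifting
 * right by 9 bits converts a register value to (whole) seconds.
 */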
#define TIMER_TO_SECONDS(val) (val >> 9)
/* Magic constants */
#define ESB_UNLOCK1 0x80 /* Step 1 to unlock reset registers */
#define ESB_UNLOCK2 0x86 /* Step 2 to unlock reset registers */
#define WDT_TIMER_SIG 0x55AA
#define DEFAULT_MAX_TIMER_VAL 0x000FFFFF
/* for debug */
/* #define WDT_DEBUG */
#ifdef WDT_DEBUG
static FILE * dbg_file;
#define DPRINTF(format, args...) \
do { fprintf(dbg_file, format, args); fflush(dbg_file); } while (0)
#else
#define DPRINTF(format, arg...)
#endif
struct info_wdt {
bool reboot_enabled;/* "reboot" on wdt out */
bool locked; /* If true, enabled field cannot be changed. */
bool wdt_enabled; /* If true, watchdog is enabled. */
bool timer_created;
timer_t wdt_timerid;
uint32_t timer1_val;
uint32_t timer2_val;
int stage; /* stage 1 or 2. */
int unlock_state; /* unlock states 0 -> 1 -> 2 */
};
static struct info_wdt wdt_state;
static void start_wdt_timer(void);
/*
 * The WDT timer starts when the guest OS starts its watchdog service and
 * is restarted on each dog-kick / ping action.  On timeout it triggers a
 * reboot or another action in the guest OS.
*/
static void
wdt_expired_thread(union sigval v)
{
DPRINTF("wdt timer out! id=0x%x, stage=%d, reboot=%d\n",
v.sival_int, wdt_state.stage, wdt_state.reboot_enabled);
if (wdt_state.stage == 1) {
wdt_state.stage = 2;
start_wdt_timer();
} else {
if (wdt_state.reboot_enabled) {
/* watchdog timer out, set the uos to reboot */
vm_set_suspend_mode(VM_SUSPEND_RESET);
mevent_notify();
} else {
/* if not need reboot, just loop timer */
wdt_state.stage = 1;
start_wdt_timer();
}
}
}
static void
stop_wdt_timer(void)
{
struct itimerspec timer_val;
DPRINTF("%s: timer_created=%d\n", __func__, wdt_state.timer_created);
if (!wdt_state.timer_created)
return;
memset(&timer_val, 0, sizeof(struct itimerspec));
timer_settime(wdt_state.wdt_timerid, 0, &timer_val, NULL);
}
static void
delete_wdt_timer(void)
{
if (!wdt_state.timer_created)
return;
DPRINTF("%s: timer %ld deleted\n", __func__,
(uint64_t)wdt_state.wdt_timerid);
timer_delete(wdt_state.wdt_timerid);
wdt_state.timer_created = false;
}
static void
reset_wdt_timer(int seconds)
{
struct itimerspec timer_val;
DPRINTF("%s: time=%d\n", __func__, seconds);
memset(&timer_val, 0, sizeof(struct itimerspec));
timer_settime(wdt_state.wdt_timerid, 0, &timer_val, NULL);
timer_val.it_value.tv_sec = seconds;
if (timer_settime(wdt_state.wdt_timerid, 0, &timer_val, NULL) == -1) {
perror("timer_settime failed.\n");
timer_delete(wdt_state.wdt_timerid);
wdt_state.timer_created = 0;
exit(-1);
}
}
static void
start_wdt_timer(void)
{
int seconds;
struct sigevent sig_evt;
struct itimerspec timer_val;
if (!wdt_state.wdt_enabled)
return;
if (wdt_state.stage == 1)
seconds = TIMER_TO_SECONDS(wdt_state.timer1_val);
else
seconds = TIMER_TO_SECONDS(wdt_state.timer2_val);
DPRINTF("%s: created=%d, time=%d\n", __func__,
wdt_state.timer_created, seconds);
memset(&sig_evt, 0, sizeof(struct sigevent));
if (wdt_state.timer_created) {
reset_wdt_timer(seconds);
return;
}
sig_evt.sigev_value.sival_int = WDT_TIMER_SIG;
sig_evt.sigev_notify = SIGEV_THREAD;
sig_evt.sigev_notify_function = wdt_expired_thread;
if (timer_create(CLOCK_REALTIME, &sig_evt,
&wdt_state.wdt_timerid) == -1) {
perror("timer_create failed.\n");
exit(-1);
}
memset(&timer_val, 0, sizeof(struct itimerspec));
timer_val.it_value.tv_sec = seconds;
if (timer_settime(wdt_state.wdt_timerid, 0, &timer_val, NULL) == -1) {
perror("timer_settime failed.\n");
timer_delete(wdt_state.wdt_timerid);
exit(-1);
}
wdt_state.timer_created = true;
}
static int
pci_wdt_cfg_read(struct vmctx *ctx, int vcpu, struct pci_vdev *dev,
int offset, int bytes, uint32_t *rv)
{
int need_cfg = 1;
DPRINTF("%s: offset = %x, len = %d\n", __func__, offset, bytes);
if (offset == ESB_LOCK_REG && bytes == 1) {
*rv = (wdt_state.locked ? ESB_WDT_LOCK : 0) |
(wdt_state.wdt_enabled ? ESB_WDT_ENABLE : 0);
need_cfg = 0;
}
return need_cfg;
}
static int
pci_wdt_cfg_write(struct vmctx *ctx, int vcpu, struct pci_vdev *dev,
int offset, int bytes, uint32_t val)
{
bool old_flag;
int need_cfg = 1;
DPRINTF("%s: offset = %x, len = %d, val = 0x%x\n",
__func__, offset, bytes, val);
if (offset == ESB_CONFIG_REG && bytes == 2) {
wdt_state.reboot_enabled = ((val & ESB_WDT_REBOOT) == 0);
need_cfg = 0;
} else if (offset == ESB_LOCK_REG && bytes == 1) {
if (!wdt_state.locked) {
wdt_state.locked = ((val & ESB_WDT_LOCK) != 0);
old_flag = wdt_state.wdt_enabled;
wdt_state.wdt_enabled = ((val & ESB_WDT_ENABLE) != 0);
if (!old_flag && wdt_state.wdt_enabled) {
wdt_state.stage = 1;
start_wdt_timer();
} else if (!wdt_state.wdt_enabled)
stop_wdt_timer();
}
need_cfg = 0;
}
return need_cfg;
}
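/*
 * BAR write handler.  The guest unlocks the reload/timer registers by
 * writing ESB_UNLOCK1 (0x80) and then ESB_UNLOCK2 (0x86) to the reload
 * register; only after that does a reload kick or a timer value write
 * take effect.
 */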
static void
pci_wdt_bar_write(struct vmctx *ctx, int vcpu, struct pci_vdev *dev,
int baridx, uint64_t offset, int size, uint64_t value)
{
assert(baridx == 0);
DPRINTF("%s: addr = 0x%x, val = 0x%x, size=%d\n",
__func__, (int) offset, (int)value, size);
if (offset == ESB_RELOAD_REG) {
assert(size == 2);
if (value == ESB_UNLOCK1)
wdt_state.unlock_state = 1;
else if ((value == ESB_UNLOCK2)
&& (wdt_state.unlock_state == 1))
wdt_state.unlock_state = 2;
else if ((wdt_state.unlock_state == 2)
&& (value & ESB_WDT_RELOAD)) {
wdt_state.stage = 1;
start_wdt_timer();
wdt_state.unlock_state = 0;
}
} else if (wdt_state.unlock_state == 2) {
if (offset == ESB_TIMER1_REG)
wdt_state.timer1_val = value & DEFAULT_MAX_TIMER_VAL;
else if (offset == ESB_TIMER2_REG)
wdt_state.timer2_val = value & DEFAULT_MAX_TIMER_VAL;
wdt_state.unlock_state = 0;
}
}
uint64_t
pci_wdt_bar_read(struct vmctx *ctx, int vcpu, struct pci_vdev *dev,
int baridx, uint64_t offset, int size)
{
assert(baridx == 0);
DPRINTF("%s: addr = 0x%x, size=%d\n", __func__, (int) offset, size);
return 0;
}
static int
pci_wdt_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
{
/* the wdt has just one instance */
if (wdt_state.reboot_enabled && wdt_state.timer1_val) {
perror("wdt can't be created twice, please check!");
return -1;
}
/* init wdt state info */
wdt_state.reboot_enabled = true;
wdt_state.locked = false;
wdt_state.timer_created = false;
wdt_state.wdt_enabled = false;
wdt_state.stage = 1;
wdt_state.timer1_val = DEFAULT_MAX_TIMER_VAL;
wdt_state.timer2_val = DEFAULT_MAX_TIMER_VAL;
wdt_state.unlock_state = 0;
pci_emul_alloc_bar(dev, 0, PCIBAR_MEM32, WDT_REG_BAR_SIZE);
/* initialize config space */
pci_set_cfgdata16(dev, PCIR_VENDOR, PCI_VENDOR_ID_INTEL);
pci_set_cfgdata16(dev, PCIR_DEVICE, PCI_DEVICE_ID_INTEL_ESB);
pci_set_cfgdata8(dev, PCIR_CLASS, PCIC_BASEPERIPH);
pci_set_cfgdata8(dev, PCIR_SUBCLASS, PCIS_BASEPERIPH_OTHER);
#ifdef WDT_DEBUG
dbg_file = fopen("/tmp/wdt_log", "w+");
#endif
DPRINTF("%s: iobar =0x%lx, size=%ld\n", __func__,
dev->bar[0].addr, dev->bar[0].size);
return 0;
}
static void
pci_wdt_deinit(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
{
delete_wdt_timer();
memset(&wdt_state, 0, sizeof(wdt_state));
}
struct pci_vdev_ops pci_ops_wdt = {
.class_name = "wdt-i6300esb",
.vdev_init = pci_wdt_init,
.vdev_deinit = pci_wdt_deinit,
.vdev_cfgwrite = pci_wdt_cfg_write,
.vdev_cfgread = pci_wdt_cfg_read,
.vdev_barwrite = pci_wdt_bar_write,
.vdev_barread = pci_wdt_bar_read
};
DEFINE_PCI_DEVTYPE(pci_ops_wdt);

2917
devicemodel/hw/pci/xhci.c Normal file

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,387 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include "vmm.h"
#include "vmmapi.h"
#include "dm.h"
#include "acpi.h"
static inline int get_vcpu_pm_info(struct vmctx *ctx, int vcpu_id,
uint64_t pm_type, uint64_t *pm_info)
{
*pm_info = ((ctx->vmid << PMCMD_VMID_SHIFT) & PMCMD_VMID_MASK)
| ((vcpu_id << PMCMD_VCPUID_SHIFT) & PMCMD_VCPUID_MASK)
| (pm_type & PMCMD_TYPE_MASK);
return vm_get_cpu_state(ctx, pm_info);
}
static inline uint8_t get_vcpu_px_cnt(struct vmctx *ctx, int vcpu_id)
{
uint64_t px_cnt;
if (get_vcpu_pm_info(ctx, vcpu_id, PMCMD_GET_PX_CNT, &px_cnt)) {
return 0;
}
return (uint8_t)px_cnt;
}
uint8_t get_vcpu_cx_cnt(struct vmctx *ctx, int vcpu_id)
{
uint64_t cx_cnt;
if (get_vcpu_pm_info(ctx, vcpu_id, PMCMD_GET_CX_CNT, &cx_cnt)) {
return 0;
}
return (uint8_t)cx_cnt;
}
static int get_vcpu_px_data(struct vmctx *ctx, int vcpu_id,
int px_num, struct cpu_px_data *vcpu_px_data)
{
uint64_t *pm_ioctl_buf;
enum pm_cmd_type cmd_type = PMCMD_GET_PX_DATA;
pm_ioctl_buf = malloc(sizeof(struct cpu_px_data));
if (!pm_ioctl_buf) {
return -1;
}
*pm_ioctl_buf = ((ctx->vmid << PMCMD_VMID_SHIFT) & PMCMD_VMID_MASK)
| ((vcpu_id << PMCMD_VCPUID_SHIFT) & PMCMD_VCPUID_MASK)
| ((px_num << PMCMD_STATE_NUM_SHIFT) & PMCMD_STATE_NUM_MASK)
| cmd_type;
/* get and validate px data */
if (vm_get_cpu_state(ctx, pm_ioctl_buf)) {
free(pm_ioctl_buf);
return -1;
}
memcpy(vcpu_px_data, pm_ioctl_buf,
sizeof(struct cpu_px_data));
free(pm_ioctl_buf);
return 0;
}
int get_vcpu_cx_data(struct vmctx *ctx, int vcpu_id,
int cx_num, struct cpu_cx_data *vcpu_cx_data)
{
uint64_t *pm_ioctl_buf;
enum pm_cmd_type cmd_type = PMCMD_GET_CX_DATA;
pm_ioctl_buf = malloc(sizeof(struct cpu_cx_data));
if (!pm_ioctl_buf) {
return -1;
}
*pm_ioctl_buf = ((ctx->vmid << PMCMD_VMID_SHIFT) & PMCMD_VMID_MASK)
| ((vcpu_id << PMCMD_VCPUID_SHIFT) & PMCMD_VCPUID_MASK)
| ((cx_num << PMCMD_STATE_NUM_SHIFT) & PMCMD_STATE_NUM_MASK)
| cmd_type;
/* get and validate cx data */
if (vm_get_cpu_state(ctx, pm_ioctl_buf)) {
free(pm_ioctl_buf);
return -1;
}
memcpy(vcpu_cx_data, pm_ioctl_buf,
sizeof(struct cpu_cx_data));
free(pm_ioctl_buf);
return 0;
}
char *_asi_table[7] = { "SystemMemory",
"SystemIO",
"PCI_Config",
"EmbeddedControl",
"SMBus",
"PCC",
"FFixedHW"};
static char *get_asi_string(uint8_t space_id)
{
switch (space_id) {
case SPACE_SYSTEM_MEMORY:
return _asi_table[0];
case SPACE_SYSTEM_IO:
return _asi_table[1];
case SPACE_PCI_CONFIG:
return _asi_table[2];
case SPACE_Embedded_Control:
return _asi_table[3];
case SPACE_SMBUS:
return _asi_table[4];
case SPACE_PLATFORM_COMM:
return _asi_table[5];
case SPACE_FFixedHW:
return _asi_table[6];
default:
return NULL;
}
}
/* _CST: C-States
*/
void dsdt_write_cst(struct vmctx *ctx, int vcpu_id)
{
int i;
uint8_t vcpu_cx_cnt;
char *cx_asi;
struct acpi_generic_address cx_reg;
struct cpu_cx_data *vcpu_cx_data;
vcpu_cx_cnt = get_vcpu_cx_cnt(ctx, vcpu_id);
if (!vcpu_cx_cnt) {
return;
}
/* vcpu_cx_data starts from C1; cx_cnt is the total number of Cx entries. */
vcpu_cx_data = malloc(vcpu_cx_cnt * sizeof(struct cpu_cx_data));
if (!vcpu_cx_data) {
return;
}
/* copy and validate cx data first */
for (i = 1; i <= vcpu_cx_cnt; i++) {
if (get_vcpu_cx_data(ctx, vcpu_id, i, vcpu_cx_data + i - 1)) {
/* something must be wrong, so skip the write. */
free(vcpu_cx_data);
return;
}
}
dsdt_line("");
dsdt_line(" Method (_CST, 0, NotSerialized)");
dsdt_line(" {");
dsdt_line(" Return (Package (0x%02X)", vcpu_cx_cnt + 1);
dsdt_line(" {");
dsdt_line(" 0x%02X,", vcpu_cx_cnt);
for (i = 0; i < vcpu_cx_cnt; i++) {
dsdt_line(" Package (0x04)");
dsdt_line(" {");
cx_reg = (vcpu_cx_data + i)->cx_reg;
cx_asi = get_asi_string(cx_reg.space_id);
dsdt_line(" ResourceTemplate ()");
dsdt_line(" {");
dsdt_line(" Register (%s,", cx_asi);
dsdt_line(" 0x%02x,", cx_reg.bit_width);
dsdt_line(" 0x%02x,", cx_reg.bit_offset);
dsdt_line(" 0x%016lx,", cx_reg.address);
dsdt_line(" 0x%02x,", cx_reg.access_size);
dsdt_line(" )");
dsdt_line(" },");
dsdt_line(" 0x%04X,", (vcpu_cx_data + i)->type);
dsdt_line(" 0x%04X,", (vcpu_cx_data + i)->latency);
dsdt_line(" 0x%04X", (vcpu_cx_data + i)->power);
if (i == (vcpu_cx_cnt - 1)) {
dsdt_line(" }");
} else {
dsdt_line(" },");
}
}
dsdt_line(" })");
dsdt_line(" }");
free(vcpu_cx_data);
}
/* _PPC: Performance Present Capabilities
 * _PPC is hard-coded to 0 so that all performance states are available.
*/
static void dsdt_write_ppc(void)
{
dsdt_line(" Name (_PPC, Zero)");
}
/* _PCT: Performance Control
 * Both the Performance Control and Status Registers are set to FFixedHW
*/
static void dsdt_write_pct(void)
{
dsdt_line(" Method (_PCT, 0, NotSerialized)");
dsdt_line(" {");
dsdt_line(" Return (Package (0x02)");
dsdt_line(" {");
dsdt_line(" ResourceTemplate ()");
dsdt_line(" {");
dsdt_line(" Register (FFixedHW,");
dsdt_line(" 0x00,");
dsdt_line(" 0x00,");
dsdt_line(" 0x0000000000000000,");
dsdt_line(" ,)");
dsdt_line(" },");
dsdt_line("");
dsdt_line(" ResourceTemplate ()");
dsdt_line(" {");
dsdt_line(" Register (FFixedHW,");
dsdt_line(" 0x00,");
dsdt_line(" 0x00,");
dsdt_line(" 0x0000000000000000,");
dsdt_line(" ,)");
dsdt_line(" }");
dsdt_line(" })");
dsdt_line(" }");
}
/* _PSS: Performance Supported States
*/
static void dsdt_write_pss(struct vmctx *ctx, int vcpu_id)
{
uint8_t vcpu_px_cnt;
int i;
struct cpu_px_data *vcpu_px_data;
vcpu_px_cnt = get_vcpu_px_cnt(ctx, vcpu_id);
if (!vcpu_px_cnt) {
return;
}
vcpu_px_data = malloc(vcpu_px_cnt * sizeof(struct cpu_px_data));
if (!vcpu_px_data) {
return;
}
/* copy and validate px data first */
for (i = 0; i < vcpu_px_cnt; i++) {
if (get_vcpu_px_data(ctx, vcpu_id, i, vcpu_px_data + i)) {
/* something must be wrong, so skip the write. */
free(vcpu_px_data);
return;
}
}
dsdt_line("");
dsdt_line(" Method (_PSS, 0, NotSerialized)");
dsdt_line(" {");
dsdt_line(" Return (Package (0x%02X)", vcpu_px_cnt);
dsdt_line(" {");
for (i = 0; i < vcpu_px_cnt; i++) {
dsdt_line(" Package (0x%02X)", 6);
dsdt_line(" {");
dsdt_line(" 0x%08X,",
(vcpu_px_data + i)->core_frequency);
dsdt_line(" 0x%08X,",
(vcpu_px_data + i)->power);
dsdt_line(" 0x%08X,",
(vcpu_px_data + i)->transition_latency);
dsdt_line(" 0x%08X,",
(vcpu_px_data + i)->bus_master_latency);
dsdt_line(" 0x%08X,",
(vcpu_px_data + i)->control);
dsdt_line(" 0x%08X",
(vcpu_px_data + i)->status);
if (i == (vcpu_px_cnt - 1)) {
dsdt_line(" }");
} else {
dsdt_line(" },");
}
}
dsdt_line(" })");
dsdt_line(" }");
free(vcpu_px_data);
}
void pm_write_dsdt(struct vmctx *ctx, int ncpu)
{
int i;
/* Scope (_PR) */
dsdt_line("");
dsdt_line(" Scope (_PR)");
dsdt_line(" {");
for (i = 0; i < ncpu; i++) {
dsdt_line(" Processor (CPU%d, 0x%02X, 0x00000000, 0x00) {}",
i, i);
}
dsdt_line(" }");
dsdt_line("");
/* Scope (_PR.CPU(N)) */
for (i = 0; i < ncpu; i++) {
dsdt_line(" Scope (_PR.CPU%d)", i);
dsdt_line(" {");
dsdt_line("");
dsdt_write_pss(ctx, i);
dsdt_write_cst(ctx, i);
/* hard code _PPC and _PCT for all vCPUs */
if (i == 0) {
dsdt_write_ppc();
dsdt_write_pct();
} else {
dsdt_line(" Method (_PPC, 0, NotSerialized)");
dsdt_line(" {");
dsdt_line(" Return (^^CPU0._PPC)");
dsdt_line(" }");
dsdt_line("");
dsdt_line(" Method (_PCT, 0, NotSerialized)");
dsdt_line(" {");
dsdt_line(" Return (^^CPU0._PCT)");
dsdt_line(" }");
dsdt_line("");
}
dsdt_line(" }");
}
}
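/*
 * Illustrative sketch of the DSDT fragment emitted above for a two-vCPU
 * guest; the package contents depend on the Px/Cx data returned by the
 * hypervisor, so the bodies below are placeholders rather than real output:
 *
 *   Scope (_PR)
 *   {
 *       Processor (CPU0, 0x00, 0x00000000, 0x00) {}
 *       Processor (CPU1, 0x01, 0x00000000, 0x00) {}
 *   }
 *
 *   Scope (_PR.CPU0)
 *   {
 *       Method (_PSS, 0, NotSerialized) { Return (Package (...) {...}) }
 *       Method (_CST, 0, NotSerialized) { Return (Package (...) {...}) }
 *       Name (_PPC, Zero)
 *       Method (_PCT, 0, NotSerialized) { Return (Package (0x02) {...}) }
 *   }
 *
 *   Scope (_PR.CPU1)
 *   {
 *       ... _PSS and _CST as above ...
 *       Method (_PPC, 0, NotSerialized) { Return (^^CPU0._PPC) }
 *       Method (_PCT, 0, NotSerialized) { Return (^^CPU0._PCT) }
 *   }
 */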

View File

@@ -0,0 +1,519 @@
/*-
* Copyright (c) 2014 Tycho Nightingale <tycho.nightingale@pluribusnetworks.com>
* Copyright (c) 2015 Nahanni Systems Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#include <sys/types.h>
#include <stdint.h>
#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <pthread.h>
#include "acpi.h"
#include "inout.h"
#include "pci_core.h"
#include "irq.h"
#include "lpc.h"
#include "atkbdc.h"
#include "ps2kbd.h"
#include "ps2mouse.h"
#include "vmm.h"
#include "vmmapi.h"
static void
atkbdc_assert_kbd_intr(struct atkbdc_base *base)
{
if ((base->ram[0] & KBD_ENABLE_KBD_INT) != 0) {
base->kbd.irq_active = true;
vm_isa_pulse_irq(base->ctx, base->kbd.irq, base->kbd.irq);
}
}
static void
atkbdc_assert_aux_intr(struct atkbdc_base *base)
{
if ((base->ram[0] & KBD_ENABLE_AUX_INT) != 0) {
base->aux.irq_active = true;
vm_isa_pulse_irq(base->ctx, base->aux.irq, base->aux.irq);
}
}
static int
atkbdc_kbd_queue_data(struct atkbdc_base *base, uint8_t val)
{
if (base->kbd.bcnt < FIFOSZ) {
base->kbd.buffer[base->kbd.bwr] = val;
base->kbd.bwr = (base->kbd.bwr + 1) % FIFOSZ;
base->kbd.bcnt++;
base->status |= KBDS_KBD_BUFFER_FULL;
base->outport |= KBDO_KBD_OUTFULL;
} else {
printf("atkbd data buffer full\n");
}
return (base->kbd.bcnt < FIFOSZ);
}
static void
atkbdc_kbd_read(struct atkbdc_base *base)
{
const uint8_t translation[256] = {
0xff, 0x43, 0x41, 0x3f, 0x3d, 0x3b, 0x3c, 0x58,
0x64, 0x44, 0x42, 0x40, 0x3e, 0x0f, 0x29, 0x59,
0x65, 0x38, 0x2a, 0x70, 0x1d, 0x10, 0x02, 0x5a,
0x66, 0x71, 0x2c, 0x1f, 0x1e, 0x11, 0x03, 0x5b,
0x67, 0x2e, 0x2d, 0x20, 0x12, 0x05, 0x04, 0x5c,
0x68, 0x39, 0x2f, 0x21, 0x14, 0x13, 0x06, 0x5d,
0x69, 0x31, 0x30, 0x23, 0x22, 0x15, 0x07, 0x5e,
0x6a, 0x72, 0x32, 0x24, 0x16, 0x08, 0x09, 0x5f,
0x6b, 0x33, 0x25, 0x17, 0x18, 0x0b, 0x0a, 0x60,
0x6c, 0x34, 0x35, 0x26, 0x27, 0x19, 0x0c, 0x61,
0x6d, 0x73, 0x28, 0x74, 0x1a, 0x0d, 0x62, 0x6e,
0x3a, 0x36, 0x1c, 0x1b, 0x75, 0x2b, 0x63, 0x76,
0x55, 0x56, 0x77, 0x78, 0x79, 0x7a, 0x0e, 0x7b,
0x7c, 0x4f, 0x7d, 0x4b, 0x47, 0x7e, 0x7f, 0x6f,
0x52, 0x53, 0x50, 0x4c, 0x4d, 0x48, 0x01, 0x45,
0x57, 0x4e, 0x51, 0x4a, 0x37, 0x49, 0x46, 0x54,
0x80, 0x81, 0x82, 0x41, 0x54, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff
};
uint8_t val;
uint8_t release = 0;
if (base->ram[0] & KBD_TRANSLATION) {
while (ps2kbd_read(base->ps2kbd, &val) != -1) {
if (val == 0xf0) {
release = 0x80;
continue;
} else {
val = translation[val] | release;
}
atkbdc_kbd_queue_data(base, val);
break;
}
} else {
while (base->kbd.bcnt < FIFOSZ) {
if (ps2kbd_read(base->ps2kbd, &val) != -1)
atkbdc_kbd_queue_data(base, val);
else
break;
}
}
if (((base->ram[0] & KBD_DISABLE_AUX_PORT) ||
ps2mouse_fifocnt(base->ps2mouse) == 0) && base->kbd.bcnt > 0)
atkbdc_assert_kbd_intr(base);
}
static void
atkbdc_aux_poll(struct atkbdc_base *base)
{
if (ps2mouse_fifocnt(base->ps2mouse) > 0) {
base->status |= KBDS_AUX_BUFFER_FULL | KBDS_KBD_BUFFER_FULL;
base->outport |= KBDO_AUX_OUTFULL;
atkbdc_assert_aux_intr(base);
}
}
static void
atkbdc_kbd_poll(struct atkbdc_base *base)
{
atkbdc_kbd_read(base);
}
static void
atkbdc_poll(struct atkbdc_base *base)
{
atkbdc_aux_poll(base);
atkbdc_kbd_poll(base);
}
static void
atkbdc_dequeue_data(struct atkbdc_base *base, uint8_t *buf)
{
if (ps2mouse_read(base->ps2mouse, buf) == 0) {
if (ps2mouse_fifocnt(base->ps2mouse) == 0) {
if (base->kbd.bcnt == 0)
base->status &= ~(KBDS_AUX_BUFFER_FULL |
KBDS_KBD_BUFFER_FULL);
else
base->status &= ~(KBDS_AUX_BUFFER_FULL);
base->outport &= ~KBDO_AUX_OUTFULL;
}
atkbdc_poll(base);
return;
}
if (base->kbd.bcnt > 0) {
*buf = base->kbd.buffer[base->kbd.brd];
base->kbd.brd = (base->kbd.brd + 1) % FIFOSZ;
base->kbd.bcnt--;
if (base->kbd.bcnt == 0) {
base->status &= ~KBDS_KBD_BUFFER_FULL;
base->outport &= ~KBDO_KBD_OUTFULL;
}
atkbdc_poll(base);
}
if (ps2mouse_fifocnt(base->ps2mouse) == 0 && base->kbd.bcnt == 0)
base->status &= ~(KBDS_AUX_BUFFER_FULL | KBDS_KBD_BUFFER_FULL);
}
static int
atkbdc_data_handler(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
uint32_t *eax, void *arg)
{
struct atkbdc_base *base;
uint8_t buf;
int retval;
if (bytes != 1)
return -1;
base = arg;
retval = 0;
pthread_mutex_lock(&base->mtx);
if (in) {
base->curcmd = 0;
if (base->ctrlbyte != 0) {
*eax = base->ctrlbyte & 0xff;
base->ctrlbyte = 0;
} else {
/* read device buffer; includes kbd cmd responses */
atkbdc_dequeue_data(base, &buf);
*eax = buf;
}
base->status &= ~KBDS_CTRL_FLAG;
pthread_mutex_unlock(&base->mtx);
return retval;
}
if (base->status & KBDS_CTRL_FLAG) {
/*
* Command byte for the controller.
*/
switch (base->curcmd) {
case KBDC_SET_COMMAND_BYTE:
base->ram[0] = *eax;
if (base->ram[0] & KBD_SYS_FLAG_BIT)
base->status |= KBDS_SYS_FLAG;
else
base->status &= ~KBDS_SYS_FLAG;
break;
case KBDC_WRITE_OUTPORT:
base->outport = *eax;
break;
case KBDC_WRITE_TO_AUX:
ps2mouse_write(base->ps2mouse, *eax, 0);
atkbdc_poll(base);
break;
case KBDC_WRITE_KBD_OUTBUF:
atkbdc_kbd_queue_data(base, *eax);
break;
case KBDC_WRITE_AUX_OUTBUF:
ps2mouse_write(base->ps2mouse, *eax, 1);
base->status |= (KBDS_AUX_BUFFER_FULL |
KBDS_KBD_BUFFER_FULL);
atkbdc_aux_poll(base);
break;
default:
/* write to particular RAM byte */
if (base->curcmd >= 0x61 && base->curcmd <= 0x7f) {
int byten;
byten = (base->curcmd - 0x60) & 0x1f;
base->ram[byten] = *eax & 0xff;
}
break;
}
base->curcmd = 0;
base->status &= ~KBDS_CTRL_FLAG;
pthread_mutex_unlock(&base->mtx);
return retval;
}
/*
* Data byte for the device.
*/
ps2kbd_write(base->ps2kbd, *eax);
atkbdc_poll(base);
pthread_mutex_unlock(&base->mtx);
return retval;
}
static int
atkbdc_sts_ctl_handler(struct vmctx *ctx, int vcpu, int in, int port,
int bytes, uint32_t *eax, void *arg)
{
struct atkbdc_base *base;
int error, retval;
if (bytes != 1)
return -1;
base = arg;
retval = 0;
pthread_mutex_lock(&base->mtx);
if (in) {
/* read status register */
*eax = base->status;
pthread_mutex_unlock(&base->mtx);
return retval;
}
base->curcmd = 0;
base->status |= KBDS_CTRL_FLAG;
base->ctrlbyte = 0;
switch (*eax) {
case KBDC_GET_COMMAND_BYTE:
base->ctrlbyte = CTRL_CMD_FLAG | base->ram[0];
break;
case KBDC_TEST_CTRL:
base->ctrlbyte = CTRL_CMD_FLAG | 0x55;
break;
case KBDC_TEST_AUX_PORT:
case KBDC_TEST_KBD_PORT:
base->ctrlbyte = CTRL_CMD_FLAG | 0;
break;
case KBDC_READ_INPORT:
base->ctrlbyte = CTRL_CMD_FLAG | 0;
break;
case KBDC_READ_OUTPORT:
base->ctrlbyte = CTRL_CMD_FLAG | base->outport;
break;
case KBDC_SET_COMMAND_BYTE:
case KBDC_WRITE_OUTPORT:
case KBDC_WRITE_KBD_OUTBUF:
case KBDC_WRITE_AUX_OUTBUF:
base->curcmd = *eax;
break;
case KBDC_DISABLE_KBD_PORT:
base->ram[0] |= KBD_DISABLE_KBD_PORT;
break;
case KBDC_ENABLE_KBD_PORT:
base->ram[0] &= ~KBD_DISABLE_KBD_PORT;
if (base->kbd.bcnt > 0)
base->status |= KBDS_KBD_BUFFER_FULL;
atkbdc_poll(base);
break;
case KBDC_WRITE_TO_AUX:
base->curcmd = *eax;
break;
case KBDC_DISABLE_AUX_PORT:
base->ram[0] |= KBD_DISABLE_AUX_PORT;
ps2mouse_toggle(base->ps2mouse, 0);
base->status &= ~(KBDS_AUX_BUFFER_FULL | KBDS_KBD_BUFFER_FULL);
base->outport &= ~KBDS_AUX_BUFFER_FULL;
break;
case KBDC_ENABLE_AUX_PORT:
base->ram[0] &= ~KBD_DISABLE_AUX_PORT;
ps2mouse_toggle(base->ps2mouse, 1);
if (ps2mouse_fifocnt(base->ps2mouse) > 0)
base->status |= KBDS_AUX_BUFFER_FULL |
KBDS_KBD_BUFFER_FULL;
break;
case KBDC_RESET: /* Pulse "reset" line */
error = vm_suspend(ctx, VM_SUSPEND_RESET);
assert(error == 0 || errno == EALREADY);
break;
default:
if (*eax >= 0x21 && *eax <= 0x3f) {
/* read "byte N" from RAM */
int byten;
byten = (*eax - 0x20) & 0x1f;
base->ctrlbyte = CTRL_CMD_FLAG | base->ram[byten];
}
break;
}
pthread_mutex_unlock(&base->mtx);
if (base->ctrlbyte != 0) {
base->status |= KBDS_KBD_BUFFER_FULL;
base->status &= ~KBDS_AUX_BUFFER_FULL;
atkbdc_assert_kbd_intr(base);
} else if (ps2mouse_fifocnt(base->ps2mouse) > 0 &&
(base->ram[0] & KBD_DISABLE_AUX_PORT) == 0) {
base->status |= KBDS_AUX_BUFFER_FULL | KBDS_KBD_BUFFER_FULL;
atkbdc_assert_aux_intr(base);
} else if (base->kbd.bcnt > 0 && (base->ram[0] &
KBD_DISABLE_KBD_PORT) == 0) {
base->status |= KBDS_KBD_BUFFER_FULL;
atkbdc_assert_kbd_intr(base);
}
return retval;
}
void
atkbdc_event(struct atkbdc_base *base, int iskbd)
{
pthread_mutex_lock(&base->mtx);
if (iskbd)
atkbdc_kbd_poll(base);
else
atkbdc_aux_poll(base);
pthread_mutex_unlock(&base->mtx);
}
void
atkbdc_init(struct vmctx *ctx)
{
struct inout_port iop;
struct atkbdc_base *base;
int error;
base = calloc(1, sizeof(struct atkbdc_base));
assert(base != NULL);
base->ctx = ctx;
ctx->atkbdc_base = base;
pthread_mutex_init(&base->mtx, NULL);
bzero(&iop, sizeof(struct inout_port));
iop.name = "atkdbc";
iop.port = KBD_STS_CTL_PORT;
iop.size = 1;
iop.flags = IOPORT_F_INOUT;
iop.handler = atkbdc_sts_ctl_handler;
iop.arg = base;
error = register_inout(&iop);
assert(error == 0);
bzero(&iop, sizeof(struct inout_port));
iop.name = "atkdbc";
iop.port = KBD_DATA_PORT;
iop.size = 1;
iop.flags = IOPORT_F_INOUT;
iop.handler = atkbdc_data_handler;
iop.arg = base;
error = register_inout(&iop);
assert(error == 0);
pci_irq_reserve(KBD_DEV_IRQ);
base->kbd.irq = KBD_DEV_IRQ;
pci_irq_reserve(AUX_DEV_IRQ);
base->aux.irq = AUX_DEV_IRQ;
base->ps2kbd = ps2kbd_init(base);
base->ps2mouse = ps2mouse_init(base);
}
void
atkbdc_deinit(struct vmctx *ctx)
{
struct inout_port iop;
struct atkbdc_base *base = ctx->atkbdc_base;
ps2kbd_deinit(base);
base->ps2kbd = NULL;
ps2mouse_deinit(base);
base->ps2mouse = NULL;
bzero(&iop, sizeof(struct inout_port));
iop.name = "atkdbc";
iop.port = KBD_DATA_PORT;
iop.size = 1;
unregister_inout(&iop);
bzero(&iop, sizeof(struct inout_port));
iop.name = "atkdbc";
iop.port = KBD_STS_CTL_PORT;
iop.size = 1;
unregister_inout(&iop);
free(base);
ctx->atkbdc_base = NULL;
}
static void
atkbdc_dsdt(void)
{
dsdt_line("");
dsdt_line("Device (KBD)");
dsdt_line("{");
dsdt_line(" Name (_HID, EisaId (\"PNP0303\"))");
dsdt_line(" Name (_CRS, ResourceTemplate ()");
dsdt_line(" {");
dsdt_indent(2);
dsdt_fixed_ioport(KBD_DATA_PORT, 1);
dsdt_fixed_ioport(KBD_STS_CTL_PORT, 1);
dsdt_fixed_irq(1);
dsdt_unindent(2);
dsdt_line(" })");
dsdt_line("}");
dsdt_line("");
dsdt_line("Device (MOU)");
dsdt_line("{");
dsdt_line(" Name (_HID, EisaId (\"PNP0F13\"))");
dsdt_line(" Name (_CRS, ResourceTemplate ()");
dsdt_line(" {");
dsdt_indent(2);
dsdt_fixed_ioport(KBD_DATA_PORT, 1);
dsdt_fixed_ioport(KBD_STS_CTL_PORT, 1);
dsdt_fixed_irq(12);
dsdt_unindent(2);
dsdt_line(" })");
dsdt_line("}");
}
LPC_DSDT(atkbdc_dsdt);

View File

@@ -0,0 +1,107 @@
/*************************************************************************
* INTEL CONFIDENTIAL
* Copyright 2018 Intel Corporation
*
* The source code contained or described herein and all documents related to
* the source code ("Material") are owned by Intel Corporation or its
* suppliers or licensors. Title to the Material remains with Intel
* Corporation or its suppliers and licensors. The Material contains trade
* secrets and proprietary and confidential information of Intel or its
* suppliers and licensors. The Material is protected by worldwide copyright
* and trade secret laws and treaty provisions. No part of the Material may
* be used, copied, reproduced, modified, published, uploaded, posted,
* transmitted, distributed, or disclosed in any way without Intel's prior
* express written permission.
*
* No license under any patent, copyright, trade secret or other
* intellectual property right is granted to or conferred upon you by
* disclosure or delivery of the Materials, either expressly, by
* implication, inducement, estoppel or otherwise. Any license under such
* intellectual property rights must be express and approved by Intel in
* writing.
*************************************************************************/
/* The CMOS I/O device is used by Android guests to select reboot to
 * bootloader, reboot to recovery, or normal boot
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>
#include <time.h>
#include <assert.h>
#include "inout.h"
#define CMOS_ADDR 0x74
#define CMOS_DATA 0x75
#define CMOS_BUF_SIZE 256
#define CMOS_NAME "cmos_io"
/* #define CMOS_DEBUG */
#ifdef CMOS_DEBUG
static FILE * dbg_file;
#define DPRINTF(format, args...) \
do { fprintf(dbg_file, format, args); fflush(dbg_file); } while (0)
#else
#define DPRINTF(format, arg...)
#endif
/* CMOS buffer used to store the written/read contents;
 * it must not be cleared across a reboot
*/
static uint8_t cmos_buffer[CMOS_BUF_SIZE];
static int
cmos_io_handler(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
uint32_t *eax, void *arg)
{
static int buf_offset;
static int next_ops; /* 0 for addr, 1 for data, in pair (addr,data)*/
assert(port == CMOS_ADDR || port == CMOS_DATA);
assert(bytes == 1);
#ifdef CMOS_DEBUG
if (!dbg_file)
dbg_file = fopen("/tmp/cmos_log", "a+");
#endif
DPRINTF("%s port =0x%x, in=%d, size=%d, val=0x%x, ops=%d\n",
__func__, port, in, bytes, (uint8_t)*eax, next_ops);
if (port == CMOS_ADDR) {
/* if port is addr, ops should be 0 */
assert(next_ops == 0 && !in);
if (next_ops != 0) {
next_ops = 0;
return -1;
}
buf_offset = (uint8_t)(*eax);
next_ops = 1;
} else if (port == CMOS_DATA) {
assert(next_ops == 1);
if (next_ops != 1) {
next_ops = 0;
return -1;
}
if (in)
*eax = cmos_buffer[buf_offset];
else
cmos_buffer[buf_offset] = (uint8_t)*eax;
next_ops = 0;
}
return 0;
}
INOUT_PORT(cmos_io, CMOS_ADDR, IOPORT_F_INOUT, cmos_io_handler);
INOUT_PORT(cmos_io, CMOS_DATA, IOPORT_F_INOUT, cmos_io_handler);
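/*
 * Access pattern expected by the handler above (a sketch, using the
 * outb(value, port)/inb(port) convention from <sys/io.h>): the guest
 * writes an index byte to CMOS_ADDR and then reads or writes one data
 * byte at CMOS_DATA, e.g.
 *
 *   outb(0x12, CMOS_ADDR);       // select cmos_buffer[0x12]
 *   outb(0xAB, CMOS_DATA);       // store 0xAB at that offset
 *   outb(0x12, CMOS_ADDR);
 *   val = inb(CMOS_DATA);        // reads back 0xAB
 *
 * Out-of-order accesses trip the assertions above (or, with assertions
 * disabled, reset next_ops and make the handler return -1).
 */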

View File

@@ -0,0 +1,74 @@
/*-
* Copyright (c) 2014 Hudson River Trading LLC
* Written by: John H. Baldwin <jhb@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#include <sys/types.h>
#include <stdio.h>
#include <stdbool.h>
#include "vmm.h"
#include "vmmapi.h"
#include "ioapic.h"
#include "pci_core.h"
#include "lpc.h"
/*
* Assign PCI INTx interrupts to I/O APIC pins in a round-robin
* fashion. Note that we have no idea what the HPET is using, but the
* HPET is also programmable whereas this is intended for hardwired
* PCI interrupts.
*
* This assumes a single I/O APIC where pins >= 16 are permitted for
* PCI devices.
*/
static int pci_pins;
void
ioapic_init(struct vmctx *ctx)
{
if (vm_ioapic_pincount(ctx, &pci_pins) < 0) {
pci_pins = 0;
return;
}
/* Ignore the first 16 pins. */
if (pci_pins <= 16) {
pci_pins = 0;
return;
}
pci_pins -= 16;
}
int
ioapic_pci_alloc_irq(struct pci_vdev *dev)
{
static int last_pin;
if (pci_pins == 0)
return -1;
return (16 + (last_pin++ % pci_pins));
}
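/*
 * Example: with a virtual I/O APIC that reports 24 pins, ioapic_init()
 * leaves pci_pins = 24 - 16 = 8, and successive ioapic_pci_alloc_irq()
 * calls hand out pins 16, 17, ... 23 before wrapping back to 16, i.e.
 * PCI INTx interrupts are spread round-robin over pins 16-23.
 */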

File diff suppressed because it is too large

View File

@@ -0,0 +1,908 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
 * Carrier Board Communication (CBC) protocol stack.
*/
#include <stdio.h>
#include <stdbool.h>
#include "ioc.h"
/*
* Debug printf
*/
static int ioc_cbc_debug;
static FILE *dbg_file;
#define DPRINTF(format, arg...) \
do { if (ioc_cbc_debug && dbg_file) { fprintf(dbg_file, format, arg);\
fflush(dbg_file); } } while (0)
#define WPRINTF(format, arg...) printf(format, ##arg)
static void cbc_send_pkt(struct cbc_pkt *pkt);
/*
 * Buffer the bytes read from the virtual UART, since a single read may not
 * contain one complete CBC frame.
*/
int
cbc_copy_to_ring(const uint8_t *buf, size_t size, struct cbc_ring *ring)
{
int i, pos;
/* TODO: use memcpy or another implementation instead of copying byte by byte */
for (i = 0; i < size; i++) {
pos = (ring->tail + 1) & (CBC_RING_BUFFER_SIZE - 1);
if (pos != ring->head) {
ring->buf[ring->tail] = buf[i];
ring->tail = pos;
} else {
WPRINTF("ioc cbc ring buffer is full!!\r\n");
return -1;
}
}
return 0;
}
/*
* Drop the bytes from the ring buffer.
*/
static inline void
cbc_ring_skips(struct cbc_ring *ring, size_t bytes)
{
ring->head = (ring->head + bytes) & (CBC_RING_BUFFER_SIZE - 1);
}
/*
 * Calculate the checksum
*/
static inline uint16_t
cbc_cal_chksum(const uint8_t *data, size_t size)
{
int i;
uint16_t value = 0;
for (i = 0; i < size; i++)
value += 0x100 - *(data + i);
return value;
}
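/*
 * Worked example: for the two bytes {0x05, 0x10} the sum is
 * (0x100 - 0x05) + (0x100 - 0x10) = 0xFB + 0xF0 = 0x1EB, and only the
 * low byte (0xEB) is stored as the frame checksum; cbc_verify_chksum()
 * below recomputes the same sum over the received bytes and compares
 * its low byte against the checksum byte.
 */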
/*
* Checksum verification
*/
static inline int
cbc_verify_chksum(struct cbc_ring *ring, size_t size, uint8_t c)
{
int i, pos;
uint16_t value = 0;
for (i = 0; i < size; i++) {
pos = (ring->head + i) & (CBC_RING_BUFFER_SIZE - 1);
value += 0x100 - *(ring->buf + pos);
}
return ((value & 0xFF) == c ? 0 : -1);
}
/*
 * Align the buffer length to the given unit, padding with 0xFF if needed.
*/
static size_t
cbc_fill_padding(uint8_t *buf, size_t size, int unit)
{
size_t i, left;
size_t paddings = size;
left = size % unit;
if (left != 0) {
paddings = size + unit - left;
for (i = size - CBC_CHKSUM_SIZE; i < paddings; i++)
buf[i] = 0xFF;
}
return paddings;
}
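/*
 * Example (assuming CBC_CHKSUM_SIZE is 1): for size = 14 and unit = 4
 * the frame grows to paddings = 16 bytes and buf[13..15] are filled
 * with 0xFF; the final byte is later overwritten with the checksum in
 * cbc_pack_link(). A size that is already a multiple of unit is
 * returned unchanged.
 */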
/*
* Unpack CBC link layer data.
 * The function tries to assemble a CBC link frame, then builds a cbc_request
 * and puts it on the CBC rx queue.
*/
void
cbc_unpack_link(struct ioc_dev *ioc)
{
/*
 * To build one complete frame from the ring buffer:
 * "remains" holds the real length of the current frame and
 * "avalids" holds the number of bytes currently in the ring buffer.
 * If avalids is less than the minimum frame length or less than remains,
 * more bytes must arrive before CBC link unpacking can continue.
*/
static int remains, rx_seq_counter;
uint8_t seq, len, ext, checksum;
int avalids, frame_len, els_pos, chksum_pos;
struct cbc_ring *ring = &ioc->ring;
for (;;) {
/* Get the number of available bytes in the ring buffer */
avalids = ring->tail - ring->head;
avalids = avalids >= 0 ? avalids :
(CBC_RING_BUFFER_SIZE + avalids);
/*
 * The available bytes in the ring buffer must cover at least a
 * minimum frame, or the remaining bytes of the current frame.
*/
if (avalids < CBC_MIN_FRAME_SIZE || avalids < remains)
break;
/* Reset the remains counter once the available bytes can be parsed */
remains = 0;
/*
* If start of frame value is incorrect, drop the byte
* of ring buffer head
*/
if (ring->buf[ring->head] != CBC_SOF_VALUE) {
cbc_ring_skips(ring, 1);
continue;
}
/*
* Parse the extension, frame length and sequence
*/
els_pos = (ring->head + CBC_ELS_POS) &
(CBC_RING_BUFFER_SIZE - 1);
ext = (ring->buf[els_pos] >> CBC_EXT_OFFSET) & CBC_EXT_MASK;
len = (ring->buf[els_pos] >> CBC_LEN_OFFSET) & CBC_LEN_MASK;
seq = (ring->buf[els_pos] >> CBC_SEQ_OFFSET) & CBC_SEQ_MASK;
/* FIXME: Extension is defined in the CBC protocol but not used yet */
(void)ext;
/*
 * The CBC service block is aligned to four bytes; a length field of zero
 * means four bytes, so the length is incremented by one, and the real
 * frame length also includes all CBC protocol header lengths.
*/
len = (len + 1) * 4;
frame_len = len + CBC_LINK_HDR_SIZE + CBC_ADDR_HDR_SIZE;
/* Safety check */
if (frame_len > CBC_MAX_FRAME_SIZE) {
cbc_ring_skips(ring, 1);
continue;
}
/* Need more bytes to build one complete CBC frame */
if (avalids < frame_len) {
remains = frame_len;
continue;
}
/* Checksum verification */
chksum_pos = (ring->head + frame_len - 1) &
(CBC_RING_BUFFER_SIZE - 1);
checksum = ring->buf[chksum_pos];
if (cbc_verify_chksum(ring, frame_len - 1, checksum) != 0) {
cbc_ring_skips(ring, 1);
continue;
}
/*
* Rx sequence check
 * TODO: only a warning for now; should the frame be dropped?
*/
rx_seq_counter = (rx_seq_counter + 1) & CBC_SEQ_MASK;
if (rx_seq_counter != seq) {
WPRINTF("%s", "ioc rx sequence check failed\r\n");
rx_seq_counter = seq;
}
/* Build a cbc_request and put it on the queue */
ioc_build_request(ioc, frame_len, len);
/* Drop the bytes from the ring buffer */
cbc_ring_skips(ring, frame_len);
}
}
/*
* Find a CBC signal from CBC signal table.
*/
static inline struct cbc_signal *
cbc_find_signal(uint16_t id, struct cbc_signal *table, size_t size)
{
int i;
for (i = 0; i < size; i++) {
if (id == table[i].id)
return &table[i];
}
return NULL;
}
/*
* Find a CBC signal group from CBC signal group table.
*/
static inline struct cbc_group *
cbc_find_signal_group(uint16_t id, struct cbc_group *table, size_t size)
{
int i;
for (i = 0; i < size; i++) {
if (id == table[i].id)
return &table[i];
}
return NULL;
}
/*
* Signal length unit is bit in signal definition not byte,
* if the length is 3 bits then return 1 byte,
* if the length is 10 bits then return 2 bytes.
*/
static int
cbc_get_signal_len(uint16_t id, struct cbc_signal *table, size_t size)
{
struct cbc_signal *p;
p = cbc_find_signal(id, table, size);
return (p == NULL ? 0 : (p->len + 7)/8);
}
/*
* Set signal flag to inactive.
*/
static void
cbc_disable_signal(uint16_t id, struct cbc_signal *table, size_t size)
{
struct cbc_signal *p;
p = cbc_find_signal(id, table, size);
if (p)
p->flag = CBC_INACTIVE;
}
/*
* Set signal group flag to inactive.
*/
static void
cbc_disable_signal_group(uint16_t id, struct cbc_group *table, size_t size)
{
struct cbc_group *p;
p = cbc_find_signal_group(id, table, size);
if (p)
p->flag = CBC_INACTIVE;
}
/*
* Search one cbc_signal with signal id in the whitelist table.
*/
static struct cbc_signal *
wlist_find_signal(uint16_t id, struct wlist_signal *list, size_t size)
{
int i;
for (i = 0; i < size; i++) {
if (id == list[i].id)
return list[i].sig;
}
return NULL;
}
/*
* Search one cbc_group with group id in the whitelist table.
*/
static struct
cbc_group *wlist_find_group(uint16_t id, struct wlist_group *list, size_t size)
{
int i;
for (i = 0; i < size; i++) {
if (id == list[i].id)
return list[i].grp;
}
return NULL;
}
/*
* Whitelist verification for a signal.
*/
static int
wlist_verify_signal(uint16_t id, struct wlist_signal *list, size_t size)
{
struct cbc_signal *sig;
sig = wlist_find_signal(id, list, size);
if (!sig || sig->flag == CBC_INACTIVE)
return -1;
return 0;
}
/*
 * Whitelist verification for a signal group.
*/
static int
wlist_verify_group(uint16_t id, struct wlist_group *list, size_t size)
{
struct cbc_group *grp;
grp = wlist_find_group(id, list, size);
if (!grp || grp->flag == CBC_INACTIVE)
return -1;
return 0;
}
/*
* CBC invalidates signals/groups.
*/
static void
cbc_set_invalidation(struct cbc_pkt *pkt, int type)
{
int i;
uint8_t *payload;
uint8_t num;
uint16_t id;
payload = pkt->req->buf + CBC_PAYLOAD_POS;
/* Number of signals or groups */
num = payload[1];
/*
 * Safety check.
 * Each signal/group id takes 2 bytes, plus 2 bytes of service header;
 * the resulting service length must be less than the maximum service size.
*/
if ((num * 2 + 2) >= CBC_MAX_SERVICE_SIZE) {
DPRINTF("ioc cbc group number is invalid, number is %d\r\n",
num);
return;
}
for (i = 0; i < num; i++) {
id = payload[i * 2 + 2] | payload[i * 2 + 3] << 8;
if (type == CBC_INVAL_T_SIGNAL)
cbc_disable_signal(id, pkt->cfg->cbc_sig_tbl,
pkt->cfg->cbc_sig_num);
else if (type == CBC_INVAL_T_GROUP)
cbc_disable_signal_group(id, pkt->cfg->cbc_grp_tbl,
pkt->cfg->cbc_grp_num);
else
DPRINTF("%s", "ioc invalidation is not defined\r\n");
}
}
/*
* CBC multi-signal data process.
* Forwarding signal should be in whitelist, otherwise abandon the signal.
*/
static void
cbc_forward_signals(struct cbc_pkt *pkt)
{
int i, j;
int offset = 1;
uint8_t *payload = pkt->req->buf + CBC_PAYLOAD_POS;
uint8_t num = 0;
uint16_t id;
int signal_len;
int valids = 1;
for (i = 0; i < payload[0]; i++) {
id = payload[offset] | payload[offset + 1] << 8;
/* The length includes two bytes of signal ID occupation */
signal_len = cbc_get_signal_len(id, pkt->cfg->cbc_sig_tbl,
pkt->cfg->cbc_sig_num) + 2;
/* Whitelist verification */
if (wlist_verify_signal(id, pkt->cfg->wlist_sig_tbl,
pkt->cfg->wlist_sig_num) == 0) {
num++;
if (valids < offset) {
for (j = 0; j < signal_len; j++, valids++)
payload[valids] = payload[offset + j];
} else
valids += signal_len;
}
offset += signal_len;
/* Safety check */
if (offset + 1 > CBC_MAX_SERVICE_SIZE) {
DPRINTF("ioc offset=%d is error in forward signal\r\n",
offset);
return;
}
}
/* Send permitted signals */
if (num > 0) {
/*
* Set permitted signal numbers
*/
payload[0] = num;
/*
* Set multi-signal value for CBC service layer header,
* one service frame is generated completely.
*/
pkt->req->buf[CBC_SRV_POS] = CBC_SD_MULTI_SIGNAL;
pkt->req->srv_len = valids + CBC_SRV_HDR_SIZE;
/* Send the CBC packet */
cbc_send_pkt(pkt);
}
}
/*
 * Pack the CBC link header: SOF value, extension bits, frame length bits,
 * tx sequence bits, link alignment padding and the checksum byte.
*/
static void
cbc_pack_link(struct cbc_pkt *pkt)
{
static size_t tx_seq_counter;
size_t len;
uint16_t checksum = 0;
/* Safety check */
if (pkt->req->srv_len > CBC_MAX_SERVICE_SIZE) {
DPRINTF("ioc pack req with wrong service length:%d\r\n",
pkt->req->srv_len);
return;
}
/*
 * Compute the CBC frame length
 * and align it to the CBC default granularity
*/
len = pkt->req->srv_len + CBC_ADDR_HDR_SIZE + CBC_LINK_HDR_SIZE;
len = cbc_fill_padding(pkt->req->buf, len, CBC_GRANULARITY);
/* Fill start of frame */
pkt->req->buf[CBC_SOF_POS] = CBC_SOF_VALUE;
/* Fill extension bits, frame length bits and tx sequence bits */
pkt->req->buf[CBC_ELS_POS] = (CBC_EXT_VALUE & CBC_EXT_MASK)
<< CBC_EXT_OFFSET;
pkt->req->buf[CBC_ELS_POS] |= (((pkt->req->srv_len - 1)/CBC_LEN_UNIT)
& CBC_LEN_MASK) << CBC_LEN_OFFSET;
pkt->req->buf[CBC_ELS_POS] |= (tx_seq_counter & CBC_SEQ_MASK)
<< CBC_SEQ_OFFSET;
/* Fill the checksum, which is the last byte */
checksum = cbc_cal_chksum(pkt->req->buf, len - 1);
pkt->req->buf[len - 1] = checksum & 0xFF;
/* Set the CBC link frame length */
pkt->req->link_len = len;
/* Increase tx sequence */
tx_seq_counter = (tx_seq_counter + 1) & CBC_SEQ_MASK;
}
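/*
 * Resulting link frame layout (a sketch in terms of the CBC_*_POS
 * symbols; field widths are defined by the CBC protocol headers):
 *
 *   [SOF] [EXT|LEN|SEQ] [address byte: mux + prio] [service header + payload] [0xFF padding] [checksum]
 *
 * where LEN encodes (srv_len - 1) / CBC_LEN_UNIT and the checksum is the
 * low byte of cbc_cal_chksum() computed over every preceding byte of the
 * frame.
 */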
/*
* Pack CBC address layer header includes channel mux and priority.
*/
static void
cbc_pack_address(struct cbc_pkt *pkt)
{
uint8_t prio, mux;
mux = pkt->req->id;
switch (mux) {
case IOC_NATIVE_PMT:
case IOC_NATIVE_LFCC:
case IOC_NATIVE_SIGNAL:
case IOC_NATIVE_DLT:
prio = CBC_PRIO_HIGH;
break;
case IOC_NATIVE_DIAG:
prio = CBC_PRIO_LOW;
break;
default:
prio = CBC_PRIO_MEDIUM;
break;
}
pkt->req->buf[CBC_ADDR_POS] = ((mux & CBC_MUX_MASK) << CBC_MUX_OFFSET);
pkt->req->buf[CBC_ADDR_POS] |=
((prio & CBC_PRIO_MASK)<<CBC_PRIO_OFFSET);
}
/*
 * Send the CBC packet buffer to the IOC xmit channel.
 * TODO: the rx/tx threads share this function; it would be better to separate them.
*/
static void
cbc_send_pkt(struct cbc_pkt *pkt)
{
int rc;
uint8_t *data;
size_t len;
enum ioc_ch_id id;
/*
 * A link_len of 0 means the packet came from a CBC cdev and carries no
 * link layer data, so it is transmitted to the virtual UART.
 * Otherwise, the packet is transmitted to the CBC cdev.
*/
if (pkt->req->link_len == 0) {
cbc_pack_address(pkt);
cbc_pack_link(pkt);
id = IOC_VIRTUAL_UART;
data = pkt->req->buf;
len = pkt->req->link_len;
} else {
id = pkt->req->id;
data = pkt->req->buf + CBC_SRV_POS;
len = pkt->req->srv_len;
}
rc = ioc_ch_xmit(id, data, len);
if (rc < 0)
DPRINTF("ioc xmit failed on channel id=%d\n\r", id);
}
/*
* Update heartbeat state.
*/
static void
cbc_update_heartbeat(struct cbc_pkt *pkt, uint8_t cmd, uint8_t sus_action)
{
uint8_t stat;
(void) sus_action;
/*
 * If the heartbeat state switches (active/inactive), update the state
 * value based on the heartbeat command.
* The default state is inactive.
*/
if (cmd == CBC_HB_ACTIVE || cmd == CBC_HB_STANDBY ||
cmd == CBC_HB_INITIAL)
stat = 1;
else
stat = 0;
/* Default heartbeat state is zero, that means not active */
if (stat != pkt->hb_state) {
/*
* Route the cbc request to the tx thread
* and request type is SOC state update
*/
pkt->qtype = CBC_QUEUE_T_TX;
pkt->req->rtype = CBC_REQ_T_SOC;
pkt->req->buf[0] = stat;
pkt->hb_state = stat;
}
}
/*
 * Update the wakeup reason value and notify the UOS immediately.
 * Events that can change the wakeup reason include the periodic wakeup
 * reason from IOC firmware, the IOC bootup reason, heartbeat state changes
 * and VMM callbacks.
*/
static void
cbc_update_wakeup_reason(struct cbc_pkt *pkt, uint32_t reason)
{
uint8_t *payload;
/* TODO: VMM S3/S5 requests are not implemented yet */
if (pkt->soc_active) {
pkt->boot_reason = 0;
reason |= CBC_WK_RSN_SOC;
} else
reason &= ~CBC_WK_RSN_SOC;
reason &= CBC_WK_RSN_ALL;
if (pkt->boot_reason != 0)
reason = pkt->boot_reason;
pkt->reason = reason;
/* Wakeup reason only has three bytes in CBC payload */
payload = pkt->req->buf + CBC_PAYLOAD_POS;
payload[0] = reason;
payload[1] = reason >> 8;
payload[2] = reason >> 16;
/* For CBC address layer header packing */
pkt->req->id = IOC_NATIVE_LFCC;
/* Fill service header */
pkt->req->buf[CBC_SRV_POS] = CBC_SC_WK_RSN;
pkt->req->srv_len = 4;
pkt->req->link_len = 0;
/* Send the CBC packet */
cbc_send_pkt(pkt);
}
/*
 * CBC service lifecycle processing.
 * FIXME: called from both rx and tx threads; separating into two functions would be better.
*/
static void
cbc_process_lifecycle(struct cbc_pkt *pkt)
{
uint8_t cmd;
uint8_t *payload;
uint32_t reason;
cmd = pkt->req->buf[CBC_SRV_POS];
payload = pkt->req->buf + CBC_PAYLOAD_POS;
switch (cmd) {
case CBC_SC_WK_RSN:
reason = payload[0] | (payload[1] << 8) | (payload[2] << 16);
cbc_update_wakeup_reason(pkt, reason);
break;
case CBC_SC_HB:
cbc_update_heartbeat(pkt, payload[0], payload[1]);
break;
default:
DPRINTF("ioc lifecycle command=%d can not be handled\r\n",
cmd);
break;
}
}
/*
 * CBC service signal data processing.
 * FIXME: called from both rx and tx threads; separating into two functions would be better.
*/
static void
cbc_process_signal(struct cbc_pkt *pkt)
{
/*
 * TODO: move is_active into the pkt structure instead of a local static
 * variable once cbc_process_signal is separated.
*/
static bool is_active;
uint8_t cmd;
uint8_t *payload;
uint16_t id;
payload = pkt->req->buf + CBC_PAYLOAD_POS;
cmd = pkt->req->buf[CBC_SRV_POS];
/*
 * FIXME: separate the logic into two functions.
 * A link_len of 0 means the packet is destined for the PTY (UART DM);
 * if the signal channel is not active, do not transmit it to the PTY.
 * Packets going to the CBC cdevs are always forwarded, because the
 * signal channel status only applies to the UOS.
*/
if (pkt->req->link_len == 0 && is_active == false &&
(cmd == CBC_SD_SINGLE_SIGNAL ||
cmd == CBC_SD_MULTI_SIGNAL ||
cmd == CBC_SD_GROUP_SIGNAL))
return;
switch (cmd) {
/* Bidirectional command */
case CBC_SD_SINGLE_SIGNAL:
id = payload[0] | payload[1] << 8;
if (wlist_verify_signal(id, pkt->cfg->wlist_sig_tbl,
pkt->cfg->wlist_sig_num) == 0)
cbc_send_pkt(pkt);
break;
/* Bidirectional command */
case CBC_SD_MULTI_SIGNAL:
cbc_forward_signals(pkt);
break;
/* Bidirectional command */
case CBC_SD_GROUP_SIGNAL:
id = payload[0] | payload[1] << 8;
if (wlist_verify_group(id, pkt->cfg->wlist_grp_tbl,
pkt->cfg->wlist_grp_num) == 0)
cbc_send_pkt(pkt);
break;
/* Bidirectional command */
case CBC_SD_INVAL_SSIG:
id = payload[0] | payload[1] << 8;
cbc_disable_signal(id, pkt->cfg->cbc_sig_tbl,
pkt->cfg->cbc_sig_num);
break;
/* Bidirectional command */
case CBC_SD_INVAL_MSIG:
cbc_set_invalidation(pkt, CBC_INVAL_T_SIGNAL);
break;
/* Bidirectional command */
case CBC_SD_INVAL_SGRP:
id = payload[0] | payload[1] << 8;
cbc_disable_signal_group(id, pkt->cfg->cbc_grp_tbl,
pkt->cfg->cbc_grp_num);
break;
/* Bidirectional command */
case CBC_SD_INVAL_MGRP:
cbc_set_invalidation(pkt, CBC_INVAL_T_GROUP);
break;
/*
 * FIXME: separate into rx signal processing.
 * Open/reset/close are not bidirectional operations;
 * they are only handled by the IOC rx thread.
*/
case CBC_SD_OPEN_CHANNEL:
case CBC_SD_RESET_CHANNEL:
is_active = true;
break;
case CBC_SD_CLOSE_CHANNEL:
is_active = false;
break;
default:
DPRINTF("ioc got a new operation of signal channel=%d\r\n",
cmd);
break;
}
}
/*
 * The rx handler mainly processes the rx direction data flow;
 * the rx direction is virtual UART -> native CBC cdevs.
*/
void
cbc_rx_handler(struct cbc_pkt *pkt)
{
uint8_t mux, prio;
/*
 * FIXME: the CBC request type needs to be checked in the rx handler;
 * a simple check is enough for now, but it should be expanded in the future.
*/
if (pkt->req->rtype != CBC_REQ_T_PROT)
return;
/*
 * TODO: use this prio to enable a dynamic CBC priority configuration
 * feature in the future; it is currently ignored.
*/
prio = (pkt->req->buf[CBC_ADDR_POS] >> CBC_PRIO_OFFSET) & CBC_PRIO_MASK;
(void) prio;
mux = (pkt->req->buf[CBC_ADDR_POS] >> CBC_MUX_OFFSET) & CBC_MUX_MASK;
pkt->req->id = mux;
switch (mux) {
case IOC_NATIVE_LFCC:
cbc_process_lifecycle(pkt);
break;
case IOC_NATIVE_SIGNAL:
cbc_process_signal(pkt);
break;
/* Forward directly */
case IOC_NATIVE_RAW0 ... IOC_NATIVE_RAW11:
cbc_send_pkt(pkt);
break;
default:
DPRINTF("ioc unpack wrong channel=%d\r\n", mux);
break;
}
}
/*
 * The tx handler mainly processes the tx direction data flow;
 * the tx direction is native CBC cdevs -> virtual UART.
*/
void
cbc_tx_handler(struct cbc_pkt *pkt)
{
if (pkt->req->rtype == CBC_REQ_T_PROT) {
switch (pkt->req->id) {
case IOC_NATIVE_LFCC:
cbc_process_lifecycle(pkt);
break;
case IOC_NATIVE_SIGNAL:
cbc_process_signal(pkt);
break;
case IOC_NATIVE_RAW0 ... IOC_NATIVE_RAW11:
cbc_send_pkt(pkt);
break;
default:
DPRINTF("ioc cbc tx handler got invalid channel=%d\r\n",
pkt->req->id);
break;
}
} else if (pkt->req->rtype == CBC_REQ_T_SOC) {
/*
 * Update the wakeup reasons with the new SoC state;
 * the new state is set by a heartbeat state change
 * (active/inactive) in the rx thread.
*/
pkt->soc_active = pkt->req->buf[0];
cbc_update_wakeup_reason(pkt, pkt->reason);
} else {
/* TODO: process other request types */
DPRINTF("ioc invalid cbc_request type in tx:%d\r\n",
pkt->req->rtype);
}
}
/*
 * Initialize the whitelist nodes with cbc_group pointers,
 * so that the whitelist can access the cbc_group flag via the group id.
*/
void
wlist_init_group(struct cbc_group *cbc_tbl, size_t cbc_size,
struct wlist_group *wlist_tbl, size_t wlist_size)
{
int i, j;
if (!cbc_tbl || cbc_size == 0 || !wlist_tbl || wlist_size == 0)
return;
for (i = 0; i < wlist_size; i++) {
for (j = 0; j < cbc_size; j++) {
if (wlist_tbl[i].id == cbc_tbl[j].id) {
wlist_tbl[i].grp = &cbc_tbl[j];
break;
}
}
}
}
/*
 * Initialize the whitelist nodes with cbc_signal pointers,
 * so that the whitelist can access the cbc_signal flag via the signal id.
*/
void
wlist_init_signal(struct cbc_signal *cbc_tbl, size_t cbc_size,
struct wlist_signal *wlist_tbl, size_t wlist_size)
{
int i, j;
if (!cbc_tbl || cbc_size == 0 || !wlist_tbl || wlist_size == 0)
return;
for (i = 0; i < wlist_size; i++) {
for (j = 0; j < cbc_size; j++) {
if (wlist_tbl[i].id == cbc_tbl[j].id) {
wlist_tbl[i].sig = &cbc_tbl[j];
break;
}
}
}
}
/*
* Share log file with IOC.
*/
void
cbc_set_log_file(FILE *f)
{
dbg_file = f;
}

View File

@@ -0,0 +1,485 @@
/*-
* Copyright (c) 2015 Tycho Nightingale <tycho.nightingale@pluribusnetworks.com>
* Copyright (c) 2015 Nahanni Systems Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#include <sys/types.h>
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <pthread.h>
#include "types.h"
#include "atkbdc.h"
#include "console.h"
/* keyboard device commands */
#define PS2KC_RESET_DEV 0xff
#define PS2KC_DISABLE 0xf5
#define PS2KC_ENABLE 0xf4
#define PS2KC_SET_TYPEMATIC 0xf3
#define PS2KC_SEND_DEV_ID 0xf2
#define PS2KC_SET_SCANCODE_SET 0xf0
#define PS2KC_ECHO 0xee
#define PS2KC_SET_LEDS 0xed
#define PS2KC_BAT_SUCCESS 0xaa
#define PS2KC_ACK 0xfa
#define PS2KBD_FIFOSZ 16
struct fifo {
uint8_t buf[PS2KBD_FIFOSZ];
int rindex; /* index to read from */
int windex; /* index to write to */
int num; /* number of bytes in the fifo */
int size; /* size of the fifo */
};
struct ps2kbd_info {
struct atkbdc_base *base;
pthread_mutex_t mtx;
bool enabled;
struct fifo fifo;
uint8_t curcmd; /* current command for next byte */
};
static void
fifo_init(struct ps2kbd_info *kbd)
{
struct fifo *fifo;
fifo = &kbd->fifo;
fifo->size = sizeof(((struct fifo *)0)->buf);
}
static void
fifo_reset(struct ps2kbd_info *kbd)
{
struct fifo *fifo;
fifo = &kbd->fifo;
bzero(fifo, sizeof(struct fifo));
fifo->size = sizeof(((struct fifo *)0)->buf);
}
static void
fifo_put(struct ps2kbd_info *kbd, uint8_t val)
{
struct fifo *fifo;
fifo = &kbd->fifo;
if (fifo->num < fifo->size) {
fifo->buf[fifo->windex] = val;
fifo->windex = (fifo->windex + 1) % fifo->size;
fifo->num++;
}
}
static int
fifo_get(struct ps2kbd_info *kbd, uint8_t *val)
{
struct fifo *fifo;
fifo = &kbd->fifo;
if (fifo->num > 0) {
*val = fifo->buf[fifo->rindex];
fifo->rindex = (fifo->rindex + 1) % fifo->size;
fifo->num--;
return 0;
}
return -1;
}
int
ps2kbd_read(struct ps2kbd_info *kbd, uint8_t *val)
{
int retval;
pthread_mutex_lock(&kbd->mtx);
retval = fifo_get(kbd, val);
pthread_mutex_unlock(&kbd->mtx);
return retval;
}
void
ps2kbd_write(struct ps2kbd_info *kbd, uint8_t val)
{
pthread_mutex_lock(&kbd->mtx);
if (kbd->curcmd) {
switch (kbd->curcmd) {
case PS2KC_SET_TYPEMATIC:
fifo_put(kbd, PS2KC_ACK);
break;
case PS2KC_SET_SCANCODE_SET:
fifo_put(kbd, PS2KC_ACK);
break;
case PS2KC_SET_LEDS:
fifo_put(kbd, PS2KC_ACK);
break;
default:
fprintf(stderr, "Unhandled ps2 keyboard current "
"command byte 0x%02x\n", val);
break;
}
kbd->curcmd = 0;
} else {
switch (val) {
case 0x00:
fifo_put(kbd, PS2KC_ACK);
break;
case PS2KC_RESET_DEV:
fifo_reset(kbd);
fifo_put(kbd, PS2KC_ACK);
fifo_put(kbd, PS2KC_BAT_SUCCESS);
break;
case PS2KC_DISABLE:
kbd->enabled = false;
fifo_put(kbd, PS2KC_ACK);
break;
case PS2KC_ENABLE:
kbd->enabled = true;
fifo_reset(kbd);
fifo_put(kbd, PS2KC_ACK);
break;
case PS2KC_SET_TYPEMATIC:
kbd->curcmd = val;
fifo_put(kbd, PS2KC_ACK);
break;
case PS2KC_SEND_DEV_ID:
fifo_put(kbd, PS2KC_ACK);
fifo_put(kbd, 0xab);
fifo_put(kbd, 0x83);
break;
case PS2KC_SET_SCANCODE_SET:
kbd->curcmd = val;
fifo_put(kbd, PS2KC_ACK);
break;
case PS2KC_ECHO:
fifo_put(kbd, PS2KC_ECHO);
break;
case PS2KC_SET_LEDS:
kbd->curcmd = val;
fifo_put(kbd, PS2KC_ACK);
break;
default:
fprintf(stderr, "Unhandled ps2 keyboard command "
"0x%02x\n", val);
break;
}
}
pthread_mutex_unlock(&kbd->mtx);
}
/*
* Translate keysym to type 2 scancode and insert into keyboard buffer.
*/
static void
ps2kbd_keysym_queue(struct ps2kbd_info *kbd,
int down, uint32_t keysym)
{
/* ASCII to type 2 scancode lookup table */
const uint8_t translation[128] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x29, 0x16, 0x52, 0x26, 0x25, 0x2e, 0x3d, 0x52,
0x46, 0x45, 0x3e, 0x55, 0x41, 0x4e, 0x49, 0x4a,
0x45, 0x16, 0x1e, 0x26, 0x25, 0x2e, 0x36, 0x3d,
0x3e, 0x46, 0x4c, 0x4c, 0x41, 0x55, 0x49, 0x4a,
0x1e, 0x1c, 0x32, 0x21, 0x23, 0x24, 0x2b, 0x34,
0x33, 0x43, 0x3b, 0x42, 0x4b, 0x3a, 0x31, 0x44,
0x4d, 0x15, 0x2d, 0x1b, 0x2c, 0x3c, 0x2a, 0x1d,
0x22, 0x35, 0x1a, 0x54, 0x5d, 0x5b, 0x36, 0x4e,
0x0e, 0x1c, 0x32, 0x21, 0x23, 0x24, 0x2b, 0x34,
0x33, 0x43, 0x3b, 0x42, 0x4b, 0x3a, 0x31, 0x44,
0x4d, 0x15, 0x2d, 0x1b, 0x2c, 0x3c, 0x2a, 0x1d,
0x22, 0x35, 0x1a, 0x54, 0x5d, 0x5b, 0x0e, 0x00,
};
/* assert(pthread_mutex_isowned_np(&kbd->mtx)); */
switch (keysym) {
case 0x0 ... 0x7f:
if (!down)
fifo_put(kbd, 0xf0);
fifo_put(kbd, translation[keysym]);
break;
case 0xff08: /* Back space */
if (!down)
fifo_put(kbd, 0xf0);
fifo_put(kbd, 0x66);
break;
case 0xff09: /* Tab */
if (!down)
fifo_put(kbd, 0xf0);
fifo_put(kbd, 0x0d);
break;
case 0xff0d: /* Return */
if (!down)
fifo_put(kbd, 0xf0);
fifo_put(kbd, 0x5a);
break;
case 0xff1b: /* Escape */
if (!down)
fifo_put(kbd, 0xf0);
fifo_put(kbd, 0x76);
break;
case 0xff50: /* Home */
fifo_put(kbd, 0xe0);
if (!down)
fifo_put(kbd, 0xf0);
fifo_put(kbd, 0x6c);
break;
case 0xff51: /* Left arrow */
fifo_put(kbd, 0xe0);
if (!down)
fifo_put(kbd, 0xf0);
fifo_put(kbd, 0x6b);
break;
case 0xff52: /* Up arrow */
fifo_put(kbd, 0xe0);
if (!down)
fifo_put(kbd, 0xf0);
fifo_put(kbd, 0x75);
break;
case 0xff53: /* Right arrow */
fifo_put(kbd, 0xe0);
if (!down)
fifo_put(kbd, 0xf0);
fifo_put(kbd, 0x74);
break;
case 0xff54: /* Down arrow */
fifo_put(kbd, 0xe0);
if (!down)
fifo_put(kbd, 0xf0);
fifo_put(kbd, 0x72);
break;
case 0xff55: /* PgUp */
fifo_put(kbd, 0xe0);
if (!down)
fifo_put(kbd, 0xf0);
fifo_put(kbd, 0x7d);
break;
case 0xff56: /* PgDwn */
fifo_put(kbd, 0xe0);
if (!down)
fifo_put(kbd, 0xf0);
fifo_put(kbd, 0x7a);
break;
case 0xff57: /* End */
fifo_put(kbd, 0xe0);
if (!down)
fifo_put(kbd, 0xf0);
fifo_put(kbd, 0x69);
break;
case 0xff63: /* Ins */
fifo_put(kbd, 0xe0);
if (!down)
fifo_put(kbd, 0xf0);
fifo_put(kbd, 0x70);
break;
case 0xff8d: /* Keypad Enter */
fifo_put(kbd, 0xe0);
if (!down)
fifo_put(kbd, 0xf0);
fifo_put(kbd, 0x5a);
break;
case 0xffe1: /* Left shift */
if (!down)
fifo_put(kbd, 0xf0);
fifo_put(kbd, 0x12);
break;
case 0xffe2: /* Right shift */
if (!down)
fifo_put(kbd, 0xf0);
fifo_put(kbd, 0x59);
break;
case 0xffe3: /* Left control */
if (!down)
fifo_put(kbd, 0xf0);
fifo_put(kbd, 0x14);
break;
case 0xffe4: /* Right control */
fifo_put(kbd, 0xe0);
if (!down)
fifo_put(kbd, 0xf0);
fifo_put(kbd, 0x14);
break;
case 0xffe7: /* Left meta */
/* XXX */
break;
case 0xffe8: /* Right meta */
/* XXX */
break;
case 0xffe9: /* Left alt */
if (!down)
fifo_put(kbd, 0xf0);
fifo_put(kbd, 0x11);
break;
case 0xfe03: /* AltGr */
case 0xffea: /* Right alt */
fifo_put(kbd, 0xe0);
if (!down)
fifo_put(kbd, 0xf0);
fifo_put(kbd, 0x11);
break;
case 0xffeb: /* Left Windows */
fifo_put(kbd, 0xe0);
if (!down)
fifo_put(kbd, 0xf0);
fifo_put(kbd, 0x1f);
break;
case 0xffec: /* Right Windows */
fifo_put(kbd, 0xe0);
if (!down)
fifo_put(kbd, 0xf0);
fifo_put(kbd, 0x27);
break;
case 0xffbe: /* F1 */
if (!down)
fifo_put(kbd, 0xf0);
fifo_put(kbd, 0x05);
break;
case 0xffbf: /* F2 */
if (!down)
fifo_put(kbd, 0xf0);
fifo_put(kbd, 0x06);
break;
case 0xffc0: /* F3 */
if (!down)
fifo_put(kbd, 0xf0);
fifo_put(kbd, 0x04);
break;
case 0xffc1: /* F4 */
if (!down)
fifo_put(kbd, 0xf0);
fifo_put(kbd, 0x0C);
break;
case 0xffc2: /* F5 */
if (!down)
fifo_put(kbd, 0xf0);
fifo_put(kbd, 0x03);
break;
case 0xffc3: /* F6 */
if (!down)
fifo_put(kbd, 0xf0);
fifo_put(kbd, 0x0B);
break;
case 0xffc4: /* F7 */
if (!down)
fifo_put(kbd, 0xf0);
fifo_put(kbd, 0x83);
break;
case 0xffc5: /* F8 */
if (!down)
fifo_put(kbd, 0xf0);
fifo_put(kbd, 0x0A);
break;
case 0xffc6: /* F9 */
if (!down)
fifo_put(kbd, 0xf0);
fifo_put(kbd, 0x01);
break;
case 0xffc7: /* F10 */
if (!down)
fifo_put(kbd, 0xf0);
fifo_put(kbd, 0x09);
break;
case 0xffc8: /* F11 */
if (!down)
fifo_put(kbd, 0xf0);
fifo_put(kbd, 0x78);
break;
case 0xffc9: /* F12 */
if (!down)
fifo_put(kbd, 0xf0);
fifo_put(kbd, 0x07);
break;
case 0xffff: /* Del */
fifo_put(kbd, 0xe0);
if (!down)
fifo_put(kbd, 0xf0);
fifo_put(kbd, 0x71);
break;
default:
fprintf(stderr, "Unhandled ps2 keyboard keysym 0x%x\n",
keysym);
break;
}
}
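/*
 * Example: pressing and releasing the 'a' key (keysym 0x61) queues the
 * set-2 make code 0x1C on key-down and the break sequence 0xF0 0x1C on
 * key-up; extended keys such as the arrow keys additionally queue the
 * 0xE0 prefix before their make/break bytes.
 */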
static void
ps2kbd_event(int down, uint32_t keysym, void *arg)
{
struct ps2kbd_info *kbd = arg;
int fifo_full;
pthread_mutex_lock(&kbd->mtx);
if (!kbd->enabled) {
pthread_mutex_unlock(&kbd->mtx);
return;
}
fifo_full = kbd->fifo.num == PS2KBD_FIFOSZ;
ps2kbd_keysym_queue(kbd, down, keysym);
pthread_mutex_unlock(&kbd->mtx);
if (!fifo_full)
atkbdc_event(kbd->base, 1);
}
struct ps2kbd_info *
ps2kbd_init(struct atkbdc_base *base)
{
struct ps2kbd_info *kbd;
kbd = calloc(1, sizeof(struct ps2kbd_info));
assert(kbd != NULL);
pthread_mutex_init(&kbd->mtx, NULL);
fifo_init(kbd);
kbd->base = base;
console_kbd_register(ps2kbd_event, kbd, 1);
return kbd;
}
void
ps2kbd_deinit(struct atkbdc_base *base)
{
console_kbd_unregister();
free(base->ps2kbd);
base->ps2kbd = NULL;
}

View File

@@ -0,0 +1,421 @@
/*-
* Copyright (c) 2015 Tycho Nightingale <tycho.nightingale@pluribusnetworks.com>
* Copyright (c) 2015 Nahanni Systems Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <pthread.h>
#include "types.h"
#include "atkbdc.h"
#include "console.h"
/* mouse device commands */
#define PS2MC_RESET_DEV 0xff
#define PS2MC_SET_DEFAULTS 0xf6
#define PS2MC_DISABLE 0xf5
#define PS2MC_ENABLE 0xf4
#define PS2MC_SET_SAMPLING_RATE 0xf3
#define PS2MC_SEND_DEV_ID 0xf2
#define PS2MC_SET_REMOTE_MODE 0xf0
#define PS2MC_SEND_DEV_DATA 0xeb
#define PS2MC_SET_STREAM_MODE 0xea
#define PS2MC_SEND_DEV_STATUS 0xe9
#define PS2MC_SET_RESOLUTION 0xe8
#define PS2MC_SET_SCALING1 0xe7
#define PS2MC_SET_SCALING2 0xe6
#define PS2MC_BAT_SUCCESS 0xaa
#define PS2MC_ACK 0xfa
/* mouse device id */
#define PS2MOUSE_DEV_ID 0x0
/* mouse data bits */
#define PS2M_DATA_Y_OFLOW 0x80
#define PS2M_DATA_X_OFLOW 0x40
#define PS2M_DATA_Y_SIGN 0x20
#define PS2M_DATA_X_SIGN 0x10
#define PS2M_DATA_AONE 0x08
#define PS2M_DATA_MID_BUTTON 0x04
#define PS2M_DATA_RIGHT_BUTTON 0x02
#define PS2M_DATA_LEFT_BUTTON 0x01
/* mouse status bits */
#define PS2M_STS_REMOTE_MODE 0x40
#define PS2M_STS_ENABLE_DEV 0x20
#define PS2M_STS_SCALING_21 0x10
#define PS2M_STS_MID_BUTTON 0x04
#define PS2M_STS_RIGHT_BUTTON 0x02
#define PS2M_STS_LEFT_BUTTON 0x01
#define PS2MOUSE_FIFOSZ 16
struct fifo {
uint8_t buf[PS2MOUSE_FIFOSZ];
int rindex; /* index to read from */
int windex; /* index to write to */
int num; /* number of bytes in the fifo */
int size; /* size of the fifo */
};
struct ps2mouse_info {
struct atkbdc_base *base;
pthread_mutex_t mtx;
uint8_t status;
uint8_t resolution;
uint8_t sampling_rate;
int ctrlenable;
struct fifo fifo;
uint8_t curcmd; /* current command for next byte */
int cur_x, cur_y;
int delta_x, delta_y;
};
static void
fifo_init(struct ps2mouse_info *mouse)
{
struct fifo *fifo;
fifo = &mouse->fifo;
fifo->size = sizeof(((struct fifo *)0)->buf);
}
static void
fifo_reset(struct ps2mouse_info *mouse)
{
struct fifo *fifo;
fifo = &mouse->fifo;
bzero(fifo, sizeof(struct fifo));
fifo->size = sizeof(((struct fifo *)0)->buf);
}
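/* Enqueue a byte; it is silently dropped if the FIFO is already full */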
static void
fifo_put(struct ps2mouse_info *mouse, uint8_t val)
{
struct fifo *fifo;
fifo = &mouse->fifo;
if (fifo->num < fifo->size) {
fifo->buf[fifo->windex] = val;
fifo->windex = (fifo->windex + 1) % fifo->size;
fifo->num++;
}
}
static int
fifo_get(struct ps2mouse_info *mouse, uint8_t *val)
{
struct fifo *fifo;
fifo = &mouse->fifo;
if (fifo->num > 0) {
*val = fifo->buf[fifo->rindex];
fifo->rindex = (fifo->rindex + 1) % fifo->size;
fifo->num--;
return 0;
}
return -1;
}
static void
movement_reset(struct ps2mouse_info *mouse)
{
/* assert(pthread_mutex_isowned_np(&mouse->mtx)); */
mouse->delta_x = 0;
mouse->delta_y = 0;
}
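/*
 * Accumulate relative motion; the y delta is inverted because PS/2
 * mice report y increasing upward while screen coordinates grow downward.
 */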
static void
movement_update(struct ps2mouse_info *mouse, int x, int y)
{
mouse->delta_x += x - mouse->cur_x;
mouse->delta_y += mouse->cur_y - y;
mouse->cur_x = x;
mouse->cur_y = y;
}
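/*
 * Emit the standard three-byte PS/2 movement packet (flags/buttons, X, Y),
 * clamping each delta to +/-255 and setting the overflow bits as needed.
 */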
static void
movement_get(struct ps2mouse_info *mouse)
{
uint8_t val0, val1, val2;
/* assert(pthread_mutex_isowned_np(&mouse->mtx)); */
val0 = PS2M_DATA_AONE;
val0 |= mouse->status & (PS2M_DATA_LEFT_BUTTON |
PS2M_DATA_RIGHT_BUTTON | PS2M_DATA_MID_BUTTON);
if (mouse->delta_x >= 0) {
if (mouse->delta_x > 255) {
val0 |= PS2M_DATA_X_OFLOW;
val1 = 255;
} else
val1 = mouse->delta_x;
} else {
val0 |= PS2M_DATA_X_SIGN;
if (mouse->delta_x < -255) {
val0 |= PS2M_DATA_X_OFLOW;
val1 = 255;
} else
val1 = mouse->delta_x;
}
mouse->delta_x = 0;
if (mouse->delta_y >= 0) {
if (mouse->delta_y > 255) {
val0 |= PS2M_DATA_Y_OFLOW;
val2 = 255;
} else
val2 = mouse->delta_y;
} else {
val0 |= PS2M_DATA_Y_SIGN;
if (mouse->delta_y < -255) {
val0 |= PS2M_DATA_Y_OFLOW;
val2 = 255;
} else
val2 = mouse->delta_y;
}
mouse->delta_y = 0;
if (mouse->fifo.num < (mouse->fifo.size - 3)) {
fifo_put(mouse, val0);
fifo_put(mouse, val1);
fifo_put(mouse, val2);
}
}
static void
ps2mouse_reset(struct ps2mouse_info *mouse)
{
/* assert(pthread_mutex_isowned_np(&mouse->mtx)); */
fifo_reset(mouse);
movement_reset(mouse);
mouse->status = PS2M_STS_ENABLE_DEV;
mouse->resolution = 4;
mouse->sampling_rate = 100;
mouse->cur_x = 0;
mouse->cur_y = 0;
mouse->delta_x = 0;
mouse->delta_y = 0;
}
int
ps2mouse_read(struct ps2mouse_info *mouse, uint8_t *val)
{
int retval;
pthread_mutex_lock(&mouse->mtx);
retval = fifo_get(mouse, val);
pthread_mutex_unlock(&mouse->mtx);
return retval;
}
int
ps2mouse_fifocnt(struct ps2mouse_info *mouse)
{
return mouse->fifo.num;
}
void
ps2mouse_toggle(struct ps2mouse_info *mouse, int enable)
{
pthread_mutex_lock(&mouse->mtx);
if (enable)
mouse->ctrlenable = 1;
else {
mouse->ctrlenable = 0;
mouse->fifo.rindex = 0;
mouse->fifo.windex = 0;
mouse->fifo.num = 0;
}
pthread_mutex_unlock(&mouse->mtx);
}
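/*
 * Process a byte written to the mouse by the guest: either the argument of
 * a pending two-byte command, raw data when 'insert' is set, or a new
 * command; responses (ACK plus any data) are queued in the FIFO.
 */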
void
ps2mouse_write(struct ps2mouse_info *mouse, uint8_t val, int insert)
{
pthread_mutex_lock(&mouse->mtx);
fifo_reset(mouse);
if (mouse->curcmd) {
switch (mouse->curcmd) {
case PS2MC_SET_SAMPLING_RATE:
mouse->sampling_rate = val;
fifo_put(mouse, PS2MC_ACK);
break;
case PS2MC_SET_RESOLUTION:
mouse->resolution = val;
fifo_put(mouse, PS2MC_ACK);
break;
default:
fprintf(stderr, "Unhandled ps2 mouse current "
"command byte 0x%02x\n", val);
break;
}
mouse->curcmd = 0;
} else if (insert) {
fifo_put(mouse, val);
} else {
switch (val) {
case 0x00:
fifo_put(mouse, PS2MC_ACK);
break;
case PS2MC_RESET_DEV:
ps2mouse_reset(mouse);
fifo_put(mouse, PS2MC_ACK);
fifo_put(mouse, PS2MC_BAT_SUCCESS);
fifo_put(mouse, PS2MOUSE_DEV_ID);
break;
case PS2MC_SET_DEFAULTS:
ps2mouse_reset(mouse);
fifo_put(mouse, PS2MC_ACK);
break;
case PS2MC_DISABLE:
fifo_reset(mouse);
mouse->status &= ~PS2M_STS_ENABLE_DEV;
fifo_put(mouse, PS2MC_ACK);
break;
case PS2MC_ENABLE:
fifo_reset(mouse);
mouse->status |= PS2M_STS_ENABLE_DEV;
fifo_put(mouse, PS2MC_ACK);
break;
case PS2MC_SET_SAMPLING_RATE:
mouse->curcmd = val;
fifo_put(mouse, PS2MC_ACK);
break;
case PS2MC_SEND_DEV_ID:
fifo_put(mouse, PS2MC_ACK);
fifo_put(mouse, PS2MOUSE_DEV_ID);
break;
case PS2MC_SET_REMOTE_MODE:
mouse->status |= PS2M_STS_REMOTE_MODE;
fifo_put(mouse, PS2MC_ACK);
break;
case PS2MC_SEND_DEV_DATA:
fifo_put(mouse, PS2MC_ACK);
movement_get(mouse);
break;
case PS2MC_SET_STREAM_MODE:
mouse->status &= ~PS2M_STS_REMOTE_MODE;
fifo_put(mouse, PS2MC_ACK);
break;
case PS2MC_SEND_DEV_STATUS:
fifo_put(mouse, PS2MC_ACK);
fifo_put(mouse, mouse->status);
fifo_put(mouse, mouse->resolution);
fifo_put(mouse, mouse->sampling_rate);
break;
case PS2MC_SET_RESOLUTION:
mouse->curcmd = val;
fifo_put(mouse, PS2MC_ACK);
break;
case PS2MC_SET_SCALING1:
case PS2MC_SET_SCALING2:
fifo_put(mouse, PS2MC_ACK);
break;
default:
fifo_put(mouse, PS2MC_ACK);
fprintf(stderr, "Unhandled ps2 mouse command "
"0x%02x\n", val);
break;
}
}
pthread_mutex_unlock(&mouse->mtx);
}
static void
ps2mouse_event(uint8_t button, int x, int y, void *arg)
{
struct ps2mouse_info *mouse = arg;
pthread_mutex_lock(&mouse->mtx);
movement_update(mouse, x, y);
mouse->status &= ~(PS2M_STS_LEFT_BUTTON |
PS2M_STS_RIGHT_BUTTON | PS2M_STS_MID_BUTTON);
if (button & (1 << 0))
mouse->status |= PS2M_STS_LEFT_BUTTON;
if (button & (1 << 1))
mouse->status |= PS2M_STS_MID_BUTTON;
if (button & (1 << 2))
mouse->status |= PS2M_STS_RIGHT_BUTTON;
if ((mouse->status & PS2M_STS_ENABLE_DEV) == 0 || !mouse->ctrlenable) {
/* no data reporting */
pthread_mutex_unlock(&mouse->mtx);
return;
}
movement_get(mouse);
pthread_mutex_unlock(&mouse->mtx);
if (mouse->fifo.num > 0)
atkbdc_event(mouse->base, 0);
}
struct ps2mouse_info *
ps2mouse_init(struct atkbdc_base *base)
{
struct ps2mouse_info *mouse;
mouse = calloc(1, sizeof(struct ps2mouse_info));
assert(mouse != NULL);
pthread_mutex_init(&mouse->mtx, NULL);
fifo_init(mouse);
mouse->base = base;
pthread_mutex_lock(&mouse->mtx);
ps2mouse_reset(mouse);
pthread_mutex_unlock(&mouse->mtx);
console_ptr_register(ps2mouse_event, mouse, 1);
return mouse;
}
void
ps2mouse_deinit(struct atkbdc_base *base)
{
console_ptr_unregister();
fifo_reset(base->ps2mouse);
free(base->ps2mouse);
base->ps2mouse = NULL;
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,816 @@
/*-
* Copyright (c) 2014 Leon Dang <ldang@nahannisys.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#include <sys/time.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include "types.h"
#include "usb.h"
#include "usbdi.h"
#include "usb_core.h"
#include "console.h"
#include "gc.h"
static int umouse_debug;
#define DPRINTF(params) do { if (umouse_debug) printf params; } while (0)
#define WPRINTF(params) (printf params)
/* USB endpoint context (1-15) for reporting mouse data events */
#define UMOUSE_INTR_ENDPT 1
#define UMOUSE_REPORT_DESC_TYPE 0x22
#define UMOUSE_GET_REPORT 0x01
#define UMOUSE_GET_IDLE 0x02
#define UMOUSE_GET_PROTOCOL 0x03
#define UMOUSE_SET_REPORT 0x09
#define UMOUSE_SET_IDLE 0x0A
#define UMOUSE_SET_PROTOCOL 0x0B
enum {
UMSTR_LANG,
UMSTR_MANUFACTURER,
UMSTR_PRODUCT,
UMSTR_SERIAL,
UMSTR_CONFIG,
UMSTR_MAX
};
static const char *const umouse_desc_strings[] = {
"\x04\x09",
"ACRN-DM",
"HID Tablet",
"01",
"HID Tablet Device",
};
struct umouse_hid_descriptor {
uint8_t bLength;
uint8_t bDescriptorType;
uint8_t bcdHID[2];
uint8_t bCountryCode;
uint8_t bNumDescriptors;
uint8_t bReportDescriptorType;
uint8_t wItemLength[2];
} __attribute__((packed));
struct umouse_config_desc {
struct usb_config_descriptor confd;
struct usb_interface_descriptor ifcd;
struct umouse_hid_descriptor hidd;
struct usb_endpoint_descriptor endpd;
struct usb_endpoint_ss_comp_descriptor sscompd;
} __attribute__((packed));
#define MOUSE_MAX_X 0x8000
#define MOUSE_MAX_Y 0x8000
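/* HID report descriptor: a three-button pointer with absolute X/Y and a wheel */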
static const uint8_t umouse_report_desc[] = {
0x05, 0x01, /* USAGE_PAGE (Generic Desktop) */
0x09, 0x02, /* USAGE (Mouse) */
0xa1, 0x01, /* COLLECTION (Application) */
0x09, 0x01, /* USAGE (Pointer) */
0xa1, 0x00, /* COLLECTION (Physical) */
0x05, 0x09, /* USAGE_PAGE (Button) */
0x19, 0x01, /* USAGE_MINIMUM (Button 1) */
0x29, 0x03, /* USAGE_MAXIMUM (Button 3) */
0x15, 0x00, /* LOGICAL_MINIMUM (0) */
0x25, 0x01, /* LOGICAL_MAXIMUM (1) */
0x75, 0x01, /* REPORT_SIZE (1) */
0x95, 0x03, /* REPORT_COUNT (3) */
0x81, 0x02, /* INPUT (Data,Var,Abs); 3 buttons */
0x75, 0x05, /* REPORT_SIZE (5) */
0x95, 0x01, /* REPORT_COUNT (1) */
0x81, 0x03, /* INPUT (Cnst,Var,Abs); padding */
0x05, 0x01, /* USAGE_PAGE (Generic Desktop) */
0x09, 0x30, /* USAGE (X) */
0x09, 0x31, /* USAGE (Y) */
0x35, 0x00, /* PHYSICAL_MINIMUM (0) */
0x46, 0xff, 0x7f, /* PHYSICAL_MAXIMUM (0x7fff) */
0x15, 0x00, /* LOGICAL_MINIMUM (0) */
0x26, 0xff, 0x7f, /* LOGICAL_MAXIMUM (0x7fff) */
0x75, 0x10, /* REPORT_SIZE (16) */
0x95, 0x02, /* REPORT_COUNT (2) */
0x81, 0x02, /* INPUT (Data,Var,Abs) */
0x05, 0x01, /* USAGE Page (Generic Desktop) */
0x09, 0x38, /* USAGE (Wheel) */
0x35, 0x00, /* PHYSICAL_MINIMUM (0) */
0x45, 0x00, /* PHYSICAL_MAXIMUM (0) */
0x15, 0x81, /* LOGICAL_MINIMUM (-127) */
0x25, 0x7f, /* LOGICAL_MAXIMUM (127) */
0x75, 0x08, /* REPORT_SIZE (8) */
0x95, 0x01, /* REPORT_COUNT (1) */
0x81, 0x06, /* INPUT (Data,Var,Rel) */
0xc0, /* END_COLLECTION */
0xc0 /* END_COLLECTION */
};
struct umouse_report {
uint8_t buttons; /* bits: 0 left, 1 right, 2 middle */
int16_t x; /* x position */
int16_t y; /* y position */
int8_t z; /* z wheel position */
} __attribute__((packed));
static struct usb_device_descriptor umouse_dev_desc = {
.bLength = sizeof(umouse_dev_desc),
.bDescriptorType = UDESC_DEVICE,
.bcdUSB = UD_USB_3_0,
.bMaxPacketSize = 8, /* max packet size */
.idVendor = 0xFB5D, /* vendor */
.idProduct = 0x0001,/* product */
.bcdDevice = 0, /* device version */
.iManufacturer = UMSTR_MANUFACTURER,
.iProduct = UMSTR_PRODUCT,
.iSerialNumber = UMSTR_SERIAL,
.bNumConfigurations = 1,
};
static struct umouse_config_desc umouse_confd = {
.confd = {
.bLength = sizeof(umouse_confd.confd),
.bDescriptorType = UDESC_CONFIG,
.wTotalLength = sizeof(umouse_confd),
.bNumInterface = 1,
.bConfigurationValue = 1,
.iConfiguration = UMSTR_CONFIG,
.bmAttributes = UC_BUS_POWERED | UC_REMOTE_WAKEUP,
.bMaxPower = 0,
},
.ifcd = {
.bLength = sizeof(umouse_confd.ifcd),
.bDescriptorType = UDESC_INTERFACE,
.bNumEndpoints = 1,
.bInterfaceClass = UICLASS_HID,
.bInterfaceSubClass = UISUBCLASS_BOOT,
.bInterfaceProtocol = UIPROTO_MOUSE,
},
.hidd = {
.bLength = sizeof(umouse_confd.hidd),
.bDescriptorType = 0x21,
.bcdHID = { 0x01, 0x10 },
.bCountryCode = 0,
.bNumDescriptors = 1,
.bReportDescriptorType = UMOUSE_REPORT_DESC_TYPE,
.wItemLength = { sizeof(umouse_report_desc), 0 },
},
.endpd = {
.bLength = sizeof(umouse_confd.endpd),
.bDescriptorType = UDESC_ENDPOINT,
.bEndpointAddress = UE_DIR_IN | UMOUSE_INTR_ENDPT,
.bmAttributes = UE_INTERRUPT,
.wMaxPacketSize = 8,
.bInterval = 0xA,
},
.sscompd = {
.bLength = sizeof(umouse_confd.sscompd),
.bDescriptorType = UDESC_ENDPOINT_SS_COMP,
.bMaxBurst = 0,
.bmAttributes = 0,
.wBytesPerInterval = 0,
},
};
struct umouse_bos_desc {
struct usb_bos_descriptor bosd;
struct usb_devcap_ss_descriptor usbssd;
} __attribute__((packed));
struct umouse_bos_desc umouse_bosd = {
.bosd = {
.bLength = sizeof(umouse_bosd.bosd),
.bDescriptorType = UDESC_BOS,
.wTotalLength = sizeof(umouse_bosd),
.bNumDeviceCaps = 1,
},
.usbssd = {
.bLength = sizeof(umouse_bosd.usbssd),
.bDescriptorType = UDESC_DEVICE_CAPABILITY,
.bDevCapabilityType = 3,
.bmAttributes = 0,
.wSpeedsSupported = 0x08,
.bFunctionalitySupport = 3,
.bU1DevExitLat = 0xa, /* dummy - not used */
.wU2DevExitLat = 0x20,
}
};
struct umouse_vdev {
struct usb_hci *hci;
char *opt;
struct umouse_report um_report;
int newdata;
struct {
uint8_t idle;
uint8_t protocol;
uint8_t feature;
} hid;
pthread_mutex_t mtx;
pthread_mutex_t ev_mtx;
int polling;
struct timeval prev_evt;
};
static void
umouse_event(uint8_t button, int x, int y, void *arg)
{
struct umouse_vdev *dev;
struct gfx_ctx_image *gc;
gc = console_get_image();
if (gc == NULL) {
/* not ready */
return;
}
dev = arg;
pthread_mutex_lock(&dev->mtx);
dev->um_report.buttons = 0;
dev->um_report.z = 0;
if (button & 0x01)
dev->um_report.buttons |= 0x01; /* left */
if (button & 0x02)
dev->um_report.buttons |= 0x04; /* middle */
if (button & 0x04)
dev->um_report.buttons |= 0x02; /* right */
if (button & 0x8)
dev->um_report.z = 1;
if (button & 0x10)
dev->um_report.z = -1;
/* scale coords to mouse resolution */
dev->um_report.x = MOUSE_MAX_X * x / gc->width;
dev->um_report.y = MOUSE_MAX_Y * y / gc->height;
dev->newdata = 1;
pthread_mutex_unlock(&dev->mtx);
pthread_mutex_lock(&dev->ev_mtx);
dev->hci->hci_intr(dev->hci, UE_DIR_IN | UMOUSE_INTR_ENDPT);
pthread_mutex_unlock(&dev->ev_mtx);
}
static void *
umouse_init(struct usb_hci *hci, char *opt)
{
struct umouse_vdev *dev;
dev = calloc(1, sizeof(struct umouse_vdev));
if (!dev) {
WPRINTF(("umouse: calloc returns NULL\n"));
return NULL;
}
dev->hci = hci;
dev->hid.protocol = 1; /* REPORT protocol */
dev->opt = strdup(opt);
pthread_mutex_init(&dev->mtx, NULL);
pthread_mutex_init(&dev->ev_mtx, NULL);
console_ptr_register(umouse_event, dev, 10);
return dev;
}
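/*
 * Pack bRequest (low byte) and bmRequestType (high byte) into a single
 * value so control requests can be dispatched with one switch,
 * e.g. UREQ(UR_GET_STATUS, UT_READ_DEVICE).
 */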
#define UREQ(x, y) ((x) | ((y) << 8))
static int
umouse_request(void *scarg, struct usb_data_xfer *xfer)
{
struct umouse_vdev *dev;
struct usb_data_xfer_block *data;
const char *str;
uint16_t value;
uint16_t index;
uint16_t len;
uint16_t slen;
uint8_t *udata;
int err;
int i, idx;
int eshort;
dev = scarg;
data = NULL;
udata = NULL;
assert(xfer != NULL && xfer->head >= 0);
idx = xfer->head;
for (i = 0; i < xfer->ndata; i++) {
xfer->data[idx].bdone = 0;
if (data == NULL && USB_DATA_OK(xfer, i)) {
data = &xfer->data[idx];
udata = data->buf;
}
xfer->data[idx].processed = 1;
idx = (idx + 1) % USB_MAX_XFER_BLOCKS;
}
err = USB_ERR_NORMAL_COMPLETION;
eshort = 0;
if (!xfer->ureq) {
DPRINTF(("%s: port %d\r\n", __func__, dev->hci->hci_port));
goto done;
}
value = xfer->ureq->wValue;
index = xfer->ureq->wIndex;
len = xfer->ureq->wLength;
DPRINTF(("%s: port %d, type 0x%x, req 0x%x,"
"val 0x%x, idx 0x%x, len %u\r\n", __func__,
dev->hci->hci_port, xfer->ureq->bmRequestType,
xfer->ureq->bRequest, value, index, len));
switch (UREQ(xfer->ureq->bRequest, xfer->ureq->bmRequestType)) {
case UREQ(UR_GET_CONFIG, UT_READ_DEVICE):
DPRINTF(("umouse: (UR_GET_CONFIG, UT_READ_DEVICE)\r\n"));
if (!data)
break;
*udata = umouse_confd.confd.bConfigurationValue;
data->blen = len > 0 ? len - 1 : 0;
eshort = data->blen > 0;
data->bdone += 1;
break;
case UREQ(UR_GET_DESCRIPTOR, UT_READ_DEVICE):
DPRINTF(("umouse: (UR_GET_DESCRIPTOR,UT_READ_DEVICE)"
"val %x\r\n",
value >> 8));
if (!data)
break;
switch (value >> 8) {
case UDESC_DEVICE:
DPRINTF(("umouse: (->UDESC_DEVICE) len %u"
"?= sizeof(umouse_dev_desc) %lu\r\n",
len, sizeof(umouse_dev_desc)));
if ((value & 0xFF) != 0) {
err = USB_ERR_IOERROR;
goto done;
}
if (len > sizeof(umouse_dev_desc)) {
data->blen = len - sizeof(umouse_dev_desc);
len = sizeof(umouse_dev_desc);
} else
data->blen = 0;
memcpy(data->buf, &umouse_dev_desc, len);
data->bdone += len;
break;
case UDESC_CONFIG:
DPRINTF(("umouse: (->UDESC_CONFIG)\r\n"));
if ((value & 0xFF) != 0) {
err = USB_ERR_IOERROR;
goto done;
}
if (len > sizeof(umouse_confd)) {
data->blen = len - sizeof(umouse_confd);
len = sizeof(umouse_confd);
} else
data->blen = 0;
memcpy(data->buf, &umouse_confd, len);
data->bdone += len;
break;
case UDESC_STRING:
DPRINTF(("umouse: (->UDESC_STRING)\r\n"));
str = NULL;
if ((value & 0xFF) < UMSTR_MAX)
str = umouse_desc_strings[value & 0xFF];
else
goto done;
if ((value & 0xFF) == UMSTR_LANG) {
udata[0] = 4;
udata[1] = UDESC_STRING;
data->blen = len - 2;
len -= 2;
data->bdone += 2;
if (len >= 2) {
udata[2] = str[0];
udata[3] = str[1];
data->blen -= 2;
data->bdone += 2;
} else
data->blen = 0;
goto done;
}
slen = 2 + strlen(str) * 2;
udata[0] = slen;
udata[1] = UDESC_STRING;
if (len > slen) {
data->blen = len - slen;
len = slen;
} else
data->blen = 0;
for (i = 2; i < len; i += 2) {
udata[i] = *str++;
udata[i+1] = '\0';
}
data->bdone += slen;
break;
case UDESC_BOS:
DPRINTF(("umouse: USB3 BOS\r\n"));
if (len > sizeof(umouse_bosd)) {
data->blen = len - sizeof(umouse_bosd);
len = sizeof(umouse_bosd);
} else
data->blen = 0;
memcpy(udata, &umouse_bosd, len);
data->bdone += len;
break;
default:
DPRINTF(("umouse: unknown(%d)->ERROR\r\n", value >> 8));
err = USB_ERR_IOERROR;
goto done;
}
eshort = data->blen > 0;
break;
case UREQ(UR_GET_DESCRIPTOR, UT_READ_INTERFACE):
DPRINTF(("umouse: (UR_GET_DESCRIPTOR, UT_READ_INTERFACE)"
"0x%x\r\n",
(value >> 8)));
if (!data)
break;
switch (value >> 8) {
case UMOUSE_REPORT_DESC_TYPE:
if (len > sizeof(umouse_report_desc)) {
data->blen = len - sizeof(umouse_report_desc);
len = sizeof(umouse_report_desc);
} else
data->blen = 0;
memcpy(data->buf, umouse_report_desc, len);
data->bdone += len;
break;
default:
DPRINTF(("umouse: IO ERROR\r\n"));
err = USB_ERR_IOERROR;
goto done;
}
eshort = data->blen > 0;
break;
case UREQ(UR_GET_INTERFACE, UT_READ_INTERFACE):
DPRINTF(("umouse: (UR_GET_INTERFACE, UT_READ_INTERFACE)\r\n"));
if (index != 0) {
DPRINTF(("umouse get_interface, invalid index %d\r\n",
index));
err = USB_ERR_IOERROR;
goto done;
}
if (!data)
break;
if (len > 0) {
*udata = 0;
data->blen = len - 1;
}
eshort = data->blen > 0;
data->bdone += 1;
break;
case UREQ(UR_GET_STATUS, UT_READ_DEVICE):
DPRINTF(("umouse: (UR_GET_STATUS, UT_READ_DEVICE)\r\n"));
if (!data)
break;
if (data != NULL && len > 1) {
if (dev->hid.feature == UF_DEVICE_REMOTE_WAKEUP)
USETW(udata, UDS_REMOTE_WAKEUP);
else
USETW(udata, 0);
data->blen = len - 2;
data->bdone += 2;
}
eshort = data->blen > 0;
break;
case UREQ(UR_GET_STATUS, UT_READ_INTERFACE):
case UREQ(UR_GET_STATUS, UT_READ_ENDPOINT):
DPRINTF(("umouse: (UR_GET_STATUS, UT_READ_INTERFACE)\r\n"));
if (!data)
break;
if (data != NULL && len > 1) {
USETW(udata, 0);
data->blen = len - 2;
data->bdone += 2;
}
eshort = data->blen > 0;
break;
case UREQ(UR_SET_ADDRESS, UT_WRITE_DEVICE):
/* XXX Controller should've handled this */
DPRINTF(("umouse set address %u\r\n", value));
break;
case UREQ(UR_SET_CONFIG, UT_WRITE_DEVICE):
DPRINTF(("umouse set config %u\r\n", value));
break;
case UREQ(UR_SET_DESCRIPTOR, UT_WRITE_DEVICE):
DPRINTF(("umouse set descriptor %u\r\n", value));
break;
case UREQ(UR_CLEAR_FEATURE, UT_WRITE_DEVICE):
DPRINTF(("umouse: (UR_SET_FEATURE,UT_WRITE_DEVICE) %x\r\n",
value));
if (value == UF_DEVICE_REMOTE_WAKEUP)
dev->hid.feature = 0;
break;
case UREQ(UR_SET_FEATURE, UT_WRITE_DEVICE):
DPRINTF(("umouse: (UR_SET_FEATURE,UT_WRITE_DEVICE) %x\r\n",
value));
if (value == UF_DEVICE_REMOTE_WAKEUP)
dev->hid.feature = UF_DEVICE_REMOTE_WAKEUP;
break;
case UREQ(UR_CLEAR_FEATURE, UT_WRITE_INTERFACE):
case UREQ(UR_CLEAR_FEATURE, UT_WRITE_ENDPOINT):
case UREQ(UR_SET_FEATURE, UT_WRITE_INTERFACE):
case UREQ(UR_SET_FEATURE, UT_WRITE_ENDPOINT):
DPRINTF(("umouse: (UR_CLEAR_FEATURE,UT_WRITE_INTERFACE)\r\n"
));
err = USB_ERR_IOERROR;
goto done;
case UREQ(UR_SET_INTERFACE, UT_WRITE_INTERFACE):
DPRINTF(("umouse set interface %u\r\n", value));
break;
case UREQ(UR_ISOCH_DELAY, UT_WRITE_DEVICE):
DPRINTF(("umouse set isoch delay %u\r\n", value));
break;
case UREQ(UR_SET_SEL, 0):
DPRINTF(("umouse set sel\r\n"));
break;
case UREQ(UR_SYNCH_FRAME, UT_WRITE_ENDPOINT):
DPRINTF(("umouse synch frame\r\n"));
break;
/* HID device requests */
case UREQ(UMOUSE_GET_REPORT, UT_READ_CLASS_INTERFACE):
DPRINTF(("umouse: (UMOUSE_GET_REPORT,UT_READ_CLASS_INTERFACE) "
"0x%x\r\n", (value >> 8)));
if (!data)
break;
if ((value >> 8) == 0x01 && len >= sizeof(dev->um_report)) {
/* TODO read from backend */
if (len > sizeof(dev->um_report)) {
data->blen = len - sizeof(dev->um_report);
len = sizeof(dev->um_report);
} else
data->blen = 0;
memcpy(data->buf, &dev->um_report, len);
data->bdone += len;
} else {
err = USB_ERR_IOERROR;
goto done;
}
eshort = data->blen > 0;
break;
case UREQ(UMOUSE_GET_IDLE, UT_READ_CLASS_INTERFACE):
if (!data)
break;
if (data != NULL && len > 0) {
*udata = dev->hid.idle;
data->blen = len - 1;
data->bdone += 1;
}
eshort = data->blen > 0;
break;
case UREQ(UMOUSE_GET_PROTOCOL, UT_READ_CLASS_INTERFACE):
if (!data)
break;
if (data != NULL && len > 0) {
*udata = dev->hid.protocol;
data->blen = len - 1;
data->bdone += 1;
}
eshort = data->blen > 0;
break;
case UREQ(UMOUSE_SET_REPORT, UT_WRITE_CLASS_INTERFACE):
DPRINTF(("umouse: (UMOUSE_SET_REPORT,"
"UT_WRITE_CLASS_INTERFACE) ignored\r\n"
));
break;
case UREQ(UMOUSE_SET_IDLE, UT_WRITE_CLASS_INTERFACE):
dev->hid.idle = xfer->ureq->wValue >> 8;
DPRINTF(("umouse: (UMOUSE_SET_IDLE,"
"UT_WRITE_CLASS_INTERFACE) %x\r\n",
dev->hid.idle));
break;
case UREQ(UMOUSE_SET_PROTOCOL, UT_WRITE_CLASS_INTERFACE):
dev->hid.protocol = xfer->ureq->wValue >> 8;
DPRINTF(("umouse: (UR_CLEAR_FEATURE,"
"UT_WRITE_CLASS_INTERFACE) %x\r\n",
dev->hid.protocol));
break;
default:
DPRINTF(("**** umouse request unhandled\r\n"));
err = USB_ERR_IOERROR;
break;
}
done:
if (xfer->ureq && (xfer->ureq->bmRequestType & UT_WRITE) &&
(err == USB_ERR_NORMAL_COMPLETION) && (data != NULL))
data->blen = 0;
else if (eshort)
err = USB_ERR_SHORT_XFER;
DPRINTF(("umouse request error code %d (0=ok), blen %u txlen %u\r\n",
err, (data ? data->blen : 0), (data ? data->bdone : 0)));
return err;
}
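/*
 * Interrupt IN endpoint handler: copy the latest 6-byte report (buttons,
 * x, y, wheel) into the first transfer block that still has a buffer.
 */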
static int
umouse_data_handler(void *scarg, struct usb_data_xfer *xfer, int dir,
int epctx)
{
struct umouse_vdev *dev;
struct usb_data_xfer_block *data;
uint8_t *udata;
int len, i, idx;
int err;
assert(xfer != NULL && xfer->head >= 0);
DPRINTF(("umouse handle data - DIR=%s|EP=%d, blen %d\r\n",
dir ? "IN" : "OUT", epctx, xfer->data[0].blen));
/* find buffer to add data */
udata = NULL;
err = USB_ERR_NORMAL_COMPLETION;
/* handle xfer at first unprocessed item with buffer */
data = NULL;
idx = xfer->head;
for (i = 0; i < xfer->ndata; i++) {
data = &xfer->data[idx];
if (data->buf != NULL && data->blen != 0)
break;
data->processed = 1;
data = NULL;
idx = (idx + 1) % USB_MAX_XFER_BLOCKS;
}
if (!data)
goto done;
udata = data->buf;
len = data->blen;
if (udata == NULL) {
DPRINTF(("umouse no buffer provided for input\r\n"));
err = USB_ERR_NOMEM;
goto done;
}
dev = scarg;
if (dir) {
pthread_mutex_lock(&dev->mtx);
if (!dev->newdata) {
err = USB_ERR_CANCELLED;
USB_DATA_SET_ERRCODE(&xfer->data[xfer->head], USB_NAK);
pthread_mutex_unlock(&dev->mtx);
goto done;
}
if (dev->polling) {
err = USB_ERR_STALLED;
USB_DATA_SET_ERRCODE(data, USB_STALL);
pthread_mutex_unlock(&dev->mtx);
goto done;
}
dev->polling = 1;
if (len > 0) {
dev->newdata = 0;
data->processed = 1;
data->bdone += 6;
memcpy(udata, &dev->um_report, 6);
data->blen = len - 6;
if (data->blen > 0)
err = USB_ERR_SHORT_XFER;
}
dev->polling = 0;
pthread_mutex_unlock(&dev->mtx);
} else {
USB_DATA_SET_ERRCODE(data, USB_STALL);
err = USB_ERR_STALLED;
}
done:
return err;
}
static int
umouse_reset(void *scarg)
{
struct umouse_vdev *dev;
dev = scarg;
dev->newdata = 0;
return 0;
}
static int
umouse_remove(void *scarg)
{
return 0;
}
static int
umouse_stop(void *scarg)
{
return 0;
}
struct usb_devemu ue_mouse = {
.ue_emu = "tablet",
.ue_usbver = 3,
.ue_usbspeed = USB_SPEED_HIGH,
.ue_init = umouse_init,
.ue_request = umouse_request,
.ue_data = umouse_data_handler,
.ue_reset = umouse_reset,
.ue_remove = umouse_remove,
.ue_stop = umouse_stop
};
USB_EMUL_SET(ue_mouse);

722
devicemodel/hw/uart_core.c Normal file
View File

@@ -0,0 +1,722 @@
/*-
* Copyright (c) 2012 NetApp, Inc.
* Copyright (c) 2013 Neel Natu <neel@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/cdefs.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <err.h>
#include <fcntl.h>
#include <termios.h>
#include <unistd.h>
#include <stdbool.h>
#include <string.h>
#include <pthread.h>
#include <sysexits.h>
#include "types.h"
#include "mevent.h"
#include "uart_core.h"
#include "ns16550.h"
#include "dm.h"
#define COM1_BASE 0x3F8
#define COM1_IRQ 4
#define COM2_BASE 0x2F8
#define COM2_IRQ 3
#define DEFAULT_RCLK 1843200
#define DEFAULT_BAUD 9600
#define FCR_RX_MASK 0xC0
#define MCR_OUT1 0x04
#define MCR_OUT2 0x08
#define MSR_DELTA_MASK 0x0f
#ifndef REG_SCR
#define REG_SCR com_scr
#endif
#define FIFOSZ 256
static struct termios tio_stdio_orig;
static struct {
int baseaddr;
int irq;
bool inuse;
} uart_lres[] = {
{ COM1_BASE, COM1_IRQ, false},
{ COM2_BASE, COM2_IRQ, false},
};
#define UART_NLDEVS (ARRAY_SIZE(uart_lres))
struct fifo {
uint8_t buf[FIFOSZ];
int rindex; /* index to read from */
int windex; /* index to write to */
int num; /* number of characters in the fifo */
int size; /* size of the fifo */
};
struct ttyfd {
bool opened;
int fd; /* tty device file descriptor */
struct termios tio_orig, tio_new; /* I/O Terminals */
};
struct uart_vdev {
pthread_mutex_t mtx; /* protects all elements */
uint8_t data; /* Data register (R/W) */
uint8_t ier; /* Interrupt enable register (R/W) */
uint8_t lcr; /* Line control register (R/W) */
uint8_t mcr; /* Modem control register (R/W) */
uint8_t lsr; /* Line status register (R/W) */
uint8_t msr; /* Modem status register (R/W) */
uint8_t fcr; /* FIFO control register (W) */
uint8_t scr; /* Scratch register (R/W) */
uint8_t dll; /* Baudrate divisor latch LSB */
uint8_t dlh; /* Baudrate divisor latch MSB */
struct fifo rxfifo;
struct mevent *mev;
struct ttyfd tty;
bool thre_int_pending; /* THRE interrupt pending */
void *arg;
uart_intr_func_t intr_assert;
uart_intr_func_t intr_deassert;
};
static void uart_drain(int fd, enum ev_type ev, void *arg);
static void
ttyclose(void)
{
tcsetattr(STDIN_FILENO, TCSANOW, &tio_stdio_orig);
}
static void
ttyopen(struct ttyfd *tf)
{
tcgetattr(tf->fd, &tf->tio_orig);
tf->tio_new = tf->tio_orig;
cfmakeraw(&tf->tio_new);
tf->tio_new.c_cflag |= CLOCAL;
tcsetattr(tf->fd, TCSANOW, &tf->tio_new);
if (tf->fd == STDIN_FILENO) {
tio_stdio_orig = tf->tio_orig;
atexit(ttyclose);
}
}
static int
ttyread(struct ttyfd *tf)
{
unsigned char rb;
if (read(tf->fd, &rb, 1) > 0)
return rb;
return -1;
}
static int
ttywrite(struct ttyfd *tf, unsigned char wb)
{
if (write(tf->fd, &wb, 1) > 0)
return 1;
return -1;
}
static void
rxfifo_reset(struct uart_vdev *uart, int size)
{
char flushbuf[32];
struct fifo *fifo;
ssize_t nread;
int error;
fifo = &uart->rxfifo;
bzero(fifo, sizeof(struct fifo));
fifo->size = size;
if (uart->tty.opened) {
/*
* Flush any unread input from the tty buffer.
*/
while (1) {
nread = read(uart->tty.fd, flushbuf, sizeof(flushbuf));
if (nread != sizeof(flushbuf))
break;
}
/*
* Enable mevent to trigger when new characters are available
* on the tty fd.
*/
error = mevent_enable(uart->mev);
assert(error == 0);
}
}
static int
rxfifo_available(struct uart_vdev *uart)
{
struct fifo *fifo;
fifo = &uart->rxfifo;
return (fifo->num < fifo->size);
}
static int
rxfifo_putchar(struct uart_vdev *uart, uint8_t ch)
{
struct fifo *fifo;
int error;
fifo = &uart->rxfifo;
if (fifo->num < fifo->size) {
fifo->buf[fifo->windex] = ch;
fifo->windex = (fifo->windex + 1) % fifo->size;
fifo->num++;
if (!rxfifo_available(uart)) {
if (uart->tty.opened) {
/*
* Disable mevent callback if the FIFO is full.
*/
error = mevent_disable(uart->mev);
assert(error == 0);
}
}
return 0;
} else
return -1;
}
static int
rxfifo_getchar(struct uart_vdev *uart)
{
struct fifo *fifo;
int c, error, wasfull;
wasfull = 0;
fifo = &uart->rxfifo;
if (fifo->num > 0) {
if (!rxfifo_available(uart))
wasfull = 1;
c = fifo->buf[fifo->rindex];
fifo->rindex = (fifo->rindex + 1) % fifo->size;
fifo->num--;
if (wasfull) {
if (uart->tty.opened) {
error = mevent_enable(uart->mev);
assert(error == 0);
}
}
return c;
} else
return -1;
}
static int
rxfifo_numchars(struct uart_vdev *uart)
{
struct fifo *fifo = &uart->rxfifo;
return fifo->num;
}
static void
uart_opentty(struct uart_vdev *uart)
{
ttyopen(&uart->tty);
if (isatty(uart->tty.fd)) {
uart->mev = mevent_add(uart->tty.fd, EVF_READ,
uart_drain, uart);
assert(uart->mev != NULL);
}
}
static void
uart_closetty(struct uart_vdev *uart)
{
if (uart->tty.fd != STDIN_FILENO)
mevent_delete_close(uart->mev);
else
mevent_delete(uart->mev);
uart->mev = 0;
ttyclose();
}
static uint8_t
modem_status(uint8_t mcr)
{
uint8_t msr;
if (mcr & MCR_LOOPBACK) {
/*
* In the loopback mode certain bits from the MCR are
* reflected back into MSR.
*/
msr = 0;
if (mcr & MCR_RTS)
msr |= MSR_CTS;
if (mcr & MCR_DTR)
msr |= MSR_DSR;
if (mcr & MCR_OUT1)
msr |= MSR_RI;
if (mcr & MCR_OUT2)
msr |= MSR_DCD;
} else {
/*
* Always assert DCD and DSR so tty open doesn't block
* even if CLOCAL is turned off.
*/
msr = MSR_DCD | MSR_DSR;
}
assert((msr & MSR_DELTA_MASK) == 0);
return msr;
}
/*
* The IIR returns a prioritized interrupt reason:
* - receive data available
* - transmit holding register empty
* - modem status change
*
* Return an interrupt reason if one is available.
*/
static int
uart_intr_reason(struct uart_vdev *uart)
{
if ((uart->lsr & LSR_OE) != 0 && (uart->ier & IER_ERLS) != 0)
return IIR_RLS;
else if (rxfifo_numchars(uart) > 0 && (uart->ier & IER_ERXRDY) != 0)
return IIR_RXTOUT;
else if (uart->thre_int_pending && (uart->ier & IER_ETXRDY) != 0)
return IIR_TXRDY;
else if ((uart->msr & MSR_DELTA_MASK) != 0 &&
(uart->ier & IER_EMSC) != 0)
return IIR_MLSC;
else
return IIR_NOPEND;
}
static void
uart_reset(struct uart_vdev *uart)
{
uint16_t divisor;
divisor = DEFAULT_RCLK / DEFAULT_BAUD / 16;
uart->dll = divisor;
uart->dlh = divisor >> 8; /* DLH holds the high byte of the 16-bit divisor */
uart->msr = modem_status(uart->mcr);
rxfifo_reset(uart, 1); /* no fifo until enabled by software */
}
/*
* Toggle the COM port's intr pin depending on whether or not we have an
* interrupt condition to report to the processor.
*/
static void
uart_toggle_intr(struct uart_vdev *uart)
{
uint8_t intr_reason;
intr_reason = uart_intr_reason(uart);
if (intr_reason == IIR_NOPEND)
(*uart->intr_deassert)(uart->arg);
else
(*uart->intr_assert)(uart->arg);
}
static void
uart_drain(int fd, enum ev_type ev, void *arg)
{
struct uart_vdev *uart;
int ch;
uart = arg;
assert(fd == uart->tty.fd);
assert(ev == EVF_READ);
/*
* This routine is called in the context of the mevent thread
* to take out the uart lock to protect against concurrent
* access from a vCPU i/o exit
*/
pthread_mutex_lock(&uart->mtx);
if ((uart->mcr & MCR_LOOPBACK) != 0) {
(void) ttyread(&uart->tty);
} else {
while ((ch = ttyread(&uart->tty)) != -1)
rxfifo_putchar(uart, ch);
uart_toggle_intr(uart);
}
pthread_mutex_unlock(&uart->mtx);
}
void
uart_write(struct uart_vdev *uart, int offset, uint8_t value)
{
int fifosz;
uint8_t msr;
pthread_mutex_lock(&uart->mtx);
/*
* Take care of the special case DLAB accesses first
*/
if ((uart->lcr & LCR_DLAB) != 0) {
if (offset == REG_DLL) {
uart->dll = value;
goto done;
}
if (offset == REG_DLH) {
uart->dlh = value;
goto done;
}
}
switch (offset) {
case REG_DATA:
if (uart->mcr & MCR_LOOPBACK) {
if (rxfifo_putchar(uart, value) != 0)
uart->lsr |= LSR_OE;
} else if (uart->tty.opened) {
ttywrite(&uart->tty, value);
} /* else drop on floor */
uart->thre_int_pending = true;
break;
case REG_IER:
/*
* Apply mask so that bits 4-7 are 0
* Also enables bits 0-3 only if they're 1
*/
uart->ier = value & 0x0F;
break;
case REG_FCR:
/*
* When moving from FIFO and 16450 mode and vice versa,
* the FIFO contents are reset.
*/
if ((uart->fcr & FCR_ENABLE) ^ (value & FCR_ENABLE)) {
fifosz = (value & FCR_ENABLE) ? FIFOSZ : 1;
rxfifo_reset(uart, fifosz);
}
/*
* The FCR_ENABLE bit must be '1' for the programming
* of other FCR bits to be effective.
*/
if ((value & FCR_ENABLE) == 0) {
uart->fcr = 0;
} else {
if ((value & FCR_RCV_RST) != 0)
rxfifo_reset(uart, FIFOSZ);
uart->fcr = value &
(FCR_ENABLE | FCR_DMA | FCR_RX_MASK);
}
break;
case REG_LCR:
uart->lcr = value;
break;
case REG_MCR:
/* Apply mask so that bits 5-7 are 0 */
uart->mcr = value & 0x1F;
msr = modem_status(uart->mcr);
/*
* Detect if there has been any change between the
* previous and the new value of MSR. If there is
* then assert the appropriate MSR delta bit.
*/
if ((msr & MSR_CTS) ^ (uart->msr & MSR_CTS))
uart->msr |= MSR_DCTS;
if ((msr & MSR_DSR) ^ (uart->msr & MSR_DSR))
uart->msr |= MSR_DDSR;
if ((msr & MSR_DCD) ^ (uart->msr & MSR_DCD))
uart->msr |= MSR_DDCD;
if ((uart->msr & MSR_RI) != 0 && (msr & MSR_RI) == 0)
uart->msr |= MSR_TERI;
/*
* Update the value of MSR while retaining the delta
* bits.
*/
uart->msr &= MSR_DELTA_MASK;
uart->msr |= msr;
break;
case REG_LSR:
/*
* Line status register is not meant to be written to
* during normal operation.
*/
break;
case REG_MSR:
/*
* MSR is a read-only register; writes are ignored.
*/
break;
case REG_SCR:
uart->scr = value;
break;
default:
break;
}
done:
uart_toggle_intr(uart);
pthread_mutex_unlock(&uart->mtx);
}
uint8_t
uart_read(struct uart_vdev *uart, int offset)
{
uint8_t iir, intr_reason, reg;
pthread_mutex_lock(&uart->mtx);
/*
* Take care of the special case DLAB accesses first
*/
if ((uart->lcr & LCR_DLAB) != 0) {
if (offset == REG_DLL) {
reg = uart->dll;
goto done;
}
if (offset == REG_DLH) {
reg = uart->dlh;
goto done;
}
}
switch (offset) {
case REG_DATA:
reg = rxfifo_getchar(uart);
break;
case REG_IER:
reg = uart->ier;
break;
case REG_IIR:
iir = (uart->fcr & FCR_ENABLE) ? IIR_FIFO_MASK : 0;
intr_reason = uart_intr_reason(uart);
/*
* Deal with side effects of reading the IIR register
*/
if (intr_reason == IIR_TXRDY)
uart->thre_int_pending = false;
iir |= intr_reason;
reg = iir;
break;
case REG_LCR:
reg = uart->lcr;
break;
case REG_MCR:
reg = uart->mcr;
break;
case REG_LSR:
/* Transmitter is always ready for more data */
uart->lsr |= LSR_TEMT | LSR_THRE;
/* Check for new receive data */
if (rxfifo_numchars(uart) > 0)
uart->lsr |= LSR_RXRDY;
else
uart->lsr &= ~LSR_RXRDY;
reg = uart->lsr;
/* The LSR_OE bit is cleared on LSR read */
uart->lsr &= ~LSR_OE;
break;
case REG_MSR:
/*
* MSR delta bits are cleared on read
*/
reg = uart->msr;
uart->msr &= ~MSR_DELTA_MASK;
break;
case REG_SCR:
reg = uart->scr;
break;
default:
reg = 0xFF;
break;
}
done:
uart_toggle_intr(uart);
pthread_mutex_unlock(&uart->mtx);
return reg;
}
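/* Claim one of the fixed legacy COM port resources; returns -1 if unavailable */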
int
uart_legacy_alloc(int which, int *baseaddr, int *irq)
{
if (which < 0 || which >= UART_NLDEVS || uart_lres[which].inuse)
return -1;
uart_lres[which].inuse = true;
*baseaddr = uart_lres[which].baseaddr;
*irq = uart_lres[which].irq;
return 0;
}
void
uart_legacy_dealloc(int which)
{
uart_lres[which].inuse = false;
}
struct uart_vdev *
uart_init(uart_intr_func_t intr_assert, uart_intr_func_t intr_deassert,
void *arg)
{
struct uart_vdev *uart;
uart = calloc(1, sizeof(struct uart_vdev));
assert(uart != NULL);
uart->arg = arg;
uart->intr_assert = intr_assert;
uart->intr_deassert = intr_deassert;
pthread_mutex_init(&uart->mtx, NULL);
uart_reset(uart);
return uart;
}
void
uart_deinit(struct uart_vdev *uart)
{
if (uart) {
if (uart->tty.opened && uart->tty.fd == STDIN_FILENO) {
ttyclose();
stdio_in_use = false;
}
free(uart);
}
}
static int
uart_tty_backend(struct uart_vdev *uart, const char *opts)
{
int fd;
int retval;
retval = -1;
fd = open(opts, O_RDWR | O_NONBLOCK);
if (fd >= 0 && isatty(fd)) {
uart->tty.fd = fd;
uart->tty.opened = true;
retval = 0;
} else if (fd >= 0) {
/* opened something that is not a tty; don't leak the descriptor */
close(fd);
}
return retval;
}
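/*
 * Attach a backend to the uart: "stdio" claims the process's standard input
 * (only one uart at a time), any other string is opened as a tty device.
 */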
int
uart_set_backend(struct uart_vdev *uart, const char *opts)
{
int retval;
retval = -1;
if (opts == NULL)
return 0;
if (strcmp("stdio", opts) == 0) {
if (!stdio_in_use) {
uart->tty.fd = STDIN_FILENO;
uart->tty.opened = true;
stdio_in_use = true;
retval = 0;
}
} else if (uart_tty_backend(uart, opts) == 0) {
retval = 0;
}
/* Make the backend file descriptor non-blocking */
if (retval == 0)
retval = fcntl(uart->tty.fd, F_SETFL, O_NONBLOCK);
if (retval == 0)
uart_opentty(uart);
return retval;
}
void
uart_release_backend(struct uart_vdev *uart, const char *opts)
{
if (opts == NULL)
return;
uart_closetty(uart);
if (strcmp("stdio", opts) == 0) {
stdio_in_use = false;
} else
close(uart->tty.fd);
uart->tty.fd = 0;
uart->tty.opened = false;
}

73
devicemodel/hw/usb_core.c Normal file
View File

@@ -0,0 +1,73 @@
/*-
* Copyright (c) 2014 Nahanni Systems Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include "usb_core.h"
SET_DECLARE(usb_emu_set, struct usb_devemu);
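/* Find a registered USB device emulation (e.g. "tablet") by its ue_emu name */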
struct usb_devemu *
usb_emu_finddev(char *name)
{
struct usb_devemu **udpp, *udp;
SET_FOREACH(udpp, usb_emu_set) {
udp = *udpp;
if (!strcmp(udp->ue_emu, name))
return udp;
}
return NULL;
}
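/*
 * Append a data block to the transfer; returns NULL once all
 * USB_MAX_XFER_BLOCKS slots are in use.
 */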
struct usb_data_xfer_block *
usb_data_xfer_append(struct usb_data_xfer *xfer, void *buf, int blen,
void *hci_data, int ccs)
{
struct usb_data_xfer_block *xb;
if (xfer->ndata >= USB_MAX_XFER_BLOCKS)
return NULL;
xb = &xfer->data[xfer->tail];
xb->buf = buf;
xb->blen = blen;
xb->hci_data = hci_data;
xb->ccs = ccs;
xb->processed = 0;
xb->bdone = 0;
xfer->ndata++;
xfer->tail = (xfer->tail + 1) % USB_MAX_XFER_BLOCKS;
return xb;
}

View File

@@ -0,0 +1,62 @@
/*-
* Copyright (c) 2012 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _ACPI_H_
#define _ACPI_H_
#define SCI_INT 9
#define SMI_CMD 0xb2
#define ACPI_ENABLE 0xa0
#define ACPI_DISABLE 0xa1
#define PM1A_EVT_ADDR 0x400
#define PM1A_CNT_ADDR 0x404
#define IO_PMTMR 0x408 /* 4-byte i/o port for the timer */
/* Entry numbers of dynamically added ACPI tables */
#define NHLT_ENTRY_NO 8
void acpi_table_enable(int num);
uint32_t get_acpi_base(void);
uint32_t get_acpi_table_length(void);
struct vmctx;
int acpi_build(struct vmctx *ctx, int ncpu);
void dsdt_line(const char *fmt, ...);
void dsdt_fixed_ioport(uint16_t iobase, uint16_t length);
void dsdt_fixed_irq(uint8_t irq);
void dsdt_fixed_mem32(uint32_t base, uint32_t length);
void dsdt_indent(int levels);
void dsdt_unindent(int levels);
void sci_init(struct vmctx *ctx);
void pm_write_dsdt(struct vmctx *ctx, int ncpu);
#endif /* _ACPI_H_ */

322
devicemodel/include/ahci.h Normal file
View File

@@ -0,0 +1,322 @@
/*-
* Copyright (c) 1998 - 2008 Søren Schmidt <sos@FreeBSD.org>
* Copyright (c) 2009-2012 Alexander Motin <mav@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification, immediately at the beginning of the file.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _AHCI_H_
#define _AHCI_H_
/* ATA register defines */
#define ATA_DATA 0 /* (RW) data */
#define ATA_FEATURE 1 /* (W) feature */
#define ATA_F_DMA 0x01 /* enable DMA */
#define ATA_F_OVL 0x02 /* enable overlap */
#define ATA_COUNT 2 /* (W) sector count */
#define ATA_SECTOR 3 /* (RW) sector # */
#define ATA_CYL_LSB 4 /* (RW) cylinder# LSB */
#define ATA_CYL_MSB 5 /* (RW) cylinder# MSB */
#define ATA_DRIVE 6 /* (W) Sector/Drive/Head */
#define ATA_D_LBA 0x40 /* use LBA addressing */
#define ATA_D_IBM 0xa0 /* 512 byte sectors, ECC */
#define ATA_COMMAND 7 /* (W) command */
#define ATA_ERROR 8 /* (R) error */
#define ATA_E_ILI 0x01 /* illegal length */
#define ATA_E_NM 0x02 /* no media */
#define ATA_E_ABORT 0x04 /* command aborted */
#define ATA_E_MCR 0x08 /* media change request */
#define ATA_E_IDNF 0x10 /* ID not found */
#define ATA_E_MC 0x20 /* media changed */
#define ATA_E_UNC 0x40 /* uncorrectable data */
#define ATA_E_ICRC 0x80 /* UDMA crc error */
#define ATA_E_ATAPI_SENSE_MASK 0xf0 /* ATAPI sense key mask */
#define ATA_IREASON 9 /* (R) interrupt reason */
#define ATA_I_CMD 0x01 /* cmd (1) | data (0) */
#define ATA_I_IN 0x02 /* read (1) | write (0) */
#define ATA_I_RELEASE 0x04 /* released bus (1) */
#define ATA_I_TAGMASK 0xf8 /* tag mask */
#define ATA_STATUS 10 /* (R) status */
#define ATA_ALTSTAT 11 /* (R) alternate status */
#define ATA_S_ERROR 0x01 /* error */
#define ATA_S_INDEX 0x02 /* index */
#define ATA_S_CORR 0x04 /* data corrected */
#define ATA_S_DRQ 0x08 /* data request */
#define ATA_S_DSC 0x10 /* drive seek completed */
#define ATA_S_SERVICE 0x10 /* drive needs service */
#define ATA_S_DWF 0x20 /* drive write fault */
#define ATA_S_DMA 0x20 /* DMA ready */
#define ATA_S_READY 0x40 /* drive ready */
#define ATA_S_BUSY 0x80 /* busy */
#define ATA_CONTROL 12 /* (W) control */
#define ATA_A_IDS 0x02 /* disable interrupts */
#define ATA_A_RESET 0x04 /* RESET controller */
#define ATA_A_4BIT 0x08 /* 4 head bits */
#define ATA_A_HOB 0x80 /* High Order Byte enable */
/* SATA register defines */
#define ATA_SSTATUS 13
#define ATA_SS_DET_MASK 0x0000000f
#define ATA_SS_DET_NO_DEVICE 0x00000000
#define ATA_SS_DET_DEV_PRESENT 0x00000001
#define ATA_SS_DET_PHY_ONLINE 0x00000003
#define ATA_SS_DET_PHY_OFFLINE 0x00000004
#define ATA_SS_SPD_MASK 0x000000f0
#define ATA_SS_SPD_NO_SPEED 0x00000000
#define ATA_SS_SPD_GEN1 0x00000010
#define ATA_SS_SPD_GEN2 0x00000020
#define ATA_SS_SPD_GEN3 0x00000030
#define ATA_SS_IPM_MASK 0x00000f00
#define ATA_SS_IPM_NO_DEVICE 0x00000000
#define ATA_SS_IPM_ACTIVE 0x00000100
#define ATA_SS_IPM_PARTIAL 0x00000200
#define ATA_SS_IPM_SLUMBER 0x00000600
#define ATA_SS_IPM_DEVSLEEP 0x00000800
#define ATA_SERROR 14
#define ATA_SE_DATA_CORRECTED 0x00000001
#define ATA_SE_COMM_CORRECTED 0x00000002
#define ATA_SE_DATA_ERR 0x00000100
#define ATA_SE_COMM_ERR 0x00000200
#define ATA_SE_PROT_ERR 0x00000400
#define ATA_SE_HOST_ERR 0x00000800
#define ATA_SE_PHY_CHANGED 0x00010000
#define ATA_SE_PHY_IERROR 0x00020000
#define ATA_SE_COMM_WAKE 0x00040000
#define ATA_SE_DECODE_ERR 0x00080000
#define ATA_SE_PARITY_ERR 0x00100000
#define ATA_SE_CRC_ERR 0x00200000
#define ATA_SE_HANDSHAKE_ERR 0x00400000
#define ATA_SE_LINKSEQ_ERR 0x00800000
#define ATA_SE_TRANSPORT_ERR 0x01000000
#define ATA_SE_UNKNOWN_FIS 0x02000000
#define ATA_SE_EXCHANGED 0x04000000
#define ATA_SCONTROL 15
#define ATA_SC_DET_MASK 0x0000000f
#define ATA_SC_DET_IDLE 0x00000000
#define ATA_SC_DET_RESET 0x00000001
#define ATA_SC_DET_DISABLE 0x00000004
#define ATA_SC_SPD_MASK 0x000000f0
#define ATA_SC_SPD_NO_SPEED 0x00000000
#define ATA_SC_SPD_SPEED_GEN1 0x00000010
#define ATA_SC_SPD_SPEED_GEN2 0x00000020
#define ATA_SC_SPD_SPEED_GEN3 0x00000030
#define ATA_SC_IPM_MASK 0x00000f00
#define ATA_SC_IPM_NONE 0x00000000
#define ATA_SC_IPM_DIS_PARTIAL 0x00000100
#define ATA_SC_IPM_DIS_SLUMBER 0x00000200
#define ATA_SC_IPM_DIS_DEVSLEEP 0x00000400
#define ATA_SACTIVE 16
#define AHCI_MAX_PORTS 32
#define AHCI_MAX_SLOTS 32
#define AHCI_MAX_IRQS 16
/* SATA AHCI v1.0 register defines */
#define AHCI_CAP 0x00
#define AHCI_CAP_NPMASK 0x0000001f
#define AHCI_CAP_SXS 0x00000020
#define AHCI_CAP_EMS 0x00000040
#define AHCI_CAP_CCCS 0x00000080
#define AHCI_CAP_NCS 0x00001F00
#define AHCI_CAP_NCS_SHIFT 8
#define AHCI_CAP_PSC 0x00002000
#define AHCI_CAP_SSC 0x00004000
#define AHCI_CAP_PMD 0x00008000
#define AHCI_CAP_FBSS 0x00010000
#define AHCI_CAP_SPM 0x00020000
#define AHCI_CAP_SAM 0x00080000
#define AHCI_CAP_ISS 0x00F00000
#define AHCI_CAP_ISS_SHIFT 20
#define AHCI_CAP_SCLO 0x01000000
#define AHCI_CAP_SAL 0x02000000
#define AHCI_CAP_SALP 0x04000000
#define AHCI_CAP_SSS 0x08000000
#define AHCI_CAP_SMPS 0x10000000
#define AHCI_CAP_SSNTF 0x20000000
#define AHCI_CAP_SNCQ 0x40000000
#define AHCI_CAP_64BIT 0x80000000
#define AHCI_GHC 0x04
#define AHCI_GHC_AE 0x80000000
#define AHCI_GHC_MRSM 0x00000004
#define AHCI_GHC_IE 0x00000002
#define AHCI_GHC_HR 0x00000001
#define AHCI_IS 0x08
#define AHCI_PI 0x0c
#define AHCI_VS 0x10
#define AHCI_CCCC 0x14
#define AHCI_CCCC_TV_MASK 0xffff0000
#define AHCI_CCCC_TV_SHIFT 16
#define AHCI_CCCC_CC_MASK 0x0000ff00
#define AHCI_CCCC_CC_SHIFT 8
#define AHCI_CCCC_INT_MASK 0x000000f8
#define AHCI_CCCC_INT_SHIFT 3
#define AHCI_CCCC_EN 0x00000001
#define AHCI_CCCP 0x18
#define AHCI_EM_LOC 0x1C
#define AHCI_EM_CTL 0x20
#define AHCI_EM_MR 0x00000001
#define AHCI_EM_TM 0x00000100
#define AHCI_EM_RST 0x00000200
#define AHCI_EM_LED 0x00010000
#define AHCI_EM_SAFTE 0x00020000
#define AHCI_EM_SES2 0x00040000
#define AHCI_EM_SGPIO 0x00080000
#define AHCI_EM_SMB 0x01000000
#define AHCI_EM_XMT 0x02000000
#define AHCI_EM_ALHD 0x04000000
#define AHCI_EM_PM 0x08000000
#define AHCI_CAP2 0x24
#define AHCI_CAP2_BOH 0x00000001
#define AHCI_CAP2_NVMP 0x00000002
#define AHCI_CAP2_APST 0x00000004
#define AHCI_CAP2_SDS 0x00000008
#define AHCI_CAP2_SADM 0x00000010
#define AHCI_CAP2_DESO 0x00000020
#define AHCI_OFFSET 0x100
#define AHCI_STEP 0x80
#define AHCI_P_CLB 0x00
#define AHCI_P_CLBU 0x04
#define AHCI_P_FB 0x08
#define AHCI_P_FBU 0x0c
#define AHCI_P_IS 0x10
#define AHCI_P_IE 0x14
#define AHCI_P_IX_DHR 0x00000001
#define AHCI_P_IX_PS 0x00000002
#define AHCI_P_IX_DS 0x00000004
#define AHCI_P_IX_SDB 0x00000008
#define AHCI_P_IX_UF 0x00000010
#define AHCI_P_IX_DP 0x00000020
#define AHCI_P_IX_PC 0x00000040
#define AHCI_P_IX_MP 0x00000080
#define AHCI_P_IX_PRC 0x00400000
#define AHCI_P_IX_IPM 0x00800000
#define AHCI_P_IX_OF 0x01000000
#define AHCI_P_IX_INF 0x04000000
#define AHCI_P_IX_IF 0x08000000
#define AHCI_P_IX_HBD 0x10000000
#define AHCI_P_IX_HBF 0x20000000
#define AHCI_P_IX_TFE 0x40000000
#define AHCI_P_IX_CPD 0x80000000
#define AHCI_P_CMD 0x18
#define AHCI_P_CMD_ST 0x00000001
#define AHCI_P_CMD_SUD 0x00000002
#define AHCI_P_CMD_POD 0x00000004
#define AHCI_P_CMD_CLO 0x00000008
#define AHCI_P_CMD_FRE 0x00000010
#define AHCI_P_CMD_CCS_MASK 0x00001f00
#define AHCI_P_CMD_CCS_SHIFT 8
#define AHCI_P_CMD_ISS 0x00002000
#define AHCI_P_CMD_FR 0x00004000
#define AHCI_P_CMD_CR 0x00008000
#define AHCI_P_CMD_CPS 0x00010000
#define AHCI_P_CMD_PMA 0x00020000
#define AHCI_P_CMD_HPCP 0x00040000
#define AHCI_P_CMD_MPSP 0x00080000
#define AHCI_P_CMD_CPD 0x00100000
#define AHCI_P_CMD_ESP 0x00200000
#define AHCI_P_CMD_FBSCP 0x00400000
#define AHCI_P_CMD_APSTE 0x00800000
#define AHCI_P_CMD_ATAPI 0x01000000
#define AHCI_P_CMD_DLAE 0x02000000
#define AHCI_P_CMD_ALPE 0x04000000
#define AHCI_P_CMD_ASP 0x08000000
#define AHCI_P_CMD_ICC_MASK 0xf0000000
#define AHCI_P_CMD_NOOP 0x00000000
#define AHCI_P_CMD_ACTIVE 0x10000000
#define AHCI_P_CMD_PARTIAL 0x20000000
#define AHCI_P_CMD_SLUMBER 0x60000000
#define AHCI_P_CMD_DEVSLEEP 0x80000000
#define AHCI_P_TFD 0x20
#define AHCI_P_SIG 0x24
#define AHCI_P_SSTS 0x28
#define AHCI_P_SCTL 0x2c
#define AHCI_P_SERR 0x30
#define AHCI_P_SACT 0x34
#define AHCI_P_CI 0x38
#define AHCI_P_SNTF 0x3C
#define AHCI_P_FBS 0x40
#define AHCI_P_FBS_EN 0x00000001
#define AHCI_P_FBS_DEC 0x00000002
#define AHCI_P_FBS_SDE 0x00000004
#define AHCI_P_FBS_DEV 0x00000f00
#define AHCI_P_FBS_DEV_SHIFT 8
#define AHCI_P_FBS_ADO 0x0000f000
#define AHCI_P_FBS_ADO_SHIFT 12
#define AHCI_P_FBS_DWE 0x000f0000
#define AHCI_P_FBS_DWE_SHIFT 16
#define AHCI_P_DEVSLP 0x44
#define AHCI_P_DEVSLP_ADSE 0x00000001
#define AHCI_P_DEVSLP_DSP 0x00000002
#define AHCI_P_DEVSLP_DETO 0x000003fc
#define AHCI_P_DEVSLP_DETO_SHIFT 2
#define AHCI_P_DEVSLP_MDAT 0x00007c00
#define AHCI_P_DEVSLP_MDAT_SHIFT 10
#define AHCI_P_DEVSLP_DITO 0x01ff8000
#define AHCI_P_DEVSLP_DITO_SHIFT 15
#define AHCI_P_DEVSLP_DM 0x0e000000
#define AHCI_P_DEVSLP_DM_SHIFT 25
/* Just to be sure, if building as module. */
#if MAXPHYS < 512 * 1024
#undef MAXPHYS
#define MAXPHYS (512 * 1024)
#endif
/* Pessimistic prognosis on number of required S/G entries */
#define AHCI_SG_ENTRIES (roundup(btoc(MAXPHYS) + 1, 8))
/* Command list. 32 commands. First, 1Kbyte aligned. */
#define AHCI_CL_OFFSET 0
#define AHCI_CL_SIZE 32
/* Command tables. Up to 32 commands, Each, 128byte aligned. */
#define AHCI_CT_OFFSET (AHCI_CL_OFFSET + AHCI_CL_SIZE * AHCI_MAX_SLOTS)
#define AHCI_CT_SIZE (128 + AHCI_SG_ENTRIES * 16)
/* Total main work area. */
#define AHCI_WORK_SIZE (AHCI_CT_OFFSET + AHCI_CT_SIZE * ch->numslots)
#endif /* _AHCI_H_ */

1012
devicemodel/include/ata.h Normal file

File diff suppressed because it is too large

View File

@@ -0,0 +1,120 @@
/*-
* Copyright (c) 2015 Tycho Nightingale <tycho.nightingale@pluribusnetworks.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _ATKBDC_H_
#define _ATKBDC_H_
#define KBD_DATA_PORT 0x60
#define KBD_STS_CTL_PORT 0x64
#define KBDC_RESET 0xfe
#define KBD_DEV_IRQ 1
#define AUX_DEV_IRQ 12
/* controller commands */
#define KBDC_SET_COMMAND_BYTE 0x60
#define KBDC_GET_COMMAND_BYTE 0x20
#define KBDC_DISABLE_AUX_PORT 0xa7
#define KBDC_ENABLE_AUX_PORT 0xa8
#define KBDC_TEST_AUX_PORT 0xa9
#define KBDC_TEST_CTRL 0xaa
#define KBDC_TEST_KBD_PORT 0xab
#define KBDC_DISABLE_KBD_PORT 0xad
#define KBDC_ENABLE_KBD_PORT 0xae
#define KBDC_READ_INPORT 0xc0
#define KBDC_READ_OUTPORT 0xd0
#define KBDC_WRITE_OUTPORT 0xd1
#define KBDC_WRITE_KBD_OUTBUF 0xd2
#define KBDC_WRITE_AUX_OUTBUF 0xd3
#define KBDC_WRITE_TO_AUX 0xd4
/* controller command byte (set by KBDC_SET_COMMAND_BYTE) */
#define KBD_TRANSLATION 0x40
#define KBD_SYS_FLAG_BIT 0x04
#define KBD_DISABLE_KBD_PORT 0x10
#define KBD_DISABLE_AUX_PORT 0x20
#define KBD_ENABLE_AUX_INT 0x02
#define KBD_ENABLE_KBD_INT 0x01
#define KBD_KBD_CONTROL_BITS (KBD_DISABLE_KBD_PORT | KBD_ENABLE_KBD_INT)
#define KBD_AUX_CONTROL_BITS (KBD_DISABLE_AUX_PORT | KBD_ENABLE_AUX_INT)
/* controller status bits */
#define KBDS_KBD_BUFFER_FULL 0x01
#define KBDS_SYS_FLAG 0x04
#define KBDS_CTRL_FLAG 0x08
#define KBDS_AUX_BUFFER_FULL 0x20
/* controller output port */
#define KBDO_KBD_OUTFULL 0x10
#define KBDO_AUX_OUTFULL 0x20
#define RAMSZ 32
#define FIFOSZ 15
#define CTRL_CMD_FLAG 0x8000
struct vmctx;
struct kbd_dev {
bool irq_active;
int irq;
uint8_t buffer[FIFOSZ];
int brd, bwr;
int bcnt;
};
struct aux_dev {
bool irq_active;
int irq;
};
struct atkbdc_base {
struct vmctx *ctx;
pthread_mutex_t mtx;
struct ps2kbd_info *ps2kbd;
struct ps2mouse_info *ps2mouse;
uint8_t status; /* status register */
uint8_t outport; /* controller output port */
uint8_t ram[RAMSZ]; /* byte0 = controller config */
uint32_t curcmd; /* current command for next byte */
uint32_t ctrlbyte;
struct kbd_dev kbd;
struct aux_dev aux;
};
void atkbdc_init(struct vmctx *ctx);
void atkbdc_deinit(struct vmctx *ctx);
void atkbdc_event(struct atkbdc_base *base, int iskbd);
#endif /* _ATKBDC_H_ */
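A minimal sketch (not part of the original header) of how a device model would typically consult the command-byte bits above before signalling the keyboard interrupt; the helper is hypothetical and the actual IRQ assertion call is left abstract:

static void kbd_maybe_assert_irq(struct atkbdc_base *base)
{
	/* keyboard port enabled and keyboard interrupt enabled? */
	if ((base->ram[0] & KBD_KBD_CONTROL_BITS) == KBD_ENABLE_KBD_INT) {
		base->status |= KBDS_KBD_BUFFER_FULL;
		base->kbd.irq_active = true;
		/* assert KBD_DEV_IRQ (IRQ 1) through the VM's interrupt API here */
	}
}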

View File

@@ -0,0 +1,70 @@
/*-
* Copyright (c) 2013 Peter Grehan <grehan@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* The block API to be used by acrn-dm block-device emulations. The routines
* are thread safe, with no assumptions about the context of the completion
* callback - it may occur in the caller's context, or asynchronously in
* another thread.
*/
#ifndef _BLOCK_IF_H_
#define _BLOCK_IF_H_
#include <sys/uio.h>
#include <sys/unistd.h>
#define BLOCKIF_IOV_MAX 33 /* not practical to be IOV_MAX */
struct blockif_req {
struct iovec iov[BLOCKIF_IOV_MAX];
int iovcnt;
off_t offset;
ssize_t resid;
void (*callback)(struct blockif_req *req, int err);
void *param;
};
struct blockif_ctxt;
struct blockif_ctxt *blockif_open(const char *optstr, const char *ident);
off_t blockif_size(struct blockif_ctxt *bc);
void blockif_chs(struct blockif_ctxt *bc, uint16_t *c, uint8_t *h,
uint8_t *s);
int blockif_sectsz(struct blockif_ctxt *bc);
void blockif_psectsz(struct blockif_ctxt *bc, int *size, int *off);
int blockif_queuesz(struct blockif_ctxt *bc);
int blockif_is_ro(struct blockif_ctxt *bc);
int blockif_candelete(struct blockif_ctxt *bc);
int blockif_read(struct blockif_ctxt *bc, struct blockif_req *breq);
int blockif_write(struct blockif_ctxt *bc, struct blockif_req *breq);
int blockif_flush(struct blockif_ctxt *bc, struct blockif_req *breq);
int blockif_delete(struct blockif_ctxt *bc, struct blockif_req *breq);
int blockif_cancel(struct blockif_ctxt *bc, struct blockif_req *breq);
int blockif_close(struct blockif_ctxt *bc);
#endif /* _BLOCK_IF_H_ */
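A short, hedged usage sketch of the block API above: open a backing image, submit one asynchronous read, and pick up the result in the completion callback. The image path, the ident string, and the assumption that <stdio.h> is available are placeholders:

static uint8_t sector[512];
static struct blockif_req breq;

static void read_done(struct blockif_req *req, int err)
{
	if (err == 0)
		printf("read completed, resid=%zd\n", req->resid);
}

static int read_first_sector(void)
{
	struct blockif_ctxt *bc = blockif_open("/path/to/disk.img", "blk-0");

	if (bc == NULL)
		return -1;
	breq.iov[0].iov_base = sector;
	breq.iov[0].iov_len  = sizeof(sector);
	breq.iovcnt   = 1;
	breq.offset   = 0;
	breq.callback = read_done;
	breq.param    = NULL;
	return blockif_read(bc, &breq);	/* call blockif_close(bc) after completion */
}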

View File

@@ -0,0 +1,55 @@
/*-
* Copyright (c) 2015 Tycho Nightingale <tycho.nightingale@pluribusnetworks.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _CONSOLE_H_
#define _CONSOLE_H_
struct gfx_ctx;
typedef void (*fb_render_func_t)(struct gfx_ctx *gc, void *arg);
typedef void (*kbd_event_func_t)(int down, uint32_t keysym, void *arg);
typedef void (*ptr_event_func_t)(uint8_t mask, int x, int y, void *arg);
void console_init(int w, int h, void *fbaddr);
void console_set_fbaddr(void *fbaddr);
struct gfx_ctx_image *console_get_image(void);
void console_fb_register(fb_render_func_t render_cb, void *arg);
void console_refresh(void);
void console_kbd_register(kbd_event_func_t event_cb, void *arg, int pri);
void console_kbd_unregister(void);
void console_key_event(int down, uint32_t keysym);
void console_ptr_register(ptr_event_func_t event_cb, void *arg, int pri);
void console_ptr_unregister(void);
void console_ptr_event(uint8_t button, int x, int y);
#endif /* _CONSOLE_H_ */
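A hedged sketch of how a framebuffer device model would attach itself to the console layer declared above; the resolution, the priority value, and the callback bodies are placeholders:

static void my_render(struct gfx_ctx *gc, void *arg)
{
	/* copy the emulated framebuffer into the console's gfx image */
}

static void my_kbd(int down, uint32_t keysym, void *arg)
{
	/* forward the key press/release to the emulated keyboard */
}

static void attach_console(void *fbaddr)
{
	console_init(1024, 768, fbaddr);
	console_fb_register(my_render, NULL);
	console_kbd_register(my_kbd, NULL, 1 /* priority (assumed) */);
	console_refresh();
}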

View File

@@ -0,0 +1,38 @@
/*-
* Copyright (c) 2008, Jeffrey Roberson <jeff@freebsd.org>
* All rights reserved.
*
* Copyright (c) 2008 Nokia Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _CPUSET_H_
#define _CPUSET_H_
#define CPU_EMPTY(p) (CPU_COUNT_S(CPU_SETSIZE, p) == 0)
#define CPU_CLR_ATOMIC(n, p) CPU_CLR(n, p)
#define CPU_SET_ATOMIC(n, p) CPU_SET(n, p)
#endif /* !_CPUSET_H_ */
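These wrappers map the BSD-style cpuset helpers onto glibc's CPU_* macros; a tiny hedged example (requires _GNU_SOURCE and <sched.h>):

static void cpuset_demo(void)
{
	cpu_set_t cpus;

	CPU_ZERO(&cpus);
	CPU_SET_ATOMIC(2, &cpus);	/* equivalent to CPU_SET(2, &cpus) here */
	if (!CPU_EMPTY(&cpus))
		CPU_CLR_ATOMIC(2, &cpus);
}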

52
devicemodel/include/dm.h Normal file
View File

@@ -0,0 +1,52 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _DM_H_
#define _DM_H_
#define VMEXIT_CONTINUE (0)
#define VMEXIT_ABORT (-1)
#include <stdbool.h>
#include "types.h"
#include "vmm.h"
struct vmctx;
extern int guest_ncpus;
extern char *guest_uuid_str;
extern uint8_t trusty_enabled;
extern char *vsbl_file_name;
extern char *vmname;
extern bool stdio_in_use;
int vmexit_task_switch(struct vmctx *ctx, struct vhm_request *vhm_req,
int *vcpu);
void *paddr_guest2host(struct vmctx *ctx, uintptr_t addr, size_t len);
void *dm_gpa2hva(uint64_t gpa, size_t size);
int virtio_uses_msix(void);
void ptdev_prefer_msi(bool enable);
#endif
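A hedged sketch of the guest-address translation helpers declared above, as a device model might use them; the GPA, the length, and the required <string.h> include are placeholders:

static void zero_guest_page(struct vmctx *ctx)
{
	/* translate a guest-physical address into a host virtual address */
	void *hva = paddr_guest2host(ctx, 0x100000, 4096);

	if (hva != NULL)
		memset(hva, 0, 4096);

	/* dm_gpa2hva() is the variant that does not need a vmctx pointer */
	hva = dm_gpa2hva(0x100000, 4096);
	(void)hva;
}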

48
devicemodel/include/gc.h Normal file
View File

@@ -0,0 +1,48 @@
/*-
* Copyright (c) 2015 Tycho Nightingale <tycho.nightingale@pluribusnetworks.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _GC_H_
#define _GC_H_
#include "types.h"
struct gfx_ctx;
struct gfx_ctx_image {
int vgamode;
int width;
int height;
uint32_t *data;
};
struct gfx_ctx *gc_init(int width, int height, void *fbaddr);
void gc_set_fbaddr(struct gfx_ctx *gc, void *fbaddr);
void gc_resize(struct gfx_ctx *gc, int width, int height);
struct gfx_ctx_image *gc_get_image(struct gfx_ctx *gc);
#endif /* _GC_H_ */
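A small hedged sketch of the graphics-context helpers above; passing NULL as the framebuffer address and the XRGB pixel interpretation are assumptions:

static void paint_black(void)
{
	struct gfx_ctx *gc = gc_init(640, 480, NULL);
	struct gfx_ctx_image *img = gc_get_image(gc);
	int i;

	for (i = 0; i < img->width * img->height; i++)
		img->data[i] = 0xff000000;	/* opaque black, assuming XRGB */
}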

220
devicemodel/include/heci.h Normal file
View File

@@ -0,0 +1,220 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef _HECI_H_
#define _HECI_H_
#include <uuid/uuid.h>
#define HECI_HBM_HOST_VERSION 0x01
#define HECI_HBM_HOST_STOP 0x02
#define HECI_HBM_ME_STOP 0x03
#define HECI_HBM_HOST_ENUM 0x04
#define HECI_HBM_HOST_CLIENT_PROP 0x05
#define HECI_HBM_CLIENT_CONNECT 0x06
#define HECI_HBM_CLIENT_DISCONNECT 0x07
#define HECI_HBM_FLOW_CONTROL 0x08
#define HECI_HBM_CLIENT_CONNECTION_RESET 0x09
/*
* enum heci_hbm_status - heci host bus messages return values
*
* @HECI_HBM_SUCCESS : status success
* @HECI_HBM_CLIENT_NOT_FOUND : client not found
* @HECI_HBM_ALREADY_EXISTS : connection already established
* @HECI_HBM_REJECTED : connection is rejected
* @HECI_HBM_INVALID_PARAMETER : invalid parameter
* @HECI_HBM_NOT_ALLOWED : operation not allowed
* @HECI_HBM_ALREADY_STARTED : system is already started
* @HECI_HBM_NOT_STARTED : system not started
*
*/
enum heci_hbm_status {
HECI_HBM_SUCCESS = 0,
HECI_HBM_CLIENT_NOT_FOUND = 1,
HECI_HBM_ALREADY_EXISTS = 2,
HECI_HBM_REJECTED = 3,
HECI_HBM_INVALID_PARAMETER = 4,
HECI_HBM_NOT_ALLOWED = 5,
HECI_HBM_ALREADY_STARTED = 6,
HECI_HBM_NOT_STARTED = 7,
HECI_HBM_MAX
};
struct mei_enumerate_me_clients {
uint8_t valid_addresses[32];
};
struct mei_request_client_params {
uint8_t client_id;
uint8_t reserved[3];
uint8_t data[64];
} __attribute__((packed));
struct heci_client_properties {
uuid_t protocol_name;
uint8_t protocol_version;
uint8_t max_connections;
uint8_t fixed_address;
uint8_t single_recv_buf;
uint32_t max_msg_length;
} __attribute__((packed));
/* message header is the same in native and virtual */
struct heci_msg_hdr {
uint32_t me_addr:8;
uint32_t host_addr:8;
uint32_t length:9;
uint32_t reserved:5;
uint32_t internal:1;
uint32_t msg_complete:1;
} __attribute__((packed));
struct heci_hbm_cmd {
uint8_t cmd:7;
uint8_t is_response:1;
};
struct heci_hbm_host_ver_req {
struct heci_hbm_cmd hbm_cmd;
uint8_t reserved;
uint8_t minor;
uint8_t major;
};
struct heci_hbm_host_ver_res {
struct heci_hbm_cmd hbm_cmd;
uint8_t host_ver_support;
uint8_t minor;
uint8_t major;
};
struct heci_hbm_host_stop_req {
struct heci_hbm_cmd hbm_cmd;
uint8_t reason;
uint8_t reserved[2];
};
struct heci_hbm_host_stop_res {
struct heci_hbm_cmd hbm_cmd;
uint8_t reserved[3];
};
struct heci_hbm_me_stop_res {
struct heci_hbm_cmd hbm_cmd;
uint8_t reason;
uint8_t reserved[2];
};
struct heci_hbm_me_stop_req {
struct heci_hbm_cmd hbm_cmd;
uint8_t reserved[3];
};
struct heci_hbm_host_enum_req {
struct heci_hbm_cmd hbm_cmd;
uint8_t reserved[3];
};
struct heci_hbm_host_enum_res {
struct heci_hbm_cmd hbm_cmd;
uint8_t reserved[3];
uint8_t valid_addresses[32];
};
struct heci_hbm_host_client_prop_req {
struct heci_hbm_cmd hbm_cmd;
uint8_t address;
uint8_t reserved[2];
};
struct heci_hbm_host_client_prop_res {
struct heci_hbm_cmd hbm_cmd;
uint8_t address;
uint8_t status;
uint8_t reserved[1];
struct heci_client_properties props;
};
struct heci_hbm_client_connect_req {
struct heci_hbm_cmd hbm_cmd;
uint8_t me_addr;
uint8_t host_addr;
uint8_t reserved;
};
struct heci_hbm_client_connect_res {
struct heci_hbm_cmd hbm_cmd;
uint8_t me_addr;
uint8_t host_addr;
uint8_t status;
};
struct heci_hbm_client_disconnect_req {
struct heci_hbm_cmd hbm_cmd;
uint8_t me_addr;
uint8_t host_addr;
uint8_t reserved;
};
struct heci_hbm_client_disconnect_res {
struct heci_hbm_cmd hbm_cmd;
uint8_t me_addr;
uint8_t host_addr;
uint8_t status;
};
struct heci_hbm_flow_ctl {
struct heci_hbm_cmd hbm_cmd;
uint8_t me_addr;
uint8_t host_addr;
uint8_t reserved[5];
};
struct heci_hbm_client_connection_reset_req {
struct heci_hbm_cmd hbm_cmd;
uint8_t me_addr;
uint8_t host_addr;
uint8_t reserved[1];
};
struct heci_hbm_client_connection_reset_res {
struct heci_hbm_cmd hbm_cmd;
uint8_t me_addr;
uint8_t host_addr;
uint8_t status;
};
#define IOCTL_MEI_ENUMERATE_ME_CLIENTS \
_IOWR('H', 0x04, struct mei_enumerate_me_clients)
#define IOCTL_MEI_REQUEST_CLIENT_PROP \
_IOWR('H', 0x05, struct mei_request_client_params)
#endif /* _HECI_H_ */
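A hedged sketch of assembling one of the host-bus messages defined above (the HBM version request), preceded by the 4-byte message header. The use of address 0 for HBM traffic and the protocol version numbers are assumptions; <string.h> and <stddef.h> are presumed available:

static size_t build_hbm_version_req(uint8_t *buf)
{
	struct heci_msg_hdr hdr = {
		.me_addr      = 0,	/* HBM traffic uses the fixed address 0 (assumed) */
		.host_addr    = 0,
		.length       = sizeof(struct heci_hbm_host_ver_req),
		.msg_complete = 1,
	};
	struct heci_hbm_host_ver_req req = {
		.hbm_cmd = { .cmd = HECI_HBM_HOST_VERSION, .is_response = 0 },
		.major   = 1,		/* placeholder HBM protocol version */
		.minor   = 0,
	};

	memcpy(buf, &hdr, sizeof(hdr));
	memcpy(buf + sizeof(hdr), &req, sizeof(req));
	return sizeof(hdr) + sizeof(req);
}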

View File

@@ -0,0 +1,82 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _INOUT_H_
#define _INOUT_H_
#include "types.h"
#include "acrn_common.h"
struct vmctx;
struct vhm_request;
/*
* inout emulation handlers return 0 on success and -1 on failure.
*/
typedef int (*inout_func_t)(struct vmctx *ctx, int vcpu, int in, int port,
int bytes, uint32_t *eax, void *arg);
struct inout_port {
const char *name;
int port;
int size;
int flags;
inout_func_t handler;
void *arg;
};
#define IOPORT_F_IN 0x1
#define IOPORT_F_OUT 0x2
#define IOPORT_F_INOUT (IOPORT_F_IN | IOPORT_F_OUT)
/*
* The following flags are used internally and must not be used by
* device models.
*/
#define IOPORT_F_DEFAULT 0x80000000 /* claimed by default handler */
#define INOUT_PORT(name, port, flags, handler) \
static struct inout_port __CONCAT(__inout_port, __LINE__) = \
{ \
#name, \
(port), \
1, \
(flags), \
(handler), \
0 \
}; \
DATA_SET(inout_port_set, __CONCAT(__inout_port, __LINE__))
void init_inout(void);
int emulate_inout(struct vmctx *ctx, int *pvcpu, struct pio_request *req,
int strict);
int register_inout(struct inout_port *iop);
int unregister_inout(struct inout_port *iop);
int init_bvmcons(void);
void deinit_bvmcons(void);
void enable_bvmcons(void);
#endif /* _INOUT_H_ */
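A hedged sketch of registering a port-I/O handler through the dynamic interface above (the INOUT_PORT() macro does the same via a linker set); the port number and its semantics are placeholders:

static int dbg_port_handler(struct vmctx *ctx, int vcpu, int in, int port,
			    int bytes, uint32_t *eax, void *arg)
{
	if (in)
		*eax = 0;	/* reads return 0 */
	/* writes are ignored */
	return 0;		/* 0 means the access was handled */
}

static struct inout_port dbg_port = {
	.name    = "dbgport",
	.port    = 0x402,	/* placeholder port */
	.size    = 1,
	.flags   = IOPORT_F_INOUT,
	.handler = dbg_port_handler,
};

static void dbg_port_init(void)
{
	register_inout(&dbg_port);
}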

View File

@@ -0,0 +1,41 @@
/*-
* Copyright (c) 2014 Hudson River Trading LLC
* Written by: John H. Baldwin <jhb@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _IOAPIC_H_
#define _IOAPIC_H_
struct pci_vdev;
/*
* Allocate a PCI IRQ from the I/O APIC.
*/
void ioapic_init(struct vmctx *ctx);
int ioapic_pci_alloc_irq(struct pci_vdev *pi);
#endif

743
devicemodel/include/ioc.h Normal file
View File

@@ -0,0 +1,743 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef _IOC_H_
#define _IOC_H_
#include <stdint.h>
#include <pthread.h>
#include <sys/queue.h>
#include <sys/epoll.h>
/*
* Carrier Board Communication (CBC) frame definition
* +---+---------+-----------+---------------+---------+----------+---------+
*
* +---------------+-------------+
* | ServiceHeader | DataPayload |
* | 8b | 24b...504b |
* ++--------------+-------------+
* Service Layer
* \ /
* +-------------+----------\-------------/
* | Multiplexer | Priority | UpperLayer |
* | 5b | 3b | |
* +-------------+------------------------+
* Address Layer
* \ /
* +---+---------+-----------+---------------+---------\----------/---------+
* |SOF|Extension|FrameLength|SequenceCounter|TimeStamp|UpperLayer|CheckSum |
* |8b | 1b | 5b | 2b |32B(n/a) | | 8b |
* +---+---------+-----------+---------------+---------+----------+---------+
* Link Layer
*
*/
#define CBC_SOF_VALUE 0x05 /* CBC start of frame value */
#define CBC_EXT_VALUE 0x00 /* CBC extension bit value */
#define CBC_PRIO_MASK 0x07 /* CBC priority bitmask */
#define CBC_MUX_MASK 0x1F /* CBC multiplexer bitmask */
#define CBC_LEN_MASK 0x1F /* CBC frame length bitmask */
#define CBC_SEQ_MASK 0x03 /* CBC sequence bitmask */
#define CBC_EXT_MASK 0x01 /* CBC extension bits bitmask */
#define CBC_MUX_OFFSET 3 /* CBC multiplexer offset */
#define CBC_SEQ_OFFSET 0 /* CBC sequence offset */
#define CBC_LEN_OFFSET 2 /* CBC service frame length offset */
#define CBC_EXT_OFFSET 7 /* CBC extension bits offset */
#define CBC_LEN_UNIT 4 /* CBC frame content in block length */
#define CBC_PRIO_OFFSET 0 /* CBC priority offset */
#define CBC_CHKSUM_SIZE 1 /* CBC checksum size */
#define CBC_GRANULARITY 4 /* CBC frame alignment */
#define CBC_LINK_HDR_SIZE 3 /* CBC link layer header size */
#define CBC_ADDR_HDR_SIZE 1 /* CBC address layer header size */
#define CBC_SRV_HDR_SIZE 1 /* CBC service layer header size */
#define CBC_MAX_FRAME_SIZE 96 /* CBC maximum frame size */
#define CBC_MIN_FRAME_SIZE 8 /* CBC minimum frame size */
#define CBC_MAX_SERVICE_SIZE 64 /* CBC maximum service size */
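/*
 * Illustrative sketch (not part of the original header): how the link-layer
 * "extension/length/sequence" byte and the address-layer byte can be packed
 * from the masks and offsets above. The helper names are hypothetical and the
 * frame length is expressed in CBC_LEN_UNIT-sized blocks.
 */
static inline uint8_t cbc_pack_els(uint8_t ext, uint8_t len_blocks, uint8_t seq)
{
	return ((ext & CBC_EXT_MASK) << CBC_EXT_OFFSET) |
	       ((len_blocks & CBC_LEN_MASK) << CBC_LEN_OFFSET) |
	       ((seq & CBC_SEQ_MASK) << CBC_SEQ_OFFSET);
}

static inline uint8_t cbc_pack_addr(uint8_t mux, uint8_t prio)
{
	return ((mux & CBC_MUX_MASK) << CBC_MUX_OFFSET) |
	       ((prio & CBC_PRIO_MASK) << CBC_PRIO_OFFSET);
}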
/*
* Define the start positions of each layer headers.
* CBC_SOF_POS: start of frame start byte position
* CBC_ELS_POS: extension, frame length and sequence start byte position
* CBC_ADDR_POS: address protocol start byte position
* CBC_SRV_POS: service protocol start byte position
* CBC_PAYLOAD_POS: CBC payload start byte position
*/
#define CBC_SOF_POS 0
#define CBC_ELS_POS (CBC_SOF_POS + 1)
#define CBC_ADDR_POS (CBC_SOF_POS + CBC_LINK_HDR_SIZE - CBC_CHKSUM_SIZE)
#define CBC_SRV_POS (CBC_ADDR_POS + CBC_ADDR_HDR_SIZE)
#define CBC_PAYLOAD_POS (CBC_SRV_POS + CBC_SRV_HDR_SIZE)
#define CBC_WK_RSN_BTN (1 << 5) /* CBC wakeup reason field button */
#define CBC_WK_RSN_RTC (1 << 9) /* CBC wakeup reason field rtc */
#define CBC_WK_RSN_DOR (1 << 11) /* CBC wakeup reason field cardoor */
#define CBC_WK_RSN_SOC (1 << 23) /* CBC wakeup reason field soc */
/*
* IOC mediator permits the button, rtc and cardoor wakeup reasons, which come
* from the IOC firmware; all other reasons are masked.
*/
#define CBC_WK_RSN_ALL (CBC_WK_RSN_BTN | CBC_WK_RSN_RTC | CBC_WK_RSN_DOR)
/*
* CBC ring buffer is used to buffer bytes before building one complete CBC frame.
*/
#define CBC_RING_BUFFER_SIZE 256
/*
* Default whitelist node is NULL before whitelist initialization.
*/
#define DEFAULT_WLIST_NODE (0)
/*
* Default IOC channels file descriptor is -1 before open.
*/
#define IOC_INIT_FD -1
/*
* Maximum CBC requests number.
*/
#define IOC_MAX_REQUESTS 200
/*
* Maximum epoll events.
*/
#define IOC_MAX_EVENTS 32
/* IOC default path */
#define IOC_DP_NONE ""
/*
* IOC native channel path definition.
*/
#define IOC_NP_PMT "/dev/cbc-pmt"
#define IOC_NP_LF "/dev/cbc-lifecycle"
#define IOC_NP_SIG "/dev/cbc-signals"
#define IOC_NP_ESIG "/dev/cbc-early-signals"
#define IOC_NP_DIAG "/dev/cbc-diagnosis"
#define IOC_NP_DLT "/dev/cbc-dlt"
#define IOC_NP_LIND "/dev/cbc-linda"
#define IOC_NP_RAW0 "/dev/cbc-raw0"
#define IOC_NP_RAW1 "/dev/cbc-raw1"
#define IOC_NP_RAW2 "/dev/cbc-raw2"
#define IOC_NP_RAW3 "/dev/cbc-raw3"
#define IOC_NP_RAW4 "/dev/cbc-raw4"
#define IOC_NP_RAW5 "/dev/cbc-raw5"
#define IOC_NP_RAW6 "/dev/cbc-raw6"
#define IOC_NP_RAW7 "/dev/cbc-raw7"
#define IOC_NP_RAW8 "/dev/cbc-raw8"
#define IOC_NP_RAW9 "/dev/cbc-raw9"
#define IOC_NP_RAW10 "/dev/cbc-raw10"
#define IOC_NP_RAW11 "/dev/cbc-raw11"
#define IOC_NP_FLF "/tmp/ioc_fake_lifecycle"
#define IOC_NP_FSIG "/tmp/ioc_fake_signal"
#define IOC_NP_FRAW "/tmp/ioc_fake_raw11"
/*
* CBC signal data command types.
* Signal Data Message
* +----------------+--------------+
* | SignalDataCMD | Payload |
* | 8b | 0~56b |
* +----------------+--------------+
*/
enum cbc_signal_data_command {
CBC_SD_SINGLE_SIGNAL = 1, /* Single signal update */
CBC_SD_MULTI_SIGNAL = 2, /* Multi signal update */
CBC_SD_GROUP_SIGNAL = 3, /* Group signal update */
CBC_SD_DEFAULT_VALUES = 4, /* Update default values */
CBC_SD_UPDATE_SNA = 5, /* Update SNA values */
CBC_SD_INVAL_SSIG = 6, /* Invalidate signal */
CBC_SD_INVAL_MSIG = 7, /* Invalidate multi signals */
CBC_SD_INVAL_SGRP = 8, /* Invalidate signal group */
CBC_SD_INVAL_MGRP = 9, /* Invalidate multi groups */
CBC_SD_OPEN_CHANNEL = 253, /* Open signal channel */
CBC_SD_CLOSE_CHANNEL = 254, /* Close signal channel */
CBC_SD_RESET_CHANNEL = 255, /* Reset signal channel */
CBC_SD_MAX
};
/*
* CBC system control command types.
* +------------------+------------+
* | SystemControlCMD | Payload |
* | 8b | 24b |
* +------------------+------------+
*/
enum cbc_system_control_command {
CBC_SC_WK_RSN = 1, /* Wakeup reasons */
CBC_SC_HB = 2, /* Heartbeat */
CBC_SC_BOOTSEL = 3, /* Boot selector */
CBC_SC_SPRS_HB = 4, /* Suppress heartbeat check */
CBC_SC_MAX
};
/*
* CBC system control - heartbeat: command types.
* Heartbeat Message
* +------------------+---------+-----------------+------+
* | SystemControlCMD | Command | SUS_STAT Action | Resv |
* | 8b | 8b | 8b | 8b |
* +------------------+---------+-----------------+------+
*/
enum cbc_heartbeat_command {
CBC_HB_SD_PREP, /* Shutdown prepared */
CBC_HB_ACTIVE, /* Active */
CBC_HB_SD_DLY, /* Shutdown delay */
CBC_HB_INITIAL, /* Initial */
CBC_HB_STANDBY, /* Standby */
CBC_HB_DIAG, /* Diagnosis */
CBC_HB_SD_REQ, /* Cm shutdown request */
CBC_HB_SD_EXE, /* Shutdown execute */
CBC_HB_EMG_SD, /* Emergency shutdown execute */
CBC_HB_MAX
};
/*
* CBC system control - heartbeat: suspend state action types.
*/
enum cbc_sus_stat_action {
CBC_SS_INVALID, /* Invalid */
CBC_SS_HALT_I0, /* Halt */
CBC_SS_REBOOT0, /* Reboot */
CBC_SS_HALT_I1, /* Ignore once then halt */
CBC_SS_REBOOT1, /* Ignore once then reboot */
CBC_SS_HALT_I2, /* Ignore twice then halt */
CBC_SS_REBOOT2, /* Ignore twice then reboot */
CBC_SS_REFRESH, /* Ram refresh, S3 */
CBC_SS_MAX
};
/*
* CBC rx signal identity definition.
*/
enum cbc_rx_signal_id {
CBC_SIG_ID_STFR = 20000, /* SetTunerFrequency */
CBC_SIG_ID_EGYO = 20001, /* EnableGyro */
CBC_SIG_ID_WACS = 20002, /* WriteAmplifierConfigurationSequence*/
CBC_SIG_ID_RIFC = 20003, /* RequestIocFblChecksum */
CBC_SIG_ID_RIWC = 20004, /* RequestIocWfChecksum */
CBC_SIG_ID_RIAC = 20005, /* RequestIocAppChecksum */
CBC_SIG_ID_RIVS = 20006, /* RequestIocVersion */
CBC_SIG_ID_RRMS = 20007, /* RequestRuntimeMeasurement */
CBC_SIG_ID_MTAM = 20008, /* MuteAmplifier */
CBC_SIG_ID_VICL = 651, /* VideoInCtrl */
};
/*
* CBC tx signal identity definition.
*/
enum cbc_tx_signal_id {
CBC_SIG_ID_MBV = 501, /* MainBatteryVoltage */
CBC_SIG_ID_TSA = 502, /* TemperatureSensorAmplifier */
CBC_SIG_ID_TSE = 503, /* TemperatureSensorEnvironment */
CBC_SIG_ID_VSWA = 701, /* VehicleSteeringWheelAngle */
CBC_SIG_ID_VSPD = 702, /* VehicleSpeed */
CBC_SIG_ID_VESP = 703, /* VehicleEngineSpeed */
CBC_SIG_ID_VECT = 704, /* VehicleEngineCoolantTemp */
CBC_SIG_ID_VRGR = 705, /* VehicleReverseGear */
CBC_SIG_ID_VPS = 706, /* VehiclePowerStatus */
CBC_SIG_ID_VPM = 707, /* VehiclePowerMode */
CBC_SIG_ID_VMD = 708, /* VehicleMode */
CBC_SIG_ID_VIS = 709, /* VehicleImmobilizerState */
CBC_SIG_ID_VGP = 710, /* VehicleGearshiftPosition */
CBC_SIG_ID_VAG = 711, /* VehicleActualGear */
CBC_SIG_ID_VFS = 712, /* VehicleFuelStatus */
CBC_SIG_ID_VFL = 713, /* VehicleFuelLevel */
CBC_SIG_ID_VDTE = 714, /* VehicleDistanceToEmpty */
CBC_SIG_ID_SWUB = 715, /* SteeringWheelUpBtn */
CBC_SIG_ID_SWRB = 716, /* SteeringWheelRightBtn */
CBC_SIG_ID_SWPB = 717, /* SteeringWheelPrevBtn */
CBC_SIG_ID_SWNB = 718, /* SteeringWheelNextBtn */
CBC_SIG_ID_SWLB = 719, /* SteeringWheelLeftBtn */
CBC_SIG_ID_SWDB = 720, /* SteeringWheelDownBtn */
CBC_SIG_ID_SWVA = 721, /* SteeringWheelVolumeAdjust */
CBC_SIG_ID_SWSCB = 722, /* SteeringWheelSpeechCtrlBtn */
CBC_SIG_ID_SWPLB = 723, /* SteeringWheelPlayBtn */
CBC_SIG_ID_SWPCB = 724, /* SteeringWheelPickupCallBtn */
CBC_SIG_ID_SWPSB = 725, /* SteeringWheelPauseBtn */
CBC_SIG_ID_SWHB = 726, /* SteeringWheelHomeBtn */
CBC_SIG_ID_SWEB = 727, /* SteeringWheelEnterBtn */
CBC_SIG_ID_SWECB = 728, /* SteeringWheelEndCallBtn */
CBC_SIG_ID_SWCB = 729, /* SteeringWheelConfigBtn */
CBC_SIG_ID_SWCLB = 730, /* SteeringWheelCancelBtn */
CBC_SIG_ID_SWAMB = 731, /* SteeringWheelAudioMuteBtn */
CBC_SIG_ID_RRSUB = 732, /* RightRearSeatUpBtn */
CBC_SIG_ID_RRSRB = 733, /* RightRearSeatRightBtn */
CBC_SIG_ID_RRSPB = 734, /* RightRearSeatPrevBtn */
CBC_SIG_ID_RRSP9B = 735, /* RightRearSeatPosition9Btn */
CBC_SIG_ID_RRSP8B = 736, /* RightRearSeatPosition8Btn */
CBC_SIG_ID_RRSP7B = 737, /* RightRearSeatPosition7Btn */
CBC_SIG_ID_RRSP6B = 738, /* RightRearSeatPosition6Btn */
CBC_SIG_ID_RRSP5B = 739, /* RightRearSeatPosition5Btn */
CBC_SIG_ID_RRSP4B = 740, /* RightRearSeatPosition4Btn */
CBC_SIG_ID_RRSP3B = 741, /* RightRearSeatPosition3Btn */
CBC_SIG_ID_RRSP2B = 742, /* RightRearSeatPosition2Btn */
CBC_SIG_ID_RRSP1B = 743, /* RightRearSeatPosition1Btn */
CBC_SIG_ID_RRSP0B = 744, /* RightRearSeatPosition0Btn */
CBC_SIG_ID_RRSNB = 745, /* RightRearSeatNextBtn */
CBC_SIG_ID_RRSLB = 746, /* RightRearSeatLeftBtn */
CBC_SIG_ID_RRSDB = 747, /* RightRearSeatDownBtn */
CBC_SIG_ID_RRSVA = 748, /* RightRearSeatVolumeAdjust */
CBC_SIG_ID_RSSSB = 749, /* RightRearSeatStopBtn */
CBC_SIG_ID_RRSSCB = 750, /* RightRearSeatSpeechCtrlBtn */
CBC_SIG_ID_RRSSB = 751, /* RightRearSeatSearchBtn */
CBC_SIG_ID_RRSRDB = 752, /* RightRearSeatRadioBtn */
CBC_SIG_ID_RRSPLB = 753, /* RightRearSeatPlayBtn */
CBC_SIG_ID_RRSPSB = 754, /* RightRearSeatPauseBtn */
CBC_SIG_ID_RRSOMB = 755, /* RightRearSeatOpticalMediaBtn */
CBC_SIG_ID_RRSHB = 756, /* RightRearSeatHomeBtn */
CBC_SIG_ID_RRSHDB = 757, /* RightRearSeatHarddiskBtn */
CBC_SIG_ID_RRSENB = 758, /* RightRearSeatEnterBtn */
CBC_SIG_ID_RRSEJB = 759, /* RightRearSeatEjectBtn */
CBC_SIG_ID_RRSCB = 760, /* RightRearSeatConfigBtn */
CBC_SIG_ID_RRSCLB = 761, /* RightRearSeatCancelBtn */
CBC_SIG_ID_RRSAMB = 762, /* RightRearSeatAudioMuteBtn */
CBC_SIG_ID_RVCS = 763, /* RearViewCameraStatus */
CBC_SIG_ID_PSS = 764, /* PdcSwitchStatus */
CBC_SIG_ID_PUB = 765, /* PassengerUpBtn */
CBC_SIG_ID_PRB = 766, /* PassengerRightBtn */
CBC_SIG_ID_PPB = 767, /* PassengerPrevBtn */
CBC_SIG_ID_PP9B = 768, /* PassengerPosition9Btn */
CBC_SIG_ID_PP8B = 769, /* PassengerPosition8Btn */
CBC_SIG_ID_PP7B = 770, /* PassengerPosition7Btn */
CBC_SIG_ID_PP6B = 771, /* PassengerPosition6Btn */
CBC_SIG_ID_PP5B = 772, /* PassengerPosition5Btn */
CBC_SIG_ID_PP4B = 773, /* PassengerPosition4Btn */
CBC_SIG_ID_PP3B = 774, /* PassengerPosition3Btn */
CBC_SIG_ID_PP2B = 775, /* PassengerPosition2Btn */
CBC_SIG_ID_PP1B = 776, /* PassengerPosition1Btn */
CBC_SIG_ID_PP0B = 777, /* PassengerPosition0Btn */
CBC_SIG_ID_PNB = 778, /* PassengerNextBtn */
CBC_SIG_ID_PLB = 779, /* PassengerLeftBtn */
CBC_SIG_ID_PDB = 780, /* PassengerDownBtn */
CBC_SIG_ID_PVA = 781, /* PassengerVolumeAdjust */
CBC_SIG_ID_PSB = 782, /* PassengerStopBtn */
CBC_SIG_ID_PSCB = 783, /* PassengerSpeechCtrlBtn */
CBC_SIG_ID_PSRB = 784, /* PassengerSearchBtn */
CBC_SIG_ID_PRDB = 785, /* PassengerRadioBtn */
CBC_SIG_ID_PPLB = 786, /* PassengerPlayBtn */
CBC_SIG_ID_PPSB = 787, /* PassengerPauseBtn */
CBC_SIG_ID_POMB = 788, /* PassengerOpticalMediaBtn */
CBC_SIG_ID_PHMB = 789, /* PassengerHomeBtn */
CBC_SIG_ID_PHDB = 790, /* PassengerHarddiskBtn */
CBC_SIG_ID_PENB = 791, /* PassengerEnterBtn */
CBC_SIG_ID_PEJB = 792, /* PassengerEjectBtn */
CBC_SIG_ID_PCFB = 793, /* PassengerConfigBtn */
CBC_SIG_ID_PCLB = 794, /* PassengerCancelBtn */
CBC_SIG_ID_PAMB = 795, /* PassengerAudioMuteBtn */
CBC_SIG_ID_LRSUB = 796, /* LeftRearSeatUpBtn */
CBC_SIG_ID_LRSRB = 797, /* LeftRearSeatRightBtn */
CBC_SIG_ID_LRSPB = 798, /* LeftRearSeatPrevBtn */
CBC_SIG_ID_LRSP9B = 799, /* LeftRearSeatPosition9Btn */
CBC_SIG_ID_LRSP8B = 800, /* LeftRearSeatPosition8Btn */
CBC_SIG_ID_LRSP7B = 801, /* LeftRearSeatPosition7Btn */
CBC_SIG_ID_LRSP6B = 802, /* LeftRearSeatPosition6Btn */
CBC_SIG_ID_LRSP5B = 803, /* LeftRearSeatPosition5Btn */
CBC_SIG_ID_LRSP4B = 804, /* LeftRearSeatPosition4Btn */
CBC_SIG_ID_LRSP3B = 805, /* LeftRearSeatPosition3Btn */
CBC_SIG_ID_LRSP2B = 806, /* LeftRearSeatPosition2Btn */
CBC_SIG_ID_LRSP1B = 807, /* LeftRearSeatPosition1Btn */
CBC_SIG_ID_LRSP0B = 808, /* LeftRearSeatPosition0Btn */
CBC_SIG_ID_LRSNB = 809, /* LeftRearSeatNextBtn */
CBC_SIG_ID_LRSLB = 810, /* LeftRearSeatLeftBtn */
CBC_SIG_ID_LRSDB = 811, /* LeftRearSeatDownBtn */
CBC_SIG_ID_LRSVA = 812, /* LeftRearSeatVolumeAdjust */
CBC_SIG_ID_LRSAMB = 813, /* LeftRearSeatAudioMuteBtn */
CBC_SIG_ID_LRSSB = 814, /* LeftRearSeatStopBtn */
CBC_SIG_ID_LRSSCB = 815, /* LeftRearSeatSpeechCtrlBtn */
CBC_SIG_ID_LRSSRB = 816, /* LeftRearSeatSearchBtn */
CBC_SIG_ID_LRSRDB = 817, /* LeftRearSeatRadioBtn */
CBC_SIG_ID_LRSPLB = 818, /* LeftRearSeatPlayBtn */
CBC_SIG_ID_LRSPSB = 819, /* LeftRearSeatPauseBtn */
CBC_SIG_ID_LRSOMB = 820, /* LeftRearSeatOpticalMediaBtn */
CBC_SIG_ID_LRSHMB = 821, /* LeftRearSeatHomeBtn */
CBC_SIG_ID_LRSHDB = 822, /* LeftRearSeatHarddiskBtn */
CBC_SIG_ID_LRSENB = 823, /* LeftRearSeatEnterBtn */
CBC_SIG_ID_LRSEJB = 824, /* LeftRearSeatEjectBtn */
CBC_SIG_ID_LRSCFB = 825, /* LeftRearSeatConfigBtn */
CBC_SIG_ID_LRSCLB = 826, /* LeftRearSeatCancelBtn */
CBC_SIG_ID_DVA = 827, /* DriverVolumeAdjust */
CBC_SIG_ID_DECSP = 828, /* DriverErgoCommanderSteps */
CBC_SIG_ID_DECST = 829, /* DriverErgoCommanderStatus */
CBC_SIG_ID_DAMB = 830, /* DriverAudioMuteBtn */
CBC_SIG_ID_DNB = 831, /* DriverNextBtn */
CBC_SIG_ID_DLB = 832, /* DriverLeftBtn */
CBC_SIG_ID_DDB = 833, /* DriverDownBtn */
CBC_SIG_ID_DUB = 834, /* DriverUpBtn */
CBC_SIG_ID_DRB = 835, /* DriverRightBtn */
CBC_SIG_ID_DPB = 836, /* DriverPrevBtn */
CBC_SIG_ID_DP9B = 837, /* DriverPosition9Btn */
CBC_SIG_ID_DP8B = 838, /* DriverPosition8Btn */
CBC_SIG_ID_DP7B = 839, /* DriverPosition7Btn */
CBC_SIG_ID_DP6B = 840, /* DriverPosition6Btn */
CBC_SIG_ID_DP5B = 841, /* DriverPosition5Btn */
CBC_SIG_ID_DP4B = 842, /* DriverPosition4Btn */
CBC_SIG_ID_DP3B = 843, /* DriverPosition3Btn */
CBC_SIG_ID_DP2B = 844, /* DriverPosition2Btn */
CBC_SIG_ID_DP1B = 845, /* DriverPosition1Btn */
CBC_SIG_ID_DP0B = 846, /* DriverPosition0Btn */
CBC_SIG_ID_DSCB = 847, /* DriverSpeechCtrlBtn */
CBC_SIG_ID_DSRB = 848, /* DriverSearchBtn */
CBC_SIG_ID_DRDB = 849, /* DriverRadioBtn */
CBC_SIG_ID_DSTB = 850, /* DriverStopBtn */
CBC_SIG_ID_DPLB = 851, /* DriverPlayBtn */
CBC_SIG_ID_DPSB = 852, /* DriverPauseBtn */
CBC_SIG_ID_DOMB = 853, /* DriverOpticalMediaBtn */
CBC_SIG_ID_DHMB = 854, /* DriverHomeBtn */
CBC_SIG_ID_DHHB = 855, /* DriverHarddiskBtn */
CBC_SIG_ID_DENB = 856, /* DriverEnterBtn */
CBC_SIG_ID_DEJB = 857, /* DriverEjectBtn */
CBC_SIG_ID_DCFB = 858, /* DriverConfigBtn */
CBC_SIG_ID_DCLB = 859, /* DriverCancelBtn */
CBC_SIG_ID_DSTG = 860, /* DoorStatusTailgate */
CBC_SIG_ID_DSRR = 861, /* DoorStatusRightRear */
CBC_SIG_ID_DSRF = 862, /* DoorStatusRightFront */
CBC_SIG_ID_DSLR = 863, /* DoorStatusLeftRear */
CBC_SIG_ID_DSLF = 864, /* DoorStatusLeftFront */
CBC_SIG_ID_DSEH = 865, /* DoorStatusEngineHood */
CBC_SIG_ID_CSSRRW = 866, /* ChildSafetyStatusRightRearWnd */
CBC_SIG_ID_CSSRR = 867, /* ChildSafetyStatusRightRear */
CBC_SIG_ID_CSSLRW = 868, /* ChildSafetyStatusLeftRearWnd */
CBC_SIG_ID_CSSLR = 869, /* ChildSafetyStatusLeftRear */
CBC_SIG_ID_ATEMP = 870, /* AmbientTemperature */
CBC_SIG_ID_ANSL = 871, /* AmbientNoiseLevel */
CBC_SIG_ID_ALTI = 872, /* AmbientLightIntensity */
CBC_SIG_ID_VSA = 873, /* VehicleSteeringAngle */
CBC_SIG_ID_LLAT = 875, /* LocationLatitude */
CBC_SIG_ID_LLON = 876, /* LocationLongitude */
CBC_SIG_ID_LALT = 877, /* LocationAltitude */
CBC_SIG_ID_LACC = 878, /* LocationAccuracy */
CBC_SIG_ID_LHED = 879, /* LocationHeading */
CBC_SIG_ID_LSPD = 880, /* LocationSpeed */
CBC_SIG_ID_LSRC = 881, /* LocationSource */
CBC_SIG_ID_LSCT = 882, /* LocationSourceCount */
CBC_SIG_ID_PDFB = 884, /* PdcDistanceFrontCenter */
CBC_SIG_ID_PDFL1 = 885, /* PdcDistanceFrontLeft1 */
CBC_SIG_ID_PDFL2 = 886, /* PdcDistanceFrontLeft2 */
CBC_SIG_ID_PDFL3 = 887, /* PdcDistanceFrontLeft3 */
CBC_SIG_ID_PDFR1 = 888, /* PdcDistanceFrontRight1 */
CBC_SIG_ID_PDFR2 = 889, /* PdcDistanceFrontRight2 */
CBC_SIG_ID_PDFR3 = 890, /* PdcDistanceFrontRight3 */
CBC_SIG_ID_PDRC = 892, /* PdcDistanceRearCenter */
CBC_SIG_ID_PDRL1 = 893, /* PdcDistanceRearLeft1 */
CBC_SIG_ID_PDRL2 = 894, /* PdcDistanceRearLeft2 */
CBC_SIG_ID_PDRL3 = 895, /* PdcDistanceRearLeft3 */
CBC_SIG_ID_PDRR1 = 896, /* PdcDistanceRearRight1 */
CBC_SIG_ID_PDRR2 = 897, /* PdcDistanceRearRight2 */
CBC_SIG_ID_PDRR3 = 898, /* PdcDistanceRearRight3 */
CBC_SIG_ID_VXA = 900, /* VehicleXAcceleration */
CBC_SIG_ID_VYA = 901, /* VehicleYAcceleration */
CBC_SIG_ID_VZA = 902, /* VehicleZAcceleration */
CBC_SIG_ID_IACR = 906, /* IocAppChecksumResponse */
CBC_SIG_ID_IWCR = 907, /* IocWfChecksumResponse */
CBC_SIG_ID_IFCR = 908, /* IocFblChecksumResponse */
CBC_SIG_ID_GYROX = 911, /* GyroX */
CBC_SIG_ID_GYROY = 912, /* GyroY */
CBC_SIG_ID_IAVB = 915, /* IocAppVersionBuild */
CBC_SIG_ID_IAVMJ = 916, /* IocAppVersionMajor */
CBC_SIG_ID_RAV = 919, /* RuntimeAverageValue */
CBC_SIG_ID_RMAX = 920, /* RuntimeMaxValue */
CBC_SIG_ID_RMIN = 921, /* RuntimeMinValue */
CBC_SIG_ID_ACCX = 924, /* AccX */
CBC_SIG_ID_ACCY = 925, /* AccY */
CBC_SIG_ID_ACCZ = 926, /* AccZ */
CBC_SIG_ID_MDS = 927, /* MrbDipSwitch */
CBC_SIG_ID_FCP = 928, /* FanCurrentRpm */
CBC_SIG_ID_GYROZ = 929, /* GyroZ */
CBC_SIG_ID_IAVMN = 930, /* IocAppVersionMinor */
CBC_SIG_ID_RTST = 931, /* RuntimeSamplesTaken */
};
/*
* CBC rx group identity definition.
*/
enum cbc_rx_group_id {
CBC_GRP_ID_0 = 0,
};
/*
* CBC tx group identity definition.
*/
enum cbc_tx_group_id {
CBC_GRP_ID_LOC = 874, /* Location */
CBC_GRP_ID_PDF = 883, /* PdcDistanceFront */
CBC_GRP_ID_PDR = 891, /* PdcDistanceRear */
CBC_GRP_ID_VAC = 899, /* VehicleAcceleration */
CBC_GRP_ID_GAS = 909, /* GyroAbs */
CBC_GRP_ID_IVR = 913, /* IocVersionResponse */
CBC_GRP_ID_IRM = 917, /* IocRuntimeMeasurementResultsResponse */
CBC_GRP_ID_GAC = 922, /* GyroAcc */
};
/*
* IOC channels definition.
* Include all native CBC channels and one virtual UART
*/
enum ioc_ch_id {
IOC_NATIVE_PMT, /* Native /dev/cbc-pmt */
IOC_NATIVE_LFCC, /* Native /dev/cbc-lifecycle */
IOC_NATIVE_SIGNAL, /* Native /dev/cbc-signals */
IOC_NATIVE_ESIG, /* Native /dev/cbc-early-signals */
IOC_NATIVE_DIAG, /* Native /dev/cbc-diagnosis */
IOC_NATIVE_DLT, /* Native /dev/cbc-dlt */
IOC_NATIVE_LINDA, /* Native /dev/cbc-linda */
IOC_NATIVE_RAW0, /* Native /dev/cbc-raw0 */
IOC_NATIVE_RAW1, /* Native /dev/cbc-raw1 */
IOC_NATIVE_RAW2, /* Native /dev/cbc-raw2 */
IOC_NATIVE_RAW3, /* Native /dev/cbc-raw3 */
IOC_NATIVE_RAW4, /* Native /dev/cbc-raw4 */
IOC_NATIVE_RAW5, /* Native /dev/cbc-raw5 */
IOC_NATIVE_RAW6, /* Native /dev/cbc-raw6 */
IOC_NATIVE_RAW7, /* Native /dev/cbc-raw7 */
IOC_NATIVE_RAW8, /* Native /dev/cbc-raw8 */
IOC_NATIVE_RAW9, /* Native /dev/cbc-raw9 */
IOC_NATIVE_RAW10, /* Native /dev/cbc-raw10 */
IOC_NATIVE_RAW11, /* Native /dev/cbc-raw11 */
IOC_VIRTUAL_UART, /* Virtual UART */
IOC_NATIVE_DUMMY0, /* Native fake lifecycle channel */
IOC_NATIVE_DUMMY1, /* Native fake signal channel */
IOC_NATIVE_DUMMY2, /* Native fake OEM raw channel */
IOC_CH_MAX
};
/*
* CBC priority is used to pack CBC address layer header.
*/
enum cbc_prio {
CBC_PRIO_LOW = 2,
CBC_PRIO_MEDIUM = 3,
CBC_PRIO_HIGH = 6
};
/*
* CBC invalidation types.
*/
enum cbc_inval_type {
CBC_INVAL_T_SIGNAL,
CBC_INVAL_T_GROUP
};
/*
* CBC signal and group state flag.
*/
enum cbc_flag {
CBC_INACTIVE,
CBC_ACTIVE
};
/*
* CBC queue types.
* Rx queue buffers cbc_requests for virtual UART -> native CBC channels.
* Tx queue buffers cbc_requests for native CBC cdevs -> virtual UART.
* Free queue holds the cbc_requests that are not in the rx/tx queues, ready for
* new incoming data.
*/
enum cbc_queue_type {
CBC_QUEUE_T_RX,
CBC_QUEUE_T_TX,
CBC_QUEUE_T_FREE
};
/*
* CBC request types.
*/
enum cbc_request_type {
CBC_REQ_T_PROT, /* CBC protocol request */
CBC_REQ_T_VMM_S3, /* VMM suspend request */
CBC_REQ_T_VMM_S5, /* VMM shutdown request */
CBC_REQ_T_SOC /* SOC state update request */
};
/*
* Open the channel and add it to the IOC epoll event data if the channel
* state is ON; otherwise ignore it.
*/
enum ioc_ch_stat {
IOC_CH_OFF,
IOC_CH_ON
};
struct cbc_signal {
uint16_t id; /* CBC signal id number */
uint16_t len; /* CBC signal length in bits not bytes */
enum cbc_flag flag; /* CBC signal active/inactive flag */
};
struct cbc_group {
uint16_t id; /* CBC group id number */
enum cbc_flag flag; /* CBC group active/inactive flag */
};
struct wlist_signal {
uint16_t id;
struct cbc_signal *sig;
};
struct wlist_group {
uint16_t id;
struct cbc_group *grp;
};
/*
* CBC ring is used to buffer bytes before building one complete CBC frame.
*/
struct cbc_ring {
uint32_t head;
uint32_t tail;
uint8_t buf[CBC_RING_BUFFER_SIZE];
};
/*
* CBC configuration contains signal/group tables and whitelist tables.
*/
struct cbc_config {
int32_t cbc_sig_num; /* CBC signals number */
int32_t cbc_grp_num; /* CBC groups number */
int32_t wlist_sig_num; /* Whitelist signals number */
int32_t wlist_grp_num; /* Whitelist groups number */
struct cbc_signal *cbc_sig_tbl; /* CBC signals table */
struct cbc_group *cbc_grp_tbl; /* CBC groups table */
struct wlist_signal *wlist_sig_tbl; /* Whitelist signals table */
struct wlist_group *wlist_grp_tbl; /* Whitelist groups table */
};
/*
* IOC channel information.
*/
struct ioc_ch_info {
int32_t fd; /* IOC channel fd */
char name[32]; /* IOC channel name */
enum ioc_ch_id id; /* IOC channel identity number */
enum ioc_ch_stat stat; /* IOC channel state */
};
/*
* CBC request is the main structure used for communication between threads.
*/
struct cbc_request {
int32_t srv_len; /* Service frame length */
int32_t link_len; /* Link frame length */
enum ioc_ch_id id; /* Channel id number */
enum cbc_request_type rtype; /* Request types */
uint8_t buf[CBC_MAX_FRAME_SIZE];
SIMPLEQ_ENTRY(cbc_request) me_queue;
};
/*
* CBC packet is the main structure used for CBC protocol processing.
*/
struct cbc_pkt {
uint8_t soc_active; /* Record soc state */
uint8_t hb_state; /* Record Heartbeat state */
uint32_t reason; /* Record current wakeup reason */
uint32_t boot_reason; /* Record boot up wakeup reason */
struct cbc_request *req; /* CBC packet data */
struct cbc_config *cfg; /* CBC and whitelist configurations */
enum cbc_queue_type qtype; /* Routes cbc_request to queue */
};
/*
* CBC simple queue head definition.
*/
SIMPLEQ_HEAD(cbc_qhead, cbc_request);
/*
* IOC device structure.
* IOC device is a virtual device, and the DM will manage virtual devices
* through a common virtual device data structure in the future, so the
* ioc_dev definition is exported in this IOC header file.
*/
struct ioc_dev {
char name[16]; /* Core thread name */
int closing; /* Close IOC mediator device flag */
int epfd; /* Epoll fd */
struct epoll_event *evts; /* Epoll events table */
struct cbc_request *pool; /* CBC requests pool */
struct cbc_ring ring; /* Ring buffer */
pthread_t tid; /* Core thread id */
struct cbc_qhead free_qhead; /* Free queue head */
pthread_mutex_t free_mtx; /* Free queue mutex */
char rx_name[16]; /* Rx thread name */
struct cbc_qhead rx_qhead; /* Rx queue head */
struct cbc_config rx_config; /* Rx configuration */
pthread_t rx_tid;
pthread_cond_t rx_cond;
pthread_mutex_t rx_mtx;
void (*ioc_dev_rx)(struct cbc_pkt *pkt);
char tx_name[16]; /* Tx thread name */
struct cbc_qhead tx_qhead; /* Tx queue head */
struct cbc_config tx_config; /* Tx configuration */
pthread_t tx_tid;
pthread_cond_t tx_cond;
pthread_mutex_t tx_mtx;
void (*ioc_dev_tx)(struct cbc_pkt *pkt);
};
/* Parse IOC parameters */
int ioc_parse(const char *opts);
struct vmctx;
/* IOC mediator common ops */
int ioc_init(struct vmctx *ctx);
void ioc_deinit(struct vmctx *ctx);
/* Build a cbc_request and send it to CBC protocol stack */
void ioc_build_request(struct ioc_dev *ioc, int32_t link_len, int32_t srv_len);
/* Send data to native CBC cdevs and virtual PTY(UART DM) device */
int ioc_ch_xmit(enum ioc_ch_id id, const uint8_t *buf, size_t size);
/* Main handlers of CBC protocol stack */
void cbc_rx_handler(struct cbc_pkt *pkt);
void cbc_tx_handler(struct cbc_pkt *pkt);
/* Copy buf to the ring buffer */
int cbc_copy_to_ring(const uint8_t *buf, size_t size, struct cbc_ring *ring);
/* Build a cbc_request based on CBC link layer protocol */
void cbc_unpack_link(struct ioc_dev *ioc);
/* Whitelist initialization */
void wlist_init_signal(struct cbc_signal *cbc_tbl, size_t cbc_size,
struct wlist_signal *wlist_tbl, size_t wlist_size);
void wlist_init_group(struct cbc_group *cbc_tbl, size_t cbc_size,
struct wlist_group *wlist_tbl, size_t wlist_size);
/* Set CBC log file */
void cbc_set_log_file(FILE *f);
#endif
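A hedged sketch of the 4-byte heartbeat service payload documented earlier in this header (SystemControlCMD | Command | SUS_STAT Action | Resv); routing it straight to the lifecycle channel with ioc_ch_xmit() is purely illustrative, since the real mediator builds a full CBC frame first:

static void send_heartbeat_active(void)
{
	uint8_t payload[4];

	payload[0] = CBC_SC_HB;		/* system control command: heartbeat */
	payload[1] = CBC_HB_ACTIVE;	/* heartbeat state */
	payload[2] = CBC_SS_INVALID;	/* SUS_STAT action: none */
	payload[3] = 0;			/* reserved */

	ioc_ch_xmit(IOC_NATIVE_LFCC, payload, sizeof(payload));
}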

View File

@@ -0,0 +1,44 @@
/*-
* Copyright (c) 2010 Marcel Moolenaar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _IODEV_H_
#define _IODEV_H_
#define IODEV_PIO_READ 0
#define IODEV_PIO_WRITE 1
struct iodev_pio_req {
u_int access;
u_int port;
u_int width;
u_int val;
};
#define IODEV_PIO _IOWR('I', 0, struct iodev_pio_req)
#endif /* _IODEV_H_ */
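A hedged sketch of issuing the port-I/O request defined above; the file descriptor is assumed to be open on whatever character device implements IODEV_PIO (on FreeBSD this is /dev/io), and <sys/ioctl.h> is presumed included:

static unsigned int pio_read8(int fd, unsigned int port)
{
	struct iodev_pio_req req = {
		.access = IODEV_PIO_READ,
		.port   = port,
		.width  = 1,
		.val    = 0,
	};

	if (ioctl(fd, IODEV_PIO, &req) < 0)
		return ~0u;
	return req.val;
}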

46
devicemodel/include/irq.h Normal file
View File

@@ -0,0 +1,46 @@
/*-
* Copyright (c) 2014 Hudson River Trading LLC
* Written by: John H. Baldwin <jhb@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _IRQ_H_
#define _IRQ_H_
struct pci_vdev;
void pci_irq_assert(struct pci_vdev *pi);
void pci_irq_deassert(struct pci_vdev *pi);
void pci_irq_init(struct vmctx *ctx);
void pci_irq_deinit(struct vmctx *ctx);
void pci_irq_reserve(int irq);
void pci_irq_use(int irq);
int pirq_alloc_pin(struct pci_vdev *pi);
int pirq_irq(int pin);
uint8_t pirq_read(int pin);
void pirq_write(struct vmctx *ctx, int pin, uint8_t val);
#endif /* _IRQ_H_ */

71
devicemodel/include/lpc.h Normal file
View File

@@ -0,0 +1,71 @@
/*-
* Copyright (c) 2013 Neel Natu <neel@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _LPC_H_
#define _LPC_H_
typedef void (*lpc_write_dsdt_t)(void);
struct lpc_dsdt {
lpc_write_dsdt_t handler;
};
#define LPC_DSDT(handler) \
static struct lpc_dsdt __CONCAT(__lpc_dsdt, __LINE__) = \
{ \
(handler), \
}; \
DATA_SET(lpc_dsdt_set, __CONCAT(__lpc_dsdt, __LINE__))
enum lpc_sysres_type {
LPC_SYSRES_IO,
LPC_SYSRES_MEM
};
struct lpc_sysres {
enum lpc_sysres_type type;
uint32_t base;
uint32_t length;
};
#define LPC_SYSRES(type, base, length) \
static struct lpc_sysres __CONCAT(__lpc_sysres, __LINE__) = \
{ \
(type), \
(base), \
(length) \
}; \
DATA_SET(lpc_sysres_set, __CONCAT(__lpc_sysres, __LINE__))
#define SYSRES_IO(base, length) LPC_SYSRES(LPC_SYSRES_IO, base, length)
#define SYSRES_MEM(base, length) LPC_SYSRES(LPC_SYSRES_MEM, base, length)
int lpc_device_parse(const char *opt);
char *lpc_pirq_name(int pin);
void lpc_pirq_routed(void);
#endif
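A hedged sketch of the registration macros above: a device model contributes a DSDT fragment writer and claims a fixed I/O range (the DATA_SET linker-set machinery is provided elsewhere in the tree, and the 0x3f8/8 resource is only a placeholder):

static void fake_uart_write_dsdt(void)
{
	/* emit the ACPI DSDT entries for this device here */
}
LPC_DSDT(fake_uart_write_dsdt);

/* claim 8 bytes of I/O space starting at 0x3f8 */
SYSRES_IO(0x3f8, 8);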

View File

@@ -0,0 +1,40 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef _MACROS_H_
#define _MACROS_H_
#undef __CONCAT
#define _CONCAT_(a, b) a ## b
#define __CONCAT(a, b) _CONCAT_(a, b)
#endif

View File

@@ -0,0 +1,115 @@
/*-
* Copyright (c) 1990 The Regents of the University of California.
* All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* William Jolitz.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: @(#)rtc.h 7.1 (Berkeley) 5/12/91
* $FreeBSD$
*/
#ifndef _I386_ISA_RTC_H_
#define _I386_ISA_RTC_H_ 1
/*
* MC146818 RTC Register locations
*/
#define RTC_SEC 0x00 /* seconds */
#define RTC_SECALRM 0x01 /* seconds alarm */
#define RTC_MIN 0x02 /* minutes */
#define RTC_MINALRM 0x03 /* minutes alarm */
#define RTC_HRS 0x04 /* hours */
#define RTC_HRSALRM 0x05 /* hours alarm */
#define RTC_WDAY 0x06 /* week day */
#define RTC_DAY 0x07 /* day of month */
#define RTC_MONTH 0x08 /* month of year */
#define RTC_YEAR 0x09 /* year */
#define RTC_STATUSA 0x0a /* status register A */
#define RTCSA_TUP 0x80 /* time update, don't look now */
#define RTCSA_RESET 0x70 /* reset divider */
#define RTCSA_DIVIDER 0x20 /* divider correct for 32768 Hz */
#define RTCSA_8192 0x03 /* 8192 Hz interrupt */
#define RTCSA_4096 0x04
#define RTCSA_2048 0x05
#define RTCSA_1024 0x06 /* default for profiling */
#define RTCSA_PROF RTCSA_1024
#define RTC_PROFRATE 1024
#define RTCSA_512 0x07
#define RTCSA_256 0x08
#define RTCSA_128 0x09
#define RTCSA_NOPROF RTCSA_128
#define RTC_NOPROFRATE 128
#define RTCSA_64 0x0a
#define RTCSA_32 0x0b /* 32 Hz interrupt */
#define RTC_STATUSB 0x0b /* status register B */
#define RTCSB_DST 0x01 /* USA Daylight Savings Time enable */
#define RTCSB_24HR 0x02 /* 0 = 12 hours, 1 = 24 hours */
#define RTCSB_BCD 0x04 /* 0 = BCD, 1 = Binary coded time */
#define RTCSB_SQWE 0x08 /* 1 = output square wave at SQW pin */
#define RTCSB_UINTR 0x10 /* 1 = enable update-ended interrupt */
#define RTCSB_AINTR 0x20 /* 1 = enable alarm interrupt */
#define RTCSB_PINTR 0x40 /* 1 = enable periodic clock interrupt */
#define RTCSB_HALT 0x80 /* stop clock updates */
#define RTC_INTR 0x0c /* status register C (R) interrupt source */
#define RTCIR_UPDATE 0x10 /* update intr */
#define RTCIR_ALARM 0x20 /* alarm intr */
#define RTCIR_PERIOD 0x40 /* periodic intr */
#define RTCIR_INT 0x80 /* interrupt output signal */
#define RTC_STATUSD 0x0d /* status register D (R) Lost Power */
#define RTCSD_PWR 0x80 /* clock power OK */
#define RTC_DIAG 0x0e /* status register E - bios diagnostic */
#define RTCDG_BITS "\020\010clock_battery\007ROM_cksum\006config_unit\005memory_size\004fixed_disk\003invalid_time"
#define RTC_RESET 0x0f /* status register F - reset code byte */
#define RTCRS_RST 0x00 /* normal reset */
#define RTCRS_LOAD 0x04 /* load system */
#define RTC_FDISKETTE 0x10 /* diskette drive type in upper/lower nibble */
#define RTCFDT_NONE 0 /* none present */
#define RTCFDT_360K 0x10 /* 360K */
#define RTCFDT_12M 0x20 /* 1.2M */
#define RTCFDT_720K 0x30 /* 720K */
#define RTCFDT_144M 0x40 /* 1.44M */
#define RTCFDT_288M_1 0x50 /* 2.88M, some BIOSes */
#define RTCFDT_288M 0x60 /* 2.88M */
#define RTC_BASELO 0x15 /* low byte of basemem size */
#define RTC_BASEHI 0x16 /* high byte of basemem size */
#define RTC_EXTLO 0x17 /* low byte of extended mem size */
#define RTC_EXTHI 0x18 /* high byte of extended mem size */
#define RTC_CENTURY 0x32 /* current century */
#endif /* _I386_ISA_RTC_H_ */
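
These are offsets into the MC146818 CMOS register file, conventionally reached on PC platforms through the index/data port pair 0x70/0x71. A hedged sketch of reading the wall-clock time with these definitions; the port numbers and the outb()/inb() helpers from <sys/io.h> are assumptions about the host environment, and the values read back are BCD-encoded unless RTCSB_BCD is set:

#include <stdint.h>
#include <sys/io.h>               /* outb()/inb(); requires I/O port privileges */

static uint8_t cmos_read(uint8_t reg)
{
	outb(reg, 0x70);          /* select the RTC register via the index port */
	return inb(0x71);         /* read it back through the data port */
}

static void rtc_read_time(uint8_t *hrs, uint8_t *min, uint8_t *sec)
{
	/* RTCSA_TUP means an update is in progress and values may be torn */
	while (cmos_read(RTC_STATUSA) & RTCSA_TUP)
		;
	*sec = cmos_read(RTC_SEC);
	*min = cmos_read(RTC_MIN);
	*hrs = cmos_read(RTC_HRS);
}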

58
devicemodel/include/mem.h Normal file
View File

@@ -0,0 +1,58 @@
/*-
* Copyright (c) 2012 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _MEM_H_
#define _MEM_H_
struct vmctx;
typedef int (*mem_func_t)(struct vmctx *ctx, int vcpu, int dir, uint64_t addr,
int size, uint64_t *val, void *arg1, long arg2);
struct mem_range {
const char *name;
int flags;
mem_func_t handler;
void *arg1;
long arg2;
uint64_t base;
uint64_t size;
};
#define MEM_F_READ 0x1
#define MEM_F_WRITE 0x2
#define MEM_F_RW 0x3
#define MEM_F_IMMUTABLE 0x4 /* mem_range cannot be unregistered */
void init_mem(void);
int emulate_mem(struct vmctx *ctx, struct mmio_request *mmio_req);
int register_mem(struct mem_range *memp);
int register_mem_fallback(struct mem_range *memp);
int unregister_mem(struct mem_range *memp);
int unregister_mem_fallback(struct mem_range *memp);
#endif /* _MEM_H_ */
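
A device emulates an MMIO window by filling in a mem_range and handing it to register_mem(); emulate_mem() then routes guest accesses in [base, base+size) to the handler. A minimal hedged sketch exposing one 32-bit scratch register; the base address is illustrative and the interpretation of the 'dir' argument is an assumption about the emulation core:

static uint32_t scratch_reg;

static int
scratch_handler(struct vmctx *ctx, int vcpu, int dir, uint64_t addr,
		int size, uint64_t *val, void *arg1, long arg2)
{
	/* 'dir' distinguishes reads from writes; its exact encoding is defined
	 * by the caller, so treating MEM_F_WRITE as "write" here is illustrative. */
	if (dir == MEM_F_WRITE)
		scratch_reg = (uint32_t)*val;
	else
		*val = scratch_reg;
	return 0;
}

static struct mem_range scratch_mr = {
	.name    = "scratch",
	.flags   = MEM_F_RW,
	.handler = scratch_handler,
	.base    = 0xD0000000,    /* illustrative guest-physical base */
	.size    = 0x1000,
};
/* during init: register_mem(&scratch_mr); on teardown: unregister_mem(&scratch_mr); */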

View File

@@ -0,0 +1,60 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _MEVENT_H_
#define _MEVENT_H_
enum ev_type {
EVF_READ,
EVF_WRITE,
EVF_TIMER, /* Not supported yet */
EVF_SIGNAL /* Not supported yet */
};
char *vmname;
struct mevent;
struct mevent *mevent_add(int fd, enum ev_type type,
void (*func)(int, enum ev_type, void *),
void *param);
int mevent_enable(struct mevent *evp);
int mevent_disable(struct mevent *evp);
int mevent_delete(struct mevent *evp);
int mevent_delete_close(struct mevent *evp);
int mevent_notify(void);
void mevent_dispatch(void);
int mevent_init(void);
void mevent_deinit(void);
#define list_foreach_safe(var, head, field, tvar) \
for ((var) = LIST_FIRST((head)); \
(var) && ((tvar) = LIST_NEXT((var), field), 1);\
(var) = (tvar))
#endif /* _MEVENT_H_ */
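
mevent is the device model's event loop: mevent_add() associates a callback with readiness on a file descriptor and mevent_dispatch() runs the loop. A brief hedged sketch that echoes data back on an already-connected socket (socket setup and error handling omitted):

#include <unistd.h>

static void
echo_cb(int fd, enum ev_type type, void *param)
{
	char buf[256];
	ssize_t n = read(fd, buf, sizeof(buf));

	if (n > 0)
		(void)write(fd, buf, n);   /* echo the bytes back to the peer */
}

/* during init, with sock_fd already connected:
 *     struct mevent *ev = mevent_add(sock_fd, EVF_READ, echo_cb, NULL);
 * and on the main thread:
 *     mevent_dispatch();
 */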

View File

@@ -0,0 +1,82 @@
/*
* Project Acrn
* Acrn-dm-monitor
*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*
* Author: TaoYuhong <yuhong.tao@intel.com>
*/
/* acrn-dm monitor APIS */
#ifndef MONITOR_H
#define MONITOR_H
#include "monitor_msg.h"
int monitor_init(struct vmctx *ctx);
void monitor_close(void);
/**
* monitor_broadcast()
* Inside acrn-dm, use monitor_broadcast() to send a vmm_msg to all connected clients.
* @arguments:
* @msg: any valid vmm_msg data structure that you want to send
*/
int monitor_broadcast(struct vmm_msg *msg);
/* msg_sender is visible to, and may be modified by, the msg handler */
struct msg_sender {
int fd; /* the msg handler can reply to this fd */
char name[CLIENT_NAME_LEN]; /* the client has a chance to name itself */
int broadcast;
};
/**
* monitor_register_handler()
* Register a vmm_msg handler inside acrn-dm: when a client sends a vmm_msg to
* acrn-dm, the corresponding callback is invoked.
* @arguments:
* @msg: msg->msgid must be set; it selects the message ID the handler is added for.
* @callback: invoked when a received message matches msg->msgid. The callback is
* passed: (a) msg, the message received from the socket; (b) sender, identifying
* who sent the message - anything written to sender->fd can be read back by the
* client socket, and only valid vmm_msg data should be written there; (c) priv,
* the value passed to monitor_register_handler().
* @priv: callback will see this value.
*/
int monitor_register_handler(struct vmm_msg *msg,
void (*callback) (struct vmm_msg * msg,
struct msg_sender * sender,
void *priv), void *priv);
#endif
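
A hedged sketch of registering a handler against one of the message IDs from monitor_msg.h; the stop semantics are illustrative, only the registration pattern follows the declarations above:

#include <stdio.h>

static void
stop_handler(struct vmm_msg *msg, struct msg_sender *sender, void *priv)
{
	/* priv is whatever was passed to monitor_register_handler() below;
	 * anything written to sender->fd is read back by the client socket. */
	fprintf(stderr, "stop requested by client %s\n", sender->name);
}

static struct vmm_msg stop_filter = { .msgid = REQ_STOP };

/* during init, after monitor_init(ctx):
 *     monitor_register_handler(&stop_filter, stop_handler, NULL);
 */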

View File

@@ -0,0 +1,107 @@
/*
* Project Acrn
* Acrn-dm-monitor
*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*
* Author: TaoYuhong <yuhong.tao@intel.com>
*/
/* This file is shared by anyone who wants to talk to acrn-dm */
#ifndef MONITOR_MSG_H
#define MONITOR_MSG_H
enum msgid {
REQ_STARTALL = 0, /* SOS lifecycle service -> VM Mngr */
REQ_PAUSEALL, /* SOS lifecycle service -> VM Mngr */
REQ_START, /* VM Mngr -> ACRN-DM(vm) */
REQ_STOP, /* VM Mngr -> ACRN-DM(vm) */
REQ_PAUSE,
REQ_RESUME, /* VM Mngr -> ACRN-DM(vm) */
REQ_RESET,
REQ_QUERY,
NTF_ALLSTOPPED, /* VM Mngr -> SOS lifecycle service */
NTF_ALLPAUSED, /* VM Mngr -> SOS lifecycle service */
NTF_STARTED, /* ACRN-DM -> VM Mngr */
NTF_STOPPED, /* ACRN-DM -> VM Mngr */
NTF_PAUSED, /* ACRN-DM -> VM Mngr */
NTF_RESUMED, /* ACRN-DM -> VM Mngr */
MSG_STR,
MSG_HANDSHAKE, /* handshake */
MSGID_MAX
};
#define VMM_MSG_MAGIC 0x67736d206d6d76 /* that is char[8] "vmm msg", on X86 */
#define VMM_MSG_MAX_LEN 4096
struct vmm_msg {
unsigned long long magic; /* Make sure you get a vmm_msg */
unsigned int msgid;
unsigned long timestamp;
size_t len; /* vmm_msg + payload size */
char payload[0];
};
/* Practical messages and helpers.
* Each message defined in enum msgid should have its own data structure,
* shared by acrn-dm and vm-mngr, so that the message data structure can
* be recognized by both sides of the communication.
*/
/* For testing, generate a message that carries a string.
* e.g., VMM_MSG_STR(hello_msg, "Hello\n") will create hello_msg,
* then you can write(sock_fd, &hello_msg, sizeof(hello_msg))
*/
#define VMM_MSG_STR(var, str) \
struct vmm_msg_##var { \
struct vmm_msg vmsg; \
char raw[sizeof(str)]; \
} var = { \
.vmsg = { \
.magic = VMM_MSG_MAGIC, \
.msgid = MSG_STR, \
.len = sizeof(struct vmm_msg_##var), \
}, \
.raw = str, \
}
#define CLIENT_NAME_LEN 16
struct vmm_msg_handshake {
struct vmm_msg vmsg;
char name[CLIENT_NAME_LEN]; /* name should be a string ending with '\0' */
/* can be "acrnd", "vm-mngr" or "acrnctl" */
int broadcast; /* if set, allow acrn-dm to send broadcast */
/* messages to this client */
};
#endif
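
A hedged sketch of the handshake a client might send right after connecting, built from the structures above; the client name and the assumption that the message is written raw over the socket are illustrative:

#include <string.h>
#include <unistd.h>

static int send_handshake(int sock_fd)
{
	struct vmm_msg_handshake hs = {
		.vmsg = {
			.magic = VMM_MSG_MAGIC,
			.msgid = MSG_HANDSHAKE,
			.len   = sizeof(hs),
		},
		.broadcast = 1,   /* ask acrn-dm to include this client in broadcasts */
	};

	strncpy(hs.name, "acrnctl", CLIENT_NAME_LEN - 1);
	return write(sock_fd, &hs, sizeof(hs)) == (ssize_t)sizeof(hs) ? 0 : -1;
}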

View File

@@ -0,0 +1,188 @@
/*-
* Copyright (c) 1996, by Steve Passe
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. The name of the developer may NOT be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef __MPTABLE_H__
#define __MPTABLE_H__
enum busTypes {
NOBUS = 0,
CBUS = 1,
CBUSII = 2,
EISA = 3,
ISA = 6,
MCA = 9,
PCI = 13,
XPRESS = 18,
MAX_BUSTYPE = 18,
UNKNOWN_BUSTYPE = 0xff
};
/* MP Floating Pointer Structure */
typedef struct MPFPS {
uint8_t signature[4];
uint32_t pap;
uint8_t length;
uint8_t spec_rev;
uint8_t checksum;
uint8_t config_type;
uint8_t mpfb2;
uint8_t mpfb3;
uint8_t mpfb4;
uint8_t mpfb5;
} __attribute__((packed)) *mpfps_t;
#define MPFB2_IMCR_PRESENT 0x80
#define MPFB2_MUL_CLK_SRCS 0x40
/* MP Configuration Table Header */
typedef struct MPCTH {
uint8_t signature[4];
uint16_t base_table_length;
uint8_t spec_rev;
uint8_t checksum;
uint8_t oem_id[8];
uint8_t product_id[12];
uint32_t oem_table_pointer;
uint16_t oem_table_size;
uint16_t entry_count;
uint32_t apic_address;
uint16_t extended_table_length;
uint8_t extended_table_checksum;
uint8_t reserved;
} __attribute__((packed)) *mpcth_t;
/* Base table entries */
#define MPCT_ENTRY_PROCESSOR 0
#define MPCT_ENTRY_BUS 1
#define MPCT_ENTRY_IOAPIC 2
#define MPCT_ENTRY_INT 3
#define MPCT_ENTRY_LOCAL_INT 4
typedef struct PROCENTRY {
uint8_t type;
uint8_t apic_id;
uint8_t apic_version;
uint8_t cpu_flags;
uint32_t cpu_signature;
uint32_t feature_flags;
uint32_t reserved1;
uint32_t reserved2;
} __attribute__((packed)) *proc_entry_ptr;
#define PROCENTRY_FLAG_EN 0x01
#define PROCENTRY_FLAG_BP 0x02
typedef struct BUSENTRY {
uint8_t type;
uint8_t bus_id;
uint8_t bus_type[6];
} __attribute__((packed)) *bus_entry_ptr;
typedef struct IOAPICENTRY {
uint8_t type;
uint8_t apic_id;
uint8_t apic_version;
uint8_t apic_flags;
uint32_t apic_address;
} __attribute__((packed)) *io_apic_entry_ptr;
#define IOAPICENTRY_FLAG_EN 0x01
typedef struct INTENTRY {
uint8_t type;
uint8_t int_type;
uint16_t int_flags;
uint8_t src_bus_id;
uint8_t src_bus_irq;
uint8_t dst_apic_id;
uint8_t dst_apic_int;
} __attribute__((packed)) *int_entry_ptr;
#define INTENTRY_TYPE_INT 0
#define INTENTRY_TYPE_NMI 1
#define INTENTRY_TYPE_SMI 2
#define INTENTRY_TYPE_EXTINT 3
#define INTENTRY_FLAGS_POLARITY 0x3
#define INTENTRY_FLAGS_POLARITY_CONFORM 0x0
#define INTENTRY_FLAGS_POLARITY_ACTIVEHI 0x1
#define INTENTRY_FLAGS_POLARITY_ACTIVELO 0x3
#define INTENTRY_FLAGS_TRIGGER 0xc
#define INTENTRY_FLAGS_TRIGGER_CONFORM 0x0
#define INTENTRY_FLAGS_TRIGGER_EDGE 0x4
#define INTENTRY_FLAGS_TRIGGER_LEVEL 0xc
/* Extended table entries */
typedef struct EXTENTRY {
uint8_t type;
uint8_t length;
} __attribute__((packed)) *ext_entry_ptr;
#define MPCT_EXTENTRY_SAS 0x80
#define MPCT_EXTENTRY_BHD 0x81
#define MPCT_EXTENTRY_CBASM 0x82
typedef struct SASENTRY {
uint8_t type;
uint8_t length;
uint8_t bus_id;
uint8_t address_type;
uint64_t address_base;
uint64_t address_length;
} __attribute__((packed)) *sas_entry_ptr;
#define SASENTRY_TYPE_IO 0
#define SASENTRY_TYPE_MEMORY 1
#define SASENTRY_TYPE_PREFETCH 2
typedef struct BHDENTRY {
uint8_t type;
uint8_t length;
uint8_t bus_id;
uint8_t bus_info;
uint8_t parent_bus;
uint8_t reserved[3];
} __attribute__((packed)) *bhd_entry_ptr;
#define BHDENTRY_INFO_SUBTRACTIVE_DECODE 0x1
typedef struct CBASMENTRY {
uint8_t type;
uint8_t length;
uint8_t bus_id;
uint8_t address_mod;
uint32_t predefined_range;
} __attribute__((packed)) *cbasm_entry_ptr;
#define CBASMENTRY_ADDRESS_MOD_ADD 0x0
#define CBASMENTRY_ADDRESS_MOD_SUBTRACT 0x1
#define CBASMENTRY_RANGE_ISA_IO 0
#define CBASMENTRY_RANGE_VGA_IO 1
#endif /* !__MPTABLE_H__ */
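
The MP specification requires both the floating pointer structure and the configuration table to checksum to zero: all bytes, including the checksum field, must sum to 0 modulo 256. A generic helper sketch (the convention comes from the MP spec, not from this header):

#include <stddef.h>
#include <stdint.h>

/* Sum every byte of a structure. A builder zeroes the checksum field first,
 * computes this sum, and stores its negation so the final total is 0 mod 256. */
static uint8_t
mpt_checksum(const void *base, size_t len)
{
	const uint8_t *p = base;
	uint8_t sum = 0;

	while (len-- > 0)
		sum += *p++;
	return sum;
}

/* e.g. mpfps->checksum = -mpt_checksum(mpfps, sizeof(*mpfps)); after zeroing it */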

View File

@@ -0,0 +1,35 @@
/*-
* Copyright (c) 2012 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _MPTBL_H_
#define _MPTBL_H_
int mptable_build(struct vmctx *ctx, int ncpu);
void mptable_add_oemtbl(void *tbl, int tblsz);
#endif /* _MPTBL_H_ */

View File

@@ -0,0 +1,642 @@
/*
* Copyright (C) 2011-2014 Matteo Landi, Luigi Rizzo. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* $FreeBSD$
*
* Definitions of constants and the structures used by the netmap
* framework, for the part visible to both kernel and userspace.
* Detailed info on netmap is available with "man netmap" or at
*
* http://info.iet.unipi.it/~luigi/netmap/
*
* This API is also used to communicate with the VALE software switch
*/
#ifndef _NET_NETMAP_H_
#define _NET_NETMAP_H_
#define NETMAP_API 11 /* current API version */
#define NETMAP_MIN_API 11 /* min and max versions accepted */
#define NETMAP_MAX_API 15
/*
* Some fields should be cache-aligned to reduce contention.
* The alignment is architecture and OS dependent, but rather than
* digging into OS headers to find the exact value we use an estimate
* that should cover most architectures.
*/
#define NM_CACHE_ALIGN 128
/*
* --- Netmap data structures ---
*
* The userspace data structures used by netmap are shown below.
* They are allocated by the kernel and mmap()ed by userspace threads.
* Pointers are implemented as memory offsets or indexes,
* so that they can be easily dereferenced in kernel and userspace.
KERNEL (opaque, obviously)
====================================================================
|
USERSPACE | struct netmap_ring
+---->+---------------+
/ | head,cur,tail |
struct netmap_if (nifp, 1 per fd) / | buf_ofs |
+---------------+ / | other fields |
| ni_tx_rings | / +===============+
| ni_rx_rings | / | buf_idx, len | slot[0]
| | / | flags, ptr |
| | / +---------------+
+===============+ / | buf_idx, len | slot[1]
| txring_ofs[0] | (rel.to nifp)--' | flags, ptr |
| txring_ofs[1] | +---------------+
(tx+1 entries) (num_slots entries)
| txring_ofs[t] | | buf_idx, len | slot[n-1]
+---------------+ | flags, ptr |
| rxring_ofs[0] | +---------------+
| rxring_ofs[1] |
(rx+1 entries)
| rxring_ofs[r] |
+---------------+
* For each "interface" (NIC, host stack, PIPE, VALE switch port) bound to
* a file descriptor, the mmap()ed region contains a (logically readonly)
* struct netmap_if pointing to struct netmap_ring's.
*
* There is one netmap_ring per physical NIC ring, plus one tx/rx ring
* pair attached to the host stack (this pair is unused for non-NIC ports).
*
* All physical/host stack ports share the same memory region,
* so that zero-copy can be implemented between them.
* VALE switch ports instead have separate memory regions.
*
* The netmap_ring is the userspace-visible replica of the NIC ring.
* Each slot has the index of a buffer (MTU-sized and residing in the
* mmapped region), its length and some flags. An extra 64-bit pointer
* is provided for user-supplied buffers in the tx path.
*
* In user space, the buffer address is computed as
* (char *)ring + buf_ofs + index * NETMAP_BUF_SIZE
*
* Added in NETMAP_API 11:
*
* + NIOCREGIF can request the allocation of extra spare buffers from
* the same memory pool. The desired number of buffers must be in
* nr_arg3. The ioctl may return fewer buffers, depending on memory
* availability. nr_arg3 will return the actual value, and, once
* mapped, nifp->ni_bufs_head will be the index of the first buffer.
*
* The buffers are linked to each other using the first uint32_t
* as the index. On close, ni_bufs_head must point to the list of
* buffers to be released.
*
* + NIOCREGIF can request space for extra rings (and buffers)
* allocated in the same memory space. The number of extra rings
* is in nr_arg1, and is advisory. This is a no-op on NICs where
* the size of the memory space is fixed.
*
* + NIOCREGIF can attach to PIPE rings sharing the same memory
* space with a parent device. The ifname indicates the parent device,
* which must already exist. Flags in nr_flags indicate if we want to
* bind the master or slave side, the index (from nr_ringid)
* is just a cookie and does not need to be sequential.
*
* + NIOCREGIF can also attach to 'monitor' rings that replicate
* the content of specific rings, also from the same memory space.
*
* Extra flags in nr_flags support the above functions.
* Application libraries may use the following naming scheme:
* netmap:foo all NIC ring pairs
* netmap:foo^ only host ring pair
* netmap:foo+ all NIC ring + host ring pairs
* netmap:foo-k the k-th NIC ring pair
* netmap:foo{k PIPE ring pair k, master side
* netmap:foo}k PIPE ring pair k, slave side
*
* Some notes about host rings:
*
* + The RX host ring is used to store those packets that the host network
* stack is trying to transmit through a NIC queue, but only if that queue
* is currently in netmap mode. Netmap will not intercept host stack mbufs
* designated to NIC queues that are not in netmap mode. As a consequence,
* registering a netmap port with netmap:foo^ is not enough to intercept
* mbufs in the RX host ring; the netmap port should be registered with
* netmap:foo*, or another registration should be done to open at least a
* NIC TX queue in netmap mode.
*
* + Netmap is not currently able to deal with intercepted transmit mbufs which
* require offloadings like TSO, UFO, checksumming offloadings, etc. It is
* the responsibility of the user to disable those offloadings (e.g. using
* ifconfig on FreeBSD or ethtool -K on Linux) for an interface that is being
* used in netmap mode. If the offloadings are not disabled, GSO and/or
* unchecksummed packets may be dropped immediately or end up in the host RX
* ring, and will be dropped as soon as the packet reaches another netmap
* adapter.
*/
/*
* struct netmap_slot is a buffer descriptor
*/
struct netmap_slot {
uint32_t buf_idx; /* buffer index */
uint16_t len; /* length for this slot */
uint16_t flags; /* buf changed, etc. */
uint64_t ptr; /* pointer for indirect buffers */
};
/*
* The following flags control how the slot is used
*/
#define NS_BUF_CHANGED 0x0001 /* buf_idx changed */
/*
* must be set whenever buf_idx is changed (as it might be
* necessary to recompute the physical address and mapping)
*
* It is also set by the kernel whenever the buf_idx is
* changed internally (e.g., by pipes). Applications may
* use this information to know when they can reuse the
* contents of previously prepared buffers.
*/
#define NS_REPORT 0x0002 /* ask the hardware to report results */
/*
* Request notification when slot is used by the hardware.
* Normally transmit completions are handled lazily and
* may be unreported. This flag lets us know when a slot
* has been sent (e.g. to terminate the sender).
*/
#define NS_FORWARD 0x0004 /* pass packet 'forward' */
/*
* (Only for physical ports, rx rings with NR_FORWARD set).
* Slot released to the kernel (i.e. before ring->head) with
* this flag set are passed to the peer ring (host/NIC),
* thus restoring the host-NIC connection for these slots.
* This supports efficient traffic monitoring or firewalling.
*/
#define NS_NO_LEARN 0x0008 /* disable bridge learning */
/*
* On a VALE switch, do not 'learn' the source port for
* this buffer.
*/
#define NS_INDIRECT 0x0010 /* userspace buffer */
/*
* (VALE tx rings only) data is in a userspace buffer,
* whose address is in the 'ptr' field in the slot.
*/
#define NS_MOREFRAG 0x0020 /* packet has more fragments */
/*
* (VALE ports only)
* Set on all but the last slot of a multi-segment packet.
* The 'len' field refers to the individual fragment.
*/
#define NS_PORT_SHIFT 8
#define NS_PORT_MASK (0xff << NS_PORT_SHIFT)
/*
* The high 8 bits of the flag, if not zero, indicate the
* destination port for the VALE switch, overriding
* the lookup table.
*/
#define NS_RFRAGS(_slot) (((_slot)->flags >> 8) & 0xff)
/*
* (VALE rx rings only) the high 8 bits
* are the number of fragments.
*/
/*
* struct netmap_ring
*
* Netmap representation of a TX or RX ring (also known as "queue").
* This is a queue implemented as a fixed-size circular array.
* At the software level the important fields are: head, cur, tail.
*
* In TX rings:
*
* head first slot available for transmission.
* cur wakeup point. select() and poll() will unblock
* when 'tail' moves past 'cur'
* tail (readonly) first slot reserved to the kernel
*
* [head .. tail-1] can be used for new packets to send;
* 'head' and 'cur' must be incremented as slots are filled
* with new packets to be sent;
* 'cur' can be moved further ahead if we need more space
* for new transmissions. XXX todo (2014-03-12)
*
* In RX rings:
*
* head first valid received packet
* cur wakeup point. select() and poll() will unblock
* when 'tail' moves past 'cur'
* tail (readonly) first slot reserved to the kernel
*
* [head .. tail-1] contain received packets;
* 'head' and 'cur' must be incremented as slots are consumed
* and can be returned to the kernel;
* 'cur' can be moved further ahead if we want to wait for
* new packets without returning the previous ones.
*
* DATA OWNERSHIP/LOCKING:
* The netmap_ring, and all slots and buffers in the range
* [head .. tail-1] are owned by the user program;
* the kernel only accesses them during a netmap system call
* and in the user thread context.
*
* Other slots and buffers are reserved for use by the kernel
*/
struct netmap_ring {
/*
* buf_ofs is meant to be used through macros.
* It contains the offset of the buffer region from this
* descriptor.
*/
const int64_t buf_ofs;
const uint32_t num_slots; /* number of slots in the ring. */
const uint32_t nr_buf_size;
const uint16_t ringid;
const uint16_t dir; /* 0: tx, 1: rx */
uint32_t head; /* (u) first user slot */
uint32_t cur; /* (u) wakeup point */
uint32_t tail; /* (k) first kernel slot */
uint32_t flags;
struct timeval ts; /* (k) time of last *sync() */
/* opaque room for a mutex or similar object */
#if !defined(_WIN32) || defined(__CYGWIN__)
uint8_t __attribute__((__aligned__(NM_CACHE_ALIGN))) sem[128];
#else
uint8_t __declspec(align(NM_CACHE_ALIGN)) sem[128];
#endif
/* the slots follow. This struct has variable size */
struct netmap_slot slot[0]; /* array of slots. */
};
/*
* RING FLAGS
*/
#define NR_TIMESTAMP 0x0002 /* set timestamp on *sync() */
/*
* updates the 'ts' field on each netmap syscall. This saves
* a separate gettimeofday(), and is not much worse than
* software timestamps generated in the interrupt handler.
*/
#define NR_FORWARD 0x0004 /* enable NS_FORWARD for ring */
/*
* Enables the NS_FORWARD slot flag for the ring.
*/
/*
* Netmap representation of an interface and its queue(s).
* This is initialized by the kernel when binding a file
* descriptor to a port, and should be considered as readonly
* by user programs. The kernel never uses it.
*
* There is one netmap_if for each file descriptor on which we want
* to select/poll.
* select/poll operates on one or all pairs depending on the value of
* nmr_queueid passed on the ioctl.
*/
struct netmap_if {
char ni_name[IFNAMSIZ]; /* name of the interface. */
const uint32_t ni_version; /* API version, currently unused */
const uint32_t ni_flags; /* properties */
#define NI_PRIV_MEM 0x1 /* private memory region */
/*
* The number of packet rings available in netmap mode.
* Physical NICs can have different numbers of tx and rx rings.
* Physical NICs also have a 'host' ring pair.
* Additionally, clients can request additional ring pairs to
* be used for internal communication.
*/
const uint32_t ni_tx_rings; /* number of HW tx rings */
const uint32_t ni_rx_rings; /* number of HW rx rings */
uint32_t ni_bufs_head; /* head index for extra bufs */
uint32_t ni_spare1[5];
/*
* The following array contains the offset of each netmap ring
* from this structure, in the following order:
* NIC tx rings (ni_tx_rings); host tx ring (1); extra tx rings;
* NIC rx rings (ni_rx_rings); host rx ring (1); extra rx rings.
*
* The area is filled up by the kernel on NIOCREGIF,
* and then only read by userspace code.
*/
const ssize_t ring_ofs[0];
};
#ifndef NIOCREGIF
/*
* ioctl names and related fields
*
* NIOCTXSYNC, NIOCRXSYNC synchronize tx or rx queues,
* whose identity is set in NIOCREGIF through nr_ringid.
* These are non blocking and take no argument.
*
* NIOCGINFO takes a struct ifreq, the interface name is the input,
* the outputs are number of queues and number of descriptor
* for each queue (useful to set number of threads etc.).
* The info returned is only advisory and may change before
* the interface is bound to a file descriptor.
*
* NIOCREGIF takes an interface name within a struct nmre,
* and activates netmap mode on the interface (if possible).
*
* The argument to NIOCGINFO/NIOCREGIF overlays struct ifreq so we
* can pass it down to other NIC-related ioctls.
*
* The actual argument (struct nmreq) has a number of options to request
* different functions.
* The following are used in NIOCREGIF when nr_cmd == 0:
*
* nr_name (in)
* The name of the port (em0, valeXXX:YYY, etc.)
* limited to IFNAMSIZ for backward compatibility.
*
* nr_version (in/out)
* Must match NETMAP_API as used in the kernel, error otherwise.
* Always returns the desired value on output.
*
* nr_tx_slots, nr_rx_slots, nr_tx_rings, nr_rx_rings (in/out)
* On input, non-zero values may be used to reconfigure the port
* according to the requested values, but this is not guaranteed.
* On output the actual values in use are reported.
*
* nr_ringid (in)
* Indicates how rings should be bound to the file descriptors.
* If nr_flags != 0, then the low bits (in NETMAP_RING_MASK)
* are used to indicate the ring number, and nr_flags specifies
* the actual rings to bind. NETMAP_NO_TX_POLL is unaffected.
*
* NOTE: THE FOLLOWING (nr_flags == 0) IS DEPRECATED:
* If nr_flags == 0, NETMAP_HW_RING and NETMAP_SW_RING control
* the binding as follows:
* 0 (default) binds all physical rings
* NETMAP_HW_RING | ring number binds a single ring pair
* NETMAP_SW_RING binds only the host tx/rx rings
*
* NETMAP_NO_TX_POLL can be OR-ed to make select()/poll() push
* packets on tx rings only if POLLOUT is set.
* The default is to push any pending packet.
*
* NETMAP_DO_RX_POLL can be OR-ed to make select()/poll() release
* packets on rx rings also when POLLIN is NOT set.
* The default is to touch the rx ring only with POLLIN.
* Note that this is the opposite of TX because it
* reflects the common usage.
*
* NOTE: NETMAP_PRIV_MEM IS DEPRECATED, use nr_arg2 instead.
* NETMAP_PRIV_MEM is set on return for ports that do not use
* the global memory allocator.
* This information is not significant and applications
* should look at the region id in nr_arg2
*
* nr_flags is the recommended mode to indicate which rings should
* be bound to a file descriptor. Values are NR_REG_*
*
* nr_arg1 (in) The number of extra rings to be reserved.
* Especially when allocating a VALE port the system only
* allocates the amount of memory needed for the port.
* If more shared memory rings are desired (e.g. for pipes),
* the first invocation for the same basename/allocator
* should specify a suitable number. Memory cannot be
* extended after the first allocation without closing
* all ports on the same region.
*
* nr_arg2 (in/out) The identity of the memory region used.
* On input, 0 means the system decides autonomously,
* other values may try to select a specific region.
* On return the actual value is reported.
* Region '1' is the global allocator, normally shared
* by all interfaces. Other values are private regions.
* If two ports use the same region, zero-copy is possible.
*
* nr_arg3 (in/out) number of extra buffers to be allocated.
*
*
*
* nr_cmd (in) if non-zero indicates a special command:
* NETMAP_BDG_ATTACH and nr_name = vale*:ifname
* attaches the NIC to the switch; nr_ringid specifies
* which rings to use. Used by vale-ctl -a ...
* nr_arg1 = NETMAP_BDG_HOST also attaches the host port
* as in vale-ctl -h ...
*
* NETMAP_BDG_DETACH and nr_name = vale*:ifname
* disconnects a previously attached NIC.
* Used by vale-ctl -d ...
*
* NETMAP_BDG_LIST
* list the configuration of VALE switches.
*
* NETMAP_BDG_VNET_HDR
* Set the virtio-net header length used by the client
* of a VALE switch port.
*
* NETMAP_BDG_NEWIF
* create a persistent VALE port with name nr_name.
* Used by vale-ctl -n ...
*
* NETMAP_BDG_DELIF
* delete a persistent VALE port. Used by vale-ctl -d ...
*
* nr_arg1, nr_arg2, nr_arg3 (in/out) command specific
*
*
*
*/
/*
* struct nmreq overlays a struct ifreq (just the name)
*/
struct nmreq {
char nr_name[IFNAMSIZ];
uint32_t nr_version; /* API version */
uint32_t nr_offset; /* nifp offset in the shared region */
uint32_t nr_memsize; /* size of the shared region */
uint32_t nr_tx_slots; /* slots in tx rings */
uint32_t nr_rx_slots; /* slots in rx rings */
uint16_t nr_tx_rings; /* number of tx rings */
uint16_t nr_rx_rings; /* number of rx rings */
uint16_t nr_ringid; /* ring(s) we care about */
#define NETMAP_HW_RING 0x4000 /* single NIC ring pair */
#define NETMAP_SW_RING 0x2000 /* only host ring pair */
#define NETMAP_RING_MASK 0x0fff /* the ring number */
#define NETMAP_NO_TX_POLL 0x1000 /* no automatic txsync on poll */
#define NETMAP_DO_RX_POLL 0x8000 /* DO automatic rxsync on poll */
uint16_t nr_cmd;
#define NETMAP_BDG_ATTACH 1 /* attach the NIC */
#define NETMAP_BDG_DETACH 2 /* detach the NIC */
#define NETMAP_BDG_REGOPS 3 /* register bridge callbacks */
#define NETMAP_BDG_LIST 4 /* get bridge's info */
#define NETMAP_BDG_VNET_HDR 5 /* set the port virtio-net-hdr length */
#define NETMAP_BDG_OFFSET NETMAP_BDG_VNET_HDR /* deprecated alias */
#define NETMAP_BDG_NEWIF 6 /* create a virtual port */
#define NETMAP_BDG_DELIF 7 /* destroy a virtual port */
#define NETMAP_PT_HOST_CREATE 8 /* create ptnetmap kthreads */
#define NETMAP_PT_HOST_DELETE 9 /* delete ptnetmap kthreads */
#define NETMAP_BDG_POLLING_ON 10 /* start polling kthread */
#define NETMAP_BDG_POLLING_OFF 11 /* delete polling kthread */
#define NETMAP_VNET_HDR_GET 12 /* get the port virtio-net-hdr length */
#define NETMAP_POOLS_INFO_GET 13 /* get memory allocator pools info */
uint16_t nr_arg1; /* reserve extra rings in NIOCREGIF */
#define NETMAP_BDG_HOST 1 /* attach the host stack on ATTACH */
uint16_t nr_arg2;
uint32_t nr_arg3; /* req. extra buffers in NIOCREGIF */
uint32_t nr_flags;
/* various modes, extends nr_ringid */
uint32_t spare2[1];
};
#define NR_REG_MASK 0xf /* values for nr_flags */
enum { NR_REG_DEFAULT = 0, /* backward compat, should not be used. */
NR_REG_ALL_NIC = 1,
NR_REG_SW = 2,
NR_REG_NIC_SW = 3,
NR_REG_ONE_NIC = 4,
NR_REG_PIPE_MASTER = 5,
NR_REG_PIPE_SLAVE = 6,
};
/* monitor uses the NR_REG to select the rings to monitor */
#define NR_MONITOR_TX 0x100
#define NR_MONITOR_RX 0x200
#define NR_ZCOPY_MON 0x400
/* request exclusive access to the selected rings */
#define NR_EXCLUSIVE 0x800
/* request ptnetmap host support */
#define NR_PASSTHROUGH_HOST NR_PTNETMAP_HOST /* deprecated */
#define NR_PTNETMAP_HOST 0x1000
#define NR_RX_RINGS_ONLY 0x2000
#define NR_TX_RINGS_ONLY 0x4000
/* Applications set this flag if they are able to deal with virtio-net headers,
* that is send/receive frames that start with a virtio-net header.
* If not set, NIOCREGIF will fail with netmap ports that require applications
* to use those headers. If the flag is set, the application can use the
* NETMAP_VNET_HDR_GET command to figure out the header length.
*/
#define NR_ACCEPT_VNET_HDR 0x8000
#define NM_BDG_NAME "vale" /* prefix for bridge port name */
/*
* Windows does not have _IOWR(). _IO(), _IOW() and _IOR() are defined
* in ws2def.h but not sure if they are in the form we need.
* XXX so we redefine them
* in a convenient way to use for DeviceIoControl signatures
*/
#ifdef _WIN32
#undef _IO /* ws2def.h */
#define _WIN_NM_IOCTL_TYPE 40000
#define _IO(_c, _n) CTL_CODE(_WIN_NM_IOCTL_TYPE, ((_n) + 0x800), \
METHOD_BUFFERED, FILE_ANY_ACCESS)
#define _IO_direct(_c, _n) CTL_CODE(_WIN_NM_IOCTL_TYPE, ((_n) + 0x800), \
METHOD_OUT_DIRECT, FILE_ANY_ACCESS)
#define _IOWR(_c, _n, _s) _IO(_c, _n)
/* We have some internal sysctls in addition to the externally visible ones */
#define NETMAP_MMAP _IO_direct('i', 160) /* note METHOD_OUT_DIRECT */
#define NETMAP_POLL _IO('i', 162)
/* and also two setsockopt for sysctl emulation */
#define NETMAP_SETSOCKOPT _IO('i', 140)
#define NETMAP_GETSOCKOPT _IO('i', 141)
/* These linknames are for the Netmap Core Driver */
#define NETMAP_NT_DEVICE_NAME L"\\Device\\NETMAP"
#define NETMAP_DOS_DEVICE_NAME L"\\DosDevices\\netmap"
/* Definition of a structure used to pass a virtual address within an IOCTL */
typedef struct _MEMORY_ENTRY {
PVOID pUsermodeVirtualAddress;
} MEMORY_ENTRY, *PMEMORY_ENTRY;
typedef struct _POLL_REQUEST_DATA {
int events;
int timeout;
int revents;
} POLL_REQUEST_DATA;
#endif /* _WIN32 */
/*
* FreeBSD uses the size value embedded in the _IOWR to determine
* how much to copy in/out. So we need it to match the actual
* data structure we pass. We put some spares in the structure
* to ease compatibility with other versions
*/
#define NIOCGINFO _IOWR('i', 145, struct nmreq) /* return IF info */
#define NIOCREGIF _IOWR('i', 146, struct nmreq) /* interface register */
#define NIOCTXSYNC _IO('i', 148) /* sync tx queues */
#define NIOCRXSYNC _IO('i', 149) /* sync rx queues */
#define NIOCCONFIG _IOWR('i', 150, struct nm_ifreq) /* for ext. modules */
#endif /* !NIOCREGIF */
/*
* Helper functions for kernel and userspace
*/
/*
* check if space is available in the ring.
*/
static inline int
nm_ring_empty(struct netmap_ring *ring)
{
return (ring->cur == ring->tail);
}
/*
* Opaque structure that is passed to an external kernel
* module via ioctl(fd, NIOCCONFIG, req) for a user-owned
* bridge port (at this point ephemeral VALE interface).
*/
#define NM_IFRDATA_LEN 256
struct nm_ifreq {
char nifr_name[IFNAMSIZ];
char data[NM_IFRDATA_LEN];
};
#endif /* _NET_NETMAP_H_ */
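
As the ring layout comment above explains, a slot's payload lives at (char *)ring + buf_ofs + buf_idx * nr_buf_size, and user code advances head/cur as it consumes the slots between head and tail. A hedged sketch of draining an RX ring using only the definitions in this header; real applications typically use the NETMAP_BUF() and nm_ring_next() helpers from netmap_user.h instead:

#include <stddef.h>

static inline char *
nm_buf_addr(struct netmap_ring *ring, uint32_t buf_idx)
{
	return (char *)ring + ring->buf_ofs +
	       (size_t)buf_idx * ring->nr_buf_size;
}

static void
rx_drain(struct netmap_ring *ring)
{
	while (!nm_ring_empty(ring)) {
		struct netmap_slot *slot = &ring->slot[ring->head];
		char *payload = nm_buf_addr(ring, slot->buf_idx);

		/* ... consume slot->len bytes at payload ... */
		(void)payload;
		ring->head = ring->cur =
		    (ring->head + 1 == ring->num_slots) ? 0 : ring->head + 1;
	}
	/* a following ioctl(fd, NIOCRXSYNC) hands the freed slots back to the kernel */
}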

File diff suppressed because it is too large

View File

@@ -0,0 +1,254 @@
/*-
* Copyright (c) 1991 The Regents of the University of California.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: @(#)ns16550.h 7.1 (Berkeley) 5/9/91
* $FreeBSD$
*/
#ifndef _NS16550_H_
#define _NS16550_H_
/*
* NS8250... UART registers.
*/
/* 8250 registers #[0-6]. */
#define com_data 0 /* data register (R/W) */
#define REG_DATA com_data
#define com_ier 1 /* interrupt enable register (W) */
#define REG_IER com_ier
#define IER_ERXRDY 0x1
#define IER_ETXRDY 0x2
#define IER_ERLS 0x4
#define IER_EMSC 0x8
/*
* Receive timeout interrupt enable.
* Implemented in Intel XScale, Ingenic XBurst.
*/
#define IER_RXTMOUT 0x10
#define IER_BITS "\20\1ERXRDY\2ETXRDY\3ERLS\4EMSC\5RXTMOUT"
#define com_iir 2 /* interrupt identification register (R) */
#define REG_IIR com_iir
#define IIR_IMASK 0xf
#define IIR_RXTOUT 0xc
#define IIR_BUSY 0x7
#define IIR_RLS 0x6
#define IIR_RXRDY 0x4
#define IIR_TXRDY 0x2
#define IIR_NOPEND 0x1
#define IIR_MLSC 0x0
#define IIR_FIFO_MASK 0xc0 /* set if FIFOs are enabled */
#define IIR_BITS "\20\1NOPEND\2TXRDY\3RXRDY"
#define com_lcr 3 /* line control register (R/W) */
#define com_cfcr com_lcr /* character format control register (R/W) */
#define REG_LCR com_lcr
#define LCR_DLAB 0x80
#define CFCR_DLAB LCR_DLAB
#define LCR_EFR_ENABLE 0xbf /* magic to enable EFR on 16650 up */
#define CFCR_EFR_ENABLE LCR_EFR_ENABLE
#define LCR_SBREAK 0x40
#define CFCR_SBREAK LCR_SBREAK
#define LCR_PZERO 0x30
#define CFCR_PZERO LCR_PZERO
#define LCR_PONE 0x20
#define CFCR_PONE LCR_PONE
#define LCR_PEVEN 0x10
#define CFCR_PEVEN LCR_PEVEN
#define LCR_PODD 0x00
#define CFCR_PODD LCR_PODD
#define LCR_PENAB 0x08
#define CFCR_PENAB LCR_PENAB
#define LCR_STOPB 0x04
#define CFCR_STOPB LCR_STOPB
#define LCR_8BITS 0x03
#define CFCR_8BITS LCR_8BITS
#define LCR_7BITS 0x02
#define CFCR_7BITS LCR_7BITS
#define LCR_6BITS 0x01
#define CFCR_6BITS LCR_6BITS
#define LCR_5BITS 0x00
#define CFCR_5BITS LCR_5BITS
#define com_mcr 4 /* modem control register (R/W) */
#define REG_MCR com_mcr
#define MCR_PRESCALE 0x80 /* only available on 16650 up */
#define MCR_LOOPBACK 0x10
#define MCR_IE 0x08
#define MCR_IENABLE MCR_IE
#define MCR_DRS 0x04
#define MCR_RTS 0x02
#define MCR_DTR 0x01
#define MCR_BITS "\20\1DTR\2RTS\3DRS\4IE\5LOOPBACK\10PRESCALE"
#define com_lsr 5 /* line status register (R/W) */
#define REG_LSR com_lsr
#define LSR_RCV_FIFO 0x80
#define LSR_TEMT 0x40
#define LSR_TSRE LSR_TEMT
#define LSR_THRE 0x20
#define LSR_TXRDY LSR_THRE
#define LSR_BI 0x10
#define LSR_FE 0x08
#define LSR_PE 0x04
#define LSR_OE 0x02
#define LSR_RXRDY 0x01
#define LSR_RCV_MASK 0x1f
#define LSR_BITS "\20\1RXRDY\2OE\3PE\4FE\5BI\6THRE\7TEMT\10RCV_FIFO"
#define com_msr 6 /* modem status register (R/W) */
#define REG_MSR com_msr
#define MSR_DCD 0x80
#define MSR_RI 0x40
#define MSR_DSR 0x20
#define MSR_CTS 0x10
#define MSR_DDCD 0x08
#define MSR_TERI 0x04
#define MSR_DDSR 0x02
#define MSR_DCTS 0x01
#define MSR_BITS "\20\1DCTS\2DDSR\3TERI\4DDCD\5CTS\6DSR\7RI\10DCD"
/* 8250 multiplexed registers #[0-1]. Access enabled by LCR[7]. */
#define com_dll 0 /* divisor latch low (R/W) */
#define com_dlbl com_dll
#define com_dlm 1 /* divisor latch high (R/W) */
#define com_dlbh com_dlm
#define REG_DLL com_dll
#define REG_DLH com_dlm
/* 16450 register #7. Not multiplexed. */
#define com_scr 7 /* scratch register (R/W) */
/* 16550 register #2. Not multiplexed. */
#define com_fcr 2 /* FIFO control register (W) */
#define com_fifo com_fcr
#define REG_FCR com_fcr
#define FCR_ENABLE 0x01
#define FIFO_ENABLE FCR_ENABLE
#define FCR_RCV_RST 0x02
#define FIFO_RCV_RST FCR_RCV_RST
#define FCR_XMT_RST 0x04
#define FIFO_XMT_RST FCR_XMT_RST
#define FCR_DMA 0x08
#define FIFO_DMA_MODE FCR_DMA
#ifdef CPU_XBURST
#define FCR_UART_ON 0x10
#endif
#define FCR_RX_LOW 0x00
#define FIFO_RX_LOW FCR_RX_LOW
#define FCR_RX_MEDL 0x40
#define FIFO_RX_MEDL FCR_RX_MEDL
#define FCR_RX_MEDH 0x80
#define FIFO_RX_MEDH FCR_RX_MEDH
#define FCR_RX_HIGH 0xc0
#define FIFO_RX_HIGH FCR_RX_HIGH
#define FCR_BITS "\20\1ENABLE\2RCV_RST\3XMT_RST\4DMA"
/* 16650 registers #2,[4-7]. Access enabled by LCR_EFR_ENABLE. */
#define com_efr 2 /* enhanced features register (R/W) */
#define REG_EFR com_efr
#define EFR_CTS 0x80
#define EFR_AUTOCTS EFR_CTS
#define EFR_RTS 0x40
#define EFR_AUTORTS EFR_RTS
#define EFR_EFE 0x10 /* enhanced functions enable */
#define com_xon1 4 /* XON 1 character (R/W) */
#define com_xon2 5 /* XON 2 character (R/W) */
#define com_xoff1 6 /* XOFF 1 character (R/W) */
#define com_xoff2 7 /* XOFF 2 character (R/W) */
#define DW_REG_USR 31 /* DesignWare derived Uart Status Reg */
#define com_usr 39 /* Octeon 16750/16550 Uart Status Reg */
#define REG_USR com_usr
#define USR_BUSY 1 /* Uart Busy. Serial transfer in progress */
#define USR_TXFIFO_NOTFULL 2 /* Uart TX FIFO Not full */
/* 16950 register #1. Access enabled by ACR[7]. Also requires !LCR[7]. */
#define com_asr 1 /* additional status register (R[0-7]/W[0-1]) */
/* 16950 register #3. R/W access enabled by ACR[7]. */
#define com_rfl 3 /* receiver fifo level (R) */
/*
* 16950 register #4. Access enabled by ACR[7]. Also requires
* !LCR_EFR_ENABLE.
*/
#define com_tfl 4 /* transmitter fifo level (R) */
/*
* 16950 register #5. Accessible if !LCR_EFR_ENABLE. Read access also
* requires ACR[6].
*/
#define com_icr 5 /* index control register (R/W) */
#define REG_ICR com_icr
/*
* 16950 register #7. It is the same as com_scr except it has a different
* abbreviation in the manufacturer's data sheet and it also serves as an
* index into the Indexed Control register set.
*/
#define com_spr com_scr /* scratch pad (and index) register (R/W) */
#define REG_SPR com_scr
/*
* 16950 indexed control registers #[0-0x13]. Access is via index in SPR,
* data in ICR (if ICR is accessible).
*/
#define com_acr 0 /* additional control register (R/W) */
#define REG_ACR com_acr
#define ACR_ASE 0x80 /* ASR/RFL/TFL enable */
#define ACR_ICRE 0x40 /* ICR enable */
#define ACR_TLE 0x20 /* TTL/RTL enable */
#define com_cpr 1 /* clock prescaler register (R/W) */
#define com_tcr 2 /* times clock register (R/W) */
#define com_ttl 4 /* transmitter trigger level (R/W) */
#define com_rtl 5 /* receiver trigger level (R/W) */
/* ... */
/* Hardware extension mode register for RSB-2000/3000. */
#define com_emr com_msr
#define EMR_EXBUFF 0x04
#define EMR_CTSFLW 0x08
#define EMR_DSRFLW 0x10
#define EMR_RTSFLW 0x20
#define EMR_DTRFLW 0x40
#define EMR_EFMODE 0x80
#endif
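
A typical 16550 initialization using these offsets: set LCR_DLAB to expose the divisor latch, program DLL/DLM, restore the line control, and enable the FIFOs. A hedged sketch assuming x86 port I/O at the legacy COM1 base and a standard 1.8432 MHz UART clock; the outb() helper and the 0x3F8 base are assumptions about the host, not part of this header:

#include <stdint.h>
#include <sys/io.h>                        /* outb(); requires I/O privileges */

static void
uart_init_115200(uint16_t base)            /* e.g. base = 0x3F8 for COM1 */
{
	outb(0, base + com_ier);                          /* mask all interrupts */
	outb(LCR_DLAB, base + com_lcr);                   /* expose divisor latch */
	outb(1, base + com_dll);                          /* divisor 1 -> 115200 baud */
	outb(0, base + com_dlm);
	outb(LCR_8BITS, base + com_lcr);                  /* 8N1, DLAB cleared again */
	outb(FCR_ENABLE | FCR_RCV_RST | FCR_XMT_RST, base + com_fcr);
	outb(MCR_RTS | MCR_DTR, base + com_mcr);          /* assert RTS/DTR */
}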

View File

@@ -0,0 +1,304 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _PCI_CORE_H_
#define _PCI_CORE_H_
#include <sys/queue.h>
#include <assert.h>
#include "types.h"
#include "pcireg.h"
#define PCI_BARMAX PCIR_MAX_BAR_0 /* BAR registers in a Type 0 header */
struct vmctx;
struct pci_vdev;
struct memory_region;
struct pci_vdev_ops {
char *class_name; /* Name of device class */
/* instance creation */
int (*vdev_init)(struct vmctx *, struct pci_vdev *,
char *opts);
/* instance deinit */
void (*vdev_deinit)(struct vmctx *, struct pci_vdev *,
char *opts);
/* ACPI DSDT enumeration */
void (*vdev_write_dsdt)(struct pci_vdev *);
/* ops related to physical resources */
void (*vdev_phys_access)(struct vmctx *ctx, struct pci_vdev *dev);
/* config space read/write callbacks */
int (*vdev_cfgwrite)(struct vmctx *ctx, int vcpu,
struct pci_vdev *pi, int offset,
int bytes, uint32_t val);
int (*vdev_cfgread)(struct vmctx *ctx, int vcpu,
struct pci_vdev *pi, int offset,
int bytes, uint32_t *retval);
/* BAR read/write callbacks */
void (*vdev_barwrite)(struct vmctx *ctx, int vcpu,
struct pci_vdev *pi, int baridx,
uint64_t offset, int size, uint64_t value);
uint64_t (*vdev_barread)(struct vmctx *ctx, int vcpu,
struct pci_vdev *pi, int baridx,
uint64_t offset, int size);
};
/*
* Put all PCI instances' addresses into one section named pci_devemu_set
* so that DM could enumerate and initialize each of them.
*/
#define DEFINE_PCI_DEVTYPE(x) DATA_SET(pci_vdev_ops_set, x)
enum pcibar_type {
PCIBAR_NONE,
PCIBAR_IO,
PCIBAR_MEM32,
PCIBAR_MEM64,
PCIBAR_MEMHI64
};
struct pcibar {
enum pcibar_type type; /* io or memory */
uint64_t size;
uint64_t addr;
};
#define PI_NAMESZ 40
struct msix_table_entry {
uint64_t addr;
uint32_t msg_data;
uint32_t vector_control;
} __attribute__((packed));
/*
* In case the structure is modified to hold extra information, use a define
* for the size that should be emulated.
*/
#define MSIX_TABLE_ENTRY_SIZE 16
#define MAX_MSIX_TABLE_ENTRIES 2048
#define PBA_SIZE(msgnum) (roundup2((msgnum), 64) / 8)
enum lintr_stat {
IDLE,
ASSERTED,
PENDING
};
struct pci_vdev {
struct pci_vdev_ops *dev_ops;
struct vmctx *vmctx;
uint8_t bus, slot, func;
char name[PI_NAMESZ];
int bar_getsize;
int prevcap;
int capend;
struct {
int8_t pin;
enum lintr_stat state;
int pirq_pin;
int ioapic_irq;
pthread_mutex_t lock;
} lintr;
struct {
int enabled;
uint64_t addr;
uint64_t msg_data;
int maxmsgnum;
} msi;
struct {
int enabled;
int table_bar;
int pba_bar;
uint32_t table_offset;
int table_count;
uint32_t pba_offset;
int pba_size;
int function_mask;
struct msix_table_entry *table; /* allocated at runtime */
void *pba_page;
int pba_page_offset;
} msix;
void *arg; /* devemu-private data */
uint8_t cfgdata[PCI_REGMAX + 1];
struct pcibar bar[PCI_BARMAX + 1];
};
struct msicap {
uint8_t capid;
uint8_t nextptr;
uint16_t msgctrl;
uint32_t addrlo;
uint32_t addrhi;
uint16_t msgdata;
} __attribute__((packed));
static_assert(sizeof(struct msicap) == 14, "compile-time assertion failed");
struct msixcap {
uint8_t capid;
uint8_t nextptr;
uint16_t msgctrl;
uint32_t table_info; /* bar index and offset within it */
uint32_t pba_info; /* bar index and offset within it */
} __attribute__((packed));
static_assert(sizeof(struct msixcap) == 12, "compile-time assertion failed");
struct pciecap {
uint8_t capid;
uint8_t nextptr;
uint16_t pcie_capabilities;
uint32_t dev_capabilities; /* all devices */
uint16_t dev_control;
uint16_t dev_status;
uint32_t link_capabilities; /* devices with links */
uint16_t link_control;
uint16_t link_status;
uint32_t slot_capabilities; /* ports with slots */
uint16_t slot_control;
uint16_t slot_status;
uint16_t root_control; /* root ports */
uint16_t root_capabilities;
uint32_t root_status;
uint32_t dev_capabilities2; /* all devices */
uint16_t dev_control2;
uint16_t dev_status2;
uint32_t link_capabilities2; /* devices with links */
uint16_t link_control2;
uint16_t link_status2;
uint32_t slot_capabilities2; /* ports with slots */
uint16_t slot_control2;
uint16_t slot_status2;
} __attribute__((packed));
static_assert(sizeof(struct pciecap) == 60, "compile-time assertion failed");
typedef void (*pci_lintr_cb)(int b, int s, int pin, int pirq_pin,
int ioapic_irq, void *arg);
int init_pci(struct vmctx *ctx);
void deinit_pci(struct vmctx *ctx);
void msicap_cfgwrite(struct pci_vdev *pi, int capoff, int offset,
int bytes, uint32_t val);
void msixcap_cfgwrite(struct pci_vdev *pi, int capoff, int offset,
int bytes, uint32_t val);
void pci_callback(void);
int pci_emul_alloc_bar(struct pci_vdev *pdi, int idx,
enum pcibar_type type, uint64_t size);
int pci_emul_alloc_pbar(struct pci_vdev *pdi, int idx,
uint64_t hostbase, enum pcibar_type type,
uint64_t size);
void pci_emul_free_bars(struct pci_vdev *pdi);
int pci_emul_add_capability(struct pci_vdev *dev, u_char *capdata,
int caplen);
int pci_emul_add_msicap(struct pci_vdev *pi, int msgnum);
int pci_emul_add_pciecap(struct pci_vdev *pi, int pcie_device_type);
void pci_generate_msi(struct pci_vdev *pi, int msgnum);
void pci_generate_msix(struct pci_vdev *pi, int msgnum);
void pci_lintr_assert(struct pci_vdev *pi);
void pci_lintr_deassert(struct pci_vdev *pi);
void pci_lintr_request(struct pci_vdev *pi);
int pci_msi_enabled(struct pci_vdev *pi);
int pci_msix_enabled(struct pci_vdev *pi);
int pci_msix_table_bar(struct pci_vdev *pi);
int pci_msix_pba_bar(struct pci_vdev *pi);
int pci_msi_maxmsgnum(struct pci_vdev *pi);
int pci_parse_slot(char *opt);
void pci_populate_msicap(struct msicap *cap, int msgs, int nextptr);
int pci_emul_add_msixcap(struct pci_vdev *pi, int msgnum, int barnum);
int pci_emul_msix_twrite(struct pci_vdev *pi, uint64_t offset, int size,
uint64_t value);
uint64_t pci_emul_msix_tread(struct pci_vdev *pi, uint64_t offset, int size);
int pci_count_lintr(int bus);
void pci_walk_lintr(int bus, pci_lintr_cb cb, void *arg);
void pci_write_dsdt(void);
uint64_t pci_ecfg_base(void);
int pci_bus_configured(int bus);
int emulate_pci_cfgrw(struct vmctx *ctx, int vcpu, int in, int bus,
int slot, int func, int reg, int bytes, int *value);
static inline void
pci_set_cfgdata8(struct pci_vdev *pi, int offset, uint8_t val)
{
assert(offset <= PCI_REGMAX);
*(uint8_t *)(pi->cfgdata + offset) = val;
}
static inline void
pci_set_cfgdata16(struct pci_vdev *pi, int offset, uint16_t val)
{
assert(offset <= (PCI_REGMAX - 1) && (offset & 1) == 0);
*(uint16_t *)(pi->cfgdata + offset) = val;
}
static inline void
pci_set_cfgdata32(struct pci_vdev *pi, int offset, uint32_t val)
{
assert(offset <= (PCI_REGMAX - 3) && (offset & 3) == 0);
*(uint32_t *)(pi->cfgdata + offset) = val;
}
static inline uint8_t
pci_get_cfgdata8(struct pci_vdev *pi, int offset)
{
assert(offset <= PCI_REGMAX);
return (*(uint8_t *)(pi->cfgdata + offset));
}
static inline uint16_t
pci_get_cfgdata16(struct pci_vdev *pi, int offset)
{
assert(offset <= (PCI_REGMAX - 1) && (offset & 1) == 0);
return (*(uint16_t *)(pi->cfgdata + offset));
}
static inline uint32_t
pci_get_cfgdata32(struct pci_vdev *pi, int offset)
{
assert(offset <= (PCI_REGMAX - 3) && (offset & 3) == 0);
return (*(uint32_t *)(pi->cfgdata + offset));
}
#endif /* _PCI_CORE_H_ */
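/*
 * Illustrative sketch only, not part of the original header: one way a
 * virtual device could plug into the pci_vdev_ops framework above.  The
 * dummy_init name, the IDs, and the PCIR_/PCIC_ constants (assumed to come
 * from pcireg.h) are placeholders rather than the DM's actual usage.
 */
static int
dummy_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
{
	pci_set_cfgdata16(dev, PCIR_VENDOR, 0x8086);	/* placeholder IDs */
	pci_set_cfgdata16(dev, PCIR_DEVICE, 0xffff);
	pci_set_cfgdata8(dev, PCIR_CLASS, PCIC_OTHER);

	/* one 4KB 32-bit memory BAR at index 0 */
	return pci_emul_alloc_bar(dev, 0, PCIBAR_MEM32, 4096);
}

static struct pci_vdev_ops pci_ops_dummy = {
	.class_name	= "dummy",
	.vdev_init	= dummy_init,
};
DEFINE_PCI_DEVTYPE(pci_ops_dummy);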

148
devicemodel/include/pciio.h Normal file
View File

@@ -0,0 +1,148 @@
/*-
* Copyright (c) 1997, Stefan Esser <se@FreeBSD.ORG>
* Copyright (c) 1997, 1998, 1999, Kenneth D. Merry <ken@FreeBSD.ORG>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef _PCIIO_H_
#define _PCIIO_H_
#include "asm/ioctl.h"
#define PCI_MAXNAMELEN 16
typedef enum {
PCI_GETCONF_LAST_DEVICE,
PCI_GETCONF_LIST_CHANGED,
PCI_GETCONF_MORE_DEVS,
PCI_GETCONF_ERROR
} pci_getconf_status;
typedef enum {
PCI_GETCONF_NO_MATCH = 0x0000,
PCI_GETCONF_MATCH_DOMAIN = 0x0001,
PCI_GETCONF_MATCH_BUS = 0x0002,
PCI_GETCONF_MATCH_DEV = 0x0004,
PCI_GETCONF_MATCH_FUNC = 0x0008,
PCI_GETCONF_MATCH_NAME = 0x0010,
PCI_GETCONF_MATCH_UNIT = 0x0020,
PCI_GETCONF_MATCH_VENDOR = 0x0040,
PCI_GETCONF_MATCH_DEVICE = 0x0080,
PCI_GETCONF_MATCH_CLASS = 0x0100
} pci_getconf_flags;
struct pcisel {
u_int32_t domain; /* domain number */
u_int8_t bus; /* bus number */
u_int8_t dev; /* device on this bus */
u_int8_t func; /* function on this device */
};
struct pci_conf {
struct pcisel sel; /* domain+bus+slot+function */
u_int8_t hdr; /* PCI header type */
u_int16_t subvendor; /* card vendor ID */
u_int16_t subdevice; /* card device ID, assigned by
* card vendor
*/
u_int16_t vendor; /* chip vendor ID */
u_int16_t device; /* chip device ID, assigned by
* chip vendor
*/
u_int8_t pc_class; /* chip PCI class */
u_int8_t subclass; /* chip PCI subclass */
u_int8_t progif; /* chip PCI programming interface */
u_int8_t revid; /* chip revision ID */
char pd_name[PCI_MAXNAMELEN + 1]; /* device name */
u_long pd_unit; /* device unit number */
};
struct pci_match_conf {
struct pcisel sel; /* domain+bus+slot+function */
char pd_name[PCI_MAXNAMELEN + 1]; /* device name */
u_long pd_unit; /* Unit number */
u_int16_t vendor; /* PCI Vendor ID */
u_int16_t device; /* PCI Device ID */
u_int8_t pc_class; /* PCI class */
pci_getconf_flags flags; /* Matching expression */
};
struct pci_conf_io {
u_int32_t pat_buf_len; /* pattern buffer length */
u_int32_t num_patterns; /* number of patterns */
struct pci_match_conf *patterns; /* pattern buffer */
u_int32_t match_buf_len; /* match buffer length */
u_int32_t num_matches; /* number of matches returned */
struct pci_conf *matches; /* match buffer */
u_int32_t offset; /* offset into device list */
u_int32_t generation; /* device list generation */
pci_getconf_status status; /* request status */
};
struct pci_io {
struct pcisel sel; /* device to operate on */
int reg; /* configuration register to examine */
int width; /* width (in bytes) of read or write */
u_int32_t data; /* data to write or result of read */
};
struct pci_bar_io {
struct pcisel sel; /* device to operate on */
int reg; /* starting address of BAR */
int pbi_enabled; /* decoding enabled */
uint64_t base; /* current value of BAR */
uint64_t length; /* length of BAR */
};
struct pci_vpd_element {
char keyword[2];
uint8_t flags;
uint8_t datalen;
uint8_t data[0];
};
#define PVE_FLAG_IDENT 0x01 /* Element is the string identifier */
#define PVE_FLAG_RW 0x02 /* Element is read/write */
#define PVE_NEXT(pve) \
((struct pci_vpd_element *)((char *)(pve) + \
sizeof(struct pci_vpd_element) + (pve)->datalen))
struct pci_list_vpd_io {
struct pcisel plvi_sel; /* device to operate on */
size_t plvi_len; /* size of the data area */
struct pci_vpd_element *plvi_data;
};
#define PCIOCGETCONF _IOWR('p', 5, struct pci_conf_io)
#define PCIOCREAD _IOWR('p', 2, struct pci_io)
#define PCIOCWRITE _IOWR('p', 3, struct pci_io)
#define PCIOCATTACHED _IOWR('p', 4, struct pci_io)
#define PCIOCGETBAR _IOWR('p', 6, struct pci_bar_io)
#define PCIOCLISTVPD _IOWR('p', 7, struct pci_list_vpd_io)
#endif /* !_PCIIO_H_ */
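/*
 * Illustrative sketch only, not part of the original header: reading one
 * config-space register of a physical device through the PCIOCREAD ioctl.
 * The "/dev/pci" node name and the 0:2.0 BDF are placeholders; whichever
 * character device actually implements these ioctls would be opened instead.
 */
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>
#include <sys/ioctl.h>

static int
read_vendor_id(void)
{
	struct pci_io io = {
		.sel   = { .domain = 0, .bus = 0, .dev = 2, .func = 0 },
		.reg   = 0x00,	/* vendor ID offset in config space */
		.width = 2,
	};
	int fd = open("/dev/pci", O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, PCIOCREAD, &io) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	printf("vendor id: 0x%04x\n", io.data);
	return 0;
}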

1068
devicemodel/include/pcireg.h Normal file

File diff suppressed because it is too large

View File

@@ -0,0 +1,40 @@
/*-
* Copyright (c) 2015 Tycho Nightingale <tycho.nightingale@pluribusnetworks.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _PS2KBD_H_
#define _PS2KBD_H_
struct atkbdc_base;
struct ps2kbd_info *ps2kbd_init(struct atkbdc_base *base);
void ps2kbd_deinit(struct atkbdc_base *base);
int ps2kbd_read(struct ps2kbd_info *kbd, uint8_t *val);
void ps2kbd_write(struct ps2kbd_info *kbd, uint8_t val);
#endif /* _PS2KBD_H_ */

View File

@@ -0,0 +1,42 @@
/*-
* Copyright (c) 2015 Tycho Nightingale <tycho.nightingale@pluribusnetworks.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _PS2MOUSE_H_
#define _PS2MOUSE_H_
struct atkbdc_base;
struct ps2mouse_info *ps2mouse_init(struct atkbdc_base *base);
void ps2mouse_deinit(struct atkbdc_base *base);
int ps2mouse_read(struct ps2mouse_info *mouse, uint8_t *val);
void ps2mouse_write(struct ps2mouse_info *mouse, uint8_t val, int insert);
void ps2mouse_toggle(struct ps2mouse_info *mouse, int enable);
int ps2mouse_fifocnt(struct ps2mouse_info *mouse);
#endif /* _PS2MOUSE_H_ */

View File

@@ -0,0 +1,374 @@
/*
* common definition
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright (c) 2017 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Copyright (C) 2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/**
* @file acrn_common.h
*
* @brief acrn common data structure for hypercall or ioctl
*/
#ifndef _ACRN_COMMON_H_
#define _ACRN_COMMON_H_
#include <types.h>
/*
* Common structures for ACRN/VHM/DM
*/
/*
* IO request
*/
#define VHM_REQUEST_MAX 16
#define REQ_STATE_PENDING 0
#define REQ_STATE_SUCCESS 1
#define REQ_STATE_PROCESSING 2
#define REQ_STATE_FAILED -1
#define REQ_PORTIO 0
#define REQ_MMIO 1
#define REQ_PCICFG 2
#define REQ_WP 3
#define REQUEST_READ 0
#define REQUEST_WRITE 1
/* Generic VM flags from guest OS */
#define SECURE_WORLD_ENABLED (1UL<<0) /* Whether secure world is enabled */
/**
* @brief Hypercall
*
* @addtogroup acrn_hypercall ACRN Hypercall
* @{
*/
struct mmio_request {
uint32_t direction;
uint32_t reserved;
int64_t address;
int64_t size;
int64_t value;
} __aligned(8);
struct pio_request {
uint32_t direction;
uint32_t reserved;
int64_t address;
int64_t size;
int32_t value;
} __aligned(8);
struct pci_request {
uint32_t direction;
uint32_t reserved[3]; /* must keep the same header fields as pio_request */
int64_t size;
int32_t value;
int32_t bus;
int32_t dev;
int32_t func;
int32_t reg;
} __aligned(8);
/* vhm_request is 256-byte aligned */
struct vhm_request {
/* offset: 0bytes - 63bytes */
union {
uint32_t type;
int32_t reserved0[16];
};
/* offset: 64bytes-127bytes */
union {
struct pio_request pio_request;
struct pci_request pci_request;
struct mmio_request mmio_request;
int64_t reserved1[8];
} reqs;
/* True: a valid request that VHM needs to process.
 * ACRN writes, VHM reads only.
 */
int32_t valid;
/* the client which is distributed to handle this request */
int32_t client;
/* 1: VHM processed the request successfully
 * 0: VHM has not processed it yet
 * -1: VHM failed to process it (invalid request)
 * VHM writes, ACRN reads only
 */
int32_t processed;
} __aligned(256);
struct vhm_request_buffer {
union {
struct vhm_request req_queue[VHM_REQUEST_MAX];
int8_t reserved[4096];
};
} __aligned(4096);
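/*
 * Illustrative sketch only, not part of the original header: how a DM-side
 * handler might dispatch one request slot.  It assumes one slot per vCPU and
 * hypothetical emulate_* helpers; the exact valid/processed handshake with
 * VHM is simplified here.
 */
static void
handle_one_request(struct vhm_request_buffer *buf, int vcpu)
{
	struct vhm_request *req = &buf->req_queue[vcpu];

	if (!req->valid)
		return;

	switch (req->type) {
	case REQ_PORTIO:
		/* emulate_pio(&req->reqs.pio_request); */
		break;
	case REQ_MMIO:
	case REQ_WP:
		/* emulate_mmio(&req->reqs.mmio_request); */
		break;
	case REQ_PCICFG:
		/* emulate_pcicfg(&req->reqs.pci_request); */
		break;
	}
	req->processed = REQ_STATE_SUCCESS;	/* DM/VHM writes, ACRN reads */
}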
/**
* @brief Info to create a VM, the parameter for HC_CREATE_VM hypercall
*/
struct acrn_create_vm {
/** created VM ID returned to VHM. Keep it as the first field */
int32_t vmid;
/** number of VCPUs this VM wants to create */
uint32_t vcpu_num;
/** the GUID of this VM */
uint8_t GUID[16];
/* VM flag bits from the guest OS; currently only
 * SECURE_WORLD_ENABLED (1UL<<0) is used
 */
uint64_t vm_flag;
/** Reserved for future use*/
uint8_t reserved[24];
} __aligned(8);
/**
* @brief Info to create a VCPU
*
* the parameter for HC_CREATE_VCPU hypercall
*/
struct acrn_create_vcpu {
/** the virtual CPU ID for the VCPU created */
uint32_t vcpu_id;
/** the physical CPU ID for the VCPU created */
uint32_t pcpu_id;
} __aligned(8);
/**
* @brief Info to set ioreq buffer for a created VM
*
* the parameter for HC_SET_IOREQ_BUFFER hypercall
*/
struct acrn_set_ioreq_buffer {
/** guest physical address of VM request_buffer */
uint64_t req_buf;
} __aligned(8);
/** Interrupt type for acrn_irqline: inject interrupt to IOAPIC */
#define ACRN_INTR_TYPE_ISA 0
/** Interrupt type for acrn_irqline: inject interrupt to both PIC and IOAPIC */
#define ACRN_INTR_TYPE_IOAPIC 1
/**
* @brief Info to assert/deassert/pulse a virtual IRQ line for a VM
*
* the parameter for HC_ASSERT_IRQLINE/HC_DEASSERT_IRQLINE/HC_PULSE_IRQLINE
* hypercall
*/
struct acrn_irqline {
/** interrupt type which could be IOAPIC or ISA */
uint32_t intr_type;
/** reserved for alignment padding */
uint32_t reserved;
/** pic IRQ for ISA type */
uint64_t pic_irq;
/** ioapic IRQ for both IOAPIC and ISA types;
 * if -1, this IRQ will not be injected
 */
uint64_t ioapic_irq;
} __aligned(8);
/**
* @brief Info to inject an MSI interrupt to a VM
*
* the parameter for HC_INJECT_MSI hypercall
*/
struct acrn_msi_entry {
/** MSI addr[19:12] with dest VCPU ID */
uint64_t msi_addr;
/** MSI data[7:0] with vector */
uint64_t msi_data;
} __aligned(8);
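/*
 * Illustrative sketch only, not part of the original header: filling an MSI
 * entry for fixed delivery to one vCPU.  The 0xfee00000 base follows the
 * standard x86 MSI address format; delivery-mode and trigger bits are
 * omitted for brevity.
 */
static inline void
fill_msi_entry(struct acrn_msi_entry *msi, uint32_t vcpu_id, uint8_t vector)
{
	msi->msi_addr = 0xfee00000UL | (vcpu_id << 12);	/* addr[19:12] = dest */
	msi->msi_data = vector;				/* data[7:0] = vector */
}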
/**
* @brief Info to inject an NMI interrupt for a VM
*/
struct acrn_nmi_entry {
/** virtual CPU ID to inject */
int64_t vcpu_id;
} __aligned(8);
/**
* @brief Info to remap pass-through PCI MSI for a VM
*
* the parameter for HC_VM_PCI_MSIX_REMAP hypercall
*/
struct acrn_vm_pci_msix_remap {
/** pass-through PCI device virtual BDF# */
uint16_t virt_bdf;
/** pass-through PCI device physical BDF# */
uint16_t phys_bdf;
/** pass-through PCI device MSI/MSI-X cap control data */
uint16_t msi_ctl;
/** reserved for alignment padding */
uint16_t reserved;
/** pass-through PCI device MSI address to remap, which will be
 * returned to the caller after remapping
 */
uint64_t msi_addr; /* IN/OUT: MSI address to fix */
/** pass-through PCI device MSI data to remap, which will be
 * returned to the caller after remapping
 */
uint32_t msi_data;
/** pass-through PCI device is MSI or MSI-X
* 0 - MSI, 1 - MSI-X
*/
int32_t msix;
/** if the pass-through PCI device is MSI-X, this field contains
* the MSI-X entry table index
*/
int32_t msix_entry_index;
/** if the pass-through PCI device is MSI-X, this field contains
* Vector Control for MSI-X Entry, field defined in MSI-X spec
*/
uint32_t vector_ctl;
} __aligned(8);
/**
* @brief The guest config pointer offset.
*
* It supports passing a pointer to DM configuration data; based on it, the
* hypervisor parses and then passes the DM-defined configuration to the
* guest VCPU when booting the guest VM.
* The address 0xd0000 is chosen by the DM, which arranges the entire memory
* layout below 1M; the DM must make sure nothing else overlaps the 0xd0000
* region.
*/
#define GUEST_CFG_OFFSET 0xd0000
/**
* @brief The power state data of a VCPU.
*
*/
#define SPACE_SYSTEM_MEMORY 0
#define SPACE_SYSTEM_IO 1
#define SPACE_PCI_CONFIG 2
#define SPACE_Embedded_Control 3
#define SPACE_SMBUS 4
#define SPACE_PLATFORM_COMM 10
#define SPACE_FFixedHW 0x7F
struct acpi_generic_address {
uint8_t space_id;
uint8_t bit_width;
uint8_t bit_offset;
uint8_t access_size;
uint64_t address;
} __attribute__((aligned(8)));
struct cpu_cx_data {
struct acpi_generic_address cx_reg;
uint8_t type;
uint32_t latency;
uint64_t power;
} __attribute__((aligned(8)));
struct cpu_px_data {
uint64_t core_frequency; /* megahertz */
uint64_t power; /* milliWatts */
uint64_t transition_latency; /* microseconds */
uint64_t bus_master_latency; /* microseconds */
uint64_t control; /* control value */
uint64_t status; /* success indicator */
} __attribute__((aligned(8)));
/**
* @brief Info of a PM command from DM/VHM.
*
* The command specifies the request type (e.g. get Px count or data) for a
* specific VM and a specific VCPU with a specific state number.
* For Px, PMCMD_STATE_NUM means the Px number from 0 to (MAX_PSTATE - 1);
* for Cx, PMCMD_STATE_NUM means the Cx entry index from 1 to MAX_CX_ENTRY.
*/
#define PMCMD_VMID_MASK 0xff000000
#define PMCMD_VCPUID_MASK 0x00ff0000
#define PMCMD_STATE_NUM_MASK 0x0000ff00
#define PMCMD_TYPE_MASK 0x000000ff
#define PMCMD_VMID_SHIFT 24
#define PMCMD_VCPUID_SHIFT 16
#define PMCMD_STATE_NUM_SHIFT 8
enum pm_cmd_type {
PMCMD_GET_PX_CNT,
PMCMD_GET_PX_DATA,
PMCMD_GET_CX_CNT,
PMCMD_GET_CX_DATA,
};
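/*
 * Illustrative sketch only, not part of the original header: composing a PM
 * command word from the mask/shift definitions above.
 */
static inline uint32_t
pmcmd_compose(uint32_t vmid, uint32_t vcpuid, uint32_t state_num,
	      enum pm_cmd_type type)
{
	return ((vmid << PMCMD_VMID_SHIFT) & PMCMD_VMID_MASK) |
	       ((vcpuid << PMCMD_VCPUID_SHIFT) & PMCMD_VCPUID_MASK) |
	       ((state_num << PMCMD_STATE_NUM_SHIFT) & PMCMD_STATE_NUM_MASK) |
	       ((uint32_t)type & PMCMD_TYPE_MASK);
}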
/**
* @}
*/
#endif /* _ACRN_COMMON_H_ */

View File

@@ -0,0 +1,214 @@
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright (c) 2017 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Copyright (C) 2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/**
* @file vhm_ioctl_defs.h
*
* @brief Virtio and Hypervisor Module definition for ioctl to user space
*/
#ifndef _VHM_IOCTL_DEFS_H_
#define _VHM_IOCTL_DEFS_H_
/* Common structures for ACRN/VHM/DM */
#include "acrn_common.h"
/*
* Common IOCTL ID definitions for VHM/DM
*/
#define _IC_ID(x, y) (((x)<<24)|(y))
#define IC_ID 0x43UL
/* General */
#define IC_ID_GEN_BASE 0x0UL
#define IC_GET_API_VERSION _IC_ID(IC_ID, IC_ID_GEN_BASE + 0x00)
/* VM management */
#define IC_ID_VM_BASE 0x10UL
#define IC_CREATE_VM _IC_ID(IC_ID, IC_ID_VM_BASE + 0x00)
#define IC_DESTROY_VM _IC_ID(IC_ID, IC_ID_VM_BASE + 0x01)
#define IC_START_VM _IC_ID(IC_ID, IC_ID_VM_BASE + 0x02)
#define IC_PAUSE_VM _IC_ID(IC_ID, IC_ID_VM_BASE + 0x03)
#define IC_CREATE_VCPU _IC_ID(IC_ID, IC_ID_VM_BASE + 0x04)
/* IRQ and Interrupts */
#define IC_ID_IRQ_BASE 0x20UL
#define IC_ASSERT_IRQLINE _IC_ID(IC_ID, IC_ID_IRQ_BASE + 0x00)
#define IC_DEASSERT_IRQLINE _IC_ID(IC_ID, IC_ID_IRQ_BASE + 0x01)
#define IC_PULSE_IRQLINE _IC_ID(IC_ID, IC_ID_IRQ_BASE + 0x02)
#define IC_INJECT_MSI _IC_ID(IC_ID, IC_ID_IRQ_BASE + 0x03)
/* DM ioreq management */
#define IC_ID_IOREQ_BASE 0x30UL
#define IC_SET_IOREQ_BUFFER _IC_ID(IC_ID, IC_ID_IOREQ_BASE + 0x00)
#define IC_NOTIFY_REQUEST_FINISH _IC_ID(IC_ID, IC_ID_IOREQ_BASE + 0x01)
#define IC_CREATE_IOREQ_CLIENT _IC_ID(IC_ID, IC_ID_IOREQ_BASE + 0x02)
#define IC_ATTACH_IOREQ_CLIENT _IC_ID(IC_ID, IC_ID_IOREQ_BASE + 0x03)
#define IC_DESTROY_IOREQ_CLIENT _IC_ID(IC_ID, IC_ID_IOREQ_BASE + 0x04)
/* Guest memory management */
#define IC_ID_MEM_BASE 0x40UL
#define IC_ALLOC_MEMSEG _IC_ID(IC_ID, IC_ID_MEM_BASE + 0x00)
#define IC_SET_MEMSEG _IC_ID(IC_ID, IC_ID_MEM_BASE + 0x01)
/* PCI assignment */
#define IC_ID_PCI_BASE 0x50UL
#define IC_ASSIGN_PTDEV _IC_ID(IC_ID, IC_ID_PCI_BASE + 0x00)
#define IC_DEASSIGN_PTDEV _IC_ID(IC_ID, IC_ID_PCI_BASE + 0x01)
#define IC_VM_PCI_MSIX_REMAP _IC_ID(IC_ID, IC_ID_PCI_BASE + 0x02)
#define IC_SET_PTDEV_INTR_INFO _IC_ID(IC_ID, IC_ID_PCI_BASE + 0x03)
#define IC_RESET_PTDEV_INTR_INFO _IC_ID(IC_ID, IC_ID_PCI_BASE + 0x04)
/* Power management */
#define IC_ID_PM_BASE 0x60UL
#define IC_PM_GET_CPU_STATE _IC_ID(IC_ID, IC_ID_PM_BASE + 0x00)
/**
* struct vm_memseg - memory segment info for guest
*
* @len: length of memory segment
* @gpa: guest physical start address of memory segment
*/
struct vm_memseg {
uint64_t len;
uint64_t gpa;
};
#define VM_MEMMAP_SYSMEM 0
#define VM_MMIO 1
/**
* struct vm_memmap - EPT memory mapping info for guest
*/
struct vm_memmap {
/** @type: memory mapping type */
uint32_t type;
/** @using_vma: using vma_base to get vm0_gpa,
* only for type == VM_MEMMAP_SYSMEM
*/
uint32_t using_vma;
/** @gpa: user OS guest physical start address of memory mapping */
uint64_t gpa;
/** union */
union {
/** @hpa: host physical start address of memory,
* only for type == VM_MMIO
*/
uint64_t hpa;
/** @vma_base: service OS user virtual start address of
* memory, only for type == VM_MEMMAP_SYSMEM &&
* using_vma == true
*/
uint64_t vma_base;
};
/** @len: the length of memory range mapped */
uint64_t len; /* mmap length */
/** @prot: memory mapping attribute */
uint32_t prot; /* RWX */
};
/**
* struct ic_ptdev_irq - pass thru device irq data structure
*/
struct ic_ptdev_irq {
#define IRQ_INTX 0
#define IRQ_MSI 1
#define IRQ_MSIX 2
/** @type: irq type */
uint32_t type;
/** @virt_bdf: virtual bdf description of pass thru device */
uint16_t virt_bdf; /* IN: Device virtual BDF# */
/** @phys_bdf: physical bdf description of pass thru device */
uint16_t phys_bdf; /* IN: Device physical BDF# */
/** union */
union {
/** struct intx - info of IOAPIC/PIC interrupt */
struct {
/** @virt_pin: virtual IOAPIC pin */
uint32_t virt_pin;
/** @phys_pin: physical IOAPIC pin */
uint32_t phys_pin;
/** @is_pic_pin: whether this is a PIC pin */
uint32_t is_pic_pin;
} intx;
/** struct msix - info of MSI/MSIX interrupt */
struct {
/* Keep this field at the top of msix */
/** @vector_cnt: vector count of MSI/MSIX */
uint32_t vector_cnt;
/** @table_size: size of MSIX table (rounded up to 4K) */
uint32_t table_size;
/** @table_paddr: physical address of MSIX table */
uint64_t table_paddr;
} msix;
};
};
/**
* struct ioreq_notify - data structure to notify the hypervisor that an ioreq is handled
*
* @client_id: client id to identify ioreq client
* @vcpu: identify the ioreq submitter
*/
struct ioreq_notify {
int32_t client_id;
uint32_t vcpu;
};
/**
* struct api_version - data structure to track VHM API version
*
* @major_version: major version of VHM API
* @minor_version: minor version of VHM API
*/
struct api_version {
uint32_t major_version;
uint32_t minor_version;
};
#endif /* _VHM_IOCTL_DEFS_H_ */
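/*
 * Illustrative sketch only, not part of the original header: a DM querying
 * the VHM API version and creating a VM.  The "/dev/acrn_vhm" node name and
 * the argument types for each ioctl are assumptions made for this example.
 */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

static int
create_vm_sketch(struct acrn_create_vm *cv)
{
	struct api_version ver;
	int fd = open("/dev/acrn_vhm", O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, IC_GET_API_VERSION, &ver) < 0 ||
	    ioctl(fd, IC_CREATE_VM, cv) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* keep the fd open for IC_START_VM and friends */
}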

52
devicemodel/include/rtc.h Normal file
View File

@@ -0,0 +1,52 @@
/*-
* Copyright (c) 2014 Neel Natu (neel@freebsd.org)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _RTC_H_
#define _RTC_H_
#include "types.h"
#define IO_RTC 0x070 /* RTC */
struct vrtc;
struct vmctx;
int vrtc_init(struct vmctx *ctx);
void vrtc_enable_localtime(int l_time);
void vrtc_deinit(struct vmctx *ctx);
void vrtc_reset(struct vrtc *vrtc);
time_t vrtc_get_time(struct vrtc *vrtc);
int vrtc_set_time(struct vrtc *vrtc, time_t secs);
int vrtc_nvram_read(struct vrtc *vrtc, int offset, uint8_t *retval);
int vrtc_nvram_write(struct vrtc *vrtc, int offset, uint8_t value);
int vrtc_addr_handler(struct vmctx *ctx, int vcpu, int in, int port,
int bytes, uint32_t *eax, void *arg);
int vrtc_data_handler(struct vmctx *ctx, int vcpu, int in, int port,
int bytes, uint32_t *eax, void *arg);
#endif
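/*
 * Illustrative sketch only, not part of the original header: seeding the
 * virtual RTC with the host time once a struct vrtc has been obtained.
 */
#include <time.h>
#include <stdio.h>

static void
sync_vrtc_to_host(struct vrtc *vrtc)
{
	if (vrtc_set_time(vrtc, time(NULL)) != 0)
		fprintf(stderr, "failed to set vRTC time\n");
}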

View File

@@ -0,0 +1,276 @@
/*-
* Copyright (c) 1989, 1990 William F. Jolitz
* Copyright (c) 1990 The Regents of the University of California.
* All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* William Jolitz.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: @(#)segments.h 7.1 (Berkeley) 5/9/91
* $FreeBSD$
*/
#ifndef _SEGMENTS_H_
#define _SEGMENTS_H_
/*
* X86 Segmentation Data Structures and definitions
*/
/*
* Selectors
*/
#define SEL_RPL_MASK 3 /* requester priv level */
#define ISPL(s) ((s)&3) /* priority level of a selector */
#define SEL_KPL 0 /* kernel priority level */
#define SEL_UPL 3 /* user priority level */
#define ISLDT(s) ((s)&SEL_LDT) /* is it local or global */
#define SEL_LDT 4 /* local descriptor table */
#define IDXSEL(s) (((s)>>3) & 0x1fff) /* index of selector */
#define LSEL(s, r) (((s)<<3) | SEL_LDT | r) /* a local selector */
#define GSEL(s, r) (((s)<<3) | r) /* a global selector */
/*
* User segment descriptors (%cs, %ds etc for i386 apps. 64 bit wide)
* For long-mode apps, %cs only has the conforming bit in sd_type, the sd_dpl,
* sd_p, sd_l and sd_def32 (which must be zero). %ds only has sd_p.
*/
struct segment_descriptor {
unsigned sd_lolimit:16; /* segment extent (lsb) */
unsigned sd_lobase:24; /* segment base address (lsb) */
unsigned sd_type:5; /* segment type */
unsigned sd_dpl:2; /* segment descriptor priority level */
unsigned sd_p:1; /* segment descriptor present */
unsigned sd_hilimit:4; /* segment extent (msb) */
unsigned sd_xx:2; /* unused */
unsigned sd_def32:1; /* default 32 vs 16 bit size */
unsigned sd_gran:1; /* limit granularity (byte/page units)*/
unsigned sd_hibase:8; /* segment base address (msb) */
} __attribute__((packed));
struct user_segment_descriptor {
unsigned sd_lolimit:16; /* segment extent (lsb) */
unsigned sd_lobase:24; /* segment base address (lsb) */
unsigned sd_type:5; /* segment type */
unsigned sd_dpl:2; /* segment descriptor priority level */
unsigned sd_p:1; /* segment descriptor present */
unsigned sd_hilimit:4; /* segment extent (msb) */
unsigned sd_xx:1; /* unused */
unsigned sd_long:1; /* long mode (cs only) */
unsigned sd_def32:1; /* default 32 vs 16 bit size */
unsigned sd_gran:1; /* limit granularity (byte/page units)*/
unsigned sd_hibase:8; /* segment base address (msb) */
} __attribute__((packed));
#define USD_GETBASE(sd) (((sd)->sd_lobase) | (sd)->sd_hibase << 24)
#define USD_SETBASE(sd, b) do { (sd)->sd_lobase = (b); \
(sd)->sd_hibase = ((b) >> 24); } \
while (0)
#define USD_GETLIMIT(sd) (((sd)->sd_lolimit) | (sd)->sd_hilimit << 16)
#define USD_SETLIMIT(sd, l) do { (sd)->sd_lolimit = (l); \
(sd)->sd_hilimit = ((l) >> 16); } \
while (0)
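/*
 * Illustrative sketch only, not part of the original header: using the
 * USD_SETBASE/USD_SETLIMIT macros to fill a descriptor; a real descriptor
 * would also need its type, DPL and granularity bits set.
 */
static inline void
set_seg_base_limit(struct user_segment_descriptor *sd,
		   unsigned int base, unsigned int limit)
{
	USD_SETBASE(sd, base);		/* split into sd_lobase and sd_hibase */
	USD_SETLIMIT(sd, limit);	/* split into sd_lolimit and sd_hilimit */
	sd->sd_p = 1;			/* mark the descriptor present */
}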
#ifdef __i386__
/*
* Gate descriptors (e.g. indirect descriptors)
*/
struct gate_descriptor {
unsigned gd_looffset:16; /* gate offset (lsb) */
unsigned gd_selector:16; /* gate segment selector */
unsigned gd_stkcpy:5; /* number of stack wds to cpy */
unsigned gd_xx:3; /* unused */
unsigned gd_type:5; /* segment type */
unsigned gd_dpl:2; /* segment descriptor priority level */
unsigned gd_p:1; /* segment descriptor present */
unsigned gd_hioffset:16; /* gate offset (msb) */
} __attribute__((packed));
/*
* Generic descriptor
*/
union descriptor {
struct segment_descriptor sd;
struct gate_descriptor gd;
};
#else
/*
* Gate descriptors (e.g. indirect descriptors, trap, interrupt etc. 128 bit)
* Only interrupt and trap gates have gd_ist.
*/
struct gate_descriptor {
uint64_t gd_looffset:16; /* gate offset (lsb) */
uint64_t gd_selector:16; /* gate segment selector */
uint64_t gd_ist:3; /* IST table index */
uint64_t gd_xx:5; /* unused */
uint64_t gd_type:5; /* segment type */
uint64_t gd_dpl:2; /* segment descriptor priority level */
uint64_t gd_p:1; /* segment descriptor present */
uint64_t gd_hioffset:48; /* gate offset (msb) */
uint64_t sd_xx1:32;
} __attribute__((packed));
/*
* Generic descriptor
*/
union descriptor {
struct user_segment_descriptor sd;
struct gate_descriptor gd;
};
#endif
/* system segments and gate types */
#define SDT_SYSNULL 0 /* system null */
#define SDT_SYS286TSS 1 /* system 286 TSS available */
#define SDT_SYSLDT 2 /* system local descriptor table */
#define SDT_SYS286BSY 3 /* system 286 TSS busy */
#define SDT_SYS286CGT 4 /* system 286 call gate */
#define SDT_SYSTASKGT 5 /* system task gate */
#define SDT_SYS286IGT 6 /* system 286 interrupt gate */
#define SDT_SYS286TGT 7 /* system 286 trap gate */
#define SDT_SYSNULL2 8 /* system null again */
#define SDT_SYS386TSS 9 /* system 386 TSS available */
#define SDT_SYSTSS 9 /* system available 64 bit TSS */
#define SDT_SYSNULL3 10 /* system null again */
#define SDT_SYS386BSY 11 /* system 386 TSS busy */
#define SDT_SYSBSY 11 /* system busy 64 bit TSS */
#define SDT_SYS386CGT 12 /* system 386 call gate */
#define SDT_SYSCGT 12 /* system 64 bit call gate */
#define SDT_SYSNULL4 13 /* system null again */
#define SDT_SYS386IGT 14 /* system 386 interrupt gate */
#define SDT_SYSIGT 14 /* system 64 bit interrupt gate */
#define SDT_SYS386TGT 15 /* system 386 trap gate */
#define SDT_SYSTGT 15 /* system 64 bit trap gate */
/* memory segment types */
#define SDT_MEMRO 16 /* memory read only */
#define SDT_MEMROA 17 /* memory read only accessed */
#define SDT_MEMRW 18 /* memory read write */
#define SDT_MEMRWA 19 /* memory read write accessed */
#define SDT_MEMROD 20 /* memory read only expand dwn limit */
#define SDT_MEMRODA 21 /* memory read only expand dwn limit accessed */
#define SDT_MEMRWD 22 /* memory read write expand dwn limit */
#define SDT_MEMRWDA 23 /* memory read write expand dwn limit accessed*/
#define SDT_MEME 24 /* memory execute only */
#define SDT_MEMEA 25 /* memory execute only accessed */
#define SDT_MEMER 26 /* memory execute read */
#define SDT_MEMERA 27 /* memory execute read accessed */
#define SDT_MEMEC 28 /* memory execute only conforming */
#define SDT_MEMEAC 29 /* memory execute only accessed conforming */
#define SDT_MEMERC 30 /* memory execute read conforming */
#define SDT_MEMERAC 31 /* memory execute read accessed conforming */
/*
* Size of IDT table
*/
#define NIDT 256 /* 32 reserved, 0x80 syscall, most are h/w */
#define NRSVIDT 32 /* reserved entries for cpu exceptions */
/*
* Entries in the Interrupt Descriptor Table (IDT)
*/
#define IDT_DE 0 /* #DE: Divide Error */
#define IDT_DB 1 /* #DB: Debug */
#define IDT_NMI 2 /* Nonmaskable External Interrupt */
#define IDT_BP 3 /* #BP: Breakpoint */
#define IDT_OF 4 /* #OF: Overflow */
#define IDT_BR 5 /* #BR: Bound Range Exceeded */
#define IDT_UD 6 /* #UD: Undefined/Invalid Opcode */
#define IDT_NM 7 /* #NM: No Math Coprocessor */
#define IDT_DF 8 /* #DF: Double Fault */
#define IDT_FPUGP 9 /* Coprocessor Segment Overrun */
#define IDT_TS 10 /* #TS: Invalid TSS */
#define IDT_NP 11 /* #NP: Segment Not Present */
#define IDT_SS 12 /* #SS: Stack Segment Fault */
#define IDT_GP 13 /* #GP: General Protection Fault */
#define IDT_PF 14 /* #PF: Page Fault */
#define IDT_MF 16 /* #MF: FPU Floating-Point Error */
#define IDT_AC 17 /* #AC: Alignment Check */
#define IDT_MC 18 /* #MC: Machine Check */
#define IDT_XF 19 /* #XF: SIMD Floating-Point Exception */
#define IDT_IO_INTS NRSVIDT /* Base of IDT entries for I/O interrupts. */
#define IDT_SYSCALL 0x80 /* System Call Interrupt Vector */
#define IDT_DTRACE_RET 0x92 /* DTrace pid provider Interrupt Vector */
#define IDT_EVTCHN 0x93 /* Xen HVM Event Channel Interrupt Vector */
#if defined(__i386__)
/*
* Entries in the Global Descriptor Table (GDT)
* Note that each 4 entries share a single 32 byte L1 cache line.
* Some of the fast syscall instructions require a specific order here.
*/
#define GNULL_SEL 0 /* Null Descriptor */
#define GPRIV_SEL 1 /* SMP Per-Processor Private Data */
#define GUFS_SEL 2 /* User %fs Descriptor (order critical: 1) */
#define GUGS_SEL 3 /* User %gs Descriptor (order critical: 2) */
#define GCODE_SEL 4 /* Kernel Code Descriptor (order critical: 1) */
#define GDATA_SEL 5 /* Kernel Data Descriptor (order critical: 2) */
#define GUCODE_SEL 6 /* User Code Descriptor (order critical: 3) */
#define GUDATA_SEL 7 /* User Data Descriptor (order critical: 4) */
#define GBIOSLOWMEM_SEL 8 /* BIOS low memory access (must be entry 8) */
#define GPROC0_SEL 9 /* Task state process slot zero and up */
#define GLDT_SEL 10 /* Default User LDT */
#define GUSERLDT_SEL 11 /* User LDT */
#define GPANIC_SEL 12 /* Task state to consider panic from */
#define GBIOSCODE32_SEL 13 /* BIOS interface (32bit Code) */
#define GBIOSCODE16_SEL 14 /* BIOS interface (16bit Code) */
#define GBIOSDATA_SEL 15 /* BIOS interface (Data) */
#define GBIOSUTIL_SEL 16 /* BIOS interface (Utility) */
#define GBIOSARGS_SEL 17 /* BIOS interface (Arguments) */
#define GNDIS_SEL 18 /* For the NDIS layer */
#define NGDT 19
/*
* Entries in the Local Descriptor Table (LDT)
*/
#define LSYS5CALLS_SEL 0 /* forced by intel BCS */
#define LSYS5SIGR_SEL 1
#define LUCODE_SEL 3
#define LUDATA_SEL 5
#define NLDT (LUDATA_SEL + 1)
#else /* !__i386__ */
/*
* Entries in the Global Descriptor Table (GDT)
*/
#define GNULL_SEL 0 /* Null Descriptor */
#define GNULL2_SEL 1 /* Null Descriptor */
#define GUFS32_SEL 2 /* User 32 bit %fs Descriptor */
#define GUGS32_SEL 3 /* User 32 bit %gs Descriptor */
#define GCODE_SEL 4 /* Kernel Code Descriptor */
#define GDATA_SEL 5 /* Kernel Data Descriptor */
#define GUCODE32_SEL 6 /* User 32 bit code Descriptor */
#define GUDATA_SEL 7 /* User 32/64 bit Data Descriptor */
#define GUCODE_SEL 8 /* User 64 bit Code Descriptor */
#define GPROC0_SEL 9 /* TSS for entering kernel etc */
/* slot 10 is second half of GPROC0_SEL */
#define GUSERLDT_SEL 11 /* LDT */
/* slot 12 is second half of GUSERLDT_SEL */
#define NGDT 13
#endif /* __i386__ */
#endif /* !_SEGMENTS_H_ */

View File

@@ -0,0 +1,36 @@
/*-
* Copyright (c) 2014 Tycho Nightingale <tycho.nightingale@pluribusnetworks.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _SMBIOSTBL_H_
#define _SMBIOSTBL_H_
struct vmctx;
int smbios_build(struct vmctx *ctx);
#endif /* _SMBIOSTBL_H_ */

View File

@@ -0,0 +1,914 @@
/*-
* Copyright (c) 1991 The Regents of the University of California.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: @(#)specialreg.h 7.1 (Berkeley) 5/9/91
* $FreeBSD$
*/
#ifndef _SPECIALREG_H_
#define _SPECIALREG_H_
/*
* Bits in 386 special registers:
*/
#define CR0_PE 0x00000001 /* Protected mode Enable */
#define CR0_MP 0x00000002 /* "Math" (fpu) Present */
#define CR0_EM 0x00000004 /* EMulate FPU instructions. (trap ESC only) */
#define CR0_TS 0x00000008 /* Task Switched (if MP, trap ESC and WAIT) */
#define CR0_PG 0x80000000 /* PaGing enable */
/*
* Bits in 486 special registers:
*/
#define CR0_NE 0x00000020 /* Numeric Error enable (EX16 vs IRQ13) */
#define CR0_WP 0x00010000 /* Write Protect (honor page protect in
* all modes)
*/
#define CR0_AM 0x00040000 /* Alignment Mask (set to enable AC flag) */
#define CR0_NW 0x20000000 /* Not Write-through */
#define CR0_CD 0x40000000 /* Cache Disable */
#define CR3_PCID_SAVE 0x8000000000000000
#define CR3_PCID_MASK 0xfff
/*
* Bits in PPro special registers
*/
#define CR4_VME 0x00000001 /* Virtual 8086 mode extensions */
#define CR4_PVI 0x00000002 /* Protected-mode virtual interrupts */
#define CR4_TSD 0x00000004 /* Time stamp disable */
#define CR4_DE 0x00000008 /* Debugging extensions */
#define CR4_PSE 0x00000010 /* Page size extensions */
#define CR4_PAE 0x00000020 /* Physical address extension */
#define CR4_MCE 0x00000040 /* Machine check enable */
#define CR4_PGE 0x00000080 /* Page global enable */
#define CR4_PCE 0x00000100 /* Performance monitoring counter enable */
#define CR4_FXSR 0x00000200 /* Fast FPU save/restore used by OS */
#define CR4_XMM 0x00000400 /* enable SIMD/MMX2 to use except 16 */
#define CR4_VMXE 0x00002000 /* enable VMX operation (Intel-specific) */
#define CR4_FSGSBASE 0x00010000 /* Enable FS/GS BASE accessing instructions */
#define CR4_PCIDE 0x00020000 /* Enable Context ID */
#define CR4_XSAVE 0x00040000 /* XSETBV/XGETBV */
#define CR4_SMEP 0x00100000 /* Supervisor-Mode Execution Prevention */
/*
* Bits in AMD64 special registers. EFER is 64 bits wide.
*/
#define EFER_SCE 0x000000001 /* System Call Extensions (R/W) */
#define EFER_LME 0x000000100 /* Long mode enable (R/W) */
#define EFER_LMA 0x000000400 /* Long mode active (R) */
#define EFER_NXE 0x000000800 /* PTE No-Execute bit enable (R/W) */
#define EFER_SVM 0x000001000 /* SVM enable bit for AMD, reserved for Intel */
#define EFER_LMSLE 0x000002000 /* Long Mode Segment Limit Enable */
#define EFER_FFXSR 0x000004000 /* Fast FXSAVE/FSRSTOR */
#define EFER_TCE 0x000008000 /* Translation Cache Extension */
/*
* Intel Extended Features registers
*/
#define XCR0 0 /* XFEATURE_ENABLED_MASK register */
#define XFEATURE_ENABLED_X87 0x00000001
#define XFEATURE_ENABLED_SSE 0x00000002
#define XFEATURE_ENABLED_YMM_HI128 0x00000004
#define XFEATURE_ENABLED_AVX XFEATURE_ENABLED_YMM_HI128
#define XFEATURE_ENABLED_BNDREGS 0x00000008
#define XFEATURE_ENABLED_BNDCSR 0x00000010
#define XFEATURE_ENABLED_OPMASK 0x00000020
#define XFEATURE_ENABLED_ZMM_HI256 0x00000040
#define XFEATURE_ENABLED_HI16_ZMM 0x00000080
#define XFEATURE_AVX \
(XFEATURE_ENABLED_X87 | XFEATURE_ENABLED_SSE | XFEATURE_ENABLED_AVX)
#define XFEATURE_AVX512 \
(XFEATURE_ENABLED_OPMASK | XFEATURE_ENABLED_ZMM_HI256 | \
XFEATURE_ENABLED_HI16_ZMM)
#define XFEATURE_MPX \
(XFEATURE_ENABLED_BNDREGS | XFEATURE_ENABLED_BNDCSR)
/*
* CPUID instruction features register
*/
#define CPUID_FPU 0x00000001
#define CPUID_VME 0x00000002
#define CPUID_DE 0x00000004
#define CPUID_PSE 0x00000008
#define CPUID_TSC 0x00000010
#define CPUID_MSR 0x00000020
#define CPUID_PAE 0x00000040
#define CPUID_MCE 0x00000080
#define CPUID_CX8 0x00000100
#define CPUID_APIC 0x00000200
#define CPUID_B10 0x00000400
#define CPUID_SEP 0x00000800
#define CPUID_MTRR 0x00001000
#define CPUID_PGE 0x00002000
#define CPUID_MCA 0x00004000
#define CPUID_CMOV 0x00008000
#define CPUID_PAT 0x00010000
#define CPUID_PSE36 0x00020000
#define CPUID_PSN 0x00040000
#define CPUID_CLFSH 0x00080000
#define CPUID_B20 0x00100000
#define CPUID_DS 0x00200000
#define CPUID_ACPI 0x00400000
#define CPUID_MMX 0x00800000
#define CPUID_FXSR 0x01000000
#define CPUID_SSE 0x02000000
#define CPUID_XMM 0x02000000
#define CPUID_SSE2 0x04000000
#define CPUID_SS 0x08000000
#define CPUID_HTT 0x10000000
#define CPUID_TM 0x20000000
#define CPUID_IA64 0x40000000
#define CPUID_PBE 0x80000000
#define CPUID2_SSE3 0x00000001
#define CPUID2_PCLMULQDQ 0x00000002
#define CPUID2_DTES64 0x00000004
#define CPUID2_MON 0x00000008
#define CPUID2_DS_CPL 0x00000010
#define CPUID2_VMX 0x00000020
#define CPUID2_SMX 0x00000040
#define CPUID2_EST 0x00000080
#define CPUID2_TM2 0x00000100
#define CPUID2_SSSE3 0x00000200
#define CPUID2_CNXTID 0x00000400
#define CPUID2_SDBG 0x00000800
#define CPUID2_FMA 0x00001000
#define CPUID2_CX16 0x00002000
#define CPUID2_XTPR 0x00004000
#define CPUID2_PDCM 0x00008000
#define CPUID2_PCID 0x00020000
#define CPUID2_DCA 0x00040000
#define CPUID2_SSE41 0x00080000
#define CPUID2_SSE42 0x00100000
#define CPUID2_X2APIC 0x00200000
#define CPUID2_MOVBE 0x00400000
#define CPUID2_POPCNT 0x00800000
#define CPUID2_TSCDLT 0x01000000
#define CPUID2_AESNI 0x02000000
#define CPUID2_XSAVE 0x04000000
#define CPUID2_OSXSAVE 0x08000000
#define CPUID2_AVX 0x10000000
#define CPUID2_F16C 0x20000000
#define CPUID2_RDRAND 0x40000000
#define CPUID2_HV 0x80000000
/*
* Important bits in the Thermal and Power Management flags
* CPUID.6 EAX and ECX.
*/
#define CPUTPM1_SENSOR 0x00000001
#define CPUTPM1_TURBO 0x00000002
#define CPUTPM1_ARAT 0x00000004
#define CPUTPM2_EFFREQ 0x00000001
/*
* Important bits in the AMD extended cpuid flags
*/
#define AMDID_SYSCALL 0x00000800
#define AMDID_MP 0x00080000
#define AMDID_NX 0x00100000
#define AMDID_EXT_MMX 0x00400000
#define AMDID_FFXSR 0x02000000
#define AMDID_PAGE1GB 0x04000000
#define AMDID_RDTSCP 0x08000000
#define AMDID_LM 0x20000000
#define AMDID_EXT_3DNOW 0x40000000
#define AMDID_3DNOW 0x80000000
#define AMDID2_LAHF 0x00000001
#define AMDID2_CMP 0x00000002
#define AMDID2_SVM 0x00000004
#define AMDID2_EXT_APIC 0x00000008
#define AMDID2_CR8 0x00000010
#define AMDID2_ABM 0x00000020
#define AMDID2_SSE4A 0x00000040
#define AMDID2_MAS 0x00000080
#define AMDID2_PREFETCH 0x00000100
#define AMDID2_OSVW 0x00000200
#define AMDID2_IBS 0x00000400
#define AMDID2_XOP 0x00000800
#define AMDID2_SKINIT 0x00001000
#define AMDID2_WDT 0x00002000
#define AMDID2_LWP 0x00008000
#define AMDID2_FMA4 0x00010000
#define AMDID2_TCE 0x00020000
#define AMDID2_NODE_ID 0x00080000
#define AMDID2_TBM 0x00200000
#define AMDID2_TOPOLOGY 0x00400000
#define AMDID2_PCXC 0x00800000
#define AMDID2_PNXC 0x01000000
#define AMDID2_DBE 0x04000000
#define AMDID2_PTSC 0x08000000
#define AMDID2_PTSCEL2I 0x10000000
#define AMDID2_MWAITX 0x20000000
/*
* CPUID instruction 1 eax info
*/
#define CPUID_STEPPING 0x0000000f
#define CPUID_MODEL 0x000000f0
#define CPUID_FAMILY 0x00000f00
#define CPUID_EXT_MODEL 0x000f0000
#define CPUID_EXT_FAMILY 0x0ff00000
#ifdef __i386__
#define CPUID_TO_MODEL(id) \
((((id) & CPUID_MODEL) >> 4) | \
((((id) & CPUID_FAMILY) >= 0x600) ? \
(((id) & CPUID_EXT_MODEL) >> 12) : 0))
#define CPUID_TO_FAMILY(id) \
((((id) & CPUID_FAMILY) >> 8) + \
((((id) & CPUID_FAMILY) == 0xf00) ? \
(((id) & CPUID_EXT_FAMILY) >> 20) : 0))
#else
#define CPUID_TO_MODEL(id) \
((((id) & CPUID_MODEL) >> 4) | \
(((id) & CPUID_EXT_MODEL) >> 12))
#define CPUID_TO_FAMILY(id) \
((((id) & CPUID_FAMILY) >> 8) + \
(((id) & CPUID_EXT_FAMILY) >> 20))
#endif
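/*
 * Illustrative sketch only, not part of the original header: decoding
 * family/model from a raw CPUID.1 EAX value.  A hypothetical id of
 * 0x000506c9, for example, decodes to family 0x6, model 0x5c.
 */
static inline void
cpuid_decode(unsigned int id, unsigned int *family, unsigned int *model)
{
	*family = CPUID_TO_FAMILY(id);
	*model  = CPUID_TO_MODEL(id);
}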
/*
* CPUID instruction 1 ebx info
*/
#define CPUID_BRAND_INDEX 0x000000ff
#define CPUID_CLFUSH_SIZE 0x0000ff00
#define CPUID_HTT_CORES 0x00ff0000
#define CPUID_LOCAL_APIC_ID 0xff000000
/*
* CPUID instruction 5 info
*/
#define CPUID5_MON_MIN_SIZE 0x0000ffff /* eax */
#define CPUID5_MON_MAX_SIZE 0x0000ffff /* ebx */
#define CPUID5_MON_MWAIT_EXT 0x00000001 /* ecx */
#define CPUID5_MWAIT_INTRBREAK 0x00000002 /* ecx */
/*
* MWAIT cpu power states. Lower 4 bits are sub-states.
*/
#define MWAIT_C0 0xf0
#define MWAIT_C1 0x00
#define MWAIT_C2 0x10
#define MWAIT_C3 0x20
#define MWAIT_C4 0x30
/*
* MWAIT extensions.
*/
/* Interrupt breaks MWAIT even when masked. */
#define MWAIT_INTRBREAK 0x00000001
/*
* CPUID instruction 6 ecx info
*/
#define CPUID_PERF_STAT 0x00000001
#define CPUID_PERF_BIAS 0x00000008
/*
* CPUID instruction 0xb ebx info.
*/
#define CPUID_TYPE_INVAL 0
#define CPUID_TYPE_SMT 1
#define CPUID_TYPE_CORE 2
/*
* CPUID instruction 0xd Processor Extended State Enumeration Sub-leaf 1
*/
#define CPUID_EXTSTATE_XSAVEOPT 0x00000001
#define CPUID_EXTSTATE_XSAVEC 0x00000002
#define CPUID_EXTSTATE_XINUSE 0x00000004
#define CPUID_EXTSTATE_XSAVES 0x00000008
/*
* AMD extended function 8000_0007h edx info
*/
#define AMDPM_TS 0x00000001
#define AMDPM_FID 0x00000002
#define AMDPM_VID 0x00000004
#define AMDPM_TTP 0x00000008
#define AMDPM_TM 0x00000010
#define AMDPM_STC 0x00000020
#define AMDPM_100MHZ_STEPS 0x00000040
#define AMDPM_HW_PSTATE 0x00000080
#define AMDPM_TSC_INVARIANT 0x00000100
#define AMDPM_CPB 0x00000200
/*
* AMD extended function 8000_0008h ecx info
*/
#define AMDID_CMP_CORES 0x000000ff
#define AMDID_COREID_SIZE 0x0000f000
#define AMDID_COREID_SIZE_SHIFT 12
/*
* CPUID instruction 7 Structured Extended Features, leaf 0 ebx info
*/
#define CPUID_STDEXT_FSGSBASE 0x00000001
#define CPUID_STDEXT_TSC_ADJUST 0x00000002
#define CPUID_STDEXT_SGX 0x00000004
#define CPUID_STDEXT_BMI1 0x00000008
#define CPUID_STDEXT_HLE 0x00000010
#define CPUID_STDEXT_AVX2 0x00000020
#define CPUID_STDEXT_FDP_EXC 0x00000040
#define CPUID_STDEXT_SMEP 0x00000080
#define CPUID_STDEXT_BMI2 0x00000100
#define CPUID_STDEXT_ERMS 0x00000200
#define CPUID_STDEXT_INVPCID 0x00000400
#define CPUID_STDEXT_RTM 0x00000800
#define CPUID_STDEXT_PQM 0x00001000
#define CPUID_STDEXT_NFPUSG 0x00002000
#define CPUID_STDEXT_MPX 0x00004000
#define CPUID_STDEXT_PQE 0x00008000
#define CPUID_STDEXT_AVX512F 0x00010000
#define CPUID_STDEXT_AVX512DQ 0x00020000
#define CPUID_STDEXT_RDSEED 0x00040000
#define CPUID_STDEXT_ADX 0x00080000
#define CPUID_STDEXT_SMAP 0x00100000
#define CPUID_STDEXT_AVX512IFMA 0x00200000
#define CPUID_STDEXT_PCOMMIT 0x00400000
#define CPUID_STDEXT_CLFLUSHOPT 0x00800000
#define CPUID_STDEXT_CLWB 0x01000000
#define CPUID_STDEXT_PROCTRACE 0x02000000
#define CPUID_STDEXT_AVX512PF 0x04000000
#define CPUID_STDEXT_AVX512ER 0x08000000
#define CPUID_STDEXT_AVX512CD 0x10000000
#define CPUID_STDEXT_SHA 0x20000000
#define CPUID_STDEXT_AVX512BW 0x40000000
/*
* CPUID instruction 7 Structured Extended Features, leaf 0 ecx info
*/
#define CPUID_STDEXT2_PREFETCHWT1 0x00000001
#define CPUID_STDEXT2_UMIP 0x00000004
#define CPUID_STDEXT2_PKU 0x00000008
#define CPUID_STDEXT2_OSPKE 0x00000010
#define CPUID_STDEXT2_RDPID 0x00400000
#define CPUID_STDEXT2_SGXLC 0x40000000
/*
* CPUID manufacturers identifiers
*/
#define AMD_VENDOR_ID "AuthenticAMD"
#define CENTAUR_VENDOR_ID "CentaurHauls"
#define CYRIX_VENDOR_ID "CyrixInstead"
#define INTEL_VENDOR_ID "GenuineIntel"
#define NEXGEN_VENDOR_ID "NexGenDriven"
#define NSC_VENDOR_ID "Geode by NSC"
#define RISE_VENDOR_ID "RiseRiseRise"
#define SIS_VENDOR_ID "SiS SiS SiS "
#define TRANSMETA_VENDOR_ID "GenuineTMx86"
#define UMC_VENDOR_ID "UMC UMC UMC "
/*
* Model-specific registers for the i386 family
*/
#define MSR_P5_MC_ADDR 0x000
#define MSR_P5_MC_TYPE 0x001
#define MSR_TSC 0x010
#define MSR_P5_CESR 0x011
#define MSR_P5_CTR0 0x012
#define MSR_P5_CTR1 0x013
#define MSR_IA32_PLATFORM_ID 0x017
#define MSR_APICBASE 0x01b
#define MSR_EBL_CR_POWERON 0x02a
#define MSR_TEST_CTL 0x033
#define MSR_IA32_FEATURE_CONTROL 0x03a
#define MSR_BIOS_UPDT_TRIG 0x079
#define MSR_BBL_CR_D0 0x088
#define MSR_BBL_CR_D1 0x089
#define MSR_BBL_CR_D2 0x08a
#define MSR_BIOS_SIGN 0x08b
#define MSR_PERFCTR0 0x0c1
#define MSR_PERFCTR1 0x0c2
#define MSR_PLATFORM_INFO 0x0ce
#define MSR_MPERF 0x0e7
#define MSR_APERF 0x0e8
#define MSR_IA32_EXT_CONFIG 0x0ee /* Undocumented. Core Solo/Duo only */
#define MSR_MTRRcap 0x0fe
#define MSR_BBL_CR_ADDR 0x116
#define MSR_BBL_CR_DECC 0x118
#define MSR_BBL_CR_CTL 0x119
#define MSR_BBL_CR_TRIG 0x11a
#define MSR_BBL_CR_BUSY 0x11b
#define MSR_BBL_CR_CTL3 0x11e
#define MSR_SYSENTER_CS_MSR 0x174
#define MSR_SYSENTER_ESP_MSR 0x175
#define MSR_SYSENTER_EIP_MSR 0x176
#define MSR_MCG_CAP 0x179
#define MSR_MCG_STATUS 0x17a
#define MSR_MCG_CTL 0x17b
#define MSR_EVNTSEL0 0x186
#define MSR_EVNTSEL1 0x187
#define MSR_THERM_CONTROL 0x19a
#define MSR_THERM_INTERRUPT 0x19b
#define MSR_THERM_STATUS 0x19c
#define MSR_IA32_MISC_ENABLE 0x1a0
#define MSR_IA32_TEMPERATURE_TARGET 0x1a2
#define MSR_TURBO_RATIO_LIMIT 0x1ad
#define MSR_TURBO_RATIO_LIMIT1 0x1ae
#define MSR_DEBUGCTLMSR 0x1d9
#define MSR_LASTBRANCHFROMIP 0x1db
#define MSR_LASTBRANCHTOIP 0x1dc
#define MSR_LASTINTFROMIP 0x1dd
#define MSR_LASTINTTOIP 0x1de
#define MSR_ROB_CR_BKUPTMPDR6 0x1e0
#define MSR_MTRRVarBase 0x200
#define MSR_MTRR64kBase 0x250
#define MSR_MTRR16kBase 0x258
#define MSR_MTRR4kBase 0x268
#define MSR_PAT 0x277
#define MSR_MC0_CTL2 0x280
#define MSR_MTRRdefType 0x2ff
#define MSR_MC0_CTL 0x400
#define MSR_MC0_STATUS 0x401
#define MSR_MC0_ADDR 0x402
#define MSR_MC0_MISC 0x403
#define MSR_MC1_CTL 0x404
#define MSR_MC1_STATUS 0x405
#define MSR_MC1_ADDR 0x406
#define MSR_MC1_MISC 0x407
#define MSR_MC2_CTL 0x408
#define MSR_MC2_STATUS 0x409
#define MSR_MC2_ADDR 0x40a
#define MSR_MC2_MISC 0x40b
#define MSR_MC3_CTL 0x40c
#define MSR_MC3_STATUS 0x40d
#define MSR_MC3_ADDR 0x40e
#define MSR_MC3_MISC 0x40f
#define MSR_MC4_CTL 0x410
#define MSR_MC4_STATUS 0x411
#define MSR_MC4_ADDR 0x412
#define MSR_MC4_MISC 0x413
#define MSR_RAPL_POWER_UNIT 0x606
#define MSR_PKG_ENERGY_STATUS 0x611
#define MSR_DRAM_ENERGY_STATUS 0x619
#define MSR_PP0_ENERGY_STATUS 0x639
#define MSR_PP1_ENERGY_STATUS 0x641
#define MSR_TSC_DEADLINE 0x6e0 /* Writes are not serializing */
/*
* VMX MSRs
*/
#define MSR_VMX_BASIC 0x480
#define MSR_VMX_PINBASED_CTLS 0x481
#define MSR_VMX_PROCBASED_CTLS 0x482
#define MSR_VMX_EXIT_CTLS 0x483
#define MSR_VMX_ENTRY_CTLS 0x484
#define MSR_VMX_CR0_FIXED0 0x486
#define MSR_VMX_CR0_FIXED1 0x487
#define MSR_VMX_CR4_FIXED0 0x488
#define MSR_VMX_CR4_FIXED1 0x489
#define MSR_VMX_PROCBASED_CTLS2 0x48b
#define MSR_VMX_EPT_VPID_CAP 0x48c
#define MSR_VMX_TRUE_PINBASED_CTLS 0x48d
#define MSR_VMX_TRUE_PROCBASED_CTLS 0x48e
#define MSR_VMX_TRUE_EXIT_CTLS 0x48f
#define MSR_VMX_TRUE_ENTRY_CTLS 0x490
/*
* X2APIC MSRs.
* Writes are not serializing.
*/
#define MSR_APIC_000 0x800
#define MSR_APIC_ID 0x802
#define MSR_APIC_VERSION 0x803
#define MSR_APIC_TPR 0x808
#define MSR_APIC_EOI 0x80b
#define MSR_APIC_LDR 0x80d
#define MSR_APIC_SVR 0x80f
#define MSR_APIC_ISR0 0x810
#define MSR_APIC_ISR1 0x811
#define MSR_APIC_ISR2 0x812
#define MSR_APIC_ISR3 0x813
#define MSR_APIC_ISR4 0x814
#define MSR_APIC_ISR5 0x815
#define MSR_APIC_ISR6 0x816
#define MSR_APIC_ISR7 0x817
#define MSR_APIC_TMR0 0x818
#define MSR_APIC_IRR0 0x820
#define MSR_APIC_ESR 0x828
#define MSR_APIC_LVT_CMCI 0x82F
#define MSR_APIC_ICR 0x830
#define MSR_APIC_LVT_TIMER 0x832
#define MSR_APIC_LVT_THERMAL 0x833
#define MSR_APIC_LVT_PCINT 0x834
#define MSR_APIC_LVT_LINT0 0x835
#define MSR_APIC_LVT_LINT1 0x836
#define MSR_APIC_LVT_ERROR 0x837
#define MSR_APIC_ICR_TIMER 0x838
#define MSR_APIC_CCR_TIMER 0x839
#define MSR_APIC_DCR_TIMER 0x83e
#define MSR_APIC_SELF_IPI 0x83f
#define MSR_IA32_XSS 0xda0
/*
* Constants related to MSR's.
*/
#define APICBASE_RESERVED 0x000002ff
#define APICBASE_BSP 0x00000100
#define APICBASE_X2APIC 0x00000400
#define APICBASE_ENABLED 0x00000800
#define APICBASE_ADDRESS 0xfffff000
/* MSR_IA32_FEATURE_CONTROL related */
#define IA32_FEATURE_CONTROL_LOCK 0x01 /* lock bit */
#define IA32_FEATURE_CONTROL_SMX_EN 0x02 /* enable VMX inside SMX */
#define IA32_FEATURE_CONTROL_VMX_EN 0x04 /* enable VMX outside SMX */
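/*
 * Illustrative sketch, not part of the original header: VMX operation
 * outside SMX is permitted only when IA32_FEATURE_CONTROL has both the
 * lock bit and the "VMX outside SMX" enable bit set.  Reading the MSR
 * itself is left to the caller.
 */
static inline int
feature_control_vmx_allowed(unsigned long long feature_control)
{
	return (feature_control & IA32_FEATURE_CONTROL_LOCK) != 0 &&
	    (feature_control & IA32_FEATURE_CONTROL_VMX_EN) != 0;
}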
/* MSR IA32_MISC_ENABLE */
#define IA32_MISC_EN_FASTSTR 0x0000000000000001ULL
#define IA32_MISC_EN_ATCCE 0x0000000000000008ULL
#define IA32_MISC_EN_PERFMON 0x0000000000000080ULL
#define IA32_MISC_EN_PEBSU 0x0000000000001000ULL
#define IA32_MISC_EN_ESSTE 0x0000000000010000ULL
#define IA32_MISC_EN_MONE 0x0000000000040000ULL
#define IA32_MISC_EN_LIMCPUID 0x0000000000400000ULL
#define IA32_MISC_EN_xTPRD 0x0000000000800000ULL
#define IA32_MISC_EN_XDD 0x0000000400000000ULL
/*
* PAT modes.
*/
#define PAT_UNCACHEABLE 0x00
#define PAT_WRITE_COMBINING 0x01
#define PAT_WRITE_THROUGH 0x04
#define PAT_WRITE_PROTECTED 0x05
#define PAT_WRITE_BACK 0x06
#define PAT_UNCACHED 0x07
#define PAT_VALUE(i, m) ((long long)(m) << (8 * (i)))
#define PAT_MASK(i) PAT_VALUE(i, 0xff)
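/*
 * Illustrative sketch, not part of the original header: building a full
 * IA32_PAT image with PAT_VALUE().  The layout below mirrors the
 * power-on default except that entry 7 is switched to write-combining;
 * actually writing the value to MSR_PAT is left to the caller.
 */
static inline unsigned long long
pat_default_with_wc(void)
{
	unsigned long long pat = 0;

	pat |= PAT_VALUE(0, PAT_WRITE_BACK);
	pat |= PAT_VALUE(1, PAT_WRITE_THROUGH);
	pat |= PAT_VALUE(2, PAT_UNCACHED);
	pat |= PAT_VALUE(3, PAT_UNCACHEABLE);
	pat |= PAT_VALUE(4, PAT_WRITE_BACK);
	pat |= PAT_VALUE(5, PAT_WRITE_THROUGH);
	pat |= PAT_VALUE(6, PAT_UNCACHED);
	pat |= PAT_VALUE(7, PAT_WRITE_COMBINING);
	return pat;
}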
/*
* Constants related to MTRRs
*/
#define MTRR_UNCACHEABLE 0x00
#define MTRR_WRITE_COMBINING 0x01
#define MTRR_WRITE_THROUGH 0x04
#define MTRR_WRITE_PROTECTED 0x05
#define MTRR_WRITE_BACK 0x06
#define MTRR_N64K 8 /* numbers of fixed-size entries */
#define MTRR_N16K 16
#define MTRR_N4K 64
#define MTRR_CAP_WC 0x0000000000000400
#define MTRR_CAP_FIXED 0x0000000000000100
#define MTRR_CAP_VCNT 0x00000000000000ff
#define MTRR_DEF_ENABLE 0x0000000000000800
#define MTRR_DEF_FIXED_ENABLE 0x0000000000000400
#define MTRR_DEF_TYPE 0x00000000000000ff
#define MTRR_PHYSBASE_PHYSBASE 0x000ffffffffff000
#define MTRR_PHYSBASE_TYPE 0x00000000000000ff
#define MTRR_PHYSMASK_PHYSMASK 0x000ffffffffff000
#define MTRR_PHYSMASK_VALID 0x0000000000000800
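/*
 * Illustrative sketch, not part of the original header: decoding one
 * variable-range MTRR pair (IA32_MTRR_PHYSBASEn / IA32_MTRR_PHYSMASKn,
 * i.e. MSR_MTRRVarBase + 2*n and MSR_MTRRVarBase + 2*n + 1) with the
 * masks above.  Reading the MSRs is left to the caller.
 */
static inline unsigned long long
mtrr_var_base(unsigned long long physbase)
{
	return physbase & MTRR_PHYSBASE_PHYSBASE;
}

static inline unsigned char
mtrr_var_type(unsigned long long physbase)
{
	return (unsigned char)(physbase & MTRR_PHYSBASE_TYPE);
}

static inline int
mtrr_var_valid(unsigned long long physmask)
{
	return (physmask & MTRR_PHYSMASK_VALID) != 0;
}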
/*
* Cyrix configuration registers, accessible as IO ports.
*/
#define CCR0 0xc0 /* Configuration control register 0 */
#define CCR0_NC0 0x01 /* First 64K of each 1M memory region is
* non-cacheable
*/
#define CCR0_NC1 0x02 /* 640K-1M region is non-cacheable */
#define CCR0_A20M 0x04 /* Enables A20M# input pin */
#define CCR0_KEN 0x08 /* Enables KEN# input pin */
#define CCR0_FLUSH 0x10 /* Enables FLUSH# input pin */
#define CCR0_BARB 0x20 /* Flushes internal cache when
* entering hold state
*/
#define CCR0_CO 0x40 /* Cache org: 1=direct mapped, 0=2x set
* assoc
*/
#define CCR0_SUSPEND 0x80 /* Enables SUSP# and SUSPA# pins */
#define CCR1 0xc1 /* Configuration control register 1 */
#define CCR1_RPL 0x01 /* Enables RPLSET and RPLVAL# pins */
#define CCR1_SMI 0x02 /* Enables SMM pins */
#define CCR1_SMAC 0x04 /* System management memory access */
#define CCR1_MMAC 0x08 /* Main memory access */
#define CCR1_NO_LOCK 0x10 /* Negate LOCK# */
#define CCR1_SM3 0x80 /* SMM address space address region 3 */
#define CCR2 0xc2
#define CCR2_WB 0x02 /* Enables WB cache interface pins */
#define CCR2_SADS 0x02 /* Slow ADS */
#define CCR2_LOCK_NW 0x04 /* LOCK NW Bit */
#define CCR2_SUSP_HLT 0x08 /* Suspend on HALT */
#define CCR2_WT1 0x10 /* WT region 1 */
#define CCR2_WPR1 0x10 /* Write-protect region 1 */
#define CCR2_BARB 0x20 /* Flushes write-back cache when
* entering hold state.
*/
#define CCR2_BWRT 0x40 /* Enables burst write cycles */
#define CCR2_USE_SUSP 0x80 /* Enables suspend pins */
#define CCR3 0xc3
#define CCR3_SMILOCK 0x01 /* SMM register lock */
#define CCR3_NMI 0x02 /* Enables NMI during SMM */
#define CCR3_LINBRST 0x04 /* Linear address burst cycles */
#define CCR3_SMMMODE 0x08 /* SMM Mode */
#define CCR3_MAPEN0 0x10 /* Enables Map0 */
#define CCR3_MAPEN1 0x20 /* Enables Map1 */
#define CCR3_MAPEN2 0x40 /* Enables Map2 */
#define CCR3_MAPEN3 0x80 /* Enables Map3 */
#define CCR4 0xe8
#define CCR4_IOMASK 0x07
#define CCR4_MEM	0x08	/* Enables memory bypassing */
#define CCR4_DTE 0x10 /* Enables directory table
* entry cache
*/
#define CCR4_FASTFPE 0x20 /* Fast FPU exception */
#define CCR4_CPUID 0x80 /* Enables CPUID instruction */
#define CCR5 0xe9
#define CCR5_WT_ALLOC 0x01 /* Write-through allocate */
#define CCR5_SLOP 0x02 /* LOOP instruction slowed down */
#define CCR5_LBR1 0x10 /* Local bus region 1 */
#define CCR5_ARREN 0x20 /* Enables ARR region */
#define CCR6 0xea
#define CCR7 0xeb
/* Performance Control Register (5x86 only). */
#define PCR0 0x20
#define PCR0_RSTK 0x01 /* Enables return stack */
#define PCR0_BTB 0x02 /* Enables branch target buffer */
#define PCR0_LOOP 0x04 /* Enables loop */
#define PCR0_AIS	0x08	/* Enables all instructions stalled to
* serialize pipe.
*/
#define PCR0_MLR 0x10 /* Enables reordering of misaligned
* loads
*/
#define PCR0_BTBRT 0x40 /* Enables BTB test register. */
#define PCR0_LSSER 0x80 /* Disable reorder */
/* Device Identification Registers */
#define DIR0 0xfe
#define DIR1 0xff
/*
* Machine Check register constants.
*/
#define MCG_CAP_COUNT 0x000000ff
#define MCG_CAP_CTL_P 0x00000100
#define MCG_CAP_EXT_P 0x00000200
#define MCG_CAP_CMCI_P 0x00000400
#define MCG_CAP_TES_P 0x00000800
#define MCG_CAP_EXT_CNT 0x00ff0000
#define MCG_CAP_SER_P 0x01000000
#define MCG_STATUS_RIPV 0x00000001
#define MCG_STATUS_EIPV 0x00000002
#define MCG_STATUS_MCIP 0x00000004
#define MCG_CTL_ENABLE 0xffffffffffffffff
#define MCG_CTL_DISABLE 0x0000000000000000
#define MSR_MC_CTL(x) (MSR_MC0_CTL + (x) * 4)
#define MSR_MC_STATUS(x) (MSR_MC0_STATUS + (x) * 4)
#define MSR_MC_ADDR(x) (MSR_MC0_ADDR + (x) * 4)
#define MSR_MC_MISC(x) (MSR_MC0_MISC + (x) * 4)
#define MSR_MC_CTL2(x) (MSR_MC0_CTL2 + (x)) /* If MCG_CAP_CMCI_P */
#define MC_STATUS_MCA_ERROR 0x000000000000ffff
#define MC_STATUS_MODEL_ERROR 0x00000000ffff0000
#define MC_STATUS_OTHER_INFO 0x01ffffff00000000
#define MC_STATUS_COR_COUNT 0x001fffc000000000 /* If MCG_CAP_CMCI_P */
#define MC_STATUS_TES_STATUS 0x0060000000000000 /* If MCG_CAP_TES_P */
#define MC_STATUS_AR 0x0080000000000000 /* If MCG_CAP_TES_P */
#define MC_STATUS_S 0x0100000000000000 /* If MCG_CAP_TES_P */
#define MC_STATUS_PCC 0x0200000000000000
#define MC_STATUS_ADDRV 0x0400000000000000
#define MC_STATUS_MISCV 0x0800000000000000
#define MC_STATUS_EN 0x1000000000000000
#define MC_STATUS_UC 0x2000000000000000
#define MC_STATUS_OVER 0x4000000000000000
#define MC_STATUS_VAL 0x8000000000000000
#define MC_MISC_RA_LSB 0x000000000000003f /* If MCG_CAP_SER_P */
#define MC_MISC_ADDRESS_MODE 0x00000000000001c0 /* If MCG_CAP_SER_P */
#define MC_CTL2_THRESHOLD 0x0000000000007fff
#define MC_CTL2_CMCI_EN 0x0000000040000000
#define MC_AMDNB_BANK 4
#define MC_MISC_AMDNB_VAL 0x8000000000000000 /* Counter presence
* valid
*/
#define MC_MISC_AMDNB_CNTP 0x4000000000000000 /* Counter present */
#define MC_MISC_AMDNB_LOCK 0x2000000000000000 /* Register locked */
#define MC_MISC_AMDNB_LVT_MASK 0x00f0000000000000 /* Extended LVT
* offset
*/
#define MC_MISC_AMDNB_LVT_SHIFT 52
#define MC_MISC_AMDNB_CNTEN 0x0008000000000000 /* Counter enabled */
#define MC_MISC_AMDNB_INT_MASK 0x0006000000000000 /* Interrupt type */
#define MC_MISC_AMDNB_INT_LVT 0x0002000000000000 /* Interrupt via
* Extended LVT
*/
#define MC_MISC_AMDNB_INT_SMI 0x0004000000000000 /* SMI */
#define MC_MISC_AMDNB_OVERFLOW 0x0001000000000000 /* Counter overflow */
#define MC_MISC_AMDNB_CNT_MASK 0x00000fff00000000 /* Counter value */
#define MC_MISC_AMDNB_CNT_SHIFT 32
#define MC_MISC_AMDNB_CNT_MAX 0xfff
#define MC_MISC_AMDNB_PTR_MASK 0x00000000ff000000 /* Pointer to
* additional registers
*/
#define MC_MISC_AMDNB_PTR_SHIFT 24
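/*
 * Illustrative sketch, not part of the original header: the MSR_MC_*(x)
 * accessors stride through the architectural MC register block (e.g.
 * MSR_MC_STATUS(2) == MSR_MC2_STATUS == 0x409), and a status word read
 * from one of those MSRs is decoded with the MC_STATUS_* masks above.
 * The rdmsr and the bank count (MSR_MCG_CAP & MCG_CAP_COUNT) are assumed
 * to come from the caller.
 */
static inline int
mc_status_logged(unsigned long long status)
{
	return (status & MC_STATUS_VAL) != 0;
}

static inline unsigned short
mc_status_mca_error(unsigned long long status)
{
	return (unsigned short)(status & MC_STATUS_MCA_ERROR);
}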
/*
* The following four 3-byte registers control the non-cacheable regions.
* These registers must be written as three separate bytes.
*
* NCRx+0: A31-A24 of starting address
* NCRx+1: A23-A16 of starting address
* NCRx+2: A15-A12 of starting address | NCR_SIZE_xx.
*
* The non-cacheable region's starting address must be aligned to the
* size indicated by the NCR_SIZE_xx field.
*/
#define NCR1 0xc4
#define NCR2 0xc7
#define NCR3 0xca
#define NCR4 0xcd
#define NCR_SIZE_0K 0
#define NCR_SIZE_4K 1
#define NCR_SIZE_8K 2
#define NCR_SIZE_16K 3
#define NCR_SIZE_32K 4
#define NCR_SIZE_64K 5
#define NCR_SIZE_128K 6
#define NCR_SIZE_256K 7
#define NCR_SIZE_512K 8
#define NCR_SIZE_1M 9
#define NCR_SIZE_2M 10
#define NCR_SIZE_4M 11
#define NCR_SIZE_8M 12
#define NCR_SIZE_16M 13
#define NCR_SIZE_32M 14
#define NCR_SIZE_4G 15
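/*
 * Illustrative sketch, not part of the original header: programming one
 * non-cacheable region as the comment above describes, three bytes at a
 * time.  The Cyrix configuration registers are reached through the
 * conventional index/data port pair at I/O ports 0x22/0x23 (an
 * assumption here, since this header does not define those ports).
 * The same three-byte pattern applies to the ARRx registers below.
 */
static inline void
cyrix_write_reg(unsigned char reg, unsigned char val)
{
	__asm__ volatile ("outb %b0, $0x22" : : "a" (reg));
	__asm__ volatile ("outb %b0, $0x23" : : "a" (val));
}

static inline void
ncr_set(unsigned char ncr, unsigned int start, unsigned char size)
{
	cyrix_write_reg(ncr + 0, (start >> 24) & 0xff);	/* A31-A24 */
	cyrix_write_reg(ncr + 1, (start >> 16) & 0xff);	/* A23-A16 */
	cyrix_write_reg(ncr + 2,
	    ((start >> 8) & 0xf0) | (size & 0x0f));	/* A15-A12 | NCR_SIZE_xx */
}
/*
 * Example use: ncr_set(NCR2, 0x000a0000, NCR_SIZE_64K) would mark the
 * 64K VGA window at 0xa0000 non-cacheable.
 */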
/*
* The address region registers are used to specify the location and
* size for the eight address regions.
*
* ARRx + 0: A31-A24 of start address
* ARRx + 1: A23-A16 of start address
* ARRx + 2: A15-A12 of start address | ARR_SIZE_xx
*/
#define ARR0 0xc4
#define ARR1 0xc7
#define ARR2 0xca
#define ARR3 0xcd
#define ARR4 0xd0
#define ARR5 0xd3
#define ARR6 0xd6
#define ARR7 0xd9
#define ARR_SIZE_0K 0
#define ARR_SIZE_4K 1
#define ARR_SIZE_8K 2
#define ARR_SIZE_16K 3
#define ARR_SIZE_32K 4
#define ARR_SIZE_64K 5
#define ARR_SIZE_128K 6
#define ARR_SIZE_256K 7
#define ARR_SIZE_512K 8
#define ARR_SIZE_1M 9
#define ARR_SIZE_2M 10
#define ARR_SIZE_4M 11
#define ARR_SIZE_8M 12
#define ARR_SIZE_16M 13
#define ARR_SIZE_32M 14
#define ARR_SIZE_4G 15
/*
* The region control registers specify the attributes associated with
 * the ARRx address regions.
*/
#define RCR0 0xdc
#define RCR1 0xdd
#define RCR2 0xde
#define RCR3 0xdf
#define RCR4 0xe0
#define RCR5 0xe1
#define RCR6 0xe2
#define RCR7 0xe3
#define RCR_RCD 0x01 /* Disables caching for ARRx (x = 0-6). */
#define RCR_RCE 0x01 /* Enables caching for ARR7. */
#define RCR_WWO 0x02 /* Weak write ordering. */
#define RCR_WL 0x04 /* Weak locking. */
#define RCR_WG 0x08 /* Write gathering. */
#define RCR_WT 0x10 /* Write-through. */
#define RCR_NLB 0x20 /* LBA# pin is not asserted. */
/* AMD Write Allocate Top-Of-Memory and Control Register */
#define AMD_WT_ALLOC_TME 0x40000 /* top-of-memory enable */
#define AMD_WT_ALLOC_PRE 0x20000 /* programmable range enable */
#define AMD_WT_ALLOC_FRE 0x10000 /* fixed (A0000-FFFFF) range enable */
/* AMD64 MSR's */
#define MSR_EFER 0xc0000080 /* extended features */
#define MSR_STAR 0xc0000081 /* legacy mode SYSCALL target/cs/ss */
#define MSR_LSTAR 0xc0000082 /* long mode SYSCALL target rip */
#define MSR_CSTAR 0xc0000083 /* compat mode SYSCALL target rip */
#define MSR_SF_MASK 0xc0000084 /* syscall flags mask */
#define MSR_FSBASE 0xc0000100 /* base address of the %fs "segment" */
#define MSR_GSBASE 0xc0000101 /* base address of the %gs "segment" */
#define MSR_KGSBASE 0xc0000102 /* base address of the kernel %gs */
#define MSR_PERFEVSEL0 0xc0010000
#define MSR_PERFEVSEL1 0xc0010001
#define MSR_PERFEVSEL2 0xc0010002
#define MSR_PERFEVSEL3 0xc0010003
#define MSR_K7_PERFCTR0 0xc0010004
#define MSR_K7_PERFCTR1 0xc0010005
#define MSR_K7_PERFCTR2 0xc0010006
#define MSR_K7_PERFCTR3 0xc0010007
#define MSR_SYSCFG 0xc0010010
#define MSR_HWCR 0xc0010015
#define MSR_IORRBASE0 0xc0010016
#define MSR_IORRMASK0 0xc0010017
#define MSR_IORRBASE1 0xc0010018
#define MSR_IORRMASK1 0xc0010019
#define MSR_TOP_MEM 0xc001001a /* boundary for ram below 4G */
#define MSR_TOP_MEM2 0xc001001d /* boundary for ram above 4G */
#define MSR_NB_CFG1 0xc001001f /* NB configuration 1 */
#define MSR_P_STATE_LIMIT 0xc0010061 /* P-state Current Limit Register */
#define MSR_P_STATE_CONTROL 0xc0010062 /* P-state Control Register */
#define MSR_P_STATE_STATUS 0xc0010063 /* P-state Status Register */
#define MSR_P_STATE_CONFIG(n) (0xc0010064 + (n)) /* P-state Config */
#define MSR_SMM_ADDR 0xc0010112 /* SMM TSEG base address */
#define MSR_SMM_MASK 0xc0010113 /* SMM TSEG address mask */
#define MSR_EXTFEATURES 0xc0011005 /* Extended CPUID Features override */
#define MSR_IC_CFG 0xc0011021 /* Instruction Cache Configuration */
#define MSR_K8_UCODE_UPDATE 0xc0010020 /* update microcode */
#define MSR_MC0_CTL_MASK 0xc0010044
#define MSR_VM_CR 0xc0010114 /* SVM: feature control */
#define MSR_VM_HSAVE_PA 0xc0010117 /* SVM: host save area address */
/* MSR_VM_CR related */
#define VM_CR_SVMDIS 0x10 /* SVM: disabled by BIOS */
/* VIA ACE crypto featureset: for via_feature_rng */
#define VIA_HAS_RNG 1 /* cpu has RNG */
/* VIA ACE crypto featureset: for via_feature_xcrypt */
#define VIA_HAS_AES 1 /* cpu has AES */
#define VIA_HAS_SHA 2 /* cpu has SHA1 & SHA256 */
#define VIA_HAS_MM 4 /* cpu has RSA instructions */
#define VIA_HAS_AESCTR 8 /* cpu has AES-CTR instructions */
/* Centaur Extended Feature flags */
#define VIA_CPUID_HAS_RNG 0x000004
#define VIA_CPUID_DO_RNG 0x000008
#define VIA_CPUID_HAS_ACE 0x000040
#define VIA_CPUID_DO_ACE 0x000080
#define VIA_CPUID_HAS_ACE2 0x000100
#define VIA_CPUID_DO_ACE2 0x000200
#define VIA_CPUID_HAS_PHE 0x000400
#define VIA_CPUID_DO_PHE 0x000800
#define VIA_CPUID_HAS_PMM 0x001000
#define VIA_CPUID_DO_PMM 0x002000
/* VIA ACE xcrypt-* instruction context control options */
#define VIA_CRYPT_CWLO_ROUND_M 0x0000000f
#define VIA_CRYPT_CWLO_ALG_M 0x00000070
#define VIA_CRYPT_CWLO_ALG_AES 0x00000000
#define VIA_CRYPT_CWLO_KEYGEN_M 0x00000080
#define VIA_CRYPT_CWLO_KEYGEN_HW 0x00000000
#define VIA_CRYPT_CWLO_KEYGEN_SW 0x00000080
#define VIA_CRYPT_CWLO_NORMAL 0x00000000
#define VIA_CRYPT_CWLO_INTERMEDIATE 0x00000100
#define VIA_CRYPT_CWLO_ENCRYPT 0x00000000
#define VIA_CRYPT_CWLO_DECRYPT 0x00000200
#define VIA_CRYPT_CWLO_KEY128 0x0000000a /* 128bit, 10 rds */
#define VIA_CRYPT_CWLO_KEY192 0x0000040c /* 192bit, 12 rds */
#define VIA_CRYPT_CWLO_KEY256	0x0000080e /* 256bit, 14 rds */
#endif /* !_SPECIALREG_H_ */
