initial import

internal commit: 0ab1ea615e5cfbb0687a9d593a86a7b774386076

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
This commit is contained in:
Anthony Xu
2018-03-07 21:01:19 +08:00
committed by lijinxia
parent b966397914
commit bd31b1c53e
93 changed files with 37861 additions and 0 deletions

35
devicemodel/MAINTAINERS Normal file
View File

@@ -0,0 +1,35 @@
ACRN Device Model Maintainers
===========================
This file provides information about the primary maintainers for
the ACRN Device Model.
In general, you should not privately email the maintainer. You should
email the acrn-dev list, but you can also Cc the maintainer.
Descriptions of section entries:
L: Mailing list that is relevant to this area (default is acrn-dev)
Patches and questions should be sent to the email list.
M: Cc address for patches and questions (ie, the package maintainer)
W: Web-page with status/info
T: SCM tree type and location. Type is one of: git, svn.
S: Status, one of the following:
Supported: Someone is actually paid to look after this.
Maintained: Someone actually looks after it.
Odd Fixes: It has a maintainer but they don't have time to do
much other than throw the odd patch in. See below.
Orphan: No current maintainer [but maybe you could take the
role as you write your new code].
Obsolete: Old code. Something tagged obsolete generally means
it has been replaced by a better system and you
should be using that.
Maintainers List
----------------
W: N/A
S: Supported
L: https://lists.projectacrn.org/g/acrn-dev
T: git - https://github.com/projectacrn/acrn-devicemodel.git
M: Anthony Xu <anthony.xu@intel.com>
M: Hao Li <hao.l.li@intel.com>

121
devicemodel/Makefile Normal file
View File

@@ -0,0 +1,121 @@
#
# ACRN-DM
#
MAJOR_VERSION=0
MINOR_VERSION=1

BASEDIR := $(shell pwd)
DM_OBJDIR ?= $(CURDIR)/build

# A Yocto toolchain provides its own CC; default to gcc otherwise.
ifneq ($(TARGET_YOCTO), TRUE)
CC := gcc
endif

CFLAGS := -g -O0 -std=gnu11
CFLAGS += -D_GNU_SOURCE
CFLAGS += -DNO_OPENSSL
CFLAGS += -m64
CFLAGS += -Wall -ffunction-sections
CFLAGS += -Werror
CFLAGS += -I$(BASEDIR)/include
CFLAGS += -I$(BASEDIR)/include/public

LIBS = -lrt
LIBS += -lpthread
LIBS += -lcrypto
LIBS += -lpciaccess
LIBS += -lz
LIBS += -luuid

# hw
SRCS += hw/pci/virtio/virtio.c
SRCS += hw/pci/virtio/virtio_kernel.c
SRCS += hw/platform/usb_mouse.c
SRCS += hw/platform/usb_core.c
SRCS += hw/platform/atkbdc.c
SRCS += hw/platform/ps2mouse.c
SRCS += hw/platform/rtc.c
SRCS += hw/platform/ps2kbd.c
SRCS += hw/platform/pm.c
SRCS += hw/platform/uart_core.c
SRCS += hw/platform/block_if.c
SRCS += hw/platform/ioapic.c
SRCS += hw/platform/cmos_io.c
SRCS += hw/pci/wdt_i6300esb.c
SRCS += hw/pci/lpc.c
SRCS += hw/pci/xhci.c
SRCS += hw/pci/core.c
SRCS += hw/pci/virtio/virtio_console.c
SRCS += hw/pci/virtio/virtio_block.c
SRCS += hw/pci/ahci.c
SRCS += hw/pci/hostbridge.c
SRCS += hw/pci/passthrough.c
SRCS += hw/pci/virtio/virtio_net.c
SRCS += hw/pci/virtio/virtio_rnd.c
SRCS += hw/pci/virtio/virtio_hyper_dmabuf.c
SRCS += hw/pci/irq.c
SRCS += hw/pci/uart.c
SRCS += hw/acpi/acpi.c

# core
#SRCS += core/bootrom.c
SRCS += core/sw_load.c
SRCS += core/smbiostbl.c
SRCS += core/mevent.c
SRCS += core/gc.c
SRCS += core/console.c
SRCS += core/inout.c
SRCS += core/mem.c
SRCS += core/post.c
SRCS += core/consport.c
SRCS += core/vmmapi.c
SRCS += core/mptbl.c
SRCS += core/main.c

OBJS := $(patsubst %.c,$(DM_OBJDIR)/%.o,$(SRCS))
HEADERS := $(shell find $(BASEDIR) -name '*.h')
DISTCLEAN_OBJS := $(shell find $(BASEDIR) -name '*.o')

PROGRAM := acrn-dm

# 'all', 'clean', 'distclean' and 'install' are commands, not files.
.PHONY: all clean distclean install

all: include/version.h $(PROGRAM)

$(PROGRAM): $(OBJS)
	$(CC) -o $(DM_OBJDIR)/$@ $(CFLAGS) $(LDFLAGS) $^ $(LIBS)

# Remove build products (the original recipe removed $(OBJS) twice).
clean:
	rm -f $(OBJS)
	rm -f include/version.h
	rm -rf $(DM_OBJDIR)
	if test -f $(PROGRAM); then rm $(PROGRAM); fi

# Also remove stray objects and tag/cscope databases anywhere in the tree.
distclean:
	rm -f $(DISTCLEAN_OBJS)
	rm -f include/version.h
	rm -rf $(DM_OBJDIR)
	rm -f tags TAGS cscope.files cscope.in.out cscope.out cscope.po.out GTAGS GPATH GRTAGS GSYMS

# Generate the version header from the git state: commit id (suffixed
# with "-dirty" when the work tree has local changes), build time/user.
include/version.h:
	touch include/version.h
	@COMMIT=`git rev-parse --verify --short HEAD 2>/dev/null`;\
	DIRTY=`git diff-index --name-only HEAD`;\
	if [ -n "$$DIRTY" ];then PATCH="$$COMMIT-dirty";else PATCH="$$COMMIT";fi;\
	TIME=`date "+%Y-%m-%d %H:%M:%S"`;\
	cat license_header > include/version.h;\
	echo "#define DM_MAJOR_VERSION $(MAJOR_VERSION)" >> include/version.h;\
	echo "#define DM_MINOR_VERSION $(MINOR_VERSION)" >> include/version.h;\
	echo "#define DM_BUILD_VERSION "\""$$PATCH"\""" >> include/version.h;\
	echo "#define DM_BUILD_TIME "\""$$TIME"\""" >> include/version.h;\
	echo "#define DM_BUILD_USER "\""$(USER)"\""" >> include/version.h

# Compile one object, creating its output directory first.
# NOTE: every object depends on all headers (coarse but safe).
$(DM_OBJDIR)/%.o: %.c $(HEADERS)
	@mkdir -p $(dir $@)
	$(CC) $(CFLAGS) -c $< -o $@

install: $(DM_OBJDIR)/$(PROGRAM)
	install -D $(DM_OBJDIR)/$(PROGRAM) $(DESTDIR)/usr/bin/$(PROGRAM)

18
devicemodel/README Normal file
View File

@@ -0,0 +1,18 @@
BUILD DEPENDENCIES
for CentOS
yum install libuuid-devel
BUILD
make
CLEAN
make clean
RUN DEPENDENCIES
for CentOS
yum install openssl-libs
yum install zlib
yum install libuuid

116
devicemodel/core/console.c Normal file
View File

@@ -0,0 +1,116 @@
/*-
* Copyright (c) 2015 Tycho Nightingale <tycho.nightingale@pluribusnetworks.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#include <sys/types.h>
#include "gc.h"
#include "console.h"
/*
 * File-scope console state: a single global console shared by the
 * framebuffer, keyboard and pointer front-ends.  Devices register
 * callbacks; the *_priority fields let a higher-priority device take
 * over an input class (see console_kbd_register()/console_ptr_register()).
 */
static struct {
	struct gfx_ctx *gc;		/* backing graphics context */

	fb_render_func_t fb_render_cb;	/* invoked by console_refresh() */
	void *fb_arg;			/* opaque argument for fb_render_cb */

	kbd_event_func_t kbd_event_cb;	/* invoked by console_key_event() */
	void *kbd_arg;			/* opaque argument for kbd_event_cb */
	int kbd_priority;		/* priority of current kbd handler */

	ptr_event_func_t ptr_event_cb;	/* invoked by console_ptr_event() */
	void *ptr_arg;			/* opaque argument for ptr_event_cb */
	int ptr_priority;		/* priority of current ptr handler */
} console;
/*
 * Create the global console's graphics context: w x h pixels, with an
 * optional caller-supplied framebuffer (fbaddr may be NULL).
 */
void
console_init(int w, int h, void *fbaddr)
{
	struct gfx_ctx *ctx;

	ctx = gc_init(w, h, fbaddr);
	console.gc = ctx;
}

/*
 * Point the console's graphics context at a new framebuffer.
 */
void
console_set_fbaddr(void *fbaddr)
{
	gc_set_fbaddr(console.gc, fbaddr);
}

/*
 * Return the image backing the console's graphics context.
 */
struct gfx_ctx_image *
console_get_image(void)
{
	return gc_get_image(console.gc);
}
void
console_fb_register(fb_render_func_t render_cb, void *arg)
{
console.fb_render_cb = render_cb;
console.fb_arg = arg;
}
void
console_refresh(void)
{
if (console.fb_render_cb)
(*console.fb_render_cb)(console.gc, console.fb_arg);
}
void
console_kbd_register(kbd_event_func_t event_cb, void *arg, int pri)
{
if (pri > console.kbd_priority) {
console.kbd_event_cb = event_cb;
console.kbd_arg = arg;
console.kbd_priority = pri;
}
}
void
console_ptr_register(ptr_event_func_t event_cb, void *arg, int pri)
{
if (pri > console.ptr_priority) {
console.ptr_event_cb = event_cb;
console.ptr_arg = arg;
console.ptr_priority = pri;
}
}
void
console_key_event(int down, uint32_t keysym)
{
if (console.kbd_event_cb)
(*console.kbd_event_cb)(down, keysym, console.kbd_arg);
}
void
console_ptr_event(uint8_t button, int x, int y)
{
if (console.ptr_event_cb)
(*console.ptr_event_cb)(button, x, y, console.ptr_arg);
}

150
devicemodel/core/consport.c Normal file
View File

@@ -0,0 +1,150 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/select.h>
#include <err.h>
#include <stdio.h>
#include <stdlib.h>
#include <termios.h>
#include <unistd.h>
#include <stdbool.h>
#include <sysexits.h>
#include "inout.h"
#include "lpc.h"
/* I/O port of the bvmconsole device and the 16-bit signature ('bv')
 * guests read to detect it. */
#define BVM_CONSOLE_PORT 0x220
#define BVM_CONS_SIG ('b' << 8 | 'v')

/* Saved original and raw-mode terminal settings for stdin. */
static struct termios tio_orig, tio_new;

/* atexit() hook: restore the terminal settings saved by ttyopen(). */
static void
ttyclose(void)
{
	tcsetattr(STDIN_FILENO, TCSANOW, &tio_orig);
}

/*
 * Switch stdin to raw mode, remembering the original settings so
 * ttyclose() can restore them at process exit.
 *
 * NOTE(review): tio_new is never seeded from tio_orig; cfmakeraw()
 * sets the relevant flags, but the remaining termios fields stay
 * indeterminate -- confirm this is acceptable.
 */
static void
ttyopen(void)
{
	tcgetattr(STDIN_FILENO, &tio_orig);
	cfmakeraw(&tio_new);
	tcsetattr(STDIN_FILENO, TCSANOW, &tio_new);
	atexit(ttyclose);
}
/*
 * Non-blocking poll of stdin: true iff at least one byte is readable
 * right now (zero-timeout select(2)).
 */
static bool
tty_char_available(void)
{
	struct timeval timeout = { 0, 0 };
	fd_set readfds;

	FD_ZERO(&readfds);
	FD_SET(STDIN_FILENO, &readfds);
	return select(STDIN_FILENO + 1, &readfds, NULL, NULL, &timeout) > 0;
}
/*
 * Read one byte from stdin without blocking.
 *
 * Returns the byte value (0-255), or -1 when no input is pending or
 * the read fails.  The original code ignored read(2)'s return value
 * and could hand back an uninitialized byte on a short/failed read.
 */
static int
ttyread(void)
{
	char rb;

	if (!tty_char_available())
		return -1;
	if (read(STDIN_FILENO, &rb, 1) != 1)
		return -1;
	return (rb & 0xff);
}

/*
 * Write one byte to stdout; a failed write is deliberately ignored
 * (console output is best-effort).
 */
static void
ttywrite(unsigned char wb)
{
	(void) write(STDOUT_FILENO, &wb, 1);
}
/*
 * I/O handler for the bvmconsole port.  Protocol, as implemented:
 *  - 2-byte read:  returns BVM_CONS_SIG so a guest can probe for it;
 *  - 1-byte read:  returns 0xff (looks like absent ISA hardware);
 *  - 4-byte read/write: transfers one character from/to the host tty,
 *    switching stdin to raw mode on first use;
 *  - any other width: -1 (unhandled).
 */
static int
console_handler(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
	uint32_t *eax, void *arg)
{
	static int opened;	/* tty already switched to raw mode? */

	if (bytes == 2 && in) {
		*eax = BVM_CONS_SIG;
		return 0;
	}

	/*
	 * Guests might probe this port to look for old ISA devices
	 * using single-byte reads.  Return 0xff for those.
	 */
	if (bytes == 1 && in) {
		*eax = 0xff;
		return 0;
	}

	if (bytes != 4)
		return -1;

	if (!opened) {
		ttyopen();
		opened = 1;
	}

	if (in)
		*eax = ttyread();	/* -1 (0xffffffff) when no input */
	else
		ttywrite(*eax);

	return 0;
}

/* Reserve a 4-byte range at the console port in the system resources. */
SYSRES_IO(BVM_CONSOLE_PORT, 4);

/* NOTE(review): size is 1 here although the handler and the SYSRES_IO
 * reservation above treat this as a 4-byte-wide access -- confirm. */
static struct inout_port consport = {
	"bvmcons",
	BVM_CONSOLE_PORT,
	1,
	IOPORT_F_INOUT,
	console_handler
};

/* Register the bvmconsole device (enabled with the -b option). */
void
init_bvmcons(void)
{
	register_inout(&consport);
}

76
devicemodel/core/gc.c Normal file
View File

@@ -0,0 +1,76 @@
#include <sys/cdefs.h>
#include <sys/types.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include "gc.h"
/*
 * Graphics context: the current image plus a flag recording whether
 * the pixel buffer is caller-owned ("raw", e.g. a framebuffer passed
 * into gc_init()/gc_set_fbaddr()) or allocated by this module.
 */
struct gfx_ctx {
	struct gfx_ctx_image *gc_image;	/* current image (width/height/data) */
	int raw;			/* non-zero: data is externally owned */
};
/*
 * Allocate a graphics context of width x height pixels.  When fbaddr
 * is non-NULL it is used directly as the (caller-owned) pixel buffer;
 * otherwise a zeroed 32-bit-per-pixel buffer is allocated here.
 * Allocation failure is fatal (assert).
 */
struct gfx_ctx *
gc_init(int width, int height, void *fbaddr)
{
	struct gfx_ctx *ctx;
	struct gfx_ctx_image *img;

	ctx = calloc(1, sizeof(struct gfx_ctx));
	assert(ctx != NULL);

	img = calloc(1, sizeof(struct gfx_ctx_image));
	assert(img != NULL);

	img->width = width;
	img->height = height;

	if (fbaddr != NULL) {
		/* caller-owned framebuffer */
		img->data = fbaddr;
		ctx->raw = 1;
	} else {
		/* module-owned, zero-initialized pixel storage */
		img->data = calloc(width * height, sizeof(uint32_t));
		ctx->raw = 0;
	}

	ctx->gc_image = img;
	return ctx;
}
/*
 * Switch the context to an externally owned framebuffer at fbaddr,
 * freeing the previously allocated pixel buffer if it differs.
 *
 * NOTE(review): if the context was already "raw", data points at a
 * buffer this module did not allocate, yet it is still passed to
 * free() here -- presumably callers only ever switch from an
 * allocated buffer to a raw one; confirm against callers.
 */
void
gc_set_fbaddr(struct gfx_ctx *gc, void *fbaddr)
{
	gc->raw = 1;
	if (gc->gc_image->data && gc->gc_image->data != fbaddr)
		free(gc->gc_image->data);
	gc->gc_image->data = fbaddr;
}
/*
 * Resize the context's image to width x height.
 *
 * For a module-owned buffer the pixel storage is reallocated and
 * cleared.  The original code assigned realloc()'s result straight
 * back to gc_image->data: on allocation failure that leaked the old
 * buffer and left data == NULL for subsequent rendering.  Here the
 * old buffer is retained when realloc() fails.
 */
void
gc_resize(struct gfx_ctx *gc, int width, int height)
{
	struct gfx_ctx_image *gc_image;

	gc_image = gc->gc_image;
	gc_image->width = width;
	gc_image->height = height;

	if (!gc->raw) {
		size_t bytes = (size_t)width * height * sizeof(uint32_t);
		void *data = realloc(gc_image->data, bytes);

		if (data != NULL) {
			gc_image->data = data;
			memset(gc_image->data, 0, bytes);
		}
		/* on failure the previous buffer is kept (and leaked no
		 * longer); the caller sees stale-but-valid pixels */
	}
}
/*
 * Return the image of a context, or NULL when gc itself is NULL.
 */
struct gfx_ctx_image *
gc_get_image(struct gfx_ctx *gc)
{
	return (gc != NULL) ? gc->gc_image : NULL;
}

192
devicemodel/core/inout.c Normal file
View File

@@ -0,0 +1,192 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/mman.h>
#include <linux/uio.h>
#include <stdio.h>
#include <stdbool.h>
#include <string.h>
#include <assert.h>
#include "vmm.h"
#include "vmmapi.h"
#include "dm.h"
#include "inout.h"
/* Linker set collecting all statically declared inout_port
 * registrations (e.g. SYSRES_IO() in other files); consumed by
 * init_inout(). */
SET_DECLARE(inout_port_set, struct inout_port);

/* x86 exposes a 16-bit I/O port space. */
#define MAX_IOPORTS (1 << 16)

/* Sanity-check a port range before touching inout_handlers[]. */
#define VERIFY_IOPORT(port, size) \
	assert((port) >= 0 && (size) > 0 && ((port) + (size)) <= MAX_IOPORTS)

/* Per-port handler table: one fully populated entry per I/O port. */
static struct {
	const char *name;	/* owning device, for diagnostics */
	int flags;		/* IOPORT_F_* access flags */
	inout_func_t handler;	/* emulation callback */
	void *arg;		/* opaque callback argument */
} inout_handlers[MAX_IOPORTS];
/*
 * Fallback handler for ports with no registered emulation: reads
 * return all-ones of the access width (like absent hardware), writes
 * are discarded.  Always succeeds.
 */
static int
default_inout(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
	uint32_t *eax, void *arg)
{
	if (in) {
		if (bytes == 1)
			*eax = 0xff;
		else if (bytes == 2)
			*eax = 0xffff;
		else if (bytes == 4)
			*eax = 0xffffffff;
		/* other widths leave *eax untouched, as before */
	}
	return 0;
}
/*
 * Install default_inout() over [start, start+size), marking the range
 * IOPORT_F_DEFAULT so a later real registration may claim it.
 */
static void
register_default_iohandler(int start, int size)
{
	struct inout_port iop;

	VERIFY_IOPORT(start, size);

	bzero(&iop, sizeof(iop));
	iop.name = "default";
	iop.port = start;
	iop.size = size;
	iop.flags = IOPORT_F_INOUT | IOPORT_F_DEFAULT;
	iop.handler = default_inout;

	register_inout(&iop);
}
/*
 * Emulate one port-I/O request on behalf of vcpu *pvcpu.
 *
 * With 'strict' set, requests that would land on the default handler
 * are rejected (-1) instead of returning all-ones.  Also fails when
 * the direction is not permitted by the port's flags.  Returns the
 * handler's result (0 on success) or -1.
 */
int
emulate_inout(struct vmctx *ctx, int *pvcpu, struct pio_request *pio_request,
	int strict)
{
	int bytes, flags, in, port;
	inout_func_t handler;
	void *arg;
	int retval;

	bytes = pio_request->size;
	in = (pio_request->direction == REQUEST_READ);
	port = pio_request->address;

	assert(port < MAX_IOPORTS);
	assert(bytes == 1 || bytes == 2 || bytes == 4);

	handler = inout_handlers[port].handler;
	/* strict mode: only explicitly registered ports are handled */
	if (strict && handler == default_inout)
		return -1;

	flags = inout_handlers[port].flags;
	arg = inout_handlers[port].arg;

	/* check the access direction against the port's capabilities */
	if (pio_request->direction == REQUEST_READ) {
		if (!(flags & IOPORT_F_IN))
			return -1;
	} else {
		if (!(flags & IOPORT_F_OUT))
			return -1;
	}

	retval = handler(ctx, *pvcpu, in, port, bytes,
			(uint32_t *)&(pio_request->value), arg);
	return retval;
}
/*
 * Initialize port-I/O emulation: cover the whole port space with the
 * default handler, then overlay the statically declared registrations
 * from the inout_port_set linker set.
 */
void
init_inout(void)
{
	struct inout_port **iopp, *iop;

	/*
	 * Set up the default handler for all ports
	 */
	register_default_iohandler(0, MAX_IOPORTS);

	/*
	 * Overwrite with specified handlers
	 */
	SET_FOREACH(iopp, inout_port_set) {
		iop = *iopp;
		assert(iop->port < MAX_IOPORTS);
		/* NOTE(review): unlike register_inout(), only iop->port is
		 * installed (not the full [port, port+size) range) and arg
		 * is forced to NULL -- confirm this asymmetry is intended. */
		inout_handlers[iop->port].name = iop->name;
		inout_handlers[iop->port].flags = iop->flags;
		inout_handlers[iop->port].handler = iop->handler;
		inout_handlers[iop->port].arg = NULL;
	}
}
/*
 * Install iop's handler over [port, port+size).  Unless this is the
 * default handler being (re)installed, refuse (-1) when any port in
 * the range is already claimed by a non-default registration.
 * Returns 0 on success.
 */
int
register_inout(struct inout_port *iop)
{
	int port;

	VERIFY_IOPORT(iop->port, iop->size);

	/*
	 * Verify that the new registration is not overwriting an already
	 * allocated i/o range.
	 */
	if (!(iop->flags & IOPORT_F_DEFAULT)) {
		for (port = iop->port; port < iop->port + iop->size; port++)
			if (!(inout_handlers[port].flags & IOPORT_F_DEFAULT))
				return -1;
	}

	for (port = iop->port; port < iop->port + iop->size; port++) {
		inout_handlers[port].name = iop->name;
		inout_handlers[port].flags = iop->flags;
		inout_handlers[port].handler = iop->handler;
		inout_handlers[port].arg = iop->arg;
	}

	return 0;
}
/*
 * Tear down a registration made by register_inout(), restoring the
 * default handler over its range.  Always returns 0.
 */
int
unregister_inout(struct inout_port *iop)
{
	VERIFY_IOPORT(iop->port, iop->size);
	/* the range must still belong to this registration */
	assert(inout_handlers[iop->port].name == iop->name);

	register_default_iohandler(iop->port, iop->size);

	return 0;
}

800
devicemodel/core/main.c Normal file
View File

@@ -0,0 +1,800 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <err.h>
#include <errno.h>
#include <libgen.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <sysexits.h>
#include <stdbool.h>
#include "types.h"
#include "vmm.h"
#include "vmmapi.h"
#include "cpuset.h"
#include "dm.h"
#include "acpi.h"
#include "atkbdc.h"
#include "inout.h"
#include "ioapic.h"
#include "mem.h"
#include "mevent.h"
#include "mptbl.h"
#include "pci_core.h"
#include "irq.h"
#include "lpc.h"
#include "smbiostbl.h"
#include "rtc.h"
#include "version.h"
#define GUEST_NIO_PORT 0x488 /* guest upcalls via i/o port */

#define MB (1024UL * 1024)
#define GB (1024UL * MB)

/* One handler per vmexit type; receives the request and the vcpu id. */
typedef int (*vmexit_handler_t)(struct vmctx *,
	struct vhm_request *, int *vcpu);

/* Globals derived from the command line (see main()). */
char *vmname;			/* VM name: final positional argument */
int guest_ncpus;		/* -c: number of guest vcpus */
char *guest_uuid_str;		/* -U: guest UUID string */
bool stdio_in_use;

static int guest_vmexit_on_hlt, guest_vmexit_on_pause;	/* -H / -P */
static int virtio_msix = 1;	/* -W clears: force single-vector MSI */
static int x2apic_mode;		/* default is xAPIC */

static int strictio;		/* -e: fail unhandled I/O accesses */
static int strictmsr = 1;	/* -w clears: ignore unimplemented MSRs */
static int acpi;		/* -A: generate ACPI tables */

static char *progname;
static const int BSP;		/* bootstrap processor id (zero-initialized) */
static cpuset_t cpumask;	/* set of created vcpus */

static void do_close_pre(struct vmctx *ctx);
static void do_close_post(struct vmctx *ctx);
static void vm_loop(struct vmctx *ctx);

static int quit_vm_loop;	/* handshake flag for vm_loop() teardown */

/* Page-aligned shared page holding the vhm_request array exchanged
 * with the hypervisor (see vm_set_shared_io_page() in main()). */
static char vhm_request_page[4096] __attribute__ ((aligned(4096)));

static struct vhm_request *vhm_req_buf =
	(struct vhm_request *)&vhm_request_page;

/* Counters bumped by the vmexit handlers (diagnostics only). */
struct dmstats {
	uint64_t	vmexit_bogus;
	uint64_t	vmexit_reqidle;
	uint64_t	vmexit_hlt;
	uint64_t	vmexit_pause;
	uint64_t	vmexit_mtrap;
	uint64_t	cpu_switch_rotate;
	uint64_t	cpu_switch_direct;
	uint64_t	vmexit_mmio_emul;
} stats;

/* Per-vcpu service-thread bookkeeping. */
struct mt_vmm_info {
	pthread_t	mt_thr;
	struct vmctx	*mt_ctx;
	int		mt_vcpu;
} mt_vmm_info[VM_MAXCPU];

/* -p: optional host-cpu pinning per vcpu (NULL = unpinned). */
static cpuset_t *vcpumap[VM_MAXCPU] = { NULL };

/* Context cached for dm_gpa2hva(). */
static struct vmctx *_ctx;
/*
 * Print usage to stderr and exit with the given status (0 for -h,
 * non-zero for an invalid invocation).
 */
static void
usage(int code)
{
	fprintf(stderr,
		"Usage: %s [-abehuwxACHPSWY] [-c vcpus] [-g <gdb port>] [-l <lpc>]\n"
		" %*s [-m mem] [-p vcpu:hostcpu] [-s <pci>] [-U uuid] <vm>\n"
		" -a: local apic is in xAPIC mode (deprecated)\n"
		" -A: create ACPI tables\n"
		" -c: # cpus (default 1)\n"
		" -C: include guest memory in core file\n"
		" -e: exit on unhandled I/O access\n"
		" -g: gdb port\n"
		" -h: help\n"
		" -H: vmexit from the guest on hlt\n"
		" -l: LPC device configuration\n"
		" -m: memory size in MB\n"
		" -M: do not hide INTx link for MSI&INTx capable ptdev\n"
		" -p: pin 'vcpu' to 'hostcpu'\n"
		" -P: vmexit from the guest on pause\n"
		" -s: <slot,driver,configinfo> PCI slot config\n"
		" -S: guest memory cannot be swapped\n"
		" -u: RTC keeps UTC time\n"
		" -U: uuid\n"
		" -w: ignore unimplemented MSRs\n"
		" -W: force virtio to use single-vector MSI\n"
		" -x: local apic is in x2APIC mode\n"
		" -Y: disable MPtable generation\n"
		" -k: kernel image path\n"
		" -r: ramdisk image path\n"
		" -B: bootargs for kernel\n"
		" -v: version\n",
		progname, (int)strlen(progname), "");

	exit(code);
}
/*
 * Print the DM version (from include/version.h, generated by the
 * Makefile) and exit.
 *
 * NOTE(review): the "%s@%s" arguments are BUILD_USER then BUILD_TIME,
 * so the output reads "user@time" -- possibly "user@host" was
 * intended; confirm.
 */
static void
print_version(void)
{
	fprintf(stderr, "DM version is: %d.%d-%s, build by %s@%s\n",
		DM_MAJOR_VERSION, DM_MINOR_VERSION, DM_BUILD_VERSION,
		DM_BUILD_USER, DM_BUILD_TIME);

	exit(0);
}
/*
 * Parse a "-p vcpu:hostcpu" pinning option and record it in vcpumap.
 * Returns 0 on success; -1 on a malformed option, an out-of-range
 * vcpu/hostcpu, or allocation failure.
 */
static int
pincpu_parse(const char *opt)
{
	int vcpu, pcpu;

	if (sscanf(opt, "%d:%d", &vcpu, &pcpu) != 2) {
		fprintf(stderr, "invalid format: %s\n", opt);
		return -1;
	}

	if (vcpu < 0 || vcpu >= VM_MAXCPU) {
		fprintf(stderr, "vcpu '%d' outside valid range from 0 to %d\n",
			vcpu, VM_MAXCPU - 1);
		return -1;
	}

	if (pcpu < 0 || pcpu >= CPU_SETSIZE) {
		fprintf(stderr,
			"hostcpu '%d' outside valid range from 0 to %d\n",
			pcpu, CPU_SETSIZE - 1);
		return -1;
	}

	/* first pin for this vcpu: allocate and clear its cpu set */
	if (vcpumap[vcpu] == NULL) {
		vcpumap[vcpu] = malloc(sizeof(cpuset_t));
		if (vcpumap[vcpu] == NULL) {
			perror("malloc");
			return -1;
		}
		CPU_ZERO(vcpumap[vcpu]);
	}
	CPU_SET(pcpu, vcpumap[vcpu]);
	return 0;
}
/* Translate a guest-physical range to a host-virtual pointer using
 * the given context. */
void *
paddr_guest2host(struct vmctx *ctx, uintptr_t gaddr, size_t len)
{
	return vm_map_gpa(ctx, gaddr, len);
}

/* Same translation, but via the globally cached context (_ctx). */
void *
dm_gpa2hva(uint64_t gpa, size_t size)
{
	return vm_map_gpa(_ctx, gpa, size);
}

/* Accessor: was -P (vmexit on pause) given? */
int
fbsdrun_vmexit_on_pause(void)
{
	return guest_vmexit_on_pause;
}

/* Accessor: was -H (vmexit on hlt) given? */
int
fbsdrun_vmexit_on_hlt(void)
{
	return guest_vmexit_on_hlt;
}

/* Accessor: non-zero unless -W forced single-vector MSI. */
int
fbsdrun_virtio_msix(void)
{
	return virtio_msix;
}
/*
 * Entry point of the vcpu service thread: name the thread after its
 * vcpu, then run the VM request loop until the guest resets or halts.
 */
static void *
fbsdrun_start_thread(void *param)
{
	struct mt_vmm_info *mtp = param;
	char tname[MAXCOMLEN + 1];

	snprintf(tname, sizeof(tname), "vcpu %d", mtp->mt_vcpu);
	pthread_setname_np(mtp->mt_thr, tname);

	vm_loop(mtp->mt_ctx);

	/* reset or halt */
	return NULL;
}
/*
 * Register all guest vcpus with the hypervisor and start the service
 * thread.  Fatal (err) if a vcpu cannot be created.
 *
 * NOTE(review): only one thread (mt_vmm_info[0]) is created even for
 * multi-vcpu guests; vm_loop() services requests for all vcpus on
 * that single thread.
 */
void
fbsdrun_addcpu(struct vmctx *ctx, int guest_ncpus)
{
	int i;
	int error;

	for (i = 0; i < guest_ncpus; i++) {
		error = vm_create_vcpu(ctx, i);
		if (error != 0)
			err(EX_OSERR, "could not create CPU %d", i);

		CPU_SET_ATOMIC(i, &cpumask);

		mt_vmm_info[i].mt_ctx = ctx;
		mt_vmm_info[i].mt_vcpu = i;
	}

	error = pthread_create(&mt_vmm_info[0].mt_thr, NULL,
		fbsdrun_start_thread, &mt_vmm_info[0]);
	assert(error == 0);
}
/*
 * Remove a vcpu from the tracked set after shutting down the request
 * loop: destroy the ioreq client and busy-wait (10 ms polls) until
 * vm_loop() clears quit_vm_loop.  Returns non-zero when no vcpus
 * remain.
 */
static int
fbsdrun_deletecpu(struct vmctx *ctx, int vcpu)
{
	if (!CPU_ISSET(vcpu, &cpumask)) {
		fprintf(stderr, "Attempting to delete unknown cpu %d\n", vcpu);
		exit(1);
	}

	/* wait for vm_loop cleanup */
	quit_vm_loop = 1;
	vm_destroy_ioreq_client(ctx);
	while (quit_vm_loop)
		usleep(10000);

	CPU_CLR_ATOMIC(vcpu, &cpumask);
	return CPU_EMPTY(&cpumask);
}
static int
vmexit_inout(struct vmctx *ctx, struct vhm_request *vhm_req, int *pvcpu)
{
int error;
int bytes, port, in;
port = vhm_req->reqs.pio_request.address;
bytes = vhm_req->reqs.pio_request.size;
in = (vhm_req->reqs.pio_request.direction == REQUEST_READ);
error = emulate_inout(ctx, pvcpu, &vhm_req->reqs.pio_request, strictio);
if (error) {
fprintf(stderr, "Unhandled %s%c 0x%04x\n",
in ? "in" : "out",
bytes == 1 ? 'b' : (bytes == 2 ? 'w' : 'l'),
port);
return VMEXIT_ABORT;
} else {
return VMEXIT_CONTINUE;
}
}
/*
 * Handle an MMIO vmexit by dispatching to the registered memory-range
 * emulation.  On failure the request is marked failed and the VM is
 * aborted; on success it is marked done and the guest resumes.
 */
static int
vmexit_mmio_emul(struct vmctx *ctx, struct vhm_request *vhm_req, int *pvcpu)
{
	int err;

	stats.vmexit_mmio_emul++;
	err = emulate_mem(ctx, &vhm_req->reqs.mmio_request);

	if (err) {
		if (err == -ESRCH)
			fprintf(stderr, "Unhandled memory access to 0x%lx\n",
				vhm_req->reqs.mmio_request.address);

		/* fix: the original message left the '[' unterminated and
		 * omitted the trailing newline */
		fprintf(stderr, "Failed to emulate instruction [");
		fprintf(stderr, "mmio address 0x%lx, size %ld]\n",
			vhm_req->reqs.mmio_request.address,
			vhm_req->reqs.mmio_request.size);

		vhm_req->processed = REQ_STATE_FAILED;
		return VMEXIT_ABORT;
	}

	vhm_req->processed = REQ_STATE_SUCCESS;
	return VMEXIT_CONTINUE;
}
/*
 * Handle a PCI configuration-space vmexit by forwarding the access to
 * the PCI emulation.  Marks the request done and resumes on success;
 * logs and aborts the VM on an unhandled access (handle_vmexit()
 * marks the request failed on the abort path).
 */
static int
vmexit_pci_emul(struct vmctx *ctx, struct vhm_request *vhm_req, int *pvcpu)
{
	int err, in = (vhm_req->reqs.pci_request.direction == REQUEST_READ);

	err = emulate_pci_cfgrw(ctx, *pvcpu, in,
		vhm_req->reqs.pci_request.bus,
		vhm_req->reqs.pci_request.dev,
		vhm_req->reqs.pci_request.func,
		vhm_req->reqs.pci_request.reg,
		vhm_req->reqs.pci_request.size,
		&vhm_req->reqs.pci_request.value);
	if (err) {
		fprintf(stderr, "Unhandled pci cfg rw at %x:%x.%x reg 0x%x\n",
			vhm_req->reqs.pci_request.bus,
			vhm_req->reqs.pci_request.dev,
			vhm_req->reqs.pci_request.func,
			vhm_req->reqs.pci_request.reg);
		return VMEXIT_ABORT;
	}

	vhm_req->processed = REQ_STATE_SUCCESS;
	return VMEXIT_CONTINUE;
}
#define DEBUG_EPT_MISCONFIG
#ifdef DEBUG_EPT_MISCONFIG

/* VT-x constants used when debugging EPT misconfiguration exits. */
#define EXIT_REASON_EPT_MISCONFIG 49
#define VMCS_GUEST_PHYSICAL_ADDRESS 0x00002400
#define VMCS_IDENT(x) ((x) | 0x80000000)

#endif /* #ifdef DEBUG_EPT_MISCONFIG */

/* Counting-only handler: bump a statistic and resume the guest. */
static int
vmexit_bogus(struct vmctx *ctx, struct vhm_request *vhm_req, int *pvcpu)
{
	stats.vmexit_bogus++;
	return VMEXIT_CONTINUE;
}

/* Counting-only handler: bump a statistic and resume the guest. */
static int
vmexit_reqidle(struct vmctx *ctx, struct vhm_request *vhm_req, int *pvcpu)
{
	stats.vmexit_reqidle++;
	return VMEXIT_CONTINUE;
}

static int
vmexit_hlt(struct vmctx *ctx, struct vhm_request *vhm_req, int *pvcpu)
{
	stats.vmexit_hlt++;

	/*
	 * Just continue execution with the next instruction. We use
	 * the HLT VM exit as a way to be friendly with the host
	 * scheduler.
	 */
	return VMEXIT_CONTINUE;
}

/* Counting-only handler: bump a statistic and resume the guest. */
static int
vmexit_pause(struct vmctx *ctx, struct vhm_request *vhm_req, int *pvcpu)
{
	stats.vmexit_pause++;
	return VMEXIT_CONTINUE;
}

/* Counting-only handler: bump a statistic and resume the guest. */
static int
vmexit_mtrap(struct vmctx *ctx, struct vhm_request *vhm_req, int *pvcpu)
{
	stats.vmexit_mtrap++;
	return VMEXIT_CONTINUE;
}

/* Dispatch table indexed by vm_exitcode; NULL entries are fatal in
 * handle_vmexit(). */
static vmexit_handler_t handler[VM_EXITCODE_MAX] = {
	[VM_EXITCODE_INOUT]  = vmexit_inout,
	[VM_EXITCODE_MMIO_EMUL] = vmexit_mmio_emul,
	[VM_EXITCODE_PCI_CFG] = vmexit_pci_emul,
	[VM_EXITCODE_BOGUS]  = vmexit_bogus,
	[VM_EXITCODE_REQIDLE] = vmexit_reqidle,
	[VM_EXITCODE_MTRAP]  = vmexit_mtrap,
	[VM_EXITCODE_HLT]  = vmexit_hlt,
	[VM_EXITCODE_PAUSE]  = vmexit_pause,
};
/*
 * Dispatch one vhm_request to its handler, record the outcome in the
 * request, and notify the hypervisor.  Unknown exit codes and
 * unexpected handler results terminate the process.
 */
static void
handle_vmexit(struct vmctx *ctx, struct vhm_request *vhm_req, int vcpu)
{
	int rc;
	enum vm_exitcode exitcode;

	exitcode = vhm_req->type;
	if (exitcode >= VM_EXITCODE_MAX || handler[exitcode] == NULL) {
		fprintf(stderr, "handle vmexit: unexpected exitcode 0x%x\n",
			exitcode);
		exit(1);
	}

	rc = (*handler[exitcode])(ctx, vhm_req, &vcpu);
	switch (rc) {
	case VMEXIT_CONTINUE:
		vhm_req->processed = REQ_STATE_SUCCESS;
		break;
	case VMEXIT_ABORT:
		vhm_req->processed = REQ_STATE_FAILED;
		abort();
		/* abort() does not return, so the fall-through into
		 * default (missing break) is unreachable */
	default:
		exit(1);
	}
	vm_notify_request_done(ctx, vcpu);
}
/*
 * Main request-servicing loop: create and attach to the VM's ioreq
 * client, start the VM, and process pending vhm_requests until the
 * client is destroyed (see fbsdrun_deletecpu()).  Clearing
 * quit_vm_loop on exit signals the teardown handshake.
 */
static void
vm_loop(struct vmctx *ctx)
{
	int error;

	ctx->ioreq_client = vm_create_ioreq_client(ctx);
	assert(ctx->ioreq_client > 0);

	error = vm_run(ctx);
	assert(error == 0);

	while (1) {
		int vcpu;
		struct vhm_request *vhm_req;

		/* blocks until requests are pending or the client is
		 * destroyed (non-zero return) */
		error = vm_attach_ioreq_client(ctx);
		if (error)
			break;

		/* NOTE(review): the scan bound is a literal 4 rather than
		 * guest_ncpus/VM_MAXCPU -- confirm the shared request page
		 * really holds exactly 4 slots. */
		for (vcpu = 0; vcpu < 4; vcpu++) {
			vhm_req = &vhm_req_buf[vcpu];
			if (vhm_req->valid
				&& (vhm_req->processed == REQ_STATE_PROCESSING)
				&& (vhm_req->client == ctx->ioreq_client))
				handle_vmexit(ctx, vhm_req, vcpu);
		}
	}
	quit_vm_loop = 0;
	printf("VM loop exit\n");
}
/* Upper bound on vcpus for this VM (currently a compile-time constant). */
static int
num_vcpus_allowed(struct vmctx *ctx)
{
	/* TODO: add ioctl to get generic information including
	 * virtual cpus, now hardcode
	 */
	return VM_MAXCPU;
}
/*
 * Create and open the named VM; any failure is fatal (exits the
 * process after perror()).
 */
static struct vmctx *
do_open(const char *vmname)
{
	struct vmctx *ctx;

	if (vm_create(vmname) != 0) {
		perror("vm_create");
		exit(1);
	}

	ctx = vm_open(vmname);
	if (ctx == NULL) {
		perror("vm_open");
		exit(1);
	}

	return ctx;
}
/*
 * Teardown used before device models are initialized: destroy and
 * close the VM only.
 */
static void
do_close_pre(struct vmctx *ctx)
{
	vm_destroy(ctx);
	vm_close(ctx);
}

/*
 * Teardown used once device models are up: additionally unwind PCI
 * interrupt routing and PCI device state first.
 */
static void
do_close_post(struct vmctx *ctx)
{
	pci_irq_deinit(ctx);
	deinit_pci(ctx);
	vm_destroy(ctx);
	vm_close(ctx);
}

/*
 * SIGINT handler: request a poweroff and kick the mevent loop so the
 * main thread can unwind cleanly.
 */
static void
sig_handler_term(int signo)
{
	printf("Receive SIGINT to terminate application...\n");
	vm_set_suspend_mode(VM_SUSPEND_POWEROFF);
	mevent_notify();
}
/*
 * Entry point: parse command-line options, then create, configure and
 * run the VM inside a loop so a guest-requested reset re-initializes
 * everything.  Each iteration: open the VM, register the shared I/O
 * request page, set up memory, initialize platform and PCI devices,
 * build guest tables (MP table, SMBIOS, optional ACPI), load guest
 * software, start the vcpus, then block in the mevent dispatch loop.
 * After the loop, the VM is torn down; only VM_SUSPEND_RESET repeats.
 */
int
main(int argc, char *argv[])
{
	int c, error, gdb_port, err, bvmcons;
	int max_vcpus, mptgen, memflags;
	int rtc_localtime;
	struct vmctx *ctx;
	size_t memsize;
	char *optstr;

	/* defaults: no bvmcons, 1 vcpu, 256 MB, MP table on, RTC local */
	bvmcons = 0;
	progname = basename(argv[0]);
	gdb_port = 0;
	guest_ncpus = 1;
	memsize = 256 * MB;
	mptgen = 1;
	rtc_localtime = 1;
	memflags = 0;
	quit_vm_loop = 0;

	if (signal(SIGINT, sig_handler_term) == SIG_ERR)
		fprintf(stderr, "cannot register handler for SIGINT\n");

	/* NOTE(review): 'G:' is accepted here but has no case below and
	 * no usage() entry -- confirm whether it is vestigial. */
	optstr = "abehuwxACHIMPSWYvk:r:B:p:g:c:s:m:l:U:G:";
	while ((c = getopt(argc, argv, optstr)) != -1) {
		switch (c) {
		case 'a':
			x2apic_mode = 0;
			break;
		case 'A':
			acpi = 1;
			break;
		case 'b':
			bvmcons = 1;
			break;
		case 'p':
			if (pincpu_parse(optarg) != 0) {
				errx(EX_USAGE,
				"invalid vcpu pinning configuration '%s'",
				optarg);
			}
			break;
		case 'c':
			guest_ncpus = atoi(optarg);
			break;
		case 'C':
			memflags |= VM_MEM_F_INCORE;
			break;
		case 'g':
			gdb_port = atoi(optarg);
			break;
		case 'l':
			if (lpc_device_parse(optarg) != 0) {
				errx(EX_USAGE,
					"invalid lpc device configuration '%s'",
					optarg);
			}
			break;
		case 's':
			if (pci_parse_slot(optarg) != 0)
				exit(1);
			else
				break;
		case 'S':
			memflags |= VM_MEM_F_WIRED;
			break;
		case 'm':
			error = vm_parse_memsize(optarg, &memsize);
			if (error)
				errx(EX_USAGE, "invalid memsize '%s'", optarg);
			break;
		case 'H':
			guest_vmexit_on_hlt = 1;
			break;
		case 'I':
			/*
			 * The "-I" option was used to add an ioapic to the
			 * virtual machine.
			 *
			 * An ioapic is now provided unconditionally for each
			 * virtual machine and this option is now deprecated.
			 */
			break;
		case 'P':
			guest_vmexit_on_pause = 1;
			break;
		case 'e':
			strictio = 1;
			break;
		case 'u':
			rtc_localtime = 0;
			break;
		case 'U':
			guest_uuid_str = optarg;
			break;
		case 'w':
			strictmsr = 0;
			break;
		case 'W':
			virtio_msix = 0;
			break;
		case 'x':
			x2apic_mode = 1;
			break;
		case 'Y':
			mptgen = 0;
			break;
		case 'k':
			if (acrn_parse_kernel(optarg) != 0)
				exit(1);
			else
				break;
		case 'r':
			if (acrn_parse_ramdisk(optarg) != 0)
				exit(1);
			else
				break;
		case 'B':
			if (acrn_parse_bootargs(optarg) != 0)
				exit(1);
			else
				break;
			/* NOTE(review): the following break is unreachable */
			break;
		case 'M':
			ptdev_prefer_msi(false);
			break;
		case 'v':
			print_version();
			break;
		case 'h':
			usage(0);
		default:
			usage(1);
		}
	}
	argc -= optind;
	argv += optind;

	/* exactly one positional argument: the VM name */
	if (argc != 1)
		usage(1);

	vmname = argv[0];

	/* VM lifecycle loop: repeats only on guest-requested reset. */
	for (;;) {
		ctx = do_open(vmname);

		/* set IOReq buffer page */
		error = vm_set_shared_io_page(ctx, (unsigned long)vhm_req_buf);
		if (error)
			do_close_pre(ctx);
		assert(error == 0);

		if (guest_ncpus < 1) {
			fprintf(stderr, "Invalid guest vCPUs (%d)\n",
				guest_ncpus);
			do_close_pre(ctx);
			exit(1);
		}

		max_vcpus = num_vcpus_allowed(ctx);
		if (guest_ncpus > max_vcpus) {
			fprintf(stderr, "%d vCPUs requested but %d available\n",
				guest_ncpus, max_vcpus);
			do_close_pre(ctx);
			exit(1);
		}

		vm_set_memflags(ctx, memflags);
		err = vm_setup_memory(ctx, memsize, VM_MMAP_ALL);
		if (err) {
			fprintf(stderr, "Unable to setup memory (%d)\n", errno);
			do_close_pre(ctx);
			exit(1);
		}

		/* platform device and I/O emulation setup */
		init_mem();
		init_inout();
		pci_irq_init(ctx);
		atkbdc_init(ctx);
		ioapic_init(ctx);

		vrtc_init(ctx, rtc_localtime);
		sci_init(ctx);

		/*
		 * Exit if a device emulation finds an error in its
		 * initialization
		 */
		if (init_pci(ctx) != 0) {
			do_close_pre(ctx);
			exit(1);
		}

		if (gdb_port != 0)
			fprintf(stderr, "dbgport not supported\n");

		if (bvmcons)
			init_bvmcons();

		/*
		 * build the guest tables, MP etc.
		 */
		if (mptgen) {
			error = mptable_build(ctx, guest_ncpus);
			if (error) {
				do_close_post(ctx);
				exit(1);
			}
		}

		error = smbios_build(ctx);
		if (error)
			do_close_post(ctx);
		assert(error == 0);

		if (acpi) {
			error = acpi_build(ctx, guest_ncpus);
			if (error)
				do_close_post(ctx);
			assert(error == 0);
		}

		error = acrn_sw_load(ctx);
		if (error)
			do_close_post(ctx);
		assert(error == 0);

		/*
		 * Change the proc title to include the VM name.
		 */
		/*setproctitle("%s", vmname);*/

		/*
		 * Add CPU 0
		 */
		fbsdrun_addcpu(ctx, guest_ncpus);

		/* Make a copy for ctx */
		_ctx = ctx;

		/*
		 * Head off to the main event dispatch loop
		 */
		mevent_dispatch();

		/* teardown on return from the dispatch loop */
		vm_pause(ctx);
		fbsdrun_deletecpu(ctx, BSP);
		vm_unsetup_memory(ctx);
		do_close_post(ctx);
		_ctx = 0;

		if (vm_get_suspend_mode() != VM_SUSPEND_RESET)
			break;
		vm_set_suspend_mode(VM_SUSPEND_NONE);
	}

	exit(0);
}

271
devicemodel/core/mem.c Normal file
View File

@@ -0,0 +1,271 @@
/*-
* Copyright (c) 2012 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* Memory ranges are represented with an RB tree. On insertion, the range
* is checked for overlaps. On lookup, the key has the same base and limit
* so it can be searched within the range.
*/
#include <sys/cdefs.h>

#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "vmm.h"
#include "types.h"
#include "mem.h"
#include "tree.h"
/*
 * One registered guest MMIO range, indexed in an RB tree by the
 * inclusive interval [mr_base, mr_end].
 */
struct mmio_rb_range {
	RB_ENTRY(mmio_rb_range)	mr_link;	/* RB tree links */
	struct mem_range	mr_param;	/* caller-supplied handler and args */
	uint64_t		mr_base;	/* first guest-physical address (inclusive) */
	uint64_t		mr_end;		/* last guest-physical address (inclusive) */
};

struct mmio_rb_tree;
RB_PROTOTYPE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);

/* Primary tree, plus a fallback tree consulted only on a primary miss. */
RB_HEAD(mmio_rb_tree, mmio_rb_range) mmio_rb_root, mmio_rb_fallback;

/*
 * Per-VM cache. Since most accesses from a vCPU will be to
 * consecutive addresses in a range, it makes sense to cache the
 * result of a lookup.
 */
static struct mmio_rb_range *mmio_hint;

/* Guards both trees and mmio_hint: readers look up, writers (un)register. */
static pthread_rwlock_t mmio_rwlock;
/*
 * RB-tree ordering callback.  Two ranges compare equal whenever they
 * overlap; a point lookup (base == end) therefore lands on whichever
 * registered range contains the address.
 */
static int
mmio_rb_range_compare(struct mmio_rb_range *a, struct mmio_rb_range *b)
{
	int order = 0;

	if (a->mr_end < b->mr_base)
		order = -1;
	else if (a->mr_base > b->mr_end)
		order = 1;

	return order;
}
static int
mmio_rb_lookup(struct mmio_rb_tree *rbt, uint64_t addr,
struct mmio_rb_range **entry)
{
struct mmio_rb_range find, *res;
find.mr_base = find.mr_end = addr;
res = RB_FIND(mmio_rb_tree, rbt, &find);
if (res != NULL) {
*entry = res;
return 0;
}
return -1;
}
/*
 * Insert 'new' into 'rbt'.  Returns 0 on success, -1 if it overlaps an
 * existing range (the tree is left unchanged in that case).
 */
__attribute__((unused))
static int
mmio_rb_add(struct mmio_rb_tree *rbt, struct mmio_rb_range *new)
{
	struct mmio_rb_range *clash;

	clash = RB_INSERT(mmio_rb_tree, rbt, new);
	if (clash == NULL)
		return 0;

#ifdef RB_DEBUG
	printf("overlap detected: new %lx:%lx, tree %lx:%lx\n",
	       new->mr_base, new->mr_end,
	       clash->mr_base, clash->mr_end);
#endif

	return -1;
}
/*
 * Debug helper: print every range registered in the given tree.
 *
 * Guarded with #ifdef to match the other RB_DEBUG use in
 * mmio_rb_add(); the previous '#if RB_DEBUG' fails to compile when the
 * macro is defined with no value (e.g. plain -DRB_DEBUG= on some
 * build setups) and is inconsistent with the #ifdef above.
 */
#ifdef RB_DEBUG
static void
mmio_rb_dump(struct mmio_rb_tree *rbt)
{
	struct mmio_rb_range *np;

	pthread_rwlock_rdlock(&mmio_rwlock);
	RB_FOREACH(np, mmio_rb_tree, rbt) {
		printf(" %lx:%lx, %s\n", np->mr_base, np->mr_end,
		       np->mr_param.name);
	}
	pthread_rwlock_unlock(&mmio_rwlock);
}
#endif

RB_GENERATE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);
/*
 * Thin dispatch shim: forward a read access to the range's registered
 * handler.  'arg' is the struct mem_range stored at registration time.
 */
__attribute__((unused))
static int
mem_read(void *ctx, int vcpu, uint64_t gpa, uint64_t *rval, int size, void *arg)
{
	struct mem_range *mr = arg;

	return (*mr->handler)(ctx, vcpu, MEM_F_READ, gpa, size, rval,
			      mr->arg1, mr->arg2);
}
/*
 * Thin dispatch shim: forward a write access to the range's registered
 * handler.  The value is passed by address, matching the handler's
 * read/write-agnostic signature.
 */
__attribute__((unused))
static int
mem_write(void *ctx, int vcpu, uint64_t gpa, uint64_t wval, int size, void *arg)
{
	struct mem_range *mr = arg;

	return (*mr->handler)(ctx, vcpu, MEM_F_WRITE, gpa, size, &wval,
			      mr->arg1, mr->arg2);
}
/*
 * Handle one guest MMIO access described by 'mmio_req': locate the
 * registered range containing the address and invoke its handler.
 *
 * Returns the handler's result, or -ESRCH when the address is covered
 * by neither the primary tree nor the fallback tree.
 */
int
emulate_mem(struct vmctx *ctx, struct mmio_request *mmio_req)
{
	uint64_t paddr = mmio_req->address;
	int size = mmio_req->size;
	struct mmio_rb_range *entry = NULL;
	int err;

	/* Read lock: lookups may run concurrently; (un)register is excluded. */
	pthread_rwlock_rdlock(&mmio_rwlock);
	/*
	 * First check the per-VM cache
	 */
	if (mmio_hint && paddr >= mmio_hint->mr_base &&
	    paddr <= mmio_hint->mr_end)
		entry = mmio_hint;

	if (entry == NULL) {
		if (mmio_rb_lookup(&mmio_rb_root, paddr, &entry) == 0)
			/*
			 * Update the per-VM cache.
			 * NOTE(review): mmio_hint is written while holding
			 * only the read lock, so concurrent lookups can race
			 * on the cache pointer — confirm this is acceptable.
			 */
			mmio_hint = entry;
		else if (mmio_rb_lookup(&mmio_rb_fallback, paddr, &entry)) {
			/* Missing from both trees: nothing claims this GPA. */
			pthread_rwlock_unlock(&mmio_rwlock);
			return -ESRCH;
		}
	}

	assert(entry != NULL);

	/* All accesses are dispatched as vcpu 0. */
	if (mmio_req->direction == REQUEST_READ)
		err = mem_read(ctx, 0, paddr, (uint64_t *)&mmio_req->value,
			       size, &entry->mr_param);
	else
		err = mem_write(ctx, 0, paddr, mmio_req->value,
				size, &entry->mr_param);
	pthread_rwlock_unlock(&mmio_rwlock);

	return err;
}
/*
 * Allocate a tree node for 'memp' and insert it into 'rbt'.
 *
 * Returns 0 on success; -1 if allocation fails, if a range already
 * covers memp->base, or if the new range overlaps an existing one.
 *
 * Fix: the original returned 0 (success) and leaked the freshly
 * malloc'd node when a range covering memp->base was already
 * registered; that case now reports failure and frees the node.
 */
static int
register_mem_int(struct mmio_rb_tree *rbt, struct mem_range *memp)
{
	struct mmio_rb_range *entry, *mrp;
	int err;

	mrp = malloc(sizeof(struct mmio_rb_range));
	if (mrp == NULL)
		return -1;

	mrp->mr_param = *memp;
	mrp->mr_base = memp->base;
	mrp->mr_end = memp->base + memp->size - 1;

	pthread_rwlock_wrlock(&mmio_rwlock);
	if (mmio_rb_lookup(rbt, memp->base, &entry) != 0)
		err = mmio_rb_add(rbt, mrp);
	else
		/* A range covering this base is already registered. */
		err = -1;
	pthread_rwlock_unlock(&mmio_rwlock);

	/* Node was not inserted into the tree; don't leak it. */
	if (err)
		free(mrp);

	return err;
}
/*
 * Register a guest MMIO range in the primary tree.
 * Returns 0 on success, non-zero on failure (see register_mem_int()).
 */
int
register_mem(struct mem_range *memp)
{
	return register_mem_int(&mmio_rb_root, memp);
}

/*
 * Register a fallback MMIO range, consulted by emulate_mem() only when
 * no primary range covers the accessed address.
 */
int
register_mem_fallback(struct mem_range *memp)
{
	return register_mem_int(&mmio_rb_fallback, memp);
}
/*
 * Remove a previously registered MMIO range from the primary tree and
 * free its node.  Fallback ranges cannot be unregistered.
 *
 * Returns 0 on success, or the (non-zero) lookup failure when no range
 * covers memp->base.
 *
 * Fix: the sanity assert compared name *pointers* (mr->name ==
 * memp->name), which only holds when the caller reuses the exact same
 * string object; compare the contents with strcmp() instead.
 */
int
unregister_mem(struct mem_range *memp)
{
	struct mem_range *mr;
	struct mmio_rb_range *entry = NULL;
	int err;

	pthread_rwlock_wrlock(&mmio_rwlock);
	err = mmio_rb_lookup(&mmio_rb_root, memp->base, &entry);
	if (err == 0) {
		mr = &entry->mr_param;

		/* The caller must unregister the same range it registered. */
		assert(strcmp(mr->name, memp->name) == 0);
		assert(mr->base == memp->base && mr->size == memp->size);
		assert((mr->flags & MEM_F_IMMUTABLE) == 0);
		RB_REMOVE(mmio_rb_tree, &mmio_rb_root, entry);

		/* flush Per-VM cache */
		if (mmio_hint == entry)
			mmio_hint = NULL;
	}
	pthread_rwlock_unlock(&mmio_rwlock);

	/* entry is non-NULL only when it was found and removed above. */
	if (entry)
		free(entry);

	return err;
}
/*
 * One-time setup of the MMIO machinery: initialize the reader/writer
 * lock and empty both range trees.
 */
void
init_mem(void)
{
	pthread_rwlock_init(&mmio_rwlock, NULL);
	RB_INIT(&mmio_rb_root);
	RB_INIT(&mmio_rb_fallback);
}

412
devicemodel/core/mevent.c Normal file
View File

@@ -0,0 +1,412 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* Micro event library for FreeBSD, designed for a single i/o thread
* using EPOLL, and having events be persistent by default.
*/
#include <sys/cdefs.h>
#include <assert.h>
#include <err.h>
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <string.h>
#include <sysexits.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/epoll.h>
#include <sys/queue.h>
#include <pthread.h>
#include "mevent.h"
#include "vmm.h"
#include "vmmapi.h"
/* Capacity of the change list and of the epoll event array. */
#define MEVENT_MAX 64

/* me_state values: the operation pending for a change-list entry. */
#define MEV_ADD 1
#define MEV_ENABLE 2	/* unused here: epoll entries are armed on add */
#define MEV_DISABLE 3	/* unused here */
#define MEV_DEL_PENDING 4

static pthread_t mevent_tid;	/* id of the i/o (dispatch) thread */
static int mevent_pipefd[2];	/* self-pipe used to wake the i/o thread */
static pthread_mutex_t mevent_lmutex = PTHREAD_MUTEX_INITIALIZER;

/* One registered event: callback, watched fd, and list bookkeeping. */
struct mevent {
	void	(*me_func)(int, enum ev_type, void *);	/* user callback */
	int	me_fd;		/* file descriptor being watched */
	enum ev_type me_type;	/* event kind (read/write) */
	void	*me_param;	/* opaque argument passed to me_func */
	int	me_cq;		/* 1 while the entry is on change_head */
	int	me_state;	/* pending MEV_* operation */
	int	me_closefd;	/* close me_fd instead of epoll-deleting it */
	LIST_ENTRY(mevent) me_list;
};

/* A buffered epoll_ctl() invocation, produced by mevent_build(). */
struct ctl_event {
	int op;			/* EPOLL_CTL_* operation */
	int fd;
	struct epoll_event ee;
};

/* All live events, and events with changes awaiting the i/o thread. */
static LIST_HEAD(listhead, mevent) global_head, change_head;
/* Serialize all access to global_head/change_head and entry state. */
static void
mevent_qlock(void)
{
	pthread_mutex_lock(&mevent_lmutex);
}

/* Release the list lock taken by mevent_qlock(). */
static void
mevent_qunlock(void)
{
	pthread_mutex_unlock(&mevent_lmutex);
}
/*
 * Internal handler for the self-pipe read side: drain whatever wakeup
 * bytes other threads wrote via mevent_notify().
 */
static void
mevent_pipe_read(int fd, enum ev_type type, void *param)
{
	char scratch[MEVENT_MAX];

	/*
	 * Drain the pipe read side. The fd is non-blocking so this is
	 * safe to do.
	 */
	for (;;) {
		if (read(fd, scratch, sizeof(scratch)) != MEVENT_MAX)
			break;
	}
}
/*
 * Wake the i/o thread if it may be blocked in epoll_wait() by writing
 * one byte to the self-pipe.  A no-op when called from the i/o thread
 * itself or before the pipe has been created.
 *
 * Fixes: the byte written was an uninitialized local (undefined value
 * read), and the write() result was silently discarded.
 */
void
mevent_notify(void)
{
	char c = '\0';	/* value is ignored by the reader; just a wakeup */

	/*
	 * If calling from outside the i/o thread, write a byte on the
	 * pipe to force the i/o thread to exit the blocking epoll call.
	 */
	if (mevent_pipefd[1] != 0 && pthread_self() != mevent_tid) {
		if (write(mevent_pipefd[1], &c, 1) < 0)
			perror("mevent_notify: write");
	}
}
/*
 * Map an mevent type to the epoll event mask used when registering it.
 * Unknown types yield an empty mask.
 */
static int
mevent_kq_filter(struct mevent *mevp)
{
	switch (mevp->me_type) {
	case EVF_READ:
		return EPOLLIN;
	case EVF_WRITE:
		return EPOLLOUT;
	default:
		return 0;
	}
}
/*
 * Map the pending state of an mevent to the epoll_ctl() operation that
 * realizes it.  Only add and delete exist on this backend.
 *
 * Fix: with asserts compiled out (NDEBUG), the default case previously
 * fell through and returned an uninitialized value; it now returns -1,
 * which epoll_ctl() rejects cleanly.
 */
static int
mevent_kq_flags(struct mevent *mevp)
{
	int ret;

	switch (mevp->me_state) {
	case MEV_ADD:
		ret = EPOLL_CTL_ADD;	/* implicitly enabled */
		break;
	case MEV_DEL_PENDING:
		ret = EPOLL_CTL_DEL;
		break;
	default:
		/* Unreachable by construction of the change list. */
		assert(0);
		ret = -1;
		break;
	}

	return ret;
}
/*
 * Drain the change list into an array of buffered epoll_ctl() calls.
 *
 * Returns the number of entries written to 'kev' (the caller supplies
 * an array of MEVENT_MAX).  Entries flagged me_closefd have their fd
 * closed here instead of emitting an epoll op; entries pending delete
 * are freed, all others move (back) onto the global list.
 * 'mfd' is accepted but unused.
 */
static int
mevent_build(int mfd, struct ctl_event *kev)
{
	struct mevent *mevp, *tmpp;
	int i;

	i = 0;
	mevent_qlock();
	list_foreach_safe(mevp, &change_head, me_list, tmpp) {
		if (mevp->me_closefd) {
			/*
			 * A close of the file descriptor will remove the
			 * event
			 */
			close(mevp->me_fd);
		} else {
			kev[i].fd = mevp->me_fd;
			kev[i].ee.events = mevent_kq_filter(mevp);
			kev[i].op = mevent_kq_flags(mevp);
			kev[i].ee.data.ptr = mevp;
			i++;
		}
		/* The entry leaves the change list regardless of its fate. */
		mevp->me_cq = 0;
		LIST_REMOVE(mevp, me_list);
		if (mevp->me_state == MEV_DEL_PENDING)
			free(mevp);
		else
			LIST_INSERT_HEAD(&global_head, mevp, me_list);
		/*
		 * NOTE(review): this fires once i reaches MEVENT_MAX even
		 * though the write to kev[MEVENT_MAX - 1] was still valid;
		 * checking before the write (i < MEVENT_MAX) or asserting
		 * i <= MEVENT_MAX would express the bound exactly.
		 */
		assert(i < MEVENT_MAX);
	}
	mevent_qunlock();
	return i;
}
/*
 * Tear down all registered events when the dispatch loop exits: close
 * every fd we watch (except the process's stdin, left open for the
 * terminal) and free every entry on the global list.
 *
 * Fix: declared with a proper (void) prototype instead of the
 * K&R-style empty parameter list, which declares unspecified
 * parameters in C.
 */
static void
mevent_destroy(void)
{
	struct mevent *mevp, *tmpp;

	mevent_qlock();
	list_foreach_safe(mevp, &global_head, me_list, tmpp) {
		/* Close fds we own, but never the process's stdin. */
		if ((mevp->me_type == EVF_READ ||
		     mevp->me_type == EVF_WRITE)
		    && mevp->me_fd != STDIN_FILENO)
			close(mevp->me_fd);

		LIST_REMOVE(mevp, me_list);
		free(mevp);
	}
	mevent_qunlock();
}
/*
 * Fan out the events reported by epoll_wait() to their registered
 * callbacks.  'numev' may be <= 0, in which case nothing runs.
 */
static void
mevent_handle(struct epoll_event *kev, int numev)
{
	int idx;

	for (idx = 0; idx < numev; idx++) {
		struct mevent *mevp = kev[idx].data.ptr;

		/* XXX check for EV_ERROR ? */
		(*mevp->me_func)(mevp->me_fd, mevp->me_type, mevp->me_param);
	}
}
/*
 * Register a new event watching file descriptor 'tfd'.
 *
 * Returns the new mevent, or NULL on a bad fd/callback, an unsupported
 * type, a duplicate fd/type registration, or allocation failure.  The
 * entry is queued on the change list; the i/o thread arms it.
 */
struct mevent *
mevent_add(int tfd, enum ev_type type,
	   void (*func)(int, enum ev_type, void *), void *param)
{
	struct mevent *scan;
	struct mevent *mevp = NULL;

	if (tfd < 0 || func == NULL)
		return NULL;

	/* Timer events are not implemented on this backend. */
	if (type == EVF_TIMER)
		return NULL;

	mevent_qlock();

	/*
	 * Verify that the fd/type tuple is not present in any list
	 */
	LIST_FOREACH(scan, &global_head, me_list) {
		if (scan->me_fd == tfd && scan->me_type == type)
			goto exit;
	}
	LIST_FOREACH(scan, &change_head, me_list) {
		if (scan->me_fd == tfd && scan->me_type == type)
			goto exit;
	}

	/*
	 * Allocate an entry, populate it, and add it to the change list.
	 */
	mevp = calloc(1, sizeof(struct mevent));
	if (mevp == NULL)
		goto exit;

	mevp->me_fd = tfd;
	mevp->me_type = type;
	mevp->me_func = func;
	mevp->me_param = param;

	LIST_INSERT_HEAD(&change_head, mevp, me_list);
	mevp->me_cq = 1;
	mevp->me_state = MEV_ADD;

	/* Kick the i/o thread so it picks up the change promptly. */
	mevent_notify();

exit:
	mevent_qunlock();
	return mevp;
}
/*
 * Enable a registered event.  A no-op on the epoll backend: entries
 * are armed as soon as they are added (see mevent_kq_flags()).
 */
int
mevent_enable(struct mevent *evp)
{
	return 0;
}

/* Disable a registered event.  A no-op on the epoll backend. */
int
mevent_disable(struct mevent *evp)
{
	return 0;
}
/*
 * Common implementation behind mevent_delete()/mevent_delete_close():
 * queue 'evp' for removal by the i/o thread, optionally arranging for
 * its fd to be closed when the change is processed.  Always returns 0.
 */
static int
mevent_delete_event(struct mevent *evp, int closefd)
{
	mevent_qlock();

	/*
	 * Place the entry onto the changed list if not already there, and
	 * mark as to be deleted.
	 */
	if (!evp->me_cq) {
		evp->me_cq = 1;
		LIST_REMOVE(evp, me_list);
		LIST_INSERT_HEAD(&change_head, evp, me_list);
		mevent_notify();
	}

	evp->me_state = MEV_DEL_PENDING;
	if (closefd)
		evp->me_closefd = 1;

	mevent_qunlock();
	return 0;
}
/* Queue 'evp' for removal; its fd is left open for the caller. */
int
mevent_delete(struct mevent *evp)
{
	return mevent_delete_event(evp, 0);
}

/* Queue 'evp' for removal and close its fd when processed. */
int
mevent_delete_close(struct mevent *evp)
{
	return mevent_delete_event(evp, 1);
}
/* Label the thread recorded in mevent_tid for debuggers/ps output. */
static void
mevent_set_name(void)
{
	pthread_setname_np(mevent_tid, "mevent");
}
/*
 * Main i/o thread loop: apply buffered epoll changes, block in
 * epoll_wait(), and dispatch callbacks until the VM leaves the running
 * state, then flush pending changes and tear everything down.
 *
 * Fix: failure to create the self-pipe previously called exit(0),
 * reporting success to the parent; it now exits non-zero.
 */
void
mevent_dispatch(void)
{
	struct ctl_event clist[MEVENT_MAX];
	struct epoll_event eventlist[MEVENT_MAX];
	struct mevent *pipev;
	int mfd;
	int numev;
	int ret;

	mevent_tid = pthread_self();
	mevent_set_name();

	mfd = epoll_create1(0);
	assert(mfd > 0);

	/*
	 * Open the pipe that will be used for other threads to force
	 * the blocking kqueue call to exit by writing to it. Set the
	 * descriptor to non-blocking.
	 *
	 * NOTE(review): despite the comment above, the descriptors are
	 * never actually switched to non-blocking mode here — confirm
	 * whether an fcntl(O_NONBLOCK) is missing.
	 */
	ret = pipe(mevent_pipefd);
	if (ret < 0) {
		perror("pipe");
		exit(1);	/* was exit(0): pipe failure is an error */
	}

	/*
	 * Add internal event handler for the pipe write fd
	 */
	pipev = mevent_add(mevent_pipefd[0], EVF_READ, mevent_pipe_read, NULL);
	assert(pipev != NULL);

	for (;;) {
		/*
		 * Build changelist if required.
		 * XXX the changelist can be put into the blocking call
		 * to eliminate the extra syscall. Currently better for
		 * debug.
		 */
		int i;
		struct epoll_event *e;

		numev = mevent_build(mfd, clist);
		for (i = 0; i < numev; i++) {
			e = &clist[i].ee;
			ret = epoll_ctl(mfd, clist[i].op, clist[i].fd, e);
			if (ret == -1)
				perror("Error return from epoll_ctl");
		}

		/*
		 * Block awaiting events
		 */
		ret = epoll_wait(mfd, eventlist, MEVENT_MAX, -1);
		if (ret == -1 && errno != EINTR)
			perror("Error return from epoll_wait");

		/*
		 * Handle reported events
		 */
		mevent_handle(eventlist, ret);

		/* Leave the loop once a suspend/shutdown was requested. */
		if (vm_get_suspend_mode() != VM_SUSPEND_NONE)
			break;
	}

	/* Flush any queued changes, then release all event resources. */
	mevent_build(mfd, clist);
	mevent_destroy();
	close(mfd);
}

367
devicemodel/core/mptbl.c Normal file
View File

@@ -0,0 +1,367 @@
/*-
* Copyright (c) 2012 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/cdefs.h>
#include <sys/types.h>
#include <stdio.h>
#include <string.h>
#include "types.h"
#include "mptable.h"
#include "acpi.h"
#include "dm.h"
#include "mptbl.h"
#include "pci_core.h"
#define MPTABLE_BASE 0xF0000
/* floating pointer length + maximum length of configuration table */
#define MPTABLE_MAX_LENGTH (65536 + 16)
#define LAPIC_PADDR 0xFEE00000
#define LAPIC_VERSION 16
#define IOAPIC_PADDR 0xFEC00000
#define IOAPIC_VERSION 0x11
#define MP_SPECREV 4
#define MPFP_SIG "_MP_"
/* Configuration header defines */
#define MPCH_SIG "PCMP"
#define MPCH_OEMID "BHyVe "
#define MPCH_OEMID_LEN 8
#define MPCH_PRODID "Hypervisor "
#define MPCH_PRODID_LEN 12
/* Processor entry defines */
#define MPEP_SIG_FAMILY 6 /* XXX acrn-dm should supply this */
#define MPEP_SIG_MODEL 26
#define MPEP_SIG_STEPPING 5
#define MPEP_SIG \
((MPEP_SIG_FAMILY << 8) | \
(MPEP_SIG_MODEL << 4) | \
(MPEP_SIG_STEPPING))
#define MPEP_FEATURES (0xBFEBFBFF) /* XXX Intel i7 */
/* Number of local intr entries */
#define MPEII_NUM_LOCAL_IRQ 2
/* Bus entry defines */
#define MPE_NUM_BUSES 2
#define MPE_BUSNAME_LEN 6
#define MPE_BUSNAME_ISA "ISA "
#define MPE_BUSNAME_PCI "PCI "
/* Optional OEM table registered via mptable_add_oemtbl(), appended by
 * mptable_build(); NULL/0 when no OEM table was supplied. */
static void *oem_tbl_start;
static int oem_tbl_size;
/*
 * MP-table checksum: the byte that makes the sum of the whole region
 * equal zero modulo 256.
 */
static uint8_t
mpt_compute_checksum(void *base, size_t len)
{
	const uint8_t *p = base;
	uint8_t total = 0;
	size_t i;

	for (i = 0; i < len; i++)
		total += p[i];

	return (256 - total);
}
/*
 * Fill in the MP Floating Pointer structure.  'gpa' is the guest
 * physical address where the structure itself resides; the
 * configuration table is assumed to be laid down immediately after it.
 */
static void
mpt_build_mpfp(mpfps_t mpfp, vm_paddr_t gpa)
{
	memset(mpfp, 0, sizeof(*mpfp));
	memcpy(mpfp->signature, MPFP_SIG, 4);
	/* Physical address pointer: the config table follows this struct. */
	mpfp->pap = gpa + sizeof(*mpfp);
	mpfp->length = 1;
	mpfp->spec_rev = MP_SPECREV;
	/* Checksum must be computed last, over the fully populated struct. */
	mpfp->checksum = mpt_compute_checksum(mpfp, sizeof(*mpfp));
}
/*
 * Fill in the fixed fields of the MP Configuration Table Header.
 * entry_count, base_table_length and checksum start at zero (memset)
 * and are finalized by mptable_build() after all entries are appended.
 */
static void
mpt_build_mpch(mpcth_t mpch)
{
	memset(mpch, 0, sizeof(*mpch));
	memcpy(mpch->signature, MPCH_SIG, 4);
	mpch->spec_rev = MP_SPECREV;
	memcpy(mpch->oem_id, MPCH_OEMID, MPCH_OEMID_LEN);
	memcpy(mpch->product_id, MPCH_PRODID, MPCH_PRODID_LEN);
	mpch->apic_address = LAPIC_PADDR;
}
/*
 * Emit one processor entry per vCPU.  APIC ids are assigned
 * sequentially and CPU 0 is flagged as the bootstrap processor.
 */
static void
mpt_build_proc_entries(proc_entry_ptr mpep, int ncpu)
{
	int cpu;

	for (cpu = 0; cpu < ncpu; cpu++, mpep++) {
		memset(mpep, 0, sizeof(*mpep));
		mpep->type = MPCT_ENTRY_PROCESSOR;
		mpep->apic_id = cpu; /* XXX */
		mpep->apic_version = LAPIC_VERSION;
		mpep->cpu_flags = (cpu == 0) ?
		    (PROCENTRY_FLAG_EN | PROCENTRY_FLAG_BP) :
		    PROCENTRY_FLAG_EN;
		mpep->cpu_signature = MPEP_SIG;
		mpep->feature_flags = MPEP_FEATURES;
	}
}
/*
 * Emit the two local interrupt entries (MPEII_NUM_LOCAL_IRQ of them):
 * LINT0 as ExtINT and LINT1 as NMI, both broadcast to all CPUs (0xff).
 */
static void
mpt_build_localint_entries(int_entry_ptr mpie)
{
	/* Hardcode LINT0 as ExtINT on all CPUs. */
	memset(&mpie[0], 0, sizeof(mpie[0]));
	mpie[0].type = MPCT_ENTRY_LOCAL_INT;
	mpie[0].int_type = INTENTRY_TYPE_EXTINT;
	mpie[0].int_flags = INTENTRY_FLAGS_POLARITY_CONFORM |
	    INTENTRY_FLAGS_TRIGGER_CONFORM;
	mpie[0].dst_apic_id = 0xff;
	mpie[0].dst_apic_int = 0;

	/* Hardcode LINT1 as NMI on all CPUs. */
	memset(&mpie[1], 0, sizeof(mpie[1]));
	mpie[1].type = MPCT_ENTRY_LOCAL_INT;
	mpie[1].int_type = INTENTRY_TYPE_NMI;
	mpie[1].int_flags = INTENTRY_FLAGS_POLARITY_CONFORM |
	    INTENTRY_FLAGS_TRIGGER_CONFORM;
	mpie[1].dst_apic_id = 0xff;
	mpie[1].dst_apic_int = 1;
}
/*
 * Emit the MPE_NUM_BUSES bus entries: bus 0 is PCI, bus 1 is ISA.
 */
static void
mpt_build_bus_entries(bus_entry_ptr mpeb)
{
	memset(&mpeb[0], 0, sizeof(mpeb[0]));
	mpeb[0].type = MPCT_ENTRY_BUS;
	mpeb[0].bus_id = 0;
	memcpy(mpeb[0].bus_type, MPE_BUSNAME_PCI, MPE_BUSNAME_LEN);

	memset(&mpeb[1], 0, sizeof(mpeb[1]));
	mpeb[1].type = MPCT_ENTRY_BUS;
	mpeb[1].bus_id = 1;
	memcpy(mpeb[1].bus_type, MPE_BUSNAME_ISA, MPE_BUSNAME_LEN);
}
/* Emit the single I/O APIC entry with the given APIC id, enabled,
 * at the standard IOAPIC physical address. */
static void
mpt_build_ioapic_entries(io_apic_entry_ptr mpei, int id)
{
	memset(mpei, 0, sizeof(*mpei));
	mpei->type = MPCT_ENTRY_IOAPIC;
	mpei->apic_id = id;
	mpei->apic_version = IOAPIC_VERSION;
	mpei->apic_flags = IOAPICENTRY_FLAG_EN;
	mpei->apic_address = IOAPIC_PADDR;
}
/*
 * Count the I/O interrupt entries that will be generated.
 *
 * Always include entries for the first 16 pins along with an entry
 * for each active PCI INTx pin.
 */
static int
mpt_count_ioint_entries(void)
{
	int bus;
	int total = 16;

	for (bus = 0; bus <= PCI_BUSMAX; bus++)
		total += pci_count_lintr(bus);

	return total;
}
/*
 * pci_walk_lintr() callback: append one I/O interrupt entry for an
 * active PCI INTx pin.  'arg' points at the running output cursor
 * (an int_entry_ptr *), which is advanced past the new entry.
 */
static void
mpt_generate_pci_int(int bus, int slot, int pin, int pirq_pin, int ioapic_irq,
		     void *arg)
{
	int_entry_ptr *mpiep, mpie;

	mpiep = arg;
	mpie = *mpiep;
	memset(mpie, 0, sizeof(*mpie));

	/*
	 * This is always after another I/O interrupt entry, so cheat
	 * and fetch the I/O APIC ID from the prior entry.
	 */
	mpie->type = MPCT_ENTRY_INT;
	mpie->int_type = INTENTRY_TYPE_INT;
	mpie->src_bus_id = bus;
	/* Source bus IRQ encodes the device slot and the 0-based pin. */
	mpie->src_bus_irq = slot << 2 | (pin - 1);
	mpie->dst_apic_id = mpie[-1].dst_apic_id;
	mpie->dst_apic_int = ioapic_irq;

	/* Advance the caller's cursor to the next free slot. */
	*mpiep = mpie + 1;
}
/*
 * Emit the I/O interrupt entries: the 16 legacy ISA pins first (with
 * special cases for the ExtINT pin, the pin-2 IRQ0 route, and the ACPI
 * SCI), then one entry per active PCI INTx pin via the walker callback.
 * 'id' is the destination I/O APIC id.
 */
static void
mpt_build_ioint_entries(int_entry_ptr mpie, int id)
{
	int pin, bus;

	/*
	 * The following config is taken from kernel mptable.c
	 * mptable_parse_default_config_ints(...), for now
	 * just use the default config, tweek later if needed.
	 */

	/* First, generate the first 16 pins. */
	for (pin = 0; pin < 16; pin++) {
		memset(mpie, 0, sizeof(*mpie));
		mpie->type = MPCT_ENTRY_INT;
		mpie->src_bus_id = 1;	/* bus 1 is ISA, per mpt_build_bus_entries() */
		mpie->dst_apic_id = id;

		/*
		 * All default configs route IRQs from bus 0 to the first 16
		 * pins of the first I/O APIC with an APIC ID of 2.
		 */
		mpie->dst_apic_int = pin;
		switch (pin) {
		case 0:
			/* Pin 0 is an ExtINT pin. */
			mpie->int_type = INTENTRY_TYPE_EXTINT;
			break;
		case 2:
			/* IRQ 0 is routed to pin 2. */
			mpie->int_type = INTENTRY_TYPE_INT;
			mpie->src_bus_irq = 0;
			break;
		case SCI_INT:
			/* ACPI SCI is level triggered and active-lo. */
			mpie->int_flags = INTENTRY_FLAGS_POLARITY_ACTIVELO |
			    INTENTRY_FLAGS_TRIGGER_LEVEL;
			mpie->int_type = INTENTRY_TYPE_INT;
			mpie->src_bus_irq = SCI_INT;
			break;
		default:
			/* All other pins are identity mapped. */
			mpie->int_type = INTENTRY_TYPE_INT;
			mpie->src_bus_irq = pin;
			break;
		}
		mpie++;
	}

	/* Next, generate entries for any PCI INTx interrupts. */
	for (bus = 0; bus <= PCI_BUSMAX; bus++)
		pci_walk_lintr(bus, mpt_generate_pci_int, &mpie);
}
/*
 * Record an OEM table to be appended by a later mptable_build() call.
 * 'tbl' must remain valid until then; it is copied at build time.
 */
void
mptable_add_oemtbl(void *tbl, int tblsz)
{
	oem_tbl_start = tbl;
	oem_tbl_size = tblsz;
}
/*
 * Lay out the complete MP table (floating pointer, configuration table
 * header, and all entries) at guest physical address MPTABLE_BASE.
 *
 * Returns 0 on success, or -1 when guest memory is not mapped or a PCI
 * bus other than bus 0 is configured (an MP table cannot describe
 * multiple PCI hierarchies).
 */
int
mptable_build(struct vmctx *ctx, int ncpu)
{
	mpcth_t mpch;
	bus_entry_ptr mpeb;
	io_apic_entry_ptr mpei;
	proc_entry_ptr mpep;
	mpfps_t mpfp;
	int_entry_ptr mpie;
	int ioints, bus;
	char *curraddr;
	char *startaddr;

	startaddr = paddr_guest2host(ctx, MPTABLE_BASE, MPTABLE_MAX_LENGTH);
	if (startaddr == NULL) {
		fprintf(stderr, "mptable requires mapped mem\n");
		return -1;
	}

	/*
	 * There is no way to advertise multiple PCI hierarchies via MPtable
	 * so require that there is no PCI hierarchy with a non-zero bus
	 * number.
	 */
	for (bus = 1; bus <= PCI_BUSMAX; bus++) {
		if (pci_bus_configured(bus)) {
			fprintf(stderr, "MPtable is incompatible with "
				"multiple PCI hierarchies.\r\n");
			fprintf(stderr, "MPtable generation can be disabled "
				"by passing the -Y option to acrn-dm.\r\n");
			return -1;
		}
	}

	/* Append each section in order, advancing curraddr past it and
	 * accumulating the header's entry_count as we go. */
	curraddr = startaddr;
	mpfp = (mpfps_t)curraddr;
	mpt_build_mpfp(mpfp, MPTABLE_BASE);
	curraddr += sizeof(*mpfp);

	mpch = (mpcth_t)curraddr;
	mpt_build_mpch(mpch);
	curraddr += sizeof(*mpch);

	mpep = (proc_entry_ptr)curraddr;
	mpt_build_proc_entries(mpep, ncpu);
	curraddr += sizeof(*mpep) * ncpu;
	mpch->entry_count += ncpu;

	mpeb = (bus_entry_ptr) curraddr;
	mpt_build_bus_entries(mpeb);
	curraddr += sizeof(*mpeb) * MPE_NUM_BUSES;
	mpch->entry_count += MPE_NUM_BUSES;

	mpei = (io_apic_entry_ptr)curraddr;
	mpt_build_ioapic_entries(mpei, 0);
	curraddr += sizeof(*mpei);
	mpch->entry_count++;

	/* ioints counts exactly what mpt_build_ioint_entries() emits. */
	mpie = (int_entry_ptr) curraddr;
	ioints = mpt_count_ioint_entries();
	mpt_build_ioint_entries(mpie, 0);
	curraddr += sizeof(*mpie) * ioints;
	mpch->entry_count += ioints;

	mpie = (int_entry_ptr)curraddr;
	mpt_build_localint_entries(mpie);
	curraddr += sizeof(*mpie) * MPEII_NUM_LOCAL_IRQ;
	mpch->entry_count += MPEII_NUM_LOCAL_IRQ;

	/* Optionally append the OEM table registered earlier. */
	if (oem_tbl_start) {
		mpch->oem_table_pointer = curraddr - startaddr + MPTABLE_BASE;
		mpch->oem_table_size = oem_tbl_size;
		memcpy(curraddr, oem_tbl_start, oem_tbl_size);
	}

	/* Finalize the header last: length first, then its checksum. */
	mpch->base_table_length = curraddr - (char *)mpch;
	mpch->checksum = mpt_compute_checksum(mpch, mpch->base_table_length);

	return 0;
}

51
devicemodel/core/post.c Normal file
View File

@@ -0,0 +1,51 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/cdefs.h>
#include <sys/types.h>
#include <stdbool.h>
#include <assert.h>
#include "inout.h"
#include "lpc.h"
/*
 * Read handler for the BIOS POST diagnostic port (0x84).  Only
 * single-byte reads are accepted; they always succeed with dummy data.
 */
static int
post_data_handler(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
		  uint32_t *eax, void *arg)
{
	assert(in == 1);

	if (bytes == 1) {
		*eax = 0xff; /* return some garbage */
		return 0;
	}

	return -1;
}
INOUT_PORT(post, 0x84, IOPORT_F_IN, post_data_handler);
SYSRES_IO(0x84, 1);

View File

@@ -0,0 +1,821 @@
/*-
* Copyright (c) 2014 Tycho Nightingale <tycho.nightingale@pluribusnetworks.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#include <sys/param.h>
#include <assert.h>
#include <openssl/md5.h>
#include <stdio.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <uuid/uuid.h>
#include "vmm.h"
#include "vmmapi.h"
#include "dm.h"
#include "smbiostbl.h"
#define MB (1024*1024)
#define GB (1024ULL*1024*1024)
#define SMBIOS_BASE 0xF1000
/* ACRN-DM_ACPI_BASE - SMBIOS_BASE) */
#define SMBIOS_MAX_LENGTH (0xF2400 - 0xF1000)
#define SMBIOS_TYPE_BIOS 0
#define SMBIOS_TYPE_SYSTEM 1
#define SMBIOS_TYPE_CHASSIS 3
#define SMBIOS_TYPE_PROCESSOR 4
#define SMBIOS_TYPE_MEMARRAY 16
#define SMBIOS_TYPE_MEMDEVICE 17
#define SMBIOS_TYPE_MEMARRAYMAP 19
#define SMBIOS_TYPE_BOOT 32
#define SMBIOS_TYPE_EOT 127
/* Common header shared by all SMBIOS structures. */
struct smbios_structure {
	uint8_t		type;	/* structure type (SMBIOS_TYPE_*) */
	uint8_t		length;	/* length of the formatted area */
	uint16_t	handle;	/* unique handle for this structure */
} __attribute__((packed));

/*
 * Initializer callback for one template entry.  Presumably emits the
 * finalized structure at 'curaddr' and reports the new end address,
 * running structure count 'n' and max structure size 'size'
 * (NOTE(review): implementations are outside this chunk — confirm).
 */
typedef int (*initializer_func_t)(struct smbios_structure *template_entry,
	const char **template_strings, char *curaddr, char **endaddr,
	uint16_t *n, uint16_t *size);

/* One row of the template table: structure, its strings, and writer. */
struct smbios_template_entry {
	struct smbios_structure	*entry;
	const char		**strings;
	initializer_func_t	initializer;
};
/*
* SMBIOS Structure Table Entry Point
*/
#define SMBIOS_ENTRY_EANCHOR "_SM_"
#define SMBIOS_ENTRY_EANCHORLEN 4
#define SMBIOS_ENTRY_IANCHOR "_DMI_"
#define SMBIOS_ENTRY_IANCHORLEN 5
/*
 * The "_SM_"/"_DMI_" anchored entry point that describes and locates
 * the SMBIOS structure table for the guest.
 */
struct smbios_entry_point {
	char		eanchor[4];	/* anchor tag */
	uint8_t		echecksum;	/* checksum of entry point structure */
	uint8_t		eplen;		/* length in bytes of entry point */
	uint8_t		major;		/* major version of the SMBIOS spec */
	uint8_t		minor;		/* minor version of the SMBIOS spec */
	uint16_t	maxssize;	/* maximum size in bytes of a struct */
	uint8_t		revision;	/* entry point structure revision */
	uint8_t		format[5];	/* entry point rev-specific data */
	char		ianchor[5];	/* intermediate anchor tag */
	uint8_t		ichecksum;	/* intermediate checksum */
	uint16_t	stlen;		/* len in bytes of structure table */
	uint32_t	staddr;		/* physical addr of structure table */
	uint16_t	stnum;		/* number of structure table entries */
	uint8_t		bcdrev;		/* BCD value representing DMI ver */
} __attribute__((packed));
/*
* BIOS Information
*/
#define SMBIOS_FL_ISA 0x00000010 /* ISA is supported */
#define SMBIOS_FL_PCI 0x00000080 /* PCI is supported */
#define SMBIOS_FL_SHADOW 0x00001000 /* BIOS shadowing is allowed */
#define SMBIOS_FL_CDBOOT 0x00008000 /* Boot from CD is supported */
#define SMBIOS_FL_SELBOOT 0x00010000 /* Selectable Boot supported */
#define SMBIOS_FL_EDD 0x00080000 /* EDD Spec is supported */
#define SMBIOS_XB1_FL_ACPI 0x00000001 /* ACPI is supported */
#define SMBIOS_XB2_FL_BBS 0x00000001 /* BIOS Boot Specification */
#define SMBIOS_XB2_FL_VM 0x00000010 /* Virtual Machine */
/*
 * Type 0: BIOS Information.  Fields annotated "string" hold 1-based
 * indexes into the structure's trailing string-set (per the SMBIOS
 * string convention), not the text itself.
 */
struct smbios_table_type0 {
	struct smbios_structure header;
	uint8_t		vendor;		/* vendor string */
	uint8_t		version;	/* version string */
	uint16_t	segment;	/* address segment location */
	uint8_t		rel_date;	/* release date */
	uint8_t		size;		/* rom size */
	uint64_t	cflags;		/* characteristics */
	uint8_t		xc_bytes[2];	/* characteristics ext bytes */
	uint8_t		sb_major_rel;	/* system bios version */
	uint8_t		sb_minor_rele;	/* system bios minor version (sic: field-name typo) */
	uint8_t		ecfw_major_rel;	/* embedded ctrl fw version */
	uint8_t		ecfw_minor_rel;
} __attribute__((packed));
/*
* System Information
*/
#define SMBIOS_WAKEUP_SWITCH 0x06 /* power switch */
/* Type 1: System Information ("string" fields are string-set indexes). */
struct smbios_table_type1 {
	struct smbios_structure header;
	uint8_t		manufacturer;	/* manufacturer string */
	uint8_t		product;	/* product name string */
	uint8_t		version;	/* version string */
	uint8_t		serial;		/* serial number string */
	uint8_t		uuid[16];	/* uuid byte array */
	uint8_t		wakeup;		/* wake-up event */
	uint8_t		sku;		/* sku number string */
	uint8_t		family;		/* family name string */
} __attribute__((packed));
/*
* System Enclosure or Chassis
*/
#define SMBIOS_CHT_UNKNOWN 0x02 /* unknown */
#define SMBIOS_CHST_SAFE 0x03 /* safe */
#define SMBIOS_CHSC_NONE 0x03 /* none */
/* Type 3: System Enclosure or Chassis. */
struct smbios_table_type3 {
	struct smbios_structure header;
	uint8_t		manufacturer;	/* manufacturer string */
	uint8_t		type;		/* type */
	uint8_t		version;	/* version string */
	uint8_t		serial;		/* serial number string */
	uint8_t		asset;		/* asset tag string */
	uint8_t		bustate;	/* boot-up state */
	uint8_t		psstate;	/* power supply state */
	uint8_t		tstate;		/* thermal state */
	uint8_t		security;	/* security status */
	uint8_t		uheight;	/* height in 'u's */
	uint8_t		cords;		/* number of power cords */
	uint8_t		elems;		/* number of element records */
	uint8_t		elemlen;	/* length of records */
	uint8_t		sku;		/* sku number string */
} __attribute__((packed));
/*
* Processor Information
*/
#define SMBIOS_PRT_CENTRAL 0x03 /* central processor */
#define SMBIOS_PRF_OTHER 0x01 /* other */
#define SMBIOS_PRS_PRESENT 0x40 /* socket is populated */
#define SMBIOS_PRS_ENABLED 0x1 /* enabled */
#define SMBIOS_PRU_NONE 0x06 /* none */
#define SMBIOS_PFL_64B 0x04 /* 64-bit capable */
/* Type 4: Processor Information. */
struct smbios_table_type4 {
	struct smbios_structure header;
	uint8_t		socket;		/* socket designation string */
	uint8_t		type;		/* processor type */
	uint8_t		family;		/* processor family */
	uint8_t		manufacturer;	/* manufacturer string */
	uint64_t	cpuid;		/* processor cpuid */
	uint8_t		version;	/* version string */
	uint8_t		voltage;	/* voltage */
	uint16_t	clkspeed;	/* ext clock speed in mhz */
	uint16_t	maxspeed;	/* maximum speed in mhz */
	uint16_t	curspeed;	/* current speed in mhz */
	uint8_t		status;		/* status */
	uint8_t		upgrade;	/* upgrade */
	uint16_t	l1handle;	/* l1 cache handle */
	uint16_t	l2handle;	/* l2 cache handle */
	uint16_t	l3handle;	/* l3 cache handle */
	uint8_t		serial;		/* serial number string */
	uint8_t		asset;		/* asset tag string */
	uint8_t		part;		/* part number string */
	uint8_t		cores;		/* cores per socket */
	uint8_t		ecores;		/* enabled cores */
	uint8_t		threads;	/* threads per socket */
	uint16_t	cflags;		/* processor characteristics */
	uint16_t	family2;	/* processor family 2 */
} __attribute__((packed));
/*
* Physical Memory Array
*/
#define SMBIOS_MAL_SYSMB 0x03 /* system board or motherboard */
#define SMBIOS_MAU_SYSTEM 0x03 /* system memory */
#define SMBIOS_MAE_NONE 0x03 /* none */
struct smbios_table_type16 {
struct smbios_structure header;
uint8_t location; /* physical device location */
uint8_t use; /* device functional purpose */
uint8_t ecc; /* err detect/correct method */
uint32_t size; /* max mem capacity in kb */
uint16_t errhand; /* handle of error (if any) */
uint16_t ndevs; /* num of slots or sockets */
uint64_t xsize; /* max mem capacity in bytes */
} __attribute__((packed));
/*
* Memory Device
*/
#define SMBIOS_MDFF_UNKNOWN 0x02 /* unknown */
#define SMBIOS_MDT_UNKNOWN 0x02 /* unknown */
#define SMBIOS_MDF_UNKNOWN 0x0004 /* unknown */
struct smbios_table_type17 {
struct smbios_structure header;
uint16_t arrayhand; /* handle of physl mem array */
uint16_t errhand; /* handle of mem error data */
uint16_t twidth; /* total width in bits */
uint16_t dwidth; /* data width in bits */
uint16_t size; /* size in bytes */
uint8_t form; /* form factor */
uint8_t set; /* set */
uint8_t dloc; /* device locator string */
uint8_t bloc; /* phys bank locator string */
uint8_t type; /* memory type */
uint16_t flags; /* memory characteristics */
uint16_t maxspeed; /* maximum speed in mhz */
uint8_t manufacturer; /* manufacturer string */
uint8_t serial; /* serial number string */
uint8_t asset; /* asset tag string */
uint8_t part; /* part number string */
uint8_t attributes; /* attributes */
uint32_t xsize; /* extended size in mbs */
uint16_t curspeed; /* current speed in mhz */
uint16_t minvoltage; /* minimum voltage */
uint16_t maxvoltage; /* maximum voltage */
uint16_t curvoltage; /* configured voltage */
} __attribute__((packed));
/*
* Memory Array Mapped Address
*/
struct smbios_table_type19 {
struct smbios_structure header;
uint32_t saddr; /* start phys addr in kb */
uint32_t eaddr; /* end phys addr in kb */
uint16_t arrayhand; /* physical mem array handle */
uint8_t width; /* num of dev in row */
uint64_t xsaddr; /* start phys addr in bytes */
uint64_t xeaddr; /* end phys addr in bytes */
} __attribute__((packed));
/*
* System Boot Information
*/
#define SMBIOS_BOOT_NORMAL 0 /* no errors detected */
struct smbios_table_type32 {
struct smbios_structure header;
uint8_t reserved[6];
uint8_t status; /* boot status */
} __attribute__((packed));
/*
* End-of-Table
*/
struct smbios_table_type127 {
struct smbios_structure header;
} __attribute__((packed));
/*
 * Template data for the guest SMBIOS tables.
 *
 * Each template is copied verbatim into guest memory by its initializer
 * (see smbios_template[] below); the numeric "string" fields are 1-based
 * indexes into the matching *_strings[] array, which is appended after
 * the fixed structure as the SMBIOS string-set.
 */

/* Type 0: BIOS Information. */
struct smbios_table_type0 smbios_type0_template = {
    { SMBIOS_TYPE_BIOS, sizeof(struct smbios_table_type0), 0 },
    1, /* bios vendor string */
    2, /* bios version string */
    0xF000, /* bios address segment location */
    3, /* bios release date */
    0x0, /* bios size (64k * (n + 1) is the size in bytes) */
    SMBIOS_FL_ISA | SMBIOS_FL_PCI | SMBIOS_FL_SHADOW |
    SMBIOS_FL_CDBOOT | SMBIOS_FL_EDD,
    { SMBIOS_XB1_FL_ACPI, SMBIOS_XB2_FL_BBS | SMBIOS_XB2_FL_VM },
    0x0, /* bios major release */
    0x0, /* bios minor release */
    0xff, /* embedded controller firmware major release */
    0xff /* embedded controller firmware minor release */
};

const char *smbios_type0_strings[] = {
    "ACRN-DM", /* vendor string */
    "1.00", /* bios version string */
    "03/14/2014", /* bios release date string */
    NULL
};

/* Type 1: System Information. The UUID is filled in at build time. */
struct smbios_table_type1 smbios_type1_template = {
    { SMBIOS_TYPE_SYSTEM, sizeof(struct smbios_table_type1), 0 },
    1, /* manufacturer string */
    2, /* product string */
    3, /* version string */
    4, /* serial number string */
    { 0 },
    SMBIOS_WAKEUP_SWITCH,
    5, /* sku string */
    6 /* family string */
};

static int smbios_type1_initializer(struct smbios_structure *template_entry,
    const char **template_strings, char *curaddr, char **endaddr,
    uint16_t *n, uint16_t *size);

const char *smbios_type1_strings[] = {
    " ", /* manufacturer string */
    "ACRN-DM", /* product name string */
    "1.0", /* version string */
    "None", /* serial number string */
    "None", /* sku string */
    " ", /* family name string */
    NULL
};

/* Type 3: System Enclosure or Chassis. */
struct smbios_table_type3 smbios_type3_template = {
    { SMBIOS_TYPE_CHASSIS, sizeof(struct smbios_table_type3), 0 },
    1, /* manufacturer string */
    SMBIOS_CHT_UNKNOWN,
    2, /* version string */
    3, /* serial number string */
    4, /* asset tag string */
    SMBIOS_CHST_SAFE,
    SMBIOS_CHST_SAFE,
    SMBIOS_CHST_SAFE,
    SMBIOS_CHSC_NONE,
    0, /* height in 'u's (0=enclosure height unspecified) */
    0, /* number of power cords (0=number unspecified) */
    0, /* number of contained element records */
    0, /* length of records */
    5 /* sku number string */
};

const char *smbios_type3_strings[] = {
    " ", /* manufacturer string */
    "1.0", /* version string */
    "None", /* serial number string */
    "None", /* asset tag string */
    "None", /* sku number string */
    NULL
};

/* Type 4: Processor Information; replicated once per vCPU at build time. */
struct smbios_table_type4 smbios_type4_template = {
    { SMBIOS_TYPE_PROCESSOR, sizeof(struct smbios_table_type4), 0 },
    1, /* socket designation string */
    SMBIOS_PRT_CENTRAL,
    SMBIOS_PRF_OTHER,
    2, /* manufacturer string */
    0, /* cpuid */
    3, /* version string */
    0, /* voltage */
    0, /* external clock frequency in mhz (0=unknown) */
    0, /* maximum frequency in mhz (0=unknown) */
    0, /* current frequency in mhz (0=unknown) */
    SMBIOS_PRS_PRESENT | SMBIOS_PRS_ENABLED,
    SMBIOS_PRU_NONE,
    -1, /* l1 cache handle */
    -1, /* l2 cache handle */
    -1, /* l3 cache handle */
    4, /* serial number string */
    5, /* asset tag string */
    6, /* part number string */
    0, /* cores per socket (0=unknown) */
    0, /* enabled cores per socket (0=unknown) */
    0, /* threads per socket (0=unknown) */
    SMBIOS_PFL_64B,
    SMBIOS_PRF_OTHER
};

const char *smbios_type4_strings[] = {
    " ", /* socket designation string */
    " ", /* manufacturer string */
    " ", /* version string */
    "None", /* serial number string */
    "None", /* asset tag string */
    "None", /* part number string */
    NULL
};

static int smbios_type4_initializer(struct smbios_structure *template_entry,
    const char **template_strings, char *curaddr, char **endaddr,
    uint16_t *n, uint16_t *size);

/* Type 16: Physical Memory Array; sizes patched in at build time. */
struct smbios_table_type16 smbios_type16_template = {
    { SMBIOS_TYPE_MEMARRAY, sizeof(struct smbios_table_type16), 0 },
    SMBIOS_MAL_SYSMB,
    SMBIOS_MAU_SYSTEM,
    SMBIOS_MAE_NONE,
    0x80000000, /* max mem capacity in kb (0x80000000=use extended) */
    -1, /* handle of error (if any) */
    0, /* number of slots or sockets (TBD) */
    0 /* extended maximum memory capacity in bytes (TBD) */
};

static int smbios_type16_initializer(struct smbios_structure *template_entry,
    const char **template_strings, char *curaddr, char **endaddr,
    uint16_t *n, uint16_t *size);

/* Type 17: Memory Device; one per populated memory region. */
struct smbios_table_type17 smbios_type17_template = {
    { SMBIOS_TYPE_MEMDEVICE, sizeof(struct smbios_table_type17), 0 },
    -1, /* handle of physical memory array */
    -1, /* handle of memory error data */
    64, /* total width in bits including ecc */
    64, /* data width in bits */
    0x7fff, /* size in bytes (0x7fff=use extended)*/
    SMBIOS_MDFF_UNKNOWN,
    0, /* set (0x00=none, 0xff=unknown) */
    1, /* device locator string */
    2, /* physical bank locator string */
    SMBIOS_MDT_UNKNOWN,
    SMBIOS_MDF_UNKNOWN,
    0, /* maximum memory speed in mhz (0=unknown) */
    3, /* manufacturer string */
    4, /* serial number string */
    5, /* asset tag string */
    6, /* part number string */
    0, /* attributes (0=unknown rank information) */
    0, /* extended size in mb (TBD) */
    0, /* current speed in mhz (0=unknown) */
    0, /* minimum voltage in mv (0=unknown) */
    0, /* maximum voltage in mv (0=unknown) */
    0 /* configured voltage in mv (0=unknown) */
};

const char *smbios_type17_strings[] = {
    " ", /* device locator string */
    " ", /* physical bank locator string */
    " ", /* manufacturer string */
    "None", /* serial number string */
    "None", /* asset tag string */
    "None", /* part number string */
    NULL
};

static int smbios_type17_initializer(struct smbios_structure *template_entry,
    const char **template_strings, char *curaddr, char **endaddr,
    uint16_t *n, uint16_t *size);

/* Type 19: Memory Array Mapped Address; addresses patched at build time. */
struct smbios_table_type19 smbios_type19_template = {
    { SMBIOS_TYPE_MEMARRAYMAP, sizeof(struct smbios_table_type19), 0 },
    0xffffffff, /* starting phys addr in kb (0xffffffff=use ext) */
    0xffffffff, /* ending phys addr in kb (0xffffffff=use ext) */
    -1, /* physical memory array handle */
    1, /* number of devices that form a row */
    0, /* extended starting phys addr in bytes (TDB) */
    0 /* extended ending phys addr in bytes (TDB) */
};

static int smbios_type19_initializer(struct smbios_structure *template_entry,
    const char **template_strings, char *curaddr, char **endaddr,
    uint16_t *n, uint16_t *size);

/* Type 32: System Boot Information -- always reports a normal boot. */
struct smbios_table_type32 smbios_type32_template = {
    { SMBIOS_TYPE_BOOT, sizeof(struct smbios_table_type32), 0 },
    { 0, 0, 0, 0, 0, 0 },
    SMBIOS_BOOT_NORMAL
};

/* Type 127: End-of-Table marker. */
struct smbios_table_type127 smbios_type127_template = {
    { SMBIOS_TYPE_EOT, sizeof(struct smbios_table_type127), 0 }
};

static int smbios_generic_initializer(struct smbios_structure *template_entry,
    const char **template_strings, char *curaddr, char **endaddr,
    uint16_t *n, uint16_t *size);

/*
 * Dispatch table walked by smbios_build(): for each entry, the
 * initializer copies the template (and optional string-set) into guest
 * memory and patches instance-specific fields. NULL-terminated.
 */
static struct smbios_template_entry smbios_template[] = {
    { (struct smbios_structure *)&smbios_type0_template,
      smbios_type0_strings,
      smbios_generic_initializer },
    { (struct smbios_structure *)&smbios_type1_template,
      smbios_type1_strings,
      smbios_type1_initializer },
    { (struct smbios_structure *)&smbios_type3_template,
      smbios_type3_strings,
      smbios_generic_initializer },
    { (struct smbios_structure *)&smbios_type4_template,
      smbios_type4_strings,
      smbios_type4_initializer },
    { (struct smbios_structure *)&smbios_type16_template,
      NULL,
      smbios_type16_initializer },
    { (struct smbios_structure *)&smbios_type17_template,
      smbios_type17_strings,
      smbios_type17_initializer },
    { (struct smbios_structure *)&smbios_type19_template,
      NULL,
      smbios_type19_initializer },
    { (struct smbios_structure *)&smbios_type32_template,
      NULL,
      smbios_generic_initializer },
    { (struct smbios_structure *)&smbios_type127_template,
      NULL,
      smbios_generic_initializer },
    { NULL, NULL, NULL }
};

/* Guest RAM sizes (bytes), captured from the vmctx in smbios_build(). */
static uint64_t guest_lomem, guest_himem;
/* Handle index of the Type 16 array, referenced by Type 17/19 entries. */
static uint16_t type16_handle;
/*
 * Default table initializer: copy 'template_entry' to 'curaddr', assign
 * the next table handle, then append the string-set from
 * 'template_strings' (or the mandatory double-NUL when there are no
 * strings).  On return *endaddr points just past the emitted structure,
 * *n has been incremented, and *size holds the emitted structure size
 * (fixed part plus string-set) in bytes.
 *
 * Fix: *size was never written by any initializer, so smbios_build()
 * computed maxssize from an uninitialized stack value (undefined
 * behavior).  It is now set to the full emitted size.  (For Type 4 the
 * per-CPU string appended afterwards by smbios_type4_initializer() is
 * not included -- NOTE(review): acceptable lower bound, confirm against
 * consumers of the entry point's maxssize field.)
 */
static int
smbios_generic_initializer(struct smbios_structure *template_entry,
    const char **template_strings, char *curaddr, char **endaddr,
    uint16_t *n, uint16_t *size)
{
    struct smbios_structure *entry;
    char *start = curaddr;

    /* Fixed portion, verbatim from the template. */
    memcpy(curaddr, template_entry, template_entry->length);
    entry = (struct smbios_structure *)curaddr;
    entry->handle = *n + 1;
    curaddr += entry->length;

    if (template_strings != NULL) {
        int i;

        /* Emit each string with its terminating NUL... */
        for (i = 0; template_strings[i] != NULL; i++) {
            const char *string;
            int len;

            string = template_strings[i];
            len = strlen(string) + 1;
            memcpy(curaddr, string, len);
            curaddr += len;
        }
        /* ...plus the extra NUL that terminates the string-set. */
        *curaddr = '\0';
        curaddr++;
    } else {
        /* Minimum string section is double nul */
        *curaddr = '\0';
        curaddr++;
        *curaddr = '\0';
        curaddr++;
    }

    (*n)++;
    *endaddr = curaddr;
    *size = (uint16_t)(curaddr - start);

    return 0;
}
/*
 * Initializer for the System Information (Type 1) entry: after the
 * generic copy, fill in the 16-byte system UUID.
 *
 * Returns 0 on success, -1 if a supplied UUID string fails to parse or
 * the hostname cannot be obtained.
 */
static int
smbios_type1_initializer(struct smbios_structure *template_entry,
    const char **template_strings, char *curaddr, char **endaddr,
    uint16_t *n, uint16_t *size)
{
    struct smbios_table_type1 *type1;

    smbios_generic_initializer(template_entry, template_strings,
        curaddr, endaddr, n, size);
    type1 = (struct smbios_table_type1 *)curaddr;

    if (guest_uuid_str != NULL) {
        /* An explicit UUID was supplied on the command line. */
        uuid_t uuid;
        uint32_t status;

        status = uuid_parse(guest_uuid_str, uuid);
        if (status != 0)
            return -1;

        /*
         * NOTE(review): the parsed UUID is validated but never copied
         * into type1->uuid (the encode call below is still commented
         * out), so the template's zero UUID is what the guest sees.
         */
        /* TODO */
        /* uuid_enc_le(&type1->uuid, &uuid); */
    } else {
        /* No UUID given: derive one from MD5(vmname || hostname). */
        MD5_CTX mdctx;
        u_char digest[16];
        char hostname[MAXHOSTNAMELEN];

        /*
         * Universally unique and yet reproducible are an
         * oxymoron, however reproducible is desirable in
         * this case.
         */
        if (gethostname(hostname, sizeof(hostname)))
            return -1;

        MD5_Init(&mdctx);
        MD5_Update(&mdctx, vmname, strlen(vmname));
        MD5_Update(&mdctx, hostname, sizeof(hostname));
        MD5_Final(digest, &mdctx);

        /*
         * Set the variant and version number.
         */
        digest[6] &= 0x0F;
        digest[6] |= 0x30; /* version 3 */
        digest[8] &= 0x3F;
        digest[8] |= 0x80; /* variant bits 10x */

        memcpy(&type1->uuid, digest, sizeof(digest));
    }

    return 0;
}
/*
 * Initializer for Processor Information (Type 4): emits one structure
 * per guest vCPU and appends a "CPU #<i>" socket-designation string to
 * each instance's string-set.
 *
 * Always returns 0.
 */
static int
smbios_type4_initializer(struct smbios_structure *template_entry,
    const char **template_strings, char *curaddr, char **endaddr,
    uint16_t *n, uint16_t *size)
{
    int i;

    for (i = 0; i < guest_ncpus; i++) {
        struct smbios_table_type4 *type4;
        char *p;
        int nstrings, len;

        /* Copy the template and its canned strings for this vCPU. */
        smbios_generic_initializer(template_entry, template_strings,
            curaddr, endaddr, n, size);
        type4 = (struct smbios_table_type4 *)curaddr;

        /* Count the strings already in the emitted string-set. */
        p = curaddr + sizeof(struct smbios_table_type4);
        nstrings = 0;
        while (p < *endaddr - 1) {
            if (*p++ == '\0')
                nstrings++;
        }

        /*
         * Overwrite the string-set's terminating NUL (at *endaddr - 1)
         * with "CPU #<i>" and its NUL, then re-terminate the set with
         * a fresh NUL, growing the structure in place.
         */
        len = sprintf(*endaddr - 1, "CPU #%d", i) + 1;
        *endaddr += len - 1;
        *(*endaddr) = '\0';
        (*endaddr)++;

        /* The new string becomes index nstrings + 1 in the set. */
        type4->socket = nstrings + 1;
        curaddr = *endaddr;
    }

    return 0;
}
/*
 * Initializer for the Physical Memory Array (Type 16) entry.  Records
 * the entry's handle index for later Type 17/19 cross-references and
 * patches in the guest's total memory size and device count.
 */
static int
smbios_type16_initializer(struct smbios_structure *template_entry,
    const char **template_strings, char *curaddr, char **endaddr,
    uint16_t *n, uint16_t *size)
{
    struct smbios_table_type16 *t16;

    /* Remember this entry's handle so Type 17/19 can point at it. */
    type16_handle = *n;

    smbios_generic_initializer(template_entry, template_strings,
        curaddr, endaddr, n, size);

    t16 = (struct smbios_table_type16 *)curaddr;
    t16->xsize = guest_lomem + guest_himem;
    /* One device for low memory, a second when there is RAM above 4G. */
    t16->ndevs = (guest_himem > 0) ? 2 : 1;

    return 0;
}
/*
 * Initializer for Memory Device (Type 17) entries: one device for the
 * low-memory region and, if present, a second for memory above 4GB.
 * Both reference the Type 16 array via type16_handle.
 */
static int
smbios_type17_initializer(struct smbios_structure *template_entry,
    const char **template_strings, char *curaddr, char **endaddr,
    uint16_t *n, uint16_t *size)
{
    struct smbios_table_type17 *t17;

    /* Device describing guest RAM below the PCI hole. */
    smbios_generic_initializer(template_entry, template_strings,
        curaddr, endaddr, n, size);
    t17 = (struct smbios_table_type17 *)curaddr;
    t17->arrayhand = type16_handle;
    t17->xsize = guest_lomem;

    if (guest_himem == 0)
        return 0;

    /* Second device describing guest RAM above 4GB. */
    curaddr = *endaddr;
    smbios_generic_initializer(template_entry, template_strings,
        curaddr, endaddr, n, size);
    t17 = (struct smbios_table_type17 *)curaddr;
    t17->arrayhand = type16_handle;
    t17->xsize = guest_himem;

    return 0;
}
/*
 * Initializer for Memory Array Mapped Address (Type 19) entries: one
 * mapping for [0, guest_lomem) and, if present, a second for the RAM
 * region starting at 4GB.
 */
static int
smbios_type19_initializer(struct smbios_structure *template_entry,
    const char **template_strings, char *curaddr, char **endaddr,
    uint16_t *n, uint16_t *size)
{
    struct smbios_table_type19 *t19;

    /* Mapping for guest RAM below the PCI hole. */
    smbios_generic_initializer(template_entry, template_strings,
        curaddr, endaddr, n, size);
    t19 = (struct smbios_table_type19 *)curaddr;
    t19->arrayhand = type16_handle;
    t19->xsaddr = 0;
    t19->xeaddr = guest_lomem;

    if (guest_himem == 0)
        return 0;

    /* Mapping for guest RAM above 4GB. */
    curaddr = *endaddr;
    smbios_generic_initializer(template_entry, template_strings,
        curaddr, endaddr, n, size);
    t19 = (struct smbios_table_type19 *)curaddr;
    t19->arrayhand = type16_handle;
    t19->xsaddr = 4*GB;
    /*
     * NOTE(review): xeaddr is set to guest_himem, as in the original;
     * if guest_himem counts only bytes above 4G, the end address would
     * need 4*GB added -- confirm against vm_get_highmem_size().
     */
    t19->xeaddr = guest_himem;

    return 0;
}
/*
 * Zero and populate the SMBIOS 2.6 entry point at 'smbios_ep'.
 * 'staddr' is the guest-physical address of the structure table.
 * Checksums and table statistics are filled in later by
 * smbios_ep_finalizer().
 */
static void
smbios_ep_initializer(struct smbios_entry_point *smbios_ep, uint32_t staddr)
{
    memset(smbios_ep, 0, sizeof(*smbios_ep));

    smbios_ep->eplen = 0x1F;
    /* The on-disk layout must match the spec-mandated 31-byte length. */
    assert(sizeof(struct smbios_entry_point) == smbios_ep->eplen);
    smbios_ep->major = 2;
    smbios_ep->minor = 6;
    smbios_ep->revision = 0;
    smbios_ep->staddr = staddr;
    smbios_ep->bcdrev = 0x24;

    memcpy(smbios_ep->eanchor, SMBIOS_ENTRY_EANCHOR,
        SMBIOS_ENTRY_EANCHORLEN);
    memcpy(smbios_ep->ianchor, SMBIOS_ENTRY_IANCHOR,
        SMBIOS_ENTRY_IANCHORLEN);
}
/*
 * Record the structure-table statistics in the entry point and compute
 * its two checksums.  'len' is the table length in bytes, 'num' the
 * structure count, 'maxssize' the largest single structure size.
 *
 * Each checksum byte is chosen so the covered region sums to zero
 * (mod 256).  The intermediate checksum covers bytes 0x10..0x1e (from
 * the "_DMI_" anchor); the entry checksum covers all 0x1f bytes, with
 * the echecksum field still zero when it is computed.
 */
static void
smbios_ep_finalizer(struct smbios_entry_point *smbios_ep, uint16_t len,
    uint16_t num, uint16_t maxssize)
{
    uint8_t checksum;
    int i;

    smbios_ep->maxssize = maxssize;
    smbios_ep->stlen = len;
    smbios_ep->stnum = num;

    /* Intermediate (DMI) checksum over bytes 0x10..0x1e. */
    checksum = 0;
    for (i = 0x10; i < 0x1f; i++)
        checksum -= ((uint8_t *)smbios_ep)[i];
    smbios_ep->ichecksum = checksum;

    /* Entry-point checksum over the full 0x1f bytes. */
    checksum = 0;
    for (i = 0; i < 0x1f; i++)
        checksum -= ((uint8_t *)smbios_ep)[i];
    smbios_ep->echecksum = checksum;
}
/*
 * Build the guest SMBIOS entry point and structure table in the guest
 * memory region starting at SMBIOS_BASE.  Walks smbios_template[],
 * letting each initializer emit its structure(s), then finalizes the
 * entry point with the table length, count, max structure size and
 * checksums.
 *
 * Returns 0 on success, -1 if the SMBIOS region is not mapped, or the
 * first non-zero initializer error.
 */
int
smbios_build(struct vmctx *ctx)
{
    struct smbios_entry_point *smbios_ep;
    uint16_t n;
    uint16_t maxssize;
    char *curaddr, *startaddr, *ststartaddr;
    int i;
    int err;

    guest_lomem = vm_get_lowmem_size(ctx);
    guest_himem = vm_get_highmem_size(ctx);

    startaddr = paddr_guest2host(ctx, SMBIOS_BASE, SMBIOS_MAX_LENGTH);
    if (startaddr == NULL) {
        fprintf(stderr, "smbios table requires mapped mem\n");
        return -1;
    }

    curaddr = startaddr;

    /* Entry point first; the structure table follows immediately. */
    smbios_ep = (struct smbios_entry_point *)curaddr;
    smbios_ep_initializer(smbios_ep, SMBIOS_BASE +
        sizeof(struct smbios_entry_point));
    curaddr += sizeof(struct smbios_entry_point);
    ststartaddr = curaddr;

    n = 0;
    maxssize = 0;
    for (i = 0; smbios_template[i].entry != NULL; i++) {
        struct smbios_structure *entry;
        const char **strings;
        initializer_func_t initializer;
        char *endaddr;
        /*
         * Fix: zero-initialize 'size'.  Not every initializer is
         * guaranteed to write it, and it was previously read
         * uninitialized when computing maxssize below.
         */
        uint16_t size = 0;

        entry = smbios_template[i].entry;
        strings = smbios_template[i].strings;
        initializer = smbios_template[i].initializer;

        err = (*initializer)(entry, strings, curaddr, &endaddr,
            &n, &size);
        if (err != 0)
            return err;

        if (size > maxssize)
            maxssize = size;
        curaddr = endaddr;
    }

    assert(curaddr - startaddr < SMBIOS_MAX_LENGTH);
    smbios_ep_finalizer(smbios_ep, curaddr - ststartaddr, n, maxssize);

    return 0;
}

434
devicemodel/core/sw_load.c Normal file
View File

@@ -0,0 +1,434 @@
/*-
* Copyright (c) 2017 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include "acrn_common.h"
#include "vmmapi.h"
#define STR_LEN 1024 /* max length of kernel/ramdisk paths and bootargs */
#define SETUP_SIG 0x5a5aaa55 /* bzImage setup-section trailing signature */
#define KB (1024UL)
#define MB (1024 * 1024UL)
#define GB (1024 * 1024 * 1024UL)

/* E820 memory types */
#define E820_TYPE_RAM 1 /* EFI 1, 2, 3, 4, 5, 6, 7 */
/* EFI 0, 11, 12, 13 (everything not used elsewhere) */
#define E820_TYPE_RESERVED 2
#define E820_TYPE_ACPI_RECLAIM 3 /* EFI 9 */
#define E820_TYPE_ACPI_NVS 4 /* EFI 10 */
#define E820_TYPE_UNUSABLE 5 /* EFI 8 */

#define NUM_E820_ENTRIES 4
#define LOWRAM_E820_ENTRIES 0 /* index of the low-RAM entry */
#define HIGHRAM_E820_ENTRIES 3 /* index of the above-4G entry */

/* see below e820 default mapping for more info about ctx->lowmem */
/* Guest-physical load offsets; all but the kernel sit at the top of lowmem. */
#define RAMDISK_LOAD_OFF(ctx) (ctx->lowmem - 4*MB)
#define BOOTARGS_LOAD_OFF(ctx) (ctx->lowmem - 8*KB)
#define KERNEL_ENTRY_OFF(ctx) (ctx->lowmem - 6*KB)
#define ZEROPAGE_LOAD_OFF(ctx) (ctx->lowmem - 4*KB)
#define KERNEL_LOAD_OFF(ctx) (16*MB)

/* Defines a single entry in an E820 memory map. */
struct e820_entry {
    /** The base address of the memory range. */
    uint64_t baseaddr;
    /** The length of the memory range. */
    uint64_t length;
    /** The type of memory region. */
    uint32_t type;
} __attribute__((packed));

/* The real mode kernel header, refer to Documentation/x86/boot.txt */
struct _zeropage {
    uint8_t pad1[0x1e8]; /* 0x000 */
    uint8_t e820_nentries; /* 0x1e8 */
    uint8_t pad2[0x8]; /* 0x1e9 */

    /* Subset of the bzImage setup header the DM needs to touch. */
    struct {
        uint8_t hdr_pad1[0x1f]; /* 0x1f1 */
        uint8_t loader_type; /* 0x210 */
        uint8_t load_flags; /* 0x211 */
        uint8_t hdr_pad2[0x2]; /* 0x212 */
        uint32_t code32_start; /* 0x214 */
        uint32_t ramdisk_addr; /* 0x218 */
        uint32_t ramdisk_size; /* 0x21c */
        uint8_t hdr_pad3[0x8]; /* 0x220 */
        uint32_t bootargs_addr; /* 0x228 */
        uint8_t hdr_pad4[0x3c]; /* 0x22c */
    } __attribute__((packed)) hdr;

    uint8_t pad3[0x68]; /* 0x268 */
    struct e820_entry e820[0x80]; /* 0x2d0 */
    uint8_t pad4[0x330]; /* 0xcd0 */
} __attribute__((packed));

/* Guest software configuration captured by the acrn_parse_* helpers. */
static char bootargs[STR_LEN];
static char ramdisk_path[STR_LEN];
static char kernel_path[STR_LEN];
static int with_bootargs; /* non-zero once bootargs were supplied */
static int with_ramdisk; /* non-zero once a ramdisk path was supplied */
static int with_kernel; /* non-zero once a kernel path was supplied */
static int ramdisk_size; /* bytes, set by acrn_prepare_ramdisk() */
static int kernel_size; /* bytes, set by acrn_prepare_kernel() */
/*
* Default e820 mem map:
*
* there is reserved memory hole for PCI hole and APIC etc
* so the memory layout could be separated into lowmem & highmem.
* - if request memory size <= ctx->lowmem_limit, then there is only
* map[0]:0~ctx->lowmem for RAM
* ctx->lowmem = request_memory_size
* - if request memory size > ctx->lowmem_limit, then there are
* map[0]:0~ctx->lowmem_limit & map[3]:4G~ctx->highmem for RAM
* ctx->highmem = request_memory_size - ctx->lowmem_limit
*
* Begin End Type Length
* 0: 0 - lowmem RAM lowmem
* 1: lowmem - bff_fffff (reserved) 0xc00_00000-lowmem
* 2: 0xc00_00000 - dff_fffff PCI hole 512MB
* 3: 0xe00_00000 - fff_fffff (reserved) 512MB
* 4: 1_000_00000 - highmem RAM highmem-4G
*/
/*
 * Template e820 map copied by acrn_create_e820_table(); entries 0, 1
 * and 3 are patched there with the actual lowmem/highmem sizes, so the
 * lengths below only matter as defaults.
 */
const struct e820_entry e820_default_entries[NUM_E820_ENTRIES] = {
    { /* 0 to lowmem */
        .baseaddr = 0x00000000,
        .length = 0x49000000,
        .type = E820_TYPE_RAM
    },
    { /* lowmem to lowmem_limit*/
        .baseaddr = 0x49000000,
        .length = 0x77000000,
        .type = E820_TYPE_RESERVED
    },
    { /* lowmem_limit to 4G */
        .baseaddr = 0xe0000000,
        .length = 0x20000000,
        .type = E820_TYPE_RESERVED
    },
    { /* above 4G: becomes RAM sized to ctx->highmem when highmem > 0 */
        .baseaddr = 0x100000000,
        .length = 0x000100000,
        .type = E820_TYPE_RESERVED
    },
};
/*
 * Determine the size in bytes of the real-mode setup section of the
 * bzImage already loaded at KERNEL_LOAD_OFF, by scanning for SETUP_SIG
 * on 4-byte boundaries and rounding the hit offset up to a 512-byte
 * sector.  Returns the size, or -1 if no signature is found in
 * (1024, 0x8000).
 */
static int
acrn_get_bzimage_setup_size(struct vmctx *ctx)
{
    uint32_t *scan;
    uint32_t location, setup_sectors;
    int size = -1;

    scan = (uint32_t *)(ctx->baseaddr + KERNEL_LOAD_OFF(ctx)) + 1024/4;
    for (location = 1024; location < 0x8000; location += 4, scan++) {
        if (*scan == SETUP_SIG)
            break;
    }

    /* setup size must be at least 1024 bytes and smaller than 0x8000 */
    if (location < 0x8000 && location > 1024) {
        setup_sectors = (location + 511) / 512;
        size = setup_sectors*512;
        printf("SW_LOAD: found setup sig @ 0x%08x, "
            "setup_size is 0x%08x\n",
            location, size);
    } else
        printf("SW_LOAD ERR: could not get setup "
            "size in kernel %s\n",
            kernel_path);

    return size;
}
/*
 * Check that the image file at 'path' exists and is readable by
 * attempting to open it.  Returns 0 when readable, -1 otherwise.
 */
static int
check_image(char *path)
{
    FILE *fp = fopen(path, "r");

    if (fp == NULL)
        return -1;

    fclose(fp);
    return 0;
}
/*
 * Record the guest kernel image path given on the command line.
 *
 * Returns 0 on success; -1 if the path is too long for kernel_path or
 * the file cannot be opened.
 *
 * Fix: the original validated the file with
 * assert(check_image(...) == 0) -- a side-effecting assert that
 * disappears under NDEBUG, silently accepting an unreadable path.
 * Validation is now explicit and reports failure via the return value.
 */
int
acrn_parse_kernel(char *arg)
{
    size_t len = strlen(arg);

    if (len >= STR_LEN)
        return -1;

    strncpy(kernel_path, arg, len);
    kernel_path[len] = '\0';

    if (check_image(kernel_path) != 0)
        return -1;

    with_kernel = 1;
    printf("SW_LOAD: get kernel path %s\n", kernel_path);
    return 0;
}
/*
 * Record the guest ramdisk image path given on the command line.
 *
 * Returns 0 on success; -1 if the path is too long for ramdisk_path or
 * the file cannot be opened.
 *
 * Fix: as in acrn_parse_kernel(), replace the side-effecting
 * assert(check_image(...) == 0) -- which is compiled out under NDEBUG --
 * with an explicit check reported through the return value.
 */
int
acrn_parse_ramdisk(char *arg)
{
    size_t len = strlen(arg);

    if (len >= STR_LEN)
        return -1;

    strncpy(ramdisk_path, arg, len);
    ramdisk_path[len] = '\0';

    if (check_image(ramdisk_path) != 0)
        return -1;

    with_ramdisk = 1;
    printf("SW_LOAD: get ramdisk path %s\n", ramdisk_path);
    return 0;
}
/*
 * Record the guest kernel command line given on the command line.
 * Returns 0 on success, -1 if the string is too long for bootargs.
 */
int
acrn_parse_bootargs(char *arg)
{
    int len = strlen(arg);

    if (len >= STR_LEN)
        return -1;

    strncpy(bootargs, arg, len);
    bootargs[len] = '\0';
    with_bootargs = 1;
    printf("SW_LOAD: get bootargs %s\n", bootargs);
    return 0;
}
/*
 * Copy the ramdisk image into guest memory at RAMDISK_LOAD_OFF and
 * record its size in ramdisk_size (consumed by acrn_prepare_zeropage()).
 *
 * Returns 0 on success; -1 if the file cannot be opened, does not fit
 * between RAMDISK_LOAD_OFF and BOOTARGS_LOAD_OFF, or cannot be read
 * completely.
 */
static int
acrn_prepare_ramdisk(struct vmctx *ctx)
{
    FILE *fp;
    int len, read;

    fp = fopen(ramdisk_path, "r");
    if (fp == NULL) {
        printf("SW_LOAD ERR: could not open ramdisk file %s\n",
            ramdisk_path);
        return -1;
    }

    /* Learn the file size via seek-to-end. */
    fseek(fp, 0, SEEK_END);
    len = ftell(fp);
    if (len > (BOOTARGS_LOAD_OFF(ctx) - RAMDISK_LOAD_OFF(ctx))) {
        printf("SW_LOAD ERR: the size of ramdisk file is too big"
            " file len=0x%x, limit is 0x%lx\n", len,
            BOOTARGS_LOAD_OFF(ctx) - RAMDISK_LOAD_OFF(ctx));
        fclose(fp);
        return -1;
    }

    ramdisk_size = len;

    /* Rewind and copy the whole file straight into guest memory. */
    fseek(fp, 0, SEEK_SET);
    read = fread(ctx->baseaddr + RAMDISK_LOAD_OFF(ctx),
        sizeof(char), len, fp);
    if (read < len) {
        printf("SW_LOAD ERR: could not read the whole ramdisk file,"
            " file len=%d, read %d\n", len, read);
        fclose(fp);
        return -1;
    }
    fclose(fp);
    printf("SW_LOAD: ramdisk %s size %d copied to guest 0x%lx\n",
        ramdisk_path, ramdisk_size, RAMDISK_LOAD_OFF(ctx));

    return 0;
}
/*
 * Copy the kernel image into guest memory at KERNEL_LOAD_OFF and record
 * its size in kernel_size.
 *
 * Returns 0 on success; -1 if the file cannot be opened, would overlap
 * RAMDISK_LOAD_OFF, or cannot be read completely.
 */
static int
acrn_prepare_kernel(struct vmctx *ctx)
{
    FILE *fp;
    int len, nread;
    int ret = -1;

    fp = fopen(kernel_path, "r");
    if (fp == NULL) {
        printf("SW_LOAD ERR: could not open kernel file %s\n",
            kernel_path);
        return -1;
    }

    /* Learn the file size via seek-to-end. */
    fseek(fp, 0, SEEK_END);
    len = ftell(fp);
    if ((len + KERNEL_LOAD_OFF(ctx)) > RAMDISK_LOAD_OFF(ctx)) {
        printf("SW_LOAD ERR: need big system memory to fit image\n");
        goto done;
    }

    kernel_size = len;

    /* Rewind and copy the whole file straight into guest memory. */
    fseek(fp, 0, SEEK_SET);
    nread = fread(ctx->baseaddr + KERNEL_LOAD_OFF(ctx),
        sizeof(char), len, fp);
    if (nread < len) {
        printf("SW_LOAD ERR: could not read the whole kernel file,"
            " file len=%d, read %d\n", len, nread);
        goto done;
    }

    printf("SW_LOAD: kernel %s size %d copied to guest 0x%lx\n",
        kernel_path, kernel_size, KERNEL_LOAD_OFF(ctx));
    ret = 0;

done:
    fclose(fp);
    return ret;
}
/*
 * Build the guest e820 map in 'e820' from the default template, patched
 * with the actual lowmem/highmem sizes from ctx.  Returns the number of
 * entries written (always NUM_E820_ENTRIES).
 */
static uint32_t
acrn_create_e820_table(struct vmctx *ctx, struct e820_entry *e820)
{
    uint32_t k;

    memcpy(e820, e820_default_entries, sizeof(e820_default_entries));

    if (ctx->lowmem > 0) {
        /* RAM up to ctx->lowmem; reserve the rest up to lowmem_limit. */
        e820[LOWRAM_E820_ENTRIES].length = ctx->lowmem;
        e820[LOWRAM_E820_ENTRIES+1].baseaddr = ctx->lowmem;
        e820[LOWRAM_E820_ENTRIES+1].length =
            ctx->lowmem_limit - ctx->lowmem;
    }

    if (ctx->highmem > 0) {
        /* Flip the above-4G entry from reserved to RAM and size it. */
        e820[HIGHRAM_E820_ENTRIES].type = E820_TYPE_RAM;
        e820[HIGHRAM_E820_ENTRIES].length = ctx->highmem;
    }

    printf("SW_LOAD: build e820 %d entries to addr: %p\n",
        NUM_E820_ENTRIES, (void *)e820);

    for (k = 0; k < NUM_E820_ENTRIES; k++)
        printf("SW_LOAD: entry[%d]: addr 0x%016lx, size 0x%016lx, "
            " type 0x%x\n",
            k, e820[k].baseaddr,
            e820[k].length,
            e820[k].type);

    return NUM_E820_ENTRIES;
}
/*
 * Construct the Linux boot-protocol "zero page" at ZEROPAGE_LOAD_OFF
 * from the setup header of the kernel image already loaded at
 * KERNEL_LOAD_OFF, filling in ramdisk location/size, bootargs address,
 * loader type/flags and the e820 map.
 *
 * Always returns 0.  NOTE(review): the 'setup_size' parameter is never
 * used in this function -- confirm whether it can be dropped from the
 * (static) signature or should be folded into code32_start handling.
 */
static int
acrn_prepare_zeropage(struct vmctx *ctx, int setup_size)
{
    struct _zeropage *zeropage = (struct _zeropage *)
        (ctx->baseaddr + ZEROPAGE_LOAD_OFF(ctx));
    struct _zeropage *kernel_load = (struct _zeropage *)
        (ctx->baseaddr + KERNEL_LOAD_OFF(ctx));

    /* clear the zeropage */
    memset(zeropage, 0, 2*KB);

    /* copy part of the header into the zero page */
    memcpy(&(zeropage->hdr), &(kernel_load->hdr), sizeof(zeropage->hdr));

    if (with_ramdisk) {
        /*Copy ramdisk load_addr and size in zeropage header structure*/
        zeropage->hdr.ramdisk_addr = (uint32_t)
            ((uint64_t)RAMDISK_LOAD_OFF(ctx));
        zeropage->hdr.ramdisk_size = (uint32_t)ramdisk_size;

        printf("SW_LOAD: build zeropage for ramdisk addr: 0x%x,"
            " size: %d\n", zeropage->hdr.ramdisk_addr,
            zeropage->hdr.ramdisk_size);
    }

    /* Copy bootargs load_addr in zeropage header structure */
    zeropage->hdr.bootargs_addr = (uint32_t)
        ((uint64_t)BOOTARGS_LOAD_OFF(ctx));
    printf("SW_LOAD: build zeropage for bootargs addr: 0x%x\n",
        zeropage->hdr.bootargs_addr);

    /* set constant arguments in zero page */
    zeropage->hdr.loader_type = 0xff; /* "undefined" bootloader ID */
    zeropage->hdr.load_flags |= (1<<5); /* quiet */

    /* Create/add e820 table entries in zeropage */
    zeropage->e820_nentries = acrn_create_e820_table(ctx, zeropage->e820);

    return 0;
}
/*
 * Top-level guest software loader: publishes the lowmem size at
 * GUEST_CFG_OFFSET, then copies in whichever of bootargs, ramdisk and
 * kernel were supplied on the command line, builds the zero page, and
 * stores the kernel entry address at KERNEL_ENTRY_OFF for the firmware
 * to jump to.
 *
 * Returns 0 on success or the first failing helper's error.
 */
int
acrn_sw_load(struct vmctx *ctx)
{
    int ret, setup_size;
    uint64_t *cfg_offset = (uint64_t *)(ctx->baseaddr + GUEST_CFG_OFFSET);

    *cfg_offset = ctx->lowmem;

    if (with_bootargs) {
        /* bootargs length was bounded by STR_LEN in acrn_parse_bootargs */
        strcpy(ctx->baseaddr + BOOTARGS_LOAD_OFF(ctx), bootargs);
        printf("SW_LOAD: bootargs copied to guest 0x%lx\n",
            BOOTARGS_LOAD_OFF(ctx));
    }

    if (with_ramdisk) {
        ret = acrn_prepare_ramdisk(ctx);
        if (ret)
            return ret;
    }

    if (with_kernel) {
        uint64_t *kernel_entry_addr =
            (uint64_t *)(ctx->baseaddr + KERNEL_ENTRY_OFF(ctx));

        ret = acrn_prepare_kernel(ctx);
        if (ret)
            return ret;
        setup_size = acrn_get_bzimage_setup_size(ctx);
        if (setup_size <= 0)
            return -1;
        /* 32-bit entry: protected-mode code begins after the setup
         * section plus the 0x200 legacy jump offset. */
        *kernel_entry_addr = (uint64_t)
            (KERNEL_LOAD_OFF(ctx) + setup_size + 0x200);
        ret = acrn_prepare_zeropage(ctx, setup_size);
        if (ret)
            return ret;

        printf("SW_LOAD: zeropage prepared @ 0x%lx, "
            "kernel_entry_addr=0x%lx\n",
            ZEROPAGE_LOAD_OFF(ctx), *kernel_entry_addr);
    }

    return 0;
}

704
devicemodel/core/vmmapi.c Normal file
View File

@@ -0,0 +1,704 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/uio.h>
#include <sys/user.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <assert.h>
#include <string.h>
#include <ctype.h>
#include <fcntl.h>
#include <unistd.h>
#include "types.h"
#include "cpuset.h"
#include "segments.h"
#include "specialreg.h"
#include "vmm.h"
#include "vhm_ioctl_defs.h"
#include "vmmapi.h"
#include "mevent.h"
#define MB (1024 * 1024UL)
#define GB (1024 * 1024 * 1024UL)
#define MAP_NOCORE 0
#define MAP_ALIGNED_SUPER 0
/*
* Size of the guard region before and after the virtual address space
* mapping the guest physical memory. This must be a multiple of the
* superpage size for performance reasons.
*/
#define VM_MMAP_GUARD_SIZE (4 * MB)
#define PROT_RW (PROT_READ | PROT_WRITE)
#define PROT_ALL (PROT_READ | PROT_WRITE | PROT_EXEC)
#define SUPPORT_VHM_API_VERSION_MAJOR 1
#define SUPPORT_VHM_API_VERSION_MINOR 0
/*
 * Create a VM named 'name'.  Currently a stub -- actual VM creation is
 * performed by the IC_CREATE_VM ioctl in vm_open() -- so this always
 * succeeds and returns 0.
 */
int
vm_create(const char *name)
{
    /* TODO: specific part for vm create */
    return 0;
}
/*
 * Query the VHM driver's API version over 'fd' and verify it matches
 * the version this device model was built against.  Returns 0 when
 * compatible, -1 on ioctl failure or version mismatch.
 */
static int
check_api(int fd)
{
    struct api_version api_version;

    if (ioctl(fd, IC_GET_API_VERSION, &api_version)) {
        fprintf(stderr, "failed to get vhm api version\n");
        return -1;
    }

    if (api_version.major_version != SUPPORT_VHM_API_VERSION_MAJOR ||
        api_version.minor_version != SUPPORT_VHM_API_VERSION_MINOR) {
        fprintf(stderr, "not support vhm api version\n");
        return -1;
    }

    printf("VHM api version %d.%d\n", api_version.major_version,
        api_version.minor_version);
    return 0;
}
/* Fd for /dev/acrn_vhm; -1 while no VM is open (set in vm_open, cleared in vm_close). */
static int devfd = -1;
/*
 * Open /dev/acrn_vhm, verify the driver API version, and create a VM
 * named 'name' (retrying the IC_CREATE_VM ioctl up to 10 times, 0.5s
 * apart).  Returns a newly allocated vmctx on success, NULL on failure.
 *
 * Fixes: on the error path the device fd was leaked and 'devfd' was
 * left != -1, so every subsequent vm_open() aborted on
 * assert(devfd == -1); the fd is now closed and devfd reset.  Also
 * zero-initialize 'create_vm' instead of passing stack garbage to the
 * kernel.
 */
struct vmctx *
vm_open(const char *name)
{
    struct vmctx *ctx;
    struct acrn_create_vm create_vm;
    int error, retry = 10;

    memset(&create_vm, 0, sizeof(create_vm));
    ctx = malloc(sizeof(struct vmctx) + strlen(name) + 1);
    assert(ctx != NULL);
    assert(devfd == -1);

    devfd = open("/dev/acrn_vhm", O_RDWR|O_CLOEXEC);
    if (devfd == -1) {
        fprintf(stderr, "Could not open /dev/acrn_vhm\n");
        goto err;
    }

    if (check_api(devfd) < 0)
        goto err;

    ctx->fd = devfd;
    ctx->memflags = 0;
    ctx->lowmem_limit = 2 * GB;
    /* The name is stored in the over-allocation right after the struct. */
    ctx->name = (char *)(ctx + 1);
    strcpy(ctx->name, name);

    while (retry > 0) {
        error = ioctl(ctx->fd, IC_CREATE_VM, &create_vm);
        if (error == 0)
            break;
        usleep(500000);
        retry--;
    }

    if (error) {
        fprintf(stderr, "failed to create VM %s\n", ctx->name);
        goto err;
    }

    ctx->vmid = create_vm.vmid;
    return ctx;

err:
    if (devfd != -1) {
        close(devfd);
        devfd = -1;
    }
    free(ctx);
    return NULL;
}
/*
 * Release a vmctx obtained from vm_open(): close the device fd, mark
 * the module-level devfd as free, and free the context.  A NULL ctx is
 * a no-op.
 */
void
vm_close(struct vmctx *ctx)
{
    if (ctx == NULL)
        return;

    close(ctx->fd);
    devfd = -1;
    free(ctx);
}
/*
 * Register the shared I/O request buffer page (host virtual address
 * 'page_vma') with the VHM driver.  Returns 0 on success, -1 on
 * ioctl failure.
 */
int
vm_set_shared_io_page(struct vmctx *ctx, uint64_t page_vma)
{
    if (ioctl(ctx->fd, IC_SET_IOREQ_BUFFER, page_vma)) {
        fprintf(stderr, "failed to setup shared io page create VM %s\n",
            ctx->name);
        return -1;
    }

    return 0;
}
/*
 * Create an I/O request client for this VM; passes the ioctl result
 * through (presumably the new client id, negative on error -- confirm
 * against the VHM driver).
 */
int
vm_create_ioreq_client(struct vmctx *ctx)
{
    return ioctl(ctx->fd, IC_CREATE_IOREQ_CLIENT, 0);
}

/*
 * Destroy the I/O request client stored in ctx->ioreq_client; passes
 * the ioctl result through.
 */
int
vm_destroy_ioreq_client(struct vmctx *ctx)
{
    return ioctl(ctx->fd, IC_DESTROY_IOREQ_CLIENT, ctx->ioreq_client);
}
/*
 * Attach the calling thread to the VM's I/O request client so it can
 * service guest I/O requests.  Returns 0 on success; otherwise passes
 * the ioctl error through (1 indicates the client is being destroyed,
 * e.g. on a power-state change).
 */
int
vm_attach_ioreq_client(struct vmctx *ctx)
{
    int error;

    error = ioctl(ctx->fd, IC_ATTACH_IOREQ_CLIENT, ctx->ioreq_client);

    if (error) {
        fprintf(stderr, "attach ioreq client return %d "
            "(1 = destroying, could be triggered by Power State "
            "change, others = error)\n", error);
        return error;
    }

    return 0;
}
/*
 * Tell the VHM driver that the I/O request for 'vcpu' has been handled
 * by this VM's ioreq client.  Returns 0 on success, -1 on ioctl
 * failure.
 */
int
vm_notify_request_done(struct vmctx *ctx, int vcpu)
{
    struct ioreq_notify notify;

    memset(&notify, 0, sizeof(notify));
    notify.client_id = ctx->ioreq_client;
    notify.vcpu = vcpu;

    if (ioctl(ctx->fd, IC_NOTIFY_REQUEST_FINISH, &notify)) {
        fprintf(stderr, "failed: notify request finish\n");
        return -1;
    }

    return 0;
}
/* Ask the hypervisor to destroy the VM.  A NULL ctx is a no-op. */
void
vm_destroy(struct vmctx *ctx)
{
	if (ctx)
		ioctl(ctx->fd, IC_DESTROY_VM, NULL);
}
/*
 * Parse a memory-size option string into bytes.
 *
 * 'optarg' is a number (any base strtoul accepts) with an optional
 * unit suffix: 'b' or none = bytes, 'k' = KiB, 'm' = MiB, 'g' = GiB.
 * Sizes below 128 MB and unrecognized suffixes are rejected.
 * Returns 0 and stores the result in *ret_memsize, or -1 on error.
 */
int
vm_parse_memsize(const char *optarg, size_t *ret_memsize)
{
	char *endptr;
	size_t optval;
	int shift;

	optval = strtoul(optarg, &endptr, 0);
	switch (tolower((unsigned char)*endptr)) {
	case 'g':
		shift = 30;
		break;
	case 'm':
		shift = 20;
		break;
	case 'k':
		shift = 10;
		break;
	case 'b':
	case '\0': /* No unit. */
		shift = 0;
		/*
		 * fixed: this case previously fell through into the
		 * default error return, rejecting plain byte counts.
		 */
		break;
	default:
		/* Unrecognized unit. */
		return -1;
	}

	optval = optval << shift;
	if (optval < 128 * MB)
		return -1;

	*ret_memsize = optval;
	return 0;
}
/* Highest guest-physical address usable as "lowmem" (below 4G split). */
uint32_t
vm_get_lowmem_limit(struct vmctx *ctx)
{
	return ctx->lowmem_limit;
}

/* Set the lowmem limit used by vm_setup_memory() to split low/high memory. */
void
vm_set_lowmem_limit(struct vmctx *ctx, uint32_t limit)
{
	ctx->lowmem_limit = limit;
}

/* Set the VM_MEM_F_* flags applied when guest memory is mapped. */
void
vm_set_memflags(struct vmctx *ctx, int flags)
{
	ctx->memflags = flags;
}

/* Return the current VM_MEM_F_* flags. */
int
vm_get_memflags(struct vmctx *ctx)
{
	return ctx->memflags;
}
/*
 * Allocate a guest memory segment in the hypervisor and mmap it into
 * the device model at 'base' + 'gpa'.
 *
 * Only VM_SYSMEM segments are supported; anything else returns -1.
 * On success *ptr receives the host VA; on mmap failure *ptr is set to
 * NULL and -1 is returned.  Other errors propagate the ioctl result.
 *
 * NOTE(review): the 'prot' parameter is currently ignored — the ioctl
 * uses PROT_ALL and the mmap uses PROT_RW unconditionally; confirm
 * whether callers ever need anything else.
 */
static int
vm_alloc_set_memseg(struct vmctx *ctx, int segid, size_t len,
	vm_paddr_t gpa, int prot, char *base, char **ptr)
{
	struct vm_memseg memseg;
	struct vm_memmap memmap;
	int error, flags;

	if (segid == VM_SYSMEM) {
		/* first, have the hypervisor allocate the backing memory */
		bzero(&memseg, sizeof(struct vm_memseg));
		memseg.len = len;
		memseg.gpa = gpa;
		error = ioctl(ctx->fd, IC_ALLOC_MEMSEG, &memseg);
		if (error)
			return error;

		/* then install the GPA mapping for the new segment */
		bzero(&memmap, sizeof(struct vm_memmap));
		memmap.type = segid;
		memmap.len = len;
		memmap.gpa = gpa;
		memmap.prot = PROT_ALL;
		error = ioctl(ctx->fd, IC_SET_MEMSEG, &memmap);
		if (error)
			return error;

		flags = MAP_SHARED | MAP_FIXED;
		/* exclude guest RAM from core dumps unless asked not to */
		if ((ctx->memflags & VM_MEM_F_INCORE) == 0)
			flags |= MAP_NOCORE;

		/* mmap into the process address space on the host */
		*ptr = mmap(base + gpa, len, PROT_RW, flags, ctx->fd, gpa);
		if (*ptr == MAP_FAILED) {
			*ptr = NULL;
			error = -1;
		}
	} else
		/* XXX: no VM_BOOTROM/VM_FRAMEBUFFER support*/
		error = -1;

	return error;
}
/*
 * Reserve and map all guest physical memory.
 *
 * Memory beyond ctx->lowmem_limit is placed in a second "highmem"
 * segment starting at 4GB.  A PROT_NONE reservation (with guard
 * regions on both sides) is taken first so the real mappings land at
 * fixed offsets from 'baseaddr'.
 *
 * Returns 0 on success, non-zero on failure.
 */
int
vm_setup_memory(struct vmctx *ctx, size_t memsize, enum vm_mmap_style vms)
{
	size_t objsize, total_len, len;
	vm_paddr_t gpa;
	int prot;
	char *baseaddr, *ptr;
	int error, flags;

	/* only fully pre-mapped guests are supported */
	assert(vms == VM_MMAP_ALL);

	/*
	 * If 'memsize' cannot fit entirely in the 'lowmem' segment then
	 * create another 'highmem' segment above 4GB for the remainder.
	 */
	if (memsize > ctx->lowmem_limit) {
		ctx->lowmem = ctx->lowmem_limit;
		ctx->highmem = memsize - ctx->lowmem_limit;
		objsize = 4*GB + ctx->highmem;
	} else {
		ctx->lowmem = memsize;
		ctx->highmem = 0;
		objsize = ctx->lowmem;
	}

	/*
	 * Stake out a contiguous region covering the guest physical memory
	 * and the adjoining guard regions.
	 */
	total_len = VM_MMAP_GUARD_SIZE + objsize + VM_MMAP_GUARD_SIZE;
	flags = MAP_PRIVATE | MAP_ANON | MAP_NOCORE | MAP_ALIGNED_SUPER;
	ptr = mmap(NULL, total_len, PROT_NONE, flags, -1, 0);
	if (ptr == MAP_FAILED)
		return -1;
	baseaddr = ptr + VM_MMAP_GUARD_SIZE;

	/* alloc & map for lowmem */
	if (ctx->lowmem > 0) {
		gpa = 0;
		len = ctx->lowmem;
		prot = PROT_ALL;
		error = vm_alloc_set_memseg(ctx, VM_SYSMEM, len, gpa, prot,
			baseaddr, &ctx->mmap_lowmem);
		if (error)
			goto err;
	}

	/* alloc & map for highmem */
	if (ctx->highmem > 0) {
		gpa = 4*GB;
		len = ctx->highmem;
		prot = PROT_ALL;
		error = vm_alloc_set_memseg(ctx, VM_SYSMEM, len, gpa, prot,
			baseaddr, &ctx->mmap_highmem);
		if (error)
			goto err;
	}

	ctx->baseaddr = baseaddr;
	return 0;

err:
	/* fixed: the PROT_NONE reservation used to leak on failure */
	munmap(ptr, total_len);
	return error;
}
/*
 * Unmap the guest memory regions established by vm_setup_memory().
 * Regions that were never mapped (size 0) are skipped.
 */
void
vm_unsetup_memory(struct vmctx *ctx)
{
	if (ctx->highmem > 0)
		munmap(ctx->mmap_highmem, ctx->highmem);
	if (ctx->lowmem > 0)
		munmap(ctx->mmap_lowmem, ctx->lowmem);
}
/*
* Returns a non-NULL pointer if [gaddr, gaddr+len) is entirely contained in
* the lowmem or highmem regions.
*
* In particular return NULL if [gaddr, gaddr+len) falls in guest MMIO region.
* The instruction emulation code depends on this behavior.
*/
void *
vm_map_gpa(struct vmctx *ctx, vm_paddr_t gaddr, size_t len)
{
	/* entirely inside lowmem: [0, ctx->lowmem) */
	if (ctx->lowmem > 0 &&
	    gaddr < ctx->lowmem &&
	    len <= ctx->lowmem &&
	    gaddr + len <= ctx->lowmem)
		return ctx->baseaddr + gaddr;

	/* entirely inside highmem: [4GB, 4GB + ctx->highmem) */
	if (ctx->highmem > 0 &&
	    gaddr >= 4*GB &&
	    gaddr < 4*GB + ctx->highmem &&
	    len <= ctx->highmem &&
	    gaddr + len <= 4*GB + ctx->highmem)
		return ctx->baseaddr + gaddr;

	/* spans or falls into an MMIO hole */
	return NULL;
}
/* Size in bytes of the below-4G guest memory region. */
size_t
vm_get_lowmem_size(struct vmctx *ctx)
{
	return ctx->lowmem;
}

/* Size in bytes of the above-4G guest memory region. */
size_t
vm_get_highmem_size(struct vmctx *ctx)
{
	return ctx->highmem;
}

/* Device memory segments are not implemented; always fails (MAP_FAILED). */
void *
vm_create_devmem(struct vmctx *ctx, int segid, const char *name, size_t len)
{
	return MAP_FAILED;
}
/* Start the VM running.  Returns the IC_START_VM ioctl result. */
int
vm_run(struct vmctx *ctx)
{
	int error;

	error = ioctl(ctx->fd, IC_START_VM, &ctx->vmid);

	return error;
}

/* Pause the VM.  The ioctl result is deliberately discarded. */
void
vm_pause(struct vmctx *ctx)
{
	ioctl(ctx->fd, IC_PAUSE_VM, &ctx->vmid);
}
/* Pending suspend disposition, polled by the main loop. */
static int suspend_mode = VM_SUSPEND_NONE;

/* Record the pending suspend mode. */
void
vm_set_suspend_mode(enum vm_suspend_how how)
{
	suspend_mode = how;
}

/* Return the pending suspend mode. */
int
vm_get_suspend_mode(void)
{
	return suspend_mode;
}

/*
 * Request a suspend: record 'how' and kick the mevent loop so the
 * request is noticed promptly.  Always returns 0.
 */
int
vm_suspend(struct vmctx *ctx, enum vm_suspend_how how)
{
	vm_set_suspend_mode(how);
	mevent_notify();

	return 0;
}
/*
 * Map a local APIC id to its vcpu index.  The mapping is the identity:
 * vcpu N is assigned APIC id N, so the argument is returned unchanged.
 */
int
vm_apicid2vcpu(struct vmctx *ctx, int apicid)
{
	return apicid;
}
/*
 * Inject an MSI into the guest with the given address/data pair.
 * Returns the IC_INJECT_MSI ioctl result.
 */
int
vm_lapic_msi(struct vmctx *ctx, uint64_t addr, uint64_t msg)
{
	struct acrn_msi_entry msi = {
		.msi_addr = addr,
		.msi_data = msg,
	};

	return ioctl(ctx->fd, IC_INJECT_MSI, &msi);
}
/* Assert (raise) a vIOAPIC interrupt line in the guest. */
int
vm_ioapic_assert_irq(struct vmctx *ctx, int irq)
{
	struct acrn_irqline ioapic_irq;

	bzero(&ioapic_irq, sizeof(ioapic_irq));
	ioapic_irq.intr_type = ACRN_INTR_TYPE_IOAPIC;
	ioapic_irq.ioapic_irq = irq;

	return ioctl(ctx->fd, IC_ASSERT_IRQLINE, &ioapic_irq);
}

/* Deassert (lower) a vIOAPIC interrupt line in the guest. */
int
vm_ioapic_deassert_irq(struct vmctx *ctx, int irq)
{
	struct acrn_irqline ioapic_irq;

	bzero(&ioapic_irq, sizeof(ioapic_irq));
	ioapic_irq.intr_type = ACRN_INTR_TYPE_IOAPIC;
	ioapic_irq.ioapic_irq = irq;

	return ioctl(ctx->fd, IC_DEASSERT_IRQLINE, &ioapic_irq);
}

/*
 * Common helper for ISA irq lines (routed to both PIC and IOAPIC);
 * 'call_id' selects the assert/deassert/pulse ioctl.
 */
static int
vm_isa_irq(struct vmctx *ctx, int irq, int ioapic_irq, unsigned long call_id)
{
	struct acrn_irqline isa_irq;

	bzero(&isa_irq, sizeof(isa_irq));
	isa_irq.intr_type = ACRN_INTR_TYPE_ISA;
	isa_irq.pic_irq = irq;
	isa_irq.ioapic_irq = ioapic_irq;

	return ioctl(ctx->fd, call_id, &isa_irq);
}
/* Assert an ISA irq on the vPIC/vIOAPIC routes. */
int
vm_isa_assert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
	return vm_isa_irq(ctx, atpic_irq, ioapic_irq, IC_ASSERT_IRQLINE);
}

/* Deassert an ISA irq. */
int
vm_isa_deassert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
	return vm_isa_irq(ctx, atpic_irq, ioapic_irq, IC_DEASSERT_IRQLINE);
}

/* Pulse (assert then deassert) an edge-triggered ISA irq. */
int
vm_isa_pulse_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
	return vm_isa_irq(ctx, atpic_irq, ioapic_irq, IC_PULSE_IRQLINE);
}
/*
 * Report the number of pins exposed by the virtual IOAPIC.
 * The count is fixed at 24; this call always succeeds.
 */
int
vm_ioapic_pincount(struct vmctx *ctx, int *pincount)
{
	*pincount = 24;
	return 0;
}
/*
 * Assign physical PCI device bus:slot.func to this VM for passthrough.
 * The BDF is packed as bus[15:8] dev[7:3] func[2:0].
 */
int
vm_assign_ptdev(struct vmctx *ctx, int bus, int slot, int func)
{
	uint16_t bdf;

	bdf = ((bus & 0xff) << 8) | ((slot & 0x1f) << 3) |
		(func & 0x7);

	return ioctl(ctx->fd, IC_ASSIGN_PTDEV, &bdf);
}

/* Return a previously assigned passthrough device. */
int
vm_unassign_ptdev(struct vmctx *ctx, int bus, int slot, int func)
{
	uint16_t bdf;

	bdf = ((bus & 0xff) << 8) | ((slot & 0x1f) << 3) |
		(func & 0x7);

	return ioctl(ctx->fd, IC_DEASSIGN_PTDEV, &bdf);
}
/*
 * Create a guest MMIO mapping (gpa -> hpa, 'len' bytes) for a
 * passthrough device.  The bus/slot/func arguments are unused here;
 * returns the IC_SET_MEMSEG ioctl result.
 */
int
vm_map_ptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
		  vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
	struct vm_memmap memmap = {
		.type = VM_MMIO,
		.len  = len,
		.gpa  = gpa,
		.hpa  = hpa,
		.prot = PROT_ALL,
	};

	return ioctl(ctx->fd, IC_SET_MEMSEG, &memmap);
}
/* Remap a passthrough device's MSI/MSI-X; msi_remap must be non-NULL. */
int
vm_setup_ptdev_msi(struct vmctx *ctx, struct acrn_vm_pci_msix_remap *msi_remap)
{
	if (!msi_remap)
		return -1;

	return ioctl(ctx->fd, IC_VM_PCI_MSIX_REMAP, msi_remap);
}

/* Register MSI-X interrupt info for a passthrough device. */
int
vm_set_ptdev_msix_info(struct vmctx *ctx, struct ic_ptdev_irq *ptirq)
{
	if (!ptirq)
		return -1;

	return ioctl(ctx->fd, IC_SET_PTDEV_INTR_INFO, ptirq);
}
/* Tear down 'vector_count' MSI-X vectors previously set for virt_bdf. */
int
vm_reset_ptdev_msix_info(struct vmctx *ctx, uint16_t virt_bdf,
	int vector_count)
{
	struct ic_ptdev_irq ptirq;

	bzero(&ptirq, sizeof(ptirq));
	ptirq.type = IRQ_MSIX;
	ptirq.virt_bdf = virt_bdf;
	ptirq.msix.vector_cnt = vector_count;

	return ioctl(ctx->fd, IC_RESET_PTDEV_INTR_INFO, &ptirq);
}

/* Register an INTx routing (virtual pin -> physical pin) for a ptdev. */
int
vm_set_ptdev_intx_info(struct vmctx *ctx, uint16_t virt_bdf, uint16_t phys_bdf,
	int virt_pin, int phys_pin, bool pic_pin)
{
	struct ic_ptdev_irq ptirq;

	bzero(&ptirq, sizeof(ptirq));
	ptirq.type = IRQ_INTX;
	ptirq.virt_bdf = virt_bdf;
	ptirq.phys_bdf = phys_bdf;
	ptirq.intx.virt_pin = virt_pin;
	ptirq.intx.phys_pin = phys_pin;
	ptirq.intx.is_pic_pin = pic_pin;

	return ioctl(ctx->fd, IC_SET_PTDEV_INTR_INFO, &ptirq);
}

/* Tear down the INTx routing previously set for 'virt_pin'. */
int
vm_reset_ptdev_intx_info(struct vmctx *ctx, int virt_pin, bool pic_pin)
{
	struct ic_ptdev_irq ptirq;

	bzero(&ptirq, sizeof(ptirq));
	ptirq.type = IRQ_INTX;
	ptirq.intx.virt_pin = virt_pin;
	ptirq.intx.is_pic_pin = pic_pin;

	return ioctl(ctx->fd, IC_RESET_PTDEV_INTR_INFO, &ptirq);
}
int
vm_create_vcpu(struct vmctx *ctx, int vcpu_id)
{
struct acrn_create_vcpu cv;
int error;
bzero(&cv, sizeof(struct acrn_create_vcpu));
cv.vcpu_id = vcpu_id;
error = ioctl(ctx->fd, IC_CREATE_VCPU, &cv);
return error;
}
/* Expose the VHM device fd (for callers doing mmap or direct ioctls). */
int
vm_get_device_fd(struct vmctx *ctx)
{
	return ctx->fd;
}

1013
devicemodel/hw/acpi/acpi.c Normal file

File diff suppressed because it is too large Load Diff

2481
devicemodel/hw/pci/ahci.c Normal file

File diff suppressed because it is too large Load Diff

2156
devicemodel/hw/pci/core.c Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,68 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/cdefs.h>
#include <pthread.h>
#include "pci_core.h"
/*
 * Emulated PCI host bridge using NetApp vendor/device ids (inherited
 * from bhyve).  Fills in the bridge's config space and PCIe capability.
 */
static int
pci_hostbridge_init(struct vmctx *ctx, struct pci_vdev *pi, char *opts)
{
	/* config space */
	pci_set_cfgdata16(pi, PCIR_VENDOR, 0x1275);	/* NetApp */
	pci_set_cfgdata16(pi, PCIR_DEVICE, 0x1275);	/* NetApp */
	pci_set_cfgdata8(pi, PCIR_HDRTYPE, PCIM_HDRTYPE_NORMAL);
	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_BRIDGE);
	pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_BRIDGE_HOST);

	pci_emul_add_pciecap(pi, PCIEM_TYPE_ROOT_PORT);

	return 0;
}

/* Same bridge but advertising AMD vendor id (device id is made up). */
static int
pci_amd_hostbridge_init(struct vmctx *ctx, struct pci_vdev *pi, char *opts)
{
	(void) pci_hostbridge_init(ctx, pi, opts);
	pci_set_cfgdata16(pi, PCIR_VENDOR, 0x1022);	/* AMD */
	pci_set_cfgdata16(pi, PCIR_DEVICE, 0x7432);	/* made up */

	return 0;
}

struct pci_vdev_ops pci_ops_amd_hostbridge = {
	.class_name = "amd_hostbridge",
	.vdev_init = pci_amd_hostbridge_init,
};
DEFINE_PCI_DEVTYPE(pci_ops_amd_hostbridge);

struct pci_vdev_ops pci_ops_hostbridge = {
	.class_name = "hostbridge",
	.vdev_init = pci_hostbridge_init,
};
DEFINE_PCI_DEVTYPE(pci_ops_hostbridge);

344
devicemodel/hw/pci/irq.c Normal file
View File

@@ -0,0 +1,344 @@
/*-
* Copyright (c) 2014 Hudson River Trading LLC
* Written by: John H. Baldwin <jhb@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#include <sys/param.h>
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include "types.h"
#include "acpi.h"
#include "vmm.h"
#include "vmmapi.h"
#include "inout.h"
#include "pci_core.h"
#include "irq.h"
#include "lpc.h"
/*
* Implement an 8 pin PCI interrupt router compatible with the router
* present on Intel's ICH10 chip.
*/
/* Fields in each PIRQ register. */
#define PIRQ_DIS 0x80
#define PIRQ_IRQ 0x0f
/* Only IRQs 3-7, 9-12, and 14-15 are permitted. */
#define PERMITTED_IRQS 0xdef8
#define IRQ_PERMITTED(irq) (((1U << (irq)) & PERMITTED_IRQS) != 0)
/* IRQ count to disable an IRQ. */
#define IRQ_DISABLED 0xff
/* Per-pin router state: routing register plus reservation/assertion counts. */
static struct pirq {
	uint8_t reg;		/* PIRQ_DIS | irq number (PIRQ_IRQ bits) */
	int use_count;		/* devices routed through this pin */
	int active_count;	/* devices currently asserting this pin */
	pthread_mutex_t lock;
} pirqs[8];

/* Per-IRQ sharing counts; IRQ_DISABLED marks IRQs unavailable for routing. */
static u_char irq_counts[16];
/* Non-zero while reservations (pci_irq_reserve/use) are still legal. */
static int pirq_cold = 1;

/*
 * Returns true if this pin is enabled with a valid IRQ. Setting the
 * register to a reserved IRQ causes interrupts to not be asserted as
 * if the pin was disabled.
 */
static bool
pirq_valid_irq(int reg)
{
	if (reg & PIRQ_DIS)
		return false;

	return IRQ_PERMITTED(reg & PIRQ_IRQ);
}
/* Read the routing register of PIRQ pin 'pin' (1-based). */
uint8_t
pirq_read(int pin)
{
	assert(pin > 0 && pin <= nitems(pirqs));
	return pirqs[pin - 1].reg;
}

/*
 * Write the routing register of PIRQ pin 'pin' (1-based).  If the pin
 * is currently asserted, the old IRQ is deasserted and the new one
 * asserted so the guest-visible line level follows the re-route.
 */
void
pirq_write(struct vmctx *ctx, int pin, uint8_t val)
{
	struct pirq *pirq;

	assert(pin > 0 && pin <= nitems(pirqs));
	pirq = &pirqs[pin - 1];
	pthread_mutex_lock(&pirq->lock);
	if (pirq->reg != (val & (PIRQ_DIS | PIRQ_IRQ))) {
		if (pirq->active_count != 0 && pirq_valid_irq(pirq->reg))
			vm_isa_deassert_irq(ctx, pirq->reg & PIRQ_IRQ, -1);
		pirq->reg = val & (PIRQ_DIS | PIRQ_IRQ);
		if (pirq->active_count != 0 && pirq_valid_irq(pirq->reg))
			vm_isa_assert_irq(ctx, pirq->reg & PIRQ_IRQ, -1);
	}
	pthread_mutex_unlock(&pirq->lock);
}
/*
 * Reserve an IRQ for exclusive legacy use — it will never be handed
 * out by pirq_alloc_pin().  Only legal before routing starts.
 */
void
pci_irq_reserve(int irq)
{
	assert(irq >= 0 && irq < nitems(irq_counts));
	assert(pirq_cold);
	assert(irq_counts[irq] == 0 || irq_counts[irq] == IRQ_DISABLED);
	irq_counts[irq] = IRQ_DISABLED;
}

/* Account one (shareable) user of 'irq'; only legal before routing starts. */
void
pci_irq_use(int irq)
{
	assert(irq >= 0 && irq < nitems(irq_counts));
	assert(pirq_cold);
	assert(irq_counts[irq] != IRQ_DISABLED);
	irq_counts[irq]++;
}
/* Initialize router state: all pins disabled, non-permitted IRQs blocked. */
void
pci_irq_init(struct vmctx *ctx)
{
	int i;

	for (i = 0; i < nitems(pirqs); i++) {
		pirqs[i].reg = PIRQ_DIS;
		pirqs[i].use_count = 0;
		pirqs[i].active_count = 0;
		pthread_mutex_init(&pirqs[i].lock, NULL);
	}
	for (i = 0; i < nitems(irq_counts); i++) {
		if (IRQ_PERMITTED(i))
			irq_counts[i] = 0;
		else
			irq_counts[i] = IRQ_DISABLED;
	}
}

/*
 * Re-arm the "cold" flag so reservations are legal again.
 * NOTE(review): the per-pin mutexes created in pci_irq_init() are not
 * destroyed here — confirm repeated init/deinit cycles are intended.
 */
void pci_irq_deinit(struct vmctx *ctx)
{
	pirq_cold = 1;
}
/*
 * Assert a device's legacy interrupt.  When routed through a PIRQ pin,
 * bump the pin's assertion count and raise the ISA IRQ only on the
 * first assertion (the line is shared); otherwise fall through to the
 * device's IOAPIC line.
 */
void
pci_irq_assert(struct pci_vdev *dev)
{
	struct pirq *pirq;

	if (dev->lintr.pirq_pin > 0) {
		assert(dev->lintr.pirq_pin <= nitems(pirqs));
		pirq = &pirqs[dev->lintr.pirq_pin - 1];
		pthread_mutex_lock(&pirq->lock);
		pirq->active_count++;
		if (pirq->active_count == 1 && pirq_valid_irq(pirq->reg)) {
			vm_isa_assert_irq(dev->vmctx, pirq->reg & PIRQ_IRQ,
				dev->lintr.ioapic_irq);
			pthread_mutex_unlock(&pirq->lock);
			return;
		}
		pthread_mutex_unlock(&pirq->lock);
	}
	vm_ioapic_assert_irq(dev->vmctx, dev->lintr.ioapic_irq);
}

/* Converse of pci_irq_assert(): lower the line when the last user drops. */
void
pci_irq_deassert(struct pci_vdev *dev)
{
	struct pirq *pirq;

	if (dev->lintr.pirq_pin > 0) {
		assert(dev->lintr.pirq_pin <= nitems(pirqs));
		pirq = &pirqs[dev->lintr.pirq_pin - 1];
		pthread_mutex_lock(&pirq->lock);
		pirq->active_count--;
		if (pirq->active_count == 0 && pirq_valid_irq(pirq->reg)) {
			vm_isa_deassert_irq(dev->vmctx, pirq->reg & PIRQ_IRQ,
				dev->lintr.ioapic_irq);
			pthread_mutex_unlock(&pirq->lock);
			return;
		}
		pthread_mutex_unlock(&pirq->lock);
	}
	vm_ioapic_deassert_irq(dev->vmctx, dev->lintr.ioapic_irq);
}
/*
 * Allocate the least-used PIRQ pin for a device and, if that pin is
 * not yet routed, route it to the least-used permitted IRQ.
 * Returns the 1-based pin number.
 */
int
pirq_alloc_pin(struct pci_vdev *dev)
{
	int best_count, best_irq, best_pin, irq, pin;

	/* routing has started; reservations are now illegal */
	pirq_cold = 0;

	/* Find the least-used PIRQ pin. */
	best_pin = 0;
	best_count = pirqs[0].use_count;
	for (pin = 1; pin < nitems(pirqs); pin++) {
		if (pirqs[pin].use_count < best_count) {
			best_pin = pin;
			best_count = pirqs[pin].use_count;
		}
	}
	pirqs[best_pin].use_count++;

	/* Second, route this pin to an IRQ. */
	if (pirqs[best_pin].reg == PIRQ_DIS) {
		best_irq = -1;
		best_count = 0;
		for (irq = 0; irq < nitems(irq_counts); irq++) {
			if (irq_counts[irq] == IRQ_DISABLED)
				continue;
			if (best_irq == -1 || irq_counts[irq] < best_count) {
				best_irq = irq;
				best_count = irq_counts[irq];
			}
		}
		assert(best_irq >= 0);
		irq_counts[best_irq]++;
		pirqs[best_pin].reg = best_irq;
	}

	return (best_pin + 1);
}

/* IRQ currently routed to 1-based PIRQ pin 'pin'. */
int
pirq_irq(int pin)
{
	assert(pin > 0 && pin <= nitems(pirqs));
	return (pirqs[pin - 1].reg & PIRQ_IRQ);
}
/* XXX: Generate $PIR table. */

/*
 * Emit the ACPI DSDT objects for the PIRQ router: a PIRV validation
 * method plus one link device (LNKA..LNKH) per pin with _STA/_PRS/
 * _CRS/_DIS/_SRS methods operating on the PIR[A-H] config registers.
 *
 * NOTE(review): asprintf() results are unchecked here; on allocation
 * failure irq_prs could stay NULL and be printed via "%s" below —
 * confirm whether that is acceptable for this tool.
 */
static void
pirq_dsdt(void)
{
	char *irq_prs, *old;
	int irq, pin;

	/* build the comma-separated list of permitted IRQs for _PRS */
	irq_prs = NULL;
	for (irq = 0; irq < nitems(irq_counts); irq++) {
		if (!IRQ_PERMITTED(irq))
			continue;
		if (irq_prs == NULL)
			asprintf(&irq_prs, "%d", irq);
		else {
			old = irq_prs;
			asprintf(&irq_prs, "%s,%d", old, irq);
			free(old);
		}
	}

	/*
	 * A helper method to validate a link register's value. This
	 * duplicates pirq_valid_irq().
	 */
	dsdt_line("");
	dsdt_line("Method (PIRV, 1, NotSerialized)");
	dsdt_line("{");
	dsdt_line(" If (And (Arg0, 0x%02X))", PIRQ_DIS);
	dsdt_line(" {");
	dsdt_line(" Return (0x00)");
	dsdt_line(" }");
	dsdt_line(" And (Arg0, 0x%02X, Local0)", PIRQ_IRQ);
	dsdt_line(" If (LLess (Local0, 0x03))");
	dsdt_line(" {");
	dsdt_line(" Return (0x00)");
	dsdt_line(" }");
	dsdt_line(" If (LEqual (Local0, 0x08))");
	dsdt_line(" {");
	dsdt_line(" Return (0x00)");
	dsdt_line(" }");
	dsdt_line(" If (LEqual (Local0, 0x0D))");
	dsdt_line(" {");
	dsdt_line(" Return (0x00)");
	dsdt_line(" }");
	dsdt_line(" Return (0x01)");
	dsdt_line("}");

	/* one PCI interrupt link device per router pin */
	for (pin = 0; pin < nitems(pirqs); pin++) {
		dsdt_line("");
		dsdt_line("Device (LNK%c)", 'A' + pin);
		dsdt_line("{");
		dsdt_line(" Name (_HID, EisaId (\"PNP0C0F\"))");
		dsdt_line(" Name (_UID, 0x%02X)", pin + 1);
		dsdt_line(" Method (_STA, 0, NotSerialized)");
		dsdt_line(" {");
		dsdt_line(" If (PIRV (PIR%c))", 'A' + pin);
		dsdt_line(" {");
		dsdt_line(" Return (0x0B)");
		dsdt_line(" }");
		dsdt_line(" Else");
		dsdt_line(" {");
		dsdt_line(" Return (0x09)");
		dsdt_line(" }");
		dsdt_line(" }");
		dsdt_line(" Name (_PRS, ResourceTemplate ()");
		dsdt_line(" {");
		dsdt_line(" IRQ (Level, ActiveLow, Shared, )");
		dsdt_line(" {%s}", irq_prs);
		dsdt_line(" })");
		dsdt_line(" Name (CB%02X, ResourceTemplate ()", pin + 1);
		dsdt_line(" {");
		dsdt_line(" IRQ (Level, ActiveLow, Shared, )");
		dsdt_line(" {}");
		dsdt_line(" })");
		dsdt_line(" CreateWordField (CB%02X, 0x01, CIR%c)",
			pin + 1, 'A' + pin);
		dsdt_line(" Method (_CRS, 0, NotSerialized)");
		dsdt_line(" {");
		dsdt_line(" And (PIR%c, 0x%02X, Local0)", 'A' + pin,
			PIRQ_DIS | PIRQ_IRQ);
		dsdt_line(" If (PIRV (Local0))");
		dsdt_line(" {");
		dsdt_line(" ShiftLeft (0x01, Local0, CIR%c)", 'A' + pin);
		dsdt_line(" }");
		dsdt_line(" Else");
		dsdt_line(" {");
		dsdt_line(" Store (0x00, CIR%c)", 'A' + pin);
		dsdt_line(" }");
		dsdt_line(" Return (CB%02X)", pin + 1);
		dsdt_line(" }");
		dsdt_line(" Method (_DIS, 0, NotSerialized)");
		dsdt_line(" {");
		dsdt_line(" Store (0x80, PIR%c)", 'A' + pin);
		dsdt_line(" }");
		dsdt_line(" Method (_SRS, 1, NotSerialized)");
		dsdt_line(" {");
		dsdt_line(" CreateWordField (Arg0, 0x01, SIR%c)", 'A' + pin);
		dsdt_line(" FindSetRightBit (SIR%c, Local0)", 'A' + pin);
		dsdt_line(" Store (Decrement (Local0), PIR%c)", 'A' + pin);
		dsdt_line(" }");
		dsdt_line("}");
	}
	free(irq_prs);
}
LPC_DSDT(pirq_dsdt);

455
devicemodel/hw/pci/lpc.c Normal file
View File

@@ -0,0 +1,455 @@
/*-
* Copyright (c) 2013 Neel Natu <neel@freebsd.org>
* Copyright (c) 2013 Tycho Nightingale <tycho.nightingale@pluribusnetworks.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/cdefs.h>
#include <sys/types.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include "vmm.h"
#include "vmmapi.h"
#include "acpi.h"
#include "inout.h"
#include "pci_core.h"
#include "irq.h"
#include "lpc.h"
#include "uart_core.h"
#define IO_ICU1 0x20
#define IO_ICU2 0xA0
SET_DECLARE(lpc_dsdt_set, struct lpc_dsdt);
SET_DECLARE(lpc_sysres_set, struct lpc_sysres);
#define ELCR_PORT 0x4d0
SYSRES_IO(ELCR_PORT, 2);
#define IO_TIMER1_PORT 0x40
#define NMISC_PORT 0x61
SYSRES_IO(NMISC_PORT, 1);
static struct pci_vdev *lpc_bridge;
#define LPC_UART_NUM 2
static struct lpc_uart_vdev {
struct uart_vdev *uart;
const char *opts;
int iobase;
int irq;
int enabled;
} lpc_uart_vdev[LPC_UART_NUM];
static const char *lpc_uart_names[LPC_UART_NUM] = { "COM1", "COM2" };
/*
* LPC device configuration is in the following form:
* <lpc_device_name>[,<options>]
* For e.g. "com1,stdio"
*/
/*
 * Parse an LPC device option string of the form
 * <lpc_device_name>[,<options>], e.g. "com1,stdio".  On a match, the
 * remainder of the (duplicated) string is stashed as the UART backend
 * options; the duplicate is intentionally kept alive in that case.
 * Returns 0 on success, -1 on failure.
 */
int
lpc_device_parse(const char *opts)
{
	int unit, error;
	char *str, *cpy, *lpcdev;

	error = -1;
	str = cpy = strdup(opts);
	/* fixed: a failed strdup() was previously dereferenced by strsep() */
	if (cpy == NULL)
		return error;
	lpcdev = strsep(&str, ",");
	if (lpcdev != NULL) {
		for (unit = 0; unit < LPC_UART_NUM; unit++) {
			if (strcasecmp(lpcdev, lpc_uart_names[unit]) == 0) {
				lpc_uart_vdev[unit].opts = str;
				error = 0;
				goto done;
			}
		}
	}

done:
	if (error)
		free(cpy);

	return error;
}
/* uart_core callback: pulse the UART's edge-triggered ISA IRQ. */
static void
lpc_uart_intr_assert(void *arg)
{
	struct lpc_uart_vdev *lpc_uart = arg;

	assert(lpc_uart->irq >= 0);

	/* interrupts can only be delivered once the LPC bridge exists */
	if (lpc_bridge)
		vm_isa_pulse_irq(lpc_bridge->vmctx,
				 lpc_uart->irq,
				 lpc_uart->irq);
}

/* uart_core callback for deassert; intentionally empty (see comment). */
static void
lpc_uart_intr_deassert(void *arg)
{
	/*
	 * The COM devices on the LPC bus generate edge triggered interrupts,
	 * so nothing more to do here.
	 */
}
/*
 * I/O port handler for a COM device: forward 1- or 2-byte accesses to
 * the uart_core register at (port - iobase).  Other widths fail (-1).
 */
static int
lpc_uart_io_handler(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
		    uint32_t *eax, void *arg)
{
	int offset;
	struct lpc_uart_vdev *lpc_uart = arg;

	offset = port - lpc_uart->iobase;

	switch (bytes) {
	case 1:
		if (in)
			*eax = uart_read(lpc_uart->uart, offset);
		else
			uart_write(lpc_uart->uart, offset, *eax);
		break;
	case 2:
		/* split a 16-bit access into two byte accesses (LE order) */
		if (in) {
			*eax = uart_read(lpc_uart->uart, offset);
			*eax |= uart_read(lpc_uart->uart, offset + 1) << 8;
		} else {
			uart_write(lpc_uart->uart, offset, *eax);
			uart_write(lpc_uart->uart, offset + 1, *eax >> 8);
		}
		break;
	default:
		return -1;
	}

	return 0;
}
/*
 * Bring up the LPC UARTs (COM1/COM2): allocate legacy io/irq
 * resources, create the uart_core instance, attach the configured
 * backend, and register the I/O port handler.
 *
 * NOTE(review): on a mid-loop failure, resources allocated for earlier
 * units (and the uart instance of the failing unit) are not released —
 * confirm whether callers treat this as fatal and exit.
 */
static int
lpc_init(struct vmctx *ctx)
{
	struct lpc_uart_vdev *lpc_uart;
	struct inout_port iop;
	const char *name;
	int unit, error;

	/* COM1 and COM2 */
	for (unit = 0; unit < LPC_UART_NUM; unit++) {
		lpc_uart = &lpc_uart_vdev[unit];
		name = lpc_uart_names[unit];

		if (uart_legacy_alloc(unit,
				      &lpc_uart->iobase,
				      &lpc_uart->irq) != 0) {
			fprintf(stderr, "Unable to allocate resources for "
			    "LPC device %s\n", name);
			return -1;
		}
		/* keep the UART's IRQ away from the PIRQ router */
		pci_irq_reserve(lpc_uart->irq);

		lpc_uart->uart = uart_init(lpc_uart_intr_assert,
				    lpc_uart_intr_deassert, lpc_uart);

		if (uart_set_backend(lpc_uart->uart, lpc_uart->opts) != 0) {
			fprintf(stderr, "Unable to initialize backend '%s' "
			    "for LPC device %s\n", lpc_uart->opts, name);
			return -1;
		}

		bzero(&iop, sizeof(struct inout_port));
		iop.name = name;
		iop.port = lpc_uart->iobase;
		iop.size = UART_IO_BAR_SIZE;
		iop.flags = IOPORT_F_INOUT;
		iop.handler = lpc_uart_io_handler;
		iop.arg = lpc_uart;

		error = register_inout(&iop);
		assert(error == 0);
		lpc_uart->enabled = 1;
	}

	return 0;
}
/* Tear down the LPC UARTs created by lpc_init(). */
static void
lpc_deinit(struct vmctx *ctx)
{
	struct lpc_uart_vdev *lpc_uart;
	int unit;

	/* COM1 and COM2 */
	for (unit = 0; unit < LPC_UART_NUM; unit++) {
		lpc_uart = &lpc_uart_vdev[unit];
		uart_legacy_dealloc(unit);
		uart_deinit(lpc_uart->uart);
		lpc_uart->uart = NULL;
		lpc_uart->enabled = 0;
	}
}
/*
 * Emit the DSDT for the ISA bridge: the LPCR config-space region with
 * the PIR[A-H] routing fields, every registered lpc_dsdt handler, and
 * the legacy PIC and timer devices.
 */
static void
pci_lpc_write_dsdt(struct pci_vdev *dev)
{
	struct lpc_dsdt **ldpp, *ldp;

	dsdt_line("");
	dsdt_line("Device (ISA)");
	dsdt_line("{");
	dsdt_line(" Name (_ADR, 0x%04X%04X)", dev->slot, dev->func);
	dsdt_line(" OperationRegion (LPCR, PCI_Config, 0x00, 0x100)");
	dsdt_line(" Field (LPCR, AnyAcc, NoLock, Preserve)");
	dsdt_line(" {");
	dsdt_line(" Offset (0x60),");
	dsdt_line(" PIRA, 8,");
	dsdt_line(" PIRB, 8,");
	dsdt_line(" PIRC, 8,");
	dsdt_line(" PIRD, 8,");
	dsdt_line(" Offset (0x68),");
	dsdt_line(" PIRE, 8,");
	dsdt_line(" PIRF, 8,");
	dsdt_line(" PIRG, 8,");
	dsdt_line(" PIRH, 8");
	dsdt_line(" }");
	dsdt_line("");

	/* run every handler registered via LPC_DSDT() */
	dsdt_indent(1);
	SET_FOREACH(ldpp, lpc_dsdt_set) {
		ldp = *ldpp;
		ldp->handler();
	}

	dsdt_line("");
	dsdt_line("Device (PIC)");
	dsdt_line("{");
	dsdt_line(" Name (_HID, EisaId (\"PNP0000\"))");
	dsdt_line(" Name (_CRS, ResourceTemplate ()");
	dsdt_line(" {");
	dsdt_indent(2);
	dsdt_fixed_ioport(IO_ICU1, 2);
	dsdt_fixed_ioport(IO_ICU2, 2);
	dsdt_fixed_irq(2);
	dsdt_unindent(2);
	dsdt_line(" })");
	dsdt_line("}");

	dsdt_line("");
	dsdt_line("Device (TIMR)");
	dsdt_line("{");
	dsdt_line(" Name (_HID, EisaId (\"PNP0100\"))");
	dsdt_line(" Name (_CRS, ResourceTemplate ()");
	dsdt_line(" {");
	dsdt_indent(2);
	dsdt_fixed_ioport(IO_TIMER1_PORT, 4);
	dsdt_fixed_irq(0);
	dsdt_unindent(2);
	dsdt_line(" })");
	dsdt_line("}");
	dsdt_unindent(1);

	dsdt_line("}");
}
/*
 * Emit an SIO device claiming every io/memory range registered with
 * SYSRES_IO()/SYSRES_MEM() so the guest OS treats them as reserved.
 */
static void
pci_lpc_sysres_dsdt(void)
{
	struct lpc_sysres **lspp, *lsp;

	dsdt_line("");
	dsdt_line("Device (SIO)");
	dsdt_line("{");
	dsdt_line(" Name (_HID, EisaId (\"PNP0C02\"))");
	dsdt_line(" Name (_CRS, ResourceTemplate ()");
	dsdt_line(" {");

	dsdt_indent(2);
	SET_FOREACH(lspp, lpc_sysres_set) {
		lsp = *lspp;
		switch (lsp->type) {
		case LPC_SYSRES_IO:
			dsdt_fixed_ioport(lsp->base, lsp->length);
			break;
		case LPC_SYSRES_MEM:
			dsdt_fixed_mem32(lsp->base, lsp->length);
			break;
		}
	}
	dsdt_unindent(2);

	dsdt_line(" })");
	dsdt_line("}");
}
LPC_DSDT(pci_lpc_sysres_dsdt);
/* Emit a PNP0501 device for each UART enabled by lpc_init(). */
static void
pci_lpc_uart_dsdt(void)
{
	struct lpc_uart_vdev *lpc_uart;
	int unit;

	for (unit = 0; unit < LPC_UART_NUM; unit++) {
		lpc_uart = &lpc_uart_vdev[unit];
		if (!lpc_uart->enabled)
			continue;
		dsdt_line("");
		dsdt_line("Device (%s)", lpc_uart_names[unit]);
		dsdt_line("{");
		dsdt_line(" Name (_HID, EisaId (\"PNP0501\"))");
		dsdt_line(" Name (_UID, %d)", unit + 1);
		dsdt_line(" Name (_CRS, ResourceTemplate ()");
		dsdt_line(" {");
		dsdt_indent(2);
		dsdt_fixed_ioport(lpc_uart->iobase, UART_IO_BAR_SIZE);
		dsdt_fixed_irq(lpc_uart->irq);
		dsdt_unindent(2);
		dsdt_line(" })");
		dsdt_line("}");
	}
}
LPC_DSDT(pci_lpc_uart_dsdt);
/*
 * Config-space write handler for the LPC bridge.  Only byte-wide
 * writes to the PIRQ routing registers are handled: offsets
 * 0x60-0x63 map to pins 1-4 and 0x68-0x6b to pins 5-8.  The write is
 * routed through pirq_write() and the register value read back into
 * config space.  Anything else is rejected (-1).
 */
static int
pci_lpc_cfgwrite(struct vmctx *ctx, int vcpu, struct pci_vdev *pi,
		 int coff, int bytes, uint32_t val)
{
	int pirq_pin;

	if (bytes != 1)
		return -1;

	if (coff >= 0x60 && coff <= 0x63)
		pirq_pin = coff - 0x60 + 1;
	else if (coff >= 0x68 && coff <= 0x6b)
		pirq_pin = coff - 0x68 + 5;
	else
		return -1;

	pirq_write(ctx, pirq_pin, val);
	pci_set_cfgdata8(pi, coff, pirq_read(pirq_pin));
	return 0;
}
/* The LPC bridge has no BARs; BAR writes are ignored. */
static void
pci_lpc_write(struct vmctx *ctx, int vcpu, struct pci_vdev *pi,
	      int baridx, uint64_t offset, int size, uint64_t value)
{
}

/* The LPC bridge has no BARs; BAR reads return 0. */
static uint64_t
pci_lpc_read(struct vmctx *ctx, int vcpu, struct pci_vdev *pi,
	     int baridx, uint64_t offset, int size)
{
	return 0;
}
/* Intel vendor/device ids advertised for the emulated LPC bridge. */
#define LPC_DEV 0x7000
#define LPC_VENDOR 0x8086

/*
 * Instantiate the (single) LPC bridge: must be on bus 0, initializes
 * the UARTs via lpc_init() and fills in the bridge config space.
 * Returns 0 on success, -1 on failure.
 */
static int
pci_lpc_init(struct vmctx *ctx, struct pci_vdev *pi, char *opts)
{
	/*
	 * Do not allow more than one LPC bridge to be configured.
	 */
	if (lpc_bridge != NULL) {
		fprintf(stderr, "Only one LPC bridge is allowed.\n");
		return -1;
	}

	/*
	 * Enforce that the LPC can only be configured on bus 0. This
	 * simplifies the ACPI DSDT because it can provide a decode for
	 * all legacy i/o ports behind bus 0.
	 */
	if (pi->bus != 0) {
		fprintf(stderr, "LPC bridge can be present only on bus 0.\n");
		return -1;
	}

	if (lpc_init(ctx) != 0)
		return -1;

	/* initialize config space */
	pci_set_cfgdata16(pi, PCIR_DEVICE, LPC_DEV);
	pci_set_cfgdata16(pi, PCIR_VENDOR, LPC_VENDOR);
	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_BRIDGE);
	pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_BRIDGE_ISA);

	lpc_bridge = pi;

	return 0;
}
/* Tear down the LPC bridge and its UARTs. */
static void
pci_lpc_deinit(struct vmctx *ctx, struct pci_vdev *pi, char *opts)
{
	lpc_bridge = NULL;
	lpc_deinit(ctx);
}
/*
 * Return a newly allocated ACPI path string ("\\_SB.PCI0.ISA.LNK<x>,",
 * trailing comma included for use in a package list) for 1-based PIRQ
 * pin 'pin'.  Returns NULL if no LPC bridge exists or allocation
 * fails.  The caller frees the result.
 */
char *
lpc_pirq_name(int pin)
{
	char *name;

	if (lpc_bridge == NULL)
		return NULL;
	/* fixed: a failed asprintf() previously returned an indeterminate
	 * pointer to the caller */
	if (asprintf(&name, "\\_SB.PCI0.ISA.LNK%c,", 'A' + pin - 1) < 0)
		return NULL;
	return name;
}
/*
 * Mirror the current PIRQ routing registers (pins 1-8) into the LPC
 * bridge's config space at 0x60-0x63 and 0x68-0x6b.
 */
void
lpc_pirq_routed(void)
{
	int pin;

	if (lpc_bridge == NULL)
		return;

	for (pin = 0; pin < 4; pin++)
		pci_set_cfgdata8(lpc_bridge, 0x60 + pin, pirq_read(pin + 1));
	for (pin = 0; pin < 4; pin++)
		pci_set_cfgdata8(lpc_bridge, 0x68 + pin, pirq_read(pin + 5));
}
/* Device-model registration for the "lpc" PCI device type. */
struct pci_vdev_ops pci_ops_lpc = {
	.class_name = "lpc",
	.vdev_init = pci_lpc_init,
	.vdev_deinit = pci_lpc_deinit,
	.vdev_write_dsdt = pci_lpc_write_dsdt,
	.vdev_cfgwrite = pci_lpc_cfgwrite,
	.vdev_barwrite = pci_lpc_write,
	.vdev_barread = pci_lpc_read
};
DEFINE_PCI_DEVTYPE(pci_ops_lpc);

File diff suppressed because it is too large Load Diff

115
devicemodel/hw/pci/uart.c Normal file
View File

@@ -0,0 +1,115 @@
/*-
* Copyright (c) 2012 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/cdefs.h>
#include <sys/types.h>
#include <stdio.h>
#include "dm.h"
#include "pci_core.h"
#include "uart_core.h"
/*
* Pick a PCI vid/did of a chip with a single uart at
* BAR0, that most versions of FreeBSD can understand:
* Siig CyberSerial 1-port.
*/
#define COM_VENDOR 0x131f
#define COM_DEV 0x2000
static void
pci_uart_intr_assert(void *arg)
{
	/* Raise the emulated UART's legacy PCI interrupt line. */
	struct pci_vdev *pdev = arg;

	pci_lintr_assert(pdev);
}
static void
pci_uart_intr_deassert(void *arg)
{
	/* Lower the emulated UART's legacy PCI interrupt line. */
	struct pci_vdev *pdev = arg;

	pci_lintr_deassert(pdev);
}
/*
 * BAR write handler: forward a one-byte guest write to the UART
 * register at 'offset'.  dev->arg holds the uart_vdev created in
 * pci_uart_init().
 */
static void
pci_uart_write(struct vmctx *ctx, int vcpu, struct pci_vdev *dev,
int baridx, uint64_t offset, int size, uint64_t value)
{
/* Registers live in BAR0 only and are byte-wide. */
assert(baridx == 0);
assert(size == 1);
uart_write(dev->arg, offset, value);
}
uint64_t
pci_uart_read(struct vmctx *ctx, int vcpu, struct pci_vdev *dev,
int baridx, uint64_t offset, int size)
{
	/* UART registers live in BAR0 and are accessed byte-wide. */
	assert(baridx == 0);
	assert(size == 1);

	/*
	 * dev->arg is the uart_vdev set up at init time; narrow the
	 * result to a byte before widening, as the original did.
	 */
	return (uint8_t)uart_read(dev->arg, offset);
}
/*
 * Create the emulated PCI UART: allocate an I/O BAR for the register
 * set, request a legacy interrupt, program the config space identity
 * (Siig CyberSerial 1-port), and attach the backend named by 'opts'.
 *
 * Returns 0 on success, -1 if the backend cannot be initialized.
 */
static int
pci_uart_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
{
	struct uart_vdev *uart;

	/* One I/O BAR (BAR0) holding the UART register set. */
	pci_emul_alloc_bar(dev, 0, PCIBAR_IO, UART_IO_BAR_SIZE);
	pci_lintr_request(dev);

	/* initialize config space */
	pci_set_cfgdata16(dev, PCIR_DEVICE, COM_DEV);
	pci_set_cfgdata16(dev, PCIR_VENDOR, COM_VENDOR);
	pci_set_cfgdata8(dev, PCIR_CLASS, PCIC_SIMPLECOMM);

	uart = uart_init(pci_uart_intr_assert, pci_uart_intr_deassert, dev);
	dev->arg = uart;

	if (uart_set_backend(uart, opts) != 0) {
		/*
		 * 'opts' may be NULL here; passing NULL to a %s
		 * conversion is undefined behavior, so substitute a
		 * placeholder in that case.
		 */
		fprintf(stderr, "Unable to initialize backend '%s' for "
		    "pci uart at %d:%d\n", opts != NULL ? opts : "(null)",
		    dev->slot, dev->func);
		return -1;
	}

	return 0;
}
/*
 * Device-model glue: registers the "uart" PCI device type.  BAR
 * accesses are forwarded to the uart core emulation.
 */
struct pci_vdev_ops pci_ops_com = {
.class_name = "uart",
.vdev_init = pci_uart_init,
.vdev_barwrite = pci_uart_write,
.vdev_barread = pci_uart_read
};
DEFINE_PCI_DEVTYPE(pci_ops_com);

View File

@@ -0,0 +1,790 @@
/*-
* Copyright (c) 2013 Chris Torek <torek @ torek net>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/uio.h>
#include <stdio.h>
#include <stdint.h>
#include <pthread.h>
#include "dm.h"
#include "pci_core.h"
#include "virtio.h"
/*
* Functions for dealing with generalized "virtual devices" as
* defined by <https://www.google.com/#output=search&q=virtio+spec>
*/
/*
* In case we decide to relax the "virtio struct comes at the
* front of virtio-based device struct" constraint, let's use
* this to convert.
*/
#define DEV_STRUCT(vs) ((void *)(vs))
/*
* Link a virtio_base to its constants, the virtio device, and
* the PCI emulation.
*/
/*
 * Wire a virtio_base to its ops table, its PCI shell and its queue
 * array, and point each queue back at the base.
 *
 * base           - common virtio state; must be the first member of
 *                  the device struct (checked by the assert below)
 * vops           - device-type constants and callbacks
 * pci_virtio_dev - the containing device struct
 * dev            - PCI emulation state; dev->arg is set to base
 * queues         - array of vops->nvq queue-info slots
 */
void
virtio_linkup(struct virtio_base *base, struct virtio_ops *vops,
void *pci_virtio_dev, struct pci_vdev *dev,
struct virtio_vq_info *queues)
{
int i;
/* base and pci_virtio_dev addresses must match */
assert((void *)base == pci_virtio_dev);
base->vops = vops;
base->dev = dev;
dev->arg = base;
base->queues = queues;
for (i = 0; i < vops->nvq; i++) {
queues[i].base = base;
queues[i].num = i;
}
}
/*
* Reset device (device-wide). This erases all queues, i.e.,
* all the queues become invalid (though we don't wipe out the
* internal pointers, we just clear the VQ_ALLOC flag).
*
* It resets negotiated features to "none".
*
* If MSI-X is enabled, this also resets all the vectors to NO_VECTOR.
*/
/*
 * Device-wide reset: invalidate every queue (clear VQ_ALLOC via
 * flags, zero the ring indices and PFN), drop negotiated features,
 * clear ISR state and route all MSI-X vectors to NO_VECTOR.
 */
void
virtio_reset_dev(struct virtio_base *base)
{
	struct virtio_vq_info *vq;
	int i, nvq;

	/* if (base->mtx) */
	/* assert(pthread_mutex_isowned_np(base->mtx)); */

	nvq = base->vops->nvq;
	for (i = 0; i < nvq; i++) {
		vq = &base->queues[i];
		vq->flags = 0;
		vq->last_avail = 0;
		vq->save_used = 0;
		vq->pfn = 0;
		vq->msix_idx = VIRTIO_MSI_NO_VECTOR;
	}
	base->negotiated_caps = 0;
	base->curq = 0;
	/* base->status = 0; -- redundant */
	if (base->isr)
		pci_lintr_deassert(base->dev);
	base->isr = 0;
	base->msix_cfg_idx = VIRTIO_MSI_NO_VECTOR;
}
/*
* Set I/O BAR (usually 0) to map PCI config registers.
*/
/*
 * Allocate an I/O BAR big enough for the legacy virtio register
 * window (through CFG1, i.e. including the MSI-X registers) plus the
 * device-specific config region of vops->cfgsize bytes.
 */
void
virtio_set_io_bar(struct virtio_base *base, int barnum)
{
size_t size;
/*
* ??? should we use CFG0 if MSI-X is disabled?
* Existing code did not...
*/
size = VIRTIO_CR_CFG1 + base->vops->cfgsize;
pci_emul_alloc_bar(base->dev, barnum, PCIBAR_IO, size);
}
/*
* Initialize MSI-X vector capabilities if we're to use MSI-X,
* or MSI capabilities if not.
*
* We assume we want one MSI-X vector per queue, here, plus one
* for the config vec.
*/
/*
 * Set up interrupt capabilities for the device.
 *
 * If use_msix is set, add an MSI-X capability with one vector per
 * queue plus one for config changes, on BAR 'barnum'; the reset under
 * the lock routes every vector to NO_VECTOR.  An MSI capability with
 * a single vector and a legacy INTx line are always added.
 *
 * Returns 0 on success, -1 if the MSI-X capability cannot be added.
 */
int
virtio_intr_init(struct virtio_base *base, int barnum, int use_msix)
{
int nvec;
if (use_msix) {
base->flags |= VIRTIO_USE_MSIX;
VIRTIO_BASE_LOCK(base);
virtio_reset_dev(base); /* set all vectors to NO_VECTOR */
VIRTIO_BASE_UNLOCK(base);
/* one vector per queue, plus one for config changes */
nvec = base->vops->nvq + 1;
if (pci_emul_add_msixcap(base->dev, nvec, barnum))
return -1;
} else
base->flags &= ~VIRTIO_USE_MSIX;
/* Only 1 MSI vector for acrn-dm */
pci_emul_add_msicap(base->dev, 1);
/* Legacy interrupts are mandatory for virtio devices */
pci_lintr_request(base->dev);
return 0;
}
/*
* Initialize MSI-X vector capabilities if we're to use MSI-X,
* or MSI capabilities if not.
*
* Wrapper function for virtio_intr_init() since by default we
* will use bar 1 for MSI-X.
*/
/*
 * Convenience wrapper around virtio_intr_init(): MSI-X tables are
 * placed on BAR 1 by default.
 */
int
virtio_interrupt_init(struct virtio_base *base, int use_msix)
{
return virtio_intr_init(base, 1, use_msix);
}
/*
* Initialize the currently-selected virtio queue (base->curq).
* The guest just gave us a page frame number, from which we can
* calculate the addresses of the queue.
*/
/*
 * Lay out the currently-selected queue (base->curq) from the page
 * frame number the guest wrote to the PFN register.  The legacy ring
 * layout is: descriptor table, then the avail ring (flags, idx,
 * qsize entries, used_event), then — page-aligned — the used ring.
 */
void
virtio_vq_init(struct virtio_base *base, uint32_t pfn)
{
struct virtio_vq_info *vq;
uint64_t phys;
size_t size;
char *vb;
vq = &base->queues[base->curq];
vq->pfn = pfn;
phys = (uint64_t)pfn << VRING_PAGE_BITS;
size = vring_size(vq->qsize);
/* Translate once; all ring pointers are derived from 'vb'. */
vb = paddr_guest2host(base->dev->vmctx, phys, size);
/* First page(s) are descriptors... */
vq->desc = (struct virtio_desc *)vb;
vb += vq->qsize * sizeof(struct virtio_desc);
/* ... immediately followed by "avail" ring (entirely uint16_t's) */
vq->avail = (struct vring_avail *)vb;
/* flags + idx + ring[qsize] + used_event, all uint16_t */
vb += (2 + vq->qsize + 1) * sizeof(uint16_t);
/* Then it's rounded up to the next page... */
vb = (char *)roundup2((uintptr_t)vb, VRING_ALIGN);
/* ... and the last page(s) are the used ring. */
vq->used = (struct vring_used *)vb;
/* Mark queue as allocated, and start at 0 when we use it. */
vq->flags = VQ_ALLOC;
vq->last_avail = 0;
vq->save_used = 0;
}
/*
* Helper inline for vq_getchain(): record the i'th "real"
* descriptor.
*/
static inline void
_vq_record(int i, volatile struct virtio_desc *vd, struct vmctx *ctx,
struct iovec *iov, int n_iov, uint16_t *flags) {
	/*
	 * Translate the guest buffer described by *vd into host iovec
	 * slot i; entries beyond the caller's array are silently
	 * dropped (vq_getchain still counts them).
	 */
	if (i < n_iov) {
		iov[i].iov_base = paddr_guest2host(ctx, vd->addr, vd->len);
		iov[i].iov_len = vd->len;
		if (flags != NULL)
			flags[i] = vd->flags;
	}
}
#define VQ_MAX_DESCRIPTORS 512 /* see below */
/*
* Examine the chain of descriptors starting at the "next one" to
* make sure that they describe a sensible request. If so, return
* the number of "real" descriptors that would be needed/used in
* acting on this request. This may be smaller than the number of
* available descriptors, e.g., if there are two available but
* they are two separate requests, this just returns 1. Or, it
* may be larger: if there are indirect descriptors involved,
* there may only be one descriptor available but it may be an
* indirect pointing to eight more. We return 8 in this case,
* i.e., we do not count the indirect descriptors, only the "real"
* ones.
*
* Basically, this vets the flags and vd_next field of each
* descriptor and tells you how many are involved. Since some may
* be indirect, this also needs the vmctx (in the pci_vdev
* at base->dev) so that it can find indirect descriptors.
*
* As we process each descriptor, we copy and adjust it (guest to
* host address wise, also using the vmtctx) into the given iov[]
* array (of the given size). If the array overflows, we stop
* placing values into the array but keep processing descriptors,
* up to VQ_MAX_DESCRIPTORS, before giving up and returning -1.
* So you, the caller, must not assume that iov[] is as big as the
* return value (you can process the same thing twice to allocate
* a larger iov array if needed, or supply a zero length to find
* out how much space is needed).
*
* If you want to verify the WRITE flag on each descriptor, pass a
* non-NULL "flags" pointer to an array of "uint16_t" of the same size
* as n_iov and we'll copy each flags field after unwinding any
* indirects.
*
* If some descriptor(s) are invalid, this prints a diagnostic message
* and returns -1. If no descriptors are ready now it simply returns 0.
*
* You are assumed to have done a vq_ring_ready() if needed (note
* that vq_has_descs() does one).
*/
int
vq_getchain(struct virtio_vq_info *vq, uint16_t *pidx,
struct iovec *iov, int n_iov, uint16_t *flags)
{
int i;
u_int ndesc, n_indir;
u_int idx, next;
volatile struct virtio_desc *vdir, *vindir, *vp;
struct vmctx *ctx;
struct virtio_base *base;
const char *name;
base = vq->base;
name = base->vops->name;
/*
* Note: it's the responsibility of the guest not to
* update vq->avail->idx until all of the descriptors
* the guest has written are valid (including all their
* next fields and vd_flags).
*
* Compute (last_avail - idx) in integers mod 2**16. This is
* the number of descriptors the device has made available
* since the last time we updated vq->last_avail.
*
* We just need to do the subtraction as an unsigned int,
* then trim off excess bits.
*/
idx = vq->last_avail;
ndesc = (uint16_t)((u_int)vq->avail->idx - idx);
if (ndesc == 0)
return 0;
if (ndesc > vq->qsize) {
/* XXX need better way to diagnose issues */
fprintf(stderr,
"%s: ndesc (%u) out of range, driver confused?\r\n",
name, (u_int)ndesc);
return -1;
}
/*
* Now count/parse "involved" descriptors starting from
* the head of the chain.
*
* To prevent loops, we could be more complicated and
* check whether we're re-visiting a previously visited
* index, but we just abort if the count gets excessive.
*/
ctx = base->dev->vmctx;
/* Head of the chain; *pidx is what vq_relchain() takes back. */
*pidx = next = vq->avail->ring[idx & (vq->qsize - 1)];
vq->last_avail++;
for (i = 0; i < VQ_MAX_DESCRIPTORS; next = vdir->next) {
if (next >= vq->qsize) {
fprintf(stderr,
"%s: descriptor index %u out of range, "
"driver confused?\r\n",
name, next);
return -1;
}
vdir = &vq->desc[next];
if ((vdir->flags & VRING_DESC_F_INDIRECT) == 0) {
/* Plain descriptor: record it directly. */
_vq_record(i, vdir, ctx, iov, n_iov, flags);
i++;
} else if ((base->vops->hv_caps &
VIRTIO_RING_F_INDIRECT_DESC) == 0) {
fprintf(stderr,
"%s: descriptor has forbidden INDIRECT flag, "
"driver confused?\r\n",
name);
return -1;
} else {
/* 16 == sizeof a descriptor table entry */
n_indir = vdir->len / 16;
if ((vdir->len & 0xf) || n_indir == 0) {
fprintf(stderr,
"%s: invalid indir len 0x%x, "
"driver confused?\r\n",
name, (u_int)vdir->len);
return -1;
}
vindir = paddr_guest2host(ctx,
vdir->addr, vdir->len);
/*
* Indirects start at the 0th, then follow
* their own embedded "next"s until those run
* out. Each one's indirect flag must be off
* (we don't really have to check, could just
* ignore errors...).
*/
next = 0;
for (;;) {
vp = &vindir[next];
if (vp->flags & VRING_DESC_F_INDIRECT) {
fprintf(stderr,
"%s: indirect desc has INDIR flag,"
" driver confused?\r\n",
name);
return -1;
}
_vq_record(i, vp, ctx, iov, n_iov, flags);
if (++i > VQ_MAX_DESCRIPTORS)
goto loopy;
if ((vp->flags & VRING_DESC_F_NEXT) == 0)
break;
next = vp->next;
if (next >= n_indir) {
fprintf(stderr,
"%s: invalid next %u > %u, "
"driver confused?\r\n",
name, (u_int)next, n_indir);
return -1;
}
}
}
/* End of chain: return the count of "real" descriptors. */
if ((vdir->flags & VRING_DESC_F_NEXT) == 0)
return i;
}
loopy:
fprintf(stderr,
"%s: descriptor loop? count > %d - driver confused?\r\n",
name, i);
return -1;
}
/*
* Return the currently-first request chain back to the available queue.
*
* (This chain is the one you handled when you called vq_getchain()
* and used its positive return value.)
*/
void
vq_retchain(struct virtio_vq_info *vq)
{
/* Undo the last_avail advance done by vq_getchain(). */
vq->last_avail--;
}
/*
* Return specified request chain to the guest, setting its I/O length
* to the provided value.
*
* (This chain is the one you handled when you called vq_getchain()
* and used its positive return value.)
*/
/*
 * Publish one completed chain on the used ring.
 *
 * idx   - head descriptor index, as returned via vq_getchain()'s pidx
 * iolen - number of bytes written to the guest's buffers
 */
void
vq_relchain(struct virtio_vq_info *vq, uint16_t idx, uint32_t iolen)
{
uint16_t uidx, mask;
volatile struct vring_used *vuh;
volatile struct virtio_used *vue;
/*
* Notes:
* - mask is N-1 where N is a power of 2 so computes x % N
* - vuh points to the "used" data shared with guest
* - vue points to the "used" ring entry we want to update
* - head is the same value we compute in vq_iovecs().
*
* (I apologize for the two fields named idx; the
* virtio spec calls the one that vue points to, "id"...)
*/
mask = vq->qsize - 1;
vuh = vq->used;
uidx = vuh->idx;
vue = &vuh->ring[uidx++ & mask];
vue->idx = idx;
vue->tlen = iolen;
/* Write the entry before bumping the shared index. */
vuh->idx = uidx;
}
/*
* Driver has finished processing "available" chains and calling
* vq_relchain on each one. If driver used all the available
* chains, used_all should be set.
*
* If the "used" index moved we may need to inform the guest, i.e.,
* deliver an interrupt. Even if the used index did NOT move we
* may need to deliver an interrupt, if the avail ring is empty and
* we are supposed to interrupt on empty.
*
* Note that used_all_avail is provided by the caller because it's
* a snapshot of the ring state when he decided to finish interrupt
* processing -- it's possible that descriptors became available after
* that point. (It's also typically a constant 1/True as well.)
*/
void
vq_endchains(struct virtio_vq_info *vq, int used_all_avail)
{
struct virtio_base *base;
uint16_t event_idx, new_idx, old_idx;
int intr;
/*
* Interrupt generation: if we're using EVENT_IDX,
* interrupt if we've crossed the event threshold.
* Otherwise interrupt is generated if we added "used" entries,
* but suppressed by VRING_AVAIL_F_NO_INTERRUPT.
*
* In any case, though, if NOTIFY_ON_EMPTY is set and the
* entire avail was processed, we need to interrupt always.
*/
base = vq->base;
/* Snapshot how far the used index moved since the last call. */
old_idx = vq->save_used;
vq->save_used = new_idx = vq->used->idx;
if (used_all_avail &&
(base->negotiated_caps & VIRTIO_F_NOTIFY_ON_EMPTY))
intr = 1;
else if (base->negotiated_caps & VIRTIO_RING_F_EVENT_IDX) {
event_idx = VQ_USED_EVENT_IDX(vq);
/*
* This calculation is per docs and the kernel
* (see src/sys/dev/virtio/virtio_ring.h).
*/
intr = (uint16_t)(new_idx - event_idx - 1) <
(uint16_t)(new_idx - old_idx);
} else {
intr = new_idx != old_idx &&
!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT);
}
if (intr)
vq_interrupt(base, vq);
}
/* Note: these are in sorted order to make for a fast search */
static struct config_reg {
uint16_t offset; /* register offset */
uint8_t size; /* size (bytes) */
uint8_t ro; /* true => reg is read only */
const char *name; /* name of reg */
} config_regs[] = {
/* Table must stay sorted by offset: virtio_find_cr() bisects it. */
{ VIRTIO_CR_HOSTCAP, 4, 1, "HOSTCAP" },
{ VIRTIO_CR_GUESTCAP, 4, 0, "GUESTCAP" },
{ VIRTIO_CR_PFN, 4, 0, "PFN" },
{ VIRTIO_CR_QNUM, 2, 1, "QNUM" },
{ VIRTIO_CR_QSEL, 2, 0, "QSEL" },
{ VIRTIO_CR_QNOTIFY, 2, 0, "QNOTIFY" },
{ VIRTIO_CR_STATUS, 1, 0, "STATUS" },
{ VIRTIO_CR_ISR, 1, 0, "ISR" },
{ VIRTIO_CR_CFGVEC, 2, 0, "CFGVEC" },
{ VIRTIO_CR_QVEC, 2, 0, "QVEC" },
};
/*
 * Binary-search config_regs[] (sorted by offset) for an exact
 * register-offset match; returns NULL when 'offset' does not name a
 * standard register.
 *
 * Use signed indices: with the original u_int variables, a probe for
 * an offset smaller than every table entry made "hi = mid - 1" wrap
 * to UINT_MAX at mid == 0, keeping "hi >= lo" true and sending the
 * next iteration out of the array bounds.
 */
static inline struct config_reg *
virtio_find_cr(int offset) {
	int hi, lo, mid;
	struct config_reg *cr;

	lo = 0;
	hi = (int)(sizeof(config_regs) / sizeof(*config_regs)) - 1;
	while (hi >= lo) {
		mid = (hi + lo) >> 1;
		cr = &config_regs[mid];
		if (cr->offset == offset)
			return cr;
		if (cr->offset < offset)
			lo = mid + 1;
		else
			hi = mid - 1;	/* safely reaches -1, ending the loop */
	}
	return NULL;
}
/*
* Handle pci config space reads.
* If it's to the MSI-X info, do that.
* If it's part of the virtio standard stuff, do that.
* Otherwise dispatch to the actual driver.
*/
uint64_t
virtio_pci_read(struct vmctx *ctx, int vcpu, struct pci_vdev *dev,
int baridx, uint64_t offset, int size)
{
struct virtio_base *base = dev->arg;
struct virtio_ops *vops;
struct config_reg *cr;
uint64_t virtio_config_size, max;
const char *name;
uint32_t newoff;
uint32_t value;
int error;
/* MSI-X table/PBA accesses are handled by the PCI emulation. */
if (base->flags & VIRTIO_USE_MSIX) {
if (baridx == pci_msix_table_bar(dev) ||
baridx == pci_msix_pba_bar(dev)) {
return pci_emul_msix_tread(dev, offset, size);
}
}
/* XXX probably should do something better than just assert() */
assert(baridx == 0);
if (base->mtx)
pthread_mutex_lock(base->mtx);
vops = base->vops;
name = vops->name;
/* Default: all-ones of the access width (returned on bad reads). */
value = size == 1 ? 0xff : size == 2 ? 0xffff : 0xffffffff;
if (size != 1 && size != 2 && size != 4)
goto bad;
/* Standard-register window is larger when MSI-X regs are mapped. */
if (pci_msix_enabled(dev))
virtio_config_size = VIRTIO_CR_CFG1;
else
virtio_config_size = VIRTIO_CR_CFG0;
if (offset >= virtio_config_size) {
/*
* Subtract off the standard size (including MSI-X
* registers if enabled) and dispatch to underlying driver.
* If that fails, fall into general code.
*/
newoff = offset - virtio_config_size;
max = vops->cfgsize ? vops->cfgsize : 0x100000000;
if (newoff + size > max)
goto bad;
error = (*vops->cfgread)(DEV_STRUCT(base), newoff,
size, &value);
if (!error)
goto done;
}
bad:
cr = virtio_find_cr(offset);
if (cr == NULL || cr->size != size) {
if (cr != NULL) {
/* offset must be OK, so size must be bad */
fprintf(stderr,
"%s: read from %s: bad size %d\r\n",
name, cr->name, size);
} else {
fprintf(stderr,
"%s: read from bad offset/size %jd/%d\r\n",
name, (uintmax_t)offset, size);
}
goto done;
}
switch (offset) {
case VIRTIO_CR_HOSTCAP:
value = vops->hv_caps;
break;
case VIRTIO_CR_GUESTCAP:
value = base->negotiated_caps;
break;
case VIRTIO_CR_PFN:
if (base->curq < vops->nvq)
value = base->queues[base->curq].pfn;
break;
case VIRTIO_CR_QNUM:
value = base->curq < vops->nvq ?
base->queues[base->curq].qsize : 0;
break;
case VIRTIO_CR_QSEL:
value = base->curq;
break;
case VIRTIO_CR_QNOTIFY:
value = 0; /* XXX */
break;
case VIRTIO_CR_STATUS:
value = base->status;
break;
case VIRTIO_CR_ISR:
value = base->isr;
base->isr = 0; /* a read clears this flag */
if (value)
pci_lintr_deassert(dev);
break;
case VIRTIO_CR_CFGVEC:
value = base->msix_cfg_idx;
break;
case VIRTIO_CR_QVEC:
value = base->curq < vops->nvq ?
base->queues[base->curq].msix_idx :
VIRTIO_MSI_NO_VECTOR;
break;
}
done:
if (base->mtx)
pthread_mutex_unlock(base->mtx);
return value;
}
/*
* Handle pci config space writes.
* If it's to the MSI-X info, do that.
* If it's part of the virtio standard stuff, do that.
* Otherwise dispatch to the actual driver.
*/
void
virtio_pci_write(struct vmctx *ctx, int vcpu, struct pci_vdev *dev,
int baridx, uint64_t offset, int size, uint64_t value)
{
struct virtio_base *base = dev->arg;
struct virtio_vq_info *vq;
struct virtio_ops *vops;
struct config_reg *cr;
uint64_t virtio_config_size, max;
const char *name;
uint32_t newoff;
int error;
/* MSI-X table/PBA accesses are handled by the PCI emulation. */
if (base->flags & VIRTIO_USE_MSIX) {
if (baridx == pci_msix_table_bar(dev) ||
baridx == pci_msix_pba_bar(dev)) {
pci_emul_msix_twrite(dev, offset, size, value);
return;
}
}
/* XXX probably should do something better than just assert() */
assert(baridx == 0);
if (base->mtx)
pthread_mutex_lock(base->mtx);
vops = base->vops;
name = vops->name;
if (size != 1 && size != 2 && size != 4)
goto bad;
/* Standard-register window is larger when MSI-X regs are mapped. */
if (pci_msix_enabled(dev))
virtio_config_size = VIRTIO_CR_CFG1;
else
virtio_config_size = VIRTIO_CR_CFG0;
if (offset >= virtio_config_size) {
/*
* Subtract off the standard size (including MSI-X
* registers if enabled) and dispatch to underlying driver.
*/
newoff = offset - virtio_config_size;
max = vops->cfgsize ? vops->cfgsize : 0x100000000;
if (newoff + size > max)
goto bad;
error = (*vops->cfgwrite)(DEV_STRUCT(base), newoff,
size, value);
if (!error)
goto done;
}
bad:
/* cr is non-NULL on every path that reaches the switch below. */
cr = virtio_find_cr(offset);
if (cr == NULL || cr->size != size || cr->ro) {
if (cr != NULL) {
/* offset must be OK, wrong size and/or reg is R/O */
if (cr->size != size)
fprintf(stderr,
"%s: write to %s: bad size %d\r\n",
name, cr->name, size);
if (cr->ro)
fprintf(stderr,
"%s: write to read-only reg %s\r\n",
name, cr->name);
} else {
fprintf(stderr,
"%s: write to bad offset/size %jd/%d\r\n",
name, (uintmax_t)offset, size);
}
goto done;
}
switch (offset) {
case VIRTIO_CR_GUESTCAP:
base->negotiated_caps = value & vops->hv_caps;
if (vops->apply_features)
(*vops->apply_features)(DEV_STRUCT(base),
base->negotiated_caps);
break;
case VIRTIO_CR_PFN:
if (base->curq >= vops->nvq)
goto bad_qindex;
virtio_vq_init(base, value);
break;
case VIRTIO_CR_QSEL:
/*
* Note that the guest is allowed to select an
* invalid queue; we just need to return a QNUM
* of 0 while the bad queue is selected.
*/
base->curq = value;
break;
case VIRTIO_CR_QNOTIFY:
if (value >= vops->nvq) {
fprintf(stderr, "%s: queue %d notify out of range\r\n",
name, (int)value);
goto done;
}
/* Per-queue notify takes precedence over the device-wide one. */
vq = &base->queues[value];
if (vq->notify)
(*vq->notify)(DEV_STRUCT(base), vq);
else if (vops->qnotify)
(*vops->qnotify)(DEV_STRUCT(base), vq);
else
fprintf(stderr,
"%s: qnotify queue %d: missing vq/vops notify\r\n",
name, (int)value);
break;
case VIRTIO_CR_STATUS:
base->status = value;
if (vops->set_status)
(*vops->set_status)(DEV_STRUCT(base), value);
/* A status write of 0 is a device reset request. */
if (value == 0)
(*vops->reset)(DEV_STRUCT(base));
break;
case VIRTIO_CR_CFGVEC:
base->msix_cfg_idx = value;
break;
case VIRTIO_CR_QVEC:
if (base->curq >= vops->nvq)
goto bad_qindex;
vq = &base->queues[base->curq];
vq->msix_idx = value;
break;
}
goto done;
bad_qindex:
fprintf(stderr,
"%s: write config reg %s: curq %d >= max %d\r\n",
name, cr->name, base->curq, vops->nvq);
done:
if (base->mtx)
pthread_mutex_unlock(base->mtx);
}

View File

@@ -0,0 +1,441 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/stat.h>
#include <sys/uio.h>
#include <sys/ioctl.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <openssl/md5.h>
#include "dm.h"
#include "pci_core.h"
#include "virtio.h"
#include "block_if.h"
#define VIRTIO_BLK_RINGSZ 64
#define VIRTIO_BLK_S_OK 0
#define VIRTIO_BLK_S_IOERR 1
#define VIRTIO_BLK_S_UNSUPP 2
#define VIRTIO_BLK_BLK_ID_BYTES 20
/* Capability bits */
#define VIRTIO_BLK_F_SEG_MAX (1 << 2) /* Maximum request segments */
#define VIRTIO_BLK_F_BLK_SIZE (1 << 6) /* cfg block size valid */
#define VIRTIO_BLK_F_FLUSH (1 << 9) /* Cache flush support */
#define VIRTIO_BLK_F_TOPOLOGY (1 << 10) /* Optimal I/O alignment */
/*
* Host capabilities
*/
#define VIRTIO_BLK_S_HOSTCAPS \
(VIRTIO_BLK_F_SEG_MAX | \
VIRTIO_BLK_F_BLK_SIZE | \
VIRTIO_BLK_F_FLUSH | \
VIRTIO_BLK_F_TOPOLOGY | \
VIRTIO_RING_F_INDIRECT_DESC) /* indirect descriptors */
/*
* Config space "registers"
*/
/* Device-specific config space, exposed past the standard registers. */
struct virtio_blk_config {
uint64_t capacity; /* device size in 512-byte sectors */
uint32_t size_max;
uint32_t seg_max; /* max segments per request (F_SEG_MAX) */
struct {
uint16_t cylinders;
uint8_t heads;
uint8_t sectors;
} geometry;
uint32_t blk_size; /* backing sector size (F_BLK_SIZE) */
struct {
uint8_t physical_block_exp;
uint8_t alignment_offset;
uint16_t min_io_size;
uint32_t opt_io_size;
} topology;
uint8_t writeback;
} __attribute__((packed));
/*
* Fixed-size block header
*/
/* Request header: first descriptor of every virtio-block chain. */
struct virtio_blk_hdr {
#define VBH_OP_READ 0
#define VBH_OP_WRITE 1
#define VBH_OP_FLUSH 4
#define VBH_OP_FLUSH_OUT 5
#define VBH_OP_IDENT 8
#define VBH_FLAG_BARRIER 0x80000000 /* OR'ed into type */
uint32_t type; /* one of the VBH_OP_* values above */
uint32_t ioprio;
uint64_t sector; /* starting sector, in 512-byte units */
} __attribute__((packed));
/*
* Debug printf
*/
static int virtio_blk_debug;
#define DPRINTF(params) do { if (virtio_blk_debug) printf params; } while (0)
#define WPRINTF(params) (printf params)
/* In-flight request: pairs a blockif request with its ring slot. */
struct virtio_blk_ioreq {
struct blockif_req req; /* handed to the blockif layer */
struct virtio_blk *blk; /* back-pointer to the owning device */
uint8_t *status; /* guest status byte, written on completion */
uint16_t idx; /* head descriptor index for vq_relchain() */
};
/*
* Per-device struct
*/
struct virtio_blk {
struct virtio_base base; /* must come first (see virtio_linkup) */
pthread_mutex_t mtx; /* guards the virtqueue across threads */
struct virtio_vq_info vq; /* single request queue */
struct virtio_blk_config cfg;
struct blockif_ctxt *bc; /* backing file context */
char ident[VIRTIO_BLK_BLK_ID_BYTES + 1];
struct virtio_blk_ioreq ios[VIRTIO_BLK_RINGSZ];
};
static void virtio_blk_reset(void *);
static void virtio_blk_notify(void *, struct virtio_vq_info *);
static int virtio_blk_cfgread(void *, int, int, uint32_t *);
static int virtio_blk_cfgwrite(void *, int, int, uint32_t);
static struct virtio_ops virtio_blk_ops = {
"virtio_blk", /* our name */
1, /* we support 1 virtqueue */
sizeof(struct virtio_blk_config), /* config reg size */
virtio_blk_reset, /* reset */
virtio_blk_notify, /* device-wide qnotify */
virtio_blk_cfgread, /* read PCI config */
virtio_blk_cfgwrite, /* write PCI config */
NULL, /* apply negotiated features */
NULL, /* called on guest set status */
VIRTIO_BLK_S_HOSTCAPS, /* our capabilities */
};
/* Handle a guest-requested device reset (status register write of 0). */
static void
virtio_blk_reset(void *vdev)
{
	struct virtio_blk *vblk = vdev;

	DPRINTF(("virtio_blk: device reset requested !\n"));
	virtio_reset_dev(&vblk->base);
}
/*
 * Completion callback invoked by the blockif layer: translate the
 * errno into a virtio status byte, then return the chain to the
 * guest under the device lock.
 */
static void
virtio_blk_done(struct blockif_req *br, int err)
{
	struct virtio_blk_ioreq *io = br->param;
	struct virtio_blk *vblk = io->blk;
	uint8_t status;

	/* convert errno into a virtio block error return */
	if (err == 0)
		status = VIRTIO_BLK_S_OK;
	else if (err == EOPNOTSUPP || err == ENOSYS)
		status = VIRTIO_BLK_S_UNSUPP;
	else
		status = VIRTIO_BLK_S_IOERR;
	*io->status = status;

	/*
	 * Return the descriptor back to the host.
	 * We wrote 1 byte (our status) to host.
	 */
	pthread_mutex_lock(&vblk->mtx);
	vq_relchain(&vblk->vq, io->idx, 1);
	vq_endchains(&vblk->vq, 0);
	pthread_mutex_unlock(&vblk->mtx);
}
/*
 * Pull one request chain off the queue and dispatch it to the
 * blockif layer (or answer it inline for IDENT).  Completion is
 * reported asynchronously through virtio_blk_done().
 */
static void
virtio_blk_proc(struct virtio_blk *blk, struct virtio_vq_info *vq)
{
struct virtio_blk_hdr *vbh;
struct virtio_blk_ioreq *io;
int i, n;
int err;
ssize_t iolen;
int writeop, type;
struct iovec iov[BLOCKIF_IOV_MAX + 2];
uint16_t idx, flags[BLOCKIF_IOV_MAX + 2];
n = vq_getchain(vq, &idx, iov, BLOCKIF_IOV_MAX + 2, flags);
/*
* The first descriptor will be the read-only fixed header,
* and the last is for status (hence +2 above and below).
* The remaining iov's are the actual data I/O vectors.
*
* XXX - note - this fails on crash dump, which does a
* VIRTIO_BLK_T_FLUSH with a zero transfer length
*/
assert(n >= 2 && n <= BLOCKIF_IOV_MAX + 2);
/* Reuse the ioreq slot keyed by the chain's head index. */
io = &blk->ios[idx];
assert((flags[0] & VRING_DESC_F_WRITE) == 0);
assert(iov[0].iov_len == sizeof(struct virtio_blk_hdr));
vbh = iov[0].iov_base;
/* Data vectors are everything between the header and status byte. */
memcpy(&io->req.iov, &iov[1], sizeof(struct iovec) * (n - 2));
io->req.iovcnt = n - 2;
io->req.offset = vbh->sector * DEV_BSIZE;
/* The last descriptor is the one-byte, guest-writable status. */
io->status = iov[--n].iov_base;
assert(iov[n].iov_len == 1);
assert(flags[n] & VRING_DESC_F_WRITE);
/*
* XXX
* The guest should not be setting the BARRIER flag because
* we don't advertise the capability.
*/
type = vbh->type & ~VBH_FLAG_BARRIER;
writeop = (type == VBH_OP_WRITE);
iolen = 0;
for (i = 1; i < n; i++) {
/*
* - write op implies read-only descriptor,
* - read/ident op implies write-only descriptor,
* therefore test the inverse of the descriptor bit
* to the op.
*/
assert(((flags[i] & VRING_DESC_F_WRITE) == 0) == writeop);
iolen += iov[i].iov_len;
}
io->req.resid = iolen;
DPRINTF(("virtio-block: %s op, %zd bytes, %d segs, offset %ld\n\r",
writeop ? "write" : "read/ident", iolen, i - 1,
io->req.offset));
switch (type) {
case VBH_OP_READ:
err = blockif_read(blk->bc, &io->req);
break;
case VBH_OP_WRITE:
err = blockif_write(blk->bc, &io->req);
break;
case VBH_OP_FLUSH:
case VBH_OP_FLUSH_OUT:
err = blockif_flush(blk->bc, &io->req);
break;
case VBH_OP_IDENT:
/* Assume a single buffer */
/* S/n equal to buffer is not zero-terminated. */
memset(iov[1].iov_base, 0, iov[1].iov_len);
strncpy(iov[1].iov_base, blk->ident,
MIN(iov[1].iov_len, sizeof(blk->ident)));
virtio_blk_done(&io->req, 0);
return;
default:
virtio_blk_done(&io->req, EOPNOTSUPP);
return;
}
assert(err == 0);
}
/* Queue-notify handler: drain every available chain from the queue. */
static void
virtio_blk_notify(void *vdev, struct virtio_vq_info *vq)
{
	struct virtio_blk *vblk = vdev;

	for (; vq_has_descs(vq); )
		virtio_blk_proc(vblk, vq);
}
static int
virtio_blk_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
{
char bident[sizeof("XX:X:X")];
struct blockif_ctxt *bctxt;
MD5_CTX mdctx;
u_char digest[16];
struct virtio_blk *blk;
off_t size;
int i, sectsz, sts, sto;
pthread_mutexattr_t attr;
int rc;
if (opts == NULL) {
printf("virtio-block: backing device required\n");
return -1;
}
/*
* The supplied backing file has to exist
*/
snprintf(bident, sizeof(bident), "%d:%d", dev->slot, dev->func);
bctxt = blockif_open(opts, bident);
if (bctxt == NULL) {
perror("Could not open backing file");
return -1;
}
size = blockif_size(bctxt);
sectsz = blockif_sectsz(bctxt);
blockif_psectsz(bctxt, &sts, &sto);
blk = calloc(1, sizeof(struct virtio_blk));
if (!blk) {
WPRINTF(("virtio_blk: calloc returns NULL\n"));
return -1;
}
blk->bc = bctxt;
for (i = 0; i < VIRTIO_BLK_RINGSZ; i++) {
struct virtio_blk_ioreq *io = &blk->ios[i];
io->req.callback = virtio_blk_done;
io->req.param = io;
io->blk = blk;
io->idx = i;
}
/* init mutex attribute properly to avoid deadlock */
rc = pthread_mutexattr_init(&attr);
if (rc)
DPRINTF(("mutexattr init failed with erro %d!\n", rc));
rc = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
if (rc)
DPRINTF(("virtio_blk: mutexattr_settype failed with "
"error %d!\n", rc));
rc = pthread_mutex_init(&blk->mtx, &attr);
if (rc)
DPRINTF(("virtio_blk: pthread_mutex_init failed with "
"error %d!\n", rc));
/* init virtio struct and virtqueues */
virtio_linkup(&blk->base, &virtio_blk_ops, blk, dev, &blk->vq);
blk->base.mtx = &blk->mtx;
blk->vq.qsize = VIRTIO_BLK_RINGSZ;
/* blk->vq.vq_notify = we have no per-queue notify */
/*
* Create an identifier for the backing file. Use parts of the
* md5 sum of the filename
*/
MD5_Init(&mdctx);
MD5_Update(&mdctx, opts, strlen(opts));
MD5_Final(digest, &mdctx);
sprintf(blk->ident, "ACRN--%02X%02X-%02X%02X-%02X%02X",
digest[0], digest[1], digest[2], digest[3], digest[4], digest[5]);
/* setup virtio block config space */
blk->cfg.capacity = size / DEV_BSIZE; /* 512-byte units */
blk->cfg.size_max = 0; /* not negotiated */
blk->cfg.seg_max = BLOCKIF_IOV_MAX;
blk->cfg.geometry.cylinders = 0; /* no geometry */
blk->cfg.geometry.heads = 0;
blk->cfg.geometry.sectors = 0;
blk->cfg.blk_size = sectsz;
blk->cfg.topology.physical_block_exp =
(sts > sectsz) ? (ffsll(sts / sectsz) - 1) : 0;
blk->cfg.topology.alignment_offset =
(sto != 0) ? ((sts - sto) / sectsz) : 0;
blk->cfg.topology.min_io_size = 0;
blk->cfg.topology.opt_io_size = 0;
blk->cfg.writeback = 0;
/*
* Should we move some of this into virtio.c? Could
* have the device, class, and subdev_0 as fields in
* the virtio constants structure.
*/
pci_set_cfgdata16(dev, PCIR_DEVICE, VIRTIO_DEV_BLOCK);
pci_set_cfgdata16(dev, PCIR_VENDOR, VIRTIO_VENDOR);
pci_set_cfgdata8(dev, PCIR_CLASS, PCIC_STORAGE);
pci_set_cfgdata16(dev, PCIR_SUBDEV_0, VIRTIO_TYPE_BLOCK);
pci_set_cfgdata16(dev, PCIR_SUBVEND_0, VIRTIO_VENDOR);
if (virtio_interrupt_init(&blk->base, fbsdrun_virtio_msix())) {
blockif_close(blk->bc);
free(blk);
return -1;
}
virtio_set_io_bar(&blk->base, 0);
return 0;
}
/*
 * PCI vdev teardown hook for virtio-blk: close the block backend and
 * release the device softc. @ctx and @opts are unused.
 */
static void
virtio_blk_deinit(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
{
	struct virtio_blk *vblk = dev->arg;

	if (vblk == NULL)
		return;

	DPRINTF(("virtio_blk: deinit\n"));
	blockif_close(vblk->bc);
	free(vblk);
}
/*
 * Guest write to virtio-blk config space: the whole config area is
 * read-only, so log the attempt and reject it.
 */
static int
virtio_blk_cfgwrite(void *vdev, int offset, int size, uint32_t value)
{
	DPRINTF(("virtio_blk: write to readonly reg %d\n\r", offset));
	return -1;
}
/*
 * Guest read from virtio-blk config space: copy @size bytes at @offset
 * out of the cached config structure. Always succeeds.
 */
static int
virtio_blk_cfgread(void *vdev, int offset, int size, uint32_t *retval)
{
	struct virtio_blk *vblk = vdev;

	/* the virtio core has already range-checked offset and size */
	memcpy(retval, (uint8_t *)&vblk->cfg + offset, size);
	return 0;
}
/* PCI device-model glue for virtio-blk; BAR accesses go to the virtio core. */
struct pci_vdev_ops pci_ops_virtio_blk = {
	.class_name	= "virtio-blk",
	.vdev_init	= virtio_blk_init,
	.vdev_deinit	= virtio_blk_deinit,
	.vdev_barwrite	= virtio_pci_write,
	.vdev_barread	= virtio_pci_read
};
DEFINE_PCI_DEVTYPE(pci_ops_virtio_blk);

View File

@@ -0,0 +1,937 @@
/*-
* Copyright (c) 2016 iXsystems Inc.
* All rights reserved.
*
* This software was developed by Jakub Klama <jceel@FreeBSD.org>
* under sponsorship from iXsystems Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer
* in this position and unchanged.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/uio.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <libgen.h>
#include <sysexits.h>
#include <termios.h>
#include "dm.h"
#include "pci_core.h"
#include "virtio.h"
#include "mevent.h"
/* ring size of each virtqueue */
#define VIRTIO_CONSOLE_RINGSZ	64
/* maximum number of data ports (excluding the control port) */
#define VIRTIO_CONSOLE_MAXPORTS	16
/* an rx/tx queue pair per port, plus the control rx/tx pair */
#define VIRTIO_CONSOLE_MAXQ	(VIRTIO_CONSOLE_MAXPORTS * 2 + 2)

/* control-queue event codes (virtio console spec) */
#define VIRTIO_CONSOLE_DEVICE_READY	0
#define VIRTIO_CONSOLE_DEVICE_ADD	1
#define VIRTIO_CONSOLE_DEVICE_REMOVE	2
#define VIRTIO_CONSOLE_PORT_READY	3
#define VIRTIO_CONSOLE_CONSOLE_PORT	4
#define VIRTIO_CONSOLE_CONSOLE_RESIZE	5
#define VIRTIO_CONSOLE_PORT_OPEN	6
#define VIRTIO_CONSOLE_PORT_NAME	7

/* feature bit numbers */
#define VIRTIO_CONSOLE_F_SIZE		0
#define VIRTIO_CONSOLE_F_MULTIPORT	1
#define VIRTIO_CONSOLE_F_EMERG_WRITE	2
/*
 * NOTE(review): this ORs the bit *numbers* (0 | 1 | 2 == 3) rather than
 * shifted bit masks; the same expression exists in bhyve — confirm the
 * intended host feature advertisement.
 */
#define VIRTIO_CONSOLE_S_HOSTCAPS \
	(VIRTIO_CONSOLE_F_SIZE | \
	VIRTIO_CONSOLE_F_MULTIPORT | \
	VIRTIO_CONSOLE_F_EMERG_WRITE)

/* set non-zero (e.g. from a debugger) to enable DPRINTF tracing */
static int virtio_console_debug;
#define DPRINTF(params) do { \
	if (virtio_console_debug) \
		printf params; \
} while (0)
#define WPRINTF(params) (printf params)
struct virtio_console;
struct virtio_console_port;
struct virtio_console_config;

/* per-port guest->host data callback: consume guest data in iov[0..niov) */
typedef void (virtio_console_cb_t)(struct virtio_console_port *, void *,
				   struct iovec *, int);

/* supported host-side backends for a console port */
enum virtio_console_be_type {
	VIRTIO_CONSOLE_BE_STDIO = 0,
	VIRTIO_CONSOLE_BE_TTY,
	VIRTIO_CONSOLE_BE_PTY,
	VIRTIO_CONSOLE_BE_FILE,
	VIRTIO_CONSOLE_BE_MAX,
	VIRTIO_CONSOLE_BE_INVALID = VIRTIO_CONSOLE_BE_MAX
};
/* state of a single virtio-console port */
struct virtio_console_port {
	struct virtio_console *console;	/* owning device */
	int id;				/* port index, 0-based */
	const char *name;
	bool enabled;			/* slot is in use */
	bool is_console;		/* advertise as hvc console port */
	bool rx_ready;			/* guest has posted receive buffers */
	bool open;			/* PORT_OPEN sent (or pending) */
	int rxq;			/* queue carrying guest->host data */
	int txq;			/* queue carrying host->guest data */
	void *arg;			/* backend pointer, passed to cb */
	virtio_console_cb_t *cb;	/* guest->host data handler */
};

/* host backend bound to a port: an fd plus its mevent registration */
struct virtio_console_backend {
	struct virtio_console_port *port;
	struct mevent *evp;
	int fd;
	bool open;
	enum virtio_console_be_type be_type;
	int pts_fd; /* only valid for PTY */
};

/* device softc */
struct virtio_console {
	struct virtio_base base;
	struct virtio_vq_info queues[VIRTIO_CONSOLE_MAXQ];
	pthread_mutex_t mtx;
	uint64_t cfg;
	uint64_t features;	/* negotiated feature bits */
	int nports;		/* number of registered ports */
	bool ready;		/* guest sent DEVICE_READY */
	struct virtio_console_port control_port;
	struct virtio_console_port ports[VIRTIO_CONSOLE_MAXPORTS];
	struct virtio_console_config *config;
};
/* device config space layout (virtio console spec) */
struct virtio_console_config {
	uint16_t cols;
	uint16_t rows;
	uint32_t max_nr_ports;
	uint32_t emerg_wr;
} __attribute__((packed));

/* control-queue message header */
struct virtio_console_control {
	uint32_t id;		/* port id the event refers to */
	uint16_t event;		/* VIRTIO_CONSOLE_* event code */
	uint16_t value;
} __attribute__((packed));

/* payload of a CONSOLE_RESIZE control event */
struct virtio_console_console_resize {
	uint16_t cols;
	uint16_t rows;
} __attribute__((packed));
static void virtio_console_reset(void *);
static void virtio_console_notify_rx(void *, struct virtio_vq_info *);
static void virtio_console_notify_tx(void *, struct virtio_vq_info *);
static int virtio_console_cfgread(void *, int, int, uint32_t *);
static int virtio_console_cfgwrite(void *, int, int, uint32_t);
static void virtio_console_neg_features(void *, uint64_t);
static void virtio_console_control_send(struct virtio_console *,
	struct virtio_console_control *, const void *, size_t);
static void virtio_console_announce_port(struct virtio_console_port *);
static void virtio_console_open_port(struct virtio_console_port *, bool);

/* virtio core callbacks for this device */
static struct virtio_ops virtio_console_ops = {
	"vtcon",		/* our name */
	VIRTIO_CONSOLE_MAXQ,	/* we support VTCON_MAXQ virtqueues */
	sizeof(struct virtio_console_config), /* config reg size */
	virtio_console_reset,	/* reset */
	NULL,			/* device-wide qnotify */
	virtio_console_cfgread,	/* read virtio config */
	virtio_console_cfgwrite, /* write virtio config */
	virtio_console_neg_features, /* apply negotiated features */
	NULL,			/* called on guest set status */
	VIRTIO_CONSOLE_S_HOSTCAPS, /* our capabilities */
};

/* option keyword -> backend type mapping for "-s virtio-console,..." */
static const char *virtio_console_be_table[VIRTIO_CONSOLE_BE_MAX] = {
	[VIRTIO_CONSOLE_BE_STDIO]	= "stdio",
	[VIRTIO_CONSOLE_BE_TTY]		= "tty",
	[VIRTIO_CONSOLE_BE_PTY]		= "pty",
	[VIRTIO_CONSOLE_BE_FILE]	= "file"
};

/* original stdin settings, restored at exit by the stdio backend */
static struct termios virtio_console_saved_tio;
static int virtio_console_saved_flags;
/* Device reset hook: push the virtio core state back to its initial state. */
static void
virtio_console_reset(void *vdev)
{
	struct virtio_console *vcon = vdev;

	DPRINTF(("vtcon: device reset requested!\n"));
	virtio_reset_dev(&vcon->base);
}
/* Record the feature bits the guest driver negotiated. */
static void
virtio_console_neg_features(void *vdev, uint64_t negotiated_features)
{
	((struct virtio_console *)vdev)->features = negotiated_features;
}
/*
 * Guest read from config space: copy @size bytes at @offset out of the
 * cached config structure. Offset/size are validated by the virtio core.
 */
static int
virtio_console_cfgread(void *vdev, int offset, int size, uint32_t *retval)
{
	struct virtio_console *vcon = vdev;

	memcpy(retval, (uint8_t *)vcon->config + offset, size);
	return 0;
}
/* Guest writes to config space are accepted but have no effect. */
static int
virtio_console_cfgwrite(void *vdev, int offset, int size, uint32_t val)
{
	return 0;
}
/*
 * Map a virtqueue back to its owning port. Queues 0/1 belong to port0,
 * 2/3 to the control port, and each further pair to ports 1, 2, ...
 */
static inline struct virtio_console_port *
virtio_console_vq_to_port(struct virtio_console *console,
			  struct virtio_vq_info *vq)
{
	switch (vq->num) {
	case 0:
	case 1:
		return &console->ports[0];
	case 2:
	case 3:
		return &console->control_port;
	default:
		return &console->ports[(vq->num / 2) - 1];
	}
}
/* Return the port's host->guest (@tx_queue) or guest->host virtqueue. */
static inline struct virtio_vq_info *
virtio_console_port_to_vq(struct virtio_console_port *port, bool tx_queue)
{
	return &port->console->queues[tx_queue ? port->txq : port->rxq];
}
/*
 * Register a new port on @console and return it, or NULL (errno = EBUSY)
 * when all VIRTIO_CONSOLE_MAXPORTS slots are taken.
 * @cb/@arg: backend handler invoked with guest->host data.
 */
static struct virtio_console_port *
virtio_console_add_port(struct virtio_console *console, const char *name,
			virtio_console_cb_t *cb, void *arg, bool is_console)
{
	struct virtio_console_port *port;

	if (console->nports == VIRTIO_CONSOLE_MAXPORTS) {
		errno = EBUSY;
		return NULL;
	}
	port = &console->ports[console->nports++];
	port->id = console->nports - 1;
	port->console = console;
	port->name = name;
	port->cb = cb;
	port->arg = arg;
	port->is_console = is_console;
	if (port->id == 0) {
		/* port0 owns queues 0/1; queues 2/3 are the control pair */
		port->txq = 0;
		port->rxq = 1;
	} else {
		/*
		 * nports was already incremented above, so port N (N >= 1)
		 * gets queues 2*(N+1) and 2*(N+1)+1, skipping the control
		 * pair. txq/rxq are named from the host's point of view:
		 * txq is the queue the host transmits into.
		 */
		port->txq = console->nports * 2;
		port->rxq = port->txq + 1;
	}
	port->enabled = true;
	return port;
}
/*
 * Notify handler for the control queue: process one control message sent
 * by the guest driver.
 * NOTE(review): iov->iov_len is not validated against sizeof(*ctrl)
 * before the cast — a malformed guest buffer could be read out of
 * bounds; confirm against later upstream hardening.
 */
static void
virtio_console_control_tx(struct virtio_console_port *port, void *arg,
			  struct iovec *iov, int niov)
{
	struct virtio_console *console;
	struct virtio_console_port *tmp;
	struct virtio_console_control resp, *ctrl;
	int i;

	assert(niov == 1);
	console = port->console;
	ctrl = (struct virtio_console_control *)iov->iov_base;
	switch (ctrl->event) {
	case VIRTIO_CONSOLE_DEVICE_READY:
		console->ready = true;
		/* set port ready events for registered ports */
		for (i = 0; i < VIRTIO_CONSOLE_MAXPORTS; i++) {
			tmp = &console->ports[i];
			if (tmp->enabled)
				virtio_console_announce_port(tmp);
			if (tmp->open)
				virtio_console_open_port(tmp, true);
		}
		break;
	case VIRTIO_CONSOLE_PORT_READY:
		if (ctrl->id >= console->nports) {
			WPRINTF(("VTCONSOLE_PORT_READY for unknown port %d\n",
				ctrl->id));
			return;
		}
		tmp = &console->ports[ctrl->id];
		if (tmp->is_console) {
			/* tell the guest this port is an hvc console */
			resp.event = VIRTIO_CONSOLE_CONSOLE_PORT;
			resp.id = ctrl->id;
			resp.value = 1;
			virtio_console_control_send(console, &resp, NULL, 0);
		}
		break;
	}
}
static void
virtio_console_announce_port(struct virtio_console_port *port)
{
struct virtio_console_control event;
event.id = port->id;
event.event = VIRTIO_CONSOLE_DEVICE_ADD;
event.value = 1;
virtio_console_control_send(port->console, &event, NULL, 0);
event.event = VIRTIO_CONSOLE_PORT_NAME;
virtio_console_control_send(port->console, &event, port->name,
strlen(port->name));
}
/*
 * Send a PORT_OPEN control event for @port. If the guest has not yet
 * signalled DEVICE_READY, only mark the port open; control_tx replays
 * the open event once the device becomes ready.
 * NOTE(review): the deferred path sets port->open = true regardless of
 * the @open argument; harmless today since all callers pass true, but
 * worth confirming if a close path is ever added.
 */
static void
virtio_console_open_port(struct virtio_console_port *port, bool open)
{
	struct virtio_console_control event;

	if (!port->console->ready) {
		port->open = true;
		return;
	}
	event.id = port->id;
	event.event = VIRTIO_CONSOLE_PORT_OPEN;
	event.value = (int)open;
	virtio_console_control_send(port->console, &event, NULL, 0);
}
/*
 * Queue a control message (plus an optional @payload of @len bytes) on
 * the control port's host->guest queue and notify the guest.
 * Silently drops the message when no guest buffer is available.
 */
static void
virtio_console_control_send(struct virtio_console *console,
			    struct virtio_console_control *ctrl,
			    const void *payload, size_t len)
{
	struct virtio_vq_info *vq;
	struct iovec iov;
	uint16_t idx;
	int n;

	vq = virtio_console_port_to_vq(&console->control_port, true);
	if (!vq_has_descs(vq))
		return;
	n = vq_getchain(vq, &idx, &iov, 1, NULL);
	assert(n == 1);
	/* NOTE(review): assumes the guest buffer fits header + payload */
	memcpy(iov.iov_base, ctrl, sizeof(struct virtio_console_control));
	if (payload != NULL && len > 0)
		memcpy(iov.iov_base + sizeof(struct virtio_console_control),
			payload, len);
	vq_relchain(vq, idx, sizeof(struct virtio_console_control) + len);
	vq_endchains(vq, 1);
}
/*
 * Notify handler for guest->host queues: drain every available chain
 * and hand the data to the owning port's backend callback.
 */
static void
virtio_console_notify_tx(void *vdev, struct virtio_vq_info *vq)
{
	struct virtio_console *console;
	struct virtio_console_port *port;
	struct iovec iov[1];
	uint16_t idx;
	uint16_t flags[8];

	console = vdev;
	port = virtio_console_vq_to_port(console, vq);
	while (vq_has_descs(vq)) {
		vq_getchain(vq, &idx, iov, 1, flags);
		if (port != NULL)
			port->cb(port, port->arg, iov, 1);
		/*
		 * Release this chain and handle more
		 */
		vq_relchain(vq, idx, 0);
	}
	vq_endchains(vq, 1);	/* Generate interrupt if appropriate. */
}
/*
 * Notify handler for host->guest queues: the first kick means the guest
 * has posted receive buffers. Record that, and suppress further kicks on
 * this queue — data is pushed from the backend's mevent callback instead.
 */
static void
virtio_console_notify_rx(void *vdev, struct virtio_vq_info *vq)
{
	struct virtio_console *console;
	struct virtio_console_port *port;

	console = vdev;
	port = virtio_console_vq_to_port(console, vq);
	if (!port->rx_ready) {
		port->rx_ready = 1;
		vq->used->flags |= VRING_USED_F_NO_NOTIFY;
	}
}
/*
 * Detach @be from the event loop and drop its descriptors. stdin is only
 * removed from mevent (never closed); any other fd is closed along with
 * its event. A PTY slave fd, if open, is closed too.
 */
static void
virtio_console_reset_backend(struct virtio_console_backend *be)
{
	if (!be)
		return;
	if (be->fd != STDIN_FILENO)
		mevent_delete_close(be->evp);
	else
		mevent_delete(be->evp);
	if (be->be_type == VIRTIO_CONSOLE_BE_PTY && be->pts_fd > 0) {
		close(be->pts_fd);
		be->pts_fd = -1;
	}
	be->evp = NULL;
	be->fd = -1;
	be->open = false;
}
/*
 * mevent read callback: move data from the backend fd into the guest's
 * receive virtqueue. When the guest is not ready (or has no buffers),
 * data is drained and discarded. EOF or a fatal read error tears the
 * backend down.
 */
static void
virtio_console_backend_read(int fd __attribute__((unused)),
			    enum ev_type t __attribute__((unused)),
			    void *arg)
{
	struct virtio_console_port *port;
	struct virtio_console_backend *be = arg;
	struct virtio_vq_info *vq;
	struct iovec iov;
	static char dummybuf[2048];
	int len, n;
	uint16_t idx;

	port = be->port;
	vq = virtio_console_port_to_vq(port, true);

	/* guest not ready yet: drain and discard to avoid busy-looping */
	if (!be->open || !port->rx_ready) {
		len = read(be->fd, dummybuf, sizeof(dummybuf));
		if (len == 0)
			goto close;
		return;
	}

	/* no guest buffers available: drop the data */
	if (!vq_has_descs(vq)) {
		len = read(be->fd, dummybuf, sizeof(dummybuf));
		vq_endchains(vq, 1);
		if (len == 0)
			goto close;
		return;
	}

	do {
		n = vq_getchain(vq, &idx, &iov, 1, NULL);
		len = readv(be->fd, &iov, n);
		if (len <= 0) {
			vq_retchain(vq);
			vq_endchains(vq, 0);

			/* no data available */
			if (len == -1 && errno == EAGAIN)
				return;

			/* EOF (len == 0) or any other error */
			goto close;
		}
		vq_relchain(vq, idx, len);
	} while (vq_has_descs(vq));
	vq_endchains(vq, 1);
	/*
	 * Fix: every posted buffer was filled successfully — return here
	 * instead of falling through into the teardown path below, which
	 * previously reset the backend after a fully successful read.
	 */
	return;

close:
	virtio_console_reset_backend(be);
	WPRINTF(("vtcon: be read failed and close! len = %d, errno = %d\n",
		len, errno));
}
/*
 * Port data callback: forward guest output (iov) to the backend fd.
 * EAGAIN silently drops the data (see comment below); any other write
 * failure tears the backend down.
 */
static void
virtio_console_backend_write(struct virtio_console_port *port, void *arg,
			     struct iovec *iov, int niov)
{
	struct virtio_console_backend *be;
	int ret;

	be = arg;
	if (be->fd == -1)
		return;
	ret = writev(be->fd, iov, niov);
	if (ret <= 0) {
		/* backend cannot receive more data. For example when pts is
		 * not connected to any client, its tty buffer will become full.
		 * In this case we just drop data from guest hvc console.
		 */
		if (ret == -1 && errno == EAGAIN)
			return;
		virtio_console_reset_backend(be);
		WPRINTF(("vtcon: be write failed! errno = %d\n", errno));
	}
}
/* atexit() hook: undo the raw-mode/non-blocking changes made to stdin. */
static void
virtio_console_restore_stdio(void)
{
	fcntl(STDIN_FILENO, F_SETFL, virtio_console_saved_flags);
	tcsetattr(STDIN_FILENO, TCSANOW, &virtio_console_saved_tio);
	stdio_in_use = false;
}
/* Every backend except a plain file delivers host->guest data via mevent. */
static bool
virtio_console_backend_can_read(enum virtio_console_be_type be_type)
{
	return be_type != VIRTIO_CONSOLE_BE_FILE;
}
/*
 * Open the host-side file descriptor for a backend of @be_type.
 * @path is the target for tty/file backends and is unused for stdio/pty.
 * Returns the fd, or -1 on failure.
 */
static int
virtio_console_open_backend(const char *path,
			    enum virtio_console_be_type be_type)
{
	int fd = -1;

	switch (be_type) {
	case VIRTIO_CONSOLE_BE_PTY:
		fd = posix_openpt(O_RDWR | O_NOCTTY);
		if (fd == -1)
			WPRINTF(("vtcon: posix_openpt failed, errno = %d\n",
				errno));
		else if (grantpt(fd) == -1 || unlockpt(fd) == -1) {
			WPRINTF(("vtcon: grant/unlock failed, errno = %d\n",
				errno));
			close(fd);
			fd = -1;
		}
		break;
	case VIRTIO_CONSOLE_BE_STDIO:
		/* only one device in the VM may own stdin */
		if (stdio_in_use) {
			WPRINTF(("vtcon: stdio is used by other device\n"));
			break;
		}
		fd = STDIN_FILENO;
		stdio_in_use = true;
		break;
	case VIRTIO_CONSOLE_BE_TTY:
		fd = open(path, O_RDWR | O_NONBLOCK);
		if (fd < 0)
			WPRINTF(("vtcon: open failed: %s\n", path));
		else if (!isatty(fd)) {
			WPRINTF(("vtcon: not a tty: %s\n", path));
			close(fd);
			fd = -1;
		}
		break;
	case VIRTIO_CONSOLE_BE_FILE:
		/* write-only log file; reads are never armed for files */
		fd = open(path, O_WRONLY|O_CREAT|O_APPEND|O_NONBLOCK, 0666);
		if (fd < 0)
			WPRINTF(("vtcon: open failed: %s\n", path));
		break;
	default:
		WPRINTF(("not supported backend %d!\n", be_type));
	}
	return fd;
}
/*
 * Apply terminal settings to an opened backend fd:
 *  - PTY: open and raw-mode the slave end (kept open so the master never
 *    sees EOF), print the slave path, make the master non-blocking;
 *  - TTY/STDIO: switch the fd to raw mode; stdio additionally saves the
 *    old settings, goes non-blocking and registers an atexit restore hook.
 * Returns 0 on success, -1 on error.
 */
static int
virtio_console_config_backend(struct virtio_console_backend *be)
{
	int fd, flags;
	char *pts_name = NULL;
	int slave_fd = -1;
	struct termios tio, saved_tio;

	if (!be || be->fd == -1)
		return -1;

	fd = be->fd;
	switch (be->be_type) {
	case VIRTIO_CONSOLE_BE_PTY:
		pts_name = ptsname(fd);
		if (pts_name == NULL) {
			WPRINTF(("vtcon: ptsname return NULL, errno = %d\n",
				errno));
			return -1;
		}
		slave_fd = open(pts_name, O_RDWR);
		if (slave_fd == -1) {
			WPRINTF(("vtcon: slave_fd open failed, errno = %d\n",
				errno));
			return -1;
		}
		tcgetattr(slave_fd, &tio);
		cfmakeraw(&tio);
		tcsetattr(slave_fd, TCSAFLUSH, &tio);
		/* keep the slave open; closed in reset/close paths */
		be->pts_fd = slave_fd;
		WPRINTF(("***********************************************\n"));
		WPRINTF(("virt-console backend redirected to %s\n", pts_name));
		WPRINTF(("***********************************************\n"));
		flags = fcntl(fd, F_GETFL);
		fcntl(fd, F_SETFL, flags | O_NONBLOCK);
		break;
	case VIRTIO_CONSOLE_BE_TTY:
	case VIRTIO_CONSOLE_BE_STDIO:
		tcgetattr(fd, &tio);
		saved_tio = tio;
		cfmakeraw(&tio);
		tio.c_cflag |= CLOCAL;
		tcsetattr(fd, TCSANOW, &tio);
		if (be->be_type == VIRTIO_CONSOLE_BE_STDIO) {
			flags = fcntl(fd, F_GETFL);
			fcntl(fd, F_SETFL, flags | O_NONBLOCK);
			/* remember original state for restore at exit */
			virtio_console_saved_flags = flags;
			virtio_console_saved_tio = saved_tio;
			atexit(virtio_console_restore_stdio);
		}
		break;
	default:
		break; /* nothing to do */
	}
	return 0;
}
/*
 * Create and wire up a backend of @be_type for a new port named @name:
 * open and configure the fd, register the port, arm the read mevent
 * (except for file backends) and announce the port as open.
 * On failure everything acquired so far is rolled back.
 * Returns 0 on success, -1 on error.
 */
static int
virtio_console_add_backend(struct virtio_console *console,
			   const char *name, const char *path,
			   enum virtio_console_be_type be_type,
			   bool is_console)
{
	struct virtio_console_backend *be;
	int error = 0, fd = -1;

	be = calloc(1, sizeof(struct virtio_console_backend));
	if (be == NULL) {
		error = -1;
		goto out;
	}
	fd = virtio_console_open_backend(path, be_type);
	if (fd < 0) {
		error = -1;
		goto out;
	}
	be->fd = fd;
	be->be_type = be_type;
	if (virtio_console_config_backend(be) < 0) {
		WPRINTF(("vtcon: virtio_console_config_backend failed\n"));
		error = -1;
		goto out;
	}
	be->port = virtio_console_add_port(console, name,
		virtio_console_backend_write, be, is_console);
	if (be->port == NULL) {
		WPRINTF(("vtcon: virtio_console_add_port failed\n"));
		error = -1;
		goto out;
	}
	if (virtio_console_backend_can_read(be_type)) {
		be->evp = mevent_add(fd, EVF_READ,
				virtio_console_backend_read, be);
		if (be->evp == NULL) {
			WPRINTF(("vtcon: mevent_add failed\n"));
			error = -1;
			goto out;
		}
	}
	virtio_console_open_port(be->port, true);
	be->open = true;
out:
	if (error != 0) {
		/* roll back in reverse order of acquisition */
		if (be) {
			if (be->evp)
				mevent_delete(be->evp);
			if (be->port) {
				be->port->enabled = false;
				be->port->arg = NULL;
			}
			if (be->be_type == VIRTIO_CONSOLE_BE_PTY &&
				be->pts_fd > 0)
				close(be->pts_fd);
			free(be);
		}
		/* stdin is never closed, it belongs to the process */
		if (fd != -1 && fd != STDIN_FILENO)
			close(fd);
	}
	return error;
}
/*
 * Release backend resources on device teardown: close the PTY slave,
 * restore stdin for the stdio backend, and clear the bound port struct.
 * NOTE(review): be->fd itself is only invalidated here, not closed —
 * confirm whether it is intentionally left to process exit / mevent.
 */
static void
virtio_console_close_backend(struct virtio_console_backend *be)
{
	if (!be)
		return;

	switch (be->be_type) {
	case VIRTIO_CONSOLE_BE_PTY:
		if (be->pts_fd > 0) {
			close(be->pts_fd);
			be->pts_fd = -1;
		}
		break;
	case VIRTIO_CONSOLE_BE_STDIO:
		virtio_console_restore_stdio();
		break;
	default:
		break;
	}

	be->fd = -1;
	be->open = false;
	/* wipes the port slot, including its enabled flag */
	memset(be->port, 0, sizeof(*be->port));
}
/* Tear down every enabled port's backend (called from deinit). */
static void
virtio_console_close_all(struct virtio_console *console)
{
	int i;
	struct virtio_console_port *port;
	struct virtio_console_backend *be;

	for (i = 0; i < console->nports; i++) {
		port = &console->ports[i];
		if (!port->enabled)
			continue;
		/* port->arg was set to the backend in add_backend */
		be = (struct virtio_console_backend *)port->arg;
		if (be) {
			virtio_console_close_backend(be);
			free(be);
		}
	}
}
static enum virtio_console_be_type
virtio_console_get_be_type(const char *backend)
{
int i;
for (i = 0; i < VIRTIO_CONSOLE_BE_MAX; i++)
if (strcasecmp(backend, virtio_console_be_table[i]) == 0)
return i;
return VIRTIO_CONSOLE_BE_INVALID;
}
/*
 * PCI vdev init hook for virtio-console.
 * opts format:
 *   [@]stdio|tty|pty|file:portname[=portpath][,...]
 * where a leading '@' marks the port as an hvc console port.
 * Returns 0 on success, -1 on error.
 * NOTE(review): the error returns inside the option-parsing loop leak
 * the already-allocated console/config and any configured backends.
 */
static int
virtio_console_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
{
	struct virtio_console *console;
	char *backend = NULL;
	char *portname = NULL;
	char *portpath = NULL;
	char *opt;
	int i;
	pthread_mutexattr_t attr;
	enum virtio_console_be_type be_type;
	bool is_console = false;
	int rc;

	if (!opts) {
		WPRINTF(("vtcon: invalid opts\n"));
		return -1;
	}
	console = calloc(1, sizeof(struct virtio_console));
	if (!console) {
		WPRINTF(("vtcon: calloc returns NULL\n"));
		return -1;
	}
	console->config = calloc(1, sizeof(struct virtio_console_config));
	if (!console->config) {
		WPRINTF(("vtcon->config: calloc returns NULL\n"));
		free(console);
		return -1;
	}
	console->config->max_nr_ports = VIRTIO_CONSOLE_MAXPORTS;
	console->config->cols = 80;
	console->config->rows = 25;
	/* init mutex attribute properly to avoid deadlock */
	rc = pthread_mutexattr_init(&attr);
	if (rc)
		DPRINTF(("mutexattr init failed with erro %d!\n", rc));
	rc = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
	if (rc)
		DPRINTF(("virtio_console: mutexattr_settype failed with "
			"error %d!\n", rc));
	rc = pthread_mutex_init(&console->mtx, &attr);
	if (rc)
		DPRINTF(("virtio_console: pthread_mutex_init failed with "
			"error %d!\n", rc));
	virtio_linkup(&console->base, &virtio_console_ops, console, dev,
		console->queues);
	console->base.mtx = &console->mtx;
	/* even queue indices carry host->guest data, odd ones guest->host */
	for (i = 0; i < VIRTIO_CONSOLE_MAXQ; i++) {
		console->queues[i].qsize = VIRTIO_CONSOLE_RINGSZ;
		console->queues[i].notify = i % 2 == 0
		    ? virtio_console_notify_rx
		    : virtio_console_notify_tx;
	}
	/* initialize config space */
	pci_set_cfgdata16(dev, PCIR_DEVICE, VIRTIO_DEV_CONSOLE);
	pci_set_cfgdata16(dev, PCIR_VENDOR, VIRTIO_VENDOR);
	pci_set_cfgdata8(dev, PCIR_CLASS, PCIC_SIMPLECOMM);
	pci_set_cfgdata16(dev, PCIR_SUBDEV_0, VIRTIO_TYPE_CONSOLE);
	pci_set_cfgdata16(dev, PCIR_SUBVEND_0, VIRTIO_VENDOR);
	if (virtio_interrupt_init(&console->base, fbsdrun_virtio_msix())) {
		if (console) {
			if (console->config)
				free(console->config);
			free(console);
		}
		return -1;
	}
	virtio_set_io_bar(&console->base, 0);
	/* create control port (fixed queues 2/3) */
	console->control_port.console = console;
	console->control_port.txq = 2;
	console->control_port.rxq = 3;
	console->control_port.cb = virtio_console_control_tx;
	console->control_port.enabled = true;
	/* virtio-console,[@]stdio|tty|pty|file:portname[=portpath]
	 * [,[@]stdio|tty|pty|file:portname[=portpath]]
	 */
	while ((opt = strsep(&opts, ",")) != NULL) {
		backend = strsep(&opt, ":");
		if (backend == NULL) {
			WPRINTF(("vtcon: no backend is specified!\n"));
			return -1;
		}
		if (backend[0] == '@') {
			is_console = true;
			backend++;
		}
		be_type = virtio_console_get_be_type(backend);
		if (be_type == VIRTIO_CONSOLE_BE_INVALID) {
			WPRINTF(("vtcon: invalid backend %s!\n",
				backend));
			return -1;
		}
		if (opt != NULL) {
			portname = strsep(&opt, "=");
			portpath = opt;
			/* stdio and pty need no path; tty and file do */
			if (portpath == NULL
				&& be_type != VIRTIO_CONSOLE_BE_STDIO
				&& be_type != VIRTIO_CONSOLE_BE_PTY) {
				WPRINTF(("vtcon: portpath missing for %s\n",
					portname));
				return -1;
			}
			if (virtio_console_add_backend(console, portname,
				portpath, be_type, is_console) < 0) {
				WPRINTF(("vtcon: add port failed %s\n",
					portname));
				return -1;
			}
		}
	}
	return 0;
}
/*
 * PCI vdev teardown hook: close all port backends and free the device
 * softc and its config space. @ctx and @opts are unused.
 */
static void
virtio_console_deinit(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
{
	struct virtio_console *vcon = dev->arg;

	if (vcon == NULL)
		return;

	virtio_console_close_all(vcon);
	if (vcon->config)
		free(vcon->config);
	free(vcon);
}
/* PCI device-model glue for virtio-console; BAR accesses go to virtio core. */
struct pci_vdev_ops pci_ops_virtio_console = {
	.class_name	= "virtio-console",
	.vdev_init	= virtio_console_init,
	.vdev_deinit	= virtio_console_deinit,
	.vdev_barwrite	= virtio_pci_write,
	.vdev_barread	= virtio_pci_read
};
DEFINE_PCI_DEVTYPE(pci_ops_virtio_console);

View File

@@ -0,0 +1,385 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
* virtio hyper dmabuf
* Allows to share data buffers between VMs using dmabuf like interface
*/
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include "dm.h"
#include "pci_core.h"
#include "virtio.h"
#include "virtio_kernel.h"
#include "vmmapi.h"
/*
 * Size of queue was chosen experimentally in a way
 * that it allows to run ~20 shared surfaces without
 * any delays on hyper dmabuf driver side due to lack
 * of free buffers in queue
 */
#define HYPER_DMABUF_RINGSZ 128

/* Hyper dmabuf uses two queues, one for Rx and one for Tx */
#define HYPER_DMABUF_VQ_NUM 2

/* char device exposed by the VBS-K kernel backend */
const char *hyper_dmabuf_vbs_dev_path = "/dev/vbs_hyper_dmabuf";

/* set non-zero (e.g. from a debugger) to enable DPRINTF tracing */
static int virtio_hyper_dmabuf_debug;
#define DPRINTF(...)\
do {\
	if (virtio_hyper_dmabuf_debug)\
		printf(__VA_ARGS__);\
} while (0)
#define WPRINTF(...) printf(__VA_ARGS__)

/* VBS-K backend state machine + cached parameters handed to the kernel */
static enum VBS_K_STATUS kstatus = VIRTIO_DEV_INITIAL;
static int vbs_k_hyper_dmabuf_fd = -1;	/* singleton fd to the VBS-K device */
static struct vbs_dev_info kdev;	/* device info passed at start */
static struct vbs_vqs_info kvqs;	/* vq info passed at start */

/* device softc */
struct virtio_hyper_dmabuf {
	struct virtio_base base;
	struct virtio_vq_info vq[HYPER_DMABUF_VQ_NUM];
	pthread_mutex_t mtx;
};
static int virtio_hyper_dmabuf_k_init(void);
static int virtio_hyper_dmabuf_k_start(void);
static int virtio_hyper_dmabuf_k_stop(void);
static int virtio_hyper_dmabuf_k_reset(void);
static int virtio_hyper_dmabuf_k_dev_set(const char *name, int vmid,
					 int nvq, uint32_t feature,
					 uint64_t pio_start, uint64_t pio_len);
static int virtio_hyper_dmabuf_k_vq_set(unsigned int nvq, unsigned int idx,
					uint16_t qsize,
					uint32_t pfn, uint16_t msix_idx,
					uint64_t msix_addr, uint32_t msix_data);
static void virtio_hyper_dmabuf_no_notify(void *, struct virtio_vq_info *);
static void virtio_hyper_dmabuf_set_status(void *, uint64_t);
static void virtio_hyper_dmabuf_reset(void *);

/* virtio core callbacks (VBS-K flavor: no config space, kernel qnotify) */
static struct virtio_ops virtio_hyper_dmabuf_ops_k = {
	"virtio_hyper_dmabuf",		/* our name */
	HYPER_DMABUF_VQ_NUM,		/* we support 2 virtqueue */
	0,				/* config reg size */
	virtio_hyper_dmabuf_reset,	/* reset */
	virtio_hyper_dmabuf_no_notify,	/* device-wide qnotify */
	NULL,				/* read virtio config */
	NULL,				/* write virtio config */
	NULL,				/* apply negotiated features */
	virtio_hyper_dmabuf_set_status,	/* called on guest set status */
	0,				/* our capabilities */
};
/*
 * Open the VBS-K backend char device and clear the cached dev/vq state.
 * Returns VIRTIO_SUCCESS, or a negative VIRTIO_ERROR_* code when the
 * device is already open or the open(2) fails.
 * (Fix: definition now spells "(void)" to match its forward declaration.)
 */
static int
virtio_hyper_dmabuf_k_init(void)
{
	/* guard against double init: the fd is a process-wide singleton */
	if (vbs_k_hyper_dmabuf_fd != -1) {
		WPRINTF("virtio_hyper_dmabuf: Ooops! Re-entered!!\n");
		return -VIRTIO_ERROR_REENTER;
	}

	vbs_k_hyper_dmabuf_fd = open(hyper_dmabuf_vbs_dev_path, O_RDWR);
	if (vbs_k_hyper_dmabuf_fd < 0) {
		WPRINTF("virtio_hyper_dmabuf: Failed to open %s!\n",
			hyper_dmabuf_vbs_dev_path);
		return -VIRTIO_ERROR_FD_OPEN_FAILED;
	}
	DPRINTF("virtio_hyper_dmabuf: Open %s success!\n",
		hyper_dmabuf_vbs_dev_path);

	memset(&kdev, 0, sizeof(kdev));
	memset(&kvqs, 0, sizeof(kvqs));
	return VIRTIO_SUCCESS;
}
/*
 * Cache device-level parameters (name, vm id, queue count, features and
 * the kick-register PIO range) that will be handed to VBS-K at start.
 * Always returns VIRTIO_SUCCESS.
 */
static int
virtio_hyper_dmabuf_k_dev_set(const char *name, int vmid, int nvq,
			      uint32_t feature, uint64_t pio_start,
			      uint64_t pio_len)
{
	/* init kdev */
	strncpy(kdev.name, name, VBS_NAME_LEN);
	/* fix: strncpy leaves the buffer unterminated when name is long */
	kdev.name[VBS_NAME_LEN - 1] = '\0';
	kdev.vmid = vmid;
	kdev.nvq = nvq;
	kdev.negotiated_features = feature;
	kdev.pio_range_start = pio_start;
	kdev.pio_range_len = pio_len;
	return VIRTIO_SUCCESS;
}
/*
 * Cache per-virtqueue parameters (ring pfn plus MSI-X routing) for VBS-K.
 * @idx must be below @nvq.
 * Returns VIRTIO_SUCCESS, or -VIRTIO_ERROR_GENERAL on a bad index.
 */
static int
virtio_hyper_dmabuf_k_vq_set(unsigned int nvq, unsigned int idx,
			     uint16_t qsize, uint32_t pfn,
			     uint16_t msix_idx, uint64_t msix_addr,
			     uint32_t msix_data)
{
	/* reject an out-of-range queue index up front */
	if (idx >= nvq) {
		WPRINTF("virtio_hyper_dmabuf: wrong idx for vq_set!\n");
		return -VIRTIO_ERROR_GENERAL;
	}

	kvqs.nvq = nvq;
	kvqs.vqs[idx].pfn = pfn;
	kvqs.vqs[idx].qsize = qsize;
	kvqs.vqs[idx].msix_idx = msix_idx;
	kvqs.vqs[idx].msix_addr = msix_addr;
	kvqs.vqs[idx].msix_data = msix_data;
	return VIRTIO_SUCCESS;
}
/* Kick off the VBS-K backend with the cached dev/vq parameters. */
static int
virtio_hyper_dmabuf_k_start(void)
{
	int ret;

	ret = vbs_kernel_start(vbs_k_hyper_dmabuf_fd, &kdev, &kvqs);
	if (ret < 0) {
		WPRINTF("virtio_hyper_dmabuf: Failed in vbs_kernel_start!\n");
		return -VIRTIO_ERROR_START;
	}

	DPRINTF("virtio_hyper_dmabuf: vbs_kernel_started!\n");
	return VIRTIO_SUCCESS;
}
/* Stop the VBS-K backend; propagates the kernel driver's status code. */
static int
virtio_hyper_dmabuf_k_stop(void)
{
	int ret = vbs_kernel_stop(vbs_k_hyper_dmabuf_fd);

	return ret;
}
/*
 * Reset the VBS-K backend and forget the cached dev/vq parameters;
 * propagates the kernel driver's status code.
 */
static int
virtio_hyper_dmabuf_k_reset(void)
{
	memset(&kvqs, 0, sizeof(kvqs));
	memset(&kdev, 0, sizeof(kdev));
	return vbs_kernel_reset(vbs_k_hyper_dmabuf_fd);
}
/*
 * Virtio reset callback: reset the user-space frontend state and, if the
 * kernel backend was running, stop and reset it as well.
 */
static void
virtio_hyper_dmabuf_reset(void *base)
{
	struct virtio_hyper_dmabuf *hyper_dmabuf;

	hyper_dmabuf = (struct virtio_hyper_dmabuf *)base;
	DPRINTF("virtio_hyper_dmabuf: device reset requested !\n");
	virtio_reset_dev(&hyper_dmabuf->base);
	if (kstatus == VIRTIO_DEV_STARTED) {
		virtio_hyper_dmabuf_k_stop();
		virtio_hyper_dmabuf_k_reset();
		kstatus = VIRTIO_DEV_INITIAL;
	}
}
/*
 * Queue-notify stub: kicks are handled by the VBS-K kernel module (the
 * kick PIO range is handed to it in set_status), so user space
 * intentionally ignores them.
 */
static void
virtio_hyper_dmabuf_no_notify(void *base, struct virtio_vq_info *vq)
{
}
/*
 * This callback gives us a chance to determine the timings
 * to kickoff VBS-K initialization: it runs on every guest status write,
 * and once the guest sets DRIVER_OK (and user-space init succeeded) the
 * device/vq configuration is pushed down to VBS-K and the kernel backend
 * is started.
 */
static void
virtio_hyper_dmabuf_set_status(void *base, uint64_t status)
{
	struct virtio_hyper_dmabuf *hyper_dmabuf;
	int nvq;
	struct msix_table_entry *mte;
	uint64_t msix_addr = 0;
	uint32_t msix_data = 0;
	int rc, i, j;

	hyper_dmabuf = (struct virtio_hyper_dmabuf *) base;
	nvq = hyper_dmabuf->base.vops->nvq;
	if (kstatus == VIRTIO_DEV_INIT_SUCCESS &&
	    (status & VIRTIO_CR_STATUS_DRIVER_OK)) {
		/* time to kickoff VBS-K side */
		/* init vdev first */
		/* NOTE(review): rc is not checked here; k_dev_set currently
		 * always returns VIRTIO_SUCCESS. */
		rc = virtio_hyper_dmabuf_k_dev_set(
			hyper_dmabuf->base.vops->name,
			hyper_dmabuf->base.dev->vmctx->vmid,
			nvq,
			hyper_dmabuf->base.negotiated_caps,
			/* currently we let VBS-K handle
			 * kick register
			 */
			hyper_dmabuf->base.dev->bar[0].addr + 16,
			2);
		for (i = 0; i < nvq; i++) {
			/* resolve MSI-X routing for this queue, if assigned */
			if (hyper_dmabuf->vq[i].msix_idx !=
			    VIRTIO_MSI_NO_VECTOR) {
				j = hyper_dmabuf->vq[i].msix_idx;
				mte = &hyper_dmabuf->base.dev->msix.table[j];
				msix_addr = mte->addr;
				msix_data = mte->msg_data;
			}
			rc = virtio_hyper_dmabuf_k_vq_set(
				nvq, i,
				hyper_dmabuf->vq[i].qsize,
				hyper_dmabuf->vq[i].pfn,
				hyper_dmabuf->vq[i].msix_idx,
				msix_addr,
				msix_data);
			if (rc < 0) {
				WPRINTF("virtio_hyper_dmabuf:");
				WPRINTF("kernel_set_vq");
				WPRINTF("failed, i %d ret %d\n", i, rc);
				return;
			}
		}
		rc = virtio_hyper_dmabuf_k_start();
		if (rc < 0) {
			WPRINTF("virtio_hyper_dmabuf:");
			WPRINTF("kernel_start() failed\n");
			kstatus = VIRTIO_DEV_START_FAILED;
		} else {
			kstatus = VIRTIO_DEV_STARTED;
		}
	}
}
/*
 * PCI vdev init hook for virtio-hyper_dmabuf (VBS-K backed).
 * Allocates the softc, opens the VBS-K char device and sets up the
 * virtio PCI plumbing; the kernel backend is actually started later,
 * from the set_status callback, once the guest driver is ready.
 * Returns 0 on success, -1 on fatal error.
 */
static int
virtio_hyper_dmabuf_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
{
	struct virtio_hyper_dmabuf *hyper_dmabuf;
	pthread_mutexattr_t attr;
	int rc;

	kstatus = VIRTIO_DEV_PRE_INIT;

	hyper_dmabuf = calloc(1, sizeof(struct virtio_hyper_dmabuf));
	if (!hyper_dmabuf) {
		WPRINTF(("virtio_hdma: calloc returns NULL\n"));
		return -1;
	}

	/* init mutex attribute properly */
	rc = pthread_mutexattr_init(&attr);
	if (rc)
		DPRINTF("mutexattr init failed with erro %d!\n", rc);
	if (fbsdrun_virtio_msix()) {
		/* MSI-X path can use the default (non-recursive) mutex */
		rc = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_DEFAULT);
		/* fix: report settype failure only when rc is non-zero */
		if (rc) {
			DPRINTF("virtio_msix: mutexattr_settype ");
			DPRINTF("failed with error %d!\n", rc);
		}
	} else {
		/* INTx path needs a recursive mutex to avoid self-deadlock */
		rc = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
		if (rc) {
			DPRINTF("virtio_intx: mutexattr_settype ");
			DPRINTF("failed with error %d!\n", rc);
		}
	}
	rc = pthread_mutex_init(&hyper_dmabuf->mtx, &attr);
	if (rc)
		DPRINTF("mutex init failed with error %d!\n", rc);

	virtio_linkup(&hyper_dmabuf->base,
		      &virtio_hyper_dmabuf_ops_k,
		      hyper_dmabuf,
		      dev,
		      hyper_dmabuf->vq);

	/* open the VBS-K device; failure is recorded but not fatal here */
	rc = virtio_hyper_dmabuf_k_init();
	if (rc < 0) {
		WPRINTF("virtio_hyper_dmabuf: VBS-K ");
		WPRINTF("init failed with error %d!\n", rc);
		kstatus = VIRTIO_DEV_INIT_FAILED;
	} else {
		kstatus = VIRTIO_DEV_INIT_SUCCESS;
	}

	hyper_dmabuf->base.mtx = &hyper_dmabuf->mtx;
	hyper_dmabuf->vq[0].qsize = HYPER_DMABUF_RINGSZ;
	hyper_dmabuf->vq[1].qsize = HYPER_DMABUF_RINGSZ;

	/* initialize config space */
	pci_set_cfgdata16(dev, PCIR_DEVICE, VIRTIO_DEV_HYPERDMABUF);
	pci_set_cfgdata16(dev, PCIR_VENDOR, INTEL_VENDOR_ID);
	pci_set_cfgdata8(dev, PCIR_CLASS, PCIC_MEMORY);
	pci_set_cfgdata16(dev, PCIR_SUBDEV_0, VIRTIO_TYPE_HYPERDMABUF);
	pci_set_cfgdata16(dev, PCIR_SUBVEND_0, INTEL_VENDOR_ID);

	if (virtio_interrupt_init(&hyper_dmabuf->base, fbsdrun_virtio_msix())) {
		free(hyper_dmabuf);	/* known non-NULL here */
		return -1;
	}
	virtio_set_io_bar(&hyper_dmabuf->base, 0);
	return 0;
}
/*
 * PCI vdev teardown hook: stop/reset the VBS-K backend if it was started,
 * close its fd and free the softc. @ctx and @opts are unused.
 */
static void
virtio_hyper_dmabuf_deinit(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
{
	if (kstatus == VIRTIO_DEV_STARTED) {
		DPRINTF("virtio_hyper_dmabuf: deinitializing\n");
		virtio_hyper_dmabuf_k_stop();
		virtio_hyper_dmabuf_k_reset();
		kstatus = VIRTIO_DEV_INITIAL;
		assert(vbs_k_hyper_dmabuf_fd >= 0);
		close(vbs_k_hyper_dmabuf_fd);
		vbs_k_hyper_dmabuf_fd = -1;
	}
	if (dev->arg)
		free((struct virtio_hyper_dmabuf *)dev->arg);
}
/*
 * PCI device-model callback table for the virtio hyper_dmabuf device.
 * Registered with the DM framework via DEFINE_PCI_DEVTYPE below.
 */
struct pci_vdev_ops pci_ops_virtio_hyper_dmabuf = {
	.class_name = "virtio-hyper_dmabuf",
	.vdev_init = virtio_hyper_dmabuf_init,
	.vdev_deinit = virtio_hyper_dmabuf_deinit,
	.vdev_barwrite = virtio_pci_write,	/* generic virtio BAR access */
	.vdev_barread = virtio_pci_read
};
DEFINE_PCI_DEVTYPE(pci_ops_virtio_hyper_dmabuf);

View File

@@ -0,0 +1,104 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/* Routines to notify the VBS-K in kernel */
#include <stdio.h>
#include <sys/ioctl.h>
#include "virtio_kernel.h"
/* Set non-zero to enable DPRINTF debug output for this file. */
static int virtio_kernel_debug;
/* Debug print: emitted only when virtio_kernel_debug is set. */
#define DPRINTF(params) do { if (virtio_kernel_debug) printf params; } while (0)
/* Warning print: always emitted. */
#define WPRINTF(params) (printf params)
/* Hand the device description (struct vbs_dev_info) to the VBS-K driver. */
static int
vbs_dev_info_set(int fd, void *arg)
{
	int err;

	err = ioctl(fd, VBS_K_SET_DEV, arg);
	return err;
}
/* Hand the virtqueue layout (struct vbs_vqs_info) to the VBS-K driver. */
static int
vbs_vqs_info_set(int fd, void *arg)
{
	int err;

	err = ioctl(fd, VBS_K_SET_VQ, arg);
	return err;
}
/* VBS-K common ops */
/* VBS-K init/reset */
int
vbs_kernel_init(int fd)
{
	/*
	 * Nothing to do yet; 'fd' is currently unused. Kept as a hook so
	 * device backends have a symmetric init/reset pair for VBS-K.
	 */
	return VIRTIO_SUCCESS;
}
int
vbs_kernel_reset(int fd)
{
	/* No kernel-side reset work is required yet; 'fd' is unused. */
	return VIRTIO_SUCCESS;
}
/*
* We need a way to start/stop vbs_k execution since guest might want to
* change the configuration of the virtio device after VBS-K has been
* initialized.
*/
/* VBS-K start/stop */
/*
 * Start VBS-K execution: push the device description first, then the
 * virtqueue layout, through the opened VBS-K char device 'fd'.
 * Returns VIRTIO_SUCCESS, a negative VIRTIO_ERROR_* code, or a negative
 * ioctl result on failure.
 */
int
vbs_kernel_start(int fd, struct vbs_dev_info *dev, struct vbs_vqs_info *vqs)
{
	int err;

	/* A valid VBS-K char-device fd is required before anything else. */
	if (fd < 0) {
		WPRINTF(("%s: fd < 0\n", __func__));
		return -VIRTIO_ERROR_FD_OPEN_FAILED;
	}

	err = vbs_dev_info_set(fd, dev);
	if (err < 0) {
		WPRINTF(("vbs_kernel_set_dev failed: ret %d\n", err));
		return err;
	}

	err = vbs_vqs_info_set(fd, vqs);
	if (err < 0) {
		WPRINTF(("vbs_kernel_set_vqs failed: ret %d\n", err));
		return err;
	}

	return VIRTIO_SUCCESS;
}
int
vbs_kernel_stop(int fd)
{
	/* No kernel-side teardown is needed yet; just trace the call. */
	DPRINTF(("%s\n", __func__));
	return VIRTIO_SUCCESS;
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,482 @@
/*-
* Copyright (c) 2014 Nahanni Systems Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer
* in this position and unchanged.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* virtio entropy device emulation.
* Randomness is sourced from /dev/random which does not block
* once it has been seeded at bootup.
*/
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/uio.h>
#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <sysexits.h>
#include "dm.h"
#include "pci_core.h"
#include "virtio.h"
#include "virtio_kernel.h"
#include "vmmapi.h" /* for vmctx */
#define VIRTIO_RND_RINGSZ 64
/*
* Per-device struct
*/
struct virtio_rnd {
	/* VBS-U variables */
	struct virtio_base base;	/* common virtio state; used via &rnd->base */
	struct virtio_vq_info vq;	/* the single request virtqueue */
	pthread_mutex_t mtx;		/* installed as base.mtx in init */
	uint64_t cfg;
	int fd;				/* /dev/random fd, kept open while emulating */
	/* VBS-K variables */
	struct {
		enum VBS_K_STATUS status;	/* VBS-K backend lifecycle state */
		int fd;				/* /dev/vbs_rng char-device fd */
		struct vbs_dev_info dev;	/* device info handed to VBS-K */
		struct vbs_vqs_info vqs;	/* virtqueue info handed to VBS-K */
	} vbs_k;
};
/* Set non-zero to enable DPRINTF debug output for this file. */
static int virtio_rnd_debug;
#define DPRINTF(params) do { if (virtio_rnd_debug) printf params; } while (0)
#define WPRINTF(params) (printf params)
/* VBS-K interface functions */
static int virtio_rnd_kernel_init(struct virtio_rnd *); /* open VBS-K chardev */
static int virtio_rnd_kernel_start(struct virtio_rnd *);
static int virtio_rnd_kernel_stop(struct virtio_rnd *);
static int virtio_rnd_kernel_reset(struct virtio_rnd *);
static int virtio_rnd_kernel_dev_set(struct vbs_dev_info *kdev,
				     const char *name, int vmid, int nvq,
				     uint32_t feature, uint64_t pio_start,
				     uint64_t pio_len);
static int virtio_rnd_kernel_vq_set(struct vbs_vqs_info *kvqs, unsigned int nvq,
				    unsigned int idx, uint16_t qsize,
				    uint32_t pfn, uint16_t msix_idx,
				    uint64_t msix_addr, uint32_t msix_data);
/* VBS-U virtio_ops */
static void virtio_rnd_reset(void *);
static void virtio_rnd_notify(void *, struct virtio_vq_info *);
/* Callback table used when the request queue is served in user space. */
static struct virtio_ops virtio_rnd_ops = {
	"virtio_rnd",		/* our name */
	1,			/* we support 1 virtqueue */
	0,			/* config reg size */
	virtio_rnd_reset,	/* reset */
	virtio_rnd_notify,	/* device-wide qnotify */
	NULL,			/* read virtio config */
	NULL,			/* write virtio config */
	NULL,			/* apply negotiated features */
	NULL,			/* called on guest set status */
	0,			/* our capabilities */
};
/* VBS-K virtio_ops */
static void virtio_rnd_k_no_notify(void *, struct virtio_vq_info *);
static void virtio_rnd_k_set_status(void *, uint64_t);
/*
 * Callback table used when the request queue is served by the in-kernel
 * backend: notify must never fire, and set_status drives VBS-K startup.
 */
static struct virtio_ops virtio_rnd_ops_k = {
	"virtio_rnd",		/* our name */
	1,			/* we support 1 virtqueue */
	0,			/* config reg size */
	virtio_rnd_reset,	/* reset */
	virtio_rnd_k_no_notify,	/* device-wide qnotify */
	NULL,			/* read virtio config */
	NULL,			/* write virtio config */
	NULL,			/* apply negotiated features */
	virtio_rnd_k_set_status,/* called on guest set status */
	0,			/* our capabilities */
};
/* VBS-K interface function implementations */
/*
 * In VBS-K mode queue kicks are handled in the kernel, so this
 * user-space notify callback should never be reached.
 */
static void
virtio_rnd_k_no_notify(void *base, struct virtio_vq_info *vq)
{
	WPRINTF(("virtio_rnd: VBS-K mode! Should not reach here!!\n"));
}
/*
* This callback gives us a chance to determine the timings
* to kickoff VBS-K initialization
*/
/*
 * Guest status-write hook for VBS-K mode. When the FE driver sets
 * DRIVER_OK and the chardev was opened (INIT_SUCCESS), push the device
 * and virtqueue descriptions to VBS-K and start kernel-side processing.
 */
static void
virtio_rnd_k_set_status(void *base, uint64_t status)
{
	struct virtio_rnd *rnd;
	int nvq;
	struct msix_table_entry *mte;
	uint64_t msix_addr = 0;
	uint32_t msix_data = 0;
	int rc, i, j;

	rnd = base;
	nvq = rnd->base.vops->nvq;
	if (rnd->vbs_k.status == VIRTIO_DEV_INIT_SUCCESS &&
	    (status & VIRTIO_CR_STATUS_DRIVER_OK)) {
		/* time to kickoff VBS-K side */
		/* init vdev first */
		rc = virtio_rnd_kernel_dev_set(&rnd->vbs_k.dev,
					       rnd->base.vops->name,
					       rnd->base.dev->vmctx->vmid,
					       nvq,
					       rnd->base.negotiated_caps,
					       /*
						* currently we let VBS-K handle
						* kick register
						*/
					       rnd->base.dev->bar[0].addr + 16,
					       2);
		/*
		 * NOTE(review): rc from dev_set is not checked here; as
		 * implemented, virtio_rnd_kernel_dev_set can only return
		 * VIRTIO_SUCCESS today.
		 */
		for (i = 0; i < nvq; i++) {
			/* Resolve MSI-X addr/data for this vq, if assigned. */
			if (rnd->vq.msix_idx != VIRTIO_MSI_NO_VECTOR) {
				j = rnd->vq.msix_idx;
				mte = &rnd->base.dev->msix.table[j];
				msix_addr = mte->addr;
				msix_data = mte->msg_data;
			}
			rc = virtio_rnd_kernel_vq_set(&rnd->vbs_k.vqs,
						      nvq, i,
						      rnd->vq.qsize,
						      rnd->vq.pfn,
						      rnd->vq.msix_idx,
						      msix_addr,
						      msix_data);
			if (rc < 0) {
				WPRINTF(("rnd_kernel_set_vq fail,i %d ret %d\n",
					 i, rc));
				return;
			}
		}
		rc = virtio_rnd_kernel_start(rnd);
		if (rc < 0) {
			WPRINTF(("virtio_rnd_kernel_start() failed\n"));
			rnd->vbs_k.status = VIRTIO_DEV_START_FAILED;
		} else {
			rnd->vbs_k.status = VIRTIO_DEV_STARTED;
		}
	}
}
/*
* Called in virtio_rnd_init(), where the initialization of the
* PCIe device emulation is still on the way by device model.
*/
/*
 * Open the VBS-K char device for virtio-rnd and clear the dev/vqs info
 * that will later be handed to the kernel backend.
 * Returns VIRTIO_SUCCESS or -VIRTIO_ERROR_FD_OPEN_FAILED.
 */
static int
virtio_rnd_kernel_init(struct virtio_rnd *rnd)
{
	/* vbs_k.fd was zeroed by calloc in virtio_rnd_init; it must not
	 * already hold an open descriptor.
	 */
	assert(rnd->vbs_k.fd == 0);

	rnd->vbs_k.fd = open("/dev/vbs_rng", O_RDWR);
	if (rnd->vbs_k.fd < 0) {
		/* Fixed: report the device actually opened (/dev/vbs_rng),
		 * not "/dev/vbs_k_rng".
		 */
		WPRINTF(("Failed to open /dev/vbs_rng!\n"));
		return -VIRTIO_ERROR_FD_OPEN_FAILED;
	}
	DPRINTF(("Open /dev/vbs_rng success!\n"));

	/* Start from a clean slate for the info handed to VBS-K later. */
	memset(&rnd->vbs_k.dev, 0, sizeof(struct vbs_dev_info));
	memset(&rnd->vbs_k.vqs, 0, sizeof(struct vbs_vqs_info));
	return VIRTIO_SUCCESS;
}
/*
 * Fill in the vbs_dev_info structure that describes this device to the
 * VBS-K backend. Always returns VIRTIO_SUCCESS.
 */
static int
virtio_rnd_kernel_dev_set(struct vbs_dev_info *kdev, const char *name,
			  int vmid, int nvq, uint32_t feature,
			  uint64_t pio_start, uint64_t pio_len)
{
	/* FE driver has set VIRTIO_CONFIG_S_DRIVER_OK */
	/* init kdev */
	strncpy(kdev->name, name, VBS_NAME_LEN);
	/* Fixed: strncpy leaves the buffer unterminated when the source is
	 * VBS_NAME_LEN chars or longer; force NUL termination.
	 */
	kdev->name[VBS_NAME_LEN - 1] = '\0';
	kdev->vmid = vmid;
	kdev->nvq = nvq;
	kdev->negotiated_features = feature;
	kdev->pio_range_start = pio_start;
	kdev->pio_range_len = pio_len;
	return VIRTIO_SUCCESS;
}
/*
 * Record the layout of virtqueue 'idx' (of 'nvq' total) in the
 * vbs_vqs_info structure destined for VBS-K.
 * Returns VIRTIO_SUCCESS or -VIRTIO_ERROR_GENERAL on a bad index.
 */
static int
virtio_rnd_kernel_vq_set(struct vbs_vqs_info *kvqs, unsigned int nvq,
			 unsigned int idx, uint16_t qsize, uint32_t pfn,
			 uint16_t msix_idx, uint64_t msix_addr,
			 uint32_t msix_data)
{
	/* FE driver has set VIRTIO_CONFIG_S_DRIVER_OK */
	if (idx >= nvq) {
		WPRINTF(("%s: wrong idx!\n", __func__));
		return -VIRTIO_ERROR_GENERAL;
	}

	/* init kvqs */
	kvqs->nvq = nvq;
	kvqs->vqs[idx].qsize = qsize;
	kvqs->vqs[idx].pfn = pfn;
	kvqs->vqs[idx].msix_idx = msix_idx;
	kvqs->vqs[idx].msix_addr = msix_addr;
	kvqs->vqs[idx].msix_data = msix_data;
	return VIRTIO_SUCCESS;
}
/* Hand the prepared dev/vqs info to VBS-K and kick off kernel processing. */
static int
virtio_rnd_kernel_start(struct virtio_rnd *rnd)
{
	int err;

	err = vbs_kernel_start(rnd->vbs_k.fd, &rnd->vbs_k.dev,
			       &rnd->vbs_k.vqs);
	if (err < 0) {
		WPRINTF(("Failed in vbs_k_start!\n"));
		return -VIRTIO_ERROR_START;
	}
	DPRINTF(("vbs_k_started!\n"));
	return VIRTIO_SUCCESS;
}
/* Stop the VBS-K backend for this device; returns vbs_kernel_stop()'s code. */
static int
virtio_rnd_kernel_stop(struct virtio_rnd *rnd)
{
	/* device specific cleanups here */
	return vbs_kernel_stop(rnd->vbs_k.fd);
}
/* Drop the cached dev/vqs info, then reset the VBS-K backend. */
static int
virtio_rnd_kernel_reset(struct virtio_rnd *rnd)
{
	memset(&rnd->vbs_k.dev, 0, sizeof(rnd->vbs_k.dev));
	memset(&rnd->vbs_k.vqs, 0, sizeof(rnd->vbs_k.vqs));

	return vbs_kernel_reset(rnd->vbs_k.fd);
}
/*
 * Device reset callback (both VBS-U and VBS-K modes). Resets the common
 * virtio state and, if the kernel backend was running, winds it back to
 * the INITIAL state as well.
 */
static void
virtio_rnd_reset(void *base)
{
	struct virtio_rnd *rnd = base;

	DPRINTF(("virtio_rnd: device reset requested !\n"));
	virtio_reset_dev(&rnd->base);

	DPRINTF(("virtio_rnd: kstatus %d\n", rnd->vbs_k.status));
	if (rnd->vbs_k.status == VIRTIO_DEV_STARTED) {
		DPRINTF(("virtio_rnd: VBS-K reset requested!\n"));
		virtio_rnd_kernel_stop(rnd);
		virtio_rnd_kernel_reset(rnd);
		rnd->vbs_k.status = VIRTIO_DEV_INITIAL;
	}
}
/*
 * VBS-U queue-notify callback: fill every available descriptor chain
 * with bytes read from /dev/random, then signal the guest.
 */
static void
virtio_rnd_notify(void *base, struct virtio_vq_info *vq)
{
	struct iovec iov;
	struct virtio_rnd *rnd;
	int len;
	uint16_t idx;

	rnd = base;
	/* Without an open /dev/random fd we cannot supply data; complete
	 * the chains without raising an interrupt.
	 */
	if (rnd->fd < 0) {
		vq_endchains(vq, 0);
		return;
	}
	while (vq_has_descs(vq)) {
		vq_getchain(vq, &idx, &iov, 1, NULL);
		len = read(rnd->fd, iov.iov_base, iov.iov_len);
		DPRINTF(("%s: %d\r\n", __func__, len));
		/* Catastrophe if unable to read from /dev/random */
		assert(len > 0);
		/*
		 * Release this chain and handle more
		 */
		vq_relchain(vq, idx, len);
	}
	vq_endchains(vq, 1);	/* Generate interrupt if appropriate. */
}
/*
 * Device init callback. Parses "kernel=on" from opts to select the VBS-K
 * backend, verifies /dev/random is seeded and non-blocking, links up the
 * virtio framework (VBS-K or VBS-U fallback) and initializes PCI config
 * space. Returns 0 on success, -1 on failure.
 *
 * Fixed: the /dev/random fd was leaked on every failure path (read
 * failure, calloc failure, interrupt-init failure); it is now closed.
 */
static int
virtio_rnd_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
{
	struct virtio_rnd *rnd;
	int fd;
	int len;
	uint8_t v;
	pthread_mutexattr_t attr;
	int rc;
	char *opt;
	char *vbs_k_opt = NULL;
	enum VBS_K_STATUS kstat = VIRTIO_DEV_INITIAL;

	/* Look for a "kernel=on" option selecting the VBS-K backend. */
	while ((opt = strsep(&opts, ",")) != NULL) {
		/* vbs_k_opt should be kernel=on */
		vbs_k_opt = strsep(&opt, "=");
		DPRINTF(("vbs_k_opt is %s\n", vbs_k_opt));
		if (opt != NULL) {
			if (strncmp(opt, "on", 2) == 0)
				kstat = VIRTIO_DEV_PRE_INIT;
			WPRINTF(("virtio_rnd: VBS-K initializing..."));
		}
	}

	/*
	 * Should always be able to open /dev/random.
	 */
	fd = open("/dev/random", O_RDONLY | O_NONBLOCK);
	assert(fd >= 0);

	/*
	 * Check that device is seeded and non-blocking.
	 */
	len = read(fd, &v, sizeof(v));
	if (len <= 0) {
		WPRINTF(("virtio_rnd: /dev/random not ready, read(): %d", len));
		close(fd);		/* don't leak the fd on failure */
		return -1;
	}

	rnd = calloc(1, sizeof(struct virtio_rnd));
	if (!rnd) {
		WPRINTF(("virtio_rnd: calloc returns NULL\n"));
		close(fd);		/* don't leak the fd on failure */
		return -1;
	}
	rnd->vbs_k.status = kstat;

	/* init mutex attribute properly */
	rc = pthread_mutexattr_init(&attr);
	if (rc)
		DPRINTF(("mutexattr init failed with erro %d!\n", rc));
	if (fbsdrun_virtio_msix()) {
		rc = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_DEFAULT);
		if (rc)
			DPRINTF(("virtio_msix: mutexattr_settype failed with "
				 "error %d!\n", rc));
	} else {
		/* INTx mode shares one vector: a recursive lock is needed. */
		rc = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
		if (rc)
			DPRINTF(("virtio_intx: mutexattr_settype failed with "
				 "error %d!\n", rc));
	}
	rc = pthread_mutex_init(&rnd->mtx, &attr);
	if (rc)
		DPRINTF(("mutex init failed with error %d!\n", rc));

	if (rnd->vbs_k.status == VIRTIO_DEV_PRE_INIT) {
		DPRINTF(("%s: VBS-K option detected!\n", __func__));
		virtio_linkup(&rnd->base, &virtio_rnd_ops_k,
			      rnd, dev, &rnd->vq);
		rc = virtio_rnd_kernel_init(rnd);
		if (rc < 0) {
			WPRINTF(("virtio_rnd: VBS-K init failed,error %d!\n",
				 rc));
			rnd->vbs_k.status = VIRTIO_DEV_INIT_FAILED;
		} else {
			rnd->vbs_k.status = VIRTIO_DEV_INIT_SUCCESS;
		}
	}

	/*
	 * Fall back to the user-space (VBS-U) backend when VBS-K was not
	 * requested or failed to initialize. (The original additionally
	 * tested "status == VIRTIO_DEV_INITIAL", which this condition
	 * already implies.)
	 */
	if (rnd->vbs_k.status != VIRTIO_DEV_INIT_SUCCESS) {
		DPRINTF(("%s: fallback to VBS-U...\n", __func__));
		virtio_linkup(&rnd->base, &virtio_rnd_ops, rnd, dev, &rnd->vq);
	}

	rnd->base.mtx = &rnd->mtx;
	rnd->vq.qsize = VIRTIO_RND_RINGSZ;
	/* keep /dev/random opened while emulating */
	rnd->fd = fd;

	/* initialize config space */
	pci_set_cfgdata16(dev, PCIR_DEVICE, VIRTIO_DEV_RANDOM);
	pci_set_cfgdata16(dev, PCIR_VENDOR, VIRTIO_VENDOR);
	pci_set_cfgdata8(dev, PCIR_CLASS, PCIC_CRYPTO);
	pci_set_cfgdata16(dev, PCIR_SUBDEV_0, VIRTIO_TYPE_ENTROPY);
	pci_set_cfgdata16(dev, PCIR_SUBVEND_0, VIRTIO_VENDOR);

	if (virtio_interrupt_init(&rnd->base, fbsdrun_virtio_msix())) {
		close(fd);		/* don't leak the fd on failure */
		free(rnd);		/* rnd is non-NULL here */
		return -1;
	}
	virtio_set_io_bar(&rnd->base, 0);
	return 0;
}
/*
 * Device deinit callback: stop/reset the VBS-K backend if it was started,
 * close the file descriptors held by the device, and free the state.
 *
 * Fixed: the /dev/random fd opened in virtio_rnd_init was never closed,
 * leaking one descriptor per device teardown.
 */
static void
virtio_rnd_deinit(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
{
	struct virtio_rnd *rnd;

	rnd = dev->arg;
	if (rnd == NULL) {
		DPRINTF(("%s: rnd is NULL\n", __func__));
		return;
	}

	if (rnd->vbs_k.status == VIRTIO_DEV_STARTED) {
		DPRINTF(("%s: deinit virtio_rnd_k!\n", __func__));
		virtio_rnd_kernel_stop(rnd);
		virtio_rnd_kernel_reset(rnd);
		rnd->vbs_k.status = VIRTIO_DEV_INITIAL;
		assert(rnd->vbs_k.fd >= 0);
		close(rnd->vbs_k.fd);
		rnd->vbs_k.fd = -1;
	}

	/* Close the /dev/random fd kept open since init. */
	if (rnd->fd >= 0) {
		close(rnd->fd);
		rnd->fd = -1;
	}

	DPRINTF(("%s: free struct virtio_rnd!\n", __func__));
	free(rnd);
}
/* PCI device-model callback table for the virtio entropy device. */
struct pci_vdev_ops pci_ops_virtio_rnd = {
	.class_name = "virtio-rnd",
	.vdev_init = virtio_rnd_init,
	.vdev_deinit = virtio_rnd_deinit,
	.vdev_barwrite = virtio_pci_write,	/* generic virtio BAR access */
	.vdev_barread = virtio_pci_read
};
DEFINE_PCI_DEVTYPE(pci_ops_virtio_rnd);

View File

@@ -0,0 +1,361 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
* WatchDog Timer (WDT): emulate i6300esb PCI wdt Intel SOC devices,
* used to monitor guest OS
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>
#include <time.h>
#include <assert.h>
#include <stdbool.h>
#include "vmm.h"
#include "vmmapi.h"
#include "mevent.h"
#include "pci_core.h"
#define WDT_REG_BAR_SIZE 0x10
#define PCI_VENDOR_ID_INTEL 0x8086
#define PCI_DEVICE_ID_INTEL_ESB 0x25ab
#define ESB_CONFIG_REG 0x60 /* Config register*/
#define ESB_LOCK_REG 0x68 /* WDT lock register*/
/* Memory mapped registers */
#define ESB_TIMER1_REG 0x00 /* Timer1 value after each reset */
#define ESB_TIMER2_REG 0x04 /* Timer2 value after each reset */
#define ESB_RELOAD_REG 0x0c /* Reload register */
#define ESB_WDT_ENABLE (0x01 << 1) /* Enable WDT */
#define ESB_WDT_LOCK (0x01 << 0) /* Lock (nowayout) */
#define ESB_WDT_REBOOT (0x01 << 5) /* Enable reboot on timeout */
#define ESB_WDT_RELOAD (0x01 << 8) /* Ping/kick dog */
#define TIMER_TO_SECONDS(val) (val >> 9)
/* Magic constants */
#define ESB_UNLOCK1 0x80 /* Step 1 to unlock reset registers */
#define ESB_UNLOCK2 0x86 /* Step 2 to unlock reset registers */
#define WDT_TIMER_SIG 0x55AA
#define DEFAULT_MAX_TIMER_VAL 0x000FFFFF
/* for debug */
/* #define WDT_DEBUG */
#ifdef WDT_DEBUG
static FILE * dbg_file;
#define DPRINTF(format, args...) \
do { fprintf(dbg_file, format, args); fflush(dbg_file); } while (0)
#else
#define DPRINTF(format, arg...)
#endif
/* Aggregate state for the single emulated i6300esb watchdog instance. */
struct info_wdt {
	bool reboot_enabled;/* "reboot" on wdt out */
	bool locked; /* If true, enabled field cannot be changed. */
	bool wdt_enabled; /* If true, watchdog is enabled. */
	bool timer_created;	/* POSIX timer exists (see start_wdt_timer) */
	timer_t wdt_timerid;
	uint32_t timer1_val;	/* stage-1 timeout, raw register value */
	uint32_t timer2_val;	/* stage-2 timeout, raw register value */
	int stage; /* stage 1 or 2. */
	int unlock_state; /* unlock states 0 -> 1 -> 2 */
};
static struct info_wdt wdt_state;
static void start_wdt_timer(void);
/*
* WDT timer, start when guest OS start watchdog service; and re-start for
* each dog-kick / ping action if time out, it will trigger reboot or other
* action to guest OS
*/
static void
wdt_expired_thread(union sigval v)
{
	DPRINTF("wdt timer out! id=0x%x, stage=%d, reboot=%d\n",
		v.sival_int, wdt_state.stage, wdt_state.reboot_enabled);

	if (wdt_state.stage == 1) {
		/* First expiry: move to stage 2 and grant one more timeout
		 * before taking action.
		 */
		wdt_state.stage = 2;
		start_wdt_timer();
		return;
	}

	if (wdt_state.reboot_enabled) {
		/* watchdog timer out, set the uos to reboot */
		vm_set_suspend_mode(VM_SUSPEND_RESET);
		mevent_notify();
	} else {
		/* if not need reboot, just loop timer */
		wdt_state.stage = 1;
		start_wdt_timer();
	}
}
/*
 * Disarm the emulated watchdog timer, if one has been created.
 * Fixed: declared (void) for consistency with the file's other
 * prototypes (e.g. the start_wdt_timer forward declaration).
 */
static void
stop_wdt_timer(void)
{
	struct itimerspec timer_val;

	DPRINTF("%s: timer_created=%d\n", __func__, wdt_state.timer_created);

	if (!wdt_state.timer_created)
		return;

	/* An all-zero it_value disarms a POSIX timer. */
	memset(&timer_val, 0, sizeof(struct itimerspec));
	timer_settime(wdt_state.wdt_timerid, 0, &timer_val, NULL);
}
/*
 * Re-arm the already-created watchdog timer with a new one-shot timeout.
 * On timer_settime failure the timer is destroyed and the DM exits.
 */
static void
reset_wdt_timer(int seconds)
{
	struct itimerspec timer_val;

	DPRINTF("%s: time=%d\n", __func__, seconds);
	/* Disarm first, then re-arm with the new timeout. */
	memset(&timer_val, 0, sizeof(struct itimerspec));
	timer_settime(wdt_state.wdt_timerid, 0, &timer_val, NULL);
	timer_val.it_value.tv_sec = seconds;
	if (timer_settime(wdt_state.wdt_timerid, 0, &timer_val, NULL) == -1) {
		perror("timer_settime failed.\n");
		timer_delete(wdt_state.wdt_timerid);
		wdt_state.timer_created = 0;
		exit(-1);
	}
}
/*
 * (Re)start the watchdog countdown for the current stage. Creates the
 * POSIX timer on first use; afterwards just re-arms it. A no-op while
 * the guest has not enabled the watchdog.
 */
static void
start_wdt_timer(void)
{
	int seconds;
	struct sigevent sig_evt;
	struct itimerspec timer_val;

	if (!wdt_state.wdt_enabled)
		return;

	/* Convert the raw timer register value to seconds. */
	if (wdt_state.stage == 1)
		seconds = TIMER_TO_SECONDS(wdt_state.timer1_val);
	else
		seconds = TIMER_TO_SECONDS(wdt_state.timer2_val);
	DPRINTF("%s: created=%d, time=%d\n", __func__,
		wdt_state.timer_created, seconds);
	memset(&sig_evt, 0, sizeof(struct sigevent));

	/* Re-arm the existing timer rather than creating a second one. */
	if (wdt_state.timer_created) {
		reset_wdt_timer(seconds);
		return;
	}

	/* First use: create a timer that fires wdt_expired_thread. */
	sig_evt.sigev_value.sival_int = WDT_TIMER_SIG;
	sig_evt.sigev_notify = SIGEV_THREAD;
	sig_evt.sigev_notify_function = wdt_expired_thread;
	if (timer_create(CLOCK_REALTIME, &sig_evt,
			&wdt_state.wdt_timerid) == -1) {
		perror("timer_create failed.\n");
		exit(-1);
	}
	memset(&timer_val, 0, sizeof(struct itimerspec));
	timer_val.it_value.tv_sec = seconds;
	if (timer_settime(wdt_state.wdt_timerid, 0, &timer_val, NULL) == -1) {
		perror("timer_settime failed.\n");
		timer_delete(wdt_state.wdt_timerid);
		exit(-1);
	}
	wdt_state.timer_created = true;
}
/*
 * PCI config-space read hook. Only the one-byte lock register is
 * emulated here; returning non-zero defers everything else to the
 * generic config-space handling.
 */
static int
pci_wdt_cfg_read(struct vmctx *ctx, int vcpu, struct pci_vdev *dev,
		 int offset, int bytes, uint32_t *rv)
{
	DPRINTF("%s: offset = %x, len = %d\n", __func__, offset, bytes);

	if (offset != ESB_LOCK_REG || bytes != 1)
		return 1;

	/* Reflect the current lock/enable bits back to the guest. */
	*rv = (wdt_state.locked ? ESB_WDT_LOCK : 0) |
	      (wdt_state.wdt_enabled ? ESB_WDT_ENABLE : 0);
	return 0;
}
/*
 * PCI config-space write hook. Handles the 16-bit config register and
 * the 8-bit lock register; returning non-zero defers everything else to
 * the generic config-space handling.
 */
static int
pci_wdt_cfg_write(struct vmctx *ctx, int vcpu, struct pci_vdev *dev,
		  int offset, int bytes, uint32_t val)
{
	bool was_enabled;

	DPRINTF("%s: offset = %x, len = %d, val = 0x%x\n",
		__func__, offset, bytes, val);

	if (offset == ESB_CONFIG_REG && bytes == 2) {
		/* ESB_WDT_REBOOT bit clear -> reboot the guest on timeout */
		wdt_state.reboot_enabled = ((val & ESB_WDT_REBOOT) == 0);
		return 0;
	}

	if (offset == ESB_LOCK_REG && bytes == 1) {
		/* Once locked, lock/enable bits can no longer be changed. */
		if (!wdt_state.locked) {
			wdt_state.locked = ((val & ESB_WDT_LOCK) != 0);
			was_enabled = wdt_state.wdt_enabled;
			wdt_state.wdt_enabled = ((val & ESB_WDT_ENABLE) != 0);
			if (!was_enabled && wdt_state.wdt_enabled) {
				/* off -> on: restart from stage 1 */
				wdt_state.stage = 1;
				start_wdt_timer();
			} else if (!wdt_state.wdt_enabled)
				stop_wdt_timer();
		}
		return 0;
	}

	return 1;
}
/*
 * MMIO BAR write hook. Implements the i6300esb-style unlock sequence:
 * writing ESB_UNLOCK1 then ESB_UNLOCK2 to the reload register arms
 * unlock_state (0 -> 1 -> 2); only then can the dog be kicked or the
 * timer registers rewritten.
 */
static void
pci_wdt_bar_write(struct vmctx *ctx, int vcpu, struct pci_vdev *dev,
		  int baridx, uint64_t offset, int size, uint64_t value)
{
	assert(baridx == 0);
	DPRINTF("%s: addr = 0x%x, val = 0x%x, size=%d\n",
		__func__, (int) offset, (int)value, size);

	if (offset == ESB_RELOAD_REG) {
		assert(size == 2);
		if (value == ESB_UNLOCK1)
			wdt_state.unlock_state = 1;
		else if ((value == ESB_UNLOCK2)
			&& (wdt_state.unlock_state == 1))
			wdt_state.unlock_state = 2;
		else if ((wdt_state.unlock_state == 2)
			&& (value & ESB_WDT_RELOAD)) {
			/* Dog kicked: restart countdown from stage 1. */
			wdt_state.stage = 1;
			start_wdt_timer();
			wdt_state.unlock_state = 0;
		}
	} else if (wdt_state.unlock_state == 2) {
		/* Timer registers are writable only while unlocked. */
		if (offset == ESB_TIMER1_REG)
			wdt_state.timer1_val = value & DEFAULT_MAX_TIMER_VAL;
		else if (offset == ESB_TIMER2_REG)
			wdt_state.timer2_val = value & DEFAULT_MAX_TIMER_VAL;
		wdt_state.unlock_state = 0;
	}
}
/*
 * MMIO BAR read hook: every register reads back as zero; only writes
 * carry state in this emulation.
 */
uint64_t
pci_wdt_bar_read(struct vmctx *ctx, int vcpu, struct pci_vdev *dev,
		 int baridx, uint64_t offset, int size)
{
	assert(baridx == 0);
	DPRINTF("%s: addr = 0x%x, size=%d\n", __func__, (int) offset, size);
	return 0;
}
/*
 * Device init callback: allocate the MMIO BAR, set PCI IDs for the
 * Intel i6300esb watchdog, and reset the global state to defaults.
 * Returns 0 on success, -1 if a second instance is requested.
 */
static int
pci_wdt_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
{
	/*the wdt just has one inistance */
	/* NOTE(review): this single-instance check is a heuristic — it
	 * relies on reboot_enabled/timer1_val being non-zero only after a
	 * previous init.
	 */
	if (wdt_state.reboot_enabled && wdt_state.timer1_val) {
		perror("wdt can't be created twice, please check!");
		return -1;
	}

	/* init wdt state info */
	wdt_state.reboot_enabled = true;
	wdt_state.locked = false;
	wdt_state.timer_created = false;
	wdt_state.wdt_enabled = false;

	wdt_state.stage = 1;
	wdt_state.timer1_val = DEFAULT_MAX_TIMER_VAL;
	wdt_state.timer2_val = DEFAULT_MAX_TIMER_VAL;
	wdt_state.unlock_state = 0;

	pci_emul_alloc_bar(dev, 0, PCIBAR_MEM32, WDT_REG_BAR_SIZE);

	/* initialize config space */
	pci_set_cfgdata16(dev, PCIR_VENDOR, PCI_VENDOR_ID_INTEL);
	pci_set_cfgdata16(dev, PCIR_DEVICE, PCI_DEVICE_ID_INTEL_ESB);
	pci_set_cfgdata8(dev, PCIR_CLASS, PCIC_BASEPERIPH);
	pci_set_cfgdata8(dev, PCIR_SUBCLASS, PCIS_BASEPERIPH_OTHER);

#ifdef WDT_DEBUG
	dbg_file = fopen("/tmp/wdt_log", "w+");
#endif

	DPRINTF("%s: iobar =0x%lx, size=%ld\n", __func__,
		dev->bar[0].addr, dev->bar[0].size);
	return 0;
}
/* Device deinit: disarm any pending timer and zero the global state. */
static void
pci_wdt_deinit(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
{
	stop_wdt_timer();
	memset(&wdt_state, 0, sizeof(wdt_state));
}
/* PCI device-model callback table for the i6300esb watchdog emulation. */
struct pci_vdev_ops pci_ops_wdt = {
	.class_name = "wdt-i6300esb",
	.vdev_init = pci_wdt_init,
	.vdev_deinit = pci_wdt_deinit,
	.vdev_cfgwrite = pci_wdt_cfg_write,
	.vdev_cfgread = pci_wdt_cfg_read,
	.vdev_barwrite = pci_wdt_bar_write,
	.vdev_barread = pci_wdt_bar_read
};
DEFINE_PCI_DEVTYPE(pci_ops_wdt);

2917
devicemodel/hw/pci/xhci.c Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,572 @@
/*-
* Copyright (c) 2014 Tycho Nightingale <tycho.nightingale@pluribusnetworks.com>
* Copyright (c) 2015 Nahanni Systems Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#include <sys/types.h>
#include <stdint.h>
#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <pthread.h>
#include "acpi.h"
#include "inout.h"
#include "pci_core.h"
#include "irq.h"
#include "lpc.h"
#include "ps2kbd.h"
#include "ps2mouse.h"
#include "vmm.h"
#include "vmmapi.h"
#define KBD_DATA_PORT 0x60
#define KBD_STS_CTL_PORT 0x64
#define KBDC_RESET 0xfe
#define KBD_DEV_IRQ 1
#define AUX_DEV_IRQ 12
/* controller commands */
#define KBDC_SET_COMMAND_BYTE 0x60
#define KBDC_GET_COMMAND_BYTE 0x20
#define KBDC_DISABLE_AUX_PORT 0xa7
#define KBDC_ENABLE_AUX_PORT 0xa8
#define KBDC_TEST_AUX_PORT 0xa9
#define KBDC_TEST_CTRL 0xaa
#define KBDC_TEST_KBD_PORT 0xab
#define KBDC_DISABLE_KBD_PORT 0xad
#define KBDC_ENABLE_KBD_PORT 0xae
#define KBDC_READ_INPORT 0xc0
#define KBDC_READ_OUTPORT 0xd0
#define KBDC_WRITE_OUTPORT 0xd1
#define KBDC_WRITE_KBD_OUTBUF 0xd2
#define KBDC_WRITE_AUX_OUTBUF 0xd3
#define KBDC_WRITE_TO_AUX 0xd4
/* controller command byte (set by KBDC_SET_COMMAND_BYTE) */
#define KBD_TRANSLATION 0x40
#define KBD_SYS_FLAG_BIT 0x04
#define KBD_DISABLE_KBD_PORT 0x10
#define KBD_DISABLE_AUX_PORT 0x20
#define KBD_ENABLE_AUX_INT 0x02
#define KBD_ENABLE_KBD_INT 0x01
#define KBD_KBD_CONTROL_BITS (KBD_DISABLE_KBD_PORT | KBD_ENABLE_KBD_INT)
#define KBD_AUX_CONTROL_BITS (KBD_DISABLE_AUX_PORT | KBD_ENABLE_AUX_INT)
/* controller status bits */
#define KBDS_KBD_BUFFER_FULL 0x01
#define KBDS_SYS_FLAG 0x04
#define KBDS_CTRL_FLAG 0x08
#define KBDS_AUX_BUFFER_FULL 0x20
/* controller output port */
#define KBDO_KBD_OUTFULL 0x10
#define KBDO_AUX_OUTFULL 0x20
#define RAMSZ 32
#define FIFOSZ 15
#define CTRL_CMD_FLAG 0x8000
/* Keyboard-side state: a circular output FIFO plus IRQ bookkeeping. */
struct kbd_dev {
	bool irq_active;	/* IRQ asserted via vm_isa_pulse_irq */
	int irq;		/* ISA IRQ line to pulse */
	uint8_t buffer[FIFOSZ];	/* circular output buffer */
	int brd, bwr;		/* read / write indices into buffer */
	int bcnt;		/* number of queued bytes */
};
/* Mouse (aux) side IRQ bookkeeping; data lives in the ps2mouse layer. */
struct aux_dev {
	bool irq_active;
	int irq;		/* ISA IRQ line to pulse */
};
/* Emulated 8042 keyboard controller instance. */
struct atkbdc_base {
	struct vmctx *ctx;
	pthread_mutex_t mtx;	/* serializes port 0x60/0x64 handlers */
	struct ps2kbd_info *ps2kbd;
	struct ps2mouse_info *ps2mouse;
	uint8_t status; /* status register */
	uint8_t outport; /* controller output port */
	uint8_t ram[RAMSZ]; /* byte0 = controller config */
	uint32_t curcmd; /* current command for next byte */
	uint32_t ctrlbyte;
	struct kbd_dev kbd;
	struct aux_dev aux;
};
/*
 * Pulse the keyboard IRQ, but only when keyboard interrupts are enabled
 * in the controller command byte (ram[0]).
 */
static void
atkbdc_assert_kbd_intr(struct atkbdc_base *base)
{
	if ((base->ram[0] & KBD_ENABLE_KBD_INT) == 0)
		return;

	base->kbd.irq_active = true;
	vm_isa_pulse_irq(base->ctx, base->kbd.irq, base->kbd.irq);
}
/*
 * Pulse the aux (mouse) IRQ, but only when aux interrupts are enabled
 * in the controller command byte (ram[0]).
 */
static void
atkbdc_assert_aux_intr(struct atkbdc_base *base)
{
	if ((base->ram[0] & KBD_ENABLE_AUX_INT) == 0)
		return;

	base->aux.irq_active = true;
	vm_isa_pulse_irq(base->ctx, base->aux.irq, base->aux.irq);
}
static int
atkbdc_kbd_queue_data(struct atkbdc_base *base, uint8_t val)
{
if (base->kbd.bcnt < FIFOSZ) {
base->kbd.buffer[base->kbd.bwr] = val;
base->kbd.bwr = (base->kbd.bwr + 1) % FIFOSZ;
base->kbd.bcnt++;
base->status |= KBDS_KBD_BUFFER_FULL;
base->outport |= KBDO_KBD_OUTFULL;
} else {
printf("atkbd data buffer full\n");
}
return (base->kbd.bcnt < FIFOSZ);
}
/*
 * Drain scan codes from the ps2kbd layer into the controller FIFO.
 * When translation is enabled in the command byte, convert set-2 scan
 * codes to set-1 via the table below (0xf0 prefixes become the 0x80
 * "release" bit), one code per call; otherwise pass bytes through until
 * the FIFO fills.
 */
static void
atkbdc_kbd_read(struct atkbdc_base *base)
{
	/* Scan code set 2 -> set 1 translation table. */
	const uint8_t translation[256] = {
		0xff, 0x43, 0x41, 0x3f, 0x3d, 0x3b, 0x3c, 0x58,
		0x64, 0x44, 0x42, 0x40, 0x3e, 0x0f, 0x29, 0x59,
		0x65, 0x38, 0x2a, 0x70, 0x1d, 0x10, 0x02, 0x5a,
		0x66, 0x71, 0x2c, 0x1f, 0x1e, 0x11, 0x03, 0x5b,
		0x67, 0x2e, 0x2d, 0x20, 0x12, 0x05, 0x04, 0x5c,
		0x68, 0x39, 0x2f, 0x21, 0x14, 0x13, 0x06, 0x5d,
		0x69, 0x31, 0x30, 0x23, 0x22, 0x15, 0x07, 0x5e,
		0x6a, 0x72, 0x32, 0x24, 0x16, 0x08, 0x09, 0x5f,
		0x6b, 0x33, 0x25, 0x17, 0x18, 0x0b, 0x0a, 0x60,
		0x6c, 0x34, 0x35, 0x26, 0x27, 0x19, 0x0c, 0x61,
		0x6d, 0x73, 0x28, 0x74, 0x1a, 0x0d, 0x62, 0x6e,
		0x3a, 0x36, 0x1c, 0x1b, 0x75, 0x2b, 0x63, 0x76,
		0x55, 0x56, 0x77, 0x78, 0x79, 0x7a, 0x0e, 0x7b,
		0x7c, 0x4f, 0x7d, 0x4b, 0x47, 0x7e, 0x7f, 0x6f,
		0x52, 0x53, 0x50, 0x4c, 0x4d, 0x48, 0x01, 0x45,
		0x57, 0x4e, 0x51, 0x4a, 0x37, 0x49, 0x46, 0x54,
		0x80, 0x81, 0x82, 0x41, 0x54, 0x85, 0x86, 0x87,
		0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
		0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
		0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
		0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
		0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
		0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
		0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
		0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
		0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
		0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
		0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
		0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
		0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
		0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
		0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff
	};
	uint8_t val;
	uint8_t release = 0;

	if (base->ram[0] & KBD_TRANSLATION) {
		while (ps2kbd_read(base->ps2kbd, &val) != -1) {
			/* 0xf0 prefix marks a key release in set 2. */
			if (val == 0xf0) {
				release = 0x80;
				continue;
			} else {
				val = translation[val] | release;
			}
			atkbdc_kbd_queue_data(base, val);
			break;
		}
	} else {
		while (base->kbd.bcnt < FIFOSZ) {
			if (ps2kbd_read(base->ps2kbd, &val) != -1)
				atkbdc_kbd_queue_data(base, val);
			else
				break;
		}
	}
	/* Raise the keyboard IRQ unless pending mouse data takes priority. */
	if (((base->ram[0] & KBD_DISABLE_AUX_PORT) ||
	    ps2mouse_fifocnt(base->ps2mouse) == 0) && base->kbd.bcnt > 0)
		atkbdc_assert_kbd_intr(base);
}
/* Surface pending mouse bytes through the status/output-port bits. */
static void
atkbdc_aux_poll(struct atkbdc_base *base)
{
	if (ps2mouse_fifocnt(base->ps2mouse) == 0)
		return;

	base->status |= KBDS_AUX_BUFFER_FULL | KBDS_KBD_BUFFER_FULL;
	base->outport |= KBDO_AUX_OUTFULL;
	atkbdc_assert_aux_intr(base);
}
/* Pull any pending keyboard data into the controller FIFO. */
static void
atkbdc_kbd_poll(struct atkbdc_base *base)
{
	atkbdc_kbd_read(base);
}
/* Poll both devices; aux first so mouse data takes IRQ priority. */
static void
atkbdc_poll(struct atkbdc_base *base)
{
	atkbdc_aux_poll(base);
	atkbdc_kbd_poll(base);
}
/*
 * Return one byte of pending device output in *buf.  Mouse (aux) data
 * has priority over keyboard data.  After a byte is consumed, the
 * status/outport "output buffer full" bits are recomputed from what
 * remains queued, and atkbdc_poll() re-asserts the appropriate
 * interrupt if more data is pending.  Caller must hold base->mtx.
 */
static void
atkbdc_dequeue_data(struct atkbdc_base *base, uint8_t *buf)
{
	/* Aux first: ps2mouse_read() == 0 means a byte was dequeued. */
	if (ps2mouse_read(base->ps2mouse, buf) == 0) {
		if (ps2mouse_fifocnt(base->ps2mouse) == 0) {
			/* Mouse FIFO drained; clear aux-full, and kbd-full
			 * too when the keyboard FIFO is also empty. */
			if (base->kbd.bcnt == 0)
				base->status &= ~(KBDS_AUX_BUFFER_FULL |
				                  KBDS_KBD_BUFFER_FULL);
			else
				base->status &= ~(KBDS_AUX_BUFFER_FULL);
			base->outport &= ~KBDO_AUX_OUTFULL;
		}
		atkbdc_poll(base);
		return;
	}
	/* No mouse data: pull one byte from the keyboard ring buffer. */
	if (base->kbd.bcnt > 0) {
		*buf = base->kbd.buffer[base->kbd.brd];
		base->kbd.brd = (base->kbd.brd + 1) % FIFOSZ;
		base->kbd.bcnt--;
		if (base->kbd.bcnt == 0) {
			base->status &= ~KBDS_KBD_BUFFER_FULL;
			base->outport &= ~KBDO_KBD_OUTFULL;
		}
		atkbdc_poll(base);
	}
	/* Both FIFOs empty: clear both buffer-full status bits. */
	if (ps2mouse_fifocnt(base->ps2mouse) == 0 && base->kbd.bcnt == 0)
		base->status &= ~(KBDS_AUX_BUFFER_FULL | KBDS_KBD_BUFFER_FULL);
}
/*
 * I/O handler for the keyboard controller data port (0x60).
 *
 * IN:  returns a latched controller command response (ctrlbyte) if one
 *      is pending, otherwise dequeues device data/command responses.
 * OUT: if a controller command is in progress (KBDS_CTRL_FLAG set) the
 *      byte completes that command; otherwise it is forwarded to the
 *      PS/2 keyboard.
 *
 * Returns 0 on success, -1 for a non-1-byte access.
 */
static int
atkbdc_data_handler(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
	uint32_t *eax, void *arg)
{
	struct atkbdc_base *base;
	uint8_t buf;
	int retval;
	if (bytes != 1)
		return -1;
	base = arg;
	retval = 0;
	pthread_mutex_lock(&base->mtx);
	if (in) {
		base->curcmd = 0;
		if (base->ctrlbyte != 0) {
			/* controller command response takes priority */
			*eax = base->ctrlbyte & 0xff;
			base->ctrlbyte = 0;
		} else {
			/* read device buffer; includes kbd cmd responses */
			atkbdc_dequeue_data(base, &buf);
			*eax = buf;
		}
		base->status &= ~KBDS_CTRL_FLAG;
		pthread_mutex_unlock(&base->mtx);
		return retval;
	}
	if (base->status & KBDS_CTRL_FLAG) {
		/*
		 * Command byte for the controller.
		 */
		switch (base->curcmd) {
		case KBDC_SET_COMMAND_BYTE:
			/* command byte lives in RAM[0]; mirror SYS flag */
			base->ram[0] = *eax;
			if (base->ram[0] & KBD_SYS_FLAG_BIT)
				base->status |= KBDS_SYS_FLAG;
			else
				base->status &= ~KBDS_SYS_FLAG;
			break;
		case KBDC_WRITE_OUTPORT:
			base->outport = *eax;
			break;
		case KBDC_WRITE_TO_AUX:
			/* forward the byte to the mouse device */
			ps2mouse_write(base->ps2mouse, *eax, 0);
			atkbdc_poll(base);
			break;
		case KBDC_WRITE_KBD_OUTBUF:
			/* inject a byte as if the keyboard sent it */
			atkbdc_kbd_queue_data(base, *eax);
			break;
		case KBDC_WRITE_AUX_OUTBUF:
			/* inject a byte as if the mouse sent it */
			ps2mouse_write(base->ps2mouse, *eax, 1);
			base->status |= (KBDS_AUX_BUFFER_FULL |
					KBDS_KBD_BUFFER_FULL);
			atkbdc_aux_poll(base);
			break;
		default:
			/* write to particular RAM byte */
			if (base->curcmd >= 0x61 && base->curcmd <= 0x7f) {
				int byten;
				byten = (base->curcmd - 0x60) & 0x1f;
				base->ram[byten] = *eax & 0xff;
			}
			break;
		}
		base->curcmd = 0;
		base->status &= ~KBDS_CTRL_FLAG;
		pthread_mutex_unlock(&base->mtx);
		return retval;
	}
	/*
	 * Data byte for the device.
	 */
	ps2kbd_write(base->ps2kbd, *eax);
	atkbdc_poll(base);
	pthread_mutex_unlock(&base->mtx);
	return retval;
}
/*
 * I/O handler for the keyboard controller status/command port (0x64).
 *
 * IN:  returns the status register.
 * OUT: controller commands.  Single-byte commands are executed
 *      immediately; commands that take a parameter byte are latched in
 *      base->curcmd and completed by atkbdc_data_handler() on the next
 *      data-port write.
 *
 * Returns 0 on success, -1 for a non-1-byte access.
 */
static int
atkbdc_sts_ctl_handler(struct vmctx *ctx, int vcpu, int in, int port,
	int bytes, uint32_t *eax, void *arg)
{
	struct atkbdc_base *base;
	int error, retval;

	if (bytes != 1)
		return -1;

	base = arg;
	retval = 0;

	pthread_mutex_lock(&base->mtx);
	if (in) {
		/* read status register */
		*eax = base->status;
		pthread_mutex_unlock(&base->mtx);
		return retval;
	}

	base->curcmd = 0;
	base->status |= KBDS_CTRL_FLAG;
	base->ctrlbyte = 0;

	switch (*eax) {
	case KBDC_GET_COMMAND_BYTE:
		base->ctrlbyte = CTRL_CMD_FLAG | base->ram[0];
		break;
	case KBDC_TEST_CTRL:
		/* self-test always passes (0x55) */
		base->ctrlbyte = CTRL_CMD_FLAG | 0x55;
		break;
	case KBDC_TEST_AUX_PORT:
	case KBDC_TEST_KBD_PORT:
		base->ctrlbyte = CTRL_CMD_FLAG | 0;
		break;
	case KBDC_READ_INPORT:
		base->ctrlbyte = CTRL_CMD_FLAG | 0;
		break;
	case KBDC_READ_OUTPORT:
		base->ctrlbyte = CTRL_CMD_FLAG | base->outport;
		break;
	case KBDC_SET_COMMAND_BYTE:
	case KBDC_WRITE_OUTPORT:
	case KBDC_WRITE_KBD_OUTBUF:
	case KBDC_WRITE_AUX_OUTBUF:
		/* parameter byte follows on the data port */
		base->curcmd = *eax;
		break;
	case KBDC_DISABLE_KBD_PORT:
		base->ram[0] |= KBD_DISABLE_KBD_PORT;
		break;
	case KBDC_ENABLE_KBD_PORT:
		base->ram[0] &= ~KBD_DISABLE_KBD_PORT;
		if (base->kbd.bcnt > 0)
			base->status |= KBDS_KBD_BUFFER_FULL;
		atkbdc_poll(base);
		break;
	case KBDC_WRITE_TO_AUX:
		base->curcmd = *eax;
		break;
	case KBDC_DISABLE_AUX_PORT:
		base->ram[0] |= KBD_DISABLE_AUX_PORT;
		ps2mouse_toggle(base->ps2mouse, 0);
		base->status &= ~(KBDS_AUX_BUFFER_FULL | KBDS_KBD_BUFFER_FULL);
		/*
		 * NOTE(review): this clears KBDS_AUX_BUFFER_FULL in
		 * outport; KBDO_AUX_OUTFULL looks like the intended bit
		 * (cf. atkbdc_dequeue_data) -- confirm against the outport
		 * bit definitions before changing.
		 */
		base->outport &= ~KBDS_AUX_BUFFER_FULL;
		break;
	case KBDC_ENABLE_AUX_PORT:
		base->ram[0] &= ~KBD_DISABLE_AUX_PORT;
		ps2mouse_toggle(base->ps2mouse, 1);
		if (ps2mouse_fifocnt(base->ps2mouse) > 0)
			base->status |= KBDS_AUX_BUFFER_FULL |
				KBDS_KBD_BUFFER_FULL;
		break;
	case KBDC_RESET:		/* Pulse "reset" line */
		error = vm_suspend(ctx, VM_SUSPEND_RESET);
		assert(error == 0 || errno == EALREADY);
		break;
	default:
		if (*eax >= 0x21 && *eax <= 0x3f) {
			/* read "byte N" from RAM */
			int byten;

			byten = (*eax - 0x20) & 0x1f;
			base->ctrlbyte = CTRL_CMD_FLAG | base->ram[byten];
		}
		break;
	}

	/*
	 * Re-evaluate the output-buffer state and assert the proper
	 * interrupt.  This must happen while base->mtx is still held:
	 * the previous code released the lock first, racing with the
	 * device threads (atkbdc_event) that update base->status and
	 * the FIFO counters under the same lock.
	 */
	if (base->ctrlbyte != 0) {
		/* a controller command response goes out first */
		base->status |= KBDS_KBD_BUFFER_FULL;
		base->status &= ~KBDS_AUX_BUFFER_FULL;
		atkbdc_assert_kbd_intr(base);
	} else if (ps2mouse_fifocnt(base->ps2mouse) > 0 &&
	           (base->ram[0] & KBD_DISABLE_AUX_PORT) == 0) {
		base->status |= KBDS_AUX_BUFFER_FULL | KBDS_KBD_BUFFER_FULL;
		atkbdc_assert_aux_intr(base);
	} else if (base->kbd.bcnt > 0 && (base->ram[0] &
	           KBD_DISABLE_KBD_PORT) == 0) {
		base->status |= KBDS_KBD_BUFFER_FULL;
		atkbdc_assert_kbd_intr(base);
	}

	pthread_mutex_unlock(&base->mtx);

	return retval;
}
void
atkbdc_event(struct atkbdc_base *base, int iskbd)
{
pthread_mutex_lock(&base->mtx);
if (iskbd)
atkbdc_kbd_poll(base);
else
atkbdc_aux_poll(base);
pthread_mutex_unlock(&base->mtx);
}
void
atkbdc_init(struct vmctx *ctx)
{
struct inout_port iop;
struct atkbdc_base *base;
int error;
base = calloc(1, sizeof(struct atkbdc_base));
assert(base != NULL);
base->ctx = ctx;
pthread_mutex_init(&base->mtx, NULL);
bzero(&iop, sizeof(struct inout_port));
iop.name = "atkdbc";
iop.port = KBD_STS_CTL_PORT;
iop.size = 1;
iop.flags = IOPORT_F_INOUT;
iop.handler = atkbdc_sts_ctl_handler;
iop.arg = base;
error = register_inout(&iop);
assert(error == 0);
bzero(&iop, sizeof(struct inout_port));
iop.name = "atkdbc";
iop.port = KBD_DATA_PORT;
iop.size = 1;
iop.flags = IOPORT_F_INOUT;
iop.handler = atkbdc_data_handler;
iop.arg = base;
error = register_inout(&iop);
assert(error == 0);
pci_irq_reserve(KBD_DEV_IRQ);
base->kbd.irq = KBD_DEV_IRQ;
pci_irq_reserve(AUX_DEV_IRQ);
base->aux.irq = AUX_DEV_IRQ;
base->ps2kbd = ps2kbd_init(base);
base->ps2mouse = ps2mouse_init(base);
}
/*
 * Emit the ACPI DSDT entries for the keyboard (PNP0303, IRQ 1) and
 * mouse (PNP0F13, IRQ 12).  Both devices declare the shared i8042
 * data/status I/O ports.
 */
static void
atkbdc_dsdt(void)
{
	dsdt_line("");
	dsdt_line("Device (KBD)");
	dsdt_line("{");
	dsdt_line("  Name (_HID, EisaId (\"PNP0303\"))");
	dsdt_line("  Name (_CRS, ResourceTemplate ()");
	dsdt_line("  {");
	dsdt_indent(2);
	dsdt_fixed_ioport(KBD_DATA_PORT, 1);
	dsdt_fixed_ioport(KBD_STS_CTL_PORT, 1);
	dsdt_fixed_irq(1);
	dsdt_unindent(2);
	dsdt_line("  })");
	dsdt_line("}");
	dsdt_line("");
	dsdt_line("Device (MOU)");
	dsdt_line("{");
	dsdt_line("  Name (_HID, EisaId (\"PNP0F13\"))");
	dsdt_line("  Name (_CRS, ResourceTemplate ()");
	dsdt_line("  {");
	dsdt_indent(2);
	dsdt_fixed_ioport(KBD_DATA_PORT, 1);
	dsdt_fixed_ioport(KBD_STS_CTL_PORT, 1);
	dsdt_fixed_irq(12);
	dsdt_unindent(2);
	dsdt_line("  })");
	dsdt_line("}");
}
/* Register the DSDT fragment with the LPC bus emulation. */
LPC_DSDT(atkbdc_dsdt);

View File

@@ -0,0 +1,936 @@
/*-
* Copyright (c) 2013 Peter Grehan <grehan@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <errno.h>
#include <assert.h>
#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include <signal.h>
#include <sysexits.h>
#include <unistd.h>
#include "dm.h"
#include "mevent.h"
#include "block_if.h"
#include "ahci.h"
/*
* Notes:
* The F_OFD_SETLK support is introduced in glibc 2.20.
* The glibc version on target board is above 2.20.
* The following code temporarily fixes up building issues on Ubuntu 14.04,
* where the glibc version is 2.19 by default.
* Theoretically we should use cross-compiling tool to compile applications.
*/
#ifndef F_OFD_SETLK
#define F_OFD_SETLK 37
#endif
#define BLOCKIF_SIG 0xb109b109		/* magic used to validate contexts */
#define BLOCKIF_NUMTHR 8		/* worker threads per context */
#define BLOCKIF_MAXREQ (64 + BLOCKIF_NUMTHR)	/* request slots per context */
/*
 * Debug printf
 */
static int block_if_debug;
#define DPRINTF(params) do { if (block_if_debug) printf params; } while (0)
#define WPRINTF(params) (printf params)
/* Request operation. */
enum blockop {
	BOP_READ,
	BOP_WRITE,
	BOP_FLUSH,
	BOP_DELETE
};
/* Request element lifecycle. */
enum blockstat {
	BST_FREE,	/* on freeq, unused */
	BST_BLOCK,	/* on pendq, serialized behind an overlapping request */
	BST_PEND,	/* on pendq, runnable */
	BST_BUSY,	/* on busyq, being processed by a worker thread */
	BST_DONE	/* processed; awaiting blockif_complete() */
};
struct blockif_elem {
	TAILQ_ENTRY(blockif_elem) link;
	struct blockif_req *req;	/* caller's request, NULL when free */
	enum blockop op;
	enum blockstat status;
	pthread_t tid;			/* worker processing this element */
	off_t block;			/* end offset, used for ordering */
};
struct blockif_ctxt {
	int magic;			/* == BLOCKIF_SIG when valid */
	int fd;				/* backing file/device */
	int isblk;			/* backing store is a block device */
	int isgeom;			/* needs bounce-buffered I/O */
	int candelete;			/* supports BOP_DELETE (discard) */
	int rdonly;
	off_t size;			/* usable size, in bytes */
	int sub_file_assign;		/* operating on a sub-range of fd */
	off_t sub_file_start_lba;	/* byte offset of the sub-range */
	struct flock fl;		/* OFD lock protecting the sub-range */
	int sectsz;			/* logical sector size */
	int psectsz;			/* physical sector size */
	int psectoff;			/* physical sector offset */
	int closing;			/* tells worker threads to exit */
	pthread_t btid[BLOCKIF_NUMTHR];
	pthread_mutex_t mtx;		/* protects the queues below */
	pthread_cond_t cond;		/* signalled when work is queued */
	/* Request elements and free/pending/busy queues */
	TAILQ_HEAD(, blockif_elem) freeq;
	TAILQ_HEAD(, blockif_elem) pendq;
	TAILQ_HEAD(, blockif_elem) busyq;
	struct blockif_elem reqs[BLOCKIF_MAXREQ];
};
static pthread_once_t blockif_once = PTHREAD_ONCE_INIT;
/* Per-cancel wait record, linked on a lock-free list (see SIGCONT use). */
struct blockif_sig_elem {
	pthread_mutex_t mtx;
	pthread_cond_t cond;
	int pending;			/* cleared by the signal handler */
	struct blockif_sig_elem *next;
};
static struct blockif_sig_elem *blockif_bse_head;
/*
 * Take a free element, bind it to breq/op, and append it to the
 * pending queue.  Requests whose tagged end-offset matches an earlier
 * pending/busy request's start offset are marked BST_BLOCK so they
 * stay serialized behind it (see blockif_complete()).
 *
 * Caller must hold bc->mtx and guarantee freeq is non-empty.
 * Returns non-zero when the new element is immediately runnable
 * (BST_PEND), i.e. a worker should be woken.
 */
static int
blockif_enqueue(struct blockif_ctxt *bc, struct blockif_req *breq,
		enum blockop op)
{
	struct blockif_elem *be, *tbe;
	off_t off;
	int i;

	be = TAILQ_FIRST(&bc->freeq);
	assert(be != NULL);
	assert(be->status == BST_FREE);
	TAILQ_REMOVE(&bc->freeq, be, link);
	be->req = breq;
	be->op = op;
	switch (op) {
	case BOP_READ:
	case BOP_WRITE:
	case BOP_DELETE:
		off = breq->offset;
		for (i = 0; i < breq->iovcnt; i++)
			off += breq->iov[i].iov_len;
		break;
	default:
		/*
		 * Flush requests carry no offset; tag them with the
		 * maximum off_t value (OFF_MAX is unavailable on Linux)
		 * so they can never collide with a real request offset
		 * below.  The previous expression,
		 * 1 << (sizeof(off_t) - 1), evaluated to 128 and could
		 * match a genuine offset.  Computed without shifting
		 * into the sign bit to avoid undefined behavior.
		 */
		off = ((((off_t)1 << (sizeof(off_t) * 8 - 2)) - 1) << 1) + 1;
	}
	be->block = off;
	TAILQ_FOREACH(tbe, &bc->pendq, link) {
		if (tbe->block == breq->offset)
			break;
	}
	if (tbe == NULL) {
		TAILQ_FOREACH(tbe, &bc->busyq, link) {
			if (tbe->block == breq->offset)
				break;
		}
	}
	if (tbe == NULL)
		be->status = BST_PEND;
	else
		be->status = BST_BLOCK;
	TAILQ_INSERT_TAIL(&bc->pendq, be, link);
	return (be->status == BST_PEND);
}
/*
 * Claim the first runnable (BST_PEND) request from the pending queue,
 * mark it busy under thread t and move it to the busy queue.
 *
 * Caller must hold bc->mtx.  Returns 1 with *bep set when a request
 * was claimed, 0 when nothing is runnable.
 */
static int
blockif_dequeue(struct blockif_ctxt *bc, pthread_t t, struct blockif_elem **bep)
{
	struct blockif_elem *elem;

	TAILQ_FOREACH(elem, &bc->pendq, link) {
		if (elem->status == BST_PEND)
			break;
		assert(elem->status == BST_BLOCK);
	}
	if (elem == NULL)
		return 0;

	TAILQ_REMOVE(&bc->pendq, elem, link);
	elem->status = BST_BUSY;
	elem->tid = t;
	TAILQ_INSERT_TAIL(&bc->busyq, elem, link);
	*bep = elem;
	return 1;
}
/*
 * Retire a request element: remove it from its queue, unblock any
 * pending request that was serialized behind it, and return it to the
 * free list.  Caller must hold bc->mtx.
 */
static void
blockif_complete(struct blockif_ctxt *bc, struct blockif_elem *be)
{
	struct blockif_elem *waiter;

	/* Busy/done elements live on busyq; everything else on pendq. */
	if (be->status == BST_DONE || be->status == BST_BUSY)
		TAILQ_REMOVE(&bc->busyq, be, link);
	else
		TAILQ_REMOVE(&bc->pendq, be, link);

	/* Wake up requests that were ordered behind this one. */
	TAILQ_FOREACH(waiter, &bc->pendq, link) {
		if (waiter->req->offset == be->block)
			waiter->status = BST_PEND;
	}

	be->tid = 0;
	be->status = BST_FREE;
	be->req = NULL;
	TAILQ_INSERT_TAIL(&bc->freeq, be, link);
}
/*
 * Execute one request on the calling worker thread.
 *
 * buf is an optional MAXPHYS-sized bounce buffer (non-NULL only for
 * "geom" backends); when absent, multi-iovec reads/writes go straight
 * through preadv()/pwritev().  All offsets are shifted by
 * sub_file_start_lba so sub-file backends stay inside their region.
 * On return the element is BST_DONE and the caller's callback has
 * been invoked with an errno-style result (0 on success).
 */
static void
blockif_proc(struct blockif_ctxt *bc, struct blockif_elem *be, uint8_t *buf)
{
	struct blockif_req *br;
	off_t arg[2];
	ssize_t clen, len, off, boff, voff;
	int i, err;
	br = be->req;
	/* a single iovec never needs the bounce buffer */
	if (br->iovcnt <= 1)
		buf = NULL;
	err = 0;
	switch (be->op) {
	case BOP_READ:
		if (buf == NULL) {
			/* direct scatter read */
			len = preadv(bc->fd, br->iov, br->iovcnt,
				br->offset + bc->sub_file_start_lba);
			if (len < 0)
				err = errno;
			else
				br->resid -= len;
			break;
		}
		/* bounce-buffered read: MAXPHYS chunks copied out into
		 * the request's iovecs (i/voff track the current iovec
		 * and the offset within it) */
		i = 0;
		off = voff = 0;
		while (br->resid > 0) {
			len = MIN(br->resid, MAXPHYS);
			if (pread(bc->fd, buf, len, br->offset +
				off + bc->sub_file_start_lba) < 0) {
				err = errno;
				break;
			}
			boff = 0;
			do {
				clen = MIN(len - boff, br->iov[i].iov_len -
					voff);
				memcpy(br->iov[i].iov_base + voff,
					buf + boff, clen);
				if (clen < br->iov[i].iov_len - voff)
					voff += clen;
				else {
					i++;
					voff = 0;
				}
				boff += clen;
			} while (boff < len);
			off += len;
			br->resid -= len;
		}
		break;
	case BOP_WRITE:
		if (bc->rdonly) {
			err = EROFS;
			break;
		}
		if (buf == NULL) {
			/* direct gather write */
			len = pwritev(bc->fd, br->iov, br->iovcnt,
				br->offset + bc->sub_file_start_lba);
			if (len < 0)
				err = errno;
			else
				br->resid -= len;
			break;
		}
		/* bounce-buffered write: gather iovecs into buf, then
		 * write one MAXPHYS chunk at a time */
		i = 0;
		off = voff = 0;
		while (br->resid > 0) {
			len = MIN(br->resid, MAXPHYS);
			boff = 0;
			do {
				clen = MIN(len - boff, br->iov[i].iov_len -
					voff);
				memcpy(buf + boff,
					br->iov[i].iov_base + voff, clen);
				if (clen < br->iov[i].iov_len - voff)
					voff += clen;
				else {
					i++;
					voff = 0;
				}
				boff += clen;
			} while (boff < len);
			if (pwrite(bc->fd, buf, len, br->offset +
				off + bc->sub_file_start_lba) < 0) {
				err = errno;
				break;
			}
			off += len;
			br->resid -= len;
		}
		break;
	case BOP_FLUSH:
		if (fsync(bc->fd))
			err = errno;
		break;
	case BOP_DELETE:
		/* only used by AHCI */
		if (!bc->candelete)
			err = EOPNOTSUPP;
		else if (bc->rdonly)
			err = EROFS;
		else if (bc->isblk) {
			/* NOTE(review): BLKDISCARD expects uint64_t[2]
			 * {start, length}; off_t[2] matches only on
			 * 64-bit off_t -- confirm.  The range is not
			 * shifted by sub_file_start_lba either. */
			arg[0] = br->offset;
			arg[1] = br->resid;
			if (ioctl(bc->fd, BLKDISCARD, arg))
				err = errno;
			else
				br->resid = 0;
		}
		else
			err = EOPNOTSUPP;
		break;
	default:
		err = EINVAL;
		break;
	}
	be->status = BST_DONE;
	(*br->callback)(br, err);
}
/*
 * Worker thread body.  Repeatedly drains runnable requests (dropping
 * the context lock around the actual I/O), then sleeps on the condvar
 * until more work arrives or the context is closing.
 */
static void *
blockif_thr(void *arg)
{
	struct blockif_ctxt *bctx = arg;
	struct blockif_elem *item;
	pthread_t self;
	uint8_t *scratch;

	/* only "geom" backends use a bounce buffer */
	scratch = bctx->isgeom ? malloc(MAXPHYS) : NULL;
	self = pthread_self();

	pthread_mutex_lock(&bctx->mtx);
	for (;;) {
		while (blockif_dequeue(bctx, self, &item)) {
			pthread_mutex_unlock(&bctx->mtx);
			blockif_proc(bctx, item, scratch);
			pthread_mutex_lock(&bctx->mtx);
			blockif_complete(bctx, item);
		}
		/* Check ctxt status here to see if exit requested */
		if (bctx->closing)
			break;
		pthread_cond_wait(&bctx->cond, &bctx->mtx);
	}
	pthread_mutex_unlock(&bctx->mtx);

	free(scratch);		/* free(NULL) is a no-op */
	pthread_exit(NULL);
	return NULL;
}
/*
 * SIGCONT handler used by blockif_cancel() to interrupt worker
 * threads blocked in I/O.  Detaches every record from the global
 * lock-free list of waiters with a CAS loop and signals each one's
 * condition variable.
 *
 * NOTE(review): pthread_mutex_lock/pthread_cond_signal are not on the
 * async-signal-safe list; this mirrors the original bhyve code but is
 * technically unsafe -- verify on the target libc.
 */
static void
blockif_sigcont_handler(int signal)
{
	struct blockif_sig_elem *bse;
	WPRINTF(("block_if sigcont handler!\n"));
	for (;;) {
		/*
		 * Process the entire list even if not intended for
		 * this thread.
		 */
		do {
			bse = blockif_bse_head;
			if (bse == NULL)
				return;
		} while (!__sync_bool_compare_and_swap(
			(uintptr_t *)&blockif_bse_head,
			(uintptr_t)bse,
			(uintptr_t)bse->next));
		pthread_mutex_lock(&bse->mtx);
		bse->pending = 0;
		pthread_cond_signal(&bse->cond);
		pthread_mutex_unlock(&bse->mtx);
	}
}
/*
 * One-time module init (via pthread_once from blockif_open): install
 * the SIGCONT handler used by blockif_cancel() to interrupt workers.
 */
static void
blockif_init(void)
{
	signal(SIGCONT, blockif_sigcont_handler);
}
/*
* This function checks if the sub file range, specified by sub_start and
* sub_size, has any overlap with other sub file ranges with write access.
*/
/*
 * Take an "open file description" lock (read lock for read-only
 * backends, write lock otherwise) over [sub_start, sub_start+sub_size)
 * of fd, detecting overlap with other sub-file users.  The flock is
 * stored in bc->fl and deliberately held until sub_file_unlock().
 *
 * Returns 0 on success, -1 if the range is already locked elsewhere.
 */
static int
sub_file_validate(struct blockif_ctxt *bc, int fd, int read_only,
	off_t sub_start, off_t sub_size)
{
	struct flock *lk = &bc->fl;

	memset(lk, 0, sizeof(struct flock));
	lk->l_whence = SEEK_SET;	/* offset base is start of file */
	lk->l_type = read_only ? F_RDLCK : F_WRLCK;
	lk->l_start = sub_start;
	lk->l_len = sub_size;

	/* use "open file description locks" to validate */
	if (fcntl(fd, F_OFD_SETLK, lk) == -1) {
		DPRINTF(("failed to lock subfile!\n"));
		return -1;
	}

	/* Keep file lock on to prevent other sub files, until DM exits */
	return 0;
}
/*
 * Drop the OFD lock taken by sub_file_validate().  No-op for normal
 * (non-sub-file) backends.  A failing unlock is fatal, matching the
 * original behavior.
 */
void
sub_file_unlock(struct blockif_ctxt *bc)
{
	struct flock *lk;

	if (!bc->sub_file_assign)
		return;

	lk = &bc->fl;
	DPRINTF(("blockif: release file lock...\n"));
	lk->l_type = F_UNLCK;
	if (fcntl(bc->fd, F_OFD_SETLK, lk) == -1) {
		fprintf(stderr, "blockif: failed to unlock subfile!\n");
		exit(1);
	}
	DPRINTF(("blockif: release done\n"));
}
/*
 * Open a block backend described by optstr and start the worker
 * threads.
 *
 * optstr: "<path>[,nocache][,sync|direct][,ro]
 *          [,sectorsize=logical[/physical]][,range=start/size]"
 * where "range" selects a sub-region of the backing file (expressed
 * in sectors).  ident is used to build the worker thread names.
 *
 * Returns the new context, or NULL on failure.
 */
struct blockif_ctxt *
blockif_open(const char *optstr, const char *ident)
{
	char tname[MAXCOMLEN + 1];
	char *nopt, *xopts, *cp;
	struct blockif_ctxt *bc;
	struct stat sbuf;
	off_t size, psectsz, psectoff;
	int extra, fd, i, sectsz;
	int nocache, sync, ro, candelete, geom, ssopt, pssopt;
	long sz;
	long long b;
	int err_code = -1;
	off_t sub_file_start_lba, sub_file_size;
	int sub_file_assign;

	pthread_once(&blockif_once, blockif_init);

	fd = -1;
	ssopt = 0;
	nocache = 0;
	sync = 0;
	ro = 0;
	sub_file_assign = 0;

	/*
	 * The first element in the optstring is always a pathname.
	 * Optional elements follow
	 */
	nopt = xopts = strdup(optstr);
	if (!nopt) {
		/* fixed message typo: "retruns" -> "returns" */
		WPRINTF(("block_if.c: strdup returns NULL\n"));
		return NULL;
	}
	while (xopts != NULL) {
		cp = strsep(&xopts, ",");
		if (cp == nopt)		/* file or device pathname */
			continue;
		else if (!strcmp(cp, "nocache"))
			nocache = 1;
		else if (!strcmp(cp, "sync") || !strcmp(cp, "direct"))
			sync = 1;
		else if (!strcmp(cp, "ro"))
			ro = 1;
		else if (sscanf(cp, "sectorsize=%d/%d", &ssopt, &pssopt) == 2)
			;
		else if (sscanf(cp, "sectorsize=%d", &ssopt) == 1)
			pssopt = ssopt;
		else if (sscanf(cp, "range=%ld/%ld", &sub_file_start_lba,
				&sub_file_size) == 2)
			sub_file_assign = 1;
		else {
			fprintf(stderr, "Invalid device option \"%s\"\n", cp);
			goto err;
		}
	}

	/* enforce a write-through policy by default */
	nocache = 1;
	sync = 1;

	extra = 0;
	if (nocache)
		extra |= O_DIRECT;
	if (sync)
		extra |= O_SYNC;

	fd = open(nopt, (ro ? O_RDONLY : O_RDWR) | extra);
	if (fd < 0 && !ro) {
		/* Attempt a r/w fail with a r/o open */
		fd = open(nopt, O_RDONLY | extra);
		ro = 1;
	}
	if (fd < 0) {
		warn("Could not open backing file: %s", nopt);
		goto err;
	}
	if (fstat(fd, &sbuf) < 0) {
		warn("Could not stat backing file %s", nopt);
		goto err;
	}

	/*
	 * Deal with raw devices
	 */
	size = sbuf.st_size;
	sectsz = DEV_BSIZE;
	psectsz = psectoff = 0;
	candelete = geom = 0;
	if (S_ISBLK(sbuf.st_mode)) {
		/* get size */
		err_code = ioctl(fd, BLKGETSIZE, &sz);
		if (err_code) {
			fprintf(stderr, "error %d getting block size!\n",
				err_code);
			size = sbuf.st_size;	/* set default value */
		} else {
			size = sz * DEV_BSIZE;	/* DEV_BSIZE is 512 on Linux */
		}
		if (!err_code || err_code == EFBIG) {
			err_code = ioctl(fd, BLKGETSIZE64, &b);
			/*
			 * Trust the 64-bit byte count only when the
			 * ioctl succeeded and the value is sane.  The
			 * previous code used 'b' even when the ioctl
			 * failed ('b' is uninitialized then) and set
			 * size to 0 when b == 0; keep the size computed
			 * above in those cases.  (b == sz gave the same
			 * result as sz * DEV_BSIZE and is unchanged.)
			 */
			if (!err_code && b != 0 && b != sz)
				size = b;
		}
		DPRINTF(("block partition size is 0x%lx\n", size));
		/* get sector size, 512 on Linux */
		sectsz = DEV_BSIZE;
		DPRINTF(("block partition sector size is 0x%x\n", sectsz));
		/* get physical sector size */
		err_code = ioctl(fd, BLKPBSZGET, &psectsz);
		if (err_code) {
			fprintf(stderr, "error %d getting physical sectsz!\n",
				err_code);
			psectsz = DEV_BSIZE;	/* set default physical size */
		}
		DPRINTF(("block partition physical sector size is 0x%lx\n",
			psectsz));
	} else
		psectsz = sbuf.st_blksize;

	if (ssopt != 0) {
		if (!powerof2(ssopt) || !powerof2(pssopt) || ssopt < 512 ||
		    ssopt > pssopt) {
			fprintf(stderr, "Invalid sector size %d/%d\n",
				ssopt, pssopt);
			goto err;
		}
		/*
		 * Some backend drivers (e.g. cd0, ada0) require that the I/O
		 * size be a multiple of the device's sector size.
		 *
		 * Validate that the emulated sector size complies with this
		 * requirement.
		 */
		if (S_ISCHR(sbuf.st_mode)) {
			if (ssopt < sectsz || (ssopt % sectsz) != 0) {
				fprintf(stderr,
					"Sector size %d incompatible with underlying device sector size %d\n",
					ssopt, sectsz);
				goto err;
			}
		}
		sectsz = ssopt;
		psectsz = pssopt;
		psectoff = 0;
	}

	bc = calloc(1, sizeof(struct blockif_ctxt));
	if (bc == NULL) {
		perror("calloc");
		goto err;
	}
	if (sub_file_assign) {
		DPRINTF(("sector size is %d\n", sectsz));
		bc->sub_file_assign = 1;
		bc->sub_file_start_lba = sub_file_start_lba * sectsz;
		size = sub_file_size * sectsz;
		DPRINTF(("Validating sub file...\n"));
		err_code = sub_file_validate(bc, fd, ro, bc->sub_file_start_lba,
					     size);
		if (err_code < 0) {
			/* deliberately fatal: overlapping writable ranges */
			fprintf(stderr, "subfile range specified not valid!\n");
			exit(1);
		}
		DPRINTF(("Validated done!\n"));
	} else {
		/* normal case */
		bc->sub_file_assign = 0;
		bc->sub_file_start_lba = 0;
	}

	bc->magic = BLOCKIF_SIG;
	bc->fd = fd;
	bc->isblk = S_ISBLK(sbuf.st_mode);
	bc->isgeom = geom;
	bc->candelete = candelete;
	bc->rdonly = ro;
	bc->size = size;
	bc->sectsz = sectsz;
	bc->psectsz = psectsz;
	bc->psectoff = psectoff;
	pthread_mutex_init(&bc->mtx, NULL);
	pthread_cond_init(&bc->cond, NULL);
	TAILQ_INIT(&bc->freeq);
	TAILQ_INIT(&bc->pendq);
	TAILQ_INIT(&bc->busyq);
	for (i = 0; i < BLOCKIF_MAXREQ; i++) {
		bc->reqs[i].status = BST_FREE;
		TAILQ_INSERT_HEAD(&bc->freeq, &bc->reqs[i], link);
	}
	for (i = 0; i < BLOCKIF_NUMTHR; i++) {
		pthread_create(&bc->btid[i], NULL, blockif_thr, bc);
		snprintf(tname, sizeof(tname), "blk-%s-%d", ident, i);
		pthread_setname_np(bc->btid[i], tname);
	}

	free(nopt);	/* fixed: the strdup'ed option string was leaked */
	return bc;
err:
	if (fd >= 0)
		close(fd);
	free(nopt);	/* fixed: leaked on every error path */
	return NULL;
}
/*
 * Common entry point for read/write/flush/delete: queue the request
 * and wake a worker when it is immediately runnable.
 *
 * Returns 0 on success, E2BIG when the caller exceeded the blockif
 * queue limit (no free request slots).
 */
static int
blockif_request(struct blockif_ctxt *bc, struct blockif_req *breq,
		enum blockop op)
{
	int err = 0;

	pthread_mutex_lock(&bc->mtx);
	if (TAILQ_EMPTY(&bc->freeq)) {
		/*
		 * Callers are not allowed to enqueue more than
		 * the specified blockif queue limit. Return an
		 * error to indicate that the queue length has been
		 * exceeded.
		 */
		err = E2BIG;
	} else if (blockif_enqueue(bc, breq, op)) {
		/* the new request is runnable: wake a worker */
		pthread_cond_signal(&bc->cond);
	}
	pthread_mutex_unlock(&bc->mtx);

	return err;
}
/* Queue an asynchronous read; completion reported via breq->callback. */
int
blockif_read(struct blockif_ctxt *bc, struct blockif_req *breq)
{
	assert(bc->magic == BLOCKIF_SIG);
	return blockif_request(bc, breq, BOP_READ);
}
/* Queue an asynchronous write; completion reported via breq->callback. */
int
blockif_write(struct blockif_ctxt *bc, struct blockif_req *breq)
{
	assert(bc->magic == BLOCKIF_SIG);
	return blockif_request(bc, breq, BOP_WRITE);
}
/* Queue an asynchronous flush (fsync of the backing fd). */
int
blockif_flush(struct blockif_ctxt *bc, struct blockif_req *breq)
{
	assert(bc->magic == BLOCKIF_SIG);
	return blockif_request(bc, breq, BOP_FLUSH);
}
/* Queue an asynchronous delete/discard (AHCI only). */
int
blockif_delete(struct blockif_ctxt *bc, struct blockif_req *breq)
{
	assert(bc->magic == BLOCKIF_SIG);
	return blockif_request(bc, breq, BOP_DELETE);
}
/*
 * Cancel a queued or in-flight request.
 *
 * A still-pending request is simply retired (0 returned; the callback
 * is never invoked).  An in-flight request's worker thread is poked
 * with SIGCONT until it leaves BST_BUSY; -EBUSY is returned because it
 * is unknown whether the callback already ran.  -1 means the request
 * was not found at all.
 */
int
blockif_cancel(struct blockif_ctxt *bc, struct blockif_req *breq)
{
	struct blockif_elem *be;
	assert(bc->magic == BLOCKIF_SIG);
	pthread_mutex_lock(&bc->mtx);
	/*
	 * Check pending requests.
	 */
	TAILQ_FOREACH(be, &bc->pendq, link) {
		if (be->req == breq)
			break;
	}
	if (be != NULL) {
		/*
		 * Found it.
		 */
		blockif_complete(bc, be);
		pthread_mutex_unlock(&bc->mtx);
		return 0;
	}
	/*
	 * Check in-flight requests.
	 */
	TAILQ_FOREACH(be, &bc->busyq, link) {
		if (be->req == breq)
			break;
	}
	if (be == NULL) {
		/*
		 * Didn't find it.
		 */
		pthread_mutex_unlock(&bc->mtx);
		return -1;
	}
	/*
	 * Interrupt the processing thread to force it return
	 * prematurely via it's normal callback path.
	 */
	while (be->status == BST_BUSY) {
		struct blockif_sig_elem bse, *old_head;
		/* push our wait record onto the lock-free global list,
		 * then signal the worker; the SIGCONT handler pops the
		 * list and clears bse.pending */
		pthread_mutex_init(&bse.mtx, NULL);
		pthread_cond_init(&bse.cond, NULL);
		bse.pending = 1;
		do {
			old_head = blockif_bse_head;
			bse.next = old_head;
		} while (!__sync_bool_compare_and_swap((uintptr_t *)&
			blockif_bse_head,
			(uintptr_t)old_head,
			(uintptr_t)&bse));
		pthread_kill(be->tid, SIGCONT);
		pthread_mutex_lock(&bse.mtx);
		while (bse.pending)
			pthread_cond_wait(&bse.cond, &bse.mtx);
		pthread_mutex_unlock(&bse.mtx);
	}
	pthread_mutex_unlock(&bc->mtx);
	/*
	 * The processing thread has been interrupted. Since it's not
	 * clear if the callback has been invoked yet, return EBUSY.
	 */
	return -EBUSY;
}
/*
 * Tear down a blockif context: release any sub-file lock, stop the
 * worker threads, close the backing fd and free the context.
 * Always returns 0.
 */
int
blockif_close(struct blockif_ctxt *bc)
{
	void *ret;
	int t;

	assert(bc->magic == BLOCKIF_SIG);

	sub_file_unlock(bc);

	/*
	 * Stop the block i/o thread
	 */
	pthread_mutex_lock(&bc->mtx);
	bc->closing = 1;
	pthread_mutex_unlock(&bc->mtx);
	pthread_cond_broadcast(&bc->cond);
	for (t = 0; t < BLOCKIF_NUMTHR; t++)
		pthread_join(bc->btid[t], &ret);

	/* XXX Cancel queued i/o's ??? */

	/*
	 * Release resources
	 */
	bc->magic = 0;
	close(bc->fd);
	free(bc);

	return 0;
}
/*
 * Return virtual C/H/S geometry for the device, following the VHD
 * specification's algorithm.  The size is clamped to the largest
 * geometry expressible in CHS (65535 cylinders x 16 heads x 255
 * sectors per track).
 */
void
blockif_chs(struct blockif_ctxt *bc, uint16_t *c, uint8_t *h, uint8_t *s)
{
	off_t total;		/* total sectors of the block dev */
	off_t hc;		/* cylinders times heads */
	uint16_t spt;		/* sectors per track */
	uint8_t nheads;

	assert(bc->magic == BLOCKIF_SIG);

	total = bc->size / bc->sectsz;

	/* Clamp the size to the largest possible with CHS */
	if (total > 65535UL*16*255)
		total = 65535UL*16*255;

	if (total >= 65536UL*16*63) {
		spt = 255;
		nheads = 16;
	} else {
		spt = 17;
		hc = total / spt;
		nheads = (hc + 1023) / 1024;
		if (nheads < 4)
			nheads = 4;
		if (hc >= (nheads * 1024) || nheads > 16) {
			spt = 31;
			nheads = 16;
			hc = total / spt;
		}
		if (hc >= (nheads * 1024)) {
			spt = 63;
			nheads = 16;
		}
	}

	/* hcyl always ends up as total / final spt in the original */
	hc = total / spt;
	*c = hc / nheads;
	*h = nheads;
	*s = spt;
}
/*
* Accessors
*/
/* Usable size of the backend, in bytes. */
off_t
blockif_size(struct blockif_ctxt *bc)
{
	assert(bc->magic == BLOCKIF_SIG);
	return bc->size;
}
/* Logical sector size, in bytes. */
int
blockif_sectsz(struct blockif_ctxt *bc)
{
	assert(bc->magic == BLOCKIF_SIG);
	return bc->sectsz;
}
/* Physical sector size and offset, in bytes. */
void
blockif_psectsz(struct blockif_ctxt *bc, int *size, int *off)
{
	assert(bc->magic == BLOCKIF_SIG);
	*size = bc->psectsz;
	*off = bc->psectoff;
}
/* Maximum number of requests a caller may have outstanding. */
int
blockif_queuesz(struct blockif_ctxt *bc)
{
	assert(bc->magic == BLOCKIF_SIG);
	return (BLOCKIF_MAXREQ - 1);
}
/* Non-zero when the backend is (or fell back to) read-only. */
int
blockif_is_ro(struct blockif_ctxt *bc)
{
	assert(bc->magic == BLOCKIF_SIG);
	return bc->rdonly;
}
/* Non-zero when BOP_DELETE (discard) is supported. */
int
blockif_candelete(struct blockif_ctxt *bc)
{
	assert(bc->magic == BLOCKIF_SIG);
	return bc->candelete;
}

View File

@@ -0,0 +1,107 @@
/*************************************************************************
* INTEL CONFIDENTIAL
* Copyright 2018 Intel Corporation
*
* The source code contained or described herein and all documents related to
* the source code ("Material") are owned by Intel Corporation or its
* suppliers or licensors. Title to the Material remains with Intel
* Corporation or its suppliers and licensors. The Material contains trade
* secrets and proprietary and confidential information of Intel or its
* suppliers and licensors. The Material is protected by worldwide copyright
* and trade secret laws and treaty provisions. No part of the Material may
* be used, copied, reproduced, modified, published, uploaded, posted,
* transmitted, distributed, or disclosed in any way without Intel's prior
* express written permission.
*
* No license under any patent, copyright, trade secret or other
* intellectual property right is granted to or conferred upon you by
* disclosure or delivery of the Materials, either expressly, by
* implication, inducement, estoppel or otherwise. Any license under such
* intellectual property rights must be express and approved by Intel in
* writing.
*************************************************************************/
/* cmos io device is used for android device reboot to bootloader or
* recovery or normal boot usage
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>
#include <time.h>
#include <assert.h>
#include "inout.h"
/* NOTE(review): 0x74/0x75 are not the standard CMOS/RTC index-data
 * ports (0x70/0x71); presumably a deliberate side-band channel for the
 * Android reboot-reason use case above -- confirm with the guest BIOS. */
#define CMOS_ADDR 0x74
#define CMOS_DATA 0x75
#define CMOS_BUF_SIZE 256
#define CMOS_NAME "cmos_io"
/* #define CMOS_DEBUG */
#ifdef CMOS_DEBUG
static FILE * dbg_file;
#define DPRINTF(format, args...) \
	do { fprintf(dbg_file, format, args); fflush(dbg_file); } while (0)
#else
#define DPRINTF(format, arg...)
#endif
/* cmos buffer used to store write/read contents,
 * and it should not be cleared when reboot
 */
static uint8_t cmos_buffer[CMOS_BUF_SIZE];
/*
 * Handler for the pseudo-CMOS index/data port pair (0x74/0x75).
 * Accesses must come as (address write, data read/write) pairs;
 * next_ops tracks which half is expected.
 *
 * Guest protocol violations are reported with -1 instead of assert():
 * the previous code aborted the whole device model on guest-
 * controllable input (a wrong-sized access, a read of the address
 * port, or an out-of-order access), and with NDEBUG it even treated a
 * *read* of CMOS_ADDR as an address write.
 *
 * Returns 0 on success, -1 on a malformed access.
 */
static int
cmos_io_handler(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
	uint32_t *eax, void *arg)
{
	static int buf_offset;
	static int next_ops; /* 0 for addr, 1 for data, in pair (addr,data)*/

	if (port != CMOS_ADDR && port != CMOS_DATA)
		return -1;
	if (bytes != 1)
		return -1;

#ifdef CMOS_DEBUG
	if (!dbg_file)
		dbg_file = fopen("/tmp/cmos_log", "a+");
#endif

	DPRINTF("%s port =0x%x, in=%d, size=%d, val=0x%x, ops=%d\n",
		__func__, port, in, bytes, (uint8_t)*eax, next_ops);

	if (port == CMOS_ADDR) {
		/* address port accepts writes only, and only when no
		 * (addr,data) pair is in flight; reset the state machine
		 * on violation */
		if (next_ops != 0 || in) {
			next_ops = 0;
			return -1;
		}
		buf_offset = (uint8_t)(*eax);	/* bounded: 0..255 < CMOS_BUF_SIZE */
		next_ops = 1;
	} else {
		/* CMOS_DATA: must follow an address write */
		if (next_ops != 1) {
			next_ops = 0;
			return -1;
		}
		if (in)
			*eax = cmos_buffer[buf_offset];
		else
			cmos_buffer[buf_offset] = (uint8_t)*eax;
		next_ops = 0;
	}

	return 0;
}
INOUT_PORT(cmos_io, CMOS_ADDR, IOPORT_F_INOUT, cmos_io_handler);
INOUT_PORT(cmos_io, CMOS_DATA, IOPORT_F_INOUT, cmos_io_handler);

View File

@@ -0,0 +1,74 @@
/*-
* Copyright (c) 2014 Hudson River Trading LLC
* Written by: John H. Baldwin <jhb@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#include <sys/types.h>
#include <stdio.h>
#include <stdbool.h>
#include "vmm.h"
#include "vmmapi.h"
#include "ioapic.h"
#include "pci_core.h"
#include "lpc.h"
/*
* Assign PCI INTx interrupts to I/O APIC pins in a round-robin
* fashion. Note that we have no idea what the HPET is using, but the
* HPET is also programmable whereas this is intended for hardwired
* PCI interrupts.
*
* This assumes a single I/O APIC where pins >= 16 are permitted for
* PCI devices.
*/
/* I/O APIC pins (beyond the first 16) usable for PCI INTx routing. */
static int pci_pins;

/*
 * Discover the I/O APIC pin count and record how many pins remain for
 * PCI after discarding the first 16 (reserved for ISA IRQs); zero when
 * the count cannot be obtained or no pins are left.
 */
void
ioapic_init(struct vmctx *ctx)
{
	int pins;

	if (vm_ioapic_pincount(ctx, &pins) < 0 || pins <= 16) {
		pci_pins = 0;
		return;
	}
	/* Ignore the first 16 pins. */
	pci_pins = pins - 16;
}
/*
 * Hand out PCI INTx pins round-robin from the range [16, 16+pci_pins).
 * Returns -1 when no PCI-capable pins are available.
 */
int
ioapic_pci_alloc_irq(struct pci_vdev *dev)
{
	static int last_pin;
	int pin;

	if (pci_pins == 0)
		return -1;

	pin = last_pin++ % pci_pins;
	return pin + 16;
}

View File

@@ -0,0 +1,305 @@
/*-
* Copyright (c) 2013 Hudson River Trading LLC
* Written by: John H. Baldwin <jhb@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#include <sys/types.h>
#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <signal.h>
#include <stdbool.h>
#include "vmmapi.h"
#include "vmm.h"
#include "acpi.h"
#include "inout.h"
#include "mevent.h"
#include "irq.h"
#include "lpc.h"
/* Serializes access to the PM1 event/control state in this file. */
static pthread_mutex_t pm_lock = PTHREAD_MUTEX_INITIALIZER;
/* mevent that turns SIGTERM into a virtual power-button press. */
static struct mevent *power_button;
/* SIGTERM disposition saved while ACPI owns the signal. */
static sig_t old_power_handler;
/*
 * Reset Control register at I/O port 0xcf9.  Bit 2 forces a system
 * reset when it transitions from 0 to 1.  Bit 1 selects the type of
 * reset to attempt: 0 selects a "soft" reset, and 1 selects a "hard"
 * reset.
 */
static int
reset_handler(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
	      uint32_t *eax, void *arg)
{
	static uint8_t reset_control;
	int error;

	if (bytes != 1)
		return -1;

	if (in) {
		*eax = reset_control;
		return 0;
	}

	reset_control = *eax;

	/* Hard and soft resets are handled identically. */
	if (reset_control & 0x4) {
		error = vm_suspend(ctx, VM_SUSPEND_RESET);
		assert(error == 0 || errno == EALREADY);
	}
	return 0;
}
INOUT_PORT(reset_reg, 0xCF9, IOPORT_F_INOUT, reset_handler);
/*
 * ACPI's SCI is a level-triggered interrupt, so remember whether it is
 * currently asserted to avoid redundant (de)assertions.
 */
static int sci_active;

static void
sci_assert(struct vmctx *ctx)
{
	if (sci_active != 0)
		return;

	vm_isa_assert_irq(ctx, SCI_INT, SCI_INT);
	sci_active = 1;
}
/* Drop the SCI line if it is currently asserted. */
static void
sci_deassert(struct vmctx *ctx)
{
	if (sci_active == 0)
		return;

	vm_isa_deassert_irq(ctx, SCI_INT, SCI_INT);
	sci_active = 0;
}
/*
 * Power Management 1 Event Registers
 *
 * The only power management event supported is a power button upon
 * receiving SIGTERM.
 */
/* PM1 event register pair: enable mask and latched status bits. */
static uint16_t pm1_enable, pm1_status;

/* PM1x_STS status bits (write-one-to-clear, see pm1_status_handler). */
#define	PM1_TMR_STS	0x0001
#define	PM1_BM_STS	0x0010
#define	PM1_GBL_STS	0x0020
#define	PM1_PWRBTN_STS	0x0100
#define	PM1_SLPBTN_STS	0x0200
#define	PM1_RTC_STS	0x0400
#define	PM1_WAK_STS	0x8000

/* PM1x_EN enable bits, matching the status bits above. */
#define	PM1_TMR_EN	0x0001
#define	PM1_GBL_EN	0x0020
#define	PM1_PWRBTN_EN	0x0100
#define	PM1_SLPBTN_EN	0x0200
#define	PM1_RTC_EN	0x0400
/*
 * Recompute the SCI level: the line is raised whenever any latched
 * status bit has its corresponding enable bit set, otherwise lowered.
 * Called with pm_lock held.
 */
static void
sci_update(struct vmctx *ctx)
{
	int need_sci;

	/* See if the SCI should be active or not. */
	need_sci =
	    ((pm1_enable & PM1_TMR_EN) && (pm1_status & PM1_TMR_STS)) ||
	    ((pm1_enable & PM1_GBL_EN) && (pm1_status & PM1_GBL_STS)) ||
	    ((pm1_enable & PM1_PWRBTN_EN) && (pm1_status & PM1_PWRBTN_STS)) ||
	    ((pm1_enable & PM1_SLPBTN_EN) && (pm1_status & PM1_SLPBTN_STS)) ||
	    ((pm1_enable & PM1_RTC_EN) && (pm1_status & PM1_RTC_STS));

	if (need_sci)
		sci_assert(ctx);
	else
		sci_deassert(ctx);
}
/*
 * I/O handler for the PM1 status register.  Reads return the latched
 * status; writes clear the write-one-to-clear bits that are set in the
 * written value.
 */
static int
pm1_status_handler(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
		   uint32_t *eax, void *arg)
{
	uint16_t clear;

	if (bytes != 2)
		return -1;

	pthread_mutex_lock(&pm_lock);
	if (in) {
		*eax = pm1_status;
	} else {
		/* Writing 1 to a clearable status bit clears it. */
		clear = *eax & (PM1_WAK_STS | PM1_RTC_STS |
		    PM1_SLPBTN_STS | PM1_PWRBTN_STS | PM1_BM_STS);
		pm1_status &= ~clear;
		sci_update(ctx);
	}
	pthread_mutex_unlock(&pm_lock);
	return 0;
}
/*
 * I/O handler for the PM1 enable register.  Only the power-button and
 * global-lock enables are writable.
 */
static int
pm1_enable_handler(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
		   uint32_t *eax, void *arg)
{
	if (bytes != 2)
		return -1;

	pthread_mutex_lock(&pm_lock);
	if (in) {
		*eax = pm1_enable;
	} else {
		/*
		 * Only permit certain bits to be set.  We never use the
		 * global lock, but ACPI-CA whines profusely if it can't
		 * set GBL_EN.
		 */
		pm1_enable = *eax & (PM1_PWRBTN_EN | PM1_GBL_EN);
		sci_update(ctx);
	}
	pthread_mutex_unlock(&pm_lock);
	return 0;
}
INOUT_PORT(pm1_status, PM1A_EVT_ADDR, IOPORT_F_INOUT, pm1_status_handler);
INOUT_PORT(pm1_enable, PM1A_EVT_ADDR + 2, IOPORT_F_INOUT, pm1_enable_handler);
/*
 * mevent callback for SIGTERM: latch the power-button status bit (once
 * per press) and recompute the SCI level.
 */
static void
power_button_handler(int signal, enum ev_type type, void *arg)
{
	struct vmctx *ctx = arg;

	pthread_mutex_lock(&pm_lock);
	if ((pm1_status & PM1_PWRBTN_STS) == 0) {
		pm1_status |= PM1_PWRBTN_STS;
		sci_update(ctx);
	}
	pthread_mutex_unlock(&pm_lock);
}
/*
 * Power Management 1 Control Register
 *
 * This is mostly unimplemented except that we wish to handle writes that
 * set SLP_EN to handle S5 (soft power off).
 */
static uint16_t pm1_control;

/* PM1x_CNT bits. */
#define	PM1_SCI_EN	0x0001
#define	PM1_SLP_TYP	0x1c00
#define	PM1_SLP_EN	0x2000
/* Bits that always read as zero and are never stored. */
#define	PM1_ALWAYS_ZERO	0xc003
/*
 * I/O handler for the PM1 control register.  A write with SLP_EN set
 * and SLP_TYP == 5 requests S5 (soft power off).
 */
static int
pm1_control_handler(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
		    uint32_t *eax, void *arg)
{
	int error;

	if (bytes != 2)
		return -1;

	if (in) {
		*eax = pm1_control;
		return 0;
	}

	/*
	 * Various bits are write-only or reserved, so force them to
	 * zero in pm1_control.  Always preserve SCI_EN as OSPM can
	 * never change it.
	 */
	pm1_control = (pm1_control & PM1_SCI_EN) |
	    (*eax & ~(PM1_SLP_EN | PM1_ALWAYS_ZERO));

	/*
	 * If SLP_EN is set, check for S5.  ACRN-DM's _S5_ method says
	 * that '5' should be stored in SLP_TYP for S5.
	 */
	if ((*eax & PM1_SLP_EN) != 0 &&
	    (pm1_control & PM1_SLP_TYP) >> 10 == 5) {
		error = vm_suspend(ctx, VM_SUSPEND_POWEROFF);
		assert(error == 0 || errno == EALREADY);
	}
	return 0;
}
INOUT_PORT(pm1_control, PM1A_CNT_ADDR, IOPORT_F_INOUT, pm1_control_handler);
SYSRES_IO(PM1A_EVT_ADDR, 8);
/*
 * ACPI SMI Command Register
 *
 * This write-only register is used to enable and disable ACPI.
 */
static int
smi_cmd_handler(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
uint32_t *eax, void *arg)
{
	/* Registered with IOPORT_F_OUT below, so reads should not occur. */
	assert(!in);
	if (bytes != 1)
		return -1;

	pthread_mutex_lock(&pm_lock);
	switch (*eax) {
	case ACPI_ENABLE:
		pm1_control |= PM1_SCI_EN;
		/*
		 * While ACPI is enabled, route SIGTERM through mevent to
		 * the virtual power button; the default disposition is
		 * saved so ACPI_DISABLE can restore it.
		 */
		if (power_button == NULL) {
			power_button = mevent_add(SIGTERM, EVF_SIGNAL,
				power_button_handler, ctx);
			old_power_handler = signal(SIGTERM, SIG_IGN);
		}
		break;
	case ACPI_DISABLE:
		pm1_control &= ~PM1_SCI_EN;
		/* Tear down the power-button plumbing set up above. */
		if (power_button != NULL) {
			mevent_delete(power_button);
			power_button = NULL;
			signal(SIGTERM, old_power_handler);
		}
		break;
	}
	pthread_mutex_unlock(&pm_lock);
	return 0;
}
INOUT_PORT(smi_cmd, SMI_CMD, IOPORT_F_OUT, smi_cmd_handler);
SYSRES_IO(SMI_CMD, 1);
/* One-time SCI setup for this VM's PIRQ routing. */
void
sci_init(struct vmctx *ctx)
{
	/*
	 * Mark ACPI's SCI as level trigger and bump its use count
	 * in the PIRQ router.
	 */
	pci_irq_use(SCI_INT);
}

View File

@@ -0,0 +1,477 @@
/*-
* Copyright (c) 2015 Tycho Nightingale <tycho.nightingale@pluribusnetworks.com>
* Copyright (c) 2015 Nahanni Systems Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#include <sys/types.h>
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <pthread.h>
#include "types.h"
#include "atkbdc.h"
#include "console.h"
/* keyboard device commands */
#define	PS2KC_RESET_DEV		0xff
#define	PS2KC_DISABLE		0xf5
#define	PS2KC_ENABLE		0xf4
#define	PS2KC_SET_TYPEMATIC	0xf3
#define	PS2KC_SEND_DEV_ID	0xf2
#define	PS2KC_SET_SCANCODE_SET	0xf0
#define	PS2KC_ECHO		0xee
#define	PS2KC_SET_LEDS		0xed
/* keyboard device responses */
#define	PS2KC_BAT_SUCCESS	0xaa
#define	PS2KC_ACK		0xfa

/* Depth of the keyboard-to-host output buffer. */
#define	PS2KBD_FIFOSZ		16

/* Fixed-size circular byte queue feeding the keyboard controller. */
struct fifo {
	uint8_t	buf[PS2KBD_FIFOSZ];
	int	rindex;		/* index to read from */
	int	windex;		/* index to write to */
	int	num;		/* number of bytes in the fifo */
	int	size;		/* size of the fifo */
};

/* Emulated PS/2 keyboard state. */
struct ps2kbd_info {
	struct atkbdc_base	*base;	/* owning AT keyboard controller */
	pthread_mutex_t		mtx;	/* protects the fields below */
	bool			enabled;	/* scanning enabled (PS2KC_ENABLE) */
	struct fifo		fifo;	/* bytes queued for the host */
	uint8_t			curcmd;	/* current command for next byte */
};
/* One-time fifo setup: capacity is fixed at the backing buffer size. */
static void
fifo_init(struct ps2kbd_info *kbd)
{
	kbd->fifo.size = sizeof(kbd->fifo.buf);
}
/* Drop any queued bytes and restore the fixed capacity. */
static void
fifo_reset(struct ps2kbd_info *kbd)
{
	struct fifo *f = &kbd->fifo;

	bzero(f, sizeof(*f));
	f->size = sizeof(f->buf);
}
/* Append one byte to the fifo; silently dropped when the fifo is full. */
static void
fifo_put(struct ps2kbd_info *kbd, uint8_t val)
{
	struct fifo *f = &kbd->fifo;

	if (f->num >= f->size)
		return;

	f->buf[f->windex] = val;
	f->windex = (f->windex + 1) % f->size;
	f->num++;
}
/* Pop one byte into *val; returns 0 on success, -1 when empty. */
static int
fifo_get(struct ps2kbd_info *kbd, uint8_t *val)
{
	struct fifo *f = &kbd->fifo;

	if (f->num == 0)
		return -1;

	*val = f->buf[f->rindex];
	f->rindex = (f->rindex + 1) % f->size;
	f->num--;
	return 0;
}
/* Thread-safe read of one queued byte; 0 on success, -1 when empty. */
int
ps2kbd_read(struct ps2kbd_info *kbd, uint8_t *val)
{
	int rc;

	pthread_mutex_lock(&kbd->mtx);
	rc = fifo_get(kbd, val);
	pthread_mutex_unlock(&kbd->mtx);

	return rc;
}
/*
 * Process a byte sent from the controller to the keyboard.  If a
 * multi-byte command is in progress (curcmd != 0), 'val' is that
 * command's parameter; otherwise it is a new command byte.  Responses
 * are queued on the output fifo.
 */
void
ps2kbd_write(struct ps2kbd_info *kbd, uint8_t val)
{
	pthread_mutex_lock(&kbd->mtx);

	if (kbd->curcmd) {
		switch (kbd->curcmd) {
		case PS2KC_SET_TYPEMATIC:
		case PS2KC_SET_SCANCODE_SET:
		case PS2KC_SET_LEDS:
			/* Accept (and otherwise ignore) the parameter. */
			fifo_put(kbd, PS2KC_ACK);
			break;
		default:
			fprintf(stderr, "Unhandled ps2 keyboard current "
				"command byte 0x%02x\n", val);
			break;
		}
		kbd->curcmd = 0;
		pthread_mutex_unlock(&kbd->mtx);
		return;
	}

	switch (val) {
	case 0x00:
		fifo_put(kbd, PS2KC_ACK);
		break;
	case PS2KC_RESET_DEV:
		/* Ack, then report a successful self-test. */
		fifo_reset(kbd);
		fifo_put(kbd, PS2KC_ACK);
		fifo_put(kbd, PS2KC_BAT_SUCCESS);
		break;
	case PS2KC_DISABLE:
		kbd->enabled = false;
		fifo_put(kbd, PS2KC_ACK);
		break;
	case PS2KC_ENABLE:
		kbd->enabled = true;
		fifo_reset(kbd);
		fifo_put(kbd, PS2KC_ACK);
		break;
	case PS2KC_SET_TYPEMATIC:
	case PS2KC_SET_SCANCODE_SET:
	case PS2KC_SET_LEDS:
		/* Two-byte commands: remember them for the next byte. */
		kbd->curcmd = val;
		fifo_put(kbd, PS2KC_ACK);
		break;
	case PS2KC_SEND_DEV_ID:
		fifo_put(kbd, PS2KC_ACK);
		fifo_put(kbd, 0xab);
		fifo_put(kbd, 0x83);
		break;
	case PS2KC_ECHO:
		fifo_put(kbd, PS2KC_ECHO);
		break;
	default:
		fprintf(stderr, "Unhandled ps2 keyboard command "
			"0x%02x\n", val);
		break;
	}

	pthread_mutex_unlock(&kbd->mtx);
}
/*
 * Translate keysym to type 2 scancode and insert into keyboard buffer.
 *
 * 'keysym' values <= 0x7f are looked up in the ASCII table below; the
 * remaining cases handle individual non-ASCII keysyms (0xffXX range).
 * When 'down' is zero (key release) the make code is prefixed with the
 * 0xf0 break prefix; extended keys additionally emit an 0xe0 prefix.
 * Caller must hold kbd->mtx (see the commented-out assertion).
 */
static void
ps2kbd_keysym_queue(struct ps2kbd_info *kbd,
    int down, uint32_t keysym)
{
	/* ASCII to type 2 scancode lookup table */
	const uint8_t translation[128] = {
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x29, 0x16, 0x52, 0x26, 0x25, 0x2e, 0x3d, 0x52,
		0x46, 0x45, 0x3e, 0x55, 0x41, 0x4e, 0x49, 0x4a,
		0x45, 0x16, 0x1e, 0x26, 0x25, 0x2e, 0x36, 0x3d,
		0x3e, 0x46, 0x4c, 0x4c, 0x41, 0x55, 0x49, 0x4a,
		0x1e, 0x1c, 0x32, 0x21, 0x23, 0x24, 0x2b, 0x34,
		0x33, 0x43, 0x3b, 0x42, 0x4b, 0x3a, 0x31, 0x44,
		0x4d, 0x15, 0x2d, 0x1b, 0x2c, 0x3c, 0x2a, 0x1d,
		0x22, 0x35, 0x1a, 0x54, 0x5d, 0x5b, 0x36, 0x4e,
		0x0e, 0x1c, 0x32, 0x21, 0x23, 0x24, 0x2b, 0x34,
		0x33, 0x43, 0x3b, 0x42, 0x4b, 0x3a, 0x31, 0x44,
		0x4d, 0x15, 0x2d, 0x1b, 0x2c, 0x3c, 0x2a, 0x1d,
		0x22, 0x35, 0x1a, 0x54, 0x5d, 0x5b, 0x0e, 0x00,
	};

	/* assert(pthread_mutex_isowned_np(&kbd->mtx)); */
	switch (keysym) {
	case 0x0 ... 0x7f:
		/* Printable/control ASCII goes through the table. */
		if (!down)
			fifo_put(kbd, 0xf0);
		fifo_put(kbd, translation[keysym]);
		break;
	case 0xff08:	/* Back space */
		if (!down)
			fifo_put(kbd, 0xf0);
		fifo_put(kbd, 0x66);
		break;
	case 0xff09:	/* Tab */
		if (!down)
			fifo_put(kbd, 0xf0);
		fifo_put(kbd, 0x0d);
		break;
	case 0xff0d:	/* Return */
		if (!down)
			fifo_put(kbd, 0xf0);
		fifo_put(kbd, 0x5a);
		break;
	case 0xff1b:	/* Escape */
		if (!down)
			fifo_put(kbd, 0xf0);
		fifo_put(kbd, 0x76);
		break;
	case 0xff50:	/* Home */
		fifo_put(kbd, 0xe0);
		if (!down)
			fifo_put(kbd, 0xf0);
		fifo_put(kbd, 0x6c);
		break;
	case 0xff51:	/* Left arrow */
		fifo_put(kbd, 0xe0);
		if (!down)
			fifo_put(kbd, 0xf0);
		fifo_put(kbd, 0x6b);
		break;
	case 0xff52:	/* Up arrow */
		fifo_put(kbd, 0xe0);
		if (!down)
			fifo_put(kbd, 0xf0);
		fifo_put(kbd, 0x75);
		break;
	case 0xff53:	/* Right arrow */
		fifo_put(kbd, 0xe0);
		if (!down)
			fifo_put(kbd, 0xf0);
		fifo_put(kbd, 0x74);
		break;
	case 0xff54:	/* Down arrow */
		fifo_put(kbd, 0xe0);
		if (!down)
			fifo_put(kbd, 0xf0);
		fifo_put(kbd, 0x72);
		break;
	case 0xff55:	/* PgUp */
		fifo_put(kbd, 0xe0);
		if (!down)
			fifo_put(kbd, 0xf0);
		fifo_put(kbd, 0x7d);
		break;
	case 0xff56:	/* PgDwn */
		fifo_put(kbd, 0xe0);
		if (!down)
			fifo_put(kbd, 0xf0);
		fifo_put(kbd, 0x7a);
		break;
	case 0xff57:	/* End */
		fifo_put(kbd, 0xe0);
		if (!down)
			fifo_put(kbd, 0xf0);
		fifo_put(kbd, 0x69);
		break;
	case 0xff63:	/* Ins */
		fifo_put(kbd, 0xe0);
		if (!down)
			fifo_put(kbd, 0xf0);
		fifo_put(kbd, 0x70);
		break;
	case 0xff8d:	/* Keypad Enter */
		fifo_put(kbd, 0xe0);
		if (!down)
			fifo_put(kbd, 0xf0);
		fifo_put(kbd, 0x5a);
		break;
	case 0xffe1:	/* Left shift */
		if (!down)
			fifo_put(kbd, 0xf0);
		fifo_put(kbd, 0x12);
		break;
	case 0xffe2:	/* Right shift */
		if (!down)
			fifo_put(kbd, 0xf0);
		fifo_put(kbd, 0x59);
		break;
	case 0xffe3:	/* Left control */
		if (!down)
			fifo_put(kbd, 0xf0);
		fifo_put(kbd, 0x14);
		break;
	case 0xffe4:	/* Right control */
		fifo_put(kbd, 0xe0);
		if (!down)
			fifo_put(kbd, 0xf0);
		fifo_put(kbd, 0x14);
		break;
	case 0xffe7:	/* Left meta */
		/* XXX */
		break;
	case 0xffe8:	/* Right meta */
		/* XXX */
		break;
	case 0xffe9:	/* Left alt */
		if (!down)
			fifo_put(kbd, 0xf0);
		fifo_put(kbd, 0x11);
		break;
	case 0xfe03:	/* AltGr */
	case 0xffea:	/* Right alt */
		fifo_put(kbd, 0xe0);
		if (!down)
			fifo_put(kbd, 0xf0);
		fifo_put(kbd, 0x11);
		break;
	case 0xffeb:	/* Left Windows */
		fifo_put(kbd, 0xe0);
		if (!down)
			fifo_put(kbd, 0xf0);
		fifo_put(kbd, 0x1f);
		break;
	case 0xffec:	/* Right Windows */
		fifo_put(kbd, 0xe0);
		if (!down)
			fifo_put(kbd, 0xf0);
		fifo_put(kbd, 0x27);
		break;
	case 0xffbe:	/* F1 */
		if (!down)
			fifo_put(kbd, 0xf0);
		fifo_put(kbd, 0x05);
		break;
	case 0xffbf:	/* F2 */
		if (!down)
			fifo_put(kbd, 0xf0);
		fifo_put(kbd, 0x06);
		break;
	case 0xffc0:	/* F3 */
		if (!down)
			fifo_put(kbd, 0xf0);
		fifo_put(kbd, 0x04);
		break;
	case 0xffc1:	/* F4 */
		if (!down)
			fifo_put(kbd, 0xf0);
		fifo_put(kbd, 0x0C);
		break;
	case 0xffc2:	/* F5 */
		if (!down)
			fifo_put(kbd, 0xf0);
		fifo_put(kbd, 0x03);
		break;
	case 0xffc3:	/* F6 */
		if (!down)
			fifo_put(kbd, 0xf0);
		fifo_put(kbd, 0x0B);
		break;
	case 0xffc4:	/* F7 */
		if (!down)
			fifo_put(kbd, 0xf0);
		fifo_put(kbd, 0x83);
		break;
	case 0xffc5:	/* F8 */
		if (!down)
			fifo_put(kbd, 0xf0);
		fifo_put(kbd, 0x0A);
		break;
	case 0xffc6:	/* F9 */
		if (!down)
			fifo_put(kbd, 0xf0);
		fifo_put(kbd, 0x01);
		break;
	case 0xffc7:	/* F10 */
		if (!down)
			fifo_put(kbd, 0xf0);
		fifo_put(kbd, 0x09);
		break;
	case 0xffc8:	/* F11 */
		if (!down)
			fifo_put(kbd, 0xf0);
		fifo_put(kbd, 0x78);
		break;
	case 0xffc9:	/* F12 */
		if (!down)
			fifo_put(kbd, 0xf0);
		fifo_put(kbd, 0x07);
		break;
	case 0xffff:	/* Del */
		fifo_put(kbd, 0xe0);
		if (!down)
			fifo_put(kbd, 0xf0);
		fifo_put(kbd, 0x71);
		break;
	default:
		fprintf(stderr, "Unhandled ps2 keyboard keysym 0x%x\n",
			keysym);
		break;
	}
}
/*
 * Console keyboard callback: queue the scancode for the keysym and,
 * if the fifo had room for it, raise a keyboard-controller event.
 */
static void
ps2kbd_event(int down, uint32_t keysym, void *arg)
{
	struct ps2kbd_info *kbd = arg;
	int was_full;

	pthread_mutex_lock(&kbd->mtx);
	if (!kbd->enabled) {
		pthread_mutex_unlock(&kbd->mtx);
		return;
	}

	/* Remember whether the fifo was already full before queueing. */
	was_full = (kbd->fifo.num == PS2KBD_FIFOSZ);
	ps2kbd_keysym_queue(kbd, down, keysym);
	pthread_mutex_unlock(&kbd->mtx);

	if (!was_full)
		atkbdc_event(kbd->base, 1);
}
/*
 * Allocate and initialize an emulated PS/2 keyboard attached to the
 * given keyboard controller, and register it for console key events.
 */
struct ps2kbd_info *
ps2kbd_init(struct atkbdc_base *base)
{
	struct ps2kbd_info *kbd;

	kbd = calloc(1, sizeof(struct ps2kbd_info));
	assert(kbd != NULL);

	pthread_mutex_init(&kbd->mtx, NULL);
	fifo_init(kbd);
	kbd->base = base;

	console_kbd_register(ps2kbd_event, kbd, 1);

	return kbd;
}

View File

@@ -0,0 +1,412 @@
/*-
* Copyright (c) 2015 Tycho Nightingale <tycho.nightingale@pluribusnetworks.com>
* Copyright (c) 2015 Nahanni Systems Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <pthread.h>
#include "types.h"
#include "atkbdc.h"
#include "console.h"
/* mouse device commands */
#define	PS2MC_RESET_DEV		0xff
#define	PS2MC_SET_DEFAULTS	0xf6
#define	PS2MC_DISABLE		0xf5
#define	PS2MC_ENABLE		0xf4
#define	PS2MC_SET_SAMPLING_RATE	0xf3
#define	PS2MC_SEND_DEV_ID	0xf2
#define	PS2MC_SET_REMOTE_MODE	0xf0
#define	PS2MC_SEND_DEV_DATA	0xeb
#define	PS2MC_SET_STREAM_MODE	0xea
#define	PS2MC_SEND_DEV_STATUS	0xe9
#define	PS2MC_SET_RESOLUTION	0xe8
#define	PS2MC_SET_SCALING1	0xe7
#define	PS2MC_SET_SCALING2	0xe6
/* mouse device responses */
#define	PS2MC_BAT_SUCCESS	0xaa
#define	PS2MC_ACK		0xfa

/* mouse device id */
#define	PS2MOUSE_DEV_ID		0x0

/* mouse data bits (first byte of a movement packet) */
#define	PS2M_DATA_Y_OFLOW	0x80
#define	PS2M_DATA_X_OFLOW	0x40
#define	PS2M_DATA_Y_SIGN	0x20
#define	PS2M_DATA_X_SIGN	0x10
#define	PS2M_DATA_AONE		0x08
#define	PS2M_DATA_MID_BUTTON	0x04
#define	PS2M_DATA_RIGHT_BUTTON	0x02
#define	PS2M_DATA_LEFT_BUTTON	0x01

/* mouse status bits */
#define	PS2M_STS_REMOTE_MODE	0x40
#define	PS2M_STS_ENABLE_DEV	0x20
#define	PS2M_STS_SCALING_21	0x10
#define	PS2M_STS_MID_BUTTON	0x04
#define	PS2M_STS_RIGHT_BUTTON	0x02
#define	PS2M_STS_LEFT_BUTTON	0x01

/* Depth of the mouse-to-host output buffer. */
#define	PS2MOUSE_FIFOSZ		16

/* Fixed-size circular byte queue feeding the keyboard controller. */
struct fifo {
	uint8_t	buf[PS2MOUSE_FIFOSZ];
	int	rindex;		/* index to read from */
	int	windex;		/* index to write to */
	int	num;		/* number of bytes in the fifo */
	int	size;		/* size of the fifo */
};

/* Emulated PS/2 mouse state. */
struct ps2mouse_info {
	struct atkbdc_base	*base;	/* owning AT keyboard controller */
	pthread_mutex_t		mtx;	/* protects the fields below */
	uint8_t			status;	/* PS2M_STS_* bits */
	uint8_t			resolution;
	uint8_t			sampling_rate;
	int			ctrlenable;	/* controller-side reporting on? */
	struct fifo		fifo;	/* bytes queued for the host */
	uint8_t			curcmd;	/* current command for next byte */
	int			cur_x, cur_y;	/* last absolute position */
	int			delta_x, delta_y; /* accumulated deltas */
};
/* One-time fifo setup: capacity is fixed at the backing buffer size. */
static void
fifo_init(struct ps2mouse_info *mouse)
{
	mouse->fifo.size = sizeof(mouse->fifo.buf);
}
/* Drop any queued bytes and restore the fixed capacity. */
static void
fifo_reset(struct ps2mouse_info *mouse)
{
	struct fifo *f = &mouse->fifo;

	bzero(f, sizeof(*f));
	f->size = sizeof(f->buf);
}
/* Append one byte to the fifo; silently dropped when the fifo is full. */
static void
fifo_put(struct ps2mouse_info *mouse, uint8_t val)
{
	struct fifo *f = &mouse->fifo;

	if (f->num >= f->size)
		return;

	f->buf[f->windex] = val;
	f->windex = (f->windex + 1) % f->size;
	f->num++;
}
/* Pop one byte into *val; returns 0 on success, -1 when empty. */
static int
fifo_get(struct ps2mouse_info *mouse, uint8_t *val)
{
	struct fifo *f = &mouse->fifo;

	if (f->num == 0)
		return -1;

	*val = f->buf[f->rindex];
	f->rindex = (f->rindex + 1) % f->size;
	f->num--;
	return 0;
}
/* Discard any accumulated movement.  Caller must hold mouse->mtx. */
static void
movement_reset(struct ps2mouse_info *mouse)
{
	/* assert(pthread_mutex_isowned_np(&mouse->mtx)); */
	mouse->delta_x = 0;
	mouse->delta_y = 0;
}
/*
 * Fold a new absolute position into the accumulated deltas.  Note the
 * inverted sign on the y axis (cur_y - y).
 */
static void
movement_update(struct ps2mouse_info *mouse, int x, int y)
{
	mouse->delta_x += x - mouse->cur_x;
	mouse->delta_y += mouse->cur_y - y;

	mouse->cur_x = x;
	mouse->cur_y = y;
}
/*
 * Build a standard 3-byte movement packet from the accumulated deltas
 * and queue it (only if all three bytes fit).  Deltas beyond +/-255 are
 * clamped with the overflow flags set.  Caller must hold mouse->mtx.
 */
static void
movement_get(struct ps2mouse_info *mouse)
{
	int dx, dy;
	uint8_t val0, val1, val2;

	/* assert(pthread_mutex_isowned_np(&mouse->mtx)); */
	dx = mouse->delta_x;
	dy = mouse->delta_y;
	mouse->delta_x = 0;
	mouse->delta_y = 0;

	/* Byte 0: always-one bit plus button state and sign/overflow flags. */
	val0 = PS2M_DATA_AONE;
	val0 |= mouse->status & (PS2M_DATA_LEFT_BUTTON |
	    PS2M_DATA_RIGHT_BUTTON | PS2M_DATA_MID_BUTTON);

	if (dx < 0)
		val0 |= PS2M_DATA_X_SIGN;
	if (dx > 255 || dx < -255) {
		val0 |= PS2M_DATA_X_OFLOW;
		val1 = 255;
	} else
		val1 = (uint8_t)dx;

	if (dy < 0)
		val0 |= PS2M_DATA_Y_SIGN;
	if (dy > 255 || dy < -255) {
		val0 |= PS2M_DATA_Y_OFLOW;
		val2 = 255;
	} else
		val2 = (uint8_t)dy;

	/* Queue the packet only if it fits in its entirety. */
	if (mouse->fifo.num < (mouse->fifo.size - 3)) {
		fifo_put(mouse, val0);
		fifo_put(mouse, val1);
		fifo_put(mouse, val2);
	}
}
/* Restore the device's power-on state.  Caller must hold mouse->mtx. */
static void
ps2mouse_reset(struct ps2mouse_info *mouse)
{
	/* assert(pthread_mutex_isowned_np(&mouse->mtx)); */
	fifo_reset(mouse);
	movement_reset(mouse);

	/* Defaults: device reporting on, resolution 4, 100 samples/sec. */
	mouse->status = PS2M_STS_ENABLE_DEV;
	mouse->resolution = 4;
	mouse->sampling_rate = 100;

	mouse->cur_x = 0;
	mouse->cur_y = 0;
	mouse->delta_x = 0;
	mouse->delta_y = 0;
}
/* Thread-safe read of one queued byte; 0 on success, -1 when empty. */
int
ps2mouse_read(struct ps2mouse_info *mouse, uint8_t *val)
{
	int rc;

	pthread_mutex_lock(&mouse->mtx);
	rc = fifo_get(mouse, val);
	pthread_mutex_unlock(&mouse->mtx);

	return rc;
}
/*
 * Return the number of bytes currently queued for the host.
 *
 * NOTE(review): reads fifo.num without taking mouse->mtx, so the count
 * may be stale — confirm callers only use it as a hint.
 */
int
ps2mouse_fifocnt(struct ps2mouse_info *mouse)
{
	return mouse->fifo.num;
}
/*
 * Enable or disable controller-side mouse reporting.  Disabling also
 * discards anything still queued for the host.
 */
void
ps2mouse_toggle(struct ps2mouse_info *mouse, int enable)
{
	pthread_mutex_lock(&mouse->mtx);
	if (enable) {
		mouse->ctrlenable = 1;
	} else {
		mouse->ctrlenable = 0;
		mouse->fifo.rindex = 0;
		mouse->fifo.windex = 0;
		mouse->fifo.num = 0;
	}
	pthread_mutex_unlock(&mouse->mtx);
}
/*
 * Process a byte sent from the controller to the mouse.  If a two-byte
 * command is in progress (curcmd != 0), 'val' is that command's
 * parameter.  With 'insert' set, the byte is placed directly on the
 * output fifo instead of being interpreted.  Note that any previously
 * queued output is dropped (fifo_reset) before the byte is handled.
 */
void
ps2mouse_write(struct ps2mouse_info *mouse, uint8_t val, int insert)
{
	pthread_mutex_lock(&mouse->mtx);
	fifo_reset(mouse);
	if (mouse->curcmd) {
		switch (mouse->curcmd) {
		case PS2MC_SET_SAMPLING_RATE:
			mouse->sampling_rate = val;
			fifo_put(mouse, PS2MC_ACK);
			break;
		case PS2MC_SET_RESOLUTION:
			mouse->resolution = val;
			fifo_put(mouse, PS2MC_ACK);
			break;
		default:
			fprintf(stderr, "Unhandled ps2 mouse current "
				"command byte 0x%02x\n", val);
			break;
		}
		mouse->curcmd = 0;
	} else if (insert) {
		/* Byte injected by the controller, not a command. */
		fifo_put(mouse, val);
	} else {
		switch (val) {
		case 0x00:
			fifo_put(mouse, PS2MC_ACK);
			break;
		case PS2MC_RESET_DEV:
			/* Ack, report self-test success and the device id. */
			ps2mouse_reset(mouse);
			fifo_put(mouse, PS2MC_ACK);
			fifo_put(mouse, PS2MC_BAT_SUCCESS);
			fifo_put(mouse, PS2MOUSE_DEV_ID);
			break;
		case PS2MC_SET_DEFAULTS:
			ps2mouse_reset(mouse);
			fifo_put(mouse, PS2MC_ACK);
			break;
		case PS2MC_DISABLE:
			fifo_reset(mouse);
			mouse->status &= ~PS2M_STS_ENABLE_DEV;
			fifo_put(mouse, PS2MC_ACK);
			break;
		case PS2MC_ENABLE:
			fifo_reset(mouse);
			mouse->status |= PS2M_STS_ENABLE_DEV;
			fifo_put(mouse, PS2MC_ACK);
			break;
		case PS2MC_SET_SAMPLING_RATE:
			/* Two-byte command: parameter arrives next. */
			mouse->curcmd = val;
			fifo_put(mouse, PS2MC_ACK);
			break;
		case PS2MC_SEND_DEV_ID:
			fifo_put(mouse, PS2MC_ACK);
			fifo_put(mouse, PS2MOUSE_DEV_ID);
			break;
		case PS2MC_SET_REMOTE_MODE:
			mouse->status |= PS2M_STS_REMOTE_MODE;
			fifo_put(mouse, PS2MC_ACK);
			break;
		case PS2MC_SEND_DEV_DATA:
			/* Polled read: emit one movement packet. */
			fifo_put(mouse, PS2MC_ACK);
			movement_get(mouse);
			break;
		case PS2MC_SET_STREAM_MODE:
			mouse->status &= ~PS2M_STS_REMOTE_MODE;
			fifo_put(mouse, PS2MC_ACK);
			break;
		case PS2MC_SEND_DEV_STATUS:
			fifo_put(mouse, PS2MC_ACK);
			fifo_put(mouse, mouse->status);
			fifo_put(mouse, mouse->resolution);
			fifo_put(mouse, mouse->sampling_rate);
			break;
		case PS2MC_SET_RESOLUTION:
			/* Two-byte command: parameter arrives next. */
			mouse->curcmd = val;
			fifo_put(mouse, PS2MC_ACK);
			break;
		case PS2MC_SET_SCALING1:
		case PS2MC_SET_SCALING2:
			/* Scaling is accepted but has no effect here. */
			fifo_put(mouse, PS2MC_ACK);
			break;
		default:
			fifo_put(mouse, PS2MC_ACK);
			fprintf(stderr, "Unhandled ps2 mouse command "
				"0x%02x\n", val);
			break;
		}
	}
	pthread_mutex_unlock(&mouse->mtx);
}
/*
 * Console pointer callback: fold the new absolute position and button
 * state into the emulated mouse and, when reporting is active, queue a
 * movement packet and notify the keyboard controller.
 */
static void
ps2mouse_event(uint8_t button, int x, int y, void *arg)
{
	struct ps2mouse_info *mouse = arg;
	int fifo_cnt;

	pthread_mutex_lock(&mouse->mtx);
	movement_update(mouse, x, y);

	mouse->status &= ~(PS2M_STS_LEFT_BUTTON |
	    PS2M_STS_RIGHT_BUTTON | PS2M_STS_MID_BUTTON);
	if (button & (1 << 0))
		mouse->status |= PS2M_STS_LEFT_BUTTON;
	if (button & (1 << 1))
		mouse->status |= PS2M_STS_MID_BUTTON;
	if (button & (1 << 2))
		mouse->status |= PS2M_STS_RIGHT_BUTTON;

	if ((mouse->status & PS2M_STS_ENABLE_DEV) == 0 || !mouse->ctrlenable) {
		/* no data reporting */
		pthread_mutex_unlock(&mouse->mtx);
		return;
	}

	movement_get(mouse);
	/*
	 * Snapshot the fifo depth while still holding the lock: the
	 * previous code read fifo.num after unlocking, racing with a
	 * concurrent ps2mouse_read() draining the fifo.
	 */
	fifo_cnt = mouse->fifo.num;
	pthread_mutex_unlock(&mouse->mtx);

	if (fifo_cnt > 0)
		atkbdc_event(mouse->base, 0);
}
/*
 * Allocate and initialize an emulated PS/2 mouse attached to the given
 * keyboard controller, and register it for console pointer events.
 */
struct ps2mouse_info *
ps2mouse_init(struct atkbdc_base *base)
{
	struct ps2mouse_info *mouse;

	mouse = calloc(1, sizeof(struct ps2mouse_info));
	assert(mouse != NULL);

	pthread_mutex_init(&mouse->mtx, NULL);
	fifo_init(mouse);
	mouse->base = base;

	/* Bring the device to its power-on state. */
	pthread_mutex_lock(&mouse->mtx);
	ps2mouse_reset(mouse);
	pthread_mutex_unlock(&mouse->mtx);

	console_ptr_register(ps2mouse_event, mouse, 1);

	return mouse;
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,687 @@
/*-
* Copyright (c) 2012 NetApp, Inc.
* Copyright (c) 2013 Neel Natu <neel@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/cdefs.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <err.h>
#include <fcntl.h>
#include <termios.h>
#include <unistd.h>
#include <stdbool.h>
#include <string.h>
#include <pthread.h>
#include <sysexits.h>
#include "types.h"
#include "mevent.h"
#include "uart_core.h"
#include "ns16550.h"
#include "dm.h"
/* Legacy COM port resources. */
#define	COM1_BASE	0x3F8
#define	COM1_IRQ	4
#define	COM2_BASE	0x2F8
#define	COM2_IRQ	3

/* Reference clock and the baud rate programmed at reset. */
#define	DEFAULT_RCLK	1843200
#define	DEFAULT_BAUD	9600

#define	FCR_RX_MASK	0xC0

#define	MCR_OUT1	0x04
#define	MCR_OUT2	0x08

#define	MSR_DELTA_MASK	0x0f

#ifndef REG_SCR
#define	REG_SCR		com_scr
#endif

/* Receive fifo depth when FIFO mode is enabled via the FCR. */
#define	FIFOSZ	256

/* stdin termios saved so ttyclose() can restore them at exit. */
static struct termios tio_stdio_orig;

/* Fixed legacy COM port slots and whether each is already claimed. */
static struct {
	int	baseaddr;
	int	irq;
	bool	inuse;
} uart_lres[] = {
	{ COM1_BASE, COM1_IRQ, false},
	{ COM2_BASE, COM2_IRQ, false},
};

#define	UART_NLDEVS	(ARRAY_SIZE(uart_lres))

/* Circular receive buffer between the backing tty and the guest. */
struct fifo {
	uint8_t	buf[FIFOSZ];
	int	rindex;		/* index to read from */
	int	windex;		/* index to write to */
	int	num;		/* number of characters in the fifo */
	int	size;		/* size of the fifo */
};

/* Backing tty device and its saved/active termios settings. */
struct ttyfd {
	bool	opened;
	int	fd;		/* tty device file descriptor */
	struct termios tio_orig, tio_new;	/* I/O Terminals */
};

/* Emulated ns16550-style UART state. */
struct uart_vdev {
	pthread_mutex_t mtx;	/* protects all elements */
	uint8_t	data;		/* Data register (R/W) */
	uint8_t	ier;		/* Interrupt enable register (R/W) */
	uint8_t	lcr;		/* Line control register (R/W) */
	uint8_t	mcr;		/* Modem control register (R/W) */
	uint8_t	lsr;		/* Line status register (R/W) */
	uint8_t	msr;		/* Modem status register (R/W) */
	uint8_t	fcr;		/* FIFO control register (W) */
	uint8_t	scr;		/* Scratch register (R/W) */
	uint8_t	dll;		/* Baudrate divisor latch LSB */
	uint8_t	dlh;		/* Baudrate divisor latch MSB */
	struct fifo rxfifo;
	struct mevent *mev;	/* read-readiness event on the tty fd */
	struct ttyfd tty;
	bool	thre_int_pending;	/* THRE interrupt pending */
	void	*arg;			/* cookie for the intr callbacks */
	uart_intr_func_t intr_assert;
	uart_intr_func_t intr_deassert;
};

static void uart_drain(int fd, enum ev_type ev, void *arg);
/* atexit hook: restore the original stdin termios settings. */
static void
ttyclose(void)
{
	tcsetattr(STDIN_FILENO, TCSANOW, &tio_stdio_orig);
}
/*
 * Put the tty into raw mode, keeping a copy of the original settings.
 * When the tty is stdin, arrange for the settings to be restored at
 * process exit.
 */
static void
ttyopen(struct ttyfd *tf)
{
	tcgetattr(tf->fd, &tf->tio_orig);

	tf->tio_new = tf->tio_orig;
	cfmakeraw(&tf->tio_new);
	tf->tio_new.c_cflag |= CLOCAL;
	tcsetattr(tf->fd, TCSANOW, &tf->tio_new);

	if (tf->fd == STDIN_FILENO) {
		tio_stdio_orig = tf->tio_orig;
		atexit(ttyclose);
	}
}
/* Read one byte from the tty; returns it, or -1 when nothing was read. */
static int
ttyread(struct ttyfd *tf)
{
	unsigned char c;

	return (read(tf->fd, &c, 1) == 1) ? c : -1;
}
/* Write one byte to the tty; the result is deliberately ignored. */
static void
ttywrite(struct ttyfd *tf, unsigned char wb)
{
	(void)write(tf->fd, &wb, 1);
}
/*
 * Empty the receive fifo and set its capacity to 'size'.  If a tty
 * backs the uart, pending tty input is drained and the read-event is
 * re-enabled.
 */
static void
rxfifo_reset(struct uart_vdev *uart, int size)
{
	char drainbuf[32];
	struct fifo *fifo;
	ssize_t nread;
	int error;

	fifo = &uart->rxfifo;
	bzero(fifo, sizeof(struct fifo));
	fifo->size = size;

	if (!uart->tty.opened)
		return;

	/* Flush any unread input from the tty buffer. */
	do {
		nread = read(uart->tty.fd, drainbuf, sizeof(drainbuf));
	} while (nread == sizeof(drainbuf));

	/*
	 * Enable mevent to trigger when new characters are available
	 * on the tty fd.
	 */
	error = mevent_enable(uart->mev);
	assert(error == 0);
}
/* Nonzero when at least one more byte fits in the receive fifo. */
static int
rxfifo_available(struct uart_vdev *uart)
{
	return uart->rxfifo.num < uart->rxfifo.size;
}
/*
 * Queue one received byte.  Returns 0 on success, -1 if the fifo is
 * full.  On transitioning to full, the tty read-event is disabled so
 * the mevent thread stops polling until a byte is consumed.
 */
static int
rxfifo_putchar(struct uart_vdev *uart, uint8_t ch)
{
	struct fifo *fifo = &uart->rxfifo;
	int error;

	if (fifo->num >= fifo->size)
		return -1;

	fifo->buf[fifo->windex] = ch;
	fifo->windex = (fifo->windex + 1) % fifo->size;
	fifo->num++;

	if (!rxfifo_available(uart) && uart->tty.opened) {
		/*
		 * Disable mevent callback if the FIFO is full.
		 */
		error = mevent_disable(uart->mev);
		assert(error == 0);
	}
	return 0;
}
/*
 * Dequeue one received byte, or return -1 when the fifo is empty.  If
 * the fifo was full, the tty read-event is re-enabled now that there is
 * room again.
 */
static int
rxfifo_getchar(struct uart_vdev *uart)
{
	struct fifo *fifo = &uart->rxfifo;
	int c, error, wasfull;

	if (fifo->num == 0)
		return -1;

	wasfull = !rxfifo_available(uart);

	c = fifo->buf[fifo->rindex];
	fifo->rindex = (fifo->rindex + 1) % fifo->size;
	fifo->num--;

	if (wasfull && uart->tty.opened) {
		error = mevent_enable(uart->mev);
		assert(error == 0);
	}
	return c;
}
/* Number of characters currently queued in the receive fifo. */
static int
rxfifo_numchars(struct uart_vdev *uart)
{
	struct fifo *fifo = &uart->rxfifo;

	return fifo->num;
}
/* Put the backing tty in raw mode and watch its fd for readable data. */
static void
uart_opentty(struct uart_vdev *uart)
{
	ttyopen(&uart->tty);
	uart->mev = mevent_add(uart->tty.fd, EVF_READ, uart_drain, uart);
	assert(uart->mev != NULL);
}
/* Compute the MSR value implied by the given MCR contents. */
static uint8_t
modem_status(uint8_t mcr)
{
	uint8_t msr = 0;

	if ((mcr & MCR_LOOPBACK) == 0) {
		/*
		 * Always assert DCD and DSR so tty open doesn't block
		 * even if CLOCAL is turned off.
		 */
		msr = MSR_DCD | MSR_DSR;
	} else {
		/*
		 * In the loopback mode certain bits from the MCR are
		 * reflected back into MSR.
		 */
		if (mcr & MCR_RTS)
			msr |= MSR_CTS;
		if (mcr & MCR_DTR)
			msr |= MSR_DSR;
		if (mcr & MCR_OUT1)
			msr |= MSR_RI;
		if (mcr & MCR_OUT2)
			msr |= MSR_DCD;
	}

	/* This helper never synthesizes the delta bits. */
	assert((msr & MSR_DELTA_MASK) == 0);

	return msr;
}
/*
 * The IIR returns a prioritized interrupt reason:
 * - receive data available
 * - transmit holding register empty
 * - modem status change
 *
 * Return an interrupt reason if one is available.
 */
static int
uart_intr_reason(struct uart_vdev *uart)
{
	/* Checked in priority order; first enabled+pending source wins. */
	if ((uart->lsr & LSR_OE) != 0 && (uart->ier & IER_ERLS) != 0)
		return IIR_RLS;
	if (rxfifo_numchars(uart) > 0 && (uart->ier & IER_ERXRDY) != 0)
		return IIR_RXTOUT;
	if (uart->thre_int_pending && (uart->ier & IER_ETXRDY) != 0)
		return IIR_TXRDY;
	if ((uart->msr & MSR_DELTA_MASK) != 0 && (uart->ier & IER_EMSC) != 0)
		return IIR_MLSC;
	return IIR_NOPEND;
}
/*
 * Put the UART in its power-on state: program the divisor latch for
 * the default baud rate, refresh the modem status lines, and shrink the
 * receive fifo to one byte until software enables FIFO mode.
 */
static void
uart_reset(struct uart_vdev *uart)
{
	uint16_t divisor;

	divisor = DEFAULT_RCLK / DEFAULT_BAUD / 16;
	uart->dll = divisor;
	/*
	 * The divisor latch is 16 bits wide, so the high byte is
	 * (divisor >> 8).  The previous code shifted by 16, which is
	 * always 0 for a uint16_t and would lose the high byte for any
	 * divisor above 255.
	 */
	uart->dlh = divisor >> 8;
	uart->msr = modem_status(uart->mcr);

	rxfifo_reset(uart, 1);	/* no fifo until enabled by software */
}
/*
 * Toggle the COM port's intr pin depending on whether or not we have an
 * interrupt condition to report to the processor.
 */
static void
uart_toggle_intr(struct uart_vdev *uart)
{
	if (uart_intr_reason(uart) == IIR_NOPEND)
		(*uart->intr_deassert)(uart->arg);
	else
		(*uart->intr_assert)(uart->arg);
}
/*
 * mevent read callback for the backing tty: pull all pending input
 * into the RX FIFO (unless in loopback, where input is discarded) and
 * update the interrupt line.
 */
static void
uart_drain(int fd, enum ev_type ev, void *arg)
{
	struct uart_vdev *uart = arg;
	int ch;

	assert(fd == uart->tty.fd);
	assert(ev == EVF_READ);

	/*
	 * This routine is called in the context of the mevent thread;
	 * take the uart lock to protect against concurrent access from
	 * a vCPU i/o exit.
	 */
	pthread_mutex_lock(&uart->mtx);

	if (uart->mcr & MCR_LOOPBACK) {
		/* Loopback mode: consume and discard the input. */
		(void) ttyread(&uart->tty);
	} else {
		while ((ch = ttyread(&uart->tty)) != -1)
			rxfifo_putchar(uart, ch);
		uart_toggle_intr(uart);
	}

	pthread_mutex_unlock(&uart->mtx);
}
/*
 * Handle a guest write of 'value' to uart register 'offset'.
 *
 * Runs under the uart lock; the interrupt line is re-evaluated before
 * returning.  DLAB-gated divisor-latch writes are handled before the
 * normal register decode.
 */
void
uart_write(struct uart_vdev *uart, int offset, uint8_t value)
{
	int fifosz;
	uint8_t msr;

	pthread_mutex_lock(&uart->mtx);

	/*
	 * Take care of the special case DLAB accesses first
	 */
	if ((uart->lcr & LCR_DLAB) != 0) {
		if (offset == REG_DLL) {
			uart->dll = value;
			goto done;
		}

		if (offset == REG_DLH) {
			uart->dlh = value;
			goto done;
		}
	}

	switch (offset) {
	case REG_DATA:
		if (uart->mcr & MCR_LOOPBACK) {
			/* Loopback: echo into the RX FIFO; overflow sets OE. */
			if (rxfifo_putchar(uart, value) != 0)
				uart->lsr |= LSR_OE;
		} else if (uart->tty.opened) {
			ttywrite(&uart->tty, value);
		} /* else drop on floor */
		/* The virtual transmitter completes instantly. */
		uart->thre_int_pending = true;
		break;
	case REG_IER:
		/*
		 * Apply mask so that bits 4-7 are 0
		 * Also enables bits 0-3 only if they're 1
		 */
		uart->ier = value & 0x0F;
		break;
	case REG_FCR:
		/*
		 * When moving from FIFO and 16450 mode and vice versa,
		 * the FIFO contents are reset.
		 */
		if ((uart->fcr & FCR_ENABLE) ^ (value & FCR_ENABLE)) {
			fifosz = (value & FCR_ENABLE) ? FIFOSZ : 1;
			rxfifo_reset(uart, fifosz);
		}

		/*
		 * The FCR_ENABLE bit must be '1' for the programming
		 * of other FCR bits to be effective.
		 */
		if ((value & FCR_ENABLE) == 0) {
			uart->fcr = 0;
		} else {
			if ((value & FCR_RCV_RST) != 0)
				rxfifo_reset(uart, FIFOSZ);

			uart->fcr = value &
				(FCR_ENABLE | FCR_DMA | FCR_RX_MASK);
		}
		break;
	case REG_LCR:
		uart->lcr = value;
		break;
	case REG_MCR:
		/* Apply mask so that bits 5-7 are 0 */
		uart->mcr = value & 0x1F;
		msr = modem_status(uart->mcr);

		/*
		 * Detect if there has been any change between the
		 * previous and the new value of MSR. If there is
		 * then assert the appropriate MSR delta bit.
		 */
		if ((msr & MSR_CTS) ^ (uart->msr & MSR_CTS))
			uart->msr |= MSR_DCTS;
		if ((msr & MSR_DSR) ^ (uart->msr & MSR_DSR))
			uart->msr |= MSR_DDSR;
		if ((msr & MSR_DCD) ^ (uart->msr & MSR_DCD))
			uart->msr |= MSR_DDCD;
		/* TERI latches only on a falling edge of RI. */
		if ((uart->msr & MSR_RI) != 0 && (msr & MSR_RI) == 0)
			uart->msr |= MSR_TERI;

		/*
		 * Update the value of MSR while retaining the delta
		 * bits.
		 */
		uart->msr &= MSR_DELTA_MASK;
		uart->msr |= msr;
		break;
	case REG_LSR:
		/*
		 * Line status register is not meant to be written to
		 * during normal operation.
		 */
		break;
	case REG_MSR:
		/*
		 * As far as I can tell MSR is a read-only register.
		 */
		break;
	case REG_SCR:
		uart->scr = value;
		break;
	default:
		break;
	}

done:
	uart_toggle_intr(uart);
	pthread_mutex_unlock(&uart->mtx);
}
/*
 * Handle a guest read of uart register 'offset' and return its value.
 *
 * Runs under the uart lock.  Read side effects (clearing the pending
 * THRE interrupt on IIR read, clearing OE on LSR read, clearing the
 * delta bits on MSR read) are applied here as on real hardware, and
 * the interrupt line is re-evaluated before returning.
 */
uint8_t
uart_read(struct uart_vdev *uart, int offset)
{
	uint8_t iir, intr_reason, reg;

	pthread_mutex_lock(&uart->mtx);

	/*
	 * Take care of the special case DLAB accesses first
	 */
	if ((uart->lcr & LCR_DLAB) != 0) {
		if (offset == REG_DLL) {
			reg = uart->dll;
			goto done;
		}

		if (offset == REG_DLH) {
			reg = uart->dlh;
			goto done;
		}
	}

	switch (offset) {
	case REG_DATA:
		reg = rxfifo_getchar(uart);
		break;
	case REG_IER:
		reg = uart->ier;
		break;
	case REG_IIR:
		iir = (uart->fcr & FCR_ENABLE) ? IIR_FIFO_MASK : 0;

		intr_reason = uart_intr_reason(uart);

		/*
		 * Deal with side effects of reading the IIR register
		 */
		if (intr_reason == IIR_TXRDY)
			uart->thre_int_pending = false;

		iir |= intr_reason;

		reg = iir;
		break;
	case REG_LCR:
		reg = uart->lcr;
		break;
	case REG_MCR:
		reg = uart->mcr;
		break;
	case REG_LSR:
		/* Transmitter is always ready for more data */
		uart->lsr |= LSR_TEMT | LSR_THRE;

		/* Check for new receive data */
		if (rxfifo_numchars(uart) > 0)
			uart->lsr |= LSR_RXRDY;
		else
			uart->lsr &= ~LSR_RXRDY;

		reg = uart->lsr;

		/* The LSR_OE bit is cleared on LSR read */
		uart->lsr &= ~LSR_OE;
		break;
	case REG_MSR:
		/*
		 * MSR delta bits are cleared on read
		 */
		reg = uart->msr;
		uart->msr &= ~MSR_DELTA_MASK;
		break;
	case REG_SCR:
		reg = uart->scr;
		break;
	default:
		/* Unimplemented registers read as all-ones. */
		reg = 0xFF;
		break;
	}

done:
	uart_toggle_intr(uart);
	pthread_mutex_unlock(&uart->mtx);

	return reg;
}
/*
 * Claim one of the fixed legacy COM port resources.  On success the
 * port's i/o base and IRQ are returned through the out parameters.
 * Fails (-1) when the index is out of range or already claimed.
 */
int
uart_legacy_alloc(int which, int *baseaddr, int *irq)
{
	if (which < 0 || which >= UART_NLDEVS)
		return -1;
	if (uart_lres[which].inuse)
		return -1;

	uart_lres[which].inuse = true;
	*baseaddr = uart_lres[which].baseaddr;
	*irq = uart_lres[which].irq;

	return 0;
}
void
uart_legacy_dealloc(int which)
{
uart_lres[which].inuse = false;
}
/*
 * Allocate and initialize a virtual uart.  The two callbacks are used
 * to raise/lower the interrupt line; 'arg' is passed back to them.
 */
struct uart_vdev *
uart_init(uart_intr_func_t intr_assert, uart_intr_func_t intr_deassert,
	  void *arg)
{
	struct uart_vdev *uart;

	uart = calloc(1, sizeof(struct uart_vdev));
	assert(uart != NULL);

	/* Record the interrupt callbacks and their opaque argument. */
	uart->intr_assert = intr_assert;
	uart->intr_deassert = intr_deassert;
	uart->arg = arg;

	pthread_mutex_init(&uart->mtx, NULL);

	/* Bring the registers to their power-on values. */
	uart_reset(uart);

	return uart;
}
/*
 * Tear down a virtual uart.  If stdin was the backend, restore the
 * terminal state and mark stdio available for reuse.
 */
void
uart_deinit(struct uart_vdev *uart)
{
	if (uart == NULL)
		return;

	if (uart->tty.opened && uart->tty.fd == STDIN_FILENO) {
		ttyclose();
		stdio_in_use = false;
	}
	free(uart);
}
/*
 * Try to use the file named by 'opts' as the uart backend.  Succeeds
 * (0) only if the path can be opened and refers to a terminal.
 */
static int
uart_tty_backend(struct uart_vdev *uart, const char *opts)
{
	int fd;

	fd = open(opts, O_RDWR | O_NONBLOCK);
	/* 0 is a valid descriptor: test for < 0, not <= 0. */
	if (fd < 0)
		return -1;

	if (!isatty(fd)) {
		/* Not a terminal: don't leak the descriptor. */
		close(fd);
		return -1;
	}

	uart->tty.fd = fd;
	uart->tty.opened = true;

	return 0;
}
/*
 * Attach a backend to the uart.  'opts' is either NULL (no backend),
 * "stdio" (use the process's stdin, at most once per process), or a
 * path to a tty device.  Returns 0 on success, -1 on failure.
 */
int
uart_set_backend(struct uart_vdev *uart, const char *opts)
{
	int retval;
	int flags;

	retval = -1;

	if (opts == NULL)
		return 0;

	if (strcmp("stdio", opts) == 0) {
		if (!stdio_in_use) {
			uart->tty.fd = STDIN_FILENO;
			uart->tty.opened = true;
			stdio_in_use = true;
			retval = 0;
		}
	} else if (uart_tty_backend(uart, opts) == 0) {
		retval = 0;
	}

	/*
	 * Make the backend file descriptor non-blocking.  F_SETFL
	 * replaces the whole status-flag set, so read the current
	 * flags first and OR in O_NONBLOCK rather than clobbering
	 * whatever flags the descriptor already carries.
	 */
	if (retval == 0) {
		flags = fcntl(uart->tty.fd, F_GETFL, 0);
		if (flags < 0)
			retval = -1;
		else
			retval = fcntl(uart->tty.fd, F_SETFL,
				       flags | O_NONBLOCK);
	}

	if (retval == 0)
		uart_opentty(uart);

	return retval;
}

View File

@@ -0,0 +1,73 @@
/*-
* Copyright (c) 2014 Nahanni Systems Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include "usb_core.h"
SET_DECLARE(usb_emu_set, struct usb_devemu);
/*
 * Look up a registered USB device emulation by name.  Scans the
 * usb_emu_set linker set; returns NULL when no emulation matches.
 */
struct usb_devemu *
usb_emu_finddev(char *name)
{
	struct usb_devemu **it;

	SET_FOREACH(it, usb_emu_set) {
		if (strcmp((*it)->ue_emu, name) == 0)
			return *it;
	}

	return NULL;
}
/*
 * Append a data block to a transfer's ring of blocks.  Returns a
 * pointer to the newly filled block, or NULL when the ring is full.
 */
struct usb_data_xfer_block *
usb_data_xfer_append(struct usb_data_xfer *xfer, void *buf, int blen,
		     void *hci_data, int ccs)
{
	struct usb_data_xfer_block *blk;

	if (xfer->ndata >= USB_MAX_XFER_BLOCKS)
		return NULL;

	/* Fill in the block at the current tail position. */
	blk = &xfer->data[xfer->tail];
	blk->buf = buf;
	blk->blen = blen;
	blk->hci_data = hci_data;
	blk->ccs = ccs;
	blk->processed = 0;
	blk->bdone = 0;

	/* Advance the tail and account for the new block. */
	xfer->tail = (xfer->tail + 1) % USB_MAX_XFER_BLOCKS;
	xfer->ndata++;

	return blk;
}

View File

@@ -0,0 +1,816 @@
/*-
* Copyright (c) 2014 Leon Dang <ldang@nahannisys.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#include <sys/time.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include "types.h"
#include "usb.h"
#include "usbdi.h"
#include "usb_core.h"
#include "console.h"
#include "gc.h"
static int umouse_debug;
#define DPRINTF(params) do { if (umouse_debug) printf params; } while (0)
#define WPRINTF(params) (printf params)
/* USB endpoint context (1-15) for reporting mouse data events*/
#define UMOUSE_INTR_ENDPT 1
#define UMOUSE_REPORT_DESC_TYPE 0x22
#define UMOUSE_GET_REPORT 0x01
#define UMOUSE_GET_IDLE 0x02
#define UMOUSE_GET_PROTOCOL 0x03
#define UMOUSE_SET_REPORT 0x09
#define UMOUSE_SET_IDLE 0x0A
#define UMOUSE_SET_PROTOCOL 0x0B
enum {
UMSTR_LANG,
UMSTR_MANUFACTURER,
UMSTR_PRODUCT,
UMSTR_SERIAL,
UMSTR_CONFIG,
UMSTR_MAX
};
static const char *const umouse_desc_strings[] = {
"\x04\x09",
"ACRN-DM",
"HID Tablet",
"01",
"HID Tablet Device",
};
struct umouse_hid_descriptor {
uint8_t bLength;
uint8_t bDescriptorType;
uint8_t bcdHID[2];
uint8_t bCountryCode;
uint8_t bNumDescriptors;
uint8_t bReportDescriptorType;
uint8_t wItemLength[2];
} __attribute__((packed));
struct umouse_config_desc {
struct usb_config_descriptor confd;
struct usb_interface_descriptor ifcd;
struct umouse_hid_descriptor hidd;
struct usb_endpoint_descriptor endpd;
struct usb_endpoint_ss_comp_descriptor sscompd;
} __attribute__((packed));
#define MOUSE_MAX_X 0x8000
#define MOUSE_MAX_Y 0x8000
static const uint8_t umouse_report_desc[] = {
0x05, 0x01, /* USAGE_PAGE (Generic Desktop) */
0x09, 0x02, /* USAGE (Mouse) */
0xa1, 0x01, /* COLLECTION (Application) */
0x09, 0x01, /* USAGE (Pointer) */
0xa1, 0x00, /* COLLECTION (Physical) */
0x05, 0x09, /* USAGE_PAGE (Button) */
0x19, 0x01, /* USAGE_MINIMUM (Button 1) */
0x29, 0x03, /* USAGE_MAXIMUM (Button 3) */
0x15, 0x00, /* LOGICAL_MINIMUM (0) */
0x25, 0x01, /* LOGICAL_MAXIMUM (1) */
0x75, 0x01, /* REPORT_SIZE (1) */
0x95, 0x03, /* REPORT_COUNT (3) */
0x81, 0x02, /* INPUT (Data,Var,Abs); 3 buttons */
0x75, 0x05, /* REPORT_SIZE (5) */
0x95, 0x01, /* REPORT_COUNT (1) */
0x81, 0x03, /* INPUT (Cnst,Var,Abs); padding */
0x05, 0x01, /* USAGE_PAGE (Generic Desktop) */
0x09, 0x30, /* USAGE (X) */
0x09, 0x31, /* USAGE (Y) */
0x35, 0x00, /* PHYSICAL_MINIMUM (0) */
0x46, 0xff, 0x7f, /* PHYSICAL_MAXIMUM (0x7fff) */
0x15, 0x00, /* LOGICAL_MINIMUM (0) */
0x26, 0xff, 0x7f, /* LOGICAL_MAXIMUM (0x7fff) */
0x75, 0x10, /* REPORT_SIZE (16) */
0x95, 0x02, /* REPORT_COUNT (2) */
0x81, 0x02, /* INPUT (Data,Var,Abs) */
0x05, 0x01, /* USAGE Page (Generic Desktop) */
0x09, 0x38, /* USAGE (Wheel) */
0x35, 0x00, /* PHYSICAL_MINIMUM (0) */
0x45, 0x00, /* PHYSICAL_MAXIMUM (0) */
0x15, 0x81, /* LOGICAL_MINIMUM (-127) */
0x25, 0x7f, /* LOGICAL_MAXIMUM (127) */
0x75, 0x08, /* REPORT_SIZE (8) */
0x95, 0x01, /* REPORT_COUNT (1) */
0x81, 0x06, /* INPUT (Data,Var,Rel) */
0xc0, /* END_COLLECTION */
0xc0 /* END_COLLECTION */
};
/*
 * HID input report layout; must match umouse_report_desc above
 * (3 button bits + padding, 16-bit absolute X/Y, 8-bit wheel).
 */
struct umouse_report {
	uint8_t	buttons;	/* bits: 0 left, 1 right, 2 middle */
	int16_t	x;		/* x position */
	int16_t	y;		/* y position */
	int8_t	z;		/* z wheel position */
} __attribute__((packed));
static struct usb_device_descriptor umouse_dev_desc = {
.bLength = sizeof(umouse_dev_desc),
.bDescriptorType = UDESC_DEVICE,
.bcdUSB = UD_USB_3_0,
.bMaxPacketSize = 8, /* max packet size */
.idVendor = 0xFB5D, /* vendor */
.idProduct = 0x0001,/* product */
.bcdDevice = 0, /* device version */
.iManufacturer = UMSTR_MANUFACTURER,
.iProduct = UMSTR_PRODUCT,
.iSerialNumber = UMSTR_SERIAL,
.bNumConfigurations = 1,
};
static struct umouse_config_desc umouse_confd = {
.confd = {
.bLength = sizeof(umouse_confd.confd),
.bDescriptorType = UDESC_CONFIG,
.wTotalLength = sizeof(umouse_confd),
.bNumInterface = 1,
.bConfigurationValue = 1,
.iConfiguration = UMSTR_CONFIG,
.bmAttributes = UC_BUS_POWERED | UC_REMOTE_WAKEUP,
.bMaxPower = 0,
},
.ifcd = {
.bLength = sizeof(umouse_confd.ifcd),
.bDescriptorType = UDESC_INTERFACE,
.bNumEndpoints = 1,
.bInterfaceClass = UICLASS_HID,
.bInterfaceSubClass = UISUBCLASS_BOOT,
.bInterfaceProtocol = UIPROTO_MOUSE,
},
.hidd = {
.bLength = sizeof(umouse_confd.hidd),
.bDescriptorType = 0x21,
.bcdHID = { 0x01, 0x10 },
.bCountryCode = 0,
.bNumDescriptors = 1,
.bReportDescriptorType = UMOUSE_REPORT_DESC_TYPE,
.wItemLength = { sizeof(umouse_report_desc), 0 },
},
.endpd = {
.bLength = sizeof(umouse_confd.endpd),
.bDescriptorType = UDESC_ENDPOINT,
.bEndpointAddress = UE_DIR_IN | UMOUSE_INTR_ENDPT,
.bmAttributes = UE_INTERRUPT,
.wMaxPacketSize = 8,
.bInterval = 0xA,
},
.sscompd = {
.bLength = sizeof(umouse_confd.sscompd),
.bDescriptorType = UDESC_ENDPOINT_SS_COMP,
.bMaxBurst = 0,
.bmAttributes = 0,
.wBytesPerInterval = 0,
},
};
struct umouse_bos_desc {
struct usb_bos_descriptor bosd;
struct usb_devcap_ss_descriptor usbssd;
} __attribute__((packed));
struct umouse_bos_desc umouse_bosd = {
.bosd = {
.bLength = sizeof(umouse_bosd.bosd),
.bDescriptorType = UDESC_BOS,
.wTotalLength = sizeof(umouse_bosd),
.bNumDeviceCaps = 1,
},
.usbssd = {
.bLength = sizeof(umouse_bosd.usbssd),
.bDescriptorType = UDESC_DEVICE_CAPABILITY,
.bDevCapabilityType = 3,
.bmAttributes = 0,
.wSpeedsSupported = 0x08,
.bFunctionalitySupport = 3,
.bU1DevExitLat = 0xa, /* dummy - not used */
.wU2DevExitLat = 0x20,
}
};
/* Per-instance state for the emulated USB tablet device. */
struct umouse_vdev {
	struct usb_hci *hci;		/* host controller we are attached to */
	char *opt;			/* copy of the backend option string */
	struct umouse_report um_report;	/* latest input report for the guest */
	int newdata;			/* nonzero when um_report is unread */
	struct {
		uint8_t idle;		/* value set via UMOUSE_SET_IDLE */
		uint8_t protocol;	/* HID protocol set via SET_PROTOCOL */
		uint8_t feature;	/* remote-wakeup feature flag */
	} hid;
	pthread_mutex_t mtx;		/* protects um_report/newdata/polling */
	pthread_mutex_t ev_mtx;		/* serializes hci_intr notification */
	int polling;			/* set while an IN poll is in progress */
	struct timeval prev_evt;	/* NOTE(review): not referenced in visible code */
};
/*
 * Console pointer callback: translate a console pointer event into a
 * HID report and kick the host controller's interrupt endpoint.
 */
static void
umouse_event(uint8_t button, int x, int y, void *arg)
{
	struct umouse_vdev *dev = arg;
	struct gfx_ctx_image *img;

	img = console_get_image();
	if (img == NULL) {
		/* not ready */
		return;
	}

	pthread_mutex_lock(&dev->mtx);

	dev->um_report.buttons = 0;
	dev->um_report.z = 0;

	if (button & 0x01)
		dev->um_report.buttons |= 0x01;	/* left */
	if (button & 0x02)
		dev->um_report.buttons |= 0x04;	/* middle */
	if (button & 0x04)
		dev->um_report.buttons |= 0x02;	/* right */
	if (button & 0x08)
		dev->um_report.z = 1;
	if (button & 0x10)
		dev->um_report.z = -1;

	/* scale coords to mouse resolution */
	dev->um_report.x = MOUSE_MAX_X * x / img->width;
	dev->um_report.y = MOUSE_MAX_Y * y / img->height;
	dev->newdata = 1;

	pthread_mutex_unlock(&dev->mtx);

	pthread_mutex_lock(&dev->ev_mtx);
	dev->hci->hci_intr(dev->hci, UE_DIR_IN | UMOUSE_INTR_ENDPT);
	pthread_mutex_unlock(&dev->ev_mtx);
}
/*
 * Instantiate the emulated tablet.  'opt' is the (possibly absent)
 * option string from the command line.  Returns the device softc, or
 * NULL on allocation failure.
 */
static void *
umouse_init(struct usb_hci *hci, char *opt)
{
	struct umouse_vdev *dev;

	dev = calloc(1, sizeof(struct umouse_vdev));
	if (!dev) {
		WPRINTF(("umouse: calloc returns NULL\n"));
		return NULL;
	}
	dev->hci = hci;

	dev->hid.protocol = 1;	/* REPORT protocol */
	/*
	 * strdup(NULL) is undefined behavior; tolerate a missing
	 * option string instead of crashing.
	 */
	dev->opt = opt ? strdup(opt) : NULL;
	pthread_mutex_init(&dev->mtx, NULL);
	pthread_mutex_init(&dev->ev_mtx, NULL);

	/* Receive pointer events from the console layer. */
	console_ptr_register(umouse_event, dev, 10);

	return dev;
}
/* Combine bRequest and bmRequestType into one switchable value. */
#define UREQ(x, y) ((x) | ((y) << 8))

/*
 * Handle a control (endpoint 0) request for the emulated tablet.
 *
 * All blocks of the transfer are marked processed; the first block
 * that passes USB_DATA_OK() supplies the data buffer ('data'/'udata').
 * Each case adjusts data->blen (bytes the host asked for but did not
 * receive) and data->bdone (bytes produced); 'eshort' records that the
 * device had less data than requested, reported as USB_ERR_SHORT_XFER.
 */
static int
umouse_request(void *scarg, struct usb_data_xfer *xfer)
{
	struct umouse_vdev *dev;
	struct usb_data_xfer_block *data;
	const char *str;
	uint16_t value;
	uint16_t index;
	uint16_t len;
	uint16_t slen;
	uint8_t *udata;
	int err;
	int i, idx;
	int eshort;

	dev = scarg;

	data = NULL;
	udata = NULL;

	assert(xfer != NULL && xfer->head >= 0);

	/* Find the first usable data block; mark everything processed. */
	idx = xfer->head;
	for (i = 0; i < xfer->ndata; i++) {
		xfer->data[idx].bdone = 0;
		if (data == NULL && USB_DATA_OK(xfer, i)) {
			data = &xfer->data[idx];
			udata = data->buf;
		}

		xfer->data[idx].processed = 1;
		idx = (idx + 1) % USB_MAX_XFER_BLOCKS;
	}

	err = USB_ERR_NORMAL_COMPLETION;
	eshort = 0;

	if (!xfer->ureq) {
		/* No setup packet: nothing to do. */
		DPRINTF(("%s: port %d\r\n", __func__, dev->hci->hci_port));
		goto done;
	}

	value = xfer->ureq->wValue;
	index = xfer->ureq->wIndex;
	len = xfer->ureq->wLength;

	DPRINTF(("%s: port %d, type 0x%x, req 0x%x,"
		 "val 0x%x, idx 0x%x, len %u\r\n", __func__,
		 dev->hci->hci_port, xfer->ureq->bmRequestType,
		 xfer->ureq->bRequest, value, index, len));

	switch (UREQ(xfer->ureq->bRequest, xfer->ureq->bmRequestType)) {
	case UREQ(UR_GET_CONFIG, UT_READ_DEVICE):
		DPRINTF(("umouse: (UR_GET_CONFIG, UT_READ_DEVICE)\r\n"));
		if (!data)
			break;

		*udata = umouse_confd.confd.bConfigurationValue;
		data->blen = len > 0 ? len - 1 : 0;
		eshort = data->blen > 0;
		data->bdone += 1;
		break;

	case UREQ(UR_GET_DESCRIPTOR, UT_READ_DEVICE):
		DPRINTF(("umouse: (UR_GET_DESCRIPTOR,UT_READ_DEVICE)"
			 "val %x\r\n",
			 value >> 8));
		if (!data)
			break;

		/* High byte of wValue selects the descriptor type. */
		switch (value >> 8) {
		case UDESC_DEVICE:
			DPRINTF(("umouse: (->UDESC_DEVICE) len %u"
				 "?= sizeof(umouse_dev_desc) %lu\r\n",
				 len, sizeof(umouse_dev_desc)));
			if ((value & 0xFF) != 0) {
				err = USB_ERR_IOERROR;
				goto done;
			}
			if (len > sizeof(umouse_dev_desc)) {
				data->blen = len - sizeof(umouse_dev_desc);
				len = sizeof(umouse_dev_desc);
			} else
				data->blen = 0;

			memcpy(data->buf, &umouse_dev_desc, len);
			data->bdone += len;
			break;

		case UDESC_CONFIG:
			DPRINTF(("umouse: (->UDESC_CONFIG)\r\n"));
			if ((value & 0xFF) != 0) {
				err = USB_ERR_IOERROR;
				goto done;
			}
			if (len > sizeof(umouse_confd)) {
				data->blen = len - sizeof(umouse_confd);
				len = sizeof(umouse_confd);
			} else
				data->blen = 0;

			memcpy(data->buf, &umouse_confd, len);
			data->bdone += len;
			break;

		case UDESC_STRING:
			DPRINTF(("umouse: (->UDESC_STRING)\r\n"));
			str = NULL;
			if ((value & 0xFF) < UMSTR_MAX)
				str = umouse_desc_strings[value & 0xFF];
			else
				goto done;

			if ((value & 0xFF) == UMSTR_LANG) {
				/* LANGID descriptor: raw 2-byte code. */
				udata[0] = 4;
				udata[1] = UDESC_STRING;
				data->blen = len - 2;
				len -= 2;
				data->bdone += 2;

				if (len >= 2) {
					udata[2] = str[0];
					udata[3] = str[1];
					data->blen -= 2;
					data->bdone += 2;
				} else
					data->blen = 0;

				goto done;
			}

			/* Other strings: expand ASCII to UTF-16LE. */
			slen = 2 + strlen(str) * 2;
			udata[0] = slen;
			udata[1] = UDESC_STRING;

			if (len > slen) {
				data->blen = len - slen;
				len = slen;
			} else
				data->blen = 0;
			for (i = 2; i < len; i += 2) {
				udata[i] = *str++;
				udata[i+1] = '\0';
			}
			data->bdone += slen;

			break;

		case UDESC_BOS:
			DPRINTF(("umouse: USB3 BOS\r\n"));
			if (len > sizeof(umouse_bosd)) {
				data->blen = len - sizeof(umouse_bosd);
				len = sizeof(umouse_bosd);
			} else
				data->blen = 0;

			memcpy(udata, &umouse_bosd, len);
			data->bdone += len;
			break;

		default:
			DPRINTF(("umouse: unknown(%d)->ERROR\r\n", value >> 8));
			err = USB_ERR_IOERROR;
			goto done;
		}
		eshort = data->blen > 0;
		break;

	case UREQ(UR_GET_DESCRIPTOR, UT_READ_INTERFACE):
		DPRINTF(("umouse: (UR_GET_DESCRIPTOR, UT_READ_INTERFACE)"
			 "0x%x\r\n",
			 (value >> 8)));
		if (!data)
			break;

		switch (value >> 8) {
		case UMOUSE_REPORT_DESC_TYPE:
			if (len > sizeof(umouse_report_desc)) {
				data->blen = len - sizeof(umouse_report_desc);
				len = sizeof(umouse_report_desc);
			} else
				data->blen = 0;

			memcpy(data->buf, umouse_report_desc, len);
			data->bdone += len;
			break;
		default:
			DPRINTF(("umouse: IO ERROR\r\n"));
			err = USB_ERR_IOERROR;
			goto done;
		}
		eshort = data->blen > 0;
		break;

	case UREQ(UR_GET_INTERFACE, UT_READ_INTERFACE):
		DPRINTF(("umouse: (UR_GET_INTERFACE, UT_READ_INTERFACE)\r\n"));
		if (index != 0) {
			DPRINTF(("umouse get_interface, invalid index %d\r\n",
				 index));
			err = USB_ERR_IOERROR;
			goto done;
		}

		if (!data)
			break;

		if (len > 0) {
			*udata = 0;
			data->blen = len - 1;
		}
		eshort = data->blen > 0;
		data->bdone += 1;
		break;

	case UREQ(UR_GET_STATUS, UT_READ_DEVICE):
		DPRINTF(("umouse: (UR_GET_STATUS, UT_READ_DEVICE)\r\n"));
		if (!data)
			break;

		if (data != NULL && len > 1) {
			if (dev->hid.feature == UF_DEVICE_REMOTE_WAKEUP)
				USETW(udata, UDS_REMOTE_WAKEUP);
			else
				USETW(udata, 0);
			data->blen = len - 2;
			data->bdone += 2;
		}

		eshort = data->blen > 0;
		break;

	case UREQ(UR_GET_STATUS, UT_READ_INTERFACE):
	case UREQ(UR_GET_STATUS, UT_READ_ENDPOINT):
		DPRINTF(("umouse: (UR_GET_STATUS, UT_READ_INTERFACE)\r\n"));
		if (!data)
			break;

		if (data != NULL && len > 1) {
			USETW(udata, 0);
			data->blen = len - 2;
			data->bdone += 2;
		}
		eshort = data->blen > 0;
		break;

	case UREQ(UR_SET_ADDRESS, UT_WRITE_DEVICE):
		/* XXX Controller should've handled this */
		DPRINTF(("umouse set address %u\r\n", value));
		break;

	case UREQ(UR_SET_CONFIG, UT_WRITE_DEVICE):
		DPRINTF(("umouse set config %u\r\n", value));
		break;

	case UREQ(UR_SET_DESCRIPTOR, UT_WRITE_DEVICE):
		DPRINTF(("umouse set descriptor %u\r\n", value));
		break;

	case UREQ(UR_CLEAR_FEATURE, UT_WRITE_DEVICE):
		DPRINTF(("umouse: (UR_SET_FEATURE,UT_WRITE_DEVICE) %x\r\n",
			 value));
		if (value == UF_DEVICE_REMOTE_WAKEUP)
			dev->hid.feature = 0;
		break;

	case UREQ(UR_SET_FEATURE, UT_WRITE_DEVICE):
		DPRINTF(("umouse: (UR_SET_FEATURE,UT_WRITE_DEVICE) %x\r\n",
			 value));
		if (value == UF_DEVICE_REMOTE_WAKEUP)
			dev->hid.feature = UF_DEVICE_REMOTE_WAKEUP;
		break;

	case UREQ(UR_CLEAR_FEATURE, UT_WRITE_INTERFACE):
	case UREQ(UR_CLEAR_FEATURE, UT_WRITE_ENDPOINT):
	case UREQ(UR_SET_FEATURE, UT_WRITE_INTERFACE):
	case UREQ(UR_SET_FEATURE, UT_WRITE_ENDPOINT):
		DPRINTF(("umouse: (UR_CLEAR_FEATURE,UT_WRITE_INTERFACE)\r\n"
			));
		err = USB_ERR_IOERROR;
		goto done;

	case UREQ(UR_SET_INTERFACE, UT_WRITE_INTERFACE):
		DPRINTF(("umouse set interface %u\r\n", value));
		break;

	case UREQ(UR_ISOCH_DELAY, UT_WRITE_DEVICE):
		DPRINTF(("umouse set isoch delay %u\r\n", value));
		break;

	case UREQ(UR_SET_SEL, 0):
		DPRINTF(("umouse set sel\r\n"));
		break;

	case UREQ(UR_SYNCH_FRAME, UT_WRITE_ENDPOINT):
		DPRINTF(("umouse synch frame\r\n"));
		break;

	/* HID device requests */

	case UREQ(UMOUSE_GET_REPORT, UT_READ_CLASS_INTERFACE):
		DPRINTF(("umouse: (UMOUSE_GET_REPORT,UT_READ_CLASS_INTERFACE) "
			 "0x%x\r\n", (value >> 8)));
		if (!data)
			break;

		if ((value >> 8) == 0x01 && len >= sizeof(dev->um_report)) {
			/* TODO read from backend */

			if (len > sizeof(dev->um_report)) {
				data->blen = len - sizeof(dev->um_report);
				len = sizeof(dev->um_report);
			} else
				data->blen = 0;

			memcpy(data->buf, &dev->um_report, len);
			data->bdone += len;
		} else {
			err = USB_ERR_IOERROR;
			goto done;
		}
		eshort = data->blen > 0;
		break;

	case UREQ(UMOUSE_GET_IDLE, UT_READ_CLASS_INTERFACE):
		if (!data)
			break;

		if (data != NULL && len > 0) {
			*udata = dev->hid.idle;
			data->blen = len - 1;
			data->bdone += 1;
		}
		eshort = data->blen > 0;
		break;

	case UREQ(UMOUSE_GET_PROTOCOL, UT_READ_CLASS_INTERFACE):
		if (!data)
			break;

		if (data != NULL && len > 0) {
			*udata = dev->hid.protocol;
			data->blen = len - 1;
			data->bdone += 1;
		}
		eshort = data->blen > 0;
		break;

	case UREQ(UMOUSE_SET_REPORT, UT_WRITE_CLASS_INTERFACE):
		DPRINTF(("umouse: (UMOUSE_SET_REPORT,"
			 "UT_WRITE_CLASS_INTERFACE) ignored\r\n"
			));
		break;

	case UREQ(UMOUSE_SET_IDLE, UT_WRITE_CLASS_INTERFACE):
		dev->hid.idle = xfer->ureq->wValue >> 8;
		DPRINTF(("umouse: (UMOUSE_SET_IDLE,"
			 "UT_WRITE_CLASS_INTERFACE) %x\r\n",
			 dev->hid.idle));
		break;

	case UREQ(UMOUSE_SET_PROTOCOL, UT_WRITE_CLASS_INTERFACE):
		dev->hid.protocol = xfer->ureq->wValue >> 8;
		DPRINTF(("umouse: (UR_CLEAR_FEATURE,"
			 "UT_WRITE_CLASS_INTERFACE) %x\r\n",
			 dev->hid.protocol));
		break;

	default:
		DPRINTF(("**** umouse request unhandled\r\n"));
		err = USB_ERR_IOERROR;
		break;
	}

done:
	/* Successful writes transfer no data back; otherwise report a
	 * short transfer if the device came up short. */
	if (xfer->ureq && (xfer->ureq->bmRequestType & UT_WRITE) &&
	    (err == USB_ERR_NORMAL_COMPLETION) && (data != NULL))
		data->blen = 0;
	else if (eshort)
		err = USB_ERR_SHORT_XFER;

	DPRINTF(("umouse request error code %d (0=ok), blen %u txlen %u\r\n",
		 err, (data ? data->blen : 0), (data ? data->bdone : 0)));

	return err;
}
/*
 * Handle a data-stage transfer on the tablet's interrupt IN endpoint.
 *
 * Finds the first unprocessed block with a buffer, then (for IN
 * transfers) copies the pending 6-byte HID report into it.  NAKs when
 * no new report is available; OUT transfers are stalled.
 */
static int
umouse_data_handler(void *scarg, struct usb_data_xfer *xfer, int dir,
		    int epctx)
{
	struct umouse_vdev *dev;
	struct usb_data_xfer_block *data;
	uint8_t *udata;
	int len, i, idx;
	int err;

	assert(xfer != NULL && xfer->head >= 0);

	DPRINTF(("umouse handle data - DIR=%s|EP=%d, blen %d\r\n",
		 dir ? "IN" : "OUT", epctx, xfer->data[0].blen));

	/* find buffer to add data */
	udata = NULL;
	err = USB_ERR_NORMAL_COMPLETION;

	/* handle xfer at first unprocessed item with buffer */
	data = NULL;
	idx = xfer->head;
	for (i = 0; i < xfer->ndata; i++) {
		data = &xfer->data[idx];
		if (data->buf != NULL && data->blen != 0)
			break;

		data->processed = 1;
		data = NULL;
		idx = (idx + 1) % USB_MAX_XFER_BLOCKS;
	}

	if (!data)
		goto done;

	udata = data->buf;
	len = data->blen;

	if (udata == NULL) {
		DPRINTF(("umouse no buffer provided for input\r\n"));
		err = USB_ERR_NOMEM;
		goto done;
	}

	dev = scarg;

	if (dir) {
		pthread_mutex_lock(&dev->mtx);

		if (!dev->newdata) {
			/* Nothing new to report: NAK this poll. */
			err = USB_ERR_CANCELLED;
			USB_DATA_SET_ERRCODE(&xfer->data[xfer->head], USB_NAK);
			pthread_mutex_unlock(&dev->mtx);
			goto done;
		}

		if (dev->polling) {
			/* Re-entered while a poll is in progress: stall. */
			err = USB_ERR_STALLED;
			USB_DATA_SET_ERRCODE(data, USB_STALL);
			pthread_mutex_unlock(&dev->mtx);
			goto done;
		}
		dev->polling = 1;

		if (len > 0) {
			dev->newdata = 0;

			data->processed = 1;
			/* Copy the packed 6-byte report. */
			data->bdone += 6;
			memcpy(udata, &dev->um_report, 6);
			data->blen = len - 6;
			if (data->blen > 0)
				err = USB_ERR_SHORT_XFER;
		}

		dev->polling = 0;
		pthread_mutex_unlock(&dev->mtx);
	} else {
		/* OUT transfers are not supported on this endpoint. */
		USB_DATA_SET_ERRCODE(data, USB_STALL);
		err = USB_ERR_STALLED;
	}

done:
	return err;
}
/* Device reset: discard any report the guest has not yet read. */
static int
umouse_reset(void *scarg)
{
	struct umouse_vdev *dev = scarg;

	dev->newdata = 0;

	return 0;
}
/* Device removal hook; the tablet has nothing to tear down. */
static int
umouse_remove(void *scarg)
{
	return 0;
}
/* Device stop hook; the tablet has no ongoing activity to stop. */
static int
umouse_stop(void *scarg)
{
	return 0;
}
/*
 * Emulation ops for the "tablet" device, registered into the
 * usb_emu_set linker set so usb_emu_finddev() can locate it by name.
 */
struct usb_devemu ue_mouse = {
	.ue_emu = "tablet",
	.ue_usbver = 3,
	.ue_usbspeed = USB_SPEED_HIGH,
	.ue_init = umouse_init,
	.ue_request = umouse_request,
	.ue_data = umouse_data_handler,
	.ue_reset = umouse_reset,
	.ue_remove = umouse_remove,
	.ue_stop = umouse_stop
};
USB_EMUL_SET(ue_mouse);

View File

@@ -0,0 +1,59 @@
/*-
* Copyright (c) 2012 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _ACPI_H_
#define _ACPI_H_
#define SCI_INT 9
#define SMI_CMD 0xb2
#define ACPI_ENABLE 0xa0
#define ACPI_DISABLE 0xa1
#define PM1A_EVT_ADDR 0x400
#define PM1A_CNT_ADDR 0x404
#define IO_PMTMR 0x408 /* 4-byte i/o port for the timer */
/* All dynamic table entry no. */
#define NHLT_ENTRY_NO 8
void acpi_table_enable(int num);
struct vmctx;
int acpi_build(struct vmctx *ctx, int ncpu);
void dsdt_line(const char *fmt, ...);
void dsdt_fixed_ioport(uint16_t iobase, uint16_t length);
void dsdt_fixed_irq(uint8_t irq);
void dsdt_fixed_mem32(uint32_t base, uint32_t length);
void dsdt_indent(int levels);
void dsdt_unindent(int levels);
void sci_init(struct vmctx *ctx);
#endif /* _ACPI_H_ */

322
devicemodel/include/ahci.h Normal file
View File

@@ -0,0 +1,322 @@
/*-
* Copyright (c) 1998 - 2008 Søren Schmidt <sos@FreeBSD.org>
* Copyright (c) 2009-2012 Alexander Motin <mav@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification, immediately at the beginning of the file.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _AHCI_H_
#define _AHCI_H_
/* ATA register defines */
#define ATA_DATA 0 /* (RW) data */
#define ATA_FEATURE 1 /* (W) feature */
#define ATA_F_DMA 0x01 /* enable DMA */
#define ATA_F_OVL 0x02 /* enable overlap */
#define ATA_COUNT 2 /* (W) sector count */
#define ATA_SECTOR 3 /* (RW) sector # */
#define ATA_CYL_LSB 4 /* (RW) cylinder# LSB */
#define ATA_CYL_MSB 5 /* (RW) cylinder# MSB */
#define ATA_DRIVE 6 /* (W) Sector/Drive/Head */
#define ATA_D_LBA 0x40 /* use LBA addressing */
#define ATA_D_IBM 0xa0 /* 512 byte sectors, ECC */
#define ATA_COMMAND 7 /* (W) command */
#define ATA_ERROR 8 /* (R) error */
#define ATA_E_ILI 0x01 /* illegal length */
#define ATA_E_NM 0x02 /* no media */
#define ATA_E_ABORT 0x04 /* command aborted */
#define ATA_E_MCR 0x08 /* media change request */
#define ATA_E_IDNF 0x10 /* ID not found */
#define ATA_E_MC 0x20 /* media changed */
#define ATA_E_UNC 0x40 /* uncorrectable data */
#define ATA_E_ICRC 0x80 /* UDMA crc error */
#define ATA_E_ATAPI_SENSE_MASK 0xf0 /* ATAPI sense key mask */
#define ATA_IREASON 9 /* (R) interrupt reason */
#define ATA_I_CMD 0x01 /* cmd (1) | data (0) */
#define ATA_I_IN 0x02 /* read (1) | write (0) */
#define ATA_I_RELEASE 0x04 /* released bus (1) */
#define ATA_I_TAGMASK 0xf8 /* tag mask */
#define ATA_STATUS 10 /* (R) status */
#define ATA_ALTSTAT 11 /* (R) alternate status */
#define ATA_S_ERROR 0x01 /* error */
#define ATA_S_INDEX 0x02 /* index */
#define ATA_S_CORR 0x04 /* data corrected */
#define ATA_S_DRQ 0x08 /* data request */
#define ATA_S_DSC 0x10 /* drive seek completed */
#define ATA_S_SERVICE 0x10 /* drive needs service */
#define ATA_S_DWF 0x20 /* drive write fault */
#define ATA_S_DMA 0x20 /* DMA ready */
#define ATA_S_READY 0x40 /* drive ready */
#define ATA_S_BUSY 0x80 /* busy */
#define ATA_CONTROL 12 /* (W) control */
#define ATA_A_IDS 0x02 /* disable interrupts */
#define ATA_A_RESET 0x04 /* RESET controller */
#define ATA_A_4BIT 0x08 /* 4 head bits */
#define ATA_A_HOB 0x80 /* High Order Byte enable */
/* SATA register defines */
/* SStatus: link/device detection, negotiated speed, and power state. */
#define ATA_SSTATUS 13
#define ATA_SS_DET_MASK 0x0000000f /* device detection */
#define ATA_SS_DET_NO_DEVICE 0x00000000
#define ATA_SS_DET_DEV_PRESENT 0x00000001
#define ATA_SS_DET_PHY_ONLINE 0x00000003
#define ATA_SS_DET_PHY_OFFLINE 0x00000004
#define ATA_SS_SPD_MASK 0x000000f0 /* negotiated interface speed */
#define ATA_SS_SPD_NO_SPEED 0x00000000
#define ATA_SS_SPD_GEN1 0x00000010
#define ATA_SS_SPD_GEN2 0x00000020
#define ATA_SS_SPD_GEN3 0x00000030
#define ATA_SS_IPM_MASK 0x00000f00 /* interface power management state */
#define ATA_SS_IPM_NO_DEVICE 0x00000000
#define ATA_SS_IPM_ACTIVE 0x00000100
#define ATA_SS_IPM_PARTIAL 0x00000200
#define ATA_SS_IPM_SLUMBER 0x00000600
#define ATA_SS_IPM_DEVSLEEP 0x00000800
/* SError: accumulated link error and diagnostic bits (write-1-to-clear). */
#define ATA_SERROR 14
#define ATA_SE_DATA_CORRECTED 0x00000001
#define ATA_SE_COMM_CORRECTED 0x00000002
#define ATA_SE_DATA_ERR 0x00000100
#define ATA_SE_COMM_ERR 0x00000200
#define ATA_SE_PROT_ERR 0x00000400
#define ATA_SE_HOST_ERR 0x00000800
#define ATA_SE_PHY_CHANGED 0x00010000
#define ATA_SE_PHY_IERROR 0x00020000
#define ATA_SE_COMM_WAKE 0x00040000
#define ATA_SE_DECODE_ERR 0x00080000
#define ATA_SE_PARITY_ERR 0x00100000
#define ATA_SE_CRC_ERR 0x00200000
#define ATA_SE_HANDSHAKE_ERR 0x00400000
#define ATA_SE_LINKSEQ_ERR 0x00800000
#define ATA_SE_TRANSPORT_ERR 0x01000000
#define ATA_SE_UNKNOWN_FIS 0x02000000
#define ATA_SE_EXCHANGED 0x04000000
/* SControl: host requests -- device detection/reset, speed limit, and
 * which power-management transitions are disallowed. */
#define ATA_SCONTROL 15
#define ATA_SC_DET_MASK 0x0000000f
#define ATA_SC_DET_IDLE 0x00000000
#define ATA_SC_DET_RESET 0x00000001
#define ATA_SC_DET_DISABLE 0x00000004
#define ATA_SC_SPD_MASK 0x000000f0
#define ATA_SC_SPD_NO_SPEED 0x00000000
#define ATA_SC_SPD_SPEED_GEN1 0x00000010
#define ATA_SC_SPD_SPEED_GEN2 0x00000020
#define ATA_SC_SPD_SPEED_GEN3 0x00000030
#define ATA_SC_IPM_MASK 0x00000f00
#define ATA_SC_IPM_NONE 0x00000000
#define ATA_SC_IPM_DIS_PARTIAL 0x00000100
#define ATA_SC_IPM_DIS_SLUMBER 0x00000200
#define ATA_SC_IPM_DIS_DEVSLEEP 0x00000400
/* SActive: one bit per outstanding NCQ tag. */
#define ATA_SACTIVE 16
/* Architectural AHCI limits. */
#define AHCI_MAX_PORTS 32
#define AHCI_MAX_SLOTS 32
#define AHCI_MAX_IRQS 16
/* SATA AHCI v1.0 register defines */
/*
 * Global (generic host control) register offsets into the HBA memory
 * space, with the bit fields of each register listed below its offset.
 */
#define AHCI_CAP 0x00 /* HBA capabilities */
#define AHCI_CAP_NPMASK 0x0000001f /* number of ports - 1 */
#define AHCI_CAP_SXS 0x00000020
#define AHCI_CAP_EMS 0x00000040
#define AHCI_CAP_CCCS 0x00000080
#define AHCI_CAP_NCS 0x00001F00 /* number of command slots - 1 */
#define AHCI_CAP_NCS_SHIFT 8
#define AHCI_CAP_PSC 0x00002000
#define AHCI_CAP_SSC 0x00004000
#define AHCI_CAP_PMD 0x00008000
#define AHCI_CAP_FBSS 0x00010000
#define AHCI_CAP_SPM 0x00020000
#define AHCI_CAP_SAM 0x00080000
#define AHCI_CAP_ISS 0x00F00000 /* interface speed support */
#define AHCI_CAP_ISS_SHIFT 20
#define AHCI_CAP_SCLO 0x01000000
#define AHCI_CAP_SAL 0x02000000
#define AHCI_CAP_SALP 0x04000000
#define AHCI_CAP_SSS 0x08000000
#define AHCI_CAP_SMPS 0x10000000
#define AHCI_CAP_SSNTF 0x20000000
#define AHCI_CAP_SNCQ 0x40000000 /* native command queuing */
#define AHCI_CAP_64BIT 0x80000000 /* 64-bit addressing */
#define AHCI_GHC 0x04 /* global HBA control */
#define AHCI_GHC_AE 0x80000000 /* AHCI enable */
#define AHCI_GHC_MRSM 0x00000004
#define AHCI_GHC_IE 0x00000002 /* interrupt enable */
#define AHCI_GHC_HR 0x00000001 /* HBA reset */
#define AHCI_IS 0x08 /* interrupt status (one bit per port) */
#define AHCI_PI 0x0c /* ports implemented bitmap */
#define AHCI_VS 0x10 /* AHCI version */
/* Command completion coalescing control/ports. */
#define AHCI_CCCC 0x14
#define AHCI_CCCC_TV_MASK 0xffff0000
#define AHCI_CCCC_TV_SHIFT 16
#define AHCI_CCCC_CC_MASK 0x0000ff00
#define AHCI_CCCC_CC_SHIFT 8
#define AHCI_CCCC_INT_MASK 0x000000f8
#define AHCI_CCCC_INT_SHIFT 3
#define AHCI_CCCC_EN 0x00000001
#define AHCI_CCCP 0x18
/* Enclosure management location/control. */
#define AHCI_EM_LOC 0x1C
#define AHCI_EM_CTL 0x20
#define AHCI_EM_MR 0x00000001
#define AHCI_EM_TM 0x00000100
#define AHCI_EM_RST 0x00000200
#define AHCI_EM_LED 0x00010000
#define AHCI_EM_SAFTE 0x00020000
#define AHCI_EM_SES2 0x00040000
#define AHCI_EM_SGPIO 0x00080000
#define AHCI_EM_SMB 0x01000000
#define AHCI_EM_XMT 0x02000000
#define AHCI_EM_ALHD 0x04000000
#define AHCI_EM_PM 0x08000000
/* Extended capabilities. */
#define AHCI_CAP2 0x24
#define AHCI_CAP2_BOH 0x00000001
#define AHCI_CAP2_NVMP 0x00000002
#define AHCI_CAP2_APST 0x00000004
#define AHCI_CAP2_SDS 0x00000008
#define AHCI_CAP2_SADM 0x00000010
#define AHCI_CAP2_DESO 0x00000020
/*
 * Per-port register banks: port N's registers live at
 * AHCI_OFFSET + N * AHCI_STEP; the AHCI_P_* defines below are offsets
 * within one such bank.
 */
#define AHCI_OFFSET 0x100
#define AHCI_STEP 0x80
#define AHCI_P_CLB 0x00 /* command list base (low) */
#define AHCI_P_CLBU 0x04 /* command list base (high) */
#define AHCI_P_FB 0x08 /* FIS receive area base (low) */
#define AHCI_P_FBU 0x0c /* FIS receive area base (high) */
#define AHCI_P_IS 0x10 /* port interrupt status */
#define AHCI_P_IE 0x14 /* port interrupt enable */
/* AHCI_P_IX_*: bit positions shared by the PxIS and PxIE registers. */
#define AHCI_P_IX_DHR 0x00000001
#define AHCI_P_IX_PS 0x00000002
#define AHCI_P_IX_DS 0x00000004
#define AHCI_P_IX_SDB 0x00000008
#define AHCI_P_IX_UF 0x00000010
#define AHCI_P_IX_DP 0x00000020
#define AHCI_P_IX_PC 0x00000040
#define AHCI_P_IX_MP 0x00000080
#define AHCI_P_IX_PRC 0x00400000
#define AHCI_P_IX_IPM 0x00800000
#define AHCI_P_IX_OF 0x01000000
#define AHCI_P_IX_INF 0x04000000
#define AHCI_P_IX_IF 0x08000000
#define AHCI_P_IX_HBD 0x10000000
#define AHCI_P_IX_HBF 0x20000000
#define AHCI_P_IX_TFE 0x40000000 /* task-file error */
#define AHCI_P_IX_CPD 0x80000000
#define AHCI_P_CMD 0x18 /* port command and status */
#define AHCI_P_CMD_ST 0x00000001 /* start command processing */
#define AHCI_P_CMD_SUD 0x00000002
#define AHCI_P_CMD_POD 0x00000004
#define AHCI_P_CMD_CLO 0x00000008
#define AHCI_P_CMD_FRE 0x00000010 /* FIS receive enable */
#define AHCI_P_CMD_CCS_MASK 0x00001f00 /* current command slot */
#define AHCI_P_CMD_CCS_SHIFT 8
#define AHCI_P_CMD_ISS 0x00002000
#define AHCI_P_CMD_FR 0x00004000 /* FIS receive running */
#define AHCI_P_CMD_CR 0x00008000 /* command list running */
#define AHCI_P_CMD_CPS 0x00010000
#define AHCI_P_CMD_PMA 0x00020000
#define AHCI_P_CMD_HPCP 0x00040000
#define AHCI_P_CMD_MPSP 0x00080000
#define AHCI_P_CMD_CPD 0x00100000
#define AHCI_P_CMD_ESP 0x00200000
#define AHCI_P_CMD_FBSCP 0x00400000
#define AHCI_P_CMD_APSTE 0x00800000
#define AHCI_P_CMD_ATAPI 0x01000000 /* device is ATAPI */
#define AHCI_P_CMD_DLAE 0x02000000
#define AHCI_P_CMD_ALPE 0x04000000
#define AHCI_P_CMD_ASP 0x08000000
#define AHCI_P_CMD_ICC_MASK 0xf0000000 /* interface communication control */
#define AHCI_P_CMD_NOOP 0x00000000
#define AHCI_P_CMD_ACTIVE 0x10000000
#define AHCI_P_CMD_PARTIAL 0x20000000
#define AHCI_P_CMD_SLUMBER 0x60000000
#define AHCI_P_CMD_DEVSLEEP 0x80000000
#define AHCI_P_TFD 0x20 /* task-file data (status/error) */
#define AHCI_P_SIG 0x24 /* device signature */
#define AHCI_P_SSTS 0x28 /* SATA status (SStatus) */
#define AHCI_P_SCTL 0x2c /* SATA control (SControl) */
#define AHCI_P_SERR 0x30 /* SATA error (SError) */
#define AHCI_P_SACT 0x34 /* SATA active (NCQ tags) */
#define AHCI_P_CI 0x38 /* command issue (one bit per slot) */
#define AHCI_P_SNTF 0x3C /* SATA notification */
/* FIS-based switching control (port multiplier support). */
#define AHCI_P_FBS 0x40
#define AHCI_P_FBS_EN 0x00000001
#define AHCI_P_FBS_DEC 0x00000002
#define AHCI_P_FBS_SDE 0x00000004
#define AHCI_P_FBS_DEV 0x00000f00
#define AHCI_P_FBS_DEV_SHIFT 8
#define AHCI_P_FBS_ADO 0x0000f000
#define AHCI_P_FBS_ADO_SHIFT 12
#define AHCI_P_FBS_DWE 0x000f0000
#define AHCI_P_FBS_DWE_SHIFT 16
/* Device sleep (DevSleep) control. */
#define AHCI_P_DEVSLP 0x44
#define AHCI_P_DEVSLP_ADSE 0x00000001
#define AHCI_P_DEVSLP_DSP 0x00000002
#define AHCI_P_DEVSLP_DETO 0x000003fc
#define AHCI_P_DEVSLP_DETO_SHIFT 2
#define AHCI_P_DEVSLP_MDAT 0x00007c00
#define AHCI_P_DEVSLP_MDAT_SHIFT 10
#define AHCI_P_DEVSLP_DITO 0x01ff8000
#define AHCI_P_DEVSLP_DITO_SHIFT 15
#define AHCI_P_DEVSLP_DM 0x0e000000
#define AHCI_P_DEVSLP_DM_SHIFT 25
/* Just to be sure, if building as module. */
/* Force a minimum MAXPHYS of 512 KiB so the S/G sizing below is adequate. */
#if MAXPHYS < 512 * 1024
#undef MAXPHYS
#define MAXPHYS (512 * 1024)
#endif
/* Pessimistic prognosis on number of required S/G entries */
#define AHCI_SG_ENTRIES (roundup(btoc(MAXPHYS) + 1, 8))
/* Command list. 32 commands. First, 1Kbyte aligned. */
#define AHCI_CL_OFFSET 0
#define AHCI_CL_SIZE 32
/* Command tables. Up to 32 commands, Each, 128byte aligned. */
#define AHCI_CT_OFFSET (AHCI_CL_OFFSET + AHCI_CL_SIZE * AHCI_MAX_SLOTS)
#define AHCI_CT_SIZE (128 + AHCI_SG_ENTRIES * 16)
/* Total main work area. */
/* NOTE: expands `ch->numslots`, so this macro is only usable where a
 * channel pointer named `ch` is in scope. */
#define AHCI_WORK_SIZE (AHCI_CT_OFFSET + AHCI_CT_SIZE * ch->numslots)
#endif /* _AHCI_H_ */

1012
devicemodel/include/ata.h Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,38 @@
/*-
* Copyright (c) 2015 Tycho Nightingale <tycho.nightingale@pluribusnetworks.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _ATKBDC_H_
#define _ATKBDC_H_
/* AT keyboard controller (i8042) emulation interface. */
struct atkbdc_base;
struct vmctx;
/* Create and register the keyboard-controller device for the given VM. */
void atkbdc_init(struct vmctx *ctx);
/* Deliver a controller event; iskbd presumably selects the keyboard port
 * (nonzero) vs. the aux/mouse port (0) -- confirm against atkbdc.c. */
void atkbdc_event(struct atkbdc_base *base, int iskbd);
#endif /* _ATKBDC_H_ */

View File

@@ -0,0 +1,70 @@
/*-
* Copyright (c) 2013 Peter Grehan <grehan@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* The block API to be used by acrn-dm block-device emulations. The routines
* are thread safe, with no assumptions about the context of the completion
* callback - it may occur in the caller's context, or asynchronously in
* another thread.
*/
#ifndef _BLOCK_IF_H_
#define _BLOCK_IF_H_
#include <sys/uio.h>
#include <sys/unistd.h>
#define BLOCKIF_IOV_MAX 33 /* not practical to be IOV_MAX */
/* One asynchronous block I/O request, completed via `callback`. */
struct blockif_req {
struct iovec iov[BLOCKIF_IOV_MAX]; /* scatter/gather buffers */
int iovcnt; /* number of valid iov[] entries */
off_t offset; /* starting byte offset on the device */
ssize_t resid; /* residual byte count -- TODO confirm exact semantics in block_if.c */
void (*callback)(struct blockif_req *req, int err); /* completion callback; err per block_if.c convention */
void *param; /* opaque caller cookie */
};
struct blockif_ctxt;
/* Open a backing store described by optstr; returns NULL-able context -- confirm error convention. */
struct blockif_ctxt *blockif_open(const char *optstr, const char *ident);
/* Geometry / property queries. */
off_t blockif_size(struct blockif_ctxt *bc);
void blockif_chs(struct blockif_ctxt *bc, uint16_t *c, uint8_t *h,
uint8_t *s);
int blockif_sectsz(struct blockif_ctxt *bc);
void blockif_psectsz(struct blockif_ctxt *bc, int *size, int *off);
int blockif_queuesz(struct blockif_ctxt *bc);
int blockif_is_ro(struct blockif_ctxt *bc);
int blockif_candelete(struct blockif_ctxt *bc);
/* Asynchronous request submission; completion is via breq->callback. */
int blockif_read(struct blockif_ctxt *bc, struct blockif_req *breq);
int blockif_write(struct blockif_ctxt *bc, struct blockif_req *breq);
int blockif_flush(struct blockif_ctxt *bc, struct blockif_req *breq);
int blockif_delete(struct blockif_ctxt *bc, struct blockif_req *breq);
int blockif_cancel(struct blockif_ctxt *bc, struct blockif_req *breq);
int blockif_close(struct blockif_ctxt *bc);
#endif /* _BLOCK_IF_H_ */

View File

@@ -0,0 +1,53 @@
/*-
* Copyright (c) 2015 Tycho Nightingale <tycho.nightingale@pluribusnetworks.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _CONSOLE_H_
#define _CONSOLE_H_
/* Graphical console: framebuffer rendering plus keyboard/pointer input. */
struct gfx_ctx;
/* Callback types for framebuffer render, keyboard, and pointer events. */
typedef void (*fb_render_func_t)(struct gfx_ctx *gc, void *arg);
typedef void (*kbd_event_func_t)(int down, uint32_t keysym, void *arg);
typedef void (*ptr_event_func_t)(uint8_t mask, int x, int y, void *arg);
/* Initialize a w x h console backed by the framebuffer at fbaddr. */
void console_init(int w, int h, void *fbaddr);
void console_set_fbaddr(void *fbaddr);
struct gfx_ctx_image *console_get_image(void);
/* Register the render callback and request a screen refresh. */
void console_fb_register(fb_render_func_t render_cb, void *arg);
void console_refresh(void);
/* Input registration/injection; `pri` is presumably a handler priority --
 * confirm against console.c. */
void console_kbd_register(kbd_event_func_t event_cb, void *arg, int pri);
void console_key_event(int down, uint32_t keysym);
void console_ptr_register(ptr_event_func_t event_cb, void *arg, int pri);
void console_ptr_event(uint8_t button, int x, int y);
#endif /* _CONSOLE_H_ */

View File

@@ -0,0 +1,38 @@
/*-
* Copyright (c) 2008, Jeffrey Roberson <jeff@freebsd.org>
* All rights reserved.
*
* Copyright (c) 2008 Nokia Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _CPUSET_H_
#define _CPUSET_H_
/* Compatibility shims mapping FreeBSD-style cpuset macros onto glibc
 * CPU_* operations. */
#define CPU_EMPTY(p) (CPU_COUNT_S(CPU_SETSIZE, p) == 0)
/* NOTE(review): the *_ATOMIC variants expand to the plain, non-atomic
 * glibc macros -- callers must not rely on atomicity here. */
#define CPU_CLR_ATOMIC(n, p) CPU_CLR(n, p)
#define CPU_SET_ATOMIC(n, p) CPU_SET(n, p)
#endif /* !_CPUSET_H_ */

57
devicemodel/include/dm.h Normal file
View File

@@ -0,0 +1,57 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _DM_H_
#define _DM_H_
/* Core device-model (acrn-dm) globals and vmexit/addressing helpers. */
/* Return codes for vmexit handlers. */
#define VMEXIT_CONTINUE (0)
#define VMEXIT_ABORT (-1)
#include <stdbool.h>
#include "types.h"
#include "vmm.h"
struct vmctx;
/* Globals describing the guest being emulated. */
extern int guest_ncpus;
extern char *guest_uuid_str;
extern char *vmname;
extern bool stdio_in_use;
/* Handle a task-switch vmexit for the given request/vcpu. */
int vmexit_task_switch(struct vmctx *ctx, struct vhm_request *vhm_req,
int *vcpu);
/* Translate guest-physical addresses to host-virtual pointers. */
void *paddr_guest2host(struct vmctx *ctx, uintptr_t addr, size_t len);
void *dm_gpa2hva(uint64_t gpa, size_t size);
/* vCPU startup and run-loop configuration queries (fbsdrun heritage). */
void fbsdrun_addcpu(struct vmctx *ctx, int guest_ncpus);
int fbsdrun_muxed(void);
int fbsdrun_vmexit_on_hlt(void);
int fbsdrun_vmexit_on_pause(void);
int fbsdrun_disable_x2apic(void);
int fbsdrun_virtio_msix(void);
/* Prefer MSI over MSI-X for passthrough devices when enable is true --
 * confirm exact effect in the ptdev code. */
void ptdev_prefer_msi(bool enable);
#endif

48
devicemodel/include/gc.h Normal file
View File

@@ -0,0 +1,48 @@
/*-
* Copyright (c) 2015 Tycho Nightingale <tycho.nightingale@pluribusnetworks.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _GC_H_
#define _GC_H_
/* Graphics context: a simple framebuffer image abstraction. */
#include "types.h"
struct gfx_ctx;
/* Snapshot of the framebuffer as a 32-bit pixel array. */
struct gfx_ctx_image {
int vgamode; /* nonzero when in VGA mode -- confirm in gc.c */
int width; /* image width in pixels */
int height; /* image height in pixels */
uint32_t *data; /* pixel data, width*height 32-bit values */
};
/* Create a context of the given size backed by fbaddr (may be NULL --
 * confirm allocation behavior in gc.c). */
struct gfx_ctx *gc_init(int width, int height, void *fbaddr);
void gc_set_fbaddr(struct gfx_ctx *gc, void *fbaddr);
void gc_resize(struct gfx_ctx *gc, int width, int height);
struct gfx_ctx_image *gc_get_image(struct gfx_ctx *gc);
#endif /* _GC_H_ */

View File

@@ -0,0 +1,80 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _INOUT_H_
#define _INOUT_H_
/* x86 port I/O (in/out instruction) emulation registry. */
#include "types.h"
#include "acrn_common.h"
struct vmctx;
struct vhm_request;
/*
 * inout emulation handlers return 0 on success and -1 on failure.
 */
typedef int (*inout_func_t)(struct vmctx *ctx, int vcpu, int in, int port,
int bytes, uint32_t *eax, void *arg);
/* Registration record for one I/O port range. */
struct inout_port {
const char *name; /* human-readable name for diagnostics */
int port; /* first port number */
int size; /* number of consecutive ports */
int flags; /* IOPORT_F_* access flags */
inout_func_t handler; /* emulation callback */
void *arg; /* opaque argument passed to handler */
};
#define IOPORT_F_IN 0x1
#define IOPORT_F_OUT 0x2
#define IOPORT_F_INOUT (IOPORT_F_IN | IOPORT_F_OUT)
/*
 * The following flags are used internally and must not be used by
 * device models.
 */
#define IOPORT_F_DEFAULT 0x80000000 /* claimed by default handler */
/* Statically register a single-port handler via the inout_port_set
 * linker set. */
#define INOUT_PORT(name, port, flags, handler) \
static struct inout_port __CONCAT(__inout_port, __LINE__) = \
{ \
#name, \
(port), \
1, \
(flags), \
(handler), \
0 \
}; \
DATA_SET(inout_port_set, __CONCAT(__inout_port, __LINE__))
void init_inout(void);
/* Emulate one pio request; `strict` presumably rejects unclaimed ports --
 * confirm in inout.c. */
int emulate_inout(struct vmctx *ctx, int *pvcpu, struct pio_request *req,
int strict);
int register_inout(struct inout_port *iop);
int unregister_inout(struct inout_port *iop);
void init_bvmcons(void);
#endif /* _INOUT_H_ */

View File

@@ -0,0 +1,41 @@
/*-
* Copyright (c) 2014 Hudson River Trading LLC
* Written by: John H. Baldwin <jhb@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _IOAPIC_H_
#define _IOAPIC_H_
struct pci_vdev;
/*
 * Allocate a PCI IRQ from the I/O APIC.
 */
void ioapic_init(struct vmctx *ctx);
/* Returns the IRQ number assigned to the device -- confirm failure
 * convention in ioapic.c. */
int ioapic_pci_alloc_irq(struct pci_vdev *pi);
#endif

View File

@@ -0,0 +1,44 @@
/*-
* Copyright (c) 2010 Marcel Moolenaar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _IODEV_H_
#define _IODEV_H_
/* ioctl interface for host port-I/O access. */
#define IODEV_PIO_READ 0
#define IODEV_PIO_WRITE 1
/* One port-I/O request passed through the IODEV_PIO ioctl. */
struct iodev_pio_req {
u_int access; /* IODEV_PIO_READ or IODEV_PIO_WRITE */
u_int port; /* I/O port number */
u_int width; /* access width in bytes */
u_int val; /* value written, or value read back */
};
#define IODEV_PIO _IOWR('I', 0, struct iodev_pio_req)
#endif /* _IODEV_H_ */

46
devicemodel/include/irq.h Normal file
View File

@@ -0,0 +1,46 @@
/*-
* Copyright (c) 2014 Hudson River Trading LLC
* Written by: John H. Baldwin <jhb@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _IRQ_H_
#define _IRQ_H_
/* Legacy PCI INTx interrupt routing (PIRQ) support. */
struct pci_vdev;
/* Assert/deassert the INTx line of a virtual PCI device. */
void pci_irq_assert(struct pci_vdev *pi);
void pci_irq_deassert(struct pci_vdev *pi);
void pci_irq_init(struct vmctx *ctx);
void pci_irq_deinit(struct vmctx *ctx);
/* Mark an IRQ as unavailable / in use for routing decisions. */
void pci_irq_reserve(int irq);
void pci_irq_use(int irq);
/* PIRQ pin allocation and pin<->IRQ mapping/config-space accessors. */
int pirq_alloc_pin(struct pci_vdev *pi);
int pirq_irq(int pin);
uint8_t pirq_read(int pin);
void pirq_write(struct vmctx *ctx, int pin, uint8_t val);
#endif /* _IRQ_H_ */

71
devicemodel/include/lpc.h Normal file
View File

@@ -0,0 +1,71 @@
/*-
* Copyright (c) 2013 Neel Natu <neel@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _LPC_H_
#define _LPC_H_
/* LPC bridge: DSDT generation hooks and legacy system resource claims. */
typedef void (*lpc_write_dsdt_t)(void);
/* A registered DSDT-writer callback. */
struct lpc_dsdt {
lpc_write_dsdt_t handler;
};
/* Statically register a DSDT writer via the lpc_dsdt_set linker set. */
#define LPC_DSDT(handler) \
static struct lpc_dsdt __CONCAT(__lpc_dsdt, __LINE__) = \
{ \
(handler), \
}; \
DATA_SET(lpc_dsdt_set, __CONCAT(__lpc_dsdt, __LINE__))
enum lpc_sysres_type {
LPC_SYSRES_IO,
LPC_SYSRES_MEM
};
/* One claimed legacy I/O or memory range. */
struct lpc_sysres {
enum lpc_sysres_type type;
uint32_t base;
uint32_t length;
};
/* Statically register a system resource via the lpc_sysres_set linker set. */
#define LPC_SYSRES(type, base, length) \
static struct lpc_sysres __CONCAT(__lpc_sysres, __LINE__) = \
{ \
(type), \
(base), \
(length) \
}; \
DATA_SET(lpc_sysres_set, __CONCAT(__lpc_sysres, __LINE__))
#define SYSRES_IO(base, length) LPC_SYSRES(LPC_SYSRES_IO, base, length)
#define SYSRES_MEM(base, length) LPC_SYSRES(LPC_SYSRES_MEM, base, length)
/* Parse an "-l" style LPC device option string. */
int lpc_device_parse(const char *opt);
char *lpc_pirq_name(int pin);
void lpc_pirq_routed(void);
#endif

View File

@@ -0,0 +1,40 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef _MACROS_H_
#define _MACROS_H_
/* Token-pasting helper.  The two-level expansion lets macro arguments be
 * expanded before concatenation; __CONCAT is re-defined here (after an
 * #undef) to override any system-header definition. */
#undef __CONCAT
#define _CONCAT_(a, b) a ## b
#define __CONCAT(a, b) _CONCAT_(a, b)
#endif

View File

@@ -0,0 +1,115 @@
/*-
* Copyright (c) 1990 The Regents of the University of California.
* All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* William Jolitz.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: @(#)rtc.h 7.1 (Berkeley) 5/12/91
* $FreeBSD$
*/
#ifndef _I386_ISA_RTC_H_
#define _I386_ISA_RTC_H_ 1
/*
 * MC146818 RTC Register locations
 * (CMOS index-register offsets; time/alarm registers first, then the
 * status registers A-D and the BIOS configuration bytes.)
 */
#define RTC_SEC 0x00 /* seconds */
#define RTC_SECALRM 0x01 /* seconds alarm */
#define RTC_MIN 0x02 /* minutes */
#define RTC_MINALRM 0x03 /* minutes alarm */
#define RTC_HRS 0x04 /* hours */
#define RTC_HRSALRM 0x05 /* hours alarm */
#define RTC_WDAY 0x06 /* week day */
#define RTC_DAY 0x07 /* day of month */
#define RTC_MONTH 0x08 /* month of year */
#define RTC_YEAR 0x09 /* year of century (see also RTC_CENTURY) */
#define RTC_STATUSA 0x0a /* status register A */
#define RTCSA_TUP 0x80 /* time update, don't look now */
#define RTCSA_RESET 0x70 /* reset divider */
#define RTCSA_DIVIDER 0x20 /* divider correct for 32768 Hz */
#define RTCSA_8192 0x03 /* 8192 Hz interrupt */
#define RTCSA_4096 0x04 /* 4096 Hz interrupt */
#define RTCSA_2048 0x05 /* 2048 Hz interrupt */
#define RTCSA_1024 0x06 /* default for profiling */
#define RTCSA_PROF RTCSA_1024
#define RTC_PROFRATE 1024
#define RTCSA_512 0x07
#define RTCSA_256 0x08
#define RTCSA_128 0x09
#define RTCSA_NOPROF RTCSA_128
#define RTC_NOPROFRATE 128
#define RTCSA_64 0x0a
#define RTCSA_32 0x0b /* 32 Hz interrupt */
#define RTC_STATUSB 0x0b /* status register B */
#define RTCSB_DST 0x01 /* USA Daylight Savings Time enable */
#define RTCSB_24HR 0x02 /* 0 = 12 hours, 1 = 24 hours */
#define RTCSB_BCD 0x04 /* 0 = BCD, 1 = Binary coded time */
#define RTCSB_SQWE 0x08 /* 1 = output square wave at SQW pin */
#define RTCSB_UINTR 0x10 /* 1 = enable update-ended interrupt */
#define RTCSB_AINTR 0x20 /* 1 = enable alarm interrupt */
#define RTCSB_PINTR 0x40 /* 1 = enable periodic clock interrupt */
#define RTCSB_HALT 0x80 /* stop clock updates */
#define RTC_INTR 0x0c /* status register C (R) interrupt source */
#define RTCIR_UPDATE 0x10 /* update intr */
#define RTCIR_ALARM 0x20 /* alarm intr */
#define RTCIR_PERIOD 0x40 /* periodic intr */
#define RTCIR_INT 0x80 /* interrupt output signal */
#define RTC_STATUSD 0x0d /* status register D (R) Lost Power */
#define RTCSD_PWR 0x80 /* clock power OK */
#define RTC_DIAG 0x0e /* status register E - bios diagnostic */
#define RTCDG_BITS "\020\010clock_battery\007ROM_cksum\006config_unit\005memory_size\004fixed_disk\003invalid_time"
#define RTC_RESET 0x0f /* status register F - reset code byte */
#define RTCRS_RST 0x00 /* normal reset */
#define RTCRS_LOAD 0x04 /* load system */
#define RTC_FDISKETTE 0x10 /* diskette drive type in upper/lower nibble */
#define RTCFDT_NONE 0 /* none present */
#define RTCFDT_360K 0x10 /* 360K */
#define RTCFDT_12M 0x20 /* 1.2M */
#define RTCFDT_720K 0x30 /* 720K */
#define RTCFDT_144M 0x40 /* 1.44M */
#define RTCFDT_288M_1 0x50 /* 2.88M, some BIOSes */
#define RTCFDT_288M 0x60 /* 2.88M */
#define RTC_BASELO 0x15 /* low byte of basemem size */
#define RTC_BASEHI 0x16 /* high byte of basemem size */
#define RTC_EXTLO 0x17 /* low byte of extended mem size */
#define RTC_EXTHI 0x18 /* high byte of extended mem size */
#define RTC_CENTURY 0x32 /* current century */
#endif /* _I386_ISA_RTC_H_ */

57
devicemodel/include/mem.h Normal file
View File

@@ -0,0 +1,57 @@
/*-
* Copyright (c) 2012 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _MEM_H_
#define _MEM_H_
struct vmctx;
/*
 * Handler invoked to emulate an access to a registered MMIO range.
 * dir gives the access direction, addr/size describe the access, and
 * *val holds the value read or to be written; arg1/arg2 are the values
 * stored in the owning mem_range.  Presumably returns 0 on success —
 * confirm against the implementation in mem.c.
 */
typedef int (*mem_func_t)(struct vmctx *ctx, int vcpu, int dir, uint64_t addr,
		int size, uint64_t *val, void *arg1, long arg2);
/* A registered MMIO region and the callback that emulates accesses to it. */
struct mem_range {
	const char *name;	/* identifier for this range */
	int flags;		/* MEM_F_* bits below */
	mem_func_t handler;	/* called on access within the range */
	void *arg1;		/* passed through to handler */
	long arg2;		/* passed through to handler */
	uint64_t base;		/* start address of the range */
	uint64_t size;		/* length of the range in bytes */
};
#define MEM_F_READ 0x1
#define MEM_F_WRITE 0x2
#define MEM_F_RW 0x3
#define MEM_F_IMMUTABLE 0x4 /* mem_range cannot be unregistered */
void init_mem(void);
int emulate_mem(struct vmctx *ctx, struct mmio_request *mmio_req);
int register_mem(struct mem_range *memp);
/* NOTE(review): presumably consulted only when no regular range matches. */
int register_mem_fallback(struct mem_range *memp);
int unregister_mem(struct mem_range *memp);
#endif /* _MEM_H_ */

View File

@@ -0,0 +1,58 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _MEVENT_H_
#define _MEVENT_H_
/* Event classes a mevent can wait for. */
enum ev_type {
	EVF_READ,
	EVF_WRITE,
	EVF_TIMER,	/* Not supported yet */
	EVF_SIGNAL	/* Not supported yet */
};
/*
 * NOTE(review): this is a tentative *definition*, not an extern
 * declaration, so every translation unit including this header emits
 * its own `vmname` symbol.  Consider `extern char *vmname;` here with a
 * single definition in one .c file.
 */
char *vmname;
struct mevent;
/*
 * Register func(fd, type, param) to be invoked when `type` becomes
 * ready on fd; returns an opaque handle for the calls below.
 */
struct mevent *mevent_add(int fd, enum ev_type type,
	void (*func)(int, enum ev_type, void *),
	void *param);
int mevent_enable(struct mevent *evp);
int mevent_disable(struct mevent *evp);
int mevent_delete(struct mevent *evp);
/* Presumably also closes the associated fd — confirm in mevent.c. */
int mevent_delete_close(struct mevent *evp);
void mevent_notify(void);
void mevent_dispatch(void);
/*
 * Safe list iteration: tvar caches the next element so the current one
 * may be removed (or freed) from inside the loop body.
 */
#define list_foreach_safe(var, head, field, tvar) \
	for ((var) = LIST_FIRST((head)); \
		(var) && ((tvar) = LIST_NEXT((var), field), 1);\
		(var) = (tvar))
#endif /* _MEVENT_H_ */

View File

@@ -0,0 +1,188 @@
/*-
* Copyright (c) 1996, by Steve Passe
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. The name of the developer may NOT be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef __MPTABLE_H__
#define __MPTABLE_H__
/* Bus type codes used in MP-table bus entries. */
enum busTypes {
	NOBUS = 0,
	CBUS = 1,
	CBUSII = 2,
	EISA = 3,
	ISA = 6,
	MCA = 9,
	PCI = 13,
	XPRESS = 18,
	MAX_BUSTYPE = 18,
	UNKNOWN_BUSTYPE = 0xff
};
/* MP Floating Pointer Structure */
typedef struct MPFPS {
	uint8_t signature[4];
	uint32_t pap;		/* physical address of the MP config table */
	uint8_t length;
	uint8_t spec_rev;
	uint8_t checksum;	/* bytes of the structure must sum to zero */
	uint8_t config_type;
	uint8_t mpfb2;		/* feature byte 2; see MPFB2_* below */
	uint8_t mpfb3;
	uint8_t mpfb4;
	uint8_t mpfb5;
} __attribute__((packed)) *mpfps_t;
#define MPFB2_IMCR_PRESENT 0x80
#define MPFB2_MUL_CLK_SRCS 0x40
/* MP Configuration Table Header */
typedef struct MPCTH {
	uint8_t signature[4];
	uint16_t base_table_length;
	uint8_t spec_rev;
	uint8_t checksum;
	uint8_t oem_id[8];
	uint8_t product_id[12];
	uint32_t oem_table_pointer;
	uint16_t oem_table_size;
	uint16_t entry_count;	/* number of base-table entries */
	uint32_t apic_address;
	uint16_t extended_table_length;
	uint8_t extended_table_checksum;
	uint8_t reserved;
} __attribute__((packed)) *mpcth_t;
/* Base table entries */
#define MPCT_ENTRY_PROCESSOR 0
#define MPCT_ENTRY_BUS 1
#define MPCT_ENTRY_IOAPIC 2
#define MPCT_ENTRY_INT 3
#define MPCT_ENTRY_LOCAL_INT 4
/* Processor entry (MPCT_ENTRY_PROCESSOR). */
typedef struct PROCENTRY {
	uint8_t type;
	uint8_t apic_id;
	uint8_t apic_version;
	uint8_t cpu_flags;	/* PROCENTRY_FLAG_* below */
	uint32_t cpu_signature;
	uint32_t feature_flags;
	uint32_t reserved1;
	uint32_t reserved2;
} __attribute__((packed)) *proc_entry_ptr;
#define PROCENTRY_FLAG_EN 0x01	/* processor enabled */
#define PROCENTRY_FLAG_BP 0x02	/* bootstrap processor */
/* Bus entry (MPCT_ENTRY_BUS). */
typedef struct BUSENTRY {
	uint8_t type;
	uint8_t bus_id;
	uint8_t bus_type[6];	/* ASCII bus-type string */
} __attribute__((packed)) *bus_entry_ptr;
/* I/O APIC entry (MPCT_ENTRY_IOAPIC). */
typedef struct IOAPICENTRY {
	uint8_t type;
	uint8_t apic_id;
	uint8_t apic_version;
	uint8_t apic_flags;	/* IOAPICENTRY_FLAG_* below */
	uint32_t apic_address;
} __attribute__((packed)) *io_apic_entry_ptr;
#define IOAPICENTRY_FLAG_EN 0x01	/* I/O APIC enabled */
/* Interrupt-assignment entry (MPCT_ENTRY_INT / MPCT_ENTRY_LOCAL_INT). */
typedef struct INTENTRY {
	uint8_t type;
	uint8_t int_type;	/* INTENTRY_TYPE_* below */
	uint16_t int_flags;	/* polarity/trigger; INTENTRY_FLAGS_* below */
	uint8_t src_bus_id;
	uint8_t src_bus_irq;
	uint8_t dst_apic_id;
	uint8_t dst_apic_int;
} __attribute__((packed)) *int_entry_ptr;
#define INTENTRY_TYPE_INT 0
#define INTENTRY_TYPE_NMI 1
#define INTENTRY_TYPE_SMI 2
#define INTENTRY_TYPE_EXTINT 3
#define INTENTRY_FLAGS_POLARITY 0x3
#define INTENTRY_FLAGS_POLARITY_CONFORM 0x0
#define INTENTRY_FLAGS_POLARITY_ACTIVEHI 0x1
#define INTENTRY_FLAGS_POLARITY_ACTIVELO 0x3
#define INTENTRY_FLAGS_TRIGGER 0xc
#define INTENTRY_FLAGS_TRIGGER_CONFORM 0x0
#define INTENTRY_FLAGS_TRIGGER_EDGE 0x4
#define INTENTRY_FLAGS_TRIGGER_LEVEL 0xc
/* Extended table entries */
/* Common header shared by all extended entries (type + length). */
typedef struct EXTENTRY {
	uint8_t type;
	uint8_t length;
} __attribute__((packed)) *ext_entry_ptr;
#define MPCT_EXTENTRY_SAS 0x80
#define MPCT_EXTENTRY_BHD 0x81
#define MPCT_EXTENTRY_CBASM 0x82
/* System address-space mapping entry (MPCT_EXTENTRY_SAS). */
typedef struct SASENTRY {
	uint8_t type;
	uint8_t length;
	uint8_t bus_id;
	uint8_t address_type;	/* SASENTRY_TYPE_* below */
	uint64_t address_base;
	uint64_t address_length;
} __attribute__((packed)) *sas_entry_ptr;
#define SASENTRY_TYPE_IO 0
#define SASENTRY_TYPE_MEMORY 1
#define SASENTRY_TYPE_PREFETCH 2
/* Bus hierarchy descriptor entry (MPCT_EXTENTRY_BHD). */
typedef struct BHDENTRY {
	uint8_t type;
	uint8_t length;
	uint8_t bus_id;
	uint8_t bus_info;	/* BHDENTRY_INFO_* below */
	uint8_t parent_bus;
	uint8_t reserved[3];
} __attribute__((packed)) *bhd_entry_ptr;
#define BHDENTRY_INFO_SUBTRACTIVE_DECODE 0x1
/* Compatibility bus address-space modifier entry (MPCT_EXTENTRY_CBASM). */
typedef struct CBASMENTRY {
	uint8_t type;
	uint8_t length;
	uint8_t bus_id;
	uint8_t address_mod;	/* CBASMENTRY_ADDRESS_MOD_* below */
	uint32_t predefined_range;	/* CBASMENTRY_RANGE_* below */
} __attribute__((packed)) *cbasm_entry_ptr;
#define CBASMENTRY_ADDRESS_MOD_ADD 0x0
#define CBASMENTRY_ADDRESS_MOD_SUBTRACT 0x1
#define CBASMENTRY_RANGE_ISA_IO 0
#define CBASMENTRY_RANGE_VGA_IO 1
#endif /* !__MPTABLE_H__ */

View File

@@ -0,0 +1,35 @@
/*-
* Copyright (c) 2012 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _MPTBL_H_
#define _MPTBL_H_
/* Build the MP table for a guest with ncpu processors (0 on success, assumed). */
int mptable_build(struct vmctx *ctx, int ncpu);
/* Attach an OEM-defined table of tblsz bytes to the MP table. */
void mptable_add_oemtbl(void *tbl, int tblsz);
#endif /* _MPTBL_H_ */

View File

@@ -0,0 +1,642 @@
/*
* Copyright (C) 2011-2014 Matteo Landi, Luigi Rizzo. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* $FreeBSD$
*
* Definitions of constants and the structures used by the netmap
* framework, for the part visible to both kernel and userspace.
* Detailed info on netmap is available with "man netmap" or at
*
* http://info.iet.unipi.it/~luigi/netmap/
*
* This API is also used to communicate with the VALE software switch
*/
#ifndef _NET_NETMAP_H_
#define _NET_NETMAP_H_
#define NETMAP_API 11 /* current API version */
#define NETMAP_MIN_API 11 /* min and max versions accepted */
#define NETMAP_MAX_API 15
/*
* Some fields should be cache-aligned to reduce contention.
* The alignment is architecture and OS dependent, but rather than
* digging into OS headers to find the exact value we use an estimate
* that should cover most architectures.
*/
#define NM_CACHE_ALIGN 128
/*
* --- Netmap data structures ---
*
* The userspace data structures used by netmap are shown below.
* They are allocated by the kernel and mmap()ed by userspace threads.
* Pointers are implemented as memory offsets or indexes,
* so that they can be easily dereferenced in kernel and userspace.
KERNEL (opaque, obviously)
====================================================================
|
USERSPACE | struct netmap_ring
+---->+---------------+
/ | head,cur,tail |
struct netmap_if (nifp, 1 per fd) / | buf_ofs |
+---------------+ / | other fields |
| ni_tx_rings | / +===============+
| ni_rx_rings | / | buf_idx, len | slot[0]
| | / | flags, ptr |
| | / +---------------+
+===============+ / | buf_idx, len | slot[1]
| txring_ofs[0] | (rel.to nifp)--' | flags, ptr |
| txring_ofs[1] | +---------------+
(tx+1 entries) (num_slots entries)
| txring_ofs[t] | | buf_idx, len | slot[n-1]
+---------------+ | flags, ptr |
| rxring_ofs[0] | +---------------+
| rxring_ofs[1] |
(rx+1 entries)
| rxring_ofs[r] |
+---------------+
* For each "interface" (NIC, host stack, PIPE, VALE switch port) bound to
* a file descriptor, the mmap()ed region contains a (logically readonly)
* struct netmap_if pointing to struct netmap_ring's.
*
* There is one netmap_ring per physical NIC ring, plus one tx/rx ring
* pair attached to the host stack (this pair is unused for non-NIC ports).
*
* All physical/host stack ports share the same memory region,
* so that zero-copy can be implemented between them.
* VALE switch ports instead have separate memory regions.
*
* The netmap_ring is the userspace-visible replica of the NIC ring.
* Each slot has the index of a buffer (MTU-sized and residing in the
* mmapped region), its length and some flags. An extra 64-bit pointer
* is provided for user-supplied buffers in the tx path.
*
* In user space, the buffer address is computed as
* (char *)ring + buf_ofs + index * NETMAP_BUF_SIZE
*
* Added in NETMAP_API 11:
*
* + NIOCREGIF can request the allocation of extra spare buffers from
* the same memory pool. The desired number of buffers must be in
* nr_arg3. The ioctl may return fewer buffers, depending on memory
* availability. nr_arg3 will return the actual value, and, once
* mapped, nifp->ni_bufs_head will be the index of the first buffer.
*
* The buffers are linked to each other using the first uint32_t
* as the index. On close, ni_bufs_head must point to the list of
* buffers to be released.
*
* + NIOCREGIF can request space for extra rings (and buffers)
* allocated in the same memory space. The number of extra rings
* is in nr_arg1, and is advisory. This is a no-op on NICs where
* the size of the memory space is fixed.
*
* + NIOCREGIF can attach to PIPE rings sharing the same memory
* space with a parent device. The ifname indicates the parent device,
* which must already exist. Flags in nr_flags indicate if we want to
* bind the master or slave side, the index (from nr_ringid)
* is just a cookie and does not need to be sequential.
*
* + NIOCREGIF can also attach to 'monitor' rings that replicate
* the content of specific rings, also from the same memory space.
*
* Extra flags in nr_flags support the above functions.
* Application libraries may use the following naming scheme:
* netmap:foo all NIC ring pairs
* netmap:foo^ only host ring pair
* netmap:foo+ all NIC ring + host ring pairs
* netmap:foo-k the k-th NIC ring pair
* netmap:foo{k PIPE ring pair k, master side
* netmap:foo}k PIPE ring pair k, slave side
*
* Some notes about host rings:
*
* + The RX host ring is used to store those packets that the host network
* stack is trying to transmit through a NIC queue, but only if that queue
* is currently in netmap mode. Netmap will not intercept host stack mbufs
* designated to NIC queues that are not in netmap mode. As a consequence,
* registering a netmap port with netmap:foo^ is not enough to intercept
* mbufs in the RX host ring; the netmap port should be registered with
* netmap:foo*, or another registration should be done to open at least a
* NIC TX queue in netmap mode.
*
* + Netmap is not currently able to deal with intercepted trasmit mbufs which
* require offloadings like TSO, UFO, checksumming offloadings, etc. It is
* responsibility of the user to disable those offloadings (e.g. using
* ifconfig on FreeBSD or ethtool -K on Linux) for an interface that is being
* used in netmap mode. If the offloadings are not disabled, GSO and/or
* unchecksummed packets may be dropped immediately or end up in the host RX
* ring, and will be dropped as soon as the packet reaches another netmap
* adapter.
*/
/*
 * struct netmap_slot is a buffer descriptor: one entry of the slot[]
 * array at the tail of a netmap_ring.
 */
struct netmap_slot {
	uint32_t buf_idx;	/* buffer index */
	uint16_t len;		/* length for this slot */
	uint16_t flags;		/* buf changed, etc. */
	uint64_t ptr;		/* pointer for indirect buffers */
};
/*
* The following flags control how the slot is used
*/
#define NS_BUF_CHANGED 0x0001 /* buf_idx changed */
/*
* must be set whenever buf_idx is changed (as it might be
* necessary to recompute the physical address and mapping)
*
* It is also set by the kernel whenever the buf_idx is
* changed internally (e.g., by pipes). Applications may
* use this information to know when they can reuse the
* contents of previously prepared buffers.
*/
#define NS_REPORT 0x0002 /* ask the hardware to report results */
/*
* Request notification when slot is used by the hardware.
* Normally transmit completions are handled lazily and
* may be unreported. This flag lets us know when a slot
* has been sent (e.g. to terminate the sender).
*/
#define NS_FORWARD 0x0004 /* pass packet 'forward' */
/*
* (Only for physical ports, rx rings with NR_FORWARD set).
* Slot released to the kernel (i.e. before ring->head) with
* this flag set are passed to the peer ring (host/NIC),
* thus restoring the host-NIC connection for these slots.
* This supports efficient traffic monitoring or firewalling.
*/
#define NS_NO_LEARN 0x0008 /* disable bridge learning */
/*
* On a VALE switch, do not 'learn' the source port for
* this buffer.
*/
#define NS_INDIRECT 0x0010 /* userspace buffer */
/*
* (VALE tx rings only) data is in a userspace buffer,
* whose address is in the 'ptr' field in the slot.
*/
#define NS_MOREFRAG 0x0020 /* packet has more fragments */
/*
* (VALE ports only)
* Set on all but the last slot of a multi-segment packet.
* The 'len' field refers to the individual fragment.
*/
#define NS_PORT_SHIFT 8
#define NS_PORT_MASK (0xff << NS_PORT_SHIFT)
/*
* The high 8 bits of the flag, if not zero, indicate the
* destination port for the VALE switch, overriding
* the lookup table.
*/
#define NS_RFRAGS(_slot) (((_slot)->flags >> 8) & 0xff)
/*
* (VALE rx rings only) the high 8 bits
* are the number of fragments.
*/
/*
* struct netmap_ring
*
* Netmap representation of a TX or RX ring (also known as "queue").
* This is a queue implemented as a fixed-size circular array.
* At the software level the important fields are: head, cur, tail.
*
* In TX rings:
*
* head first slot available for transmission.
* cur wakeup point. select() and poll() will unblock
* when 'tail' moves past 'cur'
* tail (readonly) first slot reserved to the kernel
*
* [head .. tail-1] can be used for new packets to send;
* 'head' and 'cur' must be incremented as slots are filled
* with new packets to be sent;
* 'cur' can be moved further ahead if we need more space
* for new transmissions. XXX todo (2014-03-12)
*
* In RX rings:
*
* head first valid received packet
* cur wakeup point. select() and poll() will unblock
* when 'tail' moves past 'cur'
* tail (readonly) first slot reserved to the kernel
*
* [head .. tail-1] contain received packets;
* 'head' and 'cur' must be incremented as slots are consumed
* and can be returned to the kernel;
* 'cur' can be moved further ahead if we want to wait for
* new packets without returning the previous ones.
*
* DATA OWNERSHIP/LOCKING:
* The netmap_ring, and all slots and buffers in the range
* [head .. tail-1] are owned by the user program;
* the kernel only accesses them during a netmap system call
* and in the user thread context.
*
* Other slots and buffers are reserved for use by the kernel
*/
struct netmap_ring {
	/*
	 * buf_ofs is meant to be used through macros.
	 * It contains the offset of the buffer region from this
	 * descriptor.
	 */
	const int64_t buf_ofs;
	const uint32_t num_slots;	/* number of slots in the ring. */
	const uint32_t nr_buf_size;	/* size of each buffer */
	const uint16_t ringid;
	const uint16_t dir;		/* 0: tx, 1: rx */
	/* (u) = written by userspace, (k) = written by the kernel */
	uint32_t head;		/* (u) first user slot */
	uint32_t cur;		/* (u) wakeup point */
	uint32_t tail;		/* (k) first kernel slot */
	uint32_t flags;		/* NR_* ring flags below */
	struct timeval ts;	/* (k) time of last *sync() */
	/* opaque room for a mutex or similar object */
#if !defined(_WIN32) || defined(__CYGWIN__)
	uint8_t __attribute__((__aligned__(NM_CACHE_ALIGN))) sem[128];
#else
	uint8_t __declspec(align(NM_CACHE_ALIGN)) sem[128];
#endif
	/* the slots follow. This struct has variable size */
	struct netmap_slot slot[0];	/* array of slots. */
};
/*
* RING FLAGS
*/
#define NR_TIMESTAMP 0x0002 /* set timestamp on *sync() */
/*
* updates the 'ts' field on each netmap syscall. This saves
* saves a separate gettimeofday(), and is not much worse than
* software timestamps generated in the interrupt handler.
*/
#define NR_FORWARD 0x0004 /* enable NS_FORWARD for ring */
/*
* Enables the NS_FORWARD slot flag for the ring.
*/
/*
* Netmap representation of an interface and its queue(s).
* This is initialized by the kernel when binding a file
* descriptor to a port, and should be considered as readonly
* by user programs. The kernel never uses it.
*
* There is one netmap_if for each file descriptor on which we want
* to select/poll.
* select/poll operates on one or all pairs depending on the value of
* nmr_queueid passed on the ioctl.
*/
struct netmap_if {
	char ni_name[IFNAMSIZ];	/* name of the interface. */
	const uint32_t ni_version;	/* API version, currently unused */
	const uint32_t ni_flags;	/* properties */
#define NI_PRIV_MEM 0x1	/* private memory region */
	/*
	 * The number of packet rings available in netmap mode.
	 * Physical NICs can have different numbers of tx and rx rings.
	 * Physical NICs also have a 'host' ring pair.
	 * Additionally, clients can request additional ring pairs to
	 * be used for internal communication.
	 */
	const uint32_t ni_tx_rings;	/* number of HW tx rings */
	const uint32_t ni_rx_rings;	/* number of HW rx rings */
	uint32_t ni_bufs_head;	/* head index for extra bufs */
	uint32_t ni_spare1[5];
	/*
	 * The following array contains the offset of each netmap ring
	 * from this structure, in the following order:
	 * NIC tx rings (ni_tx_rings); host tx ring (1); extra tx rings;
	 * NIC rx rings (ni_rx_rings); host rx ring (1); extra rx rings.
	 *
	 * The area is filled up by the kernel on NIOCREGIF,
	 * and then only read by userspace code.
	 */
	const ssize_t ring_ofs[0];
};
#ifndef NIOCREGIF
/*
* ioctl names and related fields
*
* NIOCTXSYNC, NIOCRXSYNC synchronize tx or rx queues,
* whose identity is set in NIOCREGIF through nr_ringid.
* These are non blocking and take no argument.
*
* NIOCGINFO takes a struct ifreq, the interface name is the input,
* the outputs are number of queues and number of descriptor
* for each queue (useful to set number of threads etc.).
* The info returned is only advisory and may change before
* the interface is bound to a file descriptor.
*
* NIOCREGIF takes an interface name within a struct nmre,
* and activates netmap mode on the interface (if possible).
*
* The argument to NIOCGINFO/NIOCREGIF overlays struct ifreq so we
* can pass it down to other NIC-related ioctls.
*
* The actual argument (struct nmreq) has a number of options to request
* different functions.
* The following are used in NIOCREGIF when nr_cmd == 0:
*
* nr_name (in)
* The name of the port (em0, valeXXX:YYY, etc.)
* limited to IFNAMSIZ for backward compatibility.
*
* nr_version (in/out)
* Must match NETMAP_API as used in the kernel, error otherwise.
* Always returns the desired value on output.
*
 * nr_tx_slots, nr_rx_slots, nr_tx_rings, nr_rx_rings (in/out)
* On input, non-zero values may be used to reconfigure the port
* according to the requested values, but this is not guaranteed.
* On output the actual values in use are reported.
*
* nr_ringid (in)
* Indicates how rings should be bound to the file descriptors.
* If nr_flags != 0, then the low bits (in NETMAP_RING_MASK)
* are used to indicate the ring number, and nr_flags specifies
* the actual rings to bind. NETMAP_NO_TX_POLL is unaffected.
*
* NOTE: THE FOLLOWING (nr_flags == 0) IS DEPRECATED:
* If nr_flags == 0, NETMAP_HW_RING and NETMAP_SW_RING control
* the binding as follows:
* 0 (default) binds all physical rings
* NETMAP_HW_RING | ring number binds a single ring pair
* NETMAP_SW_RING binds only the host tx/rx rings
*
* NETMAP_NO_TX_POLL can be OR-ed to make select()/poll() push
* packets on tx rings only if POLLOUT is set.
* The default is to push any pending packet.
*
* NETMAP_DO_RX_POLL can be OR-ed to make select()/poll() release
* packets on rx rings also when POLLIN is NOT set.
* The default is to touch the rx ring only with POLLIN.
* Note that this is the opposite of TX because it
* reflects the common usage.
*
* NOTE: NETMAP_PRIV_MEM IS DEPRECATED, use nr_arg2 instead.
* NETMAP_PRIV_MEM is set on return for ports that do not use
* the global memory allocator.
* This information is not significant and applications
* should look at the region id in nr_arg2
*
* nr_flags is the recommended mode to indicate which rings should
* be bound to a file descriptor. Values are NR_REG_*
*
* nr_arg1 (in) The number of extra rings to be reserved.
* Especially when allocating a VALE port the system only
* allocates the amount of memory needed for the port.
* If more shared memory rings are desired (e.g. for pipes),
* the first invocation for the same basename/allocator
* should specify a suitable number. Memory cannot be
* extended after the first allocation without closing
* all ports on the same region.
*
* nr_arg2 (in/out) The identity of the memory region used.
* On input, 0 means the system decides autonomously,
* other values may try to select a specific region.
* On return the actual value is reported.
* Region '1' is the global allocator, normally shared
* by all interfaces. Other values are private regions.
* If two ports the same region zero-copy is possible.
*
* nr_arg3 (in/out) number of extra buffers to be allocated.
*
*
*
* nr_cmd (in) if non-zero indicates a special command:
* NETMAP_BDG_ATTACH and nr_name = vale*:ifname
* attaches the NIC to the switch; nr_ringid specifies
* which rings to use. Used by vale-ctl -a ...
* nr_arg1 = NETMAP_BDG_HOST also attaches the host port
* as in vale-ctl -h ...
*
* NETMAP_BDG_DETACH and nr_name = vale*:ifname
* disconnects a previously attached NIC.
* Used by vale-ctl -d ...
*
* NETMAP_BDG_LIST
* list the configuration of VALE switches.
*
* NETMAP_BDG_VNET_HDR
* Set the virtio-net header length used by the client
* of a VALE switch port.
*
* NETMAP_BDG_NEWIF
* create a persistent VALE port with name nr_name.
* Used by vale-ctl -n ...
*
* NETMAP_BDG_DELIF
* delete a persistent VALE port. Used by vale-ctl -d ...
*
* nr_arg1, nr_arg2, nr_arg3 (in/out) command specific
*
*
*
*/
/*
 * struct nmreq overlays a struct ifreq (just the name)
 */
struct nmreq {
	char nr_name[IFNAMSIZ];
	uint32_t nr_version;	/* API version */
	uint32_t nr_offset;	/* nifp offset in the shared region */
	uint32_t nr_memsize;	/* size of the shared region */
	uint32_t nr_tx_slots;	/* slots in tx rings */
	uint32_t nr_rx_slots;	/* slots in rx rings */
	uint16_t nr_tx_rings;	/* number of tx rings */
	uint16_t nr_rx_rings;	/* number of rx rings */
	uint16_t nr_ringid;	/* ring(s) we care about */
#define NETMAP_HW_RING 0x4000	/* single NIC ring pair */
#define NETMAP_SW_RING 0x2000	/* only host ring pair */
#define NETMAP_RING_MASK 0x0fff	/* the ring number */
#define NETMAP_NO_TX_POLL 0x1000	/* no automatic txsync on poll */
#define NETMAP_DO_RX_POLL 0x8000	/* DO automatic rxsync on poll */
	uint16_t nr_cmd;	/* special command; see comment above */
#define NETMAP_BDG_ATTACH 1	/* attach the NIC */
#define NETMAP_BDG_DETACH 2	/* detach the NIC */
#define NETMAP_BDG_REGOPS 3	/* register bridge callbacks */
#define NETMAP_BDG_LIST 4	/* get bridge's info */
#define NETMAP_BDG_VNET_HDR 5	/* set the port virtio-net-hdr length */
#define NETMAP_BDG_OFFSET NETMAP_BDG_VNET_HDR	/* deprecated alias */
#define NETMAP_BDG_NEWIF 6	/* create a virtual port */
#define NETMAP_BDG_DELIF 7	/* destroy a virtual port */
#define NETMAP_PT_HOST_CREATE 8	/* create ptnetmap kthreads */
#define NETMAP_PT_HOST_DELETE 9	/* delete ptnetmap kthreads */
#define NETMAP_BDG_POLLING_ON 10	/* start polling kthread */
#define NETMAP_BDG_POLLING_OFF 11	/* stop polling kthread */
#define NETMAP_VNET_HDR_GET 12	/* get the port virtio-net-hdr length */
#define NETMAP_POOLS_INFO_GET 13	/* get memory allocator pools info */
	uint16_t nr_arg1;	/* reserve extra rings in NIOCREGIF */
#define NETMAP_BDG_HOST 1	/* attach the host stack on ATTACH */
	uint16_t nr_arg2;	/* id of the memory allocator region */
	uint32_t nr_arg3;	/* req. extra buffers in NIOCREGIF */
	uint32_t nr_flags;
	/* various modes, extends nr_ringid */
	uint32_t spare2[1];
};
#define NR_REG_MASK 0xf /* values for nr_flags */
enum { NR_REG_DEFAULT = 0, /* backward compat, should not be used. */
NR_REG_ALL_NIC = 1,
NR_REG_SW = 2,
NR_REG_NIC_SW = 3,
NR_REG_ONE_NIC = 4,
NR_REG_PIPE_MASTER = 5,
NR_REG_PIPE_SLAVE = 6,
};
/* monitor uses the NR_REG to select the rings to monitor */
#define NR_MONITOR_TX 0x100
#define NR_MONITOR_RX 0x200
#define NR_ZCOPY_MON 0x400
/* request exclusive access to the selected rings */
#define NR_EXCLUSIVE 0x800
/* request ptnetmap host support */
#define NR_PASSTHROUGH_HOST NR_PTNETMAP_HOST /* deprecated */
#define NR_PTNETMAP_HOST 0x1000
#define NR_RX_RINGS_ONLY 0x2000
#define NR_TX_RINGS_ONLY 0x4000
/* Applications set this flag if they are able to deal with virtio-net headers,
* that is send/receive frames that start with a virtio-net header.
* If not set, NIOCREGIF will fail with netmap ports that require applications
* to use those headers. If the flag is set, the application can use the
* NETMAP_VNET_HDR_GET command to figure out the header length.
*/
#define NR_ACCEPT_VNET_HDR 0x8000
#define NM_BDG_NAME "vale" /* prefix for bridge port name */
/*
* Windows does not have _IOWR(). _IO(), _IOW() and _IOR() are defined
* in ws2def.h but not sure if they are in the form we need.
* XXX so we redefine them
* in a convenient way to use for DeviceIoControl signatures
*/
#ifdef _WIN32
#undef _IO /* ws2def.h */
#define _WIN_NM_IOCTL_TYPE 40000
#define _IO(_c, _n) CTL_CODE(_WIN_NM_IOCTL_TYPE, ((_n) + 0x800), \
METHOD_BUFFERED, FILE_ANY_ACCESS)
#define _IO_direct(_c, _n) CTL_CODE(_WIN_NM_IOCTL_TYPE, ((_n) + 0x800), \
METHOD_OUT_DIRECT, FILE_ANY_ACCESS)
#define _IOWR(_c, _n, _s) _IO(_c, _n)
/* We have some internal sysctl in addition to the externally visible ones */
#define NETMAP_MMAP _IO_direct('i', 160) /* note METHOD_OUT_DIRECT */
#define NETMAP_POLL _IO('i', 162)
/* and also two setsockopt for sysctl emulation */
#define NETMAP_SETSOCKOPT _IO('i', 140)
#define NETMAP_GETSOCKOPT _IO('i', 141)
/* These linknames are for the Netmap Core Driver */
#define NETMAP_NT_DEVICE_NAME L"\\Device\\NETMAP"
#define NETMAP_DOS_DEVICE_NAME L"\\DosDevices\\netmap"
/* Definition of a structure used to pass a virtual address within an IOCTL */
typedef struct _MEMORY_ENTRY {
PVOID pUsermodeVirtualAddress;
} MEMORY_ENTRY, *PMEMORY_ENTRY;
typedef struct _POLL_REQUEST_DATA {
int events;
int timeout;
int revents;
} POLL_REQUEST_DATA;
#endif /* _WIN32 */
/*
* FreeBSD uses the size value embedded in the _IOWR to determine
* how much to copy in/out. So we need it to match the actual
* data structure we pass. We put some spares in the structure
* to ease compatibility with other versions
*/
#define NIOCGINFO _IOWR('i', 145, struct nmreq) /* return IF info */
#define NIOCREGIF _IOWR('i', 146, struct nmreq) /* interface register */
#define NIOCTXSYNC _IO('i', 148) /* sync tx queues */
#define NIOCRXSYNC _IO('i', 149) /* sync rx queues */
#define NIOCCONFIG _IOWR('i', 150, struct nm_ifreq) /* for ext. modules */
#endif /* !NIOCREGIF */
/*
* Helper functions for kernel and userspace
*/
/*
* check if space is available in the ring.
*/
static inline int
nm_ring_empty(struct netmap_ring *ring)
{
	/* The ring has no slots available once cur has caught up with tail. */
	return (ring->tail == ring->cur);
}
/*
* Opaque structure that is passed to an external kernel
* module via ioctl(fd, NIOCCONFIG, req) for a user-owned
* bridge port (at this point ephemeral VALE interface).
*/
#define NM_IFRDATA_LEN 256
struct nm_ifreq {
char nifr_name[IFNAMSIZ]; /* name of the bridge port the request targets */
char data[NM_IFRDATA_LEN]; /* opaque payload interpreted by the external kernel module */
};
#endif /* _NET_NETMAP_H_ */

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,254 @@
/*-
* Copyright (c) 1991 The Regents of the University of California.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: @(#)ns16550.h 7.1 (Berkeley) 5/9/91
* $FreeBSD$
*/
#ifndef _NS16550_H_
#define _NS16550_H_
/*
* NS8250... UART registers.
*/
/* 8250 registers #[0-6]. */
#define com_data 0 /* data register (R/W) */
#define REG_DATA com_data
#define com_ier 1 /* interrupt enable register (W) */
#define REG_IER com_ier
#define IER_ERXRDY 0x1
#define IER_ETXRDY 0x2
#define IER_ERLS 0x4
#define IER_EMSC 0x8
/*
* Receive timeout interrupt enable.
* Implemented in Intel XScale, Ingenic XBurst.
*/
#define IER_RXTMOUT 0x10
#define IER_BITS "\20\1ERXRDY\2ETXRDY\3ERLS\4EMSC\5RXTMOUT"
#define com_iir 2 /* interrupt identification register (R) */
#define REG_IIR com_iir
#define IIR_IMASK 0xf
#define IIR_RXTOUT 0xc
#define IIR_BUSY 0x7
#define IIR_RLS 0x6
#define IIR_RXRDY 0x4
#define IIR_TXRDY 0x2
#define IIR_NOPEND 0x1
#define IIR_MLSC 0x0
#define IIR_FIFO_MASK 0xc0 /* set if FIFOs are enabled */
#define IIR_BITS "\20\1NOPEND\2TXRDY\3RXRDY"
#define com_lcr 3 /* line control register (R/W) */
#define com_cfcr com_lcr /* character format control register (R/W) */
#define REG_LCR com_lcr
#define LCR_DLAB 0x80
#define CFCR_DLAB LCR_DLAB
#define LCR_EFR_ENABLE 0xbf /* magic to enable EFR on 16650 up */
#define CFCR_EFR_ENABLE LCR_EFR_ENABLE
#define LCR_SBREAK 0x40
#define CFCR_SBREAK LCR_SBREAK
#define LCR_PZERO 0x30
#define CFCR_PZERO LCR_PZERO
#define LCR_PONE 0x20
#define CFCR_PONE LCR_PONE
#define LCR_PEVEN 0x10
#define CFCR_PEVEN LCR_PEVEN
#define LCR_PODD 0x00
#define CFCR_PODD LCR_PODD
#define LCR_PENAB 0x08
#define CFCR_PENAB LCR_PENAB
#define LCR_STOPB 0x04
#define CFCR_STOPB LCR_STOPB
#define LCR_8BITS 0x03
#define CFCR_8BITS LCR_8BITS
#define LCR_7BITS 0x02
#define CFCR_7BITS LCR_7BITS
#define LCR_6BITS 0x01
#define CFCR_6BITS LCR_6BITS
#define LCR_5BITS 0x00
#define CFCR_5BITS LCR_5BITS
#define com_mcr 4 /* modem control register (R/W) */
#define REG_MCR com_mcr
#define MCR_PRESCALE 0x80 /* only available on 16650 up */
#define MCR_LOOPBACK 0x10
#define MCR_IE 0x08
#define MCR_IENABLE MCR_IE
#define MCR_DRS 0x04
#define MCR_RTS 0x02
#define MCR_DTR 0x01
#define MCR_BITS "\20\1DTR\2RTS\3DRS\4IE\5LOOPBACK\10PRESCALE"
#define com_lsr 5 /* line status register (R/W) */
#define REG_LSR com_lsr
#define LSR_RCV_FIFO 0x80
#define LSR_TEMT 0x40
#define LSR_TSRE LSR_TEMT
#define LSR_THRE 0x20
#define LSR_TXRDY LSR_THRE
#define LSR_BI 0x10
#define LSR_FE 0x08
#define LSR_PE 0x04
#define LSR_OE 0x02
#define LSR_RXRDY 0x01
#define LSR_RCV_MASK 0x1f
#define LSR_BITS "\20\1RXRDY\2OE\3PE\4FE\5BI\6THRE\7TEMT\10RCV_FIFO"
#define com_msr 6 /* modem status register (R/W) */
#define REG_MSR com_msr
#define MSR_DCD 0x80
#define MSR_RI 0x40
#define MSR_DSR 0x20
#define MSR_CTS 0x10
#define MSR_DDCD 0x08
#define MSR_TERI 0x04
#define MSR_DDSR 0x02
#define MSR_DCTS 0x01
#define MSR_BITS "\20\1DCTS\2DDSR\3TERI\4DDCD\5CTS\6DSR\7RI\10DCD"
/* 8250 multiplexed registers #[0-1]. Access enabled by LCR[7]. */
#define com_dll 0 /* divisor latch low (R/W) */
#define com_dlbl com_dll
#define com_dlm 1 /* divisor latch high (R/W) */
#define com_dlbh com_dlm
#define REG_DLL com_dll
#define REG_DLH com_dlm
/* 16450 register #7. Not multiplexed. */
#define com_scr 7 /* scratch register (R/W) */
/* 16550 register #2. Not multiplexed. */
#define com_fcr 2 /* FIFO control register (W) */
#define com_fifo com_fcr
#define REG_FCR com_fcr
#define FCR_ENABLE 0x01
#define FIFO_ENABLE FCR_ENABLE
#define FCR_RCV_RST 0x02
#define FIFO_RCV_RST FCR_RCV_RST
#define FCR_XMT_RST 0x04
#define FIFO_XMT_RST FCR_XMT_RST
#define FCR_DMA 0x08
#define FIFO_DMA_MODE FCR_DMA
#ifdef CPU_XBURST
#define FCR_UART_ON 0x10
#endif
#define FCR_RX_LOW 0x00
#define FIFO_RX_LOW FCR_RX_LOW
#define FCR_RX_MEDL 0x40
#define FIFO_RX_MEDL FCR_RX_MEDL
#define FCR_RX_MEDH 0x80
#define FIFO_RX_MEDH FCR_RX_MEDH
#define FCR_RX_HIGH 0xc0
#define FIFO_RX_HIGH FCR_RX_HIGH
#define FCR_BITS "\20\1ENABLE\2RCV_RST\3XMT_RST\4DMA"
/* 16650 registers #2,[4-7]. Access enabled by LCR_EFR_ENABLE. */
#define com_efr 2 /* enhanced features register (R/W) */
#define REG_EFR com_efr
#define EFR_CTS 0x80
#define EFR_AUTOCTS EFR_CTS
#define EFR_RTS 0x40
#define EFR_AUTORTS EFR_RTS
#define EFR_EFE 0x10 /* enhanced functions enable */
#define com_xon1 4 /* XON 1 character (R/W) */
#define com_xon2 5 /* XON 2 character (R/W) */
#define com_xoff1 6 /* XOFF 1 character (R/W) */
#define com_xoff2 7 /* XOFF 2 character (R/W) */
#define DW_REG_USR 31 /* DesignWare derived Uart Status Reg */
#define com_usr 39 /* Octeon 16750/16550 Uart Status Reg */
#define REG_USR com_usr
#define USR_BUSY 1 /* Uart Busy. Serial transfer in progress */
#define USR_TXFIFO_NOTFULL 2 /* Uart TX FIFO Not full */
/* 16950 register #1. Access enabled by ACR[7]. Also requires !LCR[7]. */
#define com_asr 1 /* additional status register (R[0-7]/W[0-1]) */
/* 16950 register #3. R/W access enabled by ACR[7]. */
#define com_rfl 3 /* receiver fifo level (R) */
/*
* 16950 register #4. Access enabled by ACR[7]. Also requires
* !LCR_EFR_ENABLE.
*/
#define com_tfl 4 /* transmitter fifo level (R) */
/*
* 16950 register #5. Accessible if !LCR_EFR_ENABLE. Read access also
* requires ACR[6].
*/
#define com_icr 5 /* index control register (R/W) */
#define REG_ICR com_icr
/*
* 16950 register #7. It is the same as com_scr except it has a different
* abbreviation in the manufacturer's data sheet and it also serves as an
* index into the Indexed Control register set.
*/
#define com_spr com_scr /* scratch pad (and index) register (R/W) */
#define REG_SPR com_scr
/*
* 16950 indexed control registers #[0-0x13]. Access is via index in SPR,
* data in ICR (if ICR is accessible).
*/
#define com_acr 0 /* additional control register (R/W) */
#define REG_ACR com_acr
#define ACR_ASE 0x80 /* ASR/RFL/TFL enable */
#define ACR_ICRE 0x40 /* ICR enable */
#define ACR_TLE 0x20 /* TTL/RTL enable */
#define com_cpr 1 /* clock prescaler register (R/W) */
#define com_tcr 2 /* times clock register (R/W) */
#define com_ttl 4 /* transmitter trigger level (R/W) */
#define com_rtl 5 /* receiver trigger level (R/W) */
/* ... */
/* Hardware extension mode register for RSB-2000/3000. */
#define com_emr com_msr
#define EMR_EXBUFF 0x04
#define EMR_CTSFLW 0x08
#define EMR_DSRFLW 0x10
#define EMR_RTSFLW 0x20
#define EMR_DTRFLW 0x40
#define EMR_EFMODE 0x80
#endif

View File

@@ -0,0 +1,301 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _PCI_CORE_H_
#define _PCI_CORE_H_
#include <sys/queue.h>
#include <assert.h>
#include "types.h"
#include "pcireg.h"
#define PCI_BARMAX PCIR_MAX_BAR_0 /* BAR registers in a Type 0 header */
struct vmctx;
struct pci_vdev;
struct memory_region;
/*
 * Operations vector implemented by each emulated PCI device type.
 * Instances are collected in the pci_vdev_ops_set section via
 * DEFINE_PCI_DEVTYPE() so the DM can enumerate and initialize them.
 */
struct pci_vdev_ops {
char *class_name; /* Name of device class */
/* instance creation */
int (*vdev_init)(struct vmctx *, struct pci_vdev *,
char *opts);
/* instance deinit */
void (*vdev_deinit)(struct vmctx *, struct pci_vdev *,
char *opts);
/* ACPI DSDT enumeration */
void (*vdev_write_dsdt)(struct pci_vdev *);
/* ops related to physical resources */
void (*vdev_phys_access)(struct vmctx *ctx, struct pci_vdev *dev);
/* config space read/write callbacks */
int (*vdev_cfgwrite)(struct vmctx *ctx, int vcpu,
struct pci_vdev *pi, int offset,
int bytes, uint32_t val);
int (*vdev_cfgread)(struct vmctx *ctx, int vcpu,
struct pci_vdev *pi, int offset,
int bytes, uint32_t *retval);
/* BAR read/write callbacks */
void (*vdev_barwrite)(struct vmctx *ctx, int vcpu,
struct pci_vdev *pi, int baridx,
uint64_t offset, int size, uint64_t value);
uint64_t (*vdev_barread)(struct vmctx *ctx, int vcpu,
struct pci_vdev *pi, int baridx,
uint64_t offset, int size);
};
/*
* Put all PCI instances' addresses into one section named pci_devemu_set
* so that DM could enumerate and initialize each of them.
*/
#define DEFINE_PCI_DEVTYPE(x) DATA_SET(pci_vdev_ops_set, x)
enum pcibar_type {
PCIBAR_NONE,
PCIBAR_IO,
PCIBAR_MEM32,
PCIBAR_MEM64,
PCIBAR_MEMHI64
};
/* State of a single emulated BAR. */
struct pcibar {
enum pcibar_type type; /* io or memory */
uint64_t size; /* size of the region, in bytes */
uint64_t addr; /* base address programmed into the BAR */
};
#define PI_NAMESZ 40
/* One entry of the emulated MSI-X table (MSIX_TABLE_ENTRY_SIZE bytes). */
struct msix_table_entry {
uint64_t addr; /* message address */
uint32_t msg_data; /* message data */
uint32_t vector_control; /* per-vector control bits (incl. mask) */
} __attribute__((packed));
/*
* In case the structure is modified to hold extra information, use a define
* for the size that should be emulated.
*/
#define MSIX_TABLE_ENTRY_SIZE 16
#define MAX_MSIX_TABLE_ENTRIES 2048
#define PBA_SIZE(msgnum) (roundup2((msgnum), 64) / 8)
enum lintr_stat {
IDLE,
ASSERTED,
PENDING
};
/* Per-instance state of an emulated PCI device. */
struct pci_vdev {
struct pci_vdev_ops *dev_ops; /* ops vector for this device type */
struct vmctx *vmctx; /* owning VM context */
uint8_t bus, slot, func; /* location on the virtual PCI bus */
char name[PI_NAMESZ]; /* instance name */
int bar_getsize; /* NOTE(review): appears to flag BAR-sizing access — confirm */
int prevcap; /* presumably offset of the last capability added — TODO confirm */
int capend; /* presumably end offset of the capability list — TODO confirm */
/* legacy INTx interrupt state */
struct {
int8_t pin;
enum lintr_stat state;
int pirq_pin;
int ioapic_irq;
pthread_mutex_t lock;
} lintr;
/* MSI capability state */
struct {
int enabled;
uint64_t addr;
uint64_t msg_data;
int maxmsgnum;
} msi;
/* MSI-X capability state */
struct {
int enabled;
int table_bar; /* BAR holding the MSI-X table */
int pba_bar; /* BAR holding the pending-bit array */
uint32_t table_offset;
int table_count;
uint32_t pba_offset;
int pba_size;
int function_mask;
struct msix_table_entry *table; /* allocated at runtime */
void *pba_page;
int pba_page_offset;
} msix;
void *arg; /* devemu-private data */
uint8_t cfgdata[PCI_REGMAX + 1]; /* emulated config space contents */
struct pcibar bar[PCI_BARMAX + 1]; /* Type 0 header BARs */
};
/* MSI capability as it appears in config space (packed, 14 bytes). */
struct msicap {
uint8_t capid; /* capability ID */
uint8_t nextptr; /* offset of next capability */
uint16_t msgctrl; /* message control */
uint32_t addrlo; /* message address, low 32 bits */
uint32_t addrhi; /* message address, high 32 bits */
uint16_t msgdata; /* message data */
} __attribute__((packed));
static_assert(sizeof(struct msicap) == 14, "compile-time assertion failed");
/* MSI-X capability as it appears in config space (packed, 12 bytes). */
struct msixcap {
uint8_t capid; /* capability ID */
uint8_t nextptr; /* offset of next capability */
uint16_t msgctrl; /* message control */
uint32_t table_info; /* bar index and offset within it */
uint32_t pba_info; /* bar index and offset within it */
} __attribute__((packed));
static_assert(sizeof(struct msixcap) == 12, "compile-time assertion failed");
/* PCI Express capability as it appears in config space (packed, 60 bytes). */
struct pciecap {
uint8_t capid; /* capability ID */
uint8_t nextptr; /* offset of next capability */
uint16_t pcie_capabilities;
uint32_t dev_capabilities; /* all devices */
uint16_t dev_control;
uint16_t dev_status;
uint32_t link_capabilities; /* devices with links */
uint16_t link_control;
uint16_t link_status;
uint32_t slot_capabilities; /* ports with slots */
uint16_t slot_control;
uint16_t slot_status;
uint16_t root_control; /* root ports */
uint16_t root_capabilities;
uint32_t root_status;
uint32_t dev_capabilities2; /* all devices */
uint16_t dev_control2;
uint16_t dev_status2;
uint32_t link_capabilities2; /* devices with links */
uint16_t link_control2;
uint16_t link_status2;
uint32_t slot_capabilities2; /* ports with slots */
uint16_t slot_control2;
uint16_t slot_status2;
} __attribute__((packed));
static_assert(sizeof(struct pciecap) == 60, "compile-time assertion failed");
typedef void (*pci_lintr_cb)(int b, int s, int pin, int pirq_pin,
int ioapic_irq, void *arg);
int init_pci(struct vmctx *ctx);
void deinit_pci(struct vmctx *ctx);
void msicap_cfgwrite(struct pci_vdev *pi, int capoff, int offset,
int bytes, uint32_t val);
void msixcap_cfgwrite(struct pci_vdev *pi, int capoff, int offset,
int bytes, uint32_t val);
void pci_callback(void);
int pci_emul_alloc_bar(struct pci_vdev *pdi, int idx,
enum pcibar_type type, uint64_t size);
int pci_emul_alloc_pbar(struct pci_vdev *pdi, int idx,
uint64_t hostbase, enum pcibar_type type,
uint64_t size);
int pci_emul_add_msicap(struct pci_vdev *pi, int msgnum);
int pci_emul_add_pciecap(struct pci_vdev *pi, int pcie_device_type);
void pci_generate_msi(struct pci_vdev *pi, int msgnum);
void pci_generate_msix(struct pci_vdev *pi, int msgnum);
void pci_lintr_assert(struct pci_vdev *pi);
void pci_lintr_deassert(struct pci_vdev *pi);
void pci_lintr_request(struct pci_vdev *pi);
int pci_msi_enabled(struct pci_vdev *pi);
int pci_msix_enabled(struct pci_vdev *pi);
int pci_msix_table_bar(struct pci_vdev *pi);
int pci_msix_pba_bar(struct pci_vdev *pi);
int pci_msi_maxmsgnum(struct pci_vdev *pi);
int pci_parse_slot(char *opt);
void pci_populate_msicap(struct msicap *cap, int msgs, int nextptr);
int pci_emul_add_msixcap(struct pci_vdev *pi, int msgnum, int barnum);
int pci_emul_msix_twrite(struct pci_vdev *pi, uint64_t offset, int size,
uint64_t value);
uint64_t pci_emul_msix_tread(struct pci_vdev *pi, uint64_t offset, int size);
int pci_count_lintr(int bus);
void pci_walk_lintr(int bus, pci_lintr_cb cb, void *arg);
void pci_write_dsdt(void);
uint64_t pci_ecfg_base(void);
int pci_bus_configured(int bus);
int emulate_pci_cfgrw(struct vmctx *ctx, int vcpu, int in, int bus,
int slot, int func, int reg, int bytes, int *value);
static inline void
pci_set_cfgdata8(struct pci_vdev *pi, int offset, uint8_t val)
{
	/* Store one byte into the emulated config space. */
	assert(offset <= PCI_REGMAX);
	pi->cfgdata[offset] = val;
}
static inline void
pci_set_cfgdata16(struct pci_vdev *pi, int offset, uint16_t val)
{
	/* Store a naturally-aligned 16-bit word into the config space. */
	assert(offset <= (PCI_REGMAX - 1) && (offset & 1) == 0);
	*(uint16_t *)&pi->cfgdata[offset] = val;
}
static inline void
pci_set_cfgdata32(struct pci_vdev *pi, int offset, uint32_t val)
{
	/* Store a naturally-aligned 32-bit word into the config space. */
	assert(offset <= (PCI_REGMAX - 3) && (offset & 3) == 0);
	*(uint32_t *)&pi->cfgdata[offset] = val;
}
static inline uint8_t
pci_get_cfgdata8(struct pci_vdev *pi, int offset)
{
	/* Fetch one byte from the emulated config space. */
	assert(offset <= PCI_REGMAX);
	return pi->cfgdata[offset];
}
static inline uint16_t
pci_get_cfgdata16(struct pci_vdev *pi, int offset)
{
	/* Fetch a naturally-aligned 16-bit word from the config space. */
	assert(offset <= (PCI_REGMAX - 1) && (offset & 1) == 0);
	return *(const uint16_t *)&pi->cfgdata[offset];
}
static inline uint32_t
pci_get_cfgdata32(struct pci_vdev *pi, int offset)
{
	/* Fetch a naturally-aligned 32-bit word from the config space. */
	assert(offset <= (PCI_REGMAX - 3) && (offset & 3) == 0);
	return *(const uint32_t *)&pi->cfgdata[offset];
}
#endif /* _PCI_CORE_H_ */

148
devicemodel/include/pciio.h Normal file
View File

@@ -0,0 +1,148 @@
/*-
* Copyright (c) 1997, Stefan Esser <se@FreeBSD.ORG>
* Copyright (c) 1997, 1998, 1999, Kenneth D. Merry <ken@FreeBSD.ORG>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef _PCIIO_H_
#define _PCIIO_H_
#include "asm/ioctl.h"
#define PCI_MAXNAMELEN 16
/* Status reported back by the PCIOCGETCONF ioctl. */
typedef enum {
PCI_GETCONF_LAST_DEVICE, /* no devices remain beyond those returned */
PCI_GETCONF_LIST_CHANGED, /* device list generation changed underneath the caller */
PCI_GETCONF_MORE_DEVS, /* more devices remain; call again to continue */
PCI_GETCONF_ERROR /* the request failed */
} pci_getconf_status;
/*
 * Bitmask selecting which pci_match_conf fields participate in matching
 * (used in the pci_match_conf.flags "Matching expression").
 */
typedef enum {
PCI_GETCONF_NO_MATCH = 0x0000,
PCI_GETCONF_MATCH_DOMAIN = 0x0001,
PCI_GETCONF_MATCH_BUS = 0x0002,
PCI_GETCONF_MATCH_DEV = 0x0004,
PCI_GETCONF_MATCH_FUNC = 0x0008,
PCI_GETCONF_MATCH_NAME = 0x0010,
PCI_GETCONF_MATCH_UNIT = 0x0020,
PCI_GETCONF_MATCH_VENDOR = 0x0040,
PCI_GETCONF_MATCH_DEVICE = 0x0080,
PCI_GETCONF_MATCH_CLASS = 0x0100
} pci_getconf_flags;
struct pcisel {
u_int32_t domain; /* domain number */
u_int8_t bus; /* bus number */
u_int8_t dev; /* device on this bus */
u_int8_t func; /* function on this device */
};
struct pci_conf {
struct pcisel sel; /* domain+bus+slot+function */
u_int8_t hdr; /* PCI header type */
u_int16_t subvendor; /* card vendor ID */
u_int16_t subdevice; /* card device ID, assigned by
* card vendor
*/
u_int16_t vendor; /* chip vendor ID */
u_int16_t device; /* chip device ID, assigned by
* chip vendor
*/
u_int8_t pc_class; /* chip PCI class */
u_int8_t subclass; /* chip PCI subclass */
u_int8_t progif; /* chip PCI programming interface */
u_int8_t revid; /* chip revision ID */
char pd_name[PCI_MAXNAMELEN + 1]; /* device name */
u_long pd_unit; /* device unit number */
};
struct pci_match_conf {
struct pcisel sel; /* domain+bus+slot+function */
char pd_name[PCI_MAXNAMELEN + 1]; /* device name */
u_long pd_unit; /* Unit number */
u_int16_t vendor; /* PCI Vendor ID */
u_int16_t device; /* PCI Device ID */
u_int8_t pc_class; /* PCI class */
pci_getconf_flags flags; /* Matching expression */
};
struct pci_conf_io {
u_int32_t pat_buf_len; /* pattern buffer length */
u_int32_t num_patterns; /* number of patterns */
struct pci_match_conf *patterns; /* pattern buffer */
u_int32_t match_buf_len; /* match buffer length */
u_int32_t num_matches; /* number of matches returned */
struct pci_conf *matches; /* match buffer */
u_int32_t offset; /* offset into device list */
u_int32_t generation; /* device list generation */
pci_getconf_status status; /* request status */
};
struct pci_io {
struct pcisel sel; /* device to operate on */
int reg; /* configuration register to examine */
int width; /* width (in bytes) of read or write */
u_int32_t data; /* data to write or result of read */
};
struct pci_bar_io {
struct pcisel sel; /* device to operate on */
int reg; /* starting address of BAR */
int pbi_enabled; /* decoding enabled */
uint64_t base; /* current value of BAR */
uint64_t length; /* length of BAR */
};
/* One element of a device's Vital Product Data (VPD) list. */
struct pci_vpd_element {
char keyword[2]; /* two-character VPD keyword */
uint8_t flags; /* PVE_FLAG_* bits below */
uint8_t datalen; /* number of bytes in data[] (used by PVE_NEXT) */
uint8_t data[0]; /* variable-length payload */
};
#define PVE_FLAG_IDENT 0x01 /* Element is the string identifier */
#define PVE_FLAG_RW 0x02 /* Element is read/write */
#define PVE_NEXT(pve) \
((struct pci_vpd_element *)((char *)(pve) + \
sizeof(struct pci_vpd_element) + (pve)->datalen))
struct pci_list_vpd_io {
struct pcisel plvi_sel; /* device to operate on */
size_t plvi_len; /* size of the data area */
struct pci_vpd_element *plvi_data;
};
#define PCIOCGETCONF _IOWR('p', 5, struct pci_conf_io)
#define PCIOCREAD _IOWR('p', 2, struct pci_io)
#define PCIOCWRITE _IOWR('p', 3, struct pci_io)
#define PCIOCATTACHED _IOWR('p', 4, struct pci_io)
#define PCIOCGETBAR _IOWR('p', 6, struct pci_bar_io)
#define PCIOCLISTVPD _IOWR('p', 7, struct pci_list_vpd_io)
#endif /* !_PCIIO_H_ */

1068
devicemodel/include/pcireg.h Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,39 @@
/*-
* Copyright (c) 2015 Tycho Nightingale <tycho.nightingale@pluribusnetworks.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _PS2KBD_H_
#define _PS2KBD_H_
struct atkbdc_base;
struct ps2kbd_info *ps2kbd_init(struct atkbdc_base *base);
int ps2kbd_read(struct ps2kbd_info *kbd, uint8_t *val);
void ps2kbd_write(struct ps2kbd_info *kbd, uint8_t val);
#endif /* _PS2KBD_H_ */

View File

@@ -0,0 +1,41 @@
/*-
* Copyright (c) 2015 Tycho Nightingale <tycho.nightingale@pluribusnetworks.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _PS2MOUSE_H_
#define _PS2MOUSE_H_
struct atkbdc_base;
struct ps2mouse_info *ps2mouse_init(struct atkbdc_base *base);
int ps2mouse_read(struct ps2mouse_info *mouse, uint8_t *val);
void ps2mouse_write(struct ps2mouse_info *mouse, uint8_t val, int insert);
void ps2mouse_toggle(struct ps2mouse_info *mouse, int enable);
int ps2mouse_fifocnt(struct ps2mouse_info *mouse);
#endif /* _PS2MOUSE_H_ */

View File

@@ -0,0 +1,308 @@
/*
* common definition
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright (c) 2017 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Copyright (C) 2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/**
* @file acrn_common.h
*
* @brief acrn common data structure for hypercall or ioctl
*/
#ifndef _ACRN_COMMON_H_
#define _ACRN_COMMON_H_
#include <types.h>
/*
* Common structures for ACRN/VHM/DM
*/
/*
* IO request
*/
#define VHM_REQUEST_MAX 16
#define REQ_STATE_PENDING 0
#define REQ_STATE_SUCCESS 1
#define REQ_STATE_PROCESSING 2
#define REQ_STATE_FAILED -1
#define REQ_PORTIO 0
#define REQ_MMIO 1
#define REQ_PCICFG 2
#define REQ_WP 3
#define REQUEST_READ 0
#define REQUEST_WRITE 1
/**
* @brief Hypercall
*
* @addtogroup acrn_hypercall ACRN Hypercall
* @{
*/
/* MMIO access request (REQ_MMIO). */
struct mmio_request {
uint32_t direction; /* REQUEST_READ or REQUEST_WRITE */
uint32_t reserved;
int64_t address; /* address of the MMIO access */
int64_t size; /* access width, in bytes */
int64_t value; /* value transferred by the access */
} __aligned(8);
/* Port-I/O access request (REQ_PORTIO). */
struct pio_request {
uint32_t direction; /* REQUEST_READ or REQUEST_WRITE */
uint32_t reserved;
int64_t address; /* I/O port address */
int64_t size; /* access width, in bytes */
int32_t value; /* value transferred by the access */
} __aligned(8);
/* PCI config-space access request (REQ_PCICFG). */
struct pci_request {
uint32_t direction; /* REQUEST_READ or REQUEST_WRITE */
uint32_t reserved[3];/* need keep same header fields with pio_request */
int64_t size; /* access width, in bytes */
int32_t value; /* value transferred by the access */
int32_t bus; /* target bus number */
int32_t dev; /* target device number */
int32_t func; /* target function number */
int32_t reg; /* config-space register offset */
} __aligned(8);
/* vhm_request are 256Bytes aligned */
struct vhm_request {
	/* offset: 0bytes - 63bytes */
	union {
		/** request type: one of REQ_PORTIO/REQ_MMIO/REQ_PCICFG/REQ_WP */
		uint32_t type;
		/* pad the first cache-line-sized region to 64 bytes */
		int32_t reserved0[16];
	};
	/* offset: 64bytes-127bytes */
	union {
		/* payload; which member is live is selected by type above */
		struct pio_request pio_request;
		struct pci_request pci_request;
		struct mmio_request mmio_request;
		int64_t reserved1[8];
	} reqs;
	/* True: valid req which need VHM to process.
	 * ACRN write, VHM read only
	 **/
	int32_t valid;
	/* the client which is distributed to handle this request */
	int32_t client;
	/* Request state, uses the REQ_STATE_* values:
	 * 1: VHM had processed and success
	 * 0: VHM had not yet processed
	 * -1: VHM failed to process. Invalid request
	 * VHM write, ACRN read only
	 */
	int32_t processed;
} __aligned(256);
/**
 * @brief Per-VM I/O request buffer shared with the hypervisor.
 *
 * Holds VHM_REQUEST_MAX request slots and is padded to exactly one
 * 4 KiB page; its guest-physical address is registered via
 * struct acrn_set_ioreq_buffer (HC_SET_IOREQ_BUFFER).
 */
struct vhm_request_buffer {
	union {
		struct vhm_request req_queue[VHM_REQUEST_MAX];
		int8_t reserved[4096];
	};
} __aligned(4096);
/**
 * @brief Info to create a VM, the parameter for HC_CREATE_VM hypercall
 */
struct acrn_create_vm {
	/** created vmid returned to VHM (OUT). Keep it first field */
	int32_t vmid;
	/** number of vcpus this VM wants to create */
	uint32_t vcpu_num;
	/** the GUID of this VM */
	uint8_t GUID[16];
	/** whether Secure World is enabled for this VM */
	uint8_t secure_world_enabled;
	/** Reserved for future use */
	uint8_t reserved[31];
} __aligned(8);
/**
 * @brief Info to create a vcpu
 *
 * the parameter for HC_CREATE_VCPU hypercall
 */
struct acrn_create_vcpu {
	/** the virtual cpu id for the vcpu to create */
	uint32_t vcpu_id;
	/** the physical cpu id the new vcpu is bound to */
	uint32_t pcpu_id;
} __aligned(8);
/**
 * @brief Info to set ioreq buffer for a created VM
 *
 * the parameter for HC_SET_IOREQ_BUFFER hypercall
 */
struct acrn_set_ioreq_buffer {
	/** guest-physical address of the per-VM struct vhm_request_buffer */
	uint64_t req_buf;
} __aligned(8);
/** Interrupt type for acrn_irqline: inject interrupt to IOAPIC */
#define ACRN_INTR_TYPE_ISA 0
/** Interrupt type for acrn_irqline: inject interrupt to both PIC and IOAPIC */
#define ACRN_INTR_TYPE_IOAPIC 1
/**
 * @brief Info to assert/deassert/pulse a virtual irq line for a VM
 *
 * the parameter for HC_ASSERT_IRQLINE/HC_DEASSERT_IRQLINE/HC_PULSE_IRQLINE
 * hypercall
 */
struct acrn_irqline {
	/** interrupt type: ACRN_INTR_TYPE_ISA or ACRN_INTR_TYPE_IOAPIC */
	uint32_t intr_type;
	/** reserved for alignment padding */
	uint32_t reserved;
	/** pic irq for ISA type */
	uint64_t pic_irq;
	/** ioapic irq for IOAPIC & ISA TYPE,
	 * if -1 then this irq will not be injected
	 */
	uint64_t ioapic_irq;
} __aligned(8);
/**
 * @brief Info to inject a msi interrupt for a VM
 *
 * the parameter for HC_INJECT_MSI hypercall
 */
struct acrn_msi_entry {
	/** msi address; bits [19:12] carry the destination vcpu id */
	uint64_t msi_addr;
	/** msi data; bits [7:0] carry the vector */
	uint64_t msi_data;
} __aligned(8);
/**
 * @brief Info to inject a nmi interrupt for a VM
 */
struct acrn_nmi_entry {
	/** id of the virtual cpu to inject the NMI into */
	int64_t vcpu_id;
} __aligned(8);
/**
 * @brief Info to remap pass-through pci msi for a VM
 *
 * the parameter for HC_VM_PCI_MSIX_REMAP hypercall
 */
struct acrn_vm_pci_msix_remap {
	/** pass-through pci device virtual BDF# */
	uint16_t virt_bdf;
	/** pass-through pci device physical BDF# */
	uint16_t phys_bdf;
	/** pass-through pci device MSI/x cap control data */
	uint16_t msi_ctl;
	/** reserved for alignment padding */
	uint16_t reserved;
	/** pass-through pci device msi address to remap, which will
	 * be returned to the caller after remapping
	 */
	uint64_t msi_addr; /* IN/OUT: msi address to fix */
	/** pass-through pci device msi data to remap, which will
	 * be returned to the caller after remapping
	 */
	uint32_t msi_data;
	/** pass-through pci device is msi or msix
	 * 0 - MSI, 1 - MSI-X
	 */
	int32_t msix;
	/** if the pass-through pci device is msix, this field contains
	 * the MSI-X entry table index
	 */
	int32_t msix_entry_index;
	/** if the pass-through pci device is msix, this field contains
	 * Vector Control for MSI-X Entry, field defined in MSIX spec
	 */
	uint32_t vector_ctl;
} __aligned(8);
/**
* @brief The guest config pointer offset.
*
* It's designed to support passing DM config data pointer, based on it,
 * hypervisor would parse then pass DM defined configuration to GUEST vcpu
* when booting guest VM.
* the address 0xd0000 here is designed by DM, as it arranged all memory
* layout below 1M, DM should make sure there is no overlap for the address
* 0xd0000 usage.
*/
#define GUEST_CFG_OFFSET 0xd0000
/**
* @}
*/
#endif /* _ACRN_COMMON_H_ */

View File

@@ -0,0 +1,198 @@
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright (c) 2017 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Copyright (C) 2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/**
* @file vhm_ioctl_defs.h
*
* @brief Virtio and Hypervisor Module definition for ioctl to user space
*/
#ifndef _VHM_IOCTL_DEFS_H_
#define _VHM_IOCTL_DEFS_H_
/* Common structures for ACRN/VHM/DM */
#include "acrn_common.h"
/*
 * Common IOCTL ID definition for VHM/DM
 *
 * Each ID encodes the magic number IC_ID in bits 31:24 and a per-group
 * command number (group base + offset) in the low bits.
 */
#define _IC_ID(x, y) (((x)<<24)|(y))
#define IC_ID 0x43UL

/* General */
#define IC_ID_GEN_BASE 0x0UL
#define IC_GET_API_VERSION _IC_ID(IC_ID, IC_ID_GEN_BASE + 0x00)

/* VM management */
#define IC_ID_VM_BASE 0x10UL
#define IC_CREATE_VM _IC_ID(IC_ID, IC_ID_VM_BASE + 0x00)
#define IC_DESTROY_VM _IC_ID(IC_ID, IC_ID_VM_BASE + 0x01)
#define IC_START_VM _IC_ID(IC_ID, IC_ID_VM_BASE + 0x02)
#define IC_PAUSE_VM _IC_ID(IC_ID, IC_ID_VM_BASE + 0x03)
#define IC_CREATE_VCPU _IC_ID(IC_ID, IC_ID_VM_BASE + 0x04)

/* IRQ and Interrupts */
#define IC_ID_IRQ_BASE 0x20UL
#define IC_ASSERT_IRQLINE _IC_ID(IC_ID, IC_ID_IRQ_BASE + 0x00)
#define IC_DEASSERT_IRQLINE _IC_ID(IC_ID, IC_ID_IRQ_BASE + 0x01)
#define IC_PULSE_IRQLINE _IC_ID(IC_ID, IC_ID_IRQ_BASE + 0x02)
#define IC_INJECT_MSI _IC_ID(IC_ID, IC_ID_IRQ_BASE + 0x03)

/* DM ioreq management */
#define IC_ID_IOREQ_BASE 0x30UL
#define IC_SET_IOREQ_BUFFER _IC_ID(IC_ID, IC_ID_IOREQ_BASE + 0x00)
#define IC_NOTIFY_REQUEST_FINISH _IC_ID(IC_ID, IC_ID_IOREQ_BASE + 0x01)
#define IC_CREATE_IOREQ_CLIENT _IC_ID(IC_ID, IC_ID_IOREQ_BASE + 0x02)
#define IC_ATTACH_IOREQ_CLIENT _IC_ID(IC_ID, IC_ID_IOREQ_BASE + 0x03)
#define IC_DESTROY_IOREQ_CLIENT _IC_ID(IC_ID, IC_ID_IOREQ_BASE + 0x04)

/* Guest memory management */
#define IC_ID_MEM_BASE 0x40UL
#define IC_ALLOC_MEMSEG _IC_ID(IC_ID, IC_ID_MEM_BASE + 0x00)
#define IC_SET_MEMSEG _IC_ID(IC_ID, IC_ID_MEM_BASE + 0x01)

/* PCI assignment*/
#define IC_ID_PCI_BASE 0x50UL
#define IC_ASSIGN_PTDEV _IC_ID(IC_ID, IC_ID_PCI_BASE + 0x00)
#define IC_DEASSIGN_PTDEV _IC_ID(IC_ID, IC_ID_PCI_BASE + 0x01)
#define IC_VM_PCI_MSIX_REMAP _IC_ID(IC_ID, IC_ID_PCI_BASE + 0x02)
#define IC_SET_PTDEV_INTR_INFO _IC_ID(IC_ID, IC_ID_PCI_BASE + 0x03)
#define IC_RESET_PTDEV_INTR_INFO _IC_ID(IC_ID, IC_ID_PCI_BASE + 0x04)
/**
 * struct vm_memseg - memory segment info for guest
 *
 * @len: length of memory segment in bytes
 * @gpa: guest physical start address of memory segment
 */
struct vm_memseg {
	uint64_t len;
	uint64_t gpa;
};
/* Values for vm_memmap.type */
#define VM_SYSMEM 0	/* ordinary system RAM */
#define VM_MMIO 1	/* MMIO mapping; hpa below is meaningful */

/**
 * struct vm_memmap - EPT memory mapping info for guest
 *
 * @type: memory mapping type (VM_SYSMEM or VM_MMIO)
 * @gpa: guest physical start address of memory mapping
 * @hpa: host physical start address of memory
 * @len: the length of memory range mapped
 * @prot: memory mapping attribute (RWX bits)
 */
struct vm_memmap {
	uint32_t type;
	/* padding so gpa/hpa/len stay 8-byte aligned */
	uint32_t reserved;
	uint64_t gpa;
	uint64_t hpa; /* only for type == VM_MMIO */
	uint64_t len; /* mmap length */
	uint32_t prot; /* RWX */
};
/**
 * struct ic_ptdev_irq - pass thru device irq data structure
 *
 * Which union member is live is selected by @type (IRQ_INTX vs
 * IRQ_MSI/IRQ_MSIX).
 */
struct ic_ptdev_irq {
#define IRQ_INTX 0
#define IRQ_MSI 1
#define IRQ_MSIX 2
	/** @type: irq type, one of IRQ_INTX/IRQ_MSI/IRQ_MSIX */
	uint32_t type;
	/** @virt_bdf: virtual bdf description of pass thru device */
	uint16_t virt_bdf; /* IN: Device virtual BDF# */
	/** @phys_bdf: physical bdf description of pass thru device */
	uint16_t phys_bdf; /* IN: Device physical BDF# */
	/** union */
	union {
		/** struct intx - info of IOAPIC/PIC interrupt */
		struct {
			/** @virt_pin: virtual IOAPIC pin */
			uint32_t virt_pin;
			/** @phys_pin: physical IOAPIC pin */
			uint32_t phys_pin;
			/** @is_pic_pin: non-zero if the pin is a PIC pin */
			uint32_t is_pic_pin;
		} intx;
		/** struct msix - info of MSI/MSIX interrupt */
		struct {
			/* Keep this field on top of msix */
			/** @vector_cnt: vector count of MSI/MSIX */
			uint32_t vector_cnt;
			/** @table_size: size of MSIX table(round up to 4K) */
			uint32_t table_size;
			/** @table_paddr: physical address of MSIX table */
			uint64_t table_paddr;
		} msix;
	};
};
/**
 * struct ioreq_notify - data structure to notify hypervisor ioreq is handled
 *
 * @client_id: client id to identify ioreq client
 * @vcpu: identify the ioreq submitter
 */
struct ioreq_notify {
	int32_t client_id;
	uint32_t vcpu;
};
/**
 * struct api_version - data structure to track VHM API version
 *
 * Returned by the IC_GET_API_VERSION ioctl.
 *
 * @major_version: major version of VHM API
 * @minor_version: minor version of VHM API
 */
struct api_version {
	uint32_t major_version;
	uint32_t minor_version;
};
#endif /* _VHM_IOCTL_DEFS_H_ */

51
devicemodel/include/rtc.h Normal file
View File

@@ -0,0 +1,51 @@
/*-
* Copyright (c) 2014 Neel Natu (neel@freebsd.org)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _RTC_H_
#define _RTC_H_

#include "types.h"

/* Base I/O port of the PC real-time clock (address register). */
#define IO_RTC 0x070 /* RTC */

struct vrtc;
struct vmctx;

/* Create the virtual RTC for @ctx. @local_time presumably selects a
 * local-time vs UTC wall-clock base -- confirm against the implementation.
 */
struct vrtc *vrtc_init(struct vmctx *ctx, int local_time);
/* Tear down a vrtc previously created by vrtc_init(). */
void vrtc_cleanup(struct vrtc *vrtc);
/* Reset the virtual RTC to its power-on state. */
void vrtc_reset(struct vrtc *vrtc);
/* Get/set the virtual RTC time, in seconds (time_t). */
time_t vrtc_get_time(struct vrtc *vrtc);
int vrtc_set_time(struct vrtc *vrtc, time_t secs);
/* Read/write one byte of RTC NVRAM at @offset. */
int vrtc_nvram_read(struct vrtc *vrtc, int offset, uint8_t *retval);
int vrtc_nvram_write(struct vrtc *vrtc, int offset, uint8_t value);
/* I/O port handlers for the RTC address (0x70) and data ports;
 * signature matches the DM's in/out port handler convention.
 */
int vrtc_addr_handler(struct vmctx *ctx, int vcpu, int in, int port,
		      int bytes, uint32_t *eax, void *arg);
int vrtc_data_handler(struct vmctx *ctx, int vcpu, int in, int port,
		      int bytes, uint32_t *eax, void *arg);
#endif

View File

@@ -0,0 +1,276 @@
/*-
* Copyright (c) 1989, 1990 William F. Jolitz
* Copyright (c) 1990 The Regents of the University of California.
* All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* William Jolitz.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: @(#)segments.h 7.1 (Berkeley) 5/9/91
* $FreeBSD$
*/
#ifndef _SEGMENTS_H_
#define _SEGMENTS_H_
/*
* X86 Segmentation Data Structures and definitions
*/
/*
* Selectors
*/
#define SEL_RPL_MASK 3 /* requester priv level */
#define ISPL(s) ((s)&3) /* priority level of a selector */
#define SEL_KPL 0 /* kernel priority level */
#define SEL_UPL 3 /* user priority level */
#define ISLDT(s) ((s)&SEL_LDT) /* is it local or global */
#define SEL_LDT 4 /* local descriptor table */
#define IDXSEL(s) (((s)>>3) & 0x1fff) /* index of selector */
#define LSEL(s, r) (((s)<<3) | SEL_LDT | r) /* a local selector */
#define GSEL(s, r) (((s)<<3) | r) /* a global selector */
/*
* User segment descriptors (%cs, %ds etc for i386 apps. 64 bit wide)
* For long-mode apps, %cs only has the conforming bit in sd_type, the sd_dpl,
* sd_p, sd_l and sd_def32 which must be zero). %ds only has sd_p.
*/
/* 32-bit (legacy/i386) segment descriptor: 8 bytes, bitfields laid out
 * to match the hardware GDT/LDT entry format.
 */
struct segment_descriptor {
	unsigned sd_lolimit:16;	/* segment extent (lsb) */
	unsigned sd_lobase:24;	/* segment base address (lsb) */
	unsigned sd_type:5;	/* segment type */
	unsigned sd_dpl:2;	/* segment descriptor priority level */
	unsigned sd_p:1;	/* segment descriptor present */
	unsigned sd_hilimit:4;	/* segment extent (msb) */
	unsigned sd_xx:2;	/* unused */
	unsigned sd_def32:1;	/* default 32 vs 16 bit size */
	unsigned sd_gran:1;	/* limit granularity (byte/page units)*/
	unsigned sd_hibase:8;	/* segment base address (msb) */
} __attribute__((packed));
/* User segment descriptor (%cs, %ds etc.): same 8-byte layout as
 * struct segment_descriptor, but one of the two unused bits is the
 * long-mode (L) bit, meaningful for 64-bit %cs.
 */
struct user_segment_descriptor {
	unsigned sd_lolimit:16;	/* segment extent (lsb) */
	unsigned sd_lobase:24;	/* segment base address (lsb) */
	unsigned sd_type:5;	/* segment type */
	unsigned sd_dpl:2;	/* segment descriptor priority level */
	unsigned sd_p:1;	/* segment descriptor present */
	unsigned sd_hilimit:4;	/* segment extent (msb) */
	unsigned sd_xx:1;	/* unused */
	unsigned sd_long:1;	/* long mode (cs only) */
	unsigned sd_def32:1;	/* default 32 vs 16 bit size */
	unsigned sd_gran:1;	/* limit granularity (byte/page units)*/
	unsigned sd_hibase:8;	/* segment base address (msb) */
} __attribute__((packed));
#define USD_GETBASE(sd) (((sd)->sd_lobase) | (sd)->sd_hibase << 24)
#define USD_SETBASE(sd, b) do { (sd)->sd_lobase = (b); \
(sd)->sd_hibase = ((b) >> 24); } \
while (0)
#define USD_GETLIMIT(sd) (((sd)->sd_lolimit) | (sd)->sd_hilimit << 16)
#define USD_SETLIMIT(sd, l) do { (sd)->sd_lolimit = (l); \
(sd)->sd_hilimit = ((l) >> 16); } \
while (0)
#ifdef __i386__
/*
 * Gate descriptors (e.g. indirect descriptors) -- 32-bit (i386) format,
 * 8 bytes per entry.
 */
struct gate_descriptor {
	unsigned gd_looffset:16;	/* gate offset (lsb) */
	unsigned gd_selector:16;	/* gate segment selector */
	unsigned gd_stkcpy:5;	/* number of stack wds to cpy */
	unsigned gd_xx:3;	/* unused */
	unsigned gd_type:5;	/* segment type */
	unsigned gd_dpl:2;	/* segment descriptor priority level */
	unsigned gd_p:1;	/* segment descriptor present */
	unsigned gd_hioffset:16;	/* gate offset (msb) */
} __attribute__((packed));

/*
 * Generic descriptor: a GDT/LDT slot may hold either form.
 */
union descriptor {
	struct segment_descriptor sd;
	struct gate_descriptor gd;
};
#else
/*
 * Gate descriptors (e.g. indirect descriptors, trap, interrupt etc. 128 bit)
 * Only interrupt and trap gates have gd_ist.
 */
struct gate_descriptor {
	uint64_t gd_looffset:16;	/* gate offset (lsb) */
	uint64_t gd_selector:16;	/* gate segment selector */
	uint64_t gd_ist:3;	/* IST table index */
	uint64_t gd_xx:5;	/* unused */
	uint64_t gd_type:5;	/* segment type */
	uint64_t gd_dpl:2;	/* segment descriptor priority level */
	uint64_t gd_p:1;	/* segment descriptor present */
	uint64_t gd_hioffset:48;	/* gate offset (msb) */
	uint64_t sd_xx1:32;	/* upper 4 bytes, reserved in hardware */
} __attribute__((packed));

/*
 * Generic descriptor: a descriptor-table slot may hold either form.
 */
union descriptor {
	struct user_segment_descriptor sd;
	struct gate_descriptor gd;
};
#endif
/* system segments and gate types */
#define SDT_SYSNULL 0 /* system null */
#define SDT_SYS286TSS 1 /* system 286 TSS available */
#define SDT_SYSLDT 2 /* system local descriptor table */
#define SDT_SYS286BSY 3 /* system 286 TSS busy */
#define SDT_SYS286CGT 4 /* system 286 call gate */
#define SDT_SYSTASKGT 5 /* system task gate */
#define SDT_SYS286IGT 6 /* system 286 interrupt gate */
#define SDT_SYS286TGT 7 /* system 286 trap gate */
#define SDT_SYSNULL2 8 /* system null again */
#define SDT_SYS386TSS 9 /* system 386 TSS available */
#define SDT_SYSTSS 9 /* system available 64 bit TSS */
#define SDT_SYSNULL3 10 /* system null again */
#define SDT_SYS386BSY 11 /* system 386 TSS busy */
#define SDT_SYSBSY 11 /* system busy 64 bit TSS */
#define SDT_SYS386CGT 12 /* system 386 call gate */
#define SDT_SYSCGT 12 /* system 64 bit call gate */
#define SDT_SYSNULL4 13 /* system null again */
#define SDT_SYS386IGT 14 /* system 386 interrupt gate */
#define SDT_SYSIGT 14 /* system 64 bit interrupt gate */
#define SDT_SYS386TGT 15 /* system 386 trap gate */
#define SDT_SYSTGT 15 /* system 64 bit trap gate */
/* memory segment types */
#define SDT_MEMRO 16 /* memory read only */
#define SDT_MEMROA 17 /* memory read only accessed */
#define SDT_MEMRW 18 /* memory read write */
#define SDT_MEMRWA 19 /* memory read write accessed */
#define SDT_MEMROD 20 /* memory read only expand dwn limit */
#define SDT_MEMRODA 21 /* memory read only expand dwn limit accessed */
#define SDT_MEMRWD 22 /* memory read write expand dwn limit */
#define SDT_MEMRWDA 23 /* memory read write expand dwn limit accessed*/
#define SDT_MEME 24 /* memory execute only */
#define SDT_MEMEA 25 /* memory execute only accessed */
#define SDT_MEMER 26 /* memory execute read */
#define SDT_MEMERA 27 /* memory execute read accessed */
#define SDT_MEMEC 28 /* memory execute only conforming */
#define SDT_MEMEAC 29 /* memory execute only accessed conforming */
#define SDT_MEMERC 30 /* memory execute read conforming */
#define SDT_MEMERAC 31 /* memory execute read accessed conforming */
/*
* Size of IDT table
*/
#define NIDT 256 /* 32 reserved, 0x80 syscall, most are h/w */
#define NRSVIDT 32 /* reserved entries for cpu exceptions */
/*
* Entries in the Interrupt Descriptor Table (IDT)
*/
#define IDT_DE 0 /* #DE: Divide Error */
#define IDT_DB 1 /* #DB: Debug */
#define IDT_NMI 2 /* Nonmaskable External Interrupt */
#define IDT_BP 3 /* #BP: Breakpoint */
#define IDT_OF 4 /* #OF: Overflow */
#define IDT_BR 5 /* #BR: Bound Range Exceeded */
#define IDT_UD 6 /* #UD: Undefined/Invalid Opcode */
#define IDT_NM 7 /* #NM: No Math Coprocessor */
#define IDT_DF 8 /* #DF: Double Fault */
#define IDT_FPUGP 9 /* Coprocessor Segment Overrun */
#define IDT_TS 10 /* #TS: Invalid TSS */
#define IDT_NP 11 /* #NP: Segment Not Present */
#define IDT_SS 12 /* #SS: Stack Segment Fault */
#define IDT_GP 13 /* #GP: General Protection Fault */
#define IDT_PF 14 /* #PF: Page Fault */
#define IDT_MF 16 /* #MF: FPU Floating-Point Error */
#define IDT_AC 17 /* #AC: Alignment Check */
#define IDT_MC 18 /* #MC: Machine Check */
#define IDT_XF 19 /* #XF: SIMD Floating-Point Exception */
#define IDT_IO_INTS NRSVIDT /* Base of IDT entries for I/O interrupts. */
#define IDT_SYSCALL 0x80 /* System Call Interrupt Vector */
#define IDT_DTRACE_RET 0x92 /* DTrace pid provider Interrupt Vector */
#define IDT_EVTCHN 0x93 /* Xen HVM Event Channel Interrupt Vector */
#if defined(__i386__)
/*
* Entries in the Global Descriptor Table (GDT)
* Note that each 4 entries share a single 32 byte L1 cache line.
* Some of the fast syscall instructions require a specific order here.
*/
#define GNULL_SEL 0 /* Null Descriptor */
#define GPRIV_SEL 1 /* SMP Per-Processor Private Data */
#define GUFS_SEL 2 /* User %fs Descriptor (order critical: 1) */
#define GUGS_SEL 3 /* User %gs Descriptor (order critical: 2) */
#define GCODE_SEL 4 /* Kernel Code Descriptor (order critical: 1) */
#define GDATA_SEL 5 /* Kernel Data Descriptor (order critical: 2) */
#define GUCODE_SEL 6 /* User Code Descriptor (order critical: 3) */
#define GUDATA_SEL 7 /* User Data Descriptor (order critical: 4) */
#define GBIOSLOWMEM_SEL 8 /* BIOS low memory access (must be entry 8) */
#define GPROC0_SEL 9 /* Task state process slot zero and up */
#define GLDT_SEL 10 /* Default User LDT */
#define GUSERLDT_SEL 11 /* User LDT */
#define GPANIC_SEL 12 /* Task state to consider panic from */
#define GBIOSCODE32_SEL 13 /* BIOS interface (32bit Code) */
#define GBIOSCODE16_SEL 14 /* BIOS interface (16bit Code) */
#define GBIOSDATA_SEL 15 /* BIOS interface (Data) */
#define GBIOSUTIL_SEL 16 /* BIOS interface (Utility) */
#define GBIOSARGS_SEL 17 /* BIOS interface (Arguments) */
#define GNDIS_SEL 18 /* For the NDIS layer */
#define NGDT 19
/*
* Entries in the Local Descriptor Table (LDT)
*/
#define LSYS5CALLS_SEL 0 /* forced by intel BCS */
#define LSYS5SIGR_SEL 1
#define LUCODE_SEL 3
#define LUDATA_SEL 5
#define NLDT (LUDATA_SEL + 1)
#else /* !__i386__ */
/*
* Entries in the Global Descriptor Table (GDT)
*/
#define GNULL_SEL 0 /* Null Descriptor */
#define GNULL2_SEL 1 /* Null Descriptor */
#define GUFS32_SEL 2 /* User 32 bit %fs Descriptor */
#define GUGS32_SEL 3 /* User 32 bit %gs Descriptor */
#define GCODE_SEL 4 /* Kernel Code Descriptor */
#define GDATA_SEL 5 /* Kernel Data Descriptor */
#define GUCODE32_SEL 6 /* User 32 bit code Descriptor */
#define GUDATA_SEL 7 /* User 32/64 bit Data Descriptor */
#define GUCODE_SEL 8 /* User 64 bit Code Descriptor */
#define GPROC0_SEL 9 /* TSS for entering kernel etc */
/* slot 10 is second half of GPROC0_SEL */
#define GUSERLDT_SEL 11 /* LDT */
/* slot 12 is second half of GUSERLDT_SEL */
#define NGDT 13
#endif /* __i386__ */
#endif /* !_SEGMENTS_H_ */

View File

@@ -0,0 +1,36 @@
/*-
* Copyright (c) 2014 Tycho Nightingale <tycho.nightingale@pluribusnetworks.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _SMBIOSTBL_H_
#define _SMBIOSTBL_H_
struct vmctx;
int smbios_build(struct vmctx *ctx);
#endif /* _SMBIOSTBL_H_ */

View File

@@ -0,0 +1,914 @@
/*-
* Copyright (c) 1991 The Regents of the University of California.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: @(#)specialreg.h 7.1 (Berkeley) 5/9/91
* $FreeBSD$
*/
#ifndef _SPECIALREG_H_
#define _SPECIALREG_H_
/*
* Bits in 386 special registers:
*/
#define CR0_PE 0x00000001 /* Protected mode Enable */
#define CR0_MP 0x00000002 /* "Math" (fpu) Present */
#define CR0_EM 0x00000004 /* EMulate FPU instructions. (trap ESC only) */
#define CR0_TS 0x00000008 /* Task Switched (if MP, trap ESC and WAIT) */
#define CR0_PG 0x80000000 /* PaGing enable */
/*
* Bits in 486 special registers:
*/
#define CR0_NE 0x00000020 /* Numeric Error enable (EX16 vs IRQ13) */
#define CR0_WP 0x00010000 /* Write Protect (honor page protect in
* all modes)
*/
#define CR0_AM 0x00040000 /* Alignment Mask (set to enable AC flag) */
#define CR0_NW 0x20000000 /* Not Write-through */
#define CR0_CD 0x40000000 /* Cache Disable */
#define CR3_PCID_SAVE 0x8000000000000000
#define CR3_PCID_MASK 0xfff
/*
* Bits in PPro special registers
*/
#define CR4_VME 0x00000001 /* Virtual 8086 mode extensions */
#define CR4_PVI 0x00000002 /* Protected-mode virtual interrupts */
#define CR4_TSD 0x00000004 /* Time stamp disable */
#define CR4_DE 0x00000008 /* Debugging extensions */
#define CR4_PSE 0x00000010 /* Page size extensions */
#define CR4_PAE 0x00000020 /* Physical address extension */
#define CR4_MCE 0x00000040 /* Machine check enable */
#define CR4_PGE 0x00000080 /* Page global enable */
#define CR4_PCE 0x00000100 /* Performance monitoring counter enable */
#define CR4_FXSR 0x00000200 /* Fast FPU save/restore used by OS */
#define CR4_XMM 0x00000400 /* enable SIMD/MMX2 to use except 16 */
#define CR4_VMXE 0x00002000 /* enable VMX operation (Intel-specific) */
#define CR4_FSGSBASE 0x00010000 /* Enable FS/GS BASE accessing instructions */
#define CR4_PCIDE 0x00020000 /* Enable Context ID */
#define CR4_XSAVE 0x00040000 /* XSETBV/XGETBV */
#define CR4_SMEP 0x00100000 /* Supervisor-Mode Execution Prevention */
/*
* Bits in AMD64 special registers. EFER is 64 bits wide.
*/
#define EFER_SCE 0x000000001 /* System Call Extensions (R/W) */
#define EFER_LME 0x000000100 /* Long mode enable (R/W) */
#define EFER_LMA 0x000000400 /* Long mode active (R) */
#define EFER_NXE 0x000000800 /* PTE No-Execute bit enable (R/W) */
#define EFER_SVM 0x000001000 /* SVM enable bit for AMD, reserved for Intel */
#define EFER_LMSLE 0x000002000 /* Long Mode Segment Limit Enable */
#define EFER_FFXSR 0x000004000 /* Fast FXSAVE/FSRSTOR */
#define EFER_TCE 0x000008000 /* Translation Cache Extension */
/*
* Intel Extended Features registers
*/
#define XCR0 0 /* XFEATURE_ENABLED_MASK register */
#define XFEATURE_ENABLED_X87 0x00000001
#define XFEATURE_ENABLED_SSE 0x00000002
#define XFEATURE_ENABLED_YMM_HI128 0x00000004
#define XFEATURE_ENABLED_AVX XFEATURE_ENABLED_YMM_HI128
#define XFEATURE_ENABLED_BNDREGS 0x00000008
#define XFEATURE_ENABLED_BNDCSR 0x00000010
#define XFEATURE_ENABLED_OPMASK 0x00000020
#define XFEATURE_ENABLED_ZMM_HI256 0x00000040
#define XFEATURE_ENABLED_HI16_ZMM 0x00000080
#define XFEATURE_AVX \
(XFEATURE_ENABLED_X87 | XFEATURE_ENABLED_SSE | XFEATURE_ENABLED_AVX)
#define XFEATURE_AVX512 \
(XFEATURE_ENABLED_OPMASK | XFEATURE_ENABLED_ZMM_HI256 | \
XFEATURE_ENABLED_HI16_ZMM)
#define XFEATURE_MPX \
(XFEATURE_ENABLED_BNDREGS | XFEATURE_ENABLED_BNDCSR)
/*
* CPUID instruction features register
*/
#define CPUID_FPU 0x00000001
#define CPUID_VME 0x00000002
#define CPUID_DE 0x00000004
#define CPUID_PSE 0x00000008
#define CPUID_TSC 0x00000010
#define CPUID_MSR 0x00000020
#define CPUID_PAE 0x00000040
#define CPUID_MCE 0x00000080
#define CPUID_CX8 0x00000100
#define CPUID_APIC 0x00000200
#define CPUID_B10 0x00000400
#define CPUID_SEP 0x00000800
#define CPUID_MTRR 0x00001000
#define CPUID_PGE 0x00002000
#define CPUID_MCA 0x00004000
#define CPUID_CMOV 0x00008000
#define CPUID_PAT 0x00010000
#define CPUID_PSE36 0x00020000
#define CPUID_PSN 0x00040000
#define CPUID_CLFSH 0x00080000
#define CPUID_B20 0x00100000
#define CPUID_DS 0x00200000
#define CPUID_ACPI 0x00400000
#define CPUID_MMX 0x00800000
#define CPUID_FXSR 0x01000000
#define CPUID_SSE 0x02000000
#define CPUID_XMM 0x02000000
#define CPUID_SSE2 0x04000000
#define CPUID_SS 0x08000000
#define CPUID_HTT 0x10000000
#define CPUID_TM 0x20000000
#define CPUID_IA64 0x40000000
#define CPUID_PBE 0x80000000
#define CPUID2_SSE3 0x00000001
#define CPUID2_PCLMULQDQ 0x00000002
#define CPUID2_DTES64 0x00000004
#define CPUID2_MON 0x00000008
#define CPUID2_DS_CPL 0x00000010
#define CPUID2_VMX 0x00000020
#define CPUID2_SMX 0x00000040
#define CPUID2_EST 0x00000080
#define CPUID2_TM2 0x00000100
#define CPUID2_SSSE3 0x00000200
#define CPUID2_CNXTID 0x00000400
#define CPUID2_SDBG 0x00000800
#define CPUID2_FMA 0x00001000
#define CPUID2_CX16 0x00002000
#define CPUID2_XTPR 0x00004000
#define CPUID2_PDCM 0x00008000
#define CPUID2_PCID 0x00020000
#define CPUID2_DCA 0x00040000
#define CPUID2_SSE41 0x00080000
#define CPUID2_SSE42 0x00100000
#define CPUID2_X2APIC 0x00200000
#define CPUID2_MOVBE 0x00400000
#define CPUID2_POPCNT 0x00800000
#define CPUID2_TSCDLT 0x01000000
#define CPUID2_AESNI 0x02000000
#define CPUID2_XSAVE 0x04000000
#define CPUID2_OSXSAVE 0x08000000
#define CPUID2_AVX 0x10000000
#define CPUID2_F16C 0x20000000
#define CPUID2_RDRAND 0x40000000
#define CPUID2_HV 0x80000000
/*
* Important bits in the Thermal and Power Management flags
* CPUID.6 EAX and ECX.
*/
#define CPUTPM1_SENSOR 0x00000001
#define CPUTPM1_TURBO 0x00000002
#define CPUTPM1_ARAT 0x00000004
#define CPUTPM2_EFFREQ 0x00000001
/*
* Important bits in the AMD extended cpuid flags
*/
#define AMDID_SYSCALL 0x00000800
#define AMDID_MP 0x00080000
#define AMDID_NX 0x00100000
#define AMDID_EXT_MMX 0x00400000
#define AMDID_FFXSR 0x02000000
#define AMDID_PAGE1GB 0x04000000
#define AMDID_RDTSCP 0x08000000
#define AMDID_LM 0x20000000
#define AMDID_EXT_3DNOW 0x40000000
#define AMDID_3DNOW 0x80000000
#define AMDID2_LAHF 0x00000001
#define AMDID2_CMP 0x00000002
#define AMDID2_SVM 0x00000004
#define AMDID2_EXT_APIC 0x00000008
#define AMDID2_CR8 0x00000010
#define AMDID2_ABM 0x00000020
#define AMDID2_SSE4A 0x00000040
#define AMDID2_MAS 0x00000080
#define AMDID2_PREFETCH 0x00000100
#define AMDID2_OSVW 0x00000200
#define AMDID2_IBS 0x00000400
#define AMDID2_XOP 0x00000800
#define AMDID2_SKINIT 0x00001000
#define AMDID2_WDT 0x00002000
#define AMDID2_LWP 0x00008000
#define AMDID2_FMA4 0x00010000
#define AMDID2_TCE 0x00020000
#define AMDID2_NODE_ID 0x00080000
#define AMDID2_TBM 0x00200000
#define AMDID2_TOPOLOGY 0x00400000
#define AMDID2_PCXC 0x00800000
#define AMDID2_PNXC 0x01000000
#define AMDID2_DBE 0x04000000
#define AMDID2_PTSC 0x08000000
#define AMDID2_PTSCEL2I 0x10000000
#define AMDID2_MWAITX 0x20000000
/*
* CPUID instruction 1 eax info
*/
#define CPUID_STEPPING 0x0000000f
#define CPUID_MODEL 0x000000f0
#define CPUID_FAMILY 0x00000f00
#define CPUID_EXT_MODEL 0x000f0000
#define CPUID_EXT_FAMILY 0x0ff00000
#ifdef __i386__
#define CPUID_TO_MODEL(id) \
((((id) & CPUID_MODEL) >> 4) | \
((((id) & CPUID_FAMILY) >= 0x600) ? \
(((id) & CPUID_EXT_MODEL) >> 12) : 0))
#define CPUID_TO_FAMILY(id) \
((((id) & CPUID_FAMILY) >> 8) + \
((((id) & CPUID_FAMILY) == 0xf00) ? \
(((id) & CPUID_EXT_FAMILY) >> 20) : 0))
#else
#define CPUID_TO_MODEL(id) \
((((id) & CPUID_MODEL) >> 4) | \
(((id) & CPUID_EXT_MODEL) >> 12))
#define CPUID_TO_FAMILY(id) \
((((id) & CPUID_FAMILY) >> 8) + \
(((id) & CPUID_EXT_FAMILY) >> 20))
#endif
/*
* CPUID instruction 1 ebx info
*/
#define CPUID_BRAND_INDEX 0x000000ff
#define CPUID_CLFUSH_SIZE 0x0000ff00
#define CPUID_HTT_CORES 0x00ff0000
#define CPUID_LOCAL_APIC_ID 0xff000000
/*
* CPUID instruction 5 info
*/
#define CPUID5_MON_MIN_SIZE 0x0000ffff /* eax */
#define CPUID5_MON_MAX_SIZE 0x0000ffff /* ebx */
#define CPUID5_MON_MWAIT_EXT 0x00000001 /* ecx */
#define CPUID5_MWAIT_INTRBREAK 0x00000002 /* ecx */
/*
* MWAIT cpu power states. Lower 4 bits are sub-states.
*/
#define MWAIT_C0 0xf0
#define MWAIT_C1 0x00
#define MWAIT_C2 0x10
#define MWAIT_C3 0x20
#define MWAIT_C4 0x30
/*
* MWAIT extensions.
*/
/* Interrupt breaks MWAIT even when masked. */
#define MWAIT_INTRBREAK 0x00000001
/*
* CPUID instruction 6 ecx info
*/
#define CPUID_PERF_STAT 0x00000001
#define CPUID_PERF_BIAS 0x00000008
/*
* CPUID instruction 0xb ebx info.
*/
#define CPUID_TYPE_INVAL 0
#define CPUID_TYPE_SMT 1
#define CPUID_TYPE_CORE 2
/*
* CPUID instruction 0xd Processor Extended State Enumeration Sub-leaf 1
*/
#define CPUID_EXTSTATE_XSAVEOPT 0x00000001
#define CPUID_EXTSTATE_XSAVEC 0x00000002
#define CPUID_EXTSTATE_XINUSE 0x00000004
#define CPUID_EXTSTATE_XSAVES 0x00000008
/*
* AMD extended function 8000_0007h edx info
*/
#define AMDPM_TS 0x00000001
#define AMDPM_FID 0x00000002
#define AMDPM_VID 0x00000004
#define AMDPM_TTP 0x00000008
#define AMDPM_TM 0x00000010
#define AMDPM_STC 0x00000020
#define AMDPM_100MHZ_STEPS 0x00000040
#define AMDPM_HW_PSTATE 0x00000080
#define AMDPM_TSC_INVARIANT 0x00000100
#define AMDPM_CPB 0x00000200
/*
* AMD extended function 8000_0008h ecx info
*/
#define AMDID_CMP_CORES 0x000000ff
#define AMDID_COREID_SIZE 0x0000f000
#define AMDID_COREID_SIZE_SHIFT 12
/*
* CPUID instruction 7 Structured Extended Features, leaf 0 ebx info
*/
#define CPUID_STDEXT_FSGSBASE 0x00000001
#define CPUID_STDEXT_TSC_ADJUST 0x00000002
#define CPUID_STDEXT_SGX 0x00000004
#define CPUID_STDEXT_BMI1 0x00000008
#define CPUID_STDEXT_HLE 0x00000010
#define CPUID_STDEXT_AVX2 0x00000020
#define CPUID_STDEXT_FDP_EXC 0x00000040
#define CPUID_STDEXT_SMEP 0x00000080
#define CPUID_STDEXT_BMI2 0x00000100
#define CPUID_STDEXT_ERMS 0x00000200
#define CPUID_STDEXT_INVPCID 0x00000400
#define CPUID_STDEXT_RTM 0x00000800
#define CPUID_STDEXT_PQM 0x00001000
#define CPUID_STDEXT_NFPUSG 0x00002000
#define CPUID_STDEXT_MPX 0x00004000
#define CPUID_STDEXT_PQE 0x00008000
#define CPUID_STDEXT_AVX512F 0x00010000
#define CPUID_STDEXT_AVX512DQ 0x00020000
#define CPUID_STDEXT_RDSEED 0x00040000
#define CPUID_STDEXT_ADX 0x00080000
#define CPUID_STDEXT_SMAP 0x00100000
#define CPUID_STDEXT_AVX512IFMA 0x00200000
#define CPUID_STDEXT_PCOMMIT 0x00400000
#define CPUID_STDEXT_CLFLUSHOPT 0x00800000
#define CPUID_STDEXT_CLWB 0x01000000
#define CPUID_STDEXT_PROCTRACE 0x02000000
#define CPUID_STDEXT_AVX512PF 0x04000000
#define CPUID_STDEXT_AVX512ER 0x08000000
#define CPUID_STDEXT_AVX512CD 0x10000000
#define CPUID_STDEXT_SHA 0x20000000
#define CPUID_STDEXT_AVX512BW 0x40000000
/*
* CPUID instruction 7 Structured Extended Features, leaf 0 ecx info
*/
#define CPUID_STDEXT2_PREFETCHWT1 0x00000001
#define CPUID_STDEXT2_UMIP 0x00000004
#define CPUID_STDEXT2_PKU 0x00000008
#define CPUID_STDEXT2_OSPKE 0x00000010
#define CPUID_STDEXT2_RDPID 0x00400000
#define CPUID_STDEXT2_SGXLC 0x40000000
/*
* CPUID manufacturers identifiers
*/
#define AMD_VENDOR_ID "AuthenticAMD"
#define CENTAUR_VENDOR_ID "CentaurHauls"
#define CYRIX_VENDOR_ID "CyrixInstead"
#define INTEL_VENDOR_ID "GenuineIntel"
#define NEXGEN_VENDOR_ID "NexGenDriven"
#define NSC_VENDOR_ID "Geode by NSC"
#define RISE_VENDOR_ID "RiseRiseRise"
#define SIS_VENDOR_ID "SiS SiS SiS "
#define TRANSMETA_VENDOR_ID "GenuineTMx86"
#define UMC_VENDOR_ID "UMC UMC UMC "
/*
* Model-specific registers for the i386 family
*/
#define MSR_P5_MC_ADDR 0x000
#define MSR_P5_MC_TYPE 0x001
#define MSR_TSC 0x010
#define MSR_P5_CESR 0x011
#define MSR_P5_CTR0 0x012
#define MSR_P5_CTR1 0x013
#define MSR_IA32_PLATFORM_ID 0x017
#define MSR_APICBASE 0x01b
#define MSR_EBL_CR_POWERON 0x02a
#define MSR_TEST_CTL 0x033
#define MSR_IA32_FEATURE_CONTROL 0x03a
#define MSR_BIOS_UPDT_TRIG 0x079
#define MSR_BBL_CR_D0 0x088
#define MSR_BBL_CR_D1 0x089
#define MSR_BBL_CR_D2 0x08a
#define MSR_BIOS_SIGN 0x08b
#define MSR_PERFCTR0 0x0c1
#define MSR_PERFCTR1 0x0c2
#define MSR_PLATFORM_INFO 0x0ce
#define MSR_MPERF 0x0e7
#define MSR_APERF 0x0e8
#define MSR_IA32_EXT_CONFIG 0x0ee /* Undocumented. Core Solo/Duo only */
#define MSR_MTRRcap 0x0fe
#define MSR_BBL_CR_ADDR 0x116
#define MSR_BBL_CR_DECC 0x118
#define MSR_BBL_CR_CTL 0x119
#define MSR_BBL_CR_TRIG 0x11a
#define MSR_BBL_CR_BUSY 0x11b
#define MSR_BBL_CR_CTL3 0x11e
#define MSR_SYSENTER_CS_MSR 0x174
#define MSR_SYSENTER_ESP_MSR 0x175
#define MSR_SYSENTER_EIP_MSR 0x176
#define MSR_MCG_CAP 0x179
#define MSR_MCG_STATUS 0x17a
#define MSR_MCG_CTL 0x17b
#define MSR_EVNTSEL0 0x186
#define MSR_EVNTSEL1 0x187
#define MSR_THERM_CONTROL 0x19a
#define MSR_THERM_INTERRUPT 0x19b
#define MSR_THERM_STATUS 0x19c
#define MSR_IA32_MISC_ENABLE 0x1a0
#define MSR_IA32_TEMPERATURE_TARGET 0x1a2
#define MSR_TURBO_RATIO_LIMIT 0x1ad
#define MSR_TURBO_RATIO_LIMIT1 0x1ae
#define MSR_DEBUGCTLMSR 0x1d9
#define MSR_LASTBRANCHFROMIP 0x1db
#define MSR_LASTBRANCHTOIP 0x1dc
#define MSR_LASTINTFROMIP 0x1dd
#define MSR_LASTINTTOIP 0x1de
#define MSR_ROB_CR_BKUPTMPDR6 0x1e0
#define MSR_MTRRVarBase 0x200
#define MSR_MTRR64kBase 0x250
#define MSR_MTRR16kBase 0x258
#define MSR_MTRR4kBase 0x268
#define MSR_PAT 0x277
#define MSR_MC0_CTL2 0x280
#define MSR_MTRRdefType 0x2ff
#define MSR_MC0_CTL 0x400
#define MSR_MC0_STATUS 0x401
#define MSR_MC0_ADDR 0x402
#define MSR_MC0_MISC 0x403
#define MSR_MC1_CTL 0x404
#define MSR_MC1_STATUS 0x405
#define MSR_MC1_ADDR 0x406
#define MSR_MC1_MISC 0x407
#define MSR_MC2_CTL 0x408
#define MSR_MC2_STATUS 0x409
#define MSR_MC2_ADDR 0x40a
#define MSR_MC2_MISC 0x40b
#define MSR_MC3_CTL 0x40c
#define MSR_MC3_STATUS 0x40d
#define MSR_MC3_ADDR 0x40e
#define MSR_MC3_MISC 0x40f
#define MSR_MC4_CTL 0x410
#define MSR_MC4_STATUS 0x411
#define MSR_MC4_ADDR 0x412
#define MSR_MC4_MISC 0x413
#define MSR_RAPL_POWER_UNIT 0x606
#define MSR_PKG_ENERGY_STATUS 0x611
#define MSR_DRAM_ENERGY_STATUS 0x619
#define MSR_PP0_ENERGY_STATUS 0x639
#define MSR_PP1_ENERGY_STATUS 0x641
#define MSR_TSC_DEADLINE 0x6e0 /* Writes are not serializing */
/*
* VMX MSRs
*/
#define MSR_VMX_BASIC 0x480
#define MSR_VMX_PINBASED_CTLS 0x481
#define MSR_VMX_PROCBASED_CTLS 0x482
#define MSR_VMX_EXIT_CTLS 0x483
#define MSR_VMX_ENTRY_CTLS 0x484
#define MSR_VMX_CR0_FIXED0 0x486
#define MSR_VMX_CR0_FIXED1 0x487
#define MSR_VMX_CR4_FIXED0 0x488
#define MSR_VMX_CR4_FIXED1 0x489
#define MSR_VMX_PROCBASED_CTLS2 0x48b
#define MSR_VMX_EPT_VPID_CAP 0x48c
#define MSR_VMX_TRUE_PINBASED_CTLS 0x48d
#define MSR_VMX_TRUE_PROCBASED_CTLS 0x48e
#define MSR_VMX_TRUE_EXIT_CTLS 0x48f
#define MSR_VMX_TRUE_ENTRY_CTLS 0x490
/*
* X2APIC MSRs.
* Writes are not serializing.
*/
#define MSR_APIC_000 0x800
#define MSR_APIC_ID 0x802
#define MSR_APIC_VERSION 0x803
#define MSR_APIC_TPR 0x808
#define MSR_APIC_EOI 0x80b
#define MSR_APIC_LDR 0x80d
#define MSR_APIC_SVR 0x80f
#define MSR_APIC_ISR0 0x810
#define MSR_APIC_ISR1 0x811
#define MSR_APIC_ISR2 0x812
#define MSR_APIC_ISR3 0x813
#define MSR_APIC_ISR4 0x814
#define MSR_APIC_ISR5 0x815
#define MSR_APIC_ISR6 0x816
#define MSR_APIC_ISR7 0x817
#define MSR_APIC_TMR0 0x818
#define MSR_APIC_IRR0 0x820
#define MSR_APIC_ESR 0x828
#define MSR_APIC_LVT_CMCI 0x82F
#define MSR_APIC_ICR 0x830
#define MSR_APIC_LVT_TIMER 0x832
#define MSR_APIC_LVT_THERMAL 0x833
#define MSR_APIC_LVT_PCINT 0x834
#define MSR_APIC_LVT_LINT0 0x835
#define MSR_APIC_LVT_LINT1 0x836
#define MSR_APIC_LVT_ERROR 0x837
#define MSR_APIC_ICR_TIMER 0x838
#define MSR_APIC_CCR_TIMER 0x839
#define MSR_APIC_DCR_TIMER 0x83e
#define MSR_APIC_SELF_IPI 0x83f
#define MSR_IA32_XSS 0xda0
/*
* Constants related to MSR's.
*/
#define APICBASE_RESERVED 0x000002ff
#define APICBASE_BSP 0x00000100
#define APICBASE_X2APIC 0x00000400
#define APICBASE_ENABLED 0x00000800
#define APICBASE_ADDRESS 0xfffff000
/* MSR_IA32_FEATURE_CONTROL related */
#define IA32_FEATURE_CONTROL_LOCK 0x01 /* lock bit */
#define IA32_FEATURE_CONTROL_SMX_EN 0x02 /* enable VMX inside SMX */
#define IA32_FEATURE_CONTROL_VMX_EN 0x04 /* enable VMX outside SMX */
/* MSR IA32_MISC_ENABLE */
#define IA32_MISC_EN_FASTSTR 0x0000000000000001ULL
#define IA32_MISC_EN_ATCCE 0x0000000000000008ULL
#define IA32_MISC_EN_PERFMON 0x0000000000000080ULL
#define IA32_MISC_EN_PEBSU 0x0000000000001000ULL
#define IA32_MISC_EN_ESSTE 0x0000000000010000ULL
#define IA32_MISC_EN_MONE 0x0000000000040000ULL
#define IA32_MISC_EN_LIMCPUID 0x0000000000400000ULL
#define IA32_MISC_EN_xTPRD 0x0000000000800000ULL
#define IA32_MISC_EN_XDD 0x0000000400000000ULL
/*
* PAT modes.
*/
#define PAT_UNCACHEABLE 0x00
#define PAT_WRITE_COMBINING 0x01
#define PAT_WRITE_THROUGH 0x04
#define PAT_WRITE_PROTECTED 0x05
#define PAT_WRITE_BACK 0x06
#define PAT_UNCACHED 0x07
#define PAT_VALUE(i, m) ((long long)(m) << (8 * (i)))
#define PAT_MASK(i) PAT_VALUE(i, 0xff)
/*
* Constants related to MTRRs
*/
#define MTRR_UNCACHEABLE 0x00
#define MTRR_WRITE_COMBINING 0x01
#define MTRR_WRITE_THROUGH 0x04
#define MTRR_WRITE_PROTECTED 0x05
#define MTRR_WRITE_BACK 0x06
#define MTRR_N64K 8 /* numbers of fixed-size entries */
#define MTRR_N16K 16
#define MTRR_N4K 64
#define MTRR_CAP_WC 0x0000000000000400
#define MTRR_CAP_FIXED 0x0000000000000100
#define MTRR_CAP_VCNT 0x00000000000000ff
#define MTRR_DEF_ENABLE 0x0000000000000800
#define MTRR_DEF_FIXED_ENABLE 0x0000000000000400
#define MTRR_DEF_TYPE 0x00000000000000ff
#define MTRR_PHYSBASE_PHYSBASE 0x000ffffffffff000
#define MTRR_PHYSBASE_TYPE 0x00000000000000ff
#define MTRR_PHYSMASK_PHYSMASK 0x000ffffffffff000
#define MTRR_PHYSMASK_VALID 0x0000000000000800
/*
* Cyrix configuration registers, accessible as IO ports.
*/
#define CCR0 0xc0 /* Configuration control register 0 */
#define CCR0_NC0 0x01 /* First 64K of each 1M memory region is
* non-cacheable
*/
#define CCR0_NC1 0x02 /* 640K-1M region is non-cacheable */
#define CCR0_A20M 0x04 /* Enables A20M# input pin */
#define CCR0_KEN 0x08 /* Enables KEN# input pin */
#define CCR0_FLUSH 0x10 /* Enables FLUSH# input pin */
#define CCR0_BARB 0x20 /* Flushes internal cache when
* entering hold state
*/
#define CCR0_CO 0x40 /* Cache org: 1=direct mapped, 0=2x set
* assoc
*/
#define CCR0_SUSPEND 0x80 /* Enables SUSP# and SUSPA# pins */
#define CCR1 0xc1 /* Configuration control register 1 */
#define CCR1_RPL 0x01 /* Enables RPLSET and RPLVAL# pins */
#define CCR1_SMI 0x02 /* Enables SMM pins */
#define CCR1_SMAC 0x04 /* System management memory access */
#define CCR1_MMAC 0x08 /* Main memory access */
#define CCR1_NO_LOCK 0x10 /* Negate LOCK# */
#define CCR1_SM3 0x80 /* SMM address space address region 3 */
#define CCR2 0xc2
#define CCR2_WB 0x02 /* Enables WB cache interface pins */
#define CCR2_SADS 0x02 /* Slow ADS */
#define CCR2_LOCK_NW 0x04 /* LOCK NW Bit */
#define CCR2_SUSP_HLT 0x08 /* Suspend on HALT */
#define CCR2_WT1 0x10 /* WT region 1 */
#define CCR2_WPR1 0x10 /* Write-protect region 1 */
#define CCR2_BARB 0x20 /* Flushes write-back cache when
* entering hold state.
*/
#define CCR2_BWRT 0x40 /* Enables burst write cycles */
#define CCR2_USE_SUSP 0x80 /* Enables suspend pins */
#define CCR3 0xc3
#define CCR3_SMILOCK 0x01 /* SMM register lock */
#define CCR3_NMI 0x02 /* Enables NMI during SMM */
#define CCR3_LINBRST 0x04 /* Linear address burst cycles */
#define CCR3_SMMMODE 0x08 /* SMM Mode */
#define CCR3_MAPEN0 0x10 /* Enables Map0 */
#define CCR3_MAPEN1 0x20 /* Enables Map1 */
#define CCR3_MAPEN2 0x40 /* Enables Map2 */
#define CCR3_MAPEN3 0x80 /* Enables Map3 */
#define CCR4 0xe8 /* Configuration control register 4 */
#define CCR4_IOMASK 0x07
#define CCR4_MEM 0x08 /* Enables memory bypassing */
#define CCR4_DTE 0x10 /* Enables directory table
 * entry cache
 */
#define CCR4_FASTFPE 0x20 /* Fast FPU exception */
#define CCR4_CPUID 0x80 /* Enables CPUID instruction */
#define CCR5 0xe9
#define CCR5_WT_ALLOC 0x01 /* Write-through allocate */
#define CCR5_SLOP 0x02 /* LOOP instruction slowed down */
#define CCR5_LBR1 0x10 /* Local bus region 1 */
#define CCR5_ARREN 0x20 /* Enables ARR region */
#define CCR6 0xea
#define CCR7 0xeb
/* Performance Control Register (5x86 only). */
#define PCR0 0x20
#define PCR0_RSTK 0x01 /* Enables return stack */
#define PCR0_BTB 0x02 /* Enables branch target buffer */
#define PCR0_LOOP 0x04 /* Enables loop */
#define PCR0_AIS 0x08 /* Enables all instructions stalled to
 * serialize pipe.
 */
#define PCR0_MLR 0x10 /* Enables reordering of misaligned
 * loads
 */
#define PCR0_BTBRT 0x40 /* Enables BTB test register. */
#define PCR0_LSSER 0x80 /* Disable reorder */
/* Device Identification Registers */
#define DIR0 0xfe
#define DIR1 0xff
/*
* Machine Check register constants.
*/
#define MCG_CAP_COUNT 0x000000ff
#define MCG_CAP_CTL_P 0x00000100
#define MCG_CAP_EXT_P 0x00000200
#define MCG_CAP_CMCI_P 0x00000400
#define MCG_CAP_TES_P 0x00000800
#define MCG_CAP_EXT_CNT 0x00ff0000
#define MCG_CAP_SER_P 0x01000000
#define MCG_STATUS_RIPV 0x00000001
#define MCG_STATUS_EIPV 0x00000002
#define MCG_STATUS_MCIP 0x00000004
#define MCG_CTL_ENABLE 0xffffffffffffffff
#define MCG_CTL_DISABLE 0x0000000000000000
#define MSR_MC_CTL(x) (MSR_MC0_CTL + (x) * 4)
#define MSR_MC_STATUS(x) (MSR_MC0_STATUS + (x) * 4)
#define MSR_MC_ADDR(x) (MSR_MC0_ADDR + (x) * 4)
#define MSR_MC_MISC(x) (MSR_MC0_MISC + (x) * 4)
#define MSR_MC_CTL2(x) (MSR_MC0_CTL2 + (x)) /* If MCG_CAP_CMCI_P */
#define MC_STATUS_MCA_ERROR 0x000000000000ffff
#define MC_STATUS_MODEL_ERROR 0x00000000ffff0000
#define MC_STATUS_OTHER_INFO 0x01ffffff00000000
#define MC_STATUS_COR_COUNT 0x001fffc000000000 /* If MCG_CAP_CMCI_P */
#define MC_STATUS_TES_STATUS 0x0060000000000000 /* If MCG_CAP_TES_P */
#define MC_STATUS_AR 0x0080000000000000 /* If MCG_CAP_TES_P */
#define MC_STATUS_S 0x0100000000000000 /* If MCG_CAP_TES_P */
#define MC_STATUS_PCC 0x0200000000000000
#define MC_STATUS_ADDRV 0x0400000000000000
#define MC_STATUS_MISCV 0x0800000000000000
#define MC_STATUS_EN 0x1000000000000000
#define MC_STATUS_UC 0x2000000000000000
#define MC_STATUS_OVER 0x4000000000000000
#define MC_STATUS_VAL 0x8000000000000000
#define MC_MISC_RA_LSB 0x000000000000003f /* If MCG_CAP_SER_P */
#define MC_MISC_ADDRESS_MODE 0x00000000000001c0 /* If MCG_CAP_SER_P */
#define MC_CTL2_THRESHOLD 0x0000000000007fff
#define MC_CTL2_CMCI_EN 0x0000000040000000
#define MC_AMDNB_BANK 4
#define MC_MISC_AMDNB_VAL 0x8000000000000000 /* Counter presence
* valid
*/
#define MC_MISC_AMDNB_CNTP 0x4000000000000000 /* Counter present */
#define MC_MISC_AMDNB_LOCK 0x2000000000000000 /* Register locked */
#define MC_MISC_AMDNB_LVT_MASK 0x00f0000000000000 /* Extended LVT
* offset
*/
#define MC_MISC_AMDNB_LVT_SHIFT 52
#define MC_MISC_AMDNB_CNTEN 0x0008000000000000 /* Counter enabled */
#define MC_MISC_AMDNB_INT_MASK 0x0006000000000000 /* Interrupt type */
#define MC_MISC_AMDNB_INT_LVT 0x0002000000000000 /* Interrupt via
* Extended LVT
*/
#define MC_MISC_AMDNB_INT_SMI 0x0004000000000000 /* SMI */
#define MC_MISC_AMDNB_OVERFLOW 0x0001000000000000 /* Counter overflow */
#define MC_MISC_AMDNB_CNT_MASK 0x00000fff00000000 /* Counter value */
#define MC_MISC_AMDNB_CNT_SHIFT 32
#define MC_MISC_AMDNB_CNT_MAX 0xfff
#define MC_MISC_AMDNB_PTR_MASK 0x00000000ff000000 /* Pointer to
* additional registers
*/
#define MC_MISC_AMDNB_PTR_SHIFT 24
/*
* The following four 3-byte registers control the non-cacheable regions.
* These registers must be written as three separate bytes.
*
* NCRx+0: A31-A24 of starting address
* NCRx+1: A23-A16 of starting address
* NCRx+2: A15-A12 of starting address | NCR_SIZE_xx.
*
* The non-cacheable region's starting address must be aligned to the
* size indicated by the NCR_SIZE_xx field.
*/
#define NCR1 0xc4
#define NCR2 0xc7
#define NCR3 0xca
#define NCR4 0xcd
#define NCR_SIZE_0K 0
#define NCR_SIZE_4K 1
#define NCR_SIZE_8K 2
#define NCR_SIZE_16K 3
#define NCR_SIZE_32K 4
#define NCR_SIZE_64K 5
#define NCR_SIZE_128K 6
#define NCR_SIZE_256K 7
#define NCR_SIZE_512K 8
#define NCR_SIZE_1M 9
#define NCR_SIZE_2M 10
#define NCR_SIZE_4M 11
#define NCR_SIZE_8M 12
#define NCR_SIZE_16M 13
#define NCR_SIZE_32M 14
#define NCR_SIZE_4G 15
/*
* The address region registers are used to specify the location and
* size for the eight address regions.
*
* ARRx + 0: A31-A24 of start address
* ARRx + 1: A23-A16 of start address
* ARRx + 2: A15-A12 of start address | ARR_SIZE_xx
*/
#define ARR0 0xc4
#define ARR1 0xc7
#define ARR2 0xca
#define ARR3 0xcd
#define ARR4 0xd0
#define ARR5 0xd3
#define ARR6 0xd6
#define ARR7 0xd9
#define ARR_SIZE_0K 0
#define ARR_SIZE_4K 1
#define ARR_SIZE_8K 2
#define ARR_SIZE_16K 3
#define ARR_SIZE_32K 4
#define ARR_SIZE_64K 5
#define ARR_SIZE_128K 6
#define ARR_SIZE_256K 7
#define ARR_SIZE_512K 8
#define ARR_SIZE_1M 9
#define ARR_SIZE_2M 10
#define ARR_SIZE_4M 11
#define ARR_SIZE_8M 12
#define ARR_SIZE_16M 13
#define ARR_SIZE_32M 14
#define ARR_SIZE_4G 15
/*
* The region control registers specify the attributes associated with
* the ARRx address regions.
*/
#define RCR0 0xdc
#define RCR1 0xdd
#define RCR2 0xde
#define RCR3 0xdf
#define RCR4 0xe0
#define RCR5 0xe1
#define RCR6 0xe2
#define RCR7 0xe3
#define RCR_RCD 0x01 /* Disables caching for ARRx (x = 0-6). */
#define RCR_RCE 0x01 /* Enables caching for ARR7. */
#define RCR_WWO 0x02 /* Weak write ordering. */
#define RCR_WL 0x04 /* Weak locking. */
#define RCR_WG 0x08 /* Write gathering. */
#define RCR_WT 0x10 /* Write-through. */
#define RCR_NLB 0x20 /* LBA# pin is not asserted. */
/* AMD Write Allocate Top-Of-Memory and Control Register */
#define AMD_WT_ALLOC_TME 0x40000 /* top-of-memory enable */
#define AMD_WT_ALLOC_PRE 0x20000 /* programmable range enable */
#define AMD_WT_ALLOC_FRE 0x10000 /* fixed (A0000-FFFFF) range enable */
/* AMD64 MSR's */
#define MSR_EFER 0xc0000080 /* extended features */
#define MSR_STAR 0xc0000081 /* legacy mode SYSCALL target/cs/ss */
#define MSR_LSTAR 0xc0000082 /* long mode SYSCALL target rip */
#define MSR_CSTAR 0xc0000083 /* compat mode SYSCALL target rip */
#define MSR_SF_MASK 0xc0000084 /* syscall flags mask */
#define MSR_FSBASE 0xc0000100 /* base address of the %fs "segment" */
#define MSR_GSBASE 0xc0000101 /* base address of the %gs "segment" */
#define MSR_KGSBASE 0xc0000102 /* base address of the kernel %gs */
#define MSR_PERFEVSEL0 0xc0010000
#define MSR_PERFEVSEL1 0xc0010001
#define MSR_PERFEVSEL2 0xc0010002
#define MSR_PERFEVSEL3 0xc0010003
#define MSR_K7_PERFCTR0 0xc0010004
#define MSR_K7_PERFCTR1 0xc0010005
#define MSR_K7_PERFCTR2 0xc0010006
#define MSR_K7_PERFCTR3 0xc0010007
#define MSR_SYSCFG 0xc0010010
#define MSR_HWCR 0xc0010015
#define MSR_IORRBASE0 0xc0010016
#define MSR_IORRMASK0 0xc0010017
#define MSR_IORRBASE1 0xc0010018
#define MSR_IORRMASK1 0xc0010019
#define MSR_TOP_MEM 0xc001001a /* boundary for ram below 4G */
#define MSR_TOP_MEM2 0xc001001d /* boundary for ram above 4G */
#define MSR_NB_CFG1 0xc001001f /* NB configuration 1 */
#define MSR_P_STATE_LIMIT 0xc0010061 /* P-state Current Limit Register */
#define MSR_P_STATE_CONTROL 0xc0010062 /* P-state Control Register */
#define MSR_P_STATE_STATUS 0xc0010063 /* P-state Status Register */
#define MSR_P_STATE_CONFIG(n) (0xc0010064 + (n)) /* P-state Config */
#define MSR_SMM_ADDR 0xc0010112 /* SMM TSEG base address */
#define MSR_SMM_MASK 0xc0010113 /* SMM TSEG address mask */
#define MSR_EXTFEATURES 0xc0011005 /* Extended CPUID Features override */
#define MSR_IC_CFG 0xc0011021 /* Instruction Cache Configuration */
#define MSR_K8_UCODE_UPDATE 0xc0010020 /* update microcode */
#define MSR_MC0_CTL_MASK 0xc0010044
#define MSR_VM_CR 0xc0010114 /* SVM: feature control */
#define MSR_VM_HSAVE_PA 0xc0010117 /* SVM: host save area address */
/* MSR_VM_CR related */
#define VM_CR_SVMDIS 0x10 /* SVM: disabled by BIOS */
/* VIA ACE crypto featureset: for via_feature_rng */
#define VIA_HAS_RNG 1 /* cpu has RNG */
/* VIA ACE crypto featureset: for via_feature_xcrypt */
#define VIA_HAS_AES 1 /* cpu has AES */
#define VIA_HAS_SHA 2 /* cpu has SHA1 & SHA256 */
#define VIA_HAS_MM 4 /* cpu has RSA instructions */
#define VIA_HAS_AESCTR 8 /* cpu has AES-CTR instructions */
/* Centaur Extended Feature flags */
#define VIA_CPUID_HAS_RNG 0x000004
#define VIA_CPUID_DO_RNG 0x000008
#define VIA_CPUID_HAS_ACE 0x000040
#define VIA_CPUID_DO_ACE 0x000080
#define VIA_CPUID_HAS_ACE2 0x000100
#define VIA_CPUID_DO_ACE2 0x000200
#define VIA_CPUID_HAS_PHE 0x000400
#define VIA_CPUID_DO_PHE 0x000800
#define VIA_CPUID_HAS_PMM 0x001000
#define VIA_CPUID_DO_PMM 0x002000
/* VIA ACE xcrypt-* instruction context control options */
#define VIA_CRYPT_CWLO_ROUND_M 0x0000000f
#define VIA_CRYPT_CWLO_ALG_M 0x00000070
#define VIA_CRYPT_CWLO_ALG_AES 0x00000000
#define VIA_CRYPT_CWLO_KEYGEN_M 0x00000080
#define VIA_CRYPT_CWLO_KEYGEN_HW 0x00000000
#define VIA_CRYPT_CWLO_KEYGEN_SW 0x00000080
#define VIA_CRYPT_CWLO_NORMAL 0x00000000
#define VIA_CRYPT_CWLO_INTERMEDIATE 0x00000100
#define VIA_CRYPT_CWLO_ENCRYPT 0x00000000
#define VIA_CRYPT_CWLO_DECRYPT 0x00000200
#define VIA_CRYPT_CWLO_KEY128 0x0000000a /* 128bit, 10 rds */
#define VIA_CRYPT_CWLO_KEY192 0x0000040c /* 192bit, 12 rds */
#define VIA_CRYPT_CWLO_KEY256 0x0000080e /* 256bit, 15 rds */
#endif /* !_SPECIALREG_H_ */

817
devicemodel/include/tree.h Normal file
View File

@@ -0,0 +1,817 @@
/* $NetBSD: tree.h,v 1.8 2004/03/28 19:38:30 provos Exp $ */
/* $OpenBSD: tree.h,v 1.7 2002/10/17 21:51:54 art Exp $ */
/* $FreeBSD$ */
/*-
* Copyright 2002 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _TREE_H_
#define _TREE_H_
#include <sys/cdefs.h>
/*
* This file defines data structures for different types of trees:
* splay trees and red-black trees.
*
* A splay tree is a self-organizing data structure. Every operation
* on the tree causes a splay to happen. The splay moves the requested
* node to the root of the tree and partly rebalances it.
*
* This has the benefit that request locality causes faster lookups as
* the requested nodes move to the top of the tree. On the other hand,
* every lookup causes memory writes.
*
* The Balance Theorem bounds the total access time for m operations
* and n inserts on an initially empty tree as O((m + n)lg n). The
* amortized cost for a sequence of m accesses to a splay tree is O(lg n);
*
* A red-black tree is a binary search tree with the node color as an
* extra attribute. It fulfills a set of conditions:
* - every search path from the root to a leaf consists of the
* same number of black nodes,
* - each red node (except for the root) has a black parent,
* - each leaf node is black.
*
* Every operation on a red-black tree is bounded as O(lg n).
* The maximum height of a red-black tree is 2lg (n+1).
*/
/* Declare a splay-tree head structure; holds only the root pointer. */
#define SPLAY_HEAD(name, type) \
struct name { \
	struct type *sph_root; /* root of the tree */ \
}
/* Static initializer / runtime initializer for an empty tree. */
#define SPLAY_INITIALIZER(root) \
{ NULL }
#define SPLAY_INIT(root) ((root)->sph_root = NULL)
/* Per-node linkage to embed inside the element structure. */
#define SPLAY_ENTRY(type) \
struct { \
	struct type *spe_left; /* left element */ \
	struct type *spe_right; /* right element */ \
}
/* Accessors for a node's children and for the tree root. */
#define SPLAY_LEFT(elm, field) ((elm)->field.spe_left)
#define SPLAY_RIGHT(elm, field) ((elm)->field.spe_right)
#define SPLAY_ROOT(head) ((head)->sph_root)
#define SPLAY_EMPTY(head) (SPLAY_ROOT(head) == NULL)
/* SPLAY_ROTATE_{LEFT,RIGHT} expect that tmp hold SPLAY_{RIGHT,LEFT} */
#define SPLAY_ROTATE_RIGHT(head, tmp, field) do { \
	SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(tmp, field); \
	SPLAY_RIGHT(tmp, field) = (head)->sph_root; \
	(head)->sph_root = tmp; \
} while (/*CONSTCOND*/ 0)
#define SPLAY_ROTATE_LEFT(head, tmp, field) do { \
	SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(tmp, field); \
	SPLAY_LEFT(tmp, field) = (head)->sph_root; \
	(head)->sph_root = tmp; \
} while (/*CONSTCOND*/ 0)
/* Link the current root onto the left/right partial tree and descend. */
#define SPLAY_LINKLEFT(head, tmp, field) do { \
	SPLAY_LEFT(tmp, field) = (head)->sph_root; \
	tmp = (head)->sph_root; \
	(head)->sph_root = SPLAY_LEFT((head)->sph_root, field); \
} while (/*CONSTCOND*/ 0)
#define SPLAY_LINKRIGHT(head, tmp, field) do { \
	SPLAY_RIGHT(tmp, field) = (head)->sph_root; \
	tmp = (head)->sph_root; \
	(head)->sph_root = SPLAY_RIGHT((head)->sph_root, field); \
} while (/*CONSTCOND*/ 0)
/* Reattach the left/right partial trees around the new root (top-down splay). */
#define SPLAY_ASSEMBLE(head, node, left, right, field) do { \
	SPLAY_RIGHT(left, field) = SPLAY_LEFT((head)->sph_root, field); \
	SPLAY_LEFT(right, field) = SPLAY_RIGHT((head)->sph_root, field);\
	SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(node, field); \
	SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(node, field); \
} while (/*CONSTCOND*/ 0)
/* Generates prototypes and inline functions */
#define SPLAY_PROTOTYPE(name, type, field, cmp) \
void name##_SPLAY(struct name *head, struct type *elm); \
void name##_SPLAY_MINMAX(struct name *head, int __comp); \
struct type *name##_SPLAY_INSERT(struct name *head, struct type *elm); \
struct type *name##_SPLAY_REMOVE(struct name *head, struct type *elm); \
\
/* Finds the node with the same key as elm */ \
static inline struct type * \
name##_SPLAY_FIND(struct name *head, struct type *elm) \
{ \
if (SPLAY_EMPTY(head)) \
return NULL; \
name##_SPLAY(head, elm); \
if ((cmp)(elm, (head)->sph_root) == 0) \
return head->sph_root; \
return NULL; \
} \
\
static inline struct type * \
name##_SPLAY_NEXT(struct name *head, struct type *elm) \
{ \
name##_SPLAY(head, elm); \
if (SPLAY_RIGHT(elm, field) != NULL) { \
elm = SPLAY_RIGHT(elm, field); \
while (SPLAY_LEFT(elm, field) != NULL) { \
elm = SPLAY_LEFT(elm, field); \
} \
} else \
elm = NULL; \
return elm; \
} \
\
static inline struct type * \
name##_SPLAY_MIN_MAX(struct name *head, int val) \
{ \
name##_SPLAY_MINMAX(head, val); \
return SPLAY_ROOT(head); \
}
/* Main splay operation.
* Moves node close to the key of elm to top
*/
/*
 * SPLAY_GENERATE() emits the out-of-line bodies for a splay tree declared
 * with SPLAY_PROTOTYPE(): INSERT, REMOVE, the top-down splay itself, and
 * the min/max splay.  cmp(a, b) must return <0, 0 or >0.
 *
 * Fix vs. the imported text: the first SPLAY_LEFT assignment in
 * name##_SPLAY_INSERT was missing its terminating ';', which made every
 * expansion of this macro a syntax error (it matches upstream FreeBSD
 * sys/tree.h with the ';' restored).
 */
#define SPLAY_GENERATE(name, type, field, cmp) \
struct type * \
name##_SPLAY_INSERT(struct name *head, struct type *elm) \
{ \
	if (SPLAY_EMPTY(head)) { \
		SPLAY_LEFT(elm, field) = SPLAY_RIGHT(elm, field) = NULL;\
	} else { \
		int __comp; \
		name##_SPLAY(head, elm); \
		__comp = (cmp)(elm, (head)->sph_root); \
		if (__comp < 0) { \
			/* elm becomes root; old root moves to its right */ \
			SPLAY_LEFT(elm, field) = SPLAY_LEFT((head)->sph_root,\
				field); \
			SPLAY_RIGHT(elm, field) = (head)->sph_root; \
			SPLAY_LEFT((head)->sph_root, field) = NULL; \
		} else if (__comp > 0) { \
			SPLAY_RIGHT(elm, field) = \
				SPLAY_RIGHT((head)->sph_root, field); \
			SPLAY_LEFT(elm, field) = (head)->sph_root; \
			SPLAY_RIGHT((head)->sph_root, field) = NULL; \
		} else \
			/* key already present: return the existing node */ \
			return ((head)->sph_root); \
	} \
	(head)->sph_root = (elm); \
	return NULL; \
} \
\
struct type * \
name##_SPLAY_REMOVE(struct name *head, struct type *elm) \
{ \
	struct type *__tmp; \
	if (SPLAY_EMPTY(head)) \
		return NULL; \
	name##_SPLAY(head, elm); \
	if ((cmp)(elm, (head)->sph_root) == 0) { \
		if (SPLAY_LEFT((head)->sph_root, field) == NULL) { \
			(head)->sph_root = SPLAY_RIGHT((head)->sph_root,\
				field); \
		} else { \
			/* splay predecessor of elm up, then graft right subtree */ \
			__tmp = SPLAY_RIGHT((head)->sph_root, field); \
			(head)->sph_root = SPLAY_LEFT((head)->sph_root, field);\
			name##_SPLAY(head, elm); \
			SPLAY_RIGHT((head)->sph_root, field) = __tmp; \
		} \
		return elm; \
	} \
	return NULL; \
} \
\
/* Top-down splay: moves the node closest to elm's key to the root. */ \
void \
name##_SPLAY(struct name *head, struct type *elm) \
{ \
	struct type __node, *__left, *__right, *__tmp; \
	int __comp; \
\
	SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;\
	__left = __right = &__node; \
\
	while ((__comp = (cmp)(elm, (head)->sph_root)) != 0) { \
		if (__comp < 0) { \
			__tmp = SPLAY_LEFT((head)->sph_root, field); \
			if (__tmp == NULL) \
				break; \
			if ((cmp)(elm, __tmp) < 0) { \
				SPLAY_ROTATE_RIGHT(head, __tmp, field); \
				if (SPLAY_LEFT((head)->sph_root, field) \
				    == NULL) \
					break; \
			} \
			SPLAY_LINKLEFT(head, __right, field); \
		} else if (__comp > 0) { \
			__tmp = SPLAY_RIGHT((head)->sph_root, field); \
			if (__tmp == NULL) \
				break; \
			if ((cmp)(elm, __tmp) > 0) { \
				SPLAY_ROTATE_LEFT(head, __tmp, field); \
				if (SPLAY_RIGHT((head)->sph_root, field)\
				    == NULL) \
					break; \
			} \
			SPLAY_LINKRIGHT(head, __left, field); \
		} \
	} \
	SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \
} \
\
/* Splay with either the minimum or the maximum element \
 * Used to find minimum or maximum element in tree. \
 * __comp is a constant direction flag: <0 walks left, >0 walks right. \
 */ \
void name##_SPLAY_MINMAX(struct name *head, int __comp) \
{ \
	struct type __node, *__left, *__right, *__tmp; \
\
	SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;\
	__left = __right = &__node; \
\
	while (1) { \
		if (__comp < 0) { \
			__tmp = SPLAY_LEFT((head)->sph_root, field); \
			if (__tmp == NULL) \
				break; \
			if (__comp < 0) { \
				SPLAY_ROTATE_RIGHT(head, __tmp, field); \
				if (SPLAY_LEFT((head)->sph_root, field) \
				    == NULL) \
					break; \
			} \
			SPLAY_LINKLEFT(head, __right, field); \
		} else if (__comp > 0) { \
			__tmp = SPLAY_RIGHT((head)->sph_root, field); \
			if (__tmp == NULL) \
				break; \
			if (__comp > 0) { \
				SPLAY_ROTATE_LEFT(head, __tmp, field); \
				if (SPLAY_RIGHT((head)->sph_root, field)\
				    == NULL) \
					break; \
			} \
			SPLAY_LINKRIGHT(head, __left, field); \
		} \
	} \
	SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \
}
/* Sentinel comparison values passed to name##_SPLAY_MINMAX. */
#define SPLAY_NEGINF -1
#define SPLAY_INF 1
/* Caller-facing wrappers over the generated per-tree splay functions. */
#define SPLAY_INSERT(name, x, y) name##_SPLAY_INSERT(x, y)
#define SPLAY_REMOVE(name, x, y) name##_SPLAY_REMOVE(x, y)
#define SPLAY_FIND(name, x, y) name##_SPLAY_FIND(x, y)
#define SPLAY_NEXT(name, x, y) name##_SPLAY_NEXT(x, y)
/*
 * Fix: the generated function is name##_SPLAY_MINMAX (see the generator
 * above); the previous name##_SPLAY_MIN_MAX spelling matched no generated
 * function, so SPLAY_MIN/SPLAY_MAX could not compile when used.
 */
#define SPLAY_MIN(name, x) (SPLAY_EMPTY(x) ? NULL \
	: name##_SPLAY_MINMAX(x, SPLAY_NEGINF))
#define SPLAY_MAX(name, x) (SPLAY_EMPTY(x) ? NULL \
	: name##_SPLAY_MINMAX(x, SPLAY_INF))
/* In-order traversal over the whole tree (splays each visited node). */
#define SPLAY_FOREACH(x, name, head) \
	for ((x) = SPLAY_MIN(name, head); \
	    (x) != NULL; \
	    (x) = SPLAY_NEXT(name, head, x))
/* Macros that define a red-black tree */
/* Declares the head structure holding the root pointer of an RB tree. */
#define RB_HEAD(name, type) \
struct name { \
	struct type *rbh_root; /* root of the tree */ \
}
/* Static initializer for an RB_HEAD (empty tree). */
#define RB_INITIALIZER(root) \
	{ NULL }
/* Run-time (re)initialization of an RB_HEAD to the empty tree. */
#define RB_INIT(root) ((root)->rbh_root = NULL)
/* Node colors used to maintain the red-black balancing invariants. */
#define RB_BLACK 0
#define RB_RED 1
/* Per-node linkage embedded in each element stored in the tree. */
#define RB_ENTRY(type) \
struct { \
	struct type *rbe_left; /* left element */ \
	struct type *rbe_right; /* right element */ \
	struct type *rbe_parent; /* parent element */ \
	int rbe_color; /* node color */ \
}
/* Accessors for the embedded RB_ENTRY linkage of an element. */
#define RB_LEFT(elm, field) ((elm)->field.rbe_left)
#define RB_RIGHT(elm, field) ((elm)->field.rbe_right)
#define RB_PARENT(elm, field) ((elm)->field.rbe_parent)
#define RB_COLOR(elm, field) ((elm)->field.rbe_color)
#define RB_ROOT(head) ((head)->rbh_root)
#define RB_EMPTY(head) (RB_ROOT(head) == NULL)
/* Link 'elm' under 'parent' as a red node with no children. */
#define RB_SET(elm, parent, field) do { \
	RB_PARENT(elm, field) = parent; \
	RB_LEFT(elm, field) = RB_RIGHT(elm, field) = NULL; \
	RB_COLOR(elm, field) = RB_RED; \
} while (/*CONSTCOND*/ 0)
/* Recolor two nodes in one step: 'black' -> RB_BLACK, 'red' -> RB_RED. */
#define RB_SET_BLACKRED(black, red, field) do { \
	RB_COLOR(black, field) = RB_BLACK; \
	RB_COLOR(red, field) = RB_RED; \
} while (/*CONSTCOND*/ 0)
/* Hook run after structural changes; users may predefine RB_AUGMENT to
 * maintain per-node auxiliary data. Defaults to a no-op.
 */
#ifndef RB_AUGMENT
#define RB_AUGMENT(x) do {} while (0)
#endif
/*
 * Left-rotate around 'elm': its right child (returned in 'tmp') takes
 * elm's place and elm becomes tmp's left child. Parent links and the
 * root pointer are fixed up, and RB_AUGMENT is invoked on the nodes
 * whose subtrees changed.
 */
#define RB_ROTATE_LEFT(head, elm, tmp, field) do { \
	(tmp) = RB_RIGHT(elm, field); \
	RB_RIGHT(elm, field) = RB_LEFT(tmp, field); \
	if (RB_RIGHT(elm, field) != NULL) \
		RB_PARENT(RB_LEFT(tmp, field), field) = (elm); \
	RB_AUGMENT(elm); \
	RB_PARENT(tmp, field) = RB_PARENT(elm, field); \
	if (RB_PARENT(tmp, field) != NULL) { \
		if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \
			RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \
		else \
			RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \
	} else \
		(head)->rbh_root = (tmp); \
	RB_LEFT(tmp, field) = (elm); \
	RB_PARENT(elm, field) = (tmp); \
	RB_AUGMENT(tmp); \
	if ((RB_PARENT(tmp, field))) \
		RB_AUGMENT(RB_PARENT(tmp, field)); \
} while (/*CONSTCOND*/ 0)
/*
 * Right-rotate around 'elm': mirror image of RB_ROTATE_LEFT. The left
 * child (returned in 'tmp') takes elm's place and elm becomes tmp's
 * right child.
 */
#define RB_ROTATE_RIGHT(head, elm, tmp, field) do { \
	(tmp) = RB_LEFT(elm, field); \
	RB_LEFT(elm, field) = RB_RIGHT(tmp, field); \
	if (RB_LEFT(elm, field) != NULL) \
		RB_PARENT(RB_RIGHT(tmp, field), field) = (elm); \
	RB_AUGMENT(elm); \
	RB_PARENT(tmp, field) = RB_PARENT(elm, field); \
	if (RB_PARENT(tmp, field) != NULL) { \
		if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \
			RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \
		else \
			RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \
	} else \
		(head)->rbh_root = (tmp); \
	RB_RIGHT(tmp, field) = (elm); \
	RB_PARENT(elm, field) = (tmp); \
	RB_AUGMENT(tmp); \
	if ((RB_PARENT(tmp, field))) \
		RB_AUGMENT(RB_PARENT(tmp, field)); \
} while (/*CONSTCOND*/ 0)
/* Generates prototypes and inline functions */
#define RB_PROTOTYPE(name, type, field, cmp) \
	RB_PROTOTYPE_INTERNAL(name, type, field, cmp,)
/* Same as RB_PROTOTYPE but marks the generated functions static. */
#define RB_PROTOTYPE_STATIC(name, type, field, cmp) \
	RB_PROTOTYPE_INTERNAL(name, type, field, cmp, __unused static)
/* Emits one prototype per generated operation; 'attr' carries the
 * storage class / attributes.
 */
#define RB_PROTOTYPE_INTERNAL(name, type, field, cmp, attr) \
	RB_PROTOTYPE_INSERT_COLOR(name, type, attr); \
	RB_PROTOTYPE_REMOVE_COLOR(name, type, attr); \
	RB_PROTOTYPE_INSERT(name, type, attr); \
	RB_PROTOTYPE_REMOVE(name, type, attr); \
	RB_PROTOTYPE_FIND(name, type, attr); \
	RB_PROTOTYPE_NFIND(name, type, attr); \
	RB_PROTOTYPE_NEXT(name, type, attr); \
	RB_PROTOTYPE_PREV(name, type, attr); \
	RB_PROTOTYPE_MINMAX(name, type, attr);
#define RB_PROTOTYPE_INSERT_COLOR(name, type, attr) \
	attr void name##_RB_INSERT_COLOR(struct name *, struct type *)
#define RB_PROTOTYPE_REMOVE_COLOR(name, type, attr) \
	attr void name##_RB_REMOVE_COLOR(struct name *, struct type *,\
	    struct type *)
#define RB_PROTOTYPE_REMOVE(name, type, attr) \
	attr struct type *name##_RB_REMOVE(struct name *, struct type *)
#define RB_PROTOTYPE_INSERT(name, type, attr) \
	attr struct type *name##_RB_INSERT(struct name *, struct type *)
#define RB_PROTOTYPE_FIND(name, type, attr) \
	attr struct type *name##_RB_FIND(struct name *, struct type *)
#define RB_PROTOTYPE_NFIND(name, type, attr) \
	attr struct type *name##_RB_NFIND(struct name *, struct type *)
#define RB_PROTOTYPE_NEXT(name, type, attr) \
	attr struct type *name##_RB_NEXT(struct type *)
#define RB_PROTOTYPE_PREV(name, type, attr) \
	attr struct type *name##_RB_PREV(struct type *)
#define RB_PROTOTYPE_MINMAX(name, type, attr) \
	attr struct type *name##_RB_MINMAX(struct name *, int)
/* Main rb operation.
 * Moves node close to the key of elm to top
 */
/* Emits the full set of RB function definitions for tree 'name'. */
#define RB_GENERATE(name, type, field, cmp) \
	RB_GENERATE_INTERNAL(name, type, field, cmp,)
/* Same, but with static linkage for file-local trees. */
#define RB_GENERATE_STATIC(name, type, field, cmp) \
	RB_GENERATE_INTERNAL(name, type, field, cmp, __unused static)
#define RB_GENERATE_INTERNAL(name, type, field, cmp, attr) \
	RB_GENERATE_INSERT_COLOR(name, type, field, attr) \
	RB_GENERATE_REMOVE_COLOR(name, type, field, attr) \
	RB_GENERATE_INSERT(name, type, field, cmp, attr) \
	RB_GENERATE_REMOVE(name, type, field, attr) \
	RB_GENERATE_FIND(name, type, field, cmp, attr) \
	RB_GENERATE_NFIND(name, type, field, cmp, attr) \
	RB_GENERATE_NEXT(name, type, field, attr) \
	RB_GENERATE_PREV(name, type, field, attr) \
	RB_GENERATE_MINMAX(name, type, field, attr)
/*
 * Generates name##_RB_INSERT_COLOR: restores the red-black invariants
 * after 'elm' has been linked in red. Walks upward while the parent is
 * red, recoloring (red uncle) or rotating (black uncle), and finally
 * forces the root black.
 */
#define RB_GENERATE_INSERT_COLOR(name, type, field, attr) \
attr void \
name##_RB_INSERT_COLOR(struct name *head, struct type *elm) \
{ \
	struct type *parent, *gparent, *tmp; \
	while ((parent = RB_PARENT(elm, field)) != NULL && \
	    RB_COLOR(parent, field) == RB_RED) { \
		gparent = RB_PARENT(parent, field); \
		if (parent == RB_LEFT(gparent, field)) { \
			tmp = RB_RIGHT(gparent, field); \
			if (tmp && RB_COLOR(tmp, field) == RB_RED) { \
				RB_COLOR(tmp, field) = RB_BLACK; \
				RB_SET_BLACKRED(parent, gparent, field);\
				elm = gparent; \
				continue; \
			} \
			if (RB_RIGHT(parent, field) == elm) { \
				RB_ROTATE_LEFT(head, parent, tmp, field);\
				tmp = parent; \
				parent = elm; \
				elm = tmp; \
			} \
			RB_SET_BLACKRED(parent, gparent, field); \
			RB_ROTATE_RIGHT(head, gparent, tmp, field); \
		} else { \
			tmp = RB_LEFT(gparent, field); \
			if (tmp && RB_COLOR(tmp, field) == RB_RED) { \
				RB_COLOR(tmp, field) = RB_BLACK; \
				RB_SET_BLACKRED(parent, gparent, field);\
				elm = gparent; \
				continue; \
			} \
			if (RB_LEFT(parent, field) == elm) { \
				RB_ROTATE_RIGHT(head, parent, tmp, field);\
				tmp = parent; \
				parent = elm; \
				elm = tmp; \
			} \
			RB_SET_BLACKRED(parent, gparent, field); \
			RB_ROTATE_LEFT(head, gparent, tmp, field); \
		} \
	} \
	RB_COLOR(head->rbh_root, field) = RB_BLACK; \
}
/*
 * Generates name##_RB_REMOVE_COLOR: rebalances after a black node has
 * been unlinked. 'parent' is the parent of the removed position and
 * 'elm' the child that replaced it (may be NULL). Standard deletion
 * fixup: sibling recolor/rotation cases, terminating by painting the
 * final node black.
 */
#define RB_GENERATE_REMOVE_COLOR(name, type, field, attr) \
attr void \
name##_RB_REMOVE_COLOR(struct name *head, \
    struct type *parent, struct type *elm) \
{ \
	struct type *tmp; \
	while ((elm == NULL || RB_COLOR(elm, field) == RB_BLACK) && \
	    elm != RB_ROOT(head)) { \
		if (RB_LEFT(parent, field) == elm) { \
			tmp = RB_RIGHT(parent, field); \
			if (RB_COLOR(tmp, field) == RB_RED) { \
				RB_SET_BLACKRED(tmp, parent, field); \
				RB_ROTATE_LEFT(head, parent, tmp, field);\
				tmp = RB_RIGHT(parent, field); \
			} \
			if ((RB_LEFT(tmp, field) == NULL || \
			    RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) \
			    && (RB_RIGHT(tmp, field) == NULL || \
			    RB_COLOR(RB_RIGHT(tmp, field), field) \
			    == RB_BLACK)){ \
				RB_COLOR(tmp, field) = RB_RED; \
				elm = parent; \
				parent = RB_PARENT(elm, field); \
			} else { \
				if (RB_RIGHT(tmp, field) == NULL || \
				    RB_COLOR(RB_RIGHT(tmp, field), field) \
				    == RB_BLACK) {\
					struct type *oleft; \
					oleft = RB_LEFT(tmp, field); \
					if (oleft != NULL) \
						RB_COLOR(oleft, field) = \
						    RB_BLACK; \
					RB_COLOR(tmp, field) = RB_RED; \
					RB_ROTATE_RIGHT(head, tmp, oleft, \
					    field); \
					tmp = RB_RIGHT(parent, field); \
				} \
				RB_COLOR(tmp, field) = RB_COLOR(parent, field);\
				RB_COLOR(parent, field) = RB_BLACK; \
				if (RB_RIGHT(tmp, field)) \
					RB_COLOR(RB_RIGHT(tmp, field), field) \
					    = RB_BLACK; \
				RB_ROTATE_LEFT(head, parent, tmp, field);\
				elm = RB_ROOT(head); \
				break; \
			} \
		} else { \
			tmp = RB_LEFT(parent, field); \
			if (RB_COLOR(tmp, field) == RB_RED) { \
				RB_SET_BLACKRED(tmp, parent, field); \
				RB_ROTATE_RIGHT(head, parent, tmp, field);\
				tmp = RB_LEFT(parent, field); \
			} \
			if ((RB_LEFT(tmp, field) == NULL || \
			    RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) \
			    && (RB_RIGHT(tmp, field) == NULL || \
			    RB_COLOR(RB_RIGHT(tmp, field), field) == \
			    RB_BLACK)) { \
				RB_COLOR(tmp, field) = RB_RED; \
				elm = parent; \
				parent = RB_PARENT(elm, field); \
			} else { \
				if (RB_LEFT(tmp, field) == NULL || \
				    RB_COLOR(RB_LEFT(tmp, field), field) \
				    == RB_BLACK) { \
					struct type *oright; \
					oright = RB_RIGHT(tmp, field); \
					if (oright != NULL) \
						RB_COLOR(oright, field) \
						    = RB_BLACK; \
					RB_COLOR(tmp, field) = RB_RED; \
					RB_ROTATE_LEFT(head, tmp, oright,\
					    field); \
					tmp = RB_LEFT(parent, field); \
				} \
				RB_COLOR(tmp, field) = RB_COLOR(parent, field);\
				RB_COLOR(parent, field) = RB_BLACK; \
				if (RB_LEFT(tmp, field)) \
					RB_COLOR(RB_LEFT(tmp, field), field) \
					    = RB_BLACK; \
				RB_ROTATE_RIGHT(head, parent, tmp, field);\
				elm = RB_ROOT(head); \
				break; \
			} \
		} \
	} \
	if (elm) \
		RB_COLOR(elm, field) = RB_BLACK; \
}
/*
 * Generates name##_RB_REMOVE: unlinks 'elm' from the tree and returns it.
 * When elm has two children, its in-subtree successor is spliced into
 * elm's position (taking over elm's linkage and color); the color fixup
 * runs when the physically removed node was black.
 *
 * Fix: the closing brace used to carry a trailing backslash, which line-
 * spliced the following "#define RB_GENERATE_INSERT" into this macro's
 * replacement list (an invalid '#' in a function-like macro body) and
 * swallowed the INSERT generator entirely. Sibling generators correctly
 * end without a continuation.
 */
#define RB_GENERATE_REMOVE(name, type, field, attr) \
attr struct type * \
name##_RB_REMOVE(struct name *head, struct type *elm) \
{ \
	struct type *child, *parent, *old = elm; \
	int color; \
	if (RB_LEFT(elm, field) == NULL) \
		child = RB_RIGHT(elm, field); \
	else if (RB_RIGHT(elm, field) == NULL) \
		child = RB_LEFT(elm, field); \
	else { \
		struct type *left; \
		elm = RB_RIGHT(elm, field); \
		while ((left = RB_LEFT(elm, field)) != NULL) \
			elm = left; \
		child = RB_RIGHT(elm, field); \
		parent = RB_PARENT(elm, field); \
		color = RB_COLOR(elm, field); \
		if (child) \
			RB_PARENT(child, field) = parent; \
		if (parent) { \
			if (RB_LEFT(parent, field) == elm) \
				RB_LEFT(parent, field) = child; \
			else \
				RB_RIGHT(parent, field) = child; \
			RB_AUGMENT(parent); \
		} else \
			RB_ROOT(head) = child; \
		if (RB_PARENT(elm, field) == old) \
			parent = elm; \
		(elm)->field = (old)->field; \
		if (RB_PARENT(old, field)) { \
			if (RB_LEFT(RB_PARENT(old, field), field) == old)\
				RB_LEFT(RB_PARENT(old, field), field) = elm;\
			else \
				RB_RIGHT(RB_PARENT(old, field), field) = elm;\
			RB_AUGMENT(RB_PARENT(old, field)); \
		} else \
			RB_ROOT(head) = elm; \
		RB_PARENT(RB_LEFT(old, field), field) = elm; \
		if (RB_RIGHT(old, field)) \
			RB_PARENT(RB_RIGHT(old, field), field) = elm; \
		if (parent) { \
			left = parent; \
			do { \
				RB_AUGMENT(left); \
			} while ((left = RB_PARENT(left, field)) != NULL); \
		} \
		goto color; \
	} \
	parent = RB_PARENT(elm, field); \
	color = RB_COLOR(elm, field); \
	if (child) \
		RB_PARENT(child, field) = parent; \
	if (parent) { \
		if (RB_LEFT(parent, field) == elm) \
			RB_LEFT(parent, field) = child; \
		else \
			RB_RIGHT(parent, field) = child; \
		RB_AUGMENT(parent); \
	} else \
		RB_ROOT(head) = child; \
color: \
	if (color == RB_BLACK) \
		name##_RB_REMOVE_COLOR(head, parent, child); \
	return old; \
}
/*
 * Generates name##_RB_INSERT: standard BST descent using 'cmp'. If an
 * equal element already exists it is returned unchanged; otherwise elm
 * is linked red and name##_RB_INSERT_COLOR restores balance. Returns
 * NULL on successful insertion.
 */
#define RB_GENERATE_INSERT(name, type, field, cmp, attr) \
/* Inserts a node into the RB tree */ \
attr struct type * \
name##_RB_INSERT(struct name *head, struct type *elm) \
{ \
	struct type *tmp; \
	struct type *parent = NULL; \
	int comp = 0; \
	tmp = RB_ROOT(head); \
	while (tmp) { \
		parent = tmp; \
		comp = (cmp)(elm, parent); \
		if (comp < 0) \
			tmp = RB_LEFT(tmp, field); \
		else if (comp > 0) \
			tmp = RB_RIGHT(tmp, field); \
		else \
			return tmp; \
	} \
	RB_SET(elm, parent, field); \
	if (parent != NULL) { \
		if (comp < 0) \
			RB_LEFT(parent, field) = elm; \
		else \
			RB_RIGHT(parent, field) = elm; \
		RB_AUGMENT(parent); \
	} else \
		RB_ROOT(head) = elm; \
	name##_RB_INSERT_COLOR(head, elm); \
	return NULL; \
}
/* Generates name##_RB_FIND: exact-match lookup; NULL if no equal key. */
#define RB_GENERATE_FIND(name, type, field, cmp, attr) \
/* Finds the node with the same key as elm */ \
attr struct type * \
name##_RB_FIND(struct name *head, struct type *elm) \
{ \
	struct type *tmp = RB_ROOT(head); \
	int comp; \
	while (tmp) { \
		comp = cmp(elm, tmp); \
		if (comp < 0) \
			tmp = RB_LEFT(tmp, field); \
		else if (comp > 0) \
			tmp = RB_RIGHT(tmp, field); \
		else \
			return tmp; \
	} \
	return NULL; \
}
/* Generates name##_RB_NFIND: smallest element >= elm; NULL if none.
 * Tracks the last node we went left from ('res') as the best candidate.
 */
#define RB_GENERATE_NFIND(name, type, field, cmp, attr) \
/* Finds the first node greater than or equal to the search key */ \
attr struct type * \
name##_RB_NFIND(struct name *head, struct type *elm) \
{ \
	struct type *tmp = RB_ROOT(head); \
	struct type *res = NULL; \
	int comp; \
	while (tmp) { \
		comp = cmp(elm, tmp); \
		if (comp < 0) { \
			res = tmp; \
			tmp = RB_LEFT(tmp, field); \
		} \
		else if (comp > 0) \
			tmp = RB_RIGHT(tmp, field); \
		else \
			return tmp; \
	} \
	return res; \
}
/* Generates name##_RB_NEXT: in-order successor of elm, NULL at the end.
 * Either the leftmost node of the right subtree, or the nearest ancestor
 * of which elm lies in the left subtree.
 */
#define RB_GENERATE_NEXT(name, type, field, attr) \
/* ARGSUSED */ \
attr struct type * \
name##_RB_NEXT(struct type *elm) \
{ \
	if (RB_RIGHT(elm, field)) { \
		elm = RB_RIGHT(elm, field); \
		while (RB_LEFT(elm, field)) \
			elm = RB_LEFT(elm, field); \
	} else { \
		if (RB_PARENT(elm, field) && \
		    (elm == RB_LEFT(RB_PARENT(elm, field), field))) \
			elm = RB_PARENT(elm, field); \
		else { \
			while (RB_PARENT(elm, field) && \
			    (elm == RB_RIGHT(RB_PARENT(elm, field), field)))\
				elm = RB_PARENT(elm, field); \
			elm = RB_PARENT(elm, field); \
		} \
	} \
	return elm; \
}
/* Generates name##_RB_PREV: in-order predecessor of elm (mirror of
 * name##_RB_NEXT), NULL at the beginning of the tree.
 */
#define RB_GENERATE_PREV(name, type, field, attr) \
/* ARGSUSED */ \
attr struct type * \
name##_RB_PREV(struct type *elm) \
{ \
	if (RB_LEFT(elm, field)) { \
		elm = RB_LEFT(elm, field); \
		while (RB_RIGHT(elm, field)) \
			elm = RB_RIGHT(elm, field); \
	} else { \
		if (RB_PARENT(elm, field) && \
		    (elm == RB_RIGHT(RB_PARENT(elm, field), field))) \
			elm = RB_PARENT(elm, field); \
		else { \
			while (RB_PARENT(elm, field) && \
			    (elm == RB_LEFT(RB_PARENT(elm, field), field)))\
				elm = RB_PARENT(elm, field); \
			elm = RB_PARENT(elm, field); \
		} \
	} \
	return elm; \
}
/* Generates name##_RB_MINMAX: walks left (val < 0, minimum) or right
 * (val > 0, maximum) to the end of the tree; NULL when empty.
 */
#define RB_GENERATE_MINMAX(name, type, field, attr) \
attr struct type * \
name##_RB_MINMAX(struct name *head, int val) \
{ \
	struct type *tmp = RB_ROOT(head); \
	struct type *parent = NULL; \
	while (tmp) { \
		parent = tmp; \
		if (val < 0) \
			tmp = RB_LEFT(tmp, field); \
		else \
			tmp = RB_RIGHT(tmp, field); \
	} \
	return parent; \
}
/* Sentinel directions handed to name##_RB_MINMAX. */
#define RB_NEGINF -1
#define RB_INF 1
/* Caller-facing wrappers over the generated per-tree functions. */
#define RB_INSERT(name, x, y) name##_RB_INSERT(x, y)
#define RB_REMOVE(name, x, y) name##_RB_REMOVE(x, y)
#define RB_FIND(name, x, y) name##_RB_FIND(x, y)
#define RB_NFIND(name, x, y) name##_RB_NFIND(x, y)
#define RB_NEXT(name, x, y) name##_RB_NEXT(y)
#define RB_PREV(name, x, y) name##_RB_PREV(y)
#define RB_MIN(name, x) name##_RB_MINMAX(x, RB_NEGINF)
#define RB_MAX(name, x) name##_RB_MINMAX(x, RB_INF)
/* In-order iteration. The _SAFE variants fetch the next element before
 * the loop body runs, so the body may remove/free the current one.
 */
#define RB_FOREACH(x, name, head) \
	for ((x) = RB_MIN(name, head); \
	    (x) != NULL; \
	    (x) = name##_RB_NEXT(x))
#define RB_FOREACH_FROM(x, name, y) \
	for ((x) = (y); \
	    ((x) != NULL) && ((y) = name##_RB_NEXT(x), (x) != NULL); \
	    (x) = (y))
#define RB_FOREACH_SAFE(x, name, head, y) \
	for ((x) = RB_MIN(name, head); \
	    ((x) != NULL) && ((y) = name##_RB_NEXT(x), (x) != NULL); \
	    (x) = (y))
#define RB_FOREACH_REVERSE(x, name, head) \
	for ((x) = RB_MAX(name, head); \
	    (x) != NULL; \
	    (x) = name##_RB_PREV(x))
#define RB_FOREACH_REVERSE_FROM(x, name, y) \
	for ((x) = (y); \
	    ((x) != NULL) && ((y) = name##_RB_PREV(x), (x) != NULL); \
	    (x) = (y))
#define RB_FOREACH_REVERSE_SAFE(x, name, head, y) \
	for ((x) = RB_MAX(name, head); \
	    ((x) != NULL) && ((y) = name##_RB_PREV(x), (x) != NULL); \
	    (x) = (y))
#endif /* _TREE_H_ */

139
devicemodel/include/types.h Normal file
View File

@@ -0,0 +1,139 @@
#ifndef _TYPES_H_
#define _TYPES_H_
#include "macros.h"
#include <stdint.h>
#include <sched.h>
#include <sys/types.h>
/* BSD limits retained for compatibility with the ported bhyve sources. */
#define MAXCOMLEN 19 /* max command name remembered */
#define MAXINTERP PATH_MAX /* max interpreter file name length */
#define MAXLOGNAME 33 /* max login name length (incl. NUL) */
#define SPECNAMELEN 63 /* max length of devicename */
/* FreeBSD type names mapped onto Linux/stdint equivalents. */
typedef cpu_set_t cpuset_t;
typedef uint64_t vm_paddr_t;
typedef uint64_t vm_ooffset_t;
typedef uint64_t cap_ioctl_t;
/* Number of elements in a statically sized array. */
#define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
/* GCC attribute shorthands. */
#define __aligned(x) __attribute__((aligned(x)))
#define __section(x) __attribute__((__section__(x)))
/*
 * Linker-set machinery: DATA_SET places a pointer to 'sym' in section
 * "set_<set>"; SET_DECLARE/SET_FOREACH iterate the pointers between the
 * linker-provided __start_/__stop_ symbols of that section.
 */
#define __MAKE_SET(set, sym) \
	static void const * const __set_##set##_sym_##sym \
	__section("set_" #set) __attribute__((used)) = &sym
#define DATA_SET(set, sym) __MAKE_SET(set, sym)
#define SET_DECLARE(set, ptype)\
	extern ptype * __CONCAT(__start_set_, set); \
	extern ptype *__CONCAT(__stop_set_, set)
#define SET_BEGIN(set) \
	(&__CONCAT(__start_set_, set))
#define SET_LIMIT(set) \
	(&__CONCAT(__stop_set_, set))
#define SET_FOREACH(pvar, set) \
	for (pvar = SET_BEGIN(set); pvar < SET_LIMIT(set); pvar++)
/* BSD-style element count (same value as ARRAY_SIZE above). */
#define nitems(x) (sizeof((x)) / sizeof((x)[0]))
/* Round up/down to a power-of-two boundary 'y'. */
#define roundup2(x, y) (((x)+((y)-1))&(~((y)-1)))
#define rounddown2(x, y) ((x)&(~((y)-1)))
/* Decode a big-endian 16-bit value from an arbitrary byte buffer. */
static inline uint16_t
be16dec(const void *pp)
{
	const uint8_t *bytes = (const uint8_t *)pp;

	return (uint16_t)((bytes[0] << 8) | bytes[1]);
}
/* Decode a big-endian 32-bit value from an arbitrary byte buffer. */
static inline uint32_t
be32dec(const void *pp)
{
	const uint8_t *bytes = (const uint8_t *)pp;
	uint32_t value = 0;
	int i;

	for (i = 0; i < 4; i++)
		value = (value << 8) | bytes[i];
	return value;
}
/* Encode a 16-bit value into a buffer in big-endian byte order. */
static inline void
be16enc(void *pp, uint16_t u)
{
	uint8_t *bytes = (uint8_t *)pp;

	bytes[0] = (uint8_t)(u >> 8);
	bytes[1] = (uint8_t)u;
}
/* Encode a 32-bit value into a buffer in big-endian byte order. */
static inline void
be32enc(void *pp, uint32_t u)
{
	uint8_t *bytes = (uint8_t *)pp;
	int i;

	for (i = 0; i < 4; i++)
		bytes[i] = (uint8_t)(u >> (8 * (3 - i)));
}
/*
 * Find last (most significant) set bit in a 64-bit mask.
 * Returns the 1-based index of the highest set bit, or 0 when mask == 0.
 * Uses __builtin_clzll (operand type unsigned long long, always >= 64
 * bits) instead of __builtin_clzl, whose unsigned long operand would
 * silently truncate the 64-bit mask on ILP32 targets.
 */
static inline int
flsl(uint64_t mask)
{
	return mask ? 64 - __builtin_clzll(mask) : 0;
}
/* memory barrier */
/* Full fence via MFENCE; the "memory" clobber also stops compiler
 * reordering. GNU statement-expression so mb() can appear in expressions.
 */
#define mb() ({ asm volatile("mfence" ::: "memory"); (void)0; })
/* Execute CPUID with leaf 'ax'; p[0..3] receive EAX, EBX, ECX, EDX. */
static inline void
do_cpuid(u_int ax, u_int *p)
{
	__asm __volatile("cpuid"
	    : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
	    : "0" (ax));
}
/*
 * Little-endian accessors for USB wire-format fields stored as uint8_t
 * arrays: UGETW/UGETDW/UGETQW read 16/32/64-bit values, USETW/USETDW/
 * USETQW store them.
 */
#define UGETW(w) \
	((w)[0] | \
	(((uint16_t)((w)[1])) << 8))
#define UGETDW(w) \
	((w)[0] | \
	(((uint16_t)((w)[1])) << 8) | \
	(((uint32_t)((w)[2])) << 16) | \
	(((uint32_t)((w)[3])) << 24))
#define UGETQW(w) \
	((w)[0] | \
	(((uint16_t)((w)[1])) << 8) | \
	(((uint32_t)((w)[2])) << 16) | \
	(((uint32_t)((w)[3])) << 24) | \
	(((uint64_t)((w)[4])) << 32) | \
	(((uint64_t)((w)[5])) << 40) | \
	(((uint64_t)((w)[6])) << 48) | \
	(((uint64_t)((w)[7])) << 56))
#define USETW(w, v) do { \
	(w)[0] = (uint8_t)(v); \
	(w)[1] = (uint8_t)((v) >> 8); \
} while (0)
#define USETDW(w, v) do { \
	(w)[0] = (uint8_t)(v); \
	(w)[1] = (uint8_t)((v) >> 8); \
	(w)[2] = (uint8_t)((v) >> 16); \
	(w)[3] = (uint8_t)((v) >> 24); \
} while (0)
#define USETQW(w, v) do { \
	(w)[0] = (uint8_t)(v); \
	(w)[1] = (uint8_t)((v) >> 8); \
	(w)[2] = (uint8_t)((v) >> 16); \
	(w)[3] = (uint8_t)((v) >> 24); \
	(w)[4] = (uint8_t)((v) >> 32); \
	(w)[5] = (uint8_t)((v) >> 40); \
	(w)[6] = (uint8_t)((v) >> 48); \
	(w)[7] = (uint8_t)((v) >> 56); \
} while (0)
#endif

View File

@@ -0,0 +1,47 @@
/*-
* Copyright (c) 2013 Neel Natu <neel@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _UART_CORE_H_
#define _UART_CORE_H_
/* Size in bytes of a legacy UART's I/O port region (8 registers). */
#define UART_IO_BAR_SIZE 8
/* Opaque emulated-UART state; defined by the implementation. */
struct uart_vdev;
/* Callback the UART core uses to assert/deassert its interrupt line. */
typedef void (*uart_intr_func_t)(void *arg);
/* Create an emulated UART; 'arg' is handed back to both callbacks. */
struct uart_vdev *uart_init(uart_intr_func_t intr_assert,
	uart_intr_func_t intr_deassert, void *arg);
/* Destroy a UART created by uart_init(). */
void uart_deinit(struct uart_vdev *uart);
/* NOTE(review): presumably maps legacy COM 'unit' to its fixed ioport/IRQ
 * pair, returned via *ioaddr/*irq — confirm against the implementation.
 */
int uart_legacy_alloc(int unit, int *ioaddr, int *irq);
void uart_legacy_dealloc(int which);
/* Byte-wide register access at 'offset' within the UART's I/O region. */
uint8_t uart_read(struct uart_vdev *uart, int offset);
void uart_write(struct uart_vdev *uart, int offset, uint8_t value);
/* Attach/configure the UART's backend from option string 'opt'. */
int uart_set_backend(struct uart_vdev *uart, const char *opt);
#endif

851
devicemodel/include/usb.h Normal file
View File

@@ -0,0 +1,851 @@
/* $FreeBSD$ */
/*-
* Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
* Copyright (c) 1998 The NetBSD Foundation, Inc. All rights reserved.
* Copyright (c) 1998 Lennart Augustsson. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* This file contains standard definitions for the following USB
* protocol versions:
*
* USB v1.0
* USB v1.1
* USB v2.0
* USB v3.0
*/
#ifndef _USB_H_
#define _USB_H_
#include "types.h"
#define USB_STACK_VERSION 2000 /* 2.0 */
/* Definition of some hardcoded USB constants. */
#define USB_MAX_IPACKET 8 /* initial USB packet size */
#define USB_EP_MAX (2*16) /* hardcoded */
#define USB_ROOT_HUB_ADDR 1 /* index */
#define USB_MIN_DEVICES 2 /* unused + root HUB */
#define USB_UNCONFIG_INDEX 0xFF /* internal use only */
#define USB_IFACE_INDEX_ANY 0xFF /* internal use only */
#define USB_START_ADDR 0 /* default USB device BUS address
* after USB bus reset
*/
#define USB_CONTROL_ENDPOINT 0 /* default control endpoint */
#define USB_FRAMES_PER_SECOND_FS 1000 /* full speed */
#define USB_FRAMES_PER_SECOND_HS 8000 /* high speed */
#define USB_FS_BYTES_PER_HS_UFRAME 188 /* bytes */
#define USB_HS_MICRO_FRAMES_MAX 8 /* units */
#define USB_ISOC_TIME_MAX 128 /* ms */
/*
* Minimum time a device needs to be powered down to go through a
* power cycle. These values are not in the USB specification.
*/
#define USB_POWER_DOWN_TIME 200 /* ms */
#define USB_PORT_POWER_DOWN_TIME 100 /* ms */
/* Definition of software USB power modes */
#define USB_POWER_MODE_OFF 0 /* turn off device */
#define USB_POWER_MODE_ON 1 /* always on */
#define USB_POWER_MODE_SAVE 2 /* automatic suspend and resume */
#define USB_POWER_MODE_SUSPEND 3 /* force suspend */
#define USB_POWER_MODE_RESUME 4 /* force resume */
/* These are the values from the USB specification. */
#define USB_PORT_RESET_DELAY_SPEC 10 /* ms */
#define USB_PORT_ROOT_RESET_DELAY_SPEC 50 /* ms */
#define USB_PORT_RESET_RECOVERY_SPEC 10 /* ms */
#define USB_PORT_POWERUP_DELAY_SPEC 100 /* ms */
#define USB_PORT_RESUME_DELAY_SPEC 20 /* ms */
#define USB_SET_ADDRESS_SETTLE_SPEC 2 /* ms */
#define USB_RESUME_DELAY_SPEC (20*5) /* ms */
#define USB_RESUME_WAIT_SPEC 10 /* ms */
#define USB_RESUME_RECOVERY_SPEC 10 /* ms */
#define USB_EXTRA_POWER_UP_TIME_SPEC 0 /* ms */
/* Allow for marginal and non-conforming devices. */
#define USB_PORT_RESET_DELAY 50 /* ms */
#define USB_PORT_ROOT_RESET_DELAY 200 /* ms */
#define USB_PORT_RESET_RECOVERY 250 /* ms */
#define USB_PORT_POWERUP_DELAY 300 /* ms */
#define USB_PORT_RESUME_DELAY (20*2) /* ms */
#define USB_SET_ADDRESS_SETTLE 10 /* ms */
#define USB_RESUME_DELAY (50*5) /* ms */
#define USB_RESUME_WAIT 50 /* ms */
#define USB_RESUME_RECOVERY 50 /* ms */
#define USB_EXTRA_POWER_UP_TIME 20 /* ms */
#define USB_MIN_POWER 100 /* mA */
#define USB_MAX_POWER 500 /* mA */
#define USB_BUS_RESET_DELAY 100 /* ms */
/*
* USB record layout in memory:
*
* - USB config 0
* - USB interfaces
* - USB alternative interfaces
* - USB endpoints
*
* - USB config 1
* - USB interfaces
* - USB alternative interfaces
* - USB endpoints
*/
/* Declaration of USB records */
/* USB SETUP packet: the 8-byte control-request header (wire layout). */
struct usb_device_request {
	uint8_t bmRequestType;	/* direction/type/recipient bits below */
	uint8_t bRequest;	/* request code (UR_*) */
	uint16_t wValue;
	uint16_t wIndex;
	uint16_t wLength;	/* length of the data stage */
} __attribute__((packed));
typedef struct usb_device_request usb_device_request_t;
#define UT_WRITE 0x00
#define UT_READ 0x80
#define UT_STANDARD 0x00
#define UT_CLASS 0x20
#define UT_VENDOR 0x40
#define UT_DEVICE 0x00
#define UT_INTERFACE 0x01
#define UT_ENDPOINT 0x02
#define UT_OTHER 0x03
#define UT_READ_DEVICE (UT_READ | UT_STANDARD | UT_DEVICE)
#define UT_READ_INTERFACE (UT_READ | UT_STANDARD | UT_INTERFACE)
#define UT_READ_ENDPOINT (UT_READ | UT_STANDARD | UT_ENDPOINT)
#define UT_WRITE_DEVICE (UT_WRITE | UT_STANDARD | UT_DEVICE)
#define UT_WRITE_INTERFACE (UT_WRITE | UT_STANDARD | UT_INTERFACE)
#define UT_WRITE_ENDPOINT (UT_WRITE | UT_STANDARD | UT_ENDPOINT)
#define UT_READ_CLASS_DEVICE (UT_READ | UT_CLASS | UT_DEVICE)
#define UT_READ_CLASS_INTERFACE (UT_READ | UT_CLASS | UT_INTERFACE)
#define UT_READ_CLASS_OTHER (UT_READ | UT_CLASS | UT_OTHER)
#define UT_READ_CLASS_ENDPOINT (UT_READ | UT_CLASS | UT_ENDPOINT)
#define UT_WRITE_CLASS_DEVICE (UT_WRITE | UT_CLASS | UT_DEVICE)
#define UT_WRITE_CLASS_INTERFACE (UT_WRITE | UT_CLASS | UT_INTERFACE)
#define UT_WRITE_CLASS_OTHER (UT_WRITE | UT_CLASS | UT_OTHER)
#define UT_WRITE_CLASS_ENDPOINT (UT_WRITE | UT_CLASS | UT_ENDPOINT)
#define UT_READ_VENDOR_DEVICE (UT_READ | UT_VENDOR | UT_DEVICE)
#define UT_READ_VENDOR_INTERFACE (UT_READ | UT_VENDOR | UT_INTERFACE)
#define UT_READ_VENDOR_OTHER (UT_READ | UT_VENDOR | UT_OTHER)
#define UT_READ_VENDOR_ENDPOINT (UT_READ | UT_VENDOR | UT_ENDPOINT)
#define UT_WRITE_VENDOR_DEVICE (UT_WRITE | UT_VENDOR | UT_DEVICE)
#define UT_WRITE_VENDOR_INTERFACE (UT_WRITE | UT_VENDOR | UT_INTERFACE)
#define UT_WRITE_VENDOR_OTHER (UT_WRITE | UT_VENDOR | UT_OTHER)
#define UT_WRITE_VENDOR_ENDPOINT (UT_WRITE | UT_VENDOR | UT_ENDPOINT)
/* Requests */
#define UR_GET_STATUS 0x00
#define UR_CLEAR_FEATURE 0x01
#define UR_SET_FEATURE 0x03
#define UR_SET_ADDRESS 0x05
#define UR_GET_DESCRIPTOR 0x06
#define UDESC_DEVICE 0x01
#define UDESC_CONFIG 0x02
#define UDESC_STRING 0x03
#define USB_LANGUAGE_TABLE 0x00 /* language ID string index */
#define UDESC_INTERFACE 0x04
#define UDESC_ENDPOINT 0x05
#define UDESC_DEVICE_QUALIFIER 0x06
#define UDESC_OTHER_SPEED_CONFIGURATION 0x07
#define UDESC_INTERFACE_POWER 0x08
#define UDESC_OTG 0x09
#define UDESC_DEBUG 0x0A
#define UDESC_IFACE_ASSOC 0x0B /* interface association */
#define UDESC_BOS 0x0F /* binary object store */
#define UDESC_DEVICE_CAPABILITY 0x10
#define UDESC_CS_DEVICE 0x21 /* class specific */
#define UDESC_CS_CONFIG 0x22
#define UDESC_CS_STRING 0x23
#define UDESC_CS_INTERFACE 0x24
#define UDESC_CS_ENDPOINT 0x25
#define UDESC_HUB 0x29
#define UDESC_SS_HUB 0x2A /* super speed */
#define UDESC_ENDPOINT_SS_COMP 0x30 /* super speed */
#define UR_SET_DESCRIPTOR 0x07
#define UR_GET_CONFIG 0x08
#define UR_SET_CONFIG 0x09
#define UR_GET_INTERFACE 0x0a
#define UR_SET_INTERFACE 0x0b
#define UR_SYNCH_FRAME 0x0c
#define UR_SET_SEL 0x30
#define UR_ISOCH_DELAY 0x31
/* HUB specific request */
#define UR_GET_BUS_STATE 0x02
#define UR_CLEAR_TT_BUFFER 0x08
#define UR_RESET_TT 0x09
#define UR_GET_TT_STATE 0x0a
#define UR_STOP_TT 0x0b
#define UR_SET_AND_TEST 0x0c /* USB 2.0 only */
#define UR_SET_HUB_DEPTH 0x0c /* USB 3.0 only */
#define USB_SS_HUB_DEPTH_MAX 5
#define UR_GET_PORT_ERR_COUNT 0x0d
/* Feature numbers */
#define UF_ENDPOINT_HALT 0
#define UF_DEVICE_REMOTE_WAKEUP 1
#define UF_TEST_MODE 2
#define UF_U1_ENABLE 0x30
#define UF_U2_ENABLE 0x31
#define UF_LTM_ENABLE 0x32
/* HUB specific features */
#define UHF_C_HUB_LOCAL_POWER 0
#define UHF_C_HUB_OVER_CURRENT 1
#define UHF_PORT_CONNECTION 0
#define UHF_PORT_ENABLE 1
#define UHF_PORT_SUSPEND 2
#define UHF_PORT_OVER_CURRENT 3
#define UHF_PORT_RESET 4
#define UHF_PORT_LINK_STATE 5
#define UHF_PORT_POWER 8
#define UHF_PORT_LOW_SPEED 9
#define UHF_PORT_L1 10
#define UHF_C_PORT_CONNECTION 16
#define UHF_C_PORT_ENABLE 17
#define UHF_C_PORT_SUSPEND 18
#define UHF_C_PORT_OVER_CURRENT 19
#define UHF_C_PORT_RESET 20
#define UHF_PORT_TEST 21
#define UHF_PORT_INDICATOR 22
#define UHF_C_PORT_L1 23
/* SuperSpeed HUB specific features */
#define UHF_PORT_U1_TIMEOUT 23
#define UHF_PORT_U2_TIMEOUT 24
#define UHF_C_PORT_LINK_STATE 25
#define UHF_C_PORT_CONFIG_ERROR 26
#define UHF_PORT_REMOTE_WAKE_MASK 27
#define UHF_BH_PORT_RESET 28
#define UHF_C_BH_PORT_RESET 29
#define UHF_FORCE_LINKPM_ACCEPT 30
/* Generic header shared by class-specific descriptors (with subtype). */
struct usb_descriptor {
	uint8_t bLength;
	uint8_t bDescriptorType;
	uint8_t bDescriptorSubtype;
} __attribute__((packed));
typedef struct usb_descriptor usb_descriptor_t;
/*
 * Standard USB device descriptor.
 * Note: bcdUSB is held as a native uint16_t here (not the two-byte
 * uWord array of the original BSD headers), so the USB major version
 * is its high byte. The UD_IS_USB* checks previously still indexed
 * bcdUSB as an array ((d)->bcdUSB[1]), which cannot compile against a
 * scalar field; they now extract the high byte with a shift.
 */
struct usb_device_descriptor {
	uint8_t bLength;
	uint8_t bDescriptorType;
	uint16_t bcdUSB;	/* USB release in BCD, e.g. 0x0200 = 2.0 */
#define UD_USB_2_0 0x0200
#define UD_USB_3_0 0x0300
#define UD_IS_USB2(d) ((((d)->bcdUSB >> 8) & 0xFF) == 0x02)
#define UD_IS_USB3(d) ((((d)->bcdUSB >> 8) & 0xFF) == 0x03)
	uint8_t bDeviceClass;
	uint8_t bDeviceSubClass;
	uint8_t bDeviceProtocol;
	uint8_t bMaxPacketSize;
	/* The fields below are not part of the initial descriptor. */
	uint16_t idVendor;
	uint16_t idProduct;
	uint16_t bcdDevice;
	uint8_t iManufacturer;
	uint8_t iProduct;
	uint8_t iSerialNumber;
	uint8_t bNumConfigurations;
} __attribute__((packed));
typedef struct usb_device_descriptor usb_device_descriptor_t;
/* Binary Device Object Store (BOS) */
/* BOS header; wTotalLength covers all following capability descriptors. */
struct usb_bos_descriptor {
	uint8_t bLength;
	uint8_t bDescriptorType;
	uint16_t wTotalLength;
	uint8_t bNumDeviceCaps;
} __attribute__((packed));
typedef struct usb_bos_descriptor usb_bos_descriptor_t;
/* Binary Device Object Store Capability */
/* Common header of each BOS device-capability descriptor. */
struct usb_bos_cap_descriptor {
	uint8_t bLength;
	uint8_t bDescriptorType;
	uint8_t bDevCapabilityType;
#define USB_DEVCAP_RESERVED 0x00
#define USB_DEVCAP_WUSB 0x01
#define USB_DEVCAP_USB2EXT 0x02
#define USB_DEVCAP_SUPER_SPEED 0x03
#define USB_DEVCAP_CONTAINER_ID 0x04
	/* data ... */
} __attribute__((packed));
typedef struct usb_bos_cap_descriptor usb_bos_cap_descriptor_t;
/* USB 2.0 Extension capability; LPM/BESL flags live in bmAttributes. */
struct usb_devcap_usb2ext_descriptor {
	uint8_t bLength;
	uint8_t bDescriptorType;
	uint8_t bDevCapabilityType;
	uint32_t bmAttributes;
#define USB_V2EXT_LPM (1U << 1)
#define USB_V2EXT_BESL_SUPPORTED (1U << 2)
#define USB_V2EXT_BESL_BASELINE_VALID (1U << 3)
#define USB_V2EXT_BESL_DEEP_VALID (1U << 4)
#define USB_V2EXT_BESL_BASELINE_GET(x) (((x) >> 8) & 0xF)
#define USB_V2EXT_BESL_DEEP_GET(x) (((x) >> 12) & 0xF)
} __attribute__((packed));
typedef struct usb_devcap_usb2ext_descriptor usb_devcap_usb2ext_descriptor_t;
/* SuperSpeed device capability descriptor. */
struct usb_devcap_ss_descriptor {
	uint8_t bLength;
	uint8_t bDescriptorType;
	uint8_t bDevCapabilityType;
	uint8_t bmAttributes;
	uint16_t wSpeedsSupported;
	uint8_t bFunctionalitySupport;
	uint8_t bU1DevExitLat;
	uint16_t wU2DevExitLat;
} __attribute__((packed));
typedef struct usb_devcap_ss_descriptor usb_devcap_ss_descriptor_t;
/* Container ID device capability descriptor. */
struct usb_devcap_container_id_descriptor {
	uint8_t bLength;
	uint8_t bDescriptorType;
	uint8_t bDevCapabilityType;
	uint8_t bReserved;
	uint8_t bContainerID;
} __attribute__((packed));
typedef struct usb_devcap_container_id_descriptor
	usb_devcap_container_id_descriptor_t;
/* Device class codes */
#define UDCLASS_IN_INTERFACE 0x00
#define UDCLASS_COMM 0x02
#define UDCLASS_HUB 0x09
#define UDSUBCLASS_HUB 0x00
#define UDPROTO_FSHUB 0x00
#define UDPROTO_HSHUBSTT 0x01
#define UDPROTO_HSHUBMTT 0x02
#define UDPROTO_SSHUB 0x03
#define UDCLASS_DIAGNOSTIC 0xdc
#define UDCLASS_WIRELESS 0xe0
#define UDSUBCLASS_RF 0x01
#define UDPROTO_BLUETOOTH 0x01
#define UDCLASS_VENDOR 0xff
/* Standard configuration descriptor. */
struct usb_config_descriptor {
uint8_t bLength;
uint8_t bDescriptorType;
uint16_t wTotalLength;	/* config + all interface/endpoint descriptors */
uint8_t bNumInterface;	/* number of interfaces in this configuration */
uint8_t bConfigurationValue;	/* value used by SET_CONFIGURATION */
#define USB_UNCONFIG_NO 0	/* "unconfigured" configuration value */
uint8_t iConfiguration;	/* string descriptor index, 0 == none */
uint8_t bmAttributes;	/* UC_* bits below */
#define UC_BUS_POWERED 0x80
#define UC_SELF_POWERED 0x40
#define UC_REMOTE_WAKEUP 0x20
uint8_t bMaxPower; /* max current in 2 mA units */
#define UC_POWER_FACTOR 2	/* multiply bMaxPower by this to get mA */
} __attribute__((packed));
typedef struct usb_config_descriptor usb_config_descriptor_t;
/* Standard interface descriptor. */
struct usb_interface_descriptor {
uint8_t bLength;
uint8_t bDescriptorType;
uint8_t bInterfaceNumber;
uint8_t bAlternateSetting;
uint8_t bNumEndpoints;	/* endpoints used, excluding endpoint 0 */
uint8_t bInterfaceClass;	/* see UICLASS_* codes below */
uint8_t bInterfaceSubClass;
uint8_t bInterfaceProtocol;
uint8_t iInterface;	/* string descriptor index, 0 == none */
} __attribute__((packed));
typedef struct usb_interface_descriptor usb_interface_descriptor_t;
/*
 * Interface Association Descriptor (IAD): groups a run of
 * bInterfaceCount interfaces, starting at bFirstInterface,
 * into a single function.
 */
struct usb_interface_assoc_descriptor {
uint8_t bLength;
uint8_t bDescriptorType;
uint8_t bFirstInterface;
uint8_t bInterfaceCount;
uint8_t bFunctionClass;
uint8_t bFunctionSubClass;
uint8_t bFunctionProtocol;
uint8_t iFunction;	/* string descriptor index, 0 == none */
} __attribute__((packed));
typedef struct usb_interface_assoc_descriptor usb_interface_assoc_descriptor_t;
/* Interface class codes */
#define UICLASS_UNSPEC 0x00
#define UICLASS_AUDIO 0x01 /* audio */
#define UISUBCLASS_AUDIOCONTROL 1
#define UISUBCLASS_AUDIOSTREAM 2
#define UISUBCLASS_MIDISTREAM 3
#define UICLASS_CDC 0x02 /* communication */
#define UISUBCLASS_DIRECT_LINE_CONTROL_MODEL 1
#define UISUBCLASS_ABSTRACT_CONTROL_MODEL 2
#define UISUBCLASS_TELEPHONE_CONTROL_MODEL 3
#define UISUBCLASS_MULTICHANNEL_CONTROL_MODEL 4
#define UISUBCLASS_CAPI_CONTROLMODEL 5
#define UISUBCLASS_ETHERNET_NETWORKING_CONTROL_MODEL 6
#define UISUBCLASS_ATM_NETWORKING_CONTROL_MODEL 7
#define UISUBCLASS_WIRELESS_HANDSET_CM 8
#define UISUBCLASS_DEVICE_MGMT 9
#define UISUBCLASS_MOBILE_DIRECT_LINE_MODEL 10
#define UISUBCLASS_OBEX 11
#define UISUBCLASS_ETHERNET_EMULATION_MODEL 12
#define UISUBCLASS_NETWORK_CONTROL_MODEL 13
#define UIPROTO_CDC_NONE 0
#define UIPROTO_CDC_AT 1
#define UICLASS_HID 0x03
#define UISUBCLASS_BOOT 1
#define UIPROTO_BOOT_KEYBOARD 1
#define UIPROTO_MOUSE 2
#define UICLASS_PHYSICAL 0x05
#define UICLASS_IMAGE 0x06
#define UISUBCLASS_SIC 1 /* still image class */
#define UICLASS_PRINTER 0x07
#define UISUBCLASS_PRINTER 1
#define UIPROTO_PRINTER_UNI 1
#define UIPROTO_PRINTER_BI 2
#define UIPROTO_PRINTER_1284 3
#define UICLASS_MASS 0x08
#define UISUBCLASS_RBC 1
#define UISUBCLASS_SFF8020I 2
#define UISUBCLASS_QIC157 3
#define UISUBCLASS_UFI 4
#define UISUBCLASS_SFF8070I 5
#define UISUBCLASS_SCSI 6
#define UIPROTO_MASS_CBI_I 0
#define UIPROTO_MASS_CBI 1
#define UIPROTO_MASS_BBB_OLD 2 /* Not in the spec anymore */
#define UIPROTO_MASS_BBB 80 /* 'P' for the Iomega Zip drive */
#define UICLASS_HUB 0x09
#define UISUBCLASS_HUB 0
#define UIPROTO_FSHUB 0
#define UIPROTO_HSHUBSTT 0 /* Yes, same as previous */
#define UIPROTO_HSHUBMTT 1
#define UICLASS_CDC_DATA 0x0a
#define UISUBCLASS_DATA 0x00
#define UIPROTO_DATA_ISDNBRI 0x30 /* Physical iface */
#define UIPROTO_DATA_HDLC 0x31 /* HDLC */
#define UIPROTO_DATA_TRANSPARENT 0x32 /* Transparent */
#define UIPROTO_DATA_Q921M 0x50 /* Management for Q921 */
#define UIPROTO_DATA_Q921 0x51 /* Data for Q921 */
#define UIPROTO_DATA_Q921TM 0x52 /* TEI multiplexer for Q921 */
#define UIPROTO_DATA_V42BIS 0x90 /* Data compression */
#define UIPROTO_DATA_Q931 0x91 /* Euro-ISDN */
#define UIPROTO_DATA_V120 0x92 /* V.24 rate adaption */
#define UIPROTO_DATA_CAPI 0x93 /* CAPI 2.0 commands */
#define UIPROTO_DATA_HOST_BASED 0xfd /* Host based driver */
#define UIPROTO_DATA_PUF 0xfe /* see Prot. Unit Func. Desc. */
#define UIPROTO_DATA_VENDOR 0xff /* Vendor specific */
#define UIPROTO_DATA_NCM 0x01 /* Network Control Model */
#define UICLASS_SMARTCARD 0x0b
#define UICLASS_FIRM_UPD 0x0c
#define UICLASS_SECURITY 0x0d
#define UICLASS_DIAGNOSTIC 0xdc
#define UICLASS_WIRELESS 0xe0
#define UISUBCLASS_RF 0x01
#define UIPROTO_BLUETOOTH 0x01
#define UIPROTO_RNDIS 0x03
#define UICLASS_IAD 0xEF /* Interface Association Descriptor */
#define UISUBCLASS_SYNC 0x01
#define UIPROTO_ACTIVESYNC 0x01
#define UICLASS_APPL_SPEC 0xfe
#define UISUBCLASS_FIRMWARE_DOWNLOAD 1
#define UISUBCLASS_IRDA 2
#define UIPROTO_IRDA 0
#define UICLASS_VENDOR 0xff
#define UISUBCLASS_XBOX360_CONTROLLER 0x5d
#define UIPROTO_XBOX360_GAMEPAD 0x01
/*
 * Standard endpoint descriptor.  The UE_* macros decode the packed
 * bEndpointAddress (direction + endpoint number) and bmAttributes
 * (transfer type + isoc sync/usage) fields.  Values marked "internal
 * use only" are out-of-band sentinels, not wire values.
 */
struct usb_endpoint_descriptor {
uint8_t bLength;
uint8_t bDescriptorType;
uint8_t bEndpointAddress;	/* direction bit 7 + endpoint number bits 3:0 */
#define UE_GET_DIR(a) ((a) & 0x80)
#define UE_SET_DIR(a, d) ((a) | (((d)&1) << 7))
#define UE_DIR_IN 0x80 /* IN-token endpoint, fixed */
#define UE_DIR_OUT 0x00 /* OUT-token endpoint, fixed */
#define UE_DIR_RX 0xfd /* for internal use only! */
#define UE_DIR_TX 0xfe /* for internal use only! */
#define UE_DIR_ANY 0xff /* for internal use only! */
#define UE_ADDR 0x0f
#define UE_ADDR_ANY 0xff /* for internal use only! */
#define UE_GET_ADDR(a) ((a) & UE_ADDR)
uint8_t bmAttributes;	/* transfer type + isoc attributes */
#define UE_XFERTYPE 0x03
#define UE_CONTROL 0x00
#define UE_ISOCHRONOUS 0x01
#define UE_BULK 0x02
#define UE_INTERRUPT 0x03
#define UE_BULK_INTR 0xfe /* for internal use only! */
#define UE_TYPE_ANY 0xff /* for internal use only! */
#define UE_GET_XFERTYPE(a) ((a) & UE_XFERTYPE)
#define UE_ISO_TYPE 0x0c	/* isoc synchronization type field */
#define UE_ISO_ASYNC 0x04
#define UE_ISO_ADAPT 0x08
#define UE_ISO_SYNC 0x0c
#define UE_GET_ISO_TYPE(a) ((a) & UE_ISO_TYPE)
#define UE_ISO_USAGE 0x30	/* isoc usage type field */
#define UE_ISO_USAGE_DATA 0x00
#define UE_ISO_USAGE_FEEDBACK 0x10
#define UE_ISO_USAGE_IMPLICT_FB 0x20	/* NB: "IMPLICT" spelling is historical */
#define UE_GET_ISO_USAGE(a) ((a) & UE_ISO_USAGE)
uint16_t wMaxPacketSize;
#define UE_ZERO_MPS 0xFFFF /* for internal use only */
uint8_t bInterval;	/* polling interval */
} __attribute__((packed));
typedef struct usb_endpoint_descriptor usb_endpoint_descriptor_t;
/* SuperSpeed endpoint companion descriptor (follows the endpoint desc). */
struct usb_endpoint_ss_comp_descriptor {
uint8_t bLength;
uint8_t bDescriptorType;
uint8_t bMaxBurst;	/* max packets per burst - 1 */
uint8_t bmAttributes;	/* bulk: stream count; isoc: mult (see below) */
#define UE_GET_BULK_STREAMS(x) ((x) & 0x0F)
#define UE_GET_SS_ISO_MULT(x) ((x) & 0x03)
uint16_t wBytesPerInterval;
} __attribute__((packed));
typedef struct usb_endpoint_ss_comp_descriptor
usb_endpoint_ss_comp_descriptor_t;
/* String descriptor: bString holds UTF-16LE code units (no NUL). */
struct usb_string_descriptor {
uint8_t bLength;	/* 2 + 2 * number of code units actually used */
uint8_t bDescriptorType;
uint16_t bString[126];
uint8_t bUnused;
} __attribute__((packed));
typedef struct usb_string_descriptor usb_string_descriptor_t;
/*
 * Build a static, packed string descriptor named 'name' whose payload
 * is the byte list 'm'.  bLength is derived from the payload size.
 */
#define USB_MAKE_STRING_DESC(m, name) \
static const struct { \
uint8_t bLength; \
uint8_t bDescriptorType; \
uint8_t bData[sizeof((uint8_t []){m})]; \
} __attribute__((packed)) name = { \
.bLength = sizeof(name), \
.bDescriptorType = UDESC_STRING, \
.bData = { m }, \
}
/* String descriptor 0: the 2-byte language ID list. */
struct usb_string_lang {
uint8_t bLength;
uint8_t bDescriptorType;
uint8_t bData[2];	/* LANGID, little-endian */
} __attribute__((packed));
typedef struct usb_string_lang usb_string_lang_t;
/*
 * USB 2.0 hub descriptor.  The UHD_* masks decode wHubCharacteristics;
 * DeviceRemovable is a per-port bitmap (bit set == non-removable,
 * see UHD_NOT_REMOV).
 */
struct usb_hub_descriptor {
uint8_t bDescLength;
uint8_t bDescriptorType;
uint8_t bNbrPorts;	/* number of downstream ports */
uint16_t wHubCharacteristics;
#define UHD_PWR 0x0003	/* power switching mode field */
#define UHD_PWR_GANGED 0x0000
#define UHD_PWR_INDIVIDUAL 0x0001
#define UHD_PWR_NO_SWITCH 0x0002
#define UHD_COMPOUND 0x0004	/* hub is part of a compound device */
#define UHD_OC 0x0018	/* over-current protection mode field */
#define UHD_OC_GLOBAL 0x0000
#define UHD_OC_INDIVIDUAL 0x0008
#define UHD_OC_NONE 0x0010
#define UHD_TT_THINK 0x0060	/* TT think time field */
#define UHD_TT_THINK_8 0x0000
#define UHD_TT_THINK_16 0x0020
#define UHD_TT_THINK_24 0x0040
#define UHD_TT_THINK_32 0x0060
#define UHD_PORT_IND 0x0080	/* port indicators supported */
uint8_t bPwrOn2PwrGood; /* delay in 2 ms units */
#define UHD_PWRON_FACTOR 2	/* multiply bPwrOn2PwrGood by this for ms */
uint8_t bHubContrCurrent;
uint8_t DeviceRemovable[32]; /* max 255 ports */
#define UHD_NOT_REMOV(desc, i) \
(((desc)->DeviceRemovable[(i)/8] >> ((i) % 8)) & 1)
uint8_t PortPowerCtrlMask[1]; /* deprecated */
} __attribute__((packed));
typedef struct usb_hub_descriptor usb_hub_descriptor_t;
/* SuperSpeed (USB 3.x) hub descriptor. */
struct usb_hub_ss_descriptor {
uint8_t bLength;
uint8_t bDescriptorType;
uint8_t bNbrPorts;	/* number of downstream ports */
uint16_t wHubCharacteristics;	/* same UHD_* encoding as USB 2.0 hub */
uint8_t bPwrOn2PwrGood; /* delay in 2 ms units */
uint8_t bHubContrCurrent;
uint8_t bHubHdrDecLat;	/* hub packet header decode latency */
uint16_t wHubDelay;
uint8_t DeviceRemovable[32]; /* max 255 ports */
} __attribute__((packed));
typedef struct usb_hub_ss_descriptor usb_hub_ss_descriptor_t;
/* minimum HUB descriptor (8-ports maximum) */
struct usb_hub_descriptor_min {
uint8_t bDescLength;
uint8_t bDescriptorType;
uint8_t bNbrPorts;
uint16_t wHubCharacteristics;
uint8_t bPwrOn2PwrGood;
uint8_t bHubContrCurrent;
uint8_t DeviceRemovable[1];	/* single byte covers up to 8 ports */
uint8_t PortPowerCtrlMask[1];
} __attribute__((packed));
typedef struct usb_hub_descriptor_min usb_hub_descriptor_min_t;
/*
 * Device qualifier descriptor: describes how the device would look
 * when operating at its other (high/full) speed.
 */
struct usb_device_qualifier {
uint8_t bLength;
uint8_t bDescriptorType;
uint16_t bcdUSB;	/* USB spec release, BCD */
uint8_t bDeviceClass;
uint8_t bDeviceSubClass;
uint8_t bDeviceProtocol;
uint8_t bMaxPacketSize0;	/* max packet size for endpoint 0 */
uint8_t bNumConfigurations;
uint8_t bReserved;
} __attribute__((packed));
typedef struct usb_device_qualifier usb_device_qualifier_t;
/* On-The-Go (OTG) descriptor: advertises SRP/HNP support. */
struct usb_otg_descriptor {
uint8_t bLength;
uint8_t bDescriptorType;
uint8_t bmAttributes;	/* UOTG_* bits below */
#define UOTG_SRP 0x01	/* Session Request Protocol supported */
#define UOTG_HNP 0x02	/* Host Negotiation Protocol supported */
} __attribute__((packed));
typedef struct usb_otg_descriptor usb_otg_descriptor_t;
/* OTG feature selectors (for SET_FEATURE requests) */
#define UOTG_B_HNP_ENABLE 3
#define UOTG_A_HNP_SUPPORT 4
#define UOTG_A_ALT_HNP_SUPPORT 5
/* GET_STATUS reply for a device or an endpoint (which flags apply
 * depends on the request recipient). */
struct usb_status {
uint16_t wStatus;
/* Device status flags */
#define UDS_SELF_POWERED 0x0001
#define UDS_REMOTE_WAKEUP 0x0002
/* Endpoint status flags */
#define UES_HALT 0x0001
} __attribute__((packed));
typedef struct usb_status usb_status_t;
/* GET_STATUS reply for a hub: current status plus change bits. */
struct usb_hub_status {
uint16_t wHubStatus;
#define UHS_LOCAL_POWER 0x0001
#define UHS_OVER_CURRENT 0x0002
uint16_t wHubChange;	/* same bit positions as wHubStatus */
} __attribute__((packed));
typedef struct usb_hub_status usb_hub_status_t;
/*
 * GET_STATUS reply for a hub port: current port state (UPS_*) and
 * sticky change bits (UPS_C_*, cleared via CLEAR_FEATURE).
 */
struct usb_port_status {
uint16_t wPortStatus;
#define UPS_CURRENT_CONNECT_STATUS 0x0001
#define UPS_PORT_ENABLED 0x0002
#define UPS_SUSPEND 0x0004
#define UPS_OVERCURRENT_INDICATOR 0x0008
#define UPS_RESET 0x0010
#define UPS_PORT_L1 0x0020 /* USB 2.0 only */
/* The link-state bits are valid for Super-Speed USB HUBs */
#define UPS_PORT_LINK_STATE_GET(x) (((x) >> 5) & 0xF)
#define UPS_PORT_LINK_STATE_SET(x) (((x) & 0xF) << 5)
#define UPS_PORT_LS_U0 0x00
#define UPS_PORT_LS_U1 0x01
#define UPS_PORT_LS_U2 0x02
#define UPS_PORT_LS_U3 0x03
#define UPS_PORT_LS_SS_DIS 0x04
#define UPS_PORT_LS_RX_DET 0x05
#define UPS_PORT_LS_SS_INA 0x06
#define UPS_PORT_LS_POLL 0x07
#define UPS_PORT_LS_RECOVER 0x08
#define UPS_PORT_LS_HOT_RST 0x09
#define UPS_PORT_LS_COMP_MODE 0x0A
#define UPS_PORT_LS_LOOPBACK 0x0B
#define UPS_PORT_LS_RESUME 0x0F
#define UPS_PORT_POWER 0x0100
#define UPS_PORT_POWER_SS 0x0200 /* super-speed only */
#define UPS_LOW_SPEED 0x0200
#define UPS_HIGH_SPEED 0x0400
#define UPS_OTHER_SPEED 0x0600 /* currently FreeBSD specific */
#define UPS_PORT_TEST 0x0800
#define UPS_PORT_INDICATOR 0x1000
#define UPS_PORT_MODE_DEVICE 0x8000 /* currently FreeBSD specific */
uint16_t wPortChange;	/* change bits, cleared by CLEAR_FEATURE */
#define UPS_C_CONNECT_STATUS 0x0001
#define UPS_C_PORT_ENABLED 0x0002
#define UPS_C_SUSPEND 0x0004
#define UPS_C_OVERCURRENT_INDICATOR 0x0008
#define UPS_C_PORT_RESET 0x0010
#define UPS_C_PORT_L1 0x0020 /* USB 2.0 only */
#define UPS_C_BH_PORT_RESET 0x0020 /* USB 3.0 only */
#define UPS_C_PORT_LINK_STATE 0x0040
#define UPS_C_PORT_CONFIG_ERROR 0x0080
} __attribute__((packed));
typedef struct usb_port_status usb_port_status_t;
/*
 * The "USB_SPEED" enums define all the supported USB speeds.
 */
enum usb_dev_speed {
USB_SPEED_VARIABLE,
USB_SPEED_LOW,
USB_SPEED_FULL,
USB_SPEED_HIGH,
USB_SPEED_SUPER,
};
#define USB_SPEED_MAX (USB_SPEED_SUPER+1)	/* number of speed values */
/*
 * The "USB_REV" enums define all the supported USB revisions.
 */
enum usb_revision {
USB_REV_UNKNOWN,
USB_REV_PRE_1_0,
USB_REV_1_0,
USB_REV_1_1,
USB_REV_2_0,
USB_REV_2_5,
USB_REV_3_0
};
#define USB_REV_MAX (USB_REV_3_0+1)	/* number of revision values */
/*
 * Supported host controller modes.
 */
enum usb_hc_mode {
USB_MODE_HOST, /* initiates transfers */
USB_MODE_DEVICE, /* bus transfer target */
USB_MODE_DUAL /* can be host or device */
};
#define USB_MODE_MAX (USB_MODE_DUAL+1)	/* number of mode values */
/*
 * The "USB_STATE" enums define all the supported device states,
 * ordered from least to most initialized.
 */
enum usb_dev_state {
USB_STATE_DETACHED,
USB_STATE_ATTACHED,
USB_STATE_POWERED,
USB_STATE_ADDRESSED,
USB_STATE_CONFIGURED,
};
#define USB_STATE_MAX (USB_STATE_CONFIGURED+1)	/* number of state values */
/*
 * The "USB_EP_MODE" enums define all the currently supported
 * endpoint modes.
 */
enum usb_ep_mode {
USB_EP_MODE_DEFAULT,
USB_EP_MODE_STREAMS, /* USB3.0 specific */
USB_EP_MODE_HW_MASS_STORAGE,
USB_EP_MODE_HW_SERIAL,
USB_EP_MODE_HW_ETHERNET_CDC,
USB_EP_MODE_HW_ETHERNET_NCM,
USB_EP_MODE_MAX
};
/* Default USB configuration */
#define USB_HAVE_UGEN 1
#define USB_HAVE_DEVCTL 1
#define USB_HAVE_BUSDMA 1
#define USB_HAVE_COMPAT_LINUX 1
#define USB_HAVE_USER_IO 1
#define USB_HAVE_MBUF 1
#define USB_HAVE_TT_SUPPORT 1
#define USB_HAVE_POWERD 1
#define USB_HAVE_MSCTEST 1
#define USB_HAVE_MSCTEST_DETACH 1
#define USB_HAVE_PF 1
#define USB_HAVE_ROOT_MOUNT_HOLD 1
#define USB_HAVE_ID_SECTION 1
#define USB_HAVE_PER_BUS_PROCESS 1
#define USB_HAVE_FIXED_ENDPOINT 0
#define USB_HAVE_FIXED_IFACE 0
#define USB_HAVE_FIXED_CONFIG 0
#define USB_HAVE_FIXED_PORT 0
#define USB_HAVE_DISABLE_ENUM 1
/* define zero ticks callout value */
#define USB_CALLOUT_ZERO_TICKS 1
#if (!defined(USB_HOST_ALIGN)) || (USB_HOST_ALIGN <= 0)
/* Use default value. */
#undef USB_HOST_ALIGN
#if defined(__arm__) || defined(__mips__) || defined(__powerpc__)
/* Arm and MIPS need at least this much, if not more */
#define USB_HOST_ALIGN 32
#else
#define USB_HOST_ALIGN 8 /* bytes, must be power of two */
#endif
#endif
/* Sanity check for USB_HOST_ALIGN: Verify power of two. */
#if ((-USB_HOST_ALIGN) & USB_HOST_ALIGN) != USB_HOST_ALIGN
#error "USB_HOST_ALIGN is not power of two."
#endif
#define USB_FS_ISOC_UFRAME_MAX 4 /* exclusive unit */
#define USB_BUS_MAX 256 /* units */
#define USB_MAX_DEVICES 128 /* units */
#define USB_CONFIG_MAX 65535 /* bytes */
#define USB_IFACE_MAX 32 /* units */
#define USB_FIFO_MAX 128 /* units */
#define USB_MAX_EP_STREAMS 8 /* units */
#define USB_MAX_EP_UNITS 32 /* units */
#define USB_MAX_PORTS 255 /* units */
#define USB_MAX_FS_ISOC_FRAMES_PER_XFER (120) /* units */
#define USB_MAX_HS_ISOC_FRAMES_PER_XFER (8*120) /* units */
#define USB_HUB_MAX_DEPTH 5
#define USB_EP0_BUFSIZE 1024 /* bytes */
#define USB_CS_RESET_LIMIT 20 /* failures = 20 * 50 ms = 1sec */
#define USB_MAX_AUTO_QUIRK 8 /* maximum number of dynamic quirks */
#define USB_IN_POLLING_MODE_FUNC() usbd_in_polling_mode()
#define USB_IN_POLLING_MODE_VALUE() (SCHEDULER_STOPPED() || kdb_active)
typedef uint32_t usb_timeout_t; /* milliseconds */
typedef uint32_t usb_frlength_t; /* bytes */
typedef uint32_t usb_frcount_t; /* units */
typedef uint32_t usb_size_t; /* bytes */
typedef uint32_t usb_ticks_t; /* system defined */
typedef uint16_t usb_power_mask_t; /* see "USB_HW_POWER_XXX" */
typedef uint16_t usb_stream_t; /* stream ID */
#endif /* _USB__H_ */

View File

@@ -0,0 +1,152 @@
/*-
* Copyright (c) 2014 Leon Dang <ldang@nahannisys.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _USB_CORE_H_
#define _USB_CORE_H_
#include <stdlib.h>
#include <pthread.h>
#include "types.h"
#define USB_MAX_XFER_BLOCKS 8
#define USB_XFER_OUT 0
#define USB_XFER_IN 1
struct usb_hci;
struct usb_device_request;
struct usb_data_xfer;
/*
 * Device emulation handlers: the operations table each emulated USB
 * device registers.  ue_init creates an instance and returns the
 * private softc that is passed back as 'sc' to every other handler.
 */
struct usb_devemu {
char *ue_emu; /* name of device emulation */
int ue_usbver; /* usb version: 2 or 3 */
int ue_usbspeed; /* usb device speed */
/* instance creation */
void *(*ue_init)(struct usb_hci *hci, char *opt);
/* handlers */
int (*ue_request)(void *sc, struct usb_data_xfer *xfer);	/* control xfer */
int (*ue_data)(void *sc, struct usb_data_xfer *xfer, int dir,
int epctx);	/* data xfer on endpoint epctx; dir is USB_XFER_IN/OUT */
int (*ue_reset)(void *sc);
int (*ue_remove)(void *sc);
int (*ue_stop)(void *sc);
};
#define USB_EMUL_SET(x) DATA_SET(usb_emu_set, x)
/*
 * USB device events used to notify the HCI when device state changes
 * (delivered through usb_hci.hci_event).
 */
enum hci_usbev {
USBDEV_ATTACH,
USBDEV_RESET,
USBDEV_STOP,
USBDEV_REMOVE,
};
/*
 * USB host controller interface (i.e. xHCI, EHCI): callbacks a device
 * emulation uses to signal its controller, plus controller-managed
 * addressing state.
 */
struct usb_hci {
int (*hci_intr)(struct usb_hci *hci, int epctx);	/* raise interrupt for endpoint */
int (*hci_event)(struct usb_hci *hci, enum hci_usbev evid,
void *param);	/* deliver a device state-change event */
void *dev; /* private device for hci */
/* controller managed fields */
int hci_address;
int hci_port;
};
/*
 * Each xfer block is mapped to the hci transfer block.
 * On input into the device handler, blen is set to the length of buf.
 * The device handler is to update blen to reflect on the residual size
 * of the buffer, i.e. len(buf) - len(consumed).
 */
struct usb_data_xfer_block {
void *buf; /* IN or OUT pointer */
int blen; /* in:len(buf), out:len(remaining) */
int bdone; /* bytes transferred */
uint32_t processed; /* device processed this + errcode */
void *hci_data; /* HCI private reference */
int ccs;
uint32_t streamid;
uint64_t trbnext; /* next TRB guest address */
};
/*
 * A transfer: a ring of up to USB_MAX_XFER_BLOCKS data blocks
 * (head/tail indices into 'data'), an optional setup control request,
 * and a mutex guarding the whole structure (see USB_DATA_XFER_LOCK).
 */
struct usb_data_xfer {
struct usb_data_xfer_block data[USB_MAX_XFER_BLOCKS];
struct usb_device_request *ureq; /* setup ctl request */
int ndata; /* # of data items */
int head;	/* index of first pending block */
int tail;	/* index one past the last block */
pthread_mutex_t mtx;	/* protects this transfer */
};
/* Transfer completion codes, stored in the high bits of
 * usb_data_xfer_block.processed (see USB_DATA_SET_ERRCODE). */
enum USB_ERRCODE {
USB_ACK,
USB_NAK,
USB_STALL,
USB_NYET,
USB_ERR,
USB_SHORT	/* short transfer */
};
/*
 * Accessors for the 'processed' field of a usb_data_xfer_block:
 * bits 7:0 hold the processed flag, bits 31:8 the USB_ERRCODE.
 */
#define USB_DATA_GET_ERRCODE(x) ((x)->processed >> 8)
/*
 * Fix: parenthesize the macro parameter 'e'.  The previous (e << 8)
 * mis-expanded for any non-primary expression argument, e.g.
 * USB_DATA_SET_ERRCODE(x, a | b) became (a | (b << 8)).
 */
#define USB_DATA_SET_ERRCODE(x, e) \
((x)->processed = ((x)->processed & 0xFF) | ((e) << 8))
/* A block slot is valid iff its buffer pointer is set. */
#define USB_DATA_OK(x, i) ((x)->data[(i)].buf != NULL)
/* Zero an entire transfer and (re)create its mutex. */
#define USB_DATA_XFER_INIT(x) do { \
memset((x), 0, sizeof(*(x))); \
pthread_mutex_init(&((x)->mtx), NULL); \
} while (0)
/* Clear all data blocks and ring indices; keeps the mutex intact. */
#define USB_DATA_XFER_RESET(x) do { \
memset((x)->data, 0, sizeof((x)->data)); \
(x)->ndata = 0; \
(x)->head = (x)->tail = 0; \
} while (0)
#define USB_DATA_XFER_LOCK(x) \
pthread_mutex_lock(&((x)->mtx))
#define USB_DATA_XFER_UNLOCK(x) \
pthread_mutex_unlock(&((x)->mtx))
/* Look up a registered device emulation by its ue_emu name;
 * returns NULL if not found. */
struct usb_devemu *usb_emu_finddev(char *name);
/* Append a data block to a transfer ring; returns the new block,
 * or NULL on failure. */
struct usb_data_xfer_block *usb_data_xfer_append(struct usb_data_xfer *xfer,
void *buf,
int blen,
void *hci_data,
int ccs);

View File

@@ -0,0 +1,89 @@
/*-
* Copyright (c) 2009 Andrew Thompson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _USB_USBDI_H_
#define _USB_USBDI_H_
struct usb_fifo;
struct usb_xfer;
struct usb_device;
struct usb_attach_arg;
struct usb_interface;
struct usb_endpoint;
struct usb_page_cache;
struct usb_page_search;
struct usb_process;
struct usb_proc_msg;
struct usb_mbuf;
struct usb_fs_privdata;
struct mbuf;
/* USB stack error codes; 0 means success.  The numeric comments make it
 * easy to match values against logs. */
typedef enum { /* keep in sync with usb_errstr_table */
USB_ERR_NORMAL_COMPLETION = 0,
USB_ERR_PENDING_REQUESTS, /* 1 */
USB_ERR_NOT_STARTED, /* 2 */
USB_ERR_INVAL, /* 3 */
USB_ERR_NOMEM, /* 4 */
USB_ERR_CANCELLED, /* 5 */
USB_ERR_BAD_ADDRESS, /* 6 */
USB_ERR_BAD_BUFSIZE, /* 7 */
USB_ERR_BAD_FLAG, /* 8 */
USB_ERR_NO_CALLBACK, /* 9 */
USB_ERR_IN_USE, /* 10 */
USB_ERR_NO_ADDR, /* 11 */
USB_ERR_NO_PIPE, /* 12 */
USB_ERR_ZERO_NFRAMES, /* 13 */
USB_ERR_ZERO_MAXP, /* 14 */
USB_ERR_SET_ADDR_FAILED, /* 15 */
USB_ERR_NO_POWER, /* 16 */
USB_ERR_TOO_DEEP, /* 17 */
USB_ERR_IOERROR, /* 18 */
USB_ERR_NOT_CONFIGURED, /* 19 */
USB_ERR_TIMEOUT, /* 20 */
USB_ERR_SHORT_XFER, /* 21 */
USB_ERR_STALLED, /* 22 */
USB_ERR_INTERRUPTED, /* 23 */
USB_ERR_DMA_LOAD_FAILED, /* 24 */
USB_ERR_BAD_CONTEXT, /* 25 */
USB_ERR_NO_ROOT_HUB, /* 26 */
USB_ERR_NO_INTR_THREAD, /* 27 */
USB_ERR_NOT_LOCKED, /* 28 */
USB_ERR_MAX
} usb_error_t;
/*
* Flags for transfers
*/
#define USB_FORCE_SHORT_XFER 0x0001 /* force a short transmit last */
#define USB_SHORT_XFER_OK 0x0004 /* allow short reads */
#define USB_DELAY_STATUS_STAGE 0x0010 /* insert delay before STATUS stage */
#define USB_USER_DATA_PTR 0x0020 /* internal flag */
#define USB_MULTI_SHORT_OK 0x0040 /* allow multiple short frames */
#define USB_MANUAL_STATUS 0x0080 /* manual ctrl status */
#define USB_NO_TIMEOUT 0
#define USB_DEFAULT_TIMEOUT 5000 /* 5000 ms = 5 seconds */
#endif /* _USB_USBDI_H_ */

View File

@@ -0,0 +1,71 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/* Shared data structure between VBS-U and VBS-K */
#ifndef _VBS_COMMON_IF_H_
#define _VBS_COMMON_IF_H_
#include "types.h"
#define VBS_MAX_VQ_CNT 10
#define VBS_NAME_LEN 32
/* Per-virtqueue configuration pushed from VBS-U down to VBS-K. */
struct vbs_vq_info {
uint16_t qsize; /* size of this queue (a power of 2) */
uint32_t pfn; /* PFN of virt queue (not shifted!) */
uint16_t msix_idx; /* MSI-X index, or VIRTIO_MSI_NO_VECTOR */
uint64_t msix_addr;	/* MSI-X message address */
uint32_t msix_data;	/* MSI-X message data */
};
/* All virtqueues of one device (up to VBS_MAX_VQ_CNT entries used). */
struct vbs_vqs_info {
uint32_t nvq; /* number of virtqueues */
struct vbs_vq_info vqs[VBS_MAX_VQ_CNT];
/* array of struct vbs_vq_info */
};
/* Device-level information shared between VBS-U and VBS-K. */
struct vbs_dev_info {
char name[VBS_NAME_LEN];/* VBS name */
int vmid; /* VMID this device belongs to */
int nvq; /* virtqueue # */
uint32_t negotiated_features;
/* features after VIRTIO_CONFIG_S_DRIVER_OK */
uint64_t pio_range_start;
uint64_t pio_range_len; /* PIO bar address initialized by guest OS */
};
/* reuse vhost ioctl index */
#define VBS_K_IOCTL 0xAF
#define VBS_K_SET_DEV _IOW(VBS_K_IOCTL, 0x00, struct vbs_dev_info)	/* push device info */
#define VBS_K_SET_VQ _IOW(VBS_K_IOCTL, 0x01, struct vbs_vqs_info)	/* push virtqueue info */
#endif /* _VBS_COMMON_IF_H_ */

View File

@@ -0,0 +1,685 @@
/*-
* Copyright (c) 2013 Chris Torek <torek @ torek net>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/**
* @file virtio.h
*
* @brief Virtio Backend Service (VBS) APIs for ACRN Project
*/
#ifndef _VIRTIO_H_
#define _VIRTIO_H_
/*
* These are derived from several virtio specifications.
*
* Some useful links:
* https://github.com/rustyrussell/virtio-spec
* http://people.redhat.com/pbonzini/virtio-spec.pdf
*/
/*
* A virtual device has zero or more "virtual queues" (virtqueue).
* Each virtqueue uses at least two 4096-byte pages, laid out thus:
*
* +-----------------------------------------------+
* | "desc": <N> descriptors, 16 bytes each |
* | ----------------------------------------- |
* | "avail": 2 uint16; <N> uint16; 1 uint16 |
* | ----------------------------------------- |
* | pad to 4k boundary |
* +-----------------------------------------------+
* | "used": 2 x uint16; <N> elems; 1 uint16 |
* | ----------------------------------------- |
* | pad to 4k boundary |
* +-----------------------------------------------+
*
* The number <N> that appears here is always a power of two and is
* limited to no more than 32768 (as it must fit in a 16-bit field).
* If <N> is sufficiently large, the above will occupy more than
* two pages. In any case, all pages must be physically contiguous
* within the guest's physical address space.
*
* The <N> 16-byte "desc" descriptors consist of a 64-bit guest
* physical address <addr>, a 32-bit length <len>, a 16-bit
* <flags>, and a 16-bit <next> field (all in guest byte order).
*
* There are three flags that may be set :
* NEXT descriptor is chained, so use its "next" field
* WRITE descriptor is for host to write into guest RAM
* (else host is to read from guest RAM)
* INDIRECT descriptor address field is (guest physical)
* address of a linear array of descriptors
*
* Unless INDIRECT is set, <len> is the number of bytes that may
* be read/written from guest physical address <addr>. If
* INDIRECT is set, WRITE is ignored and <len> provides the length
* of the indirect descriptors (and <len> must be a multiple of
* 16). Note that NEXT may still be set in the main descriptor
* pointing to the indirect, and should be set in each indirect
* descriptor that uses the next descriptor (these should generally
* be numbered sequentially). However, INDIRECT must not be set
* in the indirect descriptors. Upon reaching an indirect descriptor
* without a NEXT bit, control returns to the direct descriptors.
*
* Except inside an indirect, each <next> value must be in the
* range [0 .. N) (i.e., the half-open interval). (Inside an
* indirect, each <next> must be in the range [0 .. <len>/16).)
*
* The "avail" data structures reside in the same pages as the
* "desc" structures since both together are used by the device to
* pass information to the hypervisor's virtual driver. These
* begin with a 16-bit <flags> field and 16-bit index <idx>, then
* have <N> 16-bit <ring> values, followed by one final 16-bit
* field <used_event>. The <N> <ring> entries are simply
* indices into the descriptor ring (and thus must meet the same
* constraints as each <next> value). However, <idx> is counted
* up from 0 (initially) and simply wraps around after 65535; it
* is taken mod <N> to find the next available entry.
*
* The "used" ring occupies a separate page or pages, and contains
* values written from the virtual driver back to the guest OS.
* This begins with a 16-bit <flags> and 16-bit <idx>, then there
* are <N> "vring_used" elements, followed by a 16-bit <avail_event>.
* The <N> "vring_used" elements consist of a 32-bit <id> and a
* 32-bit <len> (tlen below). The <id> is simply the index of
* the head of a descriptor chain the guest made available
* earlier, and the <len> is the number of bytes actually written,
* e.g., in the case of a network driver that provided a large
* receive buffer but received only a small amount of data.
*
* The two event fields, <used_event> and <avail_event>, in the
* avail and used rings (respectively -- note the reversal!), are
* always provided, but are used only if the virtual device
* negotiates the VIRTIO_RING_F_EVENT_IDX feature during feature
* negotiation. Similarly, both rings provide a flag --
* VRING_AVAIL_F_NO_INTERRUPT and VRING_USED_F_NO_NOTIFY -- in
* their <flags> field, indicating that the guest does not need an
* interrupt, or that the hypervisor driver does not need a
* notify, when descriptors are added to the corresponding ring.
* (These are provided only for interrupt optimization and need
* not be implemented.)
*/
#include "types.h"
/**
* @brief virtio API
*
* @defgroup acrn_virtio virtio API
* @{
*/
/* Vring layout constants and guest-shared ring structures.  These
 * mirror the virtio spec's vring_* definitions; all structs are packed
 * because they overlay guest memory. */
#define VRING_ALIGN 4096	/* used ring is aligned to this boundary */
#define VRING_DESC_F_NEXT (1 << 0)	/* chained: use 'next' field */
#define VRING_DESC_F_WRITE (1 << 1)	/* host writes, guest reads */
#define VRING_DESC_F_INDIRECT (1 << 2)	/* addr points to descriptor table */
struct virtio_desc { /* AKA vring_desc */
uint64_t addr; /* guest physical address */
uint32_t len; /* length of scatter/gather seg */
uint16_t flags; /* VRING_DESC_F_* */
uint16_t next; /* next desc if F_NEXT */
} __attribute__((packed));
struct virtio_used { /* AKA vring_used_elem */
uint32_t idx; /* head of used descriptor chain */
uint32_t tlen; /* length written-to */
} __attribute__((packed));
#define VRING_AVAIL_F_NO_INTERRUPT 1	/* guest doesn't need interrupt */
struct vring_avail {
uint16_t flags; /* VRING_AVAIL_F_* */
uint16_t idx; /* counts to 65535, then cycles */
uint16_t ring[]; /* size N, reported in QNUM value */
/* uint16_t used_event; -- after N ring entries */
} __attribute__((packed));
#define VRING_USED_F_NO_NOTIFY 1	/* host doesn't need notify */
struct vring_used {
uint16_t flags; /* VRING_USED_F_* */
uint16_t idx; /* counts to 65535, then cycles */
struct virtio_used ring[];
/* size N */
/* uint16_t avail_event; -- after N ring entries */
} __attribute__((packed));
/*
* The address of any given virtual queue is determined by a single
* Page Frame Number register. The guest writes the PFN into the
* PCI config space. However, a device that has two or more
* virtqueues can have a different PFN, and size, for each queue.
* The number of queues is determinable via the PCI config space
* VTCFG_R_QSEL register. Writes to QSEL select the queue: 0 means
* queue #0, 1 means queue#1, etc. Once a queue is selected, the
* remaining PFN and QNUM registers refer to that queue.
*
* QNUM is a read-only register containing a nonzero power of two
* that indicates the (hypervisor's) queue size. Or, if reading it
* produces zero, the hypervisor does not have a corresponding
* queue. (The number of possible queues depends on the virtual
* device. The block device has just one; the network device
* provides either two -- 0 = receive, 1 = transmit -- or three,
* with 2 = control.)
*
* PFN is a read/write register giving the physical page address of
* the virtqueue in guest memory (the guest must allocate enough space
* based on the hypervisor's provided QNUM).
*
* QNOTIFY is effectively write-only: when the guest writes a queue
* number to the register, the hypervisor should scan the specified
* virtqueue. (Reading QNOTIFY currently always gets 0).
*/
/*
* PFN register shift amount
*/
#define VRING_PAGE_BITS 12
/*
* Virtio device types
*/
#define VIRTIO_TYPE_NET 1
#define VIRTIO_TYPE_BLOCK 2
#define VIRTIO_TYPE_CONSOLE 3
#define VIRTIO_TYPE_ENTROPY 4
#define VIRTIO_TYPE_BALLOON 5
#define VIRTIO_TYPE_IOMEMORY 6
#define VIRTIO_TYPE_RPMSG 7
#define VIRTIO_TYPE_SCSI 8
#define VIRTIO_TYPE_9P 9
/*
* ACRN virtio device types
* Experimental IDs start at 0xFFFF and work down
*/
#define VIRTIO_TYPE_RPMB 0xFFFF
#define VIRTIO_TYPE_HECI 0xFFFE
#define VIRTIO_TYPE_AUDIO 0xFFFD
#define VIRTIO_TYPE_IPU 0xFFFC
#define VIRTIO_TYPE_TSN 0xFFFB
#define VIRTIO_TYPE_HYPERDMABUF 0xFFFA
#define VIRTIO_TYPE_HDCP 0xFFF9
#define VIRTIO_TYPE_COREU 0xFFF8
/*
* PCI vendor/device IDs
*/
#define INTEL_VENDOR_ID 0x8086
#define VIRTIO_VENDOR 0x1AF4
#define VIRTIO_DEV_NET 0x1000
#define VIRTIO_DEV_BLOCK 0x1001
#define VIRTIO_DEV_CONSOLE 0x1003
#define VIRTIO_DEV_RANDOM 0x1005
/*
* ACRN virtio device IDs
*/
#define VIRTIO_DEV_RPMB 0x8601
#define VIRTIO_DEV_HECI 0x8602
#define VIRTIO_DEV_AUDIO 0x8603
#define VIRTIO_DEV_IPU 0x8604
#define VIRTIO_DEV_TSN 0x8605
#define VIRTIO_DEV_HYPERDMABUF 0x8606
#define VIRTIO_DEV_HDCP 0x8607
#define VIRTIO_DEV_COREU 0x8608
/*
* PCI config space constants.
*
* If MSI-X is enabled, the ISR register is generally not used,
* and the configuration vector and queue vector appear at offsets
* 20 and 22 with the remaining configuration registers at 24.
* If MSI-X is not enabled, those two registers disappear and
* the remaining configuration registers start at offset 20.
*/
#define VIRTIO_CR_HOSTCAP 0
#define VIRTIO_CR_GUESTCAP 4
#define VIRTIO_CR_PFN 8
#define VIRTIO_CR_QNUM 12
#define VIRTIO_CR_QSEL 14
#define VIRTIO_CR_QNOTIFY 16
#define VIRTIO_CR_STATUS 18
#define VIRTIO_CR_ISR 19
#define VIRTIO_CR_CFGVEC 20
#define VIRTIO_CR_QVEC 22
#define VIRTIO_CR_CFG0 20 /* No MSI-X */
#define VIRTIO_CR_CFG1 24 /* With MSI-X */
#define VIRTIO_CR_MSIX 20
/*
* Bits in VIRTIO_CR_STATUS. Guests need not actually set any of these,
* but a guest writing 0 to this register means "please reset".
*/
#define VIRTIO_CR_STATUS_ACK 0x01
/* guest OS has acknowledged dev */
#define VIRTIO_CR_STATUS_DRIVER 0x02
/* guest OS driver is loaded */
#define VIRTIO_CR_STATUS_DRIVER_OK 0x04
/* guest OS driver ready */
#define VIRTIO_CR_STATUS_FAILED 0x80
/* guest has given up on this dev */
/*
* Bits in VIRTIO_CR_ISR. These apply only if not using MSI-X.
*
* (We don't [yet?] ever use CONF_CHANGED.)
*/
#define VIRTIO_CR_ISR_QUEUES 0x01
/* re-scan queues */
#define VIRTIO_CR_ISR_CONF_CHANGED 0x80
/* configuration changed */
#define VIRTIO_MSI_NO_VECTOR 0xFFFF
/*
* Feature flags.
* Note: bits 0 through 23 are reserved to each device type.
*/
#define VIRTIO_F_NOTIFY_ON_EMPTY (1 << 24)
#define VIRTIO_RING_F_INDIRECT_DESC (1 << 28)
#define VIRTIO_RING_F_EVENT_IDX (1 << 29)
/* From section 2.3, "Virtqueue Configuration", of the virtio specification */
/**
 * @brief Calculate size of a virtual ring.
 *
 * Layout (legacy virtio): descriptor table, then the avail ring,
 * then -- after padding to VRING_ALIGN -- the used ring, with the
 * whole thing padded again to VRING_ALIGN.
 *
 * @param qsz Size of raw data in a certain virtqueue.
 *
 * @return size of a certain virtqueue, in bytes.
 */
static inline size_t
vring_size(u_int qsz)
{
	size_t total;

	/*
	 * Descriptor table plus avail ring; the "+ 3" uint16_t's are
	 * the avail ring's flags, idx and trailing used_event.
	 */
	total = qsz * sizeof(struct virtio_desc)
	    + (qsz + 3) * sizeof(uint16_t);
	total = roundup2(total, VRING_ALIGN);

	/*
	 * Used ring: flags, idx and trailing avail_event (3 uint16_t's)
	 * around the element array.
	 */
	total += 3 * sizeof(uint16_t) + qsz * sizeof(struct virtio_used);
	return roundup2(total, VRING_ALIGN);
}
struct vmctx;
struct pci_vdev;
struct virtio_vq_info;
/*
* A virtual device, with some number (possibly 0) of virtual
* queues and some size (possibly 0) of configuration-space
* registers private to the device. The virtio_base should come
* at the front of each "derived class", so that a pointer to the
* virtio_base is also a pointer to the more specific, derived-
* from-virtio driver's virtio_base struct.
*
* Note: inside each hypervisor virtio driver, changes to these
* data structures must be locked against other threads, if any.
* Except for PCI config space register read/write, we assume each
* driver does the required locking, but we need a pointer to the
* lock (if there is one) for PCI config space read/write ops.
*
* When the guest reads or writes the device's config space, the
* generic layer checks for operations on the special registers
* described above. If the offset of the register(s) being read
* or written is past the CFG area (CFG0 or CFG1), the request is
* passed on to the virtual device, after subtracting off the
* generic-layer size. (So, drivers can just use the offset as
* an offset into "struct config", for instance.)
*
* (The virtio layer also makes sure that the read or write is to/
* from a "good" config offset, hence cfgsize, and on BAR #0.
* However, the driver must verify the read or write size and offset
* and that no one is writing a readonly register.)
*
* The BROKED flag ("this thing done gone and broked") is for future
* use.
*/
#define VIRTIO_USE_MSIX 0x01
#define VIRTIO_EVENT_IDX 0x02 /* use the event-index values */
#define VIRTIO_BROKED 0x08 /* ??? */
/**
 * @brief Base component to any virtio device
 *
 * Embedded at the front of each device-specific state structure, so a
 * pointer to the device state is also a pointer to its virtio_base
 * (see the layering comment above).
 */
struct virtio_base {
	struct virtio_ops *vops; /**< virtio operations */
	int flags; /**< VIRTIO_* flags from above */
	pthread_mutex_t *mtx; /**< POSIX mutex, if any (may be NULL) */
	struct pci_vdev *dev; /**< PCI device instance */
	uint32_t negotiated_caps; /**< negotiated capabilities */
	struct virtio_vq_info *queues; /**< one per nvq */
	int curq; /**< current queue (selected via QSEL) */
	uint8_t status; /**< value from last status write */
	uint8_t isr; /**< ISR flags, if not MSI-X */
	uint16_t msix_cfg_idx; /**< MSI-X vector for config event */
};
/* Acquire the optional per-device mutex; no-op when vb->mtx is NULL. */
#define VIRTIO_BASE_LOCK(vb) \
do { \
	if (vb->mtx) \
		pthread_mutex_lock(vb->mtx); \
} while (0)
/* Release the optional per-device mutex; no-op when vb->mtx is NULL. */
#define VIRTIO_BASE_UNLOCK(vb) \
do { \
	if (vb->mtx) \
		pthread_mutex_unlock(vb->mtx); \
} while (0)
/**
 * @brief Virtio specific operation functions for this type of virtio device
 *
 * All callbacks take the device-specific state pointer (the structure
 * whose first member is the virtio_base) as their first argument.
 */
struct virtio_ops {
	const char *name; /**< name of driver (for diagnostics) */
	int nvq; /**< number of virtual queues */
	size_t cfgsize; /**< size of dev-specific config regs */
	void (*reset)(void *);
	/**< called on virtual device reset */
	void (*qnotify)(void *, struct virtio_vq_info *);
	/**< called on QNOTIFY if no VQ notify */
	int (*cfgread)(void *, int, int, uint32_t *);
	/**< to read config regs; args presumably (sc, offset, size, retval)
	 *   -- TODO confirm against the generic-layer caller */
	int (*cfgwrite)(void *, int, int, uint32_t);
	/**< to write config regs; args presumably (sc, offset, size, value)
	 *   -- TODO confirm against the generic-layer caller */
	void (*apply_features)(void *, uint64_t);
	/**< to apply negotiated features */
	void (*set_status)(void *, uint64_t);
	/**< called to set device status */
	uint64_t hv_caps; /**< hypervisor-provided capabilities */
};
#define VQ_ALLOC 0x01 /* set once we have a pfn */
#define VQ_BROKED 0x02 /* ??? */
/**
 * @brief Virtqueue data structure
 *
 * Data structure allocated (statically) per virtual queue.
 *
 * Drivers may change qsize after a reset. When the guest OS
 * requests a device reset, the hypervisor first calls
 * vb->vops->reset(); then the data structure below is
 * reinitialized (for each virtqueue: vb->vops->nvq).
 *
 * The remaining fields should only be fussed-with by the generic
 * code.
 *
 * Note: the addresses of desc, avail, and used are all
 * computable from each other, but it's a lot simpler if we just
 * keep a pointer to each one. The event indices are similarly
 * (but more easily) computable, and this time we'll compute them:
 * they're just XX_ring[N].
 */
struct virtio_vq_info {
	uint16_t qsize; /**< size of this queue (a power of 2) */
	void (*notify)(void *, struct virtio_vq_info *);
	/**< called instead of notify, if not NULL */
	struct virtio_base *base;
	/**< backpointer to virtio_base */
	uint16_t num; /**< the num'th queue in the virtio_base */
	uint16_t flags; /**< flags (see VQ_* above) */
	uint16_t last_avail; /**< a recent value of avail->idx */
	uint16_t save_used; /**< saved used->idx; see vq_endchains */
	uint16_t msix_idx; /**< MSI-X index, or VIRTIO_MSI_NO_VECTOR */
	uint32_t pfn; /**< PFN of virt queue (not shifted!) */
	volatile struct virtio_desc *desc;
	/**< descriptor array */
	volatile struct vring_avail *avail;
	/**< the "avail" ring */
	volatile struct vring_used *used;
	/**< the "used" ring */
};
/* as noted above, these are sort of backwards, name-wise */
#define VQ_AVAIL_EVENT_IDX(vq) \
(*(volatile uint16_t *)&(vq)->used->ring[(vq)->qsize])
#define VQ_USED_EVENT_IDX(vq) \
((vq)->avail->ring[(vq)->qsize])
/**
 * @brief Is this ring ready for I/O?
 *
 * @param vq Pointer to struct virtio_vq_info.
 *
 * @return 0 on not ready and 1 on ready.
 */
static inline int
vq_ring_ready(struct virtio_vq_info *vq)
{
	/* A queue is usable once the guest has supplied a PFN (VQ_ALLOC). */
	return ((vq->flags & VQ_ALLOC) ? 1 : 0);
}
/**
 * @brief Are there "available" descriptors?
 *
 * This does not count how many, just returns 1 if there is any.
 *
 * @param vq Pointer to struct virtio_vq_info.
 *
 * @return 0 on no available and 1 on available.
 */
static inline int
vq_has_descs(struct virtio_vq_info *vq)
{
	if (!vq_ring_ready(vq))
		return 0;
	/* Compare our cached cursor against the guest-updated avail index. */
	return (vq->avail->idx != vq->last_avail);
}
/**
 * @brief Deliver an interrupt to guest on the given virtqueue.
 *
 * The interrupt could be MSI-X or a generic MSI interrupt.
 *
 * @param vb Pointer to struct virtio_base.
 * @param vq Pointer to struct virtio_vq_info.
 *
 * @return N/A
 */
static inline void
vq_interrupt(struct virtio_base *vb, struct virtio_vq_info *vq)
{
	if (pci_msix_enabled(vb->dev))
		pci_generate_msix(vb->dev, vq->msix_idx);
	else {
		/*
		 * Non-MSI-X path: set the queue bit in the ISR register
		 * under the device lock before signalling the guest.
		 * NOTE(review): both pci_generate_msi() and
		 * pci_lintr_assert() are invoked unconditionally here;
		 * presumably only one takes effect depending on MSI
		 * enablement -- confirm against the PCI emulation layer.
		 */
		VIRTIO_BASE_LOCK(vb);
		vb->isr |= VIRTIO_CR_ISR_QUEUES;
		pci_generate_msi(vb->dev, 0);
		pci_lintr_assert(vb->dev);
		VIRTIO_BASE_UNLOCK(vb);
	}
}
struct iovec;
/**
* @brief Link a virtio_base to its constants, the virtio device,
* and the PCI emulation.
*
* @param vb Pointer to struct virtio_base.
* @param vo Pointer to struct virtio_ops.
* @param pci_virtio_dev Pointer to instance of certain virtio device.
* @param dev Pointer to struct pci_vdev which emulates a PCI device.
* @param queues Pointer to struct virtio_vq_info, normally an array.
*
 * @return N/A
*/
void virtio_linkup(struct virtio_base *vb, struct virtio_ops *vo,
void *pci_virtio_dev, struct pci_vdev *dev,
struct virtio_vq_info *queues);
/**
* @brief Initialize MSI-X vector capabilities if we're to use MSI-X,
* or MSI capabilities if not.
*
* Wrapper function for virtio_intr_init() for cases we directly use
* BAR 1 for MSI-X capabilities.
*
* @param vb Pointer to struct virtio_base.
* @param use_msix If using MSI-X.
*
* @return 0 on success and non-zero on fail.
*/
int virtio_interrupt_init(struct virtio_base *vb, int use_msix);
/**
* @brief Initialize MSI-X vector capabilities if we're to use MSI-X,
* or MSI capabilities if not.
*
* @param vb Pointer to struct virtio_base.
* @param barnum Which BAR[0..5] to use.
* @param use_msix If using MSI-X.
*
* @return 0 on success and non-zero on fail.
*/
int virtio_intr_init(struct virtio_base *vb, int barnum, int use_msix);
/**
* @brief Reset device (device-wide).
*
* This erases all queues, i.e., all the queues become invalid.
* But we don't wipe out the internal pointers, by just clearing
* the VQ_ALLOC flag.
*
* @param vb Pointer to struct virtio_base.
*
* @return N/A
*/
void virtio_reset_dev(struct virtio_base *vb);
/**
* @brief Set I/O BAR (usually 0) to map PCI config registers.
*
* @param vb Pointer to struct virtio_base.
* @param barnum Which BAR[0..5] to use.
*
* @return N/A
*/
void virtio_set_io_bar(struct virtio_base *vb, int barnum);
/**
* @brief Walk through the chain of descriptors involved in a request
* and put them into a given iov[] array.
*
* @param vq Pointer to struct virtio_vq_info.
* @param pidx Pointer to available ring position.
* @param iov Pointer to iov[] array prepared by caller.
* @param n_iov Size of iov[] array.
* @param flags Pointer to a uint16_t array which will contain flag of
* each descriptor.
*
* @return number of descriptors.
*/
int vq_getchain(struct virtio_vq_info *vq, uint16_t *pidx,
struct iovec *iov, int n_iov, uint16_t *flags);
/**
* @brief Return the currently-first request chain back to the
* available ring.
*
* @param vq Pointer to struct virtio_vq_info.
*
* @return N/A
*/
void vq_retchain(struct virtio_vq_info *vq);
/**
* @brief Return specified request chain to the guest,
* setting its I/O length to the provided value.
*
* @param vq Pointer to struct virtio_vq_info.
* @param idx Pointer to available ring position, returned by vq_getchain().
* @param iolen Number of data bytes to be returned to frontend.
*
* @return N/A
*/
void vq_relchain(struct virtio_vq_info *vq, uint16_t idx, uint32_t iolen);
/**
* @brief Driver has finished processing "available" chains and calling
* vq_relchain on each one.
*
* If driver used all the available chains, used_all_avail need to be set to 1.
*
* @param vq Pointer to struct virtio_vq_info.
* @param used_all_avail Flag indicating if driver used all available chains.
*
* @return N/A
*/
void vq_endchains(struct virtio_vq_info *vq, int used_all_avail);
/**
* @brief Handle PCI configuration space reads.
*
* Handle virtio standard register reads, and dispatch other reads to
* actual virtio device driver.
*
* @param ctx Pointer to struct vmctx representing VM context.
* @param vcpu VCPU ID.
* @param dev Pointer to struct pci_vdev which emulates a PCI device.
* @param baridx Which BAR[0..5] to use.
* @param offset Register offset in bytes within a BAR region.
* @param size Access range in bytes.
*
* @return register value.
*/
uint64_t virtio_pci_read(struct vmctx *ctx, int vcpu, struct pci_vdev *dev,
int baridx, uint64_t offset, int size);
/**
* @brief Handle PCI configuration space writes.
*
* Handle virtio standard register writes, and dispatch other writes to
* actual virtio device driver.
*
* @param ctx Pointer to struct vmctx representing VM context.
* @param vcpu VCPU ID.
* @param dev Pointer to struct pci_vdev which emulates a PCI device.
* @param baridx Which BAR[0..5] to use.
* @param offset Register offset in bytes within a BAR region.
* @param size Access range in bytes.
* @param value Data value to be written into register.
*
* @return N/A
*/
void virtio_pci_write(struct vmctx *ctx, int vcpu, struct pci_vdev *dev,
int baridx, uint64_t offset, int size, uint64_t value);
/**
* @}
*/
#endif /* _VIRTIO_H_ */

View File

@@ -0,0 +1,66 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/* This file provides common routines to interface with VBS-K kernel modules */
#ifndef _VIRTIO_KERNEL_H_
#define _VIRTIO_KERNEL_H_
#include "vbs_common_if.h" /* data format between VBS-U & VBS-K */
/* Lifecycle states of a VBS-K (kernel virtio backend) device instance. */
enum VBS_K_STATUS {
	VIRTIO_DEV_INITIAL = 1, /* initial status */
	VIRTIO_DEV_PRE_INIT, /* detected thru cmdline option */
	VIRTIO_DEV_INIT_FAILED, /* init failed */
	VIRTIO_DEV_INIT_SUCCESS, /* init success */
	VIRTIO_DEV_START_FAILED, /* start failed */
	VIRTIO_DEV_STARTED, /* start success */
};
/* Return codes */
#define VIRTIO_SUCCESS 0
#define VIRTIO_ERROR_REENTER 1
#define VIRTIO_ERROR_FD_OPEN_FAILED 2
#define VIRTIO_ERROR_MEM_ALLOC_FAILED 3
#define VIRTIO_ERROR_START 4
#define VIRTIO_ERROR_GENERAL 5
/* VBS-K common ops */
/* VBS-K init/reset*/
int vbs_kernel_init(int fd);
int vbs_kernel_reset(int fd);
/* VBS-K start/stop */
int vbs_kernel_start(int fd, struct vbs_dev_info *dev,
struct vbs_vqs_info *vqs);
int vbs_kernel_stop(int fd);
#endif

291
devicemodel/include/vmm.h Normal file
View File

@@ -0,0 +1,291 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _VMM_H_
#define _VMM_H_
#include "types.h"
#include "vhm_ioctl_defs.h"
/*
* Entries in the Interrupt Descriptor Table (IDT)
*/
#define IDT_DE 0 /* #DE: Divide Error */
#define IDT_DB 1 /* #DB: Debug */
#define IDT_NMI 2 /* Nonmaskable External Interrupt */
#define IDT_BP 3 /* #BP: Breakpoint */
#define IDT_OF 4 /* #OF: Overflow */
#define IDT_BR 5 /* #BR: Bound Range Exceeded */
#define IDT_UD 6 /* #UD: Undefined/Invalid Opcode */
#define IDT_NM 7 /* #NM: No Math Coprocessor */
#define IDT_DF 8 /* #DF: Double Fault */
#define IDT_FPUGP 9 /* Coprocessor Segment Overrun */
#define IDT_TS 10 /* #TS: Invalid TSS */
#define IDT_NP 11 /* #NP: Segment Not Present */
#define IDT_SS 12 /* #SS: Stack Segment Fault */
#define IDT_GP 13 /* #GP: General Protection Fault */
#define IDT_PF 14 /* #PF: Page Fault */
#define IDT_MF 16 /* #MF: FPU Floating-Point Error */
#define IDT_AC 17 /* #AC: Alignment Check */
#define IDT_MC 18 /* #MC: Machine Check */
#define IDT_XF 19 /* #XF: SIMD Floating-Point Exception */
enum vm_suspend_how {
VM_SUSPEND_NONE,
VM_SUSPEND_RESET,
VM_SUSPEND_POWEROFF,
VM_SUSPEND_HALT,
VM_SUSPEND_TRIPLEFAULT,
VM_SUSPEND_LAST
};
/*
* Identifiers for architecturally defined registers.
*/
enum vm_reg_name {
VM_REG_GUEST_RAX,
VM_REG_GUEST_RBX,
VM_REG_GUEST_RCX,
VM_REG_GUEST_RDX,
VM_REG_GUEST_RSI,
VM_REG_GUEST_RDI,
VM_REG_GUEST_RBP,
VM_REG_GUEST_R8,
VM_REG_GUEST_R9,
VM_REG_GUEST_R10,
VM_REG_GUEST_R11,
VM_REG_GUEST_R12,
VM_REG_GUEST_R13,
VM_REG_GUEST_R14,
VM_REG_GUEST_R15,
VM_REG_GUEST_CR0,
VM_REG_GUEST_CR3,
VM_REG_GUEST_CR4,
VM_REG_GUEST_DR7,
VM_REG_GUEST_RSP,
VM_REG_GUEST_RIP,
VM_REG_GUEST_RFLAGS,
VM_REG_GUEST_ES,
VM_REG_GUEST_CS,
VM_REG_GUEST_SS,
VM_REG_GUEST_DS,
VM_REG_GUEST_FS,
VM_REG_GUEST_GS,
VM_REG_GUEST_LDTR,
VM_REG_GUEST_TR,
VM_REG_GUEST_IDTR,
VM_REG_GUEST_GDTR,
VM_REG_GUEST_EFER,
VM_REG_GUEST_CR2,
VM_REG_GUEST_PDPTE0,
VM_REG_GUEST_PDPTE1,
VM_REG_GUEST_PDPTE2,
VM_REG_GUEST_PDPTE3,
VM_REG_GUEST_INTR_SHADOW,
VM_REG_LAST
};
#define VM_INTINFO_VECTOR(info) ((info) & 0xff)
#define VM_INTINFO_DEL_ERRCODE 0x800
#define VM_INTINFO_RSVD 0x7ffff000
#define VM_INTINFO_VALID 0x80000000
#define VM_INTINFO_TYPE 0x700
#define VM_INTINFO_HWINTR (0 << 8)
#define VM_INTINFO_NMI (2 << 8)
#define VM_INTINFO_HWEXCEPTION (3 << 8)
#define VM_INTINFO_SWINTR (4 << 8)
#define VM_MAXCPU 16 /* maximum virtual cpus */
/*
* Identifiers for optional vmm capabilities
*/
enum vm_cap_type {
VM_CAP_HALT_EXIT,
VM_CAP_MTRAP_EXIT,
VM_CAP_PAUSE_EXIT,
VM_CAP_UNRESTRICTED_GUEST,
VM_CAP_ENABLE_INVPCID,
VM_CAP_MAX
};
enum vm_intr_trigger {
EDGE_TRIGGER,
LEVEL_TRIGGER
};
/*
* The 'access' field has the format specified in Table 21-2 of the Intel
* Architecture Manual vol 3b.
*
* XXX The contents of the 'access' field are architecturally defined except
* bit 16 - Segment Unusable.
*/
/* Guest segment descriptor; 'access' is decoded by the SEG_DESC_* macros. */
struct seg_desc {
	uint64_t base;
	uint32_t limit;
	uint32_t access;
};
#define SEG_DESC_TYPE(access) ((access) & 0x001f)
#define SEG_DESC_DPL(access) (((access) >> 5) & 0x3)
#define SEG_DESC_PRESENT(access) (((access) & 0x0080) ? 1 : 0)
#define SEG_DESC_DEF32(access) (((access) & 0x4000) ? 1 : 0)
#define SEG_DESC_GRANULARITY(access) (((access) & 0x8000) ? 1 : 0)
#define SEG_DESC_UNUSABLE(access) (((access) & 0x10000) ? 1 : 0)
enum vm_cpu_mode {
CPU_MODE_REAL,
CPU_MODE_PROTECTED,
CPU_MODE_COMPATIBILITY, /* IA-32E mode (CS.L = 0) */
CPU_MODE_64BIT, /* IA-32E mode (CS.L = 1) */
};
enum vm_paging_mode {
PAGING_MODE_FLAT,
PAGING_MODE_32,
PAGING_MODE_PAE,
PAGING_MODE_64,
};
struct vm_guest_paging {
uint64_t cr3;
int cpl;
enum vm_cpu_mode cpu_mode;
enum vm_paging_mode paging_mode;
};
/*
* The data structures 'vie' and 'vie_op' are meant to be opaque to the
* consumers of instruction decoding. The only reason why their contents
* need to be exposed is because they are part of the 'vhm_request' structure.
*/
struct vie_op {
uint8_t op_byte; /* actual opcode byte */
uint8_t op_type; /* type of operation (e.g. MOV) */
uint16_t op_flags;
};
#define VIE_INST_SIZE 15
struct vie {
uint8_t inst[VIE_INST_SIZE]; /* instruction bytes */
uint8_t num_valid; /* size of the instruction */
uint8_t num_processed;
uint8_t addrsize:4, opsize:4; /* address and operand sizes */
uint8_t rex_w:1, /* REX prefix */
rex_r:1,
rex_x:1,
rex_b:1,
rex_present:1,
repz_present:1, /* REP/REPE/REPZ prefix */
repnz_present:1, /* REPNE/REPNZ prefix */
opsize_override:1, /* Operand size override */
addrsize_override:1, /* Address size override */
segment_override:1; /* Segment override */
uint8_t mod:2, /* ModRM byte */
reg:4,
rm:4;
uint8_t ss:2, /* SIB byte */
index:4,
base:4;
uint8_t disp_bytes;
uint8_t imm_bytes;
uint8_t scale;
int base_register; /* VM_REG_GUEST_xyz */
int index_register; /* VM_REG_GUEST_xyz */
int segment_register; /* VM_REG_GUEST_xyz */
int64_t displacement; /* optional addr displacement */
int64_t immediate; /* optional immediate operand */
uint8_t decoded; /* set to 1 if successfully decoded */
struct vie_op op; /* opcode description */
};
enum vm_exitcode {
VM_EXITCODE_INOUT = 0,
VM_EXITCODE_MMIO_EMUL,
VM_EXITCODE_PCI_CFG,
VM_EXITCODE_BOGUS,
VM_EXITCODE_HLT,
VM_EXITCODE_MTRAP,
VM_EXITCODE_PAUSE,
VM_EXITCODE_PAGING,
VM_EXITCODE_DEPRECATED1, /* used to be SPINDOWN_CPU */
VM_EXITCODE_RENDEZVOUS,
VM_EXITCODE_IOAPIC_EOI,
VM_EXITCODE_INOUT_STR,
VM_EXITCODE_MONITOR,
VM_EXITCODE_MWAIT,
VM_EXITCODE_REQIDLE,
VM_EXITCODE_MAX
};
/* Decoded I/O-port access (IN/OUT instruction exit). */
struct vm_inout {
	uint16_t bytes:3; /* 1 or 2 or 4 */
	uint16_t in:1; /* 1 = IN (read), 0 = OUT (write) */
	uint16_t string:1; /* INS/OUTS string form */
	uint16_t rep:1; /* REP prefix present */
	uint16_t port; /* I/O port number */
	uint32_t eax; /* valid for out */
};
struct vm_inout_str {
struct vm_inout inout; /* must be the first element */
struct vm_guest_paging paging;
uint64_t rflags;
uint64_t cr0;
uint64_t index;
uint64_t count; /* rep=1 (%rcx), rep=0 (1) */
int addrsize;
enum vm_reg_name seg_name;
struct seg_desc seg_desc;
};
enum task_switch_reason {
TSR_CALL,
TSR_IRET,
TSR_JMP,
TSR_IDT_GATE, /* task gate in IDT */
};
struct vm_task_switch {
uint16_t tsssel; /* new TSS selector */
int ext; /* task switch due to external event */
uint32_t errcode;
int errcode_valid; /* push 'errcode' on the new stack */
enum task_switch_reason reason;
struct vm_guest_paging paging;
};
#endif /* _VMM_H_ */

View File

@@ -0,0 +1,147 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _VMMAPI_H_
#define _VMMAPI_H_
#include <sys/param.h>
#include "types.h"
#include "vmm.h"
/*
* API version for out-of-tree consumers for making compile time decisions.
*/
#define VMMAPI_VERSION 0103 /* 2 digit major followed by 2 digit minor */
struct iovec;
/* Per-VM context held by the device model; one instance per guest. */
struct vmctx {
	int fd; /* device fd used to talk to the hypervisor service */
	int vmid;
	int ioreq_client;
	uint32_t lowmem_limit;
	int memflags; /* VM_MEM_F_* */
	size_t lowmem; /* size of guest memory below lowmem_limit */
	size_t highmem; /* size of guest memory above 4G */
	char *mmap_lowmem; /* host mapping of low guest memory */
	char *mmap_highmem; /* host mapping of high guest memory */
	char *baseaddr;
	char *name;
};
/*
* Different styles of mapping the memory assigned to a VM into the address
* space of the controlling process.
*/
enum vm_mmap_style {
VM_MMAP_NONE, /* no mapping */
VM_MMAP_ALL, /* fully and statically mapped */
VM_MMAP_SPARSE, /* mappings created on-demand */
};
/*
* 'flags' value passed to 'vm_set_memflags()'.
*/
#define VM_MEM_F_INCORE 0x01 /* include guest memory in core file */
#define VM_MEM_F_WIRED 0x02 /* guest memory is wired */
#define VM_MEMMAP_F_WIRED 0x01
#define VM_MEMMAP_F_IOMMU 0x02
#define VM_MEMSEG_NAME(m) ((m)->name[0] != '\0' ? (m)->name : NULL)
struct vm_lapic_msi {
uint64_t msg;
uint64_t addr;
};
struct vm_isa_irq {
int atpic_irq;
int ioapic_irq;
};
/*
* Create a device memory segment identified by 'segid'.
*
* Returns a pointer to the memory segment on success and MAP_FAILED otherwise.
*/
void *vm_create_devmem(struct vmctx *ctx, int segid, const char *name,
size_t len);
int vm_create(const char *name);
int vm_get_device_fd(struct vmctx *ctx);
struct vmctx *vm_open(const char *name);
void vm_close(struct vmctx *ctx);
void vm_pause(struct vmctx *ctx);
int vm_set_shared_io_page(struct vmctx *ctx, uint64_t page_vma);
int vm_create_ioreq_client(struct vmctx *ctx);
int vm_destroy_ioreq_client(struct vmctx *ctx);
int vm_attach_ioreq_client(struct vmctx *ctx);
int vm_notify_request_done(struct vmctx *ctx, int vcpu);
void vm_set_suspend_mode(enum vm_suspend_how how);
int vm_get_suspend_mode(void);
void vm_destroy(struct vmctx *ctx);
int vm_parse_memsize(const char *optarg, size_t *memsize);
int vm_setup_memory(struct vmctx *ctx, size_t len, enum vm_mmap_style s);
void vm_unsetup_memory(struct vmctx *ctx);
void *vm_map_gpa(struct vmctx *ctx, vm_paddr_t gaddr, size_t len);
uint32_t vm_get_lowmem_limit(struct vmctx *ctx);
void vm_set_lowmem_limit(struct vmctx *ctx, uint32_t limit);
void vm_set_memflags(struct vmctx *ctx, int flags);
int vm_get_memflags(struct vmctx *ctx);
size_t vm_get_lowmem_size(struct vmctx *ctx);
size_t vm_get_highmem_size(struct vmctx *ctx);
int vm_run(struct vmctx *ctx);
int vm_suspend(struct vmctx *ctx, enum vm_suspend_how how);
int vm_apicid2vcpu(struct vmctx *ctx, int apicid);
int vm_lapic_msi(struct vmctx *ctx, uint64_t addr, uint64_t msg);
int vm_ioapic_assert_irq(struct vmctx *ctx, int irq);
int vm_ioapic_deassert_irq(struct vmctx *ctx, int irq);
int vm_ioapic_pincount(struct vmctx *ctx, int *pincount);
int vm_isa_assert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq);
int vm_isa_deassert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq);
int vm_isa_pulse_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq);
int vm_assign_ptdev(struct vmctx *ctx, int bus, int slot, int func);
int vm_unassign_ptdev(struct vmctx *ctx, int bus, int slot, int func);
int vm_map_ptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
vm_paddr_t gpa, size_t len, vm_paddr_t hpa);
int vm_setup_ptdev_msi(struct vmctx *ctx,
struct acrn_vm_pci_msix_remap *msi_remap);
int vm_set_ptdev_msix_info(struct vmctx *ctx, struct ic_ptdev_irq *ptirq);
int vm_reset_ptdev_msix_info(struct vmctx *ctx, uint16_t virt_bdf,
int vector_count);
int vm_set_ptdev_intx_info(struct vmctx *ctx, uint16_t virt_bdf,
uint16_t phys_bdf, int virt_pin, int phys_pin, bool pic_pin);
int vm_reset_ptdev_intx_info(struct vmctx *ctx, int virt_pin, bool pic_pin);
int vm_create_vcpu(struct vmctx *ctx, int vcpu_id);
int acrn_parse_kernel(char *arg);
int acrn_parse_ramdisk(char *arg);
int acrn_parse_bootargs(char *arg);
int acrn_sw_load(struct vmctx *ctx);
#endif /* _VMMAPI_H_ */

352
devicemodel/include/xhci.h Normal file
View File

@@ -0,0 +1,352 @@
/*-
* Copyright (c) 2014 Leon Dang <ldang@nahannisys.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _XHCI_H_
#define _XHCI_H_
#define PCI_USBREV 0x60 /* USB protocol revision */
/* Device-model internal slot lifecycle states (dsc_slotstate). */
enum { /* dsc_slotstate */
	XHCI_ST_DISABLED,
	XHCI_ST_ENABLED,
	XHCI_ST_DEFAULT,
	XHCI_ST_ADDRESSED,
	XHCI_ST_CONFIGURED,
	XHCI_ST_MAX
};
/* Slot-context "Slot State" field values (see xhci_slot_ctx dwSctx3). */
enum {
	XHCI_ST_SLCTX_DISABLED,
	XHCI_ST_SLCTX_DEFAULT,
	XHCI_ST_SLCTX_ADDRESSED,
	XHCI_ST_SLCTX_CONFIGURED
};
/* Endpoint-context "EP State" field values (see xhci_endp_ctx dwEpCtx0). */
enum {
	XHCI_ST_EPCTX_DISABLED,
	XHCI_ST_EPCTX_RUNNING,
	XHCI_ST_EPCTX_HALTED,
	XHCI_ST_EPCTX_STOPPED,
	XHCI_ST_EPCTX_ERROR
};
/* Capacity limits used to size the emulated controller's structures. */
#define XHCI_MAX_DEVICES MIN(USB_MAX_DEVICES, 128)
#define XHCI_MAX_ENDPOINTS 32 /* hardcoded - do not change */
#define XHCI_MAX_SCRATCHPADS 32
#define XHCI_MAX_EVENTS (16 * 13)
#define XHCI_MAX_COMMANDS (16 * 1)
#define XHCI_MAX_RSEG 1
#define XHCI_MAX_TRANSFERS 4
/* Stream support is compiled to match USB_MAX_EP_STREAMS (log2 kept in sync). */
#if USB_MAX_EP_STREAMS == 8
#define XHCI_MAX_STREAMS 8
#define XHCI_MAX_STREAMS_LOG 3
#elif USB_MAX_EP_STREAMS == 1
#define XHCI_MAX_STREAMS 1
#define XHCI_MAX_STREAMS_LOG 0
#else
#error "The USB_MAX_EP_STREAMS value is not supported."
#endif
/* Alignment requirements (bytes) for xHCI data structures. */
#define XHCI_DEV_CTX_ADDR_ALIGN 64 /* bytes */
#define XHCI_DEV_CTX_ALIGN 64 /* bytes */
#define XHCI_INPUT_CTX_ALIGN 64 /* bytes */
#define XHCI_SLOT_CTX_ALIGN 32 /* bytes */
#define XHCI_ENDP_CTX_ALIGN 32 /* bytes */
#define XHCI_STREAM_CTX_ALIGN 16 /* bytes */
#define XHCI_TRANS_RING_SEG_ALIGN 16 /* bytes */
#define XHCI_CMD_RING_SEG_ALIGN 64 /* bytes */
#define XHCI_EVENT_RING_SEG_ALIGN 64 /* bytes */
#define XHCI_SCRATCH_BUF_ARRAY_ALIGN 64 /* bytes */
#define XHCI_SCRATCH_BUFFER_ALIGN USB_PAGE_SIZE
#define XHCI_TRB_ALIGN 16 /* bytes */
#define XHCI_TD_ALIGN 64 /* bytes */
#define XHCI_PAGE_SIZE 4096 /* bytes */
/*
 * xHCI Slot Context. dwSctx0..dwSctx3 are the four defined 32-bit context
 * words; the *_SET/*_GET macros pack/unpack the bit fields of each word.
 * dwSctx4..dwSctx7 are reserved padding.
 */
struct xhci_slot_ctx {
	volatile uint32_t dwSctx0;
#define XHCI_SCTX_0_ROUTE_SET(x) ((x) & 0xFFFFF)
#define XHCI_SCTX_0_ROUTE_GET(x) ((x) & 0xFFFFF)
#define XHCI_SCTX_0_SPEED_SET(x) (((x) & 0xF) << 20)
#define XHCI_SCTX_0_SPEED_GET(x) (((x) >> 20) & 0xF)
#define XHCI_SCTX_0_MTT_SET(x) (((x) & 0x1) << 25)
#define XHCI_SCTX_0_MTT_GET(x) (((x) >> 25) & 0x1)
#define XHCI_SCTX_0_HUB_SET(x) (((x) & 0x1) << 26)
#define XHCI_SCTX_0_HUB_GET(x) (((x) >> 26) & 0x1)
#define XHCI_SCTX_0_CTX_NUM_SET(x) (((x) & 0x1F) << 27)
#define XHCI_SCTX_0_CTX_NUM_GET(x) (((x) >> 27) & 0x1F)
	volatile uint32_t dwSctx1;
#define XHCI_SCTX_1_MAX_EL_SET(x) ((x) & 0xFFFF)
#define XHCI_SCTX_1_MAX_EL_GET(x) ((x) & 0xFFFF)
#define XHCI_SCTX_1_RH_PORT_SET(x) (((x) & 0xFF) << 16)
#define XHCI_SCTX_1_RH_PORT_GET(x) (((x) >> 16) & 0xFF)
#define XHCI_SCTX_1_NUM_PORTS_SET(x) (((x) & 0xFF) << 24)
#define XHCI_SCTX_1_NUM_PORTS_GET(x) (((x) >> 24) & 0xFF)
	volatile uint32_t dwSctx2;
#define XHCI_SCTX_2_TT_HUB_SID_SET(x) ((x) & 0xFF)
#define XHCI_SCTX_2_TT_HUB_SID_GET(x) ((x) & 0xFF)
#define XHCI_SCTX_2_TT_PORT_NUM_SET(x) (((x) & 0xFF) << 8)
#define XHCI_SCTX_2_TT_PORT_NUM_GET(x) (((x) >> 8) & 0xFF)
#define XHCI_SCTX_2_TT_THINK_TIME_SET(x) (((x) & 0x3) << 16)
#define XHCI_SCTX_2_TT_THINK_TIME_GET(x) (((x) >> 16) & 0x3)
#define XHCI_SCTX_2_IRQ_TARGET_SET(x) (((x) & 0x3FF) << 22)
#define XHCI_SCTX_2_IRQ_TARGET_GET(x) (((x) >> 22) & 0x3FF)
	volatile uint32_t dwSctx3;
#define XHCI_SCTX_3_DEV_ADDR_SET(x) ((x) & 0xFF)
#define XHCI_SCTX_3_DEV_ADDR_GET(x) ((x) & 0xFF)
#define XHCI_SCTX_3_SLOT_STATE_SET(x) (((x) & 0x1F) << 27)
#define XHCI_SCTX_3_SLOT_STATE_GET(x) (((x) >> 27) & 0x1F)
	/* Reserved words (xHCI-defined padding). */
	volatile uint32_t dwSctx4;
	volatile uint32_t dwSctx5;
	volatile uint32_t dwSctx6;
	volatile uint32_t dwSctx7;
};
/*
 * xHCI Endpoint Context. qwEpCtx2 holds the transfer-ring dequeue pointer
 * (low bit = dequeue cycle state, DCS); dwEpCtx5..7 are reserved.
 */
struct xhci_endp_ctx {
	volatile uint32_t dwEpCtx0;
#define XHCI_EPCTX_0_EPSTATE_SET(x) ((x) & 0x7)
#define XHCI_EPCTX_0_EPSTATE_GET(x) ((x) & 0x7)
#define XHCI_EPCTX_0_MULT_SET(x) (((x) & 0x3) << 8)
#define XHCI_EPCTX_0_MULT_GET(x) (((x) >> 8) & 0x3)
#define XHCI_EPCTX_0_MAXP_STREAMS_SET(x) (((x) & 0x1F) << 10)
#define XHCI_EPCTX_0_MAXP_STREAMS_GET(x) (((x) >> 10) & 0x1F)
#define XHCI_EPCTX_0_LSA_SET(x) (((x) & 0x1) << 15)
#define XHCI_EPCTX_0_LSA_GET(x) (((x) >> 15) & 0x1)
#define XHCI_EPCTX_0_IVAL_SET(x) (((x) & 0xFF) << 16)
#define XHCI_EPCTX_0_IVAL_GET(x) (((x) >> 16) & 0xFF)
	volatile uint32_t dwEpCtx1;
#define XHCI_EPCTX_1_CERR_SET(x) (((x) & 0x3) << 1)
#define XHCI_EPCTX_1_CERR_GET(x) (((x) >> 1) & 0x3)
#define XHCI_EPCTX_1_EPTYPE_SET(x) (((x) & 0x7) << 3)
#define XHCI_EPCTX_1_EPTYPE_GET(x) (((x) >> 3) & 0x7)
#define XHCI_EPCTX_1_HID_SET(x) (((x) & 0x1) << 7)
#define XHCI_EPCTX_1_HID_GET(x) (((x) >> 7) & 0x1)
#define XHCI_EPCTX_1_MAXB_SET(x) (((x) & 0xFF) << 8)
#define XHCI_EPCTX_1_MAXB_GET(x) (((x) >> 8) & 0xFF)
#define XHCI_EPCTX_1_MAXP_SIZE_SET(x) (((x) & 0xFFFF) << 16)
#define XHCI_EPCTX_1_MAXP_SIZE_GET(x) (((x) >> 16) & 0xFFFF)
	volatile uint64_t qwEpCtx2;
#define XHCI_EPCTX_2_DCS_SET(x) ((x) & 0x1)
#define XHCI_EPCTX_2_DCS_GET(x) ((x) & 0x1)
#define XHCI_EPCTX_2_TR_DQ_PTR_MASK 0xFFFFFFFFFFFFFFF0U
	volatile uint32_t dwEpCtx4;
#define XHCI_EPCTX_4_AVG_TRB_LEN_SET(x) ((x) & 0xFFFF)
#define XHCI_EPCTX_4_AVG_TRB_LEN_GET(x) ((x) & 0xFFFF)
#define XHCI_EPCTX_4_MAX_ESIT_PAYLOAD_SET(x) (((x) & 0xFFFF) << 16)
#define XHCI_EPCTX_4_MAX_ESIT_PAYLOAD_GET(x) (((x) >> 16) & 0xFFFF)
	/* Reserved words (xHCI-defined padding). */
	volatile uint32_t dwEpCtx5;
	volatile uint32_t dwEpCtx6;
	volatile uint32_t dwEpCtx7;
};
/*
 * xHCI Input Control Context: dwInCtx0 is the drop-context bitmap and
 * dwInCtx1 the add-context bitmap used by Configure/Evaluate commands.
 */
struct xhci_input_ctx {
#define XHCI_INCTX_NON_CTRL_MASK 0xFFFFFFFCU
	volatile uint32_t dwInCtx0;
#define XHCI_INCTX_0_DROP_MASK(n) (1U << (n))
	volatile uint32_t dwInCtx1;
#define XHCI_INCTX_1_ADD_MASK(n) (1U << (n))
	/* Reserved words. */
	volatile uint32_t dwInCtx2;
	volatile uint32_t dwInCtx3;
	volatile uint32_t dwInCtx4;
	volatile uint32_t dwInCtx5;
	volatile uint32_t dwInCtx6;
	volatile uint32_t dwInCtx7;
};
/* Input context: input control context followed by slot/endpoint contexts. */
struct xhci_input_dev_ctx {
	struct xhci_input_ctx ctx_input;
	union {
		struct xhci_slot_ctx u_slot;
		struct xhci_endp_ctx u_ep[XHCI_MAX_ENDPOINTS];
	} ctx_dev_slep;
};
/* Device context: slot context plus per-endpoint contexts, 64-byte aligned. */
struct xhci_dev_ctx {
	union {
		struct xhci_slot_ctx u_slot;
		struct xhci_endp_ctx u_ep[XHCI_MAX_ENDPOINTS];
	} ctx_dev_slep;
} __attribute__((aligned(XHCI_DEV_CTX_ALIGN)));
/* Shorthand accessors for the union members above. */
#define ctx_slot ctx_dev_slep.u_slot
#define ctx_ep ctx_dev_slep.u_ep
/*
 * xHCI Stream Context: qwSctx0 carries the dequeue pointer plus the
 * DCS bit and the stream context type (SCT) field.
 */
struct xhci_stream_ctx {
	volatile uint64_t qwSctx0;
#define XHCI_SCTX_0_DCS_GET(x) ((x) & 0x1)
#define XHCI_SCTX_0_DCS_SET(x) ((x) & 0x1)
#define XHCI_SCTX_0_SCT_SET(x) (((x) & 0x7) << 1)
#define XHCI_SCTX_0_SCT_GET(x) (((x) >> 1) & 0x7)
/* Stream context type (SCT) field values. */
#define XHCI_SCTX_0_SCT_SEC_TR_RING 0x0
#define XHCI_SCTX_0_SCT_PRIM_TR_RING 0x1
#define XHCI_SCTX_0_SCT_PRIM_SSA_8 0x2
#define XHCI_SCTX_0_SCT_PRIM_SSA_16 0x3
#define XHCI_SCTX_0_SCT_PRIM_SSA_32 0x4
#define XHCI_SCTX_0_SCT_PRIM_SSA_64 0x5
#define XHCI_SCTX_0_SCT_PRIM_SSA_128 0x6
#define XHCI_SCTX_0_SCT_PRIM_SSA_256 0x7
#define XHCI_SCTX_0_TR_DQ_PTR_MASK 0xFFFFFFFFFFFFFFF0U
	/* Reserved words. */
	volatile uint32_t dwSctx2;
	volatile uint32_t dwSctx3;
};
/*
 * xHCI Transfer Request Block (TRB), 16 bytes. qwTrb0 is the data/parameter
 * word, dwTrb2 the status word and dwTrb3 the control word. Field meanings
 * of dwTrb3 depend on the TRB type (see the type/event/error lists below);
 * several macros therefore alias the same bit positions.
 */
struct xhci_trb {
	volatile uint64_t qwTrb0;
#define XHCI_TRB_0_DIR_IN_MASK (0x80ULL << 0)
#define XHCI_TRB_0_WLENGTH_MASK (0xFFFFULL << 48)
	volatile uint32_t dwTrb2;
#define XHCI_TRB_2_ERROR_GET(x) (((x) >> 24) & 0xFF)
#define XHCI_TRB_2_ERROR_SET(x) (((x) & 0xFF) << 24)
#define XHCI_TRB_2_TDSZ_GET(x) (((x) >> 17) & 0x1F)
#define XHCI_TRB_2_TDSZ_SET(x) (((x) & 0x1F) << 17)
#define XHCI_TRB_2_REM_GET(x) ((x) & 0xFFFFFF)
#define XHCI_TRB_2_REM_SET(x) ((x) & 0xFFFFFF)
#define XHCI_TRB_2_BYTES_GET(x) ((x) & 0x1FFFF)
#define XHCI_TRB_2_BYTES_SET(x) ((x) & 0x1FFFF)
#define XHCI_TRB_2_IRQ_GET(x) (((x) >> 22) & 0x3FF)
#define XHCI_TRB_2_IRQ_SET(x) (((x) & 0x3FF) << 22)
#define XHCI_TRB_2_STREAM_GET(x) (((x) >> 16) & 0xFFFF)
#define XHCI_TRB_2_STREAM_SET(x) (((x) & 0xFFFF) << 16)
	volatile uint32_t dwTrb3;
#define XHCI_TRB_3_TYPE_GET(x) (((x) >> 10) & 0x3F)
#define XHCI_TRB_3_TYPE_SET(x) (((x) & 0x3F) << 10)
#define XHCI_TRB_3_CYCLE_BIT (1U << 0)
#define XHCI_TRB_3_TC_BIT (1U << 1) /* command ring only */
#define XHCI_TRB_3_ENT_BIT (1U << 1) /* transfer ring only */
#define XHCI_TRB_3_ISP_BIT (1U << 2)
#define XHCI_TRB_3_ED_BIT (1U << 2)
#define XHCI_TRB_3_NSNOOP_BIT (1U << 3)
#define XHCI_TRB_3_CHAIN_BIT (1U << 4)
#define XHCI_TRB_3_IOC_BIT (1U << 5)
#define XHCI_TRB_3_IDT_BIT (1U << 6)
#define XHCI_TRB_3_TBC_GET(x) (((x) >> 7) & 3)
#define XHCI_TRB_3_TBC_SET(x) (((x) & 3) << 7)
#define XHCI_TRB_3_BEI_BIT (1U << 9)
#define XHCI_TRB_3_DCEP_BIT (1U << 9)
#define XHCI_TRB_3_PRSV_BIT (1U << 9)
#define XHCI_TRB_3_BSR_BIT (1U << 9)
#define XHCI_TRB_3_TRT_MASK (3U << 16)
#define XHCI_TRB_3_TRT_NONE (0U << 16)
#define XHCI_TRB_3_TRT_OUT (2U << 16)
#define XHCI_TRB_3_TRT_IN (3U << 16)
#define XHCI_TRB_3_DIR_IN (1U << 16)
#define XHCI_TRB_3_TLBPC_GET(x) (((x) >> 16) & 0xF)
#define XHCI_TRB_3_TLBPC_SET(x) (((x) & 0xF) << 16)
#define XHCI_TRB_3_EP_GET(x) (((x) >> 16) & 0x1F)
#define XHCI_TRB_3_EP_SET(x) (((x) & 0x1F) << 16)
#define XHCI_TRB_3_FRID_GET(x) (((x) >> 20) & 0x7FF)
#define XHCI_TRB_3_FRID_SET(x) (((x) & 0x7FF) << 20)
#define XHCI_TRB_3_ISO_SIA_BIT (1U << 31)
#define XHCI_TRB_3_SUSP_EP_BIT (1U << 23)
#define XHCI_TRB_3_SLOT_GET(x) (((x) >> 24) & 0xFF)
#define XHCI_TRB_3_SLOT_SET(x) (((x) & 0xFF) << 24)
/* Commands */
#define XHCI_TRB_TYPE_RESERVED 0x00
#define XHCI_TRB_TYPE_NORMAL 0x01
#define XHCI_TRB_TYPE_SETUP_STAGE 0x02
#define XHCI_TRB_TYPE_DATA_STAGE 0x03
#define XHCI_TRB_TYPE_STATUS_STAGE 0x04
#define XHCI_TRB_TYPE_ISOCH 0x05
#define XHCI_TRB_TYPE_LINK 0x06
#define XHCI_TRB_TYPE_EVENT_DATA 0x07
#define XHCI_TRB_TYPE_NOOP 0x08
#define XHCI_TRB_TYPE_ENABLE_SLOT 0x09
#define XHCI_TRB_TYPE_DISABLE_SLOT 0x0A
#define XHCI_TRB_TYPE_ADDRESS_DEVICE 0x0B
#define XHCI_TRB_TYPE_CONFIGURE_EP 0x0C
#define XHCI_TRB_TYPE_EVALUATE_CTX 0x0D
#define XHCI_TRB_TYPE_RESET_EP 0x0E
#define XHCI_TRB_TYPE_STOP_EP 0x0F
#define XHCI_TRB_TYPE_SET_TR_DEQUEUE 0x10
#define XHCI_TRB_TYPE_RESET_DEVICE 0x11
#define XHCI_TRB_TYPE_FORCE_EVENT 0x12
#define XHCI_TRB_TYPE_NEGOTIATE_BW 0x13
#define XHCI_TRB_TYPE_SET_LATENCY_TOL 0x14
#define XHCI_TRB_TYPE_GET_PORT_BW 0x15
#define XHCI_TRB_TYPE_FORCE_HEADER 0x16
#define XHCI_TRB_TYPE_NOOP_CMD 0x17
/* Events */
#define XHCI_TRB_EVENT_TRANSFER 0x20
#define XHCI_TRB_EVENT_CMD_COMPLETE 0x21
#define XHCI_TRB_EVENT_PORT_STS_CHANGE 0x22
#define XHCI_TRB_EVENT_BW_REQUEST 0x23
#define XHCI_TRB_EVENT_DOORBELL 0x24
#define XHCI_TRB_EVENT_HOST_CTRL 0x25
#define XHCI_TRB_EVENT_DEVICE_NOTIFY 0x26
#define XHCI_TRB_EVENT_MFINDEX_WRAP 0x27
/* Error codes */
#define XHCI_TRB_ERROR_INVALID 0x00
#define XHCI_TRB_ERROR_SUCCESS 0x01
#define XHCI_TRB_ERROR_DATA_BUF 0x02
#define XHCI_TRB_ERROR_BABBLE 0x03
#define XHCI_TRB_ERROR_XACT 0x04
#define XHCI_TRB_ERROR_TRB 0x05
#define XHCI_TRB_ERROR_STALL 0x06
#define XHCI_TRB_ERROR_RESOURCE 0x07
#define XHCI_TRB_ERROR_BANDWIDTH 0x08
#define XHCI_TRB_ERROR_NO_SLOTS 0x09
#define XHCI_TRB_ERROR_STREAM_TYPE 0x0A
#define XHCI_TRB_ERROR_SLOT_NOT_ON 0x0B
#define XHCI_TRB_ERROR_ENDP_NOT_ON 0x0C
#define XHCI_TRB_ERROR_SHORT_PKT 0x0D
#define XHCI_TRB_ERROR_RING_UNDERRUN 0x0E
#define XHCI_TRB_ERROR_RING_OVERRUN 0x0F
#define XHCI_TRB_ERROR_VF_RING_FULL 0x10
#define XHCI_TRB_ERROR_PARAMETER 0x11
#define XHCI_TRB_ERROR_BW_OVERRUN 0x12
#define XHCI_TRB_ERROR_CONTEXT_STATE 0x13
#define XHCI_TRB_ERROR_NO_PING_RESP 0x14
#define XHCI_TRB_ERROR_EV_RING_FULL 0x15
#define XHCI_TRB_ERROR_INCOMPAT_DEV 0x16
#define XHCI_TRB_ERROR_MISSED_SERVICE 0x17
#define XHCI_TRB_ERROR_CMD_RING_STOP 0x18
#define XHCI_TRB_ERROR_CMD_ABORTED 0x19
#define XHCI_TRB_ERROR_STOPPED 0x1A
#define XHCI_TRB_ERROR_LENGTH 0x1B
#define XHCI_TRB_ERROR_BAD_MELAT 0x1D
#define XHCI_TRB_ERROR_ISOC_OVERRUN 0x1F
#define XHCI_TRB_ERROR_EVENT_LOST 0x20
#define XHCI_TRB_ERROR_UNDEFINED 0x21
#define XHCI_TRB_ERROR_INVALID_SID 0x22
#define XHCI_TRB_ERROR_SEC_BW 0x23
#define XHCI_TRB_ERROR_SPLIT_XACT 0x24
} __attribute__((aligned(4)));
/* Per-endpoint TRB storage: one ring slice per stream plus per transfer. */
struct xhci_dev_endpoint_trbs {
	struct xhci_trb trb[(XHCI_MAX_STREAMS *
		XHCI_MAX_TRANSFERS) + XHCI_MAX_STREAMS];
};
/* Event Ring Segment Table entry: segment base pointer and size. */
struct xhci_event_ring_seg {
	volatile uint64_t qwEvrsTablePtr;
	volatile uint32_t dwEvrsTableSize;
	volatile uint32_t dwEvrsReserved;
};
#endif /* _XHCI_H_ */

View File

@@ -0,0 +1,251 @@
/* $FreeBSD$ */
/*-
* Copyright (c) 2010 Hans Petter Selasky. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef _XHCIREG_H_
#define _XHCIREG_H_
/* XHCI PCI config registers */
#define PCI_XHCI_CBMEM 0x10 /* configuration base MEM */
#define PCI_XHCI_USBREV 0x60 /* RO USB protocol revision */
#define PCI_USB_REV_3_0 0x30 /* USB 3.0 */
#define PCI_XHCI_FLADJ 0x61 /* RW frame length adjust */
/* Intel-specific config registers for USB2/USB3 port routing between
 * the xHCI and EHCI controllers. */
#define PCI_XHCI_INTEL_XUSB2PR 0xD0 /* Intel USB2 Port Routing */
#define PCI_XHCI_INTEL_USB2PRM 0xD4 /* Intel USB2 Port Routing Mask */
#define PCI_XHCI_INTEL_USB3_PSSEN 0xD8 /* Intel USB3 Port SuperSpeed Enable */
#define PCI_XHCI_INTEL_USB3PRM 0xDC /* Intel USB3 Port Routing Mask */
/* XHCI capability registers */
#define XHCI_CAPLENGTH 0x00 /* RO capability */
#define XHCI_RESERVED 0x01 /* Reserved */
#define XHCI_HCIVERSION 0x02 /* RO Interface version number */
#define XHCI_HCIVERSION_0_9 0x0090 /* xHCI version 0.9 */
#define XHCI_HCIVERSION_1_0 0x0100 /* xHCI version 1.0 */
#define XHCI_HCSPARAMS1 0x04 /* RO structural parameters 1 */
#define XHCI_HCS1_DEVSLOT_MAX(x)((x) & 0xFF)
#define XHCI_HCS1_IRQ_MAX(x) (((x) >> 8) & 0x3FF)
#define XHCI_HCS1_N_PORTS(x) (((x) >> 24) & 0xFF)
#define XHCI_HCSPARAMS2 0x08 /* RO structural parameters 2 */
#define XHCI_HCS2_IST(x) ((x) & 0xF)
#define XHCI_HCS2_ERST_MAX(x) (((x) >> 4) & 0xF)
#define XHCI_HCS2_SPR(x) (((x) >> 26) & 0x1)
#define XHCI_HCS2_SPB_MAX(x) ((((x) >> 16) & 0x3E0) | (((x) >> 27) & 0x1F))
#define XHCI_HCSPARAMS3 0x0C /* RO structural parameters 3 */
#define XHCI_HCS3_U1_DEL(x) ((x) & 0xFF)
#define XHCI_HCS3_U2_DEL(x) (((x) >> 16) & 0xFFFF)
#define XHCI_HCSPARAMS0 0x10 /* RO capability parameters */
#define XHCI_HCS0_AC64(x) ((x) & 0x1) /* 64-bit capable */
#define XHCI_HCS0_BNC(x) (((x) >> 1) & 0x1) /* BW negotiation */
#define XHCI_HCS0_CSZ(x) (((x) >> 2) & 0x1) /* context size */
#define XHCI_HCS0_PPC(x) (((x) >> 3) & 0x1) /* port power control */
#define XHCI_HCS0_PIND(x) (((x) >> 4) & 0x1) /* port indicators */
#define XHCI_HCS0_LHRC(x) (((x) >> 5) & 0x1) /* light HC reset */
#define XHCI_HCS0_LTC(x) (((x) >> 6) & 0x1) /* latency tolerance msg */
#define XHCI_HCS0_NSS(x) (((x) >> 7) & 0x1) /* no secondary sid */
/* max pri. stream array size */
#define XHCI_HCS0_PSA_SZ_MAX(x) (((x) >> 12) & 0xF)
/* extended capabilities pointer */
#define XHCI_HCS0_XECP(x) (((x) >> 16) & 0xFFFF)
#define XHCI_DBOFF 0x14 /* RO doorbell offset */
#define XHCI_RTSOFF 0x18 /* RO runtime register space offset */
/* XHCI operational registers. Offset given by XHCI_CAPLENGTH register */
#define XHCI_USBCMD 0x00 /* XHCI command */
#define XHCI_CMD_RS 0x00000001 /* RW Run/Stop */
#define XHCI_CMD_HCRST 0x00000002 /* RW Host Controller Reset */
#define XHCI_CMD_INTE 0x00000004 /* RW Interrupter Enable */
#define XHCI_CMD_HSEE 0x00000008 /* RW Host System Error Enable */
/* RO/RW Light Host Controller Reset */
#define XHCI_CMD_LHCRST 0x00000080
#define XHCI_CMD_CSS 0x00000100 /* RW Controller Save State */
#define XHCI_CMD_CRS 0x00000200 /* RW Controller Restore State */
#define XHCI_CMD_EWE 0x00000400 /* RW Enable Wrap Event */
#define XHCI_CMD_EU3S 0x00000800 /* RW Enable U3 MFINDEX Stop */
#define XHCI_USBSTS 0x04 /* XHCI status */
#define XHCI_STS_HCH 0x00000001 /* RO - Host Controller Halted */
#define XHCI_STS_HSE 0x00000004 /* RW - Host System Error */
#define XHCI_STS_EINT 0x00000008 /* RW - Event Interrupt */
#define XHCI_STS_PCD 0x00000010 /* RW - Port Change Detect */
#define XHCI_STS_SSS 0x00000100 /* RO - Save State Status */
#define XHCI_STS_RSS 0x00000200 /* RO - Restore State Status */
#define XHCI_STS_SRE 0x00000400 /* RW - Save/Restore Error */
#define XHCI_STS_CNR 0x00000800 /* RO - Controller Not Ready */
#define XHCI_STS_HCE 0x00001000 /* RO - Host Controller Error */
#define XHCI_PAGESIZE 0x08 /* XHCI page size mask */
#define XHCI_PAGESIZE_4K 0x00000001 /* 4K Page Size */
#define XHCI_PAGESIZE_8K 0x00000002 /* 8K Page Size */
#define XHCI_PAGESIZE_16K 0x00000004 /* 16K Page Size */
#define XHCI_PAGESIZE_32K 0x00000008 /* 32K Page Size */
#define XHCI_PAGESIZE_64K 0x00000010 /* 64K Page Size */
#define XHCI_DNCTRL 0x14 /* XHCI device notification control */
#define XHCI_DNCTRL_MASK(n) (1U << (n))
#define XHCI_CRCR_LO 0x18 /* XHCI command ring control */
#define XHCI_CRCR_LO_RCS 0x00000001 /* RW - consumer cycle state */
#define XHCI_CRCR_LO_CS 0x00000002 /* RW - command stop */
#define XHCI_CRCR_LO_CA 0x00000004 /* RW - command abort */
#define XHCI_CRCR_LO_CRR 0x00000008 /* RW - command ring running */
#define XHCI_CRCR_LO_MASK 0x0000000F
#define XHCI_CRCR_HI 0x1C /* XHCI command ring control */
#define XHCI_DCBAAP_LO 0x30 /* XHCI dev context BA pointer */
#define XHCI_DCBAAP_HI 0x34 /* XHCI dev context BA pointer */
#define XHCI_CONFIG 0x38
/* RW - number of device slots enabled */
#define XHCI_CONFIG_SLOTS_MASK 0x000000FF
/* XHCI port status registers */
/* NOTE(review): base 0x3F0 means (n) here is the 1-based port number
 * (port 1 -> offset 0x400) — confirm against callers. */
#define XHCI_PORTSC(n) (0x3F0 + (0x10 * (n))) /* XHCI port status */
#define XHCI_PS_CCS 0x00000001 /* RO - current connect status */
#define XHCI_PS_PED 0x00000002 /* RW - port enabled / disabled */
#define XHCI_PS_OCA 0x00000008 /* RO - over current active */
#define XHCI_PS_PR 0x00000010 /* RW - port reset */
/* RW - port link state */
#define XHCI_PS_PLS_GET(x) (((x) >> 5) & 0xF)
/* RW - port link state */
#define XHCI_PS_PLS_SET(x) (((x) & 0xF) << 5)
#define XHCI_PS_PP 0x00000200 /* RW - port power */
#define XHCI_PS_SPEED_GET(x) (((x) >> 10) & 0xF) /* RO - port speed */
/* RW - port indicator */
#define XHCI_PS_PIC_GET(x) (((x) >> 14) & 0x3)
/* RW - port indicator */
#define XHCI_PS_PIC_SET(x) (((x) & 0x3) << 14)
#define XHCI_PS_LWS 0x00010000 /* RW - port link state write strobe */
#define XHCI_PS_CSC 0x00020000 /* RW - connect status change */
#define XHCI_PS_PEC 0x00040000 /* RW - port enable/disable change */
#define XHCI_PS_WRC 0x00080000 /* RW - warm port reset change */
#define XHCI_PS_OCC 0x00100000 /* RW - over-current change */
#define XHCI_PS_PRC 0x00200000 /* RW - port reset change */
#define XHCI_PS_PLC 0x00400000 /* RW - port link state change */
#define XHCI_PS_CEC 0x00800000 /* RW - config error change */
#define XHCI_PS_CAS 0x01000000 /* RO - cold attach status */
#define XHCI_PS_WCE 0x02000000 /* RW - wake on connect enable */
#define XHCI_PS_WDE 0x04000000 /* RW - wake on disconnect enable */
#define XHCI_PS_WOE 0x08000000 /* RW - wake on over-current enable */
#define XHCI_PS_DR 0x40000000 /* RO - device removable */
#define XHCI_PS_WPR 0x80000000U /* RW - warm port reset */
#define XHCI_PS_CLEAR 0x80FF01FFU /* command bits */
/* XHCI status and control */
#define XHCI_PORTPMSC(n) (0x3F4 + (0x10 * (n)))
#define XHCI_PM3_U1TO_GET(x) (((x) >> 0) & 0xFF) /* RW - U1 timeout */
#define XHCI_PM3_U1TO_SET(x) (((x) & 0xFF) << 0) /* RW - U1 timeout */
#define XHCI_PM3_U2TO_GET(x) (((x) >> 8) & 0xFF) /* RW - U2 timeout */
#define XHCI_PM3_U2TO_SET(x) (((x) & 0xFF) << 8) /* RW - U2 timeout */
#define XHCI_PM3_FLA 0x00010000 /* RW - Force Link PM Accept */
#define XHCI_PM2_L1S_GET(x) (((x) >> 0) & 0x7) /* RO - L1 status */
#define XHCI_PM2_RWE 0x00000008 /* RW - remote wakup enable */
/* RW - host initiated resume duration */
#define XHCI_PM2_HIRD_GET(x) (((x) >> 4) & 0xF)
/* RW - host initiated resume duration */
#define XHCI_PM2_HIRD_SET(x) (((x) & 0xF) << 4)
#define XHCI_PM2_L1SLOT_GET(x) (((x) >> 8) & 0xFF) /* RW - L1 device slot */
#define XHCI_PM2_L1SLOT_SET(x) (((x) & 0xFF) << 8) /* RW - L1 device slot */
#define XHCI_PM2_HLE 0x00010000 /* RW - hardware LPM enable */
#define XHCI_PORTLI(n) (0x3F8 + (0x10 * (n))) /* XHCI port link info */
/* RO - port link errors */
#define XHCI_PLI3_ERR_GET(x) (((x) >> 0) & 0xFFFF)
/* XHCI port reserved */
#define XHCI_PORTRSV(n) (0x3FC + (0x10 * (n)))
/* XHCI runtime registers. Offset given by
 * XHCI_CAPLENGTH + XHCI_RTSOFF registers
 */
#define XHCI_MFINDEX 0x0000 /* RO - microframe index */
#define XHCI_MFINDEX_GET(x) ((x) & 0x3FFF)
/* XHCI interrupt management */
#define XHCI_IMAN(n) (0x0020 + (0x20 * (n)))
#define XHCI_IMAN_INTR_PEND 0x00000001 /* RW - interrupt pending */
#define XHCI_IMAN_INTR_ENA 0x00000002 /* RW - interrupt enable */
/* XHCI interrupt moderation */
#define XHCI_IMOD(n) (0x0024 + (0x20 * (n)))
#define XHCI_IMOD_IVAL_GET(x) (((x) >> 0) & 0xFFFF) /* 250ns unit */
#define XHCI_IMOD_IVAL_SET(x) (((x) & 0xFFFF) << 0) /* 250ns unit */
#define XHCI_IMOD_ICNT_GET(x) (((x) >> 16) & 0xFFFF) /* 250ns unit */
#define XHCI_IMOD_ICNT_SET(x) (((x) & 0xFFFF) << 16) /* 250ns unit */
#define XHCI_IMOD_DEFAULT 0x000001F4U /* 8000 IRQs/second */
/* 4000 IRQs/second - LynxPoint */
#define XHCI_IMOD_DEFAULT_LP 0x000003F8U
/* XHCI event ring segment table size */
#define XHCI_ERSTSZ(n) (0x0028 + (0x20 * (n)))
#define XHCI_ERSTS_GET(x) ((x) & 0xFFFF)
#define XHCI_ERSTS_SET(x) ((x) & 0xFFFF)
/* XHCI event ring segment table BA */
#define XHCI_ERSTBA_LO(n) (0x0030 + (0x20 * (n)))
/* XHCI event ring segment table BA */
#define XHCI_ERSTBA_HI(n) (0x0034 + (0x20 * (n)))
/* XHCI event ring dequeue pointer */
#define XHCI_ERDP_LO(n) (0x0038 + (0x20 * (n)))
#define XHCI_ERDP_LO_SINDEX(x) ((x) & 0x7) /* RO - dequeue segment index */
#define XHCI_ERDP_LO_BUSY 0x00000008 /* RW - event handler busy */
/* XHCI event ring dequeue pointer */
#define XHCI_ERDP_HI(n) (0x003C + (0x20 * (n)))
/* XHCI doorbell registers
 * Offset given by XHCI_CAPLENGTH + XHCI_DBOFF registers
 */
#define XHCI_DOORBELL(n) (0x0000 + (4 * (n)))
#define XHCI_DB_TARGET_GET(x) ((x) & 0xFF) /* RW - doorbell target */
#define XHCI_DB_TARGET_SET(x) ((x) & 0xFF) /* RW - doorbell target */
/* RW - doorbell stream ID */
#define XHCI_DB_SID_GET(x) (((x) >> 16) & 0xFFFF)
/* RW - doorbell stream ID */
#define XHCI_DB_SID_SET(x) (((x) & 0xFFFF) << 16)
/* XHCI legacy support */
#define XHCI_XECP_ID(x) ((x) & 0xFF)
#define XHCI_XECP_NEXT(x) (((x) >> 8) & 0xFF)
#define XHCI_XECP_BIOS_SEM 0x0002
#define XHCI_XECP_OS_SEM 0x0003
/* XHCI capability ID's */
#define XHCI_ID_USB_LEGACY 0x0001
#define XHCI_ID_PROTOCOLS 0x0002
#define XHCI_ID_POWER_MGMT 0x0003
#define XHCI_ID_VIRTUALIZATION 0x0004
#define XHCI_ID_MSG_IRQ 0x0005
#define XHCI_ID_USB_LOCAL_MEM 0x0006
/* XHCI register R/W wrappers */
/* Read/write register (a) relative to the per-region offset stored in
 * sc->sc_<what>_off (e.g. "oper", "runt", "door"). */
#define XREAD1(sc, what, a) \
	bus_space_read_1((sc)->sc_io_tag, (sc)->sc_io_hdl, \
	(a) + (sc)->sc_##what##_off)
#define XREAD2(sc, what, a) \
	bus_space_read_2((sc)->sc_io_tag, (sc)->sc_io_hdl, \
	(a) + (sc)->sc_##what##_off)
#define XREAD4(sc, what, a) \
	bus_space_read_4((sc)->sc_io_tag, (sc)->sc_io_hdl, \
	(a) + (sc)->sc_##what##_off)
#define XWRITE1(sc, what, a, x) \
	bus_space_write_1((sc)->sc_io_tag, (sc)->sc_io_hdl, \
	(a) + (sc)->sc_##what##_off, (x))
#define XWRITE2(sc, what, a, x) \
	bus_space_write_2((sc)->sc_io_tag, (sc)->sc_io_hdl, \
	(a) + (sc)->sc_##what##_off, (x))
#define XWRITE4(sc, what, a, x) \
	bus_space_write_4((sc)->sc_io_tag, (sc)->sc_io_hdl, \
	(a) + (sc)->sc_##what##_off, (x))
#endif /* _XHCIREG_H_ */

View File

@@ -0,0 +1,30 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/

52
devicemodel/samples/bridge.sh Executable file
View File

@@ -0,0 +1,52 @@
#!/bin/bash
# One-time setup of systemd-networkd configuration for ACRN guest
# networking: bridge all "e*" Ethernet interfaces into acrn-br0, move
# the DHCP address onto the bridge, and create the acrn_tap0 tap device.
# On every run, (re)attach acrn_tap0 to the bridge.
# Instructions to create systemd-networkd configuration files:
if [ ! -e /etc/systemd/network ]; then
mkdir -p /etc/systemd/network
# /etc/systemd/network/acrn.network
# Enslave every interface matching "e*" to the acrn-br0 bridge.
cat <<EOF>/etc/systemd/network/acrn.network
[Match]
Name=e*
[Network]
Bridge=acrn-br0
EOF
# /etc/systemd/network/acrn.netdev
# Define the acrn-br0 bridge device itself.
cat <<EOF>/etc/systemd/network/acrn.netdev
[NetDev]
Name=acrn-br0
Kind=bridge
EOF
# /etc/systemd/network/eth.network
# The bridge (not the physical NIC) acquires the IPv4 address via DHCP.
cat <<EOF>/etc/systemd/network/eth.network
[Match]
Name=acrn-br0
[Network]
DHCP=ipv4
EOF
# need to mask 80-dhcp.network and 80-virtual.network
# (symlink to /dev/null masks the distro defaults so they don't
#  configure the raw interfaces behind the bridge's back)
ln -s /dev/null /etc/systemd/network/80-dhcp.network
ln -s /dev/null /etc/systemd/network/80-virtual.network
# should get specifc list of taps
# /etc/systemd/network/acrn_tap0.netdev
cat <<EOF>/etc/systemd/network/acrn_tap0.netdev
[NetDev]
Name=acrn_tap0
Kind=tap
EOF
# restart systemd-network to create the devices
# and bind network address to the bridge
systemctl restart systemd-networkd
fi
# add tap device under the bridge
brctl addif acrn-br0 acrn_tap0

View File

@@ -0,0 +1,36 @@
#!/bin/bash
# Launch a Clear Linux guest ("vm$1") with acrn-dm.
#
# Arguments:
#   $1 - numeric VM id; the VM is named "vm$1"
#   $2 - number of vCPUs (passed to -c and to the kernel maxcpus=)
#   $4 - i915.avail_planes_per_pipe bitmask for the guest kernel
# NOTE(review): $3 and $5 are supplied by the caller but unused here.
function launch_clear()
{
	vm_name=vm$1

	# Refuse to start if an acrn-dm instance for this VM already exists.
	vm_ps=$(pgrep -a -f acrn-dm)
	result=$(echo $vm_ps | grep "${vm_name}")
	if [[ "$result" != "" ]]; then
		echo "$vm_name is running, can't create twice!"
		exit 1
	fi

	# Guest memory: 1750M on hosts with more than ~4 GB of RAM,
	# 1000M otherwise. (Bug fix: the original unconditionally reset
	# mem_size to 1000M after the check, making the branch dead code.)
	memsize=$(head -n 1 /proc/meminfo | awk '{print $2}')
	if [ "$memsize" -gt 4000000 ]; then
		mem_size=1750M
	else
		mem_size=1000M
	fi

	./acrn-dm -A -m $mem_size -c $2 -s 0:0,hostbridge -s 1:0,lpc -l com1,stdio \
	-s 5,virtio-console,@pty:pty_port \
	-s 6,virtio-hyper_dmabuf \
	-s 3,virtio-blk,/home/root/clear.img \
	-s 4,virtio-net,tap0 -k /home/root/bzImage \
	-B "root=/dev/vda3 rw rootwait noxsave maxcpus=$2 nohpet console=tty0 console=hvc0 \
console=ttyS0 no_timer_check ignore_loglevel log_buf_len=16M \
consoleblank=0 tsc=reliable i915.avail_planes_per_pipe=$4 \
i915.enable_hangcheck=0 i915.nuclear_pageflip=1" $vm_name
}
launch_clear 2 1 "64 448 8" 0x00000C clear