dm: virtio-gpu: VGA compatibility support

The legacy VGA & VBE interface is a common interface supported by
many legacy and modern OSes. Many OS distribution installers use
this interface to display the installer GUI when setting up a fresh
installation on bare metal. Besides, Windows always uses this
interface to display its BSOD, recovery-mode & safe-mode GUI. This
is needed because Windows does not ship a virtio-gpu driver as an
in-box driver, so the VGA interface will be used before the
virtio-gpu driver has been installed.
To be compatible with the PCI BAR layout of legacy VGA, the layout
is refined to meet the requirements of both legacy VGA and modern
virtio-gpu.

BAR0: VGA Framebuffer memory, 16 MB in size.
BAR2: MMIO Space
  [0x0000~0x03ff] EDID data blob
  [0x0400~0x041f] VGA ioports registers
  [0x0500~0x0516] bochs display interface registers
  [0x1000~0x17ff] Virtio common configuration registers
  [0x1800~0x1fff] Virtio ISR state registers
  [0x2000~0x2fff] Virtio device configuration registers
  [0x3000~0x3fff] Virtio notification registers
BAR4: MSI/MSI-X
BAR5: Virtio port io

Tracked-On: #7210
Signed-off-by: Sun Peng <peng.p.sun@linux.intel.com>
Reviewed-by: Zhao, yakui <yakui.zhao@intel.com>
Acked-by: Wang, Yu1 <yu1.wang@intel.com>
This commit is contained in:
Sun, Peng 2022-02-25 23:04:58 +08:00 committed by acrnsi-robot
parent f2cfa761ae
commit 1ed96bfbf8
12 changed files with 2082 additions and 46 deletions

View File

@ -98,6 +98,8 @@ SRCS += hw/block_if.c
SRCS += hw/usb_core.c
SRCS += hw/uart_core.c
SRCS += hw/vdisplay_sdl.c
SRCS += hw/vga.c
SRCS += hw/gc.c
SRCS += hw/pci/virtio/virtio.c
SRCS += hw/pci/virtio/virtio_kernel.c
SRCS += hw/pci/virtio/vhost.c
@ -167,7 +169,6 @@ SRCS += core/sw_load_vsbl.c
SRCS += core/sw_load_ovmf.c
SRCS += core/sw_load_elf.c
SRCS += core/mevent.c
SRCS += core/gc.c
SRCS += core/pm.c
SRCS += core/pm_vuart.c
SRCS += core/console.c

View File

@ -1,19 +0,0 @@
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "gc.h"
struct gfx_ctx {
struct gfx_ctx_image *gc_image;
int raw;
};
struct gfx_ctx_image *
gc_get_image(struct gfx_ctx *gc)
{
if (gc == NULL)
return NULL;
return gc->gc_image;
}

View File

@ -94,6 +94,7 @@ struct hugetlb_info {
int fd;
int pg_size;
size_t lowmem;
size_t fbmem;
size_t biosmem;
size_t highmem;
@ -110,6 +111,7 @@ static struct hugetlb_info hugetlb_priv[HUGETLB_LV_MAX] = {
.fd = -1,
.pg_size = 0,
.lowmem = 0,
.fbmem = 0,
.biosmem = 0,
.highmem = 0,
@ -124,6 +126,7 @@ static struct hugetlb_info hugetlb_priv[HUGETLB_LV_MAX] = {
.fd = -1,
.pg_size = 0,
.lowmem = 0,
.fbmem = 0,
.biosmem = 0,
.highmem = 0,
@ -237,6 +240,7 @@ static bool should_enable_hugetlb_level(int level)
}
return (hugetlb_priv[level].lowmem > 0 ||
hugetlb_priv[level].fbmem > 0 ||
hugetlb_priv[level].biosmem > 0 ||
hugetlb_priv[level].highmem > 0);
}
@ -248,7 +252,7 @@ static bool should_enable_hugetlb_level(int level)
* skip : skip offset in different level hugetlbfs fd
*/
static int mmap_hugetlbfs_from_level(struct vmctx *ctx, int level, size_t len,
size_t offset, size_t skip)
size_t offset, size_t skip, char **addr_out)
{
char *addr;
size_t pagesz = 0;
@ -265,6 +269,9 @@ static int mmap_hugetlbfs_from_level(struct vmctx *ctx, int level, size_t len,
if (addr == MAP_FAILED)
return -ENOMEM;
if (addr_out)
*addr_out = addr;
pr_info("mmap 0x%lx@%p\n", len, addr);
/* pre-allocate hugepages by touch them */
@ -282,7 +289,7 @@ static int mmap_hugetlbfs_from_level(struct vmctx *ctx, int level, size_t len,
static int mmap_hugetlbfs(struct vmctx *ctx, size_t offset,
void (*get_param)(struct hugetlb_info *, size_t *, size_t *),
size_t (*adj_param)(struct hugetlb_info *, struct hugetlb_info *, int))
size_t (*adj_param)(struct hugetlb_info *, struct hugetlb_info *, int), char **addr)
{
size_t len, skip;
int level, ret = 0, pg_size;
@ -292,7 +299,7 @@ static int mmap_hugetlbfs(struct vmctx *ctx, size_t offset,
pg_size = hugetlb_priv[level].pg_size;
while (len > 0) {
ret = mmap_hugetlbfs_from_level(ctx, level, len, offset, skip);
ret = mmap_hugetlbfs_from_level(ctx, level, len, offset, skip, addr);
if (ret < 0 && level > HUGETLB_LV1) {
len = adj_param(
@ -359,6 +366,28 @@ static size_t adj_biosmem_param(struct hugetlb_info *htlb,
return htlb->biosmem;
}
#define FB_SIZE (16 * MB)
/*
 * Report how much framebuffer memory this hugetlb level must back
 * (only the first level carries the fixed FB_SIZE framebuffer) and
 * how far into the level's fd the fbmem region starts.
 */
static void get_fbmem_param(struct hugetlb_info *htlb,
		size_t *len, size_t *skip)
{
	/* fbmem always follows lowmem, highmem and biosmem in the fd. */
	*len = (htlb == &hugetlb_priv[0]) ? FB_SIZE : 0;
	*skip = htlb->lowmem + htlb->highmem + htlb->biosmem;
}
/*
 * Shift adj_size bytes of the fbmem budget from this hugetlb level to
 * the previous (smaller-page) level after a mapping failure.
 * Returns the remaining fbmem for this level.
 */
static size_t adj_fbmem_param(struct hugetlb_info *htlb,
		struct hugetlb_info *htlb_prev, int adj_size)
{
	htlb_prev->fbmem += adj_size;
	htlb->fbmem -= adj_size;
	return htlb->fbmem;
}
static int rm_hugetlb_dirs(int level)
{
char path[MAX_PATH_LEN]={0};
@ -484,8 +513,9 @@ static bool hugetlb_check_memgap(void)
for (lvl = HUGETLB_LV1; lvl < hugetlb_lv_max; lvl++) {
free_pages = read_sys_info(hugetlb_priv[lvl].free_pages_path);
need_pages = (hugetlb_priv[lvl].lowmem + hugetlb_priv[lvl].biosmem +
hugetlb_priv[lvl].highmem) / hugetlb_priv[lvl].pg_size;
need_pages = (hugetlb_priv[lvl].lowmem + hugetlb_priv[lvl].fbmem +
hugetlb_priv[lvl].biosmem + hugetlb_priv[lvl].highmem) /
hugetlb_priv[lvl].pg_size;
hugetlb_priv[lvl].pages_delta = need_pages - free_pages;
/* if delta > 0, it's a gap for needed pages, to be handled */
@ -672,7 +702,7 @@ void uninit_hugetlb(void)
int hugetlb_setup_memory(struct vmctx *ctx)
{
int level;
size_t lowmem, biosmem, highmem;
size_t lowmem, fbmem, biosmem, highmem;
bool has_gap;
if (ctx->lowmem == 0) {
@ -692,6 +722,8 @@ int hugetlb_setup_memory(struct vmctx *ctx)
* hugetlb_priv[HUGETLB_LV1].pg_size */
ctx->lowmem =
ALIGN_DOWN(ctx->lowmem, hugetlb_priv[HUGETLB_LV1].pg_size);
ctx->fbmem =
ALIGN_DOWN(ctx->fbmem, hugetlb_priv[HUGETLB_LV1].pg_size);
ctx->biosmem =
ALIGN_DOWN(ctx->biosmem, hugetlb_priv[HUGETLB_LV1].pg_size);
ctx->highmem =
@ -701,12 +733,15 @@ int hugetlb_setup_memory(struct vmctx *ctx)
/* check & set hugetlb level memory size for lowmem/biosmem/highmem */
lowmem = ctx->lowmem;
fbmem = ctx->fbmem;
biosmem = ctx->biosmem;
highmem = ctx->highmem;
for (level = hugetlb_lv_max - 1; level >= HUGETLB_LV1; level--) {
hugetlb_priv[level].lowmem =
ALIGN_DOWN(lowmem, hugetlb_priv[level].pg_size);
hugetlb_priv[level].fbmem =
ALIGN_DOWN(fbmem, hugetlb_priv[level].pg_size);
hugetlb_priv[level].biosmem =
ALIGN_DOWN(biosmem, hugetlb_priv[level].pg_size);
hugetlb_priv[level].highmem =
@ -715,6 +750,8 @@ int hugetlb_setup_memory(struct vmctx *ctx)
if (level > HUGETLB_LV1) {
hugetlb_priv[level-1].lowmem = lowmem =
lowmem - hugetlb_priv[level].lowmem;
hugetlb_priv[level-1].fbmem = fbmem =
fbmem - hugetlb_priv[level].fbmem;
hugetlb_priv[level-1].biosmem = biosmem =
biosmem - hugetlb_priv[level].biosmem;
hugetlb_priv[level-1].highmem = highmem =
@ -742,9 +779,10 @@ int hugetlb_setup_memory(struct vmctx *ctx)
/* dump hugepage trying to setup */
pr_info("\ntry to setup hugepage with:\n");
for (level = HUGETLB_LV1; level < hugetlb_lv_max; level++) {
pr_info("\tlevel %d - lowmem 0x%lx, biosmem 0x%lx, highmem 0x%lx\n",
pr_info("\tlevel %d - lowmem 0x%lx, fbmem 0x%lx, biosmem 0x%lx, highmem 0x%lx\n",
level,
hugetlb_priv[level].lowmem,
hugetlb_priv[level].fbmem,
hugetlb_priv[level].biosmem,
hugetlb_priv[level].highmem);
}
@ -769,25 +807,31 @@ int hugetlb_setup_memory(struct vmctx *ctx)
pr_info("mmap ptr 0x%p -> baseaddr 0x%p\n", ptr, ctx->baseaddr);
/* mmap lowmem */
if (mmap_hugetlbfs(ctx, 0, get_lowmem_param, adj_lowmem_param) < 0) {
if (mmap_hugetlbfs(ctx, 0, get_lowmem_param, adj_lowmem_param, NULL) < 0) {
pr_err("lowmem mmap failed");
goto err_lock;
}
/* mmap highmem */
if (mmap_hugetlbfs(ctx, ctx->highmem_gpa_base,
get_highmem_param, adj_highmem_param) < 0) {
get_highmem_param, adj_highmem_param, NULL) < 0) {
pr_err("highmem mmap failed");
goto err_lock;
}
/* mmap biosmem */
if (mmap_hugetlbfs(ctx, 4 * GB - ctx->biosmem,
get_biosmem_param, adj_biosmem_param) < 0) {
get_biosmem_param, adj_biosmem_param, NULL) < 0) {
pr_err("biosmem mmap failed");
goto err_lock;
}
/* mmap fbmem */
if (mmap_hugetlbfs(ctx, 4 * GB - ctx->biosmem - ctx->fbmem,
get_fbmem_param, adj_fbmem_param, (char **)&ctx->fb_base) < 0) {
pr_err("fbmem mmap failed");
goto err_lock;
}
unlock_acrn_hugetlb();
/* dump hugepage really setup */

View File

@ -372,6 +372,7 @@ vm_setup_memory(struct vmctx *ctx, size_t memsize)
}
ctx->biosmem = high_bios_size();
ctx->fbmem = (16 * 1024 * 1024);
ret = hugetlb_setup_memory(ctx);
if (ret == 0) {

91
devicemodel/hw/gc.c Normal file
View File

@ -0,0 +1,91 @@
/*
* Copyright (C) 2022 Intel Corporation.
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include "gc.h"
/*
 * Allocate a graphics context of width x height pixels.
 *
 * If fbaddr is non-NULL the context renders directly into that
 * caller-owned framebuffer ("raw" mode); otherwise a zeroed 32bpp
 * pixel buffer is allocated and owned by the context.
 *
 * Returns NULL on allocation failure. The original code checked the
 * calloc() results with assert(), which compiles away under NDEBUG
 * and would lead to a NULL dereference; failures are now handled
 * explicitly.
 */
struct gfx_ctx *
gc_init(int width, int height, void *fbaddr)
{
	struct gfx_ctx *gc;
	struct gfx_ctx_image *gc_image;

	gc = calloc(1, sizeof(*gc));
	if (gc == NULL)
		return NULL;

	gc_image = calloc(1, sizeof(*gc_image));
	if (gc_image == NULL) {
		free(gc);
		return NULL;
	}

	gc_image->width = width;
	gc_image->height = height;
	if (fbaddr) {
		/* raw mode: pixel storage belongs to the caller */
		gc_image->data = fbaddr;
		gc->raw = 1;
	} else {
		/* cast avoids int overflow in width * height for large modes */
		gc_image->data = calloc((size_t)width * height,
				sizeof(uint32_t));
		if (gc_image->data == NULL) {
			free(gc_image);
			free(gc);
			return NULL;
		}
		gc->raw = 0;
	}
	gc->gc_image = gc_image;

	return gc;
}
/*
 * Release a graphics context.
 *
 * Heap-backed pixel data (raw == 0) is owned by the context and freed
 * here; raw framebuffers belong to the caller and are left alone.
 * Safe to call with NULL.
 */
void
gc_deinit(struct gfx_ctx *gc)
{
	if (!gc)
		return;

	if (gc->gc_image) {
		/* free(NULL) is a no-op, no need to guard ->data */
		if (!gc->raw)
			free(gc->gc_image->data);
		free(gc->gc_image);
	}
	free(gc);
	/* note: the original also assigned NULL to the local parameter,
	 * which had no effect on the caller's pointer and was dropped */
}
/*
 * Switch the context to render directly into the caller-owned
 * framebuffer at fbaddr ("raw" mode).
 *
 * Bug fix: the original set gc->raw = 1 *before* deciding whether to
 * free the old pixel buffer, so if the context was already raw the old
 * (externally owned, possibly mmapped) framebuffer could be passed to
 * free() — undefined behavior. Only heap-backed data (raw == 0) may be
 * freed, and only when it actually changes.
 */
void
gc_set_fbaddr(struct gfx_ctx *gc, void *fbaddr)
{
	if (!gc->raw && gc->gc_image->data && gc->gc_image->data != fbaddr)
		free(gc->gc_image->data);
	gc->raw = 1;
	gc->gc_image->data = fbaddr;
}
/* Return the image backing the context, or NULL for a NULL context. */
struct gfx_ctx_image *
gc_get_image(struct gfx_ctx *gc)
{
	return gc ? gc->gc_image : NULL;
}
/*
 * Resize the context to width x height pixels.
 *
 * For heap-backed contexts the pixel buffer is reallocated and zeroed.
 * Bug fixes vs. the original:
 *  - realloc() result went straight into gc_image->data, losing (and
 *    leaking) the old buffer on failure; a temporary is used instead.
 *  - width/height were updated even when realloc() failed, leaving the
 *    recorded geometry larger than the surviving buffer (out-of-bounds
 *    writes for any later renderer). On failure we now keep the old
 *    geometry and pixels untouched.
 * Raw contexts (caller-owned framebuffer) only update the geometry.
 */
void
gc_resize(struct gfx_ctx *gc, int width, int height)
{
	struct gfx_ctx_image *gc_image = gc->gc_image;

	if (!gc->raw) {
		uint32_t *data;
		size_t bytes = (size_t)width * height * sizeof(uint32_t);

		data = realloc(gc_image->data, bytes);
		if (data == NULL)
			return;	/* old buffer and geometry stay valid */
		memset(data, 0, bytes);
		gc_image->data = data;
	}
	gc_image->width = width;
	gc_image->height = height;
}

View File

@ -1167,7 +1167,7 @@ virtio_set_modern_mmio_bar(struct virtio_base *base, int barnum)
/*
* Set virtio modern PIO BAR (usually 2) to map notify capability.
*/
static int
int
virtio_set_modern_pio_bar(struct virtio_base *base, int barnum)
{
int rc;
@ -1264,7 +1264,7 @@ virtio_get_cap_id(uint64_t offset, int size)
return rc;
}
static uint32_t
uint32_t
virtio_common_cfg_read(struct pci_vdev *dev, uint64_t offset, int size)
{
struct virtio_base *base = dev->arg;
@ -1378,7 +1378,7 @@ virtio_common_cfg_read(struct pci_vdev *dev, uint64_t offset, int size)
return value;
}
static void
void
virtio_common_cfg_write(struct pci_vdev *dev, uint64_t offset, int size,
uint64_t value)
{
@ -1511,7 +1511,7 @@ bad_qindex:
}
/* ignore driver writes to ISR region, and only support ISR region read */
static uint32_t
uint32_t
virtio_isr_cfg_read(struct pci_vdev *dev, uint64_t offset, int size)
{
struct virtio_base *base = dev->arg;
@ -1525,7 +1525,7 @@ virtio_isr_cfg_read(struct pci_vdev *dev, uint64_t offset, int size)
return value;
}
static uint32_t
uint32_t
virtio_device_cfg_read(struct pci_vdev *dev, uint64_t offset, int size)
{
struct virtio_base *base = dev->arg;
@ -1558,7 +1558,7 @@ virtio_device_cfg_read(struct pci_vdev *dev, uint64_t offset, int size)
return value;
}
static void
void
virtio_device_cfg_write(struct pci_vdev *dev, uint64_t offset, int size,
uint64_t value)
{
@ -1590,7 +1590,7 @@ virtio_device_cfg_write(struct pci_vdev *dev, uint64_t offset, int size,
* ignore driver reads from notify region, and only support notify region
* write
*/
static void
void
virtio_notify_cfg_write(struct pci_vdev *dev, uint64_t offset, int size,
uint64_t value)
{

View File

@ -10,11 +10,16 @@
#include <string.h>
#include <pthread.h>
#include <sys/mman.h>
#include <unistd.h>
#include <stdbool.h>
#include <vmmapi.h>
#include "dm.h"
#include "pci_core.h"
#include "virtio.h"
#include "vdisplay.h"
#include "console.h"
#include "vga.h"
/*
* Queue definitions.
@ -54,6 +59,18 @@
#define VIRTIO_GPU_MAX_SCANOUTS 16
#define VIRTIO_GPU_FLAG_FENCE (1 << 0)
#define VIRTIO_GPU_FLAG_INFO_RING_IDX (1 << 1)
/* VIRTIO_GPU_FLAG_FENCE is already defined above — the duplicate
 * definition was dropped.
 * BAR0/BAR2 layout constants for the VGA-compatible virtio-gpu device.
 * FB_SIZE expansion is parenthesized so it composes safely in
 * arbitrary expressions (e.g. division). */
#define VIRTIO_GPU_VGA_FB_SIZE		(16 * MB)
#define VIRTIO_GPU_VGA_DMEMSZ		128
#define VIRTIO_GPU_EDID_SIZE		384
#define VIRTIO_GPU_VGA_IOPORT_OFFSET	0x400
#define VIRTIO_GPU_VGA_IOPORT_SIZE	(0x3e0 - 0x3c0)
#define VIRTIO_GPU_VGA_VBE_OFFSET	0x500
#define VIRTIO_GPU_VGA_VBE_SIZE		(0xb * 2)
#define VIRTIO_GPU_CAP_COMMON_OFFSET	0x1000
#define VIRTIO_GPU_CAP_COMMON_SIZE	0x800
#define VIRTIO_GPU_CAP_ISR_OFFSET	0x1800
#define VIRTIO_GPU_CAP_ISR_SIZE		0x800
/*
* Config space "registers"
@ -282,6 +299,9 @@ struct virtio_gpu {
LIST_HEAD(,virtio_gpu_resource_2d) r2d_list;
struct vdpy_display_bh ctrl_bh;
struct vdpy_display_bh cursor_bh;
struct vdpy_display_bh vga_bh;
struct vga vga;
uint8_t edid[VIRTIO_GPU_EDID_SIZE];
};
struct virtio_gpu_command {
@ -299,6 +319,7 @@ static int virtio_gpu_cfgread(void *, int, int, uint32_t *);
static int virtio_gpu_cfgwrite(void *, int, int, uint32_t);
static void virtio_gpu_neg_features(void *, uint64_t);
static void virtio_gpu_set_status(void *, uint64_t);
static void * virtio_gpu_vga_render(void *param);
static struct virtio_ops virtio_gpu_ops = {
"virtio-gpu", /* our name */
@ -347,6 +368,11 @@ virtio_gpu_reset(void *vdev)
}
}
LIST_INIT(&gpu->r2d_list);
gpu->vga.enable = true;
vdpy_surface_set(gpu->vdpy_handle, &gpu->vga.surf);
gpu->vga.surf.width = 0;
gpu->vga.surf.stride = 0;
pthread_create(&gpu->vga.tid, NULL, virtio_gpu_vga_render, (void*)gpu);
virtio_reset_dev(&gpu->base);
}
@ -693,6 +719,10 @@ virtio_gpu_cmd_set_scanout(struct virtio_gpu_command *cmd)
cmd->iolen = sizeof(resp);
memcpy(cmd->iov[1].iov_base, &resp, sizeof(resp));
if(cmd->gpu->vga.enable) {
cmd->gpu->vga.enable = false;
}
}
static void
@ -981,12 +1011,63 @@ virtio_gpu_notify_cursorq(void *vdev, struct virtio_vq_info *vq)
vdpy_submit_bh(gpu->vdpy_handle, &gpu->cursor_bh);
}
static void
virtio_gpu_vga_bh(void *param)
{
struct virtio_gpu *gpu;
gpu = (struct virtio_gpu*)param;
if ((gpu->vga.surf.width != gpu->vga.gc->gc_image->width) ||
(gpu->vga.surf.height != gpu->vga.gc->gc_image->height)) {
gpu->vga.surf.width = gpu->vga.gc->gc_image->width;
gpu->vga.surf.height = gpu->vga.gc->gc_image->height;
gpu->vga.surf.stride = gpu->vga.gc->gc_image->width * 4;
gpu->vga.surf.pixel = gpu->vga.gc->gc_image->data;
gpu->vga.surf.surf_format = PIXMAN_a8r8g8b8;
gpu->vga.surf.surf_type = SURFACE_PIXMAN;
vdpy_surface_set(gpu->vdpy_handle, &gpu->vga.surf);
}
vdpy_surface_update(gpu->vdpy_handle, &gpu->vga.surf);
}
/*
 * VGA render thread body. Runs until gpu->vga.enable goes false (the
 * virtio driver took over, see virtio_gpu_cmd_set_scanout) or legacy
 * vgamode takes over rendering.
 *
 * NOTE(review): gpu->vga.enable is a plain bool written by another
 * thread without atomics or locking — confirm whether this needs
 * atomic_bool to guarantee the loop observes the store.
 * The below logic needs to be refined.
 */
static void *
virtio_gpu_vga_render(void *param)
{
	struct virtio_gpu *gpu;

	gpu = (struct virtio_gpu*)param;
	while(gpu->vga.enable) {
		/* legacy VGA text/graphics mode: hand off to vga_render
		 * and exit this loop entirely */
		if(gpu->vga.gc->gc_image->vgamode) {
			vga_render(gpu->vga.gc, gpu->vga.dev);
			break;
		}
		/* track VBE resolution changes before publishing a frame */
		if(gpu->vga.gc->gc_image->width != gpu->vga.vberegs.xres ||
		   gpu->vga.gc->gc_image->height != gpu->vga.vberegs.yres) {
			gc_resize(gpu->vga.gc, gpu->vga.vberegs.xres, gpu->vga.vberegs.yres);
		}
		vdpy_submit_bh(gpu->vdpy_handle, &gpu->vga_bh);
		/* ~30 fps refresh */
		usleep(33000);
	}

	return NULL;
}
static int
virtio_gpu_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
{
struct virtio_gpu *gpu;
pthread_mutexattr_t attr;
int rc = 0;
struct display_info info;
int prot;
struct virtio_pci_cap cap;
struct virtio_pci_notify_cap notify;
struct virtio_pci_cfg_cap cfg;
if (virtio_gpu_device_cnt) {
pr_err("%s: only 1 virtio-gpu device can be created.\n", __func__);
@ -1038,11 +1119,13 @@ virtio_gpu_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
gpu->vq[VIRTIO_GPU_CURSORQ].qsize = VIRTIO_GPU_RINGSZ;
gpu->vq[VIRTIO_GPU_CURSORQ].notify = virtio_gpu_notify_cursorq;
/* Initialize the ctrl/cursor bh_task */
/* Initialize the ctrl/cursor/vga bh_task */
gpu->ctrl_bh.task_cb = virtio_gpu_ctrl_bh;
gpu->ctrl_bh.data = &gpu->vq[VIRTIO_GPU_CONTROLQ];
gpu->cursor_bh.task_cb = virtio_gpu_cursor_bh;
gpu->cursor_bh.data = &gpu->vq[VIRTIO_GPU_CURSORQ];
gpu->vga_bh.task_cb = virtio_gpu_vga_bh;
gpu->vga_bh.data = gpu;
/* prepare the config space */
gpu->cfg.events_read = 0;
@ -1055,25 +1138,92 @@ virtio_gpu_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
pci_set_cfgdata16(dev, PCIR_VENDOR, VIRTIO_VENDOR);
pci_set_cfgdata16(dev, PCIR_REVID, 1);
pci_set_cfgdata8(dev, PCIR_CLASS, PCIC_DISPLAY);
pci_set_cfgdata8(dev, PCIR_SUBCLASS, PCIS_DISPLAY_OTHER);
pci_set_cfgdata8(dev, PCIR_SUBCLASS, PCIS_DISPLAY_VGA);
pci_set_cfgdata16(dev, PCIR_SUBDEV_0, VIRTIO_TYPE_GPU);
pci_set_cfgdata16(dev, PCIR_SUBVEND_0, VIRTIO_VENDOR);
LIST_INIT(&gpu->r2d_list);
vdpy_get_display_info(gpu->vdpy_handle, &info);
/*** PCI Config BARs setup ***/
rc = virtio_interrupt_init(&gpu->base, virtio_uses_msix());
/** BAR0: VGA framebuffer **/
pci_emul_alloc_bar(dev, 0, PCIBAR_MEM32, VIRTIO_GPU_VGA_FB_SIZE);
prot = PROT_READ | PROT_WRITE;
if (vm_map_memseg_vma(ctx, VIRTIO_GPU_VGA_FB_SIZE, dev->bar[0].addr,
(uint64_t)ctx->fb_base, prot) != 0) {
pr_err("%s: fail to map VGA framebuffer to bar0.\n", __func__);
}
/** BAR2: VGA & Virtio Modern regs **/
/* EDID data blob [0x000~0x3ff] */
vdpy_get_edid(gpu->vdpy_handle, gpu->edid, VIRTIO_GPU_EDID_SIZE);
/* VGA ioports regs [0x400~0x41f] */
gpu->vga.gc = gc_init(info.width, info.height, ctx->fb_base);
gpu->vga.dev = vga_init(gpu->vga.gc, 0);
/* Bochs Display regs [0x500~0x516]*/
gpu->vga.vberegs.xres = info.width;
gpu->vga.vberegs.yres = info.height;
gpu->vga.vberegs.bpp = 32;
gpu->vga.vberegs.id = VBE_DISPI_ID0;
gpu->vga.vberegs.video_memory_64k = VIRTIO_GPU_VGA_FB_SIZE >> 16;
/* Virtio Modern capability regs*/
cap.cap_vndr = PCIY_VENDOR;
cap.cap_next = 0;
cap.cap_len = sizeof(cap);
cap.bar = 2;
/* Common configuration regs [0x1000~0x17ff]*/
cap.cfg_type = VIRTIO_PCI_CAP_COMMON_CFG;
cap.offset = VIRTIO_GPU_CAP_COMMON_OFFSET;
cap.length = VIRTIO_GPU_CAP_COMMON_SIZE;
pci_emul_add_capability(dev, (u_char *)&cap, sizeof(cap));
/* ISR status regs [0x1800~0x1fff]*/
cap.cfg_type = VIRTIO_PCI_CAP_ISR_CFG;
cap.offset = VIRTIO_GPU_CAP_ISR_OFFSET;
cap.length = VIRTIO_GPU_CAP_ISR_SIZE;
pci_emul_add_capability(dev, (u_char *)&cap, sizeof(cap));
/* Device configuration regs [0x2000~0x2fff]*/
cap.cfg_type = VIRTIO_PCI_CAP_DEVICE_CFG;
cap.offset = VIRTIO_CAP_DEVICE_OFFSET;
cap.length = VIRTIO_CAP_DEVICE_SIZE;
pci_emul_add_capability(dev, (u_char *)&cap, sizeof(cap));
/* Notification regs [0x3000~0x3fff]*/
notify.cap.cap_vndr = PCIY_VENDOR;
notify.cap.cap_next = 0;
notify.cap.cap_len = sizeof(notify);
notify.cap.cfg_type = VIRTIO_PCI_CAP_NOTIFY_CFG;
notify.cap.bar = 2;
notify.cap.offset = VIRTIO_CAP_NOTIFY_OFFSET;
notify.cap.length = VIRTIO_CAP_NOTIFY_SIZE;
notify.notify_off_multiplier = VIRTIO_MODERN_NOTIFY_OFF_MULT;
pci_emul_add_capability(dev, (u_char *)&notify, sizeof(notify));
/* Alternative configuration access regs */
cfg.cap.cap_vndr = PCIY_VENDOR;
cfg.cap.cap_next = 0;
cfg.cap.cap_len = sizeof(cfg);
cfg.cap.cfg_type = VIRTIO_PCI_CAP_PCI_CFG;
pci_emul_add_capability(dev, (u_char *)&cfg, sizeof(cfg));
pci_emul_alloc_bar(dev, 2, PCIBAR_MEM64, VIRTIO_MODERN_MEM_BAR_SIZE);
rc = virtio_intr_init(&gpu->base, 4, virtio_uses_msix());
if (rc) {
pr_err("%s, interrupt_init failed.\n", __func__);
return rc;
}
rc = virtio_set_modern_bar(&gpu->base, true);
rc = virtio_set_modern_pio_bar(&gpu->base, 5);
if (rc) {
pr_err("%s, set modern bar failed.\n", __func__);
pr_err("%s, set modern io bar(BAR5) failed.\n", __func__);
return rc;
}
gpu->vdpy_handle = vdpy_init();
/* VGA Compablility */
gpu->vga.enable = true;
gpu->vga.surf.width = 0;
gpu->vga.surf.stride = 0;
gpu->vga.surf.height = 0;
gpu->vga.surf.pixel = 0;
pthread_create(&gpu->vga.tid, NULL, virtio_gpu_vga_render, (void*)gpu);
return 0;
}
@ -1109,18 +1259,133 @@ virtio_gpu_deinit(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
vdpy_deinit(gpu->vdpy_handle);
}
uint64_t
virtio_gpu_edid_read(struct vmctx *ctx, int vcpu, struct pci_vdev *dev,
uint64_t offset, int size)
{
struct virtio_gpu *gpu;
uint8_t *p;
uint64_t value;
gpu = (struct virtio_gpu *)dev->arg;
p = (uint8_t *)gpu->edid + offset;
value = 0;
switch (size) {
case 1:
value = *p;
break;
case 2:
value = *(uint16_t *)p;
break;
case 4:
value = *(uint32_t *)p;
break;
case 8:
value = *(uint64_t *)p;
break;
default:
pr_dbg("%s: read unknown size %d\n", __func__, size);
break;
}
return (value);
}
/*
 * PCI BAR write dispatcher for the VGA-compatible virtio-gpu device.
 * BAR2 multiplexes the EDID blob, VGA ioports, Bochs VBE registers and
 * the virtio modern capability regions; everything else falls through
 * to the generic virtio-pci path.
 *
 * Fixes vs. the original:
 *  - EDID range test used '<= VIRTIO_GPU_EDID_SIZE' (off-by-one; the
 *    blob spans [0, SIZE)) and a tautological 'offset >= 0' on an
 *    unsigned value;
 *  - pr_err used "%d" for uint64_t arguments, which is undefined
 *    behavior; 64-bit-safe "%lx" is used instead.
 */
static void
virtio_gpu_write(struct vmctx *ctx, int vcpu, struct pci_vdev *dev,
		int baridx, uint64_t offset, int size, uint64_t value)
{
	struct virtio_gpu *gpu;

	gpu = (struct virtio_gpu *)dev->arg;
	if (baridx == 0) {
		/* BAR0 framebuffer is normally accessed through the mapped
		 * memseg; a trapped write here is unexpected. */
		pr_err("%s: vgafb offset=0x%lx size=%d value=0x%lx.\n",
				__func__, offset, size, value);
	} else if (baridx == 2) {
		if (offset < VIRTIO_GPU_EDID_SIZE) {
			pr_dbg("%s: EDID region is read-only.\n", __func__);
		} else if ((offset >= VIRTIO_GPU_VGA_IOPORT_OFFSET) &&
			   (offset < (VIRTIO_GPU_VGA_IOPORT_OFFSET +
				      VIRTIO_GPU_VGA_IOPORT_SIZE))) {
			offset -= VIRTIO_GPU_VGA_IOPORT_OFFSET;
			vga_ioport_write(ctx, vcpu, &gpu->vga, offset, size,
					value);
		} else if ((offset >= VIRTIO_GPU_VGA_VBE_OFFSET) &&
			   (offset < (VIRTIO_GPU_VGA_VBE_OFFSET +
				      VIRTIO_GPU_VGA_VBE_SIZE))) {
			offset -= VIRTIO_GPU_VGA_VBE_OFFSET;
			vga_vbe_write(ctx, vcpu, &gpu->vga, offset, size, value);
			/* NOTE(review): 'offset' is a byte offset into the VBE
			 * register block while VBE_DISPI_INDEX_ENABLE is a
			 * register index — confirm this shouldn't compare
			 * against (VBE_DISPI_INDEX_ENABLE * 2). Also, each hit
			 * spawns a new render thread without joining the
			 * previous one — potential thread leak. */
			if (offset == VBE_DISPI_INDEX_ENABLE) {
				pthread_create(&gpu->vga.tid, NULL,
						virtio_gpu_vga_render,
						(void *)gpu);
			}
		} else if ((offset >= VIRTIO_GPU_CAP_COMMON_OFFSET) &&
			   (offset < (VIRTIO_GPU_CAP_COMMON_OFFSET +
				      VIRTIO_GPU_CAP_COMMON_SIZE))) {
			offset -= VIRTIO_GPU_CAP_COMMON_OFFSET;
			virtio_common_cfg_write(dev, offset, size, value);
		} else if ((offset >= VIRTIO_CAP_DEVICE_OFFSET) &&
			   (offset < (VIRTIO_CAP_DEVICE_OFFSET +
				      VIRTIO_CAP_DEVICE_SIZE))) {
			offset -= VIRTIO_CAP_DEVICE_OFFSET;
			virtio_device_cfg_write(dev, offset, size, value);
		} else if ((offset >= VIRTIO_CAP_NOTIFY_OFFSET) &&
			   (offset < (VIRTIO_CAP_NOTIFY_OFFSET +
				      VIRTIO_CAP_NOTIFY_SIZE))) {
			offset -= VIRTIO_CAP_NOTIFY_OFFSET;
			virtio_notify_cfg_write(dev, offset, size, value);
		} else {
			virtio_pci_write(ctx, vcpu, dev, baridx, offset, size,
					value);
		}
	} else {
		virtio_pci_write(ctx, vcpu, dev, baridx, offset, size, value);
	}
}
/*
 * PCI BAR read dispatcher, mirroring virtio_gpu_write's BAR2 layout.
 *
 * Fixes vs. the original:
 *  - EDID range test used '<= VIRTIO_GPU_EDID_SIZE', which dispatched
 *    offset == VIRTIO_GPU_EDID_SIZE into virtio_gpu_edid_read and read
 *    one byte past the edid array (off-by-one / OOB); the tautological
 *    'offset >= 0' on an unsigned value was also dropped;
 *  - pr_err used "%d" for a uint64_t offset (undefined behavior).
 */
static uint64_t
virtio_gpu_read(struct vmctx *ctx, int vcpu, struct pci_vdev *dev,
		int baridx, uint64_t offset, int size)
{
	struct virtio_gpu *gpu;

	gpu = (struct virtio_gpu *)dev->arg;
	if (baridx == 0) {
		/* BAR0 framebuffer reads should go through the mapped
		 * memseg; a trapped read here is unexpected. */
		pr_err("%s: vgafb offset=0x%lx size=%d.\n",
				__func__, offset, size);
		return 0;
	} else if (baridx == 2) {
		if (offset < VIRTIO_GPU_EDID_SIZE) {
			return virtio_gpu_edid_read(ctx, vcpu, dev, offset, size);
		} else if ((offset >= VIRTIO_GPU_VGA_IOPORT_OFFSET) &&
			   (offset < (VIRTIO_GPU_VGA_IOPORT_OFFSET +
				      VIRTIO_GPU_VGA_IOPORT_SIZE))) {
			offset -= VIRTIO_GPU_VGA_IOPORT_OFFSET;
			return vga_ioport_read(ctx, vcpu, &gpu->vga, offset, size);
		} else if ((offset >= VIRTIO_GPU_VGA_VBE_OFFSET) &&
			   (offset < (VIRTIO_GPU_VGA_VBE_OFFSET +
				      VIRTIO_GPU_VGA_VBE_SIZE))) {
			offset -= VIRTIO_GPU_VGA_VBE_OFFSET;
			return vga_vbe_read(ctx, vcpu, &gpu->vga, offset, size);
		} else if ((offset >= VIRTIO_GPU_CAP_COMMON_OFFSET) &&
			   (offset < (VIRTIO_GPU_CAP_COMMON_OFFSET +
				      VIRTIO_GPU_CAP_COMMON_SIZE))) {
			offset -= VIRTIO_GPU_CAP_COMMON_OFFSET;
			return virtio_common_cfg_read(dev, offset, size);
		} else if ((offset >= VIRTIO_GPU_CAP_ISR_OFFSET) &&
			   (offset < (VIRTIO_GPU_CAP_ISR_OFFSET +
				      VIRTIO_GPU_CAP_ISR_SIZE))) {
			offset -= VIRTIO_GPU_CAP_ISR_OFFSET;
			return virtio_isr_cfg_read(dev, offset, size);
		} else if ((offset >= VIRTIO_CAP_DEVICE_OFFSET) &&
			   (offset < (VIRTIO_CAP_DEVICE_OFFSET +
				      VIRTIO_CAP_DEVICE_SIZE))) {
			offset -= VIRTIO_CAP_DEVICE_OFFSET;
			return virtio_device_cfg_read(dev, offset, size);
		} else {
			return virtio_pci_read(ctx, vcpu, dev, baridx, offset,
					size);
		}
	} else {
		return virtio_pci_read(ctx, vcpu, dev, baridx, offset, size);
	}
}

1428
devicemodel/hw/vga.c Normal file

File diff suppressed because it is too large Load Diff

View File

@ -31,8 +31,6 @@
#include "types.h"
struct gfx_ctx;
struct gfx_ctx_image {
int vgamode;
int width;
@ -40,6 +38,15 @@ struct gfx_ctx_image {
uint32_t *data;
};
struct gfx_ctx {
struct gfx_ctx_image *gc_image;
int raw;
};
struct gfx_ctx_image *gc_get_image(struct gfx_ctx *gc);
struct gfx_ctx *gc_init(int width, int height, void *fbaddr);
void gc_deinit(struct gfx_ctx *gc);
void gc_set_fbaddr(struct gfx_ctx *gc, void *fbaddr);
void gc_resize(struct gfx_ctx *gc, int width, int height);
#endif /* _GC_H_ */

199
devicemodel/include/vga.h Normal file
View File

@ -0,0 +1,199 @@
/*
* Copyright (C) 2022 Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*/
#ifndef _VGA_H_
#define _VGA_H_
#include "gc.h"
#include "vdisplay.h"
#define VGA_IOPORT_START 0x3c0
#define VGA_IOPORT_END 0x3df
/* General registers */
#define GEN_INPUT_STS0_PORT 0x3c2
#define GEN_FEATURE_CTRL_PORT 0x3ca
#define GEN_MISC_OUTPUT_PORT 0x3cc
#define GEN_INPUT_STS1_MONO_PORT 0x3ba
#define GEN_INPUT_STS1_COLOR_PORT 0x3da
#define GEN_IS1_VR 0x08 /* Vertical retrace */
#define GEN_IS1_DE 0x01 /* Display enable not */
/* Attribute controller registers. */
#define ATC_IDX_PORT 0x3c0
#define ATC_DATA_PORT 0x3c1
#define ATC_IDX_MASK 0x1f
#define ATC_PALETTE0 0
#define ATC_PALETTE15 15
#define ATC_MODE_CONTROL 16
#define ATC_MC_IPS 0x80 /* Internal palette size */
#define ATC_MC_GA 0x01 /* Graphics/alphanumeric */
#define ATC_OVERSCAN_COLOR 17
#define ATC_COLOR_PLANE_ENABLE 18
#define ATC_HORIZ_PIXEL_PANNING 19
#define ATC_COLOR_SELECT 20
#define ATC_CS_C67 0x0c /* Color select bits 6+7 */
#define ATC_CS_C45 0x03 /* Color select bits 4+5 */
/* Sequencer registers. */
#define SEQ_IDX_PORT 0x3c4
#define SEQ_DATA_PORT 0x3c5
#define SEQ_RESET 0
#define SEQ_RESET_ASYNC 0x1
#define SEQ_RESET_SYNC 0x2
#define SEQ_CLOCKING_MODE 1
#define SEQ_CM_SO 0x20 /* Screen off */
#define SEQ_CM_89 0x01 /* 8/9 dot clock */
#define SEQ_MAP_MASK 2
#define SEQ_CHAR_MAP_SELECT 3
#define SEQ_CMS_SAH 0x20 /* Char map A bit 2 */
#define SEQ_CMS_SAH_SHIFT 5
#define SEQ_CMS_SA 0x0c /* Char map A bits 0+1 */
#define SEQ_CMS_SA_SHIFT 2
#define SEQ_CMS_SBH 0x10 /* Char map B bit 2 */
#define SEQ_CMS_SBH_SHIFT 4
#define SEQ_CMS_SB 0x03 /* Char map B bits 0+1 */
#define SEQ_CMS_SB_SHIFT 0
#define SEQ_MEMORY_MODE 4
#define SEQ_MM_C4 0x08 /* Chain 4 */
#define SEQ_MM_OE 0x04 /* Odd/even */
#define SEQ_MM_EM 0x02 /* Extended memory */
/* Graphics controller registers. */
#define GC_IDX_PORT 0x3ce
#define GC_DATA_PORT 0x3cf
#define GC_SET_RESET 0
#define GC_ENABLE_SET_RESET 1
#define GC_COLOR_COMPARE 2
#define GC_DATA_ROTATE 3
#define GC_READ_MAP_SELECT 4
#define GC_MODE 5
#define GC_MODE_OE 0x10 /* Odd/even */
#define GC_MODE_C4 0x04 /* Chain 4 */
#define GC_MISCELLANEOUS 6
#define GC_MISC_GM 0x01 /* Graphics/alphanumeric */
#define GC_MISC_MM 0x0c /* memory map */
#define GC_MISC_MM_SHIFT 2
#define GC_COLOR_DONT_CARE 7
#define GC_BIT_MASK 8
/* CRT controller registers. */
#define CRTC_IDX_MONO_PORT 0x3b4
#define CRTC_DATA_MONO_PORT 0x3b5
#define CRTC_IDX_COLOR_PORT 0x3d4
#define CRTC_DATA_COLOR_PORT 0x3d5
#define CRTC_HORIZ_TOTAL 0
#define CRTC_HORIZ_DISP_END 1
#define CRTC_START_HORIZ_BLANK 2
#define CRTC_END_HORIZ_BLANK 3
#define CRTC_START_HORIZ_RETRACE 4
#define CRTC_END_HORIZ_RETRACE 5
#define CRTC_VERT_TOTAL 6
#define CRTC_OVERFLOW 7
#define CRTC_OF_VRS9 0x80 /* VRS bit 9 */
#define CRTC_OF_VRS9_SHIFT 7
#define CRTC_OF_VDE9 0x40 /* VDE bit 9 */
#define CRTC_OF_VDE9_SHIFT 6
#define CRTC_OF_VRS8 0x04 /* VRS bit 8 */
#define CRTC_OF_VRS8_SHIFT 2
#define CRTC_OF_VDE8 0x02 /* VDE bit 8 */
#define CRTC_OF_VDE8_SHIFT 1
#define CRTC_PRESET_ROW_SCAN 8
#define CRTC_MAX_SCAN_LINE 9
#define CRTC_MSL_MSL 0x1f
#define CRTC_CURSOR_START 10
#define CRTC_CS_CO 0x20 /* Cursor off */
#define CRTC_CS_CS 0x1f /* Cursor start */
#define CRTC_CURSOR_END 11
#define CRTC_CE_CE 0x1f /* Cursor end */
#define CRTC_START_ADDR_HIGH 12
#define CRTC_START_ADDR_LOW 13
#define CRTC_CURSOR_LOC_HIGH 14
#define CRTC_CURSOR_LOC_LOW 15
#define CRTC_VERT_RETRACE_START 16
#define CRTC_VERT_RETRACE_END 17
#define CRTC_VRE_MASK 0xf
#define CRTC_VERT_DISP_END 18
#define CRTC_OFFSET 19
#define CRTC_UNDERLINE_LOC 20
#define CRTC_START_VERT_BLANK 21
#define CRTC_END_VERT_BLANK 22
#define CRTC_MODE_CONTROL 23
#define CRTC_MC_TE 0x80 /* Timing enable */
#define CRTC_LINE_COMPARE 24
/* DAC registers */
#define DAC_MASK 0x3c6
#define DAC_IDX_RD_PORT 0x3c7
#define DAC_IDX_WR_PORT 0x3c8
#define DAC_DATA_PORT 0x3c9
#define VBE_DISPI_INDEX_ID 0x0
#define VBE_DISPI_INDEX_XRES 0x1
#define VBE_DISPI_INDEX_YRES 0x2
#define VBE_DISPI_INDEX_BPP 0x3
#define VBE_DISPI_INDEX_ENABLE 0x4
#define VBE_DISPI_INDEX_BANK 0x5
#define VBE_DISPI_INDEX_VIRT_WIDTH 0x6
#define VBE_DISPI_INDEX_VIRT_HEIGHT 0x7
#define VBE_DISPI_INDEX_X_OFFSET 0x8
#define VBE_DISPI_INDEX_Y_OFFSET 0x9
#define VBE_DISPI_INDEX_VIDEO_MEM_64K 0xa
#define VBE_DISPI_DISABLED 0x00
#define VBE_DISPI_ENABLED 0x01
#define VBE_DISPI_GETCAPS 0x02
#define VBE_DISPI_8BIT_DAC 0x20
#define VBE_DISPI_LFB_ENABLED 0x40
#define VBE_DISPI_NOCLEARMEM 0x80
#define VBE_DISPI_ID0 0xB0C0
#define VBE_DISPI_ID1 0xB0C1
#define VBE_DISPI_ID2 0xB0C2
#define VBE_DISPI_ID3 0xB0C3
#define VBE_DISPI_ID4 0xB0C4
#define VBE_DISPI_ID5 0xB0C5
/* Per-device legacy VGA / Bochs VBE emulation state. */
struct vga {
	bool enable;		/* true while legacy VGA output is active;
				 * cleared when the virtio driver takes over */
	void *dev;		/* opaque handle returned by vga_init() */
	struct gfx_ctx *gc;	/* graphics context backing the VGA image */
	struct surface surf;	/* surface published to the display backend */
	pthread_t tid;		/* VGA render thread */
	/* Bochs VBE display interface ("DISPI") registers, exposed as a
	 * packed array of 16-bit registers in BAR2; the layout must match
	 * the VBE_DISPI_INDEX_* ordering above. */
	struct {
		uint16_t id;
		uint16_t xres;
		uint16_t yres;
		uint16_t bpp;
		uint16_t enable;
		uint16_t bank;
		uint16_t virt_width;
		uint16_t virt_height;
		uint16_t x_offset;
		uint16_t y_offset;
		uint16_t video_memory_64k;	/* VRAM size in 64 KB units */
	} __attribute__((packed)) vberegs;
};
void *vga_init(struct gfx_ctx *gc, int io_only);
void vga_render(struct gfx_ctx *gc, void *arg);
int vga_port_in_handler(struct vmctx *ctx, int in, int port, int bytes,
uint8_t *val, void *arg);
int vga_port_out_handler(struct vmctx *ctx, int in, int port, int bytes,
uint8_t val, void *arg);
void vga_ioport_write(struct vmctx *ctx, int vcpu, struct vga *vga,
uint64_t offset, int size, uint64_t value);
uint64_t vga_ioport_read(struct vmctx *ctx, int vcpu, struct vga *vga,
uint64_t offset, int size);
void vga_vbe_write(struct vmctx *ctx, int vcpu, struct vga *vga,
uint64_t offset, int size, uint64_t value);
uint64_t vga_vbe_read(struct vmctx *ctx, int vcpu, struct vga *vga,
uint64_t offset, int size);
#endif /* _VGA_H_ */

View File

@ -745,5 +745,22 @@ int virtio_set_modern_bar(struct virtio_base *base, bool use_notify_pio);
/**
* @}
*/
/* FIXME: Fix the assumption about the zero offset in virtio_pci_cap.
* Should not export the internal virtio APIs.
*/
void virtio_common_cfg_write(struct pci_vdev *dev,
uint64_t offset, int size, uint64_t value);
void virtio_device_cfg_write(struct pci_vdev *dev,
uint64_t offset, int size, uint64_t value);
void virtio_notify_cfg_write(struct pci_vdev *dev,
uint64_t offset, int size, uint64_t value);
uint32_t virtio_common_cfg_read(
struct pci_vdev *dev, uint64_t offset, int size);
uint32_t virtio_isr_cfg_read(
struct pci_vdev *dev, uint64_t offset, int size);
uint32_t virtio_device_cfg_read(
struct pci_vdev *dev, uint64_t offset, int size);
int virtio_set_modern_pio_bar(
struct virtio_base *base, int barnum);
#endif /* _VIRTIO_H_ */

View File

@ -53,6 +53,7 @@ struct vmctx {
uint32_t lowmem_limit;
uint64_t highmem_gpa_base;
size_t lowmem;
size_t fbmem;
size_t biosmem;
size_t highmem;
char *baseaddr;
@ -64,6 +65,7 @@ struct vmctx {
void *vpit;
void *ioc_dev;
void *tpm_dev;
void *fb_base;
/* BSP state. guest loader needs to fill it */
struct acrn_vcpu_regs bsp_regs;