HV: deny HV-owned PCI BAR access from SOS

This patch denies the Service VM (SOS) access to device resources
owned by the hypervisor.
The HV may own these devices: (1) the PCI debug UART, in debug builds;
(2) type 1 PCI devices (bridges), when pre-launched VMs exist.
The current implementation exposes the MMIO/PIO resources of HV-owned
devices to the SOS; this patch removes them from the SOS.

Tracked-On: #5615
Signed-off-by: Tao Yuhong <yuhong.tao@intel.com>
Tao Yuhong, 2021-01-29 17:16:10 -05:00; committed by wenlingz
commit 50d8525618, parent 6e7ce4a73f
6 changed files with 73 additions and 36 deletions
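
The helper deny_pci_bar_access(), called by the new code below, is not part of
this view. A minimal sketch of the MMIO half of the idea, assuming ACRN's
ept_del_mr() and round_page_up()/round_page_down(); deny_one_mmio_bar and its
arguments are hypothetical, not the patch's actual code:

	/* Sketch only: carve one MMIO BAR window out of the Service VM's EPT. */
	static void deny_one_mmio_bar(struct acrn_vm *sos, uint64_t bar_base, uint64_t bar_size)
	{
		uint64_t lo = round_page_down(bar_base);
		uint64_t hi = round_page_up(bar_base + bar_size);

		/* remove the identity mapping of the BAR window from the SOS EPT,
		 * so SOS accesses to it fault instead of reaching the device */
		ept_del_mr(sos, (uint64_t *)sos->arch_vm.nworld_eptp, lo, hi - lo);
	}

For a PIO BAR, the equivalent is setting the port range's bits in the VM's I/O
bitmap (a deny_guest_pio_access()-style helper) so port reads/writes trap.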

View File

@@ -64,7 +64,21 @@ struct acrn_vm_pci_dev_config *init_one_dev_config(struct pci_pdev *pdev)
 		}
 		dev_config = &vm_config->pci_devs[vm_config->pci_dev_num];
-		dev_config->emu_type = PCI_DEV_TYPE_PTDEV;
+		if (is_hv_owned_pdev(pdev->bdf)) {
+			/* the SOS needs to emulate the type 1 pdevs owned by the HV */
+			dev_config->emu_type = PCI_DEV_TYPE_SOSEMUL;
+			if (is_bridge(pdev)) {
+				dev_config->vdev_ops = &vpci_bridge_ops;
+			} else if (is_host_bridge(pdev)) {
+				dev_config->vdev_ops = &vhostbridge_ops;
+			} else {
+				/* may be a type 0 device, e.g. the PCI debug UART */
+				break;
+			}
+		} else {
+			dev_config->emu_type = PCI_DEV_TYPE_PTDEV;
+		}
 		dev_config->vbdf.value = pdev->bdf.value;
 		dev_config->pbdf.value = pdev->bdf.value;
 		dev_config->pdev = pdev;
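
For reference, the type 0 / type 1 distinction used above comes from the PCI
header-type register at config offset 0x0E (PCIR_HDRTYPE); the enumeration hunk
further down reads it the same way. A condensed illustration, reusing the
patch's own identifiers:

	uint8_t hdr = (uint8_t)pci_pdev_read_cfg(bdf, PCIR_HDRTYPE, 1U);

	if ((hdr & PCIM_HDRTYPE) == PCIM_HDRTYPE_BRIDGE) {
		/* type 1 header: a (host) bridge; HV-owned when pre-launched VMs exist */
	} else if ((hdr & PCIM_HDRTYPE) == PCIM_HDRTYPE_NORMAL) {
		/* type 0 header: an ordinary endpoint, e.g. the PCI debug UART */
	}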

View File

@@ -332,6 +332,17 @@ static void deny_pdevs(struct acrn_vm *sos, struct acrn_vm_pci_dev_config *pci_d
 	}
 }
 
+static void deny_hv_owned_devices(struct acrn_vm *sos)
+{
+	uint32_t i;
+	const struct pci_pdev **hv_owned = get_hv_owned_pdevs();
+
+	for (i = 0U; i < get_hv_owned_pdev_num(); i++) {
+		deny_pci_bar_access(sos, hv_owned[i]);
+	}
+}
+
 /**
  * @param[inout] vm pointer to a vm descriptor
  *
@@ -410,6 +421,8 @@ static void prepare_sos_vm_memmap(struct acrn_vm *vm)
 		}
 	}
 
+	deny_hv_owned_devices(vm);
+
 	/* unmap AP trampoline code for security
 	 * This buffer is guaranteed to be page aligned.
 	 */
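
Design note: prepare_sos_vm_memmap() gives the Service VM an identity mapping
of host resources, which is what exposed the HV-owned BARs in the first place;
the new deny_hv_owned_devices() call punches those windows back out immediately
after the map is built.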

View File

@@ -625,18 +625,7 @@ struct pci_vdev *vpci_init_vdev(struct acrn_vpci *vpci, struct acrn_vm_pci_dev_c
 	if (dev_config->vdev_ops != NULL) {
 		vdev->vdev_ops = dev_config->vdev_ops;
 	} else {
-		if (get_highest_severity_vm(false) == vpci2vm(vpci)) {
-			vdev->vdev_ops = &pci_pt_dev_ops;
-		} else {
-			if (is_bridge(vdev->pdev)) {
-				vdev->vdev_ops = &vpci_bridge_ops;
-			} else if (is_host_bridge(vdev->pdev)) {
-				vdev->vdev_ops = &vhostbridge_ops;
-			} else {
-				vdev->vdev_ops = &pci_pt_dev_ops;
-			}
-		}
+		vdev->vdev_ops = &pci_pt_dev_ops;
 
 		ASSERT(dev_config->emu_type == PCI_DEV_TYPE_PTDEV,
 			"Only PCI_DEV_TYPE_PTDEV could not configure vdev_ops");
 		ASSERT(dev_config->pdev != NULL, "PCI PTDev is not present on platform!");
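
This branch could be collapsed because init_one_dev_config() (first hunk) now
pre-assigns dev_config->vdev_ops for every HV-owned bridge and host bridge, so
a dev_config reaching the else arm can only be a plain passthrough device; the
ASSERT encodes exactly that invariant. All three ops tables implement the same
dispatch interface, roughly the following (member names as in ACRN's vpci
headers, shown for orientation and possibly differing in detail):

	struct pci_vdev_ops {
		void	(*init_vdev)(struct pci_vdev *vdev);
		void	(*deinit_vdev)(struct pci_vdev *vdev);
		int32_t	(*write_vdev_cfg)(struct pci_vdev *vdev, uint32_t offset,
					uint32_t bytes, uint32_t val);
		int32_t	(*read_vdev_cfg)(struct pci_vdev *vdev, uint32_t offset,
					uint32_t bytes, uint32_t *val);
	};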

View File

@@ -90,7 +90,7 @@ static void create_vf(struct pci_vdev *pf_vdev, union pci_bdf vf_bdf, uint16_t v
 	 * Per VT-d 8.3.3, the VFs are under the scope of the same
 	 * remapping unit as the associated PF when SRIOV is enabled.
 	 */
-	vf_pdev = init_pdev(vf_bdf.value, pf_vdev->pdev->drhd_index);
+	vf_pdev = pci_init_pdev(vf_bdf, pf_vdev->pdev->drhd_index);
 	if (vf_pdev != NULL) {
 		struct acrn_vm_pci_dev_config *dev_cfg;
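
Passing union pci_bdf instead of a raw uint16_t drops the unpack/repack at each
call site. The union overlays the bus/device/function bitfields on the 16-bit
value, roughly (layout as in ACRN's pci.h):

	union pci_bdf {
		uint16_t value;
		struct {
			uint8_t f : 3;	/* function */
			uint8_t d : 5;	/* device */
			uint8_t b;	/* bus */
		} bits;
	};

so vf_bdf.value and (vf_bdf.bits.b, .bits.d, .bits.f) name the same bits.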

View File

@@ -54,6 +54,20 @@ static uint32_t num_pci_pdev;
 static struct pci_pdev pci_pdevs[CONFIG_MAX_PCI_DEV_NUM];
 static struct hlist_head pdevs_hlist_heads[PDEV_HLIST_HASHSIZE];
 
+/* for HV-owned pdevs */
+static uint32_t num_hv_owned_pci_pdev;
+static struct pci_pdev *hv_owned_pci_pdevs[CONFIG_MAX_PCI_DEV_NUM];
+
+uint32_t get_hv_owned_pdev_num(void)
+{
+	return num_hv_owned_pci_pdev;
+}
+
+const struct pci_pdev **get_hv_owned_pdevs(void)
+{
+	return (const struct pci_pdev **)hv_owned_pci_pdevs;
+}
+
 static struct pci_mmcfg_region phys_pci_mmcfg = {
 	.address = DEFAULT_PCI_MMCFG_BASE,
 	.start_bus = DEFAULT_PCI_MMCFG_START_BUS,
@@ -352,25 +366,19 @@ bool is_plat_hidden_pdev(union pci_bdf bdf)
 	return hidden;
 }
 
-static bool is_hv_owned_pdev(union pci_bdf pbdf)
+bool is_hv_owned_pdev(union pci_bdf pbdf)
 {
-	bool hidden = false;
-	/* if it is debug uart, hide it*/
-	if (is_pci_dbg_uart(pbdf)) {
-		pr_info("hide pci uart dev: (%x:%x:%x)", pbdf.bits.b, pbdf.bits.d, pbdf.bits.f);
-		hidden = true;
-	}
+	bool ret = false;
+	uint32_t i;
 
-	return hidden;
-}
-
-static struct pci_pdev *pci_init_pdev(union pci_bdf pbdf, uint32_t drhd_index)
-{
-	struct pci_pdev *pdev = NULL;
-
-	if (!is_hv_owned_pdev(pbdf)) {
-		pdev = init_pdev(pbdf.value, drhd_index);
+	for (i = 0U; i < num_hv_owned_pci_pdev; i++) {
+		if (bdf_is_equal(pbdf, hv_owned_pci_pdevs[i]->bdf)) {
+			pr_info("hv owned dev: (%x:%x:%x)", pbdf.bits.b, pbdf.bits.d, pbdf.bits.f);
+			ret = true;
+			break;
+		}
 	}
 
-	return pdev;
+	return ret;
 }
 
 /*
@@ -779,20 +787,19 @@ static void pci_enumerate_cap(struct pci_pdev *pdev)
  *
  * @return If there's a successfully initialized pdev return it, otherwise return NULL;
  */
-struct pci_pdev *init_pdev(uint16_t pbdf, uint32_t drhd_index)
+struct pci_pdev *pci_init_pdev(union pci_bdf bdf, uint32_t drhd_index)
 {
 	uint8_t hdr_type, hdr_layout;
-	union pci_bdf bdf;
 	struct pci_pdev *pdev = NULL;
+	bool is_hv_owned = false;
 
 	if (num_pci_pdev < CONFIG_MAX_PCI_DEV_NUM) {
-		bdf.value = pbdf;
 		hdr_type = (uint8_t)pci_pdev_read_cfg(bdf, PCIR_HDRTYPE, 1U);
 		hdr_layout = (hdr_type & PCIM_HDRTYPE);
 
 		if ((hdr_layout == PCIM_HDRTYPE_NORMAL) || (hdr_layout == PCIM_HDRTYPE_BRIDGE)) {
 			pdev = &pci_pdevs[num_pci_pdev];
-			pdev->bdf.value = pbdf;
+			pdev->bdf = bdf;
 			pdev->hdr_type = hdr_type;
 			pdev->base_class = (uint8_t)pci_pdev_read_cfg(bdf, PCIR_CLASS, 1U);
 			pdev->sub_class = (uint8_t)pci_pdev_read_cfg(bdf, PCIR_SUBCLASS, 1U);
@@ -803,7 +810,18 @@ struct pci_pdev *init_pdev(uint16_t pbdf, uint32_t drhd_index)
 				pci_enumerate_cap(pdev);
 			}
 
-			hlist_add_head(&pdev->link, &pdevs_hlist_heads[hash64(pbdf, PDEV_HLIST_HASHBITS)]);
+#if (PRE_VM_NUM != 0U)
+			/* HV-owned pdevs: 1. type 1 pdevs, if pre-launched VMs exist; 2. the PCI debug UART */
+			is_hv_owned = (hdr_layout == PCIM_HDRTYPE_BRIDGE) || is_pci_dbg_uart(bdf);
+#else
+			/* HV-owned pdevs: 1. the PCI debug UART */
+			is_hv_owned = is_pci_dbg_uart(bdf);
+#endif
+			if (is_hv_owned) {
+				hv_owned_pci_pdevs[num_hv_owned_pci_pdev] = pdev;
+				num_hv_owned_pci_pdev++;
+			}
+			hlist_add_head(&pdev->link, &pdevs_hlist_heads[hash64(bdf.value, PDEV_HLIST_HASHBITS)]);
 			pdev->drhd_index = drhd_index;
 			num_pci_pdev++;
 			reserve_vmsix_on_msi_irtes(pdev);
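
Once enumeration has filled hv_owned_pci_pdevs[], the rest of the hypervisor
consults it only through the new accessors. A hypothetical caller (the BDF
value here is made up):

	union pci_bdf bdf = { .value = 0x00e0U };	/* 00:1c.0 */

	if (is_hv_owned_pdev(bdf)) {
		/* do not give the SOS direct BAR access to this device */
	}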

View File

@@ -338,11 +338,14 @@ void set_mmcfg_region(struct pci_mmcfg_region *region);
 #endif
 struct pci_mmcfg_region *get_mmcfg_region(void);
 
-struct pci_pdev *init_pdev(uint16_t pbdf, uint32_t drhd_index);
+struct pci_pdev *pci_init_pdev(union pci_bdf pbdf, uint32_t drhd_index);
 uint32_t pci_pdev_read_cfg(union pci_bdf bdf, uint32_t offset, uint32_t bytes);
 void pci_pdev_write_cfg(union pci_bdf bdf, uint32_t offset, uint32_t bytes, uint32_t val);
 void enable_disable_pci_intx(union pci_bdf bdf, bool enable);
+bool is_hv_owned_pdev(union pci_bdf pbdf);
+uint32_t get_hv_owned_pdev_num(void);
+const struct pci_pdev **get_hv_owned_pdevs(void);
 
 /*
  * @brief Walks the PCI hierarchy and initializes array of pci_pdev structs
  * Uses DRHD info from ACPI DMAR tables to cover the endpoints and