dm: fix the issue when the guest tries to disable memory range access

According to PCI spec 3.0 section 6.2.2 "Device Control", the guest
can write the command register to control whether the device responds
to I/O and memory accesses.

The original code registered/unregistered the memory range on each
toggle, which is not suitable because it cannot handle this sequence
(see the sketch below):
  1. the guest disables the device response to a specific memory range
  2. the guest reboots (the DM then tries to free the memory range
     which was already freed in step 1)

Tracked-On: #1277
Signed-off-by: Yin Fengwei <fengwei.yin@intel.com>
Acked-by: Yu Wang <yu1.wang@intel.com>
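
For illustration, a minimal sketch of the broken flow versus the fixed
one. This is not the DM's actual call site: cmd_write_old()/cmd_write_new()
are hypothetical, and PCIM_CMD_MEMEN is assumed to be the usual
memory-space-enable bit of the PCI command register; only
register_mem()/unregister_mem()/enable_mem()/disable_mem() come from
core/mem.c and this patch.

#include <stdint.h>

#define PCIM_CMD_MEMEN	0x0002	/* assumed: memory-space enable bit */

struct mem_range;			/* DM MMIO range descriptor */
int register_mem(struct mem_range *memp);
int unregister_mem(struct mem_range *memp);
int enable_mem(struct mem_range *memp);		/* added by this patch */
int disable_mem(struct mem_range *memp);	/* added by this patch */

/* Old behavior: tear the range down whenever the guest clears MEMEN. */
static void
cmd_write_old(struct mem_range *bar, uint16_t cmd)
{
	if (cmd & PCIM_CMD_MEMEN)
		register_mem(bar);
	else
		/* Range freed here; the reboot teardown frees it again. */
		unregister_mem(bar);
}

/* New behavior: keep the range registered and only flip its flag. */
static void
cmd_write_new(struct mem_range *bar, uint16_t cmd)
{
	if (cmd & PCIM_CMD_MEMEN)
		enable_mem(bar);
	else
		disable_mem(bar);	/* range stays in the RB tree */
}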
Author:    Yin Fengwei
Date:      2018-09-18 11:30:39 +08:00
Committer: wenlingz
Parent:    be0cde7dec
Commit:    8787b65fde
5 changed files with 182 additions and 4 deletions


@@ -33,6 +33,7 @@
*/
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
@@ -47,6 +48,7 @@ struct mmio_rb_range {
	struct mem_range mr_param;
	uint64_t mr_base;
	uint64_t mr_end;
	bool enabled;
};

struct mmio_rb_tree;
@@ -181,6 +183,10 @@ emulate_mem(struct vmctx *ctx, struct mmio_request *mmio_req)
	assert(entry != NULL);

	if (entry->enabled == false) {
		return -1;
	}

	if (mmio_req->direction == REQUEST_READ)
		err = mem_read(ctx, 0, paddr, (uint64_t *)&mmio_req->value,
			size, &entry->mr_param);
@@ -207,6 +213,7 @@ register_mem_int(struct mmio_rb_tree *rbt, struct mem_range *memp)
	mrp->mr_param = *memp;
	mrp->mr_base = memp->base;
	mrp->mr_end = memp->base + memp->size - 1;
	mrp->enabled = true;

	pthread_rwlock_wrlock(&mmio_rwlock);
	if (mmio_rb_lookup(rbt, memp->base, &entry) != 0)
		err = mmio_rb_add(rbt, mrp);
@@ -219,6 +226,68 @@ register_mem_int(struct mmio_rb_tree *rbt, struct mem_range *memp)
	return err;
}

int
disable_mem(struct mem_range *memp)
{
	uint64_t paddr = memp->base;
	struct mmio_rb_range *entry = NULL;

	pthread_rwlock_rdlock(&mmio_rwlock);
	/*
	 * First check the per-VM cache
	 */
	if (mmio_hint && paddr >= mmio_hint->mr_base &&
	    paddr <= mmio_hint->mr_end)
		entry = mmio_hint;

	if (entry == NULL) {
		if (mmio_rb_lookup(&mmio_rb_root, paddr, &entry) == 0)
			/* Update the per-VM cache */
			mmio_hint = entry;
		else if (mmio_rb_lookup(&mmio_rb_fallback, paddr, &entry)) {
			pthread_rwlock_unlock(&mmio_rwlock);
			return -ESRCH;
		}
	}

	assert(entry != NULL);
	entry->enabled = false;
	pthread_rwlock_unlock(&mmio_rwlock);

	return 0;
}

int
enable_mem(struct mem_range *memp)
{
	uint64_t paddr = memp->base;
	struct mmio_rb_range *entry = NULL;

	pthread_rwlock_rdlock(&mmio_rwlock);
	/*
	 * First check the per-VM cache
	 */
	if (mmio_hint && paddr >= mmio_hint->mr_base &&
	    paddr <= mmio_hint->mr_end)
		entry = mmio_hint;

	if (entry == NULL) {
		if (mmio_rb_lookup(&mmio_rb_root, paddr, &entry) == 0)
			/* Update the per-VM cache */
			mmio_hint = entry;
		else if (mmio_rb_lookup(&mmio_rb_fallback, paddr, &entry)) {
			pthread_rwlock_unlock(&mmio_rwlock);
			return -ESRCH;
		}
	}

	assert(entry != NULL);
	entry->enabled = true;
	pthread_rwlock_unlock(&mmio_rwlock);

	return 0;
}
int
register_mem(struct mem_range *memp)
{
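
For context, a hedged usage sketch replaying the reboot sequence from the
commit message. The four APIs come from core/mem.c and the diff above
(unregister_mem() already exists there); the wrapper function and the idea
of passing in a BAR's mem_range are illustrative only.

/*
 * Illustrative only: the other struct mem_range fields (name, handler,
 * base, size, ...) are assumed to be filled in by the caller.
 */
static void
reboot_sequence_example(struct mem_range *bar_mr)
{
	register_mem(bar_mr);	/* BAR mapped; mrp->enabled = true       */
	disable_mem(bar_mr);	/* guest clears MEMEN; emulate_mem() now
				 * returns -1 for accesses in this range */
	enable_mem(bar_mr);	/* guest sets MEMEN; accesses work again */
	unregister_mem(bar_mr);	/* reboot teardown frees the range,
				 * exactly once                          */
}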