mirror of
https://github.com/projectacrn/acrn-hypervisor.git
synced 2025-09-15 13:59:18 +00:00
dm: completely remove enable_bar()/disable_bar() functions
Following up on d648df766c
, surgically remove all the functions related to enable_bar()/disable_bar() that got introduced in 8787b65fde
. Tracked-On: #2902 Signed-off-by: Peter Fang <peter.fang@intel.com> Reviewed-by: Shuo A Liu <shuo.a.liu@intel.com>
This commit is contained in:
committed by
ACRN System Integration
parent
a718fbe860
commit
4c38ff00c6
@@ -33,7 +33,6 @@
|
||||
*/
|
||||
|
||||
#include <errno.h>
|
||||
#include <stdbool.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <assert.h>
|
||||
@@ -51,7 +50,6 @@ struct mmio_rb_range {
|
||||
struct mem_range mr_param;
|
||||
uint64_t mr_base;
|
||||
uint64_t mr_end;
|
||||
bool enabled;
|
||||
};
|
||||
|
||||
static RB_HEAD(mmio_rb_tree, mmio_rb_range) mmio_rb_root, mmio_rb_fallback;
|
||||
@@ -168,25 +166,18 @@ emulate_mem(struct vmctx *ctx, struct mmio_request *mmio_req)
|
||||
if (mmio_hint && paddr >= mmio_hint->mr_base &&
|
||||
paddr <= mmio_hint->mr_end)
|
||||
entry = mmio_hint;
|
||||
|
||||
if (entry == NULL) {
|
||||
if (mmio_rb_lookup(&mmio_rb_root, paddr, &entry) == 0)
|
||||
/* Update the per-VMU cache */
|
||||
mmio_hint = entry;
|
||||
else if (mmio_rb_lookup(&mmio_rb_fallback, paddr, &entry)) {
|
||||
pthread_rwlock_unlock(&mmio_rwlock);
|
||||
return -ESRCH;
|
||||
}
|
||||
else if (mmio_rb_lookup(&mmio_rb_root, paddr, &entry) == 0)
|
||||
/* Update the per-VM cache */
|
||||
mmio_hint = entry;
|
||||
else if (mmio_rb_lookup(&mmio_rb_fallback, paddr, &entry)) {
|
||||
pthread_rwlock_unlock(&mmio_rwlock);
|
||||
return -ESRCH;
|
||||
}
|
||||
|
||||
pthread_rwlock_unlock(&mmio_rwlock);
|
||||
|
||||
assert(entry != NULL);
|
||||
|
||||
if (entry->enabled == false) {
|
||||
pthread_rwlock_unlock(&mmio_rwlock);
|
||||
return -1;
|
||||
}
|
||||
pthread_rwlock_unlock(&mmio_rwlock);
|
||||
|
||||
if (mmio_req->direction == REQUEST_READ)
|
||||
err = mem_read(ctx, 0, paddr, (uint64_t *)&mmio_req->value,
|
||||
size, &entry->mr_param);
|
||||
@@ -197,68 +188,6 @@ emulate_mem(struct vmctx *ctx, struct mmio_request *mmio_req)
|
||||
return err;
|
||||
}
|
||||
|
||||
int
|
||||
disable_mem(struct mem_range *memp)
|
||||
{
|
||||
uint64_t paddr = memp->base;
|
||||
struct mmio_rb_range *entry = NULL;
|
||||
|
||||
pthread_rwlock_rdlock(&mmio_rwlock);
|
||||
/*
|
||||
* First check the per-VM cache
|
||||
*/
|
||||
if (mmio_hint && paddr >= mmio_hint->mr_base &&
|
||||
paddr <= mmio_hint->mr_end)
|
||||
entry = mmio_hint;
|
||||
|
||||
if (entry == NULL) {
|
||||
if (mmio_rb_lookup(&mmio_rb_root, paddr, &entry) == 0)
|
||||
/* Update the per-VMU cache */
|
||||
mmio_hint = entry;
|
||||
else if (mmio_rb_lookup(&mmio_rb_fallback, paddr, &entry)) {
|
||||
pthread_rwlock_unlock(&mmio_rwlock);
|
||||
return -ESRCH;
|
||||
}
|
||||
}
|
||||
|
||||
assert(entry != NULL);
|
||||
entry->enabled = false;
|
||||
pthread_rwlock_unlock(&mmio_rwlock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
enable_mem(struct mem_range *memp)
|
||||
{
|
||||
uint64_t paddr = memp->base;
|
||||
struct mmio_rb_range *entry = NULL;
|
||||
|
||||
pthread_rwlock_rdlock(&mmio_rwlock);
|
||||
/*
|
||||
* First check the per-VM cache
|
||||
*/
|
||||
if (mmio_hint && paddr >= mmio_hint->mr_base &&
|
||||
paddr <= mmio_hint->mr_end)
|
||||
entry = mmio_hint;
|
||||
|
||||
if (entry == NULL) {
|
||||
if (mmio_rb_lookup(&mmio_rb_root, paddr, &entry) == 0)
|
||||
/* Update the per-VMU cache */
|
||||
mmio_hint = entry;
|
||||
else if (mmio_rb_lookup(&mmio_rb_fallback, paddr, &entry)) {
|
||||
pthread_rwlock_unlock(&mmio_rwlock);
|
||||
return -ESRCH;
|
||||
}
|
||||
}
|
||||
|
||||
assert(entry != NULL);
|
||||
entry->enabled = true;
|
||||
pthread_rwlock_unlock(&mmio_rwlock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
register_mem_int(struct mmio_rb_tree *rbt, struct mem_range *memp)
|
||||
{
|
||||
@@ -273,7 +202,6 @@ register_mem_int(struct mmio_rb_tree *rbt, struct mem_range *memp)
|
||||
mrp->mr_param = *memp;
|
||||
mrp->mr_base = memp->base;
|
||||
mrp->mr_end = memp->base + memp->size - 1;
|
||||
mrp->enabled = true;
|
||||
pthread_rwlock_wrlock(&mmio_rwlock);
|
||||
if (mmio_rb_lookup(rbt, memp->base, &entry) != 0)
|
||||
err = mmio_rb_add(rbt, mrp);
|
||||
|
Reference in New Issue
Block a user