mirror of https://github.com/projectacrn/acrn-hypervisor.git
dm: clean up mem.c
- use strncmp() instead of comparing string pointers to make no assumptions about the toolchain's literal pool
- re-shuffle the functions so they're consistent with mem.h
- make non-public functions static
- increase code re-use

Tracked-On: #2792
Signed-off-by: Peter Fang <peter.fang@intel.com>
Reviewed-by: Eddie Dong <eddie.dong@intel.com>
Acked-by: Yin Fengwei <fengwei.yin@intel.com>
parent 890d40226b
commit 82e42cfa2a
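Background note (not part of the commit): a minimal, self-contained sketch of the literal-pool point above. Whether two identical string literals share one address is a toolchain decision, so pointer equality is not a reliable name check, while strncmp() compares the bytes themselves. The "uart" literal and the 80-byte bound are arbitrary here.

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *a = "uart";
	const char *b = "uart";

	/* May print either answer: the compiler/linker is free to pool
	 * identical literals into one address, or to keep two copies. */
	printf("pointer compare: %s\n", (a == b) ? "same" : "different");

	/* Always compares contents, bounded to 80 bytes here. */
	printf("strncmp:         %s\n",
	       strncmp(a, b, 80) == 0 ? "same" : "different");

	return 0;
}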
mem.c:

@@ -37,12 +37,15 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <assert.h>
+#include <string.h>
 #include <pthread.h>

 #include "vmm.h"
 #include "mem.h"
 #include "tree.h"

+#define MEMNAMESZ (80)
+
 struct mmio_rb_range {
 	RB_ENTRY(mmio_rb_range) mr_link;	/* RB tree links */
 	struct mem_range mr_param;
@@ -51,10 +54,8 @@ struct mmio_rb_range {
 	bool enabled;
 };

-struct mmio_rb_tree;
-RB_PROTOTYPE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);
-
-RB_HEAD(mmio_rb_tree, mmio_rb_range) mmio_rb_root, mmio_rb_fallback;
+static RB_HEAD(mmio_rb_tree, mmio_rb_range) mmio_rb_root, mmio_rb_fallback;
+RB_PROTOTYPE_STATIC(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);

 /*
  * Per-VM cache. Since most accesses from a vCPU will be to
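Side note (illustrative, not from the patch): a sketch of how these macros declare a file-local red-black tree, assuming a BSD-style tree.h that also provides RB_INITIALIZER, RB_ENTRY and RB_FIND. The node type and comparator below are made up; mmio_rb_root above follows the same pattern with mmio_rb_range_compare as its comparator.

#include "tree.h"	/* BSD-style RB macros, as included by mem.c */

struct node {
	RB_ENTRY(node) link;	/* embedded red-black linkage */
	int key;
};

/* Comparator contract: <0, 0, >0, like strcmp(). */
static int
node_cmp(struct node *a, struct node *b)
{
	return (a->key < b->key) ? -1 : (a->key > b->key);
}

/* Everything stays file-local: the head, the prototypes and the
 * generated helper functions are all static. */
static RB_HEAD(node_tree, node) node_root = RB_INITIALIZER(&node_root);
RB_PROTOTYPE_STATIC(node_tree, node, link, node_cmp);
RB_GENERATE_STATIC(node_tree, node, link, node_cmp);

static struct node *
node_lookup(int key)
{
	struct node probe = { .key = key };

	return RB_FIND(node_tree, &node_root, &probe);
}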
@@ -128,7 +129,7 @@ mmio_rb_dump(struct mmio_rb_tree *rbt)
 }
 #endif

-RB_GENERATE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);
+RB_GENERATE_STATIC(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);

 static int
 mem_read(void *ctx, int vcpu, uint64_t gpa, uint64_t *rval, int size, void *arg)
@@ -196,33 +197,6 @@ emulate_mem(struct vmctx *ctx, struct mmio_request *mmio_req)
 	return err;
 }

-static int
-register_mem_int(struct mmio_rb_tree *rbt, struct mem_range *memp)
-{
-	struct mmio_rb_range *entry, *mrp;
-	int err;
-
-	err = 0;
-
-	mrp = malloc(sizeof(struct mmio_rb_range));
-
-	if (mrp != NULL) {
-		mrp->mr_param = *memp;
-		mrp->mr_base = memp->base;
-		mrp->mr_end = memp->base + memp->size - 1;
-		mrp->enabled = true;
-		pthread_rwlock_wrlock(&mmio_rwlock);
-		if (mmio_rb_lookup(rbt, memp->base, &entry) != 0)
-			err = mmio_rb_add(rbt, mrp);
-		pthread_rwlock_unlock(&mmio_rwlock);
-		if (err)
-			free(mrp);
-	} else
-		err = -1;
-
-	return err;
-}
-
 int
 disable_mem(struct mem_range *memp)
 {
@@ -285,6 +259,33 @@ enable_mem(struct mem_range *memp)
 	return 0;
 }

+static int
+register_mem_int(struct mmio_rb_tree *rbt, struct mem_range *memp)
+{
+	struct mmio_rb_range *entry, *mrp;
+	int err;
+
+	err = 0;
+
+	mrp = malloc(sizeof(struct mmio_rb_range));
+
+	if (mrp != NULL) {
+		mrp->mr_param = *memp;
+		mrp->mr_base = memp->base;
+		mrp->mr_end = memp->base + memp->size - 1;
+		mrp->enabled = true;
+		pthread_rwlock_wrlock(&mmio_rwlock);
+		if (mmio_rb_lookup(rbt, memp->base, &entry) != 0)
+			err = mmio_rb_add(rbt, mrp);
+		pthread_rwlock_unlock(&mmio_rwlock);
+		if (err)
+			free(mrp);
+	} else
+		err = -1;
+
+	return err;
+}
+
 int
 register_mem(struct mem_range *memp)
 {
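Usage sketch (hypothetical, not from the patch): how a caller might register an MMIO range through the public wrappers that now share register_mem_int(). The sample_dev name, address and size are invented; the handler-related fields of struct mem_range are not shown in this hunk and are deliberately left out.

#include <string.h>
#include "mem.h"	/* struct mem_range, register_mem(), MEM_F_RW */

static int
sample_dev_init(void)
{
	struct mem_range mr;

	memset(&mr, 0, sizeof(mr));
	mr.name = "sample_dev";		/* compared with strncmp() on unregister */
	mr.base = 0xd0000000UL;		/* hypothetical 4KB MMIO window */
	mr.size = 0x1000;
	mr.flags = MEM_F_RW;
	/* handler fields omitted -- see mem.h */

	/* register_mem() and register_mem_fallback() both funnel into
	 * register_mem_int(); only the target tree differs.  The struct
	 * is copied into the tree node, so a stack variable is fine. */
	return register_mem(&mr);
}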
@@ -297,25 +298,28 @@ register_mem_fallback(struct mem_range *memp)
 	return register_mem_int(&mmio_rb_fallback, memp);
 }

-int
-unregister_mem_fallback(struct mem_range *memp)
+static int
+unregister_mem_int(struct mmio_rb_tree *rbt, struct mem_range *memp)
 {
 	struct mem_range *mr;
 	struct mmio_rb_range *entry = NULL;
 	int err;

 	pthread_rwlock_wrlock(&mmio_rwlock);
-	err = mmio_rb_lookup(&mmio_rb_fallback, memp->base, &entry);
+	err = mmio_rb_lookup(rbt, memp->base, &entry);
 	if (err == 0) {
 		mr = &entry->mr_param;
-		assert(mr->name == memp->name);
-		assert(mr->base == memp->base && mr->size == memp->size);
-		assert((mr->flags & MEM_F_IMMUTABLE) == 0);
-		RB_REMOVE(mmio_rb_tree, &mmio_rb_fallback, entry);
+		if (strncmp(mr->name, memp->name, MEMNAMESZ)) {
+			err = -1;
+		} else {
+			assert(mr->base == memp->base && mr->size == memp->size);
+			assert((mr->flags & MEM_F_IMMUTABLE) == 0);
+			RB_REMOVE(mmio_rb_tree, rbt, entry);

-		/* flush Per-VM cache */
-		if (mmio_hint == entry)
-			mmio_hint = NULL;
+			/* flush Per-VM cache */
+			if (mmio_hint == entry)
+				mmio_hint = NULL;
+		}
 	}
 	pthread_rwlock_unlock(&mmio_rwlock);

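Reading note on the new check, with a tiny hypothetical helper: strncmp() returns 0 when the first MEMNAMESZ bytes match, so the nonzero branch above means "this entry is not the range the caller named" and unregistration fails with -1. MEMNAMESZ is restated locally only to keep the sketch standalone.

#include <stdbool.h>
#include <string.h>

#define MEMNAMESZ (80)	/* mirrors the limit added in mem.c */

/* Hypothetical helper restating the check above in positive form. */
static bool
mem_name_matches(const char *registered, const char *requested)
{
	return strncmp(registered, requested, MEMNAMESZ) == 0;
}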
@@ -328,29 +332,13 @@ unregister_mem_fallback(struct mem_range *memp)
 int
 unregister_mem(struct mem_range *memp)
 {
-	struct mem_range *mr;
-	struct mmio_rb_range *entry = NULL;
-	int err;
+	return unregister_mem_int(&mmio_rb_root, memp);
+}

-	pthread_rwlock_wrlock(&mmio_rwlock);
-	err = mmio_rb_lookup(&mmio_rb_root, memp->base, &entry);
-	if (err == 0) {
-		mr = &entry->mr_param;
-		assert(mr->name == memp->name);
-		assert(mr->base == memp->base && mr->size == memp->size);
-		assert((mr->flags & MEM_F_IMMUTABLE) == 0);
-		RB_REMOVE(mmio_rb_tree, &mmio_rb_root, entry);
-
-		/* flush Per-VM cache */
-		if (mmio_hint == entry)
-			mmio_hint = NULL;
-	}
-	pthread_rwlock_unlock(&mmio_rwlock);
-
-	if (entry)
-		free(entry);
-
-	return err;
+int
+unregister_mem_fallback(struct mem_range *memp)
+{
+	return unregister_mem_int(&mmio_rb_fallback, memp);
 }

 void
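And the matching teardown sketch (hypothetical, pairing with the registration sketch above): the mem_range handed to unregister_mem() must identify the registered entry, since the name is checked with strncmp() and the base/size are asserted in unregister_mem_int().

#include <string.h>
#include "mem.h"

static int
sample_dev_deinit(void)
{
	struct mem_range mr;

	memset(&mr, 0, sizeof(mr));
	mr.name = "sample_dev";		/* must match: checked with strncmp() */
	mr.base = 0xd0000000UL;		/* must match: asserted */
	mr.size = 0x1000;		/* must match: asserted */

	/* Thin wrapper over unregister_mem_int(&mmio_rb_root, ...). */
	return unregister_mem(&mr);
}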
mem.h:

@@ -49,13 +49,13 @@ struct mem_range {
 #define MEM_F_RW 0x3
 #define MEM_F_IMMUTABLE 0x4 /* mem_range cannot be unregistered */

-void init_mem(void);
 int emulate_mem(struct vmctx *ctx, struct mmio_request *mmio_req);
-int disable_mem(struct mem_range *memp);
-int enable_mem(struct mem_range *memp);
 int register_mem(struct mem_range *memp);
 int register_mem_fallback(struct mem_range *memp);
 int unregister_mem(struct mem_range *memp);
 int unregister_mem_fallback(struct mem_range *memp);
+int disable_mem(struct mem_range *memp);
+int enable_mem(struct mem_range *memp);
+void init_mem(void);

 #endif /* _MEM_H_ */
tree.h:

@@ -385,7 +385,7 @@ struct { \
 #define RB_PROTOTYPE(name, type, field, cmp) \
 	RB_PROTOTYPE_INTERNAL(name, type, field, cmp,)
 #define RB_PROTOTYPE_STATIC(name, type, field, cmp) \
-	RB_PROTOTYPE_INTERNAL(name, type, field, cmp, __unused static)
+	RB_PROTOTYPE_INTERNAL(name, type, field, cmp, static)
 #define RB_PROTOTYPE_INTERNAL(name, type, field, cmp, attr) \
 	RB_PROTOTYPE_INSERT_COLOR(name, type, attr); \
 	RB_PROTOTYPE_REMOVE_COLOR(name, type, attr); \

@@ -422,7 +422,7 @@ struct { \
 #define RB_GENERATE(name, type, field, cmp) \
 	RB_GENERATE_INTERNAL(name, type, field, cmp,)
 #define RB_GENERATE_STATIC(name, type, field, cmp) \
-	RB_GENERATE_INTERNAL(name, type, field, cmp, __unused static)
+	RB_GENERATE_INTERNAL(name, type, field, cmp, __attribute__((unused)) static)
 #define RB_GENERATE_INTERNAL(name, type, field, cmp, attr) \
 	RB_GENERATE_INSERT_COLOR(name, type, field, attr) \
 	RB_GENERATE_REMOVE_COLOR(name, type, field, attr) \
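Side note on the attribute change (assuming GCC or Clang with -Wunused-function): RB_GENERATE_STATIC expands to a family of static functions, and any of them that a translation unit never calls would otherwise draw a warning; marking them unused keeps the build quiet. A standalone sketch:

/* build with: cc -c -Wunused-function sketch.c */

static int
helper_plain(void)		/* warns: defined but not used */
{
	return 0;
}

__attribute__((unused)) static int
helper_marked(void)		/* no warning: explicitly marked as possibly unused */
{
	return 0;
}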