hv: Replace dynamic memory with static for sbuf

-- Configure LOG_BUF_SIZE as 256KB per physical cpu
-- Replace 'calloc' with a static array for sbuf
-- Rename 'alloc_earlylog_sbuf' to 'init_earlylog_sbuf'
-- Remove dead code: sbuf_allocate/sbuf_free

v2-->v3:
-- put the buffer into the per_cpu data structure

v1-->v2:
-- add 'is_early_logbuf' to the per_cpu data structure, used to check
   whether 'do_copy_earlylog' still needs to run

Tracked-On: #861
Signed-off-by: Mingqiang Chi <mingqiang.chi@intel.com>
Reviewed-by: Yan, Like <like.yan@intel.com>
Reviewed-by: Jason Chen CJ <jason.cj.chen@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
parent 9e39732259
commit 2975f9fa65
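Before the hunks, a minimal, self-contained sketch of the pattern this change adopts: reserve the log buffer statically per CPU and initialize the shared-buffer header in place, instead of calloc'ing it at runtime. All names, sizes, and the struct layout below are simplified stand-ins for illustration, not the hypervisor's actual definitions.

#include <stdint.h>
#include <string.h>

#define ILLUSTRATIVE_LOG_BUF_SIZE  0x40000U   /* 256 KiB, mirrors the new Kconfig default */
#define ILLUSTRATIVE_ENTRY_SIZE    80U        /* assumed log entry size, illustration only */
#define ILLUSTRATIVE_MAGIC         0x5aa57aa7U
#define ILLUSTRATIVE_MAX_PCPU      4U

/* Simplified stand-in for the ring-buffer header; the real shared_buf has more fields. */
struct demo_sbuf {
        uint32_t magic;
        uint32_t ele_num;
        uint32_t ele_size;
        uint32_t size;
        uint32_t head;
        uint32_t tail;
        uint8_t  payload[];   /* entries follow the header in the same memory block */
};

/* Static reservation: one buffer per CPU, no heap involved. */
static uint8_t demo_early_logbuf[ILLUSTRATIVE_MAX_PCPU][ILLUSTRATIVE_LOG_BUF_SIZE];

void demo_init_earlylog(uint16_t cpu)   /* caller guarantees cpu < ILLUSTRATIVE_MAX_PCPU */
{
        struct demo_sbuf *sbuf = (struct demo_sbuf *)demo_early_logbuf[cpu];
        uint32_t ele_num = (uint32_t)((ILLUSTRATIVE_LOG_BUF_SIZE - sizeof(struct demo_sbuf))
                                / ILLUSTRATIVE_ENTRY_SIZE);

        /* Initialize the header in place instead of calling calloc()/free(). */
        memset(sbuf, 0, sizeof(*sbuf));
        sbuf->ele_num  = ele_num;
        sbuf->ele_size = ILLUSTRATIVE_ENTRY_SIZE;
        sbuf->size     = ele_num * ILLUSTRATIVE_ENTRY_SIZE;
        sbuf->magic    = ILLUSTRATIVE_MAGIC;
}

With this shape, early logging never depends on the heap being up, and there is nothing to free once the SOS buffer takes over later.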
@@ -99,6 +99,10 @@ config STACK_SIZE
           The size of stacks used by physical cores. Each core uses one stack
           for normal operations and another three for specific exceptions.
 
+config LOG_BUF_SIZE
+        hex "Capacity of logbuf for each physical cpu"
+        default 0x40000
+
 config LOG_DESTINATION
         int "Bitmap of consoles where logs are printed"
         range 0 7
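The new default of 0x40000 reserves 256 KiB of log buffer per physical CPU. A quick back-of-the-envelope check of the resulting entry count, using assumed values for SBUF_HEAD_SIZE and LOG_ENTRY_SIZE (illustration only; the real constants are defined elsewhere in the tree):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        const uint32_t log_buf_size = 0x40000U;  /* CONFIG_LOG_BUF_SIZE default: 256 KiB */
        const uint32_t head_size    = 64U;       /* assumed SBUF_HEAD_SIZE, illustration only */
        const uint32_t entry_size   = 80U;       /* assumed LOG_ENTRY_SIZE, illustration only */

        uint32_t ele_num = (log_buf_size - head_size) / entry_size;

        printf("per-CPU reservation: %u KiB, entries: %u\n",
                (unsigned int)(log_buf_size / 1024U), (unsigned int)ele_num);
        return 0;
}

Unlike the old formula in alloc_earlylog_sbuf, the capacity no longer depends on phys_cpu_num, so every CPU gets the same number of entries regardless of core count.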
@@ -10,7 +10,6 @@
  * transfered to SOS:
  * bsp/uefi/clearlinux/acrn.conf: hvlog=2M@0x1FE00000
  */
-#define HVLOG_BUF_SIZE (2*1024*1024)
 
 struct logmsg {
         uint32_t flags;
@@ -20,26 +19,16 @@ struct logmsg {
 
 static struct logmsg logmsg;
 
-static inline void alloc_earlylog_sbuf(uint16_t pcpu_id)
+static inline void init_earlylog_sbuf(uint16_t pcpu_id)
 {
+        struct shared_buf *sbuf = (struct shared_buf *)per_cpu(early_logbuf, pcpu_id);
         uint32_t ele_size = LOG_ENTRY_SIZE;
-        uint32_t ele_num = (((HVLOG_BUF_SIZE >> 1U) / phys_cpu_num)
-                        - SBUF_HEAD_SIZE) / ele_size;
+        uint32_t ele_num = ((CONFIG_LOG_BUF_SIZE - SBUF_HEAD_SIZE) / ele_size);
 
-        per_cpu(earlylog_sbuf, pcpu_id) = sbuf_allocate(ele_num, ele_size);
-        if (per_cpu(earlylog_sbuf, pcpu_id) == NULL) {
-                printf("failed to allcate sbuf for hvlog - %hu\n", pcpu_id);
-        }
-}
-
-static inline void free_earlylog_sbuf(uint16_t pcpu_id)
-{
-        if (per_cpu(earlylog_sbuf, pcpu_id) == NULL) {
-                return;
-        }
-
-        free(per_cpu(earlylog_sbuf, pcpu_id));
-        per_cpu(earlylog_sbuf, pcpu_id) = NULL;
+        sbuf->ele_num = ele_num;
+        sbuf->ele_size = ele_size;
+        sbuf->size = ele_num * ele_size;
+        sbuf->magic = SBUF_MAGIC;
 }
 
 static void do_copy_earlylog(struct shared_buf *dst_sbuf,
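For reference, a sketch of how the header and the entries share the one statically reserved block once init_earlylog_sbuf has filled in ele_num and ele_size. The struct and the writer below are illustrative only; this is not the hypervisor's sbuf_put():

#include <stdint.h>
#include <string.h>

struct demo_sbuf {
        uint32_t magic;
        uint32_t ele_num;
        uint32_t ele_size;
        uint32_t size;
        uint32_t head;   /* next slot to read  */
        uint32_t tail;   /* next slot to write */
        uint8_t  payload[];
};

/* Drop-when-full writer: only shows how ele_num, ele_size and the payload
 * area relate after the header has been set up in place. */
int demo_put(struct demo_sbuf *sbuf, const uint8_t *entry)
{
        uint32_t next = (sbuf->tail + 1U) % sbuf->ele_num;

        if (next == sbuf->head) {
                return 0;   /* full: this entry is dropped */
        }

        memcpy(&sbuf->payload[(size_t)sbuf->tail * sbuf->ele_size],
               entry, sbuf->ele_size);
        sbuf->tail = next;
        return 1;
}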
@@ -78,7 +67,8 @@ void init_logmsg(uint32_t flags)
 
         /* allocate sbuf for log before sos booting */
         for (pcpu_id = 0U; (pcpu_id < phys_cpu_num) && (pcpu_id < CONFIG_MAX_PCPU_NUM); pcpu_id++) {
-                alloc_earlylog_sbuf(pcpu_id);
+                init_earlylog_sbuf(pcpu_id);
+                per_cpu(is_early_logbuf, pcpu_id) = true;
         }
 }
 
@@ -147,13 +137,13 @@ void do_logmsg(uint32_t severity, const char *fmt, ...)
         unsigned int i, msg_len;
         struct shared_buf *sbuf = (struct shared_buf *)
                         per_cpu(sbuf, pcpu_id)[ACRN_HVLOG];
-        struct shared_buf *early_sbuf = per_cpu(earlylog_sbuf, pcpu_id);
+        struct shared_buf *early_sbuf = (struct shared_buf *)per_cpu(early_logbuf, pcpu_id);
 
-        if (early_sbuf != NULL) {
+        if (per_cpu(is_early_logbuf, pcpu_id)) {
                 if (sbuf != NULL) {
                         /* switch to sbuf from sos */
                         do_copy_earlylog(sbuf, early_sbuf);
-                        free_earlylog_sbuf(pcpu_id);
+                        per_cpu(is_early_logbuf, pcpu_id) = false;
                 } else {
                         /* use earlylog sbuf if no sbuf from sos */
                         sbuf = early_sbuf;
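A condensed sketch of the handoff do_logmsg now performs, with hypothetical helper names standing in for the real per_cpu() accesses:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct demo_sbuf;   /* opaque here; only the control flow matters */

/* Hypothetical stand-ins for per_cpu(is_early_logbuf, ...) and friends. */
extern bool demo_is_early_logbuf(uint16_t cpu);
extern void demo_set_early_logbuf(uint16_t cpu, bool v);
extern struct demo_sbuf *demo_early_sbuf(uint16_t cpu);
extern void demo_copy_earlylog(struct demo_sbuf *dst, struct demo_sbuf *src);

/* Pick the buffer a log message should go to on this CPU. */
struct demo_sbuf *demo_select_sbuf(uint16_t cpu, struct demo_sbuf *sos_sbuf)
{
        struct demo_sbuf *early = demo_early_sbuf(cpu);

        if (demo_is_early_logbuf(cpu)) {
                if (sos_sbuf != NULL) {
                        /* SOS buffer is up: drain the early buffer into it once,
                         * then stop using the early buffer (a flag, not free()). */
                        demo_copy_earlylog(sos_sbuf, early);
                        demo_set_early_logbuf(cpu, false);
                } else {
                        /* No SOS buffer yet: keep logging into the early buffer. */
                        return early;
                }
        }
        return sos_sbuf;   /* may be NULL, in which case the caller skips logging */
}

Because the early buffer is part of the per-CPU data, "switching away" from it is just clearing the flag; there is no longer a free_earlylog_sbuf() counterpart.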
@@ -176,7 +166,7 @@ void print_logmsg_buffer(uint16_t pcpu_id)
 {
         char buffer[LOG_ENTRY_SIZE + 1];
         uint32_t read_cnt;
-        struct shared_buf **sbuf;
+        struct shared_buf *sbuf;
         int is_earlylog = 0;
         uint64_t rflags;
 
@@ -184,31 +174,22 @@ void print_logmsg_buffer(uint16_t pcpu_id)
                 return;
         }
 
-        if (per_cpu(earlylog_sbuf, pcpu_id) != NULL) {
-                sbuf = &per_cpu(earlylog_sbuf, pcpu_id);
-                is_earlylog = 1;
-        } else {
-                sbuf = (struct shared_buf **)
-                        &per_cpu(sbuf, pcpu_id)[ACRN_HVLOG];
-        }
+        sbuf = (struct shared_buf *)per_cpu(early_logbuf, pcpu_id);
+        is_earlylog = 1;
 
         spinlock_irqsave_obtain(&(logmsg.lock), &rflags);
-        if ((*sbuf) != NULL) {
-                printf("CPU%hu: head: 0x%x, tail: 0x%x %s\n\r",
-                        pcpu_id, (*sbuf)->head, (*sbuf)->tail,
-                        (is_earlylog != 0) ? "[earlylog]" : "");
-        }
-
+        printf("CPU%hu: head: 0x%x, tail: 0x%x %s\n\r",
+                pcpu_id, (sbuf)->head, (sbuf)->tail,
+                (is_earlylog != 0) ? "[earlylog]" : "");
 
         spinlock_irqrestore_release(&(logmsg.lock), rflags);
 
         do {
                 uint32_t idx;
                 (void)memset(buffer, 0U, LOG_ENTRY_SIZE + 1U);
 
-                if ((*sbuf == NULL) || (buffer == NULL)) {
-                        return;
-                }
-
-                read_cnt = sbuf_get(*sbuf, (uint8_t *)buffer);
+                read_cnt = sbuf_get(sbuf, (uint8_t *)buffer);
 
                 if (read_cnt == 0U) {
                         return;
@@ -25,63 +25,6 @@ uint32_t sbuf_next_ptr(uint32_t pos_arg,
         return pos;
 }
 
-static inline uint32_t sbuf_calculate_allocate_size(uint32_t ele_num,
-                                uint32_t ele_size)
-{
-        uint64_t sbuf_allocate_size;
-
-        sbuf_allocate_size = ele_num * ele_size;
-        sbuf_allocate_size += SBUF_HEAD_SIZE;
-        if (sbuf_allocate_size > SBUF_MAX_SIZE) {
-                pr_err("%s, num=0x%x, size=0x%x exceed 0x%x",
-                        __func__, ele_num, ele_size, SBUF_MAX_SIZE);
-                return 0;
-        }
-
-        return (uint32_t) sbuf_allocate_size;
-}
-
-struct shared_buf *sbuf_allocate(uint32_t ele_num, uint32_t ele_size)
-{
-        struct shared_buf *sbuf;
-        uint32_t sbuf_allocate_size;
-
-        if ((ele_num == 0U) || (ele_size == 0U)) {
-                pr_err("%s invalid parameter!", __func__);
-                return NULL;
-        }
-
-        sbuf_allocate_size = sbuf_calculate_allocate_size(ele_num, ele_size);
-        if (sbuf_allocate_size == 0U) {
-                return NULL;
-        }
-
-        sbuf = calloc(1U, sbuf_allocate_size);
-        if (sbuf == NULL) {
-                pr_err("%s no memory!", __func__);
-                return NULL;
-        }
-
-        sbuf->ele_num = ele_num;
-        sbuf->ele_size = ele_size;
-        sbuf->size = ele_num * ele_size;
-        sbuf->magic = SBUF_MAGIC;
-        pr_info("%s ele_num=0x%x, ele_size=0x%x allocated",
-                __func__, ele_num, ele_size);
-        return sbuf;
-}
-
-void sbuf_free(struct shared_buf *sbuf)
-{
-        if ((sbuf == NULL) || (sbuf->magic != SBUF_MAGIC)) {
-                pr_err("%s invalid parameter!", __func__);
-                return;
-        }
-
-        sbuf->magic = 0UL;
-        free(sbuf);
-}
-
 uint32_t sbuf_get(struct shared_buf *sbuf, uint8_t *data)
 {
         const void *from;
@@ -24,6 +24,9 @@ struct per_cpu_region {
         uint8_t vmxon_region[CPU_PAGE_SIZE];
 #ifdef HV_DEBUG
         uint64_t *sbuf[ACRN_SBUF_ID_MAX];
+        char logbuf[LOG_MESSAGE_MAX_SIZE];
+        bool is_early_logbuf;
+        char early_logbuf[CONFIG_LOG_BUF_SIZE];
         uint64_t vmexit_cnt[64];
         uint64_t vmexit_time[64];
         uint32_t npk_log_ref;
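A trimmed-down illustration of why placing the buffer in per_cpu_region gives each CPU its own storage with no allocator involved; the structure and accessor macro below are simplified stand-ins for the real per_cpu() machinery:

#include <stdbool.h>
#include <stdint.h>

#define DEMO_MAX_PCPU       8U
#define DEMO_LOG_BUF_SIZE   0x40000U   /* mirrors CONFIG_LOG_BUF_SIZE's default */

/* Trimmed-down stand-in for struct per_cpu_region: only the fields this commit touches. */
struct demo_per_cpu_region {
        bool is_early_logbuf;
        char early_logbuf[DEMO_LOG_BUF_SIZE];
};

static struct demo_per_cpu_region demo_per_cpu_data[DEMO_MAX_PCPU];

/* Simplified accessor in the spirit of per_cpu(name, pcpu_id). */
#define demo_per_cpu(name, pcpu_id)  (demo_per_cpu_data[(pcpu_id)].name)

void demo_mark_early(uint16_t pcpu_id)   /* caller guarantees pcpu_id < DEMO_MAX_PCPU */
{
        /* Each CPU indexes its own slot, so the buffer is reached without any
         * allocation; the memory is reserved at build time in hypervisor BSS. */
        demo_per_cpu(is_early_logbuf, pcpu_id) = true;
        demo_per_cpu(early_logbuf, pcpu_id)[0] = '\0';
}

The trade-off is a fixed CONFIG_LOG_BUF_SIZE bytes per CPU in the hypervisor image, which is presumably why the size is exposed as a Kconfig option rather than hard-coded.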
@@ -31,7 +34,6 @@ struct per_cpu_region {
         uint64_t irq_count[NR_IRQS];
         uint64_t softirq_pending;
         uint64_t spurious;
-        struct shared_buf *earlylog_sbuf;
         void *vcpu;
         void *ever_run_vcpu;
 #ifdef STACK_PROTECTOR
@@ -47,7 +49,6 @@ struct per_cpu_region {
         uint8_t df_stack[CONFIG_STACK_SIZE] __aligned(16);
         uint8_t sf_stack[CONFIG_STACK_SIZE] __aligned(16);
         uint8_t stack[CONFIG_STACK_SIZE] __aligned(16);
-        char logbuf[LOG_MESSAGE_MAX_SIZE];
         uint32_t lapic_id;
         uint32_t lapic_ldr;
         struct smp_call_info_data smp_call_info;
@@ -74,8 +74,6 @@ static inline void sbuf_add_flags(struct shared_buf *sbuf, uint64_t flags)
         sbuf->flags |= flags;
 }
 
-struct shared_buf *sbuf_allocate(uint32_t ele_num, uint32_t ele_size);
-void sbuf_free(struct shared_buf *sbuf);
 /**
  *@pre sbuf != NULL
  *@pre data != NULL
@@ -109,18 +107,6 @@ static inline void sbuf_add_flags(
 {
 }
 
-static inline struct shared_buf *sbuf_allocate(
-                __unused uint32_t ele_num,
-                __unused uint32_t ele_size)
-{
-        return NULL;
-}
-
-static inline void sbuf_free(
-                __unused struct shared_buf *sbuf)
-{
-}
-
 static inline int sbuf_get(
                 __unused struct shared_buf *sbuf,
                 __unused uint8_t *data)