HV: treewide: C99-friendly per_cpu implementation

The current implementation of per_cpu relies on several non-C99 features
and, in addition, involves arbitrary pointer arithmetic, which is not
MISRA C friendly.

This patch introduces struct per_cpu_region, which holds all the per_cpu
variables. Allocation of per_cpu data regions and access to per_cpu
variables are greatly simplified, at the cost of making all per_cpu
variables accessible in all files.

Signed-off-by: Huihuang Shi <huihuang.shi@intel.com>
Author: Huihuang Shi, 2018-06-05 15:25:07 +08:00; committed by: lijinxia
parent cbb692d910; commit e591315a65
20 changed files with 95 additions and 130 deletions

View File

@@ -18,7 +18,7 @@ spinlock_t up_count_spinlock = {
.tail = 0
};
-void *per_cpu_data_base_ptr;
+struct per_cpu_region *per_cpu_data_base_ptr;
int phy_cpu_num = 0;
unsigned long pcpu_sync = 0;
uint32_t up_count = 0;
@@ -26,11 +26,6 @@ uint32_t up_count = 0;
/* physical cpu active bitmap, support up to 64 cpus */
uint64_t pcpu_active_bitmap = 0;
-DEFINE_CPU_DATA(uint8_t[STACK_SIZE], stack) __aligned(16);
-DEFINE_CPU_DATA(uint8_t, lapic_id);
-DEFINE_CPU_DATA(void *, vcpu);
-DEFINE_CPU_DATA(int, state);
/* TODO: add more capability per requirement */
/*APICv features*/
#define VAPIC_FEATURE_VIRT_ACCESS (1 << 0)
@@ -228,7 +223,7 @@ static void alloc_phy_cpu_data(int pcpu_num)
{
phy_cpu_num = pcpu_num;
-	per_cpu_data_base_ptr = calloc(1, PER_CPU_DATA_SIZE * pcpu_num);
+	per_cpu_data_base_ptr = calloc(pcpu_num, sizeof(struct per_cpu_region));
ASSERT(per_cpu_data_base_ptr != NULL, "");
}
@@ -294,14 +289,6 @@ static void cpu_set_current_state(uint32_t logical_id, int state)
}
#ifdef STACK_PROTECTOR
-struct stack_canary {
-	/* Gcc generates extra code, using [fs:40] to access canary */
-	uint8_t reserved[40];
-	uint64_t canary;
-};
-static DEFINE_CPU_DATA(struct stack_canary, stack_canary);
static uint64_t get_random_value(void)
{
uint64_t random = 0;
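The struct stack_canary removed here (now in cpu.h and embedded in the
per-CPU region) relies on GCC reading the canary from %fs:40, so the
40-byte reserved pad puts the canary member at exactly that offset once
FS base points at this CPU's copy. A hedged sketch of the wiring,
assuming an msr_write() helper and MSR_IA32_FS_BASE constant like those
the hypervisor already provides:

#ifdef STACK_PROTECTOR
/* Sketch only: aim IA32_FS_BASE at this CPU's stack_canary so the
 * compiler-generated [fs:40] access lands on the canary member. */
static void set_fs_base(void)
{
	struct stack_canary *psc = &get_cpu_var(stack_canary);

	psc->canary = get_random_value();
	msr_write(MSR_IA32_FS_BASE, (uint64_t)psc);
}
#endif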

View File

@@ -6,12 +6,6 @@
#include <hypervisor.h>
-DEFINE_CPU_DATA(struct tss_64, tss);
-DEFINE_CPU_DATA(struct host_gdt, gdt);
-DEFINE_CPU_DATA(uint8_t[STACK_SIZE], mc_stack) __aligned(16);
-DEFINE_CPU_DATA(uint8_t[STACK_SIZE], df_stack) __aligned(16);
-DEFINE_CPU_DATA(uint8_t[STACK_SIZE], sf_stack) __aligned(16);
static void set_tss_desc(union tss_64_descriptor *desc,
void *tss, int tss_limit, int type)
{

View File

@@ -9,14 +9,6 @@
#include "instr_emul_wrapper.h"
#include "instr_emul.h"
-struct emul_cnx {
-	struct vie vie;
-	struct vm_guest_paging paging;
-	struct vcpu *vcpu;
-};
-static DEFINE_CPU_DATA(struct emul_cnx, g_inst_ctxt);
static int
encode_vmcs_seg_desc(int seg, uint32_t *base, uint32_t *lim, uint32_t *acc);

View File

@@ -133,6 +133,12 @@ struct vm_guest_paging {
enum vm_paging_mode paging_mode;
};
+struct emul_cnx {
+	struct vie vie;
+	struct vm_guest_paging paging;
+	struct vcpu *vcpu;
+};
/*
* Identifiers for architecturally defined registers.
*/

View File

@@ -37,9 +37,6 @@ struct irq_desc {
static struct irq_desc *irq_desc_base;
static int vector_to_irq[NR_MAX_VECTOR + 1];
-static DEFINE_CPU_DATA(uint64_t[NR_MAX_IRQS], irq_count);
-static DEFINE_CPU_DATA(uint64_t, spurious);
spurious_handler_t spurious_handler;
static void init_irq_desc(void)

View File

@@ -6,8 +6,6 @@
#include <hypervisor.h>
-static DEFINE_CPU_DATA(uint64_t, softirq_pending);
void disable_softirq(int cpu_id)
{
bitmap_clear(SOFTIRQ_ATOMIC, &per_cpu(softirq_pending, cpu_id));

View File

@@ -13,14 +13,6 @@
uint64_t tsc_hz = 1000000000;
-struct per_cpu_timers {
-	struct list_head timer_list;	/* it's for runtime active timer list */
-};
-static DEFINE_CPU_DATA(struct per_cpu_timers, cpu_timers);
-static DEFINE_CPU_DATA(struct dev_handler_node *, timer_node);
static void run_timer(struct timer *timer)
{
/* deadline = 0 means stop timer, we should skip */

View File

@@ -80,15 +80,6 @@ SECTIONS
_ld_bss_end = . ;
} > ram
-	.discard (NOLOAD):
-	{
-		. = ALIGN(4096) ;
-		_ld_cpu_data_start = .;
-		*(.cpu_data) ;
-		. = ALIGN(4096) ;
-		_ld_cpu_data_end = .;
-	} > ram
_ld_ram_size = LENGTH(ram) ;
_ld_ram_end = _ld_ram_size + _ld_ram_start ;
}

View File

@@ -9,9 +9,6 @@
bool x2apic_enabled;
-static DEFINE_CPU_DATA(uint64_t[64], vmexit_cnt);
-static DEFINE_CPU_DATA(uint64_t[64], vmexit_time);
static void run_vcpu_pre_work(struct vcpu *vcpu)
{
unsigned long *pending_pre_work = &vcpu->pending_pre_work;

View File

@@ -7,15 +7,6 @@
#include <hypervisor.h>
#include <schedule.h>
-struct sched_context {
-	spinlock_t runqueue_lock;
-	struct list_head runqueue;
-	unsigned long need_scheduled;
-	struct vcpu *curr_vcpu;
-	spinlock_t scheduler_lock;
-};
-static DEFINE_CPU_DATA(struct sched_context, sched_ctx);
static unsigned long pcpu_used_bitmap;
void init_scheduler(void)

View File

@@ -5,23 +5,13 @@
*/
#include <hypervisor.h>
-#define LOG_ENTRY_SIZE 80
-/* Size of buffer used to store a message being logged,
- * should align to LOG_ENTRY_SIZE.
- */
-#define LOG_MESSAGE_MAX_SIZE (4 * LOG_ENTRY_SIZE)
+#include <per_cpu.h>
/* buf size should be identical to the size in hvlog option, which is
* transfered to SOS:
* bsp/uefi/clearlinux/acrn.conf: hvlog=2M@0x1FE00000
*/
#define HVLOG_BUF_SIZE (2*1024*1024)
-DEFINE_CPU_DATA(char [LOG_MESSAGE_MAX_SIZE], logbuf);
-DEFINE_CPU_DATA(struct shared_buf *, earlylog_sbuf);
struct logmsg {
uint32_t flags;
int seq;

View File

@@ -11,8 +11,6 @@
#include <hypervisor.h>
-DEFINE_CPU_DATA(uint64_t * [ACRN_SBUF_ID_MAX], sbuf);
static inline bool sbuf_is_empty(struct shared_buf *sbuf)
{
return (sbuf->head == sbuf->tail);

View File

@@ -161,68 +161,33 @@ extern uint8_t _ld_cpu_secondary_reset_start[];
extern const uint64_t _ld_cpu_secondary_reset_size;
extern uint8_t _ld_bss_start[];
extern uint8_t _ld_bss_end[];
-extern uint8_t _ld_cpu_data_start[];
-extern uint8_t _ld_cpu_data_end[];
extern int ibrs_type;
/*
- * To support per_cpu access, we use a special section ".cpu_data" to define
+ * To support per_cpu access, we use a special struct "per_cpu_region" to hold
 * the pattern of per CPU data. And we allocate memory for per CPU data
- * according to multiple this section size and pcpu number.
+ * according to multiple this struct size and pcpu number.
*
- * +------------------+------------------+---+------------------+
- * | percpu for pcpu0 | percpu for pcpu1 |...| percpu for pcpuX |
- * +------------------+------------------+---+------------------+
- * ^                  ^
- * |                  |
- * --.cpu_data size---
+ * +-------------------+------------------+---+------------------+
+ * | percpu for pcpu0  | percpu for pcpu1 |...| percpu for pcpuX |
+ * +-------------------+------------------+---+------------------+
+ * ^                   ^
+ * |                   |
+ * <per_cpu_region size>
*
* To access per cpu data, we use:
- *    per_cpu_data_base_ptr + curr_pcpu_id * cpu_data_section_size +
- *    offset_of_symbol_in_cpu_data_section
+ *    per_cpu_base_ptr + sizeof(struct per_cpu_region) * curr_pcpu_id
+ *    + offset_of_member_per_cpu_region
* to locate the per cpu data.
*/
-/* declare per cpu data */
-#define EXTERN_CPU_DATA(type, name)	\
-	extern __typeof__(type) cpu_data_##name
-EXTERN_CPU_DATA(uint8_t, lapic_id);
-EXTERN_CPU_DATA(void *, vcpu);
-EXTERN_CPU_DATA(uint8_t[STACK_SIZE], stack) __aligned(16);
-/* define per cpu data */
-#define DEFINE_CPU_DATA(type, name)	\
-	__typeof__(type) cpu_data_##name \
-	__attribute__((__section__(".cpu_data")))
-extern void *per_cpu_data_base_ptr;
-extern int phy_cpu_num;
-extern uint64_t pcpu_active_bitmap;
-#define PER_CPU_DATA_OFFSET(sym_addr)	\
-	((uint64_t)(sym_addr) - (uint64_t)(_ld_cpu_data_start))
-#define PER_CPU_DATA_SIZE	\
-	((uint64_t)_ld_cpu_data_end - (uint64_t)(_ld_cpu_data_start))
-/*
- * get percpu data for pcpu_id.
- *
- * It returns:
- *     per_cpu_data_##name[pcpu_id];
- */
-#define per_cpu(name, pcpu_id)	\
-	(*({ uint64_t base = (uint64_t)per_cpu_data_base_ptr;	\
-	     uint64_t off = PER_CPU_DATA_OFFSET(&cpu_data_##name); \
-	     ((typeof(&cpu_data_##name))(base +	\
-		(pcpu_id) * PER_CPU_DATA_SIZE + off)); \
-	}))
-/* get percpu data for current pcpu */
-#define get_cpu_var(name)	per_cpu(name, get_cpu_id())
/* CPUID feature words */
enum feature_word {
FEAT_1_ECX = 0, /* CPUID[1].ECX */
@@ -254,6 +219,13 @@ struct cpuinfo_x86 {
char model_name[64];
struct cpu_state_info state_info;
};
+#ifdef STACK_PROTECTOR
+struct stack_canary {
+	/* Gcc generates extra code, using [fs:40] to access canary */
+	uint8_t reserved[40];
+	uint64_t canary;
+};
+#endif
extern struct cpuinfo_x86 boot_cpu_data;
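To make the layout comment above concrete, the two expressions below
compute the same address; the explicit arithmetic is what the old macro
open-coded, while the new scheme lets the compiler do it (this assumes
the per_cpu.h definitions added by this commit, and the 'spurious'
member is chosen arbitrarily):

#include <stddef.h>
#include <stdint.h>

uint64_t *spurious_addr(struct per_cpu_region *base, int pcpu_id)
{
	uint64_t *by_arith = (uint64_t *)((uint8_t *)base
			+ (size_t)pcpu_id * sizeof(struct per_cpu_region)
			+ offsetof(struct per_cpu_region, spurious));
	uint64_t *by_index = &base[pcpu_id].spurious;

	/* by_arith == by_index for any valid pcpu_id. */
	(void)by_arith;
	return by_index;
}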

View File

@@ -279,12 +279,6 @@ extern struct host_gdt HOST_GDT;
extern struct host_gdt_descriptor HOST_GDTR;
void load_gdtr_and_tr(void);
-EXTERN_CPU_DATA(struct tss_64, tss);
-EXTERN_CPU_DATA(struct host_gdt, gdt);
-EXTERN_CPU_DATA(uint8_t[STACK_SIZE], mc_stack) __aligned(16);
-EXTERN_CPU_DATA(uint8_t[STACK_SIZE], df_stack) __aligned(16);
-EXTERN_CPU_DATA(uint8_t[STACK_SIZE], sf_stack) __aligned(16);
#endif /* end #ifndef ASSEMBLER */
#endif /* GDT_H */

View File

@@ -0,0 +1,52 @@
+#ifndef PER_CPU_H
+#define PER_CPU_H
+#include <hypervisor.h>
+#include <bsp_extern.h>
+#include <schedule.h>
+#include <version.h>
+#include <irq.h>
+#include <sbuf.h>
+#include <gdt.h>
+#include <timer.h>
+#include <logmsg.h>
+#include "arch/x86/guest/instr_emul_wrapper.h"
+struct per_cpu_region {
+	uint64_t *sbuf[ACRN_SBUF_ID_MAX];
+	uint64_t irq_count[NR_MAX_IRQS];
+	uint64_t vmexit_cnt[64];
+	uint64_t vmexit_time[64];
+	uint64_t softirq_pending;
+	uint64_t spurious;
+	struct dev_handler_node *timer_node;
+	struct shared_buf *earlylog_sbuf;
+	void *vcpu;
+#ifdef STACK_PROTECTOR
+	struct stack_canary stack_canary;
+#endif
+	struct per_cpu_timers cpu_timers;
+	struct sched_context sched_ctx;
+	struct emul_cnx g_inst_ctxt;
+	struct host_gdt gdt;
+	struct tss_64 tss;
+	int state;
+	uint8_t mc_stack[STACK_SIZE] __aligned(16);
+	uint8_t df_stack[STACK_SIZE] __aligned(16);
+	uint8_t sf_stack[STACK_SIZE] __aligned(16);
+	uint8_t stack[STACK_SIZE] __aligned(16);
+	char logbuf[LOG_MESSAGE_MAX_SIZE];
+	uint8_t lapic_id;
+} __aligned(CPU_PAGE_SIZE); // per_cpu_region size aligned with CPU_PAGE_SIZE
+extern struct per_cpu_region *per_cpu_data_base_ptr;
+extern int phy_cpu_num;
+extern uint64_t pcpu_active_bitmap;
+/*
+ * get percpu data for pcpu_id.
+ */
+#define per_cpu(name, pcpu_id)	\
+	(per_cpu_data_base_ptr[pcpu_id].name)
+/* get percpu data for current pcpu */
+#define get_cpu_var(name)	per_cpu(name, get_cpu_id())
+#endif
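A short usage sketch of the new accessors; the wrapper functions are
illustrative only, but the per_cpu()/get_cpu_var() expressions match
uses elsewhere in this commit (cf. disable_softirq() in softirq.c above):

/* Illustrative callers, not part of this commit: */
static void softirq_example(int cpu_id)
{
	/* Any CPU's data, by explicit id: */
	bitmap_clear(SOFTIRQ_ATOMIC, &per_cpu(softirq_pending, cpu_id));
}

static void state_example(void)
{
	/* The current CPU's data, via get_cpu_var(): */
	get_cpu_var(state) = 0;
	get_cpu_var(vcpu) = NULL;
}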

View File

@@ -14,6 +14,9 @@ enum tick_mode {
TICK_MODE_PERIODIC,
};
+struct per_cpu_timers {
+	struct list_head timer_list;	/* it's for runtime active timer list */
+};
struct timer {
struct list_head node; /* link all timers */

View File

@@ -9,6 +9,14 @@
#define NEED_RESCHEDULED (1)
+struct sched_context {
+	spinlock_t runqueue_lock;
+	struct list_head runqueue;
+	unsigned long need_scheduled;
+	struct vcpu *curr_vcpu;
+	spinlock_t scheduler_lock;
+};
void init_scheduler(void);
void get_schedule_lock(int pcpu_id);
void release_schedule_lock(int pcpu_id);

View File

@@ -19,6 +19,11 @@
/* Logging flags */
#define LOG_FLAG_STDOUT 0x00000001
#define LOG_FLAG_MEMORY 0x00000002
+#define LOG_ENTRY_SIZE 80
+/* Size of buffer used to store a message being logged,
+ * should align to LOG_ENTRY_SIZE.
+ */
+#define LOG_MESSAGE_MAX_SIZE (4 * LOG_ENTRY_SIZE)
#if defined(HV_DEBUG)

View File

@@ -57,8 +57,6 @@ struct shared_buf {
#ifdef HV_DEBUG
-EXTERN_CPU_DATA(uint64_t * [ACRN_SBUF_ID_MAX], sbuf);
static inline void sbuf_clear_flags(struct shared_buf *sbuf, uint64_t flags)
{
sbuf->flags &= ~flags;

View File

@@ -11,7 +11,7 @@
#ifndef TRACE_H
#define TRACE_H
+#include <per_cpu.h>
/* TIMER EVENT */
#define TRACE_TIMER_ACTION_ADDED 0x1
#define TRACE_TIMER_ACTION_PCKUP 0x2