hv: don't combine the trampline code with AP start

Clean up the "cpu_secondary_xx" naming of the symbols, sections, functions,
and variables in the trampline code.

One item is left: the default C entry is the AP start C entry. Before
ACRN enters S3, the C entry will be updated to the high-level S3 C entry,
so the S3 resume will take the S3 resume path instead of the AP startup
path (see the sketch below).

Signed-off-by: Yin Fengwei <fengwei.yin@intel.com>
Signed-off-by: Zheng Gen <gen.zheng@intel.com>
Acked-by: Anthony Xu <anthony.xu@intel.com>
Acked-by: Eddie Dong <Eddie.dong@intel.com>
Yin Fengwei 2018-06-07 12:20:56 +08:00 committed by lijinxia
parent 11df803da3
commit f3831cdc80
5 changed files with 50 additions and 49 deletions
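
As a rough illustration of the entry-switch mechanism described in the commit
message, here is a minimal C sketch, not part of this change. It assumes that
main_entry (the .quad defined in the trampline code, defaulting to
cpu_secondary_init) resolves to the copy of the trampline image that the
wakeup actually executes, and that s3_resume_c_entry() and
prepare_trampline_for_s3() are hypothetical names used only for illustration:

    #include <stdint.h>

    /* main_entry is the 64-bit slot read by the trampline code via
     * "movq main_entry(%rip), %rax"; it defaults to cpu_secondary_init. */
    extern uint64_t main_entry;

    /* Hypothetical high-level S3 resume C entry. */
    void s3_resume_c_entry(void);

    /* Hypothetical helper: called before ACRN enters S3 so that a wakeup
     * through trampline_start16 -> trampline_start64 lands on the S3
     * resume path instead of the AP startup path. */
    void prepare_trampline_for_s3(void)
    {
        main_entry = (uint64_t)s3_resume_c_entry;
    }
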

View File

@@ -8,7 +8,7 @@
#include <schedule.h>
#include <version.h>
-spinlock_t cpu_secondary_spinlock = {
+spinlock_t trampline_spinlock = {
.head = 0,
.tail = 0
};
@@ -574,7 +574,7 @@ void cpu_secondary_init(void)
/* Release secondary boot spin-lock to allow one of the next CPU(s) to
* perform this common initialization
*/
-spinlock_release(&cpu_secondary_spinlock);
+spinlock_release(&trampline_spinlock);
/* Initialize secondary processor interrupts. */
interrupt_init(get_cpu_id());
@@ -615,10 +615,10 @@ void start_cpus()
uint32_t expected_up;
/*Copy segment for AP initialization code below 1MB */
-memcpy_s(_ld_cpu_secondary_reset_start,
-(unsigned long)&_ld_cpu_secondary_reset_size,
-_ld_cpu_secondary_reset_load,
-(unsigned long)&_ld_cpu_secondary_reset_size);
+memcpy_s(_ld_trampline_start,
+(unsigned long)&_ld_trampline_size,
+_ld_trampline_load,
+(unsigned long)&_ld_trampline_size);
/* Set flag showing number of CPUs expected to be up to all
* cpus
@@ -627,7 +627,7 @@ void start_cpus()
/* Broadcast IPIs to all other CPUs */
send_startup_ipi(INTR_CPU_STARTUP_ALL_EX_SELF,
--1U, ((uint64_t) cpu_secondary_reset));
+-1U, ((uint64_t) trampline_start16));
/* Wait until global count is equal to expected CPU up count or
* configured time-out has expired

View File

@@ -573,10 +573,10 @@ int prepare_vm0_memmap_and_e820(struct vm *vm)
* FIXME: here using hard code GUEST_INIT_PAGE_TABLE_START as guest init page
* table gpa start, and it will occupy at most GUEST_INIT_PT_PAGE_NUM pages.
* Some check here:
-* - guest page table space should not override cpu_secondary_reset code area
+* - guest page table space should not override trampline code area
* (it's a little tricky here, as under current identical mapping, HV & SOS
* share same memory under 1M; under uefi boot mode, the defered AP startup
-* need cpu_secondary_reset code area which reserved by uefi stub keep there
+* need trampline code area which reserved by uefi stub keep there
* no change even after SOS startup)
* - guest page table space should not override possible RSDP fix segment
*
@@ -604,8 +604,8 @@ uint64_t create_guest_initial_paging(struct vm *vm)
RSDP_F_ADDR, "RSDP fix segment could be override");
if (GUEST_INIT_PAGE_TABLE_SKIP_SIZE <
-(unsigned long)&_ld_cpu_secondary_reset_size) {
-panic("guest init PTs override cpu_secondary_reset code");
+(unsigned long)&_ld_trampline_size) {
+panic("guest init PTs override trampline code");
}
/* Using continuous memory for guest page tables, the total 4K page

View File

@@ -16,12 +16,12 @@
.extern _ld_bss_end
.extern HOST_GDTR
-.section .cpu_secondary_reset,"ax"
+.section .trampline_reset,"ax"
.align 4
.code16
-.global cpu_secondary_reset
-cpu_secondary_reset:
+.global trampline_start16
+trampline_start16:
/* Disable local interrupts */
@@ -54,14 +54,14 @@ cpu_secondary_reset:
mov %ebx, %cr0
/* Load temportary GDT pointer value */
-lgdt (cpu_secondary_gdt_ptr - cpu_secondary_reset)
+lgdt (trampline_gdt_ptr - trampline_start16)
/* Perform a long jump based to start executing in 64-bit mode */
-data32 ljmp $HOST_GDT_RING0_CODE_SEL, $cpu_secondary_long_mode
+data32 ljmp $HOST_GDT_RING0_CODE_SEL, $trampline_start64
.code64
-cpu_secondary_long_mode:
+trampline_start64:
/* Set up all other data segment registers */
@@ -72,10 +72,8 @@ cpu_secondary_long_mode:
mov %eax, %fs
mov %eax, %gs
-/* Obtain secondary CPU spin-lock to serialize
-booting of secondary cores for a bit */
-spinlock_obtain(cpu_secondary_spinlock)
+/* Obtain CPU spin-lock to serialize trampline for different APs */
+spinlock_obtain(trampline_spinlock)
/* Initialize temporary stack pointer
NOTE: Using the PML4 memory (PDPT address is top of memory
@@ -86,51 +84,54 @@ cpu_secondary_long_mode:
used for a VERY short period of time, so
this reuse of PML4 memory should be acceptable. */
-movq $cpu_secondary_pdpt_addr, %rsp
+movq $trampline_pdpt_addr, %rsp
/* Push sp magic to top of stack for call trace */
pushq $SP_BOTTOM_MAGIC
-/* Jump to C entry for the AP */
+/* Jump to C entry */
+movq main_entry(%rip), %rax
+jmp %rax
-call cpu_secondary_init
+trampline_error: /* should never come here */
+jmp trampline_error
-cpu_secondary_error:
-/* Error condition trap */
-jmp cpu_secondary_error
+/* main entry */
+.align 8
+.global main_entry
+main_entry:
+.quad cpu_secondary_init /* default entry is AP start entry */
/* GDT table */
.align 4
-cpu_secondary_gdt:
+trampline_gdt:
.quad 0x0000000000000000
.quad 0x00af9b000000ffff
.quad 0x00cf93000000ffff
-cpu_secondary_gdt_end:
+trampline_gdt_end:
/* GDT pointer */
.align 2
-cpu_secondary_gdt_ptr:
-.short (cpu_secondary_gdt_end - cpu_secondary_gdt) - 1
-.quad cpu_secondary_gdt
+trampline_gdt_ptr:
+.short (trampline_gdt_end - trampline_gdt) - 1
+.quad trampline_gdt
/* PML4, PDPT, and PD tables initialized to map first 4 GBytes of memory */
.align CPU_PAGE_SIZE
.global CPU_Boot_Page_Tables_Start
CPU_Boot_Page_Tables_Start:
-.quad cpu_secondary_pdpt_addr + (IA32E_COMM_P_BIT | IA32E_COMM_RW_BIT)
+.quad trampline_pdpt_addr + (IA32E_COMM_P_BIT | IA32E_COMM_RW_BIT)
.align CPU_PAGE_SIZE
-cpu_secondary_pdpt_addr:
+trampline_pdpt_addr:
address = 0
.rept 4
-.quad cpu_secondary_pdt_addr + address + \
+.quad trampline_pdt_addr + address + \
(IA32E_COMM_P_BIT | IA32E_COMM_RW_BIT)
address = address + CPU_PAGE_SIZE
.endr
.align CPU_PAGE_SIZE
-cpu_secondary_pdt_addr:
+trampline_pdt_addr:
address = 0
.rept 2048
.quad address + (IA32E_PDPTE_PS_BIT | IA32E_COMM_P_BIT | IA32E_COMM_RW_BIT)

View File

@@ -39,20 +39,20 @@ SECTIONS
} > ram
-_ld_cpu_secondary_reset_load = .;
+_ld_trampline_load = .;
-.cpu_secondary : AT (_ld_cpu_secondary_reset_load)
+.trampline : AT (_ld_trampline_load)
{
-_ld_cpu_secondary_reset_start = .;
-*(.cpu_secondary_reset);
+_ld_trampline_start = .;
+*(.trampline_reset);
. = ALIGN(4);
-_ld_cpu_secondary_reset_end = .;
+_ld_trampline_end = .;
} > lowram
-_ld_cpu_secondary_reset_size = _ld_cpu_secondary_reset_end - _ld_cpu_secondary_reset_start;
+_ld_trampline_size = _ld_trampline_end - _ld_trampline_start;
-.data (_ld_cpu_secondary_reset_load + _ld_cpu_secondary_reset_size):
+.data (_ld_trampline_load + _ld_trampline_size):
{
*(.data) ;
*(.data*) ;

View File

@@ -156,9 +156,9 @@ int cpu_find_logical_id(uint32_t lapic_id);
/**********************************/
/* EXTERNAL VARIABLES */
/**********************************/
-extern const uint8_t _ld_cpu_secondary_reset_load[];
-extern uint8_t _ld_cpu_secondary_reset_start[];
-extern const uint64_t _ld_cpu_secondary_reset_size;
+extern const uint8_t _ld_trampline_load[];
+extern uint8_t _ld_trampline_start[];
+extern const uint64_t _ld_trampline_size;
extern uint8_t _ld_bss_start[];
extern uint8_t _ld_bss_end[];
@@ -239,7 +239,7 @@ extern struct cpuinfo_x86 boot_cpu_data;
/* Function prototypes */
void cpu_dead(uint32_t logical_id);
-void cpu_secondary_reset(void);
+void trampline_start16(void);
int hv_main(int cpu_id);
bool is_vapic_supported(void);
bool is_vapic_intr_delivery_supported(void);