modularization: boot component - move files

The boot component prepares the very basic platform boot environment. It
finally calls into the platform initialization entries:

- bsp_boot_init & cpu_secondary_init for startup
- or restore_s3_context for wakeup

This patch just moves the files into the reorganized directory.

Tracked-On: #1842
Signed-off-by: Jason Chen CJ <jason.cj.chen@intel.com>


@@ -0,0 +1,248 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <cpu.h>
#include <mmu.h>
#include <gdt.h>
#include <idt.h>
#include <msr.h>
/* NOTE:
*
* MISRA C requires that all unsigned constants should have the suffix 'U'
* (e.g. 0xffU), but the assembler may not accept such C-style constants. For
* example, binutils 2.26 fails to compile assembly in that case. To work around
* this, all unsigned constants must be explicitly spelled out in assembly
* with a comment tracking the original expression from which the magic
* number is calculated. As an example:
*
* /* 0x00000668 =
* * (CR4_DE | CR4_PAE | CR4_MCE | CR4_OSFXSR | CR4_OSXMMEXCPT) *\/
* movl $0x00000668, %eax
*
* Make sure that these numbers are updated accordingly if the definition of
* the macros involved are changed.
*/
/* MULTIBOOT HEADER */
#define MULTIBOOT_HEADER_MAGIC 0x1badb002
#define MULTIBOOT_HEADER_FLAGS 0x00000002 /* flags bit 1: enable mem_*, mmap_* */
.extern cpu_primary_save_32
.extern cpu_primary_save_64
.section multiboot_header, "a"
.align 4
/* header magic */
.long MULTIBOOT_HEADER_MAGIC
/* header flags - bit 1: request mem_* and mmap_* info from the bootloader */
.long MULTIBOOT_HEADER_FLAGS
/* header checksum = -(magic + flags) */
.long -(MULTIBOOT_HEADER_MAGIC + MULTIBOOT_HEADER_FLAGS)
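/* The assembler evaluates this checksum to 0xe4524ffc
 * (0x100000000 - (0x1badb002 + 0x00000002)), so that
 * magic + flags + checksum == 0 (mod 2^32), as the Multiboot spec requires. */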
.section entry, "ax"
.align 8
.code32
.global cpu_primary_start_32
cpu_primary_start_32:
/* save the MULTIBOOT magic number & MBI */
movl %eax, (boot_regs)
movl %ebx, (boot_regs+4)
/* Save boot context from 32bit mode */
call cpu_primary_save_32
/* Disable interrupts */
cli
/* Clear direction flag */
cld
/* detect whether it is in long mode
*
* 0xc0000080 = MSR_IA32_EFER
*/
movl $0xc0000080, %ecx
rdmsr
/* 0x400 = MSR_IA32_EFER_LMA_BIT */
test $0x400, %eax
/* jump to 64bit entry if it is already in long mode */
jne cpu_primary_start_64
/* Disable paging */
mov %cr0, %ebx
/* 0x7fffffff = ~CR0_PG */
andl $0x7fffffff, %ebx
mov %ebx, %cr0
/* Set DE, PAE, MCE and OS support bits in CR4
* 0x00000668 =
* (CR4_DE | CR4_PAE | CR4_MCE | CR4_OSFXSR | CR4_OSXMMEXCPT) */
movl $0x00000668, %eax
mov %eax, %cr4
/* Set CR3 to PML4 table address */
movl $cpu_boot32_page_tables_start, %edi
mov %edi, %cr3
/* Set LME bit in EFER */
/* 0xc0000080 = MSR_IA32_EFER */
movl $0xc0000080, %ecx
rdmsr
/* 0x00000100 = MSR_IA32_EFER_LME_BIT */
orl $0x00000100, %eax
wrmsr
/* Enable paging, protection, numeric error and co-processor
monitoring in CR0 to enter long mode */
mov %cr0, %ebx
/* 0x80000023 = (CR0_PG | CR0_PE | CR0_MP | CR0_NE) */
orl $0x80000023, %ebx
mov %ebx, %cr0
/* Load temporary GDT pointer value */
mov $cpu_primary32_gdt_ptr, %ebx
lgdt (%ebx)
/* Perform a long jump to start executing in 64-bit mode */
/* 0x0008 = HOST_GDT_RING0_CODE_SEL */
ljmp $0x0008, $primary_start_long_mode
.code64
.org 0x200
.global cpu_primary_start_64
cpu_primary_start_64:
/* save the MULTIBOOT magic number & MBI */
lea boot_regs(%rip), %rax
movl %edi, (%rax)
movl %esi, 4(%rax)
/* Save boot context from 64bit mode */
call cpu_primary_save_64
primary_start_long_mode:
/* Initialize temporary stack pointer */
lea ld_bss_end(%rip), %rsp
/*0x1000 = CPU_PAGE_SIZE*/
add $0x1000,%rsp
/* 16 = CPU_STACK_ALIGN */
and $(~(16 - 1)),%rsp
/*
* Fix up the .rela sections
* Note: this includes the fixups to the IDT tables and the temporary
* page tables
*/
call relocate
/* Load temporary GDT pointer value */
lea cpu_primary32_gdt_ptr(%rip), %rbx
lgdt (%rbx)
/* Set the correct long jump address */
lea jmpbuf(%rip), %rax
lea after(%rip), %rbx
mov %rbx, (%rax)
rex.w ljmp *(%rax)
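/* With REX.W, the indirect far jump loads an 8-byte offset followed by a
 * 2-byte selector from jmpbuf, reloading CS from the freshly loaded GDT. */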
.data
jmpbuf: .quad 0
/* 0x0008 = HOST_GDT_RING0_CODE_SEL */
.word 0x0008
.text
after:
// load all selector registers with appropriate values
xor %edx, %edx
lldt %dx
/* 0x10 = HOST_GDT_RING0_DATA_SEL*/
movl $0x10,%eax
mov %eax,%ss // Was 32bit POC Stack
mov %eax,%ds // Was 32bit POC Data
mov %eax,%es // Was 32bit POC Data
mov %edx,%fs // Was 32bit POC Data
mov %edx,%gs // Was 32bit POC CLS
/*
* Fix up the IDT descriptors
* The relocation delta in IDT tables has been fixed in relocate()
*/
leal HOST_IDT(%rip), %edx
movl $HOST_IDT_ENTRIES, %ecx
.fixup_idt_entries:
xorl %eax, %eax
xchgl %eax, 12(%edx) /* Set rsvd bits to 0; eax now has
high 32 of entry point */
xchgl %eax, 8(%edx) /* Set bits 63..32 of entry point;
eax now has low 32 of entry point */
movw %ax, (%edx) /* Set bits 0-15 of procedure entry
point */
shr $16, %eax
movw %ax, 6(%edx) /* Set bits 16-31 of entry point */
addl $X64_IDT_DESC_SIZE,%edx
loop .fixup_idt_entries
/* Load IDT */
lea HOST_IDTR(%rip), %rbx
lidtq (%rbx)
/* continue with chipset level initialization */
call bsp_boot_init
loop:
jmp loop
.align 4
.global boot_regs
boot_regs:
.long 0x00000000
.long 0x00000000
/* GDT table */
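/* 0x00af9b000000ffff: ring-0 64-bit (L=1) code segment;
 * 0x00cf93000000ffff: ring-0 flat 4 GB read/write data segment */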
.align 4
cpu_primary32_gdt:
.quad 0x0000000000000000
.quad 0x00af9b000000ffff
.quad 0x00cf93000000ffff
cpu_primary32_gdt_end:
/* GDT pointer */
.align 2
cpu_primary32_gdt_ptr:
.short (cpu_primary32_gdt_end - cpu_primary32_gdt) - 1
.quad cpu_primary32_gdt
/* PML4, PDPT, and PD tables initialized to map first 4 GBytes of memory */
/*0x1000 = CPU_PAGE_SIZE*/
.align 0x1000
.global cpu_boot32_page_tables_start
cpu_boot32_page_tables_start:
/* 0x3 = (PAGE_PRESENT | PAGE_RW) */
.quad cpu_primary32_pdpt_addr + 0x3
/*0x1000 = CPU_PAGE_SIZE*/
.align 0x1000
cpu_primary32_pdpt_addr:
address = 0
.rept 4
/* 0x3 = (PAGE_PRESENT | PAGE_RW) */
.quad cpu_primary32_pdt_addr + address + 0x3
/*0x1000 = CPU_PAGE_SIZE*/
address = address + 0x1000
.endr
/*0x1000 = CPU_PAGE_SIZE*/
.align 0x1000
cpu_primary32_pdt_addr:
address = 0
.rept 2048
/* 0x83 = (PAGE_PSE | PAGE_PRESENT | PAGE_RW) */
.quad address + 0x83
address = address + 0x200000
.endr
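As a reference for the .rept blocks above, here is a minimal C sketch of the
same structure: the first 4 GByte of memory identity-mapped with 2 MB pages.
All names in the sketch are illustrative, not taken from the hypervisor
sources.

#include <stdint.h>

#define PAGE_PRESENT 0x1ULL
#define PAGE_RW      0x2ULL
#define PAGE_PSE     0x80ULL

static uint64_t pml4[512]  __attribute__((aligned(4096)));
static uint64_t pdpt[512]  __attribute__((aligned(4096)));
static uint64_t pd[4][512] __attribute__((aligned(4096)));

static void build_boot_page_tables(void)
{
    /* a single PML4 entry covers the 512 GB slot containing 0..4 GB */
    pml4[0] = (uintptr_t)pdpt | PAGE_PRESENT | PAGE_RW;

    for (int i = 0; i < 4; i++) {
        /* one PDPT entry per GByte, like the 4-iteration .rept */
        pdpt[i] = (uintptr_t)pd[i] | PAGE_PRESENT | PAGE_RW;

        /* 512 two-MByte pages per PD, 2048 entries in total */
        for (int j = 0; j < 512; j++) {
            uint64_t pa = ((uint64_t)i * 512U + (uint64_t)j) * 0x200000ULL;
            pd[i][j] = pa | PAGE_PSE | PAGE_PRESENT | PAGE_RW;
        }
    }
}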


@@ -0,0 +1,97 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <vm0_boot.h>
.section entry, "ax"
.align 8
.code32
.global cpu_primary_save_32
cpu_primary_save_32:
/* save context from 32bit mode */
lea vm0_boot_context, %eax
sgdt BOOT_CTX_GDT_OFFSET(%eax)
sidt BOOT_CTX_IDT_OFFSET(%eax)
str BOOT_CTX_TR_SEL_OFFSET(%eax)
sldt BOOT_CTX_LDT_SEL_OFFSET(%eax)
movl %cr0, %ecx
movl %ecx, BOOT_CTX_CR0_OFFSET(%eax)
movl %cr3, %ecx
movl %ecx, BOOT_CTX_CR3_OFFSET(%eax)
movl %cr4, %ecx
movl %ecx, BOOT_CTX_CR4_OFFSET(%eax)
mov %cs, %cx
mov %cx, BOOT_CTX_CS_SEL_OFFSET(%eax)
lar %ecx, %ecx
/* CS access rights start at bit 8 */
shr $8, %ecx
/* Clear the limit field, bits 8-11 */
andl $0x0000f0ff, %ecx
mov %ecx, BOOT_CTX_CS_AR_OFFSET(%eax)
/* Save CS limit field */
mov %cs, %cx
xor %edx, %edx
lsl %ecx, %edx
mov %edx, BOOT_CTX_CS_LIMIT_OFFSET(%eax)
mov %es, BOOT_CTX_ES_SEL_OFFSET(%eax)
mov %ss, BOOT_CTX_SS_SEL_OFFSET(%eax)
mov %ds, BOOT_CTX_DS_SEL_OFFSET(%eax)
mov %fs, BOOT_CTX_FS_SEL_OFFSET(%eax)
mov %gs, BOOT_CTX_GS_SEL_OFFSET(%eax)
ret
.code64
.global cpu_primary_save_64
cpu_primary_save_64:
/* save context from 64bit mode */
lea vm0_boot_context(%rip), %r8
sgdt BOOT_CTX_GDT_OFFSET(%r8)
sidt BOOT_CTX_IDT_OFFSET(%r8)
str BOOT_CTX_TR_SEL_OFFSET(%r8)
sldt BOOT_CTX_LDT_SEL_OFFSET(%r8)
mov %cr0, %rcx
mov %rcx, BOOT_CTX_CR0_OFFSET(%r8)
mov %cr3, %rcx
mov %rcx, BOOT_CTX_CR3_OFFSET(%r8)
mov %cr4, %rcx
mov %rcx, BOOT_CTX_CR4_OFFSET(%r8)
mov %cs, %cx
mov %cx, BOOT_CTX_CS_SEL_OFFSET(%r8)
lar %ecx, %ecx
/* CS access rights start at bit 8 */
shr $8, %ecx
/* Clear the limit field, bits 8-11 */
andl $0x0000f0ff, %ecx
mov %ecx, BOOT_CTX_CS_AR_OFFSET(%r8)
/* Save CS limit field */
mov %cs, %cx
xor %edx, %edx
lsl %ecx, %edx
mov %edx, BOOT_CTX_CS_LIMIT_OFFSET(%r8)
mov %es, BOOT_CTX_ES_SEL_OFFSET(%r8)
mov %ss, BOOT_CTX_SS_SEL_OFFSET(%r8)
mov %ds, BOOT_CTX_DS_SEL_OFFSET(%r8)
mov %fs, BOOT_CTX_FS_SEL_OFFSET(%r8)
mov %gs, BOOT_CTX_GS_SEL_OFFSET(%r8)
/* 0xc0000080 = MSR_IA32_EFER */
movl $0xc0000080, %ecx
rdmsr
movl %eax, BOOT_CTX_EFER_LOW_OFFSET(%r8)
movl %edx, BOOT_CTX_EFER_HIGH_OFFSET(%r8)
ret
.text
.align 8
.global vm0_boot_context
vm0_boot_context:
.rept SIZE_OF_BOOT_CTX
.byte 0x00
.endr
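The BOOT_CTX_*_OFFSET constants come from vm0_boot.h, which is not part of
this diff. The C struct below is only an illustrative guess at the layout
those offsets describe; the field order and packing are assumptions.

#include <stdint.h>

struct dt_ptr {                 /* as stored by sgdt/sidt */
    uint16_t limit;
    uint64_t base;
} __attribute__((packed));

struct boot_ctx {
    struct dt_ptr gdt;          /* BOOT_CTX_GDT_OFFSET */
    struct dt_ptr idt;          /* BOOT_CTX_IDT_OFFSET */
    uint64_t cr0, cr3, cr4;     /* BOOT_CTX_CR*_OFFSET */
    uint32_t cs_ar;             /* from lar, limit bits 8-11 cleared */
    uint32_t cs_limit;          /* from lsl */
    uint16_t cs_sel, ss_sel, ds_sel, es_sel, fs_sel, gs_sel;
    uint16_t tr_sel, ldt_sel;
    uint32_t ia32_efer_low;     /* MSR_IA32_EFER, saved on the 64-bit path */
    uint32_t ia32_efer_high;
};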


@@ -0,0 +1,420 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <gdt.h>
#include <idt.h>
.altmacro
.global HOST_IDT
.global HOST_IDTR
.section .data
.align 8
.long 0
.short 0
HOST_IDTR:
.short HOST_IDT_SIZE - 1
.quad HOST_IDT
/*
* We'll rearrange and fix up the descriptors at runtime
*/
.macro interrupt_descriptor entry, dpl=0, ist=0
/* 0x0008 = HOST_GDT_RING0_CODE_SEL */
.long 0x0008 << 16
.long 0x00008e00 + (dpl << 13) + ist
.quad entry
.endm
.macro trap_descriptor entry, dpl=0, ist=0
/* 0x0008 = HOST_GDT_RING0_CODE_SEL */
.long 0x0008 << 16
.long 0x00008f00 + (dpl << 13) + ist
.quad entry
.endm
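/* In the second dword, 0x00008e00 sets P=1 and type 0xE (interrupt gate:
 * IF is cleared on entry), while 0x00008f00 sets type 0xF (trap gate: IF
 * is left unchanged); DPL occupies bits 13-14 and the IST index bits 0-2. */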
.macro _external_interrupt_descriptor vector
__external_interrupt_descriptor %vector
.endm
.macro __external_interrupt_descriptor vector
interrupt_descriptor external_interrupt_\vector
.endm
#define MACHINE_CHECK_IST (0x1)
#define DOUBLE_FAULT_IST (0x2)
#define STACK_FAULT_IST (0x3)
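/* #MC, #DF and #SS are given dedicated IST stacks so they can still be
 * delivered when the current stack is corrupt or has overflowed. */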
/*
* We'll use interrupt gates. Change to trap or task only as needed.
*/
.section .rodata
.align 16
HOST_IDT:
interrupt_descriptor excp_divide_error
interrupt_descriptor excp_debug, 3
interrupt_descriptor excp_nmi
interrupt_descriptor excp_breakpoint, 3
interrupt_descriptor excp_overflow, 3
interrupt_descriptor excp_bounds_check
interrupt_descriptor excp_illegal_opcode
interrupt_descriptor excp_device_not_available
interrupt_descriptor excp_double_fault, 0, DOUBLE_FAULT_IST
interrupt_descriptor excp_rsvd_09
interrupt_descriptor excp_invalid_tss
interrupt_descriptor excp_segment_not_present
interrupt_descriptor excp_stack_fault, 0, STACK_FAULT_IST
interrupt_descriptor excp_general_protection
interrupt_descriptor excp_page_fault
interrupt_descriptor excp_rsvd_0f
interrupt_descriptor excp_float_error
interrupt_descriptor excp_alignment_check
interrupt_descriptor expt_machine_check, 0, MACHINE_CHECK_IST
interrupt_descriptor excp_simd_fp_error
interrupt_descriptor excp_virtualization
interrupt_descriptor excp_rsvd_21
interrupt_descriptor excp_rsvd_22
interrupt_descriptor excp_rsvd_23
interrupt_descriptor excp_rsvd_24
interrupt_descriptor excp_rsvd_25
interrupt_descriptor excp_rsvd_26
interrupt_descriptor excp_rsvd_27
interrupt_descriptor excp_rsvd_28
interrupt_descriptor excp_rsvd_29
interrupt_descriptor excp_rsvd_30
interrupt_descriptor excp_rsvd_31
vector = 0x20
.rept (0x100 - 0x20)
_external_interrupt_descriptor vector
vector = vector + 1
.endr
.section .text
.align 16
excp_divide_error:
pushq $0x0 /* pseudo error code */
pushq $0x00
jmp excp_save_frame
.align 8
excp_debug:
pushq $0x0 /* pseudo error code */
pushq $0x01
jmp excp_save_frame
.align 8
excp_nmi:
pushq $0x0 /* pseudo error code */
pushq $0x02
jmp excp_save_frame
.align 8
excp_breakpoint:
pushq $0x0 /* pseudo error code */
pushq $0x03
jmp excp_save_frame
.align 8
excp_overflow:
pushq $0x0 /* pseudo error code */
pushq $0x04
jmp excp_save_frame
.align 8
excp_bounds_check:
pushq $0x0 /* pseudo error code */
pushq $0x05
jmp excp_save_frame
.align 8
excp_illegal_opcode:
pushq $0x0 /* pseudo error code */
pushq $0x06
jmp excp_save_frame
.align 8
excp_device_not_available:
pushq $0x0 /* pseudo error code */
pushq $0x07
jmp excp_save_frame
.align 8
excp_double_fault:
pushq $0x08
jmp excp_save_frame
.align 8
excp_invalid_tss:
pushq $0x0A
jmp excp_save_frame
.align 8
excp_segment_not_present:
pushq $0x0B
jmp excp_save_frame
.align 8
excp_stack_fault:
pushq $0x0C
jmp excp_save_frame
.align 8
excp_general_protection:
pushq $0x0D
jmp excp_save_frame
.align 8
excp_page_fault:
pushq $0x0E
jmp excp_save_frame
.align 8
excp_float_error:
pushq $0x0 /* pseudo error code */
pushq $0x10
jmp excp_save_frame
.align 8
excp_alignment_check:
pushq $0x11
jmp excp_save_frame
.align 8
expt_machine_check:
pushq $0x0 /* pseudo error code */
pushq $0x12
jmp excp_save_frame
.align 8
excp_simd_fp_error:
pushq $0x0 /* pseudo error code */
pushq $0x13
jmp excp_save_frame
.align 8
excp_virtualization:
pushq $0x0 /* pseudo error code */
pushq $0x14
jmp excp_save_frame
/*
* Macros for rsvd vectors. Vectors 0x09, 0x0F, 0x15 through 0x1F
*/
.macro _rsvd_vector vector
__rsvd_vector %vector
.endm
.macro __rsvd_vector vector
.align 8
excp_rsvd_\vector\():
pushq $0x0 /* pseudo error code */
pushq $\vector
jmp excp_rsvd
.endm
.align 8
excp_rsvd_09:
_rsvd_vector 0x09
.align 8
excp_rsvd_0f:
_rsvd_vector 0x0f
vector = 0x15
.rept (0x20 - 0x15)
_rsvd_vector vector
vector = vector + 1
.endr
/*
* Macros for external interrupts. Vectors 0x20 through 0xFF
*/
.macro _external_interrupt vector
__external_interrupt %vector
.endm
.macro __external_interrupt vector
.align 8
external_interrupt_\vector\():
pushq $0x0 /* pseudo error code */
pushq $\vector
jmp external_interrupt_save_frame
.endm
vector = 0x20
.rept (0x100 - 0x20)
_external_interrupt vector
vector = vector + 1
.endr
/*
* Common entry point for defined exceptions
*/
.align 8
excp_save_frame:
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %r11
pushq %r10
pushq %r9
pushq %r8
pushq %rdi
pushq %rsi
pushq %rbp
pushq %rsp
pushq %rbx
pushq %rdx
pushq %rcx
pushq %rax
/* Put current stack pointer into 1st param register (rdi) */
movq %rsp, %rdi
call dispatch_exception
popq %rax
popq %rcx
popq %rdx
popq %rbx
popq %rsp
popq %rbp
popq %rsi
popq %rdi
popq %r8
popq %r9
popq %r10
popq %r11
popq %r12
popq %r13
popq %r14
popq %r15
/* Skip vector and error code*/
add $16, %rsp
iretq
/*
* Common entry point for reserved exceptions.
* These should never execute.
* We put a handler on them anyway to highlight the unexpected.
*/
.align 8
excp_rsvd:
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %r11
pushq %r10
pushq %r9
pushq %r8
pushq %rdi
pushq %rsi
pushq %rbp
pushq %rsp
pushq %rbx
pushq %rdx
pushq %rcx
pushq %rax
/* Put current stack pointer into 1st param register (rdi) */
movq %rsp, %rdi
call dispatch_exception
popq %rax
popq %rcx
popq %rdx
popq %rbx
popq %rsp
popq %rbp
popq %rsi
popq %rdi
popq %r8
popq %r9
popq %r10
popq %r11
popq %r12
popq %r13
popq %r14
popq %r15
/* Skip vector and error code*/
add $16, %rsp
iretq
/*
* Common entry point for defined interrupts.
* Vectors 0x20 through 0xFF
*/
.align 8
external_interrupt_save_frame:
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %r11
pushq %r10
pushq %r9
pushq %r8
pushq %rdi
pushq %rsi
pushq %rbp
pushq %rsp
pushq %rbx
pushq %rdx
pushq %rcx
pushq %rax
/* Put current stack pointer into 1st param register (rdi) */
movq %rsp, %rdi
call dispatch_interrupt
/*
* We disable the softirq path from the interrupt IRET, since right now all
* IRQs are for the guest.
*/
popq %rax
popq %rcx
popq %rdx
popq %rbx
popq %rsp
popq %rbp
popq %rsi
popq %rdi
popq %r8
popq %r9
popq %r10
popq %r11
popq %r12
popq %r13
popq %r14
popq %r15
/* Skip vector and error code*/
add $16, %rsp
iretq
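As an aid to reading the .fixup_idt_entries loop above, here is a C sketch of
the same rewrite. The assembler can only emit the 64-bit handler address as
one .quad at build time, while the hardware wants it split across three
fields, hence the runtime pass. The struct and function names below are
illustrative, not the hypervisor's own.

#include <stdint.h>

/* a 16-byte x86-64 IDT gate; field names are illustrative */
struct idt_gate {
    uint16_t offset_lo;   /* handler bits 15:0 */
    uint16_t selector;    /* code segment selector */
    uint16_t attr;        /* IST, type, DPL, P */
    uint16_t offset_mid;  /* handler bits 31:16 */
    uint32_t offset_hi;   /* handler bits 63:32 */
    uint32_t reserved;    /* must be zero */
};

static void fixup_idt(struct idt_gate *idt, unsigned int entries)
{
    for (unsigned int i = 0U; i < entries; i++) {
        /* bytes 8-11 hold the handler's low dword, bytes 12-15 the high */
        uint64_t handler = ((uint64_t)idt[i].reserved << 32) |
                           (uint64_t)idt[i].offset_hi;

        idt[i].reserved   = 0U;
        idt[i].offset_hi  = (uint32_t)(handler >> 32);
        idt[i].offset_lo  = (uint16_t)handler;
        idt[i].offset_mid = (uint16_t)(handler >> 16);
    }
}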


@@ -0,0 +1,229 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*
* This is the entry for AP startup and BSP S3 wakeup.
*
* When the system jumps to trampoline_start16, the CPU is in x86 real
* mode with no stack set up. CS:IP points to trampoline_start16.
*
* The CPU is finally switched to long mode using the temporary
* page table and GDT in this file, then jumps to a different C main
* entry depending on whether this is AP startup or BSP S3 resume.
* The real page table and GDT are set up in the C main entry.
*/
#include <spinlock.h>
#include <gdt.h>
#include <cpu.h>
#include <mmu.h>
#include <msr.h>
/* NOTE:
*
* MISRA C requires that all unsigned constants should have the suffix 'U'
* (e.g. 0xffU), but the assembler may not accept such C-style constants. For
* example, binutils 2.26 fails to compile assembly in that case. To work around
* this, all unsigned constants must be explicitly spelled out in assembly
* with a comment tracking the original expression from which the magic
* number is calculated. As an example:
*
* /* 0x00000668 =
* * (CR4_DE | CR4_PAE | CR4_MCE | CR4_OSFXSR | CR4_OSXMMEXCPT) *\/
* movl $0x00000668, %eax
*
* Make sure that these numbers are updated accordingly if the definition of
* the macros involved are changed.
*/
.extern cpu_secondary_init
.extern ld_bss_end
.extern HOST_GDTR
.section .trampoline_reset,"ax"
.align 4
.code16
.global trampoline_start16
.org 0
trampoline_start16:
/* Disable local interrupts */
cli
/*
* There are two paths by which the system can come here:
* - AP startup
*    The silicon puts the AP in real mode and sets up CS:IP before
*    jumping to trampoline_start16, and the IP is always 0.
* - BSP wakeup from S3
*    Some bootloaders (like ABL) don't guarantee that IP is set to
*    zero before jumping to trampoline_start16 when the system
*    resumes from S3.
*
* To make the trampoline code work in all these cases, a far jump
* is issued here as a fixup. It updates CS:IP according to where
* the trampoline code is actually located.
*
* Here we issue a far jump in the "JMP ptr16:16" format (refer to
* SDM Vol. 2A, JMP instruction description). The jump target is set
* to trampoline_fixup_target. From trampoline_fixup_target onward,
* CS has the same value for both AP startup and BSP wakeup from S3.
*
* Because of the limitations of real mode (the IP register cannot be
* accessed directly, so trampoline_fixup_ip and trampoline_fixup_cs
* cannot be set up here), trampoline_fixup_ip and trampoline_fixup_cs
* have to be patched while preparing the trampoline code.
*
* Refer to preparing_trampoline() for the fixup CS:IP setup; an
* illustrative sketch of that patching appears at the end of this file.
*/
.byte 0xea /* Opcode of "JMP ptr16:16" */
.global trampoline_fixup_ip
trampoline_fixup_ip:
.word 0 /* "EIP is the instruction following the JMP instruction" */
.global trampoline_fixup_cs
trampoline_fixup_cs:
.word 0 /* CS */
.global trampoline_fixup_target
trampoline_fixup_target:
mov %cs, %ax
mov %ax, %ds
/* Set DE, PAE, MCE and OS support bits in CR4 */
/* 0x00000668 =
* (CR4_DE | CR4_PAE | CR4_MCE | CR4_OSFXSR | CR4_OSXMMEXCPT) */
movl $0x00000668, %eax
mov %eax, %cr4
/* Set CR3 to PML4 table address */
movl $cpu_boot_page_tables_ptr, %ebx
mov (%ebx), %eax
mov %eax, %cr3
/* Set LME bit in EFER */
/* 0xc0000080 = MSR_IA32_EFER */
movl $0xc0000080, %ecx
rdmsr
/* 0x00000100 = MSR_IA32_EFER_LME_BIT */
orl $0x00000100, %eax
wrmsr
/* Enable paging, protection, numeric error and co-processor
monitoring in CR0 to enter long mode */
mov %cr0, %ebx
/* 0x80000023 = (CR0_PG | CR0_PE | CR0_MP | CR0_NE) */
orl $0x80000023, %ebx
mov %ebx, %cr0
/* Load temporary GDT pointer value */
lgdt (trampoline_gdt_ptr - trampoline_start16)
/* Perform a long jump to start executing in 64-bit mode */
movl $trampoline_start64_fixup, %ebx
ljmpl *(%ebx)
.align 8
.global trampoline_start64_fixup
trampoline_start64_fixup:
.long trampoline_start64
/* 0x0008 = HOST_GDT_RING0_CODE_SEL */
.word 0x0008
.code64
trampoline_start64:
/* Set up all other data segment registers */
/* 0x0010 = HOST_GDT_RING0_DATA_SEL */
movl $0x0010, %eax
mov %eax, %ss
mov %eax, %ds
mov %eax, %es
mov %eax, %fs
mov %eax, %gs
/* Obtain CPU spin-lock to serialize trampoline for different APs */
movq trampoline_spinlock_ptr(%rip), %rdi
spinlock_obtain(%rdi)
/* Initialize the temporary stack pointer.
NOTE: the PML4 page is reused as the temporary stack: only its
very first entry is in use, and the stack grows down from the top
of that page (trampoline_pdpt_addr, the label that immediately
follows it). The stack is only used for a VERY short period of
time, so this reuse of PML4 memory should be acceptable. */
lea trampoline_pdpt_addr(%rip), %rsp
/* Jump to C entry */
movq main_entry(%rip), %rax
jmp *%rax
/* main entry */
.align 8
.global main_entry
main_entry:
.quad cpu_secondary_init /* default entry is AP start entry */
.global trampoline_spinlock_ptr
trampoline_spinlock_ptr:
.quad trampoline_spinlock
/* GDT table */
.align 4
trampoline_gdt:
.quad 0x0000000000000000
.quad 0x00af9b000000ffff
.quad 0x00cf93000000ffff
trampoline_gdt_end:
/* GDT pointer */
.align 2
.global trampoline_gdt_ptr
trampoline_gdt_ptr:
.short (trampoline_gdt_end - trampoline_gdt) - 1
.quad trampoline_gdt
/* PML4, PDPT, and PD tables initialized to map first 4 GBytes of memory */
.align 4
.global cpu_boot_page_tables_ptr
cpu_boot_page_tables_ptr:
.long cpu_boot_page_tables_start
/*0x1000 = CPU_PAGE_SIZE*/
.align 0x1000
.global cpu_boot_page_tables_start
cpu_boot_page_tables_start:
/* 0x3 = (PAGE_PRESENT | PAGE_RW) */
.quad trampoline_pdpt_addr + 0x3
/*0x1000 = CPU_PAGE_SIZE*/
.align 0x1000
.global trampoline_pdpt_addr
trampoline_pdpt_addr:
address = 0
.rept 4
/* 0x3 = (PAGE_PRESENT | PAGE_RW) */
.quad trampoline_pdt_addr + address + 0x3
/*0x1000 = CPU_PAGE_SIZE*/
address = address + 0x1000
.endr
/*0x1000 = CPU_PAGE_SIZE*/
.align 0x1000
trampoline_pdt_addr:
address = 0
.rept 2048
/* 0x83 = (PAGE_PSE | PAGE_PRESENT | PAGE_RW) */
.quad address + 0x83
address = address + 0x200000
.endr
.end
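preparing_trampoline(), referenced in the fixup comment near
trampoline_start16, is not part of this diff. The sketch below is a
hypothetical rendering of the CS:IP patching it has to perform, assuming the
trampoline blob is copied to a 16-byte-aligned physical address below 1 MB:

#include <stdint.h>
#include <string.h>

/* labels exported by the trampoline; their differences give offsets
 * within the blob */
extern uint8_t trampoline_start16[], trampoline_fixup_ip[],
               trampoline_fixup_cs[], trampoline_fixup_target[];

static void fixup_trampoline(uint8_t *copy, uint64_t dest_pa)
{
    /* in real mode the far-jump target resolves to CS * 16 + IP */
    uint16_t cs = (uint16_t)(dest_pa >> 4U);
    uint16_t ip = (uint16_t)(trampoline_fixup_target - trampoline_start16);

    memcpy(copy + (trampoline_fixup_ip - trampoline_start16), &ip, sizeof(ip));
    memcpy(copy + (trampoline_fixup_cs - trampoline_start16), &cs, sizeof(cs));
}

With CS = dest_pa >> 4 and IP = the target's offset within the blob, the far
jump resolves to dest_pa + offset regardless of the CS:IP the firmware used
to enter trampoline_start16, which is exactly what the fixup comment requires.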