From 2938eca363133bd3322865360c76572443ce42dc Mon Sep 17 00:00:00 2001
From: Victor Sun
Date: Tue, 22 Jun 2021 13:03:34 +0800
Subject: [PATCH] HV: vm_load: refine api of get_bzimage_kernel_load_addr()

As the previous commit said, the kernel load address calculation should
be moved from init_vm_sw_load() to the vm_sw_loader() stage. This patch
refines the API of get_bzimage_kernel_load_addr() and its call in
init_vm_kernel_info() in preparation for that move.

Tracked-On: #6323
Signed-off-by: Victor Sun
Reviewed-by: Jason Chen CJ
---
 hypervisor/boot/guest/vboot_info.c | 92 ++++++++++++++----------------
 1 file changed, 44 insertions(+), 48 deletions(-)

diff --git a/hypervisor/boot/guest/vboot_info.c b/hypervisor/boot/guest/vboot_info.c
index acd233fcb..c3fcd0fb8 100644
--- a/hypervisor/boot/guest/vboot_info.c
+++ b/hypervisor/boot/guest/vboot_info.c
@@ -145,65 +145,55 @@ static void init_vm_acpi_info(struct acrn_vm *vm, const struct abi_module *mod)
 /**
  * @pre vm != NULL
  */
-static void *get_kernel_load_addr(struct acrn_vm *vm)
+static void *get_bzimage_kernel_load_addr(struct acrn_vm *vm)
 {
 	void *load_addr = NULL;
 	struct vm_sw_info *sw_info = &vm->sw;
 	struct zero_page *zeropage;
-	struct acrn_vm_config *vm_config = get_vm_config(vm->vm_id);
 
-	switch (sw_info->kernel_type) {
-	case KERNEL_BZIMAGE:
-		/* According to the explaination for pref_address
-		 * in Documentation/x86/boot.txt, a relocating
-		 * bootloader should attempt to load kernel at pref_address
-		 * if possible. A non-relocatable kernel will unconditionally
-		 * move itself and to run at this address.
+	/* According to the explanation of pref_address
+	 * in Documentation/x86/boot.txt, a relocating
+	 * bootloader should attempt to load the kernel at pref_address
+	 * if possible. A non-relocatable kernel will unconditionally
+	 * move itself to run at this address.
+	 */
+	zeropage = (struct zero_page *)sw_info->kernel_info.kernel_src_addr;
+
+	if ((is_sos_vm(vm)) && (zeropage->hdr.relocatable_kernel != 0U)) {
+		uint64_t mods_start, mods_end;
+		uint64_t kernel_load_gpa = INVALID_GPA;
+		uint32_t kernel_align = zeropage->hdr.kernel_alignment;
+		uint32_t kernel_init_size = zeropage->hdr.init_size;
+		/* Because the kernel load address needs to be aligned up to kernel_align,
+		 * whereas find_space_from_ve820() can only return a page-aligned address,
+		 * we enlarge the needed size to (kernel_init_size + 2 * kernel_align).
 		 */
-		zeropage = (struct zero_page *)sw_info->kernel_info.kernel_src_addr;
+		uint32_t kernel_size = kernel_init_size + 2 * kernel_align;
 
-		if ((is_sos_vm(vm)) && (zeropage->hdr.relocatable_kernel != 0U)) {
-			uint64_t mods_start, mods_end;
-			uint64_t kernel_load_gpa = INVALID_GPA;
-			uint32_t kernel_align = zeropage->hdr.kernel_alignment;
-			uint32_t kernel_init_size = zeropage->hdr.init_size;
-			/* Because the kernel load address need to be up aligned to kernel_align size
-			 * whereas find_space_from_ve820() can only return page aligned address,
-			 * we enlarge the needed size to (kernel_init_size + 2 * kernel_align).
-			 */
-			uint32_t kernel_size = kernel_init_size + 2 * kernel_align;
+		get_boot_mods_range(&mods_start, &mods_end);
+		mods_start = sos_vm_hpa2gpa(mods_start);
+		mods_end = sos_vm_hpa2gpa(mods_end);
 
-			get_boot_mods_range(&mods_start, &mods_end);
-			mods_start = sos_vm_hpa2gpa(mods_start);
-			mods_end = sos_vm_hpa2gpa(mods_end);
+		/* TODO: support loading the kernel when modules are beyond the 4GB space. */
+		if (mods_end < MEM_4G) {
+			kernel_load_gpa = find_space_from_ve820(vm, kernel_size, MEM_1M, mods_start);
 
-			/* TODO: support load kernel when modules are beyond 4GB space. */
-			if (mods_end < MEM_4G) {
-				kernel_load_gpa = find_space_from_ve820(vm, kernel_size, MEM_1M, mods_start);
-
-				if (kernel_load_gpa == INVALID_GPA) {
-					kernel_load_gpa = find_space_from_ve820(vm, kernel_size, mods_end, MEM_4G);
-				}
-			}
-
-			if (kernel_load_gpa != INVALID_GPA) {
-				load_addr = (void *)roundup((uint64_t)kernel_load_gpa, kernel_align);
-			}
-		} else {
-			load_addr = (void *)zeropage->hdr.pref_addr;
-			if (is_sos_vm(vm)) {
-				/* The non-relocatable SOS kernel might overlap with boot modules. */
-				pr_err("Non-relocatable kernel found, risk to boot!");
+			if (kernel_load_gpa == INVALID_GPA) {
+				kernel_load_gpa = find_space_from_ve820(vm, kernel_size, mods_end, MEM_4G);
 			}
 		}
-		break;
-	case KERNEL_ZEPHYR:
-		load_addr = (void *)vm_config->os_config.kernel_load_addr;
-		break;
-	default:
-		pr_err("Unsupported Kernel type.");
-		break;
+
+		if (kernel_load_gpa != INVALID_GPA) {
+			load_addr = (void *)roundup((uint64_t)kernel_load_gpa, kernel_align);
+		}
+	} else {
+		load_addr = (void *)zeropage->hdr.pref_addr;
+		if (is_sos_vm(vm)) {
+			/* The non-relocatable SOS kernel might overlap with boot modules. */
+			pr_err("Non-relocatable kernel found, risk to boot!");
+		}
 	}
+
 	if (load_addr == NULL) {
 		pr_err("Could not get kernel load addr of VM %d .", vm->vm_id);
 	}
@@ -226,7 +216,13 @@ static int32_t init_vm_kernel_info(struct acrn_vm *vm, const struct abi_module *
 	vm->sw.kernel_info.kernel_src_addr = mod->start;
 	if (vm->sw.kernel_info.kernel_src_addr != NULL) {
 		vm->sw.kernel_info.kernel_size = mod->size;
-		vm->sw.kernel_info.kernel_load_addr = get_kernel_load_addr(vm);
+		if (vm->sw.kernel_type == KERNEL_BZIMAGE) {
+			vm->sw.kernel_info.kernel_load_addr = get_bzimage_kernel_load_addr(vm);
+		} else if (vm->sw.kernel_type == KERNEL_ZEPHYR) {
+			vm->sw.kernel_info.kernel_load_addr = (void *)vm_config->os_config.kernel_load_addr;
+		} else {
+			pr_err("Unsupported Kernel type.");
+		}
 	}
 
 	return (vm->sw.kernel_info.kernel_load_addr == NULL) ? (-EINVAL) : 0;
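Note on intended usage (illustrative sketch, not part of this patch): with the
kernel-type dispatch now handled in init_vm_kernel_info(), a follow-up change
can defer the bzImage address calculation to the vm_sw_loader() stage by
calling the refined helper there. A minimal sketch is below; the function name
vm_bzimage_loader() and its exact shape are assumptions for illustration only,
not code from this series.

	/* Hypothetical vm_sw_loader()-stage caller (sketch only). */
	static int32_t vm_bzimage_loader(struct acrn_vm *vm)
	{
		int32_t ret = -EINVAL;
		/* Resolve the load address at kernel load time instead of at
		 * init_vm_kernel_info() time.
		 */
		void *load_addr = get_bzimage_kernel_load_addr(vm);

		if (load_addr != NULL) {
			vm->sw.kernel_info.kernel_load_addr = load_addr;
			ret = 0;
		}

		return ret;
	}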