hv: riscv: implement cpu_ctx_save and cpu_ctx_restore

Implement cpu_ctx_save and cpu_ctx_restore to save and restore CPU
registers around exception and interrupt handling on RISC-V.

Tracked-On: #8813
Signed-off-by: Haicheng Li <haicheng.li@intel.com>
Co-developed-by: Shiqing Gao <shiqing.gao@intel.com>
Signed-off-by: Yifan Liu <yifan1.liu@intel.com>
Signed-off-by: Shiqing Gao <shiqing.gao@intel.com>
Reviewed-by: Yifan Liu  <yifan1.liu@intel.com>
Acked-by: Wang, Yu1 <yu1.wang@intel.com>
This commit is contained in:
Haicheng Li
2025-09-11 16:59:10 +08:00
committed by acrnsi-robot
parent 5803232d83
commit 6276763cd5
2 changed files with 217 additions and 0 deletions

View File

@@ -8,12 +8,61 @@
#ifndef RISCV_CPU_H
#define RISCV_CPU_H
#ifndef ASSEMBLER
#include <types.h>
#include <lib/util.h>
#include <logmsg.h>
#include <board_info.h>
#include <barrier.h>
/* The following symbols must remain consistent:
* - CPU_REGS_OFFSET_* macros in `include/arch/riscv/asm/offset.h`
* - struct cpu_regs
* - cpu_ctx_save/cpu_ctx_restore macros used in assembly
*/
/* Trap-time CPU context as laid out on the stack by the cpu_ctx_save
 * assembly macro and consumed by cpu_ctx_restore.  Every field is one
 * 8-byte slot; field order and spacing must stay in sync with the
 * CPU_REGS_OFFSET_* macros in `include/arch/riscv/asm/offset.h`.
 */
struct cpu_regs {
/* General purpose registers. */
uint64_t zero; /* x0: hardwired zero; slot keeps the layout aligned with x0..x31 (cpu_ctx_save never writes it) */
uint64_t ra; /* x1: return address */
uint64_t sp; /* x2: stack pointer as stored by cpu_ctx_save (post frame allocation) */
uint64_t gp; /* x3: global pointer */
uint64_t tp; /* x4: thread pointer */
uint64_t t0; /* x5-x7: temporaries */
uint64_t t1;
uint64_t t2;
uint64_t s0; /* x8-x9: callee-saved */
uint64_t s1;
uint64_t a0; /* x10-x17: argument/return registers */
uint64_t a1;
uint64_t a2;
uint64_t a3;
uint64_t a4;
uint64_t a5;
uint64_t a6;
uint64_t a7;
uint64_t s2; /* x18-x27: callee-saved */
uint64_t s3;
uint64_t s4;
uint64_t s5;
uint64_t s6;
uint64_t s7;
uint64_t s8;
uint64_t s9;
uint64_t s10;
uint64_t s11;
uint64_t t3; /* x28-x31: temporaries */
uint64_t t4;
uint64_t t5;
uint64_t t6;
/* Control and Status Registers (CSRs). */
uint64_t epc; /* sepc: PC of the trapped instruction */
uint64_t status; /* sstatus at trap entry */
uint64_t cause; /* scause: trap cause (saved for inspection; not restored) */
uint64_t tval; /* stval: trap value, e.g. faulting address (not restored) */
uint64_t scratch; /* sscratch */
};
#define cpu_relax() cpu_memory_barrier() /* TODO: replace with yield instruction */
#define NR_CPUS MAX_PCPU_NUM
@@ -80,4 +129,111 @@ void wait_sync_change(volatile const uint64_t *sync, uint64_t wake_sync);
void init_percpu_hart_id(uint32_t bsp_hart_id);
uint16_t get_pcpu_id_from_hart_id(uint32_t hart_id);
#else /* ASSEMBLER */
#include <asm/offset.h>
/* The following symbols must remain consistent:
* - CPU_REGS_OFFSET_* macros in `include/arch/riscv/asm/offset.h`
* - struct cpu_regs
* - cpu_ctx_save/cpu_ctx_restore macros used in assembly
*/
/* cpu_ctx_save: allocate a struct cpu_regs frame on the stack and store the
 * interrupted context into it.  Slot offsets come from the CPU_REGS_OFFSET_*
 * macros in <asm/offset.h> and must match struct cpu_regs.  t0-t4 are reused
 * as scratch for the CSR reads only after their original values have been
 * stored, so no live register content is lost.
 * NOTE(review): the sscratch slot sits at CPU_REGS_OFFSET_SCRATCH, so the
 * frame must span at least CPU_REGS_OFFSET_SCRATCH + 8 bytes -- confirm that
 * CPU_REGS_OFFSET_LAST covers the whole struct, not just the offset of its
 * last field, or the sscratch store below lands on the caller's stack.
 */
.macro cpu_ctx_save
addi sp, sp, -CPU_REGS_OFFSET_LAST /* allocate the register frame */
/* General purpose registers. */
/* The value saved here is the post-allocation sp; cpu_ctx_restore
 * compensates by adding CPU_REGS_OFFSET_LAST back after reloading it. */
sd sp, CPU_REGS_OFFSET_SP(sp)
sd ra, CPU_REGS_OFFSET_RA(sp)
sd gp, CPU_REGS_OFFSET_GP(sp)
sd tp, CPU_REGS_OFFSET_TP(sp)
sd t0, CPU_REGS_OFFSET_T0(sp)
sd t1, CPU_REGS_OFFSET_T1(sp)
sd t2, CPU_REGS_OFFSET_T2(sp)
sd s0, CPU_REGS_OFFSET_S0(sp)
sd s1, CPU_REGS_OFFSET_S1(sp)
sd a0, CPU_REGS_OFFSET_A0(sp)
sd a1, CPU_REGS_OFFSET_A1(sp)
sd a2, CPU_REGS_OFFSET_A2(sp)
sd a3, CPU_REGS_OFFSET_A3(sp)
sd a4, CPU_REGS_OFFSET_A4(sp)
sd a5, CPU_REGS_OFFSET_A5(sp)
sd a6, CPU_REGS_OFFSET_A6(sp)
sd a7, CPU_REGS_OFFSET_A7(sp)
sd s2, CPU_REGS_OFFSET_S2(sp)
sd s3, CPU_REGS_OFFSET_S3(sp)
sd s4, CPU_REGS_OFFSET_S4(sp)
sd s5, CPU_REGS_OFFSET_S5(sp)
sd s6, CPU_REGS_OFFSET_S6(sp)
sd s7, CPU_REGS_OFFSET_S7(sp)
sd s8, CPU_REGS_OFFSET_S8(sp)
sd s9, CPU_REGS_OFFSET_S9(sp)
sd s10, CPU_REGS_OFFSET_S10(sp)
sd s11, CPU_REGS_OFFSET_S11(sp)
sd t3, CPU_REGS_OFFSET_T3(sp)
sd t4, CPU_REGS_OFFSET_T4(sp)
sd t5, CPU_REGS_OFFSET_T5(sp)
sd t6, CPU_REGS_OFFSET_T6(sp)
/* Control and Status Registers (CSRs).
 * t0-t4 were saved above, so they are free to stage the CSR values. */
csrr t0, sepc
sd t0, CPU_REGS_OFFSET_EPC(sp)
csrr t1, sstatus
sd t1, CPU_REGS_OFFSET_STATUS(sp)
csrr t2, scause
sd t2, CPU_REGS_OFFSET_CAUSE(sp)
csrr t3, stval
sd t3, CPU_REGS_OFFSET_TVAL(sp)
csrr t4, sscratch
sd t4, CPU_REGS_OFFSET_SCRATCH(sp)
.endm
/* cpu_ctx_restore: reload the context stored by cpu_ctx_save and release
 * the stack frame.  CSRs are written first, while t0/t1/t4 may still be
 * used as scratch; the general purpose registers (including those
 * temporaries) are reloaded afterwards, and sp is restored last.
 */
.macro cpu_ctx_restore
/* Control and Status Registers (CSRs). */
ld t0, CPU_REGS_OFFSET_EPC(sp)
csrw sepc, t0
ld t1, CPU_REGS_OFFSET_STATUS(sp)
csrw sstatus, t1
/* scause/stval carry trap information only and are rewritten by hardware
 * on the next trap, so restoring them is unnecessary and will be skipped. */
ld t4, CPU_REGS_OFFSET_SCRATCH(sp)
csrw sscratch, t4
/* General purpose registers. */
ld ra, CPU_REGS_OFFSET_RA(sp)
ld gp, CPU_REGS_OFFSET_GP(sp)
ld tp, CPU_REGS_OFFSET_TP(sp)
ld t0, CPU_REGS_OFFSET_T0(sp)
ld t1, CPU_REGS_OFFSET_T1(sp)
ld t2, CPU_REGS_OFFSET_T2(sp)
ld s0, CPU_REGS_OFFSET_S0(sp)
ld s1, CPU_REGS_OFFSET_S1(sp)
ld a0, CPU_REGS_OFFSET_A0(sp)
ld a1, CPU_REGS_OFFSET_A1(sp)
ld a2, CPU_REGS_OFFSET_A2(sp)
ld a3, CPU_REGS_OFFSET_A3(sp)
ld a4, CPU_REGS_OFFSET_A4(sp)
ld a5, CPU_REGS_OFFSET_A5(sp)
ld a6, CPU_REGS_OFFSET_A6(sp)
ld a7, CPU_REGS_OFFSET_A7(sp)
ld s2, CPU_REGS_OFFSET_S2(sp)
ld s3, CPU_REGS_OFFSET_S3(sp)
ld s4, CPU_REGS_OFFSET_S4(sp)
ld s5, CPU_REGS_OFFSET_S5(sp)
ld s6, CPU_REGS_OFFSET_S6(sp)
ld s7, CPU_REGS_OFFSET_S7(sp)
ld s8, CPU_REGS_OFFSET_S8(sp)
ld s9, CPU_REGS_OFFSET_S9(sp)
ld s10, CPU_REGS_OFFSET_S10(sp)
ld s11, CPU_REGS_OFFSET_S11(sp)
ld t3, CPU_REGS_OFFSET_T3(sp)
ld t4, CPU_REGS_OFFSET_T4(sp)
ld t5, CPU_REGS_OFFSET_T5(sp)
ld t6, CPU_REGS_OFFSET_T6(sp)
/* Restore sp last: the reloaded value is the post-allocation sp that
 * cpu_ctx_save stored, so adding CPU_REGS_OFFSET_LAST back yields the
 * original pre-trap stack pointer. */
ld sp, CPU_REGS_OFFSET_SP(sp)
addi sp, sp, CPU_REGS_OFFSET_LAST
.endm
#endif /* ASSEMBLER */
#endif /* RISCV_CPU_H */

View File

@@ -0,0 +1,61 @@
/*
* Copyright (C) 2023-2025 Intel Corporation.
*
* SPDX-License-Identifier: BSD-3-Clause
*
* Authors:
* Haicheng Li <haicheng.li@intel.com>
*/
#ifndef RISCV_OFFSET_H
#define RISCV_OFFSET_H
/* The following symbols must remain consistent:
* - CPU_REGS_OFFSET_* macros in `include/arch/riscv/asm/offset.h`
* - struct cpu_regs
* - cpu_ctx_save/cpu_ctx_restore macros used in assembly
*/
/* Byte offsets of each field of struct cpu_regs (one 8-byte slot per
 * register), consumed by the cpu_ctx_save/cpu_ctx_restore assembly macros.
 */
/* General purpose registers. */
#define CPU_REGS_OFFSET_ZERO 0x0
#define CPU_REGS_OFFSET_RA 0x8
#define CPU_REGS_OFFSET_SP 0x10
#define CPU_REGS_OFFSET_GP 0x18
#define CPU_REGS_OFFSET_TP 0x20
#define CPU_REGS_OFFSET_T0 0x28
#define CPU_REGS_OFFSET_T1 0x30
#define CPU_REGS_OFFSET_T2 0x38
#define CPU_REGS_OFFSET_S0 0x40
#define CPU_REGS_OFFSET_S1 0x48
#define CPU_REGS_OFFSET_A0 0x50
#define CPU_REGS_OFFSET_A1 0x58
#define CPU_REGS_OFFSET_A2 0x60
#define CPU_REGS_OFFSET_A3 0x68
#define CPU_REGS_OFFSET_A4 0x70
#define CPU_REGS_OFFSET_A5 0x78
#define CPU_REGS_OFFSET_A6 0x80
#define CPU_REGS_OFFSET_A7 0x88
#define CPU_REGS_OFFSET_S2 0x90
#define CPU_REGS_OFFSET_S3 0x98
#define CPU_REGS_OFFSET_S4 0xA0
#define CPU_REGS_OFFSET_S5 0xA8
#define CPU_REGS_OFFSET_S6 0xB0
#define CPU_REGS_OFFSET_S7 0xB8
#define CPU_REGS_OFFSET_S8 0xC0
#define CPU_REGS_OFFSET_S9 0xC8
#define CPU_REGS_OFFSET_S10 0xD0
#define CPU_REGS_OFFSET_S11 0xD8
#define CPU_REGS_OFFSET_T3 0xE0
#define CPU_REGS_OFFSET_T4 0xE8
#define CPU_REGS_OFFSET_T5 0xF0
#define CPU_REGS_OFFSET_T6 0xF8
/* Control and Status Registers (CSRs). */
#define CPU_REGS_OFFSET_EPC 0x100
#define CPU_REGS_OFFSET_STATUS 0x108
#define CPU_REGS_OFFSET_CAUSE 0x110
#define CPU_REGS_OFFSET_TVAL 0x118
#define CPU_REGS_OFFSET_SCRATCH 0x120
/* Total size of the register frame pushed by cpu_ctx_save.  The last saved
 * field (sscratch) ends at CPU_REGS_OFFSET_SCRATCH + 8 = 0x128; this is
 * rounded up to 0x130 so the stack pointer stays 16-byte aligned as the
 * RISC-V psABI requires.  Defining LAST as the bare offset of the last
 * field (0x120) would make the frame 8 bytes too small: the sscratch store
 * in cpu_ctx_save would then clobber the caller's stack.
 */
#define CPU_REGS_OFFSET_LAST 0x130
#endif /* RISCV_OFFSET_H */