acrn-hypervisor/hypervisor/arch/riscv/sbi.c
Jian Jun Chen 0a8eb09454 hv: riscv: fix the implementation of send_ipi
send_single_ipi/send_dest_ipi_mask call sbi_send_ipi. According
to the SBI spec:
- uint64_t hart_mask is a scalar bit-vector containing hartids
- uint64_t hart_mask_base is the starting hartid from which the
  bit-vector must be computed

Logical pCPU ID and pCPU ID mask need to be converted to physical hart
ID mask and physical hart ID base.

Tracked-On: #8811
Signed-off-by: Jian Jun Chen <jian.jun.chen@intel.com>
Acked-by: Wang, Yu1 <yu1.wang@intel.com>
2025-10-10 11:15:17 +08:00
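
For illustration (hart IDs chosen arbitrarily): if logical pCPUs 0 and 2 map to physical hart IDs 4 and 6, the converted SBI arguments are hart_mask_base = 4 and hart_mask = 0x5 (bits 0 and 2 set), so the call becomes sbi_send_ipi(0x5UL, 4UL) and targets harts 4 and 6.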


/*
 * Copyright (C) 2025 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Authors:
 *   Haicheng Li <haicheng.li@intel.com>
 */
#include <types.h>
#include <asm/sbi.h>
#include <logmsg.h>
#include <per_cpu.h>
#include <bits.h>

/**
 * An ECALL is used as the control transfer instruction between the
 * supervisor and the SEE.
 *
 * a7 encodes the SBI extension ID (EID).
 *
 * a6 encodes the SBI function ID (FID) for a given extension ID
 * encoded in a7 for any SBI extension defined in or after SBI v0.2.
 *
 * a0 through a5 contain the arguments for the SBI function call.
 * Registers that are not defined in the SBI function call are not
 * reserved.
 *
 * All registers except a0 & a1 must be preserved across an SBI call
 * by the callee.
 *
 * SBI functions must return a pair of values in a0 and a1, with a0
 * returning an error code. This is analogous to returning the C
 * structure 'sbiret'.
 */
static sbiret sbi_ecall(uint64_t arg0, uint64_t arg1, uint64_t arg2,
			uint64_t arg3, uint64_t arg4, uint64_t arg5,
			uint64_t func, uint64_t ext)
{
	sbiret ret;
	register uint64_t a0 asm ("a0") = arg0;
	register uint64_t a1 asm ("a1") = arg1;
	register uint64_t a2 asm ("a2") = arg2;
	register uint64_t a3 asm ("a3") = arg3;
	register uint64_t a4 asm ("a4") = arg4;
	register uint64_t a5 asm ("a5") = arg5;
	register uint64_t a6 asm ("a6") = func;
	register uint64_t a7 asm ("a7") = ext;

	asm volatile (
		"ecall \n\t"
		: "+r" (a0), "+r" (a1)
		: "r" (a2), "r" (a3), "r" (a4), "r" (a5), "r" (a6), "r" (a7)
		: "memory"
	);

	ret.error = a0;
	ret.value = a1;

	return ret;
}
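
/*
 * Example (illustrative, raw EID/FID values shown for clarity): probing the
 * SBI specification version via the Base extension (EID 0x10, FID 0) maps to
 *
 *	sbiret v = sbi_ecall(0UL, 0UL, 0UL, 0UL, 0UL, 0UL, 0UL, 0x10UL);
 *
 * where v.error carries the SBI error code and v.value the spec version.
 * The wrappers below pass the EID/FID macros from asm/sbi.h instead of raw
 * numbers.
 */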

/**
 * Implemented IPI functionality using the SBI IPI Extension (EID #0x735049).
 * Legacy SBI extensions are not supported in ACRN.
 */
static int64_t sbi_send_ipi(uint64_t mask, uint64_t mask_base)
{
	sbiret ret = sbi_ecall(mask, mask_base, 0UL, 0UL, 0UL, 0UL, SBI_IPI_FID_SEND_IPI, SBI_EID_IPI);

	if (ret.error != SBI_SUCCESS) {
		pr_err("%s: Failed to send IPI by SBI, error code: %lx", __func__, ret.error);
	}

	return ret.error;
}
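
/*
 * Example (hart IDs illustrative): sbi_send_ipi(0x5UL, 4UL) targets harts
 * 4 and 6, since bit N of 'mask' selects hart 'mask_base + N'.
 */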

/**
 * sbi_hsm_start_hart - Start a RISC-V hart using SBI HSM extension
 * @hartid: The hardware thread (hart) ID to start
 * @addr: The start address for the hart
 * @arg: Argument to pass to the hart at startup
 *
 * This function invokes the SBI HSM extension to start a specified hart
 * at the given address with the provided argument.
 *
 * Returns the SBI error code from the operation.
 */
int64_t sbi_hsm_start_hart(uint64_t hartid, uint64_t addr, uint64_t arg)
{
	sbiret ret;

	ret = sbi_ecall(hartid, addr, arg, 0UL, 0UL, 0UL, SBI_HSM_FID_HART_START,
			SBI_EID_HSM);
	if (ret.error != SBI_SUCCESS) {
		pr_err("%s: Failed to start hart (%lx) by SBI, error code: %lx",
			__func__, hartid, ret.error);
	}

	return ret.error;
}
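
/*
 * Usage sketch (the entry-point symbol and its physical address are
 * hypothetical here): bringing a secondary hart online during SMP init
 * could look like
 *
 *	sbi_hsm_start_hart(per_cpu(arch.hart_id, pcpu_id),
 *			   secondary_entry_hpa, (uint64_t)pcpu_id);
 *
 * Per the SBI HSM HART_START semantics, the target hart then starts
 * executing in supervisor mode at 'addr' with a0 = hartid and a1 = 'arg'.
 */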

/**
 * msg_type is currently unused.
 *
 * At present, only IPI_NOTIFY_CPU is supported, covering two use cases:
 * - SMP call
 * - Kick pCPU out of non-root mode
 *
 * Callers should invoke this function with:
 *   send_single_ipi(pcpu_id, IPI_NOTIFY_CPU);
 *
 * msg_type is retained for future extensions and to stay aligned with
 * the function prototype used on other architectures (e.g. x86).
 */
void send_single_ipi(uint16_t pcpu_id, __unused uint32_t msg_type)
{
	uint64_t hmask = 1UL;
	uint64_t hbase = per_cpu(arch.hart_id, pcpu_id);

	sbi_send_ipi(hmask, hbase);
}

/**
 * Similar to send_single_ipi() with regard to msg_type.
 *
 * Callers should invoke this function with:
 *   send_dest_ipi_mask(dest_mask, IPI_NOTIFY_CPU);
 */
void send_dest_ipi_mask(uint64_t dest_mask, __unused uint32_t msg_type)
{
	uint16_t i;
	uint32_t hart_id, hart_id_max = 0U;
	uint64_t hmask = 0UL, hbase = 0UL;
	int64_t ret;

	i = ffs64(dest_mask);
	while (i != INVALID_BIT_INDEX) {
		bitmap_clear_non_atomic(i, &dest_mask);
		hart_id = per_cpu(arch.hart_id, i);
		if (hmask) {
			if ((hbase + BITS_PER_LONG <= hart_id) ||
			    (hart_id + BITS_PER_LONG <= hart_id_max)) {
				/*
				 * Issue the SBI IPI when
				 * 1) The next hart_id is too large
				 * 2) The next hart_id is too small
				 */
				ret = sbi_send_ipi(hmask, hbase);
				if (ret != SBI_SUCCESS) {
					pr_err("Failed to send IPI to harts (base 0x%lx, mask 0x%lx) by SBI",
						hbase, hmask);
				}
				hmask = 0UL;
			} else if (hart_id < hbase) {
				/*
				 * Hart IDs corresponding to logical pCPU IDs
				 * are not sorted in ascending order, need to
				 * adjust the hbase and hmask
				 */
				hmask <<= (hbase - hart_id);
				hbase = hart_id;
			} else if (hart_id > hart_id_max) {
				hart_id_max = hart_id;
			}
		}
		if (!hmask) {
			hbase = hart_id;
			hart_id_max = hart_id;
		}
		bitmap_set_non_atomic(hart_id - hbase, &hmask);
		i = ffs64(dest_mask);
	}

	if (hmask) {
		ret = sbi_send_ipi(hmask, hbase);
		if (ret != SBI_SUCCESS) {
			pr_err("Failed to send IPI to harts (base 0x%lx, mask 0x%lx) by SBI",
				hbase, hmask);
		}
	}
}
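
/*
 * Worked example of the batching above (hart IDs hypothetical): suppose
 * dest_mask selects pCPUs whose hart IDs are 2, 5 and 70, visited in that
 * order.
 *  - hart 2:  hmask == 0, so hbase = 2, hmask = 0x1
 *  - hart 5:  fits the 64-bit window at hbase 2, hmask = 0x9 (bits 0 and 3)
 *  - hart 70: hbase + 64 <= 70, so sbi_send_ipi(0x9, 2) is issued first,
 *             then hbase = 70, hmask = 0x1
 * The trailing sbi_send_ipi(0x1, 70) after the loop covers the last window.
 */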

/**
 * Implemented Timer functionality using the SBI Timer Extension (EID #0x54494D45).
 */
int sbi_set_timer(uint64_t stime_value)
{
	sbiret ret = sbi_ecall(stime_value, 0UL, 0UL, 0UL, 0UL, 0UL, SBI_TIMER_FID_SET_TIMER, SBI_EID_TIMER);

	if (ret.error != SBI_SUCCESS) {
		pr_err("%s: Failed to set Timer by SBI, error code: %lx", __func__, ret.error);
	}

	return ret.error;
}
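
/*
 * Usage sketch (cycles_now() is a placeholder for whatever time source the
 * caller uses): programming the next tick 'delta' ticks from now could be
 *
 *	sbi_set_timer(cycles_now() + delta);
 *
 * Per the SBI TIME extension, this arms the per-hart timer for the given
 * absolute time and clears any pending supervisor timer interrupt.
 */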