hv: clean up legacy terminology in RTCM module

This patch updates the following terminology to match
the latest TCC Spec:
  PTCT  -> RTCT
  PTCM  -> RTCM
  pSRAM -> Software SRAM

Tracked-On: #5649
Signed-off-by: Yonghua Huang <yonghua.huang@intel.com>
Author:    Yonghua Huang <yonghua.huang@intel.com>
Date:      2021-01-26 21:15:13 +08:00
Committer: wenlingz
Parent:    806f479108
Commit:    a6420e8cfa

13 changed files with 201 additions and 187 deletions

View File

@@ -24,7 +24,6 @@ struct rtct_entry {
 	uint32_t data[64];
 } __packed;

 struct rtct_entry_data_psram {
 	uint32_t cache_level;
 	uint64_t base;
@@ -33,7 +32,6 @@ struct rtct_entry_data_psram {
 	uint32_t apic_id_tbl[64];
 } __packed;

 struct rtct_entry_data_mem_hi_latency {
 	uint32_t hierarchy;
 	uint32_t clock_cycles;

View File

@@ -130,7 +130,7 @@ static struct acpi_mcfg_allocation *parse_mcfg_allocation_tables(const uint8_t *
 /* put all ACPI fix up code here */
 int32_t acpi_fixup(void)
 {
-	uint8_t *facp_addr = NULL, *facs_addr = NULL, *mcfg_addr = NULL, *ptct_tbl_addr = NULL;
+	uint8_t *facp_addr = NULL, *facs_addr = NULL, *mcfg_addr = NULL, *rtct_tbl_addr = NULL;
 	struct acpi_mcfg_allocation *mcfg_table = NULL;
 	int32_t ret = 0;
 	struct acpi_generic_address pm1a_cnt, pm1a_evt;
@@ -169,9 +169,9 @@ int32_t acpi_fixup(void)
 		}
 	}

-	ptct_tbl_addr = (uint8_t *)get_acpi_tbl(ACPI_SIG_PTCT);
-	if (ptct_tbl_addr != NULL) {
-		set_ptct_tbl((void *)ptct_tbl_addr);
+	rtct_tbl_addr = (uint8_t *)get_acpi_tbl(ACPI_SIG_RTCT);
+	if (rtct_tbl_addr != NULL) {
+		set_rtct_tbl((void *)rtct_tbl_addr);
 	}

 	if ((facp_addr == NULL) || (facs_addr == NULL)

View File

@@ -277,13 +277,13 @@ config CDP_ENABLED
 	  software configurable manner, depending on hardware support.

 config PSRAM_ENABLED
-	bool "Enable pseudo-SRAM (pSRAM) support"
+	bool "Enable Software SRAM support"
 	depends on !CDP_ENABLED
 	default n
 	help
-	  This will enable RTVM to make use of pSRAM to improve the performance
-	  of Real-Time applications. pSRAM essentially a block of cache, and is separated via
-	  CAT and protected by some methods. pSRAM support and CDP support cannot co-exist.
+	  This will enable RTVM to make use of Software SRAM to improve the performance
+	  of Real-Time applications. Software SRAM essentially a block of cache, and is separated via
+	  CAT and protected by some methods. Software SRAM support and CDP support cannot co-exist.

 config GPU_SBDF
 	hex "Segment, Bus, Device, and function of the GPU"

View File

@@ -264,13 +264,13 @@ void init_pcpu_post(uint16_t pcpu_id)
 		ASSERT(get_pcpu_id() == BSP_CPU_ID, "");

-		init_psram(true);
+		init_software_sram(true);
 	} else {
 		pr_dbg("Core %hu is up", pcpu_id);

 		pr_warn("Skipping VM configuration check which should be done before building HV binary.");

-		init_psram(false);
+		init_software_sram(false);
 		/* Initialize secondary processor interrupts. */
 		init_interrupt(pcpu_id);
@@ -441,7 +441,7 @@ void cpu_dead(void)
 	if (bitmap_test(pcpu_id, &pcpu_active_bitmap)) {
 		/* clean up native stuff */
 		vmx_off();

-		/* TODO: a cpu dead can't effect the RTVM which use pSRAM */
+		/* TODO: a cpu dead can't effect the RTVM which use Software SRAM */
 		cache_flush_invalidate_all();

 		/* Set state to show CPU is dead */

View File

@@ -179,38 +179,43 @@ void ept_flush_leaf_page(uint64_t *pge, uint64_t size)
 	flush_base_hpa = (*pge & (~(size - 1UL)));
 	flush_end_hpa = flush_base_hpa + size;

-	/* When pSRAM is not intialized, both psram_area_bottom and psram_area_top is 0,
+	/* When Software SRAM is not initialized, both software_sram_area_bottom
+	 * and software_sram_area_top is 0,
 	 * so the below if/else will have no use
 	 */
-	if (flush_base_hpa < psram_area_bottom) {
-		/* Only flush [flush_base_hpa, psram_area_bottom) and [psram_area_top, flush_base_hpa),
-		 * ignore [psram_area_bottom, psram_area_top)
+	if (flush_base_hpa < software_sram_area_bottom) {
+		/* Only flush [flush_base_hpa, software_sram_area_bottom)
+		 * and [software_sram_area_top, flush_base_hpa),
+		 * ignore [software_sram_area_bottom, software_sram_area_top)
 		 */
-		if (flush_end_hpa > psram_area_top) {
-			/* Only flush [flush_base_hpa, psram_area_bottom) and [psram_area_top, flush_base_hpa),
-			 * ignore [psram_area_bottom, psram_area_top)
+		if (flush_end_hpa > software_sram_area_top) {
+			/* Only flush [flush_base_hpa, software_sram_area_bottom)
+			 * and [software_sram_area_top, flush_base_hpa),
+			 * ignore [software_sram_area_bottom, software_sram_area_top)
 			 */
-			flush_size = psram_area_bottom - flush_base_hpa;
+			flush_size = software_sram_area_bottom - flush_base_hpa;
 			hva = hpa2hva(flush_base_hpa);
 			stac();
 			flush_address_space(hva, flush_size);
 			clac();

-			flush_size = flush_end_hpa - psram_area_top;
-			flush_base_hpa = psram_area_top;
-		} else if (flush_end_hpa > psram_area_bottom) {
-			/* Only flush [flush_base_hpa, psram_area_bottom) and
-			 * ignore [psram_area_bottom, flush_end_hpa)
+			flush_size = flush_end_hpa - software_sram_area_top;
+			flush_base_hpa = software_sram_area_top;
+		} else if (flush_end_hpa > software_sram_area_bottom) {
+			/* Only flush [flush_base_hpa, software_sram_area_bottom) and
+			 * ignore [software_sram_area_bottom, flush_end_hpa)
 			 */
-			flush_size = psram_area_bottom - flush_base_hpa;
+			flush_size = software_sram_area_bottom - flush_base_hpa;
 		}
-	} else if (flush_base_hpa < psram_area_top) {
-		if (flush_end_hpa <= psram_area_top) {
+	} else if (flush_base_hpa < software_sram_area_top) {
+		if (flush_end_hpa <= software_sram_area_top) {
 			flush_size = 0UL;
 		} else {
-			/* Only flush [psram_area_top, flush_end_hpa) and ignore [flush_base_hpa, psram_area_top) */
-			flush_base_hpa = psram_area_top;
-			flush_size = flush_end_hpa - psram_area_top;
+			/* Only flush [software_sram_area_top, flush_end_hpa)
+			 * and ignore [flush_base_hpa, software_sram_area_top)
+			 */
+			flush_base_hpa = software_sram_area_top;
+			flush_size = flush_end_hpa - software_sram_area_top;
 		}
 	}

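The comments in the hunk above encode an interval-subtraction rule: flush only the parts of [flush_base_hpa, flush_end_hpa) that fall outside [software_sram_area_bottom, software_sram_area_top). Below is a minimal, self-contained sketch of the same case analysis in plain C, with made-up inputs and none of the ACRN flush/stac/clac primitives:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct range {
	uint64_t base;
	uint64_t size;
};

/*
 * Split [base, base + size) against a reserved window [bottom, top) and
 * return the sub-ranges that still need a cache flush. This mirrors the
 * if/else cases in ept_flush_leaf_page(): when the window is uninitialized
 * (bottom == top == 0), the input range comes back unchanged.
 */
static int split_flush_range(uint64_t base, uint64_t size,
		uint64_t bottom, uint64_t top, struct range out[2])
{
	uint64_t end = base + size;
	int n = 0;

	if (base < bottom) {
		/* the part below the window is always flushed */
		out[n].base = base;
		out[n].size = ((end < bottom) ? end : bottom) - base;
		n++;
		if (end > top) {
			/* plus the part above the window; the window itself is skipped */
			out[n].base = top;
			out[n].size = end - top;
			n++;
		}
	} else if (base < top) {
		if (end > top) {
			/* only the tail above the window is flushed */
			out[n].base = top;
			out[n].size = end - top;
			n++;
		}
		/* else: entirely inside the window, nothing to flush */
	} else {
		/* entirely above the window */
		out[n].base = base;
		out[n].size = size;
		n++;
	}
	return n;
}

int main(void)
{
	struct range r[2];
	/* made-up inputs: a 1GB region overlapping an 8MB window at 0x40080000 */
	int i, n = split_flush_range(0x40000000UL, 0x40000000UL,
			0x40080000UL, 0x40880000UL, r);

	for (i = 0; i < n; i++) {
		printf("flush [0x%" PRIx64 ", 0x%" PRIx64 ")\n",
			r[i].base, r[i].base + r[i].size);
	}
	return 0;
}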
View File

@@ -14,7 +14,7 @@
 #define ENTRY_HPA1_LOW_PART1	2U
 #define ENTRY_HPA1_LOW_PART2	4U
-#define ENTRY_PSRAM		3U
+#define ENTRY_SOFTWARE_SRAM	3U
 #define ENTRY_HPA1_HI		8U

 static struct e820_entry sos_vm_e820[E820_MAX_ENTRIES];
@@ -133,20 +133,20 @@ static const struct e820_entry pre_ve820_template[E820_MAX_ENTRIES] = {
 		.length = 0x10000UL, /* 64KB */
 		.type = E820_TYPE_RESERVED
 	},
-	/* pSRAM segment splits the lowmem into two parts */
+	/* Software SRAM segment splits the lowmem into two parts */
 	{	/* part1 of lowmem of hpa1*/
 		.baseaddr = MEM_1M, /* 1MB */
-		.length = PSRAM_BASE_GPA - MEM_1M,
+		.length = SOFTWARE_SRAM_BASE_GPA - MEM_1M,
 		.type = E820_TYPE_RAM
 	},
-	{	/* pSRAM */
-		.baseaddr = PSRAM_BASE_GPA,
-		.length = PSRAM_MAX_SIZE,
+	{	/* Software SRAM */
+		.baseaddr = SOFTWARE_SRAM_BASE_GPA,
+		.length = SOFTWARE_SRAM_MAX_SIZE,
 		.type = E820_TYPE_RESERVED
 	},
 	{	/* part2 of lowmem of hpa1*/
-		.baseaddr = PSRAM_BASE_GPA + PSRAM_MAX_SIZE,
-		.length = MEM_2G - MEM_1M - (PSRAM_BASE_GPA + PSRAM_MAX_SIZE),
+		.baseaddr = SOFTWARE_SRAM_BASE_GPA + SOFTWARE_SRAM_MAX_SIZE,
+		.length = MEM_2G - MEM_1M - (SOFTWARE_SRAM_BASE_GPA + SOFTWARE_SRAM_MAX_SIZE),
 		.type = E820_TYPE_RAM
 	},
 	{	/* ACPI Reclaim */
@@ -182,28 +182,33 @@ static inline uint64_t add_ram_entry(struct e820_entry *entry, uint64_t gpa, uin
 *
 * ve820 layout for pre-launched VM:
 *
 * entry0: usable under 1MB
 * entry1: reserved for MP Table/ACPI RSDP from 0xf0000 to 0xfffff
-* entry2: usable, the part1 of hpa1 in lowmem, from 0x100000, and up to the bottom of pSRAM area.
-* entry3: reserved, pSRAM segment, which will be identically mapped to physical pSRAM segment rather than hpa1.
-* entry4: usable, the part2 of hpa1 in lowmem, from the ceil of pSRAM segment, and up to 2G-1M.
-* entry5: ACPI Reclaim from 0x7ff00000 to 0x7ffeffff
-* entry6: ACPI NVS from 0x7fff0000 to 0x7fffffff
-* entry7: reserved for 32bit PCI hole from 0x80000000 to 0xffffffff
-* (entry8): usable for
-*             a) hpa1_hi, if hpa1 > 2GB - PSRAM_MAX_SIZE
-*             b) hpa2, if (hpa1 + hpa2) < 2GB - PSRAM_MAX_SIZE
-*             c) hpa2_lo, if hpa1 < 2GB - PSRAM_MAX_SIZE and (hpa1 + hpa2) > 2GB - PSRAM_MAX_SIZE
-* (entry9): usable for
-*             a) hpa2, if hpa1 > 2GB - PSRAM_MAX_SIZE
-*             b) hpa2_hi, if hpa1 < 2GB - PSRAM_MAX_SIZE and (hpa1 + hpa2) > 2GB - PSRAM_MAX_SIZE
+* entry2: usable, the part1 of hpa1 in lowmem, from 0x100000,
+*         and up to the bottom of Software SRAM area.
+* entry3: reserved, Software SRAM segment, which will be identically mapped to physical
+*         Software SRAM segment rather than hpa1.
+* entry4: usable, the part2 of hpa1 in lowmem, from the ceil of Software SRAM segment,
+*         and up to 2G-1M.
+* entry5: ACPI Reclaim from 0x7ff00000 to 0x7ffeffff
+* entry6: ACPI NVS from 0x7fff0000 to 0x7fffffff
+* entry7: reserved for 32bit PCI hole from 0x80000000 to 0xffffffff
+* (entry8): usable for
+*             a) hpa1_hi, if hpa1 > 2GB - SOFTWARE_SRAM_MAX_SIZE
+*             b) hpa2, if (hpa1 + hpa2) < 2GB - SOFTWARE_SRAM_MAX_SIZE
+*             c) hpa2_lo,
+*                if hpa1 < 2GB - SOFTWARE_SRAM_MAX_SIZE and (hpa1 + hpa2) > 2GB - SOFTWARE_SRAM_MAX_SIZE
+* (entry9): usable for
+*             a) hpa2, if hpa1 > 2GB - SOFTWARE_SRAM_MAX_SIZE
+*             b) hpa2_hi,
+*                if hpa1 < 2GB - SOFTWARE_SRAM_MAX_SIZE and (hpa1 + hpa2) > 2GB - SOFTWARE_SRAM_MAX_SIZE
 */
 /*
 	The actual memory mapping under 2G looks like below:
 	|<--1M-->|
 	|<-----hpa1_low_part1--->|
-	|<---pSRAM--->|
+	|<---Software SRAM--->|
 	|<-----hpa1_low_part2--->|
 	|<---Non-mapped hole (if there is)-->|
 	|<---1M ACPI NVS/DATA--->|
@@ -213,8 +218,8 @@ void create_prelaunched_vm_e820(struct acrn_vm *vm)
 	struct acrn_vm_config *vm_config = get_vm_config(vm->vm_id);
 	uint64_t gpa_start = 0x100000000UL;
 	uint64_t hpa1_hi_size, hpa2_lo_size;
-	uint64_t lowmem_max_length = MEM_2G - PSRAM_MAX_SIZE;
-	uint64_t hpa1_part1_max_length = PSRAM_BASE_GPA - MEM_1M;
+	uint64_t lowmem_max_length = MEM_2G - SOFTWARE_SRAM_MAX_SIZE;
+	uint64_t hpa1_part1_max_length = SOFTWARE_SRAM_BASE_GPA - MEM_1M;
 	uint64_t remaining_hpa2_size = vm_config->memory.size_hpa2;
 	uint32_t entry_idx = ENTRY_HPA1_HI;
@@ -229,12 +234,17 @@ void create_prelaunched_vm_e820(struct acrn_vm *vm)
 		gpa_start = add_ram_entry((vm->e820_entries + entry_idx), gpa_start, hpa1_hi_size);
 		entry_idx++;
 	} else if (vm_config->memory.size <= MEM_1M + hpa1_part1_max_length + MEM_1M) {
-		/* in this case, hpa1 is only enough for the first 1M + part1 + last 1M (ACPI NVS/DATA), so part2 will be empty */
-		vm->e820_entries[ENTRY_HPA1_LOW_PART1].length = vm_config->memory.size - MEM_2M; /* 2M includes the first and last 1M */
+		/*
+		 * In this case, hpa1 is only enough for the first
+		 * 1M + part1 + last 1M (ACPI NVS/DATA), so part2 will be empty.
+		 * Below 'MEM_2M' includes the first and last 1M
+		 */
+		vm->e820_entries[ENTRY_HPA1_LOW_PART1].length = vm_config->memory.size - MEM_2M;
 		vm->e820_entries[ENTRY_HPA1_LOW_PART2].length = 0;
 	} else {
 		/* Otherwise, part2 is not empty. */
-		vm->e820_entries[ENTRY_HPA1_LOW_PART2].length = vm_config->memory.size - PSRAM_BASE_GPA - MEM_1M;
+		vm->e820_entries[ENTRY_HPA1_LOW_PART2].length =
+			vm_config->memory.size - SOFTWARE_SRAM_BASE_GPA - MEM_1M;
 		/* need to set gpa_start for hpa2 */
 	}

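The entry8/entry9 conditions above all hinge on the same threshold, 2GB - SOFTWARE_SRAM_MAX_SIZE: lowmem can hold at most that much of hpa1, and the remainder is placed above 4GB. A back-of-the-envelope sketch of that split follows, with a made-up hpa1 size; the constants mirror the diff, but this is not the hypervisor's actual sizing code, only the arithmetic the comment describes:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MEM_2G                  0x80000000UL
#define SOFTWARE_SRAM_MAX_SIZE  0x00800000UL    /* 8MB reserved hole in lowmem */

int main(void)
{
	uint64_t hpa1 = 0x88000000UL;    /* example guest RAM: 2GB + 128MB */
	uint64_t lowmem_max_length = MEM_2G - SOFTWARE_SRAM_MAX_SIZE;

	/* lowmem takes as much of hpa1 as fits below the 2GB cap (minus the
	 * Software SRAM hole); the rest would become hpa1_hi above 4GB */
	uint64_t hpa1_low = (hpa1 > lowmem_max_length) ? lowmem_max_length : hpa1;
	uint64_t hpa1_hi_size = hpa1 - hpa1_low;

	printf("lowmem share of hpa1: 0x%" PRIx64 "\n", hpa1_low);
	printf("hpa1_hi above 4GB:    0x%" PRIx64 "\n", hpa1_hi_size);
	return 0;
}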
View File

@@ -231,12 +231,12 @@ static void prepare_prelaunched_vm_memmap(struct acrn_vm *vm, const struct acrn_
 		if (entry->length == 0UL) {
 			continue;
 		} else {
-			if (is_psram_initialized && (entry->baseaddr == PSRAM_BASE_GPA) &&
+			if (is_sw_sram_initialized && (entry->baseaddr == SOFTWARE_SRAM_BASE_GPA) &&
 				((vm_config->guest_flags & GUEST_FLAG_RT) != 0U)){
-				/* pass through pSRAM to pre-RTVM */
-				pr_fatal("%s, %d___", __func__, __LINE__);
+				/* pass through Software SRAM to pre-RTVM */
 				ept_add_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
-					PSRAM_BASE_HPA, PSRAM_BASE_GPA, PSRAM_MAX_SIZE, EPT_RWX | EPT_WB);
+					SOFTWARE_SRAM_BASE_HPA, SOFTWARE_SRAM_BASE_GPA,
+					SOFTWARE_SRAM_MAX_SIZE, EPT_RWX | EPT_WB);
 				continue;
 			}
 		}
@@ -365,8 +365,8 @@ static void prepare_sos_vm_memmap(struct acrn_vm *vm)
 	pci_mmcfg = get_mmcfg_region();
 	ept_del_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp, pci_mmcfg->address, get_pci_mmcfg_size(pci_mmcfg));

-	/* TODO: remove pSRAM from SOS prevent SOS to use clflush to flush the pSRAM cache.
-	 * If we remove this EPT mapping from the SOS, the ACRN-DM can't do pSRAM EPT mapping
+	/* TODO: remove Software SRAM from SOS prevent SOS to use clflush to flush the Software SRAM cache.
+	 * If we remove this EPT mapping from the SOS, the ACRN-DM can't do Software SRAM EPT mapping
 	 * because the SOS can't get the HPA of this memory region.
 	 */
 }

View File

@@ -404,7 +404,7 @@ static int32_t wbinvd_vmexit_handler(struct acrn_vcpu *vcpu)
 	struct acrn_vcpu *other;

 	/* GUEST_FLAG_RT has not set in post-launched RTVM before it has been created */
-	if ((!is_psram_initialized) && (!has_rt_vm())) {
+	if ((!is_sw_sram_initialized) && (!has_rt_vm())) {
 		cache_flush_invalidate_all();
 	} else {
 		if (is_rt_vm(vcpu->vm)) {

View File

@@ -11,27 +11,27 @@
 #include <rtcm.h>

-uint64_t psram_area_bottom;
-uint64_t psram_area_top;
+uint64_t software_sram_area_bottom;
+uint64_t software_sram_area_top;

-/* is_psram_initialized is used to tell whether psram is successfully initialized for all cores */
-volatile bool is_psram_initialized = false;
+/* is_sw_sram_initialized is used to tell whether Software SRAM is successfully initialized for all cores */
+volatile bool is_sw_sram_initialized = false;

 #ifdef CONFIG_PSRAM_ENABLED
-static struct ptct_entry_data_ptcm_binary *ptcm_binary = NULL;
-static struct acpi_table_header *acpi_ptct_tbl = NULL;
+static struct rtct_entry_data_rtcm_binary *rtcm_binary = NULL;
+static struct acpi_table_header *acpi_rtct_tbl = NULL;

-static inline void ptcm_set_nx(bool add)
+static inline void rtcm_set_nx(bool add)
 {
-	ppt_set_nx_bit((uint64_t)hpa2hva(ptcm_binary->address), ptcm_binary->size, add);
+	ppt_set_nx_bit((uint64_t)hpa2hva(rtcm_binary->address), rtcm_binary->size, add);
 }

-static inline void ptcm_flush_binary_tlb(void)
+static inline void rtcm_flush_binary_tlb(void)
 {
-	uint64_t linear_addr, start_addr = (uint64_t)hpa2hva(ptcm_binary->address);
-	uint64_t end_addr = start_addr + ptcm_binary->size;
+	uint64_t linear_addr, start_addr = (uint64_t)hpa2hva(rtcm_binary->address);
+	uint64_t end_addr = start_addr + rtcm_binary->size;

 	for (linear_addr = start_addr; linear_addr < end_addr; linear_addr += PAGE_SIZE) {
 		invlpg(linear_addr);
@@ -40,128 +40,129 @@ static inline void ptcm_flush_binary_tlb(void)
 }

-static inline void *get_ptct_address()
+static inline void *get_rtct_address()
 {
-	return (void *)acpi_ptct_tbl + sizeof(*acpi_ptct_tbl);
+	return (void *)acpi_rtct_tbl + sizeof(*acpi_rtct_tbl);
 }

-void set_ptct_tbl(void *ptct_tbl_addr)
+void set_rtct_tbl(void *rtct_tbl_addr)
 {
-	acpi_ptct_tbl = ptct_tbl_addr;
+	acpi_rtct_tbl = rtct_tbl_addr;
 }

-static void parse_ptct(void)
+static void parse_rtct(void)
 {
-	struct ptct_entry *entry;
-	struct ptct_entry_data_psram *psram_entry;
+	struct rtct_entry *entry;
+	struct rtct_entry_data_software_sram *sw_sram_entry;

-	if (acpi_ptct_tbl != NULL) {
-		pr_info("found PTCT subtable in HPA %llx, length: %d", acpi_ptct_tbl, acpi_ptct_tbl->length);
-		entry = get_ptct_address();
-		psram_area_bottom = PSRAM_BASE_HPA;
+	if (acpi_rtct_tbl != NULL) {
+		pr_info("found RTCT subtable in HPA %llx, length: %d", acpi_rtct_tbl, acpi_rtct_tbl->length);
+		entry = get_rtct_address();
+		software_sram_area_bottom = SOFTWARE_SRAM_BASE_HPA;

-		while (((uint64_t)entry - (uint64_t)acpi_ptct_tbl) < acpi_ptct_tbl->length) {
+		while (((uint64_t)entry - (uint64_t)acpi_rtct_tbl) < acpi_rtct_tbl->length) {
 			switch (entry->type) {
-			case PTCT_ENTRY_TYPE_PTCM_BINARY:
-				ptcm_binary = (struct ptct_entry_data_ptcm_binary *)entry->data;
-				if (psram_area_top < ptcm_binary->address + ptcm_binary->size) {
-					psram_area_top = ptcm_binary->address + ptcm_binary->size;
+			case RTCT_ENTRY_TYPE_RTCM_BINARY:
+				rtcm_binary = (struct rtct_entry_data_rtcm_binary *)entry->data;
+				if (software_sram_area_top < rtcm_binary->address + rtcm_binary->size) {
+					software_sram_area_top = rtcm_binary->address + rtcm_binary->size;
 				}
-				pr_info("found PTCM bin, in HPA %llx, size %llx", ptcm_binary->address, ptcm_binary->size);
+				pr_info("found RTCM bin, in HPA %llx, size %llx",
+					rtcm_binary->address, rtcm_binary->size);
 				break;
-			case PTCT_ENTRY_TYPE_PSRAM:
-				psram_entry = (struct ptct_entry_data_psram *)entry->data;
-				if (psram_area_top < psram_entry->base + psram_entry->size) {
-					psram_area_top = psram_entry->base + psram_entry->size;
+			case RTCT_ENTRY_TYPE_SOFTWARE_SRAM:
+				sw_sram_entry = (struct rtct_entry_data_software_sram *)entry->data;
+				if (software_sram_area_top < sw_sram_entry->base + sw_sram_entry->size) {
+					software_sram_area_top = sw_sram_entry->base + sw_sram_entry->size;
 				}
-				pr_info("found L%d psram, at HPA %llx, size %x", psram_entry->cache_level,
-					psram_entry->base, psram_entry->size);
+				pr_info("found L%d Software SRAM, at HPA %llx, size %x", sw_sram_entry->cache_level,
+					sw_sram_entry->base, sw_sram_entry->size);
 				break;
 			/* In current phase, we ignore other entries like gt_clos and wrc_close */
 			default:
 				break;
 			}
-			/* point to next ptct entry */
-			entry = (struct ptct_entry *)((uint64_t)entry + entry->size);
+			/* point to next rtct entry */
+			entry = (struct rtct_entry *)((uint64_t)entry + entry->size);
 		}
-		psram_area_top = round_page_up(psram_area_top);
+		software_sram_area_top = round_page_up(software_sram_area_top);
 	} else {
-		pr_fatal("Cannot find PTCT pointer!!!!");
+		pr_fatal("Cannot find RTCT pointer!!!!");
 	}
 }

 /*
- * Function to initialize pSRAM. Both BSP and APs shall call this function to
- * make sure pSRAM is initialized, which is required by PTCM.
+ * Function to initialize Software SRAM. Both BSP and APs shall call this function to
+ * make sure Software SRAM is initialized, which is required by RTCM.
  * BSP:
- *	To parse PTCT and find the entry of PTCM command function
+ *	To parse RTCT and find the entry of RTCM command function
  * AP:
- *	Wait until BSP has done the parsing work, then call the PTCM ABI.
+ *	Wait until BSP has done the parsing work, then call the RTCM ABI.
  *
- * Synchronization of AP and BSP is ensured, both inside and outside PTCM.
+ * Synchronization of AP and BSP is ensured, both inside and outside RTCM.
  * BSP shall be the last to finish the call.
  */
-void init_psram(bool is_bsp)
+void init_software_sram(bool is_bsp)
 {
-	int32_t ptcm_ret_code;
-	struct ptcm_header *header;
-	ptcm_abi_func ptcm_command_func = NULL;
-	static uint64_t init_psram_cpus_mask = (1UL << BSP_CPU_ID);
+	int32_t rtcm_ret_code;
+	struct rtcm_header *header;
+	rtcm_abi_func rtcm_command_func = NULL;
+	static uint64_t init_sw_sram_cpus_mask = (1UL << BSP_CPU_ID);

 	/*
 	 * When we shut down an RTVM, its pCPUs will be re-initialized
-	 * we must ensure init_psram() will only be executed at the first time when a pcpu is booted
-	 * That's why we add "!is_psram_initialized" as an condition.
+	 * we must ensure init_software_sram() will only be executed at the first time when a pcpu is booted
+	 * That's why we add "!is_sw_sram_initialized" as an condition.
 	 */
-	if (!is_psram_initialized && (acpi_ptct_tbl != NULL)) {
-		/* TODO: We may use SMP call to flush TLB and do pSRAM initilization on APs */
+	if (!is_sw_sram_initialized && (acpi_rtct_tbl != NULL)) {
+		/* TODO: We may use SMP call to flush TLB and do Software SRAM initialization on APs */
 		if (is_bsp) {
-			parse_ptct();
-			/* Clear the NX bit of PTCM area */
-			ptcm_set_nx(false);
-			bitmap_clear_lock(get_pcpu_id(), &init_psram_cpus_mask);
+			parse_rtct();
+			/* Clear the NX bit of RTCM area */
+			rtcm_set_nx(false);
+			bitmap_clear_lock(get_pcpu_id(), &init_sw_sram_cpus_mask);
 		}
-		wait_sync_change(&init_psram_cpus_mask, 0UL);
-		pr_info("PTCT is parsed by BSP");
-		header = hpa2hva(ptcm_binary->address);
-		pr_info("ptcm_bin_address:%llx, ptcm magic:%x, ptcm version:%x",
-			ptcm_binary->address, header->magic, header->version);
-		ASSERT(header->magic == PTCM_MAGIC, "Incorrect PTCM magic!");
+		wait_sync_change(&init_sw_sram_cpus_mask, 0UL);
+		pr_info("RTCT is parsed by BSP");
+		header = hpa2hva(rtcm_binary->address);
+		pr_info("rtcm_bin_address:%llx, rtcm magic:%x, rtcm version:%x",
+			rtcm_binary->address, header->magic, header->version);
+		ASSERT(header->magic == RTCM_MAGIC, "Incorrect RTCM magic!");
-		/* Flush the TLB, so that BSP/AP can execute the PTCM ABI */
-		ptcm_flush_binary_tlb();
-		ptcm_command_func = (ptcm_abi_func)(hpa2hva(ptcm_binary->address) + header->command_offset);
-		pr_info("ptcm command function is found at %llx",ptcm_command_func);
-		ptcm_ret_code = ptcm_command_func(PTCM_CMD_INIT_PSRAM, get_ptct_address());
-		pr_info("ptcm initialization return %d", ptcm_ret_code);
+		/* Flush the TLB, so that BSP/AP can execute the RTCM ABI */
+		rtcm_flush_binary_tlb();
+		rtcm_command_func = (rtcm_abi_func)(hpa2hva(rtcm_binary->address) + header->command_offset);
+		pr_info("rtcm command function is found at %llx", rtcm_command_func);
+		rtcm_ret_code = rtcm_command_func(RTCM_CMD_INIT_SOFTWARE_SRAM, get_rtct_address());
+		pr_info("rtcm initialization return %d", rtcm_ret_code);
 		/* return 0 for success, -1 for failure */
-		ASSERT(ptcm_ret_code == 0);
+		ASSERT(rtcm_ret_code == 0);
 		if (is_bsp) {
-			/* Restore the NX bit of PTCM area in page table */
-			ptcm_set_nx(true);
+			/* Restore the NX bit of RTCM area in page table */
+			rtcm_set_nx(true);
 		}
-		bitmap_set_lock(get_pcpu_id(), &init_psram_cpus_mask);
-		wait_sync_change(&init_psram_cpus_mask, ALL_CPUS_MASK);
-		/* Flush the TLB on BSP and all APs to restore the NX for pSRAM area */
-		ptcm_flush_binary_tlb();
+		bitmap_set_lock(get_pcpu_id(), &init_sw_sram_cpus_mask);
+		wait_sync_change(&init_sw_sram_cpus_mask, ALL_CPUS_MASK);
+		/* Flush the TLB on BSP and all APs to restore the NX for Software SRAM area */
+		rtcm_flush_binary_tlb();
 		if (is_bsp) {
-			is_psram_initialized = true;
-			pr_info("BSP pSRAM has been initialized\n");
+			is_sw_sram_initialized = true;
+			pr_info("BSP Software SRAM has been initialized\n");
 		}
 	}
 }
 #else
-void set_ptct_tbl(__unused void *ptct_tbl_addr)
+void set_rtct_tbl(__unused void *rtct_tbl_addr)
 {
 }

-void init_psram(__unused bool is_bsp)
+void init_software_sram(__unused bool is_bsp)
 {
 }
 #endif

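init_software_sram() above is a two-phase rendezvous: the BSP parses the RTCT and drops the NX protection first, every core then invokes the RTCM command function, and nobody proceeds until the last core has finished. The sketch below re-expresses that handshake with C11 atomics and POSIX threads purely for illustration; ACRN's bitmap_*_lock/wait_sync_change primitives and the NX/TLB details are not modeled:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NCPUS 4

static atomic_bool bsp_done_parsing = false;
static atomic_uint cpus_done = 0;

static void *cpu_thread(void *arg)
{
	unsigned long id = (unsigned long)(uintptr_t)arg;

	if (id == 0UL) {
		printf("BSP: parse RTCT, clear NX on the RTCM binary\n");
		atomic_store(&bsp_done_parsing, true);
	}
	while (!atomic_load(&bsp_done_parsing)) {
		;	/* APs spin until the BSP has finished parsing */
	}

	printf("cpu%lu: invoke the per-CPU init command\n", id);
	atomic_fetch_add(&cpus_done, 1U);
	while (atomic_load(&cpus_done) < NCPUS) {
		;	/* everyone waits until the last CPU is done */
	}

	if (id == 0UL) {
		printf("BSP: restore NX, flush TLBs, mark init complete\n");
	}
	return NULL;
}

int main(void)
{
	pthread_t t[NCPUS];
	unsigned long i;

	for (i = 0; i < NCPUS; i++) {
		pthread_create(&t[i], NULL, cpu_thread, (void *)(uintptr_t)i);
	}
	for (i = 0; i < NCPUS; i++) {
		pthread_join(t[i], NULL);
	}
	return 0;
}

The ordering matters for the same reason it does in the patch: the per-CPU step may only run after the one-time parsing step, and the teardown (restoring NX) may only run after every CPU has made its call.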
View File

@@ -57,7 +57,7 @@
 #define ACPI_SIG_MCFG "MCFG" /* Memory Mapped Configuration table */
 #define ACPI_SIG_DSDT "DSDT" /* Differentiated System Description Table */
 #define ACPI_SIG_TPM2 "TPM2" /* Trusted Platform Module hardware interface table */
-#define ACPI_SIG_PTCT "PTCT" /* Platform Tuning Configuration Table (Real-Time Configuration Table) */
+#define ACPI_SIG_RTCT "PTCT" /* Platform Tuning Configuration Table (Real-Time Configuration Table) */

 struct packed_gas {
 	uint8_t space_id;

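Worth noting in the hunk above: only the C identifier changes; the 4-byte signature the firmware publishes is still "PTCT", so table lookup keeps matching on that string. A generic sketch of such a signature check, using a fabricated table header rather than ACRN's ACPI parsing code:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ACPI_SIG_RTCT "PTCT"	/* symbolic name changed, signature bytes did not */

struct acpi_table_header {
	char signature[4];
	uint32_t length;
	/* remaining standard ACPI header fields omitted for brevity */
};

static int signature_matches(const struct acpi_table_header *tbl, const char *sig)
{
	/* ACPI signatures are 4 raw bytes, not NUL-terminated strings */
	return memcmp(tbl->signature, sig, 4) == 0;
}

int main(void)
{
	struct acpi_table_header fake = { { 'P', 'T', 'C', 'T' }, 36U };

	printf("RTCT found: %s\n",
		signature_matches(&fake, ACPI_SIG_RTCT) ? "yes" : "no");
	return 0;
}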
View File

@@ -581,14 +581,14 @@ static int32_t add_vm_memory_region(struct acrn_vm *vm, struct acrn_vm *target_v
 		} else {
 			prot |= EPT_UNCACHED;
 		}
-		/* If pSRAM is initialized, and HV received a request to map pSRAM area to guest,
-		 * we should add EPT_WB flag to make pSRAM effective.
-		 * Assumption: SOS must assign the PSRAM area as a whole and as a separate memory
-		 * region whose base address is PSRAM_BASE_HPA
-		 * TODO: We can enforce WB for any region has overlap with pSRAM, for simplicity,
+		/* If Software SRAM is initialized, and HV received a request to map Software SRAM
+		 * area to guest, we should add EPT_WB flag to make Software SRAM effective.
+		 * Assumption: SOS must assign the Software SRAM area as a whole and as a separate memory
+		 * region whose base address is SOFTWARE_SRAM_BASE_HPA
+		 * TODO: We can enforce WB for any region has overlap with Software SRAM, for simplicity,
 		 * and leave it to SOS to make sure it won't violate.
 		 */
-		if ((hpa == PSRAM_BASE_HPA) && is_psram_initialized) {
+		if ((hpa == SOFTWARE_SRAM_BASE_HPA) && is_sw_sram_initialized) {
 			prot |= EPT_WB;
 		}
 		/* create gpa to hpa EPT mapping */

View File

@@ -4,29 +4,29 @@
  * SPDX-License-Identifier: BSD-3-Clause
  */

-#ifndef PTCM_H
-#define PTCM_H
+#ifndef RTCM_H
+#define RTCM_H

 #include <rtct.h>

 #define MSABI __attribute__((ms_abi))
-typedef int32_t MSABI (*ptcm_abi_func)(uint32_t command, void *command_struct);
+typedef int32_t MSABI(*rtcm_abi_func)(uint32_t command, void *command_struct);

-#define PTCM_CMD_INIT_PSRAM (int32_t)1U
-#define PTCM_CMD_CPUID (int32_t)2U
-#define PTCM_CMD_RDMSR (int32_t)3U
-#define PTCM_CMD_WRMSR (int32_t)4U
+#define RTCM_CMD_INIT_SOFTWARE_SRAM (int32_t)1U
+#define RTCM_CMD_CPUID (int32_t)2U
+#define RTCM_CMD_RDMSR (int32_t)3U
+#define RTCM_CMD_WRMSR (int32_t)4U

-#define PTCM_MAGIC 0x5054434dU
+#define RTCM_MAGIC 0x5054434dU

-struct ptcm_header {
+struct rtcm_header {
 	uint32_t magic;
 	uint32_t version;
 	uint64_t command_offset;
 } __packed;

-extern volatile bool is_psram_initialized;
-void init_psram(bool is_bsp);
-void set_ptct_tbl(void *ptct_tbl_addr);
+extern volatile bool is_sw_sram_initialized;
+void init_software_sram(bool is_bsp);
+void set_rtct_tbl(void *rtct_tbl_addr);

-#endif /* PTCM_H */
+#endif /* RTCM_H */

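The header above is consumed in rtcm.c by validating the magic and adding command_offset to the binary's base address to obtain the entry point that is then called through an rtcm_abi_func pointer. Below is a small sketch of that consumption over a fabricated in-memory blob; it stops short of actually calling anything, since there is no real RTCM binary here:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RTCM_MAGIC 0x5054434dU

struct rtcm_header {
	uint32_t magic;
	uint32_t version;
	uint64_t command_offset;	/* offset of the command function from the binary base */
} __attribute__((packed));

typedef int32_t (*rtcm_abi_func)(uint32_t command, void *command_struct);

int main(void)
{
	uint8_t blob[4096];
	struct rtcm_header in = { RTCM_MAGIC, 1U, 0x40U };	/* fabricated header */
	struct rtcm_header hdr;

	memcpy(blob, &in, sizeof(in));		/* pretend 'blob' is the mapped binary */
	memcpy(&hdr, blob, sizeof(hdr));	/* read the header back out */

	if (hdr.magic != RTCM_MAGIC) {
		fprintf(stderr, "not an RTCM image\n");
		return 1;
	}
	/* in the hypervisor this address is cast to rtcm_abi_func and invoked
	 * with RTCM_CMD_INIT_SOFTWARE_SRAM and the RTCT address as arguments */
	printf("entry point would be at base + 0x%llx\n",
		(unsigned long long)hdr.command_offset);
	return 0;
}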
View File

@@ -4,40 +4,40 @@
  * SPDX-License-Identifier: BSD-3-Clause
  */

-#ifndef PTCT_H
-#define PTCT_H
+#ifndef RTCT_H
+#define RTCT_H

 #include <acpi.h>

-#define PTCT_ENTRY_TYPE_PTCD_LIMIT 1U
-#define PTCT_ENTRY_TYPE_PTCM_BINARY 2U
-#define PTCT_ENTRY_TYPE_WRC_L3_MASKS 3U
-#define PTCT_ENTRY_TYPE_GT_L3_MASKS 4U
-#define PTCT_ENTRY_TYPE_PSRAM 5U
-#define PTCT_ENTRY_TYPE_STREAM_DATAPATH 6U
-#define PTCT_ENTRY_TYPE_TIMEAWARE_SUBSYS 7U
-#define PTCT_ENTRY_TYPE_RT_IOMMU 8U
-#define PTCT_ENTRY_TYPE_MEM_HIERARCHY_LATENCY 9U
+#define RTCT_ENTRY_TYPE_RTCD_LIMIT 1U
+#define RTCT_ENTRY_TYPE_RTCM_BINARY 2U
+#define RTCT_ENTRY_TYPE_WRC_L3_MASKS 3U
+#define RTCT_ENTRY_TYPE_GT_L3_MASKS 4U
+#define RTCT_ENTRY_TYPE_SOFTWARE_SRAM 5U
+#define RTCT_ENTRY_TYPE_STREAM_DATAPATH 6U
+#define RTCT_ENTRY_TYPE_TIMEAWARE_SUBSYS 7U
+#define RTCT_ENTRY_TYPE_RT_IOMMU 8U
+#define RTCT_ENTRY_TYPE_MEM_HIERARCHY_LATENCY 9U

-#define PSRAM_BASE_HPA 0x40080000U
-#define PSRAM_BASE_GPA 0x40080000U
-#define PSRAM_MAX_SIZE 0x00800000U
+#define SOFTWARE_SRAM_BASE_HPA 0x40080000U
+#define SOFTWARE_SRAM_BASE_GPA 0x40080000U
+#define SOFTWARE_SRAM_MAX_SIZE 0x00800000U

-struct ptct_entry{
+struct rtct_entry {
 	uint16_t size;
 	uint16_t format;
 	uint32_t type;
 	uint32_t data[64];
 } __packed;

-struct ptct_entry_data_ptcm_binary
+struct rtct_entry_data_rtcm_binary
 {
 	uint64_t address;
 	uint32_t size;
 } __packed;

-struct ptct_entry_data_psram
+struct rtct_entry_data_software_sram
 {
 	uint32_t cache_level;
 	uint64_t base;
@@ -47,7 +47,7 @@ struct ptct_entry_data_psram
 } __packed;

-extern uint64_t psram_area_bottom;
-extern uint64_t psram_area_top;
+extern uint64_t software_sram_area_bottom;
+extern uint64_t software_sram_area_top;

-#endif /* PTCT_H */
+#endif /* RTCT_H */
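parse_rtct() walks these records by their self-describing size field: each entry begins with its length in bytes, and the cursor advances by entry->size until the table length is consumed. A self-contained sketch of that walk over two fabricated entries (only the common header is modeled, not the per-type payloads):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RTCT_ENTRY_TYPE_RTCM_BINARY   2U
#define RTCT_ENTRY_TYPE_SOFTWARE_SRAM 5U

struct rtct_entry {
	uint16_t size;		/* size of this entry in bytes, header included */
	uint16_t format;
	uint32_t type;
	/* type-specific payload follows */
} __attribute__((packed));

int main(void)
{
	uint8_t table[64];
	uint32_t offset = 0U, total = 0U;
	struct rtct_entry e;

	/* build two dummy entries: 16 bytes of RTCM-binary type, 24 of SRAM type */
	e = (struct rtct_entry){ .size = 16U, .format = 1U, .type = RTCT_ENTRY_TYPE_RTCM_BINARY };
	memcpy(table + total, &e, sizeof(e));
	total += e.size;
	e = (struct rtct_entry){ .size = 24U, .format = 1U, .type = RTCT_ENTRY_TYPE_SOFTWARE_SRAM };
	memcpy(table + total, &e, sizeof(e));
	total += e.size;

	/* the walk itself mirrors "entry = entry + entry->size" in parse_rtct() */
	while (offset < total) {
		memcpy(&e, table + offset, sizeof(e));
		printf("entry at offset %2u: type %u, %u bytes\n",
			(unsigned)offset, (unsigned)e.type, (unsigned)e.size);
		offset += e.size;
	}
	return 0;
}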