hv: mmu: rename hv_access_memory_region_update to ppt_clear_user_bit

Rename hv_access_memory_region_update to ppt_clear_user_bit to
follow the verb + object naming style.

Tracked-On: #5330
Signed-off-by: Li Fei1 <fei1.li@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
Li Fei1 2020-10-28 21:41:55 +08:00 committed by wenlingz
parent 35abee60d6
commit f3067f5385
9 changed files with 10 additions and 10 deletions

View File

@ -384,7 +384,7 @@ int32_t init_ioapic_id_info(void)
gsi = 0U;
for (ioapic_id = 0U; ioapic_id < ioapic_num; ioapic_id++) {
addr = map_ioapic(ioapic_array[ioapic_id].addr);
hv_access_memory_region_update((uint64_t)addr, PAGE_SIZE);
ppt_clear_user_bit((uint64_t)addr, PAGE_SIZE);
nr_pins = ioapic_nr_pins(addr);
if (nr_pins <= (uint32_t) CONFIG_MAX_IOAPIC_LINES) {

View File

@ -194,9 +194,9 @@ void enable_smap(void)
}
/*
* Update memory pages to be owned by hypervisor.
* Clear the USER bit in the page table to mark memory pages as owned by the hypervisor.
*/
void hv_access_memory_region_update(uint64_t base, uint64_t size)
void ppt_clear_user_bit(uint64_t base, uint64_t size)
{
uint64_t base_aligned;
uint64_t size_aligned;

View File

@ -136,7 +136,7 @@ void reserve_buffer_for_ept_pages(void)
struct acrn_vm_config *vm_config;
pt_base = e820_alloc_memory(TOTAL_EPT_4K_PAGES_SIZE, ~0UL);
hv_access_memory_region_update(pt_base, TOTAL_EPT_4K_PAGES_SIZE);
ppt_clear_user_bit(pt_base, TOTAL_EPT_4K_PAGES_SIZE);
for (vm_id = 0U; vm_id < CONFIG_MAX_VM_NUM; vm_id++) {
vm_config = get_vm_config(vm_id);
ept_pages_info[vm_id].ept.nworld_pt_base = (struct page *)(void *)(pt_base + offset);

View File

@ -209,7 +209,7 @@ static int32_t register_hrhd_units(void)
drhd_rt->drhd = &platform_dmar_info->drhd_units[i];
drhd_rt->dmar_irq = IRQ_INVALID;
hv_access_memory_region_update(drhd_rt->drhd->reg_base_addr, PAGE_SIZE);
ppt_clear_user_bit(drhd_rt->drhd->reg_base_addr, PAGE_SIZE);
ret = dmar_register_hrhd(drhd_rt);
if (ret != 0) {

View File

@ -104,7 +104,7 @@ void npk_log_setup(struct hv_npk_log_param *param)
for (i = 0U; i < pcpu_nums; i++) {
per_cpu(npk_log_ref, i) = 0U;
}
hv_access_memory_region_update(base,
ppt_clear_user_bit(base,
pcpu_nums * (HV_NPK_LOG_REF_MASK + 1U)
* sizeof(struct npk_chan));
}

View File

@ -139,7 +139,7 @@ void uart16550_init(bool early_boot)
mmio_base_va = hpa2hva(hva2hpa_early(uart.mmio.pci.cached_mmio_base_va));
}
if (mmio_base_va != NULL) {
hv_access_memory_region_update((uint64_t)mmio_base_va, PDE_SIZE);
ppt_clear_user_bit((uint64_t)mmio_base_va, PDE_SIZE);
}
return;
}

View File

@ -142,7 +142,7 @@ void register_vgpio_handler(struct acrn_vm *vm, const struct acrn_mmiodev *mmiod
base_hpa = mmiodev->base_hpa + (P2SB_BASE_GPIO_PORT_ID << P2SB_PORTID_SHIFT);
/* emulate MMIO access to the GPIO private configuration space registers */
hv_access_memory_region_update((uint64_t)hpa2hva(base_hpa), gpio_pcr_sz);
ppt_clear_user_bit((uint64_t)hpa2hva(base_hpa), gpio_pcr_sz);
register_mmio_emulation_handler(vm, vgpio_mmio_handler, gpa_start, gpa_end, (void *)vm, false);
ept_del_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp, gpa_start, gpio_pcr_sz);
}

View File

@ -635,7 +635,7 @@ void init_pci_pdev_list(void)
uint16_t bus;
bool was_visited = false;
hv_access_memory_region_update(phys_pci_mmcfg.address, get_pci_mmcfg_size(&phys_pci_mmcfg));
ppt_clear_user_bit(phys_pci_mmcfg.address, get_pci_mmcfg_size(&phys_pci_mmcfg));
pci_parse_iommu_devscopes(&bdfs_from_drhds, &drhd_idx_pci_all);

View File

@ -113,7 +113,7 @@ void mmu_add(uint64_t *pml4_page, uint64_t paddr_base, uint64_t vaddr_base,
uint64_t size, uint64_t prot, const struct memory_ops *mem_ops);
void mmu_modify_or_del(uint64_t *pml4_page, uint64_t vaddr_base, uint64_t size,
uint64_t prot_set, uint64_t prot_clr, const struct memory_ops *mem_ops, uint32_t type);
void hv_access_memory_region_update(uint64_t base, uint64_t size);
void ppt_clear_user_bit(uint64_t base, uint64_t size);
void ppt_set_nx_bit(uint64_t base, uint64_t size, bool add);
/**