diff --git a/hypervisor/arch/x86/vtd.c b/hypervisor/arch/x86/vtd.c
index 92de2671c..0c2b10960 100644
--- a/hypervisor/arch/x86/vtd.c
+++ b/hypervisor/arch/x86/vtd.c
@@ -287,16 +287,18 @@ static inline void dmar_wait_completion(const struct dmar_drhd_rt *dmar_unit, ui
 	}
 }
 
-/* flush cache when root table, context table updated */
-static void iommu_flush_cache(const struct dmar_drhd_rt *dmar_unit,
-	void *p, uint32_t size)
+/* Flush CPU cache when the root table, context table or second-level translation table is updated.
+ * In the context of ACRN, the GPA to HPA mapping does not change after the VM is created,
+ * so flushing the IOTLB is skipped to avoid the performance penalty.
+ */
+void iommu_flush_cache(const void *p, uint32_t size)
 {
 	uint32_t i;
 
 	/* if vtd support page-walk coherency, no need to flush cacheline */
-	if (iommu_ecap_c(dmar_unit->ecap) == 0U) {
+	if (!iommu_page_walk_coherent) {
 		for (i = 0U; i < size; i += CACHE_LINE_SIZE) {
-			clflush((char *)p + i);
+			clflush((const char *)p + i);
 		}
 	}
 }
@@ -1088,7 +1090,7 @@ static int32_t add_iommu_device(struct iommu_domain *domain, uint16_t segment, u
 		root_entry->hi_64 = 0UL;
 		root_entry->lo_64 = lo_64;
 
-		iommu_flush_cache(dmar_unit, root_entry, sizeof(struct dmar_entry));
+		iommu_flush_cache(root_entry, sizeof(struct dmar_entry));
 	} else {
 		context_table_addr = dmar_get_bitslice(root_entry->lo_64,
 			ROOT_ENTRY_LOWER_CTP_MASK, ROOT_ENTRY_LOWER_CTP_POS);
@@ -1143,7 +1145,7 @@ static int32_t add_iommu_device(struct iommu_domain *domain, uint16_t segment, u
 			context_entry->hi_64 = hi_64;
 			context_entry->lo_64 = lo_64;
 
-			iommu_flush_cache(dmar_unit, context_entry, sizeof(struct dmar_entry));
+			iommu_flush_cache(context_entry, sizeof(struct dmar_entry));
 		}
 	}
 }
@@ -1192,7 +1194,7 @@ static int32_t remove_iommu_device(const struct iommu_domain *domain, uint16_t s
 	/* clear the present bit first */
 	context_entry->lo_64 = 0UL;
 	context_entry->hi_64 = 0UL;
-	iommu_flush_cache(dmar_unit, context_entry, sizeof(struct dmar_entry));
+	iommu_flush_cache(context_entry, sizeof(struct dmar_entry));
 
 	sid.bits.b = bus;
 	sid.bits.d = pci_slot(devfun);
@@ -1376,7 +1378,7 @@ int32_t dmar_assign_irte(struct intr_source intr_src, union dmar_ir_entry irte, 
 		ir_entry->entry.hi_64 = irte.entry.hi_64;
 		ir_entry->entry.lo_64 = irte.entry.lo_64;
 
-		iommu_flush_cache(dmar_unit, ir_entry, sizeof(union dmar_ir_entry));
+		iommu_flush_cache(ir_entry, sizeof(union dmar_ir_entry));
 		dmar_invalid_iec(dmar_unit, index, 0U, false);
 	}
 	return ret;
@@ -1407,7 +1409,7 @@ void dmar_free_irte(struct intr_source intr_src, uint16_t index)
 		ir_entry = ir_table + index;
 		ir_entry->bits.present = 0x0UL;
 
-		iommu_flush_cache(dmar_unit, ir_entry, sizeof(union dmar_ir_entry));
+		iommu_flush_cache(ir_entry, sizeof(union dmar_ir_entry));
 		dmar_invalid_iec(dmar_unit, index, 0U, false);
 	}
 }
diff --git a/hypervisor/include/arch/x86/mmu.h b/hypervisor/include/arch/x86/mmu.h
index ce1a17a11..4bb63c0d4 100644
--- a/hypervisor/include/arch/x86/mmu.h
+++ b/hypervisor/include/arch/x86/mmu.h
@@ -102,8 +102,8 @@ enum _page_table_level {
 #define PAGE_SIZE_2M	MEM_2M
 #define PAGE_SIZE_1G	MEM_1G
 
-void sanitize_pte_entry(uint64_t *ptep);
-void sanitize_pte(uint64_t *pt_page);
+void sanitize_pte_entry(uint64_t *ptep, const struct memory_ops *mem_ops);
+void sanitize_pte(uint64_t *pt_page, const struct memory_ops *mem_ops);
 /**
  * @brief MMU paging enable
  *
@@ -176,12 +176,12 @@ static inline void cache_flush_invalidate_all(void)
 	asm volatile (" wbinvd\n" : : : "memory");
 }
 
-static inline void clflush(volatile void *p)
+static inline void clflush(const volatile void *p)
 {
 	asm volatile ("clflush (%0)" :: "r"(p));
 }
 
-static inline void clflushopt(volatile void *p)
+static inline void clflushopt(const volatile void *p)
 {
 	asm volatile ("clflushopt (%0)" :: "r"(p));
 }
diff --git a/hypervisor/include/arch/x86/vtd.h b/hypervisor/include/arch/x86/vtd.h
index bc681243a..3a52d4d4f 100644
--- a/hypervisor/include/arch/x86/vtd.h
+++ b/hypervisor/include/arch/x86/vtd.h
@@ -549,7 +549,7 @@ struct iommu_domain;
  * @brief Assign a device specified by bus & devfun to a iommu domain.
  *
  * Remove the device from the from_domain (if non-NULL), and add it to the to_domain (if non-NULL).
- * API silently fails to add/remove devices to/from domains that are under "Ignored" DMAR units. 
+ * API silently fails to add/remove devices to/from domains that are under "Ignored" DMAR units.
 *
 * @param[in] from_domain iommu domain from which the device is removed from
 * @param[in] to_domain iommu domain to which the device is assgined to
@@ -665,6 +665,18 @@ int32_t dmar_assign_irte(struct intr_source intr_src, union dmar_ir_entry irte, 
 *
 */
 void dmar_free_irte(struct intr_source intr_src, uint16_t index);
+
+/**
+ * @brief Flush cacheline(s) for a specific address with a specific size.
+ *
+ * Flush cacheline(s) for a specific address with a specific size;
+ * if all active IOMMUs support page-walk coherency, the cacheline(s) are not flushed.
+ *
+ * @param[in] p the address of the buffer whose cacheline(s) need to be invalidated
+ * @param[in] size the size of the buffer
+ *
+ */
+void iommu_flush_cache(const void *p, uint32_t size);
 /**
  * @}
  */
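
Not part of the patch: a minimal C sketch of how the reworked API is meant to be called, mirroring the add_iommu_device() hunks above. The wrapper name example_write_context_entry is hypothetical; struct dmar_entry, iommu_flush_cache() and iommu_page_walk_coherent come from the patch itself.

/* Hypothetical caller sketch: with this patch the dmar_unit argument is
 * gone, and the flush decision hinges on the global iommu_page_walk_coherent
 * flag instead of each unit's ECAP.C bit.
 */
static void example_write_context_entry(struct dmar_entry *context_entry,
		uint64_t hi_64, uint64_t lo_64)
{
	/* rewrite the entry in memory first */
	context_entry->hi_64 = hi_64;
	context_entry->lo_64 = lo_64;

	/* No-op when iommu_page_walk_coherent is true; otherwise clflush is
	 * issued for each CACHE_LINE_SIZE chunk covering the entry, so the
	 * IOMMU's page walker observes the update.
	 */
	iommu_flush_cache(context_entry, sizeof(struct dmar_entry));
}

Dropping the per-unit dmar_unit parameter is what lets callers such as the EPT page-table code (see the sanitize_pte()/memory_ops changes in mmu.h) flush without knowing which DMAR unit will walk the tables; the conservative global flag is true only when every active IOMMU reports page-walk coherency.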