diff --git a/tools/packaging/kernel/patches/5.10.x/0001-arm64-mmu-compared-with-linear-start-physical-addres.patch b/tools/packaging/kernel/patches/5.10.x/0001-arm64-mmu-compared-with-linear-start-physical-addres.patch new file mode 100644 index 0000000000..3338aa139d --- /dev/null +++ b/tools/packaging/kernel/patches/5.10.x/0001-arm64-mmu-compared-with-linear-start-physical-addres.patch @@ -0,0 +1,31 @@ +From 0b0f3b3cd9c00bc5bce5686303ef008212ce9b99 Mon Sep 17 00:00:00 2001 +From: Jianyong Wu +Date: Wed, 31 Mar 2021 10:00:53 +0800 +Subject: [PATCH] arm64/mmu: compared with linear start physical address as + signed + +When CONFIG_RANDOMIZE_BASE is enabled, the physical base address can be +a negative number. It may lead to a bug if it is compared as an unsigned +number. So cast it before comparison. + +Signed-off-by: Jianyong Wu +--- + arch/arm64/mm/mmu.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c +index ca692a815731..24b36c85edf4 100644 +--- a/arch/arm64/mm/mmu.c ++++ b/arch/arm64/mm/mmu.c +@@ -1453,7 +1453,7 @@ static bool inside_linear_region(u64 start, u64 size) + * range which can be mapped inside this linear mapping range, must + * also be derived from its end points. 
+ */ +- return start >= __pa(_PAGE_OFFSET(vabits_actual)) && ++ return (s64)start >= (s64)(__pa(_PAGE_OFFSET(vabits_actual))) && + (start + size - 1) <= __pa(PAGE_END - 1); + } + +-- +2.17.1 + diff --git a/tools/packaging/kernel/patches/5.10.x/0002-arm64-mm-Define-arch_get_mappable_range.patch b/tools/packaging/kernel/patches/5.10.x/0002-arm64-mm-Define-arch_get_mappable_range.patch deleted file mode 100644 index c0e606bc00..0000000000 --- a/tools/packaging/kernel/patches/5.10.x/0002-arm64-mm-Define-arch_get_mappable_range.patch +++ /dev/null @@ -1,389 +0,0 @@ -From patchwork Mon Jan 18 13:12:59 2021 -From: Anshuman Khandual -Subject: [PATCH V3 1/3] mm/memory_hotplug: Prevalidate the address range being - added with platform -Date: Mon, 18 Jan 2021 18:42:59 +0530 -Message-Id: <1610975582-12646-2-git-send-email-anshuman.khandual@arm.com> -X-Mailer: git-send-email 2.7.4 -In-Reply-To: <1610975582-12646-1-git-send-email-anshuman.khandual@arm.com> -References: <1610975582-12646-1-git-send-email-anshuman.khandual@arm.com> -X-Bogosity: Ham, tests=bogofilter, spamicity=0.000000, version=1.2.4 -Sender: owner-linux-mm@kvack.org -Precedence: bulk -X-Loop: owner-majordomo@kvack.org -List-ID: - -This introduces memhp_range_allowed() which can be called in various memory -hotplug paths to prevalidate the address range which is being added, with -the platform. Then memhp_range_allowed() calls memhp_get_pluggable_range() -which provides applicable address range depending on whether linear mapping -is required or not. For ranges that require linear mapping, it calls a new -arch callback arch_get_mappable_range() which the platform can override. So -the new callback, in turn provides the platform an opportunity to configure -acceptable memory hotplug address ranges in case there are constraints. - -This mechanism will help prevent platform specific errors deep down during -hotplug calls. 
This drops now redundant check_hotplug_memory_addressable() -check in __add_pages() but instead adds a VM_BUG_ON() check which would -ensure that the range has been validated with memhp_range_allowed() earlier -in the call chain. Besides memhp_get_pluggable_range() also can be used by -potential memory hotplug callers to avail the allowed physical range which -would go through on a given platform. - -This does not really add any new range check in generic memory hotplug but -instead compensates for lost checks in arch_add_memory() where applicable -and check_hotplug_memory_addressable(), with unified memhp_range_allowed(). - -Cc: David Hildenbrand -Cc: Andrew Morton -Cc: linux-mm@kvack.org -Cc: linux-kernel@vger.kernel.org -Suggested-by: David Hildenbrand -Signed-off-by: Anshuman Khandual -Reviewed-by: Oscar Salvador ---- - include/linux/memory_hotplug.h | 10 +++++ - mm/memory_hotplug.c | 79 ++++++++++++++++++++++++++-------- - mm/memremap.c | 6 +++ - 3 files changed, 76 insertions(+), 19 deletions(-) - -diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h -index 15acce5ab106..439b013f818a 100644 ---- a/include/linux/memory_hotplug.h -+++ b/include/linux/memory_hotplug.h -@@ -70,6 +70,9 @@ typedef int __bitwise mhp_t; - */ - #define MEMHP_MERGE_RESOURCE ((__force mhp_t)BIT(0)) - -+bool memhp_range_allowed(u64 start, u64 size, bool need_mapping); -+struct range memhp_get_pluggable_range(bool need_mapping); -+ - /* - * Extended parameters for memory hotplug: - * altmap: alternative allocator for memmap array (optional) -@@ -281,6 +284,13 @@ static inline bool movable_node_is_enabled(void) - } - #endif /* ! CONFIG_MEMORY_HOTPLUG */ - -+/* -+ * Keep this declaration outside CONFIG_MEMORY_HOTPLUG as some -+ * platforms might override and use arch_get_mappable_range() -+ * for internal non memory hotplug purposes. 
-+ */ -+struct range arch_get_mappable_range(void); -+ - #if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT) - /* - * pgdat resizing functions -diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c -index f9d57b9be8c7..f62664e77ff9 100644 ---- a/mm/memory_hotplug.c -+++ b/mm/memory_hotplug.c -@@ -107,6 +107,9 @@ static struct resource *register_memory_resource(u64 start, u64 size, - if (strcmp(resource_name, "System RAM")) - flags |= IORESOURCE_SYSRAM_DRIVER_MANAGED; - -+ if (!memhp_range_allowed(start, size, true)) -+ return ERR_PTR(-E2BIG); -+ - /* - * Make sure value parsed from 'mem=' only restricts memory adding - * while booting, so that memory hotplug won't be impacted. Please -@@ -284,22 +287,6 @@ static int check_pfn_span(unsigned long pfn, unsigned long nr_pages, - return 0; - } - --static int check_hotplug_memory_addressable(unsigned long pfn, -- unsigned long nr_pages) --{ -- const u64 max_addr = PFN_PHYS(pfn + nr_pages) - 1; -- -- if (max_addr >> MAX_PHYSMEM_BITS) { -- const u64 max_allowed = (1ull << (MAX_PHYSMEM_BITS + 1)) - 1; -- WARN(1, -- "Hotplugged memory exceeds maximum addressable address, range=%#llx-%#llx, maximum=%#llx\n", -- (u64)PFN_PHYS(pfn), max_addr, max_allowed); -- return -E2BIG; -- } -- -- return 0; --} -- - /* - * Reasonably generic function for adding memory. 
It is - * expected that archs that support memory hotplug will -@@ -317,9 +304,8 @@ int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages, - if (WARN_ON_ONCE(!params->pgprot.pgprot)) - return -EINVAL; - -- err = check_hotplug_memory_addressable(pfn, nr_pages); -- if (err) -- return err; -+ if(!memhp_range_allowed(PFN_PHYS(pfn), nr_pages * PAGE_SIZE, false)) -+ return -E2BIG; - - if (altmap) { - /* -@@ -1180,6 +1166,61 @@ int add_memory_driver_managed(int nid, u64 start, u64 size, - } - EXPORT_SYMBOL_GPL(add_memory_driver_managed); - -+/* -+ * Platforms should define arch_get_mappable_range() that provides -+ * maximum possible addressable physical memory range for which the -+ * linear mapping could be created. The platform returned address -+ * range must adhere to these following semantics. -+ * -+ * - range.start <= range.end -+ * - Range includes both end points [range.start..range.end] -+ * -+ * There is also a fallback definition provided here, allowing the -+ * entire possible physical address range in case any platform does -+ * not define arch_get_mappable_range(). 
-+ */ -+struct range __weak arch_get_mappable_range(void) -+{ -+ struct range memhp_range = { -+ .start = 0UL, -+ .end = -1ULL, -+ }; -+ return memhp_range; -+} -+ -+struct range memhp_get_pluggable_range(bool need_mapping) -+{ -+ const u64 max_phys = (1ULL << (MAX_PHYSMEM_BITS + 1)) - 1; -+ struct range memhp_range; -+ -+ if (need_mapping) { -+ memhp_range = arch_get_mappable_range(); -+ if (memhp_range.start > max_phys) { -+ memhp_range.start = 0; -+ memhp_range.end = 0; -+ } -+ memhp_range.end = min_t(u64, memhp_range.end, max_phys); -+ } else { -+ memhp_range.start = 0; -+ memhp_range.end = max_phys; -+ } -+ return memhp_range; -+} -+EXPORT_SYMBOL_GPL(memhp_get_pluggable_range); -+ -+bool memhp_range_allowed(u64 start, u64 size, bool need_mapping) -+{ -+ struct range memhp_range = memhp_get_pluggable_range(need_mapping); -+ u64 end = start + size; -+ -+ if (start < end && start >= memhp_range.start && (end - 1) <= memhp_range.end) -+ return true; -+ -+ pr_warn("Hotplug memory [%#llx-%#llx] exceeds maximum addressable range [%#llx-%#llx]\n", -+ start, end, memhp_range.start, memhp_range.end); -+ return false; -+} -+ - #ifdef CONFIG_MEMORY_HOTREMOVE - /* - * Confirm all pages in a range [start, end) belong to the same zone (skipping -diff --git a/mm/memremap.c b/mm/memremap.c -index 16b2fb482da1..e15b13736f6a 100644 ---- a/mm/memremap.c -+++ b/mm/memremap.c -@@ -253,6 +253,12 @@ static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params, - goto err_kasan; - } - -+ if (!memhp_range_allowed(range->start, range_len(range), true)) { -+ error = -ERANGE; -+ mem_hotplug_done(); -+ goto err_add_memory; -+ } -+ - error = arch_add_memory(nid, range->start, range_len(range), - params); - } - -From patchwork Mon Jan 18 13:13:00 2021 -From: Anshuman Khandual -To: linux-mm@kvack.org, - akpm@linux-foundation.org, - david@redhat.com, - hca@linux.ibm.com, - catalin.marinas@arm.com -Cc: Anshuman Khandual , - Oscar Salvador , - Vasily Gorbik , - Will Deacon , - 
Ard Biesheuvel , - Mark Rutland , - linux-arm-kernel@lists.infradead.org, - linux-s390@vger.kernel.org, - linux-kernel@vger.kernel.org -Subject: [PATCH V3 2/3] arm64/mm: Define arch_get_mappable_range() -Date: Mon, 18 Jan 2021 18:43:00 +0530 -Message-Id: <1610975582-12646-3-git-send-email-anshuman.khandual@arm.com> -X-Mailer: git-send-email 2.7.4 -In-Reply-To: <1610975582-12646-1-git-send-email-anshuman.khandual@arm.com> -References: <1610975582-12646-1-git-send-email-anshuman.khandual@arm.com> -X-Bogosity: Ham, tests=bogofilter, spamicity=0.000000, version=1.2.4 -Sender: owner-linux-mm@kvack.org -Precedence: bulk -X-Loop: owner-majordomo@kvack.org -List-ID: - -This overrides arch_get_mappable_range() on arm64 platform which will be -used with recently added generic framework. It drops inside_linear_region() -and subsequent check in arch_add_memory() which are no longer required. It -also adds a VM_BUG_ON() check that would ensure that memhp_range_allowed() -has already been called. - -Cc: Catalin Marinas -Cc: Will Deacon -Cc: Ard Biesheuvel -Cc: Mark Rutland -Cc: David Hildenbrand -Cc: linux-arm-kernel@lists.infradead.org -Cc: linux-kernel@vger.kernel.org -Signed-off-by: Anshuman Khandual -Reviewed-by: David Hildenbrand ---- - arch/arm64/mm/mmu.c | 15 +++++++-------- - 1 file changed, 7 insertions(+), 8 deletions(-) - -diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c -index ae0c3d023824..f2e1770c9f29 100644 ---- a/arch/arm64/mm/mmu.c -+++ b/arch/arm64/mm/mmu.c -@@ -1442,16 +1442,19 @@ static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size) - free_empty_tables(start, end, PAGE_OFFSET, PAGE_END); - } - --static bool inside_linear_region(u64 start, u64 size) -+struct range arch_get_mappable_range(void) - { -+ struct range memhp_range; -+ - /* - * Linear mapping region is the range [PAGE_OFFSET..(PAGE_END - 1)] - * accommodating both its ends but excluding PAGE_END. 
Max physical - * range which can be mapped inside this linear mapping range, must - * also be derived from its end points. - */ -- return start >= __pa(_PAGE_OFFSET(vabits_actual)) && -- (start + size - 1) <= __pa(PAGE_END - 1); -+ memhp_range.start = __pa(_PAGE_OFFSET(vabits_actual)); -+ memhp_range.end = __pa(PAGE_END - 1); -+ return memhp_range; - } - - int arch_add_memory(int nid, u64 start, u64 size, -@@ -1459,11 +1462,7 @@ int arch_add_memory(int nid, u64 start, u64 size, - { - int ret, flags = 0; - -- if (!inside_linear_region(start, size)) { -- pr_err("[%llx %llx] is outside linear mapping region\n", start, start + size); -- return -EINVAL; -- } -- -+ VM_BUG_ON(!memhp_range_allowed(start, size, true)); - if (rodata_full || debug_pagealloc_enabled()) - flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; - - -From patchwork Mon Jan 18 13:13:01 2021 -From: Anshuman Khandual -To: linux-mm@kvack.org, - akpm@linux-foundation.org, - david@redhat.com, - hca@linux.ibm.com, - catalin.marinas@arm.com -Cc: Anshuman Khandual , - Oscar Salvador , - Vasily Gorbik , - Will Deacon , - Ard Biesheuvel , - Mark Rutland , - linux-arm-kernel@lists.infradead.org, - linux-s390@vger.kernel.org, - linux-kernel@vger.kernel.org -Subject: [PATCH V3 3/3] s390/mm: Define arch_get_mappable_range() -Date: Mon, 18 Jan 2021 18:43:01 +0530 -Message-Id: <1610975582-12646-4-git-send-email-anshuman.khandual@arm.com> -X-Mailer: git-send-email 2.7.4 -In-Reply-To: <1610975582-12646-1-git-send-email-anshuman.khandual@arm.com> -References: <1610975582-12646-1-git-send-email-anshuman.khandual@arm.com> -X-Bogosity: Ham, tests=bogofilter, spamicity=0.000000, version=1.2.4 -Sender: owner-linux-mm@kvack.org -Precedence: bulk -X-Loop: owner-majordomo@kvack.org -List-ID: - -This overrides arch_get_mappabble_range() on s390 platform which will be -used with recently added generic framework. It modifies the existing range -check in vmem_add_mapping() using arch_get_mappable_range(). 
It also adds a -VM_BUG_ON() check that would ensure that memhp_range_allowed() has already -been called on the hotplug path. - -Cc: Heiko Carstens -Cc: Vasily Gorbik -Cc: David Hildenbrand -Cc: linux-s390@vger.kernel.org -Cc: linux-kernel@vger.kernel.org -Acked-by: Heiko Carstens -Signed-off-by: Anshuman Khandual ---- - arch/s390/mm/init.c | 1 + - arch/s390/mm/vmem.c | 15 ++++++++++++++- - 2 files changed, 15 insertions(+), 1 deletion(-) - -diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c -index 73a163065b95..97017a4bcc90 100644 ---- a/arch/s390/mm/init.c -+++ b/arch/s390/mm/init.c -@@ -297,6 +297,7 @@ int arch_add_memory(int nid, u64 start, u64 size, - if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot)) - return -EINVAL; - -+ VM_BUG_ON(!memhp_range_allowed(start, size, true)); - rc = vmem_add_mapping(start, size); - if (rc) - return rc; -diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c -index 01f3a5f58e64..afc39ff1cc8d 100644 ---- a/arch/s390/mm/vmem.c -+++ b/arch/s390/mm/vmem.c -@@ -4,6 +4,7 @@ - * Author(s): Heiko Carstens - */ - -+#include - #include - #include - #include -@@ -532,11 +533,23 @@ void vmem_remove_mapping(unsigned long start, unsigned long size) - mutex_unlock(&vmem_mutex); - } - -+struct range arch_get_mappable_range(void) -+{ -+ struct range memhp_range; -+ -+ memhp_range.start = 0; -+ memhp_range.end = VMEM_MAX_PHYS - 1; -+ return memhp_range; -+} -+ - int vmem_add_mapping(unsigned long start, unsigned long size) - { -+ struct range range; - int ret; - -- if (start + size > VMEM_MAX_PHYS || -+ range = arch_get_mappable_range(); -+ if (start < range.start || -+ start + size > range.end + 1 || - start + size < start) - return -ERANGE; - diff --git a/versions.yaml b/versions.yaml index 31718f0ba2..d629459283 100644 --- a/versions.yaml +++ b/versions.yaml @@ -155,9 +155,6 @@ assets: uscan-url: >- https://mirrors.edge.kernel.org/pub/linux/kernel/v5.x/linux-(5\.4\..+)\.tar\.gz version: "v5.10.25" - architecture: - aarch64: - 
version: "v5.4.71" kernel-experimental: description: "Linux kernel with virtio-fs support"