diff --git a/tools/packaging/kernel/patches/5.10.x/0001-NO-UPSTREAM-9P-always-use-cached-inode-to-fill-in-v9.patch b/tools/packaging/kernel/patches/5.10.x/0001-NO-UPSTREAM-9P-always-use-cached-inode-to-fill-in-v9.patch
new file mode 100644
index 0000000000..86e587503e
--- /dev/null
+++ b/tools/packaging/kernel/patches/5.10.x/0001-NO-UPSTREAM-9P-always-use-cached-inode-to-fill-in-v9.patch
@@ -0,0 +1,47 @@
+From cab495651e8f71c39e87a08abbe051916110b3ca Mon Sep 17 00:00:00 2001
+From: Julio Montes <julio.montes@intel.com>
+Date: Mon, 18 Sep 2017 11:46:59 -0500
+Subject: [PATCH 3/5] NO-UPSTREAM: 9P: always use cached inode to fill in
+ v9fs_vfs_getattr
+
+So that if in cache=none mode, we don't have to lookup server that
+might not support open-unlink-fstat operation.
+
+fixes https://github.com/01org/cc-oci-runtime/issues/47
+fixes https://github.com/01org/cc-oci-runtime/issues/1062
+
+Signed-off-by: Peng Tao <bergwolf@gmail.com>
+---
+ fs/9p/vfs_inode.c      | 2 +-
+ fs/9p/vfs_inode_dotl.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
+index 85ff859d3af5..efdc2a8f37bb 100644
+--- a/fs/9p/vfs_inode.c
++++ b/fs/9p/vfs_inode.c
+@@ -1080,7 +1080,7 @@ v9fs_vfs_getattr(const struct path *path, struct kstat *stat,
+ 
+ 	p9_debug(P9_DEBUG_VFS, "dentry: %p\n", dentry);
+ 	v9ses = v9fs_dentry2v9ses(dentry);
+-	if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
++	if (!d_really_is_negative(dentry) || v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
+ 		generic_fillattr(d_inode(dentry), stat);
+ 		return 0;
+ 	}
+diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
+index 4823e1c46999..daa5e6a41864 100644
+--- a/fs/9p/vfs_inode_dotl.c
++++ b/fs/9p/vfs_inode_dotl.c
+@@ -480,7 +480,7 @@ v9fs_vfs_getattr_dotl(const struct path *path, struct kstat *stat,
+ 
+ 	p9_debug(P9_DEBUG_VFS, "dentry: %p\n", dentry);
+ 	v9ses = v9fs_dentry2v9ses(dentry);
+-	if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
++	if (!d_really_is_negative(dentry) || v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
+ 		generic_fillattr(d_inode(dentry), stat);
+ 		return 0;
+ 	}
+-- 
+2.20.1
+
diff --git a/tools/packaging/kernel/patches/5.10.x/0002-arm64-mm-Define-arch_get_mappable_range.patch b/tools/packaging/kernel/patches/5.10.x/0002-arm64-mm-Define-arch_get_mappable_range.patch
new file mode 100644
index 0000000000..c0e606bc00
--- /dev/null
+++ b/tools/packaging/kernel/patches/5.10.x/0002-arm64-mm-Define-arch_get_mappable_range.patch
@@ -0,0 +1,389 @@
+From patchwork Mon Jan 18 13:12:59 2021
+From: Anshuman Khandual <anshuman.khandual@arm.com>
+Subject: [PATCH V3 1/3] mm/memory_hotplug: Prevalidate the address range being
+ added with platform
+Date: Mon, 18 Jan 2021 18:42:59 +0530
+Message-Id: <1610975582-12646-2-git-send-email-anshuman.khandual@arm.com>
+X-Mailer: git-send-email 2.7.4
+In-Reply-To: <1610975582-12646-1-git-send-email-anshuman.khandual@arm.com>
+References: <1610975582-12646-1-git-send-email-anshuman.khandual@arm.com>
+X-Bogosity: Ham, tests=bogofilter, spamicity=0.000000, version=1.2.4
+Sender: owner-linux-mm@kvack.org
+Precedence: bulk
+X-Loop: owner-majordomo@kvack.org
+List-ID: <linux-mm.kvack.org>
+
+This introduces memhp_range_allowed() which can be called in various memory
+hotplug paths to prevalidate the address range which is being added, with
+the platform. Then memhp_range_allowed() calls memhp_get_pluggable_range()
+which provides applicable address range depending on whether linear mapping
+is required or not. For ranges that require linear mapping, it calls a new
+arch callback arch_get_mappable_range() which the platform can override. So
+the new callback, in turn provides the platform an opportunity to configure
+acceptable memory hotplug address ranges in case there are constraints.
+
+This mechanism will help prevent platform specific errors deep down during
+hotplug calls. This drops now redundant check_hotplug_memory_addressable()
+check in __add_pages() but instead adds a VM_BUG_ON() check which would
+ensure that the range has been validated with memhp_range_allowed() earlier
+in the call chain. Besides memhp_get_pluggable_range() also can be used by
+potential memory hotplug callers to avail the allowed physical range which
+would go through on a given platform.
+
+This does not really add any new range check in generic memory hotplug but
+instead compensates for lost checks in arch_add_memory() where applicable
+and check_hotplug_memory_addressable(), with unified memhp_range_allowed().
+
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: linux-mm@kvack.org
+Cc: linux-kernel@vger.kernel.org
+Suggested-by: David Hildenbrand <david@redhat.com>
+Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
+Reviewed-by: Oscar Salvador <osalvador@suse.de>
+---
+ include/linux/memory_hotplug.h | 10 +++++
+ mm/memory_hotplug.c            | 79 ++++++++++++++++++++++++++--------
+ mm/memremap.c                  |  6 +++
+ 3 files changed, 76 insertions(+), 19 deletions(-)
+
+diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
+index 15acce5ab106..439b013f818a 100644
+--- a/include/linux/memory_hotplug.h
++++ b/include/linux/memory_hotplug.h
+@@ -70,6 +70,9 @@ typedef int __bitwise mhp_t;
+  */
+ #define MEMHP_MERGE_RESOURCE ((__force mhp_t)BIT(0))
+ 
++bool memhp_range_allowed(u64 start, u64 size, bool need_mapping);
++struct range memhp_get_pluggable_range(bool need_mapping);
++
+ /*
+  * Extended parameters for memory hotplug:
+  * altmap: alternative allocator for memmap array (optional)
+@@ -281,6 +284,13 @@ static inline bool movable_node_is_enabled(void)
+ }
+ #endif /* ! CONFIG_MEMORY_HOTPLUG */
+ 
++/*
++ * Keep this declaration outside CONFIG_MEMORY_HOTPLUG as some
++ * platforms might override and use arch_get_mappable_range()
++ * for internal non memory hotplug purposes.
++ */
++struct range arch_get_mappable_range(void);
++
+ #if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
+ /*
+  * pgdat resizing functions
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
+index f9d57b9be8c7..f62664e77ff9 100644
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -107,6 +107,9 @@ static struct resource *register_memory_resource(u64 start, u64 size,
+ 	if (strcmp(resource_name, "System RAM"))
+ 		flags |= IORESOURCE_SYSRAM_DRIVER_MANAGED;
+ 
++	if (!memhp_range_allowed(start, size, true))
++		return ERR_PTR(-E2BIG);
++
+ 	/*
+ 	 * Make sure value parsed from 'mem=' only restricts memory adding
+ 	 * while booting, so that memory hotplug won't be impacted. Please
+@@ -284,22 +287,6 @@ static int check_pfn_span(unsigned long pfn, unsigned long nr_pages,
+ 	return 0;
+ }
+ 
+-static int check_hotplug_memory_addressable(unsigned long pfn,
+-					    unsigned long nr_pages)
+-{
+-	const u64 max_addr = PFN_PHYS(pfn + nr_pages) - 1;
+-
+-	if (max_addr >> MAX_PHYSMEM_BITS) {
+-		const u64 max_allowed = (1ull << (MAX_PHYSMEM_BITS + 1)) - 1;
+-		WARN(1,
+-		     "Hotplugged memory exceeds maximum addressable address, range=%#llx-%#llx, maximum=%#llx\n",
+-		     (u64)PFN_PHYS(pfn), max_addr, max_allowed);
+-		return -E2BIG;
+-	}
+-
+-	return 0;
+-}
+-
+ /*
+  * Reasonably generic function for adding memory. It is
+  * expected that archs that support memory hotplug will
+@@ -317,9 +304,8 @@ int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
+ 	if (WARN_ON_ONCE(!params->pgprot.pgprot))
+ 		return -EINVAL;
+ 
+-	err = check_hotplug_memory_addressable(pfn, nr_pages);
+-	if (err)
+-		return err;
++	if(!memhp_range_allowed(PFN_PHYS(pfn), nr_pages * PAGE_SIZE, false))
++		return -E2BIG;
+ 
+ 	if (altmap) {
+ 		/*
+@@ -1180,6 +1166,61 @@ int add_memory_driver_managed(int nid, u64 start, u64 size,
+ }
+ EXPORT_SYMBOL_GPL(add_memory_driver_managed);
+ 
++/*
++ * Platforms should define arch_get_mappable_range() that provides
++ * maximum possible addressable physical memory range for which the
++ * linear mapping could be created. The platform returned address
++ * range must adhere to these following semantics.
++ *
++ * - range.start <= range.end
++ * - Range includes both end points [range.start..range.end]
++ *
++ * There is also a fallback definition provided here, allowing the
++ * entire possible physical address range in case any platform does
++ * not define arch_get_mappable_range().
++ */
++struct range __weak arch_get_mappable_range(void)
++{
++	struct range memhp_range = {
++		.start = 0UL,
++		.end = -1ULL,
++	};
++	return memhp_range;
++}
++
++struct range memhp_get_pluggable_range(bool need_mapping)
++{
++	const u64 max_phys = (1ULL << (MAX_PHYSMEM_BITS + 1)) - 1;
++	struct range memhp_range;
++
++	if (need_mapping) {
++		memhp_range = arch_get_mappable_range();
++		if (memhp_range.start > max_phys) {
++			memhp_range.start = 0;
++			memhp_range.end = 0;
++		}
++		memhp_range.end = min_t(u64, memhp_range.end, max_phys);
++	} else {
++		memhp_range.start = 0;
++		memhp_range.end = max_phys;
++	}
++	return memhp_range;
++}
++EXPORT_SYMBOL_GPL(memhp_get_pluggable_range);
++
++bool memhp_range_allowed(u64 start, u64 size, bool need_mapping)
++{
++	struct range memhp_range = memhp_get_pluggable_range(need_mapping);
++	u64 end = start + size;
++
++	if (start < end && start >= memhp_range.start && (end - 1) <= memhp_range.end)
++		return true;
++
++	pr_warn("Hotplug memory [%#llx-%#llx] exceeds maximum addressable range [%#llx-%#llx]\n",
++		start, end, memhp_range.start, memhp_range.end);
++	return false;
++}
++
+ #ifdef CONFIG_MEMORY_HOTREMOVE
+ /*
+  * Confirm all pages in a range [start, end) belong to the same zone (skipping
+diff --git a/mm/memremap.c b/mm/memremap.c
+index 16b2fb482da1..e15b13736f6a 100644
+--- a/mm/memremap.c
++++ b/mm/memremap.c
+@@ -253,6 +253,12 @@ static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
+ 			goto err_kasan;
+ 		}
+ 
++		if (!memhp_range_allowed(range->start, range_len(range), true)) {
++			error = -ERANGE;
++			mem_hotplug_done();
++			goto err_add_memory;
++		}
++
+ 		error = arch_add_memory(nid, range->start, range_len(range),
+ 					params);
+ 	}
+
+From patchwork Mon Jan 18 13:13:00 2021
+From: Anshuman Khandual <anshuman.khandual@arm.com>
+To: linux-mm@kvack.org,
+ akpm@linux-foundation.org,
+ david@redhat.com,
+ hca@linux.ibm.com,
+ catalin.marinas@arm.com
+Cc: Anshuman Khandual <anshuman.khandual@arm.com>,
+ Oscar Salvador <osalvador@suse.de>,
+ Vasily Gorbik <gor@linux.ibm.com>,
+ Will Deacon <will@kernel.org>,
+ Ard Biesheuvel <ardb@kernel.org>,
+ Mark Rutland <mark.rutland@arm.com>,
+ linux-arm-kernel@lists.infradead.org,
+ linux-s390@vger.kernel.org,
+ linux-kernel@vger.kernel.org
+Subject: [PATCH V3 2/3] arm64/mm: Define arch_get_mappable_range()
+Date: Mon, 18 Jan 2021 18:43:00 +0530
+Message-Id: <1610975582-12646-3-git-send-email-anshuman.khandual@arm.com>
+X-Mailer: git-send-email 2.7.4
+In-Reply-To: <1610975582-12646-1-git-send-email-anshuman.khandual@arm.com>
+References: <1610975582-12646-1-git-send-email-anshuman.khandual@arm.com>
+X-Bogosity: Ham, tests=bogofilter, spamicity=0.000000, version=1.2.4
+Sender: owner-linux-mm@kvack.org
+Precedence: bulk
+X-Loop: owner-majordomo@kvack.org
+List-ID: <linux-mm.kvack.org>
+
+This overrides arch_get_mappable_range() on arm64 platform which will be
+used with recently added generic framework. It drops inside_linear_region()
+and subsequent check in arch_add_memory() which are no longer required. It
+also adds a VM_BUG_ON() check that would ensure that memhp_range_allowed()
+has already been called.
+
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Will Deacon <will@kernel.org>
+Cc: Ard Biesheuvel <ardb@kernel.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: linux-arm-kernel@lists.infradead.org
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+---
+ arch/arm64/mm/mmu.c | 15 +++++++--------
+ 1 file changed, 7 insertions(+), 8 deletions(-)
+
+diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
+index ae0c3d023824..f2e1770c9f29 100644
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -1442,16 +1442,19 @@ static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
+ 	free_empty_tables(start, end, PAGE_OFFSET, PAGE_END);
+ }
+ 
+-static bool inside_linear_region(u64 start, u64 size)
++struct range arch_get_mappable_range(void)
+ {
++	struct range memhp_range;
++
+ 	/*
+ 	 * Linear mapping region is the range [PAGE_OFFSET..(PAGE_END - 1)]
+ 	 * accommodating both its ends but excluding PAGE_END. Max physical
+ 	 * range which can be mapped inside this linear mapping range, must
+ 	 * also be derived from its end points.
+ 	 */
+-	return start >= __pa(_PAGE_OFFSET(vabits_actual)) &&
+-	       (start + size - 1) <= __pa(PAGE_END - 1);
++	memhp_range.start = __pa(_PAGE_OFFSET(vabits_actual));
++	memhp_range.end = __pa(PAGE_END - 1);
++	return memhp_range;
+ }
+ 
+ int arch_add_memory(int nid, u64 start, u64 size,
+@@ -1459,11 +1462,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
+ {
+ 	int ret, flags = 0;
+ 
+-	if (!inside_linear_region(start, size)) {
+-		pr_err("[%llx %llx] is outside linear mapping region\n", start, start + size);
+-		return -EINVAL;
+-	}
+-
++	VM_BUG_ON(!memhp_range_allowed(start, size, true));
+ 	if (rodata_full || debug_pagealloc_enabled())
+ 		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
+ 
+
+From patchwork Mon Jan 18 13:13:01 2021
+From: Anshuman Khandual <anshuman.khandual@arm.com>
+To: linux-mm@kvack.org,
+ akpm@linux-foundation.org,
+ david@redhat.com,
+ hca@linux.ibm.com,
+ catalin.marinas@arm.com
+Cc: Anshuman Khandual <anshuman.khandual@arm.com>,
+ Oscar Salvador <osalvador@suse.de>,
+ Vasily Gorbik <gor@linux.ibm.com>,
+ Will Deacon <will@kernel.org>,
+ Ard Biesheuvel <ardb@kernel.org>,
+ Mark Rutland <mark.rutland@arm.com>,
+ linux-arm-kernel@lists.infradead.org,
+ linux-s390@vger.kernel.org,
+ linux-kernel@vger.kernel.org
+Subject: [PATCH V3 3/3] s390/mm: Define arch_get_mappable_range()
+Date: Mon, 18 Jan 2021 18:43:01 +0530
+Message-Id: <1610975582-12646-4-git-send-email-anshuman.khandual@arm.com>
+X-Mailer: git-send-email 2.7.4
+In-Reply-To: <1610975582-12646-1-git-send-email-anshuman.khandual@arm.com>
+References: <1610975582-12646-1-git-send-email-anshuman.khandual@arm.com>
+X-Bogosity: Ham, tests=bogofilter, spamicity=0.000000, version=1.2.4
+Sender: owner-linux-mm@kvack.org
+Precedence: bulk
+X-Loop: owner-majordomo@kvack.org
+List-ID: <linux-mm.kvack.org>
+
+This overrides arch_get_mappable_range() on s390 platform which will be
+used with recently added generic framework. It modifies the existing range
+check in vmem_add_mapping() using arch_get_mappable_range(). It also adds a
+VM_BUG_ON() check that would ensure that memhp_range_allowed() has already
+been called on the hotplug path.
+
+Cc: Heiko Carstens <hca@linux.ibm.com>
+Cc: Vasily Gorbik <gor@linux.ibm.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: linux-s390@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Acked-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
+---
+ arch/s390/mm/init.c |  1 +
+ arch/s390/mm/vmem.c | 15 ++++++++++++++-
+ 2 files changed, 15 insertions(+), 1 deletion(-)
+
+diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
+index 73a163065b95..97017a4bcc90 100644
+--- a/arch/s390/mm/init.c
++++ b/arch/s390/mm/init.c
+@@ -297,6 +297,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
+ 	if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
+ 		return -EINVAL;
+ 
++	VM_BUG_ON(!memhp_range_allowed(start, size, true));
+ 	rc = vmem_add_mapping(start, size);
+ 	if (rc)
+ 		return rc;
+diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
+index 01f3a5f58e64..afc39ff1cc8d 100644
+--- a/arch/s390/mm/vmem.c
++++ b/arch/s390/mm/vmem.c
+@@ -4,6 +4,7 @@
+  *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
+  */
+ 
++#include <linux/memory_hotplug.h>
+ #include <linux/memblock.h>
+ #include <linux/pfn.h>
+ #include <linux/hugetlb.h>
+@@ -532,11 +533,23 @@ void vmem_remove_mapping(unsigned long start, unsigned long size)
+ 	mutex_unlock(&vmem_mutex);
+ }
+ 
++struct range arch_get_mappable_range(void)
++{
++	struct range memhp_range;
++
++	memhp_range.start = 0;
++	memhp_range.end = VMEM_MAX_PHYS - 1;
++	return memhp_range;
++}
++
+ int vmem_add_mapping(unsigned long start, unsigned long size)
+ {
++	struct range range;
+ 	int ret;
+ 
+-	if (start + size > VMEM_MAX_PHYS ||
++	range = arch_get_mappable_range();
++	if (start < range.start ||
++	    start + size > range.end + 1 ||
+ 	    start + size < start)
+ 		return -ERANGE;
+ 
diff --git a/versions.yaml b/versions.yaml
index 3566768197..d629459283 100644
--- a/versions.yaml
+++ b/versions.yaml
@@ -154,7 +154,7 @@ assets:
     url: "https://cdn.kernel.org/pub/linux/kernel/v4.x/"
     uscan-url: >-
       https://mirrors.edge.kernel.org/pub/linux/kernel/v5.x/linux-(5\.4\..+)\.tar\.gz
-    version: "v5.4.71"
+    version: "v5.10.25"
 
   kernel-experimental:
     description: "Linux kernel with virtio-fs support"