Merge tag 'mm-hotfixes-stable-2025-09-27-22-35' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "7 hotfixes. 4 are cc:stable and the remainder address post-6.16 issues
  or aren't considered necessary for -stable kernels. 6 of these fixes
  are for MM.

  All singletons, please see the changelogs for details"

* tag 'mm-hotfixes-stable-2025-09-27-22-35' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  include/linux/pgtable.h: convert arch_enter_lazy_mmu_mode() and friends to static inlines
  mm/damon/sysfs: do not ignore callback's return value in damon_sysfs_damon_call()
  mailmap: add entry for Bence Csókás
  fs/proc/task_mmu: check p->vec_buf for NULL
  kmsan: fix out-of-bounds access to shadow memory
  mm/hugetlb: fix copy_hugetlb_page_range() to use ->pt_share_count
  mm/hugetlb: fix folio is still mapped when deleted
Linus Torvalds committed on 2025-09-28 09:32:00 -07:00
9 changed files with 49 additions and 21 deletions


@@ -134,6 +134,7 @@ Ben M Cahill <ben.m.cahill@intel.com>
 Ben Widawsky <bwidawsk@kernel.org> <ben@bwidawsk.net>
 Ben Widawsky <bwidawsk@kernel.org> <ben.widawsky@intel.com>
 Ben Widawsky <bwidawsk@kernel.org> <benjamin.widawsky@intel.com>
+Bence Csókás <bence98@sch.bme.hu> <csokas.bence@prolan.hu>
 Benjamin Poirier <benjamin.poirier@gmail.com> <bpoirier@suse.de>
 Benjamin Tissoires <bentiss@kernel.org> <benjamin.tissoires@gmail.com>
 Benjamin Tissoires <bentiss@kernel.org> <benjamin.tissoires@redhat.com>


@@ -517,14 +517,16 @@ static bool remove_inode_single_folio(struct hstate *h, struct inode *inode,
 	/*
 	 * If folio is mapped, it was faulted in after being
-	 * unmapped in caller. Unmap (again) while holding
-	 * the fault mutex. The mutex will prevent faults
-	 * until we finish removing the folio.
+	 * unmapped in caller or hugetlb_vmdelete_list() skips
+	 * unmapping it due to fail to grab lock. Unmap (again)
+	 * while holding the fault mutex. The mutex will prevent
+	 * faults until we finish removing the folio. Hold folio
+	 * lock to guarantee no concurrent migration.
 	 */
+	folio_lock(folio);
 	if (unlikely(folio_mapped(folio)))
 		hugetlb_unmap_file_folio(h, mapping, folio, index);
 
-	folio_lock(folio);
 	/*
 	 * We must remove the folio from page cache before removing
 	 * the region/ reserve map (hugetlb_unreserve_pages). In
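As an aside on the ordering the new comment spells out: below is a minimal userspace sketch of the same "lock first, then re-check and undo" pattern, with an invented folio_like type and a pthread mutex standing in for the folio lock (illustrative only, not kernel code).

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct folio_like {
	pthread_mutex_t lock;
	bool mapped;
};

static void unmap(struct folio_like *f)
{
	f->mapped = false;
}

static void remove_object(struct folio_like *f)
{
	pthread_mutex_lock(&f->lock);	/* take the lock before the re-check */
	if (f->mapped)			/* may have been mapped again meanwhile */
		unmap(f);
	/* ... removal proper would happen here, still under the lock ... */
	pthread_mutex_unlock(&f->lock);
}

int main(void)
{
	struct folio_like f = { PTHREAD_MUTEX_INITIALIZER, true };

	remove_object(&f);
	printf("mapped after removal: %d\n", f.mapped);
	return 0;
}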


@@ -2417,6 +2417,9 @@ static void pagemap_scan_backout_range(struct pagemap_scan_private *p,
 {
 	struct page_region *cur_buf = &p->vec_buf[p->vec_buf_index];
 
+	if (!p->vec_buf)
+		return;
+
 	if (cur_buf->start != addr)
 		cur_buf->end = addr;
 	else


@@ -631,6 +631,11 @@ static inline int ptdesc_pmd_pts_count(struct ptdesc *ptdesc)
 {
 	return atomic_read(&ptdesc->pt_share_count);
 }
+
+static inline bool ptdesc_pmd_is_shared(struct ptdesc *ptdesc)
+{
+	return !!ptdesc_pmd_pts_count(ptdesc);
+}
 #else
 static inline void ptdesc_pmd_pts_init(struct ptdesc *ptdesc)
 {


@@ -232,9 +232,9 @@ static inline int pmd_dirty(pmd_t pmd)
  * and the mode cannot be used in interrupt context.
  */
 #ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
-#define arch_enter_lazy_mmu_mode()	do {} while (0)
-#define arch_leave_lazy_mmu_mode()	do {} while (0)
-#define arch_flush_lazy_mmu_mode()	do {} while (0)
+static inline void arch_enter_lazy_mmu_mode(void) {}
+static inline void arch_leave_lazy_mmu_mode(void) {}
+static inline void arch_flush_lazy_mmu_mode(void) {}
 #endif
 
 #ifndef pte_batch_hint
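For context on the conversion above: a small standalone sketch (illustrative only, not kernel code; hook_macro and hook_inline are made-up names) of what an empty static inline gives over an empty macro — a real, type-checked prototype that can be used like any other function, while the empty body still compiles down to nothing.

#include <stdio.h>

/* Fallback hook as an empty macro: pure text substitution. */
#define hook_macro()	do {} while (0)

/* Fallback hook as an empty static inline: a real function with a
 * prototype, so callers are type-checked and the symbol can be taken
 * by address, yet the empty body still optimizes away. */
static inline void hook_inline(void) {}

int main(void)
{
	void (*fp)(void) = hook_inline;	/* not expressible with the macro form */

	hook_macro();
	hook_inline();
	fp();
	printf("both forms are no-ops\n");
	return 0;
}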


@@ -1592,12 +1592,14 @@ static int damon_sysfs_damon_call(int (*fn)(void *data),
 		struct damon_sysfs_kdamond *kdamond)
 {
 	struct damon_call_control call_control = {};
+	int err;
 
 	if (!kdamond->damon_ctx)
 		return -EINVAL;
 	call_control.fn = fn;
 	call_control.data = kdamond;
-	return damon_call(kdamond->damon_ctx, &call_control);
+	err = damon_call(kdamond->damon_ctx, &call_control);
+	return err ? err : call_control.return_code;
 }
 
 struct damon_sysfs_schemes_walk_data {
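The hunk above makes the sysfs wrapper surface the callback's own return_code whenever the dispatch itself succeeded. A small userspace sketch of that error-propagation shape, with invented names (this is not the DAMON API):

#include <errno.h>
#include <stdio.h>

struct call_control {
	int (*fn)(void *data);
	void *data;
	int return_code;		/* what the callback itself returned */
};

/* Dispatcher: returns nonzero only when the call could not be made at all. */
static int do_call(struct call_control *ctl)
{
	if (!ctl->fn)
		return -EINVAL;
	ctl->return_code = ctl->fn(ctl->data);
	return 0;			/* dispatching worked */
}

static int callback(void *data)
{
	(void)data;
	return -1;			/* the callee reports its own failure */
}

int main(void)
{
	struct call_control ctl = { .fn = callback };
	int err = do_call(&ctl);

	/* Same shape as the fix: a dispatch error wins, otherwise the
	 * callback's return code is what the caller sees. */
	printf("result: %d\n", err ? err : ctl.return_code);
	return 0;
}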


@@ -5594,18 +5594,13 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 			break;
 		}
 
-		/*
-		 * If the pagetables are shared don't copy or take references.
-		 *
-		 * dst_pte == src_pte is the common case of src/dest sharing.
-		 * However, src could have 'unshared' and dst shares with
-		 * another vma. So page_count of ptep page is checked instead
-		 * to reliably determine whether pte is shared.
-		 */
-		if (page_count(virt_to_page(dst_pte)) > 1) {
+#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
+		/* If the pagetables are shared, there is nothing to do */
+		if (ptdesc_pmd_is_shared(virt_to_ptdesc(dst_pte))) {
 			addr |= last_addr_mask;
 			continue;
 		}
+#endif
 
 		dst_ptl = huge_pte_lock(h, dst, dst_pte);
 		src_ptl = huge_pte_lockptr(h, src, src_pte);
@@ -7602,7 +7597,7 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
 	hugetlb_vma_assert_locked(vma);
 	if (sz != PMD_SIZE)
 		return 0;
-	if (!ptdesc_pmd_pts_count(virt_to_ptdesc(ptep)))
+	if (!ptdesc_pmd_is_shared(virt_to_ptdesc(ptep)))
 		return 0;
 
 	pud_clear(pud);
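Both hugetlb hunks stop inferring sharing from page_count() and instead ask a dedicated share counter (ptdesc_pmd_is_shared(), added in the ptdesc hunk earlier). A standalone sketch of that style of helper set, using C11 atomics and invented names rather than the kernel's ptdesc API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct table_desc {
	atomic_int share_count;		/* extra users sharing this table */
};

static inline void table_share_get(struct table_desc *d)
{
	atomic_fetch_add(&d->share_count, 1);
}

static inline void table_share_put(struct table_desc *d)
{
	atomic_fetch_sub(&d->share_count, 1);
}

static inline bool table_is_shared(struct table_desc *d)
{
	/* A dedicated counter answers "is this shared?" directly, instead of
	 * inferring it from an unrelated reference count. */
	return atomic_load(&d->share_count) != 0;
}

int main(void)
{
	struct table_desc d = { 0 };

	printf("shared: %d\n", table_is_shared(&d));	/* 0 */
	table_share_get(&d);
	printf("shared: %d\n", table_is_shared(&d));	/* 1 */
	table_share_put(&d);
	printf("shared: %d\n", table_is_shared(&d));	/* 0 */
	return 0;
}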


@@ -195,7 +195,8 @@ void kmsan_internal_set_shadow_origin(void *addr, size_t size, int b,
 				      u32 origin, bool checked)
 {
 	u64 address = (u64)addr;
-	u32 *shadow_start, *origin_start;
+	void *shadow_start;
+	u32 *aligned_shadow, *origin_start;
 	size_t pad = 0;
 
 	KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(addr, size));
@@ -214,9 +215,12 @@ void kmsan_internal_set_shadow_origin(void *addr, size_t size, int b,
 	}
 	__memset(shadow_start, b, size);
 
-	if (!IS_ALIGNED(address, KMSAN_ORIGIN_SIZE)) {
+	if (IS_ALIGNED(address, KMSAN_ORIGIN_SIZE)) {
+		aligned_shadow = shadow_start;
+	} else {
 		pad = address % KMSAN_ORIGIN_SIZE;
 		address -= pad;
+		aligned_shadow = shadow_start - pad;
 		size += pad;
 	}
 	size = ALIGN(size, KMSAN_ORIGIN_SIZE);
@@ -230,7 +234,7 @@ void kmsan_internal_set_shadow_origin(void *addr, size_t size, int b,
 	 * corresponding shadow slot is zero.
 	 */
 	for (int i = 0; i < size / KMSAN_ORIGIN_SIZE; i++) {
-		if (origin || !shadow_start[i])
+		if (origin || !aligned_shadow[i])
 			origin_start[i] = origin;
 	}
 }
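A worked example of the alignment arithmetic introduced above, as a standalone userspace program; ORIGIN_SIZE, the buffer, and the offsets are invented values for illustration. The point it demonstrates: the shadow pointer is rewound by the same pad that is added to the size, so the per-granule 4-byte loop stays within the shadow bytes that correspond to the rounded range.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ORIGIN_SIZE	4UL
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned char shadow[32];			/* stand-in shadow region */
	uint64_t address = 0x1003;			/* 3 bytes past a granule boundary */
	size_t size = 10;				/* bytes being poisoned */
	unsigned char *shadow_start = shadow + 3;	/* shadow byte for 'address' */

	memset(shadow, 0, sizeof(shadow));

	size_t pad = address % ORIGIN_SIZE;			/* 3 */
	address -= pad;						/* back to 0x1000 */
	unsigned char *aligned_shadow = shadow_start - pad;	/* back to shadow[0] */
	size += pad;						/* 13 */
	size = ALIGN_UP(size, ORIGIN_SIZE);			/* 16 -> 4 granules */

	/* The per-granule loop walks aligned_shadow in 4-byte steps; starting
	 * on the granule boundary keeps all four reads inside shadow[0..15]. */
	for (size_t i = 0; i < size / ORIGIN_SIZE; i++) {
		uint32_t word;

		memcpy(&word, aligned_shadow + i * ORIGIN_SIZE, sizeof(word));
		printf("granule %zu: %#x\n", i, (unsigned)word);
	}
	printf("pad=%zu granules=%zu\n", pad, size / ORIGIN_SIZE);
	return 0;
}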


@@ -556,6 +556,21 @@ DEFINE_TEST_MEMSETXX(16)
 DEFINE_TEST_MEMSETXX(32)
 DEFINE_TEST_MEMSETXX(64)
 
+/* Test case: ensure that KMSAN does not access shadow memory out of bounds. */
+static void test_memset_on_guarded_buffer(struct kunit *test)
+{
+	void *buf = vmalloc(PAGE_SIZE);
+
+	kunit_info(test,
+		   "memset() on ends of guarded buffer should not crash\n");
+	for (size_t size = 0; size <= 128; size++) {
+		memset(buf, 0xff, size);
+		memset(buf + PAGE_SIZE - size, 0xff, size);
+	}
+	vfree(buf);
+}
+
 static noinline void fibonacci(int *array, int size, int start)
 {
 	if (start < 2 || (start == size))
@@ -677,6 +692,7 @@ static struct kunit_case kmsan_test_cases[] = {
 	KUNIT_CASE(test_memset16),
 	KUNIT_CASE(test_memset32),
 	KUNIT_CASE(test_memset64),
+	KUNIT_CASE(test_memset_on_guarded_buffer),
 	KUNIT_CASE(test_long_origin_chain),
 	KUNIT_CASE(test_stackdepot_roundtrip),
 	KUNIT_CASE(test_unpoison_memory),