Merge tag 'mm-hotfixes-stable-2025-09-17-21-10' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "15 hotfixes. 11 are cc:stable and the remainder address post-6.16
  issues or aren't considered necessary for -stable kernels. 13 of these
  fixes are for MM.

  The usual shower of singletons, plus

   - fixes from Hugh to address various misbehaviors in get_user_pages()

   - patches from SeongJae to address a quite severe issue in DAMON

   - another series also from SeongJae which completes some fixes for a
     DAMON startup issue"

* tag 'mm-hotfixes-stable-2025-09-17-21-10' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  zram: fix slot write race condition
  nilfs2: fix CFI failure when accessing /sys/fs/nilfs2/features/*
  samples/damon/mtier: avoid starting DAMON before initialization
  samples/damon/prcl: avoid starting DAMON before initialization
  samples/damon/wsse: avoid starting DAMON before initialization
  MAINTAINERS: add Lance Yang as a THP reviewer
  MAINTAINERS: add Jann Horn as rmap reviewer
  mm/damon/sysfs: use dynamically allocated repeat mode damon_call_control
  mm/damon/core: introduce damon_call_control->dealloc_on_cancel
  mm: folio_may_be_lru_cached() unless folio_test_large()
  mm: revert "mm: vmscan.c: fix OOM on swap stress test"
  mm: revert "mm/gup: clear the LRU flag of a page before adding to LRU batch"
  mm/gup: local lru_add_drain() to avoid lru_add_drain_all()
  mm/gup: check ref_count instead of lru before migration
Author: Linus Torvalds
Date:   2025-09-17 21:34:26 -07:00

15 changed files with 94 additions and 52 deletions

diff --git a/MAINTAINERS b/MAINTAINERS

@@ -16196,6 +16196,7 @@ R: Rik van Riel <riel@surriel.com>
 R: Liam R. Howlett <Liam.Howlett@oracle.com>
 R: Vlastimil Babka <vbabka@suse.cz>
 R: Harry Yoo <harry.yoo@oracle.com>
+R: Jann Horn <jannh@google.com>
 L: linux-mm@kvack.org
 S: Maintained
 F: include/linux/rmap.h
@@ -16240,6 +16241,7 @@ R: Nico Pache <npache@redhat.com>
 R: Ryan Roberts <ryan.roberts@arm.com>
 R: Dev Jain <dev.jain@arm.com>
 R: Barry Song <baohua@kernel.org>
+R: Lance Yang <lance.yang@linux.dev>
 L: linux-mm@kvack.org
 S: Maintained
 W: http://www.linux-mm.org

diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c

@@ -1795,6 +1795,7 @@ static int write_same_filled_page(struct zram *zram, unsigned long fill,
                                   u32 index)
 {
         zram_slot_lock(zram, index);
+        zram_free_page(zram, index);
         zram_set_flag(zram, index, ZRAM_SAME);
         zram_set_handle(zram, index, fill);
         zram_slot_unlock(zram, index);
@@ -1832,6 +1833,7 @@ static int write_incompressible_page(struct zram *zram, struct page *page,
         kunmap_local(src);
 
         zram_slot_lock(zram, index);
+        zram_free_page(zram, index);
         zram_set_flag(zram, index, ZRAM_HUGE);
         zram_set_handle(zram, index, handle);
         zram_set_obj_size(zram, index, PAGE_SIZE);
@@ -1855,11 +1857,6 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
         unsigned long element;
         bool same_filled;
 
-        /* First, free memory allocated to this slot (if any) */
-        zram_slot_lock(zram, index);
-        zram_free_page(zram, index);
-        zram_slot_unlock(zram, index);
-
         mem = kmap_local_page(page);
         same_filled = page_same_filled(mem, &element);
         kunmap_local(mem);
@@ -1901,6 +1898,7 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
         zcomp_stream_put(zstrm);
 
         zram_slot_lock(zram, index);
+        zram_free_page(zram, index);
         zram_set_handle(zram, index, handle);
         zram_set_obj_size(zram, index, comp_len);
         zram_slot_unlock(zram, index);
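
All three write paths now follow the same shape: rather than freeing the slot in a separate lock/unlock section at the top of zram_write_page(), the old contents are released inside the same critical section that installs the new flags and handle, so a concurrent write to the same slot can no longer slip in between the free and the update. A minimal sketch of that pattern, using the driver's slot helpers but a hypothetical function name (the driver keeps it open-coded in each path):

        /*
         * Free the previous contents and publish the new ones atomically with
         * respect to other slot writers. Illustrative only, not driver code.
         */
        static void example_update_slot(struct zram *zram, u32 index,
                                        unsigned long handle, unsigned int comp_len)
        {
                zram_slot_lock(zram, index);
                zram_free_page(zram, index);            /* drop whatever was there */
                zram_set_handle(zram, index, handle);
                zram_set_obj_size(zram, index, comp_len);
                zram_slot_unlock(zram, index);
        }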

diff --git a/fs/nilfs2/sysfs.c b/fs/nilfs2/sysfs.c

@@ -1075,7 +1075,7 @@ void nilfs_sysfs_delete_device_group(struct the_nilfs *nilfs)
  ************************************************************************/
 static ssize_t nilfs_feature_revision_show(struct kobject *kobj,
-                                           struct attribute *attr, char *buf)
+                                           struct kobj_attribute *attr, char *buf)
 {
         return sysfs_emit(buf, "%d.%d\n",
                           NILFS_CURRENT_REV, NILFS_MINOR_REV);
@@ -1087,7 +1087,7 @@ static const char features_readme_str[] =
         "(1) revision\n\tshow current revision of NILFS file system driver.\n";
 
 static ssize_t nilfs_feature_README_show(struct kobject *kobj,
-                                         struct attribute *attr,
+                                         struct kobj_attribute *attr,
                                          char *buf)
 {
         return sysfs_emit(buf, features_readme_str);

diff --git a/fs/nilfs2/sysfs.h b/fs/nilfs2/sysfs.h

@@ -50,16 +50,16 @@ struct nilfs_sysfs_dev_subgroups {
         struct completion sg_segments_kobj_unregister;
 };
 
-#define NILFS_COMMON_ATTR_STRUCT(name) \
+#define NILFS_KOBJ_ATTR_STRUCT(name) \
 struct nilfs_##name##_attr { \
         struct attribute attr; \
-        ssize_t (*show)(struct kobject *, struct attribute *, \
+        ssize_t (*show)(struct kobject *, struct kobj_attribute *, \
                         char *); \
-        ssize_t (*store)(struct kobject *, struct attribute *, \
+        ssize_t (*store)(struct kobject *, struct kobj_attribute *, \
                          const char *, size_t); \
 }
 
-NILFS_COMMON_ATTR_STRUCT(feature);
+NILFS_KOBJ_ATTR_STRUCT(feature);
 
 #define NILFS_DEV_ATTR_STRUCT(name) \
 struct nilfs_##name##_attr { \
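
The CFI failure being fixed is a prototype mismatch: the feature attributes' show() routines are reached through sysfs ops that expect kobj_attribute-typed callbacks, but they were declared to take a plain struct attribute, and kernel CFI rejects an indirect call whose target signature does not match exactly. The two hunks above make the declared types agree with what the caller passes. For reference, a conventional kobj_attribute definition that satisfies CFI looks roughly like this (illustrative attribute, not nilfs code):

        static ssize_t example_show(struct kobject *kobj, struct kobj_attribute *attr,
                                    char *buf)
        {
                return sysfs_emit(buf, "%d\n", 42);
        }

        /* __ATTR_RO() wires .show to example_show(); the attribute would be
         * exposed with sysfs_create_file(kobj, &example_attr.attr). */
        static struct kobj_attribute example_attr = __ATTR_RO(example);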

diff --git a/include/linux/damon.h b/include/linux/damon.h

@@ -636,6 +636,7 @@ struct damon_operations {
  * @data: Data that will be passed to @fn.
  * @repeat: Repeat invocations.
  * @return_code: Return code from @fn invocation.
+ * @dealloc_on_cancel: De-allocate when canceled.
  *
  * Control damon_call(), which requests specific kdamond to invoke a given
  * function. Refer to damon_call() for more details.
@@ -645,6 +646,7 @@ struct damon_call_control {
         void *data;
         bool repeat;
         int return_code;
+        bool dealloc_on_cancel;
         /* private: internal use only */
         /* informs if the kdamond finished handling of the request */
         struct completion completion;
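
The new flag is there for repeat-mode controls that are heap-allocated: a repeated call never completes, so if the kdamond exits and the request is canceled, nobody on the submitting side remains to free the structure; setting dealloc_on_cancel makes the kdamond kfree() it instead (see the mm/damon/core.c hunk below). A minimal usage sketch under those assumptions, mirroring what mm/damon/sysfs.c now does; the function names here are illustrative:

        #include <linux/damon.h>
        #include <linux/slab.h>

        static int example_repeat_fn(void *data)
        {
                /* periodic work driven by the kdamond; the real callback is
                 * damon_sysfs_repeat_call_fn() in mm/damon/sysfs.c */
                return 0;
        }

        static int example_submit_repeat_call(struct damon_ctx *ctx, void *data)
        {
                struct damon_call_control *control;

                control = kmalloc(sizeof(*control), GFP_KERNEL);
                if (!control)
                        return -ENOMEM;

                control->fn = example_repeat_fn;
                control->data = data;
                control->repeat = true;                 /* keep invoking fn, never complete */
                control->dealloc_on_cancel = true;      /* kdamond kfree()s it if canceled */
                damon_call(ctx, control);
                return 0;
        }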

diff --git a/include/linux/swap.h b/include/linux/swap.h

@@ -385,6 +385,16 @@ void folio_add_lru_vma(struct folio *, struct vm_area_struct *);
 void mark_page_accessed(struct page *);
 void folio_mark_accessed(struct folio *);
 
+static inline bool folio_may_be_lru_cached(struct folio *folio)
+{
+        /*
+         * Holding PMD-sized folios in per-CPU LRU cache unbalances accounting.
+         * Holding small numbers of low-order mTHP folios in per-CPU LRU cache
+         * will be sensible, but nobody has implemented and tested that yet.
+         */
+        return !folio_test_large(folio);
+}
+
 extern atomic_t lru_disable_count;
 
 static inline bool lru_cache_disabled(void)
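
The helper centralizes a policy that was previously open-coded as folio_test_large() checks at each per-CPU LRU batching site, so every caller now asks the same question: may this folio sit in a per-CPU batch at all? The call-site shape that the mm/mlock.c and mm/swap.c hunks below switch to is roughly the following; the batch and drain-helper names are placeholders:

        #include <linux/mm.h>
        #include <linux/pagevec.h>
        #include <linux/swap.h>

        static void example_batch_or_flush(struct folio_batch *fbatch, struct folio *folio,
                                           void (*drain_fn)(struct folio_batch *))
        {
                folio_get(folio);
                /* Flush at once if the batch filled up, if this folio must not
                 * linger in a per-CPU cache, or if LRU caching is disabled. */
                if (!folio_batch_add(fbatch, folio) ||
                    !folio_may_be_lru_cached(folio) || lru_cache_disabled())
                        drain_fn(fbatch);
        }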

diff --git a/mm/damon/core.c b/mm/damon/core.c

@@ -2479,10 +2479,14 @@ static void kdamond_call(struct damon_ctx *ctx, bool cancel)
                 mutex_lock(&ctx->call_controls_lock);
                 list_del(&control->list);
                 mutex_unlock(&ctx->call_controls_lock);
-                if (!control->repeat)
+                if (!control->repeat) {
                         complete(&control->completion);
-                else
+                } else if (control->canceled && control->dealloc_on_cancel) {
+                        kfree(control);
+                        continue;
+                } else {
                         list_add(&control->list, &repeat_controls);
+                }
         }
         control = list_first_entry_or_null(&repeat_controls,
                         struct damon_call_control, list);

diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c

@@ -1534,14 +1534,10 @@ static int damon_sysfs_repeat_call_fn(void *data)
         return 0;
 }
 
-static struct damon_call_control damon_sysfs_repeat_call_control = {
-        .fn = damon_sysfs_repeat_call_fn,
-        .repeat = true,
-};
-
 static int damon_sysfs_turn_damon_on(struct damon_sysfs_kdamond *kdamond)
 {
         struct damon_ctx *ctx;
+        struct damon_call_control *repeat_call_control;
         int err;
 
         if (damon_sysfs_kdamond_running(kdamond))
@@ -1554,18 +1550,29 @@ static int damon_sysfs_turn_damon_on(struct damon_sysfs_kdamond *kdamond)
                 damon_destroy_ctx(kdamond->damon_ctx);
         kdamond->damon_ctx = NULL;
 
+        repeat_call_control = kmalloc(sizeof(*repeat_call_control),
+                        GFP_KERNEL);
+        if (!repeat_call_control)
+                return -ENOMEM;
+
         ctx = damon_sysfs_build_ctx(kdamond->contexts->contexts_arr[0]);
-        if (IS_ERR(ctx))
+        if (IS_ERR(ctx)) {
+                kfree(repeat_call_control);
                 return PTR_ERR(ctx);
+        }
         err = damon_start(&ctx, 1, false);
         if (err) {
+                kfree(repeat_call_control);
                 damon_destroy_ctx(ctx);
                 return err;
         }
         kdamond->damon_ctx = ctx;
 
-        damon_sysfs_repeat_call_control.data = kdamond;
-        damon_call(ctx, &damon_sysfs_repeat_call_control);
+        repeat_call_control->fn = damon_sysfs_repeat_call_fn;
+        repeat_call_control->data = kdamond;
+        repeat_call_control->repeat = true;
+        repeat_call_control->dealloc_on_cancel = true;
+        damon_call(ctx, repeat_call_control);
         return err;
 }

diff --git a/mm/gup.c b/mm/gup.c

@@ -2287,8 +2287,8 @@ static unsigned long collect_longterm_unpinnable_folios(
                 struct pages_or_folios *pofs)
 {
         unsigned long collected = 0;
-        bool drain_allow = true;
         struct folio *folio;
+        int drained = 0;
         long i = 0;
 
         for (folio = pofs_get_folio(pofs, i); folio;
@@ -2307,9 +2307,17 @@
                         continue;
                 }
 
-                if (!folio_test_lru(folio) && drain_allow) {
+                if (drained == 0 && folio_may_be_lru_cached(folio) &&
+                    folio_ref_count(folio) !=
+                    folio_expected_ref_count(folio) + 1) {
+                        lru_add_drain();
+                        drained = 1;
+                }
+                if (drained == 1 && folio_may_be_lru_cached(folio) &&
+                    folio_ref_count(folio) !=
+                    folio_expected_ref_count(folio) + 1) {
                         lru_add_drain_all();
-                        drain_allow = false;
+                        drained = 2;
                 }
 
                 if (!folio_isolate_lru(folio))
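
After the two reverts in this series, the LRU flag no longer tells this code whether some per-CPU LRU batch still holds a reference to the folio, so it compares the reference count against folio_expected_ref_count() + 1 (the extra 1 being the pin this GUP path itself holds) and escalates draining in two steps: a cheap lru_add_drain() on the local CPU first, and the machine-wide lru_add_drain_all() only if unexplained references remain. The same escalation written as a stand-alone helper, purely for illustration (the kernel keeps it open-coded above):

        #include <linux/mm.h>
        #include <linux/swap.h>

        /* 0 = nothing drained yet, 1 = this CPU drained, 2 = all CPUs drained. */
        static int example_drain_before_isolate(struct folio *folio, int drained)
        {
                /* Only our own pin left? Then no LRU batch is holding a reference. */
                if (!folio_may_be_lru_cached(folio) ||
                    folio_ref_count(folio) == folio_expected_ref_count(folio) + 1)
                        return drained;

                if (drained == 0) {
                        lru_add_drain();                /* cheap: flush this CPU only */
                        drained = 1;
                }
                /* Re-check: the local drain may already have dropped the extra ref. */
                if (drained == 1 &&
                    folio_ref_count(folio) != folio_expected_ref_count(folio) + 1) {
                        lru_add_drain_all();            /* expensive: flush every CPU */
                        drained = 2;
                }
                return drained;
        }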

diff --git a/mm/mlock.c b/mm/mlock.c

@@ -255,7 +255,7 @@ void mlock_folio(struct folio *folio)
 
         folio_get(folio);
         if (!folio_batch_add(fbatch, mlock_lru(folio)) ||
-            folio_test_large(folio) || lru_cache_disabled())
+            !folio_may_be_lru_cached(folio) || lru_cache_disabled())
                 mlock_folio_batch(fbatch);
         local_unlock(&mlock_fbatch.lock);
 }
@@ -278,7 +278,7 @@ void mlock_new_folio(struct folio *folio)
 
         folio_get(folio);
         if (!folio_batch_add(fbatch, mlock_new(folio)) ||
-            folio_test_large(folio) || lru_cache_disabled())
+            !folio_may_be_lru_cached(folio) || lru_cache_disabled())
                 mlock_folio_batch(fbatch);
         local_unlock(&mlock_fbatch.lock);
 }
@@ -299,7 +299,7 @@ void munlock_folio(struct folio *folio)
          */
         folio_get(folio);
         if (!folio_batch_add(fbatch, folio) ||
-            folio_test_large(folio) || lru_cache_disabled())
+            !folio_may_be_lru_cached(folio) || lru_cache_disabled())
                 mlock_folio_batch(fbatch);
         local_unlock(&mlock_fbatch.lock);
 }

diff --git a/mm/swap.c b/mm/swap.c

@@ -164,6 +164,10 @@ static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
         for (i = 0; i < folio_batch_count(fbatch); i++) {
                 struct folio *folio = fbatch->folios[i];
 
+                /* block memcg migration while the folio moves between lru */
+                if (move_fn != lru_add && !folio_test_clear_lru(folio))
+                        continue;
+
                 folio_lruvec_relock_irqsave(folio, &lruvec, &flags);
                 move_fn(lruvec, folio);
@@ -176,14 +180,10 @@ static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
 }
 
 static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch,
-                struct folio *folio, move_fn_t move_fn,
-                bool on_lru, bool disable_irq)
+                struct folio *folio, move_fn_t move_fn, bool disable_irq)
 {
         unsigned long flags;
 
-        if (on_lru && !folio_test_clear_lru(folio))
-                return;
-
         folio_get(folio);
 
         if (disable_irq)
@@ -191,8 +191,8 @@ static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch,
         else
                 local_lock(&cpu_fbatches.lock);
 
-        if (!folio_batch_add(this_cpu_ptr(fbatch), folio) || folio_test_large(folio) ||
-            lru_cache_disabled())
+        if (!folio_batch_add(this_cpu_ptr(fbatch), folio) ||
+            !folio_may_be_lru_cached(folio) || lru_cache_disabled())
                 folio_batch_move_lru(this_cpu_ptr(fbatch), move_fn);
 
         if (disable_irq)
@@ -201,13 +201,13 @@ static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch,
                 local_unlock(&cpu_fbatches.lock);
 }
 
-#define folio_batch_add_and_move(folio, op, on_lru) \
+#define folio_batch_add_and_move(folio, op) \
         __folio_batch_add_and_move( \
                 &cpu_fbatches.op, \
                 folio, \
                 op, \
-                on_lru, \
-                offsetof(struct cpu_fbatches, op) >= offsetof(struct cpu_fbatches, lock_irq) \
+                offsetof(struct cpu_fbatches, op) >= \
+                        offsetof(struct cpu_fbatches, lock_irq) \
         )
 
 static void lru_move_tail(struct lruvec *lruvec, struct folio *folio)
@@ -231,10 +231,10 @@ static void lru_move_tail(struct lruvec *lruvec, struct folio *folio)
 void folio_rotate_reclaimable(struct folio *folio)
 {
         if (folio_test_locked(folio) || folio_test_dirty(folio) ||
-            folio_test_unevictable(folio))
+            folio_test_unevictable(folio) || !folio_test_lru(folio))
                 return;
 
-        folio_batch_add_and_move(folio, lru_move_tail, true);
+        folio_batch_add_and_move(folio, lru_move_tail);
 }
 
 void lru_note_cost_unlock_irq(struct lruvec *lruvec, bool file,
@@ -328,10 +328,11 @@ static void folio_activate_drain(int cpu)
 void folio_activate(struct folio *folio)
 {
-        if (folio_test_active(folio) || folio_test_unevictable(folio))
+        if (folio_test_active(folio) || folio_test_unevictable(folio) ||
+            !folio_test_lru(folio))
                 return;
 
-        folio_batch_add_and_move(folio, lru_activate, true);
+        folio_batch_add_and_move(folio, lru_activate);
 }
 
 #else
@@ -507,7 +508,7 @@ void folio_add_lru(struct folio *folio)
             lru_gen_in_fault() && !(current->flags & PF_MEMALLOC))
                 folio_set_active(folio);
 
-        folio_batch_add_and_move(folio, lru_add, false);
+        folio_batch_add_and_move(folio, lru_add);
 }
 EXPORT_SYMBOL(folio_add_lru);
@@ -685,13 +686,13 @@ void lru_add_drain_cpu(int cpu)
 void deactivate_file_folio(struct folio *folio)
 {
         /* Deactivating an unevictable folio will not accelerate reclaim */
-        if (folio_test_unevictable(folio))
+        if (folio_test_unevictable(folio) || !folio_test_lru(folio))
                 return;
 
         if (lru_gen_enabled() && lru_gen_clear_refs(folio))
                 return;
 
-        folio_batch_add_and_move(folio, lru_deactivate_file, true);
+        folio_batch_add_and_move(folio, lru_deactivate_file);
 }
 
 /*
@@ -704,13 +705,13 @@ void deactivate_file_folio(struct folio *folio)
  */
 void folio_deactivate(struct folio *folio)
 {
-        if (folio_test_unevictable(folio))
+        if (folio_test_unevictable(folio) || !folio_test_lru(folio))
                 return;
 
         if (lru_gen_enabled() ? lru_gen_clear_refs(folio) : !folio_test_active(folio))
                 return;
 
-        folio_batch_add_and_move(folio, lru_deactivate, true);
+        folio_batch_add_and_move(folio, lru_deactivate);
 }
 
 /**
@@ -723,10 +724,11 @@ void folio_deactivate(struct folio *folio)
 void folio_mark_lazyfree(struct folio *folio)
 {
         if (!folio_test_anon(folio) || !folio_test_swapbacked(folio) ||
+            !folio_test_lru(folio) ||
             folio_test_swapcache(folio) || folio_test_unevictable(folio))
                 return;
 
-        folio_batch_add_and_move(folio, lru_lazyfree, true);
+        folio_batch_add_and_move(folio, lru_lazyfree);
 }
 
 void lru_add_drain(void)

diff --git a/mm/vmscan.c b/mm/vmscan.c

@@ -4507,7 +4507,7 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_c
         }
 
         /* ineligible */
-        if (!folio_test_lru(folio) || zone > sc->reclaim_idx) {
+        if (zone > sc->reclaim_idx) {
                 gen = folio_inc_gen(lruvec, folio, false);
                 list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
                 return true;

diff --git a/samples/damon/mtier.c b/samples/damon/mtier.c

@@ -208,6 +208,9 @@ static int damon_sample_mtier_enable_store(
         if (enabled == is_enabled)
                 return 0;
 
+        if (!init_called)
+                return 0;
+
         if (enabled) {
                 err = damon_sample_mtier_start();
                 if (err)

diff --git a/samples/damon/prcl.c b/samples/damon/prcl.c

@@ -137,6 +137,9 @@ static int damon_sample_prcl_enable_store(
         if (enabled == is_enabled)
                 return 0;
 
+        if (!init_called)
+                return 0;
+
         if (enabled) {
                 err = damon_sample_prcl_start();
                 if (err)

diff --git a/samples/damon/wsse.c b/samples/damon/wsse.c

@@ -118,6 +118,9 @@ static int damon_sample_wsse_enable_store(
                 return 0;
 
         if (enabled) {
+                if (!init_called)
+                        return 0;
+
                 err = damon_sample_wsse_start();
                 if (err)
                         enabled = false;
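
All three sample modules add the same guard for the same reason: when the samples are built into the kernel, their enable parameter can be set on the kernel command line, which runs the parameter's store callback before the module's init function, and starting DAMON at that point would operate on uninitialized state. The guard simply refuses to start anything that early; the parameter value itself is still recorded. A toy sketch of the shape, with illustrative names; the parameter wiring (module_param_cb() with a custom setter, as the samples use) is assumed rather than shown:

        #include <linux/kstrtox.h>
        #include <linux/module.h>

        static bool enabled __read_mostly;
        static bool init_called;

        /* "set" handler for the module parameter; may run at boot, before init. */
        static int example_enable_store(const char *val, const struct kernel_param *kp)
        {
                bool is_enabled = enabled;
                int err;

                err = kstrtobool(val, &enabled);
                if (err)
                        return err;
                if (enabled == is_enabled)
                        return 0;

                if (!init_called)
                        return 0;       /* too early: only remember the value */

                /* ... start or stop the sample here, as the real modules do ... */
                return 0;
        }

        static int __init example_init(void)
        {
                init_called = true;
                /* a module could act here on a value set from the command line */
                return 0;
        }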