mm: introduce memdesc_flags_t

Patch series "Add and use memdesc_flags_t".

At some point struct page will be separated from struct slab and struct
folio.  This is a step towards that by introducing a type for the 'flags'
word of all three structures.  This gives us a certain amount of type
safety by establishing that some of these unsigned longs are different
from other unsigned longs in that they contain things like node ID,
section number and zone number in the upper bits.  That lets us have
functions that can be easily called by anyone who has a slab, folio or
page (but not easily by anyone else) to get the node or zone.
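
For illustration, a minimal sketch of the kind of accessor this
enables.  The helper name and the shift/mask values below are invented
for the example; the real definitions are derived from the configured
node/zone/section layout in the page flag headers:

	typedef struct {
		unsigned long f;
	} memdesc_flags_t;

	#define NODES_PGSHIFT	62	/* placeholder value */
	#define NODES_MASK	0x3UL	/* placeholder value */

	/* Callable with the flags word of a page, folio or slab,
	 * but not with an arbitrary unsigned long. */
	static inline int memdesc_nid(memdesc_flags_t flags)
	{
		return (flags.f >> NODES_PGSHIFT) & NODES_MASK;
	}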

There are going to be some unusual merge problems with this, as some odd
bits of the kernel decide they want to print out the flags value (or
something similar) via page->flags and will now need to write
page->flags.f instead.  That accounts for most of the churn here.  Maybe
we should remove these things from the debug output?


This patch (of 11):

Wrap the unsigned long flags in a typedef.  In upcoming patches, this will
provide a strong hint that you can't just pass a random unsigned long to
functions which take this as an argument.
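
As a sketch of the effect at a call site: the bitops still take an
unsigned long *, so the wrapper forces an explicit '.f' access (the
commented-out line illustrates the assumed compile error; it is not
real kernel code):

	memdesc_flags_t flags = { .f = 0 };

	set_bit(PG_locked, &flags.f);	/* ok: passes the word itself */
	/* set_bit(PG_locked, &flags); ...no longer compiles */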

[willy@infradead.org: s/flags/flags.f/ in several architectures]
  Link: https://lkml.kernel.org/r/aKMgPRLD-WnkPxYm@casper.infradead.org
[nicola.vetrini@gmail.com: mips: fix compilation error]
  Link: https://lore.kernel.org/lkml/CA+G9fYvkpmqGr6wjBNHY=dRp71PLCoi2341JxOudi60yqaeUdg@mail.gmail.com/
  Link: https://lkml.kernel.org/r/20250825214245.1838158-1-nicola.vetrini@gmail.com
Link: https://lkml.kernel.org/r/20250805172307.1302730-1-willy@infradead.org
Link: https://lkml.kernel.org/r/20250805172307.1302730-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Zi Yan <ziy@nvidia.com>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 53fbef56e0 (parent 4e915656a3)
Matthew Wilcox (Oracle) authored 2025-08-05 18:22:51 +01:00; committed by Andrew Morton
58 changed files with 195 additions and 190 deletions


@@ -704,7 +704,7 @@ static inline void arc_slc_enable(void)
void flush_dcache_folio(struct folio *folio)
{
-clear_bit(PG_dc_clean, &folio->flags);
+clear_bit(PG_dc_clean, &folio->flags.f);
return;
}
EXPORT_SYMBOL(flush_dcache_folio);
@@ -889,8 +889,8 @@ void copy_user_highpage(struct page *to, struct page *from,
copy_page(kto, kfrom);
-clear_bit(PG_dc_clean, &dst->flags);
-clear_bit(PG_dc_clean, &src->flags);
+clear_bit(PG_dc_clean, &dst->flags.f);
+clear_bit(PG_dc_clean, &src->flags.f);
kunmap_atomic(kto);
kunmap_atomic(kfrom);
@@ -900,7 +900,7 @@ void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
{
struct folio *folio = page_folio(page);
clear_page(to);
-clear_bit(PG_dc_clean, &folio->flags);
+clear_bit(PG_dc_clean, &folio->flags.f);
}
EXPORT_SYMBOL(clear_user_page);


@@ -488,7 +488,7 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
*/
if (vma->vm_flags & VM_EXEC) {
struct folio *folio = page_folio(page);
-int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags);
+int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags.f);
if (dirty) {
unsigned long offset = offset_in_folio(folio, paddr);
nr = folio_nr_pages(folio);


@@ -17,7 +17,7 @@
static inline void arch_clear_hugetlb_flags(struct folio *folio)
{
-clear_bit(PG_dcache_clean, &folio->flags);
+clear_bit(PG_dcache_clean, &folio->flags.f);
}
#define arch_clear_hugetlb_flags arch_clear_hugetlb_flags


@@ -67,7 +67,7 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
struct folio *src = page_folio(from);
void *kto = kmap_atomic(to);
-if (!test_and_set_bit(PG_dcache_clean, &src->flags))
+if (!test_and_set_bit(PG_dcache_clean, &src->flags.f))
__flush_dcache_folio(folio_flush_mapping(src), src);
raw_spin_lock(&minicache_lock);


@@ -73,7 +73,7 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
unsigned int offset = CACHE_COLOUR(vaddr);
unsigned long kfrom, kto;
-if (!test_and_set_bit(PG_dcache_clean, &src->flags))
+if (!test_and_set_bit(PG_dcache_clean, &src->flags.f))
__flush_dcache_folio(folio_flush_mapping(src), src);
/* FIXME: not highmem safe */


@@ -87,7 +87,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
struct folio *src = page_folio(from);
void *kto = kmap_atomic(to);
-if (!test_and_set_bit(PG_dcache_clean, &src->flags))
+if (!test_and_set_bit(PG_dcache_clean, &src->flags.f))
__flush_dcache_folio(folio_flush_mapping(src), src);
raw_spin_lock(&minicache_lock);


@@ -718,7 +718,7 @@ static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
if (size < sz)
break;
if (!offset)
-set_bit(PG_dcache_clean, &folio->flags);
+set_bit(PG_dcache_clean, &folio->flags.f);
offset = 0;
size -= sz;
if (!size)


@@ -203,7 +203,7 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
folio = page_folio(pfn_to_page(pfn));
mapping = folio_flush_mapping(folio);
-if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
+if (!test_and_set_bit(PG_dcache_clean, &folio->flags.f))
__flush_dcache_folio(mapping, folio);
if (mapping) {
if (cache_is_vivt())


@@ -304,7 +304,7 @@ void __sync_icache_dcache(pte_t pteval)
else
mapping = NULL;
-if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
+if (!test_and_set_bit(PG_dcache_clean, &folio->flags.f))
__flush_dcache_folio(mapping, folio);
if (pte_exec(pteval))
@@ -343,8 +343,8 @@ void flush_dcache_folio(struct folio *folio)
return;
if (!cache_ops_need_broadcast() && cache_is_vipt_nonaliasing()) {
-if (test_bit(PG_dcache_clean, &folio->flags))
-clear_bit(PG_dcache_clean, &folio->flags);
+if (test_bit(PG_dcache_clean, &folio->flags.f))
+clear_bit(PG_dcache_clean, &folio->flags.f);
return;
}
@@ -352,14 +352,14 @@ void flush_dcache_folio(struct folio *folio)
if (!cache_ops_need_broadcast() &&
mapping && !folio_mapped(folio))
-clear_bit(PG_dcache_clean, &folio->flags);
+clear_bit(PG_dcache_clean, &folio->flags.f);
else {
__flush_dcache_folio(mapping, folio);
if (mapping && cache_is_vivt())
__flush_dcache_aliases(mapping, folio);
else if (mapping)
__flush_icache_all();
-set_bit(PG_dcache_clean, &folio->flags);
+set_bit(PG_dcache_clean, &folio->flags.f);
}
}
EXPORT_SYMBOL(flush_dcache_folio);


@@ -21,12 +21,12 @@ extern bool arch_hugetlb_migration_supported(struct hstate *h);
static inline void arch_clear_hugetlb_flags(struct folio *folio)
{
-clear_bit(PG_dcache_clean, &folio->flags);
+clear_bit(PG_dcache_clean, &folio->flags.f);
#ifdef CONFIG_ARM64_MTE
if (system_supports_mte()) {
-clear_bit(PG_mte_tagged, &folio->flags);
-clear_bit(PG_mte_lock, &folio->flags);
+clear_bit(PG_mte_tagged, &folio->flags.f);
+clear_bit(PG_mte_lock, &folio->flags.f);
}
#endif
}


@@ -48,12 +48,12 @@ static inline void set_page_mte_tagged(struct page *page)
* before the page flags update.
*/
smp_wmb();
-set_bit(PG_mte_tagged, &page->flags);
+set_bit(PG_mte_tagged, &page->flags.f);
}
static inline bool page_mte_tagged(struct page *page)
{
-bool ret = test_bit(PG_mte_tagged, &page->flags);
+bool ret = test_bit(PG_mte_tagged, &page->flags.f);
VM_WARN_ON_ONCE(folio_test_hugetlb(page_folio(page)));
@@ -82,7 +82,7 @@ static inline bool try_page_mte_tagging(struct page *page)
{
VM_WARN_ON_ONCE(folio_test_hugetlb(page_folio(page)));
-if (!test_and_set_bit(PG_mte_lock, &page->flags))
+if (!test_and_set_bit(PG_mte_lock, &page->flags.f))
return true;
/*
@@ -90,7 +90,7 @@ static inline bool try_page_mte_tagging(struct page *page)
* already. Check if the PG_mte_tagged flag has been set or wait
* otherwise.
*/
-smp_cond_load_acquire(&page->flags, VAL & (1UL << PG_mte_tagged));
+smp_cond_load_acquire(&page->flags.f, VAL & (1UL << PG_mte_tagged));
return false;
}
@@ -173,13 +173,13 @@ static inline void folio_set_hugetlb_mte_tagged(struct folio *folio)
* before the folio flags update.
*/
smp_wmb();
-set_bit(PG_mte_tagged, &folio->flags);
+set_bit(PG_mte_tagged, &folio->flags.f);
}
static inline bool folio_test_hugetlb_mte_tagged(struct folio *folio)
{
-bool ret = test_bit(PG_mte_tagged, &folio->flags);
+bool ret = test_bit(PG_mte_tagged, &folio->flags.f);
VM_WARN_ON_ONCE(!folio_test_hugetlb(folio));
@@ -196,7 +196,7 @@ static inline bool folio_try_hugetlb_mte_tagging(struct folio *folio)
{
VM_WARN_ON_ONCE(!folio_test_hugetlb(folio));
-if (!test_and_set_bit(PG_mte_lock, &folio->flags))
+if (!test_and_set_bit(PG_mte_lock, &folio->flags.f))
return true;
/*
@@ -204,7 +204,7 @@ static inline bool folio_try_hugetlb_mte_tagging(struct folio *folio)
* already. Check if the PG_mte_tagged flag has been set or wait
* otherwise.
*/
-smp_cond_load_acquire(&folio->flags, VAL & (1UL << PG_mte_tagged));
+smp_cond_load_acquire(&folio->flags.f, VAL & (1UL << PG_mte_tagged));
return false;
}


@@ -53,11 +53,11 @@ void __sync_icache_dcache(pte_t pte)
{
struct folio *folio = page_folio(pte_page(pte));
-if (!test_bit(PG_dcache_clean, &folio->flags)) {
+if (!test_bit(PG_dcache_clean, &folio->flags.f)) {
sync_icache_aliases((unsigned long)folio_address(folio),
(unsigned long)folio_address(folio) +
folio_size(folio));
-set_bit(PG_dcache_clean, &folio->flags);
+set_bit(PG_dcache_clean, &folio->flags.f);
}
}
EXPORT_SYMBOL_GPL(__sync_icache_dcache);
@@ -69,8 +69,8 @@ EXPORT_SYMBOL_GPL(__sync_icache_dcache);
*/
void flush_dcache_folio(struct folio *folio)
{
-if (test_bit(PG_dcache_clean, &folio->flags))
-clear_bit(PG_dcache_clean, &folio->flags);
+if (test_bit(PG_dcache_clean, &folio->flags.f))
+clear_bit(PG_dcache_clean, &folio->flags.f);
}
EXPORT_SYMBOL(flush_dcache_folio);


@@ -25,12 +25,12 @@ void flush_dcache_folio(struct folio *folio)
mapping = folio_flush_mapping(folio);
if (mapping && !folio_mapped(folio))
-clear_bit(PG_dcache_clean, &folio->flags);
+clear_bit(PG_dcache_clean, &folio->flags.f);
else {
dcache_wbinv_all();
if (mapping)
icache_inv_all();
-set_bit(PG_dcache_clean, &folio->flags);
+set_bit(PG_dcache_clean, &folio->flags.f);
}
}
EXPORT_SYMBOL(flush_dcache_folio);
@@ -56,7 +56,7 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
return;
folio = page_folio(pfn_to_page(pfn));
-if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
+if (!test_and_set_bit(PG_dcache_clean, &folio->flags.f))
dcache_wbinv_all();
if (folio_flush_mapping(folio)) {


@@ -37,11 +37,11 @@
#define PG_dcache_dirty PG_arch_1
#define folio_test_dcache_dirty(folio) \
-test_bit(PG_dcache_dirty, &(folio)->flags)
+test_bit(PG_dcache_dirty, &(folio)->flags.f)
#define folio_set_dcache_dirty(folio) \
-set_bit(PG_dcache_dirty, &(folio)->flags)
+set_bit(PG_dcache_dirty, &(folio)->flags.f)
#define folio_clear_dcache_dirty(folio) \
-clear_bit(PG_dcache_dirty, &(folio)->flags)
+clear_bit(PG_dcache_dirty, &(folio)->flags.f)
extern void (*flush_cache_all)(void);
extern void (*__flush_cache_all)(void);


@@ -187,7 +187,7 @@ void flush_dcache_folio(struct folio *folio)
/* Flush this page if there are aliases. */
if (mapping && !mapping_mapped(mapping)) {
-clear_bit(PG_dcache_clean, &folio->flags);
+clear_bit(PG_dcache_clean, &folio->flags.f);
} else {
__flush_dcache_folio(folio);
if (mapping) {
@@ -195,7 +195,7 @@ void flush_dcache_folio(struct folio *folio)
flush_aliases(mapping, folio);
flush_icache_range(start, start + folio_size(folio));
}
-set_bit(PG_dcache_clean, &folio->flags);
+set_bit(PG_dcache_clean, &folio->flags.f);
}
}
EXPORT_SYMBOL(flush_dcache_folio);
@@ -227,7 +227,7 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
return;
folio = page_folio(pfn_to_page(pfn));
-if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
+if (!test_and_set_bit(PG_dcache_clean, &folio->flags.f))
__flush_dcache_folio(folio);
mapping = folio_flush_mapping(folio);


@@ -75,7 +75,7 @@ static inline void sync_icache_dcache(struct page *page)
static inline void flush_dcache_folio(struct folio *folio)
{
-clear_bit(PG_dc_clean, &folio->flags);
+clear_bit(PG_dc_clean, &folio->flags.f);
}
#define flush_dcache_folio flush_dcache_folio


@@ -83,7 +83,7 @@ void update_cache(struct vm_area_struct *vma, unsigned long address,
{
unsigned long pfn = pte_val(*pte) >> PAGE_SHIFT;
struct folio *folio = page_folio(pfn_to_page(pfn));
-int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags);
+int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags.f);
/*
* Since icaches do not snoop for updated data on OpenRISC, we


@@ -122,10 +122,10 @@ void __update_cache(pte_t pte)
pfn = folio_pfn(folio);
nr = folio_nr_pages(folio);
if (folio_flush_mapping(folio) &&
-test_bit(PG_dcache_dirty, &folio->flags)) {
+test_bit(PG_dcache_dirty, &folio->flags.f)) {
while (nr--)
flush_kernel_dcache_page_addr(pfn_va(pfn + nr));
-clear_bit(PG_dcache_dirty, &folio->flags);
+clear_bit(PG_dcache_dirty, &folio->flags.f);
} else if (parisc_requires_coherency())
while (nr--)
flush_kernel_dcache_page_addr(pfn_va(pfn + nr));
@@ -481,7 +481,7 @@ void flush_dcache_folio(struct folio *folio)
pgoff_t pgoff;
if (mapping && !mapping_mapped(mapping)) {
-set_bit(PG_dcache_dirty, &folio->flags);
+set_bit(PG_dcache_dirty, &folio->flags.f);
return;
}


@@ -40,8 +40,8 @@ static inline void flush_dcache_folio(struct folio *folio)
if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
return;
/* avoid an atomic op if possible */
-if (test_bit(PG_dcache_clean, &folio->flags))
-clear_bit(PG_dcache_clean, &folio->flags);
+if (test_bit(PG_dcache_clean, &folio->flags.f))
+clear_bit(PG_dcache_clean, &folio->flags.f);
}
#define flush_dcache_folio flush_dcache_folio


@@ -939,9 +939,9 @@ static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
/* Clear i-cache for new pages */
folio = page_folio(pfn_to_page(pfn));
-if (!test_bit(PG_dcache_clean, &folio->flags)) {
+if (!test_bit(PG_dcache_clean, &folio->flags.f)) {
flush_dcache_icache_folio(folio);
-set_bit(PG_dcache_clean, &folio->flags);
+set_bit(PG_dcache_clean, &folio->flags.f);
}
}


@@ -1562,11 +1562,11 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
folio = page_folio(pte_page(pte));
/* page is dirty */
-if (!test_bit(PG_dcache_clean, &folio->flags) &&
+if (!test_bit(PG_dcache_clean, &folio->flags.f) &&
!folio_test_reserved(folio)) {
if (trap == INTERRUPT_INST_STORAGE) {
flush_dcache_icache_folio(folio);
-set_bit(PG_dcache_clean, &folio->flags);
+set_bit(PG_dcache_clean, &folio->flags.f);
} else
pp |= HPTE_R_N;
}


@@ -87,9 +87,9 @@ static pte_t set_pte_filter_hash(pte_t pte, unsigned long addr)
struct folio *folio = maybe_pte_to_folio(pte);
if (!folio)
return pte;
-if (!test_bit(PG_dcache_clean, &folio->flags)) {
+if (!test_bit(PG_dcache_clean, &folio->flags.f)) {
flush_dcache_icache_folio(folio);
-set_bit(PG_dcache_clean, &folio->flags);
+set_bit(PG_dcache_clean, &folio->flags.f);
}
}
return pte;
@@ -127,13 +127,13 @@ static inline pte_t set_pte_filter(pte_t pte, unsigned long addr)
return pte;
/* If the page clean, we move on */
-if (test_bit(PG_dcache_clean, &folio->flags))
+if (test_bit(PG_dcache_clean, &folio->flags.f))
return pte;
/* If it's an exec fault, we flush the cache and make it clean */
if (is_exec_fault()) {
flush_dcache_icache_folio(folio);
-set_bit(PG_dcache_clean, &folio->flags);
+set_bit(PG_dcache_clean, &folio->flags.f);
return pte;
}
@@ -175,12 +175,12 @@ static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
goto bail;
/* If the page is already clean, we move on */
-if (test_bit(PG_dcache_clean, &folio->flags))
+if (test_bit(PG_dcache_clean, &folio->flags.f))
goto bail;
/* Clean the page and set PG_dcache_clean */
flush_dcache_icache_folio(folio);
-set_bit(PG_dcache_clean, &folio->flags);
+set_bit(PG_dcache_clean, &folio->flags.f);
bail:
return pte_mkexec(pte);


@@ -23,8 +23,8 @@ static inline void local_flush_icache_range(unsigned long start,
static inline void flush_dcache_folio(struct folio *folio)
{
-if (test_bit(PG_dcache_clean, &folio->flags))
-clear_bit(PG_dcache_clean, &folio->flags);
+if (test_bit(PG_dcache_clean, &folio->flags.f))
+clear_bit(PG_dcache_clean, &folio->flags.f);
}
#define flush_dcache_folio flush_dcache_folio
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1


@@ -7,7 +7,7 @@
static inline void arch_clear_hugetlb_flags(struct folio *folio)
{
-clear_bit(PG_dcache_clean, &folio->flags);
+clear_bit(PG_dcache_clean, &folio->flags.f);
}
#define arch_clear_hugetlb_flags arch_clear_hugetlb_flags


@@ -101,9 +101,9 @@ void flush_icache_pte(struct mm_struct *mm, pte_t pte)
{
struct folio *folio = page_folio(pte_page(pte));
-if (!test_bit(PG_dcache_clean, &folio->flags)) {
+if (!test_bit(PG_dcache_clean, &folio->flags.f)) {
flush_icache_mm(mm, false);
-set_bit(PG_dcache_clean, &folio->flags);
+set_bit(PG_dcache_clean, &folio->flags.f);
}
}
#endif /* CONFIG_MMU */


@@ -39,7 +39,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
static inline void arch_clear_hugetlb_flags(struct folio *folio)
{
-clear_bit(PG_arch_1, &folio->flags);
+clear_bit(PG_arch_1, &folio->flags.f);
}
#define arch_clear_hugetlb_flags arch_clear_hugetlb_flags


@@ -144,7 +144,7 @@ int uv_destroy_folio(struct folio *folio)
folio_get(folio);
rc = uv_destroy(folio_to_phys(folio));
if (!rc)
-clear_bit(PG_arch_1, &folio->flags);
+clear_bit(PG_arch_1, &folio->flags.f);
folio_put(folio);
return rc;
}
@@ -193,7 +193,7 @@ int uv_convert_from_secure_folio(struct folio *folio)
folio_get(folio);
rc = uv_convert_from_secure(folio_to_phys(folio));
if (!rc)
-clear_bit(PG_arch_1, &folio->flags);
+clear_bit(PG_arch_1, &folio->flags.f);
folio_put(folio);
return rc;
}
@@ -289,7 +289,7 @@ static int __make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb)
expected = expected_folio_refs(folio) + 1;
if (!folio_ref_freeze(folio, expected))
return -EBUSY;
-set_bit(PG_arch_1, &folio->flags);
+set_bit(PG_arch_1, &folio->flags.f);
/*
* If the UVC does not succeed or fail immediately, we don't want to
* loop for long, or we might get stall notifications.
@@ -483,18 +483,18 @@ int arch_make_folio_accessible(struct folio *folio)
* convert_to_secure.
* As secure pages are never large folios, both variants can co-exists.
*/
-if (!test_bit(PG_arch_1, &folio->flags))
+if (!test_bit(PG_arch_1, &folio->flags.f))
return 0;
rc = uv_pin_shared(folio_to_phys(folio));
if (!rc) {
-clear_bit(PG_arch_1, &folio->flags);
+clear_bit(PG_arch_1, &folio->flags.f);
return 0;
}
rc = uv_convert_from_secure(folio_to_phys(folio));
if (!rc) {
-clear_bit(PG_arch_1, &folio->flags);
+clear_bit(PG_arch_1, &folio->flags.f);
return 0;
}


@@ -2272,7 +2272,7 @@ static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
start = pmd_val(*pmd) & HPAGE_MASK;
end = start + HPAGE_SIZE;
__storage_key_init_range(start, end);
-set_bit(PG_arch_1, &folio->flags);
+set_bit(PG_arch_1, &folio->flags.f);
cond_resched();
return 0;
}


@@ -155,7 +155,7 @@ static void clear_huge_pte_skeys(struct mm_struct *mm, unsigned long rste)
paddr = rste & PMD_MASK;
}
-if (!test_and_set_bit(PG_arch_1, &folio->flags))
+if (!test_and_set_bit(PG_arch_1, &folio->flags.f))
__storage_key_init_range(paddr, paddr + size);
}


@@ -14,7 +14,7 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
static inline void arch_clear_hugetlb_flags(struct folio *folio)
{
-clear_bit(PG_dcache_clean, &folio->flags);
+clear_bit(PG_dcache_clean, &folio->flags.f);
}
#define arch_clear_hugetlb_flags arch_clear_hugetlb_flags


@@ -114,7 +114,7 @@ static void sh4_flush_dcache_folio(void *arg)
struct address_space *mapping = folio_flush_mapping(folio);
if (mapping && !mapping_mapped(mapping))
-clear_bit(PG_dcache_clean, &folio->flags);
+clear_bit(PG_dcache_clean, &folio->flags.f);
else
#endif
{


@@ -138,7 +138,7 @@ static void sh7705_flush_dcache_folio(void *arg)
struct address_space *mapping = folio_flush_mapping(folio);
if (mapping && !mapping_mapped(mapping))
-clear_bit(PG_dcache_clean, &folio->flags);
+clear_bit(PG_dcache_clean, &folio->flags.f);
else {
unsigned long pfn = folio_pfn(folio);
unsigned int i, nr = folio_nr_pages(folio);


@@ -64,14 +64,14 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
struct folio *folio = page_folio(page);
if (boot_cpu_data.dcache.n_aliases && folio_mapped(folio) &&
-test_bit(PG_dcache_clean, &folio->flags)) {
+test_bit(PG_dcache_clean, &folio->flags.f)) {
void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
memcpy(vto, src, len);
kunmap_coherent(vto);
} else {
memcpy(dst, src, len);
if (boot_cpu_data.dcache.n_aliases)
-clear_bit(PG_dcache_clean, &folio->flags);
+clear_bit(PG_dcache_clean, &folio->flags.f);
}
if (vma->vm_flags & VM_EXEC)
@@ -85,14 +85,14 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
struct folio *folio = page_folio(page);
if (boot_cpu_data.dcache.n_aliases && folio_mapped(folio) &&
-test_bit(PG_dcache_clean, &folio->flags)) {
+test_bit(PG_dcache_clean, &folio->flags.f)) {
void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
memcpy(dst, vfrom, len);
kunmap_coherent(vfrom);
} else {
memcpy(dst, src, len);
if (boot_cpu_data.dcache.n_aliases)
-clear_bit(PG_dcache_clean, &folio->flags);
+clear_bit(PG_dcache_clean, &folio->flags.f);
}
}
@@ -105,7 +105,7 @@ void copy_user_highpage(struct page *to, struct page *from,
vto = kmap_atomic(to);
if (boot_cpu_data.dcache.n_aliases && folio_mapped(src) &&
-test_bit(PG_dcache_clean, &src->flags)) {
+test_bit(PG_dcache_clean, &src->flags.f)) {
vfrom = kmap_coherent(from, vaddr);
copy_page(vto, vfrom);
kunmap_coherent(vfrom);
@@ -148,7 +148,7 @@ void __update_cache(struct vm_area_struct *vma,
if (pfn_valid(pfn)) {
struct folio *folio = page_folio(pfn_to_page(pfn));
-int dirty = !test_and_set_bit(PG_dcache_clean, &folio->flags);
+int dirty = !test_and_set_bit(PG_dcache_clean, &folio->flags.f);
if (dirty)
__flush_purge_region(folio_address(folio),
folio_size(folio));
@@ -162,7 +162,7 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
if (pages_do_alias(addr, vmaddr)) {
if (boot_cpu_data.dcache.n_aliases && folio_mapped(folio) &&
-test_bit(PG_dcache_clean, &folio->flags)) {
+test_bit(PG_dcache_clean, &folio->flags.f)) {
void *kaddr;
kaddr = kmap_coherent(page, vmaddr);


@@ -31,7 +31,7 @@ void *kmap_coherent(struct page *page, unsigned long addr)
enum fixed_addresses idx;
unsigned long vaddr;
-BUG_ON(!test_bit(PG_dcache_clean, &folio->flags));
+BUG_ON(!test_bit(PG_dcache_clean, &folio->flags.f));
preempt_disable();
pagefault_disable();


@@ -224,7 +224,7 @@ inline void flush_dcache_folio_impl(struct folio *folio)
((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)
#define dcache_dirty_cpu(folio) \
-(((folio)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
+(((folio)->flags.f >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
static inline void set_dcache_dirty(struct folio *folio, int this_cpu)
{
@@ -243,7 +243,7 @@ static inline void set_dcache_dirty(struct folio *folio, int this_cpu)
"bne,pn %%xcc, 1b\n\t"
" nop"
: /* no outputs */
-: "r" (mask), "r" (non_cpu_bits), "r" (&folio->flags)
+: "r" (mask), "r" (non_cpu_bits), "r" (&folio->flags.f)
: "g1", "g7");
}
@@ -265,7 +265,7 @@ static inline void clear_dcache_dirty_cpu(struct folio *folio, unsigned long cpu
" nop\n"
"2:"
: /* no outputs */
-: "r" (cpu), "r" (mask), "r" (&folio->flags),
+: "r" (cpu), "r" (mask), "r" (&folio->flags.f),
"i" (PG_dcache_cpu_mask),
"i" (PG_dcache_cpu_shift)
: "g1", "g7");
@@ -292,7 +292,7 @@ static void flush_dcache(unsigned long pfn)
struct folio *folio = page_folio(page);
unsigned long pg_flags;
-pg_flags = folio->flags;
+pg_flags = folio->flags.f;
if (pg_flags & (1UL << PG_dcache_dirty)) {
int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
PG_dcache_cpu_mask);
@@ -480,7 +480,7 @@ void flush_dcache_folio(struct folio *folio)
mapping = folio_flush_mapping(folio);
if (mapping && !mapping_mapped(mapping)) {
-bool dirty = test_bit(PG_dcache_dirty, &folio->flags);
+bool dirty = test_bit(PG_dcache_dirty, &folio->flags.f);
if (dirty) {
int dirty_cpu = dcache_dirty_cpu(folio);


@@ -126,7 +126,7 @@ __setup("debugpat", pat_debug_setup);
static inline enum page_cache_mode get_page_memtype(struct page *pg)
{
-unsigned long pg_flags = pg->flags & _PGMT_MASK;
+unsigned long pg_flags = pg->flags.f & _PGMT_MASK;
if (pg_flags == _PGMT_WB)
return _PAGE_CACHE_MODE_WB;
@@ -161,10 +161,10 @@ static inline void set_page_memtype(struct page *pg,
break;
}
-old_flags = READ_ONCE(pg->flags);
+old_flags = READ_ONCE(pg->flags.f);
do {
new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags;
-} while (!try_cmpxchg(&pg->flags, &old_flags, new_flags));
+} while (!try_cmpxchg(&pg->flags.f, &old_flags, new_flags));
}
#else
static inline enum page_cache_mode get_page_memtype(struct page *pg)


@@ -134,8 +134,8 @@ void flush_dcache_folio(struct folio *folio)
*/
if (mapping && !mapping_mapped(mapping)) {
-if (!test_bit(PG_arch_1, &folio->flags))
-set_bit(PG_arch_1, &folio->flags);
+if (!test_bit(PG_arch_1, &folio->flags.f))
+set_bit(PG_arch_1, &folio->flags.f);
return;
} else {
@@ -232,7 +232,7 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
-if (!folio_test_reserved(folio) && test_bit(PG_arch_1, &folio->flags)) {
+if (!folio_test_reserved(folio) && test_bit(PG_arch_1, &folio->flags.f)) {
unsigned long phys = folio_pfn(folio) * PAGE_SIZE;
unsigned long tmp;
@@ -247,10 +247,10 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
}
preempt_enable();
-clear_bit(PG_arch_1, &folio->flags);
+clear_bit(PG_arch_1, &folio->flags.f);
}
#else
-if (!folio_test_reserved(folio) && !test_bit(PG_arch_1, &folio->flags)
+if (!folio_test_reserved(folio) && !test_bit(PG_arch_1, &folio->flags.f)
&& (vma->vm_flags & VM_EXEC) != 0) {
for (i = 0; i < nr; i++) {
void *paddr = kmap_local_folio(folio, i * PAGE_SIZE);
@@ -258,7 +258,7 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
__invalidate_icache_page((unsigned long)paddr);
kunmap_local(paddr);
}
-set_bit(PG_arch_1, &folio->flags);
+set_bit(PG_arch_1, &folio->flags.f);
}
#endif
}


@@ -935,7 +935,7 @@ static int fuse_check_folio(struct folio *folio)
{
if (folio_mapped(folio) ||
folio->mapping != NULL ||
-(folio->flags & PAGE_FLAGS_CHECK_AT_PREP &
+(folio->flags.f & PAGE_FLAGS_CHECK_AT_PREP &
~(1 << PG_locked |
1 << PG_referenced |
1 << PG_lru |


@@ -40,7 +40,7 @@ static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
"AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
"state 0x%lx\n",
bh, (unsigned long long)bh->b_blocknr, bh->b_state,
-bh->b_folio->mapping, bh->b_folio->flags);
+bh->b_folio->mapping, bh->b_folio->flags.f);
fs_err(sdp, "AIL glock %u:%llu mapping %p\n",
gl->gl_name.ln_type, gl->gl_name.ln_number,
gfs2_glock2aspace(gl));


@@ -230,7 +230,7 @@ static int jffs2_write_begin(const struct kiocb *iocb,
goto release_sem;
}
}
-jffs2_dbg(1, "end write_begin(). folio->flags %lx\n", folio->flags);
+jffs2_dbg(1, "end write_begin(). folio->flags %lx\n", folio->flags.f);
release_sem:
mutex_unlock(&c->alloc_sem);
@@ -259,7 +259,7 @@ static int jffs2_write_end(const struct kiocb *iocb,
jffs2_dbg(1, "%s(): ino #%lu, page at 0x%llx, range %d-%d, flags %lx\n",
__func__, inode->i_ino, folio_pos(folio),
-start, end, folio->flags);
+start, end, folio->flags.f);
/* We need to avoid deadlock with page_cache_read() in
jffs2_garbage_collect_pass(). So the folio must be


@@ -167,7 +167,7 @@ void nilfs_folio_bug(struct folio *folio)
printk(KERN_CRIT "NILFS_FOLIO_BUG(%p): cnt=%d index#=%llu flags=0x%lx "
"mapping=%p ino=%lu\n",
folio, folio_ref_count(folio),
-(unsigned long long)folio->index, folio->flags, m, ino);
+(unsigned long long)folio->index, folio->flags.f, m, ino);
head = folio_buffers(folio);
if (head) {


@@ -163,7 +163,7 @@ u64 stable_page_flags(const struct page *page)
snapshot_page(&ps, page);
folio = &ps.folio_snapshot;
-k = folio->flags;
+k = folio->flags.f;
mapping = (unsigned long)folio->mapping;
is_anon = mapping & FOLIO_MAPPING_ANON;
@@ -238,7 +238,7 @@ u64 stable_page_flags(const struct page *page)
if (u & (1 << KPF_HUGE))
u |= kpf_copy_bit(k, KPF_HWPOISON, PG_hwpoison);
else
-u |= kpf_copy_bit(ps.page_snapshot.flags, KPF_HWPOISON, PG_hwpoison);
+u |= kpf_copy_bit(ps.page_snapshot.flags.f, KPF_HWPOISON, PG_hwpoison);
#endif
u |= kpf_copy_bit(k, KPF_RESERVED, PG_reserved);


@@ -107,7 +107,7 @@ static int do_readpage(struct folio *folio)
size_t offset = 0;
dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
-inode->i_ino, folio->index, i_size, folio->flags);
+inode->i_ino, folio->index, i_size, folio->flags.f);
ubifs_assert(c, !folio_test_checked(folio));
ubifs_assert(c, !folio->private);
@@ -600,7 +600,7 @@ static int populate_page(struct ubifs_info *c, struct folio *folio,
pgoff_t end_index;
dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
-inode->i_ino, folio->index, i_size, folio->flags);
+inode->i_ino, folio->index, i_size, folio->flags.f);
end_index = (i_size - 1) >> PAGE_SHIFT;
if (!i_size || folio->index > end_index) {
@@ -988,7 +988,7 @@ static int ubifs_writepage(struct folio *folio, struct writeback_control *wbc)
int err, len = folio_size(folio);
dbg_gen("ino %lu, pg %lu, pg flags %#lx",
-inode->i_ino, folio->index, folio->flags);
+inode->i_ino, folio->index, folio->flags.f);
ubifs_assert(c, folio->private != NULL);
/* Is the folio fully outside @i_size? (truncate in progress) */


@@ -1024,7 +1024,7 @@ static inline unsigned int compound_order(struct page *page)
{
struct folio *folio = (struct folio *)page;
-if (!test_bit(PG_head, &folio->flags))
+if (!test_bit(PG_head, &folio->flags.f))
return 0;
return folio_large_order(folio);
}
@@ -1554,7 +1554,7 @@ static inline bool is_nommu_shared_mapping(vm_flags_t flags)
*/
static inline int page_zone_id(struct page *page)
{
-return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
+return (page->flags.f >> ZONEID_PGSHIFT) & ZONEID_MASK;
}
#ifdef NODE_NOT_IN_PAGE_FLAGS
@@ -1562,7 +1562,7 @@ int page_to_nid(const struct page *page);
#else
static inline int page_to_nid(const struct page *page)
{
-return (PF_POISONED_CHECK(page)->flags >> NODES_PGSHIFT) & NODES_MASK;
+return (PF_POISONED_CHECK(page)->flags.f >> NODES_PGSHIFT) & NODES_MASK;
}
#endif
@@ -1637,14 +1637,14 @@ static inline void page_cpupid_reset_last(struct page *page)
#else
static inline int folio_last_cpupid(struct folio *folio)
{
-return (folio->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
+return (folio->flags.f >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
}
int folio_xchg_last_cpupid(struct folio *folio, int cpupid);
static inline void page_cpupid_reset_last(struct page *page)
{
-page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
+page->flags.f |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
}
#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
@@ -1740,7 +1740,7 @@ static inline u8 page_kasan_tag(const struct page *page)
u8 tag = KASAN_TAG_KERNEL;
if (kasan_enabled()) {
-tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
+tag = (page->flags.f >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
tag ^= 0xff;
}
@@ -1755,12 +1755,12 @@ static inline void page_kasan_tag_set(struct page *page, u8 tag)
return;
tag ^= 0xff;
-old_flags = READ_ONCE(page->flags);
+old_flags = READ_ONCE(page->flags.f);
do {
flags = old_flags;
flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
-} while (unlikely(!try_cmpxchg(&page->flags, &old_flags, flags)));
+} while (unlikely(!try_cmpxchg(&page->flags.f, &old_flags, flags)));
}
static inline void page_kasan_tag_reset(struct page *page)
@@ -1804,13 +1804,13 @@ static inline pg_data_t *folio_pgdat(const struct folio *folio)
#ifdef SECTION_IN_PAGE_FLAGS
static inline void set_page_section(struct page *page, unsigned long section)
{
-page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
-page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
+page->flags.f &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
+page->flags.f |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}
static inline unsigned long page_to_section(const struct page *page)
{
-return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
+return (page->flags.f >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}
#endif
@@ -2015,14 +2015,14 @@ static inline bool folio_is_longterm_pinnable(struct folio *folio)
static inline void set_page_zone(struct page *page, enum zone_type zone)
{
-page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
-page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
+page->flags.f &= ~(ZONES_MASK << ZONES_PGSHIFT);
+page->flags.f |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
}
static inline void set_page_node(struct page *page, unsigned long node)
{
-page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
-page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
+page->flags.f &= ~(NODES_MASK << NODES_PGSHIFT);
+page->flags.f |= (node & NODES_MASK) << NODES_PGSHIFT;
}
static inline void set_page_links(struct page *page, enum zone_type zone,
@@ -2064,7 +2064,7 @@ static inline long compound_nr(struct page *page)
{
struct folio *folio = (struct folio *)page;
-if (!test_bit(PG_head, &folio->flags))
+if (!test_bit(PG_head, &folio->flags.f))
return 1;
return folio_large_nr_pages(folio);
}


@@ -143,7 +143,7 @@ static inline int lru_tier_from_refs(int refs, bool workingset)
static inline int folio_lru_refs(struct folio *folio)
{
-unsigned long flags = READ_ONCE(folio->flags);
+unsigned long flags = READ_ONCE(folio->flags.f);
if (!(flags & BIT(PG_referenced)))
return 0;
@@ -156,7 +156,7 @@ static inline int folio_lru_refs(struct folio *folio)
static inline int folio_lru_gen(struct folio *folio)
{
-unsigned long flags = READ_ONCE(folio->flags);
+unsigned long flags = READ_ONCE(folio->flags.f);
return ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
}
@@ -268,7 +268,7 @@ static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio,
gen = lru_gen_from_seq(seq);
flags = (gen + 1UL) << LRU_GEN_PGOFF;
/* see the comment on MIN_NR_GENS about PG_active */
-set_mask_bits(&folio->flags, LRU_GEN_MASK | BIT(PG_active), flags);
+set_mask_bits(&folio->flags.f, LRU_GEN_MASK | BIT(PG_active), flags);
lru_gen_update_size(lruvec, folio, -1, gen);
/* for folio_rotate_reclaimable() */
@@ -293,7 +293,7 @@ static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio,
/* for folio_migrate_flags() */
flags = !reclaiming && lru_gen_is_active(lruvec, gen) ? BIT(PG_active) : 0;
-flags = set_mask_bits(&folio->flags, LRU_GEN_MASK, flags);
+flags = set_mask_bits(&folio->flags.f, LRU_GEN_MASK, flags);
gen = ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
lru_gen_update_size(lruvec, folio, gen, -1);
@@ -304,9 +304,9 @@ static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio,
static inline void folio_migrate_refs(struct folio *new, struct folio *old)
{
-unsigned long refs = READ_ONCE(old->flags) & LRU_REFS_MASK;
+unsigned long refs = READ_ONCE(old->flags.f) & LRU_REFS_MASK;
-set_mask_bits(&new->flags, LRU_REFS_MASK, refs);
+set_mask_bits(&new->flags.f, LRU_REFS_MASK, refs);
}
#else /* !CONFIG_LRU_GEN */


@@ -34,6 +34,10 @@ struct address_space;
struct futex_private_hash;
struct mem_cgroup;
+typedef struct {
+unsigned long f;
+} memdesc_flags_t;
/*
* Each physical page in the system has a struct page associated with
* it to keep track of whatever it is we are using the page for at the
@@ -72,7 +76,7 @@ struct mem_cgroup;
#endif
struct page {
-unsigned long flags; /* Atomic flags, some possibly
+memdesc_flags_t flags; /* Atomic flags, some possibly
* updated asynchronously */
/*
* Five words (20/40 bytes) are available in this union.
@@ -383,7 +387,7 @@ struct folio {
union {
struct {
/* public: */
-unsigned long flags;
+memdesc_flags_t flags;
union {
struct list_head lru;
/* private: avoid cluttering the output */


@@ -1186,7 +1186,7 @@ static inline bool zone_is_empty(struct zone *zone)
static inline enum zone_type page_zonenum(const struct page *page)
{
ASSERT_EXCLUSIVE_BITS(page->flags, ZONES_MASK << ZONES_PGSHIFT);
-return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
+return (page->flags.f >> ZONES_PGSHIFT) & ZONES_MASK;
}
static inline enum zone_type folio_zonenum(const struct folio *folio)


@@ -217,7 +217,7 @@ static __always_inline const struct page *page_fixed_fake_head(const struct page
* cold cacheline in some cases.
*/
if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) &&
-test_bit(PG_head, &page->flags)) {
+test_bit(PG_head, &page->flags.f)) {
/*
* We can safely access the field of the @page[1] with PG_head
* because the @page is a compound page composed with at least
@@ -325,14 +325,14 @@ static __always_inline int PageTail(const struct page *page)
static __always_inline int PageCompound(const struct page *page)
{
-return test_bit(PG_head, &page->flags) ||
+return test_bit(PG_head, &page->flags.f) ||
READ_ONCE(page->compound_head) & 1;
}
#define PAGE_POISON_PATTERN -1l
static inline int PagePoisoned(const struct page *page)
{
-return READ_ONCE(page->flags) == PAGE_POISON_PATTERN;
+return READ_ONCE(page->flags.f) == PAGE_POISON_PATTERN;
}
#ifdef CONFIG_DEBUG_VM
@@ -349,8 +349,8 @@ static const unsigned long *const_folio_flags(const struct folio *folio,
const struct page *page = &folio->page;
VM_BUG_ON_PGFLAGS(page->compound_head & 1, page);
-VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
-return &page[n].flags;
+VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags.f), page);
+return &page[n].flags.f;
}
static unsigned long *folio_flags(struct folio *folio, unsigned n)
@@ -358,8 +358,8 @@ static unsigned long *folio_flags(struct folio *folio, unsigned n)
struct page *page = &folio->page;
VM_BUG_ON_PGFLAGS(page->compound_head & 1, page);
-VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
-return &page[n].flags;
+VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags.f), page);
+return &page[n].flags.f;
}
/*
@@ -449,37 +449,37 @@ FOLIO_CLEAR_FLAG(name, page)
#define TESTPAGEFLAG(uname, lname, policy) \
FOLIO_TEST_FLAG(lname, FOLIO_##policy) \
static __always_inline int Page##uname(const struct page *page) \
-{ return test_bit(PG_##lname, &policy(page, 0)->flags); }
+{ return test_bit(PG_##lname, &policy(page, 0)->flags.f); }
#define SETPAGEFLAG(uname, lname, policy) \
FOLIO_SET_FLAG(lname, FOLIO_##policy) \
static __always_inline void SetPage##uname(struct page *page) \
-{ set_bit(PG_##lname, &policy(page, 1)->flags); }
+{ set_bit(PG_##lname, &policy(page, 1)->flags.f); }
#define CLEARPAGEFLAG(uname, lname, policy) \
FOLIO_CLEAR_FLAG(lname, FOLIO_##policy) \
static __always_inline void ClearPage##uname(struct page *page) \
-{ clear_bit(PG_##lname, &policy(page, 1)->flags); }
+{ clear_bit(PG_##lname, &policy(page, 1)->flags.f); }
#define __SETPAGEFLAG(uname, lname, policy) \
__FOLIO_SET_FLAG(lname, FOLIO_##policy) \
static __always_inline void __SetPage##uname(struct page *page) \
-{ __set_bit(PG_##lname, &policy(page, 1)->flags); }
+{ __set_bit(PG_##lname, &policy(page, 1)->flags.f); }
#define __CLEARPAGEFLAG(uname, lname, policy) \
__FOLIO_CLEAR_FLAG(lname, FOLIO_##policy) \
static __always_inline void __ClearPage##uname(struct page *page) \
-{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }
+{ __clear_bit(PG_##lname, &policy(page, 1)->flags.f); }
#define TESTSETFLAG(uname, lname, policy) \
FOLIO_TEST_SET_FLAG(lname, FOLIO_##policy) \
static __always_inline int TestSetPage##uname(struct page *page) \
-{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }
+{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags.f); }
#define TESTCLEARFLAG(uname, lname, policy) \
FOLIO_TEST_CLEAR_FLAG(lname, FOLIO_##policy) \
static __always_inline int TestClearPage##uname(struct page *page) \
-{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }
+{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags.f); }
#define PAGEFLAG(uname, lname, policy) \
TESTPAGEFLAG(uname, lname, policy) \
@@ -846,7 +846,7 @@ static __always_inline bool folio_test_head(const struct folio *folio)
static __always_inline int PageHead(const struct page *page)
{
PF_POISONED_CHECK(page);
-return test_bit(PG_head, &page->flags) && !page_is_fake_head(page);
+return test_bit(PG_head, &page->flags.f) && !page_is_fake_head(page);
}
__SETPAGEFLAG(Head, head, PF_ANY)
@@ -1170,28 +1170,28 @@ static __always_inline int PageAnonExclusive(const struct page *page)
*/
if (PageHuge(page))
page = compound_head(page);
-return test_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
+return test_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags.f);
}
static __always_inline void SetPageAnonExclusive(struct page *page)
{
VM_BUG_ON_PGFLAGS(!PageAnonNotKsm(page), page);
VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
-set_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
+set_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags.f);
}
static __always_inline void ClearPageAnonExclusive(struct page *page)
{
VM_BUG_ON_PGFLAGS(!PageAnonNotKsm(page), page);
VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
-clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
+clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags.f);
}
static __always_inline void __ClearPageAnonExclusive(struct page *page)
{
VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
-__clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
+__clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags.f);
}
#ifdef CONFIG_MMU
@@ -1241,7 +1241,7 @@ static __always_inline void __ClearPageAnonExclusive(struct page *page)
*/
static inline int folio_has_private(const struct folio *folio)
{
-return !!(folio->flags & PAGE_FLAGS_PRIVATE);
+return !!(folio->flags.f & PAGE_FLAGS_PRIVATE);
}
#undef PF_ANY


@@ -107,7 +107,8 @@ static inline bool get_page_tag_ref(struct page *page, union codetag_ref *ref,
if (static_key_enabled(&mem_profiling_compressed)) {
pgalloc_tag_idx idx;
-idx = (page->flags >> alloc_tag_ref_offs) & alloc_tag_ref_mask;
+idx = (page->flags.f >> alloc_tag_ref_offs) &
+alloc_tag_ref_mask;
idx_to_ref(idx, ref);
handle->page = page;
} else {
@@ -149,11 +150,11 @@ static inline void update_page_tag_ref(union pgtag_ref_handle handle, union code
idx = (unsigned long)ref_to_idx(ref);
idx = (idx & alloc_tag_ref_mask) << alloc_tag_ref_offs;
do {
-old_flags = READ_ONCE(page->flags);
+old_flags = READ_ONCE(page->flags.f);
flags = old_flags;
flags &= ~(alloc_tag_ref_mask << alloc_tag_ref_offs);
flags |= idx;
-} while (unlikely(!try_cmpxchg(&page->flags, &old_flags, flags)));
+} while (unlikely(!try_cmpxchg(&page->flags.f, &old_flags, flags)));
} else {
if (WARN_ON(!handle.ref || !ref))
return;


@@ -28,7 +28,7 @@ DECLARE_EVENT_CLASS(page_ref_mod_template,
TP_fast_assign(
__entry->pfn = page_to_pfn(page);
-__entry->flags = page->flags;
+__entry->flags = page->flags.f;
__entry->count = page_ref_count(page);
__entry->mapcount = atomic_read(&page->_mapcount);
__entry->mapping = page->mapping;
@@ -77,7 +77,7 @@ DECLARE_EVENT_CLASS(page_ref_mod_and_test_template,
TP_fast_assign(
__entry->pfn = page_to_pfn(page);
-__entry->flags = page->flags;
+__entry->flags = page->flags.f;
__entry->count = page_ref_count(page);
__entry->mapcount = atomic_read(&page->_mapcount);
__entry->mapping = page->mapping;


@@ -1140,10 +1140,10 @@ static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync,
*/
flags = wait->flags;
if (flags & WQ_FLAG_EXCLUSIVE) {
-if (test_bit(key->bit_nr, &key->folio->flags))
+if (test_bit(key->bit_nr, &key->folio->flags.f))
return -1;
if (flags & WQ_FLAG_CUSTOM) {
-if (test_and_set_bit(key->bit_nr, &key->folio->flags))
+if (test_and_set_bit(key->bit_nr, &key->folio->flags.f))
return -1;
flags |= WQ_FLAG_DONE;
}
@@ -1226,9 +1226,9 @@ static inline bool folio_trylock_flag(struct folio *folio, int bit_nr,
struct wait_queue_entry *wait)
{
if (wait->flags & WQ_FLAG_EXCLUSIVE) {
-if (test_and_set_bit(bit_nr, &folio->flags))
+if (test_and_set_bit(bit_nr, &folio->flags.f))
return false;
-} else if (test_bit(bit_nr, &folio->flags))
+} else if (test_bit(bit_nr, &folio->flags.f))
return false;
wait->flags |= WQ_FLAG_WOKEN | WQ_FLAG_DONE;


@@ -3303,8 +3303,8 @@ static void __split_folio_to_order(struct folio *folio, int old_order,
* unreferenced sub-pages of an anonymous THP: we can simply drop
* PG_anon_exclusive (-> PG_mappedtodisk) for these here.
*/
-new_folio->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
-new_folio->flags |= (folio->flags &
+new_folio->flags.f &= ~PAGE_FLAGS_CHECK_AT_PREP;
+new_folio->flags.f |= (folio->flags.f &
((1L << PG_referenced) |
(1L << PG_swapbacked) |
(1L << PG_swapcache) |


@@ -1707,10 +1707,10 @@ static int identify_page_state(unsigned long pfn, struct page *p,
* carried out only if the first check can't determine the page status.
*/
for (ps = error_states;; ps++)
-if ((p->flags & ps->mask) == ps->res)
+if ((p->flags.f & ps->mask) == ps->res)
break;
-page_flags |= (p->flags & (1UL << PG_dirty));
+page_flags |= (p->flags.f & (1UL << PG_dirty));
if (!ps->mask)
for (ps = error_states;; ps++)
@@ -2137,7 +2137,7 @@ retry:
return action_result(pfn, MF_MSG_FREE_HUGE, res);
}
-page_flags = folio->flags;
+page_flags = folio->flags.f;
if (!hwpoison_user_mappings(folio, p, pfn, flags)) {
folio_unlock(folio);
@@ -2398,7 +2398,7 @@ try_again:
* folio_remove_rmap_*() in try_to_unmap_one(). So to determine page
* status correctly, we save a copy of the page flags at this time.
*/
-page_flags = folio->flags;
+page_flags = folio->flags.f;
/*
* __munlock_folio() may clear a writeback folio's LRU flag without
@@ -2744,13 +2744,13 @@ static int soft_offline_in_use_page(struct page *page)
putback_movable_pages(&pagelist);
pr_info("%#lx: %s migration failed %ld, type %pGp\n",
-pfn, msg_page[huge], ret, &page->flags);
+pfn, msg_page[huge], ret, &page->flags.f);
if (ret > 0)
ret = -EBUSY;
}
} else {
pr_info("%#lx: %s isolation failed, page count %d, type %pGp\n",
-pfn, msg_page[huge], page_count(page), &page->flags);
+pfn, msg_page[huge], page_count(page), &page->flags.f);
ret = -EBUSY;
}
return ret;


@@ -99,14 +99,14 @@ int folio_xchg_last_cpupid(struct folio *folio, int cpupid)
unsigned long old_flags, flags;
int last_cpupid;
-old_flags = READ_ONCE(folio->flags);
+old_flags = READ_ONCE(folio->flags.f);
do {
flags = old_flags;
last_cpupid = (flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
-} while (unlikely(!try_cmpxchg(&folio->flags, &old_flags, flags)));
+} while (unlikely(!try_cmpxchg(&folio->flags.f, &old_flags, flags)));
return last_cpupid;
}


@@ -950,7 +950,7 @@ static inline void __free_one_page(struct page *page,
bool to_tail;
VM_BUG_ON(!zone_is_initialized(zone));
-VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
+VM_BUG_ON_PAGE(page->flags.f & PAGE_FLAGS_CHECK_AT_PREP, page);
VM_BUG_ON(migratetype == -1);
VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
@@ -1043,7 +1043,7 @@ static inline bool page_expected_state(struct page *page,
page->memcg_data |
#endif
page_pool_page_is_pp(page) |
-(page->flags & check_flags)))
+(page->flags.f & check_flags)))
return false;
return true;
@@ -1059,7 +1059,7 @@ static const char *page_bad_reason(struct page *page, unsigned long flags)
bad_reason = "non-NULL mapping";
if (unlikely(page_ref_count(page) != 0))
bad_reason = "nonzero _refcount";
-if (unlikely(page->flags & flags)) {
+if (unlikely(page->flags.f & flags)) {
if (flags == PAGE_FLAGS_CHECK_AT_PREP)
bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set";
else
@@ -1358,7 +1358,7 @@ __always_inline bool free_pages_prepare(struct page *page,
int i;
if (compound) {
-page[1].flags &= ~PAGE_FLAGS_SECOND;
+page[1].flags.f &= ~PAGE_FLAGS_SECOND;
#ifdef NR_PAGES_IN_LARGE_FOLIO
folio->_nr_pages = 0;
#endif
@@ -1372,7 +1372,7 @@ __always_inline bool free_pages_prepare(struct page *page,
continue;
}
}
-(page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
+(page + i)->flags.f &= ~PAGE_FLAGS_CHECK_AT_PREP;
}
}
if (folio_test_anon(folio)) {
@@ -1391,7 +1391,7 @@ __always_inline bool free_pages_prepare(struct page *page,
}
page_cpupid_reset_last(page);
-page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
+page->flags.f &= ~PAGE_FLAGS_CHECK_AT_PREP;
reset_page_owner(page, order);
page_table_check_free(page, order);
pgalloc_tag_sub(page, 1 << order);


@@ -387,14 +387,14 @@ static void __lru_cache_activate_folio(struct folio *folio)
static void lru_gen_inc_refs(struct folio *folio)
{
-unsigned long new_flags, old_flags = READ_ONCE(folio->flags);
+unsigned long new_flags, old_flags = READ_ONCE(folio->flags.f);
if (folio_test_unevictable(folio))
return;
/* see the comment on LRU_REFS_FLAGS */
if (!folio_test_referenced(folio)) {
-set_mask_bits(&folio->flags, LRU_REFS_MASK, BIT(PG_referenced));
+set_mask_bits(&folio->flags.f, LRU_REFS_MASK, BIT(PG_referenced));
return;
}
@@ -406,7 +406,7 @@ static void lru_gen_inc_refs(struct folio *folio)
}
new_flags = old_flags + BIT(LRU_REFS_PGOFF);
-} while (!try_cmpxchg(&folio->flags, &old_flags, new_flags));
+} while (!try_cmpxchg(&folio->flags.f, &old_flags, new_flags));
}
static bool lru_gen_clear_refs(struct folio *folio)
@@ -418,7 +418,7 @@ static bool lru_gen_clear_refs(struct folio *folio)
if (gen < 0)
return true;
-set_mask_bits(&folio->flags, LRU_REFS_FLAGS | BIT(PG_workingset), 0);
+set_mask_bits(&folio->flags.f, LRU_REFS_FLAGS | BIT(PG_workingset), 0);
lrugen = &folio_lruvec(folio)->lrugen;
/* whether can do without shuffling under the LRU lock */


@@ -888,11 +888,11 @@ static bool lru_gen_set_refs(struct folio *folio)
{
/* see the comment on LRU_REFS_FLAGS */
if (!folio_test_referenced(folio) && !folio_test_workingset(folio)) {
-set_mask_bits(&folio->flags, LRU_REFS_MASK, BIT(PG_referenced));
+set_mask_bits(&folio->flags.f, LRU_REFS_MASK, BIT(PG_referenced));
return false;
}
-set_mask_bits(&folio->flags, LRU_REFS_FLAGS, BIT(PG_workingset));
+set_mask_bits(&folio->flags.f, LRU_REFS_FLAGS, BIT(PG_workingset));
return true;
}
#else
@@ -3257,13 +3257,13 @@ static bool positive_ctrl_err(struct ctrl_pos *sp, struct ctrl_pos *pv)
/* promote pages accessed through page tables */
static int folio_update_gen(struct folio *folio, int gen)
{
-unsigned long new_flags, old_flags = READ_ONCE(folio->flags);
+unsigned long new_flags, old_flags = READ_ONCE(folio->flags.f);
VM_WARN_ON_ONCE(gen >= MAX_NR_GENS);
/* see the comment on LRU_REFS_FLAGS */
if (!folio_test_referenced(folio) && !folio_test_workingset(folio)) {
-set_mask_bits(&folio->flags, LRU_REFS_MASK, BIT(PG_referenced));
+set_mask_bits(&folio->flags.f, LRU_REFS_MASK, BIT(PG_referenced));
return -1;
}
@@ -3274,7 +3274,7 @@ static int folio_update_gen(struct folio *folio, int gen)
new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_FLAGS);
new_flags |= ((gen + 1UL) << LRU_GEN_PGOFF) | BIT(PG_workingset);
-} while (!try_cmpxchg(&folio->flags, &old_flags, new_flags));
+} while (!try_cmpxchg(&folio->flags.f, &old_flags, new_flags));
return ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
}
@@ -3285,7 +3285,7 @@ static int folio_inc_gen(struct lruvec *lruvec, struct folio *folio, bool reclai
int type = folio_is_file_lru(folio);
struct lru_gen_folio *lrugen = &lruvec->lrugen;
int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]);
-unsigned long new_flags, old_flags = READ_ONCE(folio->flags);
+unsigned long new_flags, old_flags = READ_ONCE(folio->flags.f);
VM_WARN_ON_ONCE_FOLIO(!(old_flags & LRU_GEN_MASK), folio);
@@ -3302,7 +3302,7 @@ static int folio_inc_gen(struct lruvec *lruvec, struct folio *folio, bool reclai
/* for folio_end_writeback() */
if (reclaiming)
new_flags |= BIT(PG_reclaim);
-} while (!try_cmpxchg(&folio->flags, &old_flags, new_flags));
+} while (!try_cmpxchg(&folio->flags.f, &old_flags, new_flags));
lru_gen_update_size(lruvec, folio, old_gen, new_gen);
@@ -4553,7 +4553,7 @@ static bool isolate_folio(struct lruvec *lruvec, struct folio *folio, struct sca
/* see the comment on LRU_REFS_FLAGS */
if (!folio_test_referenced(folio))
-set_mask_bits(&folio->flags, LRU_REFS_MASK, 0);
+set_mask_bits(&folio->flags.f, LRU_REFS_MASK, 0);
/* for shrink_folio_list() */
folio_clear_reclaim(folio);
@@ -4766,7 +4766,7 @@ retry:
/* don't add rejected folios to the oldest generation */
if (lru_gen_folio_seq(lruvec, folio, false) == min_seq[type])
-set_mask_bits(&folio->flags, LRU_REFS_FLAGS, BIT(PG_active));
+set_mask_bits(&folio->flags.f, LRU_REFS_FLAGS, BIT(PG_active));
}
spin_lock_irq(&lruvec->lru_lock);


@@ -318,7 +318,7 @@ static void lru_gen_refault(struct folio *folio, void *shadow)
folio_set_workingset(folio);
mod_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + type, delta);
} else
-set_mask_bits(&folio->flags, LRU_REFS_MASK, (refs - 1UL) << LRU_REFS_PGOFF);
+set_mask_bits(&folio->flags.f, LRU_REFS_MASK, (refs - 1UL) << LRU_REFS_PGOFF);
unlock:
rcu_read_unlock();
}