Mirror of https://github.com/raspberrypi/linux.git, synced 2025-12-06 10:00:17 +00:00
mm/huge_memory: initialise the tags of the huge zero folio
On arm64 with MTE enabled, a page mapped as Normal Tagged (PROT_MTE) in user space will need to have its allocation tags initialised. This is normally done in the arm64 set_pte_at() after checking the memory attributes. Such a page is also marked with the PG_mte_tagged flag to avoid subsequent clearing. Since this relies on having a struct page, pte_special() mappings are ignored.

Commit d82d09e482 ("mm/huge_memory: mark PMD mappings of the huge zero folio special") maps the huge zero folio special, so the arm64 set_pmd_at() will no longer zero the tags. There is no guarantee that the tags are zero, especially if parts of this huge page have previously been tagged. It's fairly easy to detect this by regularly dropping the caches to force the reallocation of the huge zero folio.

Allocate the huge zero folio with the __GFP_ZEROTAGS flag. In addition, do not warn in the arm64 __access_remote_tags() when reading tags from the huge zero page.

I bundled the arm64 change in here as well since both changes relate to the commit mapping the huge zero folio as special.

[catalin.marinas@arm.com: handle arch mte_zero_clear_page_tags() code issuing MTE instructions]
Link: https://lkml.kernel.org/r/aQi8dA_QpXM8XqrE@arm.com
Link: https://lkml.kernel.org/r/20251031170133.280742-1-catalin.marinas@arm.com
Fixes: d82d09e482 ("mm/huge_memory: mark PMD mappings of the huge zero folio special")
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Lance Yang <lance.yang@linux.dev>
Tested-by: Beleswar Padhi <b-padhi@ti.com>
Cc: Will Deacon <will@kernel.org>
Cc: Mark Brown <broonie@kernel.org>
Cc: Aishwarya TCV <aishwarya.tcv@arm.com>
Cc: David Hildenbrand (Red Hat) <david@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
committed by Andrew Morton
parent 9a6b60cb14
commit adfb6609c6
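To illustrate the user-visible effect described in the commit message, here is a minimal user-space sketch (a hypothetical check_zero_tags.c, not the reproducer from the report): it maps a THP-eligible region as PROT_MTE, read-faults it so the kernel may back it with the huge zero folio, then reads each 16-byte granule's allocation tag with LDG and expects zero. Assumptions: an arm64 system with MTE, a toolchain accepting -march=armv8.5-a+memtag, and THP enabled; whether the mapping actually uses the huge zero folio depends on alignment and THP configuration.

/* check_zero_tags.c - hypothetical name; build with:
 *   gcc -O2 -march=armv8.5-a+memtag check_zero_tags.c
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/prctl.h>

#ifndef PROT_MTE
#define PROT_MTE		0x20	/* arm64: Normal Tagged memory */
#endif
#ifndef PR_SET_TAGGED_ADDR_CTRL
#define PR_SET_TAGGED_ADDR_CTRL	55
#define PR_TAGGED_ADDR_ENABLE	(1UL << 0)
#endif
#ifndef PR_MTE_TCF_SYNC
#define PR_MTE_TCF_SYNC		(1UL << 1)
#endif

/* Read the allocation tag of the 16-byte granule containing addr */
static inline unsigned int load_tag(uint64_t addr)
{
	uint64_t t = addr;

	asm volatile("ldg %0, [%1]" : "+r"(t) : "r"(addr));
	return (unsigned int)(t >> 56) & 0xf;
}

int main(void)
{
	const size_t len = 2UL << 20;	/* one PMD-sized (2MB) region */

	/* Opt in to the tagged address ABI with synchronous tag checks */
	if (prctl(PR_SET_TAGGED_ADDR_CTRL,
		  PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC, 0, 0, 0))
		return EXIT_FAILURE;

	char *p = mmap(NULL, len, PROT_READ | PROT_MTE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return EXIT_FAILURE;

	/* Read fault: a THP-eligible VMA may map the huge zero folio */
	(void)*(volatile char *)p;

	/* All tags of a zero mapping are expected to be zero */
	for (size_t off = 0; off < len; off += 16) {
		unsigned int tag = load_tag((uint64_t)(p + off));

		if (tag)
			printf("stale tag %u at offset %#zx\n", tag, off);
	}

	return EXIT_SUCCESS;
}

Per the commit message, forcing reallocation of the huge zero folio (e.g. by regularly dropping the caches) is what makes stale tags show up in a loop like the one above.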
arch/arm64/kernel/mte.c
@@ -476,7 +476,8 @@ static int __access_remote_tags(struct mm_struct *mm, unsigned long addr,
 
 		folio = page_folio(page);
 		if (folio_test_hugetlb(folio))
-			WARN_ON_ONCE(!folio_test_hugetlb_mte_tagged(folio));
+			WARN_ON_ONCE(!folio_test_hugetlb_mte_tagged(folio) &&
+				     !is_huge_zero_folio(folio));
 		else
 			WARN_ON_ONCE(!page_mte_tagged(page) && !is_zero_page(page));
 
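The warning above sits on the ptrace() path: __access_remote_tags() backs the arm64 PTRACE_PEEKMTETAGS/PTRACE_POKEMTETAGS requests, so a debugger reading tags from a tracee whose PROT_MTE mapping is backed by the huge zero folio used to trip it. A sketch of that caller side, assuming the tracee is already attached and stopped (peek_tags is a hypothetical helper; error handling trimmed):

#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

#ifndef PTRACE_PEEKMTETAGS
#define PTRACE_PEEKMTETAGS	33	/* arm64-specific ptrace request */
#endif

/*
 * Read one MTE tag per 16-byte granule of the tracee's memory.
 * On return, iov_len is updated with the number of tags transferred.
 * If addr lands in a huge zero folio mapping, the kernel used to WARN
 * because that folio never goes through the PG_mte_tagged path.
 */
static long peek_tags(pid_t pid, void *addr, uint8_t *tags, size_t ntags)
{
	struct iovec iov = {
		.iov_base = tags,	/* one byte per tag */
		.iov_len = ntags,
	};

	return ptrace(PTRACE_PEEKMTETAGS, pid, addr, &iov);
}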
arch/arm64/mm/fault.c
@@ -969,6 +969,16 @@ struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
 
 void tag_clear_highpage(struct page *page)
 {
+	/*
+	 * Check if MTE is supported and fall back to clear_highpage().
+	 * get_huge_zero_folio() unconditionally passes __GFP_ZEROTAGS and
+	 * post_alloc_hook() will invoke tag_clear_highpage().
+	 */
+	if (!system_supports_mte()) {
+		clear_highpage(page);
+		return;
+	}
+
 	/* Newly allocated page, shouldn't have been tagged yet */
 	WARN_ON_ONCE(!try_page_mte_tagging(page));
 	mte_zero_clear_page_tags(page_address(page));
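The !system_supports_mte() fallback matters because the page allocator calls tag_clear_highpage() whenever an initialising allocation carries __GFP_ZEROTAGS, regardless of whether the CPU implements MTE, and mte_zero_clear_page_tags() issues MTE instructions (this is the bracketed fix noted in the commit message). A paraphrased sketch of that allocator path in mm/page_alloc.c's post_alloc_hook(), trimmed to the init/tag handling and not verbatim kernel code:

/* Paraphrased sketch of mm/page_alloc.c:post_alloc_hook() */
static void post_alloc_init(struct page *page, unsigned int order,
			    gfp_t gfp_flags)
{
	bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags);
	bool zero_tags = init && (gfp_flags & __GFP_ZEROTAGS);
	int i;

	if (zero_tags) {
		/* Initialise both the memory and its tags in one pass */
		for (i = 0; i != 1 << order; ++i)
			tag_clear_highpage(page + i);
	} else if (init) {
		/* Plain zeroing when no tag initialisation was requested */
		for (i = 0; i != 1 << order; ++i)
			clear_highpage(page + i);
	}
}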
mm/huge_memory.c
@@ -214,7 +214,8 @@ retry:
 	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
 		return true;
 
-	zero_folio = folio_alloc((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
+	zero_folio = folio_alloc((GFP_TRANSHUGE | __GFP_ZERO | __GFP_ZEROTAGS) &
+				 ~__GFP_MOVABLE,
 			HPAGE_PMD_ORDER);
 	if (!zero_folio) {
 		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);