Mirror of https://github.com/raspberrypi/linux.git
Merge tag 'mm-hotfixes-stable-2025-01-16-21-11' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "7 singleton hotfixes. 6 are MM. Two are cc:stable and the remainder
  address post-6.12 issues"

* tag 'mm-hotfixes-stable-2025-01-16-21-11' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  ocfs2: check dir i_size in ocfs2_find_entry
  mailmap: update entry for Ethan Carter Edwards
  mm: zswap: move allocations during CPU init outside the lock
  mm: khugepaged: fix call hpage_collapse_scan_file() for anonymous vma
  mm: shmem: use signed int for version handling in casefold option
  alloc_tag: skip pgalloc_tag_swap if profiling is disabled
  mm: page_alloc: fix missed updates of lowmem_reserve in adjust_managed_page_count
.mailmap (1 line changed)

--- a/.mailmap
+++ b/.mailmap
@@ -202,6 +202,7 @@ Elliot Berman <quic_eberman@quicinc.com> <eberman@codeaurora.org>
 Enric Balletbo i Serra <eballetbo@kernel.org> <enric.balletbo@collabora.com>
 Enric Balletbo i Serra <eballetbo@kernel.org> <eballetbo@iseebcn.com>
 Erik Kaneda <erik.kaneda@intel.com> <erik.schmauss@intel.com>
+Ethan Carter Edwards <ethan@ethancedwards.com> Ethan Edwards <ethancarteredwards@gmail.com>
 Eugen Hristev <eugen.hristev@linaro.org> <eugen.hristev@microchip.com>
 Eugen Hristev <eugen.hristev@linaro.org> <eugen.hristev@collabora.com>
 Evgeniy Polyakov <johnpol@2ka.mipt.ru>
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -1065,26 +1065,39 @@ int ocfs2_find_entry(const char *name, int namelen,
 {
 	struct buffer_head *bh;
 	struct ocfs2_dir_entry *res_dir = NULL;
+	int ret = 0;
 
 	if (ocfs2_dir_indexed(dir))
 		return ocfs2_find_entry_dx(name, namelen, dir, lookup);
 
+	if (unlikely(i_size_read(dir) <= 0)) {
+		ret = -EFSCORRUPTED;
+		mlog_errno(ret);
+		goto out;
+	}
 	/*
 	 * The unindexed dir code only uses part of the lookup
 	 * structure, so there's no reason to push it down further
 	 * than this.
 	 */
-	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
+	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
+		if (unlikely(i_size_read(dir) > dir->i_sb->s_blocksize)) {
+			ret = -EFSCORRUPTED;
+			mlog_errno(ret);
+			goto out;
+		}
 		bh = ocfs2_find_entry_id(name, namelen, dir, &res_dir);
-	else
+	} else {
 		bh = ocfs2_find_entry_el(name, namelen, dir, &res_dir);
+	}
 
 	if (bh == NULL)
 		return -ENOENT;
 
 	lookup->dl_leaf_bh = bh;
 	lookup->dl_entry = res_dir;
-	return 0;
+out:
+	return ret;
 }
 
 /*
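Editor's note: the hunk above rejects obviously corrupt directory sizes before the unindexed lookup walks the directory: a non-positive i_size means the inode is corrupt, and an inline directory can never be larger than one filesystem block. Below is a minimal userspace sketch of the same validation pattern; the struct and field names are illustrative stand-ins, not ocfs2's, though EFSCORRUPTED really is EUCLEAN in the kernel.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define EFSCORRUPTED EUCLEAN	/* the kernel defines EFSCORRUPTED as EUCLEAN */

/* Hypothetical stand-in for the on-disk state of a directory inode. */
struct dir_inode {
	int64_t  i_size;	/* logical size of the directory */
	uint32_t s_blocksize;	/* filesystem block size */
	int      inline_data;	/* directory stored inside the inode? */
};

/* Validate the size invariants checked by the fix before any lookup. */
static int dir_size_ok(const struct dir_inode *dir)
{
	if (dir->i_size <= 0)
		return -EFSCORRUPTED;	/* a directory always contains . and .. */
	if (dir->inline_data && dir->i_size > dir->s_blocksize)
		return -EFSCORRUPTED;	/* inline data cannot exceed one block */
	return 0;
}

int main(void)
{
	struct dir_inode bad  = { .i_size = 0,   .s_blocksize = 4096, .inline_data = 0 };
	struct dir_inode good = { .i_size = 128, .s_blocksize = 4096, .inline_data = 1 };

	printf("bad:  %d\n", dir_size_ok(&bad));	/* prints -EUCLEAN */
	printf("good: %d\n", dir_size_ok(&good));	/* prints 0 */
	return 0;
}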
@@ -2010,6 +2023,7 @@ int ocfs2_lookup_ino_from_name(struct inode *dir, const char *name,
  *
  * Return 0 if the name does not exist
  * Return -EEXIST if the directory contains the name
+ * Return -EFSCORRUPTED if found corruption
  *
  * Callers should have i_rwsem + a cluster lock on dir
  */
@@ -2023,9 +2037,12 @@ int ocfs2_check_dir_for_entry(struct inode *dir,
 	trace_ocfs2_check_dir_for_entry(
 		(unsigned long long)OCFS2_I(dir)->ip_blkno, namelen, name);
 
-	if (ocfs2_find_entry(name, namelen, dir, &lookup) == 0) {
+	ret = ocfs2_find_entry(name, namelen, dir, &lookup);
+	if (ret == 0) {
 		ret = -EEXIST;
 		mlog_errno(ret);
+	} else if (ret == -ENOENT) {
+		ret = 0;
 	}
 
 	ocfs2_free_dir_lookup_result(&lookup);
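Editor's note: with ocfs2_find_entry() now able to return -EFSCORRUPTED as well as -ENOENT, the caller can no longer treat "non-zero" as "name absent". The hunk above remaps the codes: success becomes -EEXIST, -ENOENT becomes 0 (name free), and anything else propagates. A hedged sketch of that contract, with a stub in place of the real lookup:

#include <errno.h>
#include <stdio.h>

#define EFSCORRUPTED EUCLEAN

/* Illustrative stub: 0 = found, -ENOENT = absent, -EFSCORRUPTED = bad dir. */
static int find_entry(const char *name)
{
	if (!name[0])
		return -EFSCORRUPTED;
	return (name[0] == 'x') ? 0 : -ENOENT;
}

/*
 * Mirrors the fixed ocfs2_check_dir_for_entry() logic:
 *   found        -> -EEXIST (caller must not create a duplicate)
 *   absent       -> 0       (name is free to use)
 *   other errors -> passed through unchanged
 */
static int check_dir_for_entry(const char *name)
{
	int ret = find_entry(name);

	if (ret == 0)
		ret = -EEXIST;
	else if (ret == -ENOENT)
		ret = 0;
	return ret;
}

int main(void)
{
	printf("%d %d %d\n",
	       check_dir_for_entry("x"),	/* -EEXIST */
	       check_dir_for_entry("y"),	/* 0 */
	       check_dir_for_entry(""));	/* -EUCLEAN */
	return 0;
}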
--- a/lib/alloc_tag.c
+++ b/lib/alloc_tag.c
@@ -195,6 +195,9 @@ void pgalloc_tag_swap(struct folio *new, struct folio *old)
 	union codetag_ref ref_old, ref_new;
 	struct alloc_tag *tag_old, *tag_new;
 
+	if (!mem_alloc_profiling_enabled())
+		return;
+
 	tag_old = pgalloc_tag_get(&old->page);
 	if (!tag_old)
 		return;
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -2422,7 +2422,7 @@ skip:
 			VM_BUG_ON(khugepaged_scan.address < hstart ||
 				  khugepaged_scan.address + HPAGE_PMD_SIZE >
 				  hend);
-			if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
+			if (IS_ENABLED(CONFIG_SHMEM) && !vma_is_anonymous(vma)) {
 				struct file *file = get_file(vma->vm_file);
 				pgoff_t pgoff = linear_page_index(vma,
 						khugepaged_scan.address);
@@ -2768,7 +2768,7 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
 		mmap_assert_locked(mm);
 		memset(cc->node_load, 0, sizeof(cc->node_load));
 		nodes_clear(cc->alloc_nmask);
-		if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
+		if (IS_ENABLED(CONFIG_SHMEM) && !vma_is_anonymous(vma)) {
 			struct file *file = get_file(vma->vm_file);
 			pgoff_t pgoff = linear_page_index(vma, addr);
 
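Editor's note: vma->vm_file != NULL is not a reliable test for "file-backed". A private mapping of /dev/zero, for example, is anonymous in the kernel's sense (no vm_ops, served by the anonymous fault path, so vma_is_anonymous() is true) yet still carries a vm_file reference, so the old check sent such VMAs into hpage_collapse_scan_file(). The userspace program below creates exactly that kind of mapping; /proc/self/maps still names /dev/zero even though the region behaves as anonymous memory.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/zero", O_RDWR);
	if (fd < 0) { perror("open"); return 1; }

	/*
	 * MAP_PRIVATE /dev/zero: the kernel turns this into an anonymous
	 * mapping (vma_is_anonymous() is true), but vma->vm_file still
	 * points at /dev/zero.
	 */
	char *p = mmap(NULL, 2 * 1024 * 1024, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE, fd, 0);
	if (p == MAP_FAILED) { perror("mmap"); return 1; }
	p[0] = 1;	/* fault in a page through the anonymous path */

	/* Show the region: it is listed with a /dev/zero backing name. */
	char cmd[64];
	snprintf(cmd, sizeof(cmd), "grep zero /proc/%d/maps", getpid());
	system(cmd);

	munmap(p, 2 * 1024 * 1024);
	close(fd);
	return 0;
}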
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5692,10 +5692,13 @@ __meminit void zone_pcp_init(struct zone *zone)
 			 zone->present_pages, zone_batchsize(zone));
 }
 
+static void setup_per_zone_lowmem_reserve(void);
+
 void adjust_managed_page_count(struct page *page, long count)
 {
 	atomic_long_add(count, &page_zone(page)->managed_pages);
 	totalram_pages_add(count);
+	setup_per_zone_lowmem_reserve();
 }
 EXPORT_SYMBOL(adjust_managed_page_count);
 
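Editor's note: zone->lowmem_reserve[] is derived from the managed-page counts of the zones above it, scaled by sysctl_lowmem_reserve_ratio, so any path that changes managed_pages without recomputing the reserves (here adjust_managed_page_count(), used for example by memory ballooning) leaves stale protection values behind. A simplified model of the recalculation follows, assuming the upstream ratio semantics (the reserve in zone i against allocations targeting a higher zone j is the sum of managed pages of zones i+1..j divided by ratio[i]); the concrete numbers are made up for illustration.

#include <stdio.h>

#define MAX_ZONES 3	/* e.g. DMA, DMA32, NORMAL */

static long managed[MAX_ZONES] = { 4000, 250000, 2000000 };
static long ratio[MAX_ZONES]   = { 256, 256, 32 };	/* kernel defaults vary */
static long lowmem_reserve[MAX_ZONES][MAX_ZONES];

/*
 * Recompute every zone's protection against higher-zone allocations.
 * Mirrors the shape of setup_per_zone_lowmem_reserve().
 */
static void setup_lowmem_reserve(void)
{
	for (int i = 0; i < MAX_ZONES; i++) {
		long pages = 0;
		for (int j = i + 1; j < MAX_ZONES; j++) {
			pages += managed[j];
			lowmem_reserve[i][j] = pages / ratio[i];
		}
	}
}

static void adjust_managed_page_count(int zone, long count)
{
	managed[zone] += count;
	setup_lowmem_reserve();	/* the fix: keep reserves in sync */
}

int main(void)
{
	setup_lowmem_reserve();
	printf("DMA reserve vs NORMAL: %ld\n", lowmem_reserve[0][2]);
	adjust_managed_page_count(2, -500000);	/* e.g. balloon inflation */
	printf("DMA reserve vs NORMAL: %ld\n", lowmem_reserve[0][2]);
	return 0;
}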
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -4368,7 +4368,7 @@ static int shmem_parse_opt_casefold(struct fs_context *fc, struct fs_parameter *param,
 					 bool latest_version)
 {
 	struct shmem_options *ctx = fc->fs_private;
-	unsigned int version = UTF8_LATEST;
+	int version = UTF8_LATEST;
 	struct unicode_map *encoding;
 	char *version_str = param->string + 5;
 
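Editor's note: the type change matters because the UTF-8 version parser reports failure with a negative return value; if that result lands in an unsigned int, a later "version < 0" check can never fire and a garbage version is accepted. A minimal demonstration of this bug class, with a hypothetical parser standing in for the kernel's utf8 API:

#include <stdio.h>

/* Hypothetical parser: returns an encoded version, or -1 on bad input. */
static int parse_version(const char *s)
{
	return (s[0] >= '0' && s[0] <= '9') ? s[0] - '0' : -1;
}

int main(void)
{
	unsigned int uver = parse_version("bogus");	/* -1 wraps to UINT_MAX */
	int          sver = parse_version("bogus");

	if (uver < 0)	/* never true: compilers warn "comparison always false" */
		puts("unsigned: caught");
	else
		printf("unsigned: accepted garbage version %u\n", uver);

	if (sver < 0)	/* the signed fix restores the error check */
		puts("signed: caught");
	return 0;
}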
mm/zswap.c (42 lines changed)

--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -820,15 +820,15 @@ static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
 {
 	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
 	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
-	struct crypto_acomp *acomp;
-	struct acomp_req *req;
+	struct crypto_acomp *acomp = NULL;
+	struct acomp_req *req = NULL;
+	u8 *buffer = NULL;
 	int ret;
 
-	mutex_lock(&acomp_ctx->mutex);
-	acomp_ctx->buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
-	if (!acomp_ctx->buffer) {
+	buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
+	if (!buffer) {
 		ret = -ENOMEM;
-		goto buffer_fail;
+		goto fail;
 	}
 
 	acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
@@ -836,21 +836,25 @@ static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
 		pr_err("could not alloc crypto acomp %s : %ld\n",
 				pool->tfm_name, PTR_ERR(acomp));
 		ret = PTR_ERR(acomp);
-		goto acomp_fail;
+		goto fail;
 	}
-	acomp_ctx->acomp = acomp;
-	acomp_ctx->is_sleepable = acomp_is_async(acomp);
 
-	req = acomp_request_alloc(acomp_ctx->acomp);
+	req = acomp_request_alloc(acomp);
 	if (!req) {
 		pr_err("could not alloc crypto acomp_request %s\n",
 		       pool->tfm_name);
 		ret = -ENOMEM;
-		goto req_fail;
+		goto fail;
 	}
-	acomp_ctx->req = req;
 
+	/*
+	 * Only hold the mutex after completing allocations, otherwise we may
+	 * recurse into zswap through reclaim and attempt to hold the mutex
+	 * again resulting in a deadlock.
+	 */
+	mutex_lock(&acomp_ctx->mutex);
 	crypto_init_wait(&acomp_ctx->wait);
+
 	/*
 	 * if the backend of acomp is async zip, crypto_req_done() will wakeup
 	 * crypto_wait_req(); if the backend of acomp is scomp, the callback
@@ -859,15 +863,17 @@ static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
 	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 				   crypto_req_done, &acomp_ctx->wait);
 
+	acomp_ctx->buffer = buffer;
+	acomp_ctx->acomp = acomp;
+	acomp_ctx->is_sleepable = acomp_is_async(acomp);
+	acomp_ctx->req = req;
 	mutex_unlock(&acomp_ctx->mutex);
 	return 0;
 
-req_fail:
-	crypto_free_acomp(acomp_ctx->acomp);
-acomp_fail:
-	kfree(acomp_ctx->buffer);
-buffer_fail:
-	mutex_unlock(&acomp_ctx->mutex);
+fail:
+	if (acomp)
+		crypto_free_acomp(acomp);
+	kfree(buffer);
 	return ret;
 }
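Editor's note: the zswap change is an instance of a general pattern: perform every allocation that might sleep or recurse (here, into reclaim, which can re-enter zswap and try to take the same per-CPU mutex) before taking the lock, hold the lock only long enough to publish the fully built state, and unwind through a single fail: label that frees whatever was actually allocated. A standalone pthread sketch of that shape, with invented names:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx {
	pthread_mutex_t lock;
	char *buffer;
	char *scratch;
};

/* Build everything up front, publish under the lock, one failure label. */
static int ctx_init(struct ctx *c, size_t size)
{
	char *buffer = NULL, *scratch = NULL;
	int ret;

	buffer = malloc(size);	/* may block; lock NOT held */
	if (!buffer) { ret = -1; goto fail; }
	scratch = malloc(size);
	if (!scratch) { ret = -1; goto fail; }

	pthread_mutex_lock(&c->lock);	/* short critical section: publish only */
	c->buffer = buffer;
	c->scratch = scratch;
	pthread_mutex_unlock(&c->lock);
	return 0;

fail:
	free(scratch);	/* free(NULL) is a no-op, like kfree(NULL) */
	free(buffer);
	return ret;
}

int main(void)
{
	struct ctx c = { .lock = PTHREAD_MUTEX_INITIALIZER };

	if (ctx_init(&c, 4096) == 0)
		puts("published");
	free(c.buffer);
	free(c.scratch);
	return 0;
}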