binder: replace alloc->vma with alloc->mapped
It is unsafe to use alloc->vma outside of the mmap_sem. Instead, add a
new boolean alloc->mapped to save the vma state (mapped or unmapped)
and use this as a replacement for alloc->vma to validate several paths.

Using alloc->vma caused several performance and security issues in the
past. Now that it has been replaced with either vma_lookup() or the
alloc->mapped state, we can finally remove it.

Cc: Minchan Kim <minchan@kernel.org>
Cc: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Carlos Llamas <cmllamas@google.com>
Link: https://lore.kernel.org/r/20241210143114.661252-6-cmllamas@google.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit 072010abc3
parent f909f03082
committed by Greg Kroah-Hartman
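
The new helpers rely on the release/acquire pairing called out in their
comments: the smp_store_release() in binder_alloc_set_mapped() orders all
writes made before the flag is set (the binder_alloc initialization done
in binder_alloc_mmap_handler()) ahead of the flag itself, so any thread
whose smp_load_acquire() in binder_alloc_is_mapped() observes true is
guaranteed to also observe a fully initialized binder_alloc. Below is a
minimal userspace C11 analogue of that publication pattern; the names
buffer_size, set_mapped() and is_mapped() are illustrative stand-ins,
not kernel code:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <threads.h>

static int buffer_size;            /* stand-in for the protected state */
static atomic_bool mapped;         /* stand-in for alloc->mapped       */

/* analogue of binder_alloc_set_mapped(): the release store orders the
 * write to buffer_size before the flag becomes visible */
static void set_mapped(bool state)
{
	atomic_store_explicit(&mapped, state, memory_order_release);
}

/* analogue of binder_alloc_is_mapped(): an acquire load; a reader that
 * sees true is guaranteed to see the earlier initialization as well */
static bool is_mapped(void)
{
	return atomic_load_explicit(&mapped, memory_order_acquire);
}

static int reader(void *arg)
{
	(void)arg;
	while (!is_mapped())
		;                                    /* spin until published */
	printf("buffer_size = %d\n", buffer_size);   /* always prints 4096 */
	return 0;
}

int main(void)
{
	thrd_t t;

	thrd_create(&t, reader, NULL);
	buffer_size = 4096;            /* initialize ...   */
	set_mapped(true);              /* ... then publish */
	thrd_join(t, NULL);
	return 0;
}

Had the store and load used memory_order_relaxed instead, the reader
could observe mapped == true while still reading a stale buffer_size;
the same reasoning is why the kernel helpers pair smp_store_release()
with smp_load_acquire().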
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -220,6 +220,19 @@ static void binder_lru_freelist_add(struct binder_alloc *alloc,
 	}
 }
 
+static inline
+void binder_alloc_set_mapped(struct binder_alloc *alloc, bool state)
+{
+	/* pairs with smp_load_acquire in binder_alloc_is_mapped() */
+	smp_store_release(&alloc->mapped, state);
+}
+
+static inline bool binder_alloc_is_mapped(struct binder_alloc *alloc)
+{
+	/* pairs with smp_store_release in binder_alloc_set_mapped() */
+	return smp_load_acquire(&alloc->mapped);
+}
+
 static struct page *binder_page_alloc(struct binder_alloc *alloc,
 				      unsigned long index)
 {
@@ -271,7 +284,7 @@ static int binder_install_single_page(struct binder_alloc *alloc,
 
 	mmap_read_lock(alloc->mm);
 	vma = vma_lookup(alloc->mm, addr);
-	if (!vma || vma != alloc->vma) {
+	if (!vma || !binder_alloc_is_mapped(alloc)) {
 		binder_free_page(page);
 		pr_err("%d: %s failed, no vma\n", alloc->pid, __func__);
 		ret = -ESRCH;
@@ -379,20 +392,6 @@ static void binder_lru_freelist_del(struct binder_alloc *alloc,
 	}
 }
 
-static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
-		struct vm_area_struct *vma)
-{
-	/* pairs with smp_load_acquire in binder_alloc_get_vma() */
-	smp_store_release(&alloc->vma, vma);
-}
-
-static inline struct vm_area_struct *binder_alloc_get_vma(
-		struct binder_alloc *alloc)
-{
-	/* pairs with smp_store_release in binder_alloc_set_vma() */
-	return smp_load_acquire(&alloc->vma);
-}
-
 static void debug_no_space_locked(struct binder_alloc *alloc)
 {
 	size_t largest_alloc_size = 0;
@@ -626,7 +625,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 	int ret;
 
 	/* Check binder_alloc is fully initialized */
-	if (!binder_alloc_get_vma(alloc)) {
+	if (!binder_alloc_is_mapped(alloc)) {
 		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
 				   "%d: binder_alloc_buf, no vma\n",
 				   alloc->pid);
@@ -908,7 +907,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 	alloc->free_async_space = alloc->buffer_size / 2;
 
 	/* Signal binder_alloc is fully initialized */
-	binder_alloc_set_vma(alloc, vma);
+	binder_alloc_set_mapped(alloc, true);
 
 	return 0;
 
@@ -938,7 +937,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 
 	buffers = 0;
 	mutex_lock(&alloc->mutex);
-	BUG_ON(alloc->vma);
+	BUG_ON(alloc->mapped);
 
 	while ((n = rb_first(&alloc->allocated_buffers))) {
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
@@ -1044,7 +1043,7 @@ void binder_alloc_print_pages(struct seq_file *m,
 	 * Make sure the binder_alloc is fully initialized, otherwise we might
 	 * read inconsistent state.
 	 */
-	if (binder_alloc_get_vma(alloc) != NULL) {
+	if (binder_alloc_is_mapped(alloc)) {
 		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
 			page = binder_get_installed_page(alloc, i);
 			if (!page)
@@ -1084,12 +1083,12 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
  * @alloc: binder_alloc for this proc
  *
  * Called from binder_vma_close() when releasing address space.
- * Clears alloc->vma to prevent new incoming transactions from
+ * Clears alloc->mapped to prevent new incoming transactions from
  * allocating more buffers.
  */
 void binder_alloc_vma_close(struct binder_alloc *alloc)
 {
-	binder_alloc_set_vma(alloc, NULL);
+	binder_alloc_set_mapped(alloc, false);
 }
 
 /**
@@ -1125,7 +1124,12 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 	page_addr = alloc->buffer + index * PAGE_SIZE;
 
 	vma = vma_lookup(mm, page_addr);
-	if (vma && vma != binder_alloc_get_vma(alloc))
+	/*
+	 * Since a binder_alloc can only be mapped once, we ensure
+	 * the vma corresponds to this mapping by checking whether
+	 * the binder_alloc is still mapped.
+	 */
+	if (vma && !binder_alloc_is_mapped(alloc))
 		goto err_invalid_vma;
 
 	trace_binder_unmap_kernel_start(alloc, index);
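
Not shown in this excerpt is the matching change to struct binder_alloc
itself, where the vma pointer gives way to the boolean flag. A sketch of
the relevant fields, assuming the upstream layout of
drivers/android/binder_alloc.h with surrounding fields elided:

struct binder_alloc {
	struct mutex mutex;
	/* was: struct vm_area_struct *vma; */
	bool mapped;		/* vma state, published with release/acquire */
	struct mm_struct *mm;
	unsigned long buffer;
	/* ... */
};

The resulting lifecycle: binder_alloc_mmap_handler() publishes
mapped = true once initialization is complete, binder_alloc_vma_close()
clears it when the address space is torn down, and paths that run
without the mmap lock gate themselves on binder_alloc_is_mapped()
instead of dereferencing a cached vma pointer, while paths that need
the actual vma look it up fresh with vma_lookup() under
mmap_read_lock().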