mirror of
https://github.com/raspberrypi/linux.git
[ Upstream commit c9358de193 ]

The hfi1 user SDMA pinned-page cache will leave a stale cache entry when the cache-entry's virtual address range is invalidated but that cache entry is in-use by an outstanding SDMA request. Subsequent user SDMA requests with buffers in or spanning the virtual address range of the stale cache entry will result in packets constructed from the wrong memory, the physical pages pointed to by the stale cache entry.

To fix this, remove mmu_rb_node cache entries from the mmu_rb_handler cache independent of the cache entry's refcount. Add 'struct kref refcount' to struct mmu_rb_node and manage mmu_rb_node lifetime with kref_get() and kref_put().

mmu_rb_node.refcount makes sdma_mmu_node.refcount redundant. Remove 'atomic_t refcount' from struct sdma_mmu_node and change sdma_mmu_node code to use mmu_rb_node.refcount.

Move the mmu_rb_handler destructor call after a wait-for-SDMA-request-completion call so mmu_rb_nodes that need mmu_rb_handler's workqueue to queue themselves up for destruction from an interrupt context may do so.

Fixes: f48ad614c1 ("IB/hfi1: Move driver out of staging")
Fixes: 00cbce5cbf ("IB/hfi1: Fix bugs with non-PAGE_SIZE-end multi-iovec user SDMA requests")
Link: https://lore.kernel.org/r/168451393605.3700681.13493776139032178861.stgit@awfm-02.cornelisnetworks.com
Reviewed-by: Dean Luick <dean.luick@cornelisnetworks.com>
Signed-off-by: Brendan Cunningham <bcunningham@cornelisnetworks.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
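For illustration, a minimal sketch of the lifetime rule the patch describes, assuming hypothetical helpers sdma_cache_hit() and sdma_cache_put() that are not part of the driver: an in-flight SDMA request holds a kref on its cache entry, and the final kref_put() invokes hfi1_mmu_rb_release() (declared in mmu_rb.h below) whether or not the entry has already been dropped from the rb-tree by an invalidate.

#include <linux/kref.h>
#include "mmu_rb.h"

/* Hypothetical helper: take a reference on a cache hit for the duration
 * of one SDMA request, so an invalidate cannot free it underneath us.
 */
static void sdma_cache_hit(struct mmu_rb_node *node)
{
	kref_get(&node->refcount);
}

/* Hypothetical helper: drop the request's reference on completion,
 * possibly from interrupt context.
 */
static void sdma_cache_put(struct mmu_rb_node *node)
{
	/* hfi1_mmu_rb_release() is the kref release function declared in
	 * mmu_rb.h; final teardown may be deferred to the handler's workqueue.
	 */
	kref_put(&node->refcount, hfi1_mmu_rb_release);
}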
63 lines
1.7 KiB
C
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
 * Copyright(c) 2020 Cornelis Networks, Inc.
 * Copyright(c) 2016 Intel Corporation.
 */

#ifndef _HFI1_MMU_RB_H
#define _HFI1_MMU_RB_H

#include "hfi.h"

struct mmu_rb_node {
	unsigned long addr;
	unsigned long len;
	unsigned long __last;
	struct rb_node node;
	struct mmu_rb_handler *handler;
	struct list_head list;
	struct kref refcount;
};

/*
 * NOTE: filter, insert, invalidate, and evict must not sleep. Only remove is
 * allowed to sleep.
 */
struct mmu_rb_ops {
	bool (*filter)(struct mmu_rb_node *node, unsigned long addr,
		       unsigned long len);
	int (*insert)(void *ops_arg, struct mmu_rb_node *mnode);
	void (*remove)(void *ops_arg, struct mmu_rb_node *mnode);
	int (*invalidate)(void *ops_arg, struct mmu_rb_node *node);
	int (*evict)(void *ops_arg, struct mmu_rb_node *mnode,
		     void *evict_arg, bool *stop);
};

struct mmu_rb_handler {
	struct mmu_notifier mn;
	struct rb_root_cached root;
	void *ops_arg;
	spinlock_t lock;        /* protect the RB tree */
	struct mmu_rb_ops *ops;
	struct list_head lru_list;
	struct work_struct del_work;
	struct list_head del_list;
	struct workqueue_struct *wq;
};

int hfi1_mmu_rb_register(void *ops_arg,
			 struct mmu_rb_ops *ops,
			 struct workqueue_struct *wq,
			 struct mmu_rb_handler **handler);
void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler);
int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
		       struct mmu_rb_node *mnode);
void hfi1_mmu_rb_release(struct kref *refcount);

void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg);
struct mmu_rb_node *hfi1_mmu_rb_get_first(struct mmu_rb_handler *handler,
					  unsigned long addr,
					  unsigned long len);

#endif /* _HFI1_MMU_RB_H */
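A hedged usage sketch of the registration API declared above; the demo_* callbacks and demo_setup() are assumptions for illustration, not code from the hfi1 driver. Per the NOTE in the header, only the remove callback may sleep.

#include "mmu_rb.h"

static bool demo_filter(struct mmu_rb_node *node, unsigned long addr,
			unsigned long len)
{
	/* Example policy only: match nodes whose range covers [addr, addr + len). */
	return node->addr <= addr && addr + len <= node->addr + node->len;
}

static int demo_insert(void *ops_arg, struct mmu_rb_node *mnode)
{
	return 0;	/* must not sleep */
}

static void demo_remove(void *ops_arg, struct mmu_rb_node *mnode)
{
	/* The only callback allowed to sleep, e.g. to unpin and free pages. */
}

static int demo_invalidate(void *ops_arg, struct mmu_rb_node *node)
{
	return 1;	/* must not sleep; return value interpreted by mmu_rb.c */
}

static int demo_evict(void *ops_arg, struct mmu_rb_node *mnode,
		      void *evict_arg, bool *stop)
{
	return 1;	/* must not sleep; return value interpreted by mmu_rb.c */
}

static struct mmu_rb_ops demo_ops = {
	.filter		= demo_filter,
	.insert		= demo_insert,
	.remove		= demo_remove,
	.invalidate	= demo_invalidate,
	.evict		= demo_evict,
};

/* Hypothetical setup path: ctx and wq are whatever the caller already owns. */
static int demo_setup(void *ctx, struct workqueue_struct *wq,
		      struct mmu_rb_handler **handler)
{
	return hfi1_mmu_rb_register(ctx, &demo_ops, wq, handler);
}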