net: gro_cells: Use nested-BH locking for gro_cell

The gro_cell data structure is a per-CPU variable and relies on disabled
BH for its locking. Since local_bh_disable() does not provide per-CPU
locking on PREEMPT_RT, this data structure requires explicit locking.

Add a local_lock_t to the data structure and use
local_lock_nested_bh() for locking. This change adds only lockdep
coverage and does not alter the functional behaviour for !PREEMPT_RT.
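
As a minimal sketch of the pattern in isolation (the demo_* names are
hypothetical and not part of the gro_cells code; the real change is in
the diff below):

#include <linux/init.h>
#include <linux/local_lock.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>

struct demo_cell {
	struct sk_buff_head	queue;
	local_lock_t		bh_lock;
};

static DEFINE_PER_CPU(struct demo_cell, demo_cells) = {
	.bh_lock = INIT_LOCAL_LOCK(bh_lock),
};

static int __init demo_init(void)
{
	int cpu;

	/* Queues need runtime init; the lock is statically initialised. */
	for_each_possible_cpu(cpu)
		__skb_queue_head_init(&per_cpu(demo_cells, cpu).queue);
	return 0;
}

static void demo_enqueue(struct sk_buff *skb)
{
	struct demo_cell *cell;

	local_bh_disable();
	/*
	 * On !PREEMPT_RT this is only a lockdep annotation; disabled BH
	 * already serialises access to this CPU's data. On PREEMPT_RT it
	 * acquires a real per-CPU lock, because local_bh_disable() no
	 * longer provides per-CPU exclusion there.
	 */
	local_lock_nested_bh(&demo_cells.bh_lock);
	cell = this_cpu_ptr(&demo_cells);
	__skb_queue_tail(&cell->queue, skb);
	local_unlock_nested_bh(&demo_cells.bh_lock);
	local_bh_enable();
}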

Reported-by: syzbot+8715dd783e9b0bef43b1@syzkaller.appspotmail.com
Closes: https://lore.kernel.org/all/68c6c3b1.050a0220.2ff435.0382.GAE@google.com/
Fixes: 3253cb49cb ("softirq: Allow to drop the softirq-BKL lock on PREEMPT_RT")
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Link: https://patch.msgid.link/20251009094338.j1jyKfjR@linutronix.de
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

--- a/net/core/gro_cells.c
+++ b/net/core/gro_cells.c
@@ -8,11 +8,13 @@
 struct gro_cell {
 	struct sk_buff_head	napi_skbs;
 	struct napi_struct	napi;
+	local_lock_t		bh_lock;
 };
 
 int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
 {
 	struct net_device *dev = skb->dev;
+	bool have_bh_lock = false;
 	struct gro_cell *cell;
 	int res;
 
@@ -25,6 +27,8 @@ int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
 		goto unlock;
 	}
 
+	local_lock_nested_bh(&gcells->cells->bh_lock);
+	have_bh_lock = true;
 	cell = this_cpu_ptr(gcells->cells);
 	if (skb_queue_len(&cell->napi_skbs) > READ_ONCE(net_hotdata.max_backlog)) {
@@ -39,6 +43,9 @@ drop:
 	if (skb_queue_len(&cell->napi_skbs) == 1)
 		napi_schedule(&cell->napi);
 
+	if (have_bh_lock)
+		local_unlock_nested_bh(&gcells->cells->bh_lock);
+
 	res = NET_RX_SUCCESS;
 
 unlock:
@@ -54,6 +61,7 @@ static int gro_cell_poll(struct napi_struct *napi, int budget)
 	struct sk_buff *skb;
 	int work_done = 0;
 
+	__local_lock_nested_bh(&cell->bh_lock);
 	while (work_done < budget) {
 		skb = __skb_dequeue(&cell->napi_skbs);
 		if (!skb)
@@ -64,6 +72,7 @@ static int gro_cell_poll(struct napi_struct *napi, int budget)
 
 	if (work_done < budget)
 		napi_complete_done(napi, work_done);
+	__local_unlock_nested_bh(&cell->bh_lock);
 	return work_done;
 }
 
@@ -79,6 +88,7 @@ int gro_cells_init(struct gro_cells *gcells, struct net_device *dev)
 		struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
 
 		__skb_queue_head_init(&cell->napi_skbs);
+		local_lock_init(&cell->bh_lock);
 
 		set_bit(NAPI_STATE_NO_BUSY_POLL, &cell->napi.state);
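
For illustration, a sketch of how a consumer of this API looks after the
change; the my_tun_* names are hypothetical, while gro_cells_init(),
gro_cells_receive() and gro_cells_destroy() are the existing gro_cells
entry points. Callers need no changes: bh_lock is initialised and taken
entirely inside the gro_cells helpers.

#include <linux/netdevice.h>
#include <net/gro_cells.h>

struct my_tun_priv {
	struct gro_cells	gro_cells;
};

static int my_tun_open(struct net_device *dev)
{
	struct my_tun_priv *priv = netdev_priv(dev);

	/* Sets up each per-CPU cell, now including its bh_lock. */
	return gro_cells_init(&priv->gro_cells, dev);
}

static int my_tun_rx(struct net_device *dev, struct sk_buff *skb)
{
	struct my_tun_priv *priv = netdev_priv(dev);

	/*
	 * Runs in BH context; gro_cells_receive() now takes
	 * cell->bh_lock internally before touching the per-CPU
	 * queue, so no extra locking is needed here.
	 */
	return gro_cells_receive(&priv->gro_cells, skb);
}

static void my_tun_stop(struct net_device *dev)
{
	struct my_tun_priv *priv = netdev_priv(dev);

	gro_cells_destroy(&priv->gro_cells);
}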