linux/include/net/netdev_lock.h
Jakub Kicinski 22cbc1ee26 netdev: fix the locking for netdev notifications
Kuniyuki reports that the assert for the netdev lock fires when there
are netdev event listeners (when there are none, the netlink event
generation, and the assert with it, is skipped).

Correct the locking when coming from the notifier.

The NETDEV_XDP_FEAT_CHANGE notifier is already fully locked;
it's the documentation that's incorrect.

Fixes: 99e44f39a8 ("netdev: depend on netdev->lock for xdp features")
Reported-by: syzkaller <syzkaller@googlegroups.com>
Reported-by: Kuniyuki Iwashima <kuniyu@amazon.com>
Link: https://lore.kernel.org/20250410171019.62128-1-kuniyu@amazon.com
Acked-by: Stanislav Fomichev <sdf@fomichev.me>
Link: https://patch.msgid.link/20250416030447.1077551-1-kuba@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2025-04-17 18:55:14 -07:00
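To illustrate the guarantee the commit describes, here is a minimal sketch (hypothetical code, not from the kernel tree) of a notifier callback that relies on NETDEV_XDP_FEAT_CHANGE being delivered with the per-device instance lock held, using the assertion helper from the header below:

#include <linux/netdevice.h>
#include <net/netdev_lock.h>

/* Hypothetical listener, for illustration only. */
static int example_netdev_event(struct notifier_block *nb,
                                unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);

        if (event == NETDEV_XDP_FEAT_CHANGE) {
                /* Per the commit message, this event is sent fully
                 * locked, so dev->lock must already be held here.
                 */
                netdev_assert_locked(dev);
        }

        return NOTIFY_DONE;
}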

137 lines, 3.3 KiB, C

/* SPDX-License-Identifier: GPL-2.0-or-later */

#ifndef _NET_NETDEV_LOCK_H
#define _NET_NETDEV_LOCK_H

#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
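/* Helpers built around the per-netdevice instance lock, dev->lock.  The
 * plain netdev_lock()/netdev_unlock() primitives come from
 * <linux/netdevice.h>, included above; this header adds the trylock,
 * assertion and "ops lock" wrappers layered on top of them.
 */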
static inline bool netdev_trylock(struct net_device *dev)
{
        return mutex_trylock(&dev->lock);
}

static inline void netdev_assert_locked(const struct net_device *dev)
{
        lockdep_assert_held(&dev->lock);
}
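/* Assert the instance lock only for devices that are visible to the rest
 * of the stack (registered or being unregistered); a device that is not
 * visible cannot be reached by other threads, so touching it without the
 * lock is fine.
 */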
static inline void
netdev_assert_locked_or_invisible(const struct net_device *dev)
{
        if (dev->reg_state == NETREG_REGISTERED ||
            dev->reg_state == NETREG_UNREGISTERING)
                netdev_assert_locked(dev);
}
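/* True if the device's ops are protected by the instance lock: either the
 * driver requested it explicitly, or it is implied by supplying queue
 * management (or, when enabled, net-shaper) ops.
 */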
static inline bool netdev_need_ops_lock(const struct net_device *dev)
{
        bool ret = dev->request_ops_lock || !!dev->queue_mgmt_ops;

#if IS_ENABLED(CONFIG_NET_SHAPER)
        ret |= !!dev->netdev_ops->net_shaper_ops;
#endif

        return ret;
}
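/* Take/release the instance lock around ops, but only for devices that
 * opted into ops locking; for everything else the ops stay RTNL-protected
 * and these helpers are no-ops.
 */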
static inline void netdev_lock_ops(struct net_device *dev)
{
        if (netdev_need_ops_lock(dev))
                netdev_lock(dev);
}

static inline void netdev_unlock_ops(struct net_device *dev)
{
        if (netdev_need_ops_lock(dev))
                netdev_unlock(dev);
}
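/* Transition between the "ops locked" and "fully locked" states: for an
 * ops-locked device dev->lock is already held, so only assert it;
 * otherwise the instance lock still has to be taken (or released).
 */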
static inline void netdev_lock_ops_to_full(struct net_device *dev)
{
        if (netdev_need_ops_lock(dev))
                netdev_assert_locked(dev);
        else
                netdev_lock(dev);
}

static inline void netdev_unlock_full_to_ops(struct net_device *dev)
{
        if (netdev_need_ops_lock(dev))
                netdev_assert_locked(dev);
        else
                netdev_unlock(dev);
}
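/* Assert whichever protection applies to the device's ops: the instance
 * lock for ops-locked devices, RTNL for everything else.
 */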
static inline void netdev_ops_assert_locked(const struct net_device *dev)
{
        if (netdev_need_ops_lock(dev))
                lockdep_assert_held(&dev->lock);
        else
                ASSERT_RTNL();
}

static inline void
netdev_ops_assert_locked_or_invisible(const struct net_device *dev)
{
        if (dev->reg_state == NETREG_REGISTERED ||
            dev->reg_state == NETREG_UNREGISTERING)
                netdev_ops_assert_locked(dev);
}
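/* Compat helpers for callers that want a single locking call regardless
 * of the device type: take the instance lock for ops-locked devices and
 * fall back to RTNL for the rest.
 */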
static inline void netdev_lock_ops_compat(struct net_device *dev)
{
        if (netdev_need_ops_lock(dev))
                netdev_lock(dev);
        else
                rtnl_lock();
}

static inline void netdev_unlock_ops_compat(struct net_device *dev)
{
        if (netdev_need_ops_lock(dev))
                netdev_unlock(dev);
        else
                rtnl_unlock();
}

static inline int netdev_lock_cmp_fn(const struct lockdep_map *a,
                                     const struct lockdep_map *b)
{
        /* Only lower devices currently grab the instance lock, so no
         * real ordering issues can occur. In the near future, only
         * hardware devices will grab instance lock which also does not
         * involve any ordering. Suppress lockdep ordering warnings
         * until (if) we start grabbing instance lock on pure SW
         * devices (bond/team/veth/etc).
         */
        if (a == b)
                return 0;
        return -1;
}
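/* Assign a static set of lockdep classes (one set per expansion site) to
 * the qdisc busylock, the address list lock, the instance lock and the
 * per-queue xmit locks, so that stacked devices of different types do not
 * trigger false lockdep nesting reports.
 */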
#define netdev_lockdep_set_classes(dev) \
{ \
        static struct lock_class_key qdisc_tx_busylock_key; \
        static struct lock_class_key qdisc_xmit_lock_key; \
        static struct lock_class_key dev_addr_list_lock_key; \
        static struct lock_class_key dev_instance_lock_key; \
        unsigned int i; \
 \
        (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \
        lockdep_set_class(&(dev)->addr_list_lock, \
                          &dev_addr_list_lock_key); \
        lockdep_set_class(&(dev)->lock, \
                          &dev_instance_lock_key); \
        lock_set_cmp_fn(&dev->lock, netdev_lock_cmp_fn, NULL); \
        for (i = 0; i < (dev)->num_tx_queues; i++) \
                lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \
                                  &qdisc_xmit_lock_key); \
}
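/* Notifier callback used by the netdev lock debugging code to verify that
 * each netdev notification is sent with the expected locks held.
 */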
int netdev_debug_event(struct notifier_block *nb, unsigned long event,
                       void *ptr);

#endif
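For context on how the ops-lock wrappers above are typically used, here is a minimal sketch (the wrapper is hypothetical; real core paths do more work) in which the caller already holds RTNL and the instance lock is additionally taken only for devices that opted into ops locking:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <net/netdev_lock.h>

/* Hypothetical helper, for illustration only: invoke a driver op with
 * whatever protection the device requires.  RTNL is assumed held.
 */
static int example_change_mtu(struct net_device *dev, int new_mtu)
{
        int err;

        ASSERT_RTNL();

        if (!dev->netdev_ops->ndo_change_mtu)
                return -EOPNOTSUPP;

        netdev_lock_ops(dev);           /* no-op unless the device is ops-locked */
        netdev_ops_assert_locked(dev);  /* dev->lock or RTNL, as appropriate */
        err = dev->netdev_ops->ndo_change_mtu(dev, new_mtu);
        netdev_unlock_ops(dev);

        return err;
}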