Diffstat (limited to 'include/linux/netdevice.h')
-rw-r--r--  include/linux/netdevice.h  81
1 file changed, 1 insertion(+), 80 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index d206c9592b60..9a297757df7e 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2630,40 +2630,6 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
 		f(dev, &dev->_tx[i], arg);
 }
 
-static inline int netdev_lock_cmp_fn(const struct lockdep_map *a,
-				     const struct lockdep_map *b)
-{
-	/* Only lower devices currently grab the instance lock, so no
-	 * real ordering issues can occur. In the near future, only
-	 * hardware devices will grab instance lock which also does not
-	 * involve any ordering. Suppress lockdep ordering warnings
-	 * until (if) we start grabbing instance lock on pure SW
-	 * devices (bond/team/veth/etc).
-	 */
-	if (a == b)
-		return 0;
-	return -1;
-}
-
-#define netdev_lockdep_set_classes(dev)				\
-{								\
-	static struct lock_class_key qdisc_tx_busylock_key;	\
-	static struct lock_class_key qdisc_xmit_lock_key;	\
-	static struct lock_class_key dev_addr_list_lock_key;	\
-	static struct lock_class_key dev_instance_lock_key;	\
-	unsigned int i;						\
-								\
-	(dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key;	\
-	lockdep_set_class(&(dev)->addr_list_lock,		\
-			  &dev_addr_list_lock_key);		\
-	lockdep_set_class(&(dev)->lock,				\
-			  &dev_instance_lock_key);		\
-	lock_set_cmp_fn(&dev->lock, netdev_lock_cmp_fn, NULL);	\
-	for (i = 0; i < (dev)->num_tx_queues; i++)		\
-		lockdep_set_class(&(dev)->_tx[i]._xmit_lock,	\
-				  &qdisc_xmit_lock_key);	\
-}
-
 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
 		   struct net_device *sb_dev);
 struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
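Note on the hunk above: netdev_lock_cmp_fn() returning -1 for two distinct lockdep maps suppresses lock-ordering checks between different devices' instance locks, as its comment explains, and netdev_lockdep_set_classes() is invoked once per device, typically from a virtual driver's setup path (bonding and team do this). A minimal usage sketch, assuming a hypothetical my_vdev_setup() callback:

static void my_vdev_setup(struct net_device *dev)	/* hypothetical */
{
	/* Give this device's qdisc busylock, addr_list_lock, instance
	 * lock, and per-queue _xmit_lock their own lockdep classes so
	 * nesting under another device's locks does not raise
	 * false-positive ordering warnings.
	 */
	netdev_lockdep_set_classes(dev);
}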
@@ -2765,56 +2731,11 @@ static inline void netdev_lock(struct net_device *dev)
 	mutex_lock(&dev->lock);
 }
 
-static inline bool netdev_trylock(struct net_device *dev)
-{
-	return mutex_trylock(&dev->lock);
-}
-
 static inline void netdev_unlock(struct net_device *dev)
 {
 	mutex_unlock(&dev->lock);
 }
-
-static inline void netdev_assert_locked(struct net_device *dev)
-{
-	lockdep_assert_held(&dev->lock);
-}
-
-static inline void netdev_assert_locked_or_invisible(struct net_device *dev)
-{
-	if (dev->reg_state == NETREG_REGISTERED ||
-	    dev->reg_state == NETREG_UNREGISTERING)
-		netdev_assert_locked(dev);
-}
-
-static inline bool netdev_need_ops_lock(struct net_device *dev)
-{
-	bool ret = dev->request_ops_lock || !!dev->queue_mgmt_ops;
-
-#if IS_ENABLED(CONFIG_NET_SHAPER)
-	ret |= !!dev->netdev_ops->net_shaper_ops;
-#endif
-
-	return ret;
-}
-
-static inline void netdev_lock_ops(struct net_device *dev)
-{
-	if (netdev_need_ops_lock(dev))
-		netdev_lock(dev);
-}
-
-static inline void netdev_unlock_ops(struct net_device *dev)
-{
-	if (netdev_need_ops_lock(dev))
-		netdev_unlock(dev);
-}
-
-static inline void netdev_ops_assert_locked(struct net_device *dev)
-{
-	if (netdev_need_ops_lock(dev))
-		lockdep_assert_held(&dev->lock);
-}
+/* Additional netdev_lock()-related helpers are in net/netdev_lock.h */
 
 void netif_napi_set_irq_locked(struct napi_struct *napi, int irq);
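The ops-locking helpers removed from this hunk now live in net/netdev_lock.h, per the comment added above; their calling convention is unchanged. Core code brackets ndo_* callbacks with netdev_lock_ops()/netdev_unlock_ops(), which only take dev->lock when netdev_need_ops_lock() says the device opted in (request_ops_lock, queue_mgmt_ops, or net-shaper ops). A sketch of that pattern, assuming a hypothetical example_change_mtu() caller; the real core paths carry more validation:

static int example_change_mtu(struct net_device *dev, int new_mtu)	/* hypothetical */
{
	int err;

	netdev_lock_ops(dev);	/* no-op unless netdev_need_ops_lock(dev) */
	err = dev->netdev_ops->ndo_change_mtu(dev, new_mtu);
	netdev_unlock_ops(dev);

	return err;
}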