Diffstat (limited to 'include/linux/netdevice.h')
-rw-r--r--  include/linux/netdevice.h | 79
1 files changed, 53 insertions, 26 deletions
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 6151e903eef0..f8898a435dc5 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -144,8 +144,6 @@ static inline bool dev_xmit_complete(int rc)
# else
# define LL_MAX_HEADER 96
# endif
-#elif IS_ENABLED(CONFIG_TR)
-# define LL_MAX_HEADER 48
#else
# define LL_MAX_HEADER 32
#endif
@@ -211,6 +209,7 @@ struct netdev_hw_addr {
#define NETDEV_HW_ADDR_T_UNICAST 4
#define NETDEV_HW_ADDR_T_MULTICAST 5
bool global_use;
+ int sync_cnt;
int refcount;
int synced;
struct rcu_head rcu_head;
@@ -785,13 +784,13 @@ struct netdev_fcoe_hbainfo {
* 3. Update dev->stats asynchronously and atomically, and define
* neither operation.
*
- * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid);
- * If device support VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
- * this function is called when a VLAN id is registered.
+ * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
+ * If device support VLAN filtering this function is called when a
+ * VLAN id is registered.
*
* int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
- * If device support VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
- * this function is called when a VLAN id is unregistered.
+ * If device support VLAN filtering this function is called when a
+ * VLAN id is unregistered.
*
* void (*ndo_poll_controller)(struct net_device *dev);
*
@@ -935,9 +934,9 @@ struct net_device_ops {
struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
int (*ndo_vlan_rx_add_vid)(struct net_device *dev,
- unsigned short vid);
+ __be16 proto, u16 vid);
int (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
- unsigned short vid);
+ __be16 proto, u16 vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
void (*ndo_poll_controller)(struct net_device *dev);
int (*ndo_netpoll_setup)(struct net_device *dev,
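
For illustration only, a minimal sketch of a driver-side implementation of the new callback signature. The driver name, my_priv structure and its VLAN bitmap are hypothetical and not part of this patch; they simply show how the newly added __be16 proto argument is typically checked before the VLAN id is recorded.

#include <linux/if_vlan.h>

struct my_priv {
	DECLARE_BITMAP(active_vlans, VLAN_N_VID);	/* assumed per-device VLAN table */
};

/* Hypothetical driver callback using the new signature: the VLAN
 * protocol (e.g. 802.1Q) now arrives alongside the VLAN id. */
static int my_ndo_vlan_rx_add_vid(struct net_device *dev,
				  __be16 proto, u16 vid)
{
	struct my_priv *priv = netdev_priv(dev);

	/* Only 802.1Q tags are handled in this hypothetical driver. */
	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	set_bit(vid, priv->active_vlans);
	return 0;
}
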
@@ -1073,6 +1072,8 @@ struct net_device {
struct list_head dev_list;
struct list_head napi_list;
struct list_head unreg_list;
+ struct list_head upper_dev_list; /* List of upper devices */
+
/* currently active device features */
netdev_features_t features;
@@ -1145,6 +1146,13 @@ struct net_device {
spinlock_t addr_list_lock;
struct netdev_hw_addr_list uc; /* Unicast mac addresses */
struct netdev_hw_addr_list mc; /* Multicast mac addresses */
+ struct netdev_hw_addr_list dev_addrs; /* list of device
+ * hw addresses
+ */
+#ifdef CONFIG_SYSFS
+ struct kset *queues_kset;
+#endif
+
bool uc_promisc;
unsigned int promiscuity;
unsigned int allmulti;
@@ -1177,21 +1185,11 @@ struct net_device {
* avoid dirtying this cache line.
*/
- struct list_head upper_dev_list; /* List of upper devices */
-
/* Interface address info used in eth_type_trans() */
unsigned char *dev_addr; /* hw address, (before bcast
because most packets are
unicast) */
- struct netdev_hw_addr_list dev_addrs; /* list of device
- hw addresses */
-
- unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */
-
-#ifdef CONFIG_SYSFS
- struct kset *queues_kset;
-#endif
#ifdef CONFIG_RPS
struct netdev_rx_queue *_rx;
@@ -1202,18 +1200,14 @@ struct net_device {
/* Number of RX queues currently active in device */
unsigned int real_num_rx_queues;
-#ifdef CONFIG_RFS_ACCEL
- /* CPU reverse-mapping for RX completion interrupts, indexed
- * by RX queue number. Assigned by driver. This must only be
- * set if the ndo_rx_flow_steer operation is defined. */
- struct cpu_rmap *rx_cpu_rmap;
-#endif
#endif
rx_handler_func_t __rcu *rx_handler;
void __rcu *rx_handler_data;
struct netdev_queue __rcu *ingress_queue;
+ unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */
+
/*
* Cache lines mostly used on transmit path
@@ -1235,6 +1229,12 @@ struct net_device {
#ifdef CONFIG_XPS
struct xps_dev_maps __rcu *xps_maps;
#endif
+#ifdef CONFIG_RFS_ACCEL
+ /* CPU reverse-mapping for RX completion interrupts, indexed
+ * by RX queue number. Assigned by driver. This must only be
+ * set if the ndo_rx_flow_steer operation is defined. */
+ struct cpu_rmap *rx_cpu_rmap;
+#endif
/* These may be needed for future network-power-down code. */
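
The comment block being moved above documents the accelerated-RFS contract. As a non-authoritative sketch, assuming CONFIG_RFS_ACCEL and a hypothetical array of per-queue MSI-X vectors, a driver that implements ndo_rx_flow_steer would populate rx_cpu_rmap at probe time roughly as follows:

#include <linux/cpu_rmap.h>

/* Hypothetical probe-time helper: build the IRQ-to-CPU reverse map
 * used by aRFS.  Only valid for drivers that also provide
 * ndo_rx_flow_steer. */
static int my_setup_rx_cpu_rmap(struct net_device *dev, int nvec,
				const int *irqs)
{
	int i, err;

	dev->rx_cpu_rmap = alloc_irq_cpu_rmap(nvec);
	if (!dev->rx_cpu_rmap)
		return -ENOMEM;

	for (i = 0; i < nvec; i++) {
		err = irq_cpu_rmap_add(dev->rx_cpu_rmap, irqs[i]);
		if (err) {
			free_irq_cpu_rmap(dev->rx_cpu_rmap);
			dev->rx_cpu_rmap = NULL;
			return err;
		}
	}
	return 0;
}
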
@@ -1475,6 +1475,11 @@ static inline void *netdev_priv(const struct net_device *dev)
*/
#define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
+/* Default NAPI poll() weight
+ * Device drivers are strongly advised to not use bigger value
+ */
+#define NAPI_POLL_WEIGHT 64
+
/**
* netif_napi_add - initialize a napi context
* @dev: network device
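
A short usage sketch, assuming a hypothetical my_poll handler: instead of a private magic number, the driver passes the shared default when registering its NAPI context.

/* Hypothetical NAPI poll handler; real drivers process up to
 * 'budget' packets here and return how many were handled. */
static int my_poll(struct napi_struct *napi, int budget)
{
	int done = 0;

	if (done < budget)
		napi_complete(napi);
	return done;
}

/* Probe-time registration using the recommended default weight. */
static void my_napi_setup(struct net_device *dev, struct napi_struct *napi)
{
	netif_napi_add(dev, napi, my_poll, NAPI_POLL_WEIGHT);
}
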
@@ -1612,6 +1617,9 @@ extern seqcount_t devnet_rename_seq; /* Device rename seq */
list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue_rcu(net, d) \
list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
+#define for_each_netdev_in_bond_rcu(bond, slave) \
+ for_each_netdev_rcu(&init_net, slave) \
+ if (netdev_master_upper_dev_get_rcu(slave) == bond)
#define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
static inline struct net_device *next_net_device(struct net_device *dev)
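
For illustration, a hypothetical caller of the new iterator. The macro walks all of init_net under RCU and keeps only the devices whose master upper device is the given bond, so it must run inside an RCU read-side section:

/* Hypothetical helper: count the slaves currently enslaved to 'bond'. */
static int my_count_bond_slaves(struct net_device *bond)
{
	struct net_device *slave;
	int count = 0;

	rcu_read_lock();
	for_each_netdev_in_bond_rcu(bond, slave)
		count++;
	rcu_read_unlock();

	return count;
}
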
@@ -1684,7 +1692,6 @@ extern int netdev_refcnt_read(const struct net_device *dev);
extern void free_netdev(struct net_device *dev);
extern void synchronize_net(void);
extern int init_dummy_netdev(struct net_device *dev);
-extern void netdev_resync_ops(struct net_device *dev);
extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
extern struct net_device *__dev_get_by_index(struct net *net, int ifindex);
@@ -2621,6 +2628,7 @@ extern int dev_uc_add(struct net_device *dev, const unsigned char *addr);
extern int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
extern int dev_uc_del(struct net_device *dev, const unsigned char *addr);
extern int dev_uc_sync(struct net_device *to, struct net_device *from);
+extern int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
extern void dev_uc_unsync(struct net_device *to, struct net_device *from);
extern void dev_uc_flush(struct net_device *dev);
extern void dev_uc_init(struct net_device *dev);
@@ -2632,6 +2640,7 @@ extern int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
extern int dev_mc_del(struct net_device *dev, const unsigned char *addr);
extern int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
extern int dev_mc_sync(struct net_device *to, struct net_device *from);
+extern int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
extern void dev_mc_flush(struct net_device *dev);
extern void dev_mc_init(struct net_device *dev);
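
A hedged sketch of the intended call pattern for the two new helpers (the aggregating 'upper' device and per-port helper below are hypothetical). Unlike dev_uc_sync()/dev_mc_sync(), the *_sync_multiple() variants rely on the new sync_cnt field in struct netdev_hw_addr, so a single source list can be kept in sync with several destination devices, e.g. when an upper device mirrors its address lists onto every port:

/* Hypothetical per-port helper, typically invoked from the upper
 * device's ndo_set_rx_mode for each lower/port device. */
static void my_sync_port_addrs(struct net_device *upper,
			       struct net_device *port)
{
	dev_uc_sync_multiple(port, upper);
	dev_mc_sync_multiple(port, upper);
}
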
@@ -2678,6 +2687,19 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
{
return __skb_gso_segment(skb, features, true);
}
+__be16 skb_network_protocol(struct sk_buff *skb);
+
+static inline bool can_checksum_protocol(netdev_features_t features,
+ __be16 protocol)
+{
+ return ((features & NETIF_F_GEN_CSUM) ||
+ ((features & NETIF_F_V4_CSUM) &&
+ protocol == htons(ETH_P_IP)) ||
+ ((features & NETIF_F_V6_CSUM) &&
+ protocol == htons(ETH_P_IPV6)) ||
+ ((features & NETIF_F_FCOE_CRC) &&
+ protocol == htons(ETH_P_FCOE)));
+}
#ifdef CONFIG_BUG
extern void netdev_rx_csum_fault(struct net_device *dev);
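
A minimal sketch (the helper name is hypothetical) of how the two new exports combine on a transmit path: resolve the packet's on-wire protocol with skb_network_protocol(), then fall back to software checksumming when the device's feature flags cannot cover it:

/* Hypothetical tx-path helper: compute the checksum in software if
 * the device cannot offload it for this packet's protocol. */
static int my_tx_checksum(struct sk_buff *skb, struct net_device *dev)
{
	__be16 proto = skb_network_protocol(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    !can_checksum_protocol(dev->features, proto))
		return skb_checksum_help(skb);

	return 0;
}
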
@@ -2756,6 +2778,11 @@ static inline void netif_set_gso_max_size(struct net_device *dev,
dev->gso_max_size = size;
}
+static inline bool netif_is_bond_master(struct net_device *dev)
+{
+ return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
+}
+
static inline bool netif_is_bond_slave(struct net_device *dev)
{
return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
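
As a final illustration (hypothetical function, not from this patch), the new netif_is_bond_master() pairs naturally with the existing netif_is_bond_slave() when a driver wants to treat any bonding participant specially:

/* Hypothetical guard: refuse device-specific offload setup for
 * bonding masters and slaves alike. */
static int my_setup_offload(struct net_device *dev)
{
	if (netif_is_bond_master(dev) || netif_is_bond_slave(dev))
		return -EOPNOTSUPP;

	/* ... program hardware offload state for a plain device ... */
	return 0;
}
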