Diffstat (limited to 'net/core/dev.c')
-rw-r--r--	net/core/dev.c | 219
1 file changed, 67 insertions(+), 152 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index bab8bcedd62e..5379b0c1190a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -170,25 +170,6 @@ static DEFINE_SPINLOCK(ptype_lock);
 static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
 static struct list_head ptype_all __read_mostly;	/* Taps */
 
-#ifdef CONFIG_NET_DMA
-struct net_dma {
-	struct dma_client client;
-	spinlock_t lock;
-	cpumask_t channel_mask;
-	struct dma_chan **channels;
-};
-
-static enum dma_state_client
-netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
-	enum dma_state state);
-
-static struct net_dma net_dma = {
-	.client = {
-		.event_callback = netdev_dma_event,
-	},
-};
-#endif
-
 /*
  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
  * semaphore.
@@ -1107,6 +1088,11 @@ int dev_open(struct net_device *dev)
 		dev->flags |= IFF_UP;
 
 		/*
+		 *	Enable NET_DMA
+		 */
+		dmaengine_get();
+
+		/*
 		 *	Initialize multicasting status
 		 */
 		dev_set_rx_mode(dev);
@@ -1183,6 +1169,11 @@ int dev_close(struct net_device *dev)
 	 */
 	call_netdevice_notifiers(NETDEV_DOWN, dev);
 
+	/*
+	 *	Shutdown NET_DMA
+	 */
+	dmaengine_put();
+
 	return 0;
 }
 
@@ -1543,7 +1534,19 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
 	skb->mac_len = skb->network_header - skb->mac_header;
 	__skb_pull(skb, skb->mac_len);
 
-	if (WARN_ON(skb->ip_summed != CHECKSUM_PARTIAL)) {
+	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
+		struct net_device *dev = skb->dev;
+		struct ethtool_drvinfo info = {};
+
+		if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
+			dev->ethtool_ops->get_drvinfo(dev, &info);
+
+		WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
+			"ip_summed=%d",
+		     info.driver, dev ? dev->features : 0L,
+		     skb->sk ? skb->sk->sk_route_caps : 0L,
+		     skb->len, skb->data_len, skb->ip_summed);
+
 		if (skb_header_cloned(skb) &&
 		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
 			return ERR_PTR(err);
@@ -2401,6 +2404,9 @@ int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 	if (!(skb->dev->features & NETIF_F_GRO))
 		goto normal;
 
+	if (skb_is_gso(skb) || skb_shinfo(skb)->frag_list)
+		goto normal;
+
 	rcu_read_lock();
 	list_for_each_entry_rcu(ptype, head, list) {
 		struct sk_buff *p;
@@ -2497,12 +2503,6 @@ EXPORT_SYMBOL(napi_gro_receive);
 
 void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
 {
-	skb_shinfo(skb)->nr_frags = 0;
-
-	skb->len -= skb->data_len;
-	skb->truesize -= skb->data_len;
-	skb->data_len = 0;
-
 	__skb_pull(skb, skb_headlen(skb));
 	skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
 
@@ -2536,6 +2536,7 @@ struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi,
 
 	if (!pskb_may_pull(skb, ETH_HLEN)) {
 		napi_reuse_skb(napi, skb);
+		skb = NULL;
 		goto out;
 	}
 
@@ -2754,14 +2755,7 @@ out:
 	 * There may not be any more sk_buffs coming right now, so push
 	 * any pending DMA copies to hardware
 	 */
-	if (!cpus_empty(net_dma.channel_mask)) {
-		int chan_idx;
-		for_each_cpu_mask_nr(chan_idx, net_dma.channel_mask) {
-			struct dma_chan *chan = net_dma.channels[chan_idx];
-			if (chan)
-				dma_async_memcpy_issue_pending(chan);
-		}
-	}
+	dma_issue_pending_all();
 #endif
 
 	return;
@@ -4450,6 +4444,45 @@ err_uninit:
 }
 
 /**
+ *	init_dummy_netdev	- init a dummy network device for NAPI
+ *	@dev: device to init
+ *
+ *	This takes a network device structure and initialize the minimum
+ *	amount of fields so it can be used to schedule NAPI polls without
+ *	registering a full blown interface. This is to be used by drivers
+ *	that need to tie several hardware interfaces to a single NAPI
+ *	poll scheduler due to HW limitations.
+ */
+int init_dummy_netdev(struct net_device *dev)
+{
+	/* Clear everything. Note we don't initialize spinlocks
+	 * are they aren't supposed to be taken by any of the
+	 * NAPI code and this dummy netdev is supposed to be
+	 * only ever used for NAPI polls
+	 */
+	memset(dev, 0, sizeof(struct net_device));
+
+	/* make sure we BUG if trying to hit standard
+	 * register/unregister code path
+	 */
+	dev->reg_state = NETREG_DUMMY;
+
+	/* initialize the ref count */
+	atomic_set(&dev->refcnt, 1);
+
+	/* NAPI wants this */
+	INIT_LIST_HEAD(&dev->napi_list);
+
+	/* a dummy interface is started by default */
+	set_bit(__LINK_STATE_PRESENT, &dev->state);
+	set_bit(__LINK_STATE_START, &dev->state);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(init_dummy_netdev);
+
+
+/**
  *	register_netdev	- register a network device
  *	@dev: device to register
  *
@@ -4952,122 +4985,6 @@ static int dev_cpu_callback(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }
 
-#ifdef CONFIG_NET_DMA
-/**
- * net_dma_rebalance - try to maintain one DMA channel per CPU
- * @net_dma: DMA client and associated data (lock, channels, channel_mask)
- *
- * This is called when the number of channels allocated to the net_dma client
- * changes.  The net_dma client tries to have one DMA channel per CPU.
- */
-
-static void net_dma_rebalance(struct net_dma *net_dma)
-{
-	unsigned int cpu, i, n, chan_idx;
-	struct dma_chan *chan;
-
-	if (cpus_empty(net_dma->channel_mask)) {
-		for_each_online_cpu(cpu)
-			rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
-		return;
-	}
-
-	i = 0;
-	cpu = first_cpu(cpu_online_map);
-
-	for_each_cpu_mask_nr(chan_idx, net_dma->channel_mask) {
-		chan = net_dma->channels[chan_idx];
-
-		n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
-		   + (i < (num_online_cpus() %
-			cpus_weight(net_dma->channel_mask)) ? 1 : 0));
-
-		while(n) {
-			per_cpu(softnet_data, cpu).net_dma = chan;
-			cpu = next_cpu(cpu, cpu_online_map);
-			n--;
-		}
-		i++;
-	}
-}
-
-/**
- * netdev_dma_event - event callback for the net_dma_client
- * @client: should always be net_dma_client
- * @chan: DMA channel for the event
- * @state: DMA state to be handled
- */
-static enum dma_state_client
-netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
-	enum dma_state state)
-{
-	int i, found = 0, pos = -1;
-	struct net_dma *net_dma =
-		container_of(client, struct net_dma, client);
-	enum dma_state_client ack = DMA_DUP; /* default: take no action */
-
-	spin_lock(&net_dma->lock);
-	switch (state) {
-	case DMA_RESOURCE_AVAILABLE:
-		for (i = 0; i < nr_cpu_ids; i++)
-			if (net_dma->channels[i] == chan) {
-				found = 1;
-				break;
-			} else if (net_dma->channels[i] == NULL && pos < 0)
-				pos = i;
-
-		if (!found && pos >= 0) {
-			ack = DMA_ACK;
-			net_dma->channels[pos] = chan;
-			cpu_set(pos, net_dma->channel_mask);
-			net_dma_rebalance(net_dma);
-		}
-		break;
-	case DMA_RESOURCE_REMOVED:
-		for (i = 0; i < nr_cpu_ids; i++)
-			if (net_dma->channels[i] == chan) {
-				found = 1;
-				pos = i;
-				break;
-			}
-
-		if (found) {
-			ack = DMA_ACK;
-			cpu_clear(pos, net_dma->channel_mask);
-			net_dma->channels[i] = NULL;
-			net_dma_rebalance(net_dma);
-		}
-		break;
-	default:
-		break;
-	}
-	spin_unlock(&net_dma->lock);
-
-	return ack;
-}
-
-/**
- * netdev_dma_register - register the networking subsystem as a DMA client
- */
-static int __init netdev_dma_register(void)
-{
-	net_dma.channels = kzalloc(nr_cpu_ids * sizeof(struct net_dma),
-								GFP_KERNEL);
-	if (unlikely(!net_dma.channels)) {
-		printk(KERN_NOTICE
-				"netdev_dma: no memory for net_dma.channels\n");
-		return -ENOMEM;
-	}
-	spin_lock_init(&net_dma.lock);
-	dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
-	dma_async_client_register(&net_dma.client);
-	dma_async_client_chan_request(&net_dma.client);
-	return 0;
-}
-
-#else
-static int __init netdev_dma_register(void) { return -ENODEV; }
-#endif /* CONFIG_NET_DMA */
 
 /**
  *	netdev_increment_features - increment feature set by one
@@ -5287,8 +5204,6 @@ static int __init net_dev_init(void)
 	if (register_pernet_device(&default_device_ops))
 		goto out;
 
-	netdev_dma_register();
-
 	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
 	open_softirq(NET_RX_SOFTIRQ, net_rx_action);
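For context, the NET_DMA hunks above move net/core/dev.c to the reworked dmaengine client model: instead of registering a struct dma_client with an event callback and hand-balancing channels across CPUs, a subsystem now simply holds a reference on the dmaengine core for as long as it may use channels. A minimal sketch of that pattern follows; the consumer_* names are illustrative only and are not part of this diff.

	#include <linux/dmaengine.h>

	/* Pin the dmaengine core while channels may be used,
	 * mirroring the dmaengine_get() call added to dev_open(). */
	static void consumer_up(void)
	{
		dmaengine_get();
	}

	/* Drop the reference again, mirroring dev_close(). */
	static void consumer_down(void)
	{
		dmaengine_put();
	}

	/* Kick all queued async copies in one call; this replaces the
	 * old per-channel dma_async_memcpy_issue_pending() loop that
	 * net_rx_action() used to run over net_dma.channel_mask. */
	static void consumer_flush(void)
	{
		dma_issue_pending_all();
	}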
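The new init_dummy_netdev() is aimed at drivers whose hardware hangs several logical interfaces off a single interrupt/poll source. A rough sketch of how such a driver might tie its ports to one NAPI context through a dummy netdev is below; the foo_* names and the stubbed poll body are hypothetical, and only the init_dummy_netdev()/netif_napi_add() usage comes from this diff.

	#include <linux/netdevice.h>

	/* Hypothetical driver state: one never-registered dummy netdev
	 * anchors a NAPI context shared by several hardware ports. */
	struct foo_priv {
		struct net_device dummy_dev;
		struct napi_struct napi;
	};

	/* NAPI poll servicing the shared interrupt source (stubbed). */
	static int foo_poll(struct napi_struct *napi, int budget)
	{
		int work_done = 0;

		/* process up to 'budget' packets from all ports here */

		if (work_done < budget)
			napi_complete(napi);
		return work_done;
	}

	static void foo_napi_setup(struct foo_priv *priv)
	{
		/* never register_netdev() this device: its reg_state is
		 * NETREG_DUMMY, so the standard register/unregister path
		 * would BUG, by design */
		init_dummy_netdev(&priv->dummy_dev);
		netif_napi_add(&priv->dummy_dev, &priv->napi, foo_poll, 64);
		napi_enable(&priv->napi);
	}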
