author    Lachlan McIlroy <lachlan@redback.melbourne.sgi.com>    2008-02-18 13:51:42 +1100
committer Lachlan McIlroy <lachlan@redback.melbourne.sgi.com>    2008-02-18 13:51:42 +1100
commit    c58310bf4933986513020fa90b4190c7492995ae (patch)
tree      143f2c7578d02ebef5db8fc57ae69e951ae0e2ee /net/core
parent    269cdfaf769f5cd831284cc831790c7c5038040f (diff)
parent    1309d4e68497184d2fd87e892ddf14076c2bda98 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6 into for-linus
Diffstat (limited to 'net/core')
-rw-r--r--  net/core/dev.c        |  9
-rw-r--r--  net/core/flow.c       |  6
-rw-r--r--  net/core/neighbour.c  | 12
-rw-r--r--  net/core/rtnetlink.c  | 80
-rw-r--r--  net/core/skbuff.c     |  7
-rw-r--r--  net/core/sock.c       |  4
6 files changed, 41 insertions(+), 77 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 9549417250bb..908f07c3bd7d 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1071,8 +1071,6 @@ int dev_close(struct net_device *dev)
*/
call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
- dev_deactivate(dev);
-
clear_bit(__LINK_STATE_START, &dev->state);
/* Synchronize to scheduled poll. We cannot touch poll list,
@@ -1083,6 +1081,8 @@ int dev_close(struct net_device *dev)
*/
smp_mb__after_clear_bit(); /* Commit netif_running(). */
+ dev_deactivate(dev);
+
/*
* Call the device specific close. This cannot fail.
* Only if device is UP
@@ -2143,7 +2143,7 @@ static int process_backlog(struct napi_struct *napi, int quota)
*
* The entry's receive function will be scheduled to run
*/
-void fastcall __napi_schedule(struct napi_struct *n)
+void __napi_schedule(struct napi_struct *n)
{
unsigned long flags;
@@ -3038,8 +3038,7 @@ int dev_unicast_sync(struct net_device *to, struct net_device *from)
EXPORT_SYMBOL(dev_unicast_sync);
/**
- * dev_unicast_unsync - Remove synchronized addresses from the destination
- * device
+ * dev_unicast_unsync - Remove synchronized addresses from the destination device
* @to: destination device
* @from: source device
*
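
Note on the dev_close() hunks above: dev_deactivate() now runs only after __LINK_STATE_START is cleared and smp_mb__after_clear_bit() has committed netif_running() to false, so the transmit/poll path can no longer schedule new work while the qdiscs are torn down. A minimal user-space model of that ordering, using C11 atomics with a thread standing in for the poll path — all names here are illustrative, not kernel API:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool dev_running = true;

/* Stands in for the poll/xmit path: may only schedule work while running. */
static void *poll_path(void *arg)
{
	(void)arg;
	for (int i = 0; i < 1000000; i++) {
		if (!atomic_load(&dev_running))
			break;		/* netif_running() went false: stop scheduling */
		/* ... process one unit of work ... */
	}
	return NULL;
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, poll_path, NULL);

	/* dev_close() ordering after the patch: */
	atomic_store(&dev_running, false);		/* clear_bit(__LINK_STATE_START) */
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb__after_clear_bit() */
	pthread_join(t, NULL);				/* synchronize to scheduled poll */
	puts("dev_deactivate(): queues can be torn down safely");
	return 0;
}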
diff --git a/net/core/flow.c b/net/core/flow.c
index 46b38e06e0d7..a77531c139b7 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -30,8 +30,8 @@ struct flow_cache_entry {
struct flow_cache_entry *next;
u16 family;
u8 dir;
- struct flowi key;
u32 genid;
+ struct flowi key;
void *object;
atomic_t *object_ref;
};
@@ -52,7 +52,7 @@ struct flow_percpu_info {
int hash_rnd_recalc;
u32 hash_rnd;
int count;
-} ____cacheline_aligned;
+};
static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
#define flow_hash_rnd_recalc(cpu) \
@@ -346,7 +346,7 @@ static int __init flow_cache_init(void)
flow_cachep = kmem_cache_create("flow_cache",
sizeof(struct flow_cache_entry),
- 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
+ 0, SLAB_PANIC,
NULL);
flow_hash_shift = 10;
flow_lwm = 2 * flow_hash_size;
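
Both flow.c hunks shrink the flow cache footprint: moving the u32 genid ahead of the large key lets it fill the padding hole after family/dir, and dropping ____cacheline_aligned plus SLAB_HWCACHE_ALIGN trades cacheline-aligned slab objects for denser packing. A sketch of the field-reordering effect, with a stand-in key given 8-byte alignment so the padding is visible — the real struct flowi layout is larger and config-dependent, so the sizes are illustrative only:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for struct flowi. */
struct key { uint64_t hi, lo; };

struct entry_before {	/* key first, genid after it */
	void    *next;
	uint16_t family;
	uint8_t  dir;
	struct key key;	/* 5 bytes of padding inserted before this */
	uint32_t genid;	/* and 4 more after this, before the pointer */
	void    *object;
};

struct entry_after {	/* genid moved into the padding hole */
	void    *next;
	uint16_t family;
	uint8_t  dir;
	uint32_t genid;
	struct key key;
	void    *object;
};

int main(void)
{
	/* Typically prints 48 vs 40 on LP64 targets. */
	printf("before: %zu bytes\n", sizeof(struct entry_before));
	printf("after:  %zu bytes\n", sizeof(struct entry_after));
	return 0;
}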
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index a16cf1ec5e5e..7bb6a9a1256d 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -834,18 +834,12 @@ static void neigh_timer_handler(unsigned long arg)
}
if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
struct sk_buff *skb = skb_peek(&neigh->arp_queue);
- /* keep skb alive even if arp_queue overflows */
- if (skb)
- skb_get(skb);
- write_unlock(&neigh->lock);
+
neigh->ops->solicit(neigh, skb);
atomic_inc(&neigh->probes);
- if (skb)
- kfree_skb(skb);
- } else {
-out:
- write_unlock(&neigh->lock);
}
+out:
+ write_unlock(&neigh->lock);
if (notify)
neigh_update_notify(neigh);
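
The neighbour.c hunk drops the take-a-reference-then-unlock idiom and goes back to calling ->solicit() with neigh->lock held. For reference, the deleted idiom in its general form — pin the object with a refcount before dropping the lock, so the unlocked callback cannot race with the object being freed. A user-space sketch with illustrative names:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct buf {
	atomic_int refcnt;
	int payload;
};

static void buf_get(struct buf *b) { atomic_fetch_add(&b->refcnt, 1); }

static void buf_put(struct buf *b)
{
	if (atomic_fetch_sub(&b->refcnt, 1) == 1)
		free(b);			/* last reference gone */
}

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

static void solicit(struct buf *b)
{
	if (b)
		printf("soliciting with payload %d\n", b->payload);
}

/* The deleted idiom: pin the object, drop the lock, call out, unpin. */
static void solicit_unlocked(struct buf *b)
{
	pthread_mutex_lock(&queue_lock);
	if (b)
		buf_get(b);		/* keep b alive even if the queue drops it */
	pthread_mutex_unlock(&queue_lock);
	solicit(b);			/* callback runs without the lock held */
	if (b)
		buf_put(b);
}

int main(void)
{
	struct buf *b = malloc(sizeof(*b));
	atomic_init(&b->refcnt, 1);	/* the queue's reference */
	b->payload = 42;
	solicit_unlocked(b);
	buf_put(b);			/* queue drops its reference */
	return 0;
}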
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index ddbdde82a700..ecb02afd52dc 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -82,32 +82,6 @@ int rtnl_trylock(void)
return mutex_trylock(&rtnl_mutex);
}
-int rtattr_parse(struct rtattr *tb[], int maxattr, struct rtattr *rta, int len)
-{
- memset(tb, 0, sizeof(struct rtattr*)*maxattr);
-
- while (RTA_OK(rta, len)) {
- unsigned flavor = rta->rta_type;
- if (flavor && flavor <= maxattr)
- tb[flavor-1] = rta;
- rta = RTA_NEXT(rta, len);
- }
- return 0;
-}
-
-int __rtattr_parse_nested_compat(struct rtattr *tb[], int maxattr,
- struct rtattr *rta, int len)
-{
- if (RTA_PAYLOAD(rta) < len)
- return -1;
- if (RTA_PAYLOAD(rta) >= RTA_ALIGN(len) + sizeof(struct rtattr)) {
- rta = RTA_DATA(rta) + RTA_ALIGN(len);
- return rtattr_parse_nested(tb, maxattr, rta);
- }
- memset(tb, 0, sizeof(struct rtattr *) * maxattr);
- return 0;
-}
-
static struct rtnl_link *rtnl_msg_handlers[NPROTO];
static inline int rtm_msgindex(int msgtype)
@@ -442,21 +416,6 @@ void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const void *data
memset(RTA_DATA(rta) + attrlen, 0, RTA_ALIGN(size) - size);
}
-size_t rtattr_strlcpy(char *dest, const struct rtattr *rta, size_t size)
-{
- size_t ret = RTA_PAYLOAD(rta);
- char *src = RTA_DATA(rta);
-
- if (ret > 0 && src[ret - 1] == '\0')
- ret--;
- if (size > 0) {
- size_t len = (ret >= size) ? size - 1 : ret;
- memset(dest, 0, size);
- memcpy(dest, src, len);
- }
- return ret;
-}
-
int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned group, int echo)
{
struct sock *rtnl = net->rtnl;
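
The deleted rtattr helpers have nlattr-based successors (nla_parse() covers rtattr_parse()'s role, nla_strlcpy() covers rtattr_strlcpy()'s). For reference, the copy semantics of the removed rtattr_strlcpy() restated as standalone C over a plain buffer — attr_strlcpy and the sample payload are illustrative, not kernel API:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Same semantics as the deleted rtattr_strlcpy(): copy a payload that may
 * or may not be NUL-terminated into dest, always terminate dest, and
 * return the source string length (strlcpy convention). */
static size_t attr_strlcpy(char *dest, const char *src, size_t payload, size_t size)
{
	size_t ret = payload;

	if (ret > 0 && src[ret - 1] == '\0')
		ret--;				/* don't count a trailing NUL */
	if (size > 0) {
		size_t len = (ret >= size) ? size - 1 : ret;
		memset(dest, 0, size);
		memcpy(dest, src, len);
	}
	return ret;
}

int main(void)
{
	char name[8];
	size_t n = attr_strlcpy(name, "eth0", 5, sizeof(name)); /* payload includes NUL */
	printf("%zu \"%s\"\n", n, name);	/* prints: 4 "eth0" */
	return 0;
}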
@@ -545,7 +504,7 @@ int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
-static void set_operstate(struct net_device *dev, unsigned char transition)
+static int set_operstate(struct net_device *dev, unsigned char transition, bool send_notification)
{
unsigned char operstate = dev->operstate;
@@ -568,8 +527,12 @@ static void set_operstate(struct net_device *dev, unsigned char transition)
write_lock_bh(&dev_base_lock);
dev->operstate = operstate;
write_unlock_bh(&dev_base_lock);
- netdev_state_change(dev);
- }
+
+ if (send_notification)
+ netdev_state_change(dev);
+ return 1;
+ } else
+ return 0;
}
static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
@@ -863,6 +826,7 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
if (tb[IFLA_BROADCAST]) {
nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
send_addr_notify = 1;
+ modified = 1;
}
if (ifm->ifi_flags || ifm->ifi_change) {
@@ -875,16 +839,23 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
dev_change_flags(dev, flags);
}
- if (tb[IFLA_TXQLEN])
- dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
+ if (tb[IFLA_TXQLEN]) {
+ if (dev->tx_queue_len != nla_get_u32(tb[IFLA_TXQLEN])) {
+ dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
+ modified = 1;
+ }
+ }
if (tb[IFLA_OPERSTATE])
- set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
+ modified |= set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]), false);
if (tb[IFLA_LINKMODE]) {
- write_lock_bh(&dev_base_lock);
- dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
- write_unlock_bh(&dev_base_lock);
+ if (dev->link_mode != nla_get_u8(tb[IFLA_LINKMODE])) {
+ write_lock_bh(&dev_base_lock);
+ dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
+ write_unlock_bh(&dev_base_lock);
+ modified = 1;
+ }
}
err = 0;
@@ -898,6 +869,10 @@ errout:
if (send_addr_notify)
call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+
+ if (modified)
+ netdev_state_change(dev);
+
return err;
}
@@ -1015,7 +990,7 @@ struct net_device *rtnl_create_link(struct net *net, char *ifname,
if (tb[IFLA_TXQLEN])
dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
if (tb[IFLA_OPERSTATE])
- set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
+ set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]), true);
if (tb[IFLA_LINKMODE])
dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
@@ -1411,9 +1386,6 @@ void __init rtnetlink_init(void)
}
EXPORT_SYMBOL(__rta_fill);
-EXPORT_SYMBOL(rtattr_strlcpy);
-EXPORT_SYMBOL(rtattr_parse);
-EXPORT_SYMBOL(__rtattr_parse_nested_compat);
EXPORT_SYMBOL(rtnetlink_put_metrics);
EXPORT_SYMBOL(rtnl_lock);
EXPORT_SYMBOL(rtnl_trylock);
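
The do_setlink() hunks all serve one pattern: compare before assigning, fold every real change into a single modified flag (set_operstate()'s new send_notification argument lets its change be folded in too), and emit one netdev_state_change() on the way out instead of one per attribute. The shape of that pattern as a self-contained sketch — struct device, apply_config and notify_change are illustrative stand-ins:

#include <stdbool.h>
#include <stdio.h>

struct device {
	unsigned int tx_queue_len;
	unsigned char link_mode;
};

static void notify_change(struct device *dev)
{
	(void)dev;
	puts("state-change notification");	/* stands in for netdev_state_change() */
}

/* Compare-before-assign per field, one accumulated flag, one notification. */
static void apply_config(struct device *dev, unsigned int txqlen, unsigned char mode)
{
	bool modified = false;

	if (dev->tx_queue_len != txqlen) {
		dev->tx_queue_len = txqlen;
		modified = true;
	}
	if (dev->link_mode != mode) {
		dev->link_mode = mode;
		modified = true;
	}
	if (modified)
		notify_change(dev);	/* one event, however many fields changed */
}

int main(void)
{
	struct device dev = { .tx_queue_len = 1000, .link_mode = 0 };
	apply_config(&dev, 1000, 1);	/* only link_mode changes: one notification */
	apply_config(&dev, 1000, 1);	/* nothing changes: silent */
	return 0;
}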
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 4e354221ec23..0d0fd28a9041 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1907,11 +1907,11 @@ void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
* of bytes already consumed and the next call to
* skb_seq_read() will return the remaining part of the block.
*
- * Note: The size of each block of data returned can be arbitrary,
+ * Note 1: The size of each block of data returned can be arbitrary,
* this limitation is the cost for zerocopy sequential
* reads of potentially non-linear data.
*
- * Note: Fragment lists within fragments are not implemented
+ * Note 2: Fragment lists within fragments are not implemented
* at the moment, state->root_skb could be replaced with
* a stack for this purpose.
*/
@@ -2106,11 +2106,10 @@ int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
/**
* skb_pull_rcsum - pull skb and update receive checksum
* @skb: buffer to update
- * @start: start of data before pull
* @len: length of data pulled
*
* This function performs an skb_pull on the packet and updates
- * update the CHECKSUM_COMPLETE checksum. It should be used on
+ * the CHECKSUM_COMPLETE checksum. It should be used on
* receive path processing instead of skb_pull unless you know
* that the checksum difference is zero (e.g., a valid IP header)
* or you are setting ip_summed to CHECKSUM_NONE.
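
What "updating the CHECKSUM_COMPLETE checksum" means arithmetically: the 16-bit one's complement sum is additive over concatenation, so pulling bytes off the front only requires subtracting their contribution rather than re-summing the whole packet. A self-contained sketch of that arithmetic — sample bytes and helper names are illustrative, and an even-length pull is used to keep 16-bit word alignment simple:

#include <stdint.h>
#include <stdio.h>

/* 16-bit one's complement sum, the arithmetic behind CHECKSUM_COMPLETE. */
static uint16_t csum(const uint8_t *data, size_t len)
{
	uint32_t sum = 0;
	for (size_t i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)data[i] << 8 | data[i + 1];
	if (len & 1)
		sum += (uint32_t)data[len - 1] << 8;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);	/* fold carries */
	return (uint16_t)sum;
}

/* One's complement subtraction: remove the pulled bytes' contribution. */
static uint16_t csum_sub(uint16_t whole, uint16_t pulled)
{
	uint32_t diff = (uint32_t)whole + (uint16_t)~pulled;
	while (diff >> 16)
		diff = (diff & 0xffff) + (diff >> 16);
	return (uint16_t)diff;
}

int main(void)
{
	uint8_t pkt[] = { 0x45, 0x00, 0x00, 0x54, 0xde, 0xad, 0xbe, 0xef };
	size_t pull = 4;	/* e.g. pulling a 4-byte header */

	uint16_t tail  = csum(pkt + pull, sizeof(pkt) - pull);	/* recomputed */
	uint16_t fixed = csum_sub(csum(pkt, sizeof(pkt)), csum(pkt, pull));

	printf("recomputed %04x, updated %04x\n", tail, fixed);	/* match */
	return 0;
}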
diff --git a/net/core/sock.c b/net/core/sock.c
index 433715fb141a..09cb3a74de7f 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1731,7 +1731,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
atomic_set(&sk->sk_drops, 0);
}
-void fastcall lock_sock_nested(struct sock *sk, int subclass)
+void lock_sock_nested(struct sock *sk, int subclass)
{
might_sleep();
spin_lock_bh(&sk->sk_lock.slock);
@@ -1748,7 +1748,7 @@ void fastcall lock_sock_nested(struct sock *sk, int subclass)
EXPORT_SYMBOL(lock_sock_nested);
-void fastcall release_sock(struct sock *sk)
+void release_sock(struct sock *sk)
{
/*
* The sk_lock has mutex_unlock() semantics:
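
The fastcall removals here and in dev.c are mechanical — the annotation had become a no-op once x86 standardized its calling convention. The comment above is the interesting part: sk_lock is a spinlock guarding an owned flag, so lock_sock() keeps ownership without keeping the spinlock, and release_sock() behaves like a mutex_unlock() that can first flush work queued by softirq-context users. A toy user-space model of that two-level lock, with illustrative names and a condition variable standing in for the waitqueue:

#include <pthread.h>
#include <stdbool.h>

struct sock_model {
	pthread_mutex_t slock;	/* stands in for sk_lock.slock */
	bool owned;		/* stands in for sk_lock.owned */
	pthread_cond_t wait;
};

static void lock_sock_model(struct sock_model *sk)
{
	pthread_mutex_lock(&sk->slock);
	while (sk->owned)			/* someone else owns it: wait */
		pthread_cond_wait(&sk->wait, &sk->slock);
	sk->owned = true;
	pthread_mutex_unlock(&sk->slock);	/* spinlock dropped, ownership kept */
}

static void release_sock_model(struct sock_model *sk)
{
	pthread_mutex_lock(&sk->slock);
	/* kernel: process sk_backlog here before giving up ownership */
	sk->owned = false;
	pthread_cond_signal(&sk->wait);		/* mutex_unlock() semantics: wake a waiter */
	pthread_mutex_unlock(&sk->slock);
}

int main(void)
{
	struct sock_model sk = {
		.slock = PTHREAD_MUTEX_INITIALIZER,
		.owned = false,
		.wait  = PTHREAD_COND_INITIALIZER,
	};
	lock_sock_model(&sk);
	release_sock_model(&sk);
	return 0;
}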