Diffstat (limited to 'net/decnet/dn_route.c')

 net/decnet/dn_route.c | 49 +++++++++++++++++++++++++++----------------------
 1 file changed, 27 insertions(+), 22 deletions(-)
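
The patch is a checkpatch-style cleanup of dn_route.c: assignments move out of if conditions, "== NULL" tests become !ptr, a space is added after the for/while/switch keywords, the struct opening brace joins the declaration line, and one explicit break is added ahead of a fall-through case. The dominant pattern, quoted from the dn_return_short() hunk below:

    /* Before: assignment buried in the condition */
    if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL)
            return NET_RX_DROP;

    /* After: assignment and NULL check on separate lines */
    skb = skb_unshare(skb, GFP_ATOMIC);
    if (!skb)
            return NET_RX_DROP;

The same split recurs in dn_return_long(), dn_route_rx_packet(), dn_route_rcv() and dn_route_input_slow().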
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 2193ae529e75..32b1bed8ae51 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -84,8 +84,7 @@
#include <net/dn_neigh.h>
#include <net/dn_fib.h>
-struct dn_rt_hash_bucket
-{
+struct dn_rt_hash_bucket {
struct dn_route __rcu *chain;
spinlock_t lock;
};
@@ -93,7 +92,7 @@ struct dn_rt_hash_bucket
extern struct neigh_table dn_neigh_table;
-static unsigned char dn_hiord_addr[6] = {0xAA,0x00,0x04,0x00,0x00,0x00};
+static unsigned char dn_hiord_addr[6] = {0xAA, 0x00, 0x04, 0x00, 0x00, 0x00};
static const int dn_rt_min_delay = 2 * HZ;
static const int dn_rt_max_delay = 10 * HZ;
@@ -359,10 +358,11 @@ static void dn_run_flush(struct timer_list *unused)
for (i = 0; i < dn_rt_hash_mask; i++) {
spin_lock_bh(&dn_rt_hash_table[i].lock);
- if ((rt = xchg((struct dn_route **)&dn_rt_hash_table[i].chain, NULL)) == NULL)
+ rt = xchg((struct dn_route **)&dn_rt_hash_table[i].chain, NULL);
+ if (!rt)
goto nothing_to_declare;
- for(; rt; rt = next) {
+ for (; rt; rt = next) {
next = rcu_dereference_raw(rt->dn_next);
RCU_INIT_POINTER(rt->dn_next, NULL);
dst_dev_put(&rt->dst);
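
The flush above uses a detach-then-walk idiom: under the bucket lock, xchg() swaps the whole chain head to NULL in one atomic step, and the detached list is then walked and released with no further contention. A minimal userspace sketch of the idiom, with C11 atomics standing in for the kernel's xchg(); the node and flush_chain names are illustrative, not kernel API:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct node {
            int val;
            struct node *next;
    };

    /* One shared chain head; the kernel keeps one per hash bucket. */
    static _Atomic(struct node *) chain;

    static void push(int val)
    {
            struct node *n = malloc(sizeof(*n));

            if (!n)
                    return;
            n->val = val;
            n->next = atomic_load(&chain);
            /* Retry until the head swap succeeds (lock-free push). */
            while (!atomic_compare_exchange_weak(&chain, &n->next, n))
                    ;
    }

    static void flush_chain(void)
    {
            /* Detach the whole chain in one atomic swap, like xchg() above. */
            struct node *rt = atomic_exchange(&chain, NULL);
            struct node *next;

            if (!rt)
                    return;                 /* nothing to declare */

            /* Walk and free the private, detached list at leisure. */
            for (; rt; rt = next) {
                    next = rt->next;
                    printf("flushing %d\n", rt->val);
                    free(rt);
            }
    }

    int main(void)
    {
            push(1);
            push(2);
            push(3);
            flush_chain();                  /* prints 3, 2, 1 */
            return 0;
    }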
@@ -425,7 +425,8 @@ static int dn_return_short(struct sk_buff *skb)
/* Add back headers */
skb_push(skb, skb->data - skb_network_header(skb));
- if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL)
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ if (!skb)
return NET_RX_DROP;
cb = DN_SKB_CB(skb);
@@ -461,7 +462,8 @@ static int dn_return_long(struct sk_buff *skb)
/* Add back all headers */
skb_push(skb, skb->data - skb_network_header(skb));
- if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL)
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ if (!skb)
return NET_RX_DROP;
cb = DN_SKB_CB(skb);
@@ -505,7 +507,8 @@ static int dn_route_rx_packet(struct net *net, struct sock *sk, struct sk_buff *
struct dn_skb_cb *cb;
int err;
- if ((err = dn_route_input(skb)) == 0)
+ err = dn_route_input(skb);
+ if (err == 0)
return dst_input(skb);
cb = DN_SKB_CB(skb);
@@ -629,7 +632,8 @@ int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type
if (dn == NULL)
goto dump_it;
- if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
goto out;
if (!pskb_may_pull(skb, 3))
@@ -898,7 +902,7 @@ static inline int dn_match_addr(__le16 addr1, __le16 addr2)
{
__u16 tmp = le16_to_cpu(addr1) ^ le16_to_cpu(addr2);
int match = 16;
- while(tmp) {
+ while (tmp) {
tmp >>= 1;
match--;
}
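
dn_match_addr() above is shown in full: XOR leaves a 1-bit wherever the two 16-bit DECnet addresses disagree, and each right shift needed to clear the result costs one bit of match length, so the return value is the length of the common leading prefix. A standalone demonstration (uint16_t stands in for the kernel's __le16 after le16_to_cpu()):

    #include <stdint.h>
    #include <stdio.h>

    /* Userspace copy of the logic above: length of the common bit prefix. */
    static int dn_match_addr(uint16_t addr1, uint16_t addr2)
    {
            uint16_t tmp = addr1 ^ addr2;   /* 1-bits mark disagreements */
            int match = 16;

            while (tmp) {                   /* highest differing bit decides */
                    tmp >>= 1;
                    match--;
            }
            return match;
    }

    int main(void)
    {
            /* 0x1234 vs 0x1235 differ only in bit 0: 15 leading bits match. */
            printf("%d\n", dn_match_addr(0x1234, 0x1235));  /* prints 15 */
            /* Identical addresses match in all 16 bits. */
            printf("%d\n", dn_match_addr(0xAA00, 0xAA00));  /* prints 16 */
            return 0;
    }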
@@ -1324,7 +1328,8 @@ static int dn_route_input_slow(struct sk_buff *skb)
dev_hold(in_dev);
- if ((dn_db = rcu_dereference(in_dev->dn_ptr)) == NULL)
+ dn_db = rcu_dereference(in_dev->dn_ptr);
+ if (!dn_db)
goto out;
/* Zero source addresses are not allowed */
@@ -1383,7 +1388,7 @@ static int dn_route_input_slow(struct sk_buff *skb)
fld.saddr = src_map;
}
- switch(res.type) {
+ switch (res.type) {
case RTN_UNICAST:
/*
* Forwarding check here, we only check for forwarding
@@ -1407,7 +1412,7 @@ static int dn_route_input_slow(struct sk_buff *skb)
flags |= RTCF_DOREDIRECT;
local_src = DN_FIB_RES_PREFSRC(res);
-
+ break;
case RTN_BLACKHOLE:
case RTN_UNREACHABLE:
break;
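
This is the only hunk that touches control flow rather than formatting: the RTN_UNICAST arm used to fall through into RTN_BLACKHOLE/RTN_UNREACHABLE. Because that arm contains nothing but break, the added break leaves behaviour unchanged; it only makes the control flow explicit, in the spirit of the kernel's -Wimplicit-fallthrough cleanups. Schematically:

    switch (res.type) {
    case RTN_UNICAST:
            /* ... forwarding checks ... */
            local_src = DN_FIB_RES_PREFSRC(res);
            break;          /* previously an implicit fall-through */
    case RTN_BLACKHOLE:
    case RTN_UNREACHABLE:
            break;
    }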
@@ -1526,7 +1531,7 @@ static int dn_route_input(struct sk_buff *skb)
return 0;
rcu_read_lock();
- for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt != NULL;
+ for (rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt != NULL;
rt = rcu_dereference(rt->dn_next)) {
if ((rt->fld.saddr == cb->src) &&
(rt->fld.daddr == cb->dst) &&
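
The lookup walks the bucket chain under rcu_read_lock(), loading each link through rcu_dereference() so readers never block the writers that graft and prune routes. A plain userspace sketch of the reader side, with ordinary loads standing in for the RCU primitives and illustrative route/route_lookup names:

    #include <stdio.h>

    struct route {
            int saddr, daddr;
            struct route *next;     /* kernel: struct dn_route __rcu *dn_next */
    };

    /* Reader-side chain walk; the kernel brackets this in rcu_read_lock() /
     * rcu_read_unlock() and loads ->next via rcu_dereference(). */
    static struct route *route_lookup(struct route *chain, int saddr, int daddr)
    {
            struct route *rt;

            for (rt = chain; rt != NULL; rt = rt->next)
                    if (rt->saddr == saddr && rt->daddr == daddr)
                            return rt;      /* kernel: take a ref, attach to skb */
            return NULL;
    }

    int main(void)
    {
            struct route b = { .saddr = 7, .daddr = 9, .next = NULL };
            struct route a = { .saddr = 1, .daddr = 2, .next = &b };

            printf("%s\n", route_lookup(&a, 7, 9) ? "hit" : "miss");  /* hit */
            return 0;
    }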
@@ -1739,13 +1744,13 @@ int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
s_h = cb->args[0];
s_idx = idx = cb->args[1];
- for(h = 0; h <= dn_rt_hash_mask; h++) {
+ for (h = 0; h <= dn_rt_hash_mask; h++) {
if (h < s_h)
continue;
if (h > s_h)
s_idx = 0;
rcu_read_lock_bh();
- for(rt = rcu_dereference_bh(dn_rt_hash_table[h].chain), idx = 0;
+ for (rt = rcu_dereference_bh(dn_rt_hash_table[h].chain), idx = 0;
rt;
rt = rcu_dereference_bh(rt->dn_next), idx++) {
if (idx < s_idx)
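
dn_cache_dump() is a standard resumable netlink dump: cb->args[0] holds the hash bucket (s_h) and cb->args[1] the index within it (s_idx), so when one skb fills up, the next callback invocation skips straight back to the saved position. A self-contained sketch of the resume logic; BUDGET, emit-by-printf and the table contents are all illustrative:

    #include <stdio.h>

    #define NBUCKETS   4
    #define PER_BUCKET 2
    #define BUDGET     3        /* stand-in for "the skb is full" */

    static const int table[NBUCKETS][PER_BUCKET] = {
            { 1, 2 }, { 3, 4 }, { 5, 6 }, { 7, 8 },
    };

    /* One dump pass: resume from (args[0], args[1]), emit up to BUDGET
     * entries, then save the position back, as dn_cache_dump() does with
     * cb->args[0] (s_h) and cb->args[1] (s_idx). Returns entries emitted. */
    static int dump(long args[2])
    {
            int s_h = args[0], s_idx = args[1];
            int h, idx = 0, done = 0;

            for (h = 0; h < NBUCKETS; h++) {
                    if (h < s_h)
                            continue;
                    if (h > s_h)
                            s_idx = 0;      /* fresh bucket: restart the index */
                    for (idx = 0; idx < PER_BUCKET; idx++) {
                            if (idx < s_idx)
                                    continue;
                            if (done == BUDGET)
                                    goto out;       /* buffer full, stop here */
                            printf("entry %d\n", table[h][idx]);
                            done++;
                    }
            }
    out:
            args[0] = h;
            args[1] = idx;
            return done;
    }

    int main(void)
    {
            long args[2] = { 0, 0 };

            while (dump(args) > 0)  /* the kernel re-invokes the callback */
                    printf("-- resume at bucket %ld, index %ld --\n",
                           args[0], args[1]);
            return 0;
    }

Note how entering a later bucket resets s_idx to 0, exactly as the h > s_h test does above.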
@@ -1779,7 +1784,7 @@ static struct dn_route *dn_rt_cache_get_first(struct seq_file *seq)
struct dn_route *rt = NULL;
struct dn_rt_cache_iter_state *s = seq->private;
- for(s->bucket = dn_rt_hash_mask; s->bucket >= 0; --s->bucket) {
+ for (s->bucket = dn_rt_hash_mask; s->bucket >= 0; --s->bucket) {
rcu_read_lock_bh();
rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain);
if (rt)
@@ -1809,7 +1814,7 @@ static void *dn_rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
struct dn_route *rt = dn_rt_cache_get_first(seq);
if (rt) {
- while(*pos && (rt = dn_rt_cache_get_next(seq, rt)))
+ while (*pos && (rt = dn_rt_cache_get_next(seq, rt)))
--*pos;
}
return *pos ? NULL : rt;
@@ -1864,21 +1869,21 @@ void __init dn_route_init(void)
goal = totalram_pages() >> (26 - PAGE_SHIFT);
- for(order = 0; (1UL << order) < goal; order++)
+ for (order = 0; (1UL << order) < goal; order++)
/* NOTHING */;
/*
* Only want 1024 entries max, since the table is very, very unlikely
* to be larger than that.
*/
- while(order && ((((1UL << order) * PAGE_SIZE) /
+ while (order && ((((1UL << order) * PAGE_SIZE) /
sizeof(struct dn_rt_hash_bucket)) >= 2048))
order--;
do {
dn_rt_hash_mask = (1UL << order) * PAGE_SIZE /
sizeof(struct dn_rt_hash_bucket);
- while(dn_rt_hash_mask & (dn_rt_hash_mask - 1))
+ while (dn_rt_hash_mask & (dn_rt_hash_mask - 1))
dn_rt_hash_mask--;
dn_rt_hash_table = (struct dn_rt_hash_bucket *)
__get_free_pages(GFP_ATOMIC, order);
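
dn_route_init() sizes the hash table in whole pages: derive a page order from total RAM, shrink it until fewer than 2048 buckets would fit (so that after rounding, at most 1024 remain, matching the comment), then round the bucket count down to a power of two so it can double as an AND-mask (the dn_rt_hash_mask-- in the following hunk turns the count into the mask proper). A standalone walk-through of the arithmetic; PAGE_SIZE, the 24-byte bucket and the starting goal are assumed values, and the allocation-retry do/while is omitted:

    #include <stdio.h>

    #define PAGE_SIZE   4096UL
    #define BUCKET_SIZE 24UL    /* hypothetical sizeof(struct dn_rt_hash_bucket) */

    int main(void)
    {
            unsigned long goal = 512;   /* stand-in for totalram_pages() >> (26 - PAGE_SHIFT) */
            unsigned long order, mask;

            /* Smallest page order that reaches the goal, as in dn_route_init(). */
            for (order = 0; (1UL << order) < goal; order++)
                    /* NOTHING */;

            /* Cap the table: drop the order until fewer than 2048 buckets fit. */
            while (order && (1UL << order) * PAGE_SIZE / BUCKET_SIZE >= 2048)
                    order--;

            mask = (1UL << order) * PAGE_SIZE / BUCKET_SIZE;
            printf("raw bucket count: %lu\n", mask);        /* 1365 */

            /* x & (x - 1) clears the lowest set bit; it is zero exactly when
             * x is a power of two, so this rounds the count down to one. */
            while (mask & (mask - 1))
                    mask--;
            printf("buckets: %lu, hash mask: %lu\n", mask, mask - 1);  /* 1024, 1023 */
            return 0;
    }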
@@ -1893,7 +1898,7 @@ void __init dn_route_init(void)
(long)(dn_rt_hash_mask*sizeof(struct dn_rt_hash_bucket))/1024);
dn_rt_hash_mask--;
- for(i = 0; i <= dn_rt_hash_mask; i++) {
+ for (i = 0; i <= dn_rt_hash_mask; i++) {
spin_lock_init(&dn_rt_hash_table[i].lock);
dn_rt_hash_table[i].chain = NULL;
}