From 93ece0c1a7ace88f10411dbb5643d2aa2fe00ebf Mon Sep 17 00:00:00 2001
From: Eugenia Emantayev
Date: Thu, 19 Jan 2012 09:45:05 +0000
Subject: mlx4_en: eth statistics modification

In native mode, display all available statistics. In SRIOV mode,
display only SW counter statistics on the VF; on the hypervisor,
display SW counters plus error statistics (obtained from FW).

Signed-off-by: Eugenia Emantayev
Reviewed-by: Yevgeny Petrilin
Signed-off-by: David S. Miller
---
 include/linux/mlx4/device.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'include/linux')

diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 5c4fe8e5bfe5..aea61905499b 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -621,6 +621,7 @@ void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
 int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
 int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn);
 void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn);
+void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap);
 int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
 int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
-- cgit

From 974c12360dfe6ab01201fe9e708e7755c413f8b6 Mon Sep 17 00:00:00 2001
From: Yuchung Cheng
Date: Thu, 19 Jan 2012 14:42:21 +0000
Subject: tcp: detect loss above high_seq in recovery

Correctly implement a loss detection heuristic: new sequences (above
high_seq) sent during fast recovery are deemed lost when higher
sequences are SACKed. The current code does not catch these losses,
because tcp_mark_head_lost() does not check packets beyond high_seq.
The fix is straightforward: check packets up to the highest SACKed
packet. In addition, all the FLAG_DATA_LOST logic is ineffective and
redundant and can be removed.

Update the loss heuristic comments. The algorithm above is documented
as heuristic B, but it is redundant because heuristic A already
covers it.

Note that this change only marks some forward-retransmitted packets
LOST. It does NOT forbid TCP from performing further CWR on new
losses. A potential follow-up patch under preparation is to perform
another CWR on "new" losses, such as:
1) a sequence above high_seq is lost (by resetting high_seq to
   snd_nxt)
2) a retransmission is lost

Signed-off-by: Yuchung Cheng
Signed-off-by: David S. Miller
---
 include/linux/snmp.h | 1 -
 1 file changed, 1 deletion(-)

(limited to 'include/linux')

diff --git a/include/linux/snmp.h b/include/linux/snmp.h
index e16557a357e5..c1241c428179 100644
--- a/include/linux/snmp.h
+++ b/include/linux/snmp.h
@@ -192,7 +192,6 @@ enum
 	LINUX_MIB_TCPPARTIALUNDO,		/* TCPPartialUndo */
 	LINUX_MIB_TCPDSACKUNDO,			/* TCPDSACKUndo */
 	LINUX_MIB_TCPLOSSUNDO,			/* TCPLossUndo */
-	LINUX_MIB_TCPLOSS,			/* TCPLoss */
 	LINUX_MIB_TCPLOSTRETRANSMIT,		/* TCPLostRetransmit */
 	LINUX_MIB_TCPRENOFAILURES,		/* TCPRenoFailures */
 	LINUX_MIB_TCPSACKFAILURES,		/* TCPSackFailures */
-- cgit
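To make the heuristic above concrete, here is a minimal stand-alone
model of the loss-marking rule (this is not the kernel's
tcp_mark_head_lost(); the struct, function names, and segment layout
are all illustrative): any un-SACKed segment below the highest SACKed
sequence is marked lost, regardless of whether it lies above high_seq.

#include <stdbool.h>
#include <stdio.h>

struct seg {
	unsigned int seq;	/* starting sequence number */
	bool sacked;		/* selectively acknowledged by the peer */
	bool lost;		/* marked lost by the heuristic */
};

/* Mark every un-SACKed segment below the highest SACKed sequence as
 * lost. Scanning up to the highest SACKed segment, rather than only up
 * to high_seq, is what catches losses above high_seq. */
static void mark_lost(struct seg *q, int n)
{
	unsigned int highest_sacked = 0;
	int i;

	for (i = 0; i < n; i++)
		if (q[i].sacked && q[i].seq > highest_sacked)
			highest_sacked = q[i].seq;

	for (i = 0; i < n; i++)
		if (!q[i].sacked && q[i].seq < highest_sacked)
			q[i].lost = true;
}

int main(void)
{
	/* Segments at 2000 and 4000 were SACKed, so 1000 and 3000 are
	 * deemed lost, even if 3000 was sent during fast recovery. */
	struct seg q[] = {
		{ 1000, false, false },
		{ 2000, true,  false },
		{ 3000, false, false },
		{ 4000, true,  false },
	};
	int i;

	mark_lost(q, 4);
	for (i = 0; i < 4; i++)
		printf("seq %u: %s\n", q[i].seq,
		       q[i].lost ? "lost" :
		       q[i].sacked ? "sacked" : "in flight");
	return 0;
}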
From 8cfd14ad1eb52e44cb1fe7b47a68126e45e04026 Mon Sep 17 00:00:00 2001
From: Glauber Costa
Date: Fri, 20 Jan 2012 04:57:15 +0000
Subject: cgroup: make sure memcg margin is 0 when over limit

For the memcg sock code, we'll need to register allocations that are
temporarily over limit. Let's make sure that the margin is 0 in this
case. I am keeping this as a separate patch, so that if any weird
interaction appears in the future, we can know exactly what caused it.

Suggested by Johannes Weiner

Signed-off-by: Glauber Costa
CC: KAMEZAWA Hiroyuki
CC: Johannes Weiner
CC: Michal Hocko
CC: Tejun Heo
CC: Li Zefan
Acked-by: Tejun Heo
Signed-off-by: David S. Miller
---
 include/linux/res_counter.h | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

(limited to 'include/linux')

diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h
index c9d625ca659e..d06d014afda6 100644
--- a/include/linux/res_counter.h
+++ b/include/linux/res_counter.h
@@ -142,7 +142,10 @@ static inline unsigned long long res_counter_margin(struct res_counter *cnt)
 	unsigned long flags;
 
 	spin_lock_irqsave(&cnt->lock, flags);
-	margin = cnt->limit - cnt->usage;
+	if (cnt->limit > cnt->usage)
+		margin = cnt->limit - cnt->usage;
+	else
+		margin = 0;
 	spin_unlock_irqrestore(&cnt->lock, flags);
 	return margin;
 }
-- cgit

From 0e90b31f4ba77027a7c21cbfc66404df0851ca21 Mon Sep 17 00:00:00 2001
From: Glauber Costa
Date: Fri, 20 Jan 2012 04:57:16 +0000
Subject: net: introduce res_counter_charge_nofail() for socket allocations

There is a case in __sk_mem_schedule() where an allocation is beyond
the maximum, yet we are allowed to proceed. It happens under the
following condition:

	sk->sk_wmem_queued + size >= sk->sk_sndbuf

The network code won't revert the allocation in this case, meaning
that at some point later it'll try to do it. Since this is never
communicated to the underlying res_counter code, there is an imbalance
in the res_counter uncharge operation.

I see two ways of fixing this:

1) storing the information about those allocations somewhere in memcg,
   and then deducting from that first, before we start draining the
   res_counter,
2) providing a slightly different allocation function for the
   res_counter, that matches the original behavior of the network code
   more closely.

I decided to go for #2 here, believing it to be more elegant, since #1
would require us to do basically that, but in a more obscure way.

Signed-off-by: Glauber Costa
Cc: KAMEZAWA Hiroyuki
Cc: Johannes Weiner
Cc: Michal Hocko
CC: Tejun Heo
CC: Li Zefan
CC: Laurent Chavey
Acked-by: Tejun Heo
Signed-off-by: David S. Miller
---
 include/linux/res_counter.h | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h
index d06d014afda6..da81af086eaf 100644
--- a/include/linux/res_counter.h
+++ b/include/linux/res_counter.h
@@ -109,12 +109,18 @@ void res_counter_init(struct res_counter *counter, struct res_counter *parent);
  *
  * returns 0 on success and <0 if the counter->usage will exceed the
  * counter->limit _locked call expects the counter->lock to be taken
+ *
+ * charge_nofail works the same, except that it charges the resource
+ * counter unconditionally, and returns < 0 if after the current
+ * charge we are over limit.
  */
 
 int __must_check res_counter_charge_locked(struct res_counter *counter,
 		unsigned long val);
 int __must_check res_counter_charge(struct res_counter *counter,
 		unsigned long val, struct res_counter **limit_fail_at);
+int __must_check res_counter_charge_nofail(struct res_counter *counter,
+		unsigned long val, struct res_counter **limit_fail_at);
 
 /*
  * uncharge - tell that some portion of the resource is released
-- cgit
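The two res_counter changes above are easy to demonstrate outside the
kernel. Below is a minimal user-space model (the names mirror the
kernel API for readability, but the counter hierarchy, the spinlock,
and the limit_fail_at argument of the real code are omitted): it shows
why the margin must be clamped at 0, since with unsigned arithmetic
limit - usage would wrap to a huge value once usage exceeds limit, and
how charge_nofail keeps the charge while still reporting failure.

#include <stdio.h>

struct res_counter {
	unsigned long long usage;
	unsigned long long limit;
};

/* Clamped margin: report 0 instead of letting the unsigned
 * subtraction wrap when usage has gone over limit. */
static unsigned long long res_counter_margin(struct res_counter *cnt)
{
	return cnt->limit > cnt->usage ? cnt->limit - cnt->usage : 0;
}

/* Charge unconditionally; return nonzero (but keep the charge) when
 * the counter ends up over its limit, mirroring how the network code
 * proceeds with the allocation and only reverts it later. */
static int res_counter_charge_nofail(struct res_counter *cnt,
				     unsigned long long val)
{
	cnt->usage += val;
	return cnt->usage > cnt->limit ? -1 : 0;
}

int main(void)
{
	struct res_counter cnt = { .usage = 0, .limit = 100 };

	res_counter_charge_nofail(&cnt, 80);		/* within limit */
	if (res_counter_charge_nofail(&cnt, 40))	/* now over limit */
		printf("over limit: usage %llu/%llu, margin %llu\n",
		       cnt.usage, cnt.limit, res_counter_margin(&cnt));
	return 0;
}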
From b82b9183d4f18f9b8c4bb31f223eb6c79b734eb0 Mon Sep 17 00:00:00 2001
From: Jiri Pirko
Date: Tue, 24 Jan 2012 05:16:00 +0000
Subject: team: send only changed options/ports via netlink

This patch changes the event message behaviour to send only updated
records instead of the whole list. This fixes a bug where userspace
receives stale data when multiple events occur in a row.

Signed-off-by: Jiri Pirko
Signed-off-by: David S. Miller
---
 include/linux/if_team.h | 10 ++++++++++
 1 file changed, 10 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index 828181fbad5d..58404b0c5010 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -46,6 +46,10 @@ struct team_port {
 	u32 speed;
 	u8 duplex;
 
+	/* Custom gennetlink interface related flags */
+	bool changed;
+	bool removed;
+
 	struct rcu_head rcu;
 };
 
@@ -72,6 +76,10 @@ struct team_option {
 	enum team_option_type type;
 	int (*getter)(struct team *team, void *arg);
 	int (*setter)(struct team *team, void *arg);
+
+	/* Custom gennetlink interface related flags */
+	bool changed;
+	bool removed;
 };
 
 struct team_mode {
@@ -207,6 +215,7 @@ enum {
 	TEAM_ATTR_OPTION_CHANGED,		/* flag */
 	TEAM_ATTR_OPTION_TYPE,			/* u8 */
 	TEAM_ATTR_OPTION_DATA,			/* dynamic */
+	TEAM_ATTR_OPTION_REMOVED,		/* flag */
 	__TEAM_ATTR_OPTION_MAX,
 	TEAM_ATTR_OPTION_MAX = __TEAM_ATTR_OPTION_MAX - 1,
@@ -227,6 +236,7 @@ enum {
 	TEAM_ATTR_PORT_LINKUP,			/* flag */
 	TEAM_ATTR_PORT_SPEED,			/* u32 */
 	TEAM_ATTR_PORT_DUPLEX,			/* u8 */
+	TEAM_ATTR_PORT_REMOVED,			/* flag */
 	__TEAM_ATTR_PORT_MAX,
 	TEAM_ATTR_PORT_MAX = __TEAM_ATTR_PORT_MAX - 1,
-- cgit
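As a sketch of the "send only what changed" idea (this is not the team
driver's actual netlink fill code; the struct, the function name, and
the printf standing in for netlink attribute emission are all
illustrative), an event fill routine can skip entries whose changed
flag is clear, tag deleted entries via the new *_REMOVED attribute, and
clear changed once an entry has been reported, so a later event never
resends stale records:

#include <stdbool.h>
#include <stdio.h>

struct opt {
	const char *name;
	bool changed;	/* include in the next event message */
	bool removed;	/* report deletion via the REMOVED attribute */
};

static void fill_event(struct opt *opts, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (!opts[i].changed)
			continue;	/* unchanged records stay out */
		printf("option %s%s\n", opts[i].name,
		       opts[i].removed ? " (removed)" : "");
		opts[i].changed = false;	/* sent exactly once */
	}
}

int main(void)
{
	struct opt opts[] = {
		{ "mode",     true,  false },
		{ "priority", false, false },	/* unchanged: not sent */
		{ "oldopt",   true,  true },	/* deleted: sent, flagged */
	};

	fill_event(opts, 3);
	return 0;
}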