path: root/kernel/bpf/bpf_lru_list.h
author    Martin KaFai Lau <kafai@fb.com>    2017-08-31 23:27:13 -0700
committer David S. Miller <davem@davemloft.net>    2017-09-01 09:57:39 -0700
commit    bb9b9f8802212d98e70c63045b1734162945eaa5 (patch)
tree      271be828c272d07ad318ee244e27dd77c38bb34e /kernel/bpf/bpf_lru_list.h
parent    cc555421bc118edd070f41258d6f55f1ccfc2558 (diff)
bpf: Only set node->ref = 1 if it has not been set
This patch writes 'node->ref = 1' only if node->ref is 0. The number of
lookups/s for a ~1M-entry LRU map increased by ~30% (260097 to 343313).

Writes of 'node->ref = 0' elsewhere are not changed; in those cases the
same cache line has to be modified anyway.

First column: Size of the LRU hash
Second column: Number of lookups/s

Before:
> echo "$((2**20+1)): $(./map_perf_test 1024 1 $((2**20+1)) 10000000 | awk '{print $3}')"
1048577: 260097

After:
> echo "$((2**20+1)): $(./map_perf_test 1024 1 $((2**20+1)) 10000000 | awk '{print $3}')"
1048577: 343313

Signed-off-by: Martin KaFai Lau <kafai@fb.com>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
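The gain comes from turning an unconditional hot-path store into a check
that usually only reads: once ref is already 1, repeated lookups of the
same element no longer dirty its cache line, so the line can stay in a
shared state across CPUs instead of bouncing. A minimal user-space sketch
of the pattern (illustrative names, not the kernel's code):

#include <stdbool.h>
#include <stdio.h>

struct lru_node_sketch {
	bool ref;	/* approximate "recently used" flag */
};

/* Mirrors the pattern applied by this patch: store only when the flag
 * actually changes, so back-to-back lookups of a hot entry perform a
 * read-only check instead of repeatedly dirtying the same cache line.
 */
static void node_set_ref_sketch(struct lru_node_sketch *node)
{
	if (!node->ref)
		node->ref = 1;
}

int main(void)
{
	struct lru_node_sketch node = { .ref = 0 };

	node_set_ref_sketch(&node);	/* first lookup: writes ref = 1 */
	node_set_ref_sketch(&node);	/* later lookups: read-only check */
	printf("ref = %d\n", node.ref);
	return 0;
}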
Diffstat (limited to 'kernel/bpf/bpf_lru_list.h')
-rw-r--r--  kernel/bpf/bpf_lru_list.h  3
1 file changed, 2 insertions, 1 deletion
diff --git a/kernel/bpf/bpf_lru_list.h b/kernel/bpf/bpf_lru_list.h
index 5c35a98d02bf..7d4f89b7cb84 100644
--- a/kernel/bpf/bpf_lru_list.h
+++ b/kernel/bpf/bpf_lru_list.h
@@ -69,7 +69,8 @@ static inline void bpf_lru_node_set_ref(struct bpf_lru_node *node)
/* ref is an approximation on access frequency. It does not
* have to be very accurate. Hence, no protection is used.
*/
- node->ref = 1;
+ if (!node->ref)
+ node->ref = 1;
}
int bpf_lru_init(struct bpf_lru *lru, bool percpu, u32 hash_offset,
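For context on why ref only needs to be an approximation (as the comment
above notes): the list-rotation/shrink logic in kernel/bpf/bpf_lru_list.c
consults ref to decide whether a node is still hot and clears it for nodes
it keeps, so a missed or racy update only costs a slightly less accurate
eviction choice. A simplified, hypothetical sketch of that consumer side
(not the actual kernel functions):

struct sketch_node {
	struct sketch_node *next;
	unsigned char ref;
};

/* Walk an inactive list: nodes that were referenced since the last pass
 * get their ref cleared and survive; the first unreferenced node is
 * returned as an eviction candidate.
 */
static struct sketch_node *sketch_pick_victim(struct sketch_node *inactive)
{
	struct sketch_node *n;

	for (n = inactive; n; n = n->next) {
		if (n->ref) {
			n->ref = 0;	/* keep it, but require a fresh lookup */
			continue;
		}
		return n;
	}
	return NULL;
}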