author		Jens Axboe <axboe@kernel.dk>	2018-02-05 12:55:38 -0700
committer	Jens Axboe <axboe@kernel.dk>	2018-02-05 12:55:38 -0700
commit		9e05c864993c5442227f83ae1694a737d7a102ed
tree		e35b60bb3c0c179f147e9acaad5444f1e5d9117e	/fs/mbcache.c
parent		3c15f3f545afa320c5e3822825a9a53c664776b6
parent		35277995e17919ab838beae765f440674e8576eb
Merge branch 'master' into test
* master: (688 commits)
  dt-bindings: mailbox: qcom: Document the APCS clock binding
  mailbox: qcom: Create APCS child device for clock controller
  mailbox: qcom: Convert APCS IPC driver to use regmap
  KVM/SVM: Allow direct access to MSR_IA32_SPEC_CTRL
  KVM/VMX: Allow direct access to MSR_IA32_SPEC_CTRL
  KVM/VMX: Emulate MSR_IA32_ARCH_CAPABILITIES
  KVM/x86: Add IBPB support
  KVM/x86: Update the reverse_cpuid list to include CPUID_7_EDX
  pinctrl: remove include file from <linux/device.h>
  firmware: dmi: handle missing DMI data gracefully
  firmware: dmi_scan: Fix handling of empty DMI strings
  firmware: dmi_scan: Drop dmi_initialized
  firmware: dmi: Optimize dmi_matches
  Revert "defer call to mem_cgroup_sk_alloc()"
  soreuseport: fix mem leak in reuseport_add_sock()
  net: qlge: use memmove instead of skb_copy_to_linear_data
  net: qed: use correct strncpy() size
  net: cxgb4: avoid memcpy beyond end of source buffer
  cls_u32: add missing RCU annotation.
  r8152: set rx mode early when linking on
  ...
Diffstat (limited to 'fs/mbcache.c')
 fs/mbcache.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/fs/mbcache.c b/fs/mbcache.c
index b8b8b9ced9f8..bf41e2e72c18 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -94,6 +94,7 @@ int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
 	entry->e_key = key;
 	entry->e_value = value;
 	entry->e_reusable = reusable;
+	entry->e_referenced = 0;
 	head = mb_cache_entry_head(cache, key);
 	hlist_bl_lock(head);
 	hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
@@ -238,7 +239,9 @@ void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value)
 			spin_lock(&cache->c_list_lock);
 			if (!list_empty(&entry->e_list)) {
 				list_del_init(&entry->e_list);
-				cache->c_entry_count--;
+				if (!WARN_ONCE(cache->c_entry_count == 0,
+		"mbcache: attempt to decrement c_entry_count past zero"))
+					cache->c_entry_count--;
 				atomic_dec(&entry->e_refcnt);
 			}
 			spin_unlock(&cache->c_list_lock);
@@ -269,9 +272,6 @@ static unsigned long mb_cache_count(struct shrinker *shrink,
 	struct mb_cache *cache = container_of(shrink, struct mb_cache,
 					      c_shrink);
 
-	/* Unlikely, but not impossible */
-	if (unlikely(cache->c_entry_count < 0))
-		return 0;
 	return cache->c_entry_count;
 }
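
Side note on the second and third hunks: c_entry_count is an unsigned counter, so the removed "if (unlikely(cache->c_entry_count < 0))" test in mb_cache_count() could never be true; the merge instead refuses to decrement past zero at the delete site in mb_cache_entry_delete() and warns once if that ever happens. Below is a minimal userspace sketch of that guarded-decrement pattern, with a stand-in for the kernel's WARN_ONCE(); the struct and function names are illustrative, not the mbcache API.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the kernel's WARN_ONCE(): report the first time the
 * condition is true, evaluate to the condition either way. */
#define WARN_ONCE(cond, msg) ({						\
	static bool warned;						\
	bool c = (cond);						\
	if (c && !warned) {						\
		warned = true;						\
		fprintf(stderr, "WARNING: %s\n", (msg));		\
	}								\
	c;								\
})

struct cache {
	unsigned long entry_count;	/* unsigned: 0 - 1 would wrap to ULONG_MAX */
};

/* Guarded decrement in the style of mb_cache_entry_delete(): refuse to
 * go below zero, but complain once so the accounting bug gets noticed. */
static void cache_entry_removed(struct cache *cache)
{
	if (!WARN_ONCE(cache->entry_count == 0,
		       "attempt to decrement entry_count past zero"))
		cache->entry_count--;
}

int main(void)
{
	struct cache cache = { .entry_count = 1 };

	cache_entry_removed(&cache);	/* 1 -> 0, silent */
	cache_entry_removed(&cache);	/* would underflow: warns, stays at 0 */
	cache_entry_removed(&cache);	/* stays at 0, warning already issued */
	printf("entry_count = %lu\n", cache.entry_count);
	return 0;
}

Because WARN_ONCE() reports only the first hit, a recurring accounting bug becomes visible without flooding the log, and the counter stays clamped at zero instead of wrapping to a huge value that the shrinker would then trust.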