author     David Chinner <dgc@sgi.com>                          2007-11-23 16:28:09 +1100
committer  Lachlan McIlroy <lachlan@redback.melbourne.sgi.com>  2008-02-07 18:14:38 +1100
commit     a8272ce0c1d49aa3bec57682678f0bdfe28ed4ca
tree       691b5ac5b1e36bf0dd59408434e3856438258a0e /fs/xfs/xfs_mru_cache.c
parent     a69b176df246d59626e6a9c640b44c0921fa4566
[XFS] Fix up sparse warnings.
These are mostly locking annotations, marking things static, casts where
needed and declaring stuff in header files.

SGI-PV: 971186
SGI-Modid: xfs-linux-melb:xfs-kern:30002a

Signed-off-by: David Chinner <dgc@sgi.com>
Signed-off-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
Diffstat (limited to 'fs/xfs/xfs_mru_cache.c')
-rw-r--r--  fs/xfs/xfs_mru_cache.c | 18
1 file changed, 14 insertions(+), 4 deletions(-)
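For context, the locking annotations this patch adds come from the kernel's sparse static checker: __releases()/__acquires() on a function declaration describe how the function changes lock state, and they compile away to nothing in an ordinary build. The following is a minimal standalone sketch of the pattern applied to _xfs_mru_cache_clear_reap_list() below, i.e. a function that is entered with a lock held, drops it, and reacquires it before returning. The names (cache_lock, clear_pending_work) and the pthread-based locking are illustrative stand-ins, not the XFS code.

#include <pthread.h>
#include <stdio.h>

/*
 * Stand-ins for the kernel's sparse annotations.  When the kernel is built
 * with sparse (__CHECKER__), these become context-tracking attributes so the
 * checker can verify lock balance; for an ordinary compile they expand to
 * nothing, which is all this sketch needs.
 */
#define __releases(x)
#define __acquires(x)

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static int pending_work;	/* hypothetical state protected by cache_lock */

/*
 * Entered with cache_lock held; drops it to do slow work, then reacquires it
 * before returning -- the same shape as _xfs_mru_cache_clear_reap_list().
 * The annotations tell sparse that the unlock/relock imbalance is intended.
 */
static void clear_pending_work(void) __releases(cache_lock) __acquires(cache_lock)
{
	pthread_mutex_unlock(&cache_lock);
	printf("doing slow work without holding the lock\n");
	pthread_mutex_lock(&cache_lock);
	pending_work = 0;
}

int main(void)
{
	pthread_mutex_lock(&cache_lock);
	pending_work = 1;
	clear_pending_work();		/* enters and leaves with the lock held */
	pthread_mutex_unlock(&cache_lock);
	return 0;
}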
diff --git a/fs/xfs/xfs_mru_cache.c b/fs/xfs/xfs_mru_cache.c
index 012209eda35d..a0b2c0a2589a 100644
--- a/fs/xfs/xfs_mru_cache.c
+++ b/fs/xfs/xfs_mru_cache.c
@@ -225,10 +225,14 @@ _xfs_mru_cache_list_insert(
* list need to be deleted. For each element this involves removing it from the
* data store, removing it from the reap list, calling the client's free
* function and deleting the element from the element zone.
+ *
+ * We get called holding the mru->lock, which we drop and then reacquire.
+ * Sparse needs special help with this to tell it we know what we are doing.
*/
STATIC void
_xfs_mru_cache_clear_reap_list(
- xfs_mru_cache_t *mru)
+ xfs_mru_cache_t *mru) __releases(mru->lock) __acquires(mru->lock)
+
{
xfs_mru_cache_elem_t *elem, *next;
struct list_head tmp;
@@ -528,6 +532,10 @@ xfs_mru_cache_delete(
*
* If the element isn't found, this function returns NULL and the spinlock is
* released. xfs_mru_cache_done() should NOT be called when this occurs.
+ *
+ * Because sparse isn't smart enough to know about conditional lock return
+ * status, we need to help it get it right by annotating the path that does
+ * not release the lock.
*/
void *
xfs_mru_cache_lookup(
@@ -545,8 +553,8 @@ xfs_mru_cache_lookup(
if (elem) {
list_del(&elem->list_node);
_xfs_mru_cache_list_insert(mru, elem);
- }
- else
+ __release(mru_lock); /* help sparse not be stupid */
+ } else
spin_unlock(&mru->lock);
return elem ? elem->value : NULL;
@@ -575,6 +583,8 @@ xfs_mru_cache_peek(
elem = radix_tree_lookup(&mru->store, key);
if (!elem)
spin_unlock(&mru->lock);
+ else
+ __release(mru_lock); /* help sparse not be stupid */
return elem ? elem->value : NULL;
}
@@ -586,7 +596,7 @@ xfs_mru_cache_peek(
*/
void
xfs_mru_cache_done(
- xfs_mru_cache_t *mru)
+ xfs_mru_cache_t *mru) __releases(mru->lock)
{
spin_unlock(&mru->lock);
}
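The second and third hunks handle the other annotation pattern in this file: xfs_mru_cache_lookup() and xfs_mru_cache_peek() return with mru->lock still held when they find an element, and xfs_mru_cache_done() drops it later; on a miss they unlock before returning. Because only one path actually releases the lock, the path that keeps it gets a __release() statement so sparse's lock accounting stays balanced. The sketch below is a simplified, self-contained illustration of that calling convention; cache_lookup(), cache_done(), cache_lock and the pthread locking are hypothetical stand-ins, not the XFS implementation.

#include <pthread.h>
#include <stdio.h>
#include <string.h>

/* Empty stand-ins for the sparse annotations, as in the sketch further up. */
#define __releases(x)
#define __release(x)

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static const char *stored_key = "hello";
static int stored_value = 42;

/*
 * On a hit, return the value and leave cache_lock held so the caller can use
 * it safely; __release() tells sparse the imbalance on this path is intended.
 * On a miss, drop the lock and return NULL; the caller must NOT call
 * cache_done() in that case.
 */
static int *cache_lookup(const char *key)
{
	pthread_mutex_lock(&cache_lock);
	if (strcmp(key, stored_key) == 0) {
		__release(cache_lock);	/* lock stays held for the caller */
		return &stored_value;
	}
	pthread_mutex_unlock(&cache_lock);
	return NULL;
}

/* Pairs with a successful cache_lookup(): drops the lock it left held. */
static void cache_done(void) __releases(cache_lock)
{
	pthread_mutex_unlock(&cache_lock);
}

int main(void)
{
	int *val = cache_lookup("hello");

	if (val) {
		printf("found %d\n", *val);	/* still holding cache_lock */
		cache_done();
	}
	return 0;
}

The asymmetric convention is the whole point: the caller keeps the element stable by holding the lock across its use, which is why the "found" branches above and in the patched XFS functions need the extra annotation even though they release nothing.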