vfs: introduce noop_llseek()
[safe/jmp/linux-2.6] / fs / xfs / xfs_mru_cache.c
index 012209e..45ce15d 100644 (file)
@@ -225,10 +225,14 @@ _xfs_mru_cache_list_insert(
  * list need to be deleted.  For each element this involves removing it from the
  * data store, removing it from the reap list, calling the client's free
  * function and deleting the element from the element zone.
+ *
+ * We get called holding the mru->lock, which we drop and then reacquire.
+ * Sparse needs special help with this to tell it we know what we are doing.
  */
 STATIC void
 _xfs_mru_cache_clear_reap_list(
-       xfs_mru_cache_t         *mru)
+       xfs_mru_cache_t         *mru) __releases(mru->lock) __acquires(mru->lock)
+
 {
        xfs_mru_cache_elem_t    *elem, *next;
        struct list_head        tmp;
@@ -303,15 +307,18 @@ xfs_mru_cache_init(void)
        xfs_mru_elem_zone = kmem_zone_init(sizeof(xfs_mru_cache_elem_t),
                                         "xfs_mru_cache_elem");
        if (!xfs_mru_elem_zone)
-               return ENOMEM;
+               goto out;
 
        xfs_mru_reap_wq = create_singlethread_workqueue("xfs_mru_cache");
-       if (!xfs_mru_reap_wq) {
-               kmem_zone_destroy(xfs_mru_elem_zone);
-               return ENOMEM;
-       }
+       if (!xfs_mru_reap_wq)
+               goto out_destroy_mru_elem_zone;
 
        return 0;
+
+ out_destroy_mru_elem_zone:
+       kmem_zone_destroy(xfs_mru_elem_zone);
+ out:
+       return -ENOMEM;
 }
 
 void
@@ -378,9 +385,9 @@ xfs_mru_cache_create(
 
 exit:
        if (err && mru && mru->lists)
-               kmem_free(mru->lists, mru->grp_count * sizeof(*mru->lists));
+               kmem_free(mru->lists);
        if (err && mru)
-               kmem_free(mru, sizeof(*mru));
+               kmem_free(mru);
 
        return err;
 }
@@ -391,7 +398,7 @@ exit:
  * guaranteed that all the free functions for all the elements have finished
  * executing and the reaper is not running.
  */
-void
+static void
 xfs_mru_cache_flush(
        xfs_mru_cache_t         *mru)
 {
@@ -420,8 +427,8 @@ xfs_mru_cache_destroy(
 
        xfs_mru_cache_flush(mru);
 
-       kmem_free(mru->lists, mru->grp_count * sizeof(*mru->lists));
-       kmem_free(mru, sizeof(*mru));
+       kmem_free(mru->lists);
+       kmem_free(mru);
 }
 
 /*
@@ -528,6 +535,10 @@ xfs_mru_cache_delete(
  *
  * If the element isn't found, this function returns NULL and the spinlock is
  * released.  xfs_mru_cache_done() should NOT be called when this occurs.
+ *
+ * Because sparse isn't smart enough to know about conditional lock return
+ * status, we need to help it get it right by annotating the path that does
+ * not release the lock.
  */
 void *
 xfs_mru_cache_lookup(
@@ -545,35 +556,8 @@ xfs_mru_cache_lookup(
        if (elem) {
                list_del(&elem->list_node);
                _xfs_mru_cache_list_insert(mru, elem);
-       }
-       else
-               spin_unlock(&mru->lock);
-
-       return elem ? elem->value : NULL;
-}
-
-/*
- * To look up an element using its key, but leave its location in the internal
- * lists alone, call xfs_mru_cache_peek().  If the element isn't found, this
- * function returns NULL.
- *
- * See the comments above the declaration of the xfs_mru_cache_lookup() function
- * for important locking information pertaining to this call.
- */
-void *
-xfs_mru_cache_peek(
-       xfs_mru_cache_t *mru,
-       unsigned long   key)
-{
-       xfs_mru_cache_elem_t *elem;
-
-       ASSERT(mru && mru->lists);
-       if (!mru || !mru->lists)
-               return NULL;
-
-       spin_lock(&mru->lock);
-       elem = radix_tree_lookup(&mru->store, key);
-       if (!elem)
+               __release(mru_lock); /* help sparse not be stupid */
+       } else
                spin_unlock(&mru->lock);
 
        return elem ? elem->value : NULL;
@@ -586,7 +570,7 @@ xfs_mru_cache_peek(
  */
 void
 xfs_mru_cache_done(
-       xfs_mru_cache_t *mru)
+       xfs_mru_cache_t *mru) __releases(mru->lock)
 {
        spin_unlock(&mru->lock);
 }