X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=fs%2Fxfs%2Fxfs_mru_cache.c;h=afee7eb243234824a44a8ad9b91fc23200fdc6ea;hb=76a67ec6fb79ff3570dcb5342142c16098299911;hp=dc64630e870ea324bf28dab9dcebd20500bb20b8;hpb=ba74d0cba51dcaa99e4dc2e4fb62e6e13abbf703;p=safe%2Fjmp%2Flinux-2.6

diff --git a/fs/xfs/xfs_mru_cache.c b/fs/xfs/xfs_mru_cache.c
index dc64630..afee7eb 100644
--- a/fs/xfs/xfs_mru_cache.c
+++ b/fs/xfs/xfs_mru_cache.c
@@ -225,10 +225,14 @@ _xfs_mru_cache_list_insert(
  * list need to be deleted. For each element this involves removing it from the
  * data store, removing it from the reap list, calling the client's free
  * function and deleting the element from the element zone.
+ *
+ * We get called holding the mru->lock, which we drop and then reacquire.
+ * Sparse needs special help with this to tell it we know what we are doing.
  */
 STATIC void
 _xfs_mru_cache_clear_reap_list(
-	xfs_mru_cache_t		*mru)
+	xfs_mru_cache_t		*mru) __releases(mru->lock) __acquires(mru->lock)
+
 {
 	xfs_mru_cache_elem_t	*elem, *next;
 	struct list_head	tmp;
@@ -303,15 +307,18 @@ xfs_mru_cache_init(void)
 	xfs_mru_elem_zone = kmem_zone_init(sizeof(xfs_mru_cache_elem_t),
 					 "xfs_mru_cache_elem");
 	if (!xfs_mru_elem_zone)
-		return ENOMEM;
+		goto out;
 
 	xfs_mru_reap_wq = create_singlethread_workqueue("xfs_mru_cache");
-	if (!xfs_mru_reap_wq) {
-		kmem_zone_destroy(xfs_mru_elem_zone);
-		return ENOMEM;
-	}
+	if (!xfs_mru_reap_wq)
+		goto out_destroy_mru_elem_zone;
 
 	return 0;
+
+ out_destroy_mru_elem_zone:
+	kmem_zone_destroy(xfs_mru_elem_zone);
+ out:
+	return -ENOMEM;
 }
 
 void
@@ -368,7 +375,7 @@ xfs_mru_cache_create(
 	 */
 	INIT_RADIX_TREE(&mru->store, GFP_ATOMIC);
 	INIT_LIST_HEAD(&mru->reap_list);
-	spinlock_init(&mru->lock, "xfs_mru_cache");
+	spin_lock_init(&mru->lock);
 	INIT_DELAYED_WORK(&mru->work, _xfs_mru_cache_reap);
 
 	mru->grp_time  = grp_time;
@@ -378,9 +385,9 @@ xfs_mru_cache_create(
 
 exit:
 	if (err && mru && mru->lists)
-		kmem_free(mru->lists, mru->grp_count * sizeof(*mru->lists));
+		kmem_free(mru->lists);
 	if (err && mru)
-		kmem_free(mru, sizeof(*mru));
+		kmem_free(mru);
 
 	return err;
 }
@@ -420,8 +427,8 @@ xfs_mru_cache_destroy(
 
 	xfs_mru_cache_flush(mru);
 
-	kmem_free(mru->lists, mru->grp_count * sizeof(*mru->lists));
-	kmem_free(mru, sizeof(*mru));
+	kmem_free(mru->lists);
+	kmem_free(mru);
 }
 
 /*
@@ -528,6 +535,10 @@ xfs_mru_cache_delete(
  *
  * If the element isn't found, this function returns NULL and the spinlock is
  * released. xfs_mru_cache_done() should NOT be called when this occurs.
+ *
+ * Because sparse isn't smart enough to know about conditional lock return
+ * status, we need to help it get it right by annotating the path that does
+ * not release the lock.
  */
 void *
 xfs_mru_cache_lookup(
@@ -545,8 +556,8 @@ xfs_mru_cache_lookup(
 	if (elem) {
 		list_del(&elem->list_node);
 		_xfs_mru_cache_list_insert(mru, elem);
-	}
-	else
+		__release(mru_lock);	/* help sparse not be stupid */
+	} else
 		spin_unlock(&mru->lock);
 
 	return elem ? elem->value : NULL;
@@ -575,6 +586,8 @@ xfs_mru_cache_peek(
 	elem = radix_tree_lookup(&mru->store, key);
 	if (!elem)
 		spin_unlock(&mru->lock);
+	else
+		__release(mru_lock);	/* help sparse not be stupid */
 
 	return elem ? elem->value : NULL;
 }
@@ -586,7 +599,7 @@ xfs_mru_cache_peek(
  */
 void
 xfs_mru_cache_done(
-	xfs_mru_cache_t	*mru)
+	xfs_mru_cache_t	*mru) __releases(mru->lock)
 {
 	spin_unlock(&mru->lock);
 }
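
The __release(mru_lock) lines above exist because sparse checks that every path through a function acquires and releases locks in a balanced way: xfs_mru_cache_lookup() and xfs_mru_cache_peek() deliberately return with mru->lock held on success, so the branch that keeps the lock gets a no-op __release() purely to balance sparse's context tracking, and xfs_mru_cache_done() is annotated __releases() because it unlocks a lock it never took itself. As a rough, hypothetical sketch of the same pattern outside XFS (all demo_* names below are invented; only spin_lock()/spin_unlock() and the __acquires/__releases/__release annotations are real kernel interfaces):

/*
 * Hypothetical sketch, not part of the patch: a lookup/done pair that keeps
 * a spinlock held across the two calls on success, annotated the way the
 * patch annotates xfs_mru_cache_lookup()/xfs_mru_cache_done().
 */
#include <linux/spinlock.h>
#include <linux/compiler.h>

struct demo_cache {
	spinlock_t	lock;
	void		*value;
};

/*
 * Return the cached value with cache->lock still held, or NULL with the
 * lock dropped.  Sparse cannot model a conditional lock return, so the
 * branch that keeps the lock gets a no-op __release() to balance sparse's
 * context counter; it generates no code.
 */
static void *demo_cache_lookup(struct demo_cache *cache)
{
	void	*value;

	spin_lock(&cache->lock);
	value = cache->value;
	if (value)
		__release(cache->lock);	/* lock stays held for demo_cache_done() */
	else
		spin_unlock(&cache->lock);
	return value;
}

/* Drop the lock taken by a successful demo_cache_lookup(). */
static void demo_cache_done(struct demo_cache *cache) __releases(cache->lock)
{
	spin_unlock(&cache->lock);
}

The __releases()/__acquires() pair on _xfs_mru_cache_clear_reap_list() describes the other shape sparse needs help with: a function entered with the lock held that drops it around work which must not run under the lock, then takes it again before returning. Reusing the hypothetical type above, a minimal sketch:

/*
 * Hypothetical counterpart to _xfs_mru_cache_clear_reap_list(): called with
 * cache->lock held, drops it around the unlocked work, then reacquires it.
 * The annotations only describe this to sparse; they change no behaviour.
 */
static void demo_cache_do_unlocked_work(struct demo_cache *cache)
		__releases(cache->lock) __acquires(cache->lock)
{
	spin_unlock(&cache->lock);
	/* ... work that must not run under cache->lock ... */
	spin_lock(&cache->lock);
}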