git://ftp.safe.ca
/
safe
/
jmp
/
linux-2.6
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
fuse: invalidate target of rename
[safe/jmp/linux-2.6]
/
fs
/
mbcache.c
diff --git a/fs/mbcache.c b/fs/mbcache.c
index c7170b9..ec88ff3 100644
(file)
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -85,7 +85,7 @@
struct mb_cache {
#ifndef MB_CACHE_INDEXES_COUNT
int c_indexes_count;
#endif
#ifndef MB_CACHE_INDEXES_COUNT
int c_indexes_count;
#endif
-	kmem_cache_t *c_entry_cache;
+	struct kmem_cache *c_entry_cache;
struct list_head *c_block_hash;
struct list_head *c_indexes_hash[0];
};
struct list_head *c_block_hash;
struct list_head *c_indexes_hash[0];
};
@@ -100,7 +100,6 @@
struct mb_cache {
static LIST_HEAD(mb_cache_list);
static LIST_HEAD(mb_cache_lru_list);
static DEFINE_SPINLOCK(mb_cache_spinlock);
static LIST_HEAD(mb_cache_list);
static LIST_HEAD(mb_cache_lru_list);
static DEFINE_SPINLOCK(mb_cache_spinlock);
-static struct shrinker *mb_shrinker;
static inline int
mb_cache_indexes(struct mb_cache *cache)
static inline int
mb_cache_indexes(struct mb_cache *cache)
@@ -116,8 +115,12 @@
mb_cache_indexes(struct mb_cache *cache)
* What the mbcache registers as to get shrunk dynamically.
*/
* What the mbcache registers as to get shrunk dynamically.
*/
-static int mb_cache_shrink_fn(int nr_to_scan, unsigned int gfp_mask);
+static int mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask);
+static struct shrinker mb_cache_shrinker = {
+ .shrink = mb_cache_shrink_fn,
+ .seeks = DEFAULT_SEEKS,
+};
static inline int
__mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
static inline int
__mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
@@ -126,7 +129,7 @@
__mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
}
}
-static inline void
+static void
__mb_cache_entry_unhash(struct mb_cache_entry *ce)
{
int n;
__mb_cache_entry_unhash(struct mb_cache_entry *ce)
{
int n;
@@ -139,8 +142,8 @@
__mb_cache_entry_unhash(struct mb_cache_entry *ce)
}
}
-static inline void
-__mb_cache_entry_forget(struct mb_cache_entry *ce, int gfp_mask)
+static void
+__mb_cache_entry_forget(struct mb_cache_entry *ce, gfp_t gfp_mask)
{
struct mb_cache *cache = ce->e_cache;
{
struct mb_cache *cache = ce->e_cache;
@@ -158,8 +161,9 @@
__mb_cache_entry_forget(struct mb_cache_entry *ce, int gfp_mask)
}
}
-static inline void
+static void
__mb_cache_entry_release_unlock(struct mb_cache_entry *ce)
__mb_cache_entry_release_unlock(struct mb_cache_entry *ce)
+ __releases(mb_cache_spinlock)
{
/* Wake up all processes queuing for this cache entry. */
if (ce->e_queued)
{
/* Wake up all processes queuing for this cache entry. */
if (ce->e_queued)
@@ -193,7 +197,7 @@
forget:
* Returns the number of objects which are present in the cache.
*/
static int
* Returns the number of objects which are present in the cache.
*/
static int
-mb_cache_shrink_fn(int nr_to_scan, unsigned int gfp_mask)
+mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask)
{
LIST_HEAD(free_list);
struct list_head *l, *ltmp;
{
LIST_HEAD(free_list);
struct list_head *l, *ltmp;
@@ -288,7 +292,7 @@
mb_cache_create(const char *name, struct mb_cache_op *cache_op,
INIT_LIST_HEAD(&cache->c_indexes_hash[m][n]);
}
cache->c_entry_cache = kmem_cache_create(name, entry_size, 0,
INIT_LIST_HEAD(&cache->c_indexes_hash[m][n]);
}
cache->c_entry_cache = kmem_cache_create(name, entry_size, 0,
-				SLAB_RECLAIM_ACCOUNT, NULL, NULL);
+				SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
if (!cache->c_entry_cache)
goto fail;
if (!cache->c_entry_cache)
goto fail;
@@ -301,8 +305,7 @@
fail:
if (cache) {
while (--m >= 0)
kfree(cache->c_indexes_hash[m]);
if (cache) {
while (--m >= 0)
kfree(cache->c_indexes_hash[m]);
- if (cache->c_block_hash)
- kfree(cache->c_block_hash);
+ kfree(cache->c_block_hash);
kfree(cache);
}
return NULL;
kfree(cache);
}
return NULL;
@@ -312,15 +315,14 @@
fail:
/*
* mb_cache_shrink()
*
/*
* mb_cache_shrink()
*
- * Removes all cache entires of a device from the cache. All cache entries
+ * Removes all cache entries of a device from the cache. All cache entries
* currently in use cannot be freed, and thus remain in the cache. All others
* are freed.
*
* currently in use cannot be freed, and thus remain in the cache. All others
* are freed.
*
- * @cache: which cache to shrink
* @bdev: which device's cache entries to shrink
*/
void
* @bdev: which device's cache entries to shrink
*/
void
-mb_cache_shrink(struct mb_cache *cache, struct block_device *bdev)
+mb_cache_shrink(struct block_device *bdev)
{
LIST_HEAD(free_list);
struct list_head *l, *ltmp;
{
LIST_HEAD(free_list);
struct list_head *l, *ltmp;
@@ -397,13 +399,13 @@
mb_cache_destroy(struct mb_cache *cache)
* if no more memory was available.
*/
struct mb_cache_entry *
* if no more memory was available.
*/
struct mb_cache_entry *
-mb_cache_entry_alloc(struct mb_cache *cache)
+mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags)
{
struct mb_cache_entry *ce;
{
struct mb_cache_entry *ce;
- atomic_inc(&cache->c_entry_count);
- ce = kmem_cache_alloc(cache->c_entry_cache, GFP_KERNEL);
+ ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags);
if (ce) {
if (ce) {
+ atomic_inc(&cache->c_entry_count);
INIT_LIST_HEAD(&ce->e_lru_list);
INIT_LIST_HEAD(&ce->e_block_list);
ce->e_cache = cache;
INIT_LIST_HEAD(&ce->e_lru_list);
INIT_LIST_HEAD(&ce->e_block_list);
ce->e_cache = cache;
@@ -663,13 +665,13 @@
mb_cache_entry_find_next(struct mb_cache_entry *prev, int index,
static int __init init_mbcache(void)
{
static int __init init_mbcache(void)
{
-	mb_shrinker = set_shrinker(DEFAULT_SEEKS, mb_cache_shrink_fn);
+	register_shrinker(&mb_cache_shrinker);
return 0;
}
static void __exit exit_mbcache(void)
{
return 0;
}
static void __exit exit_mbcache(void)
{
-	remove_shrinker(mb_shrinker);
+	unregister_shrinker(&mb_cache_shrinker);
}
module_init(init_mbcache)
}
module_init(init_mbcache)