#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
+#include <linux/bitops.h>
/*
 * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
#define AS_EIO (__GFP_BITS_SHIFT + 0) /* IO error on async write */
#define AS_ENOSPC (__GFP_BITS_SHIFT + 1) /* ENOSPC on async write */
+static inline void mapping_set_error(struct address_space *mapping, int error)
+{
+ if (error) {
+ if (error == -ENOSPC)
+ set_bit(AS_ENOSPC, &mapping->flags);
+ else
+ set_bit(AS_EIO, &mapping->flags);
+ }
+}
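/*
 * Usage sketch (illustrative, not part of this patch): a writeback
 * completion path can funnel its status into the mapping so that a
 * later fsync()/msync() can report the failure.  end_my_writeback()
 * is a hypothetical helper.
 */
static void end_my_writeback(struct page *page, int error)
{
	/* error == 0 is a no-op; -ENOSPC sets AS_ENOSPC, else AS_EIO */
	mapping_set_error(page->mapping, error);
	end_page_writeback(page);
}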
+
static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}
#define page_cache_release(page) put_page(page)
void release_pages(struct page **pages, int nr, int cold);
+#ifdef CONFIG_NUMA
+extern struct page *__page_cache_alloc(gfp_t gfp);
+#else
+static inline struct page *__page_cache_alloc(gfp_t gfp)
+{
+ return alloc_pages(gfp, 0);
+}
+#endif
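/*
 * Sketch of what the out-of-line NUMA variant might look like in
 * mm/filemap.c (illustrative, assuming cpuset memory spreading):
 */
#ifdef CONFIG_NUMA
struct page *__page_cache_alloc(gfp_t gfp)
{
	if (cpuset_do_page_mem_spread()) {
		/* spread pagecache pages over the cpuset's allowed nodes */
		int n = cpuset_mem_spread_node();
		return alloc_pages_node(n, gfp, 0);
	}
	return alloc_pages(gfp, 0);
}
#endif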
+
static inline struct page *page_cache_alloc(struct address_space *x)
{
- return alloc_pages(mapping_gfp_mask(x), 0);
+ return __page_cache_alloc(mapping_gfp_mask(x));
}
static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
- return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD, 0);
+ return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}
typedef int filler_t(void *, struct page *);
extern struct page * find_get_page(struct address_space *mapping,
				unsigned long index);
extern struct page * find_lock_page(struct address_space *mapping,
unsigned long index);
-extern struct page * find_trylock_page(struct address_space *mapping,
- unsigned long index);
extern struct page * find_or_create_page(struct address_space *mapping,
unsigned long index, gfp_t gfp_mask);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
unsigned int nr_pages, struct page **pages);
+unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
+ unsigned int nr_pages, struct page **pages);
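/*
 * Usage sketch (illustrative): unlike find_get_pages(), the _contig
 * variant stops at the first hole, so the result is a run of pages at
 * consecutive indices.  count_contig_cached() is a hypothetical helper;
 * the references taken must be dropped, e.g. via release_pages().
 */
static unsigned count_contig_cached(struct address_space *mapping, pgoff_t start)
{
	struct page *pages[16];
	unsigned nr = find_get_pages_contig(mapping, start, 16, pages);

	release_pages(pages, nr, 0);	/* drop the elevated refcounts */
	return nr;
}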
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
int tag, unsigned int nr_pages, struct page **pages);
extern struct page * grab_cache_page_nowait(struct address_space *mapping,
unsigned long index);
+extern struct page * read_cache_page_async(struct address_space *mapping,
+ unsigned long index, filler_t *filler,
+ void *data);
extern struct page * read_cache_page(struct address_space *mapping,
unsigned long index, filler_t *filler,
void *data);
extern int read_cache_pages(struct address_space *mapping,
struct list_head *pages, filler_t *filler, void *data);
-int add_to_page_cache(struct page *page, struct address_space *mapping,
- unsigned long index, gfp_t gfp_mask);
-int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
- unsigned long index, gfp_t gfp_mask);
-extern void remove_from_page_cache(struct page *page);
-extern void __remove_from_page_cache(struct page *page);
-
-extern atomic_t nr_pagecache;
-
-#ifdef CONFIG_SMP
-
-#define PAGECACHE_ACCT_THRESHOLD max(16, NR_CPUS * 2)
-DECLARE_PER_CPU(long, nr_pagecache_local);
-
-/*
- * pagecache_acct implements approximate accounting for pagecache.
- * vm_enough_memory() do not need high accuracy. Writers will keep
- * an offset in their per-cpu arena and will spill that into the
- * global count whenever the absolute value of the local count
- * exceeds the counter's threshold.
- *
- * MUST be protected from preemption.
- * current protection is mapping->page_lock.
- */
-static inline void pagecache_acct(int count)
+static inline struct page *read_mapping_page_async(
+ struct address_space *mapping,
+ unsigned long index, void *data)
{
- long *local;
-
- local = &__get_cpu_var(nr_pagecache_local);
- *local += count;
- if (*local > PAGECACHE_ACCT_THRESHOLD || *local < -PAGECACHE_ACCT_THRESHOLD) {
- atomic_add(*local, &nr_pagecache);
- *local = 0;
- }
+ filler_t *filler = (filler_t *)mapping->a_ops->readpage;
+ return read_cache_page_async(mapping, index, filler, data);
}
-#else
-
-static inline void pagecache_acct(int count)
+static inline struct page *read_mapping_page(struct address_space *mapping,
+ unsigned long index, void *data)
{
- atomic_add(count, &nr_pagecache);
+ filler_t *filler = (filler_t *)mapping->a_ops->readpage;
+ return read_cache_page(mapping, index, filler, data);
}
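/*
 * Usage sketch (illustrative): read_mapping_page() blocks until the
 * page is read in and returns an ERR_PTR on failure, while the _async
 * variant returns once the read is issued and leaves the PageUptodate
 * check to the caller.  my_read_page() is a hypothetical wrapper.
 */
static struct page *my_read_page(struct inode *inode, pgoff_t index)
{
	struct page *page = read_mapping_page(inode->i_mapping, index, NULL);

	if (IS_ERR(page))
		return page;	/* e.g. ERR_PTR(-EIO) from the filler */
	/* page is uptodate here; drop with page_cache_release() when done */
	return page;
}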
-#endif
-static inline unsigned long get_page_cache_size(void)
-{
- int ret = atomic_read(&nr_pagecache);
- if (unlikely(ret < 0))
- ret = 0;
- return ret;
-}
+int add_to_page_cache(struct page *page, struct address_space *mapping,
+ unsigned long index, gfp_t gfp_mask);
+int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
+ unsigned long index, gfp_t gfp_mask);
+extern void remove_from_page_cache(struct page *page);
+extern void __remove_from_page_cache(struct page *page);
/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}
extern void FASTCALL(__lock_page(struct page *page));
+extern void FASTCALL(__lock_page_nosync(struct page *page));
extern void FASTCALL(unlock_page(struct page *page));
+/*
+ * lock_page may only be called if we have the page's inode pinned.
+ */
static inline void lock_page(struct page *page)
{
might_sleep();
if (TestSetPageLocked(page))
__lock_page(page);
}
+
+/*
+ * lock_page_nosync should only be used if we can't pin the page's inode.
+ * Doesn't play quite so well with block device plugging.
+ */
+static inline void lock_page_nosync(struct page *page)
+{
+ might_sleep();
+ if (TestSetPageLocked(page))
+ __lock_page_nosync(page);
+}
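/*
 * Sketch of what the out-of-line helper might look like in mm/filemap.c
 * (illustrative, assuming the generic wait-bit machinery): it sleeps on
 * PG_locked without calling sync_page(), which would chase
 * page->mapping into an inode the caller cannot keep pinned.
 */
void fastcall __lock_page_nosync(struct page *page)
{
	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
	__wait_on_bit_lock(page_waitqueue(page), &wait, __sleep_on_page_lock,
							TASK_UNINTERRUPTIBLE);
}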
/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */