diff --git a/mm/filemap.c b/mm/filemap.c
index 96ac6b0..d6f4f07 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
  * the NFS filesystem used to do this differently, for example)
  */
 #include <linux/module.h>
-#include <linux/slab.h>
 #include <linux/compiler.h>
 #include <linux/fs.h>
 #include <linux/uaccess.h>
 #include <linux/aio.h>
 #include <linux/capability.h>
 #include <linux/kernel_stat.h>
+#include <linux/gfp.h>
 #include <linux/mm.h>
 #include <linux/swap.h>
 #include <linux/mman.h>
@@ -441,7 +441,7 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
        /*
         * Splice_read and readahead add shmem/tmpfs pages into the page cache
         * before shmem_readpage has a chance to mark them as SwapBacked: they
-        * need to go on the active_anon lru below, and mem_cgroup_cache_charge
+        * need to go on the anon lru below, and mem_cgroup_cache_charge
         * (called in add_to_page_cache) needs to know where they're going too.
         */
        if (mapping_cap_swap_backed(mapping))
@@ -452,7 +452,7 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                if (page_is_file_cache(page))
                        lru_cache_add_file(page);
                else
-                       lru_cache_add_active_anon(page);
+                       lru_cache_add_anon(page);
        }
        return ret;
 }
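
The LRU helpers this hunk switches between are thin wrappers in include/linux/swap.h; in kernels of this vintage they read roughly as below, so the net effect is that freshly added shmem/tmpfs pages now start on the inactive anon list rather than the active one:

/*
 * Era-appropriate sketch of the swap.h helpers (assumed, not part of
 * this diff): each queues the page onto the named per-zone LRU list.
 */
static inline void lru_cache_add_anon(struct page *page)
{
	__lru_cache_add(page, LRU_INACTIVE_ANON);
}

static inline void lru_cache_add_file(struct page *page)
{
	__lru_cache_add(page, LRU_INACTIVE_FILE);
}

Starting these pages inactive means they must be referenced again before the scanner promotes them, the same treatment file-backed pages already get.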
@@ -1117,7 +1117,7 @@ readpage:
                        if (!PageUptodate(page)) {
                                if (page->mapping == NULL) {
                                        /*
-                                        * invalidate_inode_pages got it
+                                        * invalidate_mapping_pages got it
                                         */
                                        unlock_page(page);
                                        page_cache_release(page);
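
This comment fix tracks an old API rename: invalidate_inode_pages() became invalidate_mapping_pages(). For reference, the surviving function is declared in include/linux/fs.h roughly as:

/* Assumed declaration from fs.h of this era: drops clean, unlocked,
 * unmapped pages in [start, end] and returns how many were removed. */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
				       pgoff_t start, pgoff_t end);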
@@ -1634,14 +1634,15 @@ EXPORT_SYMBOL(generic_file_readonly_mmap);
 static struct page *__read_cache_page(struct address_space *mapping,
                                pgoff_t index,
                                int (*filler)(void *,struct page*),
-                               void *data)
+                               void *data,
+                               gfp_t gfp)
 {
        struct page *page;
        int err;
 repeat:
        page = find_get_page(mapping, index);
        if (!page) {
-               page = page_cache_alloc_cold(mapping);
+               page = __page_cache_alloc(gfp | __GFP_COLD);
                if (!page)
                        return ERR_PTR(-ENOMEM);
                err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
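
The replaced helper hard-wires the mapping's default GFP mask, which is exactly what the new gfp parameter must be able to override; per include/linux/pagemap.h of this era it is roughly the wrapper below. Note that the add_to_page_cache_lru() call just above still passes GFP_KERNEL, a limitation the read_cache_page_gfp() kerneldoc later in this diff spells out.

/* Era-appropriate sketch (assumed) of the helper being open-coded:
 * it always folds in mapping_gfp_mask(), so callers cannot override it. */
static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x) | __GFP_COLD);
}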
@@ -1661,31 +1662,18 @@ repeat:
        return page;
 }
 
-/**
- * read_cache_page_async - read into page cache, fill it if needed
- * @mapping:   the page's address_space
- * @index:     the page index
- * @filler:    function to perform the read
- * @data:      destination for read data
- *
- * Same as read_cache_page, but don't wait for page to become unlocked
- * after submitting it to the filler.
- *
- * Read into the page cache. If a page already exists, and PageUptodate() is
- * not set, try to fill the page but don't wait for it to become unlocked.
- *
- * If the page does not get brought uptodate, return -EIO.
- */
-struct page *read_cache_page_async(struct address_space *mapping,
+static struct page *do_read_cache_page(struct address_space *mapping,
                                pgoff_t index,
                                int (*filler)(void *,struct page*),
-                               void *data)
+                               void *data,
+                               gfp_t gfp)
 {
        struct page *page;
        int err;
 
 retry:
-       page = __read_cache_page(mapping, index, filler, data);
+       page = __read_cache_page(mapping, index, filler, data, gfp);
        if (IS_ERR(page))
                return page;
        if (PageUptodate(page))
@@ -1710,8 +1698,67 @@ out:
        mark_page_accessed(page);
        return page;
 }
+
+/**
+ * read_cache_page_async - read into page cache, fill it if needed
+ * @mapping:   the page's address_space
+ * @index:     the page index
+ * @filler:    function to perform the read
+ * @data:      destination for read data
+ *
+ * Same as read_cache_page, but don't wait for page to become unlocked
+ * after submitting it to the filler.
+ *
+ * Read into the page cache. If a page already exists, and PageUptodate() is
+ * not set, try to fill the page but don't wait for it to become unlocked.
+ *
+ * If the page does not get brought uptodate, return -EIO.
+ */
+struct page *read_cache_page_async(struct address_space *mapping,
+                               pgoff_t index,
+                               int (*filler)(void *,struct page*),
+                               void *data)
+{
+       return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
+}
 EXPORT_SYMBOL(read_cache_page_async);
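
Passing mapping_gfp_mask(mapping) keeps read_cache_page_async() allocation-compatible with its old behaviour; the helper just decodes the default mask stashed in the low bits of mapping->flags, roughly:

/* Era-appropriate sketch (assumed) from include/linux/pagemap.h. */
static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}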
 
+static struct page *wait_on_page_read(struct page *page)
+{
+       if (!IS_ERR(page)) {
+               wait_on_page_locked(page);
+               if (!PageUptodate(page)) {
+                       page_cache_release(page);
+                       page = ERR_PTR(-EIO);
+               }
+       }
+       return page;
+}
+
+/**
+ * read_cache_page_gfp - read into page cache, using specified page allocation flags.
+ * @mapping:   the page's address_space
+ * @index:     the page index
+ * @gfp:       the page allocator flags to use if allocating
+ *
+ * This is the same as "read_mapping_page(mapping, index, NULL)", but with
+ * any new page allocations done using the specified allocation flags. Note
+ * that the Radix tree operations will still use GFP_KERNEL, so you can't
+ * expect to do this atomically or anything like that - but you can pass in
+ * other page requirements.
+ *
+ * If the page does not get brought uptodate, return -EIO.
+ */
+struct page *read_cache_page_gfp(struct address_space *mapping,
+                               pgoff_t index,
+                               gfp_t gfp)
+{
+       filler_t *filler = (filler_t *)mapping->a_ops->readpage;
+
+       return wait_on_page_read(do_read_cache_page(mapping, index, filler, NULL, gfp));
+}
+EXPORT_SYMBOL(read_cache_page_gfp);
+
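
A typical use of the new helper, sketched here with a hypothetical caller, is a filesystem paging in its own metadata with GFP_NOFS so that reclaim triggered by the allocation cannot recurse back into the filesystem:

/*
 * Hypothetical caller (not part of this diff): read one metadata page
 * through the page cache without risking fs-reentrant reclaim.
 */
static struct page *read_meta_page(struct address_space *mapping,
				   pgoff_t index)
{
	struct page *page = read_cache_page_gfp(mapping, index, GFP_NOFS);

	if (IS_ERR(page))
		return page;	/* ERR_PTR(-ENOMEM) or ERR_PTR(-EIO) */

	/* Success: page is uptodate and referenced; release it with
	 * page_cache_release() when done. */
	return page;
}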
 /**
  * read_cache_page - read into page cache, fill it if needed
  * @mapping:   the page's address_space
@@ -1729,18 +1776,7 @@ struct page *read_cache_page(struct address_space *mapping,
                                int (*filler)(void *,struct page*),
                                void *data)
 {
-       struct page *page;
-
-       page = read_cache_page_async(mapping, index, filler, data);
-       if (IS_ERR(page))
-               goto out;
-       wait_on_page_locked(page);
-       if (!PageUptodate(page)) {
-               page_cache_release(page);
-               page = ERR_PTR(-EIO);
-       }
- out:
-       return page;
+       return wait_on_page_read(read_cache_page_async(mapping, index, filler, data));
 }
 EXPORT_SYMBOL(read_cache_page);
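
For completeness, the read_mapping_page() form that the read_cache_page_gfp() kerneldoc above compares itself against is a pagemap.h wrapper supplying the mapping's own ->readpage as the filler, roughly:

/* Era-appropriate sketch (assumed) from include/linux/pagemap.h. */
static inline struct page *read_mapping_page(struct address_space *mapping,
					     pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;

	return read_cache_page(mapping, index, filler, data);
}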
 
@@ -1950,7 +1986,7 @@ EXPORT_SYMBOL(iov_iter_single_seg_count);
 inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk)
 {
        struct inode *inode = file->f_mapping->host;
-       unsigned long limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
+       unsigned long limit = rlimit(RLIMIT_FSIZE);
 
         if (unlikely(*pos < 0))
                 return -EINVAL;
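
rlimit() is a then-new accessor that replaces the open-coded dereference chain; as added to include/linux/sched.h around this time, it reads roughly:

/* Era-appropriate sketch (assumed) of the sched.h accessors;
 * ACCESS_ONCE() keeps the read stable if the limit changes underneath. */
static inline unsigned long task_rlimit(const struct task_struct *tsk,
					unsigned int limit)
{
	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
}

static inline unsigned long rlimit(unsigned int limit)
{
	return task_rlimit(current, limit);
}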
@@ -2196,6 +2232,9 @@ again:
                if (unlikely(status))
                        break;
 
+               if (mapping_writably_mapped(mapping))
+                       flush_dcache_page(page);
+
                pagefault_disable();
                copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
                pagefault_enable();
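
The new flush addresses D-cache aliasing: on virtually indexed caches, a writable user mmap of this same page can hold dirty lines in a different cache alias than the kernel mapping the copy below writes through, so the page is flushed first whenever such a mapping might exist. The guard test is cheap; from include/linux/fs.h of this era it is roughly:

/* Era-appropriate sketch (assumed): true while at least one shared,
 * writable mmap of this address_space exists. */
static inline int mapping_writably_mapped(struct address_space *mapping)
{
	return mapping->i_mmap_writable != 0;
}

On architectures without aliasing, flush_dcache_page() is a no-op, so the check only costs anything where the flush is actually needed.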