filemap: fix kernel-doc warnings
diff --git a/mm/filemap.c b/mm/filemap.c
index f5769b4..8bd4980 100644
@@ -210,7 +210,7 @@ int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
        int ret;
        struct writeback_control wbc = {
                .sync_mode = sync_mode,
-               .nr_to_write = mapping->nrpages * 2,
+               .nr_to_write = LONG_MAX,
                .range_start = start,
                .range_end = end,
        };
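
Note (not part of the patch): the wbc above is normally reached through the filemap_fdatawrite() wrappers, and with nr_to_write set to LONG_MAX a writeback pass now covers every dirty page in the requested range instead of stopping after roughly twice the mapping's current page count. A minimal sketch of the assumed call chain, based on the long-standing helpers in mm/filemap.c:

	/* Sketch only; not code introduced by this diff. */
	static inline int __filemap_fdatawrite(struct address_space *mapping,
					       int sync_mode)
	{
		return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX,
						  sync_mode);
	}

	int filemap_fdatawrite(struct address_space *mapping)
	{
		return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
	}
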
@@ -460,7 +460,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
        VM_BUG_ON(!PageLocked(page));
 
        error = mem_cgroup_cache_charge(page, current->mm,
-                                       gfp_mask & ~__GFP_HIGHMEM);
+                                       gfp_mask & GFP_RECLAIM_MASK);
        if (error)
                goto out;
 
@@ -513,6 +513,7 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
        }
        return ret;
 }
+EXPORT_SYMBOL_GPL(add_to_page_cache_lru);
 
 #ifdef CONFIG_NUMA
 struct page *__page_cache_alloc(gfp_t gfp)
@@ -565,6 +566,24 @@ void wait_on_page_bit(struct page *page, int bit_nr)
 EXPORT_SYMBOL(wait_on_page_bit);
 
 /**
+ * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
+ * @page: Page defining the wait queue of interest
+ * @waiter: Waiter to add to the queue
+ *
+ * Add an arbitrary @waiter to the wait queue for the nominated @page.
+ */
+void add_page_wait_queue(struct page *page, wait_queue_t *waiter)
+{
+       wait_queue_head_t *q = page_waitqueue(page);
+       unsigned long flags;
+
+       spin_lock_irqsave(&q->lock, flags);
+       __add_wait_queue(q, waiter);
+       spin_unlock_irqrestore(&q->lock, flags);
+}
+EXPORT_SYMBOL_GPL(add_page_wait_queue);
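
Usage sketch for the new export (the caller below is hypothetical; the export is presumably aimed at code such as FS-Cache that wants to watch a page without sleeping on it): install a waiter with a custom wake function and it is invoked whenever the page's wait queue is woken.

	/* Hypothetical caller, for illustration only. */
	static int example_page_woken(wait_queue_t *wait, unsigned mode,
				      int sync, void *key)
	{
		/* Runs under the waitqueue lock when the page is woken. */
		return autoremove_wake_function(wait, mode, sync, key);
	}

	static void example_watch_page(struct page *page, wait_queue_t *wait)
	{
		init_waitqueue_func_entry(wait, example_page_woken);
		add_page_wait_queue(page, wait);
	}
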
+
+/**
  * unlock_page - unlock a locked page
  * @page: the page
  *
@@ -627,6 +646,7 @@ int __lock_page_killable(struct page *page)
        return __wait_on_bit_lock(page_waitqueue(page), &wait,
                                        sync_page_killable, TASK_KILLABLE);
 }
+EXPORT_SYMBOL_GPL(__lock_page_killable);
 
 /**
  * __lock_page_nosync - get a lock on the page, without calling sync_page()
@@ -741,7 +761,14 @@ repeat:
                page = __page_cache_alloc(gfp_mask);
                if (!page)
                        return NULL;
-               err = add_to_page_cache_lru(page, mapping, index, gfp_mask);
+               /*
+                * We want a regular kernel memory (not highmem or DMA etc)
+                * allocation for the radix tree nodes, but we need to honour
+                * the context-specific requirements the caller has asked for.
+                * GFP_RECLAIM_MASK collects those requirements.
+                */
+               err = add_to_page_cache_lru(page, mapping, index,
+                       (gfp_mask & GFP_RECLAIM_MASK));
                if (unlikely(err)) {
                        page_cache_release(page);
                        page = NULL;
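
The comment above also explains the earlier mem_cgroup_cache_charge() hunk. GFP_RECLAIM_MASK is defined in mm/internal.h; its exact contents are not shown here, but the intended effect can be illustrated as follows (assumption, not patch code):

	/*
	 * Illustration only: zone modifiers such as __GFP_HIGHMEM are
	 * stripped so the radix-tree node comes from normal kernel
	 * memory, while reclaim-behaviour bits (__GFP_WAIT, __GFP_IO,
	 * __GFP_FS, ...) carry through from the caller.
	 */
	gfp_t page_gfp  = mapping_gfp_mask(mapping);	/* e.g. GFP_HIGHUSER */
	gfp_t radix_gfp = page_gfp & GFP_RECLAIM_MASK;
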
@@ -950,7 +977,7 @@ grab_cache_page_nowait(struct address_space *mapping, pgoff_t index)
                return NULL;
        }
        page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS);
-       if (page && add_to_page_cache_lru(page, mapping, index, GFP_KERNEL)) {
+       if (page && add_to_page_cache_lru(page, mapping, index, GFP_NOFS)) {
                page_cache_release(page);
                page = NULL;
        }
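
GFP_NOFS matches the __GFP_FS clearing used for the page allocation just above; for reference (gfp.h values as commonly defined in this era, an assumption rather than part of the patch):

	#define GFP_NOFS	(__GFP_WAIT | __GFP_IO)
	#define GFP_KERNEL	(__GFP_WAIT | __GFP_IO | __GFP_FS)

so GFP_NOFS is exactly GFP_KERNEL with __GFP_FS removed, which is what the old GFP_KERNEL argument failed to express.
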
@@ -1317,7 +1344,8 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
                        goto out; /* skip atime */
                size = i_size_read(inode);
                if (pos < size) {
-                       retval = filemap_write_and_wait(mapping);
+                       retval = filemap_write_and_wait_range(mapping, pos,
+                                       pos + iov_length(iov, nr_segs) - 1);
                        if (!retval) {
                                retval = mapping->a_ops->direct_IO(READ, iocb,
                                                        iov, pos, nr_segs);
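
This hunk, along with the later ones in generic_file_direct_write() and generic_file_buffered_write(), narrows the flush from the whole file to just the bytes the I/O touches. The range variant takes inclusive byte offsets, hence the "- 1" on the end; its prototype (declared in include/linux/fs.h, reproduced here for reference):

	extern int filemap_write_and_wait_range(struct address_space *mapping,
						loff_t lstart, loff_t lend);
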
@@ -1366,7 +1394,7 @@ do_readahead(struct address_space *mapping, struct file *filp,
        return 0;
 }
 
-asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
+SYSCALL_DEFINE(readahead)(int fd, loff_t offset, size_t count)
 {
        ssize_t ret;
        struct file *file;
@@ -1385,6 +1413,13 @@ asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
        }
        return ret;
 }
+#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
+asmlinkage long SyS_readahead(long fd, loff_t offset, long count)
+{
+       return SYSC_readahead((int) fd, offset, (size_t) count);
+}
+SYSCALL_ALIAS(sys_readahead, SyS_readahead);
+#endif
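
readahead() carries a 64-bit loff_t, so it cannot rely solely on the fixed-arity SYSCALL_DEFINEn() helpers: the hand-written SyS_readahead() stub above narrows the register-sized fd and count while passing the 64-bit offset through untouched. For comparison (illustrative, not part of this patch), a syscall whose arguments all fit in a register gets its wrapper generated by the macro itself:

	/*
	 * e.g. fs/open.c:
	 *	SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group)
	 *	{ ... }
	 */
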
 
 #ifdef CONFIG_MMU
 /**
@@ -1530,7 +1565,6 @@ retry_find:
        /*
         * Found the page and have a reference on it.
         */
-       mark_page_accessed(page);
        ra->prev_pos = (loff_t)page->index << PAGE_CACHE_SHIFT;
        vmf->page = page;
        return ret | VM_FAULT_LOCKED;
@@ -1809,7 +1843,7 @@ static size_t __iovec_copy_from_user_inatomic(char *vaddr,
                int copy = min(bytes, iov->iov_len - base);
 
                base = 0;
-               left = __copy_from_user_inatomic_nocache(vaddr, buf, copy);
+               left = __copy_from_user_inatomic(vaddr, buf, copy);
                copied += copy;
                bytes -= copy;
                vaddr += copy;
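
Here and in the two hunks that follow, the non-temporal (_nocache) copy helpers are dropped: data freshly written into the page cache is usually touched again soon, so keeping it in the CPU cache is the safer default. Both helpers share the same shape; prototypes reproduced for reference (their exact header location is an assumption):

	/* Both return the number of bytes that could NOT be copied. */
	unsigned long __copy_from_user_inatomic(void *to,
						const void __user *from,
						unsigned long n);
	unsigned long __copy_from_user_inatomic_nocache(void *to,
						const void __user *from,
						unsigned long n);
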
@@ -1837,8 +1871,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
        if (likely(i->nr_segs == 1)) {
                int left;
                char __user *buf = i->iov->iov_base + i->iov_offset;
-               left = __copy_from_user_inatomic_nocache(kaddr + offset,
-                                                       buf, bytes);
+               left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
                copied = bytes - left;
        } else {
                copied = __iovec_copy_from_user_inatomic(kaddr + offset,
@@ -1866,7 +1899,7 @@ size_t iov_iter_copy_from_user(struct page *page,
        if (likely(i->nr_segs == 1)) {
                int left;
                char __user *buf = i->iov->iov_base + i->iov_offset;
-               left = __copy_from_user_nocache(kaddr + offset, buf, bytes);
+               left = __copy_from_user(kaddr + offset, buf, bytes);
                copied = bytes - left;
        } else {
                copied = __iovec_copy_from_user_inatomic(kaddr + offset,
@@ -2060,18 +2093,10 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
        if (count != ocount)
                *nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count);
 
-       /*
-        * Unmap all mmappings of the file up-front.
-        *
-        * This will cause any pte dirty bits to be propagated into the
-        * pageframes for the subsequent filemap_write_and_wait().
-        */
        write_len = iov_length(iov, *nr_segs);
        end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT;
-       if (mapping_mapped(mapping))
-               unmap_mapping_range(mapping, pos, write_len, 0);
 
-       written = filemap_write_and_wait(mapping);
+       written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1);
        if (written)
                goto out;
 
@@ -2291,7 +2316,8 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
         * the file data here, to try to honour O_DIRECT expectations.
         */
        if (unlikely(file->f_flags & O_DIRECT) && written)
-               status = filemap_write_and_wait(mapping);
+               status = filemap_write_and_wait_range(mapping,
+                                       pos, pos + written - 1);
 
        return written ? written : status;
 }
@@ -2457,6 +2483,9 @@ EXPORT_SYMBOL(generic_file_aio_write);
  * (presumably at page->private).  If the release was successful, return `1'.
  * Otherwise return zero.
  *
+ * This may also be called if PG_fscache is set on a page, indicating that the
+ * page is known to the local caching routines.
+ *
  * The @gfp_mask argument specifies whether I/O may be performed to release
  * this page (__GFP_IO), and whether the call may block (__GFP_WAIT & __GFP_FS).
  *