diff --git a/mm/readahead.c b/mm/readahead.c
index ceed7e4..033bc13 100644
@@ -330,6 +330,59 @@ static unsigned long get_next_ra_size(struct file_ra_state *ra,
  */
 
 /*
+ * Count contiguously cached pages from @offset-1 down to @offset-@max.
+ * This count is a conservative estimate of
+ *     - the length of the sequential read sequence, or
+ *     - the thrashing threshold on memory-tight systems
+ */
+static pgoff_t count_history_pages(struct address_space *mapping,
+                                  struct file_ra_state *ra,
+                                  pgoff_t offset, unsigned long max)
+{
+       pgoff_t head;
+
+       rcu_read_lock();
+       head = radix_tree_prev_hole(&mapping->page_tree, offset - 1, max);
+       rcu_read_unlock();
+
+       return offset - 1 - head;
+}
+
+/*
+ * page cache context-based readahead
+ */
+static int try_context_readahead(struct address_space *mapping,
+                                struct file_ra_state *ra,
+                                pgoff_t offset,
+                                unsigned long req_size,
+                                unsigned long max)
+{
+       pgoff_t size;
+
+       size = count_history_pages(mapping, ra, offset, max);
+
+       /*
+        * no history pages:
+        * it could be a random read
+        */
+       if (!size)
+               return 0;
+
+       /*
+        * starts from the beginning of the file:
+        * a strong indication of a long-run stream (or a whole-file read)
+        */
+       if (size >= offset)
+               size *= 2;
+
+       ra->start = offset;
+       ra->size = get_init_ra_size(size + req_size, max);
+       ra->async_size = ra->size;
+
+       return 1;
+}
+
+/*
  * A minimal readahead algorithm for trivial sequential/random reads.
  */
 static unsigned long
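To make the two helpers above concrete, here is a minimal userspace sketch of the same heuristic. It is an illustration, not kernel code: cached[], find_prev_hole() and the sizes are stand-ins for the page-cache radix tree, radix_tree_prev_hole() and the real request sizes, and get_init_ra_size() is reduced to a plain sum.

#include <stdio.h>
#include <stdbool.h>

#define FILE_PAGES 64

static bool cached[FILE_PAGES];         /* toy page cache: cached[i] == page i present */

/*
 * Scan backwards from @index over at most @max slots; return the index
 * of the first hole, or @index - @max if every scanned slot is cached
 * (the same contract as radix_tree_prev_hole()).
 */
static long find_prev_hole(long index, unsigned long max)
{
        long stop = index - (long)max;

        for (; index > stop; index--)
                if (index < 0 || !cached[index])
                        break;
        return index;
}

int main(void)
{
        unsigned long max = 32, req_size = 4, hist, size;
        long offset = 8;                /* read request begins at page 8 */
        int i;

        /* simulate a stream that has already consumed pages 0..7 */
        for (i = 0; i < 8; i++)
                cached[i] = true;

        hist = offset - 1 - find_prev_hole(offset - 1, max);
        if (!hist) {
                puts("no history pages: treat as a random read");
                return 0;
        }

        size = hist;
        if (hist >= (unsigned long)offset)
                size *= 2;      /* history reaches the file start: assume a long stream */

        printf("ra.start=%ld ra.size~=%lu (history %lu, request %lu)\n",
               offset, size + req_size, hist, req_size);
        return 0;
}

With pages 0..7 cached and a read at page 8, the history reaches back to the file start (hist >= offset), so the estimate is doubled before the window is sized: the visible history is capped at the file start and likely understates the true length of the stream.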
@@ -395,6 +448,13 @@ ondemand_readahead(struct address_space *mapping,
                goto initial_readahead;
 
        /*
+        * Query the page cache and look for the traces (cached history pages)
+        * that a sequential stream would leave behind.
+        */
+       if (try_context_readahead(mapping, ra, offset, req_size, max))
+               goto readit;
+
+       /*
         * standalone, small random read
         * Read as is, and do not pollute the readahead state.
         */
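Note where the call lands: try_context_readahead() runs before the small-random-read bailout below, so a read with any cached history behind it is sized as part of a stream, and only a read with no history at all falls through to the state-preserving random-read path.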
@@ -487,5 +547,17 @@ page_cache_async_readahead(struct address_space *mapping,
 
        /* do read-ahead */
        ondemand_readahead(mapping, ra, filp, true, offset, req_size);
+
+#ifdef CONFIG_BLOCK
+       /*
+        * Normally the current page is !uptodate and lock_page() will be
+        * immediately called to implicitly unplug the device. However this
+        * is not always true for RAID configurations, where data does not
+        * necessarily arrive in submission order. In that case we need to
+        * explicitly kick off the IO.
+        */
+       if (PageUptodate(page))
+               blk_run_backing_dev(mapping->backing_dev_info, NULL);
+#endif
 }
 EXPORT_SYMBOL_GPL(page_cache_async_readahead);
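The decision encoded in the CONFIG_BLOCK hunk is small enough to model on its own. A toy sketch of just that branch, with a bool and printf standing in for PageUptodate() and blk_run_backing_dev():

#include <stdio.h>
#include <stdbool.h>

/* model of the tail of page_cache_async_readahead() above */
static void async_readahead_tail(bool page_uptodate)
{
        if (page_uptodate) {
                /*
                 * Out-of-order completion (e.g. RAID): the caller will
                 * not block in lock_page(), so nothing would unplug the
                 * queue; kick the device explicitly, as
                 * blk_run_backing_dev() does in the real code.
                 */
                printf("page uptodate: kick the device queue explicitly\n");
        } else {
                /*
                 * Common case: the caller blocks in lock_page(), which
                 * unplugs the queue implicitly before sleeping.
                 */
                printf("page !uptodate: lock_page() unplugs implicitly\n");
        }
}

int main(void)
{
        async_readahead_tail(false);    /* strictly ordered completion */
        async_readahead_tail(true);     /* RAID-style reordering */
        return 0;
}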