readahead: introduce context readahead algorithm
diff --git a/mm/readahead.c b/mm/readahead.c
index a7f01fc..aa1aa23 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -330,6 +330,59 @@ static unsigned long get_next_ra_size(struct file_ra_state *ra,
  */
 
 /*
+ * Count contiguously cached pages from @offset-1 to @offset-@max;
+ * this count is a conservative estimate of
+ *     - the length of the sequential read sequence, or
+ *     - the thrashing threshold on memory-tight systems
+ */
+static pgoff_t count_history_pages(struct address_space *mapping,
+                                  struct file_ra_state *ra,
+                                  pgoff_t offset, unsigned long max)
+{
+       pgoff_t head;
+
+       rcu_read_lock();
+       head = radix_tree_prev_hole(&mapping->page_tree, offset - 1, max);
+       rcu_read_unlock();
+
+       return offset - 1 - head;
+}
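
As a rough illustration of what count_history_pages() computes, here is a stand-alone sketch that walks backwards from @offset-1 over a plain bitmap standing in for the page cache radix tree. The bitmap, the helper name and the numbers are assumptions made for illustration only; the kernel gets the same answer with a single radix_tree_prev_hole() lookup instead of a loop.

#include <stdio.h>

/*
 * Toy model: count cached slots backwards from offset-1 until the
 * first hole, bounded by max.  "cached" is an illustrative bitmap,
 * not the kernel's mapping->page_tree.
 */
static unsigned long toy_count_history(const int *cached, unsigned long nslots,
				       unsigned long offset, unsigned long max)
{
	unsigned long count = 0;

	while (count < max && offset > count &&
	       offset - 1 - count < nslots && cached[offset - 1 - count])
		count++;

	return count;
}

int main(void)
{
	/* pages 2..9 are cached; a read now arrives at offset 10 */
	int cached[16] = { 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0 };

	/* prints 8: roughly 8 pages of the stream are still cached */
	printf("history pages: %lu\n", toy_count_history(cached, 16, 10, 32));
	return 0;
}
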
+
+/*
+ * page cache context based read-ahead
+ */
+static int try_context_readahead(struct address_space *mapping,
+                                struct file_ra_state *ra,
+                                pgoff_t offset,
+                                unsigned long req_size,
+                                unsigned long max)
+{
+       pgoff_t size;
+
+       size = count_history_pages(mapping, ra, offset, max);
+
+       /*
+        * no history pages:
+        * it could be a random read
+        */
+       if (!size)
+               return 0;
+
+       /*
+        * starts from the beginning of the file:
+        * a strong indication of a long-running stream (or a whole-file read)
+        */
+       if (size >= offset)
+               size *= 2;
+
+       ra->start = offset;
+       ra->size = get_init_ra_size(size + req_size, max);
+       ra->async_size = ra->size;
+
+       return 1;
+}
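
For a concrete feel of the sizing above, the trace below runs the same decision with get_init_ra_size() replaced by a simple clamp to max; the clamp, the toy struct and the numbers are assumptions for illustration, not the kernel's actual sizing policy.

#include <stdio.h>

struct toy_ra {
	unsigned long start, size, async_size;
};

/* stand-in for get_init_ra_size(): just clamp to max (assumption) */
static unsigned long toy_init_size(unsigned long size, unsigned long max)
{
	return size < max ? size : max;
}

static int toy_context_readahead(unsigned long history, struct toy_ra *ra,
				 unsigned long offset, unsigned long req_size,
				 unsigned long max)
{
	if (!history)			/* no cached history: looks random */
		return 0;
	if (history >= offset)		/* cached all the way back to page 0 */
		history *= 2;		/* assume the stream is twice as long */
	ra->start = offset;
	ra->size = toy_init_size(history + req_size, max);
	ra->async_size = ra->size;
	return 1;
}

int main(void)
{
	struct toy_ra ra = { 0, 0, 0 };

	/* 8 cached pages behind offset 10: partial history, no doubling */
	toy_context_readahead(8, &ra, 10, 4, 32);
	printf("partial history: size=%lu\n", ra.size);	/* 8 + 4 = 12 */

	/* 10 cached pages reach back to page 0: the history is doubled */
	toy_context_readahead(10, &ra, 10, 4, 32);
	printf("from file start: size=%lu\n", ra.size);	/* 10*2 + 4 = 24 */
	return 0;
}
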
+
+/*
  * A minimal readahead algorithm for trivial sequential/random reads.
  */
 static unsigned long
@@ -339,33 +392,25 @@ ondemand_readahead(struct address_space *mapping,
                   unsigned long req_size)
 {
        unsigned long max = max_sane_readahead(ra->ra_pages);
-       pgoff_t prev_offset;
-       int     sequential;
+
+       /*
+        * start of file
+        */
+       if (!offset)
+               goto initial_readahead;
 
        /*
         * It's the expected callback offset, assume sequential access.
         * Ramp up sizes, and push forward the readahead window.
         */
-       if (offset && (offset == (ra->start + ra->size - ra->async_size) ||
-                       offset == (ra->start + ra->size))) {
+       if ((offset == (ra->start + ra->size - ra->async_size) ||
+            offset == (ra->start + ra->size))) {
                ra->start += ra->size;
                ra->size = get_next_ra_size(ra, max);
                ra->async_size = ra->size;
                goto readit;
        }
 
-       prev_offset = ra->prev_pos >> PAGE_CACHE_SHIFT;
-       sequential = offset - prev_offset <= 1UL || req_size > max;
-
-       /*
-        * Standalone, small read.
-        * Read as is, and do not pollute the readahead state.
-        */
-       if (!hit_readahead_marker && !sequential) {
-               return __do_page_cache_readahead(mapping, filp,
-                                               offset, req_size, 0);
-       }
-
        /*
         * Hit a marked page without valid readahead state.
         * E.g. interleaved reads.
@@ -391,12 +436,31 @@ ondemand_readahead(struct address_space *mapping,
        }
 
        /*
-        * It may be one of
-        *      - first read on start of file
-        *      - sequential cache miss
-        *      - oversize random read
-        * Start readahead for it.
+        * oversize read
         */
+       if (req_size > max)
+               goto initial_readahead;
+
+       /*
+        * sequential cache miss
+        */
+       if (offset - (ra->prev_pos >> PAGE_CACHE_SHIFT) <= 1UL)
+               goto initial_readahead;
+
+       /*
+        * Query the page cache and look for the traces (cached history pages)
+        * that a sequential stream would leave behind.
+        */
+       if (try_context_readahead(mapping, ra, offset, req_size, max))
+               goto readit;
+
+       /*
+        * Standalone, small random read.
+        * Read as is, and do not pollute the readahead state.
+        */
+       return __do_page_cache_readahead(mapping, filp, offset, req_size, 0);
+
+initial_readahead:
        ra->start = offset;
        ra->size = get_init_ra_size(req_size, max);
        ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;
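
Putting the pieces together, the sketch below spells out the order of checks that ondemand_readahead() runs through after this patch. The enum, the struct and the helper are simplified stand-ins invented for illustration, not kernel interfaces; in particular, the sequential cache miss test relies on unsigned arithmetic, so a backwards jump wraps around to a huge value and falls through to the later checks.

#include <stdio.h>

enum ra_action {
	RA_INITIAL,	/* start a fresh window at @offset */
	RA_RAMP_UP,	/* expected offset: extend the current window */
	RA_INTERLEAVED,	/* marker hit without state: recover a window */
	RA_CONTEXT,	/* cached history found: context readahead */
	RA_RANDOM,	/* small stand-alone read: leave state untouched */
};

struct toy_ra {
	unsigned long start, size, async_size, prev_offset;
};

static enum ra_action classify(const struct toy_ra *ra, int marker_hit,
			       unsigned long offset, unsigned long req_size,
			       unsigned long max, unsigned long history)
{
	if (!offset)
		return RA_INITIAL;		/* start of file */
	if (offset == ra->start + ra->size - ra->async_size ||
	    offset == ra->start + ra->size)
		return RA_RAMP_UP;		/* sequential hit */
	if (marker_hit)
		return RA_INTERLEAVED;
	if (req_size > max)
		return RA_INITIAL;		/* oversize read */
	if (offset - ra->prev_offset <= 1UL)
		return RA_INITIAL;		/* sequential cache miss */
	if (history)
		return RA_CONTEXT;		/* cached history pages */
	return RA_RANDOM;			/* small random read */
}

int main(void)
{
	struct toy_ra ra = { .start = 0, .size = 8, .async_size = 4,
			     .prev_offset = 3 };

	printf("%d\n", classify(&ra, 0, 4, 1, 32, 0));	/* 1: RA_RAMP_UP */
	printf("%d\n", classify(&ra, 0, 100, 1, 32, 6));	/* 3: RA_CONTEXT */
	return 0;
}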