Btrfs: Drop some verbose printks
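
Besides dropping the verbose "sector too large" and "page not up to date" printks (and the WARN_ONs that went with them), the diff shown below carries several related changes: the dead `|| IS_ERR(...)` checks after tree_search() and the slab allocators are removed, leak-debugging lists for extent_state and extent_buffer objects are added under a new leak_lock, a mirror_num argument is threaded through submit_one_bio()/submit_extent_page() together with submit_bio_hook, merge_bio_hook and readpage_io_failed_hook callbacks, and the releasepage state handling is split out into try_release_extent_state().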
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index fb64008..dd403b4 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -26,6 +26,7 @@ static struct kmem_cache *extent_buffer_cache;
 
 static LIST_HEAD(buffers);
 static LIST_HEAD(states);
+static spinlock_t leak_lock = SPIN_LOCK_UNLOCKED;
 
 #define BUFFER_LRU_MAX 64
 
@@ -64,15 +65,22 @@ free_state_cache:
 void extent_io_exit(void)
 {
        struct extent_state *state;
+       struct extent_buffer *eb;
 
        while (!list_empty(&states)) {
-               state = list_entry(states.next, struct extent_state, list);
+               state = list_entry(states.next, struct extent_state, leak_list);
                printk("state leak: start %Lu end %Lu state %lu in tree %p refs %d\n", state->start, state->end, state->state, state->tree, atomic_read(&state->refs));
-               list_del(&state->list);
+               list_del(&state->leak_list);
                kmem_cache_free(extent_state_cache, state);
 
        }
 
+       while (!list_empty(&buffers)) {
+               eb = list_entry(buffers.next, struct extent_buffer, leak_list);
+               printk("buffer leak start %Lu len %lu refs %d\n", eb->start, eb->len, atomic_read(&eb->refs));
+               list_del(&eb->leak_list);
+               kmem_cache_free(extent_buffer_cache, eb);
+       }
        if (extent_state_cache)
                kmem_cache_destroy(extent_state_cache);
        if (extent_buffer_cache)
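
The leak debugging added above follows a standard pattern: every allocation links the object onto a global list under leak_lock, every free unlinks it, and extent_io_exit() dumps and reclaims whatever is left. A minimal sketch of the same pattern, with hypothetical names (my_obj, my_obj_alloc), written with DEFINE_SPINLOCK, which later kernels require in place of the SPIN_LOCK_UNLOCKED initializer used in this patch:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static LIST_HEAD(leak_objects);
static DEFINE_SPINLOCK(my_leak_lock);	/* modern replacement for SPIN_LOCK_UNLOCKED */

struct my_obj {
	struct list_head leak_list;	/* exists only for leak accounting */
	/* ... real fields ... */
};

static struct my_obj *my_obj_alloc(gfp_t mask)
{
	struct my_obj *obj = kmalloc(sizeof(*obj), mask);
	unsigned long flags;

	if (!obj)
		return NULL;
	spin_lock_irqsave(&my_leak_lock, flags);
	list_add(&obj->leak_list, &leak_objects);
	spin_unlock_irqrestore(&my_leak_lock, flags);
	return obj;
}

static void my_obj_free(struct my_obj *obj)
{
	unsigned long flags;

	spin_lock_irqsave(&my_leak_lock, flags);
	list_del(&obj->leak_list);
	spin_unlock_irqrestore(&my_leak_lock, flags);
	kfree(obj);
}

static void my_leak_check(void)
{
	struct my_obj *obj;

	/* anything still on the list at module unload was leaked */
	while (!list_empty(&leak_objects)) {
		obj = list_first_entry(&leak_objects, struct my_obj, leak_list);
		pr_err("my_obj leak: %p\n", obj);
		list_del(&obj->leak_list);
		kfree(obj);
	}
}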
@@ -109,13 +117,17 @@ EXPORT_SYMBOL(extent_io_tree_empty_lru);
 struct extent_state *alloc_extent_state(gfp_t mask)
 {
        struct extent_state *state;
+       unsigned long flags;
 
        state = kmem_cache_alloc(extent_state_cache, mask);
-       if (!state || IS_ERR(state))
+       if (!state)
                return state;
        state->state = 0;
        state->private = 0;
        state->tree = NULL;
+       spin_lock_irqsave(&leak_lock, flags);
+       list_add(&state->leak_list, &states);
+       spin_unlock_irqrestore(&leak_lock, flags);
 
        atomic_set(&state->refs, 1);
        init_waitqueue_head(&state->wq);
@@ -128,7 +140,11 @@ void free_extent_state(struct extent_state *state)
        if (!state)
                return;
        if (atomic_dec_and_test(&state->refs)) {
+               unsigned long flags;
                WARN_ON(state->tree);
+               spin_lock_irqsave(&leak_lock, flags);
+               list_del(&state->leak_list);
+               spin_unlock_irqrestore(&leak_lock, flags);
                kmem_cache_free(extent_state_cache, state);
        }
 }
@@ -941,7 +957,7 @@ int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
         * our range starts.
         */
        node = tree_search(tree, start);
-       if (!node || IS_ERR(node)) {
+       if (!node) {
                goto out;
        }
 
@@ -974,7 +990,7 @@ struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
         * our range starts.
         */
        node = tree_search(tree, start);
-       if (!node || IS_ERR(node)) {
+       if (!node) {
                goto out;
        }
 
@@ -1008,8 +1024,9 @@ u64 find_lock_delalloc_range(struct extent_io_tree *tree,
         */
 search_again:
        node = tree_search(tree, cur_start);
-       if (!node || IS_ERR(node)) {
-               *end = (u64)-1;
+       if (!node) {
+               if (!found)
+                       *end = (u64)-1;
                goto out;
        }
 
@@ -1096,7 +1113,7 @@ u64 count_range_bits(struct extent_io_tree *tree,
         * our range starts.
         */
        node = tree_search(tree, cur_start);
-       if (!node || IS_ERR(node)) {
+       if (!node) {
                goto out;
        }
 
@@ -1197,7 +1214,7 @@ int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
         * our range starts.
         */
        node = tree_search(tree, start);
-       if (!node || IS_ERR(node)) {
+       if (!node) {
                ret = -ENOENT;
                goto out;
        }
@@ -1224,7 +1241,7 @@ int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
         * our range starts.
         */
        node = tree_search(tree, start);
-       if (!node || IS_ERR(node)) {
+       if (!node) {
                ret = -ENOENT;
                goto out;
        }
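
All of the `|| IS_ERR(node)` tests removed in these hunks were dead code: tree_search(), like kmem_cache_alloc() above, signals failure with a plain NULL and never returns an ERR_PTR()-encoded errno, so IS_ERR() could never be true here. For reference, the two kernel error-return conventions and the check each one requires; both lookup helpers below are hypothetical:

#include <linux/err.h>
#include <linux/rbtree.h>
#include <linux/types.h>

struct my_tree;
extern struct rb_node *null_returning_lookup(struct my_tree *tree, u64 start);
extern struct rb_node *errptr_returning_lookup(struct my_tree *tree, u64 start);

static int lookup_example(struct my_tree *tree, u64 start)
{
	struct rb_node *node;

	/* NULL-on-failure, as tree_search() uses: testing IS_ERR()
	 * on this result can never fire */
	node = null_returning_lookup(tree, start);
	if (!node)
		return -ENOENT;

	/* ERR_PTR-on-failure: the pointer itself encodes the errno */
	node = errptr_returning_lookup(tree, start);
	if (IS_ERR(node))
		return PTR_ERR(node);

	return 0;
}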
@@ -1518,6 +1535,17 @@ static int end_bio_extent_readpage(struct bio *bio,
                        if (ret)
                                uptodate = 0;
                }
+               if (!uptodate && tree->ops &&
+                   tree->ops->readpage_io_failed_hook) {
+                       ret = tree->ops->readpage_io_failed_hook(bio, page,
+                                                        start, end, state);
+                       if (ret == 0) {
+                               state = NULL;
+                               uptodate =
+                                       test_bit(BIO_UPTODATE, &bio->bi_flags);
+                               continue;
+                       }
+               }
 
                spin_lock_irqsave(&tree->lock, flags);
                if (!state || state->end != end) {
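
The hook added above gives the filesystem one chance to recover a failed read before the range is marked: when readpage_io_failed_hook() returns 0, a retry (typically against another copy of the block) is already in flight and this completion skips the page. A sketch of what such a hook can look like; the helper names and the way the failed mirror is recorded are illustrative, not this patch's API:

extern int get_bio_mirror(struct bio *bio);
extern int get_num_copies(struct inode *inode, u64 start);
extern void resubmit_extent_read(struct page *page, u64 start, u64 end,
				 int mirror_num);

static int my_readpage_io_failed_hook(struct bio *failed_bio,
				      struct page *page, u64 start,
				      u64 end, struct extent_state *state)
{
	/* which copy failed, and how many copies exist for this range */
	int failed_mirror = get_bio_mirror(failed_bio);
	int num_copies = get_num_copies(page->mapping->host, start);

	if (failed_mirror + 1 > num_copies)
		return -EIO;	/* no other copy to try: let the error stand */

	/* resubmit the same range, asking explicitly for the next mirror;
	 * returning 0 tells end_bio_extent_readpage() a retry is pending */
	resubmit_extent_read(page, start, end, failed_mirror + 1);
	return 0;
}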
@@ -1532,8 +1560,9 @@ static int end_bio_extent_readpage(struct bio *bio,
                        }
                        if (!state) {
                                spin_unlock_irqrestore(&tree->lock, flags);
-                               set_extent_uptodate(tree, start, end,
-                                                   GFP_ATOMIC);
+                               if (uptodate)
+                                       set_extent_uptodate(tree, start, end,
+                                                           GFP_ATOMIC);
                                unlock_extent(tree, start, end, GFP_ATOMIC);
                                goto next_io;
                        }
@@ -1551,8 +1580,10 @@ static int end_bio_extent_readpage(struct bio *bio,
                        } else {
                                state = NULL;
                        }
-                       set_state_cb(tree, clear, EXTENT_UPTODATE);
-                       clear->state |= EXTENT_UPTODATE;
+                       if (uptodate) {
+                               set_state_cb(tree, clear, EXTENT_UPTODATE);
+                               clear->state |= EXTENT_UPTODATE;
+                       }
                        clear_state_bit(tree, clear, EXTENT_LOCKED,
                                        1, 0);
                        if (cur == start)
@@ -1663,15 +1694,15 @@ extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
        }
 
        if (bio) {
+               bio->bi_size = 0;
                bio->bi_bdev = bdev;
                bio->bi_sector = first_sector;
        }
        return bio;
 }
 
-static int submit_one_bio(int rw, struct bio *bio)
+static int submit_one_bio(int rw, struct bio *bio, int mirror_num)
 {
-       u64 maxsector;
        int ret = 0;
        struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
        struct page *page = bvec->bv_page;
@@ -1699,14 +1730,11 @@ static int submit_one_bio(int rw, struct bio *bio)
 
        bio_get(bio);
 
-        maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
-       if (maxsector < bio->bi_sector) {
-               printk("sector too large max %Lu got %llu\n", maxsector,
-                       (unsigned long long)bio->bi_sector);
-               WARN_ON(1);
-       }
-
-       submit_bio(rw, bio);
+       if (tree->ops && tree->ops->submit_bio_hook)
+               tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
+                                          mirror_num);
+       else
+               submit_bio(rw, bio);
        if (bio_flagged(bio, BIO_EOPNOTSUPP))
                ret = -EOPNOTSUPP;
        bio_put(bio);
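
Routing submission through tree->ops->submit_bio_hook is what makes mirror_num meaningful: the generic code no longer assumes a single block device, and the filesystem hook can checksum, map the logical range across devices, and pick the requested copy. The indirection itself, reduced to a sketch (my_* names are hypothetical; submit_bio(rw, bio) is the 2.6-era signature used throughout this file):

struct my_io_ops {
	int (*submit_bio_hook)(struct inode *inode, int rw,
			       struct bio *bio, int mirror_num);
};

struct my_io_tree {
	struct my_io_ops *ops;
};

static void my_submit(struct my_io_tree *tree, struct inode *inode,
		      int rw, struct bio *bio, int mirror_num)
{
	if (tree->ops && tree->ops->submit_bio_hook)
		/* filesystem decides which device/copy services the bio */
		tree->ops->submit_bio_hook(inode, rw, bio, mirror_num);
	else
		/* no hook: hand the bio straight to the block layer */
		submit_bio(rw, bio);
}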
@@ -1719,7 +1747,8 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
                              struct block_device *bdev,
                              struct bio **bio_ret,
                              unsigned long max_pages,
-                             bio_end_io_t end_io_func)
+                             bio_end_io_t end_io_func,
+                             int mirror_num)
 {
        int ret = 0;
        struct bio *bio;
@@ -1728,8 +1757,10 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
        if (bio_ret && *bio_ret) {
                bio = *bio_ret;
                if (bio->bi_sector + (bio->bi_size >> 9) != sector ||
+                   (tree->ops && tree->ops->merge_bio_hook &&
+                    tree->ops->merge_bio_hook(page, offset, size, bio)) ||
                    bio_add_page(bio, page, size, offset) < size) {
-                       ret = submit_one_bio(rw, bio);
+                       ret = submit_one_bio(rw, bio, mirror_num);
                        bio = NULL;
                } else {
                        return 0;
@@ -1749,7 +1780,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
        if (bio_ret) {
                *bio_ret = bio;
        } else {
-               ret = submit_one_bio(rw, bio);
+               ret = submit_one_bio(rw, bio, mirror_num);
        }
 
        return ret;
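
submit_extent_page() builds bios incrementally: it keeps a cached bio in *bio_ret and appends pages until the sectors stop being contiguous, the new merge_bio_hook vetoes the merge, or bio_add_page() refuses the page; only then is the accumulated bio submitted. The accumulate-then-flush shape in isolation, using the era's bi_sector/bi_size fields and hypothetical my_* helpers:

extern void my_submit_one_bio(int rw, struct bio *bio, int mirror_num);
extern struct bio *my_alloc_bio(sector_t sector);

static void append_or_flush(struct bio **biop, struct page *page,
			    sector_t sector, unsigned int size,
			    unsigned int offset, int mirror_num)
{
	struct bio *bio = *biop;

	if (bio &&
	    (bio->bi_sector + (bio->bi_size >> 9) != sector ||	/* gap */
	     bio_add_page(bio, page, size, offset) < size)) {	/* full */
		my_submit_one_bio(READ, bio, mirror_num);
		bio = NULL;
	}
	if (!bio) {
		bio = my_alloc_bio(sector);
		bio_add_page(bio, page, size, offset);
	}
	*biop = bio;
}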
@@ -1778,7 +1809,7 @@ void set_page_extent_head(struct page *page, unsigned long len)
 static int __extent_read_full_page(struct extent_io_tree *tree,
                                   struct page *page,
                                   get_extent_t *get_extent,
-                                  struct bio **bio)
+                                  struct bio **bio, int mirror_num)
 {
        struct inode *inode = page->mapping->host;
        u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
@@ -1881,7 +1912,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
                        ret = submit_extent_page(READ, tree, page,
                                         sector, iosize, page_offset,
                                         bdev, bio, nr,
-                                        end_bio_extent_readpage);
+                                        end_bio_extent_readpage, mirror_num);
                }
                if (ret)
                        SetPageError(page);
@@ -1903,9 +1934,9 @@ int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
        struct bio *bio = NULL;
        int ret;
 
-       ret = __extent_read_full_page(tree, page, get_extent, &bio);
+       ret = __extent_read_full_page(tree, page, get_extent, &bio, 0);
        if (bio)
-               submit_one_bio(READ, bio);
+               submit_one_bio(READ, bio, 0);
        return ret;
 }
 EXPORT_SYMBOL(extent_read_full_page);
@@ -2057,7 +2088,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
                        ret = submit_extent_page(WRITE, tree, page, sector,
                                                 iosize, page_offset, bdev,
                                                 &epd->bio, max_nr,
-                                                end_bio_extent_writepage);
+                                                end_bio_extent_writepage, 0);
                        if (ret)
                                SetPageError(page);
                }
@@ -2076,8 +2107,7 @@ done:
        return 0;
 }
 
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
-
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
 /* Taken directly from 2.6.23 for 2.6.18 back port */
 typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
                                 void *data);
@@ -2224,7 +2254,7 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
 
        write_cache_pages(mapping, &wbc_writepages, __extent_writepage, &epd);
        if (epd.bio) {
-               submit_one_bio(WRITE, epd.bio);
+               submit_one_bio(WRITE, epd.bio, 0);
        }
        return ret;
 }
@@ -2245,7 +2275,7 @@ int extent_writepages(struct extent_io_tree *tree,
 
        ret = write_cache_pages(mapping, wbc, __extent_writepage, &epd);
        if (epd.bio) {
-               submit_one_bio(WRITE, epd.bio);
+               submit_one_bio(WRITE, epd.bio, 0);
        }
        return ret;
 }
@@ -2277,7 +2307,8 @@ int extent_readpages(struct extent_io_tree *tree,
                        page_cache_get(page);
                        if (!pagevec_add(&pvec, page))
                                __pagevec_lru_add(&pvec);
-                       __extent_read_full_page(tree, page, get_extent, &bio);
+                       __extent_read_full_page(tree, page, get_extent,
+                                               &bio, 0);
                }
                page_cache_release(page);
        }
@@ -2285,7 +2316,7 @@ int extent_readpages(struct extent_io_tree *tree,
                __pagevec_lru_add(&pvec);
        BUG_ON(!list_empty(pages));
        if (bio)
-               submit_one_bio(READ, bio);
+               submit_one_bio(READ, bio, 0);
        return 0;
 }
 EXPORT_SYMBOL(extent_readpages);
@@ -2410,7 +2441,7 @@ int extent_prepare_write(struct extent_io_tree *tree,
                        ret = submit_extent_page(READ, tree, page,
                                         sector, iosize, page_offset, em->bdev,
                                         NULL, 1,
-                                        end_bio_extent_preparewrite);
+                                        end_bio_extent_preparewrite, 0);
                        iocount++;
                        block_start = block_start + iosize;
                } else {
@@ -2434,6 +2465,31 @@ err:
 EXPORT_SYMBOL(extent_prepare_write);
 
 /*
+ * a helper for releasepage, this tests for areas of the page that
+ * are locked or under IO and drops the related state bits if it is safe
+ * to drop the page.
+ */
+int try_release_extent_state(struct extent_map_tree *map,
+                            struct extent_io_tree *tree, struct page *page,
+                            gfp_t mask)
+{
+       u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+       u64 end = start + PAGE_CACHE_SIZE - 1;
+       int ret = 1;
+
+       if (test_range_bit(tree, start, end, EXTENT_IOBITS, 0))
+               ret = 0;
+       else {
+               if ((mask & GFP_NOFS) == GFP_NOFS)
+                       mask = GFP_NOFS;
+               clear_extent_bit(tree, start, end, EXTENT_UPTODATE,
+                                1, 1, mask);
+       }
+       return ret;
+}
+EXPORT_SYMBOL(try_release_extent_state);
+
+/*
  * a helper for releasepage.  As long as there are no locked extents
  * in the range corresponding to the page, both state records and extent
  * map records are removed
@@ -2445,8 +2501,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
        struct extent_map *em;
        u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
        u64 end = start + PAGE_CACHE_SIZE - 1;
-       u64 orig_start = start;
-       int ret = 1;
+
        if ((mask & __GFP_WAIT) &&
            page->mapping->host->i_size > 16 * 1024 * 1024) {
                u64 len;
@@ -2477,15 +2532,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
                        free_extent_map(em);
                }
        }
-       if (test_range_bit(tree, orig_start, end, EXTENT_IOBITS, 0))
-               ret = 0;
-       else {
-               if ((mask & GFP_NOFS) == GFP_NOFS)
-                       mask = GFP_NOFS;
-               clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE,
-                                1, 1, mask);
-       }
-       return ret;
+       return try_release_extent_state(map, tree, page, mask);
 }
 EXPORT_SYMBOL(try_release_extent_mapping);
 
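With the common tail factored out, try_release_extent_mapping() first prunes extent_map records it can prove idle, then defers to try_release_extent_state() for the state bits; a caller with no map records to worry about can use the new helper directly. Roughly how a releasepage implementation wires this up (the accessors are hypothetical, not taken from btrfs):

extern struct extent_io_tree *page_io_tree(struct page *page);
extern struct extent_map_tree *page_map_tree(struct page *page);

static int my_releasepage(struct page *page, gfp_t gfp_flags)
{
	struct extent_io_tree *tree = page_io_tree(page);
	struct extent_map_tree *map = page_map_tree(page);

	/* returns 1 only if the page's range held no locked or
	 * in-flight extent state, i.e. the page may be released */
	return try_release_extent_mapping(map, tree, page, gfp_flags);
}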
@@ -2572,12 +2619,53 @@ static inline struct page *extent_buffer_page(struct extent_buffer *eb,
        return p;
 }
 
+int release_extent_buffer_tail_pages(struct extent_buffer *eb)
+{
+       unsigned long num_pages = num_extent_pages(eb->start, eb->len);
+       struct page *page;
+       unsigned long i;
+
+       if (num_pages == 1)
+               return 0;
+       for (i = 1; i < num_pages; i++) {
+               page = extent_buffer_page(eb, i);
+               page_cache_release(page);
+       }
+       return 0;
+}
+
+
+int invalidate_extent_lru(struct extent_io_tree *tree, u64 start,
+                         unsigned long len)
+{
+       struct list_head *lru = &tree->buffer_lru;
+       struct list_head *cur = lru->next;
+       struct extent_buffer *eb;
+       int found = 0;
+
+       spin_lock(&tree->lru_lock);
+       if (list_empty(lru))
+               goto out;
+
+       do {
+               eb = list_entry(cur, struct extent_buffer, lru);
+               if (eb->start <= start && eb->start + eb->len > start) {
+                       eb->flags &= ~EXTENT_UPTODATE;
+               }
+               cur = cur->next;
+       } while (cur != lru);
+out:
+       spin_unlock(&tree->lru_lock);
+       return found;
+}
+
 static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
                                                   u64 start,
                                                   unsigned long len,
                                                   gfp_t mask)
 {
        struct extent_buffer *eb = NULL;
+       unsigned long flags;
 
        spin_lock(&tree->lru_lock);
        eb = find_lru(tree, start, len);
@@ -2590,6 +2678,9 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
        INIT_LIST_HEAD(&eb->lru);
        eb->start = start;
        eb->len = len;
+       spin_lock_irqsave(&leak_lock, flags);
+       list_add(&eb->leak_list, &buffers);
+       spin_unlock_irqrestore(&leak_lock, flags);
        atomic_set(&eb->refs, 1);
 
        return eb;
@@ -2597,6 +2688,10 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
 
 static void __free_extent_buffer(struct extent_buffer *eb)
 {
+       unsigned long flags;
+       spin_lock_irqsave(&leak_lock, flags);
+       list_del(&eb->leak_list);
+       spin_unlock_irqrestore(&leak_lock, flags);
        kmem_cache_free(extent_buffer_cache, eb);
 }
 
@@ -2614,7 +2709,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
        int uptodate = 1;
 
        eb = __alloc_extent_buffer(tree, start, len, mask);
-       if (!eb || IS_ERR(eb))
+       if (!eb)
                return NULL;
 
        if (eb->flags & EXTENT_BUFFER_FILLED)
@@ -2627,8 +2722,8 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
                page_cache_get(page0);
                mark_page_accessed(page0);
                set_page_extent_mapped(page0);
-               WARN_ON(!PageUptodate(page0));
                set_page_extent_head(page0, len);
+               uptodate = PageUptodate(page0);
        } else {
                i = 0;
        }
@@ -2689,7 +2784,7 @@ struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
        int uptodate = 1;
 
        eb = __alloc_extent_buffer(tree, start, len, mask);
-       if (!eb || IS_ERR(eb))
+       if (!eb)
                return NULL;
 
        if (eb->flags & EXTENT_BUFFER_FILLED)
@@ -2878,34 +2973,81 @@ int set_extent_buffer_uptodate(struct extent_io_tree *tree,
 }
 EXPORT_SYMBOL(set_extent_buffer_uptodate);
 
+int extent_range_uptodate(struct extent_io_tree *tree,
+                         u64 start, u64 end)
+{
+       struct page *page;
+       int ret;
+       int pg_uptodate = 1;
+       int uptodate;
+       unsigned long index;
+
+       ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1);
+       if (ret)
+               return 1;
+       while(start <= end) {
+               index = start >> PAGE_CACHE_SHIFT;
+               page = find_get_page(tree->mapping, index);
+               uptodate = PageUptodate(page);
+               page_cache_release(page);
+               if (!uptodate) {
+                       pg_uptodate = 0;
+                       break;
+               }
+               start += PAGE_CACHE_SIZE;
+       }
+       return pg_uptodate;
+}
+
 int extent_buffer_uptodate(struct extent_io_tree *tree,
-                            struct extent_buffer *eb)
+                          struct extent_buffer *eb)
 {
+       int ret = 0;
+       unsigned long num_pages;
+       unsigned long i;
+       struct page *page;
+       int pg_uptodate = 1;
+
        if (eb->flags & EXTENT_UPTODATE)
                return 1;
-       return test_range_bit(tree, eb->start, eb->start + eb->len - 1,
+
+       ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
                           EXTENT_UPTODATE, 1);
+       if (ret)
+               return ret;
+
+       num_pages = num_extent_pages(eb->start, eb->len);
+       for (i = 0; i < num_pages; i++) {
+               page = extent_buffer_page(eb, i);
+               if (!PageUptodate(page)) {
+                       pg_uptodate = 0;
+                       break;
+               }
+       }
+       return pg_uptodate;
 }
 EXPORT_SYMBOL(extent_buffer_uptodate);
 
 int read_extent_buffer_pages(struct extent_io_tree *tree,
                             struct extent_buffer *eb,
                             u64 start, int wait,
-                            get_extent_t *get_extent)
+                            get_extent_t *get_extent, int mirror_num)
 {
        unsigned long i;
        unsigned long start_i;
        struct page *page;
        int err;
        int ret = 0;
+       int locked_pages = 0;
+       int all_uptodate = 1;
+       int inc_all_pages = 0;
        unsigned long num_pages;
        struct bio *bio = NULL;
 
-
        if (eb->flags & EXTENT_UPTODATE)
                return 0;
 
-       if (0 && test_range_bit(tree, eb->start, eb->start + eb->len - 1,
+       if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
                           EXTENT_UPTODATE, 1)) {
                return 0;
        }
@@ -2921,19 +3063,34 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
        num_pages = num_extent_pages(eb->start, eb->len);
        for (i = start_i; i < num_pages; i++) {
                page = extent_buffer_page(eb, i);
-               if (PageUptodate(page)) {
-                       continue;
-               }
                if (!wait) {
-                       if (TestSetPageLocked(page)) {
-                               continue;
-                       }
+                       if (TestSetPageLocked(page))
+                               goto unlock_exit;
                } else {
                        lock_page(page);
                }
+               locked_pages++;
                if (!PageUptodate(page)) {
+                       all_uptodate = 0;
+               }
+       }
+       if (all_uptodate) {
+               if (start_i == 0)
+                       eb->flags |= EXTENT_UPTODATE;
+               goto unlock_exit;
+       }
+
+       for (i = start_i; i < num_pages; i++) {
+               page = extent_buffer_page(eb, i);
+               if (inc_all_pages)
+                       page_cache_get(page);
+               if (!PageUptodate(page)) {
+                       if (start_i == 0)
+                               inc_all_pages = 1;
+                       ClearPageError(page);
                        err = __extent_read_full_page(tree, page,
-                                                     get_extent, &bio);
+                                                     get_extent, &bio,
+                                                     mirror_num);
                        if (err) {
                                ret = err;
                        }
@@ -2943,7 +3100,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
        }
 
        if (bio)
-               submit_one_bio(READ, bio);
+               submit_one_bio(READ, bio, mirror_num);
 
        if (ret || !wait) {
                return ret;
@@ -2958,6 +3115,16 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
        if (!ret)
                eb->flags |= EXTENT_UPTODATE;
        return ret;
+
+unlock_exit:
+       i = start_i;
+       while(locked_pages > 0) {
+               page = extent_buffer_page(eb, i);
+               i++;
+               unlock_page(page);
+               locked_pages--;
+       }
+       return ret;
 }
 EXPORT_SYMBOL(read_extent_buffer_pages);
 
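The reworked read_extent_buffer_pages() is a lock-all-or-back-out loop: it first takes every page lock, bailing to unlock_exit in the nowait case instead of silently skipping pages (a skipped page could be mid-I/O), and only then, if some page was stale, issues reads in a second pass. The control shape on its own, with hypothetical wiring; TestSetPageLocked() is this era's try-lock, later renamed trylock_page():

static int lock_all_or_none(struct page **pages, int nr, int wait)
{
	int locked = 0;
	int i;

	for (i = 0; i < nr; i++) {
		if (!wait) {
			if (TestSetPageLocked(pages[i]))
				goto unlock_exit;	/* would block: undo */
		} else {
			lock_page(pages[i]);
		}
		locked++;
	}
	return 0;

unlock_exit:
	while (locked > 0)
		unlock_page(pages[--locked]);
	return -EAGAIN;
}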
@@ -2972,7 +3139,6 @@ void read_extent_buffer(struct extent_buffer *eb, void *dstv,
        char *dst = (char *)dstv;
        size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
        unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
-       unsigned long num_pages = num_extent_pages(eb->start, eb->len);
 
        WARN_ON(start > eb->len);
        WARN_ON(start + len > eb->start + eb->len);
@@ -2981,11 +3147,6 @@ void read_extent_buffer(struct extent_buffer *eb, void *dstv,
 
        while(len > 0) {
                page = extent_buffer_page(eb, i);
-               if (!PageUptodate(page)) {
-                       printk("page %lu not up to date i %lu, total %lu, len %lu\n", page->index, i, num_pages, eb->len);
-                       WARN_ON(1);
-               }
-               WARN_ON(!PageUptodate(page));
 
                cur = min(len, (PAGE_CACHE_SIZE - offset));
                kaddr = kmap_atomic(page, KM_USER1);
@@ -3029,7 +3190,6 @@ printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n", eb->start, eb->len,
        }
 
        p = extent_buffer_page(eb, i);
-       WARN_ON(!PageUptodate(p));
        kaddr = kmap_atomic(p, km);
        *token = kaddr;
        *map = kaddr + offset;
@@ -3089,7 +3249,6 @@ int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
 
        while(len > 0) {
                page = extent_buffer_page(eb, i);
-               WARN_ON(!PageUptodate(page));
 
                cur = min(len, (PAGE_CACHE_SIZE - offset));