Revert "[XFS] use scalable vmap API"

diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index a49dd8d..0b2177a 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -58,7 +58,7 @@ xfs_buf_trace(
                bp, id,
                (void *)(unsigned long)bp->b_flags,
                (void *)(unsigned long)bp->b_hold.counter,
-               (void *)(unsigned long)bp->b_sema.count.counter,
+               (void *)(unsigned long)bp->b_sema.count,
                (void *)current,
                data, ra,
                (void *)(unsigned long)((bp->b_file_offset>>32) & 0xffffffff),
@@ -166,75 +166,6 @@ test_page_region(
 }
 
 /*
- *     Mapping of multi-page buffers into contiguous virtual space
- */
-
-typedef struct a_list {
-       void            *vm_addr;
-       struct a_list   *next;
-} a_list_t;
-
-static a_list_t                *as_free_head;
-static int             as_list_len;
-static DEFINE_SPINLOCK(as_lock);
-
-/*
- *     Try to batch vunmaps because they are costly.
- */
-STATIC void
-free_address(
-       void            *addr)
-{
-       a_list_t        *aentry;
-
-#ifdef CONFIG_XEN
-       /*
-        * Xen needs to be able to make sure it can get an exclusive
-        * RO mapping of pages it wants to turn into a pagetable.  If
-        * a newly allocated page is also still being vmap()ed by xfs,
-        * it will cause pagetable construction to fail.  This is a
-        * quick workaround to always eagerly unmap pages so that Xen
-        * is happy.
-        */
-       vunmap(addr);
-       return;
-#endif
-
-       aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
-       if (likely(aentry)) {
-               spin_lock(&as_lock);
-               aentry->next = as_free_head;
-               aentry->vm_addr = addr;
-               as_free_head = aentry;
-               as_list_len++;
-               spin_unlock(&as_lock);
-       } else {
-               vunmap(addr);
-       }
-}
-
-STATIC void
-purge_addresses(void)
-{
-       a_list_t        *aentry, *old;
-
-       if (as_free_head == NULL)
-               return;
-
-       spin_lock(&as_lock);
-       aentry = as_free_head;
-       as_free_head = NULL;
-       as_list_len = 0;
-       spin_unlock(&as_lock);
-
-       while ((old = aentry) != NULL) {
-               vunmap(aentry->vm_addr);
-               aentry = aentry->next;
-               kfree(old);
-       }
-}
-
-/*
  *     Internal xfs_buf_t object manipulation
  */
 
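
The subject's "scalable vmap API" refers to vm_map_ram()/vm_unmap_ram().
With the batching helpers above deleted, the later hunks fall back to plain
vmap()/vunmap(). For comparison, a hedged sketch of what the
vm_map_ram()-based mapping would have looked like (assumed shape, not shown
in this diff):

    /* map at buffer setup; -1 = any NUMA node */
    bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
                            -1, PAGE_KERNEL);

    /* unmap at buffer teardown */
    vm_unmap_ram(bp->b_addr - bp->b_offset, bp->b_page_count);

The open-coded list exists because vunmap() is costly (as the deleted
comment says); vm_unmap_ram() does its own deferred, batched unmapping,
which is what made the list redundant in the commit being reverted.
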
@@ -253,7 +184,7 @@ _xfs_buf_initialize(
 
        memset(bp, 0, sizeof(xfs_buf_t));
        atomic_set(&bp->b_hold, 1);
-       init_MUTEX_LOCKED(&bp->b_iodonesema);
+       init_completion(&bp->b_iowait);
        INIT_LIST_HEAD(&bp->b_list);
        INIT_LIST_HEAD(&bp->b_hash_list);
        init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */
@@ -310,8 +241,7 @@ _xfs_buf_free_pages(
        xfs_buf_t       *bp)
 {
        if (bp->b_pages != bp->b_page_array) {
-               kmem_free(bp->b_pages,
-                         bp->b_page_count * sizeof(struct page *));
+               kmem_free(bp->b_pages);
        }
 }
 
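
kmem_free() calls throughout this diff lose their size argument; the xfs
wrapper no longer needs it once it simply dispatches on the pointer type.
Roughly (a sketch of the era's fs/xfs/linux-2.6/kmem.c, abridged):

    void
    kmem_free(const void *ptr)
    {
            if (!is_vmalloc_addr(ptr))
                    kfree(ptr);
            else
                    vfree(ptr);
    }
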
@@ -334,7 +264,7 @@ xfs_buf_free(
                uint            i;
 
                if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
-                       free_address(bp->b_addr - bp->b_offset);
+                       vunmap(bp->b_addr - bp->b_offset);
 
                for (i = 0; i < bp->b_page_count; i++) {
                        struct page     *page = bp->b_pages[i];
@@ -402,7 +332,7 @@ _xfs_buf_lookup_pages(
                                printk(KERN_ERR
                                        "XFS: possible memory allocation "
                                        "deadlock in %s (mode:0x%x)\n",
-                                       __FUNCTION__, gfp_mask);
+                                       __func__, gfp_mask);
 
                        XFS_STATS_INC(xb_page_retries);
                        xfsbufd_wakeup(0, gfp_mask);
@@ -420,7 +350,7 @@ _xfs_buf_lookup_pages(
                        page_count--;
                        if (blocksize >= PAGE_CACHE_SIZE) {
                                if (flags & XBF_READ)
-                                       bp->b_locked = 1;
+                                       bp->b_flags |= _XBF_PAGE_LOCKED;
                        } else if (!PagePrivate(page)) {
                                if (test_page_region(page, offset, nbytes))
                                        page_count++;
@@ -431,7 +361,7 @@ _xfs_buf_lookup_pages(
                offset = 0;
        }
 
-       if (!bp->b_locked) {
+       if (!(bp->b_flags & _XBF_PAGE_LOCKED)) {
                for (i = 0; i < bp->b_page_count; i++)
                        unlock_page(bp->b_pages[i]);
        }
@@ -456,8 +386,6 @@ _xfs_buf_map_pages(
                bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
                bp->b_flags |= XBF_MAPPED;
        } else if (flags & XBF_MAPPED) {
-               if (as_list_len > 64)
-                       purge_addresses();
                bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
                                        VM_MAP, PAGE_KERNEL);
                if (unlikely(bp->b_addr == NULL))
@@ -607,7 +535,7 @@ xfs_buf_get_flags(
                error = _xfs_buf_map_pages(bp, flags);
                if (unlikely(error)) {
                        printk(KERN_WARNING "%s: failed to map pages\n",
-                                       __FUNCTION__);
+                                       __func__);
                        goto no_buffer;
                }
        }
@@ -631,6 +559,29 @@ xfs_buf_get_flags(
        return NULL;
 }
 
+STATIC int
+_xfs_buf_read(
+       xfs_buf_t               *bp,
+       xfs_buf_flags_t         flags)
+{
+       int                     status;
+
+       XB_TRACE(bp, "_xfs_buf_read", (unsigned long)flags);
+
+       ASSERT(!(flags & (XBF_DELWRI|XBF_WRITE)));
+       ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);
+
+       bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \
+                       XBF_READ_AHEAD | _XBF_RUN_QUEUES);
+       bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | \
+                       XBF_READ_AHEAD | _XBF_RUN_QUEUES);
+
+       status = xfs_buf_iorequest(bp);
+       if (!status && !(flags & XBF_ASYNC))
+               status = xfs_buf_iowait(bp);
+       return status;
+}
+
 xfs_buf_t *
 xfs_buf_read_flags(
        xfs_buftarg_t           *target,
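
_xfs_buf_read() takes over the read half of the old xfs_buf_iostart():
scrub every submission-policy flag, copy in only the caller-controlled read
bits, submit, and wait unless the caller asked for async. A hypothetical
condensed call site, matching how xfs_buf_read_flags() uses it in the next
hunk:

    bp = xfs_buf_get_flags(target, ioff, isize, flags);
    if (bp && !XFS_BUF_ISDONE(bp))
            error = _xfs_buf_read(bp, flags);  /* blocks unless XBF_ASYNC */
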
@@ -647,7 +598,7 @@ xfs_buf_read_flags(
                if (!XFS_BUF_ISDONE(bp)) {
                        XB_TRACE(bp, "read", (unsigned long)flags);
                        XFS_STATS_INC(xb_get_read);
-                       xfs_buf_iostart(bp, flags);
+                       _xfs_buf_read(bp, flags);
                } else if (flags & XBF_ASYNC) {
                        XB_TRACE(bp, "read_async", (unsigned long)flags);
                        /*
@@ -709,8 +660,7 @@ static inline struct page *
 mem_to_page(
        void                    *addr)
 {
-       if (((unsigned long)addr < VMALLOC_START) ||
-           ((unsigned long)addr >= VMALLOC_END)) {
+       if (!is_vmalloc_addr(addr)) {
                return virt_to_page(addr);
        } else {
                return vmalloc_to_page(addr);
@@ -752,11 +702,11 @@ xfs_buf_associate_memory(
                bp->b_pages[i] = mem_to_page((void *)pageaddr);
                pageaddr += PAGE_CACHE_SIZE;
        }
-       bp->b_locked = 0;
 
        bp->b_count_desired = len;
        bp->b_buffer_length = buflen;
        bp->b_flags |= XBF_MAPPED;
+       bp->b_flags &= ~_XBF_PAGE_LOCKED;
 
        return 0;
 }
@@ -789,7 +739,7 @@ xfs_buf_get_noaddr(
        error = _xfs_buf_map_pages(bp, XBF_MAPPED);
        if (unlikely(error)) {
                printk(KERN_WARNING "%s: failed to map pages\n",
-                               __FUNCTION__);
+                               __func__);
                goto fail_free_mem;
        }
 
@@ -840,6 +790,7 @@ xfs_buf_rele(
                return;
        }
 
+       ASSERT(atomic_read(&bp->b_hold) > 0);
        if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) {
                if (bp->b_relse) {
                        atomic_inc(&bp->b_hold);
@@ -853,11 +804,6 @@ xfs_buf_rele(
                        spin_unlock(&hash->bh_lock);
                        xfs_buf_free(bp);
                }
-       } else {
-               /*
-                * Catch reference count leaks
-                */
-               ASSERT(atomic_read(&bp->b_hold) >= 0);
        }
 }
 
@@ -897,7 +843,7 @@ int
 xfs_buf_lock_value(
        xfs_buf_t               *bp)
 {
-       return atomic_read(&bp->b_sema.count);
+       return bp->b_sema.count;
 }
 #endif
 
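
This hunk, like the trace hunk at the top, follows the generic semaphore
rework: the count is now a plain integer guarded by sem->lock rather than
an atomic_t, so a debug-only reader peeks at it directly. The structure
this assumes:

    struct semaphore {
            spinlock_t              lock;
            unsigned int            count;
            struct list_head        wait_list;
    };
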
@@ -1007,12 +953,13 @@ xfs_buf_iodone_work(
         * We can get an EOPNOTSUPP to ordered writes.  Here we clear the
         * ordered flag and reissue them.  Because we can't tell the higher
         * layers directly that they should not issue ordered I/O anymore, they
-        * need to check if the ordered flag was cleared during I/O completion.
+        * need to check if the _XFS_BARRIER_FAILED flag was set during I/O completion.
         */
        if ((bp->b_error == EOPNOTSUPP) &&
            (bp->b_flags & (XBF_ORDERED|XBF_ASYNC)) == (XBF_ORDERED|XBF_ASYNC)) {
                XB_TRACE(bp, "ordered_retry", bp->b_iodone);
                bp->b_flags &= ~XBF_ORDERED;
+               bp->b_flags |= _XFS_BARRIER_FAILED;
                xfs_buf_iorequest(bp);
        } else if (bp->b_iodone)
                (*(bp->b_iodone))(bp);
@@ -1039,7 +986,7 @@ xfs_buf_ioend(
                        xfs_buf_iodone_work(&bp->b_iodone_work);
                }
        } else {
-               up(&bp->b_iodonesema);
+               complete(&bp->b_iowait);
        }
 }
 
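
b_iodonesema was a semaphore created locked so that xfs_buf_iowait() could
sleep in down() until I/O completion called up(). A struct completion
states that one-shot handshake directly; a minimal sketch of the pairing
(demo_* names hypothetical):

    struct demo_buf {
            struct completion       iowait;
    };

    static void demo_init(struct demo_buf *b)
    {
            init_completion(&b->iowait);            /* was init_MUTEX_LOCKED */
    }

    static void demo_ioend(struct demo_buf *b)      /* I/O completion side */
    {
            complete(&b->iowait);                   /* was up() */
    }

    static void demo_iowait(struct demo_buf *b)     /* submitter side */
    {
            wait_for_completion(&b->iowait);        /* was down() */
    }
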
@@ -1053,60 +1000,39 @@ xfs_buf_ioerror(
        XB_TRACE(bp, "ioerror", (unsigned long)error);
 }
 
-/*
- *     Initiate I/O on a buffer, based on the flags supplied.
- *     The b_iodone routine in the buffer supplied will only be called
- *     when all of the subsidiary I/O requests, if any, have been completed.
- */
 int
-xfs_buf_iostart(
-       xfs_buf_t               *bp,
-       xfs_buf_flags_t         flags)
+xfs_bawrite(
+       void                    *mp,
+       struct xfs_buf          *bp)
 {
-       int                     status = 0;
+       XB_TRACE(bp, "bawrite", 0);
 
-       XB_TRACE(bp, "iostart", (unsigned long)flags);
+       ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);
 
-       if (flags & XBF_DELWRI) {
-               bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC);
-               bp->b_flags |= flags & (XBF_DELWRI | XBF_ASYNC);
-               xfs_buf_delwri_queue(bp, 1);
-               return status;
-       }
+       xfs_buf_delwri_dequeue(bp);
 
-       bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \
-                       XBF_READ_AHEAD | _XBF_RUN_QUEUES);
-       bp->b_flags |= flags & (XBF_READ | XBF_WRITE | XBF_ASYNC | \
-                       XBF_READ_AHEAD | _XBF_RUN_QUEUES);
+       bp->b_flags &= ~(XBF_READ | XBF_DELWRI | XBF_READ_AHEAD);
+       bp->b_flags |= (XBF_WRITE | XBF_ASYNC | _XBF_RUN_QUEUES);
 
-       BUG_ON(bp->b_bn == XFS_BUF_DADDR_NULL);
+       bp->b_mount = mp;
+       bp->b_strat = xfs_bdstrat_cb;
+       return xfs_bdstrat_cb(bp);
+}
 
-       /* For writes allow an alternate strategy routine to precede
-        * the actual I/O request (which may not be issued at all in
-        * a shutdown situation, for example).
-        */
-       status = (flags & XBF_WRITE) ?
-               xfs_buf_iostrategy(bp) : xfs_buf_iorequest(bp);
+void
+xfs_bdwrite(
+       void                    *mp,
+       struct xfs_buf          *bp)
+{
+       XB_TRACE(bp, "bdwrite", 0);
 
-       /* Wait for I/O if we are not an async request.
-        * Note: async I/O request completion will release the buffer,
-        * and that can already be done by this point.  So using the
-        * buffer pointer from here on, after async I/O, is invalid.
-        */
-       if (!status && !(flags & XBF_ASYNC))
-               status = xfs_buf_iowait(bp);
+       bp->b_strat = xfs_bdstrat_cb;
+       bp->b_mount = mp;
 
-       return status;
-}
+       bp->b_flags &= ~XBF_READ;
+       bp->b_flags |= (XBF_DELWRI | XBF_ASYNC);
 
-STATIC_INLINE int
-_xfs_buf_iolocked(
-       xfs_buf_t               *bp)
-{
-       ASSERT(bp->b_flags & (XBF_READ | XBF_WRITE));
-       if (bp->b_flags & XBF_READ)
-               return bp->b_locked;
-       return 0;
+       xfs_buf_delwri_queue(bp, 1);
 }
 
 STATIC_INLINE void
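
With xfs_buf_iostart() gone, its two write modes become the dedicated
helpers above. Hypothetical before/after call sites, assuming the old flag
conventions:

    /* was: xfs_buf_iostart(bp, XBF_DELWRI | XBF_ASYNC); */
    xfs_bdwrite(mp, bp);

    /* was: xfs_buf_iostart(bp, XBF_WRITE | XBF_ASYNC | _XBF_RUN_QUEUES); */
    error = xfs_bawrite(mp, bp);
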
@@ -1115,7 +1041,7 @@ _xfs_buf_ioend(
        int                     schedule)
 {
        if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
-               bp->b_locked = 0;
+               bp->b_flags &= ~_XBF_PAGE_LOCKED;
                xfs_buf_ioend(bp, schedule);
        }
 }
@@ -1129,8 +1055,7 @@ xfs_buf_bio_end_io(
        unsigned int            blocksize = bp->b_target->bt_bsize;
        struct bio_vec          *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
 
-       if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
-               bp->b_error = EIO;
+       xfs_buf_ioerror(bp, -error);
 
        do {
                struct page     *page = bvec->bv_page;
@@ -1149,9 +1074,8 @@ xfs_buf_bio_end_io(
                if (--bvec >= bio->bi_io_vec)
                        prefetchw(&bvec->bv_page->flags);
 
-               if (_xfs_buf_iolocked(bp)) {
+               if (bp->b_flags & _XBF_PAGE_LOCKED)
                        unlock_page(page);
-               }
        } while (bvec >= bio->bi_io_vec);
 
        _xfs_buf_ioend(bp, 1);
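
The error handling above assumes the post-2.6.24 bio interface: ->bi_end_io
runs once, with a negative errno, so the handler records the result
directly instead of probing BIO_UPTODATE. A sketch of the handler shape
(demo_* names hypothetical):

    static void demo_bio_end_io(struct bio *bio, int error)
    {
            struct demo_buf *b = bio->bi_private;

            demo_ioerror(b, -error);        /* XFS keeps positive errnos */
            bio_put(bio);
    }
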
@@ -1162,13 +1086,12 @@ STATIC void
 _xfs_buf_ioapply(
        xfs_buf_t               *bp)
 {
-       int                     i, rw, map_i, total_nr_pages, nr_pages;
+       int                     rw, map_i, total_nr_pages, nr_pages;
        struct bio              *bio;
        int                     offset = bp->b_offset;
        int                     size = bp->b_count_desired;
        sector_t                sector = bp->b_bn;
        unsigned int            blocksize = bp->b_target->bt_bsize;
-       int                     locking = _xfs_buf_iolocked(bp);
 
        total_nr_pages = bp->b_page_count;
        map_i = 0;
@@ -1191,7 +1114,8 @@ _xfs_buf_ioapply(
         * filesystem block size is not smaller than the page size.
         */
        if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
-           (bp->b_flags & XBF_READ) && locking &&
+           ((bp->b_flags & (XBF_READ|_XBF_PAGE_LOCKED)) ==
+             (XBF_READ|_XBF_PAGE_LOCKED)) &&
            (blocksize >= PAGE_CACHE_SIZE)) {
                bio = bio_alloc(GFP_NOIO, 1);
 
@@ -1208,24 +1132,6 @@ _xfs_buf_ioapply(
                goto submit_io;
        }
 
-       /* Lock down the pages which we need to for the request */
-       if (locking && (bp->b_flags & XBF_WRITE) && (bp->b_locked == 0)) {
-               for (i = 0; size; i++) {
-                       int             nbytes = PAGE_CACHE_SIZE - offset;
-                       struct page     *page = bp->b_pages[i];
-
-                       if (nbytes > size)
-                               nbytes = size;
-
-                       lock_page(page);
-
-                       size -= nbytes;
-                       offset = 0;
-               }
-               offset = bp->b_offset;
-               size = bp->b_count_desired;
-       }
-
 next_chunk:
        atomic_inc(&bp->b_io_remaining);
        nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
@@ -1306,7 +1212,7 @@ xfs_buf_iowait(
        XB_TRACE(bp, "iowait", 0);
        if (atomic_read(&bp->b_io_remaining))
                blk_run_address_space(bp->b_target->bt_mapping);
-       down(&bp->b_iodonesema);
+       wait_for_completion(&bp->b_iowait);
        XB_TRACE(bp, "iowaited", (long)bp->b_error);
        return bp->b_error;
 }
@@ -1428,7 +1334,7 @@ STATIC void
 xfs_free_bufhash(
        xfs_buftarg_t           *btp)
 {
-       kmem_free(btp->bt_hash, (1<<btp->bt_hashshift) * sizeof(xfs_bufhash_t));
+       kmem_free(btp->bt_hash);
        btp->bt_hash = NULL;
 }
 
@@ -1458,13 +1364,10 @@ xfs_unregister_buftarg(
 
 void
 xfs_free_buftarg(
-       xfs_buftarg_t           *btp,
-       int                     external)
+       xfs_buftarg_t           *btp)
 {
        xfs_flush_buftarg(btp, 1);
        xfs_blkdev_issue_flush(btp);
-       if (external)
-               xfs_blkdev_put(btp->bt_bdev);
        xfs_free_bufhash(btp);
        iput(btp->bt_mapping->host);
 
@@ -1474,7 +1377,7 @@ xfs_free_buftarg(
        xfs_unregister_buftarg(btp);
        kthread_stop(btp->bt_task);
 
-       kmem_free(btp, sizeof(*btp));
+       kmem_free(btp);
 }
 
 STATIC int
@@ -1572,7 +1475,7 @@ xfs_alloc_delwrite_queue(
 
        INIT_LIST_HEAD(&btp->bt_list);
        INIT_LIST_HEAD(&btp->bt_delwrite_queue);
-       spinlock_init(&btp->bt_delwrite_lock, "delwri_lock");
+       spin_lock_init(&btp->bt_delwrite_lock);
        btp->bt_flags = 0;
        btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd");
        if (IS_ERR(btp->bt_task)) {
@@ -1605,7 +1508,7 @@ xfs_alloc_buftarg(
        return btp;
 
 error:
-       kmem_free(btp, sizeof(*btp));
+       kmem_free(btp);
        return NULL;
 }
 
@@ -1769,8 +1672,6 @@ xfsbufd(
                        count++;
                }
 
-               if (as_list_len > 0)
-                       purge_addresses();
                if (count)
                        blk_run_address_space(target->bt_mapping);
 
@@ -1833,7 +1734,7 @@ int __init
 xfs_buf_init(void)
 {
 #ifdef XFS_BUF_TRACE
-       xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_SLEEP);
+       xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_NOFS);
 #endif
 
        xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
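
The KM_SLEEP -> KM_NOFS switch presumably keeps trace-buffer allocation
from recursing into filesystem reclaim. The xfs kmem wrapper maps its flags
to gfp_t roughly as follows (abridged from the era's
fs/xfs/linux-2.6/kmem.h):

    static inline gfp_t
    kmem_flags_convert(unsigned int __nocast flags)
    {
            gfp_t   lflags;

            if (flags & KM_NOSLEEP) {
                    lflags = GFP_ATOMIC | __GFP_NOWARN;
            } else {
                    lflags = GFP_KERNEL | __GFP_NOWARN;
                    if (flags & KM_NOFS)
                            lflags &= ~__GFP_FS;
            }
            return lflags;
    }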