Revert "[XFS] use scalable vmap API"
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index ed03c6d..0b2177a 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -58,7 +58,7 @@ xfs_buf_trace(
                bp, id,
                (void *)(unsigned long)bp->b_flags,
                (void *)(unsigned long)bp->b_hold.counter,
-               (void *)(unsigned long)bp->b_sema.count.counter,
+               (void *)(unsigned long)bp->b_sema.count,
                (void *)current,
                data, ra,
                (void *)(unsigned long)((bp->b_file_offset>>32) & 0xffffffff),
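[Note on the hunk above: the generic semaphore rewrite made count a plain unsigned int, so the old count.counter dereference (which assumed an atomic_t) no longer exists. A minimal layout sketch of what the trace line now reads; it mirrors the generic semaphore header of that era, and the exact member list should be treated as an assumption rather than a quote:

	#include <linux/spinlock.h>
	#include <linux/list.h>

	/* Layout sketch mirroring include/linux/semaphore.h after the rewrite. */
	struct semaphore_layout {
		spinlock_t		lock;
		unsigned int		count;		/* bare integer, no .counter member */
		struct list_head	wait_list;
	};

Hence the trace argument reads bp->b_sema.count directly.]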
@@ -166,75 +166,6 @@ test_page_region(
 }
 
 /*
- *     Mapping of multi-page buffers into contiguous virtual space
- */
-
-typedef struct a_list {
-       void            *vm_addr;
-       struct a_list   *next;
-} a_list_t;
-
-static a_list_t                *as_free_head;
-static int             as_list_len;
-static DEFINE_SPINLOCK(as_lock);
-
-/*
- *     Try to batch vunmaps because they are costly.
- */
-STATIC void
-free_address(
-       void            *addr)
-{
-       a_list_t        *aentry;
-
-#ifdef CONFIG_XEN
-       /*
-        * Xen needs to be able to make sure it can get an exclusive
-        * RO mapping of pages it wants to turn into a pagetable.  If
-        * a newly allocated page is also still being vmap()ed by xfs,
-        * it will cause pagetable construction to fail.  This is a
-        * quick workaround to always eagerly unmap pages so that Xen
-        * is happy.
-        */
-       vunmap(addr);
-       return;
-#endif
-
-       aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
-       if (likely(aentry)) {
-               spin_lock(&as_lock);
-               aentry->next = as_free_head;
-               aentry->vm_addr = addr;
-               as_free_head = aentry;
-               as_list_len++;
-               spin_unlock(&as_lock);
-       } else {
-               vunmap(addr);
-       }
-}
-
-STATIC void
-purge_addresses(void)
-{
-       a_list_t        *aentry, *old;
-
-       if (as_free_head == NULL)
-               return;
-
-       spin_lock(&as_lock);
-       aentry = as_free_head;
-       as_free_head = NULL;
-       as_list_len = 0;
-       spin_unlock(&as_lock);
-
-       while ((old = aentry) != NULL) {
-               vunmap(aentry->vm_addr);
-               aentry = aentry->next;
-               kfree(old);
-       }
-}
-
-/*
  *     Internal xfs_buf_t object manipulation
  */
 
@@ -253,7 +184,7 @@ _xfs_buf_initialize(
 
        memset(bp, 0, sizeof(xfs_buf_t));
        atomic_set(&bp->b_hold, 1);
-       init_MUTEX_LOCKED(&bp->b_iodonesema);
+       init_completion(&bp->b_iowait);
        INIT_LIST_HEAD(&bp->b_list);
        INIT_LIST_HEAD(&bp->b_hash_list);
        init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */
@@ -333,7 +264,7 @@ xfs_buf_free(
                uint            i;
 
                if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
-                       free_address(bp->b_addr - bp->b_offset);
+                       vunmap(bp->b_addr - bp->b_offset);
 
                for (i = 0; i < bp->b_page_count; i++) {
                        struct page     *page = bp->b_pages[i];
@@ -455,8 +386,6 @@ _xfs_buf_map_pages(
                bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
                bp->b_flags |= XBF_MAPPED;
        } else if (flags & XBF_MAPPED) {
-               if (as_list_len > 64)
-                       purge_addresses();
                bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
                                        VM_MAP, PAGE_KERNEL);
                if (unlikely(bp->b_addr == NULL))
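[The commit title refers to the vm_map_ram()/vm_unmap_ram() interface that the original patch used here instead of vmap()/vunmap() plus XFS's private unmap batching. A hedged sketch contrasting the two calls as they looked around this time; the helper and its arguments are illustrative, and the vm_map_ram() signature of that era, with an explicit prot argument, is an assumption:

	#include <linux/mm.h>
	#include <linux/vmalloc.h>

	/* Illustrative only: map a buffer's pages with either interface. */
	static void *map_buf_pages(struct page **pages, unsigned int nr_pages, bool scalable)
	{
		if (scalable)
			/* Per-CPU, lazily flushed mappings ("scalable vmap API"). */
			return vm_map_ram(pages, nr_pages, -1, PAGE_KERNEL);

		/* Classic interface: one global vmap area, torn down by vunmap(). */
		return vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
	}

The teardown differs too: vm_unmap_ram(addr, nr_pages) versus a plain vunmap(addr), which is why XFS batched the costly vunmap() calls in the code removed above.]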
@@ -630,6 +559,29 @@ xfs_buf_get_flags(
        return NULL;
 }
 
+STATIC int
+_xfs_buf_read(
+       xfs_buf_t               *bp,
+       xfs_buf_flags_t         flags)
+{
+       int                     status;
+
+       XB_TRACE(bp, "_xfs_buf_read", (unsigned long)flags);
+
+       ASSERT(!(flags & (XBF_DELWRI|XBF_WRITE)));
+       ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);
+
+       bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \
+                       XBF_READ_AHEAD | _XBF_RUN_QUEUES);
+       bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | \
+                       XBF_READ_AHEAD | _XBF_RUN_QUEUES);
+
+       status = xfs_buf_iorequest(bp);
+       if (!status && !(flags & XBF_ASYNC))
+               status = xfs_buf_iowait(bp);
+       return status;
+}
+
 xfs_buf_t *
 xfs_buf_read_flags(
        xfs_buftarg_t           *target,
@@ -646,7 +598,7 @@ xfs_buf_read_flags(
                if (!XFS_BUF_ISDONE(bp)) {
                        XB_TRACE(bp, "read", (unsigned long)flags);
                        XFS_STATS_INC(xb_get_read);
-                       xfs_buf_iostart(bp, flags);
+                       _xfs_buf_read(bp, flags);
                } else if (flags & XBF_ASYNC) {
                        XB_TRACE(bp, "read_async", (unsigned long)flags);
                        /*
@@ -838,6 +790,7 @@ xfs_buf_rele(
                return;
        }
 
+       ASSERT(atomic_read(&bp->b_hold) > 0);
        if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) {
                if (bp->b_relse) {
                        atomic_inc(&bp->b_hold);
@@ -851,11 +804,6 @@ xfs_buf_rele(
                        spin_unlock(&hash->bh_lock);
                        xfs_buf_free(bp);
                }
-       } else {
-               /*
-                * Catch reference count leaks
-                */
-               ASSERT(atomic_read(&bp->b_hold) >= 0);
        }
 }
 
@@ -1005,12 +953,13 @@ xfs_buf_iodone_work(
         * We can get an EOPNOTSUPP to ordered writes.  Here we clear the
         * ordered flag and reissue them.  Because we can't tell the higher
         * layers directly that they should not issue ordered I/O anymore, they
-        * need to check if the ordered flag was cleared during I/O completion.
+        * need to check if the _XFS_BARRIER_FAILED flag was set during I/O completion.
         */
        if ((bp->b_error == EOPNOTSUPP) &&
            (bp->b_flags & (XBF_ORDERED|XBF_ASYNC)) == (XBF_ORDERED|XBF_ASYNC)) {
                XB_TRACE(bp, "ordered_retry", bp->b_iodone);
                bp->b_flags &= ~XBF_ORDERED;
+               bp->b_flags |= _XFS_BARRIER_FAILED;
                xfs_buf_iorequest(bp);
        } else if (bp->b_iodone)
                (*(bp->b_iodone))(bp);
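[The higher layer that picks this up is the log I/O completion path: seeing _XFS_BARRIER_FAILED tells it the retry went out without the barrier, so it can stop issuing barriers on that mount. A sketch of such a consumer; the wrapper function and the exact names (struct log, l_mp, XFS_MOUNT_BARRIER) follow XFS conventions of the period but are assumptions, not part of this patch:

	/* Illustrative consumer, e.g. in the log buffer iodone handler. */
	STATIC void
	demo_log_barrier_check(struct xfs_buf *bp, struct log *l)
	{
		if (bp->b_flags & _XFS_BARRIER_FAILED) {
			bp->b_flags &= ~_XFS_BARRIER_FAILED;
			l->l_mp->m_flags &= ~XFS_MOUNT_BARRIER;	/* stop issuing barriers */
		}
	}
]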
@@ -1037,7 +986,7 @@ xfs_buf_ioend(
                        xfs_buf_iodone_work(&bp->b_iodone_work);
                }
        } else {
-               up(&bp->b_iodonesema);
+               complete(&bp->b_iowait);
        }
 }
 
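[The b_iodonesema-to-b_iowait conversion swaps a semaphore initialized locked, used as a one-shot wait channel, for the completion API, the idiomatic primitive for "wait until this I/O is done". A minimal sketch of the pattern; the demo_* wrappers are illustrative, the field name b_iowait comes from this patch:

	#include <linux/completion.h>

	struct demo_buf {
		struct completion	b_iowait;
	};

	static void demo_buf_init(struct demo_buf *bp)
	{
		init_completion(&bp->b_iowait);		/* was init_MUTEX_LOCKED(&b_iodonesema) */
	}

	static void demo_buf_ioend(struct demo_buf *bp)
	{
		complete(&bp->b_iowait);		/* was up(&b_iodonesema) */
	}

	static void demo_buf_iowait(struct demo_buf *bp)
	{
		wait_for_completion(&bp->b_iowait);	/* was down(&b_iodonesema) */
	}
]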
@@ -1051,50 +1000,39 @@ xfs_buf_ioerror(
        XB_TRACE(bp, "ioerror", (unsigned long)error);
 }
 
-/*
- *     Initiate I/O on a buffer, based on the flags supplied.
- *     The b_iodone routine in the buffer supplied will only be called
- *     when all of the subsidiary I/O requests, if any, have been completed.
- */
 int
-xfs_buf_iostart(
-       xfs_buf_t               *bp,
-       xfs_buf_flags_t         flags)
+xfs_bawrite(
+       void                    *mp,
+       struct xfs_buf          *bp)
 {
-       int                     status = 0;
+       XB_TRACE(bp, "bawrite", 0);
 
-       XB_TRACE(bp, "iostart", (unsigned long)flags);
+       ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);
 
-       if (flags & XBF_DELWRI) {
-               bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC);
-               bp->b_flags |= flags & (XBF_DELWRI | XBF_ASYNC);
-               xfs_buf_delwri_queue(bp, 1);
-               return 0;
-       }
+       xfs_buf_delwri_dequeue(bp);
 
-       bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \
-                       XBF_READ_AHEAD | _XBF_RUN_QUEUES);
-       bp->b_flags |= flags & (XBF_READ | XBF_WRITE | XBF_ASYNC | \
-                       XBF_READ_AHEAD | _XBF_RUN_QUEUES);
+       bp->b_flags &= ~(XBF_READ | XBF_DELWRI | XBF_READ_AHEAD);
+       bp->b_flags |= (XBF_WRITE | XBF_ASYNC | _XBF_RUN_QUEUES);
 
-       BUG_ON(bp->b_bn == XFS_BUF_DADDR_NULL);
+       bp->b_mount = mp;
+       bp->b_strat = xfs_bdstrat_cb;
+       return xfs_bdstrat_cb(bp);
+}
 
-       /* For writes allow an alternate strategy routine to precede
-        * the actual I/O request (which may not be issued at all in
-        * a shutdown situation, for example).
-        */
-       status = (flags & XBF_WRITE) ?
-               xfs_buf_iostrategy(bp) : xfs_buf_iorequest(bp);
+void
+xfs_bdwrite(
+       void                    *mp,
+       struct xfs_buf          *bp)
+{
+       XB_TRACE(bp, "bdwrite", 0);
 
-       /* Wait for I/O if we are not an async request.
-        * Note: async I/O request completion will release the buffer,
-        * and that can already be done by this point.  So using the
-        * buffer pointer from here on, after async I/O, is invalid.
-        */
-       if (!status && !(flags & XBF_ASYNC))
-               status = xfs_buf_iowait(bp);
+       bp->b_strat = xfs_bdstrat_cb;
+       bp->b_mount = mp;
 
-       return status;
+       bp->b_flags &= ~XBF_READ;
+       bp->b_flags |= (XBF_DELWRI | XBF_ASYNC);
+
+       xfs_buf_delwri_queue(bp, 1);
 }
 
 STATIC_INLINE void
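[With xfs_buf_iostart() gone, its flag-driven write paths split into two dedicated helpers: xfs_bdwrite() queues a delayed write for xfsbufd to flush later, while xfs_bawrite() pulls the buffer off the delwri queue and issues it asynchronously at once. A hypothetical shim showing how the old flag combinations map onto the new entry points; the function below is illustrative only and not part of the patch:

	/* Illustrative only: old XBF_DELWRI and XBF_WRITE|XBF_ASYNC callers map to: */
	static int xfs_buf_write_compat(void *mp, struct xfs_buf *bp, xfs_buf_flags_t flags)
	{
		if (flags & XBF_DELWRI) {
			xfs_bdwrite(mp, bp);		/* queue; flushed later by xfsbufd */
			return 0;
		}
		return xfs_bawrite(mp, bp);		/* dequeue and issue the async write now */
	}
]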
@@ -1117,8 +1055,7 @@ xfs_buf_bio_end_io(
        unsigned int            blocksize = bp->b_target->bt_bsize;
        struct bio_vec          *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
 
-       if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
-               bp->b_error = EIO;
+       xfs_buf_ioerror(bp, -error);
 
        do {
                struct page     *page = bvec->bv_page;
@@ -1275,7 +1212,7 @@ xfs_buf_iowait(
        XB_TRACE(bp, "iowait", 0);
        if (atomic_read(&bp->b_io_remaining))
                blk_run_address_space(bp->b_target->bt_mapping);
-       down(&bp->b_iodonesema);
+       wait_for_completion(&bp->b_iowait);
        XB_TRACE(bp, "iowaited", (long)bp->b_error);
        return bp->b_error;
 }
@@ -1427,13 +1364,10 @@ xfs_unregister_buftarg(
 
 void
 xfs_free_buftarg(
-       xfs_buftarg_t           *btp,
-       int                     external)
+       xfs_buftarg_t           *btp)
 {
        xfs_flush_buftarg(btp, 1);
        xfs_blkdev_issue_flush(btp);
-       if (external)
-               xfs_blkdev_put(btp->bt_bdev);
        xfs_free_bufhash(btp);
        iput(btp->bt_mapping->host);
 
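[Dropping the external argument moves the block-device release out of xfs_free_buftarg(), so the unmount path drops the reference itself after freeing the target. A hedged sketch of such a caller; the mount-structure field names (m_logdev_targp, m_ddev_targp, bt_bdev) follow XFS conventions but the function is illustrative, not taken from this patch:

	/* Illustrative caller, e.g. while closing devices at unmount. */
	static void demo_close_log_device(struct xfs_mount *mp)
	{
		if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
			struct block_device *logdev = mp->m_logdev_targp->bt_bdev;

			xfs_free_buftarg(mp->m_logdev_targp);
			xfs_blkdev_put(logdev);		/* caller now drops the reference */
		}
	}
]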
@@ -1738,8 +1672,6 @@ xfsbufd(
                        count++;
                }
 
-               if (as_list_len > 0)
-                       purge_addresses();
                if (count)
                        blk_run_address_space(target->bt_mapping);
 
@@ -1802,7 +1734,7 @@ int __init
 xfs_buf_init(void)
 {
 #ifdef XFS_BUF_TRACE
-       xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_SLEEP);
+       xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_NOFS);
 #endif
 
        xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",