/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/blkdev.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/migrate.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>

#include "xfs_sb.h"
#include "xfs_inum.h"
#include "xfs_ag.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"

static kmem_zone_t *xfs_buf_zone;
STATIC int xfsbufd(void *);
STATIC int xfsbufd_wakeup(int, gfp_t);
STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
static struct shrinker xfs_buf_shake = {
        .shrink = xfsbufd_wakeup,
        .seeks = DEFAULT_SEEKS,
};

static struct workqueue_struct *xfslogd_workqueue;
struct workqueue_struct *xfsdatad_workqueue;

#ifdef XFS_BUF_TRACE
void
xfs_buf_trace(
        xfs_buf_t       *bp,
        char            *id,
        void            *data,
        void            *ra)
{
        ktrace_enter(xfs_buf_trace_buf,
                bp, id,
                (void *)(unsigned long)bp->b_flags,
                (void *)(unsigned long)bp->b_hold.counter,
                (void *)(unsigned long)bp->b_sema.count,
                (void *)current,
                data, ra,
                (void *)(unsigned long)((bp->b_file_offset>>32) & 0xffffffff),
                (void *)(unsigned long)(bp->b_file_offset & 0xffffffff),
                (void *)(unsigned long)bp->b_buffer_length,
                NULL, NULL, NULL, NULL, NULL);
}
ktrace_t *xfs_buf_trace_buf;
#define XFS_BUF_TRACE_SIZE      4096
#define XB_TRACE(bp, id, data)  \
        xfs_buf_trace(bp, id, (void *)data, (void *)__builtin_return_address(0))
#else
#define XB_TRACE(bp, id, data)  do { } while (0)
#endif

#ifdef XFS_BUF_LOCK_TRACKING
# define XB_SET_OWNER(bp)       ((bp)->b_last_holder = current->pid)
# define XB_CLEAR_OWNER(bp)     ((bp)->b_last_holder = -1)
# define XB_GET_OWNER(bp)       ((bp)->b_last_holder)
#else
# define XB_SET_OWNER(bp)       do { } while (0)
# define XB_CLEAR_OWNER(bp)     do { } while (0)
# define XB_GET_OWNER(bp)       do { } while (0)
#endif

#define xb_to_gfp(flags) \
        ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
          ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)

#define xb_to_km(flags) \
         (((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)

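/*
 * Worked reading of the two macros above (derived from the macros
 * themselves, not a separate policy statement):
 *
 *      XBF_READ_AHEAD  -> __GFP_NORETRY | __GFP_NOWARN  (fail fast, quietly)
 *      XBF_DONT_BLOCK  -> GFP_NOFS | __GFP_NOWARN       (no fs recursion)
 *      otherwise       -> GFP_KERNEL | __GFP_NOWARN
 *
 * and on the slab side, XBF_DONT_BLOCK -> KM_NOFS, else KM_SLEEP.
 */
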
#define xfs_buf_allocate(flags) \
        kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags))
#define xfs_buf_deallocate(bp) \
        kmem_zone_free(xfs_buf_zone, (bp));

/*
 *      Page Region interfaces.
 *
 *      For pages in filesystems where the blocksize is smaller than the
 *      pagesize, we use the page->private field (long) to hold a bitmap
 *      of uptodate regions within the page.
 *
 *      Each such region is "bytes per page / bits per long" bytes long.
 *
 *      NBPPR == number-of-bytes-per-page-region
 *      BTOPR == bytes-to-page-region (rounded up)
 *      BTOPRT == bytes-to-page-region-truncated (rounded down)
 */
#if (BITS_PER_LONG == 32)
#define PRSHIFT         (PAGE_CACHE_SHIFT - 5)  /* (32 == 1<<5) */
#elif (BITS_PER_LONG == 64)
#define PRSHIFT         (PAGE_CACHE_SHIFT - 6)  /* (64 == 1<<6) */
#else
#error BITS_PER_LONG must be 32 or 64
#endif
#define NBPPR           (PAGE_CACHE_SIZE/BITS_PER_LONG)
#define BTOPR(b)        (((unsigned int)(b) + (NBPPR - 1)) >> PRSHIFT)
#define BTOPRT(b)       (((unsigned int)(b) >> PRSHIFT))

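/*
 * Worked example (assuming 4K pages and 64-bit longs, so PRSHIFT == 6
 * and NBPPR == 64 bytes per region):
 *
 *      BTOPR(100)  == (100 + 63) >> 6 == 2     (regions, rounded up)
 *      BTOPRT(100) == 100 >> 6        == 1     (regions, rounded down)
 */
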
STATIC unsigned long
page_region_mask(
        size_t          offset,
        size_t          length)
{
        unsigned long   mask;
        int             first, final;

        first = BTOPR(offset);
        final = BTOPRT(offset + length - 1);
        first = min(first, final);

        mask = ~0UL;
        mask <<= BITS_PER_LONG - (final - first + 1);
        mask >>= BITS_PER_LONG - (final + 1);

        ASSERT(offset + length <= PAGE_CACHE_SIZE);
        ASSERT((final - first) < BITS_PER_LONG && (final - first) >= 0);

        return mask;
}
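
/*
 * Example of the mask construction above (4K pages, 64-bit longs):
 * offset == 0, length == 2048 gives first == 0, final == 31, so the
 * two shifts leave the low 32 bits set -- regions 0..31, i.e. the
 * first half of the page.
 */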

STATIC_INLINE void
set_page_region(
        struct page     *page,
        size_t          offset,
        size_t          length)
{
        set_page_private(page,
                page_private(page) | page_region_mask(offset, length));
        if (page_private(page) == ~0UL)
                SetPageUptodate(page);
}

STATIC_INLINE int
test_page_region(
        struct page     *page,
        size_t          offset,
        size_t          length)
{
        unsigned long   mask = page_region_mask(offset, length);

        return (mask && (page_private(page) & mask) == mask);
}

/*
 *      Mapping of multi-page buffers into contiguous virtual space
 */

typedef struct a_list {
        void            *vm_addr;
        struct a_list   *next;
} a_list_t;

static a_list_t         *as_free_head;
static int              as_list_len;
static DEFINE_SPINLOCK(as_lock);

/*
 *      Try to batch vunmaps because they are costly.
 */
STATIC void
free_address(
        void            *addr)
{
        a_list_t        *aentry;

#ifdef CONFIG_XEN
        /*
         * Xen needs to be able to make sure it can get an exclusive
         * RO mapping of pages it wants to turn into a pagetable.  If
         * a newly allocated page is also still being vmap()ed by xfs,
         * it will cause pagetable construction to fail.  This is a
         * quick workaround to always eagerly unmap pages so that Xen
         * is happy.
         */
        vunmap(addr);
        return;
#endif

        aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
        if (likely(aentry)) {
                spin_lock(&as_lock);
                aentry->next = as_free_head;
                aentry->vm_addr = addr;
                as_free_head = aentry;
                as_list_len++;
                spin_unlock(&as_lock);
        } else {
                vunmap(addr);
        }
}

STATIC void
purge_addresses(void)
{
        a_list_t        *aentry, *old;

        if (as_free_head == NULL)
                return;

        spin_lock(&as_lock);
        aentry = as_free_head;
        as_free_head = NULL;
        as_list_len = 0;
        spin_unlock(&as_lock);

        while ((old = aentry) != NULL) {
                vunmap(aentry->vm_addr);
                aentry = aentry->next;
                kfree(old);
        }
}

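/*
 * Note on the batching above: free_address() only queues the address for
 * a later vunmap(); purge_addresses() does the actual unmapping in one
 * pass.  The purge is driven from _xfs_buf_map_pages() below once
 * as_list_len grows past 64.
 */
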
/*
 *      Internal xfs_buf_t object manipulation
 */

STATIC void
_xfs_buf_initialize(
        xfs_buf_t               *bp,
        xfs_buftarg_t           *target,
        xfs_off_t               range_base,
        size_t                  range_length,
        xfs_buf_flags_t         flags)
{
        /*
         * We don't want certain flags to appear in b_flags.
         */
        flags &= ~(XBF_LOCK|XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);

        memset(bp, 0, sizeof(xfs_buf_t));
        atomic_set(&bp->b_hold, 1);
        init_completion(&bp->b_iowait);
        INIT_LIST_HEAD(&bp->b_list);
        INIT_LIST_HEAD(&bp->b_hash_list);
        init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */
        XB_SET_OWNER(bp);
        bp->b_target = target;
        bp->b_file_offset = range_base;
        /*
         * Set buffer_length and count_desired to the same value initially.
         * I/O routines should use count_desired, which will be the same in
         * most cases but may be reset (e.g. XFS recovery).
         */
        bp->b_buffer_length = bp->b_count_desired = range_length;
        bp->b_flags = flags;
        bp->b_bn = XFS_BUF_DADDR_NULL;
        atomic_set(&bp->b_pin_count, 0);
        init_waitqueue_head(&bp->b_waiters);

        XFS_STATS_INC(xb_create);
        XB_TRACE(bp, "initialize", target);
}

/*
 *      Allocate a page array capable of holding a specified number
 *      of pages, and point the page buf at it.
 */
STATIC int
_xfs_buf_get_pages(
        xfs_buf_t               *bp,
        int                     page_count,
        xfs_buf_flags_t         flags)
{
        /* Make sure that we have a page list */
        if (bp->b_pages == NULL) {
                bp->b_offset = xfs_buf_poff(bp->b_file_offset);
                bp->b_page_count = page_count;
                if (page_count <= XB_PAGES) {
                        bp->b_pages = bp->b_page_array;
                } else {
                        bp->b_pages = kmem_alloc(sizeof(struct page *) *
                                        page_count, xb_to_km(flags));
                        if (bp->b_pages == NULL)
                                return -ENOMEM;
                }
                memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
        }
        return 0;
}

/*
 *      Frees b_pages if it was allocated.
 */
STATIC void
_xfs_buf_free_pages(
        xfs_buf_t       *bp)
{
        if (bp->b_pages != bp->b_page_array) {
                kmem_free(bp->b_pages);
        }
}

/*
 *      Releases the specified buffer.
 *
 *      The modification state of any associated pages is left unchanged.
 *      The buffer must not be on any hash - use xfs_buf_rele instead for
 *      hashed and refcounted buffers
 */
void
xfs_buf_free(
        xfs_buf_t               *bp)
{
        XB_TRACE(bp, "free", 0);

        ASSERT(list_empty(&bp->b_hash_list));

        if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) {
                uint            i;

                if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
                        free_address(bp->b_addr - bp->b_offset);

                for (i = 0; i < bp->b_page_count; i++) {
                        struct page     *page = bp->b_pages[i];

                        if (bp->b_flags & _XBF_PAGE_CACHE)
                                ASSERT(!PagePrivate(page));
                        page_cache_release(page);
                }
                _xfs_buf_free_pages(bp);
        }

        xfs_buf_deallocate(bp);
}

/*
 *      Finds all pages for the buffer in question and builds its page list.
 */
STATIC int
_xfs_buf_lookup_pages(
        xfs_buf_t               *bp,
        uint                    flags)
{
        struct address_space    *mapping = bp->b_target->bt_mapping;
        size_t                  blocksize = bp->b_target->bt_bsize;
        size_t                  size = bp->b_count_desired;
        size_t                  nbytes, offset;
        gfp_t                   gfp_mask = xb_to_gfp(flags);
        unsigned short          page_count, i;
        pgoff_t                 first;
        xfs_off_t               end;
        int                     error;

        end = bp->b_file_offset + bp->b_buffer_length;
        page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);

        error = _xfs_buf_get_pages(bp, page_count, flags);
        if (unlikely(error))
                return error;
        bp->b_flags |= _XBF_PAGE_CACHE;

        offset = bp->b_offset;
        first = bp->b_file_offset >> PAGE_CACHE_SHIFT;

        for (i = 0; i < bp->b_page_count; i++) {
                struct page     *page;
                uint            retries = 0;

              retry:
                page = find_or_create_page(mapping, first + i, gfp_mask);
                if (unlikely(page == NULL)) {
                        if (flags & XBF_READ_AHEAD) {
                                bp->b_page_count = i;
                                for (i = 0; i < bp->b_page_count; i++)
                                        unlock_page(bp->b_pages[i]);
                                return -ENOMEM;
                        }

                        /*
                         * This could deadlock.
                         *
                         * But until all the XFS lowlevel code is revamped to
                         * handle buffer allocation failures we can't do much.
                         */
                        if (!(++retries % 100))
                                printk(KERN_ERR
                                        "XFS: possible memory allocation "
                                        "deadlock in %s (mode:0x%x)\n",
                                        __func__, gfp_mask);

                        XFS_STATS_INC(xb_page_retries);
                        xfsbufd_wakeup(0, gfp_mask);
                        congestion_wait(WRITE, HZ/50);
                        goto retry;
                }

                XFS_STATS_INC(xb_page_found);

                nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
                size -= nbytes;

                ASSERT(!PagePrivate(page));
                if (!PageUptodate(page)) {
                        page_count--;
                        if (blocksize >= PAGE_CACHE_SIZE) {
                                if (flags & XBF_READ)
                                        bp->b_flags |= _XBF_PAGE_LOCKED;
                        } else if (!PagePrivate(page)) {
                                if (test_page_region(page, offset, nbytes))
                                        page_count++;
                        }
                }

                bp->b_pages[i] = page;
                offset = 0;
        }

        if (!(bp->b_flags & _XBF_PAGE_LOCKED)) {
                for (i = 0; i < bp->b_page_count; i++)
                        unlock_page(bp->b_pages[i]);
        }

        if (page_count == bp->b_page_count)
                bp->b_flags |= XBF_DONE;

        XB_TRACE(bp, "lookup_pages", (long)page_count);
        return error;
}

/*
 *      Map buffer into kernel address-space if necessary.
 */
STATIC int
_xfs_buf_map_pages(
        xfs_buf_t               *bp,
        uint                    flags)
{
        /* A single page buffer is always mappable */
        if (bp->b_page_count == 1) {
                bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
                bp->b_flags |= XBF_MAPPED;
        } else if (flags & XBF_MAPPED) {
                if (as_list_len > 64)
                        purge_addresses();
                bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
                                        VM_MAP, PAGE_KERNEL);
                if (unlikely(bp->b_addr == NULL))
                        return -ENOMEM;
                bp->b_addr += bp->b_offset;
                bp->b_flags |= XBF_MAPPED;
        }

        return 0;
}

/*
 *      Finding and Reading Buffers
 */

/*
 *      Looks up, and creates if absent, a lockable buffer for
 *      a given range of an inode.  The buffer is returned
 *      locked.  If other overlapping buffers exist, they are
 *      released before the new buffer is created and locked,
 *      which may imply that this call will block until those buffers
 *      are unlocked.  No I/O is implied by this call.
 */
xfs_buf_t *
_xfs_buf_find(
        xfs_buftarg_t           *btp,   /* block device target          */
        xfs_off_t               ioff,   /* starting offset of range     */
        size_t                  isize,  /* length of range              */
        xfs_buf_flags_t         flags,
        xfs_buf_t               *new_bp)
{
        xfs_off_t               range_base;
        size_t                  range_length;
        xfs_bufhash_t           *hash;
        xfs_buf_t               *bp, *n;

        range_base = (ioff << BBSHIFT);
        range_length = (isize << BBSHIFT);

        /* Check for IOs smaller than the sector size / not sector aligned */
        ASSERT(!(range_length < (1 << btp->bt_sshift)));
        ASSERT(!(range_base & (xfs_off_t)btp->bt_smask));

        hash = &btp->bt_hash[hash_long((unsigned long)ioff, btp->bt_hashshift)];

        spin_lock(&hash->bh_lock);

        list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
                ASSERT(btp == bp->b_target);
                if (bp->b_file_offset == range_base &&
                    bp->b_buffer_length == range_length) {
                        /*
                         * If we look at something, bring it to the
                         * front of the list for next time.
                         */
                        atomic_inc(&bp->b_hold);
                        list_move(&bp->b_hash_list, &hash->bh_list);
                        goto found;
                }
        }

        /* No match found */
        if (new_bp) {
                _xfs_buf_initialize(new_bp, btp, range_base,
                                range_length, flags);
                new_bp->b_hash = hash;
                list_add(&new_bp->b_hash_list, &hash->bh_list);
        } else {
                XFS_STATS_INC(xb_miss_locked);
        }

        spin_unlock(&hash->bh_lock);
        return new_bp;

found:
        spin_unlock(&hash->bh_lock);

        /* Attempt to get the semaphore without sleeping,
         * if this does not work then we need to drop the
         * spinlock and do a hard attempt on the semaphore.
         */
        if (down_trylock(&bp->b_sema)) {
                if (!(flags & XBF_TRYLOCK)) {
                        /* wait for buffer ownership */
                        XB_TRACE(bp, "get_lock", 0);
                        xfs_buf_lock(bp);
                        XFS_STATS_INC(xb_get_locked_waited);
                } else {
                        /* We asked for a trylock and failed, no need
                         * to look at file offset and length here, we
                         * know that this buffer at least overlaps our
                         * buffer and is locked, therefore our buffer
                         * either does not exist, or is this buffer.
                         */
                        xfs_buf_rele(bp);
                        XFS_STATS_INC(xb_busy_locked);
                        return NULL;
                }
        } else {
                /* trylock worked */
                XB_SET_OWNER(bp);
        }

        if (bp->b_flags & XBF_STALE) {
                ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
                bp->b_flags &= XBF_MAPPED;
        }
        XB_TRACE(bp, "got_lock", 0);
        XFS_STATS_INC(xb_get_locked);
        return bp;
}

/*
 *      Assembles a buffer covering the specified range.
 *      Storage in memory for all portions of the buffer will be allocated,
 *      although backing storage may not be.
 */
xfs_buf_t *
xfs_buf_get_flags(
        xfs_buftarg_t           *target,/* target for buffer            */
        xfs_off_t               ioff,   /* starting offset of range     */
        size_t                  isize,  /* length of range              */
        xfs_buf_flags_t         flags)
{
        xfs_buf_t               *bp, *new_bp;
        int                     error = 0, i;

        new_bp = xfs_buf_allocate(flags);
        if (unlikely(!new_bp))
                return NULL;

        bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
        if (bp == new_bp) {
                error = _xfs_buf_lookup_pages(bp, flags);
                if (error)
                        goto no_buffer;
        } else {
                xfs_buf_deallocate(new_bp);
                if (unlikely(bp == NULL))
                        return NULL;
        }

        for (i = 0; i < bp->b_page_count; i++)
                mark_page_accessed(bp->b_pages[i]);

        if (!(bp->b_flags & XBF_MAPPED)) {
                error = _xfs_buf_map_pages(bp, flags);
                if (unlikely(error)) {
                        printk(KERN_WARNING "%s: failed to map pages\n",
                                        __func__);
                        goto no_buffer;
                }
        }

        XFS_STATS_INC(xb_get);

        /*
         * Always fill in the block number now, the mapped cases can do
         * their own overlay of this later.
         */
        bp->b_bn = ioff;
        bp->b_count_desired = bp->b_buffer_length;

        XB_TRACE(bp, "get", (unsigned long)flags);
        return bp;

 no_buffer:
        if (flags & (XBF_LOCK | XBF_TRYLOCK))
                xfs_buf_unlock(bp);
        xfs_buf_rele(bp);
        return NULL;
}

STATIC int
_xfs_buf_read(
        xfs_buf_t               *bp,
        xfs_buf_flags_t         flags)
{
        int                     status;

        XB_TRACE(bp, "_xfs_buf_read", (unsigned long)flags);

        ASSERT(!(flags & (XBF_DELWRI|XBF_WRITE)));
        ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);

        bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \
                        XBF_READ_AHEAD | _XBF_RUN_QUEUES);
        bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | \
                        XBF_READ_AHEAD | _XBF_RUN_QUEUES);

        status = xfs_buf_iorequest(bp);
        if (!status && !(flags & XBF_ASYNC))
                status = xfs_buf_iowait(bp);
        return status;
}

xfs_buf_t *
xfs_buf_read_flags(
        xfs_buftarg_t           *target,
        xfs_off_t               ioff,
        size_t                  isize,
        xfs_buf_flags_t         flags)
{
        xfs_buf_t               *bp;

        flags |= XBF_READ;

        bp = xfs_buf_get_flags(target, ioff, isize, flags);
        if (bp) {
                if (!XFS_BUF_ISDONE(bp)) {
                        XB_TRACE(bp, "read", (unsigned long)flags);
                        XFS_STATS_INC(xb_get_read);
                        _xfs_buf_read(bp, flags);
                } else if (flags & XBF_ASYNC) {
                        XB_TRACE(bp, "read_async", (unsigned long)flags);
                        /*
                         * Read ahead call which is already satisfied,
                         * drop the buffer
                         */
                        goto no_buffer;
                } else {
                        XB_TRACE(bp, "read_done", (unsigned long)flags);
                        /* We do not want read in the flags */
                        bp->b_flags &= ~XBF_READ;
                }
        }

        return bp;

 no_buffer:
        if (flags & (XBF_LOCK | XBF_TRYLOCK))
                xfs_buf_unlock(bp);
        xfs_buf_rele(bp);
        return NULL;
}

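/*
 * Illustrative caller pattern (a sketch, not lifted from this file's
 * callers; "blkno" and "numblks" are placeholder names): a synchronous
 * metadata read pairs xfs_buf_read_flags() with xfs_buf_relse():
 *
 *      bp = xfs_buf_read_flags(target, blkno, numblks,
 *                              XBF_LOCK | XBF_MAPPED);
 *      if (bp) {
 *              ... use XFS_BUF_PTR(bp) ...
 *              xfs_buf_relse(bp);
 *      }
 */
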
/*
 *      If we are not low on memory then do the readahead in a deadlock
 *      safe manner.
 */
void
xfs_buf_readahead(
        xfs_buftarg_t           *target,
        xfs_off_t               ioff,
        size_t                  isize,
        xfs_buf_flags_t         flags)
{
        struct backing_dev_info *bdi;

        bdi = target->bt_mapping->backing_dev_info;
        if (bdi_read_congested(bdi))
                return;

        flags |= (XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD);
        xfs_buf_read_flags(target, ioff, isize, flags);
}

xfs_buf_t *
xfs_buf_get_empty(
        size_t                  len,
        xfs_buftarg_t           *target)
{
        xfs_buf_t               *bp;

        bp = xfs_buf_allocate(0);
        if (bp)
                _xfs_buf_initialize(bp, target, 0, len, 0);
        return bp;
}

static inline struct page *
mem_to_page(
        void                    *addr)
{
        if ((!is_vmalloc_addr(addr))) {
                return virt_to_page(addr);
        } else {
                return vmalloc_to_page(addr);
        }
}

int
xfs_buf_associate_memory(
        xfs_buf_t               *bp,
        void                    *mem,
        size_t                  len)
{
        int                     rval;
        int                     i = 0;
        unsigned long           pageaddr;
        unsigned long           offset;
        size_t                  buflen;
        int                     page_count;

        pageaddr = (unsigned long)mem & PAGE_CACHE_MASK;
        offset = (unsigned long)mem - pageaddr;
        buflen = PAGE_CACHE_ALIGN(len + offset);
        page_count = buflen >> PAGE_CACHE_SHIFT;

        /* Free any previous set of page pointers */
        if (bp->b_pages)
                _xfs_buf_free_pages(bp);

        bp->b_pages = NULL;
        bp->b_addr = mem;

        rval = _xfs_buf_get_pages(bp, page_count, 0);
        if (rval)
                return rval;

        bp->b_offset = offset;

        for (i = 0; i < bp->b_page_count; i++) {
                bp->b_pages[i] = mem_to_page((void *)pageaddr);
                pageaddr += PAGE_CACHE_SIZE;
        }

        bp->b_count_desired = len;
        bp->b_buffer_length = buflen;
        bp->b_flags |= XBF_MAPPED;
        bp->b_flags &= ~_XBF_PAGE_LOCKED;

        return 0;
}

xfs_buf_t *
xfs_buf_get_noaddr(
        size_t                  len,
        xfs_buftarg_t           *target)
{
        unsigned long           page_count = PAGE_ALIGN(len) >> PAGE_SHIFT;
        int                     error, i;
        xfs_buf_t               *bp;

        bp = xfs_buf_allocate(0);
        if (unlikely(bp == NULL))
                goto fail;
        _xfs_buf_initialize(bp, target, 0, len, 0);

        error = _xfs_buf_get_pages(bp, page_count, 0);
        if (error)
                goto fail_free_buf;

        for (i = 0; i < page_count; i++) {
                bp->b_pages[i] = alloc_page(GFP_KERNEL);
                if (!bp->b_pages[i])
                        goto fail_free_mem;
        }
        bp->b_flags |= _XBF_PAGES;

        error = _xfs_buf_map_pages(bp, XBF_MAPPED);
        if (unlikely(error)) {
                printk(KERN_WARNING "%s: failed to map pages\n",
                                __func__);
                goto fail_free_mem;
        }

        xfs_buf_unlock(bp);

        XB_TRACE(bp, "no_daddr", len);
        return bp;

 fail_free_mem:
        while (--i >= 0)
                __free_page(bp->b_pages[i]);
        _xfs_buf_free_pages(bp);
 fail_free_buf:
        xfs_buf_deallocate(bp);
 fail:
        return NULL;
}

/*
 *      Increment reference count on buffer, to hold the buffer concurrently
 *      with another thread which may release (free) the buffer asynchronously.
 *      Must hold the buffer already to call this function.
 */
void
xfs_buf_hold(
        xfs_buf_t               *bp)
{
        atomic_inc(&bp->b_hold);
        XB_TRACE(bp, "hold", 0);
}

/*
 *      Releases a hold on the specified buffer.  If the
 *      hold count is 1, calls xfs_buf_free.
 */
void
xfs_buf_rele(
        xfs_buf_t               *bp)
{
        xfs_bufhash_t           *hash = bp->b_hash;

        XB_TRACE(bp, "rele", bp->b_relse);

        if (unlikely(!hash)) {
                ASSERT(!bp->b_relse);
                if (atomic_dec_and_test(&bp->b_hold))
                        xfs_buf_free(bp);
                return;
        }

        ASSERT(atomic_read(&bp->b_hold) > 0);
        if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) {
                if (bp->b_relse) {
                        atomic_inc(&bp->b_hold);
                        spin_unlock(&hash->bh_lock);
                        (*(bp->b_relse)) (bp);
                } else if (bp->b_flags & XBF_FS_MANAGED) {
                        spin_unlock(&hash->bh_lock);
                } else {
                        ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
                        list_del_init(&bp->b_hash_list);
                        spin_unlock(&hash->bh_lock);
                        xfs_buf_free(bp);
                }
        }
}


/*
 *      Mutual exclusion on buffers.  Locking model:
 *
 *      Buffers associated with inodes for which buffer locking
 *      is not enabled are not protected by semaphores, and are
 *      assumed to be exclusively owned by the caller.  There is a
 *      spinlock in the buffer, used by the caller when concurrent
 *      access is possible.
 */

/*
 *      Locks a buffer object, if it is not already locked.
 *      Note that this in no way locks the underlying pages, so it is only
 *      useful for synchronizing concurrent use of buffer objects, not for
 *      synchronizing independent access to the underlying pages.
 */
int
xfs_buf_cond_lock(
        xfs_buf_t               *bp)
{
        int                     locked;

        locked = down_trylock(&bp->b_sema) == 0;
        if (locked) {
                XB_SET_OWNER(bp);
        }
        XB_TRACE(bp, "cond_lock", (long)locked);
        return locked ? 0 : -EBUSY;
}
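
/*
 * Note the return convention: 0 on success, -EBUSY if the trylock failed.
 * Callers therefore test with logical negation, as xfs_buf_delwri_split()
 * below does:
 *
 *      if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) {
 *              ... we own the buffer lock here ...
 *      }
 */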

#if defined(DEBUG) || defined(XFS_BLI_TRACE)
int
xfs_buf_lock_value(
        xfs_buf_t               *bp)
{
        return bp->b_sema.count;
}
#endif

/*
 *      Locks a buffer object.
 *      Note that this in no way locks the underlying pages, so it is only
 *      useful for synchronizing concurrent use of buffer objects, not for
 *      synchronizing independent access to the underlying pages.
 */
void
xfs_buf_lock(
        xfs_buf_t               *bp)
{
        XB_TRACE(bp, "lock", 0);
        if (atomic_read(&bp->b_io_remaining))
                blk_run_address_space(bp->b_target->bt_mapping);
        down(&bp->b_sema);
        XB_SET_OWNER(bp);
        XB_TRACE(bp, "locked", 0);
}

/*
 *      Releases the lock on the buffer object.
 *      If the buffer is marked delwri but is not queued, do so before we
 *      unlock the buffer as we need to set flags correctly.  We also need to
 *      take a reference for the delwri queue because the unlocker is going to
 *      drop theirs and they don't know we just queued it.
 */
void
xfs_buf_unlock(
        xfs_buf_t               *bp)
{
        if ((bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)) == XBF_DELWRI) {
                atomic_inc(&bp->b_hold);
                bp->b_flags |= XBF_ASYNC;
                xfs_buf_delwri_queue(bp, 0);
        }

        XB_CLEAR_OWNER(bp);
        up(&bp->b_sema);
        XB_TRACE(bp, "unlock", 0);
}


/*
 *      Pinning Buffer Storage in Memory
 *      Ensure that no attempt to force a buffer to disk will succeed.
 */
void
xfs_buf_pin(
        xfs_buf_t               *bp)
{
        atomic_inc(&bp->b_pin_count);
        XB_TRACE(bp, "pin", (long)bp->b_pin_count.counter);
}

void
xfs_buf_unpin(
        xfs_buf_t               *bp)
{
        if (atomic_dec_and_test(&bp->b_pin_count))
                wake_up_all(&bp->b_waiters);
        XB_TRACE(bp, "unpin", (long)bp->b_pin_count.counter);
}

int
xfs_buf_ispin(
        xfs_buf_t               *bp)
{
        return atomic_read(&bp->b_pin_count);
}

STATIC void
xfs_buf_wait_unpin(
        xfs_buf_t               *bp)
{
        DECLARE_WAITQUEUE       (wait, current);

        if (atomic_read(&bp->b_pin_count) == 0)
                return;

        add_wait_queue(&bp->b_waiters, &wait);
        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (atomic_read(&bp->b_pin_count) == 0)
                        break;
                if (atomic_read(&bp->b_io_remaining))
                        blk_run_address_space(bp->b_target->bt_mapping);
                schedule();
        }
        remove_wait_queue(&bp->b_waiters, &wait);
        set_current_state(TASK_RUNNING);
}

/*
 *      Buffer Utility Routines
 */

STATIC void
xfs_buf_iodone_work(
        struct work_struct      *work)
{
        xfs_buf_t               *bp =
                container_of(work, xfs_buf_t, b_iodone_work);

        /*
         * We can get an EOPNOTSUPP to ordered writes.  Here we clear the
         * ordered flag and reissue them.  Because we can't tell the higher
         * layers directly that they should not issue ordered I/O anymore,
         * they need to check if the _XFS_BARRIER_FAILED flag was set during
         * I/O completion.
         */
        if ((bp->b_error == EOPNOTSUPP) &&
            (bp->b_flags & (XBF_ORDERED|XBF_ASYNC)) == (XBF_ORDERED|XBF_ASYNC)) {
                XB_TRACE(bp, "ordered_retry", bp->b_iodone);
                bp->b_flags &= ~XBF_ORDERED;
                bp->b_flags |= _XFS_BARRIER_FAILED;
                xfs_buf_iorequest(bp);
        } else if (bp->b_iodone)
                (*(bp->b_iodone))(bp);
        else if (bp->b_flags & XBF_ASYNC)
                xfs_buf_relse(bp);
}

void
xfs_buf_ioend(
        xfs_buf_t               *bp,
        int                     schedule)
{
        bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
        if (bp->b_error == 0)
                bp->b_flags |= XBF_DONE;

        XB_TRACE(bp, "iodone", bp->b_iodone);

        if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
                if (schedule) {
                        INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
                        queue_work(xfslogd_workqueue, &bp->b_iodone_work);
                } else {
                        xfs_buf_iodone_work(&bp->b_iodone_work);
                }
        } else {
                complete(&bp->b_iowait);
        }
}

void
xfs_buf_ioerror(
        xfs_buf_t               *bp,
        int                     error)
{
        ASSERT(error >= 0 && error <= 0xffff);
        bp->b_error = (unsigned short)error;
        XB_TRACE(bp, "ioerror", (unsigned long)error);
}

int
xfs_bawrite(
        void                    *mp,
        struct xfs_buf          *bp)
{
        XB_TRACE(bp, "bawrite", 0);

        ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);

        xfs_buf_delwri_dequeue(bp);

        bp->b_flags &= ~(XBF_READ | XBF_DELWRI | XBF_READ_AHEAD);
        bp->b_flags |= (XBF_WRITE | XBF_ASYNC | _XBF_RUN_QUEUES);

        bp->b_mount = mp;
        bp->b_strat = xfs_bdstrat_cb;
        return xfs_bdstrat_cb(bp);
}

void
xfs_bdwrite(
        void                    *mp,
        struct xfs_buf          *bp)
{
        XB_TRACE(bp, "bdwrite", 0);

        bp->b_strat = xfs_bdstrat_cb;
        bp->b_mount = mp;

        bp->b_flags &= ~XBF_READ;
        bp->b_flags |= (XBF_DELWRI | XBF_ASYNC);

        xfs_buf_delwri_queue(bp, 1);
}

STATIC_INLINE void
_xfs_buf_ioend(
        xfs_buf_t               *bp,
        int                     schedule)
{
        if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
                bp->b_flags &= ~_XBF_PAGE_LOCKED;
                xfs_buf_ioend(bp, schedule);
        }
}

STATIC void
xfs_buf_bio_end_io(
        struct bio              *bio,
        int                     error)
{
        xfs_buf_t               *bp = (xfs_buf_t *)bio->bi_private;
        unsigned int            blocksize = bp->b_target->bt_bsize;
        struct bio_vec          *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

        xfs_buf_ioerror(bp, -error);

        do {
                struct page     *page = bvec->bv_page;

                ASSERT(!PagePrivate(page));
                if (unlikely(bp->b_error)) {
                        if (bp->b_flags & XBF_READ)
                                ClearPageUptodate(page);
                } else if (blocksize >= PAGE_CACHE_SIZE) {
                        SetPageUptodate(page);
                } else if (!PagePrivate(page) &&
                                (bp->b_flags & _XBF_PAGE_CACHE)) {
                        set_page_region(page, bvec->bv_offset, bvec->bv_len);
                }

                if (--bvec >= bio->bi_io_vec)
                        prefetchw(&bvec->bv_page->flags);

                if (bp->b_flags & _XBF_PAGE_LOCKED)
                        unlock_page(page);
        } while (bvec >= bio->bi_io_vec);

        _xfs_buf_ioend(bp, 1);
        bio_put(bio);
}

STATIC void
_xfs_buf_ioapply(
        xfs_buf_t               *bp)
{
        int                     rw, map_i, total_nr_pages, nr_pages;
        struct bio              *bio;
        int                     offset = bp->b_offset;
        int                     size = bp->b_count_desired;
        sector_t                sector = bp->b_bn;
        unsigned int            blocksize = bp->b_target->bt_bsize;

        total_nr_pages = bp->b_page_count;
        map_i = 0;

        if (bp->b_flags & XBF_ORDERED) {
                ASSERT(!(bp->b_flags & XBF_READ));
                rw = WRITE_BARRIER;
        } else if (bp->b_flags & _XBF_RUN_QUEUES) {
                ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
                bp->b_flags &= ~_XBF_RUN_QUEUES;
                rw = (bp->b_flags & XBF_WRITE) ? WRITE_SYNC : READ_SYNC;
        } else {
                rw = (bp->b_flags & XBF_WRITE) ? WRITE :
                     (bp->b_flags & XBF_READ_AHEAD) ? READA : READ;
        }

        /* Special code path for reading a sub page size buffer in --
         * we populate the whole page, and hence the other metadata
         * in the same page.  This optimization is only valid when the
         * filesystem block size is not smaller than the page size.
         */
        if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
            ((bp->b_flags & (XBF_READ|_XBF_PAGE_LOCKED)) ==
              (XBF_READ|_XBF_PAGE_LOCKED)) &&
            (blocksize >= PAGE_CACHE_SIZE)) {
                bio = bio_alloc(GFP_NOIO, 1);

                bio->bi_bdev = bp->b_target->bt_bdev;
                bio->bi_sector = sector - (offset >> BBSHIFT);
                bio->bi_end_io = xfs_buf_bio_end_io;
                bio->bi_private = bp;

                bio_add_page(bio, bp->b_pages[0], PAGE_CACHE_SIZE, 0);
                size = 0;

                atomic_inc(&bp->b_io_remaining);

                goto submit_io;
        }

next_chunk:
        atomic_inc(&bp->b_io_remaining);
        nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
        if (nr_pages > total_nr_pages)
                nr_pages = total_nr_pages;

        bio = bio_alloc(GFP_NOIO, nr_pages);
        bio->bi_bdev = bp->b_target->bt_bdev;
        bio->bi_sector = sector;
        bio->bi_end_io = xfs_buf_bio_end_io;
        bio->bi_private = bp;

        for (; size && nr_pages; nr_pages--, map_i++) {
                int     rbytes, nbytes = PAGE_CACHE_SIZE - offset;

                if (nbytes > size)
                        nbytes = size;

                rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset);
                if (rbytes < nbytes)
                        break;

                offset = 0;
                sector += nbytes >> BBSHIFT;
                size -= nbytes;
                total_nr_pages--;
        }

submit_io:
        if (likely(bio->bi_size)) {
                submit_bio(rw, bio);
                if (size)
                        goto next_chunk;
        } else {
                bio_put(bio);
                xfs_buf_ioerror(bp, EIO);
        }
}
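
/*
 * Sizing note on the chunking above (assuming 4K pages, so PAGE_SHIFT == 12
 * and BBSHIFT == 9): nr_pages == BIO_MAX_SECTORS >> 3, i.e. each bio carries
 * at most BIO_MAX_SECTORS' worth of full pages.  Larger buffers simply loop
 * through next_chunk and go out as multiple bios, with b_io_remaining
 * counting the in-flight pieces.
 */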

int
xfs_buf_iorequest(
        xfs_buf_t               *bp)
{
        XB_TRACE(bp, "iorequest", 0);

        if (bp->b_flags & XBF_DELWRI) {
                xfs_buf_delwri_queue(bp, 1);
                return 0;
        }

        if (bp->b_flags & XBF_WRITE) {
                xfs_buf_wait_unpin(bp);
        }

        xfs_buf_hold(bp);

        /* Set the count to 1 initially, this will stop an I/O
         * completion callout which happens before we have started
         * all the I/O from calling xfs_buf_ioend too early.
         */
        atomic_set(&bp->b_io_remaining, 1);
        _xfs_buf_ioapply(bp);
        _xfs_buf_ioend(bp, 0);

        xfs_buf_rele(bp);
        return 0;
}

/*
 *      Waits for I/O to complete on the buffer supplied.
 *      It returns immediately if no I/O is pending.
 *      It returns the I/O error code, if any, or 0 if there was no error.
 */
int
xfs_buf_iowait(
        xfs_buf_t               *bp)
{
        XB_TRACE(bp, "iowait", 0);
        if (atomic_read(&bp->b_io_remaining))
                blk_run_address_space(bp->b_target->bt_mapping);
        wait_for_completion(&bp->b_iowait);
        XB_TRACE(bp, "iowaited", (long)bp->b_error);
        return bp->b_error;
}

xfs_caddr_t
xfs_buf_offset(
        xfs_buf_t               *bp,
        size_t                  offset)
{
        struct page             *page;

        if (bp->b_flags & XBF_MAPPED)
                return XFS_BUF_PTR(bp) + offset;

        offset += bp->b_offset;
        page = bp->b_pages[offset >> PAGE_CACHE_SHIFT];
        return (xfs_caddr_t)page_address(page) + (offset & (PAGE_CACHE_SIZE-1));
}

/*
 *      Move data into or out of a buffer.
 */
void
xfs_buf_iomove(
        xfs_buf_t               *bp,    /* buffer to process            */
        size_t                  boff,   /* starting buffer offset       */
        size_t                  bsize,  /* length to copy               */
        caddr_t                 data,   /* data address                 */
        xfs_buf_rw_t            mode)   /* read/write/zero flag         */
{
        size_t                  bend, cpoff, csize;
        struct page             *page;

        bend = boff + bsize;
        while (boff < bend) {
                page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
                cpoff = xfs_buf_poff(boff + bp->b_offset);
                csize = min_t(size_t,
                              PAGE_CACHE_SIZE-cpoff, bp->b_count_desired-boff);

                ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE));

                switch (mode) {
                case XBRW_ZERO:
                        memset(page_address(page) + cpoff, 0, csize);
                        break;
                case XBRW_READ:
                        memcpy(data, page_address(page) + cpoff, csize);
                        break;
                case XBRW_WRITE:
                        memcpy(page_address(page) + cpoff, data, csize);
                }

                boff += csize;
                data += csize;
        }
}
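
/*
 * Illustrative use (a sketch; "valid_len" is a placeholder): zeroing a
 * sub-range of a buffer walks the underlying pages without needing a
 * mapped b_addr:
 *
 *      xfs_buf_iomove(bp, valid_len, bp->b_count_desired - valid_len,
 *                     NULL, XBRW_ZERO);
 *
 * The XBRW_ZERO case never dereferences the data pointer, so NULL is
 * acceptable there.
 */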

/*
 *      Handling of buffer targets (buftargs).
 */

/*
 *      Wait for any bufs with callbacks that have been submitted but
 *      have not yet returned... walk the hash list for the target.
 */
void
xfs_wait_buftarg(
        xfs_buftarg_t   *btp)
{
        xfs_buf_t       *bp, *n;
        xfs_bufhash_t   *hash;
        uint            i;

        for (i = 0; i < (1 << btp->bt_hashshift); i++) {
                hash = &btp->bt_hash[i];
again:
                spin_lock(&hash->bh_lock);
                list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
                        ASSERT(btp == bp->b_target);
                        if (!(bp->b_flags & XBF_FS_MANAGED)) {
                                spin_unlock(&hash->bh_lock);
                                /*
                                 * Catch superblock reference count leaks
                                 * immediately
                                 */
                                BUG_ON(bp->b_bn == 0);
                                delay(100);
                                goto again;
                        }
                }
                spin_unlock(&hash->bh_lock);
        }
}

/*
 *      Allocate buffer hash table for a given target.
 *      For devices containing metadata (i.e. not the log/realtime devices)
 *      we need to allocate a much larger hash table.
 */
STATIC void
xfs_alloc_bufhash(
        xfs_buftarg_t           *btp,
        int                     external)
{
        unsigned int            i;

        btp->bt_hashshift = external ? 3 : 8;   /* 8 or 256 buckets */
        btp->bt_hashmask = (1 << btp->bt_hashshift) - 1;
        btp->bt_hash = kmem_zalloc((1 << btp->bt_hashshift) *
                                        sizeof(xfs_bufhash_t), KM_SLEEP | KM_LARGE);
        for (i = 0; i < (1 << btp->bt_hashshift); i++) {
                spin_lock_init(&btp->bt_hash[i].bh_lock);
                INIT_LIST_HEAD(&btp->bt_hash[i].bh_list);
        }
}

STATIC void
xfs_free_bufhash(
        xfs_buftarg_t           *btp)
{
        kmem_free(btp->bt_hash);
        btp->bt_hash = NULL;
}

/*
 *      buftarg list for delwrite queue processing
 */
static LIST_HEAD(xfs_buftarg_list);
static DEFINE_SPINLOCK(xfs_buftarg_lock);

STATIC void
xfs_register_buftarg(
        xfs_buftarg_t           *btp)
{
        spin_lock(&xfs_buftarg_lock);
        list_add(&btp->bt_list, &xfs_buftarg_list);
        spin_unlock(&xfs_buftarg_lock);
}

STATIC void
xfs_unregister_buftarg(
        xfs_buftarg_t           *btp)
{
        spin_lock(&xfs_buftarg_lock);
        list_del(&btp->bt_list);
        spin_unlock(&xfs_buftarg_lock);
}

void
xfs_free_buftarg(
        struct xfs_mount        *mp,
        struct xfs_buftarg      *btp)
{
        xfs_flush_buftarg(btp, 1);
        if (mp->m_flags & XFS_MOUNT_BARRIER)
                xfs_blkdev_issue_flush(btp);
        xfs_free_bufhash(btp);
        iput(btp->bt_mapping->host);

        /* Unregister the buftarg first so that we don't get a
         * wakeup finding a non-existent task
         */
        xfs_unregister_buftarg(btp);
        kthread_stop(btp->bt_task);

        kmem_free(btp);
}

STATIC int
xfs_setsize_buftarg_flags(
        xfs_buftarg_t           *btp,
        unsigned int            blocksize,
        unsigned int            sectorsize,
        int                     verbose)
{
        btp->bt_bsize = blocksize;
        btp->bt_sshift = ffs(sectorsize) - 1;
        btp->bt_smask = sectorsize - 1;

        if (set_blocksize(btp->bt_bdev, sectorsize)) {
                printk(KERN_WARNING
                        "XFS: Cannot set_blocksize to %u on device %s\n",
                        sectorsize, XFS_BUFTARG_NAME(btp));
                return EINVAL;
        }

        if (verbose &&
            (PAGE_CACHE_SIZE / BITS_PER_LONG) > sectorsize) {
                printk(KERN_WARNING
                        "XFS: %u byte sectors in use on device %s.  "
                        "This is suboptimal; %u or greater is ideal.\n",
                        sectorsize, XFS_BUFTARG_NAME(btp),
                        (unsigned int)PAGE_CACHE_SIZE / BITS_PER_LONG);
        }

        return 0;
}

/*
 *      When allocating the initial buffer target we have not yet
 *      read in the superblock, so don't know what sized sectors
 *      are being used at this early stage.  Play safe.
1496  */
1497 STATIC int
1498 xfs_setsize_buftarg_early(
1499         xfs_buftarg_t           *btp,
1500         struct block_device     *bdev)
1501 {
1502         return xfs_setsize_buftarg_flags(btp,
1503                         PAGE_CACHE_SIZE, bdev_hardsect_size(bdev), 0);
1504 }
1505
1506 int
1507 xfs_setsize_buftarg(
1508         xfs_buftarg_t           *btp,
1509         unsigned int            blocksize,
1510         unsigned int            sectorsize)
1511 {
1512         return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
1513 }
1514
1515 STATIC int
1516 xfs_mapping_buftarg(
1517         xfs_buftarg_t           *btp,
1518         struct block_device     *bdev)
1519 {
1520         struct backing_dev_info *bdi;
1521         struct inode            *inode;
1522         struct address_space    *mapping;
1523         static const struct address_space_operations mapping_aops = {
1524                 .sync_page = block_sync_page,
1525                 .migratepage = fail_migrate_page,
1526         };
1527
1528         inode = new_inode(bdev->bd_inode->i_sb);
1529         if (!inode) {
1530                 printk(KERN_WARNING
1531                         "XFS: Cannot allocate mapping inode for device %s\n",
1532                         XFS_BUFTARG_NAME(btp));
1533                 return ENOMEM;
1534         }
1535         inode->i_mode = S_IFBLK;
1536         inode->i_bdev = bdev;
1537         inode->i_rdev = bdev->bd_dev;
1538         bdi = blk_get_backing_dev_info(bdev);
1539         if (!bdi)
1540                 bdi = &default_backing_dev_info;
1541         mapping = &inode->i_data;
1542         mapping->a_ops = &mapping_aops;
1543         mapping->backing_dev_info = bdi;
1544         mapping_set_gfp_mask(mapping, GFP_NOFS);
1545         btp->bt_mapping = mapping;
1546         return 0;
1547 }
1548
1549 STATIC int
1550 xfs_alloc_delwrite_queue(
1551         xfs_buftarg_t           *btp)
1552 {
1553         int     error = 0;
1554
1555         INIT_LIST_HEAD(&btp->bt_list);
1556         INIT_LIST_HEAD(&btp->bt_delwrite_queue);
1557         spin_lock_init(&btp->bt_delwrite_lock);
1558         btp->bt_flags = 0;
1559         btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd");
1560         if (IS_ERR(btp->bt_task)) {
1561                 error = PTR_ERR(btp->bt_task);
1562                 goto out_error;
1563         }
1564         xfs_register_buftarg(btp);
1565 out_error:
1566         return error;
1567 }
1568
1569 xfs_buftarg_t *
1570 xfs_alloc_buftarg(
1571         struct block_device     *bdev,
1572         int                     external)
1573 {
1574         xfs_buftarg_t           *btp;
1575
1576         btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
1577
1578         btp->bt_dev =  bdev->bd_dev;
1579         btp->bt_bdev = bdev;
1580         if (xfs_setsize_buftarg_early(btp, bdev))
1581                 goto error;
1582         if (xfs_mapping_buftarg(btp, bdev))
1583                 goto error;
1584         if (xfs_alloc_delwrite_queue(btp))
1585                 goto error;
1586         xfs_alloc_bufhash(btp, external);
1587         return btp;
1588
1589 error:
1590         kmem_free(btp);
1591         return NULL;
1592 }
1593
1594
1595 /*
1596  *      Delayed write buffer handling
1597  */
1598 STATIC void
1599 xfs_buf_delwri_queue(
1600         xfs_buf_t               *bp,
1601         int                     unlock)
1602 {
1603         struct list_head        *dwq = &bp->b_target->bt_delwrite_queue;
1604         spinlock_t              *dwlk = &bp->b_target->bt_delwrite_lock;
1605
1606         XB_TRACE(bp, "delwri_q", (long)unlock);
1607         ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC));
1608
1609         spin_lock(dwlk);
1610         /* If already in the queue, dequeue and place at tail */
1611         if (!list_empty(&bp->b_list)) {
1612                 ASSERT(bp->b_flags & _XBF_DELWRI_Q);
1613                 if (unlock)
1614                         atomic_dec(&bp->b_hold);
1615                 list_del(&bp->b_list);
1616         }
1617
1618         bp->b_flags |= _XBF_DELWRI_Q;
1619         list_add_tail(&bp->b_list, dwq);
1620         bp->b_queuetime = jiffies;
1621         spin_unlock(dwlk);
1622
1623         if (unlock)
1624                 xfs_buf_unlock(bp);
1625 }
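
/*
 * Typical entry sketch: a caller starting a delayed write marks the buffer
 * and hands its lock off to the queue, the pattern used by helpers such as
 * xfs_bdwrite():
 *
 *	bp->b_flags |= (XBF_DELWRI | XBF_ASYNC);
 *	xfs_buf_delwri_queue(bp, 1);		(unlock == 1 drops the lock)
 *
 * Re-queueing an already queued buffer moves it to the tail and resets
 * b_queuetime, so the buffer ages from its most recent dirtying.
 */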
1626
1627 void
1628 xfs_buf_delwri_dequeue(
1629         xfs_buf_t               *bp)
1630 {
1631         spinlock_t              *dwlk = &bp->b_target->bt_delwrite_lock;
1632         int                     dequeued = 0;
1633
1634         spin_lock(dwlk);
1635         if ((bp->b_flags & XBF_DELWRI) && !list_empty(&bp->b_list)) {
1636                 ASSERT(bp->b_flags & _XBF_DELWRI_Q);
1637                 list_del_init(&bp->b_list);
1638                 dequeued = 1;
1639         }
1640         bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
1641         spin_unlock(dwlk);
1642
1643         if (dequeued)
1644                 xfs_buf_rele(bp);
1645
1646         XB_TRACE(bp, "delwri_dq", (long)dequeued);
1647 }
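
/*
 * Note: the delwri queue holds its own reference on a queued buffer, so a
 * successful dequeue must drop it via xfs_buf_rele(); callers continue to
 * rely on whatever hold they already own.
 */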
1648
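/*
 * Flush all pending work on the given workqueue, so that queued buffer
 * I/O completions have finished before the caller proceeds.
 */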
1649 STATIC void
1650 xfs_buf_runall_queues(
1651         struct workqueue_struct *queue)
1652 {
1653         flush_workqueue(queue);
1654 }
1655
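/*
 * Shrinker callback (registered via xfs_buf_shake): under memory pressure
 * the VM calls in here and we kick every registered target's xfsbufd so
 * the delwri queues drain.  Frozen threads (XBT_FORCE_SLEEP) are skipped,
 * as waking them during a freeze would accomplish nothing.  The priority
 * and mask arguments are unused; returning 0 reports nothing reclaimable.
 */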
1656 STATIC int
1657 xfsbufd_wakeup(
1658         int                     priority,
1659         gfp_t                   mask)
1660 {
1661         xfs_buftarg_t           *btp;
1662
1663         spin_lock(&xfs_buftarg_lock);
1664         list_for_each_entry(btp, &xfs_buftarg_list, bt_list) {
1665                 if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags))
1666                         continue;
1667                 set_bit(XBT_FORCE_FLUSH, &btp->bt_flags);
1668                 wake_up_process(btp->bt_task);
1669         }
1670         spin_unlock(&xfs_buftarg_lock);
1671         return 0;
1672 }
1673
1674 /*
1675  * Move delayed write buffers older than the given age to the supplied
1676  * list, indicating whether we skipped any buffers to prevent deadlocks.
1677  */
1678 STATIC int
1679 xfs_buf_delwri_split(
1680         xfs_buftarg_t   *target,
1681         struct list_head *list,
1682         unsigned long   age)
1683 {
1684         xfs_buf_t       *bp, *n;
1685         struct list_head *dwq = &target->bt_delwrite_queue;
1686         spinlock_t      *dwlk = &target->bt_delwrite_lock;
1687         int             skipped = 0;
1688         int             force;
1689
1690         force = test_and_clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
1691         INIT_LIST_HEAD(list);
1692         spin_lock(dwlk);
1693         list_for_each_entry_safe(bp, n, dwq, b_list) {
1694                 XB_TRACE(bp, "walkq1", (long)xfs_buf_ispin(bp));
1695                 ASSERT(bp->b_flags & XBF_DELWRI);
1696
1697                 if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) {
1698                         if (!force &&
1699                             time_before(jiffies, bp->b_queuetime + age)) {
1700                                 xfs_buf_unlock(bp);
1701                                 break;
1702                         }
1703
1704                         bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q|
1705                                          _XBF_RUN_QUEUES);
1706                         bp->b_flags |= XBF_WRITE;
1707                         list_move_tail(&bp->b_list, list);
1708                 } else
1709                         skipped++;
1710         }
1711         spin_unlock(dwlk);
1712
1713         return skipped;
1715 }
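
/*
 * Two calling modes (sketch): xfsbufd ages buffers out gradually, while
 * xfs_flush_buftarg() below forces everything out at once:
 *
 *	xfs_buf_delwri_split(target, &tmp, age);	(periodic writeback)
 *
 *	set_bit(XBT_FORCE_FLUSH, &target->bt_flags);
 *	xfs_buf_delwri_split(target, &tmp, 0);		(flush everything)
 *
 * Pinned buffers are skipped in either mode; the log must be forced to
 * unpin them before they can be written back.
 */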
1716
1717 STATIC int
1718 xfsbufd(
1719         void            *data)
1720 {
1721         struct list_head tmp;
1722         xfs_buftarg_t   *target = (xfs_buftarg_t *)data;
1723         int             count;
1724         xfs_buf_t       *bp;
1725
1726         current->flags |= PF_MEMALLOC;
1727
1728         set_freezable();
1729
1730         do {
1731                 if (unlikely(freezing(current))) {
1732                         set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
1733                         refrigerator();
1734                 } else {
1735                         clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
1736                 }
1737
1738                 schedule_timeout_interruptible(
1739                         xfs_buf_timer_centisecs * msecs_to_jiffies(10));
1740
1741                 xfs_buf_delwri_split(target, &tmp,
1742                                 xfs_buf_age_centisecs * msecs_to_jiffies(10));
1743
1744                 count = 0;
1745                 while (!list_empty(&tmp)) {
1746                         bp = list_entry(tmp.next, xfs_buf_t, b_list);
1747                         ASSERT(target == bp->b_target);
1748
1749                         list_del_init(&bp->b_list);
1750                         xfs_buf_iostrategy(bp);
1751                         count++;
1752                 }
1753
1754                 if (as_list_len > 0)
1755                         purge_addresses();
1756                 if (count)
1757                         blk_run_address_space(target->bt_mapping);
1758
1759         } while (!kthread_should_stop());
1760
1761         return 0;
1762 }
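
/*
 * Timing sketch: both tunables are in centiseconds, so the thread wakes
 * every xfs_buf_timer_centisecs * msecs_to_jiffies(10) jiffies and writes
 * back buffers queued for longer than xfs_buf_age_centisecs * 10 msecs.
 * With the usual defaults (assumed here: timer 100, age 1500) that is a
 * wakeup roughly once a second, flushing buffers dirtied over 15s ago.
 */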
1763
1764 /*
1765  *      Write out all incore delayed write buffers belonging to the given
1766  *      target, optionally waiting for I/O to complete. This is used in
1767  *      filesystem error handling to preserve the consistency of metadata.
1768  */
1769 int
1770 xfs_flush_buftarg(
1771         xfs_buftarg_t   *target,
1772         int             wait)
1773 {
1774         struct list_head tmp;
1775         xfs_buf_t       *bp, *n;
1776         int             pincount = 0;
1777
1778         xfs_buf_runall_queues(xfsdatad_workqueue);
1779         xfs_buf_runall_queues(xfslogd_workqueue);
1780
1781         set_bit(XBT_FORCE_FLUSH, &target->bt_flags);
1782         pincount = xfs_buf_delwri_split(target, &tmp, 0);
1783
1784         /*
1785          * Dropped the delayed write list lock, now walk the temporary list
1786          */
1787         list_for_each_entry_safe(bp, n, &tmp, b_list) {
1788                 ASSERT(target == bp->b_target);
1789                 if (wait)
1790                         bp->b_flags &= ~XBF_ASYNC;
1791                 else
1792                         list_del_init(&bp->b_list);
1793
1794                 xfs_buf_iostrategy(bp);
1795         }
1796
1797         if (wait)
1798                 blk_run_address_space(target->bt_mapping);
1799
1800         /*
1801          * Remaining list items must be flushed before returning
1802          */
1803         while (!list_empty(&tmp)) {
1804                 bp = list_entry(tmp.next, xfs_buf_t, b_list);
1805
1806                 list_del_init(&bp->b_list);
1807                 xfs_iowait(bp);
1808                 xfs_buf_relse(bp);
1809         }
1810
1811         return pincount;
1812 }
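
/*
 * Caller pattern (sketch; mp->m_ddev_targp and delay() are illustrative):
 * the return value counts buffers we could not flush, so unmount-style
 * callers force the log to unpin buffers and retry until it reaches zero:
 *
 *	while (xfs_flush_buftarg(mp->m_ddev_targp, 1))
 *		delay(100);
 */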
1813
1814 int __init
1815 xfs_buf_init(void)
1816 {
1817 #ifdef XFS_BUF_TRACE
1818         xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_NOFS);
1819 #endif
1820
1821         xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
1822                                                 KM_ZONE_HWALIGN, NULL);
1823         if (!xfs_buf_zone)
1824                 goto out_free_trace_buf;
1825
1826         xfslogd_workqueue = create_workqueue("xfslogd");
1827         if (!xfslogd_workqueue)
1828                 goto out_free_buf_zone;
1829
1830         xfsdatad_workqueue = create_workqueue("xfsdatad");
1831         if (!xfsdatad_workqueue)
1832                 goto out_destroy_xfslogd_workqueue;
1833
1834         register_shrinker(&xfs_buf_shake);
1835         return 0;
1836
1837  out_destroy_xfslogd_workqueue:
1838         destroy_workqueue(xfslogd_workqueue);
1839  out_free_buf_zone:
1840         kmem_zone_destroy(xfs_buf_zone);
1841  out_free_trace_buf:
1842 #ifdef XFS_BUF_TRACE
1843         ktrace_free(xfs_buf_trace_buf);
1844 #endif
1845         return -ENOMEM;
1846 }
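
/*
 * Note: the unwind labels above release resources in strict reverse order
 * of allocation; xfs_buf_terminate() below mirrors the same ordering for
 * module teardown.
 */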
1847
1848 void
1849 xfs_buf_terminate(void)
1850 {
1851         unregister_shrinker(&xfs_buf_shake);
1852         destroy_workqueue(xfsdatad_workqueue);
1853         destroy_workqueue(xfslogd_workqueue);
1854         kmem_zone_destroy(xfs_buf_zone);
1855 #ifdef XFS_BUF_TRACE
1856         ktrace_free(xfs_buf_trace_buf);
1857 #endif
1858 }
1859
1860 #ifdef CONFIG_KDB_MODULES
1861 struct list_head *
1862 xfs_get_buftarg_list(void)
1863 {
1864         return &xfs_buftarg_list;
1865 }
1866 #endif