fs/xfs/linux-2.6/xfs_buf.c  [safe/jmp/linux-2.6]  (commit: WorkStruct: make allyesconfig)
1 /*
2  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include <linux/stddef.h>
20 #include <linux/errno.h>
21 #include <linux/slab.h>
22 #include <linux/pagemap.h>
23 #include <linux/init.h>
24 #include <linux/vmalloc.h>
25 #include <linux/bio.h>
26 #include <linux/sysctl.h>
27 #include <linux/proc_fs.h>
28 #include <linux/workqueue.h>
29 #include <linux/percpu.h>
30 #include <linux/blkdev.h>
31 #include <linux/hash.h>
32 #include <linux/kthread.h>
33 #include <linux/migrate.h>
34 #include <linux/backing-dev.h>
35
36 STATIC kmem_zone_t *xfs_buf_zone;
37 STATIC kmem_shaker_t xfs_buf_shake;
38 STATIC int xfsbufd(void *);
39 STATIC int xfsbufd_wakeup(int, gfp_t);
40 STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
41
42 STATIC struct workqueue_struct *xfslogd_workqueue;
43 struct workqueue_struct *xfsdatad_workqueue;
44
45 #ifdef XFS_BUF_TRACE
46 void
47 xfs_buf_trace(
48         xfs_buf_t       *bp,
49         char            *id,
50         void            *data,
51         void            *ra)
52 {
53         ktrace_enter(xfs_buf_trace_buf,
54                 bp, id,
55                 (void *)(unsigned long)bp->b_flags,
56                 (void *)(unsigned long)bp->b_hold.counter,
57                 (void *)(unsigned long)bp->b_sema.count.counter,
58                 (void *)current,
59                 data, ra,
60                 (void *)(unsigned long)((bp->b_file_offset>>32) & 0xffffffff),
61                 (void *)(unsigned long)(bp->b_file_offset & 0xffffffff),
62                 (void *)(unsigned long)bp->b_buffer_length,
63                 NULL, NULL, NULL, NULL, NULL);
64 }
65 ktrace_t *xfs_buf_trace_buf;
66 #define XFS_BUF_TRACE_SIZE      4096
67 #define XB_TRACE(bp, id, data)  \
68         xfs_buf_trace(bp, id, (void *)data, (void *)__builtin_return_address(0))
69 #else
70 #define XB_TRACE(bp, id, data)  do { } while (0)
71 #endif
72
73 #ifdef XFS_BUF_LOCK_TRACKING
74 # define XB_SET_OWNER(bp)       ((bp)->b_last_holder = current->pid)
75 # define XB_CLEAR_OWNER(bp)     ((bp)->b_last_holder = -1)
76 # define XB_GET_OWNER(bp)       ((bp)->b_last_holder)
77 #else
78 # define XB_SET_OWNER(bp)       do { } while (0)
79 # define XB_CLEAR_OWNER(bp)     do { } while (0)
80 # define XB_GET_OWNER(bp)       do { } while (0)
81 #endif
82
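/*
 * Translate buffer allocation flags into page cache and kmem allocation
 * flags: readahead allocations must fail fast rather than retry,
 * XBF_DONT_BLOCK allocations must not recurse into the filesystem
 * (GFP_NOFS / KM_NOFS), and allocation failures are never warned about
 * since the lookup path retries them itself.
 */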
83 #define xb_to_gfp(flags) \
84         ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
85           ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)
86
87 #define xb_to_km(flags) \
88          (((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)
89
90 #define xfs_buf_allocate(flags) \
91         kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags))
92 #define xfs_buf_deallocate(bp) \
93         kmem_zone_free(xfs_buf_zone, (bp));
94
95 /*
96  *      Page Region interfaces.
97  *
98  *      For pages in filesystems where the blocksize is smaller than the
99  *      pagesize, we use the page->private field (long) to hold a bitmap
100  *      of uptodate regions within the page.
101  *
102  *      Each such region is "bytes per page / bits per long" bytes long.
103  *
104  *      NBPPR == number-of-bytes-per-page-region
105  *      BTOPR == bytes-to-page-region (rounded up)
106  *      BTOPRT == bytes-to-page-region-truncated (rounded down)
107  */
108 #if (BITS_PER_LONG == 32)
109 #define PRSHIFT         (PAGE_CACHE_SHIFT - 5)  /* (32 == 1<<5) */
110 #elif (BITS_PER_LONG == 64)
111 #define PRSHIFT         (PAGE_CACHE_SHIFT - 6)  /* (64 == 1<<6) */
112 #else
113 #error BITS_PER_LONG must be 32 or 64
114 #endif
115 #define NBPPR           (PAGE_CACHE_SIZE/BITS_PER_LONG)
116 #define BTOPR(b)        (((unsigned int)(b) + (NBPPR - 1)) >> PRSHIFT)
117 #define BTOPRT(b)       (((unsigned int)(b) >> PRSHIFT))
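/* e.g. with 4096-byte pages a region is 128 bytes on 32-bit, 64 bytes on 64-bit */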
118
119 STATIC unsigned long
120 page_region_mask(
121         size_t          offset,
122         size_t          length)
123 {
124         unsigned long   mask;
125         int             first, final;
126
127         first = BTOPR(offset);
128         final = BTOPRT(offset + length - 1);
129         first = min(first, final);
130
131         mask = ~0UL;
132         mask <<= BITS_PER_LONG - (final - first);
133         mask >>= BITS_PER_LONG - (final);
134
135         ASSERT(offset + length <= PAGE_CACHE_SIZE);
136         ASSERT((final - first) < BITS_PER_LONG && (final - first) >= 0);
137
138         return mask;
139 }
140
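/*
 * Mark the byte range [offset, offset+length) of the page as up to date;
 * once every region bit is set, the whole page is flagged uptodate.
 */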
141 STATIC inline void
142 set_page_region(
143         struct page     *page,
144         size_t          offset,
145         size_t          length)
146 {
147         set_page_private(page,
148                 page_private(page) | page_region_mask(offset, length));
149         if (page_private(page) == ~0UL)
150                 SetPageUptodate(page);
151 }
152
153 STATIC inline int
154 test_page_region(
155         struct page     *page,
156         size_t          offset,
157         size_t          length)
158 {
159         unsigned long   mask = page_region_mask(offset, length);
160
161         return (mask && (page_private(page) & mask) == mask);
162 }
163
164 /*
165  *      Mapping of multi-page buffers into contiguous virtual space
166  */
167
168 typedef struct a_list {
169         void            *vm_addr;
170         struct a_list   *next;
171 } a_list_t;
172
173 STATIC a_list_t         *as_free_head;
174 STATIC int              as_list_len;
175 STATIC DEFINE_SPINLOCK(as_lock);
176
177 /*
178  *      Try to batch vunmaps because they are costly.
179  */
180 STATIC void
181 free_address(
182         void            *addr)
183 {
184         a_list_t        *aentry;
185
186         aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
187         if (likely(aentry)) {
188                 spin_lock(&as_lock);
189                 aentry->next = as_free_head;
190                 aentry->vm_addr = addr;
191                 as_free_head = aentry;
192                 as_list_len++;
193                 spin_unlock(&as_lock);
194         } else {
195                 vunmap(addr);
196         }
197 }
198
199 STATIC void
200 purge_addresses(void)
201 {
202         a_list_t        *aentry, *old;
203
204         if (as_free_head == NULL)
205                 return;
206
207         spin_lock(&as_lock);
208         aentry = as_free_head;
209         as_free_head = NULL;
210         as_list_len = 0;
211         spin_unlock(&as_lock);
212
213         while ((old = aentry) != NULL) {
214                 vunmap(aentry->vm_addr);
215                 aentry = aentry->next;
216                 kfree(old);
217         }
218 }
219
220 /*
221  *      Internal xfs_buf_t object manipulation
222  */
223
224 STATIC void
225 _xfs_buf_initialize(
226         xfs_buf_t               *bp,
227         xfs_buftarg_t           *target,
228         xfs_off_t               range_base,
229         size_t                  range_length,
230         xfs_buf_flags_t         flags)
231 {
232         /*
233          * We don't want certain flags to appear in b_flags.
234          */
235         flags &= ~(XBF_LOCK|XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);
236
237         memset(bp, 0, sizeof(xfs_buf_t));
238         atomic_set(&bp->b_hold, 1);
239         init_MUTEX_LOCKED(&bp->b_iodonesema);
240         INIT_LIST_HEAD(&bp->b_list);
241         INIT_LIST_HEAD(&bp->b_hash_list);
242         init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */
243         XB_SET_OWNER(bp);
244         bp->b_target = target;
245         bp->b_file_offset = range_base;
246         /*
247          * Set buffer_length and count_desired to the same value initially.
248          * I/O routines should use count_desired, which will be the same in
249          * most cases but may be reset (e.g. XFS recovery).
250          */
251         bp->b_buffer_length = bp->b_count_desired = range_length;
252         bp->b_flags = flags;
253         bp->b_bn = XFS_BUF_DADDR_NULL;
254         atomic_set(&bp->b_pin_count, 0);
255         init_waitqueue_head(&bp->b_waiters);
256
257         XFS_STATS_INC(xb_create);
258         XB_TRACE(bp, "initialize", target);
259 }
260
261 /*
262  *      Allocate a page array capable of holding a specified number
263  *      of pages, and point the page buf at it.
264  */
265 STATIC int
266 _xfs_buf_get_pages(
267         xfs_buf_t               *bp,
268         int                     page_count,
269         xfs_buf_flags_t         flags)
270 {
271         /* Make sure that we have a page list */
272         if (bp->b_pages == NULL) {
273                 bp->b_offset = xfs_buf_poff(bp->b_file_offset);
274                 bp->b_page_count = page_count;
275                 if (page_count <= XB_PAGES) {
276                         bp->b_pages = bp->b_page_array;
277                 } else {
278                         bp->b_pages = kmem_alloc(sizeof(struct page *) *
279                                         page_count, xb_to_km(flags));
280                         if (bp->b_pages == NULL)
281                                 return -ENOMEM;
282                 }
283                 memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
284         }
285         return 0;
286 }
287
288 /*
289  *      Frees b_pages if it was allocated.
290  */
291 STATIC void
292 _xfs_buf_free_pages(
293         xfs_buf_t       *bp)
294 {
295         if (bp->b_pages != bp->b_page_array) {
296                 kmem_free(bp->b_pages,
297                           bp->b_page_count * sizeof(struct page *));
298         }
299 }
300
301 /*
302  *      Releases the specified buffer.
303  *
304  *      The modification state of any associated pages is left unchanged.
305  *      The buffer must not be on any hash - use xfs_buf_rele instead for
306  *      hashed and refcounted buffers
307  */
308 void
309 xfs_buf_free(
310         xfs_buf_t               *bp)
311 {
312         XB_TRACE(bp, "free", 0);
313
314         ASSERT(list_empty(&bp->b_hash_list));
315
316         if (bp->b_flags & _XBF_PAGE_CACHE) {
317                 uint            i;
318
319                 if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
320                         free_address(bp->b_addr - bp->b_offset);
321
322                 for (i = 0; i < bp->b_page_count; i++) {
323                         struct page     *page = bp->b_pages[i];
324
325                         ASSERT(!PagePrivate(page));
326                         page_cache_release(page);
327                 }
328                 _xfs_buf_free_pages(bp);
329         } else if (bp->b_flags & _XBF_KMEM_ALLOC) {
330                  /*
331                   * XXX(hch): bp->b_count_desired might be incorrect (see
332                   * xfs_buf_associate_memory for details), but fortunately
333                   * the Linux version of kmem_free ignores the len argument..
334                   */
335                 kmem_free(bp->b_addr, bp->b_count_desired);
336                 _xfs_buf_free_pages(bp);
337         }
338
339         xfs_buf_deallocate(bp);
340 }
341
342 /*
343  *      Finds all pages for the buffer in question and builds its page list.
344  */
345 STATIC int
346 _xfs_buf_lookup_pages(
347         xfs_buf_t               *bp,
348         uint                    flags)
349 {
350         struct address_space    *mapping = bp->b_target->bt_mapping;
351         size_t                  blocksize = bp->b_target->bt_bsize;
352         size_t                  size = bp->b_count_desired;
353         size_t                  nbytes, offset;
354         gfp_t                   gfp_mask = xb_to_gfp(flags);
355         unsigned short          page_count, i;
356         pgoff_t                 first;
357         xfs_off_t               end;
358         int                     error;
359
360         end = bp->b_file_offset + bp->b_buffer_length;
361         page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);
362
363         error = _xfs_buf_get_pages(bp, page_count, flags);
364         if (unlikely(error))
365                 return error;
366         bp->b_flags |= _XBF_PAGE_CACHE;
367
368         offset = bp->b_offset;
369         first = bp->b_file_offset >> PAGE_CACHE_SHIFT;
370
371         for (i = 0; i < bp->b_page_count; i++) {
372                 struct page     *page;
373                 uint            retries = 0;
374
375               retry:
376                 page = find_or_create_page(mapping, first + i, gfp_mask);
377                 if (unlikely(page == NULL)) {
378                         if (flags & XBF_READ_AHEAD) {
379                                 bp->b_page_count = i;
380                                 for (i = 0; i < bp->b_page_count; i++)
381                                         unlock_page(bp->b_pages[i]);
382                                 return -ENOMEM;
383                         }
384
385                         /*
386                          * This could deadlock.
387                          *
388                          * But until all the XFS lowlevel code is revamped to
389                          * handle buffer allocation failures we can't do much.
390                          */
391                         if (!(++retries % 100))
392                                 printk(KERN_ERR
393                                         "XFS: possible memory allocation "
394                                         "deadlock in %s (mode:0x%x)\n",
395                                         __FUNCTION__, gfp_mask);
396
397                         XFS_STATS_INC(xb_page_retries);
398                         xfsbufd_wakeup(0, gfp_mask);
399                         congestion_wait(WRITE, HZ/50);
400                         goto retry;
401                 }
402
403                 XFS_STATS_INC(xb_page_found);
404
405                 nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
406                 size -= nbytes;
407
408                 ASSERT(!PagePrivate(page));
409                 if (!PageUptodate(page)) {
410                         page_count--;
411                         if (blocksize >= PAGE_CACHE_SIZE) {
412                                 if (flags & XBF_READ)
413                                         bp->b_locked = 1;
414                         } else if (!PagePrivate(page)) {
415                                 if (test_page_region(page, offset, nbytes))
416                                         page_count++;
417                         }
418                 }
419
420                 bp->b_pages[i] = page;
421                 offset = 0;
422         }
423
424         if (!bp->b_locked) {
425                 for (i = 0; i < bp->b_page_count; i++)
426                         unlock_page(bp->b_pages[i]);
427         }
428
429         if (page_count == bp->b_page_count)
430                 bp->b_flags |= XBF_DONE;
431
432         XB_TRACE(bp, "lookup_pages", (long)page_count);
433         return error;
434 }
435
436 /*
437  *      Map buffer into kernel address-space if necessary.
438  */
439 STATIC int
440 _xfs_buf_map_pages(
441         xfs_buf_t               *bp,
442         uint                    flags)
443 {
444         /* A single page buffer is always mappable */
445         if (bp->b_page_count == 1) {
446                 bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
447                 bp->b_flags |= XBF_MAPPED;
448         } else if (flags & XBF_MAPPED) {
449                 if (as_list_len > 64)
450                         purge_addresses();
451                 bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
452                                         VM_MAP, PAGE_KERNEL);
453                 if (unlikely(bp->b_addr == NULL))
454                         return -ENOMEM;
455                 bp->b_addr += bp->b_offset;
456                 bp->b_flags |= XBF_MAPPED;
457         }
458
459         return 0;
460 }
461
462 /*
463  *      Finding and Reading Buffers
464  */
465
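/*
 * A typical caller obtains a locked, mapped buffer and releases it when
 * done.  A minimal sketch (not a verbatim caller; blkno/numblks are in
 * 512-byte basic blocks):
 *
 *      bp = xfs_buf_read_flags(target, blkno, numblks,
 *                              XBF_LOCK | XBF_MAPPED);
 *      if (bp) {
 *              ... use XFS_BUF_PTR(bp) ...
 *              xfs_buf_relse(bp);
 *      }
 *
 * xfs_buf_get_flags() performs the cache lookup via _xfs_buf_find(), backs
 * a freshly created buffer with page cache pages and maps it into kernel
 * space; xfs_buf_read_flags() then issues the read only if the buffer is
 * not already up to date.
 */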
466 /*
467  *      Looks up, and creates if absent, a lockable buffer for
468  *      a given range of an inode.  The buffer is returned
469  *      locked.  If other overlapping buffers exist, they are
470  *      released before the new buffer is created and locked,
471  *      which may imply that this call will block until those buffers
472  *      are unlocked.  No I/O is implied by this call.
473  */
474 xfs_buf_t *
475 _xfs_buf_find(
476         xfs_buftarg_t           *btp,   /* block device target          */
477         xfs_off_t               ioff,   /* starting offset of range     */
478         size_t                  isize,  /* length of range              */
479         xfs_buf_flags_t         flags,
480         xfs_buf_t               *new_bp)
481 {
482         xfs_off_t               range_base;
483         size_t                  range_length;
484         xfs_bufhash_t           *hash;
485         xfs_buf_t               *bp, *n;
486
487         range_base = (ioff << BBSHIFT);
488         range_length = (isize << BBSHIFT);
489
490         /* Check for IOs smaller than the sector size / not sector aligned */
491         ASSERT(!(range_length < (1 << btp->bt_sshift)));
492         ASSERT(!(range_base & (xfs_off_t)btp->bt_smask));
493
494         hash = &btp->bt_hash[hash_long((unsigned long)ioff, btp->bt_hashshift)];
495
496         spin_lock(&hash->bh_lock);
497
498         list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
499                 ASSERT(btp == bp->b_target);
500                 if (bp->b_file_offset == range_base &&
501                     bp->b_buffer_length == range_length) {
502                         /*
503                          * If we look at something, bring it to the
504                          * front of the list for next time.
505                          */
506                         atomic_inc(&bp->b_hold);
507                         list_move(&bp->b_hash_list, &hash->bh_list);
508                         goto found;
509                 }
510         }
511
512         /* No match found */
513         if (new_bp) {
514                 _xfs_buf_initialize(new_bp, btp, range_base,
515                                 range_length, flags);
516                 new_bp->b_hash = hash;
517                 list_add(&new_bp->b_hash_list, &hash->bh_list);
518         } else {
519                 XFS_STATS_INC(xb_miss_locked);
520         }
521
522         spin_unlock(&hash->bh_lock);
523         return new_bp;
524
525 found:
526         spin_unlock(&hash->bh_lock);
527
528         /* Attempt to get the semaphore without sleeping; the hash
529          * lock has already been dropped, so if this fails we either
530          * block on the semaphore or, for XBF_TRYLOCK callers, bail out.
531          */
532         if (down_trylock(&bp->b_sema)) {
533                 if (!(flags & XBF_TRYLOCK)) {
534                         /* wait for buffer ownership */
535                         XB_TRACE(bp, "get_lock", 0);
536                         xfs_buf_lock(bp);
537                         XFS_STATS_INC(xb_get_locked_waited);
538                 } else {
539                         /* We asked for a trylock and failed, no need
540                          * to look at file offset and length here, we
541                          * know that this buffer at least overlaps our
542                          * buffer and is locked, therefore our buffer
543                          * either does not exist, or is this buffer.
544                          */
545                         xfs_buf_rele(bp);
546                         XFS_STATS_INC(xb_busy_locked);
547                         return NULL;
548                 }
549         } else {
550                 /* trylock worked */
551                 XB_SET_OWNER(bp);
552         }
553
554         if (bp->b_flags & XBF_STALE) {
555                 ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
556                 bp->b_flags &= XBF_MAPPED;
557         }
558         XB_TRACE(bp, "got_lock", 0);
559         XFS_STATS_INC(xb_get_locked);
560         return bp;
561 }
562
563 /*
564  *      Assembles a buffer covering the specified range.
565  *      Storage in memory for all portions of the buffer will be allocated,
566  *      although backing storage may not be.
567  */
568 xfs_buf_t *
569 xfs_buf_get_flags(
570         xfs_buftarg_t           *target,/* target for buffer            */
571         xfs_off_t               ioff,   /* starting offset of range     */
572         size_t                  isize,  /* length of range              */
573         xfs_buf_flags_t         flags)
574 {
575         xfs_buf_t               *bp, *new_bp;
576         int                     error = 0, i;
577
578         new_bp = xfs_buf_allocate(flags);
579         if (unlikely(!new_bp))
580                 return NULL;
581
582         bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
583         if (bp == new_bp) {
584                 error = _xfs_buf_lookup_pages(bp, flags);
585                 if (error)
586                         goto no_buffer;
587         } else {
588                 xfs_buf_deallocate(new_bp);
589                 if (unlikely(bp == NULL))
590                         return NULL;
591         }
592
593         for (i = 0; i < bp->b_page_count; i++)
594                 mark_page_accessed(bp->b_pages[i]);
595
596         if (!(bp->b_flags & XBF_MAPPED)) {
597                 error = _xfs_buf_map_pages(bp, flags);
598                 if (unlikely(error)) {
599                         printk(KERN_WARNING "%s: failed to map pages\n",
600                                         __FUNCTION__);
601                         goto no_buffer;
602                 }
603         }
604
605         XFS_STATS_INC(xb_get);
606
607         /*
608          * Always fill in the block number now, the mapped cases can do
609          * their own overlay of this later.
610          */
611         bp->b_bn = ioff;
612         bp->b_count_desired = bp->b_buffer_length;
613
614         XB_TRACE(bp, "get", (unsigned long)flags);
615         return bp;
616
617  no_buffer:
618         if (flags & (XBF_LOCK | XBF_TRYLOCK))
619                 xfs_buf_unlock(bp);
620         xfs_buf_rele(bp);
621         return NULL;
622 }
623
624 xfs_buf_t *
625 xfs_buf_read_flags(
626         xfs_buftarg_t           *target,
627         xfs_off_t               ioff,
628         size_t                  isize,
629         xfs_buf_flags_t         flags)
630 {
631         xfs_buf_t               *bp;
632
633         flags |= XBF_READ;
634
635         bp = xfs_buf_get_flags(target, ioff, isize, flags);
636         if (bp) {
637                 if (!XFS_BUF_ISDONE(bp)) {
638                         XB_TRACE(bp, "read", (unsigned long)flags);
639                         XFS_STATS_INC(xb_get_read);
640                         xfs_buf_iostart(bp, flags);
641                 } else if (flags & XBF_ASYNC) {
642                         XB_TRACE(bp, "read_async", (unsigned long)flags);
643                         /*
644                          * Read ahead call which is already satisfied,
645                          * drop the buffer
646                          */
647                         goto no_buffer;
648                 } else {
649                         XB_TRACE(bp, "read_done", (unsigned long)flags);
650                         /* We do not want read in the flags */
651                         bp->b_flags &= ~XBF_READ;
652                 }
653         }
654
655         return bp;
656
657  no_buffer:
658         if (flags & (XBF_LOCK | XBF_TRYLOCK))
659                 xfs_buf_unlock(bp);
660         xfs_buf_rele(bp);
661         return NULL;
662 }
663
664 /*
665  *      Issue readahead in a deadlock-safe manner, but skip it entirely
666  *      if the backing device is already congested with reads.
667  */
668 void
669 xfs_buf_readahead(
670         xfs_buftarg_t           *target,
671         xfs_off_t               ioff,
672         size_t                  isize,
673         xfs_buf_flags_t         flags)
674 {
675         struct backing_dev_info *bdi;
676
677         bdi = target->bt_mapping->backing_dev_info;
678         if (bdi_read_congested(bdi))
679                 return;
680
681         flags |= (XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD);
682         xfs_buf_read_flags(target, ioff, isize, flags);
683 }
684
685 xfs_buf_t *
686 xfs_buf_get_empty(
687         size_t                  len,
688         xfs_buftarg_t           *target)
689 {
690         xfs_buf_t               *bp;
691
692         bp = xfs_buf_allocate(0);
693         if (bp)
694                 _xfs_buf_initialize(bp, target, 0, len, 0);
695         return bp;
696 }
697
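/*
 * Return the struct page backing an arbitrary kernel address, whether it
 * lives in the direct mapping or in vmalloc space.
 */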
698 static inline struct page *
699 mem_to_page(
700         void                    *addr)
701 {
702         if (((unsigned long)addr < VMALLOC_START) ||
703             ((unsigned long)addr >= VMALLOC_END)) {
704                 return virt_to_page(addr);
705         } else {
706                 return vmalloc_to_page(addr);
707         }
708 }
709
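/*
 * Associate caller-supplied memory with a buffer: build the page list
 * directly from the supplied address range instead of going through the
 * page cache.  The memory must stay valid for the life of the buffer.
 */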
710 int
711 xfs_buf_associate_memory(
712         xfs_buf_t               *bp,
713         void                    *mem,
714         size_t                  len)
715 {
716         int                     rval;
717         int                     i = 0;
718         size_t                  ptr;
719         size_t                  end, end_cur;
720         off_t                   offset;
721         int                     page_count;
722
723         page_count = PAGE_CACHE_ALIGN(len) >> PAGE_CACHE_SHIFT;
724         offset = (off_t) mem - ((off_t)mem & PAGE_CACHE_MASK);
725         if (offset && (len > PAGE_CACHE_SIZE))
726                 page_count++;
727
728         /* Free any previous set of page pointers */
729         if (bp->b_pages)
730                 _xfs_buf_free_pages(bp);
731
732         bp->b_pages = NULL;
733         bp->b_addr = mem;
734
735         rval = _xfs_buf_get_pages(bp, page_count, 0);
736         if (rval)
737                 return rval;
738
739         bp->b_offset = offset;
740         ptr = (size_t) mem & PAGE_CACHE_MASK;
741         end = PAGE_CACHE_ALIGN((size_t) mem + len);
742         end_cur = end;
743         /* set up first page */
744         bp->b_pages[0] = mem_to_page(mem);
745
746         ptr += PAGE_CACHE_SIZE;
747         bp->b_page_count = ++i;
748         while (ptr < end) {
749                 bp->b_pages[i] = mem_to_page((void *)ptr);
750                 bp->b_page_count = ++i;
751                 ptr += PAGE_CACHE_SIZE;
752         }
753         bp->b_locked = 0;
754
755         bp->b_count_desired = bp->b_buffer_length = len;
756         bp->b_flags |= XBF_MAPPED;
757
758         return 0;
759 }
760
761 xfs_buf_t *
762 xfs_buf_get_noaddr(
763         size_t                  len,
764         xfs_buftarg_t           *target)
765 {
766         size_t                  malloc_len = len;
767         xfs_buf_t               *bp;
768         void                    *data;
769         int                     error;
770
771         bp = xfs_buf_allocate(0);
772         if (unlikely(bp == NULL))
773                 goto fail;
774         _xfs_buf_initialize(bp, target, 0, len, 0);
775
776  try_again:
777         data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL | KM_LARGE);
778         if (unlikely(data == NULL))
779                 goto fail_free_buf;
780
781         /* check whether alignment matches.. */
782         if ((__psunsigned_t)data !=
783             ((__psunsigned_t)data & ~target->bt_smask)) {
784                 /* .. else double the size and try again */
785                 kmem_free(data, malloc_len);
786                 malloc_len <<= 1;
787                 goto try_again;
788         }
789
790         error = xfs_buf_associate_memory(bp, data, len);
791         if (error)
792                 goto fail_free_mem;
793         bp->b_flags |= _XBF_KMEM_ALLOC;
794
795         xfs_buf_unlock(bp);
796
797         XB_TRACE(bp, "no_daddr", data);
798         return bp;
799  fail_free_mem:
800         kmem_free(data, malloc_len);
801  fail_free_buf:
802         xfs_buf_free(bp);
803  fail:
804         return NULL;
805 }
806
807 /*
808  *      Increment reference count on buffer, to hold the buffer concurrently
809  *      with another thread which may release (free) the buffer asynchronously.
810  *      Must hold the buffer already to call this function.
811  */
812 void
813 xfs_buf_hold(
814         xfs_buf_t               *bp)
815 {
816         atomic_inc(&bp->b_hold);
817         XB_TRACE(bp, "hold", 0);
818 }
819
820 /*
821  *      Releases a hold on the specified buffer.  If the
822  *      hold count is 1, calls xfs_buf_free.
823  */
824 void
825 xfs_buf_rele(
826         xfs_buf_t               *bp)
827 {
828         xfs_bufhash_t           *hash = bp->b_hash;
829
830         XB_TRACE(bp, "rele", bp->b_relse);
831
832         if (unlikely(!hash)) {
833                 ASSERT(!bp->b_relse);
834                 if (atomic_dec_and_test(&bp->b_hold))
835                         xfs_buf_free(bp);
836                 return;
837         }
838
839         if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) {
840                 if (bp->b_relse) {
841                         atomic_inc(&bp->b_hold);
842                         spin_unlock(&hash->bh_lock);
843                         (*(bp->b_relse)) (bp);
844                 } else if (bp->b_flags & XBF_FS_MANAGED) {
845                         spin_unlock(&hash->bh_lock);
846                 } else {
847                         ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
848                         list_del_init(&bp->b_hash_list);
849                         spin_unlock(&hash->bh_lock);
850                         xfs_buf_free(bp);
851                 }
852         } else {
853                 /*
854                  * Catch reference count leaks
855                  */
856                 ASSERT(atomic_read(&bp->b_hold) >= 0);
857         }
858 }
859
860
861 /*
862  *      Mutual exclusion on buffers.  Locking model:
863  *
864  *      Buffers associated with inodes for which buffer locking
865  *      is not enabled are not protected by semaphores, and are
866  *      assumed to be exclusively owned by the caller.  There is a
867  *      spinlock in the buffer, used by the caller when concurrent
868  *      access is possible.
869  */
870
871 /*
872  *      Locks a buffer object, if it is not already locked.
873  *      Note that this in no way locks the underlying pages, so it is only
874  *      useful for synchronizing concurrent use of buffer objects, not for
875  *      synchronizing independent access to the underlying pages.
876  */
877 int
878 xfs_buf_cond_lock(
879         xfs_buf_t               *bp)
880 {
881         int                     locked;
882
883         locked = down_trylock(&bp->b_sema) == 0;
884         if (locked) {
885                 XB_SET_OWNER(bp);
886         }
887         XB_TRACE(bp, "cond_lock", (long)locked);
888         return locked ? 0 : -EBUSY;
889 }
890
891 #if defined(DEBUG) || defined(XFS_BLI_TRACE)
892 int
893 xfs_buf_lock_value(
894         xfs_buf_t               *bp)
895 {
896         return atomic_read(&bp->b_sema.count);
897 }
898 #endif
899
900 /*
901  *      Locks a buffer object.
902  *      Note that this in no way locks the underlying pages, so it is only
903  *      useful for synchronizing concurrent use of buffer objects, not for
904  *      synchronizing independent access to the underlying pages.
905  */
906 void
907 xfs_buf_lock(
908         xfs_buf_t               *bp)
909 {
910         XB_TRACE(bp, "lock", 0);
911         if (atomic_read(&bp->b_io_remaining))
912                 blk_run_address_space(bp->b_target->bt_mapping);
913         down(&bp->b_sema);
914         XB_SET_OWNER(bp);
915         XB_TRACE(bp, "locked", 0);
916 }
917
918 /*
919  *      Releases the lock on the buffer object.
920  *      If the buffer is marked delwri but is not queued, do so before we
921  *      unlock the buffer as we need to set flags correctly.  We also need to
922  *      take a reference for the delwri queue because the unlocker is going to
923  *      drop theirs and they don't know we just queued it.
924  */
925 void
926 xfs_buf_unlock(
927         xfs_buf_t               *bp)
928 {
929         if ((bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)) == XBF_DELWRI) {
930                 atomic_inc(&bp->b_hold);
931                 bp->b_flags |= XBF_ASYNC;
932                 xfs_buf_delwri_queue(bp, 0);
933         }
934
935         XB_CLEAR_OWNER(bp);
936         up(&bp->b_sema);
937         XB_TRACE(bp, "unlock", 0);
938 }
939
940
941 /*
942  *      Pinning Buffer Storage in Memory
943  *      Ensure that no attempt to force a buffer to disk will succeed.
944  */
945 void
946 xfs_buf_pin(
947         xfs_buf_t               *bp)
948 {
949         atomic_inc(&bp->b_pin_count);
950         XB_TRACE(bp, "pin", (long)bp->b_pin_count.counter);
951 }
952
953 void
954 xfs_buf_unpin(
955         xfs_buf_t               *bp)
956 {
957         if (atomic_dec_and_test(&bp->b_pin_count))
958                 wake_up_all(&bp->b_waiters);
959         XB_TRACE(bp, "unpin", (long)bp->b_pin_count.counter);
960 }
961
962 int
963 xfs_buf_ispin(
964         xfs_buf_t               *bp)
965 {
966         return atomic_read(&bp->b_pin_count);
967 }
968
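/*
 * Wait for the buffer's pin count to drop to zero, kicking the block
 * device queues while I/O is still outstanding on the buffer.
 */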
969 STATIC void
970 xfs_buf_wait_unpin(
971         xfs_buf_t               *bp)
972 {
973         DECLARE_WAITQUEUE       (wait, current);
974
975         if (atomic_read(&bp->b_pin_count) == 0)
976                 return;
977
978         add_wait_queue(&bp->b_waiters, &wait);
979         for (;;) {
980                 set_current_state(TASK_UNINTERRUPTIBLE);
981                 if (atomic_read(&bp->b_pin_count) == 0)
982                         break;
983                 if (atomic_read(&bp->b_io_remaining))
984                         blk_run_address_space(bp->b_target->bt_mapping);
985                 schedule();
986         }
987         remove_wait_queue(&bp->b_waiters, &wait);
988         set_current_state(TASK_RUNNING);
989 }
990
991 /*
992  *      Buffer Utility Routines
993  */
994
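/*
 * I/O completion work item, run from xfslogd_workqueue when completion
 * is deferred: invoke the buffer's b_iodone callback if one is set,
 * otherwise release async buffers.
 */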
995 STATIC void
996 xfs_buf_iodone_work(
997         struct work_struct      *work)
998 {
999         xfs_buf_t               *bp =
1000                 container_of(work, xfs_buf_t, b_iodone_work);
1001
1002         if (bp->b_iodone)
1003                 (*(bp->b_iodone))(bp);
1004         else if (bp->b_flags & XBF_ASYNC)
1005                 xfs_buf_relse(bp);
1006 }
1007
1008 void
1009 xfs_buf_ioend(
1010         xfs_buf_t               *bp,
1011         int                     schedule)
1012 {
1013         bp->b_flags &= ~(XBF_READ | XBF_WRITE);
1014         if (bp->b_error == 0)
1015                 bp->b_flags |= XBF_DONE;
1016
1017         XB_TRACE(bp, "iodone", bp->b_iodone);
1018
1019         if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
1020                 if (schedule) {
1021                         INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
1022                         queue_work(xfslogd_workqueue, &bp->b_iodone_work);
1023                 } else {
1024                         xfs_buf_iodone_work(&bp->b_iodone_work);
1025                 }
1026         } else {
1027                 up(&bp->b_iodonesema);
1028         }
1029 }
1030
1031 void
1032 xfs_buf_ioerror(
1033         xfs_buf_t               *bp,
1034         int                     error)
1035 {
1036         ASSERT(error >= 0 && error <= 0xffff);
1037         bp->b_error = (unsigned short)error;
1038         XB_TRACE(bp, "ioerror", (unsigned long)error);
1039 }
1040
1041 /*
1042  *      Initiate I/O on a buffer, based on the flags supplied.
1043  *      The b_iodone routine in the buffer supplied will only be called
1044  *      when all of the subsidiary I/O requests, if any, have been completed.
1045  */
1046 int
1047 xfs_buf_iostart(
1048         xfs_buf_t               *bp,
1049         xfs_buf_flags_t         flags)
1050 {
1051         int                     status = 0;
1052
1053         XB_TRACE(bp, "iostart", (unsigned long)flags);
1054
1055         if (flags & XBF_DELWRI) {
1056                 bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC);
1057                 bp->b_flags |= flags & (XBF_DELWRI | XBF_ASYNC);
1058                 xfs_buf_delwri_queue(bp, 1);
1059                 return status;
1060         }
1061
1062         bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \
1063                         XBF_READ_AHEAD | _XBF_RUN_QUEUES);
1064         bp->b_flags |= flags & (XBF_READ | XBF_WRITE | XBF_ASYNC | \
1065                         XBF_READ_AHEAD | _XBF_RUN_QUEUES);
1066
1067         BUG_ON(bp->b_bn == XFS_BUF_DADDR_NULL);
1068
1069         /* For writes allow an alternate strategy routine to precede
1070          * the actual I/O request (which may not be issued at all in
1071          * a shutdown situation, for example).
1072          */
1073         status = (flags & XBF_WRITE) ?
1074                 xfs_buf_iostrategy(bp) : xfs_buf_iorequest(bp);
1075
1076         /* Wait for I/O if we are not an async request.
1077          * Note: async I/O request completion will release the buffer,
1078          * and that can already be done by this point.  So using the
1079          * buffer pointer from here on, after async I/O, is invalid.
1080          */
1081         if (!status && !(flags & XBF_ASYNC))
1082                 status = xfs_buf_iowait(bp);
1083
1084         return status;
1085 }
1086
1087 STATIC __inline__ int
1088 _xfs_buf_iolocked(
1089         xfs_buf_t               *bp)
1090 {
1091         ASSERT(bp->b_flags & (XBF_READ | XBF_WRITE));
1092         if (bp->b_flags & XBF_READ)
1093                 return bp->b_locked;
1094         return 0;
1095 }
1096
1097 STATIC __inline__ void
1098 _xfs_buf_ioend(
1099         xfs_buf_t               *bp,
1100         int                     schedule)
1101 {
1102         if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
1103                 bp->b_locked = 0;
1104                 xfs_buf_ioend(bp, schedule);
1105         }
1106 }
1107
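/*
 * Per-bio completion handler.  Walk the bio_vec array backwards, marking
 * each page (or sub-page region) up to date unless an error occurred,
 * unlocking pages we locked for the I/O, and finally dropping this bio's
 * reference on b_io_remaining.
 */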
1108 STATIC int
1109 xfs_buf_bio_end_io(
1110         struct bio              *bio,
1111         unsigned int            bytes_done,
1112         int                     error)
1113 {
1114         xfs_buf_t               *bp = (xfs_buf_t *)bio->bi_private;
1115         unsigned int            blocksize = bp->b_target->bt_bsize;
1116         struct bio_vec          *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1117
1118         if (bio->bi_size)
1119                 return 1;
1120
1121         if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
1122                 bp->b_error = EIO;
1123
1124         do {
1125                 struct page     *page = bvec->bv_page;
1126
1127                 ASSERT(!PagePrivate(page));
1128                 if (unlikely(bp->b_error)) {
1129                         if (bp->b_flags & XBF_READ)
1130                                 ClearPageUptodate(page);
1131                 } else if (blocksize >= PAGE_CACHE_SIZE) {
1132                         SetPageUptodate(page);
1133                 } else if (!PagePrivate(page) &&
1134                                 (bp->b_flags & _XBF_PAGE_CACHE)) {
1135                         set_page_region(page, bvec->bv_offset, bvec->bv_len);
1136                 }
1137
1138                 if (--bvec >= bio->bi_io_vec)
1139                         prefetchw(&bvec->bv_page->flags);
1140
1141                 if (_xfs_buf_iolocked(bp)) {
1142                         unlock_page(page);
1143                 }
1144         } while (bvec >= bio->bi_io_vec);
1145
1146         _xfs_buf_ioend(bp, 1);
1147         bio_put(bio);
1148         return 0;
1149 }
1150
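/*
 * Map the buffer onto one or more bios and submit them.  Buffers larger
 * than a single bio can carry are split into chunks, each chunk taking
 * its own reference on b_io_remaining before submit_bio().
 */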
1151 STATIC void
1152 _xfs_buf_ioapply(
1153         xfs_buf_t               *bp)
1154 {
1155         int                     i, rw, map_i, total_nr_pages, nr_pages;
1156         struct bio              *bio;
1157         int                     offset = bp->b_offset;
1158         int                     size = bp->b_count_desired;
1159         sector_t                sector = bp->b_bn;
1160         unsigned int            blocksize = bp->b_target->bt_bsize;
1161         int                     locking = _xfs_buf_iolocked(bp);
1162
1163         total_nr_pages = bp->b_page_count;
1164         map_i = 0;
1165
1166         if (bp->b_flags & XBF_ORDERED) {
1167                 ASSERT(!(bp->b_flags & XBF_READ));
1168                 rw = WRITE_BARRIER;
1169         } else if (bp->b_flags & _XBF_RUN_QUEUES) {
1170                 ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
1171                 bp->b_flags &= ~_XBF_RUN_QUEUES;
1172                 rw = (bp->b_flags & XBF_WRITE) ? WRITE_SYNC : READ_SYNC;
1173         } else {
1174                 rw = (bp->b_flags & XBF_WRITE) ? WRITE :
1175                      (bp->b_flags & XBF_READ_AHEAD) ? READA : READ;
1176         }
1177
1178         /* Special code path for reading a sub-page-size buffer: read in
1179          * the whole page, and hence also the other metadata in the same
1180          * page.  This optimization is only valid when the filesystem
1181          * block size is not smaller than the page size.
1182          */
1183         if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
1184             (bp->b_flags & XBF_READ) && locking &&
1185             (blocksize >= PAGE_CACHE_SIZE)) {
1186                 bio = bio_alloc(GFP_NOIO, 1);
1187
1188                 bio->bi_bdev = bp->b_target->bt_bdev;
1189                 bio->bi_sector = sector - (offset >> BBSHIFT);
1190                 bio->bi_end_io = xfs_buf_bio_end_io;
1191                 bio->bi_private = bp;
1192
1193                 bio_add_page(bio, bp->b_pages[0], PAGE_CACHE_SIZE, 0);
1194                 size = 0;
1195
1196                 atomic_inc(&bp->b_io_remaining);
1197
1198                 goto submit_io;
1199         }
1200
1201         /* Lock down the pages which we need to for the request */
1202         if (locking && (bp->b_flags & XBF_WRITE) && (bp->b_locked == 0)) {
1203                 for (i = 0; size; i++) {
1204                         int             nbytes = PAGE_CACHE_SIZE - offset;
1205                         struct page     *page = bp->b_pages[i];
1206
1207                         if (nbytes > size)
1208                                 nbytes = size;
1209
1210                         lock_page(page);
1211
1212                         size -= nbytes;
1213                         offset = 0;
1214                 }
1215                 offset = bp->b_offset;
1216                 size = bp->b_count_desired;
1217         }
1218
1219 next_chunk:
1220         atomic_inc(&bp->b_io_remaining);
1221         nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
1222         if (nr_pages > total_nr_pages)
1223                 nr_pages = total_nr_pages;
1224
1225         bio = bio_alloc(GFP_NOIO, nr_pages);
1226         bio->bi_bdev = bp->b_target->bt_bdev;
1227         bio->bi_sector = sector;
1228         bio->bi_end_io = xfs_buf_bio_end_io;
1229         bio->bi_private = bp;
1230
1231         for (; size && nr_pages; nr_pages--, map_i++) {
1232                 int     rbytes, nbytes = PAGE_CACHE_SIZE - offset;
1233
1234                 if (nbytes > size)
1235                         nbytes = size;
1236
1237                 rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset);
1238                 if (rbytes < nbytes)
1239                         break;
1240
1241                 offset = 0;
1242                 sector += nbytes >> BBSHIFT;
1243                 size -= nbytes;
1244                 total_nr_pages--;
1245         }
1246
1247 submit_io:
1248         if (likely(bio->bi_size)) {
1249                 submit_bio(rw, bio);
1250                 if (size)
1251                         goto next_chunk;
1252         } else {
1253                 bio_put(bio);
1254                 xfs_buf_ioerror(bp, EIO);
1255         }
1256 }
1257
1258 int
1259 xfs_buf_iorequest(
1260         xfs_buf_t               *bp)
1261 {
1262         XB_TRACE(bp, "iorequest", 0);
1263
1264         if (bp->b_flags & XBF_DELWRI) {
1265                 xfs_buf_delwri_queue(bp, 1);
1266                 return 0;
1267         }
1268
1269         if (bp->b_flags & XBF_WRITE) {
1270                 xfs_buf_wait_unpin(bp);
1271         }
1272
1273         xfs_buf_hold(bp);
1274
1275         /* Set the count to 1 initially so that an I/O completion
1276          * callout which happens before we have started all the I/O
1277          * cannot call xfs_buf_ioend too early.
1278          */
1279         atomic_set(&bp->b_io_remaining, 1);
1280         _xfs_buf_ioapply(bp);
1281         _xfs_buf_ioend(bp, 0);
1282
1283         xfs_buf_rele(bp);
1284         return 0;
1285 }
1286
1287 /*
1288  *      Waits for I/O to complete on the buffer supplied.
1289  *      It returns immediately if no I/O is pending.
1290  *      It returns the I/O error code, if any, or 0 if there was no error.
1291  */
1292 int
1293 xfs_buf_iowait(
1294         xfs_buf_t               *bp)
1295 {
1296         XB_TRACE(bp, "iowait", 0);
1297         if (atomic_read(&bp->b_io_remaining))
1298                 blk_run_address_space(bp->b_target->bt_mapping);
1299         down(&bp->b_iodonesema);
1300         XB_TRACE(bp, "iowaited", (long)bp->b_error);
1301         return bp->b_error;
1302 }
1303
1304 xfs_caddr_t
1305 xfs_buf_offset(
1306         xfs_buf_t               *bp,
1307         size_t                  offset)
1308 {
1309         struct page             *page;
1310
1311         if (bp->b_flags & XBF_MAPPED)
1312                 return XFS_BUF_PTR(bp) + offset;
1313
1314         offset += bp->b_offset;
1315         page = bp->b_pages[offset >> PAGE_CACHE_SHIFT];
1316         return (xfs_caddr_t)page_address(page) + (offset & (PAGE_CACHE_SIZE-1));
1317 }
1318
1319 /*
1320  *      Move data into or out of a buffer.
1321  */
1322 void
1323 xfs_buf_iomove(
1324         xfs_buf_t               *bp,    /* buffer to process            */
1325         size_t                  boff,   /* starting buffer offset       */
1326         size_t                  bsize,  /* length to copy               */
1327         caddr_t                 data,   /* data address                 */
1328         xfs_buf_rw_t            mode)   /* read/write/zero flag         */
1329 {
1330         size_t                  bend, cpoff, csize;
1331         struct page             *page;
1332
1333         bend = boff + bsize;
1334         while (boff < bend) {
1335                 page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
1336                 cpoff = xfs_buf_poff(boff + bp->b_offset);
1337                 csize = min_t(size_t,
1338                               PAGE_CACHE_SIZE-cpoff, bp->b_count_desired-boff);
1339
1340                 ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE));
1341
1342                 switch (mode) {
1343                 case XBRW_ZERO:
1344                         memset(page_address(page) + cpoff, 0, csize);
1345                         break;
1346                 case XBRW_READ:
1347                         memcpy(data, page_address(page) + cpoff, csize);
1348                         break;
1349                 case XBRW_WRITE:
1350                         memcpy(page_address(page) + cpoff, data, csize);
1351                 }
1352
1353                 boff += csize;
1354                 data += csize;
1355         }
1356 }
1357
1358 /*
1359  *      Handling of buffer targets (buftargs).
1360  */
1361
1362 /*
1363  *      Wait for any bufs with callbacks that have been submitted but
1364  *      have not yet returned... walk the hash list for the target.
1365  */
1366 void
1367 xfs_wait_buftarg(
1368         xfs_buftarg_t   *btp)
1369 {
1370         xfs_buf_t       *bp, *n;
1371         xfs_bufhash_t   *hash;
1372         uint            i;
1373
1374         for (i = 0; i < (1 << btp->bt_hashshift); i++) {
1375                 hash = &btp->bt_hash[i];
1376 again:
1377                 spin_lock(&hash->bh_lock);
1378                 list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
1379                         ASSERT(btp == bp->b_target);
1380                         if (!(bp->b_flags & XBF_FS_MANAGED)) {
1381                                 spin_unlock(&hash->bh_lock);
1382                                 /*
1383                                  * Catch superblock reference count leaks
1384                                  * immediately
1385                                  */
1386                                 BUG_ON(bp->b_bn == 0);
1387                                 delay(100);
1388                                 goto again;
1389                         }
1390                 }
1391                 spin_unlock(&hash->bh_lock);
1392         }
1393 }
1394
1395 /*
1396  *      Allocate buffer hash table for a given target.
1397  *      For devices containing metadata (i.e. not the log/realtime devices)
1398  *      we need to allocate a much larger hash table.
1399  */
1400 STATIC void
1401 xfs_alloc_bufhash(
1402         xfs_buftarg_t           *btp,
1403         int                     external)
1404 {
1405         unsigned int            i;
1406
1407         btp->bt_hashshift = external ? 3 : 8;   /* 8 or 256 buckets */
1408         btp->bt_hashmask = (1 << btp->bt_hashshift) - 1;
1409         btp->bt_hash = kmem_zalloc((1 << btp->bt_hashshift) *
1410                                         sizeof(xfs_bufhash_t), KM_SLEEP | KM_LARGE);
1411         for (i = 0; i < (1 << btp->bt_hashshift); i++) {
1412                 spin_lock_init(&btp->bt_hash[i].bh_lock);
1413                 INIT_LIST_HEAD(&btp->bt_hash[i].bh_list);
1414         }
1415 }
1416
1417 STATIC void
1418 xfs_free_bufhash(
1419         xfs_buftarg_t           *btp)
1420 {
1421         kmem_free(btp->bt_hash, (1<<btp->bt_hashshift) * sizeof(xfs_bufhash_t));
1422         btp->bt_hash = NULL;
1423 }
1424
1425 /*
1426  *      buftarg list for delwrite queue processing
1427  */
1428 STATIC LIST_HEAD(xfs_buftarg_list);
1429 STATIC DEFINE_SPINLOCK(xfs_buftarg_lock);
1430
1431 STATIC void
1432 xfs_register_buftarg(
1433         xfs_buftarg_t           *btp)
1434 {
1435         spin_lock(&xfs_buftarg_lock);
1436         list_add(&btp->bt_list, &xfs_buftarg_list);
1437         spin_unlock(&xfs_buftarg_lock);
1438 }
1439
1440 STATIC void
1441 xfs_unregister_buftarg(
1442         xfs_buftarg_t           *btp)
1443 {
1444         spin_lock(&xfs_buftarg_lock);
1445         list_del(&btp->bt_list);
1446         spin_unlock(&xfs_buftarg_lock);
1447 }
1448
1449 void
1450 xfs_free_buftarg(
1451         xfs_buftarg_t           *btp,
1452         int                     external)
1453 {
1454         xfs_flush_buftarg(btp, 1);
1455         if (external)
1456                 xfs_blkdev_put(btp->bt_bdev);
1457         xfs_free_bufhash(btp);
1458         iput(btp->bt_mapping->host);
1459
1460         /* Unregister the buftarg first so that we don't get a
1461          * wakeup finding a non-existent task
1462          */
1463         xfs_unregister_buftarg(btp);
1464         kthread_stop(btp->bt_task);
1465
1466         kmem_free(btp, sizeof(*btp));
1467 }
1468
1469 STATIC int
1470 xfs_setsize_buftarg_flags(
1471         xfs_buftarg_t           *btp,
1472         unsigned int            blocksize,
1473         unsigned int            sectorsize,
1474         int                     verbose)
1475 {
1476         btp->bt_bsize = blocksize;
1477         btp->bt_sshift = ffs(sectorsize) - 1;
1478         btp->bt_smask = sectorsize - 1;
1479
1480         if (set_blocksize(btp->bt_bdev, sectorsize)) {
1481                 printk(KERN_WARNING
1482                         "XFS: Cannot set_blocksize to %u on device %s\n",
1483                         sectorsize, XFS_BUFTARG_NAME(btp));
1484                 return EINVAL;
1485         }
1486
1487         if (verbose &&
1488             (PAGE_CACHE_SIZE / BITS_PER_LONG) > sectorsize) {
1489                 printk(KERN_WARNING
1490                         "XFS: %u byte sectors in use on device %s.  "
1491                         "This is suboptimal; %u or greater is ideal.\n",
1492                         sectorsize, XFS_BUFTARG_NAME(btp),
1493                         (unsigned int)PAGE_CACHE_SIZE / BITS_PER_LONG);
1494         }
1495
1496         return 0;
1497 }
1498
1499 /*
1500  *      When allocating the initial buffer target we have not yet
1501  *      read in the superblock, so we don't know what size sectors
1502  *      are being used at this early stage.  Play safe.
1503  */
1504 STATIC int
1505 xfs_setsize_buftarg_early(
1506         xfs_buftarg_t           *btp,
1507         struct block_device     *bdev)
1508 {
1509         return xfs_setsize_buftarg_flags(btp,
1510                         PAGE_CACHE_SIZE, bdev_hardsect_size(bdev), 0);
1511 }
1512
1513 int
1514 xfs_setsize_buftarg(
1515         xfs_buftarg_t           *btp,
1516         unsigned int            blocksize,
1517         unsigned int            sectorsize)
1518 {
1519         return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
1520 }
1521
1522 STATIC int
1523 xfs_mapping_buftarg(
1524         xfs_buftarg_t           *btp,
1525         struct block_device     *bdev)
1526 {
1527         struct backing_dev_info *bdi;
1528         struct inode            *inode;
1529         struct address_space    *mapping;
1530         static const struct address_space_operations mapping_aops = {
1531                 .sync_page = block_sync_page,
1532                 .migratepage = fail_migrate_page,
1533         };
1534
1535         inode = new_inode(bdev->bd_inode->i_sb);
1536         if (!inode) {
1537                 printk(KERN_WARNING
1538                         "XFS: Cannot allocate mapping inode for device %s\n",
1539                         XFS_BUFTARG_NAME(btp));
1540                 return ENOMEM;
1541         }
1542         inode->i_mode = S_IFBLK;
1543         inode->i_bdev = bdev;
1544         inode->i_rdev = bdev->bd_dev;
1545         bdi = blk_get_backing_dev_info(bdev);
1546         if (!bdi)
1547                 bdi = &default_backing_dev_info;
1548         mapping = &inode->i_data;
1549         mapping->a_ops = &mapping_aops;
1550         mapping->backing_dev_info = bdi;
1551         mapping_set_gfp_mask(mapping, GFP_NOFS);
1552         btp->bt_mapping = mapping;
1553         return 0;
1554 }
1555
1556 STATIC int
1557 xfs_alloc_delwrite_queue(
1558         xfs_buftarg_t           *btp)
1559 {
1560         int     error = 0;
1561
1562         INIT_LIST_HEAD(&btp->bt_list);
1563         INIT_LIST_HEAD(&btp->bt_delwrite_queue);
1564         spinlock_init(&btp->bt_delwrite_lock, "delwri_lock");
1565         btp->bt_flags = 0;
1566         btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd");
1567         if (IS_ERR(btp->bt_task)) {
1568                 error = PTR_ERR(btp->bt_task);
1569                 goto out_error;
1570         }
1571         xfs_register_buftarg(btp);
1572 out_error:
1573         return error;
1574 }
1575
1576 xfs_buftarg_t *
1577 xfs_alloc_buftarg(
1578         struct block_device     *bdev,
1579         int                     external)
1580 {
1581         xfs_buftarg_t           *btp;
1582
1583         btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
1584
1585         btp->bt_dev =  bdev->bd_dev;
1586         btp->bt_bdev = bdev;
1587         if (xfs_setsize_buftarg_early(btp, bdev))
1588                 goto error;
1589         if (xfs_mapping_buftarg(btp, bdev))
1590                 goto error;
1591         if (xfs_alloc_delwrite_queue(btp))
1592                 goto error;
1593         xfs_alloc_bufhash(btp, external);
1594         return btp;
1595
1596 error:
1597         kmem_free(btp, sizeof(*btp));
1598         return NULL;
1599 }
1600
1601
1602 /*
1603  *      Delayed write buffer handling
1604  */
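/*
 * Buffers marked XBF_DELWRI are placed on the per-target delwrite queue
 * with an extra hold and timestamped; xfsbufd later writes back those
 * that have aged beyond xfs_buf_age_centisecs.
 */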
1605 STATIC void
1606 xfs_buf_delwri_queue(
1607         xfs_buf_t               *bp,
1608         int                     unlock)
1609 {
1610         struct list_head        *dwq = &bp->b_target->bt_delwrite_queue;
1611         spinlock_t              *dwlk = &bp->b_target->bt_delwrite_lock;
1612
1613         XB_TRACE(bp, "delwri_q", (long)unlock);
1614         ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC));
1615
1616         spin_lock(dwlk);
1617         /* If already in the queue, dequeue and place at tail */
1618         if (!list_empty(&bp->b_list)) {
1619                 ASSERT(bp->b_flags & _XBF_DELWRI_Q);
1620                 if (unlock)
1621                         atomic_dec(&bp->b_hold);
1622                 list_del(&bp->b_list);
1623         }
1624
1625         bp->b_flags |= _XBF_DELWRI_Q;
1626         list_add_tail(&bp->b_list, dwq);
1627         bp->b_queuetime = jiffies;
1628         spin_unlock(dwlk);
1629
1630         if (unlock)
1631                 xfs_buf_unlock(bp);
1632 }
1633
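/*
 *	Remove a buffer from its target's delayed write queue, if it is
 *	queued, and drop the reference the queue held on it.
 */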
1634 void
1635 xfs_buf_delwri_dequeue(
1636         xfs_buf_t               *bp)
1637 {
1638         spinlock_t              *dwlk = &bp->b_target->bt_delwrite_lock;
1639         int                     dequeued = 0;
1640
1641         spin_lock(dwlk);
1642         if ((bp->b_flags & XBF_DELWRI) && !list_empty(&bp->b_list)) {
1643                 ASSERT(bp->b_flags & _XBF_DELWRI_Q);
1644                 list_del_init(&bp->b_list);
1645                 dequeued = 1;
1646         }
1647         bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
1648         spin_unlock(dwlk);
1649
1650         if (dequeued)
1651                 xfs_buf_rele(bp);
1652
1653         XB_TRACE(bp, "delwri_dq", (long)dequeued);
1654 }
1655
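/*
 *	Wait for all pending work on the given workqueue to complete;
 *	currently just a flush_workqueue() wrapper.
 */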
1656 STATIC void
1657 xfs_buf_runall_queues(
1658         struct workqueue_struct *queue)
1659 {
1660         flush_workqueue(queue);
1661 }
1662
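/*
 *	Memory shaker callback (registered in xfs_buf_init()): under memory
 *	pressure, ask every registered buffer target's xfsbufd to flush its
 *	delayed write queue right away, unless that target is currently
 *	parked for a freeze.
 */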
1663 STATIC int
1664 xfsbufd_wakeup(
1665         int                     priority,
1666         gfp_t                   mask)
1667 {
1668         xfs_buftarg_t           *btp;
1669
1670         spin_lock(&xfs_buftarg_lock);
1671         list_for_each_entry(btp, &xfs_buftarg_list, bt_list) {
1672                 if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags))
1673                         continue;
1674                 set_bit(XBT_FORCE_FLUSH, &btp->bt_flags);
1675                 wake_up_process(btp->bt_task);
1676         }
1677         spin_unlock(&xfs_buftarg_lock);
1678         return 0;
1679 }
1680
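/*
 *	Per-target delayed write flusher thread.  Wakes up every
 *	xfs_buf_timer_centisecs, writes out unpinned, unlocked buffers that
 *	have been queued for longer than xfs_buf_age_centisecs, and writes
 *	out everything when a flush has been forced.
 */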
1681 STATIC int
1682 xfsbufd(
1683         void                    *data)
1684 {
1685         struct list_head        tmp;
1686         unsigned long           age;
1687         xfs_buftarg_t           *target = (xfs_buftarg_t *)data;
1688         xfs_buf_t               *bp, *n;
1689         struct list_head        *dwq = &target->bt_delwrite_queue;
1690         spinlock_t              *dwlk = &target->bt_delwrite_lock;
1691         int                     count;
1692
1693         current->flags |= PF_MEMALLOC;
1694
1695         INIT_LIST_HEAD(&tmp);
1696         do {
1697                 if (unlikely(freezing(current))) {
1698                         set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
1699                         refrigerator();
1700                 } else {
1701                         clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
1702                 }
1703
1704                 schedule_timeout_interruptible(
1705                         xfs_buf_timer_centisecs * msecs_to_jiffies(10));
1706
1707                 count = 0;
1708                 age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
1709                 spin_lock(dwlk);
1710                 list_for_each_entry_safe(bp, n, dwq, b_list) {
1711                         XB_TRACE(bp, "walkq1", (long)xfs_buf_ispin(bp));
1712                         ASSERT(bp->b_flags & XBF_DELWRI);
1713
1714                         if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) {
1715                                 if (!test_bit(XBT_FORCE_FLUSH,
1716                                                 &target->bt_flags) &&
1717                                     time_before(jiffies,
1718                                                 bp->b_queuetime + age)) {
1719                                         xfs_buf_unlock(bp);
1720                                         break;
1721                                 }
1722
1723                                 bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q|
1724                                                  _XBF_RUN_QUEUES);
1725                                 bp->b_flags |= XBF_WRITE;
1726                                 list_move_tail(&bp->b_list, &tmp);
1727                                 count++;
1728                         }
1729                 }
1730                 spin_unlock(dwlk);
1731
1732                 while (!list_empty(&tmp)) {
1733                         bp = list_entry(tmp.next, xfs_buf_t, b_list);
1734                         ASSERT(target == bp->b_target);
1735
1736                         list_del_init(&bp->b_list);
1737                         xfs_buf_iostrategy(bp);
1738                 }
1739
1740                 if (as_list_len > 0)
1741                         purge_addresses();
1742                 if (count)
1743                         blk_run_address_space(target->bt_mapping);
1744
1745                 clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
1746         } while (!kthread_should_stop());
1747
1748         return 0;
1749 }
1750
1751 /*
1752  *      Write out all buffers queued for delayed write against the given
1753  *      target, skipping pinned buffers and returning their count.  This is
1754  *      used in filesystem error handling to preserve metadata consistency.
1755  */
1756 int
1757 xfs_flush_buftarg(
1758         xfs_buftarg_t           *target,
1759         int                     wait)
1760 {
1761         struct list_head        tmp;
1762         xfs_buf_t               *bp, *n;
1763         int                     pincount = 0;
1764         struct list_head        *dwq = &target->bt_delwrite_queue;
1765         spinlock_t              *dwlk = &target->bt_delwrite_lock;
1766
1767         xfs_buf_runall_queues(xfsdatad_workqueue);
1768         xfs_buf_runall_queues(xfslogd_workqueue);
1769
1770         INIT_LIST_HEAD(&tmp);
1771         spin_lock(dwlk);
1772         list_for_each_entry_safe(bp, n, dwq, b_list) {
1773                 ASSERT(bp->b_target == target);
1774                 ASSERT(bp->b_flags & (XBF_DELWRI | _XBF_DELWRI_Q));
1775                 XB_TRACE(bp, "walkq2", (long)xfs_buf_ispin(bp));
1776                 if (xfs_buf_ispin(bp)) {
1777                         pincount++;
1778                         continue;
1779                 }
1780
1781                 list_move_tail(&bp->b_list, &tmp);
1782         }
1783         spin_unlock(dwlk);
1784
1785         /*
1786          * Dropped the delayed write list lock; now walk the temporary list
1787          */
1788         list_for_each_entry_safe(bp, n, &tmp, b_list) {
1789                 xfs_buf_lock(bp);
1790                 bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q|_XBF_RUN_QUEUES);
1791                 bp->b_flags |= XBF_WRITE;
1792                 if (wait)
1793                         bp->b_flags &= ~XBF_ASYNC;
1794                 else
1795                         list_del_init(&bp->b_list);
1796
1797                 xfs_buf_iostrategy(bp);
1798         }
1799
1800         if (wait)
1801                 blk_run_address_space(target->bt_mapping);
1802
1803         /*
1804          * Remaining list items must be waited on and released before returning
1805          */
1806         while (!list_empty(&tmp)) {
1807                 bp = list_entry(tmp.next, xfs_buf_t, b_list);
1808
1809                 list_del_init(&bp->b_list);
1810                 xfs_iowait(bp);
1811                 xfs_buf_relse(bp);
1812         }
1813
1814         return pincount;
1815 }
1816
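/*
 *	Buffer cache initialisation: allocate the trace buffer (if enabled)
 *	and the xfs_buf zone, create the log and data I/O completion
 *	workqueues, and register the memory shaker, unwinding in reverse
 *	order on failure.
 */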
1817 int __init
1818 xfs_buf_init(void)
1819 {
1820 #ifdef XFS_BUF_TRACE
1821         xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_SLEEP);
1822 #endif
1823
1824         xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
1825                                                 KM_ZONE_HWALIGN, NULL);
1826         if (!xfs_buf_zone)
1827                 goto out_free_trace_buf;
1828
1829         xfslogd_workqueue = create_workqueue("xfslogd");
1830         if (!xfslogd_workqueue)
1831                 goto out_free_buf_zone;
1832
1833         xfsdatad_workqueue = create_workqueue("xfsdatad");
1834         if (!xfsdatad_workqueue)
1835                 goto out_destroy_xfslogd_workqueue;
1836
1837         xfs_buf_shake = kmem_shake_register(xfsbufd_wakeup);
1838         if (!xfs_buf_shake)
1839                 goto out_destroy_xfsdatad_workqueue;
1840
1841         return 0;
1842
1843  out_destroy_xfsdatad_workqueue:
1844         destroy_workqueue(xfsdatad_workqueue);
1845  out_destroy_xfslogd_workqueue:
1846         destroy_workqueue(xfslogd_workqueue);
1847  out_free_buf_zone:
1848         kmem_zone_destroy(xfs_buf_zone);
1849  out_free_trace_buf:
1850 #ifdef XFS_BUF_TRACE
1851         ktrace_free(xfs_buf_trace_buf);
1852 #endif
1853         return -ENOMEM;
1854 }
1855
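/*
 *	Tear down everything set up by xfs_buf_init(), in reverse order.
 */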
1856 void
1857 xfs_buf_terminate(void)
1858 {
1859         kmem_shake_deregister(xfs_buf_shake);
1860         destroy_workqueue(xfsdatad_workqueue);
1861         destroy_workqueue(xfslogd_workqueue);
1862         kmem_zone_destroy(xfs_buf_zone);
1863 #ifdef XFS_BUF_TRACE
1864         ktrace_free(xfs_buf_trace_buf);
1865 #endif
1866 }