[XFS] Fix double free in xfs_buf_get_noaddr error handling path
fs/xfs/linux-2.6/xfs_buf.c
1 /*
2  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include <linux/stddef.h>
20 #include <linux/errno.h>
21 #include <linux/slab.h>
22 #include <linux/pagemap.h>
23 #include <linux/init.h>
24 #include <linux/vmalloc.h>
25 #include <linux/bio.h>
26 #include <linux/sysctl.h>
27 #include <linux/proc_fs.h>
28 #include <linux/workqueue.h>
29 #include <linux/percpu.h>
30 #include <linux/blkdev.h>
31 #include <linux/hash.h>
32 #include <linux/kthread.h>
33 #include <linux/migrate.h>
34 #include <linux/backing-dev.h>
35 #include <linux/freezer.h>
36
37 static kmem_zone_t *xfs_buf_zone;
38 static kmem_shaker_t xfs_buf_shake;
39 STATIC int xfsbufd(void *);
40 STATIC int xfsbufd_wakeup(int, gfp_t);
41 STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
42
43 static struct workqueue_struct *xfslogd_workqueue;
44 struct workqueue_struct *xfsdatad_workqueue;
45
46 #ifdef XFS_BUF_TRACE
47 void
48 xfs_buf_trace(
49         xfs_buf_t       *bp,
50         char            *id,
51         void            *data,
52         void            *ra)
53 {
54         ktrace_enter(xfs_buf_trace_buf,
55                 bp, id,
56                 (void *)(unsigned long)bp->b_flags,
57                 (void *)(unsigned long)bp->b_hold.counter,
58                 (void *)(unsigned long)bp->b_sema.count.counter,
59                 (void *)current,
60                 data, ra,
61                 (void *)(unsigned long)((bp->b_file_offset>>32) & 0xffffffff),
62                 (void *)(unsigned long)(bp->b_file_offset & 0xffffffff),
63                 (void *)(unsigned long)bp->b_buffer_length,
64                 NULL, NULL, NULL, NULL, NULL);
65 }
66 ktrace_t *xfs_buf_trace_buf;
67 #define XFS_BUF_TRACE_SIZE      4096
68 #define XB_TRACE(bp, id, data)  \
69         xfs_buf_trace(bp, id, (void *)data, (void *)__builtin_return_address(0))
70 #else
71 #define XB_TRACE(bp, id, data)  do { } while (0)
72 #endif
73
74 #ifdef XFS_BUF_LOCK_TRACKING
75 # define XB_SET_OWNER(bp)       ((bp)->b_last_holder = current->pid)
76 # define XB_CLEAR_OWNER(bp)     ((bp)->b_last_holder = -1)
77 # define XB_GET_OWNER(bp)       ((bp)->b_last_holder)
78 #else
79 # define XB_SET_OWNER(bp)       do { } while (0)
80 # define XB_CLEAR_OWNER(bp)     do { } while (0)
81 # define XB_GET_OWNER(bp)       do { } while (0)
82 #endif
83
84 #define xb_to_gfp(flags) \
85         ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
86           ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)
87
88 #define xb_to_km(flags) \
89          (((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)
90
91 #define xfs_buf_allocate(flags) \
92         kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags))
93 #define xfs_buf_deallocate(bp) \
94         kmem_zone_free(xfs_buf_zone, (bp));
95
96 /*
97  *      Page Region interfaces.
98  *
99  *      For pages in filesystems where the blocksize is smaller than the
100  *      pagesize, we use the page->private field (long) to hold a bitmap
101  *      of uptodate regions within the page.
102  *
103  *      Each such region is "bytes per page / bits per long" bytes long.
104  *
105  *      NBPPR == number-of-bytes-per-page-region
106  *      BTOPR == bytes-to-page-region (rounded up)
107  *      BTOPRT == bytes-to-page-region-truncated (rounded down)
108  */
109 #if (BITS_PER_LONG == 32)
110 #define PRSHIFT         (PAGE_CACHE_SHIFT - 5)  /* (32 == 1<<5) */
111 #elif (BITS_PER_LONG == 64)
112 #define PRSHIFT         (PAGE_CACHE_SHIFT - 6)  /* (64 == 1<<6) */
113 #else
114 #error BITS_PER_LONG must be 32 or 64
115 #endif
116 #define NBPPR           (PAGE_CACHE_SIZE/BITS_PER_LONG)
117 #define BTOPR(b)        (((unsigned int)(b) + (NBPPR - 1)) >> PRSHIFT)
118 #define BTOPRT(b)       (((unsigned int)(b) >> PRSHIFT))
119
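/*
 * Worked example (illustrative note, not from the original source):
 * with PAGE_CACHE_SIZE == 4096 and BITS_PER_LONG == 64, PRSHIFT is
 * 12 - 6 = 6 and NBPPR is 4096 / 64 = 64 bytes, so each bit of
 * page->private tracks a 64-byte region and a 512-byte filesystem
 * block spans eight such regions (eight bits of the bitmap).
 */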
120 STATIC unsigned long
121 page_region_mask(
122         size_t          offset,
123         size_t          length)
124 {
125         unsigned long   mask;
126         int             first, final;
127
128         first = BTOPR(offset);
129         final = BTOPRT(offset + length - 1);
130         first = min(first, final);
131
132         mask = ~0UL;
133         mask <<= BITS_PER_LONG - (final - first);
134         mask >>= BITS_PER_LONG - (final);
135
136         ASSERT(offset + length <= PAGE_CACHE_SIZE);
137         ASSERT((final - first) < BITS_PER_LONG && (final - first) >= 0);
138
139         return mask;
140 }
141
142 STATIC_INLINE void
143 set_page_region(
144         struct page     *page,
145         size_t          offset,
146         size_t          length)
147 {
148         set_page_private(page,
149                 page_private(page) | page_region_mask(offset, length));
150         if (page_private(page) == ~0UL)
151                 SetPageUptodate(page);
152 }
153
154 STATIC_INLINE int
155 test_page_region(
156         struct page     *page,
157         size_t          offset,
158         size_t          length)
159 {
160         unsigned long   mask = page_region_mask(offset, length);
161
162         return (mask && (page_private(page) & mask) == mask);
163 }
164
165 /*
166  *      Mapping of multi-page buffers into contiguous virtual space
167  */
168
169 typedef struct a_list {
170         void            *vm_addr;
171         struct a_list   *next;
172 } a_list_t;
173
174 static a_list_t         *as_free_head;
175 static int              as_list_len;
176 static DEFINE_SPINLOCK(as_lock);
177
178 /*
179  *      Try to batch vunmaps because they are costly.
180  */
181 STATIC void
182 free_address(
183         void            *addr)
184 {
185         a_list_t        *aentry;
186
187         aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
188         if (likely(aentry)) {
189                 spin_lock(&as_lock);
190                 aentry->next = as_free_head;
191                 aentry->vm_addr = addr;
192                 as_free_head = aentry;
193                 as_list_len++;
194                 spin_unlock(&as_lock);
195         } else {
196                 vunmap(addr);
197         }
198 }
199
200 STATIC void
201 purge_addresses(void)
202 {
203         a_list_t        *aentry, *old;
204
205         if (as_free_head == NULL)
206                 return;
207
208         spin_lock(&as_lock);
209         aentry = as_free_head;
210         as_free_head = NULL;
211         as_list_len = 0;
212         spin_unlock(&as_lock);
213
214         while ((old = aentry) != NULL) {
215                 vunmap(aentry->vm_addr);
216                 aentry = aentry->next;
217                 kfree(old);
218         }
219 }
220
221 /*
222  *      Internal xfs_buf_t object manipulation
223  */
224
225 STATIC void
226 _xfs_buf_initialize(
227         xfs_buf_t               *bp,
228         xfs_buftarg_t           *target,
229         xfs_off_t               range_base,
230         size_t                  range_length,
231         xfs_buf_flags_t         flags)
232 {
233         /*
234          * We don't want certain flags to appear in b_flags.
235          */
236         flags &= ~(XBF_LOCK|XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);
237
238         memset(bp, 0, sizeof(xfs_buf_t));
239         atomic_set(&bp->b_hold, 1);
240         init_MUTEX_LOCKED(&bp->b_iodonesema);
241         INIT_LIST_HEAD(&bp->b_list);
242         INIT_LIST_HEAD(&bp->b_hash_list);
243         init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */
244         XB_SET_OWNER(bp);
245         bp->b_target = target;
246         bp->b_file_offset = range_base;
247         /*
248          * Set buffer_length and count_desired to the same value initially.
249          * I/O routines should use count_desired, which will be the same in
250          * most cases but may be reset (e.g. XFS recovery).
251          */
252         bp->b_buffer_length = bp->b_count_desired = range_length;
253         bp->b_flags = flags;
254         bp->b_bn = XFS_BUF_DADDR_NULL;
255         atomic_set(&bp->b_pin_count, 0);
256         init_waitqueue_head(&bp->b_waiters);
257
258         XFS_STATS_INC(xb_create);
259         XB_TRACE(bp, "initialize", target);
260 }
261
262 /*
263  *      Allocate a page array capable of holding a specified number
264  *      of pages, and point the page buf at it.
265  */
266 STATIC int
267 _xfs_buf_get_pages(
268         xfs_buf_t               *bp,
269         int                     page_count,
270         xfs_buf_flags_t         flags)
271 {
272         /* Make sure that we have a page list */
273         if (bp->b_pages == NULL) {
274                 bp->b_offset = xfs_buf_poff(bp->b_file_offset);
275                 bp->b_page_count = page_count;
276                 if (page_count <= XB_PAGES) {
277                         bp->b_pages = bp->b_page_array;
278                 } else {
279                         bp->b_pages = kmem_alloc(sizeof(struct page *) *
280                                         page_count, xb_to_km(flags));
281                         if (bp->b_pages == NULL)
282                                 return -ENOMEM;
283                 }
284                 memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
285         }
286         return 0;
287 }
288
289 /*
290  *      Frees b_pages if it was allocated.
291  */
292 STATIC void
293 _xfs_buf_free_pages(
294         xfs_buf_t       *bp)
295 {
296         if (bp->b_pages != bp->b_page_array) {
297                 kmem_free(bp->b_pages,
298                           bp->b_page_count * sizeof(struct page *));
299         }
300 }
301
302 /*
303  *      Releases the specified buffer.
304  *
305  *      The modification state of any associated pages is left unchanged.
306  *      The buffer must not be on any hash - use xfs_buf_rele instead for
307  *      hashed and refcounted buffers.
308  */
309 void
310 xfs_buf_free(
311         xfs_buf_t               *bp)
312 {
313         XB_TRACE(bp, "free", 0);
314
315         ASSERT(list_empty(&bp->b_hash_list));
316
317         if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) {
318                 uint            i;
319
320                 if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
321                         free_address(bp->b_addr - bp->b_offset);
322
323                 for (i = 0; i < bp->b_page_count; i++) {
324                         struct page     *page = bp->b_pages[i];
325
326                         if (bp->b_flags & _XBF_PAGE_CACHE)
327                                 ASSERT(!PagePrivate(page));
328                         page_cache_release(page);
329                 }
330                 _xfs_buf_free_pages(bp);
331         }
332
333         xfs_buf_deallocate(bp);
334 }
335
336 /*
337  *      Finds all pages for the buffer in question and builds its page list.
338  */
339 STATIC int
340 _xfs_buf_lookup_pages(
341         xfs_buf_t               *bp,
342         uint                    flags)
343 {
344         struct address_space    *mapping = bp->b_target->bt_mapping;
345         size_t                  blocksize = bp->b_target->bt_bsize;
346         size_t                  size = bp->b_count_desired;
347         size_t                  nbytes, offset;
348         gfp_t                   gfp_mask = xb_to_gfp(flags);
349         unsigned short          page_count, i;
350         pgoff_t                 first;
351         xfs_off_t               end;
352         int                     error;
353
354         end = bp->b_file_offset + bp->b_buffer_length;
355         page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);
356
357         error = _xfs_buf_get_pages(bp, page_count, flags);
358         if (unlikely(error))
359                 return error;
360         bp->b_flags |= _XBF_PAGE_CACHE;
361
362         offset = bp->b_offset;
363         first = bp->b_file_offset >> PAGE_CACHE_SHIFT;
364
365         for (i = 0; i < bp->b_page_count; i++) {
366                 struct page     *page;
367                 uint            retries = 0;
368
369               retry:
370                 page = find_or_create_page(mapping, first + i, gfp_mask);
371                 if (unlikely(page == NULL)) {
372                         if (flags & XBF_READ_AHEAD) {
373                                 bp->b_page_count = i;
374                                 for (i = 0; i < bp->b_page_count; i++)
375                                         unlock_page(bp->b_pages[i]);
376                                 return -ENOMEM;
377                         }
378
379                         /*
380                          * This could deadlock.
381                          *
382                          * But until all the XFS lowlevel code is revamped to
383                          * handle buffer allocation failures we can't do much.
384                          */
385                         if (!(++retries % 100))
386                                 printk(KERN_ERR
387                                         "XFS: possible memory allocation "
388                                         "deadlock in %s (mode:0x%x)\n",
389                                         __FUNCTION__, gfp_mask);
390
391                         XFS_STATS_INC(xb_page_retries);
392                         xfsbufd_wakeup(0, gfp_mask);
393                         congestion_wait(WRITE, HZ/50);
394                         goto retry;
395                 }
396
397                 XFS_STATS_INC(xb_page_found);
398
399                 nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
400                 size -= nbytes;
401
402                 ASSERT(!PagePrivate(page));
403                 if (!PageUptodate(page)) {
404                         page_count--;
405                         if (blocksize >= PAGE_CACHE_SIZE) {
406                                 if (flags & XBF_READ)
407                                         bp->b_locked = 1;
408                         } else if (!PagePrivate(page)) {
409                                 if (test_page_region(page, offset, nbytes))
410                                         page_count++;
411                         }
412                 }
413
414                 bp->b_pages[i] = page;
415                 offset = 0;
416         }
417
418         if (!bp->b_locked) {
419                 for (i = 0; i < bp->b_page_count; i++)
420                         unlock_page(bp->b_pages[i]);
421         }
422
423         if (page_count == bp->b_page_count)
424                 bp->b_flags |= XBF_DONE;
425
426         XB_TRACE(bp, "lookup_pages", (long)page_count);
427         return error;
428 }
429
430 /*
431  *      Map buffer into kernel address-space if necessary.
432  */
433 STATIC int
434 _xfs_buf_map_pages(
435         xfs_buf_t               *bp,
436         uint                    flags)
437 {
438         /* A single page buffer is always mappable */
439         if (bp->b_page_count == 1) {
440                 bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
441                 bp->b_flags |= XBF_MAPPED;
442         } else if (flags & XBF_MAPPED) {
443                 if (as_list_len > 64)
444                         purge_addresses();
445                 bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
446                                         VM_MAP, PAGE_KERNEL);
447                 if (unlikely(bp->b_addr == NULL))
448                         return -ENOMEM;
449                 bp->b_addr += bp->b_offset;
450                 bp->b_flags |= XBF_MAPPED;
451         }
452
453         return 0;
454 }
455
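/*
 * Note (descriptive, added for clarity): only multi-page buffers are
 * run through vmap() above; their mappings are torn down lazily via
 * free_address() from xfs_buf_free(), and purge_addresses() batches
 * the costly vunmap() calls once more than 64 mappings have
 * accumulated.
 */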
456 /*
457  *      Finding and Reading Buffers
458  */
459
460 /*
461  *      Looks up, and creates if absent, a lockable buffer for
462  *      a given range of an inode.  The buffer is returned
463  *      locked.  If other overlapping buffers exist, they are
464  *      released before the new buffer is created and locked,
465  *      which may imply that this call will block until those buffers
466  *      are unlocked.  No I/O is implied by this call.
467  */
468 xfs_buf_t *
469 _xfs_buf_find(
470         xfs_buftarg_t           *btp,   /* block device target          */
471         xfs_off_t               ioff,   /* starting offset of range     */
472         size_t                  isize,  /* length of range              */
473         xfs_buf_flags_t         flags,
474         xfs_buf_t               *new_bp)
475 {
476         xfs_off_t               range_base;
477         size_t                  range_length;
478         xfs_bufhash_t           *hash;
479         xfs_buf_t               *bp, *n;
480
481         range_base = (ioff << BBSHIFT);
482         range_length = (isize << BBSHIFT);
483
484         /* Check for IOs smaller than the sector size / not sector aligned */
485         ASSERT(!(range_length < (1 << btp->bt_sshift)));
486         ASSERT(!(range_base & (xfs_off_t)btp->bt_smask));
487
488         hash = &btp->bt_hash[hash_long((unsigned long)ioff, btp->bt_hashshift)];
489
490         spin_lock(&hash->bh_lock);
491
492         list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
493                 ASSERT(btp == bp->b_target);
494                 if (bp->b_file_offset == range_base &&
495                     bp->b_buffer_length == range_length) {
496                         /*
497                          * If we look at something, bring it to the
498                          * front of the list for next time.
499                          */
500                         atomic_inc(&bp->b_hold);
501                         list_move(&bp->b_hash_list, &hash->bh_list);
502                         goto found;
503                 }
504         }
505
506         /* No match found */
507         if (new_bp) {
508                 _xfs_buf_initialize(new_bp, btp, range_base,
509                                 range_length, flags);
510                 new_bp->b_hash = hash;
511                 list_add(&new_bp->b_hash_list, &hash->bh_list);
512         } else {
513                 XFS_STATS_INC(xb_miss_locked);
514         }
515
516         spin_unlock(&hash->bh_lock);
517         return new_bp;
518
519 found:
520         spin_unlock(&hash->bh_lock);
521
522         /* Attempt to get the semaphore without sleeping;
523          * if this does not work then we must sleep on the
524          * semaphore (the hash spinlock was dropped above).
525          */
526         if (down_trylock(&bp->b_sema)) {
527                 if (!(flags & XBF_TRYLOCK)) {
528                         /* wait for buffer ownership */
529                         XB_TRACE(bp, "get_lock", 0);
530                         xfs_buf_lock(bp);
531                         XFS_STATS_INC(xb_get_locked_waited);
532                 } else {
533                         /* We asked for a trylock and failed, no need
534                          * to look at file offset and length here, we
535                          * know that this buffer at least overlaps our
536                          * buffer and is locked, therefore our buffer
537                          * either does not exist, or is this buffer.
538                          */
539                         xfs_buf_rele(bp);
540                         XFS_STATS_INC(xb_busy_locked);
541                         return NULL;
542                 }
543         } else {
544                 /* trylock worked */
545                 XB_SET_OWNER(bp);
546         }
547
548         if (bp->b_flags & XBF_STALE) {
549                 ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
550                 bp->b_flags &= XBF_MAPPED;
551         }
552         XB_TRACE(bp, "got_lock", 0);
553         XFS_STATS_INC(xb_get_locked);
554         return bp;
555 }
556
557 /*
558  *      Assembles a buffer covering the specified range.
559  *      Storage in memory for all portions of the buffer will be allocated,
560  *      although backing storage may not be.
561  */
562 xfs_buf_t *
563 xfs_buf_get_flags(
564         xfs_buftarg_t           *target,/* target for buffer            */
565         xfs_off_t               ioff,   /* starting offset of range     */
566         size_t                  isize,  /* length of range              */
567         xfs_buf_flags_t         flags)
568 {
569         xfs_buf_t               *bp, *new_bp;
570         int                     error = 0, i;
571
572         new_bp = xfs_buf_allocate(flags);
573         if (unlikely(!new_bp))
574                 return NULL;
575
576         bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
577         if (bp == new_bp) {
578                 error = _xfs_buf_lookup_pages(bp, flags);
579                 if (error)
580                         goto no_buffer;
581         } else {
582                 xfs_buf_deallocate(new_bp);
583                 if (unlikely(bp == NULL))
584                         return NULL;
585         }
586
587         for (i = 0; i < bp->b_page_count; i++)
588                 mark_page_accessed(bp->b_pages[i]);
589
590         if (!(bp->b_flags & XBF_MAPPED)) {
591                 error = _xfs_buf_map_pages(bp, flags);
592                 if (unlikely(error)) {
593                         printk(KERN_WARNING "%s: failed to map pages\n",
594                                         __FUNCTION__);
595                         goto no_buffer;
596                 }
597         }
598
599         XFS_STATS_INC(xb_get);
600
601         /*
602          * Always fill in the block number now, the mapped cases can do
603          * their own overlay of this later.
604          */
605         bp->b_bn = ioff;
606         bp->b_count_desired = bp->b_buffer_length;
607
608         XB_TRACE(bp, "get", (unsigned long)flags);
609         return bp;
610
611  no_buffer:
612         if (flags & (XBF_LOCK | XBF_TRYLOCK))
613                 xfs_buf_unlock(bp);
614         xfs_buf_rele(bp);
615         return NULL;
616 }
617
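/*
 * Usage sketch (illustrative, not part of the original file): a
 * typical caller looks up a locked buffer, uses the mapped address,
 * and then drops both the lock and the hold it was returned with.
 * blkno, numblks, data and len below are hypothetical.
 *
 *	xfs_buf_t *bp = xfs_buf_get_flags(target, blkno, numblks,
 *					  XBF_LOCK | XBF_MAPPED);
 *	if (bp != NULL) {
 *		memcpy(bp->b_addr, data, len);
 *		xfs_buf_unlock(bp);
 *		xfs_buf_rele(bp);
 *	}
 */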
618 xfs_buf_t *
619 xfs_buf_read_flags(
620         xfs_buftarg_t           *target,
621         xfs_off_t               ioff,
622         size_t                  isize,
623         xfs_buf_flags_t         flags)
624 {
625         xfs_buf_t               *bp;
626
627         flags |= XBF_READ;
628
629         bp = xfs_buf_get_flags(target, ioff, isize, flags);
630         if (bp) {
631                 if (!XFS_BUF_ISDONE(bp)) {
632                         XB_TRACE(bp, "read", (unsigned long)flags);
633                         XFS_STATS_INC(xb_get_read);
634                         xfs_buf_iostart(bp, flags);
635                 } else if (flags & XBF_ASYNC) {
636                         XB_TRACE(bp, "read_async", (unsigned long)flags);
637                         /*
638                          * Read ahead call which is already satisfied,
639                          * drop the buffer
640                          */
641                         goto no_buffer;
642                 } else {
643                         XB_TRACE(bp, "read_done", (unsigned long)flags);
644                         /* We do not want read in the flags */
645                         bp->b_flags &= ~XBF_READ;
646                 }
647         }
648
649         return bp;
650
651  no_buffer:
652         if (flags & (XBF_LOCK | XBF_TRYLOCK))
653                 xfs_buf_unlock(bp);
654         xfs_buf_rele(bp);
655         return NULL;
656 }
657
658 /*
659  *      If we are not low on memory then do the readahead in a
660  *      deadlock-safe manner.
661  */
662 void
663 xfs_buf_readahead(
664         xfs_buftarg_t           *target,
665         xfs_off_t               ioff,
666         size_t                  isize,
667         xfs_buf_flags_t         flags)
668 {
669         struct backing_dev_info *bdi;
670
671         bdi = target->bt_mapping->backing_dev_info;
672         if (bdi_read_congested(bdi))
673                 return;
674
675         flags |= (XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD);
676         xfs_buf_read_flags(target, ioff, isize, flags);
677 }
678
679 xfs_buf_t *
680 xfs_buf_get_empty(
681         size_t                  len,
682         xfs_buftarg_t           *target)
683 {
684         xfs_buf_t               *bp;
685
686         bp = xfs_buf_allocate(0);
687         if (bp)
688                 _xfs_buf_initialize(bp, target, 0, len, 0);
689         return bp;
690 }
691
692 static inline struct page *
693 mem_to_page(
694         void                    *addr)
695 {
696         if (((unsigned long)addr < VMALLOC_START) ||
697             ((unsigned long)addr >= VMALLOC_END)) {
698                 return virt_to_page(addr);
699         } else {
700                 return vmalloc_to_page(addr);
701         }
702 }
703
704 int
705 xfs_buf_associate_memory(
706         xfs_buf_t               *bp,
707         void                    *mem,
708         size_t                  len)
709 {
710         int                     rval;
711         int                     i = 0;
712         size_t                  ptr;
713         size_t                  end, end_cur;
714         off_t                   offset;
715         int                     page_count;
716
717         page_count = PAGE_CACHE_ALIGN(len) >> PAGE_CACHE_SHIFT;
718         offset = (off_t) mem - ((off_t)mem & PAGE_CACHE_MASK);
719         if (offset && (len > PAGE_CACHE_SIZE))
720                 page_count++;
721
722         /* Free any previous set of page pointers */
723         if (bp->b_pages)
724                 _xfs_buf_free_pages(bp);
725
726         bp->b_pages = NULL;
727         bp->b_addr = mem;
728
729         rval = _xfs_buf_get_pages(bp, page_count, 0);
730         if (rval)
731                 return rval;
732
733         bp->b_offset = offset;
734         ptr = (size_t) mem & PAGE_CACHE_MASK;
735         end = PAGE_CACHE_ALIGN((size_t) mem + len);
736         end_cur = end;
737         /* set up first page */
738         bp->b_pages[0] = mem_to_page(mem);
739
740         ptr += PAGE_CACHE_SIZE;
741         bp->b_page_count = ++i;
742         while (ptr < end) {
743                 bp->b_pages[i] = mem_to_page((void *)ptr);
744                 bp->b_page_count = ++i;
745                 ptr += PAGE_CACHE_SIZE;
746         }
747         bp->b_locked = 0;
748
749         bp->b_count_desired = bp->b_buffer_length = len;
750         bp->b_flags |= XBF_MAPPED;
751
752         return 0;
753 }
754
755 xfs_buf_t *
756 xfs_buf_get_noaddr(
757         size_t                  len,
758         xfs_buftarg_t           *target)
759 {
760         unsigned long           page_count = PAGE_ALIGN(len) >> PAGE_SHIFT;
761         int                     error, i;
762         xfs_buf_t               *bp;
763
764         bp = xfs_buf_allocate(0);
765         if (unlikely(bp == NULL))
766                 goto fail;
767         _xfs_buf_initialize(bp, target, 0, len, 0);
768
769         error = _xfs_buf_get_pages(bp, page_count, 0);
770         if (error)
771                 goto fail_free_buf;
772
773         for (i = 0; i < page_count; i++) {
774                 bp->b_pages[i] = alloc_page(GFP_KERNEL);
775                 if (!bp->b_pages[i])
776                         goto fail_free_mem;
777         }
778         bp->b_flags |= _XBF_PAGES;
779
780         error = _xfs_buf_map_pages(bp, XBF_MAPPED);
781         if (unlikely(error)) {
782                 printk(KERN_WARNING "%s: failed to map pages\n",
783                                 __FUNCTION__);
784                 goto fail_free_mem;
785         }
786
787         xfs_buf_unlock(bp);
788
789         XB_TRACE(bp, "no_daddr", len);
790         return bp;
791
792  fail_free_mem:
793         while (--i >= 0)
794                 __free_page(bp->b_pages[i]);
795         _xfs_buf_free_pages(bp);
796  fail_free_buf:
797         xfs_buf_deallocate(bp);
798  fail:
799         return NULL;
800 }
801
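/*
 * Note on the error paths above (the subject of this fix): when
 * alloc_page() fails after i pages have been allocated, only pages
 * [0, i) are released by the "while (--i >= 0)" loop, the page array
 * is freed with _xfs_buf_free_pages(), and the buffer itself with
 * xfs_buf_deallocate().  Using xfs_buf_free() here instead would
 * walk b_pages and release the pages a second time - the double
 * free this change removes.
 */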
802 /*
803  *      Increment reference count on buffer, to hold the buffer concurrently
804  *      with another thread which may release (free) the buffer asynchronously.
805  *      Must hold the buffer already to call this function.
806  */
807 void
808 xfs_buf_hold(
809         xfs_buf_t               *bp)
810 {
811         atomic_inc(&bp->b_hold);
812         XB_TRACE(bp, "hold", 0);
813 }
814
815 /*
816  *      Releases a hold on the specified buffer.  If the
817  *      hold count is 1, calls xfs_buf_free.
818  */
819 void
820 xfs_buf_rele(
821         xfs_buf_t               *bp)
822 {
823         xfs_bufhash_t           *hash = bp->b_hash;
824
825         XB_TRACE(bp, "rele", bp->b_relse);
826
827         if (unlikely(!hash)) {
828                 ASSERT(!bp->b_relse);
829                 if (atomic_dec_and_test(&bp->b_hold))
830                         xfs_buf_free(bp);
831                 return;
832         }
833
834         if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) {
835                 if (bp->b_relse) {
836                         atomic_inc(&bp->b_hold);
837                         spin_unlock(&hash->bh_lock);
838                         (*(bp->b_relse)) (bp);
839                 } else if (bp->b_flags & XBF_FS_MANAGED) {
840                         spin_unlock(&hash->bh_lock);
841                 } else {
842                         ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
843                         list_del_init(&bp->b_hash_list);
844                         spin_unlock(&hash->bh_lock);
845                         xfs_buf_free(bp);
846                 }
847         } else {
848                 /*
849                  * Catch reference count leaks
850                  */
851                 ASSERT(atomic_read(&bp->b_hold) >= 0);
852         }
853 }
854
855
856 /*
857  *      Mutual exclusion on buffers.  Locking model:
858  *
859  *      Buffers associated with inodes for which buffer locking
860  *      is not enabled are not protected by semaphores, and are
861  *      assumed to be exclusively owned by the caller.  There is a
862  *      spinlock in the buffer, used by the caller when concurrent
863  *      access is possible.
864  */
865
866 /*
867  *      Locks a buffer object, if it is not already locked.
868  *      Note that this in no way locks the underlying pages, so it is only
869  *      useful for synchronizing concurrent use of buffer objects, not for
870  *      synchronizing independent access to the underlying pages.
871  */
872 int
873 xfs_buf_cond_lock(
874         xfs_buf_t               *bp)
875 {
876         int                     locked;
877
878         locked = down_trylock(&bp->b_sema) == 0;
879         if (locked) {
880                 XB_SET_OWNER(bp);
881         }
882         XB_TRACE(bp, "cond_lock", (long)locked);
883         return locked ? 0 : -EBUSY;
884 }
885
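/*
 * Note (illustrative): xfs_buf_cond_lock() follows the usual
 * 0/-EBUSY trylock convention, so a caller tests it directly:
 *
 *	if (!xfs_buf_cond_lock(bp)) {
 *		(lock obtained - release later with xfs_buf_unlock)
 *	}
 *
 * Sketch only; xfs_buf_delwri_split() below is a real caller of
 * this form.
 */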
886 #if defined(DEBUG) || defined(XFS_BLI_TRACE)
887 int
888 xfs_buf_lock_value(
889         xfs_buf_t               *bp)
890 {
891         return atomic_read(&bp->b_sema.count);
892 }
893 #endif
894
895 /*
896  *      Locks a buffer object.
897  *      Note that this in no way locks the underlying pages, so it is only
898  *      useful for synchronizing concurrent use of buffer objects, not for
899  *      synchronizing independent access to the underlying pages.
900  */
901 void
902 xfs_buf_lock(
903         xfs_buf_t               *bp)
904 {
905         XB_TRACE(bp, "lock", 0);
906         if (atomic_read(&bp->b_io_remaining))
907                 blk_run_address_space(bp->b_target->bt_mapping);
908         down(&bp->b_sema);
909         XB_SET_OWNER(bp);
910         XB_TRACE(bp, "locked", 0);
911 }
912
913 /*
914  *      Releases the lock on the buffer object.
915  *      If the buffer is marked delwri but is not queued, do so before we
916  *      unlock the buffer as we need to set flags correctly.  We also need to
917  *      take a reference for the delwri queue because the unlocker is going to
918  *      drop theirs, and they don't know we just queued it.
919  */
920 void
921 xfs_buf_unlock(
922         xfs_buf_t               *bp)
923 {
924         if ((bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)) == XBF_DELWRI) {
925                 atomic_inc(&bp->b_hold);
926                 bp->b_flags |= XBF_ASYNC;
927                 xfs_buf_delwri_queue(bp, 0);
928         }
929
930         XB_CLEAR_OWNER(bp);
931         up(&bp->b_sema);
932         XB_TRACE(bp, "unlock", 0);
933 }
934
935
936 /*
937  *      Pinning Buffer Storage in Memory
938  *      Ensure that no attempt to force a buffer to disk will succeed.
939  */
940 void
941 xfs_buf_pin(
942         xfs_buf_t               *bp)
943 {
944         atomic_inc(&bp->b_pin_count);
945         XB_TRACE(bp, "pin", (long)bp->b_pin_count.counter);
946 }
947
948 void
949 xfs_buf_unpin(
950         xfs_buf_t               *bp)
951 {
952         if (atomic_dec_and_test(&bp->b_pin_count))
953                 wake_up_all(&bp->b_waiters);
954         XB_TRACE(bp, "unpin", (long)bp->b_pin_count.counter);
955 }
956
957 int
958 xfs_buf_ispin(
959         xfs_buf_t               *bp)
960 {
961         return atomic_read(&bp->b_pin_count);
962 }
963
964 STATIC void
965 xfs_buf_wait_unpin(
966         xfs_buf_t               *bp)
967 {
968         DECLARE_WAITQUEUE       (wait, current);
969
970         if (atomic_read(&bp->b_pin_count) == 0)
971                 return;
972
973         add_wait_queue(&bp->b_waiters, &wait);
974         for (;;) {
975                 set_current_state(TASK_UNINTERRUPTIBLE);
976                 if (atomic_read(&bp->b_pin_count) == 0)
977                         break;
978                 if (atomic_read(&bp->b_io_remaining))
979                         blk_run_address_space(bp->b_target->bt_mapping);
980                 schedule();
981         }
982         remove_wait_queue(&bp->b_waiters, &wait);
983         set_current_state(TASK_RUNNING);
984 }
985
986 /*
987  *      Buffer Utility Routines
988  */
989
990 STATIC void
991 xfs_buf_iodone_work(
992         struct work_struct      *work)
993 {
994         xfs_buf_t               *bp =
995                 container_of(work, xfs_buf_t, b_iodone_work);
996
997         if (bp->b_iodone)
998                 (*(bp->b_iodone))(bp);
999         else if (bp->b_flags & XBF_ASYNC)
1000                 xfs_buf_relse(bp);
1001 }
1002
1003 void
1004 xfs_buf_ioend(
1005         xfs_buf_t               *bp,
1006         int                     schedule)
1007 {
1008         bp->b_flags &= ~(XBF_READ | XBF_WRITE);
1009         if (bp->b_error == 0)
1010                 bp->b_flags |= XBF_DONE;
1011
1012         XB_TRACE(bp, "iodone", bp->b_iodone);
1013
1014         if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
1015                 if (schedule) {
1016                         INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
1017                         queue_work(xfslogd_workqueue, &bp->b_iodone_work);
1018                 } else {
1019                         xfs_buf_iodone_work(&bp->b_iodone_work);
1020                 }
1021         } else {
1022                 up(&bp->b_iodonesema);
1023         }
1024 }
1025
1026 void
1027 xfs_buf_ioerror(
1028         xfs_buf_t               *bp,
1029         int                     error)
1030 {
1031         ASSERT(error >= 0 && error <= 0xffff);
1032         bp->b_error = (unsigned short)error;
1033         XB_TRACE(bp, "ioerror", (unsigned long)error);
1034 }
1035
1036 /*
1037  *      Initiate I/O on a buffer, based on the flags supplied.
1038  *      The b_iodone routine in the buffer supplied will only be called
1039  *      when all of the subsidiary I/O requests, if any, have been completed.
1040  */
1041 int
1042 xfs_buf_iostart(
1043         xfs_buf_t               *bp,
1044         xfs_buf_flags_t         flags)
1045 {
1046         int                     status = 0;
1047
1048         XB_TRACE(bp, "iostart", (unsigned long)flags);
1049
1050         if (flags & XBF_DELWRI) {
1051                 bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC);
1052                 bp->b_flags |= flags & (XBF_DELWRI | XBF_ASYNC);
1053                 xfs_buf_delwri_queue(bp, 1);
1054                 return status;
1055         }
1056
1057         bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \
1058                         XBF_READ_AHEAD | _XBF_RUN_QUEUES);
1059         bp->b_flags |= flags & (XBF_READ | XBF_WRITE | XBF_ASYNC | \
1060                         XBF_READ_AHEAD | _XBF_RUN_QUEUES);
1061
1062         BUG_ON(bp->b_bn == XFS_BUF_DADDR_NULL);
1063
1064         /* For writes allow an alternate strategy routine to precede
1065          * the actual I/O request (which may not be issued at all in
1066          * a shutdown situation, for example).
1067          */
1068         status = (flags & XBF_WRITE) ?
1069                 xfs_buf_iostrategy(bp) : xfs_buf_iorequest(bp);
1070
1071         /* Wait for I/O if we are not an async request.
1072          * Note: async I/O request completion will release the buffer,
1073          * and that can already be done by this point.  So using the
1074          * buffer pointer from here on, after async I/O, is invalid.
1075          */
1076         if (!status && !(flags & XBF_ASYNC))
1077                 status = xfs_buf_iowait(bp);
1078
1079         return status;
1080 }
1081
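/*
 * Note (illustrative sketch): since xfs_buf_iostart() only waits
 * when XBF_ASYNC is clear, a synchronous read reduces to the line
 * below, with any error code coming back through xfs_buf_iowait():
 *
 *	int error = xfs_buf_iostart(bp, XBF_READ);
 *
 * With XBF_ASYNC set, completion may already have released the
 * buffer by the time this returns, so bp must not be touched
 * afterwards (see the comment in the function body).
 */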
1082 STATIC_INLINE int
1083 _xfs_buf_iolocked(
1084         xfs_buf_t               *bp)
1085 {
1086         ASSERT(bp->b_flags & (XBF_READ | XBF_WRITE));
1087         if (bp->b_flags & XBF_READ)
1088                 return bp->b_locked;
1089         return 0;
1090 }
1091
1092 STATIC_INLINE void
1093 _xfs_buf_ioend(
1094         xfs_buf_t               *bp,
1095         int                     schedule)
1096 {
1097         if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
1098                 bp->b_locked = 0;
1099                 xfs_buf_ioend(bp, schedule);
1100         }
1101 }
1102
1103 STATIC int
1104 xfs_buf_bio_end_io(
1105         struct bio              *bio,
1106         unsigned int            bytes_done,
1107         int                     error)
1108 {
1109         xfs_buf_t               *bp = (xfs_buf_t *)bio->bi_private;
1110         unsigned int            blocksize = bp->b_target->bt_bsize;
1111         struct bio_vec          *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1112
1113         if (bio->bi_size)
1114                 return 1;
1115
1116         if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
1117                 bp->b_error = EIO;
1118
1119         do {
1120                 struct page     *page = bvec->bv_page;
1121
1122                 ASSERT(!PagePrivate(page));
1123                 if (unlikely(bp->b_error)) {
1124                         if (bp->b_flags & XBF_READ)
1125                                 ClearPageUptodate(page);
1126                 } else if (blocksize >= PAGE_CACHE_SIZE) {
1127                         SetPageUptodate(page);
1128                 } else if (!PagePrivate(page) &&
1129                                 (bp->b_flags & _XBF_PAGE_CACHE)) {
1130                         set_page_region(page, bvec->bv_offset, bvec->bv_len);
1131                 }
1132
1133                 if (--bvec >= bio->bi_io_vec)
1134                         prefetchw(&bvec->bv_page->flags);
1135
1136                 if (_xfs_buf_iolocked(bp)) {
1137                         unlock_page(page);
1138                 }
1139         } while (bvec >= bio->bi_io_vec);
1140
1141         _xfs_buf_ioend(bp, 1);
1142         bio_put(bio);
1143         return 0;
1144 }
1145
1146 STATIC void
1147 _xfs_buf_ioapply(
1148         xfs_buf_t               *bp)
1149 {
1150         int                     i, rw, map_i, total_nr_pages, nr_pages;
1151         struct bio              *bio;
1152         int                     offset = bp->b_offset;
1153         int                     size = bp->b_count_desired;
1154         sector_t                sector = bp->b_bn;
1155         unsigned int            blocksize = bp->b_target->bt_bsize;
1156         int                     locking = _xfs_buf_iolocked(bp);
1157
1158         total_nr_pages = bp->b_page_count;
1159         map_i = 0;
1160
1161         if (bp->b_flags & XBF_ORDERED) {
1162                 ASSERT(!(bp->b_flags & XBF_READ));
1163                 rw = WRITE_BARRIER;
1164         } else if (bp->b_flags & _XBF_RUN_QUEUES) {
1165                 ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
1166                 bp->b_flags &= ~_XBF_RUN_QUEUES;
1167                 rw = (bp->b_flags & XBF_WRITE) ? WRITE_SYNC : READ_SYNC;
1168         } else {
1169                 rw = (bp->b_flags & XBF_WRITE) ? WRITE :
1170                      (bp->b_flags & XBF_READ_AHEAD) ? READA : READ;
1171         }
1172
1173         /* Special code path for reading a sub-page-size buffer --
1174          * we populate the whole page, and hence the other metadata
1175          * in the same page.  This optimization is only valid when the
1176          * filesystem block size is not smaller than the page size.
1177          */
1178         if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
1179             (bp->b_flags & XBF_READ) && locking &&
1180             (blocksize >= PAGE_CACHE_SIZE)) {
1181                 bio = bio_alloc(GFP_NOIO, 1);
1182
1183                 bio->bi_bdev = bp->b_target->bt_bdev;
1184                 bio->bi_sector = sector - (offset >> BBSHIFT);
1185                 bio->bi_end_io = xfs_buf_bio_end_io;
1186                 bio->bi_private = bp;
1187
1188                 bio_add_page(bio, bp->b_pages[0], PAGE_CACHE_SIZE, 0);
1189                 size = 0;
1190
1191                 atomic_inc(&bp->b_io_remaining);
1192
1193                 goto submit_io;
1194         }
1195
1196         /* Lock down the pages which we need to for the request */
1197         if (locking && (bp->b_flags & XBF_WRITE) && (bp->b_locked == 0)) {
1198                 for (i = 0; size; i++) {
1199                         int             nbytes = PAGE_CACHE_SIZE - offset;
1200                         struct page     *page = bp->b_pages[i];
1201
1202                         if (nbytes > size)
1203                                 nbytes = size;
1204
1205                         lock_page(page);
1206
1207                         size -= nbytes;
1208                         offset = 0;
1209                 }
1210                 offset = bp->b_offset;
1211                 size = bp->b_count_desired;
1212         }
1213
1214 next_chunk:
1215         atomic_inc(&bp->b_io_remaining);
1216         nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
1217         if (nr_pages > total_nr_pages)
1218                 nr_pages = total_nr_pages;
1219
1220         bio = bio_alloc(GFP_NOIO, nr_pages);
1221         bio->bi_bdev = bp->b_target->bt_bdev;
1222         bio->bi_sector = sector;
1223         bio->bi_end_io = xfs_buf_bio_end_io;
1224         bio->bi_private = bp;
1225
1226         for (; size && nr_pages; nr_pages--, map_i++) {
1227                 int     rbytes, nbytes = PAGE_CACHE_SIZE - offset;
1228
1229                 if (nbytes > size)
1230                         nbytes = size;
1231
1232                 rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset);
1233                 if (rbytes < nbytes)
1234                         break;
1235
1236                 offset = 0;
1237                 sector += nbytes >> BBSHIFT;
1238                 size -= nbytes;
1239                 total_nr_pages--;
1240         }
1241
1242 submit_io:
1243         if (likely(bio->bi_size)) {
1244                 submit_bio(rw, bio);
1245                 if (size)
1246                         goto next_chunk;
1247         } else {
1248                 bio_put(bio);
1249                 xfs_buf_ioerror(bp, EIO);
1250         }
1251 }
1252
1253 int
1254 xfs_buf_iorequest(
1255         xfs_buf_t               *bp)
1256 {
1257         XB_TRACE(bp, "iorequest", 0);
1258
1259         if (bp->b_flags & XBF_DELWRI) {
1260                 xfs_buf_delwri_queue(bp, 1);
1261                 return 0;
1262         }
1263
1264         if (bp->b_flags & XBF_WRITE) {
1265                 xfs_buf_wait_unpin(bp);
1266         }
1267
1268         xfs_buf_hold(bp);
1269
1270         /* Set the count to 1 initially; this will stop an I/O
1271          * completion callout which happens before we have started
1272          * all the I/O from calling xfs_buf_ioend too early.
1273          */
1274         atomic_set(&bp->b_io_remaining, 1);
1275         _xfs_buf_ioapply(bp);
1276         _xfs_buf_ioend(bp, 0);
1277
1278         xfs_buf_rele(bp);
1279         return 0;
1280 }
1281
1282 /*
1283  *      Waits for I/O to complete on the buffer supplied.
1284  *      It returns immediately if no I/O is pending.
1285  *      It returns the I/O error code, if any, or 0 if there was no error.
1286  */
1287 int
1288 xfs_buf_iowait(
1289         xfs_buf_t               *bp)
1290 {
1291         XB_TRACE(bp, "iowait", 0);
1292         if (atomic_read(&bp->b_io_remaining))
1293                 blk_run_address_space(bp->b_target->bt_mapping);
1294         down(&bp->b_iodonesema);
1295         XB_TRACE(bp, "iowaited", (long)bp->b_error);
1296         return bp->b_error;
1297 }
1298
1299 xfs_caddr_t
1300 xfs_buf_offset(
1301         xfs_buf_t               *bp,
1302         size_t                  offset)
1303 {
1304         struct page             *page;
1305
1306         if (bp->b_flags & XBF_MAPPED)
1307                 return XFS_BUF_PTR(bp) + offset;
1308
1309         offset += bp->b_offset;
1310         page = bp->b_pages[offset >> PAGE_CACHE_SHIFT];
1311         return (xfs_caddr_t)page_address(page) + (offset & (PAGE_CACHE_SIZE-1));
1312 }
1313
1314 /*
1315  *      Move data into or out of a buffer.
1316  */
1317 void
1318 xfs_buf_iomove(
1319         xfs_buf_t               *bp,    /* buffer to process            */
1320         size_t                  boff,   /* starting buffer offset       */
1321         size_t                  bsize,  /* length to copy               */
1322         caddr_t                 data,   /* data address                 */
1323         xfs_buf_rw_t            mode)   /* read/write/zero flag         */
1324 {
1325         size_t                  bend, cpoff, csize;
1326         struct page             *page;
1327
1328         bend = boff + bsize;
1329         while (boff < bend) {
1330                 page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
1331                 cpoff = xfs_buf_poff(boff + bp->b_offset);
1332                 csize = min_t(size_t,
1333                               PAGE_CACHE_SIZE-cpoff, bp->b_count_desired-boff);
1334
1335                 ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE));
1336
1337                 switch (mode) {
1338                 case XBRW_ZERO:
1339                         memset(page_address(page) + cpoff, 0, csize);
1340                         break;
1341                 case XBRW_READ:
1342                         memcpy(data, page_address(page) + cpoff, csize);
1343                         break;
1344                 case XBRW_WRITE:
1345                         memcpy(page_address(page) + cpoff, data, csize);
1346                 }
1347
1348                 boff += csize;
1349                 data += csize;
1350         }
1351 }
1352
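/*
 * Note (illustrative): the data pointer is never dereferenced for
 * XBRW_ZERO, so zeroing the first 512 bytes of a buffer is simply
 *
 *	xfs_buf_iomove(bp, 0, 512, NULL, XBRW_ZERO);
 *
 * while XBRW_READ and XBRW_WRITE copy out of and into the buffer
 * pages respectively.
 */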
1353 /*
1354  *      Handling of buffer targets (buftargs).
1355  */
1356
1357 /*
1358  *      Wait for any bufs with callbacks that have been submitted but
1359  *      have not yet returned... walk the hash list for the target.
1360  */
1361 void
1362 xfs_wait_buftarg(
1363         xfs_buftarg_t   *btp)
1364 {
1365         xfs_buf_t       *bp, *n;
1366         xfs_bufhash_t   *hash;
1367         uint            i;
1368
1369         for (i = 0; i < (1 << btp->bt_hashshift); i++) {
1370                 hash = &btp->bt_hash[i];
1371 again:
1372                 spin_lock(&hash->bh_lock);
1373                 list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
1374                         ASSERT(btp == bp->b_target);
1375                         if (!(bp->b_flags & XBF_FS_MANAGED)) {
1376                                 spin_unlock(&hash->bh_lock);
1377                                 /*
1378                                  * Catch superblock reference count leaks
1379                                  * immediately
1380                                  */
1381                                 BUG_ON(bp->b_bn == 0);
1382                                 delay(100);
1383                                 goto again;
1384                         }
1385                 }
1386                 spin_unlock(&hash->bh_lock);
1387         }
1388 }
1389
1390 /*
1391  *      Allocate buffer hash table for a given target.
1392  *      For devices containing metadata (i.e. not the log/realtime devices)
1393  *      we need to allocate a much larger hash table.
1394  */
1395 STATIC void
1396 xfs_alloc_bufhash(
1397         xfs_buftarg_t           *btp,
1398         int                     external)
1399 {
1400         unsigned int            i;
1401
1402         btp->bt_hashshift = external ? 3 : 8;   /* 8 or 256 buckets */
1403         btp->bt_hashmask = (1 << btp->bt_hashshift) - 1;
1404         btp->bt_hash = kmem_zalloc((1 << btp->bt_hashshift) *
1405                                         sizeof(xfs_bufhash_t), KM_SLEEP | KM_LARGE);
1406         for (i = 0; i < (1 << btp->bt_hashshift); i++) {
1407                 spin_lock_init(&btp->bt_hash[i].bh_lock);
1408                 INIT_LIST_HEAD(&btp->bt_hash[i].bh_list);
1409         }
1410 }
1411
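/*
 * Note (descriptive, added for clarity): a bt_hashshift of 3 or 8
 * gives 8 buckets for external (log/realtime) targets and 256 for
 * the data device; _xfs_buf_find() above selects a bucket with
 * hash_long((unsigned long)ioff, btp->bt_hashshift).
 */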
1412 STATIC void
1413 xfs_free_bufhash(
1414         xfs_buftarg_t           *btp)
1415 {
1416         kmem_free(btp->bt_hash, (1<<btp->bt_hashshift) * sizeof(xfs_bufhash_t));
1417         btp->bt_hash = NULL;
1418 }
1419
1420 /*
1421  *      buftarg list for delwrite queue processing
1422  */
1423 static LIST_HEAD(xfs_buftarg_list);
1424 static DEFINE_SPINLOCK(xfs_buftarg_lock);
1425
1426 STATIC void
1427 xfs_register_buftarg(
1428         xfs_buftarg_t           *btp)
1429 {
1430         spin_lock(&xfs_buftarg_lock);
1431         list_add(&btp->bt_list, &xfs_buftarg_list);
1432         spin_unlock(&xfs_buftarg_lock);
1433 }
1434
1435 STATIC void
1436 xfs_unregister_buftarg(
1437         xfs_buftarg_t           *btp)
1438 {
1439         spin_lock(&xfs_buftarg_lock);
1440         list_del(&btp->bt_list);
1441         spin_unlock(&xfs_buftarg_lock);
1442 }
1443
1444 void
1445 xfs_free_buftarg(
1446         xfs_buftarg_t           *btp,
1447         int                     external)
1448 {
1449         xfs_flush_buftarg(btp, 1);
1450         if (external)
1451                 xfs_blkdev_put(btp->bt_bdev);
1452         xfs_free_bufhash(btp);
1453         iput(btp->bt_mapping->host);
1454
1455         /* Unregister the buftarg first so that we don't get a
1456          * wakeup finding a non-existent task
1457          */
1458         xfs_unregister_buftarg(btp);
1459         kthread_stop(btp->bt_task);
1460
1461         kmem_free(btp, sizeof(*btp));
1462 }
1463
1464 STATIC int
1465 xfs_setsize_buftarg_flags(
1466         xfs_buftarg_t           *btp,
1467         unsigned int            blocksize,
1468         unsigned int            sectorsize,
1469         int                     verbose)
1470 {
1471         btp->bt_bsize = blocksize;
1472         btp->bt_sshift = ffs(sectorsize) - 1;
1473         btp->bt_smask = sectorsize - 1;
1474
1475         if (set_blocksize(btp->bt_bdev, sectorsize)) {
1476                 printk(KERN_WARNING
1477                         "XFS: Cannot set_blocksize to %u on device %s\n",
1478                         sectorsize, XFS_BUFTARG_NAME(btp));
1479                 return EINVAL;
1480         }
1481
1482         if (verbose &&
1483             (PAGE_CACHE_SIZE / BITS_PER_LONG) > sectorsize) {
1484                 printk(KERN_WARNING
1485                         "XFS: %u byte sectors in use on device %s.  "
1486                         "This is suboptimal; %u or greater is ideal.\n",
1487                         sectorsize, XFS_BUFTARG_NAME(btp),
1488                         (unsigned int)PAGE_CACHE_SIZE / BITS_PER_LONG);
1489         }
1490
1491         return 0;
1492 }
1493
1494 /*
1495  *      When allocating the initial buffer target we have not yet
1496  *      read in the superblock, so we don't know what size sectors
1497  *      are being used at this early stage.  Play safe.
1498  */
1499 STATIC int
1500 xfs_setsize_buftarg_early(
1501         xfs_buftarg_t           *btp,
1502         struct block_device     *bdev)
1503 {
1504         return xfs_setsize_buftarg_flags(btp,
1505                         PAGE_CACHE_SIZE, bdev_hardsect_size(bdev), 0);
1506 }
1507
1508 int
1509 xfs_setsize_buftarg(
1510         xfs_buftarg_t           *btp,
1511         unsigned int            blocksize,
1512         unsigned int            sectorsize)
1513 {
1514         return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
1515 }
1516
1517 STATIC int
1518 xfs_mapping_buftarg(
1519         xfs_buftarg_t           *btp,
1520         struct block_device     *bdev)
1521 {
1522         struct backing_dev_info *bdi;
1523         struct inode            *inode;
1524         struct address_space    *mapping;
1525         static const struct address_space_operations mapping_aops = {
1526                 .sync_page = block_sync_page,
1527                 .migratepage = fail_migrate_page,
1528         };
1529
1530         inode = new_inode(bdev->bd_inode->i_sb);
1531         if (!inode) {
1532                 printk(KERN_WARNING
1533                         "XFS: Cannot allocate mapping inode for device %s\n",
1534                         XFS_BUFTARG_NAME(btp));
1535                 return ENOMEM;
1536         }
1537         inode->i_mode = S_IFBLK;
1538         inode->i_bdev = bdev;
1539         inode->i_rdev = bdev->bd_dev;
1540         bdi = blk_get_backing_dev_info(bdev);
1541         if (!bdi)
1542                 bdi = &default_backing_dev_info;
1543         mapping = &inode->i_data;
1544         mapping->a_ops = &mapping_aops;
1545         mapping->backing_dev_info = bdi;
1546         mapping_set_gfp_mask(mapping, GFP_NOFS);
1547         btp->bt_mapping = mapping;
1548         return 0;
1549 }
1550
1551 STATIC int
1552 xfs_alloc_delwrite_queue(
1553         xfs_buftarg_t           *btp)
1554 {
1555         int     error = 0;
1556
1557         INIT_LIST_HEAD(&btp->bt_list);
1558         INIT_LIST_HEAD(&btp->bt_delwrite_queue);
1559         spinlock_init(&btp->bt_delwrite_lock, "delwri_lock");
1560         btp->bt_flags = 0;
1561         btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd");
1562         if (IS_ERR(btp->bt_task)) {
1563                 error = PTR_ERR(btp->bt_task);
1564                 goto out_error;
1565         }
1566         xfs_register_buftarg(btp);
1567 out_error:
1568         return error;
1569 }
1570
1571 xfs_buftarg_t *
1572 xfs_alloc_buftarg(
1573         struct block_device     *bdev,
1574         int                     external)
1575 {
1576         xfs_buftarg_t           *btp;
1577
1578         btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
1579
1580         btp->bt_dev =  bdev->bd_dev;
1581         btp->bt_bdev = bdev;
1582         if (xfs_setsize_buftarg_early(btp, bdev))
1583                 goto error;
1584         if (xfs_mapping_buftarg(btp, bdev))
1585                 goto error;
1586         if (xfs_alloc_delwrite_queue(btp))
1587                 goto error;
1588         xfs_alloc_bufhash(btp, external);
1589         return btp;
1590
1591 error:
1592         kmem_free(btp, sizeof(*btp));
1593         return NULL;
1594 }
1595
1596
1597 /*
1598  *      Delayed write buffer handling
1599  */
1600 STATIC void
1601 xfs_buf_delwri_queue(
1602         xfs_buf_t               *bp,
1603         int                     unlock)
1604 {
1605         struct list_head        *dwq = &bp->b_target->bt_delwrite_queue;
1606         spinlock_t              *dwlk = &bp->b_target->bt_delwrite_lock;
1607
1608         XB_TRACE(bp, "delwri_q", (long)unlock);
1609         ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC));
1610
1611         spin_lock(dwlk);
1612         /* If already in the queue, dequeue and place at tail */
1613         if (!list_empty(&bp->b_list)) {
1614                 ASSERT(bp->b_flags & _XBF_DELWRI_Q);
1615                 if (unlock)
1616                         atomic_dec(&bp->b_hold);
1617                 list_del(&bp->b_list);
1618         }
1619
1620         bp->b_flags |= _XBF_DELWRI_Q;
1621         list_add_tail(&bp->b_list, dwq);
1622         bp->b_queuetime = jiffies;
1623         spin_unlock(dwlk);
1624
1625         if (unlock)
1626                 xfs_buf_unlock(bp);
1627 }
1628
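/*
 * Remove a buffer from its target's delayed write queue, clearing its
 * delwri state and dropping the queue's buffer reference if it was
 * actually queued.
 */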
1629 void
1630 xfs_buf_delwri_dequeue(
1631         xfs_buf_t               *bp)
1632 {
1633         spinlock_t              *dwlk = &bp->b_target->bt_delwrite_lock;
1634         int                     dequeued = 0;
1635
1636         spin_lock(dwlk);
1637         if ((bp->b_flags & XBF_DELWRI) && !list_empty(&bp->b_list)) {
1638                 ASSERT(bp->b_flags & _XBF_DELWRI_Q);
1639                 list_del_init(&bp->b_list);
1640                 dequeued = 1;
1641         }
1642         bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
1643         spin_unlock(dwlk);
1644
1645         if (dequeued)
1646                 xfs_buf_rele(bp);
1647
1648         XB_TRACE(bp, "delwri_dq", (long)dequeued);
1649 }
1650
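/*
 * Wait for all pending work items on the given I/O completion
 * workqueue to finish.
 */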
1651 STATIC void
1652 xfs_buf_runall_queues(
1653         struct workqueue_struct *queue)
1654 {
1655         flush_workqueue(queue);
1656 }
1657
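/*
 * Memory shaker callback: ask the xfsbufd of every registered buffer
 * target to flush its delayed write queue immediately.  Targets that
 * have been put to sleep for a freeze are skipped.
 */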
1658 STATIC int
1659 xfsbufd_wakeup(
1660         int                     priority,
1661         gfp_t                   mask)
1662 {
1663         xfs_buftarg_t           *btp;
1664
1665         spin_lock(&xfs_buftarg_lock);
1666         list_for_each_entry(btp, &xfs_buftarg_list, bt_list) {
1667                 if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags))
1668                         continue;
1669                 set_bit(XBT_FORCE_FLUSH, &btp->bt_flags);
1670                 wake_up_process(btp->bt_task);
1671         }
1672         spin_unlock(&xfs_buftarg_lock);
1673         return 0;
1674 }
1675
1676 /*
1677  * Move as many aged buffers as we can to the supplied list,
1678  * indicating if we skipped any buffers to prevent deadlocks.
1679  */
1680 STATIC int
1681 xfs_buf_delwri_split(
1682         xfs_buftarg_t   *target,
1683         struct list_head *list,
1684         unsigned long   age)
1685 {
1686         xfs_buf_t       *bp, *n;
1687         struct list_head *dwq = &target->bt_delwrite_queue;
1688         spinlock_t      *dwlk = &target->bt_delwrite_lock;
1689         int             skipped = 0;
1690         int             force;
1691
1692         force = test_and_clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
1693         INIT_LIST_HEAD(list);
1694         spin_lock(dwlk);
1695         list_for_each_entry_safe(bp, n, dwq, b_list) {
1696                 XB_TRACE(bp, "walkq1", (long)xfs_buf_ispin(bp));
1697                 ASSERT(bp->b_flags & XBF_DELWRI);
1698
1699                 if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) {
1700                         if (!force &&
1701                             time_before(jiffies, bp->b_queuetime + age)) {
1702                                 xfs_buf_unlock(bp);
1703                                 break;
1704                         }
1705
1706                         bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q|
1707                                          _XBF_RUN_QUEUES);
1708                         bp->b_flags |= XBF_WRITE;
1709                         list_move_tail(&bp->b_list, list);
1710                 } else
1711                         skipped++;
1712         }
1713         spin_unlock(dwlk);
1714
1715         return skipped;
1716
1717 }
1718
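/*
 * Per-target delayed write daemon.  Wakes up periodically (or
 * immediately on a forced flush) to pull aged buffers off the delayed
 * write queue and issue their I/O, then unplugs the target's request
 * queue.  Runs with PF_MEMALLOC set as it writes back dirty data on
 * behalf of memory reclaim.
 */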
1719 STATIC int
1720 xfsbufd(
1721         void            *data)
1722 {
1723         struct list_head tmp;
1724         xfs_buftarg_t   *target = (xfs_buftarg_t *)data;
1725         int             count;
1726         xfs_buf_t       *bp;
1727
1728         current->flags |= PF_MEMALLOC;
1729
1730         do {
1731                 if (unlikely(freezing(current))) {
1732                         set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
1733                         refrigerator();
1734                 } else {
1735                         clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
1736                 }
1737
1738                 schedule_timeout_interruptible(
1739                         xfs_buf_timer_centisecs * msecs_to_jiffies(10));
1740
1741                 xfs_buf_delwri_split(target, &tmp,
1742                                 xfs_buf_age_centisecs * msecs_to_jiffies(10));
1743
1744                 count = 0;
1745                 while (!list_empty(&tmp)) {
1746                         bp = list_entry(tmp.next, xfs_buf_t, b_list);
1747                         ASSERT(target == bp->b_target);
1748
1749                         list_del_init(&bp->b_list);
1750                         xfs_buf_iostrategy(bp);
1751                         count++;
1752                 }
1753
1754                 if (as_list_len > 0)
1755                         purge_addresses();
1756                 if (count)
1757                         blk_run_address_space(target->bt_mapping);
1758
1759         } while (!kthread_should_stop());
1760
1761         return 0;
1762 }
1763
1764 /*
1765  *      Force out all delayed write buffers that belong to the given
1766  *      target, optionally waiting for their I/O to complete. This is used
1767  *      in filesystem error handling to preserve the consistency of metadata.
1768  */
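/*
 * Sketch of a typical call (hypothetical caller; waits for all I/O to
 * complete before proceeding):
 *
 *	pincount = xfs_flush_buftarg(target, 1);
 */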
1769 int
1770 xfs_flush_buftarg(
1771         xfs_buftarg_t   *target,
1772         int             wait)
1773 {
1774         struct list_head tmp;
1775         xfs_buf_t       *bp, *n;
1776         int             pincount = 0;
1777
1778         xfs_buf_runall_queues(xfsdatad_workqueue);
1779         xfs_buf_runall_queues(xfslogd_workqueue);
1780
1781         set_bit(XBT_FORCE_FLUSH, &target->bt_flags);
1782         pincount = xfs_buf_delwri_split(target, &tmp, 0);
1783
1784         /*
1785          * We have dropped the delayed write list lock; now walk the temporary list
1786          */
1787         list_for_each_entry_safe(bp, n, &tmp, b_list) {
1788                 ASSERT(target == bp->b_target);
1789                 if (wait)
1790                         bp->b_flags &= ~XBF_ASYNC;
1791                 else
1792                         list_del_init(&bp->b_list);
1793
1794                 xfs_buf_iostrategy(bp);
1795         }
1796
1797         if (wait)
1798                 blk_run_address_space(target->bt_mapping);
1799
1800         /*
1801          * Remaining list items must be waited on and released before returning
1802          */
1803         while (!list_empty(&tmp)) {
1804                 bp = list_entry(tmp.next, xfs_buf_t, b_list);
1805
1806                 list_del_init(&bp->b_list);
1807                 xfs_iowait(bp);
1808                 xfs_buf_relse(bp);
1809         }
1810
1811         return pincount;
1812 }
1813
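/*
 * One-time buffer cache initialisation: create the buffer zone, the
 * log and data I/O completion workqueues, and register the memory
 * shaker that kicks the xfsbufd threads under memory pressure.  Any
 * failure unwinds whatever was already set up.
 */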
1814 int __init
1815 xfs_buf_init(void)
1816 {
1817 #ifdef XFS_BUF_TRACE
1818         xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_SLEEP);
1819 #endif
1820
1821         xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
1822                                                 KM_ZONE_HWALIGN, NULL);
1823         if (!xfs_buf_zone)
1824                 goto out_free_trace_buf;
1825
1826         xfslogd_workqueue = create_workqueue("xfslogd");
1827         if (!xfslogd_workqueue)
1828                 goto out_free_buf_zone;
1829
1830         xfsdatad_workqueue = create_workqueue("xfsdatad");
1831         if (!xfsdatad_workqueue)
1832                 goto out_destroy_xfslogd_workqueue;
1833
1834         xfs_buf_shake = kmem_shake_register(xfsbufd_wakeup);
1835         if (!xfs_buf_shake)
1836                 goto out_destroy_xfsdatad_workqueue;
1837
1838         return 0;
1839
1840  out_destroy_xfsdatad_workqueue:
1841         destroy_workqueue(xfsdatad_workqueue);
1842  out_destroy_xfslogd_workqueue:
1843         destroy_workqueue(xfslogd_workqueue);
1844  out_free_buf_zone:
1845         kmem_zone_destroy(xfs_buf_zone);
1846  out_free_trace_buf:
1847 #ifdef XFS_BUF_TRACE
1848         ktrace_free(xfs_buf_trace_buf);
1849 #endif
1850         return -ENOMEM;
1851 }
1852
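/*
 * Tear down everything xfs_buf_init() set up, in reverse order.
 */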
1853 void
1854 xfs_buf_terminate(void)
1855 {
1856         kmem_shake_deregister(xfs_buf_shake);
1857         destroy_workqueue(xfsdatad_workqueue);
1858         destroy_workqueue(xfslogd_workqueue);
1859         kmem_zone_destroy(xfs_buf_zone);
1860 #ifdef XFS_BUF_TRACE
1861         ktrace_free(xfs_buf_trace_buf);
1862 #endif
1863 }
1864
1865 #ifdef CONFIG_KDB_MODULES
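/*
 * Give the kernel debugger access to the global buffer target list.
 */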
1866 struct list_head *
1867 xfs_get_buftarg_list(void)
1868 {
1869         return &xfs_buftarg_list;
1870 }
1871 #endif