xfs: encapsulate bbcount validity checking
fs/xfs/xfs_log_recover.c
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_log_priv.h"
#include "xfs_buf_item.h"
#include "xfs_log_recover.h"
#include "xfs_extfree_item.h"
#include "xfs_trans_priv.h"
#include "xfs_quota.h"
#include "xfs_rw.h"
#include "xfs_utils.h"
#include "xfs_trace.h"

STATIC int      xlog_find_zeroed(xlog_t *, xfs_daddr_t *);
STATIC int      xlog_clear_stale_blocks(xlog_t *, xfs_lsn_t);
#if defined(DEBUG)
STATIC void     xlog_recover_check_summary(xlog_t *);
#else
#define xlog_recover_check_summary(log)
#endif

/*
 * Sector aligned buffer routines for buffer create/read/write/access
 */

/* Number of basic blocks in a log sector */
#define xlog_sectbb(log) (1 << (log)->l_sectbb_log)
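
/*
 * Example: with 4096-byte log sectors and 512-byte basic blocks,
 * l_sectbb_log is 3 and xlog_sectbb() yields 8 basic blocks per
 * sector; with 512-byte sectors l_sectbb_log is 0 and the sector
 * rounding below becomes a no-op.
 */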

/*
 * Verify that the given count of basic blocks is a valid number of
 * blocks to specify for an operation involving the given XFS log
 * buffer.  Returns nonzero if the count is valid, 0 otherwise.
 */
static inline int
xlog_buf_bbcount_valid(
        xlog_t          *log,
        int             bbcount)
{
        return bbcount > 0 && bbcount <= log->l_logBBsize;
}

STATIC xfs_buf_t *
xlog_get_bp(
        xlog_t          *log,
        int             nbblks)
{
        if (!xlog_buf_bbcount_valid(log, nbblks)) {
                xlog_warn("XFS: Invalid block length (0x%x) given for buffer",
                        nbblks);
                XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
                return NULL;
        }

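        /*
         * Reads are always sector aligned, so when the log sector size
         * is larger than a basic block the start may be rounded down
         * and the length rounded up.  Pad multi-block buffers with one
         * extra sector of slop to cover that worst case.
         */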
        if (log->l_sectbb_log) {
                if (nbblks > 1)
                        nbblks += xlog_sectbb(log);
                nbblks = round_up(nbblks, xlog_sectbb(log));
        }
        return xfs_buf_get_noaddr(BBTOB(nbblks), log->l_mp->m_logdev_targp);
}

STATIC void
xlog_put_bp(
        xfs_buf_t       *bp)
{
        xfs_buf_free(bp);
}

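/*
 * xlog_bread_noalign() may have rounded the requested start block down
 * to a sector boundary, so return a pointer to the caller's block
 * within the sector-aligned buffer.
 */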
STATIC xfs_caddr_t
xlog_align(
        xlog_t          *log,
        xfs_daddr_t     blk_no,
        int             nbblks,
        xfs_buf_t       *bp)
{
        xfs_caddr_t     ptr;

        if (!log->l_sectbb_log)
                return XFS_BUF_PTR(bp);

        ptr = XFS_BUF_PTR(bp) + BBTOB((int)blk_no & log->l_sectbb_mask);
        ASSERT(XFS_BUF_SIZE(bp) >=
                BBTOB(nbblks + (blk_no & log->l_sectbb_mask)));
        return ptr;
}


/*
 * nbblks should be uint, but oh well.  Just want to catch that 32-bit length.
 */
STATIC int
xlog_bread_noalign(
        xlog_t          *log,
        xfs_daddr_t     blk_no,
        int             nbblks,
        xfs_buf_t       *bp)
{
        int             error;

        if (!xlog_buf_bbcount_valid(log, nbblks)) {
                xlog_warn("XFS: Invalid block length (0x%x) given for buffer",
                        nbblks);
                XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
                return EFSCORRUPTED;
        }

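        /*
         * Round the block range out to log sector boundaries;
         * xlog_align() gives callers back a pointer to the data they
         * actually asked for.
         */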
        if (log->l_sectbb_log) {
                blk_no = round_down(blk_no, xlog_sectbb(log));
                nbblks = round_up(nbblks, xlog_sectbb(log));
        }

        ASSERT(bp);
        ASSERT(nbblks > 0);
        ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp));

        XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
        XFS_BUF_READ(bp);
        XFS_BUF_BUSY(bp);
        XFS_BUF_SET_COUNT(bp, BBTOB(nbblks));
        XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp);

        xfsbdstrat(log->l_mp, bp);
        error = xfs_iowait(bp);
        if (error)
                xfs_ioerror_alert("xlog_bread", log->l_mp,
                                  bp, XFS_BUF_ADDR(bp));
        return error;
}

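/*
 * Read nbblks basic blocks at blk_no, sector aligned, and return a
 * pointer to the data for blk_no in *offset.
 */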
STATIC int
xlog_bread(
        xlog_t          *log,
        xfs_daddr_t     blk_no,
        int             nbblks,
        xfs_buf_t       *bp,
        xfs_caddr_t     *offset)
{
        int             error;

        error = xlog_bread_noalign(log, blk_no, nbblks, bp);
        if (error)
                return error;

        *offset = xlog_align(log, blk_no, nbblks, bp);
        return 0;
}

/*
 * Write out the buffer at the given block for the given number of blocks.
 * The buffer is kept locked across the write and is returned locked.
 * This can only be used for synchronous log writes.
 */
STATIC int
xlog_bwrite(
        xlog_t          *log,
        xfs_daddr_t     blk_no,
        int             nbblks,
        xfs_buf_t       *bp)
{
        int             error;

        if (!xlog_buf_bbcount_valid(log, nbblks)) {
                xlog_warn("XFS: Invalid block length (0x%x) given for buffer",
                        nbblks);
                XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
                return EFSCORRUPTED;
        }

        if (log->l_sectbb_log) {
                blk_no = round_down(blk_no, xlog_sectbb(log));
                nbblks = round_up(nbblks, xlog_sectbb(log));
        }

        ASSERT(nbblks > 0);
        ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp));

        XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
        XFS_BUF_ZEROFLAGS(bp);
        XFS_BUF_BUSY(bp);
        XFS_BUF_HOLD(bp);
        XFS_BUF_PSEMA(bp, PRIBIO);
        XFS_BUF_SET_COUNT(bp, BBTOB(nbblks));
        XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp);

        if ((error = xfs_bwrite(log->l_mp, bp)))
                xfs_ioerror_alert("xlog_bwrite", log->l_mp,
                                  bp, XFS_BUF_ADDR(bp));
        return error;
}

#ifdef DEBUG
/*
 * dump debug superblock and log record information
 */
STATIC void
xlog_header_check_dump(
        xfs_mount_t             *mp,
        xlog_rec_header_t       *head)
{
        cmn_err(CE_DEBUG, "%s:  SB : uuid = %pU, fmt = %d\n",
                __func__, &mp->m_sb.sb_uuid, XLOG_FMT);
        cmn_err(CE_DEBUG, "    log : uuid = %pU, fmt = %d\n",
                &head->h_fs_uuid, be32_to_cpu(head->h_fmt));
}
#else
#define xlog_header_check_dump(mp, head)
#endif

/*
 * check log record header for recovery
 */
STATIC int
xlog_header_check_recover(
        xfs_mount_t             *mp,
        xlog_rec_header_t       *head)
{
        ASSERT(be32_to_cpu(head->h_magicno) == XLOG_HEADER_MAGIC_NUM);

        /*
         * IRIX doesn't write the h_fmt field and leaves it zeroed
         * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
         * a dirty log created in IRIX.
         */
        if (unlikely(be32_to_cpu(head->h_fmt) != XLOG_FMT)) {
                xlog_warn(
        "XFS: dirty log written in incompatible format - can't recover");
                xlog_header_check_dump(mp, head);
                XFS_ERROR_REPORT("xlog_header_check_recover(1)",
                                 XFS_ERRLEVEL_HIGH, mp);
                return XFS_ERROR(EFSCORRUPTED);
        } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
                xlog_warn(
        "XFS: dirty log entry has mismatched uuid - can't recover");
                xlog_header_check_dump(mp, head);
                XFS_ERROR_REPORT("xlog_header_check_recover(2)",
                                 XFS_ERRLEVEL_HIGH, mp);
                return XFS_ERROR(EFSCORRUPTED);
        }
        return 0;
}

/*
 * check a log record header's uuid against this mount's uuid
 */
STATIC int
xlog_header_check_mount(
        xfs_mount_t             *mp,
        xlog_rec_header_t       *head)
{
        ASSERT(be32_to_cpu(head->h_magicno) == XLOG_HEADER_MAGIC_NUM);

        if (uuid_is_nil(&head->h_fs_uuid)) {
                /*
                 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
                 * h_fs_uuid is nil, we assume this log was last mounted
                 * by IRIX and continue.
                 */
                xlog_warn("XFS: nil uuid in log - IRIX style log");
        } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
                xlog_warn("XFS: log has mismatched uuid - can't recover");
                xlog_header_check_dump(mp, head);
                XFS_ERROR_REPORT("xlog_header_check_mount",
                                 XFS_ERRLEVEL_HIGH, mp);
                return XFS_ERROR(EFSCORRUPTED);
        }
        return 0;
}

STATIC void
xlog_recover_iodone(
        struct xfs_buf  *bp)
{
        if (XFS_BUF_GETERROR(bp)) {
                /*
                 * We're not going to bother about retrying
                 * this during recovery. One strike!
                 */
                xfs_ioerror_alert("xlog_recover_iodone",
                                  bp->b_mount, bp, XFS_BUF_ADDR(bp));
                xfs_force_shutdown(bp->b_mount, SHUTDOWN_META_IO_ERROR);
        }
        bp->b_mount = NULL;
        XFS_BUF_CLR_IODONE_FUNC(bp);
        xfs_biodone(bp);
}

/*
 * This routine finds (to an approximation) the first block in the physical
 * log which contains the given cycle.  It uses a binary search algorithm.
 * Note that the algorithm can not be perfect because the disk will not
 * necessarily be perfect.
 */
STATIC int
xlog_find_cycle_start(
        xlog_t          *log,
        xfs_buf_t       *bp,
        xfs_daddr_t     first_blk,
        xfs_daddr_t     *last_blk,
        uint            cycle)
{
        xfs_caddr_t     offset;
        xfs_daddr_t     mid_blk;
        uint            mid_cycle;
        int             error;

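        /*
         * Binary search invariant: the block at *last_blk holds the
         * target cycle number and the block at first_blk does not, so
         * the search converges on (roughly) the first block stamped
         * with 'cycle'.
         */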
        mid_blk = BLK_AVG(first_blk, *last_blk);
        while (mid_blk != first_blk && mid_blk != *last_blk) {
                error = xlog_bread(log, mid_blk, 1, bp, &offset);
                if (error)
                        return error;
                mid_cycle = xlog_get_cycle(offset);
                if (mid_cycle == cycle) {
                        *last_blk = mid_blk;
                        /* last_half_cycle == mid_cycle */
                } else {
                        first_blk = mid_blk;
                        /* first_half_cycle == mid_cycle */
                }
                mid_blk = BLK_AVG(first_blk, *last_blk);
        }
        ASSERT((mid_blk == first_blk && mid_blk+1 == *last_blk) ||
               (mid_blk == *last_blk && mid_blk-1 == first_blk));

        return 0;
}

/*
 * Check that the range of blocks does not contain the cycle number
 * given.  The scan needs to occur from front to back and the ptr into the
 * region must be updated since a later routine will need to perform another
 * test.  If the region is completely good, we end up returning the same
 * last block number.
 *
 * Set blkno to -1 if we encounter no errors.  This is an invalid block number
 * since we don't ever expect logs to get this large.
 */
STATIC int
xlog_find_verify_cycle(
        xlog_t          *log,
        xfs_daddr_t     start_blk,
        int             nbblks,
        uint            stop_on_cycle_no,
        xfs_daddr_t     *new_blk)
{
        xfs_daddr_t     i, j;
        uint            cycle;
        xfs_buf_t       *bp;
        xfs_daddr_t     bufblks;
        xfs_caddr_t     buf = NULL;
        int             error = 0;

        /*
         * Greedily allocate a buffer big enough to handle the full
         * range of basic blocks we'll be examining.  If that fails,
         * try a smaller size.  We need to be able to read at least
         * a log sector, or we're out of luck.
         */
        bufblks = 1 << ffs(nbblks);
        while (!(bp = xlog_get_bp(log, bufblks))) {
                bufblks >>= 1;
                if (bufblks < xlog_sectbb(log))
                        return ENOMEM;
        }
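
        /*
         * Note that 1 << ffs(nbblks) rounds to a power of two based on
         * the lowest set bit, so bufblks can come out smaller than
         * nbblks; the loop below copes by issuing additional reads.
         */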

        for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
                int     bcount;

                bcount = min(bufblks, (start_blk + nbblks - i));

                error = xlog_bread(log, i, bcount, bp, &buf);
                if (error)
                        goto out;

                for (j = 0; j < bcount; j++) {
                        cycle = xlog_get_cycle(buf);
                        if (cycle == stop_on_cycle_no) {
                                *new_blk = i+j;
                                goto out;
                        }

                        buf += BBSIZE;
                }
        }

        *new_blk = -1;

out:
        xlog_put_bp(bp);
        return error;
}

/*
 * Potentially backup over partial log record write.
 *
 * In the typical case, last_blk is the number of the block directly after
 * a good log record.  Therefore, we subtract one to get the block number
 * of the last block in the given buffer.  extra_bblks contains the number
 * of blocks we would have read on a previous read.  This happens when the
 * last log record is split over the end of the physical log.
 *
 * extra_bblks is the number of blocks potentially verified on a previous
 * call to this routine.
 */
STATIC int
xlog_find_verify_log_record(
        xlog_t                  *log,
        xfs_daddr_t             start_blk,
        xfs_daddr_t             *last_blk,
        int                     extra_bblks)
{
        xfs_daddr_t             i;
        xfs_buf_t               *bp;
        xfs_caddr_t             offset = NULL;
        xlog_rec_header_t       *head = NULL;
        int                     error = 0;
        int                     smallmem = 0;
        int                     num_blks = *last_blk - start_blk;
        int                     xhdrs;

        ASSERT(start_blk != 0 || *last_blk != start_blk);

        if (!(bp = xlog_get_bp(log, num_blks))) {
                if (!(bp = xlog_get_bp(log, 1)))
                        return ENOMEM;
                smallmem = 1;
        } else {
                error = xlog_bread(log, start_blk, num_blks, bp, &offset);
                if (error)
                        goto out;
                offset += ((num_blks - 1) << BBSHIFT);
        }

        for (i = (*last_blk) - 1; i >= 0; i--) {
                if (i < start_blk) {
                        /* valid log record not found */
                        xlog_warn(
                "XFS: Log inconsistent (didn't find previous header)");
                        ASSERT(0);
                        error = XFS_ERROR(EIO);
                        goto out;
                }

                if (smallmem) {
                        error = xlog_bread(log, i, 1, bp, &offset);
                        if (error)
                                goto out;
                }

                head = (xlog_rec_header_t *)offset;

                if (XLOG_HEADER_MAGIC_NUM == be32_to_cpu(head->h_magicno))
                        break;

                if (!smallmem)
                        offset -= BBSIZE;
        }

        /*
         * We hit the beginning of the physical log & still no header.  Return
         * to caller.  If caller can handle a return of -1, then this routine
         * will be called again for the end of the physical log.
         */
        if (i == -1) {
                error = -1;
                goto out;
        }

        /*
         * We have the final block of the good log (the first block
         * of the log record _before_ the head), so we check the uuid.
         */
        if ((error = xlog_header_check_mount(log->l_mp, head)))
                goto out;

        /*
         * We may have found a log record header before we expected one.
         * last_blk will be the 1st block # with a given cycle #.  We may end
         * up reading an entire log record.  In this case, we don't want to
         * reset last_blk.  Only when last_blk points in the middle of a log
         * record do we update last_blk.
         */
        if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
                uint    h_size = be32_to_cpu(head->h_size);

                xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
                if (h_size % XLOG_HEADER_CYCLE_SIZE)
                        xhdrs++;
        } else {
                xhdrs = 1;
        }

        if (*last_blk - i + extra_bblks !=
            BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
                *last_blk = i;

out:
        xlog_put_bp(bp);
        return error;
}

/*
 * Head is defined to be the point of the log where the next log write
 * could go.  This means that incomplete LR writes at the end are
 * eliminated when calculating the head.  We aren't guaranteed that
 * previous LRs have complete transactions.  We only know that a cycle
 * number of current cycle number -1 won't be present in the log if we
 * start writing from our current block number.
 *
 * last_blk contains the block number of the first block with a given
 * cycle number.
 *
 * Return: zero if normal, non-zero if error.
 */
STATIC int
xlog_find_head(
        xlog_t          *log,
        xfs_daddr_t     *return_head_blk)
{
        xfs_buf_t       *bp;
        xfs_caddr_t     offset;
        xfs_daddr_t     new_blk, first_blk, start_blk, last_blk, head_blk;
        int             num_scan_bblks;
        uint            first_half_cycle, last_half_cycle;
        uint            stop_on_cycle;
        int             error, log_bbnum = log->l_logBBsize;

        /* Is the end of the log device zeroed? */
        if ((error = xlog_find_zeroed(log, &first_blk)) == -1) {
                *return_head_blk = first_blk;

                /* Is the whole lot zeroed? */
                if (!first_blk) {
                        /* Linux XFS shouldn't generate totally zeroed logs -
                         * mkfs etc write a dummy unmount record to a fresh
                         * log so we can store the uuid in there
                         */
                        xlog_warn("XFS: totally zeroed log");
                }

                return 0;
        } else if (error) {
                xlog_warn("XFS: empty log check failed");
                return error;
        }

        first_blk = 0;                  /* get cycle # of 1st block */
        bp = xlog_get_bp(log, 1);
        if (!bp)
                return ENOMEM;

        error = xlog_bread(log, 0, 1, bp, &offset);
        if (error)
                goto bp_err;

        first_half_cycle = xlog_get_cycle(offset);

        last_blk = head_blk = log_bbnum - 1;    /* get cycle # of last block */
        error = xlog_bread(log, last_blk, 1, bp, &offset);
        if (error)
                goto bp_err;

        last_half_cycle = xlog_get_cycle(offset);
        ASSERT(last_half_cycle != 0);

        /*
         * If the 1st half cycle number is equal to the last half cycle number,
         * then the entire log is stamped with the same cycle number.  In this
         * case, head_blk can't be set to zero (which makes sense).  The below
         * math doesn't work out properly with head_blk equal to zero.  Instead,
         * we set it to log_bbnum which is an invalid block number, but this
         * value makes the math correct.  If head_blk doesn't change through
         * all the tests below, *head_blk is set to zero at the very end rather
         * than log_bbnum.  In a sense, log_bbnum and zero are the same block
         * in a circular file.
         */
        if (first_half_cycle == last_half_cycle) {
                /*
                 * In this case we believe that the entire log should have
                 * cycle number last_half_cycle.  We need to scan backwards
                 * from the end verifying that there are no holes still
                 * containing last_half_cycle - 1.  If we find such a hole,
                 * then the start of that hole will be the new head.  The
                 * simple case looks like
                 *        x | x ... | x - 1 | x
                 * Another case that fits this picture would be
                 *        x | x + 1 | x ... | x
                 * In this case the head really is somewhere at the end of the
                 * log, as one of the latest writes at the beginning was
                 * incomplete.
                 * One more case is
                 *        x | x + 1 | x ... | x - 1 | x
                 * This is really the combination of the above two cases, and
                 * the head has to end up at the start of the x-1 hole at the
                 * end of the log.
                 *
                 * In the 256k log case, we will read from the beginning to the
                 * end of the log and search for cycle numbers equal to x-1.
                 * We don't worry about the x+1 blocks that we encounter,
                 * because we know that they cannot be the head since the log
                 * started with x.
                 */
                head_blk = log_bbnum;
                stop_on_cycle = last_half_cycle - 1;
        } else {
                /*
                 * In this case we want to find the first block with cycle
                 * number matching last_half_cycle.  We expect the log to be
                 * some variation on
                 *        x + 1 ... | x ...
                 * The first block with cycle number x (last_half_cycle) will
                 * be where the new head belongs.  First we do a binary search
                 * for the first occurrence of last_half_cycle.  The binary
                 * search may not be totally accurate, so then we scan back
                 * from there looking for occurrences of last_half_cycle before
                 * us.  If that backwards scan wraps around the beginning of
                 * the log, then we look for occurrences of last_half_cycle - 1
                 * at the end of the log.  The cases we're looking for look
                 * like
                 *        x + 1 ... | x | x + 1 | x ...
                 *                               ^ binary search stopped here
                 * or
                 *        x + 1 ... | x ... | x - 1 | x
                 *        <---------> less than scan distance
                 */
                stop_on_cycle = last_half_cycle;
                if ((error = xlog_find_cycle_start(log, bp, first_blk,
                                                &head_blk, last_half_cycle)))
                        goto bp_err;
        }

        /*
         * Now validate the answer.  Scan back some number of maximum possible
         * blocks and make sure each one has the expected cycle number.  The
         * maximum is determined by the total possible amount of buffering
         * in the in-core log.  The following number can be made tighter if
         * we actually look at the block size of the filesystem.
         */
        num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
        if (head_blk >= num_scan_bblks) {
                /*
                 * We are guaranteed that the entire check can be performed
                 * in one buffer.
                 */
                start_blk = head_blk - num_scan_bblks;
                if ((error = xlog_find_verify_cycle(log,
                                                start_blk, num_scan_bblks,
                                                stop_on_cycle, &new_blk)))
                        goto bp_err;
                if (new_blk != -1)
                        head_blk = new_blk;
        } else {                /* need to read 2 parts of log */
                /*
                 * We are going to scan backwards in the log in two parts.
                 * First we scan the physical end of the log.  In this part
                 * of the log, we are looking for blocks with cycle number
                 * last_half_cycle - 1.
                 * If we find one, then we know that the log starts there, as
                 * we've found a hole that didn't get written in going around
                 * the end of the physical log.  The simple case for this is
                 *        x + 1 ... | x ... | x - 1 | x
                 *        <---------> less than scan distance
                 * If all of the blocks at the end of the log have cycle number
                 * last_half_cycle, then we check the blocks at the start of
                 * the log looking for occurrences of last_half_cycle.  If we
                 * find one, then our current estimate for the location of the
                 * first occurrence of last_half_cycle is wrong and we move
                 * back to the hole we've found.  This case looks like
                 *        x + 1 ... | x | x + 1 | x ...
                 *                               ^ binary search stopped here
                 * Another case we need to handle that only occurs in 256k
                 * logs is
                 *        x + 1 ... | x ... | x+1 | x ...
                 *                   ^ binary search stops here
                 * In a 256k log, the scan at the end of the log will see the
                 * x + 1 blocks.  We need to skip past those since that is
                 * certainly not the head of the log.  By searching for
                 * last_half_cycle-1 we accomplish that.
                 */
                start_blk = log_bbnum - num_scan_bblks + head_blk;
                ASSERT(head_blk <= INT_MAX &&
                        (xfs_daddr_t) num_scan_bblks - head_blk >= 0);
                if ((error = xlog_find_verify_cycle(log, start_blk,
                                        num_scan_bblks - (int)head_blk,
                                        (stop_on_cycle - 1), &new_blk)))
                        goto bp_err;
                if (new_blk != -1) {
                        head_blk = new_blk;
                        goto bad_blk;
                }

                /*
                 * Scan beginning of log now.  The last part of the physical
                 * log is good.  This scan needs to verify that it doesn't find
                 * the last_half_cycle.
                 */
                start_blk = 0;
                ASSERT(head_blk <= INT_MAX);
                if ((error = xlog_find_verify_cycle(log,
                                        start_blk, (int)head_blk,
                                        stop_on_cycle, &new_blk)))
                        goto bp_err;
                if (new_blk != -1)
                        head_blk = new_blk;
        }

 bad_blk:
        /*
         * Now we need to make sure head_blk is not pointing to a block in
         * the middle of a log record.
         */
        num_scan_bblks = XLOG_REC_SHIFT(log);
        if (head_blk >= num_scan_bblks) {
                start_blk = head_blk - num_scan_bblks; /* don't read head_blk */

                /* start ptr at last block ptr before head_blk */
                if ((error = xlog_find_verify_log_record(log, start_blk,
                                                        &head_blk, 0)) == -1) {
                        error = XFS_ERROR(EIO);
                        goto bp_err;
                } else if (error)
                        goto bp_err;
        } else {
                start_blk = 0;
                ASSERT(head_blk <= INT_MAX);
                if ((error = xlog_find_verify_log_record(log, start_blk,
                                                        &head_blk, 0)) == -1) {
                        /* We hit the beginning of the log during our search */
                        start_blk = log_bbnum - num_scan_bblks + head_blk;
                        new_blk = log_bbnum;
                        ASSERT(start_blk <= INT_MAX &&
                                (xfs_daddr_t) log_bbnum-start_blk >= 0);
                        ASSERT(head_blk <= INT_MAX);
                        if ((error = xlog_find_verify_log_record(log,
                                                        start_blk, &new_blk,
                                                        (int)head_blk)) == -1) {
                                error = XFS_ERROR(EIO);
                                goto bp_err;
                        } else if (error)
                                goto bp_err;
                        if (new_blk != log_bbnum)
                                head_blk = new_blk;
                } else if (error)
                        goto bp_err;
        }

        xlog_put_bp(bp);
        if (head_blk == log_bbnum)
                *return_head_blk = 0;
        else
                *return_head_blk = head_blk;
        /*
         * When returning here, we have a good block number.  Bad block
         * means that during a previous crash, we didn't have a clean break
         * from cycle number N to cycle number N-1.  In this case, we need
         * to find the first block with cycle number N-1.
         */
        return 0;

 bp_err:
        xlog_put_bp(bp);

        if (error)
                xlog_warn("XFS: failed to find log head");
        return error;
}

/*
 * Find the sync block number or the tail of the log.
 *
 * This will be the block number of the last record to have its
 * associated buffers synced to disk.  Every log record header has
 * a sync lsn embedded in it.  LSNs hold block numbers, so it is easy
 * to get a sync block number.  The only concern is to figure out which
 * log record header to believe.
 *
 * The following algorithm uses the log record header with the largest
 * lsn.  The entire log record does not need to be valid.  We only care
 * that the header is valid.
 *
 * We could speed up search by using current head_blk buffer, but it is not
 * available.
 */
STATIC int
xlog_find_tail(
        xlog_t                  *log,
        xfs_daddr_t             *head_blk,
        xfs_daddr_t             *tail_blk)
{
        xlog_rec_header_t       *rhead;
        xlog_op_header_t        *op_head;
        xfs_caddr_t             offset = NULL;
        xfs_buf_t               *bp;
        int                     error, i, found;
        xfs_daddr_t             umount_data_blk;
        xfs_daddr_t             after_umount_blk;
        xfs_lsn_t               tail_lsn;
        int                     hblks;

        found = 0;

        /*
         * Find previous log record
         */
        if ((error = xlog_find_head(log, head_blk)))
                return error;

        bp = xlog_get_bp(log, 1);
        if (!bp)
                return ENOMEM;
        if (*head_blk == 0) {                           /* special case */
                error = xlog_bread(log, 0, 1, bp, &offset);
                if (error)
                        goto bread_err;

                if (xlog_get_cycle(offset) == 0) {
                        *tail_blk = 0;
                        /* leave all other log inited values alone */
                        goto exit;
                }
        }

        /*
         * Search backwards looking for log record header block
         */
        ASSERT(*head_blk < INT_MAX);
        for (i = (int)(*head_blk) - 1; i >= 0; i--) {
                error = xlog_bread(log, i, 1, bp, &offset);
                if (error)
                        goto bread_err;

                if (XLOG_HEADER_MAGIC_NUM == be32_to_cpu(*(__be32 *)offset)) {
                        found = 1;
                        break;
                }
        }
        /*
         * If we haven't found the log record header block, start looking
         * again from the end of the physical log.  XXXmiken: There should be
         * a check here to make sure we didn't search more than N blocks in
         * the previous code.
         */
        if (!found) {
                for (i = log->l_logBBsize - 1; i >= (int)(*head_blk); i--) {
                        error = xlog_bread(log, i, 1, bp, &offset);
                        if (error)
                                goto bread_err;

                        if (XLOG_HEADER_MAGIC_NUM ==
                            be32_to_cpu(*(__be32 *)offset)) {
                                found = 2;
                                break;
                        }
                }
        }
        if (!found) {
                xlog_warn("XFS: xlog_find_tail: couldn't find sync record");
                ASSERT(0);
                /* free bp via the common exit path rather than leaking it */
                error = XFS_ERROR(EIO);
                goto bread_err;
        }

        /* find blk_no of tail of log */
        rhead = (xlog_rec_header_t *)offset;
        *tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));

        /*
         * Reset log values according to the state of the log when we
         * crashed.  In the case where head_blk == 0, we bump curr_cycle
         * one because the next write starts a new cycle rather than
         * continuing the cycle of the last good log record.  At this
         * point we have guaranteed that all partial log records have been
         * accounted for.  Therefore, we know that the last good log record
         * written was complete and ended exactly on the end boundary
         * of the physical log.
         */
        log->l_prev_block = i;
        log->l_curr_block = (int)*head_blk;
        log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
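        /*
         * found == 2 means the last record header lies physically after
         * head_blk, i.e. the log wrapped, so writes at head_blk belong
         * to the next cycle.
         */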
        if (found == 2)
                log->l_curr_cycle++;
        log->l_tail_lsn = be64_to_cpu(rhead->h_tail_lsn);
        log->l_last_sync_lsn = be64_to_cpu(rhead->h_lsn);
        log->l_grant_reserve_cycle = log->l_curr_cycle;
        log->l_grant_reserve_bytes = BBTOB(log->l_curr_block);
        log->l_grant_write_cycle = log->l_curr_cycle;
        log->l_grant_write_bytes = BBTOB(log->l_curr_block);

        /*
         * Look for unmount record.  If we find it, then we know there
         * was a clean unmount.  Since 'i' could be the last block in
         * the physical log, we convert to a log block before comparing
         * to the head_blk.
         *
         * Save the current tail lsn to use to pass to
         * xlog_clear_stale_blocks() below.  We won't want to clear the
         * unmount record if there is one, so we pass the lsn of the
         * unmount record rather than the block after it.
         */
        if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
                int     h_size = be32_to_cpu(rhead->h_size);
                int     h_version = be32_to_cpu(rhead->h_version);

                if ((h_version & XLOG_VERSION_2) &&
                    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
                        hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
                        if (h_size % XLOG_HEADER_CYCLE_SIZE)
                                hblks++;
                } else {
                        hblks = 1;
                }
        } else {
                hblks = 1;
        }
        after_umount_blk = (i + hblks + (int)
                BTOBB(be32_to_cpu(rhead->h_len))) % log->l_logBBsize;
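        /*
         * Example: a v1 log (hblks == 1) whose record header sits at
         * block i with a one-basic-block body gives after_umount_blk ==
         * (i + 2) modulo the log size.
         */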
        tail_lsn = log->l_tail_lsn;
        if (*head_blk == after_umount_blk &&
            be32_to_cpu(rhead->h_num_logops) == 1) {
                umount_data_blk = (i + hblks) % log->l_logBBsize;
                error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
                if (error)
                        goto bread_err;

                op_head = (xlog_op_header_t *)offset;
                if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
                        /*
                         * Set tail and last sync so that newly written
                         * log records will point recovery to after the
                         * current unmount record.
                         */
                        log->l_tail_lsn =
                                xlog_assign_lsn(log->l_curr_cycle,
                                                after_umount_blk);
                        log->l_last_sync_lsn =
                                xlog_assign_lsn(log->l_curr_cycle,
                                                after_umount_blk);
                        *tail_blk = after_umount_blk;

                        /*
                         * Note that the unmount was clean. If the unmount
                         * was not clean, we need to know this to rebuild the
                         * superblock counters from the perag headers if we
                         * have a filesystem using non-persistent counters.
                         */
                        log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
                }
        }

        /*
         * Make sure that there are no blocks in front of the head
         * with the same cycle number as the head.  This can happen
         * because we allow multiple outstanding log writes concurrently,
         * and the later writes might make it out before earlier ones.
         *
         * We use the lsn from before modifying it so that we'll never
         * overwrite the unmount record after a clean unmount.
         *
         * Do this only if we are going to recover the filesystem
         *
         * NOTE: This used to say "if (!readonly)"
         * However on Linux, we can & do recover a read-only filesystem.
         * We only skip recovery if NORECOVERY is specified on mount,
         * in which case we would not be here.
         *
         * But... if the -device- itself is readonly, just skip this.
         * We can't recover this device anyway, so it won't matter.
         */
        if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp)) {
                error = xlog_clear_stale_blocks(log, tail_lsn);
        }

bread_err:
exit:
        xlog_put_bp(bp);

        if (error)
                xlog_warn("XFS: failed to locate log tail");
        return error;
}

/*
 * Is the log zeroed at all?
 *
 * The last binary search should be changed to perform an X block read
 * once X becomes small enough.  You can then search linearly through
 * the X blocks.  This will cut down on the number of reads we need to do.
 *
 * If the log is partially zeroed, this routine will pass back the blkno
 * of the first block with cycle number 0.  It won't have a complete LR
 * preceding it.
 *
 * Return:
 *      0  => the log is completely written to
 *      -1 => use *blk_no as the first block of the log
 *      >0 => error has occurred
 */
STATIC int
xlog_find_zeroed(
        xlog_t          *log,
        xfs_daddr_t     *blk_no)
{
        xfs_buf_t       *bp;
        xfs_caddr_t     offset;
        uint            first_cycle, last_cycle;
        xfs_daddr_t     new_blk, last_blk, start_blk;
        xfs_daddr_t     num_scan_bblks;
        int             error, log_bbnum = log->l_logBBsize;

        *blk_no = 0;

        /* check totally zeroed log */
        bp = xlog_get_bp(log, 1);
        if (!bp)
                return ENOMEM;
        error = xlog_bread(log, 0, 1, bp, &offset);
        if (error)
                goto bp_err;

        first_cycle = xlog_get_cycle(offset);
        if (first_cycle == 0) {         /* completely zeroed log */
                *blk_no = 0;
                xlog_put_bp(bp);
                return -1;
        }

        /* check partially zeroed log */
        error = xlog_bread(log, log_bbnum-1, 1, bp, &offset);
        if (error)
                goto bp_err;

        last_cycle = xlog_get_cycle(offset);
        if (last_cycle != 0) {          /* log completely written to */
                xlog_put_bp(bp);
                return 0;
        } else if (first_cycle != 1) {
                /*
                 * If the cycle of the last block is zero, the cycle of
                 * the first block must be 1. If it's not, maybe we're
                 * not looking at a log... Bail out.  Free bp via the
                 * common exit path rather than leaking it.
                 */
                xlog_warn("XFS: Log inconsistent or not a log (last==0, first!=1)");
                error = XFS_ERROR(EINVAL);
                goto bp_err;
        }

        /* we have a partially zeroed log */
        last_blk = log_bbnum-1;
        if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0)))
                goto bp_err;

        /*
         * Validate the answer.  Because there is no way to guarantee that
         * the entire log is made up of log records which are the same size,
         * we scan over the defined maximum blocks.  At this point, the maximum
         * is not chosen to mean anything special.   XXXmiken
         */
        num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
        ASSERT(num_scan_bblks <= INT_MAX);

        if (last_blk < num_scan_bblks)
                num_scan_bblks = last_blk;
        start_blk = last_blk - num_scan_bblks;

        /*
         * We search for any instances of cycle number 0 that occur before
         * our current estimate of the head.  What we're trying to detect is
         *        1 ... | 0 | 1 | 0...
         *                       ^ binary search ends here
         */
        if ((error = xlog_find_verify_cycle(log, start_blk,
                                         (int)num_scan_bblks, 0, &new_blk)))
                goto bp_err;
        if (new_blk != -1)
                last_blk = new_blk;

        /*
         * Potentially backup over partial log record write.  We don't need
         * to search the end of the log because we know it is zero.
         */
        if ((error = xlog_find_verify_log_record(log, start_blk,
                                &last_blk, 0)) == -1) {
                error = XFS_ERROR(EIO);
                goto bp_err;
        } else if (error)
                goto bp_err;

        *blk_no = last_blk;
bp_err:
        xlog_put_bp(bp);
        if (error)
                return error;
        return -1;
}


/*
 * These are simple subroutines used by xlog_clear_stale_blocks() below
 * to initialize a buffer full of empty log record headers and write
 * them into the log.
 */
STATIC void
xlog_add_record(
        xlog_t                  *log,
        xfs_caddr_t             buf,
        int                     cycle,
        int                     block,
        int                     tail_cycle,
        int                     tail_block)
{
        xlog_rec_header_t       *recp = (xlog_rec_header_t *)buf;

        memset(buf, 0, BBSIZE);
        recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
        recp->h_cycle = cpu_to_be32(cycle);
        recp->h_version = cpu_to_be32(
                        xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
        recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
        recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
        recp->h_fmt = cpu_to_be32(XLOG_FMT);
        memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
}

STATIC int
xlog_write_log_records(
        xlog_t          *log,
        int             cycle,
        int             start_block,
        int             blocks,
        int             tail_cycle,
        int             tail_block)
{
        xfs_caddr_t     offset;
        xfs_buf_t       *bp;
        int             balign, ealign;
        int             sectbb = xlog_sectbb(log);
        int             end_block = start_block + blocks;
        int             bufblks;
        int             error = 0;
        int             i, j = 0;

        /*
         * Greedily allocate a buffer big enough to handle the full
         * range of basic blocks to be written.  If that fails, try
         * a smaller size.  We need to be able to write at least a
         * log sector, or we're out of luck.
         */
        bufblks = 1 << ffs(blocks);
        while (!(bp = xlog_get_bp(log, bufblks))) {
                bufblks >>= 1;
                if (bufblks < xlog_sectbb(log))
                        return ENOMEM;
        }
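
        /* As in xlog_find_verify_cycle(), 1 << ffs(blocks) is only a
         * starting size; each iteration below writes at most the
         * blocks that remain.
         */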

        /* We may need to do a read at the start to fill in part of
         * the buffer in the starting sector not covered by the first
         * write below.
         */
        balign = round_down(start_block, sectbb);
        if (balign != start_block) {
                error = xlog_bread_noalign(log, start_block, 1, bp);
                if (error)
                        goto out_put_bp;

                j = start_block - balign;
        }

        for (i = start_block; i < end_block; i += bufblks) {
                int             bcount, endcount;

                bcount = min(bufblks, end_block - start_block);
                endcount = bcount - j;

                /* We may need to do a read at the end to fill in part of
                 * the buffer in the final sector not covered by the write.
                 * If this is the same sector as the above read, skip it.
                 */
                ealign = round_down(end_block, sectbb);
                if (j == 0 && (start_block + endcount > ealign)) {
                        offset = XFS_BUF_PTR(bp);
                        balign = BBTOB(ealign - start_block);
                        error = XFS_BUF_SET_PTR(bp, offset + balign,
                                                BBTOB(sectbb));
                        if (error)
                                break;

                        error = xlog_bread_noalign(log, ealign, sectbb, bp);
                        if (error)
                                break;

                        error = XFS_BUF_SET_PTR(bp, offset, bufblks);
                        if (error)
                                break;
                }

                offset = xlog_align(log, start_block, endcount, bp);
                for (; j < endcount; j++) {
                        xlog_add_record(log, offset, cycle, i+j,
                                        tail_cycle, tail_block);
                        offset += BBSIZE;
                }
                error = xlog_bwrite(log, start_block, endcount, bp);
                if (error)
                        break;
                start_block += endcount;
                j = 0;
        }

 out_put_bp:
        xlog_put_bp(bp);
        return error;
}

/*
 * This routine is called to blow away any incomplete log writes out
 * in front of the log head.  We do this so that we won't become confused
 * if we come up, write only a little bit more, and then crash again.
 * If we leave the partial log records out there, this situation could
 * cause us to think those partial writes are valid blocks since they
 * have the current cycle number.  We get rid of them by overwriting them
 * with empty log records with the old cycle number rather than the
 * current one.
 *
 * The tail lsn is passed in rather than taken from
 * the log so that we will not write over the unmount record after a
 * clean unmount in a 512 block log.  Doing so would leave the log without
 * any valid log records in it until a new one was written.  If we crashed
 * during that time we would not be able to recover.
 */
STATIC int
xlog_clear_stale_blocks(
        xlog_t          *log,
        xfs_lsn_t       tail_lsn)
{
        int             tail_cycle, head_cycle;
        int             tail_block, head_block;
        int             tail_distance, max_distance;
        int             distance;
        int             error;

        tail_cycle = CYCLE_LSN(tail_lsn);
        tail_block = BLOCK_LSN(tail_lsn);
        head_cycle = log->l_curr_cycle;
        head_block = log->l_curr_block;

        /*
         * Figure out the distance between the new head of the log
         * and the tail.  We want to write over any blocks beyond the
         * head that we may have written just before the crash, but
         * we don't want to overwrite the tail of the log.
         */
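        /*
         * Example: in a 1000-block log with head_cycle == tail_cycle,
         * head_block == 900 and tail_block == 100, the region to clear
         * wraps the end of the log and tail_distance == 100 +
         * (1000 - 900) == 200 blocks.
         */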
1286         if (head_cycle == tail_cycle) {
1287                 /*
1288                  * The tail is behind the head in the physical log,
1289                  * so the distance from the head to the tail is the
1290                  * distance from the head to the end of the log plus
1291                  * the distance from the beginning of the log to the
1292                  * tail.
1293                  */
1294                 if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
1295                         XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
1296                                          XFS_ERRLEVEL_LOW, log->l_mp);
1297                         return XFS_ERROR(EFSCORRUPTED);
1298                 }
1299                 tail_distance = tail_block + (log->l_logBBsize - head_block);
1300         } else {
1301                 /*
1302                  * The head is behind the tail in the physical log,
1303                  * so the distance from the head to the tail is just
1304                  * the tail block minus the head block.
1305                  */
1306                 if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
1307                         XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
1308                                          XFS_ERRLEVEL_LOW, log->l_mp);
1309                         return XFS_ERROR(EFSCORRUPTED);
1310                 }
1311                 tail_distance = tail_block - head_block;
1312         }
1313
1314         /*
1315          * If the head is right up against the tail, we can't clear
1316          * anything.
1317          */
1318         if (tail_distance <= 0) {
1319                 ASSERT(tail_distance == 0);
1320                 return 0;
1321         }
1322
1323         max_distance = XLOG_TOTAL_REC_SHIFT(log);
1324         /*
1325          * Take the smaller of the maximum amount of outstanding I/O
1326          * we could have and the distance to the tail to clear out.
1327          * We take the smaller so that we don't overwrite the tail and
1328          * we don't waste all day writing from the head to the tail
1329          * for no reason.
1330          */
1331         max_distance = MIN(max_distance, tail_distance);
1332
1333         if ((head_block + max_distance) <= log->l_logBBsize) {
1334                 /*
1335                  * We can stomp all the blocks we need to without
1336                  * wrapping around the end of the log.  Just do it
1337                  * in a single write.  Use the cycle number of the
1338                  * current cycle minus one so that the log will look like:
1339                  *     n ... | n - 1 ...
1340                  */
1341                 error = xlog_write_log_records(log, (head_cycle - 1),
1342                                 head_block, max_distance, tail_cycle,
1343                                 tail_block);
1344                 if (error)
1345                         return error;
1346         } else {
1347                 /*
1348                  * We need to wrap around the end of the physical log in
1349                  * order to clear all the blocks.  Do it in two separate
1350                  * I/Os.  The first write should be from the head to the
1351                  * end of the physical log, and it should use the current
1352                  * cycle number minus one just like above.
1353                  */
1354                 distance = log->l_logBBsize - head_block;
1355                 error = xlog_write_log_records(log, (head_cycle - 1),
1356                                 head_block, distance, tail_cycle,
1357                                 tail_block);
1358
1359                 if (error)
1360                         return error;
1361
1362                 /*
1363                  * Now write the blocks at the start of the physical log.
1364                  * This writes the remainder of the blocks we want to clear.
1365                  * It uses the current cycle number since we're now on the
1366                  * same cycle as the head so that we get:
1367                  *    n ... n ... | n - 1 ...
1368                  *    ^^^^^ blocks we're writing
1369                  */
1370                 distance = max_distance - (log->l_logBBsize - head_block);
1371                 error = xlog_write_log_records(log, head_cycle, 0, distance,
1372                                 tail_cycle, tail_block);
1373                 if (error)
1374                         return error;
1375         }
1376
1377         return 0;
1378 }
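/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): the head/tail distance arithmetic used above, isolated into
 * a plain helper.  All names are hypothetical; "log_size" stands in
 * for log->l_logBBsize and the result mirrors tail_distance.
 */
static int
example_tail_distance(
        unsigned int    head_cycle,
        int             head_block,
        unsigned int    tail_cycle,
        int             tail_block,
        int             log_size)
{
        if (head_cycle == tail_cycle) {
                /* tail physically behind head: wrap past the log end */
                return tail_block + (log_size - head_block);
        }
        /* head physically behind tail (head_cycle == tail_cycle + 1) */
        return tail_block - head_block;
}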
1379
1380 /******************************************************************************
1381  *
1382  *              Log recover routines
1383  *
1384  ******************************************************************************
1385  */
1386
1387 STATIC xlog_recover_t *
1388 xlog_recover_find_tid(
1389         struct hlist_head       *head,
1390         xlog_tid_t              tid)
1391 {
1392         xlog_recover_t          *trans;
1393         struct hlist_node       *n;
1394
1395         hlist_for_each_entry(trans, n, head, r_list) {
1396                 if (trans->r_log_tid == tid)
1397                         return trans;
1398         }
1399         return NULL;
1400 }
1401
1402 STATIC void
1403 xlog_recover_new_tid(
1404         struct hlist_head       *head,
1405         xlog_tid_t              tid,
1406         xfs_lsn_t               lsn)
1407 {
1408         xlog_recover_t          *trans;
1409
1410         trans = kmem_zalloc(sizeof(xlog_recover_t), KM_SLEEP);
1411         trans->r_log_tid   = tid;
1412         trans->r_lsn       = lsn;
1413         INIT_LIST_HEAD(&trans->r_itemq);
1414
1415         INIT_HLIST_NODE(&trans->r_list);
1416         hlist_add_head(&trans->r_list, head);
1417 }
1418
1419 STATIC void
1420 xlog_recover_add_item(
1421         struct list_head        *head)
1422 {
1423         xlog_recover_item_t     *item;
1424
1425         item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
1426         INIT_LIST_HEAD(&item->ri_list);
1427         list_add_tail(&item->ri_list, head);
1428 }
1429
1430 STATIC int
1431 xlog_recover_add_to_cont_trans(
1432         struct log              *log,
1433         xlog_recover_t          *trans,
1434         xfs_caddr_t             dp,
1435         int                     len)
1436 {
1437         xlog_recover_item_t     *item;
1438         xfs_caddr_t             ptr, old_ptr;
1439         int                     old_len;
1440
1441         if (list_empty(&trans->r_itemq)) {
1442                 /* finish copying rest of trans header */
1443                 xlog_recover_add_item(&trans->r_itemq);
1444                 ptr = (xfs_caddr_t) &trans->r_theader +
1445                                 sizeof(xfs_trans_header_t) - len;
1446                 memcpy(ptr, dp, len); /* d, s, l */
1447                 return 0;
1448         }
1449         /* take the tail entry */
1450         item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
1451
1452         old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
1453         old_len = item->ri_buf[item->ri_cnt-1].i_len;
1454
1455         ptr = kmem_realloc(old_ptr, len+old_len, old_len, 0u);
1456         memcpy(&ptr[old_len], dp, len); /* d, s, l */
1457         item->ri_buf[item->ri_cnt-1].i_len += len;
1458         item->ri_buf[item->ri_cnt-1].i_addr = ptr;
1459         trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
1460         return 0;
1461 }
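/*
 * Illustrative sketch (editor's addition): the continuation append done
 * above with kmem_realloc(), modelled in userspace C.  The iovec type
 * and names are hypothetical stand-ins for xfs_log_iovec_t fields.
 */
#include <stdlib.h>
#include <string.h>

struct example_iovec {
        char            *addr;
        int             len;
};

static int
example_append_cont(
        struct example_iovec    *iov,
        const char              *dp,
        int                     len)
{
        char    *p;

        p = realloc(iov->addr, (size_t)iov->len + len);
        if (!p)
                return -1;              /* allocation failure */
        memcpy(p + iov->len, dp, len);  /* splice new bytes onto the tail */
        iov->addr = p;
        iov->len += len;
        return 0;
}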
1462
1463 /*
1464  * The next region to add is the start of a new region.  It could be
1465  * a whole region or it could be the first part of a new region.  Because
1466  * of this, the assumption here is that the type and size fields of all
1467  * format structures fit into the first 32 bits of the structure.
1468  *
1469  * This works because all regions must be 32 bit aligned.  Therefore, we
1470  * either have both fields or we have neither field.  In the case we have
1471  * neither field, the data part of the region is zero length.  We only have
1472  * a log_op_header and can throw away the header since a new one will appear
1473  * later.  If we have at least 4 bytes, then we can determine how many regions
1474  * will appear in the current log item.
1475  */
1476 STATIC int
1477 xlog_recover_add_to_trans(
1478         struct log              *log,
1479         xlog_recover_t          *trans,
1480         xfs_caddr_t             dp,
1481         int                     len)
1482 {
1483         xfs_inode_log_format_t  *in_f;                  /* any will do */
1484         xlog_recover_item_t     *item;
1485         xfs_caddr_t             ptr;
1486
1487         if (!len)
1488                 return 0;
1489         if (list_empty(&trans->r_itemq)) {
1490                 /* we need to catch log corruptions here */
1491                 if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
1492                         xlog_warn("XFS: xlog_recover_add_to_trans: "
1493                                   "bad header magic number");
1494                         ASSERT(0);
1495                         return XFS_ERROR(EIO);
1496                 }
1497                 if (len == sizeof(xfs_trans_header_t))
1498                         xlog_recover_add_item(&trans->r_itemq);
1499                 memcpy(&trans->r_theader, dp, len); /* d, s, l */
1500                 return 0;
1501         }
1502
1503         ptr = kmem_alloc(len, KM_SLEEP);
1504         memcpy(ptr, dp, len);
1505         in_f = (xfs_inode_log_format_t *)ptr;
1506
1507         /* take the tail entry */
1508         item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
1509         if (item->ri_total != 0 &&
1510              item->ri_total == item->ri_cnt) {
1511                 /* tail item is in use, get a new one */
1512                 xlog_recover_add_item(&trans->r_itemq);
1513                 item = list_entry(trans->r_itemq.prev,
1514                                         xlog_recover_item_t, ri_list);
1515         }
1516
1517         if (item->ri_total == 0) {              /* first region to be added */
1518                 if (in_f->ilf_size == 0 ||
1519                     in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
1520                         xlog_warn(
1521         "XFS: bad number of regions (%d) in inode log format",
1522                                   in_f->ilf_size);
1523                         ASSERT(0);
1524                         return XFS_ERROR(EIO);
1525                 }
1526
1527                 item->ri_total = in_f->ilf_size;
1528                 item->ri_buf =
1529                         kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
1530                                     KM_SLEEP);
1531         }
1532         ASSERT(item->ri_total > item->ri_cnt);
1533         /* Description region is ri_buf[0] */
1534         item->ri_buf[item->ri_cnt].i_addr = ptr;
1535         item->ri_buf[item->ri_cnt].i_len  = len;
1536         item->ri_cnt++;
1537         trace_xfs_log_recover_item_add(log, trans, item, 0);
1538         return 0;
1539 }
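/*
 * Illustrative sketch (editor's addition): the layout assumption the
 * comment above relies on.  Each log item format structure starts with
 * a 16-bit type and a 16-bit size, so the first 32 bits of a region are
 * enough to learn how many regions the item will carry.  The struct is
 * a hypothetical stand-in, not the real xfs_inode_log_format_t.
 */
struct example_log_format_hdr {
        unsigned short  type;           /* item type, e.g. XFS_LI_INODE */
        unsigned short  size;           /* number of regions in the item */
};

static unsigned short
example_region_count(
        const void      *region)
{
        /* safe only because all regions are 32-bit aligned (see above) */
        return ((const struct example_log_format_hdr *)region)->size;
}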
1540
1541 /*
1542  * Sort the log items in the transaction. Cancelled buffers need
1543  * to be put first so they are processed before any items that might
1544  * modify the buffers. If they are cancelled, then the modifications
1545  * don't need to be replayed.
1546  */
1547 STATIC int
1548 xlog_recover_reorder_trans(
1549         struct log              *log,
1550         xlog_recover_t          *trans,
1551         int                     pass)
1552 {
1553         xlog_recover_item_t     *item, *n;
1554         LIST_HEAD(sort_list);
1555
1556         list_splice_init(&trans->r_itemq, &sort_list);
1557         list_for_each_entry_safe(item, n, &sort_list, ri_list) {
1558                 xfs_buf_log_format_t    *buf_f;
1559
1560                 buf_f = (xfs_buf_log_format_t *)item->ri_buf[0].i_addr;
1561
1562                 switch (ITEM_TYPE(item)) {
1563                 case XFS_LI_BUF:
1564                         if (!(buf_f->blf_flags & XFS_BLI_CANCEL)) {
1565                                 trace_xfs_log_recover_item_reorder_head(log,
1566                                                         trans, item, pass);
1567                                 list_move(&item->ri_list, &trans->r_itemq);
1568                                 break;
1569                         }
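                        /* fall through: cancelled buffers sort to the tail */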
1570                 case XFS_LI_INODE:
1571                 case XFS_LI_DQUOT:
1572                 case XFS_LI_QUOTAOFF:
1573                 case XFS_LI_EFD:
1574                 case XFS_LI_EFI:
1575                         trace_xfs_log_recover_item_reorder_tail(log,
1576                                                         trans, item, pass);
1577                         list_move_tail(&item->ri_list, &trans->r_itemq);
1578                         break;
1579                 default:
1580                         xlog_warn(
1581         "XFS: xlog_recover_reorder_trans: unrecognized type of log operation");
1582                         ASSERT(0);
1583                         return XFS_ERROR(EIO);
1584                 }
1585         }
1586         ASSERT(list_empty(&sort_list));
1587         return 0;
1588 }
1589
1590 /*
1591  * Build up the table of buf cancel records so that we don't replay
1592  * cancelled data in the second pass.  For buffer records that are
1593  * not cancel records, there is nothing to do here so we just return.
1594  *
1595  * If we get a cancel record which is already in the table, this indicates
1596  * that the buffer was cancelled multiple times.  In order to ensure
1597  * that during pass 2 we keep the record in the table until we reach its
1598  * last occurrence in the log, we keep a reference count in the cancel
1599  * record in the table to tell us how many times we expect to see this
1600  * record during the second pass.
1601  */
1602 STATIC void
1603 xlog_recover_do_buffer_pass1(
1604         xlog_t                  *log,
1605         xfs_buf_log_format_t    *buf_f)
1606 {
1607         xfs_buf_cancel_t        *bcp;
1608         xfs_buf_cancel_t        *nextp;
1609         xfs_buf_cancel_t        *prevp;
1610         xfs_buf_cancel_t        **bucket;
1611         xfs_daddr_t             blkno = 0;
1612         uint                    len = 0;
1613         ushort                  flags = 0;
1614
1615         switch (buf_f->blf_type) {
1616         case XFS_LI_BUF:
1617                 blkno = buf_f->blf_blkno;
1618                 len = buf_f->blf_len;
1619                 flags = buf_f->blf_flags;
1620                 break;
1621         }
1622
1623         /*
1624          * If this isn't a cancel buffer item, then just return.
1625          */
1626         if (!(flags & XFS_BLI_CANCEL)) {
1627                 trace_xfs_log_recover_buf_not_cancel(log, buf_f);
1628                 return;
1629         }
1630
1631         /*
1632          * Insert an xfs_buf_cancel record into the hash table of
1633          * them.  If there is already an identical record, bump
1634          * its reference count.
1635          */
1636         bucket = &log->l_buf_cancel_table[(__uint64_t)blkno %
1637                                           XLOG_BC_TABLE_SIZE];
1638         /*
1639          * If the hash bucket is empty then just insert a new record into
1640          * the bucket.
1641          */
1642         if (*bucket == NULL) {
1643                 bcp = (xfs_buf_cancel_t *)kmem_alloc(sizeof(xfs_buf_cancel_t),
1644                                                      KM_SLEEP);
1645                 bcp->bc_blkno = blkno;
1646                 bcp->bc_len = len;
1647                 bcp->bc_refcount = 1;
1648                 bcp->bc_next = NULL;
1649                 *bucket = bcp;
1650                 return;
1651         }
1652
1653         /*
1654          * The hash bucket is not empty, so search for duplicates of our
1655          * record.  If we find one then just bump its refcount.  If not
1656          * then add us at the end of the list.
1657          */
1658         prevp = NULL;
1659         nextp = *bucket;
1660         while (nextp != NULL) {
1661                 if (nextp->bc_blkno == blkno && nextp->bc_len == len) {
1662                         nextp->bc_refcount++;
1663                         trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
1664                         return;
1665                 }
1666                 prevp = nextp;
1667                 nextp = nextp->bc_next;
1668         }
1669         ASSERT(prevp != NULL);
1670         bcp = (xfs_buf_cancel_t *)kmem_alloc(sizeof(xfs_buf_cancel_t),
1671                                              KM_SLEEP);
1672         bcp->bc_blkno = blkno;
1673         bcp->bc_len = len;
1674         bcp->bc_refcount = 1;
1675         bcp->bc_next = NULL;
1676         prevp->bc_next = bcp;
1677         trace_xfs_log_recover_buf_cancel_add(log, buf_f);
1678 }
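/*
 * Illustrative sketch (editor's addition): the cancel table above is a
 * fixed-size chained hash keyed on block number, with one refcount per
 * (blkno, len) pair.  A minimal userspace model with hypothetical names
 * follows; unlike the original it inserts at the bucket head, which is
 * equivalent for lookup purposes.
 */
#include <stdlib.h>

#define EXAMPLE_BC_TABLE_SIZE   64

struct example_bcancel {
        long long               blkno;
        unsigned int            len;
        int                     refcount;
        struct example_bcancel  *next;
};

static struct example_bcancel *example_table[EXAMPLE_BC_TABLE_SIZE];

static void
example_cancel_add(
        long long       blkno,
        unsigned int    len)
{
        struct example_bcancel  **bucket, *bcp;

        bucket = &example_table[(unsigned long long)blkno %
                                EXAMPLE_BC_TABLE_SIZE];
        for (bcp = *bucket; bcp != NULL; bcp = bcp->next) {
                if (bcp->blkno == blkno && bcp->len == len) {
                        bcp->refcount++;        /* duplicate cancel record */
                        return;
                }
        }
        bcp = malloc(sizeof(*bcp));
        if (!bcp)
                return;                         /* allocation failure */
        bcp->blkno = blkno;
        bcp->len = len;
        bcp->refcount = 1;
        bcp->next = *bucket;
        *bucket = bcp;
}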
1679
1680 /*
1681  * Check to see whether the buffer being recovered has a corresponding
1682  * entry in the buffer cancel record table.  If it does then return 1
1683  * so that it will be cancelled, otherwise return 0.  If the buffer is
1684  * actually a buffer cancel item (XFS_BLI_CANCEL is set), then decrement
1685  * the refcount on the entry in the table and remove it from the table
1686  * if this is the last reference.
1687  *
1688  * We remove the cancel record from the table when we encounter its
1689  * last occurrence in the log so that if the same buffer is re-used
1690  * again after its last cancellation we actually replay the changes
1691  * made at that point.
1692  */
1693 STATIC int
1694 xlog_check_buffer_cancelled(
1695         xlog_t                  *log,
1696         xfs_daddr_t             blkno,
1697         uint                    len,
1698         ushort                  flags)
1699 {
1700         xfs_buf_cancel_t        *bcp;
1701         xfs_buf_cancel_t        *prevp;
1702         xfs_buf_cancel_t        **bucket;
1703
1704         if (log->l_buf_cancel_table == NULL) {
1705                 /*
1706                  * There is nothing in the table built in pass one,
1707                  * so this buffer must not be cancelled.
1708                  */
1709                 ASSERT(!(flags & XFS_BLI_CANCEL));
1710                 return 0;
1711         }
1712
1713         bucket = &log->l_buf_cancel_table[(__uint64_t)blkno %
1714                                           XLOG_BC_TABLE_SIZE];
1715         bcp = *bucket;
1716         if (bcp == NULL) {
1717                 /*
1718                  * There is no corresponding entry in the table built
1719                  * in pass one, so this buffer has not been cancelled.
1720                  */
1721                 ASSERT(!(flags & XFS_BLI_CANCEL));
1722                 return 0;
1723         }
1724
1725         /*
1726          * Search for an entry in the buffer cancel table that
1727          * matches our buffer.
1728          */
1729         prevp = NULL;
1730         while (bcp != NULL) {
1731                 if (bcp->bc_blkno == blkno && bcp->bc_len == len) {
1732                         /*
1733                          * We've got a match, so return 1 so that the
1734                          * recovery of this buffer is cancelled.
1735                          * If this buffer is actually a buffer cancel
1736                          * log item, then decrement the refcount on the
1737                          * one in the table and remove it if this is the
1738                          * last reference.
1739                          */
1740                         if (flags & XFS_BLI_CANCEL) {
1741                                 bcp->bc_refcount--;
1742                                 if (bcp->bc_refcount == 0) {
1743                                         if (prevp == NULL) {
1744                                                 *bucket = bcp->bc_next;
1745                                         } else {
1746                                                 prevp->bc_next = bcp->bc_next;
1747                                         }
1748                                         kmem_free(bcp);
1749                                 }
1750                         }
1751                         return 1;
1752                 }
1753                 prevp = bcp;
1754                 bcp = bcp->bc_next;
1755         }
1756         /*
1757          * We didn't find a corresponding entry in the table, so
1758          * return 0 so that the buffer is NOT cancelled.
1759          */
1760         ASSERT(!(flags & XFS_BLI_CANCEL));
1761         return 0;
1762 }
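/*
 * Illustrative sketch (editor's addition): the pass-2 lookup against the
 * table modelled above (reusing struct example_bcancel).  Returns 1 when
 * (blkno, len) is cancelled; if the query is itself a cancel record, one
 * reference is dropped so the entry vanishes at its last occurrence.
 */
static int
example_cancel_check(
        long long       blkno,
        unsigned int    len,
        int             is_cancel_record)
{
        struct example_bcancel  **pp, *bcp;

        pp = &example_table[(unsigned long long)blkno %
                            EXAMPLE_BC_TABLE_SIZE];
        for (; (bcp = *pp) != NULL; pp = &bcp->next) {
                if (bcp->blkno != blkno || bcp->len != len)
                        continue;
                if (is_cancel_record && --bcp->refcount == 0) {
                        *pp = bcp->next;        /* unlink last reference */
                        free(bcp);
                }
                return 1;                       /* replay is cancelled */
        }
        return 0;                               /* not cancelled: replay */
}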
1763
1764 STATIC int
1765 xlog_recover_do_buffer_pass2(
1766         xlog_t                  *log,
1767         xfs_buf_log_format_t    *buf_f)
1768 {
1769         xfs_daddr_t             blkno = 0;
1770         ushort                  flags = 0;
1771         uint                    len = 0;
1772
1773         switch (buf_f->blf_type) {
1774         case XFS_LI_BUF:
1775                 blkno = buf_f->blf_blkno;
1776                 flags = buf_f->blf_flags;
1777                 len = buf_f->blf_len;
1778                 break;
1779         }
1780
1781         return xlog_check_buffer_cancelled(log, blkno, len, flags);
1782 }
1783
1784 /*
1785  * Perform recovery for a buffer full of inodes.  In these buffers,
1786  * the only data which should be recovered is that which corresponds
1787  * to the di_next_unlinked pointers in the on disk inode structures.
1788  * The rest of the data for the inodes is always logged through the
1789  * inodes themselves rather than the inode buffer and is recovered
1790  * in xlog_recover_do_inode_trans().
1791  *
1792  * The only time when buffers full of inodes are fully recovered is
1793  * when the buffer is full of newly allocated inodes.  In this case
1794  * the buffer will not be marked as an inode buffer and so will be
1795  * sent to xlog_recover_do_reg_buffer() below during recovery.
1796  */
1797 STATIC int
1798 xlog_recover_do_inode_buffer(
1799         xfs_mount_t             *mp,
1800         xlog_recover_item_t     *item,
1801         xfs_buf_t               *bp,
1802         xfs_buf_log_format_t    *buf_f)
1803 {
1804         int                     i;
1805         int                     item_index;
1806         int                     bit;
1807         int                     nbits;
1808         int                     reg_buf_offset;
1809         int                     reg_buf_bytes;
1810         int                     next_unlinked_offset;
1811         int                     inodes_per_buf;
1812         xfs_agino_t             *logged_nextp;
1813         xfs_agino_t             *buffer_nextp;
1814         unsigned int            *data_map = NULL;
1815         unsigned int            map_size = 0;
1816
1817         trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
1818
1819         switch (buf_f->blf_type) {
1820         case XFS_LI_BUF:
1821                 data_map = buf_f->blf_data_map;
1822                 map_size = buf_f->blf_map_size;
1823                 break;
1824         }
1825         /*
1826          * Set the variables corresponding to the current region to
1827          * 0 so that we'll initialize them on the first pass through
1828          * the loop.
1829          */
1830         reg_buf_offset = 0;
1831         reg_buf_bytes = 0;
1832         bit = 0;
1833         nbits = 0;
1834         item_index = 0;
1835         inodes_per_buf = XFS_BUF_COUNT(bp) >> mp->m_sb.sb_inodelog;
1836         for (i = 0; i < inodes_per_buf; i++) {
1837                 next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
1838                         offsetof(xfs_dinode_t, di_next_unlinked);
1839
1840                 while (next_unlinked_offset >=
1841                        (reg_buf_offset + reg_buf_bytes)) {
1842                         /*
1843                          * The next di_next_unlinked field is beyond
1844                          * the current logged region.  Find the next
1845                          * logged region that contains or is beyond
1846                          * the current di_next_unlinked field.
1847                          */
1848                         bit += nbits;
1849                         bit = xfs_next_bit(data_map, map_size, bit);
1850
1851                         /*
1852                          * If there are no more logged regions in the
1853                          * buffer, then we're done.
1854                          */
1855                         if (bit == -1) {
1856                                 return 0;
1857                         }
1858
1859                         nbits = xfs_contig_bits(data_map, map_size,
1860                                                          bit);
1861                         ASSERT(nbits > 0);
1862                         reg_buf_offset = bit << XFS_BLI_SHIFT;
1863                         reg_buf_bytes = nbits << XFS_BLI_SHIFT;
1864                         item_index++;
1865                 }
1866
1867                 /*
1868                  * If the current logged region starts after the current
1869                  * di_next_unlinked field, then move on to the next
1870                  * di_next_unlinked field.
1871                  */
1872                 if (next_unlinked_offset < reg_buf_offset) {
1873                         continue;
1874                 }
1875
1876                 ASSERT(item->ri_buf[item_index].i_addr != NULL);
1877                 ASSERT((item->ri_buf[item_index].i_len % XFS_BLI_CHUNK) == 0);
1878                 ASSERT((reg_buf_offset + reg_buf_bytes) <= XFS_BUF_COUNT(bp));
1879
1880                 /*
1881                  * The current logged region contains a copy of the
1882                  * current di_next_unlinked field.  Extract its value
1883                  * and copy it to the buffer copy.
1884                  */
1885                 logged_nextp = (xfs_agino_t *)
1886                                ((char *)(item->ri_buf[item_index].i_addr) +
1887                                 (next_unlinked_offset - reg_buf_offset));
1888                 if (unlikely(*logged_nextp == 0)) {
1889                         xfs_fs_cmn_err(CE_ALERT, mp,
1890                                 "bad inode buffer log record (ptr = 0x%p, bp = 0x%p).  XFS trying to replay bad (0) inode di_next_unlinked field",
1891                                 item, bp);
1892                         XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
1893                                          XFS_ERRLEVEL_LOW, mp);
1894                         return XFS_ERROR(EFSCORRUPTED);
1895                 }
1896
1897                 buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp,
1898                                               next_unlinked_offset);
1899                 *buffer_nextp = *logged_nextp;
1900         }
1901
1902         return 0;
1903 }
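/*
 * Illustrative sketch (editor's addition): how a run of dirty-map bits
 * becomes the byte range of a logged region, as done above.  Assumes the
 * usual XFS_BLI_SHIFT of 7, i.e. 128-byte logging chunks.
 */
#define EXAMPLE_BLI_SHIFT       7

static void
example_region_bytes(
        int     bit,
        int     nbits,
        int     *offset,
        int     *bytes)
{
        *offset = bit << EXAMPLE_BLI_SHIFT;     /* cf. reg_buf_offset */
        *bytes  = nbits << EXAMPLE_BLI_SHIFT;   /* cf. reg_buf_bytes */
}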
1904
1905 /*
1906  * Perform a 'normal' buffer recovery.  Each logged region of the
1907  * buffer should be copied over the corresponding region in the
1908  * given buffer.  The bitmap in the buf log format structure indicates
1909  * where to place the logged data.
1910  */
1911 /*ARGSUSED*/
1912 STATIC void
1913 xlog_recover_do_reg_buffer(
1914         struct xfs_mount        *mp,
1915         xlog_recover_item_t     *item,
1916         xfs_buf_t               *bp,
1917         xfs_buf_log_format_t    *buf_f)
1918 {
1919         int                     i;
1920         int                     bit;
1921         int                     nbits;
1922         unsigned int            *data_map = NULL;
1923         unsigned int            map_size = 0;
1924         int                     error;
1925
1926         trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
1927
1928         switch (buf_f->blf_type) {
1929         case XFS_LI_BUF:
1930                 data_map = buf_f->blf_data_map;
1931                 map_size = buf_f->blf_map_size;
1932                 break;
1933         }
1934         bit = 0;
1935         i = 1;  /* 0 is the buf format structure */
1936         while (1) {
1937                 bit = xfs_next_bit(data_map, map_size, bit);
1938                 if (bit == -1)
1939                         break;
1940                 nbits = xfs_contig_bits(data_map, map_size, bit);
1941                 ASSERT(nbits > 0);
1942                 ASSERT(item->ri_buf[i].i_addr != NULL);
1943                 ASSERT(item->ri_buf[i].i_len % XFS_BLI_CHUNK == 0);
1944                 ASSERT(XFS_BUF_COUNT(bp) >=
1945                        ((uint)bit << XFS_BLI_SHIFT)+(nbits<<XFS_BLI_SHIFT));
1946
1947                 /*
1948                  * Do a sanity check if this is a dquot buffer. Just checking
1949          * the first dquot in the buffer should do. XXX This is
1950                  * probably a good thing to do for other buf types also.
1951                  */
1952                 error = 0;
1953                 if (buf_f->blf_flags &
1954                    (XFS_BLI_UDQUOT_BUF|XFS_BLI_PDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) {
1955                         if (item->ri_buf[i].i_addr == NULL) {
1956                                 cmn_err(CE_ALERT,
1957                                         "XFS: NULL dquot in %s.", __func__);
1958                                 goto next;
1959                         }
1960                         if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) {
1961                                 cmn_err(CE_ALERT,
1962                                         "XFS: dquot too small (%d) in %s.",
1963                                         item->ri_buf[i].i_len, __func__);
1964                                 goto next;
1965                         }
1966                         error = xfs_qm_dqcheck((xfs_disk_dquot_t *)
1967                                                item->ri_buf[i].i_addr,
1968                                                -1, 0, XFS_QMOPT_DOWARN,
1969                                                "dquot_buf_recover");
1970                         if (error)
1971                                 goto next;
1972                 }
1973
1974                 memcpy(xfs_buf_offset(bp,
1975                         (uint)bit << XFS_BLI_SHIFT),    /* dest */
1976                         item->ri_buf[i].i_addr,         /* source */
1977                         nbits<<XFS_BLI_SHIFT);          /* length */
1978  next:
1979                 i++;
1980                 bit += nbits;
1981         }
1982
1983         /* Shouldn't be any more regions */
1984         ASSERT(i == item->ri_total);
1985 }
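/*
 * Illustrative sketch (editor's addition): minimal stand-ins for the
 * xfs_next_bit()/xfs_contig_bits() pair driving both recovery loops
 * above, assuming LSB-first bit order within 32-bit map words.
 */
static int
example_next_bit(
        const unsigned int      *map,
        int                     size_in_words,
        int                     start)
{
        int     bit;

        for (bit = start; bit < size_in_words * 32; bit++)
                if (map[bit / 32] & (1u << (bit % 32)))
                        return bit;             /* first logged chunk */
        return -1;                              /* no more logged regions */
}

static int
example_contig_bits(
        const unsigned int      *map,
        int                     size_in_words,
        int                     start)
{
        int     bit = start;

        while (bit < size_in_words * 32 &&
               (map[bit / 32] & (1u << (bit % 32))))
                bit++;
        return bit - start;                     /* run length, in chunks */
}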
1986
1987 /*
1988  * Do some primitive error checking on ondisk dquot data structures.
1989  */
1990 int
1991 xfs_qm_dqcheck(
1992         xfs_disk_dquot_t *ddq,
1993         xfs_dqid_t       id,
1994         uint             type,    /* used only when IO_dorepair is true */
1995         uint             flags,
1996         char             *str)
1997 {
1998         xfs_dqblk_t      *d = (xfs_dqblk_t *)ddq;
1999         int             errs = 0;
2000
2001         /*
2002          * We can encounter an uninitialized dquot buffer for 2 reasons:
2003          * 1. If we crash while deleting the quotainode(s), and those blks got
2004          *    used for user data. This is because we take the path of regular
2005          *    file deletion; however, the size field of quotainodes is never
2006          *    updated, so all the tricks that we play in itruncate_finish
2007          *    don't quite matter.
2008          *
2009          * 2. We don't play the quota buffers when there's a quotaoff logitem.
2010          *    But the allocation will be replayed so we'll end up with an
2011          *    uninitialized quota block.
2012          *
2013          * This is all fine; things are still consistent, and we haven't lost
2014          * any quota information. Just don't complain about bad dquot blks.
2015          */
2016         if (be16_to_cpu(ddq->d_magic) != XFS_DQUOT_MAGIC) {
2017                 if (flags & XFS_QMOPT_DOWARN)
2018                         cmn_err(CE_ALERT,
2019                         "%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
2020                         str, id, be16_to_cpu(ddq->d_magic), XFS_DQUOT_MAGIC);
2021                 errs++;
2022         }
2023         if (ddq->d_version != XFS_DQUOT_VERSION) {
2024                 if (flags & XFS_QMOPT_DOWARN)
2025                         cmn_err(CE_ALERT,
2026                         "%s : XFS dquot ID 0x%x, version 0x%x != 0x%x",
2027                         str, id, ddq->d_version, XFS_DQUOT_VERSION);
2028                 errs++;
2029         }
2030
2031         if (ddq->d_flags != XFS_DQ_USER &&
2032             ddq->d_flags != XFS_DQ_PROJ &&
2033             ddq->d_flags != XFS_DQ_GROUP) {
2034                 if (flags & XFS_QMOPT_DOWARN)
2035                         cmn_err(CE_ALERT,
2036                         "%s : XFS dquot ID 0x%x, unknown flags 0x%x",
2037                         str, id, ddq->d_flags);
2038                 errs++;
2039         }
2040
2041         if (id != -1 && id != be32_to_cpu(ddq->d_id)) {
2042                 if (flags & XFS_QMOPT_DOWARN)
2043                         cmn_err(CE_ALERT,
2044                         "%s : ondisk-dquot 0x%p, ID mismatch: "
2045                         "0x%x expected, found id 0x%x",
2046                         str, ddq, id, be32_to_cpu(ddq->d_id));
2047                 errs++;
2048         }
2049
2050         if (!errs && ddq->d_id) {
2051                 if (ddq->d_blk_softlimit &&
2052                     be64_to_cpu(ddq->d_bcount) >=
2053                                 be64_to_cpu(ddq->d_blk_softlimit)) {
2054                         if (!ddq->d_btimer) {
2055                                 if (flags & XFS_QMOPT_DOWARN)
2056                                         cmn_err(CE_ALERT,
2057                                         "%s : Dquot ID 0x%x (0x%p) "
2058                                         "BLK TIMER NOT STARTED",
2059                                         str, (int)be32_to_cpu(ddq->d_id), ddq);
2060                                 errs++;
2061                         }
2062                 }
2063                 if (ddq->d_ino_softlimit &&
2064                     be64_to_cpu(ddq->d_icount) >=
2065                                 be64_to_cpu(ddq->d_ino_softlimit)) {
2066                         if (!ddq->d_itimer) {
2067                                 if (flags & XFS_QMOPT_DOWARN)
2068                                         cmn_err(CE_ALERT,
2069                                         "%s : Dquot ID 0x%x (0x%p) "
2070                                         "INODE TIMER NOT STARTED",
2071                                         str, (int)be32_to_cpu(ddq->d_id), ddq);
2072                                 errs++;
2073                         }
2074                 }
2075                 if (ddq->d_rtb_softlimit &&
2076                     be64_to_cpu(ddq->d_rtbcount) >=
2077                                 be64_to_cpu(ddq->d_rtb_softlimit)) {
2078                         if (!ddq->d_rtbtimer) {
2079                                 if (flags & XFS_QMOPT_DOWARN)
2080                                         cmn_err(CE_ALERT,
2081                                         "%s : Dquot ID 0x%x (0x%p) "
2082                                         "RTBLK TIMER NOT STARTED",
2083                                         str, (int)be32_to_cpu(ddq->d_id), ddq);
2084                                 errs++;
2085                         }
2086                 }
2087         }
2088
2089         if (!errs || !(flags & XFS_QMOPT_DQREPAIR))
2090                 return errs;
2091
2092         if (flags & XFS_QMOPT_DOWARN)
2093                 cmn_err(CE_NOTE, "Re-initializing dquot ID 0x%x", id);
2094
2095         /*
2096          * Typically, a repair is only requested by quotacheck.
2097          */
2098         ASSERT(id != -1);
2099         ASSERT(flags & XFS_QMOPT_DQREPAIR);
2100         memset(d, 0, sizeof(xfs_dqblk_t));
2101
2102         d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
2103         d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
2104         d->dd_diskdq.d_flags = type;
2105         d->dd_diskdq.d_id = cpu_to_be32(id);
2106
2107         return errs;
2108 }
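/*
 * Illustrative sketch (editor's addition): the timer invariant enforced
 * above, isolated.  Whenever usage meets or exceeds a nonzero soft
 * limit, the matching timer must already have been started.
 */
static int
example_timer_ok(
        unsigned long long      count,
        unsigned long long      softlimit,
        int                     timer_started)
{
        if (softlimit && count >= softlimit)
                return timer_started;   /* limit hit: timer must be set */
        return 1;                       /* no limit in force */
}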
2109
2110 /*
2111  * Perform a dquot buffer recovery.
2112  * Simple algorithm: if we have found a QUOTAOFF logitem of the same type
2113  * (ie. USR or GRP), then just toss this buffer away; don't recover it.
2114  * (i.e. USR or GRP), then just toss this buffer away; don't recover it.
2115  */
2116 STATIC void
2117 xlog_recover_do_dquot_buffer(
2118         xfs_mount_t             *mp,
2119         xlog_t                  *log,
2120         xlog_recover_item_t     *item,
2121         xfs_buf_t               *bp,
2122         xfs_buf_log_format_t    *buf_f)
2123 {
2124         uint                    type;
2125
2126         trace_xfs_log_recover_buf_dquot_buf(log, buf_f);
2127
2128         /*
2129          * Filesystems are required to send in quota flags at mount time.
2130          */
2131         if (mp->m_qflags == 0) {
2132                 return;
2133         }
2134
2135         type = 0;
2136         if (buf_f->blf_flags & XFS_BLI_UDQUOT_BUF)
2137                 type |= XFS_DQ_USER;
2138         if (buf_f->blf_flags & XFS_BLI_PDQUOT_BUF)
2139                 type |= XFS_DQ_PROJ;
2140         if (buf_f->blf_flags & XFS_BLI_GDQUOT_BUF)
2141                 type |= XFS_DQ_GROUP;
2142         /*
2143          * This type of quota was turned off, so ignore this buffer.
2144          */
2145         if (log->l_quotaoffs_flag & type)
2146                 return;
2147
2148         xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
2149 }
2150
2151 /*
2152  * This routine replays a modification made to a buffer at runtime.
2153  * There are actually two types of buffer, regular and inode, which
2154  * are handled differently.  From inode buffers we only recover
2155  * a specific set of data, namely
2156  * the inode di_next_unlinked fields.  This is because all other inode
2157  * data is actually logged via inode records and any data we replay
2158  * here which overlaps that may be stale.
2159  *
2160  * When meta-data buffers are freed at run time we log a buffer item
2161  * with the XFS_BLI_CANCEL bit set to indicate that previous copies
2162  * of the buffer in the log should not be replayed at recovery time.
2163  * This is so that if the blocks covered by the buffer are reused for
2164  * file data before we crash we don't end up replaying old, freed
2165  * meta-data into a user's file.
2166  *
2167  * To handle the cancellation of buffer log items, we make two passes
2168  * over the log during recovery.  During the first we build a table of
2169  * those buffers which have been cancelled, and during the second we
2170  * only replay those buffers which do not have corresponding cancel
2171  * records in the table.  See xlog_recover_do_buffer_pass[1,2] above
2172  * for more details on the implementation of the table of cancel records.
2173  */
2174 STATIC int
2175 xlog_recover_do_buffer_trans(
2176         xlog_t                  *log,
2177         xlog_recover_item_t     *item,
2178         int                     pass)
2179 {
2180         xfs_buf_log_format_t    *buf_f;
2181         xfs_mount_t             *mp;
2182         xfs_buf_t               *bp;
2183         int                     error;
2184         int                     cancel;
2185         xfs_daddr_t             blkno;
2186         int                     len;
2187         ushort                  flags;
2188         uint                    buf_flags;
2189
2190         buf_f = (xfs_buf_log_format_t *)item->ri_buf[0].i_addr;
2191
2192         if (pass == XLOG_RECOVER_PASS1) {
2193                 /*
2194                  * In this pass we're only looking for buf items
2195                  * with the XFS_BLI_CANCEL bit set.
2196                  */
2197                 xlog_recover_do_buffer_pass1(log, buf_f);
2198                 return 0;
2199         } else {
2200                 /*
2201                  * In this pass we want to recover all the buffers
2202                  * which have not been cancelled and are not
2203                  * cancellation buffers themselves.  The routine
2204                  * we call here will tell us whether or not to
2205                  * continue with the replay of this buffer.
2206                  */
2207                 cancel = xlog_recover_do_buffer_pass2(log, buf_f);
2208                 if (cancel) {
2209                         trace_xfs_log_recover_buf_cancel(log, buf_f);
2210                         return 0;
2211                 }
2212         }
2213         trace_xfs_log_recover_buf_recover(log, buf_f);
2214         switch (buf_f->blf_type) {
2215         case XFS_LI_BUF:
2216                 blkno = buf_f->blf_blkno;
2217                 len = buf_f->blf_len;
2218                 flags = buf_f->blf_flags;
2219                 break;
2220         default:
2221                 xfs_fs_cmn_err(CE_ALERT, log->l_mp,
2222                         "xfs_log_recover: unknown buffer type 0x%x, logdev %s",
2223                         buf_f->blf_type, log->l_mp->m_logname ?
2224                         log->l_mp->m_logname : "internal");
2225                 XFS_ERROR_REPORT("xlog_recover_do_buffer_trans",
2226                                  XFS_ERRLEVEL_LOW, log->l_mp);
2227                 return XFS_ERROR(EFSCORRUPTED);
2228         }
2229
2230         mp = log->l_mp;
2231         buf_flags = XBF_LOCK;
2232         if (!(flags & XFS_BLI_INODE_BUF))
2233                 buf_flags |= XBF_MAPPED;
2234
2235         bp = xfs_buf_read(mp->m_ddev_targp, blkno, len, buf_flags);
2236         if (XFS_BUF_ISERROR(bp)) {
2237                 xfs_ioerror_alert("xlog_recover_do..(read#1)", log->l_mp,
2238                                   bp, blkno);
2239                 error = XFS_BUF_GETERROR(bp);
2240                 xfs_buf_relse(bp);
2241                 return error;
2242         }
2243
2244         error = 0;
2245         if (flags & XFS_BLI_INODE_BUF) {
2246                 error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
2247         } else if (flags &
2248                   (XFS_BLI_UDQUOT_BUF|XFS_BLI_PDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) {
2249                 xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
2250         } else {
2251                 xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
2252         }
2253         if (error)
2254                 return XFS_ERROR(error);
2255
2256         /*
2257          * Perform delayed write on the buffer.  Asynchronous writes will be
2258          * slower when taking into account all the buffers to be flushed.
2259          *
2260          * Also make sure that only inode buffers with good sizes stay in
2261          * the buffer cache.  The kernel moves inodes in buffers of 1 block
2262          * or XFS_INODE_CLUSTER_SIZE bytes, whichever is bigger.  The inode
2263          * buffers in the log can be a different size if the log was generated
2264          * by an older kernel using unclustered inode buffers or a newer kernel
2265          * running with a different inode cluster size.  Regardless, if
2266          * the inode buffer size isn't MAX(blocksize, XFS_INODE_CLUSTER_SIZE)
2267          * for *our* value of XFS_INODE_CLUSTER_SIZE, then we need to keep
2268          * the buffer out of the buffer cache so that the buffer won't
2269          * overlap with future reads of those inodes.
2270          */
2271         if (XFS_DINODE_MAGIC ==
2272             be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
2273             (XFS_BUF_COUNT(bp) != MAX(log->l_mp->m_sb.sb_blocksize,
2274                         (__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) {
2275                 XFS_BUF_STALE(bp);
2276                 error = xfs_bwrite(mp, bp);
2277         } else {
2278                 ASSERT(bp->b_mount == NULL || bp->b_mount == mp);
2279                 bp->b_mount = mp;
2280                 XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
2281                 xfs_bdwrite(mp, bp);
2282         }
2283
2284         return (error);
2285 }
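/*
 * Illustrative sketch (editor's addition): the stale-buffer size test in
 * the delayed-write path above.  A recovered inode buffer stays cached
 * only if its size equals MAX(blocksize, inode cluster size) for the
 * running kernel; anything else would overlap future inode reads.
 */
static int
example_keep_inode_buf(
        unsigned int    buf_bytes,
        unsigned int    blocksize,
        unsigned int    cluster_bytes)
{
        unsigned int    want;

        want = blocksize > cluster_bytes ? blocksize : cluster_bytes;
        return buf_bytes == want;
}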
2286
2287 STATIC int
2288 xlog_recover_do_inode_trans(
2289         xlog_t                  *log,
2290         xlog_recover_item_t     *item,
2291         int                     pass)
2292 {
2293         xfs_inode_log_format_t  *in_f;
2294         xfs_mount_t             *mp;
2295         xfs_buf_t               *bp;
2296         xfs_dinode_t            *dip;
2297         xfs_ino_t               ino;
2298         int                     len;
2299         xfs_caddr_t             src;
2300         xfs_caddr_t             dest;
2301         int                     error;
2302         int                     attr_index;
2303         uint                    fields;
2304         xfs_icdinode_t          *dicp;
2305         int                     need_free = 0;
2306
2307         if (pass == XLOG_RECOVER_PASS1) {
2308                 return 0;
2309         }
2310
2311         if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) {
2312                 in_f = (xfs_inode_log_format_t *)item->ri_buf[0].i_addr;
2313         } else {
2314                 in_f = (xfs_inode_log_format_t *)kmem_alloc(
2315                         sizeof(xfs_inode_log_format_t), KM_SLEEP);
2316                 need_free = 1;
2317                 error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
2318                 if (error)
2319                         goto error;
2320         }
2321         ino = in_f->ilf_ino;
2322         mp = log->l_mp;
2323
2324         /*
2325          * Inode buffers can be freed, look out for it,
2326          * and do not replay the inode.
2327          */
2328         if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno,
2329                                         in_f->ilf_len, 0)) {
2330                 error = 0;
2331                 trace_xfs_log_recover_inode_cancel(log, in_f);
2332                 goto error;
2333         }
2334         trace_xfs_log_recover_inode_recover(log, in_f);
2335
2336         bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len,
2337                           XBF_LOCK);
2338         if (XFS_BUF_ISERROR(bp)) {
2339                 xfs_ioerror_alert("xlog_recover_do..(read#2)", mp,
2340                                   bp, in_f->ilf_blkno);
2341                 error = XFS_BUF_GETERROR(bp);
2342                 xfs_buf_relse(bp);
2343                 goto error;
2344         }
2345         error = 0;
2346         ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
2347         dip = (xfs_dinode_t *)xfs_buf_offset(bp, in_f->ilf_boffset);
2348
2349         /*
2350          * Make sure the place we're flushing out to really looks
2351          * like an inode!
2352          */
2353         if (unlikely(be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC)) {
2354                 xfs_buf_relse(bp);
2355                 xfs_fs_cmn_err(CE_ALERT, mp,
2356                         "xfs_inode_recover: Bad inode magic number, dino ptr = 0x%p, dino bp = 0x%p, ino = %Ld",
2357                         dip, bp, ino);
2358                 XFS_ERROR_REPORT("xlog_recover_do_inode_trans(1)",
2359                                  XFS_ERRLEVEL_LOW, mp);
2360                 error = EFSCORRUPTED;
2361                 goto error;
2362         }
2363         dicp = (xfs_icdinode_t *)(item->ri_buf[1].i_addr);
2364         if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) {
2365                 xfs_buf_relse(bp);
2366                 xfs_fs_cmn_err(CE_ALERT, mp,
2367                         "xfs_inode_recover: Bad inode log record, rec ptr 0x%p, ino %Ld",
2368                         item, ino);
2369                 XFS_ERROR_REPORT("xlog_recover_do_inode_trans(2)",
2370                                  XFS_ERRLEVEL_LOW, mp);
2371                 error = EFSCORRUPTED;
2372                 goto error;
2373         }
2374
2375         /* Skip replay when the on disk inode is newer than the log one */
2376         if (dicp->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
2377                 /*
2378                  * Deal with the wrap case: DI_MAX_FLUSH on disk means
2379                  * the counter has wrapped, so a smaller log value is newer
2380                  */
2381                 if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
2382                     dicp->di_flushiter < (DI_MAX_FLUSH >> 1)) {
2383                         /* do nothing */
2384                 } else {
2385                         xfs_buf_relse(bp);
2386                         trace_xfs_log_recover_inode_skip(log, in_f);
2387                         error = 0;
2388                         goto error;
2389                 }
2390         }
2391         /* Take the opportunity to reset the flush iteration count */
2392         dicp->di_flushiter = 0;
2393
2394         if (unlikely((dicp->di_mode & S_IFMT) == S_IFREG)) {
2395                 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2396                     (dicp->di_format != XFS_DINODE_FMT_BTREE)) {
2397                         XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(3)",
2398                                          XFS_ERRLEVEL_LOW, mp, dicp);
2399                         xfs_buf_relse(bp);
2400                         xfs_fs_cmn_err(CE_ALERT, mp,
2401                                 "xfs_inode_recover: Bad regular inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2402                                 item, dip, bp, ino);
2403                         error = EFSCORRUPTED;
2404                         goto error;
2405                 }
2406         } else if (unlikely((dicp->di_mode & S_IFMT) == S_IFDIR)) {
2407                 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2408                     (dicp->di_format != XFS_DINODE_FMT_BTREE) &&
2409                     (dicp->di_format != XFS_DINODE_FMT_LOCAL)) {
2410                         XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(4)",
2411                                              XFS_ERRLEVEL_LOW, mp, dicp);
2412                         xfs_buf_relse(bp);
2413                         xfs_fs_cmn_err(CE_ALERT, mp,
2414                                 "xfs_inode_recover: Bad dir inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2415                                 item, dip, bp, ino);
2416                         error = EFSCORRUPTED;
2417                         goto error;
2418                 }
2419         }
2420         if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)) {
2421                 XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(5)",
2422                                      XFS_ERRLEVEL_LOW, mp, dicp);
2423                 xfs_buf_relse(bp);
2424                 xfs_fs_cmn_err(CE_ALERT, mp,
2425                         "xfs_inode_recover: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
2426                         item, dip, bp, ino,
2427                         dicp->di_nextents + dicp->di_anextents,
2428                         dicp->di_nblocks);
2429                 error = EFSCORRUPTED;
2430                 goto error;
2431         }
2432         if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) {
2433                 XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(6)",
2434                                      XFS_ERRLEVEL_LOW, mp, dicp);
2435                 xfs_buf_relse(bp);
2436                 xfs_fs_cmn_err(CE_ALERT, mp,
2437                         "xfs_inode_recover: Bad inode log rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, forkoff 0x%x",
2438                         item, dip, bp, ino, dicp->di_forkoff);
2439                 error = EFSCORRUPTED;
2440                 goto error;
2441         }
2442         if (unlikely(item->ri_buf[1].i_len > sizeof(struct xfs_icdinode))) {
2443                 XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(7)",
2444                                      XFS_ERRLEVEL_LOW, mp, dicp);
2445                 xfs_buf_relse(bp);
2446                 xfs_fs_cmn_err(CE_ALERT, mp,
2447                         "xfs_inode_recover: Bad inode log record length %d, rec ptr 0x%p",
2448                         item->ri_buf[1].i_len, item);
2449                 error = EFSCORRUPTED;
2450                 goto error;
2451         }
2452
2453         /* The core is in in-core format */
2454         xfs_dinode_to_disk(dip, (xfs_icdinode_t *)item->ri_buf[1].i_addr);
2455
2456         /* the rest is in on-disk format */
2457         if (item->ri_buf[1].i_len > sizeof(struct xfs_icdinode)) {
2458                 memcpy((xfs_caddr_t) dip + sizeof(struct xfs_icdinode),
2459                         item->ri_buf[1].i_addr + sizeof(struct xfs_icdinode),
2460                         item->ri_buf[1].i_len  - sizeof(struct xfs_icdinode));
2461         }
2462
2463         fields = in_f->ilf_fields;
2464         switch (fields & (XFS_ILOG_DEV | XFS_ILOG_UUID)) {
2465         case XFS_ILOG_DEV:
2466                 xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev);
2467                 break;
2468         case XFS_ILOG_UUID:
2469                 memcpy(XFS_DFORK_DPTR(dip),
2470                        &in_f->ilf_u.ilfu_uuid,
2471                        sizeof(uuid_t));
2472                 break;
2473         }
2474
2475         if (in_f->ilf_size == 2)
2476                 goto write_inode_buffer;
2477         len = item->ri_buf[2].i_len;
2478         src = item->ri_buf[2].i_addr;
2479         ASSERT(in_f->ilf_size <= 4);
2480         ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
2481         ASSERT(!(fields & XFS_ILOG_DFORK) ||
2482                (len == in_f->ilf_dsize));
2483
2484         switch (fields & XFS_ILOG_DFORK) {
2485         case XFS_ILOG_DDATA:
2486         case XFS_ILOG_DEXT:
2487                 memcpy(XFS_DFORK_DPTR(dip), src, len);
2488                 break;
2489
2490         case XFS_ILOG_DBROOT:
2491                 xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len,
2492                                  (xfs_bmdr_block_t *)XFS_DFORK_DPTR(dip),
2493                                  XFS_DFORK_DSIZE(dip, mp));
2494                 break;
2495
2496         default:
2497                 /*
2498                  * There are no data fork flags set.
2499                  */
2500                 ASSERT((fields & XFS_ILOG_DFORK) == 0);
2501                 break;
2502         }
2503
2504         /*
2505          * If we logged any attribute data, recover it.  There may or
2506          * may not have been any other non-core data logged in this
2507          * transaction.
2508          */
2509         if (in_f->ilf_fields & XFS_ILOG_AFORK) {
2510                 if (in_f->ilf_fields & XFS_ILOG_DFORK) {
2511                         attr_index = 3;
2512                 } else {
2513                         attr_index = 2;
2514                 }
2515                 len = item->ri_buf[attr_index].i_len;
2516                 src = item->ri_buf[attr_index].i_addr;
2517                 ASSERT(len == in_f->ilf_asize);
2518
2519                 switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
2520                 case XFS_ILOG_ADATA:
2521                 case XFS_ILOG_AEXT:
2522                         dest = XFS_DFORK_APTR(dip);
2523                         ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
2524                         memcpy(dest, src, len);
2525                         break;
2526
2527                 case XFS_ILOG_ABROOT:
2528                         dest = XFS_DFORK_APTR(dip);
2529                         xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src,
2530                                          len, (xfs_bmdr_block_t*)dest,
2531                                          XFS_DFORK_ASIZE(dip, mp));
2532                         break;
2533
2534                 default:
2535                         xlog_warn("XFS: xlog_recover_do_inode_trans: Invalid flag");
2536                         ASSERT(0);
2537                         xfs_buf_relse(bp);
2538                         error = EIO;
2539                         goto error;
2540                 }
2541         }
2542
2543 write_inode_buffer:
2544         ASSERT(bp->b_mount == NULL || bp->b_mount == mp);
2545         bp->b_mount = mp;
2546         XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
2547         xfs_bdwrite(mp, bp);
2548 error:
2549         if (need_free)
2550                 kmem_free(in_f);
2551         return XFS_ERROR(error);
2552 }
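/*
 * Illustrative sketch (editor's addition): the flush-iteration
 * comparison above, including the wrap case, assuming the usual
 * DI_MAX_FLUSH of 0xffff.  Returns nonzero when the on-disk inode is
 * newer than the logged copy and replay should be skipped.
 */
#define EXAMPLE_MAX_FLUSH       0xffff

static int
example_skip_replay(
        unsigned int    disk_iter,
        unsigned int    log_iter)
{
        if (log_iter >= disk_iter)
                return 0;               /* logged copy is newer: replay */
        /* wrap: disk pegged at max while the log value wrapped small */
        if (disk_iter == EXAMPLE_MAX_FLUSH &&
            log_iter < (EXAMPLE_MAX_FLUSH >> 1))
                return 0;
        return 1;                       /* disk is newer: skip replay */
}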
2553
2554 /*
2555  * Recover QUOTAOFF records. We simply make a note of them in the xlog_t
2556  * structure, so that we know not to do any dquot item or dquot buffer
2557  * recovery of that type.
2558  */
2559 STATIC int
2560 xlog_recover_do_quotaoff_trans(
2561         xlog_t                  *log,
2562         xlog_recover_item_t     *item,
2563         int                     pass)
2564 {
2565         xfs_qoff_logformat_t    *qoff_f;
2566
2567         if (pass == XLOG_RECOVER_PASS2) {
2568                 return (0);
2569         }
2570
2571         qoff_f = (xfs_qoff_logformat_t *)item->ri_buf[0].i_addr;
2572         ASSERT(qoff_f);
2573
2574         /*
2575          * The logitem format's flag tells us if this was user quotaoff,
2576          * group/project quotaoff or both.
2577          */
2578         if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
2579                 log->l_quotaoffs_flag |= XFS_DQ_USER;
2580         if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
2581                 log->l_quotaoffs_flag |= XFS_DQ_PROJ;
2582         if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
2583                 log->l_quotaoffs_flag |= XFS_DQ_GROUP;
2584
2585         return (0);
2586 }
2587
2588 /*
2589  * Recover a dquot record
2590  */
2591 STATIC int
2592 xlog_recover_do_dquot_trans(
2593         xlog_t                  *log,
2594         xlog_recover_item_t     *item,
2595         int                     pass)
2596 {
2597         xfs_mount_t             *mp;
2598         xfs_buf_t               *bp;
2599         struct xfs_disk_dquot   *ddq, *recddq;
2600         int                     error;
2601         xfs_dq_logformat_t      *dq_f;
2602         uint                    type;
2603
2604         if (pass == XLOG_RECOVER_PASS1) {
2605                 return 0;
2606         }
2607         mp = log->l_mp;
2608
2609         /*
2610          * Filesystems are required to send in quota flags at mount time.
2611          */
2612         if (mp->m_qflags == 0)
2613                 return (0);
2614
2615         recddq = (xfs_disk_dquot_t *)item->ri_buf[1].i_addr;
2616
2617         if (item->ri_buf[1].i_addr == NULL) {
2618                 cmn_err(CE_ALERT,
2619                         "XFS: NULL dquot in %s.", __func__);
2620                 return XFS_ERROR(EIO);
2621         }
2622         if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) {
2623                 cmn_err(CE_ALERT,
2624                         "XFS: dquot too small (%d) in %s.",
2625                         item->ri_buf[1].i_len, __func__);
2626                 return XFS_ERROR(EIO);
2627         }
2628
2629         /*
2630          * This type of quota was turned off, so ignore this record.
2631          */
2632         type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
2633         ASSERT(type);
2634         if (log->l_quotaoffs_flag & type)
2635                 return (0);
2636
2637         /*
2638          * At this point we know that quota was _not_ turned off.
2639          * Since the mount flags are not indicating to us otherwise, this
2640          * must mean that quota is on, and the dquot needs to be replayed.
2641          * Remember that we may not have fully recovered the superblock yet,
2642          * so we can't do the usual trick of looking at the SB quota bits.
2643          *
2644          * The other possibility, of course, is that the quota subsystem was
2645          * removed since the last mount - ENOSYS.
2646          */
2647         dq_f = (xfs_dq_logformat_t *)item->ri_buf[0].i_addr;
2648         ASSERT(dq_f);
2649         if ((error = xfs_qm_dqcheck(recddq,
2650                            dq_f->qlf_id,
2651                            0, XFS_QMOPT_DOWARN,
2652                            "xlog_recover_do_dquot_trans (log copy)"))) {
2653                 return XFS_ERROR(EIO);
2654         }
2655         ASSERT(dq_f->qlf_len == 1);
2656
2657         error = xfs_read_buf(mp, mp->m_ddev_targp,
2658                              dq_f->qlf_blkno,
2659                              XFS_FSB_TO_BB(mp, dq_f->qlf_len),
2660                              0, &bp);
2661         if (error) {
2662                 xfs_ioerror_alert("xlog_recover_do..(read#3)", mp,
2663                                   bp, dq_f->qlf_blkno);
2664                 return error;
2665         }
2666         ASSERT(bp);
2667         ddq = (xfs_disk_dquot_t *)xfs_buf_offset(bp, dq_f->qlf_boffset);
2668
2669         /*
2670          * At least the magic num portion should be on disk because this
2671          * was among a chunk of dquots created earlier, and we did some
2672          * minimal initialization then.
2673          */
2674         if (xfs_qm_dqcheck(ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
2675                            "xlog_recover_do_dquot_trans")) {
2676                 xfs_buf_relse(bp);
2677                 return XFS_ERROR(EIO);
2678         }
2679
2680         memcpy(ddq, recddq, item->ri_buf[1].i_len);
2681
2682         ASSERT(dq_f->qlf_size == 2);
2683         ASSERT(bp->b_mount == NULL || bp->b_mount == mp);
2684         bp->b_mount = mp;
2685         XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
2686         xfs_bdwrite(mp, bp);
2687
2688         return 0;
2689 }
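
/*
 * Summary of the replay sequence above: validate the dquot image taken
 * from the log, read the on-disk buffer it belongs to, sanity-check the
 * resident copy (at least the magic number should be intact from the
 * original chunk initialization), then overwrite it with the logged
 * image and queue a delayed write of the buffer.
 */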
2690
2691 /*
2692  * This routine is called to create an in-core extent free intent
2693  * item from the efi format structure which was logged on disk.
2694  * It allocates an in-core efi, copies the extents from the format
2695  * structure into it, and adds the efi to the AIL with the given
2696  * LSN.
2697  */
2698 STATIC int
2699 xlog_recover_do_efi_trans(
2700         xlog_t                  *log,
2701         xlog_recover_item_t     *item,
2702         xfs_lsn_t               lsn,
2703         int                     pass)
2704 {
2705         int                     error;
2706         xfs_mount_t             *mp;
2707         xfs_efi_log_item_t      *efip;
2708         xfs_efi_log_format_t    *efi_formatp;
2709
2710         if (pass == XLOG_RECOVER_PASS1) {
2711                 return 0;
2712         }
2713
2714         efi_formatp = (xfs_efi_log_format_t *)item->ri_buf[0].i_addr;
2715
2716         mp = log->l_mp;
2717         efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
2718         if ((error = xfs_efi_copy_format(&(item->ri_buf[0]),
2719                                          &(efip->efi_format)))) {
2720                 xfs_efi_item_free(efip);
2721                 return error;
2722         }
2723         efip->efi_next_extent = efi_formatp->efi_nextents;
2724         efip->efi_flags |= XFS_EFI_COMMITTED;
2725
2726         spin_lock(&log->l_ailp->xa_lock);
2727         /*
2728          * xfs_trans_ail_update() drops the AIL lock.
2729          */
2730         xfs_trans_ail_update(log->l_ailp, (xfs_log_item_t *)efip, lsn);
2731         return 0;
2732 }
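
/*
 * Note: the XFS_EFI_COMMITTED flag and the AIL insertion above are what
 * keep the intent visible; any EFI still sitting in the AIL when replay
 * completes had no matching EFD and is finished off later by
 * xlog_recover_process_efis().
 */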
2733
2734
2735 /*
2736  * This routine is called when an efd format structure is found in
2737  * a committed transaction in the log.  Its purpose is to cancel
2738  * the corresponding efi if it was still in the log.  To do this
2739  * it searches the AIL for the efi with an id equal to that in the
2740  * efd format structure.  If we find it, we remove the efi from the
2741  * AIL and free it.
2742  */
2743 STATIC void
2744 xlog_recover_do_efd_trans(
2745         xlog_t                  *log,
2746         xlog_recover_item_t     *item,
2747         int                     pass)
2748 {
2749         xfs_efd_log_format_t    *efd_formatp;
2750         xfs_efi_log_item_t      *efip = NULL;
2751         xfs_log_item_t          *lip;
2752         __uint64_t              efi_id;
2753         struct xfs_ail_cursor   cur;
2754         struct xfs_ail          *ailp = log->l_ailp;
2755
2756         if (pass == XLOG_RECOVER_PASS1) {
2757                 return;
2758         }
2759
2760         efd_formatp = (xfs_efd_log_format_t *)item->ri_buf[0].i_addr;
2761         ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
2762                 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
2763                (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
2764                 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t)))));
2765         efi_id = efd_formatp->efd_efi_id;
2766
2767         /*
2768          * Search for the efi with the id in the efd format structure
2769          * in the AIL.
2770          */
2771         spin_lock(&ailp->xa_lock);
2772         lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
2773         while (lip != NULL) {
2774                 if (lip->li_type == XFS_LI_EFI) {
2775                         efip = (xfs_efi_log_item_t *)lip;
2776                         if (efip->efi_format.efi_id == efi_id) {
2777                                 /*
2778                                  * xfs_trans_ail_delete() drops the
2779                                  * AIL lock.
2780                                  */
2781                                 xfs_trans_ail_delete(ailp, lip);
2782                                 xfs_efi_item_free(efip);
2783                                 spin_lock(&ailp->xa_lock);
2784                                 break;
2785                         }
2786                 }
2787                 lip = xfs_trans_ail_cursor_next(ailp, &cur);
2788         }
2789         xfs_trans_ail_cursor_done(ailp, &cur);
2790         spin_unlock(&ailp->xa_lock);
2791 }
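
/*
 * The search above is the standard AIL cursor walk.  A minimal sketch
 * of the same pattern (the helper below is hypothetical and unused; the
 * cursor calls are the ones used throughout this file):
 */
static int
xlog_count_ail_efis(
	struct xfs_ail		*ailp)
{
	struct xfs_ail_cursor	cur;
	xfs_log_item_t		*lip;
	int			count = 0;

	spin_lock(&ailp->xa_lock);
	for (lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
	     lip != NULL;
	     lip = xfs_trans_ail_cursor_next(ailp, &cur)) {
		if (lip->li_type == XFS_LI_EFI)
			count++;
	}
	xfs_trans_ail_cursor_done(ailp, &cur);
	spin_unlock(&ailp->xa_lock);
	return count;
}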
2792
2793 /*
2794  * Perform the transaction
2795  *
2796  * If the transaction modifies a buffer or inode, do it now.  Otherwise,
2797  * EFIs and EFDs get queued up by adding entries into the AIL for them.
2798  */
2799 STATIC int
2800 xlog_recover_do_trans(
2801         xlog_t                  *log,
2802         xlog_recover_t          *trans,
2803         int                     pass)
2804 {
2805         int                     error = 0;
2806         xlog_recover_item_t     *item;
2807
2808         error = xlog_recover_reorder_trans(log, trans, pass);
2809         if (error)
2810                 return error;
2811
2812         list_for_each_entry(item, &trans->r_itemq, ri_list) {
2813                 trace_xfs_log_recover_item_recover(log, trans, item, pass);
2814                 switch (ITEM_TYPE(item)) {
2815                 case XFS_LI_BUF:
2816                         error = xlog_recover_do_buffer_trans(log, item, pass);
2817                         break;
2818                 case XFS_LI_INODE:
2819                         error = xlog_recover_do_inode_trans(log, item, pass);
2820                         break;
2821                 case XFS_LI_EFI:
2822                         error = xlog_recover_do_efi_trans(log, item,
2823                                                           trans->r_lsn, pass);
2824                         break;
2825                 case XFS_LI_EFD:
2826                         xlog_recover_do_efd_trans(log, item, pass);
2827                         error = 0;
2828                         break;
2829                 case XFS_LI_DQUOT:
2830                         error = xlog_recover_do_dquot_trans(log, item, pass);
2831                         break;
2832                 case XFS_LI_QUOTAOFF:
2833                         error = xlog_recover_do_quotaoff_trans(log, item,
2834                                                                pass);
2835                         break;
2836                 default:
2837                         xlog_warn(
2838         "XFS: invalid item type (%d) in xlog_recover_do_trans", ITEM_TYPE(item));
2839                         ASSERT(0);
2840                         error = XFS_ERROR(EIO);
2841                         break;
2842                 }
2843
2844                 if (error)
2845                         return error;
2846         }
2847
2848         return 0;
2849 }
2850
2851 /*
2852  * Free up any resources allocated by the transaction
2853  *
2854  * Remember that EFIs, EFDs, and IUNLINKs are handled later.
2855  */
2856 STATIC void
2857 xlog_recover_free_trans(
2858         xlog_recover_t          *trans)
2859 {
2860         xlog_recover_item_t     *item, *n;
2861         int                     i;
2862
2863         list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
2864                 /* Unlink the item and free the regions it points to. */
2865                 list_del(&item->ri_list);
2866                 for (i = 0; i < item->ri_cnt; i++)
2867                         kmem_free(item->ri_buf[i].i_addr);
2868                 /* Free the region table and the item itself */
2869                 kmem_free(item->ri_buf);
2870                 kmem_free(item);
2871         }
2872         /* Free the transaction recover structure */
2873         kmem_free(trans);
2874 }
2875
2876 STATIC int
2877 xlog_recover_commit_trans(
2878         xlog_t                  *log,
2879         xlog_recover_t          *trans,
2880         int                     pass)
2881 {
2882         int                     error;
2883
2884         hlist_del(&trans->r_list);
2885         if ((error = xlog_recover_do_trans(log, trans, pass)))
2886                 return error;
2887         xlog_recover_free_trans(trans);                 /* no error */
2888         return 0;
2889 }
2890
2891 STATIC int
2892 xlog_recover_unmount_trans(
2893         xlog_recover_t          *trans)
2894 {
2895         /* Do nothing now */
2896         xlog_warn("XFS: xlog_recover_unmount_trans: Unmount LR");
2897         return 0;
2898 }
2899
2900 /*
2901  * There are two valid states of the r_state field.  0 indicates that the
2902  * transaction structure is in a normal state.  We have either seen the
2903  * start of the transaction or the last operation we added was not a partial
2904  * operation.  If the last operation we added to the transaction was a
2905  * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
2906  *
2907  * NOTE: skip LRs with 0 data length.
2908  */
2909 STATIC int
2910 xlog_recover_process_data(
2911         xlog_t                  *log,
2912         struct hlist_head       rhash[],
2913         xlog_rec_header_t       *rhead,
2914         xfs_caddr_t             dp,
2915         int                     pass)
2916 {
2917         xfs_caddr_t             lp;
2918         int                     num_logops;
2919         xlog_op_header_t        *ohead;
2920         xlog_recover_t          *trans;
2921         xlog_tid_t              tid;
2922         int                     error;
2923         unsigned long           hash;
2924         uint                    flags;
2925
2926         lp = dp + be32_to_cpu(rhead->h_len);
2927         num_logops = be32_to_cpu(rhead->h_num_logops);
2928
2929         /* check the log format matches our own - else we can't recover */
2930         if (xlog_header_check_recover(log->l_mp, rhead))
2931                 return XFS_ERROR(EIO);
2932
2933         while ((dp < lp) && num_logops) {
2934                 ASSERT(dp + sizeof(xlog_op_header_t) <= lp);
2935                 ohead = (xlog_op_header_t *)dp;
2936                 dp += sizeof(xlog_op_header_t);
2937                 if (ohead->oh_clientid != XFS_TRANSACTION &&
2938                     ohead->oh_clientid != XFS_LOG) {
2939                         xlog_warn(
2940                 "XFS: xlog_recover_process_data: bad clientid");
2941                         ASSERT(0);
2942                         return XFS_ERROR(EIO);
2943                 }
2944                 tid = be32_to_cpu(ohead->oh_tid);
2945                 hash = XLOG_RHASH(tid);
2946                 trans = xlog_recover_find_tid(&rhash[hash], tid);
2947                 if (trans == NULL) {               /* not found; add new tid */
2948                         if (ohead->oh_flags & XLOG_START_TRANS)
2949                                 xlog_recover_new_tid(&rhash[hash], tid,
2950                                         be64_to_cpu(rhead->h_lsn));
2951                 } else {
2952                         if (dp + be32_to_cpu(ohead->oh_len) > lp) {
2953                                 xlog_warn(
2954                         "XFS: xlog_recover_process_data: bad length");
2955                                 WARN_ON(1);
2956                                 return XFS_ERROR(EIO);
2957                         }
2958                         flags = ohead->oh_flags & ~XLOG_END_TRANS;
2959                         if (flags & XLOG_WAS_CONT_TRANS)
2960                                 flags &= ~XLOG_CONTINUE_TRANS;
2961                         switch (flags) {
2962                         case XLOG_COMMIT_TRANS:
2963                                 error = xlog_recover_commit_trans(log,
2964                                                                 trans, pass);
2965                                 break;
2966                         case XLOG_UNMOUNT_TRANS:
2967                                 error = xlog_recover_unmount_trans(trans);
2968                                 break;
2969                         case XLOG_WAS_CONT_TRANS:
2970                                 error = xlog_recover_add_to_cont_trans(log,
2971                                                 trans, dp,
2972                                                 be32_to_cpu(ohead->oh_len));
2973                                 break;
2974                         case XLOG_START_TRANS:
2975                                 xlog_warn(
2976                         "XFS: xlog_recover_process_data: bad transaction");
2977                                 ASSERT(0);
2978                                 error = XFS_ERROR(EIO);
2979                                 break;
2980                         case 0:
2981                         case XLOG_CONTINUE_TRANS:
2982                                 error = xlog_recover_add_to_trans(log, trans,
2983                                                 dp, be32_to_cpu(ohead->oh_len));
2984                                 break;
2985                         default:
2986                                 xlog_warn(
2987                         "XFS: xlog_recover_process_data: bad flag");
2988                                 ASSERT(0);
2989                                 error = XFS_ERROR(EIO);
2990                                 break;
2991                         }
2992                         if (error)
2993                                 return error;
2994                 }
2995                 dp += be32_to_cpu(ohead->oh_len);
2996                 num_logops--;
2997         }
2998         return 0;
2999 }
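
/*
 * The layout walked above, informally: one log record is
 *
 *	rhead | ophdr | data | ophdr | data | ... | ophdr | data
 *
 * dp starts at the first op header and lp = dp + h_len bounds the walk;
 * each iteration consumes sizeof(xlog_op_header_t) plus oh_len bytes of
 * payload, while num_logops counts the op headers still expected.
 */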
3000
3001 /*
3002  * Process an extent free intent item that was recovered from
3003  * the log.  We need to free the extents that it describes.
3004  */
3005 STATIC int
3006 xlog_recover_process_efi(
3007         xfs_mount_t             *mp,
3008         xfs_efi_log_item_t      *efip)
3009 {
3010         xfs_efd_log_item_t      *efdp;
3011         xfs_trans_t             *tp;
3012         int                     i;
3013         int                     error = 0;
3014         xfs_extent_t            *extp;
3015         xfs_fsblock_t           startblock_fsb;
3016
3017         ASSERT(!(efip->efi_flags & XFS_EFI_RECOVERED));
3018
3019         /*
3020          * First check the validity of the extents described by the
3021          * EFI.  If any are bad, then assume that all are bad and
3022          * just toss the EFI.
3023          */
3024         for (i = 0; i < efip->efi_format.efi_nextents; i++) {
3025                 extp = &(efip->efi_format.efi_extents[i]);
3026                 startblock_fsb = XFS_BB_TO_FSB(mp,
3027                                    XFS_FSB_TO_DADDR(mp, extp->ext_start));
3028                 if ((startblock_fsb == 0) ||
3029                     (extp->ext_len == 0) ||
3030                     (startblock_fsb >= mp->m_sb.sb_dblocks) ||
3031                     (extp->ext_len >= mp->m_sb.sb_agblocks)) {
3032                         /*
3033                          * This will pull the EFI from the AIL and
3034                          * free the memory associated with it.
3035                          */
3036                         xfs_efi_release(efip, efip->efi_format.efi_nextents);
3037                         return XFS_ERROR(EIO);
3038                 }
3039         }
3040
3041         tp = xfs_trans_alloc(mp, 0);
3042         error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, 0, 0);
3043         if (error)
3044                 goto abort_error;
3045         efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
3046
3047         for (i = 0; i < efip->efi_format.efi_nextents; i++) {
3048                 extp = &(efip->efi_format.efi_extents[i]);
3049                 error = xfs_free_extent(tp, extp->ext_start, extp->ext_len);
3050                 if (error)
3051                         goto abort_error;
3052                 xfs_trans_log_efd_extent(tp, efdp, extp->ext_start,
3053                                          extp->ext_len);
3054         }
3055
3056         efip->efi_flags |= XFS_EFI_RECOVERED;
3057         error = xfs_trans_commit(tp, 0);
3058         return error;
3059
3060 abort_error:
3061         xfs_trans_cancel(tp, XFS_TRANS_ABORT);
3062         return error;
3063 }
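
/*
 * Note on the sanity checks above: converting ext_start to a disk
 * address and back linearizes the (AG number, AG block) encoding of the
 * fsblock, so a corrupt extent shows up as zero or as a value at or
 * beyond sb_dblocks, and the whole EFI is tossed rather than replayed.
 */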
3064
3065 /*
3066  * When this is called, all of the EFIs which did not have
3067  * corresponding EFDs should be in the AIL.  What we do now
3068  * is free the extents associated with each one.
3069  *
3070  * Since we process the EFIs in normal transactions, they
3071  * will be removed at some point after the commit.  This prevents
3072  * us from just walking down the list processing each one.
3073  * We'll use a flag in the EFI to skip those that we've already
3074  * processed and use the AIL iteration mechanism's generation
3075  * count to try to speed this up at least a bit.
3076  *
3077  * When we start, we know that the EFIs are the only things in
3078  * the AIL.  As we process them, however, other items are added
3079  * to the AIL.  Since everything added to the AIL must come after
3080  * everything already in the AIL, we stop processing as soon as
3081  * we see something other than an EFI in the AIL.
3082  */
3083 STATIC int
3084 xlog_recover_process_efis(
3085         xlog_t                  *log)
3086 {
3087         xfs_log_item_t          *lip;
3088         xfs_efi_log_item_t      *efip;
3089         int                     error = 0;
3090         struct xfs_ail_cursor   cur;
3091         struct xfs_ail          *ailp;
3092
3093         ailp = log->l_ailp;
3094         spin_lock(&ailp->xa_lock);
3095         lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3096         while (lip != NULL) {
3097                 /*
3098                  * We're done when we see something other than an EFI.
3099                  * There should be no EFIs left in the AIL now.
3100                  */
3101                 if (lip->li_type != XFS_LI_EFI) {
3102 #ifdef DEBUG
3103                         for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
3104                                 ASSERT(lip->li_type != XFS_LI_EFI);
3105 #endif
3106                         break;
3107                 }
3108
3109                 /*
3110                  * Skip EFIs that we've already processed.
3111                  */
3112                 efip = (xfs_efi_log_item_t *)lip;
3113                 if (efip->efi_flags & XFS_EFI_RECOVERED) {
3114                         lip = xfs_trans_ail_cursor_next(ailp, &cur);
3115                         continue;
3116                 }
3117
3118                 spin_unlock(&ailp->xa_lock);
3119                 error = xlog_recover_process_efi(log->l_mp, efip);
3120                 spin_lock(&ailp->xa_lock);
3121                 if (error)
3122                         goto out;
3123                 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3124         }
3125 out:
3126         xfs_trans_ail_cursor_done(ailp, &cur);
3127         spin_unlock(&ailp->xa_lock);
3128         return error;
3129 }
3130
3131 /*
3132  * This routine performs a transaction to null out a bad inode pointer
3133  * in an agi unlinked inode hash bucket.
3134  */
3135 STATIC void
3136 xlog_recover_clear_agi_bucket(
3137         xfs_mount_t     *mp,
3138         xfs_agnumber_t  agno,
3139         int             bucket)
3140 {
3141         xfs_trans_t     *tp;
3142         xfs_agi_t       *agi;
3143         xfs_buf_t       *agibp;
3144         int             offset;
3145         int             error;
3146
3147         tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET);
3148         error = xfs_trans_reserve(tp, 0, XFS_CLEAR_AGI_BUCKET_LOG_RES(mp),
3149                                   0, 0, 0);
3150         if (error)
3151                 goto out_abort;
3152
3153         error = xfs_read_agi(mp, tp, agno, &agibp);
3154         if (error)
3155                 goto out_abort;
3156
3157         agi = XFS_BUF_TO_AGI(agibp);
3158         agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
3159         offset = offsetof(xfs_agi_t, agi_unlinked) +
3160                  (sizeof(xfs_agino_t) * bucket);
3161         xfs_trans_log_buf(tp, agibp, offset,
3162                           (offset + sizeof(xfs_agino_t) - 1));
3163
3164         error = xfs_trans_commit(tp, 0);
3165         if (error)
3166                 goto out_error;
3167         return;
3168
3169 out_abort:
3170         xfs_trans_cancel(tp, XFS_TRANS_ABORT);
3171 out_error:
3172         xfs_fs_cmn_err(CE_WARN, mp, "xlog_recover_clear_agi_bucket: "
3173                         "failed to clear agi %d. Continuing.", agno);
3174         return;
3175 }
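
/*
 * Worked example of the range logged above (bucket number made up):
 * with 4-byte xfs_agino_t entries and bucket == 5, the dirty byte range
 * is offsetof(xfs_agi_t, agi_unlinked) + 20 through + 23, so only the
 * single modified bucket slot is logged rather than the whole AGI.
 */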
3176
3177 STATIC xfs_agino_t
3178 xlog_recover_process_one_iunlink(
3179         struct xfs_mount                *mp,
3180         xfs_agnumber_t                  agno,
3181         xfs_agino_t                     agino,
3182         int                             bucket)
3183 {
3184         struct xfs_buf                  *ibp;
3185         struct xfs_dinode               *dip;
3186         struct xfs_inode                *ip;
3187         xfs_ino_t                       ino;
3188         int                             error;
3189
3190         ino = XFS_AGINO_TO_INO(mp, agno, agino);
3191         error = xfs_iget(mp, NULL, ino, 0, 0, &ip, 0);
3192         if (error)
3193                 goto fail;
3194
3195         /*
3196          * Get the on disk inode to find the next inode in the bucket.
3197          */
3198         error = xfs_itobp(mp, NULL, ip, &dip, &ibp, XBF_LOCK);
3199         if (error)
3200                 goto fail_iput;
3201
3202         ASSERT(ip->i_d.di_nlink == 0);
3203         ASSERT(ip->i_d.di_mode != 0);
3204
3205         /* setup for the next pass */
3206         agino = be32_to_cpu(dip->di_next_unlinked);
3207         xfs_buf_relse(ibp);
3208
3209         /*
3210          * Prevent any DMAPI event from being sent when the reference on
3211          * the inode is dropped.
3212          */
3213         ip->i_d.di_dmevmask = 0;
3214
3215         IRELE(ip);
3216         return agino;
3217
3218  fail_iput:
3219         IRELE(ip);
3220  fail:
3221         /*
3222          * We can't read in the inode this bucket points to, or this inode
3223          * is messed up.  Just ditch this bucket of inodes.  We will lose
3224          * some inodes and space, but at least we won't hang.
3225          *
3226          * Call xlog_recover_clear_agi_bucket() to perform a transaction to
3227          * clear the inode pointer in the bucket.
3228          */
3229         xlog_recover_clear_agi_bucket(mp, agno, bucket);
3230         return NULLAGINO;
3231 }
3232
3233 /*
3234  * xlog_iunlink_recover
3235  *
3236  * This is called during recovery to process any inodes which
3237  * were unlinked but not freed when the system crashed.  These
3238  * inodes will be on the lists in the AGI blocks.  What we do
3239  * here is scan all the AGIs and fully truncate and free any
3240  * inodes found on the lists.  Each inode is removed from the
3241  * lists when it has been fully truncated and is freed.  The
3242  * freeing of the inode and its removal from the list must be
3243  * atomic.
3244  */
3245 STATIC void
3246 xlog_recover_process_iunlinks(
3247         xlog_t          *log)
3248 {
3249         xfs_mount_t     *mp;
3250         xfs_agnumber_t  agno;
3251         xfs_agi_t       *agi;
3252         xfs_buf_t       *agibp;
3253         xfs_agino_t     agino;
3254         int             bucket;
3255         int             error;
3256         uint            mp_dmevmask;
3257
3258         mp = log->l_mp;
3259
3260         /*
3261          * Prevent any DMAPI event from being sent while in this function.
3262          */
3263         mp_dmevmask = mp->m_dmevmask;
3264         mp->m_dmevmask = 0;
3265
3266         for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
3267                 /*
3268                  * Find the agi for this ag.
3269                  */
3270                 error = xfs_read_agi(mp, NULL, agno, &agibp);
3271                 if (error) {
3272                         /*
3273                          * The AGI is corrupted; don't process it.
3274                          *
3275                          * We should probably mark the filesystem as corrupt
3276                          * after we've recovered all the AGs we can....
3277                          */
3278                         continue;
3279                 }
3280                 agi = XFS_BUF_TO_AGI(agibp);
3281
3282                 for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
3283                         agino = be32_to_cpu(agi->agi_unlinked[bucket]);
3284                         while (agino != NULLAGINO) {
3285                                 /*
3286                                  * Release the agi buffer so that it can
3287                                  * be acquired in the normal course of the
3288                                  * transaction to truncate and free the inode.
3289                                  */
3290                                 xfs_buf_relse(agibp);
3291
3292                                 agino = xlog_recover_process_one_iunlink(mp,
3293                                                         agno, agino, bucket);
3294
3295                                 /*
3296                                  * Reacquire the AGI buffer and continue around
3297                                  * the loop. This should never fail as we know
3298                                  * the buffer was good earlier on.
3299                                  */
3300                                 error = xfs_read_agi(mp, NULL, agno, &agibp);
3301                                 ASSERT(error == 0);
3302                                 agi = XFS_BUF_TO_AGI(agibp);
3303                         }
3304                 }
3305
3306                 /*
3307                  * Release the buffer for the current agi so we can
3308                  * go on to the next one.
3309                  */
3310                 xfs_buf_relse(agibp);
3311         }
3312
3313         mp->m_dmevmask = mp_dmevmask;
3314 }
3315
3316
3317 #ifdef DEBUG
3318 STATIC void
3319 xlog_pack_data_checksum(
3320         xlog_t          *log,
3321         xlog_in_core_t  *iclog,
3322         int             size)
3323 {
3324         int             i;
3325         __be32          *up;
3326         uint            chksum = 0;
3327
3328         up = (__be32 *)iclog->ic_datap;
3329         /* divide length by 4 to get # words */
3330         for (i = 0; i < (size >> 2); i++) {
3331                 chksum ^= be32_to_cpu(*up);
3332                 up++;
3333         }
3334         iclog->ic_header.h_chksum = cpu_to_be32(chksum);
3335 }
3336 #else
3337 #define xlog_pack_data_checksum(log, iclog, size)
3338 #endif
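
/*
 * A standalone sketch of the DEBUG checksum above: XOR of the
 * big-endian 32-bit words in the record body; any tail bytes short of a
 * whole word are ignored (size >> 2 truncates).  Plain C, no kernel
 * headers; the helper name is made up for illustration:
 */
static unsigned int
xlog_xor_cksum_example(
	const unsigned char	*buf,
	int			size)
{
	unsigned int		chksum = 0;
	int			i;

	for (i = 0; i + 4 <= size; i += 4)	/* whole words only */
		chksum ^= (unsigned int)buf[i] << 24 |
			  (unsigned int)buf[i + 1] << 16 |
			  (unsigned int)buf[i + 2] << 8 |
			  (unsigned int)buf[i + 3];
	return chksum;
}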
3339
3340 /*
3341  * Stamp the cycle number into the first word of every block.
3342  */
3343 void
3344 xlog_pack_data(
3345         xlog_t                  *log,
3346         xlog_in_core_t          *iclog,
3347         int                     roundoff)
3348 {
3349         int                     i, j, k;
3350         int                     size = iclog->ic_offset + roundoff;
3351         __be32                  cycle_lsn;
3352         xfs_caddr_t             dp;
3353
3354         xlog_pack_data_checksum(log, iclog, size);
3355
3356         cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn);
3357
3358         dp = iclog->ic_datap;
3359         for (i = 0; i < BTOBB(size) &&
3360                 i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
3361                 iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp;
3362                 *(__be32 *)dp = cycle_lsn;
3363                 dp += BBSIZE;
3364         }
3365
3366         if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
3367                 xlog_in_core_2_t *xhdr = iclog->ic_data;
3368
3369                 for ( ; i < BTOBB(size); i++) {
3370                         j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3371                         k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3372                         xhdr[j].hic_xheader.xh_cycle_data[k] = *(__be32 *)dp;
3373                         *(__be32 *)dp = cycle_lsn;
3374                         dp += BBSIZE;
3375                 }
3376
3377                 for (i = 1; i < log->l_iclog_heads; i++) {
3378                         xhdr[i].hic_xheader.xh_cycle = cycle_lsn;
3379                 }
3380         }
3381 }
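
/*
 * Index arithmetic above, worked through: XLOG_HEADER_CYCLE_SIZE is
 * 32k, so one header holds the displaced words of 32k/512 = 64 basic
 * blocks.  For block i = 100 of a large v2 record, j = 100/64 = 1 and
 * k = 100%64 = 36: the block's first word is parked in extended header
 * 1, slot 36, and xlog_unpack_data() restores it from there.
 */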
3382
3383 #if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
3384 STATIC void
3385 xlog_unpack_data_checksum(
3386         xlog_rec_header_t       *rhead,
3387         xfs_caddr_t             dp,
3388         xlog_t                  *log)
3389 {
3390         __be32                  *up = (__be32 *)dp;
3391         uint                    chksum = 0;
3392         int                     i;
3393
3394         /* divide length by 4 to get # words */
3395         for (i = 0; i < be32_to_cpu(rhead->h_len) >> 2; i++) {
3396                 chksum ^= be32_to_cpu(*up);
3397                 up++;
3398         }
3399         if (chksum != be32_to_cpu(rhead->h_chksum)) {
3400                 if (rhead->h_chksum ||
3401                     ((log->l_flags & XLOG_CHKSUM_MISMATCH) == 0)) {
3402                         cmn_err(CE_DEBUG,
3403                                 "XFS: LogR chksum mismatch: was (0x%x) is (0x%x)\n",
3404                                 be32_to_cpu(rhead->h_chksum), chksum);
3405                         cmn_err(CE_DEBUG,
3406 "XFS: Disregard message if filesystem was created with non-DEBUG kernel");
3407                         if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
3408                                 cmn_err(CE_DEBUG,
3409                                         "XFS: LogR this is a LogV2 filesystem\n");
3410                         }
3411                         log->l_flags |= XLOG_CHKSUM_MISMATCH;
3412                 }
3413         }
3414 }
3415 #else
3416 #define xlog_unpack_data_checksum(rhead, dp, log)
3417 #endif
3418
3419 STATIC void
3420 xlog_unpack_data(
3421         xlog_rec_header_t       *rhead,
3422         xfs_caddr_t             dp,
3423         xlog_t                  *log)
3424 {
3425         int                     i, j, k;
3426
3427         for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
3428                   i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
3429                 *(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
3430                 dp += BBSIZE;
3431         }
3432
3433         if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
3434                 xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
3435                 for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
3436                         j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3437                         k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3438                         *(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
3439                         dp += BBSIZE;
3440                 }
3441         }
3442
3443         xlog_unpack_data_checksum(rhead, dp, log);
3444 }
3445
3446 STATIC int
3447 xlog_valid_rec_header(
3448         xlog_t                  *log,
3449         xlog_rec_header_t       *rhead,
3450         xfs_daddr_t             blkno)
3451 {
3452         int                     hlen;
3453
3454         if (unlikely(be32_to_cpu(rhead->h_magicno) != XLOG_HEADER_MAGIC_NUM)) {
3455                 XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
3456                                 XFS_ERRLEVEL_LOW, log->l_mp);
3457                 return XFS_ERROR(EFSCORRUPTED);
3458         }
3459         if (unlikely(
3460             (!rhead->h_version ||
3461             (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
3462                 xlog_warn("XFS: %s: unrecognised log version (%d).",
3463                         __func__, be32_to_cpu(rhead->h_version));
3464                 return XFS_ERROR(EIO);
3465         }
3466
3467         /* LR body must have data or it wouldn't have been written */
3468         hlen = be32_to_cpu(rhead->h_len);
3469         if (unlikely(hlen <= 0 || hlen > INT_MAX)) {
3470                 XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
3471                                 XFS_ERRLEVEL_LOW, log->l_mp);
3472                 return XFS_ERROR(EFSCORRUPTED);
3473         }
3474         if (unlikely(blkno > log->l_logBBsize || blkno > INT_MAX)) {
3475                 XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
3476                                 XFS_ERRLEVEL_LOW, log->l_mp);
3477                 return XFS_ERROR(EFSCORRUPTED);
3478         }
3479         return 0;
3480 }
3481
3482 /*
3483  * Read the log from tail to head and process the log records found.
3484  * Handle the two cases where the tail and head are in the same cycle
3485  * and where the active portion of the log wraps around the end of
3486  * the physical log separately.  The pass parameter is passed through
3487  * to the routines called to process the data and is not looked at
3488  * here.
3489  */
3490 STATIC int
3491 xlog_do_recovery_pass(
3492         xlog_t                  *log,
3493         xfs_daddr_t             head_blk,
3494         xfs_daddr_t             tail_blk,
3495         int                     pass)
3496 {
3497         xlog_rec_header_t       *rhead;
3498         xfs_daddr_t             blk_no;
3499         xfs_caddr_t             offset;
3500         xfs_buf_t               *hbp, *dbp;
3501         int                     error = 0, h_size;
3502         int                     bblks, split_bblks;
3503         int                     hblks, split_hblks, wrapped_hblks;
3504         struct hlist_head       rhash[XLOG_RHASH_SIZE];
3505
3506         ASSERT(head_blk != tail_blk);
3507
3508         /*
3509          * Read the header of the tail block and get the iclog buffer size from
3510          * h_size.  Use this to tell how many sectors make up the log header.
3511          */
3512         if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
3513                 /*
3514                  * When using variable length iclogs, read first sector of
3515                  * iclog header and extract the header size from it.  Get a
3516                  * new hbp that is the correct size.
3517                  */
3518                 hbp = xlog_get_bp(log, 1);
3519                 if (!hbp)
3520                         return ENOMEM;
3521
3522                 error = xlog_bread(log, tail_blk, 1, hbp, &offset);
3523                 if (error)
3524                         goto bread_err1;
3525
3526                 rhead = (xlog_rec_header_t *)offset;
3527                 error = xlog_valid_rec_header(log, rhead, tail_blk);
3528                 if (error)
3529                         goto bread_err1;
3530                 h_size = be32_to_cpu(rhead->h_size);
3531                 if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
3532                     (h_size > XLOG_HEADER_CYCLE_SIZE)) {
3533                         hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
3534                         if (h_size % XLOG_HEADER_CYCLE_SIZE)
3535                                 hblks++;
3536                         xlog_put_bp(hbp);
3537                         hbp = xlog_get_bp(log, hblks);
3538                 } else {
3539                         hblks = 1;
3540                 }
3541         } else {
3542                 ASSERT(log->l_sectbb_log == 0);
3543                 hblks = 1;
3544                 hbp = xlog_get_bp(log, 1);
3545                 h_size = XLOG_BIG_RECORD_BSIZE;
3546         }
3547
3548         if (!hbp)
3549                 return ENOMEM;
3550         dbp = xlog_get_bp(log, BTOBB(h_size));
3551         if (!dbp) {
3552                 xlog_put_bp(hbp);
3553                 return ENOMEM;
3554         }
3555
3556         memset(rhash, 0, sizeof(rhash));
3557         if (tail_blk <= head_blk) {
3558                 for (blk_no = tail_blk; blk_no < head_blk; ) {
3559                         error = xlog_bread(log, blk_no, hblks, hbp, &offset);
3560                         if (error)
3561                                 goto bread_err2;
3562
3563                         rhead = (xlog_rec_header_t *)offset;
3564                         error = xlog_valid_rec_header(log, rhead, blk_no);
3565                         if (error)
3566                                 goto bread_err2;
3567
3568                         /* blocks in data section */
3569                         bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3570                         error = xlog_bread(log, blk_no + hblks, bblks, dbp,
3571                                            &offset);
3572                         if (error)
3573                                 goto bread_err2;
3574
3575                         xlog_unpack_data(rhead, offset, log);
3576                         if ((error = xlog_recover_process_data(log,
3577                                                 rhash, rhead, offset, pass)))
3578                                 goto bread_err2;
3579                         blk_no += bblks + hblks;
3580                 }
3581         } else {
3582                 /*
3583                  * Perform recovery around the end of the physical log.
3584                  * When the head is not on the same cycle number as the tail,
3585                  * we can't do a sequential recovery as above.
3586                  */
3587                 blk_no = tail_blk;
3588                 while (blk_no < log->l_logBBsize) {
3589                         /*
3590                          * Check for header wrapping around physical end-of-log
3591                          */
3592                         offset = XFS_BUF_PTR(hbp);
3593                         split_hblks = 0;
3594                         wrapped_hblks = 0;
3595                         if (blk_no + hblks <= log->l_logBBsize) {
3596                                 /* Read header in one read */
3597                                 error = xlog_bread(log, blk_no, hblks, hbp,
3598                                                    &offset);
3599                                 if (error)
3600                                         goto bread_err2;
3601                         } else {
3602                                 /* This LR is split across physical log end */
3603                                 if (blk_no != log->l_logBBsize) {
3604                                         /* some data before physical log end */
3605                                         ASSERT(blk_no <= INT_MAX);
3606                                         split_hblks = log->l_logBBsize - (int)blk_no;
3607                                         ASSERT(split_hblks > 0);
3608                                         error = xlog_bread(log, blk_no,
3609                                                            split_hblks, hbp,
3610                                                            &offset);
3611                                         if (error)
3612                                                 goto bread_err2;
3613                                 }
3614
3615                                 /*
3616                                  * Note: this black magic still works with
3617                                  * large sector sizes (non-512) only because:
3618                                  * - we increased the buffer size originally
3619                                  *   by 1 sector giving us enough extra space
3620                                  *   for the second read;
3621                                  * - the log start is guaranteed to be sector
3622                                  *   aligned;
3623                                  * - we read the log end (LR header start)
3624                                  *   _first_, then the log start (LR header end)
3625                                  *   - order is important.
3626                                  */
3627                                 wrapped_hblks = hblks - split_hblks;
3628                                 error = XFS_BUF_SET_PTR(hbp,
3629                                                 offset + BBTOB(split_hblks),
3630                                                 BBTOB(hblks - split_hblks));
3631                                 if (error)
3632                                         goto bread_err2;
3633
3634                                 error = xlog_bread_noalign(log, 0,
3635                                                            wrapped_hblks, hbp);
3636                                 if (error)
3637                                         goto bread_err2;
3638
3639                                 error = XFS_BUF_SET_PTR(hbp, offset,
3640                                                         BBTOB(hblks));
3641                                 if (error)
3642                                         goto bread_err2;
3643                         }
3644                         rhead = (xlog_rec_header_t *)offset;
3645                         error = xlog_valid_rec_header(log, rhead,
3646                                                 split_hblks ? blk_no : 0);
3647                         if (error)
3648                                 goto bread_err2;
3649
3650                         bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3651                         blk_no += hblks;
3652
3653                         /* Read in data for log record */
3654                         if (blk_no + bblks <= log->l_logBBsize) {
3655                                 error = xlog_bread(log, blk_no, bblks, dbp,
3656                                                    &offset);
3657                                 if (error)
3658                                         goto bread_err2;
3659                         } else {
3660                                 /* This log record is split across the
3661                                  * physical end of log */
3662                                 offset = XFS_BUF_PTR(dbp);
3663                                 split_bblks = 0;
3664                                 if (blk_no != log->l_logBBsize) {
3665                                         /* some data is before the physical
3666                                          * end of log */
3667                                         ASSERT(!wrapped_hblks);
3668                                         ASSERT(blk_no <= INT_MAX);
3669                                         split_bblks =
3670                                                 log->l_logBBsize - (int)blk_no;
3671                                         ASSERT(split_bblks > 0);
3672                                         error = xlog_bread(log, blk_no,
3673                                                         split_bblks, dbp,
3674                                                         &offset);
3675                                         if (error)
3676                                                 goto bread_err2;
3677                                 }
3678
3679                                 /*
3680                                  * Note: this black magic still works with
3681                                  * large sector sizes (non-512) only because:
3682                                  * - we increased the buffer size originally
3683                                  *   by 1 sector giving us enough extra space
3684                                  *   for the second read;
3685                                  * - the log start is guaranteed to be sector
3686                                  *   aligned;
3687                                  * - we read the log end (LR header start)
3688                                  *   _first_, then the log start (LR header end)
3689                                  *   - order is important.
3690                                  */
3691                                 error = XFS_BUF_SET_PTR(dbp,
3692                                                 offset + BBTOB(split_bblks),
3693                                                 BBTOB(bblks - split_bblks));
3694                                 if (error)
3695                                         goto bread_err2;
3696
3697                                 error = xlog_bread_noalign(log, wrapped_hblks,
3698                                                 bblks - split_bblks,
3699                                                 dbp);
3700                                 if (error)
3701                                         goto bread_err2;
3702
3703                                 error = XFS_BUF_SET_PTR(dbp, offset, h_size);
3704                                 if (error)
3705                                         goto bread_err2;
3706                         }
3707                         xlog_unpack_data(rhead, offset, log);
3708                         if ((error = xlog_recover_process_data(log, rhash,
3709                                                         rhead, offset, pass)))
3710                                 goto bread_err2;
3711                         blk_no += bblks;
3712                 }
3713
3714                 ASSERT(blk_no >= log->l_logBBsize);
3715                 blk_no -= log->l_logBBsize;
3716
3717                 /* read first part of physical log */
3718                 while (blk_no < head_blk) {
3719                         error = xlog_bread(log, blk_no, hblks, hbp, &offset);
3720                         if (error)
3721                                 goto bread_err2;
3722
3723                         rhead = (xlog_rec_header_t *)offset;
3724                         error = xlog_valid_rec_header(log, rhead, blk_no);
3725                         if (error)
3726                                 goto bread_err2;
3727
3728                         bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3729                         error = xlog_bread(log, blk_no+hblks, bblks, dbp,
3730                                            &offset);
3731                         if (error)
3732                                 goto bread_err2;
3733
3734                         xlog_unpack_data(rhead, offset, log);
3735                         if ((error = xlog_recover_process_data(log, rhash,
3736                                                         rhead, offset, pass)))
3737                                 goto bread_err2;
3738                         blk_no += bblks + hblks;
3739                 }
3740         }
3741
3742  bread_err2:
3743         xlog_put_bp(dbp);
3744  bread_err1:
3745         xlog_put_bp(hbp);
3746         return error;
3747 }
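
/*
 * Worked example of the split-header read above (sizes made up): with
 * l_logBBsize = 1000, hblks = 4 and a record header starting at block
 * 998, split_hblks = 1000 - 998 = 2 blocks are read from the physical
 * end of the log and wrapped_hblks = 4 - 2 = 2 more from block 0; the
 * two reads land back to back in hbp, relying on the extra sector of
 * buffer space noted in the "black magic" comments above.
 */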
3748
3749 /*
3750  * Do the recovery of the log.  We actually do this in two phases.
3751  * The two passes are necessary in order to implement the function
3752  * of cancelling a record written into the log.  The first pass
3753  * determines those things which have been cancelled, and the
3754  * second pass replays log items normally except for those which
3755  * have been cancelled.  The handling of the replay and cancellations
3756  * takes place in the log item type specific routines.
3757  *
3758  * The table of items which have cancel records in the log is allocated
3759  * and freed at this level, since only here do we know when all of
3760  * the log recovery has been completed.
3761  */
3762 STATIC int
3763 xlog_do_log_recovery(
3764         xlog_t          *log,
3765         xfs_daddr_t     head_blk,
3766         xfs_daddr_t     tail_blk)
3767 {
3768         int             error;
3769
3770         ASSERT(head_blk != tail_blk);
3771
3772         /*
3773          * First do a pass to find all of the cancelled buf log items.
3774          * Store them in the buf_cancel_table for use in the second pass.
3775          */
3776         log->l_buf_cancel_table =
3777                 (xfs_buf_cancel_t **)kmem_zalloc(XLOG_BC_TABLE_SIZE *
3778                                                  sizeof(xfs_buf_cancel_t*),
3779                                                  KM_SLEEP);
3780         error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3781                                       XLOG_RECOVER_PASS1);
3782         if (error != 0) {
3783                 kmem_free(log->l_buf_cancel_table);
3784                 log->l_buf_cancel_table = NULL;
3785                 return error;
3786         }
3787         /*
3788          * Then do a second pass to actually recover the items in the log.
3789          * When it is complete free the table of buf cancel items.
3790          */
3791         error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3792                                       XLOG_RECOVER_PASS2);
3793 #ifdef DEBUG
3794         if (!error) {
3795                 int     i;
3796
3797                 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
3798                         ASSERT(log->l_buf_cancel_table[i] == NULL);
3799         }
3800 #endif  /* DEBUG */
3801
3802         kmem_free(log->l_buf_cancel_table);
3803         log->l_buf_cancel_table = NULL;
3804
3805         return error;
3806 }
3807
3808 /*
3809  * Do the actual recovery
3810  */
3811 STATIC int
3812 xlog_do_recover(
3813         xlog_t          *log,
3814         xfs_daddr_t     head_blk,
3815         xfs_daddr_t     tail_blk)
3816 {
3817         int             error;
3818         xfs_buf_t       *bp;
3819         xfs_sb_t        *sbp;
3820
3821         /*
3822          * First replay the images in the log.
3823          */
3824         error = xlog_do_log_recovery(log, head_blk, tail_blk);
3825         if (error) {
3826                 return error;
3827         }
3828
3829         XFS_bflush(log->l_mp->m_ddev_targp);
3830
3831         /*
3832          * If IO errors happened during recovery, bail out.
3833          */
3834         if (XFS_FORCED_SHUTDOWN(log->l_mp)) {
3835                 return EIO;
3836         }
3837
3838         /*
3839          * We now update the tail_lsn since much of the recovery has completed
3840          * and there may be space available to use.  If there were no extent
3841  * frees or iunlinks, we can free the entire log and set the tail_lsn to
3842          * be the last_sync_lsn.  This was set in xlog_find_tail to be the
3843          * lsn of the last known good LR on disk.  If there are extent frees
3844          * or iunlinks they will have some entries in the AIL; so we look at
3845          * the AIL to determine how to set the tail_lsn.
3846          */
3847         xlog_assign_tail_lsn(log->l_mp);
3848
3849         /*
3850          * Now that we've finished replaying all buffer and inode
3851          * updates, re-read in the superblock.
3852          */
3853         bp = xfs_getsb(log->l_mp, 0);
3854         XFS_BUF_UNDONE(bp);
3855         ASSERT(!(XFS_BUF_ISWRITE(bp)));
3856         ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
3857         XFS_BUF_READ(bp);
3858         XFS_BUF_UNASYNC(bp);
3859         xfsbdstrat(log->l_mp, bp);
3860         error = xfs_iowait(bp);
3861         if (error) {
3862                 xfs_ioerror_alert("xlog_do_recover",
3863                                   log->l_mp, bp, XFS_BUF_ADDR(bp));
3864                 ASSERT(0);
3865                 xfs_buf_relse(bp);
3866                 return error;
3867         }
3868
3869         /* Convert superblock from on-disk format */
3870         sbp = &log->l_mp->m_sb;
3871         xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
3872         ASSERT(sbp->sb_magicnum == XFS_SB_MAGIC);
3873         ASSERT(xfs_sb_good_version(sbp));
3874         xfs_buf_relse(bp);
3875
3876         /* We've re-read the superblock so re-initialize per-cpu counters */
3877         xfs_icsb_reinit_counters(log->l_mp);
3878
3879         xlog_recover_check_summary(log);
3880
3881         /* Normal transactions can now occur */
3882         log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
3883         return 0;
3884 }
3885
3886 /*
3887  * Perform recovery and re-initialize some log variables in xlog_find_tail.
3888  *
3889  * Return error or zero.
3890  */
3891 int
3892 xlog_recover(
3893         xlog_t          *log)
3894 {
3895         xfs_daddr_t     head_blk, tail_blk;
3896         int             error;
3897
3898         /* find the tail of the log */
3899         if ((error = xlog_find_tail(log, &head_blk, &tail_blk)))
3900                 return error;
3901
3902         if (tail_blk != head_blk) {
3903                 /* There used to be a comment here:
3904                  *
3905                  * disallow recovery on read-only mounts.  note -- mount
3906                  * checks for ENOSPC and turns it into an intelligent
3907                  * error message.
3908                  * ...but this is no longer true.  Now, unless you specify
3909                  * NORECOVERY (in which case this function would never be
3910                  * called), we just go ahead and recover.  We do this all
3911                  * under the vfs layer, so we can get away with it unless
3912                  * the device itself is read-only, in which case we fail.
3913                  */
3914                 if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
3915                         return error;
3916                 }
3917
3918                 cmn_err(CE_NOTE,
3919                         "Starting XFS recovery on filesystem: %s (logdev: %s)",
3920                         log->l_mp->m_fsname, log->l_mp->m_logname ?
3921                         log->l_mp->m_logname : "internal");
3922
3923                 error = xlog_do_recover(log, head_blk, tail_blk);
3924                 log->l_flags |= XLOG_RECOVERY_NEEDED;
3925         }
3926         return error;
3927 }
3928
3929 /*
3930  * In the first part of recovery we replay inodes and buffers and build
3931  * up the list of extent free items which need to be processed.  Here
3932  * we process the extent free items and clean up the on disk unlinked
3933  * inode lists.  This is separated from the first part of recovery so
3934  * that the root and real-time bitmap inodes can be read in from disk in
3935  * between the two stages.  This is necessary so that we can free space
3936  * in the real-time portion of the file system.
3937  */
3938 int
3939 xlog_recover_finish(
3940         xlog_t          *log)
3941 {
3942         /*
3943          * Now we're ready to do the transactions needed for the
3944          * rest of recovery.  Start with completing all the extent
3945          * free intent records and then process the unlinked inode
3946          * lists.  At this point, we essentially run in normal mode
3947          * except that we're still performing recovery actions
3948          * rather than accepting new requests.
3949          */
3950         if (log->l_flags & XLOG_RECOVERY_NEEDED) {
3951                 int     error;
3952                 error = xlog_recover_process_efis(log);
3953                 if (error) {
3954                         cmn_err(CE_ALERT,
3955                                 "Failed to recover EFIs on filesystem: %s",
3956                                 log->l_mp->m_fsname);
3957                         return error;
3958                 }
3959                 /*
3960                  * Sync the log to get all the EFIs out of the AIL.
3961                  * This isn't absolutely necessary, but it helps in
3962                  * case the unlink transactions would have problems
3963                  * pushing the EFIs out of the way.
3964                  */
3965                 xfs_log_force(log->l_mp, XFS_LOG_SYNC);
3966
		xlog_recover_process_iunlinks(log);

		xlog_recover_check_summary(log);

		cmn_err(CE_NOTE,
			"Ending XFS recovery on filesystem: %s (logdev: %s)",
			log->l_mp->m_fsname, log->l_mp->m_logname ?
			log->l_mp->m_logname : "internal");
		log->l_flags &= ~XLOG_RECOVERY_NEEDED;
	} else {
		cmn_err(CE_DEBUG,
			"!Ending clean XFS mount for filesystem: %s\n",
			log->l_mp->m_fsname);
	}
	return 0;
}
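
/*
 * Illustrative sketch only, not part of this file's logic: one way a
 * caller sequences the two recovery stages described above.  In this
 * tree the real call sites are in the log mount path (xfs_log_mount()
 * for stage one, xfs_log_mount_finish() for stage two); error handling
 * is simplified here.
 *
 *	error = xlog_recover(log);		 stage 1: replay the log
 *	if (error)
 *		return error;
 *	... read in root and realtime bitmap inodes ...
 *	error = xlog_recover_finish(log);	 stage 2: EFIs + unlinked lists
 */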


#if defined(DEBUG)
/*
 * Read all of the agf and agi counters and check that they
 * are consistent with the superblock counters.
 */
void
xlog_recover_check_summary(
	xlog_t		*log)
{
	xfs_mount_t	*mp;
	xfs_agf_t	*agfp;
	xfs_buf_t	*agfbp;
	xfs_buf_t	*agibp;
	xfs_buf_t	*sbbp;
#ifdef XFS_LOUD_RECOVERY
	xfs_sb_t	*sbp;
#endif
	xfs_agnumber_t	agno;
	__uint64_t	freeblks;
	__uint64_t	itotal;
	__uint64_t	ifree;
	int		error;

	mp = log->l_mp;

	freeblks = 0LL;
	itotal = 0LL;
	ifree = 0LL;
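
	/*
	 * Walk every allocation group, accumulating the free block count
	 * from each AGF (free extents plus blocks on the AGFL) and the
	 * allocated and free inode counts from each AGI.
	 */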
	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
		if (error) {
			xfs_fs_cmn_err(CE_ALERT, mp,
					"xlog_recover_check_summary(agf) "
					"agf read failed agno %d error %d",
							agno, error);
		} else {
			agfp = XFS_BUF_TO_AGF(agfbp);
			freeblks += be32_to_cpu(agfp->agf_freeblks) +
				    be32_to_cpu(agfp->agf_flcount);
			xfs_buf_relse(agfbp);
		}

		/* an AGI read failure just skips that AG's inode counts */
		error = xfs_read_agi(mp, NULL, agno, &agibp);
		if (!error) {
			struct xfs_agi	*agi = XFS_BUF_TO_AGI(agibp);

			itotal += be32_to_cpu(agi->agi_count);
			ifree += be32_to_cpu(agi->agi_freecount);
			xfs_buf_relse(agibp);
		}
	}

	sbbp = xfs_getsb(mp, 0);
#ifdef XFS_LOUD_RECOVERY
	sbp = &mp->m_sb;
	xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(sbbp));
	cmn_err(CE_NOTE,
		"xlog_recover_check_summary: sb_icount %Lu itotal %Lu",
		sbp->sb_icount, itotal);
	cmn_err(CE_NOTE,
		"xlog_recover_check_summary: sb_ifree %Lu ifree %Lu",
		sbp->sb_ifree, ifree);
	cmn_err(CE_NOTE,
		"xlog_recover_check_summary: sb_fdblocks %Lu freeblks %Lu",
		sbp->sb_fdblocks, freeblks);
#if 0
	/*
	 * This is turned off until I account for the allocation
	 * btree blocks which live in free space.
	 */
	ASSERT(sbp->sb_icount == itotal);
	ASSERT(sbp->sb_ifree == ifree);
	ASSERT(sbp->sb_fdblocks == freeblks);
#endif
#endif
	xfs_buf_relse(sbbp);
}
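
/*
 * For reference, the relations the (currently disabled) asserts above
 * would enforce:
 *
 *	sb_icount   == sum over all AGs of agi_count
 *	sb_ifree    == sum over all AGs of agi_freecount
 *	sb_fdblocks == sum over all AGs of (agf_freeblks + agf_flcount)
 *
 * The last relation only holds once the allocation btree blocks that
 * live in free space are accounted for, which is why the asserts sit
 * under "#if 0".
 */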
#endif /* DEBUG */