/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_imap.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_log_priv.h"
#include "xfs_buf_item.h"
#include "xfs_log_recover.h"
#include "xfs_extfree_item.h"
#include "xfs_trans_priv.h"
#include "xfs_quota.h"
#include "xfs_rw.h"

STATIC int      xlog_find_zeroed(xlog_t *, xfs_daddr_t *);
STATIC int      xlog_clear_stale_blocks(xlog_t *, xfs_lsn_t);
STATIC void     xlog_recover_insert_item_backq(xlog_recover_item_t **q,
                                               xlog_recover_item_t *item);
#if defined(DEBUG)
STATIC void     xlog_recover_check_summary(xlog_t *);
STATIC void     xlog_recover_check_ail(xfs_mount_t *, xfs_log_item_t *, int);
#else
#define xlog_recover_check_summary(log)
#define xlog_recover_check_ail(mp, lip, gen)
#endif


/*
 * Sector aligned buffer routines for buffer create/read/write/access
 */

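/*
 * When the log device has a sector size larger than a basic block,
 * l_sectbb_mask is non-zero and all log I/O must be sector aligned.
 * XLOG_SECTOR_ROUNDUP_BBCOUNT rounds a basic block count up to a whole
 * number of sectors; XLOG_SECTOR_ROUNDDOWN_BLKNO aligns a block number
 * down to the start of its sector.
 */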
#define XLOG_SECTOR_ROUNDUP_BBCOUNT(log, bbs)   \
        ( ((log)->l_sectbb_mask && (bbs & (log)->l_sectbb_mask)) ? \
        ((bbs + (log)->l_sectbb_mask + 1) & ~(log)->l_sectbb_mask) : (bbs) )
#define XLOG_SECTOR_ROUNDDOWN_BLKNO(log, bno)   ((bno) & ~(log)->l_sectbb_mask)

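/*
 * Allocate a buffer to hold num_bblks basic blocks of log data, with
 * extra room so that the sector rounding done by xlog_bread() and
 * xlog_bwrite() cannot overrun the allocation.
 */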
xfs_buf_t *
xlog_get_bp(
        xlog_t          *log,
        int             num_bblks)
{
        ASSERT(num_bblks > 0);

        if (log->l_sectbb_log) {
                if (num_bblks > 1)
                        num_bblks += XLOG_SECTOR_ROUNDUP_BBCOUNT(log, 1);
                num_bblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, num_bblks);
        }
        return xfs_buf_get_noaddr(BBTOB(num_bblks), log->l_mp->m_logdev_targp);
}

void
xlog_put_bp(
        xfs_buf_t       *bp)
{
        xfs_buf_free(bp);
}


/*
 * nbblks should be uint, but oh well.  Just want to catch that 32-bit length.
 */
int
xlog_bread(
        xlog_t          *log,
        xfs_daddr_t     blk_no,
        int             nbblks,
        xfs_buf_t       *bp)
{
        int             error;

        if (log->l_sectbb_log) {
                blk_no = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, blk_no);
                nbblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, nbblks);
        }

        ASSERT(nbblks > 0);
        ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp));
        ASSERT(bp);

        XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
        XFS_BUF_READ(bp);
        XFS_BUF_BUSY(bp);
        XFS_BUF_SET_COUNT(bp, BBTOB(nbblks));
        XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp);

        xfsbdstrat(log->l_mp, bp);
        if ((error = xfs_iowait(bp)))
                xfs_ioerror_alert("xlog_bread", log->l_mp,
                                  bp, XFS_BUF_ADDR(bp));
        return error;
}

/*
 * Write out the buffer at the given block for the given number of blocks.
 * The buffer is kept locked across the write and is returned locked.
 * This can only be used for synchronous log writes.
 */
STATIC int
xlog_bwrite(
        xlog_t          *log,
        xfs_daddr_t     blk_no,
        int             nbblks,
        xfs_buf_t       *bp)
{
        int             error;

        if (log->l_sectbb_log) {
                blk_no = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, blk_no);
                nbblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, nbblks);
        }

        ASSERT(nbblks > 0);
        ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp));

        XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
        XFS_BUF_ZEROFLAGS(bp);
        XFS_BUF_BUSY(bp);
        XFS_BUF_HOLD(bp);
        XFS_BUF_PSEMA(bp, PRIBIO);
        XFS_BUF_SET_COUNT(bp, BBTOB(nbblks));
        XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp);

        if ((error = xfs_bwrite(log->l_mp, bp)))
                xfs_ioerror_alert("xlog_bwrite", log->l_mp,
                                  bp, XFS_BUF_ADDR(bp));
        return error;
}

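/*
 * Return a pointer to the data for blk_no within bp.  When the log
 * device uses large sectors, xlog_bread() rounds the start of the read
 * down to a sector boundary, so the requested block need not sit at
 * the beginning of the buffer.
 */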
STATIC xfs_caddr_t
xlog_align(
        xlog_t          *log,
        xfs_daddr_t     blk_no,
        int             nbblks,
        xfs_buf_t       *bp)
{
        xfs_caddr_t     ptr;

        if (!log->l_sectbb_log)
                return XFS_BUF_PTR(bp);

        ptr = XFS_BUF_PTR(bp) + BBTOB((int)blk_no & log->l_sectbb_mask);
        ASSERT(XFS_BUF_SIZE(bp) >=
                BBTOB(nbblks + (blk_no & log->l_sectbb_mask)));
        return ptr;
}

#ifdef DEBUG
/*
 * dump debug superblock and log record information
 */
STATIC void
xlog_header_check_dump(
        xfs_mount_t             *mp,
        xlog_rec_header_t       *head)
{
        int                     b;

        cmn_err(CE_DEBUG, "%s:  SB : uuid = ", __FUNCTION__);
        for (b = 0; b < 16; b++)
                cmn_err(CE_DEBUG, "%02x", ((uchar_t *)&mp->m_sb.sb_uuid)[b]);
        cmn_err(CE_DEBUG, ", fmt = %d\n", XLOG_FMT);
        cmn_err(CE_DEBUG, "    log : uuid = ");
        for (b = 0; b < 16; b++)
                cmn_err(CE_DEBUG, "%02x", ((uchar_t *)&head->h_fs_uuid)[b]);
        cmn_err(CE_DEBUG, ", fmt = %d\n", INT_GET(head->h_fmt, ARCH_CONVERT));
}
#else
#define xlog_header_check_dump(mp, head)
#endif

/*
 * check log record header for recovery
 */
STATIC int
xlog_header_check_recover(
        xfs_mount_t             *mp,
        xlog_rec_header_t       *head)
{
        ASSERT(INT_GET(head->h_magicno, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM);

        /*
         * IRIX doesn't write the h_fmt field and leaves it zeroed
         * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
         * a dirty log created in IRIX.
         */
        if (unlikely(INT_GET(head->h_fmt, ARCH_CONVERT) != XLOG_FMT)) {
                xlog_warn(
        "XFS: dirty log written in incompatible format - can't recover");
                xlog_header_check_dump(mp, head);
                XFS_ERROR_REPORT("xlog_header_check_recover(1)",
                                 XFS_ERRLEVEL_HIGH, mp);
                return XFS_ERROR(EFSCORRUPTED);
        } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
                xlog_warn(
        "XFS: dirty log entry has mismatched uuid - can't recover");
                xlog_header_check_dump(mp, head);
                XFS_ERROR_REPORT("xlog_header_check_recover(2)",
                                 XFS_ERRLEVEL_HIGH, mp);
                return XFS_ERROR(EFSCORRUPTED);
        }
        return 0;
}

/*
 * read the head block of the log and check the header
 */
STATIC int
xlog_header_check_mount(
        xfs_mount_t             *mp,
        xlog_rec_header_t       *head)
{
        ASSERT(INT_GET(head->h_magicno, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM);

        if (uuid_is_nil(&head->h_fs_uuid)) {
                /*
                 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
                 * h_fs_uuid is nil, we assume this log was last mounted
                 * by IRIX and continue.
                 */
                xlog_warn("XFS: nil uuid in log - IRIX style log");
        } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
                xlog_warn("XFS: log has mismatched uuid - can't recover");
                xlog_header_check_dump(mp, head);
                XFS_ERROR_REPORT("xlog_header_check_mount",
                                 XFS_ERRLEVEL_HIGH, mp);
                return XFS_ERROR(EFSCORRUPTED);
        }
        return 0;
}

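/*
 * I/O completion callback for buffers written during recovery.  We make
 * no attempt to retry a failed write here; a metadata I/O error simply
 * shuts the filesystem down.
 */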
STATIC void
xlog_recover_iodone(
        struct xfs_buf  *bp)
{
        xfs_mount_t     *mp;

        ASSERT(XFS_BUF_FSPRIVATE(bp, void *));

        if (XFS_BUF_GETERROR(bp)) {
                /*
                 * We're not going to bother about retrying
                 * this during recovery. One strike!
                 */
                mp = XFS_BUF_FSPRIVATE(bp, xfs_mount_t *);
                xfs_ioerror_alert("xlog_recover_iodone",
                                  mp, bp, XFS_BUF_ADDR(bp));
                xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
        }
        XFS_BUF_SET_FSPRIVATE(bp, NULL);
        XFS_BUF_CLR_IODONE_FUNC(bp);
        xfs_biodone(bp);
}

/*
 * This routine finds (to an approximation) the first block in the physical
 * log which contains the given cycle.  It uses a binary search algorithm.
 * Note that the algorithm cannot be perfect because the disk will not
 * necessarily be perfect.
 */
int
xlog_find_cycle_start(
        xlog_t          *log,
        xfs_buf_t       *bp,
        xfs_daddr_t     first_blk,
        xfs_daddr_t     *last_blk,
        uint            cycle)
{
        xfs_caddr_t     offset;
        xfs_daddr_t     mid_blk;
        uint            mid_cycle;
        int             error;

        mid_blk = BLK_AVG(first_blk, *last_blk);
        while (mid_blk != first_blk && mid_blk != *last_blk) {
                if ((error = xlog_bread(log, mid_blk, 1, bp)))
                        return error;
                offset = xlog_align(log, mid_blk, 1, bp);
                mid_cycle = xlog_get_cycle(offset);
                if (mid_cycle == cycle) {
                        *last_blk = mid_blk;
                        /* last_half_cycle == mid_cycle */
                } else {
                        first_blk = mid_blk;
                        /* first_half_cycle == mid_cycle */
                }
                mid_blk = BLK_AVG(first_blk, *last_blk);
        }
        ASSERT((mid_blk == first_blk && mid_blk+1 == *last_blk) ||
               (mid_blk == *last_blk && mid_blk-1 == first_blk));

        return 0;
}

/*
 * Check that the range of blocks does not contain the cycle number
 * given.  The scan needs to occur from front to back and the ptr into the
 * region must be updated since a later routine will need to perform another
 * test.  If the region is completely good, we end up returning the same
 * last block number.
 *
 * Set blkno to -1 if we encounter no errors.  This is an invalid block number
 * since we don't ever expect logs to get this large.
 */
STATIC int
xlog_find_verify_cycle(
        xlog_t          *log,
        xfs_daddr_t     start_blk,
        int             nbblks,
        uint            stop_on_cycle_no,
        xfs_daddr_t     *new_blk)
{
        xfs_daddr_t     i, j;
        uint            cycle;
        xfs_buf_t       *bp;
        xfs_daddr_t     bufblks;
        xfs_caddr_t     buf = NULL;
        int             error = 0;

        bufblks = 1 << ffs(nbblks);

        while (!(bp = xlog_get_bp(log, bufblks))) {
                /* can't get enough memory to do everything in one big buffer */
                bufblks >>= 1;
                if (bufblks <= log->l_sectbb_log)
                        return ENOMEM;
        }

        for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
                int     bcount;

                bcount = min(bufblks, (start_blk + nbblks - i));

                if ((error = xlog_bread(log, i, bcount, bp)))
                        goto out;

                buf = xlog_align(log, i, bcount, bp);
                for (j = 0; j < bcount; j++) {
                        cycle = xlog_get_cycle(buf);
                        if (cycle == stop_on_cycle_no) {
                                *new_blk = i+j;
                                goto out;
                        }

                        buf += BBSIZE;
                }
        }

        *new_blk = -1;

out:
        xlog_put_bp(bp);
        return error;
}

/*
 * Potentially back up over a partial log record write.
 *
 * In the typical case, last_blk is the number of the block directly after
 * a good log record.  Therefore, we subtract one to get the block number
 * of the last block in the given buffer.  extra_bblks contains the number
 * of blocks we would have read on a previous read.  This happens when the
 * last log record is split over the end of the physical log.
 *
 * extra_bblks is the number of blocks potentially verified on a previous
 * call to this routine.
 */
STATIC int
xlog_find_verify_log_record(
        xlog_t                  *log,
        xfs_daddr_t             start_blk,
        xfs_daddr_t             *last_blk,
        int                     extra_bblks)
{
        xfs_daddr_t             i;
        xfs_buf_t               *bp;
        xfs_caddr_t             offset = NULL;
        xlog_rec_header_t       *head = NULL;
        int                     error = 0;
        int                     smallmem = 0;
        int                     num_blks = *last_blk - start_blk;
        int                     xhdrs;

        ASSERT(start_blk != 0 || *last_blk != start_blk);

        if (!(bp = xlog_get_bp(log, num_blks))) {
                if (!(bp = xlog_get_bp(log, 1)))
                        return ENOMEM;
                smallmem = 1;
        } else {
                if ((error = xlog_bread(log, start_blk, num_blks, bp)))
                        goto out;
                offset = xlog_align(log, start_blk, num_blks, bp);
                offset += ((num_blks - 1) << BBSHIFT);
        }

        for (i = (*last_blk) - 1; i >= 0; i--) {
                if (i < start_blk) {
                        /* valid log record not found */
                        xlog_warn(
                "XFS: Log inconsistent (didn't find previous header)");
                        ASSERT(0);
                        error = XFS_ERROR(EIO);
                        goto out;
                }

                if (smallmem) {
                        if ((error = xlog_bread(log, i, 1, bp)))
                                goto out;
                        offset = xlog_align(log, i, 1, bp);
                }

                head = (xlog_rec_header_t *)offset;

                if (XLOG_HEADER_MAGIC_NUM ==
                    INT_GET(head->h_magicno, ARCH_CONVERT))
                        break;

                if (!smallmem)
                        offset -= BBSIZE;
        }

        /*
         * We hit the beginning of the physical log & still no header.  Return
         * to caller.  If caller can handle a return of -1, then this routine
         * will be called again for the end of the physical log.
         */
        if (i == -1) {
                error = -1;
                goto out;
        }

        /*
         * We have the final block of the good log (the first block
         * of the log record _before_ the head).  So we check the uuid.
         */
        if ((error = xlog_header_check_mount(log->l_mp, head)))
                goto out;

        /*
         * We may have found a log record header before we expected one.
         * last_blk will be the 1st block # with a given cycle #.  We may end
         * up reading an entire log record.  In this case, we don't want to
         * reset last_blk.  Only when last_blk points in the middle of a log
         * record do we update last_blk.
         */
        if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
                uint    h_size = INT_GET(head->h_size, ARCH_CONVERT);

                xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
                if (h_size % XLOG_HEADER_CYCLE_SIZE)
                        xhdrs++;
        } else {
                xhdrs = 1;
        }

        if (*last_blk - i + extra_bblks
                        != BTOBB(INT_GET(head->h_len, ARCH_CONVERT)) + xhdrs)
                *last_blk = i;

out:
        xlog_put_bp(bp);
        return error;
}

/*
 * Head is defined to be the point of the log where the next log write
 * could go.  This means that incomplete LR writes at the end are
 * eliminated when calculating the head.  We aren't guaranteed that previous
 * LRs have complete transactions.  We only know that a cycle number of
 * current cycle number -1 won't be present in the log if we start writing
 * from our current block number.
 *
 * last_blk contains the block number of the first block with a given
 * cycle number.
 *
 * Return: zero if normal, non-zero if error.
 */
STATIC int
xlog_find_head(
        xlog_t          *log,
        xfs_daddr_t     *return_head_blk)
{
        xfs_buf_t       *bp;
        xfs_caddr_t     offset;
        xfs_daddr_t     new_blk, first_blk, start_blk, last_blk, head_blk;
        int             num_scan_bblks;
        uint            first_half_cycle, last_half_cycle;
        uint            stop_on_cycle;
        int             error, log_bbnum = log->l_logBBsize;

        /* Is the end of the log device zeroed? */
        if ((error = xlog_find_zeroed(log, &first_blk)) == -1) {
                *return_head_blk = first_blk;

                /* Is the whole lot zeroed? */
                if (!first_blk) {
                        /* Linux XFS shouldn't generate totally zeroed logs -
                         * mkfs etc write a dummy unmount record to a fresh
                         * log so we can store the uuid in there
                         */
                        xlog_warn("XFS: totally zeroed log");
                }

                return 0;
        } else if (error) {
                xlog_warn("XFS: empty log check failed");
                return error;
        }

        first_blk = 0;                  /* get cycle # of 1st block */
        bp = xlog_get_bp(log, 1);
        if (!bp)
                return ENOMEM;
        if ((error = xlog_bread(log, 0, 1, bp)))
                goto bp_err;
        offset = xlog_align(log, 0, 1, bp);
        first_half_cycle = xlog_get_cycle(offset);

        last_blk = head_blk = log_bbnum - 1;    /* get cycle # of last block */
        if ((error = xlog_bread(log, last_blk, 1, bp)))
                goto bp_err;
        offset = xlog_align(log, last_blk, 1, bp);
        last_half_cycle = xlog_get_cycle(offset);
        ASSERT(last_half_cycle != 0);

        /*
         * If the 1st half cycle number is equal to the last half cycle number,
         * then the entire log is stamped with the same cycle number.  In this
         * case, head_blk can't be set to zero (which makes sense).  The below
         * math doesn't work out properly with head_blk equal to zero.  Instead,
         * we set it to log_bbnum which is an invalid block number, but this
         * value makes the math correct.  If head_blk doesn't change through
         * all the tests below, *head_blk is set to zero at the very end rather
         * than log_bbnum.  In a sense, log_bbnum and zero are the same block
         * in a circular file.
         */
        if (first_half_cycle == last_half_cycle) {
                /*
                 * In this case we believe that the entire log should have
                 * cycle number last_half_cycle.  We need to scan backwards
                 * from the end verifying that there are no holes still
                 * containing last_half_cycle - 1.  If we find such a hole,
                 * then the start of that hole will be the new head.  The
                 * simple case looks like
                 *        x | x ... | x - 1 | x
                 * Another case that fits this picture would be
                 *        x | x + 1 | x ... | x
                 * In this case the head really is somewhere at the end of the
                 * log, as one of the latest writes at the beginning was
                 * incomplete.
                 * One more case is
                 *        x | x + 1 | x ... | x - 1 | x
                 * This is really the combination of the above two cases, and
                 * the head has to end up at the start of the x-1 hole at the
                 * end of the log.
                 *
                 * In the 256k log case, we will read from the beginning to the
                 * end of the log and search for cycle numbers equal to x-1.
                 * We don't worry about the x+1 blocks that we encounter,
                 * because we know that they cannot be the head since the log
                 * started with x.
                 */
                head_blk = log_bbnum;
                stop_on_cycle = last_half_cycle - 1;
        } else {
                /*
                 * In this case we want to find the first block with cycle
                 * number matching last_half_cycle.  We expect the log to be
                 * some variation on
                 *        x + 1 ... | x ...
                 * The first block with cycle number x (last_half_cycle) will
                 * be where the new head belongs.  First we do a binary search
                 * for the first occurrence of last_half_cycle.  The binary
                 * search may not be totally accurate, so then we scan back
                 * from there looking for occurrences of last_half_cycle before
                 * us.  If that backwards scan wraps around the beginning of
                 * the log, then we look for occurrences of last_half_cycle - 1
                 * at the end of the log.  The cases we're looking for look
                 * like
                 *        x + 1 ... | x | x + 1 | x ...
                 *                               ^ binary search stopped here
                 * or
                 *        x + 1 ... | x ... | x - 1 | x
                 *        <---------> less than scan distance
                 */
                stop_on_cycle = last_half_cycle;
                if ((error = xlog_find_cycle_start(log, bp, first_blk,
                                                &head_blk, last_half_cycle)))
                        goto bp_err;
        }

        /*
         * Now validate the answer.  Scan back some number of maximum possible
         * blocks and make sure each one has the expected cycle number.  The
         * maximum is determined by the total possible amount of buffering
         * in the in-core log.  The following number can be made tighter if
         * we actually look at the block size of the filesystem.
         */
        num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
        if (head_blk >= num_scan_bblks) {
                /*
                 * We are guaranteed that the entire check can be performed
                 * in one buffer.
                 */
                start_blk = head_blk - num_scan_bblks;
                if ((error = xlog_find_verify_cycle(log,
                                                start_blk, num_scan_bblks,
                                                stop_on_cycle, &new_blk)))
                        goto bp_err;
                if (new_blk != -1)
                        head_blk = new_blk;
        } else {                /* need to read 2 parts of log */
                /*
                 * We are going to scan backwards in the log in two parts.
                 * First we scan the physical end of the log.  In this part
                 * of the log, we are looking for blocks with cycle number
                 * last_half_cycle - 1.
                 * If we find one, then we know that the log starts there, as
                 * we've found a hole that didn't get written in going around
                 * the end of the physical log.  The simple case for this is
                 *        x + 1 ... | x ... | x - 1 | x
                 *        <---------> less than scan distance
                 * If all of the blocks at the end of the log have cycle number
                 * last_half_cycle, then we check the blocks at the start of
                 * the log looking for occurrences of last_half_cycle.  If we
                 * find one, then our current estimate for the location of the
                 * first occurrence of last_half_cycle is wrong and we move
                 * back to the hole we've found.  This case looks like
                 *        x + 1 ... | x | x + 1 | x ...
                 *                               ^ binary search stopped here
                 * Another case we need to handle that only occurs in 256k
                 * logs is
                 *        x + 1 ... | x ... | x+1 | x ...
                 *                   ^ binary search stops here
                 * In a 256k log, the scan at the end of the log will see the
                 * x + 1 blocks.  We need to skip past those since that is
                 * certainly not the head of the log.  By searching for
                 * last_half_cycle-1 we accomplish that.
                 */
                start_blk = log_bbnum - num_scan_bblks + head_blk;
                ASSERT(head_blk <= INT_MAX &&
                        (xfs_daddr_t) num_scan_bblks - head_blk >= 0);
                if ((error = xlog_find_verify_cycle(log, start_blk,
                                        num_scan_bblks - (int)head_blk,
                                        (stop_on_cycle - 1), &new_blk)))
                        goto bp_err;
                if (new_blk != -1) {
                        head_blk = new_blk;
                        goto bad_blk;
                }

                /*
                 * Scan beginning of log now.  The last part of the physical
                 * log is good.  This scan needs to verify that it doesn't find
                 * the last_half_cycle.
                 */
                start_blk = 0;
                ASSERT(head_blk <= INT_MAX);
                if ((error = xlog_find_verify_cycle(log,
                                        start_blk, (int)head_blk,
                                        stop_on_cycle, &new_blk)))
                        goto bp_err;
                if (new_blk != -1)
                        head_blk = new_blk;
        }

 bad_blk:
        /*
         * Now we need to make sure head_blk is not pointing to a block in
         * the middle of a log record.
         */
        num_scan_bblks = XLOG_REC_SHIFT(log);
        if (head_blk >= num_scan_bblks) {
                start_blk = head_blk - num_scan_bblks; /* don't read head_blk */

                /* start ptr at last block ptr before head_blk */
                if ((error = xlog_find_verify_log_record(log, start_blk,
                                                        &head_blk, 0)) == -1) {
                        error = XFS_ERROR(EIO);
                        goto bp_err;
                } else if (error)
                        goto bp_err;
        } else {
                start_blk = 0;
                ASSERT(head_blk <= INT_MAX);
                if ((error = xlog_find_verify_log_record(log, start_blk,
                                                        &head_blk, 0)) == -1) {
                        /* We hit the beginning of the log during our search */
                        start_blk = log_bbnum - num_scan_bblks + head_blk;
                        new_blk = log_bbnum;
                        ASSERT(start_blk <= INT_MAX &&
                                (xfs_daddr_t) log_bbnum-start_blk >= 0);
                        ASSERT(head_blk <= INT_MAX);
                        if ((error = xlog_find_verify_log_record(log,
                                                        start_blk, &new_blk,
                                                        (int)head_blk)) == -1) {
                                error = XFS_ERROR(EIO);
                                goto bp_err;
                        } else if (error)
                                goto bp_err;
                        if (new_blk != log_bbnum)
                                head_blk = new_blk;
                } else if (error)
                        goto bp_err;
        }

        xlog_put_bp(bp);
        if (head_blk == log_bbnum)
                *return_head_blk = 0;
        else
                *return_head_blk = head_blk;
        /*
         * When returning here, we have a good block number.  Bad block
         * means that during a previous crash, we didn't have a clean break
         * from cycle number N to cycle number N-1.  In this case, we need
         * to find the first block with cycle number N-1.
         */
        return 0;

 bp_err:
        xlog_put_bp(bp);

        if (error)
                xlog_warn("XFS: failed to find log head");
        return error;
}

/*
 * Find the sync block number or the tail of the log.
 *
 * This will be the block number of the last record to have its
 * associated buffers synced to disk.  Every log record header has
 * a sync lsn embedded in it.  LSNs hold block numbers, so it is easy
 * to get a sync block number.  The only concern is to figure out which
 * log record header to believe.
 *
 * The following algorithm uses the log record header with the largest
 * lsn.  The entire log record does not need to be valid.  We only care
 * that the header is valid.
 *
 * We could speed up the search by using the current head_blk buffer, but
 * it is not available.
 */
int
xlog_find_tail(
        xlog_t                  *log,
        xfs_daddr_t             *head_blk,
        xfs_daddr_t             *tail_blk)
{
        xlog_rec_header_t       *rhead;
        xlog_op_header_t        *op_head;
        xfs_caddr_t             offset = NULL;
        xfs_buf_t               *bp;
        int                     error, i, found;
        xfs_daddr_t             umount_data_blk;
        xfs_daddr_t             after_umount_blk;
        xfs_lsn_t               tail_lsn;
        int                     hblks;

        found = 0;

        /*
         * Find previous log record
         */
        if ((error = xlog_find_head(log, head_blk)))
                return error;

        bp = xlog_get_bp(log, 1);
        if (!bp)
                return ENOMEM;
        if (*head_blk == 0) {                           /* special case */
                if ((error = xlog_bread(log, 0, 1, bp)))
                        goto bread_err;
                offset = xlog_align(log, 0, 1, bp);
                if (xlog_get_cycle(offset) == 0) {
                        *tail_blk = 0;
                        /* leave all other log inited values alone */
                        goto exit;
                }
        }

        /*
         * Search backwards looking for log record header block
         */
        ASSERT(*head_blk < INT_MAX);
        for (i = (int)(*head_blk) - 1; i >= 0; i--) {
                if ((error = xlog_bread(log, i, 1, bp)))
                        goto bread_err;
                offset = xlog_align(log, i, 1, bp);
                if (XLOG_HEADER_MAGIC_NUM ==
                    INT_GET(*(uint *)offset, ARCH_CONVERT)) {
                        found = 1;
                        break;
                }
        }
        /*
         * If we haven't found the log record header block, start looking
         * again from the end of the physical log.  XXXmiken: There should be
         * a check here to make sure we didn't search more than N blocks in
         * the previous code.
         */
        if (!found) {
                for (i = log->l_logBBsize - 1; i >= (int)(*head_blk); i--) {
                        if ((error = xlog_bread(log, i, 1, bp)))
                                goto bread_err;
                        offset = xlog_align(log, i, 1, bp);
                        if (XLOG_HEADER_MAGIC_NUM ==
                            INT_GET(*(uint *)offset, ARCH_CONVERT)) {
                                found = 2;
                                break;
                        }
                }
        }
        if (!found) {
                xlog_warn("XFS: xlog_find_tail: couldn't find sync record");
                xlog_put_bp(bp);
                ASSERT(0);
                return XFS_ERROR(EIO);
        }

        /* find blk_no of tail of log */
        rhead = (xlog_rec_header_t *)offset;
        *tail_blk = BLOCK_LSN(INT_GET(rhead->h_tail_lsn, ARCH_CONVERT));

        /*
         * Reset log values according to the state of the log when we
         * crashed.  In the case where head_blk == 0, we bump curr_cycle
         * by one because the next write starts a new cycle rather than
         * continuing the cycle of the last good log record.  At this
         * point we have guaranteed that all partial log records have been
         * accounted for.  Therefore, we know that the last good log record
         * written was complete and ended exactly on the end boundary
         * of the physical log.
         */
        log->l_prev_block = i;
        log->l_curr_block = (int)*head_blk;
        log->l_curr_cycle = INT_GET(rhead->h_cycle, ARCH_CONVERT);
        if (found == 2)
                log->l_curr_cycle++;
        log->l_tail_lsn = INT_GET(rhead->h_tail_lsn, ARCH_CONVERT);
        log->l_last_sync_lsn = INT_GET(rhead->h_lsn, ARCH_CONVERT);
        log->l_grant_reserve_cycle = log->l_curr_cycle;
        log->l_grant_reserve_bytes = BBTOB(log->l_curr_block);
        log->l_grant_write_cycle = log->l_curr_cycle;
        log->l_grant_write_bytes = BBTOB(log->l_curr_block);

        /*
         * Look for unmount record.  If we find it, then we know there
         * was a clean unmount.  Since 'i' could be the last block in
         * the physical log, we convert to a log block before comparing
         * to the head_blk.
         *
         * Save the current tail lsn to use to pass to
         * xlog_clear_stale_blocks() below.  We won't want to clear the
         * unmount record if there is one, so we pass the lsn of the
         * unmount record rather than the block after it.
         */
        if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
                int     h_size = INT_GET(rhead->h_size, ARCH_CONVERT);
                int     h_version = INT_GET(rhead->h_version, ARCH_CONVERT);

                if ((h_version & XLOG_VERSION_2) &&
                    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
                        hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
                        if (h_size % XLOG_HEADER_CYCLE_SIZE)
                                hblks++;
                } else {
                        hblks = 1;
                }
        } else {
                hblks = 1;
        }
        after_umount_blk = (i + hblks + (int)
                BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT))) % log->l_logBBsize;
        tail_lsn = log->l_tail_lsn;
        if (*head_blk == after_umount_blk &&
            INT_GET(rhead->h_num_logops, ARCH_CONVERT) == 1) {
                umount_data_blk = (i + hblks) % log->l_logBBsize;
                if ((error = xlog_bread(log, umount_data_blk, 1, bp))) {
                        goto bread_err;
                }
                offset = xlog_align(log, umount_data_blk, 1, bp);
                op_head = (xlog_op_header_t *)offset;
                if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
                        /*
                         * Set tail and last sync so that newly written
                         * log records will point recovery to after the
                         * current unmount record.
                         */
                        log->l_tail_lsn =
                                xlog_assign_lsn(log->l_curr_cycle,
                                                after_umount_blk);
                        log->l_last_sync_lsn =
                                xlog_assign_lsn(log->l_curr_cycle,
                                                after_umount_blk);
                        *tail_blk = after_umount_blk;

                        /*
                         * Note that the unmount was clean. If the unmount
                         * was not clean, we need to know this to rebuild the
                         * superblock counters from the perag headers if we
                         * have a filesystem using non-persistent counters.
                         */
                        log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
                }
        }

        /*
         * Make sure that there are no blocks in front of the head
         * with the same cycle number as the head.  This can happen
         * because we allow multiple outstanding log writes concurrently,
         * and the later writes might make it out before earlier ones.
         *
         * We use the lsn from before modifying it so that we'll never
         * overwrite the unmount record after a clean unmount.
         *
         * Do this only if we are going to recover the filesystem
         *
         * NOTE: This used to say "if (!readonly)"
         * However on Linux, we can & do recover a read-only filesystem.
         * We only skip recovery if NORECOVERY is specified on mount,
         * in which case we would not be here.
         *
         * But... if the -device- itself is readonly, just skip this.
         * We can't recover this device anyway, so it won't matter.
         */
        if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp)) {
                error = xlog_clear_stale_blocks(log, tail_lsn);
        }

bread_err:
exit:
        xlog_put_bp(bp);

        if (error)
                xlog_warn("XFS: failed to locate log tail");
        return error;
}

/*
 * Is the log zeroed at all?
 *
 * The last binary search should be changed to perform an X block read
 * once X becomes small enough.  You can then search linearly through
 * the X blocks.  This will cut down on the number of reads we need to do.
 *
 * If the log is partially zeroed, this routine will pass back the blkno
 * of the first block with cycle number 0.  It won't have a complete LR
 * preceding it.
 *
 * Return:
 *      0  => the log is completely written to
 *      -1 => use *blk_no as the first block of the log
 *      >0 => error has occurred
 */
int
xlog_find_zeroed(
        xlog_t          *log,
        xfs_daddr_t     *blk_no)
{
        xfs_buf_t       *bp;
        xfs_caddr_t     offset;
        uint            first_cycle, last_cycle;
        xfs_daddr_t     new_blk, last_blk, start_blk;
        xfs_daddr_t     num_scan_bblks;
        int             error, log_bbnum = log->l_logBBsize;

        *blk_no = 0;

        /* check totally zeroed log */
        bp = xlog_get_bp(log, 1);
        if (!bp)
                return ENOMEM;
        if ((error = xlog_bread(log, 0, 1, bp)))
                goto bp_err;
        offset = xlog_align(log, 0, 1, bp);
        first_cycle = xlog_get_cycle(offset);
        if (first_cycle == 0) {         /* completely zeroed log */
                *blk_no = 0;
                xlog_put_bp(bp);
                return -1;
        }

        /* check partially zeroed log */
        if ((error = xlog_bread(log, log_bbnum-1, 1, bp)))
                goto bp_err;
        offset = xlog_align(log, log_bbnum-1, 1, bp);
        last_cycle = xlog_get_cycle(offset);
        if (last_cycle != 0) {          /* log completely written to */
                xlog_put_bp(bp);
                return 0;
        } else if (first_cycle != 1) {
                /*
                 * If the cycle of the last block is zero, the cycle of
                 * the first block must be 1. If it's not, maybe we're
                 * not looking at a log... Bail out.
                 */
                xlog_warn("XFS: Log inconsistent or not a log (last==0, first!=1)");
                xlog_put_bp(bp);
                return XFS_ERROR(EINVAL);
        }

        /* we have a partially zeroed log */
        last_blk = log_bbnum-1;
        if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0)))
                goto bp_err;

        /*
         * Validate the answer.  Because there is no way to guarantee that
         * the entire log is made up of log records which are the same size,
         * we scan over the defined maximum blocks.  At this point, the maximum
         * is not chosen to mean anything special.   XXXmiken
         */
        num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
        ASSERT(num_scan_bblks <= INT_MAX);

        if (last_blk < num_scan_bblks)
                num_scan_bblks = last_blk;
        start_blk = last_blk - num_scan_bblks;

        /*
         * We search for any instances of cycle number 0 that occur before
         * our current estimate of the head.  What we're trying to detect is
         *        1 ... | 0 | 1 | 0...
         *                       ^ binary search ends here
         */
        if ((error = xlog_find_verify_cycle(log, start_blk,
                                         (int)num_scan_bblks, 0, &new_blk)))
                goto bp_err;
        if (new_blk != -1)
                last_blk = new_blk;

        /*
         * Potentially back up over a partial log record write.  We don't need
         * to search the end of the log because we know it is zero.
         */
        if ((error = xlog_find_verify_log_record(log, start_blk,
                                &last_blk, 0)) == -1) {
                error = XFS_ERROR(EIO);
                goto bp_err;
        } else if (error)
                goto bp_err;

        *blk_no = last_blk;
bp_err:
        xlog_put_bp(bp);
        if (error)
                return error;
        return -1;
}

/*
 * These are simple subroutines used by xlog_clear_stale_blocks() below
 * to initialize a buffer full of empty log record headers and write
 * them into the log.
 */
STATIC void
xlog_add_record(
        xlog_t                  *log,
        xfs_caddr_t             buf,
        int                     cycle,
        int                     block,
        int                     tail_cycle,
        int                     tail_block)
{
        xlog_rec_header_t       *recp = (xlog_rec_header_t *)buf;

        memset(buf, 0, BBSIZE);
        INT_SET(recp->h_magicno, ARCH_CONVERT, XLOG_HEADER_MAGIC_NUM);
        INT_SET(recp->h_cycle, ARCH_CONVERT, cycle);
        INT_SET(recp->h_version, ARCH_CONVERT,
                        XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) ? 2 : 1);
        INT_SET(recp->h_lsn, ARCH_CONVERT, xlog_assign_lsn(cycle, block));
        INT_SET(recp->h_tail_lsn, ARCH_CONVERT,
                xlog_assign_lsn(tail_cycle, tail_block));
        INT_SET(recp->h_fmt, ARCH_CONVERT, XLOG_FMT);
        memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
}

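/*
 * Fill "blocks" blocks starting at start_block with empty log record
 * headers stamped with the given cycle number.  On sector-aligned logs,
 * partially covered sectors at either end of the range are read back
 * first so the writes do not clobber adjacent data.
 */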
STATIC int
xlog_write_log_records(
        xlog_t          *log,
        int             cycle,
        int             start_block,
        int             blocks,
        int             tail_cycle,
        int             tail_block)
{
        xfs_caddr_t     offset;
        xfs_buf_t       *bp;
        int             balign, ealign;
        int             sectbb = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, 1);
        int             end_block = start_block + blocks;
        int             bufblks;
        int             error = 0;
        int             i, j = 0;

        bufblks = 1 << ffs(blocks);
        while (!(bp = xlog_get_bp(log, bufblks))) {
                bufblks >>= 1;
                if (bufblks <= log->l_sectbb_log)
                        return ENOMEM;
        }

        /* We may need to do a read at the start to fill in part of
         * the buffer in the starting sector not covered by the first
         * write below.
         */
        balign = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, start_block);
        if (balign != start_block) {
                if ((error = xlog_bread(log, start_block, 1, bp))) {
                        xlog_put_bp(bp);
                        return error;
                }
                j = start_block - balign;
        }

        for (i = start_block; i < end_block; i += bufblks) {
                int             bcount, endcount;

                bcount = min(bufblks, end_block - start_block);
                endcount = bcount - j;

                /* We may need to do a read at the end to fill in part of
                 * the buffer in the final sector not covered by the write.
                 * If this is the same sector as the above read, skip it.
                 */
                ealign = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, end_block);
                if (j == 0 && (start_block + endcount > ealign)) {
                        offset = XFS_BUF_PTR(bp);
                        balign = BBTOB(ealign - start_block);
                        XFS_BUF_SET_PTR(bp, offset + balign, BBTOB(sectbb));
                        if ((error = xlog_bread(log, ealign, sectbb, bp)))
                                break;
                        XFS_BUF_SET_PTR(bp, offset, bufblks);
                }

                offset = xlog_align(log, start_block, endcount, bp);
                for (; j < endcount; j++) {
                        xlog_add_record(log, offset, cycle, i+j,
                                        tail_cycle, tail_block);
                        offset += BBSIZE;
                }
                error = xlog_bwrite(log, start_block, endcount, bp);
                if (error)
                        break;
                start_block += endcount;
                j = 0;
        }
        xlog_put_bp(bp);
        return error;
}

/*
 * This routine is called to blow away any incomplete log writes out
 * in front of the log head.  We do this so that we won't become confused
 * if we come up, write only a little bit more, and then crash again.
 * If we leave the partial log records out there, this situation could
 * cause us to think those partial writes are valid blocks since they
 * have the current cycle number.  We get rid of them by overwriting them
 * with empty log records with the old cycle number rather than the
 * current one.
 *
 * The tail lsn is passed in rather than taken from
 * the log so that we will not write over the unmount record after a
 * clean unmount in a 512 block log.  Doing so would leave the log without
 * any valid log records in it until a new one was written.  If we crashed
 * during that time we would not be able to recover.
 */
STATIC int
xlog_clear_stale_blocks(
        xlog_t          *log,
        xfs_lsn_t       tail_lsn)
{
        int             tail_cycle, head_cycle;
        int             tail_block, head_block;
        int             tail_distance, max_distance;
        int             distance;
        int             error;

        tail_cycle = CYCLE_LSN(tail_lsn);
        tail_block = BLOCK_LSN(tail_lsn);
        head_cycle = log->l_curr_cycle;
        head_block = log->l_curr_block;

        /*
         * Figure out the distance between the new head of the log
         * and the tail.  We want to write over any blocks beyond the
         * head that we may have written just before the crash, but
         * we don't want to overwrite the tail of the log.
         */
        if (head_cycle == tail_cycle) {
                /*
                 * The tail is behind the head in the physical log,
                 * so the distance from the head to the tail is the
                 * distance from the head to the end of the log plus
                 * the distance from the beginning of the log to the
                 * tail.
                 */
                if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
                        XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
                                         XFS_ERRLEVEL_LOW, log->l_mp);
                        return XFS_ERROR(EFSCORRUPTED);
                }
                tail_distance = tail_block + (log->l_logBBsize - head_block);
        } else {
                /*
                 * The head is behind the tail in the physical log,
                 * so the distance from the head to the tail is just
                 * the tail block minus the head block.
                 */
                if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
                        XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
                                         XFS_ERRLEVEL_LOW, log->l_mp);
                        return XFS_ERROR(EFSCORRUPTED);
                }
                tail_distance = tail_block - head_block;
        }

        /*
         * If the head is right up against the tail, we can't clear
         * anything.
         */
        if (tail_distance <= 0) {
                ASSERT(tail_distance == 0);
                return 0;
        }

        max_distance = XLOG_TOTAL_REC_SHIFT(log);
        /*
         * Take the smaller of the maximum amount of outstanding I/O
         * we could have and the distance to the tail to clear out.
         * We take the smaller so that we don't overwrite the tail and
         * we don't waste all day writing from the head to the tail
         * for no reason.
         */
        max_distance = MIN(max_distance, tail_distance);

        if ((head_block + max_distance) <= log->l_logBBsize) {
                /*
                 * We can stomp all the blocks we need to without
                 * wrapping around the end of the log.  Just do it
                 * in a single write.  Use the cycle number of the
                 * current cycle minus one so that the log will look like:
                 *     n ... | n - 1 ...
                 */
                error = xlog_write_log_records(log, (head_cycle - 1),
                                head_block, max_distance, tail_cycle,
                                tail_block);
                if (error)
                        return error;
        } else {
                /*
                 * We need to wrap around the end of the physical log in
                 * order to clear all the blocks.  Do it in two separate
                 * I/Os.  The first write should be from the head to the
                 * end of the physical log, and it should use the current
                 * cycle number minus one just like above.
                 */
                distance = log->l_logBBsize - head_block;
                error = xlog_write_log_records(log, (head_cycle - 1),
                                head_block, distance, tail_cycle,
                                tail_block);

                if (error)
                        return error;

                /*
                 * Now write the blocks at the start of the physical log.
                 * This writes the remainder of the blocks we want to clear.
                 * It uses the current cycle number since we're now on the
                 * same cycle as the head so that we get:
                 *    n ... n ... | n - 1 ...
                 *    ^^^^^ blocks we're writing
                 */
                distance = max_distance - (log->l_logBBsize - head_block);
                error = xlog_write_log_records(log, head_cycle, 0, distance,
                                tail_cycle, tail_block);
                if (error)
                        return error;
        }

        return 0;
}

/******************************************************************************
 *
 *              Log recover routines
 *
 ******************************************************************************
 */

1327 STATIC xlog_recover_t *
1328 xlog_recover_find_tid(
1329         xlog_recover_t          *q,
1330         xlog_tid_t              tid)
1331 {
1332         xlog_recover_t          *p = q;
1333
1334         while (p != NULL) {
1335                 if (p->r_log_tid == tid)
1336                     break;
1337                 p = p->r_next;
1338         }
1339         return p;
1340 }
1341
1342 STATIC void
1343 xlog_recover_put_hashq(
1344         xlog_recover_t          **q,
1345         xlog_recover_t          *trans)
1346 {
1347         trans->r_next = *q;
1348         *q = trans;
1349 }
1350
1351 STATIC void
1352 xlog_recover_add_item(
1353         xlog_recover_item_t     **itemq)
1354 {
1355         xlog_recover_item_t     *item;
1356
1357         item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
1358         xlog_recover_insert_item_backq(itemq, item);
1359 }
1360
1361 STATIC int
1362 xlog_recover_add_to_cont_trans(
1363         xlog_recover_t          *trans,
1364         xfs_caddr_t             dp,
1365         int                     len)
1366 {
1367         xlog_recover_item_t     *item;
1368         xfs_caddr_t             ptr, old_ptr;
1369         int                     old_len;
1370
1371         item = trans->r_itemq;
1372         if (item == NULL) {
1373                 /* finish copying rest of trans header */
1374                 xlog_recover_add_item(&trans->r_itemq);
1375                 ptr = (xfs_caddr_t) &trans->r_theader +
1376                                 sizeof(xfs_trans_header_t) - len;
1377                 memcpy(ptr, dp, len); /* d, s, l */
1378                 return 0;
1379         }
1380         item = item->ri_prev;
1381
1382         old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
1383         old_len = item->ri_buf[item->ri_cnt-1].i_len;
1384
1385         ptr = kmem_realloc(old_ptr, len+old_len, old_len, 0u);
1386         memcpy(&ptr[old_len], dp, len); /* d, s, l */
1387         item->ri_buf[item->ri_cnt-1].i_len += len;
1388         item->ri_buf[item->ri_cnt-1].i_addr = ptr;
1389         return 0;
1390 }
1391
1392 /*
1393  * The next region to add is the start of a new region.  It could be
1394  * a whole region or just the first part of one.  Because
1395  * of this, the assumption here is that the type and size fields of all
1396  * format structures fit into the first 32 bits of the structure.
1397  *
1398  * This works because all regions must be 32 bit aligned.  Therefore, we
1399  * either have both fields or we have neither field.  In the case we have
1400  * neither field, the data part of the region is zero length.  We only have
1401  * a log_op_header and can throw away the header since a new one will appear
1402  * later.  If we have at least 4 bytes, then we can determine how many regions
1403  * will appear in the current log item.
1404  */
1405 STATIC int
1406 xlog_recover_add_to_trans(
1407         xlog_recover_t          *trans,
1408         xfs_caddr_t             dp,
1409         int                     len)
1410 {
1411         xfs_inode_log_format_t  *in_f;                  /* any will do */
1412         xlog_recover_item_t     *item;
1413         xfs_caddr_t             ptr;
1414
1415         if (!len)
1416                 return 0;
1417         item = trans->r_itemq;
1418         if (item == NULL) {
1419                 ASSERT(*(uint *)dp == XFS_TRANS_HEADER_MAGIC);
1420                 if (len == sizeof(xfs_trans_header_t))
1421                         xlog_recover_add_item(&trans->r_itemq);
1422                 memcpy(&trans->r_theader, dp, len); /* d, s, l */
1423                 return 0;
1424         }
1425
1426         ptr = kmem_alloc(len, KM_SLEEP);
1427         memcpy(ptr, dp, len);
1428         in_f = (xfs_inode_log_format_t *)ptr;
1429
1430         if (item->ri_prev->ri_total != 0 &&
1431              item->ri_prev->ri_total == item->ri_prev->ri_cnt) {
1432                 xlog_recover_add_item(&trans->r_itemq);
1433         }
1434         item = trans->r_itemq;
1435         item = item->ri_prev;
1436
1437         if (item->ri_total == 0) {              /* first region to be added */
1438                 item->ri_total  = in_f->ilf_size;
1439                 ASSERT(item->ri_total <= XLOG_MAX_REGIONS_IN_ITEM);
1440                 item->ri_buf = kmem_zalloc((item->ri_total *
1441                                             sizeof(xfs_log_iovec_t)), KM_SLEEP);
1442         }
1443         ASSERT(item->ri_total > item->ri_cnt);
1444         /* Description region is ri_buf[0] */
1445         item->ri_buf[item->ri_cnt].i_addr = ptr;
1446         item->ri_buf[item->ri_cnt].i_len  = len;
1447         item->ri_cnt++;
1448         return 0;
1449 }
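/*
 * Illustrative note on the 32 bit alignment assumption above: because the
 * first four bytes of every region arrive together, the cast to
 * xfs_inode_log_format_t (any format structure would do) makes
 *
 *	in_f->ilf_size
 *
 * valid as soon as len >= 4, and that value sizes ri_total and the
 * ri_buf array on the first region of each item.
 */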
1450
1451 STATIC void
1452 xlog_recover_new_tid(
1453         xlog_recover_t          **q,
1454         xlog_tid_t              tid,
1455         xfs_lsn_t               lsn)
1456 {
1457         xlog_recover_t          *trans;
1458
1459         trans = kmem_zalloc(sizeof(xlog_recover_t), KM_SLEEP);
1460         trans->r_log_tid   = tid;
1461         trans->r_lsn       = lsn;
1462         xlog_recover_put_hashq(q, trans);
1463 }
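/*
 * Usage sketch (not verbatim from this file): when an op header arrives
 * for transaction id tid, the caller probes the hash chain first and
 * allocates only on a miss, along the lines of:
 *
 *	trans = xlog_recover_find_tid(q, tid);
 *	if (trans == NULL)
 *		xlog_recover_new_tid(&q, tid, lsn);
 *
 * where q is the bucket head selected by hashing tid.
 */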
1464
1465 STATIC int
1466 xlog_recover_unlink_tid(
1467         xlog_recover_t          **q,
1468         xlog_recover_t          *trans)
1469 {
1470         xlog_recover_t          *tp;
1471         int                     found = 0;
1472
1473         ASSERT(trans != NULL);
1474         if (trans == *q) {
1475                 *q = (*q)->r_next;
1476         } else {
1477                 tp = *q;
1478                 while (tp) {
1479                         if (tp->r_next == trans) {
1480                                 found = 1;
1481                                 break;
1482                         }
1483                         tp = tp->r_next;
1484                 }
1485                 if (!found) {
1486                         xlog_warn(
1487                              "XFS: xlog_recover_unlink_tid: trans not found");
1488                         ASSERT(0);
1489                         return XFS_ERROR(EIO);
1490                 }
1491                 tp->r_next = tp->r_next->r_next;
1492         }
1493         return 0;
1494 }
1495
1496 STATIC void
1497 xlog_recover_insert_item_backq(
1498         xlog_recover_item_t     **q,
1499         xlog_recover_item_t     *item)
1500 {
1501         if (*q == NULL) {
1502                 item->ri_prev = item->ri_next = item;
1503                 *q = item;
1504         } else {
1505                 item->ri_next           = *q;
1506                 item->ri_prev           = (*q)->ri_prev;
1507                 (*q)->ri_prev           = item;
1508                 item->ri_prev->ri_next  = item;
1509         }
1510 }
1511
1512 STATIC void
1513 xlog_recover_insert_item_frontq(
1514         xlog_recover_item_t     **q,
1515         xlog_recover_item_t     *item)
1516 {
1517         xlog_recover_insert_item_backq(q, item);
1518         *q = item;
1519 }
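/*
 * The item queue is a circular doubly linked list with *q as the head.
 * Inserting A and then B with the backq routine gives
 *
 *	*q -> A <-> B <-> (back to A)
 *
 * i.e. new items land at the tail (ri_prev of the head).  The frontq
 * variant performs the same splice and then moves *q to the new item,
 * so it becomes the head instead.
 */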
1520
1521 STATIC int
1522 xlog_recover_reorder_trans(
1523         xlog_recover_t          *trans)
1524 {
1525         xlog_recover_item_t     *first_item, *itemq, *itemq_next;
1526         xfs_buf_log_format_t    *buf_f;
1527         ushort                  flags = 0;
1528
1529         first_item = itemq = trans->r_itemq;
1530         trans->r_itemq = NULL;
1531         do {
1532                 itemq_next = itemq->ri_next;
1533                 buf_f = (xfs_buf_log_format_t *)itemq->ri_buf[0].i_addr;
1534
1535                 switch (ITEM_TYPE(itemq)) {
1536                 case XFS_LI_BUF:
1537                         flags = buf_f->blf_flags;
1538                         if (!(flags & XFS_BLI_CANCEL)) {
1539                                 xlog_recover_insert_item_frontq(&trans->r_itemq,
1540                                                                 itemq);
1541                                 break;
1542                         }
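                        /* cancelled buffers fall through and are queued at the back */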
1543                 case XFS_LI_INODE:
1544                 case XFS_LI_DQUOT:
1545                 case XFS_LI_QUOTAOFF:
1546                 case XFS_LI_EFD:
1547                 case XFS_LI_EFI:
1548                         xlog_recover_insert_item_backq(&trans->r_itemq, itemq);
1549                         break;
1550                 default:
1551                         xlog_warn(
1552         "XFS: xlog_recover_reorder_trans: unrecognized type of log operation");
1553                         ASSERT(0);
1554                         return XFS_ERROR(EIO);
1555                 }
1556                 itemq = itemq_next;
1557         } while (first_item != itemq);
1558         return 0;
1559 }
1560
1561 /*
1562  * Build up the table of buf cancel records so that we don't replay
1563  * cancelled data in the second pass.  For buffer records that are
1564  * not cancel records, there is nothing to do here so we just return.
1565  *
1566  * If we get a cancel record which is already in the table, this indicates
1567  * that the buffer was cancelled multiple times.  In order to ensure
1568  * that during pass 2 we keep the record in the table until we reach its
1569  * last occurrence in the log, we keep a reference count in the cancel
1570  * record in the table to tell us how many times we expect to see this
1571  * record during the second pass.
1572  */
1573 STATIC void
1574 xlog_recover_do_buffer_pass1(
1575         xlog_t                  *log,
1576         xfs_buf_log_format_t    *buf_f)
1577 {
1578         xfs_buf_cancel_t        *bcp;
1579         xfs_buf_cancel_t        *nextp;
1580         xfs_buf_cancel_t        *prevp;
1581         xfs_buf_cancel_t        **bucket;
1582         xfs_daddr_t             blkno = 0;
1583         uint                    len = 0;
1584         ushort                  flags = 0;
1585
1586         switch (buf_f->blf_type) {
1587         case XFS_LI_BUF:
1588                 blkno = buf_f->blf_blkno;
1589                 len = buf_f->blf_len;
1590                 flags = buf_f->blf_flags;
1591                 break;
1592         }
1593
1594         /*
1595          * If this isn't a cancel buffer item, then just return.
1596          */
1597         if (!(flags & XFS_BLI_CANCEL))
1598                 return;
1599
1600         /*
1601          * Insert an xfs_buf_cancel record into the hash table of
1602          * them.  If there is already an identical record, bump
1603          * its reference count.
1604          */
1605         bucket = &log->l_buf_cancel_table[(__uint64_t)blkno %
1606                                           XLOG_BC_TABLE_SIZE];
1607         /*
1608          * If the hash bucket is empty then just insert a new record into
1609          * the bucket.
1610          */
1611         if (*bucket == NULL) {
1612                 bcp = (xfs_buf_cancel_t *)kmem_alloc(sizeof(xfs_buf_cancel_t),
1613                                                      KM_SLEEP);
1614                 bcp->bc_blkno = blkno;
1615                 bcp->bc_len = len;
1616                 bcp->bc_refcount = 1;
1617                 bcp->bc_next = NULL;
1618                 *bucket = bcp;
1619                 return;
1620         }
1621
1622         /*
1623          * The hash bucket is not empty, so search for duplicates of our
1624          * record.  If we find one then just bump its refcount.  If not,
1625          * add a new record at the end of the list.
1626          */
1627         prevp = NULL;
1628         nextp = *bucket;
1629         while (nextp != NULL) {
1630                 if (nextp->bc_blkno == blkno && nextp->bc_len == len) {
1631                         nextp->bc_refcount++;
1632                         return;
1633                 }
1634                 prevp = nextp;
1635                 nextp = nextp->bc_next;
1636         }
1637         ASSERT(prevp != NULL);
1638         bcp = (xfs_buf_cancel_t *)kmem_alloc(sizeof(xfs_buf_cancel_t),
1639                                              KM_SLEEP);
1640         bcp->bc_blkno = blkno;
1641         bcp->bc_len = len;
1642         bcp->bc_refcount = 1;
1643         bcp->bc_next = NULL;
1644         prevp->bc_next = bcp;
1645 }
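/*
 * Worked example (block numbers invented): if the log holds three cancel
 * records for the buffer at blkno 64, len 8, pass 1 leaves one
 * xfs_buf_cancel_t in the table with bc_refcount == 3.  Pass 2 then
 * decrements the count once per matching cancel item it sees and frees
 * the entry on the last one, so any later reuse of those blocks in the
 * log is replayed normally.
 */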
1646
1647 /*
1648  * Check to see whether the buffer being recovered has a corresponding
1649  * entry in the buffer cancel record table.  If it does then return 1
1650  * so that it will be cancelled, otherwise return 0.  If the buffer is
1651  * actually a buffer cancel item (XFS_BLI_CANCEL is set), then decrement
1652  * the refcount on the entry in the table and remove it from the table
1653  * if this is the last reference.
1654  *
1655  * We remove the cancel record from the table when we encounter its
1656  * last occurrence in the log so that if the same buffer is re-used
1657  * again after its last cancellation we actually replay the changes
1658  * made at that point.
1659  */
1660 STATIC int
1661 xlog_check_buffer_cancelled(
1662         xlog_t                  *log,
1663         xfs_daddr_t             blkno,
1664         uint                    len,
1665         ushort                  flags)
1666 {
1667         xfs_buf_cancel_t        *bcp;
1668         xfs_buf_cancel_t        *prevp;
1669         xfs_buf_cancel_t        **bucket;
1670
1671         if (log->l_buf_cancel_table == NULL) {
1672                 /*
1673                  * There is nothing in the table built in pass one,
1674                  * so this buffer must not be cancelled.
1675                  */
1676                 ASSERT(!(flags & XFS_BLI_CANCEL));
1677                 return 0;
1678         }
1679
1680         bucket = &log->l_buf_cancel_table[(__uint64_t)blkno %
1681                                           XLOG_BC_TABLE_SIZE];
1682         bcp = *bucket;
1683         if (bcp == NULL) {
1684                 /*
1685                  * There is no corresponding entry in the table built
1686                  * in pass one, so this buffer has not been cancelled.
1687                  */
1688                 ASSERT(!(flags & XFS_BLI_CANCEL));
1689                 return 0;
1690         }
1691
1692         /*
1693          * Search for an entry in the buffer cancel table that
1694          * matches our buffer.
1695          */
1696         prevp = NULL;
1697         while (bcp != NULL) {
1698                 if (bcp->bc_blkno == blkno && bcp->bc_len == len) {
1699                         /*
1700                          * We've got a match, so return 1 so that the
1701                          * recovery of this buffer is cancelled.
1702                          * If this buffer is actually a buffer cancel
1703                          * log item, then decrement the refcount on the
1704                          * one in the table and remove it if this is the
1705                          * last reference.
1706                          */
1707                         if (flags & XFS_BLI_CANCEL) {
1708                                 bcp->bc_refcount--;
1709                                 if (bcp->bc_refcount == 0) {
1710                                         if (prevp == NULL) {
1711                                                 *bucket = bcp->bc_next;
1712                                         } else {
1713                                                 prevp->bc_next = bcp->bc_next;
1714                                         }
1715                                         kmem_free(bcp,
1716                                                   sizeof(xfs_buf_cancel_t));
1717                                 }
1718                         }
1719                         return 1;
1720                 }
1721                 prevp = bcp;
1722                 bcp = bcp->bc_next;
1723         }
1724         /*
1725          * We didn't find a corresponding entry in the table, so
1726          * return 0 so that the buffer is NOT cancelled.
1727          */
1728         ASSERT(!(flags & XFS_BLI_CANCEL));
1729         return 0;
1730 }
1731
1732 STATIC int
1733 xlog_recover_do_buffer_pass2(
1734         xlog_t                  *log,
1735         xfs_buf_log_format_t    *buf_f)
1736 {
1737         xfs_daddr_t             blkno = 0;
1738         ushort                  flags = 0;
1739         uint                    len = 0;
1740
1741         switch (buf_f->blf_type) {
1742         case XFS_LI_BUF:
1743                 blkno = buf_f->blf_blkno;
1744                 flags = buf_f->blf_flags;
1745                 len = buf_f->blf_len;
1746                 break;
1747         }
1748
1749         return xlog_check_buffer_cancelled(log, blkno, len, flags);
1750 }
1751
1752 /*
1753  * Perform recovery for a buffer full of inodes.  In these buffers,
1754  * the only data which should be recovered is that which corresponds
1755  * to the di_next_unlinked pointers in the on disk inode structures.
1756  * The rest of the data for the inodes is always logged through the
1757  * inodes themselves rather than the inode buffer and is recovered
1758  * in xlog_recover_do_inode_trans().
1759  *
1760  * The only time when buffers full of inodes are fully recovered is
1761  * when the buffer is full of newly allocated inodes.  In this case
1762  * the buffer will not be marked as an inode buffer and so will be
1763  * sent to xlog_recover_do_reg_buffer() below during recovery.
1764  */
1765 STATIC int
1766 xlog_recover_do_inode_buffer(
1767         xfs_mount_t             *mp,
1768         xlog_recover_item_t     *item,
1769         xfs_buf_t               *bp,
1770         xfs_buf_log_format_t    *buf_f)
1771 {
1772         int                     i;
1773         int                     item_index;
1774         int                     bit;
1775         int                     nbits;
1776         int                     reg_buf_offset;
1777         int                     reg_buf_bytes;
1778         int                     next_unlinked_offset;
1779         int                     inodes_per_buf;
1780         xfs_agino_t             *logged_nextp;
1781         xfs_agino_t             *buffer_nextp;
1782         unsigned int            *data_map = NULL;
1783         unsigned int            map_size = 0;
1784
1785         switch (buf_f->blf_type) {
1786         case XFS_LI_BUF:
1787                 data_map = buf_f->blf_data_map;
1788                 map_size = buf_f->blf_map_size;
1789                 break;
1790         }
1791         /*
1792          * Set the variables corresponding to the current region to
1793          * 0 so that we'll initialize them on the first pass through
1794          * the loop.
1795          */
1796         reg_buf_offset = 0;
1797         reg_buf_bytes = 0;
1798         bit = 0;
1799         nbits = 0;
1800         item_index = 0;
1801         inodes_per_buf = XFS_BUF_COUNT(bp) >> mp->m_sb.sb_inodelog;
1802         for (i = 0; i < inodes_per_buf; i++) {
1803                 next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
1804                         offsetof(xfs_dinode_t, di_next_unlinked);
1805
1806                 while (next_unlinked_offset >=
1807                        (reg_buf_offset + reg_buf_bytes)) {
1808                         /*
1809                          * The next di_next_unlinked field is beyond
1810                          * the current logged region.  Find the next
1811                          * logged region that contains or is beyond
1812                          * the current di_next_unlinked field.
1813                          */
1814                         bit += nbits;
1815                         bit = xfs_next_bit(data_map, map_size, bit);
1816
1817                         /*
1818                          * If there are no more logged regions in the
1819                          * buffer, then we're done.
1820                          */
1821                         if (bit == -1) {
1822                                 return 0;
1823                         }
1824
1825                         nbits = xfs_contig_bits(data_map, map_size,
1826                                                          bit);
1827                         ASSERT(nbits > 0);
1828                         reg_buf_offset = bit << XFS_BLI_SHIFT;
1829                         reg_buf_bytes = nbits << XFS_BLI_SHIFT;
1830                         item_index++;
1831                 }
1832
1833                 /*
1834                  * If the current logged region starts after the current
1835                  * di_next_unlinked field, then move on to the next
1836                  * di_next_unlinked field.
1837                  */
1838                 if (next_unlinked_offset < reg_buf_offset) {
1839                         continue;
1840                 }
1841
1842                 ASSERT(item->ri_buf[item_index].i_addr != NULL);
1843                 ASSERT((item->ri_buf[item_index].i_len % XFS_BLI_CHUNK) == 0);
1844                 ASSERT((reg_buf_offset + reg_buf_bytes) <= XFS_BUF_COUNT(bp));
1845
1846                 /*
1847                  * The current logged region contains a copy of the
1848                  * current di_next_unlinked field.  Extract its value
1849                  * and copy it to the buffer copy.
1850                  */
1851                 logged_nextp = (xfs_agino_t *)
1852                                ((char *)(item->ri_buf[item_index].i_addr) +
1853                                 (next_unlinked_offset - reg_buf_offset));
1854                 if (unlikely(*logged_nextp == 0)) {
1855                         xfs_fs_cmn_err(CE_ALERT, mp,
1856                                 "bad inode buffer log record (ptr = 0x%p, bp = 0x%p).  XFS trying to replay bad (0) inode di_next_unlinked field",
1857                                 item, bp);
1858                         XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
1859                                          XFS_ERRLEVEL_LOW, mp);
1860                         return XFS_ERROR(EFSCORRUPTED);
1861                 }
1862
1863                 buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp,
1864                                               next_unlinked_offset);
1865                 *buffer_nextp = *logged_nextp;
1866         }
1867
1868         return 0;
1869 }
1870
1871 /*
1872  * Perform a 'normal' buffer recovery.  Each logged region of the
1873  * buffer should be copied over the corresponding region in the
1874  * given buffer.  The bitmap in the buf log format structure indicates
1875  * where to place the logged data.
1876  */
1877 /*ARGSUSED*/
1878 STATIC void
1879 xlog_recover_do_reg_buffer(
1880         xlog_recover_item_t     *item,
1881         xfs_buf_t               *bp,
1882         xfs_buf_log_format_t    *buf_f)
1883 {
1884         int                     i;
1885         int                     bit;
1886         int                     nbits;
1887         unsigned int            *data_map = NULL;
1888         unsigned int            map_size = 0;
1889         int                     error;
1890
1891         switch (buf_f->blf_type) {
1892         case XFS_LI_BUF:
1893                 data_map = buf_f->blf_data_map;
1894                 map_size = buf_f->blf_map_size;
1895                 break;
1896         }
1897         bit = 0;
1898         i = 1;  /* 0 is the buf format structure */
1899         while (1) {
1900                 bit = xfs_next_bit(data_map, map_size, bit);
1901                 if (bit == -1)
1902                         break;
1903                 nbits = xfs_contig_bits(data_map, map_size, bit);
1904                 ASSERT(nbits > 0);
1905                 ASSERT(item->ri_buf[i].i_addr != NULL);
1906                 ASSERT(item->ri_buf[i].i_len % XFS_BLI_CHUNK == 0);
1907                 ASSERT(XFS_BUF_COUNT(bp) >=
1908                        ((uint)bit << XFS_BLI_SHIFT)+(nbits<<XFS_BLI_SHIFT));
1909
1910                 /*
1911                  * Do a sanity check if this is a dquot buffer. Just checking
1912                  * the first dquot in the buffer should do. XXX: this is
1913                  * probably a good thing to do for other buf types also.
1914                  */
1915                 error = 0;
1916                 if (buf_f->blf_flags &
1917                    (XFS_BLI_UDQUOT_BUF|XFS_BLI_PDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) {
1918                         error = xfs_qm_dqcheck((xfs_disk_dquot_t *)
1919                                                item->ri_buf[i].i_addr,
1920                                                -1, 0, XFS_QMOPT_DOWARN,
1921                                                "dquot_buf_recover");
1922                 }
1923                 if (!error)
1924                         memcpy(xfs_buf_offset(bp,
1925                                 (uint)bit << XFS_BLI_SHIFT),    /* dest */
1926                                 item->ri_buf[i].i_addr,         /* source */
1927                                 nbits<<XFS_BLI_SHIFT);          /* length */
1928                 i++;
1929                 bit += nbits;
1930         }
1931
1932         /* Shouldn't be any more regions */
1933         ASSERT(i == item->ri_total);
1934 }
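/*
 * Example of the bitmap arithmetic above (numbers invented): with
 * XFS_BLI_CHUNK of 128 bytes (XFS_BLI_SHIFT == 7), a run starting at
 * bit 2 with nbits == 3 maps to byte offsets 256..639 of the buffer
 * (2 << 7 through ((2 + 3) << 7) - 1), copied from the matching
 * ri_buf entry.
 */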
1935
1936 /*
1937  * Do some primitive error checking on ondisk dquot data structures.
1938  */
1939 int
1940 xfs_qm_dqcheck(
1941         xfs_disk_dquot_t *ddq,
1942         xfs_dqid_t       id,
1943         uint             type,    /* used only when IO_dorepair is true */
1944         uint             flags,
1945         char             *str)
1946 {
1947         xfs_dqblk_t      *d = (xfs_dqblk_t *)ddq;
1948         int             errs = 0;
1949
1950         /*
1951          * We can encounter an uninitialized dquot buffer for 2 reasons:
1952          * 1. If we crash while deleting the quotainode(s), and those blks got
1953          *    used for user data. This is because we take the path of regular
1954          *    file deletion; however, the size field of quotainodes is never
1955          *    updated, so all the tricks that we play in itruncate_finish
1956          *    don't quite matter.
1957          *
1958          * 2. We don't play the quota buffers when there's a quotaoff logitem.
1959          *    But the allocation will be replayed so we'll end up with an
1960          *    uninitialized quota block.
1961          *
1962          * This is all fine; things are still consistent, and we haven't lost
1963          * any quota information. Just don't complain about bad dquot blks.
1964          */
1965         if (be16_to_cpu(ddq->d_magic) != XFS_DQUOT_MAGIC) {
1966                 if (flags & XFS_QMOPT_DOWARN)
1967                         cmn_err(CE_ALERT,
1968                         "%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
1969                         str, id, be16_to_cpu(ddq->d_magic), XFS_DQUOT_MAGIC);
1970                 errs++;
1971         }
1972         if (ddq->d_version != XFS_DQUOT_VERSION) {
1973                 if (flags & XFS_QMOPT_DOWARN)
1974                         cmn_err(CE_ALERT,
1975                         "%s : XFS dquot ID 0x%x, version 0x%x != 0x%x",
1976                         str, id, ddq->d_version, XFS_DQUOT_VERSION);
1977                 errs++;
1978         }
1979
1980         if (ddq->d_flags != XFS_DQ_USER &&
1981             ddq->d_flags != XFS_DQ_PROJ &&
1982             ddq->d_flags != XFS_DQ_GROUP) {
1983                 if (flags & XFS_QMOPT_DOWARN)
1984                         cmn_err(CE_ALERT,
1985                         "%s : XFS dquot ID 0x%x, unknown flags 0x%x",
1986                         str, id, ddq->d_flags);
1987                 errs++;
1988         }
1989
1990         if (id != -1 && id != be32_to_cpu(ddq->d_id)) {
1991                 if (flags & XFS_QMOPT_DOWARN)
1992                         cmn_err(CE_ALERT,
1993                         "%s : ondisk-dquot 0x%p, ID mismatch: "
1994                         "0x%x expected, found id 0x%x",
1995                         str, ddq, id, be32_to_cpu(ddq->d_id));
1996                 errs++;
1997         }
1998
1999         if (!errs && ddq->d_id) {
2000                 if (ddq->d_blk_softlimit &&
2001                     be64_to_cpu(ddq->d_bcount) >=
2002                                 be64_to_cpu(ddq->d_blk_softlimit)) {
2003                         if (!ddq->d_btimer) {
2004                                 if (flags & XFS_QMOPT_DOWARN)
2005                                         cmn_err(CE_ALERT,
2006                                         "%s : Dquot ID 0x%x (0x%p) "
2007                                         "BLK TIMER NOT STARTED",
2008                                         str, (int)be32_to_cpu(ddq->d_id), ddq);
2009                                 errs++;
2010                         }
2011                 }
2012                 if (ddq->d_ino_softlimit &&
2013                     be64_to_cpu(ddq->d_icount) >=
2014                                 be64_to_cpu(ddq->d_ino_softlimit)) {
2015                         if (!ddq->d_itimer) {
2016                                 if (flags & XFS_QMOPT_DOWARN)
2017                                         cmn_err(CE_ALERT,
2018                                         "%s : Dquot ID 0x%x (0x%p) "
2019                                         "INODE TIMER NOT STARTED",
2020                                         str, (int)be32_to_cpu(ddq->d_id), ddq);
2021                                 errs++;
2022                         }
2023                 }
2024                 if (ddq->d_rtb_softlimit &&
2025                     be64_to_cpu(ddq->d_rtbcount) >=
2026                                 be64_to_cpu(ddq->d_rtb_softlimit)) {
2027                         if (!ddq->d_rtbtimer) {
2028                                 if (flags & XFS_QMOPT_DOWARN)
2029                                         cmn_err(CE_ALERT,
2030                                         "%s : Dquot ID 0x%x (0x%p) "
2031                                         "RTBLK TIMER NOT STARTED",
2032                                         str, (int)be32_to_cpu(ddq->d_id), ddq);
2033                                 errs++;
2034                         }
2035                 }
2036         }
2037
2038         if (!errs || !(flags & XFS_QMOPT_DQREPAIR))
2039                 return errs;
2040
2041         if (flags & XFS_QMOPT_DOWARN)
2042                 cmn_err(CE_NOTE, "Re-initializing dquot ID 0x%x", id);
2043
2044         /*
2045          * Typically, a repair is only requested by quotacheck.
2046          */
2047         ASSERT(id != -1);
2048         ASSERT(flags & XFS_QMOPT_DQREPAIR);
2049         memset(d, 0, sizeof(xfs_dqblk_t));
2050
2051         d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
2052         d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
2053         d->dd_diskdq.d_flags = type;
2054         d->dd_diskdq.d_id = cpu_to_be32(id);
2055
2056         return errs;
2057 }
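/*
 * Call sketch (illustrative, not from this file): recovery only warns,
 * while quotacheck may also request a repair, roughly:
 *
 *	errs = xfs_qm_dqcheck(ddq, id, type,
 *			XFS_QMOPT_DOWARN | XFS_QMOPT_DQREPAIR, "quotacheck");
 *
 * With XFS_QMOPT_DQREPAIR set and errors found, the dquot block is
 * re-initialized in place as done at the end of the function above.
 */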
2058
2059 /*
2060  * Perform a dquot buffer recovery.
2061  * Simple algorithm: if we have found a QUOTAOFF logitem of the same type
2062  * (i.e. USR or GRP), then just toss this buffer away; don't recover it.
2063  * Else, treat it as a regular buffer and do recovery.
2064  */
2065 STATIC void
2066 xlog_recover_do_dquot_buffer(
2067         xfs_mount_t             *mp,
2068         xlog_t                  *log,
2069         xlog_recover_item_t     *item,
2070         xfs_buf_t               *bp,
2071         xfs_buf_log_format_t    *buf_f)
2072 {
2073         uint                    type;
2074
2075         /*
2076          * Filesystems are required to send in quota flags at mount time.
2077          */
2078         if (mp->m_qflags == 0) {
2079                 return;
2080         }
2081
2082         type = 0;
2083         if (buf_f->blf_flags & XFS_BLI_UDQUOT_BUF)
2084                 type |= XFS_DQ_USER;
2085         if (buf_f->blf_flags & XFS_BLI_PDQUOT_BUF)
2086                 type |= XFS_DQ_PROJ;
2087         if (buf_f->blf_flags & XFS_BLI_GDQUOT_BUF)
2088                 type |= XFS_DQ_GROUP;
2089         /*
2090          * If this type of quota was turned off, ignore this buffer
2091          */
2092         if (log->l_quotaoffs_flag & type)
2093                 return;
2094
2095         xlog_recover_do_reg_buffer(item, bp, buf_f);
2096 }
2097
2098 /*
2099  * This routine replays a modification made to a buffer at runtime.
2100  * There are actually two types of buffer, regular and inode, which
2101  * are handled differently.  From inode buffers we recover only a
2102  * specific set of data, namely
2103  * the inode di_next_unlinked fields.  This is because all other inode
2104  * data is actually logged via inode records and any data we replay
2105  * here which overlaps that may be stale.
2106  *
2107  * When meta-data buffers are freed at run time we log a buffer item
2108  * with the XFS_BLI_CANCEL bit set to indicate that previous copies
2109  * of the buffer in the log should not be replayed at recovery time.
2110  * This is so that if the blocks covered by the buffer are reused for
2111  * file data before we crash we don't end up replaying old, freed
2112  * meta-data into a user's file.
2113  *
2114  * To handle the cancellation of buffer log items, we make two passes
2115  * over the log during recovery.  During the first we build a table of
2116  * those buffers which have been cancelled, and during the second we
2117  * only replay those buffers which do not have corresponding cancel
2118  * records in the table.  See xlog_recover_do_buffer_pass[1,2] above
2119  * for more details on the implementation of the table of cancel records.
2120  */
2121 STATIC int
2122 xlog_recover_do_buffer_trans(
2123         xlog_t                  *log,
2124         xlog_recover_item_t     *item,
2125         int                     pass)
2126 {
2127         xfs_buf_log_format_t    *buf_f;
2128         xfs_mount_t             *mp;
2129         xfs_buf_t               *bp;
2130         int                     error;
2131         int                     cancel;
2132         xfs_daddr_t             blkno;
2133         int                     len;
2134         ushort                  flags;
2135
2136         buf_f = (xfs_buf_log_format_t *)item->ri_buf[0].i_addr;
2137
2138         if (pass == XLOG_RECOVER_PASS1) {
2139                 /*
2140                  * In this pass we're only looking for buf items
2141                  * with the XFS_BLI_CANCEL bit set.
2142                  */
2143                 xlog_recover_do_buffer_pass1(log, buf_f);
2144                 return 0;
2145         } else {
2146                 /*
2147                  * In this pass we want to recover all the buffers
2148                  * which have not been cancelled and are not
2149                  * cancellation buffers themselves.  The routine
2150                  * we call here will tell us whether or not to
2151                  * continue with the replay of this buffer.
2152                  */
2153                 cancel = xlog_recover_do_buffer_pass2(log, buf_f);
2154                 if (cancel) {
2155                         return 0;
2156                 }
2157         }
2158         switch (buf_f->blf_type) {
2159         case XFS_LI_BUF:
2160                 blkno = buf_f->blf_blkno;
2161                 len = buf_f->blf_len;
2162                 flags = buf_f->blf_flags;
2163                 break;
2164         default:
2165                 xfs_fs_cmn_err(CE_ALERT, log->l_mp,
2166                         "xfs_log_recover: unknown buffer type 0x%x, logdev %s",
2167                         buf_f->blf_type, log->l_mp->m_logname ?
2168                         log->l_mp->m_logname : "internal");
2169                 XFS_ERROR_REPORT("xlog_recover_do_buffer_trans",
2170                                  XFS_ERRLEVEL_LOW, log->l_mp);
2171                 return XFS_ERROR(EFSCORRUPTED);
2172         }
2173
2174         mp = log->l_mp;
2175         if (flags & XFS_BLI_INODE_BUF) {
2176                 bp = xfs_buf_read_flags(mp->m_ddev_targp, blkno, len,
2177                                                                 XFS_BUF_LOCK);
2178         } else {
2179                 bp = xfs_buf_read(mp->m_ddev_targp, blkno, len, 0);
2180         }
2181         if (XFS_BUF_ISERROR(bp)) {
2182                 xfs_ioerror_alert("xlog_recover_do..(read#1)", log->l_mp,
2183                                   bp, blkno);
2184                 error = XFS_BUF_GETERROR(bp);
2185                 xfs_buf_relse(bp);
2186                 return error;
2187         }
2188
2189         error = 0;
2190         if (flags & XFS_BLI_INODE_BUF) {
2191                 error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
2192         } else if (flags &
2193                   (XFS_BLI_UDQUOT_BUF|XFS_BLI_PDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) {
2194                 xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
2195         } else {
2196                 xlog_recover_do_reg_buffer(item, bp, buf_f);
2197         }
2198         if (error)
2199                 return XFS_ERROR(error);
2200
2201         /*
2202          * Perform delayed write on the buffer.  Asynchronous writes will be
2203          * slower when taking into account all the buffers to be flushed.
2204          *
2205          * Also make sure that only inode buffers with good sizes stay in
2206          * the buffer cache.  The kernel moves inodes in buffers of 1 block
2207          * or XFS_INODE_CLUSTER_SIZE bytes, whichever is bigger.  The inode
2208          * buffers in the log can be a different size if the log was generated
2209          * by an older kernel using unclustered inode buffers or a newer kernel
2210          * running with a different inode cluster size.  Regardless, if the
2211          * inode buffer size isn't MAX(blocksize, XFS_INODE_CLUSTER_SIZE)
2212          * for *our* value of XFS_INODE_CLUSTER_SIZE, then we need to keep
2213          * the buffer out of the buffer cache so that the buffer won't
2214          * overlap with future reads of those inodes.
2215          */
2216         if (XFS_DINODE_MAGIC ==
2217             INT_GET(*((__uint16_t *)(xfs_buf_offset(bp, 0))), ARCH_CONVERT) &&
2218             (XFS_BUF_COUNT(bp) != MAX(log->l_mp->m_sb.sb_blocksize,
2219                         (__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) {
2220                 XFS_BUF_STALE(bp);
2221                 error = xfs_bwrite(mp, bp);
2222         } else {
2223                 ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL ||
2224                        XFS_BUF_FSPRIVATE(bp, xfs_mount_t *) == mp);
2225                 XFS_BUF_SET_FSPRIVATE(bp, mp);
2226                 XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
2227                 xfs_bdwrite(mp, bp);
2228         }
2229
2230         return (error);
2231 }
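/*
 * Example of the size check above (sizes invented): with a 4k filesystem
 * block size and an 8k XFS_INODE_CLUSTER_SIZE, MAX() is 8k, so a 4k inode
 * buffer logged by an older, unclustered kernel fails the test and is
 * written out synchronously and marked stale rather than left in the
 * cache, where it could overlap a later 8k cluster read.
 */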
2232
2233 STATIC int
2234 xlog_recover_do_inode_trans(
2235         xlog_t                  *log,
2236         xlog_recover_item_t     *item,
2237         int                     pass)
2238 {
2239         xfs_inode_log_format_t  *in_f;
2240         xfs_mount_t             *mp;
2241         xfs_buf_t               *bp;
2242         xfs_imap_t              imap;
2243         xfs_dinode_t            *dip;
2244         xfs_ino_t               ino;
2245         int                     len;
2246         xfs_caddr_t             src;
2247         xfs_caddr_t             dest;
2248         int                     error;
2249         int                     attr_index;
2250         uint                    fields;
2251         xfs_icdinode_t          *dicp;
2252         int                     need_free = 0;
2253
2254         if (pass == XLOG_RECOVER_PASS1) {
2255                 return 0;
2256         }
2257
2258         if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) {
2259                 in_f = (xfs_inode_log_format_t *)item->ri_buf[0].i_addr;
2260         } else {
2261                 in_f = (xfs_inode_log_format_t *)kmem_alloc(
2262                         sizeof(xfs_inode_log_format_t), KM_SLEEP);
2263                 need_free = 1;
2264                 error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
2265                 if (error)
2266                         goto error;
2267         }
2268         ino = in_f->ilf_ino;
2269         mp = log->l_mp;
2270         if (ITEM_TYPE(item) == XFS_LI_INODE) {
2271                 imap.im_blkno = (xfs_daddr_t)in_f->ilf_blkno;
2272                 imap.im_len = in_f->ilf_len;
2273                 imap.im_boffset = in_f->ilf_boffset;
2274         } else {
2275                 /*
2276                  * It's an old inode format record.  We don't know where
2277                  * its cluster is located on disk, and we can't allow
2278                  * xfs_imap() to figure it out because the inode btrees
2279                  * are not ready to be used.  Therefore do not pass the
2280                  * XFS_IMAP_LOOKUP flag to xfs_imap().  This will give
2281                  * us only the single block in which the inode lives
2282                  * rather than its cluster, so we must make sure to
2283                  * invalidate the buffer when we write it out below.
2284                  */
2285                 imap.im_blkno = 0;
2286                 xfs_imap(log->l_mp, NULL, ino, &imap, 0);
2287         }
2288
2289         /*
2290          * The buffer holding the inode may have been cancelled (freed);
2291          * if so, do not replay the inode.
2292          */
2293         if (xlog_check_buffer_cancelled(log, imap.im_blkno, imap.im_len, 0)) {
2294                 error = 0;
2295                 goto error;
2296         }
2297
2298         bp = xfs_buf_read_flags(mp->m_ddev_targp, imap.im_blkno, imap.im_len,
2299                                                                 XFS_BUF_LOCK);
2300         if (XFS_BUF_ISERROR(bp)) {
2301                 xfs_ioerror_alert("xlog_recover_do..(read#2)", mp,
2302                                   bp, imap.im_blkno);
2303                 error = XFS_BUF_GETERROR(bp);
2304                 xfs_buf_relse(bp);
2305                 goto error;
2306         }
2307         error = 0;
2308         ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
2309         dip = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
2310
2311         /*
2312          * Make sure the place we're flushing out to really looks
2313          * like an inode!
2314          */
2315         if (unlikely(be16_to_cpu(dip->di_core.di_magic) != XFS_DINODE_MAGIC)) {
2316                 xfs_buf_relse(bp);
2317                 xfs_fs_cmn_err(CE_ALERT, mp,
2318                         "xfs_inode_recover: Bad inode magic number, dino ptr = 0x%p, dino bp = 0x%p, ino = %Ld",
2319                         dip, bp, ino);
2320                 XFS_ERROR_REPORT("xlog_recover_do_inode_trans(1)",
2321                                  XFS_ERRLEVEL_LOW, mp);
2322                 error = EFSCORRUPTED;
2323                 goto error;
2324         }
2325         dicp = (xfs_icdinode_t *)(item->ri_buf[1].i_addr);
2326         if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) {
2327                 xfs_buf_relse(bp);
2328                 xfs_fs_cmn_err(CE_ALERT, mp,
2329                         "xfs_inode_recover: Bad inode log record, rec ptr 0x%p, ino %Ld",
2330                         item, ino);
2331                 XFS_ERROR_REPORT("xlog_recover_do_inode_trans(2)",
2332                                  XFS_ERRLEVEL_LOW, mp);
2333                 error = EFSCORRUPTED;
2334                 goto error;
2335         }
2336
2337         /* Skip replay when the on disk inode is newer than the log one */
2338         if (dicp->di_flushiter < be16_to_cpu(dip->di_core.di_flushiter)) {
2339                 /*
2340                  * Deal with the wrap case: a small, recently wrapped
2341                  * in-core value is newer than DI_MAX_FLUSH on disk
2342                  */
2343                 if (be16_to_cpu(dip->di_core.di_flushiter) == DI_MAX_FLUSH &&
2344                     dicp->di_flushiter < (DI_MAX_FLUSH >> 1)) {
2345                         /* do nothing */
2346                 } else {
2347                         xfs_buf_relse(bp);
2348                         error = 0;
2349                         goto error;
2350                 }
2351         }
2352         /* Take the opportunity to reset the flush iteration count */
2353         dicp->di_flushiter = 0;
2354
2355         if (unlikely((dicp->di_mode & S_IFMT) == S_IFREG)) {
2356                 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2357                     (dicp->di_format != XFS_DINODE_FMT_BTREE)) {
2358                         XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(3)",
2359                                          XFS_ERRLEVEL_LOW, mp, dicp);
2360                         xfs_buf_relse(bp);
2361                         xfs_fs_cmn_err(CE_ALERT, mp,
2362                                 "xfs_inode_recover: Bad regular inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2363                                 item, dip, bp, ino);
2364                         error = EFSCORRUPTED;
2365                         goto error;
2366                 }
2367         } else if (unlikely((dicp->di_mode & S_IFMT) == S_IFDIR)) {
2368                 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2369                     (dicp->di_format != XFS_DINODE_FMT_BTREE) &&
2370                     (dicp->di_format != XFS_DINODE_FMT_LOCAL)) {
2371                         XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(4)",
2372                                              XFS_ERRLEVEL_LOW, mp, dicp);
2373                         xfs_buf_relse(bp);
2374                         xfs_fs_cmn_err(CE_ALERT, mp,
2375                                 "xfs_inode_recover: Bad dir inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2376                                 item, dip, bp, ino);
2377                         error = EFSCORRUPTED;
2378                         goto error;
2379                 }
2380         }
2381         if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)) {
2382                 XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(5)",
2383                                      XFS_ERRLEVEL_LOW, mp, dicp);
2384                 xfs_buf_relse(bp);
2385                 xfs_fs_cmn_err(CE_ALERT, mp,
2386                         "xfs_inode_recover: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
2387                         item, dip, bp, ino,
2388                         dicp->di_nextents + dicp->di_anextents,
2389                         dicp->di_nblocks);
2390                 error = EFSCORRUPTED;
2391                 goto error;
2392         }
2393         if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) {
2394                 XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(6)",
2395                                      XFS_ERRLEVEL_LOW, mp, dicp);
2396                 xfs_buf_relse(bp);
2397                 xfs_fs_cmn_err(CE_ALERT, mp,
2398                         "xfs_inode_recover: Bad inode log rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, forkoff 0x%x",
2399                         item, dip, bp, ino, dicp->di_forkoff);
2400                 error = EFSCORRUPTED;
2401                 goto error;
2402         }
2403         if (unlikely(item->ri_buf[1].i_len > sizeof(xfs_dinode_core_t))) {
2404                 XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(7)",
2405                                      XFS_ERRLEVEL_LOW, mp, dicp);
2406                 xfs_buf_relse(bp);
2407                 xfs_fs_cmn_err(CE_ALERT, mp,
2408                         "xfs_inode_recover: Bad inode log record length %d, rec ptr 0x%p",
2409                         item->ri_buf[1].i_len, item);
2410                 error = EFSCORRUPTED;
2411                 goto error;
2412         }
2413
2414         /* The core is in in-core format */
2415         xfs_dinode_to_disk(&dip->di_core,
2416                 (xfs_icdinode_t *)item->ri_buf[1].i_addr);
2417
2418         /* the rest is in on-disk format */
2419         if (item->ri_buf[1].i_len > sizeof(xfs_dinode_core_t)) {
2420                 memcpy((xfs_caddr_t) dip + sizeof(xfs_dinode_core_t),
2421                         item->ri_buf[1].i_addr + sizeof(xfs_dinode_core_t),
2422                         item->ri_buf[1].i_len  - sizeof(xfs_dinode_core_t));
2423         }
2424
2425         fields = in_f->ilf_fields;
2426         switch (fields & (XFS_ILOG_DEV | XFS_ILOG_UUID)) {
2427         case XFS_ILOG_DEV:
2428                 dip->di_u.di_dev = cpu_to_be32(in_f->ilf_u.ilfu_rdev);
2429                 break;
2430         case XFS_ILOG_UUID:
2431                 dip->di_u.di_muuid = in_f->ilf_u.ilfu_uuid;
2432                 break;
2433         }
2434
2435         if (in_f->ilf_size == 2)
2436                 goto write_inode_buffer;
2437         len = item->ri_buf[2].i_len;
2438         src = item->ri_buf[2].i_addr;
2439         ASSERT(in_f->ilf_size <= 4);
2440         ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
2441         ASSERT(!(fields & XFS_ILOG_DFORK) ||
2442                (len == in_f->ilf_dsize));
2443
2444         switch (fields & XFS_ILOG_DFORK) {
2445         case XFS_ILOG_DDATA:
2446         case XFS_ILOG_DEXT:
2447                 memcpy(&dip->di_u, src, len);
2448                 break;
2449
2450         case XFS_ILOG_DBROOT:
2451                 xfs_bmbt_to_bmdr((xfs_bmbt_block_t *)src, len,
2452                                  &(dip->di_u.di_bmbt),
2453                                  XFS_DFORK_DSIZE(dip, mp));
2454                 break;
2455
2456         default:
2457                 /*
2458                  * There are no data fork flags set.
2459                  */
2460                 ASSERT((fields & XFS_ILOG_DFORK) == 0);
2461                 break;
2462         }
2463
2464         /*
2465          * If we logged any attribute data, recover it.  There may or
2466          * may not have been any other non-core data logged in this
2467          * transaction.
2468          */
2469         if (in_f->ilf_fields & XFS_ILOG_AFORK) {
2470                 if (in_f->ilf_fields & XFS_ILOG_DFORK) {
2471                         attr_index = 3;
2472                 } else {
2473                         attr_index = 2;
2474                 }
2475                 len = item->ri_buf[attr_index].i_len;
2476                 src = item->ri_buf[attr_index].i_addr;
2477                 ASSERT(len == in_f->ilf_asize);
2478
2479                 switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
2480                 case XFS_ILOG_ADATA:
2481                 case XFS_ILOG_AEXT:
2482                         dest = XFS_DFORK_APTR(dip);
2483                         ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
2484                         memcpy(dest, src, len);
2485                         break;
2486
2487                 case XFS_ILOG_ABROOT:
2488                         dest = XFS_DFORK_APTR(dip);
2489                         xfs_bmbt_to_bmdr((xfs_bmbt_block_t *)src, len,
2490                                          (xfs_bmdr_block_t*)dest,
2491                                          XFS_DFORK_ASIZE(dip, mp));
2492                         break;
2493
2494                 default:
2495                         xlog_warn("XFS: xlog_recover_do_inode_trans: Invalid flag");
2496                         ASSERT(0);
2497                         xfs_buf_relse(bp);
2498                         error = EIO;
2499                         goto error;
2500                 }
2501         }
2502
2503 write_inode_buffer:
2504         if (ITEM_TYPE(item) == XFS_LI_INODE) {
2505                 ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL ||
2506                        XFS_BUF_FSPRIVATE(bp, xfs_mount_t *) == mp);
2507                 XFS_BUF_SET_FSPRIVATE(bp, mp);
2508                 XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
2509                 xfs_bdwrite(mp, bp);
2510         } else {
2511                 XFS_BUF_STALE(bp);
2512                 error = xfs_bwrite(mp, bp);
2513         }
2514
2515 error:
2516         if (need_free)
2517                 kmem_free(in_f, sizeof(*in_f));
2518         return XFS_ERROR(error);
2519 }
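/*
 * Region layout consumed above, as implied by the asserts: ri_buf[0] is
 * the inode log format structure, ri_buf[1] the inode core, and ri_buf[2]
 * and/or ri_buf[3] the data and attribute forks when logged, so ilf_size
 * runs from 2 (core only) up to 4 (both forks present).
 */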
2520
2521 /*
2522  * Recover QUOTAOFF records. We simply make a note of it in the xlog_t
2523  * structure, so that we know not to do any dquot item or dquot buffer
2524  * recovery of that type.  This is done in pass 1, before pass 2 replays dquots.
2525  */
2526 STATIC int
2527 xlog_recover_do_quotaoff_trans(
2528         xlog_t                  *log,
2529         xlog_recover_item_t     *item,
2530         int                     pass)
2531 {
2532         xfs_qoff_logformat_t    *qoff_f;
2533
2534         if (pass == XLOG_RECOVER_PASS2) {
2535                 return (0);
2536         }
2537
2538         qoff_f = (xfs_qoff_logformat_t *)item->ri_buf[0].i_addr;
2539         ASSERT(qoff_f);
2540
2541         /*
2542          * The logitem format's flag tells us if this was user quotaoff,
2543          * group/project quotaoff or both.
2544          */
2545         if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
2546                 log->l_quotaoffs_flag |= XFS_DQ_USER;
2547         if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
2548                 log->l_quotaoffs_flag |= XFS_DQ_PROJ;
2549         if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
2550                 log->l_quotaoffs_flag |= XFS_DQ_GROUP;
2551
2552         return (0);
2553 }
2554
2555 /*
2556  * Recover a dquot record
2557  */
2558 STATIC int
2559 xlog_recover_do_dquot_trans(
2560         xlog_t                  *log,
2561         xlog_recover_item_t     *item,
2562         int                     pass)
2563 {
2564         xfs_mount_t             *mp;
2565         xfs_buf_t               *bp;
2566         struct xfs_disk_dquot   *ddq, *recddq;
2567         int                     error;
2568         xfs_dq_logformat_t      *dq_f;
2569         uint                    type;
2570
2571         if (pass == XLOG_RECOVER_PASS1) {
2572                 return 0;
2573         }
2574         mp = log->l_mp;
2575
2576         /*
2577          * Filesystems are required to send in quota flags at mount time.
2578          */
2579         if (mp->m_qflags == 0)
2580                 return (0);
2581
2582         recddq = (xfs_disk_dquot_t *)item->ri_buf[1].i_addr;
2583         ASSERT(recddq);
2584         /*
2585          * If this type of quota was turned off, ignore this record.
2586          */
2587         type = INT_GET(recddq->d_flags, ARCH_CONVERT) &
2588                         (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
2589         ASSERT(type);
2590         if (log->l_quotaoffs_flag & type)
2591                 return (0);
2592
2593         /*
2594          * At this point we know that quota was _not_ turned off.
2595          * Since the mount flags are not indicating to us otherwise, this
2596          * must mean that quota is on, and the dquot needs to be replayed.
2597          * Remember that we may not have fully recovered the superblock yet,
2598          * so we can't do the usual trick of looking at the SB quota bits.
2599          *
2600          * The other possibility, of course, is that the quota subsystem was
2601          * removed since the last mount - ENOSYS.
2602          */
2603         dq_f = (xfs_dq_logformat_t *)item->ri_buf[0].i_addr;
2604         ASSERT(dq_f);
2605         if ((error = xfs_qm_dqcheck(recddq,
2606                            dq_f->qlf_id,
2607                            0, XFS_QMOPT_DOWARN,
2608                            "xlog_recover_do_dquot_trans (log copy)"))) {
2609                 return XFS_ERROR(EIO);
2610         }
2611         ASSERT(dq_f->qlf_len == 1);
2612
2613         error = xfs_read_buf(mp, mp->m_ddev_targp,
2614                              dq_f->qlf_blkno,
2615                              XFS_FSB_TO_BB(mp, dq_f->qlf_len),
2616                              0, &bp);
2617         if (error) {
2618                 xfs_ioerror_alert("xlog_recover_do..(read#3)", mp,
2619                                   bp, dq_f->qlf_blkno);
2620                 return error;
2621         }
2622         ASSERT(bp);
2623         ddq = (xfs_disk_dquot_t *)xfs_buf_offset(bp, dq_f->qlf_boffset);
2624
2625         /*
2626          * At least the magic num portion should be on disk because this
2627          * was among a chunk of dquots created earlier, and we did some
2628          * minimal initialization then.
2629          */
2630         if (xfs_qm_dqcheck(ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
2631                            "xlog_recover_do_dquot_trans")) {
2632                 xfs_buf_relse(bp);
2633                 return XFS_ERROR(EIO);
2634         }
2635
2636         memcpy(ddq, recddq, item->ri_buf[1].i_len);
2637
2638         ASSERT(dq_f->qlf_size == 2);
2639         ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL ||
2640                XFS_BUF_FSPRIVATE(bp, xfs_mount_t *) == mp);
2641         XFS_BUF_SET_FSPRIVATE(bp, mp);
2642         XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
2643         xfs_bdwrite(mp, bp);
2644
2645         return (0);
2646 }
2647
2648 /*
2649  * This routine is called to create an in-core extent free intent
2650  * item from the efi format structure which was logged on disk.
2651  * It allocates an in-core efi, copies the extents from the format
2652  * structure into it, and adds the efi to the AIL with the given
2653  * LSN.
2654  */
2655 STATIC int
2656 xlog_recover_do_efi_trans(
2657         xlog_t                  *log,
2658         xlog_recover_item_t     *item,
2659         xfs_lsn_t               lsn,
2660         int                     pass)
2661 {
2662         int                     error;
2663         xfs_mount_t             *mp;
2664         xfs_efi_log_item_t      *efip;
2665         xfs_efi_log_format_t    *efi_formatp;
2666
2667         if (pass == XLOG_RECOVER_PASS1) {
2668                 return 0;
2669         }
2670
2671         efi_formatp = (xfs_efi_log_format_t *)item->ri_buf[0].i_addr;
2672
2673         mp = log->l_mp;
2674         efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
2675         if ((error = xfs_efi_copy_format(&(item->ri_buf[0]),
2676                                          &(efip->efi_format)))) {
2677                 xfs_efi_item_free(efip);
2678                 return error;
2679         }
2680         efip->efi_next_extent = efi_formatp->efi_nextents;
2681         efip->efi_flags |= XFS_EFI_COMMITTED;
2682
2683         spin_lock(&mp->m_ail_lock);
2684         /*
2685          * xfs_trans_update_ail() drops the AIL lock.
2686          */
2687         xfs_trans_update_ail(mp, (xfs_log_item_t *)efip, lsn);
2688         return 0;
2689 }
2690
2691
2692 /*
2693  * This routine is called when an efd format structure is found in
2694  * a committed transaction in the log.  Its purpose is to cancel
2695  * the corresponding efi if it was still in the log.  To do this
2696  * it searches the AIL for the efi with an id equal to that in the
2697  * efd format structure.  If we find it, we remove the efi from the
2698  * AIL and free it.
2699  */
2700 STATIC void
2701 xlog_recover_do_efd_trans(
2702         xlog_t                  *log,
2703         xlog_recover_item_t     *item,
2704         int                     pass)
2705 {
2706         xfs_mount_t             *mp;
2707         xfs_efd_log_format_t    *efd_formatp;
2708         xfs_efi_log_item_t      *efip = NULL;
2709         xfs_log_item_t          *lip;
2710         int                     gen;
2711         __uint64_t              efi_id;
2712
2713         if (pass == XLOG_RECOVER_PASS1) {
2714                 return;
2715         }
2716
2717         efd_formatp = (xfs_efd_log_format_t *)item->ri_buf[0].i_addr;
2718         ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
2719                 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
2720                (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
2721                 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t)))));
2722         efi_id = efd_formatp->efd_efi_id;
2723
2724         /*
2725          * Search for the efi with the id in the efd format structure
2726          * in the AIL.
2727          */
2728         mp = log->l_mp;
2729         spin_lock(&mp->m_ail_lock);
2730         lip = xfs_trans_first_ail(mp, &gen);
2731         while (lip != NULL) {
2732                 if (lip->li_type == XFS_LI_EFI) {
2733                         efip = (xfs_efi_log_item_t *)lip;
2734                         if (efip->efi_format.efi_id == efi_id) {
2735                                 /*
2736                                  * xfs_trans_delete_ail() drops the
2737                                  * AIL lock.
2738                                  */
2739                                 xfs_trans_delete_ail(mp, lip);
2740                                 break;
2741                         }
2742                 }
2743                 lip = xfs_trans_next_ail(mp, lip, &gen, NULL);
2744         }
2745
2746         /*
2747          * If we found it, then free it up.  If it wasn't there, it
2748          * must have been overwritten in the log.  Oh well.
2749          */
2750         if (lip != NULL) {
2751                 xfs_efi_item_free(efip);
2752         } else {
2753                 spin_unlock(&mp->m_ail_lock);
2754         }
2755 }
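
     /*
      * Illustrative timeline of the intent/done pairing handled by the
      * two routines above (the id is made up): a transaction logs an EFI
      * with efi_id N for the extents it intends to free; when the free is
      * done, a later record logs an EFD with efd_efi_id == N.  If replay
      * sees both, the EFD handler above pulls the EFI back off the AIL
      * and frees it.  If the system died before the EFD reached the disk,
      * the EFI survives in the AIL and xlog_recover_process_efis()
      * completes the extent free after replay.
      */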
2756
2757 /*
2758  * Perform the transaction
2759  *
2760  * If the transaction modifies a buffer or inode, do it now.  Otherwise,
2761  * EFIs and EFDs get queued up by adding entries into the AIL for them.
2762  */
2763 STATIC int
2764 xlog_recover_do_trans(
2765         xlog_t                  *log,
2766         xlog_recover_t          *trans,
2767         int                     pass)
2768 {
2769         int                     error = 0;
2770         xlog_recover_item_t     *item, *first_item;
2771
2772         if ((error = xlog_recover_reorder_trans(trans)))
2773                 return error;
2774         first_item = item = trans->r_itemq;
2775         do {
2776                 /*
2777                  * we don't need to worry about the block number being
2778                  * truncated in > 1 TB buffers because in user-land,
2779                  * we're now n32 or 64-bit, so xfs_daddr_t is 64 bits and
2780                  * the blknos will get through the user-mode buffer
2781                  * cache properly.  The only bad case is o32 kernels
2782                  * where xfs_daddr_t is 32-bits but mount will warn us
2783                  * off a > 1 TB filesystem before we get here.
2784                  */
2785                 if ((ITEM_TYPE(item) == XFS_LI_BUF)) {
2786                         if ((error = xlog_recover_do_buffer_trans(log, item,
2787                                                                  pass)))
2788                                 break;
2789                 } else if ((ITEM_TYPE(item) == XFS_LI_INODE)) {
2790                         if ((error = xlog_recover_do_inode_trans(log, item,
2791                                                                 pass)))
2792                                 break;
2793                 } else if (ITEM_TYPE(item) == XFS_LI_EFI) {
2794                         if ((error = xlog_recover_do_efi_trans(log, item,
2795                                                   trans->r_lsn, pass)))
2796                                 break;
2797                 } else if (ITEM_TYPE(item) == XFS_LI_EFD) {
2798                         xlog_recover_do_efd_trans(log, item, pass);
2799                 } else if (ITEM_TYPE(item) == XFS_LI_DQUOT) {
2800                         if ((error = xlog_recover_do_dquot_trans(log, item,
2801                                                                    pass)))
2802                                 break;
2803                 } else if ((ITEM_TYPE(item) == XFS_LI_QUOTAOFF)) {
2804                         if ((error = xlog_recover_do_quotaoff_trans(log, item,
2805                                                                    pass)))
2806                                 break;
2807                 } else {
2808                         xlog_warn("XFS: xlog_recover_do_trans: bad item type");
2809                         ASSERT(0);
2810                         error = XFS_ERROR(EIO);
2811                         break;
2812                 }
2813                 item = item->ri_next;
2814         } while (first_item != item);
2815
2816         return error;
2817 }
2818
2819 /*
2820  * Free up any resources allocated by the transaction
2821  *
2822  * Remember that EFIs, EFDs, and IUNLINKs are handled later.
2823  */
2824 STATIC void
2825 xlog_recover_free_trans(
2826         xlog_recover_t          *trans)
2827 {
2828         xlog_recover_item_t     *first_item, *item, *free_item;
2829         int                     i;
2830
2831         item = first_item = trans->r_itemq;
2832         do {
2833                 free_item = item;
2834                 item = item->ri_next;
2835                 /* Free the regions in the item. */
2836                 for (i = 0; i < free_item->ri_cnt; i++) {
2837                         kmem_free(free_item->ri_buf[i].i_addr,
2838                                   free_item->ri_buf[i].i_len);
2839                 }
2840                 /* Free the region table and the item itself */
2841                 kmem_free(free_item->ri_buf,
2842                           (free_item->ri_total * sizeof(xfs_log_iovec_t)));
2843                 kmem_free(free_item, sizeof(xlog_recover_item_t));
2844         } while (first_item != item);
2845         /* Free the transaction recover structure */
2846         kmem_free(trans, sizeof(xlog_recover_t));
2847 }
2848
2849 STATIC int
2850 xlog_recover_commit_trans(
2851         xlog_t                  *log,
2852         xlog_recover_t          **q,
2853         xlog_recover_t          *trans,
2854         int                     pass)
2855 {
2856         int                     error;
2857
2858         if ((error = xlog_recover_unlink_tid(q, trans)))
2859                 return error;
2860         if ((error = xlog_recover_do_trans(log, trans, pass)))
2861                 return error;
2862         xlog_recover_free_trans(trans);                 /* no error */
2863         return 0;
2864 }
2865
2866 STATIC int
2867 xlog_recover_unmount_trans(
2868         xlog_recover_t          *trans)
2869 {
2870         /* Do nothing now */
2871         xlog_warn("XFS: xlog_recover_unmount_trans: Unmount LR");
2872         return 0;
2873 }
2874
2875 /*
2876  * There are two valid states of the r_state field.  0 indicates that the
2877  * transaction structure is in a normal state: we have either seen the
2878  * start of the transaction or the last operation we added was not a partial
2879  * operation.  If the last operation we added to the transaction was a
2880  * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
2881  *
2882  * NOTE: skip LRs with 0 data length.
2883  */
2884 STATIC int
2885 xlog_recover_process_data(
2886         xlog_t                  *log,
2887         xlog_recover_t          *rhash[],
2888         xlog_rec_header_t       *rhead,
2889         xfs_caddr_t             dp,
2890         int                     pass)
2891 {
2892         xfs_caddr_t             lp;
2893         int                     num_logops;
2894         xlog_op_header_t        *ohead;
2895         xlog_recover_t          *trans;
2896         xlog_tid_t              tid;
2897         int                     error;
2898         unsigned long           hash;
2899         uint                    flags;
2900
2901         lp = dp + INT_GET(rhead->h_len, ARCH_CONVERT);
2902         num_logops = INT_GET(rhead->h_num_logops, ARCH_CONVERT);
2903
2904         /* check the log format matches our own - else we can't recover */
2905         if (xlog_header_check_recover(log->l_mp, rhead))
2906                 return (XFS_ERROR(EIO));
2907
2908         while ((dp < lp) && num_logops) {
2909                 ASSERT(dp + sizeof(xlog_op_header_t) <= lp);
2910                 ohead = (xlog_op_header_t *)dp;
2911                 dp += sizeof(xlog_op_header_t);
2912                 if (ohead->oh_clientid != XFS_TRANSACTION &&
2913                     ohead->oh_clientid != XFS_LOG) {
2914                         xlog_warn(
2915                 "XFS: xlog_recover_process_data: bad clientid");
2916                         ASSERT(0);
2917                         return (XFS_ERROR(EIO));
2918                 }
2919                 tid = be32_to_cpu(ohead->oh_tid);
2920                 hash = XLOG_RHASH(tid);
2921                 trans = xlog_recover_find_tid(rhash[hash], tid);
2922                 if (trans == NULL) {               /* not found; add new tid */
2923                         if (ohead->oh_flags & XLOG_START_TRANS)
2924                                 xlog_recover_new_tid(&rhash[hash], tid,
2925                                         INT_GET(rhead->h_lsn, ARCH_CONVERT));
2926                 } else {
2927                         ASSERT(dp + be32_to_cpu(ohead->oh_len) <= lp);
2928                         flags = ohead->oh_flags & ~XLOG_END_TRANS;
2929                         if (flags & XLOG_WAS_CONT_TRANS)
2930                                 flags &= ~XLOG_CONTINUE_TRANS;
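                             /*
                              * Example of the masking above: a middle chunk
                              * of a continued transaction carries both
                              * XLOG_WAS_CONT_TRANS and XLOG_CONTINUE_TRANS;
                              * after masking it dispatches below as
                              * XLOG_WAS_CONT_TRANS and its data is appended
                              * to the partial region collected so far.
                              */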
2931                         switch (flags) {
2932                         case XLOG_COMMIT_TRANS:
2933                                 error = xlog_recover_commit_trans(log,
2934                                                 &rhash[hash], trans, pass);
2935                                 break;
2936                         case XLOG_UNMOUNT_TRANS:
2937                                 error = xlog_recover_unmount_trans(trans);
2938                                 break;
2939                         case XLOG_WAS_CONT_TRANS:
2940                                 error = xlog_recover_add_to_cont_trans(trans,
2941                                                 dp, be32_to_cpu(ohead->oh_len));
2942                                 break;
2943                         case XLOG_START_TRANS:
2944                                 xlog_warn(
2945                         "XFS: xlog_recover_process_data: bad transaction");
2946                                 ASSERT(0);
2947                                 error = XFS_ERROR(EIO);
2948                                 break;
2949                         case 0:
2950                         case XLOG_CONTINUE_TRANS:
2951                                 error = xlog_recover_add_to_trans(trans,
2952                                                 dp, be32_to_cpu(ohead->oh_len));
2953                                 break;
2954                         default:
2955                                 xlog_warn(
2956                         "XFS: xlog_recover_process_data: bad flag");
2957                                 ASSERT(0);
2958                                 error = XFS_ERROR(EIO);
2959                                 break;
2960                         }
2961                         if (error)
2962                                 return error;
2963                 }
2964                 dp += be32_to_cpu(ohead->oh_len);
2965                 num_logops--;
2966         }
2967         return 0;
2968 }
2969
2970 /*
2971  * Process an extent free intent item that was recovered from
2972  * the log.  We need to free the extents that it describes.
2973  */
2974 STATIC void
2975 xlog_recover_process_efi(
2976         xfs_mount_t             *mp,
2977         xfs_efi_log_item_t      *efip)
2978 {
2979         xfs_efd_log_item_t      *efdp;
2980         xfs_trans_t             *tp;
2981         int                     i;
2982         xfs_extent_t            *extp;
2983         xfs_fsblock_t           startblock_fsb;
2984
2985         ASSERT(!(efip->efi_flags & XFS_EFI_RECOVERED));
2986
2987         /*
2988          * First check the validity of the extents described by the
2989          * EFI.  If any are bad, then assume that all are bad and
2990          * just toss the EFI.
2991          */
2992         for (i = 0; i < efip->efi_format.efi_nextents; i++) {
2993                 extp = &(efip->efi_format.efi_extents[i]);
2994                 startblock_fsb = XFS_BB_TO_FSB(mp,
2995                                    XFS_FSB_TO_DADDR(mp, extp->ext_start));
2996                 if ((startblock_fsb == 0) ||
2997                     (extp->ext_len == 0) ||
2998                     (startblock_fsb >= mp->m_sb.sb_dblocks) ||
2999                     (extp->ext_len >= mp->m_sb.sb_agblocks)) {
3000                         /*
3001                          * This will pull the EFI from the AIL and
3002                          * free the memory associated with it.
3003                          */
3004                         xfs_efi_release(efip, efip->efi_format.efi_nextents);
3005                         return;
3006                 }
3007         }
3008
3009         tp = xfs_trans_alloc(mp, 0);
3010         xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, 0, 0);
3011         efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
3012
3013         for (i = 0; i < efip->efi_format.efi_nextents; i++) {
3014                 extp = &(efip->efi_format.efi_extents[i]);
3015                 xfs_free_extent(tp, extp->ext_start, extp->ext_len);
3016                 xfs_trans_log_efd_extent(tp, efdp, extp->ext_start,
3017                                          extp->ext_len);
3018         }
3019
3020         efip->efi_flags |= XFS_EFI_RECOVERED;
3021         xfs_trans_commit(tp, 0);
3022 }
3023
3024 /*
3025  * Verify that once we've encountered something other than an EFI
3026  * in the AIL, there are no more EFIs in the AIL.
3027  */
3028 #if defined(DEBUG)
3029 STATIC void
3030 xlog_recover_check_ail(
3031         xfs_mount_t             *mp,
3032         xfs_log_item_t          *lip,
3033         int                     gen)
3034 {
3035         int                     orig_gen = gen;
3036
3037         do {
3038                 ASSERT(lip->li_type != XFS_LI_EFI);
3039                 lip = xfs_trans_next_ail(mp, lip, &gen, NULL);
3040                 /*
3041                  * The check will be bogus if we restart from the
3042                  * beginning of the AIL, so ASSERT that we don't.
3043                  * We never should since we're holding the AIL lock
3044                  * the entire time.
3045                  */
3046                 ASSERT(gen == orig_gen);
3047         } while (lip != NULL);
3048 }
3049 #endif  /* DEBUG */
3050
3051 /*
3052  * When this is called, all of the EFIs which did not have
3053  * corresponding EFDs should be in the AIL.  What we do now
3054  * is free the extents associated with each one.
3055  *
3056  * Since we process the EFIs in normal transactions, they
3057  * will be removed at some point after the commit.  This prevents
3058  * us from just walking down the list processing each one.
3059  * We'll use a flag in the EFI to skip those that we've already
3060  * processed and use the AIL iteration mechanism's generation
3061  * count to try to speed this up at least a bit.
3062  *
3063  * When we start, we know that the EFIs are the only things in
3064  * the AIL.  As we process them, however, other items are added
3065  * to the AIL.  Since everything added to the AIL must come after
3066  * everything already in the AIL, we stop processing as soon as
3067  * we see something other than an EFI in the AIL.
3068  */
3069 STATIC void
3070 xlog_recover_process_efis(
3071         xlog_t                  *log)
3072 {
3073         xfs_log_item_t          *lip;
3074         xfs_efi_log_item_t      *efip;
3075         int                     gen;
3076         xfs_mount_t             *mp;
3077
3078         mp = log->l_mp;
3079         spin_lock(&mp->m_ail_lock);
3080
3081         lip = xfs_trans_first_ail(mp, &gen);
3082         while (lip != NULL) {
3083                 /*
3084                  * We're done when we see something other than an EFI.
3085                  */
3086                 if (lip->li_type != XFS_LI_EFI) {
3087                         xlog_recover_check_ail(mp, lip, gen);
3088                         break;
3089                 }
3090
3091                 /*
3092                  * Skip EFIs that we've already processed.
3093                  */
3094                 efip = (xfs_efi_log_item_t *)lip;
3095                 if (efip->efi_flags & XFS_EFI_RECOVERED) {
3096                         lip = xfs_trans_next_ail(mp, lip, &gen, NULL);
3097                         continue;
3098                 }
3099
3100                 spin_unlock(&mp->m_ail_lock);
3101                 xlog_recover_process_efi(mp, efip);
3102                 spin_lock(&mp->m_ail_lock);
3103                 lip = xfs_trans_next_ail(mp, lip, &gen, NULL);
3104         }
3105         spin_unlock(&mp->m_ail_lock);
3106 }
3107
3108 /*
3109  * This routine performs a transaction to null out a bad inode pointer
3110  * in an agi unlinked inode hash bucket.
3111  */
3112 STATIC void
3113 xlog_recover_clear_agi_bucket(
3114         xfs_mount_t     *mp,
3115         xfs_agnumber_t  agno,
3116         int             bucket)
3117 {
3118         xfs_trans_t     *tp;
3119         xfs_agi_t       *agi;
3120         xfs_buf_t       *agibp;
3121         int             offset;
3122         int             error;
3123
3124         tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET);
3125         xfs_trans_reserve(tp, 0, XFS_CLEAR_AGI_BUCKET_LOG_RES(mp), 0, 0, 0);
3126
3127         error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
3128                                    XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
3129                                    XFS_FSS_TO_BB(mp, 1), 0, &agibp);
3130         if (error) {
3131                 xfs_trans_cancel(tp, XFS_TRANS_ABORT);
3132                 return;
3133         }
3134
3135         agi = XFS_BUF_TO_AGI(agibp);
3136         if (be32_to_cpu(agi->agi_magicnum) != XFS_AGI_MAGIC) {
3137                 xfs_trans_cancel(tp, XFS_TRANS_ABORT);
3138                 return;
3139         }
3140
3141         agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
3142         offset = offsetof(xfs_agi_t, agi_unlinked) +
3143                  (sizeof(xfs_agino_t) * bucket);
3144         xfs_trans_log_buf(tp, agibp, offset,
3145                           (offset + sizeof(xfs_agino_t) - 1));
3146
3147         (void) xfs_trans_commit(tp, 0);
3148 }
3149
3150 /*
3151  * xlog_iunlink_recover
3152  *
3153  * This is called during recovery to process any inodes which
3154  * were unlinked but not freed when the system crashed.  These
3155  * inodes will be on the lists in the AGI blocks.  What we do
3156  * here is scan all the AGIs and fully truncate and free any
3157  * inodes found on the lists.  Each inode is removed from the
3158  * lists when it has been fully truncated and is freed.  The
3159  * freeing of the inode and its removal from the list must be
3160  * atomic.
3161  */
3162 void
3163 xlog_recover_process_iunlinks(
3164         xlog_t          *log)
3165 {
3166         xfs_mount_t     *mp;
3167         xfs_agnumber_t  agno;
3168         xfs_agi_t       *agi;
3169         xfs_buf_t       *agibp;
3170         xfs_buf_t       *ibp;
3171         xfs_dinode_t    *dip;
3172         xfs_inode_t     *ip;
3173         xfs_agino_t     agino;
3174         xfs_ino_t       ino;
3175         int             bucket;
3176         int             error;
3177         uint            mp_dmevmask;
3178
3179         mp = log->l_mp;
3180
3181         /*
3182          * Prevent any DMAPI event from being sent while in this function.
3183          */
3184         mp_dmevmask = mp->m_dmevmask;
3185         mp->m_dmevmask = 0;
3186
3187         for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
3188                 /*
3189                  * Find the agi for this ag.
3190                  */
3191                 agibp = xfs_buf_read(mp->m_ddev_targp,
3192                                 XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
3193                                 XFS_FSS_TO_BB(mp, 1), 0);
3194                 if (XFS_BUF_ISERROR(agibp)) {
3195                         xfs_ioerror_alert("xlog_recover_process_iunlinks(#1)",
3196                                 log->l_mp, agibp,
3197                                 XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)));
3198                 }
3199                 agi = XFS_BUF_TO_AGI(agibp);
3200                 ASSERT(XFS_AGI_MAGIC == be32_to_cpu(agi->agi_magicnum));
3201
3202                 for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
3203
3204                         agino = be32_to_cpu(agi->agi_unlinked[bucket]);
3205                         while (agino != NULLAGINO) {
3206
3207                                 /*
3208                                  * Release the agi buffer so that it can
3209                                  * be acquired in the normal course of the
3210                                  * transaction to truncate and free the inode.
3211                                  */
3212                                 xfs_buf_relse(agibp);
3213
3214                                 ino = XFS_AGINO_TO_INO(mp, agno, agino);
3215                                 error = xfs_iget(mp, NULL, ino, 0, 0, &ip, 0);
3216                                 ASSERT(error || (ip != NULL));
3217
3218                                 if (!error) {
3219                                         /*
3220                                          * Get the on disk inode to find the
3221                                          * next inode in the bucket.
3222                                          */
3223                                         error = xfs_itobp(mp, NULL, ip, &dip,
3224                                                         &ibp, 0, 0);
3225                                         ASSERT(error || (dip != NULL));
3226                                 }
3227
3228                                 if (!error) {
3229                                         ASSERT(ip->i_d.di_nlink == 0);
3230
3231                                         /* setup for the next pass */
3232                                         agino = be32_to_cpu(
3233                                                         dip->di_next_unlinked);
3234                                         xfs_buf_relse(ibp);
3235                                         /*
3236                                          * Prevent any DMAPI event from
3237                                          * being sent when the
3238                                          * reference on the inode is
3239                                          * dropped.
3240                                          */
3241                                         ip->i_d.di_dmevmask = 0;
3242
3243                                         /*
3244                                          * If this is a new inode, handle
3245                                          * it specially.  Otherwise,
3246                                          * just drop our reference to the
3247                                          * inode.  If there are no
3248                                          * other references, this will
3249                                          * send the inode to
3250                                          * xfs_inactive() which will
3251                                          * truncate the file and free
3252                                          * the inode.
3253                                          */
3254                                         if (ip->i_d.di_mode == 0)
3255                                                 xfs_iput_new(ip, 0);
3256                                         else
3257                                                 VN_RELE(XFS_ITOV(ip));
3258                                 } else {
3259                                         /*
3260                                          * We can't read in the inode
3261                                          * this bucket points to, or
3262                                          * this inode is messed up.  Just
3263                                          * ditch this bucket of inodes.  We
3264                                          * will lose some inodes and space,
3265                                          * but at least we won't hang.  Call
3266                                          * xlog_recover_clear_agi_bucket()
3267                                          * to perform a transaction to clear
3268                                          * the inode pointer in the bucket.
3269                                          */
3270                                         xlog_recover_clear_agi_bucket(mp, agno,
3271                                                         bucket);
3272
3273                                         agino = NULLAGINO;
3274                                 }
3275
3276                                 /*
3277                                  * Reacquire the agi buffer and continue around
3278                                  * the loop.
3279                                  */
3280                                 agibp = xfs_buf_read(mp->m_ddev_targp,
3281                                                 XFS_AG_DADDR(mp, agno,
3282                                                         XFS_AGI_DADDR(mp)),
3283                                                 XFS_FSS_TO_BB(mp, 1), 0);
3284                                 if (XFS_BUF_ISERROR(agibp)) {
3285                                         xfs_ioerror_alert(
3286                                 "xlog_recover_process_iunlinks(#2)",
3287                                                 log->l_mp, agibp,
3288                                                 XFS_AG_DADDR(mp, agno,
3289                                                         XFS_AGI_DADDR(mp)));
3290                                 }
3291                                 agi = XFS_BUF_TO_AGI(agibp);
3292                                 ASSERT(XFS_AGI_MAGIC == be32_to_cpu(
3293                                         agi->agi_magicnum));
3294                         }
3295                 }
3296
3297                 /*
3298                  * Release the buffer for the current agi so we can
3299                  * go on to the next one.
3300                  */
3301                 xfs_buf_relse(agibp);
3302         }
3303
3304         mp->m_dmevmask = mp_dmevmask;
3305 }
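
     /*
      * Example walk of one bucket (inode numbers are made up): if
      * agi_unlinked[5] holds agino 17, inode 17 is read and its
      * di_next_unlinked (say 92) is noted before the reference is
      * dropped; dropping the last reference on a zero-linked inode
      * sends it through xfs_inactive(), which truncates and frees it
      * and unhooks it from the bucket.  The walk then moves to inode
      * 92 and stops when di_next_unlinked is NULLAGINO.
      */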
3306
3307
3308 #ifdef DEBUG
3309 STATIC void
3310 xlog_pack_data_checksum(
3311         xlog_t          *log,
3312         xlog_in_core_t  *iclog,
3313         int             size)
3314 {
3315         int             i;
3316         uint            *up;
3317         uint            chksum = 0;
3318
3319         up = (uint *)iclog->ic_datap;
3320         /* divide length by 4 to get # words */
3321         for (i = 0; i < (size >> 2); i++) {
3322                 chksum ^= INT_GET(*up, ARCH_CONVERT);
3323                 up++;
3324         }
3325         INT_SET(iclog->ic_header.h_chksum, ARCH_CONVERT, chksum);
3326 }
3327 #else
3328 #define xlog_pack_data_checksum(log, iclog, size)
3329 #endif
3330
3331 /*
3332  * Stamp cycle number in every block
3333  */
3334 void
3335 xlog_pack_data(
3336         xlog_t                  *log,
3337         xlog_in_core_t          *iclog,
3338         int                     roundoff)
3339 {
3340         int                     i, j, k;
3341         int                     size = iclog->ic_offset + roundoff;
3342         uint                    cycle_lsn;
3343         xfs_caddr_t             dp;
3344         xlog_in_core_2_t        *xhdr;
3345
3346         xlog_pack_data_checksum(log, iclog, size);
3347
3348         cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn);
3349
3350         dp = iclog->ic_datap;
3351         for (i = 0; i < BTOBB(size) &&
3352                 i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
3353                 iclog->ic_header.h_cycle_data[i] = *(uint *)dp;
3354                 *(uint *)dp = cycle_lsn;
3355                 dp += BBSIZE;
3356         }
3357
3358         if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
3359                 xhdr = (xlog_in_core_2_t *)&iclog->ic_header;
3360                 for ( ; i < BTOBB(size); i++) {
3361                         j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3362                         k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3363                         xhdr[j].hic_xheader.xh_cycle_data[k] = *(uint *)dp;
3364                         *(uint *)dp = cycle_lsn;
3365                         dp += BBSIZE;
3366                 }
3367
3368                 for (i = 1; i < log->l_iclog_heads; i++) {
3369                         xhdr[i].hic_xheader.xh_cycle = cycle_lsn;
3370                 }
3371         }
3372 }
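
     /*
      * Example of the stamping above: XLOG_HEADER_CYCLE_SIZE is 32k, so
      * one header covers 32k/512 = 64 basic blocks.  For a 32k iclog in,
      * say, cycle 5, the first word of each of its 64 data blocks is
      * parked in h_cycle_data[] and overwritten with cycle 5; larger v2
      * iclogs spill into the extended headers filled by the second loop.
      */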
3373
3374 #if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
3375 STATIC void
3376 xlog_unpack_data_checksum(
3377         xlog_rec_header_t       *rhead,
3378         xfs_caddr_t             dp,
3379         xlog_t                  *log)
3380 {
3381         uint                    *up = (uint *)dp;
3382         uint                    chksum = 0;
3383         int                     i;
3384
3385         /* divide length by 4 to get # words */
3386         for (i=0; i < INT_GET(rhead->h_len, ARCH_CONVERT) >> 2; i++) {
3387                 chksum ^= INT_GET(*up, ARCH_CONVERT);
3388                 up++;
3389         }
3390         if (chksum != INT_GET(rhead->h_chksum, ARCH_CONVERT)) {
3391             if (rhead->h_chksum ||
3392                 ((log->l_flags & XLOG_CHKSUM_MISMATCH) == 0)) {
3393                     cmn_err(CE_DEBUG,
3394                         "XFS: LogR chksum mismatch: was (0x%x) is (0x%x)\n",
3395                             INT_GET(rhead->h_chksum, ARCH_CONVERT), chksum);
3396                     cmn_err(CE_DEBUG,
3397 "XFS: Disregard message if filesystem was created with non-DEBUG kernel");
3398                     if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
3399                             cmn_err(CE_DEBUG,
3400                                 "XFS: LogR this is a LogV2 filesystem\n");
3401                     }
3402                     log->l_flags |= XLOG_CHKSUM_MISMATCH;
3403             }
3404         }
3405 }
3406 #else
3407 #define xlog_unpack_data_checksum(rhead, dp, log)
3408 #endif
3409
3410 STATIC void
3411 xlog_unpack_data(
3412         xlog_rec_header_t       *rhead,
3413         xfs_caddr_t             dp,
3414         xlog_t                  *log)
3415 {
3416         int                     i, j, k;
3417         xlog_in_core_2_t        *xhdr;
3418
3419         for (i = 0; i < BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT)) &&
3420                   i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
3421                 *(uint *)dp = *(uint *)&rhead->h_cycle_data[i];
3422                 dp += BBSIZE;
3423         }
3424
3425         if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
3426                 xhdr = (xlog_in_core_2_t *)rhead;
3427                 for ( ; i < BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT)); i++) {
3428                         j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3429                         k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3430                         *(uint *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
3431                         dp += BBSIZE;
3432                 }
3433         }
3434
3435         xlog_unpack_data_checksum(rhead, dp, log);
3436 }
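
     #if 0
     /*
      * Minimal sketch, never built, of the pack/unpack round trip
      * implemented by the two routines above, for a record that fits in
      * one header's worth of blocks.  example_cycle_roundtrip() and its
      * arguments are invented for illustration.
      */
     static void
     example_cycle_roundtrip(
             xfs_caddr_t     data,
             int             nbblks,
             uint            cycle_lsn)
     {
             uint            saved[XLOG_HEADER_CYCLE_SIZE / BBSIZE];
             int             i;

             /* pack: save each block's first word, stamp in the cycle */
             for (i = 0; i < nbblks; i++) {
                     saved[i] = *(uint *)(data + BBTOB(i));
                     *(uint *)(data + BBTOB(i)) = cycle_lsn;
             }

             /* every block on disk now begins with the cycle number */

             /* unpack: restore the saved words before parsing the record */
             for (i = 0; i < nbblks; i++)
                     *(uint *)(data + BBTOB(i)) = saved[i];
     }
     #endif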
3437
3438 STATIC int
3439 xlog_valid_rec_header(
3440         xlog_t                  *log,
3441         xlog_rec_header_t       *rhead,
3442         xfs_daddr_t             blkno)
3443 {
3444         int                     hlen;
3445
3446         if (unlikely(
3447             (INT_GET(rhead->h_magicno, ARCH_CONVERT) !=
3448                         XLOG_HEADER_MAGIC_NUM))) {
3449                 XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
3450                                 XFS_ERRLEVEL_LOW, log->l_mp);
3451                 return XFS_ERROR(EFSCORRUPTED);
3452         }
3453         if (unlikely(
3454             (!rhead->h_version ||
3455             (INT_GET(rhead->h_version, ARCH_CONVERT) &
3456                         (~XLOG_VERSION_OKBITS)) != 0))) {
3457                 xlog_warn("XFS: %s: unrecognised log version (%d).",
3458                         __FUNCTION__, INT_GET(rhead->h_version, ARCH_CONVERT));
3459                 return XFS_ERROR(EIO);
3460         }
3461
3462         /* LR body must have data or it wouldn't have been written */
3463         hlen = INT_GET(rhead->h_len, ARCH_CONVERT);
3464         if (unlikely(hlen <= 0 || hlen > INT_MAX)) {
3465                 XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
3466                                 XFS_ERRLEVEL_LOW, log->l_mp);
3467                 return XFS_ERROR(EFSCORRUPTED);
3468         }
3469         if (unlikely(blkno > log->l_logBBsize || blkno > INT_MAX)) {
3470                 XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
3471                                 XFS_ERRLEVEL_LOW, log->l_mp);
3472                 return XFS_ERROR(EFSCORRUPTED);
3473         }
3474         return 0;
3475 }
3476
3477 /*
3478  * Read the log from tail to head and process the log records found.
3479  * Handle the two cases where the tail and head are in the same cycle
3480  * and where the active portion of the log wraps around the end of
3481  * the physical log separately.  The pass parameter is passed through
3482  * to the routines called to process the data and is not looked at
3483  * here.
3484  */
3485 STATIC int
3486 xlog_do_recovery_pass(
3487         xlog_t                  *log,
3488         xfs_daddr_t             head_blk,
3489         xfs_daddr_t             tail_blk,
3490         int                     pass)
3491 {
3492         xlog_rec_header_t       *rhead;
3493         xfs_daddr_t             blk_no;
3494         xfs_caddr_t             bufaddr, offset;
3495         xfs_buf_t               *hbp, *dbp;
3496         int                     error = 0, h_size;
3497         int                     bblks, split_bblks;
3498         int                     hblks, split_hblks, wrapped_hblks;
3499         xlog_recover_t          *rhash[XLOG_RHASH_SIZE];
3500
3501         ASSERT(head_blk != tail_blk);
3502
3503         /*
3504          * Read the header of the tail block and get the iclog buffer size from
3505          * h_size.  Use this to tell how many sectors make up the log header.
3506          */
3507         if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
3508                 /*
3509                  * When using variable length iclogs, read first sector of
3510                  * iclog header and extract the header size from it.  Get a
3511                  * new hbp that is the correct size.
3512                  */
3513                 hbp = xlog_get_bp(log, 1);
3514                 if (!hbp)
3515                         return ENOMEM;
3516                 if ((error = xlog_bread(log, tail_blk, 1, hbp)))
3517                         goto bread_err1;
3518                 offset = xlog_align(log, tail_blk, 1, hbp);
3519                 rhead = (xlog_rec_header_t *)offset;
3520                 error = xlog_valid_rec_header(log, rhead, tail_blk);
3521                 if (error)
3522                         goto bread_err1;
3523                 h_size = INT_GET(rhead->h_size, ARCH_CONVERT);
3524                 if ((INT_GET(rhead->h_version, ARCH_CONVERT)
3525                                 & XLOG_VERSION_2) &&
3526                     (h_size > XLOG_HEADER_CYCLE_SIZE)) {
3527                         hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
3528                         if (h_size % XLOG_HEADER_CYCLE_SIZE)
3529                                 hblks++;
3530                         xlog_put_bp(hbp);
3531                         hbp = xlog_get_bp(log, hblks);
3532                 } else {
3533                         hblks = 1;
3534                 }
3535         } else {
3536                 ASSERT(log->l_sectbb_log == 0);
3537                 hblks = 1;
3538                 hbp = xlog_get_bp(log, 1);
3539                 h_size = XLOG_BIG_RECORD_BSIZE;
3540         }
3541
3542         if (!hbp)
3543                 return ENOMEM;
3544         dbp = xlog_get_bp(log, BTOBB(h_size));
3545         if (!dbp) {
3546                 xlog_put_bp(hbp);
3547                 return ENOMEM;
3548         }
3549
3550         memset(rhash, 0, sizeof(rhash));
3551         if (tail_blk <= head_blk) {
3552                 for (blk_no = tail_blk; blk_no < head_blk; ) {
3553                         if ((error = xlog_bread(log, blk_no, hblks, hbp)))
3554                                 goto bread_err2;
3555                         offset = xlog_align(log, blk_no, hblks, hbp);
3556                         rhead = (xlog_rec_header_t *)offset;
3557                         error = xlog_valid_rec_header(log, rhead, blk_no);
3558                         if (error)
3559                                 goto bread_err2;
3560
3561                         /* blocks in data section */
3562                         bblks = (int)BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT));
3563                         error = xlog_bread(log, blk_no + hblks, bblks, dbp);
3564                         if (error)
3565                                 goto bread_err2;
3566                         offset = xlog_align(log, blk_no + hblks, bblks, dbp);
3567                         xlog_unpack_data(rhead, offset, log);
3568                         if ((error = xlog_recover_process_data(log,
3569                                                 rhash, rhead, offset, pass)))
3570                                 goto bread_err2;
3571                         blk_no += bblks + hblks;
3572                 }
3573         } else {
3574                 /*
3575                  * Perform recovery around the end of the physical log.
3576                  * When the head is not on the same cycle number as the tail,
3577                  * we can't do a sequential recovery as above.
3578                  */
3579                 blk_no = tail_blk;
3580                 while (blk_no < log->l_logBBsize) {
3581                         /*
3582                          * Check for header wrapping around physical end-of-log
3583                          */
3584                         offset = NULL;
3585                         split_hblks = 0;
3586                         wrapped_hblks = 0;
3587                         if (blk_no + hblks <= log->l_logBBsize) {
3588                                 /* Read header in one read */
3589                                 error = xlog_bread(log, blk_no, hblks, hbp);
3590                                 if (error)
3591                                         goto bread_err2;
3592                                 offset = xlog_align(log, blk_no, hblks, hbp);
3593                         } else {
3594                                 /* This LR is split across physical log end */
3595                                 if (blk_no != log->l_logBBsize) {
3596                                         /* some data before physical log end */
3597                                         ASSERT(blk_no <= INT_MAX);
3598                                         split_hblks = log->l_logBBsize - (int)blk_no;
3599                                         ASSERT(split_hblks > 0);
3600                                         if ((error = xlog_bread(log, blk_no,
3601                                                         split_hblks, hbp)))
3602                                                 goto bread_err2;
3603                                         offset = xlog_align(log, blk_no,
3604                                                         split_hblks, hbp);
3605                                 }
3606                                 /*
3607                                  * Note: this black magic still works with
3608                                  * large sector sizes (non-512) only because:
3609                                  * - we increased the buffer size originally
3610                                  *   by 1 sector giving us enough extra space
3611                                  *   for the second read;
3612                                  * - the log start is guaranteed to be sector
3613                                  *   aligned;
3614                                  * - we read the log end (LR header start)
3615                                  *   _first_, then the log start (LR header end)
3616                                  *   - order is important.
3617                                  */
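                                     /*
                                      * Worked example with made-up numbers:
                                      * l_logBBsize = 1000, blk_no = 998,
                                      * hblks = 4.  split_hblks = 2: blocks
                                      * 998-999 are read into the front of
                                      * hbp, then wrapped_hblks = 2 blocks
                                      * (0-1) are read at bufaddr + BBTOB(2),
                                      * leaving one contiguous 4 block header
                                      * image at the original buffer address.
                                      */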
3618                                 bufaddr = XFS_BUF_PTR(hbp);
3619                                 XFS_BUF_SET_PTR(hbp,
3620                                                 bufaddr + BBTOB(split_hblks),
3621                                                 BBTOB(hblks - split_hblks));
3622                                 wrapped_hblks = hblks - split_hblks;
3623                                 error = xlog_bread(log, 0, wrapped_hblks, hbp);
3624                                 if (error)
3625                                         goto bread_err2;
3626                                 XFS_BUF_SET_PTR(hbp, bufaddr, BBTOB(hblks));
3627                                 if (!offset)
3628                                         offset = xlog_align(log, 0,
3629                                                         wrapped_hblks, hbp);
3630                         }
3631                         rhead = (xlog_rec_header_t *)offset;
3632                         error = xlog_valid_rec_header(log, rhead,
3633                                                 split_hblks ? blk_no : 0);
3634                         if (error)
3635                                 goto bread_err2;
3636
3637                         bblks = (int)BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT));
3638                         blk_no += hblks;
3639
3640                         /* Read in data for log record */
3641                         if (blk_no + bblks <= log->l_logBBsize) {
3642                                 error = xlog_bread(log, blk_no, bblks, dbp);
3643                                 if (error)
3644                                         goto bread_err2;
3645                                 offset = xlog_align(log, blk_no, bblks, dbp);
3646                         } else {
3647                                 /* This log record is split across the
3648                                  * physical end of log */
3649                                 offset = NULL;
3650                                 split_bblks = 0;
3651                                 if (blk_no != log->l_logBBsize) {
3652                                         /* some data is before the physical
3653                                          * end of log */
3654                                         ASSERT(!wrapped_hblks);
3655                                         ASSERT(blk_no <= INT_MAX);
3656                                         split_bblks =
3657                                                 log->l_logBBsize - (int)blk_no;
3658                                         ASSERT(split_bblks > 0);
3659                                         if ((error = xlog_bread(log, blk_no,
3660                                                         split_bblks, dbp)))
3661                                                 goto bread_err2;
3662                                         offset = xlog_align(log, blk_no,
3663                                                         split_bblks, dbp);
3664                                 }
3665                                 /*
3666                                  * Note: this black magic still works with
3667                                  * large sector sizes (non-512) only because:
3668                                  * - we increased the buffer size originally
3669                                  *   by 1 sector giving us enough extra space
3670                                  *   for the second read;
3671                                  * - the log start is guaranteed to be sector
3672                                  *   aligned;
3673                                  * - we read the log end (LR header start)
3674                                  *   _first_, then the log start (LR header end)
3675                                  *   - order is important.
3676                                  */
3677                                 bufaddr = XFS_BUF_PTR(dbp);
3678                                 XFS_BUF_SET_PTR(dbp,
3679                                                 bufaddr + BBTOB(split_bblks),
3680                                                 BBTOB(bblks - split_bblks));
3681                                 if ((error = xlog_bread(log, wrapped_hblks,
3682                                                 bblks - split_bblks, dbp)))
3683                                         goto bread_err2;
3684                                 XFS_BUF_SET_PTR(dbp, bufaddr, h_size);
3685                                 if (!offset)
3686                                         offset = xlog_align(log, wrapped_hblks,
3687                                                 bblks - split_bblks, dbp);
3688                         }
3689                         xlog_unpack_data(rhead, offset, log);
3690                         if ((error = xlog_recover_process_data(log, rhash,
3691                                                         rhead, offset, pass)))
3692                                 goto bread_err2;
3693                         blk_no += bblks;
3694                 }
3695
3696                 ASSERT(blk_no >= log->l_logBBsize);
3697                 blk_no -= log->l_logBBsize;
3698
3699                 /* read first part of physical log */
3700                 while (blk_no < head_blk) {
3701                         if ((error = xlog_bread(log, blk_no, hblks, hbp)))
3702                                 goto bread_err2;
3703                         offset = xlog_align(log, blk_no, hblks, hbp);
3704                         rhead = (xlog_rec_header_t *)offset;
3705                         error = xlog_valid_rec_header(log, rhead, blk_no);
3706                         if (error)
3707                                 goto bread_err2;
3708                         bblks = (int)BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT));
3709                         if ((error = xlog_bread(log, blk_no+hblks, bblks, dbp)))
3710                                 goto bread_err2;
3711                         offset = xlog_align(log, blk_no+hblks, bblks, dbp);
3712                         xlog_unpack_data(rhead, offset, log);
3713                         if ((error = xlog_recover_process_data(log, rhash,
3714                                                         rhead, offset, pass)))
3715                                 goto bread_err2;
3716                         blk_no += bblks + hblks;
3717                 }
3718         }
3719
3720  bread_err2:
3721         xlog_put_bp(dbp);
3722  bread_err1:
3723         xlog_put_bp(hbp);
3724         return error;
3725 }
3726
3727 /*
3728  * Do the recovery of the log.  We actually do this in two phases.
3729  * The two passes are necessary in order to implement the function
3730  * of cancelling a record written into the log.  The first pass
3731  * determines those things which have been cancelled, and the
3732  * second pass replays log items normally except for those which
3733  * have been cancelled.  The handling of the replay and cancellations
3734  * takes place in the log item type specific routines.
3735  *
3736  * The table of items which have cancel records in the log is allocated
3737  * and freed at this level, since only here do we know when all of
3738  * the log recovery has been completed.
3739  */
3740 STATIC int
3741 xlog_do_log_recovery(
3742         xlog_t          *log,
3743         xfs_daddr_t     head_blk,
3744         xfs_daddr_t     tail_blk)
3745 {
3746         int             error;
3747
3748         ASSERT(head_blk != tail_blk);
3749
3750         /*
3751          * First do a pass to find all of the cancelled buf log items.
3752          * Store them in the buf_cancel_table for use in the second pass.
3753          */
3754         log->l_buf_cancel_table =
3755                 (xfs_buf_cancel_t **)kmem_zalloc(XLOG_BC_TABLE_SIZE *
3756                                                  sizeof(xfs_buf_cancel_t*),
3757                                                  KM_SLEEP);
3758         error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3759                                       XLOG_RECOVER_PASS1);
3760         if (error != 0) {
3761                 kmem_free(log->l_buf_cancel_table,
3762                           XLOG_BC_TABLE_SIZE * sizeof(xfs_buf_cancel_t*));
3763                 log->l_buf_cancel_table = NULL;
3764                 return error;
3765         }
3766         /*
3767          * Then do a second pass to actually recover the items in the log.
3768          * When it is complete free the table of buf cancel items.
3769          */
3770         error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3771                                       XLOG_RECOVER_PASS2);
3772 #ifdef DEBUG
3773         if (!error) {
3774                 int     i;
3775
3776                 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
3777                         ASSERT(log->l_buf_cancel_table[i] == NULL);
3778         }
3779 #endif  /* DEBUG */
3780
3781         kmem_free(log->l_buf_cancel_table,
3782                   XLOG_BC_TABLE_SIZE * sizeof(xfs_buf_cancel_t*));
3783         log->l_buf_cancel_table = NULL;
3784
3785         return error;
3786 }
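
     #if 0
     /*
      * Hypothetical sketch, never built, of the cancellation scheme the
      * two passes implement.  Every name below is invented for
      * illustration; the kernel's real table is the blkno-hashed
      * l_buf_cancel_table consulted by the buffer item replay code.
      */
     #define EXAMPLE_TABLE_SIZE      64
     #define EXAMPLE_POOL_SIZE       16

     struct example_cancel {
             xfs_daddr_t             blkno;  /* start of cancelled buffer */
             struct example_cancel   *next;
     };

     static struct example_cancel    *example_table[EXAMPLE_TABLE_SIZE];
     static struct example_cancel    example_pool[EXAMPLE_POOL_SIZE];
     static int                      example_used;

     /* pass 1: remember every buffer named by a cancel record */
     static void
     example_pass1_note_cancel(
             xfs_daddr_t     blkno)
     {
             struct example_cancel   *ec;

             if (example_used >= EXAMPLE_POOL_SIZE)
                     return;
             ec = &example_pool[example_used++];
             ec->blkno = blkno;
             ec->next = example_table[blkno % EXAMPLE_TABLE_SIZE];
             example_table[blkno % EXAMPLE_TABLE_SIZE] = ec;
     }

     /* pass 2: replay a buffer only if pass 1 did not cancel it */
     static int
     example_pass2_cancelled(
             xfs_daddr_t     blkno)
     {
             struct example_cancel   *ec;

             for (ec = example_table[blkno % EXAMPLE_TABLE_SIZE]; ec != NULL;
                  ec = ec->next) {
                     if (ec->blkno == blkno)
                             return 1;
             }
             return 0;
     }
     #endif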
3787
3788 /*
3789  * Do the actual recovery
3790  */
3791 STATIC int
3792 xlog_do_recover(
3793         xlog_t          *log,
3794         xfs_daddr_t     head_blk,
3795         xfs_daddr_t     tail_blk)
3796 {
3797         int             error;
3798         xfs_buf_t       *bp;
3799         xfs_sb_t        *sbp;
3800
3801         /*
3802          * First replay the images in the log.
3803          */
3804         error = xlog_do_log_recovery(log, head_blk, tail_blk);
3805         if (error) {
3806                 return error;
3807         }
3808
3809         XFS_bflush(log->l_mp->m_ddev_targp);
3810
3811         /*
3812          * If IO errors happened during recovery, bail out.
3813          */
3814         if (XFS_FORCED_SHUTDOWN(log->l_mp)) {
3815                 return (EIO);
3816         }
3817
3818         /*
3819          * We now update the tail_lsn since much of the recovery has completed
3820          * and there may be space available to use.  If there were no extent
3821          * frees or iunlinks, we can free up the entire log and set the tail_lsn to
3822          * be the last_sync_lsn.  This was set in xlog_find_tail to be the
3823          * lsn of the last known good LR on disk.  If there are extent frees
3824          * or iunlinks they will have some entries in the AIL; so we look at
3825          * the AIL to determine how to set the tail_lsn.
3826          */
3827         xlog_assign_tail_lsn(log->l_mp);
3828
3829         /*
3830          * Now that we've finished replaying all buffer and inode
3831          * updates, re-read in the superblock.
3832          */
3833         bp = xfs_getsb(log->l_mp, 0);
3834         XFS_BUF_UNDONE(bp);
3835         ASSERT(!(XFS_BUF_ISWRITE(bp)));
3836         ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
3837         XFS_BUF_READ(bp);
3838         XFS_BUF_UNASYNC(bp);
3839         xfsbdstrat(log->l_mp, bp);
3840         if ((error = xfs_iowait(bp))) {
3841                 xfs_ioerror_alert("xlog_do_recover",
3842                                   log->l_mp, bp, XFS_BUF_ADDR(bp));
3843                 ASSERT(0);
3844                 xfs_buf_relse(bp);
3845                 return error;
3846         }
3847
3848         /* Convert superblock from on-disk format */
3849         sbp = &log->l_mp->m_sb;
3850         xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
3851         ASSERT(sbp->sb_magicnum == XFS_SB_MAGIC);
3852         ASSERT(XFS_SB_GOOD_VERSION(sbp));
3853         xfs_buf_relse(bp);
3854
3855         /* We've re-read the superblock so re-initialize per-cpu counters */
3856         xfs_icsb_reinit_counters(log->l_mp);
3857
3858         xlog_recover_check_summary(log);
3859
3860         /* Normal transactions can now occur */
3861         log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
3862         return 0;
3863 }

/*
 * Perform recovery and re-initialize some log variables in xlog_find_tail.
 *
 * Return error or zero.
 */
int
xlog_recover(
        xlog_t          *log)
{
        xfs_daddr_t     head_blk, tail_blk;
        int             error;

        /* find the tail of the log */
        if ((error = xlog_find_tail(log, &head_blk, &tail_blk)))
                return error;

        /*
         * Only recover when the log is dirty, i.e. the head and tail
         * point at different blocks.
         */
        if (tail_blk != head_blk) {
                /*
                 * There used to be a comment here:
                 *
                 *      disallow recovery on read-only mounts.  note --
                 *      mount checks for ENOSPC and turns it into an
                 *      intelligent error message.
                 *
                 * ...but this is no longer true.  Now, unless you specify
                 * NORECOVERY (in which case this function would never be
                 * called), we just go ahead and recover.  We do this all
                 * under the vfs layer, so we can get away with it unless
                 * the device itself is read-only, in which case we fail.
                 */
                if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
                        return error;
                }

                cmn_err(CE_NOTE,
                        "Starting XFS recovery on filesystem: %s (logdev: %s)",
                        log->l_mp->m_fsname, log->l_mp->m_logname ?
                        log->l_mp->m_logname : "internal");

                error = xlog_do_recover(log, head_blk, tail_blk);
                log->l_flags |= XLOG_RECOVERY_NEEDED;
        }
        return error;
}
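
/*
 * Sketch of a stage-1 call site (illustrative, not from this file):
 * a mount path would only invoke xlog_recover() when the admin has not
 * asked for norecovery.  The flag test mirrors the XFS_MOUNT_NORECOVERY
 * handling in the log mount path; error handling is elided.
 */
STATIC int
xlog_recover_stage1_sketch(
        xfs_mount_t     *mp)
{
        int             error = 0;

        if (!(mp->m_flags & XFS_MOUNT_NORECOVERY))
                error = xlog_recover(mp->m_log);
        return error;
}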

/*
 * In the first part of recovery we replay inodes and buffers and build
 * up the list of extent free items which need to be processed.  Here
 * we process the extent free items and clean up the on-disk unlinked
 * inode lists.  This is separated from the first part of recovery so
 * that the root and real-time bitmap inodes can be read in from disk in
 * between the two stages.  This is necessary so that we can free space
 * in the real-time portion of the file system.  (A sketch of how a
 * mount path might sequence the two stages follows this function.)
 */
int
xlog_recover_finish(
        xlog_t          *log,
        int             mfsi_flags)
{
        /*
         * Now we're ready to do the transactions needed for the
         * rest of recovery.  Start with completing all the extent
         * free intent records and then process the unlinked inode
         * lists.  At this point, we essentially run in normal mode
         * except that we're still performing recovery actions
         * rather than accepting new requests.
         */
        if (log->l_flags & XLOG_RECOVERY_NEEDED) {
                xlog_recover_process_efis(log);
                /*
                 * Sync the log to get all the EFIs out of the AIL.
                 * This isn't absolutely necessary, but it helps in
                 * case the unlink transactions would have problems
                 * pushing the EFIs out of the way.
                 */
                xfs_log_force(log->l_mp, (xfs_lsn_t)0,
                              (XFS_LOG_FORCE | XFS_LOG_SYNC));

                if ((mfsi_flags & XFS_MFSI_NOUNLINK) == 0) {
                        xlog_recover_process_iunlinks(log);
                }

                xlog_recover_check_summary(log);

                cmn_err(CE_NOTE,
                        "Ending XFS recovery on filesystem: %s (logdev: %s)",
                        log->l_mp->m_fsname, log->l_mp->m_logname ?
                        log->l_mp->m_logname : "internal");
                log->l_flags &= ~XLOG_RECOVERY_NEEDED;
        } else {
                cmn_err(CE_DEBUG,
                        "!Ending clean XFS mount for filesystem: %s\n",
                        log->l_mp->m_fsname);
        }
        return 0;
}
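
/*
 * Illustrative sketch (not part of the original source): how a mount
 * path might sequence the two recovery stages around reading in the
 * root and realtime bitmap inodes, per the comment above.  The helper
 * xfs_read_root_and_rtbitmap_inodes() is hypothetical; the real work
 * is spread across the mount code.
 */
extern int xfs_read_root_and_rtbitmap_inodes(xfs_mount_t *);    /* hypothetical */

STATIC int
xfs_mount_recover_sketch(
        xfs_mount_t     *mp,
        int             mfsi_flags)
{
        int             error;

        /* stage 1: replay buffer/inode changes, note EFIs and iunlinks */
        error = xlog_recover(mp->m_log);
        if (error)
                return error;

        /* read root and rt bitmap inodes so rt space can be freed */
        error = xfs_read_root_and_rtbitmap_inodes(mp);  /* hypothetical */
        if (error)
                return error;

        /* stage 2: process EFIs and clean up the unlinked inode lists */
        return xlog_recover_finish(mp->m_log, mfsi_flags);
}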


#if defined(DEBUG)
/*
 * Read all of the agf and agi counters and check that they
 * are consistent with the superblock counters.
 */
void
xlog_recover_check_summary(
        xlog_t          *log)
{
        xfs_mount_t     *mp;
        xfs_agf_t       *agfp;
        xfs_agi_t       *agip;
        xfs_buf_t       *agfbp;
        xfs_buf_t       *agibp;
        xfs_daddr_t     agfdaddr;
        xfs_daddr_t     agidaddr;
        xfs_buf_t       *sbbp;
#ifdef XFS_LOUD_RECOVERY
        xfs_sb_t        *sbp;
#endif
        xfs_agnumber_t  agno;
        __uint64_t      freeblks;
        __uint64_t      itotal;
        __uint64_t      ifree;

        mp = log->l_mp;

        freeblks = 0LL;
        itotal = 0LL;
        ifree = 0LL;
        for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
                agfdaddr = XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp));
                agfbp = xfs_buf_read(mp->m_ddev_targp, agfdaddr,
                                XFS_FSS_TO_BB(mp, 1), 0);
                if (XFS_BUF_ISERROR(agfbp)) {
                        xfs_ioerror_alert("xlog_recover_check_summary(agf)",
                                                mp, agfbp, agfdaddr);
                }
                agfp = XFS_BUF_TO_AGF(agfbp);
                ASSERT(XFS_AGF_MAGIC == be32_to_cpu(agfp->agf_magicnum));
                ASSERT(XFS_AGF_GOOD_VERSION(be32_to_cpu(agfp->agf_versionnum)));
                ASSERT(be32_to_cpu(agfp->agf_seqno) == agno);

                /* free extents plus blocks held in the AG free list */
                freeblks += be32_to_cpu(agfp->agf_freeblks) +
                            be32_to_cpu(agfp->agf_flcount);
                xfs_buf_relse(agfbp);

                agidaddr = XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp));
                agibp = xfs_buf_read(mp->m_ddev_targp, agidaddr,
                                XFS_FSS_TO_BB(mp, 1), 0);
                if (XFS_BUF_ISERROR(agibp)) {
                        xfs_ioerror_alert("xlog_recover_check_summary(agi)",
                                          mp, agibp, agidaddr);
                }
                agip = XFS_BUF_TO_AGI(agibp);
                ASSERT(XFS_AGI_MAGIC == be32_to_cpu(agip->agi_magicnum));
                ASSERT(XFS_AGI_GOOD_VERSION(be32_to_cpu(agip->agi_versionnum)));
                ASSERT(be32_to_cpu(agip->agi_seqno) == agno);

                itotal += be32_to_cpu(agip->agi_count);
                ifree += be32_to_cpu(agip->agi_freecount);
                xfs_buf_relse(agibp);
        }

        sbbp = xfs_getsb(mp, 0);
#ifdef XFS_LOUD_RECOVERY
        sbp = &mp->m_sb;
        xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(sbbp));
        cmn_err(CE_NOTE,
                "xlog_recover_check_summary: sb_icount %Lu itotal %Lu",
                sbp->sb_icount, itotal);
        cmn_err(CE_NOTE,
                "xlog_recover_check_summary: sb_ifree %Lu ifree %Lu",
                sbp->sb_ifree, ifree);
        cmn_err(CE_NOTE,
                "xlog_recover_check_summary: sb_fdblocks %Lu freeblks %Lu",
                sbp->sb_fdblocks, freeblks);
#if 0
        /*
         * This is turned off until I account for the allocation
         * btree blocks which live in free space.
         */
        ASSERT(sbp->sb_icount == itotal);
        ASSERT(sbp->sb_ifree == ifree);
        ASSERT(sbp->sb_fdblocks == freeblks);
#endif
#endif
        xfs_buf_relse(sbbp);
}
#endif /* DEBUG */