[XFS] Move the AIL lock into the struct xfs_ail
fs/xfs/xfs_log_recover.c
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_imap.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_log_priv.h"
#include "xfs_buf_item.h"
#include "xfs_log_recover.h"
#include "xfs_extfree_item.h"
#include "xfs_trans_priv.h"
#include "xfs_quota.h"
#include "xfs_rw.h"
#include "xfs_utils.h"

STATIC int      xlog_find_zeroed(xlog_t *, xfs_daddr_t *);
STATIC int      xlog_clear_stale_blocks(xlog_t *, xfs_lsn_t);
STATIC void     xlog_recover_insert_item_backq(xlog_recover_item_t **q,
                                               xlog_recover_item_t *item);
#if defined(DEBUG)
STATIC void     xlog_recover_check_summary(xlog_t *);
#else
#define xlog_recover_check_summary(log)
#endif


/*
 * Sector aligned buffer routines for buffer create/read/write/access
 */

#define XLOG_SECTOR_ROUNDUP_BBCOUNT(log, bbs)   \
        ( ((log)->l_sectbb_mask && (bbs & (log)->l_sectbb_mask)) ? \
        ((bbs + (log)->l_sectbb_mask + 1) & ~(log)->l_sectbb_mask) : (bbs) )
#define XLOG_SECTOR_ROUNDDOWN_BLKNO(log, bno)   ((bno) & ~(log)->l_sectbb_mask)
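/*
 * Example: with 4k sectors there are eight 512-byte basic blocks per
 * sector, so l_sectbb_mask is 7.  XLOG_SECTOR_ROUNDUP_BBCOUNT(log, 9)
 * evaluates to (9 + 7 + 1) & ~7 = 16, an already-aligned count such as
 * 8 comes back unchanged, and XLOG_SECTOR_ROUNDDOWN_BLKNO() clears the
 * low bits, e.g. block 13 rounds down to block 8.
 */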

xfs_buf_t *
xlog_get_bp(
        xlog_t          *log,
        int             num_bblks)
{
        ASSERT(num_bblks > 0);

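        /*
         * On log devices with sector size > 512 bytes, xlog_bread() rounds
         * the start block down and the length up to sector boundaries, so
         * allow one extra sector of slack for a multi-block buffer before
         * rounding the total up to a whole number of sectors.
         */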
        if (log->l_sectbb_log) {
                if (num_bblks > 1)
                        num_bblks += XLOG_SECTOR_ROUNDUP_BBCOUNT(log, 1);
                num_bblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, num_bblks);
        }
        return xfs_buf_get_noaddr(BBTOB(num_bblks), log->l_mp->m_logdev_targp);
}

void
xlog_put_bp(
        xfs_buf_t       *bp)
{
        xfs_buf_free(bp);
}


/*
 * nbblks should be uint, but oh well.  Just want to catch that 32-bit length.
 */
int
xlog_bread(
        xlog_t          *log,
        xfs_daddr_t     blk_no,
        int             nbblks,
        xfs_buf_t       *bp)
{
        int             error;

        if (log->l_sectbb_log) {
                blk_no = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, blk_no);
                nbblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, nbblks);
        }

        ASSERT(nbblks > 0);
        ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp));
        ASSERT(bp);

        XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
        XFS_BUF_READ(bp);
        XFS_BUF_BUSY(bp);
        XFS_BUF_SET_COUNT(bp, BBTOB(nbblks));
        XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp);

        xfsbdstrat(log->l_mp, bp);
        error = xfs_iowait(bp);
        if (error)
                xfs_ioerror_alert("xlog_bread", log->l_mp,
                                  bp, XFS_BUF_ADDR(bp));
        return error;
}

/*
 * Write out the buffer at the given block for the given number of blocks.
 * The buffer is kept locked across the write and is returned locked.
 * This can only be used for synchronous log writes.
 */
STATIC int
xlog_bwrite(
        xlog_t          *log,
        xfs_daddr_t     blk_no,
        int             nbblks,
        xfs_buf_t       *bp)
{
        int             error;

        if (log->l_sectbb_log) {
                blk_no = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, blk_no);
                nbblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, nbblks);
        }

        ASSERT(nbblks > 0);
        ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp));

        XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
        XFS_BUF_ZEROFLAGS(bp);
        XFS_BUF_BUSY(bp);
        XFS_BUF_HOLD(bp);
        XFS_BUF_PSEMA(bp, PRIBIO);
        XFS_BUF_SET_COUNT(bp, BBTOB(nbblks));
        XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp);

        if ((error = xfs_bwrite(log->l_mp, bp)))
                xfs_ioerror_alert("xlog_bwrite", log->l_mp,
                                  bp, XFS_BUF_ADDR(bp));
        return error;
}

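/*
 * xlog_bread() may have rounded the requested start block down to a
 * sector boundary; xlog_align() returns the address of the caller's
 * block within the (possibly larger) buffer that was actually read.
 */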
STATIC xfs_caddr_t
xlog_align(
        xlog_t          *log,
        xfs_daddr_t     blk_no,
        int             nbblks,
        xfs_buf_t       *bp)
{
        xfs_caddr_t     ptr;

        if (!log->l_sectbb_log)
                return XFS_BUF_PTR(bp);

        ptr = XFS_BUF_PTR(bp) + BBTOB((int)blk_no & log->l_sectbb_mask);
        ASSERT(XFS_BUF_SIZE(bp) >=
                BBTOB(nbblks + (blk_no & log->l_sectbb_mask)));
        return ptr;
}

#ifdef DEBUG
/*
 * dump debug superblock and log record information
 */
STATIC void
xlog_header_check_dump(
        xfs_mount_t             *mp,
        xlog_rec_header_t       *head)
{
        int                     b;

        cmn_err(CE_DEBUG, "%s:  SB : uuid = ", __func__);
        for (b = 0; b < 16; b++)
                cmn_err(CE_DEBUG, "%02x", ((uchar_t *)&mp->m_sb.sb_uuid)[b]);
        cmn_err(CE_DEBUG, ", fmt = %d\n", XLOG_FMT);
        cmn_err(CE_DEBUG, "    log : uuid = ");
        for (b = 0; b < 16; b++)
                cmn_err(CE_DEBUG, "%02x",((uchar_t *)&head->h_fs_uuid)[b]);
        cmn_err(CE_DEBUG, ", fmt = %d\n", be32_to_cpu(head->h_fmt));
}
#else
#define xlog_header_check_dump(mp, head)
#endif

/*
 * check log record header for recovery
 */
STATIC int
xlog_header_check_recover(
        xfs_mount_t             *mp,
        xlog_rec_header_t       *head)
{
        ASSERT(be32_to_cpu(head->h_magicno) == XLOG_HEADER_MAGIC_NUM);

        /*
         * IRIX doesn't write the h_fmt field and leaves it zeroed
         * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
         * a dirty log created in IRIX.
         */
        if (unlikely(be32_to_cpu(head->h_fmt) != XLOG_FMT)) {
                xlog_warn(
        "XFS: dirty log written in incompatible format - can't recover");
                xlog_header_check_dump(mp, head);
                XFS_ERROR_REPORT("xlog_header_check_recover(1)",
                                 XFS_ERRLEVEL_HIGH, mp);
                return XFS_ERROR(EFSCORRUPTED);
        } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
                xlog_warn(
        "XFS: dirty log entry has mismatched uuid - can't recover");
                xlog_header_check_dump(mp, head);
                XFS_ERROR_REPORT("xlog_header_check_recover(2)",
                                 XFS_ERRLEVEL_HIGH, mp);
                return XFS_ERROR(EFSCORRUPTED);
        }
        return 0;
}

/*
 * read the head block of the log and check the header
 */
STATIC int
xlog_header_check_mount(
        xfs_mount_t             *mp,
        xlog_rec_header_t       *head)
{
        ASSERT(be32_to_cpu(head->h_magicno) == XLOG_HEADER_MAGIC_NUM);

        if (uuid_is_nil(&head->h_fs_uuid)) {
                /*
                 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
                 * h_fs_uuid is nil, we assume this log was last mounted
                 * by IRIX and continue.
                 */
                xlog_warn("XFS: nil uuid in log - IRIX style log");
        } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
                xlog_warn("XFS: log has mismatched uuid - can't recover");
                xlog_header_check_dump(mp, head);
                XFS_ERROR_REPORT("xlog_header_check_mount",
                                 XFS_ERRLEVEL_HIGH, mp);
                return XFS_ERROR(EFSCORRUPTED);
        }
        return 0;
}

STATIC void
xlog_recover_iodone(
        struct xfs_buf  *bp)
{
        xfs_mount_t     *mp;

        ASSERT(XFS_BUF_FSPRIVATE(bp, void *));

        if (XFS_BUF_GETERROR(bp)) {
                /*
                 * We're not going to bother about retrying
                 * this during recovery. One strike!
                 */
                mp = XFS_BUF_FSPRIVATE(bp, xfs_mount_t *);
                xfs_ioerror_alert("xlog_recover_iodone",
                                  mp, bp, XFS_BUF_ADDR(bp));
                xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
        }
        XFS_BUF_SET_FSPRIVATE(bp, NULL);
        XFS_BUF_CLR_IODONE_FUNC(bp);
        xfs_biodone(bp);
}

/*
 * This routine finds (to an approximation) the first block in the physical
 * log which contains the given cycle.  It uses a binary search algorithm.
 * Note that the algorithm cannot be perfect because the disk will not
 * necessarily be perfect.
 */
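/*
 * For example, a log whose first eight blocks carry cycle numbers
 * 3 3 3 2 2 2 2 2 converges as follows for first_blk = 0, *last_blk = 7
 * and cycle = 2: mid 3 reads cycle 2 (last = 3), mid 1 reads cycle 3
 * (first = 1), mid 2 reads cycle 3 (first = 2), and the loop stops with
 * *last_blk = 3, the first block stamped with cycle 2.
 */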
STATIC int
xlog_find_cycle_start(
        xlog_t          *log,
        xfs_buf_t       *bp,
        xfs_daddr_t     first_blk,
        xfs_daddr_t     *last_blk,
        uint            cycle)
{
        xfs_caddr_t     offset;
        xfs_daddr_t     mid_blk;
        uint            mid_cycle;
        int             error;

        mid_blk = BLK_AVG(first_blk, *last_blk);
        while (mid_blk != first_blk && mid_blk != *last_blk) {
                if ((error = xlog_bread(log, mid_blk, 1, bp)))
                        return error;
                offset = xlog_align(log, mid_blk, 1, bp);
                mid_cycle = xlog_get_cycle(offset);
                if (mid_cycle == cycle) {
                        *last_blk = mid_blk;
                        /* last_half_cycle == mid_cycle */
                } else {
                        first_blk = mid_blk;
                        /* first_half_cycle == mid_cycle */
                }
                mid_blk = BLK_AVG(first_blk, *last_blk);
        }
        ASSERT((mid_blk == first_blk && mid_blk+1 == *last_blk) ||
               (mid_blk == *last_blk && mid_blk-1 == first_blk));

        return 0;
}

/*
 * Check that the range of blocks does not contain the cycle number
 * given.  The scan needs to occur from front to back and the ptr into the
 * region must be updated since a later routine will need to perform another
 * test.  If the region is completely good, we end up returning the same
 * last block number.
 *
 * Set blkno to -1 if we encounter no errors.  This is an invalid block number
 * since we don't ever expect logs to get this large.
 */
STATIC int
xlog_find_verify_cycle(
        xlog_t          *log,
        xfs_daddr_t     start_blk,
        int             nbblks,
        uint            stop_on_cycle_no,
        xfs_daddr_t     *new_blk)
{
        xfs_daddr_t     i, j;
        uint            cycle;
        xfs_buf_t       *bp;
        xfs_daddr_t     bufblks;
        xfs_caddr_t     buf = NULL;
        int             error = 0;

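        /*
         * ffs() returns the 1-based index of the lowest set bit, so the
         * initial bufblks is a power of two that may be smaller than
         * nbblks (e.g. nbblks = 6 gives 1 << 2 = 4); the loop below copes
         * by reading in bufblks-sized chunks.
         */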
        bufblks = 1 << ffs(nbblks);

        while (!(bp = xlog_get_bp(log, bufblks))) {
                /* can't get enough memory to do everything in one big buffer */
                bufblks >>= 1;
                if (bufblks <= log->l_sectbb_log)
                        return ENOMEM;
        }

        for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
                int     bcount;

                bcount = min(bufblks, (start_blk + nbblks - i));

                if ((error = xlog_bread(log, i, bcount, bp)))
                        goto out;

                buf = xlog_align(log, i, bcount, bp);
                for (j = 0; j < bcount; j++) {
                        cycle = xlog_get_cycle(buf);
                        if (cycle == stop_on_cycle_no) {
                                *new_blk = i+j;
                                goto out;
                        }

                        buf += BBSIZE;
                }
        }

        *new_blk = -1;

out:
        xlog_put_bp(bp);
        return error;
}

/*
 * Potentially backup over partial log record write.
 *
 * In the typical case, last_blk is the number of the block directly after
 * a good log record.  Therefore, we subtract one to get the block number
 * of the last block in the given buffer.  extra_bblks contains the number
 * of blocks we would have read on a previous read.  This happens when the
 * last log record is split over the end of the physical log.
 *
 * extra_bblks is the number of blocks potentially verified on a previous
 * call to this routine.
 */
STATIC int
xlog_find_verify_log_record(
        xlog_t                  *log,
        xfs_daddr_t             start_blk,
        xfs_daddr_t             *last_blk,
        int                     extra_bblks)
{
        xfs_daddr_t             i;
        xfs_buf_t               *bp;
        xfs_caddr_t             offset = NULL;
        xlog_rec_header_t       *head = NULL;
        int                     error = 0;
        int                     smallmem = 0;
        int                     num_blks = *last_blk - start_blk;
        int                     xhdrs;

        ASSERT(start_blk != 0 || *last_blk != start_blk);

        if (!(bp = xlog_get_bp(log, num_blks))) {
                if (!(bp = xlog_get_bp(log, 1)))
                        return ENOMEM;
                smallmem = 1;
        } else {
                if ((error = xlog_bread(log, start_blk, num_blks, bp)))
                        goto out;
                offset = xlog_align(log, start_blk, num_blks, bp);
                offset += ((num_blks - 1) << BBSHIFT);
        }

        for (i = (*last_blk) - 1; i >= 0; i--) {
                if (i < start_blk) {
                        /* valid log record not found */
                        xlog_warn(
                "XFS: Log inconsistent (didn't find previous header)");
                        ASSERT(0);
                        error = XFS_ERROR(EIO);
                        goto out;
                }

                if (smallmem) {
                        if ((error = xlog_bread(log, i, 1, bp)))
                                goto out;
                        offset = xlog_align(log, i, 1, bp);
                }

                head = (xlog_rec_header_t *)offset;

                if (XLOG_HEADER_MAGIC_NUM == be32_to_cpu(head->h_magicno))
                        break;

                if (!smallmem)
                        offset -= BBSIZE;
        }

        /*
         * We hit the beginning of the physical log & still no header.  Return
         * to caller.  If caller can handle a return of -1, then this routine
         * will be called again for the end of the physical log.
         */
        if (i == -1) {
                error = -1;
                goto out;
        }

        /*
         * We have the final block of the good log (the first block
         * of the log record _before_ the head), so we check the uuid.
         */
        if ((error = xlog_header_check_mount(log->l_mp, head)))
                goto out;

        /*
         * We may have found a log record header before we expected one.
         * last_blk will be the 1st block # with a given cycle #.  We may end
         * up reading an entire log record.  In this case, we don't want to
         * reset last_blk.  Only when last_blk points in the middle of a log
         * record do we update last_blk.
         */
        if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
                uint    h_size = be32_to_cpu(head->h_size);

                xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
                if (h_size % XLOG_HEADER_CYCLE_SIZE)
                        xhdrs++;
        } else {
                xhdrs = 1;
        }
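        /*
         * For example, assuming the usual 32k XLOG_HEADER_CYCLE_SIZE, a
         * v2 log writing 64k records carries h_size = 65536 and each
         * record is preceded by xhdrs = 2 header blocks.
         */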

        if (*last_blk - i + extra_bblks !=
            BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
                *last_blk = i;

out:
        xlog_put_bp(bp);
        return error;
}

/*
 * Head is defined to be the point of the log where the next log write
 * could go.  This means that incomplete LR writes at the end are
 * eliminated when calculating the head.  We aren't guaranteed that previous
 * LRs have complete transactions.  We only know that blocks with a cycle
 * number of current cycle - 1 won't be present in the log if we start
 * writing from our current block number.
 *
 * last_blk contains the block number of the first block with a given
 * cycle number.
 *
 * Return: zero if normal, non-zero if error.
 */
STATIC int
xlog_find_head(
        xlog_t          *log,
        xfs_daddr_t     *return_head_blk)
{
        xfs_buf_t       *bp;
        xfs_caddr_t     offset;
        xfs_daddr_t     new_blk, first_blk, start_blk, last_blk, head_blk;
        int             num_scan_bblks;
        uint            first_half_cycle, last_half_cycle;
        uint            stop_on_cycle;
        int             error, log_bbnum = log->l_logBBsize;

        /* Is the end of the log device zeroed? */
        if ((error = xlog_find_zeroed(log, &first_blk)) == -1) {
                *return_head_blk = first_blk;

                /* Is the whole lot zeroed? */
                if (!first_blk) {
                        /* Linux XFS shouldn't generate totally zeroed logs -
                         * mkfs etc write a dummy unmount record to a fresh
                         * log so we can store the uuid in there
                         */
                        xlog_warn("XFS: totally zeroed log");
                }

                return 0;
        } else if (error) {
                xlog_warn("XFS: empty log check failed");
                return error;
        }

        first_blk = 0;                  /* get cycle # of 1st block */
        bp = xlog_get_bp(log, 1);
        if (!bp)
                return ENOMEM;
        if ((error = xlog_bread(log, 0, 1, bp)))
                goto bp_err;
        offset = xlog_align(log, 0, 1, bp);
        first_half_cycle = xlog_get_cycle(offset);

        last_blk = head_blk = log_bbnum - 1;    /* get cycle # of last block */
        if ((error = xlog_bread(log, last_blk, 1, bp)))
                goto bp_err;
        offset = xlog_align(log, last_blk, 1, bp);
        last_half_cycle = xlog_get_cycle(offset);
        ASSERT(last_half_cycle != 0);

        /*
         * If the 1st half cycle number is equal to the last half cycle number,
         * then the entire log is stamped with the same cycle number.  In this
         * case, head_blk can't be set to zero (which makes sense).  The below
         * math doesn't work out properly with head_blk equal to zero.  Instead,
         * we set it to log_bbnum which is an invalid block number, but this
         * value makes the math correct.  If head_blk doesn't change through
         * all the tests below, *head_blk is set to zero at the very end rather
         * than log_bbnum.  In a sense, log_bbnum and zero are the same block
         * in a circular file.
         */
        if (first_half_cycle == last_half_cycle) {
                /*
                 * In this case we believe that the entire log should have
                 * cycle number last_half_cycle.  We need to scan backwards
                 * from the end verifying that there are no holes still
                 * containing last_half_cycle - 1.  If we find such a hole,
                 * then the start of that hole will be the new head.  The
                 * simple case looks like
                 *        x | x ... | x - 1 | x
                 * Another case that fits this picture would be
                 *        x | x + 1 | x ... | x
                 * In this case the head really is somewhere at the end of the
                 * log, as one of the latest writes at the beginning was
                 * incomplete.
                 * One more case is
                 *        x | x + 1 | x ... | x - 1 | x
                 * This is really the combination of the above two cases, and
                 * the head has to end up at the start of the x-1 hole at the
                 * end of the log.
                 *
                 * In the 256k log case, we will read from the beginning to the
                 * end of the log and search for cycle numbers equal to x-1.
                 * We don't worry about the x+1 blocks that we encounter,
                 * because we know that they cannot be the head since the log
                 * started with x.
                 */
                head_blk = log_bbnum;
                stop_on_cycle = last_half_cycle - 1;
        } else {
                /*
                 * In this case we want to find the first block with cycle
                 * number matching last_half_cycle.  We expect the log to be
                 * some variation on
                 *        x + 1 ... | x ...
                 * The first block with cycle number x (last_half_cycle) will
                 * be where the new head belongs.  First we do a binary search
                 * for the first occurrence of last_half_cycle.  The binary
                 * search may not be totally accurate, so then we scan back
                 * from there looking for occurrences of last_half_cycle before
                 * us.  If that backwards scan wraps around the beginning of
                 * the log, then we look for occurrences of last_half_cycle - 1
                 * at the end of the log.  The cases we're looking for look
                 * like
                 *        x + 1 ... | x | x + 1 | x ...
                 *                               ^ binary search stopped here
                 * or
                 *        x + 1 ... | x ... | x - 1 | x
                 *        <---------> less than scan distance
                 */
                stop_on_cycle = last_half_cycle;
                if ((error = xlog_find_cycle_start(log, bp, first_blk,
                                                &head_blk, last_half_cycle)))
                        goto bp_err;
        }

        /*
         * Now validate the answer.  Scan back some number of maximum possible
         * blocks and make sure each one has the expected cycle number.  The
         * maximum is determined by the total possible amount of buffering
         * in the in-core log.  The following number can be made tighter if
         * we actually look at the block size of the filesystem.
         */
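        /*
         * XLOG_TOTAL_REC_SHIFT() is that worst case expressed in basic
         * blocks: every in-core log buffer in flight at once, each at the
         * maximum record size.
         */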
        num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
        if (head_blk >= num_scan_bblks) {
                /*
                 * We are guaranteed that the entire check can be performed
                 * in one buffer.
                 */
                start_blk = head_blk - num_scan_bblks;
                if ((error = xlog_find_verify_cycle(log,
                                                start_blk, num_scan_bblks,
                                                stop_on_cycle, &new_blk)))
                        goto bp_err;
                if (new_blk != -1)
                        head_blk = new_blk;
        } else {                /* need to read 2 parts of log */
                /*
                 * We are going to scan backwards in the log in two parts.
                 * First we scan the physical end of the log.  In this part
                 * of the log, we are looking for blocks with cycle number
                 * last_half_cycle - 1.
                 * If we find one, then we know that the log starts there, as
                 * we've found a hole that didn't get written in going around
                 * the end of the physical log.  The simple case for this is
                 *        x + 1 ... | x ... | x - 1 | x
                 *        <---------> less than scan distance
                 * If all of the blocks at the end of the log have cycle number
                 * last_half_cycle, then we check the blocks at the start of
                 * the log looking for occurrences of last_half_cycle.  If we
                 * find one, then our current estimate for the location of the
                 * first occurrence of last_half_cycle is wrong and we move
                 * back to the hole we've found.  This case looks like
                 *        x + 1 ... | x | x + 1 | x ...
                 *                               ^ binary search stopped here
                 * Another case we need to handle that only occurs in 256k
                 * logs is
                 *        x + 1 ... | x ... | x+1 | x ...
                 *                   ^ binary search stops here
                 * In a 256k log, the scan at the end of the log will see the
                 * x + 1 blocks.  We need to skip past those since that is
                 * certainly not the head of the log.  By searching for
                 * last_half_cycle-1 we accomplish that.
                 */
                start_blk = log_bbnum - num_scan_bblks + head_blk;
                ASSERT(head_blk <= INT_MAX &&
                        (xfs_daddr_t) num_scan_bblks - head_blk >= 0);
                if ((error = xlog_find_verify_cycle(log, start_blk,
                                        num_scan_bblks - (int)head_blk,
                                        (stop_on_cycle - 1), &new_blk)))
                        goto bp_err;
                if (new_blk != -1) {
                        head_blk = new_blk;
                        goto bad_blk;
                }

                /*
                 * Scan beginning of log now.  The last part of the physical
                 * log is good.  This scan needs to verify that it doesn't find
                 * the last_half_cycle.
                 */
                start_blk = 0;
                ASSERT(head_blk <= INT_MAX);
                if ((error = xlog_find_verify_cycle(log,
                                        start_blk, (int)head_blk,
                                        stop_on_cycle, &new_blk)))
                        goto bp_err;
                if (new_blk != -1)
                        head_blk = new_blk;
        }

 bad_blk:
        /*
         * Now we need to make sure head_blk is not pointing to a block in
         * the middle of a log record.
         */
        num_scan_bblks = XLOG_REC_SHIFT(log);
        if (head_blk >= num_scan_bblks) {
                start_blk = head_blk - num_scan_bblks; /* don't read head_blk */

                /* start ptr at last block ptr before head_blk */
                if ((error = xlog_find_verify_log_record(log, start_blk,
                                                        &head_blk, 0)) == -1) {
                        error = XFS_ERROR(EIO);
                        goto bp_err;
                } else if (error)
                        goto bp_err;
        } else {
                start_blk = 0;
                ASSERT(head_blk <= INT_MAX);
                if ((error = xlog_find_verify_log_record(log, start_blk,
                                                        &head_blk, 0)) == -1) {
                        /* We hit the beginning of the log during our search */
                        start_blk = log_bbnum - num_scan_bblks + head_blk;
                        new_blk = log_bbnum;
                        ASSERT(start_blk <= INT_MAX &&
                                (xfs_daddr_t) log_bbnum-start_blk >= 0);
                        ASSERT(head_blk <= INT_MAX);
                        if ((error = xlog_find_verify_log_record(log,
                                                        start_blk, &new_blk,
                                                        (int)head_blk)) == -1) {
                                error = XFS_ERROR(EIO);
                                goto bp_err;
                        } else if (error)
                                goto bp_err;
                        if (new_blk != log_bbnum)
                                head_blk = new_blk;
                } else if (error)
                        goto bp_err;
        }

        xlog_put_bp(bp);
        if (head_blk == log_bbnum)
                *return_head_blk = 0;
        else
                *return_head_blk = head_blk;
        /*
         * When returning here, we have a good block number.  Bad block
         * means that during a previous crash, we didn't have a clean break
         * from cycle number N to cycle number N-1.  In this case, we need
         * to find the first block with cycle number N-1.
         */
        return 0;

 bp_err:
        xlog_put_bp(bp);

        if (error)
            xlog_warn("XFS: failed to find log head");
        return error;
}

/*
 * Find the sync block number or the tail of the log.
 *
 * This will be the block number of the last record to have its
 * associated buffers synced to disk.  Every log record header has
 * a sync lsn embedded in it.  LSNs hold block numbers, so it is easy
 * to get a sync block number.  The only concern is to figure out which
 * log record header to believe.
 *
 * The following algorithm uses the log record header with the largest
 * lsn.  The entire log record does not need to be valid.  We only care
 * that the header is valid.
 *
 * We could speed up search by using current head_blk buffer, but it is not
 * available.
 */
int
xlog_find_tail(
        xlog_t                  *log,
        xfs_daddr_t             *head_blk,
        xfs_daddr_t             *tail_blk)
{
        xlog_rec_header_t       *rhead;
        xlog_op_header_t        *op_head;
        xfs_caddr_t             offset = NULL;
        xfs_buf_t               *bp;
        int                     error, i, found;
        xfs_daddr_t             umount_data_blk;
        xfs_daddr_t             after_umount_blk;
        xfs_lsn_t               tail_lsn;
        int                     hblks;

        found = 0;

        /*
         * Find previous log record
         */
        if ((error = xlog_find_head(log, head_blk)))
                return error;

        bp = xlog_get_bp(log, 1);
        if (!bp)
                return ENOMEM;
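        /*
         * If the head is at block zero and that block has never been
         * written (cycle 0), the log is empty: the tail is block zero as
         * well and there is nothing to search backwards for.
         */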
        if (*head_blk == 0) {                           /* special case */
                if ((error = xlog_bread(log, 0, 1, bp)))
                        goto bread_err;
                offset = xlog_align(log, 0, 1, bp);
                if (xlog_get_cycle(offset) == 0) {
                        *tail_blk = 0;
                        /* leave all other log inited values alone */
                        goto exit;
                }
        }

        /*
         * Search backwards looking for log record header block
         */
        ASSERT(*head_blk < INT_MAX);
        for (i = (int)(*head_blk) - 1; i >= 0; i--) {
                if ((error = xlog_bread(log, i, 1, bp)))
                        goto bread_err;
                offset = xlog_align(log, i, 1, bp);
                if (XLOG_HEADER_MAGIC_NUM == be32_to_cpu(*(__be32 *)offset)) {
                        found = 1;
                        break;
                }
        }
        /*
         * If we haven't found the log record header block, start looking
         * again from the end of the physical log.  XXXmiken: There should be
         * a check here to make sure we didn't search more than N blocks in
         * the previous code.
         */
        if (!found) {
                for (i = log->l_logBBsize - 1; i >= (int)(*head_blk); i--) {
                        if ((error = xlog_bread(log, i, 1, bp)))
                                goto bread_err;
                        offset = xlog_align(log, i, 1, bp);
                        if (XLOG_HEADER_MAGIC_NUM ==
                            be32_to_cpu(*(__be32 *)offset)) {
                                found = 2;
                                break;
                        }
                }
        }
        if (!found) {
                xlog_warn("XFS: xlog_find_tail: couldn't find sync record");
                ASSERT(0);
                return XFS_ERROR(EIO);
        }

        /* find blk_no of tail of log */
        rhead = (xlog_rec_header_t *)offset;
        *tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));

        /*
         * Reset log values according to the state of the log when we
         * crashed.  In the case where head_blk == 0, we bump curr_cycle
         * one because the next write starts a new cycle rather than
         * continuing the cycle of the last good log record.  At this
         * point we have guaranteed that all partial log records have been
         * accounted for.  Therefore, we know that the last good log record
         * written was complete and ended exactly on the end boundary
         * of the physical log.
         */
        log->l_prev_block = i;
        log->l_curr_block = (int)*head_blk;
        log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
        if (found == 2)
                log->l_curr_cycle++;
        log->l_tail_lsn = be64_to_cpu(rhead->h_tail_lsn);
        log->l_last_sync_lsn = be64_to_cpu(rhead->h_lsn);
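        /*
         * Both grant heads resume from the recovered head of the log;
         * BBTOB() converts the current block offset into the byte counts
         * that the grant code maintains.
         */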
        log->l_grant_reserve_cycle = log->l_curr_cycle;
        log->l_grant_reserve_bytes = BBTOB(log->l_curr_block);
        log->l_grant_write_cycle = log->l_curr_cycle;
        log->l_grant_write_bytes = BBTOB(log->l_curr_block);

        /*
         * Look for unmount record.  If we find it, then we know there
         * was a clean unmount.  Since 'i' could be the last block in
         * the physical log, we convert to a log block before comparing
         * to the head_blk.
         *
         * Save the current tail lsn to pass to xlog_clear_stale_blocks()
         * below.  We won't want to clear the unmount record if there is
         * one, so we pass the lsn of the unmount record rather than the
         * block after it.
         */
        if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
                int     h_size = be32_to_cpu(rhead->h_size);
                int     h_version = be32_to_cpu(rhead->h_version);

                if ((h_version & XLOG_VERSION_2) &&
                    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
                        hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
                        if (h_size % XLOG_HEADER_CYCLE_SIZE)
                                hblks++;
                } else {
                        hblks = 1;
                }
        } else {
                hblks = 1;
        }
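        /*
         * A clean unmount record is the last thing written to the log, so
         * it is detected when the block just past it (header blocks plus
         * data length, modulo the log size since it may wrap) lands
         * exactly on the head and the record holds a single operation.
         */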
        after_umount_blk = (i + hblks + (int)
                BTOBB(be32_to_cpu(rhead->h_len))) % log->l_logBBsize;
        tail_lsn = log->l_tail_lsn;
        if (*head_blk == after_umount_blk &&
            be32_to_cpu(rhead->h_num_logops) == 1) {
                umount_data_blk = (i + hblks) % log->l_logBBsize;
                if ((error = xlog_bread(log, umount_data_blk, 1, bp))) {
                        goto bread_err;
                }
                offset = xlog_align(log, umount_data_blk, 1, bp);
                op_head = (xlog_op_header_t *)offset;
                if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
                        /*
                         * Set tail and last sync so that newly written
                         * log records will point recovery to after the
                         * current unmount record.
                         */
                        log->l_tail_lsn =
                                xlog_assign_lsn(log->l_curr_cycle,
                                                after_umount_blk);
                        log->l_last_sync_lsn =
                                xlog_assign_lsn(log->l_curr_cycle,
                                                after_umount_blk);
                        *tail_blk = after_umount_blk;

                        /*
                         * Note that the unmount was clean. If the unmount
                         * was not clean, we need to know this to rebuild the
                         * superblock counters from the perag headers if we
                         * have a filesystem using non-persistent counters.
                         */
                        log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
                }
        }

        /*
         * Make sure that there are no blocks in front of the head
         * with the same cycle number as the head.  This can happen
         * because we allow multiple outstanding log writes concurrently,
         * and the later writes might make it out before earlier ones.
         *
         * We use the lsn from before modifying it so that we'll never
         * overwrite the unmount record after a clean unmount.
         *
         * Do this only if we are going to recover the filesystem
         *
         * NOTE: This used to say "if (!readonly)"
         * However on Linux, we can & do recover a read-only filesystem.
         * We only skip recovery if NORECOVERY is specified on mount,
         * in which case we would not be here.
         *
         * But... if the -device- itself is readonly, just skip this.
         * We can't recover this device anyway, so it won't matter.
         */
        if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp)) {
                error = xlog_clear_stale_blocks(log, tail_lsn);
        }

bread_err:
exit:
        xlog_put_bp(bp);

        if (error)
                xlog_warn("XFS: failed to locate log tail");
        return error;
}

/*
 * Is the log zeroed at all?
 *
 * The last binary search should be changed to perform an X block read
 * once X becomes small enough.  You can then search linearly through
 * the X blocks.  This will cut down on the number of reads we need to do.
 *
 * If the log is partially zeroed, this routine will pass back the blkno
 * of the first block with cycle number 0.  It won't have a complete LR
 * preceding it.
 *
 * Return:
 *      0  => the log is completely written to
 *      -1 => use *blk_no as the first block of the log
 *      >0 => error has occurred
 */
STATIC int
xlog_find_zeroed(
        xlog_t          *log,
        xfs_daddr_t     *blk_no)
{
        xfs_buf_t       *bp;
        xfs_caddr_t     offset;
        uint            first_cycle, last_cycle;
        xfs_daddr_t     new_blk, last_blk, start_blk;
        xfs_daddr_t     num_scan_bblks;
        int             error, log_bbnum = log->l_logBBsize;

        *blk_no = 0;

        /* check totally zeroed log */
        bp = xlog_get_bp(log, 1);
        if (!bp)
                return ENOMEM;
        if ((error = xlog_bread(log, 0, 1, bp)))
                goto bp_err;
        offset = xlog_align(log, 0, 1, bp);
        first_cycle = xlog_get_cycle(offset);
        if (first_cycle == 0) {         /* completely zeroed log */
                *blk_no = 0;
                xlog_put_bp(bp);
                return -1;
        }

        /* check partially zeroed log */
        if ((error = xlog_bread(log, log_bbnum-1, 1, bp)))
                goto bp_err;
        offset = xlog_align(log, log_bbnum-1, 1, bp);
        last_cycle = xlog_get_cycle(offset);
        if (last_cycle != 0) {          /* log completely written to */
                xlog_put_bp(bp);
                return 0;
        } else if (first_cycle != 1) {
                /*
                 * If the cycle of the last block is zero, the cycle of
                 * the first block must be 1. If it's not, maybe we're
                 * not looking at a log... Bail out.
                 */
                xlog_warn("XFS: Log inconsistent or not a log (last==0, first!=1)");
                return XFS_ERROR(EINVAL);
        }

        /* we have a partially zeroed log */
        last_blk = log_bbnum-1;
        if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0)))
                goto bp_err;

        /*
         * Validate the answer.  Because there is no way to guarantee that
         * the entire log is made up of log records which are the same size,
         * we scan over the defined maximum blocks.  At this point, the maximum
         * is not chosen to mean anything special.   XXXmiken
         */
        num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
        ASSERT(num_scan_bblks <= INT_MAX);

        if (last_blk < num_scan_bblks)
                num_scan_bblks = last_blk;
        start_blk = last_blk - num_scan_bblks;

        /*
         * We search for any instances of cycle number 0 that occur before
         * our current estimate of the head.  What we're trying to detect is
         *        1 ... | 0 | 1 | 0...
         *                       ^ binary search ends here
         */
        if ((error = xlog_find_verify_cycle(log, start_blk,
                                         (int)num_scan_bblks, 0, &new_blk)))
                goto bp_err;
        if (new_blk != -1)
                last_blk = new_blk;

        /*
         * Potentially backup over partial log record write.  We don't need
         * to search the end of the log because we know it is zero.
         */
        if ((error = xlog_find_verify_log_record(log, start_blk,
                                &last_blk, 0)) == -1) {
            error = XFS_ERROR(EIO);
            goto bp_err;
        } else if (error)
            goto bp_err;

        *blk_no = last_blk;
bp_err:
        xlog_put_bp(bp);
        if (error)
                return error;
        return -1;
}

/*
 * These are simple subroutines used by xlog_clear_stale_blocks() below
 * to initialize a buffer full of empty log record headers and write
 * them into the log.
 */
STATIC void
xlog_add_record(
        xlog_t                  *log,
        xfs_caddr_t             buf,
        int                     cycle,
        int                     block,
        int                     tail_cycle,
        int                     tail_block)
{
        xlog_rec_header_t       *recp = (xlog_rec_header_t *)buf;

        memset(buf, 0, BBSIZE);
        recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
        recp->h_cycle = cpu_to_be32(cycle);
        recp->h_version = cpu_to_be32(
                        xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
        recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
        recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
        recp->h_fmt = cpu_to_be32(XLOG_FMT);
        memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
}

STATIC int
xlog_write_log_records(
        xlog_t          *log,
        int             cycle,
        int             start_block,
        int             blocks,
        int             tail_cycle,
        int             tail_block)
{
        xfs_caddr_t     offset;
        xfs_buf_t       *bp;
        int             balign, ealign;
        int             sectbb = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, 1);
        int             end_block = start_block + blocks;
        int             bufblks;
        int             error = 0;
        int             i, j = 0;

        bufblks = 1 << ffs(blocks);
        while (!(bp = xlog_get_bp(log, bufblks))) {
                bufblks >>= 1;
                if (bufblks <= log->l_sectbb_log)
                        return ENOMEM;
        }

        /* We may need to do a read at the start to fill in part of
         * the buffer in the starting sector not covered by the first
         * write below.
         */
        balign = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, start_block);
        if (balign != start_block) {
                if ((error = xlog_bread(log, start_block, 1, bp))) {
                        xlog_put_bp(bp);
                        return error;
                }
                j = start_block - balign;
        }

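        /*
         * From here on, j counts the basic blocks at the front of the
         * buffer that were filled by the read above; they hold existing
         * on-disk data from the partial starting sector, and the record
         * stamping below begins after them.
         */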
        for (i = start_block; i < end_block; i += bufblks) {
                int             bcount, endcount;

                bcount = min(bufblks, end_block - start_block);
                endcount = bcount - j;

                /* We may need to do a read at the end to fill in part of
                 * the buffer in the final sector not covered by the write.
                 * If this is the same sector as the above read, skip it.
                 */
                ealign = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, end_block);
                if (j == 0 && (start_block + endcount > ealign)) {
                        offset = XFS_BUF_PTR(bp);
                        balign = BBTOB(ealign - start_block);
                        error = XFS_BUF_SET_PTR(bp, offset + balign,
                                                BBTOB(sectbb));
                        if (!error)
                                error = xlog_bread(log, ealign, sectbb, bp);
                        if (!error)
                                error = XFS_BUF_SET_PTR(bp, offset, bufblks);
                        if (error)
                                break;
                }

                offset = xlog_align(log, start_block, endcount, bp);
                for (; j < endcount; j++) {
                        xlog_add_record(log, offset, cycle, i+j,
                                        tail_cycle, tail_block);
                        offset += BBSIZE;
                }
                error = xlog_bwrite(log, start_block, endcount, bp);
                if (error)
                        break;
                start_block += endcount;
                j = 0;
        }
        xlog_put_bp(bp);
        return error;
}

/*
 * This routine is called to blow away any incomplete log writes out
 * in front of the log head.  We do this so that we won't become confused
 * if we come up, write only a little bit more, and then crash again.
 * If we leave the partial log records out there, this situation could
 * cause us to think those partial writes are valid blocks since they
 * have the current cycle number.  We get rid of them by overwriting them
 * with empty log records with the old cycle number rather than the
 * current one.
 *
 * The tail lsn is passed in rather than taken from
 * the log so that we will not write over the unmount record after a
 * clean unmount in a 512 block log.  Doing so would leave the log without
 * any valid log records in it until a new one was written.  If we crashed
 * during that time we would not be able to recover.
 */
STATIC int
xlog_clear_stale_blocks(
        xlog_t          *log,
        xfs_lsn_t       tail_lsn)
{
        int             tail_cycle, head_cycle;
        int             tail_block, head_block;
        int             tail_distance, max_distance;
        int             distance;
        int             error;

        tail_cycle = CYCLE_LSN(tail_lsn);
        tail_block = BLOCK_LSN(tail_lsn);
        head_cycle = log->l_curr_cycle;
        head_block = log->l_curr_block;

        /*
         * Figure out the distance between the new head of the log
         * and the tail.  We want to write over any blocks beyond the
         * head that we may have written just before the crash, but
         * we don't want to overwrite the tail of the log.
         */
        if (head_cycle == tail_cycle) {
                /*
                 * The tail is behind the head in the physical log,
                 * so the distance from the head to the tail is the
                 * distance from the head to the end of the log plus
                 * the distance from the beginning of the log to the
                 * tail.
                 */
                if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
                        XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
                                         XFS_ERRLEVEL_LOW, log->l_mp);
                        return XFS_ERROR(EFSCORRUPTED);
                }
                tail_distance = tail_block + (log->l_logBBsize - head_block);
        } else {
                /*
                 * The head is behind the tail in the physical log,
                 * so the distance from the head to the tail is just
                 * the tail block minus the head block.
                 */
                if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
                        XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
                                         XFS_ERRLEVEL_LOW, log->l_mp);
                        return XFS_ERROR(EFSCORRUPTED);
                }
                tail_distance = tail_block - head_block;
        }
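        /*
         * For example, in a 1000-block log where head and tail share a
         * cycle, head_block = 900 and tail_block = 100 give
         * tail_distance = 100 + (1000 - 900) = 200 clearable blocks
         * beyond the head.
         */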
1254
1255         /*
1256          * If the head is right up against the tail, we can't clear
1257          * anything.
1258          */
1259         if (tail_distance <= 0) {
1260                 ASSERT(tail_distance == 0);
1261                 return 0;
1262         }
1263
1264         max_distance = XLOG_TOTAL_REC_SHIFT(log);
1265         /*
1266          * Take the smaller of the maximum amount of outstanding I/O
1267          * we could have and the distance to the tail to clear out.
1268          * We take the smaller so that we don't overwrite the tail and
1269          * we don't waste all day writing from the head to the tail
1270          * for no reason.
1271          */
1272         max_distance = MIN(max_distance, tail_distance);
1273
1274         if ((head_block + max_distance) <= log->l_logBBsize) {
1275                 /*
1276                  * We can stomp all the blocks we need to without
1277                  * wrapping around the end of the log.  Just do it
1278                  * in a single write.  Use the cycle number of the
1279                  * current cycle minus one so that the log will look like:
1280                  *     n ... | n - 1 ...
1281                  */
1282                 error = xlog_write_log_records(log, (head_cycle - 1),
1283                                 head_block, max_distance, tail_cycle,
1284                                 tail_block);
1285                 if (error)
1286                         return error;
1287         } else {
1288                 /*
1289                  * We need to wrap around the end of the physical log in
1290                  * order to clear all the blocks.  Do it in two separate
1291                  * I/Os.  The first write should be from the head to the
1292                  * end of the physical log, and it should use the current
1293                  * cycle number minus one just like above.
1294                  */
1295                 distance = log->l_logBBsize - head_block;
1296                 error = xlog_write_log_records(log, (head_cycle - 1),
1297                                 head_block, distance, tail_cycle,
1298                                 tail_block);
1299
1300                 if (error)
1301                         return error;
1302
1303                 /*
1304                  * Now write the blocks at the start of the physical log.
1305                  * This writes the remainder of the blocks we want to clear.
1306                  * It uses the current cycle number since we're now on the
1307                  * same cycle as the head so that we get:
1308                  *    n ... n ... | n - 1 ...
1309                  *    ^^^^^ blocks we're writing
1310                  */
1311                 distance = max_distance - (log->l_logBBsize - head_block);
1312                 error = xlog_write_log_records(log, head_cycle, 0, distance,
1313                                 tail_cycle, tail_block);
1314                 if (error)
1315                         return error;
1316         }
1317
1318         return 0;
1319 }
1320
1321 /******************************************************************************
1322  *
1323  *              Log recover routines
1324  *
1325  ******************************************************************************
1326  */
1327
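/*
 * Walk the list of recovery transactions looking for the given
 * transaction id.  Returns NULL if no matching transaction is found.
 */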
1328 STATIC xlog_recover_t *
1329 xlog_recover_find_tid(
1330         xlog_recover_t          *q,
1331         xlog_tid_t              tid)
1332 {
1333         xlog_recover_t          *p = q;
1334
1335         while (p != NULL) {
1336                 if (p->r_log_tid == tid)
1337                         break;
1338                 p = p->r_next;
1339         }
1340         return p;
1341 }
1342
1343 STATIC void
1344 xlog_recover_put_hashq(
1345         xlog_recover_t          **q,
1346         xlog_recover_t          *trans)
1347 {
1348         trans->r_next = *q;
1349         *q = trans;
1350 }
1351
1352 STATIC void
1353 xlog_recover_add_item(
1354         xlog_recover_item_t     **itemq)
1355 {
1356         xlog_recover_item_t     *item;
1357
1358         item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
1359         xlog_recover_insert_item_backq(itemq, item);
1360 }
1361
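/*
 * A continuation record carries the remainder of a region that did not
 * fit in the previous log record.  Append its data to the last region
 * of the last item added to this transaction; if no items have been
 * added yet, the data completes the transaction header itself.
 */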
1362 STATIC int
1363 xlog_recover_add_to_cont_trans(
1364         xlog_recover_t          *trans,
1365         xfs_caddr_t             dp,
1366         int                     len)
1367 {
1368         xlog_recover_item_t     *item;
1369         xfs_caddr_t             ptr, old_ptr;
1370         int                     old_len;
1371
1372         item = trans->r_itemq;
1373         if (item == NULL) {
1374                 /* finish copying rest of trans header */
1375                 xlog_recover_add_item(&trans->r_itemq);
1376                 ptr = (xfs_caddr_t) &trans->r_theader +
1377                                 sizeof(xfs_trans_header_t) - len;
1378                 memcpy(ptr, dp, len); /* d, s, l */
1379                 return 0;
1380         }
1381         item = item->ri_prev;
1382
1383         old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
1384         old_len = item->ri_buf[item->ri_cnt-1].i_len;
1385
1386         ptr = kmem_realloc(old_ptr, len+old_len, old_len, 0u);
1387         memcpy(&ptr[old_len], dp, len); /* d, s, l */
1388         item->ri_buf[item->ri_cnt-1].i_len += len;
1389         item->ri_buf[item->ri_cnt-1].i_addr = ptr;
1390         return 0;
1391 }
1392
1393 /*
1394  * The next region to add is the start of a new region.  It could be
1395  * a whole region or just the first part of a larger region.  Because
1396  * of this, the assumption here is that the type and size fields of all
1397  * format structures fit into the first 32 bits of the structure.
1398  *
1399  * This works because all regions must be 32 bit aligned.  Therefore, we
1400  * either have both fields or we have neither field.  In the case we have
1401  * neither field, the data part of the region is zero length.  We only have
1402  * a log_op_header and can throw away the header since a new one will appear
1403  * later.  If we have at least 4 bytes, then we can determine how many regions
1404  * will appear in the current log item.
1405  */
1406 STATIC int
1407 xlog_recover_add_to_trans(
1408         xlog_recover_t          *trans,
1409         xfs_caddr_t             dp,
1410         int                     len)
1411 {
1412         xfs_inode_log_format_t  *in_f;                  /* any will do */
1413         xlog_recover_item_t     *item;
1414         xfs_caddr_t             ptr;
1415
1416         if (!len)
1417                 return 0;
1418         item = trans->r_itemq;
1419         if (item == NULL) {
1420                 ASSERT(*(uint *)dp == XFS_TRANS_HEADER_MAGIC);
1421                 if (len == sizeof(xfs_trans_header_t))
1422                         xlog_recover_add_item(&trans->r_itemq);
1423                 memcpy(&trans->r_theader, dp, len); /* d, s, l */
1424                 return 0;
1425         }
1426
1427         ptr = kmem_alloc(len, KM_SLEEP);
1428         memcpy(ptr, dp, len);
1429         in_f = (xfs_inode_log_format_t *)ptr;
1430
1431         if (item->ri_prev->ri_total != 0 &&
1432              item->ri_prev->ri_total == item->ri_prev->ri_cnt) {
1433                 xlog_recover_add_item(&trans->r_itemq);
1434         }
1435         item = trans->r_itemq;
1436         item = item->ri_prev;
1437
1438         if (item->ri_total == 0) {              /* first region to be added */
1439                 item->ri_total  = in_f->ilf_size;
1440                 ASSERT(item->ri_total <= XLOG_MAX_REGIONS_IN_ITEM);
1441                 item->ri_buf = kmem_zalloc((item->ri_total *
1442                                             sizeof(xfs_log_iovec_t)), KM_SLEEP);
1443         }
1444         ASSERT(item->ri_total > item->ri_cnt);
1445         /* Description region is ri_buf[0] */
1446         item->ri_buf[item->ri_cnt].i_addr = ptr;
1447         item->ri_buf[item->ri_cnt].i_len  = len;
1448         item->ri_cnt++;
1449         return 0;
1450 }
1451
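/*
 * Allocate a new recovery transaction for the given transaction id and
 * LSN and add it to the front of the hash queue.
 */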
1452 STATIC void
1453 xlog_recover_new_tid(
1454         xlog_recover_t          **q,
1455         xlog_tid_t              tid,
1456         xfs_lsn_t               lsn)
1457 {
1458         xlog_recover_t          *trans;
1459
1460         trans = kmem_zalloc(sizeof(xlog_recover_t), KM_SLEEP);
1461         trans->r_log_tid   = tid;
1462         trans->r_lsn       = lsn;
1463         xlog_recover_put_hashq(q, trans);
1464 }
1465
1466 STATIC int
1467 xlog_recover_unlink_tid(
1468         xlog_recover_t          **q,
1469         xlog_recover_t          *trans)
1470 {
1471         xlog_recover_t          *tp;
1472         int                     found = 0;
1473
1474         ASSERT(trans != NULL);
1475         if (trans == *q) {
1476                 *q = (*q)->r_next;
1477         } else {
1478                 tp = *q;
1479                 while (tp) {
1480                         if (tp->r_next == trans) {
1481                                 found = 1;
1482                                 break;
1483                         }
1484                         tp = tp->r_next;
1485                 }
1486                 if (!found) {
1487                         xlog_warn(
1488                              "XFS: xlog_recover_unlink_tid: trans not found");
1489                         ASSERT(0);
1490                         return XFS_ERROR(EIO);
1491                 }
1492                 tp->r_next = tp->r_next->r_next;
1493         }
1494         return 0;
1495 }
1496
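/*
 * The item queue is circular and doubly linked, with *q pointing at
 * the head.  Inserting at the back therefore means linking the new
 * item in just before the head.
 */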
1497 STATIC void
1498 xlog_recover_insert_item_backq(
1499         xlog_recover_item_t     **q,
1500         xlog_recover_item_t     *item)
1501 {
1502         if (*q == NULL) {
1503                 item->ri_prev = item->ri_next = item;
1504                 *q = item;
1505         } else {
1506                 item->ri_next           = *q;
1507                 item->ri_prev           = (*q)->ri_prev;
1508                 (*q)->ri_prev           = item;
1509                 item->ri_prev->ri_next  = item;
1510         }
1511 }
1512
1513 STATIC void
1514 xlog_recover_insert_item_frontq(
1515         xlog_recover_item_t     **q,
1516         xlog_recover_item_t     *item)
1517 {
1518         xlog_recover_insert_item_backq(q, item);
1519         *q = item;
1520 }
1521
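/*
 * Reorder the items in a transaction so that buffer items which have
 * not been cancelled are moved to the front of the queue and replayed
 * first; cancelled buffers and all other item types are queued behind
 * them.
 */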
1522 STATIC int
1523 xlog_recover_reorder_trans(
1524         xlog_recover_t          *trans)
1525 {
1526         xlog_recover_item_t     *first_item, *itemq, *itemq_next;
1527         xfs_buf_log_format_t    *buf_f;
1528         ushort                  flags = 0;
1529
1530         first_item = itemq = trans->r_itemq;
1531         trans->r_itemq = NULL;
1532         do {
1533                 itemq_next = itemq->ri_next;
1534                 buf_f = (xfs_buf_log_format_t *)itemq->ri_buf[0].i_addr;
1535
1536                 switch (ITEM_TYPE(itemq)) {
1537                 case XFS_LI_BUF:
1538                         flags = buf_f->blf_flags;
1539                         if (!(flags & XFS_BLI_CANCEL)) {
1540                                 xlog_recover_insert_item_frontq(&trans->r_itemq,
1541                                                                 itemq);
1542                                 break;
1543                         }
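                        /* cancelled buffers fall through */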
1544                 case XFS_LI_INODE:
1545                 case XFS_LI_DQUOT:
1546                 case XFS_LI_QUOTAOFF:
1547                 case XFS_LI_EFD:
1548                 case XFS_LI_EFI:
1549                         xlog_recover_insert_item_backq(&trans->r_itemq, itemq);
1550                         break;
1551                 default:
1552                         xlog_warn(
1553         "XFS: xlog_recover_reorder_trans: unrecognized type of log operation");
1554                         ASSERT(0);
1555                         return XFS_ERROR(EIO);
1556                 }
1557                 itemq = itemq_next;
1558         } while (first_item != itemq);
1559         return 0;
1560 }
1561
1562 /*
1563  * Build up the table of buf cancel records so that we don't replay
1564  * cancelled data in the second pass.  For buffer records that are
1565  * not cancel records, there is nothing to do here so we just return.
1566  *
1567  * If we get a cancel record which is already in the table, this indicates
1568  * that the buffer was cancelled multiple times.  In order to ensure
1569  * that during pass 2 we keep the record in the table until we reach its
1570  * last occurrence in the log, we keep a reference count in the cancel
1571  * record in the table to tell us how many times we expect to see this
1572  * record during the second pass.
1573  */
1574 STATIC void
1575 xlog_recover_do_buffer_pass1(
1576         xlog_t                  *log,
1577         xfs_buf_log_format_t    *buf_f)
1578 {
1579         xfs_buf_cancel_t        *bcp;
1580         xfs_buf_cancel_t        *nextp;
1581         xfs_buf_cancel_t        *prevp;
1582         xfs_buf_cancel_t        **bucket;
1583         xfs_daddr_t             blkno = 0;
1584         uint                    len = 0;
1585         ushort                  flags = 0;
1586
1587         switch (buf_f->blf_type) {
1588         case XFS_LI_BUF:
1589                 blkno = buf_f->blf_blkno;
1590                 len = buf_f->blf_len;
1591                 flags = buf_f->blf_flags;
1592                 break;
1593         }
1594
1595         /*
1596          * If this isn't a cancel buffer item, then just return.
1597          */
1598         if (!(flags & XFS_BLI_CANCEL))
1599                 return;
1600
1601         /*
1602          * Insert an xfs_buf_cancel record into the hash table of
1603          * them.  If there is already an identical record, bump
1604          * its reference count.
1605          */
1606         bucket = &log->l_buf_cancel_table[(__uint64_t)blkno %
1607                                           XLOG_BC_TABLE_SIZE];
1608         /*
1609          * If the hash bucket is empty then just insert a new record into
1610          * the bucket.
1611          */
1612         if (*bucket == NULL) {
1613                 bcp = (xfs_buf_cancel_t *)kmem_alloc(sizeof(xfs_buf_cancel_t),
1614                                                      KM_SLEEP);
1615                 bcp->bc_blkno = blkno;
1616                 bcp->bc_len = len;
1617                 bcp->bc_refcount = 1;
1618                 bcp->bc_next = NULL;
1619                 *bucket = bcp;
1620                 return;
1621         }
1622
1623         /*
1624          * The hash bucket is not empty, so search for duplicates of our
1625          * record.  If we find one, then just bump its refcount.  If not
1626          * then add us at the end of the list.
1627          */
1628         prevp = NULL;
1629         nextp = *bucket;
1630         while (nextp != NULL) {
1631                 if (nextp->bc_blkno == blkno && nextp->bc_len == len) {
1632                         nextp->bc_refcount++;
1633                         return;
1634                 }
1635                 prevp = nextp;
1636                 nextp = nextp->bc_next;
1637         }
1638         ASSERT(prevp != NULL);
1639         bcp = (xfs_buf_cancel_t *)kmem_alloc(sizeof(xfs_buf_cancel_t),
1640                                              KM_SLEEP);
1641         bcp->bc_blkno = blkno;
1642         bcp->bc_len = len;
1643         bcp->bc_refcount = 1;
1644         bcp->bc_next = NULL;
1645         prevp->bc_next = bcp;
1646 }
1647
1648 /*
1649  * Check to see whether the buffer being recovered has a corresponding
1650  * entry in the buffer cancel record table.  If it does then return 1
1651  * so that it will be cancelled, otherwise return 0.  If the buffer is
1652  * actually a buffer cancel item (XFS_BLI_CANCEL is set), then decrement
1653  * the refcount on the entry in the table and remove it from the table
1654  * if this is the last reference.
1655  *
1656  * We remove the cancel record from the table when we encounter its
1657  * last occurrence in the log so that if the same buffer is re-used
1658  * again after its last cancellation we actually replay the changes
1659  * made at that point.
1660  */
1661 STATIC int
1662 xlog_check_buffer_cancelled(
1663         xlog_t                  *log,
1664         xfs_daddr_t             blkno,
1665         uint                    len,
1666         ushort                  flags)
1667 {
1668         xfs_buf_cancel_t        *bcp;
1669         xfs_buf_cancel_t        *prevp;
1670         xfs_buf_cancel_t        **bucket;
1671
1672         if (log->l_buf_cancel_table == NULL) {
1673                 /*
1674                  * There is nothing in the table built in pass one,
1675                  * so this buffer must not be cancelled.
1676                  */
1677                 ASSERT(!(flags & XFS_BLI_CANCEL));
1678                 return 0;
1679         }
1680
1681         bucket = &log->l_buf_cancel_table[(__uint64_t)blkno %
1682                                           XLOG_BC_TABLE_SIZE];
1683         bcp = *bucket;
1684         if (bcp == NULL) {
1685                 /*
1686                  * There is no corresponding entry in the table built
1687                  * in pass one, so this buffer has not been cancelled.
1688                  */
1689                 ASSERT(!(flags & XFS_BLI_CANCEL));
1690                 return 0;
1691         }
1692
1693         /*
1694          * Search for an entry in the buffer cancel table that
1695          * matches our buffer.
1696          */
1697         prevp = NULL;
1698         while (bcp != NULL) {
1699                 if (bcp->bc_blkno == blkno && bcp->bc_len == len) {
1700                         /*
1701                          * We've got a match, so return 1 so that the
1702                          * recovery of this buffer is cancelled.
1703                          * If this buffer is actually a buffer cancel
1704                          * log item, then decrement the refcount on the
1705                          * one in the table and remove it if this is the
1706                          * last reference.
1707                          */
1708                         if (flags & XFS_BLI_CANCEL) {
1709                                 bcp->bc_refcount--;
1710                                 if (bcp->bc_refcount == 0) {
1711                                         if (prevp == NULL) {
1712                                                 *bucket = bcp->bc_next;
1713                                         } else {
1714                                                 prevp->bc_next = bcp->bc_next;
1715                                         }
1716                                         kmem_free(bcp);
1717                                 }
1718                         }
1719                         return 1;
1720                 }
1721                 prevp = bcp;
1722                 bcp = bcp->bc_next;
1723         }
1724         /*
1725          * We didn't find a corresponding entry in the table, so
1726          * return 0 so that the buffer is NOT cancelled.
1727          */
1728         ASSERT(!(flags & XFS_BLI_CANCEL));
1729         return 0;
1730 }
1731
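/*
 * In pass 2 all we need to know about a buffer item is whether it was
 * cancelled in pass 1, so just pull out the identifying fields and
 * consult the table of cancelled buffers.
 */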
1732 STATIC int
1733 xlog_recover_do_buffer_pass2(
1734         xlog_t                  *log,
1735         xfs_buf_log_format_t    *buf_f)
1736 {
1737         xfs_daddr_t             blkno = 0;
1738         ushort                  flags = 0;
1739         uint                    len = 0;
1740
1741         switch (buf_f->blf_type) {
1742         case XFS_LI_BUF:
1743                 blkno = buf_f->blf_blkno;
1744                 flags = buf_f->blf_flags;
1745                 len = buf_f->blf_len;
1746                 break;
1747         }
1748
1749         return xlog_check_buffer_cancelled(log, blkno, len, flags);
1750 }
1751
1752 /*
1753  * Perform recovery for a buffer full of inodes.  In these buffers,
1754  * the only data which should be recovered is that which corresponds
1755  * to the di_next_unlinked pointers in the on disk inode structures.
1756  * The rest of the data for the inodes is always logged through the
1757  * inodes themselves rather than the inode buffer and is recovered
1758  * in xlog_recover_do_inode_trans().
1759  *
1760  * The only time when buffers full of inodes are fully recovered is
1761  * when the buffer is full of newly allocated inodes.  In this case
1762  * the buffer will not be marked as an inode buffer and so will be
1763  * sent to xlog_recover_do_reg_buffer() below during recovery.
1764  */
1765 STATIC int
1766 xlog_recover_do_inode_buffer(
1767         xfs_mount_t             *mp,
1768         xlog_recover_item_t     *item,
1769         xfs_buf_t               *bp,
1770         xfs_buf_log_format_t    *buf_f)
1771 {
1772         int                     i;
1773         int                     item_index;
1774         int                     bit;
1775         int                     nbits;
1776         int                     reg_buf_offset;
1777         int                     reg_buf_bytes;
1778         int                     next_unlinked_offset;
1779         int                     inodes_per_buf;
1780         xfs_agino_t             *logged_nextp;
1781         xfs_agino_t             *buffer_nextp;
1782         unsigned int            *data_map = NULL;
1783         unsigned int            map_size = 0;
1784
1785         switch (buf_f->blf_type) {
1786         case XFS_LI_BUF:
1787                 data_map = buf_f->blf_data_map;
1788                 map_size = buf_f->blf_map_size;
1789                 break;
1790         }
1791         /*
1792          * Set the variables corresponding to the current region to
1793          * 0 so that we'll initialize them on the first pass through
1794          * the loop.
1795          */
1796         reg_buf_offset = 0;
1797         reg_buf_bytes = 0;
1798         bit = 0;
1799         nbits = 0;
1800         item_index = 0;
1801         inodes_per_buf = XFS_BUF_COUNT(bp) >> mp->m_sb.sb_inodelog;
1802         for (i = 0; i < inodes_per_buf; i++) {
1803                 next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
1804                         offsetof(xfs_dinode_t, di_next_unlinked);
1805
1806                 while (next_unlinked_offset >=
1807                        (reg_buf_offset + reg_buf_bytes)) {
1808                         /*
1809                          * The next di_next_unlinked field is beyond
1810                          * the current logged region.  Find the next
1811                          * logged region that contains or is beyond
1812                          * the current di_next_unlinked field.
1813                          */
1814                         bit += nbits;
1815                         bit = xfs_next_bit(data_map, map_size, bit);
1816
1817                         /*
1818                          * If there are no more logged regions in the
1819                          * buffer, then we're done.
1820                          */
1821                         if (bit == -1) {
1822                                 return 0;
1823                         }
1824
1825                         nbits = xfs_contig_bits(data_map, map_size,
1826                                                          bit);
1827                         ASSERT(nbits > 0);
1828                         reg_buf_offset = bit << XFS_BLI_SHIFT;
1829                         reg_buf_bytes = nbits << XFS_BLI_SHIFT;
1830                         item_index++;
1831                 }
1832
1833                 /*
1834                  * If the current logged region starts after the current
1835                  * di_next_unlinked field, then move on to the next
1836                  * di_next_unlinked field.
1837                  */
1838                 if (next_unlinked_offset < reg_buf_offset) {
1839                         continue;
1840                 }
1841
1842                 ASSERT(item->ri_buf[item_index].i_addr != NULL);
1843                 ASSERT((item->ri_buf[item_index].i_len % XFS_BLI_CHUNK) == 0);
1844                 ASSERT((reg_buf_offset + reg_buf_bytes) <= XFS_BUF_COUNT(bp));
1845
1846                 /*
1847                  * The current logged region contains a copy of the
1848                  * current di_next_unlinked field.  Extract its value
1849                  * and copy it to the buffer copy.
1850                  */
1851                 logged_nextp = (xfs_agino_t *)
1852                                ((char *)(item->ri_buf[item_index].i_addr) +
1853                                 (next_unlinked_offset - reg_buf_offset));
1854                 if (unlikely(*logged_nextp == 0)) {
1855                         xfs_fs_cmn_err(CE_ALERT, mp,
1856                                 "bad inode buffer log record (ptr = 0x%p, bp = 0x%p).  XFS trying to replay bad (0) inode di_next_unlinked field",
1857                                 item, bp);
1858                         XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
1859                                          XFS_ERRLEVEL_LOW, mp);
1860                         return XFS_ERROR(EFSCORRUPTED);
1861                 }
1862
1863                 buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp,
1864                                               next_unlinked_offset);
1865                 *buffer_nextp = *logged_nextp;
1866         }
1867
1868         return 0;
1869 }
1870
1871 /*
1872  * Perform a 'normal' buffer recovery.  Each logged region of the
1873  * buffer should be copied over the corresponding region in the
1874  * given buffer.  The bitmap in the buf log format structure indicates
1875  * where to place the logged data.
1876  */
1877 /*ARGSUSED*/
1878 STATIC void
1879 xlog_recover_do_reg_buffer(
1880         xlog_recover_item_t     *item,
1881         xfs_buf_t               *bp,
1882         xfs_buf_log_format_t    *buf_f)
1883 {
1884         int                     i;
1885         int                     bit;
1886         int                     nbits;
1887         unsigned int            *data_map = NULL;
1888         unsigned int            map_size = 0;
1889         int                     error;
1890
1891         switch (buf_f->blf_type) {
1892         case XFS_LI_BUF:
1893                 data_map = buf_f->blf_data_map;
1894                 map_size = buf_f->blf_map_size;
1895                 break;
1896         }
1897         bit = 0;
1898         i = 1;  /* 0 is the buf format structure */
1899         while (1) {
1900                 bit = xfs_next_bit(data_map, map_size, bit);
1901                 if (bit == -1)
1902                         break;
1903                 nbits = xfs_contig_bits(data_map, map_size, bit);
1904                 ASSERT(nbits > 0);
1905                 ASSERT(item->ri_buf[i].i_addr != NULL);
1906                 ASSERT(item->ri_buf[i].i_len % XFS_BLI_CHUNK == 0);
1907                 ASSERT(XFS_BUF_COUNT(bp) >=
1908                        ((uint)bit << XFS_BLI_SHIFT)+(nbits<<XFS_BLI_SHIFT));
1909
1910                 /*
1911                  * Do a sanity check if this is a dquot buffer. Just checking
1912                  * the first dquot in the buffer should do. XXXThis is
1913                  * the first dquot in the buffer should do. XXX: this is
1914                  */
1915                 error = 0;
1916                 if (buf_f->blf_flags &
1917                    (XFS_BLI_UDQUOT_BUF|XFS_BLI_PDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) {
1918                         error = xfs_qm_dqcheck((xfs_disk_dquot_t *)
1919                                                item->ri_buf[i].i_addr,
1920                                                -1, 0, XFS_QMOPT_DOWARN,
1921                                                "dquot_buf_recover");
1922                 }
1923                 if (!error)
1924                         memcpy(xfs_buf_offset(bp,
1925                                 (uint)bit << XFS_BLI_SHIFT),    /* dest */
1926                                 item->ri_buf[i].i_addr,         /* source */
1927                                 nbits<<XFS_BLI_SHIFT);          /* length */
1928                 i++;
1929                 bit += nbits;
1930         }
1931
1932         /* Shouldn't be any more regions */
1933         ASSERT(i == item->ri_total);
1934 }
1935
1936 /*
1937  * Do some primitive error checking on ondisk dquot data structures.
1938  */
1939 int
1940 xfs_qm_dqcheck(
1941         xfs_disk_dquot_t *ddq,
1942         xfs_dqid_t       id,
1943         uint             type,    /* used only when IO_dorepair is true */
1944         uint             flags,
1945         char             *str)
1946 {
1947         xfs_dqblk_t      *d = (xfs_dqblk_t *)ddq;
1948         int             errs = 0;
1949
1950         /*
1951          * We can encounter an uninitialized dquot buffer for 2 reasons:
1952          * 1. If we crash while deleting the quotainode(s), and those blks got
1953          *    used for user data. This is because we take the path of regular
1954          *    file deletion; however, the size field of quotainodes is never
1955          *    updated, so all the tricks that we play in itruncate_finish
1956          *    don't quite matter.
1957          *
1958          * 2. We don't play the quota buffers when there's a quotaoff logitem.
1959          *    But the allocation will be replayed so we'll end up with an
1960          *    uninitialized quota block.
1961          *
1962          * This is all fine; things are still consistent, and we haven't lost
1963          * any quota information. Just don't complain about bad dquot blks.
1964          */
1965         if (be16_to_cpu(ddq->d_magic) != XFS_DQUOT_MAGIC) {
1966                 if (flags & XFS_QMOPT_DOWARN)
1967                         cmn_err(CE_ALERT,
1968                         "%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
1969                         str, id, be16_to_cpu(ddq->d_magic), XFS_DQUOT_MAGIC);
1970                 errs++;
1971         }
1972         if (ddq->d_version != XFS_DQUOT_VERSION) {
1973                 if (flags & XFS_QMOPT_DOWARN)
1974                         cmn_err(CE_ALERT,
1975                         "%s : XFS dquot ID 0x%x, version 0x%x != 0x%x",
1976                         str, id, ddq->d_version, XFS_DQUOT_VERSION);
1977                 errs++;
1978         }
1979
1980         if (ddq->d_flags != XFS_DQ_USER &&
1981             ddq->d_flags != XFS_DQ_PROJ &&
1982             ddq->d_flags != XFS_DQ_GROUP) {
1983                 if (flags & XFS_QMOPT_DOWARN)
1984                         cmn_err(CE_ALERT,
1985                         "%s : XFS dquot ID 0x%x, unknown flags 0x%x",
1986                         str, id, ddq->d_flags);
1987                 errs++;
1988         }
1989
1990         if (id != -1 && id != be32_to_cpu(ddq->d_id)) {
1991                 if (flags & XFS_QMOPT_DOWARN)
1992                         cmn_err(CE_ALERT,
1993                         "%s : ondisk-dquot 0x%p, ID mismatch: "
1994                         "0x%x expected, found id 0x%x",
1995                         str, ddq, id, be32_to_cpu(ddq->d_id));
1996                 errs++;
1997         }
1998
1999         if (!errs && ddq->d_id) {
2000                 if (ddq->d_blk_softlimit &&
2001                     be64_to_cpu(ddq->d_bcount) >=
2002                                 be64_to_cpu(ddq->d_blk_softlimit)) {
2003                         if (!ddq->d_btimer) {
2004                                 if (flags & XFS_QMOPT_DOWARN)
2005                                         cmn_err(CE_ALERT,
2006                                         "%s : Dquot ID 0x%x (0x%p) "
2007                                         "BLK TIMER NOT STARTED",
2008                                         str, (int)be32_to_cpu(ddq->d_id), ddq);
2009                                 errs++;
2010                         }
2011                 }
2012                 if (ddq->d_ino_softlimit &&
2013                     be64_to_cpu(ddq->d_icount) >=
2014                                 be64_to_cpu(ddq->d_ino_softlimit)) {
2015                         if (!ddq->d_itimer) {
2016                                 if (flags & XFS_QMOPT_DOWARN)
2017                                         cmn_err(CE_ALERT,
2018                                         "%s : Dquot ID 0x%x (0x%p) "
2019                                         "INODE TIMER NOT STARTED",
2020                                         str, (int)be32_to_cpu(ddq->d_id), ddq);
2021                                 errs++;
2022                         }
2023                 }
2024                 if (ddq->d_rtb_softlimit &&
2025                     be64_to_cpu(ddq->d_rtbcount) >=
2026                                 be64_to_cpu(ddq->d_rtb_softlimit)) {
2027                         if (!ddq->d_rtbtimer) {
2028                                 if (flags & XFS_QMOPT_DOWARN)
2029                                         cmn_err(CE_ALERT,
2030                                         "%s : Dquot ID 0x%x (0x%p) "
2031                                         "RTBLK TIMER NOT STARTED",
2032                                         str, (int)be32_to_cpu(ddq->d_id), ddq);
2033                                 errs++;
2034                         }
2035                 }
2036         }
2037
2038         if (!errs || !(flags & XFS_QMOPT_DQREPAIR))
2039                 return errs;
2040
2041         if (flags & XFS_QMOPT_DOWARN)
2042                 cmn_err(CE_NOTE, "Re-initializing dquot ID 0x%x", id);
2043
2044         /*
2045          * Typically, a repair is only requested by quotacheck.
2046          */
2047         ASSERT(id != -1);
2048         ASSERT(flags & XFS_QMOPT_DQREPAIR);
2049         memset(d, 0, sizeof(xfs_dqblk_t));
2050
2051         d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
2052         d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
2053         d->dd_diskdq.d_flags = type;
2054         d->dd_diskdq.d_id = cpu_to_be32(id);
2055
2056         return errs;
2057 }
2058
2059 /*
2060  * Perform a dquot buffer recovery.
2061  * Simple algorithm: if we have found a QUOTAOFF logitem of the same type
2062  * (ie. USR or GRP), then just toss this buffer away; don't recover it.
2063  * Else, treat it as a regular buffer and do recovery.
2064  */
2065 STATIC void
2066 xlog_recover_do_dquot_buffer(
2067         xfs_mount_t             *mp,
2068         xlog_t                  *log,
2069         xlog_recover_item_t     *item,
2070         xfs_buf_t               *bp,
2071         xfs_buf_log_format_t    *buf_f)
2072 {
2073         uint                    type;
2074
2075         /*
2076          * Filesystems are required to send in quota flags at mount time.
2077          */
2078         if (mp->m_qflags == 0) {
2079                 return;
2080         }
2081
2082         type = 0;
2083         if (buf_f->blf_flags & XFS_BLI_UDQUOT_BUF)
2084                 type |= XFS_DQ_USER;
2085         if (buf_f->blf_flags & XFS_BLI_PDQUOT_BUF)
2086                 type |= XFS_DQ_PROJ;
2087         if (buf_f->blf_flags & XFS_BLI_GDQUOT_BUF)
2088                 type |= XFS_DQ_GROUP;
2089         /*
2090          * If this type of quota was turned off, ignore this buffer
2091          */
2092         if (log->l_quotaoffs_flag & type)
2093                 return;
2094
2095         xlog_recover_do_reg_buffer(item, bp, buf_f);
2096 }
2097
2098 /*
2099  * This routine replays a modification made to a buffer at runtime.
2100  * There are actually two types of buffer, regular and inode, which
2101  * are handled differently.  From inode buffers we recover only
2102  * a specific set of data, namely
2103  * the inode di_next_unlinked fields.  This is because all other inode
2104  * data is actually logged via inode records and any data we replay
2105  * here which overlaps that may be stale.
2106  *
2107  * When meta-data buffers are freed at run time we log a buffer item
2108  * with the XFS_BLI_CANCEL bit set to indicate that previous copies
2109  * of the buffer in the log should not be replayed at recovery time.
2110  * This is so that if the blocks covered by the buffer are reused for
2111  * file data before we crash we don't end up replaying old, freed
2112  * meta-data into a user's file.
2113  *
2114  * To handle the cancellation of buffer log items, we make two passes
2115  * over the log during recovery.  During the first we build a table of
2116  * those buffers which have been cancelled, and during the second we
2117  * only replay those buffers which do not have corresponding cancel
2118  * records in the table.  See xlog_recover_do_buffer_pass[1,2] above
2119  * for more details on the implementation of the table of cancel records.
2120  */
2121 STATIC int
2122 xlog_recover_do_buffer_trans(
2123         xlog_t                  *log,
2124         xlog_recover_item_t     *item,
2125         int                     pass)
2126 {
2127         xfs_buf_log_format_t    *buf_f;
2128         xfs_mount_t             *mp;
2129         xfs_buf_t               *bp;
2130         int                     error;
2131         int                     cancel;
2132         xfs_daddr_t             blkno;
2133         int                     len;
2134         ushort                  flags;
2135
2136         buf_f = (xfs_buf_log_format_t *)item->ri_buf[0].i_addr;
2137
2138         if (pass == XLOG_RECOVER_PASS1) {
2139                 /*
2140                  * In this pass we're only looking for buf items
2141                  * with the XFS_BLI_CANCEL bit set.
2142                  */
2143                 xlog_recover_do_buffer_pass1(log, buf_f);
2144                 return 0;
2145         } else {
2146                 /*
2147                  * In this pass we want to recover all the buffers
2148                  * which have not been cancelled and are not
2149                  * cancellation buffers themselves.  The routine
2150                  * we call here will tell us whether or not to
2151                  * continue with the replay of this buffer.
2152                  */
2153                 cancel = xlog_recover_do_buffer_pass2(log, buf_f);
2154                 if (cancel) {
2155                         return 0;
2156                 }
2157         }
2158         switch (buf_f->blf_type) {
2159         case XFS_LI_BUF:
2160                 blkno = buf_f->blf_blkno;
2161                 len = buf_f->blf_len;
2162                 flags = buf_f->blf_flags;
2163                 break;
2164         default:
2165                 xfs_fs_cmn_err(CE_ALERT, log->l_mp,
2166                         "xfs_log_recover: unknown buffer type 0x%x, logdev %s",
2167                         buf_f->blf_type, log->l_mp->m_logname ?
2168                         log->l_mp->m_logname : "internal");
2169                 XFS_ERROR_REPORT("xlog_recover_do_buffer_trans",
2170                                  XFS_ERRLEVEL_LOW, log->l_mp);
2171                 return XFS_ERROR(EFSCORRUPTED);
2172         }
2173
2174         mp = log->l_mp;
2175         if (flags & XFS_BLI_INODE_BUF) {
2176                 bp = xfs_buf_read_flags(mp->m_ddev_targp, blkno, len,
2177                                                                 XFS_BUF_LOCK);
2178         } else {
2179                 bp = xfs_buf_read(mp->m_ddev_targp, blkno, len, 0);
2180         }
2181         if (XFS_BUF_ISERROR(bp)) {
2182                 xfs_ioerror_alert("xlog_recover_do..(read#1)", log->l_mp,
2183                                   bp, blkno);
2184                 error = XFS_BUF_GETERROR(bp);
2185                 xfs_buf_relse(bp);
2186                 return error;
2187         }
2188
2189         error = 0;
2190         if (flags & XFS_BLI_INODE_BUF) {
2191                 error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
2192         } else if (flags &
2193                   (XFS_BLI_UDQUOT_BUF|XFS_BLI_PDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) {
2194                 xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
2195         } else {
2196                 xlog_recover_do_reg_buffer(item, bp, buf_f);
2197         }
2198         if (error) {
2199                 xfs_buf_relse(bp);      /* don't leak the buffer on error */
2200                 return XFS_ERROR(error);
2201         }
2200
2201         /*
2202          * Perform delayed write on the buffer.  Asynchronous writes will be
2203          * slower when taking into account all the buffers to be flushed.
2204          *
2205          * Also make sure that only inode buffers with good sizes stay in
2206          * the buffer cache.  The kernel moves inodes in buffers of 1 block
2207          * or XFS_INODE_CLUSTER_SIZE bytes, whichever is bigger.  The inode
2208          * buffers in the log can be a different size if the log was generated
2209          * by an older kernel using unclustered inode buffers or a newer kernel
2210          * running with a different inode cluster size.  Regardless, if
2211          * the inode buffer size isn't MAX(blocksize, XFS_INODE_CLUSTER_SIZE)
2212          * for *our* value of XFS_INODE_CLUSTER_SIZE, then we need to keep
2213          * the buffer out of the buffer cache so that the buffer won't
2214          * overlap with future reads of those inodes.
2215          */
2216         if (XFS_DINODE_MAGIC ==
2217             be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
2218             (XFS_BUF_COUNT(bp) != MAX(log->l_mp->m_sb.sb_blocksize,
2219                         (__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) {
2220                 XFS_BUF_STALE(bp);
2221                 error = xfs_bwrite(mp, bp);
2222         } else {
2223                 ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL ||
2224                        XFS_BUF_FSPRIVATE(bp, xfs_mount_t *) == mp);
2225                 XFS_BUF_SET_FSPRIVATE(bp, mp);
2226                 XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
2227                 xfs_bdwrite(mp, bp);
2228         }
2229
2230         return (error);
2231 }
2232
2233 STATIC int
2234 xlog_recover_do_inode_trans(
2235         xlog_t                  *log,
2236         xlog_recover_item_t     *item,
2237         int                     pass)
2238 {
2239         xfs_inode_log_format_t  *in_f;
2240         xfs_mount_t             *mp;
2241         xfs_buf_t               *bp;
2242         xfs_imap_t              imap;
2243         xfs_dinode_t            *dip;
2244         xfs_ino_t               ino;
2245         int                     len;
2246         xfs_caddr_t             src;
2247         xfs_caddr_t             dest;
2248         int                     error;
2249         int                     attr_index;
2250         uint                    fields;
2251         xfs_icdinode_t          *dicp;
2252         int                     need_free = 0;
2253
2254         if (pass == XLOG_RECOVER_PASS1) {
2255                 return 0;
2256         }
2257
2258         if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) {
2259                 in_f = (xfs_inode_log_format_t *)item->ri_buf[0].i_addr;
2260         } else {
2261                 in_f = (xfs_inode_log_format_t *)kmem_alloc(
2262                         sizeof(xfs_inode_log_format_t), KM_SLEEP);
2263                 need_free = 1;
2264                 error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
2265                 if (error)
2266                         goto error;
2267         }
2268         ino = in_f->ilf_ino;
2269         mp = log->l_mp;
2270         if (ITEM_TYPE(item) == XFS_LI_INODE) {
2271                 imap.im_blkno = (xfs_daddr_t)in_f->ilf_blkno;
2272                 imap.im_len = in_f->ilf_len;
2273                 imap.im_boffset = in_f->ilf_boffset;
2274         } else {
2275                 /*
2276                  * It's an old inode format record.  We don't know where
2277                  * its cluster is located on disk, and we can't allow
2278                  * xfs_imap() to figure it out because the inode btrees
2279                  * are not ready to be used.  Therefore do not pass the
2280                  * XFS_IMAP_LOOKUP flag to xfs_imap().  This will give
2281                  * us only the single block in which the inode lives
2282                  * rather than its cluster, so we must make sure to
2283                  * invalidate the buffer when we write it out below.
2284                  */
2285                 imap.im_blkno = 0;
2286                 error = xfs_imap(log->l_mp, NULL, ino, &imap, 0);
2287                 if (error)
2288                         goto error;
2289         }
2290
2291         /*
2292          * Inode buffers can be freed, look out for it,
2293          * and do not replay the inode.
2294          */
2295         if (xlog_check_buffer_cancelled(log, imap.im_blkno, imap.im_len, 0)) {
2296                 error = 0;
2297                 goto error;
2298         }
2299
2300         bp = xfs_buf_read_flags(mp->m_ddev_targp, imap.im_blkno, imap.im_len,
2301                                                                 XFS_BUF_LOCK);
2302         if (XFS_BUF_ISERROR(bp)) {
2303                 xfs_ioerror_alert("xlog_recover_do..(read#2)", mp,
2304                                   bp, imap.im_blkno);
2305                 error = XFS_BUF_GETERROR(bp);
2306                 xfs_buf_relse(bp);
2307                 goto error;
2308         }
2309         error = 0;
2310         ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
2311         dip = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
2312
2313         /*
2314          * Make sure the place we're flushing out to really looks
2315          * like an inode!
2316          */
2317         if (unlikely(be16_to_cpu(dip->di_core.di_magic) != XFS_DINODE_MAGIC)) {
2318                 xfs_buf_relse(bp);
2319                 xfs_fs_cmn_err(CE_ALERT, mp,
2320                         "xfs_inode_recover: Bad inode magic number, dino ptr = 0x%p, dino bp = 0x%p, ino = %Ld",
2321                         dip, bp, ino);
2322                 XFS_ERROR_REPORT("xlog_recover_do_inode_trans(1)",
2323                                  XFS_ERRLEVEL_LOW, mp);
2324                 error = EFSCORRUPTED;
2325                 goto error;
2326         }
2327         dicp = (xfs_icdinode_t *)(item->ri_buf[1].i_addr);
2328         if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) {
2329                 xfs_buf_relse(bp);
2330                 xfs_fs_cmn_err(CE_ALERT, mp,
2331                         "xfs_inode_recover: Bad inode log record, rec ptr 0x%p, ino %Ld",
2332                         item, ino);
2333                 XFS_ERROR_REPORT("xlog_recover_do_inode_trans(2)",
2334                                  XFS_ERRLEVEL_LOW, mp);
2335                 error = EFSCORRUPTED;
2336                 goto error;
2337         }
2338
2339         /* Skip replay when the on disk inode is newer than the log one */
2340         if (dicp->di_flushiter < be16_to_cpu(dip->di_core.di_flushiter)) {
2341                 /*
2342                  * Deal with the wrap case: the flush counter wraps from
2343                  * DI_MAX_FLUSH back around to small numbers
2344                  */
2345                 if (be16_to_cpu(dip->di_core.di_flushiter) == DI_MAX_FLUSH &&
2346                     dicp->di_flushiter < (DI_MAX_FLUSH >> 1)) {
2347                         /* do nothing */
2348                 } else {
2349                         xfs_buf_relse(bp);
2350                         error = 0;
2351                         goto error;
2352                 }
2353         }
2354         /* Take the opportunity to reset the flush iteration count */
2355         dicp->di_flushiter = 0;
2356
2357         if (unlikely((dicp->di_mode & S_IFMT) == S_IFREG)) {
2358                 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2359                     (dicp->di_format != XFS_DINODE_FMT_BTREE)) {
2360                         XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(3)",
2361                                          XFS_ERRLEVEL_LOW, mp, dicp);
2362                         xfs_buf_relse(bp);
2363                         xfs_fs_cmn_err(CE_ALERT, mp,
2364                                 "xfs_inode_recover: Bad regular inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2365                                 item, dip, bp, ino);
2366                         error = EFSCORRUPTED;
2367                         goto error;
2368                 }
2369         } else if (unlikely((dicp->di_mode & S_IFMT) == S_IFDIR)) {
2370                 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2371                     (dicp->di_format != XFS_DINODE_FMT_BTREE) &&
2372                     (dicp->di_format != XFS_DINODE_FMT_LOCAL)) {
2373                         XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(4)",
2374                                              XFS_ERRLEVEL_LOW, mp, dicp);
2375                         xfs_buf_relse(bp);
2376                         xfs_fs_cmn_err(CE_ALERT, mp,
2377                                 "xfs_inode_recover: Bad dir inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2378                                 item, dip, bp, ino);
2379                         error = EFSCORRUPTED;
2380                         goto error;
2381                 }
2382         }
2383         if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)){
2384                 XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(5)",
2385                                      XFS_ERRLEVEL_LOW, mp, dicp);
2386                 xfs_buf_relse(bp);
2387                 xfs_fs_cmn_err(CE_ALERT, mp,
2388                         "xfs_inode_recover: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
2389                         item, dip, bp, ino,
2390                         dicp->di_nextents + dicp->di_anextents,
2391                         dicp->di_nblocks);
2392                 error = EFSCORRUPTED;
2393                 goto error;
2394         }
2395         if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) {
2396                 XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(6)",
2397                                      XFS_ERRLEVEL_LOW, mp, dicp);
2398                 xfs_buf_relse(bp);
2399                 xfs_fs_cmn_err(CE_ALERT, mp,
2400                         "xfs_inode_recover: Bad inode log rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, forkoff 0x%x",
2401                         item, dip, bp, ino, dicp->di_forkoff);
2402                 error = EFSCORRUPTED;
2403                 goto error;
2404         }
2405         if (unlikely(item->ri_buf[1].i_len > sizeof(xfs_dinode_core_t))) {
2406                 XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(7)",
2407                                      XFS_ERRLEVEL_LOW, mp, dicp);
2408                 xfs_buf_relse(bp);
2409                 xfs_fs_cmn_err(CE_ALERT, mp,
2410                         "xfs_inode_recover: Bad inode log record length %d, rec ptr 0x%p",
2411                         item->ri_buf[1].i_len, item);
2412                 error = EFSCORRUPTED;
2413                 goto error;
2414         }
2415
2416         /* The core is in in-core format */
2417         xfs_dinode_to_disk(&dip->di_core,
2418                 (xfs_icdinode_t *)item->ri_buf[1].i_addr);
2419
2420         /* the rest is in on-disk format */
2421         if (item->ri_buf[1].i_len > sizeof(xfs_dinode_core_t)) {
2422                 memcpy((xfs_caddr_t) dip + sizeof(xfs_dinode_core_t),
2423                         item->ri_buf[1].i_addr + sizeof(xfs_dinode_core_t),
2424                         item->ri_buf[1].i_len  - sizeof(xfs_dinode_core_t));
2425         }
2426
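        /*
         * Replay the device number or UUID that may have been logged
         * along with the inode core for special inodes.
         */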
2427         fields = in_f->ilf_fields;
2428         switch (fields & (XFS_ILOG_DEV | XFS_ILOG_UUID)) {
2429         case XFS_ILOG_DEV:
2430                 dip->di_u.di_dev = cpu_to_be32(in_f->ilf_u.ilfu_rdev);
2431                 break;
2432         case XFS_ILOG_UUID:
2433                 dip->di_u.di_muuid = in_f->ilf_u.ilfu_uuid;
2434                 break;
2435         }
2436
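        /*
         * If only the log format structure and the inode core were
         * logged (ilf_size == 2), there are no forks to replay.
         */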
2437         if (in_f->ilf_size == 2)
2438                 goto write_inode_buffer;
2439         len = item->ri_buf[2].i_len;
2440         src = item->ri_buf[2].i_addr;
2441         ASSERT(in_f->ilf_size <= 4);
2442         ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
2443         ASSERT(!(fields & XFS_ILOG_DFORK) ||
2444                (len == in_f->ilf_dsize));
2445
2446         switch (fields & XFS_ILOG_DFORK) {
2447         case XFS_ILOG_DDATA:
2448         case XFS_ILOG_DEXT:
2449                 memcpy(&dip->di_u, src, len);
2450                 break;
2451
2452         case XFS_ILOG_DBROOT:
2453                 xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len,
2454                                  &dip->di_u.di_bmbt,
2455                                  XFS_DFORK_DSIZE(dip, mp));
2456                 break;
2457
2458         default:
2459                 /*
2460                  * There are no data fork flags set.
2461                  */
2462                 ASSERT((fields & XFS_ILOG_DFORK) == 0);
2463                 break;
2464         }
2465
2466         /*
2467          * If we logged any attribute data, recover it.  There may or
2468          * may not have been any other non-core data logged in this
2469          * transaction.
2470          */
2471         if (in_f->ilf_fields & XFS_ILOG_AFORK) {
2472                 if (in_f->ilf_fields & XFS_ILOG_DFORK) {
2473                         attr_index = 3;
2474                 } else {
2475                         attr_index = 2;
2476                 }
2477                 len = item->ri_buf[attr_index].i_len;
2478                 src = item->ri_buf[attr_index].i_addr;
2479                 ASSERT(len == in_f->ilf_asize);
2480
2481                 switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
2482                 case XFS_ILOG_ADATA:
2483                 case XFS_ILOG_AEXT:
2484                         dest = XFS_DFORK_APTR(dip);
2485                         ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
2486                         memcpy(dest, src, len);
2487                         break;
2488
2489                 case XFS_ILOG_ABROOT:
2490                         dest = XFS_DFORK_APTR(dip);
2491                         xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src,
2492                                          len, (xfs_bmdr_block_t*)dest,
2493                                          XFS_DFORK_ASIZE(dip, mp));
2494                         break;
2495
2496                 default:
2497                         xlog_warn("XFS: xlog_recover_do_inode_trans: Invalid flag");
2498                         ASSERT(0);
2499                         xfs_buf_relse(bp);
2500                         error = EIO;
2501                         goto error;
2502                 }
2503         }
2504
2505 write_inode_buffer:
2506         if (ITEM_TYPE(item) == XFS_LI_INODE) {
2507                 ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL ||
2508                        XFS_BUF_FSPRIVATE(bp, xfs_mount_t *) == mp);
2509                 XFS_BUF_SET_FSPRIVATE(bp, mp);
2510                 XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
2511                 xfs_bdwrite(mp, bp);
2512         } else {
2513                 XFS_BUF_STALE(bp);
2514                 error = xfs_bwrite(mp, bp);
2515         }
2516
2517 error:
2518         if (need_free)
2519                 kmem_free(in_f);
2520         return XFS_ERROR(error);
2521 }
2522
2523 /*
2524  * Recover QUOTAOFF records.  We simply make a note of them in the xlog_t
2525  * structure, so that we know not to do any dquot item or dquot buffer
2526  * recovery of that type.
2527  */
2528 STATIC int
2529 xlog_recover_do_quotaoff_trans(
2530         xlog_t                  *log,
2531         xlog_recover_item_t     *item,
2532         int                     pass)
2533 {
2534         xfs_qoff_logformat_t    *qoff_f;
2535
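        /* quotaoff records are noted in pass 1; there is nothing to replay */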
2536         if (pass == XLOG_RECOVER_PASS2) {
2537                 return (0);
2538         }
2539
2540         qoff_f = (xfs_qoff_logformat_t *)item->ri_buf[0].i_addr;
2541         ASSERT(qoff_f);
2542
2543         /*
2544          * The logitem format's flag tells us if this was user quotaoff,
2545          * group/project quotaoff or both.
2546          */
2547         if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
2548                 log->l_quotaoffs_flag |= XFS_DQ_USER;
2549         if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
2550                 log->l_quotaoffs_flag |= XFS_DQ_PROJ;
2551         if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
2552                 log->l_quotaoffs_flag |= XFS_DQ_GROUP;
2553
2554         return (0);
2555 }
2556
2557 /*
2558  * Recover a dquot record
2559  */
2560 STATIC int
2561 xlog_recover_do_dquot_trans(
2562         xlog_t                  *log,
2563         xlog_recover_item_t     *item,
2564         int                     pass)
2565 {
2566         xfs_mount_t             *mp;
2567         xfs_buf_t               *bp;
2568         struct xfs_disk_dquot   *ddq, *recddq;
2569         int                     error;
2570         xfs_dq_logformat_t      *dq_f;
2571         uint                    type;
2572
2573         if (pass == XLOG_RECOVER_PASS1) {
2574                 return 0;
2575         }
2576         mp = log->l_mp;
2577
2578         /*
2579          * Filesystems are required to send in quota flags at mount time.
2580          */
2581         if (mp->m_qflags == 0)
2582                 return (0);
2583
2584         recddq = (xfs_disk_dquot_t *)item->ri_buf[1].i_addr;
2585         ASSERT(recddq);
2586         /*
2587          * This type of quotas was turned off, so ignore this record.
2588          */
2589         type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
2590         ASSERT(type);
2591         if (log->l_quotaoffs_flag & type)
2592                 return (0);
2593
2594         /*
2595          * At this point we know that quota was _not_ turned off.
2596          * Since the mount flags are not indicating to us otherwise, this
2597          * must mean that quota is on, and the dquot needs to be replayed.
2598          * Remember that we may not have fully recovered the superblock yet,
2599          * so we can't do the usual trick of looking at the SB quota bits.
2600          *
2601          * The other possibility, of course, is that the quota subsystem was
2602          * removed since the last mount - ENOSYS.
2603          */
2604         dq_f = (xfs_dq_logformat_t *)item->ri_buf[0].i_addr;
2605         ASSERT(dq_f);
2606         if ((error = xfs_qm_dqcheck(recddq,
2607                            dq_f->qlf_id,
2608                            0, XFS_QMOPT_DOWARN,
2609                            "xlog_recover_do_dquot_trans (log copy)"))) {
2610                 return XFS_ERROR(EIO);
2611         }
2612         ASSERT(dq_f->qlf_len == 1);
2613
2614         error = xfs_read_buf(mp, mp->m_ddev_targp,
2615                              dq_f->qlf_blkno,
2616                              XFS_FSB_TO_BB(mp, dq_f->qlf_len),
2617                              0, &bp);
2618         if (error) {
2619                 xfs_ioerror_alert("xlog_recover_do..(read#3)", mp,
2620                                   bp, dq_f->qlf_blkno);
2621                 return error;
2622         }
2623         ASSERT(bp);
2624         ddq = (xfs_disk_dquot_t *)xfs_buf_offset(bp, dq_f->qlf_boffset);
2625
2626         /*
2627          * At least the magic num portion should be on disk because this
2628          * was among a chunk of dquots created earlier, and we did some
2629          * minimal initialization then.
2630          */
2631         if (xfs_qm_dqcheck(ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
2632                            "xlog_recover_do_dquot_trans")) {
2633                 xfs_buf_relse(bp);
2634                 return XFS_ERROR(EIO);
2635         }
2636
2637         memcpy(ddq, recddq, item->ri_buf[1].i_len);
2638
2639         ASSERT(dq_f->qlf_size == 2);
2640         ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL ||
2641                XFS_BUF_FSPRIVATE(bp, xfs_mount_t *) == mp);
2642         XFS_BUF_SET_FSPRIVATE(bp, mp);
2643         XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
2644         xfs_bdwrite(mp, bp);
2645
2646         return (0);
2647 }
2648
2649 /*
2650  * This routine is called to create an in-core extent free intent
2651  * item from the efi format structure which was logged on disk.
2652  * It allocates an in-core efi, copies the extents from the format
2653  * structure into it, and adds the efi to the AIL with the given
2654  * LSN.
2655  */
2656 STATIC int
2657 xlog_recover_do_efi_trans(
2658         xlog_t                  *log,
2659         xlog_recover_item_t     *item,
2660         xfs_lsn_t               lsn,
2661         int                     pass)
2662 {
2663         int                     error;
2664         xfs_mount_t             *mp;
2665         xfs_efi_log_item_t      *efip;
2666         xfs_efi_log_format_t    *efi_formatp;
2667
2668         if (pass == XLOG_RECOVER_PASS1) {
2669                 return 0;
2670         }
2671
2672         efi_formatp = (xfs_efi_log_format_t *)item->ri_buf[0].i_addr;
2673
2674         mp = log->l_mp;
2675         efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
2676         if ((error = xfs_efi_copy_format(&(item->ri_buf[0]),
2677                                          &(efip->efi_format)))) {
2678                 xfs_efi_item_free(efip);
2679                 return error;
2680         }
2681         efip->efi_next_extent = efi_formatp->efi_nextents;
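             /*
              * We are replaying this EFI from the log, so it has by
              * definition been committed on disk; mark it as such
              * before inserting it into the AIL.
              */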
2682         efip->efi_flags |= XFS_EFI_COMMITTED;
2683
2684         spin_lock(&mp->m_ail->xa_lock);
2685         /*
2686          * xfs_trans_update_ail() drops the AIL lock.
2687          */
2688         xfs_trans_update_ail(mp, (xfs_log_item_t *)efip, lsn);
2689         return 0;
2690 }
2691
2692
2693 /*
2694  * This routine is called when an efd format structure is found in
2695  * a committed transaction in the log.  Its purpose is to cancel
2696  * the corresponding efi if it was still in the log.  To do this
2697  * it searches the AIL for the efi with an id equal to that in the
2698  * efd format structure.  If we find it, we remove the efi from the
2699  * AIL and free it.
2700  */
2701 STATIC void
2702 xlog_recover_do_efd_trans(
2703         xlog_t                  *log,
2704         xlog_recover_item_t     *item,
2705         int                     pass)
2706 {
2707         xfs_mount_t             *mp;
2708         xfs_efd_log_format_t    *efd_formatp;
2709         xfs_efi_log_item_t      *efip = NULL;
2710         xfs_log_item_t          *lip;
2711         __uint64_t              efi_id;
2712         struct xfs_ail_cursor   cur;
2713
2714         if (pass == XLOG_RECOVER_PASS1) {
2715                 return;
2716         }
2717
2718         efd_formatp = (xfs_efd_log_format_t *)item->ri_buf[0].i_addr;
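             /*
              * The on-disk EFD comes in 32-bit and 64-bit variants, so
              * accept either size for the logged region.
              */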
2719         ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
2720                 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
2721                (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
2722                 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t)))));
2723         efi_id = efd_formatp->efd_efi_id;
2724
2725         /*
2726          * Search for the efi with the id in the efd format structure
2727          * in the AIL.
2728          */
2729         mp = log->l_mp;
2730         spin_lock(&mp->m_ail->xa_lock);
2731         lip = xfs_trans_ail_cursor_first(mp->m_ail, &cur, 0);
2732         while (lip != NULL) {
2733                 if (lip->li_type == XFS_LI_EFI) {
2734                         efip = (xfs_efi_log_item_t *)lip;
2735                         if (efip->efi_format.efi_id == efi_id) {
2736                                 /*
2737                                  * xfs_trans_delete_ail() drops the
2738                                  * AIL lock.
2739                                  */
2740                                 xfs_trans_delete_ail(mp, lip);
2741                                 xfs_efi_item_free(efip);
2742                                 spin_lock(&mp->m_ail->xa_lock);
2743                                 break;
2744                         }
2745                 }
2746                 lip = xfs_trans_ail_cursor_next(mp->m_ail, &cur);
2747         }
2748         xfs_trans_ail_cursor_done(mp->m_ail, &cur);
2749         spin_unlock(&mp->m_ail->xa_lock);
2750 }
2751
2752 /*
2753  * Perform the transaction
2754  *
2755  * If the transaction modifies a buffer or inode, do it now.  Otherwise,
2756  * EFIs and EFDs get queued up by adding entries into the AIL for them.
2757  */
2758 STATIC int
2759 xlog_recover_do_trans(
2760         xlog_t                  *log,
2761         xlog_recover_t          *trans,
2762         int                     pass)
2763 {
2764         int                     error = 0;
2765         xlog_recover_item_t     *item, *first_item;
2766
2767         if ((error = xlog_recover_reorder_trans(trans)))
2768                 return error;
2769         first_item = item = trans->r_itemq;
2770         do {
2771                 /*
2772                  * We don't need to worry about the block number being
2773                  * truncated in > 1 TB buffers because, in user-land,
2774                  * we're now n32 or 64-bit, so xfs_daddr_t is 64 bits and
2775                  * the blknos will get through the user-mode buffer
2776                  * cache properly.  The only bad case is o32 kernels,
2777                  * where xfs_daddr_t is 32 bits, but mount will warn us
2778                  * off a > 1 TB filesystem before we get here.
2779                  */
2780                 if (ITEM_TYPE(item) == XFS_LI_BUF) {
2781                         if ((error = xlog_recover_do_buffer_trans(log, item,
2782                                                                    pass)))
2783                                 break;
2784                 } else if (ITEM_TYPE(item) == XFS_LI_INODE) {
2785                         if ((error = xlog_recover_do_inode_trans(log, item,
2786                                                                   pass)))
2787                                 break;
2788                 } else if (ITEM_TYPE(item) == XFS_LI_EFI) {
2789                         if ((error = xlog_recover_do_efi_trans(log, item,
2790                                                         trans->r_lsn, pass)))
2791                                 break;
2792                 } else if (ITEM_TYPE(item) == XFS_LI_EFD) {
2793                         xlog_recover_do_efd_trans(log, item, pass);
2794                 } else if (ITEM_TYPE(item) == XFS_LI_DQUOT) {
2795                         if ((error = xlog_recover_do_dquot_trans(log, item,
2796                                                                   pass)))
2797                                 break;
2798                 } else if (ITEM_TYPE(item) == XFS_LI_QUOTAOFF) {
2799                         if ((error = xlog_recover_do_quotaoff_trans(log, item,
2800                                                                      pass)))
2801                                 break;
2802                 } else {
2803                         xlog_warn("XFS: %s: bad item type %d", __func__, ITEM_TYPE(item));
2804                         ASSERT(0);
2805                         error = XFS_ERROR(EIO);
2806                         break;
2807                 }
2808                 item = item->ri_next;
2809         } while (first_item != item);
2810
2811         return error;
2812 }
2813
2814 /*
2815  * Free up any resources allocated by the transaction
2816  *
2817  * Remember that EFIs, EFDs, and IUNLINKs are handled later.
2818  */
2819 STATIC void
2820 xlog_recover_free_trans(
2821         xlog_recover_t          *trans)
2822 {
2823         xlog_recover_item_t     *first_item, *item, *free_item;
2824         int                     i;
2825
2826         item = first_item = trans->r_itemq;
2827         do {
2828                 free_item = item;
2829                 item = item->ri_next;
2830                 /* Free the regions in the item. */
2831                 for (i = 0; i < free_item->ri_cnt; i++) {
2832                         kmem_free(free_item->ri_buf[i].i_addr);
2833                 }
2834                 /* Free the item itself */
2835                 kmem_free(free_item->ri_buf);
2836                 kmem_free(free_item);
2837         } while (first_item != item);
2838         /* Free the transaction recover structure */
2839         kmem_free(trans);
2840 }
2841
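     /*
      * A commit record was found for this transaction id: unlink the
      * transaction from the hash of in-progress transactions, replay
      * its items, and free it.
      */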
2842 STATIC int
2843 xlog_recover_commit_trans(
2844         xlog_t                  *log,
2845         xlog_recover_t          **q,
2846         xlog_recover_t          *trans,
2847         int                     pass)
2848 {
2849         int                     error;
2850
2851         if ((error = xlog_recover_unlink_tid(q, trans)))
2852                 return error;
2853         if ((error = xlog_recover_do_trans(log, trans, pass)))
2854                 return error;
2855         xlog_recover_free_trans(trans);                 /* no error */
2856         return 0;
2857 }
2858
2859 STATIC int
2860 xlog_recover_unmount_trans(
2861         xlog_recover_t          *trans)
2862 {
2863         /* Do nothing now */
2864         xlog_warn("XFS: xlog_recover_unmount_trans: Unmount LR");
2865         return 0;
2866 }
2867
2868 /*
2869  * There are two valid states of the r_state field.  0 indicates that the
2870  * transaction structure is in a normal state: we have either seen the
2871  * start of the transaction or the last operation we added was not a
2872  * partial operation.  If the last operation we added to the transaction
2873  * was a partial operation, we mark r_state with XLOG_WAS_CONT_TRANS.
2874  *
2875  * NOTE: skip LRs with 0 data length.
2876  */
2877 STATIC int
2878 xlog_recover_process_data(
2879         xlog_t                  *log,
2880         xlog_recover_t          *rhash[],
2881         xlog_rec_header_t       *rhead,
2882         xfs_caddr_t             dp,
2883         int                     pass)
2884 {
2885         xfs_caddr_t             lp;
2886         int                     num_logops;
2887         xlog_op_header_t        *ohead;
2888         xlog_recover_t          *trans;
2889         xlog_tid_t              tid;
2890         int                     error;
2891         unsigned long           hash;
2892         uint                    flags;
2893
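             /* lp marks the end of this log record's data */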
2894         lp = dp + be32_to_cpu(rhead->h_len);
2895         num_logops = be32_to_cpu(rhead->h_num_logops);
2896
2897         /* check the log format matches our own - else we can't recover */
2898         if (xlog_header_check_recover(log->l_mp, rhead))
2899                 return (XFS_ERROR(EIO));
2900
2901         while ((dp < lp) && num_logops) {
2902                 ASSERT(dp + sizeof(xlog_op_header_t) <= lp);
2903                 ohead = (xlog_op_header_t *)dp;
2904                 dp += sizeof(xlog_op_header_t);
2905                 if (ohead->oh_clientid != XFS_TRANSACTION &&
2906                     ohead->oh_clientid != XFS_LOG) {
2907                         xlog_warn(
2908                 "XFS: xlog_recover_process_data: bad clientid");
2909                         ASSERT(0);
2910                         return (XFS_ERROR(EIO));
2911                 }
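                     /*
                      * In-flight transactions are tracked in a small
                      * hash table keyed on the transaction id from the
                      * op header.
                      */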
2912                 tid = be32_to_cpu(ohead->oh_tid);
2913                 hash = XLOG_RHASH(tid);
2914                 trans = xlog_recover_find_tid(rhash[hash], tid);
2915                 if (trans == NULL) {               /* not found; add new tid */
2916                         if (ohead->oh_flags & XLOG_START_TRANS)
2917                                 xlog_recover_new_tid(&rhash[hash], tid,
2918                                         be64_to_cpu(rhead->h_lsn));
2919                 } else {
2920                         if (dp + be32_to_cpu(ohead->oh_len) > lp) {
2921                                 xlog_warn(
2922                         "XFS: xlog_recover_process_data: bad length");
2923                                 WARN_ON(1);
2924                                 return (XFS_ERROR(EIO));
2925                         }
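                             /*
                              * Mask off XLOG_END_TRANS and, for a region
                              * that continues a partial operation, the
                              * CONTINUE flag as well, so the switch below
                              * sees a single state.
                              */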
2926                         flags = ohead->oh_flags & ~XLOG_END_TRANS;
2927                         if (flags & XLOG_WAS_CONT_TRANS)
2928                                 flags &= ~XLOG_CONTINUE_TRANS;
2929                         switch (flags) {
2930                         case XLOG_COMMIT_TRANS:
2931                                 error = xlog_recover_commit_trans(log,
2932                                                 &rhash[hash], trans, pass);
2933                                 break;
2934                         case XLOG_UNMOUNT_TRANS:
2935                                 error = xlog_recover_unmount_trans(trans);
2936                                 break;
2937                         case XLOG_WAS_CONT_TRANS:
2938                                 error = xlog_recover_add_to_cont_trans(trans,
2939                                                 dp, be32_to_cpu(ohead->oh_len));
2940                                 break;
2941                         case XLOG_START_TRANS:
2942                                 xlog_warn(
2943                         "XFS: xlog_recover_process_data: bad transaction");
2944                                 ASSERT(0);
2945                                 error = XFS_ERROR(EIO);
2946                                 break;
2947                         case 0:
2948                         case XLOG_CONTINUE_TRANS:
2949                                 error = xlog_recover_add_to_trans(trans,
2950                                                 dp, be32_to_cpu(ohead->oh_len));
2951                                 break;
2952                         default:
2953                                 xlog_warn(
2954                         "XFS: xlog_recover_process_data: bad flag");
2955                                 ASSERT(0);
2956                                 error = XFS_ERROR(EIO);
2957                                 break;
2958                         }
2959                         if (error)
2960                                 return error;
2961                 }
2962                 dp += be32_to_cpu(ohead->oh_len);
2963                 num_logops--;
2964         }
2965         return 0;
2966 }
2967
2968 /*
2969  * Process an extent free intent item that was recovered from
2970  * the log.  We need to free the extents that it describes.
2971  */
2972 STATIC int
2973 xlog_recover_process_efi(
2974         xfs_mount_t             *mp,
2975         xfs_efi_log_item_t      *efip)
2976 {
2977         xfs_efd_log_item_t      *efdp;
2978         xfs_trans_t             *tp;
2979         int                     i;
2980         int                     error = 0;
2981         xfs_extent_t            *extp;
2982         xfs_fsblock_t           startblock_fsb;
2983
2984         ASSERT(!(efip->efi_flags & XFS_EFI_RECOVERED));
2985
2986         /*
2987          * First check the validity of the extents described by the
2988          * EFI.  If any are bad, then assume that all are bad and
2989          * just toss the EFI.
2990          */
2991         for (i = 0; i < efip->efi_format.efi_nextents; i++) {
2992                 extp = &(efip->efi_format.efi_extents[i]);
2993                 startblock_fsb = XFS_BB_TO_FSB(mp,
2994                                    XFS_FSB_TO_DADDR(mp, extp->ext_start));
2995                 if ((startblock_fsb == 0) ||
2996                     (extp->ext_len == 0) ||
2997                     (startblock_fsb >= mp->m_sb.sb_dblocks) ||
2998                     (extp->ext_len >= mp->m_sb.sb_agblocks)) {
2999                         /*
3000                          * This will pull the EFI from the AIL and
3001                          * free the memory associated with it.
3002                          */
3003                         xfs_efi_release(efip, efip->efi_format.efi_nextents);
3004                         return XFS_ERROR(EIO);
3005                 }
3006         }
3007
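             /*
              * Free the extents in a normal transaction.  The ITRUNCATE
              * log reservation is used here on the assumption that it is
              * large enough to cover the EFD and the extent free records.
              */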
3008         tp = xfs_trans_alloc(mp, 0);
3009         error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, 0, 0);
3010         if (error)
3011                 goto abort_error;
3012         efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
3013
3014         for (i = 0; i < efip->efi_format.efi_nextents; i++) {
3015                 extp = &(efip->efi_format.efi_extents[i]);
3016                 error = xfs_free_extent(tp, extp->ext_start, extp->ext_len);
3017                 if (error)
3018                         goto abort_error;
3019                 xfs_trans_log_efd_extent(tp, efdp, extp->ext_start,
3020                                          extp->ext_len);
3021         }
3022
3023         efip->efi_flags |= XFS_EFI_RECOVERED;
3024         error = xfs_trans_commit(tp, 0);
3025         return error;
3026
3027 abort_error:
3028         xfs_trans_cancel(tp, XFS_TRANS_ABORT);
3029         return error;
3030 }
3031
3032 /*
3033  * When this is called, all of the EFIs which did not have
3034  * corresponding EFDs should be in the AIL.  What we do now
3035  * is free the extents associated with each one.
3036  *
3037  * Since we process the EFIs in normal transactions, they
3038  * will be removed at some point after the commit.  This prevents
3039  * us from just walking down the list processing each one.
3040  * We'll use a flag in the EFI to skip those that we've already
3041  * processed and use the AIL iteration mechanism's generation
3042  * count to try to speed this up at least a bit.
3043  *
3044  * When we start, we know that the EFIs are the only things in
3045  * the AIL.  As we process them, however, other items are added
3046  * to the AIL.  Since everything added to the AIL must come after
3047  * everything already in the AIL, we stop processing as soon as
3048  * we see something other than an EFI in the AIL.
3049  */
3050 STATIC int
3051 xlog_recover_process_efis(
3052         xlog_t                  *log)
3053 {
3054         xfs_log_item_t          *lip;
3055         xfs_efi_log_item_t      *efip;
3056         xfs_mount_t             *mp;
3057         int                     error = 0;
3058         struct xfs_ail_cursor   cur;
3059
3060         mp = log->l_mp;
3061         spin_lock(&mp->m_ail->xa_lock);
3062
3063         lip = xfs_trans_ail_cursor_first(mp->m_ail, &cur, 0);
3064         while (lip != NULL) {
3065                 /*
3066                  * We're done when we see something other than an EFI.
3067                  * There should be no EFIs left in the AIL now.
3068                  */
3069                 if (lip->li_type != XFS_LI_EFI) {
3070 #ifdef DEBUG
3071                         for (; lip;
3072                                lip = xfs_trans_ail_cursor_next(mp->m_ail, &cur))
3073                                 ASSERT(lip->li_type != XFS_LI_EFI);
3074 #endif
3075                         break;
3076                 }
3077
3078                 /*
3079                  * Skip EFIs that we've already processed.
3080                  */
3081                 efip = (xfs_efi_log_item_t *)lip;
3082                 if (efip->efi_flags & XFS_EFI_RECOVERED) {
3083                         lip = xfs_trans_ail_cursor_next(mp->m_ail, &cur);
3084                         continue;
3085                 }
3086
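                     /*
                      * xlog_recover_process_efi() runs a transaction and
                      * can sleep, so the AIL spinlock cannot be held
                      * across the call.
                      */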
3087                 spin_unlock(&mp->m_ail->xa_lock);
3088                 error = xlog_recover_process_efi(mp, efip);
3089                 spin_lock(&mp->m_ail->xa_lock);
3090                 if (error)
3091                         goto out;
3092                 lip = xfs_trans_ail_cursor_next(mp->m_ail, &cur);
3093         }
3094 out:
3095         xfs_trans_ail_cursor_done(mp->m_ail, &cur);
3096         spin_unlock(&mp->m_ail->xa_lock);
3097         return error;
3098 }
3099
3100 /*
3101  * This routine performs a transaction to null out a bad inode pointer
3102  * in an agi unlinked inode hash bucket.
3103  */
3104 STATIC void
3105 xlog_recover_clear_agi_bucket(
3106         xfs_mount_t     *mp,
3107         xfs_agnumber_t  agno,
3108         int             bucket)
3109 {
3110         xfs_trans_t     *tp;
3111         xfs_agi_t       *agi;
3112         xfs_buf_t       *agibp;
3113         int             offset;
3114         int             error;
3115
3116         tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET);
3117         error = xfs_trans_reserve(tp, 0, XFS_CLEAR_AGI_BUCKET_LOG_RES(mp), 0, 0, 0);
3118         if (!error)
3119                 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
3120                                    XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
3121                                    XFS_FSS_TO_BB(mp, 1), 0, &agibp);
3122         if (error)
3123                 goto out_abort;
3124
3125         error = EINVAL;
3126         agi = XFS_BUF_TO_AGI(agibp);
3127         if (be32_to_cpu(agi->agi_magicnum) != XFS_AGI_MAGIC)
3128                 goto out_abort;
3129
3130         agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
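             /*
              * Log just the range covering the bucket entry we modified
              * so that only those bytes get written back.
              */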
3131         offset = offsetof(xfs_agi_t, agi_unlinked) +
3132                  (sizeof(xfs_agino_t) * bucket);
3133         xfs_trans_log_buf(tp, agibp, offset,
3134                           (offset + sizeof(xfs_agino_t) - 1));
3135
3136         error = xfs_trans_commit(tp, 0);
3137         if (error)
3138                 goto out_error;
3139         return;
3140
3141 out_abort:
3142         xfs_trans_cancel(tp, XFS_TRANS_ABORT);
3143 out_error:
3144         xfs_fs_cmn_err(CE_WARN, mp, "xlog_recover_clear_agi_bucket: "
3145                         "failed to clear agi %d. Continuing.", agno);
3146         return;
3147 }
3148
3149 /*
3150  * xlog_recover_process_iunlinks
3151  *
3152  * This is called during recovery to process any inodes which
3153  * we unlinked but did not free when the system crashed.  These
3154  * inodes will be on the lists in the AGI blocks.  What we do
3155  * here is scan all the AGIs and fully truncate and free any
3156  * inodes found on the lists.  Each inode is removed from the
3157  * lists when it has been fully truncated and is freed.  The
3158  * freeing of the inode and its removal from the list must be
3159  * atomic.
3160  */
3161 void
3162 xlog_recover_process_iunlinks(
3163         xlog_t          *log)
3164 {
3165         xfs_mount_t     *mp;
3166         xfs_agnumber_t  agno;
3167         xfs_agi_t       *agi;
3168         xfs_buf_t       *agibp;
3169         xfs_buf_t       *ibp;
3170         xfs_dinode_t    *dip;
3171         xfs_inode_t     *ip;
3172         xfs_agino_t     agino;
3173         xfs_ino_t       ino;
3174         int             bucket;
3175         int             error;
3176         uint            mp_dmevmask;
3177
3178         mp = log->l_mp;
3179
3180         /*
3181          * Prevent any DMAPI event from being sent while in this function.
3182          */
3183         mp_dmevmask = mp->m_dmevmask;
3184         mp->m_dmevmask = 0;
3185
3186         for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
3187                 /*
3188                  * Find the agi for this ag.
3189                  */
3190                 agibp = xfs_buf_read(mp->m_ddev_targp,
3191                                 XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
3192                                 XFS_FSS_TO_BB(mp, 1), 0);
3193                 if (XFS_BUF_ISERROR(agibp)) {
3194                         xfs_ioerror_alert("xlog_recover_process_iunlinks(#1)",
3195                                 log->l_mp, agibp,
3196                                 XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)));
3197                 }
3198                 agi = XFS_BUF_TO_AGI(agibp);
3199                 ASSERT(XFS_AGI_MAGIC == be32_to_cpu(agi->agi_magicnum));
3200
3201                 for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
3202
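                             /*
                              * Walk the chain of unlinked inodes in this
                              * bucket; each on-disk inode's
                              * di_next_unlinked field points at the next
                              * inode in the chain.
                              */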
3203                         agino = be32_to_cpu(agi->agi_unlinked[bucket]);
3204                         while (agino != NULLAGINO) {
3205
3206                                 /*
3207                                  * Release the agi buffer so that it can
3208                                  * be acquired in the normal course of the
3209                                  * transaction to truncate and free the inode.
3210                                  */
3211                                 xfs_buf_relse(agibp);
3212
3213                                 ino = XFS_AGINO_TO_INO(mp, agno, agino);
3214                                 error = xfs_iget(mp, NULL, ino, 0, 0, &ip, 0);
3215                                 ASSERT(error || (ip != NULL));
3216
3217                                 if (!error) {
3218                                         /*
3219                                          * Get the on disk inode to find the
3220                                          * next inode in the bucket.
3221                                          */
3222                                         error = xfs_itobp(mp, NULL, ip, &dip,
3223                                                         &ibp, 0, 0,
3224                                                         XFS_BUF_LOCK);
3225                                         ASSERT(error || (dip != NULL));
3226                                 }
3227
3228                                 if (!error) {
3229                                         ASSERT(ip->i_d.di_nlink == 0);
3230
3231                                         /* setup for the next pass */
3232                                         agino = be32_to_cpu(
3233                                                         dip->di_next_unlinked);
3234                                         xfs_buf_relse(ibp);
3235                                         /*
3236                                          * Prevent any DMAPI event from
3237                                          * being sent when the
3238                                          * reference on the inode is
3239                                          * dropped.
3240                                          */
3241                                         ip->i_d.di_dmevmask = 0;
3242
3243                                         /*
3244                                          * If this is a new inode, handle
3245                                          * it specially.  Otherwise,
3246                                          * just drop our reference to the
3247                                          * inode.  If there are no
3248                                          * other references, this will
3249                                          * send the inode to
3250                                          * xfs_inactive() which will
3251                                          * truncate the file and free
3252                                          * the inode.
3253                                          */
3254                                         if (ip->i_d.di_mode == 0)
3255                                                 xfs_iput_new(ip, 0);
3256                                         else
3257                                                 IRELE(ip);
3258                                 } else {
3259                                         /*
3260                                          * We can't read in the inode
3261                                          * this bucket points to, or
3262                                          * this inode is messed up.  Just
3263                                          * ditch this bucket of inodes.  We
3264                                          * will lose some inodes and space,
3265                                          * but at least we won't hang.  Call
3266                                          * xlog_recover_clear_agi_bucket()
3267                                          * to perform a transaction to clear
3268                                          * the inode pointer in the bucket.
3269                                          */
3270                                         xlog_recover_clear_agi_bucket(mp, agno,
3271                                                         bucket);
3272
3273                                         agino = NULLAGINO;
3274                                 }
3275
3276                                 /*
3277                                  * Reacquire the agi buffer and continue around
3278                                  * the loop.
3279                                  */
3280                                 agibp = xfs_buf_read(mp->m_ddev_targp,
3281                                                 XFS_AG_DADDR(mp, agno,
3282                                                         XFS_AGI_DADDR(mp)),
3283                                                 XFS_FSS_TO_BB(mp, 1), 0);
3284                                 if (XFS_BUF_ISERROR(agibp)) {
3285                                         xfs_ioerror_alert(
3286                                 "xlog_recover_process_iunlinks(#2)",
3287                                                 log->l_mp, agibp,
3288                                                 XFS_AG_DADDR(mp, agno,
3289                                                         XFS_AGI_DADDR(mp)));
3290                                 }
3291                                 agi = XFS_BUF_TO_AGI(agibp);
3292                                 ASSERT(XFS_AGI_MAGIC == be32_to_cpu(
3293                                         agi->agi_magicnum));
3294                         }
3295                 }
3296
3297                 /*
3298                  * Release the buffer for the current agi so we can
3299                  * go on to the next one.
3300                  */
3301                 xfs_buf_relse(agibp);
3302         }
3303
3304         mp->m_dmevmask = mp_dmevmask;
3305 }
3306
3307
3308 #ifdef DEBUG
3309 STATIC void
3310 xlog_pack_data_checksum(
3311         xlog_t          *log,
3312         xlog_in_core_t  *iclog,
3313         int             size)
3314 {
3315         int             i;
3316         __be32          *up;
3317         uint            chksum = 0;
3318
3319         up = (__be32 *)iclog->ic_datap;
3320         /* divide length by 4 to get # words */
3321         for (i = 0; i < (size >> 2); i++) {
3322                 chksum ^= be32_to_cpu(*up);
3323                 up++;
3324         }
3325         iclog->ic_header.h_chksum = cpu_to_be32(chksum);
3326 }
3327 #else
3328 #define xlog_pack_data_checksum(log, iclog, size)
3329 #endif
3330
3331 /*
3332  * Stamp cycle number in every block
3333  */
3334 void
3335 xlog_pack_data(
3336         xlog_t                  *log,
3337         xlog_in_core_t          *iclog,
3338         int                     roundoff)
3339 {
3340         int                     i, j, k;
3341         int                     size = iclog->ic_offset + roundoff;
3342         __be32                  cycle_lsn;
3343         xfs_caddr_t             dp;
3344         xlog_in_core_2_t        *xhdr;
3345
3346         xlog_pack_data_checksum(log, iclog, size);
3347
3348         cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn);
3349
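             /*
              * Save the first word of each data block in the record
              * header, then stamp every block with the record's cycle
              * number so that recovery can detect where valid record
              * data ends.
              */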
3350         dp = iclog->ic_datap;
3351         for (i = 0; i < BTOBB(size) &&
3352                 i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
3353                 iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp;
3354                 *(__be32 *)dp = cycle_lsn;
3355                 dp += BBSIZE;
3356         }
3357
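             /*
              * A v2 log with an iclog larger than XLOG_HEADER_CYCLE_SIZE
              * (32k) carries extended header blocks; spill the remaining
              * cycle data into them.  j selects the extended header, k
              * the slot within it.
              */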
3358         if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
3359                 xhdr = (xlog_in_core_2_t *)&iclog->ic_header;
3360                 for ( ; i < BTOBB(size); i++) {
3361                         j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3362                         k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3363                         xhdr[j].hic_xheader.xh_cycle_data[k] = *(__be32 *)dp;
3364                         *(__be32 *)dp = cycle_lsn;
3365                         dp += BBSIZE;
3366                 }
3367
3368                 for (i = 1; i < log->l_iclog_heads; i++) {
3369                         xhdr[i].hic_xheader.xh_cycle = cycle_lsn;
3370                 }
3371         }
3372 }
3373
3374 #if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
3375 STATIC void
3376 xlog_unpack_data_checksum(
3377         xlog_rec_header_t       *rhead,
3378         xfs_caddr_t             dp,
3379         xlog_t                  *log)
3380 {
3381         __be32                  *up = (__be32 *)dp;
3382         uint                    chksum = 0;
3383         int                     i;
3384
3385         /* divide length by 4 to get # words */
3386         for (i = 0; i < be32_to_cpu(rhead->h_len) >> 2; i++) {
3387                 chksum ^= be32_to_cpu(*up);
3388                 up++;
3389         }
3390         if (chksum != be32_to_cpu(rhead->h_chksum)) {
3391                 if (rhead->h_chksum ||
3392                     ((log->l_flags & XLOG_CHKSUM_MISMATCH) == 0)) {
3393                         cmn_err(CE_DEBUG,
3394                         "XFS: LogR chksum mismatch: was (0x%x) is (0x%x)\n",
3395                                 be32_to_cpu(rhead->h_chksum), chksum);
3396                         cmn_err(CE_DEBUG,
3397 "XFS: Disregard message if filesystem was created with non-DEBUG kernel");
3398                         if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
3399                                 cmn_err(CE_DEBUG,
3400                                         "XFS: LogR this is a LogV2 filesystem\n");
3401                         }
3402                         log->l_flags |= XLOG_CHKSUM_MISMATCH;
3403                 }
3404         }
3405 }
3406 #else
3407 #define xlog_unpack_data_checksum(rhead, dp, log)
3408 #endif
3409
3410 STATIC void
3411 xlog_unpack_data(
3412         xlog_rec_header_t       *rhead,
3413         xfs_caddr_t             dp,
3414         xlog_t                  *log)
3415 {
3416         int                     i, j, k;
3417         xlog_in_core_2_t        *xhdr;
3418
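             /*
              * Undo xlog_pack_data(): restore the original first word of
              * each data block from the cycle data saved in the record
              * header (and in its extended headers for a v2 log).
              */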
3419         for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
3420                   i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
3421                 *(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
3422                 dp += BBSIZE;
3423         }
3424
3425         if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
3426                 xhdr = (xlog_in_core_2_t *)rhead;
3427                 for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
3428                         j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3429                         k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3430                         *(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
3431                         dp += BBSIZE;
3432                 }
3433         }
3434
3435         xlog_unpack_data_checksum(rhead, dp, log);
3436 }
3437
3438 STATIC int
3439 xlog_valid_rec_header(
3440         xlog_t                  *log,
3441         xlog_rec_header_t       *rhead,
3442         xfs_daddr_t             blkno)
3443 {
3444         int                     hlen;
3445
3446         if (unlikely(be32_to_cpu(rhead->h_magicno) != XLOG_HEADER_MAGIC_NUM)) {
3447                 XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
3448                                 XFS_ERRLEVEL_LOW, log->l_mp);
3449                 return XFS_ERROR(EFSCORRUPTED);
3450         }
3451         if (unlikely(
3452             (!rhead->h_version ||
3453             (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
3454                 xlog_warn("XFS: %s: unrecognised log version (%d).",
3455                         __func__, be32_to_cpu(rhead->h_version));
3456                 return XFS_ERROR(EIO);
3457         }
3458
3459         /* LR body must have data or it wouldn't have been written */
3460         hlen = be32_to_cpu(rhead->h_len);
3461         if (unlikely(hlen <= 0 || hlen > INT_MAX)) {
3462                 XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
3463                                 XFS_ERRLEVEL_LOW, log->l_mp);
3464                 return XFS_ERROR(EFSCORRUPTED);
3465         }
3466         if (unlikely(blkno > log->l_logBBsize || blkno > INT_MAX)) {
3467                 XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
3468                                 XFS_ERRLEVEL_LOW, log->l_mp);
3469                 return XFS_ERROR(EFSCORRUPTED);
3470         }
3471         return 0;
3472 }
3473
3474 /*
3475  * Read the log from tail to head and process the log records found.
3476  * Handle the two cases where the tail and head are in the same cycle
3477  * and where the active portion of the log wraps around the end of
3478  * the physical log separately.  The pass parameter is passed through
3479  * to the routines called to process the data and is not looked at
3480  * here.
3481  */
3482 STATIC int
3483 xlog_do_recovery_pass(
3484         xlog_t                  *log,
3485         xfs_daddr_t             head_blk,
3486         xfs_daddr_t             tail_blk,
3487         int                     pass)
3488 {
3489         xlog_rec_header_t       *rhead;
3490         xfs_daddr_t             blk_no;
3491         xfs_caddr_t             bufaddr, offset;
3492         xfs_buf_t               *hbp, *dbp;
3493         int                     error = 0, h_size;
3494         int                     bblks, split_bblks;
3495         int                     hblks, split_hblks, wrapped_hblks;
3496         xlog_recover_t          *rhash[XLOG_RHASH_SIZE];
3497
3498         ASSERT(head_blk != tail_blk);
3499
3500         /*
3501          * Read the header of the tail block and get the iclog buffer size from
3502          * h_size.  Use this to tell how many sectors make up the log header.
3503          */
3504         if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
3505                 /*
3506                  * When using variable length iclogs, read first sector of
3507                  * iclog header and extract the header size from it.  Get a
3508                  * new hbp that is the correct size.
3509                  */
3510                 hbp = xlog_get_bp(log, 1);
3511                 if (!hbp)
3512                         return ENOMEM;
3513                 if ((error = xlog_bread(log, tail_blk, 1, hbp)))
3514                         goto bread_err1;
3515                 offset = xlog_align(log, tail_blk, 1, hbp);
3516                 rhead = (xlog_rec_header_t *)offset;
3517                 error = xlog_valid_rec_header(log, rhead, tail_blk);
3518                 if (error)
3519                         goto bread_err1;
3520                 h_size = be32_to_cpu(rhead->h_size);
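                     /*
                      * Each 512-byte header block describes up to
                      * XLOG_HEADER_CYCLE_SIZE (32k) of record data, so
                      * e.g. a 64k iclog needs hblks = 2 header blocks
                      * below.
                      */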
3521                 if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
3522                     (h_size > XLOG_HEADER_CYCLE_SIZE)) {
3523                         hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
3524                         if (h_size % XLOG_HEADER_CYCLE_SIZE)
3525                                 hblks++;
3526                         xlog_put_bp(hbp);
3527                         hbp = xlog_get_bp(log, hblks);
3528                 } else {
3529                         hblks = 1;
3530                 }
3531         } else {
3532                 ASSERT(log->l_sectbb_log == 0);
3533                 hblks = 1;
3534                 hbp = xlog_get_bp(log, 1);
3535                 h_size = XLOG_BIG_RECORD_BSIZE;
3536         }
3537
3538         if (!hbp)
3539                 return ENOMEM;
3540         dbp = xlog_get_bp(log, BTOBB(h_size));
3541         if (!dbp) {
3542                 xlog_put_bp(hbp);
3543                 return ENOMEM;
3544         }
3545
3546         memset(rhash, 0, sizeof(rhash));
3547         if (tail_blk <= head_blk) {
3548                 for (blk_no = tail_blk; blk_no < head_blk; ) {
3549                         if ((error = xlog_bread(log, blk_no, hblks, hbp)))
3550                                 goto bread_err2;
3551                         offset = xlog_align(log, blk_no, hblks, hbp);
3552                         rhead = (xlog_rec_header_t *)offset;
3553                         error = xlog_valid_rec_header(log, rhead, blk_no);
3554                         if (error)
3555                                 goto bread_err2;
3556
3557                         /* blocks in data section */
3558                         bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3559                         error = xlog_bread(log, blk_no + hblks, bblks, dbp);
3560                         if (error)
3561                                 goto bread_err2;
3562                         offset = xlog_align(log, blk_no + hblks, bblks, dbp);
3563                         xlog_unpack_data(rhead, offset, log);
3564                         if ((error = xlog_recover_process_data(log,
3565                                                 rhash, rhead, offset, pass)))
3566                                 goto bread_err2;
3567                         blk_no += bblks + hblks;
3568                 }
3569         } else {
3570                 /*
3571                  * Perform recovery around the end of the physical log.
3572                  * When the head is not on the same cycle number as the tail,
3573                  * we can't do a sequential recovery as above.
3574                  */
3575                 blk_no = tail_blk;
3576                 while (blk_no < log->l_logBBsize) {
3577                         /*
3578                          * Check for header wrapping around physical end-of-log
3579                          */
3580                         offset = NULL;
3581                         split_hblks = 0;
3582                         wrapped_hblks = 0;
3583                         if (blk_no + hblks <= log->l_logBBsize) {
3584                                 /* Read header in one read */
3585                                 error = xlog_bread(log, blk_no, hblks, hbp);
3586                                 if (error)
3587                                         goto bread_err2;
3588                                 offset = xlog_align(log, blk_no, hblks, hbp);
3589                         } else {
3590                                 /* This LR is split across physical log end */
3591                                 if (blk_no != log->l_logBBsize) {
3592                                         /* some data before physical log end */
3593                                         ASSERT(blk_no <= INT_MAX);
3594                                         split_hblks = log->l_logBBsize - (int)blk_no;
3595                                         ASSERT(split_hblks > 0);
3596                                         if ((error = xlog_bread(log, blk_no,
3597                                                         split_hblks, hbp)))
3598                                                 goto bread_err2;
3599                                         offset = xlog_align(log, blk_no,
3600                                                         split_hblks, hbp);
3601                                 }
3602                                 /*
3603                                  * Note: this black magic still works with
3604                                  * large sector sizes (non-512) only because:
3605                                  * - we increased the buffer size originally
3606                                  *   by 1 sector giving us enough extra space
3607                                  *   for the second read;
3608                                  * - the log start is guaranteed to be sector
3609                                  *   aligned;
3610                                  * - we read the log end (LR header start)
3611                                  *   _first_, then the log start (LR header end)
3612                                  *   - order is important.
3613                                  */
3614                                 wrapped_hblks = hblks - split_hblks;
3615                                 bufaddr = XFS_BUF_PTR(hbp);
3616                                 error = XFS_BUF_SET_PTR(hbp,
3617                                                 bufaddr + BBTOB(split_hblks),
3618                                                 BBTOB(hblks - split_hblks));
3619                                 if (!error)
3620                                         error = xlog_bread(log, 0,
3621                                                         wrapped_hblks, hbp);
3622                                 if (!error)
3623                                         error = XFS_BUF_SET_PTR(hbp, bufaddr,
3624                                                         BBTOB(hblks));
3625                                 if (error)
3626                                         goto bread_err2;
3627                                 if (!offset)
3628                                         offset = xlog_align(log, 0,
3629                                                         wrapped_hblks, hbp);
3630                         }
3631                         rhead = (xlog_rec_header_t *)offset;
3632                         error = xlog_valid_rec_header(log, rhead,
3633                                                 split_hblks ? blk_no : 0);
3634                         if (error)
3635                                 goto bread_err2;
3636
3637                         bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3638                         blk_no += hblks;
3639
3640                         /* Read in data for log record */
3641                         if (blk_no + bblks <= log->l_logBBsize) {
3642                                 error = xlog_bread(log, blk_no, bblks, dbp);
3643                                 if (error)
3644                                         goto bread_err2;
3645                                 offset = xlog_align(log, blk_no, bblks, dbp);
3646                         } else {
3647                                 /* This log record is split across the
3648                                  * physical end of log */
3649                                 offset = NULL;
3650                                 split_bblks = 0;
3651                                 if (blk_no != log->l_logBBsize) {
3652                                         /* some data is before the physical
3653                                          * end of log */
3654                                         ASSERT(!wrapped_hblks);
3655                                         ASSERT(blk_no <= INT_MAX);
3656                                         split_bblks =
3657                                                 log->l_logBBsize - (int)blk_no;
3658                                         ASSERT(split_bblks > 0);
3659                                         if ((error = xlog_bread(log, blk_no,
3660                                                         split_bblks, dbp)))
3661                                                 goto bread_err2;
3662                                         offset = xlog_align(log, blk_no,
3663                                                         split_bblks, dbp);
3664                                 }
3665                                 /*
3666                                  * Note: this black magic still works with
3667                                  * large sector sizes (non-512) only because:
3668                                  * - we increased the buffer size originally
3669                                  *   by 1 sector giving us enough extra space
3670                                  *   for the second read;
3671                                  * - the log start is guaranteed to be sector
3672                                  *   aligned;
3673                                  * - we read the log end (LR header start)
3674                                  *   _first_, then the log start (LR header end)
3675                                  *   - order is important.
3676                                  */
3677                                 bufaddr = XFS_BUF_PTR(dbp);
3678                                 error = XFS_BUF_SET_PTR(dbp,
3679                                                 bufaddr + BBTOB(split_bblks),
3680                                                 BBTOB(bblks - split_bblks));
3681                                 if (!error)
3682                                         error = xlog_bread(log, wrapped_hblks,
3683                                                         bblks - split_bblks,
3684                                                         dbp);
3685                                 if (!error)
3686                                         error = XFS_BUF_SET_PTR(dbp, bufaddr,
3687                                                         h_size);
3688                                 if (error)
3689                                         goto bread_err2;
3690                                 if (!offset)
3691                                         offset = xlog_align(log, wrapped_hblks,
3692                                                 bblks - split_bblks, dbp);
3693                         }
3694                         xlog_unpack_data(rhead, offset, log);
3695                         if ((error = xlog_recover_process_data(log, rhash,
3696                                                         rhead, offset, pass)))
3697                                 goto bread_err2;
3698                         blk_no += bblks;
3699                 }
3700
3701                 ASSERT(blk_no >= log->l_logBBsize);
3702                 blk_no -= log->l_logBBsize;
3703
3704                 /* read first part of physical log */
3705                 while (blk_no < head_blk) {
3706                         if ((error = xlog_bread(log, blk_no, hblks, hbp)))
3707                                 goto bread_err2;
3708                         offset = xlog_align(log, blk_no, hblks, hbp);
3709                         rhead = (xlog_rec_header_t *)offset;
3710                         error = xlog_valid_rec_header(log, rhead, blk_no);
3711                         if (error)
3712                                 goto bread_err2;
3713                         bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3714                         if ((error = xlog_bread(log, blk_no+hblks, bblks, dbp)))
3715                                 goto bread_err2;
3716                         offset = xlog_align(log, blk_no+hblks, bblks, dbp);
3717                         xlog_unpack_data(rhead, offset, log);
3718                         if ((error = xlog_recover_process_data(log, rhash,
3719                                                         rhead, offset, pass)))
3720                                 goto bread_err2;
3721                         blk_no += bblks + hblks;
3722                 }
3723         }
3724
3725  bread_err2:
3726         xlog_put_bp(dbp);
3727  bread_err1:
3728         xlog_put_bp(hbp);
3729         return error;
3730 }
3731
3732 /*
3733  * Do the recovery of the log.  We actually do this in two phases.
3734  * The two passes are necessary in order to implement the function
3735  * of cancelling a record written into the log.  The first pass
3736  * determines those things which have been cancelled, and the
3737  * second pass replays log items normally except for those which
3738  * have been cancelled.  The handling of the replay and cancellations
3739  * takes place in the log item type specific routines.
3740  *
3741  * The table of items which have cancel records in the log is allocated
3742  * and freed at this level, since only here do we know when all of
3743  * the log recovery has been completed.
3744  */
3745 STATIC int
3746 xlog_do_log_recovery(
3747         xlog_t          *log,
3748         xfs_daddr_t     head_blk,
3749         xfs_daddr_t     tail_blk)
3750 {
3751         int             error;
3752
3753         ASSERT(head_blk != tail_blk);
3754
3755         /*
3756          * First do a pass to find all of the cancelled buf log items.
3757          * Store them in the buf_cancel_table for use in the second pass.
3758          */
3759         log->l_buf_cancel_table =
3760                 (xfs_buf_cancel_t **)kmem_zalloc(XLOG_BC_TABLE_SIZE *
3761                                                  sizeof(xfs_buf_cancel_t*),
3762                                                  KM_SLEEP);
3763         error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3764                                       XLOG_RECOVER_PASS1);
3765         if (error != 0) {
3766                 kmem_free(log->l_buf_cancel_table);
3767                 log->l_buf_cancel_table = NULL;
3768                 return error;
3769         }
3770         /*
3771          * Then do a second pass to actually recover the items in the log.
3772          * When it is complete free the table of buf cancel items.
3773          */
3774         error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3775                                       XLOG_RECOVER_PASS2);
3776 #ifdef DEBUG
3777         if (!error) {
3778                 int     i;
3779
3780                 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
3781                         ASSERT(log->l_buf_cancel_table[i] == NULL);
3782         }
3783 #endif  /* DEBUG */
3784
3785         kmem_free(log->l_buf_cancel_table);
3786         log->l_buf_cancel_table = NULL;
3787
3788         return error;
3789 }
3790
3791 /*
3792  * Do the actual recovery
3793  */
3794 STATIC int
3795 xlog_do_recover(
3796         xlog_t          *log,
3797         xfs_daddr_t     head_blk,
3798         xfs_daddr_t     tail_blk)
3799 {
3800         int             error;
3801         xfs_buf_t       *bp;
3802         xfs_sb_t        *sbp;
3803
3804         /*
3805          * First replay the images in the log.
3806          */
3807         error = xlog_do_log_recovery(log, head_blk, tail_blk);
3808         if (error) {
3809                 return error;
3810         }
3811
3812         XFS_bflush(log->l_mp->m_ddev_targp);
3813
3814         /*
3815          * If IO errors happened during recovery, bail out.
3816          */
3817         if (XFS_FORCED_SHUTDOWN(log->l_mp)) {
3818                 return (EIO);
3819         }
3820
3821         /*
3822          * We now update the tail_lsn since much of the recovery has completed
3823          * and there may be space available to use.  If there were no extent
3824          * frees or iunlinks, we can free up the entire log and set the tail_lsn
3825          * to be the last_sync_lsn.  This was set in xlog_find_tail to be the
3826          * lsn of the last known good LR on disk.  If there are extent frees
3827          * or iunlinks, they will have some entries in the AIL, so we look at
3828          * the AIL to determine how to set the tail_lsn.
3829          */
3830         xlog_assign_tail_lsn(log->l_mp);
3831
3832         /*
3833          * Now that we've finished replaying all buffer and inode
3834          * updates, re-read in the superblock.
3835          */
3836         bp = xfs_getsb(log->l_mp, 0);
3837         XFS_BUF_UNDONE(bp);
3838         ASSERT(!(XFS_BUF_ISWRITE(bp)));
3839         ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
3840         XFS_BUF_READ(bp);
3841         XFS_BUF_UNASYNC(bp);
3842         xfsbdstrat(log->l_mp, bp);
3843         error = xfs_iowait(bp);
3844         if (error) {
3845                 xfs_ioerror_alert("xlog_do_recover",
3846                                   log->l_mp, bp, XFS_BUF_ADDR(bp));
3847                 ASSERT(0);
3848                 xfs_buf_relse(bp);
3849                 return error;
3850         }
3851
3852         /* Convert superblock from on-disk format */
3853         sbp = &log->l_mp->m_sb;
3854         xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
3855         ASSERT(sbp->sb_magicnum == XFS_SB_MAGIC);
3856         ASSERT(xfs_sb_good_version(sbp));
3857         xfs_buf_relse(bp);
3858
3859         /* We've re-read the superblock so re-initialize per-cpu counters */
3860         xfs_icsb_reinit_counters(log->l_mp);
3861
3862         xlog_recover_check_summary(log);
3863
3864         /* Normal transactions can now occur */
3865         log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
3866         return 0;
3867 }

/*
 * Perform recovery, re-initializing some log variables along the way
 * via xlog_find_tail().
 *
 * Return error or zero.
 */
int
xlog_recover(
        xlog_t          *log)
{
        xfs_daddr_t     head_blk, tail_blk;
        int             error;

        /* find the tail of the log */
        if ((error = xlog_find_tail(log, &head_blk, &tail_blk)))
                return error;

        if (tail_blk != head_blk) {
                /* There used to be a comment here:
                 *
                 * disallow recovery on read-only mounts.  note -- mount
                 * checks for ENOSPC and turns it into an intelligent
                 * error message.
                 * ...but this is no longer true.  Now, unless you specify
                 * NORECOVERY (in which case this function would never be
                 * called), we just go ahead and recover.  We do this all
                 * under the vfs layer, so we can get away with it unless
                 * the device itself is read-only, in which case we fail.
                 */
                if ((error = xfs_dev_is_read_only(log->l_mp, "recovery")))
                        return error;

                cmn_err(CE_NOTE,
                        "Starting XFS recovery on filesystem: %s (logdev: %s)",
                        log->l_mp->m_fsname, log->l_mp->m_logname ?
                        log->l_mp->m_logname : "internal");

                error = xlog_do_recover(log, head_blk, tail_blk);
                log->l_flags |= XLOG_RECOVERY_NEEDED;
        }
        return error;
}

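#if 0   /* illustrative sketch, not part of this file */
        /*
         * Recovery is driven in two stages by the mount path; the shape
         * is roughly the following (the caller code here is an
         * assumption for illustration, not a quote of the mount code):
         */
        error = xlog_recover(mp->m_log);        /* stage 1: replay the log */
        /* ... the root and realtime bitmap inodes are read in here ... */
        error = xlog_recover_finish(mp->m_log); /* stage 2: EFIs, iunlinks */
#endif
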
/*
 * In the first part of recovery we replay inodes and buffers and build
 * up the list of extent free items which need to be processed.  Here
 * we process the extent free items and clean up the on-disk unlinked
 * inode lists.  This is separated from the first part of recovery so
 * that the root and real-time bitmap inodes can be read in from disk
 * between the two stages.  This is necessary so that we can free space
 * in the real-time portion of the file system.
 */
int
xlog_recover_finish(
        xlog_t          *log)
{
        /*
         * Now we're ready to do the transactions needed for the
         * rest of recovery.  Start with completing all the extent
         * free intent records and then process the unlinked inode
         * lists.  At this point, we essentially run in normal mode
         * except that we're still performing recovery actions
         * rather than accepting new requests.
         */
        if (log->l_flags & XLOG_RECOVERY_NEEDED) {
                int     error;

                error = xlog_recover_process_efis(log);
                if (error) {
                        cmn_err(CE_ALERT,
                                "Failed to recover EFIs on filesystem: %s",
                                log->l_mp->m_fsname);
                        return error;
                }
                /*
                 * Sync the log to get all the EFIs out of the AIL.
                 * This isn't absolutely necessary, but it avoids
                 * problems if the unlink transactions would otherwise
                 * have to push the EFIs out of the way themselves.
                 */
                xfs_log_force(log->l_mp, (xfs_lsn_t)0,
                              (XFS_LOG_FORCE | XFS_LOG_SYNC));

                xlog_recover_process_iunlinks(log);

                xlog_recover_check_summary(log);

                cmn_err(CE_NOTE,
                        "Ending XFS recovery on filesystem: %s (logdev: %s)",
                        log->l_mp->m_fsname, log->l_mp->m_logname ?
                        log->l_mp->m_logname : "internal");
                log->l_flags &= ~XLOG_RECOVERY_NEEDED;
        } else {
                cmn_err(CE_DEBUG,
                        "!Ending clean XFS mount for filesystem: %s\n",
                        log->l_mp->m_fsname);
        }
        return 0;
}
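
#if 0   /* illustrative sketch, not the code of xlog_recover_process_efis() */
        /*
         * Conceptually, EFI processing walks the AIL under the AIL lock
         * (the lock this patch series moves into struct xfs_ail): any
         * XFS_LI_EFI item still sitting in the AIL was logged without a
         * matching EFD, so its extents get freed now.  The traversal
         * helper and lock field names below are assumptions made for
         * illustration only:
         */
        spin_lock(&ailp->xa_lock);
        for (lip = ail_first_item(ailp); lip; lip = ail_next_item(ailp, lip)) {
                if (lip->li_type != XFS_LI_EFI)
                        break;          /* only EFIs remain this early */
                spin_unlock(&ailp->xa_lock);
                error = xlog_recover_process_efi(mp, (xfs_efi_log_item_t *)lip);
                spin_lock(&ailp->xa_lock);
                if (error)
                        break;
        }
        spin_unlock(&ailp->xa_lock);
#endif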


#if defined(DEBUG)
/*
 * Read all of the agf and agi counters and check that they
 * are consistent with the superblock counters.
 */
void
xlog_recover_check_summary(
        xlog_t          *log)
{
        xfs_mount_t     *mp;
        xfs_agf_t       *agfp;
        xfs_agi_t       *agip;
        xfs_buf_t       *agfbp;
        xfs_buf_t       *agibp;
        xfs_daddr_t     agfdaddr;
        xfs_daddr_t     agidaddr;
        xfs_buf_t       *sbbp;
#ifdef XFS_LOUD_RECOVERY
        xfs_sb_t        *sbp;
#endif
        xfs_agnumber_t  agno;
        __uint64_t      freeblks;
        __uint64_t      itotal;
        __uint64_t      ifree;

        mp = log->l_mp;

        freeblks = 0LL;
        itotal = 0LL;
        ifree = 0LL;
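        /*
         * Walk every allocation group, accumulating the free-block count
         * from each AGF (agf_freeblks plus the agf_flcount free-list
         * blocks) and the inode counts from each AGI, so the totals can
         * be compared against the superblock below.
         */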
        for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
                agfdaddr = XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp));
                agfbp = xfs_buf_read(mp->m_ddev_targp, agfdaddr,
                                XFS_FSS_TO_BB(mp, 1), 0);
                if (XFS_BUF_ISERROR(agfbp)) {
                        xfs_ioerror_alert("xlog_recover_check_summary(agf)",
                                                mp, agfbp, agfdaddr);
                }
                agfp = XFS_BUF_TO_AGF(agfbp);
                ASSERT(XFS_AGF_MAGIC == be32_to_cpu(agfp->agf_magicnum));
                ASSERT(XFS_AGF_GOOD_VERSION(be32_to_cpu(agfp->agf_versionnum)));
                ASSERT(be32_to_cpu(agfp->agf_seqno) == agno);

                freeblks += be32_to_cpu(agfp->agf_freeblks) +
                            be32_to_cpu(agfp->agf_flcount);
                xfs_buf_relse(agfbp);

                agidaddr = XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp));
                agibp = xfs_buf_read(mp->m_ddev_targp, agidaddr,
                                XFS_FSS_TO_BB(mp, 1), 0);
                if (XFS_BUF_ISERROR(agibp)) {
                        xfs_ioerror_alert("xlog_recover_check_summary(agi)",
                                          mp, agibp, agidaddr);
                }
                agip = XFS_BUF_TO_AGI(agibp);
                ASSERT(XFS_AGI_MAGIC == be32_to_cpu(agip->agi_magicnum));
                ASSERT(XFS_AGI_GOOD_VERSION(be32_to_cpu(agip->agi_versionnum)));
                ASSERT(be32_to_cpu(agip->agi_seqno) == agno);

                itotal += be32_to_cpu(agip->agi_count);
                ifree += be32_to_cpu(agip->agi_freecount);
                xfs_buf_relse(agibp);
        }

        sbbp = xfs_getsb(mp, 0);
#ifdef XFS_LOUD_RECOVERY
        sbp = &mp->m_sb;
        xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(sbbp));
        cmn_err(CE_NOTE,
                "xlog_recover_check_summary: sb_icount %Lu itotal %Lu",
                sbp->sb_icount, itotal);
        cmn_err(CE_NOTE,
                "xlog_recover_check_summary: sb_ifree %Lu ifree %Lu",
                sbp->sb_ifree, ifree);
        cmn_err(CE_NOTE,
                "xlog_recover_check_summary: sb_fdblocks %Lu freeblks %Lu",
                sbp->sb_fdblocks, freeblks);
#if 0
        /*
         * This is turned off until I account for the allocation
         * btree blocks which live in free space.
         */
        ASSERT(sbp->sb_icount == itotal);
        ASSERT(sbp->sb_ifree == ifree);
        ASSERT(sbp->sb_fdblocks == freeblks);
#endif
#endif
        xfs_buf_relse(sbbp);
}
#endif /* DEBUG */