[XFS] Fix up a 32/64 local flags variable issue when enabling attr2 mode.
fs/xfs/xfs_bmap.c
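The subject line refers to a class of bug in which a local logging-flags variable is declared with a different width than the "int *flags" parameters used throughout this file. The sketch below is only an illustration of that mismatch, using hypothetical names; it is not the actual patch.

/* Illustrative sketch only -- hypothetical names, not the shipped fix. */
#include <stdint.h>

static void set_log_flags(int *flags)           /* callee expects a 32-bit int */
{
        *flags |= 0x1;                          /* stores only 4 bytes */
}

static void broken_caller(void)
{
        int64_t logflags = 0;                   /* 64-bit local, callee wants int */

        set_log_flags((int *)&logflags);        /* on big-endian 64-bit systems the */
                                                /* store lands in the wrong half    */
}

static void fixed_caller(void)
{
        int logflags = 0;                       /* declare the local with the same */
                                                /* type the callee takes            */
        set_log_flags(&logflags);
}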
1 /*
2  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_types.h"
21 #include "xfs_bit.h"
22 #include "xfs_log.h"
23 #include "xfs_inum.h"
24 #include "xfs_trans.h"
25 #include "xfs_sb.h"
26 #include "xfs_ag.h"
27 #include "xfs_dir.h"
28 #include "xfs_dir2.h"
29 #include "xfs_da_btree.h"
30 #include "xfs_bmap_btree.h"
31 #include "xfs_alloc_btree.h"
32 #include "xfs_ialloc_btree.h"
33 #include "xfs_dir_sf.h"
34 #include "xfs_dir2_sf.h"
35 #include "xfs_attr_sf.h"
36 #include "xfs_dinode.h"
37 #include "xfs_inode.h"
38 #include "xfs_btree.h"
39 #include "xfs_dmapi.h"
40 #include "xfs_mount.h"
41 #include "xfs_ialloc.h"
42 #include "xfs_itable.h"
43 #include "xfs_inode_item.h"
44 #include "xfs_extfree_item.h"
45 #include "xfs_alloc.h"
46 #include "xfs_bmap.h"
47 #include "xfs_rtalloc.h"
48 #include "xfs_error.h"
49 #include "xfs_dir_leaf.h"
50 #include "xfs_attr_leaf.h"
51 #include "xfs_rw.h"
52 #include "xfs_quota.h"
53 #include "xfs_trans_space.h"
54 #include "xfs_buf_item.h"
55
56
57 #ifdef DEBUG
58 STATIC void
59 xfs_bmap_check_leaf_extents(xfs_btree_cur_t *cur, xfs_inode_t *ip, int whichfork);
60 #endif
61
62 kmem_zone_t             *xfs_bmap_free_item_zone;
63
64 /*
65  * Prototypes for internal bmap routines.
66  */
67
68
69 /*
70  * Called from xfs_bmap_add_attrfork to handle extents format files.
71  */
72 STATIC int                                      /* error */
73 xfs_bmap_add_attrfork_extents(
74         xfs_trans_t             *tp,            /* transaction pointer */
75         xfs_inode_t             *ip,            /* incore inode pointer */
76         xfs_fsblock_t           *firstblock,    /* first block allocated */
77         xfs_bmap_free_t         *flist,         /* blocks to free at commit */
78         int                     *flags);        /* inode logging flags */
79
80 /*
81  * Called from xfs_bmap_add_attrfork to handle local format files.
82  */
83 STATIC int                                      /* error */
84 xfs_bmap_add_attrfork_local(
85         xfs_trans_t             *tp,            /* transaction pointer */
86         xfs_inode_t             *ip,            /* incore inode pointer */
87         xfs_fsblock_t           *firstblock,    /* first block allocated */
88         xfs_bmap_free_t         *flist,         /* blocks to free at commit */
89         int                     *flags);        /* inode logging flags */
90
91 /*
92  * Called by xfs_bmapi to update extent list structure and the btree
93  * after allocating space (or doing a delayed allocation).
94  */
95 STATIC int                              /* error */
96 xfs_bmap_add_extent(
97         xfs_inode_t             *ip,    /* incore inode pointer */
98         xfs_extnum_t            idx,    /* extent number to update/insert */
99         xfs_btree_cur_t         **curp, /* if *curp is null, not a btree */
100         xfs_bmbt_irec_t         *new,   /* new data to put in extent list */
101         xfs_fsblock_t           *first, /* pointer to firstblock variable */
102         xfs_bmap_free_t         *flist, /* list of extents to be freed */
103         int                     *logflagsp, /* inode logging flags */
104         int                     whichfork, /* data or attr fork */
105         int                     rsvd);  /* OK to allocate reserved blocks */
106
107 /*
108  * Called by xfs_bmap_add_extent to handle cases converting a delayed
109  * allocation to a real allocation.
110  */
111 STATIC int                              /* error */
112 xfs_bmap_add_extent_delay_real(
113         xfs_inode_t             *ip,    /* incore inode pointer */
114         xfs_extnum_t            idx,    /* extent number to update/insert */
115         xfs_btree_cur_t         **curp, /* if *curp is null, not a btree */
116         xfs_bmbt_irec_t         *new,   /* new data to put in extent list */
117         xfs_filblks_t           *dnew,  /* new delayed-alloc indirect blocks */
118         xfs_fsblock_t           *first, /* pointer to firstblock variable */
119         xfs_bmap_free_t         *flist, /* list of extents to be freed */
120         int                     *logflagsp, /* inode logging flags */
121         int                     rsvd);  /* OK to allocate reserved blocks */
122
123 /*
124  * Called by xfs_bmap_add_extent to handle cases converting a hole
125  * to a delayed allocation.
126  */
127 STATIC int                              /* error */
128 xfs_bmap_add_extent_hole_delay(
129         xfs_inode_t             *ip,    /* incore inode pointer */
130         xfs_extnum_t            idx,    /* extent number to update/insert */
131         xfs_btree_cur_t         *cur,   /* if null, not a btree */
132         xfs_bmbt_irec_t         *new,   /* new data to put in extent list */
133         int                     *logflagsp,/* inode logging flags */
134         int                     rsvd);  /* OK to allocate reserved blocks */
135
136 /*
137  * Called by xfs_bmap_add_extent to handle cases converting a hole
138  * to a real allocation.
139  */
140 STATIC int                              /* error */
141 xfs_bmap_add_extent_hole_real(
142         xfs_inode_t             *ip,    /* incore inode pointer */
143         xfs_extnum_t            idx,    /* extent number to update/insert */
144         xfs_btree_cur_t         *cur,   /* if null, not a btree */
145         xfs_bmbt_irec_t         *new,   /* new data to put in extent list */
146         int                     *logflagsp, /* inode logging flags */
147         int                     whichfork); /* data or attr fork */
148
149 /*
150  * Called by xfs_bmap_add_extent to handle cases converting an unwritten
151  * allocation to a real allocation or vice versa.
152  */
153 STATIC int                              /* error */
154 xfs_bmap_add_extent_unwritten_real(
155         xfs_inode_t             *ip,    /* incore inode pointer */
156         xfs_extnum_t            idx,    /* extent number to update/insert */
157         xfs_btree_cur_t         **curp, /* if *curp is null, not a btree */
158         xfs_bmbt_irec_t         *new,   /* new data to put in extent list */
159         int                     *logflagsp); /* inode logging flags */
160
161 /*
162  * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
163  * It figures out where to ask the underlying allocator to put the new extent.
164  */
165 STATIC int                              /* error */
166 xfs_bmap_alloc(
167         xfs_bmalloca_t          *ap);   /* bmap alloc argument struct */
168
169 /*
170  * Transform a btree format file with only one leaf node, where the
171  * extents list will fit in the inode, into an extents format file.
172  * Since the extent list is already in-core, all we have to do is
173  * give up the space for the btree root and pitch the leaf block.
174  */
175 STATIC int                              /* error */
176 xfs_bmap_btree_to_extents(
177         xfs_trans_t             *tp,    /* transaction pointer */
178         xfs_inode_t             *ip,    /* incore inode pointer */
179         xfs_btree_cur_t         *cur,   /* btree cursor */
180         int                     *logflagsp, /* inode logging flags */
181         int                     whichfork); /* data or attr fork */
182
183 #ifdef DEBUG
184 /*
185  * Check that the extents list for the inode ip is in the right order.
186  */
187 STATIC void
188 xfs_bmap_check_extents(
189         xfs_inode_t             *ip,            /* incore inode pointer */
190         int                     whichfork);     /* data or attr fork */
191 #endif
192
193 /*
194  * Called by xfs_bmapi to update extent list structure and the btree
195  * after removing space (or undoing a delayed allocation).
196  */
197 STATIC int                              /* error */
198 xfs_bmap_del_extent(
199         xfs_inode_t             *ip,    /* incore inode pointer */
200         xfs_trans_t             *tp,    /* current trans pointer */
201         xfs_extnum_t            idx,    /* extent number to update/insert */
202         xfs_bmap_free_t         *flist, /* list of extents to be freed */
203         xfs_btree_cur_t         *cur,   /* if null, not a btree */
204         xfs_bmbt_irec_t         *new,   /* new data to put in extent list */
205         int                     *logflagsp,/* inode logging flags */
206         int                     whichfork, /* data or attr fork */
207         int                     rsvd);   /* OK to allocate reserved blocks */
208
209 /*
210  * Remove the entry "free" from the free item list.  Prev points to the
211  * previous entry, unless "free" is the head of the list.
212  */
213 STATIC void
214 xfs_bmap_del_free(
215         xfs_bmap_free_t         *flist, /* free item list header */
216         xfs_bmap_free_item_t    *prev,  /* previous item on list, if any */
217         xfs_bmap_free_item_t    *free); /* list item to be freed */
218
219 /*
220  * Remove count entries from the extents array for inode "ip", starting
221  * at index "idx".  Copies the remaining items down over the deleted ones,
222  * and gives back the excess memory.
223  */
224 STATIC void
225 xfs_bmap_delete_exlist(
226         xfs_inode_t     *ip,            /* incore inode pointer */
227         xfs_extnum_t    idx,            /* starting delete index */
228         xfs_extnum_t    count,          /* count of items to delete */
229         int             whichfork);     /* data or attr fork */
230
231 /*
232  * Convert an extents-format file into a btree-format file.
233  * The new file will have a root block (in the inode) and a single child block.
234  */
235 STATIC int                                      /* error */
236 xfs_bmap_extents_to_btree(
237         xfs_trans_t             *tp,            /* transaction pointer */
238         xfs_inode_t             *ip,            /* incore inode pointer */
239         xfs_fsblock_t           *firstblock,    /* first block allocated */
240         xfs_bmap_free_t         *flist,         /* blocks freed in xaction */
241         xfs_btree_cur_t         **curp,         /* cursor returned to caller */
242         int                     wasdel,         /* converting a delayed alloc */
243         int                     *logflagsp,     /* inode logging flags */
244         int                     whichfork);     /* data or attr fork */
245
246 /*
247  * Insert new item(s) in the extent list for inode "ip".
248  * "count" new items are inserted at offset "idx".
249  */
250 STATIC void
251 xfs_bmap_insert_exlist(
252         xfs_inode_t     *ip,            /* incore inode pointer */
253         xfs_extnum_t    idx,            /* starting index of new items */
254         xfs_extnum_t    count,          /* number of inserted items */
255         xfs_bmbt_irec_t *new,           /* items to insert */
256         int             whichfork);     /* data or attr fork */
257
258 /*
259  * Convert a local file to an extents file.
260  * This code is sort of bogus, since the file data needs to get
261  * logged so it won't be lost.  The bmap-level manipulations are ok, though.
262  */
263 STATIC int                              /* error */
264 xfs_bmap_local_to_extents(
265         xfs_trans_t     *tp,            /* transaction pointer */
266         xfs_inode_t     *ip,            /* incore inode pointer */
267         xfs_fsblock_t   *firstblock,    /* first block allocated in xaction */
268         xfs_extlen_t    total,          /* total blocks needed by transaction */
269         int             *logflagsp,     /* inode logging flags */
270         int             whichfork);     /* data or attr fork */
271
272 /*
273  * Search the extents list for the inode, for the extent containing bno.
274  * If bno lies in a hole, point to the next entry.  If bno lies past eof,
275  * *eofp will be set, and *prevp will contain the last entry (null if none).
276  * Else, *lastxp will be set to the index of the found
277  * entry; *gotp will contain the entry.
278  */
279 STATIC xfs_bmbt_rec_t *                 /* pointer to found extent entry */
280 xfs_bmap_search_extents(
281         xfs_inode_t     *ip,            /* incore inode pointer */
282         xfs_fileoff_t   bno,            /* block number searched for */
283         int             whichfork,      /* data or attr fork */
284         int             *eofp,          /* out: end of file found */
285         xfs_extnum_t    *lastxp,        /* out: last extent index */
286         xfs_bmbt_irec_t *gotp,          /* out: extent entry found */
287         xfs_bmbt_irec_t *prevp);        /* out: previous extent entry found */
288
289 /*
290  * Check the last inode extent to determine whether this allocation will result
291  * in blocks being allocated at the end of the file. When we allocate new data
292  * blocks at the end of the file which do not start at the previous data block,
293  * we will try to align the new blocks at stripe unit boundaries.
294  */
295 STATIC int                              /* error */
296 xfs_bmap_isaeof(
297         xfs_inode_t     *ip,            /* incore inode pointer */
298         xfs_fileoff_t   off,            /* file offset in fsblocks */
299         int             whichfork,      /* data or attribute fork */
300         char            *aeof);         /* return value */
301
302 #ifdef XFS_BMAP_TRACE
303 /*
304  * Add a bmap trace buffer entry.  Base routine for the others.
305  */
306 STATIC void
307 xfs_bmap_trace_addentry(
308         int             opcode,         /* operation */
309         char            *fname,         /* function name */
310         char            *desc,          /* operation description */
311         xfs_inode_t     *ip,            /* incore inode pointer */
312         xfs_extnum_t    idx,            /* index of entry(ies) */
313         xfs_extnum_t    cnt,            /* count of entries, 1 or 2 */
314         xfs_bmbt_rec_t  *r1,            /* first record */
315         xfs_bmbt_rec_t  *r2,            /* second record or null */
316         int             whichfork);     /* data or attr fork */
317
318 /*
319  * Add bmap trace entry prior to a call to xfs_bmap_delete_exlist.
320  */
321 STATIC void
322 xfs_bmap_trace_delete(
323         char            *fname,         /* function name */
324         char            *desc,          /* operation description */
325         xfs_inode_t     *ip,            /* incore inode pointer */
326         xfs_extnum_t    idx,            /* index of entry(entries) deleted */
327         xfs_extnum_t    cnt,            /* count of entries deleted, 1 or 2 */
328         int             whichfork);     /* data or attr fork */
329
330 /*
331  * Add bmap trace entry prior to a call to xfs_bmap_insert_exlist, or
332  * reading in the extents list from the disk (in the btree).
333  */
334 STATIC void
335 xfs_bmap_trace_insert(
336         char            *fname,         /* function name */
337         char            *desc,          /* operation description */
338         xfs_inode_t     *ip,            /* incore inode pointer */
339         xfs_extnum_t    idx,            /* index of entry(entries) inserted */
340         xfs_extnum_t    cnt,            /* count of entries inserted, 1 or 2 */
341         xfs_bmbt_irec_t *r1,            /* inserted record 1 */
342         xfs_bmbt_irec_t *r2,            /* inserted record 2 or null */
343         int             whichfork);     /* data or attr fork */
344
345 /*
346  * Add bmap trace entry after updating an extent list entry in place.
347  */
348 STATIC void
349 xfs_bmap_trace_post_update(
350         char            *fname,         /* function name */
351         char            *desc,          /* operation description */
352         xfs_inode_t     *ip,            /* incore inode pointer */
353         xfs_extnum_t    idx,            /* index of entry updated */
354         int             whichfork);     /* data or attr fork */
355
356 /*
357  * Add bmap trace entry prior to updating an extent list entry in place.
358  */
359 STATIC void
360 xfs_bmap_trace_pre_update(
361         char            *fname,         /* function name */
362         char            *desc,          /* operation description */
363         xfs_inode_t     *ip,            /* incore inode pointer */
364         xfs_extnum_t    idx,            /* index of entry to be updated */
365         int             whichfork);     /* data or attr fork */
366
367 #else
368 #define xfs_bmap_trace_delete(f,d,ip,i,c,w)
369 #define xfs_bmap_trace_insert(f,d,ip,i,c,r1,r2,w)
370 #define xfs_bmap_trace_post_update(f,d,ip,i,w)
371 #define xfs_bmap_trace_pre_update(f,d,ip,i,w)
372 #endif  /* XFS_BMAP_TRACE */
373
374 /*
375  * Compute the worst-case number of indirect blocks that will be used
376  * for ip's delayed extent of length "len".
377  */
378 STATIC xfs_filblks_t
379 xfs_bmap_worst_indlen(
380         xfs_inode_t             *ip,    /* incore inode pointer */
381         xfs_filblks_t           len);   /* delayed extent length */
382
383 #ifdef DEBUG
384 /*
385  * Perform various validation checks on the values being returned
386  * from xfs_bmapi().
387  */
388 STATIC void
389 xfs_bmap_validate_ret(
390         xfs_fileoff_t           bno,
391         xfs_filblks_t           len,
392         int                     flags,
393         xfs_bmbt_irec_t         *mval,
394         int                     nmap,
395         int                     ret_nmap);
396 #else
397 #define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
398 #endif /* DEBUG */
399
400 #if defined(XFS_RW_TRACE)
401 STATIC void
402 xfs_bunmap_trace(
403         xfs_inode_t             *ip,
404         xfs_fileoff_t           bno,
405         xfs_filblks_t           len,
406         int                     flags,
407         inst_t                  *ra);
408 #else
409 #define xfs_bunmap_trace(ip, bno, len, flags, ra)
410 #endif  /* XFS_RW_TRACE */
411
412 STATIC int
413 xfs_bmap_count_tree(
414         xfs_mount_t     *mp,
415         xfs_trans_t     *tp,
416         xfs_fsblock_t   blockno,
417         int             levelin,
418         int             *count);
419
420 STATIC int
421 xfs_bmap_count_leaves(
422         xfs_bmbt_rec_t          *frp,
423         int                     numrecs,
424         int                     *count);
425
426 /*
427  * Bmap internal routines.
428  */
429
430 /*
431  * Called from xfs_bmap_add_attrfork to handle btree format files.
432  */
433 STATIC int                                      /* error */
434 xfs_bmap_add_attrfork_btree(
435         xfs_trans_t             *tp,            /* transaction pointer */
436         xfs_inode_t             *ip,            /* incore inode pointer */
437         xfs_fsblock_t           *firstblock,    /* first block allocated */
438         xfs_bmap_free_t         *flist,         /* blocks to free at commit */
439         int                     *flags)         /* inode logging flags */
440 {
441         xfs_btree_cur_t         *cur;           /* btree cursor */
442         int                     error;          /* error return value */
443         xfs_mount_t             *mp;            /* file system mount struct */
444         int                     stat;           /* newroot status */
445
446         mp = ip->i_mount;
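        /*
         * If the bmap btree root still fits in the (now smaller) data
         * fork, it only needs to be relogged; otherwise a new root
         * block has to be pushed out of the inode.
         */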
447         if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip))
448                 *flags |= XFS_ILOG_DBROOT;
449         else {
450                 cur = xfs_btree_init_cursor(mp, tp, NULL, 0, XFS_BTNUM_BMAP, ip,
451                         XFS_DATA_FORK);
452                 cur->bc_private.b.flist = flist;
453                 cur->bc_private.b.firstblock = *firstblock;
454                 if ((error = xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat)))
455                         goto error0;
456                 ASSERT(stat == 1);      /* must be at least one entry */
457                 if ((error = xfs_bmbt_newroot(cur, flags, &stat)))
458                         goto error0;
459                 if (stat == 0) {
460                         xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
461                         return XFS_ERROR(ENOSPC);
462                 }
463                 *firstblock = cur->bc_private.b.firstblock;
464                 cur->bc_private.b.allocated = 0;
465                 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
466         }
467         return 0;
468 error0:
469         xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
470         return error;
471 }
472
473 /*
474  * Called from xfs_bmap_add_attrfork to handle extents format files.
475  */
476 STATIC int                                      /* error */
477 xfs_bmap_add_attrfork_extents(
478         xfs_trans_t             *tp,            /* transaction pointer */
479         xfs_inode_t             *ip,            /* incore inode pointer */
480         xfs_fsblock_t           *firstblock,    /* first block allocated */
481         xfs_bmap_free_t         *flist,         /* blocks to free at commit */
482         int                     *flags)         /* inode logging flags */
483 {
484         xfs_btree_cur_t         *cur;           /* bmap btree cursor */
485         int                     error;          /* error return value */
486
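        /*
         * If the whole extent list still fits inline in the data fork,
         * no btree conversion is needed.
         */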
487         if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
488                 return 0;
489         cur = NULL;
490         error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist, &cur, 0,
491                 flags, XFS_DATA_FORK);
492         if (cur) {
493                 cur->bc_private.b.allocated = 0;
494                 xfs_btree_del_cursor(cur,
495                         error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
496         }
497         return error;
498 }
499
500 /*
501  * Called from xfs_bmap_add_attrfork to handle local format files.
502  */
503 STATIC int                                      /* error */
504 xfs_bmap_add_attrfork_local(
505         xfs_trans_t             *tp,            /* transaction pointer */
506         xfs_inode_t             *ip,            /* incore inode pointer */
507         xfs_fsblock_t           *firstblock,    /* first block allocated */
508         xfs_bmap_free_t         *flist,         /* blocks to free at commit */
509         int                     *flags)         /* inode logging flags */
510 {
511         xfs_da_args_t           dargs;          /* args for dir/attr code */
512         int                     error;          /* error return value */
513         xfs_mount_t             *mp;            /* mount structure pointer */
514
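        /*
         * If the inline data still fits beside the new attr fork there
         * is nothing to convert.  Shortform directories go through the
         * directory code; other local files are converted straight to
         * extents format.
         */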
515         if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
516                 return 0;
517         if ((ip->i_d.di_mode & S_IFMT) == S_IFDIR) {
518                 mp = ip->i_mount;
519                 memset(&dargs, 0, sizeof(dargs));
520                 dargs.dp = ip;
521                 dargs.firstblock = firstblock;
522                 dargs.flist = flist;
523                 dargs.total = mp->m_dirblkfsbs;
524                 dargs.whichfork = XFS_DATA_FORK;
525                 dargs.trans = tp;
526                 error = XFS_DIR_SHORTFORM_TO_SINGLE(mp, &dargs);
527         } else
528                 error = xfs_bmap_local_to_extents(tp, ip, firstblock, 1, flags,
529                         XFS_DATA_FORK);
530         return error;
531 }
532
533 /*
534  * Called by xfs_bmapi to update extent list structure and the btree
535  * after allocating space (or doing a delayed allocation).
536  */
537 STATIC int                              /* error */
538 xfs_bmap_add_extent(
539         xfs_inode_t             *ip,    /* incore inode pointer */
540         xfs_extnum_t            idx,    /* extent number to update/insert */
541         xfs_btree_cur_t         **curp, /* if *curp is null, not a btree */
542         xfs_bmbt_irec_t         *new,   /* new data to put in extent list */
543         xfs_fsblock_t           *first, /* pointer to firstblock variable */
544         xfs_bmap_free_t         *flist, /* list of extents to be freed */
545         int                     *logflagsp, /* inode logging flags */
546         int                     whichfork, /* data or attr fork */
547         int                     rsvd)   /* OK to use reserved data blocks */
548 {
549         xfs_btree_cur_t         *cur;   /* btree cursor or null */
550         xfs_filblks_t           da_new; /* new count del alloc blocks used */
551         xfs_filblks_t           da_old; /* old count del alloc blocks used */
552         int                     error;  /* error return value */
553 #ifdef XFS_BMAP_TRACE
554         static char             fname[] = "xfs_bmap_add_extent";
555 #endif
556         xfs_ifork_t             *ifp;   /* inode fork ptr */
557         int                     logflags; /* returned value */
558         xfs_extnum_t            nextents; /* number of extents in file now */
559
560         XFS_STATS_INC(xs_add_exlist);
561         cur = *curp;
562         ifp = XFS_IFORK_PTR(ip, whichfork);
563         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
564         ASSERT(idx <= nextents);
565         da_old = da_new = 0;
566         error = 0;
567         /*
568          * This is the first extent added to a new/empty file.
569          * Special case this one, so other routines get to assume there are
570          * already extents in the list.
571          */
572         if (nextents == 0) {
573                 xfs_bmap_trace_insert(fname, "insert empty", ip, 0, 1, new,
574                         NULL, whichfork);
575                 xfs_bmap_insert_exlist(ip, 0, 1, new, whichfork);
576                 ASSERT(cur == NULL);
577                 ifp->if_lastex = 0;
578                 if (!ISNULLSTARTBLOCK(new->br_startblock)) {
579                         XFS_IFORK_NEXT_SET(ip, whichfork, 1);
580                         logflags = XFS_ILOG_CORE | XFS_ILOG_FEXT(whichfork);
581                 } else
582                         logflags = 0;
583         }
584         /*
585          * Any kind of new delayed allocation goes here.
586          */
587         else if (ISNULLSTARTBLOCK(new->br_startblock)) {
588                 if (cur)
589                         ASSERT((cur->bc_private.b.flags &
590                                 XFS_BTCUR_BPRV_WASDEL) == 0);
591                 if ((error = xfs_bmap_add_extent_hole_delay(ip, idx, cur, new,
592                                 &logflags, rsvd)))
593                         goto done;
594         }
595         /*
596          * Real allocation off the end of the file.
597          */
598         else if (idx == nextents) {
599                 if (cur)
600                         ASSERT((cur->bc_private.b.flags &
601                                 XFS_BTCUR_BPRV_WASDEL) == 0);
602                 if ((error = xfs_bmap_add_extent_hole_real(ip, idx, cur, new,
603                                 &logflags, whichfork)))
604                         goto done;
605         } else {
606                 xfs_bmbt_irec_t prev;   /* old extent at offset idx */
607
608                 /*
609                  * Get the record referred to by idx.
610                  */
611                 xfs_bmbt_get_all(&ifp->if_u1.if_extents[idx], &prev);
612                 /*
613                  * If it's a real allocation record, and the new allocation ends
614                  * after the start of the referred to record, then we're filling
615                  * in a delayed or unwritten allocation with a real one, or
616                  * converting real back to unwritten.
617                  */
618                 if (!ISNULLSTARTBLOCK(new->br_startblock) &&
619                     new->br_startoff + new->br_blockcount > prev.br_startoff) {
620                         if (prev.br_state != XFS_EXT_UNWRITTEN &&
621                             ISNULLSTARTBLOCK(prev.br_startblock)) {
622                                 da_old = STARTBLOCKVAL(prev.br_startblock);
623                                 if (cur)
624                                         ASSERT(cur->bc_private.b.flags &
625                                                 XFS_BTCUR_BPRV_WASDEL);
626                                 if ((error = xfs_bmap_add_extent_delay_real(ip,
627                                         idx, &cur, new, &da_new, first, flist,
628                                         &logflags, rsvd)))
629                                         goto done;
630                         } else if (new->br_state == XFS_EXT_NORM) {
631                                 ASSERT(new->br_state == XFS_EXT_NORM);
632                                 if ((error = xfs_bmap_add_extent_unwritten_real(
633                                         ip, idx, &cur, new, &logflags)))
634                                         goto done;
635                         } else {
636                                 ASSERT(new->br_state == XFS_EXT_UNWRITTEN);
637                                 if ((error = xfs_bmap_add_extent_unwritten_real(
638                                         ip, idx, &cur, new, &logflags)))
639                                         goto done;
640                         }
641                         ASSERT(*curp == cur || *curp == NULL);
642                 }
643                 /*
644                  * Otherwise we're filling in a hole with an allocation.
645                  */
646                 else {
647                         if (cur)
648                                 ASSERT((cur->bc_private.b.flags &
649                                         XFS_BTCUR_BPRV_WASDEL) == 0);
650                         if ((error = xfs_bmap_add_extent_hole_real(ip, idx, cur,
651                                         new, &logflags, whichfork)))
652                                 goto done;
653                 }
654         }
655
656         ASSERT(*curp == cur || *curp == NULL);
657         /*
658          * Convert to a btree if necessary.
659          */
660         if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
661             XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max) {
662                 int     tmp_logflags;   /* partial log flag return val */
663
664                 ASSERT(cur == NULL);
665                 error = xfs_bmap_extents_to_btree(ip->i_transp, ip, first,
666                         flist, &cur, da_old > 0, &tmp_logflags, whichfork);
667                 logflags |= tmp_logflags;
668                 if (error)
669                         goto done;
670         }
671         /*
672          * Adjust for changes in reserved delayed indirect blocks.
673          * Nothing to do for disk quotas here.
674          */
675         if (da_old || da_new) {
676                 xfs_filblks_t   nblks;
677
678                 nblks = da_new;
679                 if (cur)
680                         nblks += cur->bc_private.b.allocated;
681                 ASSERT(nblks <= da_old);
682                 if (nblks < da_old)
683                         xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS,
684                                 (int)(da_old - nblks), rsvd);
685         }
686         /*
687          * Clear out the allocated field, done with it now in any case.
688          */
689         if (cur) {
690                 cur->bc_private.b.allocated = 0;
691                 *curp = cur;
692         }
693 done:
694 #ifdef DEBUG
695         if (!error)
696                 xfs_bmap_check_leaf_extents(*curp, ip, whichfork);
697 #endif
698         *logflagsp = logflags;
699         return error;
700 }
701
702 /*
703  * Called by xfs_bmap_add_extent to handle cases converting a delayed
704  * allocation to a real allocation.
705  */
706 STATIC int                              /* error */
707 xfs_bmap_add_extent_delay_real(
708         xfs_inode_t             *ip,    /* incore inode pointer */
709         xfs_extnum_t            idx,    /* extent number to update/insert */
710         xfs_btree_cur_t         **curp, /* if *curp is null, not a btree */
711         xfs_bmbt_irec_t         *new,   /* new data to put in extent list */
712         xfs_filblks_t           *dnew,  /* new delayed-alloc indirect blocks */
713         xfs_fsblock_t           *first, /* pointer to firstblock variable */
714         xfs_bmap_free_t         *flist, /* list of extents to be freed */
715         int                     *logflagsp, /* inode logging flags */
716         int                     rsvd)   /* OK to use reserved data block allocation */
717 {
718         xfs_bmbt_rec_t          *base;  /* base of extent entry list */
719         xfs_btree_cur_t         *cur;   /* btree cursor */
720         int                     diff;   /* temp value */
721         xfs_bmbt_rec_t          *ep;    /* extent entry for idx */
722         int                     error;  /* error return value */
723 #ifdef XFS_BMAP_TRACE
724         static char             fname[] = "xfs_bmap_add_extent_delay_real";
725 #endif
726         int                     i;      /* temp state */
727         xfs_fileoff_t           new_endoff;     /* end offset of new entry */
728         xfs_bmbt_irec_t         r[3];   /* neighbor extent entries */
729                                         /* left is 0, right is 1, prev is 2 */
730         int                     rval=0; /* return value (logging flags) */
731         int                     state = 0;/* state bits, accessed thru macros */
732         xfs_filblks_t           temp;   /* value for dnew calculations */
733         xfs_filblks_t           temp2;  /* value for dnew calculations */
734         int                     tmp_rval;       /* partial logging flags */
735         enum {                          /* bit number definitions for state */
736                 LEFT_CONTIG,    RIGHT_CONTIG,
737                 LEFT_FILLING,   RIGHT_FILLING,
738                 LEFT_DELAY,     RIGHT_DELAY,
739                 LEFT_VALID,     RIGHT_VALID
740         };
741
742 #define LEFT            r[0]
743 #define RIGHT           r[1]
744 #define PREV            r[2]
745 #define MASK(b)         (1 << (b))
746 #define MASK2(a,b)      (MASK(a) | MASK(b))
747 #define MASK3(a,b,c)    (MASK2(a,b) | MASK(c))
748 #define MASK4(a,b,c,d)  (MASK3(a,b,c) | MASK(d))
749 #define STATE_SET(b,v)  ((v) ? (state |= MASK(b)) : (state &= ~MASK(b)))
750 #define STATE_TEST(b)   (state & MASK(b))
751 #define STATE_SET_TEST(b,v)     ((v) ? ((state |= MASK(b)), 1) : \
752                                        ((state &= ~MASK(b)), 0))
753 #define SWITCH_STATE            \
754         (state & MASK4(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG))
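/*
 * STATE_SET() sets or clears one state bit, STATE_SET_TEST() does the same
 * but also evaluates to the new value so it can be used inside an if(), and
 * SWITCH_STATE masks off just the four FILLING/CONTIG bits that drive the
 * switch statement below.
 */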
755
756         /*
757          * Set up a bunch of variables to make the tests simpler.
758          */
759         cur = *curp;
760         base = ip->i_df.if_u1.if_extents;
761         ep = &base[idx];
762         xfs_bmbt_get_all(ep, &PREV);
763         new_endoff = new->br_startoff + new->br_blockcount;
764         ASSERT(PREV.br_startoff <= new->br_startoff);
765         ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
766         /*
767          * Set flags determining what part of the previous delayed allocation
768          * extent is being replaced by a real allocation.
769          */
770         STATE_SET(LEFT_FILLING, PREV.br_startoff == new->br_startoff);
771         STATE_SET(RIGHT_FILLING,
772                 PREV.br_startoff + PREV.br_blockcount == new_endoff);
773         /*
774          * Check and set flags if this segment has a left neighbor.
775          * Don't set contiguous if the combined extent would be too large.
776          */
777         if (STATE_SET_TEST(LEFT_VALID, idx > 0)) {
778                 xfs_bmbt_get_all(ep - 1, &LEFT);
779                 STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(LEFT.br_startblock));
780         }
781         STATE_SET(LEFT_CONTIG,
782                 STATE_TEST(LEFT_VALID) && !STATE_TEST(LEFT_DELAY) &&
783                 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
784                 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
785                 LEFT.br_state == new->br_state &&
786                 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN);
787         /*
788          * Check and set flags if this segment has a right neighbor.
789          * Don't set contiguous if the combined extent would be too large.
790          * Also check for all-three-contiguous being too large.
791          */
792         if (STATE_SET_TEST(RIGHT_VALID,
793                         idx <
794                         ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1)) {
795                 xfs_bmbt_get_all(ep + 1, &RIGHT);
796                 STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(RIGHT.br_startblock));
797         }
798         STATE_SET(RIGHT_CONTIG,
799                 STATE_TEST(RIGHT_VALID) && !STATE_TEST(RIGHT_DELAY) &&
800                 new_endoff == RIGHT.br_startoff &&
801                 new->br_startblock + new->br_blockcount ==
802                     RIGHT.br_startblock &&
803                 new->br_state == RIGHT.br_state &&
804                 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
805                 ((state & MASK3(LEFT_CONTIG, LEFT_FILLING, RIGHT_FILLING)) !=
806                   MASK3(LEFT_CONTIG, LEFT_FILLING, RIGHT_FILLING) ||
807                  LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
808                      <= MAXEXTLEN));
809         error = 0;
810         /*
811          * Switch out based on the FILLING and CONTIG state bits.
812          */
813         switch (SWITCH_STATE) {
814
815         case MASK4(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG):
816                 /*
817                  * Filling in all of a previously delayed allocation extent.
818                  * The left and right neighbors are both contiguous with new.
819                  */
820                 xfs_bmap_trace_pre_update(fname, "LF|RF|LC|RC", ip, idx - 1,
821                         XFS_DATA_FORK);
822                 xfs_bmbt_set_blockcount(ep - 1,
823                         LEFT.br_blockcount + PREV.br_blockcount +
824                         RIGHT.br_blockcount);
825                 xfs_bmap_trace_post_update(fname, "LF|RF|LC|RC", ip, idx - 1,
826                         XFS_DATA_FORK);
827                 xfs_bmap_trace_delete(fname, "LF|RF|LC|RC", ip, idx, 2,
828                         XFS_DATA_FORK);
829                 xfs_bmap_delete_exlist(ip, idx, 2, XFS_DATA_FORK);
830                 ip->i_df.if_lastex = idx - 1;
831                 ip->i_d.di_nextents--;
832                 if (cur == NULL)
833                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
834                 else {
835                         rval = XFS_ILOG_CORE;
836                         if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
837                                         RIGHT.br_startblock,
838                                         RIGHT.br_blockcount, &i)))
839                                 goto done;
840                         ASSERT(i == 1);
841                         if ((error = xfs_bmbt_delete(cur, &i)))
842                                 goto done;
843                         ASSERT(i == 1);
844                         if ((error = xfs_bmbt_decrement(cur, 0, &i)))
845                                 goto done;
846                         ASSERT(i == 1);
847                         if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
848                                         LEFT.br_startblock,
849                                         LEFT.br_blockcount +
850                                         PREV.br_blockcount +
851                                         RIGHT.br_blockcount, LEFT.br_state)))
852                                 goto done;
853                 }
854                 *dnew = 0;
855                 break;
856
857         case MASK3(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG):
858                 /*
859                  * Filling in all of a previously delayed allocation extent.
860                  * The left neighbor is contiguous, the right is not.
861                  */
862                 xfs_bmap_trace_pre_update(fname, "LF|RF|LC", ip, idx - 1,
863                         XFS_DATA_FORK);
864                 xfs_bmbt_set_blockcount(ep - 1,
865                         LEFT.br_blockcount + PREV.br_blockcount);
866                 xfs_bmap_trace_post_update(fname, "LF|RF|LC", ip, idx - 1,
867                         XFS_DATA_FORK);
868                 ip->i_df.if_lastex = idx - 1;
869                 xfs_bmap_trace_delete(fname, "LF|RF|LC", ip, idx, 1,
870                         XFS_DATA_FORK);
871                 xfs_bmap_delete_exlist(ip, idx, 1, XFS_DATA_FORK);
872                 if (cur == NULL)
873                         rval = XFS_ILOG_DEXT;
874                 else {
875                         rval = 0;
876                         if ((error = xfs_bmbt_lookup_eq(cur, LEFT.br_startoff,
877                                         LEFT.br_startblock, LEFT.br_blockcount,
878                                         &i)))
879                                 goto done;
880                         ASSERT(i == 1);
881                         if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
882                                         LEFT.br_startblock,
883                                         LEFT.br_blockcount +
884                                         PREV.br_blockcount, LEFT.br_state)))
885                                 goto done;
886                 }
887                 *dnew = 0;
888                 break;
889
890         case MASK3(LEFT_FILLING, RIGHT_FILLING, RIGHT_CONTIG):
891                 /*
892                  * Filling in all of a previously delayed allocation extent.
893                  * The right neighbor is contiguous, the left is not.
894                  */
895                 xfs_bmap_trace_pre_update(fname, "LF|RF|RC", ip, idx,
896                         XFS_DATA_FORK);
897                 xfs_bmbt_set_startblock(ep, new->br_startblock);
898                 xfs_bmbt_set_blockcount(ep,
899                         PREV.br_blockcount + RIGHT.br_blockcount);
900                 xfs_bmap_trace_post_update(fname, "LF|RF|RC", ip, idx,
901                         XFS_DATA_FORK);
902                 ip->i_df.if_lastex = idx;
903                 xfs_bmap_trace_delete(fname, "LF|RF|RC", ip, idx + 1, 1,
904                         XFS_DATA_FORK);
905                 xfs_bmap_delete_exlist(ip, idx + 1, 1, XFS_DATA_FORK);
906                 if (cur == NULL)
907                         rval = XFS_ILOG_DEXT;
908                 else {
909                         rval = 0;
910                         if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
911                                         RIGHT.br_startblock,
912                                         RIGHT.br_blockcount, &i)))
913                                 goto done;
914                         ASSERT(i == 1);
915                         if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
916                                         new->br_startblock,
917                                         PREV.br_blockcount +
918                                         RIGHT.br_blockcount, PREV.br_state)))
919                                 goto done;
920                 }
921                 *dnew = 0;
922                 break;
923
924         case MASK2(LEFT_FILLING, RIGHT_FILLING):
925                 /*
926                  * Filling in all of a previously delayed allocation extent.
927                  * Neither the left nor right neighbors are contiguous with
928                  * the new one.
929                  */
930                 xfs_bmap_trace_pre_update(fname, "LF|RF", ip, idx,
931                         XFS_DATA_FORK);
932                 xfs_bmbt_set_startblock(ep, new->br_startblock);
933                 xfs_bmap_trace_post_update(fname, "LF|RF", ip, idx,
934                         XFS_DATA_FORK);
935                 ip->i_df.if_lastex = idx;
936                 ip->i_d.di_nextents++;
937                 if (cur == NULL)
938                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
939                 else {
940                         rval = XFS_ILOG_CORE;
941                         if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
942                                         new->br_startblock, new->br_blockcount,
943                                         &i)))
944                                 goto done;
945                         ASSERT(i == 0);
946                         cur->bc_rec.b.br_state = XFS_EXT_NORM;
947                         if ((error = xfs_bmbt_insert(cur, &i)))
948                                 goto done;
949                         ASSERT(i == 1);
950                 }
951                 *dnew = 0;
952                 break;
953
954         case MASK2(LEFT_FILLING, LEFT_CONTIG):
955                 /*
956                  * Filling in the first part of a previous delayed allocation.
957                  * The left neighbor is contiguous.
958                  */
959                 xfs_bmap_trace_pre_update(fname, "LF|LC", ip, idx - 1,
960                         XFS_DATA_FORK);
961                 xfs_bmbt_set_blockcount(ep - 1,
962                         LEFT.br_blockcount + new->br_blockcount);
963                 xfs_bmbt_set_startoff(ep,
964                         PREV.br_startoff + new->br_blockcount);
965                 xfs_bmap_trace_post_update(fname, "LF|LC", ip, idx - 1,
966                         XFS_DATA_FORK);
967                 temp = PREV.br_blockcount - new->br_blockcount;
968                 xfs_bmap_trace_pre_update(fname, "LF|LC", ip, idx,
969                         XFS_DATA_FORK);
970                 xfs_bmbt_set_blockcount(ep, temp);
971                 ip->i_df.if_lastex = idx - 1;
972                 if (cur == NULL)
973                         rval = XFS_ILOG_DEXT;
974                 else {
975                         rval = 0;
976                         if ((error = xfs_bmbt_lookup_eq(cur, LEFT.br_startoff,
977                                         LEFT.br_startblock, LEFT.br_blockcount,
978                                         &i)))
979                                 goto done;
980                         ASSERT(i == 1);
981                         if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
982                                         LEFT.br_startblock,
983                                         LEFT.br_blockcount +
984                                         new->br_blockcount,
985                                         LEFT.br_state)))
986                                 goto done;
987                 }
988                 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
989                         STARTBLOCKVAL(PREV.br_startblock));
990                 xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
991                 xfs_bmap_trace_post_update(fname, "LF|LC", ip, idx,
992                         XFS_DATA_FORK);
993                 *dnew = temp;
994                 break;
995
996         case MASK(LEFT_FILLING):
997                 /*
998                  * Filling in the first part of a previous delayed allocation.
999                  * The left neighbor is not contiguous.
1000                  */
1001                 xfs_bmap_trace_pre_update(fname, "LF", ip, idx, XFS_DATA_FORK);
1002                 xfs_bmbt_set_startoff(ep, new_endoff);
1003                 temp = PREV.br_blockcount - new->br_blockcount;
1004                 xfs_bmbt_set_blockcount(ep, temp);
1005                 xfs_bmap_trace_insert(fname, "LF", ip, idx, 1, new, NULL,
1006                         XFS_DATA_FORK);
1007                 xfs_bmap_insert_exlist(ip, idx, 1, new, XFS_DATA_FORK);
1008                 ip->i_df.if_lastex = idx;
1009                 ip->i_d.di_nextents++;
1010                 if (cur == NULL)
1011                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1012                 else {
1013                         rval = XFS_ILOG_CORE;
1014                         if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
1015                                         new->br_startblock, new->br_blockcount,
1016                                         &i)))
1017                                 goto done;
1018                         ASSERT(i == 0);
1019                         cur->bc_rec.b.br_state = XFS_EXT_NORM;
1020                         if ((error = xfs_bmbt_insert(cur, &i)))
1021                                 goto done;
1022                         ASSERT(i == 1);
1023                 }
1024                 if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1025                     ip->i_d.di_nextents > ip->i_df.if_ext_max) {
1026                         error = xfs_bmap_extents_to_btree(ip->i_transp, ip,
1027                                         first, flist, &cur, 1, &tmp_rval,
1028                                         XFS_DATA_FORK);
1029                         rval |= tmp_rval;
1030                         if (error)
1031                                 goto done;
1032                 }
1033                 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
1034                         STARTBLOCKVAL(PREV.br_startblock) -
1035                         (cur ? cur->bc_private.b.allocated : 0));
1036                 base = ip->i_df.if_u1.if_extents;
1037                 ep = &base[idx + 1];
1038                 xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
1039                 xfs_bmap_trace_post_update(fname, "LF", ip, idx + 1,
1040                         XFS_DATA_FORK);
1041                 *dnew = temp;
1042                 break;
1043
1044         case MASK2(RIGHT_FILLING, RIGHT_CONTIG):
1045                 /*
1046                  * Filling in the last part of a previous delayed allocation.
1047                  * The right neighbor is contiguous with the new allocation.
1048                  */
1049                 temp = PREV.br_blockcount - new->br_blockcount;
1050                 xfs_bmap_trace_pre_update(fname, "RF|RC", ip, idx,
1051                         XFS_DATA_FORK);
1052                 xfs_bmap_trace_pre_update(fname, "RF|RC", ip, idx + 1,
1053                         XFS_DATA_FORK);
1054                 xfs_bmbt_set_blockcount(ep, temp);
1055                 xfs_bmbt_set_allf(ep + 1, new->br_startoff, new->br_startblock,
1056                         new->br_blockcount + RIGHT.br_blockcount,
1057                         RIGHT.br_state);
1058                 xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx + 1,
1059                         XFS_DATA_FORK);
1060                 ip->i_df.if_lastex = idx + 1;
1061                 if (cur == NULL)
1062                         rval = XFS_ILOG_DEXT;
1063                 else {
1064                         rval = 0;
1065                         if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
1066                                         RIGHT.br_startblock,
1067                                         RIGHT.br_blockcount, &i)))
1068                                 goto done;
1069                         ASSERT(i == 1);
1070                         if ((error = xfs_bmbt_update(cur, new->br_startoff,
1071                                         new->br_startblock,
1072                                         new->br_blockcount +
1073                                         RIGHT.br_blockcount,
1074                                         RIGHT.br_state)))
1075                                 goto done;
1076                 }
1077                 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
1078                         STARTBLOCKVAL(PREV.br_startblock));
1079                 xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
1080                 xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx,
1081                         XFS_DATA_FORK);
1082                 *dnew = temp;
1083                 break;
1084
1085         case MASK(RIGHT_FILLING):
1086                 /*
1087                  * Filling in the last part of a previous delayed allocation.
1088                  * The right neighbor is not contiguous.
1089                  */
1090                 temp = PREV.br_blockcount - new->br_blockcount;
1091                 xfs_bmap_trace_pre_update(fname, "RF", ip, idx, XFS_DATA_FORK);
1092                 xfs_bmbt_set_blockcount(ep, temp);
1093                 xfs_bmap_trace_insert(fname, "RF", ip, idx + 1, 1,
1094                         new, NULL, XFS_DATA_FORK);
1095                 xfs_bmap_insert_exlist(ip, idx + 1, 1, new, XFS_DATA_FORK);
1096                 ip->i_df.if_lastex = idx + 1;
1097                 ip->i_d.di_nextents++;
1098                 if (cur == NULL)
1099                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1100                 else {
1101                         rval = XFS_ILOG_CORE;
1102                         if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
1103                                         new->br_startblock, new->br_blockcount,
1104                                         &i)))
1105                                 goto done;
1106                         ASSERT(i == 0);
1107                         cur->bc_rec.b.br_state = XFS_EXT_NORM;
1108                         if ((error = xfs_bmbt_insert(cur, &i)))
1109                                 goto done;
1110                         ASSERT(i == 1);
1111                 }
1112                 if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1113                     ip->i_d.di_nextents > ip->i_df.if_ext_max) {
1114                         error = xfs_bmap_extents_to_btree(ip->i_transp, ip,
1115                                 first, flist, &cur, 1, &tmp_rval,
1116                                 XFS_DATA_FORK);
1117                         rval |= tmp_rval;
1118                         if (error)
1119                                 goto done;
1120                 }
1121                 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
1122                         STARTBLOCKVAL(PREV.br_startblock) -
1123                         (cur ? cur->bc_private.b.allocated : 0));
1124                 base = ip->i_df.if_u1.if_extents;
1125                 ep = &base[idx];
1126                 xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
1127                 xfs_bmap_trace_post_update(fname, "RF", ip, idx, XFS_DATA_FORK);
1128                 *dnew = temp;
1129                 break;
1130
1131         case 0:
1132                 /*
1133                  * Filling in the middle part of a previous delayed allocation.
1134                  * Contiguity is impossible here.
1135                  * This case is avoided almost all the time.
1136                  */
1137                 temp = new->br_startoff - PREV.br_startoff;
1138                 xfs_bmap_trace_pre_update(fname, "0", ip, idx, XFS_DATA_FORK);
1139                 xfs_bmbt_set_blockcount(ep, temp);
1140                 r[0] = *new;
1141                 r[1].br_startoff = new_endoff;
1142                 temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff;
1143                 r[1].br_blockcount = temp2;
1144                 xfs_bmap_trace_insert(fname, "0", ip, idx + 1, 2, &r[0], &r[1],
1145                         XFS_DATA_FORK);
1146                 xfs_bmap_insert_exlist(ip, idx + 1, 2, &r[0], XFS_DATA_FORK);
1147                 ip->i_df.if_lastex = idx + 1;
1148                 ip->i_d.di_nextents++;
1149                 if (cur == NULL)
1150                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1151                 else {
1152                         rval = XFS_ILOG_CORE;
1153                         if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
1154                                         new->br_startblock, new->br_blockcount,
1155                                         &i)))
1156                                 goto done;
1157                         ASSERT(i == 0);
1158                         cur->bc_rec.b.br_state = XFS_EXT_NORM;
1159                         if ((error = xfs_bmbt_insert(cur, &i)))
1160                                 goto done;
1161                         ASSERT(i == 1);
1162                 }
1163                 if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1164                     ip->i_d.di_nextents > ip->i_df.if_ext_max) {
1165                         error = xfs_bmap_extents_to_btree(ip->i_transp, ip,
1166                                         first, flist, &cur, 1, &tmp_rval,
1167                                         XFS_DATA_FORK);
1168                         rval |= tmp_rval;
1169                         if (error)
1170                                 goto done;
1171                 }
1172                 temp = xfs_bmap_worst_indlen(ip, temp);
1173                 temp2 = xfs_bmap_worst_indlen(ip, temp2);
1174                 diff = (int)(temp + temp2 - STARTBLOCKVAL(PREV.br_startblock) -
1175                         (cur ? cur->bc_private.b.allocated : 0));
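                /*
                 * If diff is positive, the two remaining delayed pieces need
                 * a larger worst-case indirect-block reservation than the
                 * original delayed extent carried.  Try to take the extra
                 * blocks from the in-core free-block count; failing that,
                 * shrink temp and temp2 one block at a time below until the
                 * remaining shortfall can be reserved.
                 */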
1176                 if (diff > 0 &&
1177                     xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS, -diff, rsvd)) {
1178                         /*
1179                          * Ick gross gag me with a spoon.
1180                          */
1181                         ASSERT(0);      /* want to see if this ever happens! */
1182                         while (diff > 0) {
1183                                 if (temp) {
1184                                         temp--;
1185                                         diff--;
1186                                         if (!diff ||
1187                                             !xfs_mod_incore_sb(ip->i_mount,
1188                                                     XFS_SBS_FDBLOCKS, -diff, rsvd))
1189                                                 break;
1190                                 }
1191                                 if (temp2) {
1192                                         temp2--;
1193                                         diff--;
1194                                         if (!diff ||
1195                                             !xfs_mod_incore_sb(ip->i_mount,
1196                                                     XFS_SBS_FDBLOCKS, -diff, rsvd))
1197                                                 break;
1198                                 }
1199                         }
1200                 }
1201                 base = ip->i_df.if_u1.if_extents;
1202                 ep = &base[idx];
1203                 xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
1204                 xfs_bmap_trace_post_update(fname, "0", ip, idx, XFS_DATA_FORK);
1205                 xfs_bmap_trace_pre_update(fname, "0", ip, idx + 2,
1206                         XFS_DATA_FORK);
1207                 xfs_bmbt_set_startblock(ep + 2, NULLSTARTBLOCK((int)temp2));
1208                 xfs_bmap_trace_post_update(fname, "0", ip, idx + 2,
1209                         XFS_DATA_FORK);
1210                 *dnew = temp + temp2;
1211                 break;
1212
1213         case MASK3(LEFT_FILLING, LEFT_CONTIG, RIGHT_CONTIG):
1214         case MASK3(RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG):
1215         case MASK2(LEFT_FILLING, RIGHT_CONTIG):
1216         case MASK2(RIGHT_FILLING, LEFT_CONTIG):
1217         case MASK2(LEFT_CONTIG, RIGHT_CONTIG):
1218         case MASK(LEFT_CONTIG):
1219         case MASK(RIGHT_CONTIG):
1220                 /*
1221                  * These cases are all impossible.
1222                  */
1223                 ASSERT(0);
1224         }
1225         *curp = cur;
1226 done:
1227         *logflagsp = rval;
1228         return error;
1229 #undef  LEFT
1230 #undef  RIGHT
1231 #undef  PREV
1232 #undef  MASK
1233 #undef  MASK2
1234 #undef  MASK3
1235 #undef  MASK4
1236 #undef  STATE_SET
1237 #undef  STATE_TEST
1238 #undef  STATE_SET_TEST
1239 #undef  SWITCH_STATE
1240 }
1241
1242 /*
1243  * Called by xfs_bmap_add_extent to handle cases converting an unwritten
1244  * allocation to a real allocation or vice versa.
1245  */
1246 STATIC int                              /* error */
1247 xfs_bmap_add_extent_unwritten_real(
1248         xfs_inode_t             *ip,    /* incore inode pointer */
1249         xfs_extnum_t            idx,    /* extent number to update/insert */
1250         xfs_btree_cur_t         **curp, /* if *curp is null, not a btree */
1251         xfs_bmbt_irec_t         *new,   /* new data to put in extent list */
1252         int                     *logflagsp) /* inode logging flags */
1253 {
1254         xfs_bmbt_rec_t          *base;  /* base of extent entry list */
1255         xfs_btree_cur_t         *cur;   /* btree cursor */
1256         xfs_bmbt_rec_t          *ep;    /* extent entry for idx */
1257         int                     error;  /* error return value */
1258 #ifdef XFS_BMAP_TRACE
1259         static char             fname[] = "xfs_bmap_add_extent_unwritten_real";
1260 #endif
1261         int                     i;      /* temp state */
1262         xfs_fileoff_t           new_endoff;     /* end offset of new entry */
1263         xfs_exntst_t            newext; /* new extent state */
1264         xfs_exntst_t            oldext; /* old extent state */
1265         xfs_bmbt_irec_t         r[3];   /* neighbor extent entries */
1266                                         /* left is 0, right is 1, prev is 2 */
1267         int                     rval=0; /* return value (logging flags) */
1268         int                     state = 0;/* state bits, accessed thru macros */
1269         enum {                          /* bit number definitions for state */
1270                 LEFT_CONTIG,    RIGHT_CONTIG,
1271                 LEFT_FILLING,   RIGHT_FILLING,
1272                 LEFT_DELAY,     RIGHT_DELAY,
1273                 LEFT_VALID,     RIGHT_VALID
1274         };
1275
1276 #define LEFT            r[0]
1277 #define RIGHT           r[1]
1278 #define PREV            r[2]
1279 #define MASK(b)         (1 << (b))
1280 #define MASK2(a,b)      (MASK(a) | MASK(b))
1281 #define MASK3(a,b,c)    (MASK2(a,b) | MASK(c))
1282 #define MASK4(a,b,c,d)  (MASK3(a,b,c) | MASK(d))
1283 #define STATE_SET(b,v)  ((v) ? (state |= MASK(b)) : (state &= ~MASK(b)))
1284 #define STATE_TEST(b)   (state & MASK(b))
1285 #define STATE_SET_TEST(b,v)     ((v) ? ((state |= MASK(b)), 1) : \
1286                                        ((state &= ~MASK(b)), 0))
1287 #define SWITCH_STATE            \
1288         (state & MASK4(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG))
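/*
 * For example, converting just the first part of an unwritten extent when an
 * adjacent left neighbor already carries the new state sets LEFT_FILLING and
 * LEFT_CONTIG, so SWITCH_STATE equals MASK2(LEFT_FILLING, LEFT_CONTIG) and
 * the corresponding case below merges the converted range into the left
 * neighbor.
 */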
1289
1290         /*
1291          * Set up a bunch of variables to make the tests simpler.
1292          */
1293         error = 0;
1294         cur = *curp;
1295         base = ip->i_df.if_u1.if_extents;
1296         ep = &base[idx];
1297         xfs_bmbt_get_all(ep, &PREV);
1298         newext = new->br_state;
1299         oldext = (newext == XFS_EXT_UNWRITTEN) ?
1300                 XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
1301         ASSERT(PREV.br_state == oldext);
1302         new_endoff = new->br_startoff + new->br_blockcount;
1303         ASSERT(PREV.br_startoff <= new->br_startoff);
1304         ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
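        /* i.e. the range being converted must lie wholly within PREV */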
1305         /*
1306          * Set flags determining what part of the previous oldext allocation
1307          * extent is being replaced by a newext allocation.
1308          */
1309         STATE_SET(LEFT_FILLING, PREV.br_startoff == new->br_startoff);
1310         STATE_SET(RIGHT_FILLING,
1311                 PREV.br_startoff + PREV.br_blockcount == new_endoff);
1312         /*
1313          * Check and set flags if this segment has a left neighbor.
1314          * Don't set contiguous if the combined extent would be too large.
1315          */
1316         if (STATE_SET_TEST(LEFT_VALID, idx > 0)) {
1317                 xfs_bmbt_get_all(ep - 1, &LEFT);
1318                 STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(LEFT.br_startblock));
1319         }
1320         STATE_SET(LEFT_CONTIG,
1321                 STATE_TEST(LEFT_VALID) && !STATE_TEST(LEFT_DELAY) &&
1322                 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
1323                 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
1324                 LEFT.br_state == newext &&
1325                 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN);
1326         /*
1327          * Check and set flags if this segment has a right neighbor.
1328          * Don't set contiguous if the combined extent would be too large.
1329          * Also check for all-three-contiguous being too large.
1330          */
1331         if (STATE_SET_TEST(RIGHT_VALID,
1332                         idx <
1333                         ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1)) {
1334                 xfs_bmbt_get_all(ep + 1, &RIGHT);
1335                 STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(RIGHT.br_startblock));
1336         }
1337         STATE_SET(RIGHT_CONTIG,
1338                 STATE_TEST(RIGHT_VALID) && !STATE_TEST(RIGHT_DELAY) &&
1339                 new_endoff == RIGHT.br_startoff &&
1340                 new->br_startblock + new->br_blockcount ==
1341                     RIGHT.br_startblock &&
1342                 newext == RIGHT.br_state &&
1343                 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
1344                 ((state & MASK3(LEFT_CONTIG, LEFT_FILLING, RIGHT_FILLING)) !=
1345                   MASK3(LEFT_CONTIG, LEFT_FILLING, RIGHT_FILLING) ||
1346                  LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
1347                      <= MAXEXTLEN));
1348         /*
1349          * Switch out based on the FILLING and CONTIG state bits.
1350          */
1351         switch (SWITCH_STATE) {
1352
1353         case MASK4(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG):
1354                 /*
1355                  * Setting all of a previous oldext extent to newext.
1356                  * The left and right neighbors are both contiguous with new.
1357                  */
1358                 xfs_bmap_trace_pre_update(fname, "LF|RF|LC|RC", ip, idx - 1,
1359                         XFS_DATA_FORK);
1360                 xfs_bmbt_set_blockcount(ep - 1,
1361                         LEFT.br_blockcount + PREV.br_blockcount +
1362                         RIGHT.br_blockcount);
1363                 xfs_bmap_trace_post_update(fname, "LF|RF|LC|RC", ip, idx - 1,
1364                         XFS_DATA_FORK);
1365                 xfs_bmap_trace_delete(fname, "LF|RF|LC|RC", ip, idx, 2,
1366                         XFS_DATA_FORK);
1367                 xfs_bmap_delete_exlist(ip, idx, 2, XFS_DATA_FORK);
1368                 ip->i_df.if_lastex = idx - 1;
1369                 ip->i_d.di_nextents -= 2;
1370                 if (cur == NULL)
1371                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1372                 else {
1373                         rval = XFS_ILOG_CORE;
1374                         if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
1375                                         RIGHT.br_startblock,
1376                                         RIGHT.br_blockcount, &i)))
1377                                 goto done;
1378                         ASSERT(i == 1);
1379                         if ((error = xfs_bmbt_delete(cur, &i)))
1380                                 goto done;
1381                         ASSERT(i == 1);
1382                         if ((error = xfs_bmbt_decrement(cur, 0, &i)))
1383                                 goto done;
1384                         ASSERT(i == 1);
1385                         if ((error = xfs_bmbt_delete(cur, &i)))
1386                                 goto done;
1387                         ASSERT(i == 1);
1388                         if ((error = xfs_bmbt_decrement(cur, 0, &i)))
1389                                 goto done;
1390                         ASSERT(i == 1);
1391                         if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
1392                                 LEFT.br_startblock,
1393                                 LEFT.br_blockcount + PREV.br_blockcount +
1394                                 RIGHT.br_blockcount, LEFT.br_state)))
1395                                 goto done;
1396                 }
1397                 break;
1398
1399         case MASK3(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG):
1400                 /*
1401                  * Setting all of a previous oldext extent to newext.
1402                  * The left neighbor is contiguous, the right is not.
1403                  */
1404                 xfs_bmap_trace_pre_update(fname, "LF|RF|LC", ip, idx - 1,
1405                         XFS_DATA_FORK);
1406                 xfs_bmbt_set_blockcount(ep - 1,
1407                         LEFT.br_blockcount + PREV.br_blockcount);
1408                 xfs_bmap_trace_post_update(fname, "LF|RF|LC", ip, idx - 1,
1409                         XFS_DATA_FORK);
1410                 ip->i_df.if_lastex = idx - 1;
1411                 xfs_bmap_trace_delete(fname, "LF|RF|LC", ip, idx, 1,
1412                         XFS_DATA_FORK);
1413                 xfs_bmap_delete_exlist(ip, idx, 1, XFS_DATA_FORK);
1414                 ip->i_d.di_nextents--;
1415                 if (cur == NULL)
1416                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1417                 else {
1418                         rval = XFS_ILOG_CORE;
1419                         if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1420                                         PREV.br_startblock, PREV.br_blockcount,
1421                                         &i)))
1422                                 goto done;
1423                         ASSERT(i == 1);
1424                         if ((error = xfs_bmbt_delete(cur, &i)))
1425                                 goto done;
1426                         ASSERT(i == 1);
1427                         if ((error = xfs_bmbt_decrement(cur, 0, &i)))
1428                                 goto done;
1429                         ASSERT(i == 1);
1430                         if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
1431                                 LEFT.br_startblock,
1432                                 LEFT.br_blockcount + PREV.br_blockcount,
1433                                 LEFT.br_state)))
1434                                 goto done;
1435                 }
1436                 break;
1437
1438         case MASK3(LEFT_FILLING, RIGHT_FILLING, RIGHT_CONTIG):
1439                 /*
1440                  * Setting all of a previous oldext extent to newext.
1441                  * The right neighbor is contiguous, the left is not.
1442                  */
1443                 xfs_bmap_trace_pre_update(fname, "LF|RF|RC", ip, idx,
1444                         XFS_DATA_FORK);
1445                 xfs_bmbt_set_blockcount(ep,
1446                         PREV.br_blockcount + RIGHT.br_blockcount);
1447                 xfs_bmbt_set_state(ep, newext);
1448                 xfs_bmap_trace_post_update(fname, "LF|RF|RC", ip, idx,
1449                         XFS_DATA_FORK);
1450                 ip->i_df.if_lastex = idx;
1451                 xfs_bmap_trace_delete(fname, "LF|RF|RC", ip, idx + 1, 1,
1452                         XFS_DATA_FORK);
1453                 xfs_bmap_delete_exlist(ip, idx + 1, 1, XFS_DATA_FORK);
1454                 ip->i_d.di_nextents--;
1455                 if (cur == NULL)
1456                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1457                 else {
1458                         rval = XFS_ILOG_CORE;
1459                         if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
1460                                         RIGHT.br_startblock,
1461                                         RIGHT.br_blockcount, &i)))
1462                                 goto done;
1463                         ASSERT(i == 1);
1464                         if ((error = xfs_bmbt_delete(cur, &i)))
1465                                 goto done;
1466                         ASSERT(i == 1);
1467                         if ((error = xfs_bmbt_decrement(cur, 0, &i)))
1468                                 goto done;
1469                         ASSERT(i == 1);
1470                         if ((error = xfs_bmbt_update(cur, new->br_startoff,
1471                                 new->br_startblock,
1472                                 new->br_blockcount + RIGHT.br_blockcount,
1473                                 newext)))
1474                                 goto done;
1475                 }
1476                 break;
1477
1478         case MASK2(LEFT_FILLING, RIGHT_FILLING):
1479                 /*
1480                  * Setting all of a previous oldext extent to newext.
1481                  * Neither the left nor right neighbors are contiguous with
1482                  * the new one.
1483                  */
1484                 xfs_bmap_trace_pre_update(fname, "LF|RF", ip, idx,
1485                         XFS_DATA_FORK);
1486                 xfs_bmbt_set_state(ep, newext);
1487                 xfs_bmap_trace_post_update(fname, "LF|RF", ip, idx,
1488                         XFS_DATA_FORK);
1489                 ip->i_df.if_lastex = idx;
1490                 if (cur == NULL)
1491                         rval = XFS_ILOG_DEXT;
1492                 else {
1493                         rval = 0;
1494                         if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
1495                                         new->br_startblock, new->br_blockcount,
1496                                         &i)))
1497                                 goto done;
1498                         ASSERT(i == 1);
1499                         if ((error = xfs_bmbt_update(cur, new->br_startoff,
1500                                 new->br_startblock, new->br_blockcount,
1501                                 newext)))
1502                                 goto done;
1503                 }
1504                 break;
1505
1506         case MASK2(LEFT_FILLING, LEFT_CONTIG):
1507                 /*
1508                  * Setting the first part of a previous oldext extent to newext.
1509                  * The left neighbor is contiguous.
1510                  */
1511                 xfs_bmap_trace_pre_update(fname, "LF|LC", ip, idx - 1,
1512                         XFS_DATA_FORK);
1513                 xfs_bmbt_set_blockcount(ep - 1,
1514                         LEFT.br_blockcount + new->br_blockcount);
1515                 xfs_bmbt_set_startoff(ep,
1516                         PREV.br_startoff + new->br_blockcount);
1517                 xfs_bmap_trace_post_update(fname, "LF|LC", ip, idx - 1,
1518                         XFS_DATA_FORK);
1519                 xfs_bmap_trace_pre_update(fname, "LF|LC", ip, idx,
1520                         XFS_DATA_FORK);
1521                 xfs_bmbt_set_startblock(ep,
1522                         new->br_startblock + new->br_blockcount);
1523                 xfs_bmbt_set_blockcount(ep,
1524                         PREV.br_blockcount - new->br_blockcount);
1525                 xfs_bmap_trace_post_update(fname, "LF|LC", ip, idx,
1526                         XFS_DATA_FORK);
1527                 ip->i_df.if_lastex = idx - 1;
1528                 if (cur == NULL)
1529                         rval = XFS_ILOG_DEXT;
1530                 else {
1531                         rval = 0;
1532                         if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1533                                         PREV.br_startblock, PREV.br_blockcount,
1534                                         &i)))
1535                                 goto done;
1536                         ASSERT(i == 1);
1537                         if ((error = xfs_bmbt_update(cur,
1538                                 PREV.br_startoff + new->br_blockcount,
1539                                 PREV.br_startblock + new->br_blockcount,
1540                                 PREV.br_blockcount - new->br_blockcount,
1541                                 oldext)))
1542                                 goto done;
1543                         if ((error = xfs_bmbt_decrement(cur, 0, &i)))
1544                                 goto done;
1545                         if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
1546                                 LEFT.br_startblock,
1547                                 LEFT.br_blockcount + new->br_blockcount,
1548                                 LEFT.br_state)))
1549                                 goto done;
1550                 }
1551                 break;
1552
1553         case MASK(LEFT_FILLING):
1554                 /*
1555                  * Setting the first part of a previous oldext extent to newext.
1556                  * The left neighbor is not contiguous.
1557                  */
1558                 xfs_bmap_trace_pre_update(fname, "LF", ip, idx, XFS_DATA_FORK);
1559                 ASSERT(ep && xfs_bmbt_get_state(ep) == oldext);
1560                 xfs_bmbt_set_startoff(ep, new_endoff);
1561                 xfs_bmbt_set_blockcount(ep,
1562                         PREV.br_blockcount - new->br_blockcount);
1563                 xfs_bmbt_set_startblock(ep,
1564                         new->br_startblock + new->br_blockcount);
1565                 xfs_bmap_trace_post_update(fname, "LF", ip, idx, XFS_DATA_FORK);
1566                 xfs_bmap_trace_insert(fname, "LF", ip, idx, 1, new, NULL,
1567                         XFS_DATA_FORK);
1568                 xfs_bmap_insert_exlist(ip, idx, 1, new, XFS_DATA_FORK);
1569                 ip->i_df.if_lastex = idx;
1570                 ip->i_d.di_nextents++;
1571                 if (cur == NULL)
1572                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1573                 else {
1574                         rval = XFS_ILOG_CORE;
1575                         if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1576                                         PREV.br_startblock, PREV.br_blockcount,
1577                                         &i)))
1578                                 goto done;
1579                         ASSERT(i == 1);
1580                         if ((error = xfs_bmbt_update(cur,
1581                                 PREV.br_startoff + new->br_blockcount,
1582                                 PREV.br_startblock + new->br_blockcount,
1583                                 PREV.br_blockcount - new->br_blockcount,
1584                                 oldext)))
1585                                 goto done;
1586                         cur->bc_rec.b = *new;
1587                         if ((error = xfs_bmbt_insert(cur, &i)))
1588                                 goto done;
1589                         ASSERT(i == 1);
1590                 }
1591                 break;
1592
1593         case MASK2(RIGHT_FILLING, RIGHT_CONTIG):
1594                 /*
1595                  * Setting the last part of a previous oldext extent to newext.
1596                  * The right neighbor is contiguous with the new allocation.
1597                  */
1598                 xfs_bmap_trace_pre_update(fname, "RF|RC", ip, idx,
1599                         XFS_DATA_FORK);
1600                 xfs_bmap_trace_pre_update(fname, "RF|RC", ip, idx + 1,
1601                         XFS_DATA_FORK);
1602                 xfs_bmbt_set_blockcount(ep,
1603                         PREV.br_blockcount - new->br_blockcount);
1604                 xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx,
1605                         XFS_DATA_FORK);
1606                 xfs_bmbt_set_allf(ep + 1, new->br_startoff, new->br_startblock,
1607                         new->br_blockcount + RIGHT.br_blockcount, newext);
1608                 xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx + 1,
1609                         XFS_DATA_FORK);
1610                 ip->i_df.if_lastex = idx + 1;
1611                 if (cur == NULL)
1612                         rval = XFS_ILOG_DEXT;
1613                 else {
1614                         rval = 0;
1615                         if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1616                                         PREV.br_startblock,
1617                                         PREV.br_blockcount, &i)))
1618                                 goto done;
1619                         ASSERT(i == 1);
1620                         if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
1621                                 PREV.br_startblock,
1622                                 PREV.br_blockcount - new->br_blockcount,
1623                                 oldext)))
1624                                 goto done;
1625                         if ((error = xfs_bmbt_increment(cur, 0, &i)))
1626                                 goto done;
1627                         if ((error = xfs_bmbt_update(cur, new->br_startoff,
1628                                 new->br_startblock,
1629                                 new->br_blockcount + RIGHT.br_blockcount,
1630                                 newext)))
1631                                 goto done;
1632                 }
1633                 break;
1634
1635         case MASK(RIGHT_FILLING):
1636                 /*
1637                  * Setting the last part of a previous oldext extent to newext.
1638                  * The right neighbor is not contiguous.
1639                  */
1640                 xfs_bmap_trace_pre_update(fname, "RF", ip, idx, XFS_DATA_FORK);
1641                 xfs_bmbt_set_blockcount(ep,
1642                         PREV.br_blockcount - new->br_blockcount);
1643                 xfs_bmap_trace_post_update(fname, "RF", ip, idx, XFS_DATA_FORK);
1644                 xfs_bmap_trace_insert(fname, "RF", ip, idx + 1, 1,
1645                         new, NULL, XFS_DATA_FORK);
1646                 xfs_bmap_insert_exlist(ip, idx + 1, 1, new, XFS_DATA_FORK);
1647                 ip->i_df.if_lastex = idx + 1;
1648                 ip->i_d.di_nextents++;
1649                 if (cur == NULL)
1650                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1651                 else {
1652                         rval = XFS_ILOG_CORE;
1653                         if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1654                                         PREV.br_startblock, PREV.br_blockcount,
1655                                         &i)))
1656                                 goto done;
1657                         ASSERT(i == 1);
1658                         if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
1659                                 PREV.br_startblock,
1660                                 PREV.br_blockcount - new->br_blockcount,
1661                                 oldext)))
1662                                 goto done;
1663                         if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
1664                                         new->br_startblock, new->br_blockcount,
1665                                         &i)))
1666                                 goto done;
1667                         ASSERT(i == 0);
1668                         cur->bc_rec.b.br_state = newext;
1669                         if ((error = xfs_bmbt_insert(cur, &i)))
1670                                 goto done;
1671                         ASSERT(i == 1);
1672                 }
1673                 break;
1674
1675         case 0:
1676                 /*
1677                  * Setting the middle part of a previous oldext extent to
1678                  * newext.  Contiguity is impossible here.
1679                  * One extent becomes three extents.
1680                  */
1681                 xfs_bmap_trace_pre_update(fname, "0", ip, idx, XFS_DATA_FORK);
1682                 xfs_bmbt_set_blockcount(ep,
1683                         new->br_startoff - PREV.br_startoff);
1684                 xfs_bmap_trace_post_update(fname, "0", ip, idx, XFS_DATA_FORK);
1685                 r[0] = *new;
1686                 r[1].br_startoff = new_endoff;
1687                 r[1].br_blockcount =
1688                         PREV.br_startoff + PREV.br_blockcount - new_endoff;
1689                 r[1].br_startblock = new->br_startblock + new->br_blockcount;
1690                 r[1].br_state = oldext;
1691                 xfs_bmap_trace_insert(fname, "0", ip, idx + 1, 2, &r[0], &r[1],
1692                         XFS_DATA_FORK);
1693                 xfs_bmap_insert_exlist(ip, idx + 1, 2, &r[0], XFS_DATA_FORK);
1694                 ip->i_df.if_lastex = idx + 1;
1695                 ip->i_d.di_nextents += 2;
1696                 if (cur == NULL)
1697                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1698                 else {
1699                         rval = XFS_ILOG_CORE;
1700                         if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1701                                         PREV.br_startblock, PREV.br_blockcount,
1702                                         &i)))
1703                                 goto done;
1704                         ASSERT(i == 1);
1705                         /* new right extent - oldext */
1706                         if ((error = xfs_bmbt_update(cur, r[1].br_startoff,
1707                                 r[1].br_startblock, r[1].br_blockcount,
1708                                 r[1].br_state)))
1709                                 goto done;
1710                         /* new left extent - oldext */
1711                         PREV.br_blockcount =
1712                                 new->br_startoff - PREV.br_startoff;
1713                         cur->bc_rec.b = PREV;
1714                         if ((error = xfs_bmbt_insert(cur, &i)))
1715                                 goto done;
1716                         ASSERT(i == 1);
1717                         if ((error = xfs_bmbt_increment(cur, 0, &i)))
1718                                 goto done;
1719                         ASSERT(i == 1);
1720                         /* new middle extent - newext */
1721                         cur->bc_rec.b = *new;
1722                         if ((error = xfs_bmbt_insert(cur, &i)))
1723                                 goto done;
1724                         ASSERT(i == 1);
1725                 }
1726                 break;
1727
1728         case MASK3(LEFT_FILLING, LEFT_CONTIG, RIGHT_CONTIG):
1729         case MASK3(RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG):
1730         case MASK2(LEFT_FILLING, RIGHT_CONTIG):
1731         case MASK2(RIGHT_FILLING, LEFT_CONTIG):
1732         case MASK2(LEFT_CONTIG, RIGHT_CONTIG):
1733         case MASK(LEFT_CONTIG):
1734         case MASK(RIGHT_CONTIG):
1735                 /*
1736                  * These cases are all impossible.
1737                  */
1738                 ASSERT(0);
1739         }
1740         *curp = cur;
1741 done:
1742         *logflagsp = rval;
1743         return error;
1744 #undef  LEFT
1745 #undef  RIGHT
1746 #undef  PREV
1747 #undef  MASK
1748 #undef  MASK2
1749 #undef  MASK3
1750 #undef  MASK4
1751 #undef  STATE_SET
1752 #undef  STATE_TEST
1753 #undef  STATE_SET_TEST
1754 #undef  SWITCH_STATE
1755 }
1756
1757 /*
1758  * Called by xfs_bmap_add_extent to handle cases converting a hole
1759  * to a delayed allocation.
1760  */
1761 /*ARGSUSED*/
1762 STATIC int                              /* error */
1763 xfs_bmap_add_extent_hole_delay(
1764         xfs_inode_t             *ip,    /* incore inode pointer */
1765         xfs_extnum_t            idx,    /* extent number to update/insert */
1766         xfs_btree_cur_t         *cur,   /* if null, not a btree */
1767         xfs_bmbt_irec_t         *new,   /* new data to put in extent list */
1768         int                     *logflagsp, /* inode logging flags */
1769         int                     rsvd)           /* OK to allocate reserved blocks */
1770 {
1771         xfs_bmbt_rec_t          *base;  /* base of extent entry list */
1772         xfs_bmbt_rec_t          *ep;    /* extent list entry for idx */
1773 #ifdef XFS_BMAP_TRACE
1774         static char             fname[] = "xfs_bmap_add_extent_hole_delay";
1775 #endif
1776         xfs_bmbt_irec_t         left;   /* left neighbor extent entry */
1777         xfs_filblks_t           newlen=0;       /* new indirect size */
1778         xfs_filblks_t           oldlen=0;       /* old indirect size */
1779         xfs_bmbt_irec_t         right;  /* right neighbor extent entry */
1780         int                     state;  /* state bits, accessed thru macros */
1781         xfs_filblks_t           temp;   /* temp for indirect calculations */
1782         enum {                          /* bit number definitions for state */
1783                 LEFT_CONTIG,    RIGHT_CONTIG,
1784                 LEFT_DELAY,     RIGHT_DELAY,
1785                 LEFT_VALID,     RIGHT_VALID
1786         };
1787
1788 #define MASK(b)                 (1 << (b))
1789 #define MASK2(a,b)              (MASK(a) | MASK(b))
1790 #define STATE_SET(b,v)          ((v) ? (state |= MASK(b)) : (state &= ~MASK(b)))
1791 #define STATE_TEST(b)           (state & MASK(b))
1792 #define STATE_SET_TEST(b,v)     ((v) ? ((state |= MASK(b)), 1) : \
1793                                        ((state &= ~MASK(b)), 0))
1794 #define SWITCH_STATE            (state & MASK2(LEFT_CONTIG, RIGHT_CONTIG))
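/*
 * Only the two contiguity bits matter here: we are filling a hole, so there
 * is no existing extent to be left- or right-filling against.  For example,
 * a new delayed allocation that abuts delayed allocations on both sides
 * gives SWITCH_STATE == MASK2(LEFT_CONTIG, RIGHT_CONTIG), and all three
 * entries are merged into one below.
 */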
1795
1796         base = ip->i_df.if_u1.if_extents;
1797         ep = &base[idx];
1798         state = 0;
1799         ASSERT(ISNULLSTARTBLOCK(new->br_startblock));
1800         /*
1801          * Check and set flags if this segment has a left neighbor
1802          */
1803         if (STATE_SET_TEST(LEFT_VALID, idx > 0)) {
1804                 xfs_bmbt_get_all(ep - 1, &left);
1805                 STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(left.br_startblock));
1806         }
1807         /*
1808          * Check and set flags if the current (right) segment exists.
1809          * If it doesn't exist, we're converting the hole at end-of-file.
1810          */
1811         if (STATE_SET_TEST(RIGHT_VALID,
1812                            idx <
1813                            ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) {
1814                 xfs_bmbt_get_all(ep, &right);
1815                 STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(right.br_startblock));
1816         }
1817         /*
1818          * Set contiguity flags on the left and right neighbors.
1819          * Don't let extents get too large, even if the pieces are contiguous.
1820          */
1821         STATE_SET(LEFT_CONTIG,
1822                 STATE_TEST(LEFT_VALID) && STATE_TEST(LEFT_DELAY) &&
1823                 left.br_startoff + left.br_blockcount == new->br_startoff &&
1824                 left.br_blockcount + new->br_blockcount <= MAXEXTLEN);
1825         STATE_SET(RIGHT_CONTIG,
1826                 STATE_TEST(RIGHT_VALID) && STATE_TEST(RIGHT_DELAY) &&
1827                 new->br_startoff + new->br_blockcount == right.br_startoff &&
1828                 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
1829                 (!STATE_TEST(LEFT_CONTIG) ||
1830                  (left.br_blockcount + new->br_blockcount +
1831                      right.br_blockcount <= MAXEXTLEN)));
1832         /*
1833          * Switch out based on the contiguity flags.
1834          */
1835         switch (SWITCH_STATE) {
1836
1837         case MASK2(LEFT_CONTIG, RIGHT_CONTIG):
1838                 /*
1839                  * New allocation is contiguous with delayed allocations
1840                  * on the left and on the right.
1841                  * Merge all three into a single extent list entry.
1842                  */
1843                 temp = left.br_blockcount + new->br_blockcount +
1844                         right.br_blockcount;
1845                 xfs_bmap_trace_pre_update(fname, "LC|RC", ip, idx - 1,
1846                         XFS_DATA_FORK);
1847                 xfs_bmbt_set_blockcount(ep - 1, temp);
1848                 oldlen = STARTBLOCKVAL(left.br_startblock) +
1849                         STARTBLOCKVAL(new->br_startblock) +
1850                         STARTBLOCKVAL(right.br_startblock);
1851                 newlen = xfs_bmap_worst_indlen(ip, temp);
1852                 xfs_bmbt_set_startblock(ep - 1, NULLSTARTBLOCK((int)newlen));
1853                 xfs_bmap_trace_post_update(fname, "LC|RC", ip, idx - 1,
1854                         XFS_DATA_FORK);
1855                 xfs_bmap_trace_delete(fname, "LC|RC", ip, idx, 1,
1856                         XFS_DATA_FORK);
1857                 xfs_bmap_delete_exlist(ip, idx, 1, XFS_DATA_FORK);
1858                 ip->i_df.if_lastex = idx - 1;
1859                 break;
1860
1861         case MASK(LEFT_CONTIG):
1862                 /*
1863                  * New allocation is contiguous with a delayed allocation
1864                  * on the left.
1865                  * Merge the new allocation with the left neighbor.
1866                  */
1867                 temp = left.br_blockcount + new->br_blockcount;
1868                 xfs_bmap_trace_pre_update(fname, "LC", ip, idx - 1,
1869                         XFS_DATA_FORK);
1870                 xfs_bmbt_set_blockcount(ep - 1, temp);
1871                 oldlen = STARTBLOCKVAL(left.br_startblock) +
1872                         STARTBLOCKVAL(new->br_startblock);
1873                 newlen = xfs_bmap_worst_indlen(ip, temp);
1874                 xfs_bmbt_set_startblock(ep - 1, NULLSTARTBLOCK((int)newlen));
1875                 xfs_bmap_trace_post_update(fname, "LC", ip, idx - 1,
1876                         XFS_DATA_FORK);
1877                 ip->i_df.if_lastex = idx - 1;
1878                 break;
1879
1880         case MASK(RIGHT_CONTIG):
1881                 /*
1882                  * New allocation is contiguous with a delayed allocation
1883                  * on the right.
1884                  * Merge the new allocation with the right neighbor.
1885                  */
1886                 xfs_bmap_trace_pre_update(fname, "RC", ip, idx, XFS_DATA_FORK);
1887                 temp = new->br_blockcount + right.br_blockcount;
1888                 oldlen = STARTBLOCKVAL(new->br_startblock) +
1889                         STARTBLOCKVAL(right.br_startblock);
1890                 newlen = xfs_bmap_worst_indlen(ip, temp);
1891                 xfs_bmbt_set_allf(ep, new->br_startoff,
1892                         NULLSTARTBLOCK((int)newlen), temp, right.br_state);
1893                 xfs_bmap_trace_post_update(fname, "RC", ip, idx, XFS_DATA_FORK);
1894                 ip->i_df.if_lastex = idx;
1895                 break;
1896
1897         case 0:
1898                 /*
1899                  * New allocation is not contiguous with another
1900                  * delayed allocation.
1901                  * Insert a new entry.
1902                  */
1903                 oldlen = newlen = 0;
1904                 xfs_bmap_trace_insert(fname, "0", ip, idx, 1, new, NULL,
1905                         XFS_DATA_FORK);
1906                 xfs_bmap_insert_exlist(ip, idx, 1, new, XFS_DATA_FORK);
1907                 ip->i_df.if_lastex = idx;
1908                 break;
1909         }
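        /*
         * The worst-case indirect reservation for the merged extent (newlen)
         * is never larger than the sum of the reservations it replaces
         * (oldlen), so any difference is handed back to the in-core free
         * block count.
         */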
1910         if (oldlen != newlen) {
1911                 ASSERT(oldlen > newlen);
1912                 xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS,
1913                         (int)(oldlen - newlen), rsvd);
1914                 /*
1915                  * Nothing to do for disk quota accounting here.
1916                  */
1917         }
1918         *logflagsp = 0;
1919         return 0;
1920 #undef  MASK
1921 #undef  MASK2
1922 #undef  STATE_SET
1923 #undef  STATE_TEST
1924 #undef  STATE_SET_TEST
1925 #undef  SWITCH_STATE
1926 }
1927
1928 /*
1929  * Called by xfs_bmap_add_extent to handle cases converting a hole
1930  * to a real allocation.
1931  */
1932 STATIC int                              /* error */
1933 xfs_bmap_add_extent_hole_real(
1934         xfs_inode_t             *ip,    /* incore inode pointer */
1935         xfs_extnum_t            idx,    /* extent number to update/insert */
1936         xfs_btree_cur_t         *cur,   /* if null, not a btree */
1937         xfs_bmbt_irec_t         *new,   /* new data to put in extent list */
1938         int                     *logflagsp, /* inode logging flags */
1939         int                     whichfork) /* data or attr fork */
1940 {
1941         xfs_bmbt_rec_t          *ep;    /* pointer to extent entry ins. point */
1942         int                     error;  /* error return value */
1943 #ifdef XFS_BMAP_TRACE
1944         static char             fname[] = "xfs_bmap_add_extent_hole_real";
1945 #endif
1946         int                     i;      /* temp state */
1947         xfs_ifork_t             *ifp;   /* inode fork pointer */
1948         xfs_bmbt_irec_t         left;   /* left neighbor extent entry */
1949         xfs_bmbt_irec_t         right;  /* right neighbor extent entry */
1950         int                     state;  /* state bits, accessed thru macros */
1951         enum {                          /* bit number definitions for state */
1952                 LEFT_CONTIG,    RIGHT_CONTIG,
1953                 LEFT_DELAY,     RIGHT_DELAY,
1954                 LEFT_VALID,     RIGHT_VALID
1955         };
1956
1957 #define MASK(b)                 (1 << (b))
1958 #define MASK2(a,b)              (MASK(a) | MASK(b))
1959 #define STATE_SET(b,v)          ((v) ? (state |= MASK(b)) : (state &= ~MASK(b)))
1960 #define STATE_TEST(b)           (state & MASK(b))
1961 #define STATE_SET_TEST(b,v)     ((v) ? ((state |= MASK(b)), 1) : \
1962                                        ((state &= ~MASK(b)), 0))
1963 #define SWITCH_STATE            (state & MASK2(LEFT_CONTIG, RIGHT_CONTIG))
1964
1965         ifp = XFS_IFORK_PTR(ip, whichfork);
1966         ASSERT(idx <= ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t));
1967         ep = &ifp->if_u1.if_extents[idx];
1968         state = 0;
1969         /*
1970          * Check and set flags if this segment has a left neighbor.
1971          */
1972         if (STATE_SET_TEST(LEFT_VALID, idx > 0)) {
1973                 xfs_bmbt_get_all(ep - 1, &left);
1974                 STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(left.br_startblock));
1975         }
1976         /*
1977          * Check and set flags if this segment has a current value.
1978          * Not true if we're inserting into the "hole" at eof.
1979          */
1980         if (STATE_SET_TEST(RIGHT_VALID,
1981                            idx <
1982                            ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) {
1983                 xfs_bmbt_get_all(ep, &right);
1984                 STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(right.br_startblock));
1985         }
1986         /*
1987          * We're inserting a real allocation between "left" and "right".
1988          * Set the contiguity flags.  Don't let extents get too large.
1989          */
1990         STATE_SET(LEFT_CONTIG,
1991                 STATE_TEST(LEFT_VALID) && !STATE_TEST(LEFT_DELAY) &&
1992                 left.br_startoff + left.br_blockcount == new->br_startoff &&
1993                 left.br_startblock + left.br_blockcount == new->br_startblock &&
1994                 left.br_state == new->br_state &&
1995                 left.br_blockcount + new->br_blockcount <= MAXEXTLEN);
1996         STATE_SET(RIGHT_CONTIG,
1997                 STATE_TEST(RIGHT_VALID) && !STATE_TEST(RIGHT_DELAY) &&
1998                 new->br_startoff + new->br_blockcount == right.br_startoff &&
1999                 new->br_startblock + new->br_blockcount ==
2000                     right.br_startblock &&
2001                 new->br_state == right.br_state &&
2002                 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
2003                 (!STATE_TEST(LEFT_CONTIG) ||
2004                  left.br_blockcount + new->br_blockcount +
2005                      right.br_blockcount <= MAXEXTLEN));
2006
2007         /*
2008          * Select which case we're in here, and implement it.
2009          */
2010         switch (SWITCH_STATE) {
2011
2012         case MASK2(LEFT_CONTIG, RIGHT_CONTIG):
2013                 /*
2014                  * New allocation is contiguous with real allocations on the
2015                  * left and on the right.
2016                  * Merge all three into a single extent list entry.
2017                  */
2018                 xfs_bmap_trace_pre_update(fname, "LC|RC", ip, idx - 1,
2019                         whichfork);
2020                 xfs_bmbt_set_blockcount(ep - 1,
2021                         left.br_blockcount + new->br_blockcount +
2022                         right.br_blockcount);
2023                 xfs_bmap_trace_post_update(fname, "LC|RC", ip, idx - 1,
2024                         whichfork);
2025                 xfs_bmap_trace_delete(fname, "LC|RC", ip,
2026                         idx, 1, whichfork);
2027                 xfs_bmap_delete_exlist(ip, idx, 1, whichfork);
2028                 ifp->if_lastex = idx - 1;
2029                 XFS_IFORK_NEXT_SET(ip, whichfork,
2030                         XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
2031                 if (cur == NULL) {
2032                         *logflagsp = XFS_ILOG_CORE | XFS_ILOG_FEXT(whichfork);
2033                         return 0;
2034                 }
2035                 *logflagsp = XFS_ILOG_CORE;
2036                 if ((error = xfs_bmbt_lookup_eq(cur, right.br_startoff,
2037                                 right.br_startblock, right.br_blockcount, &i)))
2038                         return error;
2039                 ASSERT(i == 1);
2040                 if ((error = xfs_bmbt_delete(cur, &i)))
2041                         return error;
2042                 ASSERT(i == 1);
2043                 if ((error = xfs_bmbt_decrement(cur, 0, &i)))
2044                         return error;
2045                 ASSERT(i == 1);
2046                 error = xfs_bmbt_update(cur, left.br_startoff,
2047                                 left.br_startblock,
2048                                 left.br_blockcount + new->br_blockcount +
2049                                 right.br_blockcount, left.br_state);
2050                 return error;
2051
2052         case MASK(LEFT_CONTIG):
2053                 /*
2054                  * New allocation is contiguous with a real allocation
2055                  * on the left.
2056                  * Merge the new allocation with the left neighbor.
2057                  */
2058                 xfs_bmap_trace_pre_update(fname, "LC", ip, idx - 1, whichfork);
2059                 xfs_bmbt_set_blockcount(ep - 1,
2060                         left.br_blockcount + new->br_blockcount);
2061                 xfs_bmap_trace_post_update(fname, "LC", ip, idx - 1, whichfork);
2062                 ifp->if_lastex = idx - 1;
2063                 if (cur == NULL) {
2064                         *logflagsp = XFS_ILOG_FEXT(whichfork);
2065                         return 0;
2066                 }
2067                 *logflagsp = 0;
2068                 if ((error = xfs_bmbt_lookup_eq(cur, left.br_startoff,
2069                                 left.br_startblock, left.br_blockcount, &i)))
2070                         return error;
2071                 ASSERT(i == 1);
2072                 error = xfs_bmbt_update(cur, left.br_startoff,
2073                                 left.br_startblock,
2074                                 left.br_blockcount + new->br_blockcount,
2075                                 left.br_state);
2076                 return error;
2077
2078         case MASK(RIGHT_CONTIG):
2079                 /*
2080                  * New allocation is contiguous with a real allocation
2081                  * on the right.
2082                  * Merge the new allocation with the right neighbor.
2083                  */
2084                 xfs_bmap_trace_pre_update(fname, "RC", ip, idx, whichfork);
2085                 xfs_bmbt_set_allf(ep, new->br_startoff, new->br_startblock,
2086                         new->br_blockcount + right.br_blockcount,
2087                         right.br_state);
2088                 xfs_bmap_trace_post_update(fname, "RC", ip, idx, whichfork);
2089                 ifp->if_lastex = idx;
2090                 if (cur == NULL) {
2091                         *logflagsp = XFS_ILOG_FEXT(whichfork);
2092                         return 0;
2093                 }
2094                 *logflagsp = 0;
2095                 if ((error = xfs_bmbt_lookup_eq(cur, right.br_startoff,
2096                                 right.br_startblock, right.br_blockcount, &i)))
2097                         return error;
2098                 ASSERT(i == 1);
2099                 error = xfs_bmbt_update(cur, new->br_startoff,
2100                                 new->br_startblock,
2101                                 new->br_blockcount + right.br_blockcount,
2102                                 right.br_state);
2103                 return error;
2104
2105         case 0:
2106                 /*
2107                  * New allocation is not contiguous with another
2108                  * real allocation.
2109                  * Insert a new entry.
2110                  */
2111                 xfs_bmap_trace_insert(fname, "0", ip, idx, 1, new, NULL,
2112                         whichfork);
2113                 xfs_bmap_insert_exlist(ip, idx, 1, new, whichfork);
2114                 ifp->if_lastex = idx;
2115                 XFS_IFORK_NEXT_SET(ip, whichfork,
2116                         XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
2117                 if (cur == NULL) {
2118                         *logflagsp = XFS_ILOG_CORE | XFS_ILOG_FEXT(whichfork);
2119                         return 0;
2120                 }
2121                 *logflagsp = XFS_ILOG_CORE;
2122                 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
2123                                 new->br_startblock, new->br_blockcount, &i)))
2124                         return error;
2125                 ASSERT(i == 0);
2126                 cur->bc_rec.b.br_state = new->br_state;
2127                 if ((error = xfs_bmbt_insert(cur, &i)))
2128                         return error;
2129                 ASSERT(i == 1);
2130                 return 0;
2131         }
2132 #undef  MASK
2133 #undef  MASK2
2134 #undef  STATE_SET
2135 #undef  STATE_TEST
2136 #undef  STATE_SET_TEST
2137 #undef  SWITCH_STATE
2138         /* NOTREACHED */
2139         ASSERT(0);
2140                 return 0; /* keep gcc quiet */
2141 }
2142
2143 #define XFS_ALLOC_GAP_UNITS     4
2144
2145 /*
2146  * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
2147  * It figures out where to ask the underlying allocator to put the new extent.
2148  */
2149 STATIC int                              /* error */
2150 xfs_bmap_alloc(
2151         xfs_bmalloca_t  *ap)            /* bmap alloc argument struct */
2152 {
2153         xfs_fsblock_t   adjust;         /* adjustment to block numbers */
2154         xfs_alloctype_t atype=0;        /* type for allocation routines */
2155         int             error;          /* error return value */
2156         xfs_agnumber_t  fb_agno;        /* ag number of ap->firstblock */
2157         xfs_mount_t     *mp;            /* mount point structure */
2158         int             nullfb;         /* true if ap->firstblock isn't set */
2159         int             rt;             /* true if inode is realtime */
2160 #ifdef __KERNEL__
2161         xfs_extlen_t    prod=0;         /* product factor for allocators */
2162         xfs_extlen_t    ralen=0;        /* realtime allocation length */
2163 #endif
2164
2165 #define ISVALID(x,y)    \
2166         (rt ? \
2167                 (x) < mp->m_sb.sb_rblocks : \
2168                 XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
2169                 XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
2170                 XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)
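/*
 * ISVALID(x, y) decides whether block x may be used relative to block y:
 * for realtime files x only has to lie within the realtime area; otherwise
 * x must sit in the same allocation group as y and within that AG's valid
 * block range.
 */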
2171
2172         /*
2173          * Set up variables.
2174          */
2175         mp = ap->ip->i_mount;
2176         nullfb = ap->firstblock == NULLFSBLOCK;
2177         rt = XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata;
2178         fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, ap->firstblock);
2179 #ifdef __KERNEL__
2180         if (rt) {
2181                 xfs_extlen_t    extsz;          /* file extent size for rt */
2182                 xfs_fileoff_t   nexto;          /* next file offset */
2183                 xfs_extlen_t    orig_alen;      /* original ap->alen */
2184                 xfs_fileoff_t   orig_end;       /* original off+len */
2185                 xfs_fileoff_t   orig_off;       /* original ap->off */
2186                 xfs_extlen_t    mod_off;        /* modulus calculations */
2187                 xfs_fileoff_t   prevo;          /* previous file offset */
2188                 xfs_rtblock_t   rtx;            /* realtime extent number */
2189                 xfs_extlen_t    temp;           /* temp for rt calculations */
2190
2191                 /*
2192                  * Set prod to match the realtime extent size.
2193                  */
2194                 if (!(extsz = ap->ip->i_d.di_extsize))
2195                         extsz = mp->m_sb.sb_rextsize;
2196                 prod = extsz / mp->m_sb.sb_rextsize;
2197                 orig_off = ap->off;
2198                 orig_alen = ap->alen;
2199                 orig_end = orig_off + orig_alen;
2200                 /*
2201                  * If the file offset is not aligned to the extent size
2202                  * we need to align it.  This will be possible unless
2203                  * the file was previously written with a kernel that didn't
2204                  * perform this alignment.
2205                  */
2206                 mod_off = do_mod(orig_off, extsz);
2207                 if (mod_off) {
2208                         ap->alen += mod_off;
2209                         ap->off -= mod_off;
2210                 }
2211                 /*
2212                  * Same adjustment for the end of the requested area.
2213                  */
2214                 if ((temp = (ap->alen % extsz)))
2215                         ap->alen += extsz - temp;
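                /*
                 * For example, with extsz = 4 and a request of off = 5,
                 * alen = 6: mod_off = 1, so off becomes 4 and alen 7; the
                 * end is then rounded up by 1 to alen = 8, covering file
                 * blocks 4..11, a superset of the original 5..10.
                 */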
2216                 /*
2217                  * If the previous block overlaps with this proposed allocation
2218                  * then move the start forward without adjusting the length.
2219                  */
2220                 prevo =
2221                         ap->prevp->br_startoff == NULLFILEOFF ?
2222                                 0 :
2223                                 (ap->prevp->br_startoff +
2224                                  ap->prevp->br_blockcount);
2225                 if (ap->off != orig_off && ap->off < prevo)
2226                         ap->off = prevo;
2227                 /*
2228                  * If the next block overlaps with this proposed allocation
2229                  * then move the start back without adjusting the length,
2230                  * but not before offset 0.
2231                  * This may of course make the start overlap the previous block,
2232                  * and if we hit the offset 0 limit then the next block
2233                  * can still overlap too.
2234                  */
2235                 nexto = (ap->eof || ap->gotp->br_startoff == NULLFILEOFF) ?
2236                         NULLFILEOFF : ap->gotp->br_startoff;
2237                 if (!ap->eof &&
2238                     ap->off + ap->alen != orig_end &&
2239                     ap->off + ap->alen > nexto)
2240                         ap->off = nexto > ap->alen ? nexto - ap->alen : 0;
2241                 /*
2242                  * If we're now overlapping the next or previous extent that
2243                  * means we can't fit an extsz piece in this hole.  Just move
2244                  * the start forward to the first valid spot and set
2245                  * the length so we hit the end.
2246                  */
2247                 if ((ap->off != orig_off && ap->off < prevo) ||
2248                     (ap->off + ap->alen != orig_end &&
2249                      ap->off + ap->alen > nexto)) {
2250                         ap->off = prevo;
2251                         ap->alen = nexto - prevo;
2252                 }
2253                 /*
2254                  * If the result isn't a multiple of rtextents we need to
2255                  * remove blocks until it is.
2256                  */
2257                 if ((temp = (ap->alen % mp->m_sb.sb_rextsize))) {
2258                         /*
2259                          * We're not covering the original request, or
2260                          * we won't be able to once we fix the length.
2261                          */
2262                         if (orig_off < ap->off ||
2263                             orig_end > ap->off + ap->alen ||
2264                             ap->alen - temp < orig_alen)
2265                                 return XFS_ERROR(EINVAL);
2266                         /*
2267                          * Try to fix it by moving the start up.
2268                          */
2269                         if (ap->off + temp <= orig_off) {
2270                                 ap->alen -= temp;
2271                                 ap->off += temp;
2272                         }
2273                         /*
2274                          * Try to fix it by moving the end in.
2275                          */
2276                         else if (ap->off + ap->alen - temp >= orig_end)
2277                                 ap->alen -= temp;
2278                         /*
2279                          * Set the start to the minimum then trim the length.
2280                          */
2281                         else {
2282                                 ap->alen -= orig_off - ap->off;
2283                                 ap->off = orig_off;
2284                                 ap->alen -= ap->alen % mp->m_sb.sb_rextsize;
2285                         }
2286                         /*
2287                          * Result doesn't cover the request, fail it.
2288                          */
2289                         if (orig_off < ap->off || orig_end > ap->off + ap->alen)
2290                                 return XFS_ERROR(EINVAL);
2291                 }
2292                 ASSERT(ap->alen % mp->m_sb.sb_rextsize == 0);
2293                 /*
2294                  * If the offset & length are not perfectly aligned
2295                  * then kill prod, it will just get us in trouble.
2296                  */
2297                 if (do_mod(ap->off, extsz) || ap->alen % extsz)
2298                         prod = 1;
2299                 /*
2300                  * Set ralen to be the actual requested length in rtextents.
2301                  */
2302                 ralen = ap->alen / mp->m_sb.sb_rextsize;
2303                 /*
2304                  * If the old value was close enough to MAXEXTLEN that
2305                  * we rounded up to it, cut it back so it's valid again.
2306                  * Note that for a really large request (bigger than
2307                  * MAXEXTLEN) the original length is never seen here,
2308                  * so we can't adjust the starting point to match it.
2309                  */
2310                 if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
2311                         ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
2312                 /*
2313                  * If it's an allocation to an empty file at offset 0,
2314                  * pick an extent that will space things out in the rt area.
2315                  */
2316                 if (ap->eof && ap->off == 0) {
2317                         error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
2318                         if (error)
2319                                 return error;
2320                         ap->rval = rtx * mp->m_sb.sb_rextsize;
2321                 } else
2322                         ap->rval = 0;
2323         }
2324 #else
2325         if (rt)
2326                 ap->rval = 0;
2327 #endif  /* __KERNEL__ */
2328         else if (nullfb)
2329                 ap->rval = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
2330         else
2331                 ap->rval = ap->firstblock;
2332         /*
2333          * If allocating at eof, and there's a previous real block,
2334          * try to use its last block as our starting point.
2335          */
2336         if (ap->eof && ap->prevp->br_startoff != NULLFILEOFF &&
2337             !ISNULLSTARTBLOCK(ap->prevp->br_startblock) &&
2338             ISVALID(ap->prevp->br_startblock + ap->prevp->br_blockcount,
2339                     ap->prevp->br_startblock)) {
2340                 ap->rval = ap->prevp->br_startblock + ap->prevp->br_blockcount;
2341                 /*
2342                  * Adjust for the gap between prevp and us.
2343                  */
2344                 adjust = ap->off -
2345                         (ap->prevp->br_startoff + ap->prevp->br_blockcount);
2346                 if (adjust &&
2347                     ISVALID(ap->rval + adjust, ap->prevp->br_startblock))
2348                         ap->rval += adjust;
2349         }
2350         /*
2351          * If not at eof, then compare the two neighbor blocks.
2352          * Figure out whether either one gives us a good starting point,
2353          * and pick the better one.
2354          */
2355         else if (!ap->eof) {
2356                 xfs_fsblock_t   gotbno;         /* right side block number */
2357                 xfs_fsblock_t   gotdiff=0;      /* right side difference */
2358                 xfs_fsblock_t   prevbno;        /* left side block number */
2359                 xfs_fsblock_t   prevdiff=0;     /* left side difference */
2360
2361                 /*
2362                  * If there's a previous (left) block, select a requested
2363                  * start block based on it.
2364                  */
2365                 if (ap->prevp->br_startoff != NULLFILEOFF &&
2366                     !ISNULLSTARTBLOCK(ap->prevp->br_startblock) &&
2367                     (prevbno = ap->prevp->br_startblock +
2368                                ap->prevp->br_blockcount) &&
2369                     ISVALID(prevbno, ap->prevp->br_startblock)) {
2370                         /*
2371                          * Calculate gap to end of previous block.
2372                          */
2373                         adjust = prevdiff = ap->off -
2374                                 (ap->prevp->br_startoff +
2375                                  ap->prevp->br_blockcount);
2376                         /*
2377                          * Figure the startblock based on the previous block's
2378                          * end and the gap size.
2379                          * Heuristic!
2380                          * If the gap is large relative to the piece we're
2381                          * allocating, or using it gives us an invalid block
2382                          * number, then just use the end of the previous block.
2383                          */
2384                         if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->alen &&
2385                             ISVALID(prevbno + prevdiff,
2386                                     ap->prevp->br_startblock))
2387                                 prevbno += adjust;
2388                         else
2389                                 prevdiff += adjust;
2390                         /*
2391                          * If the firstblock forbids it, can't use it,
2392                          * must use default.
2393                          */
2394                         if (!rt && !nullfb &&
2395                             XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno)
2396                                 prevbno = NULLFSBLOCK;
2397                 }
2398                 /*
2399                  * No previous block or can't follow it, just default.
2400                  */
2401                 else
2402                         prevbno = NULLFSBLOCK;
2403                 /*
2404                  * If there's a following (right) block, select a requested
2405                  * start block based on it.
2406                  */
2407                 if (!ISNULLSTARTBLOCK(ap->gotp->br_startblock)) {
2408                         /*
2409                          * Calculate gap to start of next block.
2410                          */
2411                         adjust = gotdiff = ap->gotp->br_startoff - ap->off;
2412                         /*
2413                          * Figure the startblock based on the next block's
2414                          * start and the gap size.
2415                          */
2416                         gotbno = ap->gotp->br_startblock;
2417                         /*
2418                          * Heuristic!
2419                          * If the gap is large relative to the piece we're
2420                          * allocating, or using it gives us an invalid block
2421                          * number, then just use the start of the next block
2422                          * offset by our length.
2423                          */
2424                         if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->alen &&
2425                             ISVALID(gotbno - gotdiff, gotbno))
2426                                 gotbno -= adjust;
2427                         else if (ISVALID(gotbno - ap->alen, gotbno)) {
2428                                 gotbno -= ap->alen;
2429                                 gotdiff += adjust - ap->alen;
2430                         } else
2431                                 gotdiff += adjust;
2432                         /*
2433                          * If the firstblock forbids it, can't use it,
2434                          * must use default.
2435                          */
2436                         if (!rt && !nullfb &&
2437                             XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno)
2438                                 gotbno = NULLFSBLOCK;
2439                 }
2440                 /*
2441                  * No next block, just default.
2442                  */
2443                 else
2444                         gotbno = NULLFSBLOCK;
2445                 /*
2446                  * If both valid, pick the better one, else the only good
2447                  * one, else ap->rval is already set (to 0 or the inode block).
2448                  */
2449                 if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK)
2450                         ap->rval = prevdiff <= gotdiff ? prevbno : gotbno;
2451                 else if (prevbno != NULLFSBLOCK)
2452                         ap->rval = prevbno;
2453                 else if (gotbno != NULLFSBLOCK)
2454                         ap->rval = gotbno;
2455         }
2456         /*
2457          * If allowed, use ap->rval; otherwise must use firstblock since
2458          * it's in the right allocation group.
2459          */
2460         if (nullfb || rt || XFS_FSB_TO_AGNO(mp, ap->rval) == fb_agno)
2461                 ;
2462         else
2463                 ap->rval = ap->firstblock;
2464         /*
2465          * Realtime allocation, done through xfs_rtallocate_extent.
2466          */
2467         if (rt) {
2468 #ifndef __KERNEL__
2469                 ASSERT(0);
2470 #else
2471                 xfs_rtblock_t   rtb;
2472
2473                 atype = ap->rval == 0 ?
2474                         XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO;
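                     /*
                      * Convert ap->rval from filesystem blocks to realtime
                      * extent units for xfs_rtallocate_extent; it is scaled
                      * back up to blocks once the allocation succeeds.
                      */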
2475                 do_div(ap->rval, mp->m_sb.sb_rextsize);
2476                 rtb = ap->rval;
2477                 ap->alen = ralen;
2478                 if ((error = xfs_rtallocate_extent(ap->tp, ap->rval, 1, ap->alen,
2479                                 &ralen, atype, ap->wasdel, prod, &rtb)))
2480                         return error;
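                     /*
                      * If the attempt with the extent size multiple
                      * (prod > 1) found nothing, retry without it.
                      */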
2481                 if (rtb == NULLFSBLOCK && prod > 1 &&
2482                     (error = xfs_rtallocate_extent(ap->tp, ap->rval, 1,
2483                                                    ap->alen, &ralen, atype,
2484                                                    ap->wasdel, 1, &rtb)))
2485                         return error;
2486                 ap->rval = rtb;
2487                 if (ap->rval != NULLFSBLOCK) {
2488                         ap->rval *= mp->m_sb.sb_rextsize;
2489                         ralen *= mp->m_sb.sb_rextsize;
2490                         ap->alen = ralen;
2491                         ap->ip->i_d.di_nblocks += ralen;
2492                         xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
2493                         if (ap->wasdel)
2494                                 ap->ip->i_delayed_blks -= ralen;
2495                         /*
2496                          * Adjust the disk quota also. This was reserved
2497                          * earlier.
2498                          */
2499                         XFS_TRANS_MOD_DQUOT_BYINO(mp, ap->tp, ap->ip,
2500                                 ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
2501                                                 XFS_TRANS_DQ_RTBCOUNT,
2502                                 (long) ralen);
2503                 } else
2504                         ap->alen = 0;
2505 #endif  /* __KERNEL__ */
2506         }
2507         /*
2508          * Normal allocation, done through xfs_alloc_vextent.
2509          */
2510         else {
2511                 xfs_agnumber_t  ag;
2512                 xfs_alloc_arg_t args;
2513                 xfs_extlen_t    blen;
2514                 xfs_extlen_t    delta;
2515                 int             isaligned;
2516                 xfs_extlen_t    longest;
2517                 xfs_extlen_t    need;
2518                 xfs_extlen_t    nextminlen=0;
2519                 int             notinit;
2520                 xfs_perag_t     *pag;
2521                 xfs_agnumber_t  startag;
2522                 int             tryagain;
2523
2524                 tryagain = isaligned = 0;
2525                 args.tp = ap->tp;
2526                 args.mp = mp;
2527                 args.fsbno = ap->rval;
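                     /*
                      * A single extent can never span allocation groups,
                      * so cap the requested length at the AG size.
                      */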
2528                 args.maxlen = MIN(ap->alen, mp->m_sb.sb_agblocks);
2529                 blen = 0;
2530                 if (nullfb) {
2531                         args.type = XFS_ALLOCTYPE_START_BNO;
2532                         args.total = ap->total;
2533                         /*
2534                          * Find the longest available space.
2535                          * We're going to try for the whole allocation at once.
2536                          */
2537                         startag = ag = XFS_FSB_TO_AGNO(mp, args.fsbno);
2538                         notinit = 0;
2539                         down_read(&mp->m_peraglock);
2540                         while (blen < ap->alen) {
2541                                 pag = &mp->m_perag[ag];
2542                                 if (!pag->pagf_init &&
2543                                     (error = xfs_alloc_pagf_init(mp, args.tp,
2544                                             ag, XFS_ALLOC_FLAG_TRYLOCK))) {
2545                                         up_read(&mp->m_peraglock);
2546                                         return error;
2547                                 }
2548                                 /*
2549                                  * See xfs_alloc_fix_freelist: the longest free
                                 * extent is discounted by any blocks needed to
                                 * refill the AG freelist.
2550                                  */
2551                                 if (pag->pagf_init) {
2552                                         need = XFS_MIN_FREELIST_PAG(pag, mp);
2553                                         delta = need > pag->pagf_flcount ?
2554                                                 need - pag->pagf_flcount : 0;
2555                                         longest = (pag->pagf_longest > delta) ?
2556                                                 (pag->pagf_longest - delta) :
2557                                                 (pag->pagf_flcount > 0 ||
2558                                                  pag->pagf_longest > 0);
2559                                         if (blen < longest)
2560                                                 blen = longest;
2561                                 } else
2562                                         notinit = 1;
2563                                 if (++ag == mp->m_sb.sb_agcount)
2564                                         ag = 0;
2565                                 if (ag == startag)
2566                                         break;
2567                         }
2568                         up_read(&mp->m_peraglock);
2569                         /*
2570                          * Since the above loop used BUF_TRYLOCK, some AGs may not
2571                          * have been examined, so there may still be space for this request.
2572                          */
2573                         if (notinit || blen < ap->minlen)
2574                                 args.minlen = ap->minlen;
2575                         /*
2576                          * If the best seen length is less than the request
2577                          * length, use the best as the minimum.
2578                          */
2579                         else if (blen < ap->alen)
2580                                 args.minlen = blen;
2581                         /*
2582                          * Otherwise we've seen an extent as big as alen,
2583                          * use that as the minimum.
2584                          */
2585                         else
2586                                 args.minlen = ap->alen;
2587                 } else if (ap->low) {
2588                         args.type = XFS_ALLOCTYPE_FIRST_AG;
2589                         args.total = args.minlen = ap->minlen;
2590                 } else {
2591                         args.type = XFS_ALLOCTYPE_NEAR_BNO;
2592                         args.total = ap->total;
2593                         args.minlen = ap->minlen;
2594                 }
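                     /*
                      * Set up the allocation unit (prod) and offset modulus
                      * (mod) so the result lines up with the inode's extent
                      * size hint, or with the page size on small-block
                      * filesystems.
                      */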
2595                 if (ap->ip->i_d.di_extsize) {
2596                         args.prod = ap->ip->i_d.di_extsize;
2597                         if ((args.mod = (xfs_extlen_t)do_mod(ap->off, args.prod)))
2598                                 args.mod = (xfs_extlen_t)(args.prod - args.mod);
2599                 } else if (mp->m_sb.sb_blocksize >= NBPP) {
2600                         args.prod = 1;
2601                         args.mod = 0;
2602                 } else {
2603                         args.prod = NBPP >> mp->m_sb.sb_blocklog;
2604                         if ((args.mod = (xfs_extlen_t)(do_mod(ap->off, args.prod))))
2605                                 args.mod = (xfs_extlen_t)(args.prod - args.mod);
2606                 }
2607                 /*
2608                  * If we are not low on available data blocks, the
2609                  * underlying logical volume is striped, and we are
2610                  * allocating at end of file, try to allocate the data
2611                  * blocks on a stripe unit boundary.
2612                  * NOTE: ap->aeof is only set if the allocation length
2613                  * is >= the stripe unit and the allocation offset is
2614                  * at the end of file.
2615                  */
2616                 if (!ap->low && ap->aeof) {
2617                         if (!ap->off) {
2618                                 args.alignment = mp->m_dalign;
2619                                 atype = args.type;
2620                                 isaligned = 1;
2621                                 /*
2622                                  * Adjust for alignment
2623                                  */
2624                                 if (blen > args.alignment && blen <= ap->alen)
2625                                         args.minlen = blen - args.alignment;
2626                                 args.minalignslop = 0;
2627                         } else {
2628                                 /*
2629                                  * First try an exact bno allocation.
2630                                  * If it fails then do a near or start bno
2631                                  * allocation with alignment turned on.
2632                                  */
2633                                 atype = args.type;
2634                                 tryagain = 1;
2635                                 args.type = XFS_ALLOCTYPE_THIS_BNO;
2636                                 args.alignment = 1;
2637                                 /*
2638                                  * Compute the minlen+alignment for the
2639                                  * next case.  Set slop so that the value
2640                                  * of minlen+alignment+slop doesn't go up
2641                                  * between the calls.
2642                                  */
2643                                 if (blen > mp->m_dalign && blen <= ap->alen)
2644                                         nextminlen = blen - mp->m_dalign;
2645                                 else
2646                                         nextminlen = args.minlen;
2647                                 if (nextminlen + mp->m_dalign > args.minlen + 1)
2648                                         args.minalignslop =
2649                                                 nextminlen + mp->m_dalign -
2650                                                 args.minlen - 1;
2651                                 else
2652                                         args.minalignslop = 0;
2653                         }
2654                 } else {
2655                         args.alignment = 1;
2656                         args.minalignslop = 0;
2657                 }
2658                 args.minleft = ap->minleft;
2659                 args.wasdel = ap->wasdel;
2660                 args.isfl = 0;
2661                 args.userdata = ap->userdata;
2662                 if ((error = xfs_alloc_vextent(&args)))
2663                         return error;
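                     /*
                      * The retries below form a fallback chain: if the
                      * exact-bno attempt failed, retry with stripe alignment;
                      * if an aligned attempt failed, retry without alignment;
                      * then retry with a smaller minlen; finally fall back to
                      * the first AG with space and enter low-space mode.
                      */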
2664                 if (tryagain && args.fsbno == NULLFSBLOCK) {
2665                         /*
2666                          * Exact allocation failed. Now try with alignment
2667                          * turned on.
2668                          */
2669                         args.type = atype;
2670                         args.fsbno = ap->rval;
2671                         args.alignment = mp->m_dalign;
2672                         args.minlen = nextminlen;
2673                         args.minalignslop = 0;
2674                         isaligned = 1;
2675                         if ((error = xfs_alloc_vextent(&args)))
2676                                 return error;
2677                 }
2678                 if (isaligned && args.fsbno == NULLFSBLOCK) {
2679                         /*
2680                          * allocation failed, so turn off alignment and
2681                          * Aligned allocation failed, so turn off alignment and
2682                          */
2683                         args.type = atype;
2684                         args.fsbno = ap->rval;
2685                         args.alignment = 0;
2686                         if ((error = xfs_alloc_vextent(&args)))
2687                                 return error;
2688                 }
2689                 if (args.fsbno == NULLFSBLOCK && nullfb &&
2690                     args.minlen > ap->minlen) {
2691                         args.minlen = ap->minlen;
2692                         args.type = XFS_ALLOCTYPE_START_BNO;
2693                         args.fsbno = ap->rval;
2694                         if ((error = xfs_alloc_vextent(&args)))
2695                                 return error;
2696                 }
2697                 if (args.fsbno == NULLFSBLOCK && nullfb) {
2698                         args.fsbno = 0;
2699                         args.type = XFS_ALLOCTYPE_FIRST_AG;
2700                         args.total = ap->minlen;
2701                         args.minleft = 0;
2702                         if ((error = xfs_alloc_vextent(&args)))
2703                                 return error;
2704                         ap->low = 1;
2705                 }
2706                 if (args.fsbno != NULLFSBLOCK) {
2707                         ap->firstblock = ap->rval = args.fsbno;
2708                         ASSERT(nullfb || fb_agno == args.agno ||
2709                                (ap->low && fb_agno < args.agno));
2710                         ap->alen = args.len;
2711                         ap->ip->i_d.di_nblocks += args.len;
2712                         xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
2713                         if (ap->wasdel)
2714                                 ap->ip->i_delayed_blks -= args.len;
2715                         /*
2716                          * Adjust the disk quota also. This was reserved
2717                          * earlier.
2718                          */
2719                         XFS_TRANS_MOD_DQUOT_BYINO(mp, ap->tp, ap->ip,
2720                                 ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT :
2721                                                 XFS_TRANS_DQ_BCOUNT,
2722                                 (long) args.len);
2723                 } else {
2724                         ap->rval = NULLFSBLOCK;
2725                         ap->alen = 0;
2726                 }
2727         }
2728         return 0;
2729 #undef  ISVALID
2730 }
2731
2732 /*
2733  * Transform a btree format file with only one leaf node, where the
2734  * extents list will fit in the inode, into an extents format file.
2735  * Since the extent list is already in-core, all we have to do is
2736  * give up the space for the btree root and pitch the leaf block.
2737  */
2738 STATIC int                              /* error */
2739 xfs_bmap_btree_to_extents(
2740         xfs_trans_t             *tp,    /* transaction pointer */
2741         xfs_inode_t             *ip,    /* incore inode pointer */
2742         xfs_btree_cur_t         *cur,   /* btree cursor */
2743         int                     *logflagsp, /* inode logging flags */
2744         int                     whichfork)  /* data or attr fork */
2745 {
2746         /* REFERENCED */
2747         xfs_bmbt_block_t        *cblock;/* child btree block */
2748         xfs_fsblock_t           cbno;   /* child block number */
2749         xfs_buf_t               *cbp;   /* child block's buffer */
2750         int                     error;  /* error return value */
2751         xfs_ifork_t             *ifp;   /* inode fork data */
2752         xfs_mount_t             *mp;    /* mount point structure */
2753         xfs_bmbt_ptr_t          *pp;    /* ptr to block address */
2754         xfs_bmbt_block_t        *rblock;/* root btree block */
2755
2756         ifp = XFS_IFORK_PTR(ip, whichfork);
2757         ASSERT(ifp->if_flags & XFS_IFEXTENTS);
2758         ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
2759         rblock = ifp->if_broot;
2760         ASSERT(INT_GET(rblock->bb_level, ARCH_CONVERT) == 1);
2761         ASSERT(INT_GET(rblock->bb_numrecs, ARCH_CONVERT) == 1);
2762         ASSERT(XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes) == 1);
2763         mp = ip->i_mount;
2764         pp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, ifp->if_broot_bytes);
2765         *logflagsp = 0;
2766 #ifdef DEBUG
2767         if ((error = xfs_btree_check_lptr(cur, INT_GET(*pp, ARCH_CONVERT), 1)))
2768                 return error;
2769 #endif
2770         cbno = INT_GET(*pp, ARCH_CONVERT);
2771         if ((error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp,
2772                         XFS_BMAP_BTREE_REF)))
2773                 return error;
2774         cblock = XFS_BUF_TO_BMBT_BLOCK(cbp);
2775         if ((error = xfs_btree_check_lblock(cur, cblock, 0, cbp)))
2776                 return error;
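             /*
              * Queue the former child block for freeing, back out its
              * block and quota accounting, and invalidate its buffer.
              */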
2777         xfs_bmap_add_free(cbno, 1, cur->bc_private.b.flist, mp);
2778         ip->i_d.di_nblocks--;
2779         XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
2780         xfs_trans_binval(tp, cbp);
2781         if (cur->bc_bufs[0] == cbp)
2782                 cur->bc_bufs[0] = NULL;
2783         xfs_iroot_realloc(ip, -1, whichfork);
2784         ASSERT(ifp->if_broot == NULL);
2785         ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
2786         XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
2787         *logflagsp = XFS_ILOG_CORE | XFS_ILOG_FEXT(whichfork);
2788         return 0;
2789 }
2790
2791 /*
2792  * Called by xfs_bmapi to update extent list structure and the btree
2793  * after removing space (or undoing a delayed allocation).
2794  */
2795 STATIC int                              /* error */
2796 xfs_bmap_del_extent(
2797         xfs_inode_t             *ip,    /* incore inode pointer */
2798         xfs_trans_t             *tp,    /* current transaction pointer */
2799         xfs_extnum_t            idx,    /* extent number to update/delete */
2800         xfs_bmap_free_t         *flist, /* list of extents to be freed */
2801         xfs_btree_cur_t         *cur,   /* if null, not a btree */
2802         xfs_bmbt_irec_t         *del,   /* data to remove from extent list */
2803         int                     *logflagsp, /* inode logging flags */
2804         int                     whichfork, /* data or attr fork */
2805         int                     rsvd)   /* OK to allocate reserved blocks */
2806 {
2807         xfs_filblks_t           da_new; /* new delay-alloc indirect blocks */
2808         xfs_filblks_t           da_old; /* old delay-alloc indirect blocks */
2809         xfs_fsblock_t           del_endblock=0; /* first block past del */
2810         xfs_fileoff_t           del_endoff;     /* first offset past del */
2811         int                     delay;  /* current block is delayed allocated */
2812         int                     do_fx;  /* free extent at end of routine */
2813         xfs_bmbt_rec_t          *ep;    /* current extent entry pointer */
2814         int                     error;  /* error return value */
2815         int                     flags;  /* inode logging flags */
2816 #ifdef XFS_BMAP_TRACE
2817         static char             fname[] = "xfs_bmap_del_extent";
2818 #endif
2819         xfs_bmbt_irec_t         got;    /* current extent entry */
2820         xfs_fileoff_t           got_endoff;     /* first offset past got */
2821         int                     i;      /* temp state */
2822         xfs_ifork_t             *ifp;   /* inode fork pointer */
2823         xfs_mount_t             *mp;    /* mount structure */
2824         xfs_filblks_t           nblks;  /* quota/sb block count */
2825         xfs_bmbt_irec_t         new;    /* new record to be inserted */
2826         /* REFERENCED */
2827         xfs_extnum_t            nextents;       /* number of extents in list */
2828         uint                    qfield; /* quota field to update */
2829         xfs_filblks_t           temp;   /* for indirect length calculations */
2830         xfs_filblks_t           temp2;  /* for indirect length calculations */
2831
2832         XFS_STATS_INC(xs_del_exlist);
2833         mp = ip->i_mount;
2834         ifp = XFS_IFORK_PTR(ip, whichfork);
2835         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
2836         ASSERT(idx >= 0 && idx < nextents);
2837         ASSERT(del->br_blockcount > 0);
2838         ep = &ifp->if_u1.if_extents[idx];
2839         xfs_bmbt_get_all(ep, &got);
2840         ASSERT(got.br_startoff <= del->br_startoff);
2841         del_endoff = del->br_startoff + del->br_blockcount;
2842         got_endoff = got.br_startoff + got.br_blockcount;
2843         ASSERT(got_endoff >= del_endoff);
2844         delay = ISNULLSTARTBLOCK(got.br_startblock);
2845         ASSERT(ISNULLSTARTBLOCK(del->br_startblock) == delay);
2846         flags = 0;
2847         qfield = 0;
2848         error = 0;
2849         /*
2850          * If deleting a real allocation, must free up the disk space.
2851          */
2852         if (!delay) {
2853                 flags = XFS_ILOG_CORE;
2854                 /*
2855                  * Realtime allocation.  Free it and record di_nblocks update.
2856                  */
2857                 if (whichfork == XFS_DATA_FORK &&
2858                     (ip->i_d.di_flags & XFS_DIFLAG_REALTIME)) {
2859                         xfs_fsblock_t   bno;
2860                         xfs_filblks_t   len;
2861
2862                         ASSERT(do_mod(del->br_blockcount,
2863                                       mp->m_sb.sb_rextsize) == 0);
2864                         ASSERT(do_mod(del->br_startblock,
2865                                       mp->m_sb.sb_rextsize) == 0);
2866                         bno = del->br_startblock;
2867                         len = del->br_blockcount;
2868                         do_div(bno, mp->m_sb.sb_rextsize);
2869                         do_div(len, mp->m_sb.sb_rextsize);
2870                         if ((error = xfs_rtfree_extent(ip->i_transp, bno,
2871                                         (xfs_extlen_t)len)))
2872                                 goto done;
2873                         do_fx = 0;
2874                         nblks = len * mp->m_sb.sb_rextsize;
2875                         qfield = XFS_TRANS_DQ_RTBCOUNT;
2876                 }
2877                 /*
2878                  * Ordinary allocation.
2879                  */
2880                 else {
2881                         do_fx = 1;
2882                         nblks = del->br_blockcount;
2883                         qfield = XFS_TRANS_DQ_BCOUNT;
2884                 }
2885                 /*
2886                  * Set up del_endblock and cur for later.
2887                  */
2888                 del_endblock = del->br_startblock + del->br_blockcount;
2889                 if (cur) {
2890                         if ((error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
2891                                         got.br_startblock, got.br_blockcount,
2892                                         &i)))
2893                                 goto done;
2894                         ASSERT(i == 1);
2895                 }
2896                 da_old = da_new = 0;
2897         } else {
2898                 da_old = STARTBLOCKVAL(got.br_startblock);
2899                 da_new = 0;
2900                 nblks = 0;
2901                 do_fx = 0;
2902         }
2903         /*
2904          * Set flag value to use in switch statement:
2905          * bit 1 (value 2) is set if the deletion starts at the start of
              * the extent, bit 0 (value 1) if it ends at the end of the extent.
2906          */
2907         switch (((got.br_startoff == del->br_startoff) << 1) |
2908                 (got_endoff == del_endoff)) {
2909         case 3:
2910                 /*
2911                  * Matches the whole extent.  Delete the entry.
2912                  */
2913                 xfs_bmap_trace_delete(fname, "3", ip, idx, 1, whichfork);
2914                 xfs_bmap_delete_exlist(ip, idx, 1, whichfork);
2915                 ifp->if_lastex = idx;
2916                 if (delay)
2917                         break;
2918                 XFS_IFORK_NEXT_SET(ip, whichfork,
2919                         XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
2920                 flags |= XFS_ILOG_CORE;
2921                 if (!cur) {
2922                         flags |= XFS_ILOG_FEXT(whichfork);
2923                         break;
2924                 }
2925                 if ((error = xfs_bmbt_delete(cur, &i)))
2926                         goto done;
2927                 ASSERT(i == 1);
2928                 break;
2929
2930         case 2:
2931                 /*
2932                  * Deleting the first part of the extent.
2933                  */
2934                 xfs_bmap_trace_pre_update(fname, "2", ip, idx, whichfork);
2935                 xfs_bmbt_set_startoff(ep, del_endoff);
2936                 temp = got.br_blockcount - del->br_blockcount;
2937                 xfs_bmbt_set_blockcount(ep, temp);
2938                 ifp->if_lastex = idx;
2939                 if (delay) {
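                             /*
                              * Recompute the worst-case indirect block
                              * reservation for the shortened delayed extent,
                              * capped at the old reservation.
                              */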
2940                         temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2941                                 da_old);
2942                         xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
2943                         xfs_bmap_trace_post_update(fname, "2", ip, idx,
2944                                 whichfork);
2945                         da_new = temp;
2946                         break;
2947                 }
2948                 xfs_bmbt_set_startblock(ep, del_endblock);
2949                 xfs_bmap_trace_post_update(fname, "2", ip, idx, whichfork);
2950                 if (!cur) {
2951                         flags |= XFS_ILOG_FEXT(whichfork);
2952                         break;
2953                 }
2954                 if ((error = xfs_bmbt_update(cur, del_endoff, del_endblock,
2955                                 got.br_blockcount - del->br_blockcount,
2956                                 got.br_state)))
2957                         goto done;
2958                 break;
2959
2960         case 1:
2961                 /*
2962                  * Deleting the last part of the extent.
2963                  */
2964                 temp = got.br_blockcount - del->br_blockcount;
2965                 xfs_bmap_trace_pre_update(fname, "1", ip, idx, whichfork);
2966                 xfs_bmbt_set_blockcount(ep, temp);
2967                 ifp->if_lastex = idx;
2968                 if (delay) {
2969                         temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2970                                 da_old);
2971                         xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
2972                         xfs_bmap_trace_post_update(fname, "1", ip, idx,
2973                                 whichfork);
2974                         da_new = temp;
2975                         break;
2976                 }
2977                 xfs_bmap_trace_post_update(fname, "1", ip, idx, whichfork);
2978                 if (!cur) {
2979                         flags |= XFS_ILOG_FEXT(whichfork);
2980                         break;
2981                 }
2982                 if ((error = xfs_bmbt_update(cur, got.br_startoff,
2983                                 got.br_startblock,
2984                                 got.br_blockcount - del->br_blockcount,
2985                                 got.br_state)))
2986                         goto done;
2987                 break;
2988
2989         case 0:
2990                 /*
2991                  * Deleting the middle of the extent.
2992                  */
2993                 temp = del->br_startoff - got.br_startoff;
2994                 xfs_bmap_trace_pre_update(fname, "0", ip, idx, whichfork);
2995                 xfs_bmbt_set_blockcount(ep, temp);
2996                 new.br_startoff = del_endoff;
2997                 temp2 = got_endoff - del_endoff;
2998                 new.br_blockcount = temp2;
2999                 new.br_state = got.br_state;
3000                 if (!delay) {
3001                         new.br_startblock = del_endblock;
3002                         flags |= XFS_ILOG_CORE;
3003                         if (cur) {
3004                                 if ((error = xfs_bmbt_update(cur,
3005                                                 got.br_startoff,
3006                                                 got.br_startblock, temp,
3007                                                 got.br_state)))
3008                                         goto done;
3009                                 if ((error = xfs_bmbt_increment(cur, 0, &i)))
3010                                         goto done;
3011                                 cur->bc_rec.b = new;
3012                                 error = xfs_bmbt_insert(cur, &i);
3013                                 if (error && error != ENOSPC)
3014                                         goto done;
3015                                 /*
3016                                  * If we get no-space back from the btree insert,
3017                                  * it tried a split, and we have a zero
3018                                  * block reservation.
3019                                  * Fix up our state and return the error.
3020                                  */
3021                                 if (error == ENOSPC) {
3022                                         /*
3023                                          * Reset the cursor, don't trust
3024                                          * it after any insert operation.
3025                                          */
3026                                         if ((error = xfs_bmbt_lookup_eq(cur,
3027                                                         got.br_startoff,
3028                                                         got.br_startblock,
3029                                                         temp, &i)))
3030                                                 goto done;
3031                                         ASSERT(i == 1);
3032                                         /*
3033                                          * Update the btree record back
3034                                          * to the original value.
3035                                          */
3036                                         if ((error = xfs_bmbt_update(cur,
3037                                                         got.br_startoff,
3038                                                         got.br_startblock,
3039                                                         got.br_blockcount,
3040                                                         got.br_state)))
3041                                                 goto done;
3042                                         /*
3043                                          * Reset the extent record back
3044                                          * to the original value.
3045                                          */
3046                                         xfs_bmbt_set_blockcount(ep,
3047                                                 got.br_blockcount);
3048                                         flags = 0;
3049                                         error = XFS_ERROR(ENOSPC);
3050                                         goto done;
3051                                 }
3052                                 ASSERT(i == 1);
3053                         } else
3054                                 flags |= XFS_ILOG_FEXT(whichfork);
3055                         XFS_IFORK_NEXT_SET(ip, whichfork,
3056                                 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
3057                 } else {
3058                         ASSERT(whichfork == XFS_DATA_FORK);
3059                         temp = xfs_bmap_worst_indlen(ip, temp);
3060                         xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
3061                         temp2 = xfs_bmap_worst_indlen(ip, temp2);
3062                         new.br_startblock = NULLSTARTBLOCK((int)temp2);
3063                         da_new = temp + temp2;
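                             /*
                              * The two new delayed extents cannot reserve
                              * more indirect blocks than the original extent
                              * held; trim blocks from each in turn until the
                              * total fits.
                              */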
3064                         while (da_new > da_old) {
3065                                 if (temp) {
3066                                         temp--;
3067                                         da_new--;
3068                                         xfs_bmbt_set_startblock(ep,
3069                                                 NULLSTARTBLOCK((int)temp));
3070                                 }
3071                                 if (da_new == da_old)
3072                                         break;
3073                                 if (temp2) {
3074                                         temp2--;
3075                                         da_new--;
3076                                         new.br_startblock =
3077                                                 NULLSTARTBLOCK((int)temp2);
3078                                 }
3079                         }
3080                 }
3081                 xfs_bmap_trace_post_update(fname, "0", ip, idx, whichfork);
3082                 xfs_bmap_trace_insert(fname, "0", ip, idx + 1, 1, &new, NULL,
3083                         whichfork);
3084                 xfs_bmap_insert_exlist(ip, idx + 1, 1, &new, whichfork);
3085                 ifp->if_lastex = idx + 1;
3086                 break;
3087         }
3088         /*
3089          * If we need to, add to list of extents to delete.
3090          */
3091         if (do_fx)
3092                 xfs_bmap_add_free(del->br_startblock, del->br_blockcount, flist,
3093                         mp);
3094         /*
3095          * Adjust inode # blocks in the file.
3096          */
3097         if (nblks)
3098                 ip->i_d.di_nblocks -= nblks;
3099         /*
3100          * Adjust quota data.
3101          */
3102         if (qfield)
3103                 XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, qfield, (long)-nblks);
3104
3105         /*
3106          * Account for change in delayed indirect blocks.
3107          * Nothing to do for disk quota accounting here.
3108          */
3109         ASSERT(da_old >= da_new);
3110         if (da_old > da_new)
3111                 xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, (int)(da_old - da_new),
3112                         rsvd);
3113 done:
3114         *logflagsp = flags;
3115         return error;
3116 }
3117
3118 /*
3119  * Remove the entry "free" from the free item list.  Prev points to the
3120  * previous entry, unless "free" is the head of the list.
3121  */
3122 STATIC void
3123 xfs_bmap_del_free(
3124         xfs_bmap_free_t         *flist, /* free item list header */
3125         xfs_bmap_free_item_t    *prev,  /* previous item on list, if any */
3126         xfs_bmap_free_item_t    *free)  /* list item to be freed */
3127 {
3128         if (prev)
3129                 prev->xbfi_next = free->xbfi_next;
3130         else
3131                 flist->xbf_first = free->xbfi_next;
3132         flist->xbf_count--;
3133         kmem_zone_free(xfs_bmap_free_item_zone, free);
3134 }
3135
3136 /*
3137  * Remove count entries from the extents array for inode "ip", starting
3138  * at index "idx".  Copies the remaining items down over the deleted ones,
3139  * and gives back the excess memory.
3140  */
3141 STATIC void
3142 xfs_bmap_delete_exlist(
3143         xfs_inode_t     *ip,            /* incore inode pointer */
3144         xfs_extnum_t    idx,            /* starting delete index */
3145         xfs_extnum_t    count,          /* count of items to delete */
3146         int             whichfork)      /* data or attr fork */
3147 {
3148         xfs_bmbt_rec_t  *base;          /* base of extent list */
3149         xfs_ifork_t     *ifp;           /* inode fork pointer */
3150         xfs_extnum_t    nextents;       /* number of extents in list after */
3151
3152         ifp = XFS_IFORK_PTR(ip, whichfork);
3153         ASSERT(ifp->if_flags & XFS_IFEXTENTS);
3154         base = ifp->if_u1.if_extents;
3155         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - count;
3156         memmove(&base[idx], &base[idx + count],
3157                 (nextents - idx) * sizeof(*base));
3158         xfs_iext_realloc(ip, -count, whichfork);
3159 }
3160
3161 /*
3162  * Convert an extents-format file into a btree-format file.
3163  * The new file will have a root block (in the inode) and a single child block.
3164  */
3165 STATIC int                                      /* error */
3166 xfs_bmap_extents_to_btree(
3167         xfs_trans_t             *tp,            /* transaction pointer */
3168         xfs_inode_t             *ip,            /* incore inode pointer */
3169         xfs_fsblock_t           *firstblock,    /* first-block-allocated */
3170         xfs_bmap_free_t         *flist,         /* blocks freed in xaction */
3171         xfs_btree_cur_t         **curp,         /* cursor returned to caller */
3172         int                     wasdel,         /* converting a delayed alloc */
3173         int                     *logflagsp,     /* inode logging flags */
3174         int                     whichfork)      /* data or attr fork */
3175 {
3176         xfs_bmbt_block_t        *ablock;        /* allocated (child) bt block */
3177         xfs_buf_t               *abp;           /* buffer for ablock */
3178         xfs_alloc_arg_t         args;           /* allocation arguments */
3179         xfs_bmbt_rec_t          *arp;           /* child record pointer */
3180         xfs_bmbt_block_t        *block;         /* btree root block */
3181         xfs_btree_cur_t         *cur;           /* bmap btree cursor */
3182         xfs_bmbt_rec_t          *ep;            /* extent list pointer */
3183         int                     error;          /* error return value */
3184         xfs_extnum_t            i, cnt;         /* extent list index */
3185         xfs_ifork_t             *ifp;           /* inode fork pointer */
3186         xfs_bmbt_key_t          *kp;            /* root block key pointer */
3187         xfs_mount_t             *mp;            /* mount structure */
3188         xfs_extnum_t            nextents;       /* extent list size */
3189         xfs_bmbt_ptr_t          *pp;            /* root block address pointer */
3190
3191         ifp = XFS_IFORK_PTR(ip, whichfork);
3192         ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);
3193         ASSERT(ifp->if_ext_max ==
3194                XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
3195         /*
3196          * Make space in the inode incore.
3197          */
3198         xfs_iroot_realloc(ip, 1, whichfork);
3199         ifp->if_flags |= XFS_IFBROOT;
3200         /*
3201          * Fill in the root.
3202          */
3203         block = ifp->if_broot;
3204         INT_SET(block->bb_magic, ARCH_CONVERT, XFS_BMAP_MAGIC);
3205         INT_SET(block->bb_level, ARCH_CONVERT, 1);
3206         INT_SET(block->bb_numrecs, ARCH_CONVERT, 1);
3207         INT_SET(block->bb_leftsib, ARCH_CONVERT, NULLDFSBNO);
3208         INT_SET(block->bb_rightsib, ARCH_CONVERT, NULLDFSBNO);
3209         /*
3210          * Need a cursor.  Can't allocate until bb_level is filled in.
3211          */
3212         mp = ip->i_mount;
3213         cur = xfs_btree_init_cursor(mp, tp, NULL, 0, XFS_BTNUM_BMAP, ip,
3214                 whichfork);
3215         cur->bc_private.b.firstblock = *firstblock;
3216         cur->bc_private.b.flist = flist;
3217         cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
3218         /*
3219          * Convert to a btree with two levels, one record in root.
3220          */
3221         XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE);
3222         args.tp = tp;
3223         args.mp = mp;
3224         if (*firstblock == NULLFSBLOCK) {
3225                 args.type = XFS_ALLOCTYPE_START_BNO;
3226                 args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
3227         } else if (flist->xbf_low) {
3228                 args.type = XFS_ALLOCTYPE_START_BNO;
3229                 args.fsbno = *firstblock;
3230         } else {
3231                 args.type = XFS_ALLOCTYPE_NEAR_BNO;
3232                 args.fsbno = *firstblock;
3233         }
3234         args.minlen = args.maxlen = args.prod = 1;
3235         args.total = args.minleft = args.alignment = args.mod = args.isfl =
3236                 args.minalignslop = 0;
3237         args.wasdel = wasdel;
3238         *logflagsp = 0;
3239         if ((error = xfs_alloc_vextent(&args))) {
3240                 xfs_iroot_realloc(ip, -1, whichfork);
3241                 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
3242                 return error;
3243         }
3244         /*
3245          * Allocation can't fail, the space was reserved.
3246          */
3247         ASSERT(args.fsbno != NULLFSBLOCK);
3248         ASSERT(*firstblock == NULLFSBLOCK ||
3249                args.agno == XFS_FSB_TO_AGNO(mp, *firstblock) ||
3250                (flist->xbf_low &&
3251                 args.agno > XFS_FSB_TO_AGNO(mp, *firstblock)));
3252         *firstblock = cur->bc_private.b.firstblock = args.fsbno;
3253         cur->bc_private.b.allocated++;
3254         ip->i_d.di_nblocks++;
3255         XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
3256         abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
3257         /*
3258          * Fill in the child block.
3259          */
3260         ablock = XFS_BUF_TO_BMBT_BLOCK(abp);
3261         INT_SET(ablock->bb_magic, ARCH_CONVERT, XFS_BMAP_MAGIC);
3262         ablock->bb_level = 0;
3263         INT_SET(ablock->bb_leftsib, ARCH_CONVERT, NULLDFSBNO);
3264         INT_SET(ablock->bb_rightsib, ARCH_CONVERT, NULLDFSBNO);
3265         arp = XFS_BMAP_REC_IADDR(ablock, 1, cur);
3266         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
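             /*
              * Copy only the real extents into the child block; delayed
              * allocation extents (null startblock) have no on-disk form.
              */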
3267         for (ep = ifp->if_u1.if_extents, cnt = i = 0; i < nextents; i++, ep++) {
3268                 if (!ISNULLSTARTBLOCK(xfs_bmbt_get_startblock(ep))) {
3269                         arp->l0 = INT_GET(ep->l0, ARCH_CONVERT);
3270                         arp->l1 = INT_GET(ep->l1, ARCH_CONVERT);
3271                         arp++; cnt++;
3272                 }
3273         }
3274         INT_SET(ablock->bb_numrecs, ARCH_CONVERT, cnt);
3275         ASSERT(INT_GET(ablock->bb_numrecs, ARCH_CONVERT) == XFS_IFORK_NEXTENTS(ip, whichfork));
3276         /*
3277          * Fill in the root key and pointer.
3278          */
3279         kp = XFS_BMAP_KEY_IADDR(block, 1, cur);
3280         arp = XFS_BMAP_REC_IADDR(ablock, 1, cur);
3281         INT_SET(kp->br_startoff, ARCH_CONVERT, xfs_bmbt_disk_get_startoff(arp));
3282         pp = XFS_BMAP_PTR_IADDR(block, 1, cur);
3283         INT_SET(*pp, ARCH_CONVERT, args.fsbno);
3284         /*
3285          * Do all this logging at the end so that
3286          * the root is at the right level.
3287          */
3288         xfs_bmbt_log_block(cur, abp, XFS_BB_ALL_BITS);
3289         xfs_bmbt_log_recs(cur, abp, 1, INT_GET(ablock->bb_numrecs, ARCH_CONVERT));
3290         ASSERT(*curp == NULL);
3291         *curp = cur;
3292         *logflagsp = XFS_ILOG_CORE | XFS_ILOG_FBROOT(whichfork);
3293         return 0;
3294 }
3295
3296 /*
3297  * Insert new item(s) in the extent list for inode "ip".
3298  * "count" new items are inserted at index "idx".
3299  */
3300 STATIC void
3301 xfs_bmap_insert_exlist(
3302         xfs_inode_t     *ip,            /* incore inode pointer */
3303         xfs_extnum_t    idx,            /* starting index of new items */
3304         xfs_extnum_t    count,          /* number of inserted items */
3305         xfs_bmbt_irec_t *new,           /* items to insert */
3306         int             whichfork)      /* data or attr fork */
3307 {
3308         xfs_bmbt_rec_t  *base;          /* extent list base */
3309         xfs_ifork_t     *ifp;           /* inode fork pointer */
3310         xfs_extnum_t    nextents;       /* extent list size */
3311         xfs_extnum_t    to;             /* extent list index */
3312
3313         ifp = XFS_IFORK_PTR(ip, whichfork);
3314         ASSERT(ifp->if_flags & XFS_IFEXTENTS);
3315         xfs_iext_realloc(ip, count, whichfork);
3316         base = ifp->if_u1.if_extents;
3317         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3318         memmove(&base[idx + count], &base[idx],
3319                 (nextents - (idx + count)) * sizeof(*base));
3320         for (to = idx; to < idx + count; to++, new++)
3321                 xfs_bmbt_set_all(&base[to], new);
3322 }
3323
3324 /*
3325  * Helper routine to reset inode di_forkoff field when switching
3326  * attribute fork from local to extent format - we reset it where
3327  * possible to make space available for inline data fork extents.
3328  */
3329 STATIC void
3330 xfs_bmap_forkoff_reset(
3331         xfs_mount_t     *mp,
3332         xfs_inode_t     *ip,
3333         int             whichfork)
3334 {
3335         if (whichfork == XFS_ATTR_FORK &&
3336             (ip->i_d.di_format != XFS_DINODE_FMT_DEV) &&
3337             (ip->i_d.di_format != XFS_DINODE_FMT_UUID) &&
3338             ((mp->m_attroffset >> 3) > ip->i_d.di_forkoff)) {
3339                 ip->i_d.di_forkoff = mp->m_attroffset >> 3;
3340                 ip->i_df.if_ext_max = XFS_IFORK_DSIZE(ip) /
3341                                         (uint)sizeof(xfs_bmbt_rec_t);
3342                 ip->i_afp->if_ext_max = XFS_IFORK_ASIZE(ip) /
3343                                         (uint)sizeof(xfs_bmbt_rec_t);
3344         }
3345 }
3346
3347 /*
3348  * Convert a local file to an extents file.
3349  * This code is out of bounds for data forks of regular files,
3350  * since the file data needs to get logged so things will stay consistent.
3351  * (The bmap-level manipulations are ok, though).
3352  */
3353 STATIC int                              /* error */
3354 xfs_bmap_local_to_extents(
3355         xfs_trans_t     *tp,            /* transaction pointer */
3356         xfs_inode_t     *ip,            /* incore inode pointer */
3357         xfs_fsblock_t   *firstblock,    /* first block allocated in xaction */
3358         xfs_extlen_t    total,          /* total blocks needed by transaction */
3359         int             *logflagsp,     /* inode logging flags */
3360         int             whichfork)      /* data or attr fork */
3361 {
3362         int             error;          /* error return value */
3363         int             flags;          /* logging flags returned */
3364 #ifdef XFS_BMAP_TRACE
3365         static char     fname[] = "xfs_bmap_local_to_extents";
3366 #endif
3367         xfs_ifork_t     *ifp;           /* inode fork pointer */
3368
3369         /*
3370          * We don't want to deal with the case of keeping inode data inline yet.
3371  * So calling this for the data fork of a regular inode is invalid.
3372          */
3373         ASSERT(!((ip->i_d.di_mode & S_IFMT) == S_IFREG &&
3374                  whichfork == XFS_DATA_FORK));
3375         ifp = XFS_IFORK_PTR(ip, whichfork);
3376         ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
3377         flags = 0;
3378         error = 0;
3379         if (ifp->if_bytes) {
3380                 xfs_alloc_arg_t args;   /* allocation arguments */
3381                 xfs_buf_t       *bp;    /* buffer for extent list block */
3382                 xfs_bmbt_rec_t  *ep;    /* extent list pointer */
3383
3384                 args.tp = tp;
3385                 args.mp = ip->i_mount;
3386                 ASSERT(ifp->if_flags & XFS_IFINLINE);
3387                 /*
3388                  * Allocate a block.  We know we need only one, since the
3389                  * file currently fits in an inode.
3390                  */
3391                 if (*firstblock == NULLFSBLOCK) {
3392                         args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
3393                         args.type = XFS_ALLOCTYPE_START_BNO;
3394                 } else {
3395                         args.fsbno = *firstblock;
3396                         args.type = XFS_ALLOCTYPE_NEAR_BNO;
3397                 }
3398                 args.total = total;
3399                 args.mod = args.minleft = args.alignment = args.wasdel =
3400                         args.isfl = args.minalignslop = 0;
3401                 args.minlen = args.maxlen = args.prod = 1;
3402                 if ((error = xfs_alloc_vextent(&args)))
3403                         goto done;
3404                 /*
3405                  * Can't fail, the space was reserved.
3406                  */
3407                 ASSERT(args.fsbno != NULLFSBLOCK);
3408                 ASSERT(args.len == 1);
3409                 *firstblock = args.fsbno;
3410                 bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);
3411                 memcpy((char *)XFS_BUF_PTR(bp), ifp->if_u1.if_data,
3412                         ifp->if_bytes);
3413                 xfs_trans_log_buf(tp, bp, 0, ifp->if_bytes - 1);
3414                 xfs_bmap_forkoff_reset(args.mp, ip, whichfork);
3415                 xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
3416                 xfs_iext_realloc(ip, 1, whichfork);
3417                 ep = ifp->if_u1.if_extents;
3418                 xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM);
3419                 xfs_bmap_trace_post_update(fname, "new", ip, 0, whichfork);
3420                 XFS_IFORK_NEXT_SET(ip, whichfork, 1);
3421                 ip->i_d.di_nblocks = 1;
3422                 XFS_TRANS_MOD_DQUOT_BYINO(args.mp, tp, ip,
3423                         XFS_TRANS_DQ_BCOUNT, 1L);
3424                 flags |= XFS_ILOG_FEXT(whichfork);
3425         } else {
3426                 ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);
3427                 xfs_bmap_forkoff_reset(ip->i_mount, ip, whichfork);
3428         }
3429         ifp->if_flags &= ~XFS_IFINLINE;
3430         ifp->if_flags |= XFS_IFEXTENTS;
3431         XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
3432         flags |= XFS_ILOG_CORE;
3433 done:
3434         *logflagsp = flags;
3435         return error;
3436 }
3437
3438 xfs_bmbt_rec_t *                        /* pointer to found extent entry */
3439 xfs_bmap_do_search_extents(
3440         xfs_bmbt_rec_t  *base,          /* base of extent list */
3441         xfs_extnum_t    lastx,          /* last extent index used */
3442         xfs_extnum_t    nextents,       /* extent list size */
3443         xfs_fileoff_t   bno,            /* block number searched for */
3444         int             *eofp,          /* out: end of file found */
3445         xfs_extnum_t    *lastxp,        /* out: last extent index */
3446         xfs_bmbt_irec_t *gotp,          /* out: extent entry found */
3447         xfs_bmbt_irec_t *prevp)         /* out: previous extent entry found */
3448 {
3449         xfs_bmbt_rec_t  *ep;            /* extent list entry pointer */
3450         xfs_bmbt_irec_t got;            /* extent list entry, decoded */
3451         int             high;           /* high index of binary search */
3452         int             low;            /* low index of binary search */
3453
3454         /*
3455          * Initialize the extent entry structure to catch access to
3456          * uninitialized br_startblock field.
3457          */
3458         got.br_startoff = 0xffa5a5a5a5a5a5a5LL;
3459         got.br_blockcount = 0xa55a5a5a5a5a5a5aLL;
3460         got.br_state = XFS_EXT_INVALID;
3461
3462 #if XFS_BIG_BLKNOS
3463         got.br_startblock = 0xffffa5a5a5a5a5a5LL;
3464 #else
3465         got.br_startblock = 0xffffa5a5;
3466 #endif
3467
3468         if (lastx != NULLEXTNUM && lastx < nextents)
3469                 ep = base + lastx;
3470         else
3471                 ep = NULL;
3472         prevp->br_startoff = NULLFILEOFF;
3473         if (ep && bno >= (got.br_startoff = xfs_bmbt_get_startoff(ep)) &&
3474             bno < got.br_startoff +
3475                   (got.br_blockcount = xfs_bmbt_get_blockcount(ep)))
3476                 *eofp = 0;
3477         else if (ep && lastx < nextents - 1 &&
3478                  bno >= (got.br_startoff = xfs_bmbt_get_startoff(ep + 1)) &&
3479                  bno < got.br_startoff +
3480                        (got.br_blockcount = xfs_bmbt_get_blockcount(ep + 1))) {
3481                 lastx++;
3482                 ep++;
3483                 *eofp = 0;
3484         } else if (nextents == 0)
3485                 *eofp = 1;
3486         else if (bno == 0 &&
3487                  (got.br_startoff = xfs_bmbt_get_startoff(base)) == 0) {
3488                 ep = base;
3489                 lastx = 0;
3490                 got.br_blockcount = xfs_bmbt_get_blockcount(ep);
3491                 *eofp = 0;
3492         } else {
3493                 /* binary search the extents array */
3494                 low = 0;
3495                 high = nextents - 1;
3496                 while (low <= high) {
3497                         XFS_STATS_INC(xs_cmp_exlist);
3498                         lastx = (low + high) >> 1;
3499                         ep = base + lastx;
3500                         got.br_startoff = xfs_bmbt_get_startoff(ep);
3501                         got.br_blockcount = xfs_bmbt_get_blockcount(ep);
3502                         if (bno < got.br_startoff)
3503                                 high = lastx - 1;
3504                         else if (bno >= got.br_startoff + got.br_blockcount)
3505                                 low = lastx + 1;
3506                         else {
3507                                 got.br_startblock = xfs_bmbt_get_startblock(ep);
3508                                 got.br_state = xfs_bmbt_get_state(ep);
3509                                 *eofp = 0;
3510                                 *lastxp = lastx;
3511                                 *gotp = got;
3512                                 return ep;
3513                         }
3514                 }
3515                 if (bno >= got.br_startoff + got.br_blockcount) {
3516                         lastx++;
3517                         if (lastx == nextents) {
3518                                 *eofp = 1;
3519                                 got.br_startblock = xfs_bmbt_get_startblock(ep);
3520                                 got.br_state = xfs_bmbt_get_state(ep);
3521                                 *prevp = got;
3522                                 ep = NULL;
3523                         } else {
3524                                 *eofp = 0;
3525                                 xfs_bmbt_get_all(ep, prevp);
3526                                 ep++;
3527                                 got.br_startoff = xfs_bmbt_get_startoff(ep);
3528                                 got.br_blockcount = xfs_bmbt_get_blockcount(ep);
3529                         }
3530                 } else {
3531                         *eofp = 0;
3532                         if (ep > base)
3533                                 xfs_bmbt_get_all(ep - 1, prevp);
3534                 }
3535         }
3536         if (ep) {
3537                 got.br_startblock = xfs_bmbt_get_startblock(ep);
3538                 got.br_state = xfs_bmbt_get_state(ep);
3539         }
3540         *lastxp = lastx;
3541         *gotp = got;
3542         return ep;
3543 }
3544
3545 /*
3546  * Search the extents list for the inode, for the extent containing bno.
3547  * If bno lies in a hole, point to the next entry.  If bno lies past eof,
3548  * *eofp will be set, and *prevp will contain the last entry (null if none).
3549  * Else, *lastxp will be set to the index of the found
3550  * entry; *gotp will contain the entry.
3551  */
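/*
 * Illustrative caller pattern (local variable names are arbitrary):
 *
 *	ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx,
 *				     &got, &prev);
 *	if (eof)
 *		... bno is beyond the last extent; "prev" holds that entry ...
 *	else if (got.br_startoff > bno)
 *		... bno falls in a hole; "got" is the following extent ...
 *	else
 *		... the extent in "got" contains bno ...
 */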
3552 STATIC xfs_bmbt_rec_t *                 /* pointer to found extent entry */
3553 xfs_bmap_search_extents(
3554         xfs_inode_t     *ip,            /* incore inode pointer */
3555         xfs_fileoff_t   bno,            /* block number searched for */
3556         int             whichfork,      /* data or attr fork */
3557         int             *eofp,          /* out: end of file found */
3558         xfs_extnum_t    *lastxp,        /* out: last extent index */
3559         xfs_bmbt_irec_t *gotp,          /* out: extent entry found */
3560         xfs_bmbt_irec_t *prevp)         /* out: previous extent entry found */
3561 {
3562         xfs_ifork_t     *ifp;           /* inode fork pointer */
3563         xfs_bmbt_rec_t  *base;          /* base of extent list */
3564         xfs_extnum_t    lastx;          /* last extent index used */
3565         xfs_extnum_t    nextents;       /* extent list size */
3566         xfs_bmbt_rec_t  *ep;            /* extent list entry pointer */
3567         int             rt;             /* realtime flag    */
3568
3569         XFS_STATS_INC(xs_look_exlist);
3570         ifp = XFS_IFORK_PTR(ip, whichfork);
3571         lastx = ifp->if_lastex;
3572         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3573         base = &ifp->if_u1.if_extents[0];
3574
3575         ep = xfs_bmap_do_search_extents(base, lastx, nextents, bno, eofp,
3576                                           lastxp, gotp, prevp);
3577         rt = ip->i_d.di_flags & XFS_DIFLAG_REALTIME;
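        /*
         * Block zero of the data device holds the superblock, so a
         * non-realtime extent that maps to it indicates corruption;
         * trap that case loudly here.
         */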
3578         if (!rt && !gotp->br_startblock && (*lastxp != NULLEXTNUM)) {
3579                 cmn_err(CE_PANIC, "Access to block zero: fs: <%s> inode: %lld "
3580                         "start_block: %llx start_off: %llx blkcnt: %llx "
3581                         "extent-state: %x\n",
3582                         (ip->i_mount)->m_fsname, (long long)ip->i_ino,
3583                         gotp->br_startblock, gotp->br_startoff,
3584                         gotp->br_blockcount, gotp->br_state);
3585         }
3586         return ep;
3587 }
3588
3589
3590 #ifdef XFS_BMAP_TRACE
3591 ktrace_t        *xfs_bmap_trace_buf;
3592
3593 /*
3594  * Add a bmap trace buffer entry.  Base routine for the others.
3595  */
3596 STATIC void
3597 xfs_bmap_trace_addentry(
3598         int             opcode,         /* operation */
3599         char            *fname,         /* function name */
3600         char            *desc,          /* operation description */
3601         xfs_inode_t     *ip,            /* incore inode pointer */
3602         xfs_extnum_t    idx,            /* index of entry(ies) */
3603         xfs_extnum_t    cnt,            /* count of entries, 1 or 2 */
3604         xfs_bmbt_rec_t  *r1,            /* first record */
3605         xfs_bmbt_rec_t  *r2,            /* second record or null */
3606         int             whichfork)      /* data or attr fork */
3607 {
3608         xfs_bmbt_rec_t  tr2;
3609
3610         ASSERT(cnt == 1 || cnt == 2);
3611         ASSERT(r1 != NULL);
3612         if (cnt == 1) {
3613                 ASSERT(r2 == NULL);
3614                 r2 = &tr2;
3615                 memset(&tr2, 0, sizeof(tr2));
3616         } else
3617                 ASSERT(r2 != NULL);
3618         ktrace_enter(xfs_bmap_trace_buf,
3619                 (void *)(__psint_t)(opcode | (whichfork << 16)),
3620                 (void *)fname, (void *)desc, (void *)ip,
3621                 (void *)(__psint_t)idx,
3622                 (void *)(__psint_t)cnt,
3623                 (void *)(__psunsigned_t)(ip->i_ino >> 32),
3624                 (void *)(__psunsigned_t)(unsigned)ip->i_ino,
3625                 (void *)(__psunsigned_t)(r1->l0 >> 32),
3626                 (void *)(__psunsigned_t)(unsigned)(r1->l0),
3627                 (void *)(__psunsigned_t)(r1->l1 >> 32),
3628                 (void *)(__psunsigned_t)(unsigned)(r1->l1),
3629                 (void *)(__psunsigned_t)(r2->l0 >> 32),
3630                 (void *)(__psunsigned_t)(unsigned)(r2->l0),
3631                 (void *)(__psunsigned_t)(r2->l1 >> 32),
3632                 (void *)(__psunsigned_t)(unsigned)(r2->l1)
3633                 );
3634         ASSERT(ip->i_xtrace);
3635         ktrace_enter(ip->i_xtrace,
3636                 (void *)(__psint_t)(opcode | (whichfork << 16)),
3637                 (void *)fname, (void *)desc, (void *)ip,
3638                 (void *)(__psint_t)idx,
3639                 (void *)(__psint_t)cnt,
3640                 (void *)(__psunsigned_t)(ip->i_ino >> 32),
3641                 (void *)(__psunsigned_t)(unsigned)ip->i_ino,
3642                 (void *)(__psunsigned_t)(r1->l0 >> 32),
3643                 (void *)(__psunsigned_t)(unsigned)(r1->l0),
3644                 (void *)(__psunsigned_t)(r1->l1 >> 32),
3645                 (void *)(__psunsigned_t)(unsigned)(r1->l1),
3646                 (void *)(__psunsigned_t)(r2->l0 >> 32),
3647                 (void *)(__psunsigned_t)(unsigned)(r2->l0),
3648                 (void *)(__psunsigned_t)(r2->l1 >> 32),
3649                 (void *)(__psunsigned_t)(unsigned)(r2->l1)
3650                 );
3651 }
3652
3653 /*
3654  * Add bmap trace entry prior to a call to xfs_bmap_delete_exlist.
3655  */
3656 STATIC void
3657 xfs_bmap_trace_delete(
3658         char            *fname,         /* function name */
3659         char            *desc,          /* operation description */
3660         xfs_inode_t     *ip,            /* incore inode pointer */
3661         xfs_extnum_t    idx,            /* index of entry(entries) deleted */
3662         xfs_extnum_t    cnt,            /* count of entries deleted, 1 or 2 */
3663         int             whichfork)      /* data or attr fork */
3664 {
3665         xfs_ifork_t     *ifp;           /* inode fork pointer */
3666
3667         ifp = XFS_IFORK_PTR(ip, whichfork);
3668         xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_DELETE, fname, desc, ip, idx,
3669                 cnt, &ifp->if_u1.if_extents[idx],
3670                 cnt == 2 ? &ifp->if_u1.if_extents[idx + 1] : NULL,
3671                 whichfork);
3672 }
3673
3674 /*
3675  * Add bmap trace entry prior to a call to xfs_bmap_insert_exlist, or
3676  * reading in the extents list from the disk (in the btree).
3677  */
3678 STATIC void
3679 xfs_bmap_trace_insert(
3680         char            *fname,         /* function name */
3681         char            *desc,          /* operation description */
3682         xfs_inode_t     *ip,            /* incore inode pointer */
3683         xfs_extnum_t    idx,            /* index of entry(entries) inserted */
3684         xfs_extnum_t    cnt,            /* count of entries inserted, 1 or 2 */
3685         xfs_bmbt_irec_t *r1,            /* inserted record 1 */
3686         xfs_bmbt_irec_t *r2,            /* inserted record 2 or null */
3687         int             whichfork)      /* data or attr fork */
3688 {
3689         xfs_bmbt_rec_t  tr1;            /* compressed record 1 */
3690         xfs_bmbt_rec_t  tr2;            /* compressed record 2 if needed */
3691
3692         xfs_bmbt_set_all(&tr1, r1);
3693         if (cnt == 2) {
3694                 ASSERT(r2 != NULL);
3695                 xfs_bmbt_set_all(&tr2, r2);
3696         } else {
3697                 ASSERT(cnt == 1);
3698                 ASSERT(r2 == NULL);
3699         }
3700         xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_INSERT, fname, desc, ip, idx,
3701                 cnt, &tr1, cnt == 2 ? &tr2 : NULL, whichfork);
3702 }
3703
3704 /*
3705  * Add bmap trace entry after updating an extent list entry in place.
3706  */
3707 STATIC void
3708 xfs_bmap_trace_post_update(
3709         char            *fname,         /* function name */
3710         char            *desc,          /* operation description */
3711         xfs_inode_t     *ip,            /* incore inode pointer */
3712         xfs_extnum_t    idx,            /* index of entry updated */
3713         int             whichfork)      /* data or attr fork */
3714 {
3715         xfs_ifork_t     *ifp;           /* inode fork pointer */
3716
3717         ifp = XFS_IFORK_PTR(ip, whichfork);
3718         xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_POST_UP, fname, desc, ip, idx,
3719                 1, &ifp->if_u1.if_extents[idx], NULL, whichfork);
3720 }
3721
3722 /*
3723  * Add bmap trace entry prior to updating an extent list entry in place.
3724  */
3725 STATIC void
3726 xfs_bmap_trace_pre_update(
3727         char            *fname,         /* function name */
3728         char            *desc,          /* operation description */
3729         xfs_inode_t     *ip,            /* incore inode pointer */
3730         xfs_extnum_t    idx,            /* index of entry to be updated */
3731         int             whichfork)      /* data or attr fork */
3732 {
3733         xfs_ifork_t     *ifp;           /* inode fork pointer */
3734
3735         ifp = XFS_IFORK_PTR(ip, whichfork);
3736         xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_PRE_UP, fname, desc, ip, idx, 1,
3737                 &ifp->if_u1.if_extents[idx], NULL, whichfork);
3738 }
3739 #endif  /* XFS_BMAP_TRACE */
3740
3741 /*
3742  * Compute the worst-case number of indirect blocks that will be used
3743  * for ip's delayed extent of length "len".
3744  */
3745 STATIC xfs_filblks_t
3746 xfs_bmap_worst_indlen(
3747         xfs_inode_t     *ip,            /* incore inode pointer */
3748         xfs_filblks_t   len)            /* delayed extent length */
3749 {
3750         int             level;          /* btree level number */
3751         int             maxrecs;        /* maximum record count at this level */
3752         xfs_mount_t     *mp;            /* mount structure */
3753         xfs_filblks_t   rval;           /* return value */
3754
3755         mp = ip->i_mount;
3756         maxrecs = mp->m_bmap_dmxr[0];
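        /*
         * Each pass of the loop computes ceil(len / maxrecs), i.e. the
         * worst-case number of blocks needed at this level to index the
         * blocks of the level below, and adds it to the running total.
         */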
3757         for (level = 0, rval = 0;
3758              level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
3759              level++) {
3760                 len += maxrecs - 1;
3761                 do_div(len, maxrecs);
3762                 rval += len;
3763                 if (len == 1)
3764                         return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
3765                                 level - 1;
3766                 if (level == 0)
3767                         maxrecs = mp->m_bmap_dmxr[1];
3768         }
3769         return rval;
3770 }
3771
3772 #if defined(XFS_RW_TRACE)
3773 STATIC void
3774 xfs_bunmap_trace(
3775         xfs_inode_t             *ip,
3776         xfs_fileoff_t           bno,
3777         xfs_filblks_t           len,
3778         int                     flags,
3779         inst_t                  *ra)
3780 {
3781         if (ip->i_rwtrace == NULL)
3782                 return;
3783         ktrace_enter(ip->i_rwtrace,
3784                 (void *)(__psint_t)XFS_BUNMAPI,
3785                 (void *)ip,
3786                 (void *)(__psint_t)((ip->i_d.di_size >> 32) & 0xffffffff),
3787                 (void *)(__psint_t)(ip->i_d.di_size & 0xffffffff),
3788                 (void *)(__psint_t)(((xfs_dfiloff_t)bno >> 32) & 0xffffffff),
3789                 (void *)(__psint_t)((xfs_dfiloff_t)bno & 0xffffffff),
3790                 (void *)(__psint_t)len,
3791                 (void *)(__psint_t)flags,
3792                 (void *)(unsigned long)current_cpu(),
3793                 (void *)ra,
3794                 (void *)0,
3795                 (void *)0,
3796                 (void *)0,
3797                 (void *)0,
3798                 (void *)0,
3799                 (void *)0);
3800 }
3801 #endif
3802
3803 /*
3804  * Convert inode from non-attributed to attributed.
3805  * Must not be in a transaction, ip must not be locked.
3806  */
3807 int                                             /* error code */
3808 xfs_bmap_add_attrfork(
3809         xfs_inode_t             *ip,            /* incore inode pointer */
3810         int                     size,           /* space new attribute needs */
3811         int                     rsvd)           /* xact may use reserved blks */
3812 {
3813         xfs_fsblock_t           firstblock;     /* 1st block/ag allocated */
3814         xfs_bmap_free_t         flist;          /* freed extent list */
3815         xfs_mount_t             *mp;            /* mount structure */
3816         xfs_trans_t             *tp;            /* transaction pointer */
3817         unsigned long           s;              /* spinlock spl value */
3818         int                     blks;           /* space reservation */
3819         int                     version = 1;    /* superblock attr version */
3820         int                     committed;      /* xaction was committed */
3821         int                     logflags;       /* logging flags */
3822         int                     error;          /* error return value */
3823
3824         ASSERT(XFS_IFORK_Q(ip) == 0);
3825         ASSERT(ip->i_df.if_ext_max ==
3826                XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t));
3827
3828         mp = ip->i_mount;
3829         ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
3830         tp = xfs_trans_alloc(mp, XFS_TRANS_ADDAFORK);
3831         blks = XFS_ADDAFORK_SPACE_RES(mp);
3832         if (rsvd)
3833                 tp->t_flags |= XFS_TRANS_RESERVE;
3834         if ((error = xfs_trans_reserve(tp, blks, XFS_ADDAFORK_LOG_RES(mp), 0,
3835                         XFS_TRANS_PERM_LOG_RES, XFS_ADDAFORK_LOG_COUNT)))
3836                 goto error0;
3837         xfs_ilock(ip, XFS_ILOCK_EXCL);
3838         error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, blks, 0, rsvd ?
3839                         XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
3840                         XFS_QMOPT_RES_REGBLKS);
3841         if (error) {
3842                 xfs_iunlock(ip, XFS_ILOCK_EXCL);
3843                 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES);
3844                 return error;
3845         }
3846         if (XFS_IFORK_Q(ip))
3847                 goto error1;
3848         if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
3849                 /*
3850                  * For inodes coming from pre-6.2 filesystems.
3851                  */
3852                 ASSERT(ip->i_d.di_aformat == 0);
3853                 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
3854         }
3855         ASSERT(ip->i_d.di_anextents == 0);
3856         VN_HOLD(XFS_ITOV(ip));
3857         xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
3858         xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
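        /*
         * Pick the attribute fork offset (di_forkoff, in 8-byte units)
         * based on the current data fork format.
         */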
3859         switch (ip->i_d.di_format) {
3860         case XFS_DINODE_FMT_DEV:
3861                 ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
3862                 break;
3863         case XFS_DINODE_FMT_UUID:
3864                 ip->i_d.di_forkoff = roundup(sizeof(uuid_t), 8) >> 3;
3865                 break;
3866         case XFS_DINODE_FMT_LOCAL:
3867         case XFS_DINODE_FMT_EXTENTS:
3868         case XFS_DINODE_FMT_BTREE:
3869                 ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
3870                 if (!ip->i_d.di_forkoff)
3871                         ip->i_d.di_forkoff = mp->m_attroffset >> 3;
3872                 else if (!(mp->m_flags & XFS_MOUNT_COMPAT_ATTR))
3873                         version = 2;
3874                 break;
3875         default:
3876                 ASSERT(0);
3877                 error = XFS_ERROR(EINVAL);
3878                 goto error1;
3879         }
3880         ip->i_df.if_ext_max =
3881                 XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
3882         ASSERT(ip->i_afp == NULL);
3883         ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
3884         ip->i_afp->if_ext_max =
3885                 XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
3886         ip->i_afp->if_flags = XFS_IFEXTENTS;
3887         logflags = 0;
3888         XFS_BMAP_INIT(&flist, &firstblock);
3889         switch (ip->i_d.di_format) {
3890         case XFS_DINODE_FMT_LOCAL:
3891                 error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock, &flist,
3892                         &logflags);
3893                 break;
3894         case XFS_DINODE_FMT_EXTENTS:
3895                 error = xfs_bmap_add_attrfork_extents(tp, ip, &firstblock,
3896                         &flist, &logflags);
3897                 break;
3898         case XFS_DINODE_FMT_BTREE:
3899                 error = xfs_bmap_add_attrfork_btree(tp, ip, &firstblock, &flist,
3900                         &logflags);
3901                 break;
3902         default:
3903                 error = 0;
3904                 break;
3905         }
3906         if (logflags)
3907                 xfs_trans_log_inode(tp, ip, logflags);
3908         if (error)
3909                 goto error2;
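        /*
         * If this filesystem has never had an attribute fork before, or
         * if this is its first use of the attr2 inline layout (version 2),
         * update the superblock version bits under the superblock lock.
         */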
3910         if (!XFS_SB_VERSION_HASATTR(&mp->m_sb) ||
3911            (!XFS_SB_VERSION_HASATTR2(&mp->m_sb) && version == 2)) {
3912                 __int64_t sbfields = 0;
3913
3914                 s = XFS_SB_LOCK(mp);
3915                 if (!XFS_SB_VERSION_HASATTR(&mp->m_sb)) {
3916                         XFS_SB_VERSION_ADDATTR(&mp->m_sb);
3917                         sbfields |= XFS_SB_VERSIONNUM;
3918                 }
3919                 if (!XFS_SB_VERSION_HASATTR2(&mp->m_sb) && version == 2) {
3920                         XFS_SB_VERSION_ADDATTR2(&mp->m_sb);
3921                         sbfields |= (XFS_SB_VERSIONNUM | XFS_SB_FEATURES2);
3922                 }
3923                 if (sbfields) {
3924                         XFS_SB_UNLOCK(mp, s);
3925                         xfs_mod_sb(tp, sbfields);
3926                 } else
3927                         XFS_SB_UNLOCK(mp, s);
3928         }
3929         if ((error = xfs_bmap_finish(&tp, &flist, firstblock, &committed)))
3930                 goto error2;
3931         error = xfs_trans_commit(tp, XFS_TRANS_PERM_LOG_RES, NULL);
3932         ASSERT(ip->i_df.if_ext_max ==
3933                XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t));
3934         return error;
3935 error2:
3936         xfs_bmap_cancel(&flist);
3937 error1:
3938         ASSERT(ismrlocked(&ip->i_lock,MR_UPDATE));
3939         xfs_iunlock(ip, XFS_ILOCK_EXCL);
3940 error0:
3941         xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
3942         ASSERT(ip->i_df.if_ext_max ==
3943                XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t));
3944         return error;
3945 }
3946
3947 /*
3948  * Add the extent to the list of extents to be free at transaction end.
3949  * The list is maintained sorted (by block number).
3950  */
3951 /* ARGSUSED */
3952 void
3953 xfs_bmap_add_free(
3954         xfs_fsblock_t           bno,            /* fs block number of extent */
3955         xfs_filblks_t           len,            /* length of extent */
3956         xfs_bmap_free_t         *flist,         /* list of extents */
3957         xfs_mount_t             *mp)            /* mount point structure */
3958 {
3959         xfs_bmap_free_item_t    *cur;           /* current (next) element */
3960         xfs_bmap_free_item_t    *new;           /* new element */
3961         xfs_bmap_free_item_t    *prev;          /* previous element */
3962 #ifdef DEBUG
3963         xfs_agnumber_t          agno;
3964         xfs_agblock_t           agbno;
3965
3966         ASSERT(bno != NULLFSBLOCK);
3967         ASSERT(len > 0);
3968         ASSERT(len <= MAXEXTLEN);
3969         ASSERT(!ISNULLSTARTBLOCK(bno));
3970         agno = XFS_FSB_TO_AGNO(mp, bno);
3971         agbno = XFS_FSB_TO_AGBNO(mp, bno);
3972         ASSERT(agno < mp->m_sb.sb_agcount);
3973         ASSERT(agbno < mp->m_sb.sb_agblocks);
3974         ASSERT(len < mp->m_sb.sb_agblocks);
3975         ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
3976 #endif
3977         ASSERT(xfs_bmap_free_item_zone != NULL);
3978         new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
3979         new->xbfi_startblock = bno;
3980         new->xbfi_blockcount = (xfs_extlen_t)len;
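        /*
         * Walk the singly linked list to find the insertion point that
         * keeps it sorted by starting block number.
         */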
3981         for (prev = NULL, cur = flist->xbf_first;
3982              cur != NULL;
3983              prev = cur, cur = cur->xbfi_next) {
3984                 if (cur->xbfi_startblock >= bno)
3985                         break;
3986         }
3987         if (prev)
3988                 prev->xbfi_next = new;
3989         else
3990                 flist->xbf_first = new;
3991         new->xbfi_next = cur;
3992         flist->xbf_count++;
3993 }
3994
3995 /*
3996  * Compute and fill in the value of the maximum depth of a bmap btree
3997  * in this filesystem.  Done once, during mount.
3998  */
3999 void
4000 xfs_bmap_compute_maxlevels(
4001         xfs_mount_t     *mp,            /* file system mount structure */
4002         int             whichfork)      /* data or attr fork */
4003 {
4004         int             level;          /* btree level */
4005         uint            maxblocks;      /* max blocks at this level */
4006         uint            maxleafents;    /* max leaf entries possible */
4007         int             maxrootrecs;    /* max records in root block */
4008         int             minleafrecs;    /* min records in leaf block */
4009         int             minnoderecs;    /* min records in node block */
4010         int             sz;             /* root block size */
4011
4012         /*
4013          * The maximum number of extents in a file, hence the maximum
4014          * number of leaf entries, is controlled by the type of di_nextents
4015          * (a signed 32-bit number, xfs_extnum_t), or by di_anextents
4016          * (a signed 16-bit number, xfs_aextnum_t).
4017          */
4018         if (whichfork == XFS_DATA_FORK) {
4019                 maxleafents = MAXEXTNUM;
4020                 sz = (mp->m_flags & XFS_MOUNT_COMPAT_ATTR) ?
4021                         mp->m_attroffset : XFS_BMDR_SPACE_CALC(MINDBTPTRS);
4022         } else {
4023                 maxleafents = MAXAEXTNUM;
4024                 sz = (mp->m_flags & XFS_MOUNT_COMPAT_ATTR) ?
4025                         mp->m_sb.sb_inodesize - mp->m_attroffset :
4026                         XFS_BMDR_SPACE_CALC(MINABTPTRS);
4027         }
4028         maxrootrecs = (int)XFS_BTREE_BLOCK_MAXRECS(sz, xfs_bmdr, 0);
4029         minleafrecs = mp->m_bmap_dmnr[0];
4030         minnoderecs = mp->m_bmap_dmnr[1];
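        /*
         * Work up from the worst-case number of leaf blocks, dividing by
         * the minimum node fanout at each level, until what remains fits
         * in the inode root block.
         */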
4031         maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
4032         for (level = 1; maxblocks > 1; level++) {
4033                 if (maxblocks <= maxrootrecs)
4034                         maxblocks = 1;
4035                 else
4036                         maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
4037         }
4038         mp->m_bm_maxlevels[whichfork] = level;
4039 }
4040
4041 /*
4042  * Routine to be called at transaction's end by xfs_bmapi, xfs_bunmapi
4043  * caller.  Frees all the extents that need freeing, which must be done
4044  * last due to locking considerations.  We never free any extents in
4045  * the first transaction.  This is to allow the caller to make the first
4046  * transaction a synchronous one so that the pointers to the data being
4047  * broken in this transaction will be permanent before the data is actually
4048  * freed.  This is necessary to prevent blocks from being reallocated
4049  * and written to before the free and reallocation are actually permanent.
4050  * We do not just make the first transaction synchronous here, because
4051  * there are more efficient ways to gain the same protection in some cases
4052  * (see the file truncation code).
4053  *
4054  * Return 1 if the given transaction was committed and a new one
4055  * started, and 0 otherwise in the committed parameter.
4056  */
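/*
 * A typical caller pattern, sketched from xfs_bmap_add_attrfork() above
 * (error handling abbreviated):
 *
 *	XFS_BMAP_INIT(&flist, &firstblock);
 *	... bmap/bunmap calls that may queue extents on flist ...
 *	error = xfs_bmap_finish(&tp, &flist, firstblock, &committed);
 *	if (error)
 *		xfs_bmap_cancel(&flist);
 *	else
 *		error = xfs_trans_commit(tp, XFS_TRANS_PERM_LOG_RES, NULL);
 */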
4057 /*ARGSUSED*/
4058 int                                             /* error */
4059 xfs_bmap_finish(
4060         xfs_trans_t             **tp,           /* transaction pointer addr */
4061         xfs_bmap_free_t         *flist,         /* i/o: list extents to free */
4062         xfs_fsblock_t           firstblock,     /* controlled ag for allocs */
4063         int                     *committed)     /* xact committed or not */
4064 {
4065         xfs_efd_log_item_t      *efd;           /* extent free data */
4066         xfs_efi_log_item_t      *efi;           /* extent free intention */
4067         int                     error;          /* error return value */
4068         xfs_bmap_free_item_t    *free;          /* free extent list item */
4069         unsigned int            logres;         /* new log reservation */
4070         unsigned int            logcount;       /* new log count */
4071         xfs_mount_t             *mp;            /* filesystem mount structure */
4072         xfs_bmap_free_item_t    *next;          /* next item on free list */
4073         xfs_trans_t             *ntp;           /* new transaction pointer */
4074
4075         ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
4076         if (flist->xbf_count == 0) {
4077                 *committed = 0;
4078                 return 0;
4079         }
4080         ntp = *tp;
4081         efi = xfs_trans_get_efi(ntp, flist->xbf_count);
4082         for (free = flist->xbf_first; free; free = free->xbfi_next)
4083                 xfs_trans_log_efi_extent(ntp, efi, free->xbfi_startblock,
4084                         free->xbfi_blockcount);
4085         logres = ntp->t_log_res;
4086         logcount = ntp->t_log_count;
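        /*
         * Roll the transaction: duplicate it, commit the original (which
         * carries the EFI), and re-reserve log space in the duplicate so
         * the actual frees and the EFD are logged in the new transaction.
         */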
4087         ntp = xfs_trans_dup(*tp);
4088         error = xfs_trans_commit(*tp, 0, NULL);
4089         *tp = ntp;
4090         *committed = 1;
4091         /*
4092          * We have a new transaction, so we should return committed=1,
4093          * even though we're returning an error.
4094          */
4095         if (error) {
4096                 return error;
4097         }
4098         if ((error = xfs_trans_reserve(ntp, 0, logres, 0, XFS_TRANS_PERM_LOG_RES,
4099                         logcount)))
4100                 return error;
4101         efd = xfs_trans_get_efd(ntp, efi, flist->xbf_count);
4102         for (free = flist->xbf_first; free != NULL; free = next) {
4103                 next = free->xbfi_next;
4104                 if ((error = xfs_free_extent(ntp, free->xbfi_startblock,
4105                                 free->xbfi_blockcount))) {
4106                         /*
4107                          * The bmap free list will be cleaned up at a
4108                          * higher level.  The EFI will be canceled when
4109                          * this transaction is aborted.
4110                          * Need to force shutdown here to make sure it
4111                          * happens, since this transaction may not be
4112                          * dirty yet.
4113                          */
4114                         mp = ntp->t_mountp;
4115                         if (!XFS_FORCED_SHUTDOWN(mp))
4116                                 xfs_force_shutdown(mp,
4117                                                    (error == EFSCORRUPTED) ?
4118                                                    XFS_CORRUPT_INCORE :
4119                                                    XFS_METADATA_IO_ERROR);
4120                         return error;
4121                 }
4122                 xfs_trans_log_efd_extent(ntp, efd, free->xbfi_startblock,
4123                         free->xbfi_blockcount);
4124                 xfs_bmap_del_free(flist, NULL, free);
4125         }
4126         return 0;
4127 }
4128
4129 /*
4130  * Free up any items left in the list.
4131  */
4132 void
4133 xfs_bmap_cancel(
4134         xfs_bmap_free_t         *flist) /* list of bmap_free_items */
4135 {
4136         xfs_bmap_free_item_t    *free;  /* free list item */
4137         xfs_bmap_free_item_t    *next;
4138
4139         if (flist->xbf_count == 0)
4140                 return;
4141         ASSERT(flist->xbf_first != NULL);
4142         for (free = flist->xbf_first; free; free = next) {
4143                 next = free->xbfi_next;
4144                 xfs_bmap_del_free(flist, NULL, free);
4145         }
4146         ASSERT(flist->xbf_count == 0);
4147 }
4148
4149 /*
4150  * Returns the file-relative block number of the first unused block(s)
4151  * in the file with at least "len" logically contiguous blocks free.
4152  * This is the lowest-address hole if the file has holes, else the first block
4153  * past the end of file.
4154  * Return 0 if the file is currently local (in-inode).
4155  */
4156 int                                             /* error */
4157 xfs_bmap_first_unused(
4158         xfs_trans_t     *tp,                    /* transaction pointer */
4159         xfs_inode_t     *ip,                    /* incore inode */
4160         xfs_extlen_t    len,                    /* size of hole to find */
4161         xfs_fileoff_t   *first_unused,          /* unused block */
4162         int             whichfork)              /* data or attr fork */
4163 {
4164         xfs_bmbt_rec_t  *base;                  /* base of extent array */
4165         xfs_bmbt_rec_t  *ep;                    /* pointer to an extent entry */
4166         int             error;                  /* error return value */
4167         xfs_ifork_t     *ifp;                   /* inode fork pointer */
4168         xfs_fileoff_t   lastaddr;               /* last block number seen */
4169         xfs_fileoff_t   lowest;                 /* lowest useful block */
4170         xfs_fileoff_t   max;                    /* starting useful block */
4171         xfs_fileoff_t   off;                    /* offset for this block */
4172         xfs_extnum_t    nextents;               /* number of extent entries */
4173
4174         ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE ||
4175                XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ||
4176                XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
4177         if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
4178                 *first_unused = 0;
4179                 return 0;
4180         }
4181         ifp = XFS_IFORK_PTR(ip, whichfork);
4182         if (!(ifp->if_flags & XFS_IFEXTENTS) &&
4183             (error = xfs_iread_extents(tp, ip, whichfork)))
4184                 return error;
4185         lowest = *first_unused;
4186         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4187         base = &ifp->if_u1.if_extents[0];
4188         for (lastaddr = 0, max = lowest, ep = base;
4189              ep < &base[nextents];
4190              ep++) {
4191                 off = xfs_bmbt_get_startoff(ep);
4192                 /*
4193                  * See if the hole before this extent will work.
4194                  */
4195                 if (off >= lowest + len && off - max >= len) {
4196                         *first_unused = max;
4197                         return 0;
4198                 }
4199                 lastaddr = off + xfs_bmbt_get_blockcount(ep);
4200                 max = XFS_FILEOFF_MAX(lastaddr, lowest);
4201         }
4202         *first_unused = max;
4203         return 0;
4204 }
4205
4206 /*
4207  * Returns the file-relative block number of the last block + 1 before
4208  * last_block (input value) in the file.
4209  * This is not based on i_size, it is based on the extent list.
4210  * Returns 0 for local files, as they do not have an extent list.
4211  */
4212 int                                             /* error */
4213 xfs_bmap_last_before(
4214         xfs_trans_t     *tp,                    /* transaction pointer */
4215         xfs_inode_t     *ip,                    /* incore inode */
4216         xfs_fileoff_t   *last_block,            /* last block */
4217         int             whichfork)              /* data or attr fork */
4218 {
4219         xfs_fileoff_t   bno;                    /* input file offset */
4220         int             eof;                    /* hit end of file */
4221         xfs_bmbt_rec_t  *ep;                    /* pointer to last extent */
4222         int             error;                  /* error return value */
4223         xfs_bmbt_irec_t got;                    /* current extent value */
4224         xfs_ifork_t     *ifp;                   /* inode fork pointer */
4225         xfs_extnum_t    lastx;                  /* last extent used */
4226         xfs_bmbt_irec_t prev;                   /* previous extent value */
4227
4228         if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
4229             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4230             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL)
4231                return XFS_ERROR(EIO);
4232         if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
4233                 *last_block = 0;
4234                 return 0;
4235         }
4236         ifp = XFS_IFORK_PTR(ip, whichfork);
4237         if (!(ifp->if_flags & XFS_IFEXTENTS) &&
4238             (error = xfs_iread_extents(tp, ip, whichfork)))
4239                 return error;
4240         bno = *last_block - 1;
4241         ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
4242                 &prev);
4243         if (eof || xfs_bmbt_get_startoff(ep) > bno) {
4244                 if (prev.br_startoff == NULLFILEOFF)
4245                         *last_block = 0;
4246                 else
4247                         *last_block = prev.br_startoff + prev.br_blockcount;
4248         }
4249         /*
4250          * Otherwise *last_block is already the right answer.
4251          */
4252         return 0;
4253 }
4254
4255 /*
4256  * Returns the file-relative block number of the first block past eof in
4257  * the file.  This is not based on i_size, it is based on the extent list.
4258  * Returns 0 for local files, as they do not have an extent list.
4259  */
4260 int                                             /* error */
4261 xfs_bmap_last_offset(
4262         xfs_trans_t     *tp,                    /* transaction pointer */
4263         xfs_inode_t     *ip,                    /* incore inode */
4264         xfs_fileoff_t   *last_block,            /* last block */
4265         int             whichfork)              /* data or attr fork */
4266 {
4267         xfs_bmbt_rec_t  *base;                  /* base of extent array */
4268         xfs_bmbt_rec_t  *ep;                    /* pointer to last extent */
4269         int             error;                  /* error return value */
4270         xfs_ifork_t     *ifp;                   /* inode fork pointer */
4271         xfs_extnum_t    nextents;               /* number of extent entries */
4272
4273         if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
4274             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4275             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL)
4276                return XFS_ERROR(EIO);
4277         if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
4278                 *last_block = 0;
4279                 return 0;
4280         }
4281         ifp = XFS_IFORK_PTR(ip, whichfork);
4282         if (!(ifp->if_flags & XFS_IFEXTENTS) &&
4283             (error = xfs_iread_extents(tp, ip, whichfork)))
4284                 return error;
4285         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4286         if (!nextents) {
4287                 *last_block = 0;
4288                 return 0;
4289         }
4290         base = &ifp->if_u1.if_extents[0];
4291         ASSERT(base != NULL);
4292         ep = &base[nextents - 1];
4293         *last_block = xfs_bmbt_get_startoff(ep) + xfs_bmbt_get_blockcount(ep);
4294         return 0;
4295 }
4296
4297 /*
4298  * Returns whether the selected fork of the inode has exactly one
4299  * block or not.  For the data fork we check this matches di_size,
4300  * implying the file's range is 0..bsize-1.
4301  */
4302 int                                     /* 1=>1 block, 0=>otherwise */
4303 xfs_bmap_one_block(
4304         xfs_inode_t     *ip,            /* incore inode */
4305         int             whichfork)      /* data or attr fork */
4306 {
4307         xfs_bmbt_rec_t  *ep;            /* ptr to fork's extent */
4308         xfs_ifork_t     *ifp;           /* inode fork pointer */
4309         int             rval;           /* return value */
4310         xfs_bmbt_irec_t s;              /* internal version of extent */
4311
4312 #ifndef DEBUG
4313         if (whichfork == XFS_DATA_FORK)
4314                 return ip->i_d.di_size == ip->i_mount->m_sb.sb_blocksize;
4315 #endif  /* !DEBUG */
4316         if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1)
4317                 return 0;
4318         if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
4319                 return 0;
4320         ifp = XFS_IFORK_PTR(ip, whichfork);
4321         ASSERT(ifp->if_flags & XFS_IFEXTENTS);
4322         ep = ifp->if_u1.if_extents;
4323         xfs_bmbt_get_all(ep, &s);
4324         rval = s.br_startoff == 0 && s.br_blockcount == 1;
4325         if (rval && whichfork == XFS_DATA_FORK)
4326                 ASSERT(ip->i_d.di_size == ip->i_mount->m_sb.sb_blocksize);
4327         return rval;
4328 }
4329
4330 /*
4331  * Read in the extents to if_extents.
4332  * All inode fields are set up by caller, we just traverse the btree
4333  * and copy the records in. If the file system cannot contain unwritten
4334  * extents, the records are checked for no "state" flags.
4335  */
4336 int                                     /* error */
4337 xfs_bmap_read_extents(
4338         xfs_trans_t             *tp,    /* transaction pointer */
4339         xfs_inode_t             *ip,    /* incore inode */
4340         int                     whichfork) /* data or attr fork */
4341 {
4342         xfs_bmbt_block_t        *block; /* current btree block */
4343         xfs_fsblock_t           bno;    /* block # of "block" */
4344         xfs_buf_t               *bp;    /* buffer for "block" */
4345         int                     error;  /* error return value */
4346         xfs_exntfmt_t           exntf;  /* XFS_EXTFMT_NOSTATE, if checking */
4347 #ifdef XFS_BMAP_TRACE
4348         static char             fname[] = "xfs_bmap_read_extents";
4349 #endif
4350         xfs_extnum_t            i, j;   /* index into the extents list */
4351         xfs_ifork_t             *ifp;   /* fork structure */
4352         int                     level;  /* btree level, for checking */
4353         xfs_mount_t             *mp;    /* file system mount structure */
4354         xfs_bmbt_ptr_t          *pp;    /* pointer to block address */
4355         /* REFERENCED */
4356         xfs_extnum_t            room;   /* number of entries there's room for */
4357         xfs_bmbt_rec_t          *trp;   /* target record pointer */
4358
4359         bno = NULLFSBLOCK;
4360         mp = ip->i_mount;
4361         ifp = XFS_IFORK_PTR(ip, whichfork);
4362         exntf = (whichfork != XFS_DATA_FORK) ? XFS_EXTFMT_NOSTATE :
4363                                         XFS_EXTFMT_INODE(ip);
4364         block = ifp->if_broot;
4365         /*
4366          * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
4367          */
4368         ASSERT(INT_GET(block->bb_level, ARCH_CONVERT) > 0);
4369         level = INT_GET(block->bb_level, ARCH_CONVERT);
4370         pp = XFS_BMAP_BROOT_PTR_ADDR(block, 1, ifp->if_broot_bytes);
4371         ASSERT(INT_GET(*pp, ARCH_CONVERT) != NULLDFSBNO);
4372         ASSERT(XFS_FSB_TO_AGNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agcount);
4373         ASSERT(XFS_FSB_TO_AGBNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agblocks);
4374         bno = INT_GET(*pp, ARCH_CONVERT);
4375         /*
4376          * Go down the tree until leaf level is reached, following the first
4377          * pointer (leftmost) at each level.
4378          */
4379         while (level-- > 0) {
4380                 if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
4381                                 XFS_BMAP_BTREE_REF)))
4382                         return error;
4383                 block = XFS_BUF_TO_BMBT_BLOCK(bp);
4384                 XFS_WANT_CORRUPTED_GOTO(
4385                         XFS_BMAP_SANITY_CHECK(mp, block, level),
4386                         error0);
4387                 if (level == 0)
4388                         break;
4389                 pp = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt, block,
4390                         1, mp->m_bmap_dmxr[1]);
4391                 XFS_WANT_CORRUPTED_GOTO(
4392                         XFS_FSB_SANITY_CHECK(mp, INT_GET(*pp, ARCH_CONVERT)),
4393                         error0);
4394                 bno = INT_GET(*pp, ARCH_CONVERT);
4395                 xfs_trans_brelse(tp, bp);
4396         }
4397         /*
4398          * Here with bp and block set to the leftmost leaf node in the tree.
4399          */
4400         room = ifp->if_bytes / (uint)sizeof(*trp);
4401         trp = ifp->if_u1.if_extents;
4402         i = 0;
4403         /*
4404          * Loop over all leaf nodes.  Copy information to the extent list.
4405          */
4406         for (;;) {
4407                 xfs_bmbt_rec_t  *frp, *temp;
4408                 xfs_fsblock_t   nextbno;
4409                 xfs_extnum_t    num_recs;
4410
4411
4412                 num_recs = INT_GET(block->bb_numrecs, ARCH_CONVERT);
4413                 if (unlikely(i + num_recs > room)) {
4414                         ASSERT(i + num_recs <= room);
4415                         xfs_fs_cmn_err(CE_WARN, ip->i_mount,
4416                                 "corrupt dinode %Lu, (btree extents).  Unmount and run xfs_repair.",
4417                                 (unsigned long long) ip->i_ino);
4418                         XFS_ERROR_REPORT("xfs_bmap_read_extents(1)",
4419                                          XFS_ERRLEVEL_LOW,
4420                                         ip->i_mount);
4421                         goto error0;
4422                 }
4423                 XFS_WANT_CORRUPTED_GOTO(
4424                         XFS_BMAP_SANITY_CHECK(mp, block, 0),
4425                         error0);
4426                 /*
4427                  * Read-ahead the next leaf block, if any.
4428                  */
4429                 nextbno = INT_GET(block->bb_rightsib, ARCH_CONVERT);
4430                 if (nextbno != NULLFSBLOCK)
4431                         xfs_btree_reada_bufl(mp, nextbno, 1);
4432                 /*
4433                  * Copy records into the extent list.
4434                  */
4435                 frp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt,
4436                         block, 1, mp->m_bmap_dmxr[0]);
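                /*
                 * Remember where this block's records begin so that the
                 * NOSTATE check below can scan just the records copied
                 * from this block.
                 */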
4437                 temp = trp;
4438                 for (j = 0; j < num_recs; j++, frp++, trp++) {
4439                         trp->l0 = INT_GET(frp->l0, ARCH_CONVERT);
4440                         trp->l1 = INT_GET(frp->l1, ARCH_CONVERT);
4441                 }
4442                 if (exntf == XFS_EXTFMT_NOSTATE) {
4443                         /*
4444                          * Check all attribute bmap btree records and
4445                          * any "older" data bmap btree records for a
4446                          * set bit in the "extent flag" position.
4447                          */
4448                         if (unlikely(xfs_check_nostate_extents(temp, num_recs))) {
4449                                 XFS_ERROR_REPORT("xfs_bmap_read_extents(2)",
4450                                                  XFS_ERRLEVEL_LOW,
4451                                                  ip->i_mount);
4452                                 goto error0;
4453                         }
4454                 }
4455                 i += num_recs;
4456                 xfs_trans_brelse(tp, bp);
4457                 bno = nextbno;
4458                 /*
4459                  * If we've reached the end, stop.
4460                  */
4461                 if (bno == NULLFSBLOCK)
4462                         break;
4463                 if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
4464                                 XFS_BMAP_BTREE_REF)))
4465                         return error;
4466                 block = XFS_BUF_TO_BMBT_BLOCK(bp);
4467         }
4468         ASSERT(i == ifp->if_bytes / (uint)sizeof(*trp));
4469         ASSERT(i == XFS_IFORK_NEXTENTS(ip, whichfork));
4470         xfs_bmap_trace_exlist(fname, ip, i, whichfork);
4471         return 0;
4472 error0:
4473         xfs_trans_brelse(tp, bp);
4474         return XFS_ERROR(EFSCORRUPTED);
4475 }
4476
4477 #ifdef XFS_BMAP_TRACE
4478 /*
4479  * Add bmap trace insert entries for all the contents of the extent list.
4480  */
4481 void
4482 xfs_bmap_trace_exlist(
4483         char            *fname,         /* function name */
4484         xfs_inode_t     *ip,            /* incore inode pointer */
4485         xfs_extnum_t    cnt,            /* count of entries in the list */
4486         int             whichfork)      /* data or attr fork */
4487 {
4488         xfs_bmbt_rec_t  *base;          /* base of extent list */
4489         xfs_bmbt_rec_t  *ep;            /* current entry in extent list */
4490         xfs_extnum_t    idx;            /* extent list entry number */
4491         xfs_ifork_t     *ifp;           /* inode fork pointer */
4492         xfs_bmbt_irec_t s;              /* extent list record */
4493
4494         ifp = XFS_IFORK_PTR(ip, whichfork);
4495         ASSERT(cnt == ifp->if_bytes / (uint)sizeof(*base));
4496         base = ifp->if_u1.if_extents;
4497         for (idx = 0, ep = base; idx < cnt; idx++, ep++) {
4498                 xfs_bmbt_get_all(ep, &s);
4499                 xfs_bmap_trace_insert(fname, "exlist", ip, idx, 1, &s, NULL,
4500                         whichfork);
4501         }
4502 }
4503 #endif
4504
4505 #ifdef DEBUG
4506 /*
4507  * Validate that the bmbt_irecs being returned from bmapi are valid
4508  * given the callers original parameters.  Specifically check the
4509  * ranges of the returned irecs to ensure that they only extent beyond
4510  * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
4511  */
4512 STATIC void
4513 xfs_bmap_validate_ret(
4514         xfs_fileoff_t           bno,
4515         xfs_filblks_t           len,
4516         int                     flags,
4517         xfs_bmbt_irec_t         *mval,
4518         int                     nmap,
4519         int                     ret_nmap)
4520 {
4521         int                     i;              /* index to map values */
4522
4523         ASSERT(ret_nmap <= nmap);
4524
4525         for (i = 0; i < ret_nmap; i++) {
4526                 ASSERT(mval[i].br_blockcount > 0);
4527                 if (!(flags & XFS_BMAPI_ENTIRE)) {
4528                         ASSERT(mval[i].br_startoff >= bno);
4529                         ASSERT(mval[i].br_blockcount <= len);
4530                         ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
4531                                bno + len);
4532                 } else {
4533                         ASSERT(mval[i].br_startoff < bno + len);
4534                         ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
4535                                bno);
4536                 }
4537                 ASSERT(i == 0 ||
4538                        mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
4539                        mval[i].br_startoff);
4540                 if ((flags & XFS_BMAPI_WRITE) && !(flags & XFS_BMAPI_DELAY))
4541                         ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
4542                                mval[i].br_startblock != HOLESTARTBLOCK);
4543                 ASSERT(mval[i].br_state == XFS_EXT_NORM ||
4544                        mval[i].br_state == XFS_EXT_UNWRITTEN);
4545         }
4546 }
4547 #endif /* DEBUG */
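
#ifdef DEBUG
/*
 * A small, hypothetical illustration of the contract checked by
 * xfs_bmap_validate_ret() above, assuming a lone extent covering file
 * blocks [8, 20) and a request for bno 10, len 5: without
 * XFS_BMAPI_ENTIRE the result is trimmed to the request, with it the
 * whole extent may be returned.  This helper is illustrative only and
 * is not called from XFS code.
 */
STATIC void
xfs_bmap_validate_ret_example(void)
{
	xfs_bmbt_irec_t	trimmed;	/* result without XFS_BMAPI_ENTIRE */
	xfs_bmbt_irec_t	entire;		/* result with XFS_BMAPI_ENTIRE */

	trimmed.br_startoff = 10;	/* trimmed to the requested bno */
	trimmed.br_startblock = 102;	/* arbitrary on-disk block */
	trimmed.br_blockcount = 5;	/* trimmed to the requested len */
	trimmed.br_state = XFS_EXT_NORM;
	xfs_bmap_validate_ret(10, 5, 0, &trimmed, 1, 1);

	entire.br_startoff = 8;		/* whole extent, starts before bno */
	entire.br_startblock = 100;
	entire.br_blockcount = 12;	/* extends past bno + len */
	entire.br_state = XFS_EXT_NORM;
	xfs_bmap_validate_ret(10, 5, XFS_BMAPI_ENTIRE, &entire, 1, 1);
}
#endif /* DEBUG */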
4548
4549
4550 /*
4551  * Map file blocks to filesystem blocks.
4552  * File range is given by the bno/len pair.
4553  * Adds blocks to the file if this is a write ("flags & XFS_BMAPI_WRITE" set)
4554  * into a hole or past eof.
4555  * Only allocates blocks from a single allocation group,
4556  * to avoid locking problems.
4557  * The returned value in "firstblock" from the first call in a transaction
4558  * must be remembered and presented to subsequent calls in "firstblock".
4559  * An upper bound for the number of blocks to be allocated is supplied to
4560  * the first call in "total"; if no allocation group has that many free
4561  * blocks then the call will fail (return NULLFSBLOCK in "firstblock").
4562  */
4563 int                                     /* error */
4564 xfs_bmapi(
4565         xfs_trans_t     *tp,            /* transaction pointer */
4566         xfs_inode_t     *ip,            /* incore inode */
4567         xfs_fileoff_t   bno,            /* starting file offs. mapped */
4568         xfs_filblks_t   len,            /* length to map in file */
4569         int             flags,          /* XFS_BMAPI_... */
4570         xfs_fsblock_t   *firstblock,    /* first allocated block
4571                                            controls a.g. for allocs */
4572         xfs_extlen_t    total,          /* total blocks needed */
4573         xfs_bmbt_irec_t *mval,          /* output: map values */
4574         int             *nmap,          /* i/o: mval size/count */
4575         xfs_bmap_free_t *flist)         /* i/o: list extents to free */
4576 {
4577         xfs_fsblock_t   abno;           /* allocated block number */
4578         xfs_extlen_t    alen;           /* allocated extent length */
4579         xfs_fileoff_t   aoff;           /* allocated file offset */
4580         xfs_bmalloca_t  bma;            /* args for xfs_bmap_alloc */
4581         xfs_btree_cur_t *cur;           /* bmap btree cursor */
4582         xfs_fileoff_t   end;            /* end of mapped file region */
4583         int             eof;            /* we've hit the end of extent list */
4584         char            contig;         /* allocation must be one extent */
4585         char            delay;          /* this request is for delayed alloc */
4586         char            exact;          /* don't do all of wasdelayed extent */
4587         xfs_bmbt_rec_t  *ep;            /* extent list entry pointer */
4588         int             error;          /* error return */
4589         xfs_bmbt_irec_t got;            /* current extent list record */
4590         xfs_ifork_t     *ifp;           /* inode fork pointer */
4591         xfs_extlen_t    indlen;         /* indirect blocks length */
4592         xfs_extnum_t    lastx;          /* last useful extent number */
4593         int             logflags;       /* flags for transaction logging */
4594         xfs_extlen_t    minleft;        /* min blocks left after allocation */
4595         xfs_extlen_t    minlen;         /* min allocation size */
4596         xfs_mount_t     *mp;            /* xfs mount structure */
4597         int             n;              /* current extent index */
4598         int             nallocs;        /* number of extents alloc'd */
4599         xfs_extnum_t    nextents;       /* number of extents in file */
4600         xfs_fileoff_t   obno;           /* old block number (offset) */
4601         xfs_bmbt_irec_t prev;           /* previous extent list record */
4602         int             tmp_logflags;   /* temp flags holder */
4603         int             whichfork;      /* data or attr fork */
4604         char            inhole;         /* current location is hole in file */
4605         char            stateless;      /* ignore state flag set */
4606         char            trim;           /* output trimmed to match range */
4607         char            userdata;       /* allocating non-metadata */
4608         char            wasdelay;       /* old extent was delayed */
4609         char            wr;             /* this is a write request */
4610         char            rt;             /* this is a realtime file */
4611         char            rsvd;           /* OK to allocate reserved blocks */
4612 #ifdef DEBUG
4613         xfs_fileoff_t   orig_bno;       /* original block number value */
4614         int             orig_flags;     /* original flags arg value */
4615         xfs_filblks_t   orig_len;       /* original value of len arg */
4616         xfs_bmbt_irec_t *orig_mval;     /* original value of mval */
4617         int             orig_nmap;      /* original value of *nmap */
4618
4619         orig_bno = bno;
4620         orig_len = len;
4621         orig_flags = flags;
4622         orig_mval = mval;
4623         orig_nmap = *nmap;
4624 #endif
4625         ASSERT(*nmap >= 1);
4626         ASSERT(*nmap <= XFS_BMAP_MAX_NMAP || !(flags & XFS_BMAPI_WRITE));
4627         whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
4628                 XFS_ATTR_FORK : XFS_DATA_FORK;
4629         mp = ip->i_mount;
4630         if (unlikely(XFS_TEST_ERROR(
4631             (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4632              XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
4633              XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL),
4634              mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
4635                 XFS_ERROR_REPORT("xfs_bmapi", XFS_ERRLEVEL_LOW, mp);
4636                 return XFS_ERROR(EFSCORRUPTED);
4637         }
4638         if (XFS_FORCED_SHUTDOWN(mp))
4639                 return XFS_ERROR(EIO);
4640         rt = XFS_IS_REALTIME_INODE(ip);
4641         ifp = XFS_IFORK_PTR(ip, whichfork);
4642         ASSERT(ifp->if_ext_max ==
4643                XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
4644         if ((wr = (flags & XFS_BMAPI_WRITE)) != 0)
4645                 XFS_STATS_INC(xs_blk_mapw);
4646         else
4647                 XFS_STATS_INC(xs_blk_mapr);
4648         delay = (flags & XFS_BMAPI_DELAY) != 0;
4649         trim = (flags & XFS_BMAPI_ENTIRE) == 0;
4650         userdata = (flags & XFS_BMAPI_METADATA) == 0;
4651         exact = (flags & XFS_BMAPI_EXACT) != 0;
4652         rsvd = (flags & XFS_BMAPI_RSVBLOCKS) != 0;
4653         contig = (flags & XFS_BMAPI_CONTIG) != 0;
4654         /*
4655          * stateless is used to combine extents which
4656          * differ only due to the state of the extents.
4657          * This technique is used from xfs_getbmap()
4658          * when the caller does not wish to see the
4659          * separation (which is the default).
4660          *
4661          * This technique is also used when writing a
4662                  * buffer which has been partially written
4663                  * (usually by being flushed during a chunkread),
4664          * to ensure one write takes place. This also
4665          * prevents a change in the xfs inode extents at
4666          * this time, intentionally. This change occurs
4667          * on completion of the write operation, in
4668          * xfs_strat_comp(), where the xfs_bmapi() call
4669          * is transactioned, and the extents combined.
4670          */
4671         stateless = (flags & XFS_BMAPI_IGSTATE) != 0;
4672         if (stateless && wr)    /* if writing unwritten space, no */
4673                 wr = 0;         /* allocations are allowed */
4674         ASSERT(wr || !delay);
4675         logflags = 0;
4676         nallocs = 0;
4677         cur = NULL;
4678         if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
4679                 ASSERT(wr && tp);
4680                 if ((error = xfs_bmap_local_to_extents(tp, ip,
4681                                 firstblock, total, &logflags, whichfork)))
4682                         goto error0;
4683         }
4684         if (wr && *firstblock == NULLFSBLOCK) {
4685                 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE)
4686                         minleft = INT_GET(ifp->if_broot->bb_level, ARCH_CONVERT) + 1;
4687                 else
4688                         minleft = 1;
4689         } else
4690                 minleft = 0;
4691         if (!(ifp->if_flags & XFS_IFEXTENTS) &&
4692             (error = xfs_iread_extents(tp, ip, whichfork)))
4693                 goto error0;
4694         ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
4695                 &prev);
4696         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4697         n = 0;
4698         end = bno + len;
4699         obno = bno;
4700         bma.ip = NULL;
4701         while (bno < end && n < *nmap) {
4702                 /*
4703                  * Reading past eof, act as though there's a hole
4704                  * up to end.
4705                  */
4706                 if (eof && !wr)
4707                         got.br_startoff = end;
4708                 inhole = eof || got.br_startoff > bno;
4709                 wasdelay = wr && !inhole && !delay &&
4710                         ISNULLSTARTBLOCK(got.br_startblock);
4711                 /*
4712                  * First, deal with the hole before the allocated space
4713                  * that we found, if any.
4714                  */
4715                 if (wr && (inhole || wasdelay)) {
4716                         /*
4717                          * For the wasdelay case, we could also just
4718                          * allocate the stuff asked for in this bmap call
4719                          * but that wouldn't be as good.
4720                          */
4721                         if (wasdelay && !exact) {
4722                                 alen = (xfs_extlen_t)got.br_blockcount;
4723                                 aoff = got.br_startoff;
4724                                 if (lastx != NULLEXTNUM && lastx) {
4725                                         ep = &ifp->if_u1.if_extents[lastx - 1];
4726                                         xfs_bmbt_get_all(ep, &prev);
4727                                 }
4728                         } else if (wasdelay) {
4729                                 alen = (xfs_extlen_t)
4730                                         XFS_FILBLKS_MIN(len,
4731                                                 (got.br_startoff +
4732                                                  got.br_blockcount) - bno);
4733                                 aoff = bno;
4734                         } else {
4735                                 alen = (xfs_extlen_t)
4736                                         XFS_FILBLKS_MIN(len, MAXEXTLEN);
4737                                 if (!eof)
4738                                         alen = (xfs_extlen_t)
4739                                                 XFS_FILBLKS_MIN(alen,
4740                                                         got.br_startoff - bno);
4741                                 aoff = bno;
4742                         }
4743                         minlen = contig ? alen : 1;
4744                         if (delay) {
4745                                 xfs_extlen_t    extsz = 0;
4746
4747                                 /* Figure out the extent size, adjust alen */
4748                                 if (rt) {
4749                                         if (!(extsz = ip->i_d.di_extsize))
4750                                                 extsz = mp->m_sb.sb_rextsize;
4751                                         alen = roundup(alen, extsz);
4752                                         extsz = alen / mp->m_sb.sb_rextsize;
4753                                 }
4754
4755                                 /*
4756                                  * Make a transaction-less quota reservation for
4757                                  * delayed allocation blocks. This number gets
4758                                  * adjusted later.
4759                                  * We return EDQUOT if we haven't already
4760                                  * allocated blocks inside this loop.
4761                                  */
4762                                 if (XFS_TRANS_RESERVE_QUOTA_NBLKS(
4763                                                 mp, NULL, ip, (long)alen, 0,
4764                                                 rt ? XFS_QMOPT_RES_RTBLKS :
4765                                                      XFS_QMOPT_RES_REGBLKS)) {
4766                                         if (n == 0) {
4767                                                 *nmap = 0;
4768                                                 ASSERT(cur == NULL);
4769                                                 return XFS_ERROR(EDQUOT);
4770                                         }
4771                                         break;
4772                                 }
4773
4774                                 /*
4775                                  * Split the in-core superblock updates for alen
4776                                  * and indlen since they could be coming from different places.
4777                                  */
4778                                 indlen = (xfs_extlen_t)
4779                                         xfs_bmap_worst_indlen(ip, alen);
4780                                 ASSERT(indlen > 0);
4781
4782                                 if (rt)
4783                                         error = xfs_mod_incore_sb(mp,
4784                                                         XFS_SBS_FREXTENTS,
4785                                                         -(extsz), rsvd);
4786                                 else
4787                                         error = xfs_mod_incore_sb(mp,
4788                                                         XFS_SBS_FDBLOCKS,
4789                                                         -(alen), rsvd);
4790                                 if (!error) {
4791                                         error = xfs_mod_incore_sb(mp,
4792                                                         XFS_SBS_FDBLOCKS,
4793                                                         -(indlen), rsvd);
4794                                         if (error && rt) {
4795                                                 xfs_mod_incore_sb(ip->i_mount,
4796                                                         XFS_SBS_FREXTENTS,
4797                                                         extsz, rsvd);
4798                                         } else if (error) {
4799                                                 xfs_mod_incore_sb(ip->i_mount,
4800                                                         XFS_SBS_FDBLOCKS,
4801                                                         alen, rsvd);
4802                                         }
4803                                 }
4804
4805                                 if (error) {
4806                                         if (XFS_IS_QUOTA_ON(ip->i_mount))
4807                                                 /* unreserve the blocks now */
4808                                                 XFS_TRANS_UNRESERVE_QUOTA_NBLKS(
4809                                                         mp, NULL, ip,
4810                                                         (long)alen, 0, rt ?
4811                                                         XFS_QMOPT_RES_RTBLKS :
4812                                                         XFS_QMOPT_RES_REGBLKS);
4813                                         break;
4814                                 }
4815
4816                                 ip->i_delayed_blks += alen;
4817                                 abno = NULLSTARTBLOCK(indlen);
4818                         } else {
4819                                 /*
4820                                  * If first time, allocate and fill in
4821                                  * once-only bma fields.
4822                                  */
4823                                 if (bma.ip == NULL) {
4824                                         bma.tp = tp;
4825                                         bma.ip = ip;
4826                                         bma.prevp = &prev;
4827                                         bma.gotp = &got;
4828                                         bma.total = total;
4829                                         bma.userdata = 0;
4830                                 }
4831                                 /* Indicate if this is the first user data
4832                                  * in the file, or just any user data.
4833                                  */
4834                                 if (userdata) {
4835                                         bma.userdata = (aoff == 0) ?
4836                                                 XFS_ALLOC_INITIAL_USER_DATA :
4837                                                 XFS_ALLOC_USERDATA;
4838                                 }
4839                                 /*
4840                                  * Fill in changeable bma fields.
4841                                  */
4842                                 bma.eof = eof;
4843                                 bma.firstblock = *firstblock;
4844                                 bma.alen = alen;
4845                                 bma.off = aoff;
4846                                 bma.wasdel = wasdelay;
4847                                 bma.minlen = minlen;
4848                                 bma.low = flist->xbf_low;
4849                                 bma.minleft = minleft;
4850                                 /*
4851                                  * We only want to do the alignment at
4852                                  * eof if it is userdata and the allocation
4853                                  * length is larger than a stripe unit.
4854                                  */
4855                                 if (mp->m_dalign && alen >= mp->m_dalign &&
4856                                     userdata && whichfork == XFS_DATA_FORK) {
4857                                         if ((error = xfs_bmap_isaeof(ip, aoff,
4858                                                         whichfork, &bma.aeof)))
4859                                                 goto error0;
4860                                 } else
4861                                         bma.aeof = 0;
4862                                 /*
4863                                  * Call allocator.
4864                                  */
4865                                 if ((error = xfs_bmap_alloc(&bma)))
4866                                         goto error0;
4867                                 /*
4868                                  * Copy out result fields.
4869                                  */
4870                                 abno = bma.rval;
4871                                 if ((flist->xbf_low = bma.low))
4872                                         minleft = 0;
4873                                 alen = bma.alen;
4874                                 aoff = bma.off;
4875                                 ASSERT(*firstblock == NULLFSBLOCK ||
4876                                        XFS_FSB_TO_AGNO(mp, *firstblock) ==
4877                                        XFS_FSB_TO_AGNO(mp, bma.firstblock) ||
4878                                        (flist->xbf_low &&
4879                                         XFS_FSB_TO_AGNO(mp, *firstblock) <
4880                                         XFS_FSB_TO_AGNO(mp, bma.firstblock)));
4881                                 *firstblock = bma.firstblock;
4882                                 if (cur)
4883                                         cur->bc_private.b.firstblock =
4884                                                 *firstblock;
4885                                 if (abno == NULLFSBLOCK)
4886                                         break;
4887                                 if ((ifp->if_flags & XFS_IFBROOT) && !cur) {
4888                                         cur = xfs_btree_init_cursor(mp,
4889                                                 tp, NULL, 0, XFS_BTNUM_BMAP,
4890                                                 ip, whichfork);
4891                                         cur->bc_private.b.firstblock =
4892                                                 *firstblock;
4893                                         cur->bc_private.b.flist = flist;
4894                                 }
4895                                 /*
4896                                  * Bump the number of extents we've allocated
4897                                  * in this call.
4898                                  */
4899                                 nallocs++;
4900                         }
4901                         if (cur)
4902                                 cur->bc_private.b.flags =
4903                                         wasdelay ? XFS_BTCUR_BPRV_WASDEL : 0;
4904                         got.br_startoff = aoff;
4905                         got.br_startblock = abno;
4906                         got.br_blockcount = alen;
4907                         got.br_state = XFS_EXT_NORM;    /* assume normal */
4908                         /*
4909                          * Determine the extent's state, which depends on the
4910                          * filesystem's unwritten-extent support.  A wasdelay
4911                          * extent has been initialized, so don't flag it unwritten.
4912                          */
4913                         if (wr && XFS_SB_VERSION_HASEXTFLGBIT(&mp->m_sb)) {
4914                                 if (!wasdelay && (flags & XFS_BMAPI_PREALLOC))
4915                                         got.br_state = XFS_EXT_UNWRITTEN;
4916                         }
4917                         error = xfs_bmap_add_extent(ip, lastx, &cur, &got,
4918                                 firstblock, flist, &tmp_logflags, whichfork,
4919                                 rsvd);
4920                         logflags |= tmp_logflags;
4921                         if (error)
4922                                 goto error0;
4923                         lastx = ifp->if_lastex;
4924                         ep = &ifp->if_u1.if_extents[lastx];
4925                         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4926                         xfs_bmbt_get_all(ep, &got);
4927                         ASSERT(got.br_startoff <= aoff);
4928                         ASSERT(got.br_startoff + got.br_blockcount >=
4929                                 aoff + alen);
4930 #ifdef DEBUG
4931                         if (delay) {
4932                                 ASSERT(ISNULLSTARTBLOCK(got.br_startblock));
4933                                 ASSERT(STARTBLOCKVAL(got.br_startblock) > 0);
4934                         }
4935                         ASSERT(got.br_state == XFS_EXT_NORM ||
4936                                got.br_state == XFS_EXT_UNWRITTEN);
4937 #endif
4938                         /*
4939                          * Fall down into the found allocated space case.
4940                          */
4941                 } else if (inhole) {
4942                         /*
4943                          * Reading in a hole.
4944                          */
4945                         mval->br_startoff = bno;
4946                         mval->br_startblock = HOLESTARTBLOCK;
4947                         mval->br_blockcount =
4948                                 XFS_FILBLKS_MIN(len, got.br_startoff - bno);
4949                         mval->br_state = XFS_EXT_NORM;
4950                         bno += mval->br_blockcount;
4951                         len -= mval->br_blockcount;
4952                         mval++;
4953                         n++;
4954                         continue;
4955                 }
4956                 /*
4957                  * Then deal with the allocated space we found.
4958                  */
4959                 ASSERT(ep != NULL);
4960                 if (trim && (got.br_startoff + got.br_blockcount > obno)) {
4961                         if (obno > bno)
4962                                 bno = obno;
4963                         ASSERT((bno >= obno) || (n == 0));
4964                         ASSERT(bno < end);
4965                         mval->br_startoff = bno;
4966                         if (ISNULLSTARTBLOCK(got.br_startblock)) {
4967                                 ASSERT(!wr || delay);
4968                                 mval->br_startblock = DELAYSTARTBLOCK;
4969                         } else
4970                                 mval->br_startblock =
4971                                         got.br_startblock +
4972                                         (bno - got.br_startoff);
4973                         /*
4974                          * Return the minimum of what we got and what we
4975                          * asked for, as the length.  We can use the len
4976                          * variable here because it is modified below and
4977                          * we could already have been through this loop
4978                          * if the first part of the allocation didn't
4979                          * overlap what was asked for.
4980                          */
4981                         mval->br_blockcount =
4982                                 XFS_FILBLKS_MIN(end - bno, got.br_blockcount -
4983                                         (bno - got.br_startoff));
4984                         mval->br_state = got.br_state;
4985                         ASSERT(mval->br_blockcount <= len);
4986                 } else {
4987                         *mval = got;
4988                         if (ISNULLSTARTBLOCK(mval->br_startblock)) {
4989                                 ASSERT(!wr || delay);
4990                                 mval->br_startblock = DELAYSTARTBLOCK;
4991                         }
4992                 }
4993
4994                 /*
4995                  * Check if writing previously allocated but
4996                  * unwritten extents.
4997                  */
4998                 if (wr && mval->br_state == XFS_EXT_UNWRITTEN &&
4999                     ((flags & (XFS_BMAPI_PREALLOC|XFS_BMAPI_DELAY)) == 0)) {
5000                         /*
5001                          * Modify (by adding) the state flag, if writing.
5002                          */
5003                         ASSERT(mval->br_blockcount <= len);
5004                         if ((ifp->if_flags & XFS_IFBROOT) && !cur) {
5005                                 cur = xfs_btree_init_cursor(mp,
5006                                         tp, NULL, 0, XFS_BTNUM_BMAP,
5007                                         ip, whichfork);
5008                                 cur->bc_private.b.firstblock =
5009                                         *firstblock;
5010                                 cur->bc_private.b.flist = flist;
5011                         }
5012                         mval->br_state = XFS_EXT_NORM;
5013                         error = xfs_bmap_add_extent(ip, lastx, &cur, mval,
5014                                 firstblock, flist, &tmp_logflags, whichfork,
5015                                 rsvd);
5016                         logflags |= tmp_logflags;
5017                         if (error)
5018                                 goto error0;
5019                         lastx = ifp->if_lastex;
5020                         ep = &ifp->if_u1.if_extents[lastx];
5021                         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
5022                         xfs_bmbt_get_all(ep, &got);
5023                         /*
5024                          * We may have combined previously unwritten
5025                          * space with written space, so generate
5026                          * another request.
5027                          */
5028                         if (mval->br_blockcount < len)
5029                                 continue;
5030                 }
5031
5032                 ASSERT(!trim ||
5033                        ((mval->br_startoff + mval->br_blockcount) <= end));
5034                 ASSERT(!trim || (mval->br_blockcount <= len) ||
5035                        (mval->br_startoff < obno));
5036                 bno = mval->br_startoff + mval->br_blockcount;
5037                 len = end - bno;
5038                 if (n > 0 && mval->br_startoff == mval[-1].br_startoff) {
5039                         ASSERT(mval->br_startblock == mval[-1].br_startblock);
5040                         ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
5041                         ASSERT(mval->br_state == mval[-1].br_state);
5042                         mval[-1].br_blockcount = mval->br_blockcount;
5043                         mval[-1].br_state = mval->br_state;
5044                 } else if (n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
5045                            mval[-1].br_startblock != DELAYSTARTBLOCK &&
5046                            mval[-1].br_startblock != HOLESTARTBLOCK &&
5047                            mval->br_startblock ==
5048                            mval[-1].br_startblock + mval[-1].br_blockcount &&
5049                            (stateless || mval[-1].br_state == mval->br_state)) {
5050                         ASSERT(mval->br_startoff ==
5051                                mval[-1].br_startoff + mval[-1].br_blockcount);
5052                         mval[-1].br_blockcount += mval->br_blockcount;
5053                 } else if (n > 0 &&
5054                            mval->br_startblock == DELAYSTARTBLOCK &&
5055                            mval[-1].br_startblock == DELAYSTARTBLOCK &&
5056                            mval->br_startoff ==
5057                            mval[-1].br_startoff + mval[-1].br_blockcount) {
5058                         mval[-1].br_blockcount += mval->br_blockcount;
5059                         mval[-1].br_state = mval->br_state;
5060                 } else if (!((n == 0) &&
5061                              ((mval->br_startoff + mval->br_blockcount) <=
5062                               obno))) {
5063                         mval++;
5064                         n++;
5065                 }
5066                 /*
5067                  * If we're done, stop now.  Stop when we've allocated
5068                  * XFS_BMAP_MAX_NMAP extents no matter what.  Otherwise
5069                  * the transaction may get too big.
5070                  */
5071                 if (bno >= end || n >= *nmap || nallocs >= *nmap)
5072                         break;
5073                 /*
5074                  * Else go on to the next record.
5075                  */
5076                 ep++;
5077                 lastx++;
5078                 if (lastx >= nextents) {
5079                         eof = 1;
5080                         prev = got;
5081                 } else
5082                         xfs_bmbt_get_all(ep, &got);
5083         }
5084         ifp->if_lastex = lastx;
5085         *nmap = n;
5086         /*
5087          * Transform from btree to extents, give it cur.
5088          */
5089         if (tp && XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
5090             XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max) {
5091                 ASSERT(wr && cur);
5092                 error = xfs_bmap_btree_to_extents(tp, ip, cur,
5093                         &tmp_logflags, whichfork);
5094                 logflags |= tmp_logflags;
5095                 if (error)
5096                         goto error0;
5097         }
5098         ASSERT(ifp->if_ext_max ==
5099                XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
5100         ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE ||
5101                XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max);
5102         error = 0;
5103
5104 error0:
5105         /*
5106          * Log everything.  Do this after the conversion; there's no point in
5107          * logging the extent list if we've converted to btree format.
5108          */
5109         if ((logflags & XFS_ILOG_FEXT(whichfork)) &&
5110             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
5111                 logflags &= ~XFS_ILOG_FEXT(whichfork);
5112         else if ((logflags & XFS_ILOG_FBROOT(whichfork)) &&
5113                  XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
5114                 logflags &= ~XFS_ILOG_FBROOT(whichfork);
5115         /*
5116          * Log whatever the flags say, even on error.  Otherwise we might miss
5117          * detecting a case where the data is changed, there's an error,
5118          * and it's not logged, so we don't shut down when we should.
5119          */
5120         if (logflags) {
5121                 ASSERT(tp && wr);
5122                 xfs_trans_log_inode(tp, ip, logflags);
5123         }
5124         if (cur) {
5125                 if (!error) {
5126                         ASSERT(*firstblock == NULLFSBLOCK ||
5127                                XFS_FSB_TO_AGNO(mp, *firstblock) ==
5128                                XFS_FSB_TO_AGNO(mp,
5129                                        cur->bc_private.b.firstblock) ||
5130                                (flist->xbf_low &&
5131                                 XFS_FSB_TO_AGNO(mp, *firstblock) <
5132                                 XFS_FSB_TO_AGNO(mp,
5133                                         cur->bc_private.b.firstblock)));
5134                         *firstblock = cur->bc_private.b.firstblock;
5135                 }
5136                 xfs_btree_del_cursor(cur,
5137                         error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
5138         }
5139         if (!error)
5140                 xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
5141                         orig_nmap, *nmap);
5142         return error;
5143 }
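
/*
 * A minimal usage sketch of xfs_bmapi() for a read-only mapping, similar
 * in spirit to what xfs_getbmap() below does: no transaction, no
 * allocation, output trimmed to the requested range.  It assumes the
 * caller already holds the inode lock and that the data fork is in
 * extents or btree format; the function name and the size of the map
 * array are illustrative only.
 */
STATIC int
xfs_bmapi_read_sketch(
	xfs_inode_t	*ip,		/* incore inode to map */
	xfs_fileoff_t	bno,		/* first file block to map */
	xfs_filblks_t	len)		/* number of file blocks to map */
{
	xfs_bmbt_irec_t	mval[4];	/* small, arbitrary map array */
	int		nmap;		/* in: size of mval, out: entries used */
	int		error;		/* error return */
	int		i;		/* map index */

	nmap = 4;
	/*
	 * tp, firstblock and flist are only needed when allocating, so a
	 * pure read mapping can pass NULL for all of them; flags == 0
	 * means the returned extents are trimmed to [bno, bno + len).
	 */
	error = xfs_bmapi(NULL, ip, bno, len, 0, NULL, 0, mval, &nmap, NULL);
	if (error)
		return error;
	for (i = 0; i < nmap; i++) {
		if (mval[i].br_startblock == HOLESTARTBLOCK ||
		    mval[i].br_startblock == DELAYSTARTBLOCK)
			continue;	/* hole or delalloc: nothing on disk yet */
		/*
		 * mval[i].br_startblock maps file offset
		 * mval[i].br_startoff for mval[i].br_blockcount blocks.
		 */
	}
	return 0;
}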
5144
5145 /*
5146  * Map file blocks to filesystem blocks, simple version.
5147  * One block (extent) only, read-only.
5148  * For flags, only the XFS_BMAPI_ATTRFORK flag is examined.
5149  * For the other flag values, the effect is as if XFS_BMAPI_METADATA
5150  * was set and all the others were clear.
5151  */
5152 int                                             /* error */
5153 xfs_bmapi_single(
5154         xfs_trans_t     *tp,            /* transaction pointer */
5155         xfs_inode_t     *ip,            /* incore inode */
5156         int             whichfork,      /* data or attr fork */
5157         xfs_fsblock_t   *fsb,           /* output: mapped block */
5158         xfs_fileoff_t   bno)            /* starting file offs. mapped */
5159 {
5160         int             eof;            /* we've hit the end of extent list */
5161         int             error;          /* error return */
5162         xfs_bmbt_irec_t got;            /* current extent list record */
5163         xfs_ifork_t     *ifp;           /* inode fork pointer */
5164         xfs_extnum_t    lastx;          /* last useful extent number */
5165         xfs_bmbt_irec_t prev;           /* previous extent list record */
5166
5167         ifp = XFS_IFORK_PTR(ip, whichfork);
5168         if (unlikely(
5169             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
5170             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)) {
5171                XFS_ERROR_REPORT("xfs_bmapi_single", XFS_ERRLEVEL_LOW,
5172                                 ip->i_mount);
5173                return XFS_ERROR(EFSCORRUPTED);
5174         }
5175         if (XFS_FORCED_SHUTDOWN(ip->i_mount))
5176                 return XFS_ERROR(EIO);
5177         XFS_STATS_INC(xs_blk_mapr);
5178         if (!(ifp->if_flags & XFS_IFEXTENTS) &&
5179             (error = xfs_iread_extents(tp, ip, whichfork)))
5180                 return error;
5181         (void)xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
5182                 &prev);
5183         /*
5184          * Reading past eof, act as though there's a hole
5185          * up to end.
5186          */
5187         if (eof || got.br_startoff > bno) {
5188                 *fsb = NULLFSBLOCK;
5189                 return 0;
5190         }
5191         ASSERT(!ISNULLSTARTBLOCK(got.br_startblock));
5192         ASSERT(bno < got.br_startoff + got.br_blockcount);
5193         *fsb = got.br_startblock + (bno - got.br_startoff);
5194         ifp->if_lastex = lastx;
5195         return 0;
5196 }
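
/*
 * A minimal sketch of using xfs_bmapi_single() above: translate one
 * data-fork file block to a disk address.  The caller is assumed to
 * hold the inode lock; the function name and the use of -1 for a hole
 * are illustrative only.
 */
STATIC int
xfs_bmapi_single_sketch(
	xfs_trans_t	*tp,		/* transaction pointer, may be NULL */
	xfs_inode_t	*ip,		/* incore inode */
	xfs_fileoff_t	bno,		/* file block to look up */
	xfs_daddr_t	*daddrp)	/* output: disk address, -1 for a hole */
{
	xfs_fsblock_t	fsb;		/* mapped filesystem block */
	int		error;		/* error return */

	error = xfs_bmapi_single(tp, ip, XFS_DATA_FORK, &fsb, bno);
	if (error)
		return error;
	if (fsb == NULLFSBLOCK)		/* hole, or reading past eof */
		*daddrp = (xfs_daddr_t)-1;
	else
		*daddrp = XFS_FSB_TO_DADDR(ip->i_mount, fsb);
	return 0;
}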
5197
5198 /*
5199  * Unmap (remove) blocks from a file.
5200  * If nexts is nonzero then the number of extents to remove is limited to
5201  * that value.  *done is set once the entire block range has been unmapped;
5202  * it is left clear while extents remain to be removed.
5203  */
5204 int                                             /* error */
5205 xfs_bunmapi(
5206         xfs_trans_t             *tp,            /* transaction pointer */
5207         struct xfs_inode        *ip,            /* incore inode */
5208         xfs_fileoff_t           bno,            /* starting offset to unmap */
5209         xfs_filblks_t           len,            /* length to unmap in file */
5210         int                     flags,          /* misc flags */
5211         xfs_extnum_t            nexts,          /* number of extents max */
5212         xfs_fsblock_t           *firstblock,    /* first allocated block
5213                                                    controls a.g. for allocs */
5214         xfs_bmap_free_t         *flist,         /* i/o: list extents to free */
5215         int                     *done)          /* set if unmapping is complete */
5216 {
5217         xfs_btree_cur_t         *cur;           /* bmap btree cursor */
5218         xfs_bmbt_irec_t         del;            /* extent being deleted */
5219         int                     eof;            /* is deleting at eof */
5220         xfs_bmbt_rec_t          *ep;            /* extent list entry pointer */
5221         int                     error;          /* error return value */
5222         xfs_extnum_t            extno;          /* extent number in list */
5223         xfs_bmbt_irec_t         got;            /* current extent list entry */
5224         xfs_ifork_t             *ifp;           /* inode fork pointer */
5225         int                     isrt;           /* freeing in rt area */
5226         xfs_extnum_t            lastx;          /* last extent index used */
5227         int                     logflags;       /* transaction logging flags */
5228         xfs_extlen_t            mod;            /* rt extent offset */
5229         xfs_mount_t             *mp;            /* mount structure */
5230         xfs_extnum_t            nextents;       /* size of extent list */
5231         xfs_bmbt_irec_t         prev;           /* previous extent list entry */
5232         xfs_fileoff_t           start;          /* first file offset deleted */
5233         int                     tmp_logflags;   /* partial logging flags */
5234         int                     wasdel;         /* was a delayed alloc extent */
5235         int                     whichfork;      /* data or attribute fork */
5236         int                     rsvd;           /* OK to allocate reserved blocks */
5237         xfs_fsblock_t           sum;
5238
5239         xfs_bunmap_trace(ip, bno, len, flags, (inst_t *)__return_address);
5240         whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
5241                 XFS_ATTR_FORK : XFS_DATA_FORK;
5242         ifp = XFS_IFORK_PTR(ip, whichfork);
5243         if (unlikely(
5244             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5245             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
5246                 XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW,
5247                                  ip->i_mount);
5248                 return XFS_ERROR(EFSCORRUPTED);
5249         }
5250         mp = ip->i_mount;
5251         if (XFS_FORCED_SHUTDOWN(mp))
5252                 return XFS_ERROR(EIO);
5253         rsvd = (flags & XFS_BMAPI_RSVBLOCKS) != 0;
5254         ASSERT(len > 0);
5255         ASSERT(nexts >= 0);
5256         ASSERT(ifp->if_ext_max ==
5257                XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
5258         if (!(ifp->if_flags & XFS_IFEXTENTS) &&
5259             (error = xfs_iread_extents(tp, ip, whichfork)))
5260                 return error;
5261         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
5262         if (nextents == 0) {
5263                 *done = 1;
5264                 return 0;
5265         }
5266         XFS_STATS_INC(xs_blk_unmap);
5267         isrt = (whichfork == XFS_DATA_FORK) &&
5268                (ip->i_d.di_flags & XFS_DIFLAG_REALTIME);
5269         start = bno;
5270         bno = start + len - 1;
5271         ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
5272                 &prev);
5273         /*
5274          * Check to see if the given block number is past the end of the
5275          * file; back up to the last block if so...
5276          */
5277         if (eof) {
5278                 ep = &ifp->if_u1.if_extents[--lastx];
5279                 xfs_bmbt_get_all(ep, &got);
5280                 bno = got.br_startoff + got.br_blockcount - 1;
5281         }
5282         logflags = 0;
5283         if (ifp->if_flags & XFS_IFBROOT) {
5284                 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
5285                 cur = xfs_btree_init_cursor(mp, tp, NULL, 0, XFS_BTNUM_BMAP, ip,
5286                         whichfork);
5287                 cur->bc_private.b.firstblock = *firstblock;
5288                 cur->bc_private.b.flist = flist;
5289                 cur->bc_private.b.flags = 0;
5290         } else
5291                 cur = NULL;
5292         extno = 0;
5293         while (bno != (xfs_fileoff_t)-1 && bno >= start && lastx >= 0 &&
5294                (nexts == 0 || extno < nexts)) {
5295                 /*
5296                  * Is the found extent after a hole in which bno lives?
5297                  * Just back up to the previous extent, if so.
5298                  */
5299                 if (got.br_startoff > bno) {
5300                         if (--lastx < 0)
5301                                 break;
5302                         ep--;
5303                         xfs_bmbt_get_all(ep, &got);
5304                 }
5305                 /*
5306                  * Is the last block of this extent before the range
5307                  * we're supposed to delete?  If so, we're done.
5308                  */
5309                 bno = XFS_FILEOFF_MIN(bno,
5310                         got.br_startoff + got.br_blockcount - 1);
5311                 if (bno < start)
5312                         break;
5313                 /*
5314                  * Then deal with the (possibly delayed) allocated space
5315                  * we found.
5316                  */
5317                 ASSERT(ep != NULL);
5318                 del = got;
5319                 wasdel = ISNULLSTARTBLOCK(del.br_startblock);
5320                 if (got.br_startoff < start) {
5321                         del.br_startoff = start;
5322                         del.br_blockcount -= start - got.br_startoff;
5323                         if (!wasdel)
5324                                 del.br_startblock += start - got.br_startoff;
5325                 }
5326                 if (del.br_startoff + del.br_blockcount > bno + 1)
5327                         del.br_blockcount = bno + 1 - del.br_startoff;
5328                 sum = del.br_startblock + del.br_blockcount;
5329                 if (isrt &&
5330                     (mod = do_mod(sum, mp->m_sb.sb_rextsize))) {
5331                         /*
5332                          * Realtime extent not lined up at the end.
5333                          * The extent could have been split into written
5334                          * and unwritten pieces, or we could just be
5335                          * unmapping part of it.  But we can't really
5336                          * get rid of part of a realtime extent.
5337                          */
5338                         if (del.br_state == XFS_EXT_UNWRITTEN ||
5339                             !XFS_SB_VERSION_HASEXTFLGBIT(&mp->m_sb)) {
5340                                 /*
5341                                  * This piece is unwritten, or we're not
5342                                  * using unwritten extents.  Skip over it.
5343                                  */
5344                                 ASSERT(bno >= mod);
5345                                 bno -= mod > del.br_blockcount ?
5346                                         del.br_blockcount : mod;
5347                                 if (bno < got.br_startoff) {
5348                                         if (--lastx >= 0)
5349                                                 xfs_bmbt_get_all(--ep, &got);
5350                                 }
5351                                 continue;
5352                         }
5353                         /*
5354                          * It's written, turn it unwritten.
5355                          * This is better than zeroing it.
5356                          */
5357                         ASSERT(del.br_state == XFS_EXT_NORM);
5358                         ASSERT(xfs_trans_get_block_res(tp) > 0);
5359                         /*
5360                          * If this spans a realtime extent boundary,
5361                          * chop it back to the start of the one we end at.
5362                          */
5363                         if (del.br_blockcount > mod) {
5364                                 del.br_startoff += del.br_blockcount - mod;
5365                                 del.br_startblock += del.br_blockcount - mod;
5366                                 del.br_blockcount = mod;
5367                         }
5368                         del.br_state = XFS_EXT_UNWRITTEN;
5369                         error = xfs_bmap_add_extent(ip, lastx, &cur, &del,
5370                                 firstblock, flist, &logflags, XFS_DATA_FORK, 0);
5371                         if (error)
5372                                 goto error0;
5373                         goto nodelete;
5374                 }
5375                 if (isrt && (mod = do_mod(del.br_startblock, mp->m_sb.sb_rextsize))) {
5376                         /*
5377                          * Realtime extent is lined up at the end but not
5378                          * at the front.  We'll get rid of full extents if
5379                          * we can.
5380                          */
5381                         mod = mp->m_sb.sb_rextsize - mod;
5382                         if (del.br_blockcount > mod) {
5383                                 del.br_blockcount -= mod;
5384                                 del.br_startoff += mod;
5385                                 del.br_startblock += mod;
5386                         } else if ((del.br_startoff == start &&
5387                                     (del.br_state == XFS_EXT_UNWRITTEN ||
5388                                      xfs_trans_get_block_res(tp) == 0)) ||
5389                                    !XFS_SB_VERSION_HASEXTFLGBIT(&mp->m_sb)) {
5390                                 /*
5391                                  * Can't make it unwritten.  There isn't
5392                                  * a full extent here so just skip it.
5393                                  */
5394                                 ASSERT(bno >= del.br_blockcount);
5395                                 bno -= del.br_blockcount;
5396                                 if (bno < got.br_startoff) {
5397                                         if (--lastx >= 0)
5398                                                 xfs_bmbt_get_all(--ep, &got);
5399                                 }
5400                                 continue;
5401                         } else if (del.br_state == XFS_EXT_UNWRITTEN) {
5402                                 /*
5403                                  * This one is already unwritten.
5404                                  * It must have a written left neighbor.
5405                                  * Unwrite the killed part of that one and
5406                                  * try again.
5407                                  */
5408                                 ASSERT(lastx > 0);
5409                                 xfs_bmbt_get_all(ep - 1, &prev);
5410                                 ASSERT(prev.br_state == XFS_EXT_NORM);
5411                                 ASSERT(!ISNULLSTARTBLOCK(prev.br_startblock));
5412                                 ASSERT(del.br_startblock ==
5413                                        prev.br_startblock + prev.br_blockcount);
5414                                 if (prev.br_startoff < start) {
5415                                         mod = start - prev.br_startoff;
5416                                         prev.br_blockcount -= mod;
5417                                         prev.br_startblock += mod;
5418                                         prev.br_startoff = start;
5419                                 }
5420                                 prev.br_state = XFS_EXT_UNWRITTEN;
5421                                 error = xfs_bmap_add_extent(ip, lastx - 1, &cur,
5422                                         &prev, firstblock, flist, &logflags,
5423                                         XFS_DATA_FORK, 0);
5424                                 if (error)
5425                                         goto error0;
5426                                 goto nodelete;
5427                         } else {
5428                                 ASSERT(del.br_state == XFS_EXT_NORM);
5429                                 del.br_state = XFS_EXT_UNWRITTEN;
5430                                 error = xfs_bmap_add_extent(ip, lastx, &cur,
5431                                         &del, firstblock, flist, &logflags,
5432                                         XFS_DATA_FORK, 0);
5433                                 if (error)
5434                                         goto error0;
5435                                 goto nodelete;
5436                         }
5437                 }
5438                 if (wasdel) {
5439                         ASSERT(STARTBLOCKVAL(del.br_startblock) > 0);
5440                         /* Update realtime/data freespace, unreserve quota */
5441                         if (isrt) {
5442                                 xfs_filblks_t rtexts;
5443
5444                                 rtexts = XFS_FSB_TO_B(mp, del.br_blockcount);
5445                                 do_div(rtexts, mp->m_sb.sb_rextsize);
5446                                 xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS,
5447                                                 (int)rtexts, rsvd);
5448                                 XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, NULL, ip,
5449                                         -((long)del.br_blockcount), 0,
5450                                         XFS_QMOPT_RES_RTBLKS);
5451                         } else {
5452                                 xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS,
5453                                                 (int)del.br_blockcount, rsvd);
5454                                 XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, NULL, ip,
5455                                         -((long)del.br_blockcount), 0,
5456                                         XFS_QMOPT_RES_REGBLKS);
5457                         }
5458                         ip->i_delayed_blks -= del.br_blockcount;
5459                         if (cur)
5460                                 cur->bc_private.b.flags |=
5461                                         XFS_BTCUR_BPRV_WASDEL;
5462                 } else if (cur)
5463                         cur->bc_private.b.flags &= ~XFS_BTCUR_BPRV_WASDEL;
5464                 /*
5465                  * If it's the case where the directory code is running
5466                  * with no block reservation, and the deleted block is in
5467                  * the middle of its extent, and the resulting insert
5468                  * of an extent would cause transformation to btree format,
5469                  * then reject it.  The calling code will then swap
5470                  * blocks around instead.
5471                  * We have to do this now, rather than waiting for the
5472                  * conversion to btree format, since the transaction
5473                  * will be dirty.
5474                  */
5475                 if (!wasdel && xfs_trans_get_block_res(tp) == 0 &&
5476                     XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
5477                     XFS_IFORK_NEXTENTS(ip, whichfork) >= ifp->if_ext_max &&
5478                     del.br_startoff > got.br_startoff &&
5479                     del.br_startoff + del.br_blockcount <
5480                     got.br_startoff + got.br_blockcount) {
5481                         error = XFS_ERROR(ENOSPC);
5482                         goto error0;
5483                 }
5484                 error = xfs_bmap_del_extent(ip, tp, lastx, flist, cur, &del,
5485                         &tmp_logflags, whichfork, rsvd);
5486                 logflags |= tmp_logflags;
5487                 if (error)
5488                         goto error0;
5489                 bno = del.br_startoff - 1;
5490 nodelete:
5491                 lastx = ifp->if_lastex;
5492                 /*
5493                  * If not done, go on to the next (previous) record.
5494                  * Reset ep in case the extents array was re-alloced.
5495                  */
5496                 ep = &ifp->if_u1.if_extents[lastx];
5497                 if (bno != (xfs_fileoff_t)-1 && bno >= start) {
5498                         if (lastx >= XFS_IFORK_NEXTENTS(ip, whichfork) ||
5499                             xfs_bmbt_get_startoff(ep) > bno) {
5500                                 lastx--;
5501                                 ep--;
5502                         }
5503                         if (lastx >= 0)
5504                                 xfs_bmbt_get_all(ep, &got);
5505                         extno++;
5506                 }
5507         }
5508         ifp->if_lastex = lastx;
5509         *done = bno == (xfs_fileoff_t)-1 || bno < start || lastx < 0;
5510         ASSERT(ifp->if_ext_max ==
5511                XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
5512         /*
5513          * Convert to a btree if necessary.
5514          */
5515         if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
5516             XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max) {
5517                 ASSERT(cur == NULL);
5518                 error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist,
5519                         &cur, 0, &tmp_logflags, whichfork);
5520                 logflags |= tmp_logflags;
5521                 if (error)
5522                         goto error0;
5523         }
5524         /*
5525          * transform from btree to extents, give it cur
5526          */
5527         else if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
5528                  XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max) {
5529                 ASSERT(cur != NULL);
5530                 error = xfs_bmap_btree_to_extents(tp, ip, cur, &tmp_logflags,
5531                         whichfork);
5532                 logflags |= tmp_logflags;
5533                 if (error)
5534                         goto error0;
5535         }
5536         /*
5537          * transform from extents to local?
5538          */
5539         ASSERT(ifp->if_ext_max ==
5540                XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
5541         error = 0;
5542 error0:
5543         /*
5544          * Log everything.  Do this after the conversion; there's no point in
5545          * logging the extent list if we've converted to btree format.
5546          */
5547         if ((logflags & XFS_ILOG_FEXT(whichfork)) &&
5548             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
5549                 logflags &= ~XFS_ILOG_FEXT(whichfork);
5550         else if ((logflags & XFS_ILOG_FBROOT(whichfork)) &&
5551                  XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
5552                 logflags &= ~XFS_ILOG_FBROOT(whichfork);
5553         /*
5554          * Log the inode even in the error case; if the transaction
5555          * is dirty we'll need to shut down the filesystem.
5556          */
5557         if (logflags)
5558                 xfs_trans_log_inode(tp, ip, logflags);
5559         if (cur) {
5560                 if (!error) {
5561                         *firstblock = cur->bc_private.b.firstblock;
5562                         cur->bc_private.b.allocated = 0;
5563                 }
5564                 xfs_btree_del_cursor(cur,
5565                         error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
5566         }
5567         return error;
5568 }
5569
5570 /*
5571  * Fcntl interface to xfs_bmapi.
5572  */
5573 int                                             /* error code */
5574 xfs_getbmap(
5575         bhv_desc_t              *bdp,           /* XFS behavior descriptor */
5576         struct getbmap          *bmv,           /* user bmap structure */
5577         void                    __user *ap,     /* pointer to user's array */
5578         int                     interface)      /* interface flags */
5579 {
5580         __int64_t               bmvend;         /* last block requested */
5581         int                     error;          /* return value */
5582         __int64_t               fixlen;         /* length for -1 case */
5583         int                     i;              /* extent number */
5584         xfs_inode_t             *ip;            /* xfs incore inode pointer */
5585         vnode_t                 *vp;            /* corresponding vnode */
5586         int                     lock;           /* lock state */
5587         xfs_bmbt_irec_t         *map;           /* buffer for user's data */
5588         xfs_mount_t             *mp;            /* file system mount point */
5589         int                     nex;            /* # of user extents can do */
5590         int                     nexleft;        /* # of user extents left */
5591         int                     subnex;         /* # of bmapi's can do */
5592         int                     nmap;           /* number of map entries */
5593         struct getbmap          out;            /* output structure */
5594         int                     whichfork;      /* data or attr fork */
5595         int                     prealloced;     /* this is a file with
5596                                                  * preallocated data space */
5597         int                     sh_unwritten;   /* true, if unwritten */
5598                                                 /* extents listed separately */
5599         int                     bmapi_flags;    /* flags for xfs_bmapi */
5600         __int32_t               oflags;         /* getbmapx bmv_oflags field */
5601
5602         vp = BHV_TO_VNODE(bdp);
5603         ip = XFS_BHVTOI(bdp);
5604         mp = ip->i_mount;
5605
5606         whichfork = interface & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK;
5607         sh_unwritten = (interface & BMV_IF_PREALLOC) != 0;
5608
5609         /*      If the BMV_IF_NO_DMAPI_READ interface bit is specified, do not
5610          *      generate a DMAPI read event.  Otherwise, if the DM_EVENT_READ
5611          *      bit is set for the file, generate a read event in order
5612          *      that the DMAPI application may do its thing before we return
5613          *      the extents.  Usually this means restoring user file data to
5614          *      regions of the file that look like holes.
5615          *
5616          *      The "old behavior" (from XFS_IOC_GETBMAP) is to not specify
5617          *      BMV_IF_NO_DMAPI_READ so that read events are generated.
5618          *      If this were not true, callers of ioctl( XFS_IOC_GETBMAP )
5619          *      could misinterpret holes in a DMAPI file as true holes,
5620          *      when in fact they may represent offline user data.
5621          */
5622         if (   (interface & BMV_IF_NO_DMAPI_READ) == 0
5623             && DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ)
5624             && whichfork == XFS_DATA_FORK) {
5625
5626                 error = XFS_SEND_DATA(mp, DM_EVENT_READ, vp, 0, 0, 0, NULL);
5627                 if (error)
5628                         return XFS_ERROR(error);
5629         }
5630
5631         if (whichfork == XFS_ATTR_FORK) {
5632                 if (XFS_IFORK_Q(ip)) {
5633                         if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS &&
5634                             ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE &&
5635                             ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)
5636                                 return XFS_ERROR(EINVAL);
5637                 } else if (unlikely(
5638                            ip->i_d.di_aformat != 0 &&
5639                            ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) {
5640                         XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW,
5641                                          ip->i_mount);
5642                         return XFS_ERROR(EFSCORRUPTED);
5643                 }
5644         } else if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
5645                    ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
5646                    ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
5647                 return XFS_ERROR(EINVAL);
5648         if (whichfork == XFS_DATA_FORK) {
5649                 if (ip->i_d.di_flags & XFS_DIFLAG_PREALLOC) {
5650                         prealloced = 1;
5651                         fixlen = XFS_MAXIOFFSET(mp);
5652                 } else {
5653                         prealloced = 0;
5654                         fixlen = ip->i_d.di_size;
5655                 }
5656         } else {
5657                 prealloced = 0;
5658                 fixlen = 1LL << 32;
5659         }
5660
5661         if (bmv->bmv_length == -1) {
5662                 fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen));
5663                 bmv->bmv_length = MAX( (__int64_t)(fixlen - bmv->bmv_offset),
5664                                         (__int64_t)0);
5665         } else if (bmv->bmv_length < 0)
5666                 return XFS_ERROR(EINVAL);
5667         if (bmv->bmv_length == 0) {
5668                 bmv->bmv_entries = 0;
5669                 return 0;
5670         }
5671         nex = bmv->bmv_count - 1;
5672         if (nex <= 0)
5673                 return XFS_ERROR(EINVAL);
5674         bmvend = bmv->bmv_offset + bmv->bmv_length;
5675
5676         xfs_ilock(ip, XFS_IOLOCK_SHARED);
5677
5678         if (whichfork == XFS_DATA_FORK && ip->i_delayed_blks) {
5679                 /* xfs_fsize_t last_byte = xfs_file_last_byte(ip); */
5680                 VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1, 0, FI_REMAPF, error);
5681         }
5682
5683         ASSERT(whichfork == XFS_ATTR_FORK || ip->i_delayed_blks == 0);
5684
5685         lock = xfs_ilock_map_shared(ip);
5686
5687         /*
5688          * Don't let nex be bigger than the number of extents
5689          * we can have assuming alternating holes and real extents.
5690          */
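        /*
         * (With n real extents, a worst-case reply alternates hole,
         * extent, hole, ..., extent, hole: n extents plus n + 1 holes,
         * i.e. 2 * n + 1 entries.)
         */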
5691         if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1)
5692                 nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1;
5693
5694         bmapi_flags = XFS_BMAPI_AFLAG(whichfork) |
5695                         ((sh_unwritten) ? 0 : XFS_BMAPI_IGSTATE);
5696
5697         /*
5698          * Allocate enough space to handle "subnex" maps at a time.
5699          */
5700         subnex = 16;
5701         map = kmem_alloc(subnex * sizeof(*map), KM_SLEEP);
5702
5703         bmv->bmv_entries = 0;
5704
5705         if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0) {
5706                 error = 0;
5707                 goto unlock_and_return;
5708         }
5709
5710         nexleft = nex;
5711
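        /*
         * Walk the requested range "subnex" mappings at a time, copying
         * each mapping out to the user's array and advancing bmv_offset
         * and bmv_length as we go, until we run out of mappings, user
         * entries, or requested length.
         */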
5712         do {
5713                 nmap = (nexleft > subnex) ? subnex : nexleft;
5714                 error = xfs_bmapi(NULL, ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
5715                                   XFS_BB_TO_FSB(mp, bmv->bmv_length),
5716                                   bmapi_flags, NULL, 0, map, &nmap, NULL);
5717                 if (error)
5718                         goto unlock_and_return;
5719                 ASSERT(nmap <= subnex);
5720
5721                 for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) {
5722                         nexleft--;
5723                         oflags = (map[i].br_state == XFS_EXT_UNWRITTEN) ?
5724                                         BMV_OF_PREALLOC : 0;
5725                         out.bmv_offset = XFS_FSB_TO_BB(mp, map[i].br_startoff);
5726                         out.bmv_length = XFS_FSB_TO_BB(mp, map[i].br_blockcount);
5727                         ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
5728                         if (prealloced &&
5729                             map[i].br_startblock == HOLESTARTBLOCK &&
5730                             out.bmv_offset + out.bmv_length == bmvend) {
5731                                 /*
5732                                  * Came to the hole at the end of the file.
5733                                  */
5734                                 goto unlock_and_return;
5735                         } else {
5736                                 out.bmv_block =
5737                                     (map[i].br_startblock == HOLESTARTBLOCK) ?
5738                                         -1 :
5739                                         XFS_FSB_TO_DB(ip, map[i].br_startblock);
5740
5741                                 /* Return either a getbmap or a getbmapx structure. */
5742                                 if (interface & BMV_IF_EXTENDED) {
5743                                         struct  getbmapx        outx;
5744
5745                                         GETBMAP_CONVERT(out,outx);
5746                                         outx.bmv_oflags = oflags;
5747                                         outx.bmv_unused1 = outx.bmv_unused2 = 0;
5748                                         if (copy_to_user(ap, &outx,
5749                                                         sizeof(outx))) {
5750                                                 error = XFS_ERROR(EFAULT);
5751                                                 goto unlock_and_return;
5752                                         }
5753                                 } else {
5754                                         if (copy_to_user(ap, &out,
5755                                                         sizeof(out))) {
5756                                                 error = XFS_ERROR(EFAULT);
5757                                                 goto unlock_and_return;
5758                                         }
5759                                 }
5760                                 bmv->bmv_offset =
5761                                         out.bmv_offset + out.bmv_length;
5762                                 bmv->bmv_length = MAX((__int64_t)0,
5763                                         (__int64_t)(bmvend - bmv->bmv_offset));
5764                                 bmv->bmv_entries++;
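                                /*
                                 * Advance the user pointer by one
                                 * entry; the stride depends on
                                 * whether the caller asked for
                                 * getbmap or getbmapx structures.
                                 */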
5765                                 ap = (interface & BMV_IF_EXTENDED) ?
5766                                                 (void __user *)
5767                                         ((struct getbmapx __user *)ap + 1) :
5768                                                 (void __user *)
5769                                         ((struct getbmap __user *)ap + 1);
5770                         }
5771                 }
5772         } while (nmap && nexleft && bmv->bmv_length);
5773
5774 unlock_and_return:
5775         xfs_iunlock_map_shared(ip, lock);
5776         xfs_iunlock(ip, XFS_IOLOCK_SHARED);
5777
5778         kmem_free(map, subnex * sizeof(*map));
5779
5780         return error;
5781 }
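
/*
 * Illustrative sketch only, not part of the build: a caller of
 * xfs_getbmap() (e.g. the XFS_IOC_GETBMAP ioctl path) is expected to
 * copy a struct getbmap header in from user space, point "ap" just past
 * that header in the user buffer, and copy the updated header back out
 * afterwards.  Roughly, with "arg" assumed to be a struct getbmap
 * __user * ioctl argument and "bdp"/"iflags" placeholders for whatever
 * the real handler has in hand:
 *
 *      struct getbmap  bm;
 *      int             error;
 *
 *      if (copy_from_user(&bm, arg, sizeof(bm)))
 *              return -XFS_ERROR(EFAULT);
 *      error = xfs_getbmap(bdp, &bm, (void __user *)(arg + 1), iflags);
 *      if (!error && copy_to_user(arg, &bm, sizeof(bm)))
 *              error = XFS_ERROR(EFAULT);
 *
 * See the XFS ioctl code for the actual plumbing.
 */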
5782
5783 /*
5784  * Check the last inode extent to determine whether this allocation will result
5785  * in blocks being allocated at the end of the file. When we allocate new data
5786  * blocks at the end of the file which do not start at the previous data block,
5787  * we will try to align the new blocks at stripe unit boundaries.
5788  */
5789 STATIC int                              /* error */
5790 xfs_bmap_isaeof(
5791         xfs_inode_t     *ip,            /* incore inode pointer */
5792         xfs_fileoff_t   off,            /* file offset in fsblocks */
5793         int             whichfork,      /* data or attribute fork */
5794         char            *aeof)          /* return value */
5795 {
5796         int             error;          /* error return value */
5797         xfs_ifork_t     *ifp;           /* inode fork pointer */
5798         xfs_bmbt_rec_t  *lastrec;       /* extent list entry pointer */
5799         xfs_extnum_t    nextents;       /* size of extent list */
5800         xfs_bmbt_irec_t s;              /* expanded extent list entry */
5801
5802         ASSERT(whichfork == XFS_DATA_FORK);
5803         ifp = XFS_IFORK_PTR(ip, whichfork);
5804         if (!(ifp->if_flags & XFS_IFEXTENTS) &&
5805             (error = xfs_iread_extents(NULL, ip, whichfork)))
5806                 return error;
5807         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
5808         if (nextents == 0) {
5809                 *aeof = 1;
5810                 return 0;
5811         }
5812         /*
5813          * Go to the last extent
5814          */
5815         lastrec = &ifp->if_u1.if_extents[nextents - 1];
5816         xfs_bmbt_get_all(lastrec, &s);
5817         /*
5818          * Check we are allocating in the last extent (for delayed allocations)
5819          * or past the last extent for non-delayed allocations.
5820          */
5821         *aeof = (off >= s.br_startoff &&
5822                  off < s.br_startoff + s.br_blockcount &&
5823                  ISNULLSTARTBLOCK(s.br_startblock)) ||
5824                 off >= s.br_startoff + s.br_blockcount;
5825         return 0;
5826 }
5827
5828 /*
5829  * Check if the endoff is outside the last extent. If so, the caller will grow
5830  * the allocation to a stripe unit boundary.
5831  */
5832 int                                     /* error */
5833 xfs_bmap_eof(
5834         xfs_inode_t     *ip,            /* incore inode pointer */
5835         xfs_fileoff_t   endoff,         /* file offset in fsblocks */
5836         int             whichfork,      /* data or attribute fork */
5837         int             *eof)           /* result value */
5838 {
5839         xfs_fsblock_t   blockcount;     /* extent block count */
5840         int             error;          /* error return value */
5841         xfs_ifork_t     *ifp;           /* inode fork pointer */
5842         xfs_bmbt_rec_t  *lastrec;       /* extent list entry pointer */
5843         xfs_extnum_t    nextents;       /* size of extent list */
5844         xfs_fileoff_t   startoff;       /* extent starting file offset */
5845
5846         ASSERT(whichfork == XFS_DATA_FORK);
5847         ifp = XFS_IFORK_PTR(ip, whichfork);
5848         if (!(ifp->if_flags & XFS_IFEXTENTS) &&
5849             (error = xfs_iread_extents(NULL, ip, whichfork)))
5850                 return error;
5851         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
5852         if (nextents == 0) {
5853                 *eof = 1;
5854                 return 0;
5855         }
5856         /*
5857          * Go to the last extent
5858          */
5859         lastrec = &ifp->if_u1.if_extents[nextents - 1];
5860         startoff = xfs_bmbt_get_startoff(lastrec);
5861         blockcount = xfs_bmbt_get_blockcount(lastrec);
5862         *eof = endoff >= startoff + blockcount;
5863         return 0;
5864 }
5865
5866 #ifdef DEBUG
5867 /*
5868  * Check that the extents list for the inode ip is in the right order.
5869  */
5870 STATIC void
5871 xfs_bmap_check_extents(
5872         xfs_inode_t             *ip,            /* incore inode pointer */
5873         int                     whichfork)      /* data or attr fork */
5874 {
5875         xfs_bmbt_rec_t          *base;          /* base of extents list */
5876         xfs_bmbt_rec_t          *ep;            /* current extent entry */
5877         xfs_ifork_t             *ifp;           /* inode fork pointer */
5878         xfs_extnum_t            nextents;       /* number of extents in list */
5879
5880         ifp = XFS_IFORK_PTR(ip, whichfork);
5881         ASSERT(ifp->if_flags & XFS_IFEXTENTS);
5882         base = ifp->if_u1.if_extents;
5883         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
5884         for (ep = base; ep < &base[nextents - 1]; ep++) {
5885                 xfs_btree_check_rec(XFS_BTNUM_BMAP, (void *)ep,
5886                         (void *)(ep + 1));
5887         }
5888 }
5889
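/*
 * Debug helper: look up the buffer at disk address "bno", first among
 * the buffers held by the btree cursor and, failing that, among the
 * buf log items attached to the cursor's transaction.  Returns NULL if
 * no such buffer is found.
 */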
5890 STATIC
5891 xfs_buf_t *
5892 xfs_bmap_get_bp(
5893         xfs_btree_cur_t         *cur,
5894         xfs_fsblock_t           bno)
5895 {
5896         int i;
5897         xfs_buf_t *bp;
5898
5899         if (!cur)
5900                 return NULL;
5901
5902         bp = NULL;
5903         for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
5904                 bp = cur->bc_bufs[i];
5905                 if (!bp) break;
5906                 if (XFS_BUF_ADDR(bp) == bno)
5907                         break;  /* Found it */
5908         }
5909         if (i == XFS_BTREE_MAXLEVELS)
5910                 bp = NULL;
5911
5912         if (!bp) { /* Chase down all the log items to see if the bp is there */
5913                 xfs_log_item_chunk_t    *licp;
5914                 xfs_trans_t             *tp;
5915
5916                 tp = cur->bc_tp;
5917                 licp = &tp->t_items;
5918                 while (!bp && licp != NULL) {
5919                         if (XFS_LIC_ARE_ALL_FREE(licp)) {
5920                                 licp = licp->lic_next;
5921                                 continue;
5922                         }
5923                         for (i = 0; i < licp->lic_unused; i++) {
5924                                 xfs_log_item_desc_t     *lidp;
5925                                 xfs_log_item_t          *lip;
5926                                 xfs_buf_log_item_t      *bip;
5927                                 xfs_buf_t               *lbp;
5928
5929                                 if (XFS_LIC_ISFREE(licp, i)) {
5930                                         continue;
5931                                 }
5932
5933                                 lidp = XFS_LIC_SLOT(licp, i);
5934                                 lip = lidp->lid_item;
5935                                 if (lip->li_type != XFS_LI_BUF)
5936                                         continue;
5937
5938                                 bip = (xfs_buf_log_item_t *)lip;
5939                                 lbp = bip->bli_buf;
5940
5941                                 if (XFS_BUF_ADDR(lbp) == bno) {
5942                                         bp = lbp;
5943                                         break; /* Found it */
5944                                 }
5945                         }
5946                         licp = licp->lic_next;
5947                 }
5948         }
5949         return bp;
5950 }
5951
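/*
 * Debug helper: sanity check a single bmap btree block.  Verifies that
 * the keys are in ascending order and that no two block pointers in the
 * block are duplicates.  "root" is set when checking the incore root
 * (if_broot), in which case "sz" is the root's size in bytes.
 */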
5952 void
5953 xfs_check_block(
5954         xfs_bmbt_block_t        *block,
5955         xfs_mount_t             *mp,
5956         int                     root,
5957         short                   sz)
5958 {
5959         int                     i, j, dmxr;
5960         xfs_bmbt_ptr_t          *pp, *thispa;   /* pointer to block address */
5961         xfs_bmbt_key_t          *prevp, *keyp;
5962
5963         ASSERT(INT_GET(block->bb_level, ARCH_CONVERT) > 0);
5964
5965         prevp = NULL;
5966         for (i = 1; i <= INT_GET(block->bb_numrecs, ARCH_CONVERT); i++) {
5967                 dmxr = mp->m_bmap_dmxr[0];
5968
5969                 if (root) {
5970                         keyp = XFS_BMAP_BROOT_KEY_ADDR(block, i, sz);
5971                 } else {
5972                         keyp = XFS_BTREE_KEY_ADDR(mp->m_sb.sb_blocksize,
5973                                 xfs_bmbt, block, i, dmxr);
5974                 }
5975
5976                 if (prevp) {
5977                         xfs_btree_check_key(XFS_BTNUM_BMAP, prevp, keyp);
5978                 }
5979                 prevp = keyp;
5980
5981                 /*
5982                  * Compare the block numbers to see if there are dups.
5983                  */
5984
5985                 if (root) {
5986                         pp = XFS_BMAP_BROOT_PTR_ADDR(block, i, sz);
5987                 } else {
5988                         pp = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize,
5989                                 xfs_bmbt, block, i, dmxr);
5990                 }
5991                 for (j = i+1; j <= INT_GET(block->bb_numrecs, ARCH_CONVERT); j++) {
5992                         if (root) {
5993                                 thispa = XFS_BMAP_BROOT_PTR_ADDR(block, j, sz);
5994                         } else {
5995                                 thispa = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize,
5996                                         xfs_bmbt, block, j, dmxr);
5997                         }
5998                         if (INT_GET(*thispa, ARCH_CONVERT) ==
5999                             INT_GET(*pp, ARCH_CONVERT)) {
6000                                 cmn_err(CE_WARN, "%s: thispa(%d) == pp(%d) %Ld",
6001                                         __FUNCTION__, j, i,
6002                                         INT_GET(*thispa, ARCH_CONVERT));
6003                                 panic("%s: ptrs are equal in node\n",
6004                                         __FUNCTION__);
6005                         }
6006                 }
6007         }
6008 }
6009
6010 /*
6011  * Check that the extents for the inode ip are in the right order in all
6012  * btree leaves.
6013  */
6014
6015 STATIC void
6016 xfs_bmap_check_leaf_extents(
6017         xfs_btree_cur_t         *cur,   /* btree cursor or null */
6018         xfs_inode_t             *ip,            /* incore inode pointer */
6019         int                     whichfork)      /* data or attr fork */
6020 {
6021         xfs_bmbt_block_t        *block; /* current btree block */
6022         xfs_fsblock_t           bno;    /* block # of "block" */
6023         xfs_buf_t               *bp;    /* buffer for "block" */
6024         int                     error;  /* error return value */
6025         xfs_extnum_t            i=0;    /* index into the extents list */
6026         xfs_ifork_t             *ifp;   /* fork structure */
6027         int                     level;  /* btree level, for checking */
6028         xfs_mount_t             *mp;    /* file system mount structure */
6029         xfs_bmbt_ptr_t          *pp;    /* pointer to block address */
6030         xfs_bmbt_rec_t          *ep, *lastp;    /* extent pointers in block entry */
6031         int                     bp_release = 0;
6032
6033         if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) {
6034                 return;
6035         }
6036
6037         bno = NULLFSBLOCK;
6038         mp = ip->i_mount;
6039         ifp = XFS_IFORK_PTR(ip, whichfork);
6040         block = ifp->if_broot;
6041         /*
6042          * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
6043          */
6044         ASSERT(INT_GET(block->bb_level, ARCH_CONVERT) > 0);
6045         level = INT_GET(block->bb_level, ARCH_CONVERT);
6046         xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
6047         pp = XFS_BMAP_BROOT_PTR_ADDR(block, 1, ifp->if_broot_bytes);
6048         ASSERT(INT_GET(*pp, ARCH_CONVERT) != NULLDFSBNO);
6049         ASSERT(XFS_FSB_TO_AGNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agcount);
6050         ASSERT(XFS_FSB_TO_AGBNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agblocks);
6051         bno = INT_GET(*pp, ARCH_CONVERT);
6052         /*
6053          * Go down the tree until leaf level is reached, following the first
6054          * pointer (leftmost) at each level.
6055          */
6056         while (level-- > 0) {
6057                 /* See if buf is in cur first */
6058                 bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
6059                 if (bp) {
6060                         bp_release = 0;
6061                 } else {
6062                         bp_release = 1;
6063                 }
6064                 if (!bp && (error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
6065                                 XFS_BMAP_BTREE_REF)))
6066                         goto error_norelse;
6067                 block = XFS_BUF_TO_BMBT_BLOCK(bp);
6068                 XFS_WANT_CORRUPTED_GOTO(
6069                         XFS_BMAP_SANITY_CHECK(mp, block, level),
6070                         error0);
6071                 if (level == 0)
6072                         break;
6073
6074                 /*
6075                  * Check this block for basic sanity (increasing keys and
6076                  * no duplicate blocks).
6077                  */
6078
6079                 xfs_check_block(block, mp, 0, 0);
6080                 pp = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt, block,
6081                         1, mp->m_bmap_dmxr[1]);
6082                 XFS_WANT_CORRUPTED_GOTO(XFS_FSB_SANITY_CHECK(mp, INT_GET(*pp, ARCH_CONVERT)), error0);
6083                 bno = INT_GET(*pp, ARCH_CONVERT);
6084                 if (bp_release) {
6085                         bp_release = 0;
6086                         xfs_trans_brelse(NULL, bp);
6087                 }
6088         }
6089
6090         /*
6091          * Here with bp and block set to the leftmost leaf node in the tree.
6092          */
6093         i = 0;
6094
6095         /*
6096          * Loop over all leaf nodes checking that all extents are in the right order.
6097          */
6098         lastp = NULL;
6099         for (;;) {
6100                 xfs_bmbt_rec_t  *frp;
6101                 xfs_fsblock_t   nextbno;
6102                 xfs_extnum_t    num_recs;
6103
6105                 num_recs = INT_GET(block->bb_numrecs, ARCH_CONVERT);
6106
6107                 /*
6108                  * Note the block number of the next leaf block, if any.
6109                  */
6110
6111                 nextbno = INT_GET(block->bb_rightsib, ARCH_CONVERT);
6112
6113                 /*
6114                  * Check all the extents to make sure they are OK.
6115                  * If we had a previous block, the last entry should
6116                  * conform with the first entry in this one.
6117                  */
6118
6119                 frp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt,
6120                         block, 1, mp->m_bmap_dmxr[0]);
6121
6122                 for (ep = frp; ep < frp + (num_recs - 1); ep++) {
6123                         if (lastp) {
6124                                 xfs_btree_check_rec(XFS_BTNUM_BMAP,
6125                                         (void *)lastp, (void *)ep);
6126                         }
6127                         xfs_btree_check_rec(XFS_BTNUM_BMAP, (void *)ep,
6128                                 (void *)(ep + 1));
6129                 }
6130                 lastp = frp + num_recs - 1; /* For the next iteration */
6131
6132                 i += num_recs;
6133                 if (bp_release) {
6134                         bp_release = 0;
6135                         xfs_trans_brelse(NULL, bp);
6136                 }
6137                 bno = nextbno;
6138                 /*
6139                  * If we've reached the end, stop.
6140                  */
6141                 if (bno == NULLFSBLOCK)
6142                         break;
6143
6144                 bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
6145                 if (bp) {
6146                         bp_release = 0;
6147                 } else {
6148                         bp_release = 1;
6149                 }
6150                 if (!bp && (error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
6151                                 XFS_BMAP_BTREE_REF)))
6152                         goto error_norelse;
6153                 block = XFS_BUF_TO_BMBT_BLOCK(bp);
6154         }
6155         if (bp_release) {
6156                 bp_release = 0;
6157                 xfs_trans_brelse(NULL, bp);
6158         }
6159         return;
6160
6161 error0:
6162         cmn_err(CE_WARN, "%s: at error0", __FUNCTION__);
6163         if (bp_release)
6164                 xfs_trans_brelse(NULL, bp);
6165 error_norelse:
6166         cmn_err(CE_WARN, "%s: BAD after btree leaves for %d extents",
6167                 __FUNCTION__, i);
6168         panic("%s: CORRUPTED BTREE OR SOMETHING", __FUNCTION__);
6169         return;
6170 }
6171 #endif
6172
6173 /*
6174  * Count fsblocks of the given fork.
6175  */
6176 int                                             /* error */
6177 xfs_bmap_count_blocks(
6178         xfs_trans_t             *tp,            /* transaction pointer */
6179         xfs_inode_t             *ip,            /* incore inode */
6180         int                     whichfork,      /* data or attr fork */
6181         int                     *count)         /* out: count of blocks */
6182 {
6183         xfs_bmbt_block_t        *block; /* current btree block */
6184         xfs_fsblock_t           bno;    /* block # of "block" */
6185         xfs_ifork_t             *ifp;   /* fork structure */
6186         int                     level;  /* btree level, for checking */
6187         xfs_mount_t             *mp;    /* file system mount structure */
6188         xfs_bmbt_ptr_t          *pp;    /* pointer to block address */
6189
6190         bno = NULLFSBLOCK;
6191         mp = ip->i_mount;
6192         ifp = XFS_IFORK_PTR(ip, whichfork);
6193         if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS) {
6194                 if (unlikely(xfs_bmap_count_leaves(ifp->if_u1.if_extents,
6195                         ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t),
6196                         count) < 0)) {
6197                         XFS_ERROR_REPORT("xfs_bmap_count_blocks(1)",
6198                                          XFS_ERRLEVEL_LOW, mp);
6199                         return XFS_ERROR(EFSCORRUPTED);
6200                 }
6201                 return 0;
6202         }
6203
6204         /*
6205          * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
6206          */
6207         block = ifp->if_broot;
6208         ASSERT(INT_GET(block->bb_level, ARCH_CONVERT) > 0);
6209         level = INT_GET(block->bb_level, ARCH_CONVERT);
6210         pp = XFS_BMAP_BROOT_PTR_ADDR(block, 1, ifp->if_broot_bytes);
6211         ASSERT(INT_GET(*pp, ARCH_CONVERT) != NULLDFSBNO);
6212         ASSERT(XFS_FSB_TO_AGNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agcount);
6213         ASSERT(XFS_FSB_TO_AGBNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agblocks);
6214         bno = INT_GET(*pp, ARCH_CONVERT);
6215
6216         if (unlikely(xfs_bmap_count_tree(mp, tp, bno, level, count) < 0)) {
6217                 XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW,
6218                                  mp);
6219                 return XFS_ERROR(EFSCORRUPTED);
6220         }
6221
6222         return 0;
6223 }
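
/*
 * Note for callers of the three counting routines here: the result is
 * accumulated into *count rather than assigned, so *count must be
 * zeroed before the first call.  A minimal sketch, assuming a caller
 * that wants the data fork's block count:
 *
 *      int     count = 0;
 *
 *      error = xfs_bmap_count_blocks(tp, ip, XFS_DATA_FORK, &count);
 *      if (error)
 *              return error;
 */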
6224
6225 /*
6226  * Recursively walks each level of a btree
6227  * to count total fsblocks in use.
6228  */
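/*
 * Each invocation counts the block it is handed plus every right
 * sibling of that block (following bb_rightsib), then recurses through
 * the leftmost child into the next level down.  At the leaf level the
 * fsblocks mapped by each leaf's records are added in as well, via
 * xfs_bmap_count_leaves(), so *count covers both the btree blocks and
 * the blocks they map.
 */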
6229 int                                     /* error */
6230 xfs_bmap_count_tree(
6231         xfs_mount_t     *mp,            /* file system mount point */
6232         xfs_trans_t     *tp,            /* transaction pointer */
6233         xfs_fsblock_t   blockno,        /* file system block number */
6234         int             levelin,        /* level in btree */
6235         int             *count)         /* Count of blocks */
6236 {
6237         int                     error;
6238         xfs_buf_t               *bp, *nbp;
6239         int                     level = levelin;
6240         xfs_bmbt_ptr_t          *pp;
6241         xfs_fsblock_t           bno = blockno;
6242         xfs_fsblock_t           nextbno;
6243         xfs_bmbt_block_t        *block, *nextblock;
6244         int                     numrecs;
6245         xfs_bmbt_rec_t          *frp;
6246
6247         if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF)))
6248                 return error;
6249         *count += 1;
6250         block = XFS_BUF_TO_BMBT_BLOCK(bp);
6251
6252         if (--level) {
6253                 /* Not at the leaf level: count this level of node blocks */
6254                 nextbno = INT_GET(block->bb_rightsib, ARCH_CONVERT);
6255                 while (nextbno != NULLFSBLOCK) {
6256                         if ((error = xfs_btree_read_bufl(mp, tp, nextbno,
6257                                 0, &nbp, XFS_BMAP_BTREE_REF)))
6258                                 return error;
6259                         *count += 1;
6260                         nextblock = XFS_BUF_TO_BMBT_BLOCK(nbp);
6261                         nextbno = INT_GET(nextblock->bb_rightsib, ARCH_CONVERT);
6262                         xfs_trans_brelse(tp, nbp);
6263                 }
6264
6265                 /* Dive to the next level */
6266                 pp = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize,
6267                         xfs_bmbt, block, 1, mp->m_bmap_dmxr[1]);
6268                 bno = INT_GET(*pp, ARCH_CONVERT);
6269                 if (unlikely((error =
6270                      xfs_bmap_count_tree(mp, tp, bno, level, count)) < 0)) {
6271                         xfs_trans_brelse(tp, bp);
6272                         XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
6273                                          XFS_ERRLEVEL_LOW, mp);
6274                         return XFS_ERROR(EFSCORRUPTED);
6275                 }
6276                 xfs_trans_brelse(tp, bp);
6277         } else {
6278                 /* At the leaf level: count each leaf block and the blocks its records map */
6279                 for (;;) {
6280                         nextbno = INT_GET(block->bb_rightsib, ARCH_CONVERT);
6281                         numrecs = INT_GET(block->bb_numrecs, ARCH_CONVERT);
6282                         frp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize,
6283                                 xfs_bmbt, block, 1, mp->m_bmap_dmxr[0]);
6284                         if (unlikely(xfs_bmap_count_leaves(frp, numrecs, count) < 0)) {
6285                                 xfs_trans_brelse(tp, bp);
6286                                 XFS_ERROR_REPORT("xfs_bmap_count_tree(2)",
6287                                                  XFS_ERRLEVEL_LOW, mp);
6288                                 return XFS_ERROR(EFSCORRUPTED);
6289                         }
6290                         xfs_trans_brelse(tp, bp);
6291                         if (nextbno == NULLFSBLOCK)
6292                                 break;
6293                         bno = nextbno;
6294                         if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
6295                                 XFS_BMAP_BTREE_REF)))
6296                                 return error;
6297                         *count += 1;
6298                         block = XFS_BUF_TO_BMBT_BLOCK(bp);
6299                 }
6300         }
6301         return 0;
6302 }
6303
6304 /*
6305  * Count leaf blocks given a pointer to an extent list.
6306  */
6307 int
6308 xfs_bmap_count_leaves(
6309         xfs_bmbt_rec_t          *frp,
6310         int                     numrecs,
6311         int                     *count)
6312 {
6313         int             b;
6314
6315         for (b = 1; b <= numrecs; b++, frp++)
6316                 *count += xfs_bmbt_disk_get_blockcount(frp);
6317         return 0;
6318 }