2 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 #include "xfs_types.h"
24 #include "xfs_trans.h"
28 #include "xfs_dmapi.h"
29 #include "xfs_mount.h"
30 #include "xfs_bmap_btree.h"
31 #include "xfs_alloc_btree.h"
32 #include "xfs_ialloc_btree.h"
33 #include "xfs_dir2_sf.h"
34 #include "xfs_attr_sf.h"
35 #include "xfs_dinode.h"
36 #include "xfs_inode.h"
37 #include "xfs_inode_item.h"
38 #include "xfs_alloc.h"
39 #include "xfs_btree.h"
40 #include "xfs_btree_trace.h"
41 #include "xfs_ialloc.h"
42 #include "xfs_itable.h"
44 #include "xfs_error.h"
45 #include "xfs_quota.h"
/*
 * NOTE(review): this file is a numbered listing with gaps — the embedded
 * line numbers skip (48 -> 52, 54 -> 58), so some prototypes/macros are
 * missing here. Text below kept byte-identical.
 */
48 * Prototypes for internal btree functions.
/* Forward declarations for the STATIC (file-local) helpers defined later. */
52 STATIC int xfs_bmbt_killroot(xfs_btree_cur_t *);
53 STATIC void xfs_bmbt_log_keys(xfs_btree_cur_t *, xfs_buf_t *, int, int);
54 STATIC void xfs_bmbt_log_ptrs(xfs_btree_cur_t *, xfs_buf_t *, int, int);
/* Short aliases for the btree-trace status codes used by the macros below. */
58 #define ENTRY XBT_ENTRY
59 #define ERROR XBT_ERROR
63 * Keep the XFS_BMBT_TRACE_ names around for now until all code using them
64 * is converted to be generic and thus switches to the XFS_BTREE_TRACE_ names.
66 #define XFS_BMBT_TRACE_ARGBI(c,b,i) \
67 XFS_BTREE_TRACE_ARGBI(c,b,i)
68 #define XFS_BMBT_TRACE_ARGBII(c,b,i,j) \
69 XFS_BTREE_TRACE_ARGBII(c,b,i,j)
70 #define XFS_BMBT_TRACE_ARGFFFI(c,o,b,i,j) \
71 XFS_BTREE_TRACE_ARGFFFI(c,o,b,i,j)
72 #define XFS_BMBT_TRACE_ARGI(c,i) \
73 XFS_BTREE_TRACE_ARGI(c,i)
74 #define XFS_BMBT_TRACE_ARGIFK(c,i,f,s) \
75 XFS_BTREE_TRACE_ARGIPK(c,i,(union xfs_btree_ptr)f,s)
76 #define XFS_BMBT_TRACE_ARGIFR(c,i,f,r) \
77 XFS_BTREE_TRACE_ARGIPR(c,i, \
78 (union xfs_btree_ptr)f, (union xfs_btree_rec *)r)
79 #define XFS_BMBT_TRACE_ARGIK(c,i,k) \
80 XFS_BTREE_TRACE_ARGIK(c,i,(union xfs_btree_key *)k)
81 #define XFS_BMBT_TRACE_CURSOR(c,s) \
82 XFS_BTREE_TRACE_CURSOR(c,s)
/*
 * NOTE(review): appears to be xfs_bmbt_delrec (see XFS_STATS_INC(xs_bmbt_delrec)
 * below); the function-name line, braces, returns and goto labels are missing
 * from this excerpt — the embedded numbering skips repeatedly (92 -> 96,
 * 132 -> 135, ...). Do NOT reconstruct from this listing alone; code kept
 * byte-identical, comments only.
 *
 * Visible behavior: removes the record at cur->bc_ptrs[level] from the block
 * (memmove of keys/ptrs or recs, logged via xfs_bmbt_log_*), then rebalances:
 * shrink/kill the root, or borrow from / merge with a sibling via a duplicated
 * temporary cursor (tcur).
 */
90 * Delete record pointed to by cur/level.
92 STATIC int /* error */
96 int *stat) /* success/failure */
98 xfs_bmbt_block_t *block; /* bmap btree block */
99 xfs_fsblock_t bno; /* fs-relative block number */
100 xfs_buf_t *bp; /* buffer for block */
101 int error; /* error return value */
102 int i; /* loop counter */
103 int j; /* temp state */
104 xfs_bmbt_key_t key; /* bmap btree key */
105 xfs_bmbt_key_t *kp=NULL; /* pointer to bmap btree key */
106 xfs_fsblock_t lbno; /* left sibling block number */
107 xfs_buf_t *lbp; /* left buffer pointer */
108 xfs_bmbt_block_t *left; /* left btree block */
109 xfs_bmbt_key_t *lkp; /* left btree key */
110 xfs_bmbt_ptr_t *lpp; /* left address pointer */
111 int lrecs=0; /* left record count */
112 xfs_bmbt_rec_t *lrp; /* left record pointer */
113 xfs_mount_t *mp; /* file system mount point */
114 xfs_bmbt_ptr_t *pp; /* pointer to bmap block addr */
115 int ptr; /* key/record index */
116 xfs_fsblock_t rbno; /* right sibling block number */
117 xfs_buf_t *rbp; /* right buffer pointer */
118 xfs_bmbt_block_t *right; /* right btree block */
119 xfs_bmbt_key_t *rkp; /* right btree key */
120 xfs_bmbt_rec_t *rp; /* pointer to bmap btree rec */
121 xfs_bmbt_ptr_t *rpp; /* right address pointer */
122 xfs_bmbt_block_t *rrblock; /* right-right btree block */
123 xfs_buf_t *rrbp; /* right-right buffer pointer */
124 int rrecs=0; /* right record count */
125 xfs_bmbt_rec_t *rrp; /* right record pointer */
126 xfs_btree_cur_t *tcur; /* temporary btree cursor */
127 int numrecs; /* temporary numrec count */
128 int numlrecs, numrrecs;
130 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
131 XFS_BMBT_TRACE_ARGI(cur, level);
132 ptr = cur->bc_ptrs[level];
/* NOTE(review): an early-exit path (ptr == 0?) presumably sat between 132 and 139. */
135 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
139 block = xfs_bmbt_get_block(cur, level, &bp);
140 numrecs = be16_to_cpu(block->bb_numrecs);
142 if ((error = xfs_btree_check_lblock(cur, block, level, bp))) {
143 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
148 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
152 XFS_STATS_INC(xs_bmbt_delrec);
/* Interior node: shift keys+ptrs left over the deleted slot after validating ptrs. */
154 kp = XFS_BMAP_KEY_IADDR(block, 1, cur);
155 pp = XFS_BMAP_PTR_IADDR(block, 1, cur);
157 for (i = ptr; i < numrecs; i++) {
158 if ((error = xfs_btree_check_lptr_disk(cur, pp[i], level))) {
159 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
165 memmove(&kp[ptr - 1], &kp[ptr],
166 (numrecs - ptr) * sizeof(*kp));
167 memmove(&pp[ptr - 1], &pp[ptr],
168 (numrecs - ptr) * sizeof(*pp));
169 xfs_bmbt_log_ptrs(cur, bp, ptr, numrecs - 1);
170 xfs_bmbt_log_keys(cur, bp, ptr, numrecs - 1);
/* Leaf: shift records left over the deleted slot. */
173 rp = XFS_BMAP_REC_IADDR(block, 1, cur);
175 memmove(&rp[ptr - 1], &rp[ptr],
176 (numrecs - ptr) * sizeof(*rp));
177 xfs_bmbt_log_recs(cur, bp, ptr, numrecs - 1);
181 cpu_to_be64(xfs_bmbt_disk_get_startoff(rp));
186 block->bb_numrecs = cpu_to_be16(numrecs);
187 xfs_bmbt_log_block(cur, bp, XFS_BB_NUMRECS);
189 * We're at the root level.
190 * First, shrink the root block in-memory.
191 * Try to get rid of the next level down.
192 * If we can't then there's nothing left to do.
194 if (level == cur->bc_nlevels - 1) {
195 xfs_iroot_realloc(cur->bc_private.b.ip, -1,
196 cur->bc_private.b.whichfork);
197 if ((error = xfs_bmbt_killroot(cur))) {
198 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
201 if (level > 0 && (error = xfs_btree_decrement(cur, level, &j))) {
202 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
205 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
/* Deleted slot 1: propagate the new first key to the parent level. */
209 if (ptr == 1 && (error = xfs_btree_updkey(cur, (union xfs_btree_key *)kp, level + 1))) {
210 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
/* Still at or above the minimum record count: no rebalance needed. */
213 if (numrecs >= XFS_BMAP_BLOCK_IMINRECS(level, cur)) {
214 if (level > 0 && (error = xfs_btree_decrement(cur, level, &j))) {
215 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
218 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
222 rbno = be64_to_cpu(block->bb_rightsib);
223 lbno = be64_to_cpu(block->bb_leftsib);
225 * One child of root, need to get a chance to copy its contents
226 * into the root and delete it. Can't go up to next level,
227 * there's nothing to delete there.
229 if (lbno == NULLFSBLOCK && rbno == NULLFSBLOCK &&
230 level == cur->bc_nlevels - 2) {
231 if ((error = xfs_bmbt_killroot(cur))) {
232 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
235 if (level > 0 && (error = xfs_btree_decrement(cur, level, &i))) {
236 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
239 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
243 ASSERT(rbno != NULLFSBLOCK || lbno != NULLFSBLOCK);
/* Duplicate the cursor so sibling probing does not disturb 'cur'. */
244 if ((error = xfs_btree_dup_cursor(cur, &tcur))) {
245 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
/* Right sibling exists: try borrowing a record from it (lshift into us). */
249 if (rbno != NULLFSBLOCK) {
250 i = xfs_btree_lastrec(tcur, level);
251 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
252 if ((error = xfs_btree_increment(tcur, level, &i))) {
253 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
256 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
257 i = xfs_btree_lastrec(tcur, level);
258 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
259 rbp = tcur->bc_bufs[level];
260 right = XFS_BUF_TO_BMBT_BLOCK(rbp);
262 if ((error = xfs_btree_check_lblock(cur, right, level, rbp))) {
263 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
267 bno = be64_to_cpu(right->bb_leftsib);
268 if (be16_to_cpu(right->bb_numrecs) - 1 >=
269 XFS_BMAP_BLOCK_IMINRECS(level, cur)) {
270 if ((error = xfs_btree_lshift(tcur, level, &i))) {
271 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
275 ASSERT(be16_to_cpu(block->bb_numrecs) >=
276 XFS_BMAP_BLOCK_IMINRECS(level, tcur));
277 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
280 if ((error = xfs_btree_decrement(cur,
282 XFS_BMBT_TRACE_CURSOR(cur,
287 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
292 rrecs = be16_to_cpu(right->bb_numrecs);
293 if (lbno != NULLFSBLOCK) {
294 i = xfs_btree_firstrec(tcur, level);
295 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
296 if ((error = xfs_btree_decrement(tcur, level, &i))) {
297 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
300 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
/* Left sibling exists: try borrowing a record from it (rshift into us). */
303 if (lbno != NULLFSBLOCK) {
304 i = xfs_btree_firstrec(tcur, level);
305 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
307 * decrement to last in block
309 if ((error = xfs_btree_decrement(tcur, level, &i))) {
310 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
313 i = xfs_btree_firstrec(tcur, level);
314 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
315 lbp = tcur->bc_bufs[level];
316 left = XFS_BUF_TO_BMBT_BLOCK(lbp);
318 if ((error = xfs_btree_check_lblock(cur, left, level, lbp))) {
319 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
323 bno = be64_to_cpu(left->bb_rightsib);
324 if (be16_to_cpu(left->bb_numrecs) - 1 >=
325 XFS_BMAP_BLOCK_IMINRECS(level, cur)) {
326 if ((error = xfs_btree_rshift(tcur, level, &i))) {
327 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
331 ASSERT(be16_to_cpu(block->bb_numrecs) >=
332 XFS_BMAP_BLOCK_IMINRECS(level, tcur));
333 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
337 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
342 lrecs = be16_to_cpu(left->bb_numrecs);
/* Neither sibling could lend a record: fall through to a block merge. */
344 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
347 ASSERT(bno != NULLFSBLOCK);
/* Merge with left sibling if the combined records fit in one block... */
348 if (lbno != NULLFSBLOCK &&
349 lrecs + be16_to_cpu(block->bb_numrecs) <= XFS_BMAP_BLOCK_IMAXRECS(level, cur)) {
353 if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, lbno, 0, &lbp,
354 XFS_BMAP_BTREE_REF))) {
355 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
358 left = XFS_BUF_TO_BMBT_BLOCK(lbp);
359 if ((error = xfs_btree_check_lblock(cur, left, level, lbp))) {
360 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
/* ...otherwise merge with the right sibling if that fits. */
363 } else if (rbno != NULLFSBLOCK &&
364 rrecs + be16_to_cpu(block->bb_numrecs) <=
365 XFS_BMAP_BLOCK_IMAXRECS(level, cur)) {
369 if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, rbno, 0, &rbp,
370 XFS_BMAP_BTREE_REF))) {
371 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
374 right = XFS_BUF_TO_BMBT_BLOCK(rbp);
375 if ((error = xfs_btree_check_lblock(cur, right, level, rbp))) {
376 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
379 lrecs = be16_to_cpu(left->bb_numrecs);
381 if (level > 0 && (error = xfs_btree_decrement(cur, level, &i))) {
382 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
385 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
/* Copy all of 'right' (keys+ptrs, or recs) onto the end of 'left'. */
389 numlrecs = be16_to_cpu(left->bb_numrecs);
390 numrrecs = be16_to_cpu(right->bb_numrecs);
392 lkp = XFS_BMAP_KEY_IADDR(left, numlrecs + 1, cur);
393 lpp = XFS_BMAP_PTR_IADDR(left, numlrecs + 1, cur);
394 rkp = XFS_BMAP_KEY_IADDR(right, 1, cur);
395 rpp = XFS_BMAP_PTR_IADDR(right, 1, cur);
397 for (i = 0; i < numrrecs; i++) {
398 if ((error = xfs_btree_check_lptr_disk(cur, rpp[i], level))) {
399 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
404 memcpy(lkp, rkp, numrrecs * sizeof(*lkp));
405 memcpy(lpp, rpp, numrrecs * sizeof(*lpp));
406 xfs_bmbt_log_keys(cur, lbp, numlrecs + 1, numlrecs + numrrecs);
407 xfs_bmbt_log_ptrs(cur, lbp, numlrecs + 1, numlrecs + numrrecs);
409 lrp = XFS_BMAP_REC_IADDR(left, numlrecs + 1, cur);
410 rrp = XFS_BMAP_REC_IADDR(right, 1, cur);
411 memcpy(lrp, rrp, numrrecs * sizeof(*lrp));
412 xfs_bmbt_log_recs(cur, lbp, numlrecs + 1, numlrecs + numrrecs);
/* Fix up sibling links, then free the emptied right block. */
414 be16_add_cpu(&left->bb_numrecs, numrrecs);
415 left->bb_rightsib = right->bb_rightsib;
416 xfs_bmbt_log_block(cur, lbp, XFS_BB_RIGHTSIB | XFS_BB_NUMRECS);
417 if (be64_to_cpu(left->bb_rightsib) != NULLDFSBNO) {
418 if ((error = xfs_btree_read_bufl(mp, cur->bc_tp,
419 be64_to_cpu(left->bb_rightsib),
420 0, &rrbp, XFS_BMAP_BTREE_REF))) {
421 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
424 rrblock = XFS_BUF_TO_BMBT_BLOCK(rrbp);
425 if ((error = xfs_btree_check_lblock(cur, rrblock, level, rrbp))) {
426 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
429 rrblock->bb_leftsib = cpu_to_be64(lbno);
430 xfs_bmbt_log_block(cur, rrbp, XFS_BB_LEFTSIB);
432 xfs_bmap_add_free(XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(rbp)), 1,
433 cur->bc_private.b.flist, mp);
434 cur->bc_private.b.ip->i_d.di_nblocks--;
435 xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip, XFS_ILOG_CORE);
436 XFS_TRANS_MOD_DQUOT_BYINO(mp, cur->bc_tp, cur->bc_private.b.ip,
437 XFS_TRANS_DQ_BCOUNT, -1L);
438 xfs_trans_binval(cur->bc_tp, rbp);
440 cur->bc_bufs[level] = lbp;
441 cur->bc_ptrs[level] += lrecs;
442 cur->bc_ra[level] = 0;
443 } else if ((error = xfs_btree_increment(cur, level + 1, &i))) {
444 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
448 cur->bc_ptrs[level]--;
449 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
/* error0: shared failure path — tear down the temporary cursor. */
455 xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
/*
 * NOTE(review): appears to be xfs_bmbt_insrec (see XFS_STATS_INC(xs_bmbt_insrec)
 * below); the function-name line and many statements are missing from this
 * excerpt (numbering skips 463 -> 465, 486 -> 489, ...). Code kept
 * byte-identical; comments only.
 *
 * Visible behavior: makes room at cur->bc_ptrs[level] — growing the inode
 * root, creating a new root (xfs_btree_new_iroot), shifting into a sibling,
 * or splitting (xfs_btree_split) — then memmoves keys/ptrs or recs right and
 * writes the new entry, logging via xfs_bmbt_log_*.
 */
460 * Insert one record/level. Return information to the caller
461 * allowing the next level up to proceed if necessary.
463 STATIC int /* error */
465 xfs_btree_cur_t *cur,
468 xfs_bmbt_rec_t *recp,
469 xfs_btree_cur_t **curp,
470 int *stat) /* no-go/done/continue */
472 xfs_bmbt_block_t *block; /* bmap btree block */
473 xfs_buf_t *bp; /* buffer for block */
474 int error; /* error return value */
475 int i; /* loop index */
476 xfs_bmbt_key_t key; /* bmap btree key */
477 xfs_bmbt_key_t *kp=NULL; /* pointer to bmap btree key */
478 int logflags; /* inode logging flags */
479 xfs_fsblock_t nbno; /* new block number */
480 struct xfs_btree_cur *ncur; /* new btree cursor */
481 __uint64_t startoff; /* new btree key value */
482 xfs_bmbt_rec_t nrec; /* new record count */
483 int optr; /* old key/record index */
484 xfs_bmbt_ptr_t *pp; /* pointer to bmap block addr */
485 int ptr; /* key/record index */
486 xfs_bmbt_rec_t *rp=NULL; /* pointer to bmap btree rec */
489 ASSERT(level < cur->bc_nlevels);
490 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
491 XFS_BMBT_TRACE_ARGIFR(cur, level, *bnop, recp);
493 key.br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(recp));
494 optr = ptr = cur->bc_ptrs[level];
496 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
500 XFS_STATS_INC(xs_bmbt_insrec);
501 block = xfs_bmbt_get_block(cur, level, &bp);
502 numrecs = be16_to_cpu(block->bb_numrecs);
504 if ((error = xfs_btree_check_lblock(cur, block, level, bp))) {
505 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
/* Debug ordering checks against the record/key currently at 'ptr'. */
508 if (ptr <= numrecs) {
510 rp = XFS_BMAP_REC_IADDR(block, ptr, cur);
511 xfs_btree_check_rec(XFS_BTNUM_BMAP, recp, rp);
513 kp = XFS_BMAP_KEY_IADDR(block, ptr, cur);
514 xfs_btree_check_key(XFS_BTNUM_BMAP, &key, kp);
/* Block is full: make room before inserting. */
519 if (numrecs == XFS_BMAP_BLOCK_IMAXRECS(level, cur)) {
520 if (numrecs < XFS_BMAP_BLOCK_DMAXRECS(level, cur)) {
522 * A root block, that can be made bigger.
524 xfs_iroot_realloc(cur->bc_private.b.ip, 1,
525 cur->bc_private.b.whichfork);
526 block = xfs_bmbt_get_block(cur, level, &bp);
527 } else if (level == cur->bc_nlevels - 1) {
528 if ((error = xfs_btree_new_iroot(cur, &logflags, stat)) ||
530 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
533 xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
535 block = xfs_bmbt_get_block(cur, level, &bp);
/* Try shifting into the right sibling, then the left, then split. */
537 if ((error = xfs_btree_rshift(cur, level, &i))) {
538 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
544 if ((error = xfs_btree_lshift(cur, level, &i))) {
545 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
549 optr = ptr = cur->bc_ptrs[level];
551 union xfs_btree_ptr bno = { .l = cpu_to_be64(nbno) };
552 union xfs_btree_key skey;
553 if ((error = xfs_btree_split(cur, level,
556 XFS_BMBT_TRACE_CURSOR(cur,
560 nbno = be64_to_cpu(bno.l);
561 startoff = be64_to_cpu(skey.bmbt.br_startoff);
563 block = xfs_bmbt_get_block(
567 xfs_btree_check_lblock(cur,
568 block, level, bp))) {
569 XFS_BMBT_TRACE_CURSOR(
574 ptr = cur->bc_ptrs[level];
575 xfs_bmbt_disk_set_allf(&nrec,
579 XFS_BMBT_TRACE_CURSOR(cur,
588 numrecs = be16_to_cpu(block->bb_numrecs);
/* Interior node: shift keys+ptrs right and insert the new key/ptr at 'ptr'. */
590 kp = XFS_BMAP_KEY_IADDR(block, 1, cur);
591 pp = XFS_BMAP_PTR_IADDR(block, 1, cur);
593 for (i = numrecs; i >= ptr; i--) {
594 if ((error = xfs_btree_check_lptr_disk(cur, pp[i - 1],
596 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
601 memmove(&kp[ptr], &kp[ptr - 1],
602 (numrecs - ptr + 1) * sizeof(*kp));
603 memmove(&pp[ptr], &pp[ptr - 1],
604 (numrecs - ptr + 1) * sizeof(*pp));
606 if ((error = xfs_btree_check_lptr(cur, *bnop, level))) {
607 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
612 pp[ptr - 1] = cpu_to_be64(*bnop);
614 block->bb_numrecs = cpu_to_be16(numrecs);
615 xfs_bmbt_log_keys(cur, bp, ptr, numrecs);
616 xfs_bmbt_log_ptrs(cur, bp, ptr, numrecs);
/* Leaf: shift records right and insert the new record at 'ptr'. */
618 rp = XFS_BMAP_REC_IADDR(block, 1, cur);
619 memmove(&rp[ptr], &rp[ptr - 1],
620 (numrecs - ptr + 1) * sizeof(*rp));
623 block->bb_numrecs = cpu_to_be16(numrecs);
624 xfs_bmbt_log_recs(cur, bp, ptr, numrecs);
626 xfs_bmbt_log_block(cur, bp, XFS_BB_NUMRECS);
630 xfs_btree_check_rec(XFS_BTNUM_BMAP, rp + ptr - 1,
633 xfs_btree_check_key(XFS_BTNUM_BMAP, kp + ptr - 1,
/* Inserted at slot 1: propagate the new first key to the parent level. */
637 if (optr == 1 && (error = xfs_btree_updkey(cur, (union xfs_btree_key *)&key, level + 1))) {
638 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
642 if (nbno != NULLFSBLOCK) {
646 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
/*
 * NOTE(review): matches the xfs_bmbt_killroot prototype above; the name line,
 * several local declarations and all return statements are missing from this
 * excerpt (numbering skips 656 -> 670, ...). Code kept byte-identical.
 *
 * Visible behavior: collapses a one-child root — copies the single child
 * block's keys/ptrs up into the inode root (ifp->if_broot), frees the child
 * block and decrements bb_level, reducing tree height by one.
 */
653 xfs_btree_cur_t *cur)
655 xfs_bmbt_block_t *block;
656 xfs_bmbt_block_t *cblock;
670 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
671 level = cur->bc_nlevels - 1;
674 * Don't deal with the root block needs to be a leaf case.
675 * We're just going to turn the thing back into extents anyway.
678 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
681 block = xfs_bmbt_get_block(cur, level, &cbp);
683 * Give up if the root has multiple children.
685 if (be16_to_cpu(block->bb_numrecs) != 1) {
686 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
690 * Only do this if the next level will fit.
691 * Then the data must be copied up to the inode,
692 * instead of freeing the root you free the next level.
694 cbp = cur->bc_bufs[level - 1];
695 cblock = XFS_BUF_TO_BMBT_BLOCK(cbp);
696 if (be16_to_cpu(cblock->bb_numrecs) > XFS_BMAP_BLOCK_DMAXRECS(level, cur)) {
697 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
700 ASSERT(be64_to_cpu(cblock->bb_leftsib) == NULLDFSBNO);
701 ASSERT(be64_to_cpu(cblock->bb_rightsib) == NULLDFSBNO);
702 ip = cur->bc_private.b.ip;
703 ifp = XFS_IFORK_PTR(ip, cur->bc_private.b.whichfork);
704 ASSERT(XFS_BMAP_BLOCK_IMAXRECS(level, cur) ==
705 XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes));
/* Resize the in-inode root to hold the child's record count. */
706 i = (int)(be16_to_cpu(cblock->bb_numrecs) - XFS_BMAP_BLOCK_IMAXRECS(level, cur));
708 xfs_iroot_realloc(ip, i, cur->bc_private.b.whichfork);
709 block = ifp->if_broot;
711 be16_add_cpu(&block->bb_numrecs, i);
712 ASSERT(block->bb_numrecs == cblock->bb_numrecs);
/* Copy the child's keys and (validated) pointers up into the root. */
713 kp = XFS_BMAP_KEY_IADDR(block, 1, cur);
714 ckp = XFS_BMAP_KEY_IADDR(cblock, 1, cur);
715 memcpy(kp, ckp, be16_to_cpu(block->bb_numrecs) * sizeof(*kp));
716 pp = XFS_BMAP_PTR_IADDR(block, 1, cur);
717 cpp = XFS_BMAP_PTR_IADDR(cblock, 1, cur);
719 for (i = 0; i < be16_to_cpu(cblock->bb_numrecs); i++) {
720 if ((error = xfs_btree_check_lptr_disk(cur, cpp[i], level - 1))) {
721 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
726 memcpy(pp, cpp, be16_to_cpu(block->bb_numrecs) * sizeof(*pp));
/* Free the now-redundant child block and account for it. */
727 xfs_bmap_add_free(XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(cbp)), 1,
728 cur->bc_private.b.flist, cur->bc_mp);
729 ip->i_d.di_nblocks--;
730 XFS_TRANS_MOD_DQUOT_BYINO(cur->bc_mp, cur->bc_tp, ip,
731 XFS_TRANS_DQ_BCOUNT, -1L);
732 xfs_trans_binval(cur->bc_tp, cbp);
733 cur->bc_bufs[level - 1] = NULL;
734 be16_add_cpu(&block->bb_level, -1);
735 xfs_trans_log_inode(cur->bc_tp, ip,
736 XFS_ILOG_CORE | XFS_ILOG_FBROOT(cur->bc_private.b.whichfork));
738 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
/*
 * NOTE(review): xfs_bmbt_log_keys per the prototype above; name line, locals
 * and branch structure are missing (numbering skips 747 -> 754, 767 -> 771).
 * Visible behavior: logs the byte range covering keys [kfirst..klast] when the
 * block is a disk buffer, or logs the inode broot fork otherwise.
 */
743 * Log key values from the btree block.
747 xfs_btree_cur_t *cur,
754 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
755 XFS_BMBT_TRACE_ARGBII(cur, bp, kfirst, klast);
758 xfs_bmbt_block_t *block;
763 block = XFS_BUF_TO_BMBT_BLOCK(bp);
764 kp = XFS_BMAP_KEY_DADDR(block, 1, cur);
/* first/last are byte offsets of the key range within the block. */
765 first = (int)((xfs_caddr_t)&kp[kfirst - 1] - (xfs_caddr_t)block);
766 last = (int)(((xfs_caddr_t)&kp[klast] - 1) - (xfs_caddr_t)block);
767 xfs_trans_log_buf(tp, bp, first, last);
771 ip = cur->bc_private.b.ip;
772 xfs_trans_log_inode(tp, ip,
773 XFS_ILOG_FBROOT(cur->bc_private.b.whichfork));
775 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
/*
 * NOTE(review): xfs_bmbt_log_ptrs per the prototype above; same gap pattern
 * as xfs_bmbt_log_keys. Visible behavior: logs the byte range covering block
 * pointers [pfirst..plast] in a disk buffer, or logs the inode broot fork.
 */
779 * Log pointer values from the btree block.
783 xfs_btree_cur_t *cur,
790 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
791 XFS_BMBT_TRACE_ARGBII(cur, bp, pfirst, plast);
794 xfs_bmbt_block_t *block;
799 block = XFS_BUF_TO_BMBT_BLOCK(bp);
800 pp = XFS_BMAP_PTR_DADDR(block, 1, cur);
/* first/last are byte offsets of the pointer range within the block. */
801 first = (int)((xfs_caddr_t)&pp[pfirst - 1] - (xfs_caddr_t)block);
802 last = (int)(((xfs_caddr_t)&pp[plast] - 1) - (xfs_caddr_t)block);
803 xfs_trans_log_buf(tp, bp, first, last);
807 ip = cur->bc_private.b.ip;
808 xfs_trans_log_inode(tp, ip,
809 XFS_ILOG_FBROOT(cur->bc_private.b.whichfork));
811 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
/*
 * NOTE(review): fragment only — signature and the normal-state return path
 * are missing (numbering skips 815 -> 824). Visible: the unwritten branch.
 */
815 * Determine the extent state.
824 ASSERT(blks != 0); /* saved for DMIG */
825 return XFS_EXT_UNWRITTEN;
/*
 * NOTE(review): signature lines are partially missing (numbering skips
 * 835 -> 837, 837 -> 846). Visible behavior: initializes the in-memory root
 * header (magic, level, numrecs, NULL sibling links), then copies keys and
 * pointers from the on-disk root (dblock) into the in-memory root (rblock).
 */
831 * Convert on-disk form of btree root to in-memory form.
835 xfs_bmdr_block_t *dblock,
837 xfs_bmbt_block_t *rblock,
846 rblock->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC);
847 rblock->bb_level = dblock->bb_level;
848 ASSERT(be16_to_cpu(rblock->bb_level) > 0);
849 rblock->bb_numrecs = dblock->bb_numrecs;
/* In-memory root has no siblings. */
850 rblock->bb_leftsib = cpu_to_be64(NULLDFSBNO);
851 rblock->bb_rightsib = cpu_to_be64(NULLDFSBNO);
/* dmxr is first the source max-recs (for ptr addressing), then the copy count. */
852 dmxr = (int)XFS_BTREE_BLOCK_MAXRECS(dblocklen, xfs_bmdr, 0);
853 fkp = XFS_BTREE_KEY_ADDR(xfs_bmdr, dblock, 1);
854 tkp = XFS_BMAP_BROOT_KEY_ADDR(rblock, 1, rblocklen);
855 fpp = XFS_BTREE_PTR_ADDR(xfs_bmdr, dblock, 1, dmxr);
856 tpp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, rblocklen);
857 dmxr = be16_to_cpu(dblock->bb_numrecs);
858 memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
859 memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
/*
 * NOTE(review): delete entry point (presumably xfs_bmbt_delete — name line
 * missing). Visible behavior: calls xfs_bmbt_delrec per level while it
 * reports "continue" (i == 2), then walks back up decrementing any cursor
 * level left pointing at slot 0.
 */
863 * Delete the record pointed to by cur.
867 xfs_btree_cur_t *cur,
868 int *stat) /* success/failure */
870 int error; /* error return value */
874 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
/* i == 2 means the level below asked the parent level to delete too. */
875 for (level = 0, i = 2; i == 2; level++) {
876 if ((error = xfs_bmbt_delrec(cur, level, &i))) {
877 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
882 for (level = 1; level < cur->bc_nlevels; level++) {
883 if (cur->bc_ptrs[level] == 0) {
884 if ((error = xfs_btree_decrement(cur, level,
886 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
893 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
/*
 * NOTE(review): __xfs_bmbt_get_all body with the signature and #if/#else
 * scaffolding partially missing (numbering skips 901 -> 913; the visible
 * "#endif / * XFS_BIG_BLKNOS * /" implies the two startblock decodings are
 * the XFS_BIG_BLKNOS and non-BIG branches). Decodes the packed 128-bit
 * extent record (l0/l1) into s->br_startoff/startblock/blockcount/state.
 */
899 * Convert a compressed bmap extent record to an uncompressed form.
900 * This code must be in sync with the routines xfs_bmbt_get_startoff,
901 * xfs_bmbt_get_startblock, xfs_bmbt_get_blockcount and xfs_bmbt_get_state.
913 ext_flag = (int)(l0 >> (64 - BMBT_EXNTFLAG_BITLEN));
914 s->br_startoff = ((xfs_fileoff_t)l0 &
915 XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
917 s->br_startblock = (((xfs_fsblock_t)l0 & XFS_MASK64LO(9)) << 43) |
918 (((xfs_fsblock_t)l1) >> 21);
924 b = (((xfs_dfsbno_t)l0 & XFS_MASK64LO(9)) << 43) |
925 (((xfs_dfsbno_t)l1) >> 21);
926 ASSERT((b >> 32) == 0 || ISNULLDSTARTBLOCK(b));
927 s->br_startblock = (xfs_fsblock_t)b;
930 s->br_startblock = (xfs_fsblock_t)(((xfs_dfsbno_t)l1) >> 21);
932 #endif /* XFS_BIG_BLKNOS */
933 s->br_blockcount = (xfs_filblks_t)(l1 & XFS_MASK64LO(21));
934 /* This is xfs_extent_state() in-line */
936 ASSERT(s->br_blockcount != 0); /* saved for DMIG */
937 st = XFS_EXT_UNWRITTEN;
/*
 * NOTE(review): xfs_bmbt_get_all fragment (name/return-type lines missing) —
 * thin wrapper expanding a host-endian record via __xfs_bmbt_get_all.
 */
945 xfs_bmbt_rec_host_t *r,
948 __xfs_bmbt_get_all(r->l0, r->l1, s);
/*
 * NOTE(review): signature, else-branch line and return statement are missing
 * (numbering skips 953 -> 957, 966 -> 969). Visible behavior: for non-root
 * levels return the buffered block (and *bpp); for the root level return the
 * inode fork's broot (the buffer pointer handling for that case is in the
 * missing lines).
 */
952 * Get the block pointer for the given level of the cursor.
953 * Fill in the buffer pointer, if applicable.
957 xfs_btree_cur_t *cur,
962 xfs_bmbt_block_t *rval;
964 if (level < cur->bc_nlevels - 1) {
965 *bpp = cur->bc_bufs[level];
966 rval = XFS_BUF_TO_BMBT_BLOCK(*bpp);
969 ifp = XFS_IFORK_PTR(cur->bc_private.b.ip,
970 cur->bc_private.b.whichfork);
971 rval = ifp->if_broot;
/* Low 21 bits of l1 hold the block count (return type line missing from listing). */
977 * Extract the blockcount field from an in memory bmap extent record.
980 xfs_bmbt_get_blockcount(
981 xfs_bmbt_rec_host_t *r)
983 return (xfs_filblks_t)(r->l1 & XFS_MASK64LO(21));
/*
 * NOTE(review): the #if XFS_BIG_BLKNOS / #else lines are missing from this
 * listing (only the #endif survives); the two return paths below are the
 * BIG and non-BIG variants. Startblock spans the low 9 bits of l0 and the
 * high 43 bits of l1.
 */
987 * Extract the startblock field from an in memory bmap extent record.
990 xfs_bmbt_get_startblock(
991 xfs_bmbt_rec_host_t *r)
994 return (((xfs_fsblock_t)r->l0 & XFS_MASK64LO(9)) << 43) |
995 (((xfs_fsblock_t)r->l1) >> 21);
1000 b = (((xfs_dfsbno_t)r->l0 & XFS_MASK64LO(9)) << 43) |
1001 (((xfs_dfsbno_t)r->l1) >> 21);
1002 ASSERT((b >> 32) == 0 || ISNULLDSTARTBLOCK(b));
1003 return (xfs_fsblock_t)b;
1005 return (xfs_fsblock_t)(((xfs_dfsbno_t)r->l1) >> 21);
1007 #endif /* XFS_BIG_BLKNOS */
/* Startoff: l0 with the top extent-flag bit masked off, shifted right 9. */
1011 * Extract the startoff field from an in memory bmap extent record.
1014 xfs_bmbt_get_startoff(
1015 xfs_bmbt_rec_host_t *r)
1017 return ((xfs_fileoff_t)r->l0 &
1018 XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
/*
 * NOTE(review): fragment (name/return-type lines missing, trailing argument
 * cut). Reads the top extent-flag bit of l0 and delegates to
 * xfs_extent_state().
 */
1023 xfs_bmbt_rec_host_t *r)
1027 ext_flag = (int)((r->l0) >> (64 - BMBT_EXNTFLAG_BITLEN));
1028 return xfs_extent_state(xfs_bmbt_get_blockcount(r),
/* Disk-endian wrapper: byte-swap l0/l1 then expand via __xfs_bmbt_get_all. */
1032 /* Endian flipping versions of the bmbt extraction functions */
1034 xfs_bmbt_disk_get_all(
1038 __xfs_bmbt_get_all(be64_to_cpu(r->l0), be64_to_cpu(r->l1), s);
/* Disk-endian counterpart of xfs_bmbt_get_blockcount. */
1042 * Extract the blockcount field from an on disk bmap extent record.
1045 xfs_bmbt_disk_get_blockcount(
1048 return (xfs_filblks_t)(be64_to_cpu(r->l1) & XFS_MASK64LO(21));
/* Disk-endian counterpart of xfs_bmbt_get_startoff. */
1052 * Extract the startoff field from a disk format bmap extent record.
1055 xfs_bmbt_disk_get_startoff(
1058 return ((xfs_fileoff_t)be64_to_cpu(r->l0) &
1059 XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
/*
 * NOTE(review): insert entry point (presumably xfs_bmbt_insert — name line
 * missing; loop top, several locals, returns and the error0 label are also
 * absent). Visible behavior: packs cur->bc_rec.b into nrec, then loops
 * xfs_bmbt_insrec upward per level, switching to the split-produced cursor
 * (ncur) and folding its allocation/firstblock state back into 'cur'.
 */
1063 * Insert the current record at the point referenced by cur.
1065 * A multi-level split of the tree on insert will invalidate the original
1066 * cursor. All callers of this function should assume that the cursor is
1067 * no longer valid and revalidate it.
1071 xfs_btree_cur_t *cur,
1072 int *stat) /* success/failure */
1074 int error; /* error return value */
1078 xfs_btree_cur_t *ncur;
1079 xfs_bmbt_rec_t nrec;
1080 xfs_btree_cur_t *pcur;
1082 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
1085 xfs_bmbt_disk_set_all(&nrec, &cur->bc_rec.b);
1089 if ((error = xfs_bmbt_insrec(pcur, level++, &nbno, &nrec, &ncur,
1092 xfs_btree_del_cursor(pcur, XFS_BTREE_ERROR);
1093 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1096 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
/* A split created pcur; copy its state back into the caller's cursor. */
1097 if (pcur != cur && (ncur || nbno == NULLFSBLOCK)) {
1098 cur->bc_nlevels = pcur->bc_nlevels;
1099 cur->bc_private.b.allocated +=
1100 pcur->bc_private.b.allocated;
1101 pcur->bc_private.b.allocated = 0;
1102 ASSERT((cur->bc_private.b.firstblock != NULLFSBLOCK) ||
1103 XFS_IS_REALTIME_INODE(cur->bc_private.b.ip));
1104 cur->bc_private.b.firstblock =
1105 pcur->bc_private.b.firstblock;
1106 ASSERT(cur->bc_private.b.flist ==
1107 pcur->bc_private.b.flist);
1108 xfs_btree_del_cursor(pcur, XFS_BTREE_NOERROR);
/* Keep going up while insrec reports a new block to insert at the next level. */
1114 } while (nbno != NULLFSBLOCK);
1115 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
1119 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
/*
 * NOTE(review): name line and the buffer/broot branch lines are missing
 * (numbering skips 1128 -> 1135, 1145 -> 1148). Visible behavior: translates
 * the XFS_BB_* field mask into a byte range via the offsets table and logs
 * either the buffer or the inode broot fork.
 */
1124 * Log fields from the btree block header.
1128 xfs_btree_cur_t *cur,
/* Byte offsets of each loggable header field; last entry is total size. */
1135 static const short offsets[] = {
1136 offsetof(xfs_bmbt_block_t, bb_magic),
1137 offsetof(xfs_bmbt_block_t, bb_level),
1138 offsetof(xfs_bmbt_block_t, bb_numrecs),
1139 offsetof(xfs_bmbt_block_t, bb_leftsib),
1140 offsetof(xfs_bmbt_block_t, bb_rightsib),
1141 sizeof(xfs_bmbt_block_t)
1144 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
1145 XFS_BMBT_TRACE_ARGBI(cur, bp, fields);
1148 xfs_btree_offsets(fields, offsets, XFS_BB_NUM_BITS, &first,
1150 xfs_trans_log_buf(tp, bp, first, last);
1152 xfs_trans_log_inode(tp, cur->bc_private.b.ip,
1153 XFS_ILOG_FBROOT(cur->bc_private.b.whichfork));
1154 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
/*
 * NOTE(review): name line and some locals/asserts are missing (numbering
 * skips 1162 -> 1167, 1174 -> 1177). Visible behavior: logs the byte range
 * covering records [rfirst..rlast] of a leaf buffer.
 */
1158 * Log record values from the btree block.
1162 xfs_btree_cur_t *cur,
1167 xfs_bmbt_block_t *block;
1173 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
1174 XFS_BMBT_TRACE_ARGBII(cur, bp, rfirst, rlast);
1177 block = XFS_BUF_TO_BMBT_BLOCK(bp);
1178 rp = XFS_BMAP_REC_DADDR(block, 1, cur);
1179 first = (int)((xfs_caddr_t)&rp[rfirst - 1] - (xfs_caddr_t)block);
1180 last = (int)(((xfs_caddr_t)&rp[rlast] - 1) - (xfs_caddr_t)block);
1181 xfs_trans_log_buf(tp, bp, first, last);
1182 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
/*
 * NOTE(review): xfs_bmbt_set_allf with the name line, the '#if XFS_BIG_BLKNOS'
 * line and the else line missing (the surviving '#else / #endif' show the
 * branch structure). Packs startoff/startblock/blockcount/state into the
 * host-endian l0/l1 pair; the non-BIG branch encodes a null startblock via
 * the low-9-bits-of-l0 + MASK64HI(11) sentinel.
 */
1186 * Set all the fields in a bmap extent record from the arguments.
1190 xfs_bmbt_rec_host_t *r,
1191 xfs_fileoff_t startoff,
1192 xfs_fsblock_t startblock,
1193 xfs_filblks_t blockcount,
1196 int extent_flag = (state == XFS_EXT_NORM) ? 0 : 1;
1198 ASSERT(state == XFS_EXT_NORM || state == XFS_EXT_UNWRITTEN);
1199 ASSERT((startoff & XFS_MASK64HI(64-BMBT_STARTOFF_BITLEN)) == 0);
1200 ASSERT((blockcount & XFS_MASK64HI(64-BMBT_BLOCKCOUNT_BITLEN)) == 0);
1203 ASSERT((startblock & XFS_MASK64HI(64-BMBT_STARTBLOCK_BITLEN)) == 0);
1205 r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
1206 ((xfs_bmbt_rec_base_t)startoff << 9) |
1207 ((xfs_bmbt_rec_base_t)startblock >> 43);
1208 r->l1 = ((xfs_bmbt_rec_base_t)startblock << 21) |
1209 ((xfs_bmbt_rec_base_t)blockcount &
1210 (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
1211 #else /* !XFS_BIG_BLKNOS */
1212 if (ISNULLSTARTBLOCK(startblock)) {
1213 r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
1214 ((xfs_bmbt_rec_base_t)startoff << 9) |
1215 (xfs_bmbt_rec_base_t)XFS_MASK64LO(9);
1216 r->l1 = XFS_MASK64HI(11) |
1217 ((xfs_bmbt_rec_base_t)startblock << 21) |
1218 ((xfs_bmbt_rec_base_t)blockcount &
1219 (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
1221 r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
1222 ((xfs_bmbt_rec_base_t)startoff << 9);
1223 r->l1 = ((xfs_bmbt_rec_base_t)startblock << 21) |
1224 ((xfs_bmbt_rec_base_t)blockcount &
1225 (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
1227 #endif /* XFS_BIG_BLKNOS */
/* Wrapper: unpacks an xfs_bmbt_irec into xfs_bmbt_set_allf (name line missing). */
1231 * Set all the fields in a bmap extent record from the uncompressed form.
1235 xfs_bmbt_rec_host_t *r,
1238 xfs_bmbt_set_allf(r, s->br_startoff, s->br_startblock,
1239 s->br_blockcount, s->br_state);
/*
 * NOTE(review): disk-endian twin of xfs_bmbt_set_allf — identical packing but
 * each word goes through cpu_to_be64(). The '#if XFS_BIG_BLKNOS' and else
 * lines are missing from this listing (the '#else / #endif' survive).
 */
1244 * Set all the fields in a disk format bmap extent record from the arguments.
1247 xfs_bmbt_disk_set_allf(
1249 xfs_fileoff_t startoff,
1250 xfs_fsblock_t startblock,
1251 xfs_filblks_t blockcount,
1254 int extent_flag = (state == XFS_EXT_NORM) ? 0 : 1;
1256 ASSERT(state == XFS_EXT_NORM || state == XFS_EXT_UNWRITTEN);
1257 ASSERT((startoff & XFS_MASK64HI(64-BMBT_STARTOFF_BITLEN)) == 0);
1258 ASSERT((blockcount & XFS_MASK64HI(64-BMBT_BLOCKCOUNT_BITLEN)) == 0);
1261 ASSERT((startblock & XFS_MASK64HI(64-BMBT_STARTBLOCK_BITLEN)) == 0);
1263 r->l0 = cpu_to_be64(
1264 ((xfs_bmbt_rec_base_t)extent_flag << 63) |
1265 ((xfs_bmbt_rec_base_t)startoff << 9) |
1266 ((xfs_bmbt_rec_base_t)startblock >> 43));
1267 r->l1 = cpu_to_be64(
1268 ((xfs_bmbt_rec_base_t)startblock << 21) |
1269 ((xfs_bmbt_rec_base_t)blockcount &
1270 (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)));
1271 #else /* !XFS_BIG_BLKNOS */
1272 if (ISNULLSTARTBLOCK(startblock)) {
1273 r->l0 = cpu_to_be64(
1274 ((xfs_bmbt_rec_base_t)extent_flag << 63) |
1275 ((xfs_bmbt_rec_base_t)startoff << 9) |
1276 (xfs_bmbt_rec_base_t)XFS_MASK64LO(9));
1277 r->l1 = cpu_to_be64(XFS_MASK64HI(11) |
1278 ((xfs_bmbt_rec_base_t)startblock << 21) |
1279 ((xfs_bmbt_rec_base_t)blockcount &
1280 (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)));
1282 r->l0 = cpu_to_be64(
1283 ((xfs_bmbt_rec_base_t)extent_flag << 63) |
1284 ((xfs_bmbt_rec_base_t)startoff << 9));
1285 r->l1 = cpu_to_be64(
1286 ((xfs_bmbt_rec_base_t)startblock << 21) |
1287 ((xfs_bmbt_rec_base_t)blockcount &
1288 (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)));
1290 #endif /* XFS_BIG_BLKNOS */
/* Wrapper: unpacks an xfs_bmbt_irec into xfs_bmbt_disk_set_allf. */
1294 * Set all the fields in a bmap extent record from the uncompressed form.
1297 xfs_bmbt_disk_set_all(
1301 xfs_bmbt_disk_set_allf(r, s->br_startoff, s->br_startblock,
1302 s->br_blockcount, s->br_state);
/* Replaces the low 21 bits of l1 (blockcount) while preserving the high 43. */
1306 * Set the blockcount field in a bmap extent record.
1309 xfs_bmbt_set_blockcount(
1310 xfs_bmbt_rec_host_t *r,
1313 ASSERT((v & XFS_MASK64HI(43)) == 0);
1314 r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64HI(43)) |
1315 (xfs_bmbt_rec_base_t)(v & XFS_MASK64LO(21));
/*
 * NOTE(review): the '#if XFS_BIG_BLKNOS' and else lines are missing (only
 * '#else / #endif' survive). Rewrites the startblock bits split across
 * l0 (low 9) and l1 (high 43); the non-BIG branch encodes/clears the
 * null-startblock sentinel.
 */
1319 * Set the startblock field in a bmap extent record.
1322 xfs_bmbt_set_startblock(
1323 xfs_bmbt_rec_host_t *r,
1327 ASSERT((v & XFS_MASK64HI(12)) == 0);
1328 r->l0 = (r->l0 & (xfs_bmbt_rec_base_t)XFS_MASK64HI(55)) |
1329 (xfs_bmbt_rec_base_t)(v >> 43);
1330 r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)) |
1331 (xfs_bmbt_rec_base_t)(v << 21);
1332 #else /* !XFS_BIG_BLKNOS */
1333 if (ISNULLSTARTBLOCK(v)) {
1334 r->l0 |= (xfs_bmbt_rec_base_t)XFS_MASK64LO(9);
1335 r->l1 = (xfs_bmbt_rec_base_t)XFS_MASK64HI(11) |
1336 ((xfs_bmbt_rec_base_t)v << 21) |
1337 (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
1339 r->l0 &= ~(xfs_bmbt_rec_base_t)XFS_MASK64LO(9);
1340 r->l1 = ((xfs_bmbt_rec_base_t)v << 21) |
1341 (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
1343 #endif /* XFS_BIG_BLKNOS */
/* Rewrites bits 9..62 of l0 (startoff), keeping the extent flag (bit 63)
 * and the startblock-high bits (low 9) intact. */
1347 * Set the startoff field in a bmap extent record.
1350 xfs_bmbt_set_startoff(
1351 xfs_bmbt_rec_host_t *r,
1354 ASSERT((v & XFS_MASK64HI(9)) == 0);
1355 r->l0 = (r->l0 & (xfs_bmbt_rec_base_t) XFS_MASK64HI(1)) |
1356 ((xfs_bmbt_rec_base_t)v << 9) |
1357 (r->l0 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(9));
1361 * Set the extent state field in a bmap extent record.
1365 xfs_bmbt_rec_host_t *r,
1368 ASSERT(v == XFS_EXT_NORM || v == XFS_EXT_UNWRITTEN);
1369 if (v == XFS_EXT_NORM)
1370 r->l0 &= XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN);
1372 r->l0 |= XFS_MASK64HI(BMBT_EXNTFLAG_BITLEN);
1376 * Convert in-memory form of btree root to on-disk form.
1380 xfs_bmbt_block_t *rblock,
1382 xfs_bmdr_block_t *dblock,
1386 xfs_bmbt_key_t *fkp;
1388 xfs_bmbt_key_t *tkp;
1391 ASSERT(be32_to_cpu(rblock->bb_magic) == XFS_BMAP_MAGIC);
1392 ASSERT(be64_to_cpu(rblock->bb_leftsib) == NULLDFSBNO);
1393 ASSERT(be64_to_cpu(rblock->bb_rightsib) == NULLDFSBNO);
1394 ASSERT(be16_to_cpu(rblock->bb_level) > 0);
1395 dblock->bb_level = rblock->bb_level;
1396 dblock->bb_numrecs = rblock->bb_numrecs;
1397 dmxr = (int)XFS_BTREE_BLOCK_MAXRECS(dblocklen, xfs_bmdr, 0);
1398 fkp = XFS_BMAP_BROOT_KEY_ADDR(rblock, 1, rblocklen);
1399 tkp = XFS_BTREE_KEY_ADDR(xfs_bmdr, dblock, 1);
1400 fpp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, rblocklen);
1401 tpp = XFS_BTREE_PTR_ADDR(xfs_bmdr, dblock, 1, dmxr);
1402 dmxr = be16_to_cpu(dblock->bb_numrecs);
1403 memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
1404 memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
1408 * Check extent records, which have just been read, for
1409 * any bit in the extent flag field. ASSERT on debug
1410 * kernels, as this condition should not occur.
1411 * Return an error condition (1) if any flags found,
1412 * otherwise return 0.
1416 xfs_check_nostate_extents(
1421 for (; num > 0; num--, idx++) {
1422 xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, idx);
1424 (64 - BMBT_EXNTFLAG_BITLEN)) != 0) {
1433 STATIC struct xfs_btree_cur *
1434 xfs_bmbt_dup_cursor(
1435 struct xfs_btree_cur *cur)
1437 struct xfs_btree_cur *new;
1439 new = xfs_bmbt_init_cursor(cur->bc_mp, cur->bc_tp,
1440 cur->bc_private.b.ip, cur->bc_private.b.whichfork);
1443 * Copy the firstblock, flist, and flags values,
1444 * since init cursor doesn't get them.
1446 new->bc_private.b.firstblock = cur->bc_private.b.firstblock;
1447 new->bc_private.b.flist = cur->bc_private.b.flist;
1448 new->bc_private.b.flags = cur->bc_private.b.flags;
1454 xfs_bmbt_alloc_block(
1455 struct xfs_btree_cur *cur,
1456 union xfs_btree_ptr *start,
1457 union xfs_btree_ptr *new,
1461 xfs_alloc_arg_t args; /* block allocation args */
1462 int error; /* error return value */
1464 memset(&args, 0, sizeof(args));
1465 args.tp = cur->bc_tp;
1466 args.mp = cur->bc_mp;
1467 args.fsbno = cur->bc_private.b.firstblock;
1468 args.firstblock = args.fsbno;
1470 if (args.fsbno == NULLFSBLOCK) {
1471 args.fsbno = be64_to_cpu(start->l);
1472 args.type = XFS_ALLOCTYPE_START_BNO;
1474 * Make sure there is sufficient room left in the AG to
1475 * complete a full tree split for an extent insert. If
1476 * we are converting the middle part of an extent then
1477 * we may need space for two tree splits.
1479 * We are relying on the caller to make the correct block
1480 * reservation for this operation to succeed. If the
1481 * reservation amount is insufficient then we may fail a
1482 * block allocation here and corrupt the filesystem.
1484 args.minleft = xfs_trans_get_block_res(args.tp);
1485 } else if (cur->bc_private.b.flist->xbf_low) {
1486 args.type = XFS_ALLOCTYPE_START_BNO;
1488 args.type = XFS_ALLOCTYPE_NEAR_BNO;
1491 args.minlen = args.maxlen = args.prod = 1;
1492 args.wasdel = cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL;
1493 if (!args.wasdel && xfs_trans_get_block_res(args.tp) == 0) {
1494 error = XFS_ERROR(ENOSPC);
1497 error = xfs_alloc_vextent(&args);
1501 if (args.fsbno == NULLFSBLOCK && args.minleft) {
1503 * Could not find an AG with enough free space to satisfy
1504 * a full btree split. Try again without minleft and if
1505 * successful activate the lowspace algorithm.
1508 args.type = XFS_ALLOCTYPE_FIRST_AG;
1510 error = xfs_alloc_vextent(&args);
1513 cur->bc_private.b.flist->xbf_low = 1;
1515 if (args.fsbno == NULLFSBLOCK) {
1516 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
1520 ASSERT(args.len == 1);
1521 cur->bc_private.b.firstblock = args.fsbno;
1522 cur->bc_private.b.allocated++;
1523 cur->bc_private.b.ip->i_d.di_nblocks++;
1524 xfs_trans_log_inode(args.tp, cur->bc_private.b.ip, XFS_ILOG_CORE);
1525 XFS_TRANS_MOD_DQUOT_BYINO(args.mp, args.tp, cur->bc_private.b.ip,
1526 XFS_TRANS_DQ_BCOUNT, 1L);
1528 new->l = cpu_to_be64(args.fsbno);
1530 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
1535 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
1540 xfs_bmbt_get_maxrecs(
1541 struct xfs_btree_cur *cur,
1544 return XFS_BMAP_BLOCK_IMAXRECS(level, cur);
1548 xfs_bmbt_init_key_from_rec(
1549 union xfs_btree_key *key,
1550 union xfs_btree_rec *rec)
1552 key->bmbt.br_startoff =
1553 cpu_to_be64(xfs_bmbt_disk_get_startoff(&rec->bmbt));
1557 xfs_bmbt_init_ptr_from_cur(
1558 struct xfs_btree_cur *cur,
1559 union xfs_btree_ptr *ptr)
1566 struct xfs_btree_cur *cur,
1567 union xfs_btree_key *key)
1569 return (__int64_t)be64_to_cpu(key->bmbt.br_startoff) -
1570 cur->bc_rec.b.br_startoff;
1573 #ifdef XFS_BTREE_TRACE
1574 ktrace_t *xfs_bmbt_trace_buf;
1577 xfs_bmbt_trace_enter(
1578 struct xfs_btree_cur *cur,
1595 struct xfs_inode *ip = cur->bc_private.b.ip;
1596 int whichfork = cur->bc_private.b.whichfork;
1598 ktrace_enter(xfs_bmbt_trace_buf,
1599 (void *)((__psint_t)type | (whichfork << 8) | (line << 16)),
1600 (void *)func, (void *)s, (void *)ip, (void *)cur,
1601 (void *)a0, (void *)a1, (void *)a2, (void *)a3,
1602 (void *)a4, (void *)a5, (void *)a6, (void *)a7,
1603 (void *)a8, (void *)a9, (void *)a10);
1604 ktrace_enter(ip->i_btrace,
1605 (void *)((__psint_t)type | (whichfork << 8) | (line << 16)),
1606 (void *)func, (void *)s, (void *)ip, (void *)cur,
1607 (void *)a0, (void *)a1, (void *)a2, (void *)a3,
1608 (void *)a4, (void *)a5, (void *)a6, (void *)a7,
1609 (void *)a8, (void *)a9, (void *)a10);
1613 xfs_bmbt_trace_cursor(
1614 struct xfs_btree_cur *cur,
1619 struct xfs_bmbt_rec_host r;
1621 xfs_bmbt_set_all(&r, &cur->bc_rec.b);
1623 *s0 = (cur->bc_nlevels << 24) |
1624 (cur->bc_private.b.flags << 16) |
1625 cur->bc_private.b.allocated;
1632 struct xfs_btree_cur *cur,
1633 union xfs_btree_key *key,
1637 *l0 = be64_to_cpu(key->bmbt.br_startoff);
1642 xfs_bmbt_trace_record(
1643 struct xfs_btree_cur *cur,
1644 union xfs_btree_rec *rec,
1649 struct xfs_bmbt_irec irec;
1651 xfs_bmbt_disk_get_all(&rec->bmbt, &irec);
1652 *l0 = irec.br_startoff;
1653 *l1 = irec.br_startblock;
1654 *l2 = irec.br_blockcount;
1656 #endif /* XFS_BTREE_TRACE */
1658 static const struct xfs_btree_ops xfs_bmbt_ops = {
1659 .rec_len = sizeof(xfs_bmbt_rec_t),
1660 .key_len = sizeof(xfs_bmbt_key_t),
1662 .dup_cursor = xfs_bmbt_dup_cursor,
1663 .alloc_block = xfs_bmbt_alloc_block,
1664 .get_maxrecs = xfs_bmbt_get_maxrecs,
1665 .init_key_from_rec = xfs_bmbt_init_key_from_rec,
1666 .init_ptr_from_cur = xfs_bmbt_init_ptr_from_cur,
1667 .key_diff = xfs_bmbt_key_diff,
1669 #ifdef XFS_BTREE_TRACE
1670 .trace_enter = xfs_bmbt_trace_enter,
1671 .trace_cursor = xfs_bmbt_trace_cursor,
1672 .trace_key = xfs_bmbt_trace_key,
1673 .trace_record = xfs_bmbt_trace_record,
1678 * Allocate a new bmap btree cursor.
1680 struct xfs_btree_cur * /* new bmap btree cursor */
1681 xfs_bmbt_init_cursor(
1682 struct xfs_mount *mp, /* file system mount point */
1683 struct xfs_trans *tp, /* transaction pointer */
1684 struct xfs_inode *ip, /* inode owning the btree */
1685 int whichfork) /* data or attr fork */
1687 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
1688 struct xfs_btree_cur *cur;
1690 cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP);
1694 cur->bc_nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1;
1695 cur->bc_btnum = XFS_BTNUM_BMAP;
1696 cur->bc_blocklog = mp->m_sb.sb_blocklog;
1698 cur->bc_ops = &xfs_bmbt_ops;
1699 cur->bc_flags = XFS_BTREE_LONG_PTRS | XFS_BTREE_ROOT_IN_INODE;
1701 cur->bc_private.b.forksize = XFS_IFORK_SIZE(ip, whichfork);
1702 cur->bc_private.b.ip = ip;
1703 cur->bc_private.b.firstblock = NULLFSBLOCK;
1704 cur->bc_private.b.flist = NULL;
1705 cur->bc_private.b.allocated = 0;
1706 cur->bc_private.b.flags = 0;
1707 cur->bc_private.b.whichfork = whichfork;