2 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
12 ** directory_part_size
16 ** are_leaves_removable
20 ** is_left_neighbor_in_cache
24 ** can_node_be_removed
26 ** dc_check_balance_internal
27 ** dc_check_balance_leaf
37 #include <linux/time.h>
38 #include <linux/string.h>
39 #include <linux/reiserfs_fs.h>
40 #include <linux/buffer_head.h>
42 /* To make any changes in the tree we find a node, that contains item
43 to be changed/deleted or position in the node we insert a new item
44 to. We call this node S. To do balancing we need to decide what we
45 will shift to left/right neighbor, or to a new node, where new item
46 will be etc. To make this analysis simpler we build virtual
47 node. Virtual node is an array of items, that will replace items of
48 node S. (For instance if we are going to delete an item, virtual
49 node does not contain it). Virtual node keeps information about
50 item sizes and types, mergeability of first and last items, sizes
51 of all entries in directory item. We use this array of items when
52 calculating what we can shift to neighbors and how many nodes we
53 have to have if we do not do any shifting, if we shift to left/right
54 neighbor or to both. */
56 /* taking item number in virtual node, returns number of item, that it has in source buffer */
/*
 * old_item_num - map an item's position in the virtual node back to the
 * position that item occupies in the source buffer S, compensating for a
 * pending insert/delete at affected_item_num.  mode is one of
 * M_INSERT/M_PASTE/M_CUT/M_DELETE.
 * NOTE(review): interior lines of this body (the return statements and
 * closing brace) are missing from this extract -- verify against the
 * full file before relying on the exact mapping.
 */
57 static inline int old_item_num(int new_num, int affected_item_num, int mode)
59 if (mode == M_PASTE || mode == M_CUT || new_num < affected_item_num)
/* paste/cut do not renumber items; items before the affected one keep their index */
62 if (mode == M_INSERT) {
65 "vs-8005: for INSERT mode and item number of inserted item");
70 RFALSE(mode != M_DELETE,
71 "vs-8010: old_item_num: mode must be M_DELETE (mode = \'%c\'",
/*
 * create_virtual_node - build the array of virtual items (vn->vn_vi) that
 * models node S[h] after the pending operation (tb->tb_vn holds the mode,
 * affected item number and insert data), including left/right mergeability
 * flags for the boundary items.
 * NOTE(review): many interior lines are missing from this extract (local
 * declarations, loop braces, assignment left-hand sides) -- do not treat
 * this body as complete.
 */
77 static void create_virtual_node(struct tree_balance *tb, int h)
80 struct virtual_node *vn = tb->tb_vn;
82 struct buffer_head *Sh; /* this comes from tb->S[h] */
84 Sh = PATH_H_PBUFFER(tb->tb_path, h);
86 /* size of changed node */
88 MAX_CHILD_SIZE(Sh) - B_FREE_SPACE(Sh) + tb->insert_size[h];
90 /* for internal nodes the array of virtual items is not created */
92 vn->vn_nr_item = (vn->vn_size - DC_SIZE) / (DC_SIZE + KEY_SIZE);
96 /* number of items in virtual node */
98 B_NR_ITEMS(Sh) + ((vn->vn_mode == M_INSERT) ? 1 : 0) -
99 ((vn->vn_mode == M_DELETE) ? 1 : 0);
101 /* first virtual item */
102 vn->vn_vi = (struct virtual_item *)(tb->tb_vn + 1);
103 memset(vn->vn_vi, 0, vn->vn_nr_item * sizeof(struct virtual_item));
104 vn->vn_free_ptr += vn->vn_nr_item * sizeof(struct virtual_item);
106 /* first item in the node */
107 ih = B_N_PITEM_HEAD(Sh, 0);
109 /* define the mergeability for 0-th item (if it is not being deleted) */
110 if (op_is_left_mergeable(&(ih->ih_key), Sh->b_size)
111 && (vn->vn_mode != M_DELETE || vn->vn_affected_item_num))
112 vn->vn_vi[0].vi_type |= VI_TYPE_LEFT_MERGEABLE;
114 /* go through all items that remain in the virtual node (except for the new (inserted) one) */
115 for (new_num = 0; new_num < vn->vn_nr_item; new_num++) {
117 struct virtual_item *vi = vn->vn_vi + new_num;
119 ((new_num != vn->vn_affected_item_num) ? 0 : 1);
121 if (is_affected && vn->vn_mode == M_INSERT)
124 /* get item number in source node */
125 j = old_item_num(new_num, vn->vn_affected_item_num,
128 vi->vi_item_len += ih_item_len(ih + j) + IH_SIZE;
130 vi->vi_item = B_I_PITEM(Sh, ih + j);
131 vi->vi_uarea = vn->vn_free_ptr;
133 // FIXME: there is no check, that item operation did not
134 // consume too much memory
136 op_create_vi(vn, vi, is_affected, tb->insert_size[0]);
137 if (tb->vn_buf + tb->vn_buf_size < vn->vn_free_ptr)
138 reiserfs_panic(tb->tb_sb, "vs-8030",
139 "virtual node space consumed");
142 /* this is not being changed */
145 if (vn->vn_mode == M_PASTE || vn->vn_mode == M_CUT) {
146 vn->vn_vi[new_num].vi_item_len += tb->insert_size[0];
147 vi->vi_new_data = vn->vn_data; // pointer to data which is going to be pasted
151 /* virtual inserted item is not defined yet */
152 if (vn->vn_mode == M_INSERT) {
153 struct virtual_item *vi = vn->vn_vi + vn->vn_affected_item_num;
155 RFALSE(vn->vn_ins_ih == NULL,
156 "vs-8040: item header of inserted item is not specified");
157 vi->vi_item_len = tb->insert_size[0];
158 vi->vi_ih = vn->vn_ins_ih;
159 vi->vi_item = vn->vn_data;
160 vi->vi_uarea = vn->vn_free_ptr;
162 op_create_vi(vn, vi, 0 /*not pasted or cut */ ,
166 /* set right merge flag we take right delimiting key and check whether it is a mergeable item */
168 struct reiserfs_key *key;
170 key = B_N_PDELIM_KEY(tb->CFR[0], tb->rkey[0]);
171 if (op_is_left_mergeable(key, Sh->b_size)
172 && (vn->vn_mode != M_DELETE
173 || vn->vn_affected_item_num != B_NR_ITEMS(Sh) - 1))
174 vn->vn_vi[vn->vn_nr_item - 1].vi_type |=
175 VI_TYPE_RIGHT_MERGEABLE;
177 #ifdef CONFIG_REISERFS_CHECK
178 if (op_is_left_mergeable(key, Sh->b_size) &&
179 !(vn->vn_mode != M_DELETE
180 || vn->vn_affected_item_num != B_NR_ITEMS(Sh) - 1)) {
181 /* we delete last item and it could be merged with right neighbor's first item */
184 && is_direntry_le_ih(B_N_PITEM_HEAD(Sh, 0))
185 && I_ENTRY_COUNT(B_N_PITEM_HEAD(Sh, 0)) == 1)) {
186 /* node contains more than 1 item, or item is not directory item, or this item contains more than 1 entry */
187 print_block(Sh, 0, -1, -1);
188 reiserfs_panic(tb->tb_sb, "vs-8045",
189 "rdkey %k, affected item==%d "
190 "(mode==%c) Must be %c",
191 key, vn->vn_affected_item_num,
192 vn->vn_mode, M_DELETE);
200 /* using virtual node check, how many items can be shifted to left
/*
 * check_left - using the virtual node, compute how many items (tb->lnum[h])
 * and how many bytes of a split boundary item (tb->lbytes) can be shifted
 * into the left neighbor L[h], given cur_free bytes of free space there.
 * NOTE(review): interior lines (loop body tail, early returns, closing
 * braces) are missing from this extract.
 */
202 static void check_left(struct tree_balance *tb, int h, int cur_free)
205 struct virtual_node *vn = tb->tb_vn;
206 struct virtual_item *vi;
209 RFALSE(cur_free < 0, "vs-8050: cur_free (%d) < 0", cur_free);
/* internal level: each shifted pointer costs DC_SIZE plus a delimiting key */
213 tb->lnum[h] = cur_free / (DC_SIZE + KEY_SIZE);
219 if (!cur_free || !vn->vn_nr_item) {
220 /* no free space or nothing to move */
226 RFALSE(!PATH_H_PPARENT(tb->tb_path, 0),
227 "vs-8055: parent does not exist or invalid");
230 if ((unsigned int)cur_free >=
232 ((vi->vi_type & VI_TYPE_LEFT_MERGEABLE) ? IH_SIZE : 0))) {
233 /* all contents of S[0] fits into L[0] */
235 RFALSE(vn->vn_mode == M_INSERT || vn->vn_mode == M_PASTE,
236 "vs-8055: invalid mode or balance condition failed");
238 tb->lnum[0] = vn->vn_nr_item;
243 d_size = 0, ih_size = IH_SIZE;
245 /* first item may be merged with last item in left neighbor */
246 if (vi->vi_type & VI_TYPE_LEFT_MERGEABLE)
247 d_size = -((int)IH_SIZE), ih_size = 0;
250 for (i = 0; i < vn->vn_nr_item;
251 i++, ih_size = IH_SIZE, d_size = 0, vi++) {
252 d_size += vi->vi_item_len;
253 if (cur_free >= d_size) {
254 /* the item can be shifted entirely */
260 /* the item cannot be shifted entirely, try to split it */
261 /* check whether L[0] can hold ih and at least one byte of the item body */
262 if (cur_free <= ih_size) {
263 /* cannot shift even a part of the current item */
269 tb->lbytes = op_check_left(vi, cur_free, 0, 0);
270 if (tb->lbytes != -1)
271 /* count partially shifted item */
280 /* using virtual node check, how many items can be shifted to right
/*
 * check_right - mirror of check_left: compute how many items (tb->rnum[h])
 * and how many bytes of a split boundary item (tb->rbytes) can be shifted
 * into the right neighbor R[h], given cur_free bytes of free space there.
 * Walks the virtual items from the last one backwards.
 * NOTE(review): interior lines are missing from this extract.
 */
282 static void check_right(struct tree_balance *tb, int h, int cur_free)
285 struct virtual_node *vn = tb->tb_vn;
286 struct virtual_item *vi;
289 RFALSE(cur_free < 0, "vs-8070: cur_free < 0");
/* internal level: each shifted pointer costs DC_SIZE plus a delimiting key */
293 tb->rnum[h] = cur_free / (DC_SIZE + KEY_SIZE);
299 if (!cur_free || !vn->vn_nr_item) {
306 RFALSE(!PATH_H_PPARENT(tb->tb_path, 0),
307 "vs-8075: parent does not exist or invalid");
309 vi = vn->vn_vi + vn->vn_nr_item - 1;
310 if ((unsigned int)cur_free >=
312 ((vi->vi_type & VI_TYPE_RIGHT_MERGEABLE) ? IH_SIZE : 0))) {
313 /* all contents of S[0] fits into R[0] */
315 RFALSE(vn->vn_mode == M_INSERT || vn->vn_mode == M_PASTE,
316 "vs-8080: invalid mode or balance condition failed");
318 tb->rnum[h] = vn->vn_nr_item;
323 d_size = 0, ih_size = IH_SIZE;
325 /* last item may be merged with first item in right neighbor */
326 if (vi->vi_type & VI_TYPE_RIGHT_MERGEABLE)
327 d_size = -(int)IH_SIZE, ih_size = 0;
330 for (i = vn->vn_nr_item - 1; i >= 0;
331 i--, d_size = 0, ih_size = IH_SIZE, vi--) {
332 d_size += vi->vi_item_len;
333 if (cur_free >= d_size) {
334 /* the item can be shifted entirely */
340 /* check whether R[0] can hold ih and at least one byte of the item body */
341 if (cur_free <= ih_size) { /* cannot shift even a part of the current item */
346 /* R[0] can hold the header of the item and at least one byte of its body */
347 cur_free -= ih_size; /* cur_free is still > 0 */
349 tb->rbytes = op_check_right(vi, cur_free);
350 if (tb->rbytes != -1)
351 /* count partially shifted item */
361 * from - number of items, which are shifted to left neighbor entirely
362 * to - number of item, which are shifted to right neighbor entirely
363 * from_bytes - number of bytes of boundary item (or directory entries) which are shifted to left neighbor
364 * to_bytes - number of bytes of boundary item (or directory entries) which are shifted to right neighbor */
/*
 * get_num_ver - compute how many nodes the contents of S[h] will occupy
 * after 'from' items (plus from_bytes of a boundary item) go left and 'to'
 * items (plus to_bytes) go right, filling the snum012 array: [0-2] item
 * counts for S0/S1new/S2new, [3] s1bytes, [4] s2bytes.  Returns the number
 * of nodes needed.
 * NOTE(review): a substantial number of interior lines are missing from
 * this extract (variable declarations such as needed_nodes/units/cur_free,
 * several assignment left-hand sides, loop/brace structure) -- treat this
 * body as incomplete and verify against the full file.
 */
365 static int get_num_ver(int mode, struct tree_balance *tb, int h,
366 int from, int from_bytes,
367 int to, int to_bytes, short *snum012, int flow)
373 struct virtual_node *vn = tb->tb_vn;
374 // struct virtual_item * vi;
376 int total_node_size, max_node_size, current_item_size;
378 int start_item, /* position of item we start filling node from */
379 end_item, /* position of item we finish filling node by */
380 start_bytes, /* number of first bytes (entries for directory) of start_item-th item
381 we do not include into node that is being filled */
382 end_bytes; /* number of last bytes (entries for directory) of end_item-th item
383 we do not include into node that is being filled */
384 int split_item_positions[2]; /* these are positions in virtual item of
385 items, that are split between S[0] and
386 S1new and S1new and S2new */
388 split_item_positions[0] = -1;
389 split_item_positions[1] = -1;
391 /* We only create additional nodes if we are in insert or paste mode
392 or we are in replace mode at the internal level. If h is 0 and
393 the mode is M_REPLACE then in fix_nodes we change the mode to
394 paste or insert before we get here in the code. */
395 RFALSE(tb->insert_size[h] < 0 || (mode != M_INSERT && mode != M_PASTE),
396 "vs-8100: insert_size < 0 in overflow");
398 max_node_size = MAX_CHILD_SIZE(PATH_H_PBUFFER(tb->tb_path, h));
400 /* snum012 [0-2] - number of items, that lay
401 to S[0], first new node and second new node */
402 snum012[3] = -1; /* s1bytes */
403 snum012[4] = -1; /* s2bytes */
407 i = ((to - from) * (KEY_SIZE + DC_SIZE) + DC_SIZE);
408 if (i == max_node_size)
410 return (i / max_node_size + 1);
416 cur_free = max_node_size;
418 // start from 'from'-th item
420 // skip its first 'start_bytes' units
421 start_bytes = ((from_bytes != -1) ? from_bytes : 0);
423 // last included item is the 'end_item'-th one
424 end_item = vn->vn_nr_item - to - 1;
425 // do not count last 'end_bytes' units of 'end_item'-th item
426 end_bytes = (to_bytes != -1) ? to_bytes : 0;
428 /* go through all item beginning from the start_item-th item and ending by
429 the end_item-th item. Do not count first 'start_bytes' units of
430 'start_item'-th item and last 'end_bytes' of 'end_item'-th item */
432 for (i = start_item; i <= end_item; i++) {
433 struct virtual_item *vi = vn->vn_vi + i;
434 int skip_from_end = ((i == end_item) ? end_bytes : 0);
436 RFALSE(needed_nodes > 3, "vs-8105: too many nodes are needed");
438 /* get size of current item */
439 current_item_size = vi->vi_item_len;
441 /* do not take in calculation head part (from_bytes) of from-th item */
443 op_part_size(vi, 0 /*from start */ , start_bytes);
445 /* do not take in calculation tail part of last item */
447 op_part_size(vi, 1 /*from end */ , skip_from_end);
449 /* if item fits into current node entirely */
450 if (total_node_size + current_item_size <= max_node_size) {
451 snum012[needed_nodes - 1]++;
452 total_node_size += current_item_size;
457 if (current_item_size > max_node_size) {
458 /* virtual item length is longer, than max size of item in
459 a node. It is impossible for direct item */
460 RFALSE(is_direct_le_ih(vi->vi_ih),
462 "direct item length is %d. It can not be longer than %d",
463 current_item_size, max_node_size);
464 /* we will try to split it */
469 /* as we do not split items, take new node and continue */
475 // calculate number of item units which fit into node being
480 free_space = max_node_size - total_node_size - IH_SIZE;
482 op_check_left(vi, free_space, start_bytes,
485 /* nothing fits into current node, take new node and continue */
486 needed_nodes++, i--, total_node_size = 0;
491 /* something fits into the current node */
492 //if (snum012[3] != -1 || needed_nodes != 1)
493 // reiserfs_panic (tb->tb_sb, "vs-8115: get_num_ver: too many nodes required");
494 //snum012[needed_nodes - 1 + 3] = op_unit_num (vi) - start_bytes - units;
495 start_bytes += units;
496 snum012[needed_nodes - 1 + 3] = units;
498 if (needed_nodes > 2)
499 reiserfs_warning(tb->tb_sb, "vs-8111",
500 "split_item_position is out of range");
501 snum012[needed_nodes - 1]++;
502 split_item_positions[needed_nodes - 1] = i;
504 /* continue from the same item with start_bytes != -1 */
510 // snum012[4] (if it is not -1) contains number of units of which
511 // are to be in S1new, snum012[3] - to be in S0. They are supposed
512 // to be S1bytes and S2bytes correspondingly, so recalculate
513 if (snum012[4] > 0) {
515 int bytes_to_r, bytes_to_l;
518 split_item_num = split_item_positions[1];
520 ((from == split_item_num
521 && from_bytes != -1) ? from_bytes : 0);
523 ((end_item == split_item_num
524 && end_bytes != -1) ? end_bytes : 0);
526 ((split_item_positions[0] ==
527 split_item_positions[1]) ? snum012[3] : 0);
531 op_unit_num(&vn->vn_vi[split_item_num]) - snum012[4] -
532 bytes_to_r - bytes_to_l - bytes_to_S1new;
534 if (vn->vn_vi[split_item_num].vi_index != TYPE_DIRENTRY &&
535 vn->vn_vi[split_item_num].vi_index != TYPE_INDIRECT)
536 reiserfs_warning(tb->tb_sb, "vs-8115",
537 "not directory or indirect item");
540 /* now we know S2bytes, calculate S1bytes */
541 if (snum012[3] > 0) {
543 int bytes_to_r, bytes_to_l;
546 split_item_num = split_item_positions[0];
548 ((from == split_item_num
549 && from_bytes != -1) ? from_bytes : 0);
551 ((end_item == split_item_num
552 && end_bytes != -1) ? end_bytes : 0);
554 ((split_item_positions[0] == split_item_positions[1]
555 && snum012[4] != -1) ? snum012[4] : 0);
559 op_unit_num(&vn->vn_vi[split_item_num]) - snum012[3] -
560 bytes_to_r - bytes_to_l - bytes_to_S2new;
566 #ifdef CONFIG_REISERFS_CHECK
567 extern struct tree_balance *cur_tb;
570 /* Set parameters for balancing.
571 * Performs write of results of analysis of balancing into structure tb,
572 * where it will later be used by the functions that actually do the balancing.
574 * tb tree_balance structure;
575 * h current level of the node;
576 * lnum number of items from S[h] that must be shifted to L[h];
577 * rnum number of items from S[h] that must be shifted to R[h];
578 * blk_num number of blocks that S[h] will be splitted into;
579 * s012 number of items that fall into splitted nodes.
580 * lbytes number of bytes which flow to the left neighbor from the item that is
581 * not shifted entirely
582 * rbytes number of bytes which flow to the right neighbor from the item that is
583 * not shifted entirely
584 * s1bytes number of bytes which flow to the first new node when S[0] splits (this number is contained in s012 array)
/*
 * set_parameters - record the result of the balancing analysis into tb:
 * blknum[h], and at the leaf level (h == 0) also s1num/s2num/s1bytes,
 * plus /proc accounting.  Callers pass s012 == NULL and lb/rb == -1 when
 * there is nothing to split.
 * NOTE(review): the lines storing lnum/rnum/lbytes/rbytes themselves are
 * missing from this extract; only the PROC_INFO accounting is visible.
 */
587 static void set_parameters(struct tree_balance *tb, int h, int lnum,
588 int rnum, int blk_num, short *s012, int lb, int rb)
593 tb->blknum[h] = blk_num;
595 if (h == 0) { /* only for leaf level */
598 tb->s1num = *s012++, tb->s2num = *s012++;
599 tb->s1bytes = *s012++;
605 PROC_INFO_ADD(tb->tb_sb, lnum[h], lnum);
606 PROC_INFO_ADD(tb->tb_sb, rnum[h], rnum);
608 PROC_INFO_ADD(tb->tb_sb, lbytes[h], lb);
609 PROC_INFO_ADD(tb->tb_sb, rbytes[h], rb);
612 /* check, does node disappear if we shift tb->lnum[0] items to left
613 neighbor and tb->rnum[0] to the right one. */
/*
 * is_leaf_removable - decide whether S[0] disappears entirely after
 * shifting tb->lnum[0] items left and tb->rnum[0] items right.  If only
 * one item would remain, also checks whether that item can be split
 * between the two neighbors (lbytes + rbytes >= its unit count).
 * NOTE(review): return statements are missing from this extract.
 */
614 static int is_leaf_removable(struct tree_balance *tb)
616 struct virtual_node *vn = tb->tb_vn;
617 int to_left, to_right;
621 /* number of items, that will be shifted to left (right) neighbor
623 to_left = tb->lnum[0] - ((tb->lbytes != -1) ? 1 : 0);
624 to_right = tb->rnum[0] - ((tb->rbytes != -1) ? 1 : 0);
625 remain_items = vn->vn_nr_item;
627 /* how many items remain in S[0] after shiftings to neighbors */
628 remain_items -= (to_left + to_right);
630 if (remain_items < 1) {
631 /* all content of node can be shifted to neighbors */
632 set_parameters(tb, 0, to_left, vn->vn_nr_item - to_left, 0,
637 if (remain_items > 1 || tb->lbytes == -1 || tb->rbytes == -1)
638 /* S[0] is not removable */
641 /* check, whether we can divide 1 remaining item between neighbors */
643 /* get size of remaining item (in item units) */
644 size = op_unit_num(&(vn->vn_vi[to_left]));
646 if (tb->lbytes + tb->rbytes >= size) {
647 set_parameters(tb, 0, to_left + 1, to_right + 1, 0, NULL,
655 /* check whether L, S, R can be joined in one node */
/*
 * are_leaves_removable - check whether L[0], S[0] and R[0] can all be
 * joined into a single node (S[0]'s contents fit into the neighbors'
 * combined free space, with ih_size credit for mergeable boundary items).
 * NOTE(review): interior lines (ih_size initialization and accumulation,
 * return statements) are missing from this extract.
 */
656 static int are_leaves_removable(struct tree_balance *tb, int lfree, int rfree)
658 struct virtual_node *vn = tb->tb_vn;
660 struct buffer_head *S0;
662 S0 = PATH_H_PBUFFER(tb->tb_path, 0);
665 if (vn->vn_nr_item) {
666 if (vn->vn_vi[0].vi_type & VI_TYPE_LEFT_MERGEABLE)
669 if (vn->vn_vi[vn->vn_nr_item - 1].
670 vi_type & VI_TYPE_RIGHT_MERGEABLE)
673 /* there was only one item and it will be deleted */
674 struct item_head *ih;
676 RFALSE(B_NR_ITEMS(S0) != 1,
677 "vs-8125: item number must be 1: it is %d",
680 ih = B_N_PITEM_HEAD(S0, 0);
682 && !comp_short_le_keys(&(ih->ih_key),
683 B_N_PDELIM_KEY(tb->CFR[0],
685 if (is_direntry_le_ih(ih)) {
686 /* Directory must be in correct state here: that is
687 somewhere at the left side should exist first directory
688 item. But the item being deleted can not be that first
689 one because its right neighbor is item of the same
690 directory. (But first item always gets deleted in last
691 turn). So, neighbors of deleted item can be merged, so
692 we can save ih_size */
695 /* we might check that left neighbor exists and is of the
697 RFALSE(le_ih_k_offset(ih) == DOT_OFFSET,
698 "vs-8130: first directory item can not be removed until directory is not empty");
703 if (MAX_CHILD_SIZE(S0) + vn->vn_size <= rfree + lfree + ih_size) {
704 set_parameters(tb, 0, -1, -1, -1, NULL, -1, -1);
705 PROC_INFO_INC(tb->tb_sb, leaves_removable);
712 /* when we do not split item, lnum and rnum are numbers of entire items */
/* NOTE(review): the two macros below call set_parameters() with values
 * drawn from locals at the expansion site (Sh, vn, lpar/rpar, lset/rset,
 * lnver/rnver, snum012, to_l/to_r, tb).  Several backslash-continued
 * lines are missing from this extract, so the visible bodies are
 * incomplete -- verify against the full file. */
713 #define SET_PAR_SHIFT_LEFT \
718 to_l = (MAX_NR_KEY(Sh)+1 - lpar + vn->vn_nr_item + 1) / 2 -\
719 (MAX_NR_KEY(Sh) + 1 - lpar);\
721 set_parameters (tb, h, to_l, 0, lnver, NULL, -1, -1);\
725 if (lset==LEFT_SHIFT_FLOW)\
726 set_parameters (tb, h, lpar, 0, lnver, snum012+lset,\
729 set_parameters (tb, h, lpar - (tb->lbytes!=-1), 0, lnver, snum012+lset,\
733 #define SET_PAR_SHIFT_RIGHT \
738 to_r = (MAX_NR_KEY(Sh)+1 - rpar + vn->vn_nr_item + 1) / 2 - (MAX_NR_KEY(Sh) + 1 - rpar);\
740 set_parameters (tb, h, 0, to_r, rnver, NULL, -1, -1);\
744 if (rset==RIGHT_SHIFT_FLOW)\
745 set_parameters (tb, h, 0, rpar, rnver, snum012+rset,\
748 set_parameters (tb, h, 0, rpar - (tb->rbytes!=-1), rnver, snum012+rset,\
/*
 * free_buffers_in_tb - release the path and (per the visible loop) walk
 * all tree heights releasing buffers held by the tree_balance structure.
 * NOTE(review): almost the entire body is missing from this extract; the
 * loop contents are not visible.
 */
752 static void free_buffers_in_tb(struct tree_balance *tb)
756 pathrelse(tb->tb_path);
758 for (i = 0; i < MAX_HEIGHT; i++) {
775 /* Get new buffers for storing new nodes that are created while balancing.
776 * Returns: SCHEDULE_OCCURRED - schedule occurred while the function worked;
777 * CARRY_ON - schedule didn't occur while the function worked;
778 * NO_DISK_SPACE - no disk space.
780 /* The function is NOT SCHEDULE-SAFE! */
/*
 * get_empty_nodes - allocate the empty blocks this level (h) of the
 * balancing analysis needs, wrap them in buffers via sb_getblk(), and
 * stash them in tb->FEB[].  Returns NO_DISK_SPACE on allocation failure,
 * otherwise CARRY_ON/REPEAT_SEARCH depending on FILESYSTEM_CHANGED_TB.
 * The function is NOT schedule-safe (see the comment block above it).
 * NOTE(review): some interior lines (retval initialization, loop-body
 * fragments) are missing from this extract.
 */
781 static int get_empty_nodes(struct tree_balance *tb, int h)
783 struct buffer_head *new_bh,
784 *Sh = PATH_H_PBUFFER(tb->tb_path, h);
785 b_blocknr_t *blocknr, blocknrs[MAX_AMOUNT_NEEDED] = { 0, };
786 int counter, number_of_freeblk, amount_needed, /* number of needed empty blocks */
788 struct super_block *sb = tb->tb_sb;
790 /* number_of_freeblk is the number of empty blocks which have been
791 acquired for use by the balancing algorithm minus the number of
792 empty blocks used in the previous levels of the analysis,
793 number_of_freeblk = tb->cur_blknum can be non-zero if a schedule occurs
794 after empty blocks are acquired, and the balancing analysis is
795 then restarted, amount_needed is the number needed by this level
796 (h) of the balancing analysis.
798 Note that for systems with many processes writing, it would be
799 more layout optimal to calculate the total number needed by all
800 levels and then to run reiserfs_new_blocks to get all of them at once. */
802 /* Initiate number_of_freeblk to the amount acquired prior to the restart of
803 the analysis or 0 if not restarted, then subtract the amount needed
804 by all of the levels of the tree below h. */
805 /* blknum includes S[h], so we subtract 1 in this calculation */
806 for (counter = 0, number_of_freeblk = tb->cur_blknum;
807 counter < h; counter++)
809 (tb->blknum[counter]) ? (tb->blknum[counter] -
812 /* Allocate missing empty blocks. */
813 /* if Sh == 0 then we are getting a new root */
814 amount_needed = (Sh) ? (tb->blknum[h] - 1) : 1;
815 /* Amount_needed = the amount that we need more than the amount that we have. */
816 if (amount_needed > number_of_freeblk)
817 amount_needed -= number_of_freeblk;
818 else /* If we have enough already then there is nothing to do. */
821 /* No need to check quota - is not allocated for blocks used for formatted nodes */
822 if (reiserfs_new_form_blocknrs(tb, blocknrs,
823 amount_needed) == NO_DISK_SPACE)
824 return NO_DISK_SPACE;
826 /* for each blocknumber we just got, get a buffer and stick it on FEB */
827 for (blocknr = blocknrs, counter = 0;
828 counter < amount_needed; blocknr++, counter++) {
831 "PAP-8135: reiserfs_new_blocknrs failed when got new blocks");
833 new_bh = sb_getblk(sb, *blocknr);
834 RFALSE(buffer_dirty(new_bh) ||
835 buffer_journaled(new_bh) ||
836 buffer_journal_dirty(new_bh),
837 "PAP-8140: journlaled or dirty buffer %b for the new block",
840 /* Put empty buffers into the array. */
841 RFALSE(tb->FEB[tb->cur_blknum],
842 "PAP-8141: busy slot for new buffer");
844 set_buffer_journal_new(new_bh);
845 tb->FEB[tb->cur_blknum++] = new_bh;
848 if (retval == CARRY_ON && FILESYSTEM_CHANGED_TB(tb))
849 retval = REPEAT_SEARCH;
854 /* Get free space of the left neighbor, which is stored in the parent
855 * node of the left neighbor. */
/*
 * get_lfree - free space of the left neighbor L[h], computed from the
 * dc_size stored in its parent: either the slot left of the current
 * node's position (same parent) or the last slot of FL[h].
 * NOTE(review): a line or two of the order-selection branch is missing
 * from this extract.
 */
856 static int get_lfree(struct tree_balance *tb, int h)
858 struct buffer_head *l, *f;
861 if ((f = PATH_H_PPARENT(tb->tb_path, h)) == NULL ||
862 (l = tb->FL[h]) == NULL)
866 order = PATH_H_B_ITEM_ORDER(tb->tb_path, h) - 1;
868 order = B_NR_ITEMS(l);
872 return (MAX_CHILD_SIZE(f) - dc_size(B_N_CHILD(f, order)));
875 /* Get free space of the right neighbor,
876 * which is stored in the parent node of the right neighbor.
/*
 * get_rfree - mirror of get_lfree for the right neighbor R[h]: free
 * space derived from the dc_size in the parent slot to the right of the
 * current node's position.
 * NOTE(review): the branch choosing 'order' when FR[h] differs from the
 * direct parent is missing from this extract.
 */
878 static int get_rfree(struct tree_balance *tb, int h)
880 struct buffer_head *r, *f;
883 if ((f = PATH_H_PPARENT(tb->tb_path, h)) == NULL ||
884 (r = tb->FR[h]) == NULL)
888 order = PATH_H_B_ITEM_ORDER(tb->tb_path, h) + 1;
894 return (MAX_CHILD_SIZE(f) - dc_size(B_N_CHILD(f, order)));
898 /* Check whether left neighbor is in memory. */
/*
 * is_left_neighbor_in_cache - look up the left neighbor's block number
 * via FL[h] and probe the buffer cache with sb_find_get_block() without
 * reading from disk; used to decide whether left-shifting is cheap.
 * NOTE(review): the early-return when tb->FL[h] is absent and the final
 * return/brelse lines are missing from this extract.
 */
899 static int is_left_neighbor_in_cache(struct tree_balance *tb, int h)
901 struct buffer_head *father, *left;
902 struct super_block *sb = tb->tb_sb;
903 b_blocknr_t left_neighbor_blocknr;
904 int left_neighbor_position;
906 /* Father of the left neighbor does not exist. */
910 /* Calculate father of the node to be balanced. */
911 father = PATH_H_PBUFFER(tb->tb_path, h + 1);
914 !B_IS_IN_TREE(father) ||
915 !B_IS_IN_TREE(tb->FL[h]) ||
916 !buffer_uptodate(father) ||
917 !buffer_uptodate(tb->FL[h]),
918 "vs-8165: F[h] (%b) or FL[h] (%b) is invalid",
921 /* Get position of the pointer to the left neighbor into the left father. */
922 left_neighbor_position = (father == tb->FL[h]) ?
923 tb->lkey[h] : B_NR_ITEMS(tb->FL[h]);
924 /* Get left neighbor block number. */
925 left_neighbor_blocknr =
926 B_N_CHILD_NUM(tb->FL[h], left_neighbor_position);
927 /* Look for the left neighbor in the cache. */
928 if ((left = sb_find_get_block(sb, left_neighbor_blocknr))) {
930 RFALSE(buffer_uptodate(left) && !B_IS_IN_TREE(left),
931 "vs-8170: left neighbor (%b %z) is not in the tree",
/* selector values for get_far_parent()'s c_lr_par argument: which side's
 * far parent / common parent to locate */
940 #define LEFT_PARENTS 'l'
941 #define RIGHT_PARENTS 'r'
/*
 * decrement_key - step the key down by one position, dispatching to the
 * item-type-specific decrement_key operation from item_ops[].
 */
943 static void decrement_key(struct cpu_key *key)
945 // call item specific function for this key
946 item_ops[cpu_key_k_type(key)]->decrement_key(key);
949 /* Calculate far left/right parent of the left/right neighbor of the current node, that
950 * is calculate the left/right (FL[h]/FR[h]) neighbor of the parent F[h].
951 * Calculate left/right common parent of the current node and L[h]/R[h].
952 * Calculate left/right delimiting key position.
953 * Returns: PATH_INCORRECT - path in the tree is not correct;
954 SCHEDULE_OCCURRED - schedule occurred while the function worked;
955 * CARRY_ON - schedule didn't occur while the function worked;
/*
 * get_far_parent - find the far left/right parent (*pfather) of the
 * current node's left/right neighbor, and the common parent
 * (*pcom_father) of the current node and that neighbor, by walking up
 * the path until a non-boundary position is found and then searching
 * back down by key.  c_lr_par is LEFT_PARENTS or RIGHT_PARENTS.
 * Returns CARRY_ON, REPEAT_SEARCH, or (per the comment above) a
 * path-incorrect indication.
 * NOTE(review): interior lines (the if-conditions wrapping several
 * visible fragments, search_by_key invocation head, some gotos/labels)
 * are missing from this extract.
 */
957 static int get_far_parent(struct tree_balance *tb,
959 struct buffer_head **pfather,
960 struct buffer_head **pcom_father, char c_lr_par)
962 struct buffer_head *parent;
963 INITIALIZE_PATH(s_path_to_neighbor_father);
964 struct treepath *path = tb->tb_path;
965 struct cpu_key s_lr_father_key;
968 first_last_position = 0,
969 path_offset = PATH_H_PATH_OFFSET(path, h);
971 /* Starting from F[h] go upwards in the tree, and look for the common
972 ancestor of F[h], and its neighbor l/r, that should be obtained. */
974 counter = path_offset;
976 RFALSE(counter < FIRST_PATH_ELEMENT_OFFSET,
977 "PAP-8180: invalid path length");
979 for (; counter > FIRST_PATH_ELEMENT_OFFSET; counter--) {
980 /* Check whether parent of the current buffer in the path is really parent in the tree. */
982 (parent = PATH_OFFSET_PBUFFER(path, counter - 1)))
983 return REPEAT_SEARCH;
984 /* Check whether position in the parent is correct. */
986 PATH_OFFSET_POSITION(path,
989 return REPEAT_SEARCH;
990 /* Check whether parent at the path really points to the child. */
991 if (B_N_CHILD_NUM(parent, position) !=
992 PATH_OFFSET_PBUFFER(path, counter)->b_blocknr)
993 return REPEAT_SEARCH;
994 /* Return delimiting key if position in the parent is not equal to first/last one. */
995 if (c_lr_par == RIGHT_PARENTS)
996 first_last_position = B_NR_ITEMS(parent);
997 if (position != first_last_position) {
998 *pcom_father = parent;
999 get_bh(*pcom_father);
1000 /*(*pcom_father = parent)->b_count++; */
1005 /* if we are in the root of the tree, then there is no common father */
1006 if (counter == FIRST_PATH_ELEMENT_OFFSET) {
1007 /* Check whether first buffer in the path is the root of the tree. */
1008 if (PATH_OFFSET_PBUFFER
1010 FIRST_PATH_ELEMENT_OFFSET)->b_blocknr ==
1011 SB_ROOT_BLOCK(tb->tb_sb)) {
1012 *pfather = *pcom_father = NULL;
1015 return REPEAT_SEARCH;
1018 RFALSE(B_LEVEL(*pcom_father) <= DISK_LEAF_NODE_LEVEL,
1019 "PAP-8185: (%b %z) level too small",
1020 *pcom_father, *pcom_father);
1022 /* Check whether the common parent is locked. */
1024 if (buffer_locked(*pcom_father)) {
1026 /* Release the write lock while the buffer is busy */
1027 reiserfs_write_unlock(tb->tb_sb);
1028 __wait_on_buffer(*pcom_father);
1029 reiserfs_write_lock(tb->tb_sb);
1030 if (FILESYSTEM_CHANGED_TB(tb)) {
1031 brelse(*pcom_father);
1032 return REPEAT_SEARCH;
1036 /* So, we got common parent of the current node and its left/right neighbor.
1037 Now we are getting the parent of the left/right neighbor. */
1039 /* Form key to get parent of the left/right neighbor. */
1040 le_key2cpu_key(&s_lr_father_key,
1041 B_N_PDELIM_KEY(*pcom_father,
1043 LEFT_PARENTS) ? (tb->lkey[h - 1] =
1049 if (c_lr_par == LEFT_PARENTS)
1050 decrement_key(&s_lr_father_key);
1053 (tb->tb_sb, &s_lr_father_key, &s_path_to_neighbor_father,
1058 if (FILESYSTEM_CHANGED_TB(tb)) {
1059 pathrelse(&s_path_to_neighbor_father);
1060 brelse(*pcom_father);
1061 return REPEAT_SEARCH;
1064 *pfather = PATH_PLAST_BUFFER(&s_path_to_neighbor_father);
1066 RFALSE(B_LEVEL(*pfather) != h + 1,
1067 "PAP-8190: (%b %z) level too small", *pfather, *pfather);
1068 RFALSE(s_path_to_neighbor_father.path_length <
1069 FIRST_PATH_ELEMENT_OFFSET, "PAP-8192: path length is too small");
1071 s_path_to_neighbor_father.path_length--;
1072 pathrelse(&s_path_to_neighbor_father);
1076 /* Get parents of neighbors of node in the path(S[path_offset]) and common parents of
1077 * S[path_offset] and L[path_offset]/R[path_offset]: F[path_offset], FL[path_offset],
1078 * FR[path_offset], CFL[path_offset], CFR[path_offset].
1079 * Calculate numbers of left and right delimiting keys position: lkey[path_offset], rkey[path_offset].
1080 * Returns: SCHEDULE_OCCURRED - schedule occurred while the function worked;
1081 * CARRY_ON - schedule didn't occur while the function worked;
/*
 * get_parents - fill in the neighbor-parent pointers for level h:
 * FL[h]/FR[h] (parents of the left/right neighbors) and CFL[h]/CFR[h]
 * (common parents with the current node), plus the delimiting key
 * positions lkey[h]/rkey[h].  Delegates to get_far_parent() when the
 * current node sits at the first/last slot of its parent.
 * NOTE(review): interior lines (root-case body, brelse/get_bh pairs,
 * several assignments and returns) are missing from this extract.
 */
1083 static int get_parents(struct tree_balance *tb, int h)
1085 struct treepath *path = tb->tb_path;
1088 path_offset = PATH_H_PATH_OFFSET(tb->tb_path, h);
1089 struct buffer_head *curf, *curcf;
1091 /* Current node is the root of the tree or will be root of the tree */
1092 if (path_offset <= FIRST_PATH_ELEMENT_OFFSET) {
1093 /* The root can not have parents.
1094 Release nodes which previously were obtained as parents of the current node neighbors. */
1106 /* Get parent FL[path_offset] of L[path_offset]. */
1107 position = PATH_OFFSET_POSITION(path, path_offset - 1);
1109 /* Current node is not the first child of its parent. */
1110 curf = PATH_OFFSET_PBUFFER(path, path_offset - 1);
1111 curcf = PATH_OFFSET_PBUFFER(path, path_offset - 1);
1114 tb->lkey[h] = position - 1;
1116 /* Calculate current parent of L[path_offset], which is the left neighbor of the current node.
1117 Calculate current common parent of L[path_offset] and the current node. Note that
1118 CFL[path_offset] not equal FL[path_offset] and CFL[path_offset] not equal F[path_offset].
1119 Calculate lkey[path_offset]. */
1120 if ((ret = get_far_parent(tb, h + 1, &curf,
1122 LEFT_PARENTS)) != CARRY_ON)
1127 tb->FL[h] = curf; /* New initialization of FL[h]. */
1129 tb->CFL[h] = curcf; /* New initialization of CFL[h]. */
1131 RFALSE((curf && !B_IS_IN_TREE(curf)) ||
1132 (curcf && !B_IS_IN_TREE(curcf)),
1133 "PAP-8195: FL (%b) or CFL (%b) is invalid", curf, curcf);
1135 /* Get parent FR[h] of R[h]. */
1137 /* Current node is the last child of F[h]. FR[h] != F[h]. */
1138 if (position == B_NR_ITEMS(PATH_H_PBUFFER(path, h + 1))) {
1139 /* Calculate current parent of R[h], which is the right neighbor of F[h].
1140 Calculate current common parent of R[h] and current node. Note that CFR[h]
1141 not equal FR[path_offset] and CFR[h] not equal F[h]. */
1143 get_far_parent(tb, h + 1, &curf, &curcf,
1144 RIGHT_PARENTS)) != CARRY_ON)
1147 /* Current node is not the last child of its parent F[h]. */
1148 curf = PATH_OFFSET_PBUFFER(path, path_offset - 1);
1149 curcf = PATH_OFFSET_PBUFFER(path, path_offset - 1);
1152 tb->rkey[h] = position;
1156 /* New initialization of FR[path_offset]. */
1160 /* New initialization of CFR[path_offset]. */
1163 RFALSE((curf && !B_IS_IN_TREE(curf)) ||
1164 (curcf && !B_IS_IN_TREE(curcf)),
1165 "PAP-8205: FR (%b) or CFR (%b) is invalid", curf, curcf);
1170 /* it is possible to remove node as result of shiftings to
1171 neighbors even when we insert or paste item. */
/*
 * can_node_be_removed - check whether S[h] can disappear after shifting
 * to its neighbors (it can also happen during insert/paste): compares
 * the combined free space lfree+rfree+sfree against the node's used
 * space plus insert_size, crediting IH_SIZE for each mergeable boundary
 * item and KEY_SIZE at internal levels.  Returns NO_BALANCING_NEEDED
 * when the new item fits into S[h] without shifting, otherwise
 * !NO_BALANCING_NEEDED.
 * NOTE(review): interior lines (the conditions guarding the r_key lookup
 * and the s0num computation) are missing from this extract.
 */
1172 static inline int can_node_be_removed(int mode, int lfree, int sfree, int rfree,
1173 struct tree_balance *tb, int h)
1175 struct buffer_head *Sh = PATH_H_PBUFFER(tb->tb_path, h);
1176 int levbytes = tb->insert_size[h];
1177 struct item_head *ih;
1178 struct reiserfs_key *r_key = NULL;
1180 ih = B_N_PITEM_HEAD(Sh, 0);
1182 r_key = B_N_PDELIM_KEY(tb->CFR[h], tb->rkey[h]);
1184 if (lfree + rfree + sfree < MAX_CHILD_SIZE(Sh) + levbytes
1185 /* shifting may merge items which might save space */
1188 && op_is_left_mergeable(&(ih->ih_key), Sh->b_size)) ? IH_SIZE : 0)
1191 && op_is_left_mergeable(r_key, Sh->b_size)) ? IH_SIZE : 0)
1192 + ((h) ? KEY_SIZE : 0)) {
1193 /* node can not be removed */
1194 if (sfree >= levbytes) { /* new item fits into node S[h] without any shifting */
1198 ((mode == M_INSERT) ? 1 : 0);
1199 set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
1200 return NO_BALANCING_NEEDED;
1203 PROC_INFO_INC(tb->tb_sb, can_node_be_removed[h]);
1204 return !NO_BALANCING_NEEDED;
1207 /* Check whether current node S[h] is balanced when increasing its size by
1208 * Inserting or Pasting.
1209 * Calculate parameters for balancing for current level h.
1211 * tb tree_balance structure;
1212 * h current level of the node;
1213 * inum item number in S[h];
1214 * mode i - insert, p - paste;
1215 * Returns: 1 - schedule occurred;
1216 * 0 - balancing for higher levels needed;
1217 * -1 - no balancing for higher levels needed;
1218 * -2 - no disk space.
1220 /* ip means Inserting or Pasting */
1221 static int ip_check_balance(struct tree_balance *tb, int h)
1223 struct virtual_node *vn = tb->tb_vn;
1224 int levbytes, /* Number of bytes that must be inserted into (value
1225 is negative if bytes are deleted) buffer which
1226 contains node being balanced. The mnemonic is
1227 that the attempted change in node space used level
1228 is levbytes bytes. */
1231 int lfree, sfree, rfree /* free space in L, S and R */ ;
1233 /* nver is short for number of vertixes, and lnver is the number if
1234 we shift to the left, rnver is the number if we shift to the
1235 right, and lrnver is the number if we shift in both directions.
1236 The goal is to minimize first the number of vertixes, and second,
1237 the number of vertixes whose contents are changed by shifting,
1238 and third the number of uncached vertixes whose contents are
1239 changed by shifting and must be read from disk. */
1240 int nver, lnver, rnver, lrnver;
1242 /* used at leaf level only, S0 = S[0] is the node being balanced,
1243 sInum [ I = 0,1,2 ] is the number of items that will
1244 remain in node SI after balancing. S1 and S2 are new
1245 nodes that might be created. */
1247 /* we perform 8 calls to get_num_ver(). For each call we calculate five parameters.
1248 where 4th parameter is s1bytes and 5th - s2bytes
1250 short snum012[40] = { 0, }; /* s0num, s1num, s2num for 8 cases
1251 0,1 - do not shift and do not shift but bottle
1252 2 - shift only whole item to left
1253 3 - shift to left and bottle as much as possible
1254 4,5 - shift to right (whole items and as much as possible
1255 6,7 - shift to both directions (whole items and as much as possible)
1258 /* Sh is the node whose balance is currently being checked */
1259 struct buffer_head *Sh;
1261 Sh = PATH_H_PBUFFER(tb->tb_path, h);
1262 levbytes = tb->insert_size[h];
1264 /* Calculate balance parameters for creating new root. */
1267 reiserfs_panic(tb->tb_sb, "vs-8210",
1268 "S[0] can not be 0");
1269 switch (ret = get_empty_nodes(tb, h)) {
1271 set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
1272 return NO_BALANCING_NEEDED; /* no balancing for higher levels needed */
1278 reiserfs_panic(tb->tb_sb, "vs-8215", "incorrect "
1279 "return value of get_empty_nodes");
1283 if ((ret = get_parents(tb, h)) != CARRY_ON) /* get parents of S[h] neighbors. */
1286 sfree = B_FREE_SPACE(Sh);
1288 /* get free space of neighbors */
1289 rfree = get_rfree(tb, h);
1290 lfree = get_lfree(tb, h);
1292 if (can_node_be_removed(vn->vn_mode, lfree, sfree, rfree, tb, h) ==
1293 NO_BALANCING_NEEDED)
1294 /* and new item fits into node S[h] without any shifting */
1295 return NO_BALANCING_NEEDED;
1297 create_virtual_node(tb, h);
1300 determine maximal number of items we can shift to the left neighbor (in tb structure)
1301 and the maximal number of bytes that can flow to the left neighbor
1302 from the left most liquid item that cannot be shifted from S[0] entirely (returned value)
1304 check_left(tb, h, lfree);
1307 determine maximal number of items we can shift to the right neighbor (in tb structure)
1308 and the maximal number of bytes that can flow to the right neighbor
1309 from the right most liquid item that cannot be shifted from S[0] entirely (returned value)
1311 check_right(tb, h, rfree);
1313 /* all contents of internal node S[h] can be moved into its
1314 neighbors, S[h] will be removed after balancing */
1315 if (h && (tb->rnum[h] + tb->lnum[h] >= vn->vn_nr_item + 1)) {
1318 /* Since we are working on internal nodes, and our internal
1319 nodes have fixed size entries, then we can balance by the
1320 number of items rather than the space they consume. In this
1321 routine we set the left node equal to the right node,
1322 allowing a difference of less than or equal to 1 child
1325 ((MAX_NR_KEY(Sh) << 1) + 2 - tb->lnum[h] - tb->rnum[h] +
1326 vn->vn_nr_item + 1) / 2 - (MAX_NR_KEY(Sh) + 1 -
1328 set_parameters(tb, h, vn->vn_nr_item + 1 - to_r, to_r, 0, NULL,
1333 /* this checks balance condition, that any two neighboring nodes can not fit in one node */
1335 (tb->lnum[h] >= vn->vn_nr_item + 1 ||
1336 tb->rnum[h] >= vn->vn_nr_item + 1),
1337 "vs-8220: tree is not balanced on internal level");
1338 RFALSE(!h && ((tb->lnum[h] >= vn->vn_nr_item && (tb->lbytes == -1)) ||
1339 (tb->rnum[h] >= vn->vn_nr_item && (tb->rbytes == -1))),
1340 "vs-8225: tree is not balanced on leaf level");
1342 /* all contents of S[0] can be moved into its neighbors
1343 S[0] will be removed after balancing. */
1344 if (!h && is_leaf_removable(tb))
1347 /* why do we perform this check here rather than earlier??
1348 Answer: we can win 1 node in some cases above. Moreover we
1349 checked it above, when we checked, that S[0] is not removable
1351 if (sfree >= levbytes) { /* new item fits into node S[h] without any shifting */
1353 tb->s0num = vn->vn_nr_item;
1354 set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
1355 return NO_BALANCING_NEEDED;
1359 int lpar, rpar, nset, lset, rset, lrset;
1361 * regular overflowing of the node
1364 /* get_num_ver works in 2 modes (FLOW & NO_FLOW)
1365 lpar, rpar - number of items we can shift to left/right neighbor (including splitting item)
1366 nset, lset, rset, lrset - shows, whether flowing items give better packing
1369 #define NO_FLOW 0 /* do not any splitting */
1371 /* we choose one the following */
1372 #define NOTHING_SHIFT_NO_FLOW 0
1373 #define NOTHING_SHIFT_FLOW 5
1374 #define LEFT_SHIFT_NO_FLOW 10
1375 #define LEFT_SHIFT_FLOW 15
1376 #define RIGHT_SHIFT_NO_FLOW 20
1377 #define RIGHT_SHIFT_FLOW 25
1378 #define LR_SHIFT_NO_FLOW 30
1379 #define LR_SHIFT_FLOW 35
1384 /* calculate number of blocks S[h] must be split into when
1385 nothing is shifted to the neighbors,
1386 as well as number of items in each part of the split node (s012 numbers),
1387 and number of bytes (s1bytes) of the shared drop which flow to S1 if any */
1388 nset = NOTHING_SHIFT_NO_FLOW;
1389 nver = get_num_ver(vn->vn_mode, tb, h,
1390 0, -1, h ? vn->vn_nr_item : 0, -1,
1396 /* note, that in this case we try to bottle between S[0] and S1 (S1 - the first new node) */
1397 nver1 = get_num_ver(vn->vn_mode, tb, h,
1399 snum012 + NOTHING_SHIFT_FLOW, FLOW);
1401 nset = NOTHING_SHIFT_FLOW, nver = nver1;
1404 /* calculate number of blocks S[h] must be split into when
1405 l_shift_num first items and l_shift_bytes of the right most
1406 liquid item to be shifted are shifted to the left neighbor,
1407 as well as number of items in each part of the splitted node (s012 numbers),
1408 and number of bytes (s1bytes) of the shared drop which flow to S1 if any
1410 lset = LEFT_SHIFT_NO_FLOW;
1411 lnver = get_num_ver(vn->vn_mode, tb, h,
1412 lpar - ((h || tb->lbytes == -1) ? 0 : 1),
1413 -1, h ? vn->vn_nr_item : 0, -1,
1414 snum012 + LEFT_SHIFT_NO_FLOW, NO_FLOW);
1418 lnver1 = get_num_ver(vn->vn_mode, tb, h,
1420 ((tb->lbytes != -1) ? 1 : 0),
1422 snum012 + LEFT_SHIFT_FLOW, FLOW);
1424 lset = LEFT_SHIFT_FLOW, lnver = lnver1;
1427 /* calculate number of blocks S[h] must be split into when
1428 r_shift_num first items and r_shift_bytes of the left most
1429 liquid item to be shifted are shifted to the right neighbor,
1430 as well as number of items in each part of the splitted node (s012 numbers),
1431 and number of bytes (s1bytes) of the shared drop which flow to S1 if any
1433 rset = RIGHT_SHIFT_NO_FLOW;
1434 rnver = get_num_ver(vn->vn_mode, tb, h,
1436 h ? (vn->vn_nr_item - rpar) : (rpar -
1441 snum012 + RIGHT_SHIFT_NO_FLOW, NO_FLOW);
1445 rnver1 = get_num_ver(vn->vn_mode, tb, h,
1448 ((tb->rbytes != -1) ? 1 : 0)),
1450 snum012 + RIGHT_SHIFT_FLOW, FLOW);
1453 rset = RIGHT_SHIFT_FLOW, rnver = rnver1;
1456 /* calculate number of blocks S[h] must be split into when
1457 items are shifted in both directions,
1458 as well as number of items in each part of the splitted node (s012 numbers),
1459 and number of bytes (s1bytes) of the shared drop which flow to S1 if any
1461 lrset = LR_SHIFT_NO_FLOW;
1462 lrnver = get_num_ver(vn->vn_mode, tb, h,
1463 lpar - ((h || tb->lbytes == -1) ? 0 : 1),
1465 h ? (vn->vn_nr_item - rpar) : (rpar -
1470 snum012 + LR_SHIFT_NO_FLOW, NO_FLOW);
1474 lrnver1 = get_num_ver(vn->vn_mode, tb, h,
1476 ((tb->lbytes != -1) ? 1 : 0),
1479 ((tb->rbytes != -1) ? 1 : 0)),
1481 snum012 + LR_SHIFT_FLOW, FLOW);
1482 if (lrnver > lrnver1)
1483 lrset = LR_SHIFT_FLOW, lrnver = lrnver1;
1486 /* Our general shifting strategy is:
1487 1) to minimized number of new nodes;
1488 2) to minimized number of neighbors involved in shifting;
1489 3) to minimized number of disk reads; */
1491 /* we can win TWO or ONE nodes by shifting in both directions */
1492 if (lrnver < lnver && lrnver < rnver) {
1494 (tb->lnum[h] != 1 ||
1496 lrnver != 1 || rnver != 2 || lnver != 2
1497 || h != 1), "vs-8230: bad h");
1498 if (lrset == LR_SHIFT_FLOW)
1499 set_parameters(tb, h, tb->lnum[h], tb->rnum[h],
1500 lrnver, snum012 + lrset,
1501 tb->lbytes, tb->rbytes);
1503 set_parameters(tb, h,
1505 ((tb->lbytes == -1) ? 0 : 1),
1507 ((tb->rbytes == -1) ? 0 : 1),
1508 lrnver, snum012 + lrset, -1, -1);
1513 /* if shifting doesn't lead to better packing then don't shift */
1514 if (nver == lrnver) {
1515 set_parameters(tb, h, 0, 0, nver, snum012 + nset, -1,
1520 /* now we know that for better packing shifting in only one
1521 direction either to the left or to the right is required */
1523 /* if shifting to the left is better than shifting to the right */
1524 if (lnver < rnver) {
1529 /* if shifting to the right is better than shifting to the left */
1530 if (lnver > rnver) {
1531 SET_PAR_SHIFT_RIGHT;
1535 /* now shifting in either direction gives the same number
1536 of nodes and we can make use of the cached neighbors */
1537 if (is_left_neighbor_in_cache(tb, h)) {
1542 /* shift to the right independently on whether the right neighbor in cache or not */
1543 SET_PAR_SHIFT_RIGHT;
1548 /* Check whether current node S[h] is balanced when Decreasing its size by
1549 * Deleting or Cutting for INTERNAL node of S+tree.
1550 * Calculate parameters for balancing for current level h.
1552 * tb tree_balance structure;
1553 * h current level of the node;
1554 * inum item number in S[h];
1555 * mode i - insert, p - paste;
1556 * Returns: 1 - schedule occurred;
1557 * 0 - balancing for higher levels needed;
1558 * -1 - no balancing for higher levels needed;
1559 * -2 - no disk space.
1561 * Note: Items of internal nodes have fixed size, so the balance condition for
1562 * the internal part of S+tree is as for the B-trees.
1564 static int dc_check_balance_internal(struct tree_balance *tb, int h)
1566 struct virtual_node *vn = tb->tb_vn;
1568 /* Sh is the node whose balance is currently being checked,
1569 and Fh is its father. */
1570 struct buffer_head *Sh, *Fh;
1572 int lfree, rfree /* free space in L and R */ ;
1574 Sh = PATH_H_PBUFFER(tb->tb_path, h);
1575 Fh = PATH_H_PPARENT(tb->tb_path, h);
1577 maxsize = MAX_CHILD_SIZE(Sh);
1579 /* using tb->insert_size[h], which is negative in this case, create_virtual_node calculates: */
1580 /* new_nr_item = number of items node would have if operation is */
1581 /* performed without balancing (new_nr_item); */
1582 create_virtual_node(tb, h);
1584 if (!Fh) { /* S[h] is the root. */
1585 if (vn->vn_nr_item > 0) {
1586 set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
1587 return NO_BALANCING_NEEDED; /* no balancing for higher levels needed */
1589 /* new_nr_item == 0.
1590 * Current root will be deleted resulting in
1591 * decrementing the tree height. */
1592 set_parameters(tb, h, 0, 0, 0, NULL, -1, -1);
1596 if ((ret = get_parents(tb, h)) != CARRY_ON)
1599 /* get free space of neighbors */
1600 rfree = get_rfree(tb, h);
1601 lfree = get_lfree(tb, h);
1603 /* determine maximal number of items we can fit into neighbors */
1604 check_left(tb, h, lfree);
1605 check_right(tb, h, rfree);
1607 if (vn->vn_nr_item >= MIN_NR_KEY(Sh)) { /* Balance condition for the internal node is valid.
1608 * In this case we balance only if it leads to better packing. */
1609 if (vn->vn_nr_item == MIN_NR_KEY(Sh)) { /* Here we join S[h] with one of its neighbors,
1610 * which is impossible with greater values of new_nr_item. */
1611 if (tb->lnum[h] >= vn->vn_nr_item + 1) {
1612 /* All contents of S[h] can be moved to L[h]. */
1618 PATH_H_B_ITEM_ORDER(tb->tb_path,
1620 0) ? B_NR_ITEMS(tb->FL[h]) : n - 1;
1621 n = dc_size(B_N_CHILD(tb->FL[h], order_L)) /
1622 (DC_SIZE + KEY_SIZE);
1623 set_parameters(tb, h, -n - 1, 0, 0, NULL, -1,
1628 if (tb->rnum[h] >= vn->vn_nr_item + 1) {
1629 /* All contents of S[h] can be moved to R[h]. */
1635 PATH_H_B_ITEM_ORDER(tb->tb_path,
1637 B_NR_ITEMS(Fh)) ? 0 : n + 1;
1638 n = dc_size(B_N_CHILD(tb->FR[h], order_R)) /
1639 (DC_SIZE + KEY_SIZE);
1640 set_parameters(tb, h, 0, -n - 1, 0, NULL, -1,
1646 if (tb->rnum[h] + tb->lnum[h] >= vn->vn_nr_item + 1) {
1647 /* All contents of S[h] can be moved to the neighbors (L[h] & R[h]). */
1651 ((MAX_NR_KEY(Sh) << 1) + 2 - tb->lnum[h] -
1652 tb->rnum[h] + vn->vn_nr_item + 1) / 2 -
1653 (MAX_NR_KEY(Sh) + 1 - tb->rnum[h]);
1654 set_parameters(tb, h, vn->vn_nr_item + 1 - to_r, to_r,
1659 /* Balancing does not lead to better packing. */
1660 set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
1661 return NO_BALANCING_NEEDED;
1664 /* Current node contain insufficient number of items. Balancing is required. */
1665 /* Check whether we can merge S[h] with left neighbor. */
1666 if (tb->lnum[h] >= vn->vn_nr_item + 1)
1667 if (is_left_neighbor_in_cache(tb, h)
1668 || tb->rnum[h] < vn->vn_nr_item + 1 || !tb->FR[h]) {
1674 PATH_H_B_ITEM_ORDER(tb->tb_path,
1676 0) ? B_NR_ITEMS(tb->FL[h]) : n - 1;
1677 n = dc_size(B_N_CHILD(tb->FL[h], order_L)) / (DC_SIZE +
1679 set_parameters(tb, h, -n - 1, 0, 0, NULL, -1, -1);
1683 /* Check whether we can merge S[h] with right neighbor. */
1684 if (tb->rnum[h] >= vn->vn_nr_item + 1) {
1690 PATH_H_B_ITEM_ORDER(tb->tb_path,
1691 h)) == B_NR_ITEMS(Fh)) ? 0 : (n + 1);
1692 n = dc_size(B_N_CHILD(tb->FR[h], order_R)) / (DC_SIZE +
1694 set_parameters(tb, h, 0, -n - 1, 0, NULL, -1, -1);
1698 /* All contents of S[h] can be moved to the neighbors (L[h] & R[h]). */
1699 if (tb->rnum[h] + tb->lnum[h] >= vn->vn_nr_item + 1) {
1703 ((MAX_NR_KEY(Sh) << 1) + 2 - tb->lnum[h] - tb->rnum[h] +
1704 vn->vn_nr_item + 1) / 2 - (MAX_NR_KEY(Sh) + 1 -
1706 set_parameters(tb, h, vn->vn_nr_item + 1 - to_r, to_r, 0, NULL,
1711 /* For internal nodes try to borrow item from a neighbor */
1712 RFALSE(!tb->FL[h] && !tb->FR[h], "vs-8235: trying to borrow for root");
1714 /* Borrow one or two items from caching neighbor */
1715 if (is_left_neighbor_in_cache(tb, h) || !tb->FR[h]) {
1719 (MAX_NR_KEY(Sh) + 1 - tb->lnum[h] + vn->vn_nr_item +
1720 1) / 2 - (vn->vn_nr_item + 1);
1721 set_parameters(tb, h, -from_l, 0, 1, NULL, -1, -1);
1725 set_parameters(tb, h, 0,
1726 -((MAX_NR_KEY(Sh) + 1 - tb->rnum[h] + vn->vn_nr_item +
1727 1) / 2 - (vn->vn_nr_item + 1)), 1, NULL, -1, -1);
1731 /* Check whether current node S[h] is balanced when Decreasing its size by
1732 * Deleting or Truncating for LEAF node of S+tree.
1733 * Calculate parameters for balancing for current level h.
1735 * tb tree_balance structure;
1736 * h current level of the node;
1737 * inum item number in S[h];
1738 * mode i - insert, p - paste;
1739 * Returns: 1 - schedule occurred;
1740 * 0 - balancing for higher levels needed;
1741 * -1 - no balancing for higher levels needed;
1742 * -2 - no disk space.
1744 static int dc_check_balance_leaf(struct tree_balance *tb, int h)
1746 struct virtual_node *vn = tb->tb_vn;
1748 /* Number of bytes that must be deleted from
1749 (value is negative if bytes are deleted) buffer which
1750 contains node being balanced. The mnemonic is that the
1751 attempted change in node space used level is levbytes bytes. */
1753 /* the maximal item size */
1755 /* S0 is the node whose balance is currently being checked,
1756 and F0 is its father. */
1757 struct buffer_head *S0, *F0;
1758 int lfree, rfree /* free space in L and R */ ;
1760 S0 = PATH_H_PBUFFER(tb->tb_path, 0);
1761 F0 = PATH_H_PPARENT(tb->tb_path, 0);
1763 levbytes = tb->insert_size[h];
1765 maxsize = MAX_CHILD_SIZE(S0); /* maximal possible size of an item */
1767 if (!F0) { /* S[0] is the root now. */
1769 RFALSE(-levbytes >= maxsize - B_FREE_SPACE(S0),
1770 "vs-8240: attempt to create empty buffer tree");
1772 set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
1773 return NO_BALANCING_NEEDED;
1776 if ((ret = get_parents(tb, h)) != CARRY_ON)
1779 /* get free space of neighbors */
1780 rfree = get_rfree(tb, h);
1781 lfree = get_lfree(tb, h);
1783 create_virtual_node(tb, h);
1785 /* if 3 leaves can be merge to one, set parameters and return */
1786 if (are_leaves_removable(tb, lfree, rfree))
1789 /* determine maximal number of items we can shift to the left/right neighbor
1790 and the maximal number of bytes that can flow to the left/right neighbor
1791 from the left/right most liquid item that cannot be shifted from S[0] entirely
1793 check_left(tb, h, lfree);
1794 check_right(tb, h, rfree);
1796 /* check whether we can merge S with left neighbor. */
1797 if (tb->lnum[0] >= vn->vn_nr_item && tb->lbytes == -1)
1798 if (is_left_neighbor_in_cache(tb, h) || ((tb->rnum[0] - ((tb->rbytes == -1) ? 0 : 1)) < vn->vn_nr_item) || /* S can not be merged with R */
1802 "vs-8245: dc_check_balance_leaf: FL[h] must exist");
1804 /* set parameter to merge S[0] with its left neighbor */
1805 set_parameters(tb, h, -1, 0, 0, NULL, -1, -1);
1809 /* check whether we can merge S[0] with right neighbor. */
1810 if (tb->rnum[0] >= vn->vn_nr_item && tb->rbytes == -1) {
1811 set_parameters(tb, h, 0, -1, 0, NULL, -1, -1);
1815 /* All contents of S[0] can be moved to the neighbors (L[0] & R[0]). Set parameters and return */
1816 if (is_leaf_removable(tb))
1819 /* Balancing is not required. */
1820 tb->s0num = vn->vn_nr_item;
1821 set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
1822 return NO_BALANCING_NEEDED;
1825 /* Check whether current node S[h] is balanced when Decreasing its size by
1826 * Deleting or Cutting.
1827 * Calculate parameters for balancing for current level h.
1829 * tb tree_balance structure;
1830 * h current level of the node;
1831 * inum item number in S[h];
1832 * mode d - delete, c - cut.
1833 * Returns: 1 - schedule occurred;
1834 * 0 - balancing for higher levels needed;
1835 * -1 - no balancing for higher levels needed;
1836 * -2 - no disk space.
1838 static int dc_check_balance(struct tree_balance *tb, int h)
1840 RFALSE(!(PATH_H_PBUFFER(tb->tb_path, h)),
1841 "vs-8250: S is not initialized");
1844 return dc_check_balance_internal(tb, h);
1846 return dc_check_balance_leaf(tb, h);
1849 /* Check whether current node S[h] is balanced.
1850 * Calculate parameters for balancing for current level h.
1853 * tb tree_balance structure:
1855 * tb is a large structure that must be read about in the header file
1856 * at the same time as this procedure if the reader is to successfully
1857 * understand this procedure
1859 * h current level of the node;
1860 * inum item number in S[h];
1861 * mode i - insert, p - paste, d - delete, c - cut.
1862 * Returns: 1 - schedule occurred;
1863 * 0 - balancing for higher levels needed;
1864 * -1 - no balancing for higher levels needed;
1865 * -2 - no disk space.
1867 static int check_balance(int mode,
1868 struct tree_balance *tb,
1872 struct item_head *ins_ih, const void *data)
1874 struct virtual_node *vn;
1876 vn = tb->tb_vn = (struct virtual_node *)(tb->vn_buf);
1877 vn->vn_free_ptr = (char *)(tb->tb_vn + 1);
1879 vn->vn_affected_item_num = inum;
1880 vn->vn_pos_in_item = pos_in_item;
1881 vn->vn_ins_ih = ins_ih;
1884 RFALSE(mode == M_INSERT && !vn->vn_ins_ih,
1885 "vs-8255: ins_ih can not be 0 in insert mode");
1887 if (tb->insert_size[h] > 0)
1888 /* Calculate balance parameters when size of node is increasing. */
1889 return ip_check_balance(tb, h);
1891 /* Calculate balance parameters when size of node is decreasing. */
1892 return dc_check_balance(tb, h);
1895 /* Check whether parent at the path is the really parent of the current node.*/
1896 static int get_direct_parent(struct tree_balance *tb, int h)
1898 struct buffer_head *bh;
1899 struct treepath *path = tb->tb_path;
1901 path_offset = PATH_H_PATH_OFFSET(tb->tb_path, h);
1903 /* We are in the root or in the new root. */
1904 if (path_offset <= FIRST_PATH_ELEMENT_OFFSET) {
1906 RFALSE(path_offset < FIRST_PATH_ELEMENT_OFFSET - 1,
1907 "PAP-8260: invalid offset in the path");
1909 if (PATH_OFFSET_PBUFFER(path, FIRST_PATH_ELEMENT_OFFSET)->
1910 b_blocknr == SB_ROOT_BLOCK(tb->tb_sb)) {
1911 /* Root is not changed. */
1912 PATH_OFFSET_PBUFFER(path, path_offset - 1) = NULL;
1913 PATH_OFFSET_POSITION(path, path_offset - 1) = 0;
1916 return REPEAT_SEARCH; /* Root is changed and we must recalculate the path. */
1920 (bh = PATH_OFFSET_PBUFFER(path, path_offset - 1)))
1921 return REPEAT_SEARCH; /* Parent in the path is not in the tree. */
1924 PATH_OFFSET_POSITION(path,
1925 path_offset - 1)) > B_NR_ITEMS(bh))
1926 return REPEAT_SEARCH;
1928 if (B_N_CHILD_NUM(bh, position) !=
1929 PATH_OFFSET_PBUFFER(path, path_offset)->b_blocknr)
1930 /* Parent in the path is not parent of the current node in the tree. */
1931 return REPEAT_SEARCH;
1933 if (buffer_locked(bh)) {
1934 reiserfs_write_unlock(tb->tb_sb);
1935 __wait_on_buffer(bh);
1936 reiserfs_write_lock(tb->tb_sb);
1937 if (FILESYSTEM_CHANGED_TB(tb))
1938 return REPEAT_SEARCH;
1941 return CARRY_ON; /* Parent in the path is unlocked and really parent of the current node. */
1944 /* Using lnum[h] and rnum[h] we should determine what neighbors
1946 * need in order to balance S[h], and get them if necessary.
1947 * Returns: SCHEDULE_OCCURRED - schedule occurred while the function worked;
1948 * CARRY_ON - schedule didn't occur while the function worked;
1950 static int get_neighbors(struct tree_balance *tb, int h)
1953 path_offset = PATH_H_PATH_OFFSET(tb->tb_path, h + 1);
1954 unsigned long son_number;
1955 struct super_block *sb = tb->tb_sb;
1956 struct buffer_head *bh;
1958 PROC_INFO_INC(sb, get_neighbors[h]);
1961 /* We need left neighbor to balance S[h]. */
1962 PROC_INFO_INC(sb, need_l_neighbor[h]);
1963 bh = PATH_OFFSET_PBUFFER(tb->tb_path, path_offset);
1965 RFALSE(bh == tb->FL[h] &&
1966 !PATH_OFFSET_POSITION(tb->tb_path, path_offset),
1967 "PAP-8270: invalid position in the parent");
1971 tb->FL[h]) ? tb->lkey[h] : B_NR_ITEMS(tb->
1973 son_number = B_N_CHILD_NUM(tb->FL[h], child_position);
1974 bh = sb_bread(sb, son_number);
1977 if (FILESYSTEM_CHANGED_TB(tb)) {
1979 PROC_INFO_INC(sb, get_neighbors_restart[h]);
1980 return REPEAT_SEARCH;
1983 RFALSE(!B_IS_IN_TREE(tb->FL[h]) ||
1984 child_position > B_NR_ITEMS(tb->FL[h]) ||
1985 B_N_CHILD_NUM(tb->FL[h], child_position) !=
1986 bh->b_blocknr, "PAP-8275: invalid parent");
1987 RFALSE(!B_IS_IN_TREE(bh), "PAP-8280: invalid child");
1990 MAX_CHILD_SIZE(bh) -
1991 dc_size(B_N_CHILD(tb->FL[0], child_position)),
1992 "PAP-8290: invalid child size of left neighbor");
1998 /* We need right neighbor to balance S[path_offset]. */
1999 if (tb->rnum[h]) { /* We need right neighbor to balance S[path_offset]. */
2000 PROC_INFO_INC(sb, need_r_neighbor[h]);
2001 bh = PATH_OFFSET_PBUFFER(tb->tb_path, path_offset);
2003 RFALSE(bh == tb->FR[h] &&
2004 PATH_OFFSET_POSITION(tb->tb_path,
2007 "PAP-8295: invalid position in the parent");
2010 (bh == tb->FR[h]) ? tb->rkey[h] + 1 : 0;
2011 son_number = B_N_CHILD_NUM(tb->FR[h], child_position);
2012 bh = sb_bread(sb, son_number);
2015 if (FILESYSTEM_CHANGED_TB(tb)) {
2017 PROC_INFO_INC(sb, get_neighbors_restart[h]);
2018 return REPEAT_SEARCH;
2024 && B_FREE_SPACE(bh) !=
2025 MAX_CHILD_SIZE(bh) -
2026 dc_size(B_N_CHILD(tb->FR[0], child_position)),
2027 "PAP-8300: invalid child size of right neighbor (%d != %d - %d)",
2028 B_FREE_SPACE(bh), MAX_CHILD_SIZE(bh),
2029 dc_size(B_N_CHILD(tb->FR[0], child_position)));
2035 static int get_virtual_node_size(struct super_block *sb, struct buffer_head *bh)
2037 int max_num_of_items;
2038 int max_num_of_entries;
2039 unsigned long blocksize = sb->s_blocksize;
2041 #define MIN_NAME_LEN 1
2043 max_num_of_items = (blocksize - BLKH_SIZE) / (IH_SIZE + MIN_ITEM_LEN);
2044 max_num_of_entries = (blocksize - BLKH_SIZE - IH_SIZE) /
2045 (DEH_SIZE + MIN_NAME_LEN);
2047 return sizeof(struct virtual_node) +
2048 max(max_num_of_items * sizeof(struct virtual_item),
2049 sizeof(struct virtual_item) + sizeof(struct direntry_uarea) +
2050 (max_num_of_entries - 1) * sizeof(__u16));
2053 /* maybe we should fail balancing we are going to perform when kmalloc
2054 fails several times. But now it will loop until kmalloc gets
2056 static int get_mem_for_virtual_node(struct tree_balance *tb)
2062 size = get_virtual_node_size(tb->tb_sb, PATH_PLAST_BUFFER(tb->tb_path));
2064 if (size > tb->vn_buf_size) {
2065 /* we have to allocate more memory for virtual node */
2067 /* free memory allocated before */
2069 /* this is not needed if kfree is atomic */
2073 /* virtual node requires now more memory */
2074 tb->vn_buf_size = size;
2076 /* get memory for virtual item */
2077 buf = kmalloc(size, GFP_ATOMIC | __GFP_NOWARN);
2079 /* getting memory with GFP_KERNEL priority may involve
2080 balancing now (due to indirect_to_direct conversion on
2081 dcache shrinking). So, release path and collected
2083 free_buffers_in_tb(tb);
2084 buf = kmalloc(size, GFP_NOFS);
2086 tb->vn_buf_size = 0;
2090 return REPEAT_SEARCH;
2096 if (check_fs && FILESYSTEM_CHANGED_TB(tb))
2097 return REPEAT_SEARCH;
2102 #ifdef CONFIG_REISERFS_CHECK
2103 static void tb_buffer_sanity_check(struct super_block *sb,
2104 struct buffer_head *bh,
2105 const char *descr, int level)
2108 if (atomic_read(&(bh->b_count)) <= 0)
2110 reiserfs_panic(sb, "jmacd-1", "negative or zero "
2111 "reference counter for buffer %s[%d] "
2112 "(%b)", descr, level, bh);
2114 if (!buffer_uptodate(bh))
2115 reiserfs_panic(sb, "jmacd-2", "buffer is not up "
2116 "to date %s[%d] (%b)",
2119 if (!B_IS_IN_TREE(bh))
2120 reiserfs_panic(sb, "jmacd-3", "buffer is not "
2121 "in tree %s[%d] (%b)",
2124 if (bh->b_bdev != sb->s_bdev)
2125 reiserfs_panic(sb, "jmacd-4", "buffer has wrong "
2126 "device %s[%d] (%b)",
2129 if (bh->b_size != sb->s_blocksize)
2130 reiserfs_panic(sb, "jmacd-5", "buffer has wrong "
2131 "blocksize %s[%d] (%b)",
2134 if (bh->b_blocknr > SB_BLOCK_COUNT(sb))
2135 reiserfs_panic(sb, "jmacd-6", "buffer block "
2136 "number too high %s[%d] (%b)",
2141 static void tb_buffer_sanity_check(struct super_block *sb,
2142 struct buffer_head *bh,
2143 const char *descr, int level)
/* Clear the dirty bits of a buffer via the journal; returns nonzero on
   success (buffer ready), 0 when the buffer is still locked/busy. */
static int clear_all_dirty_bits(struct super_block *s, struct buffer_head *bh)
{
	return reiserfs_prepare_for_journal(s, bh, 0);
}
2153 static int wait_tb_buffers_until_unlocked(struct tree_balance *tb)
2155 struct buffer_head *locked;
2156 #ifdef CONFIG_REISERFS_CHECK
2157 int repeat_counter = 0;
2165 for (i = tb->tb_path->path_length;
2166 !locked && i > ILLEGAL_PATH_ELEMENT_OFFSET; i--) {
2167 if (PATH_OFFSET_PBUFFER(tb->tb_path, i)) {
2168 /* if I understand correctly, we can only be sure the last buffer
2169 ** in the path is in the tree --clm
2171 #ifdef CONFIG_REISERFS_CHECK
2172 if (PATH_PLAST_BUFFER(tb->tb_path) ==
2173 PATH_OFFSET_PBUFFER(tb->tb_path, i))
2174 tb_buffer_sanity_check(tb->tb_sb,
2181 if (!clear_all_dirty_bits(tb->tb_sb,
2186 PATH_OFFSET_PBUFFER(tb->tb_path,
2192 for (i = 0; !locked && i < MAX_HEIGHT && tb->insert_size[i];
2198 tb_buffer_sanity_check(tb->tb_sb,
2201 if (!clear_all_dirty_bits
2202 (tb->tb_sb, tb->L[i]))
2206 if (!locked && tb->FL[i]) {
2207 tb_buffer_sanity_check(tb->tb_sb,
2210 if (!clear_all_dirty_bits
2211 (tb->tb_sb, tb->FL[i]))
2215 if (!locked && tb->CFL[i]) {
2216 tb_buffer_sanity_check(tb->tb_sb,
2219 if (!clear_all_dirty_bits
2220 (tb->tb_sb, tb->CFL[i]))
2221 locked = tb->CFL[i];
2226 if (!locked && (tb->rnum[i])) {
2229 tb_buffer_sanity_check(tb->tb_sb,
2232 if (!clear_all_dirty_bits
2233 (tb->tb_sb, tb->R[i]))
2237 if (!locked && tb->FR[i]) {
2238 tb_buffer_sanity_check(tb->tb_sb,
2241 if (!clear_all_dirty_bits
2242 (tb->tb_sb, tb->FR[i]))
2246 if (!locked && tb->CFR[i]) {
2247 tb_buffer_sanity_check(tb->tb_sb,
2250 if (!clear_all_dirty_bits
2251 (tb->tb_sb, tb->CFR[i]))
2252 locked = tb->CFR[i];
2256 /* as far as I can tell, this is not required. The FEB list seems
2257 ** to be full of newly allocated nodes, which will never be locked,
2258 ** dirty, or anything else.
2259 ** To be safe, I'm putting in the checks and waits in. For the moment,
2260 ** they are needed to keep the code in journal.c from complaining
2261 ** about the buffer. That code is inside CONFIG_REISERFS_CHECK as well.
2264 for (i = 0; !locked && i < MAX_FEB_SIZE; i++) {
2266 if (!clear_all_dirty_bits
2267 (tb->tb_sb, tb->FEB[i]))
2268 locked = tb->FEB[i];
2273 #ifdef CONFIG_REISERFS_CHECK
2275 if ((repeat_counter % 10000) == 0) {
2276 reiserfs_warning(tb->tb_sb, "reiserfs-8200",
2277 "too many iterations waiting "
2278 "for buffer to unlock "
2281 /* Don't loop forever. Try to recover from possible error. */
2283 return (FILESYSTEM_CHANGED_TB(tb)) ?
2284 REPEAT_SEARCH : CARRY_ON;
2287 reiserfs_write_unlock(tb->tb_sb);
2288 __wait_on_buffer(locked);
2289 reiserfs_write_lock(tb->tb_sb);
2290 if (FILESYSTEM_CHANGED_TB(tb))
2291 return REPEAT_SEARCH;
2299 /* Prepare for balancing, that is
2300 * get all necessary parents, and neighbors;
2301 * analyze what and where should be moved;
2302 * get sufficient number of new nodes;
2303 * Balancing will start only after all resources will be collected at a time.
2305 * When ported to SMP kernels, only at the last moment after all needed nodes
2306 * are collected in cache, will the resources be locked using the usual
2307 * textbook ordered lock acquisition algorithms. Note that ensuring that
2308 * this code neither write locks what it does not need to write lock nor locks out of order
2309 * will be a pain in the butt that could have been avoided. Grumble grumble. -Hans
2311 * fix is meant in the sense of render unchanging
2313 * Latency might be improved by first gathering a list of what buffers are needed
2314 * and then getting as many of them in parallel as possible? -Hans
2317 * op_mode i - insert, d - delete, c - cut (truncate), p - paste (append)
2318 * tb tree_balance structure;
2319 * inum item number in S[h];
2320 * pos_in_item - comment this if you can
2321 * ins_ih item head of item being inserted
2322 * data inserted item or data to be pasted
2323 * Returns: 1 - schedule occurred while the function worked;
2324 * 0 - schedule didn't occur while the function worked;
2325 * -1 - if no_disk_space
2328 int fix_nodes(int op_mode, struct tree_balance *tb,
2329 struct item_head *ins_ih, const void *data)
/*
 * Gather (but do not yet apply) everything the subsequent do_balance()
 * call will need: the analysis of what to shift at each level, the
 * neighbor/parent buffers, and freshly allocated empty nodes.
 * Returns CARRY_ON when all resources are collected, REPEAT_SEARCH when
 * the tree changed under us, or an error code from the helpers (see the
 * block comment above for the parameter description).
 * NOTE(review): this excerpt is elided in places (braces, labels and
 * some call arguments are missing from view); the comments below only
 * describe what the visible lines establish.
 */
2331 int ret, h, item_num = PATH_LAST_POSITION(tb->tb_path);
2334 /* we set wait_tb_buffers_run when we have to restore any dirty bits cleared
2335 ** during wait_tb_buffers_run
2337 int wait_tb_buffers_run = 0;
/* S[0]: the buffer at the end of the search path (see "S[0]" in the
   PAP-8320 panic message below). */
2338 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
/* Per-superblock statistics counter. */
2340 ++REISERFS_SB(tb->tb_sb)->s_fix_nodes;
2342 pos_in_item = tb->tb_path->pos_in_item;
/* Snapshot the fs generation so FILESYSTEM_CHANGED_TB() can later tell
   whether the tree was modified while we slept. */
2344 tb->fs_gen = get_generation(tb->tb_sb);
2346 /* we prepare and log the super here so it will already be in the
2347 ** transaction when do_balance needs to change it.
2348 ** This way do_balance won't have to schedule when trying to prepare
2349 ** the super for logging
2351 reiserfs_prepare_for_journal(tb->tb_sb,
2352 SB_BUFFER_WITH_SB(tb->tb_sb), 1);
2353 journal_mark_dirty(tb->transaction_handle, tb->tb_sb,
2354 SB_BUFFER_WITH_SB(tb->tb_sb));
/* journal_mark_dirty may have slept; bail out if the tree changed. */
2355 if (FILESYSTEM_CHANGED_TB(tb))
2356 return REPEAT_SEARCH;
2358 /* if it possible in indirect_to_direct conversion */
2359 if (buffer_locked(tbS0)) {
/* Drop the reiserfs write lock while sleeping on the buffer, then
   retake it and recheck that the tree did not change meanwhile. */
2360 reiserfs_write_unlock(tb->tb_sb);
2361 __wait_on_buffer(tbS0);
2362 reiserfs_write_lock(tb->tb_sb);
2363 if (FILESYSTEM_CHANGED_TB(tb))
2364 return REPEAT_SEARCH;
2366 #ifdef CONFIG_REISERFS_CHECK
/* Debug-only sanity checks: panic if a do_balance is already pending,
   or if S[0] is stale or no longer in the tree. */
2368 print_cur_tb("fix_nodes");
2369 reiserfs_panic(tb->tb_sb, "PAP-8305",
2370 "there is pending do_balance");
2373 if (!buffer_uptodate(tbS0) || !B_IS_IN_TREE(tbS0))
2374 reiserfs_panic(tb->tb_sb, "PAP-8320", "S[0] (%b %z) is "
2375 "not uptodate at the beginning of fix_nodes "
2376 "or not in tree (mode %c)",
2377 tbS0, tbS0, op_mode);
2379 /* Check parameters. */
/* NOTE(review): the op_mode-specific switch arms are partially elided
   here; the two range checks below belong to different mode cases
   (the first one is the insert case per its panic text). */
2382 if (item_num <= 0 || item_num > B_NR_ITEMS(tbS0))
2383 reiserfs_panic(tb->tb_sb, "PAP-8330", "Incorrect "
2384 "item number %d (in S0 - %d) in case "
2385 "of insert", item_num,
2391 if (item_num < 0 || item_num >= B_NR_ITEMS(tbS0)) {
2392 print_block(tbS0, 0, -1, -1);
2393 reiserfs_panic(tb->tb_sb, "PAP-8335", "Incorrect "
2394 "item number(%d); mode = %c "
2397 tb->insert_size[0]);
2401 reiserfs_panic(tb->tb_sb, "PAP-8340", "Incorrect mode "
2406 if (get_mem_for_virtual_node(tb) == REPEAT_SEARCH)
2407 // FIXME: maybe -ENOMEM when tb->vn_buf == 0? Now just repeat
2408 return REPEAT_SEARCH;
2410 /* Starting from the leaf level; for all levels h of the tree. */
2411 for (h = 0; h < MAX_HEIGHT && tb->insert_size[h]; h++) {
2412 ret = get_direct_parent(tb, h);
2413 if (ret != CARRY_ON)
/* Decide what must be shifted/created at this level. */
2416 ret = check_balance(op_mode, tb, h, item_num,
2417 pos_in_item, ins_ih, data);
2418 if (ret != CARRY_ON) {
2419 if (ret == NO_BALANCING_NEEDED) {
2420 /* No balancing for higher levels needed. */
2421 ret = get_neighbors(tb, h);
2422 if (ret != CARRY_ON)
2424 if (h != MAX_HEIGHT - 1)
2425 tb->insert_size[h + 1] = 0;
2426 /* ok, analysis and resource gathering are complete */
2432 ret = get_neighbors(tb, h);
2433 if (ret != CARRY_ON)
2436 /* No disk space, or schedule occurred and analysis may be
2437 * invalid and needs to be redone. */
2438 ret = get_empty_nodes(tb, h);
2439 if (ret != CARRY_ON)
2442 if (!PATH_H_PBUFFER(tb->tb_path, h)) {
2443 /* We have a positive insert size but no nodes exist on this
2444 level, this means that we are creating a new root. */
2446 RFALSE(tb->blknum[h] != 1,
2447 "PAP-8350: creating new empty root");
2449 if (h < MAX_HEIGHT - 1)
2450 tb->insert_size[h + 1] = 0;
2451 } else if (!PATH_H_PBUFFER(tb->tb_path, h + 1)) {
2452 if (tb->blknum[h] > 1) {
2453 /* The tree needs to be grown, so this node S[h]
2454 which is the root node is split into two nodes,
2455 and a new node (S[h+1]) will be created to
2456 become the root node. */
2458 RFALSE(h == MAX_HEIGHT - 1,
2459 "PAP-8355: attempt to create too high of a tree");
/* NOTE(review): the size expression below is split across elided
   lines; by analogy with the (DC_SIZE + KEY_SIZE) formula further
   down, it sizes one pointer/key pair per extra block — confirm
   against the complete file. */
2461 tb->insert_size[h + 1] =
2463 KEY_SIZE) * (tb->blknum[h] - 1) +
2465 } else if (h < MAX_HEIGHT - 1)
2466 tb->insert_size[h + 1] = 0;
/* Room needed in the parent: one (disk_child, key) pair for each
   block this level splits into beyond the first. */
2468 tb->insert_size[h + 1] =
2469 (DC_SIZE + KEY_SIZE) * (tb->blknum[h] - 1);
2472 ret = wait_tb_buffers_until_unlocked(tb);
2473 if (ret == CARRY_ON) {
2474 if (FILESYSTEM_CHANGED_TB(tb)) {
/* Tree changed while we waited: restore prepared buffers and
   repeat the whole search. */
2475 wait_tb_buffers_run = 1;
2476 ret = REPEAT_SEARCH;
2482 wait_tb_buffers_run = 1;
2487 // fix_nodes was unable to perform its calculation due to
2488 // filesystem got changed under us, lack of free disk space or i/o
2489 // failure. If the first is the case - the search will be
2490 // repeated. For now - free all resources acquired so far except
2491 // for the new allocated nodes
2495 /* Release path buffers. */
2496 if (wait_tb_buffers_run) {
2497 pathrelse_and_restore(tb->tb_sb, tb->tb_path);
2499 pathrelse(tb->tb_path);
2501 /* brelse all resources collected for balancing */
/* NOTE(review): the second argument of each restore call below and the
   paired brelse calls are elided from this view; presumably they cover
   tb->L[i], tb->R[i], tb->FL[i], tb->FR[i], tb->CFL[i], tb->CFR[i] as
   in unfix_nodes — confirm against the complete file. */
2502 for (i = 0; i < MAX_HEIGHT; i++) {
2503 if (wait_tb_buffers_run) {
2504 reiserfs_restore_prepared_buffer(tb->tb_sb,
2506 reiserfs_restore_prepared_buffer(tb->tb_sb,
2508 reiserfs_restore_prepared_buffer(tb->tb_sb,
2510 reiserfs_restore_prepared_buffer(tb->tb_sb,
2512 reiserfs_restore_prepared_buffer(tb->tb_sb,
2515 reiserfs_restore_prepared_buffer(tb->tb_sb,
/* Also restore any freshly allocated (FEB) buffers we had prepared. */
2535 if (wait_tb_buffers_run) {
2536 for (i = 0; i < MAX_FEB_SIZE; i++) {
2538 reiserfs_restore_prepared_buffer
2539 (tb->tb_sb, tb->FEB[i]);
2547 /* Anatoly will probably forgive me renaming p_s_tb to tb. I just
2548 wanted to make lines shorter */
2549 void unfix_nodes(struct tree_balance *tb)
2553 /* Release path buffers. */
2554 pathrelse_and_restore(tb->tb_sb, tb->tb_path);
2556 /* brelse all resources collected for balancing */
2557 for (i = 0; i < MAX_HEIGHT; i++) {
2558 reiserfs_restore_prepared_buffer(tb->tb_sb, tb->L[i]);
2559 reiserfs_restore_prepared_buffer(tb->tb_sb, tb->R[i]);
2560 reiserfs_restore_prepared_buffer(tb->tb_sb, tb->FL[i]);
2561 reiserfs_restore_prepared_buffer(tb->tb_sb, tb->FR[i]);
2562 reiserfs_restore_prepared_buffer(tb->tb_sb, tb->CFL[i]);
2563 reiserfs_restore_prepared_buffer(tb->tb_sb, tb->CFR[i]);
2573 /* deal with list of allocated (used and unused) nodes */
2574 for (i = 0; i < MAX_FEB_SIZE; i++) {
2576 b_blocknr_t blocknr = tb->FEB[i]->b_blocknr;
2577 /* de-allocated block which was not used by balancing and
2578 bforget about buffer for it */
2580 reiserfs_free_block(tb->transaction_handle, NULL,
2584 /* release used as new nodes including a new root */
2585 brelse(tb->used[i]);