2 * linux/fs/ufs/balloc.c
5 * Daniel Pirkl <daniel.pirkl@email.cz>
6 * Charles University, Faculty of Mathematics and Physics
10 #include <linux/ufs_fs.h>
11 #include <linux/stat.h>
12 #include <linux/time.h>
13 #include <linux/string.h>
14 #include <linux/quotaops.h>
15 #include <linux/buffer_head.h>
16 #include <linux/capability.h>
17 #include <linux/sched.h>
18 #include <linux/bitops.h>
19 #include <asm/byteorder.h>
24 #undef UFS_BALLOC_DEBUG
26 #ifdef UFS_BALLOC_DEBUG
27 #define UFSD(x) printk("(%s, %d), %s:", __FILE__, __LINE__, __FUNCTION__); printk x;
32 static unsigned ufs_add_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
33 static unsigned ufs_alloc_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
34 static unsigned ufs_alloccg_block (struct inode *, struct ufs_cg_private_info *, unsigned, int *);
35 static unsigned ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, unsigned, unsigned);
36 static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
37 static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);
40 * Free 'count' fragments from fragment number 'fragment'
/*
 * Free 'count' fragments starting at fragment number 'fragment': set the
 * corresponding bits in the cylinder-group free map, refresh the
 * fragment-size summary (cg_frsum) for the containing block, and update
 * the per-cg and filesystem-wide free-fragment counters.  If freeing
 * completes a whole block, the fragments are re-accounted as one free
 * block instead.
 *
 * NOTE(review): interior lines of this function are missing from this
 * extract; the comments below describe only the visible code.
 */
42 void ufs_free_fragments(struct inode *inode, unsigned fragment, unsigned count)
44 struct super_block * sb;
45 struct ufs_sb_private_info * uspi;
46 struct ufs_super_block_first * usb1;
47 struct ufs_cg_private_info * ucpi;
48 struct ufs_cylinder_group * ucg;
49 unsigned cgno, bit, end_bit, bbase, blkmap, i, blkno, cylno;
52 uspi = UFS_SB(sb)->s_uspi;
53 usb1 = ubh_get_usb_first(uspi);
55 UFSD(("ENTER, fragment %u, count %u\n", fragment, count))
/* Sanity check: the run being freed must fit the containing group. */
57 if (ufs_fragnum(fragment) + count > uspi->s_fpg)
58 ufs_error (sb, "ufs_free_fragments", "internal error");
/* Locate the cylinder group and the bit offset of 'fragment' in it. */
62 cgno = ufs_dtog(fragment);
63 bit = ufs_dtogd(fragment);
64 if (cgno >= uspi->s_ncg) {
65 ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
69 ucpi = ufs_load_cylinder (sb, cgno);
72 ucg = ubh_get_ucg (UCPI_UBH(ucpi));
73 if (!ufs_cg_chkmagic(sb, ucg)) {
74 ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
78 end_bit = bit + count;
79 bbase = ufs_blknum (bit);
/* Retract the old fragment-size summary for this block ... */
80 blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
81 ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
82 for (i = bit; i < end_bit; i++) {
/* Mark each fragment free; a bit that is already set indicates a
 * double free, reported via ufs_error below (presumably on an
 * 'else' branch on a line missing from this extract). */
83 if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
84 ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
86 ufs_error (sb, "ufs_free_fragments",
87 "bit already cleared for fragment %u", i);
90 DQUOT_FREE_BLOCK (inode, count);
93 fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
94 fs32_add(sb, &usb1->fs_cstotal.cs_nffree, count);
95 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
/* ... and re-derive it from the updated map. */
96 blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
97 ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);
100 * Trying to reassemble free fragments into block
102 blkno = ufs_fragstoblks (bbase);
103 if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
/* The whole block is free now: shift the accounting from free
 * fragments to free blocks, and keep the 4.4BSD cluster summary
 * in sync where applicable. */
104 fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
105 fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, uspi->s_fpb);
106 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
107 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
108 ufs_clusteracct (sb, ucpi, blkno, 1);
109 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
110 fs32_add(sb, &usb1->fs_cstotal.cs_nbfree, 1);
111 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
112 cylno = ufs_cbtocylno (bbase);
113 fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(bbase)), 1);
114 fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
/* Write out the dirtied superblock/cylinder-group buffers,
 * synchronously if the fs is mounted MS_SYNCHRONOUS. */
117 ubh_mark_buffer_dirty (USPI_UBH(uspi));
118 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
119 if (sb->s_flags & MS_SYNCHRONOUS) {
120 ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
121 ubh_wait_on_buffer (UCPI_UBH(ucpi));
131 UFSD(("EXIT (FAILED)\n"))
136 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
/*
 * Free 'count' fragments that form whole blocks, starting at fragment
 * number 'fragment'.  Both 'fragment' and 'count' must be block-aligned
 * (checked against s_fpbmask); each block is returned to the free map
 * one block at a time.  The 'overflow' variable handles runs that cross
 * a cylinder-group boundary.
 *
 * NOTE(review): interior lines are missing from this extract; comments
 * describe only the visible code.
 */
138 void ufs_free_blocks(struct inode *inode, unsigned fragment, unsigned count)
140 struct super_block * sb;
141 struct ufs_sb_private_info * uspi;
142 struct ufs_super_block_first * usb1;
143 struct ufs_cg_private_info * ucpi;
144 struct ufs_cylinder_group * ucg;
145 unsigned overflow, cgno, bit, end_bit, blkno, i, cylno;
148 uspi = UFS_SB(sb)->s_uspi;
149 usb1 = ubh_get_usb_first(uspi);
151 UFSD(("ENTER, fragment %u, count %u\n", fragment, count))
/* Only block-aligned ranges may be freed through this path. */
153 if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
154 ufs_error (sb, "ufs_free_blocks", "internal error, "
155 "fragment %u, count %u\n", fragment, count);
163 cgno = ufs_dtog (fragment);
164 bit = ufs_dtogd (fragment);
165 if (cgno >= uspi->s_ncg) {
166 ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
169 end_bit = bit + count;
/* If the run spills past this group, remember the excess so it can
 * be freed from the next group (handling not visible here). */
170 if (end_bit > uspi->s_fpg) {
171 overflow = bit + count - uspi->s_fpg;
176 ucpi = ufs_load_cylinder (sb, cgno);
179 ucg = ubh_get_ucg (UCPI_UBH(ucpi));
180 if (!ufs_cg_chkmagic(sb, ucg)) {
181 ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
/* Free one whole block per iteration. */
185 for (i = bit; i < end_bit; i += uspi->s_fpb) {
186 blkno = ufs_fragstoblks(i);
/* A block already marked free in the map means a double free. */
187 if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
188 ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
190 ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
191 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
192 ufs_clusteracct (sb, ucpi, blkno, 1);
193 DQUOT_FREE_BLOCK(inode, uspi->s_fpb);
195 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
196 fs32_add(sb, &usb1->fs_cstotal.cs_nbfree, 1);
197 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
198 cylno = ufs_cbtocylno(i);
199 fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(i)), 1);
200 fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
/* Flush dirtied metadata; synchronously for MS_SYNCHRONOUS mounts. */
203 ubh_mark_buffer_dirty (USPI_UBH(uspi));
204 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
205 if (sb->s_flags & MS_SYNCHRONOUS) {
206 ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
207 ubh_wait_on_buffer (UCPI_UBH(ucpi));
223 UFSD(("EXIT (FAILED)\n"))
/*
 * Return a locked, up-to-date page of 'mapping' at 'index', with buffer
 * heads attached.  Tries find_lock_page() first and falls back to
 * read_cache_page() to bring the page in from disk; on read failure it
 * returns ERR_PTR(-EIO).  If the page lost its mapping or its buffers
 * while being locked, it is released and the lookup is retried
 * (goto try_again) because the caller needs those buffers.
 *
 * NOTE(review): interior lines are missing from this extract.
 */
227 static struct page *ufs_get_locked_page(struct address_space *mapping,
233 page = find_lock_page(mapping, index);
/* Not cached: read it in via the mapping's readpage operation. */
235 page = read_cache_page(mapping, index,
236 (filler_t*)mapping->a_ops->readpage,
239 printk(KERN_ERR "ufs_change_blocknr: "
240 "read_cache_page error: ino %lu, index: %lu\n",
241 mapping->host->i_ino, index);
/* The read completed but the page is not valid: give up with -EIO. */
247 if (!PageUptodate(page) || PageError(page)) {
249 page_cache_release(page);
251 printk(KERN_ERR "ufs_change_blocknr: "
252 "can not read page: ino %lu, index: %lu\n",
253 mapping->host->i_ino, index);
255 page = ERR_PTR(-EIO);
/* Page was truncated or lost its buffers under us: retry. */
260 if (unlikely(!page->mapping || !page_has_buffers(page))) {
262 page_cache_release(page);
263 goto try_again;/*we really need these buffers*/
270 * Modify inode page cache in such way:
271 * have - blocks with b_blocknr equal to oldb...oldb+count-1
272 * get - blocks with b_blocknr equal to newb...newb+count-1
273 * also we suppose that oldb...oldb+count-1 blocks
274 * situated at the end of file.
276 * We can come here from ufs_writepage or ufs_prepare_write;
277 * locked_page is an argument of these functions, so it is already locked.
/*
 * Re-number buffers in the page cache after fragments were moved on
 * disk: buffers whose b_blocknr is in [oldb, oldb+count) are retargeted
 * to [newb, newb+count) and marked dirty.  The old on-disk aliases are
 * invalidated via unmap_underlying_metadata().  'locked_page' is the
 * page the caller (writepage/prepare_write) already holds locked, so it
 * is used directly instead of being re-locked through
 * ufs_get_locked_page().
 *
 * NOTE(review): interior lines are missing from this extract; the code
 * assumes the old blocks sit at the end of the file (see baseblk).
 */
279 static void ufs_change_blocknr(struct inode *inode, unsigned int count,
280 unsigned int oldb, unsigned int newb,
281 struct page *locked_page)
283 unsigned int blk_per_page = 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
285 struct address_space *mapping = inode->i_mapping;
286 pgoff_t index, cur_index = locked_page->index;
289 struct buffer_head *head, *bh;
/* First file block of the run being moved, derived from i_size. */
291 baseblk = ((i_size_read(inode) - 1) >> inode->i_blkbits) + 1 - count;
293 UFSD(("ENTER, ino %lu, count %u, oldb %u, newb %u\n",
294 inode->i_ino, count, oldb, newb));
296 BUG_ON(!PageLocked(locked_page));
/* Walk the run one page at a time. */
298 for (i = 0; i < count; i += blk_per_page) {
299 index = (baseblk+i) >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
/* Any page other than the caller's locked one must be locked here. */
301 if (likely(cur_index != index)) {
302 page = ufs_get_locked_page(mapping, index);
309 head = page_buffers(page);
/* Retarget each buffer that still points at the old location. */
312 if (likely(bh->b_blocknr == j + oldb && j < count)) {
313 unmap_underlying_metadata(bh->b_bdev,
315 bh->b_blocknr = newb + j++;
316 mark_buffer_dirty(bh);
319 bh = bh->b_this_page;
320 } while (bh != head);
322 set_page_dirty(page);
/* Unlock/release only pages this function locked itself. */
324 if (likely(cur_index != index)) {
326 page_cache_release(page);
/*
 * Main allocation entry point.  Allocate 'count' fragments for
 * 'fragment' of the inode; '*p' is the on-disk pointer to update with
 * the result.  Three strategies, in order:
 *   1. the fragments are already allocated (*p non-zero and covering) —
 *      return them;
 *   2. extend the existing partial block in place
 *      (ufs_add_fragments);
 *   3. allocate a fresh block elsewhere, move the cached data there
 *      (ufs_change_blocknr) and free the old fragments.
 * Returns the first allocated fragment number, or 0 on failure with
 * *err set.
 *
 * NOTE(review): interior lines (locking, some branches) are missing
 * from this extract; comments describe only the visible code.
 */
332 unsigned ufs_new_fragments(struct inode * inode, __fs32 * p, unsigned fragment,
333 unsigned goal, unsigned count, int * err, struct page *locked_page)
335 struct super_block * sb;
336 struct ufs_sb_private_info * uspi;
337 struct ufs_super_block_first * usb1;
338 unsigned cgno, oldcount, newcount, tmp, request, result;
340 UFSD(("ENTER, ino %lu, fragment %u, goal %u, count %u\n", inode->i_ino, fragment, goal, count))
343 uspi = UFS_SB(sb)->s_uspi;
344 usb1 = ubh_get_usb_first(uspi);
349 tmp = fs32_to_cpu(sb, *p);
/* Clamp the request so it never crosses a block boundary. */
350 if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
351 ufs_warning (sb, "ufs_new_fragments", "internal warning"
352 " fragment %u, count %u", fragment, count);
353 count = uspi->s_fpb - ufs_fragnum(fragment);
355 oldcount = ufs_fragnum (fragment);
356 newcount = oldcount + count;
359 * Somebody else has just allocated our fragments
363 ufs_error (sb, "ufs_new_fragments", "internal error, "
364 "fragment %u, tmp %u\n", fragment, tmp);
/* Fragment lies before the last allocated one: already covered. */
368 if (fragment < UFS_I(inode)->i_lastfrag) {
369 UFSD(("EXIT (ALREADY ALLOCATED)\n"))
376 UFSD(("EXIT (ALREADY ALLOCATED)\n"))
383 * There is not enough space for the user on the device
/* Non-privileged allocations must leave the minfree reserve intact. */
385 if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(usb1, UFS_MINFREE) <= 0) {
387 UFSD(("EXIT (FAILED)\n"))
/* Pick a starting cylinder group: from the goal if valid, otherwise
 * from the inode's own group. */
391 if (goal >= uspi->s_size)
394 cgno = ufs_inotocg (inode->i_ino);
396 cgno = ufs_dtog (goal);
399 * allocate new fragment
402 result = ufs_alloc_fragments (inode, cgno, goal, count, err);
404 *p = cpu_to_fs32(sb, result);
406 inode->i_blocks += count << uspi->s_nspfshift;
407 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
410 UFSD(("EXIT, result %u\n", result))
/* Try to grow the existing partial block in place. */
417 result = ufs_add_fragments (inode, tmp, oldcount, newcount, err);
420 inode->i_blocks += count << uspi->s_nspfshift;
421 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
423 UFSD(("EXIT, result %u\n", result))
428 * allocate new block and move data
/* Choose the request size based on the fs_optim space/time policy;
 * the policy flips to UFS_OPTTIME when free space runs low. */
430 switch (fs32_to_cpu(sb, usb1->fs_optim)) {
433 if (uspi->s_minfree < 5 || fs32_to_cpu(sb, usb1->fs_cstotal.cs_nffree)
434 > uspi->s_dsize * uspi->s_minfree / (2 * 100) )
436 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
439 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
442 request = uspi->s_fpb;
443 if (fs32_to_cpu(sb, usb1->fs_cstotal.cs_nffree) < uspi->s_dsize *
444 (uspi->s_minfree - 2) / 100)
446 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
449 result = ufs_alloc_fragments (inode, cgno, goal, request, err);
/* Relocation succeeded: repoint cached buffers at the new blocks. */
451 ufs_change_blocknr(inode, oldcount, tmp, result, locked_page);
453 *p = cpu_to_fs32(sb, result);
455 inode->i_blocks += count << uspi->s_nspfshift;
456 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
/* Return the unused tail of the over-allocation, then the old run. */
458 if (newcount < request)
459 ufs_free_fragments (inode, result + newcount, request - newcount);
460 ufs_free_fragments (inode, tmp, oldcount);
461 UFSD(("EXIT, result %u\n", result))
466 UFSD(("EXIT (FAILED)\n"))
/*
 * Try to extend an already-allocated fragment run in place, from
 * 'oldcount' to 'newcount' fragments, by claiming the free fragments
 * immediately following it inside the same block.  Fails (returns via a
 * path not fully visible here) if any required fragment is taken, if
 * the group lacks free fragments, or if the run would cross a block
 * boundary.  On success the cg_frsum fragment-size summary is moved
 * from the old run length to the new remainder.
 *
 * NOTE(review): interior lines are missing from this extract.
 */
471 ufs_add_fragments (struct inode * inode, unsigned fragment,
472 unsigned oldcount, unsigned newcount, int * err)
474 struct super_block * sb;
475 struct ufs_sb_private_info * uspi;
476 struct ufs_super_block_first * usb1;
477 struct ufs_cg_private_info * ucpi;
478 struct ufs_cylinder_group * ucg;
479 unsigned cgno, fragno, fragoff, count, fragsize, i;
481 UFSD(("ENTER, fragment %u, oldcount %u, newcount %u\n", fragment, oldcount, newcount))
484 uspi = UFS_SB(sb)->s_uspi;
485 usb1 = ubh_get_usb_first (uspi);
486 count = newcount - oldcount;
488 cgno = ufs_dtog(fragment);
/* Cheap pre-checks: enough free fragments in this cg, and the grown
 * run must still fit inside one block. */
489 if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
491 if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
493 ucpi = ufs_load_cylinder (sb, cgno);
496 ucg = ubh_get_ucg (UCPI_UBH(ucpi));
497 if (!ufs_cg_chkmagic(sb, ucg)) {
498 ufs_panic (sb, "ufs_add_fragments",
499 "internal error, bad magic number on cg %u", cgno);
503 fragno = ufs_dtogd (fragment);
504 fragoff = ufs_fragnum (fragno);
/* Every fragment we want to claim must currently be free. */
505 for (i = oldcount; i < newcount; i++)
506 if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
509 * Block can be extended
511 ucg->cg_time = cpu_to_fs32(sb, get_seconds());
/* Measure the free run following the allocation so the fragment-size
 * summary can be adjusted for the shorter remainder. */
512 for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
513 if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
515 fragsize = i - oldcount;
516 if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
517 ufs_panic (sb, "ufs_add_fragments",
518 "internal error or corrupted bitmap on cg %u", cgno);
519 fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
520 if (fragsize != count)
521 fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
/* Claim the fragments in the free map. */
522 for (i = oldcount; i < newcount; i++)
523 ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);
524 if(DQUOT_ALLOC_BLOCK(inode, count)) {
529 fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
530 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
531 fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, count);
/* Flush dirtied metadata; synchronously for MS_SYNCHRONOUS mounts. */
533 ubh_mark_buffer_dirty (USPI_UBH(uspi));
534 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
535 if (sb->s_flags & MS_SYNCHRONOUS) {
536 ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
537 ubh_wait_on_buffer (UCPI_UBH(ucpi));
541 UFSD(("EXIT, fragment %u\n", fragment))
/*
 * Helper macro for ufs_alloc_fragments(): test whether cylinder group
 * 'cgno' can satisfy the current request — either it has a whole free
 * block (cs_nbfree) or a free fragment run of at least 'count'
 * fragments (some cg_frsum[k] non-zero for k >= count).  Expects
 * sb/uspi/cgno/count/ucg/k in scope at the expansion site.
 * NOTE(review): trailing continuation lines of this macro are missing
 * from this extract.
 */
546 #define UFS_TEST_FREE_SPACE_CG \
547 ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
548 if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
550 for (k = count; k < uspi->s_fpb; k++) \
551 if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
/*
 * Allocate 'count' fragments, preferring cylinder group 'cgno' near
 * 'goal'.  Cylinder-group selection uses the classic FFS three-step
 * search: preferred group, quadratic rehash, then brute force.  Within
 * the chosen group: a full-block request goes to ufs_alloccg_block();
 * otherwise the smallest adequate free run is found via cg_frsum and
 * ufs_bitmap_search(), and if only a whole block is available it is
 * broken up and the surplus fragments freed back.  Returns the absolute
 * fragment number, or a failure value (path not fully visible here).
 *
 * NOTE(review): interior lines are missing from this extract.
 */
554 static unsigned ufs_alloc_fragments (struct inode * inode, unsigned cgno,
555 unsigned goal, unsigned count, int * err)
557 struct super_block * sb;
558 struct ufs_sb_private_info * uspi;
559 struct ufs_super_block_first * usb1;
560 struct ufs_cg_private_info * ucpi;
561 struct ufs_cylinder_group * ucg;
562 unsigned oldcg, i, j, k, result, allocsize;
564 UFSD(("ENTER, ino %lu, cgno %u, goal %u, count %u\n", inode->i_ino, cgno, goal, count))
567 uspi = UFS_SB(sb)->s_uspi;
568 usb1 = ubh_get_usb_first(uspi);
572 * 1. searching on preferred cylinder group
574 UFS_TEST_FREE_SPACE_CG
577 * 2. quadratic rehash
579 for (j = 1; j < uspi->s_ncg; j *= 2) {
581 if (cgno >= uspi->s_ncg)
583 UFS_TEST_FREE_SPACE_CG
587 * 3. brute force search
588 * We start the linear scan at oldcg + 1 (group 0 was checked in
588 * step 1, the next candidate in step 2)
590 cgno = (oldcg + 1) % uspi->s_ncg;
591 for (j = 2; j < uspi->s_ncg; j++) {
593 if (cgno >= uspi->s_ncg)
595 UFS_TEST_FREE_SPACE_CG
598 UFSD(("EXIT (FAILED)\n"))
602 ucpi = ufs_load_cylinder (sb, cgno);
605 ucg = ubh_get_ucg (UCPI_UBH(ucpi));
606 if (!ufs_cg_chkmagic(sb, ucg))
607 ufs_panic (sb, "ufs_alloc_fragments",
608 "internal error, bad magic number on cg %u", cgno);
609 ucg->cg_time = cpu_to_fs32(sb, get_seconds());
/* Whole-block request: delegate to the block allocator. */
611 if (count == uspi->s_fpb) {
612 result = ufs_alloccg_block (inode, ucpi, goal, err);
613 if (result == (unsigned)-1)
/* Find the smallest free-run size >= count available in this cg. */
618 for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
619 if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
/* No partial run big enough: take a whole block, keep 'count'
 * fragments, and hand the surplus back as a free run. */
622 if (allocsize == uspi->s_fpb) {
623 result = ufs_alloccg_block (inode, ucpi, goal, err);
624 if (result == (unsigned)-1)
626 goal = ufs_dtogd (result);
627 for (i = count; i < uspi->s_fpb; i++)
628 ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
629 i = uspi->s_fpb - count;
630 DQUOT_FREE_BLOCK(inode, i);
632 fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
633 fs32_add(sb, &usb1->fs_cstotal.cs_nffree, i);
634 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
635 fs32_add(sb, &ucg->cg_frsum[i], 1);
/* A suitable free run exists: locate it in the bitmap. */
639 result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
640 if (result == (unsigned)-1)
642 if(DQUOT_ALLOC_BLOCK(inode, count)) {
/* Claim the fragments and move the cg_frsum accounting from the run
 * we consumed to the remainder (if any). */
646 for (i = 0; i < count; i++)
647 ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);
649 fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
650 fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, count);
651 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
652 fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);
654 if (count != allocsize)
655 fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);
658 ubh_mark_buffer_dirty (USPI_UBH(uspi));
659 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
660 if (sb->s_flags & MS_SYNCHRONOUS) {
661 ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
662 ubh_wait_on_buffer (UCPI_UBH(ucpi));
/* Convert the group-relative result to an absolute fragment number. */
666 result += cgno * uspi->s_fpg;
667 UFSD(("EXIT3, result %u\n", result))
/*
 * Allocate one whole block in the given cylinder group, as close to
 * 'goal' as possible (falling back to the group's rotor when no goal is
 * given).  Clears the block in the free map, updates cluster accounting
 * for 4.4BSD-style groups, charges quota, and maintains the per-group
 * free-block counters and cylinder summaries.  Returns the block's
 * first fragment number within the group, or (unsigned)-1-style failure
 * (exact failure path not fully visible here).
 *
 * NOTE(review): interior lines are missing from this extract.
 */
671 static unsigned ufs_alloccg_block (struct inode * inode,
672 struct ufs_cg_private_info * ucpi, unsigned goal, int * err)
674 struct super_block * sb;
675 struct ufs_sb_private_info * uspi;
676 struct ufs_super_block_first * usb1;
677 struct ufs_cylinder_group * ucg;
678 unsigned result, cylno, blkno;
680 UFSD(("ENTER, goal %u\n", goal))
683 uspi = UFS_SB(sb)->s_uspi;
684 usb1 = ubh_get_usb_first(uspi);
685 ucg = ubh_get_ucg(UCPI_UBH(ucpi));
/* No goal supplied: resume from the group's allocation rotor. */
688 goal = ucpi->c_rotor;
/* Round the goal down to a block boundary, group-relative. */
691 goal = ufs_blknum (goal);
692 goal = ufs_dtogd (goal);
695 * If the requested block is available, use it.
697 if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
/* Otherwise scan the bitmap for any free block near the goal. */
703 result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
704 if (result == (unsigned)-1)
706 ucpi->c_rotor = result;
708 blkno = ufs_fragstoblks(result);
709 ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
710 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
711 ufs_clusteracct (sb, ucpi, blkno, -1);
712 if(DQUOT_ALLOC_BLOCK(inode, uspi->s_fpb)) {
717 fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
718 fs32_sub(sb, &usb1->fs_cstotal.cs_nbfree, 1);
719 fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);
720 cylno = ufs_cbtocylno(result);
721 fs16_sub(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(result)), 1);
722 fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
724 UFSD(("EXIT, result %u\n", result))
/*
 * Scan 'size' bytes of the multi-buffer-head chain 'ubh', starting at
 * byte offset 'begin', for the first byte whose 'table' entry has any
 * of the bits in 'mask' set (i.e. table[byte] & mask != 0).  Used by
 * ufs_bitmap_search() with the fragtable lookup tables to find a free
 * run in the fragment bitmap.  Returns the number of bytes left
 * unscanned (0 means nothing found); the caller converts that back to a
 * position.
 *
 * NOTE(review): interior lines (the per-buffer loop structure) are
 * missing from this extract.
 */
729 static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
730 struct ufs_buffer_head *ubh,
731 unsigned begin, unsigned size,
732 unsigned char *table, unsigned char mask)
734 unsigned rest, offset;
/* Split 'begin' into a buffer index and a byte offset inside it. */
738 offset = begin & ~uspi->s_fmask;
739 begin >>= uspi->s_fshift;
/* Limit this pass to what remains of the current buffer. */
741 if ((offset + size) < uspi->s_fsize)
744 rest = uspi->s_fsize - offset;
746 cp = ubh->bh[begin]->b_data + offset;
/* Advance until a byte matches the mask or this buffer is exhausted. */
747 while ((table[*cp++] & mask) == 0 && --rest)
754 return (size + rest);
758 * Find a block of the specified size in the specified cylinder group.
759 * @sp: pointer to super block
760 * @ucpi: pointer to cylinder group info
761 * @goal: near which block we want find new one
762 * @count: specified size
/*
 * Find a free run of 'count' fragments in the cylinder group's fragment
 * bitmap, searching forward from 'goal' (or from the group's fragment
 * rotor when no goal is given).  A coarse byte-level scan (ubh_scanc
 * with the fragtable lookup tables) finds a candidate byte; a fine scan
 * using mask_arr/want_arr then pins down the exact bit position within
 * the byte's blocks.  Returns the group-relative fragment number, or a
 * failure value (exact failure path not fully visible here).
 *
 * NOTE(review): interior lines are missing from this extract.
 */
764 static unsigned ufs_bitmap_search(struct super_block *sb,
765 struct ufs_cg_private_info *ucpi,
766 unsigned goal, unsigned count)
769 * Bit patterns for identifying fragments in the block map
770 * used as ((map & mask_arr) == want_arr)
772 static const int mask_arr[9] = {
773 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
775 static const int want_arr[9] = {
776 0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
778 struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
779 struct ufs_super_block_first *usb1;
780 struct ufs_cylinder_group *ucg;
781 unsigned start, length, loc, result;
782 unsigned pos, want, blockmap, mask, end;
784 UFSD(("ENTER, cg %u, goal %u, count %u\n", ucpi->c_cgx, goal, count));
786 usb1 = ubh_get_usb_first (uspi);
787 ucg = ubh_get_ucg(UCPI_UBH(ucpi));
/* Start byte: from the goal if given, otherwise from the rotor. */
790 start = ufs_dtogd(goal) >> 3;
792 start = ucpi->c_frotor >> 3;
/* Coarse scan from 'start' to the end of the group's bitmap ... */
794 length = ((uspi->s_fpg + 7) >> 3) - start;
795 loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
796 (uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
797 1 << (count - 1 + (uspi->s_fpb & 7)));
/* ... then wrap around and retry from the beginning of the bitmap. */
800 loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
801 (uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
803 1 << (count - 1 + (uspi->s_fpb & 7)));
/* Nothing found although the summary said there was a run: the
 * bitmap is corrupted. */
805 ufs_error(sb, "ufs_bitmap_search",
806 "bitmap corrupted on cg %u, start %u,"
807 " length %u, count %u, freeoff %u\n",
808 ucpi->c_cgx, start, length, count,
/* Convert the scan remainder back to a fragment position. */
814 result = (start + length - loc) << 3;
815 ucpi->c_frotor = result;
818 * found the byte in the map; fine-scan the blocks it covers
821 for (end = result + 8; result < end; result += uspi->s_fpb) {
822 blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
824 mask = mask_arr[count];
825 want = want_arr[count];
/* Slide the count-sized pattern across the block's free map. */
826 for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
827 if ((blockmap & mask) == want) {
828 UFSD(("EXIT, result %u\n", result));
836 ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
838 UFSD(("EXIT (FAILED)\n"));
/*
 * Update the 4.4BSD cluster summary after a block at 'blkno' is freed
 * (cnt > 0) or allocated (cnt < 0): toggle the block's bit in the
 * cluster map, measure the contiguous free cluster forward and backward
 * of it (capped at s_contigsumsize), and adjust the per-size cluster
 * summary counters accordingly.  No-op when cluster summaries are
 * disabled (s_contigsumsize <= 0).
 *
 * NOTE(review): interior lines are missing from this extract.
 */
842 static void ufs_clusteracct(struct super_block * sb,
843 struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
845 struct ufs_sb_private_info * uspi;
846 int i, start, end, forw, back;
848 uspi = UFS_SB(sb)->s_uspi;
849 if (uspi->s_contigsumsize <= 0)
/* Freeing sets the cluster-map bit, allocating clears it (the branch
 * selecting between these two lines is missing from this extract). */
853 ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
855 ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
858 * Find the size of the cluster going forward.
861 end = start + uspi->s_contigsumsize;
862 if ( end >= ucpi->c_nclusterblks)
863 end = ucpi->c_nclusterblks;
864 i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
870 * Find the size of the cluster going backward.
873 end = start - uspi->s_contigsumsize;
876 i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
882 * Account for old cluster and the possibly new forward and
/* Clamp the combined cluster size to the summary table's limit, then
 * credit the new cluster size and debit the old forward/backward
 * cluster sizes it replaced. */
886 if (i > uspi->s_contigsumsize)
887 i = uspi->s_contigsumsize;
888 fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
890 fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
892 fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
/*
 * Free-run lookup table for filesystems with 8 fragments per block:
 * indexed by one byte of the fragment free map, each entry encodes (as
 * a bitmask) which free-run lengths that byte pattern contains.  Used
 * by ufs_bitmap_search() via ubh_scanc() to find a byte holding a free
 * run of the requested size.
 * NOTE(review): the table's closing brace lies beyond this extract.
 */
896 static unsigned char ufs_fragtable_8fpb[] = {
897 0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
898 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
899 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
900 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
901 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
902 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
903 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
904 0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
905 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
906 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
907 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
908 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
909 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
910 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
911 0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
912 0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
/*
 * Free-run lookup table for filesystems with other than 8 fragments
 * per block; companion to ufs_fragtable_8fpb (selected in
 * ufs_bitmap_search() by the uspi->s_fpb == 8 test).  Indexed by one
 * byte of the fragment free map; entries encode the free-run lengths
 * present in that byte pattern.
 * NOTE(review): the table's closing brace lies beyond this extract.
 */
915 static unsigned char ufs_fragtable_other[] = {
916 0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
917 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
918 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
919 0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
920 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
921 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
922 0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
923 0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
924 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
925 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
926 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
927 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
928 0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
929 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
930 0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
931 0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,