sh: convert /proc/cpu/alignment, /proc/cpu/kernel_alignment to seq_file
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index bcc4408..54c16ec 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -9,7 +9,6 @@
  */
 
 #include <linux/fs.h>
-#include <linux/ufs_fs.h>
 #include <linux/stat.h>
 #include <linux/time.h>
 #include <linux/string.h>
@@ -19,6 +18,8 @@
 #include <linux/bitops.h>
 #include <asm/byteorder.h>
 
+#include "ufs_fs.h"
+#include "ufs.h"
 #include "swab.h"
 #include "util.h"
 
@@ -84,7 +85,7 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
                                   "bit already cleared for fragment %u", i);
        }
        
-       DQUOT_FREE_BLOCK (inode, count);
+       vfs_dq_free_block(inode, count);
 
        
        fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
@@ -194,7 +195,7 @@ do_more:
                ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
                if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
                        ufs_clusteracct (sb, ucpi, blkno, 1);
-               DQUOT_FREE_BLOCK(inode, uspi->s_fpb);
+               vfs_dq_free_block(inode, uspi->s_fpb);
 
                fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
                uspi->cs_total.cs_nbfree++;
@@ -244,62 +245,87 @@ failed:
  * We can come here from ufs_writepage or ufs_prepare_write,
  * locked_page is an argument of these functions, so it is already locked.
  */
-static void ufs_change_blocknr(struct inode *inode, unsigned int beg,
-                              unsigned int count, unsigned int oldb,
-                              unsigned int newb, struct page *locked_page)
+static void ufs_change_blocknr(struct inode *inode, sector_t beg,
+                              unsigned int count, sector_t oldb,
+                              sector_t newb, struct page *locked_page)
 {
-       const unsigned mask = (1 << (PAGE_CACHE_SHIFT - inode->i_blkbits)) - 1;
+       const unsigned blks_per_page =
+               1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+       const unsigned mask = blks_per_page - 1;
        struct address_space * const mapping = inode->i_mapping;
-       pgoff_t index, cur_index;
-       unsigned end, pos, j;
+       pgoff_t index, cur_index, last_index;
+       unsigned pos, j, lblock;
+       sector_t end, i;
        struct page *page;
        struct buffer_head *head, *bh;
 
-       UFSD("ENTER, ino %lu, count %u, oldb %u, newb %u\n",
-             inode->i_ino, count, oldb, newb);
+       UFSD("ENTER, ino %lu, count %u, oldb %llu, newb %llu\n",
+             inode->i_ino, count,
+            (unsigned long long)oldb, (unsigned long long)newb);
 
        BUG_ON(!locked_page);
        BUG_ON(!PageLocked(locked_page));
 
        cur_index = locked_page->index;
-
-       for (end = count + beg; beg < end; beg = (beg | mask) + 1) {
-               index = beg >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
+       end = count + beg;
+       last_index = end >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
+       for (i = beg; i < end; i = (i | mask) + 1) {
+               index = i >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
 
                if (likely(cur_index != index)) {
                        page = ufs_get_locked_page(mapping, index);
-                       if (!page || IS_ERR(page)) /* it was truncated or EIO */
+                       if (!page) /* it was truncated */
+                               continue;
+                       if (IS_ERR(page)) { /* or EIO */
+                               ufs_error(inode->i_sb, __func__,
+                                         "read of page %llu failed\n",
+                                         (unsigned long long)index);
                                continue;
+                       }
                } else
                        page = locked_page;
 
                head = page_buffers(page);
                bh = head;
-               pos = beg & mask;
+               pos = i & mask;
                for (j = 0; j < pos; ++j)
                        bh = bh->b_this_page;
-               j = 0;
+
+
+               if (unlikely(index == last_index))
+                       lblock = end & mask;
+               else
+                       lblock = blks_per_page;
+
                do {
-                       if (buffer_mapped(bh)) {
-                               pos = bh->b_blocknr - oldb;
-                               if (pos < count) {
-                                       UFSD(" change from %llu to %llu\n",
-                                            (unsigned long long)pos + oldb,
-                                            (unsigned long long)pos + newb);
-                                       bh->b_blocknr = newb + pos;
-                                       unmap_underlying_metadata(bh->b_bdev,
-                                                                 bh->b_blocknr);
-                                       mark_buffer_dirty(bh);
-                                       ++j;
+                       if (j >= lblock)
+                               break;
+                       pos = (i - beg) + j;
+
+                       if (!buffer_mapped(bh))
+                               map_bh(bh, inode->i_sb, oldb + pos);
+                       if (!buffer_uptodate(bh)) {
+                               ll_rw_block(READ, 1, &bh);
+                               wait_on_buffer(bh);
+                               if (!buffer_uptodate(bh)) {
+                                       ufs_error(inode->i_sb, __func__,
+                                                 "read of block failed\n");
+                                       break;
                                }
                        }
 
+                       UFSD(" change from %llu to %llu, pos %u\n",
+                            (unsigned long long)(pos + oldb),
+                            (unsigned long long)(pos + newb), pos);
+
+                       bh->b_blocknr = newb + pos;
+                       unmap_underlying_metadata(bh->b_bdev,
+                                                 bh->b_blocknr);
+                       mark_buffer_dirty(bh);
+                       ++j;
                        bh = bh->b_this_page;
                } while (bh != head);
 
-               if (j)
-                       set_page_dirty(page);
-
                if (likely(cur_index != index))
                        ufs_put_locked_page(page);
        }
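
The rewritten ufs_change_blocknr() above walks the moved range one page-cache page at a time: i >> (PAGE_CACHE_SHIFT - inode->i_blkbits) is the page index, i & mask is the block's slot inside that page, (i | mask) + 1 jumps to the first block of the next page, and on the final page lblock = end & mask stops the buffer walk before untouched buffers. A minimal, self-contained userspace sketch of that arithmetic (the 4 KiB page / 1 KiB block shift values below are illustrative assumptions, not values taken from the patch):

#include <stdio.h>

/* Illustrative shifts only: 4 KiB page-cache pages, 1 KiB blocks.  The
 * kernel derives these from PAGE_CACHE_SHIFT and inode->i_blkbits. */
#define DEMO_PAGE_SHIFT	12
#define DEMO_BLKBITS	10

int main(void)
{
	const unsigned blks_per_page = 1u << (DEMO_PAGE_SHIFT - DEMO_BLKBITS);
	const unsigned mask = blks_per_page - 1;
	unsigned long long beg = 5, count = 9;		/* arbitrary demo range */
	unsigned long long end = beg + count;
	unsigned long long last_index = end >> (DEMO_PAGE_SHIFT - DEMO_BLKBITS);

	/* Same stepping as the patched loop: one iteration per page touched. */
	for (unsigned long long i = beg; i < end; i = (i | mask) + 1) {
		unsigned long long index = i >> (DEMO_PAGE_SHIFT - DEMO_BLKBITS);
		unsigned pos = i & mask;	/* first affected buffer in the page */
		unsigned lblock = (index == last_index) ?
				  (unsigned)(end & mask) : blks_per_page;

		printf("page %llu: buffers %u..%u\n", index, pos, lblock - 1);
	}
	return 0;
}
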
@@ -457,8 +483,9 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
        if (result) {
                ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
                                locked_page != NULL);
-               ufs_change_blocknr(inode, fragment - oldcount, oldcount, tmp,
-                                  result, locked_page);
+               ufs_change_blocknr(inode, fragment - oldcount, oldcount,
+                                  uspi->s_sbbase + tmp,
+                                  uspi->s_sbbase + result, locked_page);
                ufs_cpu_to_data_ptr(sb, p, result);
                *err = 0;
                UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
@@ -529,7 +556,7 @@ static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
                fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
        for (i = oldcount; i < newcount; i++)
                ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);
-       if(DQUOT_ALLOC_BLOCK(inode, count)) {
+       if (vfs_dq_alloc_block(inode, count)) {
                *err = -EDQUOT;
                return 0;
        }
@@ -637,7 +664,7 @@ cg_found:
                for (i = count; i < uspi->s_fpb; i++)
                        ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
                i = uspi->s_fpb - count;
-               DQUOT_FREE_BLOCK(inode, i);
+               vfs_dq_free_block(inode, i);
 
                fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
                uspi->cs_total.cs_nffree += i;
@@ -649,7 +676,7 @@ cg_found:
        result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
        if (result == INVBLOCK)
                return 0;
-       if(DQUOT_ALLOC_BLOCK(inode, count)) {
+       if (vfs_dq_alloc_block(inode, count)) {
                *err = -EDQUOT;
                return 0;
        }
@@ -720,7 +747,7 @@ gotit:
        ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
        if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
                ufs_clusteracct (sb, ucpi, blkno, -1);
-       if(DQUOT_ALLOC_BLOCK(inode, uspi->s_fpb)) {
+       if (vfs_dq_alloc_block(inode, uspi->s_fpb)) {
                *err = -EDQUOT;
                return INVBLOCK;
        }
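
Throughout the patch the DQUOT_ALLOC_BLOCK()/DQUOT_FREE_BLOCK() macros are replaced by the vfs_dq_alloc_block()/vfs_dq_free_block() helpers, but the call pattern is unchanged: a non-zero return from the allocation helper is treated as quota exhaustion and the function bails out with *err = -EDQUOT, while the free paths release the same block count. A condensed userspace sketch of that pattern, using hypothetical demo_* stand-ins rather than the real quotaops.h interfaces:

#include <stdio.h>
#include <errno.h>

/* Hypothetical stand-ins for the quota hooks used in this file; the real
 * helpers live in include/linux/quotaops.h and operate on a struct inode. */
static int demo_dq_alloc_block(unsigned count) { return count > 8 ? -1 : 0; }
static void demo_dq_free_block(unsigned count) { (void)count; }

/* Condensed shape of the allocation paths: charge the quota and, on
 * failure, report -EDQUOT to the caller instead of a block number. */
static unsigned long demo_alloc(unsigned count, int *err)
{
	if (demo_dq_alloc_block(count)) {
		*err = -EDQUOT;
		return 0;
	}
	*err = 0;
	return 100;	/* placeholder for the fragment/block number found */
}

int main(void)
{
	int err;
	unsigned long blk = demo_alloc(16, &err);	/* over the demo limit */

	if (!blk)
		printf("allocation refused, err=%d\n", err);
	else
		demo_dq_free_block(16);	/* matching release, as in the free paths */
	return 0;
}
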