kill I_LOCK

diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index 8f23c60..cfce53c 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -2,7 +2,7 @@
  * aops.c - NTFS kernel address space operations and page cache handling.
  *         Part of the Linux-NTFS project.
  *
- * Copyright (c) 2001-2005 Anton Altaparmakov
+ * Copyright (c) 2001-2007 Anton Altaparmakov
  * Copyright (c) 2002 Richard Russon
  *
  * This program/include file is free software; you can redistribute it and/or
@@ -22,6 +22,7 @@
  */
 
 #include <linux/errno.h>
+#include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
 #include <linux/swap.h>
@@ -85,17 +86,19 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
                }
                /* Check for the current buffer head overflowing. */
                if (unlikely(file_ofs + bh->b_size > init_size)) {
-                       u8 *kaddr;
                        int ofs;
+                       void *kaddr;
 
                        ofs = 0;
                        if (file_ofs < init_size)
                                ofs = init_size - file_ofs;
+                       local_irq_save(flags);
                        kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ);
                        memset(kaddr + bh_offset(bh) + ofs, 0,
                                        bh->b_size - ofs);
-                       kunmap_atomic(kaddr, KM_BIO_SRC_IRQ);
                        flush_dcache_page(page);
+                       kunmap_atomic(kaddr, KM_BIO_SRC_IRQ);
+                       local_irq_restore(flags);
                }
        } else {
                clear_buffer_uptodate(bh);
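
The local_irq_save()/local_irq_restore() pair added in this hunk is the
substantive change: this I/O completion handler runs in softirq context,
and the fixed KM_BIO_SRC_IRQ kmap slot is shared with hard-irq users in
the block layer, so an interrupt arriving between kmap_atomic() and
kunmap_atomic() could silently clobber the mapping. A minimal sketch of
the pattern, assuming the pre-2.6.37 fixed-slot kmap_atomic() API (the
helper name is invented):

	static void zero_bh_tail(struct page *page, unsigned int ofs,
			unsigned int len)
	{
		unsigned long flags;
		void *kaddr;

		local_irq_save(flags);	/* Keep the per-cpu kmap slot ours. */
		kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ);
		memset(kaddr + ofs, 0, len);
		flush_dcache_page(page);	/* Flush while still mapped. */
		kunmap_atomic(kaddr, KM_BIO_SRC_IRQ);
		local_irq_restore(flags);
	}

Note the hunk also moves flush_dcache_page() before kunmap_atomic(), so
the cache flush happens while the kernel mapping still exists.
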
@@ -142,11 +145,13 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
                recs = PAGE_CACHE_SIZE / rec_size;
                /* Should have been verified before we got here... */
                BUG_ON(!recs);
+               local_irq_save(flags);
                kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ);
                for (i = 0; i < recs; i++)
                        post_read_mst_fixup((NTFS_RECORD*)(kaddr +
                                        i * rec_size), rec_size);
                kunmap_atomic(kaddr, KM_BIO_SRC_IRQ);
+               local_irq_restore(flags);
                flush_dcache_page(page);
                if (likely(page_uptodate && !PageError(page)))
                        SetPageUptodate(page);
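
Background on post_read_mst_fixup(): NTFS guards MFT and index records
against torn writes by stamping the last two bytes of every 512-byte
block with the record's update sequence number (USN) before it goes to
disk, keeping the genuine bytes in the update sequence array (USA) in
the record header. The real implementation lives in fs/ntfs/mst.c; a
simplified sketch of the restore step, assuming the usual NTFS_RECORD
layout and omitting the magic and bounds checks:

	static int mst_fixup_sketch(NTFS_RECORD *rec)
	{
		le16 *usa = (le16*)((u8*)rec + le16_to_cpu(rec->usa_ofs));
		u16 usa_count = le16_to_cpu(rec->usa_count);
		le16 *data = (le16*)((u8*)rec + NTFS_BLOCK_SIZE) - 1;
		u16 i;

		for (i = 1; i < usa_count; i++) {
			if (*data != usa[0])
				return -EIO;	/* Multi sector transfer error. */
			*data = usa[i];		/* Restore the real two bytes. */
			data += NTFS_BLOCK_SIZE / sizeof(le16);
		}
		return 0;
	}
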
@@ -200,8 +205,8 @@ static int ntfs_read_block(struct page *page)
        /* $MFT/$DATA must have its complete runlist in memory at all times. */
        BUG_ON(!ni->runlist.rl && !ni->mft_no && !NInoAttr(ni));
 
-       blocksize_bits = VFS_I(ni)->i_blkbits;
-       blocksize = 1 << blocksize_bits;
+       blocksize = vol->sb->s_blocksize;
+       blocksize_bits = vol->sb->s_blocksize_bits;
 
        if (!page_has_buffers(page)) {
                create_empty_buffers(page, blocksize, 0);
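
Reading the block size straight from the superblock instead of deriving
it from i_blkbits relies on an invariant that sb_set_blocksize() keeps
for every mounted volume; presumably it also avoids trusting i_blkbits
on NTFS attribute inodes, where it need not match the volume:

	BUG_ON(vol->sb->s_blocksize != 1U << vol->sb->s_blocksize_bits);
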
@@ -240,8 +245,7 @@ static int ntfs_read_block(struct page *page)
        rl = NULL;
        nr = i = 0;
        do {
-               u8 *kaddr;
-               int err;
+               int err = 0;
 
                if (unlikely(buffer_uptodate(bh)))
                        continue;
@@ -249,11 +253,10 @@ static int ntfs_read_block(struct page *page)
                        arr[nr++] = bh;
                        continue;
                }
-               err = 0;
                bh->b_bdev = vol->sb->s_bdev;
                /* Is the block within the allowed limits? */
                if (iblock < lblock) {
-                       BOOL is_retry = FALSE;
+                       bool is_retry = false;
 
                        /* Convert iblock into corresponding vcn and offset. */
                        vcn = (VCN)iblock << blocksize_bits >>
@@ -291,7 +294,7 @@ lock_retry_remap:
                                goto handle_hole;
                        /* If first try and runlist unmapped, map and retry. */
                        if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
-                               is_retry = TRUE;
+                               is_retry = true;
                                /*
                                 * Attempt to map runlist, dropping lock for
                                 * the duration.
@@ -335,10 +338,7 @@ handle_hole:
                bh->b_blocknr = -1UL;
                clear_buffer_mapped(bh);
 handle_zblock:
-               kaddr = kmap_atomic(page, KM_USER0);
-               memset(kaddr + i * blocksize, 0, blocksize);
-               kunmap_atomic(kaddr, KM_USER0);
-               flush_dcache_page(page);
+               zero_user(page, i * blocksize, blocksize);
                if (likely(!err))
                        set_buffer_uptodate(bh);
        } while (i++, iblock++, (bh = bh->b_this_page) != head);
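
zero_user() (from linux/highmem.h) collapses the open-coded
map/memset/flush/unmap sequence used throughout this file into a single
call; it is roughly equivalent to:

	static inline void zero_user_sketch(struct page *page,
			unsigned start, unsigned size)
	{
		void *kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr + start, 0, size);
		flush_dcache_page(page);
		kunmap_atomic(kaddr, KM_USER0);
	}

Besides being shorter, it flushes before unmapping, the same ordering
the first hunk above adopts; the removed open-coded sequence here
flushed only after kunmap_atomic().
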
@@ -400,7 +400,7 @@ static int ntfs_readpage(struct file *file, struct page *page)
        loff_t i_size;
        struct inode *vi;
        ntfs_inode *ni, *base_ni;
-       u8 *kaddr;
+       u8 *addr;
        ntfs_attr_search_ctx *ctx;
        MFT_RECORD *mrec;
        unsigned long flags;
@@ -409,6 +409,15 @@ static int ntfs_readpage(struct file *file, struct page *page)
 
 retry_readpage:
        BUG_ON(!PageLocked(page));
+       vi = page->mapping->host;
+       i_size = i_size_read(vi);
+       /* Is the page fully outside i_size? (truncate in progress) */
+       if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >>
+                       PAGE_CACHE_SHIFT)) {
+               zero_user(page, 0, PAGE_CACHE_SIZE);
+               ntfs_debug("Read outside i_size - truncated?");
+               goto done;
+       }
        /*
         * This can potentially happen because we clear PageUptodate() during
         * ntfs_writepage() of MstProtected() attributes.
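
The new index check rounds i_size up to a whole number of pages, so a
page is only short-circuited when it lies entirely beyond EOF. Worked
example with 4 KiB pages (PAGE_CACHE_SHIFT == 12) and i_size == 5000:

	/* Bytes 0..4999 occupy pages 0 and 1, so the first index wholly
	   past EOF is (5000 + 4095) >> 12 == 2.  Page 1, which straddles
	   EOF, must still be read and zero-padded as usual. */
	pgoff_t first_outside = (i_size + PAGE_CACHE_SIZE - 1) >>
			PAGE_CACHE_SHIFT;
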
@@ -417,7 +426,6 @@ retry_readpage:
                unlock_page(page);
                return 0;
        }
-       vi = page->mapping->host;
        ni = NTFS_I(vi);
        /*
         * Only $DATA attributes can be encrypted and only unnamed $DATA
@@ -455,10 +463,7 @@ retry_readpage:
         * ok to ignore the compressed flag here.
         */
        if (unlikely(page->index > 0)) {
-               kaddr = kmap_atomic(page, KM_USER0);
-               memset(kaddr, 0, PAGE_CACHE_SIZE);
-               flush_dcache_page(page);
-               kunmap_atomic(kaddr, KM_USER0);
+               zero_user(page, 0, PAGE_CACHE_SIZE);
                goto done;
        }
        if (!NInoAttr(ni))
@@ -498,15 +503,15 @@ retry_readpage:
                /* Race with shrinking truncate. */
                attr_len = i_size;
        }
-       kaddr = kmap_atomic(page, KM_USER0);
+       addr = kmap_atomic(page, KM_USER0);
        /* Copy the data to the page. */
-       memcpy(kaddr, (u8*)ctx->attr +
+       memcpy(addr, (u8*)ctx->attr +
                        le16_to_cpu(ctx->attr->data.resident.value_offset),
                        attr_len);
        /* Zero the remainder of the page. */
-       memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
+       memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
        flush_dcache_page(page);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(addr, KM_USER0);
 put_unm_err_out:
        ntfs_attr_put_search_ctx(ctx);
 unm_err_out:
@@ -557,7 +562,7 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
        unsigned long flags;
        unsigned int blocksize, vcn_ofs;
        int err;
-       BOOL need_end_writeback;
+       bool need_end_writeback;
        unsigned char blocksize_bits;
 
        vi = page->mapping->host;
@@ -569,10 +574,8 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
 
        BUG_ON(!NInoNonResident(ni));
        BUG_ON(NInoMstProtected(ni));
-
-       blocksize_bits = vi->i_blkbits;
-       blocksize = 1 << blocksize_bits;
-
+       blocksize = vol->sb->s_blocksize;
+       blocksize_bits = vol->sb->s_blocksize_bits;
        if (!page_has_buffers(page)) {
                BUG_ON(!PageUptodate(page));
                create_empty_buffers(page, blocksize,
@@ -627,7 +630,7 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
        rl = NULL;
        err = 0;
        do {
-               BOOL is_retry = FALSE;
+               bool is_retry = false;
 
                if (unlikely(block >= dblock)) {
                        /*
@@ -769,7 +772,7 @@ lock_retry_remap:
                }
                /* If first try and runlist unmapped, map and retry. */
                if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
-                       is_retry = TRUE;
+                       is_retry = true;
                        /*
                         * Attempt to map runlist, dropping lock for
                         * the duration.
@@ -787,14 +790,9 @@ lock_retry_remap:
                 * uptodate so it can get discarded by the VM.
                 */
                if (err == -ENOENT || lcn == LCN_ENOENT) {
-                       u8 *kaddr;
-
                        bh->b_blocknr = -1;
                        clear_buffer_dirty(bh);
-                       kaddr = kmap_atomic(page, KM_USER0);
-                       memset(kaddr + bh_offset(bh), 0, blocksize);
-                       kunmap_atomic(kaddr, KM_USER0);
-                       flush_dcache_page(page);
+                       zero_user(page, bh_offset(bh), blocksize);
                        set_buffer_uptodate(bh);
                        err = 0;
                        continue;
@@ -875,12 +873,12 @@ lock_retry_remap:
        set_page_writeback(page);       /* Keeps try_to_free_buffers() away. */
 
        /* Submit the prepared buffers for i/o. */
-       need_end_writeback = TRUE;
+       need_end_writeback = true;
        do {
                struct buffer_head *next = bh->b_this_page;
                if (buffer_async_write(bh)) {
                        submit_bh(WRITE, bh);
-                       need_end_writeback = FALSE;
+                       need_end_writeback = false;
                }
                bh = next;
        } while (bh != head);
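
need_end_writeback tracks the completion contract of
set_page_writeback(): exactly one party must call end_page_writeback().
If at least one buffer was submitted, the last async-write completion
does it; if the loop submitted nothing there will be no completion, so
the submitter must clean up itself, presumably with something like this
after the loop (not shown in the diff):

	/* No i/o was started, so no completion will end writeback. */
	if (unlikely(need_end_writeback))
		end_page_writeback(page);
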
@@ -933,7 +931,7 @@ static int ntfs_write_mst_block(struct page *page,
        runlist_element *rl;
        int i, nr_locked_nis, nr_recs, nr_bhs, max_bhs, bhs_per_rec, err, err2;
        unsigned bh_size, rec_size_bits;
-       BOOL sync, is_mft, page_is_dirty, rec_is_dirty;
+       bool sync, is_mft, page_is_dirty, rec_is_dirty;
        unsigned char bh_size_bits;
 
        ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
@@ -949,8 +947,8 @@ static int ntfs_write_mst_block(struct page *page,
         */
        BUG_ON(!(is_mft || S_ISDIR(vi->i_mode) ||
                        (NInoAttr(ni) && ni->type == AT_INDEX_ALLOCATION)));
-       bh_size_bits = vi->i_blkbits;
-       bh_size = 1 << bh_size_bits;
+       bh_size = vol->sb->s_blocksize;
+       bh_size_bits = vol->sb->s_blocksize_bits;
        max_bhs = PAGE_CACHE_SIZE / bh_size;
        BUG_ON(!max_bhs);
        BUG_ON(max_bhs > MAX_BUF_PER_PAGE);
@@ -976,10 +974,10 @@ static int ntfs_write_mst_block(struct page *page,
 
        rl = NULL;
        err = err2 = nr_bhs = nr_recs = nr_locked_nis = 0;
-       page_is_dirty = rec_is_dirty = FALSE;
+       page_is_dirty = rec_is_dirty = false;
        rec_start_bh = NULL;
        do {
-               BOOL is_retry = FALSE;
+               bool is_retry = false;
 
                if (likely(block < rec_block)) {
                        if (unlikely(block >= dblock)) {
@@ -1010,10 +1008,10 @@ static int ntfs_write_mst_block(struct page *page,
                        }
                        if (!buffer_dirty(bh)) {
                                /* Clean records are not written out. */
-                               rec_is_dirty = FALSE;
+                               rec_is_dirty = false;
                                continue;
                        }
-                       rec_is_dirty = TRUE;
+                       rec_is_dirty = true;
                        rec_start_bh = bh;
                }
                /* Need to map the buffer if it is not mapped already. */
@@ -1054,7 +1052,7 @@ lock_retry_remap:
                                 */
                                if (!is_mft && !is_retry &&
                                                lcn == LCN_RL_NOT_MAPPED) {
-                                       is_retry = TRUE;
+                                       is_retry = true;
                                        /*
                                         * Attempt to map runlist, dropping
                                         * lock for the duration.
@@ -1064,7 +1062,7 @@ lock_retry_remap:
                                        if (likely(!err2))
                                                goto lock_retry_remap;
                                        if (err2 == -ENOMEM)
-                                               page_is_dirty = TRUE;
+                                               page_is_dirty = true;
                                        lcn = err2;
                                } else {
                                        err2 = -EIO;
@@ -1146,7 +1144,7 @@ lock_retry_remap:
                                 * means we need to redirty the page before
                                 * returning.
                                 */
-                               page_is_dirty = TRUE;
+                               page_is_dirty = true;
                                /*
                                 * Remove the buffers in this mft record from
                                 * the list of buffers to write.
@@ -1196,7 +1194,7 @@ lock_retry_remap:
                tbh = bhs[i];
                if (!tbh)
                        continue;
-               if (unlikely(test_set_buffer_locked(tbh)))
+               if (!trylock_buffer(tbh))
                        BUG();
                /* The buffer dirty state is now irrelevant, just clean it. */
                clear_buffer_dirty(tbh);
@@ -1279,18 +1277,18 @@ unm_done:
                
                tni = locked_nis[nr_locked_nis];
                /* Get the base inode. */
-               down(&tni->extent_lock);
+               mutex_lock(&tni->extent_lock);
                if (tni->nr_extents >= 0)
                        base_tni = tni;
                else {
                        base_tni = tni->ext.base_ntfs_ino;
                        BUG_ON(!base_tni);
                }
-               up(&tni->extent_lock);
+               mutex_unlock(&tni->extent_lock);
                ntfs_debug("Unlocking %s inode 0x%lx.",
                                tni == base_tni ? "base" : "extent",
                                tni->mft_no);
-               up(&tni->mrec_lock);
+               mutex_unlock(&tni->mrec_lock);
                atomic_dec(&tni->count);
                iput(VFS_I(base_tni));
        }
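
The down()/up() calls on extent_lock and mrec_lock disappear because
the ntfs_inode locks were converted from binary semaphores to mutexes
(the declaration-side change presumably lives in fs/ntfs/inode.h, not
in this diff). The mechanical shape of such a conversion:

	/* Before: a semaphore used as a mutex. */
	struct semaphore mrec_lock;
	init_MUTEX(&ni->mrec_lock);
	down(&ni->mrec_lock);
	up(&ni->mrec_lock);

	/* After: a real mutex - owner-tracked and lockdep-annotated. */
	struct mutex mrec_lock;
	mutex_init(&ni->mrec_lock);
	mutex_lock(&ni->mrec_lock);
	mutex_unlock(&ni->mrec_lock);
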
@@ -1357,7 +1355,7 @@ static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
        loff_t i_size;
        struct inode *vi = page->mapping->host;
        ntfs_inode *base_ni = NULL, *ni = NTFS_I(vi);
-       char *kaddr;
+       char *addr;
        ntfs_attr_search_ctx *ctx = NULL;
        MFT_RECORD *m = NULL;
        u32 attr_len;
@@ -1391,8 +1389,7 @@ retry_writepage:
                if (NInoEncrypted(ni)) {
                        unlock_page(page);
                        BUG_ON(ni->type != AT_DATA);
-                       ntfs_debug("Denying write access to encrypted "
-                                       "file.");
+                       ntfs_debug("Denying write access to encrypted file.");
                        return -EACCES;
                }
                /* Compressed data streams are handled in compress.c. */
@@ -1420,10 +1417,7 @@ retry_writepage:
                if (page->index >= (i_size >> PAGE_CACHE_SHIFT)) {
                        /* The page straddles i_size. */
                        unsigned int ofs = i_size & ~PAGE_CACHE_MASK;
-                       kaddr = kmap_atomic(page, KM_USER0);
-                       memset(kaddr + ofs, 0, PAGE_CACHE_SIZE - ofs);
-                       kunmap_atomic(kaddr, KM_USER0);
-                       flush_dcache_page(page);
+                       zero_user_segment(page, ofs, PAGE_CACHE_SIZE);
                }
                /* Handle mst protected attributes. */
                if (NInoMstProtected(ni))
@@ -1500,16 +1494,16 @@ retry_writepage:
                /* Shrinking cannot fail. */
                BUG_ON(err);
        }
-       kaddr = kmap_atomic(page, KM_USER0);
+       addr = kmap_atomic(page, KM_USER0);
        /* Copy the data from the page to the mft record. */
        memcpy((u8*)ctx->attr +
                        le16_to_cpu(ctx->attr->data.resident.value_offset),
-                       kaddr, attr_len);
+                       addr, attr_len);
        /* Zero out of bounds area in the page cache page. */
-       memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
-       kunmap_atomic(kaddr, KM_USER0);
-       flush_dcache_mft_record_page(ctx->ntfs_ino);
+       memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
+       kunmap_atomic(addr, KM_USER0);
        flush_dcache_page(page);
+       flush_dcache_mft_record_page(ctx->ntfs_ino);
        /* We are done with the page. */
        end_page_writeback(page);
        /* Finally, mark the mft record dirty, so it gets written back. */
@@ -1532,7 +1526,6 @@ err_out:
                                "error %i.", err);
                SetPageError(page);
                NVolSetErrors(ni->vol);
-               make_bad_inode(vi);
        }
        unlock_page(page);
        if (ctx)
@@ -1547,20 +1540,24 @@ err_out:
 /**
  * ntfs_aops - general address space operations for inodes and attributes
  */
-struct address_space_operations ntfs_aops = {
+const struct address_space_operations ntfs_aops = {
        .readpage       = ntfs_readpage,        /* Fill page with data. */
        .sync_page      = block_sync_page,      /* Currently, just unplugs the
                                                   disk request queue. */
 #ifdef NTFS_RW
        .writepage      = ntfs_writepage,       /* Write dirty page to disk. */
 #endif /* NTFS_RW */
+       .migratepage    = buffer_migrate_page,  /* Move a page cache page from
+                                                  one physical page to
+                                                  another. */
+       .error_remove_page = generic_error_remove_page,
 };
 
 /**
  * ntfs_mst_aops - general address space operations for mst protected inodes
  *                and attributes
  */
-struct address_space_operations ntfs_mst_aops = {
+const struct address_space_operations ntfs_mst_aops = {
        .readpage       = ntfs_readpage,        /* Fill page with data. */
        .sync_page      = block_sync_page,      /* Currently, just unplugs the
                                                   disk request queue. */
@@ -1570,6 +1567,10 @@ struct address_space_operations ntfs_mst_aops = {
                                                   without touching the buffers
                                                   belonging to the page. */
 #endif /* NTFS_RW */
+       .migratepage    = buffer_migrate_page,  /* Move a page cache page from
+                                                  one physical page to
+                                                  another. */
+       .error_remove_page = generic_error_remove_page,
 };
 
 #ifdef NTFS_RW
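
Both ops tables gain const (so they can live in .rodata), a .migratepage
hook and an .error_remove_page hook. buffer_migrate_page() is the stock
helper for address spaces whose pages carry buffer_heads, letting memory
compaction move such pages instead of skipping them, and
generic_error_remove_page() lets the hwpoison machinery drop clean pages
on a memory failure. The same pattern for a hypothetical filesystem (the
foofs names are invented):

	static const struct address_space_operations foofs_aops = {
		.readpage	   = foofs_readpage,
		.migratepage	   = buffer_migrate_page,
		.error_remove_page = generic_error_remove_page,
	};
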
@@ -1597,7 +1598,7 @@ void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs) {
 
        BUG_ON(!PageUptodate(page));
        end = ofs + ni->itype.index.block_size;
-       bh_size = 1 << VFS_I(ni)->i_blkbits;
+       bh_size = VFS_I(ni)->i_sb->s_blocksize;
        spin_lock(&mapping->private_lock);
        if (unlikely(!page_has_buffers(page))) {
                spin_unlock(&mapping->private_lock);