X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=mm%2Ffilemap.c;h=5c74b68935acde87e1b9a91152b399a408509cf7;hb=d847afe7d4966d35eb7a6fe6f196a0d7e5633f35;hp=4fb1546bbad6abd95dd33f5281c88517540631fb;hpb=7a4050791b23c55a451974027d41d72a9b78039b;p=safe%2Fjmp%2Flinux-2.6

diff --git a/mm/filemap.c b/mm/filemap.c
index 4fb1546..5c74b68 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -25,12 +25,15 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
 #include 	/* for BUG_ON(!in_atomic()) only */
+#include 
 #include "internal.h"
 
 /*
@@ -63,7 +66,6 @@ generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
  *    ->private_lock		(__free_pte->__set_page_dirty_buffers)
  *      ->swap_lock		(exclusive_swap_page, others)
  *        ->mapping->tree_lock
- *          ->zone.lock
  *
  *  ->i_mutex
  *    ->i_mmap_lock		(truncate->unmap_mapping_range)
@@ -117,11 +119,24 @@ void __remove_from_page_cache(struct page *page)
 {
 	struct address_space *mapping = page->mapping;
 
+	mem_cgroup_uncharge_page(page);
 	radix_tree_delete(&mapping->page_tree, page->index);
 	page->mapping = NULL;
 	mapping->nrpages--;
 	__dec_zone_page_state(page, NR_FILE_PAGES);
 	BUG_ON(page_mapped(page));
+
+	/*
+	 * Some filesystems seem to re-dirty the page even after
+	 * the VM has canceled the dirty bit (eg ext3 journaling).
+	 *
+	 * Fix it up by doing a final dirty accounting check after
+	 * having removed the page entirely.
+	 */
+	if (PageDirty(page) && mapping_cap_account_dirty(mapping)) {
+		dec_zone_page_state(page, NR_FILE_DIRTY);
+		dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
+	}
 }
 
 void remove_from_page_cache(struct page *page)
@@ -171,6 +186,12 @@ static int sync_page(void *word)
 	return 0;
 }
 
+static int sync_page_killable(void *word)
+{
+	sync_page(word);
+	return fatal_signal_pending(current) ? -EINTR : 0;
+}
+
 /**
  * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
  * @mapping:	address space structure to write
@@ -439,8 +460,12 @@ int filemap_write_and_wait_range(struct address_space *mapping,
 int add_to_page_cache(struct page *page, struct address_space *mapping,
 		pgoff_t offset, gfp_t gfp_mask)
 {
-	int error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
+	int error = mem_cgroup_cache_charge(page, current->mm,
+					gfp_mask & ~__GFP_HIGHMEM);
+	if (error)
+		goto out;
 
+	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
 	if (error == 0) {
 		write_lock_irq(&mapping->tree_lock);
 		error = radix_tree_insert(&mapping->page_tree, offset, page);
@@ -451,10 +476,14 @@ int add_to_page_cache(struct page *page, struct address_space *mapping,
 			page->index = offset;
 			mapping->nrpages++;
 			__inc_zone_page_state(page, NR_FILE_PAGES);
-		}
+		} else
+			mem_cgroup_uncharge_page(page);
+
 		write_unlock_irq(&mapping->tree_lock);
 		radix_tree_preload_end();
-	}
+	} else
+		mem_cgroup_uncharge_page(page);
+out:
 	return error;
 }
 EXPORT_SYMBOL(add_to_page_cache);
@@ -508,7 +537,7 @@ static inline void wake_up_page(struct page *page, int bit)
 	__wake_up_bit(page_waitqueue(page), &page->flags, bit);
 }
 
-void fastcall wait_on_page_bit(struct page *page, int bit_nr)
+void wait_on_page_bit(struct page *page, int bit_nr)
 {
 	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
 
@@ -532,7 +561,7 @@ EXPORT_SYMBOL(wait_on_page_bit);
  * the clear_bit and the read of the waitqueue (to avoid SMP races with a
  * parallel wait_on_page_locked()).
  */
-void fastcall unlock_page(struct page *page)
+void unlock_page(struct page *page)
 {
 	smp_mb__before_clear_bit();
 	if (!TestClearPageLocked(page))
@@ -566,7 +595,7 @@ EXPORT_SYMBOL(end_page_writeback);
  * chances are that on the second loop, the block layer's plug list is empty,
  * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
  */
-void fastcall __lock_page(struct page *page)
+void __lock_page(struct page *page)
 {
 	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
 
@@ -575,11 +604,19 @@ void fastcall __lock_page(struct page *page)
 }
 EXPORT_SYMBOL(__lock_page);
 
+int __lock_page_killable(struct page *page)
+{
+	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
+
+	return __wait_on_bit_lock(page_waitqueue(page), &wait,
+					sync_page_killable, TASK_KILLABLE);
+}
+
 /*
  * Variant of lock_page that does not require the caller to hold a reference
  * on the page's mapping.
  */
-void fastcall __lock_page_nosync(struct page *page)
+void __lock_page_nosync(struct page *page)
 {
 	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
 	__wait_on_bit_lock(page_waitqueue(page), &wait, __sleep_on_page_lock,
@@ -838,9 +875,7 @@ static void shrink_readahead_size_eio(struct file *filp,
 }
 
 /**
- * do_generic_mapping_read - generic file read routine
- * @mapping:	address_space to be read
- * @_ra:	file's readahead state
+ * do_generic_file_read - generic file read routine
  * @filp:	the file to read
  * @ppos:	current file position
  * @desc:	read_descriptor
@@ -851,18 +886,13 @@ static void shrink_readahead_size_eio(struct file *filp,
  *
  * This is really ugly. But the goto's actually try to clarify some
  * of the logic when it comes to error handling etc.
- *
- * Note the struct file* is only passed for the use of readpage.
- * It may be NULL.
  */
-void do_generic_mapping_read(struct address_space *mapping,
-			     struct file_ra_state *ra,
-			     struct file *filp,
-			     loff_t *ppos,
-			     read_descriptor_t *desc,
-			     read_actor_t actor)
+static void do_generic_file_read(struct file *filp, loff_t *ppos,
+		read_descriptor_t *desc, read_actor_t actor)
 {
+	struct address_space *mapping = filp->f_mapping;
 	struct inode *inode = mapping->host;
+	struct file_ra_state *ra = &filp->f_ra;
 	pgoff_t index;
 	pgoff_t last_index;
 	pgoff_t prev_index;
@@ -966,7 +996,8 @@ page_ok:
 
 page_not_up_to_date:
 		/* Get exclusive access to the page ... */
-		lock_page(page);
+		if (lock_page_killable(page))
+			goto readpage_eio;
 
 		/* Did it get truncated before we got the lock? */
 		if (!page->mapping) {
@@ -994,7 +1025,8 @@ readpage:
 		}
 
 		if (!PageUptodate(page)) {
-			lock_page(page);
+			if (lock_page_killable(page))
+				goto readpage_eio;
 			if (!PageUptodate(page)) {
 				if (page->mapping == NULL) {
 					/*
@@ -1005,15 +1037,16 @@ readpage:
 					goto find_page;
 				}
 				unlock_page(page);
-				error = -EIO;
 				shrink_readahead_size_eio(filp, ra);
-				goto readpage_error;
+				goto readpage_eio;
 			}
 			unlock_page(page);
 		}
 
 		goto page_ok;
 
+readpage_eio:
+		error = -EIO;
 readpage_error:
 		/* UHHUH! A synchronous read error occurred. Report it */
 		desc->error = error;
@@ -1051,7 +1084,6 @@ out:
 	if (filp)
 		file_accessed(filp);
 }
-EXPORT_SYMBOL(do_generic_mapping_read);
 
 int file_read_actor(read_descriptor_t *desc, struct page *page,
 			unsigned long offset, unsigned long size)
@@ -1246,7 +1278,7 @@ asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
  * This adds the requested page to the page cache if it isn't already there,
  * and schedules an I/O to read in its contents from disk.
  */
-static int fastcall page_cache_read(struct file * file, pgoff_t offset)
+static int page_cache_read(struct file *file, pgoff_t offset)
 {
 	struct address_space *mapping = file->f_mapping;
 	struct page *page;
@@ -1292,13 +1324,13 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	struct file_ra_state *ra = &file->f_ra;
 	struct inode *inode = mapping->host;
 	struct page *page;
-	unsigned long size;
+	pgoff_t size;
 	int did_readaround = 0;
 	int ret = 0;
 
 	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 	if (vmf->pgoff >= size)
-		goto outside_data_content;
+		return VM_FAULT_SIGBUS;
 
 	/* If we don't want any read-ahead, don't bother */
 	if (VM_RandomReadHint(vma))
@@ -1375,7 +1407,7 @@ retry_find:
 	if (unlikely(vmf->pgoff >= size)) {
 		unlock_page(page);
 		page_cache_release(page);
-		goto outside_data_content;
+		return VM_FAULT_SIGBUS;
 	}
 
 	/*
@@ -1386,15 +1418,6 @@ retry_find:
 	vmf->page = page;
 	return ret | VM_FAULT_LOCKED;
 
-outside_data_content:
-	/*
-	 * An external ptracer can access pages that normally aren't
-	 * accessible..
-	 */
-	if (vma->vm_mm == current->mm)
-		return VM_FAULT_SIGBUS;
-
-	/* Fall through to the non-read-ahead case */
 no_cached_page:
 	/*
 	 * We're only likely to ever get here if MADV_RANDOM is in
@@ -1627,12 +1650,18 @@ int __remove_suid(struct dentry *dentry, int kill)
 
 int remove_suid(struct dentry *dentry)
 {
-	int kill = should_remove_suid(dentry);
+	int killsuid = should_remove_suid(dentry);
+	int killpriv = security_inode_need_killpriv(dentry);
+	int error = 0;
 
-	if (unlikely(kill))
-		return __remove_suid(dentry, kill);
+	if (killpriv < 0)
+		return killpriv;
+	if (killpriv)
+		error = security_inode_killpriv(dentry);
+	if (!error && killsuid)
+		error = __remove_suid(dentry, killsuid);
 
-	return 0;
+	return error;
 }
 EXPORT_SYMBOL(remove_suid);
 
@@ -1722,7 +1751,11 @@ static void __iov_iter_advance_iov(struct iov_iter *i, size_t bytes)
 	const struct iovec *iov = i->iov;
 	size_t base = i->iov_offset;
 
-	while (bytes) {
+	/*
+	 * The !iov->iov_len check ensures we skip over unlikely
+	 * zero-length segments.
+	 */
+	while (bytes || !iov->iov_len) {
 		int copy = min(bytes, iov->iov_len - base);
 
 		bytes -= copy;
@@ -2240,6 +2273,7 @@ again:
 
 		cond_resched();
 
+		iov_iter_advance(i, copied);
 		if (unlikely(copied == 0)) {
 			/*
 			 * If we were unable to copy any data at all, we must
@@ -2253,7 +2287,6 @@ again:
 					iov_iter_single_seg_count(i));
 			goto again;
 		}
-		iov_iter_advance(i, copied);
 		pos += copied;
 		written += copied;
 
@@ -2503,21 +2536,17 @@ generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
 	}
 
 	retval = mapping->a_ops->direct_IO(rw, iocb, iov, offset, nr_segs);
-	if (retval)
-		goto out;
 
 	/*
 	 * Finally, try again to invalidate clean pages which might have been
-	 * faulted in by get_user_pages() if the source of the write was an
-	 * mmap()ed region of the file we're writing. That's a pretty crazy
-	 * thing to do, so we don't support it 100%. If this invalidation
-	 * fails and we have -EIOCBQUEUED we ignore the failure.
+	 * cached by non-direct readahead, or faulted in by get_user_pages()
+	 * if the source of the write was an mmap'ed region of the file
+	 * we're writing. Either one is a pretty crazy thing to do,
+	 * so we don't support it 100%. If this invalidation
+	 * fails, tough, the write still worked...
 	 */
 	if (rw == WRITE && mapping->nrpages) {
-		int err = invalidate_inode_pages2_range(mapping,
-					offset >> PAGE_CACHE_SHIFT, end);
-		if (err && retval >= 0)
-			retval = err;
+		invalidate_inode_pages2_range(mapping, offset >> PAGE_CACHE_SHIFT, end);
 	}
 out:
 	return retval;
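
A note on the __iov_iter_advance_iov() hunk above: the loop condition is widened from "while (bytes)" to "while (bytes || !iov->iov_len)" so that the iterator cursor also steps across zero-length iovec segments instead of stopping on one. The snippet below is a stand-alone userspace sketch of that advance logic, not kernel code; the struct iter type, the advance_iov() helper and the demo buffers are invented for the example, and only the loop body mirrors the patched function.

#include <stdio.h>
#include <stddef.h>
#include <sys/uio.h>

/* Minimal stand-in for the (iov, iov_offset) cursor of struct iov_iter. */
struct iter {
	const struct iovec *iov;	/* current segment */
	size_t iov_offset;		/* offset within that segment */
};

/* Mirrors the patched loop: consume 'bytes' and skip zero-length segments. */
static void advance_iov(struct iter *i, size_t bytes)
{
	const struct iovec *iov = i->iov;
	size_t base = i->iov_offset;

	while (bytes || !iov->iov_len) {
		size_t left = iov->iov_len - base;
		size_t copy = bytes < left ? bytes : left;

		bytes -= copy;
		base += copy;
		if (iov->iov_len == base) {	/* segment exhausted (or empty) */
			iov++;
			base = 0;
		}
	}
	i->iov = iov;
	i->iov_offset = base;
}

int main(void)
{
	char a[4], b[8];
	struct iovec v[] = {
		{ .iov_base = a, .iov_len = sizeof(a) },
		{ .iov_base = NULL, .iov_len = 0 },	/* zero-length segment */
		{ .iov_base = b, .iov_len = sizeof(b) },
	};
	struct iter it = { .iov = v, .iov_offset = 0 };

	/* Consuming all of 'a' must leave the cursor on 'b', not on the empty segment. */
	advance_iov(&it, sizeof(a));
	printf("cursor at segment %td, offset %zu\n", it.iov - v, it.iov_offset);
	return 0;
}

With the old "while (bytes)" condition the cursor would be left parked on the empty segment once the first buffer was consumed; the extra "|| !iov->iov_len" test keeps walking until it rests on a segment that actually has data, which is what the comment added by the patch ("skip over unlikely zero-length segments") refers to.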