diff --git a/mm/filemap.c b/mm/filemap.c
index 5209e47..f4d0cde 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -28,6 +28,7 @@
 #include
 #include
 #include
+#include <linux/backing-dev.h>
 #include
 #include
 #include
@@ -123,6 +124,18 @@ void __remove_from_page_cache(struct page *page)
 	mapping->nrpages--;
 	__dec_zone_page_state(page, NR_FILE_PAGES);
 	BUG_ON(page_mapped(page));
+
+	/*
+	 * Some filesystems seem to re-dirty the page even after
+	 * the VM has canceled the dirty bit (eg ext3 journaling).
+	 *
+	 * Fix it up by doing a final dirty accounting check after
+	 * having removed the page entirely.
+	 */
+	if (PageDirty(page) && mapping_cap_account_dirty(mapping)) {
+		dec_zone_page_state(page, NR_FILE_DIRTY);
+		dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
+	}
 }
 
 void remove_from_page_cache(struct page *page)
@@ -1299,7 +1312,7 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
 	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 	if (vmf->pgoff >= size)
-		goto outside_data_content;
+		return VM_FAULT_SIGBUS;
 
 	/* If we don't want any read-ahead, don't bother */
 	if (VM_RandomReadHint(vma))
@@ -1376,7 +1389,7 @@ retry_find:
 	if (unlikely(vmf->pgoff >= size)) {
 		unlock_page(page);
 		page_cache_release(page);
-		goto outside_data_content;
+		return VM_FAULT_SIGBUS;
 	}
 
 	/*
@@ -1387,15 +1400,6 @@ retry_find:
 	vmf->page = page;
 	return ret | VM_FAULT_LOCKED;
 
-outside_data_content:
-	/*
-	 * An external ptracer can access pages that normally aren't
-	 * accessible..
-	 */
-	if (vma->vm_mm == current->mm)
-		return VM_FAULT_SIGBUS;
-
-	/* Fall through to the non-read-ahead case */
 no_cached_page:
 	/*
 	 * We're only likely to ever get here if MADV_RANDOM is in
@@ -2510,21 +2514,17 @@ generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
 	}
 
 	retval = mapping->a_ops->direct_IO(rw, iocb, iov, offset, nr_segs);
-	if (retval)
-		goto out;
 
 	/*
 	 * Finally, try again to invalidate clean pages which might have been
-	 * faulted in by get_user_pages() if the source of the write was an
-	 * mmap()ed region of the file we're writing. That's a pretty crazy
-	 * thing to do, so we don't support it 100%. If this invalidation
-	 * fails and we have -EIOCBQUEUED we ignore the failure.
+	 * cached by non-direct readahead, or faulted in by get_user_pages()
+	 * if the source of the write was an mmap'ed region of the file
+	 * we're writing. Either one is a pretty crazy thing to do,
+	 * so we don't support it 100%. If this invalidation
+	 * fails, tough, the write still worked...
 	 */
 	if (rw == WRITE && mapping->nrpages) {
-		int err = invalidate_inode_pages2_range(mapping,
-					offset >> PAGE_CACHE_SHIFT, end);
-		if (err && retval >= 0)
-			retval = err;
+		invalidate_inode_pages2_range(mapping, offset >> PAGE_CACHE_SHIFT, end);
 	}
 out:
 	return retval;
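
A note on the filemap_fault hunks above: with the outside_data_content label gone, a fault at a page offset at or beyond i_size simply returns VM_FAULT_SIGBUS, so the faulting process takes a SIGBUS. The user-space sketch below is illustrative only and is not part of the patch (the temp-file path and handler name are made up); it maps one page of an empty file and touches it, which is exactly the "vmf->pgoff >= size" case handled at the top of filemap_fault().

/*
 * Hypothetical user-space demo: touching a shared file mapping beyond
 * EOF faults into filemap_fault(), which returns VM_FAULT_SIGBUS, so
 * the process receives SIGBUS.  Not part of the kernel patch above.
 */
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

static void on_sigbus(int sig)
{
	static const char msg[] = "got SIGBUS (fault beyond i_size)\n";

	(void)sig;
	write(STDOUT_FILENO, msg, sizeof(msg) - 1);
	_exit(0);
}

int main(void)
{
	char path[] = "/tmp/filemap-sigbus-XXXXXX";
	long pagesize = sysconf(_SC_PAGESIZE);
	int fd = mkstemp(path);			/* zero-length file */
	char *map;

	if (fd < 0) {
		perror("mkstemp");
		return 1;
	}
	unlink(path);				/* keep it only as long as fd lives */

	signal(SIGBUS, on_sigbus);

	/*
	 * Mapping one page of an empty file succeeds; faulting the page
	 * does not, because the faulting offset is at or beyond i_size.
	 */
	map = mmap(NULL, pagesize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (map == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	map[0] = 'x';				/* filemap_fault() -> SIGBUS */

	printf("unexpected: no SIGBUS\n");	/* not reached */
	return 0;
}

For the process touching its own mapping this behavior is unchanged; what the removed block shows is that an external ptracer poking the same region (which used to fall through to the no_cached_page path) now gets the same failure instead of being able to reach pages past EOF.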