diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 3b6e384..5e598c4 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -13,8 +13,32 @@
 #include <linux/module.h>
 #include <linux/uio.h>
 #include <linux/rmap.h>
+#include <linux/sched.h>
 #include <asm/tlbflush.h>
-#include "filemap.h"
+
+/*
+ * We do use our own empty page to avoid interference with other users
+ * of ZERO_PAGE(), such as /dev/zero
+ */
+static struct page *__xip_sparse_page;
+
+static struct page *xip_sparse_page(void)
+{
+	if (!__xip_sparse_page) {
+		struct page *page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);
+
+		if (page) {
+			static DEFINE_SPINLOCK(xip_alloc_lock);
+			spin_lock(&xip_alloc_lock);
+			if (!__xip_sparse_page)
+				__xip_sparse_page = page;
+			else
+				__free_page(page);
+			spin_unlock(&xip_alloc_lock);
+		}
+	}
+	return __xip_sparse_page;
+}
 
 /*
  * This is a file read routine for execute in place files, and uses
@@ -32,7 +56,8 @@ do_xip_mapping_read(struct address_space *mapping,
 		     read_actor_t actor)
 {
 	struct inode *inode = mapping->host;
-	unsigned long index, end_index, offset;
+	pgoff_t index, end_index;
+	unsigned long offset;
 	loff_t isize;
 
 	BUG_ON(!mapping->a_ops->get_xip_page);
@@ -68,13 +93,12 @@ do_xip_mapping_read(struct address_space *mapping,
 		if (unlikely(IS_ERR(page))) {
 			if (PTR_ERR(page) == -ENODATA) {
 				/* sparse */
-				page = virt_to_page(empty_zero_page);
+				page = ZERO_PAGE(0);
 			} else {
 				desc->error = PTR_ERR(page);
 				goto out;
 			}
-		} else
-			BUG_ON(!PageUptodate(page));
+		}
 
 		/* If users can be writing to this page using arbitrary
 		 * virtual addresses, take care about potential aliasing
@@ -84,8 +108,7 @@ do_xip_mapping_read(struct address_space *mapping,
 			flush_dcache_page(page);
 
 		/*
-		 * Ok, we have the page, and it's up-to-date, so
-		 * now we can copy it to user space...
+		 * Ok, we have the page, so now we can copy it to user space...
 		 *
 		 * The actor routine returns how many bytes were actually used..
 		 * NOTE! This may not be the same as how much of a user buffer
@@ -137,34 +160,12 @@ xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
 }
 EXPORT_SYMBOL_GPL(xip_file_read);
 
-ssize_t
-xip_file_sendfile(struct file *in_file, loff_t *ppos,
-	     size_t count, read_actor_t actor, void *target)
-{
-	read_descriptor_t desc;
-
-	if (!count)
-		return 0;
-
-	desc.written = 0;
-	desc.count = count;
-	desc.arg.data = target;
-	desc.error = 0;
-
-	do_xip_mapping_read(in_file->f_mapping, &in_file->f_ra, in_file,
-			    ppos, &desc, actor);
-	if (desc.written)
-		return desc.written;
-	return desc.error;
-}
-EXPORT_SYMBOL_GPL(xip_file_sendfile);
-
 /*
  * __xip_unmap is invoked from xip_unmap and
  * xip_write
  *
  * This function walks all vmas of the address_space and unmaps the
- * empty_zero_page when found at pgoff. Should it go in rmap.c?
+ * __xip_sparse_page when found at pgoff.
 */
 static void
 __xip_unmap (struct address_space * mapping,
@@ -176,6 +177,12 @@ __xip_unmap (struct address_space * mapping,
 				     unsigned long pgoff)
 {
 	struct vm_area_struct *vma;
 	struct mm_struct *mm;
 	struct prio_tree_iter iter;
 	unsigned long address;
 	pte_t *pte;
 	pte_t pteval;
+	spinlock_t *ptl;
+	struct page *page;
+
+	page = __xip_sparse_page;
+	if (!page)
+		return;
 
 	spin_lock(&mapping->i_mmap_lock);
 	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
@@ -183,81 +190,74 @@ __xip_unmap (struct address_space * mapping,
 		address = vma->vm_start +
 			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
 		BUG_ON(address < vma->vm_start || address >= vma->vm_end);
-		/*
-		 * We need the page_table_lock to protect us from page faults,
-		 * munmap, fork, etc...
-		 */
-		pte = page_check_address(virt_to_page(empty_zero_page), mm,
-					 address);
-		if (!IS_ERR(pte)) {
+		pte = page_check_address(page, mm, address, &ptl);
+		if (pte) {
 			/* Nuke the page table entry. */
-			flush_cache_page(vma, address, pte_pfn(pte));
+			flush_cache_page(vma, address, pte_pfn(*pte));
 			pteval = ptep_clear_flush(vma, address, pte);
+			page_remove_rmap(page, vma);
+			dec_mm_counter(mm, file_rss);
 			BUG_ON(pte_dirty(pteval));
-			pte_unmap(pte);
-			spin_unlock(&mm->page_table_lock);
+			pte_unmap_unlock(pte, ptl);
+			page_cache_release(page);
 		}
 	}
 	spin_unlock(&mapping->i_mmap_lock);
 }
 
 /*
- * xip_nopage() is invoked via the vma operations vector for a
+ * xip_fault() is invoked via the vma operations vector for a
  * mapped memory region to read in file data during a page fault.
  *
- * This function is derived from filemap_nopage, but used for execute in place
+ * This function is derived from filemap_fault, but used for execute in place
  */
-static struct page *
-xip_file_nopage(struct vm_area_struct * area,
-		unsigned long address,
-		int *type)
+static int xip_file_fault(struct vm_area_struct *area, struct vm_fault *vmf)
 {
 	struct file *file = area->vm_file;
 	struct address_space *mapping = file->f_mapping;
 	struct inode *inode = mapping->host;
 	struct page *page;
-	unsigned long size, pgoff, endoff;
+	pgoff_t size;
 
-	pgoff = ((address - area->vm_start) >> PAGE_CACHE_SHIFT)
-		+ area->vm_pgoff;
-	endoff = ((area->vm_end - area->vm_start) >> PAGE_CACHE_SHIFT)
-		+ area->vm_pgoff;
+	/* XXX: are VM_FAULT_ codes OK? */
 	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-	if (pgoff >= size) {
-		return NULL;
-	}
+	if (vmf->pgoff >= size)
+		return VM_FAULT_SIGBUS;
 
-	page = mapping->a_ops->get_xip_page(mapping, pgoff*(PAGE_SIZE/512), 0);
-	if (!IS_ERR(page)) {
-		BUG_ON(!PageUptodate(page));
-		return page;
-	}
+	page = mapping->a_ops->get_xip_page(mapping,
+					vmf->pgoff*(PAGE_SIZE/512), 0);
+	if (!IS_ERR(page))
+		goto out;
 	if (PTR_ERR(page) != -ENODATA)
-		return NULL;
+		return VM_FAULT_OOM;
 
 	/* sparse block */
 	if ((area->vm_flags & (VM_WRITE | VM_MAYWRITE)) &&
 	    (area->vm_flags & (VM_SHARED| VM_MAYSHARE)) &&
 	    (!(mapping->host->i_sb->s_flags & MS_RDONLY))) {
 		/* maybe shared writable, allocate new block */
-		page = mapping->a_ops->get_xip_page (mapping,
-			pgoff*(PAGE_SIZE/512), 1);
+		page = mapping->a_ops->get_xip_page(mapping,
+			vmf->pgoff*(PAGE_SIZE/512), 1);
 		if (IS_ERR(page))
-			return NULL;
-		BUG_ON(!PageUptodate(page));
+			return VM_FAULT_SIGBUS;
 		/* unmap page at pgoff from all other vmas */
-		__xip_unmap(mapping, pgoff);
+		__xip_unmap(mapping, vmf->pgoff);
 	} else {
-		/* not shared and writable, use empty_zero_page */
-		page = virt_to_page(empty_zero_page);
+		/* not shared and writable, use xip_sparse_page() */
+		page = xip_sparse_page();
+		if (!page)
+			return VM_FAULT_OOM;
 	}
 
-	return page;
+out:
+	page_cache_get(page);
+	vmf->page = page;
+	return 0;
 }
 
 static struct vm_operations_struct xip_file_vm_ops = {
-	.nopage = xip_file_nopage,
+	.fault	= xip_file_fault,
 };
 
 int xip_file_mmap(struct file * file, struct vm_area_struct * vma)
@@ -266,6 +266,7 @@ int xip_file_mmap(struct file * file, struct vm_area_struct * vma)
 
 	file_accessed(file);
 	vma->vm_ops = &xip_file_vm_ops;
+	vma->vm_flags |= VM_CAN_NONLINEAR;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(xip_file_mmap);
@@ -275,7 +276,7 @@ __xip_file_write(struct file *filp, const char __user *buf,
 		  size_t count, loff_t pos, loff_t *ppos)
 {
 	struct address_space * mapping = filp->f_mapping;
-	struct address_space_operations *a_ops = mapping->a_ops;
+	const struct address_space_operations *a_ops = mapping->a_ops;
 	struct inode 	*inode = mapping->host;
 	long		status = 0;
 	struct page	*page;
@@ -288,6 +289,7 @@ __xip_file_write(struct file *filp, const char __user *buf,
 		unsigned long index;
 		unsigned long offset;
 		size_t copied;
+		char *kaddr;
 
 		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
 		index = pos >> PAGE_CACHE_SHIFT;
@@ -295,14 +297,6 @@ __xip_file_write(struct file *filp, const char __user *buf,
 		if (bytes > count)
 			bytes = count;
 
-		/*
-		 * Bring in the user page that we will copy from _first_.
-		 * Otherwise there's a nasty deadlock on copying from the
-		 * same page as we're writing to, without it being marked
-		 * up-to-date.
-		 */
-		fault_in_pages_readable(buf, bytes);
-
 		page = a_ops->get_xip_page(mapping,
 					   index*(PAGE_SIZE/512), 0);
 		if (IS_ERR(page) && (PTR_ERR(page) == -ENODATA)) {
@@ -319,10 +313,13 @@ __xip_file_write(struct file *filp, const char __user *buf,
 			break;
 		}
 
-		BUG_ON(!PageUptodate(page));
-
-		copied = filemap_copy_from_user(page, offset, buf, bytes);
+		fault_in_pages_readable(buf, bytes);
+		kaddr = kmap_atomic(page, KM_USER0);
+		copied = bytes -
+			__copy_from_user_inatomic_nocache(kaddr + offset, buf, bytes);
+		kunmap_atomic(kaddr, KM_USER0);
 		flush_dcache_page(page);
+
 		if (likely(copied > 0)) {
 			status = copied;
 
@@ -342,7 +339,7 @@ __xip_file_write(struct file *filp, const char __user *buf,
 			*ppos = pos;
 			/*
 			 * No need to use i_size_read() here, the i_size
-			 * cannot change under us because we hold i_sem.
+			 * cannot change under us because we hold i_mutex.
 			 */
 			if (pos > inode->i_size) {
 				i_size_write(inode, pos);
@@ -362,7 +359,7 @@ xip_file_write(struct file *filp, const char __user *buf, size_t len,
 	loff_t pos;
 	ssize_t ret;
 
-	down(&inode->i_sem);
+	mutex_lock(&inode->i_mutex);
 
 	if (!access_ok(VERIFY_READ, buf, len)) {
 		ret=-EFAULT;
@@ -383,18 +380,18 @@ xip_file_write(struct file *filp, const char __user *buf, size_t len,
 	if (count == 0)
 		goto out_backing;
 
-	ret = remove_suid(filp->f_dentry);
+	ret = remove_suid(filp->f_path.dentry);
 	if (ret)
 		goto out_backing;
 
-	inode_update_time(inode, 1);
+	file_update_time(filp);
 
 	ret = __xip_file_write (filp, buf, count, pos, ppos);
 
  out_backing:
 	current->backing_dev_info = NULL;
 out_up:
-	up(&inode->i_sem);
+	mutex_unlock(&inode->i_mutex);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(xip_file_write);
@@ -412,7 +409,6 @@ xip_truncate_page(struct address_space *mapping, loff_t from)
 	unsigned blocksize;
 	unsigned length;
 	struct page *page;
-	void *kaddr;
 
 	BUG_ON(!mapping->a_ops->get_xip_page);
 
@@ -435,13 +431,8 @@ xip_truncate_page(struct address_space *mapping, loff_t from)
 			return 0;
 		else
 			return PTR_ERR(page);
-	} else
-		BUG_ON(!PageUptodate(page));
-	kaddr = kmap_atomic(page, KM_USER0);
-	memset(kaddr + offset, 0, length);
-	kunmap_atomic(kaddr, KM_USER0);
-
-	flush_dcache_page(page);
+	}
+	zero_user(page, offset, length);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(xip_truncate_page);
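
A note on the xip_sparse_page() helper added at the top of the diff: the
page is allocated optimistically outside the lock, and xip_alloc_lock only
arbitrates which of two racing callers gets to publish its page; the loser
frees its copy. The sketch below reproduces the same publish-once idiom in
stand-alone user-space C, with a pthread mutex standing in for the kernel
spinlock and calloc() for alloc_page(GFP_HIGHUSER | __GFP_ZERO). The names
get_sparse_page, sparse_page and alloc_lock are illustrative stand-ins; this
shows the locking pattern only and is not kernel code.

	#include <stdio.h>
	#include <stdlib.h>
	#include <pthread.h>

	static void *sparse_page;	/* plays the role of __xip_sparse_page */
	static pthread_mutex_t alloc_lock = PTHREAD_MUTEX_INITIALIZER;

	static void *get_sparse_page(size_t pagesize)
	{
		if (!sparse_page) {	/* unlocked fast path */
			void *page = calloc(1, pagesize); /* zeroed, like __GFP_ZERO */

			if (page) {
				pthread_mutex_lock(&alloc_lock);
				if (!sparse_page)
					sparse_page = page;	/* won the race: publish */
				else
					free(page);		/* lost the race: discard */
				pthread_mutex_unlock(&alloc_lock);
			}
		}
		return sparse_page;
	}

	int main(void)
	{
		/* the second call must return the page the first call published */
		printf("%p\n%p\n", get_sparse_page(4096), get_sparse_page(4096));
		return 0;
	}

Strictly portable C would want an atomic load for the unlocked read of
sparse_page; the kernel version relies on the kernel's own memory model for
that fast path.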
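The -ENODATA handling in do_xip_mapping_read() and xip_file_fault() above
implements ordinary sparse-file semantics: a hole reads back as zeroes
(served from ZERO_PAGE() or the private __xip_sparse_page), and a real
backing block is only allocated when the fault comes from a shared writable
mapping. The stand-alone POSIX program below demonstrates the same
user-visible behaviour on a regular file; it exercises the semantics only
and never touches the XIP code paths.

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[16];
		int fd = open("sparse.tmp", O_CREAT | O_TRUNC | O_RDWR, 0600);

		if (fd < 0)
			return 1;

		lseek(fd, 65536, SEEK_SET);	/* leave a 64K hole ...   */
		write(fd, "tail", 4);		/* ... before the payload */

		memset(buf, 0xff, sizeof(buf));	/* poison the buffer      */
		lseek(fd, 0, SEEK_SET);
		read(fd, buf, sizeof(buf));	/* read inside the hole   */

		/* every byte of the hole reads as 0, as if zero-page backed */
		printf("buf[0]=%d buf[15]=%d\n", buf[0], buf[15]);

		close(fd);
		unlink("sparse.tmp");
		return 0;
	}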
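The recurring pgoff*(PAGE_SIZE/512) expression in the get_xip_page() calls
converts a page index into a 512-byte sector index: with 4K pages, each page
covers eight sectors. A trivial stand-alone check of that arithmetic (a
4096-byte PAGE_SIZE is assumed here purely for illustration; the real macro
is per-architecture):

	#include <stdio.h>

	#define PAGE_SIZE 4096UL	/* illustrative value, not the kernel macro */

	int main(void)
	{
		unsigned long pgoff;

		for (pgoff = 0; pgoff < 4; pgoff++)	/* 0->0, 1->8, 2->16, 3->24 */
			printf("pgoff %lu -> sector %lu\n",
			       pgoff, pgoff * (PAGE_SIZE / 512));
		return 0;
	}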