diff --git a/mm/fremap.c b/mm/fremap.c
index 7f08d10..46f5dac 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -5,7 +5,7 @@
  *
  * started by Ingo Molnar, Copyright (C) 2002, 2003
  */
-
+#include <linux/backing-dev.h>
 #include <linux/mm.h>
 #include <linux/swap.h>
 #include <linux/file.h>
@@ -15,170 +15,113 @@
 #include <linux/rmap.h>
 #include <linux/module.h>
 #include <linux/syscalls.h>
+#include <linux/mmu_notifier.h>
 
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
-static int zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
+#include "internal.h"
+
+static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long addr, pte_t *ptep)
 {
 	pte_t pte = *ptep;
-	struct page *page = NULL;
 
 	if (pte_present(pte)) {
-		unsigned long pfn = pte_pfn(pte);
-		flush_cache_page(vma, addr, pfn);
+		struct page *page;
+
+		flush_cache_page(vma, addr, pte_pfn(pte));
 		pte = ptep_clear_flush(vma, addr, ptep);
-		if (unlikely(!pfn_valid(pfn))) {
-			print_bad_pte(vma, pte, addr);
-			goto out;
+		page = vm_normal_page(vma, addr, pte);
+		if (page) {
+			if (pte_dirty(pte))
+				set_page_dirty(page);
+			page_remove_rmap(page);
+			page_cache_release(page);
+			update_hiwater_rss(mm);
+			dec_mm_counter(mm, MM_FILEPAGES);
 		}
-		page = pfn_to_page(pfn);
-		if (pte_dirty(pte))
-			set_page_dirty(page);
-		page_remove_rmap(page);
-		page_cache_release(page);
 	} else {
 		if (!pte_file(pte))
 			free_swap_and_cache(pte_to_swp_entry(pte));
-		pte_clear(mm, addr, ptep);
+		pte_clear_not_present_full(mm, addr, ptep, 0);
 	}
-out:
-	return !!page;
 }
 
 /*
- * Install a file page to a given virtual memory address, release any
+ * Install a file pte to a given virtual memory address, release any
  * previously existing mapping.
  */
-int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
-		unsigned long addr, struct page *page, pgprot_t prot)
+static int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
+		unsigned long addr, unsigned long pgoff, pgprot_t prot)
 {
-	struct inode *inode;
-	pgoff_t size;
 	int err = -ENOMEM;
 	pte_t *pte;
-	pmd_t *pmd;
-	pud_t *pud;
-	pgd_t *pgd;
-	pte_t pte_val;
+	spinlock_t *ptl;
 
-	BUG_ON(vma->vm_flags & VM_RESERVED);
-
-	pgd = pgd_offset(mm, addr);
-	spin_lock(&mm->page_table_lock);
-
-	pud = pud_alloc(mm, pgd, addr);
-	if (!pud)
-		goto err_unlock;
-
-	pmd = pmd_alloc(mm, pud, addr);
-	if (!pmd)
-		goto err_unlock;
-
-	pte = pte_alloc_map(mm, pmd, addr);
+	pte = get_locked_pte(mm, addr, &ptl);
 	if (!pte)
-		goto err_unlock;
+		goto out;
+
+	if (!pte_none(*pte))
+		zap_pte(mm, vma, addr, pte);
 
+	set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
 	/*
-	 * This page may have been truncated. Tell the
-	 * caller about it.
+	 * We don't need to run update_mmu_cache() here because the "file pte"
+	 * being installed by install_file_pte() is not a real pte - it's a
+	 * non-present entry (like a swap entry), noting what file offset should
+	 * be mapped there when there's a fault (in a non-linear vma where
+	 * that's not obvious).
 	 */
-	err = -EINVAL;
-	inode = vma->vm_file->f_mapping->host;
-	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-	if (!page->mapping || page->index >= size)
-		goto err_unlock;
-	err = -ENOMEM;
-	if (page_mapcount(page) > INT_MAX/2)
-		goto err_unlock;
-
-	if (pte_none(*pte) || !zap_pte(mm, vma, addr, pte))
-		inc_mm_counter(mm, file_rss);
-
-	flush_icache_page(vma, page);
-	set_pte_at(mm, addr, pte, mk_pte(page, prot));
-	page_add_file_rmap(page);
-	pte_val = *pte;
-	pte_unmap(pte);
-	update_mmu_cache(vma, addr, pte_val);
-
+	pte_unmap_unlock(pte, ptl);
 	err = 0;
-err_unlock:
-	spin_unlock(&mm->page_table_lock);
+out:
 	return err;
 }
-EXPORT_SYMBOL(install_page);
-
-/*
- * Install a file pte to a given virtual memory address, release any
- * previously existing mapping.
- */
-int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
-		unsigned long addr, unsigned long pgoff, pgprot_t prot)
+static int populate_range(struct mm_struct *mm, struct vm_area_struct *vma,
+			unsigned long addr, unsigned long size, pgoff_t pgoff)
 {
-	int err = -ENOMEM;
-	pte_t *pte;
-	pmd_t *pmd;
-	pud_t *pud;
-	pgd_t *pgd;
-	pte_t pte_val;
-
-	BUG_ON(vma->vm_flags & VM_RESERVED);
-
-	pgd = pgd_offset(mm, addr);
-	spin_lock(&mm->page_table_lock);
-
-	pud = pud_alloc(mm, pgd, addr);
-	if (!pud)
-		goto err_unlock;
-
-	pmd = pmd_alloc(mm, pud, addr);
-	if (!pmd)
-		goto err_unlock;
+	int err;
 
-	pte = pte_alloc_map(mm, pmd, addr);
-	if (!pte)
-		goto err_unlock;
+	do {
+		err = install_file_pte(mm, vma, addr, pgoff, vma->vm_page_prot);
+		if (err)
+			return err;
 
-	if (!pte_none(*pte) && zap_pte(mm, vma, addr, pte))
-		dec_mm_counter(mm, file_rss);
+		size -= PAGE_SIZE;
+		addr += PAGE_SIZE;
+		pgoff++;
+	} while (size);
 
-	set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
-	pte_val = *pte;
-	pte_unmap(pte);
-	update_mmu_cache(vma, addr, pte_val);
-	spin_unlock(&mm->page_table_lock);
-	return 0;
+	return 0;
 
-err_unlock:
-	spin_unlock(&mm->page_table_lock);
-	return err;
 }
-
-/***
- * sys_remap_file_pages - remap arbitrary pages of a shared backing store
- *                        file within an existing vma.
+/**
+ * sys_remap_file_pages - remap arbitrary pages of an existing VM_SHARED vma
  * @start: start of the remapped virtual memory range
  * @size: size of the remapped virtual memory range
- * @prot: new protection bits of the range
- * @pgoff: to be mapped page of the backing store file
+ * @prot: new protection bits of the range (see NOTE)
+ * @pgoff: to-be-mapped page of the backing store file
  * @flags: 0 or MAP_NONBLOCK - the latter will cause no IO.
  *
- * this syscall works purely via pagetables, so it's the most efficient
+ * sys_remap_file_pages remaps arbitrary pages of an existing VM_SHARED vma
+ * (shared backing store file).
+ *
+ * This syscall works purely via pagetables, so it's the most efficient
  * way to map the same (large) file into a given virtual window. Unlike
  * mmap()/mremap() it does not create any new vmas. The new mappings are
  * also safe across swapout.
  *
- * NOTE: the 'prot' parameter right now is ignored, and the vma's default
- * protection is used. Arbitrary protections might be implemented in the
- * future.
+ * NOTE: the @prot parameter right now is ignored (but must be zero),
+ * and the vma's default protection is used. Arbitrary protections
+ * might be implemented in the future.
 */
-asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
-	unsigned long __prot, unsigned long pgoff, unsigned long flags)
+SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
+		unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
 {
 	struct mm_struct *mm = current->mm;
 	struct address_space *mapping;
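A note on the "file pte" the hunk above installs. The sketch below is a toy model only: the real encoding is per-architecture (pgoff_to_pte()/pte_to_pgoff() live in each arch's pgtable headers), and the shift and marker bits here are invented purely for illustration of the idea the comment describes — a not-present entry that remembers a file offset.

	#include <assert.h>

	typedef unsigned long pteval_t;

	#define FAKE_PTE_PRESENT 0x1UL	/* stays clear: the MMU must fault */
	#define FAKE_PTE_FILE	 0x2UL	/* what pte_file() would test */

	/* What install_file_pte() does via pgoff_to_pte(): pack the file
	 * offset (in pages) into a not-present entry. */
	static pteval_t fake_pgoff_to_pte(unsigned long pgoff)
	{
		return (pgoff << 2) | FAKE_PTE_FILE;
	}

	/* What the fault path later does via pte_to_pgoff(). */
	static unsigned long fake_pte_to_pgoff(pteval_t pte)
	{
		return pte >> 2;
	}

	int main(void)
	{
		pteval_t pte = fake_pgoff_to_pte(1234);

		assert(!(pte & FAKE_PTE_PRESENT));	/* hardware sees a miss */
		assert(pte & FAKE_PTE_FILE);		/* kernel sees a file pte */
		assert(fake_pte_to_pgoff(pte) == 1234);
		return 0;
	}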
@@ -187,7 +130,7 @@ asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
 	int err = -EINVAL;
 	int has_write_lock = 0;
 
-	if (__prot)
+	if (prot)
 		return err;
 	/*
 	 * Sanitize the syscall parameters:
@@ -214,45 +157,100 @@ asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
 	 * Make sure the vma is shared, that it supports prefaulting,
 	 * and that the remapped range is valid and fully within
 	 * the single existing vma. vm_private_data is used as a
-	 * swapout cursor in a VM_NONLINEAR vma (unless VM_RESERVED
-	 * or VM_LOCKED, but VM_LOCKED could be revoked later on).
+	 * swapout cursor in a VM_NONLINEAR vma.
 	 */
-	if (vma && (vma->vm_flags & VM_SHARED) &&
-		(!vma->vm_private_data ||
-			(vma->vm_flags & (VM_NONLINEAR|VM_RESERVED))) &&
-		vma->vm_ops && vma->vm_ops->populate &&
-			end > start && start >= vma->vm_start &&
-			end <= vma->vm_end) {
+	if (!vma || !(vma->vm_flags & VM_SHARED))
+		goto out;
 
-		/* Must set VM_NONLINEAR before any pages are populated. */
-		if (pgoff != linear_page_index(vma, start) &&
-		    !(vma->vm_flags & VM_NONLINEAR)) {
-			if (!has_write_lock) {
-				up_read(&mm->mmap_sem);
-				down_write(&mm->mmap_sem);
-				has_write_lock = 1;
-				goto retry;
-			}
-			mapping = vma->vm_file->f_mapping;
-			spin_lock(&mapping->i_mmap_lock);
-			flush_dcache_mmap_lock(mapping);
-			vma->vm_flags |= VM_NONLINEAR;
-			vma_prio_tree_remove(vma, &mapping->i_mmap);
-			vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
-			flush_dcache_mmap_unlock(mapping);
-			spin_unlock(&mapping->i_mmap_lock);
+	if (vma->vm_private_data && !(vma->vm_flags & VM_NONLINEAR))
+		goto out;
+
+	if (!(vma->vm_flags & VM_CAN_NONLINEAR))
+		goto out;
+
+	if (end <= start || start < vma->vm_start || end > vma->vm_end)
+		goto out;
+
+	/* Must set VM_NONLINEAR before any pages are populated. */
+	if (!(vma->vm_flags & VM_NONLINEAR)) {
+		/* Don't need a nonlinear mapping, exit success */
+		if (pgoff == linear_page_index(vma, start)) {
+			err = 0;
+			goto out;
 		}
-		err = vma->vm_ops->populate(vma, start, size,
-					    vma->vm_page_prot,
-					    pgoff, flags & MAP_NONBLOCK);
+		if (!has_write_lock) {
+			up_read(&mm->mmap_sem);
+			down_write(&mm->mmap_sem);
+			has_write_lock = 1;
+			goto retry;
+		}
+		mapping = vma->vm_file->f_mapping;
+		/*
+		 * page_mkclean doesn't work on nonlinear vmas, so if
+		 * dirty pages need to be accounted, emulate with linear
+		 * vmas.
+		 */
+		if (mapping_cap_account_dirty(mapping)) {
+			unsigned long addr;
+			struct file *file = vma->vm_file;
+
+			flags &= MAP_NONBLOCK;
+			get_file(file);
+			addr = mmap_region(file, start, size,
+					flags, vma->vm_flags, pgoff);
+			fput(file);
+			if (IS_ERR_VALUE(addr)) {
+				err = addr;
+			} else {
+				BUG_ON(addr != start);
+				err = 0;
+			}
+			goto out;
+		}
+		spin_lock(&mapping->i_mmap_lock);
+		flush_dcache_mmap_lock(mapping);
+		vma->vm_flags |= VM_NONLINEAR;
+		vma_prio_tree_remove(vma, &mapping->i_mmap);
+		vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
+		flush_dcache_mmap_unlock(mapping);
+		spin_unlock(&mapping->i_mmap_lock);
+	}
+
+	if (vma->vm_flags & VM_LOCKED) {
 		/*
-		 * We can't clear VM_NONLINEAR because we'd have to do
-		 * it after ->populate completes, and that would prevent
-		 * downgrading the lock.  (Locks can't be upgraded).
+		 * drop PG_Mlocked flag for over-mapped range
 		 */
+		unsigned int saved_flags = vma->vm_flags;
+		munlock_vma_pages_range(vma, start, start + size);
+		vma->vm_flags = saved_flags;
+	}
+
+	mmu_notifier_invalidate_range_start(mm, start, start + size);
+	err = populate_range(mm, vma, start, size, pgoff);
+	mmu_notifier_invalidate_range_end(mm, start, start + size);
+	if (!err && !(flags & MAP_NONBLOCK)) {
+		if (vma->vm_flags & VM_LOCKED) {
+			/*
+			 * might be mapping previously unmapped range of file
+			 */
+			mlock_vma_pages_range(vma, start, start + size);
+		} else {
+			if (unlikely(has_write_lock)) {
+				downgrade_write(&mm->mmap_sem);
+				has_write_lock = 0;
+			}
+			make_pages_present(start, start+size);
+		}
 	}
+
+	/*
+	 * We can't clear VM_NONLINEAR because we'd have to do
+	 * it after ->populate completes, and that would prevent
+	 * downgrading the lock.  (Locks can't be upgraded).
+	 */
+
+out:
 	if (likely(!has_write_lock))
 		up_read(&mm->mmap_sem);
 	else
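The mapping_cap_account_dirty() branch above deserves a gloss: because page_mkclean() cannot walk nonlinear vmas, the kernel quietly degrades the request to an ordinary linear mapping via mmap_region(). From userspace, the effect is roughly what an explicit MAP_FIXED mmap() over the same window would produce. The helper below sketches that equivalence; the name and the surrounding setup are assumptions for illustration, not part of the patch.

	#define _GNU_SOURCE
	#include <sys/mman.h>

	/*
	 * Hypothetical helper: re-map one window of an already mmap()ed
	 * MAP_SHARED file linearly, the way the dirty-accounting fallback
	 * above does in-kernel through mmap_region().  One vma per window -
	 * exactly the per-vma cost remap_file_pages() normally avoids.
	 */
	static void *remap_one_window(void *start, size_t size, int fd,
				      off_t pgoff, long page_size)
	{
		/* assumed: [start, start + size) lies inside an existing
		 * MAP_SHARED mapping of fd */
		return mmap(start, size, PROT_READ | PROT_WRITE,
			    MAP_SHARED | MAP_FIXED, fd, pgoff * page_size);
	}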
@@ -260,4 +258,3 @@ asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
 
 	return err;
 }
-
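For reference, the syscall this patch reworks is driven from userspace as below. A minimal sketch, assuming a writable file "data.bin" (hypothetical name) at least four pages long; note that prot must be 0, as the hunks above enforce, and that pgoff is in units of pages. Passing MAP_NONBLOCK instead of 0 as the final argument would skip the make_pages_present() prefaulting seen above and let page faults do the IO lazily.

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		long psz = sysconf(_SC_PAGE_SIZE);
		int fd = open("data.bin", O_RDWR);	/* hypothetical file, >= 4 pages */
		char *p;

		if (fd < 0)
			return 1;

		/* Ordinary linear VM_SHARED mapping of the first four pages. */
		p = mmap(NULL, 4 * psz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
		if (p == MAP_FAILED)
			return 1;

		/*
		 * Rewire the first virtual page to file page 3 without
		 * creating a new vma; the vma becomes VM_NONLINEAR.
		 */
		if (remap_file_pages(p, psz, 0, 3, 0)) {
			perror("remap_file_pages");
			return 1;
		}

		printf("first byte of file page 3: %c\n", p[0]);
		munmap(p, 4 * psz);
		close(fd);
		return 0;
	}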