#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/swap.h>
+#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
- /*
- * We don't need page_table_lock: we have mmap_sem exclusively.
- */
pgd = pgd_offset(mm, addr);
if (pgd_none_or_clear_bad(pgd))
return NULL;
static pmd_t *alloc_new_pmd(struct mm_struct *mm, unsigned long addr)
{
pgd_t *pgd;
pud_t *pud;
- pmd_t *pmd = NULL;
- pte_t *pte;
- /*
- * We do need page_table_lock: because allocators expect that.
- */
- spin_lock(&mm->page_table_lock);
+ pmd_t *pmd;
pgd = pgd_offset(mm, addr);
pud = pud_alloc(mm, pgd, addr);
if (!pud)
- goto out;
+ return NULL;
pmd = pmd_alloc(mm, pud, addr);
if (!pmd)
- goto out;
- pte = pte_alloc_map(mm, pmd, addr);
- if (!pte) {
- pmd = NULL;
- goto out;
- }
- pte_unmap(pte);
-out:
- spin_unlock(&mm->page_table_lock);
+ return NULL;
+
+ if (!pmd_present(*pmd) && __pte_alloc(mm, pmd, addr))
+ return NULL;
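+ /*
+ * No explicit page_table_lock here: pud_alloc(), pmd_alloc() and
+ * __pte_alloc() each take it internally, and instantiating the pte
+ * table up front lets move_ptes() work under just the pte locks.
+ */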
return pmd;
}
static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
unsigned long old_addr, unsigned long old_end,
struct vm_area_struct *new_vma, pmd_t *new_pmd,
unsigned long new_addr)
{
struct address_space *mapping = NULL;
struct mm_struct *mm = vma->vm_mm;
pte_t *old_pte, *new_pte, pte;
+ spinlock_t *old_ptl, *new_ptl;
if (vma->vm_file) {
/*
* Subtle point from Rajesh Venkatasubramanian: before moving
* file-based ptes, we must lock vmtruncate out, since it might
* clean the dst vma before the src vma, and we propagate stale
* pages into the dst afterward.
*/
mapping = vma->vm_file->f_mapping;
spin_lock(&mapping->i_mmap_lock);
if (new_vma->vm_truncate_count &&
new_vma->vm_truncate_count != vma->vm_truncate_count)
new_vma->vm_truncate_count = 0;
}
- spin_lock(&mm->page_table_lock);
- old_pte = pte_offset_map(old_pmd, old_addr);
- new_pte = pte_offset_map_nested(new_pmd, new_addr);
+ /*
+ * We don't have to worry about the ordering of src and dst
+ * pte locks because exclusive mmap_sem prevents deadlock.
+ */
+ old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
+ new_pte = pte_offset_map_nested(new_pmd, new_addr);
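+ /*
+ * pte_lockptr() finds the spinlock guarding the new pte page: with
+ * split page table locks it may differ from old_ptl; otherwise both
+ * point at mm->page_table_lock and the nested lock is skipped.
+ */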
+ new_ptl = pte_lockptr(mm, new_pmd);
+ if (new_ptl != old_ptl)
+ spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
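+ /*
+ * Lazy MMU mode allows batching of the ptep_clear_flush() and
+ * set_pte_at() updates below on architectures that support it;
+ * elsewhere these hooks are no-ops.
+ */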
+ arch_enter_lazy_mmu_mode();
for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
new_pte++, new_addr += PAGE_SIZE) {
if (pte_none(*old_pte))
continue;
pte = ptep_clear_flush(vma, old_addr, old_pte);
- /* ZERO_PAGE can be dependant on virtual addr */
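+ /*
+ * move_pte() lets an architecture whose pte encoding depends on
+ * the virtual address (e.g. a per-address zero page) fix the pte
+ * up for its new location; most define it as a no-op.
+ */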
pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
set_pte_at(mm, new_addr, new_pte, pte);
}
+ arch_leave_lazy_mmu_mode();
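+ /*
+ * Unwind in reverse order: the nested pte lock and mapping must be
+ * dropped before the outer ones, since pte_offset_map_nested()
+ * used the second atomic kmap slot on highmem configurations.
+ */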
+ if (new_ptl != old_ptl)
+ spin_unlock(new_ptl);
pte_unmap_nested(new_pte - 1);
- pte_unmap(old_pte - 1);
- spin_unlock(&mm->page_table_lock);
+ pte_unmap_unlock(old_pte - 1, old_ptl);
if (mapping)
spin_unlock(&mapping->i_mmap_lock);
}
#define LATENCY_LIMIT (64 * PAGE_SIZE)
-static unsigned long move_page_tables(struct vm_area_struct *vma,
+unsigned long move_page_tables(struct vm_area_struct *vma,
unsigned long old_addr, struct vm_area_struct *new_vma,
unsigned long new_addr, unsigned long len)
{
if ((addr <= new_addr) && (addr+old_len) > new_addr)
goto out;
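+ /*
+ * With the addr_only flag set, security_file_mmap() vets only the
+ * destination address, e.g. rejecting fixed mappings that would
+ * fall below mmap_min_addr.
+ */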
+ ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
+ if (ret)
+ goto out;
+
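+ /* Clear out anything already mapped at the fixed destination. */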
ret = do_munmap(mm, new_addr, new_len);
if (ret)
goto out;
/* We can't remap across vm area boundaries */
if (old_len > vma->vm_end - addr)
goto out;
- if (vma->vm_flags & VM_DONTEXPAND) {
+ if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) {
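+ /*
+ * A VM_PFNMAP vma holds raw page-frame mappings which the kernel
+ * cannot recreate at new addresses, so such areas (like
+ * VM_DONTEXPAND ones) may move but must not grow.
+ */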
if (new_len > old_len)
goto out;
}
new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
vma->vm_pgoff, map_flags);
- ret = new_addr;
- if (new_addr & ~PAGE_MASK)
+ if (new_addr & ~PAGE_MASK) {
+ ret = new_addr;
+ goto out;
+ }
+
+ ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
+ if (ret)
goto out;
}
ret = move_vma(vma, addr, old_len, new_len, new_addr);