vmscan: limit VM_EXEC protection to file pages
[safe/jmp/linux-2.6] / mm/mprotect.c
diff --git a/mm/mprotect.c b/mm/mprotect.c
index fded06f..8bc969d 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -4,7 +4,7 @@
  * (C) Copyright 1994 Linus Torvalds
  * (C) Copyright 2002 Christoph Hellwig
  *
- * Address space accounting code <alan@redhat.com>
+ * Address space accounting code <alan@lxorguk.ukuu.org.uk>
  * (C) Copyright 2002 Red Hat Inc, All Rights Reserved
  */
 
@@ -22,6 +22,8 @@
 #include <linux/swap.h>
 #include <linux/swapops.h>
 #include <linux/mmu_notifier.h>
+#include <linux/migrate.h>
+#include <linux/perf_event.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/cacheflush.h>
@@ -59,8 +61,7 @@ static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
                                 ptent = pte_mkwrite(ptent);
 
                         ptep_modify_prot_commit(mm, addr, pte, ptent);
-#ifdef CONFIG_MIGRATION
-                } else if (!pte_file(oldpte)) {
+                } else if (PAGE_MIGRATION && !pte_file(oldpte)) {
                         swp_entry_t entry = pte_to_swp_entry(oldpte);
 
                         if (is_write_migration_entry(entry)) {
@@ -72,9 +73,7 @@ static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
                                 set_pte_at(mm, addr, pte,
                                         swp_entry_to_pte(entry));
                         }
-#endif
                 }
-
         } while (pte++, addr += PAGE_SIZE, addr != end);
         arch_leave_lazy_mmu_mode();
         pte_unmap_unlock(pte - 1, ptl);
@@ -153,10 +152,11 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
         /*
          * If we make a private mapping writable we increase our commit;
          * but (without finer accounting) cannot reduce our commit if we
-         * make it unwritable again.
+         * make it unwritable again. hugetlb mapping were accounted for
+         * even if read-only so there is no need to account for them here
          */
         if (newflags & VM_WRITE) {
-                if (!(oldflags & (VM_ACCOUNT|VM_WRITE|
+                if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
                                 VM_SHARED|VM_NORESERVE))) {
                         charged = nrpages;
                         if (security_vm_enough_memory(charged))
@@ -219,8 +219,8 @@ fail:
         return error;
 }
 
-asmlinkage long
-sys_mprotect(unsigned long start, size_t len, unsigned long prot)
+SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
+                unsigned long, prot)
 {
         unsigned long vm_flags, nstart, end, tmp, reqprot;
         struct vm_area_struct *vma, *prev;
@@ -300,6 +300,7 @@ sys_mprotect(unsigned long start, size_t len, unsigned long prot)
                 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
                 if (error)
                         goto out;
+                perf_event_mmap(vma);
                 nstart = tmp;
 
                 if (nstart < prev->vm_end)
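
Note on the sys_mprotect() hunk above: SYSCALL_DEFINE3() is the generic
syscall definition macro from <linux/syscalls.h>. On architectures that do
not enable syscall wrappers it boils down to essentially the same asmlinkage
definition that the removed lines wrote by hand; a rough, illustrative sketch
(not part of this diff, kernel headers assumed):

        /* Approximate shape of what SYSCALL_DEFINE3(mprotect, ...) declares
         * when syscall wrappers are disabled; the argument names come from
         * the macro's type/name pairs in the hunk above.
         */
        asmlinkage long sys_mprotect(unsigned long start, size_t len,
                                     unsigned long prot);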