diff --git a/mm/memory.c b/mm/memory.c
index 1a83fe5..baa999e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -52,6 +52,9 @@
 #include <linux/writeback.h>
 #include <linux/memcontrol.h>
 #include <linux/mmu_notifier.h>
+#include <linux/kallsyms.h>
+#include <linux/swapops.h>
+#include <linux/elf.h>
 
 #include <asm/pgalloc.h>
 #include <asm/uaccess.h>
@@ -59,9 +62,6 @@
 #include <asm/tlbflush.h>
 #include <asm/pgtable.h>
 
-#include <linux/swapops.h>
-#include <linux/elf.h>
-
 #include "internal.h"
 
 #ifndef CONFIG_NEED_MULTIPLE_NODES
@@ -375,15 +375,65 @@ static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss)
  *
  * The calling function must still handle the error.
  */
-static void print_bad_pte(struct vm_area_struct *vma, pte_t pte,
-                         unsigned long vaddr)
-{
-       printk(KERN_ERR "Bad pte = %08llx, process = %s, "
-                       "vm_flags = %lx, vaddr = %lx\n",
-               (long long)pte_val(pte),
-               (vma->vm_mm == current->mm ? current->comm : "???"),
-               vma->vm_flags, vaddr);
+static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
+                         pte_t pte, struct page *page)
+{
+       pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
+       pud_t *pud = pud_offset(pgd, addr);
+       pmd_t *pmd = pmd_offset(pud, addr);
+       struct address_space *mapping;
+       pgoff_t index;
+       static unsigned long resume;
+       static unsigned long nr_shown;
+       static unsigned long nr_unshown;
+
+       /*
+        * Allow a burst of 60 reports, then keep quiet for that minute;
+        * or allow a steady drip of one report per second.
+        */
+       if (nr_shown == 60) {
+               if (time_before(jiffies, resume)) {
+                       nr_unshown++;
+                       return;
+               }
+               if (nr_unshown) {
+                       printk(KERN_ALERT
+                               "BUG: Bad page map: %lu messages suppressed\n",
+                               nr_unshown);
+                       nr_unshown = 0;
+               }
+               nr_shown = 0;
+       }
+       if (nr_shown++ == 0)
+               resume = jiffies + 60 * HZ;
+
+       mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
+       index = linear_page_index(vma, addr);
+
+       printk(KERN_ALERT
+               "BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
+               current->comm,
+               (long long)pte_val(pte), (long long)pmd_val(*pmd));
+       if (page) {
+               printk(KERN_ALERT
+               "page:%p flags:%p count:%d mapcount:%d mapping:%p index:%lx\n",
+               page, (void *)page->flags, page_count(page),
+               page_mapcount(page), page->mapping, page->index);
+       }
+       printk(KERN_ALERT
+               "addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n",
+               (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
+       /*
+        * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
+        */
+       if (vma->vm_ops)
+               print_symbol(KERN_ALERT "vma->vm_ops->fault: %s\n",
+                               (unsigned long)vma->vm_ops->fault);
+       if (vma->vm_file && vma->vm_file->f_op)
+               print_symbol(KERN_ALERT "vma->vm_file->f_op->mmap: %s\n",
+                               (unsigned long)vma->vm_file->f_op->mmap);
        dump_stack();
+       add_taint(TAINT_BAD_PAGE);
 }
 
 static inline int is_cow_mapping(unsigned int flags)
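
Aside: the burst-then-quiet throttling above is self-contained enough to reuse
elsewhere. A minimal userspace sketch of the same policy, with time() standing
in for jiffies and made-up names (an illustration, not kernel code):

    #include <stdio.h>
    #include <time.h>

    /* Hypothetical stand-in for print_bad_pte()'s rate limiting.  Unlike
     * time_before(jiffies, resume), a plain time_t comparison needs no
     * wraparound handling. */
    static void report_bad_entry(const char *what)
    {
            static time_t resume;
            static unsigned long nr_shown, nr_unshown;
            time_t now = time(NULL);

            /* Allow a burst of 60 reports, then keep quiet for that minute;
             * or allow a steady drip of one report per second. */
            if (nr_shown == 60) {
                    if (now < resume) {
                            nr_unshown++;
                            return;
                    }
                    if (nr_unshown) {
                            fprintf(stderr, "%lu reports suppressed\n",
                                    nr_unshown);
                            nr_unshown = 0;
                    }
                    nr_shown = 0;
            }
            if (nr_shown++ == 0)
                    resume = now + 60;

            fprintf(stderr, "bad entry: %s\n", what);
    }
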
@@ -441,21 +491,18 @@ static inline int is_cow_mapping(unsigned int flags)
 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
                                pte_t pte)
 {
-       unsigned long pfn;
+       unsigned long pfn = pte_pfn(pte);
 
        if (HAVE_PTE_SPECIAL) {
-               if (likely(!pte_special(pte))) {
-                       VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
-                       return pte_page(pte);
-               }
-               VM_BUG_ON(!(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)));
+               if (likely(!pte_special(pte)))
+                       goto check_pfn;
+               if (!(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)))
+                       print_bad_pte(vma, addr, pte, NULL);
                return NULL;
        }
 
        /* !HAVE_PTE_SPECIAL case follows: */
 
-       pfn = pte_pfn(pte);
-
        if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
                if (vma->vm_flags & VM_MIXEDMAP) {
                        if (!pfn_valid(pfn))
@@ -471,11 +518,14 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
                }
        }
 
-       VM_BUG_ON(!pfn_valid(pfn));
+check_pfn:
+       if (unlikely(pfn > highest_memmap_pfn)) {
+               print_bad_pte(vma, addr, pte, NULL);
+               return NULL;
+       }
 
        /*
         * NOTE! We still have PageReserved() pages in the page tables.
-        *
         * eg. VDSO mappings can cause them to exist.
         */
 out:
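
For context, a NULL return from vm_normal_page() now covers both a special pte
and a pfn beyond highest_memmap_pfn, and callers treat it as "no struct page to
operate on" rather than as a hard error. A hypothetical caller fragment, not
part of this patch:

            struct page *page = vm_normal_page(vma, addr, ptent);

            if (!page)
                    return;         /* pte_special() or raw PFN mapping: skip */

            /* only now is it safe to use page_mapcount(page), page->mapping, ... */
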
@@ -772,7 +822,9 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
                                        mark_page_accessed(page);
                                file_rss--;
                        }
-                       page_remove_rmap(page, vma);
+                       page_remove_rmap(page);
+                       if (unlikely(page_mapcount(page) < 0))
+                               print_bad_pte(vma, addr, ptent, page);
                        tlb_remove_page(tlb, page);
                        continue;
                }
@@ -782,8 +834,12 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
                 */
                if (unlikely(details))
                        continue;
-               if (!pte_file(ptent))
-                       free_swap_and_cache(pte_to_swp_entry(ptent));
+               if (pte_file(ptent)) {
+                       if (unlikely(!(vma->vm_flags & VM_NONLINEAR)))
+                               print_bad_pte(vma, addr, ptent, NULL);
+               } else if
+                 (unlikely(!free_swap_and_cache(pte_to_swp_entry(ptent))))
+                       print_bad_pte(vma, addr, ptent, NULL);
                pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
        } while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0));
 
@@ -1154,6 +1210,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
        int write = !!(flags & GUP_FLAGS_WRITE);
        int force = !!(flags & GUP_FLAGS_FORCE);
        int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);
+       int ignore_sigkill = !!(flags & GUP_FLAGS_IGNORE_SIGKILL);
 
        if (len <= 0)
                return 0;
@@ -1232,12 +1289,15 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                        struct page *page;
 
                        /*
-                        * If tsk is ooming, cut off its access to large memory
-                        * allocations. It has a pending SIGKILL, but it can't
-                        * be processed until returning to user space.
+                        * If we have a pending SIGKILL, don't keep faulting
+                        * pages and potentially allocating memory, unless
+                        * current is handling munlock--e.g., on exit. In
+                        * that case, we are not allocating memory.  Rather,
+                        * we're only unlocking already resident/mapped pages.
                         */
-                       if (unlikely(test_tsk_thread_flag(tsk, TIF_MEMDIE)))
-                               return i ? i : -ENOMEM;
+                       if (unlikely(!ignore_sigkill &&
+                                       fatal_signal_pending(current)))
+                               return i ? i : -ERESTARTSYS;
 
                        if (write)
                                foll_flags |= FOLL_WRITE;
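
With this change a fatal signal reaches get_user_pages() callers as a short
count or -ERESTARTSYS instead of -ENOMEM. A hedged caller-side sketch, using
this kernel's get_user_pages() signature with placeholder variables (start,
nr_pages and pages belong to the imaginary caller):

            int ret = get_user_pages(current, current->mm, start, nr_pages,
                                     1 /* write */, 0 /* force */, pages, NULL);

            if (ret == -ERESTARTSYS)
                    return ret;     /* SIGKILL pending: unwind instead of retrying */
            if (ret < nr_pages) {
                    /* error or partial pin: release any pages that were pinned */
            }
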
@@ -1451,6 +1511,7 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
                        unsigned long pfn)
 {
        int ret;
+       pgprot_t pgprot = vma->vm_page_prot;
        /*
         * Technically, architectures with pte_special can avoid all these
         * restrictions (same for remap_pfn_range).  However we would like
@@ -1465,10 +1526,10 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 
        if (addr < vma->vm_start || addr >= vma->vm_end)
                return -EFAULT;
-       if (track_pfn_vma_new(vma, vma->vm_page_prot, pfn, PAGE_SIZE))
+       if (track_pfn_vma_new(vma, &pgprot, pfn, PAGE_SIZE))
                return -EINVAL;
 
-       ret = insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+       ret = insert_pfn(vma, addr, pfn, pgprot);
 
        if (ret)
                untrack_pfn_vma(vma, pfn, PAGE_SIZE);
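
Callers of vm_insert_pfn() need no change here: track_pfn_vma_new() may now
adjust the local pgprot copy, but a driver's fault handler still just hands
over a pfn. A hypothetical handler, with made-up mydrv_* names:

    static int mydrv_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
    {
            unsigned long pfn = mydrv_base_pfn + vmf->pgoff;    /* hypothetical */

            if (vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn))
                    return VM_FAULT_SIGBUS;
            return VM_FAULT_NOPAGE;
    }
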
@@ -1611,9 +1672,15 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 
        vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
 
-       err = track_pfn_vma_new(vma, prot, pfn, PAGE_ALIGN(size));
-       if (err)
+       err = track_pfn_vma_new(vma, &prot, pfn, PAGE_ALIGN(size));
+       if (err) {
+               /*
+                * Clear the flags so the higher-level routine calling
+                * unmap_vmas() knows that no track_pfn cleanup is needed.
+                */
+               vma->vm_flags &= ~(VM_IO | VM_RESERVED | VM_PFNMAP);
                return -EINVAL;
+       }
 
        BUG_ON(addr >= end);
        pfn -= addr >> PAGE_SHIFT;
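
The flag clearing matters because remap_pfn_range() is typically the last step
of a driver's ->mmap; when it fails, the generic teardown path must not try to
untrack a PAT reservation that was never made. A purely illustrative caller:

    static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
    {
            unsigned long pfn = mydrv_phys_base >> PAGE_SHIFT;  /* hypothetical */

            return remap_pfn_range(vma, vma->vm_start, pfn,
                                   vma->vm_end - vma->vm_start,
                                   vma->vm_page_prot);
    }
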
@@ -1932,7 +1999,7 @@ gotten:
         * Don't let another task, with possibly unlocked vma,
         * keep the mlocked page.
         */
-       if (vma->vm_flags & VM_LOCKED) {
+       if ((vma->vm_flags & VM_LOCKED) && old_page) {
                lock_page(old_page);    /* for LRU manipulation */
                clear_page_mlock(old_page);
                unlock_page(old_page);
@@ -1940,7 +2007,7 @@ gotten:
        cow_user_page(new_page, old_page, address, vma);
        __SetPageUptodate(new_page);
 
-       if (mem_cgroup_charge(new_page, mm, GFP_KERNEL))
+       if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
                goto oom_free_new;
 
        /*
@@ -1991,7 +2058,7 @@ gotten:
                         * mapcount is visible. So transitively, TLBs to
                         * old page will be flushed before it can be reused.
                         */
-                       page_remove_rmap(old_page, vma);
+                       page_remove_rmap(old_page);
                }
 
                /* Free the old page.. */
@@ -2332,6 +2399,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        struct page *page;
        swp_entry_t entry;
        pte_t pte;
+       struct mem_cgroup *ptr = NULL;
        int ret = 0;
 
        if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
@@ -2370,7 +2438,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        lock_page(page);
        delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 
-       if (mem_cgroup_charge(page, mm, GFP_KERNEL)) {
+       if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
                ret = VM_FAULT_OOM;
                unlock_page(page);
                goto out;
@@ -2388,7 +2456,19 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
                goto out_nomap;
        }
 
-       /* The page isn't present yet, go ahead with the fault. */
+       /*
+        * The page isn't present yet, go ahead with the fault.
+        *
+        * Be careful about the sequence of operations here.
+        * To get its accounting right, reuse_swap_page() must be called
+        * while the page is counted on swap but not yet in mapcount i.e.
+        * before page_add_anon_rmap() and swap_free(); try_to_free_swap()
+        * must be called after the swap_free(), or it will never succeed.
+        * Because delete_from_swap_cache() may be called by reuse_swap_page(),
+        * mem_cgroup_commit_charge_swapin() may not be able to find swp_entry
+        * in page->private. In this case, a record in swap_cgroup is silently
+        * discarded at swap_free().
+        */
 
        inc_mm_counter(mm, anon_rss);
        pte = mk_pte(page, vma->vm_page_prot);
@@ -2396,10 +2476,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
                pte = maybe_mkwrite(pte_mkdirty(pte), vma);
                write_access = 0;
        }
-
        flush_icache_page(vma, page);
        set_pte_at(mm, address, page_table, pte);
        page_add_anon_rmap(page, vma, address);
+       /* It's better to call commit-charge after rmap is established */
+       mem_cgroup_commit_charge_swapin(page, ptr);
 
        swap_free(entry);
        if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
@@ -2420,7 +2501,7 @@ unlock:
 out:
        return ret;
 out_nomap:
-       mem_cgroup_uncharge_page(page);
+       mem_cgroup_cancel_charge_swapin(ptr);
        pte_unmap_unlock(page_table, ptl);
        unlock_page(page);
        page_cache_release(page);
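
Condensing the ordering rules from the comment above into one sequence (a
sketch of the protocol only; error handling and the write_access path are
trimmed):

            /* 1. the charge is only reserved here, nothing visible yet */
            mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr);
            /* 2. reuse_swap_page() must run while the page is still counted
             *    on swap, i.e. before page_add_anon_rmap() and swap_free() */
            set_pte_at(mm, address, page_table, pte);
            page_add_anon_rmap(page, vma, address);
            /* 3. commit only after the rmap is established */
            mem_cgroup_commit_charge_swapin(page, ptr);
            /* 4. try_to_free_swap() can only succeed after swap_free() */
            swap_free(entry);

            /* any failure between steps 1 and 3 must undo the reservation: */
            mem_cgroup_cancel_charge_swapin(ptr);
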
@@ -2450,7 +2531,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                goto oom;
        __SetPageUptodate(page);
 
-       if (mem_cgroup_charge(page, mm, GFP_KERNEL))
+       if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
                goto oom_free_page;
 
        entry = mk_pte(page, vma->vm_page_prot);
@@ -2541,7 +2622,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                                ret = VM_FAULT_OOM;
                                goto out;
                        }
-                       if (mem_cgroup_charge(page, mm, GFP_KERNEL)) {
+                       if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
                                ret = VM_FAULT_OOM;
                                page_cache_release(page);
                                goto out;
@@ -2679,12 +2760,11 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
                return 0;
 
-       if (unlikely(!(vma->vm_flags & VM_NONLINEAR) ||
-                       !(vma->vm_flags & VM_CAN_NONLINEAR))) {
+       if (unlikely(!(vma->vm_flags & VM_NONLINEAR))) {
                /*
                 * Page table corrupted: show pte and kill process.
                 */
-               print_bad_pte(vma, orig_pte, address);
+               print_bad_pte(vma, address, orig_pte, NULL);
                return VM_FAULT_OOM;
        }
 
@@ -2966,7 +3046,7 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
 {
        resource_size_t phys_addr;
        unsigned long prot = 0;
-       void *maddr;
+       void __iomem *maddr;
        int offset = addr & (PAGE_SIZE-1);
 
        if (follow_phys(vma, addr, write, &prot, &phys_addr))
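
The __iomem annotation lets sparse flag plain dereferences of an ioremap()ed
pointer; the access pattern it enforces looks like this generic MMIO sketch
(not the actual body of generic_access_phys()):

            void __iomem *maddr;
            u32 val;

            maddr = ioremap(phys_addr, PAGE_SIZE);
            if (!maddr)
                    return -ENOMEM;
            val = readl(maddr + offset);    /* readl()/writel(), never a plain deref */
            iounmap(maddr);
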
@@ -3092,6 +3172,15 @@ void print_vma_addr(char *prefix, unsigned long ip)
 #ifdef CONFIG_PROVE_LOCKING
 void might_fault(void)
 {
+       /*
+        * Some code (nfs/sunrpc) uses socket ops on kernel memory while
+        * holding the mmap_sem, this is safe because kernel memory doesn't
+        * get paged out, therefore we'll never actually fault, and the
+        * below annotations will generate false positives.
+        */
+       if (segment_eq(get_fs(), KERNEL_DS))
+               return;
+
        might_sleep();
        /*
         * it would be nicer only to annotate paths which are not under
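
The KERNEL_DS case being skipped comes from the usual set_fs() pattern used by
kernel_sendmsg()-style helpers; sketched here for illustration, not copied
from the sunrpc code:

            mm_segment_t oldfs = get_fs();

            set_fs(KERNEL_DS);
            /* uaccess routines now accept kernel pointers; no page fault can
             * happen, so might_fault()'s lockdep annotation would only raise
             * false positives here */
            ret = sock_sendmsg(sock, &msg, size);
            set_fs(oldfs);
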