diff --git a/mm/ksm.c b/mm/ksm.c
index c49bb71..5575f86 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -30,6 +30,7 @@
 #include <linux/slab.h>
 #include <linux/rbtree.h>
 #include <linux/mmu_notifier.h>
+#include <linux/swap.h>
 #include <linux/ksm.h>
 
 #include <asm/tlbflush.h>
@@ -165,15 +166,15 @@ static unsigned long ksm_rmap_items;
 static unsigned long ksm_max_kernel_pages;
 
 /* Number of pages ksmd should scan in one batch */
-static unsigned int ksm_thread_pages_to_scan;
+static unsigned int ksm_thread_pages_to_scan = 100;
 
 /* Milliseconds ksmd should sleep between batches */
-static unsigned int ksm_thread_sleep_millisecs;
+static unsigned int ksm_thread_sleep_millisecs = 20;
 
 #define KSM_RUN_STOP   0
 #define KSM_RUN_MERGE  1
 #define KSM_RUN_UNMERGE        2
-static unsigned int ksm_run;
+static unsigned int ksm_run = KSM_RUN_STOP;
 
 static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
 static DEFINE_MUTEX(ksm_thread_mutex);
@@ -284,6 +285,19 @@ static inline int in_stable_tree(struct rmap_item *rmap_item)
 }
 
 /*
+ * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
+ * page tables after it has passed through ksm_exit() - which, if necessary,
+ * takes mmap_sem briefly to serialize against them.  ksm_exit() does not set
+ * a special flag: they can just back out as soon as mm_users goes to zero.
+ * ksm_test_exit() is used throughout to make this test for exit: in some
+ * places for correctness, in some places just to avoid unnecessary work.
+ */
+static inline bool ksm_test_exit(struct mm_struct *mm)
+{
+       return atomic_read(&mm->mm_users) == 0;
+}
+
+/*
  * We use break_ksm to break COW on a ksm page: it's a stripped down
  *
  *     if (get_user_pages(current, mm, addr, 1, 1, 1, &page, NULL) == 1)
@@ -294,10 +308,10 @@ static inline int in_stable_tree(struct rmap_item *rmap_item)
  * Could a ksm page appear anywhere else?  Actually yes, in a VM_PFNMAP
  * mmap of /dev/mem or /dev/kmem, where we would not want to touch it.
  */
-static void break_ksm(struct vm_area_struct *vma, unsigned long addr)
+static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
 {
        struct page *page;
-       int ret;
+       int ret = 0;
 
        do {
                cond_resched();
@@ -310,9 +324,36 @@ static void break_ksm(struct vm_area_struct *vma, unsigned long addr)
                else
                        ret = VM_FAULT_WRITE;
                put_page(page);
-       } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS)));
-
-       /* Which leaves us looping there if VM_FAULT_OOM: hmmm... */
+       } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM)));
+       /*
+        * We must loop because handle_mm_fault() may back out if there's
+        * any difficulty e.g. if pte accessed bit gets updated concurrently.
+        *
+        * VM_FAULT_WRITE is what we have been hoping for: it indicates that
+        * COW has been broken, even if the vma does not permit VM_WRITE;
+        * but note that a concurrent fault might break PageKsm for us.
+        *
+        * VM_FAULT_SIGBUS could occur if we race with truncation of the
+        * backing file, which also invalidates anonymous pages: that's
+        * okay, that truncation will have unmapped the PageKsm for us.
+        *
+        * VM_FAULT_OOM: at the time of writing (late July 2009), setting
+        * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
+        * current task has TIF_MEMDIE set, and will be OOM killed on return
+        * to user; and ksmd, having no mm, would never be chosen for that.
+        *
+        * But if the mm is in a limited mem_cgroup, then the fault may fail
+        * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and
+        * even ksmd can fail in this way - though it's usually breaking ksm
+        * just to undo a merge it made a moment before, so unlikely to oom.
+        *
+        * That's a pity: we might therefore have more kernel pages allocated
+        * than we're counting as nodes in the stable tree; but ksm_do_scan
+        * will retry to break_cow on each pass, so should recover the page
+        * in due course.  The important thing is to not let VM_MERGEABLE
+        * be cleared while any such pages might remain in the area.
+        */
+       return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
 }
 
 static void break_cow(struct mm_struct *mm, unsigned long addr)
@@ -320,6 +361,8 @@ static void break_cow(struct mm_struct *mm, unsigned long addr)
        struct vm_area_struct *vma;
 
        down_read(&mm->mmap_sem);
+       if (ksm_test_exit(mm))
+               goto out;
        vma = find_vma(mm, addr);
        if (!vma || vma->vm_start > addr)
                goto out;
@@ -338,6 +381,8 @@ static struct page *get_mergeable_page(struct rmap_item *rmap_item)
        struct page *page;
 
        down_read(&mm->mmap_sem);
+       if (ksm_test_exit(mm))
+               goto out;
        vma = find_vma(mm, addr);
        if (!vma || vma->vm_start > addr)
                goto out;
@@ -412,19 +457,14 @@ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
        } else if (rmap_item->address & NODE_FLAG) {
                unsigned char age;
                /*
-                * ksm_thread can and must skip the rb_erase, because
+                * Usually ksmd can and must skip the rb_erase, because
                 * root_unstable_tree was already reset to RB_ROOT.
-                * But __ksm_exit has to be careful: do the rb_erase
-                * if it's interrupting a scan, and this rmap_item was
-                * inserted by this scan rather than left from before.
-                *
-                * Because of the case in which remove_mm_from_lists
-                * increments seqnr before removing rmaps, unstable_nr
-                * may even be 2 behind seqnr, but should never be
-                * further behind.  Yes, I did have trouble with this!
+                * But be careful when an mm is exiting: do the rb_erase
+                * if this rmap_item was inserted by this scan, rather
+                * than left over from before.
                 */
                age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
-               BUG_ON(age > 2);
+               BUG_ON(age > 1);
                if (!age)
                        rb_erase(&rmap_item->node, &root_unstable_tree);
                ksm_pages_unshared--;
@@ -462,71 +502,85 @@ static void remove_trailing_rmap_items(struct mm_slot *mm_slot,
  * to the next pass of ksmd - consider, for example, how ksmd might be
  * in cmp_and_merge_page on one of the rmap_items we would be removing.
  */
-static void unmerge_ksm_pages(struct vm_area_struct *vma,
-                             unsigned long start, unsigned long end)
+static int unmerge_ksm_pages(struct vm_area_struct *vma,
+                            unsigned long start, unsigned long end)
 {
        unsigned long addr;
+       int err = 0;
 
-       for (addr = start; addr < end; addr += PAGE_SIZE)
-               break_ksm(vma, addr);
+       for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
+               if (ksm_test_exit(vma->vm_mm))
+                       break;
+               if (signal_pending(current))
+                       err = -ERESTARTSYS;
+               else
+                       err = break_ksm(vma, addr);
+       }
+       return err;
 }
 
-static void unmerge_and_remove_all_rmap_items(void)
+#ifdef CONFIG_SYSFS
+/*
+ * Only called through the sysfs control interface:
+ */
+static int unmerge_and_remove_all_rmap_items(void)
 {
        struct mm_slot *mm_slot;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
+       int err = 0;
+
+       spin_lock(&ksm_mmlist_lock);
+       ksm_scan.mm_slot = list_entry(ksm_mm_head.mm_list.next,
+                                               struct mm_slot, mm_list);
+       spin_unlock(&ksm_mmlist_lock);
 
-       list_for_each_entry(mm_slot, &ksm_mm_head.mm_list, mm_list) {
+       for (mm_slot = ksm_scan.mm_slot;
+                       mm_slot != &ksm_mm_head; mm_slot = ksm_scan.mm_slot) {
                mm = mm_slot->mm;
                down_read(&mm->mmap_sem);
                for (vma = mm->mmap; vma; vma = vma->vm_next) {
+                       if (ksm_test_exit(mm))
+                               break;
                        if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
                                continue;
-                       unmerge_ksm_pages(vma, vma->vm_start, vma->vm_end);
+                       err = unmerge_ksm_pages(vma,
+                                               vma->vm_start, vma->vm_end);
+                       if (err)
+                               goto error;
                }
+
                remove_trailing_rmap_items(mm_slot, mm_slot->rmap_list.next);
-               up_read(&mm->mmap_sem);
-       }
 
-       spin_lock(&ksm_mmlist_lock);
-       if (ksm_scan.mm_slot != &ksm_mm_head) {
-               ksm_scan.mm_slot = &ksm_mm_head;
-               ksm_scan.seqnr++;
+               spin_lock(&ksm_mmlist_lock);
+               ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
+                                               struct mm_slot, mm_list);
+               if (ksm_test_exit(mm)) {
+                       hlist_del(&mm_slot->link);
+                       list_del(&mm_slot->mm_list);
+                       spin_unlock(&ksm_mmlist_lock);
+
+                       free_mm_slot(mm_slot);
+                       clear_bit(MMF_VM_MERGEABLE, &mm->flags);
+                       up_read(&mm->mmap_sem);
+                       mmdrop(mm);
+               } else {
+                       spin_unlock(&ksm_mmlist_lock);
+                       up_read(&mm->mmap_sem);
+               }
        }
-       spin_unlock(&ksm_mmlist_lock);
-}
 
-static void remove_mm_from_lists(struct mm_struct *mm)
-{
-       struct mm_slot *mm_slot;
+       ksm_scan.seqnr = 0;
+       return 0;
 
+error:
+       up_read(&mm->mmap_sem);
        spin_lock(&ksm_mmlist_lock);
-       mm_slot = get_mm_slot(mm);
-
-       /*
-        * This mm_slot is always at the scanning cursor when we're
-        * called from scan_get_next_rmap_item; but it's a special
-        * case when we're called from __ksm_exit.
-        */
-       if (ksm_scan.mm_slot == mm_slot) {
-               ksm_scan.mm_slot = list_entry(
-                       mm_slot->mm_list.next, struct mm_slot, mm_list);
-               ksm_scan.address = 0;
-               ksm_scan.rmap_item = list_entry(
-                       &ksm_scan.mm_slot->rmap_list, struct rmap_item, link);
-               if (ksm_scan.mm_slot == &ksm_mm_head)
-                       ksm_scan.seqnr++;
-       }
-
-       hlist_del(&mm_slot->link);
-       list_del(&mm_slot->mm_list);
+       ksm_scan.mm_slot = &ksm_mm_head;
        spin_unlock(&ksm_mmlist_lock);
-
-       remove_trailing_rmap_items(mm_slot, mm_slot->rmap_list.next);
-       free_mm_slot(mm_slot);
-       clear_bit(MMF_VM_MERGEABLE, &mm->flags);
+       return err;
 }
+#endif /* CONFIG_SYSFS */
 
 static u32 calc_checksum(struct page *page)
 {
@@ -742,6 +796,9 @@ static int try_to_merge_with_ksm_page(struct mm_struct *mm1,
        int err = -EFAULT;
 
        down_read(&mm1->mmap_sem);
+       if (ksm_test_exit(mm1))
+               goto out;
+
        vma = find_vma(mm1, addr1);
        if (!vma || vma->vm_start > addr1)
                goto out;
@@ -783,6 +840,10 @@ static int try_to_merge_two_pages(struct mm_struct *mm1, unsigned long addr1,
                return err;
 
        down_read(&mm1->mmap_sem);
+       if (ksm_test_exit(mm1)) {
+               up_read(&mm1->mmap_sem);
+               goto out;
+       }
        vma = find_vma(mm1, addr1);
        if (!vma || vma->vm_start > addr1) {
                up_read(&mm1->mmap_sem);
@@ -951,6 +1012,7 @@ static struct rmap_item *unstable_tree_search_insert(struct page *page,
                struct rmap_item *tree_rmap_item;
                int ret;
 
+               cond_resched();
                tree_rmap_item = rb_entry(*new, struct rmap_item, node);
                page2[0] = get_mergeable_page(tree_rmap_item);
                if (!page2[0])
@@ -1051,6 +1113,8 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
        /*
         * A ksm page might have got here by fork, but its other
         * references have already been removed from the stable tree.
+        * Or it might be left over from a break_ksm which failed
+        * when the mem_cgroup had reached its limit: try again now.
         */
        if (PageKsm(page))
                break_cow(rmap_item->mm, rmap_item->address);
@@ -1159,7 +1223,12 @@ next_mm:
 
        mm = slot->mm;
        down_read(&mm->mmap_sem);
-       for (vma = find_vma(mm, ksm_scan.address); vma; vma = vma->vm_next) {
+       if (ksm_test_exit(mm))
+               vma = NULL;
+       else
+               vma = find_vma(mm, ksm_scan.address);
+
+       for (; vma; vma = vma->vm_next) {
                if (!(vma->vm_flags & VM_MERGEABLE))
                        continue;
                if (ksm_scan.address < vma->vm_start)
@@ -1168,6 +1237,8 @@ next_mm:
                        ksm_scan.address = vma->vm_end;
 
                while (ksm_scan.address < vma->vm_end) {
+                       if (ksm_test_exit(mm))
+                               break;
                        *page = follow_page(vma, ksm_scan.address, FOLL_GET);
                        if (*page && PageAnon(*page)) {
                                flush_anon_page(vma, *page, ksm_scan.address);
@@ -1190,40 +1261,48 @@ next_mm:
                }
        }
 
-       if (!ksm_scan.address) {
-               /*
-                * We've completed a full scan of all vmas, holding mmap_sem
-                * throughout, and found no VM_MERGEABLE: so do the same as
-                * __ksm_exit does to remove this mm from all our lists now.
-                */
-               remove_mm_from_lists(mm);
-               up_read(&mm->mmap_sem);
-               slot = ksm_scan.mm_slot;
-               if (slot != &ksm_mm_head)
-                       goto next_mm;
-               return NULL;
+       if (ksm_test_exit(mm)) {
+               ksm_scan.address = 0;
+               ksm_scan.rmap_item = list_entry(&slot->rmap_list,
+                                               struct rmap_item, link);
        }
-
        /*
         * Nuke all the rmap_items that are above this current rmap:
         * because there were no VM_MERGEABLE vmas with such addresses.
         */
        remove_trailing_rmap_items(slot, ksm_scan.rmap_item->link.next);
-       up_read(&mm->mmap_sem);
 
        spin_lock(&ksm_mmlist_lock);
-       slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list);
-       ksm_scan.mm_slot = slot;
-       spin_unlock(&ksm_mmlist_lock);
+       ksm_scan.mm_slot = list_entry(slot->mm_list.next,
+                                               struct mm_slot, mm_list);
+       if (ksm_scan.address == 0) {
+               /*
+                * We've completed a full scan of all vmas, holding mmap_sem
+                * throughout, and found no VM_MERGEABLE: so do the same as
+                * __ksm_exit does to remove this mm from all our lists now.
+                * This applies either when cleaning up after __ksm_exit
+                * (but beware: we can reach here even before __ksm_exit),
+                * or when all VM_MERGEABLE areas have been unmapped (and
+                * mmap_sem then protects against race with MADV_MERGEABLE).
+                */
+               hlist_del(&slot->link);
+               list_del(&slot->mm_list);
+               spin_unlock(&ksm_mmlist_lock);
+
+               free_mm_slot(slot);
+               clear_bit(MMF_VM_MERGEABLE, &mm->flags);
+               up_read(&mm->mmap_sem);
+               mmdrop(mm);
+       } else {
+               spin_unlock(&ksm_mmlist_lock);
+               up_read(&mm->mmap_sem);
+       }
 
        /* Repeat until we've completed scanning the whole list */
+       slot = ksm_scan.mm_slot;
        if (slot != &ksm_mm_head)
                goto next_mm;
 
-       /*
-        * Bump seqnr here rather than at top, so that __ksm_exit
-        * can skip rb_erase on unstable tree until we run again.
-        */
        ksm_scan.seqnr++;
        return NULL;
 }
@@ -1286,6 +1365,7 @@ int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
                unsigned long end, int advice, unsigned long *vm_flags)
 {
        struct mm_struct *mm = vma->vm_mm;
+       int err;
 
        switch (advice) {
        case MADV_MERGEABLE:
@@ -1298,9 +1378,11 @@ int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
                                 VM_MIXEDMAP  | VM_SAO))
                        return 0;               /* just ignore the advice */
 
-               if (!test_bit(MMF_VM_MERGEABLE, &mm->flags))
-                       if (__ksm_enter(mm) < 0)
-                               return -EAGAIN;
+               if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
+                       err = __ksm_enter(mm);
+                       if (err)
+                               return err;
+               }
 
                *vm_flags |= VM_MERGEABLE;
                break;
@@ -1309,8 +1391,11 @@ int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
                if (!(*vm_flags & VM_MERGEABLE))
                        return 0;               /* just ignore the advice */
 
-               if (vma->anon_vma)
-                       unmerge_ksm_pages(vma, start, end);
+               if (vma->anon_vma) {
+                       err = unmerge_ksm_pages(vma, start, end);
+                       if (err)
+                               return err;
+               }
 
                *vm_flags &= ~VM_MERGEABLE;
                break;
@@ -1342,6 +1427,7 @@ int __ksm_enter(struct mm_struct *mm)
        spin_unlock(&ksm_mmlist_lock);
 
        set_bit(MMF_VM_MERGEABLE, &mm->flags);
+       atomic_inc(&mm->mm_count);
 
        if (needs_wakeup)
                wake_up_interruptible(&ksm_thread_wait);
@@ -1351,16 +1437,47 @@ int __ksm_enter(struct mm_struct *mm)
 
 void __ksm_exit(struct mm_struct *mm)
 {
+       struct mm_slot *mm_slot;
+       int easy_to_free = 0;
+
        /*
-        * This process is exiting: doesn't hold and doesn't need mmap_sem;
-        * but we do need to exclude ksmd and other exiters while we modify
-        * the various lists and trees.
+        * This process is exiting: if it's straightforward (as is the
+        * case when ksmd was never running), free mm_slot immediately.
+        * But if it's at the cursor or has rmap_items linked to it, use
+        * mmap_sem to synchronize with any break_cows before pagetables
+        * are freed, and leave the mm_slot on the list for ksmd to free.
+        * Beware: ksm may already have noticed it exiting and freed the slot.
         */
-       mutex_lock(&ksm_thread_mutex);
-       remove_mm_from_lists(mm);
-       mutex_unlock(&ksm_thread_mutex);
+
+       spin_lock(&ksm_mmlist_lock);
+       mm_slot = get_mm_slot(mm);
+       if (mm_slot && ksm_scan.mm_slot != mm_slot) {
+               if (list_empty(&mm_slot->rmap_list)) {
+                       hlist_del(&mm_slot->link);
+                       list_del(&mm_slot->mm_list);
+                       easy_to_free = 1;
+               } else {
+                       list_move(&mm_slot->mm_list,
+                                 &ksm_scan.mm_slot->mm_list);
+               }
+       }
+       spin_unlock(&ksm_mmlist_lock);
+
+       if (easy_to_free) {
+               free_mm_slot(mm_slot);
+               clear_bit(MMF_VM_MERGEABLE, &mm->flags);
+               mmdrop(mm);
+       } else if (mm_slot) {
+               down_write(&mm->mmap_sem);
+               up_write(&mm->mmap_sem);
+       }
 }
 
+#ifdef CONFIG_SYSFS
+/*
+ * This all compiles without CONFIG_SYSFS, but is a waste of space.
+ */
+
 #define KSM_ATTR_RO(_name) \
        static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
 #define KSM_ATTR(_name) \
@@ -1441,8 +1558,15 @@ static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
        mutex_lock(&ksm_thread_mutex);
        if (ksm_run != flags) {
                ksm_run = flags;
-               if (flags & KSM_RUN_UNMERGE)
-                       unmerge_and_remove_all_rmap_items();
+               if (flags & KSM_RUN_UNMERGE) {
+                       current->flags |= PF_OOM_ORIGIN;
+                       err = unmerge_and_remove_all_rmap_items();
+                       current->flags &= ~PF_OOM_ORIGIN;
+                       if (err) {
+                               ksm_run = KSM_RUN_STOP;
+                               count = err;
+                       }
+               }
        }
        mutex_unlock(&ksm_thread_mutex);
 
@@ -1538,12 +1662,15 @@ static struct attribute_group ksm_attr_group = {
        .attrs = ksm_attrs,
        .name = "ksm",
 };
+#endif /* CONFIG_SYSFS */
 
 static int __init ksm_init(void)
 {
        struct task_struct *ksm_thread;
        int err;
 
+       ksm_max_kernel_pages = totalram_pages / 4;
+
        err = ksm_slab_init();
        if (err)
                goto out;
@@ -1559,16 +1686,20 @@ static int __init ksm_init(void)
                goto out_free2;
        }
 
+#ifdef CONFIG_SYSFS
        err = sysfs_create_group(mm_kobj, &ksm_attr_group);
        if (err) {
                printk(KERN_ERR "ksm: register sysfs failed\n");
-               goto out_free3;
+               kthread_stop(ksm_thread);
+               goto out_free2;
        }
+#else
+       ksm_run = KSM_RUN_MERGE;        /* no way for user to start it */
+
+#endif /* CONFIG_SYSFS */
 
        return 0;
 
-out_free3:
-       kthread_stop(ksm_thread);
 out_free2:
        mm_slots_hash_free();
 out_free1: