perf: Make bp_len type to u64 generic across the arch
diff --git a/mm/ksm.c b/mm/ksm.c
index dfdc292..56a0da1 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -29,6 +29,7 @@
 #include <linux/wait.h>
 #include <linux/slab.h>
 #include <linux/rbtree.h>
+#include <linux/memory.h>
 #include <linux/mmu_notifier.h>
 #include <linux/swap.h>
 #include <linux/ksm.h>
@@ -108,14 +109,14 @@ struct ksm_scan {
 
 /**
  * struct stable_node - node of the stable rbtree
- * @page: pointer to struct page of the ksm page
  * @node: rb node of this ksm page in the stable tree
  * @hlist: hlist head of rmap_items using this ksm page
+ * @kpfn: page frame number of this ksm page
  */
 struct stable_node {
-       struct page *page;
        struct rb_node node;
        struct hlist_head hlist;
+       unsigned long kpfn;
 };
 
 /**
@@ -178,9 +179,6 @@ static unsigned long ksm_pages_unshared;
 /* The number of rmap_items in use: to calculate pages_volatile */
 static unsigned long ksm_rmap_items;
 
-/* Limit on the number of unswappable pages used */
-static unsigned long ksm_max_kernel_pages;
-
 /* Number of pages ksmd should scan in one batch */
 static unsigned int ksm_thread_pages_to_scan = 100;
 
@@ -515,7 +513,7 @@ static struct page *get_ksm_page(struct stable_node *stable_node)
        struct page *page;
        void *expected_mapping;
 
-       page = stable_node->page;
+       page = pfn_to_page(stable_node->kpfn);
        expected_mapping = (void *)stable_node +
                                (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
        rcu_read_lock();
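
A note on the hunk above, which is cut short at rcu_read_lock(): with only a
pfn stored in the stable_node, get_ksm_page() must re-derive the struct page
and then prove that the page still belongs to this node before handing it out.
The proof relies on a KSM page's ->mapping pointing back at its stable_node
with the PAGE_MAPPING_ANON and PAGE_MAPPING_KSM bits set, which is exactly the
expected_mapping value computed above. A plausible shape for the rest of the
lookup, as a sketch rather than the patch's verbatim code:

        rcu_read_lock();
        if (page->mapping != expected_mapping ||
            !get_page_unless_zero(page)) {
                rcu_read_unlock();
                return NULL;            /* node is stale: page freed or reused */
        }
        if (page->mapping != expected_mapping) {
                /* lost a race: the page changed identity after we pinned it */
                put_page(page);
                rcu_read_unlock();
                return NULL;
        }
        rcu_read_unlock();
        return page;
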
@@ -942,14 +940,6 @@ static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item,
 {
        int err;
 
-       /*
-        * The number of nodes in the stable tree
-        * is the number of kernel pages that we hold.
-        */
-       if (ksm_max_kernel_pages &&
-           ksm_max_kernel_pages <= ksm_pages_shared)
-               return NULL;
-
        err = try_to_merge_with_ksm_page(rmap_item, page, NULL);
        if (!err) {
                err = try_to_merge_with_ksm_page(tree_rmap_item,
@@ -973,7 +963,7 @@ static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item,
  * This function returns the stable tree node of identical content if found,
  * NULL otherwise.
  */
-static struct stable_node *stable_tree_search(struct page *page)
+static struct page *stable_tree_search(struct page *page)
 {
        struct rb_node *node = root_stable_tree.rb_node;
        struct stable_node *stable_node;
@@ -981,7 +971,7 @@ static struct stable_node *stable_tree_search(struct page *page)
        stable_node = page_stable_node(page);
        if (stable_node) {                      /* ksm page forked */
                get_page(page);
-               return stable_node;
+               return page;
        }
 
        while (node) {
@@ -1003,7 +993,7 @@ static struct stable_node *stable_tree_search(struct page *page)
                        put_page(tree_page);
                        node = node->rb_right;
                } else
-                       return stable_node;
+                       return tree_page;
        }
 
        return NULL;
@@ -1059,7 +1049,7 @@ static struct stable_node *stable_tree_insert(struct page *kpage)
 
        INIT_HLIST_HEAD(&stable_node->hlist);
 
-       stable_node->page = kpage;
+       stable_node->kpfn = page_to_pfn(kpage);
        set_page_stable_node(kpage, stable_node);
 
        return stable_node;
@@ -1170,9 +1160,8 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
        remove_rmap_item_from_tree(rmap_item);
 
        /* We first start with searching the page inside the stable tree */
-       stable_node = stable_tree_search(page);
-       if (stable_node) {
-               kpage = stable_node->page;
+       kpage = stable_tree_search(page);
+       if (kpage) {
                err = try_to_merge_with_ksm_page(rmap_item, page, kpage);
                if (!err) {
                        /*
@@ -1180,7 +1169,7 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
                         * add its rmap_item to the stable tree.
                         */
                        lock_page(kpage);
-                       stable_tree_append(rmap_item, stable_node);
+                       stable_tree_append(rmap_item, page_stable_node(kpage));
                        unlock_page(kpage);
                }
                put_page(kpage);
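
The hunks above change stable_tree_search() to hand back the ksm page itself,
with a reference held, rather than its stable_node. Nothing is lost by that: a
ksm page's ->mapping encodes its stable_node, so the node can always be
recovered from the page, which is what the page_stable_node(kpage) call above
does. A sketch of that relationship, using only the constants already visible
in this patch; stable_node_of() is a made-up name, and the real helper in
mm/ksm.c may differ in detail:

/* Sketch only: recover the stable_node from a ksm page's ->mapping. */
static inline struct stable_node *stable_node_of(struct page *page)
{
        unsigned long mapping = (unsigned long)page->mapping;

        if ((mapping & (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)) !=
            (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM))
                return NULL;            /* not a ksm page */
        return (struct stable_node *)
                (mapping & ~(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM));
}
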
@@ -1715,12 +1704,63 @@ void ksm_migrate_page(struct page *newpage, struct page *oldpage)
 
        stable_node = page_stable_node(newpage);
        if (stable_node) {
-               VM_BUG_ON(stable_node->page != oldpage);
-               stable_node->page = newpage;
+               VM_BUG_ON(stable_node->kpfn != page_to_pfn(oldpage));
+               stable_node->kpfn = page_to_pfn(newpage);
        }
 }
 #endif /* CONFIG_MIGRATION */
 
+#ifdef CONFIG_MEMORY_HOTREMOVE
+static struct stable_node *ksm_check_stable_tree(unsigned long start_pfn,
+                                                unsigned long end_pfn)
+{
+       struct rb_node *node;
+
+       for (node = rb_first(&root_stable_tree); node; node = rb_next(node)) {
+               struct stable_node *stable_node;
+
+               stable_node = rb_entry(node, struct stable_node, node);
+               if (stable_node->kpfn >= start_pfn &&
+                   stable_node->kpfn < end_pfn)
+                       return stable_node;
+       }
+       return NULL;
+}
+
+static int ksm_memory_callback(struct notifier_block *self,
+                              unsigned long action, void *arg)
+{
+       struct memory_notify *mn = arg;
+       struct stable_node *stable_node;
+
+       switch (action) {
+       case MEM_GOING_OFFLINE:
+               /*
+                * Keep it very simple for now: just lock out ksmd and
+                * MADV_UNMERGEABLE while any memory is going offline.
+                */
+               mutex_lock(&ksm_thread_mutex);
+               break;
+
+       case MEM_OFFLINE:
+               /*
+                * Most of the work is done by page migration; but there might
+                * be a few stable_nodes left over, still pointing to struct
+                * pages which have been offlined: prune those from the tree.
+                */
+               while ((stable_node = ksm_check_stable_tree(mn->start_pfn,
+                                       mn->start_pfn + mn->nr_pages)) != NULL)
+                       remove_node_from_stable_tree(stable_node);
+               /* fallthrough */
+
+       case MEM_CANCEL_OFFLINE:
+               mutex_unlock(&ksm_thread_mutex);
+               break;
+       }
+       return NOTIFY_OK;
+}
+#endif /* CONFIG_MEMORY_HOTREMOVE */
+
 #ifdef CONFIG_SYSFS
 /*
  * This all compiles without CONFIG_SYSFS, but is a waste of space.
@@ -1799,8 +1839,8 @@ static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
        /*
         * KSM_RUN_MERGE sets ksmd running, and 0 stops it running.
         * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items,
-        * breaking COW to free the unswappable pages_shared (but leaves
-        * mm_slots on the list for when ksmd may be set running again).
+        * breaking COW to free the pages_shared (but leaves mm_slots
+        * on the list for when ksmd may be set running again).
         */
 
        mutex_lock(&ksm_thread_mutex);
@@ -1825,29 +1865,6 @@ static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
 }
 KSM_ATTR(run);
 
-static ssize_t max_kernel_pages_store(struct kobject *kobj,
-                                     struct kobj_attribute *attr,
-                                     const char *buf, size_t count)
-{
-       int err;
-       unsigned long nr_pages;
-
-       err = strict_strtoul(buf, 10, &nr_pages);
-       if (err)
-               return -EINVAL;
-
-       ksm_max_kernel_pages = nr_pages;
-
-       return count;
-}
-
-static ssize_t max_kernel_pages_show(struct kobject *kobj,
-                                    struct kobj_attribute *attr, char *buf)
-{
-       return sprintf(buf, "%lu\n", ksm_max_kernel_pages);
-}
-KSM_ATTR(max_kernel_pages);
-
 static ssize_t pages_shared_show(struct kobject *kobj,
                                 struct kobj_attribute *attr, char *buf)
 {
@@ -1897,7 +1914,6 @@ static struct attribute *ksm_attrs[] = {
        &sleep_millisecs_attr.attr,
        &pages_to_scan_attr.attr,
        &run_attr.attr,
-       &max_kernel_pages_attr.attr,
        &pages_shared_attr.attr,
        &pages_sharing_attr.attr,
        &pages_unshared_attr.attr,
@@ -1917,8 +1933,6 @@ static int __init ksm_init(void)
        struct task_struct *ksm_thread;
        int err;
 
-       ksm_max_kernel_pages = totalram_pages / 4;
-
        err = ksm_slab_init();
        if (err)
                goto out;
@@ -1946,6 +1960,13 @@ static int __init ksm_init(void)
 
 #endif /* CONFIG_SYSFS */
 
+#ifdef CONFIG_MEMORY_HOTREMOVE
+       /*
+        * Choose a high priority since the callback takes ksm_thread_mutex:
+        * later callbacks could only be taking locks which nest within that.
+        */
+       hotplug_memory_notifier(ksm_memory_callback, 100);
+#endif
        return 0;
 
 out_free2:
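
On the registration added at the end of ksm_init(): notifier chains invoke
callbacks in descending priority order, so priority 100 makes KSM's callback
one of the first to see MEM_GOING_OFFLINE. Lower-priority callbacks then run
with ksm_thread_mutex already held, which is what the "locks which nest within
that" comment refers to. For illustration only, the skeleton sketched earlier
would sit behind KSM in the chain if registered with a lower priority:

        hotplug_memory_notifier(example_memory_callback, 0);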