Merge branch 'for-linus' of git://git.o-hand.com/linux-rpurdie-leds
index d4c228a..a93f1b7 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -179,9 +179,6 @@ static unsigned long ksm_pages_unshared;
 /* The number of rmap_items in use: to calculate pages_volatile */
 static unsigned long ksm_rmap_items;
 
-/* Limit on the number of unswappable pages used */
-static unsigned long ksm_max_kernel_pages;
-
 /* Number of pages ksmd should scan in one batch */
 static unsigned int ksm_thread_pages_to_scan = 100;
 
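
With KSM pages now swappable, the cap on unswappable kernel pages is dropped
wholesale: the ksm_max_kernel_pages variable here, its check in
try_to_merge_two_pages(), its sysfs knob, and its init-time default all go in
the hunks below. The counters that remain feed the pages_volatile statistic
named in the comment above; a rough illustrative sketch of that accounting
(ksm_pages_sharing and the clamp are inferred from the sysfs attributes
further down, not shown in this hunk):

	/* Illustrative only: rmap_items not yet shared, sharing, or
	 * unshared count as volatile; clamp against transient skew. */
	static long ksm_pages_volatile(void)
	{
		long volatile_pages = ksm_rmap_items - ksm_pages_shared
			- ksm_pages_sharing - ksm_pages_unshared;
		return volatile_pages > 0 ? volatile_pages : 0;
	}
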
@@ -943,14 +940,6 @@ static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item,
 {
        int err;
 
-       /*
-        * The number of nodes in the stable tree
-        * is the number of kernel pages that we hold.
-        */
-       if (ksm_max_kernel_pages &&
-           ksm_max_kernel_pages <= ksm_pages_shared)
-               return NULL;
-
        err = try_to_merge_with_ksm_page(rmap_item, page, NULL);
        if (!err) {
                err = try_to_merge_with_ksm_page(tree_rmap_item,
@@ -1574,10 +1563,12 @@ int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg,
 again:
        hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
                struct anon_vma *anon_vma = rmap_item->anon_vma;
+               struct anon_vma_chain *vmac;
                struct vm_area_struct *vma;
 
                spin_lock(&anon_vma->lock);
-               list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
+               list_for_each_entry(vmac, &anon_vma->head, same_anon_vma) {
+                       vma = vmac->vma;
                        if (rmap_item->address < vma->vm_start ||
                            rmap_item->address >= vma->vm_end)
                                continue;
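
This hunk converts the rmap walker from iterating vm_area_structs linked
directly on the anon_vma (the old anon_vma_node list) to iterating
anon_vma_chain links and dereferencing vmac->vma; the same two-line change
repeats in try_to_unmap_ksm() and rmap_walk_ksm() below. A minimal sketch of
the chain structure this assumes, with field names taken from the
list_for_each_entry() usage here (the real definition lives in
include/linux/rmap.h):

	struct anon_vma_chain {
		struct vm_area_struct *vma;	/* the VMA this link represents */
		struct anon_vma *anon_vma;	/* the anon_vma it links into */
		struct list_head same_vma;	/* anon_vmas of one VMA */
		struct list_head same_anon_vma;	/* VMAs of one anon_vma,
						   rooted at anon_vma->head */
	};

The extra indirection lets one VMA belong to several anon_vmas (and vice
versa), which is why the list node can no longer be embedded in the
vm_area_struct itself.
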
@@ -1625,10 +1616,12 @@ int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
 again:
        hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
                struct anon_vma *anon_vma = rmap_item->anon_vma;
+               struct anon_vma_chain *vmac;
                struct vm_area_struct *vma;
 
                spin_lock(&anon_vma->lock);
-               list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
+               list_for_each_entry(vmac, &anon_vma->head, same_anon_vma) {
+                       vma = vmac->vma;
                        if (rmap_item->address < vma->vm_start ||
                            rmap_item->address >= vma->vm_end)
                                continue;
@@ -1675,10 +1668,12 @@ int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
 again:
        hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
                struct anon_vma *anon_vma = rmap_item->anon_vma;
+               struct anon_vma_chain *vmac;
                struct vm_area_struct *vma;
 
                spin_lock(&anon_vma->lock);
-               list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
+               list_for_each_entry(vmac, &anon_vma->head, same_anon_vma) {
+                       vma = vmac->vma;
                        if (rmap_item->address < vma->vm_start ||
                            rmap_item->address >= vma->vm_end)
                                continue;
@@ -1850,8 +1845,8 @@ static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
        /*
         * KSM_RUN_MERGE sets ksmd running, and 0 stops it running.
         * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items,
-        * breaking COW to free the unswappable pages_shared (but leaves
-        * mm_slots on the list for when ksmd may be set running again).
+        * breaking COW to free the pages_shared (but leaves mm_slots
+        * on the list for when ksmd may be set running again).
         */
 
        mutex_lock(&ksm_thread_mutex);
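
The comment above loses "unswappable" because COW-broken pages_shared are now
ordinary swappable anonymous pages. The run modes it describes are driven
through /sys/kernel/mm/ksm/run; a minimal userspace sketch, assuming the
conventional encoding (0 stops ksmd, 1 = KSM_RUN_MERGE, 2 = KSM_RUN_UNMERGE)
and root privileges:

	#include <stdio.h>

	/* Start ksmd by writing KSM_RUN_MERGE (1) to the run attribute. */
	int main(void)
	{
		FILE *f = fopen("/sys/kernel/mm/ksm/run", "w");
		if (!f) {
			perror("open /sys/kernel/mm/ksm/run");
			return 1;
		}
		fputs("1\n", f);	/* "0" stops; "2" stops and unmerges */
		return fclose(f) ? 1 : 0;
	}
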
@@ -1876,29 +1871,6 @@ static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
 }
 KSM_ATTR(run);
 
-static ssize_t max_kernel_pages_store(struct kobject *kobj,
-                                     struct kobj_attribute *attr,
-                                     const char *buf, size_t count)
-{
-       int err;
-       unsigned long nr_pages;
-
-       err = strict_strtoul(buf, 10, &nr_pages);
-       if (err)
-               return -EINVAL;
-
-       ksm_max_kernel_pages = nr_pages;
-
-       return count;
-}
-
-static ssize_t max_kernel_pages_show(struct kobject *kobj,
-                                    struct kobj_attribute *attr, char *buf)
-{
-       return sprintf(buf, "%lu\n", ksm_max_kernel_pages);
-}
-KSM_ATTR(max_kernel_pages);
-
 static ssize_t pages_shared_show(struct kobject *kobj,
                                 struct kobj_attribute *attr, char *buf)
 {
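
Retiring the sysfs tunable takes three coordinated removals: the store/show
pair above, the KSM_ATTR(max_kernel_pages) declaration tying them together,
and the &max_kernel_pages_attr.attr slot in ksm_attrs[] in the next hunk.
KSM_ATTR() presumably builds the kobj_attribute from the _show/_store pair by
name, along these lines (the real macro is defined earlier in ksm.c; treat
this expansion as an assumption):

	/* Assumed expansion: builds <name>_attr from <name>_show/<name>_store. */
	#define KSM_ATTR(_name) \
		static struct kobj_attribute _name##_attr = \
			__ATTR(_name, 0644, _name##_show, _name##_store)

Once the entry is out of ksm_attrs[], the attribute group registered at init
time no longer exposes the file, so no ABI stub is left behind.
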
@@ -1948,7 +1920,6 @@ static struct attribute *ksm_attrs[] = {
        &sleep_millisecs_attr.attr,
        &pages_to_scan_attr.attr,
        &run_attr.attr,
-       &max_kernel_pages_attr.attr,
        &pages_shared_attr.attr,
        &pages_sharing_attr.attr,
        &pages_unshared_attr.attr,
@@ -1968,8 +1939,6 @@ static int __init ksm_init(void)
        struct task_struct *ksm_thread;
        int err;
 
-       ksm_max_kernel_pages = totalram_pages / 4;
-
        err = ksm_slab_init();
        if (err)
                goto out;