mm/ksm.c
1 /*
2  * Memory merging support.
3  *
4  * This code enables dynamic sharing of identical pages found in different
5  * memory areas, even if they are not shared by fork().
6  *
7  * Copyright (C) 2008-2009 Red Hat, Inc.
8  * Authors:
9  *      Izik Eidus
10  *      Andrea Arcangeli
11  *      Chris Wright
12  *      Hugh Dickins
13  *
14  * This work is licensed under the terms of the GNU GPL, version 2.
15  */
16
17 #include <linux/errno.h>
18 #include <linux/mm.h>
19 #include <linux/fs.h>
20 #include <linux/mman.h>
21 #include <linux/sched.h>
22 #include <linux/rwsem.h>
23 #include <linux/pagemap.h>
24 #include <linux/rmap.h>
25 #include <linux/spinlock.h>
26 #include <linux/jhash.h>
27 #include <linux/delay.h>
28 #include <linux/kthread.h>
29 #include <linux/wait.h>
30 #include <linux/slab.h>
31 #include <linux/rbtree.h>
32 #include <linux/mmu_notifier.h>
33 #include <linux/swap.h>
34 #include <linux/ksm.h>
35
36 #include <asm/tlbflush.h>
37
38 /*
39  * A few notes about the KSM scanning process,
40  * to make it easier to understand the data structures below:
41  *
42  * In order to reduce excessive scanning, KSM sorts the memory pages by their
43  * contents into a data structure that holds pointers to the pages' locations.
44  *
45  * Since the contents of the pages may change at any moment, KSM cannot just
46  * insert the pages into a normal sorted tree and expect it to find anything.
47  * Therefore KSM uses two data structures - the stable and the unstable tree.
48  *
49  * The stable tree holds pointers to all the merged pages (ksm pages), sorted
50  * by their contents.  Because each such page is write-protected, searching on
51  * this tree is fully assured to be working (except when pages are unmapped),
52  * and therefore this tree is called the stable tree.
53  *
54  * In addition to the stable tree, KSM uses a second data structure called the
55  * unstable tree: this tree holds pointers to pages which have been found to
56  * be "unchanged for a period of time".  The unstable tree sorts these pages
57  * by their contents, but since they are not write-protected, KSM cannot rely
58  * upon the unstable tree to work correctly - the unstable tree is liable to
59  * be corrupted as its contents are modified, and so it is called unstable.
60  *
61  * KSM solves this problem by several techniques:
62  *
63  * 1) The unstable tree is flushed every time KSM completes scanning all
64  *    memory areas, and then the tree is rebuilt again from the beginning.
65  * 2) KSM will only insert into the unstable tree pages whose hash value
66  *    has not changed since the previous scan of all memory areas.
67  * 3) The unstable tree is a red-black tree - so its balancing is based on the
68  *    colors of the nodes and not on their contents, assuring that even when
69  *    the tree gets "corrupted" it won't get out of balance, so scanning time
70  *    remains the same (also, searching and inserting nodes in an rbtree uses
71  *    the same algorithm, so we have no overhead when we flush and rebuild).
72  * 4) KSM never flushes the stable tree, which means that even if it were to
73  *    take 10 attempts to find a page in the unstable tree, once it is found,
74  *    it is secured in the stable tree.  (When we scan a new page, we first
75  *    compare it against the stable tree, and then against the unstable tree.)
76  */
77
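/*
 * For orientation, a minimal userspace sketch (not part of this file,
 * sizes illustrative only) of how an application opts memory into KSM:
 *
 *	#include <sys/mman.h>
 *
 *	char *buf = mmap(NULL, 16 << 20, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	madvise(buf, 16 << 20, MADV_MERGEABLE);
 *
 * Nothing is merged until ksmd is started by writing 1 to
 * /sys/kernel/mm/ksm/run (see the sysfs interface near the bottom).
 */
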
78 /**
79  * struct mm_slot - ksm information per mm that is being scanned
80  * @link: link to the mm_slots hash list
81  * @mm_list: link into the mm_slots list, rooted in ksm_mm_head
82  * @rmap_list: head for this mm_slot's list of rmap_items
83  * @mm: the mm that this information is valid for
84  */
85 struct mm_slot {
86         struct hlist_node link;
87         struct list_head mm_list;
88         struct list_head rmap_list;
89         struct mm_struct *mm;
90 };
91
92 /**
93  * struct ksm_scan - cursor for scanning
94  * @mm_slot: the current mm_slot we are scanning
95  * @address: the next address inside that to be scanned
96  * @rmap_item: the current rmap that we are scanning inside the rmap_list
97  * @seqnr: count of completed full scans (needed when removing unstable node)
98  *
99  * There is only the one ksm_scan instance of this cursor structure.
100  */
101 struct ksm_scan {
102         struct mm_slot *mm_slot;
103         unsigned long address;
104         struct rmap_item *rmap_item;
105         unsigned long seqnr;
106 };
107
108 /**
109  * struct rmap_item - reverse mapping item for virtual addresses
110  * @link: link into mm_slot's rmap_list (rmap_list is per mm)
111  * @mm: the memory structure this rmap_item is pointing into
112  * @address: the virtual address this rmap_item tracks (+ flags in low bits)
113  * @oldchecksum: previous checksum of the page at that virtual address
114  * @node: rb_node of this rmap_item in either unstable or stable tree
115  * @next: next rmap_item hanging off the same node of the stable tree
116  * @prev: previous rmap_item hanging off the same node of the stable tree
117  */
118 struct rmap_item {
119         struct list_head link;
120         struct mm_struct *mm;
121         unsigned long address;          /* + low bits used for flags below */
122         union {
123                 unsigned int oldchecksum;               /* when unstable */
124                 struct rmap_item *next;                 /* when stable */
125         };
126         union {
127                 struct rb_node node;                    /* when tree node */
128                 struct rmap_item *prev;                 /* in stable list */
129         };
130 };
131
132 #define SEQNR_MASK      0x0ff   /* low bits of unstable tree seqnr */
133 #define NODE_FLAG       0x100   /* is a node of unstable or stable tree */
134 #define STABLE_FLAG     0x200   /* is a node or list item of stable tree */
135
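/*
 * An illustrative (hypothetical) encoding of rmap_item->address: a page
 * at virtual address 0xb7706000, inserted into the unstable tree during
 * full scan number 5, is stored as 0xb7706105 - the PAGE_MASK bits give
 * back the address, NODE_FLAG records tree membership, and the
 * SEQNR_MASK bits keep the low byte of ksm_scan.seqnr, so that a node
 * left over from an earlier scan can be recognized by its "age".
 */
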
136 /* The stable and unstable tree heads */
137 static struct rb_root root_stable_tree = RB_ROOT;
138 static struct rb_root root_unstable_tree = RB_ROOT;
139
140 #define MM_SLOTS_HASH_HEADS 1024
141 static struct hlist_head *mm_slots_hash;
142
143 static struct mm_slot ksm_mm_head = {
144         .mm_list = LIST_HEAD_INIT(ksm_mm_head.mm_list),
145 };
146 static struct ksm_scan ksm_scan = {
147         .mm_slot = &ksm_mm_head,
148 };
149
150 static struct kmem_cache *rmap_item_cache;
151 static struct kmem_cache *mm_slot_cache;
152
153 /* The number of nodes in the stable tree */
154 static unsigned long ksm_pages_shared;
155
156 /* The number of page slots additionally sharing those nodes */
157 static unsigned long ksm_pages_sharing;
158
159 /* The number of nodes in the unstable tree */
160 static unsigned long ksm_pages_unshared;
161
162 /* The number of rmap_items in use: to calculate pages_volatile */
163 static unsigned long ksm_rmap_items;
164
165 /* Limit on the number of unswappable pages used */
166 static unsigned long ksm_max_kernel_pages;
167
168 /* Number of pages ksmd should scan in one batch */
169 static unsigned int ksm_thread_pages_to_scan = 100;
170
171 /* Milliseconds ksmd should sleep between batches */
172 static unsigned int ksm_thread_sleep_millisecs = 20;
173
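/*
 * Back of the envelope: with the defaults above, ksmd scans batches of
 * 100 pages with a 20ms sleep in between, i.e. at most about 5000 pages
 * or ~20MB of 4K pages per second (time spent scanning itself ignored).
 * Both values are tunable through the sysfs interface below.
 */
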
174 #define KSM_RUN_STOP    0
175 #define KSM_RUN_MERGE   1
176 #define KSM_RUN_UNMERGE 2
177 static unsigned int ksm_run = KSM_RUN_STOP;
178
179 static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
180 static DEFINE_MUTEX(ksm_thread_mutex);
181 static DEFINE_SPINLOCK(ksm_mmlist_lock);
182
183 #define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create("ksm_"#__struct,\
184                 sizeof(struct __struct), __alignof__(struct __struct),\
185                 (__flags), NULL)
186
187 static int __init ksm_slab_init(void)
188 {
189         rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0);
190         if (!rmap_item_cache)
191                 goto out;
192
193         mm_slot_cache = KSM_KMEM_CACHE(mm_slot, 0);
194         if (!mm_slot_cache)
195                 goto out_free;
196
197         return 0;
198
199 out_free:
200         kmem_cache_destroy(rmap_item_cache);
201 out:
202         return -ENOMEM;
203 }
204
205 static void __init ksm_slab_free(void)
206 {
207         kmem_cache_destroy(mm_slot_cache);
208         kmem_cache_destroy(rmap_item_cache);
209         mm_slot_cache = NULL;
210 }
211
212 static inline struct rmap_item *alloc_rmap_item(void)
213 {
214         struct rmap_item *rmap_item;
215
216         rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL);
217         if (rmap_item)
218                 ksm_rmap_items++;
219         return rmap_item;
220 }
221
222 static inline void free_rmap_item(struct rmap_item *rmap_item)
223 {
224         ksm_rmap_items--;
225         rmap_item->mm = NULL;   /* debug safety */
226         kmem_cache_free(rmap_item_cache, rmap_item);
227 }
228
229 static inline struct mm_slot *alloc_mm_slot(void)
230 {
231         if (!mm_slot_cache)     /* initialization failed */
232                 return NULL;
233         return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
234 }
235
236 static inline void free_mm_slot(struct mm_slot *mm_slot)
237 {
238         kmem_cache_free(mm_slot_cache, mm_slot);
239 }
240
241 static int __init mm_slots_hash_init(void)
242 {
243         mm_slots_hash = kzalloc(MM_SLOTS_HASH_HEADS * sizeof(struct hlist_head),
244                                 GFP_KERNEL);
245         if (!mm_slots_hash)
246                 return -ENOMEM;
247         return 0;
248 }
249
250 static void __init mm_slots_hash_free(void)
251 {
252         kfree(mm_slots_hash);
253 }
254
255 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
256 {
257         struct mm_slot *mm_slot;
258         struct hlist_head *bucket;
259         struct hlist_node *node;
260
261         bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
262                                 % MM_SLOTS_HASH_HEADS];
263         hlist_for_each_entry(mm_slot, node, bucket, link) {
264                 if (mm == mm_slot->mm)
265                         return mm_slot;
266         }
267         return NULL;
268 }
269
270 static void insert_to_mm_slots_hash(struct mm_struct *mm,
271                                     struct mm_slot *mm_slot)
272 {
273         struct hlist_head *bucket;
274
275         bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
276                                 % MM_SLOTS_HASH_HEADS];
277         mm_slot->mm = mm;
278         INIT_LIST_HEAD(&mm_slot->rmap_list);
279         hlist_add_head(&mm_slot->link, bucket);
280 }
281
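/*
 * Note on the bucket index used by get_mm_slot() and
 * insert_to_mm_slots_hash() above: mm_structs come from a slab, so
 * their addresses tend to be spaced multiples of sizeof(struct
 * mm_struct) apart; dividing by that size first spreads them more
 * evenly over the MM_SLOTS_HASH_HEADS buckets.
 */
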
282 static inline int in_stable_tree(struct rmap_item *rmap_item)
283 {
284         return rmap_item->address & STABLE_FLAG;
285 }
286
287 /*
288  * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
289  * page tables after it has passed through ksm_exit() - which, if necessary,
290  * takes mmap_sem briefly to serialize against them.  ksm_exit() does not set
291  * a special flag: they can just back out as soon as mm_users goes to zero.
292  * ksm_test_exit() is used throughout to make this test for exit: in some
293  * places for correctness, in some places just to avoid unnecessary work.
294  */
295 static inline bool ksm_test_exit(struct mm_struct *mm)
296 {
297         return atomic_read(&mm->mm_users) == 0;
298 }
299
300 /*
301  * We use break_ksm to break COW on a ksm page: it's a stripped down
302  *
303  *      if (get_user_pages(current, mm, addr, 1, 1, 1, &page, NULL) == 1)
304  *              put_page(page);
305  *
306  * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma,
307  * in case the application has unmapped and remapped mm,addr meanwhile.
308  * Could a ksm page appear anywhere else?  Actually yes, in a VM_PFNMAP
309  * mmap of /dev/mem or /dev/kmem, where we would not want to touch it.
310  */
311 static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
312 {
313         struct page *page;
314         int ret = 0;
315
316         do {
317                 cond_resched();
318                 page = follow_page(vma, addr, FOLL_GET);
319                 if (!page)
320                         break;
321                 if (PageKsm(page))
322                         ret = handle_mm_fault(vma->vm_mm, vma, addr,
323                                                         FAULT_FLAG_WRITE);
324                 else
325                         ret = VM_FAULT_WRITE;
326                 put_page(page);
327         } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM)));
328         /*
329          * We must loop because handle_mm_fault() may back out if there's
330          * any difficulty e.g. if pte accessed bit gets updated concurrently.
331          *
332          * VM_FAULT_WRITE is what we have been hoping for: it indicates that
333          * COW has been broken, even if the vma does not permit VM_WRITE;
334          * but note that a concurrent fault might break PageKsm for us.
335          *
336          * VM_FAULT_SIGBUS could occur if we race with truncation of the
337          * backing file, which also invalidates anonymous pages: that's
338          * okay, that truncation will have unmapped the PageKsm for us.
339          *
340          * VM_FAULT_OOM: at the time of writing (late July 2009), setting
341          * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
342          * current task has TIF_MEMDIE set, and will be OOM killed on return
343          * to user; and ksmd, having no mm, would never be chosen for that.
344          *
345          * But if the mm is in a limited mem_cgroup, then the fault may fail
346          * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and
347          * even ksmd can fail in this way - though it's usually breaking ksm
348          * just to undo a merge it made a moment before, so unlikely to oom.
349          *
350          * That's a pity: we might therefore have more kernel pages allocated
351          * than we're counting as nodes in the stable tree; but ksm_do_scan
352          * will retry to break_cow on each pass, so should recover the page
353          * in due course.  The important thing is to not let VM_MERGEABLE
354          * be cleared while any such pages might remain in the area.
355          */
356         return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
357 }
358
359 static void break_cow(struct mm_struct *mm, unsigned long addr)
360 {
361         struct vm_area_struct *vma;
362
363         down_read(&mm->mmap_sem);
364         if (ksm_test_exit(mm))
365                 goto out;
366         vma = find_vma(mm, addr);
367         if (!vma || vma->vm_start > addr)
368                 goto out;
369         if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
370                 goto out;
371         break_ksm(vma, addr);
372 out:
373         up_read(&mm->mmap_sem);
374 }
375
376 static struct page *get_mergeable_page(struct rmap_item *rmap_item)
377 {
378         struct mm_struct *mm = rmap_item->mm;
379         unsigned long addr = rmap_item->address;
380         struct vm_area_struct *vma;
381         struct page *page;
382
383         down_read(&mm->mmap_sem);
384         if (ksm_test_exit(mm))
385                 goto out;
386         vma = find_vma(mm, addr);
387         if (!vma || vma->vm_start > addr)
388                 goto out;
389         if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
390                 goto out;
391
392         page = follow_page(vma, addr, FOLL_GET);
393         if (!page)
394                 goto out;
395         if (PageAnon(page)) {
396                 flush_anon_page(vma, page, addr);
397                 flush_dcache_page(page);
398         } else {
399                 put_page(page);
400 out:            page = NULL;
401         }
402         up_read(&mm->mmap_sem);
403         return page;
404 }
405
406 /*
407  * get_ksm_page: checks if the page at the virtual address in rmap_item
408  * is still PageKsm, in which case we can trust the content of the page,
409  * and it returns the page it got; or NULL if the page has been zapped.
410  */
411 static struct page *get_ksm_page(struct rmap_item *rmap_item)
412 {
413         struct page *page;
414
415         page = get_mergeable_page(rmap_item);
416         if (page && !PageKsm(page)) {
417                 put_page(page);
418                 page = NULL;
419         }
420         return page;
421 }
422
423 /*
424  * Removing rmap_item from stable or unstable tree.
425  * This function will clean the information from the stable/unstable tree.
426  */
427 static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
428 {
429         if (in_stable_tree(rmap_item)) {
430                 struct rmap_item *next_item = rmap_item->next;
431
432                 if (rmap_item->address & NODE_FLAG) {
433                         if (next_item) {
434                                 rb_replace_node(&rmap_item->node,
435                                                 &next_item->node,
436                                                 &root_stable_tree);
437                                 next_item->address |= NODE_FLAG;
438                                 ksm_pages_sharing--;
439                         } else {
440                                 rb_erase(&rmap_item->node, &root_stable_tree);
441                                 ksm_pages_shared--;
442                         }
443                 } else {
444                         struct rmap_item *prev_item = rmap_item->prev;
445
446                         BUG_ON(prev_item->next != rmap_item);
447                         prev_item->next = next_item;
448                         if (next_item) {
449                                 BUG_ON(next_item->prev != rmap_item);
450                                 next_item->prev = rmap_item->prev;
451                         }
452                         ksm_pages_sharing--;
453                 }
454
455                 rmap_item->next = NULL;
456                 rmap_item->address &= PAGE_MASK;
457
458         } else if (rmap_item->address & NODE_FLAG) {
459                 unsigned char age;
460                 /*
461                  * Usually ksmd can and must skip the rb_erase, because
462                  * root_unstable_tree was already reset to RB_ROOT.
463                  * But be careful when an mm is exiting: do the rb_erase
464                  * if this rmap_item was inserted by this scan, rather
465                  * than left over from before.
466                  */
467                 age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
468                 BUG_ON(age > 1);
469                 if (!age)
470                         rb_erase(&rmap_item->node, &root_unstable_tree);
471
472                 ksm_pages_unshared--;
473                 rmap_item->address &= PAGE_MASK;
474         }
475
476         cond_resched();         /* we're called from many long loops */
477 }
478
479 static void remove_trailing_rmap_items(struct mm_slot *mm_slot,
480                                        struct list_head *cur)
481 {
482         struct rmap_item *rmap_item;
483
484         while (cur != &mm_slot->rmap_list) {
485                 rmap_item = list_entry(cur, struct rmap_item, link);
486                 cur = cur->next;
487                 remove_rmap_item_from_tree(rmap_item);
488                 list_del(&rmap_item->link);
489                 free_rmap_item(rmap_item);
490         }
491 }
492
493 /*
494  * Though it's very tempting to unmerge in_stable_tree(rmap_item)s rather
495  * than check every pte of a given vma, the locking doesn't quite work for
496  * that - an rmap_item is assigned to the stable tree after inserting ksm
497  * page and upping mmap_sem.  Nor does it fit with the way we skip dup'ing
498  * rmap_items from parent to child at fork time (so as not to waste time
499  * if exit comes before the next scan reaches it).
500  *
501  * Similarly, although we'd like to remove rmap_items (so updating counts
502  * and freeing memory) when unmerging an area, it's easier to leave that
503  * to the next pass of ksmd - consider, for example, how ksmd might be
504  * in cmp_and_merge_page on one of the rmap_items we would be removing.
505  */
506 static int unmerge_ksm_pages(struct vm_area_struct *vma,
507                              unsigned long start, unsigned long end)
508 {
509         unsigned long addr;
510         int err = 0;
511
512         for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
513                 if (ksm_test_exit(vma->vm_mm))
514                         break;
515                 if (signal_pending(current))
516                         err = -ERESTARTSYS;
517                 else
518                         err = break_ksm(vma, addr);
519         }
520         return err;
521 }
522
523 #ifdef CONFIG_SYSFS
524 /*
525  * Only called through the sysfs control interface:
526  */
527 static int unmerge_and_remove_all_rmap_items(void)
528 {
529         struct mm_slot *mm_slot;
530         struct mm_struct *mm;
531         struct vm_area_struct *vma;
532         int err = 0;
533
534         spin_lock(&ksm_mmlist_lock);
535         ksm_scan.mm_slot = list_entry(ksm_mm_head.mm_list.next,
536                                                 struct mm_slot, mm_list);
537         spin_unlock(&ksm_mmlist_lock);
538
539         for (mm_slot = ksm_scan.mm_slot;
540                         mm_slot != &ksm_mm_head; mm_slot = ksm_scan.mm_slot) {
541                 mm = mm_slot->mm;
542                 down_read(&mm->mmap_sem);
543                 for (vma = mm->mmap; vma; vma = vma->vm_next) {
544                         if (ksm_test_exit(mm))
545                                 break;
546                         if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
547                                 continue;
548                         err = unmerge_ksm_pages(vma,
549                                                 vma->vm_start, vma->vm_end);
550                         if (err)
551                                 goto error;
552                 }
553
554                 remove_trailing_rmap_items(mm_slot, mm_slot->rmap_list.next);
555
556                 spin_lock(&ksm_mmlist_lock);
557                 ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
558                                                 struct mm_slot, mm_list);
559                 if (ksm_test_exit(mm)) {
560                         hlist_del(&mm_slot->link);
561                         list_del(&mm_slot->mm_list);
562                         spin_unlock(&ksm_mmlist_lock);
563
564                         free_mm_slot(mm_slot);
565                         clear_bit(MMF_VM_MERGEABLE, &mm->flags);
566                         up_read(&mm->mmap_sem);
567                         mmdrop(mm);
568                 } else {
569                         spin_unlock(&ksm_mmlist_lock);
570                         up_read(&mm->mmap_sem);
571                 }
572         }
573
574         ksm_scan.seqnr = 0;
575         return 0;
576
577 error:
578         up_read(&mm->mmap_sem);
579         spin_lock(&ksm_mmlist_lock);
580         ksm_scan.mm_slot = &ksm_mm_head;
581         spin_unlock(&ksm_mmlist_lock);
582         return err;
583 }
584 #endif /* CONFIG_SYSFS */
585
586 static u32 calc_checksum(struct page *page)
587 {
588         u32 checksum;
589         void *addr = kmap_atomic(page, KM_USER0);
590         checksum = jhash2(addr, PAGE_SIZE / 4, 17);
591         kunmap_atomic(addr, KM_USER0);
592         return checksum;
593 }
594
595 static int memcmp_pages(struct page *page1, struct page *page2)
596 {
597         char *addr1, *addr2;
598         int ret;
599
600         addr1 = kmap_atomic(page1, KM_USER0);
601         addr2 = kmap_atomic(page2, KM_USER1);
602         ret = memcmp(addr1, addr2, PAGE_SIZE);
603         kunmap_atomic(addr2, KM_USER1);
604         kunmap_atomic(addr1, KM_USER0);
605         return ret;
606 }
607
608 static inline int pages_identical(struct page *page1, struct page *page2)
609 {
610         return !memcmp_pages(page1, page2);
611 }
612
613 static int write_protect_page(struct vm_area_struct *vma, struct page *page,
614                               pte_t *orig_pte)
615 {
616         struct mm_struct *mm = vma->vm_mm;
617         unsigned long addr;
618         pte_t *ptep;
619         spinlock_t *ptl;
620         int swapped;
621         int err = -EFAULT;
622
623         addr = page_address_in_vma(page, vma);
624         if (addr == -EFAULT)
625                 goto out;
626
627         ptep = page_check_address(page, mm, addr, &ptl, 0);
628         if (!ptep)
629                 goto out;
630
631         if (pte_write(*ptep)) {
632                 pte_t entry;
633
634                 swapped = PageSwapCache(page);
635                 flush_cache_page(vma, addr, page_to_pfn(page));
636                 /*
637                  * Ok, this is tricky: when get_user_pages_fast() runs it doesn't
638                  * take any lock, so the check we are about to make, comparing
639                  * the page count against the map count, is racy: O_DIRECT can
640                  * happen right after the check.
641                  * So we clear the pte and flush the tlb before the check;
642                  * this assures us that no O_DIRECT can begin after the check
643                  * or in the middle of the check.
644                  */
645                 entry = ptep_clear_flush(vma, addr, ptep);
646                 /*
647                  * Check that no O_DIRECT or similar I/O is in progress on the
648                  * page
649                  */
650                 if ((page_mapcount(page) + 2 + swapped) != page_count(page)) {
651                         set_pte_at_notify(mm, addr, ptep, entry);
652                         goto out_unlock;
653                 }
654                 entry = pte_wrprotect(entry);
655                 set_pte_at_notify(mm, addr, ptep, entry);
656         }
657         *orig_pte = *ptep;
658         err = 0;
659
660 out_unlock:
661         pte_unmap_unlock(ptep, ptl);
662 out:
663         return err;
664 }
665
666 /**
667  * replace_page - replace page in vma by new ksm page
668  * @vma:      vma that holds the pte pointing to oldpage
669  * @oldpage:  the page we are replacing by newpage
670  * @newpage:  the ksm page we replace oldpage by
671  * @orig_pte: the original value of the pte
672  *
673  * Returns 0 on success, -EFAULT on failure.
674  */
675 static int replace_page(struct vm_area_struct *vma, struct page *oldpage,
676                         struct page *newpage, pte_t orig_pte)
677 {
678         struct mm_struct *mm = vma->vm_mm;
679         pgd_t *pgd;
680         pud_t *pud;
681         pmd_t *pmd;
682         pte_t *ptep;
683         spinlock_t *ptl;
684         unsigned long addr;
685         pgprot_t prot;
686         int err = -EFAULT;
687
688         prot = vm_get_page_prot(vma->vm_flags & ~VM_WRITE);
689
690         addr = page_address_in_vma(oldpage, vma);
691         if (addr == -EFAULT)
692                 goto out;
693
694         pgd = pgd_offset(mm, addr);
695         if (!pgd_present(*pgd))
696                 goto out;
697
698         pud = pud_offset(pgd, addr);
699         if (!pud_present(*pud))
700                 goto out;
701
702         pmd = pmd_offset(pud, addr);
703         if (!pmd_present(*pmd))
704                 goto out;
705
706         ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
707         if (!pte_same(*ptep, orig_pte)) {
708                 pte_unmap_unlock(ptep, ptl);
709                 goto out;
710         }
711
712         get_page(newpage);
713         page_add_ksm_rmap(newpage);
714
715         flush_cache_page(vma, addr, pte_pfn(*ptep));
716         ptep_clear_flush(vma, addr, ptep);
717         set_pte_at_notify(mm, addr, ptep, mk_pte(newpage, prot));
718
719         page_remove_rmap(oldpage);
720         put_page(oldpage);
721
722         pte_unmap_unlock(ptep, ptl);
723         err = 0;
724 out:
725         return err;
726 }
727
728 /*
729  * try_to_merge_one_page - take two pages and merge them into one
730  * @vma: the vma that holds the pte pointing into oldpage
731  * @oldpage: the page that we want to replace with newpage
732  * @newpage: the page that we want to map instead of oldpage
733  *
734  * Note:
735  * oldpage should be a PageAnon page, while newpage should be a PageKsm page,
736  * or a newly allocated kernel page which page_add_ksm_rmap will make PageKsm.
737  *
738  * This function returns 0 if the pages were merged, -EFAULT otherwise.
739  */
740 static int try_to_merge_one_page(struct vm_area_struct *vma,
741                                  struct page *oldpage,
742                                  struct page *newpage)
743 {
744         pte_t orig_pte = __pte(0);
745         int err = -EFAULT;
746
747         if (!(vma->vm_flags & VM_MERGEABLE))
748                 goto out;
749
750         if (!PageAnon(oldpage))
751                 goto out;
752
753         get_page(newpage);
754         get_page(oldpage);
755
756         /*
757          * We need the page lock to read a stable PageSwapCache in
758          * write_protect_page().  We use trylock_page() instead of
759          * lock_page() because we don't want to wait here - we
760          * prefer to continue scanning and merging different pages,
761          * then come back to this page when it is unlocked.
762          */
763         if (!trylock_page(oldpage))
764                 goto out_putpage;
765         /*
766          * If this anonymous page is mapped only here, its pte may need
767          * to be write-protected.  If it's mapped elsewhere, all of its
768          * ptes are necessarily already write-protected.  But in either
769          * case, we need to lock and check page_count is not raised.
770          */
771         if (write_protect_page(vma, oldpage, &orig_pte)) {
772                 unlock_page(oldpage);
773                 goto out_putpage;
774         }
775         unlock_page(oldpage);
776
777         if (pages_identical(oldpage, newpage))
778                 err = replace_page(vma, oldpage, newpage, orig_pte);
779
780 out_putpage:
781         put_page(oldpage);
782         put_page(newpage);
783 out:
784         return err;
785 }
786
787 /*
788  * try_to_merge_with_ksm_page - like try_to_merge_two_pages,
789  * but no new kernel page is allocated: kpage must already be a ksm page.
790  */
791 static int try_to_merge_with_ksm_page(struct mm_struct *mm1,
792                                       unsigned long addr1,
793                                       struct page *page1,
794                                       struct page *kpage)
795 {
796         struct vm_area_struct *vma;
797         int err = -EFAULT;
798
799         down_read(&mm1->mmap_sem);
800         if (ksm_test_exit(mm1))
801                 goto out;
802
803         vma = find_vma(mm1, addr1);
804         if (!vma || vma->vm_start > addr1)
805                 goto out;
806
807         err = try_to_merge_one_page(vma, page1, kpage);
808 out:
809         up_read(&mm1->mmap_sem);
810         return err;
811 }
812
813 /*
814  * try_to_merge_two_pages - take two identical pages and prepare them
815  * to be merged into one page.
816  *
817  * This function returns 0 if we successfully mapped two identical pages
818  * into one page, -EFAULT otherwise.
819  *
820  * Note that this function allocates a new kernel page: if one of the pages
821  * is already a ksm page, try_to_merge_with_ksm_page should be used.
822  */
823 static int try_to_merge_two_pages(struct mm_struct *mm1, unsigned long addr1,
824                                   struct page *page1, struct mm_struct *mm2,
825                                   unsigned long addr2, struct page *page2)
826 {
827         struct vm_area_struct *vma;
828         struct page *kpage;
829         int err = -EFAULT;
830
831         /*
832          * The number of nodes in the stable tree
833          * is the number of kernel pages that we hold.
834          */
835         if (ksm_max_kernel_pages &&
836             ksm_max_kernel_pages <= ksm_pages_shared)
837                 return err;
838
839         kpage = alloc_page(GFP_HIGHUSER);
840         if (!kpage)
841                 return err;
842
843         down_read(&mm1->mmap_sem);
844         if (ksm_test_exit(mm1)) {
845                 up_read(&mm1->mmap_sem);
846                 goto out;
847         }
848         vma = find_vma(mm1, addr1);
849         if (!vma || vma->vm_start > addr1) {
850                 up_read(&mm1->mmap_sem);
851                 goto out;
852         }
853
854         copy_user_highpage(kpage, page1, addr1, vma);
855         err = try_to_merge_one_page(vma, page1, kpage);
856         up_read(&mm1->mmap_sem);
857
858         if (!err) {
859                 err = try_to_merge_with_ksm_page(mm2, addr2, page2, kpage);
860                 /*
861                  * If that fails, we have a ksm page with only one pte
862                  * pointing to it: so break it.
863                  */
864                 if (err)
865                         break_cow(mm1, addr1);
866         }
867 out:
868         put_page(kpage);
869         return err;
870 }
871
872 /*
873  * stable_tree_search - search page inside the stable tree
874  * @page: the page we are searching for identical pages to.
875  * @page2: out pointer, set to the identical page we found (and hold)
876  *         inside the stable tree.
877  * @rmap_item: the reverse mapping item
878  *
879  * This function checks if there is a page inside the stable tree
880  * with identical content to the page that we are scanning right now.
881  *
882  * This function returns a pointer to the identical rmap_item if found,
883  * NULL otherwise.
884  */
885 static struct rmap_item *stable_tree_search(struct page *page,
886                                             struct page **page2,
887                                             struct rmap_item *rmap_item)
888 {
889         struct rb_node *node = root_stable_tree.rb_node;
890
891         while (node) {
892                 struct rmap_item *tree_rmap_item, *next_rmap_item;
893                 int ret;
894
895                 tree_rmap_item = rb_entry(node, struct rmap_item, node);
896                 while (tree_rmap_item) {
897                         BUG_ON(!in_stable_tree(tree_rmap_item));
898                         cond_resched();
899                         page2[0] = get_ksm_page(tree_rmap_item);
900                         if (page2[0])
901                                 break;
902                         next_rmap_item = tree_rmap_item->next;
903                         remove_rmap_item_from_tree(tree_rmap_item);
904                         tree_rmap_item = next_rmap_item;
905                 }
906                 if (!tree_rmap_item)
907                         return NULL;
908
909                 ret = memcmp_pages(page, page2[0]);
910
911                 if (ret < 0) {
912                         put_page(page2[0]);
913                         node = node->rb_left;
914                 } else if (ret > 0) {
915                         put_page(page2[0]);
916                         node = node->rb_right;
917                 } else {
918                         return tree_rmap_item;
919                 }
920         }
921
922         return NULL;
923 }
924
925 /*
926  * stable_tree_insert - insert rmap_item pointing to new ksm page
927  * into the stable tree.
928  *
929  * @page: the page for which we search an identical page inside the stable
930  *        tree.
931  * @rmap_item: pointer to the reverse mapping item.
932  *
933  * This function returns rmap_item on success, NULL otherwise.
934  */
935 static struct rmap_item *stable_tree_insert(struct page *page,
936                                             struct rmap_item *rmap_item)
937 {
938         struct rb_node **new = &root_stable_tree.rb_node;
939         struct rb_node *parent = NULL;
940
941         while (*new) {
942                 struct rmap_item *tree_rmap_item, *next_rmap_item;
943                 struct page *tree_page;
944                 int ret;
945
946                 tree_rmap_item = rb_entry(*new, struct rmap_item, node);
947                 while (tree_rmap_item) {
948                         BUG_ON(!in_stable_tree(tree_rmap_item));
949                         cond_resched();
950                         tree_page = get_ksm_page(tree_rmap_item);
951                         if (tree_page)
952                                 break;
953                         next_rmap_item = tree_rmap_item->next;
954                         remove_rmap_item_from_tree(tree_rmap_item);
955                         tree_rmap_item = next_rmap_item;
956                 }
957                 if (!tree_rmap_item)
958                         return NULL;
959
960                 ret = memcmp_pages(page, tree_page);
961                 put_page(tree_page);
962
963                 parent = *new;
964                 if (ret < 0)
965                         new = &parent->rb_left;
966                 else if (ret > 0)
967                         new = &parent->rb_right;
968                 else {
969                         /*
970                          * It is not a bug that stable_tree_search() didn't
971                          * find this node: because at that time our page was
972                          * not yet write-protected, so may have changed since.
973                          */
974                         return NULL;
975                 }
976         }
977
978         rmap_item->address |= NODE_FLAG | STABLE_FLAG;
979         rmap_item->next = NULL;
980         rb_link_node(&rmap_item->node, parent, new);
981         rb_insert_color(&rmap_item->node, &root_stable_tree);
982
983         ksm_pages_shared++;
984         return rmap_item;
985 }
986
987 /*
988  * unstable_tree_search_insert - search and insert items into the unstable tree.
989  *
990  * @page: the page for which we search an identical page, or which we
991  *        insert into the unstable tree if none is found
992  * @page2: out pointer, set to the identical page found inside the unstable tree
993  * @rmap_item: the reverse mapping item of page
994  *
995  * This function searches for a page in the unstable tree identical to the
996  * page currently being scanned; and if no identical page is found in the
997  * tree, we insert rmap_item as a new object into the unstable tree.
998  *
999  * This function returns a pointer to the rmap_item found to be identical
1000  * to the currently scanned page, NULL otherwise.
1001  *
1002  * This function does both searching and inserting, because they share
1003  * the same walking algorithm in an rbtree.
1004  */
1005 static struct rmap_item *unstable_tree_search_insert(struct page *page,
1006                                                 struct page **page2,
1007                                                 struct rmap_item *rmap_item)
1008 {
1009         struct rb_node **new = &root_unstable_tree.rb_node;
1010         struct rb_node *parent = NULL;
1011
1012         while (*new) {
1013                 struct rmap_item *tree_rmap_item;
1014                 int ret;
1015
1016                 cond_resched();
1017                 tree_rmap_item = rb_entry(*new, struct rmap_item, node);
1018                 page2[0] = get_mergeable_page(tree_rmap_item);
1019                 if (!page2[0])
1020                         return NULL;
1021
1022                 /*
1023                  * Don't substitute an unswappable ksm page
1024                  * just for one good swappable forked page.
1025                  */
1026                 if (page == page2[0]) {
1027                         put_page(page2[0]);
1028                         return NULL;
1029                 }
1030
1031                 ret = memcmp_pages(page, page2[0]);
1032
1033                 parent = *new;
1034                 if (ret < 0) {
1035                         put_page(page2[0]);
1036                         new = &parent->rb_left;
1037                 } else if (ret > 0) {
1038                         put_page(page2[0]);
1039                         new = &parent->rb_right;
1040                 } else {
1041                         return tree_rmap_item;
1042                 }
1043         }
1044
1045         rmap_item->address |= NODE_FLAG;
1046         rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK);
1047         rb_link_node(&rmap_item->node, parent, new);
1048         rb_insert_color(&rmap_item->node, &root_unstable_tree);
1049
1050         ksm_pages_unshared++;
1051         return NULL;
1052 }
1053
1054 /*
1055  * stable_tree_append - add another rmap_item to the linked list of
1056  * rmap_items hanging off a given node of the stable tree, all sharing
1057  * the same ksm page.
1058  */
1059 static void stable_tree_append(struct rmap_item *rmap_item,
1060                                struct rmap_item *tree_rmap_item)
1061 {
1062         rmap_item->next = tree_rmap_item->next;
1063         rmap_item->prev = tree_rmap_item;
1064
1065         if (tree_rmap_item->next)
1066                 tree_rmap_item->next->prev = rmap_item;
1067
1068         tree_rmap_item->next = rmap_item;
1069         rmap_item->address |= STABLE_FLAG;
1070
1071         ksm_pages_sharing++;
1072 }
1073
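/*
 * Illustration of the list built by stable_tree_append(): if node A
 * first gains sharer B and then sharer C, the result is
 *
 *	rb_node(A): A->next = C, C->prev = A, C->next = B, B->prev = C
 *
 * i.e. each new sharer is linked in directly behind the node item, so
 * appending is O(1) and only the node item needs to carry an rb_node.
 */
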
1074 /*
1075  * cmp_and_merge_page - first see if page can be merged into the stable tree;
1076  * if not, compare checksum to previous and if it's the same, see if page can
1077  * be inserted into the unstable tree, or merged with a page already there and
1078  * both transferred to the stable tree.
1079  *
1080  * @page: the page we are searching for an identical page to.
1081  * @rmap_item: the reverse mapping into the virtual address of this page
1082  */
1083 static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
1084 {
1085         struct page *page2[1];
1086         struct rmap_item *tree_rmap_item;
1087         unsigned int checksum;
1088         int err;
1089
1090         remove_rmap_item_from_tree(rmap_item);
1091
1092         /* We first start with searching the page inside the stable tree */
1093         tree_rmap_item = stable_tree_search(page, page2, rmap_item);
1094         if (tree_rmap_item) {
1095                 if (page == page2[0])                   /* forked */
1096                         err = 0;
1097                 else
1098                         err = try_to_merge_with_ksm_page(rmap_item->mm,
1099                                                          rmap_item->address,
1100                                                          page, page2[0]);
1101                 put_page(page2[0]);
1102
1103                 if (!err) {
1104                         /*
1105                          * The page was successfully merged:
1106                          * add its rmap_item to the stable tree.
1107                          */
1108                         stable_tree_append(rmap_item, tree_rmap_item);
1109                 }
1110                 return;
1111         }
1112
1113         /*
1114          * A ksm page might have got here by fork, but its other
1115          * references have already been removed from the stable tree.
1116          * Or it might be left over from a break_ksm which failed
1117          * when the mem_cgroup had reached its limit: try again now.
1118          */
1119         if (PageKsm(page))
1120                 break_cow(rmap_item->mm, rmap_item->address);
1121
1122          * If the hash value of the page has changed since the last time we
1123          * calculated it, this page is changing frequently, so we don't
1124          * want to insert it into the unstable tree, and we don't want to
1125          * waste our time searching for something identical to it there.
1126          * waste our time to search if there is something identical to it there.
1127          */
1128         checksum = calc_checksum(page);
1129         if (rmap_item->oldchecksum != checksum) {
1130                 rmap_item->oldchecksum = checksum;
1131                 return;
1132         }
1133
1134         tree_rmap_item = unstable_tree_search_insert(page, page2, rmap_item);
1135         if (tree_rmap_item) {
1136                 err = try_to_merge_two_pages(rmap_item->mm,
1137                                              rmap_item->address, page,
1138                                              tree_rmap_item->mm,
1139                                              tree_rmap_item->address, page2[0]);
1140                 /*
1141                  * As soon as we merge this page, we want to remove the
1142                  * rmap_item of the page we have merged with from the unstable
1143                  * tree, and insert it instead as new node in the stable tree.
1144                  */
1145                 if (!err) {
1146                         remove_rmap_item_from_tree(tree_rmap_item);
1147
1148                         /*
1149                          * If we fail to insert the page into the stable tree,
1150                          * we will have 2 virtual addresses that are pointing
1151                          * to a ksm page left outside the stable tree,
1152                          * in which case we need to break_cow on both.
1153                          */
1154                         if (stable_tree_insert(page2[0], tree_rmap_item))
1155                                 stable_tree_append(rmap_item, tree_rmap_item);
1156                         else {
1157                                 break_cow(tree_rmap_item->mm,
1158                                                 tree_rmap_item->address);
1159                                 break_cow(rmap_item->mm, rmap_item->address);
1160                         }
1161                 }
1162
1163                 put_page(page2[0]);
1164         }
1165 }
1166
1167 static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot,
1168                                             struct list_head *cur,
1169                                             unsigned long addr)
1170 {
1171         struct rmap_item *rmap_item;
1172
1173         while (cur != &mm_slot->rmap_list) {
1174                 rmap_item = list_entry(cur, struct rmap_item, link);
1175                 if ((rmap_item->address & PAGE_MASK) == addr)
1176                         return rmap_item;
1177                 if (rmap_item->address > addr)
1178                         break;
1179                 cur = cur->next;
1180                 remove_rmap_item_from_tree(rmap_item);
1181                 list_del(&rmap_item->link);
1182                 free_rmap_item(rmap_item);
1183         }
1184
1185         rmap_item = alloc_rmap_item();
1186         if (rmap_item) {
1187                 /* It has already been zeroed */
1188                 rmap_item->mm = mm_slot->mm;
1189                 rmap_item->address = addr;
1190                 list_add_tail(&rmap_item->link, cur);
1191         }
1192         return rmap_item;
1193 }
1194
1195 static struct rmap_item *scan_get_next_rmap_item(struct page **page)
1196 {
1197         struct mm_struct *mm;
1198         struct mm_slot *slot;
1199         struct vm_area_struct *vma;
1200         struct rmap_item *rmap_item;
1201
1202         if (list_empty(&ksm_mm_head.mm_list))
1203                 return NULL;
1204
1205         slot = ksm_scan.mm_slot;
1206         if (slot == &ksm_mm_head) {
1207                 root_unstable_tree = RB_ROOT;
1208
1209                 spin_lock(&ksm_mmlist_lock);
1210                 slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list);
1211                 ksm_scan.mm_slot = slot;
1212                 spin_unlock(&ksm_mmlist_lock);
1213 next_mm:
1214                 ksm_scan.address = 0;
1215                 ksm_scan.rmap_item = list_entry(&slot->rmap_list,
1216                                                 struct rmap_item, link);
1217         }
1218
1219         mm = slot->mm;
1220         down_read(&mm->mmap_sem);
1221         if (ksm_test_exit(mm))
1222                 vma = NULL;
1223         else
1224                 vma = find_vma(mm, ksm_scan.address);
1225
1226         for (; vma; vma = vma->vm_next) {
1227                 if (!(vma->vm_flags & VM_MERGEABLE))
1228                         continue;
1229                 if (ksm_scan.address < vma->vm_start)
1230                         ksm_scan.address = vma->vm_start;
1231                 if (!vma->anon_vma)
1232                         ksm_scan.address = vma->vm_end;
1233
1234                 while (ksm_scan.address < vma->vm_end) {
1235                         if (ksm_test_exit(mm))
1236                                 break;
1237                         *page = follow_page(vma, ksm_scan.address, FOLL_GET);
1238                         if (*page && PageAnon(*page)) {
1239                                 flush_anon_page(vma, *page, ksm_scan.address);
1240                                 flush_dcache_page(*page);
1241                                 rmap_item = get_next_rmap_item(slot,
1242                                         ksm_scan.rmap_item->link.next,
1243                                         ksm_scan.address);
1244                                 if (rmap_item) {
1245                                         ksm_scan.rmap_item = rmap_item;
1246                                         ksm_scan.address += PAGE_SIZE;
1247                                 } else
1248                                         put_page(*page);
1249                                 up_read(&mm->mmap_sem);
1250                                 return rmap_item;
1251                         }
1252                         if (*page)
1253                                 put_page(*page);
1254                         ksm_scan.address += PAGE_SIZE;
1255                         cond_resched();
1256                 }
1257         }
1258
1259         if (ksm_test_exit(mm)) {
1260                 ksm_scan.address = 0;
1261                 ksm_scan.rmap_item = list_entry(&slot->rmap_list,
1262                                                 struct rmap_item, link);
1263         }
1264         /*
1265          * Nuke all the rmap_items that are above this current rmap:
1266          * because there were no VM_MERGEABLE vmas with such addresses.
1267          */
1268         remove_trailing_rmap_items(slot, ksm_scan.rmap_item->link.next);
1269
1270         spin_lock(&ksm_mmlist_lock);
1271         ksm_scan.mm_slot = list_entry(slot->mm_list.next,
1272                                                 struct mm_slot, mm_list);
1273         if (ksm_scan.address == 0) {
1274                 /*
1275                  * We've completed a full scan of all vmas, holding mmap_sem
1276                  * throughout, and found no VM_MERGEABLE: so do the same as
1277                  * __ksm_exit does to remove this mm from all our lists now.
1278                  * This applies either when cleaning up after __ksm_exit
1279                  * (but beware: we can reach here even before __ksm_exit),
1280                  * or when all VM_MERGEABLE areas have been unmapped (and
1281                  * mmap_sem then protects against race with MADV_MERGEABLE).
1282                  */
1283                 hlist_del(&slot->link);
1284                 list_del(&slot->mm_list);
1285                 spin_unlock(&ksm_mmlist_lock);
1286
1287                 free_mm_slot(slot);
1288                 clear_bit(MMF_VM_MERGEABLE, &mm->flags);
1289                 up_read(&mm->mmap_sem);
1290                 mmdrop(mm);
1291         } else {
1292                 spin_unlock(&ksm_mmlist_lock);
1293                 up_read(&mm->mmap_sem);
1294         }
1295
1296         /* Repeat until we've completed scanning the whole list */
1297         slot = ksm_scan.mm_slot;
1298         if (slot != &ksm_mm_head)
1299                 goto next_mm;
1300
1301         ksm_scan.seqnr++;
1302         return NULL;
1303 }
1304
1305 /**
1306  * ksm_do_scan - the ksm scanner main worker function.
1307  * @scan_npages: number of pages we want to scan before we return.
1308  */
1309 static void ksm_do_scan(unsigned int scan_npages)
1310 {
1311         struct rmap_item *rmap_item;
1312         struct page *page;
1313
1314         while (scan_npages--) {
1315                 cond_resched();
1316                 rmap_item = scan_get_next_rmap_item(&page);
1317                 if (!rmap_item)
1318                         return;
1319                 if (!PageKsm(page) || !in_stable_tree(rmap_item))
1320                         cmp_and_merge_page(page, rmap_item);
1321                 else if (page_mapcount(page) == 1) {
1322                         /*
1323                          * Replace now-unshared ksm page by ordinary page.
1324                          */
1325                         break_cow(rmap_item->mm, rmap_item->address);
1326                         remove_rmap_item_from_tree(rmap_item);
1327                         rmap_item->oldchecksum = calc_checksum(page);
1328                 }
1329                 put_page(page);
1330         }
1331 }
1332
1333 static int ksmd_should_run(void)
1334 {
1335         return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.mm_list);
1336 }
1337
1338 static int ksm_scan_thread(void *nothing)
1339 {
1340         set_user_nice(current, 5);
1341
1342         while (!kthread_should_stop()) {
1343                 mutex_lock(&ksm_thread_mutex);
1344                 if (ksmd_should_run())
1345                         ksm_do_scan(ksm_thread_pages_to_scan);
1346                 mutex_unlock(&ksm_thread_mutex);
1347
1348                 if (ksmd_should_run()) {
1349                         schedule_timeout_interruptible(
1350                                 msecs_to_jiffies(ksm_thread_sleep_millisecs));
1351                 } else {
1352                         wait_event_interruptible(ksm_thread_wait,
1353                                 ksmd_should_run() || kthread_should_stop());
1354                 }
1355         }
1356         return 0;
1357 }
1358
1359 int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
1360                 unsigned long end, int advice, unsigned long *vm_flags)
1361 {
1362         struct mm_struct *mm = vma->vm_mm;
1363         int err;
1364
1365         switch (advice) {
1366         case MADV_MERGEABLE:
1367                 /*
1368                  * Be somewhat over-protective for now!
1369                  */
1370                 if (*vm_flags & (VM_MERGEABLE | VM_SHARED  | VM_MAYSHARE   |
1371                                  VM_PFNMAP    | VM_IO      | VM_DONTEXPAND |
1372                                  VM_RESERVED  | VM_HUGETLB | VM_INSERTPAGE |
1373                                  VM_MIXEDMAP  | VM_SAO))
1374                         return 0;               /* just ignore the advice */
1375
1376                 if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
1377                         err = __ksm_enter(mm);
1378                         if (err)
1379                                 return err;
1380                 }
1381
1382                 *vm_flags |= VM_MERGEABLE;
1383                 break;
1384
1385         case MADV_UNMERGEABLE:
1386                 if (!(*vm_flags & VM_MERGEABLE))
1387                         return 0;               /* just ignore the advice */
1388
1389                 if (vma->anon_vma) {
1390                         err = unmerge_ksm_pages(vma, start, end);
1391                         if (err)
1392                                 return err;
1393                 }
1394
1395                 *vm_flags &= ~VM_MERGEABLE;
1396                 break;
1397         }
1398
1399         return 0;
1400 }

int __ksm_enter(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int needs_wakeup;

	mm_slot = alloc_mm_slot();
	if (!mm_slot)
		return -ENOMEM;

	/* Check ksm_run too?  Would need tighter locking */
	needs_wakeup = list_empty(&ksm_mm_head.mm_list);

	spin_lock(&ksm_mmlist_lock);
	insert_to_mm_slots_hash(mm, mm_slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little; when fork is followed by immediate exec, we don't
	 * want ksmd to waste time setting up and tearing down an rmap_list.
	 */
	list_add_tail(&mm_slot->mm_list, &ksm_scan.mm_slot->mm_list);
	spin_unlock(&ksm_mmlist_lock);

	set_bit(MMF_VM_MERGEABLE, &mm->flags);
	atomic_inc(&mm->mm_count);

	if (needs_wakeup)
		wake_up_interruptible(&ksm_thread_wait);

	return 0;
}

void __ksm_exit(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int easy_to_free = 0;

	/*
	 * This process is exiting: if it's straightforward (as is the
	 * case when ksmd was never running), free mm_slot immediately.
	 * But if it's at the cursor or has rmap_items linked to it, use
	 * mmap_sem to synchronize with any break_cows before pagetables
	 * are freed, and leave the mm_slot on the list for ksmd to free.
	 * Beware: ksm may already have noticed it exiting and freed the slot.
	 */

	spin_lock(&ksm_mmlist_lock);
	mm_slot = get_mm_slot(mm);
	if (mm_slot && ksm_scan.mm_slot != mm_slot) {
		if (list_empty(&mm_slot->rmap_list)) {
			hlist_del(&mm_slot->link);
			list_del(&mm_slot->mm_list);
			easy_to_free = 1;
		} else {
			list_move(&mm_slot->mm_list,
				  &ksm_scan.mm_slot->mm_list);
		}
	}
	spin_unlock(&ksm_mmlist_lock);

	if (easy_to_free) {
		free_mm_slot(mm_slot);
		clear_bit(MMF_VM_MERGEABLE, &mm->flags);
		mmdrop(mm);
	} else if (mm_slot) {
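		/*
		 * Empty lock/unlock pair: acts purely as a barrier,
		 * waiting out any break_cow() still running under
		 * mmap_sem before exit_mmap() frees the page tables.
		 */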
		down_write(&mm->mmap_sem);
		up_write(&mm->mmap_sem);
	}
}

#ifdef CONFIG_SYSFS
/*
 * This all compiles without CONFIG_SYSFS, but is a waste of space.
 */

#define KSM_ATTR_RO(_name) \
	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
#define KSM_ATTR(_name) \
	static struct kobj_attribute _name##_attr = \
		__ATTR(_name, 0644, _name##_show, _name##_store)
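
/*
 * For example, KSM_ATTR(run) below declares a kobj_attribute run_attr
 * wired to run_show()/run_store(); once ksm_attr_group is registered,
 * it appears to userspace as /sys/kernel/mm/ksm/run (mode 0644).
 */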

static ssize_t sleep_millisecs_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", ksm_thread_sleep_millisecs);
}

static ssize_t sleep_millisecs_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = strict_strtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	ksm_thread_sleep_millisecs = msecs;

	return count;
}
KSM_ATTR(sleep_millisecs);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", ksm_thread_pages_to_scan);
}

static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long nr_pages;

	err = strict_strtoul(buf, 10, &nr_pages);
	if (err || nr_pages > UINT_MAX)
		return -EINVAL;

	ksm_thread_pages_to_scan = nr_pages;

	return count;
}
KSM_ATTR(pages_to_scan);
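
/*
 * Illustrative tuning from a shell (values are examples only):
 *
 *	echo 200 > /sys/kernel/mm/ksm/pages_to_scan
 *	echo 50  > /sys/kernel/mm/ksm/sleep_millisecs
 *
 * scans larger batches per wake-up while sleeping longer in between.
 */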

static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr,
			char *buf)
{
	return sprintf(buf, "%u\n", ksm_run);
}

static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
			 const char *buf, size_t count)
{
	int err;
	unsigned long flags;

	err = strict_strtoul(buf, 10, &flags);
	if (err || flags > UINT_MAX)
		return -EINVAL;
	if (flags > KSM_RUN_UNMERGE)
		return -EINVAL;

	/*
	 * KSM_RUN_MERGE sets ksmd running, and 0 stops it running.
	 * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items,
	 * breaking COW to free the unswappable pages_shared (but leaves
	 * mm_slots on the list for when ksmd may be set running again).
	 */

	mutex_lock(&ksm_thread_mutex);
	if (ksm_run != flags) {
		ksm_run = flags;
		if (flags & KSM_RUN_UNMERGE) {
			current->flags |= PF_OOM_ORIGIN;
			err = unmerge_and_remove_all_rmap_items();
			current->flags &= ~PF_OOM_ORIGIN;
			if (err) {
				ksm_run = KSM_RUN_STOP;
				count = err;
			}
		}
	}
	mutex_unlock(&ksm_thread_mutex);

	if (flags & KSM_RUN_MERGE)
		wake_up_interruptible(&ksm_thread_wait);

	return count;
}
KSM_ATTR(run);
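
/*
 * Illustrative usage: the valid run values are KSM_RUN_STOP (0),
 * KSM_RUN_MERGE (1) and KSM_RUN_UNMERGE (2), e.g.:
 *
 *	echo 1 > /sys/kernel/mm/ksm/run	# start ksmd merging
 *	echo 0 > /sys/kernel/mm/ksm/run	# stop ksmd, keep merged pages
 *	echo 2 > /sys/kernel/mm/ksm/run	# stop ksmd and unmerge everything
 */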

static ssize_t max_kernel_pages_store(struct kobject *kobj,
				      struct kobj_attribute *attr,
				      const char *buf, size_t count)
{
	int err;
	unsigned long nr_pages;

	err = strict_strtoul(buf, 10, &nr_pages);
	if (err)
		return -EINVAL;

	ksm_max_kernel_pages = nr_pages;

	return count;
}

static ssize_t max_kernel_pages_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_max_kernel_pages);
}
KSM_ATTR(max_kernel_pages);
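
/*
 * Merged KSM pages are unswappable, so max_kernel_pages caps how many
 * of them may be pinned; ksm_init() below defaults it to a quarter of
 * RAM.  Raising the cap is a deliberate act, e.g. (illustrative):
 *
 *	echo 1000000 > /sys/kernel/mm/ksm/max_kernel_pages
 */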

static ssize_t pages_shared_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_pages_shared);
}
KSM_ATTR_RO(pages_shared);

static ssize_t pages_sharing_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_pages_sharing);
}
KSM_ATTR_RO(pages_sharing);

static ssize_t pages_unshared_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_pages_unshared);
}
KSM_ATTR_RO(pages_unshared);

static ssize_t pages_volatile_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	long ksm_pages_volatile;

	ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared
				- ksm_pages_sharing - ksm_pages_unshared;
	/*
	 * It was not worth any locking to calculate that statistic,
	 * but it might therefore sometimes be negative: conceal that.
	 */
	if (ksm_pages_volatile < 0)
		ksm_pages_volatile = 0;
	return sprintf(buf, "%ld\n", ksm_pages_volatile);
}
KSM_ATTR_RO(pages_volatile);
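
/*
 * Illustrative reading of the counters above: each rmap_item is in
 * exactly one state, so by construction
 *	rmap_items = pages_shared + pages_sharing
 *			+ pages_unshared + pages_volatile.
 * Memory saved is roughly pages_sharing pages; a high ratio of
 * pages_unshared to pages_sharing suggests wasted scanning effort.
 */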

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_scan.seqnr);
}
KSM_ATTR_RO(full_scans);

static struct attribute *ksm_attrs[] = {
	&sleep_millisecs_attr.attr,
	&pages_to_scan_attr.attr,
	&run_attr.attr,
	&max_kernel_pages_attr.attr,
	&pages_shared_attr.attr,
	&pages_sharing_attr.attr,
	&pages_unshared_attr.attr,
	&pages_volatile_attr.attr,
	&full_scans_attr.attr,
	NULL,
};

static struct attribute_group ksm_attr_group = {
	.attrs = ksm_attrs,
	.name = "ksm",
};
#endif /* CONFIG_SYSFS */

static int __init ksm_init(void)
{
	struct task_struct *ksm_thread;
	int err;

	ksm_max_kernel_pages = totalram_pages / 4;

	err = ksm_slab_init();
	if (err)
		goto out;

	err = mm_slots_hash_init();
	if (err)
		goto out_free1;

	ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd");
	if (IS_ERR(ksm_thread)) {
		printk(KERN_ERR "ksm: creating kthread failed\n");
		err = PTR_ERR(ksm_thread);
		goto out_free2;
	}

#ifdef CONFIG_SYSFS
	err = sysfs_create_group(mm_kobj, &ksm_attr_group);
	if (err) {
		printk(KERN_ERR "ksm: register sysfs failed\n");
		kthread_stop(ksm_thread);
		goto out_free2;
	}
#else
	ksm_run = KSM_RUN_MERGE;	/* no way for user to start it */

#endif /* CONFIG_SYSFS */

	return 0;

out_free2:
	mm_slots_hash_free();
out_free1:
	ksm_slab_free();
out:
	return err;
}
module_init(ksm_init)