/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 *
 * Copyright (C) 2008-2009 Red Hat, Inc.
 * Authors:
 *      Izik Eidus
 *      Andrea Arcangeli
 *      Chris Wright
 *      Hugh Dickins
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/spinlock.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/ksm.h>

#include <asm/tlbflush.h>

/*
 * A few notes about the KSM scanning process,
 * to make it easier to understand the data structures below:
 *
 * In order to reduce excessive scanning, KSM sorts the memory pages by their
 * contents into a data structure that holds pointers to the pages' locations.
 *
 * Since the contents of the pages may change at any moment, KSM cannot just
 * insert the pages into a normal sorted tree and expect it to find anything.
 * Therefore KSM uses two data structures - the stable and the unstable tree.
 *
 * The stable tree holds pointers to all the merged pages (ksm pages), sorted
 * by their contents.  Because each such page is write-protected, searching on
 * this tree is fully assured to be working (except when pages are unmapped),
 * and therefore this tree is called the stable tree.
 *
 * In addition to the stable tree, KSM uses a second data structure called the
 * unstable tree: this tree holds pointers to pages which have been found to
 * be "unchanged for a period of time".  The unstable tree sorts these pages
 * by their contents, but since they are not write-protected, KSM cannot rely
 * upon the unstable tree to work correctly - the unstable tree is liable to
 * be corrupted as its contents are modified, and so it is called unstable.
 *
 * KSM solves this problem by several techniques:
 *
 * 1) The unstable tree is flushed every time KSM completes scanning all
 *    memory areas, and then the tree is rebuilt again from the beginning.
 * 2) KSM will only insert into the unstable tree pages whose hash value
 *    has not changed since the previous scan of all memory areas.
 * 3) The unstable tree is a red-black tree - so its balancing is based on
 *    the colors of the nodes and not on their contents, assuring that even
 *    when the tree gets "corrupted" it won't get out of balance, so scanning
 *    time remains the same (also, searching and inserting nodes in an rbtree
 *    uses the same algorithm, so we have no overhead when we flush and
 *    rebuild).
 * 4) KSM never flushes the stable tree, which means that even if it were to
 *    take 10 attempts to find a page in the unstable tree, once it is found,
 *    it is secured in the stable tree.  (When we scan a new page, we first
 *    compare it against the stable tree, and then against the unstable tree.)
 */
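
/*
 * Illustrative usage sketch (an example, not part of the kernel sources):
 * an application opts an anonymous area into KSM with madvise(2), which
 * lands in ksm_madvise() below; ksmd is then set scanning via the sysfs
 * files defined at the bottom of this file (assumed mounted under
 * /sys/kernel/mm/ksm).
 *
 *      char *area = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *      madvise(area, len, MADV_MERGEABLE);     (sets VM_MERGEABLE)
 *
 *      # echo 100 > /sys/kernel/mm/ksm/pages_to_scan
 *      # echo 20  > /sys/kernel/mm/ksm/sleep_millisecs
 *      # echo 1   > /sys/kernel/mm/ksm/run     (KSM_RUN_MERGE: start ksmd)
 */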

/**
 * struct mm_slot - ksm information per mm that is being scanned
 * @link: link to the mm_slots hash list
 * @mm_list: link into the mm_slots list, rooted in ksm_mm_head
 * @rmap_list: head for this mm_slot's singly-linked list of rmap_items
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
        struct hlist_node link;
        struct list_head mm_list;
        struct rmap_item *rmap_list;
        struct mm_struct *mm;
};

/**
 * struct ksm_scan - cursor for scanning
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that mm_slot to be scanned
 * @rmap_list: link to the next rmap to be scanned in the rmap_list
 * @seqnr: count of completed full scans (needed when removing unstable node)
 *
 * There is only the one ksm_scan instance of this cursor structure.
 */
struct ksm_scan {
        struct mm_slot *mm_slot;
        unsigned long address;
        struct rmap_item **rmap_list;
        unsigned long seqnr;
};

/**
 * struct rmap_item - reverse mapping item for virtual addresses
 * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
 * @filler: unused space we're making available in this patch
 * @mm: the memory structure this rmap_item is pointing into
 * @address: the virtual address this rmap_item tracks (+ flags in low bits)
 * @oldchecksum: previous checksum of the page at that virtual address
 * @node: rb_node of this rmap_item in either unstable or stable tree
 * @next: next rmap_item hanging off the same node of the stable tree
 * @prev: previous rmap_item hanging off the same node of the stable tree
 */
struct rmap_item {
        struct rmap_item *rmap_list;
        unsigned long filler;
        struct mm_struct *mm;
        unsigned long address;          /* + low bits used for flags below */
        union {
                unsigned int oldchecksum;               /* when unstable */
                struct rmap_item *next;                 /* when stable */
        };
        union {
                struct rb_node node;                    /* when tree node */
                struct rmap_item *prev;                 /* in stable list */
        };
};

#define SEQNR_MASK      0x0ff   /* low bits of unstable tree seqnr */
#define NODE_FLAG       0x100   /* is a node of unstable or stable tree */
#define STABLE_FLAG     0x200   /* is a node or list item of stable tree */
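
/*
 * For illustration, a sketch of how these flags share ->address with the
 * page-aligned virtual address (see the tree insert/remove functions
 * below for the authoritative uses):
 *
 *      unstable tree node, inserted during full scan number 5:
 *              rmap_item->address |= NODE_FLAG | (5 & SEQNR_MASK);
 *      stable tree node:
 *              rmap_item->address |= NODE_FLAG | STABLE_FLAG;
 *      recovering the virtual address in either case:
 *              addr = rmap_item->address & PAGE_MASK;
 */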

/* The stable and unstable tree heads */
static struct rb_root root_stable_tree = RB_ROOT;
static struct rb_root root_unstable_tree = RB_ROOT;

#define MM_SLOTS_HASH_HEADS 1024
static struct hlist_head *mm_slots_hash;

static struct mm_slot ksm_mm_head = {
        .mm_list = LIST_HEAD_INIT(ksm_mm_head.mm_list),
};
static struct ksm_scan ksm_scan = {
        .mm_slot = &ksm_mm_head,
};

static struct kmem_cache *rmap_item_cache;
static struct kmem_cache *mm_slot_cache;

/* The number of nodes in the stable tree */
static unsigned long ksm_pages_shared;

/* The number of page slots additionally sharing those nodes */
static unsigned long ksm_pages_sharing;

/* The number of nodes in the unstable tree */
static unsigned long ksm_pages_unshared;

/* The number of rmap_items in use: to calculate pages_volatile */
static unsigned long ksm_rmap_items;

/* Limit on the number of unswappable pages used */
static unsigned long ksm_max_kernel_pages;

/* Number of pages ksmd should scan in one batch */
static unsigned int ksm_thread_pages_to_scan = 100;

/* Milliseconds ksmd should sleep between batches */
static unsigned int ksm_thread_sleep_millisecs = 20;

#define KSM_RUN_STOP    0
#define KSM_RUN_MERGE   1
#define KSM_RUN_UNMERGE 2
static unsigned int ksm_run = KSM_RUN_STOP;

static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
static DEFINE_MUTEX(ksm_thread_mutex);
static DEFINE_SPINLOCK(ksm_mmlist_lock);

#define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create("ksm_"#__struct,\
                sizeof(struct __struct), __alignof__(struct __struct),\
                (__flags), NULL)

static int __init ksm_slab_init(void)
{
        rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0);
        if (!rmap_item_cache)
                goto out;

        mm_slot_cache = KSM_KMEM_CACHE(mm_slot, 0);
        if (!mm_slot_cache)
                goto out_free;

        return 0;

out_free:
        kmem_cache_destroy(rmap_item_cache);
out:
        return -ENOMEM;
}

static void __init ksm_slab_free(void)
{
        kmem_cache_destroy(mm_slot_cache);
        kmem_cache_destroy(rmap_item_cache);
        mm_slot_cache = NULL;
}

static inline struct rmap_item *alloc_rmap_item(void)
{
        struct rmap_item *rmap_item;

        rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL);
        if (rmap_item)
                ksm_rmap_items++;
        return rmap_item;
}

static inline void free_rmap_item(struct rmap_item *rmap_item)
{
        ksm_rmap_items--;
        rmap_item->mm = NULL;   /* debug safety */
        kmem_cache_free(rmap_item_cache, rmap_item);
}

static inline struct mm_slot *alloc_mm_slot(void)
{
        if (!mm_slot_cache)     /* initialization failed */
                return NULL;
        return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
        kmem_cache_free(mm_slot_cache, mm_slot);
}

static int __init mm_slots_hash_init(void)
{
        mm_slots_hash = kzalloc(MM_SLOTS_HASH_HEADS * sizeof(struct hlist_head),
                                GFP_KERNEL);
        if (!mm_slots_hash)
                return -ENOMEM;
        return 0;
}

static void __init mm_slots_hash_free(void)
{
        kfree(mm_slots_hash);
}

static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
        struct mm_slot *mm_slot;
        struct hlist_head *bucket;
        struct hlist_node *node;

        bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
                                % MM_SLOTS_HASH_HEADS];
        hlist_for_each_entry(mm_slot, node, bucket, link) {
                if (mm == mm_slot->mm)
                        return mm_slot;
        }
        return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
                                    struct mm_slot *mm_slot)
{
        struct hlist_head *bucket;

        bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
                                % MM_SLOTS_HASH_HEADS];
        mm_slot->mm = mm;
        hlist_add_head(&mm_slot->link, bucket);
}

static inline int in_stable_tree(struct rmap_item *rmap_item)
{
        return rmap_item->address & STABLE_FLAG;
}

/*
 * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
 * page tables after it has passed through ksm_exit() - which, if necessary,
 * takes mmap_sem briefly to serialize against them.  ksm_exit() does not set
 * a special flag: they can just back out as soon as mm_users goes to zero.
 * ksm_test_exit() is used throughout to make this test for exit: in some
 * places for correctness, in some places just to avoid unnecessary work.
 */
static inline bool ksm_test_exit(struct mm_struct *mm)
{
        return atomic_read(&mm->mm_users) == 0;
}

/*
 * We use break_ksm to break COW on a ksm page: it's a stripped down
 *
 *      if (get_user_pages(current, mm, addr, 1, 1, 1, &page, NULL) == 1)
 *              put_page(page);
 *
 * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma,
 * in case the application has unmapped and remapped mm,addr meanwhile.
 * Could a ksm page appear anywhere else?  Actually yes, in a VM_PFNMAP
 * mmap of /dev/mem or /dev/kmem, where we would not want to touch it.
 */
static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
{
        struct page *page;
        int ret = 0;

        do {
                cond_resched();
                page = follow_page(vma, addr, FOLL_GET);
                if (!page)
                        break;
                if (PageKsm(page))
                        ret = handle_mm_fault(vma->vm_mm, vma, addr,
                                                        FAULT_FLAG_WRITE);
                else
                        ret = VM_FAULT_WRITE;
                put_page(page);
        } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM)));
        /*
         * We must loop because handle_mm_fault() may back out if there's
         * any difficulty e.g. if pte accessed bit gets updated concurrently.
         *
         * VM_FAULT_WRITE is what we have been hoping for: it indicates that
         * COW has been broken, even if the vma does not permit VM_WRITE;
         * but note that a concurrent fault might break PageKsm for us.
         *
         * VM_FAULT_SIGBUS could occur if we race with truncation of the
         * backing file, which also invalidates anonymous pages: that's
         * okay, that truncation will have unmapped the PageKsm for us.
         *
         * VM_FAULT_OOM: at the time of writing (late July 2009), setting
         * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
         * current task has TIF_MEMDIE set, and will be OOM killed on return
         * to user; and ksmd, having no mm, would never be chosen for that.
         *
         * But if the mm is in a limited mem_cgroup, then the fault may fail
         * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and
         * even ksmd can fail in this way - though it's usually breaking ksm
         * just to undo a merge it made a moment before, so unlikely to oom.
         *
         * That's a pity: we might therefore have more kernel pages allocated
         * than we're counting as nodes in the stable tree; but ksm_do_scan
         * will retry to break_cow on each pass, so should recover the page
         * in due course.  The important thing is to not let VM_MERGEABLE
         * be cleared while any such pages might remain in the area.
         */
        return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
}

static void break_cow(struct rmap_item *rmap_item)
{
        struct mm_struct *mm = rmap_item->mm;
        unsigned long addr = rmap_item->address;
        struct vm_area_struct *vma;

        down_read(&mm->mmap_sem);
        if (ksm_test_exit(mm))
                goto out;
        vma = find_vma(mm, addr);
        if (!vma || vma->vm_start > addr)
                goto out;
        if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
                goto out;
        break_ksm(vma, addr);
out:
        up_read(&mm->mmap_sem);
}

static struct page *get_mergeable_page(struct rmap_item *rmap_item)
{
        struct mm_struct *mm = rmap_item->mm;
        unsigned long addr = rmap_item->address;
        struct vm_area_struct *vma;
        struct page *page;

        down_read(&mm->mmap_sem);
        if (ksm_test_exit(mm))
                goto out;
        vma = find_vma(mm, addr);
        if (!vma || vma->vm_start > addr)
                goto out;
        if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
                goto out;

        page = follow_page(vma, addr, FOLL_GET);
        if (!page)
                goto out;
        if (PageAnon(page)) {
                flush_anon_page(vma, page, addr);
                flush_dcache_page(page);
        } else {
                put_page(page);
out:            page = NULL;
        }
        up_read(&mm->mmap_sem);
        return page;
}

/*
 * get_ksm_page: checks if the page at the virtual address in rmap_item
 * is still PageKsm, in which case we can trust the content of the page,
 * and returns that page with its reference taken; or NULL if the page
 * has been zapped.
 */
static struct page *get_ksm_page(struct rmap_item *rmap_item)
{
        struct page *page;

        page = get_mergeable_page(rmap_item);
        if (page && !PageKsm(page)) {
                put_page(page);
                page = NULL;
        }
        return page;
}

/*
 * Remove rmap_item from the stable or unstable tree, unlinking it and
 * updating the shared/sharing/unshared page counts accordingly.
 */
static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
{
        if (in_stable_tree(rmap_item)) {
                struct rmap_item *next_item = rmap_item->next;

                if (rmap_item->address & NODE_FLAG) {
                        if (next_item) {
                                rb_replace_node(&rmap_item->node,
                                                &next_item->node,
                                                &root_stable_tree);
                                next_item->address |= NODE_FLAG;
                                ksm_pages_sharing--;
                        } else {
                                rb_erase(&rmap_item->node, &root_stable_tree);
                                ksm_pages_shared--;
                        }
                } else {
                        struct rmap_item *prev_item = rmap_item->prev;

                        BUG_ON(prev_item->next != rmap_item);
                        prev_item->next = next_item;
                        if (next_item) {
                                BUG_ON(next_item->prev != rmap_item);
                                next_item->prev = rmap_item->prev;
                        }
                        ksm_pages_sharing--;
                }

                rmap_item->next = NULL;
                rmap_item->address &= PAGE_MASK;

        } else if (rmap_item->address & NODE_FLAG) {
                unsigned char age;
                /*
                 * Usually ksmd can and must skip the rb_erase, because
                 * root_unstable_tree was already reset to RB_ROOT.
                 * But be careful when an mm is exiting: do the rb_erase
                 * if this rmap_item was inserted by this scan, rather
                 * than left over from before.
                 */
                age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
                BUG_ON(age > 1);
                if (!age)
                        rb_erase(&rmap_item->node, &root_unstable_tree);

                ksm_pages_unshared--;
                rmap_item->address &= PAGE_MASK;
        }

        cond_resched();         /* we're called from many long loops */
}

static void remove_trailing_rmap_items(struct mm_slot *mm_slot,
                                       struct rmap_item **rmap_list)
{
        while (*rmap_list) {
                struct rmap_item *rmap_item = *rmap_list;
                *rmap_list = rmap_item->rmap_list;
                remove_rmap_item_from_tree(rmap_item);
                free_rmap_item(rmap_item);
        }
}

/*
 * Though it's very tempting to unmerge in_stable_tree(rmap_item)s rather
 * than check every pte of a given vma, the locking doesn't quite work for
 * that - an rmap_item is assigned to the stable tree after inserting ksm
 * page and upping mmap_sem.  Nor does it fit with the way we skip dup'ing
 * rmap_items from parent to child at fork time (so as not to waste time
 * if exit comes before the next scan reaches it).
 *
 * Similarly, although we'd like to remove rmap_items (so updating counts
 * and freeing memory) when unmerging an area, it's easier to leave that
 * to the next pass of ksmd - consider, for example, how ksmd might be
 * in cmp_and_merge_page on one of the rmap_items we would be removing.
 */
static int unmerge_ksm_pages(struct vm_area_struct *vma,
                             unsigned long start, unsigned long end)
{
        unsigned long addr;
        int err = 0;

        for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
                if (ksm_test_exit(vma->vm_mm))
                        break;
                if (signal_pending(current))
                        err = -ERESTARTSYS;
                else
                        err = break_ksm(vma, addr);
        }
        return err;
}

#ifdef CONFIG_SYSFS
/*
 * Only called through the sysfs control interface:
 */
static int unmerge_and_remove_all_rmap_items(void)
{
        struct mm_slot *mm_slot;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        int err = 0;

        spin_lock(&ksm_mmlist_lock);
        ksm_scan.mm_slot = list_entry(ksm_mm_head.mm_list.next,
                                                struct mm_slot, mm_list);
        spin_unlock(&ksm_mmlist_lock);

        for (mm_slot = ksm_scan.mm_slot;
                        mm_slot != &ksm_mm_head; mm_slot = ksm_scan.mm_slot) {
                mm = mm_slot->mm;
                down_read(&mm->mmap_sem);
                for (vma = mm->mmap; vma; vma = vma->vm_next) {
                        if (ksm_test_exit(mm))
                                break;
                        if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
                                continue;
                        err = unmerge_ksm_pages(vma,
                                                vma->vm_start, vma->vm_end);
                        if (err)
                                goto error;
                }

                remove_trailing_rmap_items(mm_slot, &mm_slot->rmap_list);

                spin_lock(&ksm_mmlist_lock);
                ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
                                                struct mm_slot, mm_list);
                if (ksm_test_exit(mm)) {
                        hlist_del(&mm_slot->link);
                        list_del(&mm_slot->mm_list);
                        spin_unlock(&ksm_mmlist_lock);

                        free_mm_slot(mm_slot);
                        clear_bit(MMF_VM_MERGEABLE, &mm->flags);
                        up_read(&mm->mmap_sem);
                        mmdrop(mm);
                } else {
                        spin_unlock(&ksm_mmlist_lock);
                        up_read(&mm->mmap_sem);
                }
        }

        ksm_scan.seqnr = 0;
        return 0;

error:
        up_read(&mm->mmap_sem);
        spin_lock(&ksm_mmlist_lock);
        ksm_scan.mm_slot = &ksm_mm_head;
        spin_unlock(&ksm_mmlist_lock);
        return err;
}
#endif /* CONFIG_SYSFS */

static u32 calc_checksum(struct page *page)
{
        u32 checksum;
        void *addr = kmap_atomic(page, KM_USER0);
        checksum = jhash2(addr, PAGE_SIZE / 4, 17);
        kunmap_atomic(addr, KM_USER0);
        return checksum;
}

static int memcmp_pages(struct page *page1, struct page *page2)
{
        char *addr1, *addr2;
        int ret;

        addr1 = kmap_atomic(page1, KM_USER0);
        addr2 = kmap_atomic(page2, KM_USER1);
        ret = memcmp(addr1, addr2, PAGE_SIZE);
        kunmap_atomic(addr2, KM_USER1);
        kunmap_atomic(addr1, KM_USER0);
        return ret;
}

static inline int pages_identical(struct page *page1, struct page *page2)
{
        return !memcmp_pages(page1, page2);
}

static int write_protect_page(struct vm_area_struct *vma, struct page *page,
                              pte_t *orig_pte)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long addr;
        pte_t *ptep;
        spinlock_t *ptl;
        int swapped;
        int err = -EFAULT;

        addr = page_address_in_vma(page, vma);
        if (addr == -EFAULT)
                goto out;

        ptep = page_check_address(page, mm, addr, &ptl, 0);
        if (!ptep)
                goto out;

        if (pte_write(*ptep)) {
                pte_t entry;

                swapped = PageSwapCache(page);
                flush_cache_page(vma, addr, page_to_pfn(page));
                /*
                 * Ok this is tricky: when get_user_pages_fast() runs it
                 * doesn't take any lock, therefore the check we are about
                 * to make, of the page count against the map count, is
                 * racy and O_DIRECT can happen right after the check.
                 * So we clear the pte and flush the TLB before the check;
                 * this assures us that no O_DIRECT can happen after the
                 * check or in the middle of the check.
                 */
                entry = ptep_clear_flush(vma, addr, ptep);
                /*
                 * Check that no O_DIRECT or similar I/O is in progress on
                 * the page.
                 */
                if (page_mapcount(page) + 1 + swapped != page_count(page)) {
                        set_pte_at_notify(mm, addr, ptep, entry);
                        goto out_unlock;
                }
                entry = pte_wrprotect(entry);
                set_pte_at_notify(mm, addr, ptep, entry);
        }
        *orig_pte = *ptep;
        err = 0;

out_unlock:
        pte_unmap_unlock(ptep, ptl);
out:
        return err;
}

/**
 * replace_page - replace page in vma by new ksm page
 * @vma:      vma that holds the pte pointing to page
 * @page:     the page we are replacing by kpage
 * @kpage:    the ksm page we replace page by
 * @orig_pte: the original value of the pte
 *
 * Returns 0 on success, -EFAULT on failure.
 */
static int replace_page(struct vm_area_struct *vma, struct page *page,
                        struct page *kpage, pte_t orig_pte)
{
        struct mm_struct *mm = vma->vm_mm;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ptep;
        spinlock_t *ptl;
        unsigned long addr;
        int err = -EFAULT;

        addr = page_address_in_vma(page, vma);
        if (addr == -EFAULT)
                goto out;

        pgd = pgd_offset(mm, addr);
        if (!pgd_present(*pgd))
                goto out;

        pud = pud_offset(pgd, addr);
        if (!pud_present(*pud))
                goto out;

        pmd = pmd_offset(pud, addr);
        if (!pmd_present(*pmd))
                goto out;

        ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
        if (!pte_same(*ptep, orig_pte)) {
                pte_unmap_unlock(ptep, ptl);
                goto out;
        }

        get_page(kpage);
        page_add_ksm_rmap(kpage);

        flush_cache_page(vma, addr, pte_pfn(*ptep));
        ptep_clear_flush(vma, addr, ptep);
        set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));

        page_remove_rmap(page);
        put_page(page);

        pte_unmap_unlock(ptep, ptl);
        err = 0;
out:
        return err;
}

/*
 * try_to_merge_one_page - take two pages and merge them into one
 * @vma: the vma that holds the pte pointing to page
 * @page: the PageAnon page that we want to replace with kpage
 * @kpage: the PageKsm page (or newly allocated page which page_add_ksm_rmap
 *         will make PageKsm) that we want to map instead of page
 *
 * This function returns 0 if the pages were merged, -EFAULT otherwise.
 */
static int try_to_merge_one_page(struct vm_area_struct *vma,
                                 struct page *page, struct page *kpage)
{
        pte_t orig_pte = __pte(0);
        int err = -EFAULT;

        if (!(vma->vm_flags & VM_MERGEABLE))
                goto out;
        if (!PageAnon(page))
                goto out;

        /*
         * We need the page lock to read a stable PageSwapCache in
         * write_protect_page().  We use trylock_page() instead of
         * lock_page() because we don't want to wait here - we
         * prefer to continue scanning and merging different pages,
         * then come back to this page when it is unlocked.
         */
        if (!trylock_page(page))
                goto out;
        /*
         * If this anonymous page is mapped only here, its pte may need
         * to be write-protected.  If it's mapped elsewhere, all of its
         * ptes are necessarily already write-protected.  But in either
         * case, we need to lock and check page_count is not raised.
         */
        if (write_protect_page(vma, page, &orig_pte) == 0 &&
            pages_identical(page, kpage))
                err = replace_page(vma, page, kpage, orig_pte);

        unlock_page(page);
out:
        return err;
}

/*
 * try_to_merge_with_ksm_page - like try_to_merge_two_pages,
 * but no new kernel page is allocated: kpage must already be a ksm page.
 *
 * This function returns 0 if the pages were merged, -EFAULT otherwise.
 */
static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item,
                                      struct page *page, struct page *kpage)
{
        struct mm_struct *mm = rmap_item->mm;
        struct vm_area_struct *vma;
        int err = -EFAULT;

        down_read(&mm->mmap_sem);
        if (ksm_test_exit(mm))
                goto out;
        vma = find_vma(mm, rmap_item->address);
        if (!vma || vma->vm_start > rmap_item->address)
                goto out;

        err = try_to_merge_one_page(vma, page, kpage);
out:
        up_read(&mm->mmap_sem);
        return err;
}

/*
 * try_to_merge_two_pages - take two identical pages and prepare them
 * to be merged into one page.
 *
 * This function returns the kpage if we successfully merged two identical
 * pages into one ksm page, NULL otherwise.
 *
 * Note that this function allocates a new kernel page: if one of the pages
 * is already a ksm page, try_to_merge_with_ksm_page should be used.
 */
static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item,
                                           struct page *page,
                                           struct rmap_item *tree_rmap_item,
                                           struct page *tree_page)
{
        struct mm_struct *mm = rmap_item->mm;
        struct vm_area_struct *vma;
        struct page *kpage;
        int err = -EFAULT;

        /*
         * The number of nodes in the stable tree
         * is the number of kernel pages that we hold.
         */
        if (ksm_max_kernel_pages &&
            ksm_max_kernel_pages <= ksm_pages_shared)
                return NULL;

        kpage = alloc_page(GFP_HIGHUSER);
        if (!kpage)
                return NULL;

        down_read(&mm->mmap_sem);
        if (ksm_test_exit(mm))
                goto up;
        vma = find_vma(mm, rmap_item->address);
        if (!vma || vma->vm_start > rmap_item->address)
                goto up;

        copy_user_highpage(kpage, page, rmap_item->address, vma);
        err = try_to_merge_one_page(vma, page, kpage);
up:
        up_read(&mm->mmap_sem);

        if (!err) {
                err = try_to_merge_with_ksm_page(tree_rmap_item,
                                                        tree_page, kpage);
                /*
                 * If that fails, we have a ksm page with only one pte
                 * pointing to it: so break it.
                 */
                if (err)
                        break_cow(rmap_item);
        }
        if (err) {
                put_page(kpage);
                kpage = NULL;
        }
        return kpage;
}

/*
 * stable_tree_search - search for page inside the stable tree
 *
 * This function checks if there is a page inside the stable tree
 * with identical content to the page that we are scanning right now.
 *
 * This function returns a pointer to the rmap_item of the identical
 * page if found, NULL otherwise.
 */
static struct rmap_item *stable_tree_search(struct page *page,
                                            struct page **tree_pagep)
{
        struct rb_node *node = root_stable_tree.rb_node;

        while (node) {
                struct rmap_item *tree_rmap_item, *next_rmap_item;
                struct page *tree_page;
                int ret;

                tree_rmap_item = rb_entry(node, struct rmap_item, node);
                while (tree_rmap_item) {
                        BUG_ON(!in_stable_tree(tree_rmap_item));
                        cond_resched();
                        tree_page = get_ksm_page(tree_rmap_item);
                        if (tree_page)
                                break;
                        next_rmap_item = tree_rmap_item->next;
                        remove_rmap_item_from_tree(tree_rmap_item);
                        tree_rmap_item = next_rmap_item;
                }
                if (!tree_rmap_item)
                        return NULL;

                ret = memcmp_pages(page, tree_page);

                if (ret < 0) {
                        put_page(tree_page);
                        node = node->rb_left;
                } else if (ret > 0) {
                        put_page(tree_page);
                        node = node->rb_right;
                } else {
                        *tree_pagep = tree_page;
                        return tree_rmap_item;
                }
        }

        return NULL;
}

/*
 * stable_tree_insert - insert rmap_item pointing to new ksm page
 * into the stable tree.
 *
 * This function returns the rmap_item on success, NULL otherwise.
 */
static struct rmap_item *stable_tree_insert(struct page *kpage,
                                            struct rmap_item *rmap_item)
{
        struct rb_node **new = &root_stable_tree.rb_node;
        struct rb_node *parent = NULL;

        while (*new) {
                struct rmap_item *tree_rmap_item, *next_rmap_item;
                struct page *tree_page;
                int ret;

                tree_rmap_item = rb_entry(*new, struct rmap_item, node);
                while (tree_rmap_item) {
                        BUG_ON(!in_stable_tree(tree_rmap_item));
                        cond_resched();
                        tree_page = get_ksm_page(tree_rmap_item);
                        if (tree_page)
                                break;
                        next_rmap_item = tree_rmap_item->next;
                        remove_rmap_item_from_tree(tree_rmap_item);
                        tree_rmap_item = next_rmap_item;
                }
                if (!tree_rmap_item)
                        return NULL;

                ret = memcmp_pages(kpage, tree_page);
                put_page(tree_page);

                parent = *new;
                if (ret < 0)
                        new = &parent->rb_left;
                else if (ret > 0)
                        new = &parent->rb_right;
                else {
                        /*
                         * It is not a bug that stable_tree_search() didn't
                         * find this node: because at that time our page was
                         * not yet write-protected, so may have changed since.
                         */
                        return NULL;
                }
        }

        rmap_item->address |= NODE_FLAG | STABLE_FLAG;
        rmap_item->next = NULL;
        rb_link_node(&rmap_item->node, parent, new);
        rb_insert_color(&rmap_item->node, &root_stable_tree);

        ksm_pages_shared++;
        return rmap_item;
}

/*
 * unstable_tree_search_insert - search for identical page,
 * else insert rmap_item into the unstable tree.
 *
 * This function searches for a page in the unstable tree identical to the
 * page currently being scanned; and if no identical page is found in the
 * tree, we insert rmap_item as a new object into the unstable tree.
 *
 * This function returns a pointer to the rmap_item found to be identical
 * to the currently scanned page, NULL otherwise.
 *
 * This function does both searching and inserting, because they share
 * the same walking algorithm in an rbtree.
 */
static
struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item,
                                              struct page *page,
                                              struct page **tree_pagep)
{
        struct rb_node **new = &root_unstable_tree.rb_node;
        struct rb_node *parent = NULL;

        while (*new) {
                struct rmap_item *tree_rmap_item;
                struct page *tree_page;
                int ret;

                cond_resched();
                tree_rmap_item = rb_entry(*new, struct rmap_item, node);
                tree_page = get_mergeable_page(tree_rmap_item);
                if (!tree_page)
                        return NULL;

                /*
                 * Don't substitute a ksm page for a forked page.
                 */
                if (page == tree_page) {
                        put_page(tree_page);
                        return NULL;
                }

                ret = memcmp_pages(page, tree_page);

                parent = *new;
                if (ret < 0) {
                        put_page(tree_page);
                        new = &parent->rb_left;
                } else if (ret > 0) {
                        put_page(tree_page);
                        new = &parent->rb_right;
                } else {
                        *tree_pagep = tree_page;
                        return tree_rmap_item;
                }
        }

        rmap_item->address |= NODE_FLAG;
        rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK);
        rb_link_node(&rmap_item->node, parent, new);
        rb_insert_color(&rmap_item->node, &root_unstable_tree);

        ksm_pages_unshared++;
        return NULL;
}

/*
 * stable_tree_append - add another rmap_item to the linked list of
 * rmap_items hanging off a given node of the stable tree, all sharing
 * the same ksm page.
 */
static void stable_tree_append(struct rmap_item *rmap_item,
                               struct rmap_item *tree_rmap_item)
{
        rmap_item->next = tree_rmap_item->next;
        rmap_item->prev = tree_rmap_item;

        if (tree_rmap_item->next)
                tree_rmap_item->next->prev = rmap_item;

        tree_rmap_item->next = rmap_item;
        rmap_item->address |= STABLE_FLAG;

        ksm_pages_sharing++;
}

/*
 * cmp_and_merge_page - first see if page can be merged into the stable tree;
 * if not, compare checksum to previous and if it's the same, see if page can
 * be inserted into the unstable tree, or merged with a page already there and
 * both transferred to the stable tree.
 *
 * @page: the page for which we are searching an identical page
 * @rmap_item: the reverse mapping into the virtual address of this page
 */
static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
{
        struct rmap_item *tree_rmap_item;
        struct page *tree_page = NULL;
        struct page *kpage;
        unsigned int checksum;
        int err;

        remove_rmap_item_from_tree(rmap_item);

        /* We first start with searching the page inside the stable tree */
        tree_rmap_item = stable_tree_search(page, &tree_page);
        if (tree_rmap_item) {
                kpage = tree_page;
                if (page == kpage)                      /* forked */
                        err = 0;
                else
                        err = try_to_merge_with_ksm_page(rmap_item,
                                                         page, kpage);
                if (!err) {
                        /*
                         * The page was successfully merged:
                         * add its rmap_item to the stable tree.
                         */
                        stable_tree_append(rmap_item, tree_rmap_item);
                }
                put_page(kpage);
                return;
        }

        /*
         * A ksm page might have got here by fork, but its other
         * references have already been removed from the stable tree.
         * Or it might be left over from a break_ksm which failed
         * when the mem_cgroup had reached its limit: try again now.
         */
        if (PageKsm(page))
                break_cow(rmap_item);

        /*
         * If the hash value of the page changed since the last time we
         * calculated it, this page is being changed frequently: therefore
         * we don't want to insert it into the unstable tree, and we don't
         * want to waste our time searching for something identical to it
         * there.
         */
        checksum = calc_checksum(page);
        if (rmap_item->oldchecksum != checksum) {
                rmap_item->oldchecksum = checksum;
                return;
        }

        tree_rmap_item =
                unstable_tree_search_insert(rmap_item, page, &tree_page);
        if (tree_rmap_item) {
                kpage = try_to_merge_two_pages(rmap_item, page,
                                                tree_rmap_item, tree_page);
                put_page(tree_page);
                /*
                 * As soon as we merge this page, we want to remove the
                 * rmap_item of the page we have merged with from the unstable
                 * tree, and insert it instead as new node in the stable tree.
                 */
                if (kpage) {
                        remove_rmap_item_from_tree(tree_rmap_item);

                        /*
                         * If we fail to insert the page into the stable tree,
                         * we will have 2 virtual addresses that are pointing
                         * to a ksm page left outside the stable tree,
                         * in which case we need to break_cow on both.
                         */
                        if (stable_tree_insert(kpage, tree_rmap_item))
                                stable_tree_append(rmap_item, tree_rmap_item);
                        else {
                                break_cow(tree_rmap_item);
                                break_cow(rmap_item);
                        }
                        put_page(kpage);
                }
        }
}

static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot,
                                            struct rmap_item **rmap_list,
                                            unsigned long addr)
{
        struct rmap_item *rmap_item;

        while (*rmap_list) {
                rmap_item = *rmap_list;
                if ((rmap_item->address & PAGE_MASK) == addr)
                        return rmap_item;
                if (rmap_item->address > addr)
                        break;
                *rmap_list = rmap_item->rmap_list;
                remove_rmap_item_from_tree(rmap_item);
                free_rmap_item(rmap_item);
        }

        rmap_item = alloc_rmap_item();
        if (rmap_item) {
                /* It has already been zeroed */
                rmap_item->mm = mm_slot->mm;
                rmap_item->address = addr;
                rmap_item->rmap_list = *rmap_list;
                *rmap_list = rmap_item;
        }
        return rmap_item;
}

static struct rmap_item *scan_get_next_rmap_item(struct page **page)
{
        struct mm_struct *mm;
        struct mm_slot *slot;
        struct vm_area_struct *vma;
        struct rmap_item *rmap_item;

        if (list_empty(&ksm_mm_head.mm_list))
                return NULL;

        slot = ksm_scan.mm_slot;
        if (slot == &ksm_mm_head) {
                root_unstable_tree = RB_ROOT;

                spin_lock(&ksm_mmlist_lock);
                slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list);
                ksm_scan.mm_slot = slot;
                spin_unlock(&ksm_mmlist_lock);
next_mm:
                ksm_scan.address = 0;
                ksm_scan.rmap_list = &slot->rmap_list;
        }

        mm = slot->mm;
        down_read(&mm->mmap_sem);
        if (ksm_test_exit(mm))
                vma = NULL;
        else
                vma = find_vma(mm, ksm_scan.address);

        for (; vma; vma = vma->vm_next) {
                if (!(vma->vm_flags & VM_MERGEABLE))
                        continue;
                if (ksm_scan.address < vma->vm_start)
                        ksm_scan.address = vma->vm_start;
                if (!vma->anon_vma)
                        ksm_scan.address = vma->vm_end;

                while (ksm_scan.address < vma->vm_end) {
                        if (ksm_test_exit(mm))
                                break;
                        *page = follow_page(vma, ksm_scan.address, FOLL_GET);
                        if (*page && PageAnon(*page)) {
                                flush_anon_page(vma, *page, ksm_scan.address);
                                flush_dcache_page(*page);
                                rmap_item = get_next_rmap_item(slot,
                                        ksm_scan.rmap_list, ksm_scan.address);
                                if (rmap_item) {
                                        ksm_scan.rmap_list =
                                                        &rmap_item->rmap_list;
                                        ksm_scan.address += PAGE_SIZE;
                                } else
                                        put_page(*page);
                                up_read(&mm->mmap_sem);
                                return rmap_item;
                        }
                        if (*page)
                                put_page(*page);
                        ksm_scan.address += PAGE_SIZE;
                        cond_resched();
                }
        }

        if (ksm_test_exit(mm)) {
                ksm_scan.address = 0;
                ksm_scan.rmap_list = &slot->rmap_list;
        }
        /*
         * Nuke all the rmap_items that are above this current rmap:
         * because there were no VM_MERGEABLE vmas with such addresses.
         */
        remove_trailing_rmap_items(slot, ksm_scan.rmap_list);

        spin_lock(&ksm_mmlist_lock);
        ksm_scan.mm_slot = list_entry(slot->mm_list.next,
                                                struct mm_slot, mm_list);
        if (ksm_scan.address == 0) {
                /*
                 * We've completed a full scan of all vmas, holding mmap_sem
                 * throughout, and found no VM_MERGEABLE: so do the same as
                 * __ksm_exit does to remove this mm from all our lists now.
                 * This applies either when cleaning up after __ksm_exit
                 * (but beware: we can reach here even before __ksm_exit),
                 * or when all VM_MERGEABLE areas have been unmapped (and
                 * mmap_sem then protects against race with MADV_MERGEABLE).
                 */
                hlist_del(&slot->link);
                list_del(&slot->mm_list);
                spin_unlock(&ksm_mmlist_lock);

                free_mm_slot(slot);
                clear_bit(MMF_VM_MERGEABLE, &mm->flags);
                up_read(&mm->mmap_sem);
                mmdrop(mm);
        } else {
                spin_unlock(&ksm_mmlist_lock);
                up_read(&mm->mmap_sem);
        }

        /* Repeat until we've completed scanning the whole list */
        slot = ksm_scan.mm_slot;
        if (slot != &ksm_mm_head)
                goto next_mm;

        ksm_scan.seqnr++;
        return NULL;
}

/**
 * ksm_do_scan - the ksm scanner main worker function.
 * @scan_npages: number of pages we want to scan before we return.
 */
static void ksm_do_scan(unsigned int scan_npages)
{
        struct rmap_item *rmap_item;
        struct page *page;

        while (scan_npages--) {
                cond_resched();
                rmap_item = scan_get_next_rmap_item(&page);
                if (!rmap_item)
                        return;
                if (!PageKsm(page) || !in_stable_tree(rmap_item))
                        cmp_and_merge_page(page, rmap_item);
                else if (page_mapcount(page) == 1) {
                        /*
                         * Replace now-unshared ksm page by ordinary page.
                         */
                        break_cow(rmap_item);
                        remove_rmap_item_from_tree(rmap_item);
                        rmap_item->oldchecksum = calc_checksum(page);
                }
                put_page(page);
        }
}

static int ksmd_should_run(void)
{
        return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.mm_list);
}

static int ksm_scan_thread(void *nothing)
{
        set_user_nice(current, 5);

        while (!kthread_should_stop()) {
                mutex_lock(&ksm_thread_mutex);
                if (ksmd_should_run())
                        ksm_do_scan(ksm_thread_pages_to_scan);
                mutex_unlock(&ksm_thread_mutex);

                if (ksmd_should_run()) {
                        schedule_timeout_interruptible(
                                msecs_to_jiffies(ksm_thread_sleep_millisecs));
                } else {
                        wait_event_interruptible(ksm_thread_wait,
                                ksmd_should_run() || kthread_should_stop());
                }
        }
        return 0;
}

int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
                unsigned long end, int advice, unsigned long *vm_flags)
{
        struct mm_struct *mm = vma->vm_mm;
        int err;

        switch (advice) {
        case MADV_MERGEABLE:
                /*
                 * Be somewhat over-protective for now!
                 */
                if (*vm_flags & (VM_MERGEABLE | VM_SHARED  | VM_MAYSHARE   |
                                 VM_PFNMAP    | VM_IO      | VM_DONTEXPAND |
                                 VM_RESERVED  | VM_HUGETLB | VM_INSERTPAGE |
                                 VM_MIXEDMAP  | VM_SAO))
                        return 0;               /* just ignore the advice */

                if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
                        err = __ksm_enter(mm);
                        if (err)
                                return err;
                }

                *vm_flags |= VM_MERGEABLE;
                break;

        case MADV_UNMERGEABLE:
                if (!(*vm_flags & VM_MERGEABLE))
                        return 0;               /* just ignore the advice */

                if (vma->anon_vma) {
                        err = unmerge_ksm_pages(vma, start, end);
                        if (err)
                                return err;
                }

                *vm_flags &= ~VM_MERGEABLE;
                break;
        }

        return 0;
}
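
/*
 * Illustrative userspace counterpart of the MADV_UNMERGEABLE case above
 * (a sketch, not kernel code): undoing the advice breaks COW on every
 * ksm page in the range, so the call may fail with -ENOMEM propagated
 * up from break_ksm().
 *
 *      if (madvise(area, len, MADV_UNMERGEABLE) != 0)
 *              perror("madvise");
 */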
1373
1374 int __ksm_enter(struct mm_struct *mm)
1375 {
1376         struct mm_slot *mm_slot;
1377         int needs_wakeup;
1378
1379         mm_slot = alloc_mm_slot();
1380         if (!mm_slot)
1381                 return -ENOMEM;
1382
1383         /* Check ksm_run too?  Would need tighter locking */
1384         needs_wakeup = list_empty(&ksm_mm_head.mm_list);
1385
1386         spin_lock(&ksm_mmlist_lock);
1387         insert_to_mm_slots_hash(mm, mm_slot);
1388         /*
1389          * Insert just behind the scanning cursor, to let the area settle
1390          * down a little; when fork is followed by immediate exec, we don't
1391          * want ksmd to waste time setting up and tearing down an rmap_list.
1392          */
1393         list_add_tail(&mm_slot->mm_list, &ksm_scan.mm_slot->mm_list);
1394         spin_unlock(&ksm_mmlist_lock);
1395
1396         set_bit(MMF_VM_MERGEABLE, &mm->flags);
1397         atomic_inc(&mm->mm_count);
1398
1399         if (needs_wakeup)
1400                 wake_up_interruptible(&ksm_thread_wait);
1401
1402         return 0;
1403 }

void __ksm_exit(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int easy_to_free = 0;

	/*
	 * This process is exiting: if it's straightforward (as is the
	 * case when ksmd was never running), free mm_slot immediately.
	 * But if it's at the cursor or has rmap_items linked to it, use
	 * mmap_sem to synchronize with any break_cows before pagetables
	 * are freed, and leave the mm_slot on the list for ksmd to free.
	 * Beware: ksm may already have noticed it exiting and freed the slot.
	 */

	spin_lock(&ksm_mmlist_lock);
	mm_slot = get_mm_slot(mm);
	if (mm_slot && ksm_scan.mm_slot != mm_slot) {
		if (!mm_slot->rmap_list) {
			hlist_del(&mm_slot->link);
			list_del(&mm_slot->mm_list);
			easy_to_free = 1;
		} else {
			list_move(&mm_slot->mm_list,
				  &ksm_scan.mm_slot->mm_list);
		}
	}
	spin_unlock(&ksm_mmlist_lock);

	if (easy_to_free) {
		free_mm_slot(mm_slot);
		clear_bit(MMF_VM_MERGEABLE, &mm->flags);
		mmdrop(mm);
	} else if (mm_slot) {
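		/*
		 * The empty write-locked section below acts as a barrier:
		 * any break_cow() still running under down_read(mmap_sem)
		 * must finish before this exiting mm frees its page tables.
		 */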
		down_write(&mm->mmap_sem);
		up_write(&mm->mmap_sem);
	}
}

#ifdef CONFIG_SYSFS
/*
 * This all compiles without CONFIG_SYSFS, but is a waste of space.
 */

#define KSM_ATTR_RO(_name) \
	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
#define KSM_ATTR(_name) \
	static struct kobj_attribute _name##_attr = \
		__ATTR(_name, 0644, _name##_show, _name##_store)
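
/*
 * For example, KSM_ATTR(sleep_millisecs) expands (modulo the further
 * expansion of __ATTR) to:
 *
 *	static struct kobj_attribute sleep_millisecs_attr =
 *		__ATTR(sleep_millisecs, 0644,
 *		       sleep_millisecs_show, sleep_millisecs_store);
 *
 * and the attribute becomes visible as /sys/kernel/mm/ksm/sleep_millisecs
 * once the "ksm" group below is registered on mm_kobj.
 */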

static ssize_t sleep_millisecs_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", ksm_thread_sleep_millisecs);
}

static ssize_t sleep_millisecs_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = strict_strtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	ksm_thread_sleep_millisecs = msecs;

	return count;
}
KSM_ATTR(sleep_millisecs);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", ksm_thread_pages_to_scan);
}

static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long nr_pages;

	err = strict_strtoul(buf, 10, &nr_pages);
	if (err || nr_pages > UINT_MAX)
		return -EINVAL;

	ksm_thread_pages_to_scan = nr_pages;

	return count;
}
KSM_ATTR(pages_to_scan);

static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr,
			char *buf)
{
	return sprintf(buf, "%u\n", ksm_run);
}

static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
			 const char *buf, size_t count)
{
	int err;
	unsigned long flags;

	err = strict_strtoul(buf, 10, &flags);
	if (err || flags > UINT_MAX)
		return -EINVAL;
	if (flags > KSM_RUN_UNMERGE)
		return -EINVAL;

	/*
	 * KSM_RUN_MERGE sets ksmd running, and 0 stops it running.
	 * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items,
	 * breaking COW to free the unswappable pages_shared (but leaves
	 * mm_slots on the list for when ksmd may be set running again).
	 */

	mutex_lock(&ksm_thread_mutex);
	if (ksm_run != flags) {
		ksm_run = flags;
		if (flags & KSM_RUN_UNMERGE) {
			current->flags |= PF_OOM_ORIGIN;
			err = unmerge_and_remove_all_rmap_items();
			current->flags &= ~PF_OOM_ORIGIN;
			if (err) {
				ksm_run = KSM_RUN_STOP;
				count = err;
			}
		}
	}
	mutex_unlock(&ksm_thread_mutex);

	if (flags & KSM_RUN_MERGE)
		wake_up_interruptible(&ksm_thread_wait);

	return count;
}
KSM_ATTR(run);
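
/*
 * Illustrative (not part of this file): the accepted values follow the
 * KSM_RUN_STOP/KSM_RUN_MERGE/KSM_RUN_UNMERGE constants defined earlier
 * in this file, so from userspace the scanner is typically driven with:
 *
 *	echo 1 > /sys/kernel/mm/ksm/run		(start ksmd)
 *	echo 0 > /sys/kernel/mm/ksm/run		(stop ksmd, keep merged pages)
 *	echo 2 > /sys/kernel/mm/ksm/run		(stop and unmerge everything)
 */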

static ssize_t max_kernel_pages_store(struct kobject *kobj,
				      struct kobj_attribute *attr,
				      const char *buf, size_t count)
{
	int err;
	unsigned long nr_pages;

	err = strict_strtoul(buf, 10, &nr_pages);
	if (err)
		return -EINVAL;

	ksm_max_kernel_pages = nr_pages;

	return count;
}

static ssize_t max_kernel_pages_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_max_kernel_pages);
}
KSM_ATTR(max_kernel_pages);

static ssize_t pages_shared_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_pages_shared);
}
KSM_ATTR_RO(pages_shared);

static ssize_t pages_sharing_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_pages_sharing);
}
KSM_ATTR_RO(pages_sharing);
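
/*
 * Reading these two together: pages_shared counts the shared ksm pages
 * themselves, while pages_sharing counts the additional sites mapping
 * them, so a high pages_sharing/pages_shared ratio roughly indicates
 * that the merging is paying off.
 */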

static ssize_t pages_unshared_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_pages_unshared);
}
KSM_ATTR_RO(pages_unshared);

static ssize_t pages_volatile_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	long ksm_pages_volatile;

	ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared
				- ksm_pages_sharing - ksm_pages_unshared;
	/*
	 * It was not worth any locking to calculate that statistic,
	 * but it might therefore sometimes be negative: conceal that.
	 */
	if (ksm_pages_volatile < 0)
		ksm_pages_volatile = 0;
	return sprintf(buf, "%ld\n", ksm_pages_volatile);
}
KSM_ATTR_RO(pages_volatile);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_scan.seqnr);
}
KSM_ATTR_RO(full_scans);

static struct attribute *ksm_attrs[] = {
	&sleep_millisecs_attr.attr,
	&pages_to_scan_attr.attr,
	&run_attr.attr,
	&max_kernel_pages_attr.attr,
	&pages_shared_attr.attr,
	&pages_sharing_attr.attr,
	&pages_unshared_attr.attr,
	&pages_volatile_attr.attr,
	&full_scans_attr.attr,
	NULL,
};

static struct attribute_group ksm_attr_group = {
	.attrs = ksm_attrs,
	.name = "ksm",
};
#endif /* CONFIG_SYSFS */

static int __init ksm_init(void)
{
	struct task_struct *ksm_thread;
	int err;

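	/*
	 * Merged ksm pages are unswappable in this implementation, so by
	 * default cap them at a quarter of total RAM.
	 */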
	ksm_max_kernel_pages = totalram_pages / 4;

	err = ksm_slab_init();
	if (err)
		goto out;

	err = mm_slots_hash_init();
	if (err)
		goto out_free1;

	ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd");
	if (IS_ERR(ksm_thread)) {
		printk(KERN_ERR "ksm: creating kthread failed\n");
		err = PTR_ERR(ksm_thread);
		goto out_free2;
	}

#ifdef CONFIG_SYSFS
	err = sysfs_create_group(mm_kobj, &ksm_attr_group);
	if (err) {
		printk(KERN_ERR "ksm: register sysfs failed\n");
		kthread_stop(ksm_thread);
		goto out_free2;
	}
#else
	ksm_run = KSM_RUN_MERGE;	/* no way for user to start it */
#endif /* CONFIG_SYSFS */

	return 0;

out_free2:
	mm_slots_hash_free();
out_free1:
	ksm_slab_free();
out:
	return err;
}
module_init(ksm_init)