mm: migration: share the anon_vma ref counts between KSM and page migration
author     Mel Gorman <mel@csn.ul.ie>
           Mon, 24 May 2010 21:32:18 +0000 (14:32 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Tue, 25 May 2010 15:06:58 +0000 (08:06 -0700)
For clarity of review, KSM and page migration have separate refcounts on
the anon_vma.  While clear, this is a waste of memory.  This patch gets
KSM and page migration to share their toys in a spirit of harmony.
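
A minimal sketch of the release sequence the two subsystems now share (the
helper name below is hypothetical; the patch itself leaves the KSM and
migration call sites open-coded, as the hunks further down show):

#include <linux/rmap.h>

/*
 * Sketch only: drop an external reference on an anon_vma and free it if
 * we were the last external user and no vmas remain on its list.  This is
 * the sequence both mm/ksm.c and mm/migrate.c run against the shared
 * external_refcount after this patch; anon_vma_free() is the same release
 * path those call sites already use.
 */
static void drop_anon_vma_external_ref(struct anon_vma *anon_vma)
{
        if (atomic_dec_and_lock(&anon_vma->external_refcount, &anon_vma->lock)) {
                int empty = list_empty(&anon_vma->head);

                spin_unlock(&anon_vma->lock);
                if (empty)
                        anon_vma_free(anon_vma);
        }
}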

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/rmap.h
mm/ksm.c
mm/migrate.c
mm/rmap.c

index 567d43f..7721674 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
  */
 struct anon_vma {
        spinlock_t lock;        /* Serialize access to vma list */
-#ifdef CONFIG_KSM
-       atomic_t ksm_refcount;
-#endif
-#ifdef CONFIG_MIGRATION
-       atomic_t migrate_refcount;
+#if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)
+
+       /*
+        * The external_refcount is taken by either KSM or page migration
+        * to take a reference to an anon_vma when there is no
+        * guarantee that the vma of page tables will exist for
+        * the duration of the operation. A caller that takes
+        * the reference is responsible for clearing up the
+        * anon_vma if they are the last user on release
+        */
+       atomic_t external_refcount;
 #endif
        /*
         * NOTE: the LSB of the head.next is set by
@@ -64,46 +70,26 @@ struct anon_vma_chain {
 };
 
 #ifdef CONFIG_MMU
-#ifdef CONFIG_KSM
-static inline void ksm_refcount_init(struct anon_vma *anon_vma)
+#if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)
+static inline void anonvma_external_refcount_init(struct anon_vma *anon_vma)
 {
-       atomic_set(&anon_vma->ksm_refcount, 0);
+       atomic_set(&anon_vma->external_refcount, 0);
 }
 
-static inline int ksm_refcount(struct anon_vma *anon_vma)
+static inline int anonvma_external_refcount(struct anon_vma *anon_vma)
 {
-       return atomic_read(&anon_vma->ksm_refcount);
+       return atomic_read(&anon_vma->external_refcount);
 }
 #else
-static inline void ksm_refcount_init(struct anon_vma *anon_vma)
+static inline void anonvma_external_refcount_init(struct anon_vma *anon_vma)
 {
 }
 
-static inline int ksm_refcount(struct anon_vma *anon_vma)
+static inline int anonvma_external_refcount(struct anon_vma *anon_vma)
 {
        return 0;
 }
 #endif /* CONFIG_KSM */
-#ifdef CONFIG_MIGRATION
-static inline void migrate_refcount_init(struct anon_vma *anon_vma)
-{
-       atomic_set(&anon_vma->migrate_refcount, 0);
-}
-
-static inline int migrate_refcount(struct anon_vma *anon_vma)
-{
-       return atomic_read(&anon_vma->migrate_refcount);
-}
-#else
-static inline void migrate_refcount_init(struct anon_vma *anon_vma)
-{
-}
-
-static inline int migrate_refcount(struct anon_vma *anon_vma)
-{
-       return 0;
-}
-#endif /* CONFIG_MIGRATE */
 
 static inline struct anon_vma *page_anon_vma(struct page *page)
 {
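
The comment added to struct anon_vma above describes when an external user
pins an anon_vma.  A minimal sketch of that pattern, modelled on the
migration hunk below (the caller shown here is hypothetical, not part of
the patch):

#include <linux/rcupdate.h>
#include <linux/rmap.h>

/*
 * Sketch only: pin the anon_vma while RCU still guarantees it is valid,
 * do work that may outlive the vmas mapping the page, then drop the pin
 * and free the anon_vma if we were its last user.
 */
static void operate_on_anon_page(struct page *page)
{
        struct anon_vma *anon_vma;

        rcu_read_lock();
        anon_vma = page_anon_vma(page);         /* NULL if the page is not anonymous */
        if (anon_vma)
                atomic_inc(&anon_vma->external_refcount);
        rcu_read_unlock();

        /* ... operation during which the mapping vmas may be torn down ... */

        if (anon_vma && atomic_dec_and_lock(&anon_vma->external_refcount,
                                            &anon_vma->lock)) {
                int empty = list_empty(&anon_vma->head);

                spin_unlock(&anon_vma->lock);
                if (empty)
                        anon_vma_free(anon_vma);
        }
}
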
index 956880f..6c3e99b 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -318,14 +318,14 @@ static void hold_anon_vma(struct rmap_item *rmap_item,
                          struct anon_vma *anon_vma)
 {
        rmap_item->anon_vma = anon_vma;
-       atomic_inc(&anon_vma->ksm_refcount);
+       atomic_inc(&anon_vma->external_refcount);
 }
 
 static void drop_anon_vma(struct rmap_item *rmap_item)
 {
        struct anon_vma *anon_vma = rmap_item->anon_vma;
 
-       if (atomic_dec_and_lock(&anon_vma->ksm_refcount, &anon_vma->lock)) {
+       if (atomic_dec_and_lock(&anon_vma->external_refcount, &anon_vma->lock)) {
                int empty = list_empty(&anon_vma->head);
                spin_unlock(&anon_vma->lock);
                if (empty)
index b768a1d..42a3d24 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -601,7 +601,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
                rcu_read_lock();
                rcu_locked = 1;
                anon_vma = page_anon_vma(page);
-               atomic_inc(&anon_vma->migrate_refcount);
+               atomic_inc(&anon_vma->external_refcount);
        }
 
        /*
@@ -643,7 +643,7 @@ skip_unmap:
 rcu_unlock:
 
        /* Drop an anon_vma reference if we took one */
-       if (anon_vma && atomic_dec_and_lock(&anon_vma->migrate_refcount, &anon_vma->lock)) {
+       if (anon_vma && atomic_dec_and_lock(&anon_vma->external_refcount, &anon_vma->lock)) {
                int empty = list_empty(&anon_vma->head);
                spin_unlock(&anon_vma->lock);
                if (empty)
index f522cb0..b5c320f 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -250,8 +250,7 @@ static void anon_vma_unlink(struct anon_vma_chain *anon_vma_chain)
        list_del(&anon_vma_chain->same_anon_vma);
 
        /* We must garbage collect the anon_vma if it's empty */
-       empty = list_empty(&anon_vma->head) && !ksm_refcount(anon_vma) &&
-                                       !migrate_refcount(anon_vma);
+       empty = list_empty(&anon_vma->head) && !anonvma_external_refcount(anon_vma);
        spin_unlock(&anon_vma->lock);
 
        if (empty)
@@ -275,8 +274,7 @@ static void anon_vma_ctor(void *data)
        struct anon_vma *anon_vma = data;
 
        spin_lock_init(&anon_vma->lock);
-       ksm_refcount_init(anon_vma);
-       migrate_refcount_init(anon_vma);
+       anonvma_external_refcount_init(anon_vma);
        INIT_LIST_HEAD(&anon_vma->head);
 }