mm: add swap cache interface for swap reference
author     KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
           Tue, 16 Jun 2009 22:32:52 +0000 (15:32 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Wed, 17 Jun 2009 02:47:42 +0000 (19:47 -0700)
In a following patch, the use of swap cache will be recorded in swap_map.
This patch adds the interface changes necessary to do that.

Two interfaces:

  - swapcache_prepare()
  - swapcache_free()

are added for taking and dropping the swap-cache reference on existing
swap entries.  The underlying implementation is not changed by this
patch.  With the addition of swapcache_free(), the memcg uncharge hook
is moved into it, which is better than scattering the hook across
callers.
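
For illustration, a minimal sketch of how a caller is expected to use
the new pair (modelled on the add_to_swap()/read_swap_cache_async()
hunks below; the function name and error codes are illustrative only,
not part of this patch):

	/*
	 * Take the swap-cache reference on an existing entry before
	 * inserting the page, and drop it again if the insertion fails.
	 */
	static int example_swapin_prepare(struct page *page, swp_entry_t entry,
					  gfp_t gfp_mask)
	{
		int err;

		if (!swapcache_prepare(entry))
			return -ENOENT;	/* entry was freed under us */

		err = add_to_swap_cache(page, entry, gfp_mask);
		if (err)
			/* not in swap cache: drop the reference, no memcg page */
			swapcache_free(entry, NULL);
		return err;
	}

When a page is actually removed from the swap cache (as in
delete_from_swap_cache() and __remove_mapping() below), the page is
passed so that the memcg swapcache uncharge runs inside
swapcache_free(entry, page).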

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Acked-by: Balbir Singh <balbir@in.ibm.com>
Cc: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Dhaval Giani <dhaval@linux.vnet.ibm.com>
Cc: YAMAMOTO Takashi <yamamoto@valinux.co.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/swap.h
mm/shmem.c
mm/swap_state.c
mm/swapfile.c
mm/vmscan.c

diff --git a/include/linux/swap.h b/include/linux/swap.h
index f30c069..259e96c 100644
@@ -282,8 +282,10 @@ extern void si_swapinfo(struct sysinfo *);
 extern swp_entry_t get_swap_page(void);
 extern swp_entry_t get_swap_page_of_type(int);
 extern int swap_duplicate(swp_entry_t);
+extern int swapcache_prepare(swp_entry_t);
 extern int valid_swaphandles(swp_entry_t, unsigned long *);
 extern void swap_free(swp_entry_t);
+extern void swapcache_free(swp_entry_t, struct page *page);
 extern int free_swap_and_cache(swp_entry_t);
 extern int swap_type_of(dev_t, sector_t, struct block_device **);
 extern unsigned int count_swap_pages(int, int);
@@ -352,11 +354,16 @@ static inline void show_swap_cache_info(void)
 
 #define free_swap_and_cache(swp)       is_migration_entry(swp)
 #define swap_duplicate(swp)            is_migration_entry(swp)
+#define swapcache_prepare(swp)         is_migration_entry(swp)
 
 static inline void swap_free(swp_entry_t swp)
 {
 }
 
+static inline void swapcache_free(swp_entry_t swp, struct page *page)
+{
+}
+
 static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr)
 {
diff --git a/mm/shmem.c b/mm/shmem.c
index 0132fbd..47ab191 100644
@@ -1097,7 +1097,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
        shmem_swp_unmap(entry);
 unlock:
        spin_unlock(&info->lock);
-       swap_free(swap);
+       swapcache_free(swap, NULL);
 redirty:
        set_page_dirty(page);
        if (wbc->for_reclaim)
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 1416e7e..19bdf30 100644
@@ -162,11 +162,11 @@ int add_to_swap(struct page *page)
                        return 1;
                case -EEXIST:
                        /* Raced with "speculative" read_swap_cache_async */
-                       swap_free(entry);
+                       swapcache_free(entry, NULL);
                        continue;
                default:
                        /* -ENOMEM radix-tree allocation failure */
-                       swap_free(entry);
+                       swapcache_free(entry, NULL);
                        return 0;
                }
        }
@@ -188,8 +188,7 @@ void delete_from_swap_cache(struct page *page)
        __delete_from_swap_cache(page);
        spin_unlock_irq(&swapper_space.tree_lock);
 
-       mem_cgroup_uncharge_swapcache(page, entry);
-       swap_free(entry);
+       swapcache_free(entry, page);
        page_cache_release(page);
 }
 
@@ -293,7 +292,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                /*
                 * Swap entry may have been freed since our caller observed it.
                 */
-               if (!swap_duplicate(entry))
+               if (!swapcache_prepare(entry))
                        break;
 
                /*
@@ -317,7 +316,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                }
                ClearPageSwapBacked(new_page);
                __clear_page_locked(new_page);
-               swap_free(entry);
+               swapcache_free(entry, NULL);
        } while (err != -ENOMEM);
 
        if (new_page)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 312fafe..3187079 100644
@@ -510,6 +510,16 @@ void swap_free(swp_entry_t entry)
 }
 
 /*
+ * Called after dropping swapcache, to decrease the refcnt on the swap entry.
+ */
+void swapcache_free(swp_entry_t entry, struct page *page)
+{
+       if (page)
+               mem_cgroup_uncharge_swapcache(page, entry);
+       return swap_free(entry);
+}
+
+/*
  * How many references to page are currently swapped out?
  */
 static inline int page_swapcount(struct page *page)
@@ -1979,6 +1989,15 @@ bad_file:
        goto out;
 }
 
+/*
+ * Called when allocating swap cache for an existing swap entry.
+ */
+int swapcache_prepare(swp_entry_t entry)
+{
+       return swap_duplicate(entry);
+}
+
+
 struct swap_info_struct *
 get_swap_info_struct(unsigned type)
 {
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2c4b945..52339dd 100644
@@ -470,8 +470,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page)
                swp_entry_t swap = { .val = page_private(page) };
                __delete_from_swap_cache(page);
                spin_unlock_irq(&mapping->tree_lock);
-               mem_cgroup_uncharge_swapcache(page, swap);
-               swap_free(swap);
+               swapcache_free(swap, page);
        } else {
                __remove_from_page_cache(page);
                spin_unlock_irq(&mapping->tree_lock);