diff --git a/mm/swapfile.c b/mm/swapfile.c
index fbeb4bb..312fafe 100644
@@ -16,6 +16,7 @@
 #include <linux/namei.h>
 #include <linux/shm.h>
 #include <linux/blkdev.h>
+#include <linux/random.h>
 #include <linux/writeback.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
@@ -32,6 +33,7 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <linux/swapops.h>
+#include <linux/page_cgroup.h>
 
 static DEFINE_SPINLOCK(swap_lock);
 static unsigned int nr_swapfiles;
@@ -95,7 +97,7 @@ static int discard_swap(struct swap_info_struct *si)
 
        list_for_each_entry(se, &si->extent_list, list) {
                sector_t start_block = se->start_block << (PAGE_SHIFT - 9);
-               pgoff_t nr_blocks = se->nr_pages << (PAGE_SHIFT - 9);
+               sector_t nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
 
                if (se->start_page == 0) {
                        /* Do not discard the swap header page! */
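The sector_t cast above widens the page count before it is shifted into 512-byte sectors; done in a 32-bit type such as pgoff_t, the shift can silently truncate for a large extent. A minimal userspace sketch of that truncation, assuming 4K pages and a hypothetical extent size:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12                      /* assumed 4K pages */

int main(void)
{
	uint32_t nr_pages = 1u << 29;      /* hypothetical 2 TiB extent, in pages */

	/* shift performed in 32 bits: wraps around to 0 */
	uint32_t narrow = nr_pages << (PAGE_SHIFT - 9);
	/* widened first, as the patch does with sector_t: correct sector count */
	uint64_t wide = (uint64_t)nr_pages << (PAGE_SHIFT - 9);

	printf("narrow=%u wide=%llu\n", narrow, (unsigned long long)wide);
	return 0;
}
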
@@ -115,14 +117,63 @@ static int discard_swap(struct swap_info_struct *si)
        return err;             /* That will often be -EOPNOTSUPP */
 }
 
+/*
+ * swap allocation tells the device that a cluster of swap can now be discarded,
+ * to allow the swap device to optimize its wear-levelling.
+ */
+static void discard_swap_cluster(struct swap_info_struct *si,
+                                pgoff_t start_page, pgoff_t nr_pages)
+{
+       struct swap_extent *se = si->curr_swap_extent;
+       int found_extent = 0;
+
+       while (nr_pages) {
+               struct list_head *lh;
+
+               if (se->start_page <= start_page &&
+                   start_page < se->start_page + se->nr_pages) {
+                       pgoff_t offset = start_page - se->start_page;
+                       sector_t start_block = se->start_block + offset;
+                       sector_t nr_blocks = se->nr_pages - offset;
+
+                       if (nr_blocks > nr_pages)
+                               nr_blocks = nr_pages;
+                       start_page += nr_blocks;
+                       nr_pages -= nr_blocks;
+
+                       if (!found_extent++)
+                               si->curr_swap_extent = se;
+
+                       start_block <<= PAGE_SHIFT - 9;
+                       nr_blocks <<= PAGE_SHIFT - 9;
+                       if (blkdev_issue_discard(si->bdev, start_block,
+                                                       nr_blocks, GFP_NOIO))
+                               break;
+               }
+
+               lh = se->list.next;
+               if (lh == &si->extent_list)
+                       lh = lh->next;
+               se = list_entry(lh, struct swap_extent, list);
+       }
+}
+
+static int wait_for_discard(void *word)
+{
+       schedule();
+       return 0;
+}
+
 #define SWAPFILE_CLUSTER       256
 #define LATENCY_LIMIT          256
 
 static inline unsigned long scan_swap_map(struct swap_info_struct *si)
 {
        unsigned long offset;
-       unsigned long last_in_cluster;
+       unsigned long scan_base;
+       unsigned long last_in_cluster = 0;
        int latency_ration = LATENCY_LIMIT;
+       int found_free_cluster = 0;
 
        /*
         * We try to cluster swap pages by allocating them sequentially
@@ -132,19 +183,42 @@ static inline unsigned long scan_swap_map(struct swap_info_struct *si)
         * all over the entire swap partition, so that we reduce
         * overall disk seek times between swap pages.  -- sct
         * But we do now try to find an empty cluster.  -Andrea
+        * And we let swap pages go all over an SSD partition.  Hugh
         */
 
        si->flags += SWP_SCANNING;
-       offset = si->cluster_next;
+       scan_base = offset = si->cluster_next;
 
        if (unlikely(!si->cluster_nr--)) {
                if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
                        si->cluster_nr = SWAPFILE_CLUSTER - 1;
                        goto checks;
                }
+               if (si->flags & SWP_DISCARDABLE) {
+                       /*
+                        * Start range check on racing allocations, in case
+                        * they overlap the cluster we eventually decide on
+                        * (we scan without swap_lock to allow preemption).
+                        * It's hardly conceivable that cluster_nr could be
+                        * wrapped during our scan, but don't depend on it.
+                        */
+                       if (si->lowest_alloc)
+                               goto checks;
+                       si->lowest_alloc = si->max;
+                       si->highest_alloc = 0;
+               }
                spin_unlock(&swap_lock);
 
-               offset = si->lowest_bit;
+               /*
+                * If seek is expensive, start searching for new cluster from
+                * start of partition, to minimize the span of allocated swap.
+                * But if seek is cheap, search from our current position, so
+                * that swap is allocated from all over the partition: if the
+                * Flash Translation Layer only remaps within limited zones,
+                * we don't want to wear out the first zone too quickly.
+                */
+               if (!(si->flags & SWP_SOLIDSTATE))
+                       scan_base = offset = si->lowest_bit;
                last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
 
                /* Locate the first empty (unaligned) cluster */
@@ -156,6 +230,7 @@ static inline unsigned long scan_swap_map(struct swap_info_struct *si)
                                offset -= SWAPFILE_CLUSTER - 1;
                                si->cluster_next = offset;
                                si->cluster_nr = SWAPFILE_CLUSTER - 1;
+                               found_free_cluster = 1;
                                goto checks;
                        }
                        if (unlikely(--latency_ration < 0)) {
@@ -165,8 +240,30 @@ static inline unsigned long scan_swap_map(struct swap_info_struct *si)
                }
 
                offset = si->lowest_bit;
+               last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
+
+               /* Locate the first empty (unaligned) cluster */
+               for (; last_in_cluster < scan_base; offset++) {
+                       if (si->swap_map[offset])
+                               last_in_cluster = offset + SWAPFILE_CLUSTER;
+                       else if (offset == last_in_cluster) {
+                               spin_lock(&swap_lock);
+                               offset -= SWAPFILE_CLUSTER - 1;
+                               si->cluster_next = offset;
+                               si->cluster_nr = SWAPFILE_CLUSTER - 1;
+                               found_free_cluster = 1;
+                               goto checks;
+                       }
+                       if (unlikely(--latency_ration < 0)) {
+                               cond_resched();
+                               latency_ration = LATENCY_LIMIT;
+                       }
+               }
+
+               offset = scan_base;
                spin_lock(&swap_lock);
                si->cluster_nr = SWAPFILE_CLUSTER - 1;
+               si->lowest_alloc = 0;
        }
 
 checks:
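
With scan_base remembered, the free-cluster search above becomes circular: one pass from scan_base to the end of the map, then a second pass from lowest_bit back up to scan_base. A small model of that wrap-around search, with hypothetical map contents:

#include <stdio.h>

/* returns the first free offset at or after scan_base, wrapping once */
static long find_free(const char *map, long size, long scan_base)
{
	long off;

	for (off = scan_base; off < size; off++)   /* first leg */
		if (!map[off])
			return off;
	for (off = 0; off < scan_base; off++)      /* wrapped leg */
		if (!map[off])
			return off;
	return -1;                                 /* nothing free */
}

int main(void)
{
	char map[8] = { 1, 1, 0, 1, 1, 1, 1, 1 };

	printf("%ld\n", find_free(map, 8, 5));     /* wraps and prints 2 */
	return 0;
}
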
@@ -175,7 +272,7 @@ checks:
        if (!si->highest_bit)
                goto no_page;
        if (offset > si->highest_bit)
-               offset = si->lowest_bit;
+               scan_base = offset = si->lowest_bit;
        if (si->swap_map[offset])
                goto scan;
 
@@ -191,6 +288,60 @@ checks:
        si->swap_map[offset] = 1;
        si->cluster_next = offset + 1;
        si->flags -= SWP_SCANNING;
+
+       if (si->lowest_alloc) {
+               /*
+                * Only set when SWP_DISCARDABLE, and there's a scan
+                * for a free cluster in progress or just completed.
+                */
+               if (found_free_cluster) {
+                       /*
+                        * To optimize wear-levelling, discard the
+                        * old data of the cluster, taking care not to
+                        * discard any of its pages that have already
+                        * been allocated by racing tasks (offset has
+                        * already stepped over any at the beginning).
+                        */
+                       if (offset < si->highest_alloc &&
+                           si->lowest_alloc <= last_in_cluster)
+                               last_in_cluster = si->lowest_alloc - 1;
+                       si->flags |= SWP_DISCARDING;
+                       spin_unlock(&swap_lock);
+
+                       if (offset < last_in_cluster)
+                               discard_swap_cluster(si, offset,
+                                       last_in_cluster - offset + 1);
+
+                       spin_lock(&swap_lock);
+                       si->lowest_alloc = 0;
+                       si->flags &= ~SWP_DISCARDING;
+
+                       smp_mb();       /* wake_up_bit advises this */
+                       wake_up_bit(&si->flags, ilog2(SWP_DISCARDING));
+
+               } else if (si->flags & SWP_DISCARDING) {
+                       /*
+                        * Delay using pages allocated by racing tasks
+                        * until the whole discard has been issued. We
+                        * could defer that delay until swap_writepage,
+                        * but it's easier to keep this self-contained.
+                        */
+                       spin_unlock(&swap_lock);
+                       wait_on_bit(&si->flags, ilog2(SWP_DISCARDING),
+                               wait_for_discard, TASK_UNINTERRUPTIBLE);
+                       spin_lock(&swap_lock);
+               } else {
+                       /*
+                        * Note pages allocated by racing tasks while
+                        * scan for a free cluster is in progress, so
+                        * that its final discard can exclude them.
+                        */
+                       if (offset < si->lowest_alloc)
+                               si->lowest_alloc = offset;
+                       if (offset > si->highest_alloc)
+                               si->highest_alloc = offset;
+               }
+       }
        return offset;
 
 scan:
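
The SWP_DISCARDING handshake above lets one task issue the cluster discard while racing allocators either record their offsets or, once their allocation falls inside the scanned cluster, sleep until the discard completes. A userspace analogy of that handshake using a flag word and a condition variable; the kernel uses wait_on_bit()/wake_up_bit() on si->flags, and the flag value below is assumed for illustration only:

#include <pthread.h>
#include <stdio.h>

#define SWP_DISCARDING (1 << 3)            /* assumed flag value, illustration only */

static unsigned int flags;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done = PTHREAD_COND_INITIALIZER;

static void *discarder(void *arg)
{
	pthread_mutex_lock(&lock);
	flags |= SWP_DISCARDING;           /* like si->flags |= SWP_DISCARDING */
	pthread_mutex_unlock(&lock);

	/* ... issue the discard with no locks held ... */

	pthread_mutex_lock(&lock);
	flags &= ~SWP_DISCARDING;          /* discard finished */
	pthread_cond_broadcast(&done);     /* plays the role of wake_up_bit() */
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void *allocator(void *arg)
{
	pthread_mutex_lock(&lock);
	while (flags & SWP_DISCARDING)     /* plays the role of wait_on_bit() */
		pthread_cond_wait(&done, &lock);
	pthread_mutex_unlock(&lock);
	puts("safe to use the freshly allocated swap page");
	return NULL;
}

int main(void)
{
	pthread_t d, a;

	pthread_create(&d, NULL, discarder, NULL);
	pthread_create(&a, NULL, allocator, NULL);
	pthread_join(d, NULL);
	pthread_join(a, NULL);
	return 0;
}
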
@@ -205,8 +356,18 @@ scan:
                        latency_ration = LATENCY_LIMIT;
                }
        }
+       offset = si->lowest_bit;
+       while (++offset < scan_base) {
+               if (!si->swap_map[offset]) {
+                       spin_lock(&swap_lock);
+                       goto checks;
+               }
+               if (unlikely(--latency_ration < 0)) {
+                       cond_resched();
+                       latency_ration = LATENCY_LIMIT;
+               }
+       }
        spin_lock(&swap_lock);
-       goto checks;
 
 no_page:
        si->flags -= SWP_SCANNING;
@@ -310,8 +471,9 @@ out:
        return NULL;
 }
 
-static int swap_entry_free(struct swap_info_struct *p, unsigned long offset)
+static int swap_entry_free(struct swap_info_struct *p, swp_entry_t ent)
 {
+       unsigned long offset = swp_offset(ent);
        int count = p->swap_map[offset];
 
        if (count < SWAP_MAP_MAX) {
@@ -326,6 +488,7 @@ static int swap_entry_free(struct swap_info_struct *p, unsigned long offset)
                                swap_list.next = p - swap_info;
                        nr_swap_pages++;
                        p->inuse_pages--;
+                       mem_cgroup_uncharge_swap(ent);
                }
        }
        return count;
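
swap_entry_free() now takes the whole swp_entry_t rather than just the offset, so it can pass the entry on to mem_cgroup_uncharge_swap() when the last reference drops. An entry packs the swap type (which device) and the page offset into a single word; a userspace model of that packing, with illustrative field widths rather than the kernel's exact layout:

#include <assert.h>
#include <stdio.h>

typedef struct { unsigned long val; } swp_entry_t;

#define TYPE_SHIFT 27                      /* illustrative split, not the kernel's */

static swp_entry_t swp_entry(unsigned long type, unsigned long offset)
{
	swp_entry_t e = { (type << TYPE_SHIFT) | offset };
	return e;
}

static unsigned long swp_type(swp_entry_t e)
{
	return e.val >> TYPE_SHIFT;
}

static unsigned long swp_offset(swp_entry_t e)
{
	return e.val & ((1UL << TYPE_SHIFT) - 1);
}

int main(void)
{
	swp_entry_t e = swp_entry(2, 12345);

	assert(swp_type(e) == 2 && swp_offset(e) == 12345);
	printf("type=%lu offset=%lu\n", swp_type(e), swp_offset(e));
	return 0;
}
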
@@ -341,7 +504,7 @@ void swap_free(swp_entry_t entry)
 
        p = swap_info_get(entry);
        if (p) {
-               swap_entry_free(p, swp_offset(entry));
+               swap_entry_free(p, entry);
                spin_unlock(&swap_lock);
        }
 }
@@ -411,17 +574,17 @@ int try_to_free_swap(struct page *page)
  * Free the swap entry like above, but also try to
  * free the page cache entry if it is the last user.
  */
-void free_swap_and_cache(swp_entry_t entry)
+int free_swap_and_cache(swp_entry_t entry)
 {
-       struct swap_info_struct * p;
+       struct swap_info_struct *p;
        struct page *page = NULL;
 
        if (is_migration_entry(entry))
-               return;
+               return 1;
 
        p = swap_info_get(entry);
        if (p) {
-               if (swap_entry_free(p, swp_offset(entry)) == 1) {
+               if (swap_entry_free(p, entry) == 1) {
                        page = find_get_page(&swapper_space, entry.val);
                        if (page && !trylock_page(page)) {
                                page_cache_release(page);
@@ -443,6 +606,7 @@ void free_swap_and_cache(swp_entry_t entry)
                unlock_page(page);
                page_cache_release(page);
        }
+       return p != NULL;
 }
 
 #ifdef CONFIG_HIBERNATION
@@ -471,7 +635,7 @@ int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
 
                if (!bdev) {
                        if (bdev_p)
-                               *bdev_p = sis->bdev;
+                               *bdev_p = bdget(sis->bdev->bd_dev);
 
                        spin_unlock(&swap_lock);
                        return i;
@@ -483,7 +647,7 @@ int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
                                        struct swap_extent, list);
                        if (se->start_block == offset) {
                                if (bdev_p)
-                                       *bdev_p = sis->bdev;
+                                       *bdev_p = bdget(sis->bdev->bd_dev);
 
                                spin_unlock(&swap_lock);
                                bdput(bdev);
@@ -529,17 +693,20 @@ unsigned int count_swap_pages(int type, int free)
 static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long addr, swp_entry_t entry, struct page *page)
 {
+       struct mem_cgroup *ptr = NULL;
        spinlock_t *ptl;
        pte_t *pte;
        int ret = 1;
 
-       if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
+       if (mem_cgroup_try_charge_swapin(vma->vm_mm, page, GFP_KERNEL, &ptr)) {
                ret = -ENOMEM;
+               goto out_nolock;
+       }
 
        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) {
                if (ret > 0)
-                       mem_cgroup_uncharge_page(page);
+                       mem_cgroup_cancel_charge_swapin(ptr);
                ret = 0;
                goto out;
        }
@@ -549,6 +716,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
        set_pte_at(vma->vm_mm, addr, pte,
                   pte_mkold(mk_pte(page, vma->vm_page_prot)));
        page_add_anon_rmap(page, vma, addr);
+       mem_cgroup_commit_charge_swapin(page, ptr);
        swap_free(entry);
        /*
         * Move the page to the active list so it is not
@@ -557,6 +725,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
        activate_page(page);
 out:
        pte_unmap_unlock(pte, ptl);
+out_nolock:
        return ret;
 }
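
unuse_pte() now charges the memory cgroup in two phases: a charge is reserved before the page-table lock is taken, committed once the PTE is seen to still match the swap entry, and cancelled if a racing task got there first. A minimal sketch of that try/commit/cancel shape; the names below are illustrative, not the memcg API:

#include <stdbool.h>
#include <stdio.h>

struct charge { bool reserved; };

static bool try_charge(struct charge *c)
{
	c->reserved = true;                /* reserve before taking the lock */
	return true;
}

static void commit_charge(struct charge *c)
{
	c->reserved = false;
	puts("charge committed");
}

static void cancel_charge(struct charge *c)
{
	c->reserved = false;
	puts("charge cancelled");
}

static int unuse_one(bool pte_still_matches)
{
	struct charge c = { false };

	if (!try_charge(&c))
		return -1;                 /* -ENOMEM path: nothing to undo */

	/* ... take the page-table lock and recheck the PTE ... */
	if (!pte_still_matches) {
		cancel_charge(&c);         /* lost the race: roll back */
		return 0;
	}

	/* ... install the PTE and rmap ... */
	commit_charge(&c);                 /* reservation becomes a real charge */
	return 1;
}

int main(void)
{
	unuse_one(true);
	unuse_one(false);
	return 0;
}
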
 
@@ -1211,27 +1380,7 @@ out:
        return ret;
 }
 
-#if 0  /* We don't need this yet */
-#include <linux/backing-dev.h>
-int page_queue_congested(struct page *page)
-{
-       struct backing_dev_info *bdi;
-
-       VM_BUG_ON(!PageLocked(page));   /* It pins the swap_info_struct */
-
-       if (PageSwapCache(page)) {
-               swp_entry_t entry = { .val = page_private(page) };
-               struct swap_info_struct *sis;
-
-               sis = get_swap_info_struct(swp_type(entry));
-               bdi = sis->bdev->bd_inode->i_mapping->backing_dev_info;
-       } else
-               bdi = page->mapping->backing_dev_info;
-       return bdi_write_congested(bdi);
-}
-#endif
-
-asmlinkage long sys_swapoff(const char __user * specialfile)
+SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
 {
        struct swap_info_struct * p = NULL;
        unsigned short *swap_map;
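
SYSCALL_DEFINE1() replaces the open-coded asmlinkage prototype; with the per-architecture syscall wrappers disabled it boils down to the same sys_swapoff() definition, and with them enabled it can also generate stubs that sign-extend 32-bit arguments on 64-bit architectures. A stripped-down userspace model of that expansion (simplified, not the real kernel macro):

#include <stdio.h>

#define asmlinkage                       /* no-op outside the kernel */
#define __user
#define SYSCALL_DEFINE1(name, t1, a1)    asmlinkage long sys_##name(t1 a1)

SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
{
	printf("swapoff(%s)\n", specialfile);
	return 0;
}

int main(void)
{
	return (int)sys_swapoff("/dev/hypothetical-swap");
}
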
@@ -1351,6 +1500,9 @@ asmlinkage long sys_swapoff(const char __user * specialfile)
        spin_unlock(&swap_lock);
        mutex_unlock(&swapon_mutex);
        vfree(swap_map);
+       /* Destroy swap account information */
+       swap_cgroup_swapoff(type);
+
        inode = mapping->host;
        if (S_ISBLK(inode->i_mode)) {
                struct block_device *bdev = I_BDEV(inode);
@@ -1484,7 +1636,7 @@ late_initcall(max_swapfiles_check);
  *
  * The swapon system call
  */
-asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
+SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
 {
        struct swap_info_struct * p;
        char *name = NULL;
@@ -1668,6 +1820,11 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
                }
                swap_map[page_nr] = SWAP_MAP_BAD;
        }
+
+       error = swap_cgroup_swapon(type, maxpages);
+       if (error)
+               goto bad_swap;
+
        nr_good_pages = swap_header->info.last_page -
                        swap_header->info.nr_badpages -
                        1 /* header page */;
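
swap_cgroup_swapon() conceptually reserves one record per swap slot on the device so that the memory cgroup owning each swapped-out page can be remembered, and swap_cgroup_swapoff() (called on the error path below and in sys_swapoff) tears those records down. A flat-array userspace model of that bookkeeping; the kernel's actual storage is split into per-device, page-sized chunks, so treat this only as a sketch:

#include <stdio.h>
#include <stdlib.h>

static unsigned short *swap_cgroup_map;    /* one cgroup id per swap slot */

static int model_swap_cgroup_swapon(unsigned long maxpages)
{
	swap_cgroup_map = calloc(maxpages, sizeof(*swap_cgroup_map));
	return swap_cgroup_map ? 0 : -1;   /* -ENOMEM in the kernel */
}

static void model_swap_cgroup_swapoff(void)
{
	free(swap_cgroup_map);
	swap_cgroup_map = NULL;
}

int main(void)
{
	if (model_swap_cgroup_swapon(1024) == 0) {
		swap_cgroup_map[42] = 7;   /* slot 42 belongs to cgroup id 7 */
		printf("slot 42 -> cgroup %u\n", swap_cgroup_map[42]);
		model_swap_cgroup_swapoff();
	}
	return 0;
}
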
@@ -1689,6 +1846,10 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
                goto bad_swap;
        }
 
+       if (blk_queue_nonrot(bdev_get_queue(p->bdev))) {
+               p->flags |= SWP_SOLIDSTATE;
+               p->cluster_next = 1 + (random32() % p->highest_bit);
+       }
        if (discard_swap(p) == 0)
                p->flags |= SWP_DISCARDABLE;
 
@@ -1705,10 +1866,11 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
        total_swap_pages += nr_good_pages;
 
        printk(KERN_INFO "Adding %uk swap on %s.  "
-                       "Priority:%d extents:%d across:%lluk%s\n",
+                       "Priority:%d extents:%d across:%lluk %s%s\n",
                nr_good_pages<<(PAGE_SHIFT-10), name, p->prio,
                nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10),
-               (p->flags & SWP_DISCARDABLE) ? " D" : "");
+               (p->flags & SWP_SOLIDSTATE) ? "SS" : "",
+               (p->flags & SWP_DISCARDABLE) ? "D" : "");
 
        /* insert swap space into swap_list: */
        prev = -1;
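
On a non-rotational device the starting cluster chosen above is a random offset in [1, highest_bit], skipping offset 0 where the swap header lives, so successive swapon cycles start wearing different zones of the device. A tiny model of that pick, with rand() standing in for random32() and a hypothetical device size:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int main(void)
{
	unsigned int highest_bit = 262143;   /* hypothetical 1 GiB of 4K swap slots */
	unsigned int cluster_next;

	srand((unsigned int)time(NULL));
	cluster_next = 1 + ((unsigned int)rand() % highest_bit);

	printf("first allocation will start at offset %u\n", cluster_next);
	return 0;
}
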
@@ -1734,6 +1896,7 @@ bad_swap:
                bd_release(bdev);
        }
        destroy_swap_extents(p);
+       swap_cgroup_swapoff(type);
 bad_swap_2:
        spin_lock(&swap_lock);
        p->swap_file = NULL;