* Swap reorganised 29.12.95, Stephen Tweedie
*/
-#include <linux/config.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
+#include <linux/mutex.h>
+#include <linux/capability.h>
#include <linux/syscalls.h>
+#include <linux/memcontrol.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <linux/swapops.h>
-DEFINE_SPINLOCK(swaplock);
+DEFINE_SPINLOCK(swap_lock);
unsigned int nr_swapfiles;
long total_swap_pages;
static int swap_overflow;
-EXPORT_SYMBOL(total_swap_pages);
-
static const char Bad_file[] = "Bad swap file entry ";
static const char Unused_file[] = "Unused swap file entry ";
static const char Bad_offset[] = "Bad swap offset entry ";
struct swap_list_t swap_list = {-1, -1};
-struct swap_info_struct swap_info[MAX_SWAPFILES];
+static struct swap_info_struct swap_info[MAX_SWAPFILES];
-static DECLARE_MUTEX(swapon_sem);
+static DEFINE_MUTEX(swapon_mutex);
/*
* We need this because the bdev->unplug_fn can sleep and we cannot
- * hold swap_list_lock while calling the unplug_fn. And swap_list_lock
- * cannot be turned into a semaphore.
+ * hold swap_lock while calling the unplug_fn. And swap_lock
+ * cannot be turned into a mutex.
*/
static DECLARE_RWSEM(swap_unplug_sem);
-#define SWAPFILE_CLUSTER 256
-
void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page)
{
swp_entry_t entry;
down_read(&swap_unplug_sem);
- entry.val = page->private;
+ entry.val = page_private(page);
if (PageSwapCache(page)) {
struct block_device *bdev = swap_info[swp_type(entry)].bdev;
struct backing_dev_info *bdi;
/*
* If the page is removed from swapcache from under us (with a
* racy try_to_unuse/swapoff) we need an additional reference
- * count to avoid reading garbage from page->private above. If
- * the WARN_ON triggers during a swapoff it maybe the race
+ * count to avoid reading garbage from page_private(page) above.
+ * If the WARN_ON triggers during a swapoff it may be the race
* condition and it's harmless. However if it triggers without
* swapoff it signals a problem.
*/
up_read(&swap_unplug_sem);
}
-static inline int scan_swap_map(struct swap_info_struct *si)
+#define SWAPFILE_CLUSTER 256
+#define LATENCY_LIMIT 256
+
+static inline unsigned long scan_swap_map(struct swap_info_struct *si)
{
- unsigned long offset;
+ unsigned long offset, last_in_cluster;
+ int latency_ration = LATENCY_LIMIT;
+
/*
- * We try to cluster swap pages by allocating them
- * sequentially in swap. Once we've allocated
- * SWAPFILE_CLUSTER pages this way, however, we resort to
- * first-free allocation, starting a new cluster. This
- * prevents us from scattering swap pages all over the entire
- * swap partition, so that we reduce overall disk seek times
- * between swap pages. -- sct */
- if (si->cluster_nr) {
- while (si->cluster_next <= si->highest_bit) {
- offset = si->cluster_next++;
+ * We try to cluster swap pages by allocating them sequentially
+ * in swap. Once we've allocated SWAPFILE_CLUSTER pages this
+ * way, however, we resort to first-free allocation, starting
+ * a new cluster. This prevents us from scattering swap pages
+ * all over the entire swap partition, so that we reduce
+ * overall disk seek times between swap pages. -- sct
+ * But we do now try to find an empty cluster. -Andrea
+ */
+
+ si->flags += SWP_SCANNING;
+ if (unlikely(!si->cluster_nr)) {
+ si->cluster_nr = SWAPFILE_CLUSTER - 1;
+ if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER)
+ goto lowest;
+ spin_unlock(&swap_lock);
+
+ offset = si->lowest_bit;
+ last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
+
+ /* Locate the first empty (unaligned) cluster */
+ for (; last_in_cluster <= si->highest_bit; offset++) {
if (si->swap_map[offset])
- continue;
- si->cluster_nr--;
- goto got_page;
- }
- }
- si->cluster_nr = SWAPFILE_CLUSTER;
-
- /* try to find an empty (even not aligned) cluster. */
- offset = si->lowest_bit;
- check_next_cluster:
- if (offset+SWAPFILE_CLUSTER-1 <= si->highest_bit)
- {
- unsigned long nr;
- for (nr = offset; nr < offset+SWAPFILE_CLUSTER; nr++)
- if (si->swap_map[nr])
- {
- offset = nr+1;
- goto check_next_cluster;
+ last_in_cluster = offset + SWAPFILE_CLUSTER;
+ else if (offset == last_in_cluster) {
+ spin_lock(&swap_lock);
+ si->cluster_next = offset-SWAPFILE_CLUSTER+1;
+ goto cluster;
}
- /* We found a completly empty cluster, so start
- * using it.
- */
- goto got_page;
+ if (unlikely(--latency_ration < 0)) {
+ cond_resched();
+ latency_ration = LATENCY_LIMIT;
+ }
+ }
+ spin_lock(&swap_lock);
+ goto lowest;
}
- /* No luck, so now go finegrined as usual. -Andrea */
- for (offset = si->lowest_bit; offset <= si->highest_bit ; offset++) {
- if (si->swap_map[offset])
- continue;
- si->lowest_bit = offset+1;
- got_page:
+
+ si->cluster_nr--;
+cluster:
+ offset = si->cluster_next;
+ if (offset > si->highest_bit)
+lowest: offset = si->lowest_bit;
+checks: if (!(si->flags & SWP_WRITEOK))
+ goto no_page;
+ if (!si->highest_bit)
+ goto no_page;
+ if (!si->swap_map[offset]) {
if (offset == si->lowest_bit)
si->lowest_bit++;
if (offset == si->highest_bit)
si->highest_bit--;
- if (si->lowest_bit > si->highest_bit) {
+ si->inuse_pages++;
+ if (si->inuse_pages == si->pages) {
si->lowest_bit = si->max;
si->highest_bit = 0;
}
si->swap_map[offset] = 1;
- si->inuse_pages++;
- nr_swap_pages--;
- si->cluster_next = offset+1;
+ si->cluster_next = offset + 1;
+ si->flags -= SWP_SCANNING;
return offset;
}
- si->lowest_bit = si->max;
- si->highest_bit = 0;
+
+ spin_unlock(&swap_lock);
+ while (++offset <= si->highest_bit) {
+ if (!si->swap_map[offset]) {
+ spin_lock(&swap_lock);
+ goto checks;
+ }
+ if (unlikely(--latency_ration < 0)) {
+ cond_resched();
+ latency_ration = LATENCY_LIMIT;
+ }
+ }
+ spin_lock(&swap_lock);
+ goto lowest;
+
+no_page:
+ si->flags -= SWP_SCANNING;
return 0;
}
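/*
 * A minimal userspace model of the empty-cluster search above
 * (illustrative only, not part of this patch): scan a swap_map-like
 * array for CLUSTER consecutive free slots, restarting the candidate
 * window past any in-use slot, exactly as the last_in_cluster walk
 * in scan_swap_map() does.
 */
#include <stdio.h>

#define CLUSTER 256

/* Returns the first offset of a free run of CLUSTER slots, or -1. */
static long find_empty_cluster(const unsigned short *map,
			       long lowest, long highest)
{
	long offset = lowest;
	long last_in_cluster = offset + CLUSTER - 1;

	for (; last_in_cluster <= highest; offset++) {
		if (map[offset])
			/* in-use slot: window must start after it */
			last_in_cluster = offset + CLUSTER;
		else if (offset == last_in_cluster)
			/* CLUSTER free slots end here; report the start */
			return offset - CLUSTER + 1;
	}
	return -1;
}

int main(void)
{
	unsigned short map[600] = { 0 };

	map[100] = 1;	/* one busy slot forces the window past it */
	printf("%ld\n", find_empty_cluster(map, 1, 599));	/* prints 101 */
	return 0;
}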
swp_entry_t get_swap_page(void)
{
- struct swap_info_struct * p;
- unsigned long offset;
- swp_entry_t entry;
- int type, wrapped = 0;
+ struct swap_info_struct *si;
+ pgoff_t offset;
+ int type, next;
+ int wrapped = 0;
- entry.val = 0; /* Out of memory */
- swap_list_lock();
- type = swap_list.next;
- if (type < 0)
- goto out;
+ spin_lock(&swap_lock);
if (nr_swap_pages <= 0)
- goto out;
+ goto noswap;
+ nr_swap_pages--;
+
+ for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) {
+ si = swap_info + type;
+ next = si->next;
+ if (next < 0 ||
+ (!wrapped && si->prio != swap_info[next].prio)) {
+ next = swap_list.head;
+ wrapped++;
+ }
- while (1) {
- p = &swap_info[type];
- if ((p->flags & SWP_ACTIVE) == SWP_ACTIVE) {
- swap_device_lock(p);
- offset = scan_swap_map(p);
- swap_device_unlock(p);
- if (offset) {
- entry = swp_entry(type,offset);
- type = swap_info[type].next;
- if (type < 0 ||
- p->prio != swap_info[type].prio) {
- swap_list.next = swap_list.head;
- } else {
- swap_list.next = type;
- }
- goto out;
- }
+ if (!si->highest_bit)
+ continue;
+ if (!(si->flags & SWP_WRITEOK))
+ continue;
+
+ swap_list.next = next;
+ offset = scan_swap_map(si);
+ if (offset) {
+ spin_unlock(&swap_lock);
+ return swp_entry(type, offset);
}
- type = p->next;
- if (!wrapped) {
- if (type < 0 || p->prio != swap_info[type].prio) {
- type = swap_list.head;
- wrapped = 1;
- }
- } else
- if (type < 0)
- goto out; /* out of swap space */
+ next = swap_list.next;
}
-out:
- swap_list_unlock();
- return entry;
+
+ nr_swap_pages++;
+noswap:
+ spin_unlock(&swap_lock);
+ return (swp_entry_t) {0};
+}
+
+swp_entry_t get_swap_page_of_type(int type)
+{
+ struct swap_info_struct *si;
+ pgoff_t offset;
+
+ spin_lock(&swap_lock);
+ si = swap_info + type;
+ if (si->flags & SWP_WRITEOK) {
+ nr_swap_pages--;
+ offset = scan_swap_map(si);
+ if (offset) {
+ spin_unlock(&swap_lock);
+ return swp_entry(type, offset);
+ }
+ nr_swap_pages++;
+ }
+ spin_unlock(&swap_lock);
+ return (swp_entry_t) {0};
}
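/*
 * A minimal userspace model of the device rotation in get_swap_page()
 * above (illustrative only, not part of this patch): swap devices sit
 * in a list sorted by descending priority; allocation round-robins
 * within the highest-priority tier by advancing the list's next index,
 * and falls back to lower tiers only on the second pass, after
 * "wrapped" is bumped.
 */
#include <stdio.h>

struct dev { int prio; int full; int next; };

static struct dev devs[] = {
	{ .prio = 5, .full = 0, .next = 1 },	/* head of the list */
	{ .prio = 5, .full = 0, .next = 2 },	/* same tier        */
	{ .prio = 1, .full = 0, .next = -1 },	/* lower priority   */
};
static int head = 0, nexti = 0;

static int pick_dev(void)
{
	int type, next, wrapped = 0;

	for (type = nexti; type >= 0 && wrapped < 2; type = next) {
		next = devs[type].next;
		if (next < 0 ||
		    (!wrapped && devs[type].prio != devs[next].prio)) {
			next = head;	/* rotate back to the top tier */
			wrapped++;
		}
		if (devs[type].full)
			continue;
		nexti = next;	/* round-robin: start here next time */
		return type;
	}
	return -1;		/* all devices full */
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("allocating from device %d\n", pick_dev());
	/* prints 0, 1, 0, 1: rotation within the priority-5 tier */
	return 0;
}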
static struct swap_info_struct * swap_info_get(swp_entry_t entry)
goto bad_offset;
if (!p->swap_map[offset])
goto bad_free;
- swap_list_lock();
- if (p->prio > swap_info[swap_list.next].prio)
- swap_list.next = type;
- swap_device_lock(p);
+ spin_lock(&swap_lock);
return p;
bad_free:
return NULL;
}
-static void swap_info_put(struct swap_info_struct * p)
-{
- swap_device_unlock(p);
- swap_list_unlock();
-}
-
static int swap_entry_free(struct swap_info_struct *p, unsigned long offset)
{
int count = p->swap_map[offset];
p->lowest_bit = offset;
if (offset > p->highest_bit)
p->highest_bit = offset;
+ if (p->prio > swap_info[swap_list.next].prio)
+ swap_list.next = p - swap_info;
nr_swap_pages++;
p->inuse_pages--;
}
p = swap_info_get(entry);
if (p) {
swap_entry_free(p, swp_offset(entry));
- swap_info_put(p);
+ spin_unlock(&swap_lock);
}
}
struct swap_info_struct *p;
swp_entry_t entry;
- entry.val = page->private;
+ entry.val = page_private(page);
p = swap_info_get(entry);
if (p) {
/* Subtract the 1 for the swap cache itself */
count = p->swap_map[swp_offset(entry)] - 1;
- swap_info_put(p);
+ spin_unlock(&swap_lock);
}
return count;
}
if (page_count(page) != 2) /* 2: us + cache */
return 0;
- entry.val = page->private;
+ entry.val = page_private(page);
p = swap_info_get(entry);
if (!p)
return 0;
}
write_unlock_irq(&swapper_space.tree_lock);
}
- swap_info_put(p);
+ spin_unlock(&swap_lock);
if (retval) {
swap_free(entry);
struct swap_info_struct * p;
struct page *page = NULL;
+ if (is_migration_entry(entry))
+ return;
+
p = swap_info_get(entry);
if (p) {
- if (swap_entry_free(p, swp_offset(entry)) == 1)
- page = find_trylock_page(&swapper_space, entry.val);
- swap_info_put(p);
+ if (swap_entry_free(p, swp_offset(entry)) == 1) {
+ page = find_get_page(&swapper_space, entry.val);
+ if (page && unlikely(TestSetPageLocked(page))) {
+ page_cache_release(page);
+ page = NULL;
+ }
+ }
+ spin_unlock(&swap_lock);
}
if (page) {
int one_user;
BUG_ON(PagePrivate(page));
- page_cache_get(page);
one_user = (page_count(page) == 2);
/* Only cache user (+us), or swap space full? Free it! */
- if (!PageWriteback(page) && (one_user || vm_swap_full())) {
+ /* Also recheck PageSwapCache after page is locked (above) */
+ if (PageSwapCache(page) && !PageWriteback(page) &&
+ (one_user || vm_swap_full())) {
delete_from_swap_cache(page);
SetPageDirty(page);
}
}
}
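/*
 * A minimal pthread model of the lookup-then-trylock pattern above
 * (illustrative only, not part of this patch): take a reference first,
 * then try to lock without sleeping; on contention drop the reference
 * and treat the lookup as failed, as free_swap_and_cache() does with
 * find_get_page()/TestSetPageLocked().
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct object {
	pthread_mutex_t lock;
	atomic_int refcount;
};

static struct object *try_get_locked(struct object *obj)
{
	atomic_fetch_add(&obj->refcount, 1);
	if (pthread_mutex_trylock(&obj->lock) != 0) {
		/* contended: back off rather than sleep */
		atomic_fetch_sub(&obj->refcount, 1);
		return NULL;
	}
	return obj;
}

int main(void)
{
	struct object obj = { PTHREAD_MUTEX_INITIALIZER, 1 };
	struct object *o = try_get_locked(&obj);

	printf("%s\n", o ? "locked" : "busy");	/* prints "locked" */
	return 0;
}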
+#ifdef CONFIG_HIBERNATION
/*
- * Always set the resulting pte to be nowrite (the same as COW pages
- * after one process has exited). We don't know just how many PTEs will
- * share this swap entry, so be cautious and let do_wp_page work out
- * what to do if a write is requested later.
+ * Find the swap type that corresponds to given device (if any).
+ *
+ * @offset - number of the PAGE_SIZE-sized block of the device, starting
+ * from 0, in which the swap header is expected to be located.
*
- * vma->vm_mm->page_table_lock is held.
+ * This is needed for the suspend to disk (aka swsusp).
*/
-static void unuse_pte(struct vm_area_struct *vma, pte_t *pte,
+int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
+{
+ struct block_device *bdev = NULL;
+ int i;
+
+ if (device)
+ bdev = bdget(device);
+
+ spin_lock(&swap_lock);
+ for (i = 0; i < nr_swapfiles; i++) {
+ struct swap_info_struct *sis = swap_info + i;
+
+ if (!(sis->flags & SWP_WRITEOK))
+ continue;
+
+ if (!bdev) {
+ if (bdev_p)
+ *bdev_p = sis->bdev;
+
+ spin_unlock(&swap_lock);
+ return i;
+ }
+ if (bdev == sis->bdev) {
+ struct swap_extent *se;
+
+ se = list_entry(sis->extent_list.next,
+ struct swap_extent, list);
+ if (se->start_block == offset) {
+ if (bdev_p)
+ *bdev_p = sis->bdev;
+
+ spin_unlock(&swap_lock);
+ bdput(bdev);
+ return i;
+ }
+ }
+ }
+ spin_unlock(&swap_lock);
+ if (bdev)
+ bdput(bdev);
+
+ return -ENODEV;
+}
+
+/*
+ * Return either the total number of swap pages of given type, or the number
+ * of free pages of that type (depending on @free)
+ *
+ * This is needed for software suspend
+ */
+unsigned int count_swap_pages(int type, int free)
+{
+ unsigned int n = 0;
+
+ if (type < nr_swapfiles) {
+ spin_lock(&swap_lock);
+ if (swap_info[type].flags & SWP_WRITEOK) {
+ n = swap_info[type].pages;
+ if (free)
+ n -= swap_info[type].inuse_pages;
+ }
+ spin_unlock(&swap_lock);
+ }
+ return n;
+}
+#endif
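/*
 * A minimal userspace model of the matching loop in swap_type_of()
 * above (illustrative only, not part of this patch; the device==0
 * "first writable area" case is omitted): given recorded swap areas,
 * find the index whose backing device and header block match.
 */
#include <stdio.h>

struct area { int dev; long long header_block; int writeok; };

static int type_of(const struct area *a, int n, int dev, long long off)
{
	for (int i = 0; i < n; i++) {
		if (!a[i].writeok)
			continue;
		if (a[i].dev == dev && a[i].header_block == off)
			return i;
	}
	return -1;	/* -ENODEV in the kernel */
}

int main(void)
{
	struct area areas[] = {
		{ .dev = 0x801, .header_block = 0,  .writeok = 1 },
		{ .dev = 0x805, .header_block = 16, .writeok = 1 },
	};

	printf("%d\n", type_of(areas, 2, 0x805, 16));	/* prints 1 */
	return 0;
}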
+
+/*
+ * No need to decide whether this PTE shares the swap entry with others,
+ * just let do_wp_page work it out if a write is requested later - to
+ * force COW, vm_page_prot omits write permission from any private vma.
+ */
+static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, swp_entry_t entry, struct page *page)
{
- inc_mm_counter(vma->vm_mm, rss);
+ spinlock_t *ptl;
+ pte_t *pte;
+ int ret = 1;
+
+ if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
+ ret = -ENOMEM;
+
+ pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+ if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) {
+ if (ret > 0)
+ mem_cgroup_uncharge_page(page);
+ ret = 0;
+ goto out;
+ }
+
+ inc_mm_counter(vma->vm_mm, anon_rss);
get_page(page);
set_pte_at(vma->vm_mm, addr, pte,
pte_mkold(mk_pte(page, vma->vm_page_prot)));
* immediately swapped out again after swapon.
*/
activate_page(page);
+out:
+ pte_unmap_unlock(pte, ptl);
+ return ret;
}
static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, unsigned long end,
swp_entry_t entry, struct page *page)
{
- pte_t *pte;
pte_t swp_pte = swp_entry_to_pte(entry);
+ pte_t *pte;
+ int ret = 0;
+ /*
+ * We don't actually need pte lock while scanning for swp_pte: since
+ * we hold page lock and mmap_sem, swp_pte cannot be inserted into the
+ * page table while we're scanning; though it could get zapped, and on
+ * some architectures (e.g. x86_32 with PAE) we might catch a glimpse
+ * of unmatched parts which look like swp_pte, so unuse_pte must
+ * recheck under pte lock. Scanning without pte lock lets it be
+ * preemptible whenever CONFIG_PREEMPT but not CONFIG_HIGHPTE.
+ */
pte = pte_offset_map(pmd, addr);
do {
/*
* Test inline before going to call unuse_pte.
*/
if (unlikely(pte_same(*pte, swp_pte))) {
- unuse_pte(vma, pte, addr, entry, page);
pte_unmap(pte);
- return 1;
+ ret = unuse_pte(vma, pmd, addr, entry, page);
+ if (ret)
+ goto out;
+ pte = pte_offset_map(pmd, addr);
}
} while (pte++, addr += PAGE_SIZE, addr != end);
pte_unmap(pte - 1);
- return 0;
+out:
+ return ret;
}
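/*
 * A minimal pthread model of the pattern used above (illustrative
 * only, not part of this patch): scan without the lock, and on an
 * apparent match take the lock and re-check before acting, since the
 * value may have changed (or been a torn read) in the meantime - the
 * pte_same() recheck in unuse_pte() plays the same role.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long table[1024];	/* stands in for a page table */

static int replace_entry(unsigned long match, unsigned long repl)
{
	for (int i = 0; i < 1024; i++) {
		if (table[i] != match)	/* unlocked scan: may be stale */
			continue;
		pthread_mutex_lock(&table_lock);
		if (table[i] == match) {	/* recheck under the lock */
			table[i] = repl;
			pthread_mutex_unlock(&table_lock);
			return 1;
		}
		pthread_mutex_unlock(&table_lock);	/* lost the race */
	}
	return 0;
}

int main(void)
{
	table[7] = 42;
	printf("replaced: %d\n", replace_entry(42, 99));	/* prints 1 */
	return 0;
}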
static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
{
pmd_t *pmd;
unsigned long next;
+ int ret;
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
if (pmd_none_or_clear_bad(pmd))
continue;
- if (unuse_pte_range(vma, pmd, addr, next, entry, page))
- return 1;
+ ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
+ if (ret)
+ return ret;
} while (pmd++, addr = next, addr != end);
return 0;
}
{
pud_t *pud;
unsigned long next;
+ int ret;
pud = pud_offset(pgd, addr);
do {
next = pud_addr_end(addr, end);
if (pud_none_or_clear_bad(pud))
continue;
- if (unuse_pmd_range(vma, pud, addr, next, entry, page))
- return 1;
+ ret = unuse_pmd_range(vma, pud, addr, next, entry, page);
+ if (ret)
+ return ret;
} while (pud++, addr = next, addr != end);
return 0;
}
{
pgd_t *pgd;
unsigned long addr, end, next;
+ int ret;
if (page->mapping) {
addr = page_address_in_vma(page, vma);
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(pgd))
continue;
- if (unuse_pud_range(vma, pgd, addr, next, entry, page))
- return 1;
+ ret = unuse_pud_range(vma, pgd, addr, next, entry, page);
+ if (ret)
+ return ret;
} while (pgd++, addr = next, addr != end);
return 0;
}
swp_entry_t entry, struct page *page)
{
struct vm_area_struct *vma;
+ int ret = 0;
if (!down_read_trylock(&mm->mmap_sem)) {
/*
down_read(&mm->mmap_sem);
lock_page(page);
}
- spin_lock(&mm->page_table_lock);
for (vma = mm->mmap; vma; vma = vma->vm_next) {
- if (vma->anon_vma && unuse_vma(vma, entry, page))
+ if (vma->anon_vma && (ret = unuse_vma(vma, entry, page)))
break;
}
- spin_unlock(&mm->page_table_lock);
up_read(&mm->mmap_sem);
- /*
- * Currently unuse_mm cannot fail, but leave error handling
- * at call sites for now, since we change it from time to time.
- */
- return 0;
+ return (ret < 0)? ret: 0;
}
/*
* Scan swap_map from current position to next entry still in use.
* Recycle to start on reaching the end, returning 0 when empty.
*/
-static int find_next_to_unuse(struct swap_info_struct *si, int prev)
+static unsigned int find_next_to_unuse(struct swap_info_struct *si,
+ unsigned int prev)
{
- int max = si->max;
- int i = prev;
+ unsigned int max = si->max;
+ unsigned int i = prev;
int count;
/*
- * No need for swap_device_lock(si) here: we're just looking
+ * No need for swap_lock here: we're just looking
* for whether an entry is in use, not modifying it; false
* hits are okay, and sys_swapoff() has already prevented new
- * allocations from this area (while holding swap_list_lock()).
+ * allocations from this area (while holding swap_lock).
*/
for (;;) {
if (++i >= max) {
unsigned short swcount;
struct page *page;
swp_entry_t entry;
- int i = 0;
+ unsigned int i = 0;
int retval = 0;
int reset_overflow = 0;
int shmem;
*/
swap_map = &si->swap_map[i];
entry = swp_entry(type, i);
- page = read_swap_cache_async(entry, NULL, 0);
+ page = read_swap_cache_async(entry,
+ GFP_HIGHUSER_MOVABLE, NULL, 0);
if (!page) {
/*
* Either swap_duplicate() failed because entry
atomic_inc(&new_start_mm->mm_users);
atomic_inc(&prev_mm->mm_users);
spin_lock(&mmlist_lock);
- while (*swap_map > 1 && !retval &&
+ while (*swap_map > 1 && !retval && !shmem &&
(p = p->next) != &start_mm->mmlist) {
mm = list_entry(p, struct mm_struct, mmlist);
- if (atomic_inc_return(&mm->mm_users) == 1) {
- atomic_dec(&mm->mm_users);
+ if (!atomic_inc_not_zero(&mm->mm_users))
continue;
- }
spin_unlock(&mmlist_lock);
mmput(prev_mm);
prev_mm = mm;
mmput(start_mm);
start_mm = new_start_mm;
}
+ if (shmem) {
+ /* page has already been unlocked and released */
+ if (shmem > 0)
+ continue;
+ retval = shmem;
+ break;
+ }
if (retval) {
unlock_page(page);
page_cache_release(page);
* report them; but do report if we reset SWAP_MAP_MAX.
*/
if (*swap_map == SWAP_MAP_MAX) {
- swap_device_lock(si);
+ spin_lock(&swap_lock);
*swap_map = 1;
- swap_device_unlock(si);
+ spin_unlock(&swap_lock);
reset_overflow = 1;
}
* read from disk into another page. Splitting into two
* pages would be incorrect if swap supported "shared
* private" pages, but they are handled by tmpfs files.
- *
- * Note shmem_unuse already deleted a swappage from
- * the swap cache, unless the move to filepage failed:
- * in which case it left swappage in cache, lowered its
- * swap count to pass quickly through the loops above,
- * and now we must reincrement count to try again later.
*/
if ((*swap_map > 1) && PageDirty(page) && PageSwapCache(page)) {
struct writeback_control wbc = {
lock_page(page);
wait_on_page_writeback(page);
}
- if (PageSwapCache(page)) {
- if (shmem)
- swap_duplicate(entry);
- else
- delete_from_swap_cache(page);
- }
+ if (PageSwapCache(page))
+ delete_from_swap_cache(page);
/*
* So we could skip searching mms once swap count went
* to 1, we did not mark any present ptes as dirty: must
- * mark page dirty so shrink_list will preserve it.
+ * mark page dirty so shrink_page_list will preserve it.
*/
SetPageDirty(page);
unlock_page(page);
}
/*
- * After a successful try_to_unuse, if no swap is now in use, we know we
- * can empty the mmlist. swap_list_lock must be held on entry and exit.
- * Note that mmlist_lock nests inside swap_list_lock, and an mm must be
+ * After a successful try_to_unuse, if no swap is now in use, we know
+ * we can empty the mmlist. swap_lock must be held on entry and exit.
+ * Note that mmlist_lock nests inside swap_lock, and an mm must be
* added to the mmlist just after page_duplicate - before would be racy.
*/
static void drain_mmlist(void)
}
}
+#ifdef CONFIG_HIBERNATION
+/*
+ * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
+ * corresponding to given index in swap_info (swap type).
+ */
+sector_t swapdev_block(int swap_type, pgoff_t offset)
+{
+ struct swap_info_struct *sis;
+
+ if (swap_type >= nr_swapfiles)
+ return 0;
+
+ sis = swap_info + swap_type;
+ return (sis->flags & SWP_WRITEOK) ? map_swap_page(sis, offset) : 0;
+}
+#endif /* CONFIG_HIBERNATION */
+
/*
* Free all of a swapdev's extent information
*/
list_del(&se->list);
kfree(se);
}
- sis->nr_extents = 0;
}
/*
new_se->start_block = start_block;
list_add_tail(&new_se->list, &sis->extent_list);
- sis->nr_extents++;
- return 0;
+ return 1;
}
/*
* This is extremely effective. The average number of iterations in
* map_swap_page() has been measured at about 0.3 per page. - akpm.
*/
-static int setup_swap_extents(struct swap_info_struct *sis)
+static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
{
struct inode *inode;
unsigned blocks_per_page;
unsigned blkbits;
sector_t probe_block;
sector_t last_block;
+ sector_t lowest_block = -1;
+ sector_t highest_block = 0;
+ int nr_extents = 0;
int ret;
inode = sis->swap_file->f_mapping->host;
if (S_ISBLK(inode->i_mode)) {
ret = add_swap_extent(sis, 0, sis->max, 0);
+ *span = sis->pages;
goto done;
}
}
}
+ first_block >>= (PAGE_SHIFT - blkbits);
+ if (page_no) { /* exclude the header page */
+ if (first_block < lowest_block)
+ lowest_block = first_block;
+ if (first_block > highest_block)
+ highest_block = first_block;
+ }
+
/*
* We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
*/
- ret = add_swap_extent(sis, page_no, 1,
- first_block >> (PAGE_SHIFT - blkbits));
- if (ret)
+ ret = add_swap_extent(sis, page_no, 1, first_block);
+ if (ret < 0)
goto out;
+ nr_extents += ret;
page_no++;
probe_block += blocks_per_page;
reprobe:
continue;
}
- ret = 0;
+ ret = nr_extents;
+ *span = 1 + highest_block - lowest_block;
if (page_no == 0)
page_no = 1; /* force Empty message */
sis->max = page_no;
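/*
 * A minimal userspace model of extent lookup (illustrative only, not
 * part of this patch; it assumes the swap_extent fields {start_page,
 * nr_pages, start_block} used elsewhere in this file): map a swap page
 * offset to a disk block by walking the extent list that
 * setup_swap_extents() built.
 */
#include <stdio.h>

struct extent { unsigned long start_page, nr_pages; long long start_block; };

static long long map_page(const struct extent *ext, int n, unsigned long page)
{
	for (int i = 0; i < n; i++) {
		if (page >= ext[i].start_page &&
		    page < ext[i].start_page + ext[i].nr_pages)
			return ext[i].start_block + (page - ext[i].start_page);
	}
	return -1;	/* offset not covered by any extent */
}

int main(void)
{
	/* two physically discontiguous runs of a swap file */
	struct extent ext[] = {
		{ .start_page = 0,  .nr_pages = 10, .start_block = 100 },
		{ .start_page = 10, .nr_pages = 90, .start_block = 500 },
	};

	printf("%lld\n", map_page(ext, 2, 12));	/* prints 502 */
	return 0;
}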
BUG_ON(!PageLocked(page)); /* It pins the swap_info_struct */
if (PageSwapCache(page)) {
- swp_entry_t entry = { .val = page->private };
+ swp_entry_t entry = { .val = page_private(page) };
struct swap_info_struct *sis;
sis = get_swap_info_struct(swp_type(entry));
mapping = victim->f_mapping;
prev = -1;
- swap_list_lock();
+ spin_lock(&swap_lock);
for (type = swap_list.head; type >= 0; type = swap_info[type].next) {
p = swap_info + type;
if ((p->flags & SWP_ACTIVE) == SWP_ACTIVE) {
}
if (type < 0) {
err = -EINVAL;
- swap_list_unlock();
+ spin_unlock(&swap_lock);
goto out_dput;
}
if (!security_vm_enough_memory(p->pages))
vm_unacct_memory(p->pages);
else {
err = -ENOMEM;
- swap_list_unlock();
+ spin_unlock(&swap_lock);
goto out_dput;
}
if (prev < 0) {
nr_swap_pages -= p->pages;
total_swap_pages -= p->pages;
p->flags &= ~SWP_WRITEOK;
- swap_list_unlock();
+ spin_unlock(&swap_lock);
+
current->flags |= PF_SWAPOFF;
err = try_to_unuse(type);
current->flags &= ~PF_SWAPOFF;
- /* wait for any unplug function to finish */
- down_write(&swap_unplug_sem);
- up_write(&swap_unplug_sem);
-
if (err) {
/* re-insert swap space back into swap_list */
- swap_list_lock();
+ spin_lock(&swap_lock);
for (prev = -1, i = swap_list.head; i >= 0; prev = i, i = swap_info[i].next)
if (p->prio >= swap_info[i].prio)
break;
nr_swap_pages += p->pages;
total_swap_pages += p->pages;
p->flags |= SWP_WRITEOK;
- swap_list_unlock();
+ spin_unlock(&swap_lock);
goto out_dput;
}
+
+ /* wait for any unplug function to finish */
+ down_write(&swap_unplug_sem);
+ up_write(&swap_unplug_sem);
+
destroy_swap_extents(p);
- down(&swapon_sem);
- swap_list_lock();
+ mutex_lock(&swapon_mutex);
+ spin_lock(&swap_lock);
drain_mmlist();
- swap_device_lock(p);
+
+ /* wait for anyone still in scan_swap_map */
+ p->highest_bit = 0; /* cuts scans short */
+ while (p->flags >= SWP_SCANNING) {
+ spin_unlock(&swap_lock);
+ schedule_timeout_uninterruptible(1);
+ spin_lock(&swap_lock);
+ }
+
swap_file = p->swap_file;
p->swap_file = NULL;
p->max = 0;
swap_map = p->swap_map;
p->swap_map = NULL;
p->flags = 0;
- swap_device_unlock(p);
- swap_list_unlock();
- up(&swapon_sem);
+ spin_unlock(&swap_lock);
+ mutex_unlock(&swapon_mutex);
vfree(swap_map);
inode = mapping->host;
if (S_ISBLK(inode->i_mode)) {
set_blocksize(bdev, p->old_block_size);
bd_release(bdev);
} else {
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
inode->i_flags &= ~S_SWAPFILE;
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
}
filp_close(swap_file, NULL);
err = 0;
int i;
loff_t l = *pos;
- down(&swapon_sem);
+ mutex_lock(&swapon_mutex);
+
+ if (!l)
+ return SEQ_START_TOKEN;
for (i = 0; i < nr_swapfiles; i++, ptr++) {
if (!(ptr->flags & SWP_USED) || !ptr->swap_map)
continue;
- if (!l--)
+ if (!--l)
return ptr;
}
static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
{
- struct swap_info_struct *ptr = v;
+ struct swap_info_struct *ptr;
struct swap_info_struct *endptr = swap_info + nr_swapfiles;
- for (++ptr; ptr < endptr; ptr++) {
+ if (v == SEQ_START_TOKEN)
+ ptr = swap_info;
+ else {
+ ptr = v;
+ ptr++;
+ }
+
+ for (; ptr < endptr; ptr++) {
if (!(ptr->flags & SWP_USED) || !ptr->swap_map)
continue;
++*pos;
static void swap_stop(struct seq_file *swap, void *v)
{
- up(&swapon_sem);
+ mutex_unlock(&swapon_mutex);
}
static int swap_show(struct seq_file *swap, void *v)
struct file *file;
int len;
- if (v == swap_info)
- seq_puts(swap, "Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");
+ if (ptr == SEQ_START_TOKEN) {
+ seq_puts(swap,"Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");
+ return 0;
+ }
file = ptr->swap_file;
- len = seq_path(swap, file->f_vfsmnt, file->f_dentry, " \t\n\\");
- seq_printf(swap, "%*s%s\t%d\t%ld\t%d\n",
+ len = seq_path(swap, &file->f_path, " \t\n\\");
+ seq_printf(swap, "%*s%s\t%u\t%u\t%d\n",
len < 40 ? 40 - len : 1, " ",
- S_ISBLK(file->f_dentry->d_inode->i_mode) ?
+ S_ISBLK(file->f_path.dentry->d_inode->i_mode) ?
"partition" : "file\t",
ptr->pages << (PAGE_SHIFT - 10),
ptr->inuse_pages << (PAGE_SHIFT - 10),
return 0;
}
-static struct seq_operations swaps_op = {
+static const struct seq_operations swaps_op = {
.start = swap_start,
.next = swap_next,
.stop = swap_stop,
return seq_open(file, &swaps_op);
}
-static struct file_operations proc_swaps_operations = {
+static const struct file_operations proc_swaps_operations = {
.open = swaps_open,
.read = seq_read,
.llseek = seq_lseek,
static int __init procswaps_init(void)
{
- struct proc_dir_entry *entry;
-
- entry = create_proc_entry("swaps", 0, NULL);
- if (entry)
- entry->proc_fops = &proc_swaps_operations;
+ proc_create("swaps", 0, NULL, &proc_swaps_operations);
return 0;
}
__initcall(procswaps_init);
static int least_priority;
union swap_header *swap_header = NULL;
int swap_header_version;
- int nr_good_pages = 0;
+ unsigned int nr_good_pages = 0;
+ int nr_extents = 0;
+ sector_t span;
unsigned long maxpages = 1;
int swapfilesize;
unsigned short *swap_map;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- swap_list_lock();
+ spin_lock(&swap_lock);
p = swap_info;
for (type = 0 ; type < nr_swapfiles ; type++,p++)
if (!(p->flags & SWP_USED))
break;
error = -EPERM;
- /*
- * Test if adding another swap device is possible. There are
- * two limiting factors: 1) the number of bits for the swap
- * type swp_entry_t definition and 2) the number of bits for
- * the swap type in the swap ptes as defined by the different
- * architectures. To honor both limitations a swap entry
- * with swap offset 0 and swap type ~0UL is created, encoded
- * to a swap pte, decoded to a swp_entry_t again and finally
- * the swap type part is extracted. This will mask all bits
- * from the initial ~0UL that can't be encoded in either the
- * swp_entry_t or the architecture definition of a swap pte.
- */
- if (type > swp_type(pte_to_swp_entry(swp_entry_to_pte(swp_entry(~0UL,0))))) {
- swap_list_unlock();
+ if (type >= MAX_SWAPFILES) {
+ spin_unlock(&swap_lock);
goto out;
}
if (type >= nr_swapfiles)
nr_swapfiles = type+1;
INIT_LIST_HEAD(&p->extent_list);
p->flags = SWP_USED;
- p->nr_extents = 0;
p->swap_file = NULL;
p->old_block_size = 0;
p->swap_map = NULL;
p->highest_bit = 0;
p->cluster_nr = 0;
p->inuse_pages = 0;
- spin_lock_init(&p->sdev_lock);
p->next = -1;
if (swap_flags & SWAP_FLAG_PREFER) {
p->prio =
} else {
p->prio = --least_priority;
}
- swap_list_unlock();
+ spin_unlock(&swap_lock);
name = getname(specialfile);
error = PTR_ERR(name);
if (IS_ERR(name)) {
error = bd_claim(bdev, sys_swapon);
if (error < 0) {
bdev = NULL;
+ error = -EINVAL;
goto bad_swap;
}
p->old_block_size = block_size(bdev);
p->bdev = bdev;
} else if (S_ISREG(inode->i_mode)) {
p->bdev = inode->i_sb->s_bdev;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
did_down = 1;
if (IS_SWAPFILE(inode)) {
error = -EBUSY;
error = -EINVAL;
goto bad_swap;
}
- page = read_cache_page(mapping, 0,
- (filler_t *)mapping->a_ops->readpage, swap_file);
+ page = read_mapping_page(mapping, 0, swap_file);
if (IS_ERR(page)) {
error = PTR_ERR(page);
goto bad_swap;
}
- wait_on_page_locked(page);
- if (!PageUptodate(page))
- goto bad_swap;
kmap(page);
swap_header = page_address(page);
else if (!memcmp("SWAPSPACE2",swap_header->magic.magic,10))
swap_header_version = 2;
else {
- printk("Unable to find swap-space signature\n");
+ printk(KERN_ERR "Unable to find swap-space signature\n");
error = -EINVAL;
goto bad_swap;
}
error = -EINVAL;
goto bad_swap;
case 2:
+ /* swap partition endianness hack... */
+ if (swab32(swap_header->info.version) == 1) {
+ swab32s(&swap_header->info.version);
+ swab32s(&swap_header->info.last_page);
+ swab32s(&swap_header->info.nr_badpages);
+ for (i = 0; i < swap_header->info.nr_badpages; i++)
+ swab32s(&swap_header->info.badpages[i]);
+ }
/* Check the swap header's sub-version and the size of
the swap file and bad block lists */
if (swap_header->info.version != 1) {
}
p->lowest_bit = 1;
+ p->cluster_next = 1;
+
/*
* Find out how many pages are allowed for a single swap
* device. There are two limiting factors: 1) the number of
error = -EINVAL;
if (!maxpages)
goto bad_swap;
+ if (swapfilesize && maxpages > swapfilesize) {
+ printk(KERN_WARNING
+ "Swap area shorter than signature indicates\n");
+ goto bad_swap;
+ }
if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
goto bad_swap;
if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
goto bad_swap;
-
+
/* OK, set up the swap map and apply the bad block list */
if (!(p->swap_map = vmalloc(maxpages * sizeof(short)))) {
error = -ENOMEM;
error = 0;
memset(p->swap_map, 0, maxpages * sizeof(short));
- for (i=0; i<swap_header->info.nr_badpages; i++) {
- int page = swap_header->info.badpages[i];
- if (page <= 0 || page >= swap_header->info.last_page)
+ for (i = 0; i < swap_header->info.nr_badpages; i++) {
+ int page_nr = swap_header->info.badpages[i];
+ if (page_nr <= 0 || page_nr >= swap_header->info.last_page)
error = -EINVAL;
else
- p->swap_map[page] = SWAP_MAP_BAD;
+ p->swap_map[page_nr] = SWAP_MAP_BAD;
}
nr_good_pages = swap_header->info.last_page -
swap_header->info.nr_badpages -
1 /* header page */;
- if (error)
+ if (error)
goto bad_swap;
}
- if (swapfilesize && maxpages > swapfilesize) {
- printk(KERN_WARNING
- "Swap area shorter than signature indicates\n");
- error = -EINVAL;
- goto bad_swap;
- }
if (nr_good_pages) {
p->swap_map[0] = SWAP_MAP_BAD;
p->max = maxpages;
p->pages = nr_good_pages;
- error = setup_swap_extents(p);
- if (error)
+ nr_extents = setup_swap_extents(p, &span);
+ if (nr_extents < 0) {
+ error = nr_extents;
goto bad_swap;
+ }
nr_good_pages = p->pages;
}
if (!nr_good_pages) {
goto bad_swap;
}
- down(&swapon_sem);
- swap_list_lock();
- swap_device_lock(p);
+ mutex_lock(&swapon_mutex);
+ spin_lock(&swap_lock);
p->flags = SWP_ACTIVE;
nr_swap_pages += nr_good_pages;
total_swap_pages += nr_good_pages;
- printk(KERN_INFO "Adding %dk swap on %s. Priority:%d extents:%d\n",
- nr_good_pages<<(PAGE_SHIFT-10), name,
- p->prio, p->nr_extents);
+
+ printk(KERN_INFO "Adding %uk swap on %s. "
+ "Priority:%d extents:%d across:%lluk\n",
+ nr_good_pages<<(PAGE_SHIFT-10), name, p->prio,
+ nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10));
/* insert swap space into swap_list: */
prev = -1;
} else {
swap_info[prev].next = p - swap_info;
}
- swap_device_unlock(p);
- swap_list_unlock();
- up(&swapon_sem);
+ spin_unlock(&swap_lock);
+ mutex_unlock(&swapon_mutex);
error = 0;
goto out;
bad_swap:
}
destroy_swap_extents(p);
bad_swap_2:
- swap_list_lock();
+ spin_lock(&swap_lock);
swap_map = p->swap_map;
p->swap_file = NULL;
p->swap_map = NULL;
p->flags = 0;
if (!(swap_flags & SWAP_FLAG_PREFER))
++least_priority;
- swap_list_unlock();
+ spin_unlock(&swap_lock);
vfree(swap_map);
if (swap_file)
filp_close(swap_file, NULL);
if (did_down) {
if (!error)
inode->i_flags |= S_SWAPFILE;
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
}
return error;
}
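/*
 * A minimal userspace model of the endianness hack in sys_swapon()
 * above (illustrative only, not part of this patch): a swap header
 * written on an opposite-endian machine shows version 1 only after
 * byte-swapping, which is how the check detects and repairs it.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t swab32(uint32_t x)
{
	return (x >> 24) | ((x >> 8) & 0xff00) |
	       ((x << 8) & 0xff0000) | (x << 24);
}

int main(void)
{
	uint32_t version = 0x01000000;	/* 1, as seen cross-endian */

	if (swab32(version) == 1)
		version = swab32(version);	/* repair to native order */
	printf("version = %u\n", version);	/* prints 1 */
	return 0;
}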
unsigned int i;
unsigned long nr_to_be_unused = 0;
- swap_list_lock();
+ spin_lock(&swap_lock);
for (i = 0; i < nr_swapfiles; i++) {
if (!(swap_info[i].flags & SWP_USED) ||
(swap_info[i].flags & SWP_WRITEOK))
}
val->freeswap = nr_swap_pages + nr_to_be_unused;
val->totalswap = total_swap_pages + nr_to_be_unused;
- swap_list_unlock();
+ spin_unlock(&swap_lock);
}
/*
unsigned long offset, type;
int result = 0;
+ if (is_migration_entry(entry))
+ return 1;
+
type = swp_type(entry);
if (type >= nr_swapfiles)
goto bad_file;
p = type + swap_info;
offset = swp_offset(entry);
- swap_device_lock(p);
+ spin_lock(&swap_lock);
if (offset < p->max && p->swap_map[offset]) {
if (p->swap_map[offset] < SWAP_MAP_MAX - 1) {
p->swap_map[offset]++;
result = 1;
}
}
- swap_device_unlock(p);
+ spin_unlock(&swap_lock);
out:
return result;
}
/*
- * swap_device_lock prevents swap_map being freed. Don't grab an extra
+ * swap_lock prevents swap_map being freed. Don't grab an extra
* reference on the swaphandle, it doesn't matter if it becomes unused.
*/
int valid_swaphandles(swp_entry_t entry, unsigned long *offset)
{
- int ret = 0, i = 1 << page_cluster;
- unsigned long toff;
- struct swap_info_struct *swapdev = swp_type(entry) + swap_info;
+ struct swap_info_struct *si;
+ int our_page_cluster = page_cluster;
+ pgoff_t target, toff;
+ pgoff_t base, end;
+ int nr_pages = 0;
- if (!page_cluster) /* no readahead */
+ if (!our_page_cluster) /* no readahead */
return 0;
- toff = (swp_offset(entry) >> page_cluster) << page_cluster;
- if (!toff) /* first page is swap header */
- toff++, i--;
- *offset = toff;
- swap_device_lock(swapdev);
- do {
- /* Don't read-ahead past the end of the swap area */
- if (toff >= swapdev->max)
+ si = &swap_info[swp_type(entry)];
+ target = swp_offset(entry);
+ base = (target >> our_page_cluster) << our_page_cluster;
+ end = base + (1 << our_page_cluster);
+ if (!base) /* first page is swap header */
+ base++;
+
+ spin_lock(&swap_lock);
+ if (end > si->max) /* don't go beyond end of map */
+ end = si->max;
+
+ /* Count contiguous allocated slots above our target */
+ for (toff = target; ++toff < end; nr_pages++) {
+ /* Don't read in free or bad pages */
+ if (!si->swap_map[toff])
+ break;
+ if (si->swap_map[toff] == SWAP_MAP_BAD)
break;
+ }
+ /* Count contiguous allocated slots below our target */
+ for (toff = target; --toff >= base; nr_pages++) {
/* Don't read in free or bad pages */
- if (!swapdev->swap_map[toff])
+ if (!si->swap_map[toff])
break;
- if (swapdev->swap_map[toff] == SWAP_MAP_BAD)
+ if (si->swap_map[toff] == SWAP_MAP_BAD)
break;
- toff++;
- ret++;
- } while (--i);
- swap_device_unlock(swapdev);
- return ret;
+ }
+ spin_unlock(&swap_lock);
+
+ /*
+ * Indicate starting offset, and return number of pages to get:
+ * if only 1, say 0, since there's then no readahead to be done.
+ */
+ *offset = ++toff;
+ return nr_pages? ++nr_pages: 0;
}
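/*
 * A minimal userspace model of the readahead window logic above
 * (illustrative only, not part of this patch): round the target down
 * to a page_cluster-aligned base, then count contiguous allocated
 * slots on either side of the target, stopping at free or bad slots.
 */
#include <stdio.h>

#define BAD 0xffff	/* stands in for SWAP_MAP_BAD */

static int ra_window(const unsigned short *map, long max,
		     long target, int page_cluster, long *start)
{
	long base = (target >> page_cluster) << page_cluster;
	long end = base + (1L << page_cluster);
	long toff;
	int nr = 0;

	if (!base)		/* first page is the swap header */
		base++;
	if (end > max)		/* don't go beyond end of map */
		end = max;

	/* contiguous allocated slots above the target */
	for (toff = target; ++toff < end; nr++)
		if (!map[toff] || map[toff] == BAD)
			break;
	/* contiguous allocated slots below the target */
	for (toff = target; --toff >= base; nr++)
		if (!map[toff] || map[toff] == BAD)
			break;

	*start = toff + 1;
	return nr ? nr + 1 : 0;	/* 0 means: no readahead worth doing */
}

int main(void)
{
	unsigned short map[16] = { BAD, 1, 1, 0, 1, 1, 1, 1, 0, 1 };
	long start;
	int n = ra_window(map, 16, 5, 3, &start);

	printf("read %d pages from offset %ld\n", n, start);	/* 4 from 4 */
	return 0;
}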