mm: fix various kernel-doc comments
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 14bc4f2..2da149c 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -27,6 +27,7 @@
 #include <linux/mutex.h>
 #include <linux/capability.h>
 #include <linux/syscalls.h>
+#include <linux/memcontrol.h>
 
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
@@ -511,11 +512,16 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 {
        spinlock_t *ptl;
        pte_t *pte;
-       int found = 1;
+       int ret = 1;
+
+       if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
+               ret = -ENOMEM;
 
        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) {
-               found = 0;
+               if (ret > 0)
+                       mem_cgroup_uncharge_page(page);
+               ret = 0;
                goto out;
        }
 
@@ -532,7 +538,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
        activate_page(page);
 out:
        pte_unmap_unlock(pte, ptl);
-       return found;
+       return ret;
 }
 
 static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
@@ -541,7 +547,7 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 {
        pte_t swp_pte = swp_entry_to_pte(entry);
        pte_t *pte;
-       int found = 0;
+       int ret = 0;
 
        /*
         * We don't actually need pte lock while scanning for swp_pte: since
@@ -560,15 +566,15 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                 */
                if (unlikely(pte_same(*pte, swp_pte))) {
                        pte_unmap(pte);
-                       found = unuse_pte(vma, pmd, addr, entry, page);
-                       if (found)
+                       ret = unuse_pte(vma, pmd, addr, entry, page);
+                       if (ret)
                                goto out;
                        pte = pte_offset_map(pmd, addr);
                }
        } while (pte++, addr += PAGE_SIZE, addr != end);
        pte_unmap(pte - 1);
 out:
-       return found;
+       return ret;
 }
 
 static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
@@ -577,14 +583,16 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 {
        pmd_t *pmd;
        unsigned long next;
+       int ret;
 
        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if (pmd_none_or_clear_bad(pmd))
                        continue;
-               if (unuse_pte_range(vma, pmd, addr, next, entry, page))
-                       return 1;
+               ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
+               if (ret)
+                       return ret;
        } while (pmd++, addr = next, addr != end);
        return 0;
 }
@@ -595,14 +603,16 @@ static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
 {
        pud_t *pud;
        unsigned long next;
+       int ret;
 
        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud))
                        continue;
-               if (unuse_pmd_range(vma, pud, addr, next, entry, page))
-                       return 1;
+               ret = unuse_pmd_range(vma, pud, addr, next, entry, page);
+               if (ret)
+                       return ret;
        } while (pud++, addr = next, addr != end);
        return 0;
 }
@@ -612,6 +622,7 @@ static int unuse_vma(struct vm_area_struct *vma,
 {
        pgd_t *pgd;
        unsigned long addr, end, next;
+       int ret;
 
        if (page->mapping) {
                addr = page_address_in_vma(page, vma);
@@ -629,8 +640,9 @@ static int unuse_vma(struct vm_area_struct *vma,
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
-               if (unuse_pud_range(vma, pgd, addr, next, entry, page))
-                       return 1;
+               ret = unuse_pud_range(vma, pgd, addr, next, entry, page);
+               if (ret)
+                       return ret;
        } while (pgd++, addr = next, addr != end);
        return 0;
 }
@@ -639,6 +651,7 @@ static int unuse_mm(struct mm_struct *mm,
                                swp_entry_t entry, struct page *page)
 {
        struct vm_area_struct *vma;
+       int ret = 0;
 
        if (!down_read_trylock(&mm->mmap_sem)) {
                /*
@@ -651,15 +664,11 @@ static int unuse_mm(struct mm_struct *mm,
                lock_page(page);
        }
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
-               if (vma->anon_vma && unuse_vma(vma, entry, page))
+               if (vma->anon_vma && (ret = unuse_vma(vma, entry, page)))
                        break;
        }
        up_read(&mm->mmap_sem);
-       /*
-        * Currently unuse_mm cannot fail, but leave error handling
-        * at call sites for now, since we change it from time to time.
-        */
-       return 0;
+       return (ret < 0)? ret: 0;
 }
 
 /*
@@ -814,7 +823,7 @@ static int try_to_unuse(unsigned int type)
                        atomic_inc(&new_start_mm->mm_users);
                        atomic_inc(&prev_mm->mm_users);
                        spin_lock(&mmlist_lock);
-                       while (*swap_map > 1 && !retval &&
+                       while (*swap_map > 1 && !retval && !shmem &&
                                        (p = p->next) != &start_mm->mmlist) {
                                mm = list_entry(p, struct mm_struct, mmlist);
                                if (!atomic_inc_not_zero(&mm->mm_users))
@@ -846,6 +855,13 @@ static int try_to_unuse(unsigned int type)
                        mmput(start_mm);
                        start_mm = new_start_mm;
                }
+               if (shmem) {
+                       /* page has already been unlocked and released */
+                       if (shmem > 0)
+                               continue;
+                       retval = shmem;
+                       break;
+               }
                if (retval) {
                        unlock_page(page);
                        page_cache_release(page);
@@ -884,12 +900,6 @@ static int try_to_unuse(unsigned int type)
                 * read from disk into another page.  Splitting into two
                 * pages would be incorrect if swap supported "shared
                 * private" pages, but they are handled by tmpfs files.
-                *
-                * Note shmem_unuse already deleted a swappage from
-                * the swap cache, unless the move to filepage failed:
-                * in which case it left swappage in cache, lowered its
-                * swap count to pass quickly through the loops above,
-                * and now we must reincrement count to try again later.
                 */
                if ((*swap_map > 1) && PageDirty(page) && PageSwapCache(page)) {
                        struct writeback_control wbc = {
@@ -900,12 +910,8 @@ static int try_to_unuse(unsigned int type)
                        lock_page(page);
                        wait_on_page_writeback(page);
                }
-               if (PageSwapCache(page)) {
-                       if (shmem)
-                               swap_duplicate(entry);
-                       else
-                               delete_from_swap_cache(page);
-               }
+               if (PageSwapCache(page))
+                       delete_from_swap_cache(page);
 
                /*
                 * So we could skip searching mms once swap count went
@@ -1388,7 +1394,7 @@ static int swap_show(struct seq_file *swap, void *v)
        }
 
        file = ptr->swap_file;
-       len = seq_path(swap, file->f_path.mnt, file->f_path.dentry, " \t\n\\");
+       len = seq_path(swap, &file->f_path, " \t\n\\");
        seq_printf(swap, "%*s%s\t%u\t%u\t%d\n",
                       len < 40 ? 40 - len : 1, " ",
                       S_ISBLK(file->f_path.dentry->d_inode->i_mode) ?
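
For context on the diff above, here is a minimal standalone sketch (not part of the patch; the walk_*/leaf_op names are hypothetical stand-ins) of the three-way return convention this change introduces: unuse_pte() and the page-table walkers above it now return a negative errno (e.g. -ENOMEM when mem_cgroup_charge fails), a positive value when the swap entry was found and handled, and 0 when nothing was found, while unuse_mm() folds the positive "found" case back into 0 for its caller.

/*
 * Sketch of the return convention, assuming:
 *   < 0  -> hard error (e.g. -ENOMEM), abort the whole scan
 *   > 0  -> entry found and handled, stop scanning
 *     0  -> nothing found here, keep scanning
 */
#include <errno.h>
#include <stdio.h>

/* Leaf operation, in the role of unuse_pte(): may fail, find, or miss. */
static int leaf_op(int slot, int wanted, int fail_slot)
{
	if (slot == fail_slot)
		return -ENOMEM;		/* charge failed */
	if (slot == wanted)
		return 1;		/* found and handled */
	return 0;			/* not here */
}

/* Range walker, in the role of unuse_pte_range()/unuse_pmd_range(). */
static int walk_range(int start, int end, int wanted, int fail_slot)
{
	int slot;
	int ret;

	for (slot = start; slot < end; slot++) {
		ret = leaf_op(slot, wanted, fail_slot);
		if (ret)		/* error or found: propagate as-is */
			return ret;
	}
	return 0;			/* nothing in this range */
}

/* Top level, in the role of unuse_mm(): only errors matter to the caller. */
static int walk_all(int wanted, int fail_slot)
{
	int ret = walk_range(0, 16, wanted, fail_slot);

	return (ret < 0) ? ret : 0;
}

int main(void)
{
	printf("found: %d\n", walk_all(5, -1));	/* 0: found, no error */
	printf("miss:  %d\n", walk_all(99, -1));	/* 0: not found */
	printf("error: %d\n", walk_all(5, 3));	/* -ENOMEM hit before the entry */
	return 0;
}

The point of the convention is visible in the unuse_pmd_range()/unuse_pud_range()/unuse_vma() hunks above: each level simply returns any non-zero result unchanged, so a -ENOMEM from the memcg charge in unuse_pte() surfaces at try_to_unuse() without extra plumbing.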