WorkStruct: make allyesconfig
diff --git a/mm/swap.c b/mm/swap.c
index b895128..d9a3770 100644
--- a/mm/swap.c
+++ b/mm/swap.c
 /* How many pages do we try to swap or page in/out together? */
 int page_cluster;
 
-#ifdef CONFIG_HUGETLB_PAGE
+/*
+ * This path almost never happens for VM activity - pages are normally
+ * freed via pagevecs.  But it gets used by networking.
+ */
+static void fastcall __page_cache_release(struct page *page)
+{
+       if (PageLRU(page)) {
+               unsigned long flags;
+               struct zone *zone = page_zone(page);
 
-void put_page(struct page *page)
+               spin_lock_irqsave(&zone->lru_lock, flags);
+               VM_BUG_ON(!PageLRU(page));
+               __ClearPageLRU(page);
+               del_page_from_lru(zone, page);
+               spin_unlock_irqrestore(&zone->lru_lock, flags);
+       }
+       free_hot_page(page);
+}
+
+static void put_compound_page(struct page *page)
 {
-       if (unlikely(PageCompound(page))) {
-               page = (struct page *)page_private(page);
-               if (put_page_testzero(page)) {
-                       void (*dtor)(struct page *page);
+       page = (struct page *)page_private(page);
+       if (put_page_testzero(page)) {
+               void (*dtor)(struct page *page);
 
-                       dtor = (void (*)(struct page *))page[1].mapping;
-                       (*dtor)(page);
-               }
-               return;
+               dtor = (void (*)(struct page *))page[1].lru.next;
+               (*dtor)(page);
        }
-       if (put_page_testzero(page))
+}
+
+void put_page(struct page *page)
+{
+       if (unlikely(PageCompound(page)))
+               put_compound_page(page);
+       else if (put_page_testzero(page))
                __page_cache_release(page);
 }
 EXPORT_SYMBOL(put_page);
-#endif
+
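put_compound_page() now reads the compound destructor from page[1].lru.next rather than page[1].mapping. For orientation, the matching setter side is set_compound_page_dtor(); a minimal sketch of it as it appears in include/linux/mm.h of this era (not part of this patch):

        /* Sketch: the allocator stashes the destructor in the first
         * tail page, where put_compound_page() retrieves it. */
        static inline void set_compound_page_dtor(struct page *page,
                                                  void (*dtor)(struct page *))
        {
                page[1].lru.next = (void *)dtor;
        }
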
+/**
+ * put_pages_list(): release a list of pages
+ *
+ * Release a list of pages which are strung together on page.lru.  Currently
+ * used by read_cache_pages() and related error recovery code.
+ *
+ * @pages: list of pages threaded on page->lru
+ */
+void put_pages_list(struct list_head *pages)
+{
+       while (!list_empty(pages)) {
+               struct page *victim;
+
+               victim = list_entry(pages->prev, struct page, lru);
+               list_del(&victim->lru);
+               page_cache_release(victim);
+       }
+}
+EXPORT_SYMBOL(put_pages_list);
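put_pages_list() gives callers that have collected pages on a private list a one-call way to drop them: each iteration pops the tail entry and releases one reference. A hedged usage sketch (the list name and fill loop are hypothetical):

        /* Hypothetical caller: gather pages on a local list, then drop them. */
        LIST_HEAD(my_pages);

        /* ... each page added with list_add(&page->lru, &my_pages) ... */

        put_pages_list(&my_pages);      /* list is empty on return */
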
 
 /*
  * Writeback is about to end against a page which has been marked for immediate
@@ -86,9 +125,8 @@ int rotate_reclaimable_page(struct page *page)
        zone = page_zone(page);
        spin_lock_irqsave(&zone->lru_lock, flags);
        if (PageLRU(page) && !PageActive(page)) {
-               list_del(&page->lru);
-               list_add_tail(&page->lru, &zone->inactive_list);
-               inc_page_state(pgrotated);
+               list_move_tail(&page->lru, &zone->inactive_list);
+               __count_vm_event(PGROTATED);
        }
        if (!test_clear_page_writeback(page))
                BUG();
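The open-coded list_del()/list_add_tail() pair becomes list_move_tail(), the list.h helper with identical semantics. Equivalent-behavior sketch:

        /* list_move_tail(entry, head) behaves like this pair: */
        list_del(entry);                /* unlink from its current list */
        list_add_tail(entry, head);     /* append at the tail of head */
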
@@ -108,7 +146,7 @@ void fastcall activate_page(struct page *page)
                del_page_from_inactive_list(zone, page);
                SetPageActive(page);
                add_page_to_active_list(zone, page);
-               inc_page_state(pgactivate);
+               __count_vm_event(PGACTIVATE);
        }
        spin_unlock_irq(&zone->lru_lock);
 }
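inc_page_state() gives way to the vm_event counters. The double-underscore form __count_vm_event() skips preemption protection and relies on the caller already being non-preemptible, which holds here under spin_lock_irq(&zone->lru_lock). A sketch of the helper, assuming the include/linux/vmstat.h definition of this era:

        /* Sketch: a non-atomic per-CPU bump, valid only because the
         * caller is already non-preemptible. */
        static inline void __count_vm_event(enum vm_event_item item)
        {
                __get_cpu_var(vm_event_states).event[item]++;
        }
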
@@ -159,38 +197,49 @@ void fastcall lru_cache_add_active(struct page *page)
        put_cpu_var(lru_add_active_pvecs);
 }
 
-void lru_add_drain(void)
+static void __lru_add_drain(int cpu)
 {
-       struct pagevec *pvec = &get_cpu_var(lru_add_pvecs);
+       struct pagevec *pvec = &per_cpu(lru_add_pvecs, cpu);
 
+       /* CPU is dead, so no locking needed. */
        if (pagevec_count(pvec))
                __pagevec_lru_add(pvec);
-       pvec = &__get_cpu_var(lru_add_active_pvecs);
+       pvec = &per_cpu(lru_add_active_pvecs, cpu);
        if (pagevec_count(pvec))
                __pagevec_lru_add_active(pvec);
-       put_cpu_var(lru_add_pvecs);
+}
+
+void lru_add_drain(void)
+{
+       __lru_add_drain(get_cpu());
+       put_cpu();
+}
+
+#ifdef CONFIG_NUMA
+static void lru_add_drain_per_cpu(struct work_struct *dummy)
+{
+       lru_add_drain();
 }
 
 /*
- * This path almost never happens for VM activity - pages are normally
- * freed via pagevecs.  But it gets used by networking.
+ * Returns 0 for success
  */
-void fastcall __page_cache_release(struct page *page)
+int lru_add_drain_all(void)
 {
-       unsigned long flags;
-       struct zone *zone = page_zone(page);
-
-       spin_lock_irqsave(&zone->lru_lock, flags);
-       if (TestClearPageLRU(page))
-               del_page_from_lru(zone, page);
-       if (page_count(page) != 0)
-               page = NULL;
-       spin_unlock_irqrestore(&zone->lru_lock, flags);
-       if (page)
-               free_hot_page(page);
+       return schedule_on_each_cpu(lru_add_drain_per_cpu);
 }
 
-EXPORT_SYMBOL(__page_cache_release);
+#else
+
+/*
+ * Returns 0 for success
+ */
+int lru_add_drain_all(void)
+{
+       lru_add_drain();
+       return 0;
+}
+#endif
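Note the callback signature: lru_add_drain_per_cpu() takes a struct work_struct *, the new convention this WorkStruct series converts the tree to. On NUMA, lru_add_drain_all() fans the drain out with schedule_on_each_cpu(); on non-NUMA a local drain suffices. A simplified conceptual sketch of that helper, loosely after kernel/workqueue.c of the period (keventd details may differ):

        /* Conceptual sketch: run func once on every online CPU, then wait. */
        int schedule_on_each_cpu(work_func_t func)
        {
                int cpu;
                struct work_struct *works = alloc_percpu(struct work_struct);

                if (!works)
                        return -ENOMEM;
                for_each_online_cpu(cpu) {
                        INIT_WORK(per_cpu_ptr(works, cpu), func);
                        __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
                                     per_cpu_ptr(works, cpu));
                }
                flush_workqueue(keventd_wq);    /* wait for every CPU */
                free_percpu(works);
                return 0;
        }
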
 
 /*
  * Batched page_cache_release().  Decrement the reference count on all the
@@ -213,28 +262,40 @@ void release_pages(struct page **pages, int nr, int cold)
        pagevec_init(&pages_to_free, cold);
        for (i = 0; i < nr; i++) {
                struct page *page = pages[i];
-               struct zone *pagezone;
+
+               if (unlikely(PageCompound(page))) {
+                       if (zone) {
+                               spin_unlock_irq(&zone->lru_lock);
+                               zone = NULL;
+                       }
+                       put_compound_page(page);
+                       continue;
+               }
 
                if (!put_page_testzero(page))
                        continue;
 
-               pagezone = page_zone(page);
-               if (pagezone != zone) {
-                       if (zone)
-                               spin_unlock_irq(&zone->lru_lock);
-                       zone = pagezone;
-                       spin_lock_irq(&zone->lru_lock);
-               }
-               if (TestClearPageLRU(page))
+               if (PageLRU(page)) {
+                       struct zone *pagezone = page_zone(page);
+                       if (pagezone != zone) {
+                               if (zone)
+                                       spin_unlock_irq(&zone->lru_lock);
+                               zone = pagezone;
+                               spin_lock_irq(&zone->lru_lock);
+                       }
+                       VM_BUG_ON(!PageLRU(page));
+                       __ClearPageLRU(page);
                        del_page_from_lru(zone, page);
-               if (page_count(page) == 0) {
-                       if (!pagevec_add(&pages_to_free, page)) {
+               }
+
+               if (!pagevec_add(&pages_to_free, page)) {
+                       if (zone) {
                                spin_unlock_irq(&zone->lru_lock);
-                               __pagevec_free(&pages_to_free);
-                               pagevec_reinit(&pages_to_free);
-                               zone = NULL;    /* No lock is held */
+                               zone = NULL;
                        }
-               }
+                       __pagevec_free(&pages_to_free);
+                       pagevec_reinit(&pages_to_free);
+               }
        }
        if (zone)
                spin_unlock_irq(&zone->lru_lock);
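Two things change in release_pages(): compound pages are diverted to put_compound_page() with the zone lock dropped first (they never sit on the LRU, and their destructor may do arbitrary work), and the lock is now taken only for pages actually on the LRU. The lock-caching idiom used above, shown in isolation (sketch):

        /* Keep the last zone's lru_lock held across iterations; switch
         * only when the page's zone changes, amortizing lock traffic. */
        struct zone *zone = NULL;
        int i;

        for (i = 0; i < nr; i++) {
                struct zone *pagezone = page_zone(pages[i]);

                if (pagezone != zone) {
                        if (zone)
                                spin_unlock_irq(&zone->lru_lock);
                        zone = pagezone;
                        spin_lock_irq(&zone->lru_lock);
                }
                /* ... operate on pages[i] under zone->lru_lock ... */
        }
        if (zone)
                spin_unlock_irq(&zone->lru_lock);
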
@@ -259,6 +320,8 @@ void __pagevec_release(struct pagevec *pvec)
        pagevec_reinit(pvec);
 }
 
+EXPORT_SYMBOL(__pagevec_release);
+
 /*
  * pagevec_release() for pages which are known to not be on the LRU
  *
@@ -270,11 +333,10 @@ void __pagevec_release_nonlru(struct pagevec *pvec)
        struct pagevec pages_to_free;
 
        pagevec_init(&pages_to_free, pvec->cold);
-       pages_to_free.cold = pvec->cold;
        for (i = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];
 
-               BUG_ON(PageLRU(page));
+               VM_BUG_ON(PageLRU(page));
                if (put_page_testzero(page))
                        pagevec_add(&pages_to_free, page);
        }
@@ -301,8 +363,8 @@ void __pagevec_lru_add(struct pagevec *pvec)
                        zone = pagezone;
                        spin_lock_irq(&zone->lru_lock);
                }
-               if (TestSetPageLRU(page))
-                       BUG();
+               VM_BUG_ON(PageLRU(page));
+               SetPageLRU(page);
                add_page_to_inactive_list(zone, page);
        }
        if (zone)
@@ -328,10 +390,10 @@ void __pagevec_lru_add_active(struct pagevec *pvec)
                        zone = pagezone;
                        spin_lock_irq(&zone->lru_lock);
                }
-               if (TestSetPageLRU(page))
-                       BUG();
-               if (TestSetPageActive(page))
-                       BUG();
+               VM_BUG_ON(PageLRU(page));
+               SetPageLRU(page);
+               VM_BUG_ON(PageActive(page));
+               SetPageActive(page);
                add_page_to_active_list(zone, page);
        }
        if (zone)
@@ -351,7 +413,8 @@ void pagevec_strip(struct pagevec *pvec)
                struct page *page = pvec->pages[i];
 
                if (PagePrivate(page) && !TestSetPageLocked(page)) {
-                       try_to_release_page(page, 0);
+                       if (PagePrivate(page))
+                               try_to_release_page(page, 0);
                        unlock_page(page);
                }
        }
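pagevec_strip() now re-tests PagePrivate() after winning the page lock: between the unlocked test and TestSetPageLocked(), another CPU may have released the page's buffers, so only the locked re-check is authoritative. The idiom in isolation (code as in the hunk above, comments added):

        if (PagePrivate(page) && !TestSetPageLocked(page)) {
                /* state may have changed before we got PG_locked */
                if (PagePrivate(page))
                        try_to_release_page(page, 0);
                unlock_page(page);
        }
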
@@ -380,6 +443,8 @@ unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
        return pagevec_count(pvec);
 }
 
+EXPORT_SYMBOL(pagevec_lookup);
+
 unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
                pgoff_t *index, int tag, unsigned nr_pages)
 {
@@ -388,6 +453,7 @@ unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
        return pagevec_count(pvec);
 }
 
+EXPORT_SYMBOL(pagevec_lookup_tag);
 
 #ifdef CONFIG_SMP
 /*
@@ -411,20 +477,8 @@ void vm_acct_memory(long pages)
        }
        preempt_enable();
 }
-EXPORT_SYMBOL(vm_acct_memory);
 
 #ifdef CONFIG_HOTPLUG_CPU
-static void lru_drain_cache(unsigned int cpu)
-{
-       struct pagevec *pvec = &per_cpu(lru_add_pvecs, cpu);
-
-       /* CPU is dead, so no locking needed. */
-       if (pagevec_count(pvec))
-               __pagevec_lru_add(pvec);
-       pvec = &per_cpu(lru_add_active_pvecs, cpu);
-       if (pagevec_count(pvec))
-               __pagevec_lru_add_active(pvec);
-}
 
 /* Drop the CPU's cached committed space back into the central pool. */
 static int cpu_swap_callback(struct notifier_block *nfb,
@@ -437,34 +491,13 @@ static int cpu_swap_callback(struct notifier_block *nfb,
        if (action == CPU_DEAD) {
                atomic_add(*committed, &vm_committed_space);
                *committed = 0;
-               lru_drain_cache((long)hcpu);
+               __lru_add_drain((long)hcpu);
        }
        return NOTIFY_OK;
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 #endif /* CONFIG_SMP */
 
-#ifdef CONFIG_SMP
-void percpu_counter_mod(struct percpu_counter *fbc, long amount)
-{
-       long count;
-       long *pcount;
-       int cpu = get_cpu();
-
-       pcount = per_cpu_ptr(fbc->counters, cpu);
-       count = *pcount + amount;
-       if (count >= FBC_BATCH || count <= -FBC_BATCH) {
-               spin_lock(&fbc->lock);
-               fbc->count += count;
-               spin_unlock(&fbc->lock);
-               count = 0;
-       }
-       *pcount = count;
-       put_cpu();
-}
-EXPORT_SYMBOL(percpu_counter_mod);
-#endif
-
 /*
  * Perform any setup for the swap system
  */
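percpu_counter_mod() moves out of mm/swap.c in the same window (it lives in lib/percpu_counter.c in later trees). Its batching idea, reproduced from the removed lines with comments for reference:

        /* Accumulate deltas per CPU; fold into the shared count only
         * when a batch threshold is crossed, bounding both lock traffic
         * and the drift visible to approximate readers. */
        void percpu_counter_mod(struct percpu_counter *fbc, long amount)
        {
                long count, *pcount;
                int cpu = get_cpu();

                pcount = per_cpu_ptr(fbc->counters, cpu);
                count = *pcount + amount;
                if (count >= FBC_BATCH || count <= -FBC_BATCH) {
                        spin_lock(&fbc->lock);
                        fbc->count += count;    /* publish the batch */
                        spin_unlock(&fbc->lock);
                        count = 0;
                }
                *pcount = count;
                put_cpu();
        }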