nommu: don't need get_unmapped_area() for NOMMU
diff --git a/mm/vmstat.c b/mm/vmstat.c
index f45d724..6051fba 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
 DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
 EXPORT_PER_CPU_SYMBOL(vm_event_states);
 
-static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask)
+static void sum_vm_events(unsigned long *ret, const struct cpumask *cpumask)
 {
        int cpu;
        int i;
 
        memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
 
-       for_each_cpu_mask_nr(cpu, *cpumask) {
+       for_each_cpu(cpu, cpumask) {
                struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
 
                for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
@@ -43,7 +43,7 @@ static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask)
 void all_vm_events(unsigned long *ret)
 {
        get_online_cpus();
-       sum_vm_events(ret, &cpu_online_map);
+       sum_vm_events(ret, cpu_online_mask);
        put_online_cpus();
 }
 EXPORT_SYMBOL_GPL(all_vm_events);
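
These two hunks are part of the tree-wide cpumask conversion: masks are now passed as const struct cpumask pointers instead of being copied as cpumask_t values, and the cpu_online_map bitmap gives way to the cpu_online_mask accessor, so large-NR_CPUS builds no longer place whole CPU bitmaps on the stack. A minimal sketch of the new-style iteration; example_count_online() is a hypothetical name, not part of the patch:

static int example_count_online(const struct cpumask *mask)
{
        int cpu, n = 0;

        /* for_each_cpu() walks the set bits of *mask through the
         * pointer; the bitmap itself is never copied. */
        for_each_cpu(cpu, mask)
                n++;
        return n;
}

Usage would be example_count_online(cpu_online_mask).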
@@ -135,11 +135,7 @@ static void refresh_zone_stat_thresholds(void)
        int cpu;
        int threshold;
 
-       for_each_zone(zone) {
-
-               if (!zone->present_pages)
-                       continue;
-
+       for_each_populated_zone(zone) {
                threshold = calculate_threshold(zone);
 
                for_each_online_cpu(cpu)
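
for_each_populated_zone() folds the populated-zone test into the iterator itself, so callers stop open-coding the present_pages check. Its definition in include/linux/mmzone.h is roughly:

#define for_each_populated_zone(zone)                   \
        for (zone = (first_online_pgdat())->node_zones; \
             zone;                                      \
             zone = next_zone(zone))                    \
                if (!populated_zone(zone))              \
                        ; /* skip empty zones */        \
                else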
@@ -301,12 +297,9 @@ void refresh_cpu_vm_stats(int cpu)
        int i;
        int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
 
-       for_each_zone(zone) {
+       for_each_populated_zone(zone) {
                struct per_cpu_pageset *p;
 
-               if (!populated_zone(zone))
-                       continue;
-
                p = zone_pcp(zone, cpu);
 
                for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
@@ -516,22 +509,11 @@ static void pagetypeinfo_showblockcount_print(struct seq_file *m,
                        continue;
 
                page = pfn_to_page(pfn);
-#ifdef CONFIG_ARCH_FLATMEM_HAS_HOLES
-               /*
-                * Ordinarily, memory holes in flatmem still have a valid
-                * memmap for the PFN range. However, an architecture for
-                * embedded systems (e.g. ARM) can free up the memmap backing
-                * holes to save memory on the assumption the memmap is
-                * never used. The page_zone linkages are then broken even
-                * though pfn_valid() returns true. Skip the page if the
-                * linkages are broken. Even if this test passed, the impact
-                * is that the counters for the movable type are off but
-                * fragmentation monitoring is likely meaningless on small
-                * systems.
-                */
-               if (page_zone(page) != zone)
+
+               /* Watch for unexpected holes punched in the memmap */
+               if (!memmap_valid_within(pfn, page, zone))
                        continue;
-#endif
+
                mtype = get_pageblock_migratetype(page);
 
                if (mtype < MIGRATE_TYPES)
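
memmap_valid_within() moves the CONFIG_ARCH_FLATMEM_HAS_HOLES special case behind a helper that other memmap walkers can share; with the option disabled it should compile down to a constant 1. A sketch of the check, assuming the mm/mmzone.c implementation:

int memmap_valid_within(unsigned long pfn,
                        struct page *page, struct zone *zone)
{
        /* The memmap backing a flatmem hole may have been freed;
         * if so, the page/pfn and page/zone linkages are garbage. */
        if (page_to_pfn(page) != pfn)
                return 0;
        if (page_zone(page) != zone)
                return 0;
        return 1;
}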
@@ -600,13 +582,25 @@ static const struct file_operations fragmentation_file_operations = {
        .release        = seq_release,
 };
 
-const struct seq_operations pagetypeinfo_op = {
+static const struct seq_operations pagetypeinfo_op = {
        .start  = frag_start,
        .next   = frag_next,
        .stop   = frag_stop,
        .show   = pagetypeinfo_show,
 };
 
+static int pagetypeinfo_open(struct inode *inode, struct file *file)
+{
+       return seq_open(file, &pagetypeinfo_op);
+}
+
+static const struct file_operations pagetypeinfo_file_ops = {
+       .open           = pagetypeinfo_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = seq_release,
+};
+
 #ifdef CONFIG_ZONE_DMA
 #define TEXT_FOR_DMA(xx) xx "_dma",
 #else
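
The hunk above, and the matching hunks for vmstat and zoneinfo further down, convert an exported seq_operations table into the standard self-contained seq_file boilerplate: a static ops table, an open() that binds it with seq_open(), and a file_operations registered with proc_create() at init time. The single-record variant of the same pattern looks like this; my_show, my_open, and my_fops are hypothetical names, not from this patch:

static int my_show(struct seq_file *m, void *v)
{
        seq_printf(m, "example\n");
        return 0;
}

static int my_open(struct inode *inode, struct file *file)
{
        return single_open(file, my_show, NULL);
}

static const struct file_operations my_fops = {
        .open           = my_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,       /* pairs with single_open() */
};

Registration would be proc_create("my_file", S_IRUGO, NULL, &my_fops).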
@@ -635,10 +629,8 @@ static const char * const vmstat_text[] = {
        "nr_active_anon",
        "nr_inactive_file",
        "nr_active_file",
-#ifdef CONFIG_UNEVICTABLE_LRU
        "nr_unevictable",
        "nr_mlock",
-#endif
        "nr_anon_pages",
        "nr_mapped",
        "nr_file_pages",
@@ -647,11 +639,14 @@ static const char * const vmstat_text[] = {
        "nr_slab_reclaimable",
        "nr_slab_unreclaimable",
        "nr_page_table_pages",
+       "nr_kernel_stack",
        "nr_unstable",
        "nr_bounce",
        "nr_vmscan_write",
        "nr_writeback_temp",
-
+       "nr_isolated_anon",
+       "nr_isolated_file",
+       "nr_shmem",
 #ifdef CONFIG_NUMA
        "numa_hit",
        "numa_miss",
@@ -681,10 +676,16 @@ static const char * const vmstat_text[] = {
        TEXTS_FOR_ZONES("pgscan_kswapd")
        TEXTS_FOR_ZONES("pgscan_direct")
 
+#ifdef CONFIG_NUMA
+       "zone_reclaim_failed",
+#endif
        "pginodesteal",
        "slabs_scanned",
        "kswapd_steal",
        "kswapd_inodesteal",
+       "kswapd_low_wmark_hit_quickly",
+       "kswapd_high_wmark_hit_quickly",
+       "kswapd_skip_congestion_wait",
        "pageoutrun",
        "allocstall",
 
@@ -693,7 +694,6 @@ static const char * const vmstat_text[] = {
        "htlb_buddy_alloc_success",
        "htlb_buddy_alloc_fail",
 #endif
-#ifdef CONFIG_UNEVICTABLE_LRU
        "unevictable_pgs_culled",
        "unevictable_pgs_scanned",
        "unevictable_pgs_rescued",
@@ -703,7 +703,6 @@ static const char * const vmstat_text[] = {
        "unevictable_pgs_stranded",
        "unevictable_pgs_mlockfreed",
 #endif
-#endif
 };
 
 static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
@@ -716,18 +715,14 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
                   "\n        min      %lu"
                   "\n        low      %lu"
                   "\n        high     %lu"
-                  "\n        scanned  %lu (aa: %lu ia: %lu af: %lu if: %lu)"
+                  "\n        scanned  %lu"
                   "\n        spanned  %lu"
                   "\n        present  %lu",
                   zone_page_state(zone, NR_FREE_PAGES),
-                  zone->pages_min,
-                  zone->pages_low,
-                  zone->pages_high,
+                  min_wmark_pages(zone),
+                  low_wmark_pages(zone),
+                  high_wmark_pages(zone),
                   zone->pages_scanned,
-                  zone->lru[LRU_ACTIVE_ANON].nr_scan,
-                  zone->lru[LRU_INACTIVE_ANON].nr_scan,
-                  zone->lru[LRU_ACTIVE_FILE].nr_scan,
-                  zone->lru[LRU_INACTIVE_FILE].nr_scan,
                   zone->spanned_pages,
                   zone->present_pages);
 
@@ -783,7 +778,7 @@ static int zoneinfo_show(struct seq_file *m, void *arg)
        return 0;
 }
 
-const struct seq_operations zoneinfo_op = {
+static const struct seq_operations zoneinfo_op = {
        .start  = frag_start, /* iterate over all zones. The same as in
                               * fragmentation. */
        .next   = frag_next,
@@ -791,6 +786,18 @@ const struct seq_operations zoneinfo_op = {
        .show   = zoneinfo_show,
 };
 
+static int zoneinfo_open(struct inode *inode, struct file *file)
+{
+       return seq_open(file, &zoneinfo_op);
+}
+
+static const struct file_operations proc_zoneinfo_file_operations = {
+       .open           = zoneinfo_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = seq_release,
+};
+
 static void *vmstat_start(struct seq_file *m, loff_t *pos)
 {
        unsigned long *v;
@@ -846,13 +853,24 @@ static void vmstat_stop(struct seq_file *m, void *arg)
        m->private = NULL;
 }
 
-const struct seq_operations vmstat_op = {
+static const struct seq_operations vmstat_op = {
        .start  = vmstat_start,
        .next   = vmstat_next,
        .stop   = vmstat_stop,
        .show   = vmstat_show,
 };
 
+static int vmstat_open(struct inode *inode, struct file *file)
+{
+       return seq_open(file, &vmstat_op);
+}
+
+static const struct file_operations proc_vmstat_file_operations = {
+       .open           = vmstat_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = seq_release,
+};
 #endif /* CONFIG_PROC_FS */
 
 #ifdef CONFIG_SMP
@@ -863,15 +881,15 @@ static void vmstat_update(struct work_struct *w)
 {
        refresh_cpu_vm_stats(smp_processor_id());
        schedule_delayed_work(&__get_cpu_var(vmstat_work),
-               sysctl_stat_interval);
+               round_jiffies_relative(sysctl_stat_interval));
 }
 
 static void __cpuinit start_cpu_timer(int cpu)
 {
-       struct delayed_work *vmstat_work = &per_cpu(vmstat_work, cpu);
+       struct delayed_work *work = &per_cpu(vmstat_work, cpu);
 
-       INIT_DELAYED_WORK_DEFERRABLE(vmstat_work, vmstat_update);
-       schedule_delayed_work_on(cpu, vmstat_work, HZ + cpu);
+       INIT_DELAYED_WORK_DEFERRABLE(work, vmstat_update);
+       schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu));
 }
 
 /*
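
Rounding the timeout with round_jiffies_relative()/__round_jiffies_relative() aligns each CPU's vmstat_update with whole-second boundaries plus a per-CPU skew, so otherwise-idle systems can batch these wakeups instead of taking one scattered timer tick per CPU; renaming the local to "work" also stops it shadowing the per-CPU variable vmstat_work. A sketch of the scheduling call; example_start_timer() is a hypothetical name:

static void example_start_timer(struct delayed_work *work, int cpu)
{
        /* __round_jiffies_relative(HZ, cpu): about one second from
         * now, rounded to a second boundary with a cpu-dependent
         * offset so CPUs do not all fire on the very same jiffy. */
        schedule_delayed_work_on(cpu, work,
                                 __round_jiffies_relative(HZ, cpu));
}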
@@ -925,6 +943,9 @@ static int __init setup_vmstat(void)
 #endif
 #ifdef CONFIG_PROC_FS
        proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
+       proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
+       proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
+       proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
 #endif
        return 0;
 }