/*
 *  linux/mm/vmstat.c
 *
 *  Manages VM statistics
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  zoned VM statistics
 *  Copyright (C) 2006 Silicon Graphics, Inc.,
 *		Christoph Lameter <christoph@lameter.com>
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/vmstat.h>
#include <linux/sched.h>

#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
EXPORT_PER_CPU_SYMBOL(vm_event_states);

static void sum_vm_events(unsigned long *ret, const struct cpumask *cpumask)
{
	int cpu;
	int i;

	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));

	for_each_cpu(cpu, cpumask) {
		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);

		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
			ret[i] += this->event[i];
	}
}

/*
 * Accumulate the vm event counters across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
void all_vm_events(unsigned long *ret)
{
	get_online_cpus();
	sum_vm_events(ret, cpu_online_mask);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(all_vm_events);

#ifdef CONFIG_HOTPLUG
/*
 * Fold the foreign cpu events into our own.
 *
 * This adds the event counts of the other processor to our own
 * counters, keeping the global totals constant.
 */
void vm_events_fold_cpu(int cpu)
{
	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
	int i;

	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
		count_vm_events(i, fold_state->event[i]);
		fold_state->event[i] = 0;
	}
}
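
/*
 * Example with illustrative numbers: if the cpu going away had
 * recorded 5 PGFAULT events, the loop above adds 5 to this cpu's
 * PGFAULT counter and zeroes the source, so the sum reported by
 * all_vm_events() is unchanged by the hotplug operation.
 */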
#endif /* CONFIG_HOTPLUG */

#endif /* CONFIG_VM_EVENT_COUNTERS */

/*
 * Manage combined zone based / global counters
 *
 * vm_stat contains the global counters
 */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
EXPORT_SYMBOL(vm_stat);

#ifdef CONFIG_SMP

static int calculate_threshold(struct zone *zone)
{
	int threshold;
	int mem;	/* memory in 128 MB units */

	/*
	 * The threshold scales with the number of processors and the amount
	 * of memory per zone. More memory means that we can defer updates
	 * for longer; more processors could lead to more contention.
	 * fls() is used as a cheap way of logarithmic scaling.
	 *
	 * Some sample thresholds:
	 *
	 * Threshold	Processors	(fls)	Zonesize	fls(mem+1)
	 * ------------------------------------------------------------------
	 * 8		1		1	0.9-1 GB	4
	 * 16		2		2	0.9-1 GB	4
	 * 20		2		2	1-2 GB		5
	 * 24		2		2	2-4 GB		6
	 * 28		2		2	4-8 GB		7
	 * 32		2		2	8-16 GB		8
	 * 4		2		2	<128M		1
	 * 30		4		3	2-4 GB		5
	 * 48		4		3	8-16 GB		8
	 * 32		8		4	1-2 GB		4
	 * 32		8		4	0.9-1GB		4
	 * 10		16		5	<128M		1
	 * 40		16		5	900M		4
	 * 70		64		7	2-4 GB		5
	 * 84		64		7	4-8 GB		6
	 * 108		512		9	4-8 GB		6
	 * 125		1024		10	8-16 GB		8
	 * 125		1024		10	16-32 GB	9
	 */

	mem = zone->present_pages >> (27 - PAGE_SHIFT);

	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));

	/*
	 * Maximum threshold is 125
	 */
	threshold = min(125, threshold);

	return threshold;
}
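
/*
 * Worked example (illustrative): for a ~900 MB zone,
 * mem = 900 MB / 128 MB = 7, so on a single-cpu machine
 *
 *	threshold = 2 * fls(1) * (1 + fls(7)) = 2 * 1 * 4 = 8,
 *
 * matching the first sample row above. On large machines the raw
 * value can exceed 125 (e.g. 64 cpus with a 16-32 GB zone gives
 * 2 * 7 * 9 = 126) and is clamped to the maximum of 125.
 */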

/*
 * Refresh the thresholds for each zone.
 */
static void refresh_zone_stat_thresholds(void)
{
	struct zone *zone;
	int cpu;
	int threshold;

	for_each_populated_zone(zone) {
		threshold = calculate_threshold(zone);

		for_each_online_cpu(cpu)
			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
							= threshold;
	}
}

/*
 * For use when we know that interrupts are disabled.
 */
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
				int delta)
{
	struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);

	s8 *p = pcp->vm_stat_diff + item;
	long x;

	x = delta + *p;

	if (unlikely(x > pcp->stat_threshold || x < -pcp->stat_threshold)) {
		zone_page_state_add(x, zone, item);
		x = 0;
	}
	*p = x;
}
EXPORT_SYMBOL(__mod_zone_page_state);
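
/*
 * Example of the batching above (illustrative numbers): with
 * stat_threshold = 32 and the per-cpu diff at 30, a delta of +5 gives
 * x = 35 > 32, so all 35 pages are folded into zone->vm_stat and
 * vm_stat at once and the local diff resets to 0. Smaller deltas just
 * accumulate in the s8 diff without touching any shared cacheline.
 */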

/*
 * For an unknown interrupt state
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
					int delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_zone_page_state(zone, item, delta);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_zone_page_state);

/*
 * Optimized increment and decrement functions.
 *
 * These are only for a single page and therefore can take a struct page *
 * argument instead of struct zone *. This allows the inclusion of the code
 * generated for page_zone(page) into the optimized functions.
 *
 * No overflow check is necessary and therefore the differential can be
 * incremented or decremented in place which may allow the compilers to
 * generate better code.
 * The increment or decrement is known and therefore one boundary check can
 * be omitted.
 *
 * NOTE: These functions are very performance sensitive. Change only
 * with care.
 *
 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 * However, the code must first determine the differential location in a zone
 * based on the processor number and then inc/dec the counter. There is no
 * guarantee without disabling preemption that the processor will not change
 * in between and therefore the atomicity vs. interrupt cannot be exploited
 * in a useful way here.
 */
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
	s8 *p = pcp->vm_stat_diff + item;

	(*p)++;

	if (unlikely(*p > pcp->stat_threshold)) {
		int overstep = pcp->stat_threshold / 2;

		zone_page_state_add(*p + overstep, zone, item);
		*p = -overstep;
	}
}
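
/*
 * The overstep halves the fold frequency for counters that mostly move
 * in one direction. Illustrative numbers: with stat_threshold = 32,
 * the increment that reaches *p = 33 folds 33 + 16 = 49 pages into the
 * shared counters and leaves *p = -16, so a further 49 increments are
 * needed before the next fold instead of 33.
 */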

void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__inc_zone_page_state);

void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
	s8 *p = pcp->vm_stat_diff + item;

	(*p)--;

	if (unlikely(*p < -pcp->stat_threshold)) {
		int overstep = pcp->stat_threshold / 2;

		zone_page_state_add(*p - overstep, zone, item);
		*p = overstep;
	}
}

void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__dec_zone_page_state);

void inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;
	struct zone *zone;

	zone = page_zone(page);
	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_zone_page_state(page, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_zone_page_state);

/*
 * Update the zone counters for one cpu.
 *
 * The cpu specified must be either the current cpu or a processor that
 * is not online. If it is the current cpu then the execution thread must
 * be pinned to the current cpu.
 *
 * Note that refresh_cpu_vm_stats strives to only access
 * node-local memory. The per cpu pagesets on remote zones are placed
 * in the memory local to the processor using that pageset. So the
 * loop over all zones will access a series of cachelines local to
 * the processor.
 *
 * The call to zone_page_state_add updates the cachelines with the
 * statistics in the remote zone struct as well as the global cachelines
 * with the global counters. These could cause remote node cache line
 * bouncing and should therefore only be done when necessary.
 */
void refresh_cpu_vm_stats(int cpu)
{
	struct zone *zone;
	int i;
	int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };

	for_each_populated_zone(zone) {
		struct per_cpu_pageset *p;

		p = per_cpu_ptr(zone->pageset, cpu);

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
			if (p->vm_stat_diff[i]) {
				unsigned long flags;
				int v;

				local_irq_save(flags);
				v = p->vm_stat_diff[i];
				p->vm_stat_diff[i] = 0;
				local_irq_restore(flags);
				atomic_long_add(v, &zone->vm_stat[i]);
				global_diff[i] += v;
#ifdef CONFIG_NUMA
				/* 3 seconds idle till flush */
				p->expire = 3;
#endif
			}
		cond_resched();
#ifdef CONFIG_NUMA
		/*
		 * Deal with draining the remote pageset of this
		 * processor.
		 *
		 * Check if there are pages remaining in this pageset;
		 * if not then there is nothing to expire.
		 */
		if (!p->expire || !p->pcp.count)
			continue;

		/*
		 * We never drain zones local to this processor.
		 */
		if (zone_to_nid(zone) == numa_node_id()) {
			p->expire = 0;
			continue;
		}

		p->expire--;
		if (p->expire)
			continue;

		if (p->pcp.count)
			drain_zone_pages(zone, &p->pcp);
#endif
	}

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		if (global_diff[i])
			atomic_long_add(global_diff[i], &vm_stat[i]);
}
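
/*
 * Sketch of the expire countdown above, assuming the default
 * sysctl_stat_interval of HZ (one refresh per second): a remote
 * pageset that sees activity has p->expire reset to 3, so it is
 * drained only after roughly three consecutive idle refresh passes,
 * i.e. about three seconds without a stat update on that zone.
 */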

#endif

#ifdef CONFIG_NUMA
/*
 * preferred_zone = the zone preferred for this allocation, taken from
 *                  the zonelist passed to the allocator
 * z              = the zone from which the allocation actually occurred
 *
 * Must be called with interrupts disabled.
 */
void zone_statistics(struct zone *preferred_zone, struct zone *z)
{
	if (z->zone_pgdat == preferred_zone->zone_pgdat) {
		__inc_zone_state(z, NUMA_HIT);
	} else {
		__inc_zone_state(z, NUMA_MISS);
		__inc_zone_state(preferred_zone, NUMA_FOREIGN);
	}
	if (z->node == numa_node_id())
		__inc_zone_state(z, NUMA_LOCAL);
	else
		__inc_zone_state(z, NUMA_OTHER);
}
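
/*
 * Accounting example: if a task on node 0 asks for node 0 memory but
 * the page comes from node 1, the code above bumps NUMA_MISS on node
 * 1's zone and NUMA_FOREIGN on node 0's preferred zone; NUMA_OTHER is
 * also bumped because the supplying zone is not on the running cpu's
 * node.
 */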
#endif

#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static char * const migratetype_names[MIGRATE_TYPES] = {
	"Unmovable",
	"Reclaimable",
	"Movable",
	"Reserve",
	"Isolate",
};

static void *frag_start(struct seq_file *m, loff_t *pos)
{
	pg_data_t *pgdat;
	loff_t node = *pos;
	for (pgdat = first_online_pgdat();
	     pgdat && node;
	     pgdat = next_online_pgdat(pgdat))
		--node;

	return pgdat;
}

static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	(*pos)++;
	return next_online_pgdat(pgdat);
}

static void frag_stop(struct seq_file *m, void *arg)
{
}

/* Walk all the zones in a node and print using a callback */
static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		print(m, pgdat, zone);
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}

static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
						struct zone *zone)
{
	int order;

	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (order = 0; order < MAX_ORDER; ++order)
		seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
	seq_putc(m, '\n');
}

/*
 * This walks the free areas for each zone.
 */
static int frag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	walk_zones_in_node(m, pgdat, frag_show_print);
	return 0;
}
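
/*
 * Example /proc/buddyinfo line as produced by frag_show_print()
 * (counts illustrative): column N is the number of free blocks of
 * order N in that zone.
 *
 *	Node 0, zone   Normal    145     62     33     12      5      1 ...
 */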

static void pagetypeinfo_showfree_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int order, mtype;

	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
		seq_printf(m, "Node %4d, zone %8s, type %12s ",
					pgdat->node_id,
					zone->name,
					migratetype_names[mtype]);
		for (order = 0; order < MAX_ORDER; ++order) {
			unsigned long freecount = 0;
			struct free_area *area;
			struct list_head *curr;

			area = &(zone->free_area[order]);

			list_for_each(curr, &area->free_list[mtype])
				freecount++;
			seq_printf(m, "%6lu ", freecount);
		}
		seq_putc(m, '\n');
	}
}

/* Print out the free pages at each order for each migratetype */
static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
{
	int order;
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* Print header */
	seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
	for (order = 0; order < MAX_ORDER; ++order)
		seq_printf(m, "%6d ", order);
	seq_putc(m, '\n');

	walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);

	return 0;
}

static void pagetypeinfo_showblockcount_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int mtype;
	unsigned long pfn;
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = start_pfn + zone->spanned_pages;
	unsigned long count[MIGRATE_TYPES] = { 0, };

	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);

		/* Watch for unexpected holes punched in the memmap */
		if (!memmap_valid_within(pfn, page, zone))
			continue;

		mtype = get_pageblock_migratetype(page);

		if (mtype < MIGRATE_TYPES)
			count[mtype]++;
	}

	/* Print counts */
	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12lu ", count[mtype]);
	seq_putc(m, '\n');
}

/* Print out the number of pageblocks for each migratetype */
static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
{
	int mtype;
	pg_data_t *pgdat = (pg_data_t *)arg;

	seq_printf(m, "\n%-23s", "Number of blocks type ");
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12s ", migratetype_names[mtype]);
	seq_putc(m, '\n');
	walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print);

	return 0;
}

/*
 * This prints out statistics in relation to grouping pages by mobility.
 * It is expensive to collect, so do not read the file constantly.
 */
static int pagetypeinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* check memoryless node */
	if (!node_state(pgdat->node_id, N_HIGH_MEMORY))
		return 0;

	seq_printf(m, "Page block order: %d\n", pageblock_order);
	seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
	seq_putc(m, '\n');
	pagetypeinfo_showfree(m, pgdat);
	pagetypeinfo_showblockcount(m, pgdat);

	return 0;
}

static const struct seq_operations fragmentation_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= frag_show,
};

static int fragmentation_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &fragmentation_op);
}

static const struct file_operations fragmentation_file_operations = {
	.open		= fragmentation_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static const struct seq_operations pagetypeinfo_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= pagetypeinfo_show,
};

static int pagetypeinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &pagetypeinfo_op);
}

static const struct file_operations pagetypeinfo_file_ops = {
	.open		= pagetypeinfo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

#ifdef CONFIG_ZONE_DMA
#define TEXT_FOR_DMA(xx) xx "_dma",
#else
#define TEXT_FOR_DMA(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define TEXT_FOR_DMA32(xx) xx "_dma32",
#else
#define TEXT_FOR_DMA32(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define TEXT_FOR_HIGHMEM(xx) xx "_high",
#else
#define TEXT_FOR_HIGHMEM(xx)
#endif

#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
					TEXT_FOR_HIGHMEM(xx) xx "_movable",

static const char * const vmstat_text[] = {
	/* Zoned VM counters */
	"nr_free_pages",
	"nr_inactive_anon",
	"nr_active_anon",
	"nr_inactive_file",
	"nr_active_file",
	"nr_unevictable",
	"nr_mlock",
	"nr_anon_pages",
	"nr_mapped",
	"nr_file_pages",
	"nr_dirty",
	"nr_writeback",
	"nr_slab_reclaimable",
	"nr_slab_unreclaimable",
	"nr_page_table_pages",
	"nr_kernel_stack",
	"nr_unstable",
	"nr_bounce",
	"nr_vmscan_write",
	"nr_writeback_temp",
	"nr_isolated_anon",
	"nr_isolated_file",
	"nr_shmem",
#ifdef CONFIG_NUMA
	"numa_hit",
	"numa_miss",
	"numa_foreign",
	"numa_interleave",
	"numa_local",
	"numa_other",
#endif

#ifdef CONFIG_VM_EVENT_COUNTERS
	"pgpgin",
	"pgpgout",
	"pswpin",
	"pswpout",

	TEXTS_FOR_ZONES("pgalloc")

	"pgfree",
	"pgactivate",
	"pgdeactivate",

	"pgfault",
	"pgmajfault",

	TEXTS_FOR_ZONES("pgrefill")
	TEXTS_FOR_ZONES("pgsteal")
	TEXTS_FOR_ZONES("pgscan_kswapd")
	TEXTS_FOR_ZONES("pgscan_direct")

#ifdef CONFIG_NUMA
	"zone_reclaim_failed",
#endif
	"pginodesteal",
	"slabs_scanned",
	"kswapd_steal",
	"kswapd_inodesteal",
	"kswapd_low_wmark_hit_quickly",
	"kswapd_high_wmark_hit_quickly",
	"kswapd_skip_congestion_wait",
	"pageoutrun",
	"allocstall",

	"pgrotated",
#ifdef CONFIG_HUGETLB_PAGE
	"htlb_buddy_alloc_success",
	"htlb_buddy_alloc_fail",
#endif
	"unevictable_pgs_culled",
	"unevictable_pgs_scanned",
	"unevictable_pgs_rescued",
	"unevictable_pgs_mlocked",
	"unevictable_pgs_munlocked",
	"unevictable_pgs_cleared",
	"unevictable_pgs_stranded",
	"unevictable_pgs_mlockfreed",
#endif
};
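
/*
 * Note: the strings above must stay in the same order as the
 * corresponding enums: enum zone_stat_item first, then (under
 * CONFIG_VM_EVENT_COUNTERS) enum vm_event_item. vmstat_show() indexes
 * this array with a flat offset into the combined counter buffer built
 * by vmstat_start(), so a reordered or missing name would mislabel
 * every subsequent line of /proc/vmstat.
 */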

static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
							struct zone *zone)
{
	int i;
	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
	seq_printf(m,
		   "\n  pages free     %lu"
		   "\n        min      %lu"
		   "\n        low      %lu"
		   "\n        high     %lu"
		   "\n        scanned  %lu"
		   "\n        spanned  %lu"
		   "\n        present  %lu",
		   zone_page_state(zone, NR_FREE_PAGES),
		   min_wmark_pages(zone),
		   low_wmark_pages(zone),
		   high_wmark_pages(zone),
		   zone->pages_scanned,
		   zone->spanned_pages,
		   zone->present_pages);

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
				zone_page_state(zone, i));

	seq_printf(m,
		   "\n        protection: (%lu",
		   zone->lowmem_reserve[0]);
	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
		seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
	seq_printf(m,
		   ")"
		   "\n  pagesets");
	for_each_online_cpu(i) {
		struct per_cpu_pageset *pageset;

		pageset = per_cpu_ptr(zone->pageset, i);
		seq_printf(m,
			   "\n    cpu: %i"
			   "\n              count: %i"
			   "\n              high:  %i"
			   "\n              batch: %i",
			   i,
			   pageset->pcp.count,
			   pageset->pcp.high,
			   pageset->pcp.batch);
#ifdef CONFIG_SMP
		seq_printf(m, "\n  vm stats threshold: %d",
				pageset->stat_threshold);
#endif
	}
	seq_printf(m,
		   "\n  all_unreclaimable: %u"
		   "\n  prev_priority:     %i"
		   "\n  start_pfn:         %lu"
		   "\n  inactive_ratio:    %u",
		   zone->all_unreclaimable,
		   zone->prev_priority,
		   zone->zone_start_pfn,
		   zone->inactive_ratio);
	seq_putc(m, '\n');
}

/*
 * Output information about zones in @pgdat.
 */
static int zoneinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	walk_zones_in_node(m, pgdat, zoneinfo_show_print);
	return 0;
}

static const struct seq_operations zoneinfo_op = {
	.start	= frag_start, /* iterate over all zones. The same as in
			       * fragmentation. */
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= zoneinfo_show,
};

static int zoneinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &zoneinfo_op);
}

static const struct file_operations proc_zoneinfo_file_operations = {
	.open		= zoneinfo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
	unsigned long *v;
#ifdef CONFIG_VM_EVENT_COUNTERS
	unsigned long *e;
#endif
	int i;

	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;

#ifdef CONFIG_VM_EVENT_COUNTERS
	v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long)
			+ sizeof(struct vm_event_state), GFP_KERNEL);
#else
	v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long),
			GFP_KERNEL);
#endif
	m->private = v;
	if (!v)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		v[i] = global_page_state(i);
#ifdef CONFIG_VM_EVENT_COUNTERS
	e = v + NR_VM_ZONE_STAT_ITEMS;
	all_vm_events(e);
	e[PGPGIN] /= 2;		/* sectors -> kbytes */
	e[PGPGOUT] /= 2;
#endif
	return v + *pos;
}
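
/*
 * Layout of the snapshot buffer built above: v[0..NR_VM_ZONE_STAT_ITEMS-1]
 * holds the global zone counters, with the vm event counters placed
 * directly behind them, mirroring the order of vmstat_text[]. *pos is a
 * flat index into this combined array, so vmstat_next() and
 * vmstat_show() can step through both counter families uniformly.
 */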

static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
	(*pos)++;
	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;
	return (unsigned long *)m->private + *pos;
}

static int vmstat_show(struct seq_file *m, void *arg)
{
	unsigned long *l = arg;
	unsigned long off = l - (unsigned long *)m->private;

	seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
	return 0;
}

static void vmstat_stop(struct seq_file *m, void *arg)
{
	kfree(m->private);
	m->private = NULL;
}

static const struct seq_operations vmstat_op = {
	.start	= vmstat_start,
	.next	= vmstat_next,
	.stop	= vmstat_stop,
	.show	= vmstat_show,
};

static int vmstat_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &vmstat_op);
}

static const struct file_operations proc_vmstat_file_operations = {
	.open		= vmstat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
int sysctl_stat_interval __read_mostly = HZ;

static void vmstat_update(struct work_struct *w)
{
	refresh_cpu_vm_stats(smp_processor_id());
	schedule_delayed_work(&__get_cpu_var(vmstat_work),
		round_jiffies_relative(sysctl_stat_interval));
}

static void __cpuinit start_cpu_timer(int cpu)
{
	struct delayed_work *work = &per_cpu(vmstat_work, cpu);

	INIT_DELAYED_WORK_DEFERRABLE(work, vmstat_update);
	schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu));
}
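
/*
 * Timing note: vmstat_update() reschedules itself every
 * sysctl_stat_interval jiffies (HZ, i.e. about one second, by default).
 * __round_jiffies_relative(HZ, cpu) staggers the initial expiry per
 * cpu so the per-cpu work items do not all fire in the same tick, and
 * the deferrable work lets an idle cpu sleep through the update.
 */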

/*
 * Use the cpu notifier to ensure that the thresholds are recalculated
 * when necessary.
 */
static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
		unsigned long action,
		void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		start_cpu_timer(cpu);
		node_set_state(cpu_to_node(cpu), N_CPU);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		cancel_rearming_delayed_work(&per_cpu(vmstat_work, cpu));
		per_cpu(vmstat_work, cpu).work.func = NULL;
		break;
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		start_cpu_timer(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		refresh_zone_stat_thresholds();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata vmstat_notifier =
	{ &vmstat_cpuup_callback, NULL, 0 };
#endif

static int __init setup_vmstat(void)
{
#ifdef CONFIG_SMP
	int cpu;

	refresh_zone_stat_thresholds();
	register_cpu_notifier(&vmstat_notifier);

	for_each_online_cpu(cpu)
		start_cpu_timer(cpu);
#endif
#ifdef CONFIG_PROC_FS
	proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
	proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
	proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
	proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
#endif
	return 0;
}
module_init(setup_vmstat)