SLUB: Avoid page struct cacheline bouncing due to remote frees to cpu slab
/*
 *  linux/mm/vmstat.c
 *
 *  Manages VM statistics
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  zoned VM statistics
 *  Copyright (C) 2006 Silicon Graphics, Inc.,
 *              Christoph Lameter <christoph@lameter.com>
 */

#include <linux/mm.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/sched.h>

#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
EXPORT_PER_CPU_SYMBOL(vm_event_states);

static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask)
{
        int cpu = 0;
        int i;

        memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));

        cpu = first_cpu(*cpumask);
        while (cpu < NR_CPUS) {
                struct vm_event_state *this = &per_cpu(vm_event_states, cpu);

                cpu = next_cpu(cpu, *cpumask);

                if (cpu < NR_CPUS)
                        prefetch(&per_cpu(vm_event_states, cpu));

                for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
                        ret[i] += this->event[i];
        }
}

/*
 * Accumulate the vm event counters across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
void all_vm_events(unsigned long *ret)
{
        sum_vm_events(ret, &cpu_online_map);
}
EXPORT_SYMBOL_GPL(all_vm_events);
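
/*
 * Usage sketch (illustrative; the "events" array is hypothetical, not
 * taken from this file): a caller snapshots all event counters into a
 * local array and then reads individual items from the snapshot.
 *
 *      unsigned long events[NR_VM_EVENT_ITEMS];
 *
 *      all_vm_events(events);
 *      printk("page faults so far: %lu\n", events[PGFAULT]);
 *
 * Because the per-cpu counters keep changing while they are summed,
 * the snapshot is only approximate, as noted above.
 */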

#ifdef CONFIG_HOTPLUG
/*
 * Fold the foreign cpu events into our own.
 *
 * This is adding to the events on one processor
 * but keeps the global counts constant.
 */
void vm_events_fold_cpu(int cpu)
{
        struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
        int i;

        for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
                count_vm_events(i, fold_state->event[i]);
                fold_state->event[i] = 0;
        }
}
#endif /* CONFIG_HOTPLUG */

#endif /* CONFIG_VM_EVENT_COUNTERS */

/*
 * Manage combined zone based / global counters
 *
 * vm_stat contains the global counters
 */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
EXPORT_SYMBOL(vm_stat);

#ifdef CONFIG_SMP

static int calculate_threshold(struct zone *zone)
{
        int threshold;
        int mem;        /* memory in 128 MB units */

        /*
         * The threshold scales with the number of processors and the amount
         * of memory per zone. More memory means that we can defer updates for
         * longer, more processors could lead to more contention.
         * fls() is used to have a cheap way of logarithmic scaling.
         *
         * Some sample thresholds:
         *
         * Threshold    Processors      (fls)   Zonesize        fls(mem+1)
         * ------------------------------------------------------------------
         * 8            1               1       0.9-1 GB        4
         * 16           2               2       0.9-1 GB        4
         * 20           2               2       1-2 GB          5
         * 24           2               2       2-4 GB          6
         * 28           2               2       4-8 GB          7
         * 32           2               2       8-16 GB         8
         * 4            2               2       <128M           1
         * 30           4               3       2-4 GB          5
         * 48           4               3       8-16 GB         8
         * 32           8               4       1-2 GB          4
         * 32           8               4       0.9-1GB         4
         * 10           16              5       <128M           1
         * 40           16              5       900M            4
         * 70           64              7       2-4 GB          5
         * 84           64              7       4-8 GB          6
         * 108          512             9       4-8 GB          6
         * 125          1024            10      8-16 GB         8
         * 125          1024            10      16-32 GB        9
         */

        mem = zone->present_pages >> (27 - PAGE_SHIFT);

        threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));

        /*
         * Maximum threshold is 125
         */
        threshold = min(125, threshold);

        return threshold;
}
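
/*
 * Worked example (illustrative): on a 2-cpu machine with a 1.5 GB zone,
 * mem = 1.5 GB / 128 MB = 12 units, so
 *
 *      threshold = 2 * fls(2) * (1 + fls(12)) = 2 * 2 * 5 = 20
 *
 * which matches the "2 processors, 1-2 GB" row in the table above.
 */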

/*
 * Refresh the thresholds for each zone.
 */
static void refresh_zone_stat_thresholds(void)
{
        struct zone *zone;
        int cpu;
        int threshold;

        for_each_zone(zone) {

                if (!zone->present_pages)
                        continue;

                threshold = calculate_threshold(zone);

                for_each_online_cpu(cpu)
                        zone_pcp(zone, cpu)->stat_threshold = threshold;
        }
}

/*
 * For use when we know that interrupts are disabled.
 */
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
                                int delta)
{
        struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
        s8 *p = pcp->vm_stat_diff + item;
        long x;

        x = delta + *p;

        if (unlikely(x > pcp->stat_threshold || x < -pcp->stat_threshold)) {
                zone_page_state_add(x, zone, item);
                x = 0;
        }
        *p = x;
}
EXPORT_SYMBOL(__mod_zone_page_state);

/*
 * For an unknown interrupt state
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
                                        int delta)
{
        unsigned long flags;

        local_irq_save(flags);
        __mod_zone_page_state(zone, item, delta);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_zone_page_state);
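
/*
 * Usage sketch (illustrative; "page" and "nr_pages" are hypothetical):
 * callers pick the variant matching their interrupt state. Accounting
 * code running in process context would use the irq-safe form:
 *
 *      mod_zone_page_state(page_zone(page), NR_FILE_PAGES, nr_pages);
 *
 * while code already running with interrupts disabled can call
 * __mod_zone_page_state() directly and skip the flags save/restore.
 */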

/*
 * Optimized increment and decrement functions.
 *
 * These are only for a single page and therefore can take a struct page *
 * argument instead of struct zone *. This allows the inclusion of the code
 * generated for page_zone(page) into the optimized functions.
 *
 * No overflow check is necessary and therefore the differential can be
 * incremented or decremented in place which may allow the compilers to
 * generate better code.
 * The increment or decrement is known and therefore one boundary check can
 * be omitted.
 *
 * NOTE: These functions are very performance sensitive. Change only
 * with care.
 *
 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 * However, the code must first determine the differential location in a zone
 * based on the processor number and then inc/dec the counter. There is no
 * guarantee without disabling preemption that the processor will not change
 * in between and therefore the atomicity vs. interrupt cannot be exploited
 * in a useful way here.
 */
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
        struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
        s8 *p = pcp->vm_stat_diff + item;

        (*p)++;

        if (unlikely(*p > pcp->stat_threshold)) {
                int overstep = pcp->stat_threshold / 2;

                zone_page_state_add(*p + overstep, zone, item);
                *p = -overstep;
        }
}
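
/*
 * Worked example (illustrative): with stat_threshold == 32, the 33rd
 * consecutive increment pushes the s8 differential past the threshold,
 * so overstep = 16 and 33 + 16 = 49 pages are folded into the global
 * counter while the differential restarts at -16. Biasing the restart
 * point in the direction of travel halves the fold frequency for a
 * counter that keeps moving the same way.
 */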

void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
        __inc_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__inc_zone_page_state);

void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
        struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
        s8 *p = pcp->vm_stat_diff + item;

        (*p)--;

        if (unlikely(*p < -pcp->stat_threshold)) {
                int overstep = pcp->stat_threshold / 2;

                zone_page_state_add(*p - overstep, zone, item);
                *p = overstep;
        }
}

void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
        __dec_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__dec_zone_page_state);

void inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
        unsigned long flags;

        local_irq_save(flags);
        __inc_zone_state(zone, item);
        local_irq_restore(flags);
}

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
        unsigned long flags;
        struct zone *zone;

        zone = page_zone(page);
        local_irq_save(flags);
        __inc_zone_state(zone, item);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
        unsigned long flags;

        local_irq_save(flags);
        __dec_zone_page_state(page, item);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_zone_page_state);

/*
 * Update the zone counters for one cpu.
 *
 * Note that refresh_cpu_vm_stats strives to only access
 * node local memory. The per cpu pagesets on remote zones are placed
 * in the memory local to the processor using that pageset. So the
 * loop over all zones will access a series of cachelines local to
 * the processor.
 *
 * The call to zone_page_state_add updates the cachelines with the
 * statistics in the remote zone struct as well as the global cachelines
 * with the global counters. These can cause remote node cacheline
 * bouncing and so should only be done when necessary.
 */
void refresh_cpu_vm_stats(int cpu)
{
        struct zone *zone;
        int i;
        unsigned long flags;

        for_each_zone(zone) {
                struct per_cpu_pageset *p;

                if (!populated_zone(zone))
                        continue;

                p = zone_pcp(zone, cpu);

                for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
                        if (p->vm_stat_diff[i]) {
                                local_irq_save(flags);
                                zone_page_state_add(p->vm_stat_diff[i],
                                        zone, i);
                                p->vm_stat_diff[i] = 0;
#ifdef CONFIG_NUMA
                                /* 3 seconds idle till flush */
                                p->expire = 3;
#endif
                                local_irq_restore(flags);
                        }
#ifdef CONFIG_NUMA
                /*
                 * Deal with draining the remote pageset of this
                 * processor.
                 *
                 * Check if there are pages remaining in this pageset;
                 * if not then there is nothing to expire.
                 */
                if (!p->expire || (!p->pcp[0].count && !p->pcp[1].count))
                        continue;

                /*
                 * We never drain zones local to this processor.
                 */
                if (zone_to_nid(zone) == numa_node_id()) {
                        p->expire = 0;
                        continue;
                }

                p->expire--;
                if (p->expire)
                        continue;

                if (p->pcp[0].count)
                        drain_zone_pages(zone, p->pcp + 0);

                if (p->pcp[1].count)
                        drain_zone_pages(zone, p->pcp + 1);
#endif
        }
}
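
/*
 * Timeline sketch (illustrative): every time a remote pageset's counter
 * differentials are folded above, expire is rearmed to 3. This function
 * then runs once per stat interval (about one second) and decrements it,
 * so only after three consecutive intervals with no counter activity
 * does the countdown reach zero and the remote pageset get drained back
 * to the buddy allocator.
 */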

static void __refresh_cpu_vm_stats(void *dummy)
{
        refresh_cpu_vm_stats(smp_processor_id());
}

/*
 * Consolidate all counters.
 *
 * Note that the result is less inaccurate but still inaccurate
 * if concurrent processes are allowed to run.
 */
void refresh_vm_stats(void)
{
        on_each_cpu(__refresh_cpu_vm_stats, NULL, 0, 1);
}
EXPORT_SYMBOL(refresh_vm_stats);

#endif

#ifdef CONFIG_NUMA
/*
 * zonelist = the list of zones passed to the allocator
 * z        = the zone from which the allocation occurred.
 *
 * Must be called with interrupts disabled.
 */
void zone_statistics(struct zonelist *zonelist, struct zone *z)
{
        if (z->zone_pgdat == zonelist->zones[0]->zone_pgdat) {
                __inc_zone_state(z, NUMA_HIT);
        } else {
                __inc_zone_state(z, NUMA_MISS);
                __inc_zone_state(zonelist->zones[0], NUMA_FOREIGN);
        }
        if (z->node == numa_node_id())
                __inc_zone_state(z, NUMA_LOCAL);
        else
                __inc_zone_state(z, NUMA_OTHER);
}
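
/*
 * Example (illustrative): a task running on node 0 asks for memory with
 * node 0 first in the zonelist, but the page comes from a node 1 zone.
 * Then node 1 gets NUMA_MISS and NUMA_OTHER, and node 0 (the preferred
 * node) gets NUMA_FOREIGN. Had the page come from node 0 itself, node 0
 * would get NUMA_HIT and NUMA_LOCAL instead.
 */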
#endif

#ifdef CONFIG_PROC_FS

#include <linux/seq_file.h>

static char * const migratetype_names[MIGRATE_TYPES] = {
        "Unmovable",
        "Reclaimable",
        "Movable",
        "Reserve",
};

static void *frag_start(struct seq_file *m, loff_t *pos)
{
        pg_data_t *pgdat;
        loff_t node = *pos;
        for (pgdat = first_online_pgdat();
             pgdat && node;
             pgdat = next_online_pgdat(pgdat))
                --node;

        return pgdat;
}

static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
        pg_data_t *pgdat = (pg_data_t *)arg;

        (*pos)++;
        return next_online_pgdat(pgdat);
}

static void frag_stop(struct seq_file *m, void *arg)
{
}

/* Walk all the zones in a node and print using a callback */
static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
                void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
{
        struct zone *zone;
        struct zone *node_zones = pgdat->node_zones;
        unsigned long flags;

        for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
                if (!populated_zone(zone))
                        continue;

                spin_lock_irqsave(&zone->lock, flags);
                print(m, pgdat, zone);
                spin_unlock_irqrestore(&zone->lock, flags);
        }
}

static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
                                                struct zone *zone)
{
        int order;

        seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
        for (order = 0; order < MAX_ORDER; ++order)
                seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
        seq_putc(m, '\n');
}

/*
 * This walks the free areas for each zone.
 */
static int frag_show(struct seq_file *m, void *arg)
{
        pg_data_t *pgdat = (pg_data_t *)arg;
        walk_zones_in_node(m, pgdat, frag_show_print);
        return 0;
}
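
/*
 * Sample /proc/buddyinfo line produced by the above (the counts are
 * made up for illustration): one free-page count per order, from
 * order 0 through MAX_ORDER - 1.
 *
 *      Node 0, zone   Normal    217     65     22      9      5      2      1      1      0      1      3
 */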

static void pagetypeinfo_showfree_print(struct seq_file *m,
                                        pg_data_t *pgdat, struct zone *zone)
{
        int order, mtype;

        for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
                seq_printf(m, "Node %4d, zone %8s, type %12s ",
                                        pgdat->node_id,
                                        zone->name,
                                        migratetype_names[mtype]);
                for (order = 0; order < MAX_ORDER; ++order) {
                        unsigned long freecount = 0;
                        struct free_area *area;
                        struct list_head *curr;

                        area = &(zone->free_area[order]);

                        list_for_each(curr, &area->free_list[mtype])
                                freecount++;
                        seq_printf(m, "%6lu ", freecount);
                }
                seq_putc(m, '\n');
        }
}
/* Print out the free pages at each order for each migratetype */
static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
{
        int order;
        pg_data_t *pgdat = (pg_data_t *)arg;

        /* Print header */
        seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
        for (order = 0; order < MAX_ORDER; ++order)
                seq_printf(m, "%6d ", order);
        seq_putc(m, '\n');

        walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);

        return 0;
}

static void pagetypeinfo_showblockcount_print(struct seq_file *m,
                                        pg_data_t *pgdat, struct zone *zone)
{
        int mtype;
        unsigned long pfn;
        unsigned long start_pfn = zone->zone_start_pfn;
        unsigned long end_pfn = start_pfn + zone->spanned_pages;
        unsigned long count[MIGRATE_TYPES] = { 0, };

        for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
                struct page *page;

                if (!pfn_valid(pfn))
                        continue;

                page = pfn_to_page(pfn);
                mtype = get_pageblock_migratetype(page);

                count[mtype]++;
        }

        /* Print counts */
        seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
        for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
                seq_printf(m, "%12lu ", count[mtype]);
        seq_putc(m, '\n');
}
/* Print out the number of pageblocks for each migratetype */
static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
{
        int mtype;
        pg_data_t *pgdat = (pg_data_t *)arg;

        seq_printf(m, "\n%-23s", "Number of blocks type ");
        for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
                seq_printf(m, "%12s ", migratetype_names[mtype]);
        seq_putc(m, '\n');
        walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print);

        return 0;
}

/*
 * This prints out statistics in relation to grouping pages by mobility.
 * It is expensive to collect so do not constantly read the file.
 */
static int pagetypeinfo_show(struct seq_file *m, void *arg)
{
        pg_data_t *pgdat = (pg_data_t *)arg;

        seq_printf(m, "Page block order: %d\n", pageblock_order);
        seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
        seq_putc(m, '\n');
        pagetypeinfo_showfree(m, pgdat);
        pagetypeinfo_showblockcount(m, pgdat);

        return 0;
}

const struct seq_operations fragmentation_op = {
        .start  = frag_start,
        .next   = frag_next,
        .stop   = frag_stop,
        .show   = frag_show,
};

const struct seq_operations pagetypeinfo_op = {
        .start  = frag_start,
        .next   = frag_next,
        .stop   = frag_stop,
        .show   = pagetypeinfo_show,
};

#ifdef CONFIG_ZONE_DMA
#define TEXT_FOR_DMA(xx) xx "_dma",
#else
#define TEXT_FOR_DMA(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define TEXT_FOR_DMA32(xx) xx "_dma32",
#else
#define TEXT_FOR_DMA32(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define TEXT_FOR_HIGHMEM(xx) xx "_high",
#else
#define TEXT_FOR_HIGHMEM(xx)
#endif

#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
                                        TEXT_FOR_HIGHMEM(xx) xx "_movable",
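
/*
 * Example (illustrative): with all zone types configured in,
 * TEXTS_FOR_ZONES("pgalloc") expands to
 *
 *      "pgalloc_dma", "pgalloc_dma32", "pgalloc_normal",
 *      "pgalloc_high", "pgalloc_movable",
 *
 * so one counter name is emitted per zone compiled into the kernel.
 */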

static const char * const vmstat_text[] = {
        /* Zoned VM counters */
        "nr_free_pages",
        "nr_inactive",
        "nr_active",
        "nr_anon_pages",
        "nr_mapped",
        "nr_file_pages",
        "nr_dirty",
        "nr_writeback",
        "nr_slab_reclaimable",
        "nr_slab_unreclaimable",
        "nr_page_table_pages",
        "nr_unstable",
        "nr_bounce",
        "nr_vmscan_write",

#ifdef CONFIG_NUMA
        "numa_hit",
        "numa_miss",
        "numa_foreign",
        "numa_interleave",
        "numa_local",
        "numa_other",
#endif

#ifdef CONFIG_VM_EVENT_COUNTERS
        "pgpgin",
        "pgpgout",
        "pswpin",
        "pswpout",

        TEXTS_FOR_ZONES("pgalloc")

        "pgfree",
        "pgactivate",
        "pgdeactivate",

        "pgfault",
        "pgmajfault",

        TEXTS_FOR_ZONES("pgrefill")
        TEXTS_FOR_ZONES("pgsteal")
        TEXTS_FOR_ZONES("pgscan_kswapd")
        TEXTS_FOR_ZONES("pgscan_direct")

        "pginodesteal",
        "slabs_scanned",
        "kswapd_steal",
        "kswapd_inodesteal",
        "pageoutrun",
        "allocstall",

        "pgrotated",
#endif
};

static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
                                                        struct zone *zone)
{
        int i;
        seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
        seq_printf(m,
                   "\n  pages free     %lu"
                   "\n        min      %lu"
                   "\n        low      %lu"
                   "\n        high     %lu"
                   "\n        scanned  %lu (a: %lu i: %lu)"
                   "\n        spanned  %lu"
                   "\n        present  %lu",
                   zone_page_state(zone, NR_FREE_PAGES),
                   zone->pages_min,
                   zone->pages_low,
                   zone->pages_high,
                   zone->pages_scanned,
                   zone->nr_scan_active, zone->nr_scan_inactive,
                   zone->spanned_pages,
                   zone->present_pages);

        for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
                seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
                                zone_page_state(zone, i));

        seq_printf(m,
                   "\n        protection: (%lu",
                   zone->lowmem_reserve[0]);
        for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
                seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
        seq_printf(m,
                   ")"
                   "\n  pagesets");
        for_each_online_cpu(i) {
                struct per_cpu_pageset *pageset;
                int j;

                pageset = zone_pcp(zone, i);
                for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
                        seq_printf(m,
                                   "\n    cpu: %i pcp: %i"
                                   "\n              count: %i"
                                   "\n              high:  %i"
                                   "\n              batch: %i",
                                   i, j,
                                   pageset->pcp[j].count,
                                   pageset->pcp[j].high,
                                   pageset->pcp[j].batch);
                }
#ifdef CONFIG_SMP
                seq_printf(m, "\n  vm stats threshold: %d",
                                pageset->stat_threshold);
#endif
        }
        seq_printf(m,
                   "\n  all_unreclaimable: %u"
                   "\n  prev_priority:     %i"
                   "\n  start_pfn:         %lu",
                   zone->all_unreclaimable,
                   zone->prev_priority,
                   zone->zone_start_pfn);
        seq_putc(m, '\n');
}

/*
 * Output information about zones in @pgdat.
 */
static int zoneinfo_show(struct seq_file *m, void *arg)
{
        pg_data_t *pgdat = (pg_data_t *)arg;
        walk_zones_in_node(m, pgdat, zoneinfo_show_print);
        return 0;
}

const struct seq_operations zoneinfo_op = {
        .start  = frag_start, /* iterate over all zones. The same as in
                               * fragmentation. */
        .next   = frag_next,
        .stop   = frag_stop,
        .show   = zoneinfo_show,
};

static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
        unsigned long *v;
#ifdef CONFIG_VM_EVENT_COUNTERS
        unsigned long *e;
#endif
        int i;

        if (*pos >= ARRAY_SIZE(vmstat_text))
                return NULL;

#ifdef CONFIG_VM_EVENT_COUNTERS
        v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long)
                        + sizeof(struct vm_event_state), GFP_KERNEL);
#else
        v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long),
                        GFP_KERNEL);
#endif
        m->private = v;
        if (!v)
                return ERR_PTR(-ENOMEM);
        for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
                v[i] = global_page_state(i);
#ifdef CONFIG_VM_EVENT_COUNTERS
        e = v + NR_VM_ZONE_STAT_ITEMS;
        all_vm_events(e);
        e[PGPGIN] /= 2;         /* sectors -> kbytes */
        e[PGPGOUT] /= 2;
#endif
        return v + *pos;
}

static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
        (*pos)++;
        if (*pos >= ARRAY_SIZE(vmstat_text))
                return NULL;
        return (unsigned long *)m->private + *pos;
}

static int vmstat_show(struct seq_file *m, void *arg)
{
        unsigned long *l = arg;
        unsigned long off = l - (unsigned long *)m->private;

        seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
        return 0;
}

static void vmstat_stop(struct seq_file *m, void *arg)
{
        kfree(m->private);
        m->private = NULL;
}

const struct seq_operations vmstat_op = {
        .start  = vmstat_start,
        .next   = vmstat_next,
        .stop   = vmstat_stop,
        .show   = vmstat_show,
};
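
/*
 * Sample /proc/vmstat output produced by vmstat_show() (the values are
 * made up for illustration): one "name value" pair per line, zoned
 * counters first, then the event counters.
 *
 *      nr_free_pages 81120
 *      nr_inactive 23867
 *      nr_active 46350
 *      ...
 *      pgpgin 426782
 *      pgpgout 931534
 */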

#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
int sysctl_stat_interval __read_mostly = HZ;

static void vmstat_update(struct work_struct *w)
{
        refresh_cpu_vm_stats(smp_processor_id());
        schedule_delayed_work(&__get_cpu_var(vmstat_work),
                sysctl_stat_interval);
}

static void __devinit start_cpu_timer(int cpu)
{
        struct delayed_work *vmstat_work = &per_cpu(vmstat_work, cpu);

        INIT_DELAYED_WORK_DEFERRABLE(vmstat_work, vmstat_update);
        schedule_delayed_work_on(cpu, vmstat_work, HZ + cpu);
}
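
/*
 * The initial expiry above is staggered by HZ + cpu ticks so that the
 * per-cpu timers do not all fire in the same jiffy; after the first
 * run, each cpu reschedules itself every sysctl_stat_interval from
 * vmstat_update().
 */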

/*
 * Use the cpu notifier to ensure that the thresholds are recalculated
 * when necessary.
 */
static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
                unsigned long action,
                void *hcpu)
{
        long cpu = (long)hcpu;

        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                start_cpu_timer(cpu);
                break;
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
                cancel_rearming_delayed_work(&per_cpu(vmstat_work, cpu));
                per_cpu(vmstat_work, cpu).work.func = NULL;
                break;
        case CPU_DOWN_FAILED:
        case CPU_DOWN_FAILED_FROZEN:
                start_cpu_timer(cpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                refresh_zone_stat_thresholds();
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata vmstat_notifier =
        { &vmstat_cpuup_callback, NULL, 0 };

int __init setup_vmstat(void)
{
        int cpu;

        refresh_zone_stat_thresholds();
        register_cpu_notifier(&vmstat_notifier);

        for_each_online_cpu(cpu)
                start_cpu_timer(cpu);
        return 0;
}
module_init(setup_vmstat)
#endif