/*
 *  Manages VM statistics
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  zoned VM statistics
 *  Copyright (C) 2006 Silicon Graphics, Inc.,
 *		Christoph Lameter <christoph@lameter.com>
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/vmstat.h>
#include <linux/sched.h>
#include <linux/math64.h>

#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
EXPORT_PER_CPU_SYMBOL(vm_event_states);

static void sum_vm_events(unsigned long *ret, const struct cpumask *cpumask)
{
	int cpu;
	int i;

	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));

	for_each_cpu(cpu, cpumask) {
		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);

		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
			ret[i] += this->event[i];
	}
}

/*
 * Accumulate the vm event counters across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
void all_vm_events(unsigned long *ret)
{
	get_online_cpus();
	sum_vm_events(ret, cpu_online_mask);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(all_vm_events);

#ifdef CONFIG_HOTPLUG
/*
 * Fold the foreign cpu events into our own.
 *
 * This is adding to the events on one processor
 * but keeps the global counts constant.
 */
void vm_events_fold_cpu(int cpu)
{
	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
	int i;

	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
		count_vm_events(i, fold_state->event[i]);
		fold_state->event[i] = 0;
	}
}
#endif /* CONFIG_HOTPLUG */

#endif /* CONFIG_VM_EVENT_COUNTERS */

/*
 * Manage combined zone based / global counters
 *
 * vm_stat contains the global counters
 */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
EXPORT_SYMBOL(vm_stat);

static int calculate_threshold(struct zone *zone)
{
	int threshold;
	int mem;	/* memory in 128 MB units */

	/*
	 * The threshold scales with the number of processors and the amount
	 * of memory per zone. More memory means that we can defer updates for
	 * longer, more processors could lead to more contention.
	 * fls() is used to have a cheap way of logarithmic scaling.
	 *
	 * Some sample thresholds:
	 *
	 * Threshold	Processors	(fls)	Zonesize	fls(mem+1)
	 * ------------------------------------------------------------------
	 * 125		1024		10	8-16 GB		8
	 * 125		1024		10	16-32 GB	9
	 */

	mem = zone->present_pages >> (27 - PAGE_SHIFT);

	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));

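	/*
	 * Worked example (illustrative configuration): with 64 online CPUs
	 * (fls(64) = 7) and a 2-4 GB zone (mem is 16-31 units of 128 MB, so
	 * fls(mem + 1) = 5), the formula above gives 2 * 7 * 5 = 70.
	 */
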
	/*
	 * Maximum threshold is 125
	 */
	threshold = min(125, threshold);

	return threshold;
}

/*
 * Refresh the thresholds for each zone.
 */
static void refresh_zone_stat_thresholds(void)
{
	struct zone *zone;
	int cpu, threshold;

	for_each_populated_zone(zone) {
		threshold = calculate_threshold(zone);
		for_each_online_cpu(cpu)
			per_cpu_ptr(zone->pageset, cpu)->stat_threshold = threshold;
	}
}

/*
 * For use when we know that interrupts are disabled.
 */
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
				int delta)
{
	struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
	s8 *p = pcp->vm_stat_diff + item;
	long x = delta + *p;

	if (unlikely(x > pcp->stat_threshold || x < -pcp->stat_threshold)) {
		zone_page_state_add(x, zone, item);
		x = 0;
	}
	*p = x;
}
EXPORT_SYMBOL(__mod_zone_page_state);

/*
 * For an unknown interrupt state
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
					int delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_zone_page_state(zone, item, delta);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_zone_page_state);

/*
 * Optimized increment and decrement functions.
 *
 * These are only for a single page and therefore can take a struct page *
 * argument instead of struct zone *. This allows the inclusion of the code
 * generated for page_zone(page) into the optimized functions.
 *
 * No overflow check is necessary and therefore the differential can be
 * incremented or decremented in place which may allow the compilers to
 * generate better code.
 * The increment or decrement is known and therefore one boundary check can
 * be omitted.
 *
 * NOTE: These functions are very performance sensitive. Change only
 * with care.
 *
 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 * However, the code must first determine the differential location in a zone
 * based on the processor number and then inc/dec the counter. There is no
 * guarantee without disabling preemption that the processor will not change
 * in between and therefore the atomicity vs. interrupt cannot be exploited
 * in a useful way here.
 */

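/*
 * Folding note: when a per-cpu differential crosses its threshold below, it
 * is added to the global counter and the per-cpu value is left half a
 * threshold on the opposite side ("overstep"), so a burst of updates in the
 * same direction does not immediately force another fold.
 */
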
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
	s8 *p = pcp->vm_stat_diff + item;

	(*p)++;
	if (unlikely(*p > pcp->stat_threshold)) {
		int overstep = pcp->stat_threshold / 2;

		zone_page_state_add(*p + overstep, zone, item);
		*p = -overstep;
	}
}

void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__inc_zone_page_state);

void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
	s8 *p = pcp->vm_stat_diff + item;

	(*p)--;
	if (unlikely(*p < - pcp->stat_threshold)) {
		int overstep = pcp->stat_threshold / 2;

		zone_page_state_add(*p - overstep, zone, item);
		*p = overstep;
	}
}

void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__dec_zone_page_state);

void inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;
	struct zone *zone;

	zone = page_zone(page);
	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_zone_page_state(page, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_zone_page_state);

/*
 * Update the zone counters for one cpu.
 *
 * The cpu specified must be either the current cpu or a processor that
 * is not online. If it is the current cpu then the execution thread must
 * be pinned to the current cpu.
 *
 * Note that refresh_cpu_vm_stats strives to only access
 * node local memory. The per cpu pagesets on remote zones are placed
 * in the memory local to the processor using that pageset. So the
 * loop over all zones will access a series of cachelines local to
 * the processor.
 *
 * The call to zone_page_state_add updates the cachelines with the
 * statistics in the remote zone struct as well as the global cachelines
 * with the global counters. These could cause remote node cache line
 * bouncing and will have to be only done when necessary.
 */
void refresh_cpu_vm_stats(int cpu)
{
	struct zone *zone;
	int i;
	int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };

	for_each_populated_zone(zone) {
		struct per_cpu_pageset *p;

		p = per_cpu_ptr(zone->pageset, cpu);

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
			if (p->vm_stat_diff[i]) {
				unsigned long flags;
				int v;

				local_irq_save(flags);
				v = p->vm_stat_diff[i];
				p->vm_stat_diff[i] = 0;
				local_irq_restore(flags);
				atomic_long_add(v, &zone->vm_stat[i]);
				global_diff[i] += v;
#ifdef CONFIG_NUMA
				/* 3 seconds idle till flush */
				p->expire = 3;
#endif
			}
		cond_resched();
#ifdef CONFIG_NUMA
		/*
		 * Deal with draining the remote pageset of this
		 * processor
		 *
		 * Check if there are pages remaining in this pageset
		 * if not then there is nothing to expire.
		 */
		if (!p->expire || !p->pcp.count)
			continue;

		/*
		 * We never drain zones local to this processor.
		 */
		if (zone_to_nid(zone) == numa_node_id()) {
			p->expire = 0;
			continue;
		}

		p->expire--;
		if (p->expire)
			continue;

		if (p->pcp.count)
			drain_zone_pages(zone, &p->pcp);
#endif
	}

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		if (global_diff[i])
			atomic_long_add(global_diff[i], &vm_stat[i]);
}

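/*
 * NUMA allocation statistics, updated by zone_statistics():
 * NUMA_HIT/NUMA_MISS record whether the page came from the preferred zone's
 * node; NUMA_FOREIGN is charged to the preferred zone when it did not;
 * NUMA_LOCAL/NUMA_OTHER record whether the zone is local to the allocating
 * CPU.
 */
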
#ifdef CONFIG_NUMA
/*
 * zonelist = the list of zones passed to the allocator
 * z	    = the zone from which the allocation occurred.
 *
 * Must be called with interrupts disabled.
 */
void zone_statistics(struct zone *preferred_zone, struct zone *z)
{
	if (z->zone_pgdat == preferred_zone->zone_pgdat) {
		__inc_zone_state(z, NUMA_HIT);
	} else {
		__inc_zone_state(z, NUMA_MISS);
		__inc_zone_state(preferred_zone, NUMA_FOREIGN);
	}
	if (z->node == numa_node_id())
		__inc_zone_state(z, NUMA_LOCAL);
	else
		__inc_zone_state(z, NUMA_OTHER);
}
#endif

#ifdef CONFIG_COMPACTION
struct contig_page_info {
	unsigned long free_pages;
	unsigned long free_blocks_total;
	unsigned long free_blocks_suitable;
};

/*
 * Calculate the number of free pages in a zone, how many contiguous
 * pages are free and how many are large enough to satisfy an allocation of
 * the target size. Note that this function makes no attempt to estimate
 * how many suitable free blocks there *might* be if MOVABLE pages were
 * migrated. Calculating that is possible, but expensive and can be
 * figured out from userspace.
 */
static void fill_contig_page_info(struct zone *zone,
				unsigned int suitable_order,
				struct contig_page_info *info)
{
	unsigned int order;

	info->free_pages = 0;
	info->free_blocks_total = 0;
	info->free_blocks_suitable = 0;

	for (order = 0; order < MAX_ORDER; order++) {
		unsigned long blocks;

		/* Count number of free blocks */
		blocks = zone->free_area[order].nr_free;
		info->free_blocks_total += blocks;

		/* Count free base pages */
		info->free_pages += blocks << order;

		/* Count the suitable free blocks */
		if (order >= suitable_order)
			info->free_blocks_suitable += blocks <<
						(order - suitable_order);
	}
}
#endif

#if defined(CONFIG_PROC_FS) || defined(CONFIG_COMPACTION)
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static char * const migratetype_names[MIGRATE_TYPES] = {
	"Unmovable",
	"Reclaimable",
	"Movable",
	"Reserve",
	"Isolate",
};

static void *frag_start(struct seq_file *m, loff_t *pos)
{
	pg_data_t *pgdat;
	loff_t node = *pos;

	for (pgdat = first_online_pgdat();
	     pgdat && node;
	     pgdat = next_online_pgdat(pgdat))
		--node;

	return pgdat;
}

static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	(*pos)++;
	return next_online_pgdat(pgdat);
}

static void frag_stop(struct seq_file *m, void *arg)
{
}

/* Walk all the zones in a node and print using a callback */
static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		print(m, pgdat, zone);
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}
#endif

#ifdef CONFIG_PROC_FS
static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
						struct zone *zone)
{
	int order;

	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (order = 0; order < MAX_ORDER; ++order)
		seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
	seq_putc(m, '\n');
}

/*
 * This walks the free areas for each zone.
 */
static int frag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	walk_zones_in_node(m, pgdat, frag_show_print);
	return 0;
}

static void pagetypeinfo_showfree_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int order, mtype;

	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
		seq_printf(m, "Node %4d, zone %8s, type %12s ",
					pgdat->node_id,
					zone->name,
					migratetype_names[mtype]);
		for (order = 0; order < MAX_ORDER; ++order) {
			unsigned long freecount = 0;
			struct free_area *area;
			struct list_head *curr;

			area = &(zone->free_area[order]);

			list_for_each(curr, &area->free_list[mtype])
				freecount++;
			seq_printf(m, "%6lu ", freecount);
		}
		seq_putc(m, '\n');
	}
}

/* Print out the free pages at each order for each migratetype */
static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
{
	int order;
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* Print header */
	seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
	for (order = 0; order < MAX_ORDER; ++order)
		seq_printf(m, "%6d ", order);
	seq_putc(m, '\n');

	walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);
	return 0;
}

static void pagetypeinfo_showblockcount_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int mtype;
	unsigned long pfn;
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = start_pfn + zone->spanned_pages;
	unsigned long count[MIGRATE_TYPES] = { 0, };

	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);

		/* Watch for unexpected holes punched in the memmap */
		if (!memmap_valid_within(pfn, page, zone))
			continue;

		mtype = get_pageblock_migratetype(page);
		if (mtype < MIGRATE_TYPES)
			count[mtype]++;
	}

	/* Print counts */
	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12lu ", count[mtype]);
	seq_putc(m, '\n');
}

/* Print out the number of pageblocks for each migratetype */
static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
{
	int mtype;
	pg_data_t *pgdat = (pg_data_t *)arg;

	seq_printf(m, "\n%-23s", "Number of blocks type ");
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12s ", migratetype_names[mtype]);
	seq_putc(m, '\n');
	walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print);
	return 0;
}

/*
 * This prints out statistics in relation to grouping pages by mobility.
 * It is expensive to collect so do not constantly read the file.
 */
static int pagetypeinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* check memoryless node */
	if (!node_state(pgdat->node_id, N_HIGH_MEMORY))
		return 0;

	seq_printf(m, "Page block order: %d\n", pageblock_order);
	seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
	seq_putc(m, '\n');
	pagetypeinfo_showfree(m, pgdat);
	pagetypeinfo_showblockcount(m, pgdat);
	return 0;
}

static const struct seq_operations fragmentation_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= frag_show,
};

static int fragmentation_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &fragmentation_op);
}

static const struct file_operations fragmentation_file_operations = {
	.open		= fragmentation_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static const struct seq_operations pagetypeinfo_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= pagetypeinfo_show,
};

static int pagetypeinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &pagetypeinfo_op);
}

static const struct file_operations pagetypeinfo_file_ops = {
	.open		= pagetypeinfo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

#ifdef CONFIG_ZONE_DMA
#define TEXT_FOR_DMA(xx) xx "_dma",
#else
#define TEXT_FOR_DMA(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define TEXT_FOR_DMA32(xx) xx "_dma32",
#else
#define TEXT_FOR_DMA32(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define TEXT_FOR_HIGHMEM(xx) xx "_high",
#else
#define TEXT_FOR_HIGHMEM(xx)
#endif

#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
			TEXT_FOR_HIGHMEM(xx) xx "_movable",

static const char * const vmstat_text[] = {
	/* Zoned VM counters */
	"nr_slab_reclaimable",
	"nr_slab_unreclaimable",
	"nr_page_table_pages",

#ifdef CONFIG_VM_EVENT_COUNTERS
	TEXTS_FOR_ZONES("pgalloc")

	TEXTS_FOR_ZONES("pgrefill")
	TEXTS_FOR_ZONES("pgsteal")
	TEXTS_FOR_ZONES("pgscan_kswapd")
	TEXTS_FOR_ZONES("pgscan_direct")

	"zone_reclaim_failed",

	"kswapd_low_wmark_hit_quickly",
	"kswapd_high_wmark_hit_quickly",
	"kswapd_skip_congestion_wait",

#ifdef CONFIG_HUGETLB_PAGE
	"htlb_buddy_alloc_success",
	"htlb_buddy_alloc_fail",
#endif
	"unevictable_pgs_culled",
	"unevictable_pgs_scanned",
	"unevictable_pgs_rescued",
	"unevictable_pgs_mlocked",
	"unevictable_pgs_munlocked",
	"unevictable_pgs_cleared",
	"unevictable_pgs_stranded",
	"unevictable_pgs_mlockfreed",
#endif
};

static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
							struct zone *zone)
{
	int i;

	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
	seq_printf(m,
		   "\n  pages free     %lu"
		   "\n        min      %lu"
		   "\n        low      %lu"
		   "\n        high     %lu"
		   "\n        present  %lu",
		   zone_page_state(zone, NR_FREE_PAGES),
		   min_wmark_pages(zone),
		   low_wmark_pages(zone),
		   high_wmark_pages(zone),
		   zone->present_pages);

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
				zone_page_state(zone, i));

	seq_printf(m,
		   "\n        protection: (%lu",
		   zone->lowmem_reserve[0]);
	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
		seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
	seq_printf(m, ")");

	for_each_online_cpu(i) {
		struct per_cpu_pageset *pageset;

		pageset = per_cpu_ptr(zone->pageset, i);
		seq_printf(m, "\n  vm stats threshold: %d",
				pageset->stat_threshold);
	}

	seq_printf(m,
		   "\n  all_unreclaimable: %u"
		   "\n  prev_priority:     %i"
		   "\n  start_pfn:         %lu"
		   "\n  inactive_ratio:    %u",
		   zone->all_unreclaimable,
		   zone->prev_priority,
		   zone->zone_start_pfn,
		   zone->inactive_ratio);
	seq_putc(m, '\n');
}

/*
 * Output information about zones in @pgdat.
 */
static int zoneinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	walk_zones_in_node(m, pgdat, zoneinfo_show_print);
	return 0;
}

static const struct seq_operations zoneinfo_op = {
	.start	= frag_start, /* iterate over all zones. The same as in
			       * fragmentation. */
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= zoneinfo_show,
};

static int zoneinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &zoneinfo_op);
}

static const struct file_operations proc_zoneinfo_file_operations = {
	.open		= zoneinfo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

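/*
 * /proc/vmstat: vmstat_start() below snapshots the global zone counters
 * (and, when configured, the summed VM event counters) into one kmalloc'd
 * array, vmstat_show() prints one "name value" line per entry, and
 * vmstat_stop() frees the snapshot.
 */
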
static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
	unsigned long *v;
#ifdef CONFIG_VM_EVENT_COUNTERS
	unsigned long *e;
#endif
	int i;

	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;

#ifdef CONFIG_VM_EVENT_COUNTERS
	v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long)
			+ sizeof(struct vm_event_state), GFP_KERNEL);
#else
	v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long),
			GFP_KERNEL);
#endif
	m->private = v;
	if (!v)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		v[i] = global_page_state(i);
#ifdef CONFIG_VM_EVENT_COUNTERS
	e = v + NR_VM_ZONE_STAT_ITEMS;
	all_vm_events(e);
	e[PGPGIN] /= 2;		/* sectors -> kbytes */
	e[PGPGOUT] /= 2;
#endif
	return v + *pos;
}

static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
	(*pos)++;
	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;
	return (unsigned long *)m->private + *pos;
}

static int vmstat_show(struct seq_file *m, void *arg)
{
	unsigned long *l = arg;
	unsigned long off = l - (unsigned long *)m->private;

	seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
	return 0;
}

static void vmstat_stop(struct seq_file *m, void *arg)
{
	kfree(m->private);
	m->private = NULL;
}

static const struct seq_operations vmstat_op = {
	.start	= vmstat_start,
	.next	= vmstat_next,
	.stop	= vmstat_stop,
	.show	= vmstat_show,
};

static int vmstat_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &vmstat_op);
}

static const struct file_operations proc_vmstat_file_operations = {
	.open		= vmstat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
#endif /* CONFIG_PROC_FS */

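/*
 * Periodic folding: every online CPU runs a deferrable delayed work item
 * that calls refresh_cpu_vm_stats() for itself once per sysctl_stat_interval
 * and then reschedules itself; being deferrable, it will not wake an
 * otherwise idle CPU just to update statistics.
 */
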
static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
int sysctl_stat_interval __read_mostly = HZ;

static void vmstat_update(struct work_struct *w)
{
	refresh_cpu_vm_stats(smp_processor_id());
	schedule_delayed_work(&__get_cpu_var(vmstat_work),
		round_jiffies_relative(sysctl_stat_interval));
}

static void __cpuinit start_cpu_timer(int cpu)
{
	struct delayed_work *work = &per_cpu(vmstat_work, cpu);

	INIT_DELAYED_WORK_DEFERRABLE(work, vmstat_update);
	schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu));
}

/*
 * Use the cpu notifier to ensure that the thresholds are recalculated
 * when necessary.
 */
static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
		unsigned long action,
		void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		start_cpu_timer(cpu);
		node_set_state(cpu_to_node(cpu), N_CPU);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		cancel_rearming_delayed_work(&per_cpu(vmstat_work, cpu));
		per_cpu(vmstat_work, cpu).work.func = NULL;
		break;
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		start_cpu_timer(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		refresh_zone_stat_thresholds();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata vmstat_notifier =
	{ &vmstat_cpuup_callback, NULL, 0 };

static int __init setup_vmstat(void)
{
	int cpu;

	refresh_zone_stat_thresholds();
	register_cpu_notifier(&vmstat_notifier);

	for_each_online_cpu(cpu)
		start_cpu_timer(cpu);

#ifdef CONFIG_PROC_FS
	proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
	proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
	proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
	proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
#endif
	return 0;
}
module_init(setup_vmstat)

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
#include <linux/debugfs.h>

static struct dentry *extfrag_debug_root;

/*
 * Return an index indicating how much of the available free memory is
 * unusable for an allocation of the requested size.
 */
static int unusable_free_index(unsigned int order,
				struct contig_page_info *info)
{
	/* No free memory is interpreted as all free memory is unusable */
	if (info->free_pages == 0)
		return 1000;

	/*
	 * Index should be a value between 0 and 1. Return a value to 3
	 * decimal places.
	 *
	 * 0 => no fragmentation
	 * 1 => high fragmentation
	 */
	return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages);
}

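/*
 * Example with illustrative numbers: if a zone has 1000 free pages and 768
 * of them sit in free blocks of at least the requested order, the index is
 * (1000 - 768) * 1000 / 1000 = 232, which unusable_show_print() formats as
 * 0.232.
 */
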
static void unusable_show_print(struct seq_file *m,
				pg_data_t *pgdat, struct zone *zone)
{
	unsigned int order;
	int index;
	struct contig_page_info info;

	seq_printf(m, "Node %d, zone %8s ",
				pgdat->node_id,
				zone->name);
	for (order = 0; order < MAX_ORDER; ++order) {
		fill_contig_page_info(zone, order, &info);
		index = unusable_free_index(order, &info);
		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
	}

	seq_putc(m, '\n');
}

/*
 * Display unusable free space index
 *
 * The unusable free space index measures how much of the available free
 * memory cannot be used to satisfy an allocation of a given size and is a
 * value between 0 and 1. The higher the value, the more of free memory is
 * unusable and by implication, the worse the external fragmentation is. This
 * can be expressed as a percentage by multiplying by 100.
 */
static int unusable_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* check memoryless node */
	if (!node_state(pgdat->node_id, N_HIGH_MEMORY))
		return 0;

	walk_zones_in_node(m, pgdat, unusable_show_print);

	return 0;
}

static const struct seq_operations unusable_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= unusable_show,
};

static int unusable_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &unusable_op);
}

static const struct file_operations unusable_file_ops = {
	.open		= unusable_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init extfrag_debug_init(void)
{
	extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
	if (!extfrag_debug_root)
		return -ENOMEM;

	if (!debugfs_create_file("unusable_index", 0444,
			extfrag_debug_root, NULL, &unusable_file_ops))
		return -ENOMEM;

	return 0;
}

module_init(extfrag_debug_init);
#endif