X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=mm%2Fvmstat.c;h=6051fbab67ba26533594103c62790f779cd21c08;hb=4035c07a895974d0ac06a56fe870ad293fc451a7;hp=3b5e9043e7db0887612ccd9ea1ce4cd6c04869ca;hpb=e2fc88d0643ca68f2011e6db4aa31e22bd94210c;p=safe%2Fjmp%2Flinux-2.6

diff --git a/mm/vmstat.c b/mm/vmstat.c
index 3b5e904..6051fba 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -8,34 +8,28 @@
  * Copyright (C) 2006 Silicon Graphics, Inc.,
  *		Christoph Lameter
  */
-
+#include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/err.h>
 #include <linux/module.h>
+#include <linux/cpu.h>
 #include <linux/vmstat.h>
 #include <linux/sched.h>
 
 #ifdef CONFIG_VM_EVENT_COUNTERS
 DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
 EXPORT_PER_CPU_SYMBOL(vm_event_states);
 
-static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask)
+static void sum_vm_events(unsigned long *ret, const struct cpumask *cpumask)
 {
-        int cpu = 0;
+        int cpu;
         int i;
 
         memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
 
-        cpu = first_cpu(*cpumask);
-        while (cpu < NR_CPUS) {
+        for_each_cpu(cpu, cpumask) {
                 struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
 
-                cpu = next_cpu(cpu, *cpumask);
-
-                if (cpu < NR_CPUS)
-                        prefetch(&per_cpu(vm_event_states, cpu));
-
-                for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
+                for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
                         ret[i] += this->event[i];
         }
@@ -48,7 +42,9 @@ static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask)
  */
 void all_vm_events(unsigned long *ret)
 {
-        sum_vm_events(ret, &cpu_online_map);
+        get_online_cpus();
+        sum_vm_events(ret, cpu_online_mask);
+        put_online_cpus();
 }
 EXPORT_SYMBOL_GPL(all_vm_events);
@@ -139,11 +135,7 @@ static void refresh_zone_stat_thresholds(void)
         int cpu;
         int threshold;
 
-        for_each_zone(zone) {
-
-                if (!zone->present_pages)
-                        continue;
-
+        for_each_populated_zone(zone) {
                 threshold = calculate_threshold(zone);
 
                 for_each_online_cpu(cpu)
@@ -284,6 +276,10 @@ EXPORT_SYMBOL(dec_zone_page_state);
 /*
  * Update the zone counters for one cpu.
  *
+ * The cpu specified must be either the current cpu or a processor that
+ * is not online. If it is the current cpu then the execution thread must
+ * be pinned to the current cpu.
+ *
  * Note that refresh_cpu_vm_stats strives to only access
  * node local memory. The per cpu pagesets on remote zones are placed
  * in the memory local to the processor using that pageset. So the
@@ -299,28 +295,30 @@ void refresh_cpu_vm_stats(int cpu)
 {
         struct zone *zone;
         int i;
-        unsigned long flags;
+        int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
 
-        for_each_zone(zone) {
+        for_each_populated_zone(zone) {
                 struct per_cpu_pageset *p;
 
-                if (!populated_zone(zone))
-                        continue;
-
                 p = zone_pcp(zone, cpu);
 
                 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
                         if (p->vm_stat_diff[i]) {
+                                unsigned long flags;
+                                int v;
+
                                 local_irq_save(flags);
-                                zone_page_state_add(p->vm_stat_diff[i],
-                                                    zone, i);
+                                v = p->vm_stat_diff[i];
                                 p->vm_stat_diff[i] = 0;
+                                local_irq_restore(flags);
+                                atomic_long_add(v, &zone->vm_stat[i]);
+                                global_diff[i] += v;
 #ifdef CONFIG_NUMA
                                 /* 3 seconds idle till flush */
                                 p->expire = 3;
 #endif
-                                local_irq_restore(flags);
                         }
+                cond_resched();
 #ifdef CONFIG_NUMA
                 /*
                  * Deal with draining the remote pageset of this
@@ -329,7 +327,7 @@ void refresh_cpu_vm_stats(int cpu)
                  * Check if there are pages remaining in this pageset
                  * if not then there is nothing to expire.
                  */
-                if (!p->expire || (!p->pcp[0].count && !p->pcp[1].count))
+                if (!p->expire || !p->pcp.count)
                         continue;
 
                 /*
@@ -344,13 +342,14 @@ void refresh_cpu_vm_stats(int cpu)
                 if (p->expire)
                         continue;
 
-                if (p->pcp[0].count)
-                        drain_zone_pages(zone, p->pcp + 0);
-
-                if (p->pcp[1].count)
-                        drain_zone_pages(zone, p->pcp + 1);
+                if (p->pcp.count)
+                        drain_zone_pages(zone, &p->pcp);
 #endif
         }
+
+        for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
+                if (global_diff[i])
+                        atomic_long_add(global_diff[i], &vm_stat[i]);
 }
 #endif
@@ -362,13 +361,13 @@ void refresh_cpu_vm_stats(int cpu)
  *
  * Must be called with interrupts disabled.
  */
-void zone_statistics(struct zonelist *zonelist, struct zone *z)
+void zone_statistics(struct zone *preferred_zone, struct zone *z)
 {
-        if (z->zone_pgdat == zonelist->zones[0]->zone_pgdat) {
+        if (z->zone_pgdat == preferred_zone->zone_pgdat) {
                 __inc_zone_state(z, NUMA_HIT);
         } else {
                 __inc_zone_state(z, NUMA_MISS);
-                __inc_zone_state(zonelist->zones[0], NUMA_FOREIGN);
+                __inc_zone_state(preferred_zone, NUMA_FOREIGN);
         }
         if (z->node == numa_node_id())
                 __inc_zone_state(z, NUMA_LOCAL);
@@ -378,7 +377,7 @@ void zone_statistics(struct zonelist *zonelist, struct zone *z)
 #endif
 
 #ifdef CONFIG_PROC_FS
-
+#include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 
 static char * const migratetype_names[MIGRATE_TYPES] = {
@@ -386,6 +385,7 @@ static char * const migratetype_names[MIGRATE_TYPES] = {
         "Reclaimable",
         "Movable",
         "Reserve",
+        "Isolate",
 };
 
 static void *frag_start(struct seq_file *m, loff_t *pos)
@@ -509,9 +509,15 @@ static void pagetypeinfo_showblockcount_print(struct seq_file *m,
                         continue;
 
                 page = pfn_to_page(pfn);
+
+                /* Watch for unexpected holes punched in the memmap */
+                if (!memmap_valid_within(pfn, page, zone))
+                        continue;
+
                 mtype = get_pageblock_migratetype(page);
 
-                count[mtype]++;
+                if (mtype < MIGRATE_TYPES)
+                        count[mtype]++;
         }
 
         /* Print counts */
@@ -544,6 +550,10 @@ static int pagetypeinfo_show(struct seq_file *m, void *arg)
 {
         pg_data_t *pgdat = (pg_data_t *)arg;
 
+        /* check memoryless node */
+        if (!node_state(pgdat->node_id, N_HIGH_MEMORY))
+                return 0;
+
         seq_printf(m, "Page block order: %d\n", pageblock_order);
         seq_printf(m, "Pages per block: %lu\n", pageblock_nr_pages);
         seq_putc(m, '\n');
@@ -553,20 +563,44 @@ static int pagetypeinfo_show(struct seq_file *m, void *arg)
         return 0;
 }
 
-const struct seq_operations fragmentation_op = {
+static const struct seq_operations fragmentation_op = {
         .start  = frag_start,
         .next   = frag_next,
         .stop   = frag_stop,
         .show   = frag_show,
 };
 
-const struct seq_operations pagetypeinfo_op = {
+static int fragmentation_open(struct inode *inode, struct file *file)
+{
+        return seq_open(file, &fragmentation_op);
+}
+
+static const struct file_operations fragmentation_file_operations = {
+        .open           = fragmentation_open,
+        .read           = seq_read,
+        .llseek         = seq_lseek,
+        .release        = seq_release,
+};
+
+static const struct seq_operations pagetypeinfo_op = {
         .start  = frag_start,
         .next   = frag_next,
         .stop   = frag_stop,
         .show   = pagetypeinfo_show,
 };
 
+static int pagetypeinfo_open(struct inode *inode, struct file *file)
+{
+        return seq_open(file, &pagetypeinfo_op);
+}
+
+static const struct file_operations pagetypeinfo_file_ops = {
+        .open           = pagetypeinfo_open,
+        .read           = seq_read,
+        .llseek         = seq_lseek,
+        .release        = seq_release,
+};
+
 #ifdef CONFIG_ZONE_DMA
 #define TEXT_FOR_DMA(xx) xx "_dma",
 #else
@@ -591,8 +625,12 @@ const struct seq_operations pagetypeinfo_op = {
 static const char * const vmstat_text[] = {
         /* Zoned VM counters */
         "nr_free_pages",
-        "nr_inactive",
-        "nr_active",
+        "nr_inactive_anon",
+        "nr_active_anon",
+        "nr_inactive_file",
"nr_inactive_file", + "nr_active_file", + "nr_unevictable", + "nr_mlock", "nr_anon_pages", "nr_mapped", "nr_file_pages", @@ -601,10 +639,14 @@ static const char * const vmstat_text[] = { "nr_slab_reclaimable", "nr_slab_unreclaimable", "nr_page_table_pages", + "nr_kernel_stack", "nr_unstable", "nr_bounce", "nr_vmscan_write", - + "nr_writeback_temp", + "nr_isolated_anon", + "nr_isolated_file", + "nr_shmem", #ifdef CONFIG_NUMA "numa_hit", "numa_miss", @@ -634,14 +676,32 @@ static const char * const vmstat_text[] = { TEXTS_FOR_ZONES("pgscan_kswapd") TEXTS_FOR_ZONES("pgscan_direct") +#ifdef CONFIG_NUMA + "zone_reclaim_failed", +#endif "pginodesteal", "slabs_scanned", "kswapd_steal", "kswapd_inodesteal", + "kswapd_low_wmark_hit_quickly", + "kswapd_high_wmark_hit_quickly", + "kswapd_skip_congestion_wait", "pageoutrun", "allocstall", "pgrotated", +#ifdef CONFIG_HUGETLB_PAGE + "htlb_buddy_alloc_success", + "htlb_buddy_alloc_fail", +#endif + "unevictable_pgs_culled", + "unevictable_pgs_scanned", + "unevictable_pgs_rescued", + "unevictable_pgs_mlocked", + "unevictable_pgs_munlocked", + "unevictable_pgs_cleared", + "unevictable_pgs_stranded", + "unevictable_pgs_mlockfreed", #endif }; @@ -655,15 +715,14 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat, "\n min %lu" "\n low %lu" "\n high %lu" - "\n scanned %lu (a: %lu i: %lu)" + "\n scanned %lu" "\n spanned %lu" "\n present %lu", zone_page_state(zone, NR_FREE_PAGES), - zone->pages_min, - zone->pages_low, - zone->pages_high, + min_wmark_pages(zone), + low_wmark_pages(zone), + high_wmark_pages(zone), zone->pages_scanned, - zone->nr_scan_active, zone->nr_scan_inactive, zone->spanned_pages, zone->present_pages); @@ -681,20 +740,17 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat, "\n pagesets"); for_each_online_cpu(i) { struct per_cpu_pageset *pageset; - int j; pageset = zone_pcp(zone, i); - for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) { - seq_printf(m, - "\n cpu: %i pcp: %i" - "\n count: %i" - "\n high: %i" - "\n batch: %i", - i, j, - pageset->pcp[j].count, - pageset->pcp[j].high, - pageset->pcp[j].batch); - } + seq_printf(m, + "\n cpu: %i" + "\n count: %i" + "\n high: %i" + "\n batch: %i", + i, + pageset->pcp.count, + pageset->pcp.high, + pageset->pcp.batch); #ifdef CONFIG_SMP seq_printf(m, "\n vm stats threshold: %d", pageset->stat_threshold); @@ -703,10 +759,12 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat, seq_printf(m, "\n all_unreclaimable: %u" "\n prev_priority: %i" - "\n start_pfn: %lu", - zone->all_unreclaimable, + "\n start_pfn: %lu" + "\n inactive_ratio: %u", + zone_is_all_unreclaimable(zone), zone->prev_priority, - zone->zone_start_pfn); + zone->zone_start_pfn, + zone->inactive_ratio); seq_putc(m, '\n'); } @@ -720,7 +778,7 @@ static int zoneinfo_show(struct seq_file *m, void *arg) return 0; } -const struct seq_operations zoneinfo_op = { +static const struct seq_operations zoneinfo_op = { .start = frag_start, /* iterate over all zones. The same as in * fragmentation. 
         .next   = frag_next,
@@ -728,6 +786,18 @@ const struct seq_operations zoneinfo_op = {
         .show   = zoneinfo_show,
 };
 
+static int zoneinfo_open(struct inode *inode, struct file *file)
+{
+        return seq_open(file, &zoneinfo_op);
+}
+
+static const struct file_operations proc_zoneinfo_file_operations = {
+        .open           = zoneinfo_open,
+        .read           = seq_read,
+        .llseek         = seq_lseek,
+        .release        = seq_release,
+};
+
 static void *vmstat_start(struct seq_file *m, loff_t *pos)
 {
         unsigned long *v;
@@ -783,13 +853,24 @@ static void vmstat_stop(struct seq_file *m, void *arg)
         m->private = NULL;
 }
 
-const struct seq_operations vmstat_op = {
+static const struct seq_operations vmstat_op = {
         .start  = vmstat_start,
         .next   = vmstat_next,
         .stop   = vmstat_stop,
         .show   = vmstat_show,
 };
 
+static int vmstat_open(struct inode *inode, struct file *file)
+{
+        return seq_open(file, &vmstat_op);
+}
+
+static const struct file_operations proc_vmstat_file_operations = {
+        .open           = vmstat_open,
+        .read           = seq_read,
+        .llseek         = seq_lseek,
+        .release        = seq_release,
+};
 #endif /* CONFIG_PROC_FS */
 
 #ifdef CONFIG_SMP
@@ -800,15 +881,15 @@ static void vmstat_update(struct work_struct *w)
 {
         refresh_cpu_vm_stats(smp_processor_id());
         schedule_delayed_work(&__get_cpu_var(vmstat_work),
-                sysctl_stat_interval);
+                round_jiffies_relative(sysctl_stat_interval));
 }
 
-static void __devinit start_cpu_timer(int cpu)
+static void __cpuinit start_cpu_timer(int cpu)
 {
-        struct delayed_work *vmstat_work = &per_cpu(vmstat_work, cpu);
+        struct delayed_work *work = &per_cpu(vmstat_work, cpu);
 
-        INIT_DELAYED_WORK_DEFERRABLE(vmstat_work, vmstat_update);
-        schedule_delayed_work_on(cpu, vmstat_work, HZ + cpu);
+        INIT_DELAYED_WORK_DEFERRABLE(work, vmstat_update);
+        schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu));
 }
 
 /*
@@ -847,9 +928,11 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
 static struct notifier_block __cpuinitdata vmstat_notifier =
         { &vmstat_cpuup_callback, NULL, 0 };
+#endif
 
 static int __init setup_vmstat(void)
 {
+#ifdef CONFIG_SMP
         int cpu;
 
         refresh_zone_stat_thresholds();
@@ -857,7 +940,13 @@ static int __init setup_vmstat(void)
         for_each_online_cpu(cpu)
                 start_cpu_timer(cpu);
+#endif
 
+#ifdef CONFIG_PROC_FS
+        proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
+        proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
+        proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
+        proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
+#endif
         return 0;
 }
 module_init(setup_vmstat)
-#endif
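
Editor's note, not part of the patch: the fragmentation_op, pagetypeinfo_op, vmstat_op and zoneinfo_op changes above all follow one pattern, a seq_operations iterator wrapped by an open() that calls seq_open(), exposed through a file_operations registered with proc_create(). The sketch below is a minimal, hypothetical out-of-tree module illustrating that same pattern against the 2.6.3x-era API this diff targets (where proc_create() still takes a struct file_operations; current kernels use struct proc_ops). All names here (example_op, example_show, /proc/vmstat_example) are invented for illustration.

/*
 * Illustrative only -- not part of mm/vmstat.c.  Shows the seq_file ->
 * file_operations -> proc_create() wiring used by the patch for
 * /proc/buddyinfo, /proc/pagetypeinfo, /proc/vmstat and /proc/zoneinfo.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_values[] = { 1, 2, 3 };

/* .start/.next walk a position counter, the way frag_start/frag_next walk nodes */
static void *example_start(struct seq_file *m, loff_t *pos)
{
        return *pos < ARRAY_SIZE(example_values) ? &example_values[*pos] : NULL;
}

static void *example_next(struct seq_file *m, void *v, loff_t *pos)
{
        (*pos)++;
        return *pos < ARRAY_SIZE(example_values) ? &example_values[*pos] : NULL;
}

static void example_stop(struct seq_file *m, void *v)
{
}

/* .show emits one record for the current iterator position */
static int example_show(struct seq_file *m, void *v)
{
        seq_printf(m, "value %d\n", *(int *)v);
        return 0;
}

static const struct seq_operations example_op = {
        .start  = example_start,
        .next   = example_next,
        .stop   = example_stop,
        .show   = example_show,
};

static int example_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &example_op);
}

static const struct file_operations example_file_operations = {
        .open           = example_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};

static int __init example_init(void)
{
        /* S_IRUGO: world-readable, the same mode the patch uses */
        proc_create("vmstat_example", S_IRUGO, NULL, &example_file_operations);
        return 0;
}

static void __exit example_exit(void)
{
        remove_proc_entry("vmstat_example", NULL);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");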