diff --git a/mm/vmstat.c b/mm/vmstat.c
index 9114974..74d66db 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -27,7 +27,7 @@ static void sum_vm_events(unsigned long *ret, const struct cpumask *cpumask)
 
        memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
 
-       for_each_cpu_mask_nr(cpu, *cpumask) {
+       for_each_cpu(cpu, cpumask) {
                struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
 
                for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
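The hunk above converts sum_vm_events() from the old for_each_cpu_mask_nr()
iterator, which took a dereferenced cpumask, to for_each_cpu(), which takes a
struct cpumask pointer. This follows the cpumask API rework that stopped
passing whole masks by value. For reference, the SMP definition in
include/linux/cpumask.h is roughly:

        #define for_each_cpu(cpu, mask)                         \
                for ((cpu) = -1;                                \
                        (cpu) = cpumask_next((cpu), (mask)),    \
                        (cpu) < nr_cpu_ids;)

Each iteration asks cpumask_next() for the next set bit, so the loop body only
runs for CPUs actually present in the mask.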
@@ -135,11 +135,7 @@ static void refresh_zone_stat_thresholds(void)
        int cpu;
        int threshold;
 
-       for_each_zone(zone) {
-
-               if (!zone->present_pages)
-                       continue;
-
+       for_each_populated_zone(zone) {
                threshold = calculate_threshold(zone);
 
                for_each_online_cpu(cpu)
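for_each_populated_zone() was introduced precisely to replace this open-coded
for_each_zone() plus present_pages test. Its definition in
include/linux/mmzone.h is approximately:

        static inline int populated_zone(struct zone *zone)
        {
                return (!!zone->present_pages);
        }

        #define for_each_populated_zone(zone)                           \
                for (zone = (first_online_pgdat())->node_zones;         \
                     zone;                                              \
                     zone = next_zone(zone))                            \
                        if (!populated_zone(zone))                      \
                                ; /* skip zones with no present pages */ \
                        else

The dangling-else trick keeps the macro usable as an ordinary statement head
while silently filtering out empty zones.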
@@ -301,12 +297,9 @@ void refresh_cpu_vm_stats(int cpu)
        int i;
        int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
 
-       for_each_zone(zone) {
+       for_each_populated_zone(zone) {
                struct per_cpu_pageset *p;
 
-               if (!populated_zone(zone))
-                       continue;
-
                p = zone_pcp(zone, cpu);
 
                for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
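The second conversion, in refresh_cpu_vm_stats(), is the same transformation:
populated_zone() is exactly the check that was open-coded here, so skipping
empty zones behaves as before and the iterator merely hides the boilerplate.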
@@ -516,22 +509,11 @@ static void pagetypeinfo_showblockcount_print(struct seq_file *m,
                        continue;
 
                page = pfn_to_page(pfn);
-#ifdef CONFIG_ARCH_FLATMEM_HAS_HOLES
-               /*
-                * Ordinarily, memory holes in flatmem still have a valid
-                * memmap for the PFN range. However, an architecture for
-                * embedded systems (e.g. ARM) can free up the memmap backing
-                * holes to save memory on the assumption the memmap is
-                * never used. The page_zone linkages are then broken even
-                * though pfn_valid() returns true. Skip the page if the
-                * linkages are broken. Even if this test passed, the impact
-                * is that the counters for the movable type are off but
-                * fragmentation monitoring is likely meaningless on small
-                * systems.
-                */
-               if (page_zone(page) != zone)
+
+               /* Watch for unexpected holes punched in the memmap */
+               if (!memmap_valid_within(pfn, page, zone))
                        continue;
-#endif
+
                mtype = get_pageblock_migratetype(page);
 
                if (mtype < MIGRATE_TYPES)
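memmap_valid_within() moves the sanity check that used to hide behind the
#ifdef into a shared helper, so the walker loses the conditional compilation
entirely. On architectures that may punch holes in the memmap (guarded by
CONFIG_ARCH_HAS_HOLES_MEMORYMODEL, the successor to the flatmem-specific
option), the helper is roughly:

        /* mm/mmzone.c */
        int memmap_valid_within(unsigned long pfn,
                                struct page *page, struct zone *zone)
        {
                if (page_to_pfn(page) != pfn)
                        return 0;

                if (page_zone(page) != zone)
                        return 0;

                return 1;
        }

Everywhere else a static inline stub simply returns 1, so the compiler drops
the check. Note the helper is slightly stronger than the code it replaces: it
also verifies that page_to_pfn(page) round-trips back to pfn before trusting
the page_zone() linkage.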
@@ -898,7 +880,7 @@ static void vmstat_update(struct work_struct *w)
 {
        refresh_cpu_vm_stats(smp_processor_id());
        schedule_delayed_work(&__get_cpu_var(vmstat_work),
-               sysctl_stat_interval);
+               round_jiffies_relative(sysctl_stat_interval));
 }
 
 static void __cpuinit start_cpu_timer(int cpu)
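round_jiffies_relative() rounds a relative timeout so that it expires on a
whole-second boundary; deferrable work items across the system then tend to
fire in the same tick, and an otherwise idle CPU wakes once instead of many
times. A minimal usage sketch, with my_work and my_work_fn as hypothetical
names:

        #include <linux/timer.h>
        #include <linux/workqueue.h>

        static void my_work_fn(struct work_struct *w);
        static DECLARE_DELAYED_WORK(my_work, my_work_fn);

        static void my_work_fn(struct work_struct *w)
        {
                /* ... periodic housekeeping ... */

                /* re-arm on a whole-second boundary so wakeups batch */
                schedule_delayed_work(&my_work, round_jiffies_relative(HZ));
        }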
@@ -906,7 +888,8 @@ static void __cpuinit start_cpu_timer(int cpu)
        struct delayed_work *vmstat_work = &per_cpu(vmstat_work, cpu);
 
        INIT_DELAYED_WORK_DEFERRABLE(vmstat_work, vmstat_update);
-       schedule_delayed_work_on(cpu, vmstat_work, HZ + cpu);
+       schedule_delayed_work_on(cpu, vmstat_work,
+                                __round_jiffies_relative(HZ, cpu));
 }
 
 /*
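__round_jiffies_relative(HZ, cpu) in the last hunk does the same whole-second
rounding but applies the timer core's per-CPU skew (a few jiffies per CPU id,
cpu * 3 at the time) so each CPU's vmstat_work still lands on its own tick
rather than every CPU firing at once. It replaces the ad-hoc HZ + cpu stagger
with the mechanism the rest of the kernel already uses for the same purpose.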