diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index c8d55bc..ee03bba 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -3,20 +3,15 @@
 
 #include <linux/types.h>
 #include <linux/percpu.h>
+#include <linux/mm.h>
 #include <linux/mmzone.h>
 #include <asm/atomic.h>
 
-#ifdef CONFIG_VM_EVENT_COUNTERS
-/*
- * Light weight per cpu counter implementation.
- *
- * Counters should only be incremented.  You need to set EMBEDDED
- * to disable VM_EVENT_COUNTERS.  Things like procps (vmstat,
- * top, etc) use /proc/vmstat and depend on these counters.
- *
- * Counters are handled completely inline. On many platforms the code
- * generated will simply be the increment of a global address.
- */
+#ifdef CONFIG_ZONE_DMA
+#define DMA_ZONE(xx) xx##_DMA,
+#else
+#define DMA_ZONE(xx)
+#endif
 
 #ifdef CONFIG_ZONE_DMA32
 #define DMA32_ZONE(xx) xx##_DMA32,
@@ -30,7 +25,8 @@
 #define HIGHMEM_ZONE(xx)
 #endif
 
-#define FOR_ALL_ZONES(xx) xx##_DMA, DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx)
+
+#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE
 
 enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
                FOR_ALL_ZONES(PGALLOC),
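The helper macros above make every optional zone contribute its enum entry only when the corresponding CONFIG_ZONE_* option is set, so FOR_ALL_ZONES() now emits exactly one counter per configured zone, in the same order as enum zone_type. Note that the enabled branch of HIGHMEM_ZONE() carries its own leading comma (", xx##_HIGH" in the matching kernel source; it lies outside the context shown here). A sketch of the expansion, assuming CONFIG_ZONE_DMA=y, CONFIG_ZONE_DMA32=n and CONFIG_HIGHMEM=y:

/*
 * FOR_ALL_ZONES(PGALLOC)
 *   -> DMA_ZONE(PGALLOC) DMA32_ZONE(PGALLOC) PGALLOC_NORMAL
 *      HIGHMEM_ZONE(PGALLOC) , PGALLOC_MOVABLE
 *   -> PGALLOC_DMA, PGALLOC_NORMAL , PGALLOC_HIGH , PGALLOC_MOVABLE
 */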
@@ -40,11 +36,40 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
                FOR_ALL_ZONES(PGSTEAL),
                FOR_ALL_ZONES(PGSCAN_KSWAPD),
                FOR_ALL_ZONES(PGSCAN_DIRECT),
+#ifdef CONFIG_NUMA
+               PGSCAN_ZONE_RECLAIM_FAILED,
+#endif
                PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
+               KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
+               KSWAPD_SKIP_CONGESTION_WAIT,
                PAGEOUTRUN, ALLOCSTALL, PGROTATED,
+#ifdef CONFIG_HUGETLB_PAGE
+               HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
+#endif
+               UNEVICTABLE_PGCULLED,   /* culled to noreclaim list */
+               UNEVICTABLE_PGSCANNED,  /* scanned for reclaimability */
+               UNEVICTABLE_PGRESCUED,  /* rescued from noreclaim list */
+               UNEVICTABLE_PGMLOCKED,
+               UNEVICTABLE_PGMUNLOCKED,
+               UNEVICTABLE_PGCLEARED,  /* on COW, page truncate */
+               UNEVICTABLE_PGSTRANDED, /* unable to isolate on unlock */
+               UNEVICTABLE_MLOCKFREED,
                NR_VM_EVENT_ITEMS
 };
 
+extern int sysctl_stat_interval;
+
+#ifdef CONFIG_VM_EVENT_COUNTERS
+/*
+ * Light weight per cpu counter implementation.
+ *
+ * Counters should only be incremented and no critical kernel component
+ * should rely on the counter values.
+ *
+ * Counters are handled completely inline. On many platforms the code
+ * generated will simply be the increment of a global address.
+ */
+
 struct vm_event_state {
        unsigned long event[NR_VM_EVENT_ITEMS];
 };
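The new enum entries add a NUMA zone-reclaim failure counter, three kswapd back-off counters, and the unevictable-LRU statistics; each configured item becomes a line in /proc/vmstat. They are incremented through the count_vm_event() helpers declared below. A minimal call-site sketch (hypothetical function name; the real increments live in mm/, e.g. mm/mlock.c for the mlock counters):

static void note_page_mlocked(void)
{
        /* preemption-safe bump of this CPU's UNEVICTABLE_PGMLOCKED slot */
        count_vm_event(UNEVICTABLE_PGMLOCKED);
}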
@@ -53,24 +78,22 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
 
 static inline void __count_vm_event(enum vm_event_item item)
 {
-       __get_cpu_var(vm_event_states).event[item]++;
+       __this_cpu_inc(per_cpu_var(vm_event_states).event[item]);
 }
 
 static inline void count_vm_event(enum vm_event_item item)
 {
-       get_cpu_var(vm_event_states).event[item]++;
-       put_cpu();
+       this_cpu_inc(per_cpu_var(vm_event_states).event[item]);
 }
 
 static inline void __count_vm_events(enum vm_event_item item, long delta)
 {
-       __get_cpu_var(vm_event_states).event[item] += delta;
+       __this_cpu_add(per_cpu_var(vm_event_states).event[item], delta);
 }
 
 static inline void count_vm_events(enum vm_event_item item, long delta)
 {
-       get_cpu_var(vm_event_states).event[item] += delta;
-       put_cpu();
+       this_cpu_add(per_cpu_var(vm_event_states).event[item], delta);
 }
 
 extern void all_vm_events(unsigned long *);
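The counter helpers switch from the get_cpu_var()/put_cpu() pattern, which brackets the increment with an explicit preempt_disable()/preempt_enable() pair, to the this_cpu_*() operations, which fold that protection into the operation itself; on x86, this_cpu_inc() compiles down to a single %gs-relative increment with no preemption fiddling at all. A sketch of what the old count_vm_event() amounted to once its macros are expanded (not the literal expansion):

static inline void count_vm_event_old(enum vm_event_item item)
{
        preempt_disable();                      /* get_cpu_var() */
        __get_cpu_var(vm_event_states).event[item]++;
        preempt_enable();                       /* put_cpu() */
}

The double-underscore variants (__count_vm_event() and friends) continue to skip the protection entirely, for callers that already run with preemption or interrupts disabled.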
@@ -85,17 +108,30 @@ static inline void vm_events_fold_cpu(int cpu)
 #else
 
 /* Disable counters */
-#define get_cpu_vm_events(e)   0L
-#define count_vm_event(e)      do { } while (0)
-#define count_vm_events(e,d)   do { } while (0)
-#define __count_vm_event(e)    do { } while (0)
-#define __count_vm_events(e,d) do { } while (0)
-#define vm_events_fold_cpu(x)  do { } while (0)
+static inline void count_vm_event(enum vm_event_item item)
+{
+}
+static inline void count_vm_events(enum vm_event_item item, long delta)
+{
+}
+static inline void __count_vm_event(enum vm_event_item item)
+{
+}
+static inline void __count_vm_events(enum vm_event_item item, long delta)
+{
+}
+static inline void all_vm_events(unsigned long *ret)
+{
+}
+static inline void vm_events_fold_cpu(int cpu)
+{
+}
 
 #endif /* CONFIG_VM_EVENT_COUNTERS */
 
 #define __count_zone_vm_events(item, zone, delta) \
-                       __count_vm_events(item##_DMA + zone_idx(zone), delta)
+               __count_vm_events(item##_NORMAL - ZONE_NORMAL + \
+               zone_idx(zone), delta)
 
 /*
  * Zone based page accounting with per cpu differentials.
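The re-based index in __count_zone_vm_events() is what keeps the macro correct now that ZONE_DMA is optional: item##_DMA need not exist anymore, whereas item##_NORMAL always does. Since FOR_ALL_ZONES() emits the per-zone counters in enum zone_type order, both sequences skip exactly the same optional entries, so subtracting ZONE_NORMAL and adding zone_idx(zone) lands on the right counter. A worked example, assuming CONFIG_ZONE_DMA=y, CONFIG_ZONE_DMA32=n, CONFIG_HIGHMEM=y:

/*
 * enum zone_type:  ZONE_DMA = 0, ZONE_NORMAL = 1,
 *                  ZONE_HIGHMEM = 2, ZONE_MOVABLE = 3
 * vm_event items:  PGALLOC_DMA, PGALLOC_NORMAL,
 *                  PGALLOC_HIGH, PGALLOC_MOVABLE
 *
 * For a highmem zone (zone_idx(zone) == 2):
 *      PGALLOC_NORMAL - ZONE_NORMAL + 2
 *        == PGALLOC_NORMAL + 1 == PGALLOC_HIGH
 */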
@@ -130,6 +166,9 @@ static inline unsigned long zone_page_state(struct zone *zone,
        return x;
 }
 
+extern unsigned long global_reclaimable_pages(void);
+extern unsigned long zone_reclaimable_pages(struct zone *zone);
+
 #ifdef CONFIG_NUMA
 /*
  * Determine the per node value of a stat item. This function
@@ -142,17 +181,20 @@ static inline unsigned long node_page_state(int node,
        struct zone *zones = NODE_DATA(node)->node_zones;
 
        return
+#ifdef CONFIG_ZONE_DMA
+               zone_page_state(&zones[ZONE_DMA], item) +
+#endif
 #ifdef CONFIG_ZONE_DMA32
                zone_page_state(&zones[ZONE_DMA32], item) +
 #endif
-               zone_page_state(&zones[ZONE_NORMAL], item) +
 #ifdef CONFIG_HIGHMEM
                zone_page_state(&zones[ZONE_HIGHMEM], item) +
 #endif
-               zone_page_state(&zones[ZONE_DMA], item);
+               zone_page_state(&zones[ZONE_NORMAL], item) +
+               zone_page_state(&zones[ZONE_MOVABLE], item);
 }
 
-extern void zone_statistics(struct zonelist *, struct zone *);
+extern void zone_statistics(struct zone *, struct zone *);
 
 #else
 
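node_page_state() remains a plain sum over every zone that can exist on the node: a conditionally compiled ZONE_DMA term joins the existing ZONE_DMA32 and ZONE_HIGHMEM ones, while the unconditional ZONE_NORMAL and new ZONE_MOVABLE terms move to the end so the statement always terminates on lines that exist in every config. A usage sketch (hypothetical caller):

/* total free pages on node 0, summed over all configured zones */
unsigned long node0_free = node_page_state(0, NR_FREE_PAGES);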
@@ -161,11 +203,6 @@ extern void zone_statistics(struct zonelist *, struct zone *);
 
 #endif /* CONFIG_NUMA */
 
-#define __add_zone_page_state(__z, __i, __d)   \
-               __mod_zone_page_state(__z, __i, __d)
-#define __sub_zone_page_state(__z, __i, __d)   \
-               __mod_zone_page_state(__z, __i,-(__d))
-
 #define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
 #define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
 
@@ -191,8 +228,6 @@ extern void dec_zone_state(struct zone *, enum zone_stat_item);
 extern void __dec_zone_state(struct zone *, enum zone_stat_item);
 
 void refresh_cpu_vm_stats(int);
-void refresh_vm_stats(void);
-
 #else /* CONFIG_SMP */
 
 /*
@@ -226,8 +261,7 @@ static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
 static inline void __dec_zone_page_state(struct page *page,
                        enum zone_stat_item item)
 {
-       atomic_long_dec(&page_zone(page)->vm_stat[item]);
-       atomic_long_dec(&vm_stat[item]);
+       __dec_zone_state(page_zone(page), item);
 }
 
 /*
@@ -239,7 +273,6 @@ static inline void __dec_zone_page_state(struct page *page,
 #define mod_zone_page_state __mod_zone_page_state
 
 static inline void refresh_cpu_vm_stats(int cpu) { }
-static inline void refresh_vm_stats(void) { }
 #endif
 
 #endif /* _LINUX_VMSTAT_H */
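In the !CONFIG_SMP branch, __dec_zone_page_state() now delegates to __dec_zone_state() instead of open-coding the two atomic decrements. For reference, the UP __dec_zone_state() it calls into looks like this in the matching kernel source (a sketch; the function sits just above this hunk, outside the visible context):

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
        atomic_long_dec(&zone->vm_stat[item]);
        atomic_long_dec(&vm_stat[item]);
}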