Merge branch 'linus' into cont_syslog
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 60c2e03..7f43ccd 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
 
 #include <linux/types.h>
 #include <linux/percpu.h>
-#include <linux/config.h>
+#include <linux/mm.h>
 #include <linux/mmzone.h>
 #include <asm/atomic.h>
 
+#ifdef CONFIG_ZONE_DMA
+#define DMA_ZONE(xx) xx##_DMA,
+#else
+#define DMA_ZONE(xx)
+#endif
+
+#ifdef CONFIG_ZONE_DMA32
+#define DMA32_ZONE(xx) xx##_DMA32,
+#else
+#define DMA32_ZONE(xx)
+#endif
+
+#ifdef CONFIG_HIGHMEM
+#define HIGHMEM_ZONE(xx) , xx##_HIGH
+#else
+#define HIGHMEM_ZONE(xx)
+#endif
+
+#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE
+
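The comma placement above is deliberate: DMA_ZONE()/DMA32_ZONE() carry a trailing comma and HIGHMEM_ZONE() a leading one, so the generated list stays syntactically valid whichever optional zones are compiled out. As a sketch, with CONFIG_ZONE_DMA, CONFIG_ZONE_DMA32 and CONFIG_HIGHMEM all enabled, FOR_ALL_ZONES(PGALLOC) expands to:

	PGALLOC_DMA, PGALLOC_DMA32, PGALLOC_NORMAL , PGALLOC_HIGH , PGALLOC_MOVABLE

and with all three disabled it degenerates cleanly to "PGALLOC_NORMAL , PGALLOC_MOVABLE".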
+enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
+               FOR_ALL_ZONES(PGALLOC),
+               PGFREE, PGACTIVATE, PGDEACTIVATE,
+               PGFAULT, PGMAJFAULT,
+               FOR_ALL_ZONES(PGREFILL),
+               FOR_ALL_ZONES(PGSTEAL),
+               FOR_ALL_ZONES(PGSCAN_KSWAPD),
+               FOR_ALL_ZONES(PGSCAN_DIRECT),
+#ifdef CONFIG_NUMA
+               PGSCAN_ZONE_RECLAIM_FAILED,
+#endif
+               PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
+               KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
+               KSWAPD_SKIP_CONGESTION_WAIT,
+               PAGEOUTRUN, ALLOCSTALL, PGROTATED,
+#ifdef CONFIG_COMPACTION
+               COMPACTBLOCKS, COMPACTPAGES, COMPACTPAGEFAILED,
+               COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS,
+#endif
+#ifdef CONFIG_HUGETLB_PAGE
+               HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
+#endif
+               UNEVICTABLE_PGCULLED,   /* culled to noreclaim list */
+               UNEVICTABLE_PGSCANNED,  /* scanned for reclaimability */
+               UNEVICTABLE_PGRESCUED,  /* rescued from noreclaim list */
+               UNEVICTABLE_PGMLOCKED,
+               UNEVICTABLE_PGMUNLOCKED,
+               UNEVICTABLE_PGCLEARED,  /* on COW, page truncate */
+               UNEVICTABLE_PGSTRANDED, /* unable to isolate on unlock */
+               UNEVICTABLE_MLOCKFREED,
+               NR_VM_EVENT_ITEMS
+};
+
+extern int sysctl_stat_interval;
+
+#ifdef CONFIG_VM_EVENT_COUNTERS
 /*
- * Global page accounting.  One instance per CPU.  Only unsigned longs are
- * allowed.
+ * Lightweight per-CPU counter implementation.
+ *
+ * Counters should only be incremented; no critical kernel component
+ * should rely on the counter values.
  *
- * - Fields can be modified with xxx_page_state and xxx_page_state_zone at
- * any time safely (which protects the instance from modification by
- * interrupt.
- * - The __xxx_page_state variants can be used safely when interrupts are
- * disabled.
- * - The __xxx_page_state variants can be used if the field is only
- * modified from process context and protected from preemption, or only
- * modified from interrupt context.  In this case, the field should be
- * commented here.
+ * Counters are handled completely inline. On many platforms the code
+ * generated will simply be the increment of a global address.
  */
-struct page_state {
-       unsigned long nr_unstable;      /* NFS unstable pages */
-#define GET_PAGE_STATE_LAST nr_unstable
-
-       /*
-        * The below are zeroed by get_page_state().  Use get_full_page_state()
-        * to add up all these.
-        */
-       unsigned long pgpgin;           /* Disk reads */
-       unsigned long pgpgout;          /* Disk writes */
-       unsigned long pswpin;           /* swap reads */
-       unsigned long pswpout;          /* swap writes */
-
-       unsigned long pgalloc_high;     /* page allocations */
-       unsigned long pgalloc_normal;
-       unsigned long pgalloc_dma32;
-       unsigned long pgalloc_dma;
-
-       unsigned long pgfree;           /* page freeings */
-       unsigned long pgactivate;       /* pages moved inactive->active */
-       unsigned long pgdeactivate;     /* pages moved active->inactive */
-
-       unsigned long pgfault;          /* faults (major+minor) */
-       unsigned long pgmajfault;       /* faults (major only) */
-
-       unsigned long pgrefill_high;    /* inspected in refill_inactive_zone */
-       unsigned long pgrefill_normal;
-       unsigned long pgrefill_dma32;
-       unsigned long pgrefill_dma;
-
-       unsigned long pgsteal_high;     /* total highmem pages reclaimed */
-       unsigned long pgsteal_normal;
-       unsigned long pgsteal_dma32;
-       unsigned long pgsteal_dma;
-
-       unsigned long pgscan_kswapd_high;/* total highmem pages scanned */
-       unsigned long pgscan_kswapd_normal;
-       unsigned long pgscan_kswapd_dma32;
-       unsigned long pgscan_kswapd_dma;
-
-       unsigned long pgscan_direct_high;/* total highmem pages scanned */
-       unsigned long pgscan_direct_normal;
-       unsigned long pgscan_direct_dma32;
-       unsigned long pgscan_direct_dma;
-
-       unsigned long pginodesteal;     /* pages reclaimed via inode freeing */
-       unsigned long slabs_scanned;    /* slab objects scanned */
-       unsigned long kswapd_steal;     /* pages reclaimed by kswapd */
-       unsigned long kswapd_inodesteal;/* reclaimed via kswapd inode freeing */
-       unsigned long pageoutrun;       /* kswapd's calls to page reclaim */
-       unsigned long allocstall;       /* direct reclaim calls */
-
-       unsigned long pgrotated;        /* pages rotated to tail of the LRU */
-       unsigned long nr_bounce;        /* pages for bounce buffers */
+
+struct vm_event_state {
+       unsigned long event[NR_VM_EVENT_ITEMS];
 };
 
-extern void get_page_state(struct page_state *ret);
-extern void get_page_state_node(struct page_state *ret, int node);
-extern void get_full_page_state(struct page_state *ret);
-extern unsigned long read_page_state_offset(unsigned long offset);
-extern void mod_page_state_offset(unsigned long offset, unsigned long delta);
-extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
-
-#define read_page_state(member) \
-       read_page_state_offset(offsetof(struct page_state, member))
-
-#define mod_page_state(member, delta)  \
-       mod_page_state_offset(offsetof(struct page_state, member), (delta))
-
-#define __mod_page_state(member, delta)        \
-       __mod_page_state_offset(offsetof(struct page_state, member), (delta))
-
-#define inc_page_state(member)         mod_page_state(member, 1UL)
-#define dec_page_state(member)         mod_page_state(member, 0UL - 1)
-#define add_page_state(member,delta)   mod_page_state(member, (delta))
-#define sub_page_state(member,delta)   mod_page_state(member, 0UL - (delta))
-
-#define __inc_page_state(member)       __mod_page_state(member, 1UL)
-#define __dec_page_state(member)       __mod_page_state(member, 0UL - 1)
-#define __add_page_state(member,delta) __mod_page_state(member, (delta))
-#define __sub_page_state(member,delta) __mod_page_state(member, 0UL - (delta))
-
-#define page_state(member) (*__page_state(offsetof(struct page_state, member)))
-
-#define state_zone_offset(zone, member)                                        \
-({                                                                     \
-       unsigned offset;                                                \
-       if (is_highmem(zone))                                           \
-               offset = offsetof(struct page_state, member##_high);    \
-       else if (is_normal(zone))                                       \
-               offset = offsetof(struct page_state, member##_normal);  \
-       else if (is_dma32(zone))                                        \
-               offset = offsetof(struct page_state, member##_dma32);   \
-       else                                                            \
-               offset = offsetof(struct page_state, member##_dma);     \
-       offset;                                                         \
-})
-
-#define __mod_page_state_zone(zone, member, delta)                     \
- do {                                                                  \
-       __mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
- } while (0)
-
-#define mod_page_state_zone(zone, member, delta)                       \
- do {                                                                  \
-       mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
- } while (0)
-
-DECLARE_PER_CPU(struct page_state, page_states);
+DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
+
+static inline void __count_vm_event(enum vm_event_item item)
+{
+       __this_cpu_inc(vm_event_states.event[item]);
+}
+
+static inline void count_vm_event(enum vm_event_item item)
+{
+       this_cpu_inc(vm_event_states.event[item]);
+}
+
+static inline void __count_vm_events(enum vm_event_item item, long delta)
+{
+       __this_cpu_add(vm_event_states.event[item], delta);
+}
+
+static inline void count_vm_events(enum vm_event_item item, long delta)
+{
+       this_cpu_add(vm_event_states.event[item], delta);
+}
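The double-underscore variants compile to __this_cpu_inc()/__this_cpu_add() and are only correct when the caller already runs non-preemptibly (interrupts off, or otherwise pinned to a CPU); count_vm_event()/count_vm_events() use the this_cpu_*() forms and are safe from any context. A minimal usage sketch; the surrounding context is assumed for illustration, not quoted from mm code:

	/* ordinary process context: use the fully safe form */
	count_vm_event(PGFAULT);

	/* caller already non-preemptible (e.g. IRQs disabled): cheaper form */
	__count_vm_events(PGFREE, nr_pages);	/* nr_pages assumed in scope */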
+
+extern void all_vm_events(unsigned long *);
+#ifdef CONFIG_HOTPLUG
+extern void vm_events_fold_cpu(int cpu);
+#else
+static inline void vm_events_fold_cpu(int cpu)
+{
+}
+#endif
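all_vm_events() folds every CPU's vm_event_states into a caller-supplied array of NR_VM_EVENT_ITEMS entries; readers such as /proc/vmstat consume the counters this way rather than touching the per-CPU data directly, and vm_events_fold_cpu() merges a dead CPU's counters so the totals survive CPU hotplug. A hedged sketch of the read side:

	unsigned long ev[NR_VM_EVENT_ITEMS];	/* ev is an illustrative name */

	all_vm_events(ev);
	/* ev[PGPGIN], ev[PGFAULT], ... now hold system-wide totals */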
+
+#else
+
+/* Disable counters */
+static inline void count_vm_event(enum vm_event_item item)
+{
+}
+static inline void count_vm_events(enum vm_event_item item, long delta)
+{
+}
+static inline void __count_vm_event(enum vm_event_item item)
+{
+}
+static inline void __count_vm_events(enum vm_event_item item, long delta)
+{
+}
+static inline void all_vm_events(unsigned long *ret)
+{
+}
+static inline void vm_events_fold_cpu(int cpu)
+{
+}
+
+#endif /* CONFIG_VM_EVENT_COUNTERS */
+
+#define __count_zone_vm_events(item, zone, delta) \
+               __count_vm_events(item##_NORMAL - ZONE_NORMAL + \
+               zone_idx(zone), delta)
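This arithmetic only works because each FOR_ALL_ZONES() expansion lists its per-zone items in the same order as enum zone_type, so item##_NORMAL - ZONE_NORMAL rebases zone_idx(zone) onto the matching run of vm_event_item values. For example, mirroring how the page allocator's hot path uses this macro (zone and order are assumed to be in scope):

	__count_zone_vm_events(PGALLOC, zone, 1 << order);
	/* for a ZONE_HIGHMEM zone this adds 1 << order to PGALLOC_HIGH */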
 
 /*
  * Zone based page accounting with per cpu differentials.
@@ -164,6 +170,9 @@ static inline unsigned long zone_page_state(struct zone *zone,
        return x;
 }
 
+extern unsigned long global_reclaimable_pages(void);
+extern unsigned long zone_reclaimable_pages(struct zone *zone);
+
 #ifdef CONFIG_NUMA
 /*
  * Determine the per node value of a stat item. This function
@@ -176,25 +185,27 @@ static inline unsigned long node_page_state(int node,
        struct zone *zones = NODE_DATA(node)->node_zones;
 
        return
-#ifndef CONFIG_DMA_IS_NORMAL
-#if !defined(CONFIG_DMA_IS_DMA32) && BITS_PER_LONG >= 64
-               zone_page_state(&zones[ZONE_DMA32], item) +
+#ifdef CONFIG_ZONE_DMA
+               zone_page_state(&zones[ZONE_DMA], item) +
 #endif
-               zone_page_state(&zones[ZONE_NORMAL], item) +
+#ifdef CONFIG_ZONE_DMA32
+               zone_page_state(&zones[ZONE_DMA32], item) +
 #endif
 #ifdef CONFIG_HIGHMEM
                zone_page_state(&zones[ZONE_HIGHMEM], item) +
 #endif
-               zone_page_state(&zones[ZONE_DMA], item);
+               zone_page_state(&zones[ZONE_NORMAL], item) +
+               zone_page_state(&zones[ZONE_MOVABLE], item);
 }
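node_page_state() is simply the sum of the stat over the node's zones, with the ifdefs keeping the sum in step with the zones this kernel was configured with. A usage sketch, where nid is assumed to be a valid node id and NR_FILE_PAGES comes from enum zone_stat_item in mmzone.h:

	unsigned long file = node_page_state(nid, NR_FILE_PAGES);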
+
+extern void zone_statistics(struct zone *, struct zone *);
+
 #else
+
 #define node_page_state(node, item) global_page_state(item)
-#endif
+#define zone_statistics(_zl,_z) do { } while (0)
 
-#define __add_zone_page_state(__z, __i, __d)   \
-               __mod_zone_page_state(__z, __i, __d)
-#define __sub_zone_page_state(__z, __i, __d)   \
-               __mod_zone_page_state(__z, __i,-(__d))
+#endif /* CONFIG_NUMA */
 
 #define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
 #define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
@@ -204,6 +215,8 @@ static inline void zap_zone_vm_stats(struct zone *zone)
        memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
 }
 
+extern void inc_zone_state(struct zone *, enum zone_stat_item);
+
 #ifdef CONFIG_SMP
 void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
 void __inc_zone_page_state(struct page *, enum zone_stat_item);
@@ -214,10 +227,11 @@ void inc_zone_page_state(struct page *, enum zone_stat_item);
 void dec_zone_page_state(struct page *, enum zone_stat_item);
 
 extern void inc_zone_state(struct zone *, enum zone_stat_item);
+extern void __inc_zone_state(struct zone *, enum zone_stat_item);
+extern void dec_zone_state(struct zone *, enum zone_stat_item);
+extern void __dec_zone_state(struct zone *, enum zone_stat_item);
 
 void refresh_cpu_vm_stats(int);
-void refresh_vm_stats(void);
-
 #else /* CONFIG_SMP */
 
 /*
@@ -230,18 +244,28 @@ static inline void __mod_zone_page_state(struct zone *zone,
        zone_page_state_add(delta, zone, item);
 }
 
+static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
+{
+       atomic_long_inc(&zone->vm_stat[item]);
+       atomic_long_inc(&vm_stat[item]);
+}
+
 static inline void __inc_zone_page_state(struct page *page,
                        enum zone_stat_item item)
 {
-       atomic_long_inc(&page_zone(page)->vm_stat[item]);
-       atomic_long_inc(&vm_stat[item]);
+       __inc_zone_state(page_zone(page), item);
+}
+
+static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
+{
+       atomic_long_dec(&zone->vm_stat[item]);
+       atomic_long_dec(&vm_stat[item]);
 }
 
 static inline void __dec_zone_page_state(struct page *page,
                        enum zone_stat_item item)
 {
-       atomic_long_dec(&page_zone(page)->vm_stat[item]);
-       atomic_long_dec(&vm_stat[item]);
+       __dec_zone_state(page_zone(page), item);
 }
 
 /*
@@ -253,7 +277,6 @@ static inline void __dec_zone_page_state(struct page *page,
 #define mod_zone_page_state __mod_zone_page_state
 
 static inline void refresh_cpu_vm_stats(int cpu) { }
-static inline void refresh_vm_stats(void) { }
 #endif
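On !SMP builds there are no per-CPU differentials to maintain, so the interrupt-safe entry points collapse into the same direct atomic_long_*() updates as the __ variants, and aliasing mod_zone_page_state to __mod_zone_page_state (above) costs nothing. An illustrative consequence, with zone and nr assumed in scope:

	/* on UP both calls compile to the same atomic update */
	mod_zone_page_state(zone, NR_FILE_PAGES, nr);
	__mod_zone_page_state(zone, NR_FILE_PAGES, nr);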
 
 #endif /* _LINUX_VMSTAT_H */