X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;ds=sidebyside;f=include%2Flinux%2Fvmstat.h;h=524cd1b28ecb0ff62a9d3620f9d21d48f8c1e32e;hb=bc47ab0241c7c86da4f5e5f82fbca7d45387c18d;hp=3e0daf54133e165aad7bde3adfff94cf659190ad;hpb=f8891e5e1f93a128c3900f82035e8541357896a7;p=safe%2Fjmp%2Flinux-2.6
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 3e0daf5..524cd1b 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -3,22 +3,30 @@
 
 #include <linux/types.h>
 #include <linux/percpu.h>
-#include <linux/config.h>
+#include <linux/mm_types.h>
 #include <linux/mmzone.h>
 #include <asm/atomic.h>
 
-#ifdef CONFIG_VM_EVENT_COUNTERS
-/*
- * Light weight per cpu counter implementation.
- *
- * Counters should only be incremented and no critical kernel component
- * should rely on the counter values.
- *
- * Counters are handled completely inline. On many platforms the code
- * generated will simply be the increment of a global address.
- */
+#ifdef CONFIG_ZONE_DMA
+#define DMA_ZONE(xx) xx##_DMA,
+#else
+#define DMA_ZONE(xx)
+#endif
+
+#ifdef CONFIG_ZONE_DMA32
+#define DMA32_ZONE(xx) xx##_DMA32,
+#else
+#define DMA32_ZONE(xx)
+#endif
+
+#ifdef CONFIG_HIGHMEM
+#define HIGHMEM_ZONE(xx) , xx##_HIGH
+#else
+#define HIGHMEM_ZONE(xx)
+#endif
 
-#define FOR_ALL_ZONES(x) x##_DMA, x##_DMA32, x##_NORMAL, x##_HIGH
+
+#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE
 
 enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 		FOR_ALL_ZONES(PGALLOC),
@@ -30,9 +38,35 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 		FOR_ALL_ZONES(PGSCAN_DIRECT),
 		PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
 		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
+#ifdef CONFIG_HUGETLB_PAGE
+		HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
+#endif
+#ifdef CONFIG_UNEVICTABLE_LRU
+		UNEVICTABLE_PGCULLED,	/* culled to noreclaim list */
+		UNEVICTABLE_PGSCANNED,	/* scanned for reclaimability */
+		UNEVICTABLE_PGRESCUED,	/* rescued from noreclaim list */
+		UNEVICTABLE_PGMLOCKED,
+		UNEVICTABLE_PGMUNLOCKED,
+		UNEVICTABLE_PGCLEARED,	/* on COW, page truncate */
+		UNEVICTABLE_PGSTRANDED,	/* unable to isolate on unlock */
+		UNEVICTABLE_MLOCKFREED,
+#endif
 		NR_VM_EVENT_ITEMS
 };
 
+extern int sysctl_stat_interval;
+
+#ifdef CONFIG_VM_EVENT_COUNTERS
+/*
+ * Light weight per cpu counter implementation.
+ *
+ * Counters should only be incremented and no critical kernel component
+ * should rely on the counter values.
+ *
+ * Counters are handled completely inline. On many platforms the code
+ * generated will simply be the increment of a global address.
+ */
 struct vm_event_state {
 	unsigned long event[NR_VM_EVENT_ITEMS];
 };
 
@@ -41,43 +75,62 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
 
 static inline void __count_vm_event(enum vm_event_item item)
 {
-	__get_cpu_var(vm_event_states.event[item])++;
+	__get_cpu_var(vm_event_states).event[item]++;
 }
 
 static inline void count_vm_event(enum vm_event_item item)
 {
-	get_cpu_var(vm_event_states.event[item])++;
+	get_cpu_var(vm_event_states).event[item]++;
 	put_cpu();
 }
 
 static inline void __count_vm_events(enum vm_event_item item, long delta)
 {
-	__get_cpu_var(vm_event_states.event[item]) += delta;
+	__get_cpu_var(vm_event_states).event[item] += delta;
 }
 
 static inline void count_vm_events(enum vm_event_item item, long delta)
 {
-	get_cpu_var(vm_event_states.event[item])++;
+	get_cpu_var(vm_event_states).event[item] += delta;
 	put_cpu();
 }
 
 extern void all_vm_events(unsigned long *);
+#ifdef CONFIG_HOTPLUG
 extern void vm_events_fold_cpu(int cpu);
+#else
+static inline void vm_events_fold_cpu(int cpu)
+{
+}
+#endif
 
 #else
 
 /* Disable counters */
-#define get_cpu_vm_events(e)	0L
-#define count_vm_event(e)	do { } while (0)
-#define count_vm_events(e,d)	do { } while (0)
-#define __count_vm_event(e)	do { } while (0)
-#define __count_vm_events(e,d)	do { } while (0)
-#define vm_events_fold_cpu(x)	do { } while (0)
+static inline void count_vm_event(enum vm_event_item item)
+{
+}
+static inline void count_vm_events(enum vm_event_item item, long delta)
+{
+}
+static inline void __count_vm_event(enum vm_event_item item)
+{
+}
+static inline void __count_vm_events(enum vm_event_item item, long delta)
+{
+}
+static inline void all_vm_events(unsigned long *ret)
+{
+}
+static inline void vm_events_fold_cpu(int cpu)
+{
+}
 
 #endif /* CONFIG_VM_EVENT_COUNTERS */
 
 #define __count_zone_vm_events(item, zone, delta) \
-	__count_vm_events(item##_DMA + zone_idx(zone), delta)
+	__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
+	zone_idx(zone), delta)
 
 /*
  * Zone based page accounting with per cpu differentials.
@@ -112,6 +165,16 @@ static inline unsigned long zone_page_state(struct zone *zone,
 	return x;
 }
 
+extern unsigned long global_lru_pages(void);
+
+static inline unsigned long zone_lru_pages(struct zone *zone)
+{
+	return (zone_page_state(zone, NR_ACTIVE_ANON)
+		+ zone_page_state(zone, NR_ACTIVE_FILE)
+		+ zone_page_state(zone, NR_INACTIVE_ANON)
+		+ zone_page_state(zone, NR_INACTIVE_FILE));
+}
+
 #ifdef CONFIG_NUMA
 /*
  * Determine the per node value of a stat item. This function
@@ -124,19 +187,20 @@ static inline unsigned long node_page_state(int node,
 	struct zone *zones = NODE_DATA(node)->node_zones;
 
 	return
-#ifndef CONFIG_DMA_IS_NORMAL
-#if !defined(CONFIG_DMA_IS_DMA32) && BITS_PER_LONG >= 64
-		zone_page_state(&zones[ZONE_DMA32], item) +
+#ifdef CONFIG_ZONE_DMA
+		zone_page_state(&zones[ZONE_DMA], item) +
 #endif
-		zone_page_state(&zones[ZONE_NORMAL], item) +
+#ifdef CONFIG_ZONE_DMA32
+		zone_page_state(&zones[ZONE_DMA32], item) +
 #endif
 #ifdef CONFIG_HIGHMEM
 		zone_page_state(&zones[ZONE_HIGHMEM], item) +
 #endif
-		zone_page_state(&zones[ZONE_DMA], item);
+		zone_page_state(&zones[ZONE_NORMAL], item) +
+		zone_page_state(&zones[ZONE_MOVABLE], item);
 }
 
-extern void zone_statistics(struct zonelist *, struct zone *);
+extern void zone_statistics(struct zone *, struct zone *);
 
 #else
 
@@ -170,10 +234,11 @@ void inc_zone_page_state(struct page *, enum zone_stat_item);
 void dec_zone_page_state(struct page *, enum zone_stat_item);
 
 extern void inc_zone_state(struct zone *, enum zone_stat_item);
+extern void __inc_zone_state(struct zone *, enum zone_stat_item);
+extern void dec_zone_state(struct zone *, enum zone_stat_item);
+extern void __dec_zone_state(struct zone *, enum zone_stat_item);
 
 void refresh_cpu_vm_stats(int);
-void refresh_vm_stats(void);
-
 #else /* CONFIG_SMP */
 
 /*
@@ -186,18 +251,28 @@ static inline void __mod_zone_page_state(struct zone *zone,
 	zone_page_state_add(delta, zone, item);
 }
 
+static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
+{
+	atomic_long_inc(&zone->vm_stat[item]);
+	atomic_long_inc(&vm_stat[item]);
+}
+
 static inline void __inc_zone_page_state(struct page *page,
 			enum zone_stat_item item)
 {
-	atomic_long_inc(&page_zone(page)->vm_stat[item]);
-	atomic_long_inc(&vm_stat[item]);
+	__inc_zone_state(page_zone(page), item);
+}
+
+static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
+{
+	atomic_long_dec(&zone->vm_stat[item]);
+	atomic_long_dec(&vm_stat[item]);
 }
 
 static inline void __dec_zone_page_state(struct page *page,
 			enum zone_stat_item item)
 {
-	atomic_long_dec(&page_zone(page)->vm_stat[item]);
-	atomic_long_dec(&vm_stat[item]);
+	__dec_zone_state(page_zone(page), item);
 }
 
 /*
@@ -209,7 +284,6 @@ static inline void __dec_zone_page_state(struct page *page,
 #define mod_zone_page_state __mod_zone_page_state
 
 static inline void refresh_cpu_vm_stats(int cpu) { }
-static inline void refresh_vm_stats(void) { }
 #endif
 
 #endif /* _LINUX_VMSTAT_H */
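
A note on the token-pasting machinery in the first hunk: each optional zone gets a helper macro that either emits a "name##_<ZONE>," item or nothing, so FOR_ALL_ZONES(PGALLOC) expands to exactly one PGALLOC_* entry per configured zone, in zone order. A minimal userspace sketch of the pattern; the SIM_CONFIG_* switch and zone_event() helper are illustrative stand-ins, not kernel code:

	#include <stdio.h>

	#define SIM_CONFIG_ZONE_DMA 1		/* flip to 0 to compile the DMA zone out */

	#if SIM_CONFIG_ZONE_DMA
	#define DMA_ZONE(xx) xx##_DMA,		/* emits "ZONE_DMA," or "PGALLOC_DMA," */
	#else
	#define DMA_ZONE(xx)			/* emits nothing: the item disappears */
	#endif

	/* Event items are kept in the same order as the zones. */
	#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) xx##_NORMAL, xx##_MOVABLE,

	enum zone_type     { DMA_ZONE(ZONE) ZONE_NORMAL, ZONE_MOVABLE, MAX_NR_ZONES };
	enum vm_event_item { FOR_ALL_ZONES(PGALLOC) NR_VM_EVENT_ITEMS };

	/*
	 * The shared ordering is what makes the patch's new
	 * __count_zone_vm_events() arithmetic work: ZONE_NORMAL always
	 * exists, so anchoring on the *_NORMAL item maps any zone index to
	 * its event slot whether or not the optional zones are configured.
	 * The old item##_DMA anchor broke once ZONE_DMA became optional.
	 */
	#define zone_event(item, zidx) (item##_NORMAL - ZONE_NORMAL + (zidx))

	int main(void)
	{
		printf("zones=%d items=%d movable slot=%d\n",
		       MAX_NR_ZONES, NR_VM_EVENT_ITEMS,
		       zone_event(PGALLOC, ZONE_MOVABLE));
		return 0;
	}

Flipping SIM_CONFIG_ZONE_DMA to 0 shrinks both enums in lockstep, and zone_event() still lands on the right slot.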
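
The vm_event accessor hunk fixes two real bugs rather than restyling: the old __get_cpu_var(vm_event_states.event[item])++ handed the accessor a member expression instead of the per-CPU variable itself, and count_vm_events() bumped the counter by one instead of by delta. A rough userspace analogue of the corrected form, with per-CPU storage simulated by a plain array and a fake CPU id; every name here is a stand-in:

	#include <stdio.h>

	enum vm_event_item { PGPGIN, PGPGOUT, NR_VM_EVENT_ITEMS };

	struct vm_event_state {
		unsigned long event[NR_VM_EVENT_ITEMS];
	};

	#define NR_CPUS 4
	static struct vm_event_state vm_event_states[NR_CPUS];
	static int this_cpu;	/* stand-in for smp_processor_id() */

	/* analogue of __get_cpu_var(var): this CPU's instance of the variable */
	#define sim_get_cpu_var(var) ((var)[this_cpu])

	static void count_vm_events(enum vm_event_item item, long delta)
	{
		/* resolve the per-CPU struct first, then index its member,
		 * and add the full delta (the old code did "++") */
		sim_get_cpu_var(vm_event_states).event[item] += delta;
	}

	int main(void)
	{
		this_cpu = 2;
		count_vm_events(PGPGIN, 8);
		printf("cpu2 PGPGIN = %lu\n", vm_event_states[2].event[PGPGIN]);
		return 0;
	}

The stub conversion at the bottom of the same hunk, from do-nothing macros to empty static inline functions, keeps argument type-checking even when CONFIG_VM_EVENT_COUNTERS is off.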
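
The reworked node_page_state() shows the companion pattern for summing over only the configured zones: every optional zone contributes a "term +" fragment inside its own #ifdef, and the always-present zones terminate the expression, so it parses under any config combination. That is also why the unconditional terms move from ZONE_DMA (no longer guaranteed to exist) to ZONE_NORMAL and ZONE_MOVABLE at the tail. A compilable sketch of that shape, again with a simulated config switch:

	#include <stdio.h>

	#define SIM_CONFIG_ZONE_DMA 1	/* optional zone; 0 also compiles */

	enum zone_type {
	#if SIM_CONFIG_ZONE_DMA
		ZONE_DMA,
	#endif
		ZONE_NORMAL,
		ZONE_MOVABLE,
		MAX_NR_ZONES
	};

	static unsigned long nr_free[MAX_NR_ZONES];	/* stand-in for per-zone stats */

	static unsigned long node_free_pages(void)
	{
		/* optional zones each add "term +"; mandatory zones close the chain */
		return
	#if SIM_CONFIG_ZONE_DMA
			nr_free[ZONE_DMA] +
	#endif
			nr_free[ZONE_NORMAL] +
			nr_free[ZONE_MOVABLE];
	}

	int main(void)
	{
		nr_free[ZONE_NORMAL] = 100;
		nr_free[ZONE_MOVABLE] = 28;
		printf("node total = %lu\n", node_free_pages());
		return 0;
	}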