Merge branch 'for-33' of git://repo.or.cz/linux-kbuild
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 9ea629c..5b59f35 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -5,9 +5,11 @@
 #ifndef PAGE_FLAGS_H
 #define PAGE_FLAGS_H
 
-#include <linux/percpu.h>
-#include <linux/cache.h>
-#include <asm/pgtable.h>
+#include <linux/types.h>
+#ifndef __GENERATING_BOUNDS_H
+#include <linux/mm_types.h>
+#include <generated/bounds.h>
+#endif /* !__GENERATING_BOUNDS_H */
 
 /*
  * Various page->flags bits:
  * PG_reserved is set for special pages, which can never be swapped out. Some
  * of them might not even exist (eg empty_bad_page)...
  *
- * The PG_private bitflag is set if page->private contains a valid value.
+ * The PG_private bitflag is set on pagecache pages if they contain
+ * filesystem-specific data (which is normally at page->private). It can also
+ * be used by private allocations for their own purposes.
  *
- * During disk I/O, PG_locked is used. This bit is set before I/O and
- * reset when I/O completes. page_waitqueue(page) is a wait queue of all tasks
- * waiting for the I/O on this page to complete.
+ * PG_locked is set before disk I/O is initiated and cleared when writeback
+ * _starts_ or when a read _completes_. PG_writeback
+ * is set before writeback starts and cleared when it finishes.
+ *
+ * PG_locked also pins a page in pagecache, and blocks truncation of the file
+ * while it is held.
+ *
+ * page_waitqueue(page) is a wait queue of all tasks waiting for the page
+ * to become unlocked.
  *
  * PG_uptodate tells whether the page's contents are valid.  When a read
  * completes, the page becomes uptodate, unless a disk I/O error happened.
  *
- * For choosing which pages to swap out, inode pages carry a PG_referenced bit,
- * which is set any time the system accesses that page through the (mapping,
- * index) hash table.  This referenced bit, together with the referenced bit
- * in the page tables, is used to manipulate page->age and move the page across
- * the active, inactive_dirty and inactive_clean lists.
- *
- * Note that the referenced bit, the page->lru list_head and the active,
- * inactive_dirty and inactive_clean lists are protected by the
- * zone->lru_lock, and *NOT* by the usual PG_locked bit!
+ * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
+ * file-backed pagecache (see mm/vmscan.c).
  *
  * PG_error is set to indicate that an I/O error occurred on this page.
  *
  * space, they need to be kmapped separately for doing IO on the pages.  The
  * struct page (which carries these flag bits) is always mapped into kernel
  * address space...
+ *
+ * PG_buddy is set to indicate that the page is free and in the buddy system
+ * (see mm/page_alloc.c).
+ *
+ * PG_hwpoison indicates that a page got corrupted in hardware and contains
+ * data with incorrect ECC bits that triggered a machine check. Accessing is
+ * not safe since it may cause another machine check. Don't touch!
  */
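/*
 * Illustrative sketch, not part of this header: how a typical read
 * completion path drives the bits documented above.  unlock_page() is
 * declared in linux/pagemap.h; the Set* helpers are generated further
 * down in this file.
 */
static inline void example_end_read(struct page *page, int error)
{
        if (!error)
                SetPageUptodate(page);  /* read completed, contents are valid */
        else
                SetPageError(page);     /* record the I/O error */
        unlock_page(page);              /* drop PG_locked, wake page_waitqueue() waiters */
}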
 
 /*
  * Don't use the *_dontuse flags.  Use the macros.  Otherwise you'll break
- * locked- and dirty-page accounting.  The top eight bits of page->flags are
- * used for page->zone, so putting flag bits there doesn't work.
- */
-#define PG_locked               0      /* Page is locked. Don't touch. */
-#define PG_error                1
-#define PG_referenced           2
-#define PG_uptodate             3
-
-#define PG_dirty                4
-#define PG_lru                  5
-#define PG_active               6
-#define PG_slab                         7      /* slab debug (Suparna wants this) */
-
-#define PG_checked              8      /* kill me in 2.5.<early>. */
-#define PG_arch_1               9
-#define PG_reserved            10
-#define PG_private             11      /* Has something at ->private */
-
-#define PG_writeback           12      /* Page is under writeback */
-#define PG_nosave              13      /* Used for system suspend/resume */
-#define PG_compound            14      /* Part of a compound page */
-#define PG_swapcache           15      /* Swap page: swp_entry_t in private */
-
-#define PG_mappedtodisk                16      /* Has blocks allocated on-disk */
-#define PG_reclaim             17      /* To be reclaimed asap */
-#define PG_nosave_free         18      /* Free, should not be written */
-#define PG_uncached            19      /* Page has been mapped as uncached */
-
-/*
- * Global page accounting.  One instance per CPU.  Only unsigned longs are
- * allowed.
+ * locked- and dirty-page accounting.
  *
- * - Fields can be modified with xxx_page_state and xxx_page_state_zone at
- * any time safely (which protects the instance from modification by
- * interrupt.
- * - The __xxx_page_state variants can be used safely when interrupts are
- * disabled.
- * - The __xxx_page_state variants can be used if the field is only
- * modified from process context and protected from preemption, or only
- * modified from interrupt context.  In this case, the field should be
- * commented here.
+ * The page flags field is split into two parts, the main flags area
+ * which extends from the low bits upwards, and the fields area which
+ * extends from the high bits downwards.
+ *
+ *  | FIELD | ... | FLAGS |
+ *  N-1           ^       0
+ *               (NR_PAGEFLAGS)
+ *
+ * The fields area is reserved for fields mapping zone, node (for NUMA) and
+ * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
+ * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
  */
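/*
 * Illustrative sketch only: how the "fields" area described above is
 * consumed.  The real accessors (page_zonenum() and friends) live in
 * linux/mm.h; EXAMPLE_ZONES_PGSHIFT and EXAMPLE_ZONES_MASK are
 * hypothetical stand-ins for the generated layout constants.
 */
#define EXAMPLE_ZONES_PGSHIFT   (BITS_PER_LONG - 2)     /* assumed position */
#define EXAMPLE_ZONES_MASK      0x3UL                   /* assumed width */

static inline unsigned long example_page_zonenum(struct page *page)
{
        /* fields are read by shifting down from the top of page->flags,
         * leaving the flag bits below NR_PAGEFLAGS untouched */
        return (page->flags >> EXAMPLE_ZONES_PGSHIFT) & EXAMPLE_ZONES_MASK;
}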
-struct page_state {
-       unsigned long nr_dirty;         /* Dirty writeable pages */
-       unsigned long nr_writeback;     /* Pages under writeback */
-       unsigned long nr_unstable;      /* NFS unstable pages */
-       unsigned long nr_page_table_pages;/* Pages used for pagetables */
-       unsigned long nr_mapped;        /* mapped into pagetables.
-                                        * only modified from process context */
-       unsigned long nr_slab;          /* In slab */
-#define GET_PAGE_STATE_LAST nr_slab
+enum pageflags {
+       PG_locked,              /* Page is locked. Don't touch. */
+       PG_error,
+       PG_referenced,
+       PG_uptodate,
+       PG_dirty,
+       PG_lru,
+       PG_active,
+       PG_slab,
+       PG_owner_priv_1,        /* Owner use. If pagecache, fs may use */
+       PG_arch_1,
+       PG_reserved,
+       PG_private,             /* If pagecache, has fs-private data */
+       PG_private_2,           /* If pagecache, has fs aux data */
+       PG_writeback,           /* Page is under writeback */
+#ifdef CONFIG_PAGEFLAGS_EXTENDED
+       PG_head,                /* A head page */
+       PG_tail,                /* A tail page */
+#else
+       PG_compound,            /* A compound page */
+#endif
+       PG_swapcache,           /* Swap page: swp_entry_t in private */
+       PG_mappedtodisk,        /* Has blocks allocated on-disk */
+       PG_reclaim,             /* To be reclaimed asap */
+       PG_buddy,               /* Page is free, on buddy lists */
+       PG_swapbacked,          /* Page is backed by RAM/swap */
+       PG_unevictable,         /* Page is "unevictable"  */
+#ifdef CONFIG_MMU
+       PG_mlocked,             /* Page is vma mlocked */
+#endif
+#ifdef CONFIG_ARCH_USES_PG_UNCACHED
+       PG_uncached,            /* Page has been mapped as uncached */
+#endif
+#ifdef CONFIG_MEMORY_FAILURE
+       PG_hwpoison,            /* hardware poisoned page. Don't touch */
+#endif
+       __NR_PAGEFLAGS,
 
-       /*
-        * The below are zeroed by get_page_state().  Use get_full_page_state()
-        * to add up all these.
+       /* Filesystems */
+       PG_checked = PG_owner_priv_1,
+
+       /* Two page bits are conscripted by FS-Cache to maintain local caching
+        * state.  These bits are set on pages belonging to the netfs's inodes
+        * when those inodes are being locally cached.
         */
-       unsigned long pgpgin;           /* Disk reads */
-       unsigned long pgpgout;          /* Disk writes */
-       unsigned long pswpin;           /* swap reads */
-       unsigned long pswpout;          /* swap writes */
-
-       unsigned long pgalloc_high;     /* page allocations */
-       unsigned long pgalloc_normal;
-       unsigned long pgalloc_dma32;
-       unsigned long pgalloc_dma;
-
-       unsigned long pgfree;           /* page freeings */
-       unsigned long pgactivate;       /* pages moved inactive->active */
-       unsigned long pgdeactivate;     /* pages moved active->inactive */
-
-       unsigned long pgfault;          /* faults (major+minor) */
-       unsigned long pgmajfault;       /* faults (major only) */
-
-       unsigned long pgrefill_high;    /* inspected in refill_inactive_zone */
-       unsigned long pgrefill_normal;
-       unsigned long pgrefill_dma32;
-       unsigned long pgrefill_dma;
-
-       unsigned long pgsteal_high;     /* total highmem pages reclaimed */
-       unsigned long pgsteal_normal;
-       unsigned long pgsteal_dma32;
-       unsigned long pgsteal_dma;
-
-       unsigned long pgscan_kswapd_high;/* total highmem pages scanned */
-       unsigned long pgscan_kswapd_normal;
-       unsigned long pgscan_kswapd_dma32;
-       unsigned long pgscan_kswapd_dma;
-
-       unsigned long pgscan_direct_high;/* total highmem pages scanned */
-       unsigned long pgscan_direct_normal;
-       unsigned long pgscan_direct_dma32;
-       unsigned long pgscan_direct_dma;
-
-       unsigned long pginodesteal;     /* pages reclaimed via inode freeing */
-       unsigned long slabs_scanned;    /* slab objects scanned */
-       unsigned long kswapd_steal;     /* pages reclaimed by kswapd */
-       unsigned long kswapd_inodesteal;/* reclaimed via kswapd inode freeing */
-       unsigned long pageoutrun;       /* kswapd's calls to page reclaim */
-       unsigned long allocstall;       /* direct reclaim calls */
-
-       unsigned long pgrotated;        /* pages rotated to tail of the LRU */
-       unsigned long nr_bounce;        /* pages for bounce buffers */
+       PG_fscache = PG_private_2,      /* page backed by cache */
+
+       /* XEN */
+       PG_pinned = PG_owner_priv_1,
+       PG_savepinned = PG_dirty,
+
+       /* SLOB */
+       PG_slob_free = PG_private,
+
+       /* SLUB */
+       PG_slub_frozen = PG_active,
+       PG_slub_debug = PG_error,
 };
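/*
 * Aliasing note, for illustration: because PG_checked and PG_pinned both
 * map onto PG_owner_priv_1 above, the PageChecked() and PagePinned()
 * helpers generated below test the very same bit; a page carries only one
 * of these meanings at a time, depending on which subsystem owns it.
 */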
 
-extern void get_page_state(struct page_state *ret);
-extern void get_page_state_node(struct page_state *ret, int node);
-extern void get_full_page_state(struct page_state *ret);
-extern unsigned long read_page_state_offset(unsigned long offset);
-extern void mod_page_state_offset(unsigned long offset, unsigned long delta);
-extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
-
-#define read_page_state(member) \
-       read_page_state_offset(offsetof(struct page_state, member))
-
-#define mod_page_state(member, delta)  \
-       mod_page_state_offset(offsetof(struct page_state, member), (delta))
-
-#define __mod_page_state(member, delta)        \
-       __mod_page_state_offset(offsetof(struct page_state, member), (delta))
-
-#define inc_page_state(member)         mod_page_state(member, 1UL)
-#define dec_page_state(member)         mod_page_state(member, 0UL - 1)
-#define add_page_state(member,delta)   mod_page_state(member, (delta))
-#define sub_page_state(member,delta)   mod_page_state(member, 0UL - (delta))
-
-#define __inc_page_state(member)       __mod_page_state(member, 1UL)
-#define __dec_page_state(member)       __mod_page_state(member, 0UL - 1)
-#define __add_page_state(member,delta) __mod_page_state(member, (delta))
-#define __sub_page_state(member,delta) __mod_page_state(member, 0UL - (delta))
-
-#define page_state(member) (*__page_state(offsetof(struct page_state, member)))
-
-#define state_zone_offset(zone, member)                                        \
-({                                                                     \
-       unsigned offset;                                                \
-       if (is_highmem(zone))                                           \
-               offset = offsetof(struct page_state, member##_high);    \
-       else if (is_normal(zone))                                       \
-               offset = offsetof(struct page_state, member##_normal);  \
-       else if (is_dma32(zone))                                        \
-               offset = offsetof(struct page_state, member##_dma32);   \
-       else                                                            \
-               offset = offsetof(struct page_state, member##_dma);     \
-       offset;                                                         \
-})
-
-#define __mod_page_state_zone(zone, member, delta)                     \
- do {                                                                  \
-       __mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
- } while (0)
-
-#define mod_page_state_zone(zone, member, delta)                       \
- do {                                                                  \
-       mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
- } while (0)
+#ifndef __GENERATING_BOUNDS_H
 
 /*
- * Manipulation of page state flags
+ * Macros to create function definitions for page flags
  */
-#define PageLocked(page)               \
-               test_bit(PG_locked, &(page)->flags)
-#define SetPageLocked(page)            \
-               set_bit(PG_locked, &(page)->flags)
-#define TestSetPageLocked(page)                \
-               test_and_set_bit(PG_locked, &(page)->flags)
-#define ClearPageLocked(page)          \
-               clear_bit(PG_locked, &(page)->flags)
-#define TestClearPageLocked(page)      \
-               test_and_clear_bit(PG_locked, &(page)->flags)
-
-#define PageError(page)                test_bit(PG_error, &(page)->flags)
-#define SetPageError(page)     set_bit(PG_error, &(page)->flags)
-#define ClearPageError(page)   clear_bit(PG_error, &(page)->flags)
-
-#define PageReferenced(page)   test_bit(PG_referenced, &(page)->flags)
-#define SetPageReferenced(page)        set_bit(PG_referenced, &(page)->flags)
-#define ClearPageReferenced(page)      clear_bit(PG_referenced, &(page)->flags)
-#define TestClearPageReferenced(page) test_and_clear_bit(PG_referenced, &(page)->flags)
-
-#define PageUptodate(page)     test_bit(PG_uptodate, &(page)->flags)
-#ifndef SetPageUptodate
-#define SetPageUptodate(page)  set_bit(PG_uptodate, &(page)->flags)
-#endif
-#define ClearPageUptodate(page)        clear_bit(PG_uptodate, &(page)->flags)
+#define TESTPAGEFLAG(uname, lname)                                     \
+static inline int Page##uname(struct page *page)                       \
+                       { return test_bit(PG_##lname, &page->flags); }
+
+#define SETPAGEFLAG(uname, lname)                                      \
+static inline void SetPage##uname(struct page *page)                   \
+                       { set_bit(PG_##lname, &page->flags); }
+
+#define CLEARPAGEFLAG(uname, lname)                                    \
+static inline void ClearPage##uname(struct page *page)                 \
+                       { clear_bit(PG_##lname, &page->flags); }
+
+#define __SETPAGEFLAG(uname, lname)                                    \
+static inline void __SetPage##uname(struct page *page)                 \
+                       { __set_bit(PG_##lname, &page->flags); }
+
+#define __CLEARPAGEFLAG(uname, lname)                                  \
+static inline void __ClearPage##uname(struct page *page)               \
+                       { __clear_bit(PG_##lname, &page->flags); }
+
+#define TESTSETFLAG(uname, lname)                                      \
+static inline int TestSetPage##uname(struct page *page)                        \
+               { return test_and_set_bit(PG_##lname, &page->flags); }
+
+#define TESTCLEARFLAG(uname, lname)                                    \
+static inline int TestClearPage##uname(struct page *page)              \
+               { return test_and_clear_bit(PG_##lname, &page->flags); }
+
+#define __TESTCLEARFLAG(uname, lname)                                  \
+static inline int __TestClearPage##uname(struct page *page)            \
+               { return __test_and_clear_bit(PG_##lname, &page->flags); }
+
+#define PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname)              \
+       SETPAGEFLAG(uname, lname) CLEARPAGEFLAG(uname, lname)
 
-#define PageDirty(page)                test_bit(PG_dirty, &(page)->flags)
-#define SetPageDirty(page)     set_bit(PG_dirty, &(page)->flags)
-#define TestSetPageDirty(page) test_and_set_bit(PG_dirty, &(page)->flags)
-#define ClearPageDirty(page)   clear_bit(PG_dirty, &(page)->flags)
-#define __ClearPageDirty(page) __clear_bit(PG_dirty, &(page)->flags)
-#define TestClearPageDirty(page) test_and_clear_bit(PG_dirty, &(page)->flags)
+#define __PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname)            \
+       __SETPAGEFLAG(uname, lname)  __CLEARPAGEFLAG(uname, lname)
 
-#define PageLRU(page)          test_bit(PG_lru, &(page)->flags)
-#define SetPageLRU(page)       set_bit(PG_lru, &(page)->flags)
-#define ClearPageLRU(page)     clear_bit(PG_lru, &(page)->flags)
-#define __ClearPageLRU(page)   __clear_bit(PG_lru, &(page)->flags)
+#define PAGEFLAG_FALSE(uname)                                          \
+static inline int Page##uname(struct page *page)                       \
+                       { return 0; }
 
-#define PageActive(page)       test_bit(PG_active, &(page)->flags)
-#define SetPageActive(page)    set_bit(PG_active, &(page)->flags)
-#define ClearPageActive(page)  clear_bit(PG_active, &(page)->flags)
-#define __ClearPageActive(page)        __clear_bit(PG_active, &(page)->flags)
+#define TESTSCFLAG(uname, lname)                                       \
+       TESTSETFLAG(uname, lname) TESTCLEARFLAG(uname, lname)
 
-#define PageSlab(page)         test_bit(PG_slab, &(page)->flags)
-#define __SetPageSlab(page)    __set_bit(PG_slab, &(page)->flags)
-#define __ClearPageSlab(page)  __clear_bit(PG_slab, &(page)->flags)
+#define SETPAGEFLAG_NOOP(uname)                                                \
+static inline void SetPage##uname(struct page *page) {  }
+
+#define CLEARPAGEFLAG_NOOP(uname)                                      \
+static inline void ClearPage##uname(struct page *page) {  }
+
+#define __CLEARPAGEFLAG_NOOP(uname)                                    \
+static inline void __ClearPage##uname(struct page *page) {  }
+
+#define TESTCLEARFLAG_FALSE(uname)                                     \
+static inline int TestClearPage##uname(struct page *page) { return 0; }
+
+#define __TESTCLEARFLAG_FALSE(uname)                                   \
+static inline int __TestClearPage##uname(struct page *page) { return 0; }
+
+struct page;   /* forward declaration */
+
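/*
 * Expansion example, for illustration only: the PAGEFLAG(Dirty, dirty)
 * line below generates these three inline functions:
 *
 *      static inline int PageDirty(struct page *page)
 *              { return test_bit(PG_dirty, &page->flags); }
 *      static inline void SetPageDirty(struct page *page)
 *              { set_bit(PG_dirty, &page->flags); }
 *      static inline void ClearPageDirty(struct page *page)
 *              { clear_bit(PG_dirty, &page->flags); }
 */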
+TESTPAGEFLAG(Locked, locked) TESTSETFLAG(Locked, locked)
+PAGEFLAG(Error, error)
+PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced)
+PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty)
+PAGEFLAG(LRU, lru) __CLEARPAGEFLAG(LRU, lru)
+PAGEFLAG(Active, active) __CLEARPAGEFLAG(Active, active)
+       TESTCLEARFLAG(Active, active)
+__PAGEFLAG(Slab, slab)
+PAGEFLAG(Checked, checked)             /* Used by some filesystems */
+PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned)    /* Xen */
+PAGEFLAG(SavePinned, savepinned);                      /* Xen */
+PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
+PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
+
+__PAGEFLAG(SlobFree, slob_free)
+
+__PAGEFLAG(SlubFrozen, slub_frozen)
+__PAGEFLAG(SlubDebug, slub_debug)
+
+/*
+ * Private page markings that may be used by the filesystem that owns the page
+ * for its own purposes.
+ * - PG_private and PG_private_2 cause releasepage() and co to be invoked
+ */
+PAGEFLAG(Private, private) __SETPAGEFLAG(Private, private)
+       __CLEARPAGEFLAG(Private, private)
+PAGEFLAG(Private2, private_2) TESTSCFLAG(Private2, private_2)
+PAGEFLAG(OwnerPriv1, owner_priv_1) TESTCLEARFLAG(OwnerPriv1, owner_priv_1)
+
+/*
+ * Only test-and-set exist for PG_writeback.  The unconditional operators are
+ * risky: they bypass page accounting.
+ */
+TESTPAGEFLAG(Writeback, writeback) TESTSCFLAG(Writeback, writeback)
+__PAGEFLAG(Buddy, buddy)
+PAGEFLAG(MappedToDisk, mappedtodisk)
+
+/* PG_readahead is only used for file reads; PG_reclaim is only for writes */
+PAGEFLAG(Reclaim, reclaim) TESTCLEARFLAG(Reclaim, reclaim)
+PAGEFLAG(Readahead, reclaim)           /* Reminder to do async read-ahead */
 
 #ifdef CONFIG_HIGHMEM
-#define PageHighMem(page)      is_highmem(page_zone(page))
+/*
+ * Must use a macro here due to header dependency issues. page_zone() is not
+ * available at this point.
+ */
+#define PageHighMem(__p) is_highmem(page_zone(__p))
 #else
-#define PageHighMem(page)      0 /* needed to optimize away at compile time */
+PAGEFLAG_FALSE(HighMem)
 #endif
 
-#define PageChecked(page)      test_bit(PG_checked, &(page)->flags)
-#define SetPageChecked(page)   set_bit(PG_checked, &(page)->flags)
-#define ClearPageChecked(page) clear_bit(PG_checked, &(page)->flags)
-
-#define PageReserved(page)     test_bit(PG_reserved, &(page)->flags)
-#define SetPageReserved(page)  set_bit(PG_reserved, &(page)->flags)
-#define ClearPageReserved(page)        clear_bit(PG_reserved, &(page)->flags)
-#define __ClearPageReserved(page)      __clear_bit(PG_reserved, &(page)->flags)
-
-#define SetPagePrivate(page)   set_bit(PG_private, &(page)->flags)
-#define ClearPagePrivate(page) clear_bit(PG_private, &(page)->flags)
-#define PagePrivate(page)      test_bit(PG_private, &(page)->flags)
-#define __SetPagePrivate(page)  __set_bit(PG_private, &(page)->flags)
-#define __ClearPagePrivate(page) __clear_bit(PG_private, &(page)->flags)
-
-#define PageWriteback(page)    test_bit(PG_writeback, &(page)->flags)
-#define SetPageWriteback(page)                                         \
-       do {                                                            \
-               if (!test_and_set_bit(PG_writeback,                     \
-                               &(page)->flags))                        \
-                       inc_page_state(nr_writeback);                   \
-       } while (0)
-#define TestSetPageWriteback(page)                                     \
-       ({                                                              \
-               int ret;                                                \
-               ret = test_and_set_bit(PG_writeback,                    \
-                                       &(page)->flags);                \
-               if (!ret)                                               \
-                       inc_page_state(nr_writeback);                   \
-               ret;                                                    \
-       })
-#define ClearPageWriteback(page)                                       \
-       do {                                                            \
-               if (test_and_clear_bit(PG_writeback,                    \
-                               &(page)->flags))                        \
-                       dec_page_state(nr_writeback);                   \
-       } while (0)
-#define TestClearPageWriteback(page)                                   \
-       ({                                                              \
-               int ret;                                                \
-               ret = test_and_clear_bit(PG_writeback,                  \
-                               &(page)->flags);                        \
-               if (ret)                                                \
-                       dec_page_state(nr_writeback);                   \
-               ret;                                                    \
-       })
-
-#define PageNosave(page)       test_bit(PG_nosave, &(page)->flags)
-#define SetPageNosave(page)    set_bit(PG_nosave, &(page)->flags)
-#define TestSetPageNosave(page)        test_and_set_bit(PG_nosave, &(page)->flags)
-#define ClearPageNosave(page)          clear_bit(PG_nosave, &(page)->flags)
-#define TestClearPageNosave(page)      test_and_clear_bit(PG_nosave, &(page)->flags)
-
-#define PageNosaveFree(page)   test_bit(PG_nosave_free, &(page)->flags)
-#define SetPageNosaveFree(page)        set_bit(PG_nosave_free, &(page)->flags)
-#define ClearPageNosaveFree(page)              clear_bit(PG_nosave_free, &(page)->flags)
-
-#define PageMappedToDisk(page) test_bit(PG_mappedtodisk, &(page)->flags)
-#define SetPageMappedToDisk(page) set_bit(PG_mappedtodisk, &(page)->flags)
-#define ClearPageMappedToDisk(page) clear_bit(PG_mappedtodisk, &(page)->flags)
-
-#define PageReclaim(page)      test_bit(PG_reclaim, &(page)->flags)
-#define SetPageReclaim(page)   set_bit(PG_reclaim, &(page)->flags)
-#define ClearPageReclaim(page) clear_bit(PG_reclaim, &(page)->flags)
-#define TestClearPageReclaim(page) test_and_clear_bit(PG_reclaim, &(page)->flags)
-
-#define PageCompound(page)     test_bit(PG_compound, &(page)->flags)
-#define __SetPageCompound(page)        __set_bit(PG_compound, &(page)->flags)
-#define __ClearPageCompound(page) __clear_bit(PG_compound, &(page)->flags)
-
 #ifdef CONFIG_SWAP
-#define PageSwapCache(page)    test_bit(PG_swapcache, &(page)->flags)
-#define SetPageSwapCache(page) set_bit(PG_swapcache, &(page)->flags)
-#define ClearPageSwapCache(page) clear_bit(PG_swapcache, &(page)->flags)
+PAGEFLAG(SwapCache, swapcache)
 #else
-#define PageSwapCache(page)    0
+PAGEFLAG_FALSE(SwapCache)
+       SETPAGEFLAG_NOOP(SwapCache) CLEARPAGEFLAG_NOOP(SwapCache)
 #endif
 
-#define PageUncached(page)     test_bit(PG_uncached, &(page)->flags)
-#define SetPageUncached(page)  set_bit(PG_uncached, &(page)->flags)
-#define ClearPageUncached(page)        clear_bit(PG_uncached, &(page)->flags)
+PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable)
+       TESTCLEARFLAG(Unevictable, unevictable)
 
-struct page;   /* forward declaration */
+#ifdef CONFIG_MMU
+PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked)
+       TESTSCFLAG(Mlocked, mlocked) __TESTCLEARFLAG(Mlocked, mlocked)
+#else
+PAGEFLAG_FALSE(Mlocked) SETPAGEFLAG_NOOP(Mlocked)
+       TESTCLEARFLAG_FALSE(Mlocked) __TESTCLEARFLAG_FALSE(Mlocked)
+#endif
 
-int test_clear_page_dirty(struct page *page);
-int test_clear_page_writeback(struct page *page);
-int test_set_page_writeback(struct page *page);
+#ifdef CONFIG_ARCH_USES_PG_UNCACHED
+PAGEFLAG(Uncached, uncached)
+#else
+PAGEFLAG_FALSE(Uncached)
+#endif
 
-static inline void clear_page_dirty(struct page *page)
+#ifdef CONFIG_MEMORY_FAILURE
+PAGEFLAG(HWPoison, hwpoison)
+TESTSCFLAG(HWPoison, hwpoison)
+#define __PG_HWPOISON (1UL << PG_hwpoison)
+#else
+PAGEFLAG_FALSE(HWPoison)
+#define __PG_HWPOISON 0
+#endif
+
+u64 stable_page_flags(struct page *page);
+
+static inline int PageUptodate(struct page *page)
 {
-       test_clear_page_dirty(page);
+       int ret = test_bit(PG_uptodate, &(page)->flags);
+
+       /*
+        * Must ensure that the data we read out of the page is loaded
+        * _after_ we've loaded page->flags to check for PageUptodate.
+        * We can skip the barrier if the page is not uptodate, because
+        * we wouldn't be reading anything from it.
+        *
+        * See SetPageUptodate() for the other side of the story.
+        */
+       if (ret)
+               smp_rmb();
+
+       return ret;
+}
+
+static inline void __SetPageUptodate(struct page *page)
+{
+       smp_wmb();
+       __set_bit(PG_uptodate, &(page)->flags);
 }
 
+static inline void SetPageUptodate(struct page *page)
+{
+#ifdef CONFIG_S390
+       if (!test_and_set_bit(PG_uptodate, &page->flags))
+               page_clear_dirty(page);
+#else
+       /*
+        * Memory barrier must be issued before setting the PG_uptodate bit,
+        * so that all previous stores issued in order to bring the page
+        * uptodate are actually visible before PageUptodate becomes true.
+        *
+        * s390 doesn't need an explicit smp_wmb here because the test and
+        * set bit already provides full barriers.
+        */
+       smp_wmb();
+       set_bit(PG_uptodate, &(page)->flags);
+#endif
+}
+
+CLEARPAGEFLAG(Uptodate, uptodate)
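/*
 * Illustrative pairing of the barriers described above.  This is a sketch
 * only; example_publish()/example_read_back() are hypothetical and kaddr is
 * assumed to be a kernel mapping of the page's data.
 */
static inline void example_publish(struct page *page, char *kaddr)
{
        kaddr[0] = 1;                   /* stores that make the contents valid ... */
        SetPageUptodate(page);          /* ... ordered before PG_uptodate by smp_wmb() */
}

static inline char example_read_back(struct page *page, const char *kaddr)
{
        if (!PageUptodate(page))        /* smp_rmb() after a set flag orders ... */
                return 0;
        return kaddr[0];                /* ... this load after the flag load */
}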
+
+extern void cancel_dirty_page(struct page *page, unsigned int account_size);
+
+int test_clear_page_writeback(struct page *page);
+int test_set_page_writeback(struct page *page);
+
 static inline void set_page_writeback(struct page *page)
 {
        test_set_page_writeback(page);
 }
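/*
 * Illustrative counterpart, not part of this header: completion paths
 * normally go through end_page_writeback() (declared in linux/pagemap.h),
 * which uses the accounting-aware test_clear_page_writeback() rather than
 * a raw ClearPageWriteback(), which deliberately does not exist.
 */
static inline void example_end_writeback(struct page *page)
{
        test_clear_page_writeback(page);        /* accounting-aware clear */
        /* the real end_page_writeback() also wakes PG_writeback waiters */
}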
 
+#ifdef CONFIG_PAGEFLAGS_EXTENDED
+/*
+ * On systems with plenty of page flags available, separate flags are used
+ * for the PageHead() and PageTail() checks of compound pages so that simple
+ * bit tests can be used in performance-sensitive paths. PageCompound is
+ * generally not used in hot code paths.
+ */
+__PAGEFLAG(Head, head)
+__PAGEFLAG(Tail, tail)
+
+static inline int PageCompound(struct page *page)
+{
+       return page->flags & ((1L << PG_head) | (1L << PG_tail));
+}
+#else
+/*
+ * Reduce page flag use as much as possible by overlapping
+ * compound page flags with the flags used for page cache pages. Possible
+ * because PageCompound is always set for compound pages and not for
+ * pages on the LRU and/or pagecache.
+ */
+TESTPAGEFLAG(Compound, compound)
+__PAGEFLAG(Head, compound)
+
+/*
+ * PG_reclaim is used in combination with PG_compound to mark the
+ * head and tail of a compound page. This saves one page flag
+ * but makes it impossible to use compound pages for the page cache.
+ * The PG_reclaim bit would have to be used for reclaim or readahead
+ * if compound pages enter the page cache.
+ *
+ * PG_compound & PG_reclaim    => Tail page
+ * PG_compound & ~PG_reclaim   => Head page
+ */
+#define PG_head_tail_mask ((1L << PG_compound) | (1L << PG_reclaim))
+
+static inline int PageTail(struct page *page)
+{
+       return ((page->flags & PG_head_tail_mask) == PG_head_tail_mask);
+}
+
+static inline void __SetPageTail(struct page *page)
+{
+       page->flags |= PG_head_tail_mask;
+}
+
+static inline void __ClearPageTail(struct page *page)
+{
+       page->flags &= ~PG_head_tail_mask;
+}
+
+#endif /* !PAGEFLAGS_EXTENDED */
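/*
 * Illustrative sketch (the real marking is done by prep_compound_page() in
 * mm/page_alloc.c): tagging the head and tails of a compound page of
 * 1 << order base pages, valid under either encoding above.
 */
static inline void example_mark_compound(struct page *page, unsigned int order)
{
        int i;

        __SetPageHead(page);                    /* head page */
        for (i = 1; i < (1 << order); i++)
                __SetPageTail(page + i);        /* tail pages */
}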
+
+#ifdef CONFIG_MMU
+#define __PG_MLOCKED           (1 << PG_mlocked)
+#else
+#define __PG_MLOCKED           0
+#endif
+
+/*
+ * Flags checked when a page is freed.  Pages being freed should not have
+ * these flags set.  If they are, there is a problem.
+ */
+#define PAGE_FLAGS_CHECK_AT_FREE \
+       (1 << PG_lru     | 1 << PG_locked    | \
+        1 << PG_private | 1 << PG_private_2 | \
+        1 << PG_buddy   | 1 << PG_writeback | 1 << PG_reserved | \
+        1 << PG_slab    | 1 << PG_swapcache | 1 << PG_active | \
+        1 << PG_unevictable | __PG_MLOCKED | __PG_HWPOISON)
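/*
 * Illustrative sketch of how the mask above is used; the real check lives
 * in the page allocator's free path in mm/page_alloc.c.  A non-zero return
 * means something is still using (or still tracking) the page.
 */
static inline int example_check_at_free(struct page *page)
{
        if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE))
                return 1;
        return 0;
}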
+
+/*
+ * Flags checked when a page is prepped for return by the page allocator.
+ * Pages being prepped should not have any flags set.  If they are set,
+ * there has been a kernel bug or struct page corruption.
+ */
+#define PAGE_FLAGS_CHECK_AT_PREP       ((1 << NR_PAGEFLAGS) - 1)
+
+#define PAGE_FLAGS_PRIVATE                             \
+       (1 << PG_private | 1 << PG_private_2)
+/**
+ * page_has_private - Determine if the page has private data
+ * @page: The page to be checked
+ *
+ * Determine if a page carries private data, indicating that release routines
+ * should be invoked upon it.
+ */
+static inline int page_has_private(struct page *page)
+{
+       return !!(page->flags & PAGE_FLAGS_PRIVATE);
+}
+
+#endif /* !__GENERATING_BOUNDS_H */
+
 #endif /* PAGE_FLAGS_H */