#include <linux/sched.h>
#include <linux/errno.h>
+#include <linux/capability.h>
#ifdef __KERNEL__
-#include <linux/config.h>
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/prio_tree.h>
#include <linux/fs.h>
+#include <linux/mutex.h>
+#include <linux/debug_locks.h>
struct mempolicy;
struct anon_vma;
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
-#include <asm/atomic.h>
#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
#define VM_GROWSDOWN 0x00000100 /* general info on the segment */
#define VM_GROWSUP 0x00000200
-#define VM_SHM 0x00000000 /* Means nothing: delete it later */
-#define VM_UNPAGED 0x00000400 /* Pages managed without map count */
+#define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */
#define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */
#define VM_EXECUTABLE 0x00001000
#define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
#define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
#define VM_MAPPED_COPY 0x01000000 /* T if mapped copy of data (nommu mmap) */
+#define VM_INSERTPAGE 0x02000000 /* The vma has had "vm_insert_page()" done on it */
#ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
void (*close)(struct vm_area_struct * area);
struct page * (*nopage)(struct vm_area_struct * area, unsigned long address, int *type);
int (*populate)(struct vm_area_struct * area, unsigned long address, unsigned long len, pgprot_t prot, unsigned long pgoff, int nonblock);
+
+ /* notification that a previously read-only page is about to become
+ * writable; if an error is returned it will cause a SIGBUS
+ * (a usage sketch follows this struct) */
+ int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page);
#ifdef CONFIG_NUMA
int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
unsigned long addr);
+ int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
+ const nodemask_t *to, unsigned long flags);
#endif
};
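/*
 * A minimal sketch of a filesystem wiring up the new callback; only
 * example_page_mkwrite and example_vm_ops are hypothetical names.
 * Returning nonzero from page_mkwrite makes the write fault SIGBUS.
 */
static int example_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
	/* e.g. reserve blocks here so a later writepage cannot hit ENOSPC */
	return 0;
}

static struct vm_operations_struct example_vm_ops = {
	.nopage       = filemap_nopage,		/* declared later in this header */
	.page_mkwrite = example_page_mkwrite,
};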
* & limit reverse map searches.
*/
union {
- unsigned long private; /* Mapping-private opaque data:
- * usually used for buffer_heads
- * if PagePrivate set; used for
- * swp_entry_t if PageSwapCache
- * When page is free, this indicates
- * order in the buddy system.
- */
+ struct {
+ unsigned long private; /* Mapping-private opaque data:
+ * usually used for buffer_heads
+ * if PagePrivate set; used for
+ * swp_entry_t if PageSwapCache;
+ * indicates order in the buddy
+ * system if PG_buddy is set.
+ */
+ struct address_space *mapping; /* If low bit clear, points to
+ * inode address_space, or NULL.
+ * If page mapped as anonymous
+ * memory, low bit is set, and
+ * it points to anon_vma object:
+ * see PAGE_MAPPING_ANON below.
+ */
+ };
#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
- spinlock_t ptl;
+ spinlock_t ptl;
#endif
- } u;
- struct address_space *mapping; /* If low bit clear, points to
- * inode address_space, or NULL.
- * If page mapped as anonymous
- * memory, low bit is set, and
- * it points to anon_vma object:
- * see PAGE_MAPPING_ANON below.
- */
+ };
pgoff_t index; /* Our offset within mapping. */
struct list_head lru; /* Pageout list, eg. active_list
* protected by zone->lru_lock !
#endif /* WANT_PAGE_VIRTUAL */
};
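/*
 * Sketch of decoding the overloaded ->mapping field described above;
 * this mirrors the page_mapping()/PageAnon() helpers of the full
 * header, and example_page_mapping is a hypothetical name.
 */
static inline struct address_space *example_page_mapping(struct page *page)
{
	unsigned long mapping = (unsigned long)page->mapping;

	if (mapping & PAGE_MAPPING_ANON)	/* low bit set: anon_vma pointer */
		return NULL;
	return (struct address_space *)mapping;
}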
-#define page_private(page) ((page)->u.private)
-#define set_page_private(page, v) ((page)->u.private = (v))
+#define page_private(page) ((page)->private)
+#define set_page_private(page, v) ((page)->private = (v))
/*
* FIXME: take this include out, include page-flags.h in
*
* Also, many kernel routines increase the page count before a critical
* routine so they can be sure the page doesn't go away from under them.
- *
- * Since 2.6.6 (approx), a free page has ->_count = -1. This is so that we
- * can use atomic_add_negative(-1, page->_count) to detect when the page
- * becomes free and so that we can also use atomic_inc_and_test to atomically
- * detect when we just tried to grab a ref on a page which some other CPU has
- * already deemed to be freeable.
- *
- * NO code should make assumptions about this internal detail! Use the provided
- * macros which retain the old rules: page_count(page) == 0 is a free page.
*/
/*
* Drop a ref; return true if the logical refcount fell to zero (the page
* has no users).
*/
-#define put_page_testzero(p) \
- ({ \
- BUG_ON(page_count(p) == 0); \
- atomic_add_negative(-1, &(p)->_count); \
- })
+static inline int put_page_testzero(struct page *page)
+{
+ BUG_ON(atomic_read(&page->_count) == 0);
+ return atomic_dec_and_test(&page->_count);
+}
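/*
 * Sketch of the canonical caller pattern (cf. __free_pages() for the
 * order-0 case); example_put_single_page is a hypothetical name.
 */
static inline void example_put_single_page(struct page *page)
{
	if (put_page_testzero(page))
		free_hot_page(page);	/* we held the last reference */
}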
/*
- * Grab a ref, return true if the page previously had a logical refcount of
- * zero. ie: returns true if we just grabbed an already-deemed-to-be-free page
+ * Try to grab a ref unless the page has a refcount of zero; return false
+ * if that is the case.
*/
-#define get_page_testone(p) atomic_inc_and_test(&(p)->_count)
-
-#define set_page_count(p,v) atomic_set(&(p)->_count, v - 1)
-#define __put_page(p) atomic_dec(&(p)->_count)
+static inline int get_page_unless_zero(struct page *page)
+{
+ return atomic_inc_not_zero(&page->_count);
+}
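/*
 * Sketch of speculative use: take a reference only if the page is not
 * already on its way back to the allocator; the caller must still
 * recheck that the page is the one it expected.  example_try_get is a
 * hypothetical name.
 */
static inline struct page *example_try_get(struct page *page)
{
	if (page && !get_page_unless_zero(page))
		page = NULL;	/* lost the race against the freer */
	return page;
}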
extern void FASTCALL(__page_cache_release(struct page *));
static inline int page_count(struct page *page)
{
- if (PageCompound(page))
+ if (unlikely(PageCompound(page)))
page = (struct page *)page_private(page);
- return atomic_read(&page->_count) + 1;
+ return atomic_read(&page->_count);
}
static inline void get_page(struct page *page)
atomic_inc(&page->_count);
}
+/*
+ * Set up the page count before the page is freed into the page allocator
+ * for the first time (boot or memory hotplug).
+ */
+static inline void init_page_count(struct page *page)
+{
+ atomic_set(&page->_count, 1);
+}
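/*
 * Sketch of the intended boot/hotplug sequence: give the fresh page a
 * refcount of one, then drop that reference so the page enters the
 * buddy allocator.  example_release_fresh_page is a hypothetical name.
 */
static inline void example_release_fresh_page(struct page *page)
{
	init_page_count(page);
	__free_page(page);
}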
+
void put_page(struct page *page);
+void put_pages_list(struct list_head *pages);
+
+void split_page(struct page *page, unsigned int order);
/*
* Multiple processes may "see" the same page. E.g. for untouched
struct zone;
extern struct zone *zone_table[];
+static inline int page_zone_id(struct page *page)
+{
+ return (page->flags >> ZONETABLE_PGSHIFT) & ZONETABLE_MASK;
+}
static inline struct zone *page_zone(struct page *page)
{
- return zone_table[(page->flags >> ZONETABLE_PGSHIFT) &
- ZONETABLE_MASK];
+ return zone_table[page_zone_id(page)];
}
static inline unsigned long page_to_nid(struct page *page)
set_page_section(page, pfn_to_section_nr(pfn));
}
+/*
+ * Some inline functions in vmstat.h depend on page_zone()
+ */
+#include <linux/vmstat.h>
+
#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif
-static inline void *lowmem_page_address(struct page *page)
+static __always_inline void *lowmem_page_address(struct page *page)
{
return __va(page_to_pfn(page) << PAGE_SHIFT);
}
int shmem_lock(struct file *file, int lock, struct user_struct *user);
#else
#define shmem_nopage filemap_nopage
-#define shmem_lock(a, b, c) ({0;}) /* always in memory, no need to lock */
-#define shmem_set_policy(a, b) (0)
-#define shmem_get_policy(a, b) (NULL)
+
+static inline int shmem_lock(struct file *file, int lock,
+ struct user_struct *user)
+{
+ return 0;
+}
+
+static inline int shmem_set_policy(struct vm_area_struct *vma,
+ struct mempolicy *new)
+{
+ return 0;
+}
+
+static inline struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ return NULL;
+}
#endif
struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags);
+extern int shmem_mmap(struct file *file, struct vm_area_struct *vma);
int shmem_zero_setup(struct vm_area_struct *);
+#ifndef CONFIG_MMU
+extern unsigned long shmem_get_unmapped_area(struct file *file,
+ unsigned long addr,
+ unsigned long len,
+ unsigned long pgoff,
+ unsigned long flags);
+#endif
+
static inline int can_do_mlock(void)
{
if (capable(CAP_IPC_LOCK))
unsigned long truncate_count; /* Compare vm_truncate_count */
};
+struct page *vm_normal_page(struct vm_area_struct *, unsigned long, pte_t);
unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
unsigned long size, struct zap_details *);
unsigned long unmap_vmas(struct mmu_gather **tlb,
}
extern int vmtruncate(struct inode * inode, loff_t offset);
+extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end);
extern int install_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, struct page *page, pgprot_t prot);
extern int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long pgoff, pgprot_t prot);
-extern int __handle_mm_fault(struct mm_struct *mm,struct vm_area_struct *vma, unsigned long address, int write_access);
-static inline int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, int write_access)
+#ifdef CONFIG_MMU
+extern int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long address, int write_access);
+
+static inline int handle_mm_fault(struct mm_struct *mm,
+ struct vm_area_struct *vma, unsigned long address,
+ int write_access)
+{
+ return __handle_mm_fault(mm, vma, address, write_access) &
+ (~VM_FAULT_WRITE);
+}
+#else
+static inline int handle_mm_fault(struct mm_struct *mm,
+ struct vm_area_struct *vma, unsigned long address,
+ int write_access)
{
- return __handle_mm_fault(mm, vma, address, write_access) & (~VM_FAULT_WRITE);
+ /* should never happen if there's no MMU */
+ BUG();
+ return VM_FAULT_SIGBUS;
}
+#endif
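/*
 * Sketch of how an arch fault handler typically consumes the return
 * value (modelled on the i386 do_page_fault() pattern; tsk, mm, vma,
 * address, write_access and the two labels are assumed to exist in
 * the caller):
 *
 *	switch (handle_mm_fault(mm, vma, address, write_access)) {
 *	case VM_FAULT_MINOR:
 *		tsk->min_flt++;
 *		break;
 *	case VM_FAULT_MAJOR:
 *		tsk->maj_flt++;
 *		break;
 *	case VM_FAULT_SIGBUS:
 *		goto do_sigbus;
 *	case VM_FAULT_OOM:
 *		goto out_of_memory;
 *	default:
 *		BUG();
 *	}
 */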
extern int make_pages_present(unsigned long addr, unsigned long end);
extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
extern struct shrinker *set_shrinker(int, shrinker_t);
extern void remove_shrinker(struct shrinker *shrinker);
+extern pte_t *FASTCALL(get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl));
+
int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address);
* overflow into the next struct page (as it might with DEBUG_SPINLOCK).
* When freeing, reset page->mapping so free_pages_check won't complain.
*/
-#define __pte_lockptr(page) &((page)->u.ptl)
+#define __pte_lockptr(page) &((page)->ptl)
#define pte_lock_init(_page) do { \
spin_lock_init(__pte_lockptr(_page)); \
} while (0)
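/*
 * Sketch of the usual usage through the pte_offset_map_lock() helper
 * that the full header defines just below these macros;
 * example_touch_pte is a hypothetical walker.
 */
static int example_touch_pte(struct mm_struct *mm, pmd_t *pmd,
			     unsigned long address)
{
	spinlock_t *ptl;
	pte_t *pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	int present = pte_present(*pte);	/* examined under the pte lock */

	pte_unmap_unlock(pte, ptl);
	return present;
}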
/* filemap.c */
extern unsigned long page_unuse(struct page *);
extern void truncate_inode_pages(struct address_space *, loff_t);
+extern void truncate_inode_pages_range(struct address_space *,
+ loff_t lstart, loff_t lend);
/* generic vm_area_ops exported for stackable file systems */
extern struct page *filemap_nopage(struct vm_area_struct *, unsigned long, int *);
unsigned long vmalloc_to_pfn(void *addr);
int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t);
+int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
-struct page *follow_page(struct mm_struct *, unsigned long address,
+struct page *follow_page(struct vm_area_struct *, unsigned long address,
unsigned int foll_flags);
#define FOLL_WRITE 0x01 /* check pte is writable */
#define FOLL_TOUCH 0x02 /* mark page accessed */
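/*
 * A minimal sketch of the intended vm_insert_page() user: a driver
 * mmap handler mapping its own pages instead of using remap_pfn_range().
 * example_mmap and my_pages are hypothetical.
 */
extern struct page *my_pages[];		/* assumed driver-owned pages */

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long npages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	unsigned long i, uaddr = vma->vm_start;
	int err;

	for (i = 0; i < npages; i++, uaddr += PAGE_SIZE) {
		err = vm_insert_page(vma, uaddr, my_pages[i]);
		if (err)
			return err;
	}
	return 0;
}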
static inline void
kernel_map_pages(struct page *page, int numpages, int enable)
{
+ if (!PageHighMem(page) && !enable)
+ debug_check_no_locks_freed(page_address(page),
+ numpages * PAGE_SIZE);
}
#endif
/* /proc/<pid>/oom_adj set to -17 protects from the oom-killer */
#define OOM_DISABLE -17
+int drop_caches_sysctl_handler(struct ctl_table *, int, struct file *,
+ void __user *, size_t *, loff_t *);
+unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
+ unsigned long lru_pages);
+void drop_pagecache(void);
+void drop_slab(void);
+
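/*
 * These back the drop_caches sysctl; assuming the standard
 * /proc/sys/vm interface, from userspace:
 *	echo 1 > /proc/sys/vm/drop_caches	(drop_pagecache())
 *	echo 2 > /proc/sys/vm/drop_caches	(drop_slab())
 *	echo 3 > /proc/sys/vm/drop_caches	(both)
 */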
+#ifndef CONFIG_MMU
+#define randomize_va_space 0
+#else
+extern int randomize_va_space;
+#endif
+
+const char *arch_vma_name(struct vm_area_struct *vma);
+
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */