diff --git a/include/linux/mm.h b/include/linux/mm.h
index f28a1b3..e70f21b 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -10,21 +10,23 @@
 #include <linux/mmzone.h>
 #include <linux/rbtree.h>
 #include <linux/prio_tree.h>
-#include <linux/fs.h>
-#include <linux/mutex.h>
 #include <linux/debug_locks.h>
-#include <linux/backing-dev.h>
 #include <linux/mm_types.h>
+#include <linux/range.h>
 
 struct mempolicy;
 struct anon_vma;
+struct file_ra_state;
 struct user_struct;
+struct writeback_control;
+struct rlimit;
 
 #ifndef CONFIG_DISCONTIGMEM          /* Don't use mapnrs, do it properly */
 extern unsigned long max_mapnr;
 #endif
 
 extern unsigned long num_physpages;
+extern unsigned long totalram_pages;
 extern void * high_memory;
 extern int page_cluster;
 
@@ -40,6 +42,9 @@ extern int sysctl_legacy_va_layout;
 
 #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
 
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
+
 /*
  * Linux kernel virtual memory manager primitives.
  * The idea being to have a "virtual" mm in the same way
@@ -49,90 +54,17 @@ extern int sysctl_legacy_va_layout;
  * mmap() functions).
  */
 
-/*
- * This struct defines a VMM memory area. There is one of these
- * per VM-area/task. A VM area is any part of the process virtual memory
- * space that has a special rule for the page-fault handlers (ie a shared
- * library, the executable area etc).
- */
-struct vm_area_struct {
-	struct mm_struct * vm_mm;	/* The address space we belong to. */
-	unsigned long vm_start;		/* Our start address within vm_mm. */
-	unsigned long vm_end;		/* The first byte after our end address
-					   within vm_mm. */
-
-	/* linked list of VM areas per task, sorted by address */
-	struct vm_area_struct *vm_next;
-
-	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */
-	unsigned long vm_flags;		/* Flags, listed below. */
-
-	struct rb_node vm_rb;
-
-	/*
-	 * For areas with an address space and backing store,
-	 * linkage into the address_space->i_mmap prio tree, or
-	 * linkage to the list of like vmas hanging off its node, or
-	 * linkage of vma in the address_space->i_mmap_nonlinear list.
-	 */
-	union {
-		struct {
-			struct list_head list;
-			void *parent;	/* aligns with prio_tree_node parent */
-			struct vm_area_struct *head;
-		} vm_set;
-
-		struct raw_prio_tree_node prio_tree_node;
-	} shared;
-
-	/*
-	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
-	 * list, after a COW of one of the file pages. A MAP_SHARED vma
-	 * can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack
-	 * or brk vma (with NULL file) can only be in an anon_vma list.
-	 */
-	struct list_head anon_vma_node;	/* Serialized by anon_vma->lock */
-	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */
-
-	/* Function pointers to deal with this struct. */
-	struct vm_operations_struct * vm_ops;
-
-	/* Information about our backing store: */
-	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
-					   units, *not* PAGE_CACHE_SIZE */
-	struct file * vm_file;		/* File we map to (can be NULL). */
-	void * vm_private_data;		/* was vm_pte (shared mem) */
-	unsigned long vm_truncate_count;/* truncate_count or restart_addr */
-
-#ifndef CONFIG_MMU
-	atomic_t vm_usage;		/* refcount (VMAs shared if !MMU) */
-#endif
-#ifdef CONFIG_NUMA
-	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
-#endif
-};
-
 extern struct kmem_cache *vm_area_cachep;
 
-/*
- * This struct defines the per-mm list of VMAs for uClinux. If CONFIG_MMU is
- * disabled, then there's a single shared list of VMAs maintained by the
- * system, and mm's subscribe to these individually
- */
-struct vm_list_struct {
-	struct vm_list_struct	*next;
-	struct vm_area_struct	*vma;
-};
-
 #ifndef CONFIG_MMU
-extern struct rb_root nommu_vma_tree;
-extern struct rw_semaphore nommu_vma_sem;
+extern struct rb_root nommu_region_tree;
+extern struct rw_semaphore nommu_region_sem;
 
 extern unsigned int kobjsize(const void *objp);
 #endif
 
 /*
- * vm_flags..
+ * vm_flags in vm_area_struct, see mm_types.h.
  */
 #define VM_READ		0x00000001	/* currently active flags */
 #define VM_WRITE	0x00000002
@@ -162,18 +94,18 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
 #define VM_RESERVED	0x00080000	/* Count as reserved_vm like IO */
 #define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
+#define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
 #define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
 #define VM_NONLINEAR	0x00800000	/* Is non-linear (remap_file_pages) */
 #define VM_MAPPED_COPY	0x01000000	/* T if mapped copy of data (nommu mmap) */
 #define VM_INSERTPAGE	0x02000000	/* The vma has had "vm_insert_page()" done on it */
 #define VM_ALWAYSDUMP	0x04000000	/* Always include in core dumps */
-#define VM_CAN_INVALIDATE 0x08000000	/* The mapping may be invalidated,
-					 * eg. truncate or invalidate_inode_*.
-					 * In this case, do_no_page must
-					 * return with the page locked.
-					 */
-#define VM_CAN_NONLINEAR 0x10000000	/* Has ->fault & does nonlinear pages */
+#define VM_CAN_NONLINEAR 0x08000000	/* Has ->fault & does nonlinear pages */
+#define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
+#define VM_SAO		0x20000000	/* Strong Access Ordering (powerpc) */
+#define VM_PFN_AT_MMAP	0x40000000	/* PFNMAP vma that is fully mapped at mmap time */
+#define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */
 
 #ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
@@ -192,29 +124,57 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_RandomReadHint(v)		((v)->vm_flags & VM_RAND_READ)
 
 /*
+ * special vmas that are non-mergable, non-mlock()able
+ */
+#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)
+
+/*
  * mapping from the currently active vm_flags protection bits (the
  * low four bits) to a page protection mask..
  */
 extern pgprot_t protection_map[16];
 
-#define FAULT_FLAG_WRITE	0x01
-#define FAULT_FLAG_NONLINEAR	0x02
+#define FAULT_FLAG_WRITE	0x01	/* Fault was a write access */
+#define FAULT_FLAG_NONLINEAR	0x02	/* Fault was via a nonlinear mapping */
+#define FAULT_FLAG_MKWRITE	0x04	/* Fault was mkwrite of existing pte */
 
 /*
- * fault_data is filled in the pagefault handler and passed to the
- * vma's ->fault function. That function is responsible for filling in
- * 'type', which is the type of fault if a page is returned, or the type
- * of error if NULL is returned.
+ * This interface is used by x86 PAT code to identify a pfn mapping that is
+ * linear over entire vma. This is to optimize PAT code that deals with
+ * marking the physical region with a particular prot. This is not for generic
+ * mm use. Note also that this check will not work if the pfn mapping is
+ * linear for a vma starting at physical address 0. In which case PAT code
+ * falls back to slow path of reserving physical range page by page.
+ */
+static inline int is_linear_pfn_mapping(struct vm_area_struct *vma)
+{
+	return (vma->vm_flags & VM_PFN_AT_MMAP);
+}
+
+static inline int is_pfn_mapping(struct vm_area_struct *vma)
+{
+	return (vma->vm_flags & VM_PFNMAP);
+}
+
+/*
+ * vm_fault is filled by the pagefault handler and passed to the vma's
+ * ->fault function. The vma's ->fault is responsible for returning a bitmask
+ * of VM_FAULT_xxx flags that give details about how the fault was handled.
  *
- * pgoff should be used in favour of address, if possible. If pgoff is
- * used, one may set VM_CAN_NONLINEAR in the vma->vm_flags to get
- * nonlinear mapping support.
+ * pgoff should be used in favour of virtual_address, if possible. If pgoff
+ * is used, one may set VM_CAN_NONLINEAR in the vma->vm_flags to get nonlinear
+ * mapping support.
  */
-struct fault_data {
-	unsigned long address;
-	pgoff_t pgoff;
-	unsigned int flags;
-	int type;
+struct vm_fault {
+	unsigned int flags;		/* FAULT_FLAG_xxx flags */
+	pgoff_t pgoff;			/* Logical page offset based on vma */
+	void __user *virtual_address;	/* Faulting virtual address */
+
+	struct page *page;		/* ->fault handlers should return a
+					 * page here, unless VM_FAULT_NOPAGE
+					 * is set (which is also implied by
+					 * VM_FAULT_ERROR).
+					 */
 };
 
 /*
@@ -225,21 +185,37 @@ struct fault_data {
 struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	void (*close)(struct vm_area_struct * area);
-	struct page *(*fault)(struct vm_area_struct *vma,
-			struct fault_data *fdata);
-	struct page *(*nopage)(struct vm_area_struct *area,
-			unsigned long address, int *type);
-	unsigned long (*nopfn)(struct vm_area_struct *area,
-			unsigned long address);
-	int (*populate)(struct vm_area_struct *area, unsigned long address,
-			unsigned long len, pgprot_t prot, unsigned long pgoff,
-			int nonblock);
+	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
 
	/* notification that a previously read-only page is about to become
	 * writable, if an error is returned it will cause a SIGBUS */
-	int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page);
+	int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);
+
+	/* called by access_process_vm when get_user_pages() fails, typically
+	 * for use by special VMAs that can switch between memory and hardware
+	 */
+	int (*access)(struct vm_area_struct *vma, unsigned long addr,
+		      void *buf, int len, int write);
 #ifdef CONFIG_NUMA
+	/*
+	 * set_policy() op must add a reference to any non-NULL @new mempolicy
+	 * to hold the policy upon return. Caller should pass NULL @new to
+	 * remove a policy and fall back to surrounding context--i.e. do not
+	 * install a MPOL_DEFAULT policy, nor the task or system default
+	 * mempolicy.
+	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
+
+	/*
+	 * get_policy() op must add reference [mpol_get()] to any policy at
+	 * (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure
+	 * in mm/mempolicy.c will do this automatically.
+	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
+	 * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
+	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
+	 * must return NULL--i.e., do not "fallback" to task or system default
+	 * policy.
+	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr);
	int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
@@ -259,12 +235,6 @@ struct inode;
  */
 #include <linux/page-flags.h>
 
-#ifdef CONFIG_DEBUG_VM
-#define VM_BUG_ON(cond) BUG_ON(cond)
-#else
-#define VM_BUG_ON(condition) do { } while(0)
-#endif
-
 /*
  * Methods to modify the page usage count.
  *
@@ -293,10 +263,40 @@ static inline int put_page_testzero(struct page *page)
  */
 static inline int get_page_unless_zero(struct page *page)
 {
-	VM_BUG_ON(PageCompound(page));
	return atomic_inc_not_zero(&page->_count);
 }
 
+extern int page_is_ram(unsigned long pfn);
+
+/* Support for virtually mapped pages */
+struct page *vmalloc_to_page(const void *addr);
+unsigned long vmalloc_to_pfn(const void *addr);
+
+/*
+ * Determine if an address is within the vmalloc range
+ *
+ * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
+ * is no special casing required.
+ */
+static inline int is_vmalloc_addr(const void *x)
+{
+#ifdef CONFIG_MMU
+	unsigned long addr = (unsigned long)x;
+
+	return addr >= VMALLOC_START && addr < VMALLOC_END;
+#else
+	return 0;
+#endif
+}
+#ifdef CONFIG_MMU
+extern int is_vmalloc_or_module_addr(const void *x);
+#else
+static inline int is_vmalloc_or_module_addr(const void *x)
+{
+	return 0;
+}
+#endif
+
 static inline struct page *compound_head(struct page *page)
 {
	if (unlikely(PageTail(page)))
@@ -442,11 +442,11 @@ static inline void set_compound_order(struct page *page, unsigned long order)
  * we have run out of space and have to fall back to an
  * alternate (slower) way of determining the node.
  *
- * No sparsemem:                      |       NODE     | ZONE | ... | FLAGS |
- * with space for node:               | SECTION | NODE | ZONE | ... | FLAGS |
- * no space for node:                 | SECTION |     ZONE    | ... | FLAGS |
+ * No sparsemem or sparsemem vmemmap: |       NODE     | ZONE | ... | FLAGS |
+ * classic sparse with space for node:| SECTION | NODE | ZONE | ... | FLAGS |
+ * classic sparse no space for node:  | SECTION |     ZONE    | ... | FLAGS |
  */
-#ifdef CONFIG_SPARSEMEM
+#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
 #define SECTIONS_WIDTH		SECTIONS_SHIFT
 #else
 #define SECTIONS_WIDTH		0
@@ -454,9 +454,12 @@ static inline void set_compound_order(struct page *page, unsigned long order)
 
 #define ZONES_WIDTH		ZONES_SHIFT
 
-#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= FLAGS_RESERVED
+#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
 #define NODES_WIDTH		NODES_SHIFT
 #else
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+#error "Vmemmap: No space for nodes field in page flags"
+#endif
 #define NODES_WIDTH		0
 #endif
 
@@ -499,8 +502,8 @@ static inline void set_compound_order(struct page *page, unsigned long order)
 
 #define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))
 
-#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED
-#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED
+#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
+#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
 #endif
 
 #define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
@@ -549,10 +552,12 @@ static inline struct zone *page_zone(struct page *page)
	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
 }
 
+#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
 static inline unsigned long page_to_section(struct page *page)
 {
	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
 }
+#endif
 
 static inline void set_page_zone(struct page *page, enum zone_type zone)
 {
@@ -618,13 +623,22 @@ void page_address_init(void);
 /*
  * On an anonymous page mapped into a user virtual memory area,
  * page->mapping points to its anon_vma, not to a struct address_space;
- * with the PAGE_MAPPING_ANON bit set to distinguish it.
+ * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h.
+ *
+ * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
+ * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
+ * and then page->mapping points, not to an anon_vma, but to a private
+ * structure which KSM associates with that merged page. See ksm.h.
+ *
+ * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
  *
  * Please note that, confusingly, "page_mapping" refers to the inode
  * address_space which maps the page from disk; whereas "page_mapped"
  * refers to user virtual address space into which the page is mapped.
  */
 #define PAGE_MAPPING_ANON	1
+#define PAGE_MAPPING_KSM	2
+#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
 
 extern struct address_space swapper_space;
 static inline struct address_space *page_mapping(struct page *page)
@@ -634,15 +648,17 @@ static inline struct address_space *page_mapping(struct page *page)
	VM_BUG_ON(PageSlab(page));
	if (unlikely(PageSwapCache(page)))
		mapping = &swapper_space;
-#ifdef CONFIG_SLUB
-	else if (unlikely(PageSlab(page)))
-		mapping = NULL;
-#endif
	else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
		mapping = NULL;
	return mapping;
 }
 
+/* Neutral page->mapping pointer to address_space or anon_vma or other */
+static inline void *page_rmapping(struct page *page)
+{
+	return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
+}
+
 static inline int PageAnon(struct page *page)
 {
	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
@@ -683,64 +699,35 @@ static inline int page_mapped(struct page *page)
 }
 
 /*
- * Error return values for the *_nopage functions
- */
-#define NOPAGE_SIGBUS	(NULL)
-#define NOPAGE_OOM	((struct page *) (-1))
-
-/*
- * Error return values for the *_nopfn functions
- */
-#define NOPFN_SIGBUS	((unsigned long) -1)
-#define NOPFN_OOM	((unsigned long) -2)
-#define NOPFN_REFAULT	((unsigned long) -3)
-
-/*
  * Different kinds of faults, as returned by handle_mm_fault().
  * Used to decide whether a process gets delivered SIGBUS or
  * just gets major/minor fault counters bumped up.
  */
-#define VM_FAULT_OOM	0x00
-#define VM_FAULT_SIGBUS	0x01
-#define VM_FAULT_MINOR	0x02
-#define VM_FAULT_MAJOR	0x03
-
-/*
- * Special case for get_user_pages.
- * Must be in a distinct bit from the above VM_FAULT_ flags.
+
+#define VM_FAULT_MINOR	0 /* For backwards compat. Remove me quickly. */
+
+#define VM_FAULT_OOM	0x0001
+#define VM_FAULT_SIGBUS	0x0002
+#define VM_FAULT_MAJOR	0x0004
+#define VM_FAULT_WRITE	0x0008	/* Special case for get_user_pages */
+#define VM_FAULT_HWPOISON 0x0010	/* Hit poisoned page */
+
+#define VM_FAULT_NOPAGE	0x0100	/* ->fault installed the pte, not return page */
+#define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */
+
+#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON)
+
+/*
+ * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
  */
-#define VM_FAULT_WRITE	0x10
+extern void pagefault_out_of_memory(void);
 
 #define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)
 
 extern void show_free_areas(void);
 
-#ifdef CONFIG_SHMEM
-int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new);
-struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
-					unsigned long addr);
 int shmem_lock(struct file *file, int lock, struct user_struct *user);
-#else
-static inline int shmem_lock(struct file *file, int lock,
-			     struct user_struct *user)
-{
-	return 0;
-}
-
-static inline int shmem_set_policy(struct vm_area_struct *vma,
-				   struct mempolicy *new)
-{
-	return 0;
-}
-
-static inline struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
-						 unsigned long addr)
-{
-	return NULL;
-}
-#endif
-struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags);
-
+struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags);
 int shmem_zero_setup(struct vm_area_struct *);
 
 #ifndef CONFIG_MMU
@@ -767,23 +754,55 @@ struct zap_details {
	unsigned long truncate_count;	/* Compare vm_truncate_count */
 };
 
-struct page *vm_normal_page(struct vm_area_struct *, unsigned long, pte_t);
+struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+		pte_t pte);
+
+int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
+		unsigned long size);
 unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
		unsigned long size, struct zap_details *);
 unsigned long unmap_vmas(struct mmu_gather **tlb,
		struct vm_area_struct *start_vma, unsigned long start_addr,
		unsigned long end_addr, unsigned long *nr_accounted,
		struct zap_details *);
-void free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
+
+/**
+ * mm_walk - callbacks for walk_page_range
+ * @pgd_entry: if set, called for each non-empty PGD (top-level) entry
+ * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
+ * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
+ * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
+ * @pte_hole: if set, called for each hole at all levels
+ * @hugetlb_entry: if set, called for each hugetlb entry
+ *
+ * (see walk_page_range for more details)
+ */
+struct mm_walk {
+	int (*pgd_entry)(pgd_t *, unsigned long, unsigned long, struct mm_walk *);
+	int (*pud_entry)(pud_t *, unsigned long, unsigned long, struct mm_walk *);
+	int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *);
+	int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *);
+	int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *);
+	int (*hugetlb_entry)(pte_t *, unsigned long, unsigned long,
+			     struct mm_walk *);
+	struct mm_struct *mm;
+	void *private;
+};
+
+int walk_page_range(unsigned long addr, unsigned long end,
+		struct mm_walk *walk);
+void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
		unsigned long end, unsigned long floor, unsigned long ceiling);
-void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *start_vma,
-		unsigned long floor, unsigned long ceiling);
 int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
			struct vm_area_struct *vma);
-int zeromap_page_range(struct vm_area_struct *vma, unsigned long from,
-			unsigned long size, pgprot_t prot);
 void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows);
+int follow_pfn(struct vm_area_struct *vma, unsigned long address,
+	unsigned long *pfn);
+int follow_phys(struct vm_area_struct *vma, unsigned long address,
+		unsigned int flags, unsigned long *prot, resource_size_t *phys);
+int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
			void *buf, int len, int write);
 
 static inline void unmap_shared_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen)
@@ -791,26 +810,22 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
	unmap_mapping_range(mapping, holebegin, holelen, 0);
 }
 
-extern int vmtruncate(struct inode * inode, loff_t offset);
-extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end);
-extern int install_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, struct page *page, pgprot_t prot);
-extern int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long pgoff, pgprot_t prot);
+extern void truncate_pagecache(struct inode *inode, loff_t old, loff_t new);
+extern int vmtruncate(struct inode *inode, loff_t offset);
+extern int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end);
 
-#ifdef CONFIG_MMU
-extern int __handle_mm_fault(struct mm_struct *mm,struct vm_area_struct *vma,
-			unsigned long address, int write_access);
+int truncate_inode_page(struct address_space *mapping, struct page *page);
+int generic_error_remove_page(struct address_space *mapping, struct page *page);
 
-static inline int handle_mm_fault(struct mm_struct *mm,
-			struct vm_area_struct *vma, unsigned long address,
-			int write_access)
-{
-	return __handle_mm_fault(mm, vma, address, write_access) &
-				(~VM_FAULT_WRITE);
-}
+int invalidate_inode_page(struct page *page);
+
+#ifdef CONFIG_MMU
+extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
 #else
 static inline int handle_mm_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
-			int write_access)
+			unsigned int flags)
 {
	/* should never happen if there's no MMU */
	BUG();
@@ -820,11 +835,13 @@ static inline int handle_mm_fault(struct mm_struct *mm,
 extern int make_pages_present(unsigned long addr, unsigned long end);
 extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
 
-void install_arg_page(struct vm_area_struct *, struct page *, unsigned long);
-int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
-		int len, int write, int force, struct page **pages, struct vm_area_struct **vmas);
-void print_bad_pte(struct vm_area_struct *, pte_t, unsigned long);
+int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
			unsigned long start, int nr_pages, int write, int force,
			struct page **pages, struct vm_area_struct **vmas);
+int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages);
+struct page *get_dump_page(unsigned long addr);
 
 extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
 extern void do_invalidatepage(struct page *page, unsigned long offset);
@@ -833,13 +850,134 @@ int __set_page_dirty_nobuffers(struct page *page);
 int __set_page_dirty_no_writeback(struct page *page);
 int redirty_page_for_writepage(struct writeback_control *wbc,
				struct page *page);
-int FASTCALL(set_page_dirty(struct page *page));
+void account_page_dirtied(struct page *page, struct address_space *mapping);
+int set_page_dirty(struct page *page);
 int set_page_dirty_lock(struct page *page);
 int clear_page_dirty_for_io(struct page *page);
 
+extern unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len);
 extern unsigned long do_mremap(unsigned long addr,
			       unsigned long old_len, unsigned long new_len,
			       unsigned long flags, unsigned long new_addr);
+extern int mprotect_fixup(struct vm_area_struct *vma,
			  struct vm_area_struct **pprev, unsigned long start,
			  unsigned long end, unsigned long newflags);
+
+/*
+ * doesn't attempt to fault and will return short.
+ */
+int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages);
+
+/*
+ * per-process(per-mm_struct) statistics.
+ */
+#if defined(SPLIT_RSS_COUNTING)
+/*
+ * The mm counters are not protected by its page_table_lock,
+ * so must be incremented atomically.
+ */
+static inline void set_mm_counter(struct mm_struct *mm, int member, long value)
+{
+	atomic_long_set(&mm->rss_stat.count[member], value);
+}
+
+unsigned long get_mm_counter(struct mm_struct *mm, int member);
+
+static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
+{
+	atomic_long_add(value, &mm->rss_stat.count[member]);
+}
+
+static inline void inc_mm_counter(struct mm_struct *mm, int member)
+{
+	atomic_long_inc(&mm->rss_stat.count[member]);
+}
+
+static inline void dec_mm_counter(struct mm_struct *mm, int member)
+{
+	atomic_long_dec(&mm->rss_stat.count[member]);
+}
+
+#else  /* !USE_SPLIT_PTLOCKS */
+/*
+ * The mm counters are protected by its page_table_lock,
+ * so can be incremented directly.
+ */
+static inline void set_mm_counter(struct mm_struct *mm, int member, long value)
+{
+	mm->rss_stat.count[member] = value;
+}
+
+static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
+{
+	return mm->rss_stat.count[member];
+}
+
+static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
+{
+	mm->rss_stat.count[member] += value;
+}
+
+static inline void inc_mm_counter(struct mm_struct *mm, int member)
+{
+	mm->rss_stat.count[member]++;
+}
+
+static inline void dec_mm_counter(struct mm_struct *mm, int member)
+{
+	mm->rss_stat.count[member]--;
+}
+
+#endif /* !USE_SPLIT_PTLOCKS */
+
+static inline unsigned long get_mm_rss(struct mm_struct *mm)
+{
+	return get_mm_counter(mm, MM_FILEPAGES) +
+		get_mm_counter(mm, MM_ANONPAGES);
+}
+
+static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
+{
+	return max(mm->hiwater_rss, get_mm_rss(mm));
+}
+
+static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
+{
+	return max(mm->hiwater_vm, mm->total_vm);
+}
+
+static inline void update_hiwater_rss(struct mm_struct *mm)
+{
+	unsigned long _rss = get_mm_rss(mm);
+
+	if ((mm)->hiwater_rss < _rss)
+		(mm)->hiwater_rss = _rss;
+}
+
+static inline void update_hiwater_vm(struct mm_struct *mm)
+{
+	if (mm->hiwater_vm < mm->total_vm)
+		mm->hiwater_vm = mm->total_vm;
+}
+
+static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
					 struct mm_struct *mm)
+{
+	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
+
+	if (*maxrss < hiwater_rss)
+		*maxrss = hiwater_rss;
+}
+
+#if defined(SPLIT_RSS_COUNTING)
+void sync_mm_rss(struct task_struct *task, struct mm_struct *mm);
+#else
+static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
+{
+}
+#endif
 
 /*
  * A callback you can register to apply pressure to ageable caches.
@@ -868,40 +1006,9 @@ struct shrinker {
 extern void register_shrinker(struct shrinker *);
 extern void unregister_shrinker(struct shrinker *);
 
-/*
- * Some shared mappings will want the pages marked read-only
- * to track write events. If so, we'll downgrade vm_page_prot
- * to the private version (using protection_map[] without the
- * VM_SHARED bit).
- */
-static inline int vma_wants_writenotify(struct vm_area_struct *vma)
-{
-	unsigned int vm_flags = vma->vm_flags;
+int vma_wants_writenotify(struct vm_area_struct *vma);
 
-	/* If it was private or non-writable, the write bit is already clear */
-	if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
-		return 0;
-
-	/* The backer wishes to know when pages are first written to? */
-	if (vma->vm_ops && vma->vm_ops->page_mkwrite)
-		return 1;
-
-	/* The open routine did something to the protections already? */
-	if (pgprot_val(vma->vm_page_prot) !=
-	    pgprot_val(protection_map[vm_flags &
-		    (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]))
-		return 0;
-
-	/* Specialty mapping? */
-	if (vm_flags & (VM_PFNMAP|VM_INSERTPAGE))
-		return 0;
-
-	/* Can the mapping track the dirty pages? */
-	return vma->vm_file && vma->vm_file->f_mapping &&
-		mapping_cap_account_dirty(vma->vm_file->f_mapping);
-}
-
-extern pte_t *FASTCALL(get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl));
+extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
 
 #ifdef __PAGETABLE_PUD_FOLDED
 static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
@@ -944,7 +1051,7 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
 }
 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
 
-#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+#if USE_SPLIT_PTLOCKS
 /*
  * We tuck a spinlock to guard each pagetable page into its struct page,
  * at page->private, with BUILD_BUG_ON to make sure that this will not
@@ -957,14 +1064,26 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
 } while (0)
 #define pte_lock_deinit(page)	((page)->mapping = NULL)
 #define pte_lockptr(mm, pmd)	({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));})
-#else
+#else	/* !USE_SPLIT_PTLOCKS */
 /*
  * We use mm->page_table_lock to guard all pagetable pages of the mm.
  */
 #define pte_lock_init(page)	do {} while (0)
 #define pte_lock_deinit(page)	do {} while (0)
 #define pte_lockptr(mm, pmd)	({(void)(pmd); &(mm)->page_table_lock;})
-#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+#endif /* USE_SPLIT_PTLOCKS */
+
+static inline void pgtable_page_ctor(struct page *page)
+{
+	pte_lock_init(page);
+	inc_zone_page_state(page, NR_PAGETABLE);
+}
+
+static inline void pgtable_page_dtor(struct page *page)
+{
+	pte_lock_deinit(page);
+	dec_zone_page_state(page, NR_PAGETABLE);
+}
 
 #define pte_offset_map_lock(mm, pmd, address, ptlp)	\
 ({							\
@@ -993,9 +1112,8 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
	NULL: pte_offset_kernel(pmd, address))
 
 extern void free_area_init(unsigned long * zones_size);
-extern void free_area_init_node(int nid, pg_data_t *pgdat,
-	unsigned long * zones_size, unsigned long zone_start_pfn,
-	unsigned long *zholes_size);
+extern void free_area_init_node(int nid, unsigned long * zones_size,
		unsigned long zone_start_pfn, unsigned long *zholes_size);
 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
 /*
  * With CONFIG_ARCH_POPULATES_NODE_MAP set, an architecture may initialise its
@@ -1027,38 +1145,62 @@ extern void free_area_init_node(int nid, pg_data_t *pgdat,
 extern void free_area_init_nodes(unsigned long *max_zone_pfn);
 extern void add_active_range(unsigned int nid, unsigned long start_pfn,
					unsigned long end_pfn);
-extern void shrink_active_range(unsigned int nid, unsigned long old_end_pfn,
-						unsigned long new_end_pfn);
-extern void push_node_boundaries(unsigned int nid, unsigned long start_pfn,
+extern void remove_active_range(unsigned int nid, unsigned long start_pfn,
					unsigned long end_pfn);
 extern void remove_all_active_ranges(void);
+void sort_node_map(void);
+unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
						unsigned long end_pfn);
 extern unsigned long absent_pages_in_range(unsigned long start_pfn,
						unsigned long end_pfn);
 extern void get_pfn_range_for_nid(unsigned int nid,
			unsigned long *start_pfn, unsigned long *end_pfn);
 extern unsigned long find_min_pfn_with_active_regions(void);
-extern unsigned long find_max_pfn_with_active_regions(void);
 extern void free_bootmem_with_active_regions(int nid,
						unsigned long max_low_pfn);
+int add_from_early_node_map(struct range *range, int az,
				   int nr_range, int nid);
+void *__alloc_memory_core_early(int nodeid, u64 size, u64 align,
				 u64 goal, u64 limit);
+typedef int (*work_fn_t)(unsigned long, unsigned long, void *);
+extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data);
 extern void sparse_memory_present_with_active_regions(int nid);
-#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
-extern int early_pfn_to_nid(unsigned long pfn);
-#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
+
+#if !defined(CONFIG_ARCH_POPULATES_NODE_MAP) && \
+    !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
+static inline int __early_pfn_to_nid(unsigned long pfn)
+{
+	return 0;
+}
+#else
+/* please see mm/page_alloc.c */
+extern int __meminit early_pfn_to_nid(unsigned long pfn);
+#ifdef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
+/* there is a per-arch backend function. */
+extern int __meminit __early_pfn_to_nid(unsigned long pfn);
+#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
+#endif
+
 extern void set_dma_reserve(unsigned long new_dma_reserve);
 extern void memmap_init_zone(unsigned long, int, unsigned long,
				unsigned long, enum memmap_context);
-extern void setup_per_zone_pages_min(void);
+extern void setup_per_zone_wmarks(void);
+extern void calculate_zone_inactive_ratio(struct zone *zone);
 extern void mem_init(void);
+extern void __init mmap_init(void);
 extern void show_mem(void);
 extern void si_meminfo(struct sysinfo * val);
 extern void si_meminfo_node(struct sysinfo *val, int nid);
+extern int after_bootmem;
 
-#ifdef CONFIG_NUMA
 extern void setup_per_cpu_pageset(void);
-#else
-static inline void setup_per_cpu_pageset(void) {}
-#endif
+
+extern void zone_pcp_update(struct zone *zone);
+
+/* nommu.c */
+extern atomic_long_t mmap_pages_allocated;
+extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
 
 /* prio_tree.c */
 void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old);
@@ -1079,8 +1221,8 @@ static inline void vma_nonlinear_insert(struct vm_area_struct *vma,
 }
 
 /* mmap.c */
-extern int __vm_enough_memory(long pages, int cap_sys_admin);
-extern void vma_adjust(struct vm_area_struct *vma, unsigned long start,
+extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
+extern int vma_adjust(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert);
 extern struct vm_area_struct *vma_merge(struct mm_struct *,
	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
@@ -1096,6 +1238,22 @@ extern void unlink_file_vma(struct vm_area_struct *);
 extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
	unsigned long addr, unsigned long len, pgoff_t pgoff);
 extern void exit_mmap(struct mm_struct *);
+
+extern int mm_take_all_locks(struct mm_struct *mm);
+extern void mm_drop_all_locks(struct mm_struct *mm);
+
+#ifdef CONFIG_PROC_FS
+/* From fs/proc/base.c. callers must _not_ hold the mm's exe_file_lock */
+extern void added_exe_file_vma(struct mm_struct *mm);
+extern void removed_exe_file_vma(struct mm_struct *mm);
+#else
+static inline void added_exe_file_vma(struct mm_struct *mm)
+{}
+
+static inline void removed_exe_file_vma(struct mm_struct *mm)
+{}
+#endif /* CONFIG_PROC_FS */
+
 extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);
 extern int install_special_mapping(struct mm_struct *mm,
				   unsigned long addr, unsigned long len,
@@ -1108,8 +1266,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long flag, unsigned long pgoff);
 extern unsigned long mmap_region(struct file *file, unsigned long addr,
	unsigned long len, unsigned long flags,
-	unsigned int vm_flags, unsigned long pgoff,
-	int accountable);
+	unsigned int vm_flags, unsigned long pgoff);
 
 static inline unsigned long do_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
@@ -1135,39 +1292,44 @@ extern void truncate_inode_pages_range(struct address_space *,
				       loff_t lstart, loff_t lend);
 
 /* generic vm_area_ops exported for stackable file systems */
-extern struct page *filemap_fault(struct vm_area_struct *, struct fault_data *);
-extern struct page * __deprecated_for_modules
-filemap_nopage(struct vm_area_struct *, unsigned long, int *);
-extern int __deprecated_for_modules filemap_populate(struct vm_area_struct *,
-		unsigned long, unsigned long, pgprot_t, unsigned long, int);
+extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
 
 /* mm/page-writeback.c */
 int write_one_page(struct page *page, int wait);
+void task_dirty_inc(struct task_struct *tsk);
 
 /* readahead.c */
 #define VM_MAX_READAHEAD	128	/* kbytes */
 #define VM_MIN_READAHEAD	16	/* kbytes (includes current page) */
-#define VM_MAX_CACHE_HIT	256	/* max pages in a row in cache before
-					 * turning readahead off */
 
-int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read);
 int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read);
-unsigned long page_cache_readahead(struct address_space *mapping,
			  struct file_ra_state *ra,
			  struct file *filp,
			  pgoff_t offset,
			  unsigned long size);
-void handle_ra_miss(struct address_space *mapping,
		    struct file_ra_state *ra, pgoff_t offset);
+
+void page_cache_sync_readahead(struct address_space *mapping,
			       struct file_ra_state *ra,
			       struct file *filp,
			       pgoff_t offset,
			       unsigned long size);
+
+void page_cache_async_readahead(struct address_space *mapping,
				struct file_ra_state *ra,
				struct file *filp,
				struct page *pg,
				pgoff_t offset,
				unsigned long size);
+
 unsigned long max_sane_readahead(unsigned long nr);
+unsigned long ra_submit(struct file_ra_state *ra,
			struct address_space *mapping,
			struct file *filp);
 
 /* Do stack extension */
 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
 #ifdef CONFIG_IA64
 extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
 #endif
+extern int expand_stack_downwards(struct vm_area_struct *vma,
				  unsigned long address);
 
 /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
 extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
@@ -1192,22 +1354,23 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
 
 pgprot_t vm_get_page_prot(unsigned long vm_flags);
 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
-struct page *vmalloc_to_page(void *addr);
-unsigned long vmalloc_to_pfn(void *addr);
 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
			unsigned long pfn, unsigned long size, pgprot_t);
 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn);
+int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn);
 
 struct page *follow_page(struct vm_area_struct *, unsigned long address,
			unsigned int foll_flags);
 #define FOLL_WRITE	0x01	/* check pte is writable */
 #define FOLL_TOUCH	0x02	/* mark page accessed */
 #define FOLL_GET	0x04	/* do get_page on page */
-#define FOLL_ANON	0x08	/* give ZERO_PAGE if no pgtable */
+#define FOLL_DUMP	0x08	/* give error on hole if it would be zero */
+#define FOLL_FORCE	0x10	/* get_user_pages read/write w/o permission */
 
-typedef int (*pte_fn_t)(pte_t *pte, struct page *pmd_page, unsigned long addr,
+typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
			void *data);
 extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
			       unsigned long size, pte_fn_t fn, void *data);
@@ -1221,9 +1384,27 @@ static inline void vm_stat_account(struct mm_struct *mm,
 }
 #endif /* CONFIG_PROC_FS */
 
-#ifndef CONFIG_DEBUG_PAGEALLOC
+#ifdef CONFIG_DEBUG_PAGEALLOC
+extern int debug_pagealloc_enabled;
+
+extern void kernel_map_pages(struct page *page, int numpages, int enable);
+
+static inline void enable_debug_pagealloc(void)
+{
+	debug_pagealloc_enabled = 1;
+}
+#ifdef CONFIG_HIBERNATION
+extern bool kernel_page_present(struct page *page);
+#endif /* CONFIG_HIBERNATION */
+#else
 static inline void
 kernel_map_pages(struct page *page, int numpages, int enable) {}
+static inline void enable_debug_pagealloc(void)
+{
+}
+#ifdef CONFIG_HIBERNATION
+static inline bool kernel_page_present(struct page *page) { return true; }
+#endif /* CONFIG_HIBERNATION */
 #endif
 
 extern struct vm_area_struct *get_gate_vma(struct task_struct *tsk);
@@ -1235,12 +1416,10 @@ int in_gate_area_no_task(unsigned long addr);
 #define in_gate_area(task, addr) ({(void)task; in_gate_area_no_task(addr);})
 #endif	/* __HAVE_ARCH_GATE_AREA */
 
-int drop_caches_sysctl_handler(struct ctl_table *, int, struct file *,
+int drop_caches_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
 unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
			unsigned long lru_pages);
-void drop_pagecache(void);
-void drop_slab(void);
 
 #ifndef CONFIG_MMU
 #define randomize_va_space 0
@@ -1248,7 +1427,45 @@ void drop_slab(void);
 extern int randomize_va_space;
 #endif
 
-__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma);
+const char * arch_vma_name(struct vm_area_struct *vma);
+void print_vma_addr(char *prefix, unsigned long rip);
+
+void sparse_mem_maps_populate_node(struct page **map_map,
				   unsigned long pnum_begin,
				   unsigned long pnum_end,
				   unsigned long map_count,
				   int nodeid);
+
+struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
+pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
+pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
+pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
+pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
+void *vmemmap_alloc_block(unsigned long size, int node);
+void *vmemmap_alloc_block_buf(unsigned long size, int node);
+void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
+int vmemmap_populate_basepages(struct page *start_page,
						unsigned long pages, int node);
+int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
+void vmemmap_populate_print_last(void);
+
+extern int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
				 size_t size);
+extern void refund_locked_memory(struct mm_struct *mm, size_t size);
+
+enum mf_flags {
+	MF_COUNT_INCREASED = 1 << 0,
+};
+extern void memory_failure(unsigned long pfn, int trapno);
+extern int __memory_failure(unsigned long pfn, int trapno, int flags);
+extern int unpoison_memory(unsigned long pfn);
+extern int sysctl_memory_failure_early_kill;
+extern int sysctl_memory_failure_recovery;
+extern void shake_page(struct page *p, int access);
+extern atomic_long_t mce_bad_pages;
+extern int soft_offline_page(struct page *page, int flags);
+
+extern void dump_page(struct page *page);
 
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
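
Usage sketches for the main interfaces this diff introduces follow. First, the fault path: nopage/nopfn/populate collapse into a single ->fault method, which hands the page back through struct vm_fault and reports status through VM_FAULT_* bits instead of magic return pointers. A minimal sketch of a converted driver handler; my_dev_lookup_page is a hypothetical helper that finds the backing page for an offset:

static int my_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page;

	/* pgoff is preferred over vmf->virtual_address (see comment above) */
	page = my_dev_lookup_page(vma->vm_private_data, vmf->pgoff);
	if (!page)
		return VM_FAULT_SIGBUS;		/* was NOPAGE_SIGBUS */

	get_page(page);				/* caller expects a reference */
	vmf->page = page;			/* page out via vm_fault... */
	return 0;				/* ...status via VM_FAULT_xxx bits */
}

static const struct vm_operations_struct my_vm_ops = {
	.fault	= my_fault,
};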
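
The mm_walk/walk_page_range pair replaces open-coded four-level page-table loops. A sketch that counts present PTEs in a range, assuming the caller already holds mm->mmap_sem as walk_page_range requires:

static int count_pte(pte_t *pte, unsigned long addr, unsigned long end,
		     struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	if (pte_present(*pte))
		(*count)++;
	return 0;			/* a non-zero return aborts the walk */
}

static unsigned long count_present_ptes(struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
	unsigned long count = 0;
	struct mm_walk walk = {
		.pte_entry	= count_pte,
		.mm		= mm,
		.private	= &count,
	};

	walk_page_range(start, end, &walk);
	return count;
}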
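
get_user_pages_fast() pins pages of current->mm without taking mmap_sem in the common case; it returns the number of pages pinned, which may be short. A sketch of the usual pin-or-undo pattern for preparing a user buffer for DMA:

static int pin_user_buffer(unsigned long start, int nr_pages,
			   struct page **pages)
{
	int got, i;

	got = get_user_pages_fast(start, nr_pages, 1 /* write */, pages);
	if (got < 0)
		return got;			/* -errno */
	if (got < nr_pages) {
		/* partial pin: drop what we got and report failure */
		for (i = 0; i < got; i++)
			put_page(pages[i]);
		return -EFAULT;
	}
	return 0;
}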
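
pgtable_page_ctor()/pgtable_page_dtor() bundle split-ptlock setup with the NR_PAGETABLE statistic, and an architecture's pte-page allocator is expected to pair them around alloc/free. A sketch of that pairing, roughly as x86 does it (where pgtable_t is struct page *):

pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

	pte = alloc_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
	if (pte)
		pgtable_page_ctor(pte);	/* init ptl, bump NR_PAGETABLE */
	return pte;
}

void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	pgtable_page_dtor(pte);		/* deinit ptl, drop NR_PAGETABLE */
	__free_page(pte);
}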
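
The per-mm counter accessors hide whether the counters are atomics (SPLIT_RSS_COUNTING) or plain values under page_table_lock, so readers use the same calls either way. A sketch of a debug report in kilobytes:

static void report_mm_rss(struct mm_struct *mm)
{
	unsigned long rss = get_mm_rss(mm);	/* file + anon pages */

	update_hiwater_rss(mm);			/* fold in the current peak */
	printk(KERN_DEBUG "rss: %lu kB (hiwater %lu kB)\n",
	       rss << (PAGE_SHIFT - 10),
	       get_mm_hiwater_rss(mm) << (PAGE_SHIFT - 10));
}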
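
Finally, page_cache_sync_readahead()/page_cache_async_readahead() replace the old page_cache_readahead()/handle_ra_miss() pair: the sync variant is called on a cache miss, the async one when a lookup hits a page carrying the PG_readahead mark. A sketch of the lookup pattern, roughly what mm/filemap.c's read path does:

static struct page *lookup_with_readahead(struct file *filp, pgoff_t index,
					  unsigned long req_size)
{
	struct address_space *mapping = filp->f_mapping;
	struct page *page;

	page = find_get_page(mapping, index);
	if (!page) {
		/* miss: read around @index synchronously, then retry */
		page_cache_sync_readahead(mapping, &filp->f_ra, filp,
					  index, req_size);
		page = find_get_page(mapping, index);
	} else if (PageReadahead(page)) {
		/* hit a lookahead mark: pipeline the next window */
		page_cache_async_readahead(mapping, &filp->f_ra, filp,
					   page, index, req_size);
	}
	return page;	/* NULL if still not cached; caller falls back to I/O */
}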