X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;ds=inline;f=mm%2Fmemory.c;h=92a3ebd8d7951daff767f409836c8490cf283028;hb=39bbcb8f88154c4ac9853baf3f1134af4c987517;hp=fa941b169071d4573b3deb6bb0dd0cb2f2856c17;hpb=d08b3851da41d0ee60851f2c75b118e1f7a5fc89;p=safe%2Fjmp%2Flinux-2.6

diff --git a/mm/memory.c b/mm/memory.c
index fa941b1..92a3ebd 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -49,6 +49,7 @@
 #include <linux/module.h>
 #include <linux/delayacct.h>
 #include <linux/init.h>
+#include <linux/writeback.h>
 
 #include <asm/pgalloc.h>
 #include <asm/uaccess.h>
@@ -1226,7 +1227,12 @@ out:
 	return retval;
 }
 
-/*
+/**
+ * vm_insert_page - insert single page into user vma
+ * @vma: user vma to map to
+ * @addr: target user address of this page
+ * @page: source kernel page
+ *
  * This allows drivers to insert individual pages they've allocated
  * into a user vma.
  *
@@ -1318,7 +1324,16 @@ static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
 	return 0;
 }
 
-/* Note: this is only safe if the mm semaphore is held when called. */
+/**
+ * remap_pfn_range - remap kernel memory to userspace
+ * @vma: user vma to map to
+ * @addr: target user address to start at
+ * @pfn: physical address of kernel memory
+ * @size: size of map area
+ * @prot: page protection flags for this mapping
+ *
+ * Note: this is only safe if the mm semaphore is held when called.
+ */
 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 		    unsigned long pfn, unsigned long size, pgprot_t prot)
 {
@@ -1466,11 +1481,21 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto gotten;
 
 	/*
-	 * Only catch write-faults on shared writable pages, read-only
-	 * shared pages can get COWed by get_user_pages(.write=1, .force=1).
+	 * Take out anonymous pages first, anonymous shared vmas are
+	 * not dirty accountable.
 	 */
-	if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
+	if (PageAnon(old_page)) {
+		if (!TestSetPageLocked(old_page)) {
+			reuse = can_share_swap_page(old_page);
+			unlock_page(old_page);
+		}
+	} else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
 					(VM_WRITE|VM_SHARED))) {
+		/*
+		 * Only catch write-faults on shared writable pages,
+		 * read-only shared pages can get COWed by
+		 * get_user_pages(.write=1, .force=1).
+		 */
 		if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
 			/*
 			 * Notify the address space that the page is about to
@@ -1502,9 +1527,6 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			dirty_page = old_page;
 			get_page(dirty_page);
 			reuse = 1;
-	} else if (PageAnon(old_page) && !TestSetPageLocked(old_page)) {
-		reuse = can_share_swap_page(old_page);
-		unlock_page(old_page);
 	}
 
 	if (reuse) {
@@ -1571,7 +1593,7 @@ gotten:
 unlock:
 	pte_unmap_unlock(page_table, ptl);
 	if (dirty_page) {
-		set_page_dirty(dirty_page);
+		set_page_dirty_balance(dirty_page);
 		put_page(dirty_page);
 	}
 	return ret;
@@ -1793,9 +1815,10 @@ void unmap_mapping_range(struct address_space *mapping,
 }
 EXPORT_SYMBOL(unmap_mapping_range);
 
-/*
- * Handle all mappings that got truncated by a "truncate()"
- * system call.
+/**
+ * vmtruncate - unmap mappings "freed" by truncate() syscall
+ * @inode: inode of the file used
+ * @offset: file offset to start truncating
  *
  * NOTE! We have to be ready to update the memory sharing
  * between the file and the memory map for a potential last
@@ -1864,11 +1887,16 @@ int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
 }
 EXPORT_UNUSED_SYMBOL(vmtruncate_range);  /* June 2006 */
 
-/*
+/**
+ * swapin_readahead - swap in pages in hope we need them soon
+ * @entry: swap entry of this memory
+ * @addr: address to start
+ * @vma: user vma these addresses belong to
+ *
  * Primitive swap readahead code. We simply read an aligned block of
  * (1 << page_cluster) entries in the swap area. This method is chosen
  * because it doesn't cost us any seek time. We also make sure to queue
- * the 'original' request together with the readahead ones... 
+ * the 'original' request together with the readahead ones...
  *
  * This has been extended to use the NUMA policies from the mm triggering
  * the readahead.
@@ -2218,7 +2246,7 @@ retry:
 unlock:
	pte_unmap_unlock(page_table, ptl);
 	if (dirty_page) {
-		set_page_dirty(dirty_page);
+		set_page_dirty_balance(dirty_page);
 		put_page(dirty_page);
 	}
 	return ret;
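As background for the remap_pfn_range() kerneldoc added above, the typical caller is a driver's mmap() file operation. The sketch below is illustrative only and is not part of this patch: my_dev_buf, my_dev_buf_size and my_dev_mmap are hypothetical names, and the buffer is assumed to be page-aligned and physically contiguous (e.g. obtained with __get_free_pages()).

/*
 * Illustrative sketch -- not part of this patch.  A minimal mmap()
 * handler built on remap_pfn_range() as documented above; all
 * my_dev_* names are hypothetical.
 */
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <asm/io.h>

static void *my_dev_buf;		/* page-aligned, physically contiguous */
static unsigned long my_dev_buf_size;	/* multiple of PAGE_SIZE */

static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	if (size > my_dev_buf_size)
		return -EINVAL;

	/*
	 * The caller (the mmap() path) holds the mm semaphore, which
	 * satisfies the locking requirement noted in the kerneldoc.
	 */
	return remap_pfn_range(vma, vma->vm_start,
			       virt_to_phys(my_dev_buf) >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}

For pages allocated individually rather than as one contiguous region, vm_insert_page(), whose kerneldoc is also added by this patch, is the interface intended for that purpose.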