include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h
[safe/jmp/linux-2.6] / arch / powerpc / mm / mem.c
index 776ba6a..0f594d7 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/string.h>
+#include <linux/gfp.h>
 #include <linux/types.h>
 #include <linux/mm.h>
 #include <linux/stddef.h>
@@ -32,6 +33,7 @@
 #include <linux/pagemap.h>
 #include <linux/suspend.h>
 #include <linux/lmb.h>
+#include <linux/hugetlb.h>
 
 #include <asm/pgalloc.h>
 #include <asm/prom.h>
@@ -47,6 +49,7 @@
 #include <asm/sparsemem.h>
 #include <asm/vdso.h>
 #include <asm/fixmap.h>
+#include <asm/swiotlb.h>
 
 #include "mmu_decl.h"
 
@@ -57,7 +60,7 @@
 
 int init_bootmem_done;
 int mem_init_done;
-unsigned long memory_limit;
+phys_addr_t memory_limit;
 
 #ifdef CONFIG_HIGHMEM
 pte_t *kmap_pte;
@@ -75,11 +78,10 @@ static inline pte_t *virt_to_kpte(unsigned long vaddr)
 
 int page_is_ram(unsigned long pfn)
 {
-       unsigned long paddr = (pfn << PAGE_SHIFT);
-
 #ifndef CONFIG_PPC64   /* XXX for now */
-       return paddr < __pa(high_memory);
+       return pfn < max_pfn;
 #else
+       unsigned long paddr = (pfn << PAGE_SHIFT);
        int i;
        for (i=0; i < lmb.memory.cnt; i++) {
                unsigned long base;
@@ -103,8 +105,8 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);
 
        if (!page_is_ram(pfn))
-               vma_prot = __pgprot(pgprot_val(vma_prot)
-                                   | _PAGE_GUARDED | _PAGE_NO_CACHE);
+               vma_prot = pgprot_noncached(vma_prot);
+
        return vma_prot;
 }
 EXPORT_SYMBOL(phys_mem_access_prot);
@@ -133,25 +135,8 @@ int arch_add_memory(int nid, u64 start, u64 size)
        /* this should work for most non-highmem platforms */
        zone = pgdata->node_zones;
 
-       return __add_pages(zone, start_pfn, nr_pages);
+       return __add_pages(nid, zone, start_pfn, nr_pages);
 }
-
-#ifdef CONFIG_MEMORY_HOTREMOVE
-int remove_memory(u64 start, u64 size)
-{
-       unsigned long start_pfn, end_pfn;
-       int ret;
-
-       start_pfn = start >> PAGE_SHIFT;
-       end_pfn = start_pfn + (size >> PAGE_SHIFT);
-       ret = offline_pages(start_pfn, end_pfn, 120 * HZ);
-       if (ret)
-               goto out;
-       /* Arch-specific calls go here - next patch */
-out:
-       return ret;
-}
-#endif /* CONFIG_MEMORY_HOTREMOVE */
 #endif /* CONFIG_MEMORY_HOTPLUG */
 
 /*
@@ -161,8 +146,8 @@ out:
  * memory regions, find holes and callback for contiguous regions.
  */
 int
-walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg,
-                       int (*func)(unsigned long, unsigned long, void *))
+walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
+               void *arg, int (*func)(unsigned long, unsigned long, void *))
 {
        struct lmb_property res;
        unsigned long pfn, len;
@@ -184,46 +169,7 @@ walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg,
        }
        return ret;
 }
-EXPORT_SYMBOL_GPL(walk_memory_resource);
-
-void show_mem(void)
-{
-       unsigned long total = 0, reserved = 0;
-       unsigned long shared = 0, cached = 0;
-       unsigned long highmem = 0;
-       struct page *page;
-       pg_data_t *pgdat;
-       unsigned long i;
-
-       printk("Mem-info:\n");
-       show_free_areas();
-       for_each_online_pgdat(pgdat) {
-               unsigned long flags;
-               pgdat_resize_lock(pgdat, &flags);
-               for (i = 0; i < pgdat->node_spanned_pages; i++) {
-                       if (!pfn_valid(pgdat->node_start_pfn + i))
-                               continue;
-                       page = pgdat_page_nr(pgdat, i);
-                       total++;
-                       if (PageHighMem(page))
-                               highmem++;
-                       if (PageReserved(page))
-                               reserved++;
-                       else if (PageSwapCache(page))
-                               cached++;
-                       else if (page_count(page))
-                               shared += page_count(page) - 1;
-               }
-               pgdat_resize_unlock(pgdat, &flags);
-       }
-       printk("%ld pages of RAM\n", total);
-#ifdef CONFIG_HIGHMEM
-       printk("%ld pages of HIGHMEM\n", highmem);
-#endif
-       printk("%ld reserved pages\n", reserved);
-       printk("%ld pages shared\n", shared);
-       printk("%ld pages swap cached\n", cached);
-}
+EXPORT_SYMBOL_GPL(walk_system_ram_range);
 
 /*
  * Initialize the bootmem system and give it all the memory we
@@ -330,7 +276,7 @@ static int __init mark_nonram_nosave(void)
 void __init paging_init(void)
 {
        unsigned long total_ram = lmb_phys_mem_size();
-       unsigned long top_of_ram = lmb_end_of_DRAM();
+       phys_addr_t top_of_ram = lmb_end_of_DRAM();
        unsigned long max_zone_pfns[MAX_NR_ZONES];
 
 #ifdef CONFIG_PPC32
@@ -349,10 +295,10 @@ void __init paging_init(void)
        kmap_prot = PAGE_KERNEL;
 #endif /* CONFIG_HIGHMEM */
 
-       printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
-              top_of_ram, total_ram);
+       printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%lx\n",
+              (unsigned long long)top_of_ram, total_ram);
        printk(KERN_DEBUG "Memory hole size: %ldMB\n",
-              (top_of_ram - total_ram) >> 20);
+              (long int)((top_of_ram - total_ram) >> 20));
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
 #ifdef CONFIG_HIGHMEM
        max_zone_pfns[ZONE_DMA] = lowmem_end_addr >> PAGE_SHIFT;
@@ -376,6 +322,11 @@ void __init mem_init(void)
        struct page *page;
        unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;
 
+#ifdef CONFIG_SWIOTLB
+       if (ppc_swiotlb_enable)
+               swiotlb_init(1);
+#endif
+
        num_physpages = lmb.memory.size >> PAGE_SHIFT;
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
 
@@ -429,7 +380,7 @@ void __init mem_init(void)
 
        printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
               "%luk reserved, %luk data, %luk bss, %luk init)\n",
-               (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
+               nr_free_pages() << (PAGE_SHIFT-10),
                num_physpages << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
@@ -437,6 +388,23 @@ void __init mem_init(void)
                bsssize >> 10,
                initsize >> 10);
 
+#ifdef CONFIG_PPC32
+       pr_info("Kernel virtual memory layout:\n");
+       pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
+#ifdef CONFIG_HIGHMEM
+       pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
+               PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
+#endif /* CONFIG_HIGHMEM */
+#ifdef CONFIG_NOT_COHERENT_CACHE
+       pr_info("  * 0x%08lx..0x%08lx  : consistent mem\n",
+               IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE);
+#endif /* CONFIG_NOT_COHERENT_CACHE */
+       pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
+               ioremap_bot, IOREMAP_TOP);
+       pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
+               VMALLOC_START, VMALLOC_END);
+#endif /* CONFIG_PPC32 */
+
        mem_init_done = 1;
 }
 
@@ -457,18 +425,26 @@ EXPORT_SYMBOL(flush_dcache_page);
 
 void flush_dcache_icache_page(struct page *page)
 {
+#ifdef CONFIG_HUGETLB_PAGE
+       if (PageCompound(page)) {
+               flush_dcache_icache_hugepage(page);
+               return;
+       }
+#endif
 #ifdef CONFIG_BOOKE
-       void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
-       __flush_dcache_icache(start);
-       kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
+       {
+               void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
+               __flush_dcache_icache(start);
+               kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
+       }
 #elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
        /* On 8xx there is no need to kmap since highmem is not supported */
        __flush_dcache_icache(page_address(page)); 
 #else
        __flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
 #endif
-
 }
+
 void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
 {
        clear_page(page);
@@ -525,46 +501,13 @@ EXPORT_SYMBOL(flush_icache_user_range);
  * This must always be called with the pte lock held.
  */
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
-                     pte_t pte)
+                     pte_t *ptep)
 {
 #ifdef CONFIG_PPC_STD_MMU
        unsigned long access = 0, trap;
-#endif
-       unsigned long pfn = pte_pfn(pte);
-
-       /* handle i-cache coherency */
-       if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
-           !cpu_has_feature(CPU_FTR_NOEXECUTE) &&
-           pfn_valid(pfn)) {
-               struct page *page = pfn_to_page(pfn);
-#ifdef CONFIG_8xx
-               /* On 8xx, cache control instructions (particularly
-                * "dcbst" from flush_dcache_icache) fault as write
-                * operation if there is an unpopulated TLB entry
-                * for the address in question. To workaround that,
-                * we invalidate the TLB here, thus avoiding dcbst
-                * misbehaviour.
-                */
-               _tlbie(address, 0 /* 8xx doesn't care about PID */);
-#endif
-               /* The _PAGE_USER test should really be _PAGE_EXEC, but
-                * older glibc versions execute some code from no-exec
-                * pages, which for now we are supporting.  If exec-only
-                * pages are ever implemented, this will have to change.
-                */
-               if (!PageReserved(page) && (pte_val(pte) & _PAGE_USER)
-                   && !test_bit(PG_arch_1, &page->flags)) {
-                       if (vma->vm_mm == current->active_mm) {
-                               __flush_dcache_icache((void *) address);
-                       } else
-                               flush_dcache_icache_page(page);
-                       set_bit(PG_arch_1, &page->flags);
-               }
-       }
 
-#ifdef CONFIG_PPC_STD_MMU
        /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
-       if (!pte_young(pte) || address >= TASK_SIZE)
+       if (!pte_young(*ptep) || address >= TASK_SIZE)
                return;
 
        /* We try to figure out if we are coming from an instruction