[MIPS] 64-bit Sibyte kernels need DMA32.
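
With CONFIG_ZONE_DMA32 available, 64-bit Sibyte kernels should put
32-bit DMA-able memory into ZONE_DMA32 instead of ZONE_DMA.  To that
end, convert paging_init() from the old zones_size[] interface to
max_zone_pfns[] / free_area_init_nodes(), filling in MAX_DMA_PFN and
MAX_DMA32_PFN where the respective zones are configured.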
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 5b06349..480dec0 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -8,6 +8,7 @@
  * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
  * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
  */
+#include <linux/bug.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/signal.h>
 #include <linux/proc_fs.h>
 #include <linux/pfn.h>
 
+#include <asm/asm-offsets.h>
 #include <asm/bootinfo.h>
 #include <asm/cachectl.h>
 #include <asm/cpu.h>
 #include <asm/dma.h>
+#include <asm/kmap_types.h>
 #include <asm/mmu_context.h>
 #include <asm/sections.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/tlb.h>
+#include <asm/fixmap.h>
 
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+/* Atomicity and interruptibility */
+#ifdef CONFIG_MIPS_MT_SMTC
 
-unsigned long highstart_pfn, highend_pfn;
+#include <asm/mipsmtregs.h>
+
+#define ENTER_CRITICAL(flags) \
+       { \
+       unsigned int mvpflags; \
+       local_irq_save(flags); \
+       mvpflags = dvpe()
+#define EXIT_CRITICAL(flags) \
+       evpe(mvpflags); \
+       local_irq_restore(flags); \
+       }
+#else
+
+#define ENTER_CRITICAL(flags) local_irq_save(flags)
+#define EXIT_CRITICAL(flags) local_irq_restore(flags)
+
+#endif /* CONFIG_MIPS_MT_SMTC */
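+
+/*
+ * A minimal usage sketch (hypothetical caller, not part of this file):
+ * the two macros must be used as a brace-balanced pair at the same
+ * nesting level, because on SMTC ENTER_CRITICAL opens the block that
+ * declares mvpflags and EXIT_CRITICAL closes it:
+ *
+ *     unsigned long flags;
+ *
+ *     ENTER_CRITICAL(flags);
+ *     ... touch EntryHi / EntryLo / Index here ...
+ *     EXIT_CRITICAL(flags);
+ */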
+
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
 /*
  * We have up to 8 empty zeroed pages so we can map one of the right colour
@@ -67,9 +90,9 @@ unsigned long setup_zero_pages(void)
        if (!empty_zero_page)
                panic("Oh boy, that early out of memory?");
 
-       page = virt_to_page(empty_zero_page);
+       page = virt_to_page((void *)empty_zero_page);
        split_page(page, order);
-       while (page < virt_to_page(empty_zero_page + (PAGE_SIZE << order))) {
+       while (page < virt_to_page((void *)(empty_zero_page + (PAGE_SIZE << order)))) {
                SetPageReserved(page);
                page++;
        }
@@ -80,13 +103,176 @@ unsigned long setup_zero_pages(void)
        return 1UL << order;
 }
 
-#ifdef CONFIG_HIGHMEM
-pte_t *kmap_pte;
-pgprot_t kmap_prot;
+/*
+ * These are almost like kmap_atomic / kunmap_atomic except that
+ * kmap_coherent takes an additional user-space address, which is used
+ * as a cache colouring hint.
+ */
 
 #define kmap_get_fixmap_pte(vaddr)                                     \
        pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))
 
+#ifdef CONFIG_MIPS_MT_SMTC
+static pte_t *kmap_coherent_pte;
+static void __init kmap_coherent_init(void)
+{
+       unsigned long vaddr;
+
+       /* cache the first coherent kmap pte */
+       vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
+       kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
+}
+#else
+static inline void kmap_coherent_init(void) {}
+#endif
+
+void *kmap_coherent(struct page *page, unsigned long addr)
+{
+       enum fixed_addresses idx;
+       unsigned long vaddr, flags, entrylo;
+       unsigned long old_ctx;
+       pte_t pte;
+       int tlbidx;
+
+       BUG_ON(Page_dcache_dirty(page));
+
+       inc_preempt_count();
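+       /* Pick the fixmap slot whose virtual colour matches the user address. */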
+       idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
+#ifdef CONFIG_MIPS_MT_SMTC
+       idx += FIX_N_COLOURS * smp_processor_id();
+#endif
+       vaddr = __fix_to_virt(FIX_CMAP_END - idx);
+       pte = mk_pte(page, PAGE_KERNEL);
+#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)
+       entrylo = pte.pte_high;
+#else
+       entrylo = pte_val(pte) >> 6;
+#endif
+
+       ENTER_CRITICAL(flags);
+       old_ctx = read_c0_entryhi();
+       write_c0_entryhi(vaddr & (PAGE_MASK << 1));
+       write_c0_entrylo0(entrylo);
+       write_c0_entrylo1(entrylo);
+#ifdef CONFIG_MIPS_MT_SMTC
+       set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);
+       /* preload TLB instead of local_flush_tlb_one() */
+       mtc0_tlbw_hazard();
+       tlb_probe();
+       tlb_probe_hazard();
+       tlbidx = read_c0_index();
+       mtc0_tlbw_hazard();
+       if (tlbidx < 0)
+               tlb_write_random();
+       else
+               tlb_write_indexed();
+#else
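+       /*
+        * Consume a new wired TLB entry for this mapping; kunmap_coherent()
+        * below gives it back.
+        */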
+       tlbidx = read_c0_wired();
+       write_c0_wired(tlbidx + 1);
+       write_c0_index(tlbidx);
+       mtc0_tlbw_hazard();
+       tlb_write_indexed();
+#endif
+       tlbw_use_hazard();
+       write_c0_entryhi(old_ctx);
+       EXIT_CRITICAL(flags);
+
+       return (void *)vaddr;
+}
+
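+/*
+ * A per-index EntryHi value in CKSEG0: unmapped addresses can never
+ * match in the TLB, so this safely parks a retired wired entry.
+ */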
+#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
+
+void kunmap_coherent(void)
+{
+#ifndef CONFIG_MIPS_MT_SMTC
+       unsigned int wired;
+       unsigned long flags, old_ctx;
+
+       ENTER_CRITICAL(flags);
+       old_ctx = read_c0_entryhi();
+       wired = read_c0_wired() - 1;
+       write_c0_wired(wired);
+       write_c0_index(wired);
+       write_c0_entryhi(UNIQUE_ENTRYHI(wired));
+       write_c0_entrylo0(0);
+       write_c0_entrylo1(0);
+       mtc0_tlbw_hazard();
+       tlb_write_indexed();
+       tlbw_use_hazard();
+       write_c0_entryhi(old_ctx);
+       EXIT_CRITICAL(flags);
+#endif
+       dec_preempt_count();
+       preempt_check_resched();
+}
+
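+/*
+ * With a mapped source page on an aliasing D-cache, copy through a
+ * coherent mapping at the user's colour so we read the data the user
+ * actually sees; otherwise a plain atomic kmap of the source suffices.
+ */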
+void copy_user_highpage(struct page *to, struct page *from,
+       unsigned long vaddr, struct vm_area_struct *vma)
+{
+       void *vfrom, *vto;
+
+       vto = kmap_atomic(to, KM_USER1);
+       if (cpu_has_dc_aliases && page_mapped(from)) {
+               vfrom = kmap_coherent(from, vaddr);
+               copy_page(vto, vfrom);
+               kunmap_coherent();
+       } else {
+               vfrom = kmap_atomic(from, KM_USER0);
+               copy_page(vto, vfrom);
+               kunmap_atomic(vfrom, KM_USER0);
+       }
+       if (((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc) ||
+           pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
+               flush_data_cache_page((unsigned long)vto);
+       kunmap_atomic(vto, KM_USER1);
+       /* Make sure the copy is visible to other CPUs before the page is used */
+       smp_wmb();
+}
+
+EXPORT_SYMBOL(copy_user_highpage);
+
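+/*
+ * Writing through the kernel's own mapping can leave an aliasing
+ * D-cache stale, so either write through a coherent mapping or flag
+ * the page dirty for a flush before userspace can see old data.
+ */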
+void copy_to_user_page(struct vm_area_struct *vma,
+       struct page *page, unsigned long vaddr, void *dst, const void *src,
+       unsigned long len)
+{
+       if (cpu_has_dc_aliases && page_mapped(page)) {
+               void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+               memcpy(vto, src, len);
+               kunmap_coherent();
+       } else {
+               memcpy(dst, src, len);
+               if (cpu_has_dc_aliases)
+                       SetPageDcacheDirty(page);
+       }
+       if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
+               flush_cache_page(vma, vaddr, page_to_pfn(page));
+}
+
+EXPORT_SYMBOL(copy_to_user_page);
+
+void copy_from_user_page(struct vm_area_struct *vma,
+       struct page *page, unsigned long vaddr, void *dst, const void *src,
+       unsigned long len)
+{
+       if (cpu_has_dc_aliases && page_mapped(page)) {
+               void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+               memcpy(dst, vfrom, len);
+               kunmap_coherent();
+       } else {
+               memcpy(dst, src, len);
+       }
+}
+
+EXPORT_SYMBOL(copy_from_user_page);
+
+#ifdef CONFIG_HIGHMEM
+unsigned long highstart_pfn, highend_pfn;
+
+pte_t *kmap_pte;
+pgprot_t kmap_prot;
+
 static void __init kmap_init(void)
 {
        unsigned long kmap_vstart;
@@ -97,11 +283,12 @@ static void __init kmap_init(void)
 
        kmap_prot = PAGE_KERNEL;
 }
+#endif /* CONFIG_HIGHMEM */
 
-#ifdef CONFIG_32BIT
 void __init fixrange_init(unsigned long start, unsigned long end,
        pgd_t *pgd_base)
 {
+#if defined(CONFIG_HIGHMEM) || defined(CONFIG_MIPS_MT_SMTC)
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
@@ -122,7 +309,7 @@ void __init fixrange_init(unsigned long start, unsigned long end,
                        for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
                                if (pmd_none(*pmd)) {
                                        pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
-                                       set_pmd(pmd, __pmd(pte));
+                                       set_pmd(pmd, __pmd((unsigned long)pte));
                                        if (pte != pte_offset_kernel(pmd, 0))
                                                BUG();
                                }
@@ -132,13 +319,10 @@ void __init fixrange_init(unsigned long start, unsigned long end,
                }
                j = 0;
        }
+#endif
 }
-#endif /* CONFIG_32BIT */
-#endif /* CONFIG_HIGHMEM */
 
 #ifndef CONFIG_NEED_MULTIPLE_NODES
-extern void pagetable_init(void);
-
 static int __init page_is_ram(unsigned long pagenr)
 {
        int i;
@@ -163,53 +347,38 @@ static int __init page_is_ram(unsigned long pagenr)
 
 void __init paging_init(void)
 {
-       unsigned long zones_size[] = { 0, };
-       unsigned long max_dma, high, low;
-#ifndef CONFIG_FLATMEM
-       unsigned long zholes_size[] = { 0, };
-       unsigned long i, j, pfn;
-#endif
+       unsigned long max_zone_pfns[MAX_NR_ZONES];
+       unsigned long lastpfn;
 
        pagetable_init();
 
 #ifdef CONFIG_HIGHMEM
        kmap_init();
 #endif
+       kmap_coherent_init();
 
-       max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
-       low = max_low_pfn;
-       high = highend_pfn;
-
-#ifdef CONFIG_ISA
-       if (low < max_dma)
-               zones_size[ZONE_DMA] = low;
-       else {
-               zones_size[ZONE_DMA] = max_dma;
-               zones_size[ZONE_NORMAL] = low - max_dma;
-       }
-#else
-       zones_size[ZONE_DMA] = low;
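+       /*
+        * Zone limits are now expressed as pfns; with CONFIG_ZONE_DMA32
+        * (e.g. 64-bit Sibyte) memory below 4GB lands in ZONE_DMA32.
+        */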
+#ifdef CONFIG_ZONE_DMA
+       max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
+#endif
+#ifdef CONFIG_ZONE_DMA32
+       max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
 #endif
+       max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
+       lastpfn = max_low_pfn;
 #ifdef CONFIG_HIGHMEM
-       if (cpu_has_dc_aliases) {
-               printk(KERN_WARNING "This processor doesn't support highmem.");
-               if (high - low)
-                       printk(" %ldk highmem ignored", high - low);
-               printk("\n");
-       } else
-               zones_size[ZONE_HIGHMEM] = high - low;
+       max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
+       lastpfn = highend_pfn;
+
+       if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
+               printk(KERN_WARNING "This processor doesn't support highmem."
+                      " %ldk highmem ignored\n",
+                      (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
+               max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
+               lastpfn = max_low_pfn;
+       }
 #endif
 
-#ifdef CONFIG_FLATMEM
-       free_area_init(zones_size);
-#else
-       pfn = 0;
-       for (i = 0; i < MAX_NR_ZONES; i++)
-               for (j = 0; j < zones_size[i]; j++, pfn++)
-                       if (!page_is_ram(pfn))
-                               zholes_size[i]++;
-       free_area_init_node(0, NODE_DATA(0), zones_size, 0, zholes_size);
-#endif
+       free_area_init_nodes(max_zone_pfns);
 }
 
 static struct kcore_list kcore_mem, kcore_vmalloc;
@@ -246,16 +415,13 @@ void __init mem_init(void)
 
 #ifdef CONFIG_HIGHMEM
        for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
-               struct page *page = mem_map + tmp;
+               struct page *page = pfn_to_page(tmp);
 
                if (!page_is_ram(tmp)) {
                        SetPageReserved(page);
                        continue;
                }
                ClearPageReserved(page);
-#ifdef CONFIG_LIMITED_DMA
-               set_page_address(page, lowmem_page_address(page));
-#endif
                init_page_count(page);
                __free_page(page);
                totalhigh_pages++;
@@ -290,15 +456,18 @@ void __init mem_init(void)
 }
 #endif /* !CONFIG_NEED_MULTIPLE_NODES */
 
-void free_init_pages(char *what, unsigned long begin, unsigned long end)
+void free_init_pages(const char *what, unsigned long begin, unsigned long end)
 {
-       unsigned long addr;
+       unsigned long pfn;
+
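+       /* begin/end are physical addresses; poison each page as it is freed. */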
+       for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
+               struct page *page = pfn_to_page(pfn);
+               void *addr = phys_to_virt(PFN_PHYS(pfn));
 
-       for (addr = begin; addr < end; addr += PAGE_SIZE) {
-               ClearPageReserved(virt_to_page(addr));
-               init_page_count(virt_to_page(addr));
-               memset((void *)addr, 0xcc, PAGE_SIZE);
-               free_page(addr);
+               ClearPageReserved(page);
+               init_page_count(page);
+               memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
+               __free_page(page);
                totalram_pages++;
        }
        printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
@@ -307,30 +476,37 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
 #ifdef CONFIG_BLK_DEV_INITRD
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
-#ifdef CONFIG_64BIT
-       /* Switch from KSEG0 to XKPHYS addresses */
-       start = (unsigned long)phys_to_virt(CPHYSADDR(start));
-       end = (unsigned long)phys_to_virt(CPHYSADDR(end));
-#endif
-       free_init_pages("initrd memory", start, end);
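+       /* free_init_pages() now takes physical addresses. */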
+       free_init_pages("initrd memory",
+                       virt_to_phys((void *)start),
+                       virt_to_phys((void *)end));
 }
 #endif
 
-extern unsigned long prom_free_prom_memory(void);
-
-void free_initmem(void)
+void __init_refok free_initmem(void)
 {
-       unsigned long start, end, freed;
+       prom_free_prom_memory();
+       free_init_pages("unused kernel memory",
+                       __pa_symbol(&__init_begin),
+                       __pa_symbol(&__init_end));
+}
 
-       freed = prom_free_prom_memory();
-       if (freed)
-               printk(KERN_INFO "Freeing firmware memory: %ldk freed\n",freed);
+unsigned long pgd_current[NR_CPUS];
+/*
+ * On 64-bit we've got three-level pagetables with a slightly
+ * different layout ...
+ */
+#define __page_aligned(order) __attribute__((__aligned__(PAGE_SIZE << (order))))
 
-       start = (unsigned long)(&__init_begin);
-       end = (unsigned long)(&__init_end);
+/*
+ * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and
+ * PGD_ORDER are constants.  So we use the variants from asm-offsets.h
+ * until those compilers are officially retired.
+ */
+pgd_t swapper_pg_dir[_PTRS_PER_PGD] __page_aligned(_PGD_ORDER);
 #ifdef CONFIG_64BIT
-       start = PAGE_OFFSET | CPHYSADDR(start);
-       end = PAGE_OFFSET | CPHYSADDR(end);
+#ifdef MODULE_START
+pgd_t module_pg_dir[PTRS_PER_PGD] __page_aligned(PGD_ORDER);
 #endif
-       free_init_pages("unused kernel memory", start, end);
-}
+pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned(PMD_ORDER);
+#endif
+pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);