* Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
* Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
*/
+#include <linux/bug.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
+#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
+#include <linux/hardirq.h>
+#include <linux/gfp.h>
+#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-unsigned long highstart_pfn, highend_pfn;
-
/*
* We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed.  This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price.  Since the page is never written to after initialization we
 * don't have to care about aliases on other CPUs.
*/
unsigned long empty_zero_page, zero_page_mask;
+EXPORT_SYMBOL_GPL(empty_zero_page);
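+/*
+ * zero_page_mask lets callers pick the pre-zeroed page whose cache colour
+ * matches a given user address, roughly as the ZERO_PAGE() helper in
+ * asm/pgtable.h does:
+ *
+ *	#define ZERO_PAGE(vaddr) \
+ *		(virt_to_page((void *)(empty_zero_page + \
+ *			(((unsigned long)(vaddr)) & zero_page_mask))))
+ */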
/*
* Not static inline because used by IP27 special magic initialization code
return 1UL << order;
}
-/*
- * These are almost like kmap_atomic / kunmap_atomic except they take an
- * additional address argument as the hint.
- */
-
-#define kmap_get_fixmap_pte(vaddr) \
- pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))
-
#ifdef CONFIG_MIPS_MT_SMTC
static pte_t *kmap_coherent_pte;
static void __init kmap_coherent_init(void)
static inline void kmap_coherent_init(void) {}
#endif
-static inline void *kmap_coherent(struct page *page, unsigned long addr)
+void *kmap_coherent(struct page *page, unsigned long addr)
{
enum fixed_addresses idx;
unsigned long vaddr, flags, entrylo;
pte_t pte;
int tlbidx;
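+	/*
+	 * A dcache-dirty page may still have valid data in a differently
+	 * coloured alias, so callers only ask for a coherent mapping of
+	 * pages that are not marked dirty.
+	 */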
+ BUG_ON(Page_dcache_dirty(page));
+
inc_preempt_count();
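+	/*
+	 * Pick the fixmap slot whose colour matches the user address; a
+	 * separate bank of slots is reserved for interrupt context so a
+	 * nested kmap_coherent() cannot clobber the interrupted mapping.
+	 */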
idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
#ifdef CONFIG_MIPS_MT_SMTC
- idx += FIX_N_COLOURS * smp_processor_id();
+ idx += FIX_N_COLOURS * smp_processor_id() +
+ (in_interrupt() ? (FIX_N_COLOURS * NR_CPUS) : 0);
+#else
+ idx += in_interrupt() ? FIX_N_COLOURS : 0;
#endif
vaddr = __fix_to_virt(FIX_CMAP_END - idx);
pte = mk_pte(page, PAGE_KERNEL);
-#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)
+#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
entrylo = pte.pte_high;
#else
- entrylo = pte_val(pte) >> 6;
+ entrylo = pte_to_entrylo(pte_val(pte));
#endif
ENTER_CRITICAL(flags);
#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
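+/*
+ * UNIQUE_ENTRYHI() spaces the wired entries 2 * PAGE_SIZE apart because
+ * each MIPS TLB entry maps an even/odd pair of pages.
+ */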
-static inline void kunmap_coherent(struct page *page)
+void kunmap_coherent(void)
{
#ifndef CONFIG_MIPS_MT_SMTC
unsigned int wired;
preempt_check_resched();
}
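+
+/*
+ * Copy a user page: when the source is mapped in userspace and not
+ * dcache-dirty, read it through a coherent (colour-matched) mapping;
+ * otherwise fall back to kmap_atomic() and flush the destination alias
+ * afterwards if it could conflict with the user mapping.
+ */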
+void copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma)
+{
+ void *vfrom, *vto;
+
+ vto = kmap_atomic(to, KM_USER1);
+ if (cpu_has_dc_aliases &&
+ page_mapped(from) && !Page_dcache_dirty(from)) {
+ vfrom = kmap_coherent(from, vaddr);
+ copy_page(vto, vfrom);
+ kunmap_coherent();
+ } else {
+ vfrom = kmap_atomic(from, KM_USER0);
+ copy_page(vto, vfrom);
+ kunmap_atomic(vfrom, KM_USER0);
+ }
+ if ((!cpu_has_ic_fills_f_dc) ||
+ pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
+ flush_data_cache_page((unsigned long)vto);
+ kunmap_atomic(vto, KM_USER1);
+	/* Make sure the copy is visible to other CPUs before the page is used */
+ smp_wmb();
+}
+
void copy_to_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr, void *dst, const void *src,
unsigned long len)
{
- if (cpu_has_dc_aliases) {
+ if (cpu_has_dc_aliases &&
+ page_mapped(page) && !Page_dcache_dirty(page)) {
void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
memcpy(vto, src, len);
- kunmap_coherent(page);
- } else
+ kunmap_coherent();
+ } else {
memcpy(dst, src, len);
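+		/*
+		 * The data went in through the kernel alias; mark the page
+		 * dcache-dirty so the cache is flushed before a user mapping
+		 * can see stale lines.
+		 */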
+ if (cpu_has_dc_aliases)
+ SetPageDcacheDirty(page);
+ }
if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
flush_cache_page(vma, vaddr, page_to_pfn(page));
}
-EXPORT_SYMBOL(copy_to_user_page);
-
void copy_from_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr, void *dst, const void *src,
unsigned long len)
{
- if (cpu_has_dc_aliases) {
- void *vfrom =
- kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+ if (cpu_has_dc_aliases &&
+ page_mapped(page) && !Page_dcache_dirty(page)) {
+ void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
memcpy(dst, vfrom, len);
- kunmap_coherent(page);
- } else
+ kunmap_coherent();
+ } else {
memcpy(dst, src, len);
+ if (cpu_has_dc_aliases)
+ SetPageDcacheDirty(page);
+ }
}
-EXPORT_SYMBOL(copy_from_user_page);
-
-
-#ifdef CONFIG_HIGHMEM
-pte_t *kmap_pte;
-pgprot_t kmap_prot;
-
-static void __init kmap_init(void)
-{
- unsigned long kmap_vstart;
-
- /* cache the first kmap pte */
- kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
- kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
-
- kmap_prot = PAGE_KERNEL;
-}
-#endif /* CONFIG_HIGHMEM */
-
void __init fixrange_init(unsigned long start, unsigned long end,
pgd_t *pgd_base)
{
if (pmd_none(*pmd)) {
pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
set_pmd(pmd, __pmd((unsigned long)pte));
- if (pte != pte_offset_kernel(pmd, 0))
- BUG();
+ BUG_ON(pte != pte_offset_kernel(pmd, 0));
}
vaddr += PMD_SIZE;
}
}
#ifndef CONFIG_NEED_MULTIPLE_NODES
-extern void pagetable_init(void);
-
-static int __init page_is_ram(unsigned long pagenr)
+int page_is_ram(unsigned long pagenr)
{
int i;
void __init paging_init(void)
{
- unsigned long zones_size[MAX_NR_ZONES] = { 0, };
- unsigned long max_dma, low;
-#ifndef CONFIG_FLATMEM
- unsigned long zholes_size[MAX_NR_ZONES] = { 0, };
- unsigned long i, j, pfn;
-#endif
+ unsigned long max_zone_pfns[MAX_NR_ZONES];
+ unsigned long lastpfn;
pagetable_init();
#endif
kmap_coherent_init();
- max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
- low = max_low_pfn;
-
-#ifdef CONFIG_ISA
- if (low < max_dma)
- zones_size[ZONE_DMA] = low;
- else {
- zones_size[ZONE_DMA] = max_dma;
- zones_size[ZONE_NORMAL] = low - max_dma;
- }
-#else
- zones_size[ZONE_DMA] = low;
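+	/*
+	 * Only the top PFN of each zone is recorded here; the core mm
+	 * derives zone sizes and holes itself in free_area_init_nodes().
+	 */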
+#ifdef CONFIG_ZONE_DMA
+ max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
+#ifdef CONFIG_ZONE_DMA32
+ max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
+#endif
+ max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
+ lastpfn = max_low_pfn;
#ifdef CONFIG_HIGHMEM
- zones_size[ZONE_HIGHMEM] = highend_pfn - highstart_pfn;
+ max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
+ lastpfn = highend_pfn;
- if (cpu_has_dc_aliases && zones_size[ZONE_HIGHMEM]) {
+ if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
printk(KERN_WARNING "This processor doesn't support highmem."
- " %ldk highmem ignored\n", zones_size[ZONE_HIGHMEM]);
- zones_size[ZONE_HIGHMEM] = 0;
+ " %ldk highmem ignored\n",
+ (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
+ max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
+ lastpfn = max_low_pfn;
}
#endif
-#ifdef CONFIG_FLATMEM
- free_area_init(zones_size);
-#else
- pfn = 0;
- for (i = 0; i < MAX_NR_ZONES; i++)
- for (j = 0; j < zones_size[i]; j++, pfn++)
- if (!page_is_ram(pfn))
- zholes_size[i]++;
- free_area_init_node(0, NODE_DATA(0), zones_size, 0, zholes_size);
-#endif
+ free_area_init_nodes(max_zone_pfns);
}
-static struct kcore_list kcore_mem, kcore_vmalloc;
#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif
#ifdef CONFIG_HIGHMEM
for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
- struct page *page = mem_map + tmp;
+ struct page *page = pfn_to_page(tmp);
if (!page_is_ram(tmp)) {
SetPageReserved(page);
continue;
}
ClearPageReserved(page);
-#ifdef CONFIG_LIMITED_DMA
- set_page_address(page, lowmem_page_address(page));
-#endif
init_page_count(page);
__free_page(page);
totalhigh_pages++;
if ((unsigned long) &_text > (unsigned long) CKSEG0)
/* The -4 is a hack so that user tools don't have to handle
the overflow. */
- kclist_add(&kcore_kseg0, (void *) CKSEG0, 0x80000000 - 4);
+ kclist_add(&kcore_kseg0, (void *) CKSEG0,
+ 0x80000000 - 4, KCORE_TEXT);
#endif
- kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
- kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
- VMALLOC_END-VMALLOC_START);
printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
"%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n",
- (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
+ nr_free_pages() << (PAGE_SHIFT-10),
ram << (PAGE_SHIFT-10),
codesize >> 10,
reservedpages << (PAGE_SHIFT-10),
datasize >> 10,
initsize >> 10,
- (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));
+ totalhigh_pages << (PAGE_SHIFT-10));
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */
-static void free_init_pages(char *what, unsigned long begin, unsigned long end)
+void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
unsigned long pfn;
}
#endif
-extern unsigned long prom_free_prom_memory(void);
-
-void free_initmem(void)
+void __init_refok free_initmem(void)
{
- unsigned long freed;
-
- freed = prom_free_prom_memory();
- if (freed)
- printk(KERN_INFO "Freeing firmware memory: %ldk freed\n",freed);
-
+ prom_free_prom_memory();
free_init_pages("unused kernel memory",
__pa_symbol(&__init_begin),
__pa_symbol(&__init_end));
}
+
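+/*
+ * Without CONFIG_MIPS_PGD_C0_CONTEXT the TLB refill handler looks the
+ * current pgd up in this per-CPU array instead of pulling it out of the
+ * CP0 Context register.
+ */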
+#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
+unsigned long pgd_current[NR_CPUS];
+#endif
+/*
+ * On 64-bit we've got three-level pagetables with a slightly
+ * different layout ...
+ */
+#define __page_aligned(order) __attribute__((__aligned__(PAGE_SIZE<<order)))
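+/*
+ * e.g. __page_aligned(1) expands to
+ * __attribute__((__aligned__(PAGE_SIZE << 1))), i.e. a two-page boundary.
+ */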
+
+/*
+ * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
+ * are constants.  So we use the variants from asm-offsets.h until that gcc
+ * is officially retired.
+ */
+pgd_t swapper_pg_dir[_PTRS_PER_PGD] __page_aligned(_PGD_ORDER);
+#ifndef __PAGETABLE_PMD_FOLDED
+pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned(PMD_ORDER);
+#endif
+pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);