X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=arch%2Fpowerpc%2Fmm%2Fhash_utils_64.c;h=1ade7eb6ae00a3e8f352526f9b556cb05408943a;hb=f3a32500ba8f3ec9ee0c12836fcfd315f1256db4;hp=ae2c919cbecd3f70b89c5449da4df33b12705749;hpb=a00428f5b149e36b8225b2a0812742a6dfb07b8c;p=safe%2Fjmp%2Flinux-2.6 diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index ae2c919..1ade7eb 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -21,7 +21,6 @@ #undef DEBUG #undef DEBUG_LOW -#include #include #include #include @@ -32,6 +31,7 @@ #include #include #include +#include #include #include @@ -42,7 +42,7 @@ #include #include #include -#include +#include #include #include #include @@ -50,8 +50,9 @@ #include #include #include -#include #include +#include +#include #ifdef DEBUG #define DBG(fmt...) udbg_printf(fmt) @@ -67,6 +68,7 @@ #define KB (1024) #define MB (1024*KB) +#define GB (1024L*MB) /* * Note: pte --> Linux PTE @@ -87,15 +89,30 @@ extern unsigned long dart_tablebase; static unsigned long _SDR1; struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT]; -hpte_t *htab_address; +struct hash_pte *htab_address; unsigned long htab_size_bytes; unsigned long htab_hash_mask; int mmu_linear_psize = MMU_PAGE_4K; int mmu_virtual_psize = MMU_PAGE_4K; +int mmu_vmalloc_psize = MMU_PAGE_4K; +#ifdef CONFIG_SPARSEMEM_VMEMMAP +int mmu_vmemmap_psize = MMU_PAGE_4K; +#endif +int mmu_io_psize = MMU_PAGE_4K; +int mmu_kernel_ssize = MMU_SEGSIZE_256M; +int mmu_highuser_ssize = MMU_SEGSIZE_256M; +u16 mmu_slb_size = 64; #ifdef CONFIG_HUGETLB_PAGE -int mmu_huge_psize = MMU_PAGE_16M; unsigned int HPAGE_SHIFT; #endif +#ifdef CONFIG_PPC_64K_PAGES +int mmu_ci_restrictions; +#endif +#ifdef CONFIG_DEBUG_PAGEALLOC +static u8 *linear_map_hash_slots; +static unsigned long linear_map_hash_count; +static DEFINE_SPINLOCK(linear_map_hash_lock); +#endif /* CONFIG_DEBUG_PAGEALLOC */ /* There are definitions of page sizes arrays to be used when none * is provided by the firmware. @@ -103,7 +120,7 @@ unsigned int HPAGE_SHIFT; /* Pre-POWER4 CPUs (4k pages only) */ -struct mmu_psize_def mmu_psize_defaults_old[] = { +static struct mmu_psize_def mmu_psize_defaults_old[] = { [MMU_PAGE_4K] = { .shift = 12, .sllp = 0, @@ -117,7 +134,7 @@ struct mmu_psize_def mmu_psize_defaults_old[] = { * * Support for 16Mb large pages */ -struct mmu_psize_def mmu_psize_defaults_gp[] = { +static struct mmu_psize_def mmu_psize_defaults_gp[] = { [MMU_PAGE_4K] = { .shift = 12, .sllp = 0, @@ -134,68 +151,124 @@ struct mmu_psize_def mmu_psize_defaults_gp[] = { }, }; +static unsigned long htab_convert_pte_flags(unsigned long pteflags) +{ + unsigned long rflags = pteflags & 0x1fa; + + /* _PAGE_EXEC -> NOEXEC */ + if ((pteflags & _PAGE_EXEC) == 0) + rflags |= HPTE_R_N; + + /* PP bits. 
PAGE_USER is already PP bit 0x2, so we only + * need to add in 0x1 if it's a read-only user page + */ + if ((pteflags & _PAGE_USER) && !((pteflags & _PAGE_RW) && + (pteflags & _PAGE_DIRTY))) + rflags |= 1; + + /* Always add C */ + return rflags | HPTE_R_C; +} int htab_bolt_mapping(unsigned long vstart, unsigned long vend, - unsigned long pstart, unsigned long mode, int psize) + unsigned long pstart, unsigned long prot, + int psize, int ssize) { unsigned long vaddr, paddr; unsigned int step, shift; - unsigned long tmp_mode; int ret = 0; shift = mmu_psize_defs[psize].shift; step = 1 << shift; + prot = htab_convert_pte_flags(prot); + + DBG("htab_bolt_mapping(%lx..%lx -> %lx (%lx,%d,%d)\n", + vstart, vend, pstart, prot, psize, ssize); + for (vaddr = vstart, paddr = pstart; vaddr < vend; vaddr += step, paddr += step) { - unsigned long vpn, hash, hpteg; - unsigned long vsid = get_kernel_vsid(vaddr); - unsigned long va = (vsid << 28) | (vaddr & 0x0fffffff); - - vpn = va >> shift; - tmp_mode = mode; - - /* Make non-kernel text non-executable */ - if (!in_kernel_text(vaddr)) - tmp_mode = mode | HPTE_R_N; - - hash = hpt_hash(va, shift); + unsigned long hash, hpteg; + unsigned long vsid = get_kernel_vsid(vaddr, ssize); + unsigned long va = hpt_va(vaddr, vsid, ssize); + unsigned long tprot = prot; + + /* Make kernel text executable */ + if (overlaps_kernel_text(vaddr, vaddr + step)) + tprot &= ~HPTE_R_N; + + hash = hpt_hash(va, shift, ssize); hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); - /* The crap below can be cleaned once ppd_md.probe() can - * set up the hash callbacks, thus we can just used the - * normal insert callback here. - */ -#ifdef CONFIG_PPC_ISERIES - if (_machine == PLATFORM_ISERIES_LPAR) - ret = iSeries_hpte_insert(hpteg, va, - virt_to_abs(paddr), - tmp_mode, - HPTE_V_BOLTED, - psize); - else -#endif -#ifdef CONFIG_PPC_PSERIES - if (_machine & PLATFORM_LPAR) - ret = pSeries_lpar_hpte_insert(hpteg, va, - virt_to_abs(paddr), - tmp_mode, - HPTE_V_BOLTED, - psize); - else -#endif -#ifdef CONFIG_PPC_MULTIPLATFORM - ret = native_hpte_insert(hpteg, va, - virt_to_abs(paddr), - tmp_mode, HPTE_V_BOLTED, - psize); -#endif + BUG_ON(!ppc_md.hpte_insert); + ret = ppc_md.hpte_insert(hpteg, va, paddr, tprot, + HPTE_V_BOLTED, psize, ssize); + if (ret < 0) break; +#ifdef CONFIG_DEBUG_PAGEALLOC + if ((paddr >> PAGE_SHIFT) < linear_map_hash_count) + linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80; +#endif /* CONFIG_DEBUG_PAGEALLOC */ } return ret < 0 ? 
ret : 0; } +#ifdef CONFIG_MEMORY_HOTPLUG +static int htab_remove_mapping(unsigned long vstart, unsigned long vend, + int psize, int ssize) +{ + unsigned long vaddr; + unsigned int step, shift; + + shift = mmu_psize_defs[psize].shift; + step = 1 << shift; + + if (!ppc_md.hpte_removebolted) { + printk(KERN_WARNING "Platform doesn't implement " + "hpte_removebolted\n"); + return -EINVAL; + } + + for (vaddr = vstart; vaddr < vend; vaddr += step) + ppc_md.hpte_removebolted(vaddr, psize, ssize); + + return 0; +} +#endif /* CONFIG_MEMORY_HOTPLUG */ + +static int __init htab_dt_scan_seg_sizes(unsigned long node, + const char *uname, int depth, + void *data) +{ + char *type = of_get_flat_dt_prop(node, "device_type", NULL); + u32 *prop; + unsigned long size = 0; + + /* We are scanning "cpu" nodes only */ + if (type == NULL || strcmp(type, "cpu") != 0) + return 0; + + prop = (u32 *)of_get_flat_dt_prop(node, "ibm,processor-segment-sizes", + &size); + if (prop == NULL) + return 0; + for (; size >= 4; size -= 4, ++prop) { + if (prop[0] == 40) { + DBG("1T segment support detected\n"); + cur_cpu_spec->cpu_features |= CPU_FTR_1T_SEGMENT; + return 1; + } + } + cur_cpu_spec->cpu_features &= ~CPU_FTR_NO_SLBIE_B; + return 0; +} + +static void __init htab_init_seg_sizes(void) +{ + of_scan_flat_dt(htab_dt_scan_seg_sizes, NULL); +} + static int __init htab_dt_scan_page_sizes(unsigned long node, const char *uname, int depth, void *data) @@ -275,6 +348,47 @@ static int __init htab_dt_scan_page_sizes(unsigned long node, return 0; } +#ifdef CONFIG_HUGETLB_PAGE +/* Scan for 16G memory blocks that have been set aside for huge pages + * and reserve those blocks for 16G huge pages. + */ +static int __init htab_dt_scan_hugepage_blocks(unsigned long node, + const char *uname, int depth, + void *data) { + char *type = of_get_flat_dt_prop(node, "device_type", NULL); + unsigned long *addr_prop; + u32 *page_count_prop; + unsigned int expected_pages; + long unsigned int phys_addr; + long unsigned int block_size; + + /* We are scanning "memory" nodes only */ + if (type == NULL || strcmp(type, "memory") != 0) + return 0; + + /* This property is the log base 2 of the number of virtual pages that + * will represent this memory block. */ + page_count_prop = of_get_flat_dt_prop(node, "ibm,expected#pages", NULL); + if (page_count_prop == NULL) + return 0; + expected_pages = (1 << page_count_prop[0]); + addr_prop = of_get_flat_dt_prop(node, "reg", NULL); + if (addr_prop == NULL) + return 0; + phys_addr = addr_prop[0]; + block_size = addr_prop[1]; + if (block_size != (16 * GB)) + return 0; + printk(KERN_INFO "Huge page(16GB) memory: " + "addr = 0x%lX size = 0x%lX pages = %d\n", + phys_addr, block_size, expected_pages); + if (phys_addr + (16 * GB) <= lmb_end_of_DRAM()) { + lmb_reserve(phys_addr, block_size * expected_pages); + add_gpage(phys_addr, block_size, expected_pages); + } + return 0; +} +#endif /* CONFIG_HUGETLB_PAGE */ static void __init htab_init_page_sizes(void) { @@ -295,11 +409,11 @@ static void __init htab_init_page_sizes(void) * Not in the device-tree, let's fallback on known size * list for 16M capable GP & GR */ - if ((_machine != PLATFORM_ISERIES_LPAR) && - cpu_has_feature(CPU_FTR_16M_PAGE)) + if (cpu_has_feature(CPU_FTR_16M_PAGE)) memcpy(mmu_psize_defs, mmu_psize_defaults_gp, sizeof(mmu_psize_defaults_gp)); found: +#ifndef CONFIG_DEBUG_PAGEALLOC /* * Pick a size for the linear mapping. 
Currently, we only support * 16M, 1M and 4K which is the default @@ -308,39 +422,75 @@ static void __init htab_init_page_sizes(void) mmu_linear_psize = MMU_PAGE_16M; else if (mmu_psize_defs[MMU_PAGE_1M].shift) mmu_linear_psize = MMU_PAGE_1M; +#endif /* CONFIG_DEBUG_PAGEALLOC */ +#ifdef CONFIG_PPC_64K_PAGES /* * Pick a size for the ordinary pages. Default is 4K, we support - * 64K if cache inhibited large pages are supported by the - * processor + * 64K for user mappings and vmalloc if supported by the processor. + * We only use 64k for ioremap if the processor + * (and firmware) support cache-inhibited large pages. + * If not, we use 4k and set mmu_ci_restrictions so that + * hash_page knows to switch processes that use cache-inhibited + * mappings to 4k pages. */ -#ifdef CONFIG_PPC_64K_PAGES - if (mmu_psize_defs[MMU_PAGE_64K].shift && - cpu_has_feature(CPU_FTR_CI_LARGE_PAGE)) + if (mmu_psize_defs[MMU_PAGE_64K].shift) { mmu_virtual_psize = MMU_PAGE_64K; -#endif + mmu_vmalloc_psize = MMU_PAGE_64K; + if (mmu_linear_psize == MMU_PAGE_4K) + mmu_linear_psize = MMU_PAGE_64K; + if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE)) { + /* + * Don't use 64k pages for ioremap on pSeries, since + * that would stop us accessing the HEA ethernet. + */ + if (!machine_is(pseries)) + mmu_io_psize = MMU_PAGE_64K; + } else + mmu_ci_restrictions = 1; + } +#endif /* CONFIG_PPC_64K_PAGES */ - printk(KERN_INFO "Page orders: linear mapping = %d, others = %d\n", +#ifdef CONFIG_SPARSEMEM_VMEMMAP + /* We try to use 16M pages for vmemmap if that is supported + * and we have at least 1G of RAM at boot + */ + if (mmu_psize_defs[MMU_PAGE_16M].shift && + lmb_phys_mem_size() >= 0x40000000) + mmu_vmemmap_psize = MMU_PAGE_16M; + else if (mmu_psize_defs[MMU_PAGE_64K].shift) + mmu_vmemmap_psize = MMU_PAGE_64K; + else + mmu_vmemmap_psize = MMU_PAGE_4K; +#endif /* CONFIG_SPARSEMEM_VMEMMAP */ + + printk(KERN_DEBUG "Page orders: linear mapping = %d, " + "virtual = %d, io = %d" +#ifdef CONFIG_SPARSEMEM_VMEMMAP + ", vmemmap = %d" +#endif + "\n", mmu_psize_defs[mmu_linear_psize].shift, - mmu_psize_defs[mmu_virtual_psize].shift); + mmu_psize_defs[mmu_virtual_psize].shift, + mmu_psize_defs[mmu_io_psize].shift +#ifdef CONFIG_SPARSEMEM_VMEMMAP + ,mmu_psize_defs[mmu_vmemmap_psize].shift +#endif + ); #ifdef CONFIG_HUGETLB_PAGE - /* Init large page size. Currently, we pick 16M or 1M depending + /* Reserve 16G huge page memory sections for huge pages */ + of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL); + +/* Set default large page size. Currently, we pick 16M or 1M depending * on what is available */ if (mmu_psize_defs[MMU_PAGE_16M].shift) - mmu_huge_psize = MMU_PAGE_16M; + HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift; /* With 4k/4level pagetables, we can't (for now) cope with a * huge page size < PMD_SIZE */ else if (mmu_psize_defs[MMU_PAGE_1M].shift) - mmu_huge_psize = MMU_PAGE_1M; - - /* Calculate HPAGE_SHIFT and sanity check it */ - if (mmu_psize_defs[mmu_huge_psize].shift > MIN_HUGEPTE_SHIFT && - mmu_psize_defs[mmu_huge_psize].shift < SID_SHIFT) - HPAGE_SHIFT = mmu_psize_defs[mmu_huge_psize].shift; - else - HPAGE_SHIFT = 0; /* No huge pages dude ! 
*/ + HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift; #endif /* CONFIG_HUGETLB_PAGE */ } @@ -366,7 +516,7 @@ static int __init htab_dt_scan_pftsize(unsigned long node, static unsigned long __init htab_get_table_size(void) { - unsigned long mem_size, rnd_mem_size, pteg_count; + unsigned long mem_size, rnd_mem_size, pteg_count, psize; /* If hash size isn't already provided by the platform, we try to * retrieve it from the device-tree. If it's not there neither, we @@ -384,7 +534,8 @@ static unsigned long __init htab_get_table_size(void) rnd_mem_size <<= 1; /* # pages / 2 */ - pteg_count = max(rnd_mem_size >> (12 + 1), 1UL << 11); + psize = mmu_psize_defs[mmu_virtual_psize].shift; + pteg_count = max(rnd_mem_size >> (psize + 1), 1UL << 11); return pteg_count << 7; } @@ -392,27 +543,75 @@ static unsigned long __init htab_get_table_size(void) #ifdef CONFIG_MEMORY_HOTPLUG void create_section_mapping(unsigned long start, unsigned long end) { - BUG_ON(htab_bolt_mapping(start, end, start, - _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX, - mmu_linear_psize)); + BUG_ON(htab_bolt_mapping(start, end, __pa(start), + pgprot_val(PAGE_KERNEL), mmu_linear_psize, + mmu_kernel_ssize)); +} + +int remove_section_mapping(unsigned long start, unsigned long end) +{ + return htab_remove_mapping(start, end, mmu_linear_psize, + mmu_kernel_ssize); } #endif /* CONFIG_MEMORY_HOTPLUG */ -void __init htab_initialize(void) +static inline void make_bl(unsigned int *insn_addr, void *func) +{ + unsigned long funcp = *((unsigned long *)func); + int offset = funcp - (unsigned long)insn_addr; + + *insn_addr = (unsigned int)(0x48000001 | (offset & 0x03fffffc)); + flush_icache_range((unsigned long)insn_addr, 4+ + (unsigned long)insn_addr); +} + +static void __init htab_finish_init(void) +{ + extern unsigned int *htab_call_hpte_insert1; + extern unsigned int *htab_call_hpte_insert2; + extern unsigned int *htab_call_hpte_remove; + extern unsigned int *htab_call_hpte_updatepp; + +#ifdef CONFIG_PPC_HAS_HASH_64K + extern unsigned int *ht64_call_hpte_insert1; + extern unsigned int *ht64_call_hpte_insert2; + extern unsigned int *ht64_call_hpte_remove; + extern unsigned int *ht64_call_hpte_updatepp; + + make_bl(ht64_call_hpte_insert1, ppc_md.hpte_insert); + make_bl(ht64_call_hpte_insert2, ppc_md.hpte_insert); + make_bl(ht64_call_hpte_remove, ppc_md.hpte_remove); + make_bl(ht64_call_hpte_updatepp, ppc_md.hpte_updatepp); +#endif /* CONFIG_PPC_HAS_HASH_64K */ + + make_bl(htab_call_hpte_insert1, ppc_md.hpte_insert); + make_bl(htab_call_hpte_insert2, ppc_md.hpte_insert); + make_bl(htab_call_hpte_remove, ppc_md.hpte_remove); + make_bl(htab_call_hpte_updatepp, ppc_md.hpte_updatepp); +} + +static void __init htab_initialize(void) { unsigned long table; unsigned long pteg_count; - unsigned long mode_rw; - unsigned long base = 0, size = 0; + unsigned long prot; + unsigned long base = 0, size = 0, limit; int i; - extern unsigned long tce_alloc_start, tce_alloc_end; - DBG(" -> htab_initialize()\n"); + /* Initialize segment sizes */ + htab_init_seg_sizes(); + /* Initialize page sizes */ htab_init_page_sizes(); + if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) { + mmu_kernel_ssize = MMU_SEGSIZE_1T; + mmu_highuser_ssize = MMU_SEGSIZE_1T; + printk(KERN_INFO "Using 1TB segments\n"); + } + /* * Calculate the required size of the htab. We want the number of * PTEGs to equal one half the number of real pages. 
@@ -422,15 +621,21 @@ void __init htab_initialize(void) htab_hash_mask = pteg_count - 1; - if (platform_is_lpar()) { + if (firmware_has_feature(FW_FEATURE_LPAR)) { /* Using a hypervisor which owns the htab */ htab_address = NULL; _SDR1 = 0; } else { /* Find storage for the HPT. Must be contiguous in - * the absolute address space. + * the absolute address space. On cell we want it to be + * in the first 2 Gig so we can use it for IOMMU hacks. */ - table = lmb_alloc(htab_size_bytes, htab_size_bytes); + if (machine_is(cell)) + limit = 0x80000000; + else + limit = 0; + + table = lmb_alloc_base(htab_size_bytes, htab_size_bytes, limit); DBG("Hash table allocated at %lx, size: %lx\n", table, htab_size_bytes); @@ -447,7 +652,14 @@ void __init htab_initialize(void) mtspr(SPRN_SDR1, _SDR1); } - mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX; + prot = pgprot_val(PAGE_KERNEL); + +#ifdef CONFIG_DEBUG_PAGEALLOC + linear_map_hash_count = lmb_end_of_DRAM() >> PAGE_SHIFT; + linear_map_hash_slots = __va(lmb_alloc_base(linear_map_hash_count, + 1, lmb.rmo_size)); + memset(linear_map_hash_slots, 0, linear_map_hash_count); +#endif /* CONFIG_DEBUG_PAGEALLOC */ /* On U3 based machines, we need to reserve the DART area and * _NOT_ map it to avoid cache paradoxes as it's remapped non @@ -459,7 +671,8 @@ void __init htab_initialize(void) base = (unsigned long)__va(lmb.memory.region[i].base); size = lmb.memory.region[i].size; - DBG("creating mapping for region: %lx : %lx\n", base, size); + DBG("creating mapping for region: %lx..%lx (prot: %x)\n", + base, size, prot); #ifdef CONFIG_U3_DART /* Do not map the DART space. Fortunately, it will be aligned @@ -473,21 +686,24 @@ void __init htab_initialize(void) if (dart_tablebase != 0 && dart_tablebase >= base && dart_tablebase < (base + size)) { + unsigned long dart_table_end = dart_tablebase + 16 * MB; if (base != dart_tablebase) BUG_ON(htab_bolt_mapping(base, dart_tablebase, - base, mode_rw, - mmu_linear_psize)); - if ((base + size) > (dart_tablebase + 16*MB)) + __pa(base), prot, + mmu_linear_psize, + mmu_kernel_ssize)); + if ((base + size) > dart_table_end) BUG_ON(htab_bolt_mapping(dart_tablebase+16*MB, - base + size, - dart_tablebase+16*MB, - mode_rw, - mmu_linear_psize)); + base + size, + __pa(dart_table_end), + prot, + mmu_linear_psize, + mmu_kernel_ssize)); continue; } #endif /* CONFIG_U3_DART */ - BUG_ON(htab_bolt_mapping(base, base + size, base, - mode_rw, mmu_linear_psize)); + BUG_ON(htab_bolt_mapping(base, base + size, __pa(base), + prot, mmu_linear_psize, mmu_kernel_ssize)); } /* @@ -504,21 +720,55 @@ void __init htab_initialize(void) if (base + size >= tce_alloc_start) tce_alloc_start = base + size + 1; - BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end, - tce_alloc_start, mode_rw, - mmu_linear_psize)); + BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end, + __pa(tce_alloc_start), prot, + mmu_linear_psize, mmu_kernel_ssize)); } + htab_finish_init(); + DBG(" <- htab_initialize()\n"); } #undef KB #undef MB -void htab_initialize_secondary(void) +void __init early_init_mmu(void) +{ + /* Setup initial STAB address in the PACA */ + get_paca()->stab_real = __pa((u64)&initial_stab); + get_paca()->stab_addr = (u64)&initial_stab; + + /* Initialize the MMU Hash table and create the linear mapping + * of memory. 
Has to be done before stab/slb initialization as + * this is currently where the page size encoding is obtained + */ + htab_initialize(); + + /* Initialize stab / SLB management except on iSeries + */ + if (cpu_has_feature(CPU_FTR_SLB)) + slb_initialize(); + else if (!firmware_has_feature(FW_FEATURE_ISERIES)) + stab_initialize(get_paca()->stab_real); +} + +#ifdef CONFIG_SMP +void __cpuinit early_init_mmu_secondary(void) { - if (!platform_is_lpar()) + /* Initialize hash table for that CPU */ + if (!firmware_has_feature(FW_FEATURE_LPAR)) mtspr(SPRN_SDR1, _SDR1); + + /* Initialize STAB/SLB. We use a virtual address as it works + * in real mode on pSeries and we want a virutal address on + * iSeries anyway + */ + if (cpu_has_feature(CPU_FTR_SLB)) + slb_initialize(); + else + stab_initialize(get_paca()->stab_addr); } +#endif /* CONFIG_SMP */ /* * Called by asm hashtable.S for doing lazy icache flush @@ -543,10 +793,97 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap) return pp; } +#ifdef CONFIG_PPC_MM_SLICES +unsigned int get_paca_psize(unsigned long addr) +{ + unsigned long index, slices; + + if (addr < SLICE_LOW_TOP) { + slices = get_paca()->context.low_slices_psize; + index = GET_LOW_SLICE_INDEX(addr); + } else { + slices = get_paca()->context.high_slices_psize; + index = GET_HIGH_SLICE_INDEX(addr); + } + return (slices >> (index * 4)) & 0xF; +} + +#else +unsigned int get_paca_psize(unsigned long addr) +{ + return get_paca()->context.user_psize; +} +#endif + +/* + * Demote a segment to using 4k pages. + * For now this makes the whole process use 4k pages. + */ +#ifdef CONFIG_PPC_64K_PAGES +void demote_segment_4k(struct mm_struct *mm, unsigned long addr) +{ + if (get_slice_psize(mm, addr) == MMU_PAGE_4K) + return; + slice_set_range_psize(mm, addr, 1, MMU_PAGE_4K); +#ifdef CONFIG_SPU_BASE + spu_flush_all_slbs(mm); +#endif + if (get_paca_psize(addr) != MMU_PAGE_4K) { + get_paca()->context = mm->context; + slb_flush_and_rebolt(); + } +} +#endif /* CONFIG_PPC_64K_PAGES */ + +#ifdef CONFIG_PPC_SUBPAGE_PROT +/* + * This looks up a 2-bit protection code for a 4k subpage of a 64k page. + * Userspace sets the subpage permissions using the subpage_prot system call. + * + * Result is 0: full permissions, _PAGE_RW: read-only, + * _PAGE_USER or _PAGE_USER|_PAGE_RW: no access. + */ +static int subpage_protection(pgd_t *pgdir, unsigned long ea) +{ + struct subpage_prot_table *spt = pgd_subpage_prot(pgdir); + u32 spp = 0; + u32 **sbpm, *sbpp; + + if (ea >= spt->maxaddr) + return 0; + if (ea < 0x100000000) { + /* addresses below 4GB use spt->low_prot */ + sbpm = spt->low_prot; + } else { + sbpm = spt->protptrs[ea >> SBP_L3_SHIFT]; + if (!sbpm) + return 0; + } + sbpp = sbpm[(ea >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)]; + if (!sbpp) + return 0; + spp = sbpp[(ea >> PAGE_SHIFT) & (SBP_L1_COUNT - 1)]; + + /* extract 2-bit bitfield for this 4k subpage */ + spp >>= 30 - 2 * ((ea >> 12) & 0xf); + + /* turn 0,1,2,3 into combination of _PAGE_USER and _PAGE_RW */ + spp = ((spp & 2) ? _PAGE_USER : 0) | ((spp & 1) ? 
_PAGE_RW : 0); + return spp; +} + +#else /* CONFIG_PPC_SUBPAGE_PROT */ +static inline int subpage_protection(pgd_t *pgdir, unsigned long ea) +{ + return 0; +} +#endif + /* Result code is: * 0 - handled * 1 - normal page fault * -1 - critical hash insertion error + * -2 - access not permitted by subpage protection mechanism */ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) { @@ -554,8 +891,9 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) unsigned long vsid; struct mm_struct *mm; pte_t *ptep; - cpumask_t tmp; + const struct cpumask *tmp; int rc, user_region = 0, local = 0; + int psize, ssize; DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n", ea, access, trap); @@ -574,11 +912,18 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) DBG_LOW(" user region with no mm !\n"); return 1; } - vsid = get_vsid(mm->context.id, ea); + psize = get_slice_psize(mm, ea); + ssize = user_segment_size(ea); + vsid = get_vsid(mm->context.id, ea, ssize); break; case VMALLOC_REGION_ID: mm = &init_mm; - vsid = get_kernel_vsid(ea); + vsid = get_kernel_vsid(ea, mmu_kernel_ssize); + if (ea < VMALLOC_END) + psize = mmu_vmalloc_psize; + else + psize = mmu_io_psize; + ssize = mmu_kernel_ssize; break; default: /* Not a valid range @@ -594,15 +939,26 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) return 1; /* Check CPU locality */ - tmp = cpumask_of_cpu(smp_processor_id()); - if (user_region && cpus_equal(mm->cpu_vm_mask, tmp)) + tmp = cpumask_of(smp_processor_id()); + if (user_region && cpumask_equal(mm_cpumask(mm), tmp)) local = 1; +#ifdef CONFIG_HUGETLB_PAGE /* Handle hugepage regions */ - if (unlikely(in_hugepage_area(mm->context, ea))) { + if (HPAGE_SHIFT && mmu_huge_psizes[psize]) { DBG_LOW(" -> huge page !\n"); return hash_huge_page(mm, access, ea, vsid, local, trap); } +#endif /* CONFIG_HUGETLB_PAGE */ + +#ifndef CONFIG_PPC_64K_PAGES + /* If we use 4K pages and our psize is not 4K, then we are hitting + * a special driver mapping, we need to align the address before + * we fetch the PTE + */ + if (psize != MMU_PAGE_4K) + ea &= ~((1ul << mmu_psize_defs[psize].shift) - 1); +#endif /* CONFIG_PPC_64K_PAGES */ /* Get PTE and page size from page tables */ ptep = find_linux_pte(pgdir, ea); @@ -626,15 +982,63 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) } /* Do actual hashing */ -#ifndef CONFIG_PPC_64K_PAGES - rc = __hash_page_4K(ea, access, vsid, ptep, trap, local); -#else - if (mmu_virtual_psize == MMU_PAGE_64K) - rc = __hash_page_64K(ea, access, vsid, ptep, trap, local); - else - rc = __hash_page_4K(ea, access, vsid, ptep, trap, local); +#ifdef CONFIG_PPC_64K_PAGES + /* If _PAGE_4K_PFN is set, make sure this is a 4k segment */ + if ((pte_val(*ptep) & _PAGE_4K_PFN) && psize == MMU_PAGE_64K) { + demote_segment_4k(mm, ea); + psize = MMU_PAGE_4K; + } + + /* If this PTE is non-cacheable and we have restrictions on + * using non cacheable large pages, then we switch to 4k + */ + if (mmu_ci_restrictions && psize == MMU_PAGE_64K && + (pte_val(*ptep) & _PAGE_NO_CACHE)) { + if (user_region) { + demote_segment_4k(mm, ea); + psize = MMU_PAGE_4K; + } else if (ea < VMALLOC_END) { + /* + * some driver did a non-cacheable mapping + * in vmalloc space, so switch vmalloc + * to 4k pages + */ + printk(KERN_ALERT "Reducing vmalloc segment " + "to 4kB pages because of " + "non-cacheable mapping\n"); + psize = mmu_vmalloc_psize = MMU_PAGE_4K; +#ifdef CONFIG_SPU_BASE + spu_flush_all_slbs(mm); 
+#endif + } + } + if (user_region) { + if (psize != get_paca_psize(ea)) { + get_paca()->context = mm->context; + slb_flush_and_rebolt(); + } + } else if (get_paca()->vmalloc_sllp != + mmu_psize_defs[mmu_vmalloc_psize].sllp) { + get_paca()->vmalloc_sllp = + mmu_psize_defs[mmu_vmalloc_psize].sllp; + slb_vmalloc_update(); + } #endif /* CONFIG_PPC_64K_PAGES */ +#ifdef CONFIG_PPC_HAS_HASH_64K + if (psize == MMU_PAGE_64K) + rc = __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize); + else +#endif /* CONFIG_PPC_HAS_HASH_64K */ + { + int spp = subpage_protection(pgdir, ea); + if (access & spp) + rc = -2; + else + rc = __hash_page_4K(ea, access, vsid, ptep, trap, + local, ssize, spp); + } + #ifndef CONFIG_PPC_64K_PAGES DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep)); #else @@ -652,57 +1056,81 @@ void hash_preload(struct mm_struct *mm, unsigned long ea, unsigned long vsid; void *pgdir; pte_t *ptep; - cpumask_t mask; unsigned long flags; int local = 0; + int ssize; - /* We don't want huge pages prefaulted for now - */ - if (unlikely(in_hugepage_area(mm->context, ea))) + BUG_ON(REGION_ID(ea) != USER_REGION_ID); + +#ifdef CONFIG_PPC_MM_SLICES + /* We only prefault standard pages for now */ + if (unlikely(get_slice_psize(mm, ea) != mm->context.user_psize)) return; +#endif DBG_LOW("hash_preload(mm=%p, mm->pgdir=%p, ea=%016lx, access=%lx," " trap=%lx\n", mm, mm->pgd, ea, access, trap); - /* Get PTE, VSID, access mask */ + /* Get Linux PTE if available */ pgdir = mm->pgd; if (pgdir == NULL) return; ptep = find_linux_pte(pgdir, ea); if (!ptep) return; - vsid = get_vsid(mm->context.id, ea); - /* Hash it in */ +#ifdef CONFIG_PPC_64K_PAGES + /* If either _PAGE_4K_PFN or _PAGE_NO_CACHE is set (and we are on + * a 64K kernel), then we don't preload, hash_page() will take + * care of it once we actually try to access the page. + * That way we don't have to duplicate all of the logic for segment + * page size demotion here + */ + if (pte_val(*ptep) & (_PAGE_4K_PFN | _PAGE_NO_CACHE)) + return; +#endif /* CONFIG_PPC_64K_PAGES */ + + /* Get VSID */ + ssize = user_segment_size(ea); + vsid = get_vsid(mm->context.id, ea, ssize); + + /* Hash doesn't like irqs */ local_irq_save(flags); - mask = cpumask_of_cpu(smp_processor_id()); - if (cpus_equal(mm->cpu_vm_mask, mask)) + + /* Is that local to this CPU ? */ + if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) local = 1; -#ifndef CONFIG_PPC_64K_PAGES - __hash_page_4K(ea, access, vsid, ptep, trap, local); -#else - if (mmu_virtual_psize == MMU_PAGE_64K) - __hash_page_64K(ea, access, vsid, ptep, trap, local); + + /* Hash it in */ +#ifdef CONFIG_PPC_HAS_HASH_64K + if (mm->context.user_psize == MMU_PAGE_64K) + __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize); else - __hash_page_4K(ea, access, vsid, ptep, trap, local); -#endif /* CONFIG_PPC_64K_PAGES */ +#endif /* CONFIG_PPC_HAS_HASH_64K */ + __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize, + subpage_protection(pgdir, ea)); + local_irq_restore(flags); } -void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int local) +/* WARNING: This is called from hash_low_64.S, if you change this prototype, + * do not forget to update the assembly call site ! 
+ */ +void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int ssize, + int local) { unsigned long hash, index, shift, hidx, slot; DBG_LOW("flush_hash_page(va=%016x)\n", va); pte_iterate_hashed_subpages(pte, psize, va, index, shift) { - hash = hpt_hash(va, shift); + hash = hpt_hash(va, shift, ssize); hidx = __rpte_to_hidx(pte, index); if (hidx & _PTEIDX_SECONDARY) hash = ~hash; slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; slot += hidx & _PTEIDX_GROUP_IX; DBG_LOW(" sub %d: hash=%x, hidx=%x\n", index, slot, hidx); - ppc_md.hpte_invalidate(slot, va, psize, local); + ppc_md.hpte_invalidate(slot, va, psize, ssize, local); } pte_iterate_hashed_end(); } @@ -717,60 +1145,84 @@ void flush_hash_range(unsigned long number, int local) for (i = 0; i < number; i++) flush_hash_page(batch->vaddr[i], batch->pte[i], - batch->psize, local); + batch->psize, batch->ssize, local); } } -static inline void make_bl(unsigned int *insn_addr, void *func) -{ - unsigned long funcp = *((unsigned long *)func); - int offset = funcp - (unsigned long)insn_addr; - - *insn_addr = (unsigned int)(0x48000001 | (offset & 0x03fffffc)); - flush_icache_range((unsigned long)insn_addr, 4+ - (unsigned long)insn_addr); -} - /* * low_hash_fault is called when we the low level hash code failed * to instert a PTE due to an hypervisor error */ -void low_hash_fault(struct pt_regs *regs, unsigned long address) +void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc) { if (user_mode(regs)) { - siginfo_t info; - - info.si_signo = SIGBUS; - info.si_errno = 0; - info.si_code = BUS_ADRERR; - info.si_addr = (void __user *)address; - force_sig_info(SIGBUS, &info, current); - return; - } - bad_page_fault(regs, address, SIGBUS); +#ifdef CONFIG_PPC_SUBPAGE_PROT + if (rc == -2) + _exception(SIGSEGV, regs, SEGV_ACCERR, address); + else +#endif + _exception(SIGBUS, regs, BUS_ADRERR, address); + } else + bad_page_fault(regs, address, SIGBUS); } -void __init htab_finish_init(void) +#ifdef CONFIG_DEBUG_PAGEALLOC +static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi) { - extern unsigned int *htab_call_hpte_insert1; - extern unsigned int *htab_call_hpte_insert2; - extern unsigned int *htab_call_hpte_remove; - extern unsigned int *htab_call_hpte_updatepp; + unsigned long hash, hpteg; + unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize); + unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize); + unsigned long mode = htab_convert_pte_flags(PAGE_KERNEL); + int ret; + + hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize); + hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); + + ret = ppc_md.hpte_insert(hpteg, va, __pa(vaddr), + mode, HPTE_V_BOLTED, + mmu_linear_psize, mmu_kernel_ssize); + BUG_ON (ret < 0); + spin_lock(&linear_map_hash_lock); + BUG_ON(linear_map_hash_slots[lmi] & 0x80); + linear_map_hash_slots[lmi] = ret | 0x80; + spin_unlock(&linear_map_hash_lock); +} -#ifdef CONFIG_PPC_64K_PAGES - extern unsigned int *ht64_call_hpte_insert1; - extern unsigned int *ht64_call_hpte_insert2; - extern unsigned int *ht64_call_hpte_remove; - extern unsigned int *ht64_call_hpte_updatepp; +static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi) +{ + unsigned long hash, hidx, slot; + unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize); + unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize); + + hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize); + spin_lock(&linear_map_hash_lock); + BUG_ON(!(linear_map_hash_slots[lmi] & 0x80)); + hidx = linear_map_hash_slots[lmi] & 0x7f; 
+ linear_map_hash_slots[lmi] = 0; + spin_unlock(&linear_map_hash_lock); + if (hidx & _PTEIDX_SECONDARY) + hash = ~hash; + slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; + slot += hidx & _PTEIDX_GROUP_IX; + ppc_md.hpte_invalidate(slot, va, mmu_linear_psize, mmu_kernel_ssize, 0); +} - make_bl(ht64_call_hpte_insert1, ppc_md.hpte_insert); - make_bl(ht64_call_hpte_insert2, ppc_md.hpte_insert); - make_bl(ht64_call_hpte_remove, ppc_md.hpte_remove); - make_bl(ht64_call_hpte_updatepp, ppc_md.hpte_updatepp); -#endif /* CONFIG_PPC_64K_PAGES */ +void kernel_map_pages(struct page *page, int numpages, int enable) +{ + unsigned long flags, vaddr, lmi; + int i; - make_bl(htab_call_hpte_insert1, ppc_md.hpte_insert); - make_bl(htab_call_hpte_insert2, ppc_md.hpte_insert); - make_bl(htab_call_hpte_remove, ppc_md.hpte_remove); - make_bl(htab_call_hpte_updatepp, ppc_md.hpte_updatepp); + local_irq_save(flags); + for (i = 0; i < numpages; i++, page++) { + vaddr = (unsigned long)page_address(page); + lmi = __pa(vaddr) >> PAGE_SHIFT; + if (lmi >= linear_map_hash_count) + continue; + if (enable) + kernel_map_linear_page(vaddr, lmi); + else + kernel_unmap_linear_page(vaddr, lmi); + } + local_irq_restore(flags); } +#endif /* CONFIG_DEBUG_PAGEALLOC */
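
For readers following the new CONFIG_DEBUG_PAGEALLOC code above, the bookkeeping convention behind linear_map_hash_slots[] can be illustrated outside the kernel: each byte stores the hidx value returned by ppc_md.hpte_insert() in its low 7 bits and uses 0x80 as a "currently hashed in" marker, which kernel_map_linear_page() sets and kernel_unmap_linear_page() checks and clears under linear_map_hash_lock. The sketch below is a minimal, self-contained userspace C illustration of that encoding only; the LMH_* names and helper functions are hypothetical and are not part of the patch.

#include <stdio.h>
#include <stdint.h>

#define LMH_VALID 0x80	/* page currently has a bolted HPTE */
#define LMH_HIDX  0x7f	/* low bits: hidx (group offset + secondary-hash flag) */

/* Record the hidx returned by a (hypothetical) hpte_insert for page lmi. */
static void lmh_record(uint8_t *slots, unsigned long lmi, int hidx)
{
	slots[lmi] = (uint8_t)((hidx & LMH_HIDX) | LMH_VALID);
}

/* Return the recorded hidx and clear the entry, or -1 if the page isn't mapped. */
static int lmh_take(uint8_t *slots, unsigned long lmi)
{
	uint8_t v = slots[lmi];

	if (!(v & LMH_VALID))
		return -1;
	slots[lmi] = 0;
	return v & LMH_HIDX;
}

int main(void)
{
	uint8_t slots[8] = { 0 };

	lmh_record(slots, 3, 5);			/* insert returned hidx 5 */
	printf("hidx = %d\n", lmh_take(slots, 3));	/* prints 5  */
	printf("hidx = %d\n", lmh_take(slots, 3));	/* prints -1 */
	return 0;
}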