X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=arch%2Fpowerpc%2Fmm%2Fhash_utils_64.c;h=1ade7eb6ae00a3e8f352526f9b556cb05408943a;hb=f3a32500ba8f3ec9ee0c12836fcfd315f1256db4;hp=5ce5a4dcd00823c17447737f18ad5ad03c820185;hpb=0d9ea75443dc7e37843e656b8ebc947a6d16d618;p=safe%2Fjmp%2Flinux-2.6

diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 5ce5a4d..1ade7eb 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -151,39 +151,58 @@ static struct mmu_psize_def mmu_psize_defaults_gp[] = {
 	},
 };
 
+static unsigned long htab_convert_pte_flags(unsigned long pteflags)
+{
+	unsigned long rflags = pteflags & 0x1fa;
+
+	/* _PAGE_EXEC -> NOEXEC */
+	if ((pteflags & _PAGE_EXEC) == 0)
+		rflags |= HPTE_R_N;
+
+	/* PP bits. PAGE_USER is already PP bit 0x2, so we only
+	 * need to add in 0x1 if it's a read-only user page
+	 */
+	if ((pteflags & _PAGE_USER) && !((pteflags & _PAGE_RW) &&
+					 (pteflags & _PAGE_DIRTY)))
+		rflags |= 1;
+
+	/* Always add C */
+	return rflags | HPTE_R_C;
+}
+
 int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
-		      unsigned long pstart, unsigned long mode,
+		      unsigned long pstart, unsigned long prot,
 		      int psize, int ssize)
 {
 	unsigned long vaddr, paddr;
 	unsigned int step, shift;
-	unsigned long tmp_mode;
 	int ret = 0;
 
 	shift = mmu_psize_defs[psize].shift;
 	step = 1 << shift;
 
+	prot = htab_convert_pte_flags(prot);
+
+	DBG("htab_bolt_mapping(%lx..%lx -> %lx (%lx,%d,%d)\n",
+	    vstart, vend, pstart, prot, psize, ssize);
+
 	for (vaddr = vstart, paddr = pstart; vaddr < vend;
 	     vaddr += step, paddr += step) {
 		unsigned long hash, hpteg;
 		unsigned long vsid = get_kernel_vsid(vaddr, ssize);
 		unsigned long va = hpt_va(vaddr, vsid, ssize);
+		unsigned long tprot = prot;
 
-		tmp_mode = mode;
-
-		/* Make non-kernel text non-executable */
-		if (!in_kernel_text(vaddr))
-			tmp_mode = mode | HPTE_R_N;
+		/* Make kernel text executable */
+		if (overlaps_kernel_text(vaddr, vaddr + step))
+			tprot &= ~HPTE_R_N;
 
 		hash = hpt_hash(va, shift, ssize);
 		hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
 
-		DBG("htab_bolt_mapping: calling %p\n", ppc_md.hpte_insert);
-
 		BUG_ON(!ppc_md.hpte_insert);
-		ret = ppc_md.hpte_insert(hpteg, va, paddr,
-				tmp_mode, HPTE_V_BOLTED, psize, ssize);
+		ret = ppc_md.hpte_insert(hpteg, va, paddr, tprot,
+					 HPTE_V_BOLTED, psize, ssize);
 
 		if (ret < 0)
 			break;
@@ -329,6 +348,7 @@ static int __init htab_dt_scan_page_sizes(unsigned long node,
 	return 0;
 }
 
+#ifdef CONFIG_HUGETLB_PAGE
 /* Scan for 16G memory blocks that have been set aside for huge pages
  * and reserve those blocks for 16G huge pages.
  */
@@ -362,10 +382,13 @@ static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
 	printk(KERN_INFO "Huge page(16GB) memory: "
 			"addr = 0x%lX size = 0x%lX pages = %d\n",
 			phys_addr, block_size, expected_pages);
-	lmb_reserve(phys_addr, block_size * expected_pages);
-	add_gpage(phys_addr, block_size, expected_pages);
+	if (phys_addr + (16 * GB) <= lmb_end_of_DRAM()) {
+		lmb_reserve(phys_addr, block_size * expected_pages);
+		add_gpage(phys_addr, block_size, expected_pages);
+	}
 	return 0;
 }
+#endif /* CONFIG_HUGETLB_PAGE */
 
 static void __init htab_init_page_sizes(void)
 {
@@ -493,7 +516,7 @@ static int __init htab_dt_scan_pftsize(unsigned long node,
 
 static unsigned long __init htab_get_table_size(void)
 {
-	unsigned long mem_size, rnd_mem_size, pteg_count;
+	unsigned long mem_size, rnd_mem_size, pteg_count, psize;
 
 	/* If hash size isn't already provided by the platform, we try to
 	 * retrieve it from the device-tree. If it's not there either, we
@@ -511,7 +534,8 @@ static unsigned long __init htab_get_table_size(void)
 		rnd_mem_size <<= 1;
 
 	/* # pages / 2 */
-	pteg_count = max(rnd_mem_size >> (12 + 1), 1UL << 11);
+	psize = mmu_psize_defs[mmu_virtual_psize].shift;
+	pteg_count = max(rnd_mem_size >> (psize + 1), 1UL << 11);
 
 	return pteg_count << 7;
 }
@@ -519,9 +543,9 @@ static unsigned long __init htab_get_table_size(void)
 #ifdef CONFIG_MEMORY_HOTPLUG
 void create_section_mapping(unsigned long start, unsigned long end)
 {
-	BUG_ON(htab_bolt_mapping(start, end, __pa(start),
-			_PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX,
-			mmu_linear_psize, mmu_kernel_ssize));
+	BUG_ON(htab_bolt_mapping(start, end, __pa(start),
+				 pgprot_val(PAGE_KERNEL), mmu_linear_psize,
+				 mmu_kernel_ssize));
 }
 
 int remove_section_mapping(unsigned long start, unsigned long end)
@@ -566,11 +590,11 @@ static void __init htab_finish_init(void)
 	make_bl(htab_call_hpte_updatepp, ppc_md.hpte_updatepp);
 }
 
-void __init htab_initialize(void)
+static void __init htab_initialize(void)
 {
 	unsigned long table;
 	unsigned long pteg_count;
-	unsigned long mode_rw;
+	unsigned long prot;
 	unsigned long base = 0, size = 0, limit;
 	int i;
 
@@ -628,7 +652,7 @@ void __init htab_initialize(void)
 		mtspr(SPRN_SDR1, _SDR1);
 	}
 
-	mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;
+	prot = pgprot_val(PAGE_KERNEL);
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
 	linear_map_hash_count = lmb_end_of_DRAM() >> PAGE_SHIFT;
@@ -647,7 +671,8 @@ void __init htab_initialize(void)
 		base = (unsigned long)__va(lmb.memory.region[i].base);
 		size = lmb.memory.region[i].size;
 
-		DBG("creating mapping for region: %lx : %lx\n", base, size);
+		DBG("creating mapping for region: %lx..%lx (prot: %x)\n",
+		    base, size, prot);
 
 #ifdef CONFIG_U3_DART
 		/* Do not map the DART space. Fortunately, it will be aligned
@@ -664,21 +689,21 @@ void __init htab_initialize(void)
 			unsigned long dart_table_end = dart_tablebase + 16 * MB;
 			if (base != dart_tablebase)
 				BUG_ON(htab_bolt_mapping(base, dart_tablebase,
-							__pa(base), mode_rw,
+							__pa(base), prot,
 							mmu_linear_psize,
 							mmu_kernel_ssize));
 			if ((base + size) > dart_table_end)
 				BUG_ON(htab_bolt_mapping(dart_tablebase+16*MB,
 							base + size,
 							__pa(dart_table_end),
-							mode_rw,
+							prot,
 							mmu_linear_psize,
 							mmu_kernel_ssize));
 			continue;
 		}
 #endif /* CONFIG_U3_DART */
 		BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
-				mode_rw, mmu_linear_psize, mmu_kernel_ssize));
+				prot, mmu_linear_psize, mmu_kernel_ssize));
 	}
 
 	/*
@@ -696,7 +721,7 @@ void __init htab_initialize(void)
 			tce_alloc_start = base + size + 1;
 
 		BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
-					 __pa(tce_alloc_start), mode_rw,
+					 __pa(tce_alloc_start), prot,
 					 mmu_linear_psize, mmu_kernel_ssize));
 	}
 
@@ -707,11 +732,43 @@ void __init htab_initialize(void)
 #undef KB
 #undef MB
 
-void htab_initialize_secondary(void)
+void __init early_init_mmu(void)
 {
+	/* Setup initial STAB address in the PACA */
+	get_paca()->stab_real = __pa((u64)&initial_stab);
+	get_paca()->stab_addr = (u64)&initial_stab;
+
+	/* Initialize the MMU Hash table and create the linear mapping
+	 * of memory. Has to be done before stab/slb initialization as
+	 * this is currently where the page size encoding is obtained
+	 */
+	htab_initialize();
+
+	/* Initialize stab / SLB management except on iSeries
+	 */
+	if (cpu_has_feature(CPU_FTR_SLB))
+		slb_initialize();
+	else if (!firmware_has_feature(FW_FEATURE_ISERIES))
+		stab_initialize(get_paca()->stab_real);
+}
+
+#ifdef CONFIG_SMP
+void __cpuinit early_init_mmu_secondary(void)
+{
+	/* Initialize hash table for that CPU */
 	if (!firmware_has_feature(FW_FEATURE_LPAR))
 		mtspr(SPRN_SDR1, _SDR1);
+
+	/* Initialize STAB/SLB. We use a virtual address as it works
+	 * in real mode on pSeries and we want a virtual address on
+	 * iSeries anyway
+	 */
+	if (cpu_has_feature(CPU_FTR_SLB))
+		slb_initialize();
+	else
+		stab_initialize(get_paca()->stab_addr);
 }
+#endif /* CONFIG_SMP */
 
 /*
  * Called by asm hashtable.S for doing lazy icache flush
@@ -834,7 +891,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	unsigned long vsid;
 	struct mm_struct *mm;
 	pte_t *ptep;
-	cpumask_t tmp;
+	const struct cpumask *tmp;
 	int rc, user_region = 0, local = 0;
 	int psize, ssize;
 
@@ -882,8 +939,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 		return 1;
 
 	/* Check CPU locality */
-	tmp = cpumask_of_cpu(smp_processor_id());
-	if (user_region && cpus_equal(mm->cpu_vm_mask, tmp))
+	tmp = cpumask_of(smp_processor_id());
+	if (user_region && cpumask_equal(mm_cpumask(mm), tmp))
 		local = 1;
 
 #ifdef CONFIG_HUGETLB_PAGE
@@ -999,7 +1056,6 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 	unsigned long vsid;
 	void *pgdir;
 	pte_t *ptep;
-	cpumask_t mask;
 	unsigned long flags;
 	int local = 0;
 	int ssize;
@@ -1042,8 +1098,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 	local_irq_save(flags);
 
 	/* Is that local to this CPU ? */
-	mask = cpumask_of_cpu(smp_processor_id());
-	if (cpus_equal(mm->cpu_vm_mask, mask))
+	if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
 		local = 1;
 
 	/* Hash it in */
@@ -1117,8 +1172,7 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
 	unsigned long hash, hpteg;
 	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
 	unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize);
-	unsigned long mode = _PAGE_ACCESSED | _PAGE_DIRTY |
-		_PAGE_COHERENT | PP_RWXX | HPTE_R_N;
+	unsigned long mode = htab_convert_pte_flags(PAGE_KERNEL);
 	int ret;
 
 	hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize);
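
A note on the htab_get_table_size() change above: the old code hard-coded the 4K page shift (the literal 12), so a kernel built with a 64K base page size sized its hash table as if it had sixteen times as many pages. The patch divides by the shift of the configured base page size instead, keeping the rule of thumb of one PTEG (a 128-byte group of 8 HPTEs, hence the final "<< 7") per two pages of memory, with a floor of 2^11 PTEGs. The standalone userspace sketch below mirrors that arithmetic; the htab_size() helper and its open-coded power-of-two rounding loop are illustrative stand-ins for the kernel's __ilog2()-based code, not kernel API:

#include <stdio.h>

/* Sketch of the sizing math in htab_get_table_size() after this patch. */
static unsigned long htab_size(unsigned long mem_size, unsigned int page_shift)
{
	unsigned long rnd_mem_size, pteg_count;

	/* Round the memory size up to the next power of 2, as the
	 * kernel's __ilog2()-based code does. */
	for (rnd_mem_size = 1UL; rnd_mem_size < mem_size; rnd_mem_size <<= 1)
		;

	/* One PTEG per two pages, but never fewer than 2^11 PTEGs. */
	pteg_count = rnd_mem_size >> (page_shift + 1);
	if (pteg_count < (1UL << 11))
		pteg_count = 1UL << 11;

	/* 128 bytes per PTEG (8 HPTEs of 16 bytes each). */
	return pteg_count << 7;
}

int main(void)
{
	unsigned long one_gb = 1UL << 30;

	printf("4K  pages: %lu MB\n", htab_size(one_gb, 12) >> 20); /* 16 */
	printf("64K pages: %lu MB\n", htab_size(one_gb, 16) >> 20); /* 1 */
	return 0;
}

For 1 GB of RAM this prints a 16 MB table with 4K base pages but only 1 MB with 64K pages, which is the saving the mmu_psize_defs[mmu_virtual_psize].shift lookup buys on 64K-page configurations.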