X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=arch%2Fpowerpc%2Fmm%2Fslb.c;h=89497fb04280ce6bb63b50af6603c513c942d872;hb=b61bfa3c462671c48a51fb5c31af337c5a996a04;hp=637afb2386b703cdfb5e603f3861db8fcda7b466;hpb=aa39be09dfd7e95509cadcdb99cf7eb470d83c46;p=safe%2Fjmp%2Flinux-2.6

diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 637afb2..89497fb 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -28,9 +28,9 @@
 #include <asm/udbg.h>
 
 #ifdef DEBUG
-#define DBG(fmt...) udbg_printf(fmt)
+#define DBG(fmt...) printk(fmt)
 #else
-#define DBG(fmt...)
+#define DBG pr_debug
 #endif
 
 extern void slb_allocate_realmode(unsigned long ea);
@@ -44,13 +44,13 @@ static void slb_allocate(unsigned long ea)
 		slb_allocate_realmode(ea);
 }
 
+#define slb_esid_mask(ssize)	\
+	(((ssize) == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T)
+
 static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
 					 unsigned long slot)
 {
-	unsigned long mask;
-
-	mask = (ssize == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T;
-	return (ea & mask) | SLB_ESID_V | slot;
+	return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | slot;
 }
 
 #define slb_vsid_shift(ssize)	\
@@ -124,6 +124,12 @@ void slb_flush_and_rebolt(void)
 		ksp_vsid_data = get_slb_shadow()->save_area[2].vsid;
 	}
 
+	/*
+	 * We can't take a PMU exception in the following code, so hard
+	 * disable interrupts.
+	 */
+	hard_irq_disable();
+
 	/* We need to do this all in asm, so we're sure we don't touch
 	 * the stack between the slbia and rebolting it. */
 	asm volatile("isync\n"
@@ -149,6 +155,35 @@ void slb_vmalloc_update(void)
 	slb_flush_and_rebolt();
 }
 
+/* Helper function to compare esids. There are four cases to handle.
+ * 1. The system is not 1T segment size capable. Use the GET_ESID compare.
+ * 2. The system is 1T capable, both addresses are < 1T, use the GET_ESID compare.
+ * 3. The system is 1T capable, only one of the two addresses is > 1T. This is not a match.
+ * 4. The system is 1T capable, both addresses are > 1T, use the GET_ESID_1T macro to compare.
+ */
+static inline int esids_match(unsigned long addr1, unsigned long addr2)
+{
+	int esid_1t_count;
+
+	/* System is not 1T segment size capable. */
+	if (!cpu_has_feature(CPU_FTR_1T_SEGMENT))
+		return (GET_ESID(addr1) == GET_ESID(addr2));
+
+	esid_1t_count = (((addr1 >> SID_SHIFT_1T) != 0) +
+				((addr2 >> SID_SHIFT_1T) != 0));
+
+	/* both addresses are < 1T */
+	if (esid_1t_count == 0)
+		return (GET_ESID(addr1) == GET_ESID(addr2));
+
+	/* One address < 1T, the other > 1T. Not a match */
+	if (esid_1t_count == 1)
+		return 0;
+
+	/* Both addresses are > 1T. */
+	return (GET_ESID_1T(addr1) == GET_ESID_1T(addr2));
+}
+
 /* Flush all user entries from the segment table of the current processor. */
 void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 {
@@ -194,15 +229,14 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 		return;
 	slb_allocate(pc);
 
-	if (GET_ESID(pc) == GET_ESID(stack))
+	if (esids_match(pc,stack))
 		return;
 
 	if (is_kernel_addr(stack))
 		return;
 	slb_allocate(stack);
 
-	if ((GET_ESID(pc) == GET_ESID(unmapped_base))
-	    || (GET_ESID(stack) == GET_ESID(unmapped_base)))
+	if (esids_match(pc,unmapped_base) || esids_match(stack,unmapped_base))
 		return;
 
 	if (is_kernel_addr(unmapped_base))
@@ -228,22 +262,37 @@ void slb_initialize(void)
 	static int slb_encoding_inited;
 	extern unsigned int *slb_miss_kernel_load_linear;
 	extern unsigned int *slb_miss_kernel_load_io;
+	extern unsigned int *slb_compare_rr_to_size;
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+	extern unsigned int *slb_miss_kernel_load_vmemmap;
+	unsigned long vmemmap_llp;
+#endif
 
 	/* Prepare our SLB miss handler based on our page size */
 	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
 	io_llp = mmu_psize_defs[mmu_io_psize].sllp;
 	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
 	get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
-
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+	vmemmap_llp = mmu_psize_defs[mmu_vmemmap_psize].sllp;
+#endif
 	if (!slb_encoding_inited) {
 		slb_encoding_inited = 1;
 		patch_slb_encoding(slb_miss_kernel_load_linear,
 				   SLB_VSID_KERNEL | linear_llp);
 		patch_slb_encoding(slb_miss_kernel_load_io,
 				   SLB_VSID_KERNEL | io_llp);
+		patch_slb_encoding(slb_compare_rr_to_size,
+				   mmu_slb_size);
+
+		DBG("SLB: linear LLP = %04lx\n", linear_llp);
+		DBG("SLB: io LLP = %04lx\n", io_llp);
 
-		DBG("SLB: linear LLP = %04x\n", linear_llp);
-		DBG("SLB: io LLP = %04x\n", io_llp);
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+		patch_slb_encoding(slb_miss_kernel_load_vmemmap,
+				   SLB_VSID_KERNEL | vmemmap_llp);
+		DBG("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
+#endif
 	}
 
 	get_paca()->stab_rr = SLB_NUM_BOLTED;
@@ -264,9 +313,16 @@ void slb_initialize(void)
 
 	create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
 
-	/* We don't bolt the stack for the time being - we're in boot,
-	 * so the stack is in the bolted segment. By the time it goes
-	 * elsewhere, we'll call _switch() which will bolt in the new
-	 * one. */
+	/* For the boot cpu, we're running on the stack in init_thread_union,
+	 * which is in the first segment of the linear mapping, and also
+	 * get_paca()->kstack hasn't been initialized yet.
+	 * For secondary cpus, we need to bolt the kernel stack entry now.
+	 */
+	slb_shadow_clear(2);
+	if (raw_smp_processor_id() != boot_cpuid &&
+	    (get_paca()->kstack & slb_esid_mask(mmu_kernel_ssize)) > PAGE_OFFSET)
+		create_shadowed_slbe(get_paca()->kstack,
+				     mmu_kernel_ssize, lflags, 2);
+
 	asm volatile("isync":::"memory");
 }
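
The esids_match() helper added by this patch decides whether two effective addresses fall in the same segment, comparing 256MB ESIDs unless both addresses sit above the 1TB boundary on a 1T-capable CPU. The following standalone sketch illustrates that four-case comparison outside the kernel: the SID_SHIFT values (28 for 256MB segments, 40 for 1TB segments) follow the powerpc definitions, but the has_1t_segments flag, the macros and the main() harness are illustrative stand-ins for the kernel interfaces, not kernel code.

/* Standalone sketch of the esids_match() case analysis.  Assumes a 64-bit
 * unsigned long, as on ppc64; has_1t_segments stands in for
 * cpu_has_feature(CPU_FTR_1T_SEGMENT). */
#include <stdio.h>

#define SID_SHIFT	28		/* 256MB segment size */
#define SID_SHIFT_1T	40		/* 1TB segment size */
#define GET_ESID(addr)		((addr) >> SID_SHIFT)
#define GET_ESID_1T(addr)	((addr) >> SID_SHIFT_1T)

static int esids_match(unsigned long addr1, unsigned long addr2,
		       int has_1t_segments)
{
	int esid_1t_count;

	/* Case 1: no 1T segment support, compare 256MB ESIDs. */
	if (!has_1t_segments)
		return GET_ESID(addr1) == GET_ESID(addr2);

	/* Count how many of the two addresses lie at or above 1TB. */
	esid_1t_count = ((addr1 >> SID_SHIFT_1T) != 0) +
			((addr2 >> SID_SHIFT_1T) != 0);

	if (esid_1t_count == 0)		/* Case 2: both below 1TB. */
		return GET_ESID(addr1) == GET_ESID(addr2);
	if (esid_1t_count == 1)		/* Case 3: one below, one above. */
		return 0;
	/* Case 4: both above 1TB, compare 1TB ESIDs. */
	return GET_ESID_1T(addr1) == GET_ESID_1T(addr2);
}

int main(void)
{
	/* Same 256MB segment, below 1TB: match. */
	printf("%d\n", esids_match(0x10000000UL, 0x10ffff00UL, 1));
	/* Different 256MB segments but same 1TB segment: match via GET_ESID_1T. */
	printf("%d\n", esids_match(0x10000000000UL, 0x10040000000UL, 1));
	/* One address below 1TB, one above: never a match. */
	printf("%d\n", esids_match(0x10000000UL, 0x10000000000UL, 1));
	return 0;
}

Summing the two above-1TB predicates maps the four cases onto the counts 0, 1 and 2, which keeps the comparison compact instead of testing each address combination explicitly.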