* Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
* and Paul Mackerras.
*
+ * kexec bits:
+ * Copyright (C) 2002-2003 Eric Biederman <ebiederm@xmission.com>
+ * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
-#include <linux/config.h>
#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
+#include <asm/processor.h>
+#include <asm/kexec.h>
.text
- .align 5
-_GLOBAL(__delay)
- cmpwi 0,r3,0
- mtctr r3
- beqlr
-1: bdnz 1b
- blr
-
/*
* This returns the high 64 bits of the product of two 64-bit numbers.
*/
blr
/*
- * Returns (address we're running at) - (address we were linked at)
- * for use before the text and data are mapped to KERNELBASE.
- */
-_GLOBAL(reloc_offset)
- mflr r0
- bl 1f
-1: mflr r3
- LOADADDR(r4,1b)
- subf r3,r4,r3
- mtlr r0
- blr
-
-/*
- * add_reloc_offset(x) returns x + reloc_offset().
- */
-_GLOBAL(add_reloc_offset)
- mflr r0
- bl 1f
-1: mflr r5
- LOADADDR(r4,1b)
- subf r5,r4,r5
- add r3,r3,r5
- mtlr r0
- blr
-
-/*
* sub_reloc_offset(x) returns x - reloc_offset().
*/
_GLOBAL(sub_reloc_offset)
blr
/*
- * identify_cpu,
- * called with r3 = data offset and r4 = CPU number
- * doesn't change r3
- */
-_GLOBAL(identify_cpu)
- addis r8,r3,cpu_specs@ha
- addi r8,r8,cpu_specs@l
- mfpvr r7
-1:
- lwz r5,CPU_SPEC_PVR_MASK(r8)
- and r5,r5,r7
- lwz r6,CPU_SPEC_PVR_VALUE(r8)
- cmplw 0,r6,r5
- beq 1f
- addi r8,r8,CPU_SPEC_ENTRY_SIZE
- b 1b
-1:
- addis r6,r3,cur_cpu_spec@ha
- addi r6,r6,cur_cpu_spec@l
- sub r8,r8,r3
- stw r8,0(r6)
- blr
-
-/*
- * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
- * and writes nop's over sections of code that don't apply for this cpu.
- * r3 = data offset (not changed)
- */
-_GLOBAL(do_cpu_ftr_fixups)
- /* Get CPU 0 features */
- addis r6,r3,cur_cpu_spec@ha
- addi r6,r6,cur_cpu_spec@l
- lwz r4,0(r6)
- add r4,r4,r3
- lwz r4,CPU_SPEC_FEATURES(r4)
-
- /* Get the fixup table */
- addis r6,r3,__start___ftr_fixup@ha
- addi r6,r6,__start___ftr_fixup@l
- addis r7,r3,__stop___ftr_fixup@ha
- addi r7,r7,__stop___ftr_fixup@l
-
- /* Do the fixup */
-1: cmplw 0,r6,r7
- bgelr
- addi r6,r6,16
- lwz r8,-16(r6) /* mask */
- and r8,r8,r4
- lwz r9,-12(r6) /* value */
- cmplw 0,r8,r9
- beq 1b
- lwz r8,-8(r6) /* section begin */
- lwz r9,-4(r6) /* section end */
- subf. r9,r8,r9
- beq 1b
- /* write nops over the section of code */
- /* todo: if large section, add a branch at the start of it */
- srwi r9,r9,2
- mtctr r9
- add r8,r8,r3
- lis r0,0x60000000@h /* nop */
-3: stw r0,0(r8)
- andi. r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
- beq 2f
- dcbst 0,r8 /* suboptimal, but simpler */
- sync
- icbi 0,r8
-2: addi r8,r8,4
- bdnz 3b
- sync /* additional sync needed on g4 */
- isync
- b 1b
-
-/*
* call_setup_cpu - call the setup_cpu function for this cpu
* r3 = data offset, r24 = cpu number
*
lwz r4,0(r4)
add r4,r4,r3
lwz r5,CPU_SPEC_SETUP(r4)
- cmpi 0,r5,0
+ cmpwi 0,r5,0
add r5,r5,r3
beqlr
mtctr r5
mtspr SPRN_L1CSR0,r3
isync
blr
-END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
+END_FTR_SECTION_IFSET(CPU_FTR_UNIFIED_ID_CACHE)
mfspr r3,SPRN_L1CSR1
ori r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
mtspr SPRN_L1CSR1,r3
*
* flush_icache_range(unsigned long start, unsigned long stop)
*/
-_GLOBAL(flush_icache_range)
+_GLOBAL(__flush_icache_range)
BEGIN_FTR_SECTION
blr /* for 601, do nothing */
-END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
+END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
li r5,L1_CACHE_BYTES-1
andc r3,r3,r5
subf r4,r3,r4
sync /* wait for dcbi's to get to ram */
blr
-#ifdef CONFIG_NOT_COHERENT_CACHE
-/*
- * 40x cores have 8K or 16K dcache and 32 byte line size.
- * 44x has a 32K dcache and 32 byte line size.
- * 8xx has 1, 2, 4, 8K variants.
- * For now, cover the worst case of the 44x.
- * Must be called with external interrupts disabled.
- */
-#define CACHE_NWAYS 64
-#define CACHE_NLINES 16
-
-_GLOBAL(flush_dcache_all)
- li r4, (2 * CACHE_NWAYS * CACHE_NLINES)
- mtctr r4
- lis r5, KERNELBASE@h
-1: lwz r3, 0(r5) /* Load one word from every line */
- addi r5, r5, L1_CACHE_BYTES
- bdnz 1b
- blr
-#endif /* CONFIG_NOT_COHERENT_CACHE */
-
/*
* Flush a particular page from the data cache to RAM.
* Note: this is necessary because the instruction cache does *not*
*/
_GLOBAL(__flush_dcache_icache)
BEGIN_FTR_SECTION
- blr /* for 601, do nothing */
-END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
+ blr
+END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
rlwinm r3,r3,0,0,19 /* Get page base address */
li r4,4096/L1_CACHE_BYTES /* Number of lines in a page */
mtctr r4
_GLOBAL(__flush_dcache_icache_phys)
BEGIN_FTR_SECTION
blr /* for 601, do nothing */
-END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
+END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
mfmsr r10
rlwinm r0,r10,0,28,26 /* clear DR */
mtmsr r0
blr
/*
- * I/O string operations
- *
- * insb(port, buf, len)
- * outsb(port, buf, len)
- * insw(port, buf, len)
- * outsw(port, buf, len)
- * insl(port, buf, len)
- * outsl(port, buf, len)
- * insw_ns(port, buf, len)
- * outsw_ns(port, buf, len)
- * insl_ns(port, buf, len)
- * outsl_ns(port, buf, len)
- *
- * The *_ns versions don't do byte-swapping.
- */
-_GLOBAL(_insb)
- cmpwi 0,r5,0
- mtctr r5
- subi r4,r4,1
- blelr-
-00: lbz r5,0(r3)
- eieio
- stbu r5,1(r4)
- bdnz 00b
- blr
-
-_GLOBAL(_outsb)
- cmpwi 0,r5,0
- mtctr r5
- subi r4,r4,1
- blelr-
-00: lbzu r5,1(r4)
- stb r5,0(r3)
- eieio
- bdnz 00b
- blr
-
-_GLOBAL(_insw)
- cmpwi 0,r5,0
- mtctr r5
- subi r4,r4,2
- blelr-
-00: lhbrx r5,0,r3
- eieio
- sthu r5,2(r4)
- bdnz 00b
- blr
-
-_GLOBAL(_outsw)
- cmpwi 0,r5,0
- mtctr r5
- subi r4,r4,2
- blelr-
-00: lhzu r5,2(r4)
- eieio
- sthbrx r5,0,r3
- bdnz 00b
- blr
-
-_GLOBAL(_insl)
- cmpwi 0,r5,0
- mtctr r5
- subi r4,r4,4
- blelr-
-00: lwbrx r5,0,r3
- eieio
- stwu r5,4(r4)
- bdnz 00b
- blr
-
-_GLOBAL(_outsl)
- cmpwi 0,r5,0
- mtctr r5
- subi r4,r4,4
- blelr-
-00: lwzu r5,4(r4)
- stwbrx r5,0,r3
- eieio
- bdnz 00b
- blr
-
-_GLOBAL(__ide_mm_insw)
-_GLOBAL(_insw_ns)
- cmpwi 0,r5,0
- mtctr r5
- subi r4,r4,2
- blelr-
-00: lhz r5,0(r3)
- eieio
- sthu r5,2(r4)
- bdnz 00b
- blr
-
-_GLOBAL(__ide_mm_outsw)
-_GLOBAL(_outsw_ns)
- cmpwi 0,r5,0
- mtctr r5
- subi r4,r4,2
- blelr-
-00: lhzu r5,2(r4)
- sth r5,0(r3)
- eieio
- bdnz 00b
- blr
-
-_GLOBAL(__ide_mm_insl)
-_GLOBAL(_insl_ns)
- cmpwi 0,r5,0
- mtctr r5
- subi r4,r4,4
- blelr-
-00: lwz r5,0(r3)
- eieio
- stwu r5,4(r4)
- bdnz 00b
- blr
-
-_GLOBAL(__ide_mm_outsl)
-_GLOBAL(_outsl_ns)
- cmpwi 0,r5,0
- mtctr r5
- subi r4,r4,4
- blelr-
-00: lwzu r5,4(r4)
- stw r5,0(r3)
- eieio
- bdnz 00b
- blr
-
-/*
* Extended precision shifts.
*
* Updated to be valid for shift counts from 0 to 63 inclusive.
sub r3,r3,r4
blr
-_GLOBAL(_get_SP)
- mr r3,r1 /* Close enough */
- blr
-
/*
* Create a kernel thread
* kernel_thread(fn, arg, flags)
addi r1,r1,16
blr
-_GLOBAL(execve)
+_GLOBAL(kernel_execve)
li r0,__NR_execve
sc
bnslr
*/
_GLOBAL(__main)
blr
+
+#ifdef CONFIG_KEXEC
+ /*
+ * Must be relocatable PIC code callable as a C function.
+ */
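+ /*
+ * The C-level view (as used by the platform's machine_kexec(),
+ * assumed here) is roughly:
+ *
+ *   void relocate_new_kernel(unsigned long page_list,
+ *                            unsigned long reboot_code_buffer,
+ *                            unsigned long start_address);
+ *
+ * and the call is not expected to return.
+ */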
+ .globl relocate_new_kernel
+relocate_new_kernel:
+ /* r3 = page_list */
+ /* r4 = reboot_code_buffer */
+ /* r5 = start_address */
+
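+ /*
+ * page_list is the head of the kexec indirection list: each
+ * 32-bit entry is a physical address tagged in its low bits --
+ * IND_DESTINATION (1<<0) sets the next destination page,
+ * IND_INDIRECTION (1<<1) chains to the next indirection page,
+ * IND_SOURCE (1<<3) names a page to copy there, and
+ * IND_DONE (1<<2) terminates the list (see <linux/kexec.h>).
+ */
+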
+ li r0, 0
+
+ /*
+ * Set the Machine State Register to a known state,
+ * switch the MMU off and jump to 1: in a single step.
+ */
+
+ mr r8, r0
+ ori r8, r8, MSR_RI|MSR_ME /* MMU off (IR/DR clear), RI and ME set */
+ mtspr SPRN_SRR1, r8 /* new MSR */
+ addi r8, r4, 1f - relocate_new_kernel
+ mtspr SPRN_SRR0, r8 /* address of 1: within the copied code */
+ sync
+ rfi /* loads MSR from SRR1, jumps to SRR0 */
+
+1:
+ /* from this point address translation is turned off */
+ /* and interrupts are disabled */
+
+ /* set a new stack at the bottom of our page... */
+ /* (not really needed now) */
+ addi r1, r4, KEXEC_CONTROL_CODE_SIZE - 8 /* for LR Save+Back Chain */
+ stw r0, 0(r1) /* NULL back chain */
+
+ /* Do the copies */
+ li r6, 0 /* running checksum (not consumed here) */
+ mr r0, r3
+ b 1f
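+ /*
+ * The head entry now in r0 is processed like any other list
+ * entry; typically it is an IND_INDIRECTION pointer to the
+ * first indirection page.
+ */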
+
+0: /* top, read another word for the indirection page */
+ lwzu r0, 4(r3)
+
+1:
+ /* is it a destination page? (r8) */
+ rlwinm. r7, r0, 0, 31, 31 /* IND_DESTINATION (1<<0) */
+ beq 2f
+
+ rlwinm r8, r0, 0, 0, 19 /* clear kexec flags, page align */
+ b 0b
+
+2: /* is it an indirection page? (r3) */
+ rlwinm. r7, r0, 0, 30, 30 /* IND_INDIRECTION (1<<1) */
+ beq 2f
+
+ rlwinm r3, r0, 0, 0, 19 /* clear kexec flags, page align */
+ subi r3, r3, 4
+ b 0b
+
+2: /* are we done? */
+ rlwinm. r7, r0, 0, 29, 29 /* IND_DONE (1<<2) */
+ beq 2f
+ b 3f
+
+2: /* is it a source page? (r9) */
+ rlwinm. r7, r0, 0, 28, 28 /* IND_SOURCE (1<<3) */
+ beq 0b
+
+ rlwinm r9, r0, 0, 0, 19 /* clear kexec flags, page align */
+
+ li r7, PAGE_SIZE / 4
+ mtctr r7
+ subi r9, r9, 4
+ subi r8, r8, 4
+9:
+ lwzu r0, 4(r9) /* do the copy */
+ xor r6, r6, r0
+ stwu r0, 4(r8)
+ dcbst 0, r8 /* flush the stored word to memory... */
+ sync
+ icbi 0, r8 /* ...and invalidate the icache copy, as the page may hold code */
+ bdnz 9b
+
+ addi r9, r9, 4
+ addi r8, r8, 4
+ b 0b
+
+3:
+
+ /* To be certain of avoiding problems with self-modifying code,
+ * execute a serializing instruction here.
+ */
+ isync
+ sync
+
+ /* jump to the entry point, usually the setup routine */
+ mtlr r5
+ blrl
+
+1: b 1b /* should not return; spin if it does */
+
+relocate_new_kernel_end:
+
+ .globl relocate_new_kernel_size
+relocate_new_kernel_size:
+ .long relocate_new_kernel_end - relocate_new_kernel
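+
+ /*
+ * machine_kexec() is expected to copy relocate_new_kernel_size
+ * bytes of this routine into the control code buffer and branch
+ * to the copy, which is why the code must be position independent.
+ */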
+#endif