2 * Copyright (C) Paul Mackerras 1997.
4 * Updates for PPC64 by Todd Inglett, Dave Engebretsen & Peter Bergner.
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
18 #include "gunzip_util.h"
19 #include "flatdevtree.h"
23 extern char __bss_start[];
25 extern char _vmlinux_start[];
26 extern char _vmlinux_end[];
27 extern char _initrd_start[];
28 extern char _initrd_end[];
29 extern char _dtb_start[];
30 extern char _dtb_end[];
32 static struct gunzip_state gzstate;
40 unsigned long loadsize;
41 unsigned long memsize;
42 unsigned long elfoffset;
45 typedef void (*kernel_entry_t)(unsigned long, unsigned long, void *);
/*
 * parse_elf64() - validate hdr as a big-endian 64-bit PPC64 ELF executable
 * and record the file size, memory size and file offset of its first
 * PT_LOAD segment into *info.
 *
 * NOTE(review): interior lines of this function (opening brace, the
 * declarations of i and elf64ph, and the return statements) are missing
 * from this chunk — comments only, code left byte-identical.
 */
49 static int parse_elf64(void *hdr, struct elf_info *info)
51 Elf64_Ehdr *elf64 = hdr;
/* Accept only a 64-bit, big-endian, executable image for EM_PPC64 */
55 if (!(elf64->e_ident[EI_MAG0] == ELFMAG0 &&
56 elf64->e_ident[EI_MAG1] == ELFMAG1 &&
57 elf64->e_ident[EI_MAG2] == ELFMAG2 &&
58 elf64->e_ident[EI_MAG3] == ELFMAG3 &&
59 elf64->e_ident[EI_CLASS] == ELFCLASS64 &&
60 elf64->e_ident[EI_DATA] == ELFDATA2MSB &&
61 elf64->e_type == ET_EXEC &&
62 elf64->e_machine == EM_PPC64))
/* Program header table lives at e_phoff bytes past the ELF header */
65 elf64ph = (Elf64_Phdr *)((unsigned long)elf64 +
66 (unsigned long)elf64->e_phoff);
/* Scan for the first loadable segment */
67 for (i = 0; i < (unsigned int)elf64->e_phnum; i++, elf64ph++)
68 if (elf64ph->p_type == PT_LOAD)
/* Loop ran off the end: no PT_LOAD segment was found */
70 if (i >= (unsigned int)elf64->e_phnum)
/* Record where the load data sits in the file and how much RAM it needs */
73 info->loadsize = (unsigned long)elf64ph->p_filesz;
74 info->memsize = (unsigned long)elf64ph->p_memsz;
75 info->elfoffset = (unsigned long)elf64ph->p_offset;
/*
 * parse_elf32() - validate hdr as a big-endian 32-bit PPC ELF executable
 * and record the file size, memory size and file offset of its first
 * PT_LOAD segment into *info.  32-bit counterpart of parse_elf64().
 *
 * NOTE(review): interior lines (brace, declarations of i and elf32ph,
 * return statements) are missing from this chunk — comments only.
 */
80 static int parse_elf32(void *hdr, struct elf_info *info)
82 Elf32_Ehdr *elf32 = hdr;
/* Accept only a 32-bit, big-endian, executable image for EM_PPC */
86 if (!(elf32->e_ident[EI_MAG0] == ELFMAG0 &&
87 elf32->e_ident[EI_MAG1] == ELFMAG1 &&
88 elf32->e_ident[EI_MAG2] == ELFMAG2 &&
89 elf32->e_ident[EI_MAG3] == ELFMAG3 &&
90 elf32->e_ident[EI_CLASS] == ELFCLASS32 &&
91 elf32->e_ident[EI_DATA] == ELFDATA2MSB &&
92 elf32->e_type == ET_EXEC &&
93 elf32->e_machine == EM_PPC))
/* Scan the program headers for the first loadable segment */
96 elf32ph = (Elf32_Phdr *) ((unsigned long)elf32 + elf32->e_phoff);
97 for (i = 0; i < elf32->e_phnum; i++, elf32ph++)
98 if (elf32ph->p_type == PT_LOAD)
/* Loop ran off the end: no PT_LOAD segment was found */
100 if (i >= elf32->e_phnum)
/* Record where the load data sits in the file and how much RAM it needs */
103 info->loadsize = elf32ph->p_filesz;
104 info->memsize = elf32ph->p_memsz;
105 info->elfoffset = elf32ph->p_offset;
/*
 * prep_kernel() - locate the gzipped vmlinux attached between
 * _vmlinux_start/_vmlinux_end, parse its ELF header (64- then 32-bit),
 * allocate memsize bytes for it, decompress it there, flush the cache
 * over the loaded text/data, and return the resulting address range.
 *
 * NOTE(review): interior lines (braces, declarations of addr/ei/len/
 * elfheader, the non-vmlinux_alloc fallback path) are missing from this
 * chunk — comments only, code left byte-identical.
 */
109 static struct addr_range prep_kernel(void)
112 void *vmlinuz_addr = _vmlinux_start;
113 unsigned long vmlinuz_size = _vmlinux_end - _vmlinux_start;
118 /* gunzip the ELF header of the kernel */
119 gunzip_start(&gzstate, vmlinuz_addr, vmlinuz_size);
120 gunzip_exactly(&gzstate, elfheader, sizeof(elfheader));
/* Try 64-bit first, then 32-bit; die if it is neither */
122 if (!parse_elf64(elfheader, &ei) && !parse_elf32(elfheader, &ei))
123 fatal("Error: not a valid PPC32 or PPC64 ELF file!\n\r");
/* Give the platform a look at the raw ELF header if it wants one */
125 if (platform_ops.image_hdr)
126 platform_ops.image_hdr(elfheader);
128 /* We need to alloc the memsize: gzip will expand the kernel
129 * text/data, then possible rubbish we don't care about. But
130 * the kernel bss must be claimed (it will be zero'd by the
133 printf("Allocating 0x%lx bytes for kernel ...\n\r", ei.memsize);
/* Prefer a platform-supplied allocator for the kernel image... */
135 if (platform_ops.vmlinux_alloc) {
136 addr = platform_ops.vmlinux_alloc(ei.memsize);
/* ...otherwise the kernel lands at address 0, so the wrapper itself
 * (at _start) must sit above the decompressed image */
138 if ((unsigned long)_start < ei.memsize)
139 fatal("Insufficient memory for kernel at address 0!"
140 " (_start=%p)\n\r", _start);
143 /* Finally, gunzip the kernel */
144 printf("gunzipping (0x%p <- 0x%p:0x%p)...", addr,
145 vmlinuz_addr, vmlinuz_addr+vmlinuz_size);
146 /* discard up to the actual load data */
147 gunzip_discard(&gzstate, ei.elfoffset - sizeof(elfheader));
148 len = gunzip_finish(&gzstate, addr, ei.memsize);
149 printf("done 0x%x bytes\n\r", len);
/* Make the newly written kernel text visible to instruction fetch */
151 flush_cache(addr, ei.loadsize);
153 return (struct addr_range){addr, ei.memsize};
/*
 * prep_initrd() - decide which initrd to use (wrapper-attached image wins
 * over a loader-supplied one), relocate it out of the way if it would be
 * clobbered by the kernel's final location, and publish its bounds in the
 * /chosen device-tree node.  Returns the (possibly moved) initrd range,
 * or {0, 0} when there is no initrd at all.
 *
 * NOTE(review): interior lines (braces, the "no initrd" test preceding
 * the {0,0} return, parts of the printf/fatal argument lists) are missing
 * from this chunk — comments only, code left byte-identical.
 */
156 static struct addr_range prep_initrd(struct addr_range vmlinux, void *chosen,
157 unsigned long initrd_addr,
158 unsigned long initrd_size)
160 /* If we have an image attached to us, it overrides anything
161 * supplied by the loader. */
162 if (_initrd_end > _initrd_start) {
163 printf("Attached initrd image at 0x%p-0x%p\n\r",
164 _initrd_start, _initrd_end);
165 initrd_addr = (unsigned long)_initrd_start;
166 initrd_size = _initrd_end - _initrd_start;
167 } else if (initrd_size > 0) {
168 printf("Using loader supplied ramdisk at 0x%lx-0x%lx\n\r",
169 initrd_addr, initrd_addr + initrd_size);
172 /* If there's no initrd at all, we're done */
174 return (struct addr_range){0, 0};
177 * If the initrd is too low it will be clobbered when the
178 * kernel relocates to its final location. In this case,
179 * allocate a safer place and move it.
181 if (initrd_addr < vmlinux.size) {
182 void *old_addr = (void *)initrd_addr;
184 printf("Allocating 0x%lx bytes for initrd ...\n\r",
/* malloc() from the wrapper heap sits above the kernel's claim */
186 initrd_addr = (unsigned long)malloc(initrd_size);
188 fatal("Can't allocate memory for initial "
190 printf("Relocating initrd 0x%lx <- 0x%p (0x%lx bytes)\n\r",
191 initrd_addr, old_addr, initrd_size);
/* memmove, not memcpy: old and new ranges could overlap */
192 memmove((void *)initrd_addr, old_addr, initrd_size);
/* Debug aid: dump the first word of the initrd image */
195 printf("initrd head: 0x%lx\n\r", *((unsigned long *)initrd_addr));
197 /* Tell the kernel initrd address via device tree */
198 setprop_val(chosen, "linux,initrd-start", (u32)(initrd_addr));
199 setprop_val(chosen, "linux,initrd-end", (u32)(initrd_addr+initrd_size));
201 return (struct addr_range){(void *)initrd_addr, initrd_size};
204 /* A buffer that may be edited by tools operating on a zImage binary so as to
205 * edit the command line passed to vmlinux (by setting /chosen/bootargs).
206 * The buffer is put in its own section so that tools may locate it easier.
208 static char cmdline[COMMAND_LINE_SIZE]
209 __attribute__((__section__("__builtin_cmdline")));
/*
 * prep_cmdline() - settle the final kernel command line: fall back to the
 * device tree's /chosen "bootargs" if no built-in/loader line was set,
 * let the console driver offer interactive editing, then write the result
 * back into /chosen for the kernel.
 *
 * NOTE(review): braces and possibly other interior lines are missing
 * from this chunk — comments only, code left byte-identical.
 */
211 static void prep_cmdline(void *chosen)
/* Empty buffer means neither the built-in line nor the loader set one */
213 if (cmdline[0] == '\0')
214 getprop(chosen, "bootargs", cmdline, COMMAND_LINE_SIZE-1);
216 printf("\n\rLinux/PowerPC load: %s", cmdline);
217 /* If possible, edit the command line */
218 if (console_ops.edit_cmdline)
219 console_ops.edit_cmdline(cmdline, COMMAND_LINE_SIZE);
222 /* Put the command line back into the devtree for the kernel */
223 setprop_str(chosen, "bootargs", cmdline);
/* Global hook tables and loader state, filled in by platform-specific
 * init code before start() runs; consulted throughout the wrapper. */
226 struct platform_ops platform_ops;
227 struct dt_ops dt_ops;
228 struct console_ops console_ops;
229 struct loader_info loader_info;
/*
 * Body of the wrapper's C entry point (its signature line, presumably
 * "void start(void)" or similar, is not present in this chunk — TODO
 * confirm against the full file).  Sequence: capture the loader command
 * line, open the console, run platform fixups, ensure /chosen exists,
 * decompress the kernel, stage the initrd, finalize the command line and
 * device tree, then jump to the kernel — which must never return.
 *
 * NOTE(review): braces and several interior lines (printf arguments,
 * conditional bodies, the flat-tree-vs-OF branch structure) are missing
 * from this chunk — comments only, code left byte-identical.
 */
233 struct addr_range vmlinux, initrd;
234 kernel_entry_t kentry;
235 unsigned long ft_addr = 0;
238 /* Do this first, because malloc() could clobber the loader's
239 * command line. Only use the loader command line if a
240 * built-in command line wasn't set by an external tool */
241 if ((loader_info.cmdline_len > 0) && (cmdline[0] == '\0'))
242 memmove(cmdline, loader_info.cmdline,
243 min(loader_info.cmdline_len, COMMAND_LINE_SIZE-1));
245 if (console_ops.open && (console_ops.open() < 0))
246 if (platform_ops.fixups)
248 platform_ops.fixups();
250 printf("\n\rzImage starting: loaded at 0x%p (sp: 0x%p)\n\r",
253 /* Ensure that the device tree has a /chosen node */
254 chosen = finddevice("/chosen");
256 chosen = create_node(NULL, "chosen");
/* Decompress the kernel, then stage the initrd (which must avoid the
 * kernel's final footprint) and the command line */
258 vmlinux = prep_kernel();
259 initrd = prep_initrd(vmlinux, chosen,
260 loader_info.initrd_addr, loader_info.initrd_size);
261 prep_cmdline(chosen);
263 printf("Finalizing device tree...");
265 ft_addr = dt_ops.finalize();
267 printf(" flat tree at 0x%lx\n\r", ft_addr);
269 printf(" using OF tree (promptr=%p)\n\r", loader_info.promptr);
271 if (console_ops.close)
274 kentry = (kernel_entry_t) vmlinux.addr;
/* Flat-device-tree entry: r3 = tree address, r4 = 0, r5 = NULL */
276 kentry(ft_addr, 0, NULL);
/* OF client-interface entry: r3/r4 = initrd, r5 = PROM entry pointer */
278 kentry((unsigned long)initrd.addr, initrd.size,
279 loader_info.promptr);
281 /* console closed so printf in fatal below may not work */
282 fatal("Error: Linux kernel returned to zImage boot wrapper!\n\r");