x86, kexec: fix kexec x86 coding style
arch/x86/kernel/machine_kexec_64.c
/*
 * handle transition of Linux booting another kernel
 * Copyright (C) 2002-2005 Eric Biederman  <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/string.h>
#include <linux/reboot.h>
#include <linux/numa.h>
#include <linux/ftrace.h>
#include <linux/io.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

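/*
 * Fill one PMD-level page with identity-mapped, executable 2 MiB
 * large-page entries covering a single PUD_SIZE region starting at addr.
 */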
static void init_level2_page(pmd_t *level2p, unsigned long addr)
{
        unsigned long end_addr;

        addr &= PAGE_MASK;
        end_addr = addr + PUD_SIZE;
        while (addr < end_addr) {
                set_pmd(level2p++, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
                addr += PMD_SIZE;
        }
}

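/*
 * Populate one PUD-level page: allocate a PMD page from the image's
 * control pages for each PUD_SIZE chunk below last_addr, identity-map
 * it with init_level2_page(), and clear the remaining entries.
 */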
static int init_level3_page(struct kimage *image, pud_t *level3p,
                                unsigned long addr, unsigned long last_addr)
{
        unsigned long end_addr;
        int result;

        result = 0;
        addr &= PAGE_MASK;
        end_addr = addr + PGDIR_SIZE;
        while ((addr < last_addr) && (addr < end_addr)) {
                struct page *page;
                pmd_t *level2p;

                page = kimage_alloc_control_pages(image, 0);
                if (!page) {
                        result = -ENOMEM;
                        goto out;
                }
                level2p = (pmd_t *)page_address(page);
                init_level2_page(level2p, addr);
                set_pud(level3p++, __pud(__pa(level2p) | _KERNPG_TABLE));
                addr += PUD_SIZE;
        }
        /* clear the unused entries */
        while (addr < end_addr) {
                pud_clear(level3p++);
                addr += PUD_SIZE;
        }
out:
        return result;
}

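/*
 * Populate the top-level page table: allocate and fill a PUD page for
 * each PGDIR_SIZE chunk below last_addr, building an identity map of
 * physical memory; clear the unused upper entries.
 */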
static int init_level4_page(struct kimage *image, pgd_t *level4p,
                                unsigned long addr, unsigned long last_addr)
{
        unsigned long end_addr;
        int result;

        result = 0;
        addr &= PAGE_MASK;
        end_addr = addr + (PTRS_PER_PGD * PGDIR_SIZE);
        while ((addr < last_addr) && (addr < end_addr)) {
                struct page *page;
                pud_t *level3p;

                page = kimage_alloc_control_pages(image, 0);
                if (!page) {
                        result = -ENOMEM;
                        goto out;
                }
                level3p = (pud_t *)page_address(page);
                result = init_level3_page(image, level3p, addr, last_addr);
                if (result)
                        goto out;
                set_pgd(level4p++, __pgd(__pa(level3p) | _KERNPG_TABLE));
                addr += PGDIR_SIZE;
        }
        /* clear the unused entries */
        while (addr < end_addr) {
                pgd_clear(level4p++);
                addr += PGDIR_SIZE;
        }
out:
        return result;
}

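/*
 * Release the transition page-table pages.  free_page(0) is a no-op,
 * so this is safe even after a partial failure in
 * init_transition_pgtable().
 */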
static void free_transition_pgtable(struct kimage *image)
{
        free_page((unsigned long)image->arch.pud);
        free_page((unsigned long)image->arch.pmd);
        free_page((unsigned long)image->arch.pte);
}

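/*
 * Map the kernel-virtual address of relocate_kernel() to the physical
 * copy of the control page, allocating intermediate tables as needed.
 * This keeps the instruction pointer valid when the identity-mapped
 * page tables are switched in.
 */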
static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
{
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long vaddr, paddr;
        int result = -ENOMEM;

        vaddr = (unsigned long)relocate_kernel;
        paddr = __pa(page_address(image->control_code_page) + PAGE_SIZE);
        pgd += pgd_index(vaddr);
        if (!pgd_present(*pgd)) {
                pud = (pud_t *)get_zeroed_page(GFP_KERNEL);
                if (!pud)
                        goto err;
                image->arch.pud = pud;
                set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
        }
        pud = pud_offset(pgd, vaddr);
        if (!pud_present(*pud)) {
                pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
                if (!pmd)
                        goto err;
                image->arch.pmd = pmd;
                set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
        }
        pmd = pmd_offset(pud, vaddr);
        if (!pmd_present(*pmd)) {
                pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
                if (!pte)
                        goto err;
                image->arch.pte = pte;
                set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
        }
        pte = pte_offset_kernel(pmd, vaddr);
        set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
        return 0;
err:
        free_transition_pgtable(image);
        return result;
}

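/*
 * Build the full set of page tables used across the switch-over: an
 * identity map of physical memory plus the transition mapping for
 * relocate_kernel().
 */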
static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
{
        pgd_t *level4p;
        int result;

        level4p = (pgd_t *)__va(start_pgtable);
        result = init_level4_page(image, level4p, 0, max_pfn << PAGE_SHIFT);
        if (result)
                return result;
        return init_transition_pgtable(image, level4p);
}

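/* Load a new interrupt descriptor table from the given address and limit. */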
static void set_idt(void *newidt, u16 limit)
{
        struct desc_ptr curidt;

        /* x86-64 supports unaligned loads & stores */
        curidt.size    = limit;
        curidt.address = (unsigned long)newidt;

        __asm__ __volatile__ (
                "lidtq %0\n"
                : : "m" (curidt)
                );
}

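/* Load a new global descriptor table from the given address and limit. */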
static void set_gdt(void *newgdt, u16 limit)
{
        struct desc_ptr curgdt;

        /* x86-64 supports unaligned loads & stores */
        curgdt.size    = limit;
        curgdt.address = (unsigned long)newgdt;

        __asm__ __volatile__ (
                "lgdtq %0\n"
                : : "m" (curgdt)
                );
}

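/*
 * Reload the data segment registers from __KERNEL_DS so that their
 * hidden descriptor state no longer depends on the GDT, which is about
 * to be invalidated.
 */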
static void load_segments(void)
{
        __asm__ __volatile__ (
                "\tmovl %0,%%ds\n"
                "\tmovl %0,%%es\n"
                "\tmovl %0,%%ss\n"
                "\tmovl %0,%%fs\n"
                "\tmovl %0,%%gs\n"
                : : "a" (__KERNEL_DS) : "memory"
                );
}

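/*
 * Called at kexec load time: build the identity-mapped page tables in
 * the reserved control pages, so that machine_kexec() itself does not
 * have to allocate memory.
 */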
int machine_kexec_prepare(struct kimage *image)
{
        unsigned long start_pgtable;
        int result;

        /* Calculate the offsets */
        start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;

        /* Set up the identity-mapped 64-bit page table */
        result = init_pgtable(image, start_pgtable);
        if (result)
                return result;

        return 0;
}

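/* Free the transition page-table pages allocated at prepare time. */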
void machine_kexec_cleanup(struct kimage *image)
{
        free_transition_pgtable(image);
}

/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
void machine_kexec(struct kimage *image)
{
        unsigned long page_list[PAGES_NR];
        void *control_page;

        tracer_disable();

        /* Interrupts aren't acceptable while we reboot */
        local_irq_disable();

        control_page = page_address(image->control_code_page) + PAGE_SIZE;
        memcpy(control_page, relocate_kernel, PAGE_SIZE);

        page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page);
        page_list[PA_TABLE_PAGE] =
          (unsigned long)__pa(page_address(image->control_code_page));

        /*
         * The segment registers are funny things, they have both a
         * visible and an invisible part.  Whenever the visible part is
         * set to a specific selector, the invisible part is loaded
         * from a table in memory.  At no other time is the
         * descriptor table in memory accessed.
         *
         * I take advantage of this here by force loading the
         * segments, before I zap the gdt with an invalid value.
         */
        load_segments();
        /*
         * The gdt & idt are now invalid.
         * If you want to load them you must set up your own idt & gdt.
         */
        set_gdt(phys_to_virt(0), 0);
        set_idt(phys_to_virt(0), 0);

        /* now call it */
        relocate_kernel((unsigned long)image->head, (unsigned long)page_list,
                        image->start);
}

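/*
 * Record architecture-specific symbols in the vmcoreinfo note so that
 * dump-analysis tools can interpret a crash dump from this kernel.
 */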
void arch_crash_save_vmcoreinfo(void)
{
        VMCOREINFO_SYMBOL(phys_base);
        VMCOREINFO_SYMBOL(init_level4_pgt);

#ifdef CONFIG_NUMA
        VMCOREINFO_SYMBOL(node_data);
        VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
#endif
}