i386: avoid temporarily inconsistent pte-s
arch/x86/mm/pgtable_32.c
/*
 *  linux/arch/i386/mm/pgtable.c
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

void show_mem(void)
{
        int total = 0, reserved = 0;
        int shared = 0, cached = 0;
        int highmem = 0;
        struct page *page;
        pg_data_t *pgdat;
        unsigned long i;
        unsigned long flags;

        printk(KERN_INFO "Mem-info:\n");
        show_free_areas();
        printk(KERN_INFO "Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
        for_each_online_pgdat(pgdat) {
                pgdat_resize_lock(pgdat, &flags);
                for (i = 0; i < pgdat->node_spanned_pages; ++i) {
                        page = pgdat_page_nr(pgdat, i);
                        total++;
                        if (PageHighMem(page))
                                highmem++;
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (page_count(page))
                                shared += page_count(page) - 1;
                }
                pgdat_resize_unlock(pgdat, &flags);
        }
        printk(KERN_INFO "%d pages of RAM\n", total);
        printk(KERN_INFO "%d pages of HIGHMEM\n", highmem);
        printk(KERN_INFO "%d reserved pages\n", reserved);
        printk(KERN_INFO "%d pages shared\n", shared);
        printk(KERN_INFO "%d pages swap cached\n", cached);

        printk(KERN_INFO "%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
        printk(KERN_INFO "%lu pages writeback\n",
                                        global_page_state(NR_WRITEBACK));
        printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
        printk(KERN_INFO "%lu pages slab\n",
                global_page_state(NR_SLAB_RECLAIMABLE) +
                global_page_state(NR_SLAB_UNRECLAIMABLE));
        printk(KERN_INFO "%lu pages pagetables\n",
                                        global_page_state(NR_PAGETABLE));
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        pgd = swapper_pg_dir + pgd_index(vaddr);
        if (pgd_none(*pgd)) {
                BUG();
                return;
        }
        pud = pud_offset(pgd, vaddr);
        if (pud_none(*pud)) {
                BUG();
                return;
        }
        pmd = pmd_offset(pud, vaddr);
        if (pmd_none(*pmd)) {
                BUG();
                return;
        }
        pte = pte_offset_kernel(pmd, vaddr);
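        /*
         * Use set_pte_present() rather than a plain set_pte() here: under
         * PAE a pte is 64 bits wide and cannot be rewritten with a single
         * store, so this helper is expected to update an already-present
         * kernel mapping without the pte ever being visible in a
         * half-written, temporarily inconsistent state.
         */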
        if (pgprot_val(flags))
                set_pte_present(&init_mm, vaddr, pte, pfn_pte(pfn, flags));
        else
                pte_clear(&init_mm, vaddr, pte);

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}

/*
 * Associate a large virtual page frame with a given physical page frame
 * and protection flags for that frame. pfn is for the base of the page,
 * vaddr is what the page gets mapped to - both must be properly aligned.
 * The pmd must already be instantiated. Assumes PAE mode.
 */
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        if (vaddr & (PMD_SIZE-1)) {             /* vaddr is misaligned */
                printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
                return; /* BUG(); */
        }
        if (pfn & (PTRS_PER_PTE-1)) {           /* pfn is misaligned */
                printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
                return; /* BUG(); */
        }
        pgd = swapper_pg_dir + pgd_index(vaddr);
        if (pgd_none(*pgd)) {
                printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
                return; /* BUG(); */
        }
        pud = pud_offset(pgd, vaddr);
        pmd = pmd_offset(pud, vaddr);
        set_pmd(pmd, pfn_pmd(pfn, flags));
        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}

static int fixmaps;
unsigned long __FIXADDR_TOP = 0xfffff000;
EXPORT_SYMBOL(__FIXADDR_TOP);

void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
        fixmaps++;
}

/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve: size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void reserve_top_address(unsigned long reserve)
{
        BUG_ON(fixmaps > 0);
        printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
               (int)-reserve);
        __FIXADDR_TOP = -reserve - PAGE_SIZE;
        __VMALLOC_RESERVE += reserve;
}

pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
        return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}

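/*
 * User pte pages: with CONFIG_HIGHPTE they may be allocated from highmem,
 * in which case callers have to map them (e.g. via kmap_atomic()) before
 * touching the ptes; without it they always come from lowmem.
 */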
struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
        struct page *pte;

#ifdef CONFIG_HIGHPTE
        pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
#else
        pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
#endif
        return pte;
}

void pmd_ctor(struct kmem_cache *cache, void *pmd)
{
        memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- wli
 */
DEFINE_SPINLOCK(pgd_lock);
struct page *pgd_list;

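/*
 * pgd_list is an open-coded doubly linked list threaded through struct page:
 * page->index holds the pointer to the next pgd page, and page->private
 * holds the address of the slot that points at this page (the previous
 * element's index field, or &pgd_list for the head), so unlinking never
 * needs to walk the list.  The list is protected by pgd_lock.
 */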
static inline void pgd_list_add(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);
        page->index = (unsigned long)pgd_list;
        if (pgd_list)
                set_page_private(pgd_list, (unsigned long)&page->index);
        pgd_list = page;
        set_page_private(page, (unsigned long)&pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
        struct page *next, **pprev, *page = virt_to_page(pgd);
        next = (struct page *)page->index;
        pprev = (struct page **)page_private(page);
        *pprev = next;
        if (next)
                set_page_private(next, (unsigned long)pprev);
}

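/*
 * pgd pages are managed through the quicklist allocator (see pgd_alloc(),
 * pgd_free() and check_pgt_cache() below).  pgd_ctor() sets up the kernel
 * part of a new pgd and, where the kernel pmd is not shared, puts it on
 * pgd_list; pgd_dtor() takes it off the list again before the page is
 * finally released.
 */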
#if (PTRS_PER_PMD == 1)
/* Non-PAE pgd constructor */
static void pgd_ctor(void *pgd)
{
        unsigned long flags;

        /* !PAE, no pagetable sharing */
        memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));

        spin_lock_irqsave(&pgd_lock, flags);

        /* must happen under lock */
        clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
                        swapper_pg_dir + USER_PTRS_PER_PGD,
                        KERNEL_PGD_PTRS);
        paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
                                __pa(swapper_pg_dir) >> PAGE_SHIFT,
                                USER_PTRS_PER_PGD,
                                KERNEL_PGD_PTRS);
        pgd_list_add(pgd);
        spin_unlock_irqrestore(&pgd_lock, flags);
}
#else  /* PTRS_PER_PMD > 1 */
/* PAE pgd constructor */
static void pgd_ctor(void *pgd)
{
        /* PAE, kernel PMD may be shared */

        if (SHARED_KERNEL_PMD) {
                clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
                                swapper_pg_dir + USER_PTRS_PER_PGD,
                                KERNEL_PGD_PTRS);
        } else {
                unsigned long flags;

                memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
                spin_lock_irqsave(&pgd_lock, flags);
                pgd_list_add(pgd);
                spin_unlock_irqrestore(&pgd_lock, flags);
        }
}
#endif  /* PTRS_PER_PMD */

static void pgd_dtor(void *pgd)
{
        unsigned long flags; /* can be called from interrupt context */

        if (SHARED_KERNEL_PMD)
                return;

        paravirt_release_pd(__pa(pgd) >> PAGE_SHIFT);
        spin_lock_irqsave(&pgd_lock, flags);
        pgd_list_del(pgd);
        spin_unlock_irqrestore(&pgd_lock, flags);
}

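/*
 * Number of pgd entries that need their own pmd page: when the kernel pmd
 * is shared only the user entries do, otherwise every entry does.
 */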
#define UNSHARED_PTRS_PER_PGD                           \
        (SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)

/* If we allocate a pmd for part of the kernel address space, then
   make sure it's initialized with the appropriate kernel mappings.
   Otherwise use a cached zeroed pmd.  */
static pmd_t *pmd_cache_alloc(int idx)
{
        pmd_t *pmd;

        if (idx >= USER_PTRS_PER_PGD) {
                pmd = (pmd_t *)__get_free_page(GFP_KERNEL);

                if (pmd)
                        memcpy(pmd,
                               (void *)pgd_page_vaddr(swapper_pg_dir[idx]),
                               sizeof(pmd_t) * PTRS_PER_PMD);
        } else
                pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);

        return pmd;
}

static void pmd_cache_free(pmd_t *pmd, int idx)
{
        if (idx >= USER_PTRS_PER_PGD)
                free_page((unsigned long)pmd);
        else
                kmem_cache_free(pmd_cache, pmd);
}

pgd_t *pgd_alloc(struct mm_struct *mm)
{
        int i;
        pgd_t *pgd = quicklist_alloc(0, GFP_KERNEL, pgd_ctor);

        if (PTRS_PER_PMD == 1 || !pgd)
                return pgd;

        for (i = 0; i < UNSHARED_PTRS_PER_PGD; ++i) {
                pmd_t *pmd = pmd_cache_alloc(i);

                if (!pmd)
                        goto out_oom;

                paravirt_alloc_pd(__pa(pmd) >> PAGE_SHIFT);
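                /*
                 * The "+ 1" sets the low bit (_PAGE_PRESENT) in the pgd
                 * entry pointing at the new pmd page; the matching
                 * "pgd_val(pgdent) - 1" below strips it again to recover
                 * the pmd's address.
                 */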
                set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
        }
        return pgd;

out_oom:
        for (i--; i >= 0; i--) {
                pgd_t pgdent = pgd[i];
                void *pmd = (void *)__va(pgd_val(pgdent)-1);
                paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
                pmd_cache_free(pmd, i);
        }
        quicklist_free(0, pgd_dtor, pgd);
        return NULL;
}

void pgd_free(pgd_t *pgd)
{
        int i;

        /* in the PAE case user pgd entries are overwritten before usage */
        if (PTRS_PER_PMD > 1)
                for (i = 0; i < UNSHARED_PTRS_PER_PGD; ++i) {
                        pgd_t pgdent = pgd[i];
                        void *pmd = (void *)__va(pgd_val(pgdent)-1);
                        paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
                        pmd_cache_free(pmd, i);
                }
        /* in the non-PAE case, free_pgtables() clears user pgd entries */
        quicklist_free(0, pgd_dtor, pgd);
}

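/*
 * Trim the per-cpu pgd quicklist: the arguments are understood to mean
 * "keep at most 25 cached pages, freeing no more than 16 per call", with
 * pgd_dtor() run on each page actually handed back to the page allocator.
 */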
void check_pgt_cache(void)
{
        quicklist_trim(0, pgd_dtor, 25, 16);
}