#include <linux/mm.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

#ifdef CONFIG_HIGHPTE
	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
#else
	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
#endif
	if (pte)
		pgtable_page_ctor(pte);
	return pte;
}
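/*
 * Editor's illustrative sketch (not part of the original file): any
 * path that frees a page obtained from pte_alloc_one() must undo
 * pgtable_page_ctor() first, as __pte_free_tlb() below does.  The
 * helper name here is hypothetical, for illustration only.
 */
#if 0
static void example_pte_free(struct page *pte)
{
	pgtable_page_dtor(pte);		/* undo pgtable_page_ctor() */
	__free_page(pte);		/* return the page to the allocator */
}
#endif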
void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	pgtable_page_dtor(pte);
	paravirt_release_pt(page_to_pfn(pte));
	tlb_remove_page(tlb, pte);
}
#ifdef CONFIG_X86_64
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);
	unsigned long flags;

	spin_lock_irqsave(&pgd_lock, flags);
	list_add(&page->lru, &pgd_list);
	spin_unlock_irqrestore(&pgd_lock, flags);
}
static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);
	unsigned long flags;

	spin_lock_irqsave(&pgd_lock, flags);
	list_del(&page->lru);
	spin_unlock_irqrestore(&pgd_lock, flags);
}
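/*
 * Editor's illustrative sketch (assumption, not in the original file):
 * the point of pgd_list is that a kernel mapping update can be
 * propagated into every pgd in the system.  A walker, in the style of
 * vmalloc_sync_all() in arch/x86/mm/fault.c, would hold pgd_lock
 * across the traversal:
 */
#if 0
static void example_sync_kernel_entry(unsigned long address)
{
	unsigned long flags;
	struct page *page;

	spin_lock_irqsave(&pgd_lock, flags);
	list_for_each_entry(page, &pgd_list, lru) {
		pgd_t *pgd = (pgd_t *)page_address(page) + pgd_index(address);
		/* ... copy the entry for 'address' from the reference pgd ... */
	}
	spin_unlock_irqrestore(&pgd_lock, flags);
}
#endif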
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	unsigned boundary;
	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
	if (!pgd)
		return NULL;
	pgd_list_add(pgd);
	/*
	 * Copy kernel pointers in from init.
	 * Could keep a freelist or slab cache of those because the kernel
	 * part never changes.
	 */
	boundary = pgd_index(__PAGE_OFFSET);
	memset(pgd, 0, boundary * sizeof(pgd_t));
	memcpy(pgd + boundary,
	       init_level4_pgt + boundary,
	       (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
	return pgd;
}
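/*
 * Editor's note (illustrative): pgd_index(__PAGE_OFFSET) is the first
 * pgd slot that maps kernel addresses, so the memset/memcpy pair above
 * zeroes every user slot and copies every kernel slot from
 * init_level4_pgt in one pass.  The kernel half is identical in every
 * pgd, which is what would make the freelist or slab cache mentioned
 * in the comment feasible.
 */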
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
	pgd_list_del(pgd);
	free_page((unsigned long)pgd);
}
#else	/* !CONFIG_X86_64: 32-bit implementation */
/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- wli
 */
/* The caller holds pgd_lock, so these 32-bit variants take no lock. */
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}
static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}
#define UNSHARED_PTRS_PER_PGD				\
	(SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)
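/*
 * Editor's worked example (illustrative): with PAE and the default
 * 3G/1G split, PTRS_PER_PGD is 4 and USER_PTRS_PER_PGD is 3
 * (0xC0000000 >> 30), so UNSHARED_PTRS_PER_PGD is 3 when the kernel
 * pmd is shared (only the three user PDPT slots are per-process) and
 * 4 when a paravirt environment requires a per-process kernel pmd.
 */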
static void pgd_ctor(void *p)
{
	pgd_t *pgd = p;
	unsigned long flags;

	/* Clear usermode parts of PGD */
	memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));

	spin_lock_irqsave(&pgd_lock, flags);

	/* If the pgd points to a shared pagetable level (either the
	   ptes in non-PAE, or shared PMD in PAE), then just copy the
	   references from swapper_pg_dir. */
	if (PAGETABLE_LEVELS == 2 ||
	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD)) {
		clone_pgd_range(pgd + USER_PTRS_PER_PGD,
				swapper_pg_dir + USER_PTRS_PER_PGD,
				KERNEL_PGD_PTRS);
		paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
					__pa(swapper_pg_dir) >> PAGE_SHIFT,
					USER_PTRS_PER_PGD,
					KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD)
		pgd_list_add(pgd);

	spin_unlock_irqrestore(&pgd_lock, flags);
}
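/*
 * Editor's summary of the cases above (illustrative):
 *   2 levels (non-PAE):            kernel ptes are shared, copy refs.
 *   3 levels, SHARED_KERNEL_PMD:   kernel pmd is shared, copy refs.
 *   3 levels, !SHARED_KERNEL_PMD:  nothing to share; the pgd goes on
 *                                  pgd_list so kernel mapping updates
 *                                  can be replayed into it later.
 */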
static void pgd_dtor(void *pgd)
{
	unsigned long flags; /* can be called from interrupt context */

	if (SHARED_KERNEL_PMD)
		return;

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}
#ifdef CONFIG_X86_PAE
/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
	int i;

	for (i = 0; i < UNSHARED_PTRS_PER_PGD; i++) {
		pgd_t pgd = pgdp[i];

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			pgdp[i] = native_make_pgd(0);

			paravirt_release_pd(pgd_val(pgd) >> PAGE_SHIFT);
			pmd_free(mm, pmd);
		}
	}
}
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update.  Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
 * and initialize the kernel pmds here.
 */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
	pud_t *pud;
	unsigned long addr;
	int i;

	pud = pud_offset(pgd, 0);
	for (addr = i = 0; i < UNSHARED_PTRS_PER_PGD;
	     i++, pud++, addr += PUD_SIZE) {
		pmd_t *pmd = pmd_alloc_one(mm, addr);

		if (!pmd) {
			pgd_mop_up_pmds(mm, pgd);
			return 0;
		}

		if (i >= USER_PTRS_PER_PGD)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}

	return 1;
}
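/*
 * Editor's note (illustrative): when SHARED_KERNEL_PMD is set,
 * UNSHARED_PTRS_PER_PGD equals USER_PTRS_PER_PGD, so the
 * i >= USER_PTRS_PER_PGD branch above never runs and only user pmds
 * are allocated; the kernel pmd copy from swapper_pg_dir happens only
 * in the !SHARED_KERNEL_PMD (paravirt) case.
 */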
void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	paravirt_alloc_pd(mm, __pa(pmd) >> PAGE_SHIFT);

	/* Note: almost everything apart from _PAGE_PRESENT is
	   reserved at the pmd (PDPT) level. */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 */
	if (mm == current->active_mm)
		write_cr3(read_cr3());
}
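/*
 * Editor's note (illustrative): the reload is only needed for the
 * currently active mm; any other mm will have its cr3 loaded at the
 * next context switch, which picks up the new PDPT entry anyway.
 */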
#else	/* !CONFIG_X86_PAE */
/* No need to prepopulate any pagetable entries in non-PAE modes. */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
	return 1;
}

static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgd)
{
}
#endif	/* CONFIG_X86_PAE */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);

	/* set mm->pgd up front so the paravirt alloc_pd hook can use it */
	mm->pgd = pgd;
	if (pgd)
		pgd_ctor(pgd);

	if (pgd && !pgd_prepopulate_pmd(mm, pgd)) {
		pgd_dtor(pgd);
		free_page((unsigned long)pgd);
		pgd = NULL;
	}

	return pgd;
}
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_mop_up_pmds(mm, pgd);
	pgd_dtor(pgd);
	free_page((unsigned long)pgd);
}
#endif	/* CONFIG_X86_64 */
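/*
 * Editor's illustrative sketch (assumption, not in the original file):
 * how the allocation and free paths pair up for a caller in the style
 * of mm_alloc_pgd()/mm_free_pgd() in kernel/fork.c.  The helper name
 * is hypothetical.
 */
#if 0
static int example_mm_pgd_lifecycle(struct mm_struct *mm)
{
	mm->pgd = pgd_alloc(mm);	/* ctor + (PAE) pmd prepopulation */
	if (unlikely(!mm->pgd))
		return -ENOMEM;
	/* ... use the mm ... */
	pgd_free(mm, mm->pgd);		/* mops up pmds, dtor, frees the page */
	return 0;
}
#endif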