/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion.  In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable.  When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn.  Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest.  This prevents uncontrolled
 * guest updates to the pagetable.  Xen strictly enforces this, and
 * will disallow any pagetable update which will end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow.  The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use.  This means that Xen can be assured
 * that it is still valid when you do load it into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

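/*
 * A hedged illustration of the pfn <-> mfn round trip described above
 * (not code from this file; the pfn 0x1234 is arbitrary):
 *
 *	pte_t pte = mfn_pte(pfn_to_mfn(0x1234), PAGE_KERNEL);
 *	unsigned long pfn = pte_val(pte) >> PAGE_SHIFT;   (0x1234 again)
 *
 * mfn_pte() goes through the p2m table on the way in; pte_val() goes
 * back through the m2p table on the way out, so the guest only ever
 * sees its own pfns even though the hardware pagetable holds mfns.
 */
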
#include <linux/bug.h>
#include <linux/sched.h>
#include <linux/highmem.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

#include <asm/xen/hypercall.h>
#include <asm/paravirt.h>

#include <xen/interface/xen.h>
#include <xen/page.h>
#include <xen/features.h>

#include "mmu.h"

xmaddr_t arbitrary_virt_to_machine(unsigned long address)
{
	pte_t *pte = lookup_address(address);
	unsigned offset = address & ~PAGE_MASK;	/* offset within the page */

	BUG_ON(pte == NULL);

	return XMADDR((pte_mfn(*pte) << PAGE_SHIFT) + offset);
}

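/*
 * Illustrative caller (an assumption, not from this file): the machine
 * address of a pte slot is exactly what the MMU-update hypercall
 * wants, e.g.
 *
 *	xmaddr_t maddr = arbitrary_virt_to_machine((unsigned long)ptep);
 *
 * xen_set_pte() below uses the same pattern via virt_to_machine(),
 * which is pure arithmetic and skips the pagetable walk for
 * directly-mapped addresses.
 */
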
void make_lowmem_page_readonly(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;

	pte = lookup_address(address);
	BUG_ON(pte == NULL);

	ptev = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;

	pte = lookup_address(address);
	BUG_ON(pte == NULL);

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

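/*
 * Hedged usage sketch (the GDT page is an assumed example, not taken
 * from this file): a lowmem page handed to Xen as a descriptor table
 * or pagetable must be made RO first, and only made RW again once Xen
 * has dropped its reference:
 *
 *	make_lowmem_page_readonly(gdt_page);
 *	...page is registered with the hypervisor...
 *	make_lowmem_page_readwrite(gdt_page);
 */
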
void xen_set_pte(pte_t *ptep, pte_t pte)
{
	struct mmu_update u;

	u.ptr = virt_to_machine(ptep).maddr;
	u.val = pte_val_ma(pte);
	if (HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0)
		BUG();
}

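/*
 * HYPERVISOR_mmu_update() takes an array of requests, so callers can
 * batch several pte writes into a single hypercall.  A hedged sketch
 * (this file only issues single updates; ptep0/ptep1 are hypothetical):
 *
 *	struct mmu_update u[2];
 *
 *	u[0].ptr = virt_to_machine(ptep0).maddr;
 *	u[0].val = pte_val_ma(pte0);
 *	u[1].ptr = virt_to_machine(ptep1).maddr;
 *	u[1].val = pte_val_ma(pte1);
 *	if (HYPERVISOR_mmu_update(u, 2, NULL, DOMID_SELF) < 0)
 *		BUG();
 */
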
void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	struct mmu_update u;

	u.ptr = virt_to_machine(ptr).maddr;
	u.val = pmd_val_ma(val);
	if (HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0)
		BUG();
}

#ifdef CONFIG_X86_PAE
void xen_set_pud(pud_t *ptr, pud_t val)
{
	struct mmu_update u;

	u.ptr = virt_to_machine(ptr).maddr;
	u.val = pud_val_ma(val);
	if (HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0)
		BUG();
}
#endif

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	/* <mfn,flags> stored as-is, to permit clearing entries */
	xen_set_pte(pte, mfn_pte(mfn, flags));

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

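/*
 * Illustrative call (values hypothetical): mapping a foreign or device
 * mfn into the kernel's pagetable would look like
 *
 *	set_pte_mfn(vaddr, mfn, PAGE_KERNEL);
 *
 * Because <mfn,flags> is stored without conversion or validation,
 * mfn_pte(0, __pgprot(0)) yields an empty pte, which is how an entry
 * can be cleared, as the comment above notes.
 */
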
void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, pte_t pteval)
{
	if ((mm != current->mm && mm != &init_mm) ||
	    HYPERVISOR_update_va_mapping(addr, pteval, 0) != 0)
		xen_set_pte(ptep, pteval);
}

#ifdef CONFIG_X86_PAE
void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_64bit((u64 *)ptep, pte_val_ma(pte));
}

void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();		/* make sure low gets written first */
	ptep->pte_high = 0;
}

void xen_pmd_clear(pmd_t *pmdp)
{
	xen_set_pmd(pmdp, __pmd(0));
}

unsigned long long xen_pte_val(pte_t pte)
{
	unsigned long long ret = 0;

	if (pte.pte_low) {
		ret = ((unsigned long long)pte.pte_high << 32) | pte.pte_low;
		ret = machine_to_phys(XMADDR(ret)).paddr | 1;
	}

	return ret;
}

unsigned long long xen_pmd_val(pmd_t pmd)
{
	unsigned long long ret = pmd.pmd;
	if (ret)
		ret = machine_to_phys(XMADDR(ret)).paddr | 1;
	return ret;
}

unsigned long long xen_pgd_val(pgd_t pgd)
{
	unsigned long long ret = pgd.pgd;
	if (ret)
		ret = machine_to_phys(XMADDR(ret)).paddr | 1;
	return ret;
}

pte_t xen_make_pte(unsigned long long pte)
{
	if (pte & _PAGE_PRESENT)
		pte = phys_to_machine(XPADDR(pte)).maddr;

	return (pte_t){ pte, pte >> 32 };
}

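/*
 * A hedged round-trip sketch (pure illustration; 0x1234 is an
 * arbitrary pfn): for present entries xen_make_pte() and xen_pte_val()
 * are inverses, converting pfn -> mfn on the way in and mfn -> pfn on
 * the way out:
 *
 *	unsigned long long val = (0x1234ULL << PAGE_SHIFT) | _PAGE_PRESENT;
 *	pte_t pte = xen_make_pte(val);              (pte now holds the mfn)
 *	unsigned long pfn = xen_pte_val(pte) >> PAGE_SHIFT;  (0x1234 again)
 */
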
pmd_t xen_make_pmd(unsigned long long pmd)
{
	if (pmd & _PAGE_PRESENT)
		pmd = phys_to_machine(XPADDR(pmd)).maddr;

	return (pmd_t){ pmd };
}

pgd_t xen_make_pgd(unsigned long long pgd)
{
	if (pgd & _PAGE_PRESENT)
		pgd = phys_to_machine(XPADDR(pgd)).maddr;

	return (pgd_t){ pgd };
}
#else  /* !CONFIG_X86_PAE */

unsigned long xen_pte_val(pte_t pte)
{
	unsigned long ret = pte.pte_low;

	if (ret & _PAGE_PRESENT)
		ret = machine_to_phys(XMADDR(ret)).paddr;

	return ret;
}

unsigned long xen_pmd_val(pmd_t pmd)
{
	/* a BUG here is a lot easier to track down than a NULL eip */
	BUG();
	return 0;
}

unsigned long xen_pgd_val(pgd_t pgd)
{
	unsigned long ret = pgd.pgd;
	if (ret)
		ret = machine_to_phys(XMADDR(ret)).paddr | 1;
	return ret;
}

pte_t xen_make_pte(unsigned long pte)
{
	if (pte & _PAGE_PRESENT)
		pte = phys_to_machine(XPADDR(pte)).maddr;

	return (pte_t){ pte };
}

pmd_t xen_make_pmd(unsigned long pmd)
{
	/* a BUG here is a lot easier to track down than a NULL eip */
	BUG();
	return __pmd(0);
}

pgd_t xen_make_pgd(unsigned long pgd)
{
	if (pgd & _PAGE_PRESENT)
		pgd = phys_to_machine(XPADDR(pgd)).maddr;

	return (pgd_t){ pgd };
}
#endif	/* CONFIG_X86_PAE */

static void pgd_walk_set_prot(void *pt, pgprot_t flags)
{
	unsigned long pfn = PFN_DOWN(__pa(pt));

	if (HYPERVISOR_update_va_mapping((unsigned long)pt,
					 pfn_pte(pfn, flags), 0) < 0)
		BUG();
}

static void pgd_walk(pgd_t *pgd_base, pgprot_t flags)
{
	pgd_t *pgd = pgd_base;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int g, u, m;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return;

	for (g = 0; g < USER_PTRS_PER_PGD; g++, pgd++) {
		if (pgd_none(*pgd))
			continue;
		pud = pud_offset(pgd, 0);
		if (PTRS_PER_PUD > 1) /* not folded */
			pgd_walk_set_prot(pud, flags);
		for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
			if (pud_none(*pud))
				continue;
			pmd = pmd_offset(pud, 0);
			if (PTRS_PER_PMD > 1) /* not folded */
				pgd_walk_set_prot(pmd, flags);
			for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
				if (pmd_none(*pmd))
					continue;
				/* This can get called before mem_map
				   is set up, so we assume nothing is
				   highmem at that point. */
				if (mem_map == NULL ||
				    !PageHighMem(pmd_page(*pmd))) {
					pte = pte_offset_kernel(pmd, 0);
					pgd_walk_set_prot(pte, flags);
				}
			}
		}
	}

	if (HYPERVISOR_update_va_mapping((unsigned long)pgd_base,
					 pfn_pte(PFN_DOWN(__pa(pgd_base)),
						 flags),
					 UVMF_TLB_FLUSH) < 0)
		BUG();
}

/* This is called just after a mm has been duplicated from its parent,
   but it has not been used yet.  We need to make sure that its
   pagetable is all read-only, and can be pinned. */
void xen_pgd_pin(pgd_t *pgd)
{
	struct mmuext_op op;

	pgd_walk(pgd, PAGE_KERNEL_RO);

#if defined(CONFIG_X86_PAE)
	op.cmd = MMUEXT_PIN_L3_TABLE;
#else
	op.cmd = MMUEXT_PIN_L2_TABLE;
#endif
	op.arg1.mfn = pfn_to_mfn(PFN_DOWN(__pa(pgd)));
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		BUG();
}

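/*
 * Hedged lifecycle sketch (callers are the mm hooks below; "mm" stands
 * in for any mm_struct): a pagetable is made RO and pinned before its
 * first use, and unpinned and made RW again before it is torn down:
 *
 *	xen_pgd_pin(mm->pgd);      (from xen_dup_mmap / xen_activate_mm)
 *	...mm is live; Xen validates each update incrementally...
 *	xen_pgd_unpin(mm->pgd);    (from xen_exit_mmap)
 */
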
/* Release a pagetable's pages back as normal RW */
void xen_pgd_unpin(pgd_t *pgd)
{
	struct mmuext_op op;

	op.cmd = MMUEXT_UNPIN_TABLE;
	op.arg1.mfn = pfn_to_mfn(PFN_DOWN(__pa(pgd)));

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		BUG();

	pgd_walk(pgd, PAGE_KERNEL);
}

void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	xen_pgd_pin(next->pgd);
}

void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	xen_pgd_pin(mm->pgd);
}

void xen_exit_mmap(struct mm_struct *mm)
{
	struct task_struct *tsk = current;

	task_lock(tsk);

	/*
	 * We aggressively remove defunct pgd from cr3. We execute unmap_vmas()
	 * *much* faster this way, as no tlb flushes means bigger wrpt batches.
	 */
	if (tsk->active_mm == mm) {
		tsk->active_mm = &init_mm;
		atomic_inc(&init_mm.mm_count);

		switch_mm(mm, &init_mm, tsk);

		atomic_dec(&mm->mm_count);
		BUG_ON(atomic_read(&mm->mm_count) == 0);
	}

	task_unlock(tsk);

	xen_pgd_unpin(mm->pgd);
}