 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion.  In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable.  When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn.  Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest.  This prevents uncontrolled
 * guest updates to the pagetable.  Xen strictly enforces this, and
 * will disallow any pagetable update which will end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow.  The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use.  This means that Xen can be assured
 * that it is still valid when you do load it into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
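
/*
 * Illustrative sketch only (pfn_to_mfn/mfn_to_pfn are the real helpers,
 * the pseudo-code just shows the idea): installing a mapping for a pfn
 * conceptually does
 *
 *	pte = (pfn_to_mfn(pfn) << PAGE_SHIFT) | flags;
 *
 * and reading it back does
 *
 *	pfn = mfn_to_pfn(pte >> PAGE_SHIFT);
 *
 * which is what pte_pfn_to_mfn()/pte_mfn_to_pfn() below implement for
 * present entries.
 */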

#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/bug.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/paravirt.h>
#include <asm/linkage.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/page.h>
#include <xen/interface/xen.h>

#include "multicalls.h"
#include "mmu.h"

#define P2M_ENTRIES_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long))
#define TOP_ENTRIES		(MAX_DOMAIN_PAGES / P2M_ENTRIES_PER_PAGE)
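
/*
 * Layout note (worked example, not a definition): with 4K pages each leaf
 * page holds PAGE_SIZE / sizeof(unsigned long) entries - 1024 on 32-bit,
 * 512 on 64-bit - so p2m_top needs MAX_DOMAIN_PAGES / P2M_ENTRIES_PER_PAGE
 * pointers to cover the domain's whole pseudo-physical address space.
 */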

/* Placeholder for holes in the address space */
static unsigned long p2m_missing[P2M_ENTRIES_PER_PAGE] __page_aligned_data =
	{ [ 0 ... P2M_ENTRIES_PER_PAGE-1 ] = ~0UL };

/* Array of pointers to pages containing p2m entries */
static unsigned long *p2m_top[TOP_ENTRIES] __page_aligned_data =
	{ [ 0 ... TOP_ENTRIES - 1] = &p2m_missing[0] };

/* Arrays of p2m arrays expressed in mfns used for save/restore */
static unsigned long p2m_top_mfn[TOP_ENTRIES] __page_aligned_bss;

static unsigned long p2m_top_mfn_list[TOP_ENTRIES / P2M_ENTRIES_PER_PAGE]
	__page_aligned_bss;

static inline unsigned p2m_top_index(unsigned long pfn)
{
	BUG_ON(pfn >= MAX_DOMAIN_PAGES);
	return pfn / P2M_ENTRIES_PER_PAGE;
}

static inline unsigned p2m_index(unsigned long pfn)
{
	return pfn % P2M_ENTRIES_PER_PAGE;
}
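
/*
 * In other words (illustrative identity, not extra code):
 *
 *	pfn == p2m_top_index(pfn) * P2M_ENTRIES_PER_PAGE + p2m_index(pfn)
 *
 * and a lookup is simply p2m_top[p2m_top_index(pfn)][p2m_index(pfn)].
 */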

/* Build the parallel p2m_top_mfn structures */
void xen_setup_mfn_list_list(void)
{
	unsigned pfn, idx;

	for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_ENTRIES_PER_PAGE) {
		unsigned topidx = p2m_top_index(pfn);

		p2m_top_mfn[topidx] = virt_to_mfn(p2m_top[topidx]);
	}

	for (idx = 0; idx < ARRAY_SIZE(p2m_top_mfn_list); idx++) {
		unsigned topidx = idx * P2M_ENTRIES_PER_PAGE;
		p2m_top_mfn_list[idx] = virt_to_mfn(&p2m_top_mfn[topidx]);
	}

	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
		virt_to_mfn(p2m_top_mfn_list);
	HYPERVISOR_shared_info->arch.max_pfn = xen_start_info->nr_pages;
}

/* Set up p2m_top to point to the domain-builder provided p2m pages */
void __init xen_build_dynamic_phys_to_machine(void)
{
	unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list;
	unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
	unsigned pfn;

	for (pfn = 0; pfn < max_pfn; pfn += P2M_ENTRIES_PER_PAGE) {
		unsigned topidx = p2m_top_index(pfn);

		p2m_top[topidx] = &mfn_list[pfn];
	}
}

unsigned long get_phys_to_machine(unsigned long pfn)
{
	unsigned topidx, idx;

	if (unlikely(pfn >= MAX_DOMAIN_PAGES))
		return INVALID_P2M_ENTRY;

	topidx = p2m_top_index(pfn);
	idx = p2m_index(pfn);
	return p2m_top[topidx][idx];
}
EXPORT_SYMBOL_GPL(get_phys_to_machine);
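
/*
 * Typical use (sketch, not from this file): a caller that needs the machine
 * frame behind a guest pfn does
 *
 *	mfn = get_phys_to_machine(pfn);
 *	if (mfn == INVALID_P2M_ENTRY)
 *		... pfn is a hole or beyond MAX_DOMAIN_PAGES ...
 *
 * since holes (p2m_missing) and out-of-range pfns both read back as ~0UL.
 */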

static void alloc_p2m(unsigned long **pp, unsigned long *mfnp)
{
	unsigned long *p;
	unsigned i;

	p = (void *)__get_free_page(GFP_KERNEL | __GFP_NOFAIL);
	BUG_ON(p == NULL);

	for (i = 0; i < P2M_ENTRIES_PER_PAGE; i++)
		p[i] = INVALID_P2M_ENTRY;

	if (cmpxchg(pp, p2m_missing, p) != p2m_missing)
		free_page((unsigned long)p);
	else
		*mfnp = virt_to_mfn(p);
}

void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	unsigned topidx, idx;

	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
		BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
		return;
	}

	if (unlikely(pfn >= MAX_DOMAIN_PAGES)) {
		BUG_ON(mfn != INVALID_P2M_ENTRY);
		return;
	}

	topidx = p2m_top_index(pfn);
	if (p2m_top[topidx] == p2m_missing) {
		/* no need to allocate a page to store an invalid entry */
		if (mfn == INVALID_P2M_ENTRY)
			return;
		alloc_p2m(&p2m_top[topidx], &p2m_top_mfn[topidx]);
	}

	idx = p2m_index(pfn);
	p2m_top[topidx][idx] = mfn;
}

xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte = lookup_address(address, &level);
	unsigned offset = address & ~PAGE_MASK;

	BUG_ON(pte == NULL);

	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}

void make_lowmem_page_readonly(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);

	ptev = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

static bool page_pinned(void *ptr)
{
	struct page *page = virt_to_page(ptr);

	return PagePinned(page);
}

static void extend_mmu_update(const struct mmu_update *update)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));
	if (mcs.mc != NULL)
		mcs.mc->args[1]++;
	else {
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
	}

	u = mcs.args;
	*u = *update;
}
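
/*
 * Note on batching (descriptive only): callers bracket their updates with
 * xen_mc_batch()/xen_mc_issue(), so extend_mmu_update() either grows the
 * mmu_update hypercall already sitting in the batch or starts a new one;
 * nothing reaches Xen until the batch is issued.
 */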

void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pmd_val_ma(val);
	extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	/* If page is not pinned, we can just update the entry
	   directly */
	if (!page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pmd_hyper(ptr, val);
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
	set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
}

void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, pte_t pteval)
{
	/* updates to init_mm may be done without lock */
	if (mm == &init_mm)
		preempt_disable();

	if (mm == current->mm || mm == &init_mm) {
		if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
			struct multicall_space mcs;
			mcs = xen_mc_entry(0);

			MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
			xen_mc_issue(PARAVIRT_LAZY_MMU);
			goto out;
		} else
			if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
				goto out;
	}
	xen_set_pte(ptep, pteval);

out:
	if (mm == &init_mm)
		preempt_enable();
}

pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	/* Just return the pte as-is.  We preserve the bits on commit */
	return *ptep;
}

void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t pte)
{
	struct mmu_update u;

	xen_mc_batch();

	u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
	u.val = pte_val_ma(pte);
	extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

/* Assume pteval_t is equivalent to all the other *val_t types. */

static pteval_t pte_mfn_to_pfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long mfn = (val & PTE_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & ~PTE_MASK;
		val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t pte_pfn_to_mfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & ~PTE_MASK;
		val = ((pteval_t)pfn_to_mfn(pfn) << PAGE_SHIFT) | flags;
	}

	return val;
}

pteval_t xen_pte_val(pte_t pte)
{
	return pte_mfn_to_pfn(pte.pte);
}

pgdval_t xen_pgd_val(pgd_t pgd)
{
	return pte_mfn_to_pfn(pgd.pgd);
}

pte_t xen_make_pte(pteval_t pte)
{
	pte = pte_pfn_to_mfn(pte);
	return native_make_pte(pte);
}

pgd_t xen_make_pgd(pgdval_t pgd)
{
	pgd = pte_pfn_to_mfn(pgd);
	return native_make_pgd(pgd);
}

pmdval_t xen_pmd_val(pmd_t pmd)
{
	return pte_mfn_to_pfn(pmd.pmd);
}

void xen_set_pud_hyper(pud_t *ptr, pud_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pud_val_ma(val);
	extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pud(pud_t *ptr, pud_t val)
{
	/* If page is not pinned, we can just update the entry
	   directly */
	if (!page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pud_hyper(ptr, val);
}

void xen_set_pte(pte_t *ptep, pte_t pte)
{
#ifdef CONFIG_X86_PAE
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
#else
	*ptep = pte;
#endif
}

#ifdef CONFIG_X86_PAE
void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_64bit((u64 *)ptep, native_pte_val(pte));
}

void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();		/* make sure low gets written first */
	ptep->pte_high = 0;
}

void xen_pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

pmd_t xen_make_pmd(pmdval_t pmd)
{
	pmd = pte_pfn_to_mfn(pmd);
	return native_make_pmd(pmd);
}

#if PAGETABLE_LEVELS == 4
pudval_t xen_pud_val(pud_t pud)
{
	return pte_mfn_to_pfn(pud.pud);
}

pud_t xen_make_pud(pudval_t pud)
{
	pud = pte_pfn_to_mfn(pud);

	return native_make_pud(pud);
}

void xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	u.ptr = virt_to_machine(ptr).maddr;
	u.val = pgd_val_ma(val);
	extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pgd(pgd_t *ptr, pgd_t val)
{
	/* If page is not pinned, we can just update the entry
	   directly */
	if (!page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pgd_hyper(ptr, val);
}
#endif	/* PAGETABLE_LEVELS == 4 */

/*
   (Yet another) pagetable walker.  This one is intended for pinning a
   pagetable.  This means that it walks a pagetable and calls the
   callback function on each page it finds making up the page table,
   at every level.  It walks the entire pagetable, but it only bothers
   pinning pte pages which are below pte_limit.  In the normal case
   this will be TASK_SIZE, but at boot we need to pin up to
   FIXADDR_TOP.  But the important bit is that we don't pin beyond
   there, because then we start getting into Xen's ptes.
*/
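
/*
 * Callback contract (descriptive only): func() returns non-zero if
 * processing that page means the caller must flush the TLB afterwards;
 * pgd_walk() ORs all those results together and returns the combination.
 */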

static int pgd_walk(pgd_t *pgd_base, int (*func)(struct page *, enum pt_level),
		    unsigned long limit)
{
	pgd_t *pgd = pgd_base;
	int flush = 0;
	unsigned long addr = 0;
	unsigned long pgd_next;

	BUG_ON(limit > FIXADDR_TOP);

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	for (; addr != FIXADDR_TOP; pgd++, addr = pgd_next) {
		pud_t *pud;
		unsigned long pud_limit, pud_next;

		pgd_next = pud_limit = pgd_addr_end(addr, FIXADDR_TOP);

		if (!pgd_val(*pgd))
			continue;

		pud = pud_offset(pgd, 0);

		if (PTRS_PER_PUD > 1) /* not folded */
			flush |= (*func)(virt_to_page(pud), PT_PUD);

		for (; addr != pud_limit; pud++, addr = pud_next) {
			pmd_t *pmd;
			unsigned long pmd_limit;

			pud_next = pud_addr_end(addr, pud_limit);

			if (pud_next < limit)
				pmd_limit = pud_next;
			else
				pmd_limit = limit;

			if (pud_none(*pud))
				continue;

			pmd = pmd_offset(pud, 0);

			if (PTRS_PER_PMD > 1) /* not folded */
				flush |= (*func)(virt_to_page(pmd), PT_PMD);

			for (; addr != pmd_limit; pmd++) {
				addr += (PAGE_SIZE * PTRS_PER_PTE);
				if ((pmd_limit-1) < (addr-1)) {
					addr = pmd_limit;
					break;
				}

				if (pmd_none(*pmd))
					continue;

				flush |= (*func)(pmd_page(*pmd), PT_PTE);
			}
		}
	}

	flush |= (*func)(virt_to_page(pgd_base), PT_PGD);

	return flush;
}

static spinlock_t *lock_pte(struct page *page)
{
	spinlock_t *ptl = NULL;

#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
	ptl = __pte_lockptr(page);
	spin_lock(ptl);
#endif

	return ptl;
}

static void do_unlock(void *v)
{
	spinlock_t *ptl = v;
	spin_unlock(ptl);
}

static void xen_do_pin(unsigned level, unsigned long pfn)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	mcs = __xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = level;
	op->arg1.mfn = pfn_to_mfn(pfn);
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
}
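
/*
 * Note (descriptive only): xen_do_pin() merely queues a single mmuext_op
 * (e.g. MMUEXT_PIN_L1_TABLE or MMUEXT_UNPIN_TABLE) in the current multicall
 * batch; the hypervisor doesn't see it until xen_mc_issue() runs.
 */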

static int pin_page(struct page *page, enum pt_level level)
{
	unsigned pgfl = TestSetPagePinned(page);
	int flush;

	if (pgfl)
		flush = 0;		/* already pinned */
	else if (PageHighMem(page))
		/* kmaps need flushing if we found an unpinned
		   highpage */
		flush = 1;
	else {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);
		spinlock_t *ptl = NULL;

		flush = 0;
		if (level == PT_PTE)
			ptl = lock_pte(page);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL_RO),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (level == PT_PTE)
			xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);

		if (ptl) {
			/* Queue a deferred unlock for when this batch
			   is completed. */
			xen_mc_callback(do_unlock, ptl);
		}
	}

	return flush;
}

/* This is called just after a mm has been created, but it has not
   been used yet.  We need to make sure that its pagetable is all
   read-only, and can be pinned. */
void xen_pgd_pin(pgd_t *pgd)
{
	xen_mc_batch();

	if (pgd_walk(pgd, pin_page, TASK_SIZE)) {
		/* re-enable interrupts for kmap_flush_unused */
		xen_mc_issue(0);
		kmap_flush_unused();
		xen_mc_batch();
	}

	xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
	xen_mc_issue(0);
}

/*
 * On save, we need to pin all pagetables to make sure they get their
 * mfns turned into pfns.  Search the list for any unpinned pgds and pin
 * them (unpinned pgds are not currently in use, probably because the
 * process is under construction or destruction).
 */
void xen_mm_pin_all(void)
{
	unsigned long flags;
	struct page *page;

	spin_lock_irqsave(&pgd_lock, flags);

	list_for_each_entry(page, &pgd_list, lru) {
		if (!PagePinned(page)) {
			xen_pgd_pin((pgd_t *)page_address(page));
			SetPageSavePinned(page);
		}
	}

	spin_unlock_irqrestore(&pgd_lock, flags);
}

/*
 * The init_mm pagetable is really pinned as soon as it's created, but
 * that's before we have page structures to store the bits.  So do all
 * the book-keeping now.
 */
static __init int mark_pinned(struct page *page, enum pt_level level)
{
	SetPagePinned(page);
	return 0;
}

void __init xen_mark_init_mm_pinned(void)
{
	pgd_walk(init_mm.pgd, mark_pinned, FIXADDR_TOP);
}

static int unpin_page(struct page *page, enum pt_level level)
{
	unsigned pgfl = TestClearPagePinned(page);

	if (pgfl && !PageHighMem(page)) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		spinlock_t *ptl = NULL;
		struct multicall_space mcs;

		if (level == PT_PTE) {
			ptl = lock_pte(page);

			xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
		}

		mcs = __xen_mc_entry(0);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			/* unlock when batch completed */
			xen_mc_callback(do_unlock, ptl);
		}
	}

	return 0;		/* never need to flush on unpin */
}

/* Release a pagetable's pages back as normal RW */
static void xen_pgd_unpin(pgd_t *pgd)
{
	xen_mc_batch();

	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

	pgd_walk(pgd, unpin_page, TASK_SIZE);

	xen_mc_issue(0);
}

/*
 * On resume, undo any pinning done at save, so that the rest of the
 * kernel doesn't see any unexpected pinned pagetables.
 */
void xen_mm_unpin_all(void)
{
	unsigned long flags;
	struct page *page;

	spin_lock_irqsave(&pgd_lock, flags);

	list_for_each_entry(page, &pgd_list, lru) {
		if (PageSavePinned(page)) {
			BUG_ON(!PagePinned(page));
			printk("unpinning pinned %p\n", page_address(page));
			xen_pgd_unpin((pgd_t *)page_address(page));
			ClearPageSavePinned(page);
		}
	}

	spin_unlock_irqrestore(&pgd_lock, flags);
}

void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	spin_lock(&next->page_table_lock);
	xen_pgd_pin(next->pgd);
	spin_unlock(&next->page_table_lock);
}

void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	spin_lock(&mm->page_table_lock);
	xen_pgd_pin(mm->pgd);
	spin_unlock(&mm->page_table_lock);
}

#ifdef CONFIG_SMP
/* Another cpu may still have their %cr3 pointing at the pagetable, so
   we need to repoint it somewhere else before we can unpin it. */
static void drop_other_mm_ref(void *info)
{
	struct mm_struct *mm = info;
	struct mm_struct *active_mm;

#ifdef CONFIG_X86_64
	active_mm = read_pda(active_mm);
#else
	active_mm = __get_cpu_var(cpu_tlbstate).active_mm;
#endif

	if (active_mm == mm)
		leave_mm(smp_processor_id());

	/* If this cpu still has a stale cr3 reference, then make sure
	   it has been flushed. */
	if (x86_read_percpu(xen_current_cr3) == __pa(mm->pgd)) {
		load_cr3(swapper_pg_dir);
		arch_flush_lazy_cpu_mode();
	}
}

static void drop_mm_ref(struct mm_struct *mm)
{
	cpumask_t mask;
	unsigned cpu;

	if (current->active_mm == mm) {
		if (current->mm == mm)
			load_cr3(swapper_pg_dir);
		else
			leave_mm(smp_processor_id());
		arch_flush_lazy_cpu_mode();
	}

	/* Get the "official" set of cpus referring to our pagetable. */
	mask = mm->cpu_vm_mask;

	/* It's possible that a vcpu may have a stale reference to our
	   cr3, because it's in lazy mode and hasn't yet flushed its set
	   of pending hypercalls.  In this case, we can look at its actual
	   current cr3 value, and force it to flush if needed. */
	for_each_online_cpu(cpu) {
		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
			cpu_set(cpu, mask);
	}

	if (!cpus_empty(mask))
		smp_call_function_mask(mask, drop_other_mm_ref, mm, 1);
}
#else
static void drop_mm_ref(struct mm_struct *mm)
{
	if (current->active_mm == mm)
		load_cr3(swapper_pg_dir);
}
#endif	/* CONFIG_SMP */

/*
 * While a process runs, Xen pins its pagetables, which means that the
 * hypervisor forces it to be read-only, and it controls all updates
 * to it.  This means that all pagetable updates have to go via the
 * hypervisor, which is moderately expensive.
 *
 * Since we're pulling the pagetable down, we switch over to use init_mm,
 * unpin the old process's pagetable and mark it all read-write, which
 * allows further operations on it to be simple memory accesses.
 *
 * The only subtle point is that another CPU may still be using the
 * pagetable because of lazy tlb flushing.  This means we need to
 * switch all CPUs off this pagetable before we can unpin it.
 */
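
/*
 * A further detail (assumption drawn from the get_cpu()/put_cpu() pair
 * below): the cross-CPU shoot-down has to run with preemption disabled so
 * this task cannot migrate to another cpu while it is kicking the others
 * off the pagetable.
 */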

void xen_exit_mmap(struct mm_struct *mm)
{
	get_cpu();		/* make sure we don't move around */
	drop_mm_ref(mm);
	put_cpu();

	spin_lock(&mm->page_table_lock);

	/* pgd may not be pinned in the error exit path of execve */
	if (page_pinned(mm->pgd))
		xen_pgd_unpin(mm->pgd);

	spin_unlock(&mm->page_table_lock);
}