x86: change flush_tlb_others to take a const struct cpumask
arch/x86/include/asm/paravirt.h
#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT
#include <asm/page.h>
#include <asm/asm.h>

/* Bitmask of what can be clobbered: usually at least eax. */
#define CLBR_NONE 0
#define CLBR_EAX  (1 << 0)
#define CLBR_ECX  (1 << 1)
#define CLBR_EDX  (1 << 2)

#ifdef CONFIG_X86_64
#define CLBR_RSI  (1 << 3)
#define CLBR_RDI  (1 << 4)
#define CLBR_R8   (1 << 5)
#define CLBR_R9   (1 << 6)
#define CLBR_R10  (1 << 7)
#define CLBR_R11  (1 << 8)
#define CLBR_ANY  ((1 << 9) - 1)
#include <asm/desc_defs.h>
#else
/* CLBR_ANY should match all the registers the platform has.  For
 * i386, that's just the three above. */
#define CLBR_ANY  ((1 << 3) - 1)
#endif /* X86_64 */

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/cpumask.h>
#include <asm/kmap_types.h>
#include <asm/desc_defs.h>

struct page;
struct thread_struct;
struct desc_ptr;
struct tss_struct;
struct mm_struct;
struct desc_struct;

/* general info */
struct pv_info {
        unsigned int kernel_rpl;
        int shared_kernel_pmd;
        int paravirt_enabled;
        const char *name;
};

struct pv_init_ops {
        /*
         * Patch may replace one of the defined code sequences with
         * arbitrary code, subject to the same register constraints.
         * This generally means the code is not free to clobber any
         * registers other than EAX.  The patch function should return
         * the number of bytes of code generated, as we nop pad the
         * rest in generic code.
         */
        unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
                          unsigned long addr, unsigned len);

        /* Basic arch-specific setup */
        void (*arch_setup)(void);
        char *(*memory_setup)(void);
        void (*post_allocator_init)(void);

        /* Print a banner to identify the environment */
        void (*banner)(void);
};
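/*
 * Illustrative sketch only, not a kernel symbol: a backend's patch
 * hook typically special-cases a few hot sites and defers the rest to
 * paravirt_patch_default().  The start_/end_ markers are assumed to
 * come from a DEF_NATIVE() definition elsewhere.
 */
#if 0
static unsigned example_patch(u8 type, u16 clobbers, void *insnbuf,
                              unsigned long addr, unsigned len)
{
        switch (type) {
        case PARAVIRT_PATCH(pv_irq_ops.irq_disable):
                /* Inline the native instruction sequence at the site. */
                return paravirt_patch_insns(insnbuf, len,
                                            start_pv_irq_ops_irq_disable,
                                            end_pv_irq_ops_irq_disable);
        default:
                /* Fall back to the generic call/jmp/nop patcher. */
                return paravirt_patch_default(type, clobbers, insnbuf,
                                              addr, len);
        }
}
#endif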


struct pv_lazy_ops {
        /* Set deferred update mode, used for batching operations. */
        void (*enter)(void);
        void (*leave)(void);
};

struct pv_time_ops {
        void (*time_init)(void);

        /* Get and set time of day */
        unsigned long (*get_wallclock)(void);
        int (*set_wallclock)(unsigned long);

        unsigned long long (*sched_clock)(void);
        unsigned long (*get_tsc_khz)(void);
};

struct pv_cpu_ops {
        /* hooks for various privileged instructions */
        unsigned long (*get_debugreg)(int regno);
        void (*set_debugreg)(int regno, unsigned long value);

        void (*clts)(void);

        unsigned long (*read_cr0)(void);
        void (*write_cr0)(unsigned long);

        unsigned long (*read_cr4_safe)(void);
        unsigned long (*read_cr4)(void);
        void (*write_cr4)(unsigned long);

#ifdef CONFIG_X86_64
        unsigned long (*read_cr8)(void);
        void (*write_cr8)(unsigned long);
#endif

        /* Segment descriptor handling */
        void (*load_tr_desc)(void);
        void (*load_gdt)(const struct desc_ptr *);
        void (*load_idt)(const struct desc_ptr *);
        void (*store_gdt)(struct desc_ptr *);
        void (*store_idt)(struct desc_ptr *);
        void (*set_ldt)(const void *desc, unsigned entries);
        unsigned long (*store_tr)(void);
        void (*load_tls)(struct thread_struct *t, unsigned int cpu);
#ifdef CONFIG_X86_64
        void (*load_gs_index)(unsigned int idx);
#endif
        void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
                                const void *desc);
        void (*write_gdt_entry)(struct desc_struct *,
                                int entrynum, const void *desc, int size);
        void (*write_idt_entry)(gate_desc *,
                                int entrynum, const gate_desc *gate);
        void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
        void (*free_ldt)(struct desc_struct *ldt, unsigned entries);

        void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);

        void (*set_iopl_mask)(unsigned mask);

        void (*wbinvd)(void);
        void (*io_delay)(void);

        /* cpuid emulation, mostly so that caps bits can be disabled */
        void (*cpuid)(unsigned int *eax, unsigned int *ebx,
                      unsigned int *ecx, unsigned int *edx);

        /* MSR, PMC and TSC operations.
           err = 0/-EFAULT.  wrmsr returns 0/-EFAULT. */
        u64 (*read_msr_amd)(unsigned int msr, int *err);
        u64 (*read_msr)(unsigned int msr, int *err);
        int (*write_msr)(unsigned int msr, unsigned low, unsigned high);

        u64 (*read_tsc)(void);
        u64 (*read_pmc)(int counter);
        unsigned long long (*read_tscp)(unsigned int *aux);

        /*
         * Atomically enable interrupts and return to userspace.  This
         * is only ever used to return to 32-bit processes; in a
         * 64-bit kernel, it's used for 32-on-64 compat processes, but
         * never native 64-bit processes.  (Jump, not call.)
         */
        void (*irq_enable_sysexit)(void);

        /*
         * Switch to usermode gs and return to 64-bit usermode using
         * sysret.  Only used in 64-bit kernels to return to 64-bit
         * processes.  Usermode register state, including %rsp, must
         * already be restored.
         */
        void (*usergs_sysret64)(void);

        /*
         * Switch to usermode gs and return to 32-bit usermode using
         * sysret.  Used to return to 32-on-64 compat processes.
         * Other usermode register state, including %esp, must already
         * be restored.
         */
        void (*usergs_sysret32)(void);

        /* Normal iret.  Jump to this with the standard iret stack
           frame set up. */
        void (*iret)(void);

        void (*swapgs)(void);

        struct pv_lazy_ops lazy_mode;
};

struct pv_irq_ops {
        void (*init_IRQ)(void);

        /*
         * Get/set interrupt state.  save_fl and restore_fl are only
         * expected to use X86_EFLAGS_IF; all other bits
         * returned from save_fl are undefined, and may be ignored by
         * restore_fl.
         */
        unsigned long (*save_fl)(void);
        void (*restore_fl)(unsigned long);
        void (*irq_disable)(void);
        void (*irq_enable)(void);
        void (*safe_halt)(void);
        void (*halt)(void);

#ifdef CONFIG_X86_64
        void (*adjust_exception_frame)(void);
#endif
};

struct pv_apic_ops {
#ifdef CONFIG_X86_LOCAL_APIC
        void (*setup_boot_clock)(void);
        void (*setup_secondary_clock)(void);

        void (*startup_ipi_hook)(int phys_apicid,
                                 unsigned long start_eip,
                                 unsigned long start_esp);
#endif
};

struct pv_mmu_ops {
        /*
         * Called before/after init_mm pagetable setup. setup_start
         * may reset %cr3, and may pre-install parts of the pagetable;
         * pagetable setup is expected to preserve any existing
         * mapping.
         */
        void (*pagetable_setup_start)(pgd_t *pgd_base);
        void (*pagetable_setup_done)(pgd_t *pgd_base);

        unsigned long (*read_cr2)(void);
        void (*write_cr2)(unsigned long);

        unsigned long (*read_cr3)(void);
        void (*write_cr3)(unsigned long);

        /*
         * Hooks for intercepting the creation/use/destruction of an
         * mm_struct.
         */
        void (*activate_mm)(struct mm_struct *prev,
                            struct mm_struct *next);
        void (*dup_mmap)(struct mm_struct *oldmm,
                         struct mm_struct *mm);
        void (*exit_mmap)(struct mm_struct *mm);


        /* TLB operations */
        void (*flush_tlb_user)(void);
        void (*flush_tlb_kernel)(void);
        void (*flush_tlb_single)(unsigned long addr);
        void (*flush_tlb_others)(const struct cpumask *cpus,
                                 struct mm_struct *mm,
                                 unsigned long va);

        /* Hooks for allocating and freeing a pagetable top-level */
        int  (*pgd_alloc)(struct mm_struct *mm);
        void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);

        /*
         * Hooks for allocating/releasing pagetable pages when they're
         * attached to a pagetable
         */
        void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
        void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
        void (*alloc_pmd_clone)(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count);
        void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
        void (*release_pte)(unsigned long pfn);
        void (*release_pmd)(unsigned long pfn);
        void (*release_pud)(unsigned long pfn);

        /* Pagetable manipulation functions */
        void (*set_pte)(pte_t *ptep, pte_t pteval);
        void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
                           pte_t *ptep, pte_t pteval);
        void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
        void (*pte_update)(struct mm_struct *mm, unsigned long addr,
                           pte_t *ptep);
        void (*pte_update_defer)(struct mm_struct *mm,
                                 unsigned long addr, pte_t *ptep);

        pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
                                        pte_t *ptep);
        void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
                                        pte_t *ptep, pte_t pte);

        pteval_t (*pte_val)(pte_t);
        pteval_t (*pte_flags)(pte_t);
        pte_t (*make_pte)(pteval_t pte);

        pgdval_t (*pgd_val)(pgd_t);
        pgd_t (*make_pgd)(pgdval_t pgd);

#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
        void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
        void (*set_pte_present)(struct mm_struct *mm, unsigned long addr,
                                pte_t *ptep, pte_t pte);
        void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
                          pte_t *ptep);
        void (*pmd_clear)(pmd_t *pmdp);

#endif  /* CONFIG_X86_PAE */

        void (*set_pud)(pud_t *pudp, pud_t pudval);

        pmdval_t (*pmd_val)(pmd_t);
        pmd_t (*make_pmd)(pmdval_t pmd);

#if PAGETABLE_LEVELS == 4
        pudval_t (*pud_val)(pud_t);
        pud_t (*make_pud)(pudval_t pud);

        void (*set_pgd)(pgd_t *pgdp, pgd_t pgdval);
#endif  /* PAGETABLE_LEVELS == 4 */
#endif  /* PAGETABLE_LEVELS >= 3 */

#ifdef CONFIG_HIGHPTE
        void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
#endif

        struct pv_lazy_ops lazy_mode;

        /* dom0 ops */

        /* Sometimes the physical address is a pfn, and sometimes it's
           an mfn.  We can tell which is which from the index. */
        void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
                           unsigned long phys, pgprot_t flags);
};

struct raw_spinlock;
struct pv_lock_ops {
        int (*spin_is_locked)(struct raw_spinlock *lock);
        int (*spin_is_contended)(struct raw_spinlock *lock);
        void (*spin_lock)(struct raw_spinlock *lock);
        void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
        int (*spin_trylock)(struct raw_spinlock *lock);
        void (*spin_unlock)(struct raw_spinlock *lock);
};

/* This contains all the paravirt structures: we get a convenient
 * number for each function using the offset which we use to indicate
 * what to patch. */
struct paravirt_patch_template {
        struct pv_init_ops pv_init_ops;
        struct pv_time_ops pv_time_ops;
        struct pv_cpu_ops pv_cpu_ops;
        struct pv_irq_ops pv_irq_ops;
        struct pv_apic_ops pv_apic_ops;
        struct pv_mmu_ops pv_mmu_ops;
        struct pv_lock_ops pv_lock_ops;
};

extern struct pv_info pv_info;
extern struct pv_init_ops pv_init_ops;
extern struct pv_time_ops pv_time_ops;
extern struct pv_cpu_ops pv_cpu_ops;
extern struct pv_irq_ops pv_irq_ops;
extern struct pv_apic_ops pv_apic_ops;
extern struct pv_mmu_ops pv_mmu_ops;
extern struct pv_lock_ops pv_lock_ops;

#define PARAVIRT_PATCH(x)                                       \
        (offsetof(struct paravirt_patch_template, x) / sizeof(void *))

#define paravirt_type(op)                               \
        [paravirt_typenum] "i" (PARAVIRT_PATCH(op)),    \
        [paravirt_opptr] "m" (op)
#define paravirt_clobber(clobber)               \
        [paravirt_clobber] "i" (clobber)
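/*
 * For example (illustrative): PARAVIRT_PATCH(pv_cpu_ops.iret) is
 * offsetof(struct paravirt_patch_template, pv_cpu_ops.iret) divided by
 * the pointer size, i.e. a small integer uniquely numbering that op
 * slot; paravirt_type() passes it to the asm as an immediate.
 */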

/*
 * Generate some code, and mark it as patchable by the
 * apply_paravirt() alternate instruction patcher.
 */
#define _paravirt_alt(insn_string, type, clobber)       \
        "771:\n\t" insn_string "\n" "772:\n"            \
        ".pushsection .parainstructions,\"a\"\n"        \
        _ASM_ALIGN "\n"                                 \
        _ASM_PTR " 771b\n"                              \
        "  .byte " type "\n"                            \
        "  .byte 772b-771b\n"                           \
        "  .short " clobber "\n"                        \
        ".popsection\n"

/* Generate patchable code, with the default asm parameters. */
#define paravirt_alt(insn_string)                                       \
        _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")

/* Simple instruction patching code. */
#define DEF_NATIVE(ops, name, code)                                     \
        extern const char start_##ops##_##name[], end_##ops##_##name[]; \
        asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
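/*
 * Example usage, as in arch/x86/kernel/paravirt_patch_32.c:
 *
 *      DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
 *
 * which emits the native "cli" between start_pv_irq_ops_irq_disable
 * and end_pv_irq_ops_irq_disable, ready for paravirt_patch_insns()
 * to copy into a patch site.
 */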

unsigned paravirt_patch_nop(void);
unsigned paravirt_patch_ignore(unsigned len);
unsigned paravirt_patch_call(void *insnbuf,
                             const void *target, u16 tgt_clobbers,
                             unsigned long addr, u16 site_clobbers,
                             unsigned len);
unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
                            unsigned long addr, unsigned len);
unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
                                unsigned long addr, unsigned len);

unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
                              const char *start, const char *end);

unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
                      unsigned long addr, unsigned len);

int paravirt_disable_iospace(void);

/*
 * This generates an indirect call based on the operation type number.
 * The type number, computed in PARAVIRT_PATCH, is derived from the
 * offset into the paravirt_patch_template structure, and can therefore be
 * freely converted back into a structure offset.
 */
#define PARAVIRT_CALL   "call *%[paravirt_opptr];"

/*
 * These macros are intended to wrap calls through one of the paravirt
 * ops structs, so that they can be later identified and patched at
 * runtime.
 *
 * Normally, a call to a pv_op function is a simple indirect call:
 * (pv_op_struct.operations)(args...).
 *
 * Unfortunately, this is a relatively slow operation for modern CPUs,
 * because it cannot necessarily determine what the destination
 * address is.  In this case, the address is a runtime constant, so at
 * the very least we can patch the call to be a simple direct call, or
 * ideally, patch an inline implementation into the callsite.  (Direct
 * calls are essentially free, because the call and return addresses
 * are completely predictable.)
 *
 * For i386, these macros rely on the standard gcc "regparm(3)" calling
 * convention, in which the first three arguments are placed in %eax,
 * %edx, %ecx (in that order), and the remaining arguments are placed
 * on the stack.  All caller-save registers (eax,edx,ecx) are expected
 * to be modified (either clobbered or used for return values).
 * X86_64, on the other hand, already specifies a register-based calling
 * convention, returning in %rax, with parameters going in %rdi, %rsi,
 * %rdx, and %rcx. Note that for this reason, x86_64 does not need any
 * special handling for dealing with 4 arguments, unlike i386.
 * However, x86_64 also has to clobber all caller-saved registers, which
 * unfortunately are quite a few (r8 - r11).
 *
 * The call instruction itself is marked by placing its start address
 * and size into the .parainstructions section, so that
 * apply_paravirt() in arch/x86/kernel/alternative.c can do the
 * appropriate patching under the control of the backend pv_init_ops
 * implementation.
 *
 * Unfortunately there's no way to get gcc to generate the args setup
 * for the call, and then allow the call itself to be generated by an
 * inline asm.  Because of this, we must do the complete arg setup and
 * return value handling from within these macros.  This is fairly
 * cumbersome.
 *
 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
 * It could be extended to more arguments, but there would be little
 * to be gained from that.  For each number of arguments, there are
 * the two VCALL and CALL variants for void and non-void functions.
 *
 * When there is a return value, the invoker of the macro must specify
 * the return type.  The macro then uses sizeof() on that type to
 * determine whether it's a 32- or 64-bit value, and places the return
 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
 * 64-bit). For x86_64 machines, it just returns in %rax regardless of
 * the return value size.
 *
 * On i386, 64-bit arguments are passed as a pair of adjacent 32-bit
 * arguments, in low,high order.
 *
 * Small structures are passed and returned in registers.  The macro
 * calling convention can't directly deal with this, so the wrapper
 * functions must do this.
 *
 * These PVOP_* macros are only defined within this header.  This
 * means that all uses must be wrapped in inline functions.  This also
 * makes sure the incoming and outgoing types are always correct.
 */
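/*
 * As a rough illustration: on i386 a wrapper such as read_cr2() below
 * expands (before patching) to an indirect call,
 *
 *      call *pv_mmu_ops.read_cr2       (result in %eax)
 *
 * with the call's address, length, type number and clobber mask
 * recorded in .parainstructions, so the site can later be patched
 * into the native "mov %cr2, %eax" instead.
 */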
#ifdef CONFIG_X86_32
#define PVOP_VCALL_ARGS                 unsigned long __eax, __edx, __ecx
#define PVOP_CALL_ARGS                  PVOP_VCALL_ARGS
#define PVOP_VCALL_CLOBBERS             "=a" (__eax), "=d" (__edx),     \
                                        "=c" (__ecx)
#define PVOP_CALL_CLOBBERS              PVOP_VCALL_CLOBBERS
#define EXTRA_CLOBBERS
#define VEXTRA_CLOBBERS
#else
#define PVOP_VCALL_ARGS         unsigned long __edi, __esi, __edx, __ecx
#define PVOP_CALL_ARGS          PVOP_VCALL_ARGS, __eax
#define PVOP_VCALL_CLOBBERS     "=D" (__edi),                           \
                                "=S" (__esi), "=d" (__edx),             \
                                "=c" (__ecx)

#define PVOP_CALL_CLOBBERS      PVOP_VCALL_CLOBBERS, "=a" (__eax)

#define EXTRA_CLOBBERS   , "r8", "r9", "r10", "r11"
#define VEXTRA_CLOBBERS  , "rax", "r8", "r9", "r10", "r11"
#endif

#ifdef CONFIG_PARAVIRT_DEBUG
#define PVOP_TEST_NULL(op)      BUG_ON(op == NULL)
#else
#define PVOP_TEST_NULL(op)      ((void)op)
#endif

#define __PVOP_CALL(rettype, op, pre, post, ...)                        \
        ({                                                              \
                rettype __ret;                                          \
                PVOP_CALL_ARGS;                                 \
                PVOP_TEST_NULL(op);                                     \
                /* This is 32-bit specific, but is okay in 64-bit */    \
                /* since this condition will never hold */              \
                if (sizeof(rettype) > sizeof(unsigned long)) {          \
                        asm volatile(pre                                \
                                     paravirt_alt(PARAVIRT_CALL)        \
                                     post                               \
                                     : PVOP_CALL_CLOBBERS               \
                                     : paravirt_type(op),               \
                                       paravirt_clobber(CLBR_ANY),      \
                                       ##__VA_ARGS__                    \
                                     : "memory", "cc" EXTRA_CLOBBERS);  \
                        __ret = (rettype)((((u64)__edx) << 32) | __eax); \
                } else {                                                \
                        asm volatile(pre                                \
                                     paravirt_alt(PARAVIRT_CALL)        \
                                     post                               \
                                     : PVOP_CALL_CLOBBERS               \
                                     : paravirt_type(op),               \
                                       paravirt_clobber(CLBR_ANY),      \
                                       ##__VA_ARGS__                    \
                                     : "memory", "cc" EXTRA_CLOBBERS);  \
                        __ret = (rettype)__eax;                         \
                }                                                       \
                __ret;                                                  \
        })
#define __PVOP_VCALL(op, pre, post, ...)                                \
        ({                                                              \
                PVOP_VCALL_ARGS;                                        \
                PVOP_TEST_NULL(op);                                     \
                asm volatile(pre                                        \
                             paravirt_alt(PARAVIRT_CALL)                \
                             post                                       \
                             : PVOP_VCALL_CLOBBERS                      \
                             : paravirt_type(op),                       \
                               paravirt_clobber(CLBR_ANY),              \
                               ##__VA_ARGS__                            \
                             : "memory", "cc" VEXTRA_CLOBBERS);         \
        })

#define PVOP_CALL0(rettype, op)                                         \
        __PVOP_CALL(rettype, op, "", "")
#define PVOP_VCALL0(op)                                                 \
        __PVOP_VCALL(op, "", "")

#define PVOP_CALL1(rettype, op, arg1)                                   \
        __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)))
#define PVOP_VCALL1(op, arg1)                                           \
        __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)))

#define PVOP_CALL2(rettype, op, arg1, arg2)                             \
        __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),   \
        "1" ((unsigned long)(arg2)))
#define PVOP_VCALL2(op, arg1, arg2)                                     \
        __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),           \
        "1" ((unsigned long)(arg2)))

#define PVOP_CALL3(rettype, op, arg1, arg2, arg3)                       \
        __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),   \
        "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)))
#define PVOP_VCALL3(op, arg1, arg2, arg3)                               \
        __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),           \
        "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)))

/* This is the only difference in x86_64. We can make it much simpler */
#ifdef CONFIG_X86_32
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)                 \
        __PVOP_CALL(rettype, op,                                        \
                    "push %[_arg4];", "lea 4(%%esp),%%esp;",            \
                    "0" ((u32)(arg1)), "1" ((u32)(arg2)),               \
                    "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)                         \
        __PVOP_VCALL(op,                                                \
                    "push %[_arg4];", "lea 4(%%esp),%%esp;",            \
                    "0" ((u32)(arg1)), "1" ((u32)(arg2)),               \
                    "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#else
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)                 \
        __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),   \
        "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)),         \
        "3"((unsigned long)(arg4)))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)                         \
        __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),           \
        "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)),         \
        "3"((unsigned long)(arg4)))
#endif

static inline int paravirt_enabled(void)
{
        return pv_info.paravirt_enabled;
}

static inline void load_sp0(struct tss_struct *tss,
                             struct thread_struct *thread)
{
        PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
}

#define ARCH_SETUP                      pv_init_ops.arch_setup();
static inline unsigned long get_wallclock(void)
{
        return PVOP_CALL0(unsigned long, pv_time_ops.get_wallclock);
}

static inline int set_wallclock(unsigned long nowtime)
{
        return PVOP_CALL1(int, pv_time_ops.set_wallclock, nowtime);
}

static inline void (*choose_time_init(void))(void)
{
        return pv_time_ops.time_init;
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
                           unsigned int *ecx, unsigned int *edx)
{
        PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
}

/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
        return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
        PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
}

static inline void clts(void)
{
        PVOP_VCALL0(pv_cpu_ops.clts);
}

static inline unsigned long read_cr0(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
        return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
        PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
}

static inline unsigned long read_cr3(void)
{
        return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
        PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}

static inline unsigned long read_cr4(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
}
static inline unsigned long read_cr4_safe(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
}

static inline void write_cr4(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_cr8(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
}

static inline void write_cr8(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
}
#endif

static inline void raw_safe_halt(void)
{
        PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void halt(void)
{
        PVOP_VCALL0(pv_irq_ops.halt);
}

static inline void wbinvd(void)
{
        PVOP_VCALL0(pv_cpu_ops.wbinvd);
}

#define get_kernel_rpl()  (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr, int *err)
{
        return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
}
static inline u64 paravirt_read_msr_amd(unsigned msr, int *err)
{
        return PVOP_CALL2(u64, pv_cpu_ops.read_msr_amd, msr, err);
}
static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
{
        return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
}

/* These should all do BUG_ON(_err), but our headers are too tangled. */
#define rdmsr(msr, val1, val2)                  \
do {                                            \
        int _err;                               \
        u64 _l = paravirt_read_msr(msr, &_err); \
        val1 = (u32)_l;                         \
        val2 = _l >> 32;                        \
} while (0)

#define wrmsr(msr, val1, val2)                  \
do {                                            \
        paravirt_write_msr(msr, val1, val2);    \
} while (0)

#define rdmsrl(msr, val)                        \
do {                                            \
        int _err;                               \
        val = paravirt_read_msr(msr, &_err);    \
} while (0)

#define wrmsrl(msr, val)        wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
#define wrmsr_safe(msr, a, b)   paravirt_write_msr(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)                   \
({                                              \
        int _err;                               \
        u64 _l = paravirt_read_msr(msr, &_err); \
        (*a) = (u32)_l;                         \
        (*b) = _l >> 32;                        \
        _err;                                   \
})
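/*
 * Illustrative use (the MSR constant is assumed to be in scope):
 *
 *      u32 lo, hi;
 *      if (rdmsr_safe(MSR_IA32_MISC_ENABLE, &lo, &hi))
 *              ...the read faulted, e.g. the MSR does not exist...
 */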

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
        int err;

        *p = paravirt_read_msr(msr, &err);
        return err;
}
static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
        int err;

        *p = paravirt_read_msr_amd(msr, &err);
        return err;
}

static inline u64 paravirt_read_tsc(void)
{
        return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
}

#define rdtscl(low)                             \
do {                                            \
        u64 _l = paravirt_read_tsc();           \
        low = (int)_l;                          \
} while (0)

#define rdtscll(val) (val = paravirt_read_tsc())

static inline unsigned long long paravirt_sched_clock(void)
{
        return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}
#define calibrate_tsc() (pv_time_ops.get_tsc_khz())

static inline unsigned long long paravirt_read_pmc(int counter)
{
        return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}

#define rdpmc(counter, low, high)               \
do {                                            \
        u64 _l = paravirt_read_pmc(counter);    \
        low = (u32)_l;                          \
        high = _l >> 32;                        \
} while (0)

static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
{
        return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
}

#define rdtscp(low, high, aux)                          \
do {                                                    \
        unsigned int __aux;                             \
        unsigned long long __val = paravirt_rdtscp(&__aux); \
        (low) = (u32)__val;                             \
        (high) = (u32)(__val >> 32);                    \
        (aux) = __aux;                                  \
} while (0)

#define rdtscpll(val, aux)                              \
do {                                                    \
        unsigned int __aux;                             \
        val = paravirt_rdtscp(&__aux);                  \
        (aux) = __aux;                                  \
} while (0)

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
        PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
}
static inline void store_gdt(struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
}
static inline void store_idt(struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
}
static inline unsigned long paravirt_store_tr(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
}
#define store_tr(tr)    ((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
        PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
}

#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
        PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
}
#endif

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
                                   const void *desc)
{
        PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
                                   void *desc, int type)
{
        PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
        PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
}
static inline void set_iopl_mask(unsigned mask)
{
        PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
}

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
        pv_cpu_ops.io_delay();
#ifdef REALLY_SLOW_IO
        pv_cpu_ops.io_delay();
        pv_cpu_ops.io_delay();
        pv_cpu_ops.io_delay();
#endif
}

#ifdef CONFIG_X86_LOCAL_APIC
static inline void setup_boot_clock(void)
{
        PVOP_VCALL0(pv_apic_ops.setup_boot_clock);
}

static inline void setup_secondary_clock(void)
{
        PVOP_VCALL0(pv_apic_ops.setup_secondary_clock);
}
#endif

static inline void paravirt_post_allocator_init(void)
{
        if (pv_init_ops.post_allocator_init)
                (*pv_init_ops.post_allocator_init)();
}

static inline void paravirt_pagetable_setup_start(pgd_t *base)
{
        (*pv_mmu_ops.pagetable_setup_start)(base);
}

static inline void paravirt_pagetable_setup_done(pgd_t *base)
{
        (*pv_mmu_ops.pagetable_setup_done)(base);
}

#ifdef CONFIG_SMP
static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
                                    unsigned long start_esp)
{
        PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
                    phys_apicid, start_eip, start_esp);
}
#endif

static inline void paravirt_activate_mm(struct mm_struct *prev,
                                        struct mm_struct *next)
{
        PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
                                 struct mm_struct *mm)
{
        PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
        PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
}

static inline void __flush_tlb(void)
{
        PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
}
static inline void __flush_tlb_global(void)
{
        PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
}
static inline void __flush_tlb_single(unsigned long addr)
{
        PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}

static inline void flush_tlb_others(const struct cpumask *cpumask,
                                    struct mm_struct *mm,
                                    unsigned long va)
{
        PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, cpumask, mm, va);
}
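/*
 * Illustrative only: a caller passes the set of *other* CPUs that
 * currently have this mm loaded (e.g. a mask derived from
 * mm->cpu_vm_mask with the local CPU cleared), plus the mm and the
 * virtual address being invalidated:
 *
 *      flush_tlb_others(&other_cpus, mm, va);
 */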

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
        return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
}

static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
                                            unsigned long start, unsigned long count)
{
        PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count);
}
static inline void paravirt_release_pmd(unsigned long pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}

#ifdef CONFIG_HIGHPTE
static inline void *kmap_atomic_pte(struct page *page, enum km_type type)
{
        unsigned long ret;
        ret = PVOP_CALL2(unsigned long, pv_mmu_ops.kmap_atomic_pte, page, type);
        return (void *)ret;
}
#endif

static inline void pte_update(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
}

static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
                                    pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
}

static inline pte_t __pte(pteval_t val)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALL2(pteval_t,
                                 pv_mmu_ops.make_pte,
                                 val, (u64)val >> 32);
        else
                ret = PVOP_CALL1(pteval_t,
                                 pv_mmu_ops.make_pte,
                                 val);

        return (pte_t) { .pte = ret };
}

static inline pteval_t pte_val(pte_t pte)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_val,
                                 pte.pte, (u64)pte.pte >> 32);
        else
                ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_val,
                                 pte.pte);

        return ret;
}

static inline pteval_t pte_flags(pte_t pte)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_flags,
                                 pte.pte, (u64)pte.pte >> 32);
        else
                ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_flags,
                                 pte.pte);

#ifdef CONFIG_PARAVIRT_DEBUG
        BUG_ON(ret & PTE_PFN_MASK);
#endif
        return ret;
}

static inline pgd_t __pgd(pgdval_t val)
{
        pgdval_t ret;

        if (sizeof(pgdval_t) > sizeof(long))
                ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.make_pgd,
                                 val, (u64)val >> 32);
        else
                ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.make_pgd,
                                 val);

        return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
        pgdval_t ret;

        if (sizeof(pgdval_t) > sizeof(long))
                ret =  PVOP_CALL2(pgdval_t, pv_mmu_ops.pgd_val,
                                  pgd.pgd, (u64)pgd.pgd >> 32);
        else
                ret =  PVOP_CALL1(pgdval_t, pv_mmu_ops.pgd_val,
                                  pgd.pgd);

        return ret;
}

#define  __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
                                           pte_t *ptep)
{
        pteval_t ret;

        ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
                         mm, addr, ptep);

        return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
                                           pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                /* 5 arg words */
                pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
        else
                PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
                            mm, addr, ptep, pte.pte);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
                            pte.pte, (u64)pte.pte >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
                            pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                /* 5 arg words */
                pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
        else
                PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
        pmdval_t val = native_pmd_val(pmd);

        if (sizeof(pmdval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
}

#if PAGETABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
        pmdval_t ret;

        if (sizeof(pmdval_t) > sizeof(long))
                ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.make_pmd,
                                 val, (u64)val >> 32);
        else
                ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.make_pmd,
                                 val);

        return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
        pmdval_t ret;

        if (sizeof(pmdval_t) > sizeof(long))
                ret =  PVOP_CALL2(pmdval_t, pv_mmu_ops.pmd_val,
                                  pmd.pmd, (u64)pmd.pmd >> 32);
        else
                ret =  PVOP_CALL1(pmdval_t, pv_mmu_ops.pmd_val,
                                  pmd.pmd);

        return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
        pudval_t val = native_pud_val(pud);

        if (sizeof(pudval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
                            val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
                            val);
}
#if PAGETABLE_LEVELS == 4
static inline pud_t __pud(pudval_t val)
{
        pudval_t ret;

        if (sizeof(pudval_t) > sizeof(long))
                ret = PVOP_CALL2(pudval_t, pv_mmu_ops.make_pud,
                                 val, (u64)val >> 32);
        else
                ret = PVOP_CALL1(pudval_t, pv_mmu_ops.make_pud,
                                 val);

        return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
        pudval_t ret;

        if (sizeof(pudval_t) > sizeof(long))
                ret =  PVOP_CALL2(pudval_t, pv_mmu_ops.pud_val,
                                  pud.pud, (u64)pud.pud >> 32);
        else
                ret =  PVOP_CALL1(pudval_t, pv_mmu_ops.pud_val,
                                  pud.pud);

        return ret;
}

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
        pgdval_t val = native_pgd_val(pgd);

        if (sizeof(pgdval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
                            val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
                            val);
}

static inline void pgd_clear(pgd_t *pgdp)
{
        set_pgd(pgdp, __pgd(0));
}

static inline void pud_clear(pud_t *pudp)
{
        set_pud(pudp, __pud(0));
}

#endif  /* PAGETABLE_LEVELS == 4 */

#endif  /* PAGETABLE_LEVELS >= 3 */

#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
   64-bit pte atomically */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
        PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
                    pte.pte, pte.pte >> 32);
}

static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
                                   pte_t *ptep, pte_t pte)
{
        /* 5 arg words */
        pv_mmu_ops.set_pte_present(mm, addr, ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
        PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
        set_pte(ptep, pte);
}

static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
                                   pte_t *ptep, pte_t pte)
{
        set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
        set_pmd(pmdp, __pmd(0));
}
#endif  /* CONFIG_X86_PAE */

/* Lazy mode for batching updates / context switch */
enum paravirt_lazy_mode {
        PARAVIRT_LAZY_NONE,
        PARAVIRT_LAZY_MMU,
        PARAVIRT_LAZY_CPU,
};

enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
void paravirt_enter_lazy_cpu(void);
void paravirt_leave_lazy_cpu(void);
void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);
void paravirt_leave_lazy(enum paravirt_lazy_mode mode);

#define  __HAVE_ARCH_ENTER_LAZY_CPU_MODE
static inline void arch_enter_lazy_cpu_mode(void)
{
        PVOP_VCALL0(pv_cpu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_cpu_mode(void)
{
        PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave);
}

static inline void arch_flush_lazy_cpu_mode(void)
{
        if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)) {
                arch_leave_lazy_cpu_mode();
                arch_enter_lazy_cpu_mode();
        }
}


#define  __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
        if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU)) {
                arch_leave_lazy_mmu_mode();
                arch_enter_lazy_mmu_mode();
        }
}
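/*
 * Sketch only, assuming a contiguous run of ptes: batching updates
 * under lazy MMU mode lets a hypervisor backend queue the individual
 * set_pte_at() calls and flush them as one hypercall on leave.
 */
#if 0
        arch_enter_lazy_mmu_mode();
        for (addr = start; addr < end; addr += PAGE_SIZE, ptep++)
                set_pte_at(mm, addr, ptep, pte_wrprotect(*ptep));
        arch_leave_lazy_mmu_mode();
#endif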

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
                                unsigned long phys, pgprot_t flags)
{
        pv_mmu_ops.set_fixmap(idx, phys, flags);
}

void _paravirt_nop(void);
#define paravirt_nop    ((void *)_paravirt_nop)

void paravirt_use_bytelocks(void);

#ifdef CONFIG_SMP

static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
{
        return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
}

static inline int __raw_spin_is_contended(struct raw_spinlock *lock)
{
        return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
}

static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
{
        PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
}

static __always_inline void __raw_spin_lock_flags(struct raw_spinlock *lock,
                                                  unsigned long flags)
{
        PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
}

static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock)
{
        return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
}

static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
{
        PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
}

#endif

/* These all sit in the .parainstructions section to tell us what to patch. */
struct paravirt_patch_site {
        u8 *instr;              /* original instructions */
        u8 instrtype;           /* type of this instruction */
        u8 len;                 /* length of original instruction */
        u16 clobbers;           /* what registers you may clobber */
};

extern struct paravirt_patch_site __parainstructions[],
        __parainstructions_end[];
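/*
 * apply_paravirt() in arch/x86/kernel/alternative.c walks this table:
 * for each site it hands the type, clobbers, address and length to
 * pv_init_ops.patch(), then nop-pads whatever remains of the original
 * instruction sequence.
 */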

#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %%ecx; pushl %%edx;"
#define PV_RESTORE_REGS "popl %%edx; popl %%ecx"
#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* We save some registers, but saving all of them would be too much:
 * we clobber all caller-saved registers except the argument register */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif

static inline unsigned long __raw_local_save_flags(void)
{
        unsigned long f;

        asm volatile(paravirt_alt(PV_SAVE_REGS
                                  PARAVIRT_CALL
                                  PV_RESTORE_REGS)
                     : "=a"(f)
                     : paravirt_type(pv_irq_ops.save_fl),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "cc" PV_VEXTRA_CLOBBERS);
        return f;
}

static inline void raw_local_irq_restore(unsigned long f)
{
        asm volatile(paravirt_alt(PV_SAVE_REGS
                                  PARAVIRT_CALL
                                  PV_RESTORE_REGS)
                     : "=a"(f)
                     : PV_FLAGS_ARG(f),
                       paravirt_type(pv_irq_ops.restore_fl),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "cc" PV_EXTRA_CLOBBERS);
}

static inline void raw_local_irq_disable(void)
{
        asm volatile(paravirt_alt(PV_SAVE_REGS
                                  PARAVIRT_CALL
                                  PV_RESTORE_REGS)
                     :
                     : paravirt_type(pv_irq_ops.irq_disable),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
}

static inline void raw_local_irq_enable(void)
{
        asm volatile(paravirt_alt(PV_SAVE_REGS
                                  PARAVIRT_CALL
                                  PV_RESTORE_REGS)
                     :
                     : paravirt_type(pv_irq_ops.irq_enable),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
}

static inline unsigned long __raw_local_irq_save(void)
{
        unsigned long f;

        f = __raw_local_save_flags();
        raw_local_irq_disable();
        return f;
}
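/*
 * Typical usage (illustrative):
 *
 *      unsigned long flags = __raw_local_irq_save();
 *      ...critical section...
 *      raw_local_irq_restore(flags);
 */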


/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

#else  /* __ASSEMBLY__ */

#define _PVSITE(ptype, clobbers, ops, word, algn)       \
771:;                                           \
        ops;                                    \
772:;                                           \
        .pushsection .parainstructions,"a";     \
         .align algn;                           \
         word 771b;                             \
         .byte ptype;                           \
         .byte 772b-771b;                       \
         .short clobbers;                       \
        .popsection


#ifdef CONFIG_X86_64
#define PV_SAVE_REGS                            \
        push %rax;                              \
        push %rcx;                              \
        push %rdx;                              \
        push %rsi;                              \
        push %rdi;                              \
        push %r8;                               \
        push %r9;                               \
        push %r10;                              \
        push %r11
#define PV_RESTORE_REGS                         \
        pop %r11;                               \
        pop %r10;                               \
        pop %r9;                                \
        pop %r8;                                \
        pop %rdi;                               \
        pop %rsi;                               \
        pop %rdx;                               \
        pop %rcx;                               \
        pop %rax
#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
#define PARA_INDIRECT(addr)     *addr(%rip)
#else
#define PV_SAVE_REGS   pushl %eax; pushl %edi; pushl %ecx; pushl %edx
#define PV_RESTORE_REGS popl %edx; popl %ecx; popl %edi; popl %eax
#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
#define PARA_INDIRECT(addr)     *%cs:addr
#endif

#define INTERRUPT_RETURN                                                \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,       \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))

#define DISABLE_INTERRUPTS(clobbers)                                    \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
                  PV_SAVE_REGS;                                         \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);    \
                  PV_RESTORE_REGS;)                     \

#define ENABLE_INTERRUPTS(clobbers)                                     \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,  \
                  PV_SAVE_REGS;                                         \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);     \
                  PV_RESTORE_REGS;)
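/*
 * These are used from entry assembly, e.g. (as in entry_32.S):
 *
 *      DISABLE_INTERRUPTS(CLBR_ANY)
 *
 * which a native backend ultimately patches down to a single "cli".
 */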

#define USERGS_SYSRET32                                                 \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32),       \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))

#ifdef CONFIG_X86_32
#define GET_CR0_INTO_EAX                                \
        push %ecx; push %edx;                           \
        call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
        pop %edx; pop %ecx

#define ENABLE_INTERRUPTS_SYSEXIT                                       \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),    \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))


#else   /* !CONFIG_X86_32 */

/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop.  The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */
#define SWAPGS_UNSAFE_STACK                                             \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
                  swapgs)

#define SWAPGS                                                          \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
                  PV_SAVE_REGS;                                         \
                  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs);         \
                  PV_RESTORE_REGS                                       \
                 )

#define GET_CR2_INTO_RCX                                \
        call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2); \
        movq %rax, %rcx;                                \
        xorq %rax, %rax;

#define PARAVIRT_ADJUST_EXCEPTION_FRAME                                 \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
                  CLBR_NONE,                                            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))

#define USERGS_SYSRET64                                                 \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),       \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))

#define ENABLE_INTERRUPTS_SYSEXIT32                                     \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),    \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
#endif  /* CONFIG_X86_32 */

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */
#endif /* _ASM_X86_PARAVIRT_H */