1 /******************************************************************************
4 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
6 * Copyright (c) 2005 Keir Fraser
8 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
9 * privileged instructions:
11 * Copyright (C) 2006 Qumranet
13 * Avi Kivity <avi@qumranet.com>
14 * Yaniv Kamay <yaniv@qumranet.com>
16 * This work is licensed under the terms of the GNU GPL, version 2. See
17 * the COPYING file in the top-level directory.
19 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
25 #include <public/xen.h>
26 #define DPRINTF(_f, _a ...) printf(_f , ## _a)
28 #include <linux/kvm_host.h>
29 #include "kvm_cache_regs.h"
30 #define DPRINTF(x...) do {} while (0)
32 #include <linux/module.h>
33 #include <asm/kvm_emulate.h>
35 #include "mmu.h" /* for is_long_mode() */
38 * Opcode effective-address decode tables.
39 * Note that we only emulate instructions that have at least one memory
40 * operand (excluding implicit stack references). We assume that stack
41 * references and instruction fetches will never occur in special memory
42 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need not be handled.
/*
 * Per-opcode decode attributes, OR-ed into the 'd' field by the decoder.
 * Bits 1:3 select the destination operand class, bits 4:7 the source
 * class, bits 29:31 the second-source class; the low byte doubles as a
 * group-table index when Group is set.
 * NOTE(review): this listing has lost lines here — the ModRM, Mov,
 * BitOp and No64 flag definitions (all referenced by the tables below)
 * and the "static enum { ... }" wrapper of the group-index list are
 * missing from the visible text; restore them from the original file.
 */
46 /* Operand sizes: 8-bit operands or specified/overridden size. */
47 #define ByteOp (1<<0) /* 8-bit operands. */
48 /* Destination operand type. */
49 #define ImplicitOps (1<<1) /* Implicit in opcode. No generic decode. */
50 #define DstReg (2<<1) /* Register operand. */
51 #define DstMem (3<<1) /* Memory operand. */
52 #define DstAcc (4<<1) /* Destination Accumulator */
53 #define DstMask (7<<1)
54 /* Source operand type. */
55 #define SrcNone (0<<4) /* No source operand. */
56 #define SrcImplicit (0<<4) /* Source operand is implicit in the opcode. */
57 #define SrcReg (1<<4) /* Register operand. */
58 #define SrcMem (2<<4) /* Memory operand. */
59 #define SrcMem16 (3<<4) /* Memory operand (16-bit). */
60 #define SrcMem32 (4<<4) /* Memory operand (32-bit). */
61 #define SrcImm (5<<4) /* Immediate operand. */
62 #define SrcImmByte (6<<4) /* 8-bit sign-extended immediate operand. */
63 #define SrcOne (7<<4) /* Implied '1' */
64 #define SrcImmUByte (8<<4) /* 8-bit unsigned immediate operand. */
65 #define SrcImmU (9<<4) /* Immediate operand, unsigned */
66 #define SrcMask (0xf<<4)
67 /* Generic ModRM decode. */
69 /* Destination is only written; never read. */
72 #define MemAbs (1<<11) /* Memory operand is absolute displacement */
73 #define String (1<<12) /* String instruction (rep capable) */
74 #define Stack (1<<13) /* Stack instruction (push/pop) */
75 #define Group (1<<14) /* Bits 3:5 of modrm byte extend opcode */
76 #define GroupDual (1<<15) /* Alternate decoding of mod == 3 */
77 #define GroupMask 0xff /* Group number stored in bits 0:7 */
80 /* Source 2 operand type */
81 #define Src2None (0<<29)
82 #define Src2CL (1<<29)
83 #define Src2ImmByte (2<<29)
84 #define Src2One (3<<29)
85 #define Src2Imm16 (4<<29)
86 #define Src2Mask (7<<29)
/* Indices into group_table[]/group2_table[] (8 entries per group). */
89 Group1_80, Group1_81, Group1_82, Group1_83,
90 Group1A, Group3_Byte, Group3, Group4, Group5, Group7,
/*
 * Decode-flags word for each one-byte opcode (0x00-0xff); a zero entry
 * means "not emulated".
 * NOTE(review): the upstream 0xNN-0xNN landmark comments and an unknown
 * number of entry lines were stripped from this listing, so the
 * opcode positions of the entries below cannot be trusted as shown.
 */
93 static u32 opcode_table[256] = {
95 ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
96 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
97 ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
98 ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
100 ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
101 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
102 0, 0, ImplicitOps | Stack | No64, 0,
104 ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
105 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
106 ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
107 ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
109 ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
110 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
111 ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
112 ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
114 ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
115 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
116 DstAcc | SrcImmByte, DstAcc | SrcImm, 0, 0,
118 ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
119 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
122 ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
123 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
126 ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
127 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
128 ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
131 DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg,
133 DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg,
135 SrcReg | Stack, SrcReg | Stack, SrcReg | Stack, SrcReg | Stack,
136 SrcReg | Stack, SrcReg | Stack, SrcReg | Stack, SrcReg | Stack,
138 DstReg | Stack, DstReg | Stack, DstReg | Stack, DstReg | Stack,
139 DstReg | Stack, DstReg | Stack, DstReg | Stack, DstReg | Stack,
141 0, 0, 0, DstReg | SrcMem32 | ModRM | Mov /* movsxd (x86/64) */ ,
144 SrcImm | Mov | Stack, 0, SrcImmByte | Mov | Stack, 0,
145 SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps, /* insb, insw/insd */
146 SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps, /* outsb, outsw/outsd */
148 SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
149 SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
151 SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
152 SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
154 Group | Group1_80, Group | Group1_81,
155 Group | Group1_82, Group | Group1_83,
156 ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
157 ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
159 ByteOp | DstMem | SrcReg | ModRM | Mov, DstMem | SrcReg | ModRM | Mov,
160 ByteOp | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
161 DstMem | SrcReg | ModRM | Mov, ModRM | DstReg,
162 DstReg | SrcMem | ModRM | Mov, Group | Group1A,
164 DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg,
166 0, 0, SrcImm | Src2Imm16 | No64, 0,
167 ImplicitOps | Stack, ImplicitOps | Stack, 0, 0,
169 ByteOp | DstReg | SrcMem | Mov | MemAbs, DstReg | SrcMem | Mov | MemAbs,
170 ByteOp | DstMem | SrcReg | Mov | MemAbs, DstMem | SrcReg | Mov | MemAbs,
171 ByteOp | ImplicitOps | Mov | String, ImplicitOps | Mov | String,
172 ByteOp | ImplicitOps | String, ImplicitOps | String,
174 0, 0, ByteOp | ImplicitOps | Mov | String, ImplicitOps | Mov | String,
175 ByteOp | ImplicitOps | Mov | String, ImplicitOps | Mov | String,
176 ByteOp | ImplicitOps | String, ImplicitOps | String,
178 ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
179 ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
180 ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
181 ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
183 DstReg | SrcImm | Mov, DstReg | SrcImm | Mov,
184 DstReg | SrcImm | Mov, DstReg | SrcImm | Mov,
185 DstReg | SrcImm | Mov, DstReg | SrcImm | Mov,
186 DstReg | SrcImm | Mov, DstReg | SrcImm | Mov,
188 ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImmByte | ModRM,
189 0, ImplicitOps | Stack, 0, 0,
190 ByteOp | DstMem | SrcImm | ModRM | Mov, DstMem | SrcImm | ModRM | Mov,
192 0, 0, 0, ImplicitOps | Stack,
193 ImplicitOps, SrcImmByte, ImplicitOps | No64, ImplicitOps,
195 ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM,
196 ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM,
199 0, 0, 0, 0, 0, 0, 0, 0,
202 ByteOp | SrcImmUByte, SrcImmUByte,
203 ByteOp | SrcImmUByte, SrcImmUByte,
205 SrcImm | Stack, SrcImm | ImplicitOps,
206 SrcImmU | Src2Imm16 | No64, SrcImmByte | ImplicitOps,
207 SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps,
208 SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps,
211 ImplicitOps, ImplicitOps, Group | Group3_Byte, Group | Group3,
213 ImplicitOps, 0, ImplicitOps, ImplicitOps,
214 ImplicitOps, ImplicitOps, Group | Group4, Group | Group5,
/*
 * Decode-flags word for each 0x0f-prefixed (two-byte) opcode.
 * NOTE(review): landmark comments and some entry lines were stripped
 * from this listing; entry positions below cannot be trusted as shown.
 */
217 static u32 twobyte_table[256] = {
219 0, Group | GroupDual | Group7, 0, 0, 0, ImplicitOps, ImplicitOps, 0,
220 ImplicitOps, ImplicitOps, 0, 0, 0, ImplicitOps | ModRM, 0, 0,
222 0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0, 0,
224 ModRM | ImplicitOps, ModRM, ModRM | ImplicitOps, ModRM, 0, 0, 0, 0,
225 0, 0, 0, 0, 0, 0, 0, 0,
227 ImplicitOps, 0, ImplicitOps, 0,
228 ImplicitOps, ImplicitOps, 0, 0,
229 0, 0, 0, 0, 0, 0, 0, 0,
231 DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
232 DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
233 DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
234 DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
236 DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
237 DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
238 DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
239 DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
241 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
243 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
245 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
247 SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm,
248 SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm,
250 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
252 ImplicitOps | Stack, ImplicitOps | Stack,
253 0, DstMem | SrcReg | ModRM | BitOp,
254 DstMem | SrcReg | Src2ImmByte | ModRM,
255 DstMem | SrcReg | Src2CL | ModRM, 0, 0,
257 ImplicitOps | Stack, ImplicitOps | Stack,
258 0, DstMem | SrcReg | ModRM | BitOp,
259 DstMem | SrcReg | Src2ImmByte | ModRM,
260 DstMem | SrcReg | Src2CL | ModRM,
263 ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, 0,
264 DstMem | SrcReg | ModRM | BitOp,
265 0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov,
266 DstReg | SrcMem16 | ModRM | Mov,
268 0, 0, DstMem | SrcImmByte | ModRM, DstMem | SrcReg | ModRM | BitOp,
269 0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov,
270 DstReg | SrcMem16 | ModRM | Mov,
272 0, 0, 0, DstMem | SrcReg | ModRM | Mov, 0, 0, 0, ImplicitOps | ModRM,
273 0, 0, 0, 0, 0, 0, 0, 0,
275 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
277 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
279 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
/*
 * Decode flags for opcodes extended via ModRM reg bits (Group flag);
 * indexed by (group << 3) + modrm_reg.
 * NOTE(review): the GroupN label comments separating the 8-entry rows
 * were stripped from this listing.
 */
282 static u32 group_table[] = {
284 ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
285 ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
286 ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
287 ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
289 DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM,
290 DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM,
291 DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM,
292 DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM,
294 ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
295 ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
296 ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
297 ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
299 DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM,
300 DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM,
301 DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM,
302 DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM,
304 DstMem | SrcNone | ModRM | Mov | Stack, 0, 0, 0, 0, 0, 0, 0,
306 ByteOp | SrcImm | DstMem | ModRM, 0,
307 ByteOp | DstMem | SrcNone | ModRM, ByteOp | DstMem | SrcNone | ModRM,
310 DstMem | SrcImm | ModRM, 0,
311 DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM,
314 ByteOp | DstMem | SrcNone | ModRM, ByteOp | DstMem | SrcNone | ModRM,
317 DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM,
318 SrcMem | ModRM | Stack, 0,
319 SrcMem | ModRM | Stack, 0, SrcMem | ModRM | Stack, 0,
321 0, 0, ModRM | SrcMem, ModRM | SrcMem,
322 SrcNone | ModRM | DstMem | Mov, 0,
323 SrcMem16 | ModRM | Mov, SrcMem | ModRM | ByteOp,
/*
 * Alternate group decoding used when GroupDual is set and ModRM
 * mod == 3 (register form); indexed like group_table[].
 */
326 static u32 group2_table[] = {
328 SrcNone | ModRM, 0, 0, SrcNone | ModRM,
329 SrcNone | ModRM | DstMem | Mov, 0,
330 SrcMem16 | ModRM | Mov, 0,
/*
 * x86 EFLAGS bits used by the emulator; the low six (OF/SF/ZF/AF/PF/CF)
 * are the arithmetic flags collected into EFLAGS_MASK below.
 */
333 /* EFLAGS bit definitions. */
334 #define EFLG_VM (1<<17)
335 #define EFLG_RF (1<<16)
336 #define EFLG_OF (1<<11)
337 #define EFLG_DF (1<<10)
338 #define EFLG_IF (1<<9)
339 #define EFLG_SF (1<<7)
340 #define EFLG_ZF (1<<6)
341 #define EFLG_AF (1<<4)
342 #define EFLG_PF (1<<2)
343 #define EFLG_CF (1<<0)
346 * Instruction emulation:
347 * Most instructions are emulated directly via a fragment of inline assembly
348 * code. This allows us to save/restore EFLAGS and thus very easily pick up
349 * any modified flags.
/*
 * Architecture-dependent tokens for the inline-asm emulation fragments:
 * _LO32 forces a 32-bit register operand, _STK names the host stack
 * pointer.  NOTE(review): the closing #endif of this conditional is
 * missing from this listing.
 */
352 #if defined(CONFIG_X86_64)
353 #define _LO32 "k" /* force 32-bit operand */
354 #define _STK "%%rsp" /* stack pointer */
355 #elif defined(__i386__)
356 #define _LO32 "" /* force 32-bit operand */
357 #define _STK "%%esp" /* stack pointer */
361 * These EFLAGS bits are restored from saved value during emulation, and
362 * any changes are written back to the saved value after emulation.
364 #define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
/*
 * Inline-asm fragments that splice the saved guest EFLAGS (masked by
 * _msk) into the host EFLAGS before an emulated ALU instruction, and
 * harvest the resulting flags afterwards, using _tmp as scratch and
 * the host stack as staging area.
 * NOTE(review): several continuation lines of both macros are missing
 * from this listing; do not edit without the original file.
 */
366 /* Before executing instruction: restore necessary bits in EFLAGS. */
367 #define _PRE_EFLAGS(_sav, _msk, _tmp) \
368 /* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \
369 "movl %"_sav",%"_LO32 _tmp"; " \
372 "movl %"_msk",%"_LO32 _tmp"; " \
373 "andl %"_LO32 _tmp",("_STK"); " \
375 "notl %"_LO32 _tmp"; " \
376 "andl %"_LO32 _tmp",("_STK"); " \
377 "andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); " \
379 "orl %"_LO32 _tmp",("_STK"); " \
383 /* After executing instruction: write-back necessary bits in EFLAGS. */
384 #define _POST_EFLAGS(_sav, _msk, _tmp) \
385 /* _sav |= EFLAGS & _msk; */ \
388 "andl %"_msk",%"_LO32 _tmp"; " \
389 "orl %"_LO32 _tmp",%"_sav"; "
/*
 * The emulate_{1op,2op_*,2op_cl} macro family executes the guest ALU
 * instruction (_op) natively on the host via inline asm, dispatching on
 * operand size and wrapping it in _PRE_EFLAGS/_POST_EFLAGS so guest
 * flags are honoured and captured.  insn_fetch() pulls the next _size
 * bytes of the instruction stream via do_insn_fetch() and (in the
 * stripped part of the macro) bails out to the enclosing function's
 * 'done' label on fetch failure.
 * NOTE(review): the do { } while (0) wrappers, case labels and closing
 * lines of these macros are missing from this listing; the bodies below
 * are fragments and must be restored from the original file.
 */
397 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
399 __asm__ __volatile__ ( \
400 _PRE_EFLAGS("0", "4", "2") \
401 _op _suffix " %"_x"3,%1; " \
402 _POST_EFLAGS("0", "4", "2") \
403 : "=m" (_eflags), "=m" ((_dst).val), \
405 : _y ((_src).val), "i" (EFLAGS_MASK)); \
409 /* Raw emulation: instruction has two explicit operands. */
410 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
412 unsigned long _tmp; \
414 switch ((_dst).bytes) { \
416 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
419 ____emulate_2op(_op,_src,_dst,_eflags,_lx,_ly,"l"); \
422 ON64(____emulate_2op(_op,_src,_dst,_eflags,_qx,_qy,"q")); \
427 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
429 unsigned long _tmp; \
430 switch ((_dst).bytes) { \
432 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
435 __emulate_2op_nobyte(_op, _src, _dst, _eflags, \
436 _wx, _wy, _lx, _ly, _qx, _qy); \
441 /* Source operand is byte-sized and may be restricted to just %cl. */
442 #define emulate_2op_SrcB(_op, _src, _dst, _eflags) \
443 __emulate_2op(_op, _src, _dst, _eflags, \
444 "b", "c", "b", "c", "b", "c", "b", "c")
446 /* Source operand is byte, word, long or quad sized. */
447 #define emulate_2op_SrcV(_op, _src, _dst, _eflags) \
448 __emulate_2op(_op, _src, _dst, _eflags, \
449 "b", "q", "w", "r", _LO32, "r", "", "r")
451 /* Source operand is word, long or quad sized. */
452 #define emulate_2op_SrcV_nobyte(_op, _src, _dst, _eflags) \
453 __emulate_2op_nobyte(_op, _src, _dst, _eflags, \
454 "w", "r", _LO32, "r", "", "r")
456 /* Instruction has three operands and one operand is stored in ECX register */
457 #define __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, _suffix, _type) \
459 unsigned long _tmp; \
460 _type _clv = (_cl).val; \
461 _type _srcv = (_src).val; \
462 _type _dstv = (_dst).val; \
464 __asm__ __volatile__ ( \
465 _PRE_EFLAGS("0", "5", "2") \
466 _op _suffix " %4,%1 \n" \
467 _POST_EFLAGS("0", "5", "2") \
468 : "=m" (_eflags), "+r" (_dstv), "=&r" (_tmp) \
469 : "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK) \
472 (_cl).val = (unsigned long) _clv; \
473 (_src).val = (unsigned long) _srcv; \
474 (_dst).val = (unsigned long) _dstv; \
477 #define emulate_2op_cl(_op, _cl, _src, _dst, _eflags) \
479 switch ((_dst).bytes) { \
481 __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
482 "w", unsigned short); \
485 __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
486 "l", unsigned int); \
489 ON64(__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
490 "q", unsigned long)); \
495 #define __emulate_1op(_op, _dst, _eflags, _suffix) \
497 unsigned long _tmp; \
499 __asm__ __volatile__ ( \
500 _PRE_EFLAGS("0", "3", "2") \
501 _op _suffix " %1; " \
502 _POST_EFLAGS("0", "3", "2") \
503 : "=m" (_eflags), "+m" ((_dst).val), \
505 : "i" (EFLAGS_MASK)); \
508 /* Instruction has only one explicit operand (no source operand). */
509 #define emulate_1op(_op, _dst, _eflags) \
511 switch ((_dst).bytes) { \
512 case 1: __emulate_1op(_op, _dst, _eflags, "b"); break; \
513 case 2: __emulate_1op(_op, _dst, _eflags, "w"); break; \
514 case 4: __emulate_1op(_op, _dst, _eflags, "l"); break; \
515 case 8: ON64(__emulate_1op(_op, _dst, _eflags, "q")); break; \
519 /* Fetch next part of the instruction being emulated. */
520 #define insn_fetch(_type, _size, _eip) \
521 ({ unsigned long _x; \
522 rc = do_insn_fetch(ctxt, ops, (_eip), &_x, (_size)); \
529 static inline unsigned long ad_mask(struct decode_cache *c)
531 return (1UL << (c->ad_bytes << 3)) - 1;
534 /* Access/update address held in a register, based on addressing mode. */
535 static inline unsigned long
536 address_mask(struct decode_cache *c, unsigned long reg)
538 if (c->ad_bytes == sizeof(unsigned long))
541 return reg & ad_mask(c);
/* Effective address: segment base plus register value, truncated to the
 * current address width. */
static inline unsigned long
register_address(struct decode_cache *c, unsigned long base, unsigned long reg)
{
	return base + address_mask(c, reg);
}
551 register_address_increment(struct decode_cache *c, unsigned long *reg, int inc)
553 if (c->ad_bytes == sizeof(unsigned long))
556 *reg = (*reg & ~ad_mask(c)) | ((*reg + inc) & ad_mask(c));
559 static inline void jmp_rel(struct decode_cache *c, int rel)
561 register_address_increment(c, &c->eip, rel);
564 static void set_seg_override(struct decode_cache *c, int seg)
566 c->has_seg_override = true;
567 c->seg_override = seg;
570 static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
572 if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
575 return kvm_x86_ops->get_segment_base(ctxt->vcpu, seg);
578 static unsigned long seg_override_base(struct x86_emulate_ctxt *ctxt,
579 struct decode_cache *c)
581 if (!c->has_seg_override)
584 return seg_base(ctxt, c->seg_override);
587 static unsigned long es_base(struct x86_emulate_ctxt *ctxt)
589 return seg_base(ctxt, VCPU_SREG_ES);
592 static unsigned long ss_base(struct x86_emulate_ctxt *ctxt)
594 return seg_base(ctxt, VCPU_SREG_SS);
597 static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
598 struct x86_emulate_ops *ops,
599 unsigned long linear, u8 *dest)
601 struct fetch_cache *fc = &ctxt->decode.fetch;
605 if (linear < fc->start || linear >= fc->end) {
606 size = min(15UL, PAGE_SIZE - offset_in_page(linear));
607 rc = ops->read_std(linear, fc->data, size, ctxt->vcpu);
611 fc->end = linear + size;
613 *dest = fc->data[linear - fc->start];
617 static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
618 struct x86_emulate_ops *ops,
619 unsigned long eip, void *dest, unsigned size)
623 eip += ctxt->cs_base;
625 rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++);
633 * Given the 'reg' portion of a ModRM byte, and a register block, return a
634 * pointer into the block that addresses the relevant register.
635 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
637 static void *decode_register(u8 modrm_reg, unsigned long *regs,
642 p = ®s[modrm_reg];
643 if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
644 p = (unsigned char *)®s[modrm_reg & 3] + 1;
648 static int read_descriptor(struct x86_emulate_ctxt *ctxt,
649 struct x86_emulate_ops *ops,
651 u16 *size, unsigned long *address, int op_bytes)
658 rc = ops->read_std((unsigned long)ptr, (unsigned long *)size, 2,
662 rc = ops->read_std((unsigned long)ptr + 2, address, op_bytes,
667 static int test_cc(unsigned int condition, unsigned int flags)
671 switch ((condition & 15) >> 1) {
673 rc |= (flags & EFLG_OF);
675 case 1: /* b/c/nae */
676 rc |= (flags & EFLG_CF);
679 rc |= (flags & EFLG_ZF);
682 rc |= (flags & (EFLG_CF|EFLG_ZF));
685 rc |= (flags & EFLG_SF);
688 rc |= (flags & EFLG_PF);
691 rc |= (flags & EFLG_ZF);
694 rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
698 /* Odd condition identifiers (lsb == 1) have inverted sense. */
699 return (!!rc ^ (condition & 1));
702 static void decode_register_operand(struct operand *op,
703 struct decode_cache *c,
706 unsigned reg = c->modrm_reg;
707 int highbyte_regs = c->rex_prefix == 0;
710 reg = (c->b & 7) | ((c->rex_prefix & 1) << 3);
712 if ((c->d & ByteOp) && !inhibit_bytereg) {
713 op->ptr = decode_register(reg, c->regs, highbyte_regs);
714 op->val = *(u8 *)op->ptr;
717 op->ptr = decode_register(reg, c->regs, 0);
718 op->bytes = c->op_bytes;
721 op->val = *(u16 *)op->ptr;
724 op->val = *(u32 *)op->ptr;
727 op->val = *(u64 *) op->ptr;
731 op->orig_val = op->val;
734 static int decode_modrm(struct x86_emulate_ctxt *ctxt,
735 struct x86_emulate_ops *ops)
737 struct decode_cache *c = &ctxt->decode;
739 int index_reg = 0, base_reg = 0, scale;
743 c->modrm_reg = (c->rex_prefix & 4) << 1; /* REX.R */
744 index_reg = (c->rex_prefix & 2) << 2; /* REX.X */
745 c->modrm_rm = base_reg = (c->rex_prefix & 1) << 3; /* REG.B */
748 c->modrm = insn_fetch(u8, 1, c->eip);
749 c->modrm_mod |= (c->modrm & 0xc0) >> 6;
750 c->modrm_reg |= (c->modrm & 0x38) >> 3;
751 c->modrm_rm |= (c->modrm & 0x07);
755 if (c->modrm_mod == 3) {
756 c->modrm_ptr = decode_register(c->modrm_rm,
757 c->regs, c->d & ByteOp);
758 c->modrm_val = *(unsigned long *)c->modrm_ptr;
762 if (c->ad_bytes == 2) {
763 unsigned bx = c->regs[VCPU_REGS_RBX];
764 unsigned bp = c->regs[VCPU_REGS_RBP];
765 unsigned si = c->regs[VCPU_REGS_RSI];
766 unsigned di = c->regs[VCPU_REGS_RDI];
768 /* 16-bit ModR/M decode. */
769 switch (c->modrm_mod) {
771 if (c->modrm_rm == 6)
772 c->modrm_ea += insn_fetch(u16, 2, c->eip);
775 c->modrm_ea += insn_fetch(s8, 1, c->eip);
778 c->modrm_ea += insn_fetch(u16, 2, c->eip);
781 switch (c->modrm_rm) {
783 c->modrm_ea += bx + si;
786 c->modrm_ea += bx + di;
789 c->modrm_ea += bp + si;
792 c->modrm_ea += bp + di;
801 if (c->modrm_mod != 0)
808 if (c->modrm_rm == 2 || c->modrm_rm == 3 ||
809 (c->modrm_rm == 6 && c->modrm_mod != 0))
810 if (!c->has_seg_override)
811 set_seg_override(c, VCPU_SREG_SS);
812 c->modrm_ea = (u16)c->modrm_ea;
814 /* 32/64-bit ModR/M decode. */
815 if ((c->modrm_rm & 7) == 4) {
816 sib = insn_fetch(u8, 1, c->eip);
817 index_reg |= (sib >> 3) & 7;
821 if ((base_reg & 7) == 5 && c->modrm_mod == 0)
822 c->modrm_ea += insn_fetch(s32, 4, c->eip);
824 c->modrm_ea += c->regs[base_reg];
826 c->modrm_ea += c->regs[index_reg] << scale;
827 } else if ((c->modrm_rm & 7) == 5 && c->modrm_mod == 0) {
828 if (ctxt->mode == X86EMUL_MODE_PROT64)
831 c->modrm_ea += c->regs[c->modrm_rm];
832 switch (c->modrm_mod) {
834 if (c->modrm_rm == 5)
835 c->modrm_ea += insn_fetch(s32, 4, c->eip);
838 c->modrm_ea += insn_fetch(s8, 1, c->eip);
841 c->modrm_ea += insn_fetch(s32, 4, c->eip);
849 static int decode_abs(struct x86_emulate_ctxt *ctxt,
850 struct x86_emulate_ops *ops)
852 struct decode_cache *c = &ctxt->decode;
855 switch (c->ad_bytes) {
857 c->modrm_ea = insn_fetch(u16, 2, c->eip);
860 c->modrm_ea = insn_fetch(u32, 4, c->eip);
863 c->modrm_ea = insn_fetch(u64, 8, c->eip);
/*
 * Top-level instruction decoder: consumes legacy/REX prefixes, the
 * opcode byte(s), group-table redirection, ModRM/SIB/displacement and
 * the immediates, filling ctxt->decode for the execution stage.
 * NOTE(review): this listing is a fragment — the function's opening
 * lines, switch/case labels, 'else' arms, break statements and closing
 * braces are missing throughout; do not edit without the original file.
 */
871 x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
873 struct decode_cache *c = &ctxt->decode;
875 int mode = ctxt->mode;
876 int def_op_bytes, def_ad_bytes, group;
878 /* Shadow copy of register state. Committed on successful emulation. */
880 memset(c, 0, sizeof(struct decode_cache));
881 c->eip = kvm_rip_read(ctxt->vcpu);
882 ctxt->cs_base = seg_base(ctxt, VCPU_SREG_CS);
883 memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);
/* Default operand/address sizes per CPU mode (switch header stripped). */
886 case X86EMUL_MODE_REAL:
887 case X86EMUL_MODE_PROT16:
888 def_op_bytes = def_ad_bytes = 2;
890 case X86EMUL_MODE_PROT32:
891 def_op_bytes = def_ad_bytes = 4;
894 case X86EMUL_MODE_PROT64:
903 c->op_bytes = def_op_bytes;
904 c->ad_bytes = def_ad_bytes;
906 /* Legacy prefixes. */
908 switch (c->b = insn_fetch(u8, 1, c->eip)) {
909 case 0x66: /* operand-size override */
910 /* switch between 2/4 bytes */
911 c->op_bytes = def_op_bytes ^ 6;
913 case 0x67: /* address-size override */
914 if (mode == X86EMUL_MODE_PROT64)
915 /* switch between 4/8 bytes */
916 c->ad_bytes = def_ad_bytes ^ 12;
918 /* switch between 2/4 bytes */
919 c->ad_bytes = def_ad_bytes ^ 6;
921 case 0x26: /* ES override */
922 case 0x2e: /* CS override */
923 case 0x36: /* SS override */
924 case 0x3e: /* DS override */
925 set_seg_override(c, (c->b >> 3) & 3)
927 case 0x64: /* FS override */
928 case 0x65: /* GS override */
929 set_seg_override(c, c->b & 7);
931 case 0x40 ... 0x4f: /* REX */
932 if (mode != X86EMUL_MODE_PROT64)
934 c->rex_prefix = c->b;
936 case 0xf0: /* LOCK */
939 case 0xf2: /* REPNE/REPNZ */
940 c->rep_prefix = REPNE_PREFIX;
942 case 0xf3: /* REP/REPE/REPZ */
943 c->rep_prefix = REPE_PREFIX;
949 /* Any legacy prefix after a REX prefix nullifies its effect. */
958 if (c->rex_prefix & 8)
959 c->op_bytes = 8; /* REX.W */
961 /* Opcode byte(s). */
962 c->d = opcode_table[c->b];
964 /* Two-byte opcode? */
967 c->b = insn_fetch(u8, 1, c->eip);
968 c->d = twobyte_table[c->b];
972 if (mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
/* NOTE(review): stray double semicolon at the end of the next line. */
973 kvm_report_emulation_failure(ctxt->vcpu, "invalid x86/64 instruction");;
/* Group opcodes: low byte of d indexes group_table/group2_table. */
978 group = c->d & GroupMask;
979 c->modrm = insn_fetch(u8, 1, c->eip);
982 group = (group << 3) + ((c->modrm >> 3) & 7);
983 if ((c->d & GroupDual) && (c->modrm >> 6) == 3)
984 c->d = group2_table[group];
986 c->d = group_table[group];
991 DPRINTF("Cannot emulate %02x\n", c->b);
995 if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack))
998 /* ModRM and SIB bytes. */
1000 rc = decode_modrm(ctxt, ops);
1001 else if (c->d & MemAbs)
1002 rc = decode_abs(ctxt, ops);
1006 if (!c->has_seg_override)
1007 set_seg_override(c, VCPU_SREG_DS);
/* 0x8d is lea: effective address only, so no segment base is added. */
1009 if (!(!c->twobyte && c->b == 0x8d))
1010 c->modrm_ea += seg_override_base(ctxt, c);
1012 if (c->ad_bytes != 8)
1013 c->modrm_ea = (u32)c->modrm_ea;
1015 * Decode and fetch the source operand: register, memory
1018 switch (c->d & SrcMask) {
1022 decode_register_operand(&c->src, c, 0);
1031 c->src.bytes = (c->d & ByteOp) ? 1 :
1033 /* Don't fetch the address for invlpg: it could be unmapped. */
1034 if (c->twobyte && c->b == 0x01 && c->modrm_reg == 7)
1038 * For instructions with a ModR/M byte, switch to register
1039 * access if Mod = 3.
1041 if ((c->d & ModRM) && c->modrm_mod == 3) {
1042 c->src.type = OP_REG;
1043 c->src.val = c->modrm_val;
1044 c->src.ptr = c->modrm_ptr;
1047 c->src.type = OP_MEM;
1051 c->src.type = OP_IMM;
1052 c->src.ptr = (unsigned long *)c->eip;
1053 c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
1054 if (c->src.bytes == 8)
1056 /* NB. Immediates are sign-extended as necessary. */
1057 switch (c->src.bytes) {
1059 c->src.val = insn_fetch(s8, 1, c->eip);
1062 c->src.val = insn_fetch(s16, 2, c->eip);
1065 c->src.val = insn_fetch(s32, 4, c->eip);
/* SrcImmU: strip the sign extension performed above. */
1068 if ((c->d & SrcMask) == SrcImmU) {
1069 switch (c->src.bytes) {
1074 c->src.val &= 0xffff;
1077 c->src.val &= 0xffffffff;
1084 c->src.type = OP_IMM;
1085 c->src.ptr = (unsigned long *)c->eip;
1087 if ((c->d & SrcMask) == SrcImmByte)
1088 c->src.val = insn_fetch(s8, 1, c->eip);
1090 c->src.val = insn_fetch(u8, 1, c->eip);
1099 * Decode and fetch the second source operand: register, memory
1102 switch (c->d & Src2Mask) {
1107 c->src2.val = c->regs[VCPU_REGS_RCX] & 0x8;
1110 c->src2.type = OP_IMM;
1111 c->src2.ptr = (unsigned long *)c->eip;
1113 c->src2.val = insn_fetch(u8, 1, c->eip);
1116 c->src2.type = OP_IMM;
1117 c->src2.ptr = (unsigned long *)c->eip;
1119 c->src2.val = insn_fetch(u16, 2, c->eip);
1127 /* Decode and fetch the destination operand: register or memory. */
1128 switch (c->d & DstMask) {
1130 /* Special instructions do their own operand decoding. */
/* 0x0f b6/b7 are movzx: destination is always full-width. */
1133 decode_register_operand(&c->dst, c,
1134 c->twobyte && (c->b == 0xb6 || c->b == 0xb7));
1137 if ((c->d & ModRM) && c->modrm_mod == 3) {
1138 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
1139 c->dst.type = OP_REG;
1140 c->dst.val = c->dst.orig_val = c->modrm_val;
1141 c->dst.ptr = c->modrm_ptr;
1144 c->dst.type = OP_MEM;
1147 c->dst.type = OP_REG;
1148 c->dst.bytes = c->op_bytes;
1149 c->dst.ptr = &c->regs[VCPU_REGS_RAX];
1150 switch (c->op_bytes) {
1152 c->dst.val = *(u8 *)c->dst.ptr;
1155 c->dst.val = *(u16 *)c->dst.ptr;
1158 c->dst.val = *(u32 *)c->dst.ptr;
1161 c->dst.orig_val = c->dst.val;
1165 if (c->rip_relative)
1166 c->modrm_ea += c->eip;
1169 return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
1172 static inline void emulate_push(struct x86_emulate_ctxt *ctxt)
1174 struct decode_cache *c = &ctxt->decode;
1176 c->dst.type = OP_MEM;
1177 c->dst.bytes = c->op_bytes;
1178 c->dst.val = c->src.val;
1179 register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
1180 c->dst.ptr = (void *) register_address(c, ss_base(ctxt),
1181 c->regs[VCPU_REGS_RSP]);
1184 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1185 struct x86_emulate_ops *ops,
1186 void *dest, int len)
1188 struct decode_cache *c = &ctxt->decode;
1191 rc = ops->read_emulated(register_address(c, ss_base(ctxt),
1192 c->regs[VCPU_REGS_RSP]),
1193 dest, len, ctxt->vcpu);
1197 register_address_increment(c, &c->regs[VCPU_REGS_RSP], len);
1201 static void emulate_push_sreg(struct x86_emulate_ctxt *ctxt, int seg)
1203 struct decode_cache *c = &ctxt->decode;
1204 struct kvm_segment segment;
1206 kvm_x86_ops->get_segment(ctxt->vcpu, &segment, seg);
1208 c->src.val = segment.selector;
1212 static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
1213 struct x86_emulate_ops *ops, int seg)
1215 struct decode_cache *c = &ctxt->decode;
1216 unsigned long selector;
1219 rc = emulate_pop(ctxt, ops, &selector, c->op_bytes);
1223 rc = kvm_load_segment_descriptor(ctxt->vcpu, (u16)selector, 1, seg);
1227 static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt,
1228 struct x86_emulate_ops *ops)
1230 struct decode_cache *c = &ctxt->decode;
1233 rc = emulate_pop(ctxt, ops, &c->dst.val, c->dst.bytes);
1239 static inline void emulate_grp2(struct x86_emulate_ctxt *ctxt)
1241 struct decode_cache *c = &ctxt->decode;
1242 switch (c->modrm_reg) {
1244 emulate_2op_SrcB("rol", c->src, c->dst, ctxt->eflags);
1247 emulate_2op_SrcB("ror", c->src, c->dst, ctxt->eflags);
1250 emulate_2op_SrcB("rcl", c->src, c->dst, ctxt->eflags);
1253 emulate_2op_SrcB("rcr", c->src, c->dst, ctxt->eflags);
1255 case 4: /* sal/shl */
1256 case 6: /* sal/shl */
1257 emulate_2op_SrcB("sal", c->src, c->dst, ctxt->eflags);
1260 emulate_2op_SrcB("shr", c->src, c->dst, ctxt->eflags);
1263 emulate_2op_SrcB("sar", c->src, c->dst, ctxt->eflags);
1268 static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
1269 struct x86_emulate_ops *ops)
1271 struct decode_cache *c = &ctxt->decode;
1274 switch (c->modrm_reg) {
1275 case 0 ... 1: /* test */
1276 emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
1279 c->dst.val = ~c->dst.val;
1282 emulate_1op("neg", c->dst, ctxt->eflags);
1285 DPRINTF("Cannot emulate %02x\n", c->b);
1286 rc = X86EMUL_UNHANDLEABLE;
1292 static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
1293 struct x86_emulate_ops *ops)
1295 struct decode_cache *c = &ctxt->decode;
1297 switch (c->modrm_reg) {
1299 emulate_1op("inc", c->dst, ctxt->eflags);
1302 emulate_1op("dec", c->dst, ctxt->eflags);
1304 case 2: /* call near abs */ {
1307 c->eip = c->src.val;
1308 c->src.val = old_eip;
1312 case 4: /* jmp abs */
1313 c->eip = c->src.val;
/*
 * Grp9: CMPXCHG8B m64.  Compares EDX:EAX with the 64-bit value at
 * memop; on mismatch loads the memory value into EDX:EAX and clears
 * ZF, otherwise stores ECX:EBX to memory (via cmpxchg_emulated) and
 * sets ZF.
 * NOTE(review): capture is elided; local declarations (old, new, rc),
 * early-exit checks on rc, and the return path are missing from view.
 */
1322 static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
1323 struct x86_emulate_ops *ops,
1324 unsigned long memop)
1326 struct decode_cache *c = &ctxt->decode;
/* Fetch the current 64-bit memory operand. */
1330 rc = ops->read_emulated(memop, &old, 8, ctxt->vcpu);
/* Compare low dword against EAX and high dword against EDX. */
1334 if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
1335 ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) {
/* Mismatch: load the observed value into EDX:EAX and clear ZF. */
1337 c->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
1338 c->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
1339 ctxt->eflags &= ~EFLG_ZF;
/* Match: attempt to store ECX:EBX atomically and set ZF. */
1342 new = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
1343 (u32) c->regs[VCPU_REGS_RBX];
1345 rc = ops->cmpxchg_emulated(memop, &old, &new, 8, ctxt->vcpu);
1348 ctxt->eflags |= EFLG_ZF;
/*
 * RET far: pop the return EIP and then the CS selector off the stack,
 * and reload the CS segment descriptor.
 * NOTE(review): capture is elided; the cs local declaration, rc error
 * checks between the pops, and the return statement are missing.
 */
1353 static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
1354 struct x86_emulate_ops *ops)
1356 struct decode_cache *c = &ctxt->decode;
1360 rc = emulate_pop(ctxt, ops, &c->eip, c->op_bytes);
/* With a 32-bit operand size the popped EIP is zero-extended. */
1363 if (c->op_bytes == 4)
1364 c->eip = (u32)c->eip;
1365 rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
/* Only the low 16 bits of the popped value form the CS selector. */
1368 rc = kvm_load_segment_descriptor(ctxt->vcpu, (u16)cs, 1, VCPU_SREG_CS);
/*
 * Commit the decoded destination operand: registers are written
 * directly (size-dispatched), memory destinations go through either
 * cmpxchg_emulated or write_emulated.
 * NOTE(review): capture is elided; the case labels (OP_REG/OP_MEM/
 * OP_NONE), the lock-prefix test selecting cmpxchg vs write, trailing
 * call arguments, and the return path are missing from view.
 */
1372 static inline int writeback(struct x86_emulate_ctxt *ctxt,
1373 struct x86_emulate_ops *ops)
1376 struct decode_cache *c = &ctxt->decode;
1378 switch (c->dst.type) {
1380 /* The 4-byte case *is* correct:
1381 * in 64-bit mode we zero-extend.
/* Register destination: store with the operand's width. */
1383 switch (c->dst.bytes) {
1385 *(u8 *)c->dst.ptr = (u8)c->dst.val;
1388 *(u16 *)c->dst.ptr = (u16)c->dst.val;
1391 *c->dst.ptr = (u32)c->dst.val;
1392 break; /* 64b: zero-ext */
1394 *c->dst.ptr = c->dst.val;
/* Memory destination with LOCK semantics: compare-and-exchange. */
1400 rc = ops->cmpxchg_emulated(
1401 (unsigned long)c->dst.ptr,
/* Plain memory destination: ordinary emulated write. */
1407 rc = ops->write_emulated(
1408 (unsigned long)c->dst.ptr,
/*
 * Record an interrupt-shadow (STI / MOV SS) in the emulation context,
 * but only if the previous instruction did not already set the same
 * shadow — back-to-back STIs must not extend the window.
 */
1424 static void toggle_interruptibility(struct x86_emulate_ctxt *ctxt, u32 mask)
1426 u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(ctxt->vcpu, mask);
1428 * an sti; sti; sequence only disable interrupts for the first
1429 * instruction. So, if the last instruction, be it emulated or
1430 * not, left the system with the INT_STI flag enabled, it
1431 * means that the last instruction is an sti. We should not
1432 * leave the flag on in this case. The same goes for mov ss
1434 if (!(int_shadow & mask))
1435 ctxt->interruptibility = mask;
/*
 * Initialise flat 4GB code and stack segments used by the
 * SYSCALL/SYSENTER/SYSEXIT emulation paths.  Selector, DPL and the
 * long-mode bit are filled in later by the callers.
 * NOTE(review): capture is elided; the static return type line, the
 * get_segment for SS, and several ss field assignments (s/dpl/p) are
 * missing from view.
 */
1439 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
1440 struct kvm_segment *cs, struct kvm_segment *ss)
1442 memset(cs, 0, sizeof(struct kvm_segment));
1443 kvm_x86_ops->get_segment(ctxt->vcpu, cs, VCPU_SREG_CS);
1444 memset(ss, 0, sizeof(struct kvm_segment));
1446 cs->l = 0; /* will be adjusted later */
1447 cs->base = 0; /* flat segment */
1448 cs->g = 1; /* 4kb granularity */
1449 cs->limit = 0xffffffff; /* 4GB limit */
1450 cs->type = 0x0b; /* Read, Execute, Accessed */
1452 cs->dpl = 0; /* will be adjusted later */
1457 ss->base = 0; /* flat segment */
1458 ss->limit = 0xffffffff; /* 4GB limit */
1459 ss->g = 1; /* 4kb granularity */
1461 ss->type = 0x03; /* Read/Write, Accessed */
1462 ss->db = 1; /* 32bit stack segment */
/*
 * Emulate SYSCALL: load flat CS/SS from MSR_STAR, save the return RIP
 * in RCX, and branch to the entry point from MSR_LSTAR/MSR_CSTAR (long
 * mode) or the legacy MSR_STAR target, masking RFLAGS accordingly.
 * Rejected in real mode, with paging disabled, or with a LOCK prefix.
 * NOTE(review): capture is elided; the static return type, local
 * declarations (msr_data), the #UD injection / -1 return on the guard,
 * the cs.dpl/ss.dpl adjustments, the CONFIG_X86_64 #else branch, and
 * the final return are missing from view.
 */
1468 emulate_syscall(struct x86_emulate_ctxt *ctxt)
1470 struct decode_cache *c = &ctxt->decode;
1471 struct kvm_segment cs, ss;
1474 /* syscall is not available in real mode */
1475 if (c->lock_prefix || ctxt->mode == X86EMUL_MODE_REAL
1476 || !(ctxt->vcpu->arch.cr0 & X86_CR0_PE))
1479 setup_syscalls_segments(ctxt, &cs, &ss);
/* STAR[47:32] supplies the kernel CS selector; SS is CS + 8. */
1481 kvm_x86_ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
1483 cs.selector = (u16)(msr_data & 0xfffc);
1484 ss.selector = (u16)(msr_data + 8);
1486 if (is_long_mode(ctxt->vcpu)) {
1490 kvm_x86_ops->set_segment(ctxt->vcpu, &cs, VCPU_SREG_CS);
1491 kvm_x86_ops->set_segment(ctxt->vcpu, &ss, VCPU_SREG_SS);
/* SYSCALL saves the next-instruction pointer in RCX. */
1493 c->regs[VCPU_REGS_RCX] = c->eip;
1494 if (is_long_mode(ctxt->vcpu)) {
1495 #ifdef CONFIG_X86_64
/* Long mode also saves RFLAGS (minus RF) in R11... */
1496 c->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;
/* ...and takes the target RIP from LSTAR (64-bit caller) or
 * CSTAR (compat-mode caller). */
1498 kvm_x86_ops->get_msr(ctxt->vcpu,
1499 ctxt->mode == X86EMUL_MODE_PROT64 ?
1500 MSR_LSTAR : MSR_CSTAR, &msr_data);
/* Mask RFLAGS with SFMASK (plus RF). */
1503 kvm_x86_ops->get_msr(ctxt->vcpu, MSR_SYSCALL_MASK, &msr_data);
1504 ctxt->eflags &= ~(msr_data | EFLG_RF);
/* Legacy mode: target EIP comes from STAR[31:0]. */
1508 kvm_x86_ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
1509 c->eip = (u32)msr_data;
1511 ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
/*
 * Emulate SYSENTER: validate mode and the SYSENTER_CS MSR, then load
 * flat CS/SS, RIP from MSR_IA32_SYSENTER_EIP and RSP from
 * MSR_IA32_SYSENTER_ESP.  Injects #UD on LOCK prefix or 64-bit mode
 * (untested there) and #GP in real mode / CS MSR of 0.
 * NOTE(review): capture is elided; the static return type, local
 * declarations, the #UD injection bodies, -1 returns after each
 * kvm_inject_gp, break statements, and the long-mode cs/ss adjustments
 * inside the if at the end are missing from view.
 */
1518 emulate_sysenter(struct x86_emulate_ctxt *ctxt)
1520 struct decode_cache *c = &ctxt->decode;
1521 struct kvm_segment cs, ss;
1524 /* inject #UD if LOCK prefix is used */
1528 /* inject #GP if in real mode or paging is disabled */
1529 if (ctxt->mode == X86EMUL_MODE_REAL ||
1530 !(ctxt->vcpu->arch.cr0 & X86_CR0_PE)) {
1531 kvm_inject_gp(ctxt->vcpu, 0);
1535 /* XXX sysenter/sysexit have not been tested in 64bit mode.
1536 * Therefore, we inject an #UD.
1538 if (ctxt->mode == X86EMUL_MODE_PROT64)
1541 setup_syscalls_segments(ctxt, &cs, &ss);
/* A null SYSENTER_CS selector is a #GP condition. */
1543 kvm_x86_ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
1544 switch (ctxt->mode) {
1545 case X86EMUL_MODE_PROT32:
1546 if ((msr_data & 0xfffc) == 0x0) {
1547 kvm_inject_gp(ctxt->vcpu, 0);
1551 case X86EMUL_MODE_PROT64:
1552 if (msr_data == 0x0) {
1553 kvm_inject_gp(ctxt->vcpu, 0);
/* Enter kernel: clear VM/IF/RF and build CS from the MSR with RPL 0;
 * SS is the next descriptor (CS + 8). */
1559 ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
1560 cs.selector = (u16)msr_data;
1561 cs.selector &= ~SELECTOR_RPL_MASK;
1562 ss.selector = cs.selector + 8;
1563 ss.selector &= ~SELECTOR_RPL_MASK;
1564 if (ctxt->mode == X86EMUL_MODE_PROT64
1565 || is_long_mode(ctxt->vcpu)) {
1570 kvm_x86_ops->set_segment(ctxt->vcpu, &cs, VCPU_SREG_CS);
1571 kvm_x86_ops->set_segment(ctxt->vcpu, &ss, VCPU_SREG_SS);
/* Entry point and kernel stack come from the SYSENTER MSR pair. */
1573 kvm_x86_ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_EIP, &msr_data);
1576 kvm_x86_ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_ESP, &msr_data);
1577 c->regs[VCPU_REGS_RSP] = msr_data;
/*
 * Emulate SYSEXIT: return to user mode with CS/SS derived from
 * MSR_IA32_SYSENTER_CS (+16/+24 for 32-bit, +32/+40 for 64-bit
 * returns), RIP from RDX and RSP from RCX.  Requires CPL 0 and
 * protected mode; rejects LOCK prefixes.
 * NOTE(review): capture is elided; the static return type, local
 * declarations (msr_data, usermode), the #UD injection body, -1
 * returns after kvm_inject_gp, break statements, the long-mode cs
 * adjustments, and the final return are missing from view.
 */
1583 emulate_sysexit(struct x86_emulate_ctxt *ctxt)
1585 struct decode_cache *c = &ctxt->decode;
1586 struct kvm_segment cs, ss;
1590 /* inject #UD if LOCK prefix is used */
1594 /* inject #GP if in real mode or paging is disabled */
1595 if (ctxt->mode == X86EMUL_MODE_REAL
1596 || !(ctxt->vcpu->arch.cr0 & X86_CR0_PE)) {
1597 kvm_inject_gp(ctxt->vcpu, 0);
1601 /* sysexit must be called from CPL 0 */
1602 if (kvm_x86_ops->get_cpl(ctxt->vcpu) != 0) {
1603 kvm_inject_gp(ctxt->vcpu, 0);
1607 setup_syscalls_segments(ctxt, &cs, &ss);
/* REX.W selects a return to 64-bit user mode. */
1609 if ((c->rex_prefix & 0x8) != 0x0)
1610 usermode = X86EMUL_MODE_PROT64;
1612 usermode = X86EMUL_MODE_PROT32;
/* A null SYSENTER_CS base selector is a #GP condition. */
1616 kvm_x86_ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
1618 case X86EMUL_MODE_PROT32:
/* 32-bit return: user CS = base + 16, user SS = base + 24. */
1619 cs.selector = (u16)(msr_data + 16);
1620 if ((msr_data & 0xfffc) == 0x0) {
1621 kvm_inject_gp(ctxt->vcpu, 0);
1624 ss.selector = (u16)(msr_data + 24);
1626 case X86EMUL_MODE_PROT64:
/* 64-bit return: user CS = base + 32, SS is CS + 8. */
1627 cs.selector = (u16)(msr_data + 32);
1628 if (msr_data == 0x0) {
1629 kvm_inject_gp(ctxt->vcpu, 0);
1632 ss.selector = cs.selector + 8;
/* Returning to ring 3: force RPL 3 on both selectors. */
1637 cs.selector |= SELECTOR_RPL_MASK;
1638 ss.selector |= SELECTOR_RPL_MASK;
1640 kvm_x86_ops->set_segment(ctxt->vcpu, &cs, VCPU_SREG_CS);
1641 kvm_x86_ops->set_segment(ctxt->vcpu, &ss, VCPU_SREG_SS);
/* User RIP comes from RDX, user RSP from RCX (SYSEXIT convention). */
1643 c->eip = ctxt->vcpu->arch.regs[VCPU_REGS_RDX];
1644 c->regs[VCPU_REGS_RSP] = ctxt->vcpu->arch.regs[VCPU_REGS_RCX];
1650 x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
1652 unsigned long memop = 0;
1654 unsigned long saved_eip = 0;
1655 struct decode_cache *c = &ctxt->decode;
1660 ctxt->interruptibility = 0;
1662 /* Shadow copy of register state. Committed on successful emulation.
1663 * NOTE: we can copy them from vcpu as x86_decode_insn() doesn't
1667 memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);
1670 if (((c->d & ModRM) && (c->modrm_mod != 3)) || (c->d & MemAbs))
1671 memop = c->modrm_ea;
1673 if (c->rep_prefix && (c->d & String)) {
1674 /* All REP prefixes have the same first termination condition */
1675 if (c->regs[VCPU_REGS_RCX] == 0) {
1676 kvm_rip_write(ctxt->vcpu, c->eip);
1679 /* The second termination condition only applies for REPE
1680 * and REPNE. Test if the repeat string operation prefix is
1681 * REPE/REPZ or REPNE/REPNZ and if it's the case it tests the
1682 * corresponding termination condition according to:
1683 * - if REPE/REPZ and ZF = 0 then done
1684 * - if REPNE/REPNZ and ZF = 1 then done
1686 if ((c->b == 0xa6) || (c->b == 0xa7) ||
1687 (c->b == 0xae) || (c->b == 0xaf)) {
1688 if ((c->rep_prefix == REPE_PREFIX) &&
1689 ((ctxt->eflags & EFLG_ZF) == 0)) {
1690 kvm_rip_write(ctxt->vcpu, c->eip);
1693 if ((c->rep_prefix == REPNE_PREFIX) &&
1694 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF)) {
1695 kvm_rip_write(ctxt->vcpu, c->eip);
1699 c->regs[VCPU_REGS_RCX]--;
1700 c->eip = kvm_rip_read(ctxt->vcpu);
1703 if (c->src.type == OP_MEM) {
1704 c->src.ptr = (unsigned long *)memop;
1706 rc = ops->read_emulated((unsigned long)c->src.ptr,
1712 c->src.orig_val = c->src.val;
1715 if ((c->d & DstMask) == ImplicitOps)
1719 if (c->dst.type == OP_MEM) {
1720 c->dst.ptr = (unsigned long *)memop;
1721 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
1724 unsigned long mask = ~(c->dst.bytes * 8 - 1);
1726 c->dst.ptr = (void *)c->dst.ptr +
1727 (c->src.val & mask) / 8;
1729 if (!(c->d & Mov) &&
1730 /* optimisation - avoid slow emulated read */
1731 ((rc = ops->read_emulated((unsigned long)c->dst.ptr,
1733 c->dst.bytes, ctxt->vcpu)) != 0))
1736 c->dst.orig_val = c->dst.val;
1746 emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
1748 case 0x06: /* push es */
1749 emulate_push_sreg(ctxt, VCPU_SREG_ES);
1751 case 0x07: /* pop es */
1752 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_ES);
1758 emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
1760 case 0x0e: /* push cs */
1761 emulate_push_sreg(ctxt, VCPU_SREG_CS);
1765 emulate_2op_SrcV("adc", c->src, c->dst, ctxt->eflags);
1767 case 0x16: /* push ss */
1768 emulate_push_sreg(ctxt, VCPU_SREG_SS);
1770 case 0x17: /* pop ss */
1771 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_SS);
1777 emulate_2op_SrcV("sbb", c->src, c->dst, ctxt->eflags);
1779 case 0x1e: /* push ds */
1780 emulate_push_sreg(ctxt, VCPU_SREG_DS);
1782 case 0x1f: /* pop ds */
1783 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_DS);
1789 emulate_2op_SrcV("and", c->src, c->dst, ctxt->eflags);
1793 emulate_2op_SrcV("sub", c->src, c->dst, ctxt->eflags);
1797 emulate_2op_SrcV("xor", c->src, c->dst, ctxt->eflags);
1801 emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
1803 case 0x40 ... 0x47: /* inc r16/r32 */
1804 emulate_1op("inc", c->dst, ctxt->eflags);
1806 case 0x48 ... 0x4f: /* dec r16/r32 */
1807 emulate_1op("dec", c->dst, ctxt->eflags);
1809 case 0x50 ... 0x57: /* push reg */
1812 case 0x58 ... 0x5f: /* pop reg */
1814 rc = emulate_pop(ctxt, ops, &c->dst.val, c->op_bytes);
1818 case 0x63: /* movsxd */
1819 if (ctxt->mode != X86EMUL_MODE_PROT64)
1820 goto cannot_emulate;
1821 c->dst.val = (s32) c->src.val;
1823 case 0x68: /* push imm */
1824 case 0x6a: /* push imm8 */
1827 case 0x6c: /* insb */
1828 case 0x6d: /* insw/insd */
1829 if (kvm_emulate_pio_string(ctxt->vcpu,
1831 (c->d & ByteOp) ? 1 : c->op_bytes,
1833 address_mask(c, c->regs[VCPU_REGS_RCX]) : 1,
1834 (ctxt->eflags & EFLG_DF),
1835 register_address(c, es_base(ctxt),
1836 c->regs[VCPU_REGS_RDI]),
1838 c->regs[VCPU_REGS_RDX]) == 0) {
1843 case 0x6e: /* outsb */
1844 case 0x6f: /* outsw/outsd */
1845 if (kvm_emulate_pio_string(ctxt->vcpu,
1847 (c->d & ByteOp) ? 1 : c->op_bytes,
1849 address_mask(c, c->regs[VCPU_REGS_RCX]) : 1,
1850 (ctxt->eflags & EFLG_DF),
1852 seg_override_base(ctxt, c),
1853 c->regs[VCPU_REGS_RSI]),
1855 c->regs[VCPU_REGS_RDX]) == 0) {
1860 case 0x70 ... 0x7f: /* jcc (short) */
1861 if (test_cc(c->b, ctxt->eflags))
1862 jmp_rel(c, c->src.val);
1864 case 0x80 ... 0x83: /* Grp1 */
1865 switch (c->modrm_reg) {
1885 emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
1887 case 0x86 ... 0x87: /* xchg */
1889 /* Write back the register source. */
1890 switch (c->dst.bytes) {
1892 *(u8 *) c->src.ptr = (u8) c->dst.val;
1895 *(u16 *) c->src.ptr = (u16) c->dst.val;
1898 *c->src.ptr = (u32) c->dst.val;
1899 break; /* 64b reg: zero-extend */
1901 *c->src.ptr = c->dst.val;
1905 * Write back the memory destination with implicit LOCK
1908 c->dst.val = c->src.val;
1911 case 0x88 ... 0x8b: /* mov */
1913 case 0x8c: { /* mov r/m, sreg */
1914 struct kvm_segment segreg;
1916 if (c->modrm_reg <= 5)
1917 kvm_get_segment(ctxt->vcpu, &segreg, c->modrm_reg);
1919 printk(KERN_INFO "0x8c: Invalid segreg in modrm byte 0x%02x\n",
1921 goto cannot_emulate;
1923 c->dst.val = segreg.selector;
1926 case 0x8d: /* lea r16/r32, m */
1927 c->dst.val = c->modrm_ea;
1929 case 0x8e: { /* mov seg, r/m16 */
1935 if (c->modrm_reg == VCPU_SREG_SS)
1936 toggle_interruptibility(ctxt, X86_SHADOW_INT_MOV_SS);
1938 if (c->modrm_reg <= 5) {
1939 type_bits = (c->modrm_reg == 1) ? 9 : 1;
1940 err = kvm_load_segment_descriptor(ctxt->vcpu, sel,
1941 type_bits, c->modrm_reg);
1943 printk(KERN_INFO "Invalid segreg in modrm byte 0x%02x\n",
1945 goto cannot_emulate;
1949 goto cannot_emulate;
1951 c->dst.type = OP_NONE; /* Disable writeback. */
1954 case 0x8f: /* pop (sole member of Grp1a) */
1955 rc = emulate_grp1a(ctxt, ops);
1959 case 0x90: /* nop / xchg r8,rax */
1960 if (!(c->rex_prefix & 1)) { /* nop */
1961 c->dst.type = OP_NONE;
1964 case 0x91 ... 0x97: /* xchg reg,rax */
1965 c->src.type = c->dst.type = OP_REG;
1966 c->src.bytes = c->dst.bytes = c->op_bytes;
1967 c->src.ptr = (unsigned long *) &c->regs[VCPU_REGS_RAX];
1968 c->src.val = *(c->src.ptr);
1970 case 0x9c: /* pushf */
1971 c->src.val = (unsigned long) ctxt->eflags;
1974 case 0x9d: /* popf */
1975 c->dst.type = OP_REG;
1976 c->dst.ptr = (unsigned long *) &ctxt->eflags;
1977 c->dst.bytes = c->op_bytes;
1978 goto pop_instruction;
1979 case 0xa0 ... 0xa1: /* mov */
1980 c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX];
1981 c->dst.val = c->src.val;
1983 case 0xa2 ... 0xa3: /* mov */
1984 c->dst.val = (unsigned long)c->regs[VCPU_REGS_RAX];
1986 case 0xa4 ... 0xa5: /* movs */
1987 c->dst.type = OP_MEM;
1988 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
1989 c->dst.ptr = (unsigned long *)register_address(c,
1991 c->regs[VCPU_REGS_RDI]);
1992 if ((rc = ops->read_emulated(register_address(c,
1993 seg_override_base(ctxt, c),
1994 c->regs[VCPU_REGS_RSI]),
1996 c->dst.bytes, ctxt->vcpu)) != 0)
1998 register_address_increment(c, &c->regs[VCPU_REGS_RSI],
1999 (ctxt->eflags & EFLG_DF) ? -c->dst.bytes
2001 register_address_increment(c, &c->regs[VCPU_REGS_RDI],
2002 (ctxt->eflags & EFLG_DF) ? -c->dst.bytes
2005 case 0xa6 ... 0xa7: /* cmps */
2006 c->src.type = OP_NONE; /* Disable writeback. */
2007 c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
2008 c->src.ptr = (unsigned long *)register_address(c,
2009 seg_override_base(ctxt, c),
2010 c->regs[VCPU_REGS_RSI]);
2011 if ((rc = ops->read_emulated((unsigned long)c->src.ptr,
2017 c->dst.type = OP_NONE; /* Disable writeback. */
2018 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
2019 c->dst.ptr = (unsigned long *)register_address(c,
2021 c->regs[VCPU_REGS_RDI]);
2022 if ((rc = ops->read_emulated((unsigned long)c->dst.ptr,
2028 DPRINTF("cmps: mem1=0x%p mem2=0x%p\n", c->src.ptr, c->dst.ptr);
2030 emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
2032 register_address_increment(c, &c->regs[VCPU_REGS_RSI],
2033 (ctxt->eflags & EFLG_DF) ? -c->src.bytes
2035 register_address_increment(c, &c->regs[VCPU_REGS_RDI],
2036 (ctxt->eflags & EFLG_DF) ? -c->dst.bytes
2040 case 0xaa ... 0xab: /* stos */
2041 c->dst.type = OP_MEM;
2042 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
2043 c->dst.ptr = (unsigned long *)register_address(c,
2045 c->regs[VCPU_REGS_RDI]);
2046 c->dst.val = c->regs[VCPU_REGS_RAX];
2047 register_address_increment(c, &c->regs[VCPU_REGS_RDI],
2048 (ctxt->eflags & EFLG_DF) ? -c->dst.bytes
2051 case 0xac ... 0xad: /* lods */
2052 c->dst.type = OP_REG;
2053 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
2054 c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX];
2055 if ((rc = ops->read_emulated(register_address(c,
2056 seg_override_base(ctxt, c),
2057 c->regs[VCPU_REGS_RSI]),
2062 register_address_increment(c, &c->regs[VCPU_REGS_RSI],
2063 (ctxt->eflags & EFLG_DF) ? -c->dst.bytes
2066 case 0xae ... 0xaf: /* scas */
2067 DPRINTF("Urk! I don't handle SCAS.\n");
2068 goto cannot_emulate;
2069 case 0xb0 ... 0xbf: /* mov r, imm */
2074 case 0xc3: /* ret */
2075 c->dst.type = OP_REG;
2076 c->dst.ptr = &c->eip;
2077 c->dst.bytes = c->op_bytes;
2078 goto pop_instruction;
2079 case 0xc6 ... 0xc7: /* mov (sole member of Grp11) */
2081 c->dst.val = c->src.val;
2083 case 0xcb: /* ret far */
2084 rc = emulate_ret_far(ctxt, ops);
2088 case 0xd0 ... 0xd1: /* Grp2 */
2092 case 0xd2 ... 0xd3: /* Grp2 */
2093 c->src.val = c->regs[VCPU_REGS_RCX];
2096 case 0xe4: /* inb */
2101 case 0xe6: /* outb */
2102 case 0xe7: /* out */
2106 case 0xe8: /* call (near) */ {
2107 long int rel = c->src.val;
2108 c->src.val = (unsigned long) c->eip;
2113 case 0xe9: /* jmp rel */
2115 case 0xea: /* jmp far */
2116 if (kvm_load_segment_descriptor(ctxt->vcpu, c->src2.val, 9,
2117 VCPU_SREG_CS) < 0) {
2118 DPRINTF("jmp far: Failed to load CS descriptor\n");
2119 goto cannot_emulate;
2122 c->eip = c->src.val;
2125 jmp: /* jmp rel short */
2126 jmp_rel(c, c->src.val);
2127 c->dst.type = OP_NONE; /* Disable writeback. */
2129 case 0xec: /* in al,dx */
2130 case 0xed: /* in (e/r)ax,dx */
2131 port = c->regs[VCPU_REGS_RDX];
2134 case 0xee: /* out al,dx */
2135 case 0xef: /* out (e/r)ax,dx */
2136 port = c->regs[VCPU_REGS_RDX];
2138 do_io: if (kvm_emulate_pio(ctxt->vcpu, io_dir_in,
2139 (c->d & ByteOp) ? 1 : c->op_bytes,
2142 goto cannot_emulate;
2145 case 0xf4: /* hlt */
2146 ctxt->vcpu->arch.halt_request = 1;
2148 case 0xf5: /* cmc */
2149 /* complement carry flag from eflags reg */
2150 ctxt->eflags ^= EFLG_CF;
2151 c->dst.type = OP_NONE; /* Disable writeback. */
2153 case 0xf6 ... 0xf7: /* Grp3 */
2154 rc = emulate_grp3(ctxt, ops);
2158 case 0xf8: /* clc */
2159 ctxt->eflags &= ~EFLG_CF;
2160 c->dst.type = OP_NONE; /* Disable writeback. */
2162 case 0xfa: /* cli */
2163 ctxt->eflags &= ~X86_EFLAGS_IF;
2164 c->dst.type = OP_NONE; /* Disable writeback. */
2166 case 0xfb: /* sti */
2167 toggle_interruptibility(ctxt, X86_SHADOW_INT_STI);
2168 ctxt->eflags |= X86_EFLAGS_IF;
2169 c->dst.type = OP_NONE; /* Disable writeback. */
2171 case 0xfc: /* cld */
2172 ctxt->eflags &= ~EFLG_DF;
2173 c->dst.type = OP_NONE; /* Disable writeback. */
2175 case 0xfd: /* std */
2176 ctxt->eflags |= EFLG_DF;
2177 c->dst.type = OP_NONE; /* Disable writeback. */
2179 case 0xfe ... 0xff: /* Grp4/Grp5 */
2180 rc = emulate_grp45(ctxt, ops);
2187 rc = writeback(ctxt, ops);
2191 /* Commit shadow register state. */
2192 memcpy(ctxt->vcpu->arch.regs, c->regs, sizeof c->regs);
2193 kvm_rip_write(ctxt->vcpu, c->eip);
2196 if (rc == X86EMUL_UNHANDLEABLE) {
2204 case 0x01: /* lgdt, lidt, lmsw */
2205 switch (c->modrm_reg) {
2207 unsigned long address;
2209 case 0: /* vmcall */
2210 if (c->modrm_mod != 3 || c->modrm_rm != 1)
2211 goto cannot_emulate;
2213 rc = kvm_fix_hypercall(ctxt->vcpu);
2217 /* Let the processor re-execute the fixed hypercall */
2218 c->eip = kvm_rip_read(ctxt->vcpu);
2219 /* Disable writeback. */
2220 c->dst.type = OP_NONE;
2223 rc = read_descriptor(ctxt, ops, c->src.ptr,
2224 &size, &address, c->op_bytes);
2227 realmode_lgdt(ctxt->vcpu, size, address);
2228 /* Disable writeback. */
2229 c->dst.type = OP_NONE;
2231 case 3: /* lidt/vmmcall */
2232 if (c->modrm_mod == 3) {
2233 switch (c->modrm_rm) {
2235 rc = kvm_fix_hypercall(ctxt->vcpu);
2240 goto cannot_emulate;
2243 rc = read_descriptor(ctxt, ops, c->src.ptr,
2248 realmode_lidt(ctxt->vcpu, size, address);
2250 /* Disable writeback. */
2251 c->dst.type = OP_NONE;
2255 c->dst.val = realmode_get_cr(ctxt->vcpu, 0);
2258 realmode_lmsw(ctxt->vcpu, (u16)c->src.val,
2260 c->dst.type = OP_NONE;
2263 emulate_invlpg(ctxt->vcpu, memop);
2264 /* Disable writeback. */
2265 c->dst.type = OP_NONE;
2268 goto cannot_emulate;
2271 case 0x05: /* syscall */
2272 if (emulate_syscall(ctxt) == -1)
2273 goto cannot_emulate;
2278 emulate_clts(ctxt->vcpu);
2279 c->dst.type = OP_NONE;
2281 case 0x08: /* invd */
2282 case 0x09: /* wbinvd */
2283 case 0x0d: /* GrpP (prefetch) */
2284 case 0x18: /* Grp16 (prefetch/nop) */
2285 c->dst.type = OP_NONE;
2287 case 0x20: /* mov cr, reg */
2288 if (c->modrm_mod != 3)
2289 goto cannot_emulate;
2290 c->regs[c->modrm_rm] =
2291 realmode_get_cr(ctxt->vcpu, c->modrm_reg);
2292 c->dst.type = OP_NONE; /* no writeback */
2294 case 0x21: /* mov from dr to reg */
2295 if (c->modrm_mod != 3)
2296 goto cannot_emulate;
2297 rc = emulator_get_dr(ctxt, c->modrm_reg, &c->regs[c->modrm_rm]);
2299 goto cannot_emulate;
2300 c->dst.type = OP_NONE; /* no writeback */
2302 case 0x22: /* mov reg, cr */
2303 if (c->modrm_mod != 3)
2304 goto cannot_emulate;
2305 realmode_set_cr(ctxt->vcpu,
2306 c->modrm_reg, c->modrm_val, &ctxt->eflags);
2307 c->dst.type = OP_NONE;
2309 case 0x23: /* mov from reg to dr */
2310 if (c->modrm_mod != 3)
2311 goto cannot_emulate;
2312 rc = emulator_set_dr(ctxt, c->modrm_reg,
2313 c->regs[c->modrm_rm]);
2315 goto cannot_emulate;
2316 c->dst.type = OP_NONE; /* no writeback */
2320 msr_data = (u32)c->regs[VCPU_REGS_RAX]
2321 | ((u64)c->regs[VCPU_REGS_RDX] << 32);
2322 rc = kvm_set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data);
2324 kvm_inject_gp(ctxt->vcpu, 0);
2325 c->eip = kvm_rip_read(ctxt->vcpu);
2327 rc = X86EMUL_CONTINUE;
2328 c->dst.type = OP_NONE;
2332 rc = kvm_get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data);
2334 kvm_inject_gp(ctxt->vcpu, 0);
2335 c->eip = kvm_rip_read(ctxt->vcpu);
2337 c->regs[VCPU_REGS_RAX] = (u32)msr_data;
2338 c->regs[VCPU_REGS_RDX] = msr_data >> 32;
2340 rc = X86EMUL_CONTINUE;
2341 c->dst.type = OP_NONE;
2343 case 0x34: /* sysenter */
2344 if (emulate_sysenter(ctxt) == -1)
2345 goto cannot_emulate;
2349 case 0x35: /* sysexit */
2350 if (emulate_sysexit(ctxt) == -1)
2351 goto cannot_emulate;
2355 case 0x40 ... 0x4f: /* cmov */
2356 c->dst.val = c->dst.orig_val = c->src.val;
2357 if (!test_cc(c->b, ctxt->eflags))
2358 c->dst.type = OP_NONE; /* no writeback */
2360 case 0x80 ... 0x8f: /* jnz rel, etc*/
2361 if (test_cc(c->b, ctxt->eflags))
2362 jmp_rel(c, c->src.val);
2363 c->dst.type = OP_NONE;
2365 case 0xa0: /* push fs */
2366 emulate_push_sreg(ctxt, VCPU_SREG_FS);
2368 case 0xa1: /* pop fs */
2369 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS);
2375 c->dst.type = OP_NONE;
2376 /* only subword offset */
2377 c->src.val &= (c->dst.bytes << 3) - 1;
2378 emulate_2op_SrcV_nobyte("bt", c->src, c->dst, ctxt->eflags);
2380 case 0xa4: /* shld imm8, r, r/m */
2381 case 0xa5: /* shld cl, r, r/m */
2382 emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags);
2384 case 0xa8: /* push gs */
2385 emulate_push_sreg(ctxt, VCPU_SREG_GS);
2387 case 0xa9: /* pop gs */
2388 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS);
2394 /* only subword offset */
2395 c->src.val &= (c->dst.bytes << 3) - 1;
2396 emulate_2op_SrcV_nobyte("bts", c->src, c->dst, ctxt->eflags);
2398 case 0xac: /* shrd imm8, r, r/m */
2399 case 0xad: /* shrd cl, r, r/m */
2400 emulate_2op_cl("shrd", c->src2, c->src, c->dst, ctxt->eflags);
2402 case 0xae: /* clflush */
2404 case 0xb0 ... 0xb1: /* cmpxchg */
2406 * Save real source value, then compare EAX against
2409 c->src.orig_val = c->src.val;
2410 c->src.val = c->regs[VCPU_REGS_RAX];
2411 emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
2412 if (ctxt->eflags & EFLG_ZF) {
2413 /* Success: write back to memory. */
2414 c->dst.val = c->src.orig_val;
2416 /* Failure: write the value we saw to EAX. */
2417 c->dst.type = OP_REG;
2418 c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX];
2423 /* only subword offset */
2424 c->src.val &= (c->dst.bytes << 3) - 1;
2425 emulate_2op_SrcV_nobyte("btr", c->src, c->dst, ctxt->eflags);
2427 case 0xb6 ... 0xb7: /* movzx */
2428 c->dst.bytes = c->op_bytes;
2429 c->dst.val = (c->d & ByteOp) ? (u8) c->src.val
2432 case 0xba: /* Grp8 */
2433 switch (c->modrm_reg & 3) {
2446 /* only subword offset */
2447 c->src.val &= (c->dst.bytes << 3) - 1;
2448 emulate_2op_SrcV_nobyte("btc", c->src, c->dst, ctxt->eflags);
2450 case 0xbe ... 0xbf: /* movsx */
2451 c->dst.bytes = c->op_bytes;
2452 c->dst.val = (c->d & ByteOp) ? (s8) c->src.val :
2455 case 0xc3: /* movnti */
2456 c->dst.bytes = c->op_bytes;
2457 c->dst.val = (c->op_bytes == 4) ? (u32) c->src.val :
2460 case 0xc7: /* Grp9 (cmpxchg8b) */
2461 rc = emulate_grp9(ctxt, ops, memop);
2464 c->dst.type = OP_NONE;
2470 DPRINTF("Cannot emulate %02x\n", c->b);