File: target-i386/translate.c
Location: line 1900, column 13
Description: Value stored to 'havesib' is never read
1 | /* |
2 | * i386 translation |
3 | * |
4 | * Copyright (c) 2003 Fabrice Bellard |
5 | * |
6 | * This library is free software; you can redistribute it and/or |
7 | * modify it under the terms of the GNU Lesser General Public |
8 | * License as published by the Free Software Foundation; either |
9 | * version 2 of the License, or (at your option) any later version. |
10 | * |
11 | * This library is distributed in the hope that it will be useful, |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
14 | * Lesser General Public License for more details. |
15 | * |
16 | * You should have received a copy of the GNU Lesser General Public |
17 | * License along with this library; if not, see <http://www.gnu.org/licenses/>. |
18 | */ |
19 | #include <stdarg.h> |
20 | #include <stdlib.h> |
21 | #include <stdio.h> |
22 | #include <string.h> |
23 | #include <inttypes.h> |
24 | #include <signal.h> |
25 | |
26 | #include "qemu/host-utils.h" |
27 | #include "cpu.h" |
28 | #include "disas/disas.h" |
29 | #include "tcg-op.h" |
30 | |
31 | #include "helper.h" |
32 | #define GEN_HELPER 1 |
33 | #include "helper.h" |
34 | |
35 | #define PREFIX_REPZ 0x01 |
36 | #define PREFIX_REPNZ 0x02 |
37 | #define PREFIX_LOCK 0x04 |
38 | #define PREFIX_DATA 0x08 |
39 | #define PREFIX_ADR 0x10 |
40 | #define PREFIX_VEX 0x20 |
41 | |
42 | #ifdef TARGET_X86_64 |
43 | #define CODE64(s) ((s)->code64) |
44 | #define REX_X(s) ((s)->rex_x) |
45 | #define REX_B(s) ((s)->rex_b) |
46 | #else |
47 | #define CODE64(s) 0 |
48 | #define REX_X(s) 0 |
49 | #define REX_B(s) 0 |
50 | #endif |
51 | |
52 | #ifdef TARGET_X86_64 |
53 | # define ctztl ctz64 |
54 | # define clztl clz64 |
55 | #else |
56 | # define ctztl ctz32 |
57 | # define clztl clz32 |
58 | #endif |
59 | |
60 | //#define MACRO_TEST 1 |
61 | |
62 | /* global register indexes */ |
63 | static TCGv_ptr cpu_env; |
64 | static TCGv cpu_A0; |
65 | static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2, cpu_cc_srcT; |
66 | static TCGv_i32 cpu_cc_op; |
67 | static TCGv cpu_regs[CPU_NB_REGS]; |
68 | /* local temps */ |
69 | static TCGv cpu_T[2]; |
70 | /* local register indexes (only used inside old micro ops) */ |
71 | static TCGv cpu_tmp0, cpu_tmp4; |
72 | static TCGv_ptr cpu_ptr0, cpu_ptr1; |
73 | static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32; |
74 | static TCGv_i64 cpu_tmp1_i64; |
75 | |
76 | static uint8_t gen_opc_cc_op[OPC_BUF_SIZE]; |
77 | |
78 | #include "exec/gen-icount.h" |
79 | |
80 | #ifdef TARGET_X86_64 |
81 | static int x86_64_hregs; |
82 | #endif |
83 | |
84 | typedef struct DisasContext { |
85 | /* current insn context */ |
86 | int override; /* -1 if no override */ |
87 | int prefix; |
88 | TCGMemOp aflag; |
89 | TCGMemOp dflag; |
90 | target_ulong pc; /* pc = eip + cs_base */ |
91 | int is_jmp; /* 1 means jump (stop translation), 2 means CPU |
92 | static state change (stop translation) */ |
93 | /* current block context */ |
94 | target_ulong cs_base; /* base of CS segment */ |
95 | int pe; /* protected mode */ |
96 | int code32; /* 32 bit code segment */ |
97 | #ifdef TARGET_X86_64 |
98 | int lma; /* long mode active */ |
99 | int code64; /* 64 bit code segment */ |
100 | int rex_x, rex_b; |
101 | #endif |
102 | int vex_l; /* vex vector length */ |
103 | int vex_v; /* vex vvvv register, without 1's complement. */ |
104 | int ss32; /* 32 bit stack segment */ |
105 | CCOp cc_op; /* current CC operation */ |
106 | bool cc_op_dirty; |
107 | int addseg; /* non zero if either DS/ES/SS have a non zero base */ |
108 | int f_st; /* currently unused */ |
109 | int vm86; /* vm86 mode */ |
110 | int cpl; |
111 | int iopl; |
112 | int tf; /* TF cpu flag */ |
113 | int singlestep_enabled; /* "hardware" single step enabled */ |
114 | int jmp_opt; /* use direct block chaining for direct jumps */ |
115 | int mem_index; /* select memory access functions */ |
116 | uint64_t flags; /* all execution flags */ |
117 | struct TranslationBlock *tb; |
118 | int popl_esp_hack; /* for correct popl with esp base handling */ |
119 | int rip_offset; /* only used in x86_64, but left for simplicity */ |
120 | int cpuid_features; |
121 | int cpuid_ext_features; |
122 | int cpuid_ext2_features; |
123 | int cpuid_ext3_features; |
124 | int cpuid_7_0_ebx_features; |
125 | } DisasContext; |
126 | |
127 | static void gen_eob(DisasContext *s); |
128 | static void gen_jmp(DisasContext *s, target_ulong eip); |
129 | static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num); |
130 | static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d); |
131 | |
132 | /* i386 arith/logic operations */ |
133 | enum { |
134 | OP_ADDL, |
135 | OP_ORL, |
136 | OP_ADCL, |
137 | OP_SBBL, |
138 | OP_ANDL, |
139 | OP_SUBL, |
140 | OP_XORL, |
141 | OP_CMPL, |
142 | }; |
143 | |
144 | /* i386 shift ops */ |
145 | enum { |
146 | OP_ROL, |
147 | OP_ROR, |
148 | OP_RCL, |
149 | OP_RCR, |
150 | OP_SHL, |
151 | OP_SHR, |
152 | OP_SHL1, /* undocumented */ |
153 | OP_SAR = 7, |
154 | }; |
155 | |
156 | enum { |
157 | JCC_O, |
158 | JCC_B, |
159 | JCC_Z, |
160 | JCC_BE, |
161 | JCC_S, |
162 | JCC_P, |
163 | JCC_L, |
164 | JCC_LE, |
165 | }; |
166 | |
167 | enum { |
168 | /* I386 int registers */ |
169 | OR_EAX, /* MUST be even numbered */ |
170 | OR_ECX, |
171 | OR_EDX, |
172 | OR_EBX, |
173 | OR_ESP, |
174 | OR_EBP, |
175 | OR_ESI, |
176 | OR_EDI, |
177 | |
178 | OR_TMP0 = 16, /* temporary operand register */ |
179 | OR_TMP1, |
180 | OR_A0, /* temporary register used when doing address evaluation */ |
181 | }; |
182 | |
183 | enum { |
184 | USES_CC_DST = 1, |
185 | USES_CC_SRC = 2, |
186 | USES_CC_SRC2 = 4, |
187 | USES_CC_SRCT = 8, |
188 | }; |
189 | |
190 | /* Bit set if the global variable is live after setting CC_OP to X. */ |
191 | static const uint8_t cc_op_live[CC_OP_NB] = { |
192 | [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, |
193 | [CC_OP_EFLAGS] = USES_CC_SRC, |
194 | [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC, |
195 | [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC, |
196 | [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, |
197 | [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT, |
198 | [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, |
199 | [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST, |
200 | [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC, |
201 | [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC, |
202 | [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC, |
203 | [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC, |
204 | [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC, |
205 | [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC, |
206 | [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2, |
207 | [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, |
208 | [CC_OP_CLR] = 0, |
209 | }; |
210 | |
211 | static void set_cc_op(DisasContext *s, CCOp op) |
212 | { |
213 | int dead; |
214 | |
215 | if (s->cc_op == op) { |
216 | return; |
217 | } |
218 | |
219 | /* Discard CC computation that will no longer be used. */ |
220 | dead = cc_op_live[s->cc_op] & ~cc_op_live[op]; |
221 | if (dead & USES_CC_DST) { |
222 | tcg_gen_discard_tl(cpu_cc_dst); |
223 | } |
224 | if (dead & USES_CC_SRC) { |
225 | tcg_gen_discard_tl(cpu_cc_src); |
226 | } |
227 | if (dead & USES_CC_SRC2) { |
228 | tcg_gen_discard_tl(cpu_cc_src2); |
229 | } |
230 | if (dead & USES_CC_SRCT) { |
231 | tcg_gen_discard_tl(cpu_cc_srcT); |
232 | } |
233 | |
234 | if (op == CC_OP_DYNAMIC) { |
235 | /* The DYNAMIC setting is translator only, and should never be |
236 | stored. Thus we always consider it clean. */ |
237 | s->cc_op_dirty = false; |
238 | } else { |
239 | /* Discard any computed CC_OP value (see shifts). */ |
240 | if (s->cc_op == CC_OP_DYNAMIC) { |
241 | tcg_gen_discard_i32(cpu_cc_op); |
242 | } |
243 | s->cc_op_dirty = true; |
244 | } |
245 | s->cc_op = op; |
246 | } |
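Illustrative note (not part of the source): a worked instance of the liveness computation in set_cc_op() above, for a transition from CC_OP_SUBB to CC_OP_LOGICB:

    /* cc_op_live[CC_OP_SUBB]   = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT
     * cc_op_live[CC_OP_LOGICB] = USES_CC_DST
     * dead = (DST|SRC|SRCT) & ~DST = USES_CC_SRC | USES_CC_SRCT,
     * so cpu_cc_src and cpu_cc_srcT are discarded and cpu_cc_dst is kept. */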
247 | |
248 | static void gen_update_cc_op(DisasContext *s) |
249 | { |
250 | if (s->cc_op_dirty) { |
251 | tcg_gen_movi_i32(cpu_cc_op, s->cc_op); |
252 | s->cc_op_dirty = false; |
253 | } |
254 | } |
255 | |
256 | #ifdef TARGET_X86_64 |
257 | |
258 | #define NB_OP_SIZES 4 |
259 | |
260 | #else /* !TARGET_X86_64 */ |
261 | |
262 | #define NB_OP_SIZES 3 |
263 | |
264 | #endif /* !TARGET_X86_64 */ |
265 | |
266 | #if defined(HOST_WORDS_BIGENDIAN) |
267 | #define REG_B_OFFSET (sizeof(target_ulong) - 1) |
268 | #define REG_H_OFFSET (sizeof(target_ulong) - 2) |
269 | #define REG_W_OFFSET (sizeof(target_ulong) - 2) |
270 | #define REG_L_OFFSET (sizeof(target_ulong) - 4) |
271 | #define REG_LH_OFFSET (sizeof(target_ulong) - 8) |
272 | #else |
273 | #define REG_B_OFFSET 0 |
274 | #define REG_H_OFFSET 1 |
275 | #define REG_W_OFFSET 0 |
276 | #define REG_L_OFFSET 0 |
277 | #define REG_LH_OFFSET 4 |
278 | #endif |
279 | |
280 | /* In instruction encodings for byte register accesses the |
281 | * register number usually indicates "low 8 bits of register N"; |
282 | * however there are some special cases where N 4..7 indicates |
283 | * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return |
284 | * true for this special case, false otherwise. |
285 | */ |
286 | static inline bool byte_reg_is_xH(int reg) |
287 | { |
288 | if (reg < 4) { |
289 | return false; |
290 | } |
291 | #ifdef TARGET_X86_64 |
292 | if (reg >= 8 || x86_64_hregs) { |
293 | return false; |
294 | } |
295 | #endif |
296 | return true; |
297 | } |
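Worked example of the special case described above (standard x86 byte-register encoding; a sketch, not source text):

    /* ModRM reg field 4 with an MO_8 operand:
     *   - no REX prefix (x86_64_hregs == 0): byte_reg_is_xH(4) is true,
     *     so the operand is AH, i.e. bits 15..8 of cpu_regs[R_EAX];
     *   - any REX prefix (x86_64_hregs == 1): it is false,
     *     so the operand is SPL, the low 8 bits of cpu_regs[R_ESP]. */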
298 | |
299 | /* Select the size of a push/pop operation. */ |
300 | static inline TCGMemOp mo_pushpop(DisasContext *s, TCGMemOp ot) |
301 | { |
302 | if (CODE64(s)) { |
303 | return ot == MO_16 ? MO_16 : MO_64; |
304 | } else { |
305 | return ot; |
306 | } |
307 | } |
308 | |
309 | /* Select only size 64 else 32. Used for SSE operand sizes. */ |
310 | static inline TCGMemOp mo_64_32(TCGMemOp ot) |
311 | { |
312 | #ifdef TARGET_X86_64 |
313 | return ot == MO_64 ? MO_64 : MO_32; |
314 | #else |
315 | return MO_32; |
316 | #endif |
317 | } |
318 | |
319 | /* Select size 8 if lsb of B is clear, else OT. Used for decoding |
320 | byte vs word opcodes. */ |
321 | static inline TCGMemOp mo_b_d(int b, TCGMemOp ot) |
322 | { |
323 | return b & 1 ? ot : MO_8; |
324 | } |
325 | |
326 | /* Select size 8 if lsb of B is clear, else OT capped at 32. |
327 | Used for decoding operand size of port opcodes. */ |
328 | static inline TCGMemOp mo_b_d32(int b, TCGMemOp ot) |
329 | { |
330 | return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8; |
331 | } |
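Example of how the two helpers above decode operand size (opcode values from the standard x86 opcode map; illustrative):

    /* MOV Eb,Gb (0x88): lsb clear -> mo_b_d(0x88, ot) == MO_8.
     * MOV Ev,Gv (0x89): lsb set   -> operand size is ot (MO_16/32/64).
     * IN/OUT use mo_b_d32 instead, capping the word case at MO_32,
     * since there are no 64-bit I/O port accesses. */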
332 | |
333 | static void gen_op_mov_reg_v(TCGMemOp ot, int reg, TCGv t0) |
334 | { |
335 | switch(ot) { |
336 | case MO_8: |
337 | if (!byte_reg_is_xH(reg)) { |
338 | tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8); |
339 | } else { |
340 | tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8); |
341 | } |
342 | break; |
343 | case MO_16: |
344 | tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16); |
345 | break; |
346 | case MO_32: |
347 | /* For x86_64, this sets the higher half of register to zero. |
348 | For i386, this is equivalent to a mov. */ |
349 | tcg_gen_ext32u_tl(cpu_regs[reg], t0); |
350 | break; |
351 | #ifdef TARGET_X86_64 |
352 | case MO_64: |
353 | tcg_gen_mov_tl(cpu_regs[reg], t0); |
354 | break; |
355 | #endif |
356 | default: |
357 | tcg_abort(); |
358 | } |
359 | } |
360 | |
361 | static inline void gen_op_mov_v_reg(TCGMemOp ot, TCGv t0, int reg) |
362 | { |
363 | if (ot == MO_8 && byte_reg_is_xH(reg)) { |
364 | tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8); |
365 | tcg_gen_ext8u_tl(t0, t0); |
366 | } else { |
367 | tcg_gen_mov_tl(t0, cpu_regs[reg]); |
368 | } |
369 | } |
370 | |
371 | static inline void gen_op_movl_A0_reg(int reg) |
372 | { |
373 | tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]); |
374 | } |
375 | |
376 | static inline void gen_op_addl_A0_im(int32_t val) |
377 | { |
378 | tcg_gen_addi_tl(cpu_A0, cpu_A0, val); |
379 | #ifdef TARGET_X86_64 |
380 | tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff); |
381 | #endif |
382 | } |
383 | |
384 | #ifdef TARGET_X86_64 |
385 | static inline void gen_op_addq_A0_im(int64_t val) |
386 | { |
387 | tcg_gen_addi_tl(cpu_A0, cpu_A0, val); |
388 | } |
389 | #endif |
390 | |
391 | static void gen_add_A0_im(DisasContext *s, int val) |
392 | { |
393 | #ifdef TARGET_X86_64 |
394 | if (CODE64(s)) |
395 | gen_op_addq_A0_im(val); |
396 | else |
397 | #endif |
398 | gen_op_addl_A0_im(val); |
399 | } |
400 | |
401 | static inline void gen_op_jmp_v(TCGv dest) |
402 | { |
403 | tcg_gen_st_tl(dest, cpu_env, offsetof(CPUX86State, eip)); |
404 | } |
405 | |
406 | static inline void gen_op_add_reg_im(TCGMemOp size, int reg, int32_t val) |
407 | { |
408 | tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val); |
409 | gen_op_mov_reg_v(size, reg, cpu_tmp0); |
410 | } |
411 | |
412 | static inline void gen_op_add_reg_T0(TCGMemOp size, int reg) |
413 | { |
414 | tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]); |
415 | gen_op_mov_reg_v(size, reg, cpu_tmp0); |
416 | } |
417 | |
418 | static inline void gen_op_addl_A0_reg_sN(int shift, int reg) |
419 | { |
420 | tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]); |
421 | if (shift != 0) |
422 | tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift); |
423 | tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0); |
424 | /* For x86_64, this sets the higher half of register to zero. |
425 | For i386, this is equivalent to a nop. */ |
426 | tcg_gen_ext32u_tl(cpu_A0, cpu_A0); |
427 | } |
428 | |
429 | static inline void gen_op_movl_A0_seg(int reg) |
430 | { |
431 | tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base) + REG_L_OFFSET); |
432 | } |
433 | |
434 | static inline void gen_op_addl_A0_seg(DisasContext *s, int reg) |
435 | { |
436 | tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base)); |
437 | #ifdef TARGET_X86_64 |
438 | if (CODE64(s)) { |
439 | tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff); |
440 | tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0); |
441 | } else { |
442 | tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0); |
443 | tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff); |
444 | } |
445 | #else |
446 | tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0); |
447 | #endif |
448 | } |
449 | |
450 | #ifdef TARGET_X86_64 |
451 | static inline void gen_op_movq_A0_seg(int reg) |
452 | { |
453 | tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base)); |
454 | } |
455 | |
456 | static inline void gen_op_addq_A0_seg(int reg) |
457 | { |
458 | tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base)); |
459 | tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0); |
460 | } |
461 | |
462 | static inline void gen_op_movq_A0_reg(int reg) |
463 | { |
464 | tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]); |
465 | } |
466 | |
467 | static inline void gen_op_addq_A0_reg_sN(int shift, int reg) |
468 | { |
469 | tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]); |
470 | if (shift != 0) |
471 | tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift); |
472 | tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0); |
473 | } |
474 | #endif |
475 | |
476 | static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0) |
477 | { |
478 | tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE); |
479 | } |
480 | |
481 | static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0) |
482 | { |
483 | tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE); |
484 | } |
485 | |
486 | static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d) |
487 | { |
488 | if (d == OR_TMP0) { |
489 | gen_op_st_v(s, idx, cpu_T[0], cpu_A0); |
490 | } else { |
491 | gen_op_mov_reg_v(idx, d, cpu_T[0]); |
492 | } |
493 | } |
494 | |
495 | static inline void gen_jmp_im(target_ulong pc) |
496 | { |
497 | tcg_gen_movi_tl(cpu_tmp0, pc); |
498 | gen_op_jmp_v(cpu_tmp0); |
499 | } |
500 | |
501 | static inline void gen_string_movl_A0_ESI(DisasContext *s) |
502 | { |
503 | int override; |
504 | |
505 | override = s->override; |
506 | switch (s->aflag) { |
507 | #ifdef TARGET_X86_64 |
508 | case MO_64: |
509 | if (override >= 0) { |
510 | gen_op_movq_A0_seg(override); |
511 | gen_op_addq_A0_reg_sN(0, R_ESI); |
512 | } else { |
513 | gen_op_movq_A0_reg(R_ESI); |
514 | } |
515 | break; |
516 | #endif |
517 | case MO_32: |
518 | /* 32 bit address */ |
519 | if (s->addseg && override < 0) |
520 | override = R_DS; |
521 | if (override >= 0) { |
522 | gen_op_movl_A0_seg(override); |
523 | gen_op_addl_A0_reg_sN(0, R_ESI); |
524 | } else { |
525 | gen_op_movl_A0_reg(R_ESI); |
526 | } |
527 | break; |
528 | case MO_16: |
529 | /* 16 bit address, always override */ |
530 | if (override < 0) |
531 | override = R_DS; |
532 | tcg_gen_ext16u_tl(cpu_A0, cpu_regs[R_ESI]); |
533 | gen_op_addl_A0_seg(s, override); |
534 | break; |
535 | default: |
536 | tcg_abort(); |
537 | } |
538 | } |
539 | |
540 | static inline void gen_string_movl_A0_EDI(DisasContext *s) |
541 | { |
542 | switch (s->aflag) { |
543 | #ifdef TARGET_X86_64 |
544 | case MO_64: |
545 | gen_op_movq_A0_reg(R_EDI); |
546 | break; |
547 | #endif |
548 | case MO_32: |
549 | if (s->addseg) { |
550 | gen_op_movl_A0_seg(R_ES); |
551 | gen_op_addl_A0_reg_sN(0, R_EDI); |
552 | } else { |
553 | gen_op_movl_A0_reg(R_EDI); |
554 | } |
555 | break; |
556 | case MO_16: |
557 | tcg_gen_ext16u_tl(cpu_A0, cpu_regs[R_EDI]); |
558 | gen_op_addl_A0_seg(s, R_ES); |
559 | break; |
560 | default: |
561 | tcg_abort(); |
562 | } |
563 | } |
564 | |
565 | static inline void gen_op_movl_T0_Dshift(TCGMemOp ot) |
566 | { |
567 | tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, df)); |
568 | tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot); |
569 | }; |
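Illustrative note: env->df stores the direction flag as +1 or -1, so the shift by ot above yields the signed per-iteration step used by the string helpers below:

    /* MO_32 (ot == 2): df = +1 -> T0 = +4;  df = -1 -> T0 = -4.
     * This increment is then applied to ESI/EDI via gen_op_add_reg_T0(). */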
570 | |
571 | static TCGv gen_ext_tl(TCGv dst, TCGv src, TCGMemOp size, bool sign) |
572 | { |
573 | switch (size) { |
574 | case MO_8: |
575 | if (sign) { |
576 | tcg_gen_ext8s_tl(dst, src); |
577 | } else { |
578 | tcg_gen_ext8u_tl(dst, src); |
579 | } |
580 | return dst; |
581 | case MO_16: |
582 | if (sign) { |
583 | tcg_gen_ext16s_tl(dst, src); |
584 | } else { |
585 | tcg_gen_ext16u_tl(dst, src); |
586 | } |
587 | return dst; |
588 | #ifdef TARGET_X86_64 |
589 | case MO_32: |
590 | if (sign) { |
591 | tcg_gen_ext32s_tl(dst, src); |
592 | } else { |
593 | tcg_gen_ext32u_tl(dst, src); |
594 | } |
595 | return dst; |
596 | #endif |
597 | default: |
598 | return src; |
599 | } |
600 | } |
601 | |
602 | static void gen_extu(TCGMemOp ot, TCGv reg) |
603 | { |
604 | gen_ext_tl(reg, reg, ot, false); |
605 | } |
606 | |
607 | static void gen_exts(TCGMemOp ot, TCGv reg) |
608 | { |
609 | gen_ext_tl(reg, reg, ot, true); |
610 | } |
611 | |
612 | static inline void gen_op_jnz_ecx(TCGMemOp size, int label1) |
613 | { |
614 | tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]); |
615 | gen_extu(size, cpu_tmp0); |
616 | tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1); |
617 | } |
618 | |
619 | static inline void gen_op_jz_ecx(TCGMemOp size, int label1) |
620 | { |
621 | tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]); |
622 | gen_extu(size, cpu_tmp0); |
623 | tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1); |
624 | } |
625 | |
626 | static void gen_helper_in_func(TCGMemOp ot, TCGv v, TCGv_i32 n) |
627 | { |
628 | switch (ot) { |
629 | case MO_8: |
630 | gen_helper_inb(v, n); |
631 | break; |
632 | case MO_16: |
633 | gen_helper_inw(v, n); |
634 | break; |
635 | case MO_32: |
636 | gen_helper_inl(v, n); |
637 | break; |
638 | default: |
639 | tcg_abort(); |
640 | } |
641 | } |
642 | |
643 | static void gen_helper_out_func(TCGMemOp ot, TCGv_i32 v, TCGv_i32 n) |
644 | { |
645 | switch (ot) { |
646 | case MO_8: |
647 | gen_helper_outb(v, n); |
648 | break; |
649 | case MO_16: |
650 | gen_helper_outw(v, n); |
651 | break; |
652 | case MO_32: |
653 | gen_helper_outl(v, n); |
654 | break; |
655 | default: |
656 | tcg_abort(); |
657 | } |
658 | } |
659 | |
660 | static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip, |
661 | uint32_t svm_flags) |
662 | { |
663 | int state_saved; |
664 | target_ulong next_eip; |
665 | |
666 | state_saved = 0; |
667 | if (s->pe && (s->cpl > s->iopl || s->vm86)) { |
668 | gen_update_cc_op(s); |
669 | gen_jmp_im(cur_eip); |
670 | state_saved = 1; |
671 | tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); |
672 | switch (ot) { |
673 | case MO_8: |
674 | gen_helper_check_iob(cpu_env, cpu_tmp2_i32); |
675 | break; |
676 | case MO_16: |
677 | gen_helper_check_iow(cpu_env, cpu_tmp2_i32); |
678 | break; |
679 | case MO_32: |
680 | gen_helper_check_iol(cpu_env, cpu_tmp2_i32); |
681 | break; |
682 | default: |
683 | tcg_abort(); |
684 | } |
685 | } |
686 | if (s->flags & HF_SVMI_MASK) { |
687 | if (!state_saved) { |
688 | gen_update_cc_op(s); |
689 | gen_jmp_im(cur_eip); |
690 | } |
691 | svm_flags |= (1 << (4 + ot)); |
692 | next_eip = s->pc - s->cs_base; |
693 | tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); |
694 | gen_helper_svm_check_io(cpu_env, cpu_tmp2_i32, |
695 | tcg_const_i32(svm_flags), |
696 | tcg_const_i32(next_eip - cur_eip)); |
697 | } |
698 | } |
699 | |
700 | static inline void gen_movs(DisasContext *s, TCGMemOp ot) |
701 | { |
702 | gen_string_movl_A0_ESI(s); |
703 | gen_op_ld_v(s, ot, cpu_T[0], cpu_A0); |
704 | gen_string_movl_A0_EDI(s); |
705 | gen_op_st_v(s, ot, cpu_T[0], cpu_A0); |
706 | gen_op_movl_T0_Dshift(ot); |
707 | gen_op_add_reg_T0(s->aflag, R_ESI); |
708 | gen_op_add_reg_T0(s->aflag, R_EDI); |
709 | } |
710 | |
711 | static void gen_op_update1_cc(void) |
712 | { |
713 | tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]); |
714 | } |
715 | |
716 | static void gen_op_update2_cc(void) |
717 | { |
718 | tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]); |
719 | tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]); |
720 | } |
721 | |
722 | static void gen_op_update3_cc(TCGv reg) |
723 | { |
724 | tcg_gen_mov_tl(cpu_cc_src2, reg); |
725 | tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]); |
726 | tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]); |
727 | } |
728 | |
729 | static inline void gen_op_testl_T0_T1_cc(void) |
730 | { |
731 | tcg_gen_and_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]); |
732 | } |
733 | |
734 | static void gen_op_update_neg_cc(void) |
735 | { |
736 | tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]); |
737 | tcg_gen_neg_tl(cpu_cc_src, cpu_T[0]); |
738 | tcg_gen_movi_tl(cpu_cc_srcT, 0); |
739 | } |
740 | |
741 | /* compute all eflags to cc_src */ |
742 | static void gen_compute_eflags(DisasContext *s) |
743 | { |
744 | TCGv zero, dst, src1, src2; |
745 | int live, dead; |
746 | |
747 | if (s->cc_op == CC_OP_EFLAGS) { |
748 | return; |
749 | } |
750 | if (s->cc_op == CC_OP_CLR) { |
751 | tcg_gen_movi_tl(cpu_cc_src, CC_Z); |
752 | set_cc_op(s, CC_OP_EFLAGS); |
753 | return; |
754 | } |
755 | |
756 | TCGV_UNUSED(zero); |
757 | dst = cpu_cc_dst; |
758 | src1 = cpu_cc_src; |
759 | src2 = cpu_cc_src2; |
760 | |
761 | /* Take care to not read values that are not live. */ |
762 | live = cc_op_live[s->cc_op] & ~USES_CC_SRCT; |
763 | dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2); |
764 | if (dead) { |
765 | zero = tcg_const_tl(0); |
766 | if (dead & USES_CC_DST) { |
767 | dst = zero; |
768 | } |
769 | if (dead & USES_CC_SRC) { |
770 | src1 = zero; |
771 | } |
772 | if (dead & USES_CC_SRC2) { |
773 | src2 = zero; |
774 | } |
775 | } |
776 | |
777 | gen_update_cc_op(s); |
778 | gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, cpu_cc_op); |
779 | set_cc_op(s, CC_OP_EFLAGS); |
780 | |
781 | if (dead) { |
782 | tcg_temp_free(zero); |
783 | } |
784 | } |
785 | |
786 | typedef struct CCPrepare { |
787 | TCGCond cond; |
788 | TCGv reg; |
789 | TCGv reg2; |
790 | target_ulong imm; |
791 | target_ulong mask; |
792 | bool use_reg2; |
793 | bool no_setcond; |
794 | } CCPrepare; |
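Sketch (not from the source) of what a CCPrepare value encodes and how consumers apply it:

    /* Example: the EFLAGS.Z test in gen_prepare_eflags_z() below is
     *   (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src, .mask = CC_Z }
     * Consumers such as gen_setcc1() apply the mask first, then the
     * condition:  tmp = reg & mask;  setcond(cond, dst, tmp, imm). */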
795 | |
796 | /* compute eflags.C to reg */ |
797 | static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg) |
798 | { |
799 | TCGv t0, t1; |
800 | int size, shift; |
801 | |
802 | switch (s->cc_op) { |
803 | case CC_OP_SUBB ... CC_OP_SUBQ: |
804 | /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */ |
805 | size = s->cc_op - CC_OP_SUBB; |
806 | t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false); |
807 | /* If no temporary was used, be careful not to alias t1 and t0. */ |
808 | t0 = TCGV_EQUAL(t1, cpu_cc_src) ? cpu_tmp0 : reg; |
809 | tcg_gen_mov_tl(t0, cpu_cc_srcT); |
810 | gen_extu(size, t0); |
811 | goto add_sub; |
812 | |
813 | case CC_OP_ADDB ... CC_OP_ADDQ: |
814 | /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */ |
815 | size = s->cc_op - CC_OP_ADDB; |
816 | t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false); |
817 | t0 = gen_ext_tl(reg, cpu_cc_dst, size, false); |
818 | add_sub: |
819 | return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0, |
820 | .reg2 = t1, .mask = -1, .use_reg2 = true }; |
821 | |
822 | case CC_OP_LOGICB ... CC_OP_LOGICQ: |
823 | case CC_OP_CLR: |
824 | return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 }; |
825 | |
826 | case CC_OP_INCB ... CC_OP_INCQ: |
827 | case CC_OP_DECB ... CC_OP_DECQ: |
828 | return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src, |
829 | .mask = -1, .no_setcond = true }; |
830 | |
831 | case CC_OP_SHLB ... CC_OP_SHLQ: |
832 | /* (CC_SRC >> (DATA_BITS - 1)) & 1 */ |
833 | size = s->cc_op - CC_OP_SHLB; |
834 | shift = (8 << size) - 1; |
835 | return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src, |
836 | .mask = (target_ulong)1 << shift }; |
837 | |
838 | case CC_OP_MULB ... CC_OP_MULQ: |
839 | return (CCPrepare) { .cond = TCG_COND_NE, |
840 | .reg = cpu_cc_src, .mask = -1 }; |
841 | |
842 | case CC_OP_BMILGB ... CC_OP_BMILGQ: |
843 | size = s->cc_op - CC_OP_BMILGB; |
844 | t0 = gen_ext_tl(reg, cpu_cc_src, size, false); |
845 | return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 }; |
846 | |
847 | case CC_OP_ADCX: |
848 | case CC_OP_ADCOX: |
849 | return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst, |
850 | .mask = -1, .no_setcond = true }; |
851 | |
852 | case CC_OP_EFLAGS: |
853 | case CC_OP_SARB ... CC_OP_SARQ: |
854 | /* CC_SRC & 1 */ |
855 | return (CCPrepare) { .cond = TCG_COND_NE, |
856 | .reg = cpu_cc_src, .mask = CC_C }; |
857 | |
858 | default: |
859 | /* The need to compute only C from CC_OP_DYNAMIC is important |
860 | in efficiently implementing e.g. INC at the start of a TB. */ |
861 | gen_update_cc_op(s); |
862 | gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src, |
863 | cpu_cc_src2, cpu_cc_op); |
864 | return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg, |
865 | .mask = -1, .no_setcond = true }; |
866 | } |
867 | } |
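Worked example for the SUB case above (illustrative): after 'cmp eax, ebx' with eax = 1 and ebx = 2, cc_op is CC_OP_SUBL and:

    /* CC_SRCT = 1 (left operand), CC_SRC = 2 (right operand);
     * CF = (uint32_t)CC_SRCT < (uint32_t)CC_SRC = 1,
     * which is exactly the TCG_COND_LTU comparison prepared above. */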
868 | |
869 | /* compute eflags.P to reg */ |
870 | static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg) |
871 | { |
872 | gen_compute_eflags(s); |
873 | return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src, |
874 | .mask = CC_P }; |
875 | } |
876 | |
877 | /* compute eflags.S to reg */ |
878 | static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg) |
879 | { |
880 | switch (s->cc_op) { |
881 | case CC_OP_DYNAMIC: |
882 | gen_compute_eflags(s); |
883 | /* FALLTHRU */ |
884 | case CC_OP_EFLAGS: |
885 | case CC_OP_ADCX: |
886 | case CC_OP_ADOX: |
887 | case CC_OP_ADCOX: |
888 | return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src, |
889 | .mask = CC_S }; |
890 | case CC_OP_CLR: |
891 | return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 }; |
892 | default: |
893 | { |
894 | TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3; |
895 | TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true); |
896 | return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 }; |
897 | } |
898 | } |
899 | } |
900 | |
901 | /* compute eflags.O to reg */ |
902 | static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg) |
903 | { |
904 | switch (s->cc_op) { |
905 | case CC_OP_ADOX: |
906 | case CC_OP_ADCOX: |
907 | return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2, |
908 | .mask = -1, .no_setcond = true }; |
909 | case CC_OP_CLR: |
910 | return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 }; |
911 | default: |
912 | gen_compute_eflags(s); |
913 | return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src, |
914 | .mask = CC_O }; |
915 | } |
916 | } |
917 | |
918 | /* compute eflags.Z to reg */ |
919 | static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg) |
920 | { |
921 | switch (s->cc_op) { |
922 | case CC_OP_DYNAMIC: |
923 | gen_compute_eflags(s); |
924 | /* FALLTHRU */ |
925 | case CC_OP_EFLAGS: |
926 | case CC_OP_ADCX: |
927 | case CC_OP_ADOX: |
928 | case CC_OP_ADCOX: |
929 | return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src, |
930 | .mask = CC_Z }; |
931 | case CC_OP_CLR: |
932 | return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 }; |
933 | default: |
934 | { |
935 | TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3; |
936 | TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false); |
937 | return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 }; |
938 | } |
939 | } |
940 | } |
941 | |
942 | /* perform a conditional store into register 'reg' according to jump opcode |
943 | value 'b'. In the fast case, T0 is guaranteed not to be used. */ |
944 | static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg) |
945 | { |
946 | int inv, jcc_op, cond; |
947 | TCGMemOp size; |
948 | CCPrepare cc; |
949 | TCGv t0; |
950 | |
951 | inv = b & 1; |
952 | jcc_op = (b >> 1) & 7; |
953 | |
954 | switch (s->cc_op) { |
955 | case CC_OP_SUBB ... CC_OP_SUBQ: |
956 | /* We optimize relational operators for the cmp/jcc case. */ |
957 | size = s->cc_op - CC_OP_SUBB; |
958 | switch (jcc_op) { |
959 | case JCC_BE: |
960 | tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT); |
961 | gen_extu(size, cpu_tmp4); |
962 | t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false); |
963 | cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = cpu_tmp4, |
964 | .reg2 = t0, .mask = -1, .use_reg2 = true }; |
965 | break; |
966 | |
967 | case JCC_L: |
968 | cond = TCG_COND_LT; |
969 | goto fast_jcc_l; |
970 | case JCC_LE: |
971 | cond = TCG_COND_LE; |
972 | fast_jcc_l: |
973 | tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT); |
974 | gen_exts(size, cpu_tmp4); |
975 | t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, true); |
976 | cc = (CCPrepare) { .cond = cond, .reg = cpu_tmp4, |
977 | .reg2 = t0, .mask = -1, .use_reg2 = true }; |
978 | break; |
979 | |
980 | default: |
981 | goto slow_jcc; |
982 | } |
983 | break; |
984 | |
985 | default: |
986 | slow_jcc: |
987 | /* This actually generates good code for JC, JZ and JS. */ |
988 | switch (jcc_op) { |
989 | case JCC_O: |
990 | cc = gen_prepare_eflags_o(s, reg); |
991 | break; |
992 | case JCC_B: |
993 | cc = gen_prepare_eflags_c(s, reg); |
994 | break; |
995 | case JCC_Z: |
996 | cc = gen_prepare_eflags_z(s, reg); |
997 | break; |
998 | case JCC_BE: |
999 | gen_compute_eflags(s); |
1000 | cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src, |
1001 | .mask = CC_Z | CC_C }; |
1002 | break; |
1003 | case JCC_S: |
1004 | cc = gen_prepare_eflags_s(s, reg); |
1005 | break; |
1006 | case JCC_P: |
1007 | cc = gen_prepare_eflags_p(s, reg); |
1008 | break; |
1009 | case JCC_L: |
1010 | gen_compute_eflags(s); |
1011 | if (TCGV_EQUAL(reg, cpu_cc_src)) { |
1012 | reg = cpu_tmp0; |
1013 | } |
1014 | tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */ |
1015 | tcg_gen_xor_tl(reg, reg, cpu_cc_src); |
1016 | cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg, |
1017 | .mask = CC_S }; |
1018 | break; |
1019 | default: |
1020 | case JCC_LE: |
1021 | gen_compute_eflags(s); |
1022 | if (TCGV_EQUAL(reg, cpu_cc_src)) { |
1023 | reg = cpu_tmp0; |
1024 | } |
1025 | tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */ |
1026 | tcg_gen_xor_tl(reg, reg, cpu_cc_src); |
1027 | cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg, |
1028 | .mask = CC_S | CC_Z }; |
1029 | break; |
1030 | } |
1031 | break; |
1032 | } |
1033 | |
1034 | if (inv) { |
1035 | cc.cond = tcg_invert_cond(cc.cond); |
1036 | } |
1037 | return cc; |
1038 | } |
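Example of the decode at the top of gen_prepare_cc() (standard Jcc condition encoding; illustrative): for JNE/JNZ the condition value b is 5:

    /* inv = 5 & 1 = 1;  jcc_op = (5 >> 1) & 7 = 2 = JCC_Z.
     * The Z test is prepared, then inverted via tcg_invert_cond(),
     * yielding the "not equal / not zero" condition. */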
1039 | |
1040 | static void gen_setcc1(DisasContext *s, int b, TCGv reg) |
1041 | { |
1042 | CCPrepare cc = gen_prepare_cc(s, b, reg); |
1043 | |
1044 | if (cc.no_setcond) { |
1045 | if (cc.cond == TCG_COND_EQ) { |
1046 | tcg_gen_xori_tl(reg, cc.reg, 1); |
1047 | } else { |
1048 | tcg_gen_mov_tl(reg, cc.reg); |
1049 | } |
1050 | return; |
1051 | } |
1052 | |
1053 | if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 && |
1054 | cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) { |
1055 | tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask)); |
1056 | tcg_gen_andi_tl(reg, reg, 1); |
1057 | return; |
1058 | } |
1059 | if (cc.mask != -1) { |
1060 | tcg_gen_andi_tl(reg, cc.reg, cc.mask); |
1061 | cc.reg = reg; |
1062 | } |
1063 | if (cc.use_reg2) { |
1064 | tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2); |
1065 | } else { |
1066 | tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm); |
1067 | } |
1068 | } |
1069 | |
1070 | static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg) |
1071 | { |
1072 | gen_setcc1(s, JCC_B << 1, reg); |
1073 | } |
1074 | |
1075 | /* generate a conditional jump to label 'l1' according to jump opcode |
1076 | value 'b'. In the fast case, T0 is guaranteed not to be used. */ |
1077 | static inline void gen_jcc1_noeob(DisasContext *s, int b, int l1) |
1078 | { |
1079 | CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]); |
1080 | |
1081 | if (cc.mask != -1) { |
1082 | tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask); |
1083 | cc.reg = cpu_T[0]; |
1084 | } |
1085 | if (cc.use_reg2) { |
1086 | tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1); |
1087 | } else { |
1088 | tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1); |
1089 | } |
1090 | } |
1091 | |
1092 | /* Generate a conditional jump to label 'l1' according to jump opcode |
1093 | value 'b'. In the fast case, T0 is guaranteed not to be used. |
1094 | A translation block must end soon. */ |
1095 | static inline void gen_jcc1(DisasContext *s, int b, int l1) |
1096 | { |
1097 | CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]); |
1098 | |
1099 | gen_update_cc_op(s); |
1100 | if (cc.mask != -1) { |
1101 | tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask); |
1102 | cc.reg = cpu_T[0]; |
1103 | } |
1104 | set_cc_op(s, CC_OP_DYNAMIC); |
1105 | if (cc.use_reg2) { |
1106 | tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1); |
1107 | } else { |
1108 | tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1); |
1109 | } |
1110 | } |
1111 | |
1112 | /* XXX: does not work with gdbstub "ice" single step - not a |
1113 | serious problem */ |
1114 | static int gen_jz_ecx_string(DisasContext *s, target_ulong next_eip) |
1115 | { |
1116 | int l1, l2; |
1117 | |
1118 | l1 = gen_new_label(); |
1119 | l2 = gen_new_label(); |
1120 | gen_op_jnz_ecx(s->aflag, l1); |
1121 | gen_set_label(l2); |
1122 | gen_jmp_tb(s, next_eip, 1); |
1123 | gen_set_label(l1); |
1124 | return l2; |
1125 | } |
1126 | |
1127 | static inline void gen_stos(DisasContext *s, TCGMemOp ot) |
1128 | { |
1129 | gen_op_mov_v_reg(MO_32, cpu_T[0], R_EAX); |
1130 | gen_string_movl_A0_EDI(s); |
1131 | gen_op_st_v(s, ot, cpu_T[0], cpu_A0); |
1132 | gen_op_movl_T0_Dshift(ot); |
1133 | gen_op_add_reg_T0(s->aflag, R_EDI); |
1134 | } |
1135 | |
1136 | static inline void gen_lods(DisasContext *s, TCGMemOp ot) |
1137 | { |
1138 | gen_string_movl_A0_ESI(s); |
1139 | gen_op_ld_v(s, ot, cpu_T[0], cpu_A0); |
1140 | gen_op_mov_reg_v(ot, R_EAX, cpu_T[0]); |
1141 | gen_op_movl_T0_Dshift(ot); |
1142 | gen_op_add_reg_T0(s->aflag, R_ESI); |
1143 | } |
1144 | |
1145 | static inline void gen_scas(DisasContext *s, TCGMemOp ot) |
1146 | { |
1147 | gen_string_movl_A0_EDI(s); |
1148 | gen_op_ld_v(s, ot, cpu_T[1], cpu_A0); |
1149 | gen_op(s, OP_CMPL, ot, R_EAX); |
1150 | gen_op_movl_T0_Dshift(ot); |
1151 | gen_op_add_reg_T0(s->aflag, R_EDI); |
1152 | } |
1153 | |
1154 | static inline void gen_cmps(DisasContext *s, TCGMemOp ot) |
1155 | { |
1156 | gen_string_movl_A0_EDI(s); |
1157 | gen_op_ld_v(s, ot, cpu_T[1], cpu_A0); |
1158 | gen_string_movl_A0_ESI(s); |
1159 | gen_op(s, OP_CMPL, ot, OR_TMP0); |
1160 | gen_op_movl_T0_Dshift(ot); |
1161 | gen_op_add_reg_T0(s->aflag, R_ESI); |
1162 | gen_op_add_reg_T0(s->aflag, R_EDI); |
1163 | } |
1164 | |
1165 | static inline void gen_ins(DisasContext *s, TCGMemOp ot) |
1166 | { |
1167 | if (use_icount) |
1168 | gen_io_start(); |
1169 | gen_string_movl_A0_EDI(s); |
1170 | /* Note: we must do this dummy write first to be restartable in |
1171 | case of page fault. */ |
1172 | tcg_gen_movi_tl(cpu_T[0], 0); |
1173 | gen_op_st_v(s, ot, cpu_T[0], cpu_A0); |
1174 | tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]); |
1175 | tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff); |
1176 | gen_helper_in_func(ot, cpu_T[0], cpu_tmp2_i32); |
1177 | gen_op_st_v(s, ot, cpu_T[0], cpu_A0); |
1178 | gen_op_movl_T0_Dshift(ot); |
1179 | gen_op_add_reg_T0(s->aflag, R_EDI); |
1180 | if (use_icount) |
1181 | gen_io_end(); |
1182 | } |
1183 | |
1184 | static inline void gen_outs(DisasContext *s, TCGMemOp ot) |
1185 | { |
1186 | if (use_icount) |
1187 | gen_io_start(); |
1188 | gen_string_movl_A0_ESI(s); |
1189 | gen_op_ld_v(s, ot, cpu_T[0], cpu_A0); |
1190 | |
1191 | tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]); |
1192 | tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff); |
1193 | tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[0]); |
1194 | gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32); |
1195 | |
1196 | gen_op_movl_T0_Dshift(ot); |
1197 | gen_op_add_reg_T0(s->aflag, R_ESI); |
1198 | if (use_icount) |
1199 | gen_io_end(); |
1200 | } |
1201 | |
1202 | /* same method as Valgrind: we generate jumps to current or next |
1203 | instruction */ |
1204 | #define GEN_REPZ(op) \
1205 | static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot, \ |
1206 | target_ulong cur_eip, target_ulong next_eip) \ |
1207 | { \ |
1208 | int l2;\ |
1209 | gen_update_cc_op(s); \ |
1210 | l2 = gen_jz_ecx_string(s, next_eip); \ |
1211 | gen_ ## op(s, ot); \ |
1212 | gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1213 | /* a loop would cause two single step exceptions if ECX = 1 \ |
1214 | before rep string_insn */ \ |
1215 | if (!s->jmp_opt) \ |
1216 | gen_op_jz_ecx(s->aflag, l2); \ |
1217 | gen_jmp(s, cur_eip); \ |
1218 | } |
1219 | |
1220 | #define GEN_REPZ2(op) \
1221 | static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot, \ |
1222 | target_ulong cur_eip, \ |
1223 | target_ulong next_eip, \ |
1224 | int nz) \ |
1225 | { \ |
1226 | int l2;\ |
1227 | gen_update_cc_op(s); \ |
1228 | l2 = gen_jz_ecx_string(s, next_eip); \ |
1229 | gen_ ## op(s, ot); \ |
1230 | gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1231 | gen_update_cc_op(s); \ |
1232 | gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2); \ |
1233 | if (!s->jmp_opt) \ |
1234 | gen_op_jz_ecx(s->aflag, l2); \ |
1235 | gen_jmp(s, cur_eip); \ |
1236 | } |
1237 | |
1238 | GEN_REPZ(movs) |
1239 | GEN_REPZ(stos) |
1240 | GEN_REPZ(lods) |
1241 | GEN_REPZ(ins) |
1242 | GEN_REPZ(outs) |
1243 | GEN_REPZ2(scas) |
1244 | GEN_REPZ2(cmps) |
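For reference, GEN_REPZ(movs) expands per the macro above to (sketch of the preprocessor output):

    static inline void gen_repz_movs(DisasContext *s, TCGMemOp ot,
                                     target_ulong cur_eip, target_ulong next_eip)
    {
        int l2;
        gen_update_cc_op(s);
        l2 = gen_jz_ecx_string(s, next_eip);
        gen_movs(s, ot);
        gen_op_add_reg_im(s->aflag, R_ECX, -1);
        /* a loop would cause two single step exceptions if ECX = 1
           before rep string_insn */
        if (!s->jmp_opt)
            gen_op_jz_ecx(s->aflag, l2);
        gen_jmp(s, cur_eip);
    }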
1245 | |
1246 | static void gen_helper_fp_arith_ST0_FT0(int op) |
1247 | { |
1248 | switch (op) { |
1249 | case 0: |
1250 | gen_helper_fadd_ST0_FT0(cpu_env); |
1251 | break; |
1252 | case 1: |
1253 | gen_helper_fmul_ST0_FT0(cpu_env); |
1254 | break; |
1255 | case 2: |
1256 | gen_helper_fcom_ST0_FT0(cpu_env); |
1257 | break; |
1258 | case 3: |
1259 | gen_helper_fcom_ST0_FT0(cpu_env); |
1260 | break; |
1261 | case 4: |
1262 | gen_helper_fsub_ST0_FT0(cpu_env); |
1263 | break; |
1264 | case 5: |
1265 | gen_helper_fsubr_ST0_FT0(cpu_env); |
1266 | break; |
1267 | case 6: |
1268 | gen_helper_fdiv_ST0_FT0(cpu_env); |
1269 | break; |
1270 | case 7: |
1271 | gen_helper_fdivr_ST0_FT0(cpu_env); |
1272 | break; |
1273 | } |
1274 | } |
1275 | |
1276 | /* NOTE the exception in "r" op ordering */ |
1277 | static void gen_helper_fp_arith_STN_ST0(int op, int opreg) |
1278 | { |
1279 | TCGv_i32 tmp = tcg_const_i32(opreg); |
1280 | switch (op) { |
1281 | case 0: |
1282 | gen_helper_fadd_STN_ST0(cpu_env, tmp); |
1283 | break; |
1284 | case 1: |
1285 | gen_helper_fmul_STN_ST0(cpu_env, tmp); |
1286 | break; |
1287 | case 4: |
1288 | gen_helper_fsubr_STN_ST0(cpu_env, tmp); |
1289 | break; |
1290 | case 5: |
1291 | gen_helper_fsub_STN_ST0(cpu_env, tmp); |
1292 | break; |
1293 | case 6: |
1294 | gen_helper_fdivr_STN_ST0(cpu_env, tmp); |
1295 | break; |
1296 | case 7: |
1297 | gen_helper_fdiv_STN_ST0(cpu_env, tmp); |
1298 | break; |
1299 | } |
1300 | } |
1301 | |
1302 | /* if d == OR_TMP0, it means memory operand (address in A0) */ |
1303 | static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d) |
1304 | { |
1305 | if (d != OR_TMP0) { |
1306 | gen_op_mov_v_reg(ot, cpu_T[0], d); |
1307 | } else { |
1308 | gen_op_ld_v(s1, ot, cpu_T[0], cpu_A0); |
1309 | } |
1310 | switch(op) { |
1311 | case OP_ADCL: |
1312 | gen_compute_eflags_c(s1, cpu_tmp4); |
1313 | tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]); |
1314 | tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_tmp4); |
1315 | gen_op_st_rm_T0_A0(s1, ot, d); |
1316 | gen_op_update3_cc(cpu_tmp4); |
1317 | set_cc_op(s1, CC_OP_ADCB + ot); |
1318 | break; |
1319 | case OP_SBBL: |
1320 | gen_compute_eflags_c(s1, cpu_tmp4); |
1321 | tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]); |
1322 | tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_tmp4); |
1323 | gen_op_st_rm_T0_A0(s1, ot, d); |
1324 | gen_op_update3_cc(cpu_tmp4); |
1325 | set_cc_op(s1, CC_OP_SBBB + ot); |
1326 | break; |
1327 | case OP_ADDL: |
1328 | tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]); |
1329 | gen_op_st_rm_T0_A0(s1, ot, d); |
1330 | gen_op_update2_cc(); |
1331 | set_cc_op(s1, CC_OP_ADDB + ot); |
1332 | break; |
1333 | case OP_SUBL: |
1334 | tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]); |
1335 | tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]); |
1336 | gen_op_st_rm_T0_A0(s1, ot, d); |
1337 | gen_op_update2_cc(); |
1338 | set_cc_op(s1, CC_OP_SUBB + ot); |
1339 | break; |
1340 | default: |
1341 | case OP_ANDL: |
1342 | tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]); |
1343 | gen_op_st_rm_T0_A0(s1, ot, d); |
1344 | gen_op_update1_cc(); |
1345 | set_cc_op(s1, CC_OP_LOGICB + ot); |
1346 | break; |
1347 | case OP_ORL: |
1348 | tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]); |
1349 | gen_op_st_rm_T0_A0(s1, ot, d); |
1350 | gen_op_update1_cc(); |
1351 | set_cc_op(s1, CC_OP_LOGICB + ot); |
1352 | break; |
1353 | case OP_XORL: |
1354 | tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_T[1]); |
1355 | gen_op_st_rm_T0_A0(s1, ot, d); |
1356 | gen_op_update1_cc(); |
1357 | set_cc_op(s1, CC_OP_LOGICB + ot); |
1358 | break; |
1359 | case OP_CMPL: |
1360 | tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]); |
1361 | tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]); |
1362 | tcg_gen_sub_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]); |
1363 | set_cc_op(s1, CC_OP_SUBB + ot); |
1364 | break; |
1365 | } |
1366 | } |
1367 | |
1368 | /* if d == OR_TMP0, it means memory operand (address in A0) */ |
1369 | static void gen_inc(DisasContext *s1, TCGMemOp ot, int d, int c) |
1370 | { |
1371 | if (d != OR_TMP0) { |
1372 | gen_op_mov_v_reg(ot, cpu_T[0], d); |
1373 | } else { |
1374 | gen_op_ld_v(s1, ot, cpu_T[0], cpu_A0); |
1375 | } |
1376 | gen_compute_eflags_c(s1, cpu_cc_src); |
1377 | if (c > 0) { |
1378 | tcg_gen_addi_tl(cpu_T[0], cpu_T[0], 1); |
1379 | set_cc_op(s1, CC_OP_INCB + ot); |
1380 | } else { |
1381 | tcg_gen_addi_tl(cpu_T[0], cpu_T[0], -1); |
1382 | set_cc_op(s1, CC_OP_DECB + ot); |
1383 | } |
1384 | gen_op_st_rm_T0_A0(s1, ot, d); |
1385 | tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]); |
1386 | } |
1387 | |
1388 | static void gen_shift_flags(DisasContext *s, TCGMemOp ot, TCGv result, |
1389 | TCGv shm1, TCGv count, bool is_right) |
1390 | { |
1391 | TCGv_i32 z32, s32, oldop; |
1392 | TCGv z_tl; |
1393 | |
1394 | /* Store the results into the CC variables. If we know that the |
1395 | variable must be dead, store unconditionally. Otherwise we'll |
1396 | need to not disrupt the current contents. */ |
1397 | z_tl = tcg_const_tl(0); |
1398 | if (cc_op_live[s->cc_op] & USES_CC_DST) { |
1399 | tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl, |
1400 | result, cpu_cc_dst); |
1401 | } else { |
1402 | tcg_gen_mov_tl(cpu_cc_dst, result); |
1403 | } |
1404 | if (cc_op_live[s->cc_op] & USES_CC_SRC) { |
1405 | tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl, |
1406 | shm1, cpu_cc_src); |
1407 | } else { |
1408 | tcg_gen_mov_tl(cpu_cc_src, shm1); |
1409 | } |
1410 | tcg_temp_free(z_tl); |
1411 | |
1412 | /* Get the two potential CC_OP values into temporaries. */ |
1413 | tcg_gen_movi_i32(cpu_tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot); |
1414 | if (s->cc_op == CC_OP_DYNAMIC) { |
1415 | oldop = cpu_cc_op; |
1416 | } else { |
1417 | tcg_gen_movi_i32(cpu_tmp3_i32, s->cc_op); |
1418 | oldop = cpu_tmp3_i32; |
1419 | } |
1420 | |
1421 | /* Conditionally store the CC_OP value. */ |
1422 | z32 = tcg_const_i32(0); |
1423 | s32 = tcg_temp_new_i32(); |
1424 | tcg_gen_trunc_tl_i32(s32, count); |
1425 | tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, cpu_tmp2_i32, oldop); |
1426 | tcg_temp_free_i32(z32); |
1427 | tcg_temp_free_i32(s32); |
1428 | |
1429 | /* The CC_OP value is no longer predictable. */ |
1430 | set_cc_op(s, CC_OP_DYNAMIC); |
1431 | } |
1432 | |
1433 | static void gen_shift_rm_T1(DisasContext *s, TCGMemOp ot, int op1, |
1434 | int is_right, int is_arith) |
1435 | { |
1436 | target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f); |
1437 | |
1438 | /* load */ |
1439 | if (op1 == OR_TMP0) { |
1440 | gen_op_ld_v(s, ot, cpu_T[0], cpu_A0); |
1441 | } else { |
1442 | gen_op_mov_v_reg(ot, cpu_T[0], op1); |
1443 | } |
1444 | |
1445 | tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask); |
1446 | tcg_gen_subi_tl(cpu_tmp0, cpu_T[1], 1); |
1447 | |
1448 | if (is_right) { |
1449 | if (is_arith) { |
1450 | gen_exts(ot, cpu_T[0]); |
1451 | tcg_gen_sar_tl(cpu_tmp0, cpu_T[0], cpu_tmp0); |
1452 | tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]); |
1453 | } else { |
1454 | gen_extu(ot, cpu_T[0]); |
1455 | tcg_gen_shr_tl(cpu_tmp0, cpu_T[0], cpu_tmp0); |
1456 | tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]); |
1457 | } |
1458 | } else { |
1459 | tcg_gen_shl_tl(cpu_tmp0, cpu_T[0], cpu_tmp0); |
1460 | tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]); |
1461 | } |
1462 | |
1463 | /* store */ |
1464 | gen_op_st_rm_T0_A0(s, ot, op1); |
1465 | |
1466 | gen_shift_flags(s, ot, cpu_T[0], cpu_tmp0, cpu_T[1], is_right); |
1467 | } |
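Illustrative note on the count-1 shift above: cpu_tmp0 (shm1) holds the value shifted by count-1, whose edge bit is the last bit shifted out; gen_shift_flags() turns it into CF:

    /* SHL example (MO_8): T0 = 0x80, count = 1.
     * shm1 = 0x80 << 0 = 0x80 -> CC_SRC;  result = 0x00 -> CC_DST.
     * CC_OP_SHLB then derives CF = (CC_SRC >> 7) & 1 = 1. */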
1468 | |
1469 | static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2, |
1470 | int is_right, int is_arith) |
1471 | { |
1472 | int mask = (ot == MO_64 ? 0x3f : 0x1f); |
1473 | |
1474 | /* load */ |
1475 | if (op1 == OR_TMP0) |
1476 | gen_op_ld_v(s, ot, cpu_T[0], cpu_A0); |
1477 | else |
1478 | gen_op_mov_v_reg(ot, cpu_T[0], op1); |
1479 | |
1480 | op2 &= mask; |
1481 | if (op2 != 0) { |
1482 | if (is_right) { |
1483 | if (is_arith) { |
1484 | gen_exts(ot, cpu_T[0]); |
1485 | tcg_gen_sari_tl(cpu_tmp4, cpu_T[0], op2 - 1); |
1486 | tcg_gen_sari_tl(cpu_T[0], cpu_T[0], op2); |
1487 | } else { |
1488 | gen_extu(ot, cpu_T[0]); |
1489 | tcg_gen_shri_tl(cpu_tmp4, cpu_T[0], op2 - 1); |
1490 | tcg_gen_shri_tl(cpu_T[0], cpu_T[0], op2); |
1491 | } |
1492 | } else { |
1493 | tcg_gen_shli_tl(cpu_tmp4, cpu_T[0], op2 - 1); |
1494 | tcg_gen_shli_tl(cpu_T[0], cpu_T[0], op2); |
1495 | } |
1496 | } |
1497 | |
1498 | /* store */ |
1499 | gen_op_st_rm_T0_A0(s, ot, op1); |
1500 | |
1501 | /* update eflags if non zero shift */ |
1502 | if (op2 != 0) { |
1503 | tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4); |
1504 | tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]); |
1505 | set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot); |
1506 | } |
1507 | } |
1508 | |
1509 | static inline void tcg_gen_lshift(TCGv ret, TCGv arg1, target_long arg2) |
1510 | { |
1511 | if (arg2 >= 0) |
1512 | tcg_gen_shli_tl(ret, arg1, arg2); |
1513 | else |
1514 | tcg_gen_shri_tl(ret, arg1, -arg2); |
1515 | } |
1516 | |
1517 | static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right) |
1518 | { |
1519 | target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f); |
1520 | TCGv_i32 t0, t1; |
1521 | |
1522 | /* load */ |
1523 | if (op1 == OR_TMP0) { |
1524 | gen_op_ld_v(s, ot, cpu_T[0], cpu_A0); |
1525 | } else { |
1526 | gen_op_mov_v_reg(ot, cpu_T[0], op1); |
1527 | } |
1528 | |
1529 | tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask); |
1530 | |
1531 | switch (ot) { |
1532 | case MO_8: |
1533 | /* Replicate the 8-bit input so that a 32-bit rotate works. */ |
1534 | tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]); |
1535 | tcg_gen_muli_tl(cpu_T[0], cpu_T[0], 0x01010101); |
1536 | goto do_long; |
1537 | case MO_16: |
1538 | /* Replicate the 16-bit input so that a 32-bit rotate works. */ |
1539 | tcg_gen_deposit_tl(cpu_T[0], cpu_T[0], cpu_T[0], 16, 16); |
1540 | goto do_long; |
1541 | do_long: |
1542 | #ifdef TARGET_X86_64 |
1543 | case MO_32: |
1544 | tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); |
1545 | tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]); |
1546 | if (is_right) { |
1547 | tcg_gen_rotr_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32); |
1548 | } else { |
1549 | tcg_gen_rotl_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32); |
1550 | } |
1551 | tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); |
1552 | break; |
1553 | #endif |
1554 | default: |
1555 | if (is_right) { |
1556 | tcg_gen_rotr_tl(cpu_T[0], cpu_T[0], cpu_T[1]); |
1557 | } else { |
1558 | tcg_gen_rotl_tl(cpu_T[0], cpu_T[0], cpu_T[1]); |
1559 | } |
1560 | break; |
1561 | } |
1562 | |
1563 | /* store */ |
1564 | gen_op_st_rm_T0_A0(s, ot, op1); |
1565 | |
1566 | /* We'll need the flags computed into CC_SRC. */ |
1567 | gen_compute_eflags(s); |
1568 | |
1569 | /* The value that was "rotated out" is now present at the other end |
1570 | of the word. Compute C into CC_DST and O into CC_SRC2. Note that |
1571 | since we've computed the flags into CC_SRC, these variables are |
1572 | currently dead. */ |
1573 | if (is_right) { |
1574 | tcg_gen_shri_tltcg_gen_shri_i32(cpu_cc_src2, cpu_T[0], mask - 1); |
1575 | tcg_gen_shri_tltcg_gen_shri_i32(cpu_cc_dst, cpu_T[0], mask); |
1576 | tcg_gen_andi_tltcg_gen_andi_i32(cpu_cc_dst, cpu_cc_dst, 1); |
1577 | } else { |
1578 | tcg_gen_shri_tltcg_gen_shri_i32(cpu_cc_src2, cpu_T[0], mask); |
1579 | tcg_gen_andi_tltcg_gen_andi_i32(cpu_cc_dst, cpu_T[0], 1); |
1580 | } |
1581 | tcg_gen_andi_tltcg_gen_andi_i32(cpu_cc_src2, cpu_cc_src2, 1); |
1582 | tcg_gen_xor_tltcg_gen_xor_i32(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst); |
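
/* Editor's note (illustrative, not in the original source): after a rotate
 * right, carry is the new MSB and "overflow" is MSB ^ (MSB-1).  A worked
 * 8-bit case, at the architectural level: */
#if 0
uint8_t res = 0xc0;                       /* 0x81 ROR 1 -> 1100 0000 */
int cf = (res >> 7) & 1;                  /* C = bit rotated out = 1 */
int of = ((res >> 7) ^ (res >> 6)) & 1;   /* O = top two bits differ? = 0 */
#endif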
1583 | |
1584 |     /* Now conditionally store the new CC_OP value.  If the shift count
1585 |        is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
1586 |        Otherwise reuse CC_OP_ADCOX, which has the C and O flags split out
1587 |        exactly as we computed above.  */
1588 |     t0 = tcg_const_i32(0);
1589 |     t1 = tcg_temp_new_i32();
1590 |     tcg_gen_trunc_tl_i32(t1, cpu_T[1]);
1591 |     tcg_gen_movi_i32(cpu_tmp2_i32, CC_OP_ADCOX);
1592 |     tcg_gen_movi_i32(cpu_tmp3_i32, CC_OP_EFLAGS);
1593 |     tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0,
1594 |                         cpu_tmp2_i32, cpu_tmp3_i32);
1595 |     tcg_temp_free_i32(t0);
1596 |     tcg_temp_free_i32(t1);
1597 | 
1598 |     /* The CC_OP value is no longer predictable.  */
1599 |     set_cc_op(s, CC_OP_DYNAMIC);
1600 | }
1601 | |
1602 | static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
1603 |                           int is_right)
1604 | {
1605 |     int mask = (ot == MO_64 ? 0x3f : 0x1f);
1606 |     int shift;
1607 | 
1608 |     /* load */
1609 |     if (op1 == OR_TMP0) {
1610 |         gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
1611 |     } else {
1612 |         gen_op_mov_v_reg(ot, cpu_T[0], op1);
1613 |     }
1614 | 
1615 |     op2 &= mask;
1616 |     if (op2 != 0) {
1617 |         switch (ot) {
1618 | #ifdef TARGET_X86_64
1619 |         case MO_32:
1620 |             tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
1621 |             if (is_right) {
1622 |                 tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
1623 |             } else {
1624 |                 tcg_gen_rotli_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
1625 |             }
1626 |             tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
1627 |             break;
1628 | #endif
1629 |         default:
1630 |             if (is_right) {
1631 |                 tcg_gen_rotri_tl(cpu_T[0], cpu_T[0], op2);
1632 |             } else {
1633 |                 tcg_gen_rotli_tl(cpu_T[0], cpu_T[0], op2);
1634 |             }
1635 |             break;
1636 |         case MO_8:
1637 |             mask = 7;
1638 |             goto do_shifts;
1639 |         case MO_16:
1640 |             mask = 15;
1641 |         do_shifts:
1642 |             shift = op2 & mask;
1643 |             if (is_right) {
1644 |                 shift = mask + 1 - shift;
1645 |             }
1646 |             gen_extu(ot, cpu_T[0]);
1647 |             tcg_gen_shli_tl(cpu_tmp0, cpu_T[0], shift);
1648 |             tcg_gen_shri_tl(cpu_T[0], cpu_T[0], mask + 1 - shift);
1649 |             tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
1650 |             break;
1651 |         }
1652 |     }
1653 | 
1654 |     /* store */
1655 |     gen_op_st_rm_T0_A0(s, ot, op1);
1656 | 
1657 |     if (op2 != 0) {
1658 |         /* Compute the flags into CC_SRC. */
1659 |         gen_compute_eflags(s);
1660 | 
1661 |         /* The value that was "rotated out" is now present at the other end
1662 |            of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
1663 |            since we've computed the flags into CC_SRC, these variables are
1664 |            currently dead.  */
1665 |         if (is_right) {
1666 |             tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask - 1);
1667 |             tcg_gen_shri_tl(cpu_cc_dst, cpu_T[0], mask);
1668 |             tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
1669 |         } else {
1670 |             tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask);
1671 |             tcg_gen_andi_tl(cpu_cc_dst, cpu_T[0], 1);
1672 |         }
1673 |         tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
1674 |         tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
1675 |         set_cc_op(s, CC_OP_ADCOX);
1676 |     }
1677 | }
1678 | |
1679 | /* XXX: add faster immediate = 1 case */ |
1680 | static void gen_rotc_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
1681 |                            int is_right)
1682 | {
1683 |     gen_compute_eflags(s);
1684 |     assert(s->cc_op == CC_OP_EFLAGS);
1685 | 
1686 |     /* load */
1687 |     if (op1 == OR_TMP0)
1688 |         gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
1689 |     else
1690 |         gen_op_mov_v_reg(ot, cpu_T[0], op1);
1691 | 
1692 |     if (is_right) {
1693 |         switch (ot) {
1694 |         case MO_8:
1695 |             gen_helper_rcrb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1696 |             break;
1697 |         case MO_16:
1698 |             gen_helper_rcrw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1699 |             break;
1700 |         case MO_32:
1701 |             gen_helper_rcrl(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1702 |             break;
1703 | #ifdef TARGET_X86_64
1704 |         case MO_64:
1705 |             gen_helper_rcrq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1706 |             break;
1707 | #endif
1708 |         default:
1709 |             tcg_abort();
1710 |         }
1711 |     } else {
1712 |         switch (ot) {
1713 |         case MO_8:
1714 |             gen_helper_rclb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1715 |             break;
1716 |         case MO_16:
1717 |             gen_helper_rclw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1718 |             break;
1719 |         case MO_32:
1720 |             gen_helper_rcll(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1721 |             break;
1722 | #ifdef TARGET_X86_64
1723 |         case MO_64:
1724 |             gen_helper_rclq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1725 |             break;
1726 | #endif
1727 |         default:
1728 |             tcg_abort();
1729 |         }
1730 |     }
1731 |     /* store */
1732 |     gen_op_st_rm_T0_A0(s, ot, op1);
1733 | }
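
/* Editor's sketch (not part of the original source): RCL/RCR rotate
 * *through* CF, i.e. an 8-bit RCL is a 9-bit rotate of {CF, value}; this is
 * why the helpers take cpu_env (flag access) and why EFLAGS is forced into
 * canonical form first.  A minimal 8-bit RCL-by-1: */
#if 0
unsigned rcl8(unsigned val, unsigned cf)       /* val in [0,255], cf in {0,1} */
{
    unsigned res = ((val << 1) | cf) & 0xff;   /* old CF shifts into bit 0 */
    /* new CF = old bit 7: (val >> 7) & 1 */
    return res;
}
#endif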
1734 | |
1735 | /* XXX: add faster immediate case */ |
1736 | static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
1737 |                              bool is_right, TCGv count_in)
1738 | {
1739 |     target_ulong mask = (ot == MO_64 ? 63 : 31);
1740 |     TCGv count;
1741 | 
1742 |     /* load */
1743 |     if (op1 == OR_TMP0) {
1744 |         gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
1745 |     } else {
1746 |         gen_op_mov_v_reg(ot, cpu_T[0], op1);
1747 |     }
1748 | 
1749 |     count = tcg_temp_new();
1750 |     tcg_gen_andi_tl(count, count_in, mask);
1751 | 
1752 |     switch (ot) {
1753 |     case MO_16:
1754 |         /* Note: we implement the Intel behaviour for shift count > 16.
1755 |            This means "shrdw C, B, A" shifts A:B:A >> C.  Build the B:A
1756 |            portion by constructing it as a 32-bit value.  */
1757 |         if (is_right) {
1758 |             tcg_gen_deposit_tl(cpu_tmp0, cpu_T[0], cpu_T[1], 16, 16);
1759 |             tcg_gen_mov_tl(cpu_T[1], cpu_T[0]);
1760 |             tcg_gen_mov_tl(cpu_T[0], cpu_tmp0);
1761 |         } else {
1762 |             tcg_gen_deposit_tl(cpu_T[1], cpu_T[0], cpu_T[1], 16, 16);
1763 |         }
1764 |         /* FALLTHRU */
1765 | #ifdef TARGET_X86_64
1766 |     case MO_32:
1767 |         /* Concatenate the two 32-bit values and use a 64-bit shift.  */
1768 |         tcg_gen_subi_tl(cpu_tmp0, count, 1);
1769 |         if (is_right) {
1770 |             tcg_gen_concat_tl_i64(cpu_T[0], cpu_T[0], cpu_T[1]);
1771 |             tcg_gen_shr_i64(cpu_tmp0, cpu_T[0], cpu_tmp0);
1772 |             tcg_gen_shr_i64(cpu_T[0], cpu_T[0], count);
1773 |         } else {
1774 |             tcg_gen_concat_tl_i64(cpu_T[0], cpu_T[1], cpu_T[0]);
1775 |             tcg_gen_shl_i64(cpu_tmp0, cpu_T[0], cpu_tmp0);
1776 |             tcg_gen_shl_i64(cpu_T[0], cpu_T[0], count);
1777 |             tcg_gen_shri_i64(cpu_tmp0, cpu_tmp0, 32);
1778 |             tcg_gen_shri_i64(cpu_T[0], cpu_T[0], 32);
1779 |         }
1780 |         break;
1781 | #endif
1782 |     default:
1783 |         tcg_gen_subi_tl(cpu_tmp0, count, 1);
1784 |         if (is_right) {
1785 |             tcg_gen_shr_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
1786 | 
1787 |             tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
1788 |             tcg_gen_shr_tl(cpu_T[0], cpu_T[0], count);
1789 |             tcg_gen_shl_tl(cpu_T[1], cpu_T[1], cpu_tmp4);
1790 |         } else {
1791 |             tcg_gen_shl_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
1792 |             if (ot == MO_16) {
1793 |                 /* Only needed if count > 16, for Intel behaviour.  */
1794 |                 tcg_gen_subfi_tl(cpu_tmp4, 33, count);
1795 |                 tcg_gen_shr_tl(cpu_tmp4, cpu_T[1], cpu_tmp4);
1796 |                 tcg_gen_or_tl(cpu_tmp0, cpu_tmp0, cpu_tmp4);
1797 |             }
1798 | 
1799 |             tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
1800 |             tcg_gen_shl_tl(cpu_T[0], cpu_T[0], count);
1801 |             tcg_gen_shr_tl(cpu_T[1], cpu_T[1], cpu_tmp4);
1802 |         }
1803 |         tcg_gen_movi_tl(cpu_tmp4, 0);
1804 |         tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T[1], count, cpu_tmp4,
1805 |                            cpu_tmp4, cpu_T[1]);
1806 |         tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1807 |         break;
1808 |     }
1809 | 
1810 |     /* store */
1811 |     gen_op_st_rm_T0_A0(s, ot, op1);
1812 | 
1813 |     gen_shift_flags(s, ot, cpu_T[0], cpu_tmp0, count, is_right);
1814 |     tcg_temp_free(count);
1815 | }
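
/* Editor's note (illustrative, not in the original source): for the MO_16
 * Intel behaviour above, a count above 16 pulls the destination's own bits
 * back in, because the quantity actually shifted is A:B:A.  Sketch: */
#if 0
uint16_t shrdw(uint16_t a, uint16_t b, int count)   /* count in [0, 31] */
{
    uint64_t x = ((uint64_t)a << 32) | ((uint32_t)b << 16) | a;  /* A:B:A */
    return (uint16_t)(x >> count);
}
#endif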
1816 | |
1817 | static void gen_shift(DisasContext *s1, int op, TCGMemOp ot, int d, int s)
1818 | {
1819 |     if (s != OR_TMP1)
1820 |         gen_op_mov_v_reg(ot, cpu_T[1], s);
1821 |     switch(op) {
1822 |     case OP_ROL:
1823 |         gen_rot_rm_T1(s1, ot, d, 0);
1824 |         break;
1825 |     case OP_ROR:
1826 |         gen_rot_rm_T1(s1, ot, d, 1);
1827 |         break;
1828 |     case OP_SHL:
1829 |     case OP_SHL1:
1830 |         gen_shift_rm_T1(s1, ot, d, 0, 0);
1831 |         break;
1832 |     case OP_SHR:
1833 |         gen_shift_rm_T1(s1, ot, d, 1, 0);
1834 |         break;
1835 |     case OP_SAR:
1836 |         gen_shift_rm_T1(s1, ot, d, 1, 1);
1837 |         break;
1838 |     case OP_RCL:
1839 |         gen_rotc_rm_T1(s1, ot, d, 0);
1840 |         break;
1841 |     case OP_RCR:
1842 |         gen_rotc_rm_T1(s1, ot, d, 1);
1843 |         break;
1844 |     }
1845 | }
1846 | 
1847 | static void gen_shifti(DisasContext *s1, int op, TCGMemOp ot, int d, int c)
1848 | {
1849 |     switch(op) {
1850 |     case OP_ROL:
1851 |         gen_rot_rm_im(s1, ot, d, c, 0);
1852 |         break;
1853 |     case OP_ROR:
1854 |         gen_rot_rm_im(s1, ot, d, c, 1);
1855 |         break;
1856 |     case OP_SHL:
1857 |     case OP_SHL1:
1858 |         gen_shift_rm_im(s1, ot, d, c, 0, 0);
1859 |         break;
1860 |     case OP_SHR:
1861 |         gen_shift_rm_im(s1, ot, d, c, 1, 0);
1862 |         break;
1863 |     case OP_SAR:
1864 |         gen_shift_rm_im(s1, ot, d, c, 1, 1);
1865 |         break;
1866 |     default:
1867 |         /* currently not optimized */
1868 |         tcg_gen_movi_tl(cpu_T[1], c);
1869 |         gen_shift(s1, op, ot, d, OR_TMP1);
1870 |         break;
1871 |     }
1872 | }
1873 | |
1874 | static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
1875 | {
1876 |     target_long disp;
1877 |     int havesib;
1878 |     int base;
1879 |     int index;
1880 |     int scale;
1881 |     int mod, rm, code, override, must_add_seg;
1882 |     TCGv sum;
1883 | 
1884 |     override = s->override;
1885 |     must_add_seg = s->addseg;
1886 |     if (override >= 0)
1887 |         must_add_seg = 1;
1888 |     mod = (modrm >> 6) & 3;
1889 |     rm = modrm & 7;
1890 | 
1891 |     switch (s->aflag) {
1892 |     case MO_64:
1893 |     case MO_32:
1894 |         havesib = 0;
1895 |         base = rm;
1896 |         index = -1;
1897 |         scale = 0;
1898 | 
1899 |         if (base == 4) {
1900 |             havesib = 1;
     Value stored to 'havesib' is never read
1901 |             code = cpu_ldub_code(env, s->pc++);
1902 |             scale = (code >> 6) & 3;
1903 |             index = ((code >> 3) & 7) | REX_X(s);
1904 |             if (index == 4) {
1905 |                 index = -1; /* no index */
1906 |             }
1907 |             base = (code & 7);
1908 |         }
1909 |         base |= REX_B(s);
1910 | 
1911 |         switch (mod) {
1912 |         case 0:
1913 |             if ((base & 7) == 5) {
1914 |                 base = -1;
1915 |                 disp = (int32_t)cpu_ldl_code(env, s->pc);
1916 |                 s->pc += 4;
1917 |                 if (CODE64(s) && !havesib) {
1918 |                     disp += s->pc + s->rip_offset;
1919 |                 }
1920 |             } else {
1921 |                 disp = 0;
1922 |             }
1923 |             break;
1924 |         case 1:
1925 |             disp = (int8_t)cpu_ldub_code(env, s->pc++);
1926 |             break;
1927 |         default:
1928 |         case 2:
1929 |             disp = (int32_t)cpu_ldl_code(env, s->pc);
1930 |             s->pc += 4;
1931 |             break;
1932 |         }
1933 | 
1934 |         /* For correct popl handling with esp.  */
1935 |         if (base == R_ESP && s->popl_esp_hack) {
1936 |             disp += s->popl_esp_hack;
1937 |         }
1938 | 
1939 |         /* Compute the address, with a minimum number of TCG ops.  */
1940 |         TCGV_UNUSED(sum);
1941 |         if (index >= 0) {
1942 |             if (scale == 0) {
1943 |                 sum = cpu_regs[index];
1944 |             } else {
1945 |                 tcg_gen_shli_tl(cpu_A0, cpu_regs[index], scale);
1946 |                 sum = cpu_A0;
1947 |             }
1948 |             if (base >= 0) {
1949 |                 tcg_gen_add_tl(cpu_A0, sum, cpu_regs[base]);
1950 |                 sum = cpu_A0;
1951 |             }
1952 |         } else if (base >= 0) {
1953 |             sum = cpu_regs[base];
1954 |         }
1955 |         if (TCGV_IS_UNUSED(sum)) {
1956 |             tcg_gen_movi_tl(cpu_A0, disp);
1957 |         } else {
1958 |             tcg_gen_addi_tl(cpu_A0, sum, disp);
1959 |         }
1960 | 
1961 |         if (must_add_seg) {
1962 |             if (override < 0) {
1963 |                 if (base == R_EBP || base == R_ESP) {
1964 |                     override = R_SS;
1965 |                 } else {
1966 |                     override = R_DS;
1967 |                 }
1968 |             }
1969 | 
1970 |             tcg_gen_ld_tl(cpu_tmp0, cpu_env,
1971 |                           offsetof(CPUX86State, segs[override].base));
1972 |             if (CODE64(s)) {
1973 |                 if (s->aflag == MO_32) {
1974 |                     tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
1975 |                 }
1976 |                 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
1977 |                 return;
1978 |             }
1979 | 
1980 |             tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
1981 |         }
1982 | 
1983 |         if (s->aflag == MO_32) {
1984 |             tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
1985 |         }
1986 |         break;
1987 | 
1988 |     case MO_16:
1989 |         switch (mod) {
1990 |         case 0:
1991 |             if (rm == 6) {
1992 |                 disp = cpu_lduw_code(env, s->pc);
1993 |                 s->pc += 2;
1994 |                 tcg_gen_movi_tl(cpu_A0, disp);
1995 |                 rm = 0; /* avoid SS override */
1996 |                 goto no_rm;
1997 |             } else {
1998 |                 disp = 0;
1999 |             }
2000 |             break;
2001 |         case 1:
2002 |             disp = (int8_t)cpu_ldub_code(env, s->pc++);
2003 |             break;
2004 |         default:
2005 |         case 2:
2006 |             disp = (int16_t)cpu_lduw_code(env, s->pc);
2007 |             s->pc += 2;
2008 |             break;
2009 |         }
2010 | 
2011 |         sum = cpu_A0;
2012 |         switch (rm) {
2013 |         case 0:
2014 |             tcg_gen_add_tl(cpu_A0, cpu_regs[R_EBX], cpu_regs[R_ESI]);
2015 |             break;
2016 |         case 1:
2017 |             tcg_gen_add_tl(cpu_A0, cpu_regs[R_EBX], cpu_regs[R_EDI]);
2018 |             break;
2019 |         case 2:
2020 |             tcg_gen_add_tl(cpu_A0, cpu_regs[R_EBP], cpu_regs[R_ESI]);
2021 |             break;
2022 |         case 3:
2023 |             tcg_gen_add_tl(cpu_A0, cpu_regs[R_EBP], cpu_regs[R_EDI]);
2024 |             break;
2025 |         case 4:
2026 |             sum = cpu_regs[R_ESI];
2027 |             break;
2028 |         case 5:
2029 |             sum = cpu_regs[R_EDI];
2030 |             break;
2031 |         case 6:
2032 |             sum = cpu_regs[R_EBP];
2033 |             break;
2034 |         default:
2035 |         case 7:
2036 |             sum = cpu_regs[R_EBX];
2037 |             break;
2038 |         }
2039 |         tcg_gen_addi_tl(cpu_A0, sum, disp);
2040 |         tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
2041 |     no_rm:
2042 |         if (must_add_seg) {
2043 |             if (override < 0) {
2044 |                 if (rm == 2 || rm == 3 || rm == 6) {
2045 |                     override = R_SS;
2046 |                 } else {
2047 |                     override = R_DS;
2048 |                 }
2049 |             }
2050 |             gen_op_addl_A0_seg(s, override);
2051 |         }
2052 |         break;
2053 | 
2054 |     default:
2055 |         tcg_abort();
2056 |     }
2057 | }
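
/* Editor's sketch (not part of the original source): the 32/64-bit arm
 * above computes the canonical x86 effective address from at most one SIB
 * byte and one displacement: */
#if 0
target_ulong ea = disp;                      /* always present, possibly 0 */
if (index >= 0) ea += regs[index] << scale;  /* index scaled by 1/2/4/8    */
if (base >= 0)  ea += regs[base];            /* base register, if any      */
/* e.g. 0x10(%ebx,%esi,4) -> ea = ebx + esi*4 + 0x10 */
#endif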
2058 | |
2059 | static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
2060 | {
2061 |     int mod, rm, base, code;
2062 | 
2063 |     mod = (modrm >> 6) & 3;
2064 |     if (mod == 3)
2065 |         return;
2066 |     rm = modrm & 7;
2067 | 
2068 |     switch (s->aflag) {
2069 |     case MO_64:
2070 |     case MO_32:
2071 |         base = rm;
2072 | 
2073 |         if (base == 4) {
2074 |             code = cpu_ldub_code(env, s->pc++);
2075 |             base = (code & 7);
2076 |         }
2077 | 
2078 |         switch (mod) {
2079 |         case 0:
2080 |             if (base == 5) {
2081 |                 s->pc += 4;
2082 |             }
2083 |             break;
2084 |         case 1:
2085 |             s->pc++;
2086 |             break;
2087 |         default:
2088 |         case 2:
2089 |             s->pc += 4;
2090 |             break;
2091 |         }
2092 |         break;
2093 | 
2094 |     case MO_16:
2095 |         switch (mod) {
2096 |         case 0:
2097 |             if (rm == 6) {
2098 |                 s->pc += 2;
2099 |             }
2100 |             break;
2101 |         case 1:
2102 |             s->pc++;
2103 |             break;
2104 |         default:
2105 |         case 2:
2106 |             s->pc += 2;
2107 |             break;
2108 |         }
2109 |         break;
2110 | 
2111 |     default:
2112 |         tcg_abort();
2113 |     }
2114 | }
2115 | |
2116 | /* used for LEA and MOV AX, mem */
2117 | static void gen_add_A0_ds_seg(DisasContext *s)
2118 | {
2119 |     int override, must_add_seg;
2120 |     must_add_seg = s->addseg;
2121 |     override = R_DS;
2122 |     if (s->override >= 0) {
2123 |         override = s->override;
2124 |         must_add_seg = 1;
2125 |     }
2126 |     if (must_add_seg) {
2127 | #ifdef TARGET_X86_64
2128 |         if (CODE64(s)) {
2129 |             gen_op_addq_A0_seg(override);
2130 |         } else
2131 | #endif
2132 |         {
2133 |             gen_op_addl_A0_seg(s, override);
2134 |         }
2135 |     }
2136 | }
2137 | |
2138 | /* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2139 |    OR_TMP0 */
2140 | static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
2141 |                            TCGMemOp ot, int reg, int is_store)
2142 | {
2143 |     int mod, rm;
2144 | 
2145 |     mod = (modrm >> 6) & 3;
2146 |     rm = (modrm & 7) | REX_B(s);
2147 |     if (mod == 3) {
2148 |         if (is_store) {
2149 |             if (reg != OR_TMP0)
2150 |                 gen_op_mov_v_reg(ot, cpu_T[0], reg);
2151 |             gen_op_mov_reg_v(ot, rm, cpu_T[0]);
2152 |         } else {
2153 |             gen_op_mov_v_reg(ot, cpu_T[0], rm);
2154 |             if (reg != OR_TMP0)
2155 |                 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
2156 |         }
2157 |     } else {
2158 |         gen_lea_modrm(env, s, modrm);
2159 |         if (is_store) {
2160 |             if (reg != OR_TMP0)
2161 |                 gen_op_mov_v_reg(ot, cpu_T[0], reg);
2162 |             gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
2163 |         } else {
2164 |             gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
2165 |             if (reg != OR_TMP0)
2166 |                 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
2167 |         }
2168 |     }
2169 | }
2170 | |
2171 | static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, TCGMemOp ot)
2172 | {
2173 |     uint32_t ret;
2174 | 
2175 |     switch (ot) {
2176 |     case MO_8:
2177 |         ret = cpu_ldub_code(env, s->pc);
2178 |         s->pc++;
2179 |         break;
2180 |     case MO_16:
2181 |         ret = cpu_lduw_code(env, s->pc);
2182 |         s->pc += 2;
2183 |         break;
2184 |     case MO_32:
2185 | #ifdef TARGET_X86_64
2186 |     case MO_64:
2187 | #endif
2188 |         ret = cpu_ldl_code(env, s->pc);
2189 |         s->pc += 4;
2190 |         break;
2191 |     default:
2192 |         tcg_abort();
2193 |     }
2194 |     return ret;
2195 | }
2196 | 
2197 | static inline int insn_const_size(TCGMemOp ot)
2198 | {
2199 |     if (ot <= MO_32) {
2200 |         return 1 << ot;
2201 |     } else {
2202 |         return 4;
2203 |     }
2204 | }
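
/* Editor's note (illustrative, not in the original source): the size is
 * capped at 4 because x86-64 ALU immediates stay 32-bit and are
 * sign-extended to 64 bits; e.g. "addq $-1, %rax" encodes imm32 0xffffffff.
 * A consumer would widen it along these lines: */
#if 0
uint64_t imm = (int32_t)insn_get(env, s, MO_32);   /* sign-extend 32 -> 64 */
#endif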
2205 | |
2206 | static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
2207 | {
2208 |     TranslationBlock *tb;
2209 |     target_ulong pc;
2210 | 
2211 |     pc = s->cs_base + eip;
2212 |     tb = s->tb;
2213 |     /* NOTE: we handle the case where the TB spans two pages here */
2214 |     if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
2215 |         (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) {
2216 |         /* jump to same page: we can use a direct jump */
2217 |         tcg_gen_goto_tb(tb_num);
2218 |         gen_jmp_im(eip);
2219 |         tcg_gen_exit_tb((uintptr_t)tb + tb_num);
2220 |     } else {
2221 |         /* jump to another page: currently not optimized */
2222 |         gen_jmp_im(eip);
2223 |         gen_eob(s);
2224 |     }
2225 | }
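
/* Editor's note (illustrative, not in the original source): direct TB
 * chaining is allowed only when the target lies on a page this TB already
 * covers.  With 4 KiB pages, a TB starting at 0x401ff0 whose last fetched
 * byte is at 0x402003 may chain to targets on page 0x401000 or 0x402000
 * (0x401ff0 & ~0xfff == 0x401000, (0x402004 - 1) & ~0xfff == 0x402000);
 * anything else falls back to gen_eob(). */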
2226 | |
2227 | static inline void gen_jcc(DisasContext *s, int b,
2228 |                            target_ulong val, target_ulong next_eip)
2229 | {
2230 |     int l1, l2;
2231 | 
2232 |     if (s->jmp_opt) {
2233 |         l1 = gen_new_label();
2234 |         gen_jcc1(s, b, l1);
2235 | 
2236 |         gen_goto_tb(s, 0, next_eip);
2237 | 
2238 |         gen_set_label(l1);
2239 |         gen_goto_tb(s, 1, val);
2240 |         s->is_jmp = DISAS_TB_JUMP;
2241 |     } else {
2242 |         l1 = gen_new_label();
2243 |         l2 = gen_new_label();
2244 |         gen_jcc1(s, b, l1);
2245 | 
2246 |         gen_jmp_im(next_eip);
2247 |         tcg_gen_br(l2);
2248 | 
2249 |         gen_set_label(l1);
2250 |         gen_jmp_im(val);
2251 |         gen_set_label(l2);
2252 |         gen_eob(s);
2253 |     }
2254 | }
2255 | 
2256 | static void gen_cmovcc1(CPUX86State *env, DisasContext *s, TCGMemOp ot, int b,
2257 |                         int modrm, int reg)
2258 | {
2259 |     CCPrepare cc;
2260 | 
2261 |     gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
2262 | 
2263 |     cc = gen_prepare_cc(s, b, cpu_T[1]);
2264 |     if (cc.mask != -1) {
2265 |         TCGv t0 = tcg_temp_new();
2266 |         tcg_gen_andi_tl(t0, cc.reg, cc.mask);
2267 |         cc.reg = t0;
2268 |     }
2269 |     if (!cc.use_reg2) {
2270 |         cc.reg2 = tcg_const_tl(cc.imm);
2271 |     }
2272 | 
2273 |     tcg_gen_movcond_tl(cc.cond, cpu_T[0], cc.reg, cc.reg2,
2274 |                        cpu_T[0], cpu_regs[reg]);
2275 |     gen_op_mov_reg_v(ot, reg, cpu_T[0]);
2276 | 
2277 |     if (cc.mask != -1) {
2278 |         tcg_temp_free(cc.reg);
2279 |     }
2280 |     if (!cc.use_reg2) {
2281 |         tcg_temp_free(cc.reg2);
2282 |     }
2283 | }
2284 | 
2285 | static inline void gen_op_movl_T0_seg(int seg_reg)
2286 | {
2287 |     tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
2288 |                      offsetof(CPUX86State, segs[seg_reg].selector));
2289 | }
2290 | 
2291 | static inline void gen_op_movl_seg_T0_vm(int seg_reg)
2292 | {
2293 |     tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
2294 |     tcg_gen_st32_tl(cpu_T[0], cpu_env,
2295 |                     offsetof(CPUX86State, segs[seg_reg].selector));
2296 |     tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
2297 |     tcg_gen_st_tl(cpu_T[0], cpu_env,
2298 |                   offsetof(CPUX86State, segs[seg_reg].base));
2299 | }
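
/* Editor's sketch (not part of the original source): in vm86/real mode the
 * segment base is simply selector << 4, which is what the two stores above
 * implement: */
#if 0
uint16_t sel  = 0x1234;
uint32_t base = (uint32_t)sel << 4;   /* 0x12340 */
#endif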
2300 | |
2301 | /* move T0 to seg_reg and compute if the CPU state may change. Never
2302 |    call this function with seg_reg == R_CS */
2303 | static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip)
2304 | {
2305 |     if (s->pe && !s->vm86) {
2306 |         /* XXX: optimize by finding processor state dynamically */
2307 |         gen_update_cc_op(s);
2308 |         gen_jmp_im(cur_eip);
2309 |         tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2310 |         gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), cpu_tmp2_i32);
2311 |         /* abort translation because the addseg value may change or
2312 |            because ss32 may change. For R_SS, translation must always
2313 |            stop as a special handling must be done to disable hardware
2314 |            interrupts for the next instruction */
2315 |         if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
2316 |             s->is_jmp = DISAS_TB_JUMP;
2317 |     } else {
2318 |         gen_op_movl_seg_T0_vm(seg_reg);
2319 |         if (seg_reg == R_SS)
2320 |             s->is_jmp = DISAS_TB_JUMP;
2321 |     }
2322 | }
2323 | 
2324 | static inline int svm_is_rep(int prefixes)
2325 | {
2326 |     return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
2327 | }
2328 | 
2329 | static inline void
2330 | gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
2331 |                               uint32_t type, uint64_t param)
2332 | {
2333 |     /* no SVM activated; fast case */
2334 |     if (likely(!(s->flags & HF_SVMI_MASK)))
2335 |         return;
2336 |     gen_update_cc_op(s);
2337 |     gen_jmp_im(pc_start - s->cs_base);
2338 |     gen_helper_svm_check_intercept_param(cpu_env, tcg_const_i32(type),
2339 |                                          tcg_const_i64(param));
2340 | }
2341 | 
2342 | static inline void
2343 | gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
2344 | {
2345 |     gen_svm_check_intercept_param(s, pc_start, type, 0);
2346 | }
2347 | 
2348 | static inline void gen_stack_update(DisasContext *s, int addend)
2349 | {
2350 | #ifdef TARGET_X86_64
2351 |     if (CODE64(s)) {
2352 |         gen_op_add_reg_im(MO_64, R_ESP, addend);
2353 |     } else
2354 | #endif
2355 |     if (s->ss32) {
2356 |         gen_op_add_reg_im(MO_32, R_ESP, addend);
2357 |     } else {
2358 |         gen_op_add_reg_im(MO_16, R_ESP, addend);
2359 |     }
2360 | }
2361 | |
2362 | /* Generate a push. It depends on ss32, addseg and dflag.  */
2363 | static void gen_push_v(DisasContext *s, TCGv val)
2364 | {
2365 |     TCGMemOp a_ot, d_ot = mo_pushpop(s, s->dflag);
2366 |     int size = 1 << d_ot;
2367 |     TCGv new_esp = cpu_A0;
2368 | 
2369 |     tcg_gen_subi_tl(cpu_A0, cpu_regs[R_ESP], size);
2370 | 
2371 |     if (CODE64(s)) {
2372 |         a_ot = MO_64;
2373 |     } else if (s->ss32) {
2374 |         a_ot = MO_32;
2375 |         if (s->addseg) {
2376 |             new_esp = cpu_tmp4;
2377 |             tcg_gen_mov_tl(new_esp, cpu_A0);
2378 |             gen_op_addl_A0_seg(s, R_SS);
2379 |         } else {
2380 |             tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
2381 |         }
2382 |     } else {
2383 |         a_ot = MO_16;
2384 |         new_esp = cpu_tmp4;
2385 |         tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
2386 |         tcg_gen_mov_tl(new_esp, cpu_A0);
2387 |         gen_op_addl_A0_seg(s, R_SS);
2388 |     }
2389 | 
2390 |     gen_op_st_v(s, d_ot, val, cpu_A0);
2391 |     gen_op_mov_reg_v(a_ot, R_ESP, new_esp);
2392 | }
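
/* Editor's sketch (not part of the original source): the sequence above is
 * the usual pre-decrement push, with the width of the ESP update (a_ot)
 * chosen independently of the operand width (d_ot): */
#if 0
esp -= 1 << d_ot;            /* 2, 4 or 8 bytes                       */
store(ss_base + esp, val);   /* ss_base added only when addseg/16-bit */
#endif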
2393 | |
2394 | /* two step pop is necessary for precise exceptions */
2395 | static TCGMemOp gen_pop_T0(DisasContext *s)
2396 | {
2397 |     TCGMemOp d_ot = mo_pushpop(s, s->dflag);
2398 |     TCGv addr = cpu_A0;
2399 | 
2400 |     if (CODE64(s)) {
2401 |         addr = cpu_regs[R_ESP];
2402 |     } else if (!s->ss32) {
2403 |         tcg_gen_ext16u_tl(cpu_A0, cpu_regs[R_ESP]);
2404 |         gen_op_addl_A0_seg(s, R_SS);
2405 |     } else if (s->addseg) {
2406 |         tcg_gen_mov_tl(cpu_A0, cpu_regs[R_ESP]);
2407 |         gen_op_addl_A0_seg(s, R_SS);
2408 |     } else {
2409 |         tcg_gen_ext32u_tl(cpu_A0, cpu_regs[R_ESP]);
2410 |     }
2411 | 
2412 |     gen_op_ld_v(s, d_ot, cpu_T[0], addr);
2413 |     return d_ot;
2414 | }
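
/* Editor's note (illustrative, not in the original source): the load and
 * the ESP update are split so that a fault on the load leaves ESP intact;
 * callers perform the second step explicitly: */
#if 0
ot = gen_pop_T0(s);      /* may fault: ESP not yet modified */
gen_pop_update(s, ot);   /* commit ESP += 1 << ot           */
#endif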
2415 | |
2416 | static void gen_pop_update(DisasContext *s, TCGMemOp ot)
2417 | {
2418 |     gen_stack_update(s, 1 << ot);
2419 | }
2420 | 
2421 | static void gen_stack_A0(DisasContext *s)
2422 | {
2423 |     gen_op_movl_A0_reg(R_ESP);
2424 |     if (!s->ss32)
2425 |         tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
2426 |     tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2427 |     if (s->addseg)
2428 |         gen_op_addl_A0_seg(s, R_SS);
2429 | }
2430 | 
2431 | /* NOTE: wrap around in 16 bit not fully handled */
2432 | static void gen_pusha(DisasContext *s)
2433 | {
2434 |     int i;
2435 |     gen_op_movl_A0_reg(R_ESP);
2436 |     gen_op_addl_A0_im(-8 << s->dflag);
2437 |     if (!s->ss32)
2438 |         tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
2439 |     tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2440 |     if (s->addseg)
2441 |         gen_op_addl_A0_seg(s, R_SS);
2442 |     for (i = 0; i < 8; i++) {
2443 |         gen_op_mov_v_reg(MO_32, cpu_T[0], 7 - i);
2444 |         gen_op_st_v(s, s->dflag, cpu_T[0], cpu_A0);
2445 |         gen_op_addl_A0_im(1 << s->dflag);
2446 |     }
2447 |     gen_op_mov_reg_v(MO_16 + s->ss32, R_ESP, cpu_T[1]);
2448 | }
2449 | 
2450 | /* NOTE: wrap around in 16 bit not fully handled */
2451 | static void gen_popa(DisasContext *s)
2452 | {
2453 |     int i;
2454 |     gen_op_movl_A0_reg(R_ESP);
2455 |     if (!s->ss32)
2456 |         tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
2457 |     tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2458 |     tcg_gen_addi_tl(cpu_T[1], cpu_T[1], 8 << s->dflag);
2459 |     if (s->addseg)
2460 |         gen_op_addl_A0_seg(s, R_SS);
2461 |     for (i = 0; i < 8; i++) {
2462 |         /* ESP is not reloaded */
2463 |         if (i != 3) {
2464 |             gen_op_ld_v(s, s->dflag, cpu_T[0], cpu_A0);
2465 |             gen_op_mov_reg_v(s->dflag, 7 - i, cpu_T[0]);
2466 |         }
2467 |         gen_op_addl_A0_im(1 << s->dflag);
2468 |     }
2469 |     gen_op_mov_reg_v(MO_16 + s->ss32, R_ESP, cpu_T[1]);
2470 | }
2471 | |
2472 | static void gen_enter(DisasContext *s, int esp_addend, int level)
2473 | {
2474 |     TCGMemOp ot = mo_pushpop(s, s->dflag);
2475 |     int opsize = 1 << ot;
2476 | 
2477 |     level &= 0x1f;
2478 | #ifdef TARGET_X86_64
2479 |     if (CODE64(s)) {
2480 |         gen_op_movl_A0_reg(R_ESP);
2481 |         gen_op_addq_A0_im(-opsize);
2482 |         tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2483 | 
2484 |         /* push bp */
2485 |         gen_op_mov_v_reg(MO_32, cpu_T[0], R_EBP);
2486 |         gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
2487 |         if (level) {
2488 |             /* XXX: must save state */
2489 |             gen_helper_enter64_level(cpu_env, tcg_const_i32(level),
2490 |                                      tcg_const_i32((ot == MO_64)),
2491 |                                      cpu_T[1]);
2492 |         }
2493 |         gen_op_mov_reg_v(ot, R_EBP, cpu_T[1]);
2494 |         tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
2495 |         gen_op_mov_reg_v(MO_64, R_ESP, cpu_T[1]);
2496 |     } else
2497 | #endif
2498 |     {
2499 |         gen_op_movl_A0_reg(R_ESP);
2500 |         gen_op_addl_A0_im(-opsize);
2501 |         if (!s->ss32)
2502 |             tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
2503 |         tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2504 |         if (s->addseg)
2505 |             gen_op_addl_A0_seg(s, R_SS);
2506 |         /* push bp */
2507 |         gen_op_mov_v_reg(MO_32, cpu_T[0], R_EBP);
2508 |         gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
2509 |         if (level) {
2510 |             /* XXX: must save state */
2511 |             gen_helper_enter_level(cpu_env, tcg_const_i32(level),
2512 |                                    tcg_const_i32(s->dflag - 1),
2513 |                                    cpu_T[1]);
2514 |         }
2515 |         gen_op_mov_reg_v(ot, R_EBP, cpu_T[1]);
2516 |         tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
2517 |         gen_op_mov_reg_v(MO_16 + s->ss32, R_ESP, cpu_T[1]);
2518 |     }
2519 | }
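
/* Editor's sketch (not part of the original source): ignoring the nested
 * frame-pointer copies handled by the level helper, ENTER amounts to: */
#if 0
push(ebp);                               /* save caller's frame pointer    */
ebp = esp;                               /* new frame base (cpu_T[1] above)*/
esp = ebp - esp_addend - opsize * level; /* reserve the local storage area */
#endif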
2520 | |
2521 | static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
2522 | {
2523 |     gen_update_cc_op(s);
2524 |     gen_jmp_im(cur_eip);
2525 |     gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno));
2526 |     s->is_jmp = DISAS_TB_JUMP;
2527 | }
2528 | 
2529 | /* an interrupt is different from an exception because of the
2530 |    privilege checks */
2531 | static void gen_interrupt(DisasContext *s, int intno,
2532 |                           target_ulong cur_eip, target_ulong next_eip)
2533 | {
2534 |     gen_update_cc_op(s);
2535 |     gen_jmp_im(cur_eip);
2536 |     gen_helper_raise_interrupt(cpu_env, tcg_const_i32(intno),
2537 |                                tcg_const_i32(next_eip - cur_eip));
2538 |     s->is_jmp = DISAS_TB_JUMP;
2539 | }
2540 | 
2541 | static void gen_debug(DisasContext *s, target_ulong cur_eip)
2542 | {
2543 |     gen_update_cc_op(s);
2544 |     gen_jmp_im(cur_eip);
2545 |     gen_helper_debug(cpu_env);
2546 |     s->is_jmp = DISAS_TB_JUMP;
2547 | }
2548 | 
2549 | /* generate a generic end of block. Trace exception is also generated
2550 |    if needed */
2551 | static void gen_eob(DisasContext *s)
2552 | {
2553 |     gen_update_cc_op(s);
2554 |     if (s->tb->flags & HF_INHIBIT_IRQ_MASK) {
2555 |         gen_helper_reset_inhibit_irq(cpu_env);
2556 |     }
2557 |     if (s->tb->flags & HF_RF_MASK) {
2558 |         gen_helper_reset_rf(cpu_env);
2559 |     }
2560 |     if (s->singlestep_enabled) {
2561 |         gen_helper_debug(cpu_env);
2562 |     } else if (s->tf) {
2563 |         gen_helper_single_step(cpu_env);
2564 |     } else {
2565 |         tcg_gen_exit_tb(0);
2566 |     }
2567 |     s->is_jmp = DISAS_TB_JUMP;
2568 | }
2569 | 
2570 | /* generate a jump to eip. No segment change must happen before as a
2571 |    direct call to the next block may occur */
2572 | static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
2573 | {
2574 |     gen_update_cc_op(s);
2575 |     set_cc_op(s, CC_OP_DYNAMIC);
2576 |     if (s->jmp_opt) {
2577 |         gen_goto_tb(s, tb_num, eip);
2578 |         s->is_jmp = DISAS_TB_JUMP;
2579 |     } else {
2580 |         gen_jmp_im(eip);
2581 |         gen_eob(s);
2582 |     }
2583 | }
2584 | 
2585 | static void gen_jmp(DisasContext *s, target_ulong eip)
2586 | {
2587 |     gen_jmp_tb(s, eip, 0);
2588 | }
2589 | 
2590 | static inline void gen_ldq_env_A0(DisasContext *s, int offset)
2591 | {
2592 |     tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
2593 |     tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
2594 | }
2595 | 
2596 | static inline void gen_stq_env_A0(DisasContext *s, int offset)
2597 | {
2598 |     tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
2599 |     tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
2600 | }
2601 | |
2602 | static inline void gen_ldo_env_A0(DisasContext *s, int offset)
2603 | {
2604 |     int mem_index = s->mem_index;
2605 |     tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
2606 |     tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2607 |     tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2608 |     tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
2609 |     tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2610 | }
2611 | 
2612 | static inline void gen_sto_env_A0(DisasContext *s, int offset)
2613 | {
2614 |     int mem_index = s->mem_index;
2615 |     tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2616 |     tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
2617 |     tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2618 |     tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2619 |     tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
2620 | }
2621 | 
2622 | static inline void gen_op_movo(int d_offset, int s_offset)
2623 | {
2624 |     tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2625 |     tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2626 |     tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + 8);
2627 |     tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + 8);
2628 | }
2629 | 
2630 | static inline void gen_op_movq(int d_offset, int s_offset)
2631 | {
2632 |     tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2633 |     tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2634 | }
2635 | 
2636 | static inline void gen_op_movl(int d_offset, int s_offset)
2637 | {
2638 |     tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
2639 |     tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
2640 | }
2641 | 
2642 | static inline void gen_op_movq_env_0(int d_offset)
2643 | {
2644 |     tcg_gen_movi_i64(cpu_tmp1_i64, 0);
2645 |     tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2646 | }
2647 | 
2648 | typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg);
2649 | typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg);
2650 | typedef void (*SSEFunc_0_epi)(TCGv_ptr env, TCGv_ptr reg, TCGv_i32 val);
2651 | typedef void (*SSEFunc_0_epl)(TCGv_ptr env, TCGv_ptr reg, TCGv_i64 val);
2652 | typedef void (*SSEFunc_0_epp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b);
2653 | typedef void (*SSEFunc_0_eppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2654 |                                TCGv_i32 val);
2655 | typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val);
2656 | typedef void (*SSEFunc_0_eppt)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2657 |                                TCGv val);
2658 | 
2659 | #define SSE_SPECIAL ((void *)1)
2660 | #define SSE_DUMMY ((void *)2)
2661 | 
2662 | #define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
2663 | #define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
2664 |                      gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
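
/* Editor's note (illustrative, not in the original source): each row of the
 * tables below is indexed by the mandatory prefix -- [0] none, [1] 0x66,
 * [2] 0xF3, [3] 0xF2 -- so for opcode 0x58: */
#if 0
sse_op_table1[0x58][0];   /* addps */
sse_op_table1[0x58][1];   /* addpd */
sse_op_table1[0x58][2];   /* addss */
sse_op_table1[0x58][3];   /* addsd */
#endif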
2665 | |
2666 | static const SSEFunc_0_epp sse_op_table1[256][4] = {
2667 |     /* 3DNow! extensions */
2668 |     [0x0e] = { SSE_DUMMY }, /* femms */
2669 |     [0x0f] = { SSE_DUMMY }, /* pf... */
2670 |     /* pure SSE operations */
2671 |     [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2672 |     [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2673 |     [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
2674 |     [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
2675 |     [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
2676 |     [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
2677 |     [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
2678 |     [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */
2679 | 
2680 |     [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2681 |     [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2682 |     [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
2683 |     [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
2684 |     [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
2685 |     [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
2686 |     [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
2687 |     [0x2f] = { gen_helper_comiss, gen_helper_comisd },
2688 |     [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
2689 |     [0x51] = SSE_FOP(sqrt),
2690 |     [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
2691 |     [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
2692 |     [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
2693 |     [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
2694 |     [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
2695 |     [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
2696 |     [0x58] = SSE_FOP(add),
2697 |     [0x59] = SSE_FOP(mul),
2698 |     [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
2699 |                gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
2700 |     [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
2701 |     [0x5c] = SSE_FOP(sub),
2702 |     [0x5d] = SSE_FOP(min),
2703 |     [0x5e] = SSE_FOP(div),
2704 |     [0x5f] = SSE_FOP(max),
2705 | 
2706 |     [0xc2] = SSE_FOP(cmpeq),
2707 |     [0xc6] = { (SSEFunc_0_epp)gen_helper_shufps,
2708 |                (SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */
2709 | 
2710 |     /* SSSE3, SSE4, MOVBE, CRC32, BMI1, BMI2, ADX.  */
2711 |     [0x38] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2712 |     [0x3a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2713 | 
2714 |     /* MMX ops and their SSE extensions */
2715 |     [0x60] = MMX_OP2(punpcklbw),
2716 |     [0x61] = MMX_OP2(punpcklwd),
2717 |     [0x62] = MMX_OP2(punpckldq),
2718 |     [0x63] = MMX_OP2(packsswb),
2719 |     [0x64] = MMX_OP2(pcmpgtb),
2720 |     [0x65] = MMX_OP2(pcmpgtw),
2721 |     [0x66] = MMX_OP2(pcmpgtl),
2722 |     [0x67] = MMX_OP2(packuswb),
2723 |     [0x68] = MMX_OP2(punpckhbw),
2724 |     [0x69] = MMX_OP2(punpckhwd),
2725 |     [0x6a] = MMX_OP2(punpckhdq),
2726 |     [0x6b] = MMX_OP2(packssdw),
2727 |     [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
2728 |     [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
2729 |     [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
2730 |     [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2731 |     [0x70] = { (SSEFunc_0_epp)gen_helper_pshufw_mmx,
2732 |                (SSEFunc_0_epp)gen_helper_pshufd_xmm,
2733 |                (SSEFunc_0_epp)gen_helper_pshufhw_xmm,
2734 |                (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */
2735 |     [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
2736 |     [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
2737 |     [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
2738 |     [0x74] = MMX_OP2(pcmpeqb),
2739 |     [0x75] = MMX_OP2(pcmpeqw),
2740 |     [0x76] = MMX_OP2(pcmpeql),
2741 |     [0x77] = { SSE_DUMMY }, /* emms */
2742 |     [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
2743 |     [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
2744 |     [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
2745 |     [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
2746 |     [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, , movq */
2747 |     [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2748 |     [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
2749 |     [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
2750 |     [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
2751 |     [0xd1] = MMX_OP2(psrlw),
2752 |     [0xd2] = MMX_OP2(psrld),
2753 |     [0xd3] = MMX_OP2(psrlq),
2754 |     [0xd4] = MMX_OP2(paddq),
2755 |     [0xd5] = MMX_OP2(pmullw),
2756 |     [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2757 |     [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
2758 |     [0xd8] = MMX_OP2(psubusb),
2759 |     [0xd9] = MMX_OP2(psubusw),
2760 |     [0xda] = MMX_OP2(pminub),
2761 |     [0xdb] = MMX_OP2(pand),
2762 |     [0xdc] = MMX_OP2(paddusb),
2763 |     [0xdd] = MMX_OP2(paddusw),
2764 |     [0xde] = MMX_OP2(pmaxub),
2765 |     [0xdf] = MMX_OP2(pandn),
2766 |     [0xe0] = MMX_OP2(pavgb),
2767 |     [0xe1] = MMX_OP2(psraw),
2768 |     [0xe2] = MMX_OP2(psrad),
2769 |     [0xe3] = MMX_OP2(pavgw),
2770 |     [0xe4] = MMX_OP2(pmulhuw),
2771 |     [0xe5] = MMX_OP2(pmulhw),
2772 |     [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
2773 |     [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntdq */
2774 |     [0xe8] = MMX_OP2(psubsb),
2775 |     [0xe9] = MMX_OP2(psubsw),
2776 |     [0xea] = MMX_OP2(pminsw),
2777 |     [0xeb] = MMX_OP2(por),
2778 |     [0xec] = MMX_OP2(paddsb),
2779 |     [0xed] = MMX_OP2(paddsw),
2780 |     [0xee] = MMX_OP2(pmaxsw),
2781 |     [0xef] = MMX_OP2(pxor),
2782 |     [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
2783 |     [0xf1] = MMX_OP2(psllw),
2784 |     [0xf2] = MMX_OP2(pslld),
2785 |     [0xf3] = MMX_OP2(psllq),
2786 |     [0xf4] = MMX_OP2(pmuludq),
2787 |     [0xf5] = MMX_OP2(pmaddwd),
2788 |     [0xf6] = MMX_OP2(psadbw),
2789 |     [0xf7] = { (SSEFunc_0_epp)gen_helper_maskmov_mmx,
2790 |                (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */
2791 |     [0xf8] = MMX_OP2(psubb),
2792 |     [0xf9] = MMX_OP2(psubw),
2793 |     [0xfa] = MMX_OP2(psubl),
2794 |     [0xfb] = MMX_OP2(psubq),
2795 |     [0xfc] = MMX_OP2(paddb),
2796 |     [0xfd] = MMX_OP2(paddw),
2797 |     [0xfe] = MMX_OP2(paddl),
2798 | };
2799 | |
2800 | static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = {
2801 |     [0 + 2] = MMX_OP2(psrlw),
2802 |     [0 + 4] = MMX_OP2(psraw),
2803 |     [0 + 6] = MMX_OP2(psllw),
2804 |     [8 + 2] = MMX_OP2(psrld),
2805 |     [8 + 4] = MMX_OP2(psrad),
2806 |     [8 + 6] = MMX_OP2(pslld),
2807 |     [16 + 2] = MMX_OP2(psrlq),
2808 |     [16 + 3] = { NULL, gen_helper_psrldq_xmm },
2809 |     [16 + 6] = MMX_OP2(psllq),
2810 |     [16 + 7] = { NULL, gen_helper_pslldq_xmm },
2811 | };
2812 | 
2813 | static const SSEFunc_0_epi sse_op_table3ai[] = {
2814 |     gen_helper_cvtsi2ss,
2815 |     gen_helper_cvtsi2sd
2816 | };
2817 | 
2818 | #ifdef TARGET_X86_64
2819 | static const SSEFunc_0_epl sse_op_table3aq[] = {
2820 |     gen_helper_cvtsq2ss,
2821 |     gen_helper_cvtsq2sd
2822 | };
2823 | #endif
2824 | 
2825 | static const SSEFunc_i_ep sse_op_table3bi[] = {
2826 |     gen_helper_cvttss2si,
2827 |     gen_helper_cvtss2si,
2828 |     gen_helper_cvttsd2si,
2829 |     gen_helper_cvtsd2si
2830 | };
2831 | 
2832 | #ifdef TARGET_X86_64
2833 | static const SSEFunc_l_ep sse_op_table3bq[] = {
2834 |     gen_helper_cvttss2sq,
2835 |     gen_helper_cvtss2sq,
2836 |     gen_helper_cvttsd2sq,
2837 |     gen_helper_cvtsd2sq
2838 | };
2839 | #endif
2840 | 
2841 | static const SSEFunc_0_epp sse_op_table4[8][4] = {
2842 |     SSE_FOP(cmpeq),
2843 |     SSE_FOP(cmplt),
2844 |     SSE_FOP(cmple),
2845 |     SSE_FOP(cmpunord),
2846 |     SSE_FOP(cmpneq),
2847 |     SSE_FOP(cmpnlt),
2848 |     SSE_FOP(cmpnle),
2849 |     SSE_FOP(cmpord),
2850 | };
2851 | |
2852 | static const SSEFunc_0_epp sse_op_table5[256] = {
2853 |     [0x0c] = gen_helper_pi2fw,
2854 |     [0x0d] = gen_helper_pi2fd,
2855 |     [0x1c] = gen_helper_pf2iw,
2856 |     [0x1d] = gen_helper_pf2id,
2857 |     [0x8a] = gen_helper_pfnacc,
2858 |     [0x8e] = gen_helper_pfpnacc,
2859 |     [0x90] = gen_helper_pfcmpge,
2860 |     [0x94] = gen_helper_pfmin,
2861 |     [0x96] = gen_helper_pfrcp,
2862 |     [0x97] = gen_helper_pfrsqrt,
2863 |     [0x9a] = gen_helper_pfsub,
2864 |     [0x9e] = gen_helper_pfadd,
2865 |     [0xa0] = gen_helper_pfcmpgt,
2866 |     [0xa4] = gen_helper_pfmax,
2867 |     [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
2868 |     [0xa7] = gen_helper_movq, /* pfrsqit1 */
2869 |     [0xaa] = gen_helper_pfsubr,
2870 |     [0xae] = gen_helper_pfacc,
2871 |     [0xb0] = gen_helper_pfcmpeq,
2872 |     [0xb4] = gen_helper_pfmul,
2873 |     [0xb6] = gen_helper_movq, /* pfrcpit2 */
2874 |     [0xb7] = gen_helper_pmulhrw_mmx,
2875 |     [0xbb] = gen_helper_pswapd,
2876 |     [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
2877 | };
2878 | 
2879 | struct SSEOpHelper_epp {
2880 |     SSEFunc_0_epp op[2];
2881 |     uint32_t ext_mask;
2882 | };
2883 | 
2884 | struct SSEOpHelper_eppi {
2885 |     SSEFunc_0_eppi op[2];
2886 |     uint32_t ext_mask;
2887 | };
2888 | 
2889 | #define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
2890 | #define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
2891 | #define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
2892 | #define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
2893 | #define PCLMULQDQ_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, \
2894 |                           CPUID_EXT_PCLMULQDQ }
2895 | #define AESNI_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_AES }
2896 | |
2897 | static const struct SSEOpHelper_epp sse_op_table6[256] = { |
2898 | [0x00] = SSSE3_OP(pshufb){ { gen_helper_pshufb_mmx, gen_helper_pshufb_xmm }, (1 << 9) }, |
2899 | [0x01] = SSSE3_OP(phaddw){ { gen_helper_phaddw_mmx, gen_helper_phaddw_xmm }, (1 << 9) }, |
2900 | [0x02] = SSSE3_OP(phaddd){ { gen_helper_phaddd_mmx, gen_helper_phaddd_xmm }, (1 << 9) }, |
2901 | [0x03] = SSSE3_OP(phaddsw){ { gen_helper_phaddsw_mmx, gen_helper_phaddsw_xmm }, (1 << 9) }, |
2902 | [0x04] = SSSE3_OP(pmaddubsw){ { gen_helper_pmaddubsw_mmx, gen_helper_pmaddubsw_xmm }, (1 << 9) }, |
2903 | [0x05] = SSSE3_OP(phsubw){ { gen_helper_phsubw_mmx, gen_helper_phsubw_xmm }, (1 << 9) }, |
2904 | [0x06] = SSSE3_OP(phsubd){ { gen_helper_phsubd_mmx, gen_helper_phsubd_xmm }, (1 << 9) }, |
2905 | [0x07] = SSSE3_OP(phsubsw){ { gen_helper_phsubsw_mmx, gen_helper_phsubsw_xmm }, (1 << 9) }, |
2906 | [0x08] = SSSE3_OP(psignb){ { gen_helper_psignb_mmx, gen_helper_psignb_xmm }, (1 << 9) }, |
2907 | [0x09] = SSSE3_OP(psignw){ { gen_helper_psignw_mmx, gen_helper_psignw_xmm }, (1 << 9) }, |
2908 | [0x0a] = SSSE3_OP(psignd){ { gen_helper_psignd_mmx, gen_helper_psignd_xmm }, (1 << 9) }, |
2909 | [0x0b] = SSSE3_OP(pmulhrsw){ { gen_helper_pmulhrsw_mmx, gen_helper_pmulhrsw_xmm }, (1 << 9) }, |
2910 | [0x10] = SSE41_OP(pblendvb){ { ((void*)0), gen_helper_pblendvb_xmm }, (1 << 19) }, |
2911 | [0x14] = SSE41_OP(blendvps){ { ((void*)0), gen_helper_blendvps_xmm }, (1 << 19) }, |
2912 | [0x15] = SSE41_OP(blendvpd){ { ((void*)0), gen_helper_blendvpd_xmm }, (1 << 19) }, |
2913 | [0x17] = SSE41_OP(ptest){ { ((void*)0), gen_helper_ptest_xmm }, (1 << 19) }, |
2914 | [0x1c] = SSSE3_OP(pabsb){ { gen_helper_pabsb_mmx, gen_helper_pabsb_xmm }, (1 << 9) }, |
2915 | [0x1d] = SSSE3_OP(pabsw){ { gen_helper_pabsw_mmx, gen_helper_pabsw_xmm }, (1 << 9) }, |
2916 | [0x1e] = SSSE3_OP(pabsd){ { gen_helper_pabsd_mmx, gen_helper_pabsd_xmm }, (1 << 9) }, |
2917 | [0x20] = SSE41_OP(pmovsxbw){ { ((void*)0), gen_helper_pmovsxbw_xmm }, (1 << 19) }, |
2918 | [0x21] = SSE41_OP(pmovsxbd){ { ((void*)0), gen_helper_pmovsxbd_xmm }, (1 << 19) }, |
2919 | [0x22] = SSE41_OP(pmovsxbq){ { ((void*)0), gen_helper_pmovsxbq_xmm }, (1 << 19) }, |
2920 | [0x23] = SSE41_OP(pmovsxwd){ { ((void*)0), gen_helper_pmovsxwd_xmm }, (1 << 19) }, |
2921 | [0x24] = SSE41_OP(pmovsxwq){ { ((void*)0), gen_helper_pmovsxwq_xmm }, (1 << 19) }, |
2922 | [0x25] = SSE41_OP(pmovsxdq){ { ((void*)0), gen_helper_pmovsxdq_xmm }, (1 << 19) }, |
2923 | [0x28] = SSE41_OP(pmuldq){ { ((void*)0), gen_helper_pmuldq_xmm }, (1 << 19) }, |
2924 | [0x29] = SSE41_OP(pcmpeqq){ { ((void*)0), gen_helper_pcmpeqq_xmm }, (1 << 19) }, |
2925 | [0x2a] = SSE41_SPECIAL{ { ((void*)0), ((void *)1) }, (1 << 19) }, /* movntqda */ |
2926 |     [0x2b] = SSE41_OP(packusdw),
2927 |     [0x30] = SSE41_OP(pmovzxbw),
2928 |     [0x31] = SSE41_OP(pmovzxbd),
2929 |     [0x32] = SSE41_OP(pmovzxbq),
2930 |     [0x33] = SSE41_OP(pmovzxwd),
2931 |     [0x34] = SSE41_OP(pmovzxwq),
2932 |     [0x35] = SSE41_OP(pmovzxdq),
2933 |     [0x37] = SSE42_OP(pcmpgtq),
2934 |     [0x38] = SSE41_OP(pminsb),
2935 |     [0x39] = SSE41_OP(pminsd),
2936 |     [0x3a] = SSE41_OP(pminuw),
2937 |     [0x3b] = SSE41_OP(pminud),
2938 |     [0x3c] = SSE41_OP(pmaxsb),
2939 |     [0x3d] = SSE41_OP(pmaxsd),
2940 |     [0x3e] = SSE41_OP(pmaxuw),
2941 |     [0x3f] = SSE41_OP(pmaxud),
2942 |     [0x40] = SSE41_OP(pmulld),
2943 |     [0x41] = SSE41_OP(phminposuw),
2944 |     [0xdb] = AESNI_OP(aesimc),
2945 |     [0xdc] = AESNI_OP(aesenc),
2946 |     [0xdd] = AESNI_OP(aesenclast),
2947 |     [0xde] = AESNI_OP(aesdec),
2948 |     [0xdf] = AESNI_OP(aesdeclast),
2949 | }; |
2950 | |
2951 | static const struct SSEOpHelper_eppi sse_op_table7[256] = { |
2952 |     [0x08] = SSE41_OP(roundps),
2953 |     [0x09] = SSE41_OP(roundpd),
2954 |     [0x0a] = SSE41_OP(roundss),
2955 |     [0x0b] = SSE41_OP(roundsd),
2956 |     [0x0c] = SSE41_OP(blendps),
2957 |     [0x0d] = SSE41_OP(blendpd),
2958 |     [0x0e] = SSE41_OP(pblendw),
2959 |     [0x0f] = SSSE3_OP(palignr),
2960 |     [0x14] = SSE41_SPECIAL, /* pextrb */
2961 |     [0x15] = SSE41_SPECIAL, /* pextrw */
2962 |     [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
2963 |     [0x17] = SSE41_SPECIAL, /* extractps */
2964 |     [0x20] = SSE41_SPECIAL, /* pinsrb */
2965 |     [0x21] = SSE41_SPECIAL, /* insertps */
2966 |     [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
2967 |     [0x40] = SSE41_OP(dpps),
2968 |     [0x41] = SSE41_OP(dppd),
2969 |     [0x42] = SSE41_OP(mpsadbw),
2970 |     [0x44] = PCLMULQDQ_OP(pclmulqdq),
2971 |     [0x60] = SSE42_OP(pcmpestrm),
2972 |     [0x61] = SSE42_OP(pcmpestri),
2973 |     [0x62] = SSE42_OP(pcmpistrm),
2974 |     [0x63] = SSE42_OP(pcmpistri),
2975 |     [0xdf] = AESNI_OP(aeskeygenassist),
2976 | }; |
2977 | |
2978 | static void gen_sse(CPUX86State *env, DisasContext *s, int b, |
2979 | target_ulong pc_start, int rex_r) |
2980 | { |
2981 | int b1, op1_offset, op2_offset, is_xmm, val; |
2982 | int modrm, mod, rm, reg; |
2983 | SSEFunc_0_epp sse_fn_epp; |
2984 | SSEFunc_0_eppi sse_fn_eppi; |
2985 | SSEFunc_0_ppi sse_fn_ppi; |
2986 | SSEFunc_0_eppt sse_fn_eppt; |
2987 | TCGMemOp ot; |
2988 | |
2989 | b &= 0xff; |
2990 |     if (s->prefix & PREFIX_DATA)
2991 |         b1 = 1;
2992 |     else if (s->prefix & PREFIX_REPZ)
2993 |         b1 = 2;
2994 |     else if (s->prefix & PREFIX_REPNZ)
2995 | b1 = 3; |
2996 | else |
2997 | b1 = 0; |
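     |     /* b1 indexes the prefix column of sse_op_table1:
     |        0 = no prefix, 1 = 66, 2 = F3, 3 = F2.  */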
2998 | sse_fn_epp = sse_op_table1[b][b1]; |
2999 | if (!sse_fn_epp) { |
3000 | goto illegal_op; |
3001 | } |
3002 | if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) { |
3003 | is_xmm = 1; |
3004 | } else { |
3005 | if (b1 == 0) { |
3006 | /* MMX case */ |
3007 | is_xmm = 0; |
3008 | } else { |
3009 | is_xmm = 1; |
3010 | } |
3011 | } |
3012 | /* simple MMX/SSE operation */ |
3013 |     if (s->flags & HF_TS_MASK) {
3014 |         gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
3015 |         return;
3016 |     }
3017 |     if (s->flags & HF_EM_MASK) {
3018 |     illegal_op:
3019 |         gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
3020 |         return;
3021 |     }
3022 |     if (is_xmm && !(s->flags & HF_OSFXSR_MASK))
3023 |         if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))
3024 |             goto illegal_op;
3025 |     if (b == 0x0e) {
3026 |         if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
3027 | goto illegal_op; |
3028 | /* femms */ |
3029 | gen_helper_emms(cpu_env); |
3030 | return; |
3031 | } |
3032 | if (b == 0x77) { |
3033 | /* emms */ |
3034 | gen_helper_emms(cpu_env); |
3035 | return; |
3036 | } |
3037 | /* prepare MMX state (XXX: optimize by storing fptt and fptags in |
3038 | the static cpu state) */ |
3039 | if (!is_xmm) { |
3040 | gen_helper_enter_mmx(cpu_env); |
3041 | } |
3042 | |
3043 |     modrm = cpu_ldub_code(env, s->pc++);
3044 | reg = ((modrm >> 3) & 7); |
3045 | if (is_xmm) |
3046 | reg |= rex_r; |
3047 | mod = (modrm >> 6) & 3; |
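     |     /* modrm fields: reg = bits 5..3 (plus REX.R for XMM), mod =
     |        bits 7..6; rm is extracted per case, with REX.B where needed.  */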
3048 |     if (sse_fn_epp == SSE_SPECIAL) {
3049 | b |= (b1 << 8); |
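     |         /* Fold the prefix group into b, so e.g. 0x1e7 below means
     |            "66 0F E7" (movntdq) and 0x3f0 means "F2 0F F0" (lddqu).  */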
3050 | switch(b) { |
3051 | case 0x0e7: /* movntq */ |
3052 | if (mod == 3) |
3053 | goto illegal_op; |
3054 | gen_lea_modrm(env, s, modrm); |
3055 |             gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
3056 | break; |
3057 | case 0x1e7: /* movntdq */ |
3058 | case 0x02b: /* movntps */ |
3059 |         case 0x12b: /* movntpd */
3060 | if (mod == 3) |
3061 | goto illegal_op; |
3062 | gen_lea_modrm(env, s, modrm); |
3063 |             gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3064 | break; |
3065 | case 0x3f0: /* lddqu */ |
3066 | if (mod == 3) |
3067 | goto illegal_op; |
3068 | gen_lea_modrm(env, s, modrm); |
3069 |             gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3070 | break; |
3071 | case 0x22b: /* movntss */ |
3072 | case 0x32b: /* movntsd */ |
3073 | if (mod == 3) |
3074 | goto illegal_op; |
3075 | gen_lea_modrm(env, s, modrm); |
3076 | if (b1 & 1) { |
3077 |                 gen_stq_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3078 |             } else {
3079 |                 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3080 |                     xmm_regs[reg].XMM_L(0)));
3081 | gen_op_st_v(s, MO_32, cpu_T[0], cpu_A0); |
3082 | } |
3083 | break; |
3084 | case 0x6e: /* movd mm, ea */ |
3085 | #ifdef TARGET_X86_64 |
3086 | if (s->dflag == MO_64) { |
3087 | gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0); |
3088 |                 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
3089 | } else |
3090 | #endif |
3091 | { |
3092 | gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0); |
3093 |                 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3094 |                                  offsetof(CPUX86State,fpregs[reg].mmx));
3095 |                 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3096 | gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32); |
3097 | } |
3098 | break; |
3099 | case 0x16e: /* movd xmm, ea */ |
3100 | #ifdef TARGET_X86_64 |
3101 | if (s->dflag == MO_64) { |
3102 | gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0); |
3103 |                 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3104 |                                  offsetof(CPUX86State,xmm_regs[reg]));
3105 | gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]); |
3106 | } else |
3107 | #endif |
3108 | { |
3109 | gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0); |
3110 |                 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3111 |                                  offsetof(CPUX86State,xmm_regs[reg]));
3112 |                 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3113 | gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32); |
3114 | } |
3115 | break; |
3116 | case 0x6f: /* movq mm, ea */ |
3117 | if (mod != 3) { |
3118 | gen_lea_modrm(env, s, modrm); |
3119 |                 gen_ldq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
3120 | } else { |
3121 | rm = (modrm & 7); |
3122 | tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, |
3123 |                                offsetof(CPUX86State,fpregs[rm].mmx));
3124 |                 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
3125 |                                offsetof(CPUX86State,fpregs[reg].mmx));
3126 | } |
3127 | break; |
3128 | case 0x010: /* movups */ |
3129 | case 0x110: /* movupd */ |
3130 | case 0x028: /* movaps */ |
3131 | case 0x128: /* movapd */ |
3132 | case 0x16f: /* movdqa xmm, ea */ |
3133 | case 0x26f: /* movdqu xmm, ea */ |
3134 | if (mod != 3) { |
3135 | gen_lea_modrm(env, s, modrm); |
3136 |                 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3137 |             } else {
3138 |                 rm = (modrm & 7) | REX_B(s);
3139 |                 gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
3140 |                             offsetof(CPUX86State,xmm_regs[rm]));
3141 | } |
3142 | break; |
3143 | case 0x210: /* movss xmm, ea */ |
3144 | if (mod != 3) { |
3145 | gen_lea_modrm(env, s, modrm); |
3146 | gen_op_ld_v(s, MO_32, cpu_T[0], cpu_A0); |
3147 |                 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3148 |                 tcg_gen_movi_tl(cpu_T[0], 0);
3149 |                 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3150 |                 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3151 |                 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3152 |             } else {
3153 |                 rm = (modrm & 7) | REX_B(s);
3154 |                 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3155 |                             offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3156 | } |
3157 | break; |
3158 | case 0x310: /* movsd xmm, ea */ |
3159 | if (mod != 3) { |
3160 | gen_lea_modrm(env, s, modrm); |
3161 |                 gen_ldq_env_A0(s, offsetof(CPUX86State,
3162 |                                            xmm_regs[reg].XMM_Q(0)));
3163 |                 tcg_gen_movi_tl(cpu_T[0], 0);
3164 |                 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3165 |                 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3166 |             } else {
3167 |                 rm = (modrm & 7) | REX_B(s);
3168 |                 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3169 |                             offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3170 | } |
3171 | break; |
3172 | case 0x012: /* movlps */ |
3173 | case 0x112: /* movlpd */ |
3174 | if (mod != 3) { |
3175 | gen_lea_modrm(env, s, modrm); |
3176 |                 gen_ldq_env_A0(s, offsetof(CPUX86State,
3177 |                                            xmm_regs[reg].XMM_Q(0)));
3178 |             } else {
3179 |                 /* movhlps */
3180 |                 rm = (modrm & 7) | REX_B(s);
3181 |                 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3182 |                             offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3183 | } |
3184 | break; |
3185 | case 0x212: /* movsldup */ |
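     |             /* movsldup duplicates the even 32-bit lanes:
     |                dst = { src0, src0, src2, src2 }.  */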
3186 | if (mod != 3) { |
3187 | gen_lea_modrm(env, s, modrm); |
3188 |                 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3189 |             } else {
3190 |                 rm = (modrm & 7) | REX_B(s);
3191 |                 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3192 |                             offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3193 |                 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3194 |                             offsetof(CPUX86State,xmm_regs[rm].XMM_L(2)));
3195 |             }
3196 |             gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3197 |                         offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3198 |             gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3199 |                         offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3200 | break; |
3201 | case 0x312: /* movddup */ |
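     |             /* movddup duplicates the low quadword:
     |                dst = { src0, src0 }.  */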
3202 | if (mod != 3) { |
3203 | gen_lea_modrm(env, s, modrm); |
3204 |                 gen_ldq_env_A0(s, offsetof(CPUX86State,
3205 |                                            xmm_regs[reg].XMM_Q(0)));
3206 |             } else {
3207 |                 rm = (modrm & 7) | REX_B(s);
3208 |                 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3209 |                             offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3210 |             }
3211 |             gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3212 |                         offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3213 | break; |
3214 | case 0x016: /* movhps */ |
3215 | case 0x116: /* movhpd */ |
3216 | if (mod != 3) { |
3217 | gen_lea_modrm(env, s, modrm); |
3218 |                 gen_ldq_env_A0(s, offsetof(CPUX86State,
3219 |                                            xmm_regs[reg].XMM_Q(1)));
3220 |             } else {
3221 |                 /* movlhps */
3222 |                 rm = (modrm & 7) | REX_B(s);
3223 |                 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3224 |                             offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3225 | } |
3226 | break; |
3227 | case 0x216: /* movshdup */ |
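     |             /* movshdup duplicates the odd 32-bit lanes:
     |                dst = { src1, src1, src3, src3 }.  */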
3228 | if (mod != 3) { |
3229 | gen_lea_modrm(env, s, modrm); |
3230 |                 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3231 |             } else {
3232 |                 rm = (modrm & 7) | REX_B(s);
3233 |                 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3234 |                             offsetof(CPUX86State,xmm_regs[rm].XMM_L(1)));
3235 |                 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3236 |                             offsetof(CPUX86State,xmm_regs[rm].XMM_L(3)));
3237 |             }
3238 |             gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3239 |                         offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3240 |             gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3241 |                         offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3242 | break; |
3243 | case 0x178: |
3244 | case 0x378: |
3245 | { |
3246 | int bit_index, field_length; |
3247 | |
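     |                 /* SSE4a extrq/insertq immediate forms: two immediate
     |                    bytes follow the modrm byte, giving the field length
     |                    and the starting bit index, each taken modulo 64.  */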
3248 | if (b1 == 1 && reg != 0) |
3249 | goto illegal_op; |
3250 |                 field_length = cpu_ldub_code(env, s->pc++) & 0x3F;
3251 |                 bit_index = cpu_ldub_code(env, s->pc++) & 0x3F;
3252 |                 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3253 |                     offsetof(CPUX86State,xmm_regs[reg]));
3254 | if (b1 == 1) |
3255 | gen_helper_extrq_i(cpu_env, cpu_ptr0, |
3256 | tcg_const_i32(bit_index), |
3257 | tcg_const_i32(field_length)); |
3258 | else |
3259 | gen_helper_insertq_i(cpu_env, cpu_ptr0, |
3260 | tcg_const_i32(bit_index), |
3261 | tcg_const_i32(field_length)); |
3262 | } |
3263 | break; |
3264 | case 0x7e: /* movd ea, mm */ |
3265 | #ifdef TARGET_X86_64 |
3266 | if (s->dflag == MO_64) { |
3267 | tcg_gen_ld_i64(cpu_T[0], cpu_env, |
3268 |                                offsetof(CPUX86State,fpregs[reg].mmx));
3269 | gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1); |
3270 | } else |
3271 | #endif |
3272 | { |
3273 |                 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3274 |                                  offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
3275 | gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1); |
3276 | } |
3277 | break; |
3278 | case 0x17e: /* movd ea, xmm */ |
3279 | #ifdef TARGET_X86_64 |
3280 | if (s->dflag == MO_64) { |
3281 | tcg_gen_ld_i64(cpu_T[0], cpu_env, |
3282 |                                offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3283 | gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1); |
3284 | } else |
3285 | #endif |
3286 | { |
3287 |                 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3288 |                                  offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3289 | gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1); |
3290 | } |
3291 | break; |
3292 | case 0x27e: /* movq xmm, ea */ |
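     |             /* Loads 64 bits into the low quadword and clears the
     |                high quadword of the destination.  */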
3293 | if (mod != 3) { |
3294 | gen_lea_modrm(env, s, modrm); |
3295 |                 gen_ldq_env_A0(s, offsetof(CPUX86State,
3296 |                                            xmm_regs[reg].XMM_Q(0)));
3297 |             } else {
3298 |                 rm = (modrm & 7) | REX_B(s);
3299 |                 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3300 |                             offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3301 |             }
3302 |             gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3303 | break; |
3304 | case 0x7f: /* movq ea, mm */ |
3305 | if (mod != 3) { |
3306 | gen_lea_modrm(env, s, modrm); |
3307 |                 gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
3308 |             } else {
3309 |                 rm = (modrm & 7);
3310 |                 gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
3311 |                             offsetof(CPUX86State,fpregs[reg].mmx));
3312 | } |
3313 | break; |
3314 | case 0x011: /* movups */ |
3315 | case 0x111: /* movupd */ |
3316 | case 0x029: /* movaps */ |
3317 | case 0x129: /* movapd */ |
3318 | case 0x17f: /* movdqa ea, xmm */ |
3319 | case 0x27f: /* movdqu ea, xmm */ |
3320 | if (mod != 3) { |
3321 | gen_lea_modrm(env, s, modrm); |
3322 |                 gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3323 |             } else {
3324 |                 rm = (modrm & 7) | REX_B(s);
3325 |                 gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
3326 |                             offsetof(CPUX86State,xmm_regs[reg]));
3327 | } |
3328 | break; |
3329 | case 0x211: /* movss ea, xmm */ |
3330 | if (mod != 3) { |
3331 | gen_lea_modrm(env, s, modrm); |
3332 |                 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3333 |                 gen_op_st_v(s, MO_32, cpu_T[0], cpu_A0);
3334 |             } else {
3335 |                 rm = (modrm & 7) | REX_B(s);
3336 |                 gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)),
3337 |                             offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3338 | } |
3339 | break; |
3340 | case 0x311: /* movsd ea, xmm */ |
3341 | if (mod != 3) { |
3342 | gen_lea_modrm(env, s, modrm); |
3343 |                 gen_stq_env_A0(s, offsetof(CPUX86State,
3344 |                                            xmm_regs[reg].XMM_Q(0)));
3345 |             } else {
3346 |                 rm = (modrm & 7) | REX_B(s);
3347 |                 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3348 |                             offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3349 | } |
3350 | break; |
3351 | case 0x013: /* movlps */ |
3352 | case 0x113: /* movlpd */ |
3353 | if (mod != 3) { |
3354 | gen_lea_modrm(env, s, modrm); |
3355 |                 gen_stq_env_A0(s, offsetof(CPUX86State,
3356 |                                            xmm_regs[reg].XMM_Q(0)));
3357 | } else { |
3358 | goto illegal_op; |
3359 | } |
3360 | break; |
3361 | case 0x017: /* movhps */ |
3362 | case 0x117: /* movhpd */ |
3363 | if (mod != 3) { |
3364 | gen_lea_modrm(env, s, modrm); |
3365 |                 gen_stq_env_A0(s, offsetof(CPUX86State,
3366 |                                            xmm_regs[reg].XMM_Q(1)));
3367 | } else { |
3368 | goto illegal_op; |
3369 | } |
3370 | break; |
3371 | case 0x71: /* shift mm, im */ |
3372 | case 0x72: |
3373 | case 0x73: |
3374 | case 0x171: /* shift xmm, im */ |
3375 | case 0x172: |
3376 | case 0x173: |
3377 | if (b1 >= 2) { |
3378 | goto illegal_op; |
3379 | } |
3380 |             val = cpu_ldub_code(env, s->pc++);
3381 |             if (is_xmm) {
3382 |                 tcg_gen_movi_tl(cpu_T[0], val);
3383 |                 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3384 |                 tcg_gen_movi_tl(cpu_T[0], 0);
3385 |                 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(1)));
3386 |                 op1_offset = offsetof(CPUX86State,xmm_t0);
3387 |             } else {
3388 |                 tcg_gen_movi_tl(cpu_T[0], val);
3389 |                 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
3390 |                 tcg_gen_movi_tl(cpu_T[0], 0);
3391 |                 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
3392 |                 op1_offset = offsetof(CPUX86State,mmx_t0);
3393 | } |
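     |             /* sse_op_table2 is indexed by the shift opcode row
     |                (0x71/0x72/0x73), the /digit from modrm bits 5..3,
     |                and b1 for the MMX vs XMM variant.  */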
3394 | sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 + |
3395 | (((modrm >> 3)) & 7)][b1]; |
3396 | if (!sse_fn_epp) { |
3397 | goto illegal_op; |
3398 | } |
3399 | if (is_xmm) { |
3400 |                 rm = (modrm & 7) | REX_B(s);
3401 |                 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3402 |             } else {
3403 |                 rm = (modrm & 7);
3404 |                 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3405 |             }
3406 |             tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3407 |             tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
3408 | sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1); |
3409 | break; |
3410 | case 0x050: /* movmskps */ |
3411 |             rm = (modrm & 7) | REX_B(s);
3412 |             tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3413 |                              offsetof(CPUX86State,xmm_regs[rm]));
3414 |             gen_helper_movmskps(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3415 |             tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
3416 | break; |
3417 | case 0x150: /* movmskpd */ |
3418 |             rm = (modrm & 7) | REX_B(s);
3419 |             tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3420 |                              offsetof(CPUX86State,xmm_regs[rm]));
3421 |             gen_helper_movmskpd(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3422 |             tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
3423 | break; |
3424 | case 0x02a: /* cvtpi2ps */ |
3425 | case 0x12a: /* cvtpi2pd */ |
3426 | gen_helper_enter_mmx(cpu_env); |
3427 | if (mod != 3) { |
3428 | gen_lea_modrm(env, s, modrm); |
3429 |                 op2_offset = offsetof(CPUX86State,mmx_t0);
3430 |                 gen_ldq_env_A0(s, op2_offset);
3431 |             } else {
3432 |                 rm = (modrm & 7);
3433 |                 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3434 |             }
3435 |             op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3436 |             tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3437 |             tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3438 | switch(b >> 8) { |
3439 | case 0x0: |
3440 | gen_helper_cvtpi2ps(cpu_env, cpu_ptr0, cpu_ptr1); |
3441 | break; |
3442 | default: |
3443 | case 0x1: |
3444 | gen_helper_cvtpi2pd(cpu_env, cpu_ptr0, cpu_ptr1); |
3445 | break; |
3446 | } |
3447 | break; |
3448 | case 0x22a: /* cvtsi2ss */ |
3449 | case 0x32a: /* cvtsi2sd */ |
3450 | ot = mo_64_32(s->dflag); |
3451 | gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); |
3452 |             op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3453 |             tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3454 |             if (ot == MO_32) {
3455 |                 SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1];
3456 |                 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3457 | sse_fn_epi(cpu_env, cpu_ptr0, cpu_tmp2_i32); |
3458 | } else { |
3459 | #ifdef TARGET_X86_64 |
3460 | SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1]; |
3461 | sse_fn_epl(cpu_env, cpu_ptr0, cpu_T[0]); |
3462 | #else |
3463 | goto illegal_op; |
3464 | #endif |
3465 | } |
3466 | break; |
3467 | case 0x02c: /* cvttps2pi */ |
3468 | case 0x12c: /* cvttpd2pi */ |
3469 | case 0x02d: /* cvtps2pi */ |
3470 | case 0x12d: /* cvtpd2pi */ |
3471 | gen_helper_enter_mmx(cpu_env); |
3472 | if (mod != 3) { |
3473 | gen_lea_modrm(env, s, modrm); |
3474 |                 op2_offset = offsetof(CPUX86State,xmm_t0);
3475 |                 gen_ldo_env_A0(s, op2_offset);
3476 |             } else {
3477 |                 rm = (modrm & 7) | REX_B(s);
3478 |                 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3479 |             }
3480 |             op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
3481 |             tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3482 |             tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3483 | switch(b) { |
3484 | case 0x02c: |
3485 | gen_helper_cvttps2pi(cpu_env, cpu_ptr0, cpu_ptr1); |
3486 | break; |
3487 | case 0x12c: |
3488 | gen_helper_cvttpd2pi(cpu_env, cpu_ptr0, cpu_ptr1); |
3489 | break; |
3490 | case 0x02d: |
3491 | gen_helper_cvtps2pi(cpu_env, cpu_ptr0, cpu_ptr1); |
3492 | break; |
3493 | case 0x12d: |
3494 | gen_helper_cvtpd2pi(cpu_env, cpu_ptr0, cpu_ptr1); |
3495 | break; |
3496 | } |
3497 | break; |
3498 | case 0x22c: /* cvttss2si */ |
3499 | case 0x32c: /* cvttsd2si */ |
3500 | case 0x22d: /* cvtss2si */ |
3501 | case 0x32d: /* cvtsd2si */ |
3502 | ot = mo_64_32(s->dflag); |
3503 | if (mod != 3) { |
3504 | gen_lea_modrm(env, s, modrm); |
3505 | if ((b >> 8) & 1) { |
3506 |                     gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.XMM_Q(0)));
3507 |                 } else {
3508 |                     gen_op_ld_v(s, MO_32, cpu_T[0], cpu_A0);
3509 |                     tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3510 |                 }
3511 |                 op2_offset = offsetof(CPUX86State,xmm_t0);
3512 |             } else {
3513 |                 rm = (modrm & 7) | REX_B(s);
3514 |                 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3515 |             }
3516 |             tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3517 |             if (ot == MO_32) {
3518 |                 SSEFunc_i_ep sse_fn_i_ep =
3519 |                     sse_op_table3bi[((b >> 7) & 2) | (b & 1)];
3520 |                 sse_fn_i_ep(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3521 |                 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3522 | } else { |
3523 | #ifdef TARGET_X86_64 |
3524 | SSEFunc_l_ep sse_fn_l_ep = |
3525 | sse_op_table3bq[((b >> 7) & 2) | (b & 1)]; |
3526 | sse_fn_l_ep(cpu_T[0], cpu_env, cpu_ptr0); |
3527 | #else |
3528 | goto illegal_op; |
3529 | #endif |
3530 | } |
3531 | gen_op_mov_reg_v(ot, reg, cpu_T[0]); |
3532 | break; |
3533 | case 0xc4: /* pinsrw */ |
3534 | case 0x1c4: |
3535 | s->rip_offset = 1; |
3536 | gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); |
3537 |             val = cpu_ldub_code(env, s->pc++);
3538 |             if (b1) {
3539 |                 val &= 7;
3540 |                 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3541 |                                 offsetof(CPUX86State,xmm_regs[reg].XMM_W(val)));
3542 |             } else {
3543 |                 val &= 3;
3544 |                 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3545 |                                 offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
3546 | } |
3547 | break; |
3548 | case 0xc5: /* pextrw */ |
3549 | case 0x1c5: |
3550 | if (mod != 3) |
3551 | goto illegal_op; |
3552 | ot = mo_64_32(s->dflag); |
3553 |             val = cpu_ldub_code(env, s->pc++);
3554 |             if (b1) {
3555 |                 val &= 7;
3556 |                 rm = (modrm & 7) | REX_B(s);
3557 |                 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3558 |                                  offsetof(CPUX86State,xmm_regs[rm].XMM_W(val)));
3559 |             } else {
3560 |                 val &= 3;
3561 |                 rm = (modrm & 7);
3562 |                 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3563 |                                  offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
3564 | } |
3565 | reg = ((modrm >> 3) & 7) | rex_r; |
3566 | gen_op_mov_reg_v(ot, reg, cpu_T[0]); |
3567 | break; |
3568 | case 0x1d6: /* movq ea, xmm */ |
3569 | if (mod != 3) { |
3570 | gen_lea_modrm(env, s, modrm); |
3571 |                 gen_stq_env_A0(s, offsetof(CPUX86State,
3572 |                                            xmm_regs[reg].XMM_Q(0)));
3573 |             } else {
3574 |                 rm = (modrm & 7) | REX_B(s);
3575 |                 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3576 |                             offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3577 |                 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3578 | } |
3579 | break; |
3580 | case 0x2d6: /* movq2dq */ |
3581 | gen_helper_enter_mmx(cpu_env); |
3582 | rm = (modrm & 7); |
3583 |             gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3584 |                         offsetof(CPUX86State,fpregs[rm].mmx));
3585 |             gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3586 | break; |
3587 | case 0x3d6: /* movdq2q */ |
3588 | gen_helper_enter_mmx(cpu_env); |
3589 |             rm = (modrm & 7) | REX_B(s);
3590 |             gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
3591 |                         offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3592 | break; |
3593 | case 0xd7: /* pmovmskb */ |
3594 | case 0x1d7: |
3595 | if (mod != 3) |
3596 | goto illegal_op; |
3597 | if (b1) { |
3598 |                 rm = (modrm & 7) | REX_B(s);
3599 |                 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
3600 |                 gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3601 |             } else {
3602 |                 rm = (modrm & 7);
3603 |                 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
3604 |                 gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3605 |             }
3606 |             reg = ((modrm >> 3) & 7) | rex_r;
3607 |             tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
3608 | break; |
3609 | |
3610 | case 0x138: |
3611 | case 0x038: |
3612 | b = modrm; |
3613 | if ((b & 0xf0) == 0xf0) { |
3614 | goto do_0f_38_fx; |
3615 | } |
3616 |             modrm = cpu_ldub_code(env, s->pc++);
3617 | rm = modrm & 7; |
3618 | reg = ((modrm >> 3) & 7) | rex_r; |
3619 | mod = (modrm >> 6) & 3; |
3620 | if (b1 >= 2) { |
3621 | goto illegal_op; |
3622 | } |
3623 | |
3624 | sse_fn_epp = sse_op_table6[b].op[b1]; |
3625 | if (!sse_fn_epp) { |
3626 | goto illegal_op; |
3627 | } |
3628 | if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask)) |
3629 | goto illegal_op; |
3630 | |
3631 | if (b1) { |
3632 |                 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3633 |                 if (mod == 3) {
3634 |                     op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
3635 | } else { |
3636 |                     op2_offset = offsetof(CPUX86State,xmm_t0);
3637 | gen_lea_modrm(env, s, modrm); |
3638 | switch (b) { |
3639 | case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */ |
3640 | case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */ |
3641 | case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */ |
3642 |                         gen_ldq_env_A0(s, op2_offset +
3643 |                                         offsetof(XMMReg, XMM_Q(0)));
3644 | break; |
3645 | case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */ |
3646 | case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */ |
3647 | tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, |
3648 | s->mem_index, MO_LEUL); |
3649 |                         tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
3650 |                                         offsetof(XMMReg, XMM_L(0)));
3651 | break; |
3652 | case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */ |
3653 |                         tcg_gen_qemu_ld_tl(cpu_tmp0, cpu_A0,
3654 |                                            s->mem_index, MO_LEUW);
3655 |                         tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
3656 |                                         offsetof(XMMReg, XMM_W(0)));
3657 | break; |
3658 |                     case 0x2a: /* movntdqa */
3659 | gen_ldo_env_A0(s, op1_offset); |
3660 | return; |
3661 | default: |
3662 | gen_ldo_env_A0(s, op2_offset); |
3663 | } |
3664 | } |
3665 | } else { |
3666 |                 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
3667 |                 if (mod == 3) {
3668 |                     op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3669 |                 } else {
3670 |                     op2_offset = offsetof(CPUX86State,mmx_t0);
3671 | gen_lea_modrm(env, s, modrm); |
3672 | gen_ldq_env_A0(s, op2_offset); |
3673 | } |
3674 | } |
3675 |             if (sse_fn_epp == SSE_SPECIAL) {
3676 | goto illegal_op; |
3677 | } |
3678 | |
3679 |             tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3680 |             tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3681 | sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1); |
3682 | |
3683 | if (b == 0x17) { |
3684 | set_cc_op(s, CC_OP_EFLAGS); |
3685 | } |
3686 | break; |
3687 | |
3688 | case 0x238: |
3689 | case 0x338: |
3690 | do_0f_38_fx: |
3691 | /* Various integer extensions at 0f 38 f[0-f]. */ |
3692 | b = modrm | (b1 << 8); |
3693 |             modrm = cpu_ldub_code(env, s->pc++);
3694 | reg = ((modrm >> 3) & 7) | rex_r; |
3695 | |
3696 | switch (b) { |
3697 | case 0x3f0: /* crc32 Gd,Eb */ |
3698 | case 0x3f1: /* crc32 Gd,Ey */ |
3699 | do_crc32: |
3700 |             if (!(s->cpuid_ext_features & CPUID_EXT_SSE42)) {
3701 | goto illegal_op; |
3702 | } |
3703 | if ((b & 0xff) == 0xf0) { |
3704 | ot = MO_8; |
3705 | } else if (s->dflag != MO_64) { |
3706 |                 ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
3707 | } else { |
3708 | ot = MO_64; |
3709 | } |
3710 | |
3711 |             tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[reg]);
3712 | gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); |
3713 | gen_helper_crc32(cpu_T[0], cpu_tmp2_i32, |
3714 | cpu_T[0], tcg_const_i32(8 << ot)); |
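     |             /* The helper is told how many source bits to consume:
     |                8 << ot, i.e. 8, 16, 32 or 64.  */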
3715 | |
3716 | ot = mo_64_32(s->dflag); |
3717 | gen_op_mov_reg_v(ot, reg, cpu_T[0]); |
3718 | break; |
3719 | |
3720 | case 0x1f0: /* crc32 or movbe */ |
3721 | case 0x1f1: |
3722 |             /* For these insns, the f2 prefix is supposed to take
3723 |                priority over the 66 prefix, but that is not what we
3724 |                implement above when setting b1.  */
3725 |             if (s->prefix & PREFIX_REPNZ) {
3726 | goto do_crc32; |
3727 | } |
3728 | /* FALLTHRU */ |
3729 | case 0x0f0: /* movbe Gy,My */ |
3730 | case 0x0f1: /* movbe My,Gy */ |
3731 |             if (!(s->cpuid_ext_features & CPUID_EXT_MOVBE)) {
3732 | goto illegal_op; |
3733 | } |
3734 | if (s->dflag != MO_64) { |
3735 |                 ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
3736 | } else { |
3737 | ot = MO_64; |
3738 | } |
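     |             /* movbe is an ordinary load/store except that MO_BE
     |                forces a byte-swapped (big-endian) memory access.  */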
3739 | |
3740 | gen_lea_modrm(env, s, modrm); |
3741 | if ((b & 1) == 0) { |
3742 |                 tcg_gen_qemu_ld_tl(cpu_T[0], cpu_A0,
3743 | s->mem_index, ot | MO_BE); |
3744 | gen_op_mov_reg_v(ot, reg, cpu_T[0]); |
3745 | } else { |
3746 |                 tcg_gen_qemu_st_tl(cpu_regs[reg], cpu_A0,
3747 | s->mem_index, ot | MO_BE); |
3748 | } |
3749 | break; |
3750 | |
3751 | case 0x0f2: /* andn Gy, By, Ey */ |
3752 |             if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
3753 |                 || !(s->prefix & PREFIX_VEX)
3754 | || s->vex_l != 0) { |
3755 | goto illegal_op; |
3756 | } |
3757 | ot = mo_64_32(s->dflag); |
3758 | gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); |
3759 |             tcg_gen_andc_tl(cpu_T[0], cpu_regs[s->vex_v], cpu_T[0]);
3760 | gen_op_mov_reg_v(ot, reg, cpu_T[0]); |
3761 | gen_op_update1_cc(); |
3762 | set_cc_op(s, CC_OP_LOGICB + ot); |
3763 | break; |
3764 | |
3765 | case 0x0f7: /* bextr Gy, Ey, By */ |
3766 |             if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
3767 |                 || !(s->prefix & PREFIX_VEX)
3768 | || s->vex_l != 0) { |
3769 | goto illegal_op; |
3770 | } |
3771 | ot = mo_64_32(s->dflag); |
3772 | { |
3773 |                 TCGv bound, zero;
3774 | |
3775 | gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); |
3776 | /* Extract START, and shift the operand. |
3777 | Shifts larger than operand size get zeros. */ |
3778 |                 tcg_gen_ext8u_tl(cpu_A0, cpu_regs[s->vex_v]);
3779 |                 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_A0);
3780 | 
3781 |                 bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
3782 |                 zero = tcg_const_tl(0);
3783 |                 tcg_gen_movcond_tl(TCG_COND_LEU, cpu_T[0], cpu_A0, bound,
3784 |                                    cpu_T[0], zero);
3785 |                 tcg_temp_free(zero);
3786 | 
3787 |                 /* Extract the LEN into a mask.  Lengths larger than
3788 |                    operand size get all ones.  */
3789 |                 tcg_gen_shri_tl(cpu_A0, cpu_regs[s->vex_v], 8);
3790 |                 tcg_gen_ext8u_tl(cpu_A0, cpu_A0);
3791 |                 tcg_gen_movcond_tl(TCG_COND_LEU, cpu_A0, cpu_A0, bound,
3792 |                                    cpu_A0, bound);
3793 |                 tcg_temp_free(bound);
3794 |                 tcg_gen_movi_tl(cpu_T[1], 1);
3795 |                 tcg_gen_shl_tl(cpu_T[1], cpu_T[1], cpu_A0);
3796 |                 tcg_gen_subi_tl(cpu_T[1], cpu_T[1], 1);
3797 |                 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
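     |                 /* (1 << len) - 1 masks off all but the low LEN bits of
     |                    the shifted operand, e.g. len = 8 gives 0xff.  */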
3798 | |
3799 | gen_op_mov_reg_v(ot, reg, cpu_T[0]); |
3800 | gen_op_update1_cc(); |
3801 | set_cc_op(s, CC_OP_LOGICB + ot); |
3802 | } |
3803 | break; |
3804 | |
3805 | case 0x0f5: /* bzhi Gy, Ey, By */ |
3806 |             if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3807 |                 || !(s->prefix & PREFIX_VEX)
3808 | || s->vex_l != 0) { |
3809 | goto illegal_op; |
3810 | } |
3811 | ot = mo_64_32(s->dflag); |
3812 | gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); |
3813 |             tcg_gen_ext8u_tl(cpu_T[1], cpu_regs[s->vex_v]);
3814 |             {
3815 |                 TCGv bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
3816 |                 /* Note that since we're using BMILG (in order to get O
3817 |                    cleared) we need to store the inverse into C.  */
3818 |                 tcg_gen_setcond_tl(TCG_COND_LT, cpu_cc_src,
3819 |                                    cpu_T[1], bound);
3820 |                 tcg_gen_movcond_tl(TCG_COND_GT, cpu_T[1], cpu_T[1],
3821 |                                    bound, bound, cpu_T[1]);
3822 |                 tcg_temp_free(bound);
3823 |             }
3824 |             tcg_gen_movi_tl(cpu_A0, -1);
3825 |             tcg_gen_shl_tl(cpu_A0, cpu_A0, cpu_T[1]);
3826 |             tcg_gen_andc_tl(cpu_T[0], cpu_T[0], cpu_A0);
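     |             /* -1 << n is zero in bits [0, n), so the andc keeps only
     |                the low n bits of the source operand.  */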
3827 | gen_op_mov_reg_v(ot, reg, cpu_T[0]); |
3828 | gen_op_update1_cc(); |
3829 | set_cc_op(s, CC_OP_BMILGB + ot); |
3830 | break; |
3831 | |
3832 | case 0x3f6: /* mulx By, Gy, rdx, Ey */ |
3833 |             if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3834 |                 || !(s->prefix & PREFIX_VEX)
3835 | || s->vex_l != 0) { |
3836 | goto illegal_op; |
3837 | } |
3838 | ot = mo_64_32(s->dflag); |
3839 | gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); |
3840 | switch (ot) { |
3841 | default: |
3842 |                 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3843 |                 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EDX]);
3844 |                 tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
3845 |                                   cpu_tmp2_i32, cpu_tmp3_i32);
3846 |                 tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], cpu_tmp2_i32);
3847 |                 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp3_i32);
3848 | break; |
3849 | #ifdef TARGET_X86_64 |
3850 | case MO_64: |
3851 | tcg_gen_mulu2_i64(cpu_regs[s->vex_v], cpu_regs[reg], |
3852 |                                   cpu_T[0], cpu_regs[R_EDX]);
3853 | break; |
3854 | #endif |
3855 | } |
3856 | break; |
3857 | |
3858 | case 0x3f5: /* pdep Gy, By, Ey */ |
3859 |             if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3860 |                 || !(s->prefix & PREFIX_VEX)
3861 | || s->vex_l != 0) { |
3862 | goto illegal_op; |
3863 | } |
3864 | ot = mo_64_32(s->dflag); |
3865 | gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); |
3866 | /* Note that by zero-extending the mask operand, we |
3867 | automatically handle zero-extending the result. */ |
3868 | if (ot == MO_64) { |
3869 |                 tcg_gen_mov_tl(cpu_T[1], cpu_regs[s->vex_v]);
3870 |             } else {
3871 |                 tcg_gen_ext32u_tl(cpu_T[1], cpu_regs[s->vex_v]);
3872 | } |
3873 | gen_helper_pdep(cpu_regs[reg], cpu_T[0], cpu_T[1]); |
3874 | break; |
3875 | |
3876 | case 0x2f5: /* pext Gy, By, Ey */ |
3877 |             if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3878 |                 || !(s->prefix & PREFIX_VEX)
3879 | || s->vex_l != 0) { |
3880 | goto illegal_op; |
3881 | } |
3882 | ot = mo_64_32(s->dflag); |
3883 | gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); |
3884 | /* Note that by zero-extending the mask operand, we |
3885 | automatically handle zero-extending the result. */ |
3886 | if (ot == MO_64) { |
3887 |                 tcg_gen_mov_tl(cpu_T[1], cpu_regs[s->vex_v]);
3888 |             } else {
3889 |                 tcg_gen_ext32u_tl(cpu_T[1], cpu_regs[s->vex_v]);
3890 | } |
3891 | gen_helper_pext(cpu_regs[reg], cpu_T[0], cpu_T[1]); |
3892 | break; |
3893 | |
3894 | case 0x1f6: /* adcx Gy, Ey */ |
3895 | case 0x2f6: /* adox Gy, Ey */ |
3896 |             if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_ADX)) {
3897 | goto illegal_op; |
3898 | } else { |
3899 |                 TCGv carry_in, carry_out, zero;
3900 | int end_op; |
3901 | |
3902 | ot = mo_64_32(s->dflag); |
3903 | gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); |
3904 | |
3905 | /* Re-use the carry-out from a previous round. */ |
3906 |                 TCGV_UNUSED(carry_in);
3907 | carry_out = (b == 0x1f6 ? cpu_cc_dst : cpu_cc_src2); |
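     |                 /* ADCX tracks its carry (CF) in cc_dst and ADOX tracks
     |                    its carry (OF) in cc_src2; cc_src holds the rest of
     |                    the computed EFLAGS.  */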
3908 | switch (s->cc_op) { |
3909 | case CC_OP_ADCX: |
3910 | if (b == 0x1f6) { |
3911 | carry_in = cpu_cc_dst; |
3912 | end_op = CC_OP_ADCX; |
3913 | } else { |
3914 | end_op = CC_OP_ADCOX; |
3915 | } |
3916 | break; |
3917 | case CC_OP_ADOX: |
3918 | if (b == 0x1f6) { |
3919 | end_op = CC_OP_ADCOX; |
3920 | } else { |
3921 | carry_in = cpu_cc_src2; |
3922 | end_op = CC_OP_ADOX; |
3923 | } |
3924 | break; |
3925 | case CC_OP_ADCOX: |
3926 | end_op = CC_OP_ADCOX; |
3927 | carry_in = carry_out; |
3928 | break; |
3929 | default: |
3930 | end_op = (b == 0x1f6 ? CC_OP_ADCX : CC_OP_ADOX); |
3931 | break; |
3932 | } |
3933 | /* If we can't reuse carry-out, get it out of EFLAGS. */ |
3934 |                 if (TCGV_IS_UNUSED(carry_in)) {
3935 | if (s->cc_op != CC_OP_ADCX && s->cc_op != CC_OP_ADOX) { |
3936 | gen_compute_eflags(s); |
3937 | } |
3938 | carry_in = cpu_tmp0; |
3939 |                     tcg_gen_shri_tl(carry_in, cpu_cc_src,
3940 |                                     ctz32(b == 0x1f6 ? CC_C : CC_O));
3941 |                     tcg_gen_andi_tl(carry_in, carry_in, 1);
3942 | } |
3943 | |
3944 | switch (ot) { |
3945 | #ifdef TARGET_X86_64 |
3946 | case MO_32: |
3947 | /* If we know TL is 64-bit, and we want a 32-bit |
3948 | result, just do everything in 64-bit arithmetic. */ |
3949 | tcg_gen_ext32u_i64(cpu_regs[reg], cpu_regs[reg]); |
3950 | tcg_gen_ext32u_i64(cpu_T[0], cpu_T[0]); |
3951 | tcg_gen_add_i64(cpu_T[0], cpu_T[0], cpu_regs[reg]); |
3952 | tcg_gen_add_i64(cpu_T[0], cpu_T[0], carry_in); |
3953 | tcg_gen_ext32u_i64(cpu_regs[reg], cpu_T[0]); |
3954 | tcg_gen_shri_i64(carry_out, cpu_T[0], 32); |
3955 | break; |
3956 | #endif |
3957 | default: |
3958 | /* Otherwise compute the carry-out in two steps. */ |
3959 |                     zero = tcg_const_tl(0);
3960 |                     tcg_gen_add2_tl(cpu_T[0], carry_out,
3961 |                                     cpu_T[0], zero,
3962 |                                     carry_in, zero);
3963 |                     tcg_gen_add2_tl(cpu_regs[reg], carry_out,
3964 |                                     cpu_regs[reg], carry_out,
3965 |                                     cpu_T[0], zero);
3966 |                     tcg_temp_free(zero);
3967 | break; |
3968 | } |
3969 | set_cc_op(s, end_op); |
3970 | } |
3971 | break; |
3972 | |
3973 | case 0x1f7: /* shlx Gy, Ey, By */ |
3974 | case 0x2f7: /* sarx Gy, Ey, By */ |
3975 | case 0x3f7: /* shrx Gy, Ey, By */ |
3976 |             if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3977 |                 || !(s->prefix & PREFIX_VEX)
3978 | || s->vex_l != 0) { |
3979 | goto illegal_op; |
3980 | } |
3981 | ot = mo_64_32(s->dflag); |
3982 | gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); |
3983 | if (ot == MO_64) { |
3984 |                 tcg_gen_andi_tl(cpu_T[1], cpu_regs[s->vex_v], 63);
3985 |             } else {
3986 |                 tcg_gen_andi_tl(cpu_T[1], cpu_regs[s->vex_v], 31);
3987 |             }
3988 |             if (b == 0x1f7) {
3989 |                 tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
3990 |             } else if (b == 0x2f7) {
3991 |                 if (ot != MO_64) {
3992 |                     tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
3993 |                 }
3994 |                 tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
3995 |             } else {
3996 |                 if (ot != MO_64) {
3997 |                     tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
3998 |                 }
3999 |                 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4000 | } |
4001 | gen_op_mov_reg_v(ot, reg, cpu_T[0]); |
4002 | break; |
4003 | |
4004 | case 0x0f3: |
4005 | case 0x1f3: |
4006 | case 0x2f3: |
4007 | case 0x3f3: /* Group 17 */ |
4008 |             if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
4009 |                 || !(s->prefix & PREFIX_VEX)
4010 | || s->vex_l != 0) { |
4011 | goto illegal_op; |
4012 | } |
4013 | ot = mo_64_32(s->dflag); |
4014 | gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); |
4015 | |
4016 | switch (reg & 7) { |
4017 | case 1: /* blsr By,Ey */ |
4018 |                 tcg_gen_neg_tl(cpu_T[1], cpu_T[0]);
4019 |                 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4020 | gen_op_mov_reg_v(ot, s->vex_v, cpu_T[0]); |
4021 | gen_op_update2_cc(); |
4022 | set_cc_op(s, CC_OP_BMILGB + ot); |
4023 | break; |
4024 | |
4025 | case 2: /* blsmsk By,Ey */ |
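     |                 /* x ^ (x - 1) sets every bit up to and including the
     |                    lowest set bit of x.  */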
4026 |                 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4027 |                 tcg_gen_subi_tl(cpu_T[0], cpu_T[0], 1);
4028 |                 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_cc_src);
4029 |                 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4030 | set_cc_op(s, CC_OP_BMILGB + ot); |
4031 | break; |
4032 | |
4033 | case 3: /* blsi By, Ey */ |
4034 |                 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4035 |                 tcg_gen_subi_tl(cpu_T[0], cpu_T[0], 1);
4036 |                 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_cc_src);
4037 |                 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4038 | set_cc_op(s, CC_OP_BMILGB + ot); |
4039 | break; |
4040 | |
4041 | default: |
4042 | goto illegal_op; |
4043 | } |
4044 | break; |
4045 | |
4046 | default: |
4047 | goto illegal_op; |
4048 | } |
4049 | break; |
4050 | |
4051 | case 0x03a: |
4052 | case 0x13a: |
4053 | b = modrm; |
4054 |         modrm = cpu_ldub_code(env, s->pc++);
4055 | rm = modrm & 7; |
4056 | reg = ((modrm >> 3) & 7) | rex_r; |
4057 | mod = (modrm >> 6) & 3; |
4058 | if (b1 >= 2) { |
4059 | goto illegal_op; |
4060 | } |
4061 | |
4062 | sse_fn_eppi = sse_op_table7[b].op[b1]; |
4063 | if (!sse_fn_eppi) { |
4064 | goto illegal_op; |
4065 | } |
4066 | if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask)) |
4067 | goto illegal_op; |
4068 | |
4069 |         if (sse_fn_eppi == SSE_SPECIAL) {
4070 |             ot = mo_64_32(s->dflag);
4071 |             rm = (modrm & 7) | REX_B(s);
4072 | if (mod != 3) |
4073 | gen_lea_modrm(env, s, modrm); |
4074 | reg = ((modrm >> 3) & 7) | rex_r; |
4075 |             val = cpu_ldub_code(env, s->pc++);
4076 | switch (b) { |
4077 | case 0x14: /* pextrb */ |
4078 |                 tcg_gen_ld8u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4079 |                                             xmm_regs[reg].XMM_B(val & 15)));
4080 | if (mod == 3) { |
4081 | gen_op_mov_reg_v(ot, rm, cpu_T[0]); |
4082 | } else { |
4083 | tcg_gen_qemu_st_tltcg_gen_qemu_st_i32(cpu_T[0], cpu_A0, |
4084 | s->mem_index, MO_UB); |
4085 | } |
4086 | break; |
4087 | case 0x15: /* pextrw */ |
4088 | tcg_gen_ld16u_tltcg_gen_ld16u_i32(cpu_T[0], cpu_env, offsetof(CPUX86State,__builtin_offsetof(CPUX86State, xmm_regs[reg]._w[val & 7] ) |
4089 | xmm_regs[reg].XMM_W(val & 7))__builtin_offsetof(CPUX86State, xmm_regs[reg]._w[val & 7] )); |
4090 | if (mod == 3) { |
4091 | gen_op_mov_reg_v(ot, rm, cpu_T[0]); |
4092 | } else { |
4093 | tcg_gen_qemu_st_tltcg_gen_qemu_st_i32(cpu_T[0], cpu_A0, |
4094 | s->mem_index, MO_LEUW); |
4095 | } |
4096 | break; |
4097 | case 0x16: |
4098 | if (ot == MO_32) { /* pextrd */ |
4099 | tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, |
4100 | offsetof(CPUX86State,__builtin_offsetof(CPUX86State, xmm_regs[reg]._l[val & 3] ) |
4101 | xmm_regs[reg].XMM_L(val & 3))__builtin_offsetof(CPUX86State, xmm_regs[reg]._l[val & 3] )); |
4102 | if (mod == 3) { |
4103 | tcg_gen_extu_i32_tltcg_gen_mov_i32(cpu_regs[rm], cpu_tmp2_i32); |
4104 | } else { |
4105 | tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0, |
4106 | s->mem_index, MO_LEUL); |
4107 | } |
4108 | } else { /* pextrq */ |
4109 | #ifdef TARGET_X86_64 |
4110 | tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, |
4111 | offsetof(CPUX86State,__builtin_offsetof(CPUX86State, xmm_regs[reg]._q[val & 1] ) |
4112 | xmm_regs[reg].XMM_Q(val & 1))__builtin_offsetof(CPUX86State, xmm_regs[reg]._q[val & 1] )); |
4113 | if (mod == 3) { |
4114 | tcg_gen_mov_i64(cpu_regs[rm], cpu_tmp1_i64); |
4115 | } else { |
4116 | tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, |
4117 | s->mem_index, MO_LEQ); |
4118 | } |
4119 | #else |
4120 | goto illegal_op; |
4121 | #endif |
4122 | } |
4123 | break; |
4124 | case 0x17: /* extractps */ |
4125 | tcg_gen_ld32u_tltcg_gen_ld_i32(cpu_T[0], cpu_env, offsetof(CPUX86State,__builtin_offsetof(CPUX86State, xmm_regs[reg]._l[val & 3] ) |
4126 | xmm_regs[reg].XMM_L(val & 3))__builtin_offsetof(CPUX86State, xmm_regs[reg]._l[val & 3] )); |
4127 | if (mod == 3) { |
4128 | gen_op_mov_reg_v(ot, rm, cpu_T[0]); |
4129 | } else { |
4130 | tcg_gen_qemu_st_tltcg_gen_qemu_st_i32(cpu_T[0], cpu_A0, |
4131 | s->mem_index, MO_LEUL); |
4132 | } |
4133 | break; |
4134 | case 0x20: /* pinsrb */
4135 | if (mod == 3) {
4136 | gen_op_mov_v_reg(MO_32, cpu_T[0], rm);
4137 | } else {
4138 | tcg_gen_qemu_ld_tl(cpu_T[0], cpu_A0,
4139 | s->mem_index, MO_UB);
4140 | }
4141 | tcg_gen_st8_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4142 | xmm_regs[reg].XMM_B(val & 15)));
4143 | break;
4144 | case 0x21: /* insertps */
4145 | if (mod == 3) {
4146 | tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4147 | offsetof(CPUX86State,xmm_regs[rm]
4148 | .XMM_L((val >> 6) & 3)));
4149 | } else {
4150 | tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
4151 | s->mem_index, MO_LEUL);
4152 | }
4153 | tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4154 | offsetof(CPUX86State,xmm_regs[reg]
4155 | .XMM_L((val >> 4) & 3)));
4156 | if ((val >> 0) & 1)
4157 | tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4158 | cpu_env, offsetof(CPUX86State,
4159 | xmm_regs[reg].XMM_L(0)));
4160 | if ((val >> 1) & 1)
4161 | tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4162 | cpu_env, offsetof(CPUX86State,
4163 | xmm_regs[reg].XMM_L(1)));
4164 | if ((val >> 2) & 1)
4165 | tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4166 | cpu_env, offsetof(CPUX86State,
4167 | xmm_regs[reg].XMM_L(2)));
4168 | if ((val >> 3) & 1)
4169 | tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4170 | cpu_env, offsetof(CPUX86State,
4171 | xmm_regs[reg].XMM_L(3)));
4172 | break;
4173 | case 0x22:
4174 | if (ot == MO_32) { /* pinsrd */
4175 | if (mod == 3) {
4176 | tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[rm]);
4177 | } else {
4178 | tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
4179 | s->mem_index, MO_LEUL);
4180 | }
4181 | tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4182 | offsetof(CPUX86State,
4183 | xmm_regs[reg].XMM_L(val & 3)));
4184 | } else { /* pinsrq */
4185 | #ifdef TARGET_X86_64
4186 | if (mod == 3) {
4187 | gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
4188 | } else {
4189 | tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
4190 | s->mem_index, MO_LEQ);
4191 | }
4192 | tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
4193 | offsetof(CPUX86State,
4194 | xmm_regs[reg].XMM_Q(val & 1)));
4195 | #else
4196 | goto illegal_op;
4197 | #endif
4198 | }
4199 | break;
4200 | } |
4201 | return; |
4202 | } |
4203 | |
4204 | if (b1) {
4205 | op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4206 | if (mod == 3) {
4207 | op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
4208 | } else {
4209 | op2_offset = offsetof(CPUX86State,xmm_t0);
4210 | gen_lea_modrm(env, s, modrm);
4211 | gen_ldo_env_A0(s, op2_offset);
4212 | }
4213 | } else {
4214 | op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4215 | if (mod == 3) {
4216 | op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4217 | } else {
4218 | op2_offset = offsetof(CPUX86State,mmx_t0);
4219 | gen_lea_modrm(env, s, modrm);
4220 | gen_ldq_env_A0(s, op2_offset);
4221 | }
4222 | }
4223 | val = cpu_ldub_code(env, s->pc++);
4224 | |
4225 | if ((b & 0xfc) == 0x60) { /* pcmpXstrX */ |
4226 | set_cc_op(s, CC_OP_EFLAGS); |
4227 | |
4228 | if (s->dflag == MO_64) { |
4229 | /* The helper must use entire 64-bit gp registers */ |
4230 | val |= 1 << 8; |
4231 | } |
4232 | } |
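     | /* Note: bit 8 of 'val' is not part of the architectural imm8; it is
     |    a private flag telling the pcmpXstrX helpers to read the full
     |    64-bit RAX/RDX string lengths when REX.W (dflag == MO_64) is in
     |    effect. */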
4233 | |
4234 | tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4235 | tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4236 | sse_fn_eppi(cpu_env, cpu_ptr0, cpu_ptr1, tcg_const_i32(val)); |
4237 | break; |
4238 | |
4239 | case 0x33a: |
4240 | /* Various integer extensions at 0f 3a f[0-f]. */ |
4241 | b = modrm | (b1 << 8); |
4242 | modrm = cpu_ldub_code(env, s->pc++);
4243 | reg = ((modrm >> 3) & 7) | rex_r; |
4244 | |
4245 | switch (b) { |
4246 | case 0x3f0: /* rorx Gy,Ey, Ib */ |
4247 | if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
4248 | || !(s->prefix & PREFIX_VEX)
4249 | || s->vex_l != 0) { |
4250 | goto illegal_op; |
4251 | } |
4252 | ot = mo_64_32(s->dflag); |
4253 | gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); |
4254 | b = cpu_ldub_code(env, s->pc++);
4255 | if (ot == MO_64) {
4256 | tcg_gen_rotri_tl(cpu_T[0], cpu_T[0], b & 63);
4257 | } else {
4258 | tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4259 | tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, b & 31);
4260 | tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
4261 | } |
4262 | gen_op_mov_reg_v(ot, reg, cpu_T[0]); |
4263 | break; |
4264 | |
4265 | default: |
4266 | goto illegal_op; |
4267 | } |
4268 | break; |
4269 | |
4270 | default: |
4271 | goto illegal_op; |
4272 | } |
4273 | } else { |
4274 | /* generic MMX or SSE operation */ |
4275 | switch(b) { |
4276 | case 0x70: /* pshufx insn */ |
4277 | case 0xc6: /* pshufx insn */ |
4278 | case 0xc2: /* compare insns */ |
4279 | s->rip_offset = 1; |
4280 | break; |
4281 | default: |
4282 | break; |
4283 | } |
4284 | if (is_xmm) { |
4285 | op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4286 | if (mod != 3) {
4287 | gen_lea_modrm(env, s, modrm);
4288 | op2_offset = offsetof(CPUX86State,xmm_t0);
4289 | if (b1 >= 2 && ((b >= 0x50 && b <= 0x5f && b != 0x5b) ||
4290 | b == 0xc2)) {
4291 | /* specific case for SSE single instructions */
4292 | if (b1 == 2) {
4293 | /* 32 bit access */
4294 | gen_op_ld_v(s, MO_32, cpu_T[0], cpu_A0);
4295 | tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
4296 | } else {
4297 | /* 64 bit access */
4298 | gen_ldq_env_A0(s, offsetof(CPUX86State,
4299 | xmm_t0.XMM_D(0)));
4300 | }
4301 | } else {
4302 | gen_ldo_env_A0(s, op2_offset);
4303 | }
4304 | } else {
4305 | rm = (modrm & 7) | REX_B(s);
4306 | op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
4307 | }
4308 | } else {
4309 | op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4310 | if (mod != 3) {
4311 | gen_lea_modrm(env, s, modrm);
4312 | op2_offset = offsetof(CPUX86State,mmx_t0);
4313 | gen_ldq_env_A0(s, op2_offset);
4314 | } else {
4315 | rm = (modrm & 7);
4316 | op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4317 | } |
4318 | } |
4319 | switch(b) { |
4320 | case 0x0f: /* 3DNow! data insns */ |
4321 | if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
4322 | goto illegal_op;
4323 | val = cpu_ldub_code(env, s->pc++);
4324 | sse_fn_epp = sse_op_table5[val];
4325 | if (!sse_fn_epp) {
4326 | goto illegal_op;
4327 | }
4328 | tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4329 | tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4330 | sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4331 | break;
4332 | case 0x70: /* pshufx insn */
4333 | case 0xc6: /* pshufx insn */
4334 | val = cpu_ldub_code(env, s->pc++);
4335 | tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4336 | tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4337 | /* XXX: introduce a new table? */ |
4338 | sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp; |
4339 | sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val)); |
4340 | break; |
4341 | case 0xc2: |
4342 | /* compare insns */ |
4343 | val = cpu_ldub_code(env, s->pc++);
4344 | if (val >= 8)
4345 | goto illegal_op;
4346 | sse_fn_epp = sse_op_table4[val][b1];
4347 |
4348 | tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4349 | tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4350 | sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1); |
4351 | break; |
4352 | case 0xf7: |
4353 | /* maskmov : we must prepare A0 */ |
4354 | if (mod != 3) |
4355 | goto illegal_op; |
4356 | tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EDI]);
4357 | gen_extu(s->aflag, cpu_A0); |
4358 | gen_add_A0_ds_seg(s); |
4359 | |
4360 | tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4361 | tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4362 | /* XXX: introduce a new table? */ |
4363 | sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp; |
4364 | sse_fn_eppt(cpu_env, cpu_ptr0, cpu_ptr1, cpu_A0); |
4365 | break; |
4366 | default: |
4367 | tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4368 | tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4369 | sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1); |
4370 | break; |
4371 | } |
4372 | if (b == 0x2e || b == 0x2f) { |
4373 | set_cc_op(s, CC_OP_EFLAGS); |
4374 | } |
4375 | } |
4376 | } |
4377 | |
4378 | /* convert one instruction. s->is_jmp is set if the translation must |
4379 | be stopped. Return the next pc value */ |
4380 | static target_ulong disas_insn(CPUX86State *env, DisasContext *s, |
4381 | target_ulong pc_start) |
4382 | { |
4383 | int b, prefixes; |
4384 | int shift; |
4385 | TCGMemOp ot, aflag, dflag; |
4386 | int modrm, reg, rm, mod, op, opreg, val; |
4387 | target_ulong next_eip, tval; |
4388 | int rex_w, rex_r; |
4389 | |
4390 | if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
4391 | tcg_gen_debug_insn_start(pc_start); |
4392 | } |
4393 | s->pc = pc_start; |
4394 | prefixes = 0; |
4395 | s->override = -1; |
4396 | rex_w = -1; |
4397 | rex_r = 0; |
4398 | #ifdef TARGET_X86_64 |
4399 | s->rex_x = 0; |
4400 | s->rex_b = 0; |
4401 | x86_64_hregs = 0; |
4402 | #endif |
4403 | s->rip_offset = 0; /* for relative ip address */ |
4404 | s->vex_l = 0; |
4405 | s->vex_v = 0; |
4406 | next_byte: |
4407 | b = cpu_ldub_code(env, s->pc);
4408 | s->pc++; |
4409 | /* Collect prefixes. */ |
4410 | switch (b) { |
4411 | case 0xf3: |
4412 | prefixes |= PREFIX_REPZ;
4413 | goto next_byte;
4414 | case 0xf2:
4415 | prefixes |= PREFIX_REPNZ;
4416 | goto next_byte;
4417 | case 0xf0:
4418 | prefixes |= PREFIX_LOCK;
4419 | goto next_byte;
4420 | case 0x2e:
4421 | s->override = R_CS;
4422 | goto next_byte;
4423 | case 0x36:
4424 | s->override = R_SS;
4425 | goto next_byte;
4426 | case 0x3e:
4427 | s->override = R_DS;
4428 | goto next_byte;
4429 | case 0x26:
4430 | s->override = R_ES;
4431 | goto next_byte;
4432 | case 0x64:
4433 | s->override = R_FS;
4434 | goto next_byte;
4435 | case 0x65:
4436 | s->override = R_GS;
4437 | goto next_byte;
4438 | case 0x66:
4439 | prefixes |= PREFIX_DATA;
4440 | goto next_byte;
4441 | case 0x67:
4442 | prefixes |= PREFIX_ADR;
4443 | goto next_byte; |
4444 | #ifdef TARGET_X86_64 |
4445 | case 0x40 ... 0x4f: |
4446 | if (CODE64(s)) {
4447 | /* REX prefix */
4448 | rex_w = (b >> 3) & 1;
4449 | rex_r = (b & 0x4) << 1;
4450 | s->rex_x = (b & 0x2) << 2;
4451 | REX_B(s) = (b & 0x1) << 3;
4452 | x86_64_hregs = 1; /* select uniform byte register addressing */ |
4453 | goto next_byte; |
4454 | } |
4455 | break; |
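     | /* Worked example: REX byte 0x44 (0100 0100b) yields rex_w = 0,
     |    rex_r = 8, rex_x = 0, rex_b = 0, so the ModRM reg field later
     |    selects r8-r15 via 'reg = ((modrm >> 3) & 7) | rex_r'. */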
4456 | #endif |
4457 | case 0xc5: /* 2-byte VEX */ |
4458 | case 0xc4: /* 3-byte VEX */ |
4459 | /* VEX prefixes cannot be used except in 32-bit mode. |
4460 | Otherwise the instruction is LES or LDS. */ |
4461 | if (s->code32 && !s->vm86) { |
4462 | static const int pp_prefix[4] = { |
4463 | 0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ
4464 | };
4465 | int vex3, vex2 = cpu_ldub_code(env, s->pc);
4466 |
4467 | if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
4468 | /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
4469 | otherwise the instruction is LES or LDS. */
4470 | break;
4471 | }
4472 | s->pc++;
4473 |
4474 | /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
4475 | if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ
4476 | | PREFIX_LOCK | PREFIX_DATA)) {
4477 | goto illegal_op;
4478 | }
4479 | #ifdef TARGET_X86_64
4480 | if (x86_64_hregs) {
4481 | goto illegal_op;
4482 | }
4483 | #endif
4484 | rex_r = (~vex2 >> 4) & 8;
4485 | if (b == 0xc5) {
4486 | vex3 = vex2;
4487 | b = cpu_ldub_code(env, s->pc++);
4488 | } else {
4489 | #ifdef TARGET_X86_64
4490 | s->rex_x = (~vex2 >> 3) & 8;
4491 | s->rex_b = (~vex2 >> 2) & 8;
4492 | #endif
4493 | vex3 = cpu_ldub_code(env, s->pc++);
4494 | rex_w = (vex3 >> 7) & 1;
4495 | switch (vex2 & 0x1f) {
4496 | case 0x01: /* Implied 0f leading opcode bytes. */
4497 | b = cpu_ldub_code(env, s->pc++) | 0x100;
4498 | break;
4499 | case 0x02: /* Implied 0f 38 leading opcode bytes. */
4500 | b = 0x138;
4501 | break;
4502 | case 0x03: /* Implied 0f 3a leading opcode bytes. */
4503 | b = 0x13a;
4504 | break;
4505 | default: /* Reserved for future use. */
4506 | goto illegal_op;
4507 | }
4508 | }
4509 | s->vex_v = (~vex3 >> 3) & 0xf;
4510 | s->vex_l = (vex3 >> 2) & 1;
4511 | prefixes |= pp_prefix[vex3 & 3] | PREFIX_VEX;
4512 | } |
4513 | break; |
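     | /* Worked example: the 3-byte sequence c4 e2 79 decodes as
     |    vex2 = 0xe2 (inverted R/X/B all set, so rex_r/x/b = 0;
     |    mmmmm = 2 -> implied 0f 38 map) and vex3 = 0x79 (W = 0,
     |    vvvv = 1111b -> register 0 after inversion, L = 0,
     |    pp = 1 -> 66 data prefix). */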
4514 | } |
4515 | |
4516 | /* Post-process prefixes. */ |
4517 | if (CODE64(s)) {
4518 | /* In 64-bit mode, the default data size is 32-bit. Select 64-bit
4519 | data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
4520 | over 0x66 if both are present. */
4521 | dflag = (rex_w > 0 ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32);
4522 | /* In 64-bit mode, 0x67 selects 32-bit addressing. */
4523 | aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64);
4524 | } else {
4525 | /* In 16/32-bit mode, 0x66 selects the opposite data size. */
4526 | if (s->code32 ^ ((prefixes & PREFIX_DATA) != 0)) {
4527 | dflag = MO_32;
4528 | } else {
4529 | dflag = MO_16;
4530 | }
4531 | /* In 16/32-bit mode, 0x67 selects the opposite addressing. */
4532 | if (s->code32 ^ ((prefixes & PREFIX_ADR) != 0)) {
4533 | aflag = MO_32; |
4534 | } else { |
4535 | aflag = MO_16; |
4536 | } |
4537 | } |
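     | /* Example: in a 32-bit code segment with only a 0x66 prefix, dflag
     |    becomes MO_16 while aflag stays MO_32; in a 16-bit segment the
     |    same prefix selects MO_32 data. 0x67 toggles aflag the same way. */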
4538 | |
4539 | s->prefix = prefixes; |
4540 | s->aflag = aflag; |
4541 | s->dflag = dflag; |
4542 | |
4543 | /* lock generation */ |
4544 | if (prefixes & PREFIX_LOCK)
4545 | gen_helper_lock(); |
4546 | |
4547 | /* now check op code */ |
4548 | reswitch: |
4549 | switch(b) { |
4550 | case 0x0f: |
4551 | /**************************/ |
4552 | /* extended op code */ |
4553 | b = cpu_ldub_code(env, s->pc++) | 0x100;
4554 | goto reswitch; |
4555 | |
4556 | /**************************/ |
4557 | /* arith & logic */ |
4558 | case 0x00 ... 0x05: |
4559 | case 0x08 ... 0x0d: |
4560 | case 0x10 ... 0x15: |
4561 | case 0x18 ... 0x1d: |
4562 | case 0x20 ... 0x25: |
4563 | case 0x28 ... 0x2d: |
4564 | case 0x30 ... 0x35: |
4565 | case 0x38 ... 0x3d: |
4566 | { |
4567 | int op, f, val; |
4568 | op = (b >> 3) & 7; |
4569 | f = (b >> 1) & 3; |
4570 | |
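     | /* Encoding note: for opcodes 0x00-0x3d, bits 5:3 of 'b' select the
     |    ALU op (add/or/adc/sbb/and/sub/xor/cmp) and bits 2:1 pick the
     |    form: f = 0 Ev,Gv; f = 1 Gv,Ev; f = 2 AL/eAX,Iv. Bit 0 selects
     |    byte vs. word/dword operand size. */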
4571 | ot = mo_b_d(b, dflag); |
4572 | |
4573 | switch(f) { |
4574 | case 0: /* OP Ev, Gv */ |
4575 | modrm = cpu_ldub_code(env, s->pc++);
4576 | reg = ((modrm >> 3) & 7) | rex_r;
4577 | mod = (modrm >> 6) & 3;
4578 | rm = (modrm & 7) | REX_B(s);
4579 | if (mod != 3) {
4580 | gen_lea_modrm(env, s, modrm);
4581 | opreg = OR_TMP0;
4582 | } else if (op == OP_XORL && rm == reg) {
4583 | xor_zero:
4584 | /* xor reg, reg optimisation */
4585 | set_cc_op(s, CC_OP_CLR);
4586 | tcg_gen_movi_tl(cpu_T[0], 0);
4587 | gen_op_mov_reg_v(ot, reg, cpu_T[0]); |
4588 | break; |
4589 | } else { |
4590 | opreg = rm; |
4591 | } |
4592 | gen_op_mov_v_reg(ot, cpu_T[1], reg); |
4593 | gen_op(s, op, ot, opreg); |
4594 | break; |
4595 | case 1: /* OP Gv, Ev */ |
4596 | modrm = cpu_ldub_code(env, s->pc++);
4597 | mod = (modrm >> 6) & 3;
4598 | reg = ((modrm >> 3) & 7) | rex_r;
4599 | rm = (modrm & 7) | REX_B(s);
4600 | if (mod != 3) { |
4601 | gen_lea_modrm(env, s, modrm); |
4602 | gen_op_ld_v(s, ot, cpu_T[1], cpu_A0); |
4603 | } else if (op == OP_XORL && rm == reg) { |
4604 | goto xor_zero; |
4605 | } else { |
4606 | gen_op_mov_v_reg(ot, cpu_T[1], rm); |
4607 | } |
4608 | gen_op(s, op, ot, reg); |
4609 | break; |
4610 | case 2: /* OP A, Iv */ |
4611 | val = insn_get(env, s, ot); |
4612 | tcg_gen_movi_tl(cpu_T[1], val);
4613 | gen_op(s, op, ot, OR_EAX); |
4614 | break; |
4615 | } |
4616 | } |
4617 | break; |
4618 | |
4619 | case 0x82: |
4620 | if (CODE64(s))
4621 | goto illegal_op; |
4622 | case 0x80: /* GRP1 */ |
4623 | case 0x81: |
4624 | case 0x83: |
4625 | { |
4626 | int val; |
4627 | |
4628 | ot = mo_b_d(b, dflag); |
4629 | |
4630 | modrm = cpu_ldub_code(env, s->pc++);
4631 | mod = (modrm >> 6) & 3;
4632 | rm = (modrm & 7) | REX_B(s);
4633 | op = (modrm >> 3) & 7; |
4634 | |
4635 | if (mod != 3) { |
4636 | if (b == 0x83) |
4637 | s->rip_offset = 1; |
4638 | else |
4639 | s->rip_offset = insn_const_size(ot); |
4640 | gen_lea_modrm(env, s, modrm); |
4641 | opreg = OR_TMP0; |
4642 | } else { |
4643 | opreg = rm; |
4644 | } |
4645 | |
4646 | switch(b) { |
4647 | default: |
4648 | case 0x80: |
4649 | case 0x81: |
4650 | case 0x82: |
4651 | val = insn_get(env, s, ot); |
4652 | break; |
4653 | case 0x83: |
4654 | val = (int8_t)insn_get(env, s, MO_8); |
4655 | break; |
4656 | } |
4657 | tcg_gen_movi_tl(cpu_T[1], val);
4658 | gen_op(s, op, ot, opreg); |
4659 | } |
4660 | break; |
4661 | |
4662 | /**************************/ |
4663 | /* inc, dec, and other misc arith */ |
4664 | case 0x40 ... 0x47: /* inc Gv */ |
4665 | ot = dflag; |
4666 | gen_inc(s, ot, OR_EAX + (b & 7), 1); |
4667 | break; |
4668 | case 0x48 ... 0x4f: /* dec Gv */ |
4669 | ot = dflag; |
4670 | gen_inc(s, ot, OR_EAX + (b & 7), -1); |
4671 | break; |
4672 | case 0xf6: /* GRP3 */ |
4673 | case 0xf7: |
4674 | ot = mo_b_d(b, dflag); |
4675 | |
4676 | modrm = cpu_ldub_code(env, s->pc++);
4677 | mod = (modrm >> 6) & 3;
4678 | rm = (modrm & 7) | REX_B(s);
4679 | op = (modrm >> 3) & 7; |
4680 | if (mod != 3) { |
4681 | if (op == 0) |
4682 | s->rip_offset = insn_const_size(ot); |
4683 | gen_lea_modrm(env, s, modrm); |
4684 | gen_op_ld_v(s, ot, cpu_T[0], cpu_A0); |
4685 | } else { |
4686 | gen_op_mov_v_reg(ot, cpu_T[0], rm); |
4687 | } |
4688 | |
4689 | switch(op) { |
4690 | case 0: /* test */ |
4691 | val = insn_get(env, s, ot); |
4692 | tcg_gen_movi_tl(cpu_T[1], val);
4693 | gen_op_testl_T0_T1_cc();
4694 | set_cc_op(s, CC_OP_LOGICB + ot);
4695 | break;
4696 | case 2: /* not */
4697 | tcg_gen_not_tl(cpu_T[0], cpu_T[0]);
4698 | if (mod != 3) {
4699 | gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
4700 | } else {
4701 | gen_op_mov_reg_v(ot, rm, cpu_T[0]);
4702 | }
4703 | break;
4704 | case 3: /* neg */
4705 | tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
4706 | if (mod != 3) { |
4707 | gen_op_st_v(s, ot, cpu_T[0], cpu_A0); |
4708 | } else { |
4709 | gen_op_mov_reg_v(ot, rm, cpu_T[0]); |
4710 | } |
4711 | gen_op_update_neg_cc(); |
4712 | set_cc_op(s, CC_OP_SUBB + ot); |
4713 | break; |
4714 | case 4: /* mul */ |
4715 | switch(ot) { |
4716 | case MO_8: |
4717 | gen_op_mov_v_reg(MO_8, cpu_T[1], R_EAX);
4718 | tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
4719 | tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]);
4720 | /* XXX: use 32 bit mul which could be faster */
4721 | tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4722 | gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
4723 | tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4724 | tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00);
4725 | set_cc_op(s, CC_OP_MULB);
4726 | break;
4727 | case MO_16:
4728 | gen_op_mov_v_reg(MO_16, cpu_T[1], R_EAX);
4729 | tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
4730 | tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]);
4731 | /* XXX: use 32 bit mul which could be faster */
4732 | tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4733 | gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
4734 | tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4735 | tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4736 | gen_op_mov_reg_v(MO_16, R_EDX, cpu_T[0]);
4737 | tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4738 | set_cc_op(s, CC_OP_MULW);
4739 | break;
4740 | default:
4741 | case MO_32:
4742 | tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4743 | tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
4744 | tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
4745 | cpu_tmp2_i32, cpu_tmp3_i32);
4746 | tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
4747 | tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
4748 | tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4749 | tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
4750 | set_cc_op(s, CC_OP_MULL);
4751 | break;
4752 | #ifdef TARGET_X86_64
4753 | case MO_64:
4754 | tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
4755 | cpu_T[0], cpu_regs[R_EAX]);
4756 | tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4757 | tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
4758 | set_cc_op(s, CC_OP_MULQ); |
4759 | break; |
4760 | #endif |
4761 | } |
4762 | break; |
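     | /* Flags note: for MUL, cpu_cc_src receives the high half of the
     |    product (the 0xff00 bits for MO_8); CC_OP_MUL* then derives
     |    CF = OF = (high part != 0), matching the architectural rule. */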
4763 | case 5: /* imul */ |
4764 | switch(ot) { |
4765 | case MO_8: |
4766 | gen_op_mov_v_reg(MO_8, cpu_T[1], R_EAX);
4767 | tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
4768 | tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]);
4769 | /* XXX: use 32 bit mul which could be faster */
4770 | tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4771 | gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
4772 | tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4773 | tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]);
4774 | tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4775 | set_cc_op(s, CC_OP_MULB);
4776 | break;
4777 | case MO_16:
4778 | gen_op_mov_v_reg(MO_16, cpu_T[1], R_EAX);
4779 | tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4780 | tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
4781 | /* XXX: use 32 bit mul which could be faster */
4782 | tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4783 | gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
4784 | tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4785 | tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
4786 | tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4787 | tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4788 | gen_op_mov_reg_v(MO_16, R_EDX, cpu_T[0]);
4789 | set_cc_op(s, CC_OP_MULW);
4790 | break;
4791 | default:
4792 | case MO_32:
4793 | tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4794 | tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
4795 | tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
4796 | cpu_tmp2_i32, cpu_tmp3_i32);
4797 | tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
4798 | tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
4799 | tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
4800 | tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4801 | tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
4802 | tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
4803 | set_cc_op(s, CC_OP_MULL);
4804 | break;
4805 | #ifdef TARGET_X86_64
4806 | case MO_64:
4807 | tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
4808 | cpu_T[0], cpu_regs[R_EAX]);
4809 | tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4810 | tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
4811 | tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
4812 | set_cc_op(s, CC_OP_MULQ); |
4813 | break; |
4814 | #endif |
4815 | } |
4816 | break; |
4817 | case 6: /* div */ |
4818 | switch(ot) { |
4819 | case MO_8: |
4820 | gen_jmp_im(pc_start - s->cs_base); |
4821 | gen_helper_divb_AL(cpu_env, cpu_T[0]); |
4822 | break; |
4823 | case MO_16: |
4824 | gen_jmp_im(pc_start - s->cs_base); |
4825 | gen_helper_divw_AX(cpu_env, cpu_T[0]); |
4826 | break; |
4827 | default: |
4828 | case MO_32: |
4829 | gen_jmp_im(pc_start - s->cs_base); |
4830 | gen_helper_divl_EAX(cpu_env, cpu_T[0]); |
4831 | break; |
4832 | #ifdef TARGET_X86_64 |
4833 | case MO_64: |
4834 | gen_jmp_im(pc_start - s->cs_base); |
4835 | gen_helper_divq_EAX(cpu_env, cpu_T[0]); |
4836 | break; |
4837 | #endif |
4838 | } |
4839 | break; |
4840 | case 7: /* idiv */ |
4841 | switch(ot) { |
4842 | case MO_8: |
4843 | gen_jmp_im(pc_start - s->cs_base); |
4844 | gen_helper_idivb_AL(cpu_env, cpu_T[0]); |
4845 | break; |
4846 | case MO_16: |
4847 | gen_jmp_im(pc_start - s->cs_base); |
4848 | gen_helper_idivw_AX(cpu_env, cpu_T[0]); |
4849 | break; |
4850 | default: |
4851 | case MO_32: |
4852 | gen_jmp_im(pc_start - s->cs_base); |
4853 | gen_helper_idivl_EAX(cpu_env, cpu_T[0]); |
4854 | break; |
4855 | #ifdef TARGET_X86_64 |
4856 | case MO_64: |
4857 | gen_jmp_im(pc_start - s->cs_base); |
4858 | gen_helper_idivq_EAX(cpu_env, cpu_T[0]); |
4859 | break; |
4860 | #endif |
4861 | } |
4862 | break; |
4863 | default: |
4864 | goto illegal_op; |
4865 | } |
4866 | break; |
4867 | |
4868 | case 0xfe: /* GRP4 */ |
4869 | case 0xff: /* GRP5 */ |
4870 | ot = mo_b_d(b, dflag); |
4871 | |
4872 | modrm = cpu_ldub_code(env, s->pc++);
4873 | mod = (modrm >> 6) & 3;
4874 | rm = (modrm & 7) | REX_B(s);
4875 | op = (modrm >> 3) & 7; |
4876 | if (op >= 2 && b == 0xfe) { |
4877 | goto illegal_op; |
4878 | } |
4879 | if (CODE64(s)) {
4880 | if (op == 2 || op == 4) { |
4881 | /* operand size for jumps is 64 bit */ |
4882 | ot = MO_64; |
4883 | } else if (op == 3 || op == 5) { |
4884 | ot = dflag != MO_16 ? MO_32 + (rex_w == 1) : MO_16; |
4885 | } else if (op == 6) { |
4886 | /* default push size is 64 bit */ |
4887 | ot = mo_pushpop(s, dflag); |
4888 | } |
4889 | } |
4890 | if (mod != 3) { |
4891 | gen_lea_modrm(env, s, modrm); |
4892 | if (op >= 2 && op != 3 && op != 5) |
4893 | gen_op_ld_v(s, ot, cpu_T[0], cpu_A0); |
4894 | } else { |
4895 | gen_op_mov_v_reg(ot, cpu_T[0], rm); |
4896 | } |
4897 | |
4898 | switch(op) { |
4899 | case 0: /* inc Ev */ |
4900 | if (mod != 3) |
4901 | opreg = OR_TMP0; |
4902 | else |
4903 | opreg = rm; |
4904 | gen_inc(s, ot, opreg, 1); |
4905 | break; |
4906 | case 1: /* dec Ev */ |
4907 | if (mod != 3) |
4908 | opreg = OR_TMP0; |
4909 | else |
4910 | opreg = rm; |
4911 | gen_inc(s, ot, opreg, -1); |
4912 | break; |
4913 | case 2: /* call Ev */ |
4914 | /* XXX: optimize if memory (no 'and' is necessary) */ |
4915 | if (dflag == MO_16) { |
4916 | tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
4917 | }
4918 | next_eip = s->pc - s->cs_base;
4919 | tcg_gen_movi_tl(cpu_T[1], next_eip);
4920 | gen_push_v(s, cpu_T[1]); |
4921 | gen_op_jmp_v(cpu_T[0]); |
4922 | gen_eob(s); |
4923 | break; |
4924 | case 3: /* lcall Ev */ |
4925 | gen_op_ld_v(s, ot, cpu_T[1], cpu_A0); |
4926 | gen_add_A0_im(s, 1 << ot); |
4927 | gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0); |
4928 | do_lcall: |
4929 | if (s->pe && !s->vm86) { |
4930 | gen_update_cc_op(s); |
4931 | gen_jmp_im(pc_start - s->cs_base); |
4932 | tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4933 | gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
4934 | tcg_const_i32(dflag - 1),
4935 | tcg_const_i32(s->pc - pc_start));
4936 | } else {
4937 | tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4938 | gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T[1], |
4939 | tcg_const_i32(dflag - 1), |
4940 | tcg_const_i32(s->pc - s->cs_base)); |
4941 | } |
4942 | gen_eob(s); |
4943 | break; |
4944 | case 4: /* jmp Ev */ |
4945 | if (dflag == MO_16) { |
4946 | tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
4947 | }
4948 | gen_op_jmp_v(cpu_T[0]);
4949 | gen_eob(s);
4950 | break;
4951 | case 5: /* ljmp Ev */
4952 | gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
4953 | gen_add_A0_im(s, 1 << ot);
4954 | gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0);
4955 | do_ljmp:
4956 | if (s->pe && !s->vm86) {
4957 | gen_update_cc_op(s);
4958 | gen_jmp_im(pc_start - s->cs_base);
4959 | tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4960 | gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
4961 | tcg_const_i32(s->pc - pc_start));
4962 | } else {
4963 | gen_op_movl_seg_T0_vm(R_CS);
4964 | gen_op_jmp_v(cpu_T[1]); |
4965 | } |
4966 | gen_eob(s); |
4967 | break; |
4968 | case 6: /* push Ev */ |
4969 | gen_push_v(s, cpu_T[0]); |
4970 | break; |
4971 | default: |
4972 | goto illegal_op; |
4973 | } |
4974 | break; |
4975 | |
4976 | case 0x84: /* test Ev, Gv */ |
4977 | case 0x85: |
4978 | ot = mo_b_d(b, dflag); |
4979 | |
4980 | modrm = cpu_ldub_code(env, s->pc++);
4981 | reg = ((modrm >> 3) & 7) | rex_r; |
4982 | |
4983 | gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); |
4984 | gen_op_mov_v_reg(ot, cpu_T[1], reg); |
4985 | gen_op_testl_T0_T1_cc(); |
4986 | set_cc_op(s, CC_OP_LOGICB + ot); |
4987 | break; |
4988 | |
4989 | case 0xa8: /* test eAX, Iv */ |
4990 | case 0xa9: |
4991 | ot = mo_b_d(b, dflag); |
4992 | val = insn_get(env, s, ot); |
4993 | |
4994 | gen_op_mov_v_reg(ot, cpu_T[0], OR_EAX); |
4995 | tcg_gen_movi_tl(cpu_T[1], val);
4996 | gen_op_testl_T0_T1_cc(); |
4997 | set_cc_op(s, CC_OP_LOGICB + ot); |
4998 | break; |
4999 | |
5000 | case 0x98: /* CWDE/CBW */ |
5001 | switch (dflag) { |
5002 | #ifdef TARGET_X86_64 |
5003 | case MO_64: |
5004 | gen_op_mov_v_reg(MO_32, cpu_T[0], R_EAX);
5005 | tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
5006 | gen_op_mov_reg_v(MO_64, R_EAX, cpu_T[0]);
5007 | break;
5008 | #endif
5009 | case MO_32:
5010 | gen_op_mov_v_reg(MO_16, cpu_T[0], R_EAX);
5011 | tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5012 | gen_op_mov_reg_v(MO_32, R_EAX, cpu_T[0]);
5013 | break;
5014 | case MO_16:
5015 | gen_op_mov_v_reg(MO_8, cpu_T[0], R_EAX);
5016 | tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
5017 | gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
5018 | break;
5019 | default:
5020 | tcg_abort();
5021 | } |
5022 | break; |
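     | /* CBW/CWDE/CDQE: sign-extend AL into AX, AX into EAX, or EAX into
     |    RAX; dflag selects which widening the 0x98 opcode performs. */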
5023 | case 0x99: /* CDQ/CWD */ |
5024 | switch (dflag) { |
5025 | #ifdef TARGET_X86_64 |
5026 | case MO_64: |
5027 | gen_op_mov_v_reg(MO_64, cpu_T[0], R_EAX);
5028 | tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 63);
5029 | gen_op_mov_reg_v(MO_64, R_EDX, cpu_T[0]);
5030 | break;
5031 | #endif
5032 | case MO_32:
5033 | gen_op_mov_v_reg(MO_32, cpu_T[0], R_EAX);
5034 | tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
5035 | tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 31);
5036 | gen_op_mov_reg_v(MO_32, R_EDX, cpu_T[0]);
5037 | break;
5038 | case MO_16:
5039 | gen_op_mov_v_reg(MO_16, cpu_T[0], R_EAX);
5040 | tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5041 | tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 15);
5042 | gen_op_mov_reg_v(MO_16, R_EDX, cpu_T[0]);
5043 | break;
5044 | default:
5045 | tcg_abort();
5046 | } |
5047 | break; |
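     | /* CWD/CDQ/CQO: replicate the accumulator's sign bit into DX/EDX/RDX
     |    by an arithmetic right shift of 15/31/63, as emitted above. */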
5048 | case 0x1af: /* imul Gv, Ev */ |
5049 | case 0x69: /* imul Gv, Ev, I */ |
5050 | case 0x6b: |
5051 | ot = dflag; |
5052 | modrm = cpu_ldub_code(env, s->pc++);
5053 | reg = ((modrm >> 3) & 7) | rex_r; |
5054 | if (b == 0x69) |
5055 | s->rip_offset = insn_const_size(ot); |
5056 | else if (b == 0x6b) |
5057 | s->rip_offset = 1; |
5058 | gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); |
5059 | if (b == 0x69) { |
5060 | val = insn_get(env, s, ot); |
5061 | tcg_gen_movi_tl(cpu_T[1], val);
5062 | } else if (b == 0x6b) {
5063 | val = (int8_t)insn_get(env, s, MO_8);
5064 | tcg_gen_movi_tl(cpu_T[1], val);
5065 | } else { |
5066 | gen_op_mov_v_reg(ot, cpu_T[1], reg); |
5067 | } |
5068 | switch (ot) { |
5069 | #ifdef TARGET_X86_64 |
5070 | case MO_64: |
5071 | tcg_gen_muls2_i64(cpu_regs[reg], cpu_T[1], cpu_T[0], cpu_T[1]); |
5072 | tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
5073 | tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
5074 | tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_T[1]);
5075 | break; |
5076 | #endif |
5077 | case MO_32: |
5078 | tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5079 | tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
5080 | tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
5081 | cpu_tmp2_i32, cpu_tmp3_i32);
5082 | tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
5083 | tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
5084 | tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
5085 | tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
5086 | tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
5087 | break;
5088 | default:
5089 | tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5090 | tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
5091 | /* XXX: use 32 bit mul which could be faster */
5092 | tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
5093 | tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
5094 | tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
5095 | tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
5096 | gen_op_mov_reg_v(ot, reg, cpu_T[0]); |
5097 | break; |
5098 | } |
5099 | set_cc_op(s, CC_OP_MULB + ot); |
5100 | break; |
5101 | case 0x1c0: |
5102 | case 0x1c1: /* xadd Ev, Gv */ |
5103 | ot = mo_b_d(b, dflag); |
5104 | modrm = cpu_ldub_code(env, s->pc++);
5105 | reg = ((modrm >> 3) & 7) | rex_r;
5106 | mod = (modrm >> 6) & 3;
5107 | if (mod == 3) {
5108 | rm = (modrm & 7) | REX_B(s);
5109 | gen_op_mov_v_reg(ot, cpu_T[0], reg);
5110 | gen_op_mov_v_reg(ot, cpu_T[1], rm);
5111 | tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
5112 | gen_op_mov_reg_v(ot, reg, cpu_T[1]);
5113 | gen_op_mov_reg_v(ot, rm, cpu_T[0]);
5114 | } else {
5115 | gen_lea_modrm(env, s, modrm);
5116 | gen_op_mov_v_reg(ot, cpu_T[0], reg);
5117 | gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
5118 | tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
5119 | gen_op_st_v(s, ot, cpu_T[0], cpu_A0); |
5120 | gen_op_mov_reg_v(ot, reg, cpu_T[1]); |
5121 | } |
5122 | gen_op_update2_cc(); |
5123 | set_cc_op(s, CC_OP_ADDB + ot); |
5124 | break; |
5125 | case 0x1b0: |
5126 | case 0x1b1: /* cmpxchg Ev, Gv */ |
5127 | { |
5128 | int label1, label2; |
5129 | TCGv t0, t1, t2, a0;
5130 |
5131 | ot = mo_b_d(b, dflag);
5132 | modrm = cpu_ldub_code(env, s->pc++);
5133 | reg = ((modrm >> 3) & 7) | rex_r;
5134 | mod = (modrm >> 6) & 3;
5135 | t0 = tcg_temp_local_new();
5136 | t1 = tcg_temp_local_new();
5137 | t2 = tcg_temp_local_new();
5138 | a0 = tcg_temp_local_new();
5139 | gen_op_mov_v_reg(ot, t1, reg);
5140 | if (mod == 3) {
5141 | rm = (modrm & 7) | REX_B(s);
5142 | gen_op_mov_v_reg(ot, t0, rm);
5143 | } else {
5144 | gen_lea_modrm(env, s, modrm);
5145 | tcg_gen_mov_tl(a0, cpu_A0);
5146 | gen_op_ld_v(s, ot, t0, a0);
5147 | rm = 0; /* avoid warning */
5148 | }
5149 | label1 = gen_new_label();
5150 | tcg_gen_mov_tl(t2, cpu_regs[R_EAX]);
5151 | gen_extu(ot, t0);
5152 | gen_extu(ot, t2);
5153 | tcg_gen_brcond_tl(TCG_COND_EQ, t2, t0, label1);
5154 | label2 = gen_new_label();
5155 | if (mod == 3) {
5156 | gen_op_mov_reg_v(ot, R_EAX, t0);
5157 | tcg_gen_br(label2);
5158 | gen_set_label(label1);
5159 | gen_op_mov_reg_v(ot, rm, t1);
5160 | } else {
5161 | /* perform no-op store cycle like physical cpu; must be
5162 | before changing accumulator to ensure idempotency if
5163 | the store faults and the instruction is restarted */
5164 | gen_op_st_v(s, ot, t0, a0);
5165 | gen_op_mov_reg_v(ot, R_EAX, t0);
5166 | tcg_gen_br(label2);
5167 | gen_set_label(label1);
5168 | gen_op_st_v(s, ot, t1, a0);
5169 | }
5170 | gen_set_label(label2);
5171 | tcg_gen_mov_tl(cpu_cc_src, t0);
5172 | tcg_gen_mov_tl(cpu_cc_srcT, t2);
5173 | tcg_gen_sub_tl(cpu_cc_dst, t2, t0);
5174 | set_cc_op(s, CC_OP_SUBB + ot);
5175 | tcg_temp_free(t0);
5176 | tcg_temp_free(t1);
5177 | tcg_temp_free(t2);
5178 | tcg_temp_free(a0);
5179 | } |
5180 | break; |
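     | /* cmpxchg semantics, in pseudocode:
     |      if (accumulator == dest) { ZF = 1; dest = src; }
     |      else { ZF = 0; accumulator = dest; }
     |    The memory form always performs a store (possibly rewriting the
     |    old value) so a faulting store leaves the accumulator intact. */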
5181 | case 0x1c7: /* cmpxchg8b */ |
5182 | modrm = cpu_ldub_code(env, s->pc++);
5183 | mod = (modrm >> 6) & 3;
5184 | if ((mod == 3) || ((modrm & 0x38) != 0x8))
5185 | goto illegal_op;
5186 | #ifdef TARGET_X86_64
5187 | if (dflag == MO_64) {
5188 | if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
5189 | goto illegal_op;
5190 | gen_jmp_im(pc_start - s->cs_base);
5191 | gen_update_cc_op(s);
5192 | gen_lea_modrm(env, s, modrm);
5193 | gen_helper_cmpxchg16b(cpu_env, cpu_A0);
5194 | } else
5195 | #endif
5196 | {
5197 | if (!(s->cpuid_features & CPUID_CX8))
5198 | goto illegal_op; |
5199 | gen_jmp_im(pc_start - s->cs_base); |
5200 | gen_update_cc_op(s); |
5201 | gen_lea_modrm(env, s, modrm); |
5202 | gen_helper_cmpxchg8b(cpu_env, cpu_A0); |
5203 | } |
5204 | set_cc_op(s, CC_OP_EFLAGS); |
5205 | break; |
5206 | |
5207 | /**************************/ |
5208 | /* push/pop */ |
5209 | case 0x50 ... 0x57: /* push */ |
5210 | gen_op_mov_v_reg(MO_32, cpu_T[0], (b & 7) | REX_B(s));
5211 | gen_push_v(s, cpu_T[0]);
5212 | break;
5213 | case 0x58 ... 0x5f: /* pop */
5214 | ot = gen_pop_T0(s);
5215 | /* NOTE: order is important for pop %sp */
5216 | gen_pop_update(s, ot);
5217 | gen_op_mov_reg_v(ot, (b & 7) | REX_B(s), cpu_T[0]);
5218 | break;
5219 | case 0x60: /* pusha */
5220 | if (CODE64(s))
5221 | goto illegal_op;
5222 | gen_pusha(s);
5223 | break;
5224 | case 0x61: /* popa */
5225 | if (CODE64(s))
5226 | goto illegal_op; |
5227 | gen_popa(s); |
5228 | break; |
5229 | case 0x68: /* push Iv */ |
5230 | case 0x6a: |
5231 | ot = mo_pushpop(s, dflag); |
5232 | if (b == 0x68) |
5233 | val = insn_get(env, s, ot); |
5234 | else |
5235 | val = (int8_t)insn_get(env, s, MO_8); |
5236 | tcg_gen_movi_tl(cpu_T[0], val);
5237 | gen_push_v(s, cpu_T[0]);
5238 | break;
5239 | case 0x8f: /* pop Ev */
5240 | modrm = cpu_ldub_code(env, s->pc++);
5241 | mod = (modrm >> 6) & 3;
5242 | ot = gen_pop_T0(s);
5243 | if (mod == 3) {
5244 | /* NOTE: order is important for pop %sp */
5245 | gen_pop_update(s, ot);
5246 | rm = (modrm & 7) | REX_B(s);
5247 | gen_op_mov_reg_v(ot, rm, cpu_T[0]); |
5248 | } else { |
5249 | /* NOTE: order is important too for MMU exceptions */ |
5250 | s->popl_esp_hack = 1 << ot; |
5251 | gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); |
5252 | s->popl_esp_hack = 0; |
5253 | gen_pop_update(s, ot); |
5254 | } |
5255 | break; |
5256 | case 0xc8: /* enter */ |
5257 | { |
5258 | int level; |
5259 | val = cpu_lduw_code(env, s->pc);
5260 | s->pc += 2;
5261 | level = cpu_ldub_code(env, s->pc++);
5262 | gen_enter(s, val, level); |
5263 | } |
5264 | break; |
5265 | case 0xc9: /* leave */ |
5266 | /* XXX: exception not precise (ESP is updated before potential exception) */ |
5267 | if (CODE64(s)) {
5268 | gen_op_mov_v_reg(MO_64, cpu_T[0], R_EBP);
5269 | gen_op_mov_reg_v(MO_64, R_ESP, cpu_T[0]);
5270 | } else if (s->ss32) {
5271 | gen_op_mov_v_reg(MO_32, cpu_T[0], R_EBP);
5272 | gen_op_mov_reg_v(MO_32, R_ESP, cpu_T[0]);
5273 | } else {
5274 | gen_op_mov_v_reg(MO_16, cpu_T[0], R_EBP);
5275 | gen_op_mov_reg_v(MO_16, R_ESP, cpu_T[0]);
5276 | }
5277 | ot = gen_pop_T0(s);
5278 | gen_op_mov_reg_v(ot, R_EBP, cpu_T[0]);
5279 | gen_pop_update(s, ot); |
5280 | break; |
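     | /* leave is equivalent to 'mov (e/r)sp,(e/r)bp; pop (e/r)bp'; the
     |    CODE64/ss32 checks select the 64/32/16-bit stack width. */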
5281 | case 0x06: /* push es */ |
5282 | case 0x0e: /* push cs */ |
5283 | case 0x16: /* push ss */ |
5284 | case 0x1e: /* push ds */ |
5285 | if (CODE64(s))
5286 | goto illegal_op; |
5287 | gen_op_movl_T0_seg(b >> 3); |
5288 | gen_push_v(s, cpu_T[0]); |
5289 | break; |
5290 | case 0x1a0: /* push fs */ |
5291 | case 0x1a8: /* push gs */ |
5292 | gen_op_movl_T0_seg((b >> 3) & 7); |
5293 | gen_push_v(s, cpu_T[0]); |
5294 | break; |
5295 | case 0x07: /* pop es */ |
5296 | case 0x17: /* pop ss */ |
5297 | case 0x1f: /* pop ds */ |
5298 | if (CODE64(s))
5299 | goto illegal_op; |
5300 | reg = b >> 3; |
5301 | ot = gen_pop_T0(s); |
5302 | gen_movl_seg_T0(s, reg, pc_start - s->cs_base); |
5303 | gen_pop_update(s, ot); |
5304 | if (reg == R_SS) {
5305 | /* if reg == SS, inhibit interrupts/trace. */
5306 | /* If several instructions disable interrupts, only the
5307 | _first_ does it */
5308 | if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
5309 | gen_helper_set_inhibit_irq(cpu_env); |
5310 | s->tf = 0; |
5311 | } |
5312 | if (s->is_jmp) { |
5313 | gen_jmp_im(s->pc - s->cs_base); |
5314 | gen_eob(s); |
5315 | } |
5316 | break; |
5317 | case 0x1a1: /* pop fs */ |
5318 | case 0x1a9: /* pop gs */ |
5319 | ot = gen_pop_T0(s); |
5320 | gen_movl_seg_T0(s, (b >> 3) & 7, pc_start - s->cs_base); |
5321 | gen_pop_update(s, ot); |
5322 | if (s->is_jmp) { |
5323 | gen_jmp_im(s->pc - s->cs_base); |
5324 | gen_eob(s); |
5325 | } |
5326 | break; |
5327 | |
5328 | /**************************/ |
5329 | /* mov */ |
5330 | case 0x88: |
5331 | case 0x89: /* mov Gv, Ev */ |
5332 | ot = mo_b_d(b, dflag); |
5333 | modrm = cpu_ldub_code(env, s->pc++);
5334 | reg = ((modrm >> 3) & 7) | rex_r; |
5335 | |
5336 | /* generate a generic store */ |
5337 | gen_ldst_modrm(env, s, modrm, ot, reg, 1); |
5338 | break; |
5339 | case 0xc6: |
5340 | case 0xc7: /* mov Ev, Iv */ |
5341 | ot = mo_b_d(b, dflag); |
5342 | modrm = cpu_ldub_code(env, s->pc++);
5343 | mod = (modrm >> 6) & 3; |
5344 | if (mod != 3) { |
5345 | s->rip_offset = insn_const_size(ot); |
5346 | gen_lea_modrm(env, s, modrm); |
5347 | } |
5348 | val = insn_get(env, s, ot); |
5349 | tcg_gen_movi_tl(cpu_T[0], val);
5350 | if (mod != 3) {
5351 | gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
5352 | } else {
5353 | gen_op_mov_reg_v(ot, (modrm & 7) | REX_B(s), cpu_T[0]);
5354 | } |
5355 | break; |
5356 | case 0x8a: |
5357 | case 0x8b: /* mov Ev, Gv */ |
5358 | ot = mo_b_d(b, dflag); |
5359 | modrm = cpu_ldub_code(env, s->pc++);
5360 | reg = ((modrm >> 3) & 7) | rex_r; |
5361 | |
5362 | gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); |
5363 | gen_op_mov_reg_v(ot, reg, cpu_T[0]); |
5364 | break; |
5365 | case 0x8e: /* mov seg, Gv */ |
5366 | modrm = cpu_ldub_code(env, s->pc++);
5367 | reg = (modrm >> 3) & 7;
5368 | if (reg >= 6 || reg == R_CS)
5369 | goto illegal_op; |
5370 | gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); |
5371 | gen_movl_seg_T0(s, reg, pc_start - s->cs_base); |
5372 | if (reg == R_SS) {
5373 | /* if reg == SS, inhibit interrupts/trace */
5374 | /* If several instructions disable interrupts, only the
5375 | _first_ does it */
5376 | if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
5377 | gen_helper_set_inhibit_irq(cpu_env); |
5378 | s->tf = 0; |
5379 | } |
5380 | if (s->is_jmp) { |
5381 | gen_jmp_im(s->pc - s->cs_base); |
5382 | gen_eob(s); |
5383 | } |
5384 | break; |
5385 | case 0x8c: /* mov Gv, seg */ |
5386 | modrm = cpu_ldub_code(env, s->pc++);
5387 | reg = (modrm >> 3) & 7; |
5388 | mod = (modrm >> 6) & 3; |
5389 | if (reg >= 6) |
5390 | goto illegal_op; |
5391 | gen_op_movl_T0_seg(reg); |
5392 | ot = mod == 3 ? dflag : MO_16; |
5393 | gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); |
5394 | break; |
5395 | |
5396 | case 0x1b6: /* movzbS Gv, Eb */ |
5397 | case 0x1b7: /* movzwS Gv, Eb */ |
5398 | case 0x1be: /* movsbS Gv, Eb */ |
5399 | case 0x1bf: /* movswS Gv, Eb */ |
5400 | { |
5401 | TCGMemOp d_ot; |
5402 | TCGMemOp s_ot; |
5403 | |
5404 | /* d_ot is the size of destination */ |
5405 | d_ot = dflag; |
5406 | /* ot is the size of source */ |
5407 | ot = (b & 1) + MO_8; |
5408 | /* s_ot is the sign+size of source */ |
5409 | s_ot = b & 8 ? MO_SIGN | ot : ot; |
5410 | |
5411 | modrm = cpu_ldub_code(env, s->pc++);
5412 | reg = ((modrm >> 3) & 7) | rex_r;
5413 | mod = (modrm >> 6) & 3;
5414 | rm = (modrm & 7) | REX_B(s);
5415 | |
5416 | if (mod == 3) { |
5417 | gen_op_mov_v_reg(ot, cpu_T[0], rm); |
5418 | switch (s_ot) { |
5419 | case MO_UB: |
5420 | tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
5421 | break;
5422 | case MO_SB:
5423 | tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
5424 | break;
5425 | case MO_UW:
5426 | tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
5427 | break;
5428 | default:
5429 | case MO_SW:
5430 | tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5431 | break; |
5432 | } |
5433 | gen_op_mov_reg_v(d_ot, reg, cpu_T[0]); |
5434 | } else { |
5435 | gen_lea_modrm(env, s, modrm); |
5436 | gen_op_ld_v(s, s_ot, cpu_T[0], cpu_A0); |
5437 | gen_op_mov_reg_v(d_ot, reg, cpu_T[0]); |
5438 | } |
5439 | } |
5440 | break; |
5441 | |
5442 | case 0x8d: /* lea */ |
5443 | ot = dflag; |
5444 | modrm = cpu_ldub_code(env, s->pc++);
5445 | mod = (modrm >> 6) & 3; |
5446 | if (mod == 3) |
5447 | goto illegal_op; |
5448 | reg = ((modrm >> 3) & 7) | rex_r; |
5449 | /* we must ensure that no segment is added */ |
5450 | s->override = -1; |
5451 | val = s->addseg; |
5452 | s->addseg = 0; |
5453 | gen_lea_modrm(env, s, modrm); |
5454 | s->addseg = val; |
5455 | gen_op_mov_reg_v(ot, reg, cpu_A0); |
5456 | break; |
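     | /* lea stores the raw effective address, so no segment base must be
     |    added; hence override = -1 and addseg temporarily cleared above. */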
5457 | |
5458 | case 0xa0: /* mov EAX, Ov */ |
5459 | case 0xa1: |
5460 | case 0xa2: /* mov Ov, EAX */ |
5461 | case 0xa3: |
5462 | { |
5463 | target_ulong offset_addr; |
5464 | |
5465 | ot = mo_b_d(b, dflag); |
5466 | switch (s->aflag) { |
5467 | #ifdef TARGET_X86_64 |
5468 | case MO_64: |
5469 | offset_addr = cpu_ldq_code(env, s->pc);
5470 | s->pc += 8; |
5471 | break; |
5472 | #endif |
5473 | default: |
5474 | offset_addr = insn_get(env, s, s->aflag); |
5475 | break; |
5476 | } |
5477 | tcg_gen_movi_tl(cpu_A0, offset_addr);
5478 | gen_add_A0_ds_seg(s); |
5479 | if ((b & 2) == 0) { |
5480 | gen_op_ld_v(s, ot, cpu_T[0], cpu_A0); |
5481 | gen_op_mov_reg_v(ot, R_EAX, cpu_T[0]);
5482 | } else {
5483 | gen_op_mov_v_reg(ot, cpu_T[0], R_EAX);
5484 | gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
5485 | } |
5486 | } |
5487 | break; |
5488 | case 0xd7: /* xlat */ |
5489 | tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EBX]);
5490 | tcg_gen_ext8u_tl(cpu_T[0], cpu_regs[R_EAX]);
5491 | tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
5492 | gen_extu(s->aflag, cpu_A0);
5493 | gen_add_A0_ds_seg(s);
5494 | gen_op_ld_v(s, MO_8, cpu_T[0], cpu_A0);
5495 | gen_op_mov_reg_v(MO_8, R_EAX, cpu_T[0]);
5496 | break; |
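     | /* xlat: AL = [seg:(E)BX + ZeroExtend(AL)], built above as A0 = EBX
     |    + zext8(EAX), truncated to the address size, plus the DS-class
     |    segment base. */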
5497 | case 0xb0 ... 0xb7: /* mov R, Ib */ |
5498 | val = insn_get(env, s, MO_8);
5499 | tcg_gen_movi_tl(cpu_T[0], val);
5500 | gen_op_mov_reg_v(MO_8, (b & 7) | REX_B(s), cpu_T[0]);
5501 | break; |
5502 | case 0xb8 ... 0xbf: /* mov R, Iv */ |
5503 | #ifdef TARGET_X86_64 |
5504 | if (dflag == MO_64) { |
5505 | uint64_t tmp; |
5506 | /* 64 bit case */ |
5507 | tmp = cpu_ldq_code(env, s->pc);
5508 | s->pc += 8;
5509 | reg = (b & 7) | REX_B(s);
5510 | tcg_gen_movi_tl(cpu_T[0], tmp);
5511 | gen_op_mov_reg_v(MO_64, reg, cpu_T[0]); |
5512 | } else |
5513 | #endif |
5514 | { |
5515 | ot = dflag; |
5516 | val = insn_get(env, s, ot); |
5517 | reg = (b & 7) | REX_B(s);
5518 | tcg_gen_movi_tl(cpu_T[0], val);
5519 | gen_op_mov_reg_v(ot, reg, cpu_T[0]); |
5520 | } |
5521 | break; |
5522 | |
5523 | case 0x91 ... 0x97: /* xchg R, EAX */ |
5524 | do_xchg_reg_eax: |
5525 | ot = dflag; |
5526 | reg = (b & 7) | REX_B(s);
5527 | rm = R_EAX;
5528 | goto do_xchg_reg; |
5529 | case 0x86: |
5530 | case 0x87: /* xchg Ev, Gv */ |
5531 | ot = mo_b_d(b, dflag); |
5532 | modrm = cpu_ldub_code(env, s->pc++);
5533 | reg = ((modrm >> 3) & 7) | rex_r; |
5534 | mod = (modrm >> 6) & 3; |
5535 | if (mod == 3) { |
5536 | rm = (modrm & 7) | REX_B(s)0; |
5537 | do_xchg_reg: |
5538 | gen_op_mov_v_reg(ot, cpu_T[0], reg); |
5539 | gen_op_mov_v_reg(ot, cpu_T[1], rm); |
5540 | gen_op_mov_reg_v(ot, rm, cpu_T[0]); |
5541 | gen_op_mov_reg_v(ot, reg, cpu_T[1]); |
5542 | } else { |
5543 | gen_lea_modrm(env, s, modrm); |
5544 | gen_op_mov_v_reg(ot, cpu_T[0], reg); |
5545 | /* for xchg, lock is implicit */ |
5546 | if (!(prefixes & PREFIX_LOCK))
5547 | gen_helper_lock();
5548 | gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
5549 | gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
5550 | if (!(prefixes & PREFIX_LOCK))
5551 | gen_helper_unlock(); |
5552 | gen_op_mov_reg_v(ot, reg, cpu_T[1]); |
5553 | } |
5554 | break; |
5555 | case 0xc4: /* les Gv */ |
5556 | /* In CODE64 this is VEX3; see above. */ |
5557 | op = R_ES;
5558 | goto do_lxx;
5559 | case 0xc5: /* lds Gv */
5560 | /* In CODE64 this is VEX2; see above. */
5561 | op = R_DS;
5562 | goto do_lxx;
5563 | case 0x1b2: /* lss Gv */
5564 | op = R_SS;
5565 | goto do_lxx;
5566 | case 0x1b4: /* lfs Gv */
5567 | op = R_FS;
5568 | goto do_lxx;
5569 | case 0x1b5: /* lgs Gv */
5570 | op = R_GS;
5571 | do_lxx:
5572 | ot = dflag != MO_16 ? MO_32 : MO_16;
5573 | modrm = cpu_ldub_code(env, s->pc++);
5574 | reg = ((modrm >> 3) & 7) | rex_r; |
5575 | mod = (modrm >> 6) & 3; |
5576 | if (mod == 3) |
5577 | goto illegal_op; |
5578 | gen_lea_modrm(env, s, modrm); |
5579 | gen_op_ld_v(s, ot, cpu_T[1], cpu_A0); |
5580 | gen_add_A0_im(s, 1 << ot); |
5581 | /* load the segment first to handle exceptions properly */ |
5582 | gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0); |
5583 | gen_movl_seg_T0(s, op, pc_start - s->cs_base); |
5584 | /* then put the data */ |
5585 | gen_op_mov_reg_v(ot, reg, cpu_T[1]); |
5586 | if (s->is_jmp) { |
5587 | gen_jmp_im(s->pc - s->cs_base); |
5588 | gen_eob(s); |
5589 | } |
5590 | break; |
5591 | |
5592 | /************************/ |
5593 | /* shifts */ |
5594 | case 0xc0: |
5595 | case 0xc1: |
5596 | /* shift Ev,Ib */ |
5597 | shift = 2; |
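     | /* note: 'shift' selects the count source for the shared grp2 code below: 2 = immediate byte, 1 = constant 1, 0 = CL */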
5598 | grp2: |
5599 | { |
5600 | ot = mo_b_d(b, dflag); |
5601 | modrm = cpu_ldub_code(env, s->pc++);
5602 | mod = (modrm >> 6) & 3; |
5603 | op = (modrm >> 3) & 7; |
5604 | |
5605 | if (mod != 3) { |
5606 | if (shift == 2) { |
5607 | s->rip_offset = 1; |
5608 | } |
5609 | gen_lea_modrm(env, s, modrm); |
5610 | opreg = OR_TMP0; |
5611 | } else { |
5612 | opreg = (modrm & 7) | REX_B(s);
5613 | } |
5614 | |
5615 | /* simpler op */ |
5616 | if (shift == 0) { |
5617 | gen_shift(s, op, ot, opreg, OR_ECX); |
5618 | } else { |
5619 | if (shift == 2) { |
5620 | shift = cpu_ldub_code(env, s->pc++);
5621 | } |
5622 | gen_shifti(s, op, ot, opreg, shift); |
5623 | } |
5624 | } |
5625 | break; |
5626 | case 0xd0: |
5627 | case 0xd1: |
5628 | /* shift Ev,1 */ |
5629 | shift = 1; |
5630 | goto grp2; |
5631 | case 0xd2: |
5632 | case 0xd3: |
5633 | /* shift Ev,cl */ |
5634 | shift = 0; |
5635 | goto grp2; |
5636 | |
5637 | case 0x1a4: /* shld imm */ |
5638 | op = 0; |
5639 | shift = 1; |
5640 | goto do_shiftd; |
5641 | case 0x1a5: /* shld cl */ |
5642 | op = 0; |
5643 | shift = 0; |
5644 | goto do_shiftd; |
5645 | case 0x1ac: /* shrd imm */ |
5646 | op = 1; |
5647 | shift = 1; |
5648 | goto do_shiftd; |
5649 | case 0x1ad: /* shrd cl */ |
5650 | op = 1; |
5651 | shift = 0; |
5652 | do_shiftd: |
5653 | ot = dflag; |
5654 | modrm = cpu_ldub_code(env, s->pc++);
5655 | mod = (modrm >> 6) & 3;
5656 | rm = (modrm & 7) | REX_B(s);
5657 | reg = ((modrm >> 3) & 7) | rex_r; |
5658 | if (mod != 3) { |
5659 | gen_lea_modrm(env, s, modrm); |
5660 | opreg = OR_TMP0; |
5661 | } else { |
5662 | opreg = rm; |
5663 | } |
5664 | gen_op_mov_v_reg(ot, cpu_T[1], reg); |
5665 | |
5666 | if (shift) { |
5667 | TCGv imm = tcg_const_tl(cpu_ldub_code(env, s->pc++));
5668 | gen_shiftd_rm_T1(s, ot, opreg, op, imm);
5669 | tcg_temp_free(imm);
5670 | } else {
5671 | gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
5672 | } |
5673 | break; |
5674 | |
5675 | /************************/ |
5676 | /* floats */ |
5677 | case 0xd8 ... 0xdf: |
5678 | if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
5679 | /* if CR0.EM or CR0.TS are set, generate an FPU exception */
5680 | /* XXX: what to do if illegal op ? */
5681 | gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
5682 | break;
5683 | }
5684 | modrm = cpu_ldub_code(env, s->pc++);
5685 | mod = (modrm >> 6) & 3; |
5686 | rm = modrm & 7; |
5687 | op = ((b & 7) << 3) | ((modrm >> 3) & 7); |
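     | /* note: 'op' packs the low three opcode bits (0xd8..0xdf) with the modrm reg field, giving a single index over the whole x87 map */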
5688 | if (mod != 3) { |
5689 | /* memory op */ |
5690 | gen_lea_modrm(env, s, modrm); |
5691 | switch(op) { |
5692 | case 0x00 ... 0x07: /* fxxxs */ |
5693 | case 0x10 ... 0x17: /* fixxxl */ |
5694 | case 0x20 ... 0x27: /* fxxxl */ |
5695 | case 0x30 ... 0x37: /* fixxx */ |
5696 | { |
5697 | int op1; |
5698 | op1 = op & 7; |
5699 | |
5700 | switch(op >> 4) { |
5701 | case 0: |
5702 | tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, |
5703 | s->mem_index, MO_LEUL); |
5704 | gen_helper_flds_FT0(cpu_env, cpu_tmp2_i32); |
5705 | break; |
5706 | case 1: |
5707 | tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, |
5708 | s->mem_index, MO_LEUL); |
5709 | gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32); |
5710 | break; |
5711 | case 2: |
5712 | tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, |
5713 | s->mem_index, MO_LEQ); |
5714 | gen_helper_fldl_FT0(cpu_env, cpu_tmp1_i64); |
5715 | break; |
5716 | case 3: |
5717 | default: |
5718 | tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, |
5719 | s->mem_index, MO_LESW); |
5720 | gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32); |
5721 | break; |
5722 | } |
5723 | |
5724 | gen_helper_fp_arith_ST0_FT0(op1); |
5725 | if (op1 == 3) { |
5726 | /* fcomp needs pop */ |
5727 | gen_helper_fpop(cpu_env); |
5728 | } |
5729 | } |
5730 | break; |
5731 | case 0x08: /* flds */ |
5732 | case 0x0a: /* fsts */ |
5733 | case 0x0b: /* fstps */ |
5734 | case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */ |
5735 | case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */ |
5736 | case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */ |
5737 | switch(op & 7) { |
5738 | case 0: |
5739 | switch(op >> 4) { |
5740 | case 0: |
5741 | tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, |
5742 | s->mem_index, MO_LEUL); |
5743 | gen_helper_flds_ST0(cpu_env, cpu_tmp2_i32); |
5744 | break; |
5745 | case 1: |
5746 | tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, |
5747 | s->mem_index, MO_LEUL); |
5748 | gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32); |
5749 | break; |
5750 | case 2: |
5751 | tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, |
5752 | s->mem_index, MO_LEQ); |
5753 | gen_helper_fldl_ST0(cpu_env, cpu_tmp1_i64); |
5754 | break; |
5755 | case 3: |
5756 | default: |
5757 | tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, |
5758 | s->mem_index, MO_LESW); |
5759 | gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32); |
5760 | break; |
5761 | } |
5762 | break; |
5763 | case 1: |
5764 | /* XXX: the corresponding CPUID bit must be tested ! */ |
5765 | switch(op >> 4) { |
5766 | case 1: |
5767 | gen_helper_fisttl_ST0(cpu_tmp2_i32, cpu_env); |
5768 | tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0, |
5769 | s->mem_index, MO_LEUL); |
5770 | break; |
5771 | case 2: |
5772 | gen_helper_fisttll_ST0(cpu_tmp1_i64, cpu_env); |
5773 | tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, |
5774 | s->mem_index, MO_LEQ); |
5775 | break; |
5776 | case 3: |
5777 | default: |
5778 | gen_helper_fistt_ST0(cpu_tmp2_i32, cpu_env); |
5779 | tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0, |
5780 | s->mem_index, MO_LEUW); |
5781 | break; |
5782 | } |
5783 | gen_helper_fpop(cpu_env); |
5784 | break; |
5785 | default: |
5786 | switch(op >> 4) { |
5787 | case 0: |
5788 | gen_helper_fsts_ST0(cpu_tmp2_i32, cpu_env); |
5789 | tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0, |
5790 | s->mem_index, MO_LEUL); |
5791 | break; |
5792 | case 1: |
5793 | gen_helper_fistl_ST0(cpu_tmp2_i32, cpu_env); |
5794 | tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0, |
5795 | s->mem_index, MO_LEUL); |
5796 | break; |
5797 | case 2: |
5798 | gen_helper_fstl_ST0(cpu_tmp1_i64, cpu_env); |
5799 | tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, |
5800 | s->mem_index, MO_LEQ); |
5801 | break; |
5802 | case 3: |
5803 | default: |
5804 | gen_helper_fist_ST0(cpu_tmp2_i32, cpu_env); |
5805 | tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0, |
5806 | s->mem_index, MO_LEUW); |
5807 | break; |
5808 | } |
5809 | if ((op & 7) == 3) |
5810 | gen_helper_fpop(cpu_env); |
5811 | break; |
5812 | } |
5813 | break; |
5814 | case 0x0c: /* fldenv mem */ |
5815 | gen_update_cc_op(s); |
5816 | gen_jmp_im(pc_start - s->cs_base); |
5817 | gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1)); |
5818 | break; |
5819 | case 0x0d: /* fldcw mem */ |
5820 | tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, |
5821 | s->mem_index, MO_LEUW); |
5822 | gen_helper_fldcw(cpu_env, cpu_tmp2_i32); |
5823 | break; |
5824 | case 0x0e: /* fnstenv mem */ |
5825 | gen_update_cc_op(s); |
5826 | gen_jmp_im(pc_start - s->cs_base); |
5827 | gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1)); |
5828 | break; |
5829 | case 0x0f: /* fnstcw mem */ |
5830 | gen_helper_fnstcw(cpu_tmp2_i32, cpu_env); |
5831 | tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0, |
5832 | s->mem_index, MO_LEUW); |
5833 | break; |
5834 | case 0x1d: /* fldt mem */ |
5835 | gen_update_cc_op(s); |
5836 | gen_jmp_im(pc_start - s->cs_base); |
5837 | gen_helper_fldt_ST0(cpu_env, cpu_A0); |
5838 | break; |
5839 | case 0x1f: /* fstpt mem */ |
5840 | gen_update_cc_op(s); |
5841 | gen_jmp_im(pc_start - s->cs_base); |
5842 | gen_helper_fstt_ST0(cpu_env, cpu_A0); |
5843 | gen_helper_fpop(cpu_env); |
5844 | break; |
5845 | case 0x2c: /* frstor mem */ |
5846 | gen_update_cc_op(s); |
5847 | gen_jmp_im(pc_start - s->cs_base); |
5848 | gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(dflag - 1)); |
5849 | break; |
5850 | case 0x2e: /* fnsave mem */ |
5851 | gen_update_cc_op(s); |
5852 | gen_jmp_im(pc_start - s->cs_base); |
5853 | gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(dflag - 1)); |
5854 | break; |
5855 | case 0x2f: /* fnstsw mem */ |
5856 | gen_helper_fnstsw(cpu_tmp2_i32, cpu_env); |
5857 | tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0, |
5858 | s->mem_index, MO_LEUW); |
5859 | break; |
5860 | case 0x3c: /* fbld */ |
5861 | gen_update_cc_op(s); |
5862 | gen_jmp_im(pc_start - s->cs_base); |
5863 | gen_helper_fbld_ST0(cpu_env, cpu_A0); |
5864 | break; |
5865 | case 0x3e: /* fbstp */ |
5866 | gen_update_cc_op(s); |
5867 | gen_jmp_im(pc_start - s->cs_base); |
5868 | gen_helper_fbst_ST0(cpu_env, cpu_A0); |
5869 | gen_helper_fpop(cpu_env); |
5870 | break; |
5871 | case 0x3d: /* fildll */ |
5872 | tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ); |
5873 | gen_helper_fildll_ST0(cpu_env, cpu_tmp1_i64); |
5874 | break; |
5875 | case 0x3f: /* fistpll */ |
5876 | gen_helper_fistll_ST0(cpu_tmp1_i64, cpu_env); |
5877 | tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ); |
5878 | gen_helper_fpop(cpu_env); |
5879 | break; |
5880 | default: |
5881 | goto illegal_op; |
5882 | } |
5883 | } else { |
5884 | /* register float ops */ |
5885 | opreg = rm; |
5886 | |
5887 | switch(op) { |
5888 | case 0x08: /* fld sti */ |
5889 | gen_helper_fpush(cpu_env); |
5890 | gen_helper_fmov_ST0_STN(cpu_env, |
5891 | tcg_const_i32((opreg + 1) & 7)); |
5892 | break; |
5893 | case 0x09: /* fxchg sti */ |
5894 | case 0x29: /* fxchg4 sti, undocumented op */ |
5895 | case 0x39: /* fxchg7 sti, undocumented op */ |
5896 | gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg)); |
5897 | break; |
5898 | case 0x0a: /* grp d9/2 */ |
5899 | switch(rm) { |
5900 | case 0: /* fnop */ |
5901 | /* check exceptions (FreeBSD FPU probe) */ |
5902 | gen_update_cc_op(s); |
5903 | gen_jmp_im(pc_start - s->cs_base); |
5904 | gen_helper_fwait(cpu_env); |
5905 | break; |
5906 | default: |
5907 | goto illegal_op; |
5908 | } |
5909 | break; |
5910 | case 0x0c: /* grp d9/4 */ |
5911 | switch(rm) { |
5912 | case 0: /* fchs */ |
5913 | gen_helper_fchs_ST0(cpu_env); |
5914 | break; |
5915 | case 1: /* fabs */ |
5916 | gen_helper_fabs_ST0(cpu_env); |
5917 | break; |
5918 | case 4: /* ftst */ |
5919 | gen_helper_fldz_FT0(cpu_env); |
5920 | gen_helper_fcom_ST0_FT0(cpu_env); |
5921 | break; |
5922 | case 5: /* fxam */ |
5923 | gen_helper_fxam_ST0(cpu_env); |
5924 | break; |
5925 | default: |
5926 | goto illegal_op; |
5927 | } |
5928 | break; |
5929 | case 0x0d: /* grp d9/5 */ |
5930 | { |
5931 | switch(rm) { |
5932 | case 0: |
5933 | gen_helper_fpush(cpu_env); |
5934 | gen_helper_fld1_ST0(cpu_env); |
5935 | break; |
5936 | case 1: |
5937 | gen_helper_fpush(cpu_env); |
5938 | gen_helper_fldl2t_ST0(cpu_env); |
5939 | break; |
5940 | case 2: |
5941 | gen_helper_fpush(cpu_env); |
5942 | gen_helper_fldl2e_ST0(cpu_env); |
5943 | break; |
5944 | case 3: |
5945 | gen_helper_fpush(cpu_env); |
5946 | gen_helper_fldpi_ST0(cpu_env); |
5947 | break; |
5948 | case 4: |
5949 | gen_helper_fpush(cpu_env); |
5950 | gen_helper_fldlg2_ST0(cpu_env); |
5951 | break; |
5952 | case 5: |
5953 | gen_helper_fpush(cpu_env); |
5954 | gen_helper_fldln2_ST0(cpu_env); |
5955 | break; |
5956 | case 6: |
5957 | gen_helper_fpush(cpu_env); |
5958 | gen_helper_fldz_ST0(cpu_env); |
5959 | break; |
5960 | default: |
5961 | goto illegal_op; |
5962 | } |
5963 | } |
5964 | break; |
5965 | case 0x0e: /* grp d9/6 */ |
5966 | switch(rm) { |
5967 | case 0: /* f2xm1 */ |
5968 | gen_helper_f2xm1(cpu_env); |
5969 | break; |
5970 | case 1: /* fyl2x */ |
5971 | gen_helper_fyl2x(cpu_env); |
5972 | break; |
5973 | case 2: /* fptan */ |
5974 | gen_helper_fptan(cpu_env); |
5975 | break; |
5976 | case 3: /* fpatan */ |
5977 | gen_helper_fpatan(cpu_env); |
5978 | break; |
5979 | case 4: /* fxtract */ |
5980 | gen_helper_fxtract(cpu_env); |
5981 | break; |
5982 | case 5: /* fprem1 */ |
5983 | gen_helper_fprem1(cpu_env); |
5984 | break; |
5985 | case 6: /* fdecstp */ |
5986 | gen_helper_fdecstp(cpu_env); |
5987 | break; |
5988 | default: |
5989 | case 7: /* fincstp */ |
5990 | gen_helper_fincstp(cpu_env); |
5991 | break; |
5992 | } |
5993 | break; |
5994 | case 0x0f: /* grp d9/7 */ |
5995 | switch(rm) { |
5996 | case 0: /* fprem */ |
5997 | gen_helper_fprem(cpu_env); |
5998 | break; |
5999 | case 1: /* fyl2xp1 */ |
6000 | gen_helper_fyl2xp1(cpu_env); |
6001 | break; |
6002 | case 2: /* fsqrt */ |
6003 | gen_helper_fsqrt(cpu_env); |
6004 | break; |
6005 | case 3: /* fsincos */ |
6006 | gen_helper_fsincos(cpu_env); |
6007 | break; |
6008 | case 5: /* fscale */ |
6009 | gen_helper_fscale(cpu_env); |
6010 | break; |
6011 | case 4: /* frndint */ |
6012 | gen_helper_frndint(cpu_env); |
6013 | break; |
6014 | case 6: /* fsin */ |
6015 | gen_helper_fsin(cpu_env); |
6016 | break; |
6017 | default: |
6018 | case 7: /* fcos */ |
6019 | gen_helper_fcos(cpu_env); |
6020 | break; |
6021 | } |
6022 | break; |
6023 | case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */ |
6024 | case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */ |
6025 | case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */ |
6026 | { |
6027 | int op1; |
6028 | |
6029 | op1 = op & 7; |
6030 | if (op >= 0x20) { |
6031 | gen_helper_fp_arith_STN_ST0(op1, opreg); |
6032 | if (op >= 0x30) |
6033 | gen_helper_fpop(cpu_env); |
6034 | } else { |
6035 | gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); |
6036 | gen_helper_fp_arith_ST0_FT0(op1); |
6037 | } |
6038 | } |
6039 | break; |
6040 | case 0x02: /* fcom */ |
6041 | case 0x22: /* fcom2, undocumented op */ |
6042 | gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); |
6043 | gen_helper_fcom_ST0_FT0(cpu_env); |
6044 | break; |
6045 | case 0x03: /* fcomp */ |
6046 | case 0x23: /* fcomp3, undocumented op */ |
6047 | case 0x32: /* fcomp5, undocumented op */ |
6048 | gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); |
6049 | gen_helper_fcom_ST0_FT0(cpu_env); |
6050 | gen_helper_fpop(cpu_env); |
6051 | break; |
6052 | case 0x15: /* da/5 */ |
6053 | switch(rm) { |
6054 | case 1: /* fucompp */ |
6055 | gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1)); |
6056 | gen_helper_fucom_ST0_FT0(cpu_env); |
6057 | gen_helper_fpop(cpu_env); |
6058 | gen_helper_fpop(cpu_env); |
6059 | break; |
6060 | default: |
6061 | goto illegal_op; |
6062 | } |
6063 | break; |
6064 | case 0x1c: |
6065 | switch(rm) { |
6066 | case 0: /* feni (287 only, just do nop here) */ |
6067 | break; |
6068 | case 1: /* fdisi (287 only, just do nop here) */ |
6069 | break; |
6070 | case 2: /* fclex */ |
6071 | gen_helper_fclex(cpu_env); |
6072 | break; |
6073 | case 3: /* fninit */ |
6074 | gen_helper_fninit(cpu_env); |
6075 | break; |
6076 | case 4: /* fsetpm (287 only, just do nop here) */ |
6077 | break; |
6078 | default: |
6079 | goto illegal_op; |
6080 | } |
6081 | break; |
6082 | case 0x1d: /* fucomi */ |
6083 | if (!(s->cpuid_features & CPUID_CMOV)) {
6084 | goto illegal_op; |
6085 | } |
6086 | gen_update_cc_op(s); |
6087 | gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); |
6088 | gen_helper_fucomi_ST0_FT0(cpu_env); |
6089 | set_cc_op(s, CC_OP_EFLAGS); |
6090 | break; |
6091 | case 0x1e: /* fcomi */ |
6092 | if (!(s->cpuid_features & CPUID_CMOV)) {
6093 | goto illegal_op; |
6094 | } |
6095 | gen_update_cc_op(s); |
6096 | gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); |
6097 | gen_helper_fcomi_ST0_FT0(cpu_env); |
6098 | set_cc_op(s, CC_OP_EFLAGS); |
6099 | break; |
6100 | case 0x28: /* ffree sti */ |
6101 | gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg)); |
6102 | break; |
6103 | case 0x2a: /* fst sti */ |
6104 | gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg)); |
6105 | break; |
6106 | case 0x2b: /* fstp sti */ |
6107 | case 0x0b: /* fstp1 sti, undocumented op */ |
6108 | case 0x3a: /* fstp8 sti, undocumented op */ |
6109 | case 0x3b: /* fstp9 sti, undocumented op */ |
6110 | gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg)); |
6111 | gen_helper_fpop(cpu_env); |
6112 | break; |
6113 | case 0x2c: /* fucom st(i) */ |
6114 | gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); |
6115 | gen_helper_fucom_ST0_FT0(cpu_env); |
6116 | break; |
6117 | case 0x2d: /* fucomp st(i) */ |
6118 | gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); |
6119 | gen_helper_fucom_ST0_FT0(cpu_env); |
6120 | gen_helper_fpop(cpu_env); |
6121 | break; |
6122 | case 0x33: /* de/3 */ |
6123 | switch(rm) { |
6124 | case 1: /* fcompp */ |
6125 | gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1)); |
6126 | gen_helper_fcom_ST0_FT0(cpu_env); |
6127 | gen_helper_fpop(cpu_env); |
6128 | gen_helper_fpop(cpu_env); |
6129 | break; |
6130 | default: |
6131 | goto illegal_op; |
6132 | } |
6133 | break; |
6134 | case 0x38: /* ffreep sti, undocumented op */ |
6135 | gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg)); |
6136 | gen_helper_fpop(cpu_env); |
6137 | break; |
6138 | case 0x3c: /* df/4 */ |
6139 | switch(rm) { |
6140 | case 0: |
6141 | gen_helper_fnstsw(cpu_tmp2_i32, cpu_env); |
6142 | tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
6143 | gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
6144 | break; |
6145 | default: |
6146 | goto illegal_op; |
6147 | } |
6148 | break; |
6149 | case 0x3d: /* fucomip */ |
6150 | if (!(s->cpuid_features & CPUID_CMOV)) {
6151 | goto illegal_op; |
6152 | } |
6153 | gen_update_cc_op(s); |
6154 | gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); |
6155 | gen_helper_fucomi_ST0_FT0(cpu_env); |
6156 | gen_helper_fpop(cpu_env); |
6157 | set_cc_op(s, CC_OP_EFLAGS); |
6158 | break; |
6159 | case 0x3e: /* fcomip */ |
6160 | if (!(s->cpuid_features & CPUID_CMOV)) {
6161 | goto illegal_op; |
6162 | } |
6163 | gen_update_cc_op(s); |
6164 | gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); |
6165 | gen_helper_fcomi_ST0_FT0(cpu_env); |
6166 | gen_helper_fpop(cpu_env); |
6167 | set_cc_op(s, CC_OP_EFLAGS); |
6168 | break; |
6169 | case 0x10 ... 0x13: /* fcmovxx */ |
6170 | case 0x18 ... 0x1b: |
6171 | { |
6172 | int op1, l1; |
6173 | static const uint8_t fcmov_cc[8] = { |
6174 | (JCC_B << 1), |
6175 | (JCC_Z << 1), |
6176 | (JCC_BE << 1), |
6177 | (JCC_P << 1), |
6178 | }; |
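     | /* note: bits 0-1 of 'op' select the condition; the '^ 1' below inverts it so the branch skips the fmov when the move condition fails */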
6179 | |
6180 | if (!(s->cpuid_features & CPUID_CMOV)) {
6181 | goto illegal_op; |
6182 | } |
6183 | op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1); |
6184 | l1 = gen_new_label(); |
6185 | gen_jcc1_noeob(s, op1, l1); |
6186 | gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg)); |
6187 | gen_set_label(l1); |
6188 | } |
6189 | break; |
6190 | default: |
6191 | goto illegal_op; |
6192 | } |
6193 | } |
6194 | break; |
6195 | /************************/ |
6196 | /* string ops */ |
6197 | |
6198 | case 0xa4: /* movsS */ |
6199 | case 0xa5: |
6200 | ot = mo_b_d(b, dflag); |
6201 | if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6202 | gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); |
6203 | } else { |
6204 | gen_movs(s, ot); |
6205 | } |
6206 | break; |
6207 | |
6208 | case 0xaa: /* stosS */ |
6209 | case 0xab: |
6210 | ot = mo_b_d(b, dflag); |
6211 | if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6212 | gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); |
6213 | } else { |
6214 | gen_stos(s, ot); |
6215 | } |
6216 | break; |
6217 | case 0xac: /* lodsS */ |
6218 | case 0xad: |
6219 | ot = mo_b_d(b, dflag); |
6220 | if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6221 | gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); |
6222 | } else { |
6223 | gen_lods(s, ot); |
6224 | } |
6225 | break; |
6226 | case 0xae: /* scasS */ |
6227 | case 0xaf: |
6228 | ot = mo_b_d(b, dflag); |
6229 | if (prefixes & PREFIX_REPNZ) {
6230 | gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6231 | } else if (prefixes & PREFIX_REPZ) {
6232 | gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0); |
6233 | } else { |
6234 | gen_scas(s, ot); |
6235 | } |
6236 | break; |
6237 | |
6238 | case 0xa6: /* cmpsS */ |
6239 | case 0xa7: |
6240 | ot = mo_b_d(b, dflag); |
6241 | if (prefixes & PREFIX_REPNZ) {
6242 | gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6243 | } else if (prefixes & PREFIX_REPZ) {
6244 | gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0); |
6245 | } else { |
6246 | gen_cmps(s, ot); |
6247 | } |
6248 | break; |
6249 | case 0x6c: /* insS */ |
6250 | case 0x6d: |
6251 | ot = mo_b_d32(b, dflag); |
6252 | tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[R_EDX]);
6253 | gen_check_io(s, ot, pc_start - s->cs_base,
6254 | SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
6255 | if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6256 | gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); |
6257 | } else { |
6258 | gen_ins(s, ot); |
6259 | if (use_icount) { |
6260 | gen_jmp(s, s->pc - s->cs_base); |
6261 | } |
6262 | } |
6263 | break; |
6264 | case 0x6e: /* outsS */ |
6265 | case 0x6f: |
6266 | ot = mo_b_d32(b, dflag); |
6267 | tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[R_EDX]);
6268 | gen_check_io(s, ot, pc_start - s->cs_base,
6269 | svm_is_rep(prefixes) | 4);
6270 | if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6271 | gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); |
6272 | } else { |
6273 | gen_outs(s, ot); |
6274 | if (use_icount) { |
6275 | gen_jmp(s, s->pc - s->cs_base); |
6276 | } |
6277 | } |
6278 | break; |
6279 | |
6280 | /************************/ |
6281 | /* port I/O */ |
6282 | |
6283 | case 0xe4: |
6284 | case 0xe5: |
6285 | ot = mo_b_d32(b, dflag); |
6286 | val = cpu_ldub_code(env, s->pc++);
6287 | gen_check_io(s, ot, pc_start - s->cs_base,
6288 | SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6289 | if (use_icount)
6290 | gen_io_start();
6291 | tcg_gen_movi_i32(cpu_tmp2_i32, val);
6292 | gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
6293 | gen_op_mov_reg_v(ot, R_EAX, cpu_T[1]);
6294 | if (use_icount) { |
6295 | gen_io_end(); |
6296 | gen_jmp(s, s->pc - s->cs_base); |
6297 | } |
6298 | break; |
6299 | case 0xe6: |
6300 | case 0xe7: |
6301 | ot = mo_b_d32(b, dflag); |
6302 | val = cpu_ldub_code(env, s->pc++);
6303 | gen_check_io(s, ot, pc_start - s->cs_base,
6304 | svm_is_rep(prefixes));
6305 | gen_op_mov_v_reg(ot, cpu_T[1], R_EAX);
6306 |
6307 | if (use_icount)
6308 | gen_io_start();
6309 | tcg_gen_movi_i32(cpu_tmp2_i32, val);
6310 | tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
6311 | gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32); |
6312 | if (use_icount) { |
6313 | gen_io_end(); |
6314 | gen_jmp(s, s->pc - s->cs_base); |
6315 | } |
6316 | break; |
6317 | case 0xec: |
6318 | case 0xed: |
6319 | ot = mo_b_d32(b, dflag); |
6320 | tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[R_EDX]);
6321 | gen_check_io(s, ot, pc_start - s->cs_base,
6322 | SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6323 | if (use_icount)
6324 | gen_io_start();
6325 | tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6326 | gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
6327 | gen_op_mov_reg_v(ot, R_EAX, cpu_T[1]);
6328 | if (use_icount) { |
6329 | gen_io_end(); |
6330 | gen_jmp(s, s->pc - s->cs_base); |
6331 | } |
6332 | break; |
6333 | case 0xee: |
6334 | case 0xef: |
6335 | ot = mo_b_d32(b, dflag); |
6336 | tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[R_EDX]);
6337 | gen_check_io(s, ot, pc_start - s->cs_base,
6338 | svm_is_rep(prefixes));
6339 | gen_op_mov_v_reg(ot, cpu_T[1], R_EAX);
6340 |
6341 | if (use_icount)
6342 | gen_io_start();
6343 | tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6344 | tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
6345 | gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32); |
6346 | if (use_icount) { |
6347 | gen_io_end(); |
6348 | gen_jmp(s, s->pc - s->cs_base); |
6349 | } |
6350 | break; |
6351 | |
6352 | /************************/ |
6353 | /* control */ |
6354 | case 0xc2: /* ret im */ |
6355 | val = cpu_ldsw_code(env, s->pc);
6356 | s->pc += 2; |
6357 | ot = gen_pop_T0(s); |
6358 | gen_stack_update(s, val + (1 << ot)); |
6359 | /* Note that gen_pop_T0 uses a zero-extending load. */ |
6360 | gen_op_jmp_v(cpu_T[0]); |
6361 | gen_eob(s); |
6362 | break; |
6363 | case 0xc3: /* ret */ |
6364 | ot = gen_pop_T0(s); |
6365 | gen_pop_update(s, ot); |
6366 | /* Note that gen_pop_T0 uses a zero-extending load. */ |
6367 | gen_op_jmp_v(cpu_T[0]); |
6368 | gen_eob(s); |
6369 | break; |
6370 | case 0xca: /* lret im */ |
6371 | val = cpu_ldsw_code(env, s->pc);
6372 | s->pc += 2; |
6373 | do_lret: |
6374 | if (s->pe && !s->vm86) { |
6375 | gen_update_cc_op(s); |
6376 | gen_jmp_im(pc_start - s->cs_base); |
6377 | gen_helper_lret_protected(cpu_env, tcg_const_i32(dflag - 1), |
6378 | tcg_const_i32(val)); |
6379 | } else { |
6380 | gen_stack_A0(s); |
6381 | /* pop offset */ |
6382 | gen_op_ld_v(s, dflag, cpu_T[0], cpu_A0); |
6383 | /* NOTE: keeping EIP updated is not a problem in case of |
6384 | exception */ |
6385 | gen_op_jmp_v(cpu_T[0]); |
6386 | /* pop selector */ |
6387 | gen_op_addl_A0_im(1 << dflag); |
6388 | gen_op_ld_v(s, dflag, cpu_T[0], cpu_A0); |
6389 | gen_op_movl_seg_T0_vm(R_CS);
6390 | /* add stack offset */ |
6391 | gen_stack_update(s, val + (2 << dflag)); |
6392 | } |
6393 | gen_eob(s); |
6394 | break; |
6395 | case 0xcb: /* lret */ |
6396 | val = 0; |
6397 | goto do_lret; |
6398 | case 0xcf: /* iret */ |
6399 | gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
6400 | if (!s->pe) { |
6401 | /* real mode */ |
6402 | gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1)); |
6403 | set_cc_op(s, CC_OP_EFLAGS); |
6404 | } else if (s->vm86) { |
6405 | if (s->iopl != 3) { |
6406 | gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6407 | } else { |
6408 | gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1)); |
6409 | set_cc_op(s, CC_OP_EFLAGS); |
6410 | } |
6411 | } else { |
6412 | gen_update_cc_op(s); |
6413 | gen_jmp_im(pc_start - s->cs_base); |
6414 | gen_helper_iret_protected(cpu_env, tcg_const_i32(dflag - 1), |
6415 | tcg_const_i32(s->pc - s->cs_base)); |
6416 | set_cc_op(s, CC_OP_EFLAGS); |
6417 | } |
6418 | gen_eob(s); |
6419 | break; |
6420 | case 0xe8: /* call im */ |
6421 | { |
6422 | if (dflag != MO_16) { |
6423 | tval = (int32_t)insn_get(env, s, MO_32); |
6424 | } else { |
6425 | tval = (int16_t)insn_get(env, s, MO_16); |
6426 | } |
6427 | next_eip = s->pc - s->cs_base; |
6428 | tval += next_eip; |
6429 | if (dflag == MO_16) { |
6430 | tval &= 0xffff; |
6431 | } else if (!CODE64(s)) {
6432 | tval &= 0xffffffff;
6433 | }
6434 | tcg_gen_movi_tl(cpu_T[0], next_eip);
6435 | gen_push_v(s, cpu_T[0]); |
6436 | gen_jmp(s, tval); |
6437 | } |
6438 | break; |
6439 | case 0x9a: /* lcall im */ |
6440 | { |
6441 | unsigned int selector, offset; |
6442 | |
6443 | if (CODE64(s))
6444 | goto illegal_op; |
6445 | ot = dflag; |
6446 | offset = insn_get(env, s, ot); |
6447 | selector = insn_get(env, s, MO_16); |
6448 | |
6449 | tcg_gen_movi_tl(cpu_T[0], selector);
6450 | tcg_gen_movi_tl(cpu_T[1], offset);
6451 | } |
6452 | goto do_lcall; |
6453 | case 0xe9: /* jmp im */ |
6454 | if (dflag != MO_16) { |
6455 | tval = (int32_t)insn_get(env, s, MO_32); |
6456 | } else { |
6457 | tval = (int16_t)insn_get(env, s, MO_16); |
6458 | } |
6459 | tval += s->pc - s->cs_base; |
6460 | if (dflag == MO_16) { |
6461 | tval &= 0xffff; |
6462 | } else if (!CODE64(s)) {
6463 | tval &= 0xffffffff; |
6464 | } |
6465 | gen_jmp(s, tval); |
6466 | break; |
6467 | case 0xea: /* ljmp im */ |
6468 | { |
6469 | unsigned int selector, offset; |
6470 | |
6471 | if (CODE64(s))
6472 | goto illegal_op;
6473 | ot = dflag;
6474 | offset = insn_get(env, s, ot);
6475 | selector = insn_get(env, s, MO_16);
6476 |
6477 | tcg_gen_movi_tl(cpu_T[0], selector);
6478 | tcg_gen_movi_tl(cpu_T[1], offset);
6479 | } |
6480 | goto do_ljmp; |
6481 | case 0xeb: /* jmp Jb */ |
6482 | tval = (int8_t)insn_get(env, s, MO_8); |
6483 | tval += s->pc - s->cs_base; |
6484 | if (dflag == MO_16) { |
6485 | tval &= 0xffff; |
6486 | } |
6487 | gen_jmp(s, tval); |
6488 | break; |
6489 | case 0x70 ... 0x7f: /* jcc Jb */ |
6490 | tval = (int8_t)insn_get(env, s, MO_8); |
6491 | goto do_jcc; |
6492 | case 0x180 ... 0x18f: /* jcc Jv */ |
6493 | if (dflag != MO_16) { |
6494 | tval = (int32_t)insn_get(env, s, MO_32); |
6495 | } else { |
6496 | tval = (int16_t)insn_get(env, s, MO_16); |
6497 | } |
6498 | do_jcc: |
6499 | next_eip = s->pc - s->cs_base; |
6500 | tval += next_eip; |
6501 | if (dflag == MO_16) { |
6502 | tval &= 0xffff; |
6503 | } |
6504 | gen_jcc(s, b, tval, next_eip); |
6505 | break; |
6506 | |
6507 | case 0x190 ... 0x19f: /* setcc Gv */ |
6508 | modrm = cpu_ldub_code(env, s->pc++);
6509 | gen_setcc1(s, b, cpu_T[0]); |
6510 | gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1); |
6511 | break; |
6512 | case 0x140 ... 0x14f: /* cmov Gv, Ev */ |
6513 | if (!(s->cpuid_features & CPUID_CMOV)) {
6514 | goto illegal_op;
6515 | }
6516 | ot = dflag;
6517 | modrm = cpu_ldub_code(env, s->pc++);
6518 | reg = ((modrm >> 3) & 7) | rex_r; |
6519 | gen_cmovcc1(env, s, ot, b, modrm, reg); |
6520 | break; |
6521 | |
6522 | /************************/ |
6523 | /* flags */ |
6524 | case 0x9c: /* pushf */ |
6525 | gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
6526 | if (s->vm86 && s->iopl != 3) {
6527 | gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6528 | } else { |
6529 | gen_update_cc_op(s); |
6530 | gen_helper_read_eflags(cpu_T[0], cpu_env); |
6531 | gen_push_v(s, cpu_T[0]); |
6532 | } |
6533 | break; |
6534 | case 0x9d: /* popf */ |
6535 | gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
6536 | if (s->vm86 && s->iopl != 3) {
6537 | gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6538 | } else { |
6539 | ot = gen_pop_T0(s); |
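     | /* note: the eflags write mask below depends on privilege: CPL 0 may also change IF and IOPL, CPL <= IOPL may change IF, anyone may change TF/AC/ID/NT */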
6540 | if (s->cpl == 0) { |
6541 | if (dflag != MO_16) { |
6542 | gen_helper_write_eflags(cpu_env, cpu_T[0], |
6543 | tcg_const_i32((TF_MASK | AC_MASK |
6544 | ID_MASK | NT_MASK |
6545 | IF_MASK |
6546 | IOPL_MASK)));
6547 | } else {
6548 | gen_helper_write_eflags(cpu_env, cpu_T[0],
6549 | tcg_const_i32((TF_MASK | AC_MASK |
6550 | ID_MASK | NT_MASK |
6551 | IF_MASK | IOPL_MASK)
6552 | & 0xffff));
6553 | }
6554 | } else {
6555 | if (s->cpl <= s->iopl) {
6556 | if (dflag != MO_16) {
6557 | gen_helper_write_eflags(cpu_env, cpu_T[0],
6558 | tcg_const_i32((TF_MASK |
6559 | AC_MASK |
6560 | ID_MASK |
6561 | NT_MASK |
6562 | IF_MASK)));
6563 | } else {
6564 | gen_helper_write_eflags(cpu_env, cpu_T[0],
6565 | tcg_const_i32((TF_MASK |
6566 | AC_MASK |
6567 | ID_MASK |
6568 | NT_MASK |
6569 | IF_MASK)
6570 | & 0xffff));
6571 | }
6572 | } else {
6573 | if (dflag != MO_16) {
6574 | gen_helper_write_eflags(cpu_env, cpu_T[0],
6575 | tcg_const_i32((TF_MASK | AC_MASK |
6576 | ID_MASK | NT_MASK)));
6577 | } else {
6578 | gen_helper_write_eflags(cpu_env, cpu_T[0],
6579 | tcg_const_i32((TF_MASK | AC_MASK |
6580 | ID_MASK | NT_MASK)
6581 | & 0xffff)); |
6582 | } |
6583 | } |
6584 | } |
6585 | gen_pop_update(s, ot); |
6586 | set_cc_op(s, CC_OP_EFLAGS); |
6587 | /* abort translation because TF/AC flag may change */ |
6588 | gen_jmp_im(s->pc - s->cs_base); |
6589 | gen_eob(s); |
6590 | } |
6591 | break; |
6592 | case 0x9e: /* sahf */ |
6593 | if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6594 | goto illegal_op;
6595 | gen_op_mov_v_reg(MO_8, cpu_T[0], R_AH);
6596 | gen_compute_eflags(s);
6597 | tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
6598 | tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C);
6599 | tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]);
6600 | break;
6601 | case 0x9f: /* lahf */
6602 | if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6603 | goto illegal_op;
6604 | gen_compute_eflags(s);
6605 | /* Note: gen_compute_eflags() only gives the condition codes */
6606 | tcg_gen_ori_tl(cpu_T[0], cpu_cc_src, 0x02);
6607 | gen_op_mov_reg_v(MO_8, R_AH, cpu_T[0]);
6608 | break;
6609 | case 0xf5: /* cmc */
6610 | gen_compute_eflags(s);
6611 | tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6612 | break;
6613 | case 0xf8: /* clc */
6614 | gen_compute_eflags(s);
6615 | tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
6616 | break;
6617 | case 0xf9: /* stc */
6618 | gen_compute_eflags(s);
6619 | tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6620 | break;
6621 | case 0xfc: /* cld */
6622 | tcg_gen_movi_i32(cpu_tmp2_i32, 1);
6623 | tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
6624 | break;
6625 | case 0xfd: /* std */
6626 | tcg_gen_movi_i32(cpu_tmp2_i32, -1);
6627 | tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
6628 | break; |
6629 | |
6630 | /************************/ |
6631 | /* bit operations */ |
6632 | case 0x1ba: /* bt/bts/btr/btc Gv, im */ |
6633 | ot = dflag; |
6634 | modrm = cpu_ldub_code(env, s->pc++);
6635 | op = (modrm >> 3) & 7;
6636 | mod = (modrm >> 6) & 3;
6637 | rm = (modrm & 7) | REX_B(s);
6638 | if (mod != 3) { |
6639 | s->rip_offset = 1; |
6640 | gen_lea_modrm(env, s, modrm); |
6641 | gen_op_ld_v(s, ot, cpu_T[0], cpu_A0); |
6642 | } else { |
6643 | gen_op_mov_v_reg(ot, cpu_T[0], rm); |
6644 | } |
6645 | /* load shift */ |
6646 | val = cpu_ldub_code(env, s->pc++);
6647 | tcg_gen_movi_tl(cpu_T[1], val);
6648 | if (op < 4) |
6649 | goto illegal_op; |
6650 | op -= 4; |
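     | /* note: reg field values 4..7 encode bt/bts/btr/btc; rebase to 0..3 to share the bt_op code below */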
6651 | goto bt_op; |
6652 | case 0x1a3: /* bt Gv, Ev */ |
6653 | op = 0; |
6654 | goto do_btx; |
6655 | case 0x1ab: /* bts */ |
6656 | op = 1; |
6657 | goto do_btx; |
6658 | case 0x1b3: /* btr */ |
6659 | op = 2; |
6660 | goto do_btx; |
6661 | case 0x1bb: /* btc */ |
6662 | op = 3; |
6663 | do_btx: |
6664 | ot = dflag; |
6665 | modrm = cpu_ldub_code(env, s->pc++);
6666 | reg = ((modrm >> 3) & 7) | rex_r;
6667 | mod = (modrm >> 6) & 3;
6668 | rm = (modrm & 7) | REX_B(s);
6669 | gen_op_mov_v_reg(MO_32, cpu_T[1], reg);
6670 | if (mod != 3) {
6671 | gen_lea_modrm(env, s, modrm);
6672 | /* specific case: we need to add a displacement */
6673 | gen_exts(ot, cpu_T[1]);
6674 | tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot);
6675 | tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
6676 | tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
6677 | gen_op_ld_v(s, ot, cpu_T[0], cpu_A0); |
6678 | } else { |
6679 | gen_op_mov_v_reg(ot, cpu_T[0], rm); |
6680 | } |
6681 | bt_op: |
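     | /* note: op is 0=bt, 1=bts, 2=btr, 3=btc; the bit offset in T1 is masked to the operand width, and the selected bit lands in cc_src so CF can be derived from it */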
6682 | tcg_gen_andi_tl(cpu_T[1], cpu_T[1], (1 << (3 + ot)) - 1);
6683 | switch(op) {
6684 | case 0:
6685 | tcg_gen_shr_tl(cpu_cc_src, cpu_T[0], cpu_T[1]);
6686 | tcg_gen_movi_tl(cpu_cc_dst, 0);
6687 | break;
6688 | case 1:
6689 | tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6690 | tcg_gen_movi_tl(cpu_tmp0, 1);
6691 | tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6692 | tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6693 | break;
6694 | case 2:
6695 | tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6696 | tcg_gen_movi_tl(cpu_tmp0, 1);
6697 | tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6698 | tcg_gen_not_tl(cpu_tmp0, cpu_tmp0);
6699 | tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6700 | break;
6701 | default:
6702 | case 3:
6703 | tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6704 | tcg_gen_movi_tl(cpu_tmp0, 1);
6705 | tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6706 | tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6707 | break;
6708 | }
6709 | set_cc_op(s, CC_OP_SARB + ot);
6710 | if (op != 0) {
6711 | if (mod != 3) {
6712 | gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
6713 | } else {
6714 | gen_op_mov_reg_v(ot, rm, cpu_T[0]);
6715 | }
6716 | tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
6717 | tcg_gen_movi_tl(cpu_cc_dst, 0);
6718 | } |
6719 | break; |
6720 | case 0x1bc: /* bsf / tzcnt */ |
6721 | case 0x1bd: /* bsr / lzcnt */ |
6722 | ot = dflag; |
6723 | modrm = cpu_ldub_code(env, s->pc++);
6724 | reg = ((modrm >> 3) & 7) | rex_r; |
6725 | gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); |
6726 | gen_extu(ot, cpu_T[0]); |
6727 | |
6728 | /* Note that lzcnt and tzcnt are in different extensions. */ |
6729 | if ((prefixes & PREFIX_REPZ)
6730 | && (b & 1
6731 | ? s->cpuid_ext3_features & CPUID_EXT3_ABM
6732 | : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
6733 | int size = 8 << ot;
6734 | tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
6735 | if (b & 1) {
6736 | /* For lzcnt, reduce the target_ulong result by the
6737 | number of zeros that we expect to find at the top. */
6738 | gen_helper_clz(cpu_T[0], cpu_T[0]);
6739 | tcg_gen_subi_tl(cpu_T[0], cpu_T[0], TARGET_LONG_BITS - size);
6740 | } else {
6741 | /* For tzcnt, a zero input must return the operand size:
6742 | force all bits outside the operand size to 1. */
6743 | target_ulong mask = (target_ulong)-2 << (size - 1);
6744 | tcg_gen_ori_tl(cpu_T[0], cpu_T[0], mask);
6745 | gen_helper_ctz(cpu_T[0], cpu_T[0]); |
6746 | } |
6747 | /* For lzcnt/tzcnt, C and Z bits are defined and are |
6748 | related to the result. */ |
6749 | gen_op_update1_cc(); |
6750 | set_cc_op(s, CC_OP_BMILGB + ot); |
6751 | } else { |
6752 | /* For bsr/bsf, only the Z bit is defined and it is related |
6753 | to the input and not the result. */ |
6754 | tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
6755 | set_cc_op(s, CC_OP_LOGICB + ot);
6756 | if (b & 1) {
6757 | /* For bsr, return the bit index of the first 1 bit,
6758 | not the count of leading zeros. */
6759 | gen_helper_clz(cpu_T[0], cpu_T[0]);
6760 | tcg_gen_xori_tl(cpu_T[0], cpu_T[0], TARGET_LONG_BITS - 1);
6761 | } else {
6762 | gen_helper_ctz(cpu_T[0], cpu_T[0]);
6763 | }
6764 | /* ??? The manual says that the output is undefined when the
6765 | input is zero, but real hardware leaves it unchanged, and
6766 | real programs appear to depend on that. */
6767 | tcg_gen_movi_tl(cpu_tmp0, 0);
6768 | tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T[0], cpu_cc_dst, cpu_tmp0,
6769 | cpu_regs[reg], cpu_T[0]); |
6770 | } |
6771 | gen_op_mov_reg_v(ot, reg, cpu_T[0]); |
6772 | break; |
6773 | /************************/ |
6774 | /* bcd */ |
6775 | case 0x27: /* daa */ |
6776 | if (CODE64(s))
6777 | goto illegal_op; |
6778 | gen_update_cc_op(s); |
6779 | gen_helper_daa(cpu_env); |
6780 | set_cc_op(s, CC_OP_EFLAGS); |
6781 | break; |
6782 | case 0x2f: /* das */ |
6783 | if (CODE64(s))
6784 | goto illegal_op; |
6785 | gen_update_cc_op(s); |
6786 | gen_helper_das(cpu_env); |
6787 | set_cc_op(s, CC_OP_EFLAGS); |
6788 | break; |
6789 | case 0x37: /* aaa */ |
6790 | if (CODE64(s))
6791 | goto illegal_op; |
6792 | gen_update_cc_op(s); |
6793 | gen_helper_aaa(cpu_env); |
6794 | set_cc_op(s, CC_OP_EFLAGS); |
6795 | break; |
6796 | case 0x3f: /* aas */ |
6797 | if (CODE64(s))
6798 | goto illegal_op; |
6799 | gen_update_cc_op(s); |
6800 | gen_helper_aas(cpu_env); |
6801 | set_cc_op(s, CC_OP_EFLAGS); |
6802 | break; |
6803 | case 0xd4: /* aam */ |
6804 | if (CODE64(s))
6805 | goto illegal_op;
6806 | val = cpu_ldub_code(env, s->pc++);
6807 | if (val == 0) {
6808 | gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
6809 | } else { |
6810 | gen_helper_aam(cpu_env, tcg_const_i32(val)); |
6811 | set_cc_op(s, CC_OP_LOGICB); |
6812 | } |
6813 | break; |
6814 | case 0xd5: /* aad */ |
6815 | if (CODE64(s))
6816 | goto illegal_op;
6817 | val = cpu_ldub_code(env, s->pc++);
6818 | gen_helper_aad(cpu_env, tcg_const_i32(val)); |
6819 | set_cc_op(s, CC_OP_LOGICB); |
6820 | break; |
6821 | /************************/ |
6822 | /* misc */ |
6823 | case 0x90: /* nop */ |
6824 | /* XXX: correct lock test for all insn */ |
6825 | if (prefixes & PREFIX_LOCK) {
6826 | goto illegal_op;
6827 | }
6828 | /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
6829 | if (REX_B(s)) {
6830 | goto do_xchg_reg_eax;
6831 | }
6832 | if (prefixes & PREFIX_REPZ) {
6833 | gen_update_cc_op(s);
6834 | gen_jmp_im(pc_start - s->cs_base);
6835 | gen_helper_pause(cpu_env, tcg_const_i32(s->pc - pc_start));
6836 | s->is_jmp = DISAS_TB_JUMP;
6837 | } |
6838 | break; |
6839 | case 0x9b: /* fwait */ |
6840 | if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
6841 | (HF_MP_MASK | HF_TS_MASK)) {
6842 | gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
6843 | } else { |
6844 | gen_update_cc_op(s); |
6845 | gen_jmp_im(pc_start - s->cs_base); |
6846 | gen_helper_fwait(cpu_env); |
6847 | } |
6848 | break; |
6849 | case 0xcc: /* int3 */ |
6850 | gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
6851 | break;
6852 | case 0xcd: /* int N */
6853 | val = cpu_ldub_code(env, s->pc++);
6854 | if (s->vm86 && s->iopl != 3) {
6855 | gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6856 | } else { |
6857 | gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base); |
6858 | } |
6859 | break; |
6860 | case 0xce: /* into */ |
6861 | if (CODE64(s))
6862 | goto illegal_op; |
6863 | gen_update_cc_op(s); |
6864 | gen_jmp_im(pc_start - s->cs_base); |
6865 | gen_helper_into(cpu_env, tcg_const_i32(s->pc - pc_start)); |
6866 | break; |
6867 | #ifdef WANT_ICEBP |
6868 | case 0xf1: /* icebp (undocumented, exits to external debugger) */ |
6869 | gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
6870 | #if 1 |
6871 | gen_debug(s, pc_start - s->cs_base); |
6872 | #else |
6873 | /* start debug */ |
6874 | tb_flush(env); |
6875 | qemu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
6876 | #endif |
6877 | break; |
6878 | #endif |
6879 | case 0xfa: /* cli */ |
6880 | if (!s->vm86) { |
6881 | if (s->cpl <= s->iopl) { |
6882 | gen_helper_cli(cpu_env); |
6883 | } else { |
6884 | gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6885 | } |
6886 | } else { |
6887 | if (s->iopl == 3) { |
6888 | gen_helper_cli(cpu_env); |
6889 | } else { |
6890 | gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6891 | } |
6892 | } |
6893 | break; |
6894 | case 0xfb: /* sti */ |
6895 | if (!s->vm86) { |
6896 | if (s->cpl <= s->iopl) { |
6897 | gen_sti: |
6898 | gen_helper_sti(cpu_env); |
6899 | /* interrupts are enabled again only after the insn following sti */
6900 | /* if several consecutive insns inhibit interrupts, only the
6901 | _first_ one sets the inhibit flag */
6902 | if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
6903 | gen_helper_set_inhibit_irq(cpu_env); |
6904 | /* give a chance to handle pending irqs */ |
6905 | gen_jmp_im(s->pc - s->cs_base); |
6906 | gen_eob(s); |
6907 | } else { |
6908 | gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6909 | } |
6910 | } else { |
6911 | if (s->iopl == 3) { |
6912 | goto gen_sti; |
6913 | } else { |
6914 | gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6915 | } |
6916 | } |
6917 | break; |
6918 | case 0x62: /* bound */ |
6919 | if (CODE64(s))
6920 | goto illegal_op;
6921 | ot = dflag;
6922 | modrm = cpu_ldub_code(env, s->pc++);
6923 | reg = (modrm >> 3) & 7; |
6924 | mod = (modrm >> 6) & 3; |
6925 | if (mod == 3) |
6926 | goto illegal_op; |
6927 | gen_op_mov_v_reg(ot, cpu_T[0], reg); |
6928 | gen_lea_modrm(env, s, modrm); |
6929 | gen_jmp_im(pc_start - s->cs_base); |
6930 | tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6931 | if (ot == MO_16) { |
6932 | gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32); |
6933 | } else { |
6934 | gen_helper_boundl(cpu_env, cpu_A0, cpu_tmp2_i32); |
6935 | } |
6936 | break; |
6937 | case 0x1c8 ... 0x1cf: /* bswap reg */ |
6938 | reg = (b & 7) | REX_B(s);
6939 | #ifdef TARGET_X86_64 |
6940 | if (dflag == MO_64) { |
6941 | gen_op_mov_v_reg(MO_64, cpu_T[0], reg); |
6942 | tcg_gen_bswap64_i64(cpu_T[0], cpu_T[0]); |
6943 | gen_op_mov_reg_v(MO_64, reg, cpu_T[0]); |
6944 | } else |
6945 | #endif |
6946 | { |
6947 | gen_op_mov_v_reg(MO_32, cpu_T[0], reg); |
6948 | tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
6949 | tcg_gen_bswap32_tl(cpu_T[0], cpu_T[0]);
6950 | gen_op_mov_reg_v(MO_32, reg, cpu_T[0]); |
6951 | } |
6952 | break; |
6953 | case 0xd6: /* salc */ |
6954 | if (CODE64(s))
6955 | goto illegal_op;
6956 | gen_compute_eflags_c(s, cpu_T[0]);
6957 | tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
6958 | gen_op_mov_reg_v(MO_8, R_EAX, cpu_T[0]);
6959 | break; |
6960 | case 0xe0: /* loopnz */ |
6961 | case 0xe1: /* loopz */ |
6962 | case 0xe2: /* loop */ |
6963 | case 0xe3: /* jecxz */ |
6964 | { |
6965 | int l1, l2, l3; |
6966 | |
6967 | tval = (int8_t)insn_get(env, s, MO_8); |
6968 | next_eip = s->pc - s->cs_base; |
6969 | tval += next_eip; |
6970 | if (dflag == MO_16) { |
6971 | tval &= 0xffff; |
6972 | } |
6973 | |
6974 | l1 = gen_new_label(); |
6975 | l2 = gen_new_label(); |
6976 | l3 = gen_new_label(); |
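     | /* note: l1 = branch taken (eip := tval), l3 = fall through (eip := next_eip), l2 = common exit */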
6977 | b &= 3; |
6978 | switch(b) { |
6979 | case 0: /* loopnz */ |
6980 | case 1: /* loopz */ |
6981 | gen_op_add_reg_im(s->aflag, R_ECX, -1);
6982 | gen_op_jz_ecx(s->aflag, l3); |
6983 | gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1); |
6984 | break; |
6985 | case 2: /* loop */ |
6986 | gen_op_add_reg_im(s->aflag, R_ECX, -1);
6987 | gen_op_jnz_ecx(s->aflag, l1); |
6988 | break; |
6989 | default: |
6990 | case 3: /* jcxz */ |
6991 | gen_op_jz_ecx(s->aflag, l1); |
6992 | break; |
6993 | } |
6994 | |
6995 | gen_set_label(l3); |
6996 | gen_jmp_im(next_eip); |
6997 | tcg_gen_br(l2); |
6998 | |
6999 | gen_set_label(l1); |
7000 | gen_jmp_im(tval); |
7001 | gen_set_label(l2); |
7002 | gen_eob(s); |
7003 | } |
7004 | break; |
7005 | case 0x130: /* wrmsr */ |
7006 | case 0x132: /* rdmsr */ |
7007 | if (s->cpl != 0) { |
7008 | gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7009 | } else { |
7010 | gen_update_cc_op(s); |
7011 | gen_jmp_im(pc_start - s->cs_base); |
7012 | if (b & 2) { |
7013 | gen_helper_rdmsr(cpu_env); |
7014 | } else { |
7015 | gen_helper_wrmsr(cpu_env); |
7016 | } |
7017 | } |
7018 | break; |
7019 | case 0x131: /* rdtsc */ |
7020 | gen_update_cc_op(s); |
7021 | gen_jmp_im(pc_start - s->cs_base); |
7022 | if (use_icount) |
7023 | gen_io_start(); |
7024 | gen_helper_rdtsc(cpu_env); |
7025 | if (use_icount) { |
7026 | gen_io_end(); |
7027 | gen_jmp(s, s->pc - s->cs_base); |
7028 | } |
7029 | break; |
7030 | case 0x133: /* rdpmc */ |
7031 | gen_update_cc_op(s); |
7032 | gen_jmp_im(pc_start - s->cs_base); |
7033 | gen_helper_rdpmc(cpu_env); |
7034 | break; |
7035 | case 0x134: /* sysenter */ |
7036 | /* On Intel, SYSENTER remains valid in 64-bit mode */
7037 | if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
7038 | goto illegal_op;
7039 | if (!s->pe) {
7040 | gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7041 | } else { |
7042 | gen_update_cc_op(s); |
7043 | gen_jmp_im(pc_start - s->cs_base); |
7044 | gen_helper_sysenter(cpu_env); |
7045 | gen_eob(s); |
7046 | } |
7047 | break; |
7048 | case 0x135: /* sysexit */ |
7049 | /* On Intel, SYSEXIT remains valid in 64-bit mode */
7050 | if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
7051 | goto illegal_op;
7052 | if (!s->pe) {
7053 | gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7054 | } else { |
7055 | gen_update_cc_op(s); |
7056 | gen_jmp_im(pc_start - s->cs_base); |
7057 | gen_helper_sysexit(cpu_env, tcg_const_i32(dflag - 1)); |
7058 | gen_eob(s); |
7059 | } |
7060 | break; |
7061 | #ifdef TARGET_X86_64 |
7062 | case 0x105: /* syscall */ |
7063 | /* XXX: is it usable in real mode ? */ |
7064 | gen_update_cc_op(s); |
7065 | gen_jmp_im(pc_start - s->cs_base); |
7066 | gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start)); |
7067 | gen_eob(s); |
7068 | break; |
7069 | case 0x107: /* sysret */ |
7070 | if (!s->pe) { |
7071 | gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7072 | } else { |
7073 | gen_update_cc_op(s); |
7074 | gen_jmp_im(pc_start - s->cs_base); |
7075 | gen_helper_sysret(cpu_env, tcg_const_i32(dflag - 1)); |
7076 | /* condition codes are modified only in long mode */ |
7077 | if (s->lma) { |
7078 | set_cc_op(s, CC_OP_EFLAGS); |
7079 | } |
7080 | gen_eob(s); |
7081 | } |
7082 | break; |
7083 | #endif |
7084 | case 0x1a2: /* cpuid */ |
7085 | gen_update_cc_op(s); |
7086 | gen_jmp_im(pc_start - s->cs_base); |
7087 | gen_helper_cpuid(cpu_env); |
7088 | break; |
7089 | case 0xf4: /* hlt */ |
7090 | if (s->cpl != 0) { |
7091 | gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7092 | } else {
7093 | gen_update_cc_op(s);
7094 | gen_jmp_im(pc_start - s->cs_base);
7095 | gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start));
7096 | s->is_jmp = DISAS_TB_JUMP;
7097 | } |
7098 | break; |
7099 | case 0x100: |
7100 | modrm = cpu_ldub_code(env, s->pc++);
7101 | mod = (modrm >> 6) & 3; |
7102 | op = (modrm >> 3) & 7; |
7103 | switch(op) { |
7104 | case 0: /* sldt */ |
7105 | if (!s->pe || s->vm86) |
7106 | goto illegal_op; |
7107 | gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
7108 | tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,ldt.selector));
7109 | ot = mod == 3 ? dflag : MO_16; |
7110 | gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); |
7111 | break; |
7112 | case 2: /* lldt */ |
7113 | if (!s->pe || s->vm86) |
7114 | goto illegal_op; |
7115 | if (s->cpl != 0) { |
7116 | gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7117 | } else {
7118 | gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
7119 | gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7120 | gen_jmp_im(pc_start - s->cs_base);
7121 | tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7122 | gen_helper_lldt(cpu_env, cpu_tmp2_i32); |
7123 | } |
7124 | break; |
7125 | case 1: /* str */ |
7126 | if (!s->pe || s->vm86) |
7127 | goto illegal_op; |
7128 | gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
7129 | tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,tr.selector));
7130 | ot = mod == 3 ? dflag : MO_16; |
7131 | gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); |
7132 | break; |
7133 | case 3: /* ltr */ |
7134 | if (!s->pe || s->vm86) |
7135 | goto illegal_op; |
7136 | if (s->cpl != 0) { |
7137 | gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7138 | } else {
7139 | gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
7140 | gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7141 | gen_jmp_im(pc_start - s->cs_base);
7142 | tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7143 | gen_helper_ltr(cpu_env, cpu_tmp2_i32); |
7144 | } |
7145 | break; |
7146 | case 4: /* verr */ |
7147 | case 5: /* verw */ |
7148 | if (!s->pe || s->vm86) |
7149 | goto illegal_op; |
7150 | gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); |
7151 | gen_update_cc_op(s); |
7152 | if (op == 4) { |
7153 | gen_helper_verr(cpu_env, cpu_T[0]); |
7154 | } else { |
7155 | gen_helper_verw(cpu_env, cpu_T[0]); |
7156 | } |
7157 | set_cc_op(s, CC_OP_EFLAGS); |
7158 | break; |
7159 | default: |
7160 | goto illegal_op; |
7161 | } |
7162 | break; |
7163 | case 0x101: |
7164 | modrm = cpu_ldub_code(env, s->pc++);
7165 | mod = (modrm >> 6) & 3; |
7166 | op = (modrm >> 3) & 7; |
7167 | rm = modrm & 7; |
7168 | switch(op) { |
7169 | case 0: /* sgdt */ |
7170 | if (mod == 3) |
7171 | goto illegal_op; |
7172 | gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
7173 | gen_lea_modrm(env, s, modrm);
7174 | tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit));
7175 | gen_op_st_v(s, MO_16, cpu_T[0], cpu_A0);
7176 | gen_add_A0_im(s, 2);
7177 | tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base));
7178 | if (dflag == MO_16) {
7179 | tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffffff);
7180 | }
7181 | gen_op_st_v(s, CODE64(s) + MO_32, cpu_T[0], cpu_A0);
7182 | break; |
7183 | case 1: |
7184 | if (mod == 3) { |
7185 | switch (rm) { |
7186 | case 0: /* monitor */ |
7187 | if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR(1 << 3)) || |
7188 | s->cpl != 0) |
7189 | goto illegal_op; |
7190 | gen_update_cc_op(s); |
7191 | gen_jmp_im(pc_start - s->cs_base); |
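     | /* MONITOR takes its address from EAX/RAX, extended to the
     |    address size and offset by the DS segment (or override). */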
7192 | tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EAX]);
7193 | gen_extu(s->aflag, cpu_A0); |
7194 | gen_add_A0_ds_seg(s); |
7195 | gen_helper_monitor(cpu_env, cpu_A0); |
7196 | break; |
7197 | case 1: /* mwait */ |
7198 | if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7199 | s->cpl != 0) |
7200 | goto illegal_op; |
7201 | gen_update_cc_op(s); |
7202 | gen_jmp_im(pc_start - s->cs_base); |
7203 | gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start)); |
7204 | gen_eob(s); |
7205 | break; |
7206 | case 2: /* clac */ |
7207 | if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
7208 | s->cpl != 0) { |
7209 | goto illegal_op; |
7210 | } |
7211 | gen_helper_clac(cpu_env); |
7212 | gen_jmp_im(s->pc - s->cs_base); |
7213 | gen_eob(s); |
7214 | break; |
7215 | case 3: /* stac */ |
7216 | if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
7217 | s->cpl != 0) { |
7218 | goto illegal_op; |
7219 | } |
7220 | gen_helper_stac(cpu_env); |
7221 | gen_jmp_im(s->pc - s->cs_base); |
7222 | gen_eob(s); |
7223 | break; |
7224 | default: |
7225 | goto illegal_op; |
7226 | } |
7227 | } else { /* sidt */ |
7228 | gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
7229 | gen_lea_modrm(env, s, modrm);
7230 | tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit));
7231 | gen_op_st_v(s, MO_16, cpu_T[0], cpu_A0);
7232 | gen_add_A0_im(s, 2);
7233 | tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base));
7234 | if (dflag == MO_16) {
7235 | tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffffff);
7236 | }
7237 | gen_op_st_v(s, CODE64(s) + MO_32, cpu_T[0], cpu_A0);
7238 | } |
7239 | break; |
7240 | case 2: /* lgdt */ |
7241 | case 3: /* lidt */ |
7242 | if (mod == 3) { |
7243 | gen_update_cc_op(s); |
7244 | gen_jmp_im(pc_start - s->cs_base); |
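     | /* With mod == 3, the lgdt/lidt encodings are re-used for the
     |    SVM instruction group (VMRUN ... INVLPGA), selected by rm. */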
7245 | switch(rm) { |
7246 | case 0: /* VMRUN */ |
7247 | if (!(s->flags & HF_SVME_MASK) || !s->pe)
7248 | goto illegal_op;
7249 | if (s->cpl != 0) {
7250 | gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7251 | break;
7252 | } else {
7253 | gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag - 1),
7254 | tcg_const_i32(s->pc - pc_start));
7255 | tcg_gen_exit_tb(0);
7256 | s->is_jmp = DISAS_TB_JUMP;
7257 | } |
7258 | break; |
7259 | case 1: /* VMMCALL */ |
7260 | if (!(s->flags & HF_SVME_MASK))
7261 | goto illegal_op; |
7262 | gen_helper_vmmcall(cpu_env); |
7263 | break; |
7264 | case 2: /* VMLOAD */ |
7265 | if (!(s->flags & HF_SVME_MASK) || !s->pe)
7266 | goto illegal_op;
7267 | if (s->cpl != 0) {
7268 | gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7269 | break; |
7270 | } else { |
7271 | gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag - 1)); |
7272 | } |
7273 | break; |
7274 | case 3: /* VMSAVE */ |
7275 | if (!(s->flags & HF_SVME_MASK) || !s->pe)
7276 | goto illegal_op;
7277 | if (s->cpl != 0) {
7278 | gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7279 | break; |
7280 | } else { |
7281 | gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag - 1)); |
7282 | } |
7283 | break; |
7284 | case 4: /* STGI */ |
7285 | if ((!(s->flags & HF_SVME_MASK) &&
7286 | !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7287 | !s->pe)
7288 | goto illegal_op;
7289 | if (s->cpl != 0) {
7290 | gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7291 | break; |
7292 | } else { |
7293 | gen_helper_stgi(cpu_env); |
7294 | } |
7295 | break; |
7296 | case 5: /* CLGI */ |
7297 | if (!(s->flags & HF_SVME_MASK) || !s->pe)
7298 | goto illegal_op;
7299 | if (s->cpl != 0) {
7300 | gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7301 | break; |
7302 | } else { |
7303 | gen_helper_clgi(cpu_env); |
7304 | } |
7305 | break; |
7306 | case 6: /* SKINIT */ |
7307 | if ((!(s->flags & HF_SVME_MASK) &&
7308 | !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7309 | !s->pe) |
7310 | goto illegal_op; |
7311 | gen_helper_skinit(cpu_env); |
7312 | break; |
7313 | case 7: /* INVLPGA */ |
7314 | if (!(s->flags & HF_SVME_MASK) || !s->pe)
7315 | goto illegal_op;
7316 | if (s->cpl != 0) {
7317 | gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7318 | break; |
7319 | } else { |
7320 | gen_helper_invlpga(cpu_env, |
7321 | tcg_const_i32(s->aflag - 1)); |
7322 | } |
7323 | break; |
7324 | default: |
7325 | goto illegal_op; |
7326 | } |
7327 | } else if (s->cpl != 0) { |
7328 | gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7329 | } else {
7330 | gen_svm_check_intercept(s, pc_start,
7331 | op==2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE);
7332 | gen_lea_modrm(env, s, modrm);
7333 | gen_op_ld_v(s, MO_16, cpu_T[1], cpu_A0);
7334 | gen_add_A0_im(s, 2);
7335 | gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T[0], cpu_A0);
7336 | if (dflag == MO_16) {
7337 | tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffffff);
7338 | }
7339 | if (op == 2) {
7340 | tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,gdt.base));
7341 | tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,gdt.limit));
7342 | } else {
7343 | tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,idt.base));
7344 | tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,idt.limit));
7345 | } |
7346 | } |
7347 | break; |
7348 | case 4: /* smsw */ |
7349 | gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
7350 | #if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN |
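     | /* On a big-endian host the low 32 bits of the 64-bit cr[0]
     |    live at offset +4. */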
7351 | tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]) + 4);
7352 | #else |
7353 | tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]));
7354 | #endif |
7355 | gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 1); |
7356 | break; |
7357 | case 6: /* lmsw */ |
7358 | if (s->cpl != 0) { |
7359 | gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7360 | } else {
7361 | gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7362 | gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); |
7363 | gen_helper_lmsw(cpu_env, cpu_T[0]); |
7364 | gen_jmp_im(s->pc - s->cs_base); |
7365 | gen_eob(s); |
7366 | } |
7367 | break; |
7368 | case 7: |
7369 | if (mod != 3) { /* invlpg */ |
7370 | if (s->cpl != 0) { |
7371 | gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7372 | } else { |
7373 | gen_update_cc_op(s); |
7374 | gen_jmp_im(pc_start - s->cs_base); |
7375 | gen_lea_modrm(env, s, modrm); |
7376 | gen_helper_invlpg(cpu_env, cpu_A0); |
7377 | gen_jmp_im(s->pc - s->cs_base); |
7378 | gen_eob(s); |
7379 | } |
7380 | } else { |
7381 | switch (rm) { |
7382 | case 0: /* swapgs */ |
7383 | #ifdef TARGET_X86_64 |
7384 | if (CODE64(s)) {
7385 | if (s->cpl != 0) {
7386 | gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7387 | } else { |
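     | /* SWAPGS exchanges the GS segment base with the
     |    kernelgsbase MSR value. */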
7388 | tcg_gen_ld_tl(cpu_T[0], cpu_env,
7389 | offsetof(CPUX86State,segs[R_GS].base));
7390 | tcg_gen_ld_tl(cpu_T[1], cpu_env,
7391 | offsetof(CPUX86State,kernelgsbase));
7392 | tcg_gen_st_tl(cpu_T[1], cpu_env,
7393 | offsetof(CPUX86State,segs[R_GS].base));
7394 | tcg_gen_st_tl(cpu_T[0], cpu_env,
7395 | offsetof(CPUX86State,kernelgsbase));
7396 | } |
7397 | } else |
7398 | #endif |
7399 | { |
7400 | goto illegal_op; |
7401 | } |
7402 | break; |
7403 | case 1: /* rdtscp */ |
7404 | if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP))
7405 | goto illegal_op; |
7406 | gen_update_cc_op(s); |
7407 | gen_jmp_im(pc_start - s->cs_base); |
7408 | if (use_icount) |
7409 | gen_io_start(); |
7410 | gen_helper_rdtscp(cpu_env); |
7411 | if (use_icount) { |
7412 | gen_io_end(); |
7413 | gen_jmp(s, s->pc - s->cs_base); |
7414 | } |
7415 | break; |
7416 | default: |
7417 | goto illegal_op; |
7418 | } |
7419 | } |
7420 | break; |
7421 | default: |
7422 | goto illegal_op; |
7423 | } |
7424 | break; |
7425 | case 0x108: /* invd */ |
7426 | case 0x109: /* wbinvd */ |
7427 | if (s->cpl != 0) { |
7428 | gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7429 | } else {
7430 | gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
7431 | /* nothing to do */ |
7432 | } |
7433 | break; |
7434 | case 0x63: /* arpl or movslS (x86_64) */ |
7435 | #ifdef TARGET_X86_64 |
7436 | if (CODE64(s)) {
7437 | int d_ot; |
7438 | /* d_ot is the size of destination */ |
7439 | d_ot = dflag; |
7440 | |
7441 | modrm = cpu_ldub_code(env, s->pc++);
7442 | reg = ((modrm >> 3) & 7) | rex_r; |
7443 | mod = (modrm >> 6) & 3; |
7444 | rm = (modrm & 7) | REX_B(s);
7445 | |
7446 | if (mod == 3) { |
7447 | gen_op_mov_v_reg(MO_32, cpu_T[0], rm); |
7448 | /* sign extend */ |
7449 | if (d_ot == MO_64) { |
7450 | tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
7451 | } |
7452 | gen_op_mov_reg_v(d_ot, reg, cpu_T[0]); |
7453 | } else { |
7454 | gen_lea_modrm(env, s, modrm); |
7455 | gen_op_ld_v(s, MO_32 | MO_SIGN, cpu_T[0], cpu_A0); |
7456 | gen_op_mov_reg_v(d_ot, reg, cpu_T[0]); |
7457 | } |
7458 | } else |
7459 | #endif |
7460 | { |
7461 | int label1; |
7462 | TCGv t0, t1, t2, a0;
7463 | |
7464 | if (!s->pe || s->vm86) |
7465 | goto illegal_op; |
7466 | t0 = tcg_temp_local_new();
7467 | t1 = tcg_temp_local_new();
7468 | t2 = tcg_temp_local_new();
7469 | ot = MO_16;
7470 | modrm = cpu_ldub_code(env, s->pc++);
7471 | reg = (modrm >> 3) & 7; |
7472 | mod = (modrm >> 6) & 3; |
7473 | rm = modrm & 7; |
7474 | if (mod != 3) { |
7475 | gen_lea_modrm(env, s, modrm); |
7476 | gen_op_ld_v(s, ot, t0, cpu_A0); |
7477 | a0 = tcg_temp_local_new();
7478 | tcg_gen_mov_tl(a0, cpu_A0);
7479 | } else { |
7480 | gen_op_mov_v_reg(ot, t0, rm); |
7481 | TCGV_UNUSED(a0);
7482 | } |
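     | /* arpl: if the destination selector's RPL (bits 1:0) is below
     |    the source's, raise it to match and set ZF; else clear ZF. */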
7483 | gen_op_mov_v_reg(ot, t1, reg); |
7484 | tcg_gen_andi_tl(cpu_tmp0, t0, 3);
7485 | tcg_gen_andi_tl(t1, t1, 3);
7486 | tcg_gen_movi_tl(t2, 0);
7487 | label1 = gen_new_label();
7488 | tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
7489 | tcg_gen_andi_tl(t0, t0, ~3);
7490 | tcg_gen_or_tl(t0, t0, t1);
7491 | tcg_gen_movi_tl(t2, CC_Z);
7492 | gen_set_label(label1); |
7493 | if (mod != 3) { |
7494 | gen_op_st_v(s, ot, t0, a0); |
7495 | tcg_temp_free(a0);
7496 | } else { |
7497 | gen_op_mov_reg_v(ot, rm, t0); |
7498 | } |
7499 | gen_compute_eflags(s); |
7500 | tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
7501 | tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
7502 | tcg_temp_free(t0);
7503 | tcg_temp_free(t1);
7504 | tcg_temp_free(t2);
7505 | } |
7506 | break; |
7507 | case 0x102: /* lar */ |
7508 | case 0x103: /* lsl */ |
7509 | { |
7510 | int label1; |
7511 | TCGv t0;
7512 | if (!s->pe || s->vm86) |
7513 | goto illegal_op; |
7514 | ot = dflag != MO_16 ? MO_32 : MO_16; |
7515 | modrm = cpu_ldub_code(env, s->pc++);
7516 | reg = ((modrm >> 3) & 7) | rex_r; |
7517 | gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); |
7518 | t0 = tcg_temp_local_new();
7519 | gen_update_cc_op(s); |
7520 | if (b == 0x102) { |
7521 | gen_helper_lar(t0, cpu_env, cpu_T[0]); |
7522 | } else { |
7523 | gen_helper_lsl(t0, cpu_env, cpu_T[0]); |
7524 | } |
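     | /* The lar/lsl helpers set CC_Z in cc_src on success; the
     |    result is only written back in that case. */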
7525 | tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
7526 | label1 = gen_new_label();
7527 | tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
7528 | gen_op_mov_reg_v(ot, reg, t0); |
7529 | gen_set_label(label1); |
7530 | set_cc_op(s, CC_OP_EFLAGS); |
7531 | tcg_temp_free(t0);
7532 | } |
7533 | break; |
7534 | case 0x118: |
7535 | modrm = cpu_ldub_code(env, s->pc++);
7536 | mod = (modrm >> 6) & 3; |
7537 | op = (modrm >> 3) & 7; |
7538 | switch(op) { |
7539 | case 0: /* prefetchnta */ |
7540 | case 1: /* prefetcht0 */
7541 | case 2: /* prefetcht1 */
7542 | case 3: /* prefetcht2 */
7543 | if (mod == 3) |
7544 | goto illegal_op; |
7545 | gen_lea_modrm(env, s, modrm); |
7546 | /* nothing more to do */ |
7547 | break; |
7548 | default: /* nop (multi byte) */ |
7549 | gen_nop_modrm(env, s, modrm); |
7550 | break; |
7551 | } |
7552 | break; |
7553 | case 0x119 ... 0x11f: /* nop (multi byte) */ |
7554 | modrm = cpu_ldub_code(env, s->pc++);
7555 | gen_nop_modrm(env, s, modrm); |
7556 | break; |
7557 | case 0x120: /* mov reg, crN */ |
7558 | case 0x122: /* mov crN, reg */ |
7559 | if (s->cpl != 0) { |
7560 | gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7561 | } else {
7562 | modrm = cpu_ldub_code(env, s->pc++);
7563 | /* Ignore the mod bits (assume (modrm&0xc0)==0xc0). |
7564 | * AMD documentation (24594.pdf) and testing of |
7565 | * Intel 386 and 486 processors all show that the mod bits
7566 | * are assumed to be 1's, regardless of actual values. |
7567 | */ |
7568 | rm = (modrm & 7) | REX_B(s);
7569 | reg = ((modrm >> 3) & 7) | rex_r;
7570 | if (CODE64(s))
7571 | ot = MO_64; |
7572 | else |
7573 | ot = MO_32; |
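     | /* A LOCK prefix on a CR0 access selects CR8 instead when the
     |    CPU advertises the AMD alternate encoding (CR8LEG). */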
7574 | if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
7575 | (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
7576 | reg = 8; |
7577 | } |
7578 | switch(reg) { |
7579 | case 0: |
7580 | case 2: |
7581 | case 3: |
7582 | case 4: |
7583 | case 8: |
7584 | gen_update_cc_op(s); |
7585 | gen_jmp_im(pc_start - s->cs_base); |
7586 | if (b & 2) { |
7587 | gen_op_mov_v_reg(ot, cpu_T[0], rm); |
7588 | gen_helper_write_crN(cpu_env, tcg_const_i32(reg), |
7589 | cpu_T[0]); |
7590 | gen_jmp_im(s->pc - s->cs_base); |
7591 | gen_eob(s); |
7592 | } else { |
7593 | gen_helper_read_crN(cpu_T[0], cpu_env, tcg_const_i32(reg)); |
7594 | gen_op_mov_reg_v(ot, rm, cpu_T[0]); |
7595 | } |
7596 | break; |
7597 | default: |
7598 | goto illegal_op; |
7599 | } |
7600 | } |
7601 | break; |
7602 | case 0x121: /* mov reg, drN */ |
7603 | case 0x123: /* mov drN, reg */ |
7604 | if (s->cpl != 0) { |
7605 | gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7606 | } else {
7607 | modrm = cpu_ldub_code(env, s->pc++);
7608 | /* Ignore the mod bits (assume (modrm&0xc0)==0xc0). |
7609 | * AMD documentation (24594.pdf) and testing of |
7610 | * Intel 386 and 486 processors all show that the mod bits
7611 | * are assumed to be 1's, regardless of actual values. |
7612 | */ |
7613 | rm = (modrm & 7) | REX_B(s);
7614 | reg = ((modrm >> 3) & 7) | rex_r;
7615 | if (CODE64(s))
7616 | ot = MO_64; |
7617 | else |
7618 | ot = MO_32; |
7619 | /* XXX: do it dynamically with CR4.DE bit */ |
7620 | if (reg == 4 || reg == 5 || reg >= 8) |
7621 | goto illegal_op; |
7622 | if (b & 2) { |
7623 | gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
7624 | gen_op_mov_v_reg(ot, cpu_T[0], rm); |
7625 | gen_helper_movl_drN_T0(cpu_env, tcg_const_i32(reg), cpu_T[0]); |
7626 | gen_jmp_im(s->pc - s->cs_base); |
7627 | gen_eob(s); |
7628 | } else { |
7629 | gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
7630 | tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,dr[reg]));
7631 | gen_op_mov_reg_v(ot, rm, cpu_T[0]); |
7632 | } |
7633 | } |
7634 | break; |
7635 | case 0x106: /* clts */ |
7636 | if (s->cpl != 0) { |
7637 | gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7638 | } else {
7639 | gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7640 | gen_helper_clts(cpu_env); |
7641 | /* abort block because static cpu state changed */ |
7642 | gen_jmp_im(s->pc - s->cs_base); |
7643 | gen_eob(s); |
7644 | } |
7645 | break; |
7646 | /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */ |
7647 | case 0x1c3: /* MOVNTI reg, mem */ |
7648 | if (!(s->cpuid_features & CPUID_SSE2))
7649 | goto illegal_op; |
7650 | ot = mo_64_32(dflag); |
7651 | modrm = cpu_ldub_code(env, s->pc++);
7652 | mod = (modrm >> 6) & 3; |
7653 | if (mod == 3) |
7654 | goto illegal_op; |
7655 | reg = ((modrm >> 3) & 7) | rex_r; |
7656 | /* generate a generic store */ |
7657 | gen_ldst_modrm(env, s, modrm, ot, reg, 1); |
7658 | break; |
7659 | case 0x1ae: |
7660 | modrm = cpu_ldub_code(env, s->pc++);
7661 | mod = (modrm >> 6) & 3; |
7662 | op = (modrm >> 3) & 7; |
7663 | switch(op) { |
7664 | case 0: /* fxsave */ |
7665 | if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
7666 | (s->prefix & PREFIX_LOCK))
7667 | goto illegal_op;
7668 | if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
7669 | gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7670 | break; |
7671 | } |
7672 | gen_lea_modrm(env, s, modrm); |
7673 | gen_update_cc_op(s); |
7674 | gen_jmp_im(pc_start - s->cs_base); |
7675 | gen_helper_fxsave(cpu_env, cpu_A0, tcg_const_i32(dflag == MO_64)); |
7676 | break; |
7677 | case 1: /* fxrstor */ |
7678 | if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
7679 | (s->prefix & PREFIX_LOCK))
7680 | goto illegal_op;
7681 | if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
7682 | gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7683 | break; |
7684 | } |
7685 | gen_lea_modrm(env, s, modrm); |
7686 | gen_update_cc_op(s); |
7687 | gen_jmp_im(pc_start - s->cs_base); |
7688 | gen_helper_fxrstor(cpu_env, cpu_A0, tcg_const_i32(dflag == MO_64)); |
7689 | break; |
7690 | case 2: /* ldmxcsr */ |
7691 | case 3: /* stmxcsr */ |
7692 | if (s->flags & HF_TS_MASK) {
7693 | gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7694 | break;
7695 | }
7696 | if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) ||
7697 | mod == 3) |
7698 | goto illegal_op; |
7699 | gen_lea_modrm(env, s, modrm); |
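     | /* ldmxcsr goes through a helper so the new value can be
     |    validated; stmxcsr is a plain 32-bit store of mxcsr. */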
7700 | if (op == 2) { |
7701 | tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, |
7702 | s->mem_index, MO_LEUL); |
7703 | gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32); |
7704 | } else { |
7705 | tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
7706 | gen_op_st_v(s, MO_32, cpu_T[0], cpu_A0); |
7707 | } |
7708 | break; |
7709 | case 5: /* lfence */ |
7710 | case 6: /* mfence */ |
7711 | if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE2))
7712 | goto illegal_op; |
7713 | break; |
7714 | case 7: /* sfence / clflush */ |
7715 | if ((modrm & 0xc7) == 0xc0) { |
7716 | /* sfence */ |
7717 | /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */ |
7718 | if (!(s->cpuid_features & CPUID_SSE))
7719 | goto illegal_op; |
7720 | } else { |
7721 | /* clflush */ |
7722 | if (!(s->cpuid_features & CPUID_CLFLUSH))
7723 | goto illegal_op; |
7724 | gen_lea_modrm(env, s, modrm); |
7725 | } |
7726 | break; |
7727 | default: |
7728 | goto illegal_op; |
7729 | } |
7730 | break; |
7731 | case 0x10d: /* 3DNow! prefetch(w) */ |
7732 | modrm = cpu_ldub_code(env, s->pc++);
7733 | mod = (modrm >> 6) & 3; |
7734 | if (mod == 3) |
7735 | goto illegal_op; |
7736 | gen_lea_modrm(env, s, modrm); |
7737 | /* ignore for now */ |
7738 | break; |
7739 | case 0x1aa: /* rsm */ |
7740 | gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
7741 | if (!(s->flags & HF_SMM_MASK))
7742 | goto illegal_op; |
7743 | gen_update_cc_op(s); |
7744 | gen_jmp_im(s->pc - s->cs_base); |
7745 | gen_helper_rsm(cpu_env); |
7746 | gen_eob(s); |
7747 | break; |
7748 | case 0x1b8: /* SSE4.2 popcnt */ |
7749 | if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
7750 | PREFIX_REPZ)
7751 | goto illegal_op;
7752 | if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
7753 | goto illegal_op; |
7754 | |
7755 | modrm = cpu_ldub_code(env, s->pc++);
7756 | reg = ((modrm >> 3) & 7) | rex_r; |
7757 | |
7758 | if (s->prefix & PREFIX_DATA) {
7759 | ot = MO_16; |
7760 | } else { |
7761 | ot = mo_64_32(dflag); |
7762 | } |
7763 | |
7764 | gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); |
7765 | gen_helper_popcnt(cpu_T[0], cpu_env, cpu_T[0], tcg_const_i32(ot)); |
7766 | gen_op_mov_reg_v(ot, reg, cpu_T[0]); |
7767 | |
7768 | set_cc_op(s, CC_OP_EFLAGS); |
7769 | break; |
7770 | case 0x10e ... 0x10f: |
7771 | /* 3DNow! instructions, ignore prefixes */ |
7772 | s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
7773 | case 0x110 ... 0x117: |
7774 | case 0x128 ... 0x12f: |
7775 | case 0x138 ... 0x13a: |
7776 | case 0x150 ... 0x179: |
7777 | case 0x17c ... 0x17f: |
7778 | case 0x1c2: |
7779 | case 0x1c4 ... 0x1c6: |
7780 | case 0x1d0 ... 0x1fe: |
7781 | gen_sse(env, s, b, pc_start, rex_r); |
7782 | break; |
7783 | default: |
7784 | goto illegal_op; |
7785 | } |
7786 | /* lock generation */ |
7787 | if (s->prefix & PREFIX_LOCK)
7788 | gen_helper_unlock(); |
7789 | return s->pc; |
7790 | illegal_op: |
7791 | if (s->prefix & PREFIX_LOCK)
7792 | gen_helper_unlock();
7793 | /* XXX: ensure that no lock was generated */
7794 | gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
7795 | return s->pc; |
7796 | } |
7797 | |
7798 | void optimize_flags_init(void) |
7799 | { |
7800 | static const char reg_names[CPU_NB_REGS][4] = {
7801 | #ifdef TARGET_X86_64
7802 | [R_EAX] = "rax",
7803 | [R_EBX] = "rbx",
7804 | [R_ECX] = "rcx",
7805 | [R_EDX] = "rdx",
7806 | [R_ESI] = "rsi",
7807 | [R_EDI] = "rdi",
7808 | [R_EBP] = "rbp",
7809 | [R_ESP] = "rsp",
7810 | [8] = "r8", |
7811 | [9] = "r9", |
7812 | [10] = "r10", |
7813 | [11] = "r11", |
7814 | [12] = "r12", |
7815 | [13] = "r13", |
7816 | [14] = "r14", |
7817 | [15] = "r15", |
7818 | #else |
7819 | [R_EAX] = "eax",
7820 | [R_EBX] = "ebx",
7821 | [R_ECX] = "ecx",
7822 | [R_EDX] = "edx",
7823 | [R_ESI] = "esi",
7824 | [R_EDI] = "edi",
7825 | [R_EBP] = "ebp",
7826 | [R_ESP] = "esp",
7827 | #endif |
7828 | }; |
7829 | int i; |
7830 | |
7831 | cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
7832 | cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
7833 | offsetof(CPUX86State, cc_op), "cc_op");
7834 | cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_dst),
7835 | "cc_dst");
7836 | cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src),
7837 | "cc_src");
7838 | cpu_cc_src2 = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src2),
7839 | "cc_src2");
7840 |
7841 | for (i = 0; i < CPU_NB_REGS; ++i) {
7842 | cpu_regs[i] = tcg_global_mem_new(TCG_AREG0,
7843 | offsetof(CPUX86State, regs[i]),
7844 | reg_names[i]);
7845 | } |
7846 | } |
7847 | |
7848 | /* generate intermediate code in gen_opc_buf and gen_opparam_buf for |
7849 | basic block 'tb'. If search_pc is TRUE, also generate PC |
7850 | information for each intermediate instruction. */ |
7851 | static inline void gen_intermediate_code_internal(X86CPU *cpu, |
7852 | TranslationBlock *tb, |
7853 | bool search_pc)
7854 | { |
7855 | CPUState *cs = CPU(cpu);
7856 | CPUX86State *env = &cpu->env; |
7857 | DisasContext dc1, *dc = &dc1; |
7858 | target_ulong pc_ptr; |
7859 | uint16_t *gen_opc_end; |
7860 | CPUBreakpoint *bp; |
7861 | int j, lj; |
7862 | uint64_t flags; |
7863 | target_ulong pc_start; |
7864 | target_ulong cs_base; |
7865 | int num_insns; |
7866 | int max_insns; |
7867 | |
7868 | /* generate intermediate code */ |
7869 | pc_start = tb->pc; |
7870 | cs_base = tb->cs_base; |
7871 | flags = tb->flags; |
7872 | |
7873 | dc->pe = (flags >> HF_PE_SHIFT) & 1;
7874 | dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
7875 | dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
7876 | dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
7877 | dc->f_st = 0;
7878 | dc->vm86 = (flags >> VM_SHIFT) & 1;
7879 | dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
7880 | dc->iopl = (flags >> IOPL_SHIFT) & 3;
7881 | dc->tf = (flags >> TF_SHIFT) & 1;
7882 | dc->singlestep_enabled = cs->singlestep_enabled; |
7883 | dc->cc_op = CC_OP_DYNAMIC; |
7884 | dc->cc_op_dirty = false;
7885 | dc->cs_base = cs_base; |
7886 | dc->tb = tb; |
7887 | dc->popl_esp_hack = 0; |
7888 | /* select memory access functions */ |
7889 | dc->mem_index = 0; |
7890 | if (flags & HF_SOFTMMU_MASK) {
7891 | dc->mem_index = cpu_mmu_index(env); |
7892 | } |
7893 | dc->cpuid_features = env->features[FEAT_1_EDX]; |
7894 | dc->cpuid_ext_features = env->features[FEAT_1_ECX]; |
7895 | dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX]; |
7896 | dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX]; |
7897 | dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX]; |
7898 | #ifdef TARGET_X86_64 |
7899 | dc->lma = (flags >> HF_LMA_SHIFT) & 1;
7900 | dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
7901 | #endif |
7902 | dc->flags = flags; |
7903 | dc->jmp_opt = !(dc->tf || cs->singlestep_enabled || |
7904 | (flags & HF_INHIBIT_IRQ_MASK)
7905 | #ifndef CONFIG_SOFTMMU
7906 | || (flags & HF_SOFTMMU_MASK)
7907 | #endif |
7908 | ); |
7909 | #if 0 |
7910 | /* check addseg logic */ |
7911 | if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32)) |
7912 | printf("ERROR addseg\n"); |
7913 | #endif |
7914 | |
7915 | cpu_T[0] = tcg_temp_new();
7916 | cpu_T[1] = tcg_temp_new();
7917 | cpu_A0 = tcg_temp_new();
7918 |
7919 | cpu_tmp0 = tcg_temp_new();
7920 | cpu_tmp1_i64 = tcg_temp_new_i64();
7921 | cpu_tmp2_i32 = tcg_temp_new_i32();
7922 | cpu_tmp3_i32 = tcg_temp_new_i32();
7923 | cpu_tmp4 = tcg_temp_new();
7924 | cpu_ptr0 = tcg_temp_new_ptr();
7925 | cpu_ptr1 = tcg_temp_new_ptr();
7926 | cpu_cc_srcT = tcg_temp_local_new();
7927 | |
7928 | gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
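     | /* Stop before the op buffer is completely full so the ops of
     |    one more instruction always fit. */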
7929 | |
7930 | dc->is_jmp = DISAS_NEXT;
7931 | pc_ptr = pc_start; |
7932 | lj = -1; |
7933 | num_insns = 0; |
7934 | max_insns = tb->cflags & CF_COUNT_MASK;
7935 | if (max_insns == 0)
7936 | max_insns = CF_COUNT_MASK;
7937 | |
7938 | gen_tb_start(); |
7939 | for(;;) { |
7940 | if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
7941 | QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
7942 | if (bp->pc == pc_ptr &&
7943 | !((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) {
7944 | gen_debug(dc, pc_ptr - dc->cs_base); |
7945 | break; |
7946 | } |
7947 | } |
7948 | } |
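     | /* In search_pc mode, record the guest PC, cc_op and insn count
     |    for every op so CPU state can be restored at any boundary. */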
7949 | if (search_pc) { |
7950 | j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf; |
7951 | if (lj < j) { |
7952 | lj++; |
7953 | while (lj < j) |
7954 | tcg_ctx.gen_opc_instr_start[lj++] = 0; |
7955 | } |
7956 | tcg_ctx.gen_opc_pc[lj] = pc_ptr; |
7957 | gen_opc_cc_op[lj] = dc->cc_op; |
7958 | tcg_ctx.gen_opc_instr_start[lj] = 1; |
7959 | tcg_ctx.gen_opc_icount[lj] = num_insns; |
7960 | } |
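     | /* If this TB ends with an I/O instruction (CF_LAST_IO), that
     |    last instruction is bracketed by gen_io_start/gen_io_end. */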
7961 | if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
7962 | gen_io_start(); |
7963 | |
7964 | pc_ptr = disas_insn(env, dc, pc_ptr); |
7965 | num_insns++; |
7966 | /* stop translation if indicated */ |
7967 | if (dc->is_jmp) |
7968 | break; |
7969 | /* in single-step mode, we generate only one instruction and
7970 | then generate an exception */
7971 | /* if irqs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
7972 | the flag and abort the translation to give the irqs a
7973 | chance to happen */
7974 | if (dc->tf || dc->singlestep_enabled || |
7975 | (flags & HF_INHIBIT_IRQ_MASK)) {
7976 | gen_jmp_im(pc_ptr - dc->cs_base); |
7977 | gen_eob(dc); |
7978 | break; |
7979 | } |
7980 | /* if too long translation, stop generation too */ |
7981 | if (tcg_ctx.gen_opc_ptr >= gen_opc_end || |
7982 | (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
7983 | num_insns >= max_insns) { |
7984 | gen_jmp_im(pc_ptr - dc->cs_base); |
7985 | gen_eob(dc); |
7986 | break; |
7987 | } |
7988 | if (singlestep) { |
7989 | gen_jmp_im(pc_ptr - dc->cs_base); |
7990 | gen_eob(dc); |
7991 | break; |
7992 | } |
7993 | } |
7994 | if (tb->cflags & CF_LAST_IO)
7995 | gen_io_end(); |
7996 | gen_tb_end(tb, num_insns); |
7997 | *tcg_ctx.gen_opc_ptr = INDEX_op_end; |
7998 | /* make sure the last values are properly filled in */
7999 | if (search_pc) { |
8000 | j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf; |
8001 | lj++; |
8002 | while (lj <= j) |
8003 | tcg_ctx.gen_opc_instr_start[lj++] = 0; |
8004 | } |
8005 | |
8006 | #ifdef DEBUG_DISAS |
8007 | if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
8008 | int disas_flags; |
8009 | qemu_log("----------------\n"); |
8010 | qemu_log("IN: %s\n", lookup_symbol(pc_start)); |
8011 | #ifdef TARGET_X86_64 |
8012 | if (dc->code64) |
8013 | disas_flags = 2; |
8014 | else |
8015 | #endif |
8016 | disas_flags = !dc->code32; |
8017 | log_target_disas(env, pc_start, pc_ptr - pc_start, disas_flags); |
8018 | qemu_log("\n"); |
8019 | } |
8020 | #endif |
8021 | |
8022 | if (!search_pc) { |
8023 | tb->size = pc_ptr - pc_start; |
8024 | tb->icount = num_insns; |
8025 | } |
8026 | } |
8027 | |
8028 | void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb) |
8029 | { |
8030 | gen_intermediate_code_internal(x86_env_get_cpu(env), tb, false);
8031 | } |
8032 | |
8033 | void gen_intermediate_code_pc(CPUX86State *env, TranslationBlock *tb) |
8034 | { |
8035 | gen_intermediate_code_internal(x86_env_get_cpu(env), tb, true);
8036 | } |
8037 | |
8038 | void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb, int pc_pos) |
8039 | { |
8040 | int cc_op; |
8041 | #ifdef DEBUG_DISAS |
8042 | if (qemu_loglevel_mask(CPU_LOG_TB_OP)) {
8043 | int i; |
8044 | qemu_log("RESTORE:\n"); |
8045 | for(i = 0;i <= pc_pos; i++) { |
8046 | if (tcg_ctx.gen_opc_instr_start[i]) { |
8047 | qemu_log("0x%04x: " TARGET_FMT_lx"%08x" "\n", i, |
8048 | tcg_ctx.gen_opc_pc[i]); |
8049 | } |
8050 | } |
8051 | qemu_log("pc_pos=0x%x eip=" TARGET_FMT_lx"%08x" " cs_base=%x\n", |
8052 | pc_pos, tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base, |
8053 | (uint32_t)tb->cs_base); |
8054 | } |
8055 | #endif |
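     | /* Restore eip from the recorded guest PC; cc_op is restored
     |    only if it was static at this point in the TB. */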
8056 | env->eip = tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base; |
8057 | cc_op = gen_opc_cc_op[pc_pos]; |
8058 | if (cc_op != CC_OP_DYNAMIC) |
8059 | env->cc_op = cc_op; |
8060 | } |