File: target-m68k/translate.c
Location: line 660, column 20
Description: Pass-by-value argument in function call is undefined
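The flagged call is `return tcg_const_i32(offset);` at line 660, inside the immediate-operand branch of gen_ea() (case 7, then case 4 in the listing below). The local `offset` is assigned only in the OS_BYTE, OS_WORD and OS_LONG arms of the inner `switch (opsize)`; the `default` arm calls qemu_assert(0, "Bad immediate operand") (defined at lines 110-116), which aborts at run time but carries no noreturn annotation, so the analyzer keeps a path on which control falls through to the `return` with `offset` never written, and reports the argument as an undefined pass-by-value. A minimal, self-contained sketch of the same pattern, using illustrative names that are not taken from QEMU:

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for qemu_assert(): it aborts when the condition fails, but is
     * not declared noreturn, so an analyzer assumes the caller may continue. */
    static void check(int cond, const char *msg)
    {
        if (!cond) {
            fprintf(stderr, "badness: %s\n", msg);
            abort();
        }
    }

    static int consume(int v) { return v; }   /* stand-in for tcg_const_i32() */

    static int example(int opsize)
    {
        int value;                  /* plays the role of 'offset' in gen_ea() */
        switch (opsize) {
        case 0: value = 1; break;
        case 1: value = 2; break;
        default:
            check(0, "bad operand size");
            /* the analyzer keeps a path that reaches the return below
             * with 'value' never written */
        }
        return consume(value);      /* "pass-by-value argument is undefined" */
    }

    int main(void)
    {
        printf("%d\n", example(0));
        return 0;
    }

The source listing reproduced by the analyzer follows.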
1 | /* | ||
2 | * m68k translation | ||
3 | * | ||
4 | * Copyright (c) 2005-2007 CodeSourcery | ||
5 | * Written by Paul Brook | ||
6 | * | ||
7 | * This library is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU Lesser General Public | ||
9 | * License as published by the Free Software Foundation; either | ||
10 | * version 2 of the License, or (at your option) any later version. | ||
11 | * | ||
12 | * This library is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
15 | * General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU Lesser General Public | ||
18 | * License along with this library; if not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
21 | #include "cpu.h" | ||
22 | #include "disas.h" | ||
23 | #include "tcg-op.h" | ||
24 | #include "qemu-log.h" | ||
25 | |||
26 | #include "helpers.h" | ||
27 | #define GEN_HELPER 1 | ||
28 | #include "helpers.h" | ||
29 | |||
30 | //#define DEBUG_DISPATCH 1 | ||
31 | |||
32 | /* Fake floating point. */ | ||
33 | #define tcg_gen_mov_f64 tcg_gen_mov_i64
34 | #define tcg_gen_qemu_ldf64 tcg_gen_qemu_ld64
35 | #define tcg_gen_qemu_stf64 tcg_gen_qemu_st64
36 |
37 | #define DEFO32(name, offset) static TCGv QREG_##name;
38 | #define DEFO64(name, offset) static TCGv_i64 QREG_##name; | ||
39 | #define DEFF64(name, offset) static TCGv_i64 QREG_##name; | ||
40 | #include "qregs.def" | ||
41 | #undef DEFO32 | ||
42 | #undef DEFO64 | ||
43 | #undef DEFF64 | ||
44 | |||
45 | static TCGv_ptr cpu_env; | ||
46 | |||
47 | static char cpu_reg_names[3*8*3 + 5*4];
48 | static TCGv cpu_dregs[8];
49 | static TCGv cpu_aregs[8];
50 | static TCGv_i64 cpu_fregs[8];
51 | static TCGv_i64 cpu_macc[4];
52 |
53 | #define DREG(insn, pos) cpu_dregs[((insn) >> (pos)) & 7]
54 | #define AREG(insn, pos) cpu_aregs[((insn) >> (pos)) & 7]
55 | #define FREG(insn, pos) cpu_fregs[((insn) >> (pos)) & 7]
56 | #define MACREG(acc) cpu_macc[acc]
57 | #define QREG_SP cpu_aregs[7]
58 |
59 | static TCGv NULL_QREG;
60 | #define IS_NULL_QREG(t) (TCGV_EQUAL(t, NULL_QREG))
61 | /* Used to distinguish stores from bad addressing modes. */
62 | static TCGv store_dummy;
63 | |||
64 | #include "gen-icount.h" | ||
65 | |||
66 | void m68k_tcg_init(void) | ||
67 | { | ||
68 | char *p; | ||
69 | int i; | ||
70 | |||
71 | #define DEFO32(name, offset) QREG_##name = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUM68KState, offset), #name);
72 | #define DEFO64(name, offset) QREG_##name = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUM68KState, offset), #name);
73 | #define DEFF64(name, offset) DEFO64(name, offset)
74 | #include "qregs.def"
75 | #undef DEFO32
76 | #undef DEFO64
77 | #undef DEFF64
78 |
79 | cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
80 |
81 | p = cpu_reg_names;
82 | for (i = 0; i < 8; i++) {
83 | sprintf(p, "D%d", i);
84 | cpu_dregs[i] = tcg_global_mem_new(TCG_AREG0,
85 | offsetof(CPUM68KState, dregs[i]), p);
86 | p += 3;
87 | sprintf(p, "A%d", i);
88 | cpu_aregs[i] = tcg_global_mem_new(TCG_AREG0,
89 | offsetof(CPUM68KState, aregs[i]), p);
90 | p += 3;
91 | sprintf(p, "F%d", i);
92 | cpu_fregs[i] = tcg_global_mem_new_i64(TCG_AREG0,
93 | offsetof(CPUM68KState, fregs[i]), p);
94 | p += 3;
95 | }
96 | for (i = 0; i < 4; i++) {
97 | sprintf(p, "ACC%d", i);
98 | cpu_macc[i] = tcg_global_mem_new_i64(TCG_AREG0,
99 | offsetof(CPUM68KState, macc[i]), p);
100 | p += 5;
101 | }
102 |
103 | NULL_QREG = tcg_global_mem_new(TCG_AREG0, -4, "NULL");
104 | store_dummy = tcg_global_mem_new(TCG_AREG0, -8, "NULL");
105 | |||
106 | #define GEN_HELPER 2 | ||
107 | #include "helpers.h" | ||
108 | } | ||
109 | |||
110 | static inline void qemu_assert(int cond, const char *msg) | ||
111 | { | ||
112 | if (!cond) { | ||
113 | fprintf (stderr, "badness: %s\n", msg);
114 | abort(); | ||
115 | } | ||
116 | } | ||
117 | |||
118 | /* internal defines */ | ||
119 | typedef struct DisasContext { | ||
120 | CPUM68KState *env; | ||
121 | target_ulong insn_pc; /* Start of the current instruction. */ | ||
122 | target_ulong pc; | ||
123 | int is_jmp; | ||
124 | int cc_op; | ||
125 | int user; | ||
126 | uint32_t fpcr; | ||
127 | struct TranslationBlock *tb; | ||
128 | int singlestep_enabled; | ||
129 | int is_mem; | ||
130 | TCGv_i64 mactmp; | ||
131 | int done_mac; | ||
132 | } DisasContext; | ||
133 | |||
134 | #define DISAS_JUMP_NEXT 4
135 |
136 | #if defined(CONFIG_USER_ONLY)
137 | #define IS_USER(s) 1
138 | #else
139 | #define IS_USER(s) s->user
140 | #endif
141 |
142 | /* XXX: move that elsewhere */
143 | /* ??? Fix exceptions. */
144 | static void *gen_throws_exception;
145 | #define gen_last_qop NULL
146 |
147 | #define OS_BYTE 0
148 | #define OS_WORD 1
149 | #define OS_LONG 2
150 | #define OS_SINGLE 4
151 | #define OS_DOUBLE 5
152 | |||
153 | typedef void (*disas_proc)(DisasContext *, uint16_t); | ||
154 | |||
155 | #ifdef DEBUG_DISPATCH
156 | #define DISAS_INSN(name) \
157 | static void real_disas_##name (DisasContext *s, uint16_t insn); \
158 | static void disas_##name (DisasContext *s, uint16_t insn) { \
159 | qemu_log("Dispatch " #name "\n"); \
160 | real_disas_##name(s, insn); } \
161 | static void real_disas_##name (DisasContext *s, uint16_t insn)
162 | #else
163 | #define DISAS_INSN(name) \
164 | static void disas_##name (DisasContext *s, uint16_t insn)
165 | #endif
166 | |||
167 | /* Generate a load from the specified address. Narrow values are | ||
168 | sign extended to full register width. */ | ||
169 | static inline TCGvTCGv_i32 gen_load(DisasContext * s, int opsize, TCGvTCGv_i32 addr, int sign) | ||
170 | { | ||
171 | TCGvTCGv_i32 tmp; | ||
172 | int index = IS_USER(s)s->user; | ||
173 | s->is_mem = 1; | ||
174 | tmp = tcg_temp_new_i32(); | ||
175 | switch(opsize) { | ||
176 | case OS_BYTE0: | ||
177 | if (sign) | ||
178 | tcg_gen_qemu_ld8s(tmp, addr, index); | ||
179 | else | ||
180 | tcg_gen_qemu_ld8u(tmp, addr, index); | ||
181 | break; | ||
182 | case OS_WORD1: | ||
183 | if (sign) | ||
184 | tcg_gen_qemu_ld16s(tmp, addr, index); | ||
185 | else | ||
186 | tcg_gen_qemu_ld16u(tmp, addr, index); | ||
187 | break; | ||
188 | case OS_LONG2: | ||
189 | case OS_SINGLE4: | ||
190 | tcg_gen_qemu_ld32u(tmp, addr, index); | ||
191 | break; | ||
192 | default: | ||
193 | qemu_assert(0, "bad load size"); | ||
194 | } | ||
195 | gen_throws_exception = gen_last_qop((void*)0); | ||
196 | return tmp; | ||
197 | } | ||
198 | |||
199 | static inline TCGv_i64 gen_load64(DisasContext * s, TCGvTCGv_i32 addr) | ||
200 | { | ||
201 | TCGv_i64 tmp; | ||
202 | int index = IS_USER(s)s->user; | ||
203 | s->is_mem = 1; | ||
204 | tmp = tcg_temp_new_i64(); | ||
205 | tcg_gen_qemu_ldf64tcg_gen_qemu_ld64(tmp, addr, index); | ||
206 | gen_throws_exception = gen_last_qop((void*)0); | ||
207 | return tmp; | ||
208 | } | ||
209 | |||
210 | /* Generate a store. */ | ||
211 | static inline void gen_store(DisasContext *s, int opsize, TCGvTCGv_i32 addr, TCGvTCGv_i32 val) | ||
212 | { | ||
213 | int index = IS_USER(s)s->user; | ||
214 | s->is_mem = 1; | ||
215 | switch(opsize) { | ||
216 | case OS_BYTE0: | ||
217 | tcg_gen_qemu_st8(val, addr, index); | ||
218 | break; | ||
219 | case OS_WORD1: | ||
220 | tcg_gen_qemu_st16(val, addr, index); | ||
221 | break; | ||
222 | case OS_LONG2: | ||
223 | case OS_SINGLE4: | ||
224 | tcg_gen_qemu_st32(val, addr, index); | ||
225 | break; | ||
226 | default: | ||
227 | qemu_assert(0, "bad store size"); | ||
228 | } | ||
229 | gen_throws_exception = gen_last_qop((void*)0); | ||
230 | } | ||
231 | |||
232 | static inline void gen_store64(DisasContext *s, TCGvTCGv_i32 addr, TCGv_i64 val) | ||
233 | { | ||
234 | int index = IS_USER(s)s->user; | ||
235 | s->is_mem = 1; | ||
236 | tcg_gen_qemu_stf64tcg_gen_qemu_st64(val, addr, index); | ||
237 | gen_throws_exception = gen_last_qop((void*)0); | ||
238 | } | ||
239 | |||
240 | typedef enum { | ||
241 | EA_STORE, | ||
242 | EA_LOADU, | ||
243 | EA_LOADS | ||
244 | } ea_what; | ||
245 | |||
246 | /* Generate an unsigned load if VAL is 0 a signed load if val is -1, | ||
247 | otherwise generate a store. */ | ||
248 | static TCGvTCGv_i32 gen_ldst(DisasContext *s, int opsize, TCGvTCGv_i32 addr, TCGvTCGv_i32 val, | ||
249 | ea_what what) | ||
250 | { | ||
251 | if (what == EA_STORE) { | ||
252 | gen_store(s, opsize, addr, val); | ||
253 | return store_dummy; | ||
254 | } else { | ||
255 | return gen_load(s, opsize, addr, what == EA_LOADS); | ||
256 | } | ||
257 | } | ||
258 | |||
259 | /* Read a 32-bit immediate constant. */ | ||
260 | static inline uint32_t read_im32(DisasContext *s) | ||
261 | { | ||
262 | uint32_t im; | ||
263 | im = ((uint32_t)lduw_code(s->pc)) << 16; | ||
264 | s->pc += 2; | ||
265 | im |= lduw_code(s->pc); | ||
266 | s->pc += 2; | ||
267 | return im; | ||
268 | } | ||
269 | |||
270 | /* Calculate and address index. */ | ||
271 | static TCGvTCGv_i32 gen_addr_index(uint16_t ext, TCGvTCGv_i32 tmp) | ||
272 | { | ||
273 | TCGvTCGv_i32 add; | ||
274 | int scale; | ||
275 | |||
276 | add = (ext & 0x8000) ? AREG(ext, 12)cpu_aregs[((ext) >> (12)) & 7] : DREG(ext, 12)cpu_dregs[((ext) >> (12)) & 7]; | ||
277 | if ((ext & 0x800) == 0) { | ||
278 | tcg_gen_ext16s_i32(tmp, add); | ||
279 | add = tmp; | ||
280 | } | ||
281 | scale = (ext >> 9) & 3; | ||
282 | if (scale != 0) { | ||
283 | tcg_gen_shli_i32(tmp, add, scale); | ||
284 | add = tmp; | ||
285 | } | ||
286 | return add; | ||
287 | } | ||
288 | |||
289 | /* Handle a base + index + displacement effective addresss. | ||
290 | A NULL_QREG base means pc-relative. */ | ||
291 | static TCGvTCGv_i32 gen_lea_indexed(DisasContext *s, int opsize, TCGvTCGv_i32 base) | ||
292 | { | ||
293 | uint32_t offset; | ||
294 | uint16_t ext; | ||
295 | TCGvTCGv_i32 add; | ||
296 | TCGvTCGv_i32 tmp; | ||
297 | uint32_t bd, od; | ||
298 | |||
299 | offset = s->pc; | ||
300 | ext = lduw_code(s->pc); | ||
301 | s->pc += 2; | ||
302 | |||
303 | if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX)) | ||
304 | return NULL_QREG; | ||
305 | |||
306 | if (ext & 0x100) { | ||
307 | /* full extension word format */ | ||
308 | if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL)) | ||
309 | return NULL_QREG; | ||
310 | |||
311 | if ((ext & 0x30) > 0x10) { | ||
312 | /* base displacement */ | ||
313 | if ((ext & 0x30) == 0x20) { | ||
314 | bd = (int16_t)lduw_code(s->pc); | ||
315 | s->pc += 2; | ||
316 | } else { | ||
317 | bd = read_im32(s); | ||
318 | } | ||
319 | } else { | ||
320 | bd = 0; | ||
321 | } | ||
322 | tmp = tcg_temp_new()tcg_temp_new_i32(); | ||
323 | if ((ext & 0x44) == 0) { | ||
324 | /* pre-index */ | ||
325 | add = gen_addr_index(ext, tmp); | ||
326 | } else { | ||
327 | add = NULL_QREG; | ||
328 | } | ||
329 | if ((ext & 0x80) == 0) { | ||
330 | /* base not suppressed */ | ||
331 | if (IS_NULL_QREG(base)((((base).i32) == ((NULL_QREG).i32)))) { | ||
332 | base = tcg_const_i32(offset + bd); | ||
333 | bd = 0; | ||
334 | } | ||
335 | if (!IS_NULL_QREG(add)((((add).i32) == ((NULL_QREG).i32)))) { | ||
336 | tcg_gen_add_i32(tmp, add, base); | ||
337 | add = tmp; | ||
338 | } else { | ||
339 | add = base; | ||
340 | } | ||
341 | } | ||
342 | if (!IS_NULL_QREG(add)((((add).i32) == ((NULL_QREG).i32)))) { | ||
343 | if (bd != 0) { | ||
344 | tcg_gen_addi_i32(tmp, add, bd); | ||
345 | add = tmp; | ||
346 | } | ||
347 | } else { | ||
348 | add = tcg_const_i32(bd); | ||
349 | } | ||
350 | if ((ext & 3) != 0) { | ||
351 | /* memory indirect */ | ||
352 | base = gen_load(s, OS_LONG2, add, 0); | ||
353 | if ((ext & 0x44) == 4) { | ||
354 | add = gen_addr_index(ext, tmp); | ||
355 | tcg_gen_add_i32(tmp, add, base); | ||
356 | add = tmp; | ||
357 | } else { | ||
358 | add = base; | ||
359 | } | ||
360 | if ((ext & 3) > 1) { | ||
361 | /* outer displacement */ | ||
362 | if ((ext & 3) == 2) { | ||
363 | od = (int16_t)lduw_code(s->pc); | ||
364 | s->pc += 2; | ||
365 | } else { | ||
366 | od = read_im32(s); | ||
367 | } | ||
368 | } else { | ||
369 | od = 0; | ||
370 | } | ||
371 | if (od != 0) { | ||
372 | tcg_gen_addi_i32(tmp, add, od); | ||
373 | add = tmp; | ||
374 | } | ||
375 | } | ||
376 | } else { | ||
377 | /* brief extension word format */ | ||
378 | tmp = tcg_temp_new()tcg_temp_new_i32(); | ||
379 | add = gen_addr_index(ext, tmp); | ||
380 | if (!IS_NULL_QREG(base)((((base).i32) == ((NULL_QREG).i32)))) { | ||
381 | tcg_gen_add_i32(tmp, add, base); | ||
382 | if ((int8_t)ext) | ||
383 | tcg_gen_addi_i32(tmp, tmp, (int8_t)ext); | ||
384 | } else { | ||
385 | tcg_gen_addi_i32(tmp, add, offset + (int8_t)ext); | ||
386 | } | ||
387 | add = tmp; | ||
388 | } | ||
389 | return add; | ||
390 | } | ||
391 | |||
392 | /* Update the CPU env CC_OP state. */ | ||
393 | static inline void gen_flush_cc_op(DisasContext *s) | ||
394 | { | ||
395 | if (s->cc_op != CC_OP_DYNAMIC) | ||
396 | tcg_gen_movi_i32(QREG_CC_OP, s->cc_op); | ||
397 | } | ||
398 | |||
399 | /* Evaluate all the CC flags. */ | ||
400 | static inline void gen_flush_flags(DisasContext *s) | ||
401 | { | ||
402 | if (s->cc_op == CC_OP_FLAGS) | ||
403 | return; | ||
404 | gen_flush_cc_op(s); | ||
405 | gen_helper_flush_flags(cpu_env, QREG_CC_OP); | ||
406 | s->cc_op = CC_OP_FLAGS; | ||
407 | } | ||
408 | |||
409 | static void gen_logic_cc(DisasContext *s, TCGvTCGv_i32 val) | ||
410 | { | ||
411 | tcg_gen_mov_i32(QREG_CC_DEST, val); | ||
412 | s->cc_op = CC_OP_LOGIC; | ||
413 | } | ||
414 | |||
415 | static void gen_update_cc_add(TCGvTCGv_i32 dest, TCGvTCGv_i32 src) | ||
416 | { | ||
417 | tcg_gen_mov_i32(QREG_CC_DEST, dest); | ||
418 | tcg_gen_mov_i32(QREG_CC_SRC, src); | ||
419 | } | ||
420 | |||
421 | static inline int opsize_bytes(int opsize) | ||
422 | { | ||
423 | switch (opsize) { | ||
424 | case OS_BYTE0: return 1; | ||
425 | case OS_WORD1: return 2; | ||
426 | case OS_LONG2: return 4; | ||
427 | case OS_SINGLE4: return 4; | ||
428 | case OS_DOUBLE5: return 8; | ||
429 | default: | ||
430 | qemu_assert(0, "bad operand size"); | ||
431 | return 0; | ||
432 | } | ||
433 | } | ||
434 | |||
435 | /* Assign value to a register. If the width is less than the register width | ||
436 | only the low part of the register is set. */ | ||
437 | static void gen_partset_reg(int opsize, TCGvTCGv_i32 reg, TCGvTCGv_i32 val) | ||
438 | { | ||
439 | TCGvTCGv_i32 tmp; | ||
440 | switch (opsize) { | ||
441 | case OS_BYTE0: | ||
442 | tcg_gen_andi_i32(reg, reg, 0xffffff00); | ||
443 | tmp = tcg_temp_new()tcg_temp_new_i32(); | ||
444 | tcg_gen_ext8u_i32(tmp, val); | ||
445 | tcg_gen_or_i32(reg, reg, tmp); | ||
446 | break; | ||
447 | case OS_WORD1: | ||
448 | tcg_gen_andi_i32(reg, reg, 0xffff0000); | ||
449 | tmp = tcg_temp_new()tcg_temp_new_i32(); | ||
450 | tcg_gen_ext16u_i32(tmp, val); | ||
451 | tcg_gen_or_i32(reg, reg, tmp); | ||
452 | break; | ||
453 | case OS_LONG2: | ||
454 | case OS_SINGLE4: | ||
455 | tcg_gen_mov_i32(reg, val); | ||
456 | break; | ||
457 | default: | ||
458 | qemu_assert(0, "Bad operand size"); | ||
459 | break; | ||
460 | } | ||
461 | } | ||
462 | |||
463 | /* Sign or zero extend a value. */ | ||
464 | static inline TCGvTCGv_i32 gen_extend(TCGvTCGv_i32 val, int opsize, int sign) | ||
465 | { | ||
466 | TCGvTCGv_i32 tmp; | ||
467 | |||
468 | switch (opsize) { | ||
469 | case OS_BYTE0: | ||
470 | tmp = tcg_temp_new()tcg_temp_new_i32(); | ||
471 | if (sign) | ||
472 | tcg_gen_ext8s_i32(tmp, val); | ||
473 | else | ||
474 | tcg_gen_ext8u_i32(tmp, val); | ||
475 | break; | ||
476 | case OS_WORD1: | ||
477 | tmp = tcg_temp_new()tcg_temp_new_i32(); | ||
478 | if (sign) | ||
479 | tcg_gen_ext16s_i32(tmp, val); | ||
480 | else | ||
481 | tcg_gen_ext16u_i32(tmp, val); | ||
482 | break; | ||
483 | case OS_LONG2: | ||
484 | case OS_SINGLE4: | ||
485 | tmp = val; | ||
486 | break; | ||
487 | default: | ||
488 | qemu_assert(0, "Bad operand size"); | ||
489 | } | ||
490 | return tmp; | ||
491 | } | ||
492 | |||
493 | /* Generate code for an "effective address". Does not adjust the base | ||
494 | register for autoincrement addressing modes. */ | ||
495 | static TCGvTCGv_i32 gen_lea(DisasContext *s, uint16_t insn, int opsize) | ||
496 | { | ||
497 | TCGvTCGv_i32 reg; | ||
498 | TCGvTCGv_i32 tmp; | ||
499 | uint16_t ext; | ||
500 | uint32_t offset; | ||
501 | |||
502 | switch ((insn >> 3) & 7) { | ||
503 | case 0: /* Data register direct. */ | ||
504 | case 1: /* Address register direct. */ | ||
505 | return NULL_QREG; | ||
506 | case 2: /* Indirect register */ | ||
507 | case 3: /* Indirect postincrement. */ | ||
508 | return AREG(insn, 0)cpu_aregs[((insn) >> (0)) & 7]; | ||
509 | case 4: /* Indirect predecrememnt. */ | ||
510 | reg = AREG(insn, 0)cpu_aregs[((insn) >> (0)) & 7]; | ||
511 | tmp = tcg_temp_new()tcg_temp_new_i32(); | ||
512 | tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize)); | ||
513 | return tmp; | ||
514 | case 5: /* Indirect displacement. */ | ||
515 | reg = AREG(insn, 0)cpu_aregs[((insn) >> (0)) & 7]; | ||
516 | tmp = tcg_temp_new()tcg_temp_new_i32(); | ||
517 | ext = lduw_code(s->pc); | ||
518 | s->pc += 2; | ||
519 | tcg_gen_addi_i32(tmp, reg, (int16_t)ext); | ||
520 | return tmp; | ||
521 | case 6: /* Indirect index + displacement. */ | ||
522 | reg = AREG(insn, 0)cpu_aregs[((insn) >> (0)) & 7]; | ||
523 | return gen_lea_indexed(s, opsize, reg); | ||
524 | case 7: /* Other */ | ||
525 | switch (insn & 7) { | ||
526 | case 0: /* Absolute short. */ | ||
527 | offset = ldsw_code(s->pc); | ||
528 | s->pc += 2; | ||
529 | return tcg_const_i32(offset); | ||
530 | case 1: /* Absolute long. */ | ||
531 | offset = read_im32(s); | ||
532 | return tcg_const_i32(offset); | ||
533 | case 2: /* pc displacement */ | ||
534 | offset = s->pc; | ||
535 | offset += ldsw_code(s->pc); | ||
536 | s->pc += 2; | ||
537 | return tcg_const_i32(offset); | ||
538 | case 3: /* pc index+displacement. */ | ||
539 | return gen_lea_indexed(s, opsize, NULL_QREG); | ||
540 | case 4: /* Immediate. */ | ||
541 | default: | ||
542 | return NULL_QREG; | ||
543 | } | ||
544 | } | ||
545 | /* Should never happen. */ | ||
546 | return NULL_QREG; | ||
547 | } | ||
548 | |||
549 | /* Helper function for gen_ea. Reuse the computed address between the | ||
550 | for read/write operands. */ | ||
551 | static inline TCGvTCGv_i32 gen_ea_once(DisasContext *s, uint16_t insn, int opsize, | ||
552 | TCGvTCGv_i32 val, TCGvTCGv_i32 *addrp, ea_what what) | ||
553 | { | ||
554 | TCGvTCGv_i32 tmp; | ||
555 | |||
556 | if (addrp && what == EA_STORE) { | ||
557 | tmp = *addrp; | ||
558 | } else { | ||
559 | tmp = gen_lea(s, insn, opsize); | ||
560 | if (IS_NULL_QREG(tmp)((((tmp).i32) == ((NULL_QREG).i32)))) | ||
561 | return tmp; | ||
562 | if (addrp) | ||
563 | *addrp = tmp; | ||
564 | } | ||
565 | return gen_ldst(s, opsize, tmp, val, what); | ||
566 | } | ||
567 | |||
568 | /* Generate code to load/store a value ito/from an EA. If VAL > 0 this is
569 | a write otherwise it is a read (0 == sign extend, -1 == zero extend).
570 | ADDRP is non-null for readwrite operands. */
571 | static TCGv gen_ea(DisasContext *s, uint16_t insn, int opsize, TCGv val,
572 | TCGv *addrp, ea_what what)
573 | {
574 | TCGv reg;
575 | TCGv result;
576 | uint32_t offset;
577 |
578 | switch ((insn >> 3) & 7) {
579 | case 0: /* Data register direct. */
580 | reg = DREG(insn, 0);
581 | if (what == EA_STORE) {
582 | gen_partset_reg(opsize, reg, val);
583 | return store_dummy;
584 | } else {
585 | return gen_extend(reg, opsize, what == EA_LOADS);
586 | }
587 | case 1: /* Address register direct. */
588 | reg = AREG(insn, 0);
589 | if (what == EA_STORE) {
590 | tcg_gen_mov_i32(reg, val);
591 | return store_dummy;
592 | } else {
593 | return gen_extend(reg, opsize, what == EA_LOADS);
594 | }
595 | case 2: /* Indirect register */
596 | reg = AREG(insn, 0);
597 | return gen_ldst(s, opsize, reg, val, what);
598 | case 3: /* Indirect postincrement. */
599 | reg = AREG(insn, 0);
600 | result = gen_ldst(s, opsize, reg, val, what);
601 | /* ??? This is not exception safe. The instruction may still
602 | fault after this point. */
603 | if (what == EA_STORE || !addrp)
604 | tcg_gen_addi_i32(reg, reg, opsize_bytes(opsize));
605 | return result;
606 | case 4: /* Indirect predecrememnt. */
607 | {
608 | TCGv tmp;
609 | if (addrp && what == EA_STORE) {
610 | tmp = *addrp;
611 | } else {
612 | tmp = gen_lea(s, insn, opsize);
613 | if (IS_NULL_QREG(tmp))
614 | return tmp;
615 | if (addrp)
616 | *addrp = tmp;
617 | }
618 | result = gen_ldst(s, opsize, tmp, val, what);
619 | /* ??? This is not exception safe. The instruction may still
620 | fault after this point. */
621 | if (what == EA_STORE || !addrp) {
622 | reg = AREG(insn, 0);
623 | tcg_gen_mov_i32(reg, tmp);
624 | }
625 | }
626 | return result;
627 | case 5: /* Indirect displacement. */
628 | case 6: /* Indirect index + displacement. */
629 | return gen_ea_once(s, insn, opsize, val, addrp, what);
630 | case 7: /* Other */
631 | switch (insn & 7) {
632 | case 0: /* Absolute short. */
633 | case 1: /* Absolute long. */
634 | case 2: /* pc displacement */
635 | case 3: /* pc index+displacement. */
636 | return gen_ea_once(s, insn, opsize, val, addrp, what);
637 | case 4: /* Immediate. */
638 | /* Sign extend values for consistency. */
639 | switch (opsize) {
640 | case OS_BYTE:
641 | if (what == EA_LOADS)
642 | offset = ldsb_code(s->pc + 1);
643 | else
644 | offset = ldub_code(s->pc + 1);
645 | s->pc += 2;
646 | break;
647 | case OS_WORD:
648 | if (what == EA_LOADS)
649 | offset = ldsw_code(s->pc);
650 | else
651 | offset = lduw_code(s->pc);
652 | s->pc += 2;
653 | break;
654 | case OS_LONG:
655 | offset = read_im32(s);
656 | break;
657 | default:
658 | qemu_assert(0, "Bad immediate operand");
659 | }
660 | return tcg_const_i32(offset);
661 | default:
662 | return NULL_QREG;
663 | }
664 | }
665 | /* Should never happen. */
666 | return NULL_QREG;
667 | }
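One way to make the report go away (shown purely as an illustration; it is not necessarily the change adopted in the QEMU tree) is to ensure every path through the immediate case hands the analyzer a defined value, for example by bailing out through the existing NULL_QREG convention after the assertion:

    default:
        qemu_assert(0, "Bad immediate operand");
        return NULL_QREG;   /* unreachable at run time, but removes the path
                               on which 'offset' is read uninitialized */

Alternatively, replacing the qemu_assert(0, ...) call with a plain abort() would let the analyzer prove that the default arm never falls through.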
668 | |||
669 | /* This generates a conditional branch, clobbering all temporaries. */ | ||
670 | static void gen_jmpcc(DisasContext *s, int cond, int l1) | ||
671 | { | ||
672 | TCGvTCGv_i32 tmp; | ||
673 | |||
674 | /* TODO: Optimize compare/branch pairs rather than always flushing | ||
675 | flag state to CC_OP_FLAGS. */ | ||
676 | gen_flush_flags(s); | ||
677 | switch (cond) { | ||
678 | case 0: /* T */ | ||
679 | tcg_gen_br(l1); | ||
680 | break; | ||
681 | case 1: /* F */ | ||
682 | break; | ||
683 | case 2: /* HI (!C && !Z) */ | ||
684 | tmp = tcg_temp_new()tcg_temp_new_i32(); | ||
685 | tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C0x01 | CCF_Z0x04); | ||
686 | tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1); | ||
687 | break; | ||
688 | case 3: /* LS (C || Z) */ | ||
689 | tmp = tcg_temp_new()tcg_temp_new_i32(); | ||
690 | tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C0x01 | CCF_Z0x04); | ||
691 | tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1); | ||
692 | break; | ||
693 | case 4: /* CC (!C) */ | ||
694 | tmp = tcg_temp_new()tcg_temp_new_i32(); | ||
695 | tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C0x01); | ||
696 | tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1); | ||
697 | break; | ||
698 | case 5: /* CS (C) */ | ||
699 | tmp = tcg_temp_new()tcg_temp_new_i32(); | ||
700 | tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C0x01); | ||
701 | tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1); | ||
702 | break; | ||
703 | case 6: /* NE (!Z) */ | ||
704 | tmp = tcg_temp_new()tcg_temp_new_i32(); | ||
705 | tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_Z0x04); | ||
706 | tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1); | ||
707 | break; | ||
708 | case 7: /* EQ (Z) */ | ||
709 | tmp = tcg_temp_new()tcg_temp_new_i32(); | ||
710 | tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_Z0x04); | ||
711 | tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1); | ||
712 | break; | ||
713 | case 8: /* VC (!V) */ | ||
714 | tmp = tcg_temp_new()tcg_temp_new_i32(); | ||
715 | tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_V0x02); | ||
716 | tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1); | ||
717 | break; | ||
718 | case 9: /* VS (V) */ | ||
719 | tmp = tcg_temp_new()tcg_temp_new_i32(); | ||
720 | tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_V0x02); | ||
721 | tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1); | ||
722 | break; | ||
723 | case 10: /* PL (!N) */ | ||
724 | tmp = tcg_temp_new()tcg_temp_new_i32(); | ||
725 | tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N0x08); | ||
726 | tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1); | ||
727 | break; | ||
728 | case 11: /* MI (N) */ | ||
729 | tmp = tcg_temp_new()tcg_temp_new_i32(); | ||
730 | tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N0x08); | ||
731 | tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1); | ||
732 | break; | ||
733 | case 12: /* GE (!(N ^ V)) */ | ||
734 | tmp = tcg_temp_new()tcg_temp_new_i32(); | ||
735 | assert(CCF_V == (CCF_N >> 2));
736 | tcg_gen_shri_i32(tmp, QREG_CC_DEST, 2); | ||
737 | tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST); | ||
738 | tcg_gen_andi_i32(tmp, tmp, CCF_V0x02); | ||
739 | tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1); | ||
740 | break; | ||
741 | case 13: /* LT (N ^ V) */ | ||
742 | tmp = tcg_temp_new()tcg_temp_new_i32(); | ||
743 | assert(CCF_V == (CCF_N >> 2));
744 | tcg_gen_shri_i32(tmp, QREG_CC_DEST, 2); | ||
745 | tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST); | ||
746 | tcg_gen_andi_i32(tmp, tmp, CCF_V0x02); | ||
747 | tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1); | ||
748 | break; | ||
749 | case 14: /* GT (!(Z || (N ^ V))) */ | ||
750 | tmp = tcg_temp_new()tcg_temp_new_i32(); | ||
751 | assert(CCF_V == (CCF_N >> 2));
752 | tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N0x08); | ||
753 | tcg_gen_shri_i32(tmp, tmp, 2); | ||
754 | tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST); | ||
755 | tcg_gen_andi_i32(tmp, tmp, CCF_V0x02 | CCF_Z0x04); | ||
756 | tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1); | ||
757 | break; | ||
758 | case 15: /* LE (Z || (N ^ V)) */ | ||
759 | tmp = tcg_temp_new()tcg_temp_new_i32(); | ||
760 | assert(CCF_V == (CCF_N >> 2));
761 | tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N0x08); | ||
762 | tcg_gen_shri_i32(tmp, tmp, 2); | ||
763 | tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST); | ||
764 | tcg_gen_andi_i32(tmp, tmp, CCF_V0x02 | CCF_Z0x04); | ||
765 | tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1); | ||
766 | break; | ||
767 | default: | ||
768 | /* Should ever happen. */ | ||
769 | abort(); | ||
770 | } | ||
771 | } | ||
772 | |||
773 | DISAS_INSN(scc)static void disas_scc (DisasContext *s, uint16_t insn) | ||
774 | { | ||
775 | int l1; | ||
776 | int cond; | ||
777 | TCGvTCGv_i32 reg; | ||
778 | |||
779 | l1 = gen_new_label(); | ||
780 | cond = (insn >> 8) & 0xf; | ||
781 | reg = DREG(insn, 0)cpu_dregs[((insn) >> (0)) & 7]; | ||
782 | tcg_gen_andi_i32(reg, reg, 0xffffff00); | ||
783 | /* This is safe because we modify the reg directly, with no other values | ||
784 | live. */ | ||
785 | gen_jmpcc(s, cond ^ 1, l1); | ||
786 | tcg_gen_ori_i32(reg, reg, 0xff); | ||
787 | gen_set_label(l1); | ||
788 | } | ||
789 | |||
790 | /* Force a TB lookup after an instruction that changes the CPU state. */ | ||
791 | static void gen_lookup_tb(DisasContext *s) | ||
792 | { | ||
793 | gen_flush_cc_op(s); | ||
794 | tcg_gen_movi_i32(QREG_PC, s->pc); | ||
795 | s->is_jmp = DISAS_UPDATE2; | ||
796 | } | ||
797 | |||
798 | /* Generate a jump to an immediate address. */ | ||
799 | static void gen_jmp_im(DisasContext *s, uint32_t dest) | ||
800 | { | ||
801 | gen_flush_cc_op(s); | ||
802 | tcg_gen_movi_i32(QREG_PC, dest); | ||
803 | s->is_jmp = DISAS_JUMP1; | ||
804 | } | ||
805 | |||
806 | /* Generate a jump to the address in qreg DEST. */ | ||
807 | static void gen_jmp(DisasContext *s, TCGvTCGv_i32 dest) | ||
808 | { | ||
809 | gen_flush_cc_op(s); | ||
810 | tcg_gen_mov_i32(QREG_PC, dest); | ||
811 | s->is_jmp = DISAS_JUMP1; | ||
812 | } | ||
813 | |||
814 | static void gen_exception(DisasContext *s, uint32_t where, int nr) | ||
815 | { | ||
816 | gen_flush_cc_op(s); | ||
817 | gen_jmp_im(s, where); | ||
818 | gen_helper_raise_exception(tcg_const_i32(nr)); | ||
819 | } | ||
820 | |||
821 | static inline void gen_addr_fault(DisasContext *s) | ||
822 | { | ||
823 | gen_exception(s, s->insn_pc, EXCP_ADDRESS3); | ||
824 | } | ||
825 | |||
826 | #define SRC_EA(result, opsize, op_sign, addrp) do { \
827 | result = gen_ea(s, insn, opsize, NULL_QREG, addrp, op_sign ? EA_LOADS : EA_LOADU); \
828 | if (IS_NULL_QREG(result)) { \
829 | gen_addr_fault(s); \
830 | return; \
831 | } \
832 | } while (0)
833 |
834 | #define DEST_EA(insn, opsize, val, addrp) do { \
835 | TCGv ea_result = gen_ea(s, insn, opsize, val, addrp, EA_STORE); \
836 | if (IS_NULL_QREG(ea_result)) { \
837 | gen_addr_fault(s); \
838 | return; \
839 | } \
840 | } while (0)
841 | |||
842 | /* Generate a jump to an immediate address. */ | ||
843 | static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest) | ||
844 | { | ||
845 | TranslationBlock *tb; | ||
846 | |||
847 | tb = s->tb; | ||
848 | if (unlikely(s->singlestep_enabled)__builtin_expect(!!(s->singlestep_enabled), 0)) { | ||
849 | gen_exception(s, dest, EXCP_DEBUG0x10002); | ||
850 | } else if ((tb->pc & TARGET_PAGE_MASK~((1 << 10) - 1)) == (dest & TARGET_PAGE_MASK~((1 << 10) - 1)) || | ||
851 | (s->pc & TARGET_PAGE_MASK~((1 << 10) - 1)) == (dest & TARGET_PAGE_MASK~((1 << 10) - 1))) { | ||
852 | tcg_gen_goto_tb(n); | ||
853 | tcg_gen_movi_i32(QREG_PC, dest); | ||
854 | tcg_gen_exit_tb((tcg_target_long)tb + n); | ||
855 | } else { | ||
856 | gen_jmp_im(s, dest); | ||
857 | tcg_gen_exit_tb(0); | ||
858 | } | ||
859 | s->is_jmp = DISAS_TB_JUMP3; | ||
860 | } | ||
861 | |||
862 | DISAS_INSN(undef_mac)static void disas_undef_mac (DisasContext *s, uint16_t insn) | ||
863 | { | ||
864 | gen_exception(s, s->pc - 2, EXCP_LINEA10); | ||
865 | } | ||
866 | |||
867 | DISAS_INSN(undef_fpu)static void disas_undef_fpu (DisasContext *s, uint16_t insn) | ||
868 | { | ||
869 | gen_exception(s, s->pc - 2, EXCP_LINEF11); | ||
870 | } | ||
871 | |||
872 | DISAS_INSN(undef)static void disas_undef (DisasContext *s, uint16_t insn) | ||
873 | { | ||
874 | gen_exception(s, s->pc - 2, EXCP_UNSUPPORTED61); | ||
875 | cpu_abort(cpu_single_envtls__cpu_single_env, "Illegal instruction: %04x @ %08x", | ||
876 | insn, s->pc - 2); | ||
877 | } | ||
878 | |||
879 | DISAS_INSN(mulw)static void disas_mulw (DisasContext *s, uint16_t insn) | ||
880 | { | ||
881 | TCGvTCGv_i32 reg; | ||
882 | TCGvTCGv_i32 tmp; | ||
883 | TCGvTCGv_i32 src; | ||
884 | int sign; | ||
885 | |||
886 | sign = (insn & 0x100) != 0; | ||
887 | reg = DREG(insn, 9)cpu_dregs[((insn) >> (9)) & 7]; | ||
888 | tmp = tcg_temp_new()tcg_temp_new_i32(); | ||
889 | if (sign) | ||
890 | tcg_gen_ext16s_i32(tmp, reg); | ||
891 | else | ||
892 | tcg_gen_ext16u_i32(tmp, reg); | ||
893 | SRC_EA(src, OS_WORD, sign, NULL)do { src = gen_ea(s, insn, 1, NULL_QREG, ((void*)0), sign ? EA_LOADS : EA_LOADU); if (((((src).i32) == ((NULL_QREG).i32)))) { gen_addr_fault (s); return; } } while (0); | ||
894 | tcg_gen_mul_i32(tmp, tmp, src); | ||
895 | tcg_gen_mov_i32(reg, tmp); | ||
896 | /* Unlike m68k, coldfire always clears the overflow bit. */ | ||
897 | gen_logic_cc(s, tmp); | ||
898 | } | ||
899 | |||
900 | DISAS_INSN(divw)static void disas_divw (DisasContext *s, uint16_t insn) | ||
901 | { | ||
902 | TCGvTCGv_i32 reg; | ||
903 | TCGvTCGv_i32 tmp; | ||
904 | TCGvTCGv_i32 src; | ||
905 | int sign; | ||
906 | |||
907 | sign = (insn & 0x100) != 0; | ||
908 | reg = DREG(insn, 9)cpu_dregs[((insn) >> (9)) & 7]; | ||
909 | if (sign) { | ||
910 | tcg_gen_ext16s_i32(QREG_DIV1, reg); | ||
911 | } else { | ||
912 | tcg_gen_ext16u_i32(QREG_DIV1, reg); | ||
913 | } | ||
914 | SRC_EA(src, OS_WORD, sign, NULL)do { src = gen_ea(s, insn, 1, NULL_QREG, ((void*)0), sign ? EA_LOADS : EA_LOADU); if (((((src).i32) == ((NULL_QREG).i32)))) { gen_addr_fault (s); return; } } while (0); | ||
915 | tcg_gen_mov_i32(QREG_DIV2, src); | ||
916 | if (sign) { | ||
917 | gen_helper_divs(cpu_env, tcg_const_i32(1)); | ||
918 | } else { | ||
919 | gen_helper_divu(cpu_env, tcg_const_i32(1)); | ||
920 | } | ||
921 | |||
922 | tmp = tcg_temp_new()tcg_temp_new_i32(); | ||
923 | src = tcg_temp_new()tcg_temp_new_i32(); | ||
924 | tcg_gen_ext16u_i32(tmp, QREG_DIV1); | ||
925 | tcg_gen_shli_i32(src, QREG_DIV2, 16); | ||
926 | tcg_gen_or_i32(reg, tmp, src); | ||
927 | s->cc_op = CC_OP_FLAGS; | ||
928 | } | ||
929 | |||
930 | DISAS_INSN(divl)static void disas_divl (DisasContext *s, uint16_t insn) | ||
931 | { | ||
932 | TCGvTCGv_i32 num; | ||
933 | TCGvTCGv_i32 den; | ||
934 | TCGvTCGv_i32 reg; | ||
935 | uint16_t ext; | ||
936 | |||
937 | ext = lduw_code(s->pc); | ||
938 | s->pc += 2; | ||
939 | if (ext & 0x87f8) { | ||
940 | gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED61); | ||
941 | return; | ||
942 | } | ||
943 | num = DREG(ext, 12)cpu_dregs[((ext) >> (12)) & 7]; | ||
944 | reg = DREG(ext, 0)cpu_dregs[((ext) >> (0)) & 7]; | ||
945 | tcg_gen_mov_i32(QREG_DIV1, num); | ||
946 | SRC_EA(den, OS_LONG, 0, NULL)do { den = gen_ea(s, insn, 2, NULL_QREG, ((void*)0), 0 ? EA_LOADS : EA_LOADU); if (((((den).i32) == ((NULL_QREG).i32)))) { gen_addr_fault (s); return; } } while (0); | ||
947 | tcg_gen_mov_i32(QREG_DIV2, den); | ||
948 | if (ext & 0x0800) { | ||
949 | gen_helper_divs(cpu_env, tcg_const_i32(0)); | ||
950 | } else { | ||
951 | gen_helper_divu(cpu_env, tcg_const_i32(0)); | ||
952 | } | ||
953 | if ((ext & 7) == ((ext >> 12) & 7)) { | ||
954 | /* div */ | ||
955 | tcg_gen_mov_i32 (reg, QREG_DIV1); | ||
956 | } else { | ||
957 | /* rem */ | ||
958 | tcg_gen_mov_i32 (reg, QREG_DIV2); | ||
959 | } | ||
960 | s->cc_op = CC_OP_FLAGS; | ||
961 | } | ||
962 | |||
963 | DISAS_INSN(addsub)static void disas_addsub (DisasContext *s, uint16_t insn) | ||
964 | { | ||
965 | TCGvTCGv_i32 reg; | ||
966 | TCGvTCGv_i32 dest; | ||
967 | TCGvTCGv_i32 src; | ||
968 | TCGvTCGv_i32 tmp; | ||
969 | TCGvTCGv_i32 addr; | ||
970 | int add; | ||
971 | |||
972 | add = (insn & 0x4000) != 0; | ||
973 | reg = DREG(insn, 9)cpu_dregs[((insn) >> (9)) & 7]; | ||
974 | dest = tcg_temp_new()tcg_temp_new_i32(); | ||
975 | if (insn & 0x100) { | ||
976 | SRC_EA(tmp, OS_LONG, 0, &addr)do { tmp = gen_ea(s, insn, 2, NULL_QREG, &addr, 0 ? EA_LOADS : EA_LOADU); if (((((tmp).i32) == ((NULL_QREG).i32)))) { gen_addr_fault (s); return; } } while (0); | ||
977 | src = reg; | ||
978 | } else { | ||
979 | tmp = reg; | ||
980 | SRC_EA(src, OS_LONG, 0, NULL)do { src = gen_ea(s, insn, 2, NULL_QREG, ((void*)0), 0 ? EA_LOADS : EA_LOADU); if (((((src).i32) == ((NULL_QREG).i32)))) { gen_addr_fault (s); return; } } while (0); | ||
981 | } | ||
982 | if (add) { | ||
983 | tcg_gen_add_i32(dest, tmp, src); | ||
984 | gen_helper_xflag_lt(QREG_CC_X, dest, src); | ||
985 | s->cc_op = CC_OP_ADD; | ||
986 | } else { | ||
987 | gen_helper_xflag_lt(QREG_CC_X, tmp, src); | ||
988 | tcg_gen_sub_i32(dest, tmp, src); | ||
989 | s->cc_op = CC_OP_SUB; | ||
990 | } | ||
991 | gen_update_cc_add(dest, src); | ||
992 | if (insn & 0x100) { | ||
993 | DEST_EA(insn, OS_LONG, dest, &addr)do { TCGv_i32 ea_result = gen_ea(s, insn, 2, dest, &addr, EA_STORE); if (((((ea_result).i32) == ((NULL_QREG).i32)))) { gen_addr_fault(s); return; } } while (0); | ||
994 | } else { | ||
995 | tcg_gen_mov_i32(reg, dest); | ||
996 | } | ||
997 | } | ||
998 | |||
999 | |||
1000 | /* Reverse the order of the bits in REG. */ | ||
1001 | DISAS_INSN(bitrev)static void disas_bitrev (DisasContext *s, uint16_t insn) | ||
1002 | { | ||
1003 | TCGvTCGv_i32 reg; | ||
1004 | reg = DREG(insn, 0)cpu_dregs[((insn) >> (0)) & 7]; | ||
1005 | gen_helper_bitrev(reg, reg); | ||
1006 | } | ||
1007 | |||
1008 | DISAS_INSN(bitop_reg)static void disas_bitop_reg (DisasContext *s, uint16_t insn) | ||
1009 | { | ||
1010 | int opsize; | ||
1011 | int op; | ||
1012 | TCGvTCGv_i32 src1; | ||
1013 | TCGvTCGv_i32 src2; | ||
1014 | TCGvTCGv_i32 tmp; | ||
1015 | TCGvTCGv_i32 addr; | ||
1016 | TCGvTCGv_i32 dest; | ||
1017 | |||
1018 | if ((insn & 0x38) != 0) | ||
1019 | opsize = OS_BYTE0; | ||
1020 | else | ||
1021 | opsize = OS_LONG2; | ||
1022 | op = (insn >> 6) & 3; | ||
1023 | SRC_EA(src1, opsize, 0, op ? &addr: NULL)do { src1 = gen_ea(s, insn, opsize, NULL_QREG, op ? &addr : ((void*)0), 0 ? EA_LOADS : EA_LOADU); if (((((src1).i32) == ((NULL_QREG).i32)))) { gen_addr_fault(s); return; } } while ( 0); | ||
1024 | src2 = DREG(insn, 9)cpu_dregs[((insn) >> (9)) & 7]; | ||
1025 | dest = tcg_temp_new()tcg_temp_new_i32(); | ||
1026 | |||
1027 | gen_flush_flags(s); | ||
1028 | tmp = tcg_temp_new()tcg_temp_new_i32(); | ||
1029 | if (opsize == OS_BYTE0) | ||
1030 | tcg_gen_andi_i32(tmp, src2, 7); | ||
1031 | else | ||
1032 | tcg_gen_andi_i32(tmp, src2, 31); | ||
1033 | src2 = tmp; | ||
1034 | tmp = tcg_temp_new()tcg_temp_new_i32(); | ||
1035 | tcg_gen_shr_i32(tmp, src1, src2); | ||
1036 | tcg_gen_andi_i32(tmp, tmp, 1); | ||
1037 | tcg_gen_shli_i32(tmp, tmp, 2); | ||
1038 | /* Clear CCF_Z if bit set. */ | ||
1039 | tcg_gen_ori_i32(QREG_CC_DEST, QREG_CC_DEST, CCF_Z0x04); | ||
1040 | tcg_gen_xor_i32(QREG_CC_DEST, QREG_CC_DEST, tmp); | ||
1041 | |||
1042 | tcg_gen_shl_i32(tmp, tcg_const_i32(1), src2); | ||
1043 | switch (op) { | ||
1044 | case 1: /* bchg */ | ||
1045 | tcg_gen_xor_i32(dest, src1, tmp); | ||
1046 | break; | ||
1047 | case 2: /* bclr */ | ||
1048 | tcg_gen_not_i32(tmp, tmp); | ||
1049 | tcg_gen_and_i32(dest, src1, tmp); | ||
1050 | break; | ||
1051 | case 3: /* bset */ | ||
1052 | tcg_gen_or_i32(dest, src1, tmp); | ||
1053 | break; | ||
1054 | default: /* btst */ | ||
1055 | break; | ||
1056 | } | ||
1057 | if (op) | ||
1058 | DEST_EA(insn, opsize, dest, &addr)do { TCGv_i32 ea_result = gen_ea(s, insn, opsize, dest, & addr, EA_STORE); if (((((ea_result).i32) == ((NULL_QREG).i32) ))) { gen_addr_fault(s); return; } } while (0); | ||
1059 | } | ||
1060 | |||
1061 | DISAS_INSN(sats)static void disas_sats (DisasContext *s, uint16_t insn) | ||
1062 | { | ||
1063 | TCGvTCGv_i32 reg; | ||
1064 | reg = DREG(insn, 0)cpu_dregs[((insn) >> (0)) & 7]; | ||
1065 | gen_flush_flags(s); | ||
1066 | gen_helper_sats(reg, reg, QREG_CC_DEST); | ||
1067 | gen_logic_cc(s, reg); | ||
1068 | } | ||
1069 | |||
1070 | static void gen_push(DisasContext *s, TCGvTCGv_i32 val) | ||
1071 | { | ||
1072 | TCGvTCGv_i32 tmp; | ||
1073 | |||
1074 | tmp = tcg_temp_new()tcg_temp_new_i32(); | ||
1075 | tcg_gen_subi_i32(tmp, QREG_SPcpu_aregs[7], 4); | ||
1076 | gen_store(s, OS_LONG2, tmp, val); | ||
1077 | tcg_gen_mov_i32(QREG_SPcpu_aregs[7], tmp); | ||
1078 | } | ||
1079 | |||
1080 | DISAS_INSN(movem)static void disas_movem (DisasContext *s, uint16_t insn) | ||
1081 | { | ||
1082 | TCGvTCGv_i32 addr; | ||
1083 | int i; | ||
1084 | uint16_t mask; | ||
1085 | TCGvTCGv_i32 reg; | ||
1086 | TCGvTCGv_i32 tmp; | ||
1087 | int is_load; | ||
1088 | |||
1089 | mask = lduw_code(s->pc); | ||
1090 | s->pc += 2; | ||
1091 | tmp = gen_lea(s, insn, OS_LONG2); | ||
1092 | if (IS_NULL_QREG(tmp)((((tmp).i32) == ((NULL_QREG).i32)))) { | ||
1093 | gen_addr_fault(s); | ||
1094 | return; | ||
1095 | } | ||
1096 | addr = tcg_temp_new()tcg_temp_new_i32(); | ||
1097 | tcg_gen_mov_i32(addr, tmp); | ||
1098 | is_load = ((insn & 0x0400) != 0); | ||
1099 | for (i = 0; i < 16; i++, mask >>= 1) { | ||
1100 | if (mask & 1) { | ||
1101 | if (i < 8) | ||
1102 | reg = DREG(i, 0)cpu_dregs[((i) >> (0)) & 7]; | ||
1103 | else | ||
1104 | reg = AREG(i, 0)cpu_aregs[((i) >> (0)) & 7]; | ||
1105 | if (is_load) { | ||
1106 | tmp = gen_load(s, OS_LONG2, addr, 0); | ||
1107 | tcg_gen_mov_i32(reg, tmp); | ||
1108 | } else { | ||
1109 | gen_store(s, OS_LONG2, addr, reg); | ||
1110 | } | ||
1111 | if (mask != 1) | ||
1112 | tcg_gen_addi_i32(addr, addr, 4); | ||
1113 | } | ||
1114 | } | ||
1115 | } | ||
1116 | |||
1117 | DISAS_INSN(bitop_im)static void disas_bitop_im (DisasContext *s, uint16_t insn) | ||
1118 | { | ||
1119 | int opsize; | ||
1120 | int op; | ||
1121 | TCGvTCGv_i32 src1; | ||
1122 | uint32_t mask; | ||
1123 | int bitnum; | ||
1124 | TCGvTCGv_i32 tmp; | ||
1125 | TCGvTCGv_i32 addr; | ||
1126 | |||
1127 | if ((insn & 0x38) != 0) | ||
1128 | opsize = OS_BYTE0; | ||
1129 | else | ||
1130 | opsize = OS_LONG2; | ||
1131 | op = (insn >> 6) & 3; | ||
1132 | |||
1133 | bitnum = lduw_code(s->pc); | ||
1134 | s->pc += 2; | ||
1135 | if (bitnum & 0xff00) { | ||
1136 | disas_undef(s, insn); | ||
1137 | return; | ||
1138 | } | ||
1139 | |||
1140 | SRC_EA(src1, opsize, 0, op ? &addr: NULL)do { src1 = gen_ea(s, insn, opsize, NULL_QREG, op ? &addr : ((void*)0), 0 ? EA_LOADS : EA_LOADU); if (((((src1).i32) == ((NULL_QREG).i32)))) { gen_addr_fault(s); return; } } while ( 0); | ||
1141 | |||
1142 | gen_flush_flags(s); | ||
1143 | if (opsize == OS_BYTE0) | ||
1144 | bitnum &= 7; | ||
1145 | else | ||
1146 | bitnum &= 31; | ||
1147 | mask = 1 << bitnum; | ||
1148 | |||
1149 | tmp = tcg_temp_new()tcg_temp_new_i32(); | ||
1150 | assert (CCF_Z == (1 << 2));
1151 | if (bitnum > 2) | ||
1152 | tcg_gen_shri_i32(tmp, src1, bitnum - 2); | ||
1153 | else if (bitnum < 2) | ||
1154 | tcg_gen_shli_i32(tmp, src1, 2 - bitnum); | ||
1155 | else | ||
1156 | tcg_gen_mov_i32(tmp, src1); | ||
1157 | tcg_gen_andi_i32(tmp, tmp, CCF_Z0x04); | ||
1158 | /* Clear CCF_Z if bit set. */ | ||
1159 | tcg_gen_ori_i32(QREG_CC_DEST, QREG_CC_DEST, CCF_Z0x04); | ||
1160 | tcg_gen_xor_i32(QREG_CC_DEST, QREG_CC_DEST, tmp); | ||
1161 | if (op) { | ||
1162 | switch (op) { | ||
1163 | case 1: /* bchg */ | ||
1164 | tcg_gen_xori_i32(tmp, src1, mask); | ||
1165 | break; | ||
1166 | case 2: /* bclr */ | ||
1167 | tcg_gen_andi_i32(tmp, src1, ~mask); | ||
1168 | break; | ||
1169 | case 3: /* bset */ | ||
1170 | tcg_gen_ori_i32(tmp, src1, mask); | ||
1171 | break; | ||
1172 | default: /* btst */ | ||
1173 | break; | ||
1174 | } | ||
1175 | DEST_EA(insn, opsize, tmp, &addr)do { TCGv_i32 ea_result = gen_ea(s, insn, opsize, tmp, &addr , EA_STORE); if (((((ea_result).i32) == ((NULL_QREG).i32)))) { gen_addr_fault(s); return; } } while (0); | ||
1176 | } | ||
1177 | } | ||
1178 | |||
1179 | DISAS_INSN(arith_im)static void disas_arith_im (DisasContext *s, uint16_t insn) | ||
1180 | { | ||
1181 | int op; | ||
1182 | uint32_t im; | ||
1183 | TCGvTCGv_i32 src1; | ||
1184 | TCGvTCGv_i32 dest; | ||
1185 | TCGvTCGv_i32 addr; | ||
1186 | |||
1187 | op = (insn >> 9) & 7; | ||
1188 | SRC_EA(src1, OS_LONG, 0, (op == 6) ? NULL : &addr)do { src1 = gen_ea(s, insn, 2, NULL_QREG, (op == 6) ? ((void* )0) : &addr, 0 ? EA_LOADS : EA_LOADU); if (((((src1).i32) == ((NULL_QREG).i32)))) { gen_addr_fault(s); return; } } while (0); | ||
1189 | im = read_im32(s); | ||
1190 | dest = tcg_temp_new()tcg_temp_new_i32(); | ||
1191 | switch (op) { | ||
1192 | case 0: /* ori */ | ||
1193 | tcg_gen_ori_i32(dest, src1, im); | ||
1194 | gen_logic_cc(s, dest); | ||
1195 | break; | ||
1196 | case 1: /* andi */ | ||
1197 | tcg_gen_andi_i32(dest, src1, im); | ||
1198 | gen_logic_cc(s, dest); | ||
1199 | break; | ||
1200 | case 2: /* subi */ | ||
1201 | tcg_gen_mov_i32(dest, src1); | ||
1202 | gen_helper_xflag_lt(QREG_CC_X, dest, tcg_const_i32(im)); | ||
1203 | tcg_gen_subi_i32(dest, dest, im); | ||
1204 | gen_update_cc_add(dest, tcg_const_i32(im)); | ||
1205 | s->cc_op = CC_OP_SUB; | ||
1206 | break; | ||
1207 | case 3: /* addi */ | ||
1208 | tcg_gen_mov_i32(dest, src1); | ||
1209 | tcg_gen_addi_i32(dest, dest, im); | ||
1210 | gen_update_cc_add(dest, tcg_const_i32(im)); | ||
1211 | gen_helper_xflag_lt(QREG_CC_X, dest, tcg_const_i32(im)); | ||
1212 | s->cc_op = CC_OP_ADD; | ||
1213 | break; | ||
1214 | case 5: /* eori */ | ||
1215 | tcg_gen_xori_i32(dest, src1, im); | ||
1216 | gen_logic_cc(s, dest); | ||
1217 | break; | ||
1218 | case 6: /* cmpi */ | ||
1219 | tcg_gen_mov_i32(dest, src1); | ||
1220 | tcg_gen_subi_i32(dest, dest, im); | ||
1221 | gen_update_cc_add(dest, tcg_const_i32(im)); | ||
1222 | s->cc_op = CC_OP_SUB; | ||
1223 | break; | ||
1224 | default: | ||
1225 | abort(); | ||
1226 | } | ||
1227 | if (op != 6) { | ||
1228 | DEST_EA(insn, OS_LONG, dest, &addr)do { TCGv_i32 ea_result = gen_ea(s, insn, 2, dest, &addr, EA_STORE); if (((((ea_result).i32) == ((NULL_QREG).i32)))) { gen_addr_fault(s); return; } } while (0); | ||
1229 | } | ||
1230 | } | ||
1231 | |||
1232 | DISAS_INSN(byterev)static void disas_byterev (DisasContext *s, uint16_t insn) | ||
1233 | { | ||
1234 | TCGvTCGv_i32 reg; | ||
1235 | |||
1236 | reg = DREG(insn, 0)cpu_dregs[((insn) >> (0)) & 7]; | ||
1237 | tcg_gen_bswap32_i32(reg, reg); | ||
1238 | } | ||
1239 | |||
1240 | DISAS_INSN(move)static void disas_move (DisasContext *s, uint16_t insn) | ||
1241 | { | ||
1242 | TCGvTCGv_i32 src; | ||
1243 | TCGvTCGv_i32 dest; | ||
1244 | int op; | ||
1245 | int opsize; | ||
1246 | |||
1247 | switch (insn >> 12) { | ||
1248 | case 1: /* move.b */ | ||
1249 | opsize = OS_BYTE0; | ||
1250 | break; | ||
1251 | case 2: /* move.l */ | ||
1252 | opsize = OS_LONG2; | ||
1253 | break; | ||
1254 | case 3: /* move.w */ | ||
1255 | opsize = OS_WORD1; | ||
1256 | break; | ||
1257 | default: | ||
1258 | abort(); | ||
1259 | } | ||
1260 | SRC_EA(src, opsize, 1, NULL)do { src = gen_ea(s, insn, opsize, NULL_QREG, ((void*)0), 1 ? EA_LOADS : EA_LOADU); if (((((src).i32) == ((NULL_QREG).i32) ))) { gen_addr_fault(s); return; } } while (0); | ||
1261 | op = (insn >> 6) & 7; | ||
1262 | if (op == 1) { | ||
1263 | /* movea */ | ||
1264 | /* The value will already have been sign extended. */ | ||
1265 | dest = AREG(insn, 9)cpu_aregs[((insn) >> (9)) & 7]; | ||
1266 | tcg_gen_mov_i32(dest, src); | ||
1267 | } else { | ||
1268 | /* normal move */ | ||
1269 | uint16_t dest_ea; | ||
1270 | dest_ea = ((insn >> 9) & 7) | (op << 3); | ||
1271 | DEST_EA(dest_ea, opsize, src, NULL)do { TCGv_i32 ea_result = gen_ea(s, dest_ea, opsize, src, ((void *)0), EA_STORE); if (((((ea_result).i32) == ((NULL_QREG).i32) ))) { gen_addr_fault(s); return; } } while (0); | ||
1272 | /* This will be correct because loads sign extend. */ | ||
1273 | gen_logic_cc(s, src); | ||
1274 | } | ||
1275 | } | ||
1276 | |||
1277 | DISAS_INSN(negx)static void disas_negx (DisasContext *s, uint16_t insn) | ||
1278 | { | ||
1279 | TCGvTCGv_i32 reg; | ||
1280 | |||
1281 | gen_flush_flags(s); | ||
1282 | reg = DREG(insn, 0)cpu_dregs[((insn) >> (0)) & 7]; | ||
1283 | gen_helper_subx_cc(reg, cpu_env, tcg_const_i32(0), reg); | ||
1284 | } | ||
1285 | |||
1286 | DISAS_INSN(lea)static void disas_lea (DisasContext *s, uint16_t insn) | ||
1287 | { | ||
1288 | TCGvTCGv_i32 reg; | ||
1289 | TCGvTCGv_i32 tmp; | ||
1290 | |||
1291 | reg = AREG(insn, 9)cpu_aregs[((insn) >> (9)) & 7]; | ||
1292 | tmp = gen_lea(s, insn, OS_LONG2); | ||
1293 | if (IS_NULL_QREG(tmp)((((tmp).i32) == ((NULL_QREG).i32)))) { | ||
1294 | gen_addr_fault(s); | ||
1295 | return; | ||
1296 | } | ||
1297 | tcg_gen_mov_i32(reg, tmp); | ||
1298 | } | ||
1299 | |||
1300 | DISAS_INSN(clr)static void disas_clr (DisasContext *s, uint16_t insn) | ||
1301 | { | ||
1302 | int opsize; | ||
1303 | |||
1304 | switch ((insn >> 6) & 3) { | ||
1305 | case 0: /* clr.b */ | ||
1306 | opsize = OS_BYTE0; | ||
1307 | break; | ||
1308 | case 1: /* clr.w */ | ||
1309 | opsize = OS_WORD1; | ||
1310 | break; | ||
1311 | case 2: /* clr.l */ | ||
1312 | opsize = OS_LONG2; | ||
1313 | break; | ||
1314 | default: | ||
1315 | abort(); | ||
1316 | } | ||
1317 | DEST_EA(insn, opsize, tcg_const_i32(0), NULL)do { TCGv_i32 ea_result = gen_ea(s, insn, opsize, tcg_const_i32 (0), ((void*)0), EA_STORE); if (((((ea_result).i32) == ((NULL_QREG ).i32)))) { gen_addr_fault(s); return; } } while (0); | ||
1318 | gen_logic_cc(s, tcg_const_i32(0)); | ||
1319 | } | ||
1320 | |||
1321 | static TCGvTCGv_i32 gen_get_ccr(DisasContext *s) | ||
1322 | { | ||
1323 | TCGvTCGv_i32 dest; | ||
1324 | |||
1325 | gen_flush_flags(s); | ||
1326 | dest = tcg_temp_new()tcg_temp_new_i32(); | ||
1327 | tcg_gen_shli_i32(dest, QREG_CC_X, 4); | ||
1328 | tcg_gen_or_i32(dest, dest, QREG_CC_DEST); | ||
1329 | return dest; | ||
1330 | } | ||
1331 | |||
1332 | DISAS_INSN(move_from_ccr)static void disas_move_from_ccr (DisasContext *s, uint16_t insn ) | ||
1333 | { | ||
1334 | TCGvTCGv_i32 reg; | ||
1335 | TCGvTCGv_i32 ccr; | ||
1336 | |||
1337 | ccr = gen_get_ccr(s); | ||
1338 | reg = DREG(insn, 0)cpu_dregs[((insn) >> (0)) & 7]; | ||
1339 | gen_partset_reg(OS_WORD1, reg, ccr); | ||
1340 | } | ||
1341 | |||
1342 | DISAS_INSN(neg)static void disas_neg (DisasContext *s, uint16_t insn) | ||
1343 | { | ||
1344 | TCGvTCGv_i32 reg; | ||
1345 | TCGvTCGv_i32 src1; | ||
1346 | |||
1347 | reg = DREG(insn, 0)cpu_dregs[((insn) >> (0)) & 7]; | ||
1348 | src1 = tcg_temp_new()tcg_temp_new_i32(); | ||
1349 | tcg_gen_mov_i32(src1, reg); | ||
1350 | tcg_gen_neg_i32(reg, src1); | ||
1351 | s->cc_op = CC_OP_SUB; | ||
1352 | gen_update_cc_add(reg, src1); | ||
1353 | gen_helper_xflag_lt(QREG_CC_X, tcg_const_i32(0), src1); | ||
1354 | s->cc_op = CC_OP_SUB; | ||
1355 | } | ||
1356 | |||
1357 | static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only) | ||
1358 | { | ||
1359 | tcg_gen_movi_i32(QREG_CC_DEST, val & 0xf); | ||
1360 | tcg_gen_movi_i32(QREG_CC_X, (val & 0x10) >> 4); | ||
1361 | if (!ccr_only) { | ||
1362 | gen_helper_set_sr(cpu_env, tcg_const_i32(val & 0xff00)); | ||
1363 | } | ||
1364 | } | ||
1365 | |||
1366 | static void gen_set_sr(DisasContext *s, uint16_t insn, int ccr_only) | ||
1367 | { | ||
1368 | TCGvTCGv_i32 tmp; | ||
1369 | TCGvTCGv_i32 reg; | ||
1370 | |||
1371 | s->cc_op = CC_OP_FLAGS; | ||
1372 | if ((insn & 0x38) == 0) | ||
1373 | { | ||
1374 | tmp = tcg_temp_new()tcg_temp_new_i32(); | ||
1375 | reg = DREG(insn, 0)cpu_dregs[((insn) >> (0)) & 7]; | ||
1376 | tcg_gen_andi_i32(QREG_CC_DEST, reg, 0xf); | ||
1377 | tcg_gen_shri_i32(tmp, reg, 4); | ||
1378 | tcg_gen_andi_i32(QREG_CC_X, tmp, 1); | ||
1379 | if (!ccr_only) { | ||
1380 | gen_helper_set_sr(cpu_env, reg); | ||
1381 | } | ||
1382 | } | ||
1383 | else if ((insn & 0x3f) == 0x3c) | ||
1384 | { | ||
1385 | uint16_t val; | ||
1386 | val = lduw_code(s->pc); | ||
1387 | s->pc += 2; | ||
1388 | gen_set_sr_im(s, val, ccr_only); | ||
1389 | } | ||
1390 | else | ||
1391 | disas_undef(s, insn); | ||
1392 | } | ||
1393 | |||
1394 | DISAS_INSN(move_to_ccr)static void disas_move_to_ccr (DisasContext *s, uint16_t insn ) | ||
1395 | { | ||
1396 | gen_set_sr(s, insn, 1); | ||
1397 | } | ||
1398 | |||
1399 | DISAS_INSN(not)static void disas_not (DisasContext *s, uint16_t insn) | ||
1400 | { | ||
1401 | TCGvTCGv_i32 reg; | ||
1402 | |||
1403 | reg = DREG(insn, 0)cpu_dregs[((insn) >> (0)) & 7]; | ||
1404 | tcg_gen_not_i32(reg, reg); | ||
1405 | gen_logic_cc(s, reg); | ||
1406 | } | ||
1407 | |||
1408 | DISAS_INSN(swap)static void disas_swap (DisasContext *s, uint16_t insn) | ||
1409 | { | ||
1410 | TCGvTCGv_i32 src1; | ||
1411 | TCGvTCGv_i32 src2; | ||
1412 | TCGvTCGv_i32 reg; | ||
1413 | |||
1414 | src1 = tcg_temp_new()tcg_temp_new_i32(); | ||
1415 | src2 = tcg_temp_new()tcg_temp_new_i32(); | ||
1416 | reg = DREG(insn, 0)cpu_dregs[((insn) >> (0)) & 7]; | ||
1417 | tcg_gen_shli_i32(src1, reg, 16); | ||
1418 | tcg_gen_shri_i32(src2, reg, 16); | ||
1419 | tcg_gen_or_i32(reg, src1, src2); | ||
1420 | gen_logic_cc(s, reg); | ||
1421 | } | ||
1422 | |||
1423 | DISAS_INSN(pea)static void disas_pea (DisasContext *s, uint16_t insn) | ||
1424 | { | ||
1425 | TCGvTCGv_i32 tmp; | ||
1426 | |||
1427 | tmp = gen_lea(s, insn, OS_LONG2); | ||
1428 | if (IS_NULL_QREG(tmp)((((tmp).i32) == ((NULL_QREG).i32)))) { | ||
1429 | gen_addr_fault(s); | ||
1430 | return; | ||
1431 | } | ||
1432 | gen_push(s, tmp); | ||
1433 | } | ||
1434 | |||
1435 | DISAS_INSN(ext)static void disas_ext (DisasContext *s, uint16_t insn) | ||
1436 | { | ||
1437 | int op; | ||
1438 | TCGvTCGv_i32 reg; | ||
1439 | TCGvTCGv_i32 tmp; | ||
1440 | |||
1441 | reg = DREG(insn, 0)cpu_dregs[((insn) >> (0)) & 7]; | ||
1442 | op = (insn >> 6) & 7; | ||
1443 | tmp = tcg_temp_new()tcg_temp_new_i32(); | ||
1444 | if (op == 3) | ||
1445 | tcg_gen_ext16s_i32(tmp, reg); | ||
1446 | else | ||
1447 | tcg_gen_ext8s_i32(tmp, reg); | ||
1448 | if (op == 2) | ||
1449 | gen_partset_reg(OS_WORD1, reg, tmp); | ||
1450 | else | ||
1451 | tcg_gen_mov_i32(reg, tmp); | ||
1452 | gen_logic_cc(s, tmp); | ||
1453 | } | ||
1454 | |||
1455 | DISAS_INSN(tst)static void disas_tst (DisasContext *s, uint16_t insn) | ||
1456 | { | ||
1457 | int opsize; | ||
1458 | TCGvTCGv_i32 tmp; | ||
1459 | |||
1460 | switch ((insn >> 6) & 3) { | ||
1461 | case 0: /* tst.b */ | ||
1462 | opsize = OS_BYTE0; | ||
1463 | break; | ||
1464 | case 1: /* tst.w */ | ||
1465 | opsize = OS_WORD1; | ||
1466 | break; | ||
1467 | case 2: /* tst.l */ | ||
1468 | opsize = OS_LONG2; | ||
1469 | break; | ||
1470 | default: | ||
1471 | abort(); | ||
1472 | } | ||
1473 | SRC_EA(tmp, opsize, 1, NULL)do { tmp = gen_ea(s, insn, opsize, NULL_QREG, ((void*)0), 1 ? EA_LOADS : EA_LOADU); if (((((tmp).i32) == ((NULL_QREG).i32) ))) { gen_addr_fault(s); return; } } while (0); | ||
1474 | gen_logic_cc(s, tmp); | ||
1475 | } | ||
1476 | |||
1477 | DISAS_INSN(pulse)static void disas_pulse (DisasContext *s, uint16_t insn) | ||
1478 | { | ||
1479 | /* Implemented as a NOP. */ | ||
1480 | } | ||
1481 | |||
1482 | DISAS_INSN(illegal)static void disas_illegal (DisasContext *s, uint16_t insn) | ||
1483 | { | ||
1484 | gen_exception(s, s->pc - 2, EXCP_ILLEGAL4); | ||
1485 | } | ||
1486 | |||
1487 | /* ??? This should be atomic. */ | ||
1488 | DISAS_INSN(tas)static void disas_tas (DisasContext *s, uint16_t insn) | ||
1489 | { | ||
1490 | TCGvTCGv_i32 dest; | ||
1491 | TCGvTCGv_i32 src1; | ||
1492 | TCGvTCGv_i32 addr; | ||
1493 | |||
1494 | dest = tcg_temp_new()tcg_temp_new_i32(); | ||
1495 | SRC_EA(src1, OS_BYTE, 1, &addr)do { src1 = gen_ea(s, insn, 0, NULL_QREG, &addr, 1 ? EA_LOADS : EA_LOADU); if (((((src1).i32) == ((NULL_QREG).i32)))) { gen_addr_fault (s); return; } } while (0); | ||
1496 | gen_logic_cc(s, src1); | ||
1497 | tcg_gen_ori_i32(dest, src1, 0x80); | ||
1498 | DEST_EA(insn, OS_BYTE, dest, &addr)do { TCGv_i32 ea_result = gen_ea(s, insn, 0, dest, &addr, EA_STORE); if (((((ea_result).i32) == ((NULL_QREG).i32)))) { gen_addr_fault(s); return; } } while (0); | ||
1499 | } | ||
1500 | |||
1501 | DISAS_INSN(mull)static void disas_mull (DisasContext *s, uint16_t insn) | ||
1502 | { | ||
1503 | uint16_t ext; | ||
1504 | TCGvTCGv_i32 reg; | ||
1505 | TCGvTCGv_i32 src1; | ||
1506 | TCGvTCGv_i32 dest; | ||
1507 | |||
1508 | /* The upper 32 bits of the product are discarded, so | ||
1509 | muls.l and mulu.l are functionally equivalent. */ | ||
1510 | ext = lduw_code(s->pc); | ||
1511 | s->pc += 2; | ||
1512 | if (ext & 0x87ff) { | ||
1513 | gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED61); | ||
1514 | return; | ||
1515 | } | ||
1516 | reg = DREG(ext, 12)cpu_dregs[((ext) >> (12)) & 7]; | ||
1517 | SRC_EA(src1, OS_LONG, 0, NULL)do { src1 = gen_ea(s, insn, 2, NULL_QREG, ((void*)0), 0 ? EA_LOADS : EA_LOADU); if (((((src1).i32) == ((NULL_QREG).i32)))) { gen_addr_fault (s); return; } } while (0); | ||
1518 | dest = tcg_temp_new()tcg_temp_new_i32(); | ||
1519 | tcg_gen_mul_i32(dest, src1, reg); | ||
1520 | tcg_gen_mov_i32(reg, dest); | ||
1521 | /* Unlike m68k, ColdFire always clears the overflow bit. */ | ||
1522 | gen_logic_cc(s, dest); | ||
1523 | } | ||
1524 | |||
1525 | DISAS_INSN(link)static void disas_link (DisasContext *s, uint16_t insn) | ||
1526 | { | ||
1527 | int16_t offset; | ||
1528 | TCGvTCGv_i32 reg; | ||
1529 | TCGvTCGv_i32 tmp; | ||
1530 | |||
1531 | offset = ldsw_code(s->pc); | ||
1532 | s->pc += 2; | ||
1533 | reg = AREG(insn, 0)cpu_aregs[((insn) >> (0)) & 7]; | ||
1534 | tmp = tcg_temp_new()tcg_temp_new_i32(); | ||
1535 | tcg_gen_subi_i32(tmp, QREG_SPcpu_aregs[7], 4); | ||
1536 | gen_store(s, OS_LONG2, tmp, reg); | ||
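| /* When the link register is A7 itself, skip copying the new frame pointer | ||
| back into it: SP is rewritten immediately below anyway. */ | ||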
1537 | if ((insn & 7) != 7) | ||
1538 | tcg_gen_mov_i32(reg, tmp); | ||
1539 | tcg_gen_addi_i32(QREG_SPcpu_aregs[7], tmp, offset); | ||
1540 | } | ||
1541 | |||
1542 | DISAS_INSN(unlk)static void disas_unlk (DisasContext *s, uint16_t insn) | ||
1543 | { | ||
1544 | TCGvTCGv_i32 src; | ||
1545 | TCGvTCGv_i32 reg; | ||
1546 | TCGvTCGv_i32 tmp; | ||
1547 | |||
1548 | src = tcg_temp_new()tcg_temp_new_i32(); | ||
1549 | reg = AREG(insn, 0)cpu_aregs[((insn) >> (0)) & 7]; | ||
1550 | tcg_gen_mov_i32(src, reg); | ||
1551 | tmp = gen_load(s, OS_LONG2, src, 0); | ||
1552 | tcg_gen_mov_i32(reg, tmp); | ||
1553 | tcg_gen_addi_i32(QREG_SPcpu_aregs[7], src, 4); | ||
1554 | } | ||
1555 | |||
1556 | DISAS_INSN(nop)static void disas_nop (DisasContext *s, uint16_t insn) | ||
1557 | { | ||
1558 | } | ||
1559 | |||
1560 | DISAS_INSN(rts)static void disas_rts (DisasContext *s, uint16_t insn) | ||
1561 | { | ||
1562 | TCGvTCGv_i32 tmp; | ||
1563 | |||
1564 | tmp = gen_load(s, OS_LONG2, QREG_SPcpu_aregs[7], 0); | ||
1565 | tcg_gen_addi_i32(QREG_SPcpu_aregs[7], QREG_SPcpu_aregs[7], 4); | ||
1566 | gen_jmp(s, tmp); | ||
1567 | } | ||
1568 | |||
1569 | DISAS_INSN(jump)static void disas_jump (DisasContext *s, uint16_t insn) | ||
1570 | { | ||
1571 | TCGvTCGv_i32 tmp; | ||
1572 | |||
1573 | /* Load the target address first to ensure correct exception | ||
1574 | behavior. */ | ||
1575 | tmp = gen_lea(s, insn, OS_LONG2); | ||
1576 | if (IS_NULL_QREG(tmp)((((tmp).i32) == ((NULL_QREG).i32)))) { | ||
1577 | gen_addr_fault(s); | ||
1578 | return; | ||
1579 | } | ||
1580 | if ((insn & 0x40) == 0) { | ||
1581 | /* jsr */ | ||
1582 | gen_push(s, tcg_const_i32(s->pc)); | ||
1583 | } | ||
1584 | gen_jmp(s, tmp); | ||
1585 | } | ||
1586 | |||
1587 | DISAS_INSN(addsubq)static void disas_addsubq (DisasContext *s, uint16_t insn) | ||
1588 | { | ||
1589 | TCGvTCGv_i32 src1; | ||
1590 | TCGvTCGv_i32 src2; | ||
1591 | TCGvTCGv_i32 dest; | ||
1592 | int val; | ||
1593 | TCGvTCGv_i32 addr; | ||
1594 | |||
1595 | SRC_EA(src1, OS_LONG, 0, &addr)do { src1 = gen_ea(s, insn, 2, NULL_QREG, &addr, 0 ? EA_LOADS : EA_LOADU); if (((((src1).i32) == ((NULL_QREG).i32)))) { gen_addr_fault (s); return; } } while (0); | ||
1596 | val = (insn >> 9) & 7; | ||
1597 | if (val == 0) | ||
1598 | val = 8; | ||
1599 | dest = tcg_temp_new()tcg_temp_new_i32(); | ||
1600 | tcg_gen_mov_i32(dest, src1); | ||
1601 | if ((insn & 0x38) == 0x08) { | ||
1602 | /* Don't update condition codes if the destination is an | ||
1603 | address register. */ | ||
1604 | if (insn & 0x0100) { | ||
1605 | tcg_gen_subi_i32(dest, dest, val); | ||
1606 | } else { | ||
1607 | tcg_gen_addi_i32(dest, dest, val); | ||
1608 | } | ||
1609 | } else { | ||
1610 | src2 = tcg_const_i32(val); | ||
1611 | if (insn & 0x0100) { | ||
1612 | gen_helper_xflag_lt(QREG_CC_X, dest, src2); | ||
1613 | tcg_gen_subi_i32(dest, dest, val); | ||
1614 | s->cc_op = CC_OP_SUB; | ||
1615 | } else { | ||
1616 | tcg_gen_addi_i32(dest, dest, val); | ||
1617 | gen_helper_xflag_lt(QREG_CC_X, dest, src2); | ||
1618 | s->cc_op = CC_OP_ADD; | ||
1619 | } | ||
1620 | gen_update_cc_add(dest, src2); | ||
1621 | } | ||
1622 | DEST_EA(insn, OS_LONG, dest, &addr)do { TCGv_i32 ea_result = gen_ea(s, insn, 2, dest, &addr, EA_STORE); if (((((ea_result).i32) == ((NULL_QREG).i32)))) { gen_addr_fault(s); return; } } while (0); | ||
1623 | } | ||
1624 | |||
1625 | DISAS_INSN(tpf)static void disas_tpf (DisasContext *s, uint16_t insn) | ||
1626 | { | ||
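| /* TPF ("trap false") never traps, so this only has to skip the extension | ||
| words implied by the low three bits of the opcode. */ | ||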
1627 | switch (insn & 7) { | ||
1628 | case 2: /* One extension word. */ | ||
1629 | s->pc += 2; | ||
1630 | break; | ||
1631 | case 3: /* Two extension words. */ | ||
1632 | s->pc += 4; | ||
1633 | break; | ||
1634 | case 4: /* No extension words. */ | ||
1635 | break; | ||
1636 | default: | ||
1637 | disas_undef(s, insn); | ||
1638 | } | ||
1639 | } | ||
1640 | |||
1641 | DISAS_INSN(branch)static void disas_branch (DisasContext *s, uint16_t insn) | ||
1642 | { | ||
1643 | int32_t offset; | ||
1644 | uint32_t base; | ||
1645 | int op; | ||
1646 | int l1; | ||
1647 | |||
1648 | base = s->pc; | ||
1649 | op = (insn >> 8) & 0xf; | ||
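| /* An 8-bit displacement of 0x00 means a 16-bit extension word follows; | ||
| 0xff selects a 32-bit displacement (the ISA_B/BRAL long branch forms). */ | ||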
1650 | offset = (int8_t)insn; | ||
1651 | if (offset == 0) { | ||
1652 | offset = ldsw_code(s->pc); | ||
1653 | s->pc += 2; | ||
1654 | } else if (offset == -1) { | ||
1655 | offset = read_im32(s); | ||
1656 | } | ||
1657 | if (op == 1) { | ||
1658 | /* bsr */ | ||
1659 | gen_push(s, tcg_const_i32(s->pc)); | ||
1660 | } | ||
1661 | gen_flush_cc_op(s); | ||
1662 | if (op > 1) { | ||
1663 | /* Bcc */ | ||
1664 | l1 = gen_new_label(); | ||
1665 | gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1); | ||
1666 | gen_jmp_tb(s, 1, base + offset); | ||
1667 | gen_set_label(l1); | ||
1668 | gen_jmp_tb(s, 0, s->pc); | ||
1669 | } else { | ||
1670 | /* Unconditional branch. */ | ||
1671 | gen_jmp_tb(s, 0, base + offset); | ||
1672 | } | ||
1673 | } | ||
1674 | |||
1675 | DISAS_INSN(moveq)static void disas_moveq (DisasContext *s, uint16_t insn) | ||
1676 | { | ||
1677 | uint32_t val; | ||
1678 | |||
1679 | val = (int8_t)insn; | ||
1680 | tcg_gen_movi_i32(DREG(insn, 9)cpu_dregs[((insn) >> (9)) & 7], val); | ||
1681 | gen_logic_cc(s, tcg_const_i32(val)); | ||
1682 | } | ||
1683 | |||
1684 | DISAS_INSN(mvzs)static void disas_mvzs (DisasContext *s, uint16_t insn) | ||
1685 | { | ||
1686 | int opsize; | ||
1687 | TCGvTCGv_i32 src; | ||
1688 | TCGvTCGv_i32 reg; | ||
1689 | |||
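| /* mvs.{b,w} (bit 7 clear) sign-extends and mvz.{b,w} (bit 7 set) | ||
| zero-extends the source into the full data register; bit 6 selects | ||
| word versus byte. */ | ||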
1690 | if (insn & 0x40) | ||
1691 | opsize = OS_WORD1; | ||
1692 | else | ||
1693 | opsize = OS_BYTE0; | ||
1694 | SRC_EA(src, opsize, (insn & 0x80) == 0, NULL)do { src = gen_ea(s, insn, opsize, NULL_QREG, ((void*)0), (insn & 0x80) == 0 ? EA_LOADS : EA_LOADU); if (((((src).i32) == ((NULL_QREG).i32)))) { gen_addr_fault(s); return; } } while ( 0); | ||
1695 | reg = DREG(insn, 9)cpu_dregs[((insn) >> (9)) & 7]; | ||
1696 | tcg_gen_mov_i32(reg, src); | ||
1697 | gen_logic_cc(s, src); | ||
1698 | } | ||
1699 | |||
1700 | DISAS_INSN(or)static void disas_or (DisasContext *s, uint16_t insn) | ||
1701 | { | ||
1702 | TCGvTCGv_i32 reg; | ||
1703 | TCGvTCGv_i32 dest; | ||
1704 | TCGvTCGv_i32 src; | ||
1705 | TCGvTCGv_i32 addr; | ||
1706 | |||
1707 | reg = DREG(insn, 9)cpu_dregs[((insn) >> (9)) & 7]; | ||
1708 | dest = tcg_temp_new()tcg_temp_new_i32(); | ||
1709 | if (insn & 0x100) { | ||
1710 | SRC_EA(src, OS_LONG, 0, &addr)do { src = gen_ea(s, insn, 2, NULL_QREG, &addr, 0 ? EA_LOADS : EA_LOADU); if (((((src).i32) == ((NULL_QREG).i32)))) { gen_addr_fault (s); return; } } while (0); | ||
1711 | tcg_gen_or_i32(dest, src, reg); | ||
1712 | DEST_EA(insn, OS_LONG, dest, &addr)do { TCGv_i32 ea_result = gen_ea(s, insn, 2, dest, &addr, EA_STORE); if (((((ea_result).i32) == ((NULL_QREG).i32)))) { gen_addr_fault(s); return; } } while (0); | ||
1713 | } else { | ||
1714 | SRC_EA(src, OS_LONG, 0, NULL)do { src = gen_ea(s, insn, 2, NULL_QREG, ((void*)0), 0 ? EA_LOADS : EA_LOADU); if (((((src).i32) == ((NULL_QREG).i32)))) { gen_addr_fault (s); return; } } while (0); | ||
1715 | tcg_gen_or_i32(dest, src, reg); | ||
1716 | tcg_gen_mov_i32(reg, dest); | ||
1717 | } | ||
1718 | gen_logic_cc(s, dest); | ||
1719 | } | ||
1720 | |||
1721 | DISAS_INSN(suba)static void disas_suba (DisasContext *s, uint16_t insn) | ||
1722 | { | ||
1723 | TCGvTCGv_i32 src; | ||
1724 | TCGvTCGv_i32 reg; | ||
1725 | |||
1726 | SRC_EA(src, OS_LONG, 0, NULL)do { src = gen_ea(s, insn, 2, NULL_QREG, ((void*)0), 0 ? EA_LOADS : EA_LOADU); if (((((src).i32) == ((NULL_QREG).i32)))) { gen_addr_fault (s); return; } } while (0); | ||
1727 | reg = AREG(insn, 9)cpu_aregs[((insn) >> (9)) & 7]; | ||
1728 | tcg_gen_sub_i32(reg, reg, src); | ||
1729 | } | ||
1730 | |||
1731 | DISAS_INSN(subx)static void disas_subx (DisasContext *s, uint16_t insn) | ||
1732 | { | ||
1733 | TCGvTCGv_i32 reg; | ||
1734 | TCGvTCGv_i32 src; | ||
1735 | |||
1736 | gen_flush_flags(s); | ||
1737 | reg = DREG(insn, 9)cpu_dregs[((insn) >> (9)) & 7]; | ||
1738 | src = DREG(insn, 0)cpu_dregs[((insn) >> (0)) & 7]; | ||
1739 | gen_helper_subx_cc(reg, cpu_env, reg, src); | ||
1740 | } | ||
1741 | |||
1742 | DISAS_INSN(mov3q)static void disas_mov3q (DisasContext *s, uint16_t insn) | ||
1743 | { | ||
1744 | TCGvTCGv_i32 src; | ||
1745 | int val; | ||
1746 | |||
1747 | val = (insn >> 9) & 7; | ||
1748 | if (val == 0) | ||
1749 | val = -1; | ||
1750 | src = tcg_const_i32(val); | ||
1751 | gen_logic_cc(s, src); | ||
1752 | DEST_EA(insn, OS_LONG, src, NULL)do { TCGv_i32 ea_result = gen_ea(s, insn, 2, src, ((void*)0), EA_STORE); if (((((ea_result).i32) == ((NULL_QREG).i32)))) { gen_addr_fault(s); return; } } while (0); | ||
1753 | } | ||
1754 | |||
1755 | DISAS_INSN(cmp)static void disas_cmp (DisasContext *s, uint16_t insn) | ||
1756 | { | ||
1757 | int op; | ||
1758 | TCGvTCGv_i32 src; | ||
1759 | TCGvTCGv_i32 reg; | ||
1760 | TCGvTCGv_i32 dest; | ||
1761 | int opsize; | ||
1762 | |||
1763 | op = (insn >> 6) & 3; | ||
1764 | switch (op) { | ||
1765 | case 0: /* cmp.b */ | ||
1766 | opsize = OS_BYTE0; | ||
1767 | s->cc_op = CC_OP_CMPB; | ||
1768 | break; | ||
1769 | case 1: /* cmp.w */ | ||
1770 | opsize = OS_WORD1; | ||
1771 | s->cc_op = CC_OP_CMPW; | ||
1772 | break; | ||
1773 | case 2: /* cmp.l */ | ||
1774 | opsize = OS_LONG2; | ||
1775 | s->cc_op = CC_OP_SUB; | ||
1776 | break; | ||
1777 | default: | ||
1778 | abort(); | ||
1779 | } | ||
1780 | SRC_EA(src, opsize, 1, NULL)do { src = gen_ea(s, insn, opsize, NULL_QREG, ((void*)0), 1 ? EA_LOADS : EA_LOADU); if (((((src).i32) == ((NULL_QREG).i32) ))) { gen_addr_fault(s); return; } } while (0); | ||
1781 | reg = DREG(insn, 9)cpu_dregs[((insn) >> (9)) & 7]; | ||
1782 | dest = tcg_temp_new()tcg_temp_new_i32(); | ||
1783 | tcg_gen_sub_i32(dest, reg, src); | ||
1784 | gen_update_cc_add(dest, src); | ||
1785 | } | ||
1786 | |||
1787 | DISAS_INSN(cmpa)static void disas_cmpa (DisasContext *s, uint16_t insn) | ||
1788 | { | ||
1789 | int opsize; | ||
1790 | TCGvTCGv_i32 src; | ||
1791 | TCGvTCGv_i32 reg; | ||
1792 | TCGvTCGv_i32 dest; | ||
1793 | |||
1794 | if (insn & 0x100) { | ||
1795 | opsize = OS_LONG2; | ||
1796 | } else { | ||
1797 | opsize = OS_WORD1; | ||
1798 | } | ||
1799 | SRC_EA(src, opsize, 1, NULL)do { src = gen_ea(s, insn, opsize, NULL_QREG, ((void*)0), 1 ? EA_LOADS : EA_LOADU); if (((((src).i32) == ((NULL_QREG).i32) ))) { gen_addr_fault(s); return; } } while (0); | ||
1800 | reg = AREG(insn, 9)cpu_aregs[((insn) >> (9)) & 7]; | ||
1801 | dest = tcg_temp_new()tcg_temp_new_i32(); | ||
1802 | tcg_gen_sub_i32(dest, reg, src); | ||
1803 | gen_update_cc_add(dest, src); | ||
1804 | s->cc_op = CC_OP_SUB; | ||
1805 | } | ||
1806 | |||
1807 | DISAS_INSN(eor)static void disas_eor (DisasContext *s, uint16_t insn) | ||
1808 | { | ||
1809 | TCGvTCGv_i32 src; | ||
1810 | TCGvTCGv_i32 reg; | ||
1811 | TCGvTCGv_i32 dest; | ||
1812 | TCGvTCGv_i32 addr; | ||
1813 | |||
1814 | SRC_EA(src, OS_LONG, 0, &addr)do { src = gen_ea(s, insn, 2, NULL_QREG, &addr, 0 ? EA_LOADS : EA_LOADU); if (((((src).i32) == ((NULL_QREG).i32)))) { gen_addr_fault (s); return; } } while (0); | ||
1815 | reg = DREG(insn, 9)cpu_dregs[((insn) >> (9)) & 7]; | ||
1816 | dest = tcg_temp_new()tcg_temp_new_i32(); | ||
1817 | tcg_gen_xor_i32(dest, src, reg); | ||
1818 | gen_logic_cc(s, dest); | ||
1819 | DEST_EA(insn, OS_LONG, dest, &addr)do { TCGv_i32 ea_result = gen_ea(s, insn, 2, dest, &addr, EA_STORE); if (((((ea_result).i32) == ((NULL_QREG).i32)))) { gen_addr_fault(s); return; } } while (0); | ||
1820 | } | ||
1821 | |||
1822 | DISAS_INSN(and)static void disas_and (DisasContext *s, uint16_t insn) | ||
1823 | { | ||
1824 | TCGvTCGv_i32 src; | ||
1825 | TCGvTCGv_i32 reg; | ||
1826 | TCGvTCGv_i32 dest; | ||
1827 | TCGvTCGv_i32 addr; | ||
1828 | |||
1829 | reg = DREG(insn, 9)cpu_dregs[((insn) >> (9)) & 7]; | ||
1830 | dest = tcg_temp_new()tcg_temp_new_i32(); | ||
1831 | if (insn & 0x100) { | ||
1832 | SRC_EA(src, OS_LONG, 0, &addr)do { src = gen_ea(s, insn, 2, NULL_QREG, &addr, 0 ? EA_LOADS : EA_LOADU); if (((((src).i32) == ((NULL_QREG).i32)))) { gen_addr_fault (s); return; } } while (0); | ||
1833 | tcg_gen_and_i32(dest, src, reg); | ||
1834 | DEST_EA(insn, OS_LONG, dest, &addr)do { TCGv_i32 ea_result = gen_ea(s, insn, 2, dest, &addr, EA_STORE); if (((((ea_result).i32) == ((NULL_QREG).i32)))) { gen_addr_fault(s); return; } } while (0); | ||
1835 | } else { | ||
1836 | SRC_EA(src, OS_LONG, 0, NULL)do { src = gen_ea(s, insn, 2, NULL_QREG, ((void*)0), 0 ? EA_LOADS : EA_LOADU); if (((((src).i32) == ((NULL_QREG).i32)))) { gen_addr_fault (s); return; } } while (0); | ||
1837 | tcg_gen_and_i32(dest, src, reg); | ||
1838 | tcg_gen_mov_i32(reg, dest); | ||
1839 | } | ||
1840 | gen_logic_cc(s, dest); | ||
1841 | } | ||
1842 | |||
1843 | DISAS_INSN(adda)static void disas_adda (DisasContext *s, uint16_t insn) | ||
1844 | { | ||
1845 | TCGvTCGv_i32 src; | ||
1846 | TCGvTCGv_i32 reg; | ||
1847 | |||
1848 | SRC_EA(src, OS_LONG, 0, NULL)do { src = gen_ea(s, insn, 2, NULL_QREG, ((void*)0), 0 ? EA_LOADS : EA_LOADU); if (((((src).i32) == ((NULL_QREG).i32)))) { gen_addr_fault (s); return; } } while (0); | ||
1849 | reg = AREG(insn, 9)cpu_aregs[((insn) >> (9)) & 7]; | ||
1850 | tcg_gen_add_i32(reg, reg, src); | ||
1851 | } | ||
1852 | |||
1853 | DISAS_INSN(addx)static void disas_addx (DisasContext *s, uint16_t insn) | ||
1854 | { | ||
1855 | TCGvTCGv_i32 reg; | ||
1856 | TCGvTCGv_i32 src; | ||
1857 | |||
1858 | gen_flush_flags(s); | ||
1859 | reg = DREG(insn, 9)cpu_dregs[((insn) >> (9)) & 7]; | ||
1860 | src = DREG(insn, 0)cpu_dregs[((insn) >> (0)) & 7]; | ||
1861 | gen_helper_addx_cc(reg, cpu_env, reg, src); | ||
1862 | s->cc_op = CC_OP_FLAGS; | ||
1863 | } | ||
1864 | |||
1865 | /* TODO: This could be implemented without helper functions. */ | ||
1866 | DISAS_INSN(shift_im)static void disas_shift_im (DisasContext *s, uint16_t insn) | ||
1867 | { | ||
1868 | TCGvTCGv_i32 reg; | ||
1869 | int tmp; | ||
1870 | TCGvTCGv_i32 shift; | ||
1871 | |||
1872 | reg = DREG(insn, 0)cpu_dregs[((insn) >> (0)) & 7]; | ||
1873 | tmp = (insn >> 9) & 7; | ||
1874 | if (tmp == 0) | ||
1875 | tmp = 8; | ||
1876 | shift = tcg_const_i32(tmp); | ||
1877 | /* No need to flush flags because we know we will set the C flag. */ | ||
1878 | if (insn & 0x100) { | ||
1879 | gen_helper_shl_cc(reg, cpu_env, reg, shift); | ||
1880 | } else { | ||
1881 | if (insn & 8) { | ||
1882 | gen_helper_shr_cc(reg, cpu_env, reg, shift); | ||
1883 | } else { | ||
1884 | gen_helper_sar_cc(reg, cpu_env, reg, shift); | ||
1885 | } | ||
1886 | } | ||
1887 | s->cc_op = CC_OP_SHIFT; | ||
1888 | } | ||
1889 | |||
1890 | DISAS_INSN(shift_reg)static void disas_shift_reg (DisasContext *s, uint16_t insn) | ||
1891 | { | ||
1892 | TCGvTCGv_i32 reg; | ||
1893 | TCGvTCGv_i32 shift; | ||
1894 | |||
1895 | reg = DREG(insn, 0)cpu_dregs[((insn) >> (0)) & 7]; | ||
1896 | shift = DREG(insn, 9)cpu_dregs[((insn) >> (9)) & 7]; | ||
1897 | /* Shift by zero leaves C flag unmodified. */ | ||
1898 | gen_flush_flags(s); | ||
1899 | if (insn & 0x100) { | ||
1900 | gen_helper_shl_cc(reg, cpu_env, reg, shift); | ||
1901 | } else { | ||
1902 | if (insn & 8) { | ||
1903 | gen_helper_shr_cc(reg, cpu_env, reg, shift); | ||
1904 | } else { | ||
1905 | gen_helper_sar_cc(reg, cpu_env, reg, shift); | ||
1906 | } | ||
1907 | } | ||
1908 | s->cc_op = CC_OP_SHIFT; | ||
1909 | } | ||
1910 | |||
1911 | DISAS_INSN(ff1)static void disas_ff1 (DisasContext *s, uint16_t insn) | ||
1912 | { | ||
1913 | TCGvTCGv_i32 reg; | ||
1914 | reg = DREG(insn, 0)cpu_dregs[((insn) >> (0)) & 7]; | ||
1915 | gen_logic_cc(s, reg); | ||
1916 | gen_helper_ff1(reg, reg); | ||
1917 | } | ||
1918 | |||
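| /* Assemble the full status register: the system byte held in QREG_SR | ||
| combined with the lazily evaluated condition codes. */ | ||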
1919 | static TCGvTCGv_i32 gen_get_sr(DisasContext *s) | ||
1920 | { | ||
1921 | TCGvTCGv_i32 ccr; | ||
1922 | TCGvTCGv_i32 sr; | ||
1923 | |||
1924 | ccr = gen_get_ccr(s); | ||
1925 | sr = tcg_temp_new()tcg_temp_new_i32(); | ||
1926 | tcg_gen_andi_i32(sr, QREG_SR, 0xffe0); | ||
1927 | tcg_gen_or_i32(sr, sr, ccr); | ||
1928 | return sr; | ||
1929 | } | ||
1930 | |||
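| /* strldsr pushes the current SR and then loads SR from an immediate word; | ||
| only the canonical 40e7 46fc <imm> encoding is accepted. */ | ||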
1931 | DISAS_INSN(strldsr)static void disas_strldsr (DisasContext *s, uint16_t insn) | ||
1932 | { | ||
1933 | uint16_t ext; | ||
1934 | uint32_t addr; | ||
1935 | |||
1936 | addr = s->pc - 2; | ||
1937 | ext = lduw_code(s->pc); | ||
1938 | s->pc += 2; | ||
1939 | if (ext != 0x46FC) { | ||
1940 | gen_exception(s, addr, EXCP_UNSUPPORTED61); | ||
1941 | return; | ||
1942 | } | ||
1943 | ext = lduw_code(s->pc); | ||
1944 | s->pc += 2; | ||
1945 | if (IS_USER(s)s->user || (ext & SR_S0x2000) == 0) { | ||
1946 | gen_exception(s, addr, EXCP_PRIVILEGE8); | ||
1947 | return; | ||
1948 | } | ||
1949 | gen_push(s, gen_get_sr(s)); | ||
1950 | gen_set_sr_im(s, ext, 0); | ||
1951 | } | ||
1952 | |||
1953 | DISAS_INSN(move_from_sr)static void disas_move_from_sr (DisasContext *s, uint16_t insn ) | ||
1954 | { | ||
1955 | TCGvTCGv_i32 reg; | ||
1956 | TCGvTCGv_i32 sr; | ||
1957 | |||
1958 | if (IS_USER(s)s->user) { | ||
1959 | gen_exception(s, s->pc - 2, EXCP_PRIVILEGE8); | ||
1960 | return; | ||
1961 | } | ||
1962 | sr = gen_get_sr(s); | ||
1963 | reg = DREG(insn, 0)cpu_dregs[((insn) >> (0)) & 7]; | ||
1964 | gen_partset_reg(OS_WORD1, reg, sr); | ||
1965 | } | ||
1966 | |||
1967 | DISAS_INSN(move_to_sr)static void disas_move_to_sr (DisasContext *s, uint16_t insn) | ||
1968 | { | ||
1969 | if (IS_USER(s)s->user) { | ||
1970 | gen_exception(s, s->pc - 2, EXCP_PRIVILEGE8); | ||
1971 | return; | ||
1972 | } | ||
1973 | gen_set_sr(s, insn, 0); | ||
1974 | gen_lookup_tb(s); | ||
1975 | } | ||
1976 | |||
1977 | DISAS_INSN(move_from_usp)static void disas_move_from_usp (DisasContext *s, uint16_t insn ) | ||
1978 | { | ||
1979 | if (IS_USER(s)s->user) { | ||
1980 | gen_exception(s, s->pc - 2, EXCP_PRIVILEGE8); | ||
1981 | return; | ||
1982 | } | ||
1983 | /* TODO: Implement USP. */ | ||
1984 | gen_exception(s, s->pc - 2, EXCP_ILLEGAL4); | ||
1985 | } | ||
1986 | |||
1987 | DISAS_INSN(move_to_usp)static void disas_move_to_usp (DisasContext *s, uint16_t insn ) | ||
1988 | { | ||
1989 | if (IS_USER(s)s->user) { | ||
1990 | gen_exception(s, s->pc - 2, EXCP_PRIVILEGE8); | ||
1991 | return; | ||
1992 | } | ||
1993 | /* TODO: Implement USP. */ | ||
1994 | gen_exception(s, s->pc - 2, EXCP_ILLEGAL4); | ||
1995 | } | ||
1996 | |||
1997 | DISAS_INSN(halt)static void disas_halt (DisasContext *s, uint16_t insn) | ||
1998 | { | ||
1999 | gen_exception(s, s->pc, EXCP_HALT_INSN0x101); | ||
2000 | } | ||
2001 | |||
2002 | DISAS_INSN(stop)static void disas_stop (DisasContext *s, uint16_t insn) | ||
2003 | { | ||
2004 | uint16_t ext; | ||
2005 | |||
2006 | if (IS_USER(s)s->user) { | ||
2007 | gen_exception(s, s->pc - 2, EXCP_PRIVILEGE8); | ||
2008 | return; | ||
2009 | } | ||
2010 | |||
2011 | ext = lduw_code(s->pc); | ||
2012 | s->pc += 2; | ||
2013 | |||
2014 | gen_set_sr_im(s, ext, 0); | ||
2015 | tcg_gen_movi_i32(QREG_HALTED, 1); | ||
2016 | gen_exception(s, s->pc, EXCP_HLT0x10001); | ||
2017 | } | ||
2018 | |||
2019 | DISAS_INSN(rte)static void disas_rte (DisasContext *s, uint16_t insn) | ||
2020 | { | ||
2021 | if (IS_USER(s)s->user) { | ||
2022 | gen_exception(s, s->pc - 2, EXCP_PRIVILEGE8); | ||
2023 | return; | ||
2024 | } | ||
2025 | gen_exception(s, s->pc - 2, EXCP_RTE0x100); | ||
2026 | } | ||
2027 | |||
2028 | DISAS_INSN(movec)static void disas_movec (DisasContext *s, uint16_t insn) | ||
2029 | { | ||
2030 | uint16_t ext; | ||
2031 | TCGvTCGv_i32 reg; | ||
2032 | |||
2033 | if (IS_USER(s)s->user) { | ||
2034 | gen_exception(s, s->pc - 2, EXCP_PRIVILEGE8); | ||
2035 | return; | ||
2036 | } | ||
2037 | |||
2038 | ext = lduw_code(s->pc); | ||
2039 | s->pc += 2; | ||
2040 | |||
2041 | if (ext & 0x8000) { | ||
2042 | reg = AREG(ext, 12)cpu_aregs[((ext) >> (12)) & 7]; | ||
2043 | } else { | ||
2044 | reg = DREG(ext, 12)cpu_dregs[((ext) >> (12)) & 7]; | ||
2045 | } | ||
2046 | gen_helper_movec(cpu_env, tcg_const_i32(ext & 0xfff), reg); | ||
2047 | gen_lookup_tb(s); | ||
2048 | } | ||
2049 | |||
2050 | DISAS_INSN(intouch)static void disas_intouch (DisasContext *s, uint16_t insn) | ||
2051 | { | ||
2052 | if (IS_USER(s)s->user) { | ||
2053 | gen_exception(s, s->pc - 2, EXCP_PRIVILEGE8); | ||
2054 | return; | ||
2055 | } | ||
2056 | /* ICache fetch. Implement as no-op. */ | ||
2057 | } | ||
2058 | |||
2059 | DISAS_INSN(cpushl)static void disas_cpushl (DisasContext *s, uint16_t insn) | ||
2060 | { | ||
2061 | if (IS_USER(s)s->user) { | ||
2062 | gen_exception(s, s->pc - 2, EXCP_PRIVILEGE8); | ||
2063 | return; | ||
2064 | } | ||
2065 | /* Cache push/invalidate. Implement as no-op. */ | ||
2066 | } | ||
2067 | |||
2068 | DISAS_INSN(wddata)static void disas_wddata (DisasContext *s, uint16_t insn) | ||
2069 | { | ||
2070 | gen_exception(s, s->pc - 2, EXCP_PRIVILEGE8); | ||
2071 | } | ||
2072 | |||
2073 | DISAS_INSN(wdebug)static void disas_wdebug (DisasContext *s, uint16_t insn) | ||
2074 | { | ||
2075 | if (IS_USER(s)s->user) { | ||
2076 | gen_exception(s, s->pc - 2, EXCP_PRIVILEGE8); | ||
2077 | return; | ||
2078 | } | ||
2079 | /* TODO: Implement wdebug. */ | ||
2080 | qemu_assert(0, "WDEBUG not implemented"); | ||
2081 | } | ||
2082 | |||
2083 | DISAS_INSN(trap)static void disas_trap (DisasContext *s, uint16_t insn) | ||
2084 | { | ||
2085 | gen_exception(s, s->pc - 2, EXCP_TRAP032 + (insn & 0xf)); | ||
2086 | } | ||
2087 | |||
2088 | /* ??? FP exceptions are not implemented. Most exceptions are deferred until | ||
2089 | immediately before the next FP instruction is executed. */ | ||
2090 | DISAS_INSN(fpu)static void disas_fpu (DisasContext *s, uint16_t insn) | ||
2091 | { | ||
2092 | uint16_t ext; | ||
2093 | int32_t offset; | ||
2094 | int opmode; | ||
2095 | TCGv_i64 src; | ||
2096 | TCGv_i64 dest; | ||
2097 | TCGv_i64 res; | ||
2098 | TCGvTCGv_i32 tmp32; | ||
2099 | int round; | ||
2100 | int set_dest; | ||
2101 | int opsize; | ||
2102 | |||
2103 | ext = lduw_code(s->pc); | ||
2104 | s->pc += 2; | ||
2105 | opmode = ext & 0x7f; | ||
2106 | switch ((ext >> 13) & 7) { | ||
2107 | case 0: case 2: | ||
2108 | break; | ||
2109 | case 1: | ||
2110 | goto undef; | ||
2111 | case 3: /* fmove out */ | ||
2112 | src = FREG(ext, 7)cpu_fregs[((ext) >> (7)) & 7]; | ||
2113 | tmp32 = tcg_temp_new_i32(); | ||
2114 | /* fmove */ | ||
2115 | /* ??? TODO: Proper behavior on overflow. */ | ||
2116 | switch ((ext >> 10) & 7) { | ||
2117 | case 0: | ||
2118 | opsize = OS_LONG2; | ||
2119 | gen_helper_f64_to_i32(tmp32, cpu_env, src); | ||
2120 | break; | ||
2121 | case 1: | ||
2122 | opsize = OS_SINGLE4; | ||
2123 | gen_helper_f64_to_f32(tmp32, cpu_env, src); | ||
2124 | break; | ||
2125 | case 4: | ||
2126 | opsize = OS_WORD1; | ||
2127 | gen_helper_f64_to_i32(tmp32, cpu_env, src); | ||
2128 | break; | ||
2129 | case 5: /* OS_DOUBLE */ | ||
2130 | tcg_gen_mov_i32(tmp32, AREG(insn, 0)cpu_aregs[((insn) >> (0)) & 7]); | ||
2131 | switch ((insn >> 3) & 7) { | ||
2132 | case 2: | ||
2133 | case 3: | ||
2134 | break; | ||
2135 | case 4: | ||
2136 | tcg_gen_addi_i32(tmp32, tmp32, -8); | ||
2137 | break; | ||
2138 | case 5: | ||
2139 | offset = ldsw_code(s->pc); | ||
2140 | s->pc += 2; | ||
2141 | tcg_gen_addi_i32(tmp32, tmp32, offset); | ||
2142 | break; | ||
2143 | default: | ||
2144 | goto undef; | ||
2145 | } | ||
2146 | gen_store64(s, tmp32, src); | ||
2147 | switch ((insn >> 3) & 7) { | ||
2148 | case 3: | ||
2149 | tcg_gen_addi_i32(tmp32, tmp32, 8); | ||
2150 | tcg_gen_mov_i32(AREG(insn, 0)cpu_aregs[((insn) >> (0)) & 7], tmp32); | ||
2151 | break; | ||
2152 | case 4: | ||
2153 | tcg_gen_mov_i32(AREG(insn, 0)cpu_aregs[((insn) >> (0)) & 7], tmp32); | ||
2154 | break; | ||
2155 | } | ||
2156 | tcg_temp_free_i32(tmp32); | ||
2157 | return; | ||
2158 | case 6: | ||
2159 | opsize = OS_BYTE0; | ||
2160 | gen_helper_f64_to_i32(tmp32, cpu_env, src); | ||
2161 | break; | ||
2162 | default: | ||
2163 | goto undef; | ||
2164 | } | ||
2165 | DEST_EA(insn, opsize, tmp32, NULL)do { TCGv_i32 ea_result = gen_ea(s, insn, opsize, tmp32, ((void *)0), EA_STORE); if (((((ea_result).i32) == ((NULL_QREG).i32) ))) { gen_addr_fault(s); return; } } while (0); | ||
2166 | tcg_temp_free_i32(tmp32); | ||
2167 | return; | ||
2168 | case 4: /* fmove to control register. */ | ||
2169 | switch ((ext >> 10) & 7) { | ||
2170 | case 4: /* FPCR */ | ||
2171 | /* Not implemented. Ignore writes. */ | ||
2172 | break; | ||
2173 | case 1: /* FPIAR */ | ||
2174 | case 2: /* FPSR */ | ||
2175 | default: | ||
2176 | cpu_abort(NULL((void*)0), "Unimplemented: fmove to control %d", | ||
2177 | (ext >> 10) & 7); | ||
2178 | } | ||
2179 | break; | ||
2180 | case 5: /* fmove from control register. */ | ||
2181 | switch ((ext >> 10) & 7) { | ||
2182 | case 4: /* FPCR */ | ||
2183 | /* Not implemented. Always return zero. */ | ||
2184 | tmp32 = tcg_const_i32(0); | ||
2185 | break; | ||
2186 | case 1: /* FPIAR */ | ||
2187 | case 2: /* FPSR */ | ||
2188 | default: | ||
2189 | cpu_abort(NULL((void*)0), "Unimplemented: fmove from control %d", | ||
2190 | (ext >> 10) & 7); | ||
2191 | goto undef; | ||
2192 | } | ||
2193 | DEST_EA(insn, OS_LONG, tmp32, NULL)do { TCGv_i32 ea_result = gen_ea(s, insn, 2, tmp32, ((void*)0 ), EA_STORE); if (((((ea_result).i32) == ((NULL_QREG).i32)))) { gen_addr_fault(s); return; } } while (0); | ||
2194 | break; | ||
2195 | case 6: /* fmovem */ | ||
2196 | case 7: | ||
2197 | { | ||
2198 | TCGvTCGv_i32 addr; | ||
2199 | uint16_t mask; | ||
2200 | int i; | ||
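| /* The low byte of the extension word is the register mask (bit 7 = FP0 | ||
| ... bit 0 = FP7); bit 13 selects store versus load, and the address | ||
| advances by 8 bytes for each selected register. */ | ||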
2201 | if ((ext & 0x1f00) != 0x1000 || (ext & 0xff) == 0) | ||
2202 | goto undef; | ||
2203 | tmp32 = gen_lea(s, insn, OS_LONG2); | ||
2204 | if (IS_NULL_QREG(tmp32)((((tmp32).i32) == ((NULL_QREG).i32)))) { | ||
2205 | gen_addr_fault(s); | ||
2206 | return; | ||
2207 | } | ||
2208 | addr = tcg_temp_new_i32(); | ||
2209 | tcg_gen_mov_i32(addr, tmp32); | ||
2210 | mask = 0x80; | ||
2211 | for (i = 0; i < 8; i++) { | ||
2212 | if (ext & mask) { | ||
2213 | s->is_mem = 1; | ||
2214 | dest = FREG(i, 0)cpu_fregs[((i) >> (0)) & 7]; | ||
2215 | if (ext & (1 << 13)) { | ||
2216 | /* store */ | ||
2217 | tcg_gen_qemu_stf64tcg_gen_qemu_st64(dest, addr, IS_USER(s)s->user); | ||
2218 | } else { | ||
2219 | /* load */ | ||
2220 | tcg_gen_qemu_ldf64tcg_gen_qemu_ld64(dest, addr, IS_USER(s)s->user); | ||
2221 | } | ||
2222 | if (ext & (mask - 1)) | ||
2223 | tcg_gen_addi_i32(addr, addr, 8); | ||
2224 | } | ||
2225 | mask >>= 1; | ||
2226 | } | ||
2227 | tcg_temp_free_i32(addr); | ||
2228 | } | ||
2229 | return; | ||
2230 | } | ||
2231 | if (ext & (1 << 14)) { | ||
2232 | /* Source effective address. */ | ||
2233 | switch ((ext >> 10) & 7) { | ||
2234 | case 0: opsize = OS_LONG2; break; | ||
2235 | case 1: opsize = OS_SINGLE4; break; | ||
2236 | case 4: opsize = OS_WORD1; break; | ||
2237 | case 5: opsize = OS_DOUBLE5; break; | ||
2238 | case 6: opsize = OS_BYTE0; break; | ||
2239 | default: | ||
2240 | goto undef; | ||
2241 | } | ||
2242 | if (opsize == OS_DOUBLE5) { | ||
2243 | tmp32 = tcg_temp_new_i32(); | ||
2244 | tcg_gen_mov_i32(tmp32, AREG(insn, 0)cpu_aregs[((insn) >> (0)) & 7]); | ||
2245 | switch ((insn >> 3) & 7) { | ||
2246 | case 2: | ||
2247 | case 3: | ||
2248 | break; | ||
2249 | case 4: | ||
2250 | tcg_gen_addi_i32(tmp32, tmp32, -8); | ||
2251 | break; | ||
2252 | case 5: | ||
2253 | offset = ldsw_code(s->pc); | ||
2254 | s->pc += 2; | ||
2255 | tcg_gen_addi_i32(tmp32, tmp32, offset); | ||
2256 | break; | ||
2257 | case 7: | ||
2258 | offset = ldsw_code(s->pc); | ||
2259 | offset += s->pc - 2; | ||
2260 | s->pc += 2; | ||
2261 | tcg_gen_addi_i32(tmp32, tmp32, offset); | ||
2262 | break; | ||
2263 | default: | ||
2264 | goto undef; | ||
2265 | } | ||
2266 | src = gen_load64(s, tmp32); | ||
2267 | switch ((insn >> 3) & 7) { | ||
2268 | case 3: | ||
2269 | tcg_gen_addi_i32(tmp32, tmp32, 8); | ||
2270 | tcg_gen_mov_i32(AREG(insn, 0)cpu_aregs[((insn) >> (0)) & 7], tmp32); | ||
2271 | break; | ||
2272 | case 4: | ||
2273 | tcg_gen_mov_i32(AREG(insn, 0)cpu_aregs[((insn) >> (0)) & 7], tmp32); | ||
2274 | break; | ||
2275 | } | ||
2276 | tcg_temp_free_i32(tmp32); | ||
2277 | } else { | ||
2278 | SRC_EA(tmp32, opsize, 1, NULL)do { tmp32 = gen_ea(s, insn, opsize, NULL_QREG, ((void*)0), 1 ? EA_LOADS : EA_LOADU); if (((((tmp32).i32) == ((NULL_QREG). i32)))) { gen_addr_fault(s); return; } } while (0); | ||
2279 | src = tcg_temp_new_i64(); | ||
2280 | switch (opsize) { | ||
2281 | case OS_LONG2: | ||
2282 | case OS_WORD1: | ||
2283 | case OS_BYTE0: | ||
2284 | gen_helper_i32_to_f64(src, cpu_env, tmp32); | ||
2285 | break; | ||
2286 | case OS_SINGLE4: | ||
2287 | gen_helper_f32_to_f64(src, cpu_env, tmp32); | ||
2288 | break; | ||
2289 | } | ||
2290 | } | ||
2291 | } else { | ||
2292 | /* Source register. */ | ||
2293 | src = FREG(ext, 10)cpu_fregs[((ext) >> (10)) & 7]; | ||
2294 | } | ||
2295 | dest = FREG(ext, 7)cpu_fregs[((ext) >> (7)) & 7]; | ||
2296 | res = tcg_temp_new_i64(); | ||
2297 | if (opmode != 0x3a) | ||
2298 | tcg_gen_mov_f64tcg_gen_mov_i64(res, dest); | ||
2299 | round = 1; | ||
2300 | set_dest = 1; | ||
2301 | switch (opmode) { | ||
2302 | case 0: case 0x40: case 0x44: /* fmove */ | ||
2303 | tcg_gen_mov_f64tcg_gen_mov_i64(res, src); | ||
2304 | break; | ||
2305 | case 1: /* fint */ | ||
2306 | gen_helper_iround_f64(res, cpu_env, src); | ||
2307 | round = 0; | ||
2308 | break; | ||
2309 | case 3: /* fintrz */ | ||
2310 | gen_helper_itrunc_f64(res, cpu_env, src); | ||
2311 | round = 0; | ||
2312 | break; | ||
2313 | case 4: case 0x41: case 0x45: /* fsqrt */ | ||
2314 | gen_helper_sqrt_f64(res, cpu_env, src); | ||
2315 | break; | ||
2316 | case 0x18: case 0x58: case 0x5c: /* fabs */ | ||
2317 | gen_helper_abs_f64(res, src); | ||
2318 | break; | ||
2319 | case 0x1a: case 0x5a: case 0x5e: /* fneg */ | ||
2320 | gen_helper_chs_f64(res, src); | ||
2321 | break; | ||
2322 | case 0x20: case 0x60: case 0x64: /* fdiv */ | ||
2323 | gen_helper_div_f64(res, cpu_env, res, src); | ||
2324 | break; | ||
2325 | case 0x22: case 0x62: case 0x66: /* fadd */ | ||
2326 | gen_helper_add_f64(res, cpu_env, res, src); | ||
2327 | break; | ||
2328 | case 0x23: case 0x63: case 0x67: /* fmul */ | ||
2329 | gen_helper_mul_f64(res, cpu_env, res, src); | ||
2330 | break; | ||
2331 | case 0x28: case 0x68: case 0x6c: /* fsub */ | ||
2332 | gen_helper_sub_f64(res, cpu_env, res, src); | ||
2333 | break; | ||
2334 | case 0x38: /* fcmp */ | ||
2335 | gen_helper_sub_cmp_f64(res, cpu_env, res, src); | ||
2336 | set_dest = 0; | ||
2337 | round = 0; | ||
2338 | break; | ||
2339 | case 0x3a: /* ftst */ | ||
2340 | tcg_gen_mov_f64tcg_gen_mov_i64(res, src); | ||
2341 | set_dest = 0; | ||
2342 | round = 0; | ||
2343 | break; | ||
2344 | default: | ||
2345 | goto undef; | ||
2346 | } | ||
2347 | if (ext & (1 << 14)) { | ||
2348 | tcg_temp_free_i64(src); | ||
2349 | } | ||
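| /* When opmode bit 6 is set, bit 2 distinguishes the double-rounded forms | ||
| (no extra rounding needed) from the single-rounded ones; otherwise | ||
| rounding only happens if the FPCR precision bit is set. Rounding to | ||
| single precision is emulated by converting through float32 and back. */ | ||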
2350 | if (round) { | ||
2351 | if (opmode & 0x40) { | ||
2352 | if ((opmode & 0x4) != 0) | ||
2353 | round = 0; | ||
2354 | } else if ((s->fpcr & M68K_FPCR_PREC(1 << 6)) == 0) { | ||
2355 | round = 0; | ||
2356 | } | ||
2357 | } | ||
2358 | if (round) { | ||
2359 | TCGvTCGv_i32 tmp = tcg_temp_new_i32(); | ||
2360 | gen_helper_f64_to_f32(tmp, cpu_env, res); | ||
2361 | gen_helper_f32_to_f64(res, cpu_env, tmp); | ||
2362 | tcg_temp_free_i32(tmp); | ||
2363 | } | ||
2364 | tcg_gen_mov_f64tcg_gen_mov_i64(QREG_FP_RESULT, res); | ||
2365 | if (set_dest) { | ||
2366 | tcg_gen_mov_f64tcg_gen_mov_i64(dest, res); | ||
2367 | } | ||
2368 | tcg_temp_free_i64(res); | ||
2369 | return; | ||
2370 | undef: | ||
2371 | /* FIXME: Is this right for offset addressing modes? */ | ||
2372 | s->pc -= 2; | ||
2373 | disas_undef_fpu(s, insn); | ||
2374 | } | ||
2375 | |||
2376 | DISAS_INSN(fbcc)static void disas_fbcc (DisasContext *s, uint16_t insn) | ||
2377 | { | ||
2378 | uint32_t offset; | ||
2379 | uint32_t addr; | ||
2380 | TCGvTCGv_i32 flag; | ||
2381 | int l1; | ||
2382 | |||
2383 | addr = s->pc; | ||
2384 | offset = ldsw_code(s->pc); | ||
2385 | s->pc += 2; | ||
2386 | if (insn & (1 << 6)) { | ||
2387 | offset = (offset << 16) | lduw_code(s->pc); | ||
2388 | s->pc += 2; | ||
2389 | } | ||
2390 | |||
2391 | l1 = gen_new_label(); | ||
2392 | /* TODO: Raise BSUN exception. */ | ||
2393 | flag = tcg_temp_new()tcg_temp_new_i32(); | ||
2394 | gen_helper_compare_f64(flag, cpu_env, QREG_FP_RESULT); | ||
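| /* compare_f64 classifies FP_RESULT against zero: -1 less, 0 equal, | ||
| 1 greater, 2 unordered (NaN). */ | ||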
2395 | /* Jump to l1 if condition is true. */ | ||
2396 | switch (insn & 0xf) { | ||
2397 | case 0: /* f */ | ||
2398 | break; | ||
2399 | case 1: /* eq (=0) */ | ||
2400 | tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(0), l1); | ||
2401 | break; | ||
2402 | case 2: /* ogt (=1) */ | ||
2403 | tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(1), l1); | ||
2404 | break; | ||
2405 | case 3: /* oge (=0 or =1) */ | ||
2406 | tcg_gen_brcond_i32(TCG_COND_LEU, flag, tcg_const_i32(1), l1); | ||
2407 | break; | ||
2408 | case 4: /* olt (=-1) */ | ||
2409 | tcg_gen_brcond_i32(TCG_COND_LT, flag, tcg_const_i32(0), l1); | ||
2410 | break; | ||
2411 | case 5: /* ole (=-1 or =0) */ | ||
2412 | tcg_gen_brcond_i32(TCG_COND_LE, flag, tcg_const_i32(0), l1); | ||
2413 | break; | ||
2414 | case 6: /* ogl (=-1 or =1) */ | ||
2415 | tcg_gen_andi_i32(flag, flag, 1); | ||
2416 | tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(0), l1); | ||
2417 | break; | ||
2418 | case 7: /* or (ordered, !=2) */ | ||
2419 | tcg_gen_brcond_i32(TCG_COND_LT, flag, tcg_const_i32(2), l1); | ||
2420 | break; | ||
2421 | case 8: /* un (unordered, =2) */ | ||
2422 | tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(2), l1); | ||
2423 | break; | ||
2424 | case 9: /* ueq (=0 or =2) */ | ||
2425 | tcg_gen_andi_i32(flag, flag, 1); | ||
2426 | tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(0), l1); | ||
2427 | break; | ||
2428 | case 10: /* ugt (>0) */ | ||
2429 | tcg_gen_brcond_i32(TCG_COND_GT, flag, tcg_const_i32(0), l1); | ||
2430 | break; | ||
2431 | case 11: /* uge (>=0) */ | ||
2432 | tcg_gen_brcond_i32(TCG_COND_GE, flag, tcg_const_i32(0), l1); | ||
2433 | break; | ||
2434 | case 12: /* ult (=-1 or =2) */ | ||
2435 | tcg_gen_brcond_i32(TCG_COND_GEU, flag, tcg_const_i32(2), l1); | ||
2436 | break; | ||
2437 | case 13: /* ule (!=1) */ | ||
2438 | tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(1), l1); | ||
2439 | break; | ||
2440 | case 14: /* ne (!=0) */ | ||
2441 | tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(0), l1); | ||
2442 | break; | ||
2443 | case 15: /* t */ | ||
2444 | tcg_gen_br(l1); | ||
2445 | break; | ||
2446 | } | ||
2447 | gen_jmp_tb(s, 0, s->pc); | ||
2448 | gen_set_label(l1); | ||
2449 | gen_jmp_tb(s, 1, addr + offset); | ||
2450 | } | ||
2451 | |||
2452 | DISAS_INSN(frestore)static void disas_frestore (DisasContext *s, uint16_t insn) | ||
2453 | { | ||
2454 | /* TODO: Implement frestore. */ | ||
2455 | qemu_assert(0, "FRESTORE not implemented"); | ||
2456 | } | ||
2457 | |||
2458 | DISAS_INSN(fsave)static void disas_fsave (DisasContext *s, uint16_t insn) | ||
2459 | { | ||
2460 | /* TODO: Implement fsave. */ | ||
2461 | qemu_assert(0, "FSAVE not implemented"); | ||
2462 | } | ||
2463 | |||
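| /* Pick one 16-bit half of a MAC operand: in fractional mode (MACSR_FI) | ||
| the half is left-aligned in the upper 16 bits, in signed mode (MACSR_SU) | ||
| it is sign-extended, otherwise it is zero-extended. */ | ||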
2464 | static inline TCGvTCGv_i32 gen_mac_extract_word(DisasContext *s, TCGvTCGv_i32 val, int upper) | ||
2465 | { | ||
2466 | TCGvTCGv_i32 tmp = tcg_temp_new()tcg_temp_new_i32(); | ||
2467 | if (s->env->macsr & MACSR_FI0x020) { | ||
2468 | if (upper) | ||
2469 | tcg_gen_andi_i32(tmp, val, 0xffff0000); | ||
2470 | else | ||
2471 | tcg_gen_shli_i32(tmp, val, 16); | ||
2472 | } else if (s->env->macsr & MACSR_SU0x040) { | ||
2473 | if (upper) | ||
2474 | tcg_gen_sari_i32(tmp, val, 16); | ||
2475 | else | ||
2476 | tcg_gen_ext16s_i32(tmp, val); | ||
2477 | } else { | ||
2478 | if (upper) | ||
2479 | tcg_gen_shri_i32(tmp, val, 16); | ||
2480 | else | ||
2481 | tcg_gen_ext16u_i32(tmp, val); | ||
2482 | } | ||
2483 | return tmp; | ||
2484 | } | ||
2485 | |||
2486 | static void gen_mac_clear_flags(void) | ||
2487 | { | ||
2488 | tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, | ||
2489 | ~(MACSR_V0x002 | MACSR_Z0x004 | MACSR_N0x008 | MACSR_EV0x001)); | ||
2490 | } | ||
2491 | |||
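| /* EMAC multiply-accumulate: multiply rx by ry (word halves or full longs), | ||
| optionally shift the product, then add it to or subtract it from the | ||
| selected accumulator with saturation; the "MAC with load" forms also load | ||
| a long into rw and write back the address register. */ | ||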
2492 | DISAS_INSN(mac)static void disas_mac (DisasContext *s, uint16_t insn) | ||
2493 | { | ||
2494 | TCGvTCGv_i32 rx; | ||
2495 | TCGvTCGv_i32 ry; | ||
2496 | uint16_t ext; | ||
2497 | int acc; | ||
2498 | TCGvTCGv_i32 tmp; | ||
2499 | TCGvTCGv_i32 addr; | ||
2500 | TCGvTCGv_i32 loadval; | ||
2501 | int dual; | ||
2502 | TCGvTCGv_i32 saved_flags; | ||
2503 | |||
2504 | if (!s->done_mac) { | ||
2505 | s->mactmp = tcg_temp_new_i64(); | ||
2506 | s->done_mac = 1; | ||
2507 | } | ||
2508 | |||
2509 | ext = lduw_code(s->pc); | ||
2510 | s->pc += 2; | ||
2511 | |||
2512 | acc = ((insn >> 7) & 1) | ((ext >> 3) & 2); | ||
2513 | dual = ((insn & 0x30) != 0 && (ext & 3) != 0); | ||
2514 | if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) { | ||
2515 | disas_undef(s, insn); | ||
2516 | return; | ||
2517 | } | ||
2518 | if (insn & 0x30) { | ||
2519 | /* MAC with load. */ | ||
2520 | tmp = gen_lea(s, insn, OS_LONG2); | ||
2521 | addr = tcg_temp_new()tcg_temp_new_i32(); | ||
2522 | tcg_gen_and_i32(addr, tmp, QREG_MAC_MASK); | ||
2523 | /* Load the value now to ensure correct exception behavior. | ||
2524 | Perform writeback after reading the MAC inputs. */ | ||
2525 | loadval = gen_load(s, OS_LONG2, addr, 0); | ||
2526 | |||
2527 | acc ^= 1; | ||
2528 | rx = (ext & 0x8000) ? AREG(ext, 12)cpu_aregs[((ext) >> (12)) & 7] : DREG(insn, 12)cpu_dregs[((insn) >> (12)) & 7]; | ||
2529 | ry = (ext & 8) ? AREG(ext, 0)cpu_aregs[((ext) >> (0)) & 7] : DREG(ext, 0)cpu_dregs[((ext) >> (0)) & 7]; | ||
2530 | } else { | ||
2531 | loadval = addr = NULL_QREG; | ||
2532 | rx = (insn & 0x40) ? AREG(insn, 9)cpu_aregs[((insn) >> (9)) & 7] : DREG(insn, 9)cpu_dregs[((insn) >> (9)) & 7]; | ||
2533 | ry = (insn & 8) ? AREG(insn, 0)cpu_aregs[((insn) >> (0)) & 7] : DREG(insn, 0)cpu_dregs[((insn) >> (0)) & 7]; | ||
2534 | } | ||
2535 | |||
2536 | gen_mac_clear_flags(); | ||
2537 | #if 0 | ||
2538 | l1 = -1; | ||
2539 | /* Disabled because conditional branches clobber temporary vars. */ | ||
2540 | if ((s->env->macsr & MACSR_OMC0x080) != 0 && !dual) { | ||
2541 | /* Skip the multiply if we know we will ignore it. */ | ||
2542 | l1 = gen_new_label(); | ||
2543 | tmp = tcg_temp_new()tcg_temp_new_i32(); | ||
2544 | tcg_gen_andi_i32(tmp, QREG_MACSR, 1 << (acc + 8)); | ||
2545 | gen_op_jmp_nz32(tmp, l1); | ||
2546 | } | ||
2547 | #endif | ||
2548 | |||
2549 | if ((ext & 0x0800) == 0) { | ||
2550 | /* Word. */ | ||
2551 | rx = gen_mac_extract_word(s, rx, (ext & 0x80) != 0); | ||
2552 | ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0); | ||
2553 | } | ||
2554 | if (s->env->macsr & MACSR_FI0x020) { | ||
2555 | gen_helper_macmulf(s->mactmp, cpu_env, rx, ry); | ||
2556 | } else { | ||
2557 | if (s->env->macsr & MACSR_SU0x040) | ||
2558 | gen_helper_macmuls(s->mactmp, cpu_env, rx, ry); | ||
2559 | else | ||
2560 | gen_helper_macmulu(s->mactmp, cpu_env, rx, ry); | ||
2561 | switch ((ext >> 9) & 3) { | ||
2562 | case 1: | ||
2563 | tcg_gen_shli_i64(s->mactmp, s->mactmp, 1); | ||
2564 | break; | ||
2565 | case 3: | ||
2566 | tcg_gen_shri_i64(s->mactmp, s->mactmp, 1); | ||
2567 | break; | ||
2568 | } | ||
2569 | } | ||
2570 | |||
2571 | if (dual) { | ||
2572 | /* Save the overflow flag from the multiply. */ | ||
2573 | saved_flags = tcg_temp_new()tcg_temp_new_i32(); | ||
2574 | tcg_gen_mov_i32(saved_flags, QREG_MACSR); | ||
2575 | } else { | ||
2576 | saved_flags = NULL_QREG; | ||
2577 | } | ||
2578 | |||
2579 | #if 0 | ||
2580 | /* Disabled because conditional branches clobber temporary vars. */ | ||
2581 | if ((s->env->macsr & MACSR_OMC0x080) != 0 && dual) { | ||
2582 | /* Skip the accumulate if the value is already saturated. */ | ||
2583 | l1 = gen_new_label(); | ||
2584 | tmp = tcg_temp_new()tcg_temp_new_i32(); | ||
2585 | gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV00x100 << acc)); | ||
2586 | gen_op_jmp_nz32(tmp, l1); | ||
2587 | } | ||
2588 | #endif | ||
2589 | |||
2590 | if (insn & 0x100) | ||
2591 | tcg_gen_sub_i64(MACREG(acc)cpu_macc[acc], MACREG(acc)cpu_macc[acc], s->mactmp); | ||
2592 | else | ||
2593 | tcg_gen_add_i64(MACREG(acc)cpu_macc[acc], MACREG(acc)cpu_macc[acc], s->mactmp); | ||
2594 | |||
2595 | if (s->env->macsr & MACSR_FI0x020) | ||
2596 | gen_helper_macsatf(cpu_env, tcg_const_i32(acc)); | ||
2597 | else if (s->env->macsr & MACSR_SU0x040) | ||
2598 | gen_helper_macsats(cpu_env, tcg_const_i32(acc)); | ||
2599 | else | ||
2600 | gen_helper_macsatu(cpu_env, tcg_const_i32(acc)); | ||
2601 | |||
2602 | #if 0 | ||
2603 | /* Disabled because conditional branches clobber temporary vars. */ | ||
2604 | if (l1 != -1) | ||
2605 | gen_set_label(l1); | ||
2606 | #endif | ||
2607 | |||
2608 | if (dual) { | ||
2609 | /* Dual accumulate variant. */ | ||
2610 | acc = (ext >> 2) & 3; | ||
2611 | /* Restore the overflow flag from the multiplier. */ | ||
2612 | tcg_gen_mov_i32(QREG_MACSR, saved_flags); | ||
2613 | #if 0 | ||
2614 | /* Disabled because conditional branches clobber temporary vars. */ | ||
2615 | if ((s->env->macsr & MACSR_OMC0x080) != 0) { | ||
2616 | /* Skip the accumulate if the value is already saturated. */ | ||
2617 | l1 = gen_new_label(); | ||
2618 | tmp = tcg_temp_new()tcg_temp_new_i32(); | ||
2619 | gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV00x100 << acc)); | ||
2620 | gen_op_jmp_nz32(tmp, l1); | ||
2621 | } | ||
2622 | #endif | ||
2623 | if (ext & 2) | ||
2624 | tcg_gen_sub_i64(MACREG(acc)cpu_macc[acc], MACREG(acc)cpu_macc[acc], s->mactmp); | ||
2625 | else | ||
2626 | tcg_gen_add_i64(MACREG(acc)cpu_macc[acc], MACREG(acc)cpu_macc[acc], s->mactmp); | ||
2627 | if (s->env->macsr & MACSR_FI0x020) | ||
2628 | gen_helper_macsatf(cpu_env, tcg_const_i32(acc)); | ||
2629 | else if (s->env->macsr & MACSR_SU0x040) | ||
2630 | gen_helper_macsats(cpu_env, tcg_const_i32(acc)); | ||
2631 | else | ||
2632 | gen_helper_macsatu(cpu_env, tcg_const_i32(acc)); | ||
2633 | #if 0 | ||
2634 | /* Disabled because conditional branches clobber temporary vars. */ | ||
2635 | if (l1 != -1) | ||
2636 | gen_set_label(l1); | ||
2637 | #endif | ||
2638 | } | ||
2639 | gen_helper_mac_set_flags(cpu_env, tcg_const_i32(acc)); | ||
2640 | |||
2641 | if (insn & 0x30) { | ||
2642 | TCGvTCGv_i32 rw; | ||
2643 | rw = (insn & 0x40) ? AREG(insn, 9)cpu_aregs[((insn) >> (9)) & 7] : DREG(insn, 9)cpu_dregs[((insn) >> (9)) & 7]; | ||
2644 | tcg_gen_mov_i32(rw, loadval); | ||
2645 | /* FIXME: Should address writeback happen with the masked or | ||
2646 | unmasked value? */ | ||
2647 | switch ((insn >> 3) & 7) { | ||
2648 | case 3: /* Post-increment. */ | ||
2649 | tcg_gen_addi_i32(AREG(insn, 0)cpu_aregs[((insn) >> (0)) & 7], addr, 4); | ||
2650 | break; | ||
2651 | case 4: /* Pre-decrement. */ | ||
2652 | tcg_gen_mov_i32(AREG(insn, 0)cpu_aregs[((insn) >> (0)) & 7], addr); | ||
2653 | } | ||
2654 | } | ||
2655 | } | ||
2656 | |||
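| /* Read an accumulator into a register, saturating according to the MACSR | ||
| mode; if bit 6 of the opcode is set the accumulator and its PAV flag are | ||
| cleared afterwards (the movclr variant). */ | ||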
2657 | DISAS_INSN(from_mac)static void disas_from_mac (DisasContext *s, uint16_t insn) | ||
2658 | { | ||
2659 | TCGvTCGv_i32 rx; | ||
2660 | TCGv_i64 acc; | ||
2661 | int accnum; | ||
2662 | |||
2663 | rx = (insn & 8) ? AREG(insn, 0)cpu_aregs[((insn) >> (0)) & 7] : DREG(insn, 0)cpu_dregs[((insn) >> (0)) & 7]; | ||
2664 | accnum = (insn >> 9) & 3; | ||
2665 | acc = MACREG(accnum)cpu_macc[accnum]; | ||
2666 | if (s->env->macsr & MACSR_FI0x020) { | ||
2667 | gen_helper_get_macf(rx, cpu_env, acc); | ||
2668 | } else if ((s->env->macsr & MACSR_OMC0x080) == 0) { | ||
2669 | tcg_gen_trunc_i64_i32(rx, acc); | ||
2670 | } else if (s->env->macsr & MACSR_SU0x040) { | ||
2671 | gen_helper_get_macs(rx, acc); | ||
2672 | } else { | ||
2673 | gen_helper_get_macu(rx, acc); | ||
2674 | } | ||
2675 | if (insn & 0x40) { | ||
2676 | tcg_gen_movi_i64(acc, 0); | ||
2677 | tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV00x100 << accnum)); | ||
2678 | } | ||
2679 | } | ||
2680 | |||
2681 | DISAS_INSN(move_mac)static void disas_move_mac (DisasContext *s, uint16_t insn) | ||
2682 | { | ||
2683 | /* FIXME: This can be done without a helper. */ | ||
2684 | int src; | ||
2685 | TCGvTCGv_i32 dest; | ||
2686 | src = insn & 3; | ||
2687 | dest = tcg_const_i32((insn >> 9) & 3); | ||
2688 | gen_helper_mac_move(cpu_env, dest, tcg_const_i32(src)); | ||
2689 | gen_mac_clear_flags(); | ||
2690 | gen_helper_mac_set_flags(cpu_env, dest); | ||
2691 | } | ||
2692 | |||
2693 | DISAS_INSN(from_macsr)static void disas_from_macsr (DisasContext *s, uint16_t insn) | ||
2694 | { | ||
2695 | TCGvTCGv_i32 reg; | ||
2696 | |||
2697 | reg = (insn & 8) ? AREG(insn, 0)cpu_aregs[((insn) >> (0)) & 7] : DREG(insn, 0)cpu_dregs[((insn) >> (0)) & 7]; | ||
2698 | tcg_gen_mov_i32(reg, QREG_MACSR); | ||
2699 | } | ||
2700 | |||
2701 | DISAS_INSN(from_mask)static void disas_from_mask (DisasContext *s, uint16_t insn) | ||
2702 | { | ||
2703 | TCGvTCGv_i32 reg; | ||
2704 | reg = (insn & 8) ? AREG(insn, 0)cpu_aregs[((insn) >> (0)) & 7] : DREG(insn, 0)cpu_dregs[((insn) >> (0)) & 7]; | ||
2705 | tcg_gen_mov_i32(reg, QREG_MAC_MASK); | ||
2706 | } | ||
2707 | |||
2708 | DISAS_INSN(from_mext)static void disas_from_mext (DisasContext *s, uint16_t insn) | ||
2709 | { | ||
2710 | TCGvTCGv_i32 reg; | ||
2711 | TCGvTCGv_i32 acc; | ||
2712 | reg = (insn & 8) ? AREG(insn, 0)cpu_aregs[((insn) >> (0)) & 7] : DREG(insn, 0)cpu_dregs[((insn) >> (0)) & 7]; | ||
2713 | acc = tcg_const_i32((insn & 0x400) ? 2 : 0); | ||
2714 | if (s->env->macsr & MACSR_FI0x020) | ||
2715 | gen_helper_get_mac_extf(reg, cpu_env, acc); | ||
2716 | else | ||
2717 | gen_helper_get_mac_exti(reg, cpu_env, acc); | ||
2718 | } | ||
2719 | |||
2720 | DISAS_INSN(macsr_to_ccr)static void disas_macsr_to_ccr (DisasContext *s, uint16_t insn ) | ||
2721 | { | ||
2722 | tcg_gen_movi_i32(QREG_CC_X, 0); | ||
2723 | tcg_gen_andi_i32(QREG_CC_DEST, QREG_MACSR, 0xf); | ||
2724 | s->cc_op = CC_OP_FLAGS; | ||
2725 | } | ||
2726 | |||
2727 | DISAS_INSN(to_mac)static void disas_to_mac (DisasContext *s, uint16_t insn) | ||
2728 | { | ||
2729 | TCGv_i64 acc; | ||
2730 | TCGvTCGv_i32 val; | ||
2731 | int accnum; | ||
2732 | accnum = (insn >> 9) & 3; | ||
2733 | acc = MACREG(accnum)cpu_macc[accnum]; | ||
2734 | SRC_EA(val, OS_LONG, 0, NULL)do { val = gen_ea(s, insn, 2, NULL_QREG, ((void*)0), 0 ? EA_LOADS : EA_LOADU); if (((((val).i32) == ((NULL_QREG).i32)))) { gen_addr_fault (s); return; } } while (0); | ||
2735 | if (s->env->macsr & MACSR_FI0x020) { | ||
2736 | tcg_gen_ext_i32_i64(acc, val); | ||
2737 | tcg_gen_shli_i64(acc, acc, 8); | ||
2738 | } else if (s->env->macsr & MACSR_SU0x040) { | ||
2739 | tcg_gen_ext_i32_i64(acc, val); | ||
2740 | } else { | ||
2741 | tcg_gen_extu_i32_i64(acc, val); | ||
2742 | } | ||
2743 | tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV00x100 << accnum)); | ||
2744 | gen_mac_clear_flags(); | ||
2745 | gen_helper_mac_set_flags(cpu_env, tcg_const_i32(accnum)); | ||
2746 | } | ||
2747 | |||
2748 | DISAS_INSN(to_macsr)static void disas_to_macsr (DisasContext *s, uint16_t insn) | ||
2749 | { | ||
2750 | TCGvTCGv_i32 val; | ||
2751 | SRC_EA(val, OS_LONG, 0, NULL)do { val = gen_ea(s, insn, 2, NULL_QREG, ((void*)0), 0 ? EA_LOADS : EA_LOADU); if (((((val).i32) == ((NULL_QREG).i32)))) { gen_addr_fault (s); return; } } while (0); | ||
2752 | gen_helper_set_macsr(cpu_env, val); | ||
2753 | gen_lookup_tb(s); | ||
2754 | } | ||
2755 | |||
2756 | DISAS_INSN(to_mask)static void disas_to_mask (DisasContext *s, uint16_t insn) | ||
2757 | { | ||
2758 | TCGvTCGv_i32 val; | ||
2759 | SRC_EA(val, OS_LONG, 0, NULL)do { val = gen_ea(s, insn, 2, NULL_QREG, ((void*)0), 0 ? EA_LOADS : EA_LOADU); if (((((val).i32) == ((NULL_QREG).i32)))) { gen_addr_fault (s); return; } } while (0); | ||
2760 | tcg_gen_ori_i32(QREG_MAC_MASK, val, 0xffff0000); | ||
2761 | } | ||
2762 | |||
2763 | DISAS_INSN(to_mext)static void disas_to_mext (DisasContext *s, uint16_t insn) | ||
2764 | { | ||
2765 | TCGvTCGv_i32 val; | ||
2766 | TCGvTCGv_i32 acc; | ||
2767 | SRC_EA(val, OS_LONG, 0, NULL)do { val = gen_ea(s, insn, 2, NULL_QREG, ((void*)0), 0 ? EA_LOADS : EA_LOADU); if (((((val).i32) == ((NULL_QREG).i32)))) { gen_addr_fault (s); return; } } while (0); | ||
2768 | acc = tcg_const_i32((insn & 0x400) ? 2 : 0); | ||
2769 | if (s->env->macsr & MACSR_FI0x020) | ||
2770 | gen_helper_set_mac_extf(cpu_env, val, acc); | ||
2771 | else if (s->env->macsr & MACSR_SU0x040) | ||
2772 | gen_helper_set_mac_exts(cpu_env, val, acc); | ||
2773 | else | ||
2774 | gen_helper_set_mac_extu(cpu_env, val, acc); | ||
2775 | } | ||
2776 | |||
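| /* Dispatch table indexed by the raw 16-bit instruction word; | ||
| register_opcode() below fills in one entry per matching encoding. */ | ||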
2777 | static disas_proc opcode_table[65536]; | ||
2778 | |||
2779 | static void | ||
2780 | register_opcode (disas_proc proc, uint16_t opcode, uint16_t mask) | ||
2781 | { | ||
2782 | int i; | ||
2783 | int from; | ||
2784 | int to; | ||
2785 | |||
2786 | /* Sanity check. All set bits must be included in the mask. */ | ||
2787 | if (opcode & ~mask) { | ||
2788 | fprintf(stderrstderr, | ||
2789 | "qemu internal error: bogus opcode definition %04x/%04x\n", | ||
2790 | opcode, mask); | ||
2791 | abort(); | ||
2792 | } | ||
2793 | /* This could probably be cleverer. For now just optimize the case where | ||
2794 | the top bits are known. */ | ||
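| /* Example: for INSN(tpf, 51f8, fff8, ...) the first clear mask bit is | ||
| bit 2, so i becomes 8 and all eight entries 0x51f8..0x51ff are filled in. | ||
| For a mask with interior holes such as INSN(cpushl, f428, ff38, ...) the | ||
| scan covers 0xf400..0xf4ff and the (i & mask) == opcode test keeps only | ||
| the 32 entries that also match the low mask bits. */ | ||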
2795 | /* Find the first zero bit in the mask. */ | ||
2796 | i = 0x8000; | ||
2797 | while ((i & mask) != 0) | ||
2798 | i >>= 1; | ||
2799 | /* Iterate over all combinations of this and lower bits. */ | ||
2800 | if (i == 0) | ||
2801 | i = 1; | ||
2802 | else | ||
2803 | i <<= 1; | ||
2804 | from = opcode & ~(i - 1); | ||
2805 | to = from + i; | ||
2806 | for (i = from; i < to; i++) { | ||
2807 | if ((i & mask) == opcode) | ||
2808 | opcode_table[i] = proc; | ||
2809 | } | ||
2810 | } | ||
2811 | |||
2812 | /* Register m68k opcode handlers. Order is important. | ||
2813 | Later insns override earlier ones. */ | ||
2814 | void register_m68k_insns (CPUM68KState *env) | ||
2815 | { | ||
2816 | #define INSN(name, opcode, mask, feature) do { \ | ||
2817 | if (m68k_feature(env, M68K_FEATURE_##feature)) \ | ||
2818 | register_opcode(disas_##name, 0x##opcode, 0x##mask); \ | ||
2819 | } while(0) | ||
2820 | INSN(undef, 0000, 0000, CF_ISA_A); | ||
2821 | INSN(arith_im, 0080, fff8, CF_ISA_A); | ||
2822 | INSN(bitrev, 00c0, fff8, CF_ISA_APLUSC); | ||
2823 | INSN(bitop_reg, 0100, f1c0, CF_ISA_A); | ||
2824 | INSN(bitop_reg, 0140, f1c0, CF_ISA_A); | ||
2825 | INSN(bitop_reg, 0180, f1c0, CF_ISA_A); | ||
2826 | INSN(bitop_reg, 01c0, f1c0, CF_ISA_A); | ||
2827 | INSN(arith_im, 0280, fff8, CF_ISA_A); | ||
2828 | INSN(byterev, 02c0, fff8, CF_ISA_APLUSC); | ||
2829 | INSN(arith_im, 0480, fff8, CF_ISA_A); | ||
2830 | INSN(ff1, 04c0, fff8, CF_ISA_APLUSC); | ||
2831 | INSN(arith_im, 0680, fff8, CF_ISA_A); | ||
2832 | INSN(bitop_im, 0800, ffc0, CF_ISA_A); | ||
2833 | INSN(bitop_im, 0840, ffc0, CF_ISA_A); | ||
2834 | INSN(bitop_im, 0880, ffc0, CF_ISA_A); | ||
2835 | INSN(bitop_im, 08c0, ffc0, CF_ISA_A); | ||
2836 | INSN(arith_im, 0a80, fff8, CF_ISA_A); | ||
2837 | INSN(arith_im, 0c00, ff38, CF_ISA_A); | ||
2838 | INSN(move, 1000, f000, CF_ISA_A); | ||
2839 | INSN(move, 2000, f000, CF_ISA_A); | ||
2840 | INSN(move, 3000, f000, CF_ISA_A); | ||
2841 | INSN(strldsr, 40e7, ffff, CF_ISA_APLUSC); | ||
2842 | INSN(negx, 4080, fff8, CF_ISA_A); | ||
2843 | INSN(move_from_sr, 40c0, fff8, CF_ISA_A); | ||
2844 | INSN(lea, 41c0, f1c0, CF_ISA_A); | ||
2845 | INSN(clr, 4200, ff00, CF_ISA_A); | ||
2846 | INSN(undef, 42c0, ffc0, CF_ISA_A); | ||
2847 | INSN(move_from_ccr, 42c0, fff8, CF_ISA_A); | ||
2848 | INSN(neg, 4480, fff8, CF_ISA_A); | ||
2849 | INSN(move_to_ccr, 44c0, ffc0, CF_ISA_A); | ||
2850 | INSN(not, 4680, fff8, CF_ISA_A); | ||
2851 | INSN(move_to_sr, 46c0, ffc0, CF_ISA_A); | ||
2852 | INSN(pea, 4840, ffc0, CF_ISA_A); | ||
2853 | INSN(swap, 4840, fff8, CF_ISA_A); | ||
2854 | INSN(movem, 48c0, fbc0, CF_ISA_A); | ||
2855 | INSN(ext, 4880, fff8, CF_ISA_A); | ||
2856 | INSN(ext, 48c0, fff8, CF_ISA_A); | ||
2857 | INSN(ext, 49c0, fff8, CF_ISA_A); | ||
2858 | INSN(tst, 4a00, ff00, CF_ISA_A); | ||
2859 | INSN(tas, 4ac0, ffc0, CF_ISA_B); | ||
2860 | INSN(halt, 4ac8, ffff, CF_ISA_A); | ||
2861 | INSN(pulse, 4acc, ffff, CF_ISA_A); | ||
2862 | INSN(illegal, 4afc, ffff, CF_ISA_A); | ||
2863 | INSN(mull, 4c00, ffc0, CF_ISA_A); | ||
2864 | INSN(divl, 4c40, ffc0, CF_ISA_A); | ||
2865 | INSN(sats, 4c80, fff8, CF_ISA_B); | ||
2866 | INSN(trap, 4e40, fff0, CF_ISA_A); | ||
2867 | INSN(link, 4e50, fff8, CF_ISA_A); | ||
2868 | INSN(unlk, 4e58, fff8, CF_ISA_A); | ||
2869 | INSN(move_to_usp, 4e60, fff8, USP); | ||
2870 | INSN(move_from_usp, 4e68, fff8, USP); | ||
2871 | INSN(nop, 4e71, ffff, CF_ISA_A); | ||
2872 | INSN(stop, 4e72, ffff, CF_ISA_A); | ||
2873 | INSN(rte, 4e73, ffff, CF_ISA_A); | ||
2874 | INSN(rts, 4e75, ffff, CF_ISA_A); | ||
2875 | INSN(movec, 4e7b, ffff, CF_ISA_A); | ||
2876 | INSN(jump, 4e80, ffc0, CF_ISA_A); | ||
2877 | INSN(jump, 4ec0, ffc0, CF_ISA_A); | ||
2878 | INSN(addsubq, 5180, f1c0, CF_ISA_A); | ||
2879 | INSN(scc, 50c0, f0f8, CF_ISA_A); | ||
2880 | INSN(addsubq, 5080, f1c0, CF_ISA_A); | ||
2881 | INSN(tpf, 51f8, fff8, CF_ISA_A); | ||
2882 | |||
2883 | /* Branch instructions. */ | ||
2884 | INSN(branch, 6000, f000, CF_ISA_A); | ||
2885 | /* Disable long branch instructions, then add back the ones we want. */ | ||
2886 | INSN(undef, 60ff, f0ff, CF_ISA_A); /* All long branches. */ | ||
2887 | INSN(branch, 60ff, f0ff, CF_ISA_B); | ||
2888 | INSN(undef, 60ff, ffff, CF_ISA_B); /* bra.l */ | ||
2889 | INSN(branch, 60ff, ffff, BRAL); | ||
2890 | |||
2891 | INSN(moveq, 7000, f100, CF_ISA_A); | ||
2892 | INSN(mvzs, 7100, f100, CF_ISA_B); | ||
2893 | INSN(or, 8000, f000, CF_ISA_A); | ||
2894 | INSN(divw, 80c0, f0c0, CF_ISA_A); | ||
2895 | INSN(addsub, 9000, f000, CF_ISA_A); | ||
2896 | INSN(subx, 9180, f1f8, CF_ISA_A); | ||
2897 | INSN(suba, 91c0, f1c0, CF_ISA_A); | ||
2898 | |||
2899 | INSN(undef_mac, a000, f000, CF_ISA_A); | ||
2900 | INSN(mac, a000, f100, CF_EMAC); | ||
2901 | INSN(from_mac, a180, f9b0, CF_EMAC); | ||
2902 | INSN(move_mac, a110, f9fc, CF_EMAC); | ||
2903 | INSN(from_macsr,a980, f9f0, CF_EMAC); | ||
2904 | INSN(from_mask, ad80, fff0, CF_EMAC); | ||
2905 | INSN(from_mext, ab80, fbf0, CF_EMAC); | ||
2906 | INSN(macsr_to_ccr, a9c0, ffff, CF_EMAC); | ||
2907 | INSN(to_mac, a100, f9c0, CF_EMAC); | ||
2908 | INSN(to_macsr, a900, ffc0, CF_EMAC); | ||
2909 | INSN(to_mext, ab00, fbc0, CF_EMAC); | ||
2910 | INSN(to_mask, ad00, ffc0, CF_EMAC); | ||
2911 | |||
2912 | INSN(mov3q, a140, f1c0, CF_ISA_B); | ||
2913 | INSN(cmp, b000, f1c0, CF_ISA_B); /* cmp.b */ | ||
2914 | INSN(cmp, b040, f1c0, CF_ISA_B); /* cmp.w */ | ||
2915 | INSN(cmpa, b0c0, f1c0, CF_ISA_B); /* cmpa.w */ | ||
2916 | INSN(cmp, b080, f1c0, CF_ISA_A); | ||
2917 | INSN(cmpa, b1c0, f1c0, CF_ISA_A); | ||
2918 | INSN(eor, b180, f1c0, CF_ISA_A); | ||
2919 | INSN(and, c000, f000, CF_ISA_A); | ||
2920 | INSN(mulw, c0c0, f0c0, CF_ISA_A); | ||
2921 | INSN(addsub, d000, f000, CF_ISA_A); | ||
2922 | INSN(addx, d180, f1f8, CF_ISA_A); | ||
2923 | INSN(adda, d1c0, f1c0, CF_ISA_A); | ||
2924 | INSN(shift_im, e080, f0f0, CF_ISA_A); | ||
2925 | INSN(shift_reg, e0a0, f0f0, CF_ISA_A); | ||
2926 | INSN(undef_fpu, f000, f000, CF_ISA_A); | ||
2927 | INSN(fpu, f200, ffc0, CF_FPU); | ||
2928 | INSN(fbcc, f280, ffc0, CF_FPU); | ||
2929 | INSN(frestore, f340, ffc0, CF_FPU); | ||
2930 | INSN(fsave, f340, ffc0, CF_FPU); | ||
2931 | INSN(intouch, f340, ffc0, CF_ISA_A); | ||
2932 | INSN(cpushl, f428, ff38, CF_ISA_A); | ||
2933 | INSN(wddata, fb00, ff00, CF_ISA_A); | ||
2934 | INSN(wdebug, fbc0, ffc0, CF_ISA_A); | ||
2935 | #undef INSN | ||
2936 | } | ||
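/*
 * A minimal, self-contained sketch (not part of the original file) of the
 * mask/pattern dispatch scheme the INSN() table above relies on, as the
 * "disable, then add back" comment suggests: each entry claims every 16-bit
 * word w with (w & mask) == opcode, and entries registered later overwrite
 * earlier ones.  The names below (demo_register, demo_table, ...) are
 * illustrative only and do not exist in QEMU; the block is guarded out so it
 * has no effect on the build and can be compiled separately.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

typedef void (*demo_handler)(uint16_t insn);

static demo_handler demo_table[65536];

/* Install 'h' for every instruction word matching (w & mask) == opcode.
   Later calls overwrite earlier ones: last registration wins. */
static void demo_register(demo_handler h, uint16_t opcode, uint16_t mask)
{
    for (uint32_t w = 0; w < 65536; w++) {
        if ((w & mask) == opcode) {
            demo_table[w] = h;
        }
    }
}

static void demo_undef(uint16_t insn)  { printf("%04x: undefined\n", insn); }
static void demo_branch(uint16_t insn) { printf("%04x: long branch\n", insn); }

int main(void)
{
    /* Mirror the ordering used for long branches above: disable them all
       first, then add back one specific form. */
    demo_register(demo_undef, 0x60ff, 0xf0ff);   /* all long branches */
    demo_register(demo_branch, 0x60ff, 0xffff);  /* bra.l only */

    demo_table[0x61ff](0x61ff);  /* still undefined (bsr.l) */
    demo_table[0x60ff](0x60ff);  /* re-enabled bra.l */
    return 0;
}
#endif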
2937 | |||
2938 | /* ??? Some of this implementation is not exception safe. We should always | ||
2939 | write back the result to memory before setting the condition codes. */ | ||
2940 | static void disas_m68k_insn(CPUM68KState *env, DisasContext *s) | ||
2941 | { | ||
2942 | uint16_t insn; | ||
2943 | |||
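/* Fetch the 16-bit opcode word at the current guest PC and step past it;
   each handler reads any extension words it needs on its own. */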
2944 | insn = lduw_code(s->pc); | ||
2945 | s->pc += 2; | ||
2946 | |||
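/* Dispatch through the per-opcode table populated by the INSN()
   registrations above, indexed directly by the raw instruction word. */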
2947 | opcode_table[insn](s, insn); | ||
2948 | } | ||
2949 | |||
2950 | /* generate intermediate code for basic block 'tb'. */ | ||
2951 | static inline void | ||
2952 | gen_intermediate_code_internal(CPUM68KState *env, TranslationBlock *tb, | ||
2953 | int search_pc) | ||
2954 | { | ||
2955 | DisasContext dc1, *dc = &dc1; | ||
2956 | uint16_t *gen_opc_end; | ||
2957 | CPUBreakpoint *bp; | ||
2958 | int j, lj; | ||
2959 | target_ulong pc_start; | ||
2960 | int pc_offset; | ||
2961 | int num_insns; | ||
2962 | int max_insns; | ||
2963 | |||
2964 | /* generate intermediate code */ | ||
2965 | pc_start = tb->pc; | ||
2966 | |||
2967 | dc->tb = tb; | ||
2968 | |||
2969 | gen_opc_end = gen_opc_buf + OPC_MAX_SIZE; | ||
2970 | |||
2971 | dc->env = env; | ||
2972 | dc->is_jmp = DISAS_NEXT; | ||
2973 | dc->pc = pc_start; | ||
2974 | dc->cc_op = CC_OP_DYNAMIC; | ||
2975 | dc->singlestep_enabled = env->singlestep_enabled; | ||
2976 | dc->fpcr = env->fpcr; | ||
2977 | dc->user = (env->sr & SR_S) == 0; | ||
2978 | dc->is_mem = 0; | ||
2979 | dc->done_mac = 0; | ||
2980 | lj = -1; | ||
2981 | num_insns = 0; | ||
2982 | max_insns = tb->cflags & CF_COUNT_MASK; | ||
2983 | if (max_insns == 0) | ||
2984 | max_insns = CF_COUNT_MASK; | ||
2985 | |||
2986 | gen_icount_start(); | ||
2987 | do { | ||
2988 | pc_offset = dc->pc - pc_start; | ||
2989 | gen_throws_exception = NULL; | ||
2990 | if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) { | ||
2991 | QTAILQ_FOREACH(bp, &env->breakpoints, entry) { | ||
2992 | if (bp->pc == dc->pc) { | ||
2993 | gen_exception(dc, dc->pc, EXCP_DEBUG); | ||
2994 | dc->is_jmp = DISAS_JUMP; | ||
2995 | break; | ||
2996 | } | ||
2997 | } | ||
2998 | if (dc->is_jmp) | ||
2999 | break; | ||
3000 | } | ||
3001 | if (search_pc) { | ||
3002 | j = gen_opc_ptr - gen_opc_buf; | ||
3003 | if (lj < j) { | ||
3004 | lj++; | ||
3005 | while (lj < j) | ||
3006 | gen_opc_instr_start[lj++] = 0; | ||
3007 | } | ||
3008 | gen_opc_pc[lj] = dc->pc; | ||
3009 | gen_opc_instr_start[lj] = 1; | ||
3010 | gen_opc_icount[lj] = num_insns; | ||
3011 | } | ||
3012 | if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) | ||
3013 | gen_io_start(); | ||
3014 | dc->insn_pc = dc->pc; | ||
3015 | disas_m68k_insn(env, dc); | ||
3016 | num_insns++; | ||
3017 | } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end && | ||
3018 | !env->singlestep_enabled && | ||
3019 | !singlestep && | ||
3020 | (pc_offset) < (TARGET_PAGE_SIZE - 32) && | ||
3021 | num_insns < max_insns); | ||
3022 | |||
3023 | if (tb->cflags & CF_LAST_IO) | ||
3024 | gen_io_end(); | ||
3025 | if (unlikely(env->singlestep_enabled)) { | ||
3026 | /* Make sure the pc is updated, and raise a debug exception. */ | ||
3027 | if (!dc->is_jmp) { | ||
3028 | gen_flush_cc_op(dc); | ||
3029 | tcg_gen_movi_i32(QREG_PC, dc->pc); | ||
3030 | } | ||
3031 | gen_helper_raise_exception(tcg_const_i32(EXCP_DEBUG)); | ||
3032 | } else { | ||
3033 | switch(dc->is_jmp) { | ||
3034 | case DISAS_NEXT: | ||
3035 | gen_flush_cc_op(dc); | ||
3036 | gen_jmp_tb(dc, 0, dc->pc); | ||
3037 | break; | ||
3038 | default: | ||
3039 | case DISAS_JUMP: | ||
3040 | case DISAS_UPDATE: | ||
3041 | gen_flush_cc_op(dc); | ||
3042 | /* indicate that the hash table must be used to find the next TB */ | ||
3043 | tcg_gen_exit_tb(0); | ||
3044 | break; | ||
3045 | case DISAS_TB_JUMP: | ||
3046 | /* nothing more to generate */ | ||
3047 | break; | ||
3048 | } | ||
3049 | } | ||
3050 | gen_icount_end(tb, num_insns); | ||
3051 | *gen_opc_ptr = INDEX_op_end; | ||
3052 | |||
3053 | #ifdef DEBUG_DISAS | ||
3054 | if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) { | ||
3055 | qemu_log("----------------\n"); | ||
3056 | qemu_log("IN: %s\n", lookup_symbol(pc_start)); | ||
3057 | log_target_disas(pc_start, dc->pc - pc_start, 0); | ||
3058 | qemu_log("\n"); | ||
3059 | } | ||
3060 | #endif | ||
3061 | if (search_pc) { | ||
3062 | j = gen_opc_ptr - gen_opc_buf; | ||
3063 | lj++; | ||
3064 | while (lj <= j) | ||
3065 | gen_opc_instr_start[lj++] = 0; | ||
3066 | } else { | ||
3067 | tb->size = dc->pc - pc_start; | ||
3068 | tb->icount = num_insns; | ||
3069 | } | ||
3070 | |||
3071 | //optimize_flags(); | ||
3072 | //expand_target_qops(); | ||
3073 | } | ||
3074 | |||
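/* Normal translation entry point: search_pc == 0, so the loop above only
   fills in tb->size and tb->icount. */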
3075 | void gen_intermediate_code(CPUM68KState *env, TranslationBlock *tb) | ||
3076 | { | ||
3077 | gen_intermediate_code_internal(env, tb, 0); | ||
3078 | } | ||
3079 | |||
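/* Re-translation entry point: search_pc == 1 records gen_opc_pc[],
   gen_opc_instr_start[] and gen_opc_icount[] so a host PC inside the TB can
   be mapped back to a guest PC (see restore_state_to_opc() below). */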
3080 | void gen_intermediate_code_pc(CPUM68KState *env, TranslationBlock *tb) | ||
3081 | { | ||
3082 | gen_intermediate_code_internal(env, tb, 1); | ||
3083 | } | ||
3084 | |||
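/* Dump the m68k register file (D0-D7, A0-A7, F0-F7, PC and the SR condition
   flags) in human-readable form, e.g. for the monitor and -d cpu logging. */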
3085 | void cpu_dump_state(CPUM68KState *env, FILE *f, fprintf_function cpu_fprintf, | ||
3086 | int flags) | ||
3087 | { | ||
3088 | int i; | ||
3089 | uint16_t sr; | ||
3090 | CPU_DoubleU u; | ||
3091 | for (i = 0; i < 8; i++) | ||
3092 | { | ||
3093 | u.d = env->fregs[i]; | ||
3094 | cpu_fprintf (f, "D%d = %08x A%d = %08x F%d = %08x%08x (%12g)\n", | ||
3095 | i, env->dregs[i], i, env->aregs[i], | ||
3096 | i, u.l.upper, u.l.lower, *(double *)&u.d); | ||
3097 | } | ||
3098 | cpu_fprintf (f, "PC = %08x ", env->pc); | ||
3099 | sr = env->sr; | ||
3100 | cpu_fprintf (f, "SR = %04x %c%c%c%c%c ", sr, (sr & 0x10) ? 'X' : '-', | ||
3101 | (sr & CCF_N) ? 'N' : '-', (sr & CCF_Z) ? 'Z' : '-', | ||
3102 | (sr & CCF_V) ? 'V' : '-', (sr & CCF_C) ? 'C' : '-'); | ||
3103 | cpu_fprintf (f, "FPRESULT = %12g\n", *(double *)&env->fp_result); | ||
3104 | } | ||
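/*
 * Sketch (not part of the original source) of the union trick cpu_dump_state()
 * uses above: CPU_DoubleU overlays a double with two 32-bit halves so the raw
 * F-register image can be printed next to its numeric value.  The
 * demo_double_u type below is a stand-in for the real CPU_DoubleU, which also
 * accounts for host endianness when naming the halves; the block is guarded
 * out and meant to be compiled on its own.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

typedef union {
    double d;
    struct {
        uint32_t lower;   /* assumes a little-endian host; CPU_DoubleU swaps
                             the field order on big-endian hosts */
        uint32_t upper;
    } l;
} demo_double_u;

int main(void)
{
    demo_double_u u;

    u.d = 1.5;
    /* Same "%08x%08x (%12g)" style as the F-register lines above. */
    printf("F0 = %08x%08x (%12g)\n", u.l.upper, u.l.lower, u.d);
    return 0;
}
#endif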
3105 | |||
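/* pc_pos indexes the gen_opc_pc[] array filled in by the search_pc pass
   above; restoring env->pc is all the m68k front end needs after an
   exception in the middle of a TB. */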
3106 | void restore_state_to_opc(CPUM68KState *env, TranslationBlock *tb, int pc_pos) | ||
3107 | { | ||
3108 | env->pc = gen_opc_pc[pc_pos]; | ||
3109 | } |