File: | target-ppc/translate.c |
Location: | line 8226, column 14 |
Description: | Value stored to 'tmp' during its initialization is never read |
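Note: the listing below reproduces the beginning of target-ppc/translate.c; the flagged statement at line 8226 lies outside this excerpt. As a hypothetical, minimal sketch of the "dead initialization" pattern this checker reports (illustrative names only, not the actual flagged code):

/* Hypothetical sketch: the initializer of 'tmp' is a dead store because
 * the variable is overwritten before it is ever read. */
static int example(int a, int b)
{
    int tmp = a;        /* value stored here is never read ... */
    tmp = a + b;        /* ... because tmp is reassigned before first use */
    return tmp;
}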
1 | /* |
2 | * PowerPC emulation for qemu: main translation routines. |
3 | * |
4 | * Copyright (c) 2003-2007 Jocelyn Mayer |
5 | * Copyright (C) 2011 Freescale Semiconductor, Inc. |
6 | * |
7 | * This library is free software; you can redistribute it and/or |
8 | * modify it under the terms of the GNU Lesser General Public |
9 | * License as published by the Free Software Foundation; either |
10 | * version 2 of the License, or (at your option) any later version. |
11 | * |
12 | * This library is distributed in the hope that it will be useful, |
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
15 | * Lesser General Public License for more details. |
16 | * |
17 | * You should have received a copy of the GNU Lesser General Public |
18 | * License along with this library; if not, see <http://www.gnu.org/licenses/>. |
19 | */ |
20 | |
21 | #include "cpu.h" |
22 | #include "disas/disas.h" |
23 | #include "tcg-op.h" |
24 | #include "qemu/host-utils.h" |
25 | |
26 | #include "helper.h" |
27 | #define GEN_HELPER 1 |
28 | #include "helper.h" |
29 | |
30 | #define CPU_SINGLE_STEP 0x1 |
31 | #define CPU_BRANCH_STEP 0x2 |
32 | #define GDBSTUB_SINGLE_STEP 0x4 |
33 | |
34 | /* Include definitions for instructions classes and implementations flags */ |
35 | //#define PPC_DEBUG_DISAS |
36 | //#define DO_PPC_STATISTICS |
37 | |
38 | #ifdef PPC_DEBUG_DISAS |
39 | # define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__) |
40 | #else |
41 | # define LOG_DISAS(...) do { } while (0) |
42 | #endif |
43 | /*****************************************************************************/ |
44 | /* Code translation helpers */ |
45 | |
46 | /* global register indexes */ |
47 | static TCGv_ptr cpu_env; |
48 | static char cpu_reg_names[10*3 + 22*4 /* GPR */ |
49 | #if !defined(TARGET_PPC64) |
50 | + 10*4 + 22*5 /* SPE GPRh */ |
51 | #endif |
52 | + 10*4 + 22*5 /* FPR */ |
53 | + 2*(10*6 + 22*7) /* AVRh, AVRl */ |
54 | + 10*5 + 22*6 /* VSR */ |
55 | + 8*5 /* CRF */]; |
56 | static TCGv cpu_gpr[32]; |
57 | #if !defined(TARGET_PPC64) |
58 | static TCGv cpu_gprh[32]; |
59 | #endif |
60 | static TCGv_i64 cpu_fpr[32]; |
61 | static TCGv_i64 cpu_avrh[32], cpu_avrl[32]; |
62 | static TCGv_i64 cpu_vsr[32]; |
63 | static TCGv_i32 cpu_crf[8]; |
64 | static TCGv cpu_nip; |
65 | static TCGv cpu_msr; |
66 | static TCGv cpu_ctr; |
67 | static TCGv cpu_lr; |
68 | #if defined(TARGET_PPC64) |
69 | static TCGv cpu_cfar; |
70 | #endif |
71 | static TCGv cpu_xer, cpu_so, cpu_ov, cpu_ca; |
72 | static TCGv cpu_reserve; |
73 | static TCGv cpu_fpscr; |
74 | static TCGv_i32 cpu_access_type; |
75 | |
76 | #include "exec/gen-icount.h" |
77 | |
78 | void ppc_translate_init(void) |
79 | { |
80 | int i; |
81 | char* p; |
82 | size_t cpu_reg_names_size; |
83 | static int done_init = 0; |
84 | |
85 | if (done_init) |
86 | return; |
87 | |
88 | cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env"); |
89 | |
90 | p = cpu_reg_names; |
91 | cpu_reg_names_size = sizeof(cpu_reg_names); |
92 | |
93 | for (i = 0; i < 8; i++) { |
94 | snprintf(p, cpu_reg_names_size, "crf%d", i); |
95 | cpu_crf[i] = tcg_global_mem_new_i32(TCG_AREG0, |
96 | offsetof(CPUPPCState, crf[i]), p); |
97 | p += 5; |
98 | cpu_reg_names_size -= 5; |
99 | } |
100 | |
101 | for (i = 0; i < 32; i++) { |
102 | snprintf(p, cpu_reg_names_size, "r%d", i); |
103 | cpu_gpr[i] = tcg_global_mem_new(TCG_AREG0, |
104 | offsetof(CPUPPCState, gpr[i]), p); |
105 | p += (i < 10) ? 3 : 4; |
106 | cpu_reg_names_size -= (i < 10) ? 3 : 4; |
107 | #if !defined(TARGET_PPC64) |
108 | snprintf(p, cpu_reg_names_size, "r%dH", i); |
109 | cpu_gprh[i] = tcg_global_mem_new_i32(TCG_AREG0, |
110 | offsetof(CPUPPCState, gprh[i]), p); |
111 | p += (i < 10) ? 4 : 5; |
112 | cpu_reg_names_size -= (i < 10) ? 4 : 5; |
113 | #endif |
114 | |
115 | snprintf(p, cpu_reg_names_size, "fp%d", i); |
116 | cpu_fpr[i] = tcg_global_mem_new_i64(TCG_AREG0, |
117 | offsetof(CPUPPCState, fpr[i]), p); |
118 | p += (i < 10) ? 4 : 5; |
119 | cpu_reg_names_size -= (i < 10) ? 4 : 5; |
120 | |
121 | snprintf(p, cpu_reg_names_size, "avr%dH", i); |
122 | #ifdef HOST_WORDS_BIGENDIAN |
123 | cpu_avrh[i] = tcg_global_mem_new_i64(TCG_AREG0, |
124 | offsetof(CPUPPCState, avr[i].u64[0]), p); |
125 | #else |
126 | cpu_avrh[i] = tcg_global_mem_new_i64(TCG_AREG0, |
127 | offsetof(CPUPPCState, avr[i].u64[1]), p); |
128 | #endif |
129 | p += (i < 10) ? 6 : 7; |
130 | cpu_reg_names_size -= (i < 10) ? 6 : 7; |
131 | |
132 | snprintf(p, cpu_reg_names_size, "avr%dL", i); |
133 | #ifdef HOST_WORDS_BIGENDIAN |
134 | cpu_avrl[i] = tcg_global_mem_new_i64(TCG_AREG0, |
135 | offsetof(CPUPPCState, avr[i].u64[1]), p); |
136 | #else |
137 | cpu_avrl[i] = tcg_global_mem_new_i64(TCG_AREG0, |
138 | offsetof(CPUPPCState, avr[i].u64[0]), p); |
139 | #endif |
140 | p += (i < 10) ? 6 : 7; |
141 | cpu_reg_names_size -= (i < 10) ? 6 : 7; |
142 | snprintf(p, cpu_reg_names_size, "vsr%d", i); |
144 | cpu_vsr[i] = tcg_global_mem_new_i64(TCG_AREG0, |
144 | offsetof(CPUPPCState, vsr[i]), p); |
145 | p += (i < 10) ? 5 : 6; |
146 | cpu_reg_names_size -= (i < 10) ? 5 : 6; |
147 | } |
148 | |
149 | cpu_nip = tcg_global_mem_new(TCG_AREG0, |
150 | offsetof(CPUPPCState, nip), "nip"); |
151 | |
152 | cpu_msr = tcg_global_mem_new(TCG_AREG0, |
153 | offsetof(CPUPPCState, msr), "msr"); |
154 | |
155 | cpu_ctr = tcg_global_mem_new(TCG_AREG0, |
156 | offsetof(CPUPPCState, ctr), "ctr"); |
157 | |
158 | cpu_lr = tcg_global_mem_new(TCG_AREG0, |
159 | offsetof(CPUPPCState, lr), "lr"); |
160 | |
161 | #if defined(TARGET_PPC64) |
162 | cpu_cfar = tcg_global_mem_new(TCG_AREG0, |
163 | offsetof(CPUPPCState, cfar), "cfar"); |
164 | #endif |
165 | |
166 | cpu_xer = tcg_global_mem_new(TCG_AREG0, |
167 | offsetof(CPUPPCState, xer), "xer"); |
168 | cpu_so = tcg_global_mem_new(TCG_AREG0, |
169 | offsetof(CPUPPCState, so), "SO"); |
170 | cpu_ov = tcg_global_mem_new(TCG_AREG0, |
171 | offsetof(CPUPPCState, ov), "OV"); |
172 | cpu_ca = tcg_global_mem_new(TCG_AREG0, |
173 | offsetof(CPUPPCState, ca), "CA"); |
174 | |
175 | cpu_reserve = tcg_global_mem_new(TCG_AREG0, |
176 | offsetof(CPUPPCState, reserve_addr), |
177 | "reserve_addr"); |
178 | |
179 | cpu_fpscr = tcg_global_mem_new(TCG_AREG0, |
180 | offsetof(CPUPPCState, fpscr), "fpscr"); |
181 | |
182 | cpu_access_type = tcg_global_mem_new_i32(TCG_AREG0, |
183 | offsetof(CPUPPCState, access_type), "access_type"); |
184 | |
185 | done_init = 1; |
186 | } |
187 | |
188 | /* internal defines */ |
189 | typedef struct DisasContext { |
190 | struct TranslationBlock *tb; |
191 | target_ulong nip; |
192 | uint32_t opcode; |
193 | uint32_t exception; |
194 | /* Routine used to access memory */ |
195 | int mem_idx; |
196 | int access_type; |
197 | /* Translation flags */ |
198 | int le_mode; |
199 | #if defined(TARGET_PPC64) |
200 | int sf_mode; |
201 | int has_cfar; |
202 | #endif |
203 | int fpu_enabled; |
204 | int altivec_enabled; |
205 | int vsx_enabled; |
206 | int spe_enabled; |
207 | ppc_spr_t *spr_cb; /* Needed to check rights for mfspr/mtspr */ |
208 | int singlestep_enabled; |
209 | uint64_t insns_flags; |
210 | uint64_t insns_flags2; |
211 | } DisasContext; |
212 | |
213 | /* True when active word size < size of target_long. */ |
214 | #ifdef TARGET_PPC64 |
215 | # define NARROW_MODE(C) (!(C)->sf_mode) |
216 | #else |
217 | # define NARROW_MODE(C) 0 |
218 | #endif |
219 | |
220 | struct opc_handler_t { |
221 | /* invalid bits for instruction 1 (Rc(opcode) == 0) */ |
222 | uint32_t inval1; |
223 | /* invalid bits for instruction 2 (Rc(opcode) == 1) */ |
224 | uint32_t inval2; |
225 | /* instruction type */ |
226 | uint64_t type; |
227 | /* extended instruction type */ |
228 | uint64_t type2; |
229 | /* handler */ |
230 | void (*handler)(DisasContext *ctx); |
231 | #if defined(DO_PPC_STATISTICS) || defined(PPC_DUMP_CPU) |
232 | const char *oname; |
233 | #endif |
234 | #if defined(DO_PPC_STATISTICS) |
235 | uint64_t count; |
236 | #endif |
237 | }; |
238 | |
239 | static inline void gen_reset_fpstatus(void) |
240 | { |
241 | gen_helper_reset_fpstatus(cpu_env); |
242 | } |
243 | |
244 | static inline void gen_compute_fprf(TCGv_i64 arg, int set_fprf, int set_rc) |
245 | { |
246 | TCGv_i32 t0 = tcg_temp_new_i32(); |
247 | |
248 | if (set_fprf != 0) { |
249 | /* This case might be optimized later */ |
250 | tcg_gen_movi_i32(t0, 1); |
251 | gen_helper_compute_fprf(t0, cpu_env, arg, t0); |
252 | if (unlikely(set_rc)) { |
253 | tcg_gen_mov_i32(cpu_crf[1], t0); |
254 | } |
255 | gen_helper_float_check_status(cpu_env); |
256 | } else if (unlikely(set_rc)) { |
257 | /* We always need to compute fpcc */ |
258 | tcg_gen_movi_i32(t0, 0); |
259 | gen_helper_compute_fprf(t0, cpu_env, arg, t0); |
260 | tcg_gen_mov_i32(cpu_crf[1], t0); |
261 | } |
262 | |
263 | tcg_temp_free_i32(t0); |
264 | } |
265 | |
266 | static inline void gen_set_access_type(DisasContext *ctx, int access_type) |
267 | { |
268 | if (ctx->access_type != access_type) { |
269 | tcg_gen_movi_i32(cpu_access_type, access_type); |
270 | ctx->access_type = access_type; |
271 | } |
272 | } |
273 | |
274 | static inline void gen_update_nip(DisasContext *ctx, target_ulong nip) |
275 | { |
276 | if (NARROW_MODE(ctx)) { |
277 | nip = (uint32_t)nip; |
278 | } |
279 | tcg_gen_movi_tl(cpu_nip, nip); |
280 | } |
281 | |
282 | static inline void gen_exception_err(DisasContext *ctx, uint32_t excp, uint32_t error) |
283 | { |
284 | TCGv_i32 t0, t1; |
285 | if (ctx->exception == POWERPC_EXCP_NONE) { |
286 | gen_update_nip(ctx, ctx->nip); |
287 | } |
288 | t0 = tcg_const_i32(excp); |
289 | t1 = tcg_const_i32(error); |
290 | gen_helper_raise_exception_err(cpu_env, t0, t1); |
291 | tcg_temp_free_i32(t0); |
292 | tcg_temp_free_i32(t1); |
293 | ctx->exception = (excp); |
294 | } |
295 | |
296 | static inline void gen_exception(DisasContext *ctx, uint32_t excp) |
297 | { |
298 | TCGv_i32 t0; |
299 | if (ctx->exception == POWERPC_EXCP_NONE) { |
300 | gen_update_nip(ctx, ctx->nip); |
301 | } |
302 | t0 = tcg_const_i32(excp); |
303 | gen_helper_raise_exception(cpu_env, t0); |
304 | tcg_temp_free_i32(t0); |
305 | ctx->exception = (excp); |
306 | } |
307 | |
308 | static inline void gen_debug_exception(DisasContext *ctx) |
309 | { |
310 | TCGv_i32 t0; |
311 | |
312 | if ((ctx->exception != POWERPC_EXCP_BRANCH) && |
313 | (ctx->exception != POWERPC_EXCP_SYNC)) { |
314 | gen_update_nip(ctx, ctx->nip); |
315 | } |
316 | t0 = tcg_const_i32(EXCP_DEBUG); |
317 | gen_helper_raise_exception(cpu_env, t0); |
318 | tcg_temp_free_i32(t0); |
319 | } |
320 | |
321 | static inline void gen_inval_exception(DisasContext *ctx, uint32_t error) |
322 | { |
323 | gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL | error); |
324 | } |
325 | |
326 | /* Stop translation */ |
327 | static inline void gen_stop_exception(DisasContext *ctx) |
328 | { |
329 | gen_update_nip(ctx, ctx->nip); |
330 | ctx->exception = POWERPC_EXCP_STOP; |
331 | } |
332 | |
333 | /* No need to update nip here, as execution flow will change */ |
334 | static inline void gen_sync_exception(DisasContext *ctx) |
335 | { |
336 | ctx->exception = POWERPC_EXCP_SYNC; |
337 | } |
338 | |
339 | #define GEN_HANDLER(name, opc1, opc2, opc3, inval, type) \ |
340 | GEN_OPCODE(name, opc1, opc2, opc3, inval, type, PPC_NONE) |
341 | |
342 | #define GEN_HANDLER_E(name, opc1, opc2, opc3, inval, type, type2) \ |
343 | GEN_OPCODE(name, opc1, opc2, opc3, inval, type, type2) |
344 | |
345 | #define GEN_HANDLER2(name, onam, opc1, opc2, opc3, inval, type) \ |
346 | GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, PPC_NONE) |
347 | |
348 | #define GEN_HANDLER2_E(name, onam, opc1, opc2, opc3, inval, type, type2) \ |
349 | GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, type2) |
350 | |
351 | typedef struct opcode_t { |
352 | unsigned char opc1, opc2, opc3; |
353 | #if HOST_LONG_BITS == 64 /* Explicitly align to 64 bits */ |
354 | unsigned char pad[5]; |
355 | #else |
356 | unsigned char pad[1]; |
357 | #endif |
358 | opc_handler_t handler; |
359 | const char *oname; |
360 | } opcode_t; |
361 | |
362 | /*****************************************************************************/ |
363 | /*** Instruction decoding ***/ |
364 | #define EXTRACT_HELPER(name, shift, nb) \ |
365 | static inline uint32_t name(uint32_t opcode) \ |
366 | { \ |
367 | return (opcode >> (shift)) & ((1 << (nb)) - 1); \ |
368 | } |
369 | |
370 | #define EXTRACT_SHELPER(name, shift, nb) \ |
371 | static inline int32_t name(uint32_t opcode) \ |
372 | { \ |
373 | return (int16_t)((opcode >> (shift)) & ((1 << (nb)) - 1)); \ |
374 | } |
375 | |
376 | #define EXTRACT_HELPER_SPLIT(name, shift1, nb1, shift2, nb2) \ |
377 | static inline uint32_t name(uint32_t opcode) \ |
378 | { \ |
379 | return (((opcode >> (shift1)) & ((1 << (nb1)) - 1)) << nb2) | \ |
380 | ((opcode >> (shift2)) & ((1 << (nb2)) - 1)); \ |
381 | } |
382 | /* Opcode part 1 */ |
383 | EXTRACT_HELPER(opc1, 26, 6); |
384 | /* Opcode part 2 */ |
385 | EXTRACT_HELPER(opc2, 1, 5); |
386 | /* Opcode part 3 */ |
387 | EXTRACT_HELPER(opc3, 6, 5); |
388 | /* Update Cr0 flags */ |
389 | EXTRACT_HELPER(Rc, 0, 1); |
390 | /* Destination */ |
391 | EXTRACT_HELPER(rD, 21, 5); |
392 | /* Source */ |
393 | EXTRACT_HELPER(rS, 21, 5); |
394 | /* First operand */ |
395 | EXTRACT_HELPER(rA, 16, 5); |
396 | /* Second operand */ |
397 | EXTRACT_HELPER(rB, 11, 5); |
398 | /* Third operand */ |
399 | EXTRACT_HELPER(rC, 6, 5); |
400 | /*** Get CRn ***/ |
401 | EXTRACT_HELPER(crfD, 23, 3); |
402 | EXTRACT_HELPER(crfS, 18, 3); |
403 | EXTRACT_HELPER(crbD, 21, 5); |
404 | EXTRACT_HELPER(crbA, 16, 5); |
405 | EXTRACT_HELPER(crbB, 11, 5); |
406 | /* SPR / TBL */ |
407 | EXTRACT_HELPER(_SPR, 11, 10); |
408 | static inline uint32_t SPR(uint32_t opcode) |
409 | { |
410 | uint32_t sprn = _SPR(opcode); |
411 | |
412 | return ((sprn >> 5) & 0x1F) | ((sprn & 0x1F) << 5); |
413 | } |
414 | /*** Get constants ***/ |
415 | EXTRACT_HELPER(IMM, 12, 8); |
416 | /* 16 bits signed immediate value */ |
417 | EXTRACT_SHELPER(SIMM, 0, 16); |
418 | /* 16 bits unsigned immediate value */ |
419 | EXTRACT_HELPER(UIMM, 0, 16); |
420 | /* 5 bits signed immediate value */ |
421 | EXTRACT_HELPER(SIMM5, 16, 5); |
422 | /* 5 bits signed immediate value */ |
423 | EXTRACT_HELPER(UIMM5, 16, 5); |
424 | /* Bit count */ |
425 | EXTRACT_HELPER(NB, 11, 5); |
426 | /* Shift count */ |
427 | EXTRACT_HELPER(SH, 11, 5); |
428 | /* Vector shift count */ |
429 | EXTRACT_HELPER(VSH, 6, 4); |
430 | /* Mask start */ |
431 | EXTRACT_HELPER(MB, 6, 5); |
432 | /* Mask end */ |
433 | EXTRACT_HELPER(ME, 1, 5); |
434 | /* Trap operand */ |
435 | EXTRACT_HELPER(TO, 21, 5); |
436 | |
437 | EXTRACT_HELPER(CRM, 12, 8); |
438 | EXTRACT_HELPER(SR, 16, 4); |
439 | |
440 | /* mtfsf/mtfsfi */ |
441 | EXTRACT_HELPER(FPBF, 23, 3); |
442 | EXTRACT_HELPER(FPIMM, 12, 4); |
443 | EXTRACT_HELPER(FPL, 25, 1); |
444 | EXTRACT_HELPER(FPFLM, 17, 8); |
445 | EXTRACT_HELPER(FPW, 16, 1); |
446 | |
447 | /*** Jump target decoding ***/ |
448 | /* Displacement */ |
449 | EXTRACT_SHELPER(d, 0, 16); |
450 | /* Immediate address */ |
451 | static inline target_ulong LI(uint32_t opcode) |
452 | { |
453 | return (opcode >> 0) & 0x03FFFFFC; |
454 | } |
455 | |
456 | static inline uint32_t BD(uint32_t opcode) |
457 | { |
458 | return (opcode >> 0) & 0xFFFC; |
459 | } |
460 | |
461 | EXTRACT_HELPER(BO, 21, 5); |
462 | EXTRACT_HELPER(BI, 16, 5); |
463 | /* Absolute/relative address */ |
464 | EXTRACT_HELPER(AA, 1, 1); |
465 | /* Link */ |
466 | EXTRACT_HELPER(LK, 0, 1); |
467 | |
468 | /* Create a mask between <start> and <end> bits */ |
469 | static inline target_ulong MASK(uint32_t start, uint32_t end) |
470 | { |
471 | target_ulong ret; |
472 | |
473 | #if defined(TARGET_PPC64) |
474 | if (likely(start == 0)) { |
475 | ret = UINT64_MAX << (63 - end); |
476 | } else if (likely(end == 63)) { |
477 | ret = UINT64_MAX >> start; |
478 | } |
479 | #else |
480 | if (likely(start == 0)) { |
481 | ret = UINT32_MAX << (31 - end); |
482 | } else if (likely(end == 31)) { |
483 | ret = UINT32_MAX >> start; |
484 | } |
485 | #endif |
486 | else { |
487 | ret = (((target_ulong)(-1ULL)) >> (start)) ^ |
488 | (((target_ulong)(-1ULL) >> (end)) >> 1); |
489 | if (unlikely(start > end)) |
490 | return ~ret; |
491 | } |
492 | |
493 | return ret; |
494 | } |
495 | |
496 | EXTRACT_HELPER_SPLIT(xT, 0, 1, 21, 5); |
497 | EXTRACT_HELPER_SPLIT(xS, 0, 1, 21, 5); |
498 | EXTRACT_HELPER_SPLIT(xA, 2, 1, 16, 5); |
499 | EXTRACT_HELPER_SPLIT(xB, 1, 1, 11, 5); |
500 | EXTRACT_HELPER_SPLIT(xC, 3, 1, 6, 5); |
501 | EXTRACT_HELPER(DM, 8, 2); |
502 | EXTRACT_HELPER(UIM, 16, 2); |
503 | EXTRACT_HELPER(SHW, 8, 2); |
504 | /*****************************************************************************/ |
505 | /* PowerPC instructions table */ |
506 | |
507 | #if defined(DO_PPC_STATISTICS) |
508 | #define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2) \ |
509 | { \ |
510 | .opc1 = op1, \ |
511 | .opc2 = op2, \ |
512 | .opc3 = op3, \ |
513 | .pad = { 0, }, \ |
514 | .handler = { \ |
515 | .inval1 = invl, \ |
516 | .type = _typ, \ |
517 | .type2 = _typ2, \ |
518 | .handler = &gen_##name, \ |
519 | .oname = stringify(name), \ |
520 | }, \ |
521 | .oname = stringify(name), \ |
522 | } |
523 | #define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2) \ |
524 | { \ |
525 | .opc1 = op1, \ |
526 | .opc2 = op2, \ |
527 | .opc3 = op3, \ |
528 | .pad = { 0, }, \ |
529 | .handler = { \ |
530 | .inval1 = invl1, \ |
531 | .inval2 = invl2, \ |
532 | .type = _typ, \ |
533 | .type2 = _typ2, \ |
534 | .handler = &gen_##name, \ |
535 | .oname = stringify(name), \ |
536 | }, \ |
537 | .oname = stringify(name), \ |
538 | } |
539 | #define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2) \ |
540 | { \ |
541 | .opc1 = op1, \ |
542 | .opc2 = op2, \ |
543 | .opc3 = op3, \ |
544 | .pad = { 0, }, \ |
545 | .handler = { \ |
546 | .inval1 = invl, \ |
547 | .type = _typ, \ |
548 | .type2 = _typ2, \ |
549 | .handler = &gen_##name, \ |
550 | .oname = onam, \ |
551 | }, \ |
552 | .oname = onam, \ |
553 | } |
554 | #else |
555 | #define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2) \ |
556 | { \ |
557 | .opc1 = op1, \ |
558 | .opc2 = op2, \ |
559 | .opc3 = op3, \ |
560 | .pad = { 0, }, \ |
561 | .handler = { \ |
562 | .inval1 = invl, \ |
563 | .type = _typ, \ |
564 | .type2 = _typ2, \ |
565 | .handler = &gen_##name, \ |
566 | }, \ |
567 | .oname = stringify(name), \ |
568 | } |
569 | #define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2) \ |
570 | { \ |
571 | .opc1 = op1, \ |
572 | .opc2 = op2, \ |
573 | .opc3 = op3, \ |
574 | .pad = { 0, }, \ |
575 | .handler = { \ |
576 | .inval1 = invl1, \ |
577 | .inval2 = invl2, \ |
578 | .type = _typ, \ |
579 | .type2 = _typ2, \ |
580 | .handler = &gen_##name, \ |
581 | }, \ |
582 | .oname = stringify(name), \ |
583 | } |
584 | #define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2) \ |
585 | { \ |
586 | .opc1 = op1, \ |
587 | .opc2 = op2, \ |
588 | .opc3 = op3, \ |
589 | .pad = { 0, }, \ |
590 | .handler = { \ |
591 | .inval1 = invl, \ |
592 | .type = _typ, \ |
593 | .type2 = _typ2, \ |
594 | .handler = &gen_##name, \ |
595 | }, \ |
596 | .oname = onam, \ |
597 | } |
598 | #endif |
599 | |
600 | /* SPR load/store helpers */ |
601 | static inline void gen_load_spr(TCGv t, int reg) |
602 | { |
603 | tcg_gen_ld_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg])); |
604 | } |
605 | |
606 | static inline void gen_store_spr(int reg, TCGv t) |
607 | { |
608 | tcg_gen_st_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg])); |
609 | } |
610 | |
611 | /* Invalid instruction */ |
612 | static void gen_invalid(DisasContext *ctx) |
613 | { |
614 | gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); |
615 | } |
616 | |
617 | static opc_handler_t invalid_handler = { |
618 | .inval1 = 0xFFFFFFFF, |
619 | .inval2 = 0xFFFFFFFF, |
620 | .type = PPC_NONE, |
621 | .type2 = PPC_NONE, |
622 | .handler = gen_invalid, |
623 | }; |
624 | |
625 | /*** Integer comparison ***/ |
626 | |
627 | static inline void gen_op_cmp(TCGv arg0, TCGv arg1, int s, int crf) |
628 | { |
629 | TCGv t0 = tcg_temp_new(); |
630 | TCGv_i32 t1 = tcg_temp_new_i32(); |
631 | |
632 | tcg_gen_trunc_tl_i32(cpu_crf[crf], cpu_so); |
633 | |
634 | tcg_gen_setcond_tl((s ? TCG_COND_LT: TCG_COND_LTU), t0, arg0, arg1); |
635 | tcg_gen_trunc_tl_i32(t1, t0); |
636 | tcg_gen_shli_i32(t1, t1, CRF_LT); |
637 | tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t1); |
638 | |
639 | tcg_gen_setcond_tl((s ? TCG_COND_GT: TCG_COND_GTU), t0, arg0, arg1); |
640 | tcg_gen_trunc_tl_i32(t1, t0); |
641 | tcg_gen_shli_i32(t1, t1, CRF_GT); |
642 | tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t1); |
643 | |
644 | tcg_gen_setcond_tl(TCG_COND_EQ, t0, arg0, arg1); |
645 | tcg_gen_trunc_tl_i32(t1, t0); |
646 | tcg_gen_shli_i32(t1, t1, CRF_EQ); |
647 | tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t1); |
648 | |
649 | tcg_temp_free(t0); |
650 | tcg_temp_free_i32(t1); |
651 | } |
652 | |
653 | static inline void gen_op_cmpi(TCGv arg0, target_ulong arg1, int s, int crf) |
654 | { |
655 | TCGv t0 = tcg_const_tl(arg1); |
656 | gen_op_cmp(arg0, t0, s, crf); |
657 | tcg_temp_free(t0); |
658 | } |
659 | |
660 | static inline void gen_op_cmp32(TCGv arg0, TCGv arg1, int s, int crf) |
661 | { |
662 | TCGv t0, t1; |
663 | t0 = tcg_temp_new(); |
664 | t1 = tcg_temp_new(); |
665 | if (s) { |
666 | tcg_gen_ext32s_tl(t0, arg0); |
667 | tcg_gen_ext32s_tl(t1, arg1); |
668 | } else { |
669 | tcg_gen_ext32u_tl(t0, arg0); |
670 | tcg_gen_ext32u_tl(t1, arg1); |
671 | } |
672 | gen_op_cmp(t0, t1, s, crf); |
673 | tcg_temp_free(t1); |
674 | tcg_temp_free(t0); |
675 | } |
676 | |
677 | static inline void gen_op_cmpi32(TCGv arg0, target_ulong arg1, int s, int crf) |
678 | { |
679 | TCGv t0 = tcg_const_tl(arg1); |
680 | gen_op_cmp32(arg0, t0, s, crf); |
681 | tcg_temp_free(t0); |
682 | } |
683 | |
684 | static inline void gen_set_Rc0(DisasContext *ctx, TCGv reg) |
685 | { |
686 | if (NARROW_MODE(ctx)) { |
687 | gen_op_cmpi32(reg, 0, 1, 0); |
688 | } else { |
689 | gen_op_cmpi(reg, 0, 1, 0); |
690 | } |
691 | } |
692 | |
693 | /* cmp */ |
694 | static void gen_cmp(DisasContext *ctx) |
695 | { |
696 | if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) { |
697 | gen_op_cmp(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], |
698 | 1, crfD(ctx->opcode)); |
699 | } else { |
700 | gen_op_cmp32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], |
701 | 1, crfD(ctx->opcode)); |
702 | } |
703 | } |
704 | |
705 | /* cmpi */ |
706 | static void gen_cmpi(DisasContext *ctx) |
707 | { |
708 | if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) { |
709 | gen_op_cmpi(cpu_gpr[rA(ctx->opcode)], SIMM(ctx->opcode), |
710 | 1, crfD(ctx->opcode)); |
711 | } else { |
712 | gen_op_cmpi32(cpu_gpr[rA(ctx->opcode)], SIMM(ctx->opcode), |
713 | 1, crfD(ctx->opcode)); |
714 | } |
715 | } |
716 | |
717 | /* cmpl */ |
718 | static void gen_cmpl(DisasContext *ctx) |
719 | { |
720 | if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) { |
721 | gen_op_cmp(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], |
722 | 0, crfD(ctx->opcode)); |
723 | } else { |
724 | gen_op_cmp32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], |
725 | 0, crfD(ctx->opcode)); |
726 | } |
727 | } |
728 | |
729 | /* cmpli */ |
730 | static void gen_cmpli(DisasContext *ctx) |
731 | { |
732 | if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) { |
733 | gen_op_cmpi(cpu_gpr[rA(ctx->opcode)], UIMM(ctx->opcode), |
734 | 0, crfD(ctx->opcode)); |
735 | } else { |
736 | gen_op_cmpi32(cpu_gpr[rA(ctx->opcode)], UIMM(ctx->opcode), |
737 | 0, crfD(ctx->opcode)); |
738 | } |
739 | } |
740 | |
741 | /* isel (PowerPC 2.03 specification) */ |
742 | static void gen_isel(DisasContext *ctx) |
743 | { |
744 | int l1, l2; |
745 | uint32_t bi = rC(ctx->opcode); |
746 | uint32_t mask; |
747 | TCGv_i32 t0; |
748 | |
749 | l1 = gen_new_label(); |
750 | l2 = gen_new_label(); |
751 | |
752 | mask = 1 << (3 - (bi & 0x03)); |
753 | t0 = tcg_temp_new_i32(); |
754 | tcg_gen_andi_i32(t0, cpu_crf[bi >> 2], mask); |
755 | tcg_gen_brcondi_i32(TCG_COND_EQ, t0, 0, l1); |
756 | if (rA(ctx->opcode) == 0) |
757 | tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], 0); |
758 | else |
759 | tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); |
760 | tcg_gen_br(l2); |
761 | gen_set_label(l1); |
762 | tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); |
763 | gen_set_label(l2); |
764 | tcg_temp_free_i32(t0); |
765 | } |
766 | |
767 | /* cmpb: PowerPC 2.05 specification */ |
768 | static void gen_cmpb(DisasContext *ctx) |
769 | { |
770 | gen_helper_cmpb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], |
771 | cpu_gpr[rB(ctx->opcode)]); |
772 | } |
773 | |
774 | /*** Integer arithmetic ***/ |
775 | |
776 | static inline void gen_op_arith_compute_ov(DisasContext *ctx, TCGv arg0, |
777 | TCGv arg1, TCGv arg2, int sub) |
778 | { |
779 | TCGv t0 = tcg_temp_new(); |
780 | |
781 | tcg_gen_xor_tl(cpu_ov, arg0, arg2); |
782 | tcg_gen_xor_tl(t0, arg1, arg2); |
783 | if (sub) { |
784 | tcg_gen_and_tl(cpu_ov, cpu_ov, t0); |
785 | } else { |
786 | tcg_gen_andc_tl(cpu_ov, cpu_ov, t0); |
787 | } |
788 | tcg_temp_free(t0); |
789 | if (NARROW_MODE(ctx)) { |
790 | tcg_gen_ext32s_tl(cpu_ov, cpu_ov); |
791 | } |
792 | tcg_gen_shri_tl(cpu_ov, cpu_ov, TARGET_LONG_BITS - 1); |
793 | tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov); |
794 | } |
795 | |
796 | /* Common add function */ |
797 | static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1, |
798 | TCGv arg2, bool add_ca, bool compute_ca, |
799 | bool compute_ov, bool compute_rc0) |
800 | { |
801 | TCGv t0 = ret; |
802 | |
803 | if (compute_ca || compute_ov) { |
804 | t0 = tcg_temp_new(); |
805 | } |
806 | |
807 | if (compute_ca) { |
808 | if (NARROW_MODE(ctx)) { |
809 | /* Caution: a non-obvious corner case of the spec is that we |
810 | must produce the *entire* 64-bit addition, but produce the |
811 | carry into bit 32. */ |
812 | TCGv t1 = tcg_temp_new(); |
813 | tcg_gen_xor_tl(t1, arg1, arg2); /* add without carry */ |
814 | tcg_gen_add_tl(t0, arg1, arg2); |
815 | if (add_ca) { |
816 | tcg_gen_add_tl(t0, t0, cpu_ca); |
817 | } |
818 | tcg_gen_xor_tl(cpu_ca, t0, t1); /* bits changed w/ carry */ |
819 | tcg_temp_free(t1); |
820 | tcg_gen_shri_tl(cpu_ca, cpu_ca, 32); /* extract bit 32 */ |
821 | tcg_gen_andi_tl(cpu_ca, cpu_ca, 1); |
822 | } else { |
823 | TCGv zero = tcg_const_tl(0); |
824 | if (add_ca) { |
825 | tcg_gen_add2_tl(t0, cpu_ca, arg1, zero, cpu_ca, zero); |
826 | tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, arg2, zero); |
827 | } else { |
828 | tcg_gen_add2_tl(t0, cpu_ca, arg1, zero, arg2, zero); |
829 | } |
830 | tcg_temp_free(zero); |
831 | } |
832 | } else { |
833 | tcg_gen_add_tl(t0, arg1, arg2); |
834 | if (add_ca) { |
835 | tcg_gen_add_tl(t0, t0, cpu_ca); |
836 | } |
837 | } |
838 | |
839 | if (compute_ov) { |
840 | gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 0); |
841 | } |
842 | if (unlikely(compute_rc0)) { |
843 | gen_set_Rc0(ctx, t0); |
844 | } |
845 | |
846 | if (!TCGV_EQUAL(t0, ret)) { |
847 | tcg_gen_mov_tl(ret, t0); |
848 | tcg_temp_free(t0); |
849 | } |
850 | } |
851 | /* Add functions with two operands */ |
852 | #define GEN_INT_ARITH_ADD(name, opc3, add_ca, compute_ca, compute_ov) \ |
853 | static void glue(gen_, name)(DisasContext *ctx) \ |
854 | { \ |
855 | gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], \ |
856 | cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \ |
857 | add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \ |
858 | } |
859 | /* Add functions with one operand and one immediate */ |
860 | #define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val, \ |
861 | add_ca, compute_ca, compute_ov) \ |
862 | static void glue(gen_, name)(DisasContext *ctx) \ |
863 | { \ |
864 | TCGv t0 = tcg_const_tl(const_val); \ |
865 | gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], \ |
866 | cpu_gpr[rA(ctx->opcode)], t0, \ |
867 | add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \ |
868 | tcg_temp_free(t0); \ |
869 | } |
870 | |
871 | /* add add. addo addo. */ |
872 | GEN_INT_ARITH_ADD(add, 0x08, 0, 0, 0) |
873 | GEN_INT_ARITH_ADD(addo, 0x18, 0, 0, 1) |
874 | /* addc addc. addco addco. */ |
875 | GEN_INT_ARITH_ADD(addc, 0x00, 0, 1, 0) |
876 | GEN_INT_ARITH_ADD(addco, 0x10, 0, 1, 1) |
877 | /* adde adde. addeo addeo. */ |
878 | GEN_INT_ARITH_ADD(adde, 0x04, 1, 1, 0) |
879 | GEN_INT_ARITH_ADD(addeo, 0x14, 1, 1, 1) |
880 | /* addme addme. addmeo addmeo. */ |
881 | GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, 1, 1, 0) |
882 | GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, 1, 1, 1) |
883 | /* addze addze. addzeo addzeo.*/ |
884 | GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, 1, 1, 0) |
885 | GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, 1, 1, 1) |
886 | /* addi */ |
887 | static void gen_addi(DisasContext *ctx) |
888 | { |
889 | target_long simm = SIMM(ctx->opcode); |
890 | |
891 | if (rA(ctx->opcode) == 0) { |
892 | /* li case */ |
893 | tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], simm); |
894 | } else { |
895 | tcg_gen_addi_tl(cpu_gpr[rD(ctx->opcode)], |
896 | cpu_gpr[rA(ctx->opcode)], simm); |
897 | } |
898 | } |
899 | /* addic addic.*/ |
900 | static inline void gen_op_addic(DisasContext *ctx, bool compute_rc0) |
901 | { |
902 | TCGv c = tcg_const_tl(SIMM(ctx->opcode)); |
903 | gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], |
904 | c, 0, 1, 0, compute_rc0); |
905 | tcg_temp_free(c); |
906 | } |
907 | |
908 | static void gen_addic(DisasContext *ctx) |
909 | { |
910 | gen_op_addic(ctx, 0); |
911 | } |
912 | |
913 | static void gen_addic_(DisasContext *ctx) |
914 | { |
915 | gen_op_addic(ctx, 1); |
916 | } |
917 | |
918 | /* addis */ |
919 | static void gen_addis(DisasContext *ctx) |
920 | { |
921 | target_long simm = SIMM(ctx->opcode); |
922 | |
923 | if (rA(ctx->opcode) == 0) { |
924 | /* lis case */ |
925 | tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], simm << 16); |
926 | } else { |
927 | tcg_gen_addi_tl(cpu_gpr[rD(ctx->opcode)], |
928 | cpu_gpr[rA(ctx->opcode)], simm << 16); |
929 | } |
930 | } |
931 | |
932 | static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret, TCGv arg1, |
933 | TCGv arg2, int sign, int compute_ov) |
934 | { |
935 | int l1 = gen_new_label(); |
936 | int l2 = gen_new_label(); |
937 | TCGv_i32 t0 = tcg_temp_local_new_i32(); |
938 | TCGv_i32 t1 = tcg_temp_local_new_i32(); |
939 | |
940 | tcg_gen_trunc_tl_i32(t0, arg1); |
941 | tcg_gen_trunc_tl_i32(t1, arg2); |
942 | tcg_gen_brcondi_i32(TCG_COND_EQ, t1, 0, l1); |
943 | if (sign) { |
944 | int l3 = gen_new_label(); |
945 | tcg_gen_brcondi_i32(TCG_COND_NE, t1, -1, l3); |
946 | tcg_gen_brcondi_i32(TCG_COND_EQ, t0, INT32_MIN, l1); |
947 | gen_set_label(l3); |
948 | tcg_gen_div_i32(t0, t0, t1); |
949 | } else { |
950 | tcg_gen_divu_i32(t0, t0, t1); |
951 | } |
952 | if (compute_ov) { |
953 | tcg_gen_movi_tl(cpu_ov, 0); |
954 | } |
955 | tcg_gen_br(l2); |
956 | gen_set_label(l1); |
957 | if (sign) { |
958 | tcg_gen_sari_i32(t0, t0, 31); |
959 | } else { |
960 | tcg_gen_movi_i32(t0, 0); |
961 | } |
962 | if (compute_ov) { |
963 | tcg_gen_movi_tl(cpu_ov, 1); |
964 | tcg_gen_movi_tl(cpu_so, 1); |
965 | } |
966 | gen_set_label(l2); |
967 | tcg_gen_extu_i32_tl(ret, t0); |
968 | tcg_temp_free_i32(t0); |
969 | tcg_temp_free_i32(t1); |
970 | if (unlikely(Rc(ctx->opcode) != 0)) |
971 | gen_set_Rc0(ctx, ret); |
972 | } |
973 | /* Div functions */ |
974 | #define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov) \ |
975 | static void glue(gen_, name)(DisasContext *ctx) \ |
976 | { \ |
977 | gen_op_arith_divw(ctx, cpu_gpr[rD(ctx->opcode)], \ |
978 | cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \ |
979 | sign, compute_ov); \ |
980 | } |
981 | /* divwu divwu. divwuo divwuo. */ |
982 | GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0); |
983 | GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1); |
984 | /* divw divw. divwo divwo. */ |
985 | GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0); |
986 | GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1); |
987 | #if defined(TARGET_PPC64) |
988 | static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret, TCGv arg1, |
989 | TCGv arg2, int sign, int compute_ov) |
990 | { |
991 | int l1 = gen_new_label(); |
992 | int l2 = gen_new_label(); |
993 | |
994 | tcg_gen_brcondi_i64(TCG_COND_EQ, arg2, 0, l1); |
995 | if (sign) { |
996 | int l3 = gen_new_label(); |
997 | tcg_gen_brcondi_i64(TCG_COND_NE, arg2, -1, l3); |
998 | tcg_gen_brcondi_i64(TCG_COND_EQ, arg1, INT64_MIN, l1); |
999 | gen_set_label(l3); |
1000 | tcg_gen_div_i64(ret, arg1, arg2); |
1001 | } else { |
1002 | tcg_gen_divu_i64(ret, arg1, arg2); |
1003 | } |
1004 | if (compute_ov) { |
1005 | tcg_gen_movi_tl(cpu_ov, 0); |
1006 | } |
1007 | tcg_gen_br(l2); |
1008 | gen_set_label(l1); |
1009 | if (sign) { |
1010 | tcg_gen_sari_i64(ret, arg1, 63); |
1011 | } else { |
1012 | tcg_gen_movi_i64(ret, 0); |
1013 | } |
1014 | if (compute_ov) { |
1015 | tcg_gen_movi_tl(cpu_ov, 1); |
1016 | tcg_gen_movi_tl(cpu_so, 1); |
1017 | } |
1018 | gen_set_label(l2); |
1019 | if (unlikely(Rc(ctx->opcode) != 0)) |
1020 | gen_set_Rc0(ctx, ret); |
1021 | } |
1022 | #define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov) \ |
1023 | static void glue(gen_, name)(DisasContext *ctx) \ |
1024 | { \ |
1025 | gen_op_arith_divd(ctx, cpu_gpr[rD(ctx->opcode)], \ |
1026 | cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \ |
1027 | sign, compute_ov); \ |
1028 | } |
1029 | /* divwu divwu. divwuo divwuo. */ |
1030 | GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0); |
1031 | GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1); |
1032 | /* divw divw. divwo divwo. */ |
1033 | GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0); |
1034 | GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1); |
1035 | #endif |
1036 | |
1037 | /* mulhw mulhw. */ |
1038 | static void gen_mulhw(DisasContext *ctx) |
1039 | { |
1040 | TCGv_i32 t0 = tcg_temp_new_i32(); |
1041 | TCGv_i32 t1 = tcg_temp_new_i32(); |
1042 | |
1043 | tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); |
1044 | tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); |
1045 | tcg_gen_muls2_i32(t0, t1, t0, t1); |
1046 | tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1); |
1047 | tcg_temp_free_i32(t0); |
1048 | tcg_temp_free_i32(t1); |
1049 | if (unlikely(Rc(ctx->opcode) != 0)) |
1050 | gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); |
1051 | } |
1052 | |
1053 | /* mulhwu mulhwu. */ |
1054 | static void gen_mulhwu(DisasContext *ctx) |
1055 | { |
1056 | TCGv_i32 t0 = tcg_temp_new_i32(); |
1057 | TCGv_i32 t1 = tcg_temp_new_i32(); |
1058 | |
1059 | tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); |
1060 | tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); |
1061 | tcg_gen_mulu2_i32(t0, t1, t0, t1); |
1062 | tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1); |
1063 | tcg_temp_free_i32(t0); |
1064 | tcg_temp_free_i32(t1); |
1065 | if (unlikely(Rc(ctx->opcode) != 0)) |
1066 | gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); |
1067 | } |
1068 | |
1069 | /* mullw mullw. */ |
1070 | static void gen_mullw(DisasContext *ctx) |
1071 | { |
1072 | tcg_gen_mul_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], |
1073 | cpu_gpr[rB(ctx->opcode)]); |
1074 | tcg_gen_ext32s_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)]); |
1075 | if (unlikely(Rc(ctx->opcode) != 0)) |
1076 | gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); |
1077 | } |
1078 | |
1079 | /* mullwo mullwo. */ |
1080 | static void gen_mullwo(DisasContext *ctx) |
1081 | { |
1082 | TCGv_i32 t0 = tcg_temp_new_i32(); |
1083 | TCGv_i32 t1 = tcg_temp_new_i32(); |
1084 | |
1085 | tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); |
1086 | tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); |
1087 | tcg_gen_muls2_i32(t0, t1, t0, t1); |
1088 | tcg_gen_ext_i32_tl(cpu_gpr[rD(ctx->opcode)], t0); |
1089 | |
1090 | tcg_gen_sari_i32(t0, t0, 31); |
1091 | tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t1); |
1092 | tcg_gen_extu_i32_tl(cpu_ov, t0); |
1093 | tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov); |
1094 | |
1095 | tcg_temp_free_i32(t0); |
1096 | tcg_temp_free_i32(t1); |
1097 | if (unlikely(Rc(ctx->opcode) != 0)) |
1098 | gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); |
1099 | } |
1100 | |
1101 | /* mulli */ |
1102 | static void gen_mulli(DisasContext *ctx) |
1103 | { |
1104 | tcg_gen_muli_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], |
1105 | SIMM(ctx->opcode)); |
1106 | } |
1107 | |
1108 | #if defined(TARGET_PPC64) |
1109 | /* mulhd mulhd. */ |
1110 | static void gen_mulhd(DisasContext *ctx) |
1111 | { |
1112 | TCGv lo = tcg_temp_new(); |
1113 | tcg_gen_muls2_tl(lo, cpu_gpr[rD(ctx->opcode)], |
1114 | cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); |
1115 | tcg_temp_free(lo); |
1116 | if (unlikely(Rc(ctx->opcode) != 0)) { |
1117 | gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); |
1118 | } |
1119 | } |
1120 | |
1121 | /* mulhdu mulhdu. */ |
1122 | static void gen_mulhdu(DisasContext *ctx) |
1123 | { |
1124 | TCGv lo = tcg_temp_new(); |
1125 | tcg_gen_mulu2_tl(lo, cpu_gpr[rD(ctx->opcode)], |
1126 | cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); |
1127 | tcg_temp_free(lo); |
1128 | if (unlikely(Rc(ctx->opcode) != 0)) { |
1129 | gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); |
1130 | } |
1131 | } |
1132 | |
1133 | /* mulld mulld. */ |
1134 | static void gen_mulld(DisasContext *ctx) |
1135 | { |
1136 | tcg_gen_mul_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], |
1137 | cpu_gpr[rB(ctx->opcode)]); |
1138 | if (unlikely(Rc(ctx->opcode) != 0)) |
1139 | gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); |
1140 | } |
1141 | |
1142 | /* mulldo mulldo. */ |
1143 | static void gen_mulldo(DisasContext *ctx) |
1144 | { |
1145 | gen_helper_mulldo(cpu_gpr[rD(ctx->opcode)], cpu_env, |
1146 | cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); |
1147 | if (unlikely(Rc(ctx->opcode) != 0)) { |
1148 | gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); |
1149 | } |
1150 | } |
1151 | #endif |
1152 | |
1153 | /* Common subf function */ |
1154 | static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1, |
1155 | TCGv arg2, bool add_ca, bool compute_ca, |
1156 | bool compute_ov, bool compute_rc0) |
1157 | { |
1158 | TCGv t0 = ret; |
1159 | |
1160 | if (compute_ca || compute_ov) { |
1161 | t0 = tcg_temp_new(); |
1162 | } |
1163 | |
1164 | if (compute_ca) { |
1165 | /* dest = ~arg1 + arg2 [+ ca]. */ |
1166 | if (NARROW_MODE(ctx)) { |
1167 | /* Caution: a non-obvious corner case of the spec is that we |
1168 | must produce the *entire* 64-bit addition, but produce the |
1169 | carry into bit 32. */ |
1170 | TCGv inv1 = tcg_temp_new(); |
1171 | TCGv t1 = tcg_temp_new(); |
1172 | tcg_gen_not_tl(inv1, arg1); |
1173 | if (add_ca) { |
1174 | tcg_gen_add_tl(t0, arg2, cpu_ca); |
1175 | } else { |
1176 | tcg_gen_addi_tl(t0, arg2, 1); |
1177 | } |
1178 | tcg_gen_xor_tl(t1, arg2, inv1); /* add without carry */ |
1179 | tcg_gen_add_tl(t0, t0, inv1); |
1180 | tcg_gen_xor_tl(cpu_ca, t0, t1); /* bits changes w/ carry */ |
1181 | tcg_temp_free(t1); |
1182 | tcg_gen_shri_tl(cpu_ca, cpu_ca, 32); /* extract bit 32 */ |
1183 | tcg_gen_andi_tl(cpu_ca, cpu_ca, 1); |
1184 | } else if (add_ca) { |
1185 | TCGv zero, inv1 = tcg_temp_new(); |
1186 | tcg_gen_not_tl(inv1, arg1); |
1187 | zero = tcg_const_tl(0); |
1188 | tcg_gen_add2_tl(t0, cpu_ca, arg2, zero, cpu_ca, zero); |
1189 | tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, inv1, zero); |
1190 | tcg_temp_free(zero); |
1191 | tcg_temp_free(inv1); |
1192 | } else { |
1193 | tcg_gen_setcond_tl(TCG_COND_GEU, cpu_ca, arg2, arg1); |
1194 | tcg_gen_sub_tl(t0, arg2, arg1); |
1195 | } |
1196 | } else if (add_ca) { |
1197 | /* Since we're ignoring carry-out, we can simplify the |
1198 | standard ~arg1 + arg2 + ca to arg2 - arg1 + ca - 1. */ |
1199 | tcg_gen_sub_tl(t0, arg2, arg1); |
1200 | tcg_gen_add_tl(t0, t0, cpu_ca); |
1201 | tcg_gen_subi_tl(t0, t0, 1); |
1202 | } else { |
1203 | tcg_gen_sub_tl(t0, arg2, arg1); |
1204 | } |
1205 | |
1206 | if (compute_ov) { |
1207 | gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 1); |
1208 | } |
1209 | if (unlikely(compute_rc0)) { |
1210 | gen_set_Rc0(ctx, t0); |
1211 | } |
1212 | |
1213 | if (!TCGV_EQUAL(t0, ret)) { |
1214 | tcg_gen_mov_tl(ret, t0); |
1215 | tcg_temp_free(t0); |
1216 | } |
1217 | } |
1218 | /* Sub functions with Two operands functions */ |
1219 | #define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov) \ |
1220 | static void glue(gen_, name)(DisasContext *ctx) \ |
1221 | { \ |
1222 | gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], \ |
1223 | cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \ |
1224 | add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \ |
1225 | } |
1226 | /* Sub functions with one operand and one immediate */ |
1227 | #define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val, \{ .opc1 = 0x1F, .opc2 = 0x08, .opc3 = opc3, .pad = { 0, }, .handler = { .inval1 = 0x0000F800, .type = PPC_INSNS_BASE, .type2 = PPC_NONE , .handler = &gen_name, }, .oname = "name", }, |
1228 | add_ca, compute_ca, compute_ov){ .opc1 = 0x1F, .opc2 = 0x08, .opc3 = opc3, .pad = { 0, }, .handler = { .inval1 = 0x0000F800, .type = PPC_INSNS_BASE, .type2 = PPC_NONE , .handler = &gen_name, }, .oname = "name", }, \ |
1229 | static void glue(gen_, name)gen_name(DisasContext *ctx) \ |
1230 | { \ |
1231 | TCGvTCGv_i32 t0 = tcg_const_tltcg_const_i32(const_val); \ |
1232 | gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], \ |
1233 | cpu_gpr[rA(ctx->opcode)], t0, \ |
1234 | add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \ |
1235 | tcg_temp_freetcg_temp_free_i32(t0); \ |
1236 | } |
1237 | /* subf subf. subfo subfo. */
1238 | GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0)
1239 | GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1)
1240 | /* subfc subfc. subfco subfco. */
1241 | GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0)
1242 | GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1)
1243 | /* subfe subfe. subfeo subfeo. */
1244 | GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0)
1245 | GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1)
1246 | /* subfme subfme. subfmeo subfmeo. */
1247 | GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0)
1248 | GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1)
1249 | /* subfze subfze. subfzeo subfzeo. */
1250 | GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0)
1251 | GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1)
1252 | |
1253 | /* subfic */ |
1254 | static void gen_subfic(DisasContext *ctx) |
1255 | { |
1256 | TCGv c = tcg_const_tl(SIMM(ctx->opcode));
1257 | gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
1258 | c, 0, 1, 0, 0);
1259 | tcg_temp_free(c);
1260 | } |
1261 | |
1262 | /* neg neg. nego nego. */ |
1263 | static inline void gen_op_arith_neg(DisasContext *ctx, bool compute_ov)
1264 | {
1265 | TCGv zero = tcg_const_tl(0);
1266 | gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
1267 | zero, 0, 0, compute_ov, Rc(ctx->opcode));
1268 | tcg_temp_free(zero);
1269 | } |
1270 | |
1271 | static void gen_neg(DisasContext *ctx) |
1272 | { |
1273 | gen_op_arith_neg(ctx, 0); |
1274 | } |
1275 | |
1276 | static void gen_nego(DisasContext *ctx) |
1277 | { |
1278 | gen_op_arith_neg(ctx, 1); |
1279 | } |
1280 | |
1281 | /*** Integer logical ***/ |
1282 | #define GEN_LOGICAL2(name, tcg_op, opc, type) \
1283 | static void glue(gen_, name)(DisasContext *ctx) \
1284 | { \
1285 | tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], \
1286 | cpu_gpr[rB(ctx->opcode)]); \
1287 | if (unlikely(Rc(ctx->opcode) != 0)) \
1288 | gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); \
1289 | }
1290 | 
1291 | #define GEN_LOGICAL1(name, tcg_op, opc, type) \
1292 | static void glue(gen_, name)(DisasContext *ctx) \
1293 | { \
1294 | tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); \
1295 | if (unlikely(Rc(ctx->opcode) != 0)) \
1296 | gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); \
1297 | }
1298 | |
1299 | /* and & and. */ |
1300 | GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER);
1301 | /* andc & andc. */
1302 | GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER);
1303 | |
1304 | /* andi. */ |
1305 | static void gen_andi_(DisasContext *ctx) |
1306 | { |
1307 | tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], UIMM(ctx->opcode));
1308 | gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); |
1309 | } |
1310 | |
1311 | /* andis. */ |
1312 | static void gen_andis_(DisasContext *ctx) |
1313 | { |
1314 | tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], UIMM(ctx->opcode) << 16);
1315 | gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); |
1316 | } |
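/*
 * Note: unlike the GEN_LOGICAL2 pattern above, gen_andi_ and gen_andis_ call
 * gen_set_Rc0() unconditionally. This mirrors the ISA: andi. and andis. exist
 * only in their record form (the "." is part of the mnemonic), so CR0 is
 * always updated and no Rc(ctx->opcode) test is needed.
 */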
1317 | |
1318 | /* cntlzw */ |
1319 | static void gen_cntlzw(DisasContext *ctx) |
1320 | { |
1321 | gen_helper_cntlzw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); |
1322 | if (unlikely(Rc(ctx->opcode) != 0))
1323 | gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
1324 | }
1325 | /* eqv & eqv. */
1326 | GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER);
1327 | /* extsb & extsb. */
1328 | GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER);
1329 | /* extsh & extsh. */
1330 | GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER);
1331 | /* nand & nand. */
1332 | GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER);
1333 | /* nor & nor. */
1334 | GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER);
1335 | |
1336 | /* or & or. */ |
1337 | static void gen_or(DisasContext *ctx) |
1338 | { |
1339 | int rs, ra, rb; |
1340 | |
1341 | rs = rS(ctx->opcode); |
1342 | ra = rA(ctx->opcode); |
1343 | rb = rB(ctx->opcode); |
1344 | /* Optimisation for mr. ri case */ |
1345 | if (rs != ra || rs != rb) { |
1346 | if (rs != rb) |
1347 | tcg_gen_or_tl(cpu_gpr[ra], cpu_gpr[rs], cpu_gpr[rb]);
1348 | else
1349 | tcg_gen_mov_tl(cpu_gpr[ra], cpu_gpr[rs]);
1350 | if (unlikely(Rc(ctx->opcode) != 0))
1351 | gen_set_Rc0(ctx, cpu_gpr[ra]);
1352 | } else if (unlikely(Rc(ctx->opcode) != 0)) {
1353 | gen_set_Rc0(ctx, cpu_gpr[rs]); |
1354 | #if defined(TARGET_PPC64) |
1355 | } else { |
1356 | int prio = 0; |
1357 | |
1358 | switch (rs) { |
1359 | case 1: |
1360 | /* Set process priority to low */ |
1361 | prio = 2; |
1362 | break; |
1363 | case 6: |
1364 | /* Set process priority to medium-low */ |
1365 | prio = 3; |
1366 | break; |
1367 | case 2: |
1368 | /* Set process priority to normal */ |
1369 | prio = 4; |
1370 | break; |
1371 | #if !defined(CONFIG_USER_ONLY) |
1372 | case 31: |
1373 | if (ctx->mem_idx > 0) { |
1374 | /* Set process priority to very low */ |
1375 | prio = 1; |
1376 | } |
1377 | break; |
1378 | case 5: |
1379 | if (ctx->mem_idx > 0) { |
1380 | /* Set process priority to medium-high */
1381 | prio = 5; |
1382 | } |
1383 | break; |
1384 | case 3: |
1385 | if (ctx->mem_idx > 0) { |
1386 | /* Set process priority to high */ |
1387 | prio = 6; |
1388 | } |
1389 | break; |
1390 | case 7: |
1391 | if (ctx->mem_idx > 1) { |
1392 | /* Set process priority to very high */ |
1393 | prio = 7; |
1394 | } |
1395 | break; |
1396 | #endif |
1397 | default: |
1398 | /* nop */ |
1399 | break; |
1400 | } |
1401 | if (prio) { |
1402 | TCGv t0 = tcg_temp_new();
1403 | gen_load_spr(t0, SPR_PPR);
1404 | tcg_gen_andi_tl(t0, t0, ~0x001C000000000000ULL);
1405 | tcg_gen_ori_tl(t0, t0, ((uint64_t)prio) << 50);
1406 | gen_store_spr(SPR_PPR, t0);
1407 | tcg_temp_free(t0);
1408 | } |
1409 | #endif |
1410 | } |
1411 | } |
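/*
 * Note: the final branch above handles "or rx,rx,rx" encodings that move no
 * data. On 64-bit implementations these act as thread-priority hints: the
 * switch maps the register number (1, 6, 2 and, when ctx->mem_idx indicates
 * sufficient privilege, 31, 5, 3, 7) to a priority value, which is then
 * written into the priority field of the PPR special register (the
 * 0x001C000000000000 mask, i.e. bits 50:52 counting from the least
 * significant bit, hence the shift by 50). Any other register number is
 * treated as a plain nop.
 */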
1412 | /* orc & orc. */ |
1413 | GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER);
1414 | |
1415 | /* xor & xor. */ |
1416 | static void gen_xor(DisasContext *ctx) |
1417 | { |
1418 | /* Optimisation for "set to zero" case */ |
1419 | if (rS(ctx->opcode) != rB(ctx->opcode)) |
1420 | tcg_gen_xor_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
1421 | else
1422 | tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0);
1423 | if (unlikely(Rc(ctx->opcode) != 0))
1424 | gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); |
1425 | } |
1426 | |
1427 | /* ori */ |
1428 | static void gen_ori(DisasContext *ctx) |
1429 | { |
1430 | target_ulong uimm = UIMM(ctx->opcode); |
1431 | |
1432 | if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) { |
1433 | /* NOP */ |
1434 | /* XXX: should handle special NOPs for POWER series */ |
1435 | return; |
1436 | } |
1437 | tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
1438 | } |
1439 | |
1440 | /* oris */ |
1441 | static void gen_oris(DisasContext *ctx) |
1442 | { |
1443 | target_ulong uimm = UIMM(ctx->opcode); |
1444 | |
1445 | if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) { |
1446 | /* NOP */ |
1447 | return; |
1448 | } |
1449 | tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm << 16);
1450 | } |
1451 | |
1452 | /* xori */ |
1453 | static void gen_xori(DisasContext *ctx) |
1454 | { |
1455 | target_ulong uimm = UIMM(ctx->opcode); |
1456 | |
1457 | if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) { |
1458 | /* NOP */ |
1459 | return; |
1460 | } |
1461 | tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
1462 | } |
1463 | |
1464 | /* xoris */ |
1465 | static void gen_xoris(DisasContext *ctx) |
1466 | { |
1467 | target_ulong uimm = UIMM(ctx->opcode); |
1468 | |
1469 | if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) { |
1470 | /* NOP */ |
1471 | return; |
1472 | } |
1473 | tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm << 16);
1474 | } |
1475 | |
1476 | /* popcntb : PowerPC 2.03 specification */ |
1477 | static void gen_popcntb(DisasContext *ctx) |
1478 | { |
1479 | gen_helper_popcntb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); |
1480 | } |
1481 | |
1482 | static void gen_popcntw(DisasContext *ctx) |
1483 | { |
1484 | gen_helper_popcntw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); |
1485 | } |
1486 | |
1487 | #if defined(TARGET_PPC64) |
1488 | /* popcntd: PowerPC 2.06 specification */ |
1489 | static void gen_popcntd(DisasContext *ctx) |
1490 | { |
1491 | gen_helper_popcntd(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); |
1492 | } |
1493 | #endif |
1494 | |
1495 | /* prtyw: PowerPC 2.05 specification */ |
1496 | static void gen_prtyw(DisasContext *ctx) |
1497 | { |
1498 | TCGv ra = cpu_gpr[rA(ctx->opcode)];
1499 | TCGv rs = cpu_gpr[rS(ctx->opcode)];
1500 | TCGv t0 = tcg_temp_new();
1501 | tcg_gen_shri_tl(t0, rs, 16);
1502 | tcg_gen_xor_tl(ra, rs, t0);
1503 | tcg_gen_shri_tl(t0, ra, 8);
1504 | tcg_gen_xor_tl(ra, ra, t0);
1505 | tcg_gen_andi_tl(ra, ra, (target_ulong)0x100000001ULL);
1506 | tcg_temp_free(t0);
1507 | } |
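/*
 * Note: the xor-fold above implements prtyw without a helper call. Folding rs
 * by 16 and then by 8 xors bits 0, 8, 16 and 24 of each 32-bit word into that
 * word's bit 0, and the final mask with 0x100000001 keeps only those two
 * result bits, i.e. the parity of the low-order bit of each byte in each
 * word, which matches the Parity Word definition.
 */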
1508 | |
1509 | #if defined(TARGET_PPC64) |
1510 | /* prtyd: PowerPC 2.05 specification */ |
1511 | static void gen_prtyd(DisasContext *ctx) |
1512 | { |
1513 | TCGv ra = cpu_gpr[rA(ctx->opcode)];
1514 | TCGv rs = cpu_gpr[rS(ctx->opcode)];
1515 | TCGv t0 = tcg_temp_new();
1516 | tcg_gen_shri_tl(t0, rs, 32);
1517 | tcg_gen_xor_tl(ra, rs, t0);
1518 | tcg_gen_shri_tl(t0, ra, 16);
1519 | tcg_gen_xor_tl(ra, ra, t0);
1520 | tcg_gen_shri_tl(t0, ra, 8);
1521 | tcg_gen_xor_tl(ra, ra, t0);
1522 | tcg_gen_andi_tl(ra, ra, 1);
1523 | tcg_temp_free(t0);
1524 | } |
1525 | #endif |
1526 | |
1527 | #if defined(TARGET_PPC64) |
1528 | /* extsw & extsw. */ |
1529 | GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B);
1530 | |
1531 | /* cntlzd */ |
1532 | static void gen_cntlzd(DisasContext *ctx) |
1533 | { |
1534 | gen_helper_cntlzd(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); |
1535 | if (unlikely(Rc(ctx->opcode) != 0))
1536 | gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); |
1537 | } |
1538 | #endif |
1539 | |
1540 | /*** Integer rotate ***/ |
1541 | |
1542 | /* rlwimi & rlwimi. */ |
1543 | static void gen_rlwimi(DisasContext *ctx) |
1544 | { |
1545 | uint32_t mb, me, sh; |
1546 | |
1547 | mb = MB(ctx->opcode); |
1548 | me = ME(ctx->opcode); |
1549 | sh = SH(ctx->opcode); |
1550 | if (likely(sh == 0 && mb == 0 && me == 31)) {
1551 | tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
1552 | } else {
1553 | target_ulong mask;
1554 | TCGv t1;
1555 | TCGv t0 = tcg_temp_new();
1556 | #if defined(TARGET_PPC64) |
1557 | TCGv_i32 t2 = tcg_temp_new_i32(); |
1558 | tcg_gen_trunc_i64_i32(t2, cpu_gpr[rS(ctx->opcode)]); |
1559 | tcg_gen_rotli_i32(t2, t2, sh); |
1560 | tcg_gen_extu_i32_i64(t0, t2); |
1561 | tcg_temp_free_i32(t2); |
1562 | #else |
1563 | tcg_gen_rotli_i32(t0, cpu_gpr[rS(ctx->opcode)], sh); |
1564 | #endif |
1565 | #if defined(TARGET_PPC64) |
1566 | mb += 32; |
1567 | me += 32; |
1568 | #endif |
1569 | mask = MASK(mb, me); |
1570 | t1 = tcg_temp_new();
1571 | tcg_gen_andi_tl(t0, t0, mask);
1572 | tcg_gen_andi_tl(t1, cpu_gpr[rA(ctx->opcode)], ~mask);
1573 | tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
1574 | tcg_temp_free(t0);
1575 | tcg_temp_free(t1);
1576 | }
1577 | if (unlikely(Rc(ctx->opcode) != 0))
1578 | gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); |
1579 | } |
1580 | |
1581 | /* rlwinm & rlwinm. */ |
1582 | static void gen_rlwinm(DisasContext *ctx) |
1583 | { |
1584 | uint32_t mb, me, sh; |
1585 | |
1586 | sh = SH(ctx->opcode); |
1587 | mb = MB(ctx->opcode); |
1588 | me = ME(ctx->opcode); |
1589 | |
1590 | if (likely(mb == 0 && me == (31 - sh))) {
1591 | if (likely(sh == 0)) {
1592 | tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
1593 | } else {
1594 | TCGv t0 = tcg_temp_new();
1595 | tcg_gen_ext32u_tl(t0, cpu_gpr[rS(ctx->opcode)]);
1596 | tcg_gen_shli_tl(t0, t0, sh);
1597 | tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], t0);
1598 | tcg_temp_free(t0);
1599 | }
1600 | } else if (likely(sh != 0 && me == 31 && sh == (32 - mb))) {
1601 | TCGv t0 = tcg_temp_new();
1602 | tcg_gen_ext32u_tl(t0, cpu_gpr[rS(ctx->opcode)]);
1603 | tcg_gen_shri_tl(t0, t0, mb);
1604 | tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], t0);
1605 | tcg_temp_free(t0);
1606 | } else {
1607 | TCGv t0 = tcg_temp_new();
1608 | #if defined(TARGET_PPC64) |
1609 | TCGv_i32 t1 = tcg_temp_new_i32(); |
1610 | tcg_gen_trunc_i64_i32(t1, cpu_gpr[rS(ctx->opcode)]); |
1611 | tcg_gen_rotli_i32(t1, t1, sh); |
1612 | tcg_gen_extu_i32_i64(t0, t1); |
1613 | tcg_temp_free_i32(t1); |
1614 | #else |
1615 | tcg_gen_rotli_i32(t0, cpu_gpr[rS(ctx->opcode)], sh); |
1616 | #endif |
1617 | #if defined(TARGET_PPC64) |
1618 | mb += 32; |
1619 | me += 32; |
1620 | #endif |
1621 | tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
1622 | tcg_temp_free(t0);
1623 | }
1624 | if (unlikely(Rc(ctx->opcode) != 0))
1625 | gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); |
1626 | } |
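/*
 * Note on the fast paths above: rlwinm with mb == 0 and me == 31 - sh keeps
 * exactly the bits that a left shift by sh would keep, so it is emitted as a
 * zero-extended shift left; likewise me == 31 with sh == 32 - mb is a logical
 * shift right by mb. Only the general case needs the 32-bit rotate followed
 * by an AND with MASK(mb, me).
 */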
1627 | |
1628 | /* rlwnm & rlwnm. */ |
1629 | static void gen_rlwnm(DisasContext *ctx) |
1630 | { |
1631 | uint32_t mb, me; |
1632 | TCGv t0;
1633 | #if defined(TARGET_PPC64) |
1634 | TCGv_i32 t1, t2; |
1635 | #endif |
1636 | |
1637 | mb = MB(ctx->opcode); |
1638 | me = ME(ctx->opcode); |
1639 | t0 = tcg_temp_new();
1640 | tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1f);
1641 | #if defined(TARGET_PPC64) |
1642 | t1 = tcg_temp_new_i32(); |
1643 | t2 = tcg_temp_new_i32(); |
1644 | tcg_gen_trunc_i64_i32(t1, cpu_gpr[rS(ctx->opcode)]); |
1645 | tcg_gen_trunc_i64_i32(t2, t0); |
1646 | tcg_gen_rotl_i32(t1, t1, t2); |
1647 | tcg_gen_extu_i32_i64(t0, t1); |
1648 | tcg_temp_free_i32(t1); |
1649 | tcg_temp_free_i32(t2); |
1650 | #else |
1651 | tcg_gen_rotl_i32(t0, cpu_gpr[rS(ctx->opcode)], t0); |
1652 | #endif |
1653 | if (unlikely(mb != 0 || me != 31)) {
1654 | #if defined(TARGET_PPC64)
1655 | mb += 32;
1656 | me += 32;
1657 | #endif
1658 | tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me));
1659 | } else {
1660 | tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
1661 | }
1662 | tcg_temp_free(t0);
1663 | if (unlikely(Rc(ctx->opcode) != 0))
1664 | gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); |
1665 | } |
1666 | |
1667 | #if defined(TARGET_PPC64) |
1668 | #define GEN_PPC64_R2(name, opc1, opc2) \ |
1669 | static void glue(gen_, name##0)gen_name##0(DisasContext *ctx) \ |
1670 | { \ |
1671 | gen_##name(ctx, 0); \ |
1672 | } \ |
1673 | \ |
1674 | static void glue(gen_, name##1)gen_name##1(DisasContext *ctx) \ |
1675 | { \ |
1676 | gen_##name(ctx, 1); \ |
1677 | } |
1678 | #define GEN_PPC64_R4(name, opc1, opc2) \ |
1679 | static void glue(gen_, name##0)gen_name##0(DisasContext *ctx) \ |
1680 | { \ |
1681 | gen_##name(ctx, 0, 0); \ |
1682 | } \ |
1683 | \ |
1684 | static void glue(gen_, name##1)gen_name##1(DisasContext *ctx) \ |
1685 | { \ |
1686 | gen_##name(ctx, 0, 1); \ |
1687 | } \ |
1688 | \ |
1689 | static void glue(gen_, name##2)gen_name##2(DisasContext *ctx) \ |
1690 | { \ |
1691 | gen_##name(ctx, 1, 0); \ |
1692 | } \ |
1693 | \ |
1694 | static void glue(gen_, name##3)gen_name##3(DisasContext *ctx) \ |
1695 | { \ |
1696 | gen_##name(ctx, 1, 1); \ |
1697 | } |
1698 | |
1699 | static inline void gen_rldinm(DisasContext *ctx, uint32_t mb, uint32_t me, |
1700 | uint32_t sh) |
1701 | { |
1702 | if (likely(sh != 0 && mb == 0 && me == (63 - sh))__builtin_expect(!!(sh != 0 && mb == 0 && me == (63 - sh)), 1)) { |
1703 | tcg_gen_shli_tltcg_gen_shli_i32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], sh); |
1704 | } else if (likely(sh != 0 && me == 63 && sh == (64 - mb))__builtin_expect(!!(sh != 0 && me == 63 && sh == (64 - mb)), 1)) { |
1705 | tcg_gen_shri_tltcg_gen_shri_i32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], mb); |
1706 | } else { |
1707 | TCGvTCGv_i32 t0 = tcg_temp_new()tcg_temp_new_i32(); |
1708 | tcg_gen_rotli_tltcg_gen_rotli_i32(t0, cpu_gpr[rS(ctx->opcode)], sh); |
1709 | if (likely(mb == 0 && me == 63)__builtin_expect(!!(mb == 0 && me == 63), 1)) { |
1710 | tcg_gen_mov_tltcg_gen_mov_i32(cpu_gpr[rA(ctx->opcode)], t0); |
1711 | } else { |
1712 | tcg_gen_andi_tltcg_gen_andi_i32(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me)); |
1713 | } |
1714 | tcg_temp_freetcg_temp_free_i32(t0); |
1715 | } |
1716 | if (unlikely(Rc(ctx->opcode) != 0)__builtin_expect(!!(Rc(ctx->opcode) != 0), 0)) |
1717 | gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); |
1718 | } |
1719 | /* rldicl - rldicl. */ |
1720 | static inline void gen_rldicl(DisasContext *ctx, int mbn, int shn) |
1721 | { |
1722 | uint32_t sh, mb; |
1723 | |
1724 | sh = SH(ctx->opcode) | (shn << 5); |
1725 | mb = MB(ctx->opcode) | (mbn << 5); |
1726 | gen_rldinm(ctx, mb, 63, sh); |
1727 | } |
1728 | GEN_PPC64_R4(rldicl, 0x1E, 0x00); |
1729 | /* rldicr - rldicr. */ |
1730 | static inline void gen_rldicr(DisasContext *ctx, int men, int shn) |
1731 | { |
1732 | uint32_t sh, me; |
1733 | |
1734 | sh = SH(ctx->opcode) | (shn << 5); |
1735 | me = MB(ctx->opcode) | (men << 5); |
1736 | gen_rldinm(ctx, 0, me, sh); |
1737 | } |
1738 | GEN_PPC64_R4(rldicr, 0x1E, 0x02); |
1739 | /* rldic - rldic. */ |
1740 | static inline void gen_rldic(DisasContext *ctx, int mbn, int shn) |
1741 | { |
1742 | uint32_t sh, mb; |
1743 | |
1744 | sh = SH(ctx->opcode) | (shn << 5); |
1745 | mb = MB(ctx->opcode) | (mbn << 5); |
1746 | gen_rldinm(ctx, mb, 63 - sh, sh); |
1747 | } |
1748 | GEN_PPC64_R4(rldic, 0x1E, 0x04); |
1749 | |
1750 | static inline void gen_rldnm(DisasContext *ctx, uint32_t mb, uint32_t me) |
1751 | { |
1752 | TCGvTCGv_i32 t0; |
1753 | |
1754 | t0 = tcg_temp_new()tcg_temp_new_i32(); |
1755 | tcg_gen_andi_tltcg_gen_andi_i32(t0, cpu_gpr[rB(ctx->opcode)], 0x3f); |
1756 | tcg_gen_rotl_tltcg_gen_rotl_i32(t0, cpu_gpr[rS(ctx->opcode)], t0); |
1757 | if (unlikely(mb != 0 || me != 63)__builtin_expect(!!(mb != 0 || me != 63), 0)) { |
1758 | tcg_gen_andi_tltcg_gen_andi_i32(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me)); |
1759 | } else { |
1760 | tcg_gen_mov_tltcg_gen_mov_i32(cpu_gpr[rA(ctx->opcode)], t0); |
1761 | } |
1762 | tcg_temp_freetcg_temp_free_i32(t0); |
1763 | if (unlikely(Rc(ctx->opcode) != 0)__builtin_expect(!!(Rc(ctx->opcode) != 0), 0)) |
1764 | gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); |
1765 | } |
1766 | |
1767 | /* rldcl - rldcl. */ |
1768 | static inline void gen_rldcl(DisasContext *ctx, int mbn) |
1769 | { |
1770 | uint32_t mb; |
1771 | |
1772 | mb = MB(ctx->opcode) | (mbn << 5); |
1773 | gen_rldnm(ctx, mb, 63); |
1774 | } |
1775 | GEN_PPC64_R2(rldcl, 0x1E, 0x08); |
1776 | /* rldcr - rldcr. */ |
1777 | static inline void gen_rldcr(DisasContext *ctx, int men) |
1778 | { |
1779 | uint32_t me; |
1780 | |
1781 | me = MB(ctx->opcode) | (men << 5); |
1782 | gen_rldnm(ctx, 0, me); |
1783 | } |
1784 | GEN_PPC64_R2(rldcr, 0x1E, 0x09); |
1785 | /* rldimi - rldimi. */ |
1786 | static inline void gen_rldimi(DisasContext *ctx, int mbn, int shn) |
1787 | { |
1788 | uint32_t sh, mb, me; |
1789 | |
1790 | sh = SH(ctx->opcode) | (shn << 5); |
1791 | mb = MB(ctx->opcode) | (mbn << 5); |
1792 | me = 63 - sh; |
1793 | if (unlikely(sh == 0 && mb == 0)__builtin_expect(!!(sh == 0 && mb == 0), 0)) { |
1794 | tcg_gen_mov_tltcg_gen_mov_i32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); |
1795 | } else { |
1796 | TCGvTCGv_i32 t0, t1; |
1797 | target_ulong mask; |
1798 | |
1799 | t0 = tcg_temp_new()tcg_temp_new_i32(); |
1800 | tcg_gen_rotli_tltcg_gen_rotli_i32(t0, cpu_gpr[rS(ctx->opcode)], sh); |
1801 | t1 = tcg_temp_new()tcg_temp_new_i32(); |
1802 | mask = MASK(mb, me); |
1803 | tcg_gen_andi_tltcg_gen_andi_i32(t0, t0, mask); |
1804 | tcg_gen_andi_tltcg_gen_andi_i32(t1, cpu_gpr[rA(ctx->opcode)], ~mask); |
1805 | tcg_gen_or_tltcg_gen_or_i32(cpu_gpr[rA(ctx->opcode)], t0, t1); |
1806 | tcg_temp_freetcg_temp_free_i32(t0); |
1807 | tcg_temp_freetcg_temp_free_i32(t1); |
1808 | } |
1809 | if (unlikely(Rc(ctx->opcode) != 0)__builtin_expect(!!(Rc(ctx->opcode) != 0), 0)) |
1810 | gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); |
1811 | } |
1812 | GEN_PPC64_R4(rldimi, 0x1E, 0x06); |
1813 | #endif |
1814 | |
1815 | /*** Integer shift ***/ |
1816 | |
1817 | /* slw & slw. */ |
1818 | static void gen_slw(DisasContext *ctx) |
1819 | { |
1820 | TCGv t0, t1;
1821 | 
1822 | t0 = tcg_temp_new();
1823 | /* AND rS with a mask that is 0 when rB >= 0x20 */
1824 | #if defined(TARGET_PPC64)
1825 | tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
1826 | tcg_gen_sari_tl(t0, t0, 0x3f);
1827 | #else
1828 | tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
1829 | tcg_gen_sari_tl(t0, t0, 0x1f);
1830 | #endif
1831 | tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
1832 | t1 = tcg_temp_new();
1833 | tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
1834 | tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
1835 | tcg_temp_free(t1);
1836 | tcg_temp_free(t0);
1837 | tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
1838 | if (unlikely(Rc(ctx->opcode) != 0))
1839 | gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); |
1840 | } |
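/*
 * Note: slw must yield 0 when the shift amount in rB is in the range 32..63,
 * but a TCG shift is only defined for counts below the operand width. The
 * code above therefore builds an all-zero/all-one mask from bit 5 of rB:
 * shifting rB left so that bit 5 lands in the sign position and then
 * arithmetic-shifting it back replicates that bit across the word, so the
 * andc clears rS whenever rB & 0x20 is set. The same trick is reused by srw,
 * sld and srd below.
 */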
1841 | |
1842 | /* sraw & sraw. */ |
1843 | static void gen_sraw(DisasContext *ctx) |
1844 | { |
1845 | gen_helper_sraw(cpu_gpr[rA(ctx->opcode)], cpu_env, |
1846 | cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); |
1847 | if (unlikely(Rc(ctx->opcode) != 0))
1848 | gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); |
1849 | } |
1850 | |
1851 | /* srawi & srawi. */ |
1852 | static void gen_srawi(DisasContext *ctx) |
1853 | { |
1854 | int sh = SH(ctx->opcode); |
1855 | TCGv dst = cpu_gpr[rA(ctx->opcode)];
1856 | TCGv src = cpu_gpr[rS(ctx->opcode)];
1857 | if (sh == 0) {
1858 | tcg_gen_mov_tl(dst, src);
1859 | tcg_gen_movi_tl(cpu_ca, 0);
1860 | } else {
1861 | TCGv t0;
1862 | tcg_gen_ext32s_tl(dst, src);
1863 | tcg_gen_andi_tl(cpu_ca, dst, (1ULL << sh) - 1);
1864 | t0 = tcg_temp_new();
1865 | tcg_gen_sari_tl(t0, dst, TARGET_LONG_BITS - 1);
1866 | tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
1867 | tcg_temp_free(t0);
1868 | tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
1869 | tcg_gen_sari_tl(dst, dst, sh);
1870 | }
1871 | if (unlikely(Rc(ctx->opcode) != 0)) {
1872 | gen_set_Rc0(ctx, dst); |
1873 | } |
1874 | } |
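/*
 * Note: for srawi the carry (CA) must be set when the source is negative and
 * any 1 bits are shifted out, i.e. ca = (src < 0) && (src & ((1 << sh) - 1)).
 * The sequence above computes exactly that: the low sh bits are masked into
 * cpu_ca, the sari by TARGET_LONG_BITS - 1 produces an all-ones value only
 * for negative inputs, and the setcondi collapses the result to 0 or 1.
 */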
1875 | |
1876 | /* srw & srw. */ |
1877 | static void gen_srw(DisasContext *ctx) |
1878 | { |
1879 | TCGvTCGv_i32 t0, t1; |
1880 | |
1881 | t0 = tcg_temp_new()tcg_temp_new_i32(); |
1882 | /* AND rS with a mask that is 0 when rB >= 0x20 */ |
1883 | #if defined(TARGET_PPC64) |
1884 | tcg_gen_shli_tltcg_gen_shli_i32(t0, cpu_gpr[rB(ctx->opcode)], 0x3a); |
1885 | tcg_gen_sari_tltcg_gen_sari_i32(t0, t0, 0x3f); |
1886 | #else |
1887 | tcg_gen_shli_tltcg_gen_shli_i32(t0, cpu_gpr[rB(ctx->opcode)], 0x1a); |
1888 | tcg_gen_sari_tltcg_gen_sari_i32(t0, t0, 0x1f); |
1889 | #endif |
1890 | tcg_gen_andc_tltcg_gen_andc_i32(t0, cpu_gpr[rS(ctx->opcode)], t0); |
1891 | tcg_gen_ext32u_tltcg_gen_mov_i32(t0, t0); |
1892 | t1 = tcg_temp_new()tcg_temp_new_i32(); |
1893 | tcg_gen_andi_tltcg_gen_andi_i32(t1, cpu_gpr[rB(ctx->opcode)], 0x1f); |
1894 | tcg_gen_shr_tltcg_gen_shr_i32(cpu_gpr[rA(ctx->opcode)], t0, t1); |
1895 | tcg_temp_freetcg_temp_free_i32(t1); |
1896 | tcg_temp_freetcg_temp_free_i32(t0); |
1897 | if (unlikely(Rc(ctx->opcode) != 0)__builtin_expect(!!(Rc(ctx->opcode) != 0), 0)) |
1898 | gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); |
1899 | } |
1900 | |
1901 | #if defined(TARGET_PPC64) |
1902 | /* sld & sld. */ |
1903 | static void gen_sld(DisasContext *ctx) |
1904 | { |
1905 | TCGvTCGv_i32 t0, t1; |
1906 | |
1907 | t0 = tcg_temp_new()tcg_temp_new_i32(); |
1908 | /* AND rS with a mask that is 0 when rB >= 0x40 */ |
1909 | tcg_gen_shli_tltcg_gen_shli_i32(t0, cpu_gpr[rB(ctx->opcode)], 0x39); |
1910 | tcg_gen_sari_tltcg_gen_sari_i32(t0, t0, 0x3f); |
1911 | tcg_gen_andc_tltcg_gen_andc_i32(t0, cpu_gpr[rS(ctx->opcode)], t0); |
1912 | t1 = tcg_temp_new()tcg_temp_new_i32(); |
1913 | tcg_gen_andi_tltcg_gen_andi_i32(t1, cpu_gpr[rB(ctx->opcode)], 0x3f); |
1914 | tcg_gen_shl_tltcg_gen_shl_i32(cpu_gpr[rA(ctx->opcode)], t0, t1); |
1915 | tcg_temp_freetcg_temp_free_i32(t1); |
1916 | tcg_temp_freetcg_temp_free_i32(t0); |
1917 | if (unlikely(Rc(ctx->opcode) != 0)__builtin_expect(!!(Rc(ctx->opcode) != 0), 0)) |
1918 | gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); |
1919 | } |
1920 | |
1921 | /* srad & srad. */ |
1922 | static void gen_srad(DisasContext *ctx) |
1923 | { |
1924 | gen_helper_srad(cpu_gpr[rA(ctx->opcode)], cpu_env, |
1925 | cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); |
1926 | if (unlikely(Rc(ctx->opcode) != 0)__builtin_expect(!!(Rc(ctx->opcode) != 0), 0)) |
1927 | gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); |
1928 | } |
1929 | /* sradi & sradi. */ |
1930 | static inline void gen_sradi(DisasContext *ctx, int n) |
1931 | { |
1932 | int sh = SH(ctx->opcode) + (n << 5); |
1933 | TCGvTCGv_i32 dst = cpu_gpr[rA(ctx->opcode)]; |
1934 | TCGvTCGv_i32 src = cpu_gpr[rS(ctx->opcode)]; |
1935 | if (sh == 0) { |
1936 | tcg_gen_mov_tltcg_gen_mov_i32(dst, src); |
1937 | tcg_gen_movi_tltcg_gen_movi_i32(cpu_ca, 0); |
1938 | } else { |
1939 | TCGvTCGv_i32 t0; |
1940 | tcg_gen_andi_tltcg_gen_andi_i32(cpu_ca, src, (1ULL << sh) - 1); |
1941 | t0 = tcg_temp_new()tcg_temp_new_i32(); |
1942 | tcg_gen_sari_tltcg_gen_sari_i32(t0, src, TARGET_LONG_BITS32 - 1); |
1943 | tcg_gen_and_tltcg_gen_and_i32(cpu_ca, cpu_ca, t0); |
1944 | tcg_temp_freetcg_temp_free_i32(t0); |
1945 | tcg_gen_setcondi_tltcg_gen_setcondi_i32(TCG_COND_NE, cpu_ca, cpu_ca, 0); |
1946 | tcg_gen_sari_tltcg_gen_sari_i32(dst, src, sh); |
1947 | } |
1948 | if (unlikely(Rc(ctx->opcode) != 0)__builtin_expect(!!(Rc(ctx->opcode) != 0), 0)) { |
1949 | gen_set_Rc0(ctx, dst); |
1950 | } |
1951 | } |
1952 | |
1953 | static void gen_sradi0(DisasContext *ctx) |
1954 | { |
1955 | gen_sradi(ctx, 0); |
1956 | } |
1957 | |
1958 | static void gen_sradi1(DisasContext *ctx) |
1959 | { |
1960 | gen_sradi(ctx, 1); |
1961 | } |
1962 | |
1963 | /* srd & srd. */ |
1964 | static void gen_srd(DisasContext *ctx) |
1965 | { |
1966 | TCGvTCGv_i32 t0, t1; |
1967 | |
1968 | t0 = tcg_temp_new()tcg_temp_new_i32(); |
1969 | /* AND rS with a mask that is 0 when rB >= 0x40 */ |
1970 | tcg_gen_shli_tltcg_gen_shli_i32(t0, cpu_gpr[rB(ctx->opcode)], 0x39); |
1971 | tcg_gen_sari_tltcg_gen_sari_i32(t0, t0, 0x3f); |
1972 | tcg_gen_andc_tltcg_gen_andc_i32(t0, cpu_gpr[rS(ctx->opcode)], t0); |
1973 | t1 = tcg_temp_new()tcg_temp_new_i32(); |
1974 | tcg_gen_andi_tltcg_gen_andi_i32(t1, cpu_gpr[rB(ctx->opcode)], 0x3f); |
1975 | tcg_gen_shr_tltcg_gen_shr_i32(cpu_gpr[rA(ctx->opcode)], t0, t1); |
1976 | tcg_temp_freetcg_temp_free_i32(t1); |
1977 | tcg_temp_freetcg_temp_free_i32(t0); |
1978 | if (unlikely(Rc(ctx->opcode) != 0)__builtin_expect(!!(Rc(ctx->opcode) != 0), 0)) |
1979 | gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); |
1980 | } |
1981 | #endif |
1982 | |
1983 | /*** Floating-Point arithmetic ***/ |
1984 | #define _GEN_FLOAT_ACB(name, op, op1, op2, isfloat, set_fprf, type) \
1985 | static void gen_f##name(DisasContext *ctx) \ |
1986 | { \ |
1987 | if (unlikely(!ctx->fpu_enabled)) { \
1988 | gen_exception(ctx, POWERPC_EXCP_FPU); \ |
1989 | return; \ |
1990 | } \ |
1991 | /* NIP cannot be restored if the memory exception comes from an helper */ \ |
1992 | gen_update_nip(ctx, ctx->nip - 4); \ |
1993 | gen_reset_fpstatus(); \ |
1994 | gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env, \ |
1995 | cpu_fpr[rA(ctx->opcode)], \ |
1996 | cpu_fpr[rC(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]); \ |
1997 | if (isfloat) { \ |
1998 | gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env, \ |
1999 | cpu_fpr[rD(ctx->opcode)]); \ |
2000 | } \ |
2001 | gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], set_fprf, \ |
2002 | Rc(ctx->opcode) != 0); \ |
2003 | } |
2004 | |
2005 | #define GEN_FLOAT_ACB(name, op2, set_fprf, type) \
2006 | _GEN_FLOAT_ACB(name, name, 0x3F, op2, 0, set_fprf, type); \
2007 | _GEN_FLOAT_ACB(name##s, name, 0x3B, op2, 1, set_fprf, type);
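/*
 * Note on the macro families in this block: _GEN_FLOAT_ACB, _GEN_FLOAT_AB and
 * _GEN_FLOAT_AC differ only in which FPR operands (rA/rC/rB, rA/rB, rA/rC)
 * are passed to the helper, and each GEN_FLOAT_* wrapper instantiates the
 * double-precision form (opcode 0x3F) plus an "s" variant (opcode 0x3B) whose
 * isfloat flag makes the generated code round the result to single precision
 * via gen_helper_frsp after the operation.
 */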
2008 | |
2009 | #define _GEN_FLOAT_AB(name, op, op1, op2, inval, isfloat, set_fprf, type) \
2010 | static void gen_f##name(DisasContext *ctx) \ |
2011 | { \ |
2012 | if (unlikely(!ctx->fpu_enabled)) { \
2013 | gen_exception(ctx, POWERPC_EXCP_FPU); \ |
2014 | return; \ |
2015 | } \ |
2016 | /* NIP cannot be restored if the memory exception comes from an helper */ \ |
2017 | gen_update_nip(ctx, ctx->nip - 4); \ |
2018 | gen_reset_fpstatus(); \ |
2019 | gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env, \ |
2020 | cpu_fpr[rA(ctx->opcode)], \ |
2021 | cpu_fpr[rB(ctx->opcode)]); \ |
2022 | if (isfloat) { \ |
2023 | gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env, \ |
2024 | cpu_fpr[rD(ctx->opcode)]); \ |
2025 | } \ |
2026 | gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], \ |
2027 | set_fprf, Rc(ctx->opcode) != 0); \ |
2028 | } |
2029 | #define GEN_FLOAT_AB(name, op2, inval, set_fprf, type) \
2030 | _GEN_FLOAT_AB(name, name, 0x3F, op2, inval, 0, set_fprf, type); \
2031 | _GEN_FLOAT_AB(name##s, name, 0x3B, op2, inval, 1, set_fprf, type);
2032 | |
2033 | #define _GEN_FLOAT_AC(name, op, op1, op2, inval, isfloat, set_fprf, type) \
2034 | static void gen_f##name(DisasContext *ctx) \ |
2035 | { \ |
2036 | if (unlikely(!ctx->fpu_enabled)) { \
2037 | gen_exception(ctx, POWERPC_EXCP_FPU); \ |
2038 | return; \ |
2039 | } \ |
2040 | /* NIP cannot be restored if the memory exception comes from an helper */ \ |
2041 | gen_update_nip(ctx, ctx->nip - 4); \ |
2042 | gen_reset_fpstatus(); \ |
2043 | gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env, \ |
2044 | cpu_fpr[rA(ctx->opcode)], \ |
2045 | cpu_fpr[rC(ctx->opcode)]); \ |
2046 | if (isfloat) { \ |
2047 | gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env, \ |
2048 | cpu_fpr[rD(ctx->opcode)]); \ |
2049 | } \ |
2050 | gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], \ |
2051 | set_fprf, Rc(ctx->opcode) != 0); \ |
2052 | } |
2053 | #define GEN_FLOAT_AC(name, op2, inval, set_fprf, type) \
2054 | _GEN_FLOAT_AC(name, name, 0x3F, op2, inval, 0, set_fprf, type); \
2055 | _GEN_FLOAT_AC(name##s, name, 0x3B, op2, inval, 1, set_fprf, type);
2056 | |
2057 | #define GEN_FLOAT_B(name, op2, op3, set_fprf, type) \
2058 | static void gen_f##name(DisasContext *ctx) \ |
2059 | { \ |
2060 | if (unlikely(!ctx->fpu_enabled)) { \
2061 | gen_exception(ctx, POWERPC_EXCP_FPU); \ |
2062 | return; \ |
2063 | } \ |
2064 | /* NIP cannot be restored if the memory exception comes from an helper */ \ |
2065 | gen_update_nip(ctx, ctx->nip - 4); \ |
2066 | gen_reset_fpstatus(); \ |
2067 | gen_helper_f##name(cpu_fpr[rD(ctx->opcode)], cpu_env, \ |
2068 | cpu_fpr[rB(ctx->opcode)]); \ |
2069 | gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], \ |
2070 | set_fprf, Rc(ctx->opcode) != 0); \ |
2071 | } |
2072 | |
2073 | #define GEN_FLOAT_BS(name, op1, op2, set_fprf, type) \
2074 | static void gen_f##name(DisasContext *ctx) \ |
2075 | { \ |
2076 | if (unlikely(!ctx->fpu_enabled)) { \
2077 | gen_exception(ctx, POWERPC_EXCP_FPU); \ |
2078 | return; \ |
2079 | } \ |
2080 | /* NIP cannot be restored if the memory exception comes from an helper */ \ |
2081 | gen_update_nip(ctx, ctx->nip - 4); \ |
2082 | gen_reset_fpstatus(); \ |
2083 | gen_helper_f##name(cpu_fpr[rD(ctx->opcode)], cpu_env, \ |
2084 | cpu_fpr[rB(ctx->opcode)]); \ |
2085 | gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], \ |
2086 | set_fprf, Rc(ctx->opcode) != 0); \ |
2087 | } |
2088 | |
2089 | /* fadd - fadds */ |
2090 | GEN_FLOAT_AB(add, 0x15, 0x000007C0, 1, PPC_FLOAT);
2091 | /* fdiv - fdivs */
2092 | GEN_FLOAT_AB(div, 0x12, 0x000007C0, 1, PPC_FLOAT);
2093 | /* fmul - fmuls */
2094 | GEN_FLOAT_AC(mul, 0x19, 0x0000F800, 1, PPC_FLOAT);
2095 | 
2096 | /* fre */
2097 | GEN_FLOAT_BS(re, 0x3F, 0x18, 1, PPC_FLOAT_EXT);
2098 | 
2099 | /* fres */
2100 | GEN_FLOAT_BS(res, 0x3B, 0x18, 1, PPC_FLOAT_FRES);
2101 | 
2102 | /* frsqrte */
2103 | GEN_FLOAT_BS(rsqrte, 0x3F, 0x1A, 1, PPC_FLOAT_FRSQRTE);
2104 | |
2105 | /* frsqrtes */ |
2106 | static void gen_frsqrtes(DisasContext *ctx) |
2107 | { |
2108 | if (unlikely(!ctx->fpu_enabled)) {
2109 | gen_exception(ctx, POWERPC_EXCP_FPU); |
2110 | return; |
2111 | } |
2112 | /* NIP cannot be restored if the memory exception comes from an helper */ |
2113 | gen_update_nip(ctx, ctx->nip - 4); |
2114 | gen_reset_fpstatus(); |
2115 | gen_helper_frsqrte(cpu_fpr[rD(ctx->opcode)], cpu_env, |
2116 | cpu_fpr[rB(ctx->opcode)]); |
2117 | gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env, |
2118 | cpu_fpr[rD(ctx->opcode)]); |
2119 | gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 1, Rc(ctx->opcode) != 0); |
2120 | } |
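/*
 * Note: frsqrtes is written out by hand rather than with GEN_FLOAT_BS,
 * presumably because there is no dedicated single-precision helper; it reuses
 * gen_helper_frsqrte and then rounds the result with gen_helper_frsp, the
 * same pattern the isfloat macro variants generate.
 */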
2121 | |
2122 | /* fsel */ |
2123 | _GEN_FLOAT_ACB(sel, sel, 0x3F, 0x17, 0, 0, PPC_FLOAT_FSEL);
2124 | /* fsub - fsubs */
2125 | GEN_FLOAT_AB(sub, 0x14, 0x000007C0, 1, PPC_FLOAT);
2126 | /* Optional: */ |
2127 | |
2128 | /* fsqrt */ |
2129 | static void gen_fsqrt(DisasContext *ctx) |
2130 | { |
2131 | if (unlikely(!ctx->fpu_enabled)) {
2132 | gen_exception(ctx, POWERPC_EXCP_FPU); |
2133 | return; |
2134 | } |
2135 | /* NIP cannot be restored if the memory exception comes from an helper */ |
2136 | gen_update_nip(ctx, ctx->nip - 4); |
2137 | gen_reset_fpstatus(); |
2138 | gen_helper_fsqrt(cpu_fpr[rD(ctx->opcode)], cpu_env, |
2139 | cpu_fpr[rB(ctx->opcode)]); |
2140 | gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 1, Rc(ctx->opcode) != 0); |
2141 | } |
2142 | |
2143 | static void gen_fsqrts(DisasContext *ctx) |
2144 | { |
2145 | if (unlikely(!ctx->fpu_enabled)) {
2146 | gen_exception(ctx, POWERPC_EXCP_FPU); |
2147 | return; |
2148 | } |
2149 | /* NIP cannot be restored if the memory exception comes from an helper */ |
2150 | gen_update_nip(ctx, ctx->nip - 4); |
2151 | gen_reset_fpstatus(); |
2152 | gen_helper_fsqrt(cpu_fpr[rD(ctx->opcode)], cpu_env, |
2153 | cpu_fpr[rB(ctx->opcode)]); |
2154 | gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env, |
2155 | cpu_fpr[rD(ctx->opcode)]); |
2156 | gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 1, Rc(ctx->opcode) != 0); |
2157 | } |
2158 | |
2159 | /*** Floating-Point multiply-and-add ***/ |
2160 | /* fmadd - fmadds */ |
2161 | GEN_FLOAT_ACB(madd, 0x1D, 1, PPC_FLOAT);
2162 | /* fmsub - fmsubs */
2163 | GEN_FLOAT_ACB(msub, 0x1C, 1, PPC_FLOAT);
2164 | /* fnmadd - fnmadds */
2165 | GEN_FLOAT_ACB(nmadd, 0x1F, 1, PPC_FLOAT);
2166 | /* fnmsub - fnmsubs */
2167 | GEN_FLOAT_ACB(nmsub, 0x1E, 1, PPC_FLOAT);
2168 | |
2169 | /*** Floating-Point round & convert ***/ |
2170 | /* fctiw */ |
2171 | GEN_FLOAT_B(ctiw, 0x0E, 0x00, 0, PPC_FLOAT);
2172 | /* fctiwz */
2173 | GEN_FLOAT_B(ctiwz, 0x0F, 0x00, 0, PPC_FLOAT);
2174 | /* frsp */
2175 | GEN_FLOAT_B(rsp, 0x0C, 0x00, 1, PPC_FLOAT);
2176 | #if defined(TARGET_PPC64)
2177 | /* fcfid */
2178 | GEN_FLOAT_B(cfid, 0x0E, 0x1A, 1, PPC_64B);
2179 | /* fctid */
2180 | GEN_FLOAT_B(ctid, 0x0E, 0x19, 0, PPC_64B);
2181 | /* fctidz */
2182 | GEN_FLOAT_B(ctidz, 0x0F, 0x19, 0, PPC_64B);
2183 | #endif
2184 | 
2185 | /* frin */
2186 | GEN_FLOAT_B(rin, 0x08, 0x0C, 1, PPC_FLOAT_EXT);
2187 | /* friz */
2188 | GEN_FLOAT_B(riz, 0x08, 0x0D, 1, PPC_FLOAT_EXT);
2189 | /* frip */
2190 | GEN_FLOAT_B(rip, 0x08, 0x0E, 1, PPC_FLOAT_EXT);
2191 | /* frim */
2192 | GEN_FLOAT_B(rim, 0x08, 0x0F, 1, PPC_FLOAT_EXT);
2193 | |
2194 | /*** Floating-Point compare ***/ |
2195 | |
2196 | /* fcmpo */ |
2197 | static void gen_fcmpo(DisasContext *ctx) |
2198 | { |
2199 | TCGv_i32 crf; |
2200 | if (unlikely(!ctx->fpu_enabled)) {
2201 | gen_exception(ctx, POWERPC_EXCP_FPU); |
2202 | return; |
2203 | } |
2204 | /* NIP cannot be restored if the memory exception comes from an helper */ |
2205 | gen_update_nip(ctx, ctx->nip - 4); |
2206 | gen_reset_fpstatus(); |
2207 | crf = tcg_const_i32(crfD(ctx->opcode)); |
2208 | gen_helper_fcmpo(cpu_env, cpu_fpr[rA(ctx->opcode)], |
2209 | cpu_fpr[rB(ctx->opcode)], crf); |
2210 | tcg_temp_free_i32(crf); |
2211 | gen_helper_float_check_status(cpu_env); |
2212 | } |
2213 | |
2214 | /* fcmpu */ |
2215 | static void gen_fcmpu(DisasContext *ctx) |
2216 | { |
2217 | TCGv_i32 crf; |
2218 | if (unlikely(!ctx->fpu_enabled)) {
2219 | gen_exception(ctx, POWERPC_EXCP_FPU); |
2220 | return; |
2221 | } |
2222 | /* NIP cannot be restored if the memory exception comes from an helper */ |
2223 | gen_update_nip(ctx, ctx->nip - 4); |
2224 | gen_reset_fpstatus(); |
2225 | crf = tcg_const_i32(crfD(ctx->opcode)); |
2226 | gen_helper_fcmpu(cpu_env, cpu_fpr[rA(ctx->opcode)], |
2227 | cpu_fpr[rB(ctx->opcode)], crf); |
2228 | tcg_temp_free_i32(crf); |
2229 | gen_helper_float_check_status(cpu_env); |
2230 | } |
2231 | |
2232 | /*** Floating-point move ***/ |
2233 | /* fabs */ |
2234 | /* XXX: beware that fabs never checks for NaNs nor update FPSCR */ |
2235 | static void gen_fabs(DisasContext *ctx) |
2236 | { |
2237 | if (unlikely(!ctx->fpu_enabled)) {
2238 | gen_exception(ctx, POWERPC_EXCP_FPU); |
2239 | return; |
2240 | } |
2241 | tcg_gen_andi_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)], |
2242 | ~(1ULL << 63)); |
2243 | gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 0, Rc(ctx->opcode) != 0); |
2244 | } |
2245 | |
2246 | /* fmr - fmr. */ |
2247 | /* XXX: beware that fmr never checks for NaNs nor update FPSCR */ |
2248 | static void gen_fmr(DisasContext *ctx) |
2249 | { |
2250 | if (unlikely(!ctx->fpu_enabled)) {
2251 | gen_exception(ctx, POWERPC_EXCP_FPU); |
2252 | return; |
2253 | } |
2254 | tcg_gen_mov_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]); |
2255 | gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 0, Rc(ctx->opcode) != 0); |
2256 | } |
2257 | |
2258 | /* fnabs */ |
2259 | /* XXX: beware that fnabs never checks for NaNs nor update FPSCR */ |
2260 | static void gen_fnabs(DisasContext *ctx) |
2261 | { |
2262 | if (unlikely(!ctx->fpu_enabled)) {
2263 | gen_exception(ctx, POWERPC_EXCP_FPU); |
2264 | return; |
2265 | } |
2266 | tcg_gen_ori_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)], |
2267 | 1ULL << 63); |
2268 | gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 0, Rc(ctx->opcode) != 0); |
2269 | } |
2270 | |
2271 | /* fneg */ |
2272 | /* XXX: beware that fneg never checks for NaNs nor update FPSCR */ |
2273 | static void gen_fneg(DisasContext *ctx) |
2274 | { |
2275 | if (unlikely(!ctx->fpu_enabled)) {
2276 | gen_exception(ctx, POWERPC_EXCP_FPU); |
2277 | return; |
2278 | } |
2279 | tcg_gen_xori_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)], |
2280 | 1ULL << 63); |
2281 | gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 0, Rc(ctx->opcode) != 0); |
2282 | } |
2283 | |
2284 | /* fcpsgn: PowerPC 2.05 specification */ |
2285 | /* XXX: beware that fcpsgn never checks for NaNs nor update FPSCR */ |
2286 | static void gen_fcpsgn(DisasContext *ctx) |
2287 | { |
2288 | if (unlikely(!ctx->fpu_enabled)) {
2289 | gen_exception(ctx, POWERPC_EXCP_FPU); |
2290 | return; |
2291 | } |
2292 | tcg_gen_deposit_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)], |
2293 | cpu_fpr[rB(ctx->opcode)], 0, 63); |
2294 | gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 0, Rc(ctx->opcode) != 0); |
2295 | } |
2296 | |
2297 | /*** Floating-Point status & ctrl register ***/ |
2298 | |
2299 | /* mcrfs */ |
2300 | static void gen_mcrfs(DisasContext *ctx) |
2301 | { |
2302 | TCGv tmp = tcg_temp_new();
2303 | int bfa;
2304 | 
2305 | if (unlikely(!ctx->fpu_enabled)) {
2306 | gen_exception(ctx, POWERPC_EXCP_FPU);
2307 | return;
2308 | }
2309 | bfa = 4 * (7 - crfS(ctx->opcode));
2310 | tcg_gen_shri_tl(tmp, cpu_fpscr, bfa);
2311 | tcg_gen_trunc_tl_i32(cpu_crf[crfD(ctx->opcode)], tmp);
2312 | tcg_temp_free(tmp);
2313 | tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], 0xf);
2314 | tcg_gen_andi_tl(cpu_fpscr, cpu_fpscr, ~(0xF << bfa));
2315 | } |
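/*
 * Note: mcrfs moves one 4-bit FPSCR field into the chosen CR field and then
 * clears that field in the FPSCR, which is why the code above both truncates
 * the shifted FPSCR value into cpu_crf[crfD] (masked to 4 bits) and ANDs the
 * FPSCR with ~(0xF << bfa) afterwards.
 */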
2316 | |
2317 | /* mffs */ |
2318 | static void gen_mffs(DisasContext *ctx) |
2319 | { |
2320 | if (unlikely(!ctx->fpu_enabled)) {
2321 | gen_exception(ctx, POWERPC_EXCP_FPU); |
2322 | return; |
2323 | } |
2324 | gen_reset_fpstatus(); |
2325 | tcg_gen_extu_tl_i64tcg_gen_extu_i32_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpscr); |
2326 | gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 0, Rc(ctx->opcode) != 0); |
2327 | } |
2328 | |
2329 | /* mtfsb0 */ |
2330 | static void gen_mtfsb0(DisasContext *ctx) |
2331 | { |
2332 | uint8_t crb; |
2333 | |
2334 | if (unlikely(!ctx->fpu_enabled)__builtin_expect(!!(!ctx->fpu_enabled), 0)) { |
2335 | gen_exception(ctx, POWERPC_EXCP_FPU); |
2336 | return; |
2337 | } |
2338 | crb = 31 - crbD(ctx->opcode); |
2339 | gen_reset_fpstatus(); |
2340 | if (likely(crb != FPSCR_FEX && crb != FPSCR_VX)__builtin_expect(!!(crb != 30 && crb != 29), 1)) { |
2341 | TCGv_i32 t0; |
2342 | /* NIP cannot be restored if the memory exception comes from a helper */
2343 | gen_update_nip(ctx, ctx->nip - 4); |
2344 | t0 = tcg_const_i32(crb); |
2345 | gen_helper_fpscr_clrbit(cpu_env, t0); |
2346 | tcg_temp_free_i32(t0); |
2347 | } |
2348 | if (unlikely(Rc(ctx->opcode) != 0)__builtin_expect(!!(Rc(ctx->opcode) != 0), 0)) { |
2349 | tcg_gen_trunc_tl_i32tcg_gen_mov_i32(cpu_crf[1], cpu_fpscr); |
2350 | tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX28); |
2351 | } |
2352 | } |
2353 | |
2354 | /* mtfsb1 */ |
2355 | static void gen_mtfsb1(DisasContext *ctx) |
2356 | { |
2357 | uint8_t crb; |
2358 | |
2359 | if (unlikely(!ctx->fpu_enabled)__builtin_expect(!!(!ctx->fpu_enabled), 0)) { |
2360 | gen_exception(ctx, POWERPC_EXCP_FPU); |
2361 | return; |
2362 | } |
2363 | crb = 31 - crbD(ctx->opcode); |
2364 | gen_reset_fpstatus(); |
2365 | /* XXX: we pretend we can only do IEEE floating-point computations */ |
2366 | if (likely(crb != FPSCR_FEX && crb != FPSCR_VX && crb != FPSCR_NI)__builtin_expect(!!(crb != 30 && crb != 29 && crb != 2), 1)) { |
2367 | TCGv_i32 t0; |
2368 | /* NIP cannot be restored if the memory exception comes from a helper */
2369 | gen_update_nip(ctx, ctx->nip - 4); |
2370 | t0 = tcg_const_i32(crb); |
2371 | gen_helper_fpscr_setbit(cpu_env, t0); |
2372 | tcg_temp_free_i32(t0); |
2373 | } |
2374 | if (unlikely(Rc(ctx->opcode) != 0)__builtin_expect(!!(Rc(ctx->opcode) != 0), 0)) { |
2375 | tcg_gen_trunc_tl_i32tcg_gen_mov_i32(cpu_crf[1], cpu_fpscr); |
2376 | tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX28); |
2377 | } |
2378 | /* We can raise a deferred exception */
2379 | gen_helper_float_check_status(cpu_env); |
2380 | } |
2381 | |
2382 | /* mtfsf */ |
2383 | static void gen_mtfsf(DisasContext *ctx) |
2384 | { |
2385 | TCGv_i32 t0; |
2386 | int flm, l, w; |
2387 | |
2388 | if (unlikely(!ctx->fpu_enabled)__builtin_expect(!!(!ctx->fpu_enabled), 0)) { |
2389 | gen_exception(ctx, POWERPC_EXCP_FPU); |
2390 | return; |
2391 | } |
2392 | flm = FPFLM(ctx->opcode); |
2393 | l = FPL(ctx->opcode); |
2394 | w = FPW(ctx->opcode); |
2395 | if (unlikely(w & !(ctx->insns_flags2 & PPC2_ISA205))__builtin_expect(!!(w & !(ctx->insns_flags2 & PPC2_ISA205 )), 0)) { |
2396 | gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); |
2397 | return; |
2398 | } |
2399 | /* NIP cannot be restored if the memory exception comes from a helper */
2400 | gen_update_nip(ctx, ctx->nip - 4); |
2401 | gen_reset_fpstatus(); |
2402 | if (l) { |
2403 | t0 = tcg_const_i32((ctx->insns_flags2 & PPC2_ISA205) ? 0xffff : 0xff); |
2404 | } else { |
2405 | t0 = tcg_const_i32(flm << (w * 8)); |
2406 | } |
2407 | gen_helper_store_fpscr(cpu_env, cpu_fpr[rB(ctx->opcode)], t0); |
2408 | tcg_temp_free_i32(t0); |
2409 | if (unlikely(Rc(ctx->opcode) != 0)__builtin_expect(!!(Rc(ctx->opcode) != 0), 0)) { |
2410 | tcg_gen_trunc_tl_i32tcg_gen_mov_i32(cpu_crf[1], cpu_fpscr); |
2411 | tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX28); |
2412 | } |
2413 | /* We can raise a deferred exception */
2414 | gen_helper_float_check_status(cpu_env); |
2415 | } |
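/*
 * The mask handed to the store_fpscr helper selects which FPSCR fields
 * mtfsf may modify: with L=1 all fields are written (0xffff when the
 * ISA 2.05 form with the W bit is available, 0xff otherwise), with L=0
 * only the fields named by FLM, shifted up by 8 when W=1 addresses the
 * upper group of fields.
 */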
2416 | |
2417 | /* mtfsfi */ |
2418 | static void gen_mtfsfi(DisasContext *ctx) |
2419 | { |
2420 | int bf, sh, w; |
2421 | TCGv_i64 t0; |
2422 | TCGv_i32 t1; |
2423 | |
2424 | if (unlikely(!ctx->fpu_enabled)__builtin_expect(!!(!ctx->fpu_enabled), 0)) { |
2425 | gen_exception(ctx, POWERPC_EXCP_FPU); |
2426 | return; |
2427 | } |
2428 | w = FPW(ctx->opcode); |
2429 | bf = FPBF(ctx->opcode); |
2430 | if (unlikely(w & !(ctx->insns_flags2 & PPC2_ISA205))__builtin_expect(!!(w & !(ctx->insns_flags2 & PPC2_ISA205 )), 0)) { |
2431 | gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); |
2432 | return; |
2433 | } |
2434 | sh = (8 * w) + 7 - bf; |
2435 | /* NIP cannot be restored if the memory exception comes from a helper */
2436 | gen_update_nip(ctx, ctx->nip - 4); |
2437 | gen_reset_fpstatus(); |
2438 | t0 = tcg_const_i64(((uint64_t)FPIMM(ctx->opcode)) << (4 * sh)); |
2439 | t1 = tcg_const_i32(1 << sh); |
2440 | gen_helper_store_fpscr(cpu_env, t0, t1); |
2441 | tcg_temp_free_i64(t0); |
2442 | tcg_temp_free_i32(t1); |
2443 | if (unlikely(Rc(ctx->opcode) != 0)__builtin_expect(!!(Rc(ctx->opcode) != 0), 0)) { |
2444 | tcg_gen_trunc_tl_i32tcg_gen_mov_i32(cpu_crf[1], cpu_fpscr); |
2445 | tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX28); |
2446 | } |
2447 | /* We can raise a deferred exception */
2448 | gen_helper_float_check_status(cpu_env); |
2449 | } |
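/*
 * For mtfsfi the 4-bit immediate is placed into FPSCR field BF:
 * sh = (8 * w) + 7 - bf converts the big-endian field number into a
 * nibble index from the low end, the immediate is shifted to 4 * sh and
 * the single-bit mask (1 << sh) tells store_fpscr which field to update.
 */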
2450 | |
2451 | /*** Addressing modes ***/ |
2452 | /* Register indirect with immediate index: EA = (rA|0) + SIMM */
2453 | static inline void gen_addr_imm_index(DisasContext *ctx, TCGvTCGv_i32 EA, |
2454 | target_long maskl) |
2455 | { |
2456 | target_long simm = SIMM(ctx->opcode); |
2457 | |
2458 | simm &= ~maskl; |
2459 | if (rA(ctx->opcode) == 0) { |
2460 | if (NARROW_MODE(ctx)0) { |
2461 | simm = (uint32_t)simm; |
2462 | } |
2463 | tcg_gen_movi_tltcg_gen_movi_i32(EA, simm); |
2464 | } else if (likely(simm != 0)__builtin_expect(!!(simm != 0), 1)) { |
2465 | tcg_gen_addi_tltcg_gen_addi_i32(EA, cpu_gpr[rA(ctx->opcode)], simm); |
2466 | if (NARROW_MODE(ctx)0) { |
2467 | tcg_gen_ext32u_tltcg_gen_mov_i32(EA, EA); |
2468 | } |
2469 | } else { |
2470 | if (NARROW_MODE(ctx)0) { |
2471 | tcg_gen_ext32u_tltcg_gen_mov_i32(EA, cpu_gpr[rA(ctx->opcode)]); |
2472 | } else { |
2473 | tcg_gen_mov_tltcg_gen_mov_i32(EA, cpu_gpr[rA(ctx->opcode)]); |
2474 | } |
2475 | } |
2476 | } |
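/*
 * gen_addr_imm_index computes the D-form effective address
 * EA = (rA|0) + SIMM. maskl strips the low displacement bits that DS/DQ
 * forms reuse as opcode bits (0x03 for ld/std, 0x0F for lq), and the
 * NARROW_MODE truncation, compiled out here since the expansion shows it
 * as constant 0, would clamp the EA to 32 bits on a 64-bit CPU running
 * in 32-bit mode. For example, lwz r3,-4(r1) yields EA = GPR[1] - 4.
 */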
2477 | |
2478 | static inline void gen_addr_reg_index(DisasContext *ctx, TCGvTCGv_i32 EA) |
2479 | { |
2480 | if (rA(ctx->opcode) == 0) { |
2481 | if (NARROW_MODE(ctx)0) { |
2482 | tcg_gen_ext32u_tltcg_gen_mov_i32(EA, cpu_gpr[rB(ctx->opcode)]); |
2483 | } else { |
2484 | tcg_gen_mov_tltcg_gen_mov_i32(EA, cpu_gpr[rB(ctx->opcode)]); |
2485 | } |
2486 | } else { |
2487 | tcg_gen_add_tltcg_gen_add_i32(EA, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); |
2488 | if (NARROW_MODE(ctx)0) { |
2489 | tcg_gen_ext32u_tltcg_gen_mov_i32(EA, EA); |
2490 | } |
2491 | } |
2492 | } |
2493 | |
2494 | static inline void gen_addr_register(DisasContext *ctx, TCGvTCGv_i32 EA) |
2495 | { |
2496 | if (rA(ctx->opcode) == 0) { |
2497 | tcg_gen_movi_tltcg_gen_movi_i32(EA, 0); |
2498 | } else if (NARROW_MODE(ctx)0) { |
2499 | tcg_gen_ext32u_tltcg_gen_mov_i32(EA, cpu_gpr[rA(ctx->opcode)]); |
2500 | } else { |
2501 | tcg_gen_mov_tltcg_gen_mov_i32(EA, cpu_gpr[rA(ctx->opcode)]); |
2502 | } |
2503 | } |
2504 | |
2505 | static inline void gen_addr_add(DisasContext *ctx, TCGvTCGv_i32 ret, TCGvTCGv_i32 arg1, |
2506 | target_long val) |
2507 | { |
2508 | tcg_gen_addi_tltcg_gen_addi_i32(ret, arg1, val); |
2509 | if (NARROW_MODE(ctx)0) { |
2510 | tcg_gen_ext32u_tltcg_gen_mov_i32(ret, ret); |
2511 | } |
2512 | } |
2513 | |
2514 | static inline void gen_check_align(DisasContext *ctx, TCGvTCGv_i32 EA, int mask) |
2515 | { |
2516 | int l1 = gen_new_label(); |
2517 | TCGvTCGv_i32 t0 = tcg_temp_new()tcg_temp_new_i32(); |
2518 | TCGv_i32 t1, t2; |
2519 | /* NIP cannot be restored if the memory exception comes from a helper */
2520 | gen_update_nip(ctx, ctx->nip - 4); |
2521 | tcg_gen_andi_tltcg_gen_andi_i32(t0, EA, mask); |
2522 | tcg_gen_brcondi_tltcg_gen_brcondi_i32(TCG_COND_EQ, t0, 0, l1); |
2523 | t1 = tcg_const_i32(POWERPC_EXCP_ALIGN); |
2524 | t2 = tcg_const_i32(0); |
2525 | gen_helper_raise_exception_err(cpu_env, t1, t2); |
2526 | tcg_temp_free_i32(t1); |
2527 | tcg_temp_free_i32(t2); |
2528 | gen_set_label(l1); |
2529 | tcg_temp_freetcg_temp_free_i32(t0); |
2530 | } |
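/*
 * gen_check_align is the alignment guard used by lwarx/stwcx. and
 * friends: it masks the low EA bits and branches past the exception when
 * they are all zero; otherwise raise_exception_err delivers
 * POWERPC_EXCP_ALIGN. The NIP is updated first because a helper cannot
 * unwind translated code to recover the faulting instruction address.
 */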
2531 | |
2532 | /*** Integer load ***/ |
2533 | static inline void gen_qemu_ld8u(DisasContext *ctx, TCGvTCGv_i32 arg1, TCGvTCGv_i32 arg2) |
2534 | { |
2535 | tcg_gen_qemu_ld8u(arg1, arg2, ctx->mem_idx); |
2536 | } |
2537 | |
2538 | static inline void gen_qemu_ld8s(DisasContext *ctx, TCGvTCGv_i32 arg1, TCGvTCGv_i32 arg2) |
2539 | { |
2540 | tcg_gen_qemu_ld8s(arg1, arg2, ctx->mem_idx); |
2541 | } |
2542 | |
2543 | static inline void gen_qemu_ld16u(DisasContext *ctx, TCGvTCGv_i32 arg1, TCGvTCGv_i32 arg2) |
2544 | { |
2545 | tcg_gen_qemu_ld16u(arg1, arg2, ctx->mem_idx); |
2546 | if (unlikely(ctx->le_mode)__builtin_expect(!!(ctx->le_mode), 0)) { |
2547 | tcg_gen_bswap16_tltcg_gen_bswap16_i32(arg1, arg1); |
2548 | } |
2549 | } |
2550 | |
2551 | static inline void gen_qemu_ld16s(DisasContext *ctx, TCGvTCGv_i32 arg1, TCGvTCGv_i32 arg2) |
2552 | { |
2553 | if (unlikely(ctx->le_mode)__builtin_expect(!!(ctx->le_mode), 0)) { |
2554 | tcg_gen_qemu_ld16u(arg1, arg2, ctx->mem_idx); |
2555 | tcg_gen_bswap16_tltcg_gen_bswap16_i32(arg1, arg1); |
2556 | tcg_gen_ext16s_tltcg_gen_ext16s_i32(arg1, arg1); |
2557 | } else { |
2558 | tcg_gen_qemu_ld16s(arg1, arg2, ctx->mem_idx); |
2559 | } |
2560 | } |
2561 | |
2562 | static inline void gen_qemu_ld32u(DisasContext *ctx, TCGvTCGv_i32 arg1, TCGvTCGv_i32 arg2) |
2563 | { |
2564 | tcg_gen_qemu_ld32u(arg1, arg2, ctx->mem_idx); |
2565 | if (unlikely(ctx->le_mode)__builtin_expect(!!(ctx->le_mode), 0)) { |
2566 | tcg_gen_bswap32_tltcg_gen_bswap32_i32(arg1, arg1); |
2567 | } |
2568 | } |
2569 | |
2570 | static void gen_qemu_ld32u_i64(DisasContext *ctx, TCGv_i64 val, TCGvTCGv_i32 addr) |
2571 | { |
2572 | TCGvTCGv_i32 tmp = tcg_temp_new()tcg_temp_new_i32(); |
2573 | gen_qemu_ld32u(ctx, tmp, addr); |
2574 | tcg_gen_extu_tl_i64tcg_gen_extu_i32_i64(val, tmp); |
2575 | tcg_temp_freetcg_temp_free_i32(tmp); |
2576 | } |
2577 | |
2578 | static inline void gen_qemu_ld32s(DisasContext *ctx, TCGvTCGv_i32 arg1, TCGvTCGv_i32 arg2) |
2579 | { |
2580 | if (unlikely(ctx->le_mode)__builtin_expect(!!(ctx->le_mode), 0)) { |
2581 | tcg_gen_qemu_ld32u(arg1, arg2, ctx->mem_idx); |
2582 | tcg_gen_bswap32_tltcg_gen_bswap32_i32(arg1, arg1); |
2583 | tcg_gen_ext32s_tltcg_gen_mov_i32(arg1, arg1); |
2584 | } else |
2585 | tcg_gen_qemu_ld32s(arg1, arg2, ctx->mem_idx); |
2586 | } |
2587 | |
2588 | static inline void gen_qemu_ld64(DisasContext *ctx, TCGv_i64 arg1, TCGvTCGv_i32 arg2) |
2589 | { |
2590 | tcg_gen_qemu_ld64(arg1, arg2, ctx->mem_idx); |
2591 | if (unlikely(ctx->le_mode)__builtin_expect(!!(ctx->le_mode), 0)) { |
2592 | tcg_gen_bswap64_i64(arg1, arg1); |
2593 | } |
2594 | } |
2595 | |
2596 | static inline void gen_qemu_st8(DisasContext *ctx, TCGvTCGv_i32 arg1, TCGvTCGv_i32 arg2) |
2597 | { |
2598 | tcg_gen_qemu_st8(arg1, arg2, ctx->mem_idx); |
2599 | } |
2600 | |
2601 | static inline void gen_qemu_st16(DisasContext *ctx, TCGvTCGv_i32 arg1, TCGvTCGv_i32 arg2) |
2602 | { |
2603 | if (unlikely(ctx->le_mode)__builtin_expect(!!(ctx->le_mode), 0)) { |
2604 | TCGvTCGv_i32 t0 = tcg_temp_new()tcg_temp_new_i32(); |
2605 | tcg_gen_ext16u_tltcg_gen_ext16u_i32(t0, arg1); |
2606 | tcg_gen_bswap16_tltcg_gen_bswap16_i32(t0, t0); |
2607 | tcg_gen_qemu_st16(t0, arg2, ctx->mem_idx); |
2608 | tcg_temp_freetcg_temp_free_i32(t0); |
2609 | } else { |
2610 | tcg_gen_qemu_st16(arg1, arg2, ctx->mem_idx); |
2611 | } |
2612 | } |
2613 | |
2614 | static inline void gen_qemu_st32(DisasContext *ctx, TCGvTCGv_i32 arg1, TCGvTCGv_i32 arg2) |
2615 | { |
2616 | if (unlikely(ctx->le_mode)__builtin_expect(!!(ctx->le_mode), 0)) { |
2617 | TCGvTCGv_i32 t0 = tcg_temp_new()tcg_temp_new_i32(); |
2618 | tcg_gen_ext32u_tltcg_gen_mov_i32(t0, arg1); |
2619 | tcg_gen_bswap32_tltcg_gen_bswap32_i32(t0, t0); |
2620 | tcg_gen_qemu_st32(t0, arg2, ctx->mem_idx); |
2621 | tcg_temp_freetcg_temp_free_i32(t0); |
2622 | } else { |
2623 | tcg_gen_qemu_st32(arg1, arg2, ctx->mem_idx); |
2624 | } |
2625 | } |
2626 | |
2627 | static void gen_qemu_st32_i64(DisasContext *ctx, TCGv_i64 val, TCGvTCGv_i32 addr) |
2628 | { |
2629 | TCGvTCGv_i32 tmp = tcg_temp_new()tcg_temp_new_i32(); |
2630 | tcg_gen_trunc_i64_tltcg_gen_trunc_i64_i32(tmp, val); |
2631 | gen_qemu_st32(ctx, tmp, addr); |
2632 | tcg_temp_freetcg_temp_free_i32(tmp); |
2633 | } |
2634 | |
2635 | static inline void gen_qemu_st64(DisasContext *ctx, TCGv_i64 arg1, TCGvTCGv_i32 arg2) |
2636 | { |
2637 | if (unlikely(ctx->le_mode)__builtin_expect(!!(ctx->le_mode), 0)) { |
2638 | TCGv_i64 t0 = tcg_temp_new_i64(); |
2639 | tcg_gen_bswap64_i64(t0, arg1); |
2640 | tcg_gen_qemu_st64(t0, arg2, ctx->mem_idx); |
2641 | tcg_temp_free_i64(t0); |
2642 | } else |
2643 | tcg_gen_qemu_st64(arg1, arg2, ctx->mem_idx); |
2644 | } |
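/*
 * The gen_qemu_ldXX/gen_qemu_stXX wrappers centralise guest endianness
 * handling: the underlying qemu_ld/st ops use the target's default
 * big-endian order, so in little-endian mode (ctx->le_mode) loads are
 * byte-swapped after the access and stores swap a temporary copy before
 * writing it out.
 */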
2645 | |
2646 | #define GEN_LD(name, ldop, opc, type){ .opc1 = opc, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = type, .type2 = PPC_NONE, . handler = &gen_name, }, .oname = "name", }, \ |
2647 | static void glue(gen_, name)gen_name(DisasContext *ctx) \ |
2648 | { \ |
2649 | TCGvTCGv_i32 EA; \ |
2650 | gen_set_access_type(ctx, ACCESS_INT); \ |
2651 | EA = tcg_temp_new()tcg_temp_new_i32(); \ |
2652 | gen_addr_imm_index(ctx, EA, 0); \ |
2653 | gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \ |
2654 | tcg_temp_freetcg_temp_free_i32(EA); \ |
2655 | } |
2656 | |
2657 | #define GEN_LDU(name, ldop, opc, type){ .opc1 = opc, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = type, .type2 = PPC_NONE, . handler = &gen_nameu, }, .oname = "nameu", }, \ |
2658 | static void glue(gen_, name##u)gen_name##u(DisasContext *ctx) \ |
2659 | { \ |
2660 | TCGvTCGv_i32 EA; \ |
2661 | if (unlikely(rA(ctx->opcode) == 0 || \__builtin_expect(!!(rA(ctx->opcode) == 0 || rA(ctx->opcode ) == rD(ctx->opcode)), 0) |
2662 | rA(ctx->opcode) == rD(ctx->opcode))__builtin_expect(!!(rA(ctx->opcode) == 0 || rA(ctx->opcode ) == rD(ctx->opcode)), 0)) { \ |
2663 | gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \ |
2664 | return; \ |
2665 | } \ |
2666 | gen_set_access_type(ctx, ACCESS_INT); \ |
2667 | EA = tcg_temp_new()tcg_temp_new_i32(); \ |
2668 | if (type == PPC_64B) \ |
2669 | gen_addr_imm_index(ctx, EA, 0x03); \ |
2670 | else \ |
2671 | gen_addr_imm_index(ctx, EA, 0); \ |
2672 | gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \ |
2673 | tcg_gen_mov_tltcg_gen_mov_i32(cpu_gpr[rA(ctx->opcode)], EA); \ |
2674 | tcg_temp_freetcg_temp_free_i32(EA); \ |
2675 | } |
2676 | |
2677 | #define GEN_LDUX(name, ldop, opc2, opc3, type){ .opc1 = 0x1F, .opc2 = opc2, .opc3 = opc3, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = type, .type2 = PPC_NONE, . handler = &gen_nameux, }, .oname = "nameux", }, \ |
2678 | static void glue(gen_, name##ux)gen_name##ux(DisasContext *ctx) \ |
2679 | { \ |
2680 | TCGvTCGv_i32 EA; \ |
2681 | if (unlikely(rA(ctx->opcode) == 0 || \__builtin_expect(!!(rA(ctx->opcode) == 0 || rA(ctx->opcode ) == rD(ctx->opcode)), 0) |
2682 | rA(ctx->opcode) == rD(ctx->opcode))__builtin_expect(!!(rA(ctx->opcode) == 0 || rA(ctx->opcode ) == rD(ctx->opcode)), 0)) { \ |
2683 | gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \ |
2684 | return; \ |
2685 | } \ |
2686 | gen_set_access_type(ctx, ACCESS_INT); \ |
2687 | EA = tcg_temp_new()tcg_temp_new_i32(); \ |
2688 | gen_addr_reg_index(ctx, EA); \ |
2689 | gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \ |
2690 | tcg_gen_mov_tltcg_gen_mov_i32(cpu_gpr[rA(ctx->opcode)], EA); \ |
2691 | tcg_temp_freetcg_temp_free_i32(EA); \ |
2692 | } |
2693 | |
2694 | #define GEN_LDX_E(name, ldop, opc2, opc3, type, type2){ .opc1 = 0x1F, .opc2 = opc2, .opc3 = opc3, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = type, .type2 = type2, .handler = &gen_namex, }, .oname = "namex", }, \ |
2695 | static void glue(gen_, name##x)gen_name##x(DisasContext *ctx) \ |
2696 | { \ |
2697 | TCGvTCGv_i32 EA; \ |
2698 | gen_set_access_type(ctx, ACCESS_INT); \ |
2699 | EA = tcg_temp_new()tcg_temp_new_i32(); \ |
2700 | gen_addr_reg_index(ctx, EA); \ |
2701 | gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \ |
2702 | tcg_temp_freetcg_temp_free_i32(EA); \ |
2703 | } |
2704 | #define GEN_LDX(name, ldop, opc2, opc3, type){ .opc1 = 0x1F, .opc2 = opc2, .opc3 = opc3, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = type, .type2 = PPC_NONE, . handler = &gen_namex, }, .oname = "namex", }, \ |
2705 | GEN_LDX_E(name, ldop, opc2, opc3, type, PPC_NONE){ .opc1 = 0x1F, .opc2 = opc2, .opc3 = opc3, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = type, .type2 = PPC_NONE, . handler = &gen_namex, }, .oname = "namex", }, |
2706 | |
2707 | #define GEN_LDS(name, ldop, op, type){ .opc1 = op | 0x20, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0, } , .handler = { .inval1 = 0x00000000, .type = type, .type2 = PPC_NONE , .handler = &gen_name, }, .oname = "name", }, { .opc1 = op | 0x21, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = type, .type2 = PPC_NONE, .handler = &gen_nameu, }, .oname = "nameu", }, { .opc1 = 0x1F, .opc2 = 0x17, .opc3 = op | 0x01, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = type, .type2 = PPC_NONE, .handler = & gen_nameux, }, .oname = "nameux", }, { .opc1 = 0x1F, .opc2 = 0x17 , .opc3 = op | 0x00, .pad = { 0, }, .handler = { .inval1 = 0x00000001 , .type = type, .type2 = PPC_NONE, .handler = &gen_namex, }, .oname = "namex", }, \{ .opc1 = op | 0x20, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0, } , .handler = { .inval1 = 0x00000000, .type = type, .type2 = PPC_NONE , .handler = &gen_name, }, .oname = "name", }, |
2708 | GEN_LD(name, ldop, op | 0x20, type){ .opc1 = op | 0x20, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0, } , .handler = { .inval1 = 0x00000000, .type = type, .type2 = PPC_NONE , .handler = &gen_name, }, .oname = "name", },; \{ .opc1 = op | 0x21, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0, } , .handler = { .inval1 = 0x00000000, .type = type, .type2 = PPC_NONE , .handler = &gen_nameu, }, .oname = "nameu", }, |
2709 | GEN_LDU(name, ldop, op | 0x21, type){ .opc1 = op | 0x21, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0, } , .handler = { .inval1 = 0x00000000, .type = type, .type2 = PPC_NONE , .handler = &gen_nameu, }, .oname = "nameu", },; \{ .opc1 = 0x1F, .opc2 = 0x17, .opc3 = op | 0x01, .pad = { 0, } , .handler = { .inval1 = 0x00000001, .type = type, .type2 = PPC_NONE , .handler = &gen_nameux, }, .oname = "nameux", }, |
2710 | GEN_LDUX(name, ldop, 0x17, op | 0x01, type){ .opc1 = 0x1F, .opc2 = 0x17, .opc3 = op | 0x01, .pad = { 0, } , .handler = { .inval1 = 0x00000001, .type = type, .type2 = PPC_NONE , .handler = &gen_nameux, }, .oname = "nameux", },; \{ .opc1 = 0x1F, .opc2 = 0x17, .opc3 = op | 0x00, .pad = { 0, } , .handler = { .inval1 = 0x00000001, .type = type, .type2 = PPC_NONE , .handler = &gen_namex, }, .oname = "namex", }, |
2711 | GEN_LDX(name, ldop, 0x17, op | 0x00, type){ .opc1 = 0x1F, .opc2 = 0x17, .opc3 = op | 0x00, .pad = { 0, } , .handler = { .inval1 = 0x00000001, .type = type, .type2 = PPC_NONE , .handler = &gen_namex, }, .oname = "namex", }, |
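/*
 * GEN_LD, GEN_LDU, GEN_LDUX and GEN_LDX(_E) each emit the generator for
 * one addressing form (D-form, D-form with update, X-form with update,
 * plain X-form), and GEN_LDS instantiates all four at once for a load of
 * a given width. The update forms additionally reject rA == 0 and
 * rA == rD and write the computed EA back into rA.
 */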
2712 | |
2713 | /* lbz lbzu lbzux lbzx */ |
2714 | GEN_LDS(lbz, ld8u, 0x02, PPC_INTEGER){ .opc1 = 0x02 | 0x20, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0 , }, .handler = { .inval1 = 0x00000000, .type = PPC_INSNS_BASE , .type2 = PPC_NONE, .handler = &gen_lbz, }, .oname = "lbz" , }, { .opc1 = 0x02 | 0x21, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_INSNS_BASE , .type2 = PPC_NONE, .handler = &gen_lbzu, }, .oname = "lbzu" , }, { .opc1 = 0x1F, .opc2 = 0x17, .opc3 = 0x02 | 0x01, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_INSNS_BASE , .type2 = PPC_NONE, .handler = &gen_lbzux, }, .oname = "lbzux" , }, { .opc1 = 0x1F, .opc2 = 0x17, .opc3 = 0x02 | 0x00, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_INSNS_BASE , .type2 = PPC_NONE, .handler = &gen_lbzx, }, .oname = "lbzx" , },; |
2715 | /* lha lhau lhaux lhax */ |
2716 | GEN_LDS(lha, ld16s, 0x0A, PPC_INTEGER){ .opc1 = 0x0A | 0x20, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0 , }, .handler = { .inval1 = 0x00000000, .type = PPC_INSNS_BASE , .type2 = PPC_NONE, .handler = &gen_lha, }, .oname = "lha" , }, { .opc1 = 0x0A | 0x21, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_INSNS_BASE , .type2 = PPC_NONE, .handler = &gen_lhau, }, .oname = "lhau" , }, { .opc1 = 0x1F, .opc2 = 0x17, .opc3 = 0x0A | 0x01, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_INSNS_BASE , .type2 = PPC_NONE, .handler = &gen_lhaux, }, .oname = "lhaux" , }, { .opc1 = 0x1F, .opc2 = 0x17, .opc3 = 0x0A | 0x00, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_INSNS_BASE , .type2 = PPC_NONE, .handler = &gen_lhax, }, .oname = "lhax" , },; |
2717 | /* lhz lhzu lhzux lhzx */ |
2718 | GEN_LDS(lhz, ld16u, 0x08, PPC_INTEGER){ .opc1 = 0x08 | 0x20, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0 , }, .handler = { .inval1 = 0x00000000, .type = PPC_INSNS_BASE , .type2 = PPC_NONE, .handler = &gen_lhz, }, .oname = "lhz" , }, { .opc1 = 0x08 | 0x21, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_INSNS_BASE , .type2 = PPC_NONE, .handler = &gen_lhzu, }, .oname = "lhzu" , }, { .opc1 = 0x1F, .opc2 = 0x17, .opc3 = 0x08 | 0x01, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_INSNS_BASE , .type2 = PPC_NONE, .handler = &gen_lhzux, }, .oname = "lhzux" , }, { .opc1 = 0x1F, .opc2 = 0x17, .opc3 = 0x08 | 0x00, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_INSNS_BASE , .type2 = PPC_NONE, .handler = &gen_lhzx, }, .oname = "lhzx" , },; |
2719 | /* lwz lwzu lwzux lwzx */ |
2720 | GEN_LDS(lwz, ld32u, 0x00, PPC_INTEGER){ .opc1 = 0x00 | 0x20, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0 , }, .handler = { .inval1 = 0x00000000, .type = PPC_INSNS_BASE , .type2 = PPC_NONE, .handler = &gen_lwz, }, .oname = "lwz" , }, { .opc1 = 0x00 | 0x21, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_INSNS_BASE , .type2 = PPC_NONE, .handler = &gen_lwzu, }, .oname = "lwzu" , }, { .opc1 = 0x1F, .opc2 = 0x17, .opc3 = 0x00 | 0x01, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_INSNS_BASE , .type2 = PPC_NONE, .handler = &gen_lwzux, }, .oname = "lwzux" , }, { .opc1 = 0x1F, .opc2 = 0x17, .opc3 = 0x00 | 0x00, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_INSNS_BASE , .type2 = PPC_NONE, .handler = &gen_lwzx, }, .oname = "lwzx" , },; |
2721 | #if defined(TARGET_PPC64) |
2722 | /* lwaux */ |
2723 | GEN_LDUX(lwa, ld32s, 0x15, 0x0B, PPC_64B){ .opc1 = 0x1F, .opc2 = 0x15, .opc3 = 0x0B, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_64B, .type2 = PPC_NONE , .handler = &gen_lwaux, }, .oname = "lwaux", },; |
2724 | /* lwax */ |
2725 | GEN_LDX(lwa, ld32s, 0x15, 0x0A, PPC_64B){ .opc1 = 0x1F, .opc2 = 0x15, .opc3 = 0x0A, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_64B, .type2 = PPC_NONE , .handler = &gen_lwax, }, .oname = "lwax", },; |
2726 | /* ldux */ |
2727 | GEN_LDUX(ld, ld64, 0x15, 0x01, PPC_64B){ .opc1 = 0x1F, .opc2 = 0x15, .opc3 = 0x01, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_64B, .type2 = PPC_NONE , .handler = &gen_ldux, }, .oname = "ldux", },; |
2728 | /* ldx */ |
2729 | GEN_LDX(ld, ld64, 0x15, 0x00, PPC_64B){ .opc1 = 0x1F, .opc2 = 0x15, .opc3 = 0x00, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_64B, .type2 = PPC_NONE , .handler = &gen_ldx, }, .oname = "ldx", },; |
2730 | |
2731 | static void gen_ld(DisasContext *ctx) |
2732 | { |
2733 | TCGvTCGv_i32 EA; |
2734 | if (Rc(ctx->opcode)) { |
2735 | if (unlikely(rA(ctx->opcode) == 0 ||__builtin_expect(!!(rA(ctx->opcode) == 0 || rA(ctx->opcode ) == rD(ctx->opcode)), 0) |
2736 | rA(ctx->opcode) == rD(ctx->opcode))__builtin_expect(!!(rA(ctx->opcode) == 0 || rA(ctx->opcode ) == rD(ctx->opcode)), 0)) { |
2737 | gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); |
2738 | return; |
2739 | } |
2740 | } |
2741 | gen_set_access_type(ctx, ACCESS_INT); |
2742 | EA = tcg_temp_new()tcg_temp_new_i32(); |
2743 | gen_addr_imm_index(ctx, EA, 0x03); |
2744 | if (ctx->opcode & 0x02) { |
2745 | /* lwa (lwau is undefined) */ |
2746 | gen_qemu_ld32s(ctx, cpu_gpr[rD(ctx->opcode)], EA); |
2747 | } else { |
2748 | /* ld - ldu */ |
2749 | gen_qemu_ld64(ctx, cpu_gpr[rD(ctx->opcode)], EA); |
2750 | } |
2751 | if (Rc(ctx->opcode)) |
2752 | tcg_gen_mov_tltcg_gen_mov_i32(cpu_gpr[rA(ctx->opcode)], EA); |
2753 | tcg_temp_freetcg_temp_free_i32(EA); |
2754 | } |
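/*
 * gen_ld decodes the DS-form load family: the low opcode bits masked off
 * by gen_addr_imm_index(.., 0x03) select the variant, bit 1 choosing lwa
 * (sign-extended 32-bit load) over ld/ldu, while bit 0, read through
 * Rc() here, is the update flag that copies the EA back into rA.
 */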
2755 | |
2756 | /* lq */ |
2757 | static void gen_lq(DisasContext *ctx) |
2758 | { |
2759 | #if defined(CONFIG_USER_ONLY) |
2760 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
2761 | #else |
2762 | int ra, rd; |
2763 | TCGvTCGv_i32 EA; |
2764 | |
2765 | /* Restore CPU state */ |
2766 | if (unlikely(ctx->mem_idx == 0)__builtin_expect(!!(ctx->mem_idx == 0), 0)) { |
2767 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
2768 | return; |
2769 | } |
2770 | ra = rA(ctx->opcode); |
2771 | rd = rD(ctx->opcode); |
2772 | if (unlikely((rd & 1) || rd == ra)__builtin_expect(!!((rd & 1) || rd == ra), 0)) { |
2773 | gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); |
2774 | return; |
2775 | } |
2776 | if (unlikely(ctx->le_mode)__builtin_expect(!!(ctx->le_mode), 0)) { |
2777 | /* Little-endian mode is not handled */ |
2778 | gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_LE); |
2779 | return; |
2780 | } |
2781 | gen_set_access_type(ctx, ACCESS_INT); |
2782 | EA = tcg_temp_new()tcg_temp_new_i32(); |
2783 | gen_addr_imm_index(ctx, EA, 0x0F); |
2784 | gen_qemu_ld64(ctx, cpu_gpr[rd], EA); |
2785 | gen_addr_add(ctx, EA, EA, 8); |
2786 | gen_qemu_ld64(ctx, cpu_gpr[rd+1], EA); |
2787 | tcg_temp_freetcg_temp_free_i32(EA); |
2788 | #endif |
2789 | } |
2790 | #endif |
2791 | |
2792 | /*** Integer store ***/ |
2793 | #define GEN_ST(name, stop, opc, type){ .opc1 = opc, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = type, .type2 = PPC_NONE, . handler = &gen_name, }, .oname = "name", }, \ |
2794 | static void glue(gen_, name)gen_name(DisasContext *ctx) \ |
2795 | { \ |
2796 | TCGvTCGv_i32 EA; \ |
2797 | gen_set_access_type(ctx, ACCESS_INT); \ |
2798 | EA = tcg_temp_new()tcg_temp_new_i32(); \ |
2799 | gen_addr_imm_index(ctx, EA, 0); \ |
2800 | gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \ |
2801 | tcg_temp_freetcg_temp_free_i32(EA); \ |
2802 | } |
2803 | |
2804 | #define GEN_STU(name, stop, opc, type){ .opc1 = opc, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = type, .type2 = PPC_NONE, . handler = &gen_stopu, }, .oname = "stopu", }, \ |
2805 | static void glue(gen_, stop##u)gen_stop##u(DisasContext *ctx) \ |
2806 | { \ |
2807 | TCGvTCGv_i32 EA; \ |
2808 | if (unlikely(rA(ctx->opcode) == 0)__builtin_expect(!!(rA(ctx->opcode) == 0), 0)) { \ |
2809 | gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \ |
2810 | return; \ |
2811 | } \ |
2812 | gen_set_access_type(ctx, ACCESS_INT); \ |
2813 | EA = tcg_temp_new()tcg_temp_new_i32(); \ |
2814 | if (type == PPC_64B) \ |
2815 | gen_addr_imm_index(ctx, EA, 0x03); \ |
2816 | else \ |
2817 | gen_addr_imm_index(ctx, EA, 0); \ |
2818 | gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \ |
2819 | tcg_gen_mov_tltcg_gen_mov_i32(cpu_gpr[rA(ctx->opcode)], EA); \ |
2820 | tcg_temp_freetcg_temp_free_i32(EA); \ |
2821 | } |
2822 | |
2823 | #define GEN_STUX(name, stop, opc2, opc3, type){ .opc1 = 0x1F, .opc2 = opc2, .opc3 = opc3, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = type, .type2 = PPC_NONE, . handler = &gen_nameux, }, .oname = "nameux", }, \ |
2824 | static void glue(gen_, name##ux)gen_name##ux(DisasContext *ctx) \ |
2825 | { \ |
2826 | TCGvTCGv_i32 EA; \ |
2827 | if (unlikely(rA(ctx->opcode) == 0)__builtin_expect(!!(rA(ctx->opcode) == 0), 0)) { \ |
2828 | gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \ |
2829 | return; \ |
2830 | } \ |
2831 | gen_set_access_type(ctx, ACCESS_INT); \ |
2832 | EA = tcg_temp_new()tcg_temp_new_i32(); \ |
2833 | gen_addr_reg_index(ctx, EA); \ |
2834 | gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \ |
2835 | tcg_gen_mov_tltcg_gen_mov_i32(cpu_gpr[rA(ctx->opcode)], EA); \ |
2836 | tcg_temp_freetcg_temp_free_i32(EA); \ |
2837 | } |
2838 | |
2839 | #define GEN_STX_E(name, stop, opc2, opc3, type, type2){ .opc1 = 0x1F, .opc2 = opc2, .opc3 = opc3, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = type, .type2 = type2, .handler = &gen_namex, }, .oname = "namex", }, \ |
2840 | static void glue(gen_, name##x)gen_name##x(DisasContext *ctx) \ |
2841 | { \ |
2842 | TCGvTCGv_i32 EA; \ |
2843 | gen_set_access_type(ctx, ACCESS_INT); \ |
2844 | EA = tcg_temp_new()tcg_temp_new_i32(); \ |
2845 | gen_addr_reg_index(ctx, EA); \ |
2846 | gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \ |
2847 | tcg_temp_freetcg_temp_free_i32(EA); \ |
2848 | } |
2849 | #define GEN_STX(name, stop, opc2, opc3, type){ .opc1 = 0x1F, .opc2 = opc2, .opc3 = opc3, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = type, .type2 = PPC_NONE, . handler = &gen_namex, }, .oname = "namex", }, \ |
2850 | GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE){ .opc1 = 0x1F, .opc2 = opc2, .opc3 = opc3, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = type, .type2 = PPC_NONE, . handler = &gen_namex, }, .oname = "namex", }, |
2851 | |
2852 | #define GEN_STS(name, stop, op, type){ .opc1 = op | 0x20, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0, } , .handler = { .inval1 = 0x00000000, .type = type, .type2 = PPC_NONE , .handler = &gen_name, }, .oname = "name", }, { .opc1 = op | 0x21, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = type, .type2 = PPC_NONE, .handler = &gen_stopu, }, .oname = "stopu", }, { .opc1 = 0x1F, .opc2 = 0x17, .opc3 = op | 0x01, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = type, .type2 = PPC_NONE, .handler = & gen_nameux, }, .oname = "nameux", }, { .opc1 = 0x1F, .opc2 = 0x17 , .opc3 = op | 0x00, .pad = { 0, }, .handler = { .inval1 = 0x00000001 , .type = type, .type2 = PPC_NONE, .handler = &gen_namex, }, .oname = "namex", }, \{ .opc1 = op | 0x20, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0, } , .handler = { .inval1 = 0x00000000, .type = type, .type2 = PPC_NONE , .handler = &gen_name, }, .oname = "name", }, |
2853 | GEN_ST(name, stop, op | 0x20, type){ .opc1 = op | 0x20, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0, } , .handler = { .inval1 = 0x00000000, .type = type, .type2 = PPC_NONE , .handler = &gen_name, }, .oname = "name", },; \{ .opc1 = op | 0x21, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0, } , .handler = { .inval1 = 0x00000000, .type = type, .type2 = PPC_NONE , .handler = &gen_stopu, }, .oname = "stopu", }, |
2854 | GEN_STU(name, stop, op | 0x21, type){ .opc1 = op | 0x21, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0, } , .handler = { .inval1 = 0x00000000, .type = type, .type2 = PPC_NONE , .handler = &gen_stopu, }, .oname = "stopu", },; \{ .opc1 = 0x1F, .opc2 = 0x17, .opc3 = op | 0x01, .pad = { 0, } , .handler = { .inval1 = 0x00000001, .type = type, .type2 = PPC_NONE , .handler = &gen_nameux, }, .oname = "nameux", }, |
2855 | GEN_STUX(name, stop, 0x17, op | 0x01, type){ .opc1 = 0x1F, .opc2 = 0x17, .opc3 = op | 0x01, .pad = { 0, } , .handler = { .inval1 = 0x00000001, .type = type, .type2 = PPC_NONE , .handler = &gen_nameux, }, .oname = "nameux", },; \{ .opc1 = 0x1F, .opc2 = 0x17, .opc3 = op | 0x00, .pad = { 0, } , .handler = { .inval1 = 0x00000001, .type = type, .type2 = PPC_NONE , .handler = &gen_namex, }, .oname = "namex", }, |
2856 | GEN_STX(name, stop, 0x17, op | 0x00, type){ .opc1 = 0x1F, .opc2 = 0x17, .opc3 = op | 0x00, .pad = { 0, } , .handler = { .inval1 = 0x00000001, .type = type, .type2 = PPC_NONE , .handler = &gen_namex, }, .oname = "namex", }, |
2857 | |
2858 | /* stb stbu stbux stbx */ |
2859 | GEN_STS(stb, st8, 0x06, PPC_INTEGER){ .opc1 = 0x06 | 0x20, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0 , }, .handler = { .inval1 = 0x00000000, .type = PPC_INSNS_BASE , .type2 = PPC_NONE, .handler = &gen_stb, }, .oname = "stb" , }, { .opc1 = 0x06 | 0x21, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_INSNS_BASE , .type2 = PPC_NONE, .handler = &gen_st8u, }, .oname = "st8u" , }, { .opc1 = 0x1F, .opc2 = 0x17, .opc3 = 0x06 | 0x01, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_INSNS_BASE , .type2 = PPC_NONE, .handler = &gen_stbux, }, .oname = "stbux" , }, { .opc1 = 0x1F, .opc2 = 0x17, .opc3 = 0x06 | 0x00, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_INSNS_BASE , .type2 = PPC_NONE, .handler = &gen_stbx, }, .oname = "stbx" , },; |
2860 | /* sth sthu sthux sthx */ |
2861 | GEN_STS(sth, st16, 0x0C, PPC_INTEGER){ .opc1 = 0x0C | 0x20, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0 , }, .handler = { .inval1 = 0x00000000, .type = PPC_INSNS_BASE , .type2 = PPC_NONE, .handler = &gen_sth, }, .oname = "sth" , }, { .opc1 = 0x0C | 0x21, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_INSNS_BASE , .type2 = PPC_NONE, .handler = &gen_st16u, }, .oname = "st16u" , }, { .opc1 = 0x1F, .opc2 = 0x17, .opc3 = 0x0C | 0x01, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_INSNS_BASE , .type2 = PPC_NONE, .handler = &gen_sthux, }, .oname = "sthux" , }, { .opc1 = 0x1F, .opc2 = 0x17, .opc3 = 0x0C | 0x00, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_INSNS_BASE , .type2 = PPC_NONE, .handler = &gen_sthx, }, .oname = "sthx" , },; |
2862 | /* stw stwu stwux stwx */ |
2863 | GEN_STS(stw, st32, 0x04, PPC_INTEGER){ .opc1 = 0x04 | 0x20, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0 , }, .handler = { .inval1 = 0x00000000, .type = PPC_INSNS_BASE , .type2 = PPC_NONE, .handler = &gen_stw, }, .oname = "stw" , }, { .opc1 = 0x04 | 0x21, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_INSNS_BASE , .type2 = PPC_NONE, .handler = &gen_st32u, }, .oname = "st32u" , }, { .opc1 = 0x1F, .opc2 = 0x17, .opc3 = 0x04 | 0x01, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_INSNS_BASE , .type2 = PPC_NONE, .handler = &gen_stwux, }, .oname = "stwux" , }, { .opc1 = 0x1F, .opc2 = 0x17, .opc3 = 0x04 | 0x00, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_INSNS_BASE , .type2 = PPC_NONE, .handler = &gen_stwx, }, .oname = "stwx" , },; |
2864 | #if defined(TARGET_PPC64) |
2865 | GEN_STUX(std, st64, 0x15, 0x05, PPC_64B){ .opc1 = 0x1F, .opc2 = 0x15, .opc3 = 0x05, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_64B, .type2 = PPC_NONE , .handler = &gen_stdux, }, .oname = "stdux", },; |
2866 | GEN_STX(std, st64, 0x15, 0x04, PPC_64B){ .opc1 = 0x1F, .opc2 = 0x15, .opc3 = 0x04, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_64B, .type2 = PPC_NONE , .handler = &gen_stdx, }, .oname = "stdx", },; |
2867 | |
2868 | static void gen_std(DisasContext *ctx) |
2869 | { |
2870 | int rs; |
2871 | TCGvTCGv_i32 EA; |
2872 | |
2873 | rs = rS(ctx->opcode); |
2874 | if ((ctx->opcode & 0x3) == 0x2) { |
2875 | #if defined(CONFIG_USER_ONLY) |
2876 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
2877 | #else |
2878 | /* stq */ |
2879 | if (unlikely(ctx->mem_idx == 0)__builtin_expect(!!(ctx->mem_idx == 0), 0)) { |
2880 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
2881 | return; |
2882 | } |
2883 | if (unlikely(rs & 1)__builtin_expect(!!(rs & 1), 0)) { |
2884 | gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); |
2885 | return; |
2886 | } |
2887 | if (unlikely(ctx->le_mode)__builtin_expect(!!(ctx->le_mode), 0)) { |
2888 | /* Little-endian mode is not handled */ |
2889 | gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_LE); |
2890 | return; |
2891 | } |
2892 | gen_set_access_type(ctx, ACCESS_INT); |
2893 | EA = tcg_temp_new()tcg_temp_new_i32(); |
2894 | gen_addr_imm_index(ctx, EA, 0x03); |
2895 | gen_qemu_st64(ctx, cpu_gpr[rs], EA); |
2896 | gen_addr_add(ctx, EA, EA, 8); |
2897 | gen_qemu_st64(ctx, cpu_gpr[rs+1], EA); |
2898 | tcg_temp_freetcg_temp_free_i32(EA); |
2899 | #endif |
2900 | } else { |
2901 | /* std / stdu */ |
2902 | if (Rc(ctx->opcode)) { |
2903 | if (unlikely(rA(ctx->opcode) == 0)__builtin_expect(!!(rA(ctx->opcode) == 0), 0)) { |
2904 | gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); |
2905 | return; |
2906 | } |
2907 | } |
2908 | gen_set_access_type(ctx, ACCESS_INT); |
2909 | EA = tcg_temp_new()tcg_temp_new_i32(); |
2910 | gen_addr_imm_index(ctx, EA, 0x03); |
2911 | gen_qemu_st64(ctx, cpu_gpr[rs], EA); |
2912 | if (Rc(ctx->opcode)) |
2913 | tcg_gen_mov_tltcg_gen_mov_i32(cpu_gpr[rA(ctx->opcode)], EA); |
2914 | tcg_temp_freetcg_temp_free_i32(EA); |
2915 | } |
2916 | } |
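/*
 * gen_std decodes the DS-form store family from the low opcode bits: the
 * value 2 selects stq, which stores the even/odd pair rs and rs+1 into
 * two consecutive doublewords and is refused in user mode (mem_idx == 0)
 * or for an odd rs; the remaining encodings are std/stdu with the usual
 * update-form restriction on rA == 0.
 */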
2917 | #endif |
2918 | /*** Integer load and store with byte reverse ***/ |
2919 | /* lhbrx */ |
2920 | static inline void gen_qemu_ld16ur(DisasContext *ctx, TCGvTCGv_i32 arg1, TCGvTCGv_i32 arg2) |
2921 | { |
2922 | tcg_gen_qemu_ld16u(arg1, arg2, ctx->mem_idx); |
2923 | if (likely(!ctx->le_mode)__builtin_expect(!!(!ctx->le_mode), 1)) { |
2924 | tcg_gen_bswap16_tltcg_gen_bswap16_i32(arg1, arg1); |
2925 | } |
2926 | } |
2927 | GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER){ .opc1 = 0x1F, .opc2 = 0x16, .opc3 = 0x18, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_INSNS_BASE, .type2 = PPC_NONE , .handler = &gen_lhbrx, }, .oname = "lhbrx", },; |
2928 | |
2929 | /* lwbrx */ |
2930 | static inline void gen_qemu_ld32ur(DisasContext *ctx, TCGvTCGv_i32 arg1, TCGvTCGv_i32 arg2) |
2931 | { |
2932 | tcg_gen_qemu_ld32u(arg1, arg2, ctx->mem_idx); |
2933 | if (likely(!ctx->le_mode)__builtin_expect(!!(!ctx->le_mode), 1)) { |
2934 | tcg_gen_bswap32_tltcg_gen_bswap32_i32(arg1, arg1); |
2935 | } |
2936 | } |
2937 | GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER){ .opc1 = 0x1F, .opc2 = 0x16, .opc3 = 0x10, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_INSNS_BASE, .type2 = PPC_NONE , .handler = &gen_lwbrx, }, .oname = "lwbrx", },; |
2938 | |
2939 | #if defined(TARGET_PPC64) |
2940 | /* ldbrx */ |
2941 | static inline void gen_qemu_ld64ur(DisasContext *ctx, TCGvTCGv_i32 arg1, TCGvTCGv_i32 arg2) |
2942 | { |
2943 | tcg_gen_qemu_ld64(arg1, arg2, ctx->mem_idx); |
2944 | if (likely(!ctx->le_mode)__builtin_expect(!!(!ctx->le_mode), 1)) { |
2945 | tcg_gen_bswap64_tl(arg1, arg1); |
2946 | } |
2947 | } |
2948 | GEN_LDX_E(ldbr, ld64ur, 0x14, 0x10, PPC_NONE, PPC2_DBRX){ .opc1 = 0x1F, .opc2 = 0x14, .opc3 = 0x10, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_NONE, .type2 = PPC2_DBRX , .handler = &gen_ldbrx, }, .oname = "ldbrx", },; |
2949 | #endif /* TARGET_PPC64 */ |
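/*
 * The byte-reversed loads (lhbrx/lwbrx/ldbrx) invert the usual
 * endianness handling: the swap is applied in big-endian mode and
 * skipped when ctx->le_mode is set, because the raw qemu_ld result is
 * then already in the reversed order the instruction asks for.
 */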
2950 | |
2951 | /* sthbrx */ |
2952 | static inline void gen_qemu_st16r(DisasContext *ctx, TCGvTCGv_i32 arg1, TCGvTCGv_i32 arg2) |
2953 | { |
2954 | if (likely(!ctx->le_mode)__builtin_expect(!!(!ctx->le_mode), 1)) { |
2955 | TCGvTCGv_i32 t0 = tcg_temp_new()tcg_temp_new_i32(); |
2956 | tcg_gen_ext16u_tltcg_gen_ext16u_i32(t0, arg1); |
2957 | tcg_gen_bswap16_tltcg_gen_bswap16_i32(t0, t0); |
2958 | tcg_gen_qemu_st16(t0, arg2, ctx->mem_idx); |
2959 | tcg_temp_freetcg_temp_free_i32(t0); |
2960 | } else { |
2961 | tcg_gen_qemu_st16(arg1, arg2, ctx->mem_idx); |
2962 | } |
2963 | } |
2964 | GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER){ .opc1 = 0x1F, .opc2 = 0x16, .opc3 = 0x1C, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_INSNS_BASE, .type2 = PPC_NONE , .handler = &gen_sthbrx, }, .oname = "sthbrx", },; |
2965 | |
2966 | /* stwbrx */ |
2967 | static inline void gen_qemu_st32r(DisasContext *ctx, TCGvTCGv_i32 arg1, TCGvTCGv_i32 arg2) |
2968 | { |
2969 | if (likely(!ctx->le_mode)__builtin_expect(!!(!ctx->le_mode), 1)) { |
2970 | TCGvTCGv_i32 t0 = tcg_temp_new()tcg_temp_new_i32(); |
2971 | tcg_gen_ext32u_tltcg_gen_mov_i32(t0, arg1); |
2972 | tcg_gen_bswap32_tltcg_gen_bswap32_i32(t0, t0); |
2973 | tcg_gen_qemu_st32(t0, arg2, ctx->mem_idx); |
2974 | tcg_temp_freetcg_temp_free_i32(t0); |
2975 | } else { |
2976 | tcg_gen_qemu_st32(arg1, arg2, ctx->mem_idx); |
2977 | } |
2978 | } |
2979 | GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER){ .opc1 = 0x1F, .opc2 = 0x16, .opc3 = 0x14, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_INSNS_BASE, .type2 = PPC_NONE , .handler = &gen_stwbrx, }, .oname = "stwbrx", },; |
2980 | |
2981 | #if defined(TARGET_PPC64) |
2982 | /* stdbrx */ |
2983 | static inline void gen_qemu_st64r(DisasContext *ctx, TCGvTCGv_i32 arg1, TCGvTCGv_i32 arg2) |
2984 | { |
2985 | if (likely(!ctx->le_mode)__builtin_expect(!!(!ctx->le_mode), 1)) { |
2986 | TCGvTCGv_i32 t0 = tcg_temp_new()tcg_temp_new_i32(); |
2987 | tcg_gen_bswap64_tl(t0, arg1); |
2988 | tcg_gen_qemu_st64(t0, arg2, ctx->mem_idx); |
2989 | tcg_temp_freetcg_temp_free_i32(t0); |
2990 | } else { |
2991 | tcg_gen_qemu_st64(arg1, arg2, ctx->mem_idx); |
2992 | } |
2993 | } |
2994 | GEN_STX_E(stdbr, st64r, 0x14, 0x14, PPC_NONE, PPC2_DBRX){ .opc1 = 0x1F, .opc2 = 0x14, .opc3 = 0x14, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_NONE, .type2 = PPC2_DBRX , .handler = &gen_stdbrx, }, .oname = "stdbrx", },; |
2995 | #endif /* TARGET_PPC64 */ |
2996 | |
2997 | /*** Integer load and store multiple ***/ |
2998 | |
2999 | /* lmw */ |
3000 | static void gen_lmw(DisasContext *ctx) |
3001 | { |
3002 | TCGvTCGv_i32 t0; |
3003 | TCGv_i32 t1; |
3004 | gen_set_access_type(ctx, ACCESS_INT); |
3005 | /* NIP cannot be restored if the memory exception comes from a helper */
3006 | gen_update_nip(ctx, ctx->nip - 4); |
3007 | t0 = tcg_temp_new()tcg_temp_new_i32(); |
3008 | t1 = tcg_const_i32(rD(ctx->opcode)); |
3009 | gen_addr_imm_index(ctx, t0, 0); |
3010 | gen_helper_lmw(cpu_env, t0, t1); |
3011 | tcg_temp_freetcg_temp_free_i32(t0); |
3012 | tcg_temp_free_i32(t1); |
3013 | } |
3014 | |
3015 | /* stmw */ |
3016 | static void gen_stmw(DisasContext *ctx) |
3017 | { |
3018 | TCGvTCGv_i32 t0; |
3019 | TCGv_i32 t1; |
3020 | gen_set_access_type(ctx, ACCESS_INT); |
3021 | /* NIP cannot be restored if the memory exception comes from a helper */
3022 | gen_update_nip(ctx, ctx->nip - 4); |
3023 | t0 = tcg_temp_new()tcg_temp_new_i32(); |
3024 | t1 = tcg_const_i32(rS(ctx->opcode)); |
3025 | gen_addr_imm_index(ctx, t0, 0); |
3026 | gen_helper_stmw(cpu_env, t0, t1); |
3027 | tcg_temp_freetcg_temp_free_i32(t0); |
3028 | tcg_temp_free_i32(t1); |
3029 | } |
3030 | |
3031 | /*** Integer load and store strings ***/ |
3032 | |
3033 | /* lswi */ |
3034 | /* PowerPC32 specification says we must generate an exception if |
3035 | * rA is in the range of registers to be loaded. |
3036 | * In an other hand, IBM says this is valid, but rA won't be loaded. |
3037 | * For now, I'll follow the spec... |
3038 | */ |
3039 | static void gen_lswi(DisasContext *ctx) |
3040 | { |
3041 | TCGvTCGv_i32 t0; |
3042 | TCGv_i32 t1, t2; |
3043 | int nb = NB(ctx->opcode); |
3044 | int start = rD(ctx->opcode); |
3045 | int ra = rA(ctx->opcode); |
3046 | int nr; |
3047 | |
3048 | if (nb == 0) |
3049 | nb = 32; |
3050 | nr = nb / 4; |
3051 | if (unlikely(((start + nr) > 32 &&__builtin_expect(!!(((start + nr) > 32 && start <= ra && (start + nr - 32) > ra) || ((start + nr) <= 32 && start <= ra && (start + nr) > ra )), 0) |
3052 | start <= ra && (start + nr - 32) > ra) ||__builtin_expect(!!(((start + nr) > 32 && start <= ra && (start + nr - 32) > ra) || ((start + nr) <= 32 && start <= ra && (start + nr) > ra )), 0) |
3053 | ((start + nr) <= 32 && start <= ra && (start + nr) > ra))__builtin_expect(!!(((start + nr) > 32 && start <= ra && (start + nr - 32) > ra) || ((start + nr) <= 32 && start <= ra && (start + nr) > ra )), 0)) { |
3054 | gen_inval_exception(ctx, POWERPC_EXCP_INVAL_LSWX); |
3055 | return; |
3056 | } |
3057 | gen_set_access_type(ctx, ACCESS_INT); |
3058 | /* NIP cannot be restored if the memory exception comes from a helper */
3059 | gen_update_nip(ctx, ctx->nip - 4); |
3060 | t0 = tcg_temp_new()tcg_temp_new_i32(); |
3061 | gen_addr_register(ctx, t0); |
3062 | t1 = tcg_const_i32(nb); |
3063 | t2 = tcg_const_i32(start); |
3064 | gen_helper_lsw(cpu_env, t0, t1, t2); |
3065 | tcg_temp_freetcg_temp_free_i32(t0); |
3066 | tcg_temp_free_i32(t1); |
3067 | tcg_temp_free_i32(t2); |
3068 | } |
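/*
 * The guard in gen_lswi implements the note above: nr = nb / 4 registers
 * are filled starting at rD (wrapping from r31 back to r0), and the
 * instruction is meant to be rejected when rA falls inside that range.
 * For example, lswi r5,r7,16 loads r5..r8 and therefore faults because
 * rA = r7 would be overwritten. The actual copy is done by the lsw
 * helper.
 */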
3069 | |
3070 | /* lswx */ |
3071 | static void gen_lswx(DisasContext *ctx) |
3072 | { |
3073 | TCGvTCGv_i32 t0; |
3074 | TCGv_i32 t1, t2, t3; |
3075 | gen_set_access_type(ctx, ACCESS_INT); |
3076 | /* NIP cannot be restored if the memory exception comes from a helper */
3077 | gen_update_nip(ctx, ctx->nip - 4); |
3078 | t0 = tcg_temp_new()tcg_temp_new_i32(); |
3079 | gen_addr_reg_index(ctx, t0); |
3080 | t1 = tcg_const_i32(rD(ctx->opcode)); |
3081 | t2 = tcg_const_i32(rA(ctx->opcode)); |
3082 | t3 = tcg_const_i32(rB(ctx->opcode)); |
3083 | gen_helper_lswx(cpu_env, t0, t1, t2, t3); |
3084 | tcg_temp_freetcg_temp_free_i32(t0); |
3085 | tcg_temp_free_i32(t1); |
3086 | tcg_temp_free_i32(t2); |
3087 | tcg_temp_free_i32(t3); |
3088 | } |
3089 | |
3090 | /* stswi */ |
3091 | static void gen_stswi(DisasContext *ctx) |
3092 | { |
3093 | TCGvTCGv_i32 t0; |
3094 | TCGv_i32 t1, t2; |
3095 | int nb = NB(ctx->opcode); |
3096 | gen_set_access_type(ctx, ACCESS_INT); |
3097 | /* NIP cannot be restored if the memory exception comes from a helper */
3098 | gen_update_nip(ctx, ctx->nip - 4); |
3099 | t0 = tcg_temp_new()tcg_temp_new_i32(); |
3100 | gen_addr_register(ctx, t0); |
3101 | if (nb == 0) |
3102 | nb = 32; |
3103 | t1 = tcg_const_i32(nb); |
3104 | t2 = tcg_const_i32(rS(ctx->opcode)); |
3105 | gen_helper_stsw(cpu_env, t0, t1, t2); |
3106 | tcg_temp_freetcg_temp_free_i32(t0); |
3107 | tcg_temp_free_i32(t1); |
3108 | tcg_temp_free_i32(t2); |
3109 | } |
3110 | |
3111 | /* stswx */ |
3112 | static void gen_stswx(DisasContext *ctx) |
3113 | { |
3114 | TCGvTCGv_i32 t0; |
3115 | TCGv_i32 t1, t2; |
3116 | gen_set_access_type(ctx, ACCESS_INT); |
3117 | /* NIP cannot be restored if the memory exception comes from a helper */
3118 | gen_update_nip(ctx, ctx->nip - 4); |
3119 | t0 = tcg_temp_new()tcg_temp_new_i32(); |
3120 | gen_addr_reg_index(ctx, t0); |
3121 | t1 = tcg_temp_new_i32(); |
3122 | tcg_gen_trunc_tl_i32tcg_gen_mov_i32(t1, cpu_xer); |
3123 | tcg_gen_andi_i32(t1, t1, 0x7F); |
3124 | t2 = tcg_const_i32(rS(ctx->opcode)); |
3125 | gen_helper_stsw(cpu_env, t0, t1, t2); |
3126 | tcg_temp_freetcg_temp_free_i32(t0); |
3127 | tcg_temp_free_i32(t1); |
3128 | tcg_temp_free_i32(t2); |
3129 | } |
3130 | |
3131 | /*** Memory synchronisation ***/ |
3132 | /* eieio */ |
3133 | static void gen_eieio(DisasContext *ctx) |
3134 | { |
3135 | } |
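/*
 * eieio generates no code: TCG performs this virtual CPU's memory
 * accesses in program order, so the store-ordering barrier is presumably
 * a deliberate no-op here; sync further below is treated the same way.
 */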
3136 | |
3137 | /* isync */ |
3138 | static void gen_isync(DisasContext *ctx) |
3139 | { |
3140 | gen_stop_exception(ctx); |
3141 | } |
3142 | |
3143 | /* lwarx */ |
3144 | static void gen_lwarx(DisasContext *ctx) |
3145 | { |
3146 | TCGvTCGv_i32 t0; |
3147 | TCGvTCGv_i32 gpr = cpu_gpr[rD(ctx->opcode)]; |
3148 | gen_set_access_type(ctx, ACCESS_RES); |
3149 | t0 = tcg_temp_local_new()tcg_temp_local_new_i32(); |
3150 | gen_addr_reg_index(ctx, t0); |
3151 | gen_check_align(ctx, t0, 0x03); |
3152 | gen_qemu_ld32u(ctx, gpr, t0); |
3153 | tcg_gen_mov_tltcg_gen_mov_i32(cpu_reserve, t0); |
3154 | tcg_gen_st_tltcg_gen_st_i32(gpr, cpu_env, offsetof(CPUPPCState, reserve_val)__builtin_offsetof(CPUPPCState, reserve_val)); |
3155 | tcg_temp_freetcg_temp_free_i32(t0); |
3156 | } |
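/*
 * lwarx establishes the reservation consumed by a later stwcx.: the
 * alignment-checked effective address is remembered in cpu_reserve and
 * the loaded value is stashed in env->reserve_val. The system-emulation
 * stwcx. path below compares only the address; the user-only path
 * instead hands the reservation to the STCX exception handler through
 * reserve_ea and reserve_info.
 */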
3157 | |
3158 | #if defined(CONFIG_USER_ONLY) |
3159 | static void gen_conditional_store (DisasContext *ctx, TCGvTCGv_i32 EA, |
3160 | int reg, int size) |
3161 | { |
3162 | TCGvTCGv_i32 t0 = tcg_temp_new()tcg_temp_new_i32(); |
3163 | uint32_t save_exception = ctx->exception; |
3164 | |
3165 | tcg_gen_st_tltcg_gen_st_i32(EA, cpu_env, offsetof(CPUPPCState, reserve_ea)__builtin_offsetof(CPUPPCState, reserve_ea)); |
3166 | tcg_gen_movi_tltcg_gen_movi_i32(t0, (size << 5) | reg); |
3167 | tcg_gen_st_tltcg_gen_st_i32(t0, cpu_env, offsetof(CPUPPCState, reserve_info)__builtin_offsetof(CPUPPCState, reserve_info)); |
3168 | tcg_temp_freetcg_temp_free_i32(t0); |
3169 | gen_update_nip(ctx, ctx->nip-4); |
3170 | ctx->exception = POWERPC_EXCP_BRANCH; |
3171 | gen_exception(ctx, POWERPC_EXCP_STCX); |
3172 | ctx->exception = save_exception; |
3173 | } |
3174 | #endif |
3175 | |
3176 | /* stwcx. */ |
3177 | static void gen_stwcx_(DisasContext *ctx) |
3178 | { |
3179 | TCGvTCGv_i32 t0; |
3180 | gen_set_access_type(ctx, ACCESS_RES); |
3181 | t0 = tcg_temp_local_new()tcg_temp_local_new_i32(); |
3182 | gen_addr_reg_index(ctx, t0); |
3183 | gen_check_align(ctx, t0, 0x03); |
3184 | #if defined(CONFIG_USER_ONLY) |
3185 | gen_conditional_store(ctx, t0, rS(ctx->opcode), 4); |
3186 | #else |
3187 | { |
3188 | int l1; |
3189 | |
3190 | tcg_gen_trunc_tl_i32tcg_gen_mov_i32(cpu_crf[0], cpu_so); |
3191 | l1 = gen_new_label(); |
3192 | tcg_gen_brcond_tltcg_gen_brcond_i32(TCG_COND_NE, t0, cpu_reserve, l1); |
3193 | tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 1 << CRF_EQ1); |
3194 | gen_qemu_st32(ctx, cpu_gpr[rS(ctx->opcode)], t0); |
3195 | gen_set_label(l1); |
3196 | tcg_gen_movi_tltcg_gen_movi_i32(cpu_reserve, -1); |
3197 | } |
3198 | #endif |
3199 | tcg_temp_freetcg_temp_free_i32(t0); |
3200 | } |
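/*
 * stwcx. in the system-emulation path: CR0 is seeded from the SO bit,
 * the EA is compared with cpu_reserve, and only on a match is CR0[EQ]
 * set and the store performed; either way the reservation is then
 * cleared by writing -1 to cpu_reserve. No cross-CPU atomicity is
 * attempted, which matches the single-threaded TCG execution model.
 */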
3201 | |
3202 | #if defined(TARGET_PPC64) |
3203 | /* ldarx */ |
3204 | static void gen_ldarx(DisasContext *ctx) |
3205 | { |
3206 | TCGvTCGv_i32 t0; |
3207 | TCGvTCGv_i32 gpr = cpu_gpr[rD(ctx->opcode)]; |
3208 | gen_set_access_type(ctx, ACCESS_RES); |
3209 | t0 = tcg_temp_local_new()tcg_temp_local_new_i32(); |
3210 | gen_addr_reg_index(ctx, t0); |
3211 | gen_check_align(ctx, t0, 0x07); |
3212 | gen_qemu_ld64(ctx, gpr, t0); |
3213 | tcg_gen_mov_tltcg_gen_mov_i32(cpu_reserve, t0); |
3214 | tcg_gen_st_tltcg_gen_st_i32(gpr, cpu_env, offsetof(CPUPPCState, reserve_val)__builtin_offsetof(CPUPPCState, reserve_val)); |
3215 | tcg_temp_freetcg_temp_free_i32(t0); |
3216 | } |
3217 | |
3218 | /* stdcx. */ |
3219 | static void gen_stdcx_(DisasContext *ctx) |
3220 | { |
3221 | TCGvTCGv_i32 t0; |
3222 | gen_set_access_type(ctx, ACCESS_RES); |
3223 | t0 = tcg_temp_local_new()tcg_temp_local_new_i32(); |
3224 | gen_addr_reg_index(ctx, t0); |
3225 | gen_check_align(ctx, t0, 0x07); |
3226 | #if defined(CONFIG_USER_ONLY) |
3227 | gen_conditional_store(ctx, t0, rS(ctx->opcode), 8); |
3228 | #else |
3229 | { |
3230 | int l1; |
3231 | tcg_gen_trunc_tl_i32tcg_gen_mov_i32(cpu_crf[0], cpu_so); |
3232 | l1 = gen_new_label(); |
3233 | tcg_gen_brcond_tltcg_gen_brcond_i32(TCG_COND_NE, t0, cpu_reserve, l1); |
3234 | tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 1 << CRF_EQ1); |
3235 | gen_qemu_st64(ctx, cpu_gpr[rS(ctx->opcode)], t0); |
3236 | gen_set_label(l1); |
3237 | tcg_gen_movi_tltcg_gen_movi_i32(cpu_reserve, -1); |
3238 | } |
3239 | #endif |
3240 | tcg_temp_freetcg_temp_free_i32(t0); |
3241 | } |
3242 | #endif /* defined(TARGET_PPC64) */ |
3243 | |
3244 | /* sync */ |
3245 | static void gen_sync(DisasContext *ctx) |
3246 | { |
3247 | } |
3248 | |
3249 | /* wait */ |
3250 | static void gen_wait(DisasContext *ctx) |
3251 | { |
3252 | TCGv_i32 t0 = tcg_temp_new_i32(); |
3253 | tcg_gen_st_i32(t0, cpu_env, |
3254 | -offsetof(PowerPCCPU, env)__builtin_offsetof(PowerPCCPU, env) + offsetof(CPUState, halted)__builtin_offsetof(CPUState, halted)); |
3255 | tcg_temp_free_i32(t0); |
3256 | /* Stop translation, as the CPU is supposed to sleep from now on */
3257 | gen_exception_err(ctx, EXCP_HLT0x10001, 1); |
3258 | } |
3259 | |
3260 | /*** Floating-point load ***/ |
3261 | #define GEN_LDF(name, ldop, opc, type){ .opc1 = opc, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = type, .type2 = PPC_NONE, . handler = &gen_name, }, .oname = "name", }, \ |
3262 | static void glue(gen_, name)gen_name(DisasContext *ctx) \ |
3263 | { \ |
3264 | TCGvTCGv_i32 EA; \ |
3265 | if (unlikely(!ctx->fpu_enabled)__builtin_expect(!!(!ctx->fpu_enabled), 0)) { \ |
3266 | gen_exception(ctx, POWERPC_EXCP_FPU); \ |
3267 | return; \ |
3268 | } \ |
3269 | gen_set_access_type(ctx, ACCESS_FLOAT); \ |
3270 | EA = tcg_temp_new()tcg_temp_new_i32(); \ |
3271 | gen_addr_imm_index(ctx, EA, 0); \ |
3272 | gen_qemu_##ldop(ctx, cpu_fpr[rD(ctx->opcode)], EA); \ |
3273 | tcg_temp_freetcg_temp_free_i32(EA); \ |
3274 | } |
3275 | |
3276 | #define GEN_LDUF(name, ldop, opc, type){ .opc1 = opc, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = type, .type2 = PPC_NONE, . handler = &gen_nameu, }, .oname = "nameu", }, \ |
3277 | static void glue(gen_, name##u)gen_name##u(DisasContext *ctx) \ |
3278 | { \ |
3279 | TCGvTCGv_i32 EA; \ |
3280 | if (unlikely(!ctx->fpu_enabled)__builtin_expect(!!(!ctx->fpu_enabled), 0)) { \ |
3281 | gen_exception(ctx, POWERPC_EXCP_FPU); \ |
3282 | return; \ |
3283 | } \ |
3284 | if (unlikely(rA(ctx->opcode) == 0)__builtin_expect(!!(rA(ctx->opcode) == 0), 0)) { \ |
3285 | gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \ |
3286 | return; \ |
3287 | } \ |
3288 | gen_set_access_type(ctx, ACCESS_FLOAT); \ |
3289 | EA = tcg_temp_new()tcg_temp_new_i32(); \ |
3290 | gen_addr_imm_index(ctx, EA, 0); \ |
3291 | gen_qemu_##ldop(ctx, cpu_fpr[rD(ctx->opcode)], EA); \ |
3292 | tcg_gen_mov_tltcg_gen_mov_i32(cpu_gpr[rA(ctx->opcode)], EA); \ |
3293 | tcg_temp_freetcg_temp_free_i32(EA); \ |
3294 | } |
3295 | |
3296 | #define GEN_LDUXF(name, ldop, opc, type){ .opc1 = 0x1F, .opc2 = 0x17, .opc3 = opc, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = type, .type2 = PPC_NONE, . handler = &gen_nameux, }, .oname = "nameux", }, \ |
3297 | static void glue(gen_, name##ux)gen_name##ux(DisasContext *ctx) \ |
3298 | { \ |
3299 | TCGvTCGv_i32 EA; \ |
3300 | if (unlikely(!ctx->fpu_enabled)__builtin_expect(!!(!ctx->fpu_enabled), 0)) { \ |
3301 | gen_exception(ctx, POWERPC_EXCP_FPU); \ |
3302 | return; \ |
3303 | } \ |
3304 | if (unlikely(rA(ctx->opcode) == 0)__builtin_expect(!!(rA(ctx->opcode) == 0), 0)) { \ |
3305 | gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \ |
3306 | return; \ |
3307 | } \ |
3308 | gen_set_access_type(ctx, ACCESS_FLOAT); \ |
3309 | EA = tcg_temp_new()tcg_temp_new_i32(); \ |
3310 | gen_addr_reg_index(ctx, EA); \ |
3311 | gen_qemu_##ldop(ctx, cpu_fpr[rD(ctx->opcode)], EA); \ |
3312 | tcg_gen_mov_tltcg_gen_mov_i32(cpu_gpr[rA(ctx->opcode)], EA); \ |
3313 | tcg_temp_freetcg_temp_free_i32(EA); \ |
3314 | } |
3315 | |
3316 | #define GEN_LDXF(name, ldop, opc2, opc3, type){ .opc1 = 0x1F, .opc2 = opc2, .opc3 = opc3, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = type, .type2 = PPC_NONE, . handler = &gen_namex, }, .oname = "namex", }, \ |
3317 | static void glue(gen_, name##x)gen_name##x(DisasContext *ctx) \ |
3318 | { \ |
3319 | TCGvTCGv_i32 EA; \ |
3320 | if (unlikely(!ctx->fpu_enabled)__builtin_expect(!!(!ctx->fpu_enabled), 0)) { \ |
3321 | gen_exception(ctx, POWERPC_EXCP_FPU); \ |
3322 | return; \ |
3323 | } \ |
3324 | gen_set_access_type(ctx, ACCESS_FLOAT); \ |
3325 | EA = tcg_temp_new()tcg_temp_new_i32(); \ |
3326 | gen_addr_reg_index(ctx, EA); \ |
3327 | gen_qemu_##ldop(ctx, cpu_fpr[rD(ctx->opcode)], EA); \ |
3328 | tcg_temp_freetcg_temp_free_i32(EA); \ |
3329 | } |
3330 | |
3331 | #define GEN_LDFS(name, ldop, op, type){ .opc1 = op | 0x20, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0, } , .handler = { .inval1 = 0x00000000, .type = type, .type2 = PPC_NONE , .handler = &gen_name, }, .oname = "name", }, { .opc1 = op | 0x21, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = type, .type2 = PPC_NONE, .handler = &gen_nameu, }, .oname = "nameu", }, { .opc1 = 0x1F, .opc2 = 0x17, .opc3 = op | 0x01, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = type, .type2 = PPC_NONE, .handler = & gen_nameux, }, .oname = "nameux", }, { .opc1 = 0x1F, .opc2 = 0x17 , .opc3 = op | 0x00, .pad = { 0, }, .handler = { .inval1 = 0x00000001 , .type = type, .type2 = PPC_NONE, .handler = &gen_namex, }, .oname = "namex", }, \{ .opc1 = op | 0x20, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0, } , .handler = { .inval1 = 0x00000000, .type = type, .type2 = PPC_NONE , .handler = &gen_name, }, .oname = "name", }, |
3332 | GEN_LDF(name, ldop, op | 0x20, type){ .opc1 = op | 0x20, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0, } , .handler = { .inval1 = 0x00000000, .type = type, .type2 = PPC_NONE , .handler = &gen_name, }, .oname = "name", },; \{ .opc1 = op | 0x21, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0, } , .handler = { .inval1 = 0x00000000, .type = type, .type2 = PPC_NONE , .handler = &gen_nameu, }, .oname = "nameu", }, |
3333 | GEN_LDUF(name, ldop, op | 0x21, type){ .opc1 = op | 0x21, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0, } , .handler = { .inval1 = 0x00000000, .type = type, .type2 = PPC_NONE , .handler = &gen_nameu, }, .oname = "nameu", },; \{ .opc1 = 0x1F, .opc2 = 0x17, .opc3 = op | 0x01, .pad = { 0, } , .handler = { .inval1 = 0x00000001, .type = type, .type2 = PPC_NONE , .handler = &gen_nameux, }, .oname = "nameux", }, |
3334 | GEN_LDUXF(name, ldop, op | 0x01, type){ .opc1 = 0x1F, .opc2 = 0x17, .opc3 = op | 0x01, .pad = { 0, } , .handler = { .inval1 = 0x00000001, .type = type, .type2 = PPC_NONE , .handler = &gen_nameux, }, .oname = "nameux", },; \{ .opc1 = 0x1F, .opc2 = 0x17, .opc3 = op | 0x00, .pad = { 0, } , .handler = { .inval1 = 0x00000001, .type = type, .type2 = PPC_NONE , .handler = &gen_namex, }, .oname = "namex", }, |
3335 | GEN_LDXF(name, ldop, 0x17, op | 0x00, type){ .opc1 = 0x1F, .opc2 = 0x17, .opc3 = op | 0x00, .pad = { 0, } , .handler = { .inval1 = 0x00000001, .type = type, .type2 = PPC_NONE , .handler = &gen_namex, }, .oname = "namex", }, |
3336 | |
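     | /* lfs-family loads fetch a 32-bit single-precision value and widen it to
     |  * the 64-bit double format kept in the FPRs via float32_to_float64. */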
3337 | static inline void gen_qemu_ld32fs(DisasContext *ctx, TCGv_i64 arg1, TCGvTCGv_i32 arg2) |
3338 | { |
3339 | TCGvTCGv_i32 t0 = tcg_temp_new()tcg_temp_new_i32(); |
3340 | TCGv_i32 t1 = tcg_temp_new_i32(); |
3341 | gen_qemu_ld32u(ctx, t0, arg2); |
3342 | tcg_gen_trunc_tl_i32tcg_gen_mov_i32(t1, t0); |
3343 | tcg_temp_freetcg_temp_free_i32(t0); |
3344 | gen_helper_float32_to_float64(arg1, cpu_env, t1); |
3345 | tcg_temp_free_i32(t1); |
3346 | } |
3347 | |
3348 | /* lfd lfdu lfdux lfdx */ |
3349 | GEN_LDFS(lfd, ld64, 0x12, PPC_FLOAT){ .opc1 = 0x12 | 0x20, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0 , }, .handler = { .inval1 = 0x00000000, .type = PPC_FLOAT, .type2 = PPC_NONE, .handler = &gen_lfd, }, .oname = "lfd", }, { .opc1 = 0x12 | 0x21, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_FLOAT, .type2 = PPC_NONE, .handler = &gen_lfdu, }, .oname = "lfdu", }, { .opc1 = 0x1F, .opc2 = 0x17, .opc3 = 0x12 | 0x01, .pad = { 0 , }, .handler = { .inval1 = 0x00000001, .type = PPC_FLOAT, .type2 = PPC_NONE, .handler = &gen_lfdux, }, .oname = "lfdux", } , { .opc1 = 0x1F, .opc2 = 0x17, .opc3 = 0x12 | 0x00, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_FLOAT, . type2 = PPC_NONE, .handler = &gen_lfdx, }, .oname = "lfdx" , },; |
3350 | /* lfs lfsu lfsux lfsx */ |
3351 | GEN_LDFS(lfs, ld32fs, 0x10, PPC_FLOAT){ .opc1 = 0x10 | 0x20, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0 , }, .handler = { .inval1 = 0x00000000, .type = PPC_FLOAT, .type2 = PPC_NONE, .handler = &gen_lfs, }, .oname = "lfs", }, { .opc1 = 0x10 | 0x21, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_FLOAT, .type2 = PPC_NONE, .handler = &gen_lfsu, }, .oname = "lfsu", }, { .opc1 = 0x1F, .opc2 = 0x17, .opc3 = 0x10 | 0x01, .pad = { 0 , }, .handler = { .inval1 = 0x00000001, .type = PPC_FLOAT, .type2 = PPC_NONE, .handler = &gen_lfsux, }, .oname = "lfsux", } , { .opc1 = 0x1F, .opc2 = 0x17, .opc3 = 0x10 | 0x00, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_FLOAT, . type2 = PPC_NONE, .handler = &gen_lfsx, }, .oname = "lfsx" , },; |
3352 | |
3353 | /* lfdp */ |
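     | /* lfdp loads two consecutive doublewords into the FPR pair rD, rD+1; in
     |  * little-endian mode the two doublewords are fetched in reversed order. */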
3354 | static void gen_lfdp(DisasContext *ctx) |
3355 | { |
3356 | TCGvTCGv_i32 EA; |
3357 | if (unlikely(!ctx->fpu_enabled)__builtin_expect(!!(!ctx->fpu_enabled), 0)) { |
3358 | gen_exception(ctx, POWERPC_EXCP_FPU); |
3359 | return; |
3360 | } |
3361 | gen_set_access_type(ctx, ACCESS_FLOAT); |
3362 | EA = tcg_temp_new()tcg_temp_new_i32(); |
3363 |     gen_addr_imm_index(ctx, EA, 0);
3364 | if (unlikely(ctx->le_mode)__builtin_expect(!!(ctx->le_mode), 0)) { |
3365 | gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA); |
3366 | tcg_gen_addi_tltcg_gen_addi_i32(EA, EA, 8); |
3367 | gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode)], EA); |
3368 | } else { |
3369 | gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode)], EA); |
3370 | tcg_gen_addi_tltcg_gen_addi_i32(EA, EA, 8); |
3371 | gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA); |
3372 | } |
3373 | tcg_temp_freetcg_temp_free_i32(EA); |
3374 | } |
3375 | |
3376 | /* lfdpx */ |
3377 | static void gen_lfdpx(DisasContext *ctx) |
3378 | { |
3379 | TCGvTCGv_i32 EA; |
3380 | if (unlikely(!ctx->fpu_enabled)__builtin_expect(!!(!ctx->fpu_enabled), 0)) { |
3381 | gen_exception(ctx, POWERPC_EXCP_FPU); |
3382 | return; |
3383 | } |
3384 | gen_set_access_type(ctx, ACCESS_FLOAT); |
3385 | EA = tcg_temp_new()tcg_temp_new_i32(); |
3386 | gen_addr_reg_index(ctx, EA); |
3387 | if (unlikely(ctx->le_mode)__builtin_expect(!!(ctx->le_mode), 0)) { |
3388 | gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA); |
3389 | tcg_gen_addi_tltcg_gen_addi_i32(EA, EA, 8); |
3390 | gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode)], EA); |
3391 | } else { |
3392 | gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode)], EA); |
3393 | tcg_gen_addi_tltcg_gen_addi_i32(EA, EA, 8); |
3394 | gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA); |
3395 | } |
3396 | tcg_temp_freetcg_temp_free_i32(EA); |
3397 | } |
3398 | |
3399 | /* lfiwax */ |
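     | /* lfiwax loads a 32-bit word, sign-extends it to 64 bits and places the
     |  * result in FPR rD with no floating-point conversion. */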
3400 | static void gen_lfiwax(DisasContext *ctx) |
3401 | { |
3402 | TCGvTCGv_i32 EA; |
3403 | TCGvTCGv_i32 t0; |
3404 | if (unlikely(!ctx->fpu_enabled)__builtin_expect(!!(!ctx->fpu_enabled), 0)) { |
3405 | gen_exception(ctx, POWERPC_EXCP_FPU); |
3406 | return; |
3407 | } |
3408 | gen_set_access_type(ctx, ACCESS_FLOAT); |
3409 | EA = tcg_temp_new()tcg_temp_new_i32(); |
3410 | t0 = tcg_temp_new()tcg_temp_new_i32(); |
3411 | gen_addr_reg_index(ctx, EA); |
3412 | gen_qemu_ld32s(ctx, t0, EA); |
3413 | tcg_gen_ext_tl_i64tcg_gen_ext_i32_i64(cpu_fpr[rD(ctx->opcode)], t0); |
3414 | tcg_temp_freetcg_temp_free_i32(EA); |
3415 | tcg_temp_freetcg_temp_free_i32(t0); |
3416 | } |
3417 | |
3418 | /*** Floating-point store ***/ |
3419 | #define GEN_STF(name, stop, opc, type){ .opc1 = opc, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = type, .type2 = PPC_NONE, . handler = &gen_name, }, .oname = "name", }, \ |
3420 | static void glue(gen_, name)gen_name(DisasContext *ctx) \ |
3421 | { \ |
3422 | TCGvTCGv_i32 EA; \ |
3423 | if (unlikely(!ctx->fpu_enabled)__builtin_expect(!!(!ctx->fpu_enabled), 0)) { \ |
3424 | gen_exception(ctx, POWERPC_EXCP_FPU); \ |
3425 | return; \ |
3426 | } \ |
3427 | gen_set_access_type(ctx, ACCESS_FLOAT); \ |
3428 | EA = tcg_temp_new()tcg_temp_new_i32(); \ |
3429 | gen_addr_imm_index(ctx, EA, 0); \ |
3430 | gen_qemu_##stop(ctx, cpu_fpr[rS(ctx->opcode)], EA); \ |
3431 | tcg_temp_freetcg_temp_free_i32(EA); \ |
3432 | } |
3433 | |
3434 | #define GEN_STUF(name, stop, opc, type){ .opc1 = opc, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = type, .type2 = PPC_NONE, . handler = &gen_nameu, }, .oname = "nameu", }, \ |
3435 | static void glue(gen_, name##u)gen_name##u(DisasContext *ctx) \ |
3436 | { \ |
3437 | TCGvTCGv_i32 EA; \ |
3438 | if (unlikely(!ctx->fpu_enabled)__builtin_expect(!!(!ctx->fpu_enabled), 0)) { \ |
3439 | gen_exception(ctx, POWERPC_EXCP_FPU); \ |
3440 | return; \ |
3441 | } \ |
3442 | if (unlikely(rA(ctx->opcode) == 0)__builtin_expect(!!(rA(ctx->opcode) == 0), 0)) { \ |
3443 | gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \ |
3444 | return; \ |
3445 | } \ |
3446 | gen_set_access_type(ctx, ACCESS_FLOAT); \ |
3447 | EA = tcg_temp_new()tcg_temp_new_i32(); \ |
3448 | gen_addr_imm_index(ctx, EA, 0); \ |
3449 | gen_qemu_##stop(ctx, cpu_fpr[rS(ctx->opcode)], EA); \ |
3450 | tcg_gen_mov_tltcg_gen_mov_i32(cpu_gpr[rA(ctx->opcode)], EA); \ |
3451 | tcg_temp_freetcg_temp_free_i32(EA); \ |
3452 | } |
3453 | |
3454 | #define GEN_STUXF(name, stop, opc, type){ .opc1 = 0x1F, .opc2 = 0x17, .opc3 = opc, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = type, .type2 = PPC_NONE, . handler = &gen_nameux, }, .oname = "nameux", }, \ |
3455 | static void glue(gen_, name##ux)gen_name##ux(DisasContext *ctx) \ |
3456 | { \ |
3457 | TCGvTCGv_i32 EA; \ |
3458 | if (unlikely(!ctx->fpu_enabled)__builtin_expect(!!(!ctx->fpu_enabled), 0)) { \ |
3459 | gen_exception(ctx, POWERPC_EXCP_FPU); \ |
3460 | return; \ |
3461 | } \ |
3462 | if (unlikely(rA(ctx->opcode) == 0)__builtin_expect(!!(rA(ctx->opcode) == 0), 0)) { \ |
3463 | gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \ |
3464 | return; \ |
3465 | } \ |
3466 | gen_set_access_type(ctx, ACCESS_FLOAT); \ |
3467 | EA = tcg_temp_new()tcg_temp_new_i32(); \ |
3468 | gen_addr_reg_index(ctx, EA); \ |
3469 | gen_qemu_##stop(ctx, cpu_fpr[rS(ctx->opcode)], EA); \ |
3470 | tcg_gen_mov_tltcg_gen_mov_i32(cpu_gpr[rA(ctx->opcode)], EA); \ |
3471 | tcg_temp_freetcg_temp_free_i32(EA); \ |
3472 | } |
3473 | |
3474 | #define GEN_STXF(name, stop, opc2, opc3, type){ .opc1 = 0x1F, .opc2 = opc2, .opc3 = opc3, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = type, .type2 = PPC_NONE, . handler = &gen_namex, }, .oname = "namex", }, \ |
3475 | static void glue(gen_, name##x)gen_name##x(DisasContext *ctx) \ |
3476 | { \ |
3477 | TCGvTCGv_i32 EA; \ |
3478 | if (unlikely(!ctx->fpu_enabled)__builtin_expect(!!(!ctx->fpu_enabled), 0)) { \ |
3479 | gen_exception(ctx, POWERPC_EXCP_FPU); \ |
3480 | return; \ |
3481 | } \ |
3482 | gen_set_access_type(ctx, ACCESS_FLOAT); \ |
3483 | EA = tcg_temp_new()tcg_temp_new_i32(); \ |
3484 | gen_addr_reg_index(ctx, EA); \ |
3485 | gen_qemu_##stop(ctx, cpu_fpr[rS(ctx->opcode)], EA); \ |
3486 | tcg_temp_freetcg_temp_free_i32(EA); \ |
3487 | } |
3488 | |
3489 | #define GEN_STFS(name, stop, op, type){ .opc1 = op | 0x20, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0, } , .handler = { .inval1 = 0x00000000, .type = type, .type2 = PPC_NONE , .handler = &gen_name, }, .oname = "name", }, { .opc1 = op | 0x21, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = type, .type2 = PPC_NONE, .handler = &gen_nameu, }, .oname = "nameu", }, { .opc1 = 0x1F, .opc2 = 0x17, .opc3 = op | 0x01, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = type, .type2 = PPC_NONE, .handler = & gen_nameux, }, .oname = "nameux", }, { .opc1 = 0x1F, .opc2 = 0x17 , .opc3 = op | 0x00, .pad = { 0, }, .handler = { .inval1 = 0x00000001 , .type = type, .type2 = PPC_NONE, .handler = &gen_namex, }, .oname = "namex", }, \{ .opc1 = op | 0x20, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0, } , .handler = { .inval1 = 0x00000000, .type = type, .type2 = PPC_NONE , .handler = &gen_name, }, .oname = "name", }, |
3490 | GEN_STF(name, stop, op | 0x20, type){ .opc1 = op | 0x20, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0, } , .handler = { .inval1 = 0x00000000, .type = type, .type2 = PPC_NONE , .handler = &gen_name, }, .oname = "name", },; \{ .opc1 = op | 0x21, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0, } , .handler = { .inval1 = 0x00000000, .type = type, .type2 = PPC_NONE , .handler = &gen_nameu, }, .oname = "nameu", }, |
3491 | GEN_STUF(name, stop, op | 0x21, type){ .opc1 = op | 0x21, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0, } , .handler = { .inval1 = 0x00000000, .type = type, .type2 = PPC_NONE , .handler = &gen_nameu, }, .oname = "nameu", },; \{ .opc1 = 0x1F, .opc2 = 0x17, .opc3 = op | 0x01, .pad = { 0, } , .handler = { .inval1 = 0x00000001, .type = type, .type2 = PPC_NONE , .handler = &gen_nameux, }, .oname = "nameux", }, |
3492 | GEN_STUXF(name, stop, op | 0x01, type){ .opc1 = 0x1F, .opc2 = 0x17, .opc3 = op | 0x01, .pad = { 0, } , .handler = { .inval1 = 0x00000001, .type = type, .type2 = PPC_NONE , .handler = &gen_nameux, }, .oname = "nameux", },; \{ .opc1 = 0x1F, .opc2 = 0x17, .opc3 = op | 0x00, .pad = { 0, } , .handler = { .inval1 = 0x00000001, .type = type, .type2 = PPC_NONE , .handler = &gen_namex, }, .oname = "namex", }, |
3493 | GEN_STXF(name, stop, 0x17, op | 0x00, type){ .opc1 = 0x1F, .opc2 = 0x17, .opc3 = op | 0x00, .pad = { 0, } , .handler = { .inval1 = 0x00000001, .type = type, .type2 = PPC_NONE , .handler = &gen_namex, }, .oname = "namex", }, |
3494 | |
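     | /* stfs-family stores convert the 64-bit FPR value back to single
     |  * precision before storing 32 bits to memory. */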
3495 | static inline void gen_qemu_st32fs(DisasContext *ctx, TCGv_i64 arg1, TCGvTCGv_i32 arg2) |
3496 | { |
3497 | TCGv_i32 t0 = tcg_temp_new_i32(); |
3498 | TCGvTCGv_i32 t1 = tcg_temp_new()tcg_temp_new_i32(); |
3499 | gen_helper_float64_to_float32(t0, cpu_env, arg1); |
3500 | tcg_gen_extu_i32_tltcg_gen_mov_i32(t1, t0); |
3501 | tcg_temp_free_i32(t0); |
3502 | gen_qemu_st32(ctx, t1, arg2); |
3503 | tcg_temp_freetcg_temp_free_i32(t1); |
3504 | } |
3505 | |
3506 | /* stfd stfdu stfdux stfdx */ |
3507 | GEN_STFS(stfd, st64, 0x16, PPC_FLOAT){ .opc1 = 0x16 | 0x20, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0 , }, .handler = { .inval1 = 0x00000000, .type = PPC_FLOAT, .type2 = PPC_NONE, .handler = &gen_stfd, }, .oname = "stfd", }, { .opc1 = 0x16 | 0x21, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0 , }, .handler = { .inval1 = 0x00000000, .type = PPC_FLOAT, .type2 = PPC_NONE, .handler = &gen_stfdu, }, .oname = "stfdu", } , { .opc1 = 0x1F, .opc2 = 0x17, .opc3 = 0x16 | 0x01, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_FLOAT, . type2 = PPC_NONE, .handler = &gen_stfdux, }, .oname = "stfdux" , }, { .opc1 = 0x1F, .opc2 = 0x17, .opc3 = 0x16 | 0x00, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_FLOAT , .type2 = PPC_NONE, .handler = &gen_stfdx, }, .oname = "stfdx" , },; |
3508 | /* stfs stfsu stfsux stfsx */ |
3509 | GEN_STFS(stfs, st32fs, 0x14, PPC_FLOAT){ .opc1 = 0x14 | 0x20, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0 , }, .handler = { .inval1 = 0x00000000, .type = PPC_FLOAT, .type2 = PPC_NONE, .handler = &gen_stfs, }, .oname = "stfs", }, { .opc1 = 0x14 | 0x21, .opc2 = 0xFF, .opc3 = 0xFF, .pad = { 0 , }, .handler = { .inval1 = 0x00000000, .type = PPC_FLOAT, .type2 = PPC_NONE, .handler = &gen_stfsu, }, .oname = "stfsu", } , { .opc1 = 0x1F, .opc2 = 0x17, .opc3 = 0x14 | 0x01, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_FLOAT, . type2 = PPC_NONE, .handler = &gen_stfsux, }, .oname = "stfsux" , }, { .opc1 = 0x1F, .opc2 = 0x17, .opc3 = 0x14 | 0x00, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_FLOAT , .type2 = PPC_NONE, .handler = &gen_stfsx, }, .oname = "stfsx" , },; |
3510 | |
3511 | /* stfdp */ |
3512 | static void gen_stfdp(DisasContext *ctx) |
3513 | { |
3514 | TCGvTCGv_i32 EA; |
3515 | if (unlikely(!ctx->fpu_enabled)__builtin_expect(!!(!ctx->fpu_enabled), 0)) { |
3516 | gen_exception(ctx, POWERPC_EXCP_FPU); |
3517 | return; |
3518 | } |
3519 | gen_set_access_type(ctx, ACCESS_FLOAT); |
3520 | EA = tcg_temp_new()tcg_temp_new_i32(); |
3521 |     gen_addr_imm_index(ctx, EA, 0);
3522 | if (unlikely(ctx->le_mode)__builtin_expect(!!(ctx->le_mode), 0)) { |
3523 | gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA); |
3524 | tcg_gen_addi_tltcg_gen_addi_i32(EA, EA, 8); |
3525 | gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode)], EA); |
3526 | } else { |
3527 | gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode)], EA); |
3528 | tcg_gen_addi_tltcg_gen_addi_i32(EA, EA, 8); |
3529 | gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA); |
3530 | } |
3531 | tcg_temp_freetcg_temp_free_i32(EA); |
3532 | } |
3533 | |
3534 | /* stfdpx */ |
3535 | static void gen_stfdpx(DisasContext *ctx) |
3536 | { |
3537 | TCGvTCGv_i32 EA; |
3538 | if (unlikely(!ctx->fpu_enabled)__builtin_expect(!!(!ctx->fpu_enabled), 0)) { |
3539 | gen_exception(ctx, POWERPC_EXCP_FPU); |
3540 | return; |
3541 | } |
3542 | gen_set_access_type(ctx, ACCESS_FLOAT); |
3543 | EA = tcg_temp_new()tcg_temp_new_i32(); |
3544 | gen_addr_reg_index(ctx, EA); |
3545 | if (unlikely(ctx->le_mode)__builtin_expect(!!(ctx->le_mode), 0)) { |
3546 | gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA); |
3547 | tcg_gen_addi_tltcg_gen_addi_i32(EA, EA, 8); |
3548 | gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode)], EA); |
3549 | } else { |
3550 | gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode)], EA); |
3551 | tcg_gen_addi_tltcg_gen_addi_i32(EA, EA, 8); |
3552 | gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA); |
3553 | } |
3554 | tcg_temp_freetcg_temp_free_i32(EA); |
3555 | } |
3556 | |
3557 | /* Optional: */ |
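     | /* stfiwx stores the low-order 32 bits of the FPR as an integer word,
     |  * without any floating-point conversion. */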
3558 | static inline void gen_qemu_st32fiw(DisasContext *ctx, TCGv_i64 arg1, TCGvTCGv_i32 arg2) |
3559 | { |
3560 | TCGvTCGv_i32 t0 = tcg_temp_new()tcg_temp_new_i32(); |
3561 |     tcg_gen_trunc_i64_tltcg_gen_trunc_i64_i32(t0, arg1);
3562 | gen_qemu_st32(ctx, t0, arg2); |
3563 | tcg_temp_freetcg_temp_free_i32(t0); |
3564 | } |
3565 | /* stfiwx */ |
3566 | GEN_STXF(stfiw, st32fiw, 0x17, 0x1E, PPC_FLOAT_STFIWX){ .opc1 = 0x1F, .opc2 = 0x17, .opc3 = 0x1E, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_FLOAT_STFIWX, .type2 = PPC_NONE, .handler = &gen_stfiwx, }, .oname = "stfiwx", } ,; |
3567 | |
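     | /* On PPC64 CPUs that implement it, the CFAR (Come-From Address Register)
     |  * records the address supplied here when a branch or rfi redirects
     |  * control flow; on other CPUs this is a no-op. */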
3568 | static inline void gen_update_cfar(DisasContext *ctx, target_ulong nip) |
3569 | { |
3570 | #if defined(TARGET_PPC64) |
3571 | if (ctx->has_cfar) |
3572 | tcg_gen_movi_tltcg_gen_movi_i32(cpu_cfar, nip); |
3573 | #endif |
3574 | } |
3575 | |
3576 | /*** Branch ***/ |
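     | /* gen_goto_tb chains directly to the target TB only when the destination
     |  * stays on the same guest page and single-stepping is disabled; otherwise
     |  * it updates cpu_nip, raises any pending trace/debug exception and exits
     |  * to the main loop. */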
3577 | static inline void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest) |
3578 | { |
3579 | TranslationBlock *tb; |
3580 | tb = ctx->tb; |
3581 | if (NARROW_MODE(ctx)0) { |
3582 | dest = (uint32_t) dest; |
3583 | } |
3584 | if ((tb->pc & TARGET_PAGE_MASK~((1 << 12) - 1)) == (dest & TARGET_PAGE_MASK~((1 << 12) - 1)) && |
3585 | likely(!ctx->singlestep_enabled)__builtin_expect(!!(!ctx->singlestep_enabled), 1)) { |
3586 | tcg_gen_goto_tb(n); |
3587 | tcg_gen_movi_tltcg_gen_movi_i32(cpu_nip, dest & ~3); |
3588 | tcg_gen_exit_tb((uintptr_t)tb + n); |
3589 | } else { |
3590 | tcg_gen_movi_tltcg_gen_movi_i32(cpu_nip, dest & ~3); |
3591 | if (unlikely(ctx->singlestep_enabled)__builtin_expect(!!(ctx->singlestep_enabled), 0)) { |
3592 | if ((ctx->singlestep_enabled & |
3593 | (CPU_BRANCH_STEP0x2 | CPU_SINGLE_STEP0x1)) && |
3594 | (ctx->exception == POWERPC_EXCP_BRANCH || |
3595 | ctx->exception == POWERPC_EXCP_TRACE)) { |
3596 | target_ulong tmp = ctx->nip; |
3597 | ctx->nip = dest; |
3598 | gen_exception(ctx, POWERPC_EXCP_TRACE); |
3599 | ctx->nip = tmp; |
3600 | } |
3601 | if (ctx->singlestep_enabled & GDBSTUB_SINGLE_STEP0x4) { |
3602 | gen_debug_exception(ctx); |
3603 | } |
3604 | } |
3605 | tcg_gen_exit_tb(0); |
3606 | } |
3607 | } |
3608 | |
3609 | static inline void gen_setlr(DisasContext *ctx, target_ulong nip) |
3610 | { |
3611 | if (NARROW_MODE(ctx)0) { |
3612 | nip = (uint32_t)nip; |
3613 | } |
3614 | tcg_gen_movi_tltcg_gen_movi_i32(cpu_lr, nip); |
3615 | } |
3616 | |
3617 | /* b ba bl bla */ |
3618 | static void gen_b(DisasContext *ctx) |
3619 | { |
3620 | target_ulong li, target; |
3621 | |
3622 | ctx->exception = POWERPC_EXCP_BRANCH; |
3623 | /* sign extend LI */ |
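     |     /* LI is a 26-bit signed displacement; XORing with the sign bit
     |      * (0x02000000) and subtracting it again sign-extends the value. */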
3624 | li = LI(ctx->opcode); |
3625 | li = (li ^ 0x02000000) - 0x02000000; |
3626 | if (likely(AA(ctx->opcode) == 0)__builtin_expect(!!(AA(ctx->opcode) == 0), 1)) { |
3627 | target = ctx->nip + li - 4; |
3628 | } else { |
3629 | target = li; |
3630 | } |
3631 | if (LK(ctx->opcode)) { |
3632 | gen_setlr(ctx, ctx->nip); |
3633 | } |
3634 | gen_update_cfar(ctx, ctx->nip); |
3635 | gen_goto_tb(ctx, 0, target); |
3636 | } |
3637 | |
3638 | #define BCOND_IM0 0 |
3639 | #define BCOND_LR1 1 |
3640 | #define BCOND_CTR2 2 |
3641 | |
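     | /* BO field decoding used below: 0x10 = do not test the CR bit,
     |  * 0x08 = branch if the CR bit is set (otherwise if clear),
     |  * 0x04 = do not decrement/test CTR,
     |  * 0x02 = branch if the decremented CTR is zero (otherwise if non-zero). */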
3642 | static inline void gen_bcond(DisasContext *ctx, int type) |
3643 | { |
3644 | uint32_t bo = BO(ctx->opcode); |
3645 | int l1; |
3646 | TCGvTCGv_i32 target; |
3647 | |
3648 | ctx->exception = POWERPC_EXCP_BRANCH; |
3649 | if (type == BCOND_LR1 || type == BCOND_CTR2) { |
3650 | target = tcg_temp_local_new()tcg_temp_local_new_i32(); |
3651 | if (type == BCOND_CTR2) |
3652 | tcg_gen_mov_tltcg_gen_mov_i32(target, cpu_ctr); |
3653 | else |
3654 | tcg_gen_mov_tltcg_gen_mov_i32(target, cpu_lr); |
3655 | } else { |
3656 | TCGV_UNUSED(target)target = __extension__ ({ TCGv_i32 make_tcgv_tmp = {-1}; make_tcgv_tmp ;}); |
3657 | } |
3658 | if (LK(ctx->opcode)) |
3659 | gen_setlr(ctx, ctx->nip); |
3660 | l1 = gen_new_label(); |
3661 | if ((bo & 0x4) == 0) { |
3662 | /* Decrement and test CTR */ |
3663 | TCGvTCGv_i32 temp = tcg_temp_new()tcg_temp_new_i32(); |
3664 | if (unlikely(type == BCOND_CTR)__builtin_expect(!!(type == 2), 0)) { |
3665 | gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); |
3666 | return; |
3667 | } |
3668 | tcg_gen_subi_tltcg_gen_subi_i32(cpu_ctr, cpu_ctr, 1); |
3669 | if (NARROW_MODE(ctx)0) { |
3670 | tcg_gen_ext32u_tltcg_gen_mov_i32(temp, cpu_ctr); |
3671 | } else { |
3672 | tcg_gen_mov_tltcg_gen_mov_i32(temp, cpu_ctr); |
3673 | } |
3674 | if (bo & 0x2) { |
3675 | tcg_gen_brcondi_tltcg_gen_brcondi_i32(TCG_COND_NE, temp, 0, l1); |
3676 | } else { |
3677 | tcg_gen_brcondi_tltcg_gen_brcondi_i32(TCG_COND_EQ, temp, 0, l1); |
3678 | } |
3679 | tcg_temp_freetcg_temp_free_i32(temp); |
3680 | } |
3681 | if ((bo & 0x10) == 0) { |
3682 | /* Test CR */ |
3683 | uint32_t bi = BI(ctx->opcode); |
3684 | uint32_t mask = 1 << (3 - (bi & 0x03)); |
3685 | TCGv_i32 temp = tcg_temp_new_i32(); |
3686 | |
3687 | if (bo & 0x8) { |
3688 | tcg_gen_andi_i32(temp, cpu_crf[bi >> 2], mask); |
3689 | tcg_gen_brcondi_i32(TCG_COND_EQ, temp, 0, l1); |
3690 | } else { |
3691 | tcg_gen_andi_i32(temp, cpu_crf[bi >> 2], mask); |
3692 | tcg_gen_brcondi_i32(TCG_COND_NE, temp, 0, l1); |
3693 | } |
3694 | tcg_temp_free_i32(temp); |
3695 | } |
3696 | gen_update_cfar(ctx, ctx->nip); |
3697 | if (type == BCOND_IM0) { |
3698 | target_ulong li = (target_long)((int16_t)(BD(ctx->opcode))); |
3699 | if (likely(AA(ctx->opcode) == 0)__builtin_expect(!!(AA(ctx->opcode) == 0), 1)) { |
3700 | gen_goto_tb(ctx, 0, ctx->nip + li - 4); |
3701 | } else { |
3702 | gen_goto_tb(ctx, 0, li); |
3703 | } |
3704 | gen_set_label(l1); |
3705 | gen_goto_tb(ctx, 1, ctx->nip); |
3706 | } else { |
3707 | if (NARROW_MODE(ctx)0) { |
3708 | tcg_gen_andi_tltcg_gen_andi_i32(cpu_nip, target, (uint32_t)~3); |
3709 | } else { |
3710 | tcg_gen_andi_tltcg_gen_andi_i32(cpu_nip, target, ~3); |
3711 | } |
3712 | tcg_gen_exit_tb(0); |
3713 | gen_set_label(l1); |
3714 | gen_update_nip(ctx, ctx->nip); |
3715 | tcg_gen_exit_tb(0); |
3716 | } |
3717 | } |
3718 | |
3719 | static void gen_bc(DisasContext *ctx) |
3720 | { |
3721 | gen_bcond(ctx, BCOND_IM0); |
3722 | } |
3723 | |
3724 | static void gen_bcctr(DisasContext *ctx) |
3725 | { |
3726 | gen_bcond(ctx, BCOND_CTR2); |
3727 | } |
3728 | |
3729 | static void gen_bclr(DisasContext *ctx) |
3730 | { |
3731 | gen_bcond(ctx, BCOND_LR1); |
3732 | } |
3733 | |
3734 | /*** Condition register logical ***/ |
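     | /* Each CR field is held as a 4-bit value in cpu_crf[0..7]. The CR logical
     |  * ops shift the source fields so the selected bits line up with the
     |  * destination bit, apply the 32-bit TCG operation, then merge the single
     |  * result bit back into the destination field. */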
3735 | #define GEN_CRLOGIC(name, tcg_op, opc){ .opc1 = 0x13, .opc2 = 0x01, .opc3 = opc, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_INSNS_BASE, .type2 = PPC_NONE , .handler = &gen_name, }, .oname = "name", } \ |
3736 | static void glue(gen_, name)gen_name(DisasContext *ctx) \ |
3737 | { \ |
3738 | uint8_t bitmask; \ |
3739 | int sh; \ |
3740 | TCGv_i32 t0, t1; \ |
3741 | sh = (crbD(ctx->opcode) & 0x03) - (crbA(ctx->opcode) & 0x03); \ |
3742 | t0 = tcg_temp_new_i32(); \ |
3743 | if (sh > 0) \ |
3744 | tcg_gen_shri_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2], sh); \ |
3745 | else if (sh < 0) \ |
3746 | tcg_gen_shli_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2], -sh); \ |
3747 | else \ |
3748 | tcg_gen_mov_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2]); \ |
3749 | t1 = tcg_temp_new_i32(); \ |
3750 | sh = (crbD(ctx->opcode) & 0x03) - (crbB(ctx->opcode) & 0x03); \ |
3751 | if (sh > 0) \ |
3752 | tcg_gen_shri_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2], sh); \ |
3753 | else if (sh < 0) \ |
3754 | tcg_gen_shli_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2], -sh); \ |
3755 | else \ |
3756 | tcg_gen_mov_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2]); \ |
3757 | tcg_op(t0, t0, t1); \ |
3758 | bitmask = 1 << (3 - (crbD(ctx->opcode) & 0x03)); \ |
3759 | tcg_gen_andi_i32(t0, t0, bitmask); \ |
3760 | tcg_gen_andi_i32(t1, cpu_crf[crbD(ctx->opcode) >> 2], ~bitmask); \ |
3761 | tcg_gen_or_i32(cpu_crf[crbD(ctx->opcode) >> 2], t0, t1); \ |
3762 | tcg_temp_free_i32(t0); \ |
3763 | tcg_temp_free_i32(t1); \ |
3764 | } |
3765 | |
3766 | /* crand */ |
3767 | GEN_CRLOGIC(crand, tcg_gen_and_i32, 0x08){ .opc1 = 0x13, .opc2 = 0x01, .opc3 = 0x08, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_INSNS_BASE, .type2 = PPC_NONE , .handler = &gen_crand, }, .oname = "crand", }; |
3768 | /* crandc */ |
3769 | GEN_CRLOGIC(crandc, tcg_gen_andc_i32, 0x04){ .opc1 = 0x13, .opc2 = 0x01, .opc3 = 0x04, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_INSNS_BASE, .type2 = PPC_NONE , .handler = &gen_crandc, }, .oname = "crandc", }; |
3770 | /* creqv */ |
3771 | GEN_CRLOGIC(creqv, tcg_gen_eqv_i32, 0x09){ .opc1 = 0x13, .opc2 = 0x01, .opc3 = 0x09, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_INSNS_BASE, .type2 = PPC_NONE , .handler = &gen_creqv, }, .oname = "creqv", }; |
3772 | /* crnand */ |
3773 | GEN_CRLOGIC(crnand, tcg_gen_nand_i32, 0x07){ .opc1 = 0x13, .opc2 = 0x01, .opc3 = 0x07, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_INSNS_BASE, .type2 = PPC_NONE , .handler = &gen_crnand, }, .oname = "crnand", }; |
3774 | /* crnor */ |
3775 | GEN_CRLOGIC(crnor, tcg_gen_nor_i32, 0x01){ .opc1 = 0x13, .opc2 = 0x01, .opc3 = 0x01, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_INSNS_BASE, .type2 = PPC_NONE , .handler = &gen_crnor, }, .oname = "crnor", }; |
3776 | /* cror */ |
3777 | GEN_CRLOGIC(cror, tcg_gen_or_i32, 0x0E){ .opc1 = 0x13, .opc2 = 0x01, .opc3 = 0x0E, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_INSNS_BASE, .type2 = PPC_NONE , .handler = &gen_cror, }, .oname = "cror", }; |
3778 | /* crorc */ |
3779 | GEN_CRLOGIC(crorc, tcg_gen_orc_i32, 0x0D){ .opc1 = 0x13, .opc2 = 0x01, .opc3 = 0x0D, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_INSNS_BASE, .type2 = PPC_NONE , .handler = &gen_crorc, }, .oname = "crorc", }; |
3780 | /* crxor */ |
3781 | GEN_CRLOGIC(crxor, tcg_gen_xor_i32, 0x06){ .opc1 = 0x13, .opc2 = 0x01, .opc3 = 0x06, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_INSNS_BASE, .type2 = PPC_NONE , .handler = &gen_crxor, }, .oname = "crxor", }; |
3782 | |
3783 | /* mcrf */ |
3784 | static void gen_mcrf(DisasContext *ctx) |
3785 | { |
3786 | tcg_gen_mov_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfS(ctx->opcode)]); |
3787 | } |
3788 | |
3789 | /*** System linkage ***/ |
3790 | |
3791 | /* rfi (mem_idx only) */ |
3792 | static void gen_rfi(DisasContext *ctx) |
3793 | { |
3794 | #if defined(CONFIG_USER_ONLY) |
3795 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
3796 | #else |
3797 | /* Restore CPU state */ |
3798 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
3799 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
3800 | return; |
3801 | } |
3802 | gen_update_cfar(ctx, ctx->nip); |
3803 | gen_helper_rfi(cpu_env); |
3804 | gen_sync_exception(ctx); |
3805 | #endif |
3806 | } |
3807 | |
3808 | #if defined(TARGET_PPC64) |
3809 | static void gen_rfid(DisasContext *ctx) |
3810 | { |
3811 | #if defined(CONFIG_USER_ONLY) |
3812 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
3813 | #else |
3814 | /* Restore CPU state */ |
3815 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
3816 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
3817 | return; |
3818 | } |
3819 | gen_update_cfar(ctx, ctx->nip); |
3820 | gen_helper_rfid(cpu_env); |
3821 | gen_sync_exception(ctx); |
3822 | #endif |
3823 | } |
3824 | |
3825 | static void gen_hrfid(DisasContext *ctx) |
3826 | { |
3827 | #if defined(CONFIG_USER_ONLY) |
3828 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
3829 | #else |
3830 | /* Restore CPU state */ |
3831 | if (unlikely(ctx->mem_idx <= 1)__builtin_expect(!!(ctx->mem_idx <= 1), 0)) { |
3832 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
3833 | return; |
3834 | } |
3835 | gen_helper_hrfid(cpu_env); |
3836 | gen_sync_exception(ctx); |
3837 | #endif |
3838 | } |
3839 | #endif |
3840 | |
3841 | /* sc */ |
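     | /* The LEV field of sc is forwarded as the exception error code so the
     |  * exception handler can distinguish the system call flavours. */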
3842 | #if defined(CONFIG_USER_ONLY) |
3843 | #define POWERPC_SYSCALLPOWERPC_EXCP_SYSCALL POWERPC_EXCP_SYSCALL_USER |
3844 | #else |
3845 | #define POWERPC_SYSCALLPOWERPC_EXCP_SYSCALL POWERPC_EXCP_SYSCALL |
3846 | #endif |
3847 | static void gen_sc(DisasContext *ctx) |
3848 | { |
3849 | uint32_t lev; |
3850 | |
3851 | lev = (ctx->opcode >> 5) & 0x7F; |
3852 | gen_exception_err(ctx, POWERPC_SYSCALLPOWERPC_EXCP_SYSCALL, lev); |
3853 | } |
3854 | |
3855 | /*** Trap ***/ |
3856 | |
3857 | /* tw */ |
3858 | static void gen_tw(DisasContext *ctx) |
3859 | { |
3860 | TCGv_i32 t0 = tcg_const_i32(TO(ctx->opcode)); |
3861 | /* Update the nip since this might generate a trap exception */ |
3862 | gen_update_nip(ctx, ctx->nip); |
3863 | gen_helper_tw(cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], |
3864 | t0); |
3865 | tcg_temp_free_i32(t0); |
3866 | } |
3867 | |
3868 | /* twi */ |
3869 | static void gen_twi(DisasContext *ctx) |
3870 | { |
3871 | TCGvTCGv_i32 t0 = tcg_const_tltcg_const_i32(SIMM(ctx->opcode)); |
3872 | TCGv_i32 t1 = tcg_const_i32(TO(ctx->opcode)); |
3873 | /* Update the nip since this might generate a trap exception */ |
3874 | gen_update_nip(ctx, ctx->nip); |
3875 | gen_helper_tw(cpu_env, cpu_gpr[rA(ctx->opcode)], t0, t1); |
3876 | tcg_temp_freetcg_temp_free_i32(t0); |
3877 | tcg_temp_free_i32(t1); |
3878 | } |
3879 | |
3880 | #if defined(TARGET_PPC64) |
3881 | /* td */ |
3882 | static void gen_td(DisasContext *ctx) |
3883 | { |
3884 | TCGv_i32 t0 = tcg_const_i32(TO(ctx->opcode)); |
3885 | /* Update the nip since this might generate a trap exception */ |
3886 | gen_update_nip(ctx, ctx->nip); |
3887 | gen_helper_td(cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], |
3888 | t0); |
3889 | tcg_temp_free_i32(t0); |
3890 | } |
3891 | |
3892 | /* tdi */ |
3893 | static void gen_tdi(DisasContext *ctx) |
3894 | { |
3895 | TCGvTCGv_i32 t0 = tcg_const_tltcg_const_i32(SIMM(ctx->opcode)); |
3896 | TCGv_i32 t1 = tcg_const_i32(TO(ctx->opcode)); |
3897 | /* Update the nip since this might generate a trap exception */ |
3898 | gen_update_nip(ctx, ctx->nip); |
3899 | gen_helper_td(cpu_env, cpu_gpr[rA(ctx->opcode)], t0, t1); |
3900 | tcg_temp_freetcg_temp_free_i32(t0); |
3901 | tcg_temp_free_i32(t1); |
3902 | } |
3903 | #endif |
3904 | |
3905 | /*** Processor control ***/ |
3906 | |
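     | /* SO, OV and CA live in dedicated globals (cpu_so, cpu_ov, cpu_ca);
     |  * gen_read_xer/gen_write_xer recombine or split them at the architected
     |  * XER bit positions (SO=31, OV=30, CA=29). */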
3907 | static void gen_read_xer(TCGvTCGv_i32 dst) |
3908 | { |
3909 | TCGvTCGv_i32 t0 = tcg_temp_new()tcg_temp_new_i32(); |
3910 | TCGvTCGv_i32 t1 = tcg_temp_new()tcg_temp_new_i32(); |
3911 | TCGvTCGv_i32 t2 = tcg_temp_new()tcg_temp_new_i32(); |
3912 | tcg_gen_mov_tltcg_gen_mov_i32(dst, cpu_xer); |
3913 | tcg_gen_shli_tltcg_gen_shli_i32(t0, cpu_so, XER_SO31); |
3914 | tcg_gen_shli_tltcg_gen_shli_i32(t1, cpu_ov, XER_OV30); |
3915 | tcg_gen_shli_tltcg_gen_shli_i32(t2, cpu_ca, XER_CA29); |
3916 | tcg_gen_or_tltcg_gen_or_i32(t0, t0, t1); |
3917 | tcg_gen_or_tltcg_gen_or_i32(dst, dst, t2); |
3918 | tcg_gen_or_tltcg_gen_or_i32(dst, dst, t0); |
3919 | tcg_temp_freetcg_temp_free_i32(t0); |
3920 | tcg_temp_freetcg_temp_free_i32(t1); |
3921 | tcg_temp_freetcg_temp_free_i32(t2); |
3922 | } |
3923 | |
3924 | static void gen_write_xer(TCGvTCGv_i32 src) |
3925 | { |
3926 | tcg_gen_andi_tltcg_gen_andi_i32(cpu_xer, src, |
3927 | ~((1u << XER_SO31) | (1u << XER_OV30) | (1u << XER_CA29))); |
3928 | tcg_gen_shri_tltcg_gen_shri_i32(cpu_so, src, XER_SO31); |
3929 | tcg_gen_shri_tltcg_gen_shri_i32(cpu_ov, src, XER_OV30); |
3930 | tcg_gen_shri_tltcg_gen_shri_i32(cpu_ca, src, XER_CA29); |
3931 | tcg_gen_andi_tltcg_gen_andi_i32(cpu_so, cpu_so, 1); |
3932 | tcg_gen_andi_tltcg_gen_andi_i32(cpu_ov, cpu_ov, 1); |
3933 | tcg_gen_andi_tltcg_gen_andi_i32(cpu_ca, cpu_ca, 1); |
3934 | } |
3935 | |
3936 | /* mcrxr */ |
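     | /* mcrxr copies XER[SO,OV,CA] into bits 3..1 of CR field crfD (bit 0 is
     |  * cleared) and then clears the three XER bits. */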
3937 | static void gen_mcrxr(DisasContext *ctx) |
3938 | { |
3939 | TCGv_i32 t0 = tcg_temp_new_i32(); |
3940 | TCGv_i32 t1 = tcg_temp_new_i32(); |
3941 | TCGv_i32 dst = cpu_crf[crfD(ctx->opcode)]; |
3942 | |
3943 | tcg_gen_trunc_tl_i32tcg_gen_mov_i32(t0, cpu_so); |
3944 | tcg_gen_trunc_tl_i32tcg_gen_mov_i32(t1, cpu_ov); |
3945 | tcg_gen_trunc_tl_i32tcg_gen_mov_i32(dst, cpu_ca); |
3946 |     tcg_gen_shli_i32(t0, t0, 3);
3947 |     tcg_gen_shli_i32(t1, t1, 2);
     |     tcg_gen_shli_i32(dst, dst, 1);
3948 |     tcg_gen_or_i32(dst, dst, t0);
3949 |     tcg_gen_or_i32(dst, dst, t1);
3950 | tcg_temp_free_i32(t0); |
3951 | tcg_temp_free_i32(t1); |
3952 | |
3953 | tcg_gen_movi_tltcg_gen_movi_i32(cpu_so, 0); |
3954 | tcg_gen_movi_tltcg_gen_movi_i32(cpu_ov, 0); |
3955 | tcg_gen_movi_tltcg_gen_movi_i32(cpu_ca, 0); |
3956 | } |
3957 | |
3958 | /* mfcr mfocrf */ |
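     | /* In the mfocrf form (opcode bit 0x00100000 set) a single CR field
     |  * selected by CRM is placed at its architected position in rD; the plain
     |  * mfcr form concatenates all eight 4-bit fields. */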
3959 | static void gen_mfcr(DisasContext *ctx) |
3960 | { |
3961 | uint32_t crm, crn; |
3962 | |
3963 | if (likely(ctx->opcode & 0x00100000)__builtin_expect(!!(ctx->opcode & 0x00100000), 1)) { |
3964 | crm = CRM(ctx->opcode); |
3965 | if (likely(crm && ((crm & (crm - 1)) == 0))__builtin_expect(!!(crm && ((crm & (crm - 1)) == 0 )), 1)) { |
3966 | crn = ctz32 (crm); |
3967 | tcg_gen_extu_i32_tltcg_gen_mov_i32(cpu_gpr[rD(ctx->opcode)], cpu_crf[7 - crn]); |
3968 | tcg_gen_shli_tltcg_gen_shli_i32(cpu_gpr[rD(ctx->opcode)], |
3969 | cpu_gpr[rD(ctx->opcode)], crn * 4); |
3970 | } |
3971 | } else { |
3972 | TCGv_i32 t0 = tcg_temp_new_i32(); |
3973 | tcg_gen_mov_i32(t0, cpu_crf[0]); |
3974 | tcg_gen_shli_i32(t0, t0, 4); |
3975 | tcg_gen_or_i32(t0, t0, cpu_crf[1]); |
3976 | tcg_gen_shli_i32(t0, t0, 4); |
3977 | tcg_gen_or_i32(t0, t0, cpu_crf[2]); |
3978 | tcg_gen_shli_i32(t0, t0, 4); |
3979 | tcg_gen_or_i32(t0, t0, cpu_crf[3]); |
3980 | tcg_gen_shli_i32(t0, t0, 4); |
3981 | tcg_gen_or_i32(t0, t0, cpu_crf[4]); |
3982 | tcg_gen_shli_i32(t0, t0, 4); |
3983 | tcg_gen_or_i32(t0, t0, cpu_crf[5]); |
3984 | tcg_gen_shli_i32(t0, t0, 4); |
3985 | tcg_gen_or_i32(t0, t0, cpu_crf[6]); |
3986 | tcg_gen_shli_i32(t0, t0, 4); |
3987 | tcg_gen_or_i32(t0, t0, cpu_crf[7]); |
3988 | tcg_gen_extu_i32_tltcg_gen_mov_i32(cpu_gpr[rD(ctx->opcode)], t0); |
3989 | tcg_temp_free_i32(t0); |
3990 | } |
3991 | } |
3992 | |
3993 | /* mfmsr */ |
3994 | static void gen_mfmsr(DisasContext *ctx) |
3995 | { |
3996 | #if defined(CONFIG_USER_ONLY) |
3997 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); |
3998 | #else |
3999 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
4000 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); |
4001 | return; |
4002 | } |
4003 | tcg_gen_mov_tltcg_gen_mov_i32(cpu_gpr[rD(ctx->opcode)], cpu_msr); |
4004 | #endif |
4005 | } |
4006 | |
4007 | static void spr_noaccess(void *opaque, int gprn, int sprn) |
4008 | { |
4009 | #if 0 |
4010 | sprn = ((sprn >> 5) & 0x1F) | ((sprn & 0x1F) << 5); |
4011 | printf("ERROR: try to access SPR %d !\n", sprn); |
4012 | #endif |
4013 | } |
4014 | #define SPR_NOACCESS(&spr_noaccess) (&spr_noaccess) |
4015 | |
4016 | /* mfspr */ |
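     | /* SPR accesses go through per-SPR callback tables: uea_read is the
     |  * problem-state view, oea_read the supervisor view and hea_read the
     |  * hypervisor view, selected here by ctx->mem_idx. */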
4017 | static inline void gen_op_mfspr(DisasContext *ctx) |
4018 | { |
4019 | void (*read_cb)(void *opaque, int gprn, int sprn); |
4020 | uint32_t sprn = SPR(ctx->opcode); |
4021 | |
4022 | #if !defined(CONFIG_USER_ONLY) |
4023 | if (ctx->mem_idx == 2) |
4024 | read_cb = ctx->spr_cb[sprn].hea_read; |
4025 | else if (ctx->mem_idx) |
4026 | read_cb = ctx->spr_cb[sprn].oea_read; |
4027 | else |
4028 | #endif |
4029 | read_cb = ctx->spr_cb[sprn].uea_read; |
4030 | if (likely(read_cb != NULL)__builtin_expect(!!(read_cb != ((void*)0)), 1)) { |
4031 | if (likely(read_cb != SPR_NOACCESS)__builtin_expect(!!(read_cb != (&spr_noaccess)), 1)) { |
4032 | (*read_cb)(ctx, rD(ctx->opcode), sprn); |
4033 | } else { |
4034 | /* Privilege exception */ |
4035 | /* This is a hack to avoid warnings when running Linux: |
4036 | * this OS breaks the PowerPC virtualisation model, |
4037 |          * allowing userland applications to read the PVR
4038 | */ |
4039 | if (sprn != SPR_PVR(0x11F)) { |
4040 | qemu_log("Trying to read privileged spr %d (0x%03x) at " |
4041 | TARGET_FMT_lx"%08x" "\n", sprn, sprn, ctx->nip - 4); |
4042 | printf("Trying to read privileged spr %d (0x%03x) at " |
4043 | TARGET_FMT_lx"%08x" "\n", sprn, sprn, ctx->nip - 4); |
4044 | } |
4045 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); |
4046 | } |
4047 | } else { |
4048 | /* Not defined */ |
4049 | qemu_log("Trying to read invalid spr %d (0x%03x) at " |
4050 | TARGET_FMT_lx"%08x" "\n", sprn, sprn, ctx->nip - 4); |
4051 | printf("Trying to read invalid spr %d (0x%03x) at " |
4052 | TARGET_FMT_lx"%08x" "\n", sprn, sprn, ctx->nip - 4); |
4053 | gen_inval_exception(ctx, POWERPC_EXCP_INVAL_SPR); |
4054 | } |
4055 | } |
4056 | |
4057 | static void gen_mfspr(DisasContext *ctx) |
4058 | { |
4059 | gen_op_mfspr(ctx); |
4060 | } |
4061 | |
4062 | /* mftb */ |
4063 | static void gen_mftb(DisasContext *ctx) |
4064 | { |
4065 | gen_op_mfspr(ctx); |
4066 | } |
4067 | |
4068 | /* mtcrf mtocrf */
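     | /* In the mtocrf form (opcode bit 0x00100000 set) only the single CR field
     |  * selected by CRM is written; the plain mtcrf form updates every field
     |  * whose CRM bit is set from the corresponding nibble of rS. */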
4069 | static void gen_mtcrf(DisasContext *ctx) |
4070 | { |
4071 | uint32_t crm, crn; |
4072 | |
4073 | crm = CRM(ctx->opcode); |
4074 | if (likely((ctx->opcode & 0x00100000))__builtin_expect(!!((ctx->opcode & 0x00100000)), 1)) { |
4075 | if (crm && ((crm & (crm - 1)) == 0)) { |
4076 | TCGv_i32 temp = tcg_temp_new_i32(); |
4077 | crn = ctz32 (crm); |
4078 | tcg_gen_trunc_tl_i32tcg_gen_mov_i32(temp, cpu_gpr[rS(ctx->opcode)]); |
4079 | tcg_gen_shri_i32(temp, temp, crn * 4); |
4080 | tcg_gen_andi_i32(cpu_crf[7 - crn], temp, 0xf); |
4081 | tcg_temp_free_i32(temp); |
4082 | } |
4083 | } else { |
4084 | TCGv_i32 temp = tcg_temp_new_i32(); |
4085 | tcg_gen_trunc_tl_i32tcg_gen_mov_i32(temp, cpu_gpr[rS(ctx->opcode)]); |
4086 | for (crn = 0 ; crn < 8 ; crn++) { |
4087 | if (crm & (1 << crn)) { |
4088 | tcg_gen_shri_i32(cpu_crf[7 - crn], temp, crn * 4); |
4089 | tcg_gen_andi_i32(cpu_crf[7 - crn], cpu_crf[7 - crn], 0xf); |
4090 | } |
4091 | } |
4092 | tcg_temp_free_i32(temp); |
4093 | } |
4094 | } |
4095 | |
4096 | /* mtmsr */ |
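     | /* The L=1 form (opcode bit 0x00010000) only transfers MSR[EE] and MSR[RI]
     |  * and needs no synchronisation; the full form goes through the store_msr
     |  * helper and ends the TB since the machine state may change. */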
4097 | #if defined(TARGET_PPC64) |
4098 | static void gen_mtmsrd(DisasContext *ctx) |
4099 | { |
4100 | #if defined(CONFIG_USER_ONLY) |
4101 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); |
4102 | #else |
4103 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
4104 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); |
4105 | return; |
4106 | } |
4107 | if (ctx->opcode & 0x00010000) { |
4108 | /* Special form that does not need any synchronisation */ |
4109 | TCGvTCGv_i32 t0 = tcg_temp_new()tcg_temp_new_i32(); |
4110 | tcg_gen_andi_tltcg_gen_andi_i32(t0, cpu_gpr[rS(ctx->opcode)], (1 << MSR_RI1) | (1 << MSR_EE15)); |
4111 | tcg_gen_andi_tltcg_gen_andi_i32(cpu_msr, cpu_msr, ~((1 << MSR_RI1) | (1 << MSR_EE15))); |
4112 | tcg_gen_or_tltcg_gen_or_i32(cpu_msr, cpu_msr, t0); |
4113 | tcg_temp_freetcg_temp_free_i32(t0); |
4114 | } else { |
4115 | /* XXX: we need to update nip before the store |
4116 | * if we enter power saving mode, we will exit the loop |
4117 | * directly from ppc_store_msr |
4118 | */ |
4119 | gen_update_nip(ctx, ctx->nip); |
4120 | gen_helper_store_msr(cpu_env, cpu_gpr[rS(ctx->opcode)]); |
4121 | /* Must stop the translation as machine state (may have) changed */ |
4122 | /* Note that mtmsr is not always defined as context-synchronizing */ |
4123 | gen_stop_exception(ctx); |
4124 | } |
4125 | #endif |
4126 | } |
4127 | #endif |
4128 | |
4129 | static void gen_mtmsr(DisasContext *ctx) |
4130 | { |
4131 | #if defined(CONFIG_USER_ONLY) |
4132 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); |
4133 | #else |
4134 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
4135 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); |
4136 | return; |
4137 | } |
4138 | if (ctx->opcode & 0x00010000) { |
4139 | /* Special form that does not need any synchronisation */ |
4140 | TCGvTCGv_i32 t0 = tcg_temp_new()tcg_temp_new_i32(); |
4141 | tcg_gen_andi_tltcg_gen_andi_i32(t0, cpu_gpr[rS(ctx->opcode)], (1 << MSR_RI1) | (1 << MSR_EE15)); |
4142 | tcg_gen_andi_tltcg_gen_andi_i32(cpu_msr, cpu_msr, ~((1 << MSR_RI1) | (1 << MSR_EE15))); |
4143 | tcg_gen_or_tltcg_gen_or_i32(cpu_msr, cpu_msr, t0); |
4144 | tcg_temp_freetcg_temp_free_i32(t0); |
4145 | } else { |
4146 | TCGvTCGv_i32 msr = tcg_temp_new()tcg_temp_new_i32(); |
4147 | |
4148 | /* XXX: we need to update nip before the store |
4149 | * if we enter power saving mode, we will exit the loop |
4150 | * directly from ppc_store_msr |
4151 | */ |
4152 | gen_update_nip(ctx, ctx->nip); |
4153 | #if defined(TARGET_PPC64) |
4154 | tcg_gen_deposit_tltcg_gen_deposit_i32(msr, cpu_msr, cpu_gpr[rS(ctx->opcode)], 0, 32); |
4155 | #else |
4156 | tcg_gen_mov_tltcg_gen_mov_i32(msr, cpu_gpr[rS(ctx->opcode)]); |
4157 | #endif |
4158 | gen_helper_store_msr(cpu_env, msr); |
4159 | /* Must stop the translation as machine state (may have) changed */ |
4160 | /* Note that mtmsr is not always defined as context-synchronizing */ |
4161 | gen_stop_exception(ctx); |
4162 | } |
4163 | #endif |
4164 | } |
4165 | |
4166 | /* mtspr */ |
4167 | static void gen_mtspr(DisasContext *ctx) |
4168 | { |
4169 | void (*write_cb)(void *opaque, int sprn, int gprn); |
4170 | uint32_t sprn = SPR(ctx->opcode); |
4171 | |
4172 | #if !defined(CONFIG_USER_ONLY) |
4173 | if (ctx->mem_idx == 2) |
4174 | write_cb = ctx->spr_cb[sprn].hea_write; |
4175 | else if (ctx->mem_idx) |
4176 | write_cb = ctx->spr_cb[sprn].oea_write; |
4177 | else |
4178 | #endif |
4179 | write_cb = ctx->spr_cb[sprn].uea_write; |
4180 | if (likely(write_cb != NULL)__builtin_expect(!!(write_cb != ((void*)0)), 1)) { |
4181 | if (likely(write_cb != SPR_NOACCESS)__builtin_expect(!!(write_cb != (&spr_noaccess)), 1)) { |
4182 | (*write_cb)(ctx, sprn, rS(ctx->opcode)); |
4183 | } else { |
4184 | /* Privilege exception */ |
4185 | qemu_log("Trying to write privileged spr %d (0x%03x) at " |
4186 | TARGET_FMT_lx"%08x" "\n", sprn, sprn, ctx->nip - 4); |
4187 | printf("Trying to write privileged spr %d (0x%03x) at " |
4188 | TARGET_FMT_lx"%08x" "\n", sprn, sprn, ctx->nip - 4); |
4189 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); |
4190 | } |
4191 | } else { |
4192 | /* Not defined */ |
4193 | qemu_log("Trying to write invalid spr %d (0x%03x) at " |
4194 | TARGET_FMT_lx"%08x" "\n", sprn, sprn, ctx->nip - 4); |
4195 | printf("Trying to write invalid spr %d (0x%03x) at " |
4196 | TARGET_FMT_lx"%08x" "\n", sprn, sprn, ctx->nip - 4); |
4197 | gen_inval_exception(ctx, POWERPC_EXCP_INVAL_SPR); |
4198 | } |
4199 | } |
4200 | |
4201 | /*** Cache management ***/ |
4202 | |
4203 | /* dcbf */ |
4204 | static void gen_dcbf(DisasContext *ctx) |
4205 | { |
4206 | /* XXX: specification says this is treated as a load by the MMU */ |
4207 | TCGvTCGv_i32 t0; |
4208 | gen_set_access_type(ctx, ACCESS_CACHE); |
4209 | t0 = tcg_temp_new()tcg_temp_new_i32(); |
4210 | gen_addr_reg_index(ctx, t0); |
4211 | gen_qemu_ld8u(ctx, t0, t0); |
4212 | tcg_temp_freetcg_temp_free_i32(t0); |
4213 | } |
4214 | |
4215 | /* dcbi (Supervisor only) */ |
4216 | static void gen_dcbi(DisasContext *ctx) |
4217 | { |
4218 | #if defined(CONFIG_USER_ONLY) |
4219 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
4220 | #else |
4221 | TCGvTCGv_i32 EA, val; |
4222 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
4223 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
4224 | return; |
4225 | } |
4226 | EA = tcg_temp_new()tcg_temp_new_i32(); |
4227 | gen_set_access_type(ctx, ACCESS_CACHE); |
4228 | gen_addr_reg_index(ctx, EA); |
4229 | val = tcg_temp_new()tcg_temp_new_i32(); |
4230 | /* XXX: specification says this should be treated as a store by the MMU */ |
4231 | gen_qemu_ld8u(ctx, val, EA); |
4232 | gen_qemu_st8(ctx, val, EA); |
4233 | tcg_temp_freetcg_temp_free_i32(val); |
4234 | tcg_temp_freetcg_temp_free_i32(EA); |
4235 | #endif |
4236 | } |
4237 | |
4238 | /* dcbst */
4239 | static void gen_dcbst(DisasContext *ctx) |
4240 | { |
4241 |     /* XXX: specification says this is treated as a load by the MMU */
4242 | TCGvTCGv_i32 t0; |
4243 | gen_set_access_type(ctx, ACCESS_CACHE); |
4244 | t0 = tcg_temp_new()tcg_temp_new_i32(); |
4245 | gen_addr_reg_index(ctx, t0); |
4246 | gen_qemu_ld8u(ctx, t0, t0); |
4247 | tcg_temp_freetcg_temp_free_i32(t0); |
4248 | } |
4249 | |
4250 | /* dcbt */ |
4251 | static void gen_dcbt(DisasContext *ctx) |
4252 | { |
4253 | /* interpreted as no-op */ |
4254 |     /* XXX: specification says this is treated as a load by the MMU
4255 | * but does not generate any exception |
4256 | */ |
4257 | } |
4258 | |
4259 | /* dcbtst */ |
4260 | static void gen_dcbtst(DisasContext *ctx) |
4261 | { |
4262 | /* interpreted as no-op */ |
4263 |     /* XXX: specification says this is treated as a load by the MMU
4264 | * but does not generate any exception |
4265 | */ |
4266 | } |
4267 | |
4268 | /* dcbz */ |
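     | /* dcbz clears a whole cache line; the dcbzl form is flagged by opcode bit
     |  * 0x00200000 and passed to the helper, which selects the line size. */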
4269 | static void gen_dcbz(DisasContext *ctx) |
4270 | { |
4271 | TCGvTCGv_i32 tcgv_addr; |
4272 | TCGv_i32 tcgv_is_dcbzl; |
4273 | int is_dcbzl = ctx->opcode & 0x00200000 ? 1 : 0; |
4274 | |
4275 | gen_set_access_type(ctx, ACCESS_CACHE); |
4276 |     /* NIP cannot be restored if the memory exception comes from a helper */
4277 | gen_update_nip(ctx, ctx->nip - 4); |
4278 | tcgv_addr = tcg_temp_new()tcg_temp_new_i32(); |
4279 | tcgv_is_dcbzl = tcg_const_i32(is_dcbzl); |
4280 | |
4281 | gen_addr_reg_index(ctx, tcgv_addr); |
4282 | gen_helper_dcbz(cpu_env, tcgv_addr, tcgv_is_dcbzl); |
4283 | |
4284 | tcg_temp_freetcg_temp_free_i32(tcgv_addr); |
4285 | tcg_temp_free_i32(tcgv_is_dcbzl); |
4286 | } |
4287 | |
4288 | /* dst / dstt */ |
4289 | static void gen_dst(DisasContext *ctx) |
4290 | { |
4291 | if (rA(ctx->opcode) == 0) { |
4292 | gen_inval_exception(ctx, POWERPC_EXCP_INVAL_LSWX); |
4293 | } else { |
4294 | /* interpreted as no-op */ |
4295 | } |
4296 | } |
4297 | |
4298 | /* dstst / dststt */
4299 | static void gen_dstst(DisasContext *ctx) |
4300 | { |
4301 | if (rA(ctx->opcode) == 0) { |
4302 | gen_inval_exception(ctx, POWERPC_EXCP_INVAL_LSWX); |
4303 | } else { |
4304 | /* interpreted as no-op */ |
4305 | } |
4306 | |
4307 | } |
4308 | |
4309 | /* dss / dssall */ |
4310 | static void gen_dss(DisasContext *ctx) |
4311 | { |
4312 | /* interpreted as no-op */ |
4313 | } |
4314 | |
4315 | /* icbi */ |
4316 | static void gen_icbi(DisasContext *ctx) |
4317 | { |
4318 | TCGvTCGv_i32 t0; |
4319 | gen_set_access_type(ctx, ACCESS_CACHE); |
4320 |     /* NIP cannot be restored if the memory exception comes from a helper */
4321 | gen_update_nip(ctx, ctx->nip - 4); |
4322 | t0 = tcg_temp_new()tcg_temp_new_i32(); |
4323 | gen_addr_reg_index(ctx, t0); |
4324 | gen_helper_icbi(cpu_env, t0); |
4325 | tcg_temp_freetcg_temp_free_i32(t0); |
4326 | } |
4327 | |
4328 | /* Optional: */ |
4329 | /* dcba */ |
4330 | static void gen_dcba(DisasContext *ctx) |
4331 | { |
4332 | /* interpreted as no-op */ |
4333 |     /* XXX: specification says this is treated as a store by the MMU
4334 | * but does not generate any exception |
4335 | */ |
4336 | } |
4337 | |
4338 | /*** Segment register manipulation ***/ |
4339 | /* Supervisor only: */ |
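     | /* For mfsrin/mtsrin the segment register number comes from bits 28..31 of
     |  * rB ((rB >> 28) & 0xF); mfsr/mtsr encode it directly in the opcode. */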
4340 | |
4341 | /* mfsr */ |
4342 | static void gen_mfsr(DisasContext *ctx) |
4343 | { |
4344 | #if defined(CONFIG_USER_ONLY) |
4345 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); |
4346 | #else |
4347 | TCGvTCGv_i32 t0; |
4348 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
4349 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); |
4350 | return; |
4351 | } |
4352 | t0 = tcg_const_tltcg_const_i32(SR(ctx->opcode)); |
4353 | gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); |
4354 | tcg_temp_freetcg_temp_free_i32(t0); |
4355 | #endif |
4356 | } |
4357 | |
4358 | /* mfsrin */ |
4359 | static void gen_mfsrin(DisasContext *ctx) |
4360 | { |
4361 | #if defined(CONFIG_USER_ONLY) |
4362 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); |
4363 | #else |
4364 | TCGvTCGv_i32 t0; |
4365 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
4366 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); |
4367 | return; |
4368 | } |
4369 | t0 = tcg_temp_new()tcg_temp_new_i32(); |
4370 | tcg_gen_shri_tltcg_gen_shri_i32(t0, cpu_gpr[rB(ctx->opcode)], 28); |
4371 | tcg_gen_andi_tltcg_gen_andi_i32(t0, t0, 0xF); |
4372 | gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); |
4373 | tcg_temp_freetcg_temp_free_i32(t0); |
4374 | #endif |
4375 | } |
4376 | |
4377 | /* mtsr */ |
4378 | static void gen_mtsr(DisasContext *ctx) |
4379 | { |
4380 | #if defined(CONFIG_USER_ONLY) |
4381 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); |
4382 | #else |
4383 | TCGvTCGv_i32 t0; |
4384 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
4385 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); |
4386 | return; |
4387 | } |
4388 | t0 = tcg_const_tltcg_const_i32(SR(ctx->opcode)); |
4389 | gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]); |
4390 | tcg_temp_freetcg_temp_free_i32(t0); |
4391 | #endif |
4392 | } |
4393 | |
4394 | /* mtsrin */ |
4395 | static void gen_mtsrin(DisasContext *ctx) |
4396 | { |
4397 | #if defined(CONFIG_USER_ONLY) |
4398 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); |
4399 | #else |
4400 | TCGvTCGv_i32 t0; |
4401 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
4402 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); |
4403 | return; |
4404 | } |
4405 | t0 = tcg_temp_new()tcg_temp_new_i32(); |
4406 | tcg_gen_shri_tltcg_gen_shri_i32(t0, cpu_gpr[rB(ctx->opcode)], 28); |
4407 | tcg_gen_andi_tltcg_gen_andi_i32(t0, t0, 0xF); |
4408 | gen_helper_store_sr(cpu_env, t0, cpu_gpr[rD(ctx->opcode)]); |
4409 | tcg_temp_freetcg_temp_free_i32(t0); |
4410 | #endif |
4411 | } |
4412 | |
4413 | #if defined(TARGET_PPC64) |
4414 | /* Specific implementation for PowerPC 64 "bridge" emulation using SLB */ |
4415 | |
4416 | /* mfsr */ |
4417 | static void gen_mfsr_64b(DisasContext *ctx) |
4418 | { |
4419 | #if defined(CONFIG_USER_ONLY) |
4420 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); |
4421 | #else |
4422 | TCGvTCGv_i32 t0; |
4423 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
4424 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); |
4425 | return; |
4426 | } |
4427 | t0 = tcg_const_tltcg_const_i32(SR(ctx->opcode)); |
4428 | gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); |
4429 | tcg_temp_freetcg_temp_free_i32(t0); |
4430 | #endif |
4431 | } |
4432 | |
4433 | /* mfsrin */ |
4434 | static void gen_mfsrin_64b(DisasContext *ctx) |
4435 | { |
4436 | #if defined(CONFIG_USER_ONLY) |
4437 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); |
4438 | #else |
4439 | TCGvTCGv_i32 t0; |
4440 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
4441 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); |
4442 | return; |
4443 | } |
4444 | t0 = tcg_temp_new()tcg_temp_new_i32(); |
4445 | tcg_gen_shri_tltcg_gen_shri_i32(t0, cpu_gpr[rB(ctx->opcode)], 28); |
4446 | tcg_gen_andi_tltcg_gen_andi_i32(t0, t0, 0xF); |
4447 | gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); |
4448 | tcg_temp_freetcg_temp_free_i32(t0); |
4449 | #endif |
4450 | } |
4451 | |
4452 | /* mtsr */ |
4453 | static void gen_mtsr_64b(DisasContext *ctx) |
4454 | { |
4455 | #if defined(CONFIG_USER_ONLY) |
4456 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); |
4457 | #else |
4458 | TCGvTCGv_i32 t0; |
4459 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
4460 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); |
4461 | return; |
4462 | } |
4463 | t0 = tcg_const_tltcg_const_i32(SR(ctx->opcode)); |
4464 | gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]); |
4465 | tcg_temp_freetcg_temp_free_i32(t0); |
4466 | #endif |
4467 | } |
4468 | |
4469 | /* mtsrin */ |
4470 | static void gen_mtsrin_64b(DisasContext *ctx) |
4471 | { |
4472 | #if defined(CONFIG_USER_ONLY) |
4473 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); |
4474 | #else |
4475 | TCGvTCGv_i32 t0; |
4476 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
4477 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); |
4478 | return; |
4479 | } |
4480 | t0 = tcg_temp_new()tcg_temp_new_i32(); |
4481 | tcg_gen_shri_tltcg_gen_shri_i32(t0, cpu_gpr[rB(ctx->opcode)], 28); |
4482 | tcg_gen_andi_tltcg_gen_andi_i32(t0, t0, 0xF); |
4483 | gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]); |
4484 | tcg_temp_freetcg_temp_free_i32(t0); |
4485 | #endif |
4486 | } |
4487 | |
4488 | /* slbmte */ |
4489 | static void gen_slbmte(DisasContext *ctx) |
4490 | { |
4491 | #if defined(CONFIG_USER_ONLY) |
4492 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); |
4493 | #else |
4494 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
4495 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); |
4496 | return; |
4497 | } |
4498 | gen_helper_store_slb(cpu_env, cpu_gpr[rB(ctx->opcode)], |
4499 | cpu_gpr[rS(ctx->opcode)]); |
4500 | #endif |
4501 | } |
4502 | |
4503 | static void gen_slbmfee(DisasContext *ctx) |
4504 | { |
4505 | #if defined(CONFIG_USER_ONLY) |
4506 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); |
4507 | #else |
4508 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
4509 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); |
4510 | return; |
4511 | } |
4512 | gen_helper_load_slb_esid(cpu_gpr[rS(ctx->opcode)], cpu_env, |
4513 | cpu_gpr[rB(ctx->opcode)]); |
4514 | #endif |
4515 | } |
4516 | |
4517 | static void gen_slbmfev(DisasContext *ctx) |
4518 | { |
4519 | #if defined(CONFIG_USER_ONLY) |
4520 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); |
4521 | #else |
4522 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
4523 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); |
4524 | return; |
4525 | } |
4526 | gen_helper_load_slb_vsid(cpu_gpr[rS(ctx->opcode)], cpu_env, |
4527 | cpu_gpr[rB(ctx->opcode)]); |
4528 | #endif |
4529 | } |
4530 | #endif /* defined(TARGET_PPC64) */ |
4531 | |
4532 | /*** Lookaside buffer management ***/ |
4533 | /* Optional & mem_idx only: */ |
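     | /* TLB invalidations are performed eagerly by the helpers, so tlbsync only
     |  * has to end the TB; architecturally it orders preceding tlbie operations. */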
4534 | |
4535 | /* tlbia */ |
4536 | static void gen_tlbia(DisasContext *ctx) |
4537 | { |
4538 | #if defined(CONFIG_USER_ONLY) |
4539 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
4540 | #else |
4541 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
4542 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
4543 | return; |
4544 | } |
4545 | gen_helper_tlbia(cpu_env); |
4546 | #endif |
4547 | } |
4548 | |
4549 | /* tlbiel */ |
4550 | static void gen_tlbiel(DisasContext *ctx) |
4551 | { |
4552 | #if defined(CONFIG_USER_ONLY) |
4553 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
4554 | #else |
4555 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
4556 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
4557 | return; |
4558 | } |
4559 | gen_helper_tlbie(cpu_env, cpu_gpr[rB(ctx->opcode)]); |
4560 | #endif |
4561 | } |
4562 | |
4563 | /* tlbie */ |
4564 | static void gen_tlbie(DisasContext *ctx) |
4565 | { |
4566 | #if defined(CONFIG_USER_ONLY) |
4567 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
4568 | #else |
4569 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
4570 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
4571 | return; |
4572 | } |
4573 | if (NARROW_MODE(ctx)0) { |
4574 | TCGvTCGv_i32 t0 = tcg_temp_new()tcg_temp_new_i32(); |
4575 | tcg_gen_ext32u_tltcg_gen_mov_i32(t0, cpu_gpr[rB(ctx->opcode)]); |
4576 | gen_helper_tlbie(cpu_env, t0); |
4577 | tcg_temp_freetcg_temp_free_i32(t0); |
4578 | } else { |
4579 | gen_helper_tlbie(cpu_env, cpu_gpr[rB(ctx->opcode)]); |
4580 | } |
4581 | #endif |
4582 | } |
4583 | |
4584 | /* tlbsync */ |
4585 | static void gen_tlbsync(DisasContext *ctx) |
4586 | { |
4587 | #if defined(CONFIG_USER_ONLY) |
4588 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
4589 | #else |
4590 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
4591 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
4592 | return; |
4593 | } |
4594 | /* This has no effect: it should ensure that all previous |
4595 |      * tlbie operations have completed
4596 | */ |
4597 | gen_stop_exception(ctx); |
4598 | #endif |
4599 | } |
4600 | |
4601 | #if defined(TARGET_PPC64) |
4602 | /* slbia */ |
4603 | static void gen_slbia(DisasContext *ctx) |
4604 | { |
4605 | #if defined(CONFIG_USER_ONLY) |
4606 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
4607 | #else |
4608 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
4609 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
4610 | return; |
4611 | } |
4612 | gen_helper_slbia(cpu_env); |
4613 | #endif |
4614 | } |
4615 | |
4616 | /* slbie */ |
4617 | static void gen_slbie(DisasContext *ctx) |
4618 | { |
4619 | #if defined(CONFIG_USER_ONLY) |
4620 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
4621 | #else |
4622 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
4623 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
4624 | return; |
4625 | } |
4626 | gen_helper_slbie(cpu_env, cpu_gpr[rB(ctx->opcode)]); |
4627 | #endif |
4628 | } |
4629 | #endif |
4630 | |
4631 | /*** External control ***/ |
4632 | /* Optional: */ |
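     | /* eciwx/ecowx are the external-control load/store; both enforce a
     |  * word-aligned effective address via gen_check_align, while EAR[E]
     |  * checking is still a TODO (see the comments below). */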
4633 | |
4634 | /* eciwx */ |
4635 | static void gen_eciwx(DisasContext *ctx) |
4636 | { |
4637 | TCGvTCGv_i32 t0; |
4638 | /* Should check EAR[E] ! */ |
4639 | gen_set_access_type(ctx, ACCESS_EXT); |
4640 | t0 = tcg_temp_new()tcg_temp_new_i32(); |
4641 | gen_addr_reg_index(ctx, t0); |
4642 | gen_check_align(ctx, t0, 0x03); |
4643 | gen_qemu_ld32u(ctx, cpu_gpr[rD(ctx->opcode)], t0); |
4644 | tcg_temp_freetcg_temp_free_i32(t0); |
4645 | } |
4646 | |
4647 | /* ecowx */ |
4648 | static void gen_ecowx(DisasContext *ctx) |
4649 | { |
4650 | TCGvTCGv_i32 t0; |
4651 | /* Should check EAR[E] ! */ |
4652 | gen_set_access_type(ctx, ACCESS_EXT); |
4653 | t0 = tcg_temp_new()tcg_temp_new_i32(); |
4654 | gen_addr_reg_index(ctx, t0); |
4655 | gen_check_align(ctx, t0, 0x03); |
4656 | gen_qemu_st32(ctx, cpu_gpr[rD(ctx->opcode)], t0); |
4657 | tcg_temp_freetcg_temp_free_i32(t0); |
4658 | } |
4659 | |
4660 | /* PowerPC 601 specific instructions */ |
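     | /* These POWER-compatibility instructions operate on 32-bit values and
     |  * many of them use the 601's MQ special register as an implicit source
     |  * or destination, accessed below through gen_load_spr/gen_store_spr.
     |  */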
4661 | |
4662 | /* abs - abs. */ |
4663 | static void gen_abs(DisasContext *ctx) |
4664 | { |
4665 | int l1 = gen_new_label(); |
4666 | int l2 = gen_new_label(); |
4667 | tcg_gen_brcondi_tltcg_gen_brcondi_i32(TCG_COND_GE, cpu_gpr[rA(ctx->opcode)], 0, l1); |
4668 | tcg_gen_neg_tltcg_gen_neg_i32(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); |
4669 | tcg_gen_br(l2); |
4670 | gen_set_label(l1); |
4671 | tcg_gen_mov_tltcg_gen_mov_i32(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); |
4672 | gen_set_label(l2); |
4673 | if (unlikely(Rc(ctx->opcode) != 0)__builtin_expect(!!(Rc(ctx->opcode) != 0), 0)) |
4674 | gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); |
4675 | } |
4676 | |
4677 | /* abso - abso. */ |
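     | /* Same as abs, but also reports overflow: the only input whose
     |  * absolute value is not representable in 32 bits is 0x80000000,
     |  * so OV and SO are set for that value only.
     |  */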
4678 | static void gen_abso(DisasContext *ctx) |
4679 | { |
4680 | int l1 = gen_new_label(); |
4681 | int l2 = gen_new_label(); |
4682 | int l3 = gen_new_label(); |
4683 | /* Start with XER OV disabled, the most likely case */ |
4684 | tcg_gen_movi_tltcg_gen_movi_i32(cpu_ov, 0); |
4685 | tcg_gen_brcondi_tltcg_gen_brcondi_i32(TCG_COND_GE, cpu_gpr[rA(ctx->opcode)], 0, l2); |
4686 | tcg_gen_brcondi_tltcg_gen_brcondi_i32(TCG_COND_NE, cpu_gpr[rA(ctx->opcode)], 0x80000000, l1); |
4687 | tcg_gen_movi_tltcg_gen_movi_i32(cpu_ov, 1); |
4688 | tcg_gen_movi_tltcg_gen_movi_i32(cpu_so, 1); |
4689 | tcg_gen_br(l2); |
4690 | gen_set_label(l1); |
4691 | tcg_gen_neg_tltcg_gen_neg_i32(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); |
4692 | tcg_gen_br(l3); |
4693 | gen_set_label(l2); |
4694 | tcg_gen_mov_tltcg_gen_mov_i32(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); |
4695 | gen_set_label(l3); |
4696 | if (unlikely(Rc(ctx->opcode) != 0)__builtin_expect(!!(Rc(ctx->opcode) != 0), 0)) |
4697 | gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); |
4698 | } |
4699 | |
4700 | /* clcs */ |
4701 | static void gen_clcs(DisasContext *ctx) |
4702 | { |
4703 | TCGv_i32 t0 = tcg_const_i32(rA(ctx->opcode)); |
4704 | gen_helper_clcs(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); |
4705 | tcg_temp_free_i32(t0); |
4706 | /* Rc=1 sets CR0 to an undefined state */ |
4707 | } |
4708 | |
4709 | /* div - div. */ |
4710 | static void gen_div(DisasContext *ctx) |
4711 | { |
4712 | gen_helper_div(cpu_gpr[rD(ctx->opcode)], cpu_env, cpu_gpr[rA(ctx->opcode)], |
4713 | cpu_gpr[rB(ctx->opcode)]); |
4714 | if (unlikely(Rc(ctx->opcode) != 0)__builtin_expect(!!(Rc(ctx->opcode) != 0), 0)) |
4715 | gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); |
4716 | } |
4717 | |
4718 | /* divo - divo. */ |
4719 | static void gen_divo(DisasContext *ctx) |
4720 | { |
4721 | gen_helper_divo(cpu_gpr[rD(ctx->opcode)], cpu_env, cpu_gpr[rA(ctx->opcode)], |
4722 | cpu_gpr[rB(ctx->opcode)]); |
4723 | if (unlikely(Rc(ctx->opcode) != 0)__builtin_expect(!!(Rc(ctx->opcode) != 0), 0)) |
4724 | gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); |
4725 | } |
4726 | |
4727 | /* divs - divs. */ |
4728 | static void gen_divs(DisasContext *ctx) |
4729 | { |
4730 | gen_helper_divs(cpu_gpr[rD(ctx->opcode)], cpu_env, cpu_gpr[rA(ctx->opcode)], |
4731 | cpu_gpr[rB(ctx->opcode)]); |
4732 | if (unlikely(Rc(ctx->opcode) != 0)__builtin_expect(!!(Rc(ctx->opcode) != 0), 0)) |
4733 | gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); |
4734 | } |
4735 | |
4736 | /* divso - divso. */ |
4737 | static void gen_divso(DisasContext *ctx) |
4738 | { |
4739 | gen_helper_divso(cpu_gpr[rD(ctx->opcode)], cpu_env, |
4740 | cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); |
4741 | if (unlikely(Rc(ctx->opcode) != 0)__builtin_expect(!!(Rc(ctx->opcode) != 0), 0)) |
4742 | gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); |
4743 | } |
4744 | |
4745 | /* doz - doz. */ |
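     | /* doz ("difference or zero") writes rB - rA to rD when rB >= rA
     |  * (signed comparison) and 0 otherwise.
     |  */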
4746 | static void gen_doz(DisasContext *ctx) |
4747 | { |
4748 | int l1 = gen_new_label(); |
4749 | int l2 = gen_new_label(); |
4750 | tcg_gen_brcond_tltcg_gen_brcond_i32(TCG_COND_GE, cpu_gpr[rB(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], l1); |
4751 | tcg_gen_sub_tltcg_gen_sub_i32(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); |
4752 | tcg_gen_br(l2); |
4753 | gen_set_label(l1); |
4754 | tcg_gen_movi_tltcg_gen_movi_i32(cpu_gpr[rD(ctx->opcode)], 0); |
4755 | gen_set_label(l2); |
4756 | if (unlikely(Rc(ctx->opcode) != 0)__builtin_expect(!!(Rc(ctx->opcode) != 0), 0)) |
4757 | gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); |
4758 | } |
4759 | |
4760 | /* dozo - dozo. */ |
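     | /* Same as doz, with overflow detection on the subtraction: with
     |  * t0 = rB - rA, overflow occurred iff ((rB ^ rA) & ~(rA ^ t0)) < 0,
     |  * i.e. the operands had different signs and the result took the sign
     |  * of rA rather than that of rB.
     |  */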
4761 | static void gen_dozo(DisasContext *ctx) |
4762 | { |
4763 | int l1 = gen_new_label(); |
4764 | int l2 = gen_new_label(); |
4765 | TCGvTCGv_i32 t0 = tcg_temp_new()tcg_temp_new_i32(); |
4766 | TCGvTCGv_i32 t1 = tcg_temp_new()tcg_temp_new_i32(); |
4767 | TCGvTCGv_i32 t2 = tcg_temp_new()tcg_temp_new_i32(); |
4768 | /* Start with XER OV disabled, the most likely case */ |
4769 | tcg_gen_movi_tltcg_gen_movi_i32(cpu_ov, 0); |
4770 | tcg_gen_brcond_tltcg_gen_brcond_i32(TCG_COND_GE, cpu_gpr[rB(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], l1); |
4771 | tcg_gen_sub_tltcg_gen_sub_i32(t0, cpu_gpr[rB(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); |
4772 | tcg_gen_xor_tltcg_gen_xor_i32(t1, cpu_gpr[rB(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); |
4773 | tcg_gen_xor_tltcg_gen_xor_i32(t2, cpu_gpr[rA(ctx->opcode)], t0); |
4774 | tcg_gen_andc_tltcg_gen_andc_i32(t1, t1, t2); |
4775 | tcg_gen_mov_tltcg_gen_mov_i32(cpu_gpr[rD(ctx->opcode)], t0); |
4776 | tcg_gen_brcondi_tltcg_gen_brcondi_i32(TCG_COND_GE, t1, 0, l2); |
4777 | tcg_gen_movi_tltcg_gen_movi_i32(cpu_ov, 1); |
4778 | tcg_gen_movi_tltcg_gen_movi_i32(cpu_so, 1); |
4779 | tcg_gen_br(l2); |
4780 | gen_set_label(l1); |
4781 | tcg_gen_movi_tltcg_gen_movi_i32(cpu_gpr[rD(ctx->opcode)], 0); |
4782 | gen_set_label(l2); |
4783 | tcg_temp_freetcg_temp_free_i32(t0); |
4784 | tcg_temp_freetcg_temp_free_i32(t1); |
4785 | tcg_temp_freetcg_temp_free_i32(t2); |
4786 | if (unlikely(Rc(ctx->opcode) != 0)__builtin_expect(!!(Rc(ctx->opcode) != 0), 0)) |
4787 | gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); |
4788 | } |
4789 | |
4790 | /* dozi */ |
4791 | static void gen_dozi(DisasContext *ctx) |
4792 | { |
4793 | target_long simm = SIMM(ctx->opcode); |
4794 | int l1 = gen_new_label(); |
4795 | int l2 = gen_new_label(); |
4796 | tcg_gen_brcondi_tltcg_gen_brcondi_i32(TCG_COND_LT, cpu_gpr[rA(ctx->opcode)], simm, l1); |
4797 | tcg_gen_subfi_tltcg_gen_subfi_i32(cpu_gpr[rD(ctx->opcode)], simm, cpu_gpr[rA(ctx->opcode)]); |
4798 | tcg_gen_br(l2); |
4799 | gen_set_label(l1); |
4800 | tcg_gen_movi_tltcg_gen_movi_i32(cpu_gpr[rD(ctx->opcode)], 0); |
4801 | gen_set_label(l2); |
4802 | if (unlikely(Rc(ctx->opcode) != 0)__builtin_expect(!!(Rc(ctx->opcode) != 0), 0)) |
4803 | gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); |
4804 | } |
4805 | |
4806 | /* lscbx - lscbx. */ |
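     | /* Load string and compare byte indexed: the helper transfers bytes
     |  * until the requested count is reached or the compare byte is matched,
     |  * and returns the number of bytes actually moved; that count replaces
     |  * the low 7 bits of XER below.
     |  */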
4807 | static void gen_lscbx(DisasContext *ctx) |
4808 | { |
4809 | TCGvTCGv_i32 t0 = tcg_temp_new()tcg_temp_new_i32(); |
4810 | TCGv_i32 t1 = tcg_const_i32(rD(ctx->opcode)); |
4811 | TCGv_i32 t2 = tcg_const_i32(rA(ctx->opcode)); |
4812 | TCGv_i32 t3 = tcg_const_i32(rB(ctx->opcode)); |
4813 | |
4814 | gen_addr_reg_index(ctx, t0); |
4815 |     /* NIP cannot be restored if the memory exception comes from a helper */
4816 | gen_update_nip(ctx, ctx->nip - 4); |
4817 | gen_helper_lscbx(t0, cpu_env, t0, t1, t2, t3); |
4818 | tcg_temp_free_i32(t1); |
4819 | tcg_temp_free_i32(t2); |
4820 | tcg_temp_free_i32(t3); |
4821 | tcg_gen_andi_tltcg_gen_andi_i32(cpu_xer, cpu_xer, ~0x7F); |
4822 | tcg_gen_or_tltcg_gen_or_i32(cpu_xer, cpu_xer, t0); |
4823 | if (unlikely(Rc(ctx->opcode) != 0)__builtin_expect(!!(Rc(ctx->opcode) != 0), 0)) |
4824 | gen_set_Rc0(ctx, t0); |
4825 | tcg_temp_freetcg_temp_free_i32(t0); |
4826 | } |
4827 | |
4828 | /* maskg - maskg. */ |
4829 | static void gen_maskg(DisasContext *ctx) |
4830 | { |
4831 | int l1 = gen_new_label(); |
4832 | TCGvTCGv_i32 t0 = tcg_temp_new()tcg_temp_new_i32(); |
4833 | TCGvTCGv_i32 t1 = tcg_temp_new()tcg_temp_new_i32(); |
4834 | TCGvTCGv_i32 t2 = tcg_temp_new()tcg_temp_new_i32(); |
4835 | TCGvTCGv_i32 t3 = tcg_temp_new()tcg_temp_new_i32(); |
4836 | tcg_gen_movi_tltcg_gen_movi_i32(t3, 0xFFFFFFFF); |
4837 | tcg_gen_andi_tltcg_gen_andi_i32(t0, cpu_gpr[rB(ctx->opcode)], 0x1F); |
4838 | tcg_gen_andi_tltcg_gen_andi_i32(t1, cpu_gpr[rS(ctx->opcode)], 0x1F); |
4839 | tcg_gen_addi_tltcg_gen_addi_i32(t2, t0, 1); |
4840 | tcg_gen_shr_tltcg_gen_shr_i32(t2, t3, t2); |
4841 | tcg_gen_shr_tltcg_gen_shr_i32(t3, t3, t1); |
4842 | tcg_gen_xor_tltcg_gen_xor_i32(cpu_gpr[rA(ctx->opcode)], t2, t3); |
4843 | tcg_gen_brcond_tltcg_gen_brcond_i32(TCG_COND_GE, t0, t1, l1); |
4844 | tcg_gen_neg_tltcg_gen_neg_i32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); |
4845 | gen_set_label(l1); |
4846 | tcg_temp_freetcg_temp_free_i32(t0); |
4847 | tcg_temp_freetcg_temp_free_i32(t1); |
4848 | tcg_temp_freetcg_temp_free_i32(t2); |
4849 | tcg_temp_freetcg_temp_free_i32(t3); |
4850 | if (unlikely(Rc(ctx->opcode) != 0)__builtin_expect(!!(Rc(ctx->opcode) != 0), 0)) |
4851 | gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); |
4852 | } |
4853 | |
4854 | /* maskir - maskir. */ |
4855 | static void gen_maskir(DisasContext *ctx) |
4856 | { |
4857 | TCGvTCGv_i32 t0 = tcg_temp_new()tcg_temp_new_i32(); |
4858 | TCGvTCGv_i32 t1 = tcg_temp_new()tcg_temp_new_i32(); |
4859 | tcg_gen_and_tltcg_gen_and_i32(t0, cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); |
4860 | tcg_gen_andc_tltcg_gen_andc_i32(t1, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); |
4861 | tcg_gen_or_tltcg_gen_or_i32(cpu_gpr[rA(ctx->opcode)], t0, t1); |
4862 | tcg_temp_freetcg_temp_free_i32(t0); |
4863 | tcg_temp_freetcg_temp_free_i32(t1); |
4864 | if (unlikely(Rc(ctx->opcode) != 0)__builtin_expect(!!(Rc(ctx->opcode) != 0), 0)) |
4865 | gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); |
4866 | } |
4867 | |
4868 | /* mul - mul. */ |
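     | /* The original POWER mul produces a 64-bit product: the high 32 bits
     |  * are written to rD and the low 32 bits to MQ, hence the widening to
     |  * 64 bits below.
     |  */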
4869 | static void gen_mul(DisasContext *ctx) |
4870 | { |
4871 | TCGv_i64 t0 = tcg_temp_new_i64(); |
4872 | TCGv_i64 t1 = tcg_temp_new_i64(); |
4873 | TCGvTCGv_i32 t2 = tcg_temp_new()tcg_temp_new_i32(); |
4874 | tcg_gen_extu_tl_i64tcg_gen_extu_i32_i64(t0, cpu_gpr[rA(ctx->opcode)]); |
4875 | tcg_gen_extu_tl_i64tcg_gen_extu_i32_i64(t1, cpu_gpr[rB(ctx->opcode)]); |
4876 | tcg_gen_mul_i64(t0, t0, t1); |
4877 | tcg_gen_trunc_i64_tltcg_gen_trunc_i64_i32(t2, t0); |
4878 | gen_store_spr(SPR_MQ(0x000), t2); |
4879 | tcg_gen_shri_i64(t1, t0, 32); |
4880 | tcg_gen_trunc_i64_tltcg_gen_trunc_i64_i32(cpu_gpr[rD(ctx->opcode)], t1); |
4881 | tcg_temp_free_i64(t0); |
4882 | tcg_temp_free_i64(t1); |
4883 | tcg_temp_freetcg_temp_free_i32(t2); |
4884 | if (unlikely(Rc(ctx->opcode) != 0)__builtin_expect(!!(Rc(ctx->opcode) != 0), 0)) |
4885 | gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); |
4886 | } |
4887 | |
4888 | /* mulo - mulo. */ |
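     | /* Same as mul, but OV and SO are also set when the 64-bit product
     |  * differs from the sign extension of its low 32 bits, i.e. when the
     |  * signed result does not fit in 32 bits.
     |  */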
4889 | static void gen_mulo(DisasContext *ctx) |
4890 | { |
4891 | int l1 = gen_new_label(); |
4892 | TCGv_i64 t0 = tcg_temp_new_i64(); |
4893 | TCGv_i64 t1 = tcg_temp_new_i64(); |
4894 | TCGvTCGv_i32 t2 = tcg_temp_new()tcg_temp_new_i32(); |
4895 | /* Start with XER OV disabled, the most likely case */ |
4896 | tcg_gen_movi_tltcg_gen_movi_i32(cpu_ov, 0); |
4897 | tcg_gen_extu_tl_i64tcg_gen_extu_i32_i64(t0, cpu_gpr[rA(ctx->opcode)]); |
4898 | tcg_gen_extu_tl_i64tcg_gen_extu_i32_i64(t1, cpu_gpr[rB(ctx->opcode)]); |
4899 | tcg_gen_mul_i64(t0, t0, t1); |
4900 | tcg_gen_trunc_i64_tltcg_gen_trunc_i64_i32(t2, t0); |
4901 | gen_store_spr(SPR_MQ(0x000), t2); |
4902 | tcg_gen_shri_i64(t1, t0, 32); |
4903 | tcg_gen_trunc_i64_tltcg_gen_trunc_i64_i32(cpu_gpr[rD(ctx->opcode)], t1); |
4904 | tcg_gen_ext32s_i64(t1, t0); |
4905 | tcg_gen_brcond_i64(TCG_COND_EQ, t0, t1, l1); |
4906 | tcg_gen_movi_tltcg_gen_movi_i32(cpu_ov, 1); |
4907 | tcg_gen_movi_tltcg_gen_movi_i32(cpu_so, 1); |
4908 | gen_set_label(l1); |
4909 | tcg_temp_free_i64(t0); |
4910 | tcg_temp_free_i64(t1); |
4911 | tcg_temp_freetcg_temp_free_i32(t2); |
4912 | if (unlikely(Rc(ctx->opcode) != 0)__builtin_expect(!!(Rc(ctx->opcode) != 0), 0)) |
4913 | gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); |
4914 | } |
4915 | |
4916 | /* nabs - nabs. */ |
4917 | static void gen_nabs(DisasContext *ctx) |
4918 | { |
4919 | int l1 = gen_new_label(); |
4920 | int l2 = gen_new_label(); |
4921 | tcg_gen_brcondi_tltcg_gen_brcondi_i32(TCG_COND_GT, cpu_gpr[rA(ctx->opcode)], 0, l1); |
4922 | tcg_gen_mov_tltcg_gen_mov_i32(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); |
4923 | tcg_gen_br(l2); |
4924 | gen_set_label(l1); |
4925 | tcg_gen_neg_tltcg_gen_neg_i32(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); |
4926 | gen_set_label(l2); |
4927 | if (unlikely(Rc(ctx->opcode) != 0)__builtin_expect(!!(Rc(ctx->opcode) != 0), 0)) |
4928 | gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); |
4929 | } |
4930 | |
4931 | /* nabso - nabso. */ |
4932 | static void gen_nabso(DisasContext *ctx) |
4933 | { |
4934 | int l1 = gen_new_label(); |
4935 | int l2 = gen_new_label(); |
4936 | tcg_gen_brcondi_tltcg_gen_brcondi_i32(TCG_COND_GT, cpu_gpr[rA(ctx->opcode)], 0, l1); |
4937 | tcg_gen_mov_tltcg_gen_mov_i32(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); |
4938 | tcg_gen_br(l2); |
4939 | gen_set_label(l1); |
4940 | tcg_gen_neg_tltcg_gen_neg_i32(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); |
4941 | gen_set_label(l2); |
4942 | /* nabs never overflows */ |
4943 | tcg_gen_movi_tltcg_gen_movi_i32(cpu_ov, 0); |
4944 | if (unlikely(Rc(ctx->opcode) != 0)__builtin_expect(!!(Rc(ctx->opcode) != 0), 0)) |
4945 | gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); |
4946 | } |
4947 | |
4948 | /* rlmi - rlmi. */ |
4949 | static void gen_rlmi(DisasContext *ctx) |
4950 | { |
4951 | uint32_t mb = MB(ctx->opcode); |
4952 | uint32_t me = ME(ctx->opcode); |
4953 | TCGvTCGv_i32 t0 = tcg_temp_new()tcg_temp_new_i32(); |
4954 | tcg_gen_andi_tltcg_gen_andi_i32(t0, cpu_gpr[rB(ctx->opcode)], 0x1F); |
4955 | tcg_gen_rotl_tltcg_gen_rotl_i32(t0, cpu_gpr[rS(ctx->opcode)], t0); |
4956 | tcg_gen_andi_tltcg_gen_andi_i32(t0, t0, MASK(mb, me)); |
4957 | tcg_gen_andi_tltcg_gen_andi_i32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], ~MASK(mb, me)); |
4958 | tcg_gen_or_tltcg_gen_or_i32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], t0); |
4959 | tcg_temp_freetcg_temp_free_i32(t0); |
4960 | if (unlikely(Rc(ctx->opcode) != 0)__builtin_expect(!!(Rc(ctx->opcode) != 0), 0)) |
4961 | gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); |
4962 | } |
4963 | |
4964 | /* rrib - rrib. */ |
4965 | static void gen_rrib(DisasContext *ctx) |
4966 | { |
4967 | TCGvTCGv_i32 t0 = tcg_temp_new()tcg_temp_new_i32(); |
4968 | TCGvTCGv_i32 t1 = tcg_temp_new()tcg_temp_new_i32(); |
4969 | tcg_gen_andi_tltcg_gen_andi_i32(t0, cpu_gpr[rB(ctx->opcode)], 0x1F); |
4970 | tcg_gen_movi_tltcg_gen_movi_i32(t1, 0x80000000); |
4971 | tcg_gen_shr_tltcg_gen_shr_i32(t1, t1, t0); |
4972 | tcg_gen_shr_tltcg_gen_shr_i32(t0, cpu_gpr[rS(ctx->opcode)], t0); |
4973 | tcg_gen_and_tltcg_gen_and_i32(t0, t0, t1); |
4974 | tcg_gen_andc_tltcg_gen_andc_i32(t1, cpu_gpr[rA(ctx->opcode)], t1); |
4975 | tcg_gen_or_tltcg_gen_or_i32(cpu_gpr[rA(ctx->opcode)], t0, t1); |
4976 | tcg_temp_freetcg_temp_free_i32(t0); |
4977 | tcg_temp_freetcg_temp_free_i32(t1); |
4978 | if (unlikely(Rc(ctx->opcode) != 0)__builtin_expect(!!(Rc(ctx->opcode) != 0), 0)) |
4979 | gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); |
4980 | } |
4981 | |
4982 | /* sle - sle. */ |
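     | /* sle places rS shifted left by the low 5 bits of rB in rA, and
     |  * stores the full 32-bit left rotation (the left shift OR'd with the
     |  * complementary right shift) in MQ so that the bits shifted out can
     |  * be recovered by a later instruction.
     |  */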
4983 | static void gen_sle(DisasContext *ctx) |
4984 | { |
4985 | TCGvTCGv_i32 t0 = tcg_temp_new()tcg_temp_new_i32(); |
4986 | TCGvTCGv_i32 t1 = tcg_temp_new()tcg_temp_new_i32(); |
4987 | tcg_gen_andi_tltcg_gen_andi_i32(t1, cpu_gpr[rB(ctx->opcode)], 0x1F); |
4988 | tcg_gen_shl_tltcg_gen_shl_i32(t0, cpu_gpr[rS(ctx->opcode)], t1); |
4989 | tcg_gen_subfi_tltcg_gen_subfi_i32(t1, 32, t1); |
4990 | tcg_gen_shr_tltcg_gen_shr_i32(t1, cpu_gpr[rS(ctx->opcode)], t1); |
4991 | tcg_gen_or_tltcg_gen_or_i32(t1, t0, t1); |
4992 | tcg_gen_mov_tltcg_gen_mov_i32(cpu_gpr[rA(ctx->opcode)], t0); |
4993 | gen_store_spr(SPR_MQ(0x000), t1); |
4994 | tcg_temp_freetcg_temp_free_i32(t0); |
4995 | tcg_temp_freetcg_temp_free_i32(t1); |
4996 | if (unlikely(Rc(ctx->opcode) != 0)__builtin_expect(!!(Rc(ctx->opcode) != 0), 0)) |
4997 | gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); |
4998 | } |
4999 | |
5000 | /* sleq - sleq. */ |
5001 | static void gen_sleq(DisasContext *ctx) |
5002 | { |
5003 | TCGvTCGv_i32 t0 = tcg_temp_new()tcg_temp_new_i32(); |
5004 | TCGvTCGv_i32 t1 = tcg_temp_new()tcg_temp_new_i32(); |
5005 | TCGvTCGv_i32 t2 = tcg_temp_new()tcg_temp_new_i32(); |
5006 | tcg_gen_andi_tltcg_gen_andi_i32(t0, cpu_gpr[rB(ctx->opcode)], 0x1F); |
5007 | tcg_gen_movi_tltcg_gen_movi_i32(t2, 0xFFFFFFFF); |
5008 | tcg_gen_shl_tltcg_gen_shl_i32(t2, t2, t0); |
5009 | tcg_gen_rotl_tltcg_gen_rotl_i32(t0, cpu_gpr[rS(ctx->opcode)], t0); |
5010 | gen_load_spr(t1, SPR_MQ(0x000)); |
5011 | gen_store_spr(SPR_MQ(0x000), t0); |
5012 | tcg_gen_and_tltcg_gen_and_i32(t0, t0, t2); |
5013 | tcg_gen_andc_tltcg_gen_andc_i32(t1, t1, t2); |
5014 | tcg_gen_or_tltcg_gen_or_i32(cpu_gpr[rA(ctx->opcode)], t0, t1); |
5015 | tcg_temp_freetcg_temp_free_i32(t0); |
5016 | tcg_temp_freetcg_temp_free_i32(t1); |
5017 | tcg_temp_freetcg_temp_free_i32(t2); |
5018 | if (unlikely(Rc(ctx->opcode) != 0)__builtin_expect(!!(Rc(ctx->opcode) != 0), 0)) |
5019 | gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); |
5020 | } |
5021 | |
5022 | /* sliq - sliq. */ |
5023 | static void gen_sliq(DisasContext *ctx) |
5024 | { |
5025 | int sh = SH(ctx->opcode); |
5026 | TCGvTCGv_i32 t0 = tcg_temp_new()tcg_temp_new_i32(); |
5027 | TCGvTCGv_i32 t1 = tcg_temp_new()tcg_temp_new_i32(); |
5028 | tcg_gen_shli_tltcg_gen_shli_i32(t0, cpu_gpr[rS(ctx->opcode)], sh); |
5029 | tcg_gen_shri_tltcg_gen_shri_i32(t1, cpu_gpr[rS(ctx->opcode)], 32 - sh); |
5030 | tcg_gen_or_tltcg_gen_or_i32(t1, t0, t1); |
5031 | tcg_gen_mov_tltcg_gen_mov_i32(cpu_gpr[rA(ctx->opcode)], t0); |
5032 | gen_store_spr(SPR_MQ(0x000), t1); |
5033 | tcg_temp_freetcg_temp_free_i32(t0); |
5034 | tcg_temp_freetcg_temp_free_i32(t1); |
5035 | if (unlikely(Rc(ctx->opcode) != 0)__builtin_expect(!!(Rc(ctx->opcode) != 0), 0)) |
5036 | gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); |
5037 | } |
5038 | |
5039 | /* slliq - slliq. */ |
5040 | static void gen_slliq(DisasContext *ctx) |
5041 | { |
5042 | int sh = SH(ctx->opcode); |
5043 | TCGvTCGv_i32 t0 = tcg_temp_new()tcg_temp_new_i32(); |
5044 | TCGvTCGv_i32 t1 = tcg_temp_new()tcg_temp_new_i32(); |
5045 | tcg_gen_rotli_tltcg_gen_rotli_i32(t0, cpu_gpr[rS(ctx->opcode)], sh); |
5046 | gen_load_spr(t1, SPR_MQ(0x000)); |
5047 | gen_store_spr(SPR_MQ(0x000), t0); |
5048 | tcg_gen_andi_tltcg_gen_andi_i32(t0, t0, (0xFFFFFFFFU << sh)); |
5049 | tcg_gen_andi_tltcg_gen_andi_i32(t1, t1, ~(0xFFFFFFFFU << sh)); |
5050 | tcg_gen_or_tltcg_gen_or_i32(cpu_gpr[rA(ctx->opcode)], t0, t1); |
5051 | tcg_temp_freetcg_temp_free_i32(t0); |
5052 | tcg_temp_freetcg_temp_free_i32(t1); |
5053 | if (unlikely(Rc(ctx->opcode) != 0)__builtin_expect(!!(Rc(ctx->opcode) != 0), 0)) |
5054 | gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); |
5055 | } |
5056 | |
5057 | /* sllq - sllq. */ |
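     | /* sllq merges the left-shifted rS with bits taken from MQ under the
     |  * mask 0xFFFFFFFF << n; when bit 0x20 of rB is set (shift amount of
     |  * 32 or more) the result is taken entirely from MQ masked with that
     |  * mask.
     |  */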
5058 | static void gen_sllq(DisasContext *ctx) |
5059 | { |
5060 | int l1 = gen_new_label(); |
5061 | int l2 = gen_new_label(); |
5062 | TCGvTCGv_i32 t0 = tcg_temp_local_new()tcg_temp_local_new_i32(); |
5063 | TCGvTCGv_i32 t1 = tcg_temp_local_new()tcg_temp_local_new_i32(); |
5064 | TCGvTCGv_i32 t2 = tcg_temp_local_new()tcg_temp_local_new_i32(); |
5065 | tcg_gen_andi_tltcg_gen_andi_i32(t2, cpu_gpr[rB(ctx->opcode)], 0x1F); |
5066 | tcg_gen_movi_tltcg_gen_movi_i32(t1, 0xFFFFFFFF); |
5067 | tcg_gen_shl_tltcg_gen_shl_i32(t1, t1, t2); |
5068 | tcg_gen_andi_tltcg_gen_andi_i32(t0, cpu_gpr[rB(ctx->opcode)], 0x20); |
5069 | tcg_gen_brcondi_tltcg_gen_brcondi_i32(TCG_COND_EQ, t0, 0, l1); |
5070 | gen_load_spr(t0, SPR_MQ(0x000)); |
5071 | tcg_gen_and_tltcg_gen_and_i32(cpu_gpr[rA(ctx->opcode)], t0, t1); |
5072 | tcg_gen_br(l2); |
5073 | gen_set_label(l1); |
5074 | tcg_gen_shl_tltcg_gen_shl_i32(t0, cpu_gpr[rS(ctx->opcode)], t2); |
5075 | gen_load_spr(t2, SPR_MQ(0x000)); |
5076 | tcg_gen_andc_tltcg_gen_andc_i32(t1, t2, t1); |
5077 | tcg_gen_or_tltcg_gen_or_i32(cpu_gpr[rA(ctx->opcode)], t0, t1); |
5078 | gen_set_label(l2); |
5079 | tcg_temp_freetcg_temp_free_i32(t0); |
5080 | tcg_temp_freetcg_temp_free_i32(t1); |
5081 | tcg_temp_freetcg_temp_free_i32(t2); |
5082 | if (unlikely(Rc(ctx->opcode) != 0)__builtin_expect(!!(Rc(ctx->opcode) != 0), 0)) |
5083 | gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); |
5084 | } |
5085 | |
5086 | /* slq - slq. */ |
5087 | static void gen_slq(DisasContext *ctx) |
5088 | { |
5089 | int l1 = gen_new_label(); |
5090 | TCGvTCGv_i32 t0 = tcg_temp_new()tcg_temp_new_i32(); |
5091 | TCGvTCGv_i32 t1 = tcg_temp_new()tcg_temp_new_i32(); |
5092 | tcg_gen_andi_tltcg_gen_andi_i32(t1, cpu_gpr[rB(ctx->opcode)], 0x1F); |
5093 | tcg_gen_shl_tltcg_gen_shl_i32(t0, cpu_gpr[rS(ctx->opcode)], t1); |
5094 | tcg_gen_subfi_tltcg_gen_subfi_i32(t1, 32, t1); |
5095 | tcg_gen_shr_tltcg_gen_shr_i32(t1, cpu_gpr[rS(ctx->opcode)], t1); |
5096 | tcg_gen_or_tltcg_gen_or_i32(t1, t0, t1); |
5097 | gen_store_spr(SPR_MQ(0x000), t1); |
5098 | tcg_gen_andi_tltcg_gen_andi_i32(t1, cpu_gpr[rB(ctx->opcode)], 0x20); |
5099 | tcg_gen_mov_tltcg_gen_mov_i32(cpu_gpr[rA(ctx->opcode)], t0); |
5100 | tcg_gen_brcondi_tltcg_gen_brcondi_i32(TCG_COND_EQ, t1, 0, l1); |
5101 | tcg_gen_movi_tltcg_gen_movi_i32(cpu_gpr[rA(ctx->opcode)], 0); |
5102 | gen_set_label(l1); |
5103 | tcg_temp_freetcg_temp_free_i32(t0); |
5104 | tcg_temp_freetcg_temp_free_i32(t1); |
5105 | if (unlikely(Rc(ctx->opcode) != 0)__builtin_expect(!!(Rc(ctx->opcode) != 0), 0)) |
5106 | gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); |
5107 | } |
5108 | |
5109 | /* sraiq - sraiq. */ |
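     | /* Shift right algebraic immediate with MQ: MQ receives rS rotated
     |  * right by sh, and CA is set only when rS is negative and at least
     |  * one 1 bit was shifted out.
     |  */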
5110 | static void gen_sraiq(DisasContext *ctx) |
5111 | { |
5112 | int sh = SH(ctx->opcode); |
5113 | int l1 = gen_new_label(); |
5114 | TCGvTCGv_i32 t0 = tcg_temp_new()tcg_temp_new_i32(); |
5115 | TCGvTCGv_i32 t1 = tcg_temp_new()tcg_temp_new_i32(); |
5116 | tcg_gen_shri_tltcg_gen_shri_i32(t0, cpu_gpr[rS(ctx->opcode)], sh); |
5117 | tcg_gen_shli_tltcg_gen_shli_i32(t1, cpu_gpr[rS(ctx->opcode)], 32 - sh); |
5118 | tcg_gen_or_tltcg_gen_or_i32(t0, t0, t1); |
5119 | gen_store_spr(SPR_MQ(0x000), t0); |
5120 | tcg_gen_movi_tltcg_gen_movi_i32(cpu_ca, 0); |
5121 | tcg_gen_brcondi_tltcg_gen_brcondi_i32(TCG_COND_EQ, t1, 0, l1); |
5122 | tcg_gen_brcondi_tltcg_gen_brcondi_i32(TCG_COND_GE, cpu_gpr[rS(ctx->opcode)], 0, l1); |
5123 | tcg_gen_movi_tltcg_gen_movi_i32(cpu_ca, 1); |
5124 | gen_set_label(l1); |
5125 | tcg_gen_sari_tltcg_gen_sari_i32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], sh); |
5126 | tcg_temp_freetcg_temp_free_i32(t0); |
5127 | tcg_temp_freetcg_temp_free_i32(t1); |
5128 | if (unlikely(Rc(ctx->opcode) != 0)__builtin_expect(!!(Rc(ctx->opcode) != 0), 0)) |
5129 | gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); |
5130 | } |
5131 | |
5132 | /* sraq - sraq. */ |
5133 | static void gen_sraq(DisasContext *ctx) |
5134 | { |
5135 | int l1 = gen_new_label(); |
5136 | int l2 = gen_new_label(); |
5137 | TCGvTCGv_i32 t0 = tcg_temp_new()tcg_temp_new_i32(); |
5138 | TCGvTCGv_i32 t1 = tcg_temp_local_new()tcg_temp_local_new_i32(); |
5139 | TCGvTCGv_i32 t2 = tcg_temp_local_new()tcg_temp_local_new_i32(); |
5140 | tcg_gen_andi_tltcg_gen_andi_i32(t2, cpu_gpr[rB(ctx->opcode)], 0x1F); |
5141 | tcg_gen_shr_tltcg_gen_shr_i32(t0, cpu_gpr[rS(ctx->opcode)], t2); |
5142 | tcg_gen_sar_tltcg_gen_sar_i32(t1, cpu_gpr[rS(ctx->opcode)], t2); |
5143 | tcg_gen_subfi_tltcg_gen_subfi_i32(t2, 32, t2); |
5144 | tcg_gen_shl_tltcg_gen_shl_i32(t2, cpu_gpr[rS(ctx->opcode)], t2); |
5145 | tcg_gen_or_tltcg_gen_or_i32(t0, t0, t2); |
5146 | gen_store_spr(SPR_MQ(0x000), t0); |
5147 | tcg_gen_andi_tltcg_gen_andi_i32(t0, cpu_gpr[rB(ctx->opcode)], 0x20); |
5148 | tcg_gen_brcondi_tltcg_gen_brcondi_i32(TCG_COND_EQ, t2, 0, l1); |
5149 | tcg_gen_mov_tltcg_gen_mov_i32(t2, cpu_gpr[rS(ctx->opcode)]); |
5150 | tcg_gen_sari_tltcg_gen_sari_i32(t1, cpu_gpr[rS(ctx->opcode)], 31); |
5151 | gen_set_label(l1); |
5152 | tcg_temp_freetcg_temp_free_i32(t0); |
5153 | tcg_gen_mov_tltcg_gen_mov_i32(cpu_gpr[rA(ctx->opcode)], t1); |
5154 | tcg_gen_movi_tltcg_gen_movi_i32(cpu_ca, 0); |
5155 | tcg_gen_brcondi_tltcg_gen_brcondi_i32(TCG_COND_GE, t1, 0, l2); |
5156 | tcg_gen_brcondi_tltcg_gen_brcondi_i32(TCG_COND_EQ, t2, 0, l2); |
5157 | tcg_gen_movi_tltcg_gen_movi_i32(cpu_ca, 1); |
5158 | gen_set_label(l2); |
5159 | tcg_temp_freetcg_temp_free_i32(t1); |
5160 | tcg_temp_freetcg_temp_free_i32(t2); |
5161 | if (unlikely(Rc(ctx->opcode) != 0)__builtin_expect(!!(Rc(ctx->opcode) != 0), 0)) |
5162 | gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); |
5163 | } |
5164 | |
5165 | /* sre - sre. */ |
5166 | static void gen_sre(DisasContext *ctx) |
5167 | { |
5168 | TCGvTCGv_i32 t0 = tcg_temp_new()tcg_temp_new_i32(); |
5169 | TCGvTCGv_i32 t1 = tcg_temp_new()tcg_temp_new_i32(); |
5170 | tcg_gen_andi_tltcg_gen_andi_i32(t1, cpu_gpr[rB(ctx->opcode)], 0x1F); |
5171 | tcg_gen_shr_tltcg_gen_shr_i32(t0, cpu_gpr[rS(ctx->opcode)], t1); |
5172 | tcg_gen_subfi_tltcg_gen_subfi_i32(t1, 32, t1); |
5173 | tcg_gen_shl_tltcg_gen_shl_i32(t1, cpu_gpr[rS(ctx->opcode)], t1); |
5174 | tcg_gen_or_tltcg_gen_or_i32(t1, t0, t1); |
5175 | tcg_gen_mov_tltcg_gen_mov_i32(cpu_gpr[rA(ctx->opcode)], t0); |
5176 | gen_store_spr(SPR_MQ(0x000), t1); |
5177 | tcg_temp_freetcg_temp_free_i32(t0); |
5178 | tcg_temp_freetcg_temp_free_i32(t1); |
5179 | if (unlikely(Rc(ctx->opcode) != 0)__builtin_expect(!!(Rc(ctx->opcode) != 0), 0)) |
5180 | gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); |
5181 | } |
5182 | |
5183 | /* srea - srea. */ |
5184 | static void gen_srea(DisasContext *ctx) |
5185 | { |
5186 | TCGvTCGv_i32 t0 = tcg_temp_new()tcg_temp_new_i32(); |
5187 | TCGvTCGv_i32 t1 = tcg_temp_new()tcg_temp_new_i32(); |
5188 | tcg_gen_andi_tltcg_gen_andi_i32(t1, cpu_gpr[rB(ctx->opcode)], 0x1F); |
5189 | tcg_gen_rotr_tltcg_gen_rotr_i32(t0, cpu_gpr[rS(ctx->opcode)], t1); |
5190 | gen_store_spr(SPR_MQ(0x000), t0); |
5191 | tcg_gen_sar_tltcg_gen_sar_i32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], t1); |
5192 | tcg_temp_freetcg_temp_free_i32(t0); |
5193 | tcg_temp_freetcg_temp_free_i32(t1); |
5194 | if (unlikely(Rc(ctx->opcode) != 0)__builtin_expect(!!(Rc(ctx->opcode) != 0), 0)) |
5195 | gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); |
5196 | } |
5197 | |
5198 | /* sreq */ |
5199 | static void gen_sreq(DisasContext *ctx) |
5200 | { |
5201 | TCGvTCGv_i32 t0 = tcg_temp_new()tcg_temp_new_i32(); |
5202 | TCGvTCGv_i32 t1 = tcg_temp_new()tcg_temp_new_i32(); |
5203 | TCGvTCGv_i32 t2 = tcg_temp_new()tcg_temp_new_i32(); |
5204 | tcg_gen_andi_tltcg_gen_andi_i32(t0, cpu_gpr[rB(ctx->opcode)], 0x1F); |
5205 | tcg_gen_movi_tltcg_gen_movi_i32(t1, 0xFFFFFFFF); |
5206 | tcg_gen_shr_tltcg_gen_shr_i32(t1, t1, t0); |
5207 | tcg_gen_rotr_tltcg_gen_rotr_i32(t0, cpu_gpr[rS(ctx->opcode)], t0); |
5208 | gen_load_spr(t2, SPR_MQ(0x000)); |
5209 | gen_store_spr(SPR_MQ(0x000), t0); |
5210 | tcg_gen_and_tltcg_gen_and_i32(t0, t0, t1); |
5211 | tcg_gen_andc_tltcg_gen_andc_i32(t2, t2, t1); |
5212 | tcg_gen_or_tltcg_gen_or_i32(cpu_gpr[rA(ctx->opcode)], t0, t2); |
5213 | tcg_temp_freetcg_temp_free_i32(t0); |
5214 | tcg_temp_freetcg_temp_free_i32(t1); |
5215 | tcg_temp_freetcg_temp_free_i32(t2); |
5216 | if (unlikely(Rc(ctx->opcode) != 0)__builtin_expect(!!(Rc(ctx->opcode) != 0), 0)) |
5217 | gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); |
5218 | } |
5219 | |
5220 | /* sriq */ |
5221 | static void gen_sriq(DisasContext *ctx) |
5222 | { |
5223 | int sh = SH(ctx->opcode); |
5224 | TCGvTCGv_i32 t0 = tcg_temp_new()tcg_temp_new_i32(); |
5225 | TCGvTCGv_i32 t1 = tcg_temp_new()tcg_temp_new_i32(); |
5226 | tcg_gen_shri_tltcg_gen_shri_i32(t0, cpu_gpr[rS(ctx->opcode)], sh); |
5227 | tcg_gen_shli_tltcg_gen_shli_i32(t1, cpu_gpr[rS(ctx->opcode)], 32 - sh); |
5228 | tcg_gen_or_tltcg_gen_or_i32(t1, t0, t1); |
5229 | tcg_gen_mov_tltcg_gen_mov_i32(cpu_gpr[rA(ctx->opcode)], t0); |
5230 | gen_store_spr(SPR_MQ(0x000), t1); |
5231 | tcg_temp_freetcg_temp_free_i32(t0); |
5232 | tcg_temp_freetcg_temp_free_i32(t1); |
5233 | if (unlikely(Rc(ctx->opcode) != 0)__builtin_expect(!!(Rc(ctx->opcode) != 0), 0)) |
5234 | gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); |
5235 | } |
5236 | |
5237 | /* srliq */ |
5238 | static void gen_srliq(DisasContext *ctx) |
5239 | { |
5240 | int sh = SH(ctx->opcode); |
5241 | TCGvTCGv_i32 t0 = tcg_temp_new()tcg_temp_new_i32(); |
5242 | TCGvTCGv_i32 t1 = tcg_temp_new()tcg_temp_new_i32(); |
5243 | tcg_gen_rotri_tltcg_gen_rotri_i32(t0, cpu_gpr[rS(ctx->opcode)], sh); |
5244 | gen_load_spr(t1, SPR_MQ(0x000)); |
5245 | gen_store_spr(SPR_MQ(0x000), t0); |
5246 | tcg_gen_andi_tltcg_gen_andi_i32(t0, t0, (0xFFFFFFFFU >> sh)); |
5247 | tcg_gen_andi_tltcg_gen_andi_i32(t1, t1, ~(0xFFFFFFFFU >> sh)); |
5248 | tcg_gen_or_tltcg_gen_or_i32(cpu_gpr[rA(ctx->opcode)], t0, t1); |
5249 | tcg_temp_freetcg_temp_free_i32(t0); |
5250 | tcg_temp_freetcg_temp_free_i32(t1); |
5251 | if (unlikely(Rc(ctx->opcode) != 0)__builtin_expect(!!(Rc(ctx->opcode) != 0), 0)) |
5252 | gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); |
5253 | } |
5254 | |
5255 | /* srlq */ |
5256 | static void gen_srlq(DisasContext *ctx) |
5257 | { |
5258 | int l1 = gen_new_label(); |
5259 | int l2 = gen_new_label(); |
5260 | TCGvTCGv_i32 t0 = tcg_temp_local_new()tcg_temp_local_new_i32(); |
5261 | TCGvTCGv_i32 t1 = tcg_temp_local_new()tcg_temp_local_new_i32(); |
5262 | TCGvTCGv_i32 t2 = tcg_temp_local_new()tcg_temp_local_new_i32(); |
5263 | tcg_gen_andi_tltcg_gen_andi_i32(t2, cpu_gpr[rB(ctx->opcode)], 0x1F); |
5264 | tcg_gen_movi_tltcg_gen_movi_i32(t1, 0xFFFFFFFF); |
5265 | tcg_gen_shr_tltcg_gen_shr_i32(t2, t1, t2); |
5266 | tcg_gen_andi_tltcg_gen_andi_i32(t0, cpu_gpr[rB(ctx->opcode)], 0x20); |
5267 | tcg_gen_brcondi_tltcg_gen_brcondi_i32(TCG_COND_EQ, t0, 0, l1); |
5268 | gen_load_spr(t0, SPR_MQ(0x000)); |
5269 | tcg_gen_and_tltcg_gen_and_i32(cpu_gpr[rA(ctx->opcode)], t0, t2); |
5270 | tcg_gen_br(l2); |
5271 | gen_set_label(l1); |
5272 | tcg_gen_shr_tltcg_gen_shr_i32(t0, cpu_gpr[rS(ctx->opcode)], t2); |
5273 | tcg_gen_and_tltcg_gen_and_i32(t0, t0, t2); |
5274 | gen_load_spr(t1, SPR_MQ(0x000)); |
5275 | tcg_gen_andc_tltcg_gen_andc_i32(t1, t1, t2); |
5276 | tcg_gen_or_tltcg_gen_or_i32(cpu_gpr[rA(ctx->opcode)], t0, t1); |
5277 | gen_set_label(l2); |
5278 | tcg_temp_freetcg_temp_free_i32(t0); |
5279 | tcg_temp_freetcg_temp_free_i32(t1); |
5280 | tcg_temp_freetcg_temp_free_i32(t2); |
5281 | if (unlikely(Rc(ctx->opcode) != 0)__builtin_expect(!!(Rc(ctx->opcode) != 0), 0)) |
5282 | gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); |
5283 | } |
5284 | |
5285 | /* srq */ |
5286 | static void gen_srq(DisasContext *ctx) |
5287 | { |
5288 | int l1 = gen_new_label(); |
5289 | TCGvTCGv_i32 t0 = tcg_temp_new()tcg_temp_new_i32(); |
5290 | TCGvTCGv_i32 t1 = tcg_temp_new()tcg_temp_new_i32(); |
5291 | tcg_gen_andi_tltcg_gen_andi_i32(t1, cpu_gpr[rB(ctx->opcode)], 0x1F); |
5292 | tcg_gen_shr_tltcg_gen_shr_i32(t0, cpu_gpr[rS(ctx->opcode)], t1); |
5293 | tcg_gen_subfi_tltcg_gen_subfi_i32(t1, 32, t1); |
5294 | tcg_gen_shl_tltcg_gen_shl_i32(t1, cpu_gpr[rS(ctx->opcode)], t1); |
5295 | tcg_gen_or_tltcg_gen_or_i32(t1, t0, t1); |
5296 | gen_store_spr(SPR_MQ(0x000), t1); |
5297 | tcg_gen_andi_tltcg_gen_andi_i32(t1, cpu_gpr[rB(ctx->opcode)], 0x20); |
5298 | tcg_gen_mov_tltcg_gen_mov_i32(cpu_gpr[rA(ctx->opcode)], t0); |
5299 | tcg_gen_brcondi_tltcg_gen_brcondi_i32(TCG_COND_EQ, t0, 0, l1); |
5300 | tcg_gen_movi_tltcg_gen_movi_i32(cpu_gpr[rA(ctx->opcode)], 0); |
5301 | gen_set_label(l1); |
5302 | tcg_temp_freetcg_temp_free_i32(t0); |
5303 | tcg_temp_freetcg_temp_free_i32(t1); |
5304 | if (unlikely(Rc(ctx->opcode) != 0)__builtin_expect(!!(Rc(ctx->opcode) != 0), 0)) |
5305 | gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); |
5306 | } |
5307 | |
5308 | /* PowerPC 602 specific instructions */ |
5309 | |
5310 | /* dsa */ |
5311 | static void gen_dsa(DisasContext *ctx) |
5312 | { |
5313 | /* XXX: TODO */ |
5314 | gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); |
5315 | } |
5316 | |
5317 | /* esa */ |
5318 | static void gen_esa(DisasContext *ctx) |
5319 | { |
5320 | /* XXX: TODO */ |
5321 | gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); |
5322 | } |
5323 | |
5324 | /* mfrom */ |
5325 | static void gen_mfrom(DisasContext *ctx) |
5326 | { |
5327 | #if defined(CONFIG_USER_ONLY) |
5328 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
5329 | #else |
5330 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
5331 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
5332 | return; |
5333 | } |
5334 | gen_helper_602_mfrom(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); |
5335 | #endif |
5336 | } |
5337 | |
5338 | /* 602 - 603 - G2 TLB management */ |
5339 | |
5340 | /* tlbld */ |
5341 | static void gen_tlbld_6xx(DisasContext *ctx) |
5342 | { |
5343 | #if defined(CONFIG_USER_ONLY) |
5344 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
5345 | #else |
5346 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
5347 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
5348 | return; |
5349 | } |
5350 | gen_helper_6xx_tlbd(cpu_env, cpu_gpr[rB(ctx->opcode)]); |
5351 | #endif |
5352 | } |
5353 | |
5354 | /* tlbli */ |
5355 | static void gen_tlbli_6xx(DisasContext *ctx) |
5356 | { |
5357 | #if defined(CONFIG_USER_ONLY) |
5358 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
5359 | #else |
5360 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
5361 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
5362 | return; |
5363 | } |
5364 | gen_helper_6xx_tlbi(cpu_env, cpu_gpr[rB(ctx->opcode)]); |
5365 | #endif |
5366 | } |
5367 | |
5368 | /* 74xx TLB management */ |
5369 | |
5370 | /* tlbld */ |
5371 | static void gen_tlbld_74xx(DisasContext *ctx) |
5372 | { |
5373 | #if defined(CONFIG_USER_ONLY) |
5374 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
5375 | #else |
5376 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
5377 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
5378 | return; |
5379 | } |
5380 | gen_helper_74xx_tlbd(cpu_env, cpu_gpr[rB(ctx->opcode)]); |
5381 | #endif |
5382 | } |
5383 | |
5384 | /* tlbli */ |
5385 | static void gen_tlbli_74xx(DisasContext *ctx) |
5386 | { |
5387 | #if defined(CONFIG_USER_ONLY) |
5388 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
5389 | #else |
5390 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
5391 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
5392 | return; |
5393 | } |
5394 | gen_helper_74xx_tlbi(cpu_env, cpu_gpr[rB(ctx->opcode)]); |
5395 | #endif |
5396 | } |
5397 | |
5398 | /* POWER instructions not in PowerPC 601 */ |
5399 | |
5400 | /* clf */ |
5401 | static void gen_clf(DisasContext *ctx) |
5402 | { |
5403 |     /* Cache line flush: implemented as a no-op */
5404 | } |
5405 | |
5406 | /* cli */ |
5407 | static void gen_cli(DisasContext *ctx) |
5408 | { |
5409 |     /* Cache line invalidate: privileged and treated as a no-op */
5410 | #if defined(CONFIG_USER_ONLY) |
5411 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
5412 | #else |
5413 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
5414 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
5415 | return; |
5416 | } |
5417 | #endif |
5418 | } |
5419 | |
5420 | /* dclst */ |
5421 | static void gen_dclst(DisasContext *ctx) |
5422 | { |
5423 |     /* Data cache line store: treated as a no-op */
5424 | } |
5425 | |
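     | /* mfsri - move from segment register indirect: loads the segment
     |  * register selected by the top four bits of the computed EA */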
5426 | static void gen_mfsri(DisasContext *ctx) |
5427 | { |
5428 | #if defined(CONFIG_USER_ONLY) |
5429 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
5430 | #else |
5431 | int ra = rA(ctx->opcode); |
5432 | int rd = rD(ctx->opcode); |
5433 | TCGvTCGv_i32 t0; |
5434 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
5435 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
5436 | return; |
5437 | } |
5438 | t0 = tcg_temp_new()tcg_temp_new_i32(); |
5439 | gen_addr_reg_index(ctx, t0); |
5440 | tcg_gen_shri_tltcg_gen_shri_i32(t0, t0, 28); |
5441 | tcg_gen_andi_tltcg_gen_andi_i32(t0, t0, 0xF); |
5442 | gen_helper_load_sr(cpu_gpr[rd], cpu_env, t0); |
5443 | tcg_temp_freetcg_temp_free_i32(t0); |
5444 | if (ra != 0 && ra != rd) |
5445 | tcg_gen_mov_tltcg_gen_mov_i32(cpu_gpr[ra], cpu_gpr[rd]); |
5446 | #endif |
5447 | } |
5448 | |
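     | /* rac - real address compute: translates the computed EA through the
     |  * rac helper and puts the result in rD */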
5449 | static void gen_rac(DisasContext *ctx) |
5450 | { |
5451 | #if defined(CONFIG_USER_ONLY) |
5452 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
5453 | #else |
5454 | TCGvTCGv_i32 t0; |
5455 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
5456 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
5457 | return; |
5458 | } |
5459 | t0 = tcg_temp_new()tcg_temp_new_i32(); |
5460 | gen_addr_reg_index(ctx, t0); |
5461 | gen_helper_rac(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); |
5462 | tcg_temp_freetcg_temp_free_i32(t0); |
5463 | #endif |
5464 | } |
5465 | |
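     | /* rfsvc - return from supervisor call (POWER) */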
5466 | static void gen_rfsvc(DisasContext *ctx) |
5467 | { |
5468 | #if defined(CONFIG_USER_ONLY) |
5469 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
5470 | #else |
5471 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
5472 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
5473 | return; |
5474 | } |
5475 | gen_helper_rfsvc(cpu_env); |
5476 | gen_sync_exception(ctx); |
5477 | #endif |
5478 | } |
5479 | |
5480 | /* svc is not implemented for now */ |
5481 | |
5482 | /* POWER2 specific instructions */ |
5483 | /* Quad manipulation (load/store two floats at a time) */ |
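     | /* Each of these moves the FPR pair rd and (rd + 1) mod 32 to or from
     |  * EA and EA + 8; the update ("u") forms also write the first
     |  * effective address back to rA when rA is not 0.
     |  */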
5484 | |
5485 | /* lfq */ |
5486 | static void gen_lfq(DisasContext *ctx) |
5487 | { |
5488 | int rd = rD(ctx->opcode); |
5489 | TCGvTCGv_i32 t0; |
5490 | gen_set_access_type(ctx, ACCESS_FLOAT); |
5491 | t0 = tcg_temp_new()tcg_temp_new_i32(); |
5492 | gen_addr_imm_index(ctx, t0, 0); |
5493 | gen_qemu_ld64(ctx, cpu_fpr[rd], t0); |
5494 | gen_addr_add(ctx, t0, t0, 8); |
5495 | gen_qemu_ld64(ctx, cpu_fpr[(rd + 1) % 32], t0); |
5496 | tcg_temp_freetcg_temp_free_i32(t0); |
5497 | } |
5498 | |
5499 | /* lfqu */ |
5500 | static void gen_lfqu(DisasContext *ctx) |
5501 | { |
5502 | int ra = rA(ctx->opcode); |
5503 | int rd = rD(ctx->opcode); |
5504 | TCGvTCGv_i32 t0, t1; |
5505 | gen_set_access_type(ctx, ACCESS_FLOAT); |
5506 | t0 = tcg_temp_new()tcg_temp_new_i32(); |
5507 | t1 = tcg_temp_new()tcg_temp_new_i32(); |
5508 | gen_addr_imm_index(ctx, t0, 0); |
5509 | gen_qemu_ld64(ctx, cpu_fpr[rd], t0); |
5510 | gen_addr_add(ctx, t1, t0, 8); |
5511 | gen_qemu_ld64(ctx, cpu_fpr[(rd + 1) % 32], t1); |
5512 | if (ra != 0) |
5513 | tcg_gen_mov_tltcg_gen_mov_i32(cpu_gpr[ra], t0); |
5514 | tcg_temp_freetcg_temp_free_i32(t0); |
5515 | tcg_temp_freetcg_temp_free_i32(t1); |
5516 | } |
5517 | |
5518 | /* lfqux */ |
5519 | static void gen_lfqux(DisasContext *ctx) |
5520 | { |
5521 | int ra = rA(ctx->opcode); |
5522 | int rd = rD(ctx->opcode); |
5523 | gen_set_access_type(ctx, ACCESS_FLOAT); |
5524 | TCGvTCGv_i32 t0, t1; |
5525 | t0 = tcg_temp_new()tcg_temp_new_i32(); |
5526 | gen_addr_reg_index(ctx, t0); |
5527 | gen_qemu_ld64(ctx, cpu_fpr[rd], t0); |
5528 | t1 = tcg_temp_new()tcg_temp_new_i32(); |
5529 | gen_addr_add(ctx, t1, t0, 8); |
5530 | gen_qemu_ld64(ctx, cpu_fpr[(rd + 1) % 32], t1); |
5531 | tcg_temp_freetcg_temp_free_i32(t1); |
5532 | if (ra != 0) |
5533 | tcg_gen_mov_tltcg_gen_mov_i32(cpu_gpr[ra], t0); |
5534 | tcg_temp_freetcg_temp_free_i32(t0); |
5535 | } |
5536 | |
5537 | /* lfqx */ |
5538 | static void gen_lfqx(DisasContext *ctx) |
5539 | { |
5540 | int rd = rD(ctx->opcode); |
5541 | TCGvTCGv_i32 t0; |
5542 | gen_set_access_type(ctx, ACCESS_FLOAT); |
5543 | t0 = tcg_temp_new()tcg_temp_new_i32(); |
5544 | gen_addr_reg_index(ctx, t0); |
5545 | gen_qemu_ld64(ctx, cpu_fpr[rd], t0); |
5546 | gen_addr_add(ctx, t0, t0, 8); |
5547 | gen_qemu_ld64(ctx, cpu_fpr[(rd + 1) % 32], t0); |
5548 | tcg_temp_freetcg_temp_free_i32(t0); |
5549 | } |
5550 | |
5551 | /* stfq */ |
5552 | static void gen_stfq(DisasContext *ctx) |
5553 | { |
5554 | int rd = rD(ctx->opcode); |
5555 | TCGvTCGv_i32 t0; |
5556 | gen_set_access_type(ctx, ACCESS_FLOAT); |
5557 | t0 = tcg_temp_new()tcg_temp_new_i32(); |
5558 | gen_addr_imm_index(ctx, t0, 0); |
5559 | gen_qemu_st64(ctx, cpu_fpr[rd], t0); |
5560 | gen_addr_add(ctx, t0, t0, 8); |
5561 | gen_qemu_st64(ctx, cpu_fpr[(rd + 1) % 32], t0); |
5562 | tcg_temp_freetcg_temp_free_i32(t0); |
5563 | } |
5564 | |
5565 | /* stfqu */ |
5566 | static void gen_stfqu(DisasContext *ctx) |
5567 | { |
5568 | int ra = rA(ctx->opcode); |
5569 | int rd = rD(ctx->opcode); |
5570 | TCGvTCGv_i32 t0, t1; |
5571 | gen_set_access_type(ctx, ACCESS_FLOAT); |
5572 | t0 = tcg_temp_new()tcg_temp_new_i32(); |
5573 | gen_addr_imm_index(ctx, t0, 0); |
5574 | gen_qemu_st64(ctx, cpu_fpr[rd], t0); |
5575 | t1 = tcg_temp_new()tcg_temp_new_i32(); |
5576 | gen_addr_add(ctx, t1, t0, 8); |
5577 | gen_qemu_st64(ctx, cpu_fpr[(rd + 1) % 32], t1); |
5578 | tcg_temp_freetcg_temp_free_i32(t1); |
5579 | if (ra != 0) |
5580 | tcg_gen_mov_tltcg_gen_mov_i32(cpu_gpr[ra], t0); |
5581 | tcg_temp_freetcg_temp_free_i32(t0); |
5582 | } |
5583 | |
5584 | /* stfqux */ |
5585 | static void gen_stfqux(DisasContext *ctx) |
5586 | { |
5587 | int ra = rA(ctx->opcode); |
5588 | int rd = rD(ctx->opcode); |
5589 | TCGvTCGv_i32 t0, t1; |
5590 | gen_set_access_type(ctx, ACCESS_FLOAT); |
5591 | t0 = tcg_temp_new()tcg_temp_new_i32(); |
5592 | gen_addr_reg_index(ctx, t0); |
5593 | gen_qemu_st64(ctx, cpu_fpr[rd], t0); |
5594 | t1 = tcg_temp_new()tcg_temp_new_i32(); |
5595 | gen_addr_add(ctx, t1, t0, 8); |
5596 | gen_qemu_st64(ctx, cpu_fpr[(rd + 1) % 32], t1); |
5597 | tcg_temp_freetcg_temp_free_i32(t1); |
5598 | if (ra != 0) |
5599 | tcg_gen_mov_tltcg_gen_mov_i32(cpu_gpr[ra], t0); |
5600 | tcg_temp_freetcg_temp_free_i32(t0); |
5601 | } |
5602 | |
5603 | /* stfqx */ |
5604 | static void gen_stfqx(DisasContext *ctx) |
5605 | { |
5606 | int rd = rD(ctx->opcode); |
5607 | TCGvTCGv_i32 t0; |
5608 | gen_set_access_type(ctx, ACCESS_FLOAT); |
5609 | t0 = tcg_temp_new()tcg_temp_new_i32(); |
5610 | gen_addr_reg_index(ctx, t0); |
5611 | gen_qemu_st64(ctx, cpu_fpr[rd], t0); |
5612 | gen_addr_add(ctx, t0, t0, 8); |
5613 | gen_qemu_st64(ctx, cpu_fpr[(rd + 1) % 32], t0); |
5614 | tcg_temp_freetcg_temp_free_i32(t0); |
5615 | } |
5616 | |
5617 | /* BookE specific instructions */ |
5618 | |
5619 | /* XXX: not implemented on 440? */
5620 | static void gen_mfapidi(DisasContext *ctx) |
5621 | { |
5622 | /* XXX: TODO */ |
5623 | gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); |
5624 | } |
5625 | |
5626 | /* XXX: not implemented on 440? */
5627 | static void gen_tlbiva(DisasContext *ctx) |
5628 | { |
5629 | #if defined(CONFIG_USER_ONLY) |
5630 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
5631 | #else |
5632 | TCGvTCGv_i32 t0; |
5633 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
5634 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
5635 | return; |
5636 | } |
5637 | t0 = tcg_temp_new()tcg_temp_new_i32(); |
5638 | gen_addr_reg_index(ctx, t0); |
5639 | gen_helper_tlbie(cpu_env, cpu_gpr[rB(ctx->opcode)]); |
5640 | tcg_temp_freetcg_temp_free_i32(t0); |
5641 | #endif |
5642 | } |
5643 | |
5644 | /* All 405 MAC instructions are translated here */ |
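     | /* The sub-opcode bits select the exact operation:
     |  *  - opc3 bit 0x01: sign-extend (vs. zero-extend) the 16-bit operands
     |  *  - opc3 bits 0x0C: which halfwords of rA/rB are multiplied
     |  *    (high*high, low*high or low*low)
     |  *  - opc2 bit 0x04: accumulate into rT, with opc2 bit 0x02 selecting
     |  *    the negating form
     |  *  - opc3 bit 0x02: saturate, opc3 bit 0x10: update XER[OV]/SO
     |  */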
5645 | static inline void gen_405_mulladd_insn(DisasContext *ctx, int opc2, int opc3, |
5646 | int ra, int rb, int rt, int Rc) |
5647 | { |
5648 | TCGvTCGv_i32 t0, t1; |
5649 | |
5650 | t0 = tcg_temp_local_new()tcg_temp_local_new_i32(); |
5651 | t1 = tcg_temp_local_new()tcg_temp_local_new_i32(); |
5652 | |
5653 | switch (opc3 & 0x0D) { |
5654 | case 0x05: |
5655 | /* macchw - macchw. - macchwo - macchwo. */ |
5656 | /* macchws - macchws. - macchwso - macchwso. */ |
5657 | /* nmacchw - nmacchw. - nmacchwo - nmacchwo. */ |
5658 | /* nmacchws - nmacchws. - nmacchwso - nmacchwso. */ |
5659 | /* mulchw - mulchw. */ |
5660 | tcg_gen_ext16s_tltcg_gen_ext16s_i32(t0, cpu_gpr[ra]); |
5661 | tcg_gen_sari_tltcg_gen_sari_i32(t1, cpu_gpr[rb], 16); |
5662 | tcg_gen_ext16s_tltcg_gen_ext16s_i32(t1, t1); |
5663 | break; |
5664 | case 0x04: |
5665 | /* macchwu - macchwu. - macchwuo - macchwuo. */ |
5666 | /* macchwsu - macchwsu. - macchwsuo - macchwsuo. */ |
5667 | /* mulchwu - mulchwu. */ |
5668 | tcg_gen_ext16u_tltcg_gen_ext16u_i32(t0, cpu_gpr[ra]); |
5669 | tcg_gen_shri_tltcg_gen_shri_i32(t1, cpu_gpr[rb], 16); |
5670 | tcg_gen_ext16u_tltcg_gen_ext16u_i32(t1, t1); |
5671 | break; |
5672 | case 0x01: |
5673 | /* machhw - machhw. - machhwo - machhwo. */ |
5674 | /* machhws - machhws. - machhwso - machhwso. */ |
5675 | /* nmachhw - nmachhw. - nmachhwo - nmachhwo. */ |
5676 | /* nmachhws - nmachhws. - nmachhwso - nmachhwso. */ |
5677 | /* mulhhw - mulhhw. */ |
5678 | tcg_gen_sari_tltcg_gen_sari_i32(t0, cpu_gpr[ra], 16); |
5679 | tcg_gen_ext16s_tltcg_gen_ext16s_i32(t0, t0); |
5680 | tcg_gen_sari_tltcg_gen_sari_i32(t1, cpu_gpr[rb], 16); |
5681 | tcg_gen_ext16s_tltcg_gen_ext16s_i32(t1, t1); |
5682 | break; |
5683 | case 0x00: |
5684 | /* machhwu - machhwu. - machhwuo - machhwuo. */ |
5685 | /* machhwsu - machhwsu. - machhwsuo - machhwsuo. */ |
5686 | /* mulhhwu - mulhhwu. */ |
5687 | tcg_gen_shri_tltcg_gen_shri_i32(t0, cpu_gpr[ra], 16); |
5688 | tcg_gen_ext16u_tltcg_gen_ext16u_i32(t0, t0); |
5689 | tcg_gen_shri_tltcg_gen_shri_i32(t1, cpu_gpr[rb], 16); |
5690 | tcg_gen_ext16u_tltcg_gen_ext16u_i32(t1, t1); |
5691 | break; |
5692 | case 0x0D: |
5693 | /* maclhw - maclhw. - maclhwo - maclhwo. */ |
5694 | /* maclhws - maclhws. - maclhwso - maclhwso. */ |
5695 | /* nmaclhw - nmaclhw. - nmaclhwo - nmaclhwo. */ |
5696 | /* nmaclhws - nmaclhws. - nmaclhwso - nmaclhwso. */ |
5697 | /* mullhw - mullhw. */ |
5698 | tcg_gen_ext16s_tltcg_gen_ext16s_i32(t0, cpu_gpr[ra]); |
5699 | tcg_gen_ext16s_tltcg_gen_ext16s_i32(t1, cpu_gpr[rb]); |
5700 | break; |
5701 | case 0x0C: |
5702 | /* maclhwu - maclhwu. - maclhwuo - maclhwuo. */ |
5703 | /* maclhwsu - maclhwsu. - maclhwsuo - maclhwsuo. */ |
5704 | /* mullhwu - mullhwu. */ |
5705 | tcg_gen_ext16u_tltcg_gen_ext16u_i32(t0, cpu_gpr[ra]); |
5706 | tcg_gen_ext16u_tltcg_gen_ext16u_i32(t1, cpu_gpr[rb]); |
5707 | break; |
5708 | } |
5709 | if (opc2 & 0x04) { |
5710 | /* (n)multiply-and-accumulate (0x0C / 0x0E) */ |
5711 | tcg_gen_mul_tltcg_gen_mul_i32(t1, t0, t1); |
5712 | if (opc2 & 0x02) { |
5713 | /* nmultiply-and-accumulate (0x0E) */ |
5714 | tcg_gen_sub_tltcg_gen_sub_i32(t0, cpu_gpr[rt], t1); |
5715 | } else { |
5716 | /* multiply-and-accumulate (0x0C) */ |
5717 | tcg_gen_add_tltcg_gen_add_i32(t0, cpu_gpr[rt], t1); |
5718 | } |
5719 | |
5720 | if (opc3 & 0x12) { |
5721 | /* Check overflow and/or saturate */ |
5722 | int l1 = gen_new_label(); |
5723 | |
5724 | if (opc3 & 0x10) { |
5725 | /* Start with XER OV disabled, the most likely case */ |
5726 | tcg_gen_movi_tltcg_gen_movi_i32(cpu_ov, 0); |
5727 | } |
5728 | if (opc3 & 0x01) { |
5729 | /* Signed */ |
5730 | tcg_gen_xor_tltcg_gen_xor_i32(t1, cpu_gpr[rt], t1); |
5731 | tcg_gen_brcondi_tltcg_gen_brcondi_i32(TCG_COND_GE, t1, 0, l1); |
5732 | tcg_gen_xor_tltcg_gen_xor_i32(t1, cpu_gpr[rt], t0); |
5733 | tcg_gen_brcondi_tltcg_gen_brcondi_i32(TCG_COND_LT, t1, 0, l1); |
5734 | if (opc3 & 0x02) { |
5735 | /* Saturate */ |
5736 | tcg_gen_sari_tltcg_gen_sari_i32(t0, cpu_gpr[rt], 31); |
5737 | tcg_gen_xori_tltcg_gen_xori_i32(t0, t0, 0x7fffffff); |
5738 | } |
5739 | } else { |
5740 | /* Unsigned */ |
5741 | tcg_gen_brcond_tltcg_gen_brcond_i32(TCG_COND_GEU, t0, t1, l1); |
5742 | if (opc3 & 0x02) { |
5743 | /* Saturate */ |
5744 | tcg_gen_movi_tltcg_gen_movi_i32(t0, UINT32_MAX(4294967295U)); |
5745 | } |
5746 | } |
5747 | if (opc3 & 0x10) { |
5748 | /* Check overflow */ |
5749 | tcg_gen_movi_tltcg_gen_movi_i32(cpu_ov, 1); |
5750 | tcg_gen_movi_tltcg_gen_movi_i32(cpu_so, 1); |
5751 | } |
5752 | gen_set_label(l1); |
5753 | tcg_gen_mov_tltcg_gen_mov_i32(cpu_gpr[rt], t0); |
5754 | } |
5755 | } else { |
5756 | tcg_gen_mul_tltcg_gen_mul_i32(cpu_gpr[rt], t0, t1); |
5757 | } |
5758 | tcg_temp_freetcg_temp_free_i32(t0); |
5759 | tcg_temp_freetcg_temp_free_i32(t1); |
5760 | if (unlikely(Rc)__builtin_expect(!!(Rc), 0) != 0) { |
5761 | /* Update Rc0 */ |
5762 | gen_set_Rc0(ctx, cpu_gpr[rt]); |
5763 | } |
5764 | } |
5765 | |
5766 | #define GEN_MAC_HANDLER(name, opc2, opc3){ .opc1 = 0x04, .opc2 = opc2, .opc3 = opc3, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_405_MAC, .type2 = PPC_NONE , .handler = &gen_name, }, .oname = "name", } \ |
5767 | static void glue(gen_, name)gen_name(DisasContext *ctx) \ |
5768 | { \ |
5769 | gen_405_mulladd_insn(ctx, opc2, opc3, rA(ctx->opcode), rB(ctx->opcode), \ |
5770 | rD(ctx->opcode), Rc(ctx->opcode)); \ |
5771 | } |
5772 | |
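     | /* Each GEN_MAC_HANDLER(name, opc2, opc3) invocation below emits a
     |  * gen_<name> translator that simply forwards its sub-opcodes to
     |  * gen_405_mulladd_insn above.
     |  */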
5773 | /* macchw - macchw. */ |
5774 | GEN_MAC_HANDLER(macchw, 0x0C, 0x05){ .opc1 = 0x04, .opc2 = 0x0C, .opc3 = 0x05, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_405_MAC, .type2 = PPC_NONE , .handler = &gen_macchw, }, .oname = "macchw", }; |
5775 | /* macchwo - macchwo. */ |
5776 | GEN_MAC_HANDLER(macchwo, 0x0C, 0x15){ .opc1 = 0x04, .opc2 = 0x0C, .opc3 = 0x15, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_405_MAC, .type2 = PPC_NONE , .handler = &gen_macchwo, }, .oname = "macchwo", }; |
5777 | /* macchws - macchws. */ |
5778 | GEN_MAC_HANDLER(macchws, 0x0C, 0x07){ .opc1 = 0x04, .opc2 = 0x0C, .opc3 = 0x07, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_405_MAC, .type2 = PPC_NONE , .handler = &gen_macchws, }, .oname = "macchws", }; |
5779 | /* macchwso - macchwso. */ |
5780 | GEN_MAC_HANDLER(macchwso, 0x0C, 0x17){ .opc1 = 0x04, .opc2 = 0x0C, .opc3 = 0x17, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_405_MAC, .type2 = PPC_NONE , .handler = &gen_macchwso, }, .oname = "macchwso", }; |
5781 | /* macchwsu - macchwsu. */ |
5782 | GEN_MAC_HANDLER(macchwsu, 0x0C, 0x06){ .opc1 = 0x04, .opc2 = 0x0C, .opc3 = 0x06, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_405_MAC, .type2 = PPC_NONE , .handler = &gen_macchwsu, }, .oname = "macchwsu", }; |
5783 | /* macchwsuo - macchwsuo. */ |
5784 | GEN_MAC_HANDLER(macchwsuo, 0x0C, 0x16){ .opc1 = 0x04, .opc2 = 0x0C, .opc3 = 0x16, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_405_MAC, .type2 = PPC_NONE , .handler = &gen_macchwsuo, }, .oname = "macchwsuo", }; |
5785 | /* macchwu - macchwu. */ |
5786 | GEN_MAC_HANDLER(macchwu, 0x0C, 0x04){ .opc1 = 0x04, .opc2 = 0x0C, .opc3 = 0x04, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_405_MAC, .type2 = PPC_NONE , .handler = &gen_macchwu, }, .oname = "macchwu", }; |
5787 | /* macchwuo - macchwuo. */ |
5788 | GEN_MAC_HANDLER(macchwuo, 0x0C, 0x14){ .opc1 = 0x04, .opc2 = 0x0C, .opc3 = 0x14, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_405_MAC, .type2 = PPC_NONE , .handler = &gen_macchwuo, }, .oname = "macchwuo", }; |
5789 | /* machhw - machhw. */ |
5790 | GEN_MAC_HANDLER(machhw, 0x0C, 0x01){ .opc1 = 0x04, .opc2 = 0x0C, .opc3 = 0x01, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_405_MAC, .type2 = PPC_NONE , .handler = &gen_machhw, }, .oname = "machhw", }; |
5791 | /* machhwo - machhwo. */ |
5792 | GEN_MAC_HANDLER(machhwo, 0x0C, 0x11){ .opc1 = 0x04, .opc2 = 0x0C, .opc3 = 0x11, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_405_MAC, .type2 = PPC_NONE , .handler = &gen_machhwo, }, .oname = "machhwo", }; |
5793 | /* machhws - machhws. */ |
5794 | GEN_MAC_HANDLER(machhws, 0x0C, 0x03){ .opc1 = 0x04, .opc2 = 0x0C, .opc3 = 0x03, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_405_MAC, .type2 = PPC_NONE , .handler = &gen_machhws, }, .oname = "machhws", }; |
5795 | /* machhwso - machhwso. */ |
5796 | GEN_MAC_HANDLER(machhwso, 0x0C, 0x13){ .opc1 = 0x04, .opc2 = 0x0C, .opc3 = 0x13, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_405_MAC, .type2 = PPC_NONE , .handler = &gen_machhwso, }, .oname = "machhwso", }; |
5797 | /* machhwsu - machhwsu. */ |
5798 | GEN_MAC_HANDLER(machhwsu, 0x0C, 0x02){ .opc1 = 0x04, .opc2 = 0x0C, .opc3 = 0x02, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_405_MAC, .type2 = PPC_NONE , .handler = &gen_machhwsu, }, .oname = "machhwsu", }; |
5799 | /* machhwsuo - machhwsuo. */ |
5800 | GEN_MAC_HANDLER(machhwsuo, 0x0C, 0x12){ .opc1 = 0x04, .opc2 = 0x0C, .opc3 = 0x12, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_405_MAC, .type2 = PPC_NONE , .handler = &gen_machhwsuo, }, .oname = "machhwsuo", }; |
5801 | /* machhwu - machhwu. */ |
5802 | GEN_MAC_HANDLER(machhwu, 0x0C, 0x00){ .opc1 = 0x04, .opc2 = 0x0C, .opc3 = 0x00, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_405_MAC, .type2 = PPC_NONE , .handler = &gen_machhwu, }, .oname = "machhwu", }; |
5803 | /* machhwuo - machhwuo. */ |
5804 | GEN_MAC_HANDLER(machhwuo, 0x0C, 0x10){ .opc1 = 0x04, .opc2 = 0x0C, .opc3 = 0x10, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_405_MAC, .type2 = PPC_NONE , .handler = &gen_machhwuo, }, .oname = "machhwuo", }; |
5805 | /* maclhw - maclhw. */ |
5806 | GEN_MAC_HANDLER(maclhw, 0x0C, 0x0D){ .opc1 = 0x04, .opc2 = 0x0C, .opc3 = 0x0D, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_405_MAC, .type2 = PPC_NONE , .handler = &gen_maclhw, }, .oname = "maclhw", }; |
5807 | /* maclhwo - maclhwo. */ |
5808 | GEN_MAC_HANDLER(maclhwo, 0x0C, 0x1D){ .opc1 = 0x04, .opc2 = 0x0C, .opc3 = 0x1D, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_405_MAC, .type2 = PPC_NONE , .handler = &gen_maclhwo, }, .oname = "maclhwo", }; |
5809 | /* maclhws - maclhws. */ |
5810 | GEN_MAC_HANDLER(maclhws, 0x0C, 0x0F){ .opc1 = 0x04, .opc2 = 0x0C, .opc3 = 0x0F, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_405_MAC, .type2 = PPC_NONE , .handler = &gen_maclhws, }, .oname = "maclhws", }; |
5811 | /* maclhwso - maclhwso. */ |
5812 | GEN_MAC_HANDLER(maclhwso, 0x0C, 0x1F){ .opc1 = 0x04, .opc2 = 0x0C, .opc3 = 0x1F, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_405_MAC, .type2 = PPC_NONE , .handler = &gen_maclhwso, }, .oname = "maclhwso", }; |
5813 | /* maclhwu - maclhwu. */ |
5814 | GEN_MAC_HANDLER(maclhwu, 0x0C, 0x0C){ .opc1 = 0x04, .opc2 = 0x0C, .opc3 = 0x0C, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_405_MAC, .type2 = PPC_NONE , .handler = &gen_maclhwu, }, .oname = "maclhwu", }; |
5815 | /* maclhwuo - maclhwuo. */ |
5816 | GEN_MAC_HANDLER(maclhwuo, 0x0C, 0x1C){ .opc1 = 0x04, .opc2 = 0x0C, .opc3 = 0x1C, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_405_MAC, .type2 = PPC_NONE , .handler = &gen_maclhwuo, }, .oname = "maclhwuo", }; |
5817 | /* maclhwsu - maclhwsu. */ |
5818 | GEN_MAC_HANDLER(maclhwsu, 0x0C, 0x0E){ .opc1 = 0x04, .opc2 = 0x0C, .opc3 = 0x0E, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_405_MAC, .type2 = PPC_NONE , .handler = &gen_maclhwsu, }, .oname = "maclhwsu", }; |
5819 | /* maclhwsuo - maclhwsuo. */ |
5820 | GEN_MAC_HANDLER(maclhwsuo, 0x0C, 0x1E){ .opc1 = 0x04, .opc2 = 0x0C, .opc3 = 0x1E, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_405_MAC, .type2 = PPC_NONE , .handler = &gen_maclhwsuo, }, .oname = "maclhwsuo", }; |
5821 | /* nmacchw - nmacchw. */ |
5822 | GEN_MAC_HANDLER(nmacchw, 0x0E, 0x05){ .opc1 = 0x04, .opc2 = 0x0E, .opc3 = 0x05, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_405_MAC, .type2 = PPC_NONE , .handler = &gen_nmacchw, }, .oname = "nmacchw", }; |
5823 | /* nmacchwo - nmacchwo. */ |
5824 | GEN_MAC_HANDLER(nmacchwo, 0x0E, 0x15){ .opc1 = 0x04, .opc2 = 0x0E, .opc3 = 0x15, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_405_MAC, .type2 = PPC_NONE , .handler = &gen_nmacchwo, }, .oname = "nmacchwo", }; |
5825 | /* nmacchws - nmacchws. */ |
5826 | GEN_MAC_HANDLER(nmacchws, 0x0E, 0x07){ .opc1 = 0x04, .opc2 = 0x0E, .opc3 = 0x07, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_405_MAC, .type2 = PPC_NONE , .handler = &gen_nmacchws, }, .oname = "nmacchws", }; |
5827 | /* nmacchwso - nmacchwso. */ |
5828 | GEN_MAC_HANDLER(nmacchwso, 0x0E, 0x17){ .opc1 = 0x04, .opc2 = 0x0E, .opc3 = 0x17, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_405_MAC, .type2 = PPC_NONE , .handler = &gen_nmacchwso, }, .oname = "nmacchwso", }; |
5829 | /* nmachhw - nmachhw. */ |
5830 | GEN_MAC_HANDLER(nmachhw, 0x0E, 0x01){ .opc1 = 0x04, .opc2 = 0x0E, .opc3 = 0x01, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_405_MAC, .type2 = PPC_NONE , .handler = &gen_nmachhw, }, .oname = "nmachhw", }; |
5831 | /* nmachhwo - nmachhwo. */ |
5832 | GEN_MAC_HANDLER(nmachhwo, 0x0E, 0x11){ .opc1 = 0x04, .opc2 = 0x0E, .opc3 = 0x11, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_405_MAC, .type2 = PPC_NONE , .handler = &gen_nmachhwo, }, .oname = "nmachhwo", }; |
5833 | /* nmachhws - nmachhws. */ |
5834 | GEN_MAC_HANDLER(nmachhws, 0x0E, 0x03){ .opc1 = 0x04, .opc2 = 0x0E, .opc3 = 0x03, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_405_MAC, .type2 = PPC_NONE , .handler = &gen_nmachhws, }, .oname = "nmachhws", }; |
5835 | /* nmachhwso - nmachhwso. */ |
5836 | GEN_MAC_HANDLER(nmachhwso, 0x0E, 0x13){ .opc1 = 0x04, .opc2 = 0x0E, .opc3 = 0x13, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_405_MAC, .type2 = PPC_NONE , .handler = &gen_nmachhwso, }, .oname = "nmachhwso", }; |
5837 | /* nmaclhw - nmaclhw. */ |
5838 | GEN_MAC_HANDLER(nmaclhw, 0x0E, 0x0D){ .opc1 = 0x04, .opc2 = 0x0E, .opc3 = 0x0D, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_405_MAC, .type2 = PPC_NONE , .handler = &gen_nmaclhw, }, .oname = "nmaclhw", }; |
5839 | /* nmaclhwo - nmaclhwo. */ |
5840 | GEN_MAC_HANDLER(nmaclhwo, 0x0E, 0x1D){ .opc1 = 0x04, .opc2 = 0x0E, .opc3 = 0x1D, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_405_MAC, .type2 = PPC_NONE , .handler = &gen_nmaclhwo, }, .oname = "nmaclhwo", }; |
5841 | /* nmaclhws - nmaclhws. */ |
5842 | GEN_MAC_HANDLER(nmaclhws, 0x0E, 0x0F){ .opc1 = 0x04, .opc2 = 0x0E, .opc3 = 0x0F, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_405_MAC, .type2 = PPC_NONE , .handler = &gen_nmaclhws, }, .oname = "nmaclhws", }; |
5843 | /* nmaclhwso - nmaclhwso. */ |
5844 | GEN_MAC_HANDLER(nmaclhwso, 0x0E, 0x1F){ .opc1 = 0x04, .opc2 = 0x0E, .opc3 = 0x1F, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_405_MAC, .type2 = PPC_NONE , .handler = &gen_nmaclhwso, }, .oname = "nmaclhwso", }; |
5845 | |
5846 | /* mulchw - mulchw. */ |
5847 | GEN_MAC_HANDLER(mulchw, 0x08, 0x05){ .opc1 = 0x04, .opc2 = 0x08, .opc3 = 0x05, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_405_MAC, .type2 = PPC_NONE , .handler = &gen_mulchw, }, .oname = "mulchw", }; |
5848 | /* mulchwu - mulchwu. */ |
5849 | GEN_MAC_HANDLER(mulchwu, 0x08, 0x04){ .opc1 = 0x04, .opc2 = 0x08, .opc3 = 0x04, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_405_MAC, .type2 = PPC_NONE , .handler = &gen_mulchwu, }, .oname = "mulchwu", }; |
5850 | /* mulhhw - mulhhw. */ |
5851 | GEN_MAC_HANDLER(mulhhw, 0x08, 0x01){ .opc1 = 0x04, .opc2 = 0x08, .opc3 = 0x01, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_405_MAC, .type2 = PPC_NONE , .handler = &gen_mulhhw, }, .oname = "mulhhw", }; |
5852 | /* mulhhwu - mulhhwu. */ |
5853 | GEN_MAC_HANDLER(mulhhwu, 0x08, 0x00){ .opc1 = 0x04, .opc2 = 0x08, .opc3 = 0x00, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_405_MAC, .type2 = PPC_NONE , .handler = &gen_mulhhwu, }, .oname = "mulhhwu", }; |
5854 | /* mullhw - mullhw. */ |
5855 | GEN_MAC_HANDLER(mullhw, 0x08, 0x0D){ .opc1 = 0x04, .opc2 = 0x08, .opc3 = 0x0D, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_405_MAC, .type2 = PPC_NONE , .handler = &gen_mullhw, }, .oname = "mullhw", }; |
5856 | /* mullhwu - mullhwu. */ |
5857 | GEN_MAC_HANDLER(mullhwu, 0x08, 0x0C){ .opc1 = 0x04, .opc2 = 0x08, .opc3 = 0x0C, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_405_MAC, .type2 = PPC_NONE , .handler = &gen_mullhwu, }, .oname = "mullhwu", }; |
5858 | |
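     | /*
     |  * Device Control Register (DCR) access, embedded PowerPC only.  mfdcr and
     |  * mtdcr take the DCR number from the SPR field of the opcode, while the
     |  * indexed forms mfdcrx/mtdcrx take it from rA; the 460-only mfdcrux and
     |  * mtdcrux variants allow the same indexed access from user mode.  The NIP
     |  * is updated first so the load/store helper can raise a precise exception.
     |  */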
5859 | /* mfdcr */ |
5860 | static void gen_mfdcr(DisasContext *ctx) |
5861 | { |
5862 | #if defined(CONFIG_USER_ONLY) |
5863 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); |
5864 | #else |
5865 | TCGvTCGv_i32 dcrn; |
5866 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
5867 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); |
5868 | return; |
5869 | } |
5870 | /* NIP cannot be restored if the memory exception comes from a helper */
5871 | gen_update_nip(ctx, ctx->nip - 4); |
5872 | dcrn = tcg_const_tltcg_const_i32(SPR(ctx->opcode)); |
5873 | gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_env, dcrn); |
5874 | tcg_temp_freetcg_temp_free_i32(dcrn); |
5875 | #endif |
5876 | } |
5877 | |
5878 | /* mtdcr */ |
5879 | static void gen_mtdcr(DisasContext *ctx) |
5880 | { |
5881 | #if defined(CONFIG_USER_ONLY) |
5882 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); |
5883 | #else |
5884 | TCGvTCGv_i32 dcrn; |
5885 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
5886 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); |
5887 | return; |
5888 | } |
5889 | /* NIP cannot be restored if the memory exception comes from a helper */
5890 | gen_update_nip(ctx, ctx->nip - 4); |
5891 | dcrn = tcg_const_tltcg_const_i32(SPR(ctx->opcode)); |
5892 | gen_helper_store_dcr(cpu_env, dcrn, cpu_gpr[rS(ctx->opcode)]); |
5893 | tcg_temp_freetcg_temp_free_i32(dcrn); |
5894 | #endif |
5895 | } |
5896 | |
5897 | /* mfdcrx */ |
5898 | /* XXX: not implemented on 440? */
5899 | static void gen_mfdcrx(DisasContext *ctx) |
5900 | { |
5901 | #if defined(CONFIG_USER_ONLY) |
5902 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); |
5903 | #else |
5904 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
5905 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); |
5906 | return; |
5907 | } |
5908 | /* NIP cannot be restored if the memory exception comes from a helper */
5909 | gen_update_nip(ctx, ctx->nip - 4); |
5910 | gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_env, |
5911 | cpu_gpr[rA(ctx->opcode)]); |
5912 | /* Note: if the Rc update flag is set, the state of Rc0 is undefined */
5913 | #endif |
5914 | } |
5915 | |
5916 | /* mtdcrx */ |
5917 | /* XXX: not implemented on 440? */
5918 | static void gen_mtdcrx(DisasContext *ctx) |
5919 | { |
5920 | #if defined(CONFIG_USER_ONLY) |
5921 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); |
5922 | #else |
5923 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
5924 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); |
5925 | return; |
5926 | } |
5927 | /* NIP cannot be restored if the memory exception comes from a helper */
5928 | gen_update_nip(ctx, ctx->nip - 4); |
5929 | gen_helper_store_dcr(cpu_env, cpu_gpr[rA(ctx->opcode)], |
5930 | cpu_gpr[rS(ctx->opcode)]); |
5931 | /* Note: if the Rc update flag is set, the state of Rc0 is undefined */
5932 | #endif |
5933 | } |
5934 | |
5935 | /* mfdcrux (PPC 460) : user-mode access to DCR */ |
5936 | static void gen_mfdcrux(DisasContext *ctx) |
5937 | { |
5938 | /* NIP cannot be restored if the memory exception comes from a helper */
5939 | gen_update_nip(ctx, ctx->nip - 4); |
5940 | gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_env, |
5941 | cpu_gpr[rA(ctx->opcode)]); |
5942 | /* Note: if the Rc update flag is set, the state of Rc0 is undefined */
5943 | } |
5944 | |
5945 | /* mtdcrux (PPC 460) : user-mode access to DCR */ |
5946 | static void gen_mtdcrux(DisasContext *ctx) |
5947 | { |
5948 | /* NIP cannot be restored if the memory exception comes from a helper */
5949 | gen_update_nip(ctx, ctx->nip - 4); |
5950 | gen_helper_store_dcr(cpu_env, cpu_gpr[rA(ctx->opcode)], |
5951 | cpu_gpr[rS(ctx->opcode)]); |
5952 | /* Note: if the Rc update flag is set, the state of Rc0 is undefined */
5953 | } |
5954 | |
5955 | /* dccci */ |
5956 | static void gen_dccci(DisasContext *ctx) |
5957 | { |
5958 | #if defined(CONFIG_USER_ONLY) |
5959 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
5960 | #else |
5961 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
5962 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
5963 | return; |
5964 | } |
5965 | /* interpreted as no-op */ |
5966 | #endif |
5967 | } |
5968 | |
5969 | /* dcread */ |
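     | /*
     |  * The data cache itself is not modelled, so dcread is approximated: the
     |  * effective address is computed, a 32-bit load is issued so the usual MMU
     |  * checks (and exceptions) still apply, the loaded value is discarded, and
     |  * the effective address is returned in rD.
     |  */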
5970 | static void gen_dcread(DisasContext *ctx) |
5971 | { |
5972 | #if defined(CONFIG_USER_ONLY) |
5973 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
5974 | #else |
5975 | TCGvTCGv_i32 EA, val; |
5976 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
5977 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
5978 | return; |
5979 | } |
5980 | gen_set_access_type(ctx, ACCESS_CACHE); |
5981 | EA = tcg_temp_new()tcg_temp_new_i32(); |
5982 | gen_addr_reg_index(ctx, EA); |
5983 | val = tcg_temp_new()tcg_temp_new_i32(); |
5984 | gen_qemu_ld32u(ctx, val, EA); |
5985 | tcg_temp_freetcg_temp_free_i32(val); |
5986 | tcg_gen_mov_tltcg_gen_mov_i32(cpu_gpr[rD(ctx->opcode)], EA); |
5987 | tcg_temp_freetcg_temp_free_i32(EA); |
5988 | #endif |
5989 | } |
5990 | |
5991 | /* icbt */ |
5992 | static void gen_icbt_40x(DisasContext *ctx) |
5993 | { |
5994 | /* interpreted as no-op */ |
5995 | /* XXX: the specification says this is treated as a load by the MMU
5996 | * but does not generate any exception |
5997 | */ |
5998 | } |
5999 | |
6000 | /* iccci */ |
6001 | static void gen_iccci(DisasContext *ctx) |
6002 | { |
6003 | #if defined(CONFIG_USER_ONLY) |
6004 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
6005 | #else |
6006 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
6007 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
6008 | return; |
6009 | } |
6010 | /* interpreted as no-op */ |
6011 | #endif |
6012 | } |
6013 | |
6014 | /* icread */ |
6015 | static void gen_icread(DisasContext *ctx) |
6016 | { |
6017 | #if defined(CONFIG_USER_ONLY) |
6018 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
6019 | #else |
6020 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
6021 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
6022 | return; |
6023 | } |
6024 | /* interpreted as no-op */ |
6025 | #endif |
6026 | } |
6027 | |
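     | /*
     |  * Return-from-interrupt variants (rfci, rfdi, rfmci): each one calls a
     |  * helper that restores MSR and NIP from the corresponding save/restore
     |  * register pair, then ends the translation block since the execution flow
     |  * has changed.
     |  */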
6028 | /* rfci (mem_idx only) */ |
6029 | static void gen_rfci_40x(DisasContext *ctx) |
6030 | { |
6031 | #if defined(CONFIG_USER_ONLY) |
6032 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
6033 | #else |
6034 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
6035 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
6036 | return; |
6037 | } |
6038 | /* Restore CPU state */ |
6039 | gen_helper_40x_rfci(cpu_env); |
6040 | gen_sync_exception(ctx); |
6041 | #endif |
6042 | } |
6043 | |
6044 | static void gen_rfci(DisasContext *ctx) |
6045 | { |
6046 | #if defined(CONFIG_USER_ONLY) |
6047 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
6048 | #else |
6049 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
6050 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
6051 | return; |
6052 | } |
6053 | /* Restore CPU state */ |
6054 | gen_helper_rfci(cpu_env); |
6055 | gen_sync_exception(ctx); |
6056 | #endif |
6057 | } |
6058 | |
6059 | /* BookE specific */ |
6060 | |
6061 | /* XXX: not implemented on 440? */
6062 | static void gen_rfdi(DisasContext *ctx) |
6063 | { |
6064 | #if defined(CONFIG_USER_ONLY) |
6065 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
6066 | #else |
6067 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
6068 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
6069 | return; |
6070 | } |
6071 | /* Restore CPU state */ |
6072 | gen_helper_rfdi(cpu_env); |
6073 | gen_sync_exception(ctx); |
6074 | #endif |
6075 | } |
6076 | |
6077 | /* XXX: not implemented on 440? */
6078 | static void gen_rfmci(DisasContext *ctx) |
6079 | { |
6080 | #if defined(CONFIG_USER_ONLY) |
6081 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
6082 | #else |
6083 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
6084 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
6085 | return; |
6086 | } |
6087 | /* Restore CPU state */ |
6088 | gen_helper_rfmci(cpu_env); |
6089 | gen_sync_exception(ctx); |
6090 | #endif |
6091 | } |
6092 | |
6093 | /* TLB management - PowerPC 405 implementation */ |
6094 | |
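     | /*
     |  * For the 40x forms, rB selects which word of the TLB entry is accessed
     |  * (0 = high/EPN word, 1 = low/RPN word); any other value is an invalid
     |  * opcode.  tlbsx searches for the EA built from rA/rB and, when Rc is set,
     |  * copies SO into CR0 and sets the EQ bit (0x02) if an entry was found,
     |  * i.e. if the helper did not return -1.
     |  */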
6095 | /* tlbre */ |
6096 | static void gen_tlbre_40x(DisasContext *ctx) |
6097 | { |
6098 | #if defined(CONFIG_USER_ONLY) |
6099 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
6100 | #else |
6101 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
6102 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
6103 | return; |
6104 | } |
6105 | switch (rB(ctx->opcode)) { |
6106 | case 0: |
6107 | gen_helper_4xx_tlbre_hi(cpu_gpr[rD(ctx->opcode)], cpu_env, |
6108 | cpu_gpr[rA(ctx->opcode)]); |
6109 | break; |
6110 | case 1: |
6111 | gen_helper_4xx_tlbre_lo(cpu_gpr[rD(ctx->opcode)], cpu_env, |
6112 | cpu_gpr[rA(ctx->opcode)]); |
6113 | break; |
6114 | default: |
6115 | gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); |
6116 | break; |
6117 | } |
6118 | #endif |
6119 | } |
6120 | |
6121 | /* tlbsx - tlbsx. */ |
6122 | static void gen_tlbsx_40x(DisasContext *ctx) |
6123 | { |
6124 | #if defined(CONFIG_USER_ONLY) |
6125 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
6126 | #else |
6127 | TCGvTCGv_i32 t0; |
6128 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
6129 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
6130 | return; |
6131 | } |
6132 | t0 = tcg_temp_new()tcg_temp_new_i32(); |
6133 | gen_addr_reg_index(ctx, t0); |
6134 | gen_helper_4xx_tlbsx(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); |
6135 | tcg_temp_freetcg_temp_free_i32(t0); |
6136 | if (Rc(ctx->opcode)) { |
6137 | int l1 = gen_new_label(); |
6138 | tcg_gen_trunc_tl_i32tcg_gen_mov_i32(cpu_crf[0], cpu_so); |
6139 | tcg_gen_brcondi_tltcg_gen_brcondi_i32(TCG_COND_EQ, cpu_gpr[rD(ctx->opcode)], -1, l1); |
6140 | tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 0x02); |
6141 | gen_set_label(l1); |
6142 | } |
6143 | #endif |
6144 | } |
6145 | |
6146 | /* tlbwe */ |
6147 | static void gen_tlbwe_40x(DisasContext *ctx) |
6148 | { |
6149 | #if defined(CONFIG_USER_ONLY) |
6150 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
6151 | #else |
6152 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
6153 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
6154 | return; |
6155 | } |
6156 | switch (rB(ctx->opcode)) { |
6157 | case 0: |
6158 | gen_helper_4xx_tlbwe_hi(cpu_env, cpu_gpr[rA(ctx->opcode)], |
6159 | cpu_gpr[rS(ctx->opcode)]); |
6160 | break; |
6161 | case 1: |
6162 | gen_helper_4xx_tlbwe_lo(cpu_env, cpu_gpr[rA(ctx->opcode)], |
6163 | cpu_gpr[rS(ctx->opcode)]); |
6164 | break; |
6165 | default: |
6166 | gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); |
6167 | break; |
6168 | } |
6169 | #endif |
6170 | } |
6171 | |
6172 | /* TLB management - PowerPC 440 implementation */ |
6173 | |
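     | /*
     |  * The 440 forms are similar, but the word selector in rB may be 0, 1 or 2
     |  * and is passed to a single helper as a constant instead of selecting
     |  * between separate helpers.
     |  */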
6174 | /* tlbre */ |
6175 | static void gen_tlbre_440(DisasContext *ctx) |
6176 | { |
6177 | #if defined(CONFIG_USER_ONLY) |
6178 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
6179 | #else |
6180 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
6181 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
6182 | return; |
6183 | } |
6184 | switch (rB(ctx->opcode)) { |
6185 | case 0: |
6186 | case 1: |
6187 | case 2: |
6188 | { |
6189 | TCGv_i32 t0 = tcg_const_i32(rB(ctx->opcode)); |
6190 | gen_helper_440_tlbre(cpu_gpr[rD(ctx->opcode)], cpu_env, |
6191 | t0, cpu_gpr[rA(ctx->opcode)]); |
6192 | tcg_temp_free_i32(t0); |
6193 | } |
6194 | break; |
6195 | default: |
6196 | gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); |
6197 | break; |
6198 | } |
6199 | #endif |
6200 | } |
6201 | |
6202 | /* tlbsx - tlbsx. */ |
6203 | static void gen_tlbsx_440(DisasContext *ctx) |
6204 | { |
6205 | #if defined(CONFIG_USER_ONLY) |
6206 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
6207 | #else |
6208 | TCGvTCGv_i32 t0; |
6209 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
6210 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
6211 | return; |
6212 | } |
6213 | t0 = tcg_temp_new()tcg_temp_new_i32(); |
6214 | gen_addr_reg_index(ctx, t0); |
6215 | gen_helper_440_tlbsx(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); |
6216 | tcg_temp_freetcg_temp_free_i32(t0); |
6217 | if (Rc(ctx->opcode)) { |
6218 | int l1 = gen_new_label(); |
6219 | tcg_gen_trunc_tl_i32tcg_gen_mov_i32(cpu_crf[0], cpu_so); |
6220 | tcg_gen_brcondi_tltcg_gen_brcondi_i32(TCG_COND_EQ, cpu_gpr[rD(ctx->opcode)], -1, l1); |
6221 | tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 0x02); |
6222 | gen_set_label(l1); |
6223 | } |
6224 | #endif |
6225 | } |
6226 | |
6227 | /* tlbwe */ |
6228 | static void gen_tlbwe_440(DisasContext *ctx) |
6229 | { |
6230 | #if defined(CONFIG_USER_ONLY) |
6231 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
6232 | #else |
6233 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
6234 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
6235 | return; |
6236 | } |
6237 | switch (rB(ctx->opcode)) { |
6238 | case 0: |
6239 | case 1: |
6240 | case 2: |
6241 | { |
6242 | TCGv_i32 t0 = tcg_const_i32(rB(ctx->opcode)); |
6243 | gen_helper_440_tlbwe(cpu_env, t0, cpu_gpr[rA(ctx->opcode)], |
6244 | cpu_gpr[rS(ctx->opcode)]); |
6245 | tcg_temp_free_i32(t0); |
6246 | } |
6247 | break; |
6248 | default: |
6249 | gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); |
6250 | break; |
6251 | } |
6252 | #endif |
6253 | } |
6254 | |
6255 | /* TLB management - PowerPC BookE 2.06 implementation */ |
6256 | |
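     | /*
     |  * The BookE 2.06 forms work through the MAS registers held in the CPU
     |  * state, so tlbre and tlbwe call their helpers with no GPR operands.
     |  * tlbsx takes its effective address as (rA|0) + rB, and tlbilx decodes the
     |  * two-bit field at (opcode >> 21) & 0x3 to pick one of the tlbilx0,
     |  * tlbilx1 or tlbilx3 helpers, with the remaining encoding rejected as an
     |  * invalid opcode.
     |  */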
6257 | /* tlbre */ |
6258 | static void gen_tlbre_booke206(DisasContext *ctx) |
6259 | { |
6260 | #if defined(CONFIG_USER_ONLY) |
6261 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
6262 | #else |
6263 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
6264 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
6265 | return; |
6266 | } |
6267 | |
6268 | gen_helper_booke206_tlbre(cpu_env); |
6269 | #endif |
6270 | } |
6271 | |
6272 | /* tlbsx - tlbsx. */ |
6273 | static void gen_tlbsx_booke206(DisasContext *ctx) |
6274 | { |
6275 | #if defined(CONFIG_USER_ONLY) |
6276 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
6277 | #else |
6278 | TCGvTCGv_i32 t0; |
6279 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
6280 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
6281 | return; |
6282 | } |
6283 | |
6284 | if (rA(ctx->opcode)) { |
6285 | t0 = tcg_temp_new()tcg_temp_new_i32(); |
6286 | tcg_gen_mov_tltcg_gen_mov_i32(t0, cpu_gpr[rD(ctx->opcode)]); |
6287 | } else { |
6288 | t0 = tcg_const_tltcg_const_i32(0); |
6289 | } |
6290 | |
6291 | tcg_gen_add_tltcg_gen_add_i32(t0, t0, cpu_gpr[rB(ctx->opcode)]); |
6292 | gen_helper_booke206_tlbsx(cpu_env, t0); |
6293 | #endif |
6294 | } |
6295 | |
6296 | /* tlbwe */ |
6297 | static void gen_tlbwe_booke206(DisasContext *ctx) |
6298 | { |
6299 | #if defined(CONFIG_USER_ONLY) |
6300 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
6301 | #else |
6302 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
6303 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
6304 | return; |
6305 | } |
6306 | gen_update_nip(ctx, ctx->nip - 4); |
6307 | gen_helper_booke206_tlbwe(cpu_env); |
6308 | #endif |
6309 | } |
6310 | |
6311 | static void gen_tlbivax_booke206(DisasContext *ctx) |
6312 | { |
6313 | #if defined(CONFIG_USER_ONLY) |
6314 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
6315 | #else |
6316 | TCGvTCGv_i32 t0; |
6317 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
6318 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
6319 | return; |
6320 | } |
6321 | |
6322 | t0 = tcg_temp_new()tcg_temp_new_i32(); |
6323 | gen_addr_reg_index(ctx, t0); |
6324 | |
6325 | gen_helper_booke206_tlbivax(cpu_env, t0); |
6326 | #endif |
6327 | } |
6328 | |
6329 | static void gen_tlbilx_booke206(DisasContext *ctx) |
6330 | { |
6331 | #if defined(CONFIG_USER_ONLY) |
6332 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
6333 | #else |
6334 | TCGvTCGv_i32 t0; |
6335 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
6336 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
6337 | return; |
6338 | } |
6339 | |
6340 | t0 = tcg_temp_new()tcg_temp_new_i32(); |
6341 | gen_addr_reg_index(ctx, t0); |
6342 | |
6343 | switch ((ctx->opcode >> 21) & 0x3) {
6344 | case 0: |
6345 | gen_helper_booke206_tlbilx0(cpu_env, t0); |
6346 | break; |
6347 | case 1: |
6348 | gen_helper_booke206_tlbilx1(cpu_env, t0); |
6349 | break; |
6350 | case 3: |
6351 | gen_helper_booke206_tlbilx3(cpu_env, t0); |
6352 | break; |
6353 | default: |
6354 | gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); |
6355 | break; |
6356 | } |
6357 | |
6358 | tcg_temp_freetcg_temp_free_i32(t0); |
6359 | #endif |
6360 | } |
6361 | |
6362 | |
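     | /*
     |  * wrtee and wrteei update only MSR[EE].  wrtee copies the EE bit from the
     |  * source GPR (msr = (msr & ~(1 << MSR_EE)) | (rS & (1 << MSR_EE))), while
     |  * wrteei takes the new value from bit 0x00008000 of the opcode.  Whenever
     |  * EE may have been set, translation is stopped so a pending interrupt can
     |  * be taken before the next instruction.
     |  */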
6363 | /* wrtee */ |
6364 | static void gen_wrtee(DisasContext *ctx) |
6365 | { |
6366 | #if defined(CONFIG_USER_ONLY) |
6367 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
6368 | #else |
6369 | TCGvTCGv_i32 t0; |
6370 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
6371 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
6372 | return; |
6373 | } |
6374 | t0 = tcg_temp_new()tcg_temp_new_i32(); |
6375 | tcg_gen_andi_tltcg_gen_andi_i32(t0, cpu_gpr[rD(ctx->opcode)], (1 << MSR_EE15)); |
6376 | tcg_gen_andi_tltcg_gen_andi_i32(cpu_msr, cpu_msr, ~(1 << MSR_EE15)); |
6377 | tcg_gen_or_tltcg_gen_or_i32(cpu_msr, cpu_msr, t0); |
6378 | tcg_temp_freetcg_temp_free_i32(t0); |
6379 | /* Stop translation to have a chance to raise an exception |
6380 | * if we just set msr_ee to 1 |
6381 | */ |
6382 | gen_stop_exception(ctx); |
6383 | #endif |
6384 | } |
6385 | |
6386 | /* wrteei */ |
6387 | static void gen_wrteei(DisasContext *ctx) |
6388 | { |
6389 | #if defined(CONFIG_USER_ONLY) |
6390 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
6391 | #else |
6392 | if (unlikely(!ctx->mem_idx)__builtin_expect(!!(!ctx->mem_idx), 0)) { |
6393 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
6394 | return; |
6395 | } |
6396 | if (ctx->opcode & 0x00008000) { |
6397 | tcg_gen_ori_tltcg_gen_ori_i32(cpu_msr, cpu_msr, (1 << MSR_EE15)); |
6398 | /* Stop translation to have a chance to raise an exception */ |
6399 | gen_stop_exception(ctx); |
6400 | } else { |
6401 | tcg_gen_andi_tltcg_gen_andi_i32(cpu_msr, cpu_msr, ~(1 << MSR_EE15)); |
6402 | } |
6403 | #endif |
6404 | } |
6405 | |
6406 | /* PowerPC 440 specific instructions */ |
6407 | |
6408 | /* dlmzb */ |
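     | /*
     |  * dlmzb ("determine leftmost zero byte") is done entirely in a helper; the
     |  * Rc bit is passed down as a constant i32 so the helper can also set the
     |  * condition register for the record (dot) form.
     |  */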
6409 | static void gen_dlmzb(DisasContext *ctx) |
6410 | { |
6411 | TCGv_i32 t0 = tcg_const_i32(Rc(ctx->opcode)); |
6412 | gen_helper_dlmzb(cpu_gpr[rA(ctx->opcode)], cpu_env, |
6413 | cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); |
6414 | tcg_temp_free_i32(t0); |
6415 | } |
6416 | |
6417 | /* mbar replaces eieio on 440 */ |
6418 | static void gen_mbar(DisasContext *ctx) |
6419 | { |
6420 | /* interpreted as no-op */ |
6421 | } |
6422 | |
6423 | /* msync replaces sync on 440 */ |
6424 | static void gen_msync_4xx(DisasContext *ctx) |
6425 | { |
6426 | /* interpreted as no-op */ |
6427 | } |
6428 | |
6429 | /* icbt */ |
6430 | static void gen_icbt_440(DisasContext *ctx) |
6431 | { |
6432 | /* interpreted as no-op */ |
6433 | /* XXX: the specification says this is treated as a load by the MMU
6434 | * but does not generate any exception |
6435 | */ |
6436 | } |
6437 | |
6438 | /* Embedded.Processor Control */ |
6439 | |
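     | /*
     |  * msgclr and msgsnd implement the processor doorbell messages.  Both are
     |  * privileged; the message payload comes from rB, and the msgsnd helper is
     |  * called without cpu_env, presumably because it signals other CPUs rather
     |  * than modifying local state.
     |  */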
6440 | static void gen_msgclr(DisasContext *ctx) |
6441 | { |
6442 | #if defined(CONFIG_USER_ONLY) |
6443 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
6444 | #else |
6445 | if (unlikely(ctx->mem_idx == 0)__builtin_expect(!!(ctx->mem_idx == 0), 0)) { |
6446 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
6447 | return; |
6448 | } |
6449 | |
6450 | gen_helper_msgclr(cpu_env, cpu_gpr[rB(ctx->opcode)]); |
6451 | #endif |
6452 | } |
6453 | |
6454 | static void gen_msgsnd(DisasContext *ctx) |
6455 | { |
6456 | #if defined(CONFIG_USER_ONLY) |
6457 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
6458 | #else |
6459 | if (unlikely(ctx->mem_idx == 0)__builtin_expect(!!(ctx->mem_idx == 0), 0)) { |
6460 | gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); |
6461 | return; |
6462 | } |
6463 | |
6464 | gen_helper_msgsnd(cpu_gpr[rB(ctx->opcode)]); |
6465 | #endif |
6466 | } |
6467 | |
6468 | /*** Altivec vector extension ***/ |
6469 | /* Altivec registers moves */ |
6470 | |
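     | /*
     |  * gen_avr_ptr returns a TCG pointer to env->avr[reg], so the 128-bit
     |  * Altivec registers are passed to helpers by reference instead of as two
     |  * separate 64-bit TCG values; callers must release it with
     |  * tcg_temp_free_ptr().
     |  */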
6471 | static inline TCGv_ptr gen_avr_ptr(int reg) |
6472 | { |
6473 | TCGv_ptr r = tcg_temp_new_ptr()__extension__ ({ TCGv_ptr make_tcgv_tmp = {((tcg_temp_new_i64 ()).i64)}; make_tcgv_tmp; }); |
6474 | tcg_gen_addi_ptr(r, cpu_env, offsetof(CPUPPCState, avr[reg]))tcg_gen_addi_i64(__extension__ ({ TCGv_i64 make_tcgv_tmp = {( (r).iptr)}; make_tcgv_tmp;}), __extension__ ({ TCGv_i64 make_tcgv_tmp = {((cpu_env).iptr)}; make_tcgv_tmp;}), (__builtin_offsetof( CPUPPCState, avr[reg]))); |
6475 | return r; |
6476 | } |
6477 | |
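     | /*
     |  * Full vector loads and stores are emitted as two 64-bit accesses with the
     |  * effective address forced down to 16-byte alignment (EA &= ~0xf).  In
     |  * little-endian mode the two halves are accessed in the opposite order so
     |  * that cpu_avrh/cpu_avrl keep the same meaning in both modes.  The element
     |  * forms (lvebx/lvehx/lvewx and their stve counterparts) are left to
     |  * helpers, which place or extract a single element according to the low
     |  * bits of the EA.
     |  */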
6478 | #define GEN_VR_LDX(name, opc2, opc3){ .opc1 = 0x1F, .opc2 = opc2, .opc3 = opc3, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_name, }, .oname = "name", } \ |
6479 | static void glue(gen_, name)gen_name(DisasContext *ctx) \ |
6480 | { \ |
6481 | TCGvTCGv_i32 EA; \ |
6482 | if (unlikely(!ctx->altivec_enabled)__builtin_expect(!!(!ctx->altivec_enabled), 0)) { \ |
6483 | gen_exception(ctx, POWERPC_EXCP_VPU); \ |
6484 | return; \ |
6485 | } \ |
6486 | gen_set_access_type(ctx, ACCESS_INT); \ |
6487 | EA = tcg_temp_new()tcg_temp_new_i32(); \ |
6488 | gen_addr_reg_index(ctx, EA); \ |
6489 | tcg_gen_andi_tltcg_gen_andi_i32(EA, EA, ~0xf); \ |
6490 | if (ctx->le_mode) { \ |
6491 | gen_qemu_ld64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \ |
6492 | tcg_gen_addi_tltcg_gen_addi_i32(EA, EA, 8); \ |
6493 | gen_qemu_ld64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \ |
6494 | } else { \ |
6495 | gen_qemu_ld64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \ |
6496 | tcg_gen_addi_tltcg_gen_addi_i32(EA, EA, 8); \ |
6497 | gen_qemu_ld64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \ |
6498 | } \ |
6499 | tcg_temp_freetcg_temp_free_i32(EA); \ |
6500 | } |
6501 | |
6502 | #define GEN_VR_STX(name, opc2, opc3){ .opc1 = 0x1F, .opc2 = opc2, .opc3 = opc3, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_stname, }, .oname = "stname", } \ |
6503 | static void gen_st##name(DisasContext *ctx) \ |
6504 | { \ |
6505 | TCGvTCGv_i32 EA; \ |
6506 | if (unlikely(!ctx->altivec_enabled)__builtin_expect(!!(!ctx->altivec_enabled), 0)) { \ |
6507 | gen_exception(ctx, POWERPC_EXCP_VPU); \ |
6508 | return; \ |
6509 | } \ |
6510 | gen_set_access_type(ctx, ACCESS_INT); \ |
6511 | EA = tcg_temp_new()tcg_temp_new_i32(); \ |
6512 | gen_addr_reg_index(ctx, EA); \ |
6513 | tcg_gen_andi_tltcg_gen_andi_i32(EA, EA, ~0xf); \ |
6514 | if (ctx->le_mode) { \ |
6515 | gen_qemu_st64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \ |
6516 | tcg_gen_addi_tltcg_gen_addi_i32(EA, EA, 8); \ |
6517 | gen_qemu_st64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \ |
6518 | } else { \ |
6519 | gen_qemu_st64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \ |
6520 | tcg_gen_addi_tltcg_gen_addi_i32(EA, EA, 8); \ |
6521 | gen_qemu_st64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \ |
6522 | } \ |
6523 | tcg_temp_freetcg_temp_free_i32(EA); \ |
6524 | } |
6525 | |
6526 | #define GEN_VR_LVE(name, opc2, opc3){ .opc1 = 0x1F, .opc2 = opc2, .opc3 = opc3, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_lvename, }, .oname = "lvename", } \ |
6527 | static void gen_lve##name(DisasContext *ctx) \ |
6528 | { \ |
6529 | TCGvTCGv_i32 EA; \ |
6530 | TCGv_ptr rs; \ |
6531 | if (unlikely(!ctx->altivec_enabled)__builtin_expect(!!(!ctx->altivec_enabled), 0)) { \ |
6532 | gen_exception(ctx, POWERPC_EXCP_VPU); \ |
6533 | return; \ |
6534 | } \ |
6535 | gen_set_access_type(ctx, ACCESS_INT); \ |
6536 | EA = tcg_temp_new()tcg_temp_new_i32(); \ |
6537 | gen_addr_reg_index(ctx, EA); \ |
6538 | rs = gen_avr_ptr(rS(ctx->opcode)); \ |
6539 | gen_helper_lve##name(cpu_env, rs, EA); \ |
6540 | tcg_temp_freetcg_temp_free_i32(EA); \ |
6541 | tcg_temp_free_ptr(rs)tcg_temp_free_i64(__extension__ ({ TCGv_i64 make_tcgv_tmp = { ((rs).iptr)}; make_tcgv_tmp;})); \ |
6542 | } |
6543 | |
6544 | #define GEN_VR_STVE(name, opc2, opc3){ .opc1 = 0x1F, .opc2 = opc2, .opc3 = opc3, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_stvename, }, .oname = "stvename", } \ |
6545 | static void gen_stve##name(DisasContext *ctx) \ |
6546 | { \ |
6547 | TCGvTCGv_i32 EA; \ |
6548 | TCGv_ptr rs; \ |
6549 | if (unlikely(!ctx->altivec_enabled)__builtin_expect(!!(!ctx->altivec_enabled), 0)) { \ |
6550 | gen_exception(ctx, POWERPC_EXCP_VPU); \ |
6551 | return; \ |
6552 | } \ |
6553 | gen_set_access_type(ctx, ACCESS_INT); \ |
6554 | EA = tcg_temp_new()tcg_temp_new_i32(); \ |
6555 | gen_addr_reg_index(ctx, EA); \ |
6556 | rs = gen_avr_ptr(rS(ctx->opcode)); \ |
6557 | gen_helper_stve##name(cpu_env, rs, EA); \ |
6558 | tcg_temp_freetcg_temp_free_i32(EA); \ |
6559 | tcg_temp_free_ptr(rs)tcg_temp_free_i64(__extension__ ({ TCGv_i64 make_tcgv_tmp = { ((rs).iptr)}; make_tcgv_tmp;})); \ |
6560 | } |
6561 | |
6562 | GEN_VR_LDX(lvx, 0x07, 0x03){ .opc1 = 0x1F, .opc2 = 0x07, .opc3 = 0x03, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_lvx, }, .oname = "lvx", }; |
6563 | /* As we don't emulate the cache, lvxl is strictly equivalent to lvx */
6564 | GEN_VR_LDX(lvxl, 0x07, 0x0B){ .opc1 = 0x1F, .opc2 = 0x07, .opc3 = 0x0B, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_lvxl, }, .oname = "lvxl", }; |
6565 | |
6566 | GEN_VR_LVE(bx, 0x07, 0x00){ .opc1 = 0x1F, .opc2 = 0x07, .opc3 = 0x00, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_lvebx, }, .oname = "lvebx", }; |
6567 | GEN_VR_LVE(hx, 0x07, 0x01){ .opc1 = 0x1F, .opc2 = 0x07, .opc3 = 0x01, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_lvehx, }, .oname = "lvehx", }; |
6568 | GEN_VR_LVE(wx, 0x07, 0x02){ .opc1 = 0x1F, .opc2 = 0x07, .opc3 = 0x02, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_lvewx, }, .oname = "lvewx", }; |
6569 | |
6570 | GEN_VR_STX(svx, 0x07, 0x07){ .opc1 = 0x1F, .opc2 = 0x07, .opc3 = 0x07, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_stsvx, }, .oname = "stsvx", }; |
6571 | /* As we don't emulate the cache, stvxl is strictly equivalent to stvx */
6572 | GEN_VR_STX(svxl, 0x07, 0x0F){ .opc1 = 0x1F, .opc2 = 0x07, .opc3 = 0x0F, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_stsvxl, }, .oname = "stsvxl", }; |
6573 | |
6574 | GEN_VR_STVE(bx, 0x07, 0x04){ .opc1 = 0x1F, .opc2 = 0x07, .opc3 = 0x04, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_stvebx, }, .oname = "stvebx", }; |
6575 | GEN_VR_STVE(hx, 0x07, 0x05){ .opc1 = 0x1F, .opc2 = 0x07, .opc3 = 0x05, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_stvehx, }, .oname = "stvehx", }; |
6576 | GEN_VR_STVE(wx, 0x07, 0x06){ .opc1 = 0x1F, .opc2 = 0x07, .opc3 = 0x06, .pad = { 0, }, .handler = { .inval1 = 0x00000001, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_stvewx, }, .oname = "stvewx", }; |
6577 | |
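     | /*
     |  * lvsl and lvsr build the permute control vector used for unaligned
     |  * accesses; only the low four bits of the effective address matter, and
     |  * the actual vector construction is done in the helpers.
     |  */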
6578 | static void gen_lvsl(DisasContext *ctx) |
6579 | { |
6580 | TCGv_ptr rd; |
6581 | TCGvTCGv_i32 EA; |
6582 | if (unlikely(!ctx->altivec_enabled)__builtin_expect(!!(!ctx->altivec_enabled), 0)) { |
6583 | gen_exception(ctx, POWERPC_EXCP_VPU); |
6584 | return; |
6585 | } |
6586 | EA = tcg_temp_new()tcg_temp_new_i32(); |
6587 | gen_addr_reg_index(ctx, EA); |
6588 | rd = gen_avr_ptr(rD(ctx->opcode)); |
6589 | gen_helper_lvsl(rd, EA); |
6590 | tcg_temp_freetcg_temp_free_i32(EA); |
6591 | tcg_temp_free_ptr(rd)tcg_temp_free_i64(__extension__ ({ TCGv_i64 make_tcgv_tmp = { ((rd).iptr)}; make_tcgv_tmp;})); |
6592 | } |
6593 | |
6594 | static void gen_lvsr(DisasContext *ctx) |
6595 | { |
6596 | TCGv_ptr rd; |
6597 | TCGvTCGv_i32 EA; |
6598 | if (unlikely(!ctx->altivec_enabled)__builtin_expect(!!(!ctx->altivec_enabled), 0)) { |
6599 | gen_exception(ctx, POWERPC_EXCP_VPU); |
6600 | return; |
6601 | } |
6602 | EA = tcg_temp_new()tcg_temp_new_i32(); |
6603 | gen_addr_reg_index(ctx, EA); |
6604 | rd = gen_avr_ptr(rD(ctx->opcode)); |
6605 | gen_helper_lvsr(rd, EA); |
6606 | tcg_temp_freetcg_temp_free_i32(EA); |
6607 | tcg_temp_free_ptr(rd)tcg_temp_free_i64(__extension__ ({ TCGv_i64 make_tcgv_tmp = { ((rd).iptr)}; make_tcgv_tmp;})); |
6608 | } |
6609 | |
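     | /*
     |  * VSCR is stored as a 32-bit field in the CPU state.  mfvscr builds the
     |  * result inline: the high doubleword of the target VR is zeroed and VSCR
     |  * is zero-extended into the low doubleword.  mtvscr goes through a helper,
     |  * which picks the 32-bit VSCR value out of the source register.
     |  */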
6610 | static void gen_mfvscr(DisasContext *ctx) |
6611 | { |
6612 | TCGv_i32 t; |
6613 | if (unlikely(!ctx->altivec_enabled)__builtin_expect(!!(!ctx->altivec_enabled), 0)) { |
6614 | gen_exception(ctx, POWERPC_EXCP_VPU); |
6615 | return; |
6616 | } |
6617 | tcg_gen_movi_i64(cpu_avrh[rD(ctx->opcode)], 0); |
6618 | t = tcg_temp_new_i32(); |
6619 | tcg_gen_ld_i32(t, cpu_env, offsetof(CPUPPCState, vscr)__builtin_offsetof(CPUPPCState, vscr)); |
6620 | tcg_gen_extu_i32_i64(cpu_avrl[rD(ctx->opcode)], t); |
6621 | tcg_temp_free_i32(t); |
6622 | } |
6623 | |
6624 | static void gen_mtvscr(DisasContext *ctx) |
6625 | { |
6626 | TCGv_ptr p; |
6627 | if (unlikely(!ctx->altivec_enabled)__builtin_expect(!!(!ctx->altivec_enabled), 0)) { |
6628 | gen_exception(ctx, POWERPC_EXCP_VPU); |
6629 | return; |
6630 | } |
6631 | p = gen_avr_ptr(rD(ctx->opcode)); |
6632 | gen_helper_mtvscr(cpu_env, p); |
6633 | tcg_temp_free_ptr(p)tcg_temp_free_i64(__extension__ ({ TCGv_i64 make_tcgv_tmp = { ((p).iptr)}; make_tcgv_tmp;})); |
6634 | } |
6635 | |
6636 | /* Logical operations */ |
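     | /*
     |  * The bitwise operations need no helper: each 128-bit result is produced
     |  * by applying the corresponding 64-bit TCG op to the high and low halves
     |  * of the source registers.
     |  */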
6637 | #define GEN_VX_LOGICAL(name, tcg_op, opc2, opc3){ .opc1 = 0x04, .opc2 = opc2, .opc3 = opc3, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_name, }, .oname = "name", } \ |
6638 | static void glue(gen_, name)gen_name(DisasContext *ctx) \ |
6639 | { \ |
6640 | if (unlikely(!ctx->altivec_enabled)__builtin_expect(!!(!ctx->altivec_enabled), 0)) { \ |
6641 | gen_exception(ctx, POWERPC_EXCP_VPU); \ |
6642 | return; \ |
6643 | } \ |
6644 | tcg_op(cpu_avrh[rD(ctx->opcode)], cpu_avrh[rA(ctx->opcode)], cpu_avrh[rB(ctx->opcode)]); \ |
6645 | tcg_op(cpu_avrl[rD(ctx->opcode)], cpu_avrl[rA(ctx->opcode)], cpu_avrl[rB(ctx->opcode)]); \ |
6646 | } |
6647 | |
6648 | GEN_VX_LOGICAL(vand, tcg_gen_and_i64, 2, 16){ .opc1 = 0x04, .opc2 = 2, .opc3 = 16, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vand, }, .oname = "vand", }; |
6649 | GEN_VX_LOGICAL(vandc, tcg_gen_andc_i64, 2, 17){ .opc1 = 0x04, .opc2 = 2, .opc3 = 17, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vandc, }, .oname = "vandc", }; |
6650 | GEN_VX_LOGICAL(vor, tcg_gen_or_i64, 2, 18){ .opc1 = 0x04, .opc2 = 2, .opc3 = 18, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vor, }, .oname = "vor", }; |
6651 | GEN_VX_LOGICAL(vxor, tcg_gen_xor_i64, 2, 19){ .opc1 = 0x04, .opc2 = 2, .opc3 = 19, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vxor, }, .oname = "vxor", }; |
6652 | GEN_VX_LOGICAL(vnor, tcg_gen_nor_i64, 2, 20){ .opc1 = 0x04, .opc2 = 2, .opc3 = 20, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vnor, }, .oname = "vnor", }; |
6653 | |
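     | /*
     |  * GEN_VXFORM emits a plain three-operand helper call on AVR pointers;
     |  * GEN_VXFORM_ENV additionally passes cpu_env for helpers that need access
     |  * to the CPU state, e.g. the saturating forms below, which have to update
     |  * VSCR[SAT].
     |  */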
6654 | #define GEN_VXFORM(name, opc2, opc3){ .opc1 = 0x04, .opc2 = opc2, .opc3 = opc3, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_name, }, .oname = "name", } \ |
6655 | static void glue(gen_, name)gen_name(DisasContext *ctx) \ |
6656 | { \ |
6657 | TCGv_ptr ra, rb, rd; \ |
6658 | if (unlikely(!ctx->altivec_enabled)__builtin_expect(!!(!ctx->altivec_enabled), 0)) { \ |
6659 | gen_exception(ctx, POWERPC_EXCP_VPU); \ |
6660 | return; \ |
6661 | } \ |
6662 | ra = gen_avr_ptr(rA(ctx->opcode)); \ |
6663 | rb = gen_avr_ptr(rB(ctx->opcode)); \ |
6664 | rd = gen_avr_ptr(rD(ctx->opcode)); \ |
6665 | gen_helper_##name (rd, ra, rb); \ |
6666 | tcg_temp_free_ptr(ra)tcg_temp_free_i64(__extension__ ({ TCGv_i64 make_tcgv_tmp = { ((ra).iptr)}; make_tcgv_tmp;})); \ |
6667 | tcg_temp_free_ptr(rb)tcg_temp_free_i64(__extension__ ({ TCGv_i64 make_tcgv_tmp = { ((rb).iptr)}; make_tcgv_tmp;})); \ |
6668 | tcg_temp_free_ptr(rd)tcg_temp_free_i64(__extension__ ({ TCGv_i64 make_tcgv_tmp = { ((rd).iptr)}; make_tcgv_tmp;})); \ |
6669 | } |
6670 | |
6671 | #define GEN_VXFORM_ENV(name, opc2, opc3)static void gen_name(DisasContext *ctx) { TCGv_ptr ra, rb, rd ; if (__builtin_expect(!!(!ctx->altivec_enabled), 0)) { gen_exception (ctx, POWERPC_EXCP_VPU); return; } ra = gen_avr_ptr(rA(ctx-> opcode)); rb = gen_avr_ptr(rB(ctx->opcode)); rd = gen_avr_ptr (rD(ctx->opcode)); gen_helper_name(cpu_env, rd, ra, rb); tcg_temp_free_i64 (__extension__ ({ TCGv_i64 make_tcgv_tmp = {((ra).iptr)}; make_tcgv_tmp ;})); tcg_temp_free_i64(__extension__ ({ TCGv_i64 make_tcgv_tmp = {((rb).iptr)}; make_tcgv_tmp;})); tcg_temp_free_i64(__extension__ ({ TCGv_i64 make_tcgv_tmp = {((rd).iptr)}; make_tcgv_tmp;})) ; } \ |
6672 | static void glue(gen_, name)gen_name(DisasContext *ctx) \ |
6673 | { \ |
6674 | TCGv_ptr ra, rb, rd; \ |
6675 | if (unlikely(!ctx->altivec_enabled)__builtin_expect(!!(!ctx->altivec_enabled), 0)) { \ |
6676 | gen_exception(ctx, POWERPC_EXCP_VPU); \ |
6677 | return; \ |
6678 | } \ |
6679 | ra = gen_avr_ptr(rA(ctx->opcode)); \ |
6680 | rb = gen_avr_ptr(rB(ctx->opcode)); \ |
6681 | rd = gen_avr_ptr(rD(ctx->opcode)); \ |
6682 | gen_helper_##name(cpu_env, rd, ra, rb); \ |
6683 | tcg_temp_free_ptr(ra)tcg_temp_free_i64(__extension__ ({ TCGv_i64 make_tcgv_tmp = { ((ra).iptr)}; make_tcgv_tmp;})); \ |
6684 | tcg_temp_free_ptr(rb)tcg_temp_free_i64(__extension__ ({ TCGv_i64 make_tcgv_tmp = { ((rb).iptr)}; make_tcgv_tmp;})); \ |
6685 | tcg_temp_free_ptr(rd)tcg_temp_free_i64(__extension__ ({ TCGv_i64 make_tcgv_tmp = { ((rd).iptr)}; make_tcgv_tmp;})); \ |
6686 | } |
6687 | |
6688 | GEN_VXFORM(vaddubm, 0, 0){ .opc1 = 0x04, .opc2 = 0, .opc3 = 0, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vaddubm, }, .oname = "vaddubm", }; |
6689 | GEN_VXFORM(vadduhm, 0, 1){ .opc1 = 0x04, .opc2 = 0, .opc3 = 1, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vadduhm, }, .oname = "vadduhm", }; |
6690 | GEN_VXFORM(vadduwm, 0, 2){ .opc1 = 0x04, .opc2 = 0, .opc3 = 2, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vadduwm, }, .oname = "vadduwm", }; |
6691 | GEN_VXFORM(vsububm, 0, 16){ .opc1 = 0x04, .opc2 = 0, .opc3 = 16, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vsububm, }, .oname = "vsububm", }; |
6692 | GEN_VXFORM(vsubuhm, 0, 17){ .opc1 = 0x04, .opc2 = 0, .opc3 = 17, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vsubuhm, }, .oname = "vsubuhm", }; |
6693 | GEN_VXFORM(vsubuwm, 0, 18){ .opc1 = 0x04, .opc2 = 0, .opc3 = 18, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vsubuwm, }, .oname = "vsubuwm", }; |
6694 | GEN_VXFORM(vmaxub, 1, 0){ .opc1 = 0x04, .opc2 = 1, .opc3 = 0, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vmaxub, }, .oname = "vmaxub", }; |
6695 | GEN_VXFORM(vmaxuh, 1, 1){ .opc1 = 0x04, .opc2 = 1, .opc3 = 1, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vmaxuh, }, .oname = "vmaxuh", }; |
6696 | GEN_VXFORM(vmaxuw, 1, 2){ .opc1 = 0x04, .opc2 = 1, .opc3 = 2, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vmaxuw, }, .oname = "vmaxuw", }; |
6697 | GEN_VXFORM(vmaxsb, 1, 4){ .opc1 = 0x04, .opc2 = 1, .opc3 = 4, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vmaxsb, }, .oname = "vmaxsb", }; |
6698 | GEN_VXFORM(vmaxsh, 1, 5){ .opc1 = 0x04, .opc2 = 1, .opc3 = 5, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vmaxsh, }, .oname = "vmaxsh", }; |
6699 | GEN_VXFORM(vmaxsw, 1, 6){ .opc1 = 0x04, .opc2 = 1, .opc3 = 6, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vmaxsw, }, .oname = "vmaxsw", }; |
6700 | GEN_VXFORM(vminub, 1, 8){ .opc1 = 0x04, .opc2 = 1, .opc3 = 8, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vminub, }, .oname = "vminub", }; |
6701 | GEN_VXFORM(vminuh, 1, 9){ .opc1 = 0x04, .opc2 = 1, .opc3 = 9, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vminuh, }, .oname = "vminuh", }; |
6702 | GEN_VXFORM(vminuw, 1, 10){ .opc1 = 0x04, .opc2 = 1, .opc3 = 10, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vminuw, }, .oname = "vminuw", }; |
6703 | GEN_VXFORM(vminsb, 1, 12){ .opc1 = 0x04, .opc2 = 1, .opc3 = 12, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vminsb, }, .oname = "vminsb", }; |
6704 | GEN_VXFORM(vminsh, 1, 13){ .opc1 = 0x04, .opc2 = 1, .opc3 = 13, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vminsh, }, .oname = "vminsh", }; |
6705 | GEN_VXFORM(vminsw, 1, 14){ .opc1 = 0x04, .opc2 = 1, .opc3 = 14, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vminsw, }, .oname = "vminsw", }; |
6706 | GEN_VXFORM(vavgub, 1, 16){ .opc1 = 0x04, .opc2 = 1, .opc3 = 16, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vavgub, }, .oname = "vavgub", }; |
6707 | GEN_VXFORM(vavguh, 1, 17){ .opc1 = 0x04, .opc2 = 1, .opc3 = 17, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vavguh, }, .oname = "vavguh", }; |
6708 | GEN_VXFORM(vavguw, 1, 18){ .opc1 = 0x04, .opc2 = 1, .opc3 = 18, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vavguw, }, .oname = "vavguw", }; |
6709 | GEN_VXFORM(vavgsb, 1, 20){ .opc1 = 0x04, .opc2 = 1, .opc3 = 20, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vavgsb, }, .oname = "vavgsb", }; |
6710 | GEN_VXFORM(vavgsh, 1, 21){ .opc1 = 0x04, .opc2 = 1, .opc3 = 21, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vavgsh, }, .oname = "vavgsh", }; |
6711 | GEN_VXFORM(vavgsw, 1, 22){ .opc1 = 0x04, .opc2 = 1, .opc3 = 22, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vavgsw, }, .oname = "vavgsw", }; |
6712 | GEN_VXFORM(vmrghb, 6, 0){ .opc1 = 0x04, .opc2 = 6, .opc3 = 0, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vmrghb, }, .oname = "vmrghb", }; |
6713 | GEN_VXFORM(vmrghh, 6, 1){ .opc1 = 0x04, .opc2 = 6, .opc3 = 1, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vmrghh, }, .oname = "vmrghh", }; |
6714 | GEN_VXFORM(vmrghw, 6, 2){ .opc1 = 0x04, .opc2 = 6, .opc3 = 2, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vmrghw, }, .oname = "vmrghw", }; |
6715 | GEN_VXFORM(vmrglb, 6, 4){ .opc1 = 0x04, .opc2 = 6, .opc3 = 4, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vmrglb, }, .oname = "vmrglb", }; |
6716 | GEN_VXFORM(vmrglh, 6, 5){ .opc1 = 0x04, .opc2 = 6, .opc3 = 5, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vmrglh, }, .oname = "vmrglh", }; |
6717 | GEN_VXFORM(vmrglw, 6, 6){ .opc1 = 0x04, .opc2 = 6, .opc3 = 6, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vmrglw, }, .oname = "vmrglw", }; |
6718 | GEN_VXFORM(vmuloub, 4, 0){ .opc1 = 0x04, .opc2 = 4, .opc3 = 0, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vmuloub, }, .oname = "vmuloub", }; |
6719 | GEN_VXFORM(vmulouh, 4, 1){ .opc1 = 0x04, .opc2 = 4, .opc3 = 1, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vmulouh, }, .oname = "vmulouh", }; |
6720 | GEN_VXFORM(vmulosb, 4, 4){ .opc1 = 0x04, .opc2 = 4, .opc3 = 4, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vmulosb, }, .oname = "vmulosb", }; |
6721 | GEN_VXFORM(vmulosh, 4, 5){ .opc1 = 0x04, .opc2 = 4, .opc3 = 5, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vmulosh, }, .oname = "vmulosh", }; |
6722 | GEN_VXFORM(vmuleub, 4, 8){ .opc1 = 0x04, .opc2 = 4, .opc3 = 8, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vmuleub, }, .oname = "vmuleub", }; |
6723 | GEN_VXFORM(vmuleuh, 4, 9){ .opc1 = 0x04, .opc2 = 4, .opc3 = 9, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vmuleuh, }, .oname = "vmuleuh", }; |
6724 | GEN_VXFORM(vmulesb, 4, 12){ .opc1 = 0x04, .opc2 = 4, .opc3 = 12, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vmulesb, }, .oname = "vmulesb", }; |
6725 | GEN_VXFORM(vmulesh, 4, 13){ .opc1 = 0x04, .opc2 = 4, .opc3 = 13, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vmulesh, }, .oname = "vmulesh", }; |
6726 | GEN_VXFORM(vslb, 2, 4){ .opc1 = 0x04, .opc2 = 2, .opc3 = 4, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vslb, }, .oname = "vslb", }; |
6727 | GEN_VXFORM(vslh, 2, 5){ .opc1 = 0x04, .opc2 = 2, .opc3 = 5, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vslh, }, .oname = "vslh", }; |
6728 | GEN_VXFORM(vslw, 2, 6){ .opc1 = 0x04, .opc2 = 2, .opc3 = 6, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vslw, }, .oname = "vslw", }; |
6729 | GEN_VXFORM(vsrb, 2, 8){ .opc1 = 0x04, .opc2 = 2, .opc3 = 8, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vsrb, }, .oname = "vsrb", }; |
6730 | GEN_VXFORM(vsrh, 2, 9){ .opc1 = 0x04, .opc2 = 2, .opc3 = 9, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vsrh, }, .oname = "vsrh", }; |
6731 | GEN_VXFORM(vsrw, 2, 10){ .opc1 = 0x04, .opc2 = 2, .opc3 = 10, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vsrw, }, .oname = "vsrw", }; |
6732 | GEN_VXFORM(vsrab, 2, 12){ .opc1 = 0x04, .opc2 = 2, .opc3 = 12, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vsrab, }, .oname = "vsrab", }; |
6733 | GEN_VXFORM(vsrah, 2, 13){ .opc1 = 0x04, .opc2 = 2, .opc3 = 13, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vsrah, }, .oname = "vsrah", }; |
6734 | GEN_VXFORM(vsraw, 2, 14){ .opc1 = 0x04, .opc2 = 2, .opc3 = 14, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vsraw, }, .oname = "vsraw", }; |
6735 | GEN_VXFORM(vslo, 6, 16){ .opc1 = 0x04, .opc2 = 6, .opc3 = 16, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vslo, }, .oname = "vslo", }; |
6736 | GEN_VXFORM(vsro, 6, 17){ .opc1 = 0x04, .opc2 = 6, .opc3 = 17, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vsro, }, .oname = "vsro", }; |
6737 | GEN_VXFORM(vaddcuw, 0, 6){ .opc1 = 0x04, .opc2 = 0, .opc3 = 6, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vaddcuw, }, .oname = "vaddcuw", }; |
6738 | GEN_VXFORM(vsubcuw, 0, 22){ .opc1 = 0x04, .opc2 = 0, .opc3 = 22, .pad = { 0, }, .handler = { .inval1 = 0x00000000, .type = PPC_ALTIVEC, .type2 = PPC_NONE , .handler = &gen_vsubcuw, }, .oname = "vsubcuw", }; |
6739 | GEN_VXFORM_ENV(vaddubs, 0, 8)static void gen_vaddubs(DisasContext *ctx) { TCGv_ptr ra, rb, rd; if (__builtin_expect(!!(!ctx->altivec_enabled), 0)) { gen_exception(ctx, POWERPC_EXCP_VPU); return; } ra = gen_avr_ptr (rA(ctx->opcode)); rb = gen_avr_ptr(rB(ctx->opcode)); rd = gen_avr_ptr(rD(ctx->opcode)); gen_helper_vaddubs(cpu_env , rd, ra, rb); tcg_temp_free_i64(__extension__ ({ TCGv_i64 make_tcgv_tmp = {((ra).iptr)}; make_tcgv_tmp;})); tcg_temp_free_i64(__extension__ ({ TCGv_i64 make_tcgv_tmp = {((rb).iptr)}; make_tcgv_tmp;})) ; tcg_temp_free_i64(__extension__ ({ TCGv_i64 make_tcgv_tmp = {((rd).iptr)}; make_tcgv_tmp;})); }; |
6740 | GEN_VXFORM_ENV(vadduhs, 0, 9)static void gen_vadduhs(DisasContext *ctx) { TCGv_ptr ra, rb, rd; if (__builtin_expect(!!(!ctx->altivec_enabled), 0)) { gen_exception(ctx, POWERPC_EXCP_VPU); return; } ra = gen_avr_ptr (rA(ctx->opcode)); rb = gen_avr_ptr(rB(ctx->opcode)); rd = gen_avr_ptr(rD(ctx->opcode)); gen_helper_vadduhs(cpu_env , rd, ra, rb); tcg_temp_free_i64(__extension__ ({ TCGv_i64 make_tcgv_tmp = {((ra).iptr)}; make_tcgv_tmp;})); tcg_temp_free_i64(__extension__ ({ TCGv_i64 make_tcgv_tmp = {((rb).iptr)}; make_tcgv_tmp;})) ; tcg_temp_free_i64(__extension__ ({ TCGv_i64 make_tcgv_tmp = {((rd).iptr)}; make_tcgv_tmp;})); }; |
6741 | GEN_VXFORM_ENV(vadduws, 0, 10)static void gen_vadduws(DisasContext *ctx) { TCGv_ptr ra, rb, rd; if (__builtin_expect(!!(!ctx->altivec_enabled), 0)) { gen_exception(ctx, POWERPC_EXCP_VPU); return; } ra = gen_avr_ptr (rA(ctx->opcode)); rb = gen_avr_ptr(rB(ctx->opcode)); rd = gen_avr_ptr(rD(ctx->opcode)); gen_helper_vadduws(cpu_env , rd, ra, rb); tcg_temp_free_i64(__extension__ ({ TCGv_i64 make_tcgv_tmp = {((ra).iptr)}; make_tcgv_tmp;})); tcg_temp_free_i64(__extension__ ({ TCGv_i64 make_tcgv_tmp = {((rb).iptr)}; make_tcgv_tmp;})) ; tcg_temp_free_i64(__extension__ ({ TCGv_i64 make_tcgv_tmp = {((rd).iptr)}; make_tcgv_tmp;})); }; |
6742 | GEN_VXFORM_ENV(vaddsbs, 0, 12)static void gen_vaddsbs(DisasContext *ctx) { TCGv_ptr ra, rb, rd; if (__builtin_expect(!!(!ctx->altivec_enabled), 0)) { gen_exception(ctx, POWERPC_EXCP_VPU); return; } ra = gen_avr_ptr (rA(ctx->opcode)); rb = gen_avr_ptr(rB(ctx->opcode)); rd = gen_avr_ptr(rD(ctx->opcode)); gen_helper_vaddsbs(cpu_env , rd, ra, rb); tcg_temp_free_i64(__extension__ ({ TCGv_i64 make_tcgv_tmp = {((ra).iptr)}; make_tcgv_tmp;})); tcg_temp_free_i64(__extension__ ({ TCGv_i64 make_tcgv_tmp = {((rb).iptr)}; make_tcgv_tmp;})) ; tcg_temp_free_i64(__extension__ ({ TCGv_i64 make_tcgv_tmp = {((rd).iptr)}; make_tcgv_tmp;})); }; |
6743 | GEN_VXFORM_ENV(vaddshs, 0, 13)static void gen_vaddshs(DisasContext *ctx) { TCGv_ptr ra, rb, rd; if (__builtin_expect(!!(!ctx->altivec_enabled), 0)) { gen_exception(ctx, POWERPC_EXCP_VPU); return; } ra = gen_avr_ptr (rA(ctx->opcode)); rb = gen_avr_ptr(rB(ctx->opcode)); rd = gen_avr_ptr(rD(ctx->opcode)); gen_helper_vaddshs(cpu_env , rd, ra, rb); tcg_temp_free_i64(__extension__ ({ TCGv_i64 make_tcgv_tmp = {((ra).iptr)}; make_tcgv_tmp;})); tcg_temp_free_i64(__extension__ ({ TCGv_i64 make_tcgv_tmp = {((rb).iptr)}; make_tcgv_tmp;})) ; tcg_temp_free_i64(__extension__ ({ TCGv_i64 make_tcgv_tmp = {((rd).iptr)}; make_tcgv_tmp;})); }; |
6744 | GEN_VXFORM_ENV(vaddsws, 0, 14)static void gen_vaddsws(DisasContext *ctx) { TCGv_ptr ra, rb, rd; if (__builtin_expect(!!(!ctx->altivec_enabled), 0)) { gen_exception(ctx, POWERPC_EXCP_VPU); return; } ra = gen_avr_ptr (rA(ctx->opcode)); rb = gen_avr_ptr(rB(ctx->opcode)); rd = gen_avr_ptr(rD(ctx->opcode)); gen_helper_vaddsws(cpu_env , rd, ra, rb); tcg_temp_free_i64(__extension__ ({ TCGv_i64 make_tcgv_tmp = {((ra).iptr)}; make_tcgv_tmp;})); tcg_temp_free_i64(__extension__ ({ TCGv_i64 make_tcgv_tmp = {((rb).iptr)}; make_tcgv_tmp;})) ; tcg_temp_free_i64(__extension__ ({ TCGv_i64 make_tcgv_tmp = {((rd).iptr)}; make_tcgv_tmp;})); }; |
6745 | GEN_VXFORM_ENV(vsububs, 0, 24)static void gen_vsububs(DisasContext *ctx) { TCGv_ptr ra, rb, rd; if (__builtin_expect(!!(!ctx->altivec_enabled), 0)) { gen_exception(ctx, POWERPC_EXCP_VPU); return; } ra = gen_avr_ptr (rA(ctx->opcode)); rb = gen_avr_ptr(rB(ctx->opcode)); rd = gen_avr_ptr(rD(ctx->opcode)); gen_helper_vsububs(cpu_env , rd, ra, rb); tcg_temp_free_i64(__extension__ ({ TCGv_i64 make_tcgv_tmp = {((ra).iptr)}; make_tcgv_tmp;})); tcg_temp_free_i64(__extension__ ({ TCGv_i64 make_tcgv_tmp = {((rb).iptr)}; make_tcgv_tmp;})) ; tcg_temp_free_i64(__extension__ ({ TCGv_i64 make_tcgv_tmp = {((rd).iptr)}; make_tcgv_tmp;})); }; |
6746 | GEN_VXFORM_ENV(vsubuhs, 0, 25)static void gen_vsubuhs(DisasContext *ctx) { TCGv_ptr ra, rb, rd; if (__builtin_expect(!!(!ctx->altivec_enabled), 0)) { gen_exception(ctx, POWERPC_EXCP_VPU); return; } ra = gen_avr_ptr (rA(ctx->opcode)); rb = gen_avr_ptr(rB(ctx->opcode)); rd = gen_avr_ptr(rD(ctx->opcode)); gen_helper_vsubuhs(cpu_env , rd, ra, rb); tcg_temp_free_i64(__extension__ ({ TCGv_i64 make_tcgv_tmp = {((ra).iptr)}; make_tcgv_tmp;})); tcg_temp_free_i64(__extension__ ({ TCGv_i64 make_tcgv_tmp = {((rb).iptr)}; make_tcgv_tmp;})) ; tcg_temp_free_i64(__extension__ ({ TCGv_i64 make_tcgv_tmp = {((rd).iptr)}; make_tcgv_tmp;})); }; |
6747 | GEN_VXFORM_ENV(vsubuws, 0, 26)
6748 | GEN_VXFORM_ENV(vsubsbs, 0, 28)
6749 | GEN_VXFORM_ENV(vsubshs, 0, 29)
6750 | GEN_VXFORM_ENV(vsubsws, 0, 30)
6751 | GEN_VXFORM(vrlb, 2, 0)
6752 | GEN_VXFORM(vrlh, 2, 1)
6753 | GEN_VXFORM(vrlw, 2, 2)
6754 | GEN_VXFORM(vsl, 2, 7)
6755 | GEN_VXFORM(vsr, 2, 11)
6756 | GEN_VXFORM_ENV(vpkuhum, 7, 0)
6757 | GEN_VXFORM_ENV(vpkuwum, 7, 1)
6758 | GEN_VXFORM_ENV(vpkuhus, 7, 2)
6759 | GEN_VXFORM_ENV(vpkuwus, 7, 3)
6760 | GEN_VXFORM_ENV(vpkshus, 7, 4)
6761 | GEN_VXFORM_ENV(vpkswus, 7, 5)
6762 | GEN_VXFORM_ENV(vpkshss, 7, 6)
6763 | GEN_VXFORM_ENV(vpkswss, 7, 7)
6764 | GEN_VXFORM(vpkpx, 7, 12)
6765 | GEN_VXFORM_ENV(vsum4ubs, 4, 24)
6766 | GEN_VXFORM_ENV(vsum4sbs, 4, 28)
6767 | GEN_VXFORM_ENV(vsum4shs, 4, 25)
6768 | GEN_VXFORM_ENV(vsum2sws, 4, 26)
6769 | GEN_VXFORM_ENV(vsumsws, 4, 30)
6770 | GEN_VXFORM_ENV(vaddfp, 5, 0)
6771 | GEN_VXFORM_ENV(vsubfp, 5, 1)
6772 | GEN_VXFORM_ENV(vmaxfp, 5, 16)
6773 | GEN_VXFORM_ENV(vminfp, 5, 17)
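/*
 * Each GEN_VXFORM_ENV(name, opc2, opc3) invocation above expands to a small
 * translation handler; for example, GEN_VXFORM_ENV(vaddfp, 5, 0) yields
 * roughly:
 *
 *     static void gen_vaddfp(DisasContext *ctx)
 *     {
 *         TCGv_ptr ra, rb, rd;
 *         if (unlikely(!ctx->altivec_enabled)) {
 *             gen_exception(ctx, POWERPC_EXCP_VPU);
 *             return;
 *         }
 *         ra = gen_avr_ptr(rA(ctx->opcode));
 *         rb = gen_avr_ptr(rB(ctx->opcode));
 *         rd = gen_avr_ptr(rD(ctx->opcode));
 *         gen_helper_vaddfp(cpu_env, rd, ra, rb);
 *         tcg_temp_free_ptr(ra);
 *         tcg_temp_free_ptr(rb);
 *         tcg_temp_free_ptr(rd);
 *     }
 *
 * i.e. raise the "vector unit unavailable" (VPU) exception when AltiVec is
 * disabled, build TCGv_ptr references to the three AVR operands named in the
 * opcode, and call the per-instruction helper.  The _ENV variants also pass
 * cpu_env, presumably because those helpers need access to env state (e.g.
 * VSCR saturation bits or FP status); the plain GEN_VXFORM handlers call
 * their helper without it.
 */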
6774 | |
6775 | #define GEN_VXRFORM1(opname, name, str, opc2, opc3)                     \
6776 | static void glue(gen_, name)(DisasContext *ctx)                         \
6777 |     {                                                                   \
6778 |         TCGv_ptr ra, rb, rd;                                            \
6779 |         if (unlikely(!ctx->altivec_enabled)) {                          \
6780 |             gen_exception(ctx, POWERPC_EXCP_VPU);                       \
6781 |             return;                                                     \
6782 |         }                                                               \
6783 |         ra = gen_avr_ptr(rA(ctx->opcode));                              \
6784 |         rb = gen_avr_ptr(rB(ctx->opcode));                              \
6785 |         rd = gen_avr_ptr(rD(ctx->opcode));                              \
6786 |         gen_helper_##opname(cpu_env, rd, ra, rb);                       \
6787 |         tcg_temp_free_ptr(ra);                                          \
6788 |         tcg_temp_free_ptr(rb);                                          \
6789 |         tcg_temp_free_ptr(rd);                                          \
6790 |     }
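/*
 * GEN_VXRFORM1 generates one VXR-form (vector compare) handler in the same
 * shape as the VX-form generators above, forwarding cpu_env to
 * gen_helper_<opname>.  The GEN_VXRFORM wrapper below presumably instantiates
 * it twice per instruction: once for the plain compare and once for the
 * record (".") form, whose handler name gets a trailing underscore and whose
 * opc3 has bit 4 set (opc3 | 0x10).  As a hypothetical example,
 *
 *     GEN_VXRFORM(vcmpequb, 3, 0)
 *
 * would emit gen_vcmpequb() and gen_vcmpequb_(), later wired to the
 * "vcmpequb" and "vcmpequb." opcode-table entries.
 */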
6791 | |
6792 | #define GEN_VXRFORM(name, opc2, opc3)                                   \
6793 |