File: [Qemu by Fabrice Bellard] / qemu / target-sh4 / translate.c
Revision 1.1.1.4 (vendor branch), checked in Tue Apr 24 16:51:06 2018 UTC by root
Branches: qemu, MAIN
CVS tags: qemu0105, qemu0104, qemu0103, qemu0102, qemu0101, qemu0100, HEAD
qemu 0.10.0

    1: /*
    2:  *  SH4 translation
    3:  *
    4:  *  Copyright (c) 2005 Samuel Tardieu
    5:  *
    6:  * This library is free software; you can redistribute it and/or
    7:  * modify it under the terms of the GNU Lesser General Public
    8:  * License as published by the Free Software Foundation; either
    9:  * version 2 of the License, or (at your option) any later version.
   10:  *
   11:  * This library is distributed in the hope that it will be useful,
   12:  * but WITHOUT ANY WARRANTY; without even the implied warranty of
   13:  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   14:  * Lesser General Public License for more details.
   15:  *
   16:  * You should have received a copy of the GNU Lesser General Public
   17:  * License along with this library; if not, write to the Free Software
   18:  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
   19:  */
   20: #include <stdarg.h>
   21: #include <stdlib.h>
   22: #include <stdio.h>
   23: #include <string.h>
   24: #include <inttypes.h>
   25: #include <assert.h>
   26: 
   27: #define DEBUG_DISAS
   28: #define SH4_DEBUG_DISAS
   29: //#define SH4_SINGLE_STEP
   30: 
   31: #include "cpu.h"
   32: #include "exec-all.h"
   33: #include "disas.h"
   34: #include "tcg-op.h"
   35: #include "qemu-common.h"
   36: 
   37: #include "helper.h"
   38: #define GEN_HELPER 1
   39: #include "helper.h"
   40: 
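/* Translator-local state for the TranslationBlock being decoded: pc and
 * opcode track the current instruction, sr and fpscr are snapshots used for
 * static decisions (register-bank selection, FP precision/size), flags
 * carries the DELAY_SLOT* state, bstate takes one of the BS_* values below,
 * memidx selects the MMU index for loads/stores, delayed_pc holds a
 * statically-known branch target (or (uint32_t)-1 when it is only known at
 * run time) and features holds the SH_FEATURE_* bits of the CPU model. */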
   41: typedef struct DisasContext {
   42:     struct TranslationBlock *tb;
   43:     target_ulong pc;
   44:     uint32_t sr;
   45:     uint32_t fpscr;
   46:     uint16_t opcode;
   47:     uint32_t flags;
   48:     int bstate;
   49:     int memidx;
   50:     uint32_t delayed_pc;
   51:     int singlestep_enabled;
   52:     uint32_t features;
   53: } DisasContext;
   54: 
   55: #if defined(CONFIG_USER_ONLY)
   56: #define IS_USER(ctx) 1
   57: #else
   58: #define IS_USER(ctx) (!(ctx->sr & SR_MD))
   59: #endif
   60: 
   61: enum {
   62:     BS_NONE     = 0, /* We go out of the TB without reaching a branch or an
   63:                       * exception condition
   64:                       */
   65:     BS_STOP     = 1, /* We want to stop translation for any reason */
   66:     BS_BRANCH   = 2, /* We reached a branch condition     */
   67:     BS_EXCP     = 3, /* We reached an exception condition */
   68: };
   69: 
   70: /* global register indexes */
   71: static TCGv_ptr cpu_env;
   72: static TCGv cpu_gregs[24];
   73: static TCGv cpu_pc, cpu_sr, cpu_ssr, cpu_spc, cpu_gbr;
   74: static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
   75: static TCGv cpu_pr, cpu_fpscr, cpu_fpul, cpu_ldst;
   76: static TCGv cpu_fregs[32];
   77: 
   78: /* internal register indexes */
   79: static TCGv cpu_flags, cpu_delayed_pc;
   80: 
   81: #include "gen-icount.h"
   82: 
   83: static void sh4_translate_init(void)
   84: {
   85:     int i;
   86:     static int done_init = 0;
   87:     static const char * const gregnames[24] = {
   88:         "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
   89:         "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
   90:         "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
   91:         "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
   92:         "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
   93:     };
   94:     static const char * const fregnames[32] = {
   95:          "FPR0_BANK0",  "FPR1_BANK0",  "FPR2_BANK0",  "FPR3_BANK0",
   96:          "FPR4_BANK0",  "FPR5_BANK0",  "FPR6_BANK0",  "FPR7_BANK0",
   97:          "FPR8_BANK0",  "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
   98:         "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
   99:          "FPR0_BANK1",  "FPR1_BANK1",  "FPR2_BANK1",  "FPR3_BANK1",
  100:          "FPR4_BANK1",  "FPR5_BANK1",  "FPR6_BANK1",  "FPR7_BANK1",
  101:          "FPR8_BANK1",  "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
  102:         "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
  103:     };
  104: 
  105:     if (done_init)
  106:         return;
  107: 
  108:     cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
  109: 
  110:     for (i = 0; i < 24; i++)
  111:         cpu_gregs[i] = tcg_global_mem_new_i32(TCG_AREG0,
  112:                                               offsetof(CPUState, gregs[i]),
  113:                                               gregnames[i]);
  114: 
  115:     cpu_pc = tcg_global_mem_new_i32(TCG_AREG0,
  116:                                     offsetof(CPUState, pc), "PC");
  117:     cpu_sr = tcg_global_mem_new_i32(TCG_AREG0,
  118:                                     offsetof(CPUState, sr), "SR");
  119:     cpu_ssr = tcg_global_mem_new_i32(TCG_AREG0,
  120:                                      offsetof(CPUState, ssr), "SSR");
  121:     cpu_spc = tcg_global_mem_new_i32(TCG_AREG0,
  122:                                      offsetof(CPUState, spc), "SPC");
  123:     cpu_gbr = tcg_global_mem_new_i32(TCG_AREG0,
  124:                                      offsetof(CPUState, gbr), "GBR");
  125:     cpu_vbr = tcg_global_mem_new_i32(TCG_AREG0,
  126:                                      offsetof(CPUState, vbr), "VBR");
  127:     cpu_sgr = tcg_global_mem_new_i32(TCG_AREG0,
  128:                                      offsetof(CPUState, sgr), "SGR");
  129:     cpu_dbr = tcg_global_mem_new_i32(TCG_AREG0,
  130:                                      offsetof(CPUState, dbr), "DBR");
  131:     cpu_mach = tcg_global_mem_new_i32(TCG_AREG0,
  132:                                       offsetof(CPUState, mach), "MACH");
  133:     cpu_macl = tcg_global_mem_new_i32(TCG_AREG0,
  134:                                       offsetof(CPUState, macl), "MACL");
  135:     cpu_pr = tcg_global_mem_new_i32(TCG_AREG0,
  136:                                     offsetof(CPUState, pr), "PR");
  137:     cpu_fpscr = tcg_global_mem_new_i32(TCG_AREG0,
  138:                                        offsetof(CPUState, fpscr), "FPSCR");
  139:     cpu_fpul = tcg_global_mem_new_i32(TCG_AREG0,
  140:                                       offsetof(CPUState, fpul), "FPUL");
  141: 
  142:     cpu_flags = tcg_global_mem_new_i32(TCG_AREG0,
  143: 				       offsetof(CPUState, flags), "_flags_");
  144:     cpu_delayed_pc = tcg_global_mem_new_i32(TCG_AREG0,
  145: 					    offsetof(CPUState, delayed_pc),
  146: 					    "_delayed_pc_");
  147:     cpu_ldst = tcg_global_mem_new_i32(TCG_AREG0,
  148: 				      offsetof(CPUState, ldst), "_ldst_");
  149: 
  150:     for (i = 0; i < 32; i++)
  151:         cpu_fregs[i] = tcg_global_mem_new_i32(TCG_AREG0,
  152:                                               offsetof(CPUState, fregs[i]),
  153:                                               fregnames[i]);
  154: 
  155:     /* register helpers */
  156: #define GEN_HELPER 2
  157: #include "helper.h"
  158: 
  159:     done_init = 1;
  160: }
  161: 
  162: void cpu_dump_state(CPUState * env, FILE * f,
  163: 		    int (*cpu_fprintf) (FILE * f, const char *fmt, ...),
  164: 		    int flags)
  165: {
  166:     int i;
  167:     cpu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
  168: 		env->pc, env->sr, env->pr, env->fpscr);
  169:     cpu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
  170: 		env->spc, env->ssr, env->gbr, env->vbr);
  171:     cpu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
  172: 		env->sgr, env->dbr, env->delayed_pc, env->fpul);
  173:     for (i = 0; i < 24; i += 4) {
  174: 	cpu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
  175: 		    i, env->gregs[i], i + 1, env->gregs[i + 1],
  176: 		    i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
  177:     }
  178:     if (env->flags & DELAY_SLOT) {
  179: 	cpu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
  180: 		    env->delayed_pc);
  181:     } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
  182: 	cpu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
  183: 		    env->delayed_pc);
  184:     }
  185: }
  186: 
  187: static void cpu_sh4_reset(CPUSH4State * env)
  188: {
  189:     if (qemu_loglevel_mask(CPU_LOG_RESET)) {
  190:         qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
  191:         log_cpu_state(env, 0);
  192:     }
  193: 
  194: #if defined(CONFIG_USER_ONLY)
  195:     env->sr = 0;
  196: #else
  197:     env->sr = SR_MD | SR_RB | SR_BL | SR_I3 | SR_I2 | SR_I1 | SR_I0;
  198: #endif
  199:     env->vbr = 0;
  200:     env->pc = 0xA0000000;
  201: #if defined(CONFIG_USER_ONLY)
  202:     env->fpscr = FPSCR_PR; /* value for userspace according to the kernel */
  203:     set_float_rounding_mode(float_round_nearest_even, &env->fp_status); /* ?! */
  204: #else
  205:     env->fpscr = 0x00040001; /* CPU reset value according to SH4 manual */
  206:     set_float_rounding_mode(float_round_to_zero, &env->fp_status);
  207: #endif
  208:     env->mmucr = 0;
  209: }
  210: 
  211: typedef struct {
  212:     const char *name;
  213:     int id;
  214:     uint32_t pvr;
  215:     uint32_t prr;
  216:     uint32_t cvr;
  217:     uint32_t features;
  218: } sh4_def_t;
  219: 
  220: static sh4_def_t sh4_defs[] = {
  221:     {
  222: 	.name = "SH7750R",
  223: 	.id = SH_CPU_SH7750R,
  224: 	.pvr = 0x00050000,
  225: 	.prr = 0x00000100,
  226: 	.cvr = 0x00110000,
  227: 	.features = SH_FEATURE_BCR3_AND_BCR4,
  228:     }, {
  229: 	.name = "SH7751R",
  230: 	.id = SH_CPU_SH7751R,
  231: 	.pvr = 0x04050005,
  232: 	.prr = 0x00000113,
  233: 	.cvr = 0x00110000,	/* Neutered caches, should be 0x20480000 */
  234: 	.features = SH_FEATURE_BCR3_AND_BCR4,
  235:     }, {
  236: 	.name = "SH7785",
  237: 	.id = SH_CPU_SH7785,
  238: 	.pvr = 0x10300700,
  239: 	.prr = 0x00000200,
  240: 	.cvr = 0x71440211,
  241: 	.features = SH_FEATURE_SH4A,
  242:      },
  243: };
  244: 
  245: static const sh4_def_t *cpu_sh4_find_by_name(const char *name)
  246: {
  247:     int i;
  248: 
  249:     if (strcasecmp(name, "any") == 0)
  250: 	return &sh4_defs[0];
  251: 
  252:     for (i = 0; i < ARRAY_SIZE(sh4_defs); i++)
  253: 	if (strcasecmp(name, sh4_defs[i].name) == 0)
  254: 	    return &sh4_defs[i];
  255: 
  256:     return NULL;
  257: }
  258: 
  259: void sh4_cpu_list(FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
  260: {
  261:     int i;
  262: 
  263:     for (i = 0; i < ARRAY_SIZE(sh4_defs); i++)
  264: 	(*cpu_fprintf)(f, "%s\n", sh4_defs[i].name);
  265: }
  266: 
  267: static void cpu_sh4_register(CPUSH4State *env, const sh4_def_t *def)
  268: {
  269:     env->pvr = def->pvr;
  270:     env->prr = def->prr;
  271:     env->cvr = def->cvr;
  272:     env->id = def->id;
  273: }
  274: 
  275: CPUSH4State *cpu_sh4_init(const char *cpu_model)
  276: {
  277:     CPUSH4State *env;
  278:     const sh4_def_t *def;
  279: 
  280:     def = cpu_sh4_find_by_name(cpu_model);
  281:     if (!def)
  282: 	return NULL;
  283:     env = qemu_mallocz(sizeof(CPUSH4State));
  284:     env->features = def->features;
  285:     cpu_exec_init(env);
  286:     sh4_translate_init();
  287:     env->cpu_model_str = cpu_model;
  288:     cpu_sh4_reset(env);
  289:     cpu_sh4_register(env, def);
  290:     tlb_flush(env, 1);
  291:     return env;
  292: }
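/* A minimal usage sketch (illustrative only -- the caller and its error
 * handling are not part of this file): board setup code picks one of the
 * model names listed in sh4_defs below, e.g.
 *
 *     CPUSH4State *env = cpu_sh4_init("SH7751R");
 *     if (env == NULL) {
 *         fprintf(stderr, "Unable to find SH4 CPU definition\n");
 *         exit(1);
 *     }
 *
 * cpu_sh4_find_by_name() also accepts "any", which falls back to the first
 * entry of sh4_defs. */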
  293: 
  294: static void gen_goto_tb(DisasContext * ctx, int n, target_ulong dest)
  295: {
  296:     TranslationBlock *tb;
  297:     tb = ctx->tb;
  298: 
  299:     if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
  300: 	!ctx->singlestep_enabled) {
  301: 	/* Use a direct jump if in same page and singlestep not enabled */
  302:         tcg_gen_goto_tb(n);
  303:         tcg_gen_movi_i32(cpu_pc, dest);
  304:         tcg_gen_exit_tb((long) tb + n);
  305:     } else {
  306:         tcg_gen_movi_i32(cpu_pc, dest);
  307:         if (ctx->singlestep_enabled)
  308:             gen_helper_debug();
  309:         tcg_gen_exit_tb(0);
  310:     }
  311: }
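/* Informal sketch of the two code paths above: when the destination lies in
 * the same guest page as the current TB and single-stepping is off,
 * tcg_gen_goto_tb(n) plus tcg_gen_exit_tb((long)tb + n) emit a patchable
 * direct jump, letting the execution loop later chain this TB straight to
 * its successor (slot n is 0 or 1, one per possible successor).  Otherwise
 * the destination is written to cpu_pc and the TB exits with code 0, which
 * forces a fresh TB lookup (and a debug exception when single-stepping). */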
  312: 
  313: static void gen_jump(DisasContext * ctx)
  314: {
  315:     if (ctx->delayed_pc == (uint32_t) - 1) {
  316: 	/* Target is not statically known; it necessarily comes from a
  317: 	   delayed jump, as immediate jumps are conditional jumps */
  318: 	tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
  319: 	if (ctx->singlestep_enabled)
  320: 	    gen_helper_debug();
  321: 	tcg_gen_exit_tb(0);
  322:     } else {
  323: 	gen_goto_tb(ctx, 0, ctx->delayed_pc);
  324:     }
  325: }
  326: 
  327: static inline void gen_branch_slot(uint32_t delayed_pc, int t)
  328: {
  329:     TCGv sr;
  330:     int label = gen_new_label();
  331:     tcg_gen_movi_i32(cpu_delayed_pc, delayed_pc);
  332:     sr = tcg_temp_new();
  333:     tcg_gen_andi_i32(sr, cpu_sr, SR_T);
  334:     tcg_gen_brcondi_i32(TCG_COND_NE, sr, t ? SR_T : 0, label);
  335:     tcg_gen_ori_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
  336:     gen_set_label(label);
  337: }
  338: 
  339: /* Immediate conditional jump (bt or bf) */
  340: static void gen_conditional_jump(DisasContext * ctx,
  341: 				 target_ulong ift, target_ulong ifnott)
  342: {
  343:     int l1;
  344:     TCGv sr;
  345: 
  346:     l1 = gen_new_label();
  347:     sr = tcg_temp_new();
  348:     tcg_gen_andi_i32(sr, cpu_sr, SR_T);
  349:     tcg_gen_brcondi_i32(TCG_COND_EQ, sr, SR_T, l1);
  350:     gen_goto_tb(ctx, 0, ifnott);
  351:     gen_set_label(l1);
  352:     gen_goto_tb(ctx, 1, ift);
  353: }
  354: 
  355: /* Delayed conditional jump (bt or bf) */
  356: static void gen_delayed_conditional_jump(DisasContext * ctx)
  357: {
  358:     int l1;
  359:     TCGv ds;
  360: 
  361:     l1 = gen_new_label();
  362:     ds = tcg_temp_new();
  363:     tcg_gen_andi_i32(ds, cpu_flags, DELAY_SLOT_TRUE);
  364:     tcg_gen_brcondi_i32(TCG_COND_EQ, ds, DELAY_SLOT_TRUE, l1);
  365:     gen_goto_tb(ctx, 1, ctx->pc + 2);
  366:     gen_set_label(l1);
  367:     tcg_gen_andi_i32(cpu_flags, cpu_flags, ~DELAY_SLOT_TRUE);
  368:     gen_jump(ctx);
  369: }
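/* How the two halves fit together for bt/s and bf/s: when the branch opcode
 * itself is decoded, gen_branch_slot() stores the target in cpu_delayed_pc
 * and sets DELAY_SLOT_TRUE in cpu_flags if the condition held; once the
 * delay-slot instruction has been translated, the code above tests that
 * flag -- if it is clear we continue at ctx->pc + 2, otherwise the flag is
 * cleared and gen_jump() transfers control to the recorded target. */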
  370: 
  371: static inline void gen_set_t(void)
  372: {
  373:     tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_T);
  374: }
  375: 
  376: static inline void gen_clr_t(void)
  377: {
  378:     tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
  379: }
  380: 
  381: static inline void gen_cmp(int cond, TCGv t0, TCGv t1)
  382: {
  383:     int label1 = gen_new_label();
  384:     int label2 = gen_new_label();
  385:     tcg_gen_brcond_i32(cond, t1, t0, label1);
  386:     gen_clr_t();
  387:     tcg_gen_br(label2);
  388:     gen_set_label(label1);
  389:     gen_set_t();
  390:     gen_set_label(label2);
  391: }
  392: 
  393: static inline void gen_cmp_imm(int cond, TCGv t0, int32_t imm)
  394: {
  395:     int label1 = gen_new_label();
  396:     int label2 = gen_new_label();
  397:     tcg_gen_brcondi_i32(cond, t0, imm, label1);
  398:     gen_clr_t();
  399:     tcg_gen_br(label2);
  400:     gen_set_label(label1);
  401:     gen_set_t();
  402:     gen_set_label(label2);
  403: }
  404: 
  405: static inline void gen_store_flags(uint32_t flags)
  406: {
  407:     tcg_gen_andi_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
  408:     tcg_gen_ori_i32(cpu_flags, cpu_flags, flags);
  409: }
  410: 
  411: static inline void gen_copy_bit_i32(TCGv t0, int p0, TCGv t1, int p1)
  412: {
  413:     TCGv tmp = tcg_temp_new();
  414: 
  415:     p0 &= 0x1f;
  416:     p1 &= 0x1f;
  417: 
  418:     tcg_gen_andi_i32(tmp, t1, (1 << p1));
  419:     tcg_gen_andi_i32(t0, t0, ~(1 << p0));
  420:     if (p0 < p1)
  421:         tcg_gen_shri_i32(tmp, tmp, p1 - p0);
  422:     else if (p0 > p1)
  423:         tcg_gen_shli_i32(tmp, tmp, p0 - p1);
  424:     tcg_gen_or_i32(t0, t0, tmp);
  425: 
  426:     tcg_temp_free(tmp);
  427: }
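/* Worked example: gen_copy_bit_i32(cpu_sr, 0, val, 31) copies bit 31 of val
 * (its sign bit) into bit 0 of cpu_sr (the T flag): the source bit is
 * isolated, shifted right by 31 positions to line it up with bit 0, the old
 * destination bit is cleared and the result is OR-ed back in.  The div0s
 * case below uses exactly this to derive Q, M and T from the operand signs. */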
  428: 
  429: static inline void gen_load_fpr64(TCGv_i64 t, int reg)
  430: {
  431:     tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
  432: }
  433: 
  434: static inline void gen_store_fpr64 (TCGv_i64 t, int reg)
  435: {
  436:     TCGv_i32 tmp = tcg_temp_new_i32();
  437:     tcg_gen_trunc_i64_i32(tmp, t);
  438:     tcg_gen_mov_i32(cpu_fregs[reg + 1], tmp);
  439:     tcg_gen_shri_i64(t, t, 32);
  440:     tcg_gen_trunc_i64_i32(tmp, t);
  441:     tcg_gen_mov_i32(cpu_fregs[reg], tmp);
  442:     tcg_temp_free_i32(tmp);
  443: }
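/* Note on the pair layout used by the two helpers above: a double-precision
 * DRn is kept as FPRn = upper 32 bits and FPRn+1 = lower 32 bits, so the
 * load concatenates cpu_fregs[reg + 1] (low half) with cpu_fregs[reg] (high
 * half) and the store splits the 64-bit value back the same way. */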
  444: 
  445: #define B3_0 (ctx->opcode & 0xf)
  446: #define B6_4 ((ctx->opcode >> 4) & 0x7)
  447: #define B7_4 ((ctx->opcode >> 4) & 0xf)
  448: #define B7_0 (ctx->opcode & 0xff)
  449: #define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
  450: #define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  451:   (ctx->opcode & 0xfff))
  452: #define B11_8 ((ctx->opcode >> 8) & 0xf)
  453: #define B15_12 ((ctx->opcode >> 12) & 0xf)
  454: 
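/* Worked example of the field macros: for ctx->opcode == 0x342c ("add
 * r2,r4"), B15_12 = 0x3 selects the arithmetic group, B11_8 = 4 names Rn,
 * B7_4 = 2 names Rm and B3_0 = 0xc completes the match against the 0xf00f
 * mask in _decode_opc() below. */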
  455: #define REG(x) ((x) < 8 && (ctx->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB) ? \
  456: 		(cpu_gregs[x + 16]) : (cpu_gregs[x]))
  457: 
  458: #define ALTREG(x) ((x) < 8 && (ctx->sr & (SR_MD | SR_RB)) != (SR_MD | SR_RB) \
  459: 		? (cpu_gregs[x + 16]) : (cpu_gregs[x]))
  460: 
  461: #define FREG(x) (ctx->fpscr & FPSCR_FR ? (x) ^ 0x10 : (x))
  462: #define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
  463: #define XREG(x) (ctx->fpscr & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x))
  464: #define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */
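/* Bank selection in practice: with SR.MD and SR.RB both set (privileged
 * code running on register bank 1), REG(3) resolves to cpu_gregs[19]
 * (R3_BANK1) while ALTREG(3) resolves to cpu_gregs[3] (R3_BANK0); for R8
 * and above the bank bits are irrelevant.  FREG/XREG/DREG apply the same
 * idea to the two floating-point banks via FPSCR.FR. */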
  465: 
  466: #define CHECK_NOT_DELAY_SLOT \
  467:   if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))     \
  468:   {                                                           \
  469:       tcg_gen_movi_i32(cpu_pc, ctx->pc-2);                    \
  470:       gen_helper_raise_slot_illegal_instruction();            \
  471:       ctx->bstate = BS_EXCP;                                  \
  472:       return;                                                 \
  473:   }
  474: 
  475: #define CHECK_PRIVILEGED                                      \
  476:   if (IS_USER(ctx)) {                                         \
  477:       tcg_gen_movi_i32(cpu_pc, ctx->pc);                      \
  478:       gen_helper_raise_illegal_instruction();                 \
  479:       ctx->bstate = BS_EXCP;                                  \
  480:       return;                                                 \
  481:   }
  482: 
  483: #define CHECK_FPU_ENABLED                                       \
  484:   if (ctx->flags & SR_FD) {                                     \
  485:       if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
  486:           tcg_gen_movi_i32(cpu_pc, ctx->pc-2);                  \
  487:           gen_helper_raise_slot_fpu_disable();                  \
  488:       } else {                                                  \
  489:           tcg_gen_movi_i32(cpu_pc, ctx->pc);                    \
  490:           gen_helper_raise_fpu_disable();                       \
  491:       }                                                         \
  492:       ctx->bstate = BS_EXCP;                                    \
  493:       return;                                                   \
  494:   }
  495: 
  496: static void _decode_opc(DisasContext * ctx)
  497: {
  498: #if 0
  499:     fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
  500: #endif
  501: 
  502:     switch (ctx->opcode) {
  503:     case 0x0019:		/* div0u */
  504: 	tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(SR_M | SR_Q | SR_T));
  505: 	return;
  506:     case 0x000b:		/* rts */
  507: 	CHECK_NOT_DELAY_SLOT
  508: 	tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
  509: 	ctx->flags |= DELAY_SLOT;
  510: 	ctx->delayed_pc = (uint32_t) - 1;
  511: 	return;
  512:     case 0x0028:		/* clrmac */
  513: 	tcg_gen_movi_i32(cpu_mach, 0);
  514: 	tcg_gen_movi_i32(cpu_macl, 0);
  515: 	return;
  516:     case 0x0048:		/* clrs */
  517: 	tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_S);
  518: 	return;
  519:     case 0x0008:		/* clrt */
  520: 	gen_clr_t();
  521: 	return;
  522:     case 0x0038:		/* ldtlb */
  523: 	CHECK_PRIVILEGED
  524: 	gen_helper_ldtlb();
  525: 	return;
  526:     case 0x002b:		/* rte */
  527: 	CHECK_PRIVILEGED
  528: 	CHECK_NOT_DELAY_SLOT
  529: 	tcg_gen_mov_i32(cpu_sr, cpu_ssr);
  530: 	tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
  531: 	ctx->flags |= DELAY_SLOT;
  532: 	ctx->delayed_pc = (uint32_t) - 1;
  533: 	return;
  534:     case 0x0058:		/* sets */
  535: 	tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_S);
  536: 	return;
  537:     case 0x0018:		/* sett */
  538: 	gen_set_t();
  539: 	return;
  540:     case 0xfbfd:		/* frchg */
  541: 	tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
  542: 	ctx->bstate = BS_STOP;
  543: 	return;
  544:     case 0xf3fd:		/* fschg */
  545: 	tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
  546: 	ctx->bstate = BS_STOP;
  547: 	return;
  548:     case 0x0009:		/* nop */
  549: 	return;
  550:     case 0x001b:		/* sleep */
  551: 	CHECK_PRIVILEGED
  552: 	gen_helper_sleep(tcg_const_i32(ctx->pc + 2));
  553: 	return;
  554:     }
  555: 
  556:     switch (ctx->opcode & 0xf000) {
  557:     case 0x1000:		/* mov.l Rm,@(disp,Rn) */
  558: 	{
  559: 	    TCGv addr = tcg_temp_new();
  560: 	    tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
  561: 	    tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
  562: 	    tcg_temp_free(addr);
  563: 	}
  564: 	return;
  565:     case 0x5000:		/* mov.l @(disp,Rm),Rn */
  566: 	{
  567: 	    TCGv addr = tcg_temp_new();
  568: 	    tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
  569: 	    tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
  570: 	    tcg_temp_free(addr);
  571: 	}
  572: 	return;
  573:     case 0xe000:		/* mov #imm,Rn */
  574: 	tcg_gen_movi_i32(REG(B11_8), B7_0s);
  575: 	return;
  576:     case 0x9000:		/* mov.w @(disp,PC),Rn */
  577: 	{
  578: 	    TCGv addr = tcg_const_i32(ctx->pc + 4 + B7_0 * 2);
  579: 	    tcg_gen_qemu_ld16s(REG(B11_8), addr, ctx->memidx);
  580: 	    tcg_temp_free(addr);
  581: 	}
  582: 	return;
  583:     case 0xd000:		/* mov.l @(disp,PC),Rn */
  584: 	{
  585: 	    TCGv addr = tcg_const_i32((ctx->pc + 4 + B7_0 * 4) & ~3);
  586: 	    tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
  587: 	    tcg_temp_free(addr);
  588: 	}
  589: 	return;
  590:     case 0x7000:		/* add #imm,Rn */
  591: 	tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
  592: 	return;
  593:     case 0xa000:		/* bra disp */
  594: 	CHECK_NOT_DELAY_SLOT
  595: 	ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
  596: 	tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
  597: 	ctx->flags |= DELAY_SLOT;
  598: 	return;
  599:     case 0xb000:		/* bsr disp */
  600: 	CHECK_NOT_DELAY_SLOT
  601: 	tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
  602: 	ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
  603: 	tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
  604: 	ctx->flags |= DELAY_SLOT;
  605: 	return;
  606:     }
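    /* Guest-code illustration of the delay-slot handling above (standard
     * SH-4 semantics, not specific to this translator): in
     *
     *         bra   target        ! delayed_pc = pc + 4 + disp * 2
     *         add   #1,r0         ! delay-slot instruction, still executed
     *
     * the bra only records delayed_pc and sets DELAY_SLOT; the add is
     * translated on the next loop iteration, and only then does gen_jump()
     * emit the actual transfer to delayed_pc. */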
  607: 
  608:     switch (ctx->opcode & 0xf00f) {
  609:     case 0x6003:		/* mov Rm,Rn */
  610: 	tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
  611: 	return;
  612:     case 0x2000:		/* mov.b Rm,@Rn */
  613: 	tcg_gen_qemu_st8(REG(B7_4), REG(B11_8), ctx->memidx);
  614: 	return;
  615:     case 0x2001:		/* mov.w Rm,@Rn */
  616: 	tcg_gen_qemu_st16(REG(B7_4), REG(B11_8), ctx->memidx);
  617: 	return;
  618:     case 0x2002:		/* mov.l Rm,@Rn */
  619: 	tcg_gen_qemu_st32(REG(B7_4), REG(B11_8), ctx->memidx);
  620: 	return;
  621:     case 0x6000:		/* mov.b @Rm,Rn */
  622: 	tcg_gen_qemu_ld8s(REG(B11_8), REG(B7_4), ctx->memidx);
  623: 	return;
  624:     case 0x6001:		/* mov.w @Rm,Rn */
  625: 	tcg_gen_qemu_ld16s(REG(B11_8), REG(B7_4), ctx->memidx);
  626: 	return;
  627:     case 0x6002:		/* mov.l @Rm,Rn */
  628: 	tcg_gen_qemu_ld32s(REG(B11_8), REG(B7_4), ctx->memidx);
  629: 	return;
  630:     case 0x2004:		/* mov.b Rm,@-Rn */
  631: 	{
  632: 	    TCGv addr = tcg_temp_new();
  633: 	    tcg_gen_subi_i32(addr, REG(B11_8), 1);
  634: 	    tcg_gen_qemu_st8(REG(B7_4), addr, ctx->memidx);	/* might cause re-execution */
  635: 	    tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);	/* modify register status */
  636: 	    tcg_temp_free(addr);
  637: 	}
  638: 	return;
  639:     case 0x2005:		/* mov.w Rm,@-Rn */
  640: 	{
  641: 	    TCGv addr = tcg_temp_new();
  642: 	    tcg_gen_subi_i32(addr, REG(B11_8), 2);
  643: 	    tcg_gen_qemu_st16(REG(B7_4), addr, ctx->memidx);
  644: 	    tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 2);
  645: 	    tcg_temp_free(addr);
  646: 	}
  647: 	return;
  648:     case 0x2006:		/* mov.l Rm,@-Rn */
  649: 	{
  650: 	    TCGv addr = tcg_temp_new();
  651: 	    tcg_gen_subi_i32(addr, REG(B11_8), 4);
  652: 	    tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
  653: 	    tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 4);
       	    tcg_temp_free(addr);
  654: 	}
  655: 	return;
  656:     case 0x6004:		/* mov.b @Rm+,Rn */
  657: 	tcg_gen_qemu_ld8s(REG(B11_8), REG(B7_4), ctx->memidx);
  658: 	if ( B11_8 != B7_4 )
  659: 		tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
  660: 	return;
  661:     case 0x6005:		/* mov.w @Rm+,Rn */
  662: 	tcg_gen_qemu_ld16s(REG(B11_8), REG(B7_4), ctx->memidx);
  663: 	if ( B11_8 != B7_4 )
  664: 		tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
  665: 	return;
  666:     case 0x6006:		/* mov.l @Rm+,Rn */
  667: 	tcg_gen_qemu_ld32s(REG(B11_8), REG(B7_4), ctx->memidx);
  668: 	if ( B11_8 != B7_4 )
  669: 		tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
  670: 	return;
  671:     case 0x0004:		/* mov.b Rm,@(R0,Rn) */
  672: 	{
  673: 	    TCGv addr = tcg_temp_new();
  674: 	    tcg_gen_add_i32(addr, REG(B11_8), REG(0));
  675: 	    tcg_gen_qemu_st8(REG(B7_4), addr, ctx->memidx);
  676: 	    tcg_temp_free(addr);
  677: 	}
  678: 	return;
  679:     case 0x0005:		/* mov.w Rm,@(R0,Rn) */
  680: 	{
  681: 	    TCGv addr = tcg_temp_new();
  682: 	    tcg_gen_add_i32(addr, REG(B11_8), REG(0));
  683: 	    tcg_gen_qemu_st16(REG(B7_4), addr, ctx->memidx);
  684: 	    tcg_temp_free(addr);
  685: 	}
  686: 	return;
  687:     case 0x0006:		/* mov.l Rm,@(R0,Rn) */
  688: 	{
  689: 	    TCGv addr = tcg_temp_new();
  690: 	    tcg_gen_add_i32(addr, REG(B11_8), REG(0));
  691: 	    tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
  692: 	    tcg_temp_free(addr);
  693: 	}
  694: 	return;
  695:     case 0x000c:		/* mov.b @(R0,Rm),Rn */
  696: 	{
  697: 	    TCGv addr = tcg_temp_new();
  698: 	    tcg_gen_add_i32(addr, REG(B7_4), REG(0));
  699: 	    tcg_gen_qemu_ld8s(REG(B11_8), addr, ctx->memidx);
  700: 	    tcg_temp_free(addr);
  701: 	}
  702: 	return;
  703:     case 0x000d:		/* mov.w @(R0,Rm),Rn */
  704: 	{
  705: 	    TCGv addr = tcg_temp_new();
  706: 	    tcg_gen_add_i32(addr, REG(B7_4), REG(0));
  707: 	    tcg_gen_qemu_ld16s(REG(B11_8), addr, ctx->memidx);
  708: 	    tcg_temp_free(addr);
  709: 	}
  710: 	return;
  711:     case 0x000e:		/* mov.l @(R0,Rm),Rn */
  712: 	{
  713: 	    TCGv addr = tcg_temp_new();
  714: 	    tcg_gen_add_i32(addr, REG(B7_4), REG(0));
  715: 	    tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
  716: 	    tcg_temp_free(addr);
  717: 	}
  718: 	return;
  719:     case 0x6008:		/* swap.b Rm,Rn */
  720: 	{
  721: 	    TCGv highw, high, low;
  722: 	    highw = tcg_temp_new();
  723: 	    tcg_gen_andi_i32(highw, REG(B7_4), 0xffff0000);
  724: 	    high = tcg_temp_new();
  725: 	    tcg_gen_ext8u_i32(high, REG(B7_4));
  726: 	    tcg_gen_shli_i32(high, high, 8);
  727: 	    low = tcg_temp_new();
  728: 	    tcg_gen_shri_i32(low, REG(B7_4), 8);
  729: 	    tcg_gen_ext8u_i32(low, low);
  730: 	    tcg_gen_or_i32(REG(B11_8), high, low);
  731: 	    tcg_gen_or_i32(REG(B11_8), REG(B11_8), highw);
  732: 	    tcg_temp_free(low);
  733: 	    tcg_temp_free(high);
  734: 	}
  735: 	return;
  736:     case 0x6009:		/* swap.w Rm,Rn */
  737: 	{
  738: 	    TCGv high, low;
  739: 	    high = tcg_temp_new();
  740: 	    tcg_gen_ext16u_i32(high, REG(B7_4));
  741: 	    tcg_gen_shli_i32(high, high, 16);
  742: 	    low = tcg_temp_new();
  743: 	    tcg_gen_shri_i32(low, REG(B7_4), 16);
  744: 	    tcg_gen_ext16u_i32(low, low);
  745: 	    tcg_gen_or_i32(REG(B11_8), high, low);
  746: 	    tcg_temp_free(low);
  747: 	    tcg_temp_free(high);
  748: 	}
  749: 	return;
  750:     case 0x200d:		/* xtrct Rm,Rn */
  751: 	{
  752: 	    TCGv high, low;
  753: 	    high = tcg_temp_new();
  754: 	    tcg_gen_ext16u_i32(high, REG(B7_4));
  755: 	    tcg_gen_shli_i32(high, high, 16);
  756: 	    low = tcg_temp_new();
  757: 	    tcg_gen_shri_i32(low, REG(B11_8), 16);
  758: 	    tcg_gen_ext16u_i32(low, low);
  759: 	    tcg_gen_or_i32(REG(B11_8), high, low);
  760: 	    tcg_temp_free(low);
  761: 	    tcg_temp_free(high);
  762: 	}
  763: 	return;
  764:     case 0x300c:		/* add Rm,Rn */
  765: 	tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
  766: 	return;
  767:     case 0x300e:		/* addc Rm,Rn */
  768: 	gen_helper_addc(REG(B11_8), REG(B7_4), REG(B11_8));
  769: 	return;
  770:     case 0x300f:		/* addv Rm,Rn */
  771: 	gen_helper_addv(REG(B11_8), REG(B7_4), REG(B11_8));
  772: 	return;
  773:     case 0x2009:		/* and Rm,Rn */
  774: 	tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
  775: 	return;
  776:     case 0x3000:		/* cmp/eq Rm,Rn */
  777: 	gen_cmp(TCG_COND_EQ, REG(B7_4), REG(B11_8));
  778: 	return;
  779:     case 0x3003:		/* cmp/ge Rm,Rn */
  780: 	gen_cmp(TCG_COND_GE, REG(B7_4), REG(B11_8));
  781: 	return;
  782:     case 0x3007:		/* cmp/gt Rm,Rn */
  783: 	gen_cmp(TCG_COND_GT, REG(B7_4), REG(B11_8));
  784: 	return;
  785:     case 0x3006:		/* cmp/hi Rm,Rn */
  786: 	gen_cmp(TCG_COND_GTU, REG(B7_4), REG(B11_8));
  787: 	return;
  788:     case 0x3002:		/* cmp/hs Rm,Rn */
  789: 	gen_cmp(TCG_COND_GEU, REG(B7_4), REG(B11_8));
  790: 	return;
  791:     case 0x200c:		/* cmp/str Rm,Rn */
  792: 	{
  793: 	    int label1 = gen_new_label();
  794: 	    int label2 = gen_new_label();
  795: 	    TCGv cmp1 = tcg_temp_local_new();
  796: 	    TCGv cmp2 = tcg_temp_local_new();
  797: 	    tcg_gen_xor_i32(cmp1, REG(B7_4), REG(B11_8));
  798: 	    tcg_gen_andi_i32(cmp2, cmp1, 0xff000000);
  799: 	    tcg_gen_brcondi_i32(TCG_COND_EQ, cmp2, 0, label1);
  800: 	    tcg_gen_andi_i32(cmp2, cmp1, 0x00ff0000);
  801: 	    tcg_gen_brcondi_i32(TCG_COND_EQ, cmp2, 0, label1);
  802: 	    tcg_gen_andi_i32(cmp2, cmp1, 0x0000ff00);
  803: 	    tcg_gen_brcondi_i32(TCG_COND_EQ, cmp2, 0, label1);
  804: 	    tcg_gen_andi_i32(cmp2, cmp1, 0x000000ff);
  805: 	    tcg_gen_brcondi_i32(TCG_COND_EQ, cmp2, 0, label1);
  806: 	    tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
  807: 	    tcg_gen_br(label2);
  808: 	    gen_set_label(label1);
  809: 	    tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_T);
  810: 	    gen_set_label(label2);
  811: 	    tcg_temp_free(cmp2);
  812: 	    tcg_temp_free(cmp1);
  813: 	}
  814: 	return;
  815:     case 0x2007:		/* div0s Rm,Rn */
  816: 	{
  817: 	    gen_copy_bit_i32(cpu_sr, 8, REG(B11_8), 31);	/* SR_Q */
  818: 	    gen_copy_bit_i32(cpu_sr, 9, REG(B7_4), 31);		/* SR_M */
  819: 	    TCGv val = tcg_temp_new();
  820: 	    tcg_gen_xor_i32(val, REG(B7_4), REG(B11_8));
  821: 	    gen_copy_bit_i32(cpu_sr, 0, val, 31);		/* SR_T */
  822: 	    tcg_temp_free(val);
  823: 	}
  824: 	return;
  825:     case 0x3004:		/* div1 Rm,Rn */
  826: 	gen_helper_div1(REG(B11_8), REG(B7_4), REG(B11_8));
  827: 	return;
  828:     case 0x300d:		/* dmuls.l Rm,Rn */
  829: 	{
  830: 	    TCGv_i64 tmp1 = tcg_temp_new_i64();
  831: 	    TCGv_i64 tmp2 = tcg_temp_new_i64();
  832: 
  833: 	    tcg_gen_ext_i32_i64(tmp1, REG(B7_4));
  834: 	    tcg_gen_ext_i32_i64(tmp2, REG(B11_8));
  835: 	    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
  836: 	    tcg_gen_trunc_i64_i32(cpu_macl, tmp1);
  837: 	    tcg_gen_shri_i64(tmp1, tmp1, 32);
  838: 	    tcg_gen_trunc_i64_i32(cpu_mach, tmp1);
  839: 
  840: 	    tcg_temp_free_i64(tmp2);
  841: 	    tcg_temp_free_i64(tmp1);
  842: 	}
  843: 	return;
  844:     case 0x3005:		/* dmulu.l Rm,Rn */
  845: 	{
  846: 	    TCGv_i64 tmp1 = tcg_temp_new_i64();
  847: 	    TCGv_i64 tmp2 = tcg_temp_new_i64();
  848: 
  849: 	    tcg_gen_extu_i32_i64(tmp1, REG(B7_4));
  850: 	    tcg_gen_extu_i32_i64(tmp2, REG(B11_8));
  851: 	    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
  852: 	    tcg_gen_trunc_i64_i32(cpu_macl, tmp1);
  853: 	    tcg_gen_shri_i64(tmp1, tmp1, 32);
  854: 	    tcg_gen_trunc_i64_i32(cpu_mach, tmp1);
  855: 
  856: 	    tcg_temp_free_i64(tmp2);
  857: 	    tcg_temp_free_i64(tmp1);
  858: 	}
  859: 	return;
  860:     case 0x600e:		/* exts.b Rm,Rn */
  861: 	tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
  862: 	return;
  863:     case 0x600f:		/* exts.w Rm,Rn */
  864: 	tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
  865: 	return;
  866:     case 0x600c:		/* extu.b Rm,Rn */
  867: 	tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
  868: 	return;
  869:     case 0x600d:		/* extu.w Rm,Rn */
  870: 	tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
  871: 	return;
  872:     case 0x000f:		/* mac.l @Rm+,@Rn+ */
  873: 	{
  874: 	    TCGv arg0, arg1;
  875: 	    arg0 = tcg_temp_new();
  876: 	    tcg_gen_qemu_ld32s(arg0, REG(B7_4), ctx->memidx);
  877: 	    arg1 = tcg_temp_new();
  878: 	    tcg_gen_qemu_ld32s(arg1, REG(B11_8), ctx->memidx);
  879: 	    gen_helper_macl(arg0, arg1);
  880: 	    tcg_temp_free(arg1);
  881: 	    tcg_temp_free(arg0);
  882: 	    tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
  883: 	    tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
  884: 	}
  885: 	return;
  886:     case 0x400f:		/* mac.w @Rm+,@Rn+ */
  887: 	{
  888: 	    TCGv arg0, arg1;
  889: 	    arg0 = tcg_temp_new();
  890: 	    tcg_gen_qemu_ld32s(arg0, REG(B7_4), ctx->memidx);
  891: 	    arg1 = tcg_temp_new();
  892: 	    tcg_gen_qemu_ld32s(arg1, REG(B11_8), ctx->memidx);
  893: 	    gen_helper_macw(arg0, arg1);
  894: 	    tcg_temp_free(arg1);
  895: 	    tcg_temp_free(arg0);
  896: 	    tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
  897: 	    tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
  898: 	}
  899: 	return;
  900:     case 0x0007:		/* mul.l Rm,Rn */
  901: 	tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
  902: 	return;
  903:     case 0x200f:		/* muls.w Rm,Rn */
  904: 	{
  905: 	    TCGv arg0, arg1;
  906: 	    arg0 = tcg_temp_new();
  907: 	    tcg_gen_ext16s_i32(arg0, REG(B7_4));
  908: 	    arg1 = tcg_temp_new();
  909: 	    tcg_gen_ext16s_i32(arg1, REG(B11_8));
  910: 	    tcg_gen_mul_i32(cpu_macl, arg0, arg1);
  911: 	    tcg_temp_free(arg1);
  912: 	    tcg_temp_free(arg0);
  913: 	}
  914: 	return;
  915:     case 0x200e:		/* mulu.w Rm,Rn */
  916: 	{
  917: 	    TCGv arg0, arg1;
  918: 	    arg0 = tcg_temp_new();
  919: 	    tcg_gen_ext16u_i32(arg0, REG(B7_4));
  920: 	    arg1 = tcg_temp_new();
  921: 	    tcg_gen_ext16u_i32(arg1, REG(B11_8));
  922: 	    tcg_gen_mul_i32(cpu_macl, arg0, arg1);
  923: 	    tcg_temp_free(arg1);
  924: 	    tcg_temp_free(arg0);
  925: 	}
  926: 	return;
  927:     case 0x600b:		/* neg Rm,Rn */
  928: 	tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
  929: 	return;
  930:     case 0x600a:		/* negc Rm,Rn */
  931: 	gen_helper_negc(REG(B11_8), REG(B7_4));
  932: 	return;
  933:     case 0x6007:		/* not Rm,Rn */
  934: 	tcg_gen_not_i32(REG(B11_8), REG(B7_4));
  935: 	return;
  936:     case 0x200b:		/* or Rm,Rn */
  937: 	tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
  938: 	return;
  939:     case 0x400c:		/* shad Rm,Rn */
  940: 	{
  941: 	    int label1 = gen_new_label();
  942: 	    int label2 = gen_new_label();
  943: 	    int label3 = gen_new_label();
  944: 	    int label4 = gen_new_label();
  945: 	    TCGv shift = tcg_temp_local_new();
  946: 	    tcg_gen_brcondi_i32(TCG_COND_LT, REG(B7_4), 0, label1);
  947: 	    /* Rm positive, shift to the left */
  948: 	    tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
  949: 	    tcg_gen_shl_i32(REG(B11_8), REG(B11_8), shift);
  950: 	    tcg_gen_br(label4);
  951: 	    /* Rm negative, shift to the right */
  952: 	    gen_set_label(label1);
  953: 	    tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
  954: 	    tcg_gen_brcondi_i32(TCG_COND_EQ, shift, 0, label2);
  955: 	    tcg_gen_not_i32(shift, REG(B7_4));
  956: 	    tcg_gen_andi_i32(shift, shift, 0x1f);
  957: 	    tcg_gen_addi_i32(shift, shift, 1);
  958: 	    tcg_gen_sar_i32(REG(B11_8), REG(B11_8), shift);
  959: 	    tcg_gen_br(label4);
  960: 	    /* Rm = -32 */
  961: 	    gen_set_label(label2);
  962: 	    tcg_gen_brcondi_i32(TCG_COND_LT, REG(B11_8), 0, label3);
  963: 	    tcg_gen_movi_i32(REG(B11_8), 0);
  964: 	    tcg_gen_br(label4);
  965: 	    gen_set_label(label3);
  966: 	    tcg_gen_movi_i32(REG(B11_8), 0xffffffff);
  967: 	    gen_set_label(label4);
  968: 	    tcg_temp_free(shift);
  969: 	}
  970: 	return;
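    /* Worked example for shad: with Rm = -1 (0xffffffff) the code above
     * takes the negative path, computes (~Rm & 0x1f) + 1 = 1 and emits an
     * arithmetic right shift of Rn by one; with Rm = 8 it shifts Rn left by
     * eight; and when Rm is negative with its low five bits all zero, Rn
     * collapses to 0 or 0xffffffff depending on its own sign. */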
  971:     case 0x400d:		/* shld Rm,Rn */
  972: 	{
  973: 	    int label1 = gen_new_label();
  974: 	    int label2 = gen_new_label();
  975: 	    int label3 = gen_new_label();
  976: 	    TCGv shift = tcg_temp_local_new();
  977: 	    tcg_gen_brcondi_i32(TCG_COND_LT, REG(B7_4), 0, label1);
  978: 	    /* Rm positive, shift to the left */
  979: 	    tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
  980: 	    tcg_gen_shl_i32(REG(B11_8), REG(B11_8), shift);
  981: 	    tcg_gen_br(label3);
  982: 	    /* Rm negative, shift to the right */
  983: 	    gen_set_label(label1);
  984: 	    tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
  985: 	    tcg_gen_brcondi_i32(TCG_COND_EQ, shift, 0, label2);
  986: 	    tcg_gen_not_i32(shift, REG(B7_4));
  987: 	    tcg_gen_andi_i32(shift, shift, 0x1f);
  988: 	    tcg_gen_addi_i32(shift, shift, 1);
  989: 	    tcg_gen_shr_i32(REG(B11_8), REG(B11_8), shift);
  990: 	    tcg_gen_br(label3);
  991: 	    /* Rm = -32 */
  992: 	    gen_set_label(label2);
  993: 	    tcg_gen_movi_i32(REG(B11_8), 0);
  994: 	    gen_set_label(label3);
  995: 	    tcg_temp_free(shift);
  996: 	}
  997: 	return;
  998:     case 0x3008:		/* sub Rm,Rn */
  999: 	tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
 1000: 	return;
 1001:     case 0x300a:		/* subc Rm,Rn */
 1002: 	gen_helper_subc(REG(B11_8), REG(B7_4), REG(B11_8));
 1003: 	return;
 1004:     case 0x300b:		/* subv Rm,Rn */
 1005: 	gen_helper_subv(REG(B11_8), REG(B7_4), REG(B11_8));
 1006: 	return;
 1007:     case 0x2008:		/* tst Rm,Rn */
 1008: 	{
 1009: 	    TCGv val = tcg_temp_new();
 1010: 	    tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
 1011: 	    gen_cmp_imm(TCG_COND_EQ, val, 0);
 1012: 	    tcg_temp_free(val);
 1013: 	}
 1014: 	return;
 1015:     case 0x200a:		/* xor Rm,Rn */
 1016: 	tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
 1017: 	return;
 1018:     case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
 1019: 	CHECK_FPU_ENABLED
 1020: 	if (ctx->fpscr & FPSCR_SZ) {
 1021: 	    TCGv_i64 fp = tcg_temp_new_i64();
 1022: 	    gen_load_fpr64(fp, XREG(B7_4));
 1023: 	    gen_store_fpr64(fp, XREG(B11_8));
 1024: 	    tcg_temp_free_i64(fp);
 1025: 	} else {
 1026: 	    tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
 1027: 	}
 1028: 	return;
 1029:     case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
 1030: 	CHECK_FPU_ENABLED
 1031: 	if (ctx->fpscr & FPSCR_SZ) {
 1032: 	    TCGv addr_hi = tcg_temp_new();
 1033: 	    int fr = XREG(B7_4);
 1034: 	    tcg_gen_addi_i32(addr_hi, REG(B11_8), 4);
 1035: 	    tcg_gen_qemu_st32(cpu_fregs[fr  ], REG(B11_8), ctx->memidx);
 1036: 	    tcg_gen_qemu_st32(cpu_fregs[fr+1], addr_hi,	   ctx->memidx);
 1037: 	    tcg_temp_free(addr_hi);
 1038: 	} else {
 1039: 	    tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], REG(B11_8), ctx->memidx);
 1040: 	}
 1041: 	return;
 1042:     case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
 1043: 	CHECK_FPU_ENABLED
 1044: 	if (ctx->fpscr & FPSCR_SZ) {
 1045: 	    TCGv addr_hi = tcg_temp_new();
 1046: 	    int fr = XREG(B11_8);
 1047: 	    tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
 1048: 	    tcg_gen_qemu_ld32u(cpu_fregs[fr  ], REG(B7_4), ctx->memidx);
 1049: 	    tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr_hi,   ctx->memidx);
 1050: 	    tcg_temp_free(addr_hi);
 1051: 	} else {
 1052: 	    tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], REG(B7_4), ctx->memidx);
 1053: 	}
 1054: 	return;
 1055:     case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
 1056: 	CHECK_FPU_ENABLED
 1057: 	if (ctx->fpscr & FPSCR_SZ) {
 1058: 	    TCGv addr_hi = tcg_temp_new();
 1059: 	    int fr = XREG(B11_8);
 1060: 	    tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
 1061: 	    tcg_gen_qemu_ld32u(cpu_fregs[fr  ], REG(B7_4), ctx->memidx);
 1062: 	    tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr_hi,   ctx->memidx);
 1063: 	    tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
 1064: 	    tcg_temp_free(addr_hi);
 1065: 	} else {
 1066: 	    tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], REG(B7_4), ctx->memidx);
 1067: 	    tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
 1068: 	}
 1069: 	return;
 1070:     case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
 1071: 	CHECK_FPU_ENABLED
 1072: 	if (ctx->fpscr & FPSCR_SZ) {
 1073: 	    TCGv addr = tcg_temp_new_i32();
 1074: 	    int fr = XREG(B7_4);
 1075: 	    tcg_gen_subi_i32(addr, REG(B11_8), 4);
 1076: 	    tcg_gen_qemu_st32(cpu_fregs[fr+1], addr, ctx->memidx);
 1077: 	    tcg_gen_subi_i32(addr, REG(B11_8), 8);
 1078: 	    tcg_gen_qemu_st32(cpu_fregs[fr  ], addr, ctx->memidx);
 1079: 	    tcg_gen_mov_i32(REG(B11_8), addr);
 1080: 	    tcg_temp_free(addr);
 1081: 	} else {
 1082: 	    TCGv addr;
 1083: 	    addr = tcg_temp_new_i32();
 1084: 	    tcg_gen_subi_i32(addr, REG(B11_8), 4);
 1085: 	    tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], addr, ctx->memidx);
 1086: 	    tcg_temp_free(addr);
 1087: 	    tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 4);
 1088: 	}
 1089: 	return;
 1090:     case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rn - FPSCR: Nothing */
 1091: 	CHECK_FPU_ENABLED
 1092: 	{
 1093: 	    TCGv addr = tcg_temp_new_i32();
 1094: 	    tcg_gen_add_i32(addr, REG(B7_4), REG(0));
 1095: 	    if (ctx->fpscr & FPSCR_SZ) {
 1096: 		int fr = XREG(B11_8);
 1097: 		tcg_gen_qemu_ld32u(cpu_fregs[fr	 ], addr, ctx->memidx);
 1098: 		tcg_gen_addi_i32(addr, addr, 4);
 1099: 		tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr, ctx->memidx);
 1100: 	    } else {
 1101: 		tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], addr, ctx->memidx);
 1102: 	    }
 1103: 	    tcg_temp_free(addr);
 1104: 	}
 1105: 	return;
 1106:     case 0xf007: /* fmov {F,D,X}Rm,@(R0,Rn) - FPSCR: Nothing */
 1107: 	CHECK_FPU_ENABLED
 1108: 	{
 1109: 	    TCGv addr = tcg_temp_new();
 1110: 	    tcg_gen_add_i32(addr, REG(B11_8), REG(0));
 1111: 	    if (ctx->fpscr & FPSCR_SZ) {
 1112: 		int fr = XREG(B7_4);
 1113: 		tcg_gen_qemu_st32(cpu_fregs[fr  ], addr, ctx->memidx);
 1114: 		tcg_gen_addi_i32(addr, addr, 4);
 1115: 		tcg_gen_qemu_st32(cpu_fregs[fr+1], addr, ctx->memidx);
 1116: 	    } else {
 1117: 		tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], addr, ctx->memidx);
 1118: 	    }
 1119: 	    tcg_temp_free(addr);
 1120: 	}
 1121: 	return;
 1122:     case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
 1123:     case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
 1124:     case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
 1125:     case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
 1126:     case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
 1127:     case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
 1128: 	{
 1129: 	    CHECK_FPU_ENABLED
 1130: 	    if (ctx->fpscr & FPSCR_PR) {
 1131:                 TCGv_i64 fp0, fp1;
 1132: 
 1133: 		if (ctx->opcode & 0x0110)
 1134: 		    break; /* illegal instruction */
 1135: 		fp0 = tcg_temp_new_i64();
 1136: 		fp1 = tcg_temp_new_i64();
 1137: 		gen_load_fpr64(fp0, DREG(B11_8));
 1138: 		gen_load_fpr64(fp1, DREG(B7_4));
 1139:                 switch (ctx->opcode & 0xf00f) {
 1140:                 case 0xf000:		/* fadd Rm,Rn */
 1141:                     gen_helper_fadd_DT(fp0, fp0, fp1);
 1142:                     break;
 1143:                 case 0xf001:		/* fsub Rm,Rn */
 1144:                     gen_helper_fsub_DT(fp0, fp0, fp1);
 1145:                     break;
 1146:                 case 0xf002:		/* fmul Rm,Rn */
 1147:                     gen_helper_fmul_DT(fp0, fp0, fp1);
 1148:                     break;
 1149:                 case 0xf003:		/* fdiv Rm,Rn */
 1150:                     gen_helper_fdiv_DT(fp0, fp0, fp1);
 1151:                     break;
 1152:                 case 0xf004:		/* fcmp/eq Rm,Rn */
 1153:                     gen_helper_fcmp_eq_DT(fp0, fp1);
 1154:                     return;
 1155:                 case 0xf005:		/* fcmp/gt Rm,Rn */
 1156:                     gen_helper_fcmp_gt_DT(fp0, fp1);
 1157:                     return;
 1158:                 }
 1159: 		gen_store_fpr64(fp0, DREG(B11_8));
 1160:                 tcg_temp_free_i64(fp0);
 1161:                 tcg_temp_free_i64(fp1);
 1162: 	    } else {
 1163:                 switch (ctx->opcode & 0xf00f) {
 1164:                 case 0xf000:		/* fadd Rm,Rn */
 1165:                     gen_helper_fadd_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
 1166:                     break;
 1167:                 case 0xf001:		/* fsub Rm,Rn */
 1168:                     gen_helper_fsub_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
 1169:                     break;
 1170:                 case 0xf002:		/* fmul Rm,Rn */
 1171:                     gen_helper_fmul_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
 1172:                     break;
 1173:                 case 0xf003:		/* fdiv Rm,Rn */
 1174:                     gen_helper_fdiv_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
 1175:                     break;
 1176:                 case 0xf004:		/* fcmp/eq Rm,Rn */
 1177:                     gen_helper_fcmp_eq_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
 1178:                     return;
 1179:                 case 0xf005:		/* fcmp/gt Rm,Rn */
 1180:                     gen_helper_fcmp_gt_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
 1181:                     return;
 1182:                 }
 1183: 	    }
 1184: 	}
 1185: 	return;
 1186:     case 0xf00e: /* fmac FR0,FRm,FRn */
 1187:         {
 1188:             CHECK_FPU_ENABLED
 1189:             if (ctx->fpscr & FPSCR_PR) {
 1190:                 break; /* illegal instruction */
 1191:             } else {
 1192:                 gen_helper_fmac_FT(cpu_fregs[FREG(B11_8)],
 1193:                                    cpu_fregs[FREG(0)], cpu_fregs[FREG(B7_4)], cpu_fregs[FREG(B11_8)]);
 1194:                 return;
 1195:             }
 1196:         }
 1197:     }
 1198: 
 1199:     switch (ctx->opcode & 0xff00) {
 1200:     case 0xc900:		/* and #imm,R0 */
 1201: 	tcg_gen_andi_i32(REG(0), REG(0), B7_0);
 1202: 	return;
 1203:     case 0xcd00:		/* and.b #imm,@(R0,GBR) */
 1204: 	{
 1205: 	    TCGv addr, val;
 1206: 	    addr = tcg_temp_new();
 1207: 	    tcg_gen_add_i32(addr, REG(0), cpu_gbr);
 1208: 	    val = tcg_temp_new();
 1209: 	    tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
 1210: 	    tcg_gen_andi_i32(val, val, B7_0);
 1211: 	    tcg_gen_qemu_st8(val, addr, ctx->memidx);
 1212: 	    tcg_temp_free(val);
 1213: 	    tcg_temp_free(addr);
 1214: 	}
 1215: 	return;
 1216:     case 0x8b00:		/* bf label */
 1217: 	CHECK_NOT_DELAY_SLOT
 1218: 	    gen_conditional_jump(ctx, ctx->pc + 2,
 1219: 				 ctx->pc + 4 + B7_0s * 2);
 1220: 	ctx->bstate = BS_BRANCH;
 1221: 	return;
 1222:     case 0x8f00:		/* bf/s label */
 1223: 	CHECK_NOT_DELAY_SLOT
 1224: 	gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 0);
 1225: 	ctx->flags |= DELAY_SLOT_CONDITIONAL;
 1226: 	return;
 1227:     case 0x8900:		/* bt label */
 1228: 	CHECK_NOT_DELAY_SLOT
 1229: 	    gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2,
 1230: 				 ctx->pc + 2);
 1231: 	ctx->bstate = BS_BRANCH;
 1232: 	return;
 1233:     case 0x8d00:		/* bt/s label */
 1234: 	CHECK_NOT_DELAY_SLOT
 1235: 	gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 1);
 1236: 	ctx->flags |= DELAY_SLOT_CONDITIONAL;
 1237: 	return;
 1238:     case 0x8800:		/* cmp/eq #imm,R0 */
 1239: 	gen_cmp_imm(TCG_COND_EQ, REG(0), B7_0s);
 1240: 	return;
 1241:     case 0xc400:		/* mov.b @(disp,GBR),R0 */
 1242: 	{
 1243: 	    TCGv addr = tcg_temp_new();
 1244: 	    tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
 1245: 	    tcg_gen_qemu_ld8s(REG(0), addr, ctx->memidx);
 1246: 	    tcg_temp_free(addr);
 1247: 	}
 1248: 	return;
 1249:     case 0xc500:		/* mov.w @(disp,GBR),R0 */
 1250: 	{
 1251: 	    TCGv addr = tcg_temp_new();
 1252: 	    tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
 1253: 	    tcg_gen_qemu_ld16s(REG(0), addr, ctx->memidx);
 1254: 	    tcg_temp_free(addr);
 1255: 	}
 1256: 	return;
 1257:     case 0xc600:		/* mov.l @(disp,GBR),R0 */
 1258: 	{
 1259: 	    TCGv addr = tcg_temp_new();
 1260: 	    tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
 1261: 	    tcg_gen_qemu_ld32s(REG(0), addr, ctx->memidx);
 1262: 	    tcg_temp_free(addr);
 1263: 	}
 1264: 	return;
 1265:     case 0xc000:		/* mov.b R0,@(disp,GBR) */
 1266: 	{
 1267: 	    TCGv addr = tcg_temp_new();
 1268: 	    tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
 1269: 	    tcg_gen_qemu_st8(REG(0), addr, ctx->memidx);
 1270: 	    tcg_temp_free(addr);
 1271: 	}
 1272: 	return;
 1273:     case 0xc100:		/* mov.w R0,@(disp,GBR) */
 1274: 	{
 1275: 	    TCGv addr = tcg_temp_new();
 1276: 	    tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
 1277: 	    tcg_gen_qemu_st16(REG(0), addr, ctx->memidx);
 1278: 	    tcg_temp_free(addr);
 1279: 	}
 1280: 	return;
 1281:     case 0xc200:		/* mov.l R0,@(disp,GBR) */
 1282: 	{
 1283: 	    TCGv addr = tcg_temp_new();
 1284: 	    tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
 1285: 	    tcg_gen_qemu_st32(REG(0), addr, ctx->memidx);
 1286: 	    tcg_temp_free(addr);
 1287: 	}
 1288: 	return;
 1289:     case 0x8000:		/* mov.b R0,@(disp,Rn) */
 1290: 	{
 1291: 	    TCGv addr = tcg_temp_new();
 1292: 	    tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
 1293: 	    tcg_gen_qemu_st8(REG(0), addr, ctx->memidx);
 1294: 	    tcg_temp_free(addr);
 1295: 	}
 1296: 	return;
 1297:     case 0x8100:		/* mov.w R0,@(disp,Rn) */
 1298: 	{
 1299: 	    TCGv addr = tcg_temp_new();
 1300: 	    tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
 1301: 	    tcg_gen_qemu_st16(REG(0), addr, ctx->memidx);
 1302: 	    tcg_temp_free(addr);
 1303: 	}
 1304: 	return;
 1305:     case 0x8400:		/* mov.b @(disp,Rn),R0 */
 1306: 	{
 1307: 	    TCGv addr = tcg_temp_new();
 1308: 	    tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
 1309: 	    tcg_gen_qemu_ld8s(REG(0), addr, ctx->memidx);
 1310: 	    tcg_temp_free(addr);
 1311: 	}
 1312: 	return;
 1313:     case 0x8500:		/* mov.w @(disp,Rn),R0 */
 1314: 	{
 1315: 	    TCGv addr = tcg_temp_new();
 1316: 	    tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
 1317: 	    tcg_gen_qemu_ld16s(REG(0), addr, ctx->memidx);
 1318: 	    tcg_temp_free(addr);
 1319: 	}
 1320: 	return;
 1321:     case 0xc700:		/* mova @(disp,PC),R0 */
 1322: 	tcg_gen_movi_i32(REG(0), ((ctx->pc & 0xfffffffc) + 4 + B7_0 * 4) & ~3);
 1323: 	return;
 1324:     case 0xcb00:		/* or #imm,R0 */
 1325: 	tcg_gen_ori_i32(REG(0), REG(0), B7_0);
 1326: 	return;
 1327:     case 0xcf00:		/* or.b #imm,@(R0,GBR) */
 1328: 	{
 1329: 	    TCGv addr, val;
 1330: 	    addr = tcg_temp_new();
 1331: 	    tcg_gen_add_i32(addr, REG(0), cpu_gbr);
 1332: 	    val = tcg_temp_new();
 1333: 	    tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
 1334: 	    tcg_gen_ori_i32(val, val, B7_0);
 1335: 	    tcg_gen_qemu_st8(val, addr, ctx->memidx);
 1336: 	    tcg_temp_free(val);
 1337: 	    tcg_temp_free(addr);
 1338: 	}
 1339: 	return;
 1340:     case 0xc300:		/* trapa #imm */
 1341: 	{
 1342: 	    TCGv imm;
 1343: 	    CHECK_NOT_DELAY_SLOT
 1344: 	    tcg_gen_movi_i32(cpu_pc, ctx->pc);
 1345: 	    imm = tcg_const_i32(B7_0);
 1346: 	    gen_helper_trapa(imm);
 1347: 	    tcg_temp_free(imm);
 1348: 	    ctx->bstate = BS_BRANCH;
 1349: 	}
 1350: 	return;
 1351:     case 0xc800:		/* tst #imm,R0 */
 1352: 	{
 1353: 	    TCGv val = tcg_temp_new();
 1354: 	    tcg_gen_andi_i32(val, REG(0), B7_0);
 1355: 	    gen_cmp_imm(TCG_COND_EQ, val, 0);
 1356: 	    tcg_temp_free(val);
 1357: 	}
 1358: 	return;
 1359:     case 0xcc00:		/* tst.b #imm,@(R0,GBR) */
 1360: 	{
 1361: 	    TCGv val = tcg_temp_new();
 1362: 	    tcg_gen_add_i32(val, REG(0), cpu_gbr);
 1363: 	    tcg_gen_qemu_ld8u(val, val, ctx->memidx);
 1364: 	    tcg_gen_andi_i32(val, val, B7_0);
 1365: 	    gen_cmp_imm(TCG_COND_EQ, val, 0);
 1366: 	    tcg_temp_free(val);
 1367: 	}
 1368: 	return;
 1369:     case 0xca00:		/* xor #imm,R0 */
 1370: 	tcg_gen_xori_i32(REG(0), REG(0), B7_0);
 1371: 	return;
 1372:     case 0xce00:		/* xor.b #imm,@(R0,GBR) */
 1373: 	{
 1374: 	    TCGv addr, val;
 1375: 	    addr = tcg_temp_new();
 1376: 	    tcg_gen_add_i32(addr, REG(0), cpu_gbr);
 1377: 	    val = tcg_temp_new();
 1378: 	    tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
 1379: 	    tcg_gen_xori_i32(val, val, B7_0);
 1380: 	    tcg_gen_qemu_st8(val, addr, ctx->memidx);
 1381: 	    tcg_temp_free(val);
 1382: 	    tcg_temp_free(addr);
 1383: 	}
 1384: 	return;
 1385:     }
 1386: 
 1387:     switch (ctx->opcode & 0xf08f) {
 1388:     case 0x408e:		/* ldc Rm,Rn_BANK */
 1389: 	CHECK_PRIVILEGED
 1390: 	tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
 1391: 	return;
 1392:     case 0x4087:		/* ldc.l @Rm+,Rn_BANK */
 1393: 	CHECK_PRIVILEGED
 1394: 	tcg_gen_qemu_ld32s(ALTREG(B6_4), REG(B11_8), ctx->memidx);
 1395: 	tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
 1396: 	return;
 1397:     case 0x0082:		/* stc Rm_BANK,Rn */
 1398: 	CHECK_PRIVILEGED
 1399: 	tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
 1400: 	return;
 1401:     case 0x4083:		/* stc.l Rm_BANK,@-Rn */
 1402: 	CHECK_PRIVILEGED
 1403: 	{
 1404: 	    TCGv addr = tcg_temp_new();
 1405: 	    tcg_gen_subi_i32(addr, REG(B11_8), 4);
 1406: 	    tcg_gen_qemu_st32(ALTREG(B6_4), addr, ctx->memidx);
 1407: 	    tcg_temp_free(addr);
 1408: 	    tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 4);
 1409: 	}
 1410: 	return;
 1411:     }
 1412: 
 1413:     switch (ctx->opcode & 0xf0ff) {
 1414:     case 0x0023:		/* braf Rn */
 1415: 	CHECK_NOT_DELAY_SLOT
 1416: 	tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->pc + 4);
 1417: 	ctx->flags |= DELAY_SLOT;
 1418: 	ctx->delayed_pc = (uint32_t) - 1;
 1419: 	return;
 1420:     case 0x0003:		/* bsrf Rn */
 1421: 	CHECK_NOT_DELAY_SLOT
 1422: 	tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
 1423: 	tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
 1424: 	ctx->flags |= DELAY_SLOT;
 1425: 	ctx->delayed_pc = (uint32_t) - 1;
 1426: 	return;
 1427:     case 0x4015:		/* cmp/pl Rn */
 1428: 	gen_cmp_imm(TCG_COND_GT, REG(B11_8), 0);
 1429: 	return;
 1430:     case 0x4011:		/* cmp/pz Rn */
 1431: 	gen_cmp_imm(TCG_COND_GE, REG(B11_8), 0);
 1432: 	return;
 1433:     case 0x4010:		/* dt Rn */
 1434: 	tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
 1435: 	gen_cmp_imm(TCG_COND_EQ, REG(B11_8), 0);
 1436: 	return;
 1437:     case 0x402b:		/* jmp @Rn */
 1438: 	CHECK_NOT_DELAY_SLOT
 1439: 	tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
 1440: 	ctx->flags |= DELAY_SLOT;
 1441: 	ctx->delayed_pc = (uint32_t) - 1;
 1442: 	return;
 1443:     case 0x400b:		/* jsr @Rn */
 1444: 	CHECK_NOT_DELAY_SLOT
 1445: 	tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
 1446: 	tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
 1447: 	ctx->flags |= DELAY_SLOT;
 1448: 	ctx->delayed_pc = (uint32_t) - 1;
 1449: 	return;
 1450:     case 0x400e:		/* ldc Rm,SR */
 1451: 	CHECK_PRIVILEGED
 1452: 	tcg_gen_andi_i32(cpu_sr, REG(B11_8), 0x700083f3);
 1453: 	ctx->bstate = BS_STOP;
 1454: 	return;
 1455:     case 0x4007:		/* ldc.l @Rm+,SR */
 1456: 	CHECK_PRIVILEGED
 1457: 	{
 1458: 	    TCGv val = tcg_temp_new();
 1459: 	    tcg_gen_qemu_ld32s(val, REG(B11_8), ctx->memidx);
 1460: 	    tcg_gen_andi_i32(cpu_sr, val, 0x700083f3);
 1461: 	    tcg_temp_free(val);
 1462: 	    tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
 1463: 	    ctx->bstate = BS_STOP;
 1464: 	}
 1465: 	return;
 1466:     case 0x0002:		/* stc SR,Rn */
 1467: 	CHECK_PRIVILEGED
 1468: 	tcg_gen_mov_i32(REG(B11_8), cpu_sr);
 1469: 	return;
 1470:     case 0x4003:		/* stc SR,@-Rn */
 1471: 	CHECK_PRIVILEGED
 1472: 	{
 1473: 	    TCGv addr = tcg_temp_new();
 1474: 	    tcg_gen_subi_i32(addr, REG(B11_8), 4);
 1475: 	    tcg_gen_qemu_st32(cpu_sr, addr, ctx->memidx);
 1476: 	    tcg_temp_free(addr);
 1477: 	    tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 4);
 1478: 	}
 1479: 	return;
 1480: #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk)		\
 1481:   case ldnum:							\
 1482:     prechk    							\
 1483:     tcg_gen_mov_i32 (cpu_##reg, REG(B11_8));			\
 1484:     return;							\
 1485:   case ldpnum:							\
 1486:     prechk    							\
 1487:     tcg_gen_qemu_ld32s (cpu_##reg, REG(B11_8), ctx->memidx);	\
 1488:     tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);		\
 1489:     return;							\
 1490:   case stnum:							\
 1491:     prechk    							\
 1492:     tcg_gen_mov_i32 (REG(B11_8), cpu_##reg);			\
 1493:     return;							\
 1494:   case stpnum:							\
 1495:     prechk    							\
 1496:     {								\
 1497: 	TCGv addr = tcg_temp_new();			\
 1498: 	tcg_gen_subi_i32(addr, REG(B11_8), 4);			\
 1499: 	tcg_gen_qemu_st32 (cpu_##reg, addr, ctx->memidx);	\
 1500: 	tcg_temp_free(addr);					\
 1501: 	tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 4);		\
 1502:     }								\
 1503:     return;
 1504: 	LDST(gbr,  0x401e, 0x4017, 0x0012, 0x4013, {})
 1505: 	LDST(vbr,  0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
 1506: 	LDST(ssr,  0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
 1507: 	LDST(spc,  0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
 1508: 	LDST(dbr,  0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
 1509: 	LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
 1510: 	LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
 1511: 	LDST(pr,   0x402a, 0x4026, 0x002a, 0x4022, {})
 1512: 	LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
 1513:     case 0x406a:		/* lds Rm,FPSCR */
 1514: 	CHECK_FPU_ENABLED
 1515: 	gen_helper_ld_fpscr(REG(B11_8));
 1516: 	ctx->bstate = BS_STOP;
 1517: 	return;
 1518:     case 0x4066:		/* lds.l @Rm+,FPSCR */
 1519: 	CHECK_FPU_ENABLED
 1520: 	{
 1521: 	    TCGv val = tcg_temp_new();
 1522: 	    tcg_gen_qemu_ld32s(val, REG(B11_8), ctx->memidx);
 1523: 	    tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
 1524: 	    gen_helper_ld_fpscr(val);
 1525: 	    tcg_temp_free(val);
 1526: 	    ctx->bstate = BS_STOP;
 1527: 	}
 1528: 	return;
 1529:     case 0x006a:		/* sts FPSCR,Rn */
 1530: 	CHECK_FPU_ENABLED
 1531: 	tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
 1532: 	return;
 1533:     case 0x4062:		/* sts FPSCR,@-Rn */
 1534: 	CHECK_FPU_ENABLED
 1535: 	{
 1536: 	    TCGv addr, val;
 1537: 	    val = tcg_temp_new();
 1538: 	    tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
 1539: 	    addr = tcg_temp_new();
 1540: 	    tcg_gen_subi_i32(addr, REG(B11_8), 4);
 1541: 	    tcg_gen_qemu_st32(val, addr, ctx->memidx);
 1542: 	    tcg_temp_free(addr);
 1543: 	    tcg_temp_free(val);
 1544: 	    tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 4);
 1545: 	}
 1546: 	return;
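    /* movca.l is translated as a plain 32-bit store; the cache-allocation
       hint has no effect in this model. */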
 1547:     case 0x00c3:		/* movca.l R0,@Rn */
 1548: 	tcg_gen_qemu_st32(REG(0), REG(B11_8), ctx->memidx);
 1549: 	return;
 1550:     case 0x40a9:
 1551: 	/* MOVUA.L @Rm,R0 (Rm) -> R0
 1552: 	   Load non-boundary-aligned data */
 1553: 	tcg_gen_qemu_ld32u(REG(0), REG(B11_8), ctx->memidx);
 1554: 	return;
 1555:     case 0x40e9:
 1556: 	/* MOVUA.L @Rm+,R0   (Rm) -> R0, Rm + 4 -> Rm
 1557: 	   Load non-boundary-aligned data */
 1558: 	tcg_gen_qemu_ld32u(REG(0), REG(B11_8), ctx->memidx);
 1559: 	tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
 1560: 	return;
 1561:     case 0x0029:		/* movt Rn */
 1562: 	tcg_gen_andi_i32(REG(B11_8), cpu_sr, SR_T);
 1563: 	return;
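    /* MOVCO.L/MOVLI.L (SH4A) form a load-linked/store-conditional pair:
       MOVLI.L sets the cpu_ldst link flag after loading, and MOVCO.L
       stores R0 only while that flag is still set, copying the flag into
       T and then clearing it.  No stronger atomicity is modelled here. */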
 1564:     case 0x0073:
 1565:         /* MOVCO.L
 1566:                LDST -> T
 1567:                If (T == 1) R0 -> (Rn)
 1568:                0 -> LDST
 1569:         */
 1570:         if (ctx->features & SH_FEATURE_SH4A) {
 1571: 	    int label = gen_new_label();
 1572: 	    gen_clr_t();
 1573: 	    tcg_gen_or_i32(cpu_sr, cpu_sr, cpu_ldst);
 1574: 	    tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ldst, 0, label);
 1575: 	    tcg_gen_qemu_st32(REG(0), REG(B11_8), ctx->memidx);
 1576: 	    gen_set_label(label);
 1577: 	    tcg_gen_movi_i32(cpu_ldst, 0);
 1578: 	    return;
 1579: 	} else
 1580: 	    break;
 1581:     case 0x0063:
 1582:         /* MOVLI.L @Rm,R0
 1583:                1 -> LDST
 1584:                (Rm) -> R0
 1585:                If an interrupt or exception
 1586:                occurs, 0 -> LDST
 1587:         */
 1588: 	if (ctx->features & SH_FEATURE_SH4A) {
 1589: 	    tcg_gen_movi_i32(cpu_ldst, 0);
 1590: 	    tcg_gen_qemu_ld32s(REG(0), REG(B11_8), ctx->memidx);
 1591: 	    tcg_gen_movi_i32(cpu_ldst, 1);
 1592: 	    return;
 1593: 	} else
 1594: 	    break;
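    /* The cache-control operations below (ocbi/ocbp/ocbwb) only perform a
       dummy load so that any MMU fault on the address is still raised;
       the operand cache itself is not modelled.  pref, prefi, icbi and
       synco are likewise accepted as no-ops. */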
 1595:     case 0x0093:		/* ocbi @Rn */
 1596: 	{
 1597: 	    TCGv dummy = tcg_temp_new();
 1598: 	    tcg_gen_qemu_ld32s(dummy, REG(B11_8), ctx->memidx);
 1599: 	    tcg_temp_free(dummy);
 1600: 	}
 1601: 	return;
 1602:     case 0x00a3:		/* ocbp @Rn */
 1603: 	{
 1604: 	    TCGv dummy = tcg_temp_new();
 1605: 	    tcg_gen_qemu_ld32s(dummy, REG(B11_8), ctx->memidx);
 1606: 	    tcg_temp_free(dummy);
 1607: 	}
 1608: 	return;
 1609:     case 0x00b3:		/* ocbwb @Rn */
 1610: 	{
 1611: 	    TCGv dummy = tcg_temp_new();
 1612: 	    tcg_gen_qemu_ld32s(dummy, REG(B11_8), ctx->memidx);
 1613: 	    tcg_temp_free(dummy);
 1614: 	}
 1615: 	return;
 1616:     case 0x0083:		/* pref @Rn */
 1617: 	return;
 1618:     case 0x00d3:		/* prefi @Rn */
 1619: 	if (ctx->features & SH_FEATURE_SH4A)
 1620: 	    return;
 1621: 	else
 1622: 	    break;
 1623:     case 0x00e3:		/* icbi @Rn */
 1624: 	if (ctx->features & SH_FEATURE_SH4A)
 1625: 	    return;
 1626: 	else
 1627: 	    break;
 1628:     case 0x00ab:		/* synco */
 1629: 	if (ctx->features & SH_FEATURE_SH4A)
 1630: 	    return;
 1631: 	else
 1632: 	    break;
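    /* Rotates: rotcl/rotcr rotate Rn by one bit through the T flag
       (the old T value becomes the shifted-in bit), while rotl/rotr
       rotate within Rn and copy the bit that wraps around into T. */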
 1633:     case 0x4024:		/* rotcl Rn */
 1634: 	{
 1635: 	    TCGv tmp = tcg_temp_new();
 1636: 	    tcg_gen_mov_i32(tmp, cpu_sr);
 1637: 	    gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31);
 1638: 	    tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
 1639: 	    gen_copy_bit_i32(REG(B11_8), 0, tmp, 0);
 1640: 	    tcg_temp_free(tmp);
 1641: 	}
 1642: 	return;
 1643:     case 0x4025:		/* rotcr Rn */
 1644: 	{
 1645: 	    TCGv tmp = tcg_temp_new();
 1646: 	    tcg_gen_mov_i32(tmp, cpu_sr);
 1647: 	    gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
 1648: 	    tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
 1649: 	    gen_copy_bit_i32(REG(B11_8), 31, tmp, 0);
 1650: 	    tcg_temp_free(tmp);
 1651: 	}
 1652: 	return;
 1653:     case 0x4004:		/* rotl Rn */
 1654: 	gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31);
 1655: 	tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
 1656: 	gen_copy_bit_i32(REG(B11_8), 0, cpu_sr, 0);
 1657: 	return;
 1658:     case 0x4005:		/* rotr Rn */
 1659: 	gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
 1660: 	tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
 1661: 	gen_copy_bit_i32(REG(B11_8), 31, cpu_sr, 0);
 1662: 	return;
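    /* shll and shal both shift in a zero from the right, so the two
       mnemonics share a single translation. */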
 1663:     case 0x4000:		/* shll Rn */
 1664:     case 0x4020:		/* shal Rn */
 1665: 	gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31);
 1666: 	tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
 1667: 	return;
 1668:     case 0x4021:		/* shar Rn */
 1669: 	gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
 1670: 	tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
 1671: 	return;
 1672:     case 0x4001:		/* shlr Rn */
 1673: 	gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
 1674: 	tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
 1675: 	return;
 1676:     case 0x4008:		/* shll2 Rn */
 1677: 	tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
 1678: 	return;
 1679:     case 0x4018:		/* shll8 Rn */
 1680: 	tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
 1681: 	return;
 1682:     case 0x4028:		/* shll16 Rn */
 1683: 	tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
 1684: 	return;
 1685:     case 0x4009:		/* shlr2 Rn */
 1686: 	tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
 1687: 	return;
 1688:     case 0x4019:		/* shlr8 Rn */
 1689: 	tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
 1690: 	return;
 1691:     case 0x4029:		/* shlr16 Rn */
 1692: 	tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
 1693: 	return;
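    /* tas.b: T = ((byte at @Rn) == 0), then store the byte back with
       bit 7 set.  Implemented here as a non-atomic read-modify-write. */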
 1694:     case 0x401b:		/* tas.b @Rn */
 1695: 	{
 1696: 	    TCGv addr, val;
 1697: 	    addr = tcg_temp_local_new();
 1698: 	    tcg_gen_mov_i32(addr, REG(B11_8));
 1699: 	    val = tcg_temp_local_new();
 1700: 	    tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
 1701: 	    gen_cmp_imm(TCG_COND_EQ, val, 0);
 1702: 	    tcg_gen_ori_i32(val, val, 0x80);
 1703: 	    tcg_gen_qemu_st8(val, addr, ctx->memidx);
 1704: 	    tcg_temp_free(val);
 1705: 	    tcg_temp_free(addr);
 1706: 	}
 1707: 	return;
 1708:     case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
 1709: 	CHECK_FPU_ENABLED
 1710: 	tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fpul);
 1711: 	return;
 1712:     case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
 1713: 	CHECK_FPU_ENABLED
 1714: 	tcg_gen_mov_i32(cpu_fpul, cpu_fregs[FREG(B11_8)]);
 1715: 	return;
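    /* For the FP operations below, FPSCR.PR selects double precision on
       the DRn register pair; odd register encodings (opcode bit 8 set)
       are rejected as illegal instructions in that mode. */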
 1716:     case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
 1717: 	CHECK_FPU_ENABLED
 1718: 	if (ctx->fpscr & FPSCR_PR) {
 1719: 	    TCGv_i64 fp;
 1720: 	    if (ctx->opcode & 0x0100)
 1721: 		break; /* illegal instruction */
 1722: 	    fp = tcg_temp_new_i64();
 1723: 	    gen_helper_float_DT(fp, cpu_fpul);
 1724: 	    gen_store_fpr64(fp, DREG(B11_8));
 1725: 	    tcg_temp_free_i64(fp);
 1726: 	}
 1727: 	else {
 1728: 	    gen_helper_float_FT(cpu_fregs[FREG(B11_8)], cpu_fpul);
 1729: 	}
 1730: 	return;
 1731:     case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
 1732: 	CHECK_FPU_ENABLED
 1733: 	if (ctx->fpscr & FPSCR_PR) {
 1734: 	    TCGv_i64 fp;
 1735: 	    if (ctx->opcode & 0x0100)
 1736: 		break; /* illegal instruction */
 1737: 	    fp = tcg_temp_new_i64();
 1738: 	    gen_load_fpr64(fp, DREG(B11_8));
 1739: 	    gen_helper_ftrc_DT(cpu_fpul, fp);
 1740: 	    tcg_temp_free_i64(fp);
 1741: 	}
 1742: 	else {
 1743: 	    gen_helper_ftrc_FT(cpu_fpul, cpu_fregs[FREG(B11_8)]);
 1744: 	}
 1745: 	return;
 1746:     case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
 1747: 	CHECK_FPU_ENABLED
 1748: 	{
 1749: 	    gen_helper_fneg_T(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
 1750: 	}
 1751: 	return;
 1752:     case 0xf05d: /* fabs FRn/DRn */
 1753: 	CHECK_FPU_ENABLED
 1754: 	if (ctx->fpscr & FPSCR_PR) {
 1755: 	    if (ctx->opcode & 0x0100)
 1756: 		break; /* illegal instruction */
 1757: 	    TCGv_i64 fp = tcg_temp_new_i64();
 1758: 	    gen_load_fpr64(fp, DREG(B11_8));
 1759: 	    gen_helper_fabs_DT(fp, fp);
 1760: 	    gen_store_fpr64(fp, DREG(B11_8));
 1761: 	    tcg_temp_free_i64(fp);
 1762: 	} else {
 1763: 	    gen_helper_fabs_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
 1764: 	}
 1765: 	return;
 1766:     case 0xf06d: /* fsqrt FRn */
 1767: 	CHECK_FPU_ENABLED
 1768: 	if (ctx->fpscr & FPSCR_PR) {
 1769: 	    if (ctx->opcode & 0x0100)
 1770: 		break; /* illegal instruction */
 1771: 	    TCGv_i64 fp = tcg_temp_new_i64();
 1772: 	    gen_load_fpr64(fp, DREG(B11_8));
 1773: 	    gen_helper_fsqrt_DT(fp, fp);
 1774: 	    gen_store_fpr64(fp, DREG(B11_8));
 1775: 	    tcg_temp_free_i64(fp);
 1776: 	} else {
 1777: 	    gen_helper_fsqrt_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
 1778: 	}
 1779: 	return;
 1780:     case 0xf07d: /* fsrra FRn */
 1781: 	CHECK_FPU_ENABLED
 1782: 	break;	/* fsrra is not implemented; treated as an illegal instruction */
 1783:     case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
 1784: 	CHECK_FPU_ENABLED
 1785: 	if (!(ctx->fpscr & FPSCR_PR)) {
 1786: 	    tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0);
 1787: 	}
 1788: 	return;
 1789:     case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
 1790: 	CHECK_FPU_ENABLED
 1791: 	if (!(ctx->fpscr & FPSCR_PR)) {
 1792: 	    tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0x3f800000); /* 1.0f */
 1793: 	}
 1794: 	return;
 1795:     case 0xf0ad: /* fcnvsd FPUL,DRn */
 1796: 	CHECK_FPU_ENABLED
 1797: 	{
 1798: 	    TCGv_i64 fp = tcg_temp_new_i64();
 1799: 	    gen_helper_fcnvsd_FT_DT(fp, cpu_fpul);
 1800: 	    gen_store_fpr64(fp, DREG(B11_8));
 1801: 	    tcg_temp_free_i64(fp);
 1802: 	}
 1803: 	return;
 1804:     case 0xf0bd: /* fcnvds DRn,FPUL */
 1805: 	CHECK_FPU_ENABLED
 1806: 	{
 1807: 	    TCGv_i64 fp = tcg_temp_new_i64();
 1808: 	    gen_load_fpr64(fp, DREG(B11_8));
 1809: 	    gen_helper_fcnvds_DT_FT(cpu_fpul, fp);
 1810: 	    tcg_temp_free_i64(fp);
 1811: 	}
 1812: 	return;
 1813:     }
 1814: #if 0
 1815:     fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
 1816: 	    ctx->opcode, ctx->pc);
 1817:     fflush(stderr);
 1818: #endif
 1819:     gen_helper_raise_illegal_instruction();
 1820:     ctx->bstate = BS_EXCP;
 1821: }
 1822: 
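/* decode_opc() translates one instruction and then resolves any pending
 * delay slot: if the previous instruction set DELAY_SLOT or
 * DELAY_SLOT_CONDITIONAL, the instruction just translated was the slot,
 * so the stored flags are cleared and the delayed (conditional) jump is
 * emitted, ending the block with BS_BRANCH.  For a sequence such as
 *
 *     bra  target
 *     mov  #1,r0        ! delay slot
 *
 * the bra only records the target and sets DELAY_SLOT; the jump itself
 * is generated here, after the mov has been translated. */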
 1823: static void decode_opc(DisasContext * ctx)
 1824: {
 1825:     uint32_t old_flags = ctx->flags;
 1826: 
 1827:     _decode_opc(ctx);
 1828: 
 1829:     if (old_flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
 1830:         if (ctx->flags & DELAY_SLOT_CLEARME) {
 1831:             gen_store_flags(0);
 1832:         } else {
 1833: 	    /* go out of the delay slot */
 1834: 	    uint32_t new_flags = ctx->flags;
 1835: 	    new_flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
 1836: 	    gen_store_flags(new_flags);
 1837:         }
 1838:         ctx->flags = 0;
 1839:         ctx->bstate = BS_BRANCH;
 1840:         if (old_flags & DELAY_SLOT_CONDITIONAL) {
 1841: 	    gen_delayed_conditional_jump(ctx);
 1842:         } else if (old_flags & DELAY_SLOT) {
 1843:             gen_jump(ctx);
 1844: 	}
 1845: 
 1846:     }
 1847: 
 1848:     /* go into a delay slot */
 1849:     if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))
 1850:         gen_store_flags(ctx->flags);
 1851: }
 1852: 
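/* Main translation loop: fetch 16-bit opcodes starting at tb->pc,
 * translating until an instruction ends the block (ctx.bstate != BS_NONE),
 * a page boundary is reached, single-stepping is enabled, or max_insns is
 * hit.  With search_pc set, per-op pc/flags/icount values are recorded so
 * that gen_pc_load() can restore the guest state after a fault. */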
 1853: static inline void
 1854: gen_intermediate_code_internal(CPUState * env, TranslationBlock * tb,
 1855:                                int search_pc)
 1856: {
 1857:     DisasContext ctx;
 1858:     target_ulong pc_start;
 1859:     static uint16_t *gen_opc_end;
 1860:     CPUBreakpoint *bp;
 1861:     int i, ii;
 1862:     int num_insns;
 1863:     int max_insns;
 1864: 
 1865:     pc_start = tb->pc;
 1866:     gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
 1867:     ctx.pc = pc_start;
 1868:     ctx.flags = (uint32_t)tb->flags;
 1869:     ctx.bstate = BS_NONE;
 1870:     ctx.sr = env->sr;
 1871:     ctx.fpscr = env->fpscr;
 1872:     ctx.memidx = (env->sr & SR_MD) ? 1 : 0;	/* MMU index: privileged vs. user */
 1873:     /* We don't know if the delayed pc came from a dynamic or static branch,
 1874:        so assume it is a dynamic branch.  */
 1875:     ctx.delayed_pc = -1; /* use delayed pc from env pointer */
 1876:     ctx.tb = tb;
 1877:     ctx.singlestep_enabled = env->singlestep_enabled;
 1878:     ctx.features = env->features;
 1879: 
 1880: #ifdef DEBUG_DISAS
 1881:     qemu_log_mask(CPU_LOG_TB_CPU,
 1882:                  "------------------------------------------------\n");
 1883:     log_cpu_state_mask(CPU_LOG_TB_CPU, env, 0);
 1884: #endif
 1885: 
 1886:     ii = -1;
 1887:     num_insns = 0;
 1888:     max_insns = tb->cflags & CF_COUNT_MASK;
 1889:     if (max_insns == 0)
 1890:         max_insns = CF_COUNT_MASK;
 1891:     gen_icount_start();
 1892:     while (ctx.bstate == BS_NONE && gen_opc_ptr < gen_opc_end) {
 1893:         if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
 1894:             TAILQ_FOREACH(bp, &env->breakpoints, entry) {
 1895:                 if (ctx.pc == bp->pc) {
 1896: 		    /* We have hit a breakpoint - make sure PC is up-to-date */
 1897: 		    tcg_gen_movi_i32(cpu_pc, ctx.pc);
 1898: 		    gen_helper_debug();
 1899: 		    ctx.bstate = BS_EXCP;
 1900: 		    break;
 1901: 		}
 1902: 	    }
 1903: 	}
 1904:         if (search_pc) {
 1905:             i = gen_opc_ptr - gen_opc_buf;
 1906:             if (ii < i) {
 1907:                 ii++;
 1908:                 while (ii < i)
 1909:                     gen_opc_instr_start[ii++] = 0;
 1910:             }
 1911:             gen_opc_pc[ii] = ctx.pc;
 1912:             gen_opc_hflags[ii] = ctx.flags;
 1913:             gen_opc_instr_start[ii] = 1;
 1914:             gen_opc_icount[ii] = num_insns;
 1915:         }
 1916:         if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
 1917:             gen_io_start();
 1918: #if 0
 1919: 	fprintf(stderr, "Loading opcode at address 0x%08x\n", ctx.pc);
 1920: 	fflush(stderr);
 1921: #endif
 1922: 	ctx.opcode = lduw_code(ctx.pc);
 1923: 	decode_opc(&ctx);
 1924:         num_insns++;
 1925: 	ctx.pc += 2;
 1926: 	if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
 1927: 	    break;		/* do not let a TB cross a page boundary */
 1928: 	if (env->singlestep_enabled)
 1929: 	    break;
 1930:         if (num_insns >= max_insns)
 1931:             break;
 1932: #ifdef SH4_SINGLE_STEP
 1933: 	break;
 1934: #endif
 1935:     }
 1936:     if (tb->cflags & CF_LAST_IO)
 1937:         gen_io_end();
 1938:     if (env->singlestep_enabled) {
 1939:         tcg_gen_movi_i32(cpu_pc, ctx.pc);
 1940:         gen_helper_debug();
 1941:     } else {
 1942: 	switch (ctx.bstate) {
 1943:         case BS_STOP:
 1944:             /* gen_op_interrupt_restart(); */
 1945:             /* fall through */
 1946:         case BS_NONE:
 1947:             if (ctx.flags) {
 1948:                 gen_store_flags(ctx.flags | DELAY_SLOT_CLEARME);
 1949: 	    }
 1950:             gen_goto_tb(&ctx, 0, ctx.pc);
 1951:             break;
 1952:         case BS_EXCP:
 1953:             /* gen_op_interrupt_restart(); */
 1954:             tcg_gen_exit_tb(0);
 1955:             break;
 1956:         case BS_BRANCH:
 1957:         default:
 1958:             break;
 1959: 	}
 1960:     }
 1961: 
 1962:     gen_icount_end(tb, num_insns);
 1963:     *gen_opc_ptr = INDEX_op_end;
 1964:     if (search_pc) {
 1965:         i = gen_opc_ptr - gen_opc_buf;
 1966:         ii++;
 1967:         while (ii <= i)
 1968:             gen_opc_instr_start[ii++] = 0;
 1969:     } else {
 1970:         tb->size = ctx.pc - pc_start;
 1971:         tb->icount = num_insns;
 1972:     }
 1973: 
 1974: #ifdef DEBUG_DISAS
 1975: #ifdef SH4_DEBUG_DISAS
 1976:     qemu_log_mask(CPU_LOG_TB_IN_ASM, "\n");
 1977: #endif
 1978:     if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
 1979: 	qemu_log("IN:\n");	/* , lookup_symbol(pc_start)); */
 1980: 	log_target_disas(pc_start, ctx.pc - pc_start, 0);
 1981: 	qemu_log("\n");
 1982:     }
 1983: #endif
 1984: }
 1985: 
 1986: void gen_intermediate_code(CPUState * env, struct TranslationBlock *tb)
 1987: {
 1988:     gen_intermediate_code_internal(env, tb, 0);
 1989: }
 1990: 
 1991: void gen_intermediate_code_pc(CPUState * env, struct TranslationBlock *tb)
 1992: {
 1993:     gen_intermediate_code_internal(env, tb, 1);
 1994: }
 1995: 
 1996: void gen_pc_load(CPUState *env, TranslationBlock *tb,
 1997:                 unsigned long searched_pc, int pc_pos, void *puc)
 1998: {
 1999:     env->pc = gen_opc_pc[pc_pos];
 2000:     env->flags = gen_opc_hflags[pc_pos];
 2001: }
