# HG changeset patch
# User Mike Pavone
# Date 1357013349 28800
# Node ID 5416a5c4628e05ca3648473c05b9e902b6459f24
# Parent 15b8dce19cf48497322b8a6b652af4f582b7f2dd
Implement most of the "X" instructions

diff -r 15b8dce19cf4 -r 5416a5c4628e gen_x86.c
--- a/gen_x86.c	Mon Dec 31 19:17:01 2012 -0800
+++ b/gen_x86.c	Mon Dec 31 20:09:09 2012 -0800
@@ -654,6 +654,31 @@
 	return x86_rrdisp8_sizedir(out, OP_ADD, dst, src_base, disp, size, BIT_DIR);
 }
 
+uint8_t * adc_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size)
+{
+	return x86_rr_sizedir(out, OP_ADC, src, dst, size);
+}
+
+uint8_t * adc_ir(uint8_t * out, int32_t val, uint8_t dst, uint8_t size)
+{
+	return x86_ir(out, OP_IMMED_ARITH, OP_EX_ADCI, OP_ADC, val, dst, size);
+}
+
+uint8_t * adc_irdisp8(uint8_t * out, int32_t val, uint8_t dst_base, int8_t disp, uint8_t size)
+{
+	return x86_irdisp8(out, OP_IMMED_ARITH, OP_EX_ADCI, val, dst_base, disp, size);
+}
+
+uint8_t * adc_rrdisp8(uint8_t * out, uint8_t src, uint8_t dst_base, int8_t disp, uint8_t size)
+{
+	return x86_rrdisp8_sizedir(out, OP_ADC, src, dst_base, disp, size, 0);
+}
+
+uint8_t * adc_rdisp8r(uint8_t * out, uint8_t src_base, int8_t disp, uint8_t dst, uint8_t size)
+{
+	return x86_rrdisp8_sizedir(out, OP_ADC, dst, src_base, disp, size, BIT_DIR);
+}
+
 uint8_t * or_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size)
 {
 	return x86_rr_sizedir(out, OP_OR, src, dst, size);
@@ -753,6 +778,31 @@
 	return x86_rrdisp8_sizedir(out, OP_SUB, dst, src_base, disp, size, BIT_DIR);
 }
 
+uint8_t * sbb_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size)
+{
+	return x86_rr_sizedir(out, OP_SBB, src, dst, size);
+}
+
+uint8_t * sbb_ir(uint8_t * out, int32_t val, uint8_t dst, uint8_t size)
+{
+	return x86_ir(out, OP_IMMED_ARITH, OP_EX_SBBI, OP_SBB, val, dst, size);
+}
+
+uint8_t * sbb_irdisp8(uint8_t * out, int32_t val, uint8_t dst_base, int8_t disp, uint8_t size)
+{
+	return x86_irdisp8(out, OP_IMMED_ARITH, OP_EX_SBBI, val, dst_base, disp, size);
+}
+
+uint8_t * sbb_rrdisp8(uint8_t * out, uint8_t src, uint8_t dst_base, int8_t disp, uint8_t size)
+{
+	return x86_rrdisp8_sizedir(out, OP_SBB, src, dst_base, disp, size, 0);
+}
+
+uint8_t * sbb_rdisp8r(uint8_t * out, uint8_t src_base, int8_t disp, uint8_t dst, uint8_t size)
+{
+	return x86_rrdisp8_sizedir(out, OP_SBB, dst, src_base, disp, size, BIT_DIR);
+}
+
 uint8_t * cmp_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size)
 {
 	return x86_rr_sizedir(out, OP_CMP, src, dst, size);
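The adc_* and sbb_* emitters added above mirror the existing add_*/sub_* wrappers: each one forwards to the shared x86_rr_sizedir/x86_ir/x86_irdisp8/x86_rrdisp8_sizedir encoders with the ADC/SBB opcodes and, for the immediate forms, the corresponding opcode-extension values. A minimal usage sketch follows; it assumes the register index constants (RAX, RBX, ...) and size codes (SZ_D) that gen_x86.h defines outside this excerpt, and emit_add64 is an illustrative name, not part of the patch:

#include "gen_x86.h"

// Emit a 64-bit add composed of two 32-bit halves: the low-word ADD sets the
// x86 carry, and the new adc_rr wrapper emits an ADC that consumes it.
uint8_t * emit_add64(uint8_t * out)
{
	out = add_rr(out, RCX, RAX, SZ_D); // eax += ecx, sets CF
	out = adc_rr(out, RBX, RDX, SZ_D); // edx += ebx + CF
	return out;
}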
diff -r 15b8dce19cf4 -r 5416a5c4628e gen_x86.h
--- a/gen_x86.h	Mon Dec 31 19:17:01 2012 -0800
+++ b/gen_x86.h	Mon Dec 31 20:09:09 2012 -0800
@@ -94,25 +94,33 @@
 uint8_t * shr_clrdisp8(uint8_t * out, uint8_t dst_base, int8_t disp, uint8_t size);
 uint8_t * sar_clrdisp8(uint8_t * out, uint8_t dst_base, int8_t disp, uint8_t size);
 uint8_t * add_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size);
+uint8_t * adc_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size);
 uint8_t * or_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size);
 uint8_t * xor_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size);
 uint8_t * and_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size);
 uint8_t * sub_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size);
+uint8_t * sbb_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size);
 uint8_t * cmp_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size);
 uint8_t * add_ir(uint8_t * out, int32_t val, uint8_t dst, uint8_t size);
+uint8_t * adc_ir(uint8_t * out, int32_t val, uint8_t dst, uint8_t size);
 uint8_t * or_ir(uint8_t * out, int32_t val, uint8_t dst, uint8_t size);
 uint8_t * xor_ir(uint8_t * out, int32_t val, uint8_t dst, uint8_t size);
 uint8_t * and_ir(uint8_t * out, int32_t val, uint8_t dst, uint8_t size);
 uint8_t * sub_ir(uint8_t * out, int32_t val, uint8_t dst, uint8_t size);
+uint8_t * sbb_ir(uint8_t * out, int32_t val, uint8_t dst, uint8_t size);
 uint8_t * cmp_ir(uint8_t * out, int32_t val, uint8_t dst, uint8_t size);
 uint8_t * add_irdisp8(uint8_t * out, int32_t val, uint8_t dst_base, int8_t disp, uint8_t size);
+uint8_t * adc_irdisp8(uint8_t * out, int32_t val, uint8_t dst_base, int8_t disp, uint8_t size);
 uint8_t * or_irdisp8(uint8_t * out, int32_t val, uint8_t dst_base, int8_t disp, uint8_t size);
 uint8_t * xor_irdisp8(uint8_t * out, int32_t val, uint8_t dst_base, int8_t disp, uint8_t size);
 uint8_t * and_irdisp8(uint8_t * out, int32_t val, uint8_t dst_base, int8_t disp, uint8_t size);
 uint8_t * sub_irdisp8(uint8_t * out, int32_t val, uint8_t dst_base, int8_t disp, uint8_t size);
+uint8_t * sbb_irdisp8(uint8_t * out, int32_t val, uint8_t dst_base, int8_t disp, uint8_t size);
 uint8_t * cmp_irdisp8(uint8_t * out, int32_t val, uint8_t dst_base, int8_t disp, uint8_t size);
 uint8_t * add_rrdisp8(uint8_t * out, uint8_t src, uint8_t dst_base, int8_t disp, uint8_t size);
+uint8_t * adc_rrdisp8(uint8_t * out, uint8_t src, uint8_t dst_base, int8_t disp, uint8_t size);
 uint8_t * add_rdisp8r(uint8_t * out, uint8_t src_base, int8_t disp, uint8_t dst, uint8_t size);
+uint8_t * adc_rdisp8r(uint8_t * out, uint8_t src_base, int8_t disp, uint8_t dst, uint8_t size);
 uint8_t * or_rrdisp8(uint8_t * out, uint8_t src, uint8_t dst_base, int8_t disp, uint8_t size);
 uint8_t * or_rdisp8r(uint8_t * out, uint8_t src_base, int8_t disp, uint8_t dst, uint8_t size);
 uint8_t * xor_rrdisp8(uint8_t * out, uint8_t src, uint8_t dst_base, int8_t disp, uint8_t size);
@@ -121,6 +129,8 @@
 uint8_t * and_rdisp8r(uint8_t * out, uint8_t src_base, int8_t disp, uint8_t dst, uint8_t size);
 uint8_t * sub_rrdisp8(uint8_t * out, uint8_t src, uint8_t dst_base, int8_t disp, uint8_t size);
 uint8_t * sub_rdisp8r(uint8_t * out, uint8_t src_base, int8_t disp, uint8_t dst, uint8_t size);
+uint8_t * sbb_rrdisp8(uint8_t * out, uint8_t src, uint8_t dst_base, int8_t disp, uint8_t size);
+uint8_t * sbb_rdisp8r(uint8_t * out, uint8_t src_base, int8_t disp, uint8_t dst, uint8_t size);
 uint8_t * cmp_rrdisp8(uint8_t * out, uint8_t src, uint8_t dst_base, int8_t disp, uint8_t size);
 uint8_t * cmp_rdisp8r(uint8_t * out, uint8_t src_base, int8_t disp, uint8_t dst, uint8_t size);
 uint8_t * not_r(uint8_t * out, uint8_t dst, uint8_t size);
diff -r 15b8dce19cf4 -r 5416a5c4628e m68k_to_x86.c
--- a/m68k_to_x86.c	Mon Dec 31 19:17:01 2012 -0800
+++ b/m68k_to_x86.c	Mon Dec 31 20:09:09 2012 -0800
@@ -2172,8 +2172,31 @@
 		dst = mov_rrind(dst, FLAG_C, CONTEXT, SZ_B);
 		dst = m68k_save_result(inst, dst, opts);
 		break;
-	//case M68K_ADDX:
-	//	break;
+	case M68K_ADDX:
+		dst = cycles(dst, BUS);
+		dst = bt_irdisp8(dst, 0, CONTEXT, 0, SZ_B);
+		if (src_op.mode == MODE_REG_DIRECT) {
+			if (dst_op.mode == MODE_REG_DIRECT) {
+				dst = adc_rr(dst, src_op.base, dst_op.base, inst->extra.size);
+			} else {
+				dst = adc_rrdisp8(dst, src_op.base, dst_op.base, dst_op.disp, inst->extra.size);
+			}
+		} else if (src_op.mode == MODE_REG_DISPLACE8) {
+			dst = adc_rdisp8r(dst, src_op.base, src_op.disp, dst_op.base, inst->extra.size);
+		} else {
+			if (dst_op.mode == MODE_REG_DIRECT) {
+				dst = adc_ir(dst, src_op.disp, dst_op.base, inst->extra.size);
+			} else {
+				dst = adc_irdisp8(dst, src_op.disp, dst_op.base, dst_op.disp, inst->extra.size);
+			}
+		}
+		dst = setcc_r(dst, CC_C, FLAG_C);
+		dst = setcc_r(dst, CC_Z, FLAG_Z);
+		dst = setcc_r(dst, CC_S, FLAG_N);
+		dst = setcc_r(dst, CC_O, FLAG_V);
+		dst = mov_rrind(dst, FLAG_C, CONTEXT, SZ_B);
+		dst = m68k_save_result(inst, dst, opts);
+		break;
 	case M68K_AND:
 		dst = cycles(dst, BUS);
 		if (src_op.mode == MODE_REG_DIRECT) {
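On the 68000, ADDX adds the source, the destination, and the X (extend) flag, and the carry out of the result becomes both C and X. The translation above maps X onto the x86 carry: bt_irdisp8 copies the stored X flag (kept as a byte at offset 0 of the context, judging by the bt_irdisp8/mov_rrind pair) into CF, the adc_* call performs the three-input add in a single instruction, and the setcc_r/mov_rrind tail latches the resulting x86 flags into the emulated C/Z/N/V and X. A hand-written model of the arithmetic for the byte-sized case, not part of the patch, just to show what the emitted sequence computes:

#include <stdint.h>

// dst = dst + src + X, with the carry out of bit 7 becoming the new C and X.
static uint8_t addx_byte_model(uint8_t dst, uint8_t src, uint8_t *x_flag)
{
	uint16_t sum = (uint16_t)dst + (uint16_t)src + (*x_flag ? 1 : 0);
	*x_flag = sum > 0xFF; // carry out
	return (uint8_t)sum;
}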
@@ -2636,8 +2659,107 @@
 			dst = setcc_r(dst, CC_S, FLAG_N);
 		}
 		break;
-	/*case M68K_ROXL:
-	case M68K_ROXR:*/
+	case M68K_ROXL:
+	case M68K_ROXR:
+		dst = mov_ir(dst, 0, FLAG_V, SZ_B);
+		if (inst->src.addr_mode == MODE_UNUSED) {
+			dst = cycles(dst, BUS);
+			//Memory rotate
+			dst = bt_irdisp8(dst, 0, CONTEXT, 0, SZ_B);
+			if (inst->op == M68K_ROXL) {
+				dst = rol_ir(dst, 1, dst_op.base, inst->extra.size);
+			} else {
+				dst = ror_ir(dst, 1, dst_op.base, inst->extra.size);
+			}
+			dst = setcc_r(dst, CC_C, FLAG_C);
+			dst = cmp_ir(dst, 0, dst_op.base, inst->extra.size);
+			dst = setcc_r(dst, CC_Z, FLAG_Z);
+			dst = setcc_r(dst, CC_S, FLAG_N);
+			dst = m68k_save_result(inst, dst, opts);
+		} else {
+			if (src_op.mode == MODE_IMMED) {
+				dst = cycles(dst, (inst->extra.size == OPSIZE_LONG ? 8 : 6) + src_op.disp*2);
+				dst = bt_irdisp8(dst, 0, CONTEXT, 0, SZ_B);
+				if (dst_op.mode == MODE_REG_DIRECT) {
+					if (inst->op == M68K_ROXL) {
+						dst = rol_ir(dst, src_op.disp, dst_op.base, inst->extra.size);
+					} else {
+						dst = ror_ir(dst, src_op.disp, dst_op.base, inst->extra.size);
+					}
+				} else {
+					if (inst->op == M68K_ROXL) {
+						dst = rol_irdisp8(dst, src_op.disp, dst_op.base, dst_op.disp, inst->extra.size);
+					} else {
+						dst = ror_irdisp8(dst, src_op.disp, dst_op.base, dst_op.disp, inst->extra.size);
+					}
+				}
+				dst = setcc_r(dst, CC_C, FLAG_C);
+			} else {
+				if (src_op.mode == MODE_REG_DIRECT) {
+					if (src_op.base != SCRATCH1) {
+						dst = mov_rr(dst, src_op.base, SCRATCH1, SZ_B);
+					}
+				} else {
+					dst = mov_rdisp8r(dst, src_op.base, src_op.disp, SCRATCH1, SZ_B);
+				}
+				dst = and_ir(dst, 63, SCRATCH1, SZ_D);
+				zero_off = dst+1;
+				dst = jcc(dst, CC_NZ, dst+2);
+				dst = add_rr(dst, SCRATCH1, CYCLES, SZ_D);
+				dst = add_rr(dst, SCRATCH1, CYCLES, SZ_D);
+				dst = cmp_ir(dst, 32, SCRATCH1, SZ_B);
+				norm_off = dst+1;
+				dst = jcc(dst, CC_L, dst+2);
+				dst = bt_irdisp8(dst, 0, CONTEXT, 0, SZ_B);
+				if (dst_op.mode == MODE_REG_DIRECT) {
+					if (inst->op == M68K_ROXL) {
+						dst = rol_ir(dst, 31, dst_op.base, inst->extra.size);
+						dst = rol_ir(dst, 1, dst_op.base, inst->extra.size);
+					} else {
+						dst = ror_ir(dst, 31, dst_op.base, inst->extra.size);
+						dst = ror_ir(dst, 1, dst_op.base, inst->extra.size);
+					}
+				} else {
+					if (inst->op == M68K_ROXL) {
+						dst = rol_irdisp8(dst, 31, dst_op.base, dst_op.disp, inst->extra.size);
+						dst = rol_irdisp8(dst, 1, dst_op.base, dst_op.disp, inst->extra.size);
+					} else {
+						dst = ror_irdisp8(dst, 31, dst_op.base, dst_op.disp, inst->extra.size);
+						dst = ror_irdisp8(dst, 1, dst_op.base, dst_op.disp, inst->extra.size);
+					}
+				}
+				dst = sub_ir(dst, 32, SCRATCH1, SZ_B);
+				*norm_off = dst - (norm_off+1);
+				dst = bt_irdisp8(dst, 0, CONTEXT, 0, SZ_B);
+				if (dst_op.mode == MODE_REG_DIRECT) {
+					if (inst->op == M68K_ROXL) {
+						dst = rol_clr(dst, dst_op.base, inst->extra.size);
+					} else {
+						dst = ror_clr(dst, dst_op.base, inst->extra.size);
+					}
+				} else {
+					if (inst->op == M68K_ROXL) {
+						dst = rol_clrdisp8(dst, dst_op.base, dst_op.disp, inst->extra.size);
+					} else {
+						dst = ror_clrdisp8(dst, dst_op.base, dst_op.disp, inst->extra.size);
+					}
+				}
+				dst = setcc_r(dst, CC_C, FLAG_C);
+				end_off = dst + 1;
+				dst = jmp(dst, dst+2);
+				*zero_off = dst - (zero_off+1);
+				dst = mov_ir(dst, 0, FLAG_C, SZ_B);
+				*end_off = dst - (end_off+1);
+			}
+			if (dst_op.mode == MODE_REG_DIRECT) {
+				dst = cmp_ir(dst, 0, dst_op.base, inst->extra.size);
+			} else {
+				dst = cmp_irdisp8(dst, 0, dst_op.base, dst_op.disp, inst->extra.size);
+			}
+			dst = setcc_r(dst, CC_Z, FLAG_Z);
+			dst = setcc_r(dst, CC_S, FLAG_N);
+		}
+		break;
 	case M68K_RTE:
 		dst = mov_rr(dst, opts->aregs[7], SCRATCH1, SZ_D);
 		dst = call(dst, (uint8_t *)m68k_read_long_scratch1);
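ROXL/ROXR rotate the operand through the X flag, so the count can meaningfully exceed the operand width; the immediate form costs 6 cycles (8 for long) plus 2 per bit, which the cycles(...) call above reproduces. In the register-count path the count is masked to 63, the two add_rr(..., CYCLES, ...) calls charge 2 cycles per bit, and counts of 32 or more are handled by rotating by 31 and then by 1 before subtracting 32, since a single x86 rotate masks its count and cannot express 32 directly. A small hand-written model of that count-splitting idea for a 32-bit value, not from the patch, and modeling a plain rotate rather than the full rotate-through-X:

#include <stdint.h>

// Rotate left by a count in 0..63, splitting counts >= 32 into 31+1 the way
// the generated code does before handing the remainder to a CL-count rotate.
static uint32_t rol32(uint32_t v, unsigned c)
{
	c &= 31;
	return c ? (v << c) | (v >> (32 - c)) : v;
}

static uint32_t rol32_count63(uint32_t v, unsigned count)
{
	count &= 63;                    // mirrors and_ir(dst, 63, SCRATCH1, SZ_D)
	if (count >= 32) {
		v = rol32(rol32(v, 31), 1); // net rotate by 32
		count -= 32;
	}
	return rol32(v, count);
}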
@@ -2686,8 +2808,31 @@
 		dst = mov_rrind(dst, FLAG_C, CONTEXT, SZ_B);
 		dst = m68k_save_result(inst, dst, opts);
 		break;
-	//case M68K_SUBX:
-	//	break;
+	case M68K_SUBX:
+		dst = cycles(dst, BUS);
+		dst = bt_irdisp8(dst, 0, CONTEXT, 0, SZ_B);
+		if (src_op.mode == MODE_REG_DIRECT) {
+			if (dst_op.mode == MODE_REG_DIRECT) {
+				dst = sbb_rr(dst, src_op.base, dst_op.base, inst->extra.size);
+			} else {
+				dst = sbb_rrdisp8(dst, src_op.base, dst_op.base, dst_op.disp, inst->extra.size);
+			}
+		} else if (src_op.mode == MODE_REG_DISPLACE8) {
+			dst = sbb_rdisp8r(dst, src_op.base, src_op.disp, dst_op.base, inst->extra.size);
+		} else {
+			if (dst_op.mode == MODE_REG_DIRECT) {
+				dst = sbb_ir(dst, src_op.disp, dst_op.base, inst->extra.size);
+			} else {
+				dst = sbb_irdisp8(dst, src_op.disp, dst_op.base, dst_op.disp, inst->extra.size);
+			}
+		}
+		dst = setcc_r(dst, CC_C, FLAG_C);
+		dst = setcc_r(dst, CC_Z, FLAG_Z);
+		dst = setcc_r(dst, CC_S, FLAG_N);
+		dst = setcc_r(dst, CC_O, FLAG_V);
+		dst = mov_rrind(dst, FLAG_C, CONTEXT, SZ_B);
+		dst = m68k_save_result(inst, dst, opts);
+		break;
 	case M68K_SWAP:
 		dst = cycles(dst, BUS);
 		if (src_op.mode == MODE_REG_DIRECT) {
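SUBX is the mirror image of the ADDX case earlier in this patch: bt_irdisp8 seeds the x86 carry from the stored X flag, the sbb_* call subtracts the source and the borrow in one instruction, and the setcc_r/mov_rrind tail writes the resulting borrow back as C and X along with Z/N/V. The same kind of hand-written model as before, byte-sized case only, not part of the patch:

#include <stdint.h>

// dst = dst - src - X, with the borrow out becoming the new C and X.
static uint8_t subx_byte_model(uint8_t dst, uint8_t src, uint8_t *x_flag)
{
	uint16_t diff = (uint16_t)dst - (uint16_t)src - (*x_flag ? 1 : 0);
	*x_flag = diff > 0xFF; // borrow out (result wrapped below zero)
	return (uint8_t)diff;
}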