Mercurial > repos > blastem
comparison z80_to_x86.c @ 652:f822d9216968
Merge
author | Michael Pavone <pavone@retrodev.com> |
---|---|
date | Tue, 30 Dec 2014 19:11:34 -0800 |
parents | 9d6fed6501ba 103d5cabbe14 |
children | a18e3923481e |
comparison
equal
deleted
inserted
replaced
620:9d6fed6501ba | 652:f822d9216968 |
---|---|
25 #ifdef DO_DEBUG_PRINT | 25 #ifdef DO_DEBUG_PRINT |
26 #define dprintf printf | 26 #define dprintf printf |
27 #else | 27 #else |
28 #define dprintf | 28 #define dprintf |
29 #endif | 29 #endif |
30 | |
31 uint32_t zbreakpoint_patch(z80_context * context, uint16_t address, code_ptr dst); | |
30 | 32 |
31 uint8_t z80_size(z80inst * inst) | 33 uint8_t z80_size(z80inst * inst) |
32 { | 34 { |
33 uint8_t reg = (inst->reg & 0x1F); | 35 uint8_t reg = (inst->reg & 0x1F); |
34 if (reg != Z80_UNUSED && reg != Z80_USE_IMMED) { | 36 if (reg != Z80_UNUSED && reg != Z80_USE_IMMED) { |
122 ea->base = opts->gen.scratch1; | 124 ea->base = opts->gen.scratch1; |
123 } else { | 125 } else { |
124 ea->base = opts->regs[Z80_IYL]; | 126 ea->base = opts->regs[Z80_IYL]; |
125 ror_ir(code, 8, opts->regs[Z80_IY], SZ_W); | 127 ror_ir(code, 8, opts->regs[Z80_IY], SZ_W); |
126 } | 128 } |
127 } else { | 129 } else if(opts->regs[inst->ea_reg] >= 0) { |
128 ea->base = opts->regs[inst->ea_reg]; | 130 ea->base = opts->regs[inst->ea_reg]; |
129 if (ea->base >= AH && ea->base <= BH && inst->reg != Z80_UNUSED && inst->reg != Z80_USE_IMMED) { | 131 if (ea->base >= AH && ea->base <= BH && inst->reg != Z80_UNUSED && inst->reg != Z80_USE_IMMED) { |
130 uint8_t other_reg = opts->regs[inst->reg]; | 132 uint8_t other_reg = opts->regs[inst->reg]; |
131 if (other_reg >= R8 || (other_reg >= RSP && other_reg <= RDI)) { | 133 if (other_reg >= R8 || (other_reg >= RSP && other_reg <= RDI)) { |
132 //we can't mix an *H reg with a register that requires the REX prefix | 134 //we can't mix an *H reg with a register that requires the REX prefix |
133 ea->base = opts->regs[z80_low_reg(inst->ea_reg)]; | 135 ea->base = opts->regs[z80_low_reg(inst->ea_reg)]; |
134 ror_ir(code, 8, ea->base, SZ_W); | 136 ror_ir(code, 8, ea->base, SZ_W); |
135 } | 137 } |
136 } | 138 } |
139 } else { | |
140 ea->mode = MODE_REG_DISPLACE8; | |
141 ea->base = CONTEXT; | |
142 ea->disp = offsetof(z80_context, regs) + inst->ea_reg; | |
137 } | 143 } |
138 break; | 144 break; |
139 case Z80_REG_INDIRECT: | 145 case Z80_REG_INDIRECT: |
140 mov_rr(code, opts->regs[inst->ea_reg], areg, SZ_W); | 146 mov_rr(code, opts->regs[inst->ea_reg], areg, SZ_W); |
141 size = z80_size(inst); | 147 size = z80_size(inst); |
290 (context->alt_regs[Z80_IXH] << 8) | context->alt_regs[Z80_IXL], | 296 (context->alt_regs[Z80_IXH] << 8) | context->alt_regs[Z80_IXL], |
291 (context->alt_regs[Z80_IYH] << 8) | context->alt_regs[Z80_IYL]); | 297 (context->alt_regs[Z80_IYH] << 8) | context->alt_regs[Z80_IYL]); |
292 exit(0); | 298 exit(0); |
293 } | 299 } |
294 | 300 |
295 void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) | 301 void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, uint8_t interp) |
296 { | 302 { |
297 uint32_t num_cycles; | 303 uint32_t num_cycles; |
298 host_ea src_op, dst_op; | 304 host_ea src_op, dst_op; |
299 uint8_t size; | 305 uint8_t size; |
300 z80_options *opts = context->options; | 306 z80_options *opts = context->options; |
301 uint8_t * start = opts->gen.code.cur; | 307 uint8_t * start = opts->gen.code.cur; |
302 code_info *code = &opts->gen.code; | 308 code_info *code = &opts->gen.code; |
303 check_cycles_int(&opts->gen, address); | 309 if (!interp) { |
310 check_cycles_int(&opts->gen, address); | |
311 if (context->breakpoint_flags[address / sizeof(uint8_t)] & (1 << (address % sizeof(uint8_t)))) { | |
312 zbreakpoint_patch(context, address, start); | |
313 } | |
314 } | |
304 switch(inst->op) | 315 switch(inst->op) |
305 { | 316 { |
306 case Z80_LD: | 317 case Z80_LD: |
307 size = z80_size(inst); | 318 size = z80_size(inst); |
308 switch (inst->addr_mode & 0x1F) | 319 switch (inst->addr_mode & 0x1F) |
347 } | 358 } |
348 } else if(src_op.mode == MODE_IMMED) { | 359 } else if(src_op.mode == MODE_IMMED) { |
349 mov_ir(code, src_op.disp, dst_op.base, size); | 360 mov_ir(code, src_op.disp, dst_op.base, size); |
350 } else { | 361 } else { |
351 mov_rdispr(code, src_op.base, src_op.disp, dst_op.base, size); | 362 mov_rdispr(code, src_op.base, src_op.disp, dst_op.base, size); |
363 } | |
364 if (inst->ea_reg == Z80_I && inst->addr_mode == Z80_REG) { | |
365 //ld a, i sets some flags | |
366 //TODO: Implement half-carry flag | |
367 cmp_ir(code, 0, dst_op.base, SZ_B); | |
368 setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); | |
369 setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); | |
370 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);; | |
371 mov_rdispr(code, opts->gen.context_reg, offsetof(z80_context, iff2), SCRATCH1, SZ_B); | |
372 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zf_off(ZF_PV), SZ_B); | |
352 } | 373 } |
353 z80_save_reg(inst, opts); | 374 z80_save_reg(inst, opts); |
354 z80_save_ea(code, inst, opts); | 375 z80_save_ea(code, inst, opts); |
355 if (inst->addr_mode & Z80_DIR) { | 376 if (inst->addr_mode & Z80_DIR) { |
356 z80_save_result(opts, inst); | 377 z80_save_result(opts, inst); |
913 mov_rr(code, dst_op.base, src_op.base, SZ_B); | 934 mov_rr(code, dst_op.base, src_op.base, SZ_B); |
914 } | 935 } |
915 setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); | 936 setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); |
916 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); | 937 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); |
917 //TODO: Implement half-carry flag | 938 //TODO: Implement half-carry flag |
939 if (inst->immed) { | |
940 //rlca does not set these flags | |
918 cmp_ir(code, 0, dst_op.base, SZ_B); | 941 cmp_ir(code, 0, dst_op.base, SZ_B); |
919 setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); | 942 setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); |
920 setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); | 943 setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); |
921 setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); | 944 setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); |
945 } | |
922 if (inst->addr_mode != Z80_UNUSED) { | 946 if (inst->addr_mode != Z80_UNUSED) { |
923 z80_save_result(opts, inst); | 947 z80_save_result(opts, inst); |
924 if (src_op.mode != MODE_UNUSED) { | 948 if (src_op.mode != MODE_UNUSED) { |
925 z80_save_reg(inst, opts); | 949 z80_save_reg(inst, opts); |
926 } | 950 } |
945 mov_rr(code, dst_op.base, src_op.base, SZ_B); | 969 mov_rr(code, dst_op.base, src_op.base, SZ_B); |
946 } | 970 } |
947 setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); | 971 setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); |
948 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); | 972 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); |
949 //TODO: Implement half-carry flag | 973 //TODO: Implement half-carry flag |
974 if (inst->immed) { | |
975 //rla does not set these flags | |
950 cmp_ir(code, 0, dst_op.base, SZ_B); | 976 cmp_ir(code, 0, dst_op.base, SZ_B); |
951 setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); | 977 setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); |
952 setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); | 978 setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); |
953 setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); | 979 setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); |
980 } | |
954 if (inst->addr_mode != Z80_UNUSED) { | 981 if (inst->addr_mode != Z80_UNUSED) { |
955 z80_save_result(opts, inst); | 982 z80_save_result(opts, inst); |
956 if (src_op.mode != MODE_UNUSED) { | 983 if (src_op.mode != MODE_UNUSED) { |
957 z80_save_reg(inst, opts); | 984 z80_save_reg(inst, opts); |
958 } | 985 } |
976 mov_rr(code, dst_op.base, src_op.base, SZ_B); | 1003 mov_rr(code, dst_op.base, src_op.base, SZ_B); |
977 } | 1004 } |
978 setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); | 1005 setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); |
979 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); | 1006 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); |
980 //TODO: Implement half-carry flag | 1007 //TODO: Implement half-carry flag |
1008 if (inst->immed) { | |
1009 //rrca does not set these flags | |
981 cmp_ir(code, 0, dst_op.base, SZ_B); | 1010 cmp_ir(code, 0, dst_op.base, SZ_B); |
982 setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); | 1011 setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); |
983 setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); | 1012 setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); |
984 setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); | 1013 setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); |
1014 } | |
985 if (inst->addr_mode != Z80_UNUSED) { | 1015 if (inst->addr_mode != Z80_UNUSED) { |
986 z80_save_result(opts, inst); | 1016 z80_save_result(opts, inst); |
987 if (src_op.mode != MODE_UNUSED) { | 1017 if (src_op.mode != MODE_UNUSED) { |
988 z80_save_reg(inst, opts); | 1018 z80_save_reg(inst, opts); |
989 } | 1019 } |
1008 mov_rr(code, dst_op.base, src_op.base, SZ_B); | 1038 mov_rr(code, dst_op.base, src_op.base, SZ_B); |
1009 } | 1039 } |
1010 setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); | 1040 setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); |
1011 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); | 1041 mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); |
1012 //TODO: Implement half-carry flag | 1042 //TODO: Implement half-carry flag |
1043 if (inst->immed) { | |
1044 //rra does not set these flags | |
1013 cmp_ir(code, 0, dst_op.base, SZ_B); | 1045 cmp_ir(code, 0, dst_op.base, SZ_B); |
1014 setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); | 1046 setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); |
1015 setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); | 1047 setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); |
1016 setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); | 1048 setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); |
1049 } | |
1017 if (inst->addr_mode != Z80_UNUSED) { | 1050 if (inst->addr_mode != Z80_UNUSED) { |
1018 z80_save_result(opts, inst); | 1051 z80_save_result(opts, inst); |
1019 if (src_op.mode != MODE_UNUSED) { | 1052 if (src_op.mode != MODE_UNUSED) { |
1020 z80_save_reg(inst, opts); | 1053 z80_save_reg(inst, opts); |
1021 } | 1054 } |
1629 exit(1); | 1662 exit(1); |
1630 } | 1663 } |
1631 } | 1664 } |
1632 } | 1665 } |
1633 | 1666 |
1667 uint8_t * z80_interp_handler(uint8_t opcode, z80_context * context) | |
1668 { | |
1669 if (!context->interp_code[opcode]) { | |
1670 if (opcode == 0xCB || (opcode >= 0xDD && opcode & 0xF == 0xD)) { | |
1671 fprintf(stderr, "Encountered prefix byte %X at address %X. Z80 interpeter doesn't support those yet.", opcode, context->pc); | |
1672 exit(1); | |
1673 } | |
1674 uint8_t codebuf[8]; | |
1675 memset(codebuf, 0, sizeof(codebuf)); | |
1676 codebuf[0] = opcode; | |
1677 z80inst inst; | |
1678 uint8_t * after = z80_decode(codebuf, &inst); | |
1679 if (after - codebuf > 1) { | |
1680 fprintf(stderr, "Encountered multi-byte Z80 instruction at %X. Z80 interpeter doesn't support those yet.", context->pc); | |
1681 exit(1); | |
1682 } | |
1683 | |
1684 z80_options * opts = context->options; | |
1685 code_info *code = &opts->gen.code; | |
1686 check_alloc_code(code, ZMAX_NATIVE_SIZE); | |
1687 context->interp_code[opcode] = code->cur; | |
1688 translate_z80inst(&inst, context, 0, 1); | |
1689 mov_rdispr(code, opts->gen.context_reg, offsetof(z80_context, pc), opts->gen.scratch1, SZ_W); | |
1690 add_ir(code, after - codebuf, opts->gen.scratch1, SZ_W); | |
1691 call(code, opts->native_addr); | |
1692 jmp_r(code, opts->gen.scratch1); | |
1693 } | |
1694 return context->interp_code[opcode]; | |
1695 } | |
1696 | |
1697 code_info z80_make_interp_stub(z80_context * context, uint16_t address) | |
1698 { | |
1699 z80_options *opts = context->options; | |
1700 code_info * code = &opts->gen.code; | |
1701 check_alloc_code(code, 32); | |
1702 code_info stub = {code->cur, NULL}; | |
1703 //TODO: make this play well with the breakpoint code | |
1704 mov_ir(code, address, opts->gen.scratch1, SZ_W); | |
1705 call(code, opts->read_8); | |
1706 //normal opcode fetch is already factored into instruction timing | |
1707 //back out the base 3 cycles from a read here | |
1708 //not quite perfect, but it will have to do for now | |
1709 cycles(&opts->gen, -3); | |
1710 check_cycles_int(&opts->gen, address); | |
1711 call(code, opts->gen.save_context); | |
1712 mov_rr(code, opts->gen.scratch1, RDI, SZ_B); | |
1713 mov_irdisp(code, address, opts->gen.context_reg, offsetof(z80_context, pc), SZ_W); | |
1714 push_r(code, opts->gen.context_reg); | |
1715 call(code, (code_ptr)z80_interp_handler); | |
1716 mov_rr(code, RAX, opts->gen.scratch1, SZ_Q); | |
1717 pop_r(code, opts->gen.context_reg); | |
1718 call(code, opts->gen.load_context); | |
1719 jmp_r(code, opts->gen.scratch1); | |
1720 stub.last = code->cur; | |
1721 return stub; | |
1722 } | |
1723 | |
1724 | |
1634 uint8_t * z80_get_native_address(z80_context * context, uint32_t address) | 1725 uint8_t * z80_get_native_address(z80_context * context, uint32_t address) |
1635 { | 1726 { |
1636 native_map_slot *map; | 1727 native_map_slot *map; |
1637 if (address < 0x4000) { | 1728 if (address < 0x4000) { |
1638 address &= 0x1FFF; | 1729 address &= 0x1FFF; |
1639 map = context->static_code_map; | 1730 map = context->static_code_map; |
1640 } else if (address >= 0x8000) { | |
1641 address &= 0x7FFF; | |
1642 map = context->banked_code_map + context->bank_reg; | |
1643 } else { | 1731 } else { |
1644 //dprintf("z80_get_native_address: %X NULL\n", address); | 1732 address -= 0x4000; |
1645 return NULL; | 1733 map = context->banked_code_map; |
1646 } | 1734 } |
1647 if (!map->base || !map->offsets || map->offsets[address] == INVALID_OFFSET || map->offsets[address] == EXTENSION_WORD) { | 1735 if (!map->base || !map->offsets || map->offsets[address] == INVALID_OFFSET || map->offsets[address] == EXTENSION_WORD) { |
1648 //dprintf("z80_get_native_address: %X NULL\n", address); | 1736 //dprintf("z80_get_native_address: %X NULL\n", address); |
1649 return NULL; | 1737 return NULL; |
1650 } | 1738 } |
1652 return map->base + map->offsets[address]; | 1740 return map->base + map->offsets[address]; |
1653 } | 1741 } |
1654 | 1742 |
1655 uint8_t z80_get_native_inst_size(z80_options * opts, uint32_t address) | 1743 uint8_t z80_get_native_inst_size(z80_options * opts, uint32_t address) |
1656 { | 1744 { |
1745 //TODO: Fix for addresses >= 0x4000 | |
1657 if (address >= 0x4000) { | 1746 if (address >= 0x4000) { |
1658 return 0; | 1747 return 0; |
1659 } | 1748 } |
1660 return opts->gen.ram_inst_sizes[0][address & 0x1FFF]; | 1749 return opts->gen.ram_inst_sizes[0][address & 0x1FFF]; |
1661 } | 1750 } |
1669 address &= 0x1FFF; | 1758 address &= 0x1FFF; |
1670 map = context->static_code_map; | 1759 map = context->static_code_map; |
1671 opts->gen.ram_inst_sizes[0][address] = native_size; | 1760 opts->gen.ram_inst_sizes[0][address] = native_size; |
1672 context->ram_code_flags[(address & 0x1C00) >> 10] |= 1 << ((address & 0x380) >> 7); | 1761 context->ram_code_flags[(address & 0x1C00) >> 10] |= 1 << ((address & 0x380) >> 7); |
1673 context->ram_code_flags[((address + size) & 0x1C00) >> 10] |= 1 << (((address + size) & 0x380) >> 7); | 1762 context->ram_code_flags[((address + size) & 0x1C00) >> 10] |= 1 << (((address + size) & 0x380) >> 7); |
1674 } else if (address >= 0x8000) { | 1763 } else { |
1675 address &= 0x7FFF; | 1764 //HERE |
1676 map = context->banked_code_map + context->bank_reg; | 1765 address -= 0x4000; |
1766 map = context->banked_code_map; | |
1677 if (!map->offsets) { | 1767 if (!map->offsets) { |
1678 map->offsets = malloc(sizeof(int32_t) * 0x8000); | 1768 map->offsets = malloc(sizeof(int32_t) * 0xC000); |
1679 memset(map->offsets, 0xFF, sizeof(int32_t) * 0x8000); | 1769 memset(map->offsets, 0xFF, sizeof(int32_t) * 0xC000); |
1680 } | 1770 } |
1681 } else { | |
1682 return; | |
1683 } | 1771 } |
1684 if (!map->base) { | 1772 if (!map->base) { |
1685 map->base = native_address; | 1773 map->base = native_address; |
1686 } | 1774 } |
1687 map->offsets[address] = native_address - map->base; | 1775 map->offsets[address] = native_address - map->base; |
1688 for(--size, orig_address++; size; --size, orig_address++) { | 1776 for(--size, orig_address++; size; --size, orig_address++) { |
1689 address = orig_address; | 1777 address = orig_address; |
1690 if (address < 0x4000) { | 1778 if (address < 0x4000) { |
1691 address &= 0x1FFF; | 1779 address &= 0x1FFF; |
1692 map = context->static_code_map; | 1780 map = context->static_code_map; |
1693 } else if (address >= 0x8000) { | 1781 } else { |
1694 address &= 0x7FFF; | 1782 address -= 0x4000; |
1695 map = context->banked_code_map + context->bank_reg; | 1783 map = context->banked_code_map; |
1696 } else { | |
1697 return; | |
1698 } | 1784 } |
1699 if (!map->offsets) { | 1785 if (!map->offsets) { |
1700 map->offsets = malloc(sizeof(int32_t) * 0x8000); | 1786 map->offsets = malloc(sizeof(int32_t) * 0xC000); |
1701 memset(map->offsets, 0xFF, sizeof(int32_t) * 0x8000); | 1787 memset(map->offsets, 0xFF, sizeof(int32_t) * 0xC000); |
1702 } | 1788 } |
1703 map->offsets[address] = EXTENSION_WORD; | 1789 map->offsets[address] = EXTENSION_WORD; |
1704 } | 1790 } |
1705 } | 1791 } |
1706 | 1792 |
1707 #define INVALID_INSTRUCTION_START 0xFEEDFEED | 1793 #define INVALID_INSTRUCTION_START 0xFEEDFEED |
1708 | 1794 |
1709 uint32_t z80_get_instruction_start(native_map_slot * static_code_map, uint32_t address) | 1795 uint32_t z80_get_instruction_start(native_map_slot * static_code_map, uint32_t address) |
1710 { | 1796 { |
1797 //TODO: Fixme for address >= 0x4000 | |
1711 if (!static_code_map->base || address >= 0x4000) { | 1798 if (!static_code_map->base || address >= 0x4000) { |
1712 return INVALID_INSTRUCTION_START; | 1799 return INVALID_INSTRUCTION_START; |
1713 } | 1800 } |
1714 address &= 0x1FFF; | 1801 address &= 0x1FFF; |
1715 if (static_code_map->offsets[address] == INVALID_OFFSET) { | 1802 if (static_code_map->offsets[address] == INVALID_OFFSET) { |
1780 #endif | 1867 #endif |
1781 if (orig_size != ZMAX_NATIVE_SIZE) { | 1868 if (orig_size != ZMAX_NATIVE_SIZE) { |
1782 check_alloc_code(code, ZMAX_NATIVE_SIZE); | 1869 check_alloc_code(code, ZMAX_NATIVE_SIZE); |
1783 code_ptr start = code->cur; | 1870 code_ptr start = code->cur; |
1784 deferred_addr * orig_deferred = opts->gen.deferred; | 1871 deferred_addr * orig_deferred = opts->gen.deferred; |
1785 translate_z80inst(&instbuf, context, address); | 1872 translate_z80inst(&instbuf, context, address, 0); |
1786 /* | 1873 /* |
1787 if ((native_end - dst) <= orig_size) { | 1874 if ((native_end - dst) <= orig_size) { |
1788 uint8_t * native_next = z80_get_native_address(context, address + after-inst); | 1875 uint8_t * native_next = z80_get_native_address(context, address + after-inst); |
1789 if (native_next && ((native_next == orig_start + orig_size) || (orig_size - (native_end - dst)) > 5)) { | 1876 if (native_next && ((native_next == orig_start + orig_size) || (orig_size - (native_end - dst)) > 5)) { |
1790 remove_deferred_until(&opts->gen.deferred, orig_deferred); | 1877 remove_deferred_until(&opts->gen.deferred, orig_deferred); |
1791 native_end = translate_z80inst(&instbuf, orig_start, context, address); | 1878 native_end = translate_z80inst(&instbuf, orig_start, context, address, 0); |
1792 if (native_next == orig_start + orig_size && (native_next-native_end) < 2) { | 1879 if (native_next == orig_start + orig_size && (native_next-native_end) < 2) { |
1793 while (native_end < orig_start + orig_size) { | 1880 while (native_end < orig_start + orig_size) { |
1794 *(native_end++) = 0x90; //NOP | 1881 *(native_end++) = 0x90; //NOP |
1795 } | 1882 } |
1796 } else { | 1883 } else { |
1812 return start; | 1899 return start; |
1813 } else { | 1900 } else { |
1814 code_info tmp_code = *code; | 1901 code_info tmp_code = *code; |
1815 code->cur = orig_start; | 1902 code->cur = orig_start; |
1816 code->last = orig_start + ZMAX_NATIVE_SIZE; | 1903 code->last = orig_start + ZMAX_NATIVE_SIZE; |
1817 translate_z80inst(&instbuf, context, address); | 1904 translate_z80inst(&instbuf, context, address, 0); |
1818 code_info tmp2 = *code; | 1905 code_info tmp2 = *code; |
1819 *code = tmp_code; | 1906 *code = tmp_code; |
1820 if (!z80_is_terminal(&instbuf)) { | 1907 if (!z80_is_terminal(&instbuf)) { |
1821 | 1908 |
1822 jmp(&tmp2, z80_get_native_address_trans(context, address + after-inst)); | 1909 jmp(&tmp2, z80_get_native_address_trans(context, address + after-inst)); |
1823 } | 1910 } |
1824 z80_handle_deferred(context); | 1911 z80_handle_deferred(context); |
1825 return orig_start; | 1912 return orig_start; |
1826 } | 1913 } |
1835 z80_options * opts = context->options; | 1922 z80_options * opts = context->options; |
1836 uint32_t start_address = address; | 1923 uint32_t start_address = address; |
1837 uint8_t * encoded = NULL, *next; | 1924 uint8_t * encoded = NULL, *next; |
1838 if (address < 0x4000) { | 1925 if (address < 0x4000) { |
1839 encoded = context->mem_pointers[0] + (address & 0x1FFF); | 1926 encoded = context->mem_pointers[0] + (address & 0x1FFF); |
1840 } else if(address >= 0x8000 && context->mem_pointers[1]) { | 1927 } |
1841 printf("attempt to translate Z80 code from banked area at address %X\n", address); | 1928 |
1842 exit(1); | 1929 while (encoded != NULL || address >= 0x4000) |
1843 //encoded = context->mem_pointers[1] + (address & 0x7FFF); | |
1844 } | |
1845 while (encoded != NULL) | |
1846 { | 1930 { |
1847 z80inst inst; | 1931 z80inst inst; |
1848 dprintf("translating Z80 code at address %X\n", address); | 1932 dprintf("translating Z80 code at address %X\n", address); |
1849 do { | 1933 do { |
1850 if (address > 0x4000 && address < 0x8000) { | 1934 if (address >= 0x4000) { |
1851 xor_rr(&opts->gen.code, RDI, RDI, SZ_D); | 1935 code_info stub = z80_make_interp_stub(context, address); |
1852 call(&opts->gen.code, (uint8_t *)exit); | 1936 z80_map_native_address(context, address, stub.cur, 1, stub.last - stub.cur); |
1853 break; | 1937 break; |
1854 } | 1938 } |
1855 uint8_t * existing = z80_get_native_address(context, address); | 1939 uint8_t * existing = z80_get_native_address(context, address); |
1856 if (existing) { | 1940 if (existing) { |
1857 jmp(&opts->gen.code, existing); | 1941 jmp(&opts->gen.code, existing); |
1867 } else { | 1951 } else { |
1868 printf("%X\t%s\n", address, disbuf); | 1952 printf("%X\t%s\n", address, disbuf); |
1869 } | 1953 } |
1870 #endif | 1954 #endif |
1871 code_ptr start = opts->gen.code.cur; | 1955 code_ptr start = opts->gen.code.cur; |
1872 translate_z80inst(&inst, context, address); | 1956 translate_z80inst(&inst, context, address, 0); |
1873 z80_map_native_address(context, address, start, next-encoded, opts->gen.code.cur - start); | 1957 z80_map_native_address(context, address, start, next-encoded, opts->gen.code.cur - start); |
1874 address += next-encoded; | 1958 address += next-encoded; |
1875 if (address > 0xFFFF) { | 1959 if (address > 0xFFFF) { |
1876 address &= 0xFFFF; | 1960 address &= 0xFFFF; |
1877 | 1961 |
1883 if (opts->gen.deferred) { | 1967 if (opts->gen.deferred) { |
1884 address = opts->gen.deferred->address; | 1968 address = opts->gen.deferred->address; |
1885 dprintf("defferred address: %X\n", address); | 1969 dprintf("defferred address: %X\n", address); |
1886 if (address < 0x4000) { | 1970 if (address < 0x4000) { |
1887 encoded = context->mem_pointers[0] + (address & 0x1FFF); | 1971 encoded = context->mem_pointers[0] + (address & 0x1FFF); |
1888 } else if (address > 0x8000 && context->mem_pointers[1]) { | |
1889 encoded = context->mem_pointers[1] + (address & 0x7FFF); | |
1890 } else { | 1972 } else { |
1891 printf("attempt to translate non-memory address: %X\n", address); | 1973 encoded = NULL; |
1892 exit(1); | |
1893 } | 1974 } |
1894 } else { | 1975 } else { |
1895 encoded = NULL; | 1976 encoded = NULL; |
1977 address = 0; | |
1896 } | 1978 } |
1897 } | 1979 } |
1898 } | 1980 } |
1899 | 1981 |
1900 void init_x86_z80_opts(z80_options * options, memmap_chunk const * chunks, uint32_t num_chunks) | 1982 void init_x86_z80_opts(z80_options * options, memmap_chunk const * chunks, uint32_t num_chunks) |
1963 reg = i /2 + Z80_BC + (i > Z80_H ? 2 : 0); | 2045 reg = i /2 + Z80_BC + (i > Z80_H ? 2 : 0); |
1964 size = SZ_W; | 2046 size = SZ_W; |
1965 } else { | 2047 } else { |
1966 reg = i; | 2048 reg = i; |
1967 size = SZ_B; | 2049 size = SZ_B; |
1968 } | 2050 } |
1969 if (options->regs[reg] >= 0) { | 2051 if (options->regs[reg] >= 0) { |
1970 mov_rrdisp(code, options->regs[reg], options->gen.context_reg, offsetof(z80_context, regs) + i, size); | 2052 mov_rrdisp(code, options->regs[reg], options->gen.context_reg, offsetof(z80_context, regs) + i, size); |
1971 } | 2053 } |
1972 if (size == SZ_W) { | 2054 if (size == SZ_W) { |
1973 i++; | 2055 i++; |
2043 pop_r(code, RBP); | 2125 pop_r(code, RBP); |
2044 pop_r(code, RBX); | 2126 pop_r(code, RBX); |
2045 *no_sync = code->cur - (no_sync + 1); | 2127 *no_sync = code->cur - (no_sync + 1); |
2046 //return to caller of z80_run | 2128 //return to caller of z80_run |
2047 retn(code); | 2129 retn(code); |
2048 | 2130 |
2049 options->gen.handle_code_write = (code_ptr)z80_handle_code_write; | 2131 options->gen.handle_code_write = (code_ptr)z80_handle_code_write; |
2050 | 2132 |
2051 options->read_8 = gen_mem_fun(&options->gen, chunks, num_chunks, READ_8, &options->read_8_noinc); | 2133 options->read_8 = gen_mem_fun(&options->gen, chunks, num_chunks, READ_8, &options->read_8_noinc); |
2052 options->write_8 = gen_mem_fun(&options->gen, chunks, num_chunks, WRITE_8, &options->write_8_noinc); | 2134 options->write_8 = gen_mem_fun(&options->gen, chunks, num_chunks, WRITE_8, &options->write_8_noinc); |
2053 | 2135 |
2115 | 2197 |
2116 options->write_io = code->cur; | 2198 options->write_io = code->cur; |
2117 check_cycles(&options->gen); | 2199 check_cycles(&options->gen); |
2118 cycles(&options->gen, 4); | 2200 cycles(&options->gen, 4); |
2119 retn(code); | 2201 retn(code); |
2120 | 2202 |
2121 options->read_16 = code->cur; | 2203 options->read_16 = code->cur; |
2122 cycles(&options->gen, 3); | 2204 cycles(&options->gen, 3); |
2123 check_cycles(&options->gen); | 2205 check_cycles(&options->gen); |
2124 //TODO: figure out how to handle the extra wait state for word reads to bank area | 2206 //TODO: figure out how to handle the extra wait state for word reads to bank area |
2125 //may also need special handling to avoid too much stack depth when acces is blocked | 2207 //may also need special handling to avoid too much stack depth when acces is blocked |
2132 check_cycles(&options->gen); | 2214 check_cycles(&options->gen); |
2133 call(code, options->read_8_noinc); | 2215 call(code, options->read_8_noinc); |
2134 shl_ir(code, 8, options->gen.scratch1, SZ_W); | 2216 shl_ir(code, 8, options->gen.scratch1, SZ_W); |
2135 mov_rr(code, options->gen.scratch2, options->gen.scratch1, SZ_B); | 2217 mov_rr(code, options->gen.scratch2, options->gen.scratch1, SZ_B); |
2136 retn(code); | 2218 retn(code); |
2137 | 2219 |
2138 options->write_16_highfirst = code->cur; | 2220 options->write_16_highfirst = code->cur; |
2139 cycles(&options->gen, 3); | 2221 cycles(&options->gen, 3); |
2140 check_cycles(&options->gen); | 2222 check_cycles(&options->gen); |
2141 push_r(code, options->gen.scratch2); | 2223 push_r(code, options->gen.scratch2); |
2142 push_r(code, options->gen.scratch1); | 2224 push_r(code, options->gen.scratch1); |
2148 cycles(&options->gen, 3); | 2230 cycles(&options->gen, 3); |
2149 check_cycles(&options->gen); | 2231 check_cycles(&options->gen); |
2150 //TODO: Check if we can get away with TCO here | 2232 //TODO: Check if we can get away with TCO here |
2151 call(code, options->write_8_noinc); | 2233 call(code, options->write_8_noinc); |
2152 retn(code); | 2234 retn(code); |
2153 | 2235 |
2154 options->write_16_lowfirst = code->cur; | 2236 options->write_16_lowfirst = code->cur; |
2155 cycles(&options->gen, 3); | 2237 cycles(&options->gen, 3); |
2156 check_cycles(&options->gen); | 2238 check_cycles(&options->gen); |
2157 push_r(code, options->gen.scratch2); | 2239 push_r(code, options->gen.scratch2); |
2158 push_r(code, options->gen.scratch1); | 2240 push_r(code, options->gen.scratch1); |
2213 memset(context, 0, sizeof(*context)); | 2295 memset(context, 0, sizeof(*context)); |
2214 context->static_code_map = malloc(sizeof(*context->static_code_map)); | 2296 context->static_code_map = malloc(sizeof(*context->static_code_map)); |
2215 context->static_code_map->base = NULL; | 2297 context->static_code_map->base = NULL; |
2216 context->static_code_map->offsets = malloc(sizeof(int32_t) * 0x2000); | 2298 context->static_code_map->offsets = malloc(sizeof(int32_t) * 0x2000); |
2217 memset(context->static_code_map->offsets, 0xFF, sizeof(int32_t) * 0x2000); | 2299 memset(context->static_code_map->offsets, 0xFF, sizeof(int32_t) * 0x2000); |
2218 context->banked_code_map = malloc(sizeof(native_map_slot) * (1 << 9)); | 2300 context->banked_code_map = malloc(sizeof(native_map_slot)); |
2219 memset(context->banked_code_map, 0, sizeof(native_map_slot) * (1 << 9)); | 2301 memset(context->banked_code_map, 0, sizeof(native_map_slot)); |
2220 context->options = options; | 2302 context->options = options; |
2303 context->int_cycle = 0xFFFFFFFF; | |
2304 context->int_pulse_start = 0xFFFFFFFF; | |
2305 context->int_pulse_end = 0xFFFFFFFF; | |
2221 context->run = options->run; | 2306 context->run = options->run; |
2222 } | 2307 } |
2223 | 2308 |
2224 void z80_reset(z80_context * context) | 2309 void z80_reset(z80_context * context) |
2225 { | 2310 { |
2227 context->iff1 = context->iff2 = 0; | 2312 context->iff1 = context->iff2 = 0; |
2228 context->native_pc = z80_get_native_address_trans(context, 0); | 2313 context->native_pc = z80_get_native_address_trans(context, 0); |
2229 context->extra_pc = NULL; | 2314 context->extra_pc = NULL; |
2230 } | 2315 } |
2231 | 2316 |
2317 uint32_t zbreakpoint_patch(z80_context * context, uint16_t address, code_ptr dst) | |
2318 { | |
2319 code_info code = {dst, dst+16}; | |
2320 mov_ir(&code, address, SCRATCH1, SZ_W); | |
2321 call(&code, context->bp_stub); | |
2322 return code.cur-dst; | |
2323 } | |
2324 | |
//Lazily builds the shared breakpoint stub that patched instructions call.
//The stub saves the emulated context, invokes context->bp_handler with the
//context and the breakpoint address, restores the context, then re-runs the
//cycle/interrupt check that the breakpoint patch overwrote before jumping
//back into the translated instruction body.
void zcreate_stub(z80_context * context)
{
	z80_options * opts = context->options;
	code_info *code = &opts->gen.code;
	check_code_prologue(code);
	context->bp_stub = code->cur;

	//Calculate length of prologue
	check_cycles_int(&opts->gen, 0);
	int check_int_size = code->cur-context->bp_stub;
	//rewind: the prologue bytes above were emitted only to measure their size
	code->cur = context->bp_stub;

	//Calculate length of patch
	//(also measurement only; these bytes are overwritten by the stub below)
	int patch_size = zbreakpoint_patch(context, 0, code->cur);

	//Save context and call breakpoint handler
	call(code, opts->gen.save_context);
	//preserve scratch1 (holds the breakpoint address) across the C calls
	push_r(code, opts->gen.scratch1);
	//first arg: context pointer; second arg: breakpoint address
	//NOTE(review): RDI/RSI assumes the SysV AMD64 calling convention
	mov_rr(code, opts->gen.context_reg, RDI, SZ_Q);
	mov_rr(code, opts->gen.scratch1, RSI, SZ_W);
	call(code, context->bp_handler);
	//handler returns the (possibly updated) context pointer in RAX
	mov_rr(code, RAX, opts->gen.context_reg, SZ_Q);
	//Restore context
	call(code, opts->gen.load_context);
	pop_r(code, opts->gen.scratch1);
	//do prologue stuff
	cmp_rr(code, opts->gen.cycles, opts->gen.limit, SZ_D);
	//remember the displacement byte of the jcc so it can be patched below
	uint8_t * jmp_off = code->cur+1;
	jcc(code, CC_NC, code->cur + 7);
	//cycle limit reached: pop the return address pushed by the patch's call,
	//advance it past the remainder of the original prologue (the patch only
	//replaced its first patch_size bytes), then hand off to the int handler
	pop_r(code, opts->gen.scratch1);
	add_ir(code, check_int_size - patch_size, opts->gen.scratch1, SZ_Q);
	push_r(code, opts->gen.scratch1);
	jmp(code, opts->gen.handle_cycle_limit_int);
	*jmp_off = code->cur - (jmp_off+1);
	//jump back to body of translated instruction
	pop_r(code, opts->gen.scratch1);
	add_ir(code, check_int_size - patch_size, opts->gen.scratch1, SZ_Q);
	jmp_r(code, opts->gen.scratch1);
}
2364 | |
2232 void zinsert_breakpoint(z80_context * context, uint16_t address, uint8_t * bp_handler) | 2365 void zinsert_breakpoint(z80_context * context, uint16_t address, uint8_t * bp_handler) |
2233 { | 2366 { |
2234 static uint8_t * bp_stub = NULL; | 2367 context->bp_handler = bp_handler; |
2235 z80_options * opts = context->options; | 2368 uint8_t bit = 1 << (address % sizeof(uint8_t)); |
2236 code_ptr native = z80_get_native_address_trans(context, address); | 2369 if (!(bit & context->breakpoint_flags[address / sizeof(uint8_t)])) { |
2237 code_info tmp_code = {native, native+16}; | 2370 context->breakpoint_flags[address / sizeof(uint8_t)] |= bit; |
2238 mov_ir(&tmp_code, address, opts->gen.scratch1, SZ_W); | 2371 if (!context->bp_stub) { |
2239 if (!bp_stub) { | 2372 zcreate_stub(context); |
2240 code_info *code = &opts->gen.code; | 2373 } |
2241 check_code_prologue(code); | 2374 uint8_t * native = z80_get_native_address(context, address); |
2242 bp_stub = code->cur; | 2375 if (native) { |
2243 call(&tmp_code, bp_stub); | 2376 zbreakpoint_patch(context, address, native); |
2244 | 2377 } |
2245 //Calculate length of prologue | 2378 } |
2379 } | |
2380 | |
2381 void zremove_breakpoint(z80_context * context, uint16_t address) | |
2382 { | |
2383 context->breakpoint_flags[address / sizeof(uint8_t)] &= ~(1 << (address % sizeof(uint8_t))); | |
2384 uint8_t * native = z80_get_native_address(context, address); | |
2385 if (native) { | |
2386 z80_options * opts = context->options; | |
2387 code_info tmp_code = opts->gen.code; | |
2388 opts->gen.code.cur = native; | |
2389 opts->gen.code.last = native + 16; | |
2246 check_cycles_int(&opts->gen, address); | 2390 check_cycles_int(&opts->gen, address); |
2247 int check_int_size = code->cur-bp_stub; | 2391 opts->gen.code = tmp_code; |
2248 code->cur = bp_stub; | 2392 } |
2249 | 2393 } |
2250 //Save context and call breakpoint handler | 2394 |
2251 call(code, opts->gen.save_context); | |
2252 push_r(code, opts->gen.scratch1); | |
2253 mov_rr(code, opts->gen.context_reg, RDI, SZ_Q); | |
2254 mov_rr(code, opts->gen.scratch1, RSI, SZ_W); | |
2255 call(code, bp_handler); | |
2256 mov_rr(code, RAX, opts->gen.context_reg, SZ_Q); | |
2257 //Restore context | |
2258 call(code, opts->gen.load_context); | |
2259 pop_r(code, opts->gen.scratch1); | |
2260 //do prologue stuff | |
2261 cmp_rr(code, opts->gen.cycles, opts->gen.limit, SZ_D); | |
2262 uint8_t * jmp_off = code->cur+1; | |
2263 jcc(code, CC_NC, code->cur + 7); | |
2264 pop_r(code, opts->gen.scratch1); | |
2265 add_ir(code, check_int_size - (tmp_code.cur-native), opts->gen.scratch1, SZ_Q); | |
2266 push_r(code, opts->gen.scratch1); | |
2267 jmp(code, opts->gen.handle_cycle_limit_int); | |
2268 *jmp_off = code->cur - (jmp_off+1); | |
2269 //jump back to body of translated instruction | |
2270 pop_r(code, opts->gen.scratch1); | |
2271 add_ir(code, check_int_size - (tmp_code.cur-native), opts->gen.scratch1, SZ_Q); | |
2272 jmp_r(code, opts->gen.scratch1); | |
2273 } else { | |
2274 call(&tmp_code, bp_stub); | |
2275 } | |
2276 } | |
2277 | |
2278 void zremove_breakpoint(z80_context * context, uint16_t address) | |
2279 { | |
2280 uint8_t * native = z80_get_native_address(context, address); | |
2281 z80_options * opts = context->options; | |
2282 code_info tmp_code = opts->gen.code; | |
2283 opts->gen.code.cur = native; | |
2284 opts->gen.code.last = native + 16; | |
2285 check_cycles_int(&opts->gen, address); | |
2286 opts->gen.code = tmp_code; | |
2287 } | |
2288 | |
2289 |