blastem: comparison z80_to_x86.c @ 1130:8f14767661fa
Remove memory map assumptions from Z80 core and move a little bit of logic to the generic backend.c so it can be shared between CPU cores
author    Michael Pavone <pavone@retrodev.com>
date      Wed, 28 Dec 2016 20:39:27 -0800
parents   928a65750345
children  136b1676109b
comparing 1129:6b5c92b6205c with 1130:8f14767661fa
13 #include <stddef.h> | 13 #include <stddef.h> |
14 #include <string.h> | 14 #include <string.h> |
15 | 15 |
16 #define MODE_UNUSED (MODE_IMMED-1) | 16 #define MODE_UNUSED (MODE_IMMED-1) |
17 #define MAX_MCYCLE_LENGTH 6 | 17 #define MAX_MCYCLE_LENGTH 6 |
18 #define NATIVE_CHUNK_SIZE 1024 | |
19 #define NATIVE_MAP_CHUNKS (0x10000 / NATIVE_CHUNK_SIZE) | |
18 | 20 |
19 //#define DO_DEBUG_PRINT | 21 //#define DO_DEBUG_PRINT |
20 | 22 |
21 #ifdef DO_DEBUG_PRINT | 23 #ifdef DO_DEBUG_PRINT |
22 #define dprintf printf | 24 #define dprintf printf |
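
The two new constants above replace the old hard-coded split at 0x4000 between a static and a banked code map: the full 64KB Z80 address space is now covered by a flat array of NATIVE_MAP_CHUNKS native_map_slot entries, each tracking 1024 addresses. A minimal sketch of the resulting lookup arithmetic; the struct fields mirror how this file uses them, and the INVALID_OFFSET sentinel value is assumed for illustration only:

    #include <stdint.h>
    #include <stddef.h>

    #define NATIVE_CHUNK_SIZE 1024
    #define NATIVE_MAP_CHUNKS (0x10000 / NATIVE_CHUNK_SIZE)
    #define INVALID_OFFSET -1 /* sentinel assumed for this sketch; the core defines its own */

    typedef struct {
        uint8_t *base;    /* start of translated native code for this 1KB chunk */
        int32_t *offsets; /* per-address offset from base, or a sentinel */
    } native_map_slot;

    /* Resolve a Z80 address to its translated native code, or NULL if untranslated. */
    static uint8_t *lookup_native(native_map_slot *map, uint32_t address)
    {
        uint32_t chunk  = address / NATIVE_CHUNK_SIZE; /* which 1KB slice of the 64KB space */
        uint32_t offset = address % NATIVE_CHUNK_SIZE; /* position inside that slice */
        if (!map[chunk].base || map[chunk].offsets[offset] == INVALID_OFFSET) {
            return NULL;
        }
        return map[chunk].base + map[chunk].offsets[offset];
    }

Nothing in this path cares where RAM or ROM actually sits in the Z80 address space; per the commit message, those memory map specifics are supplied by find_map_chunk in the shared backend code.
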
2879 } | 2881 } |
2880 | 2882 |
2881 | 2883 |
2882 uint8_t * z80_get_native_address(z80_context * context, uint32_t address) | 2884 uint8_t * z80_get_native_address(z80_context * context, uint32_t address) |
2883 { | 2885 { |
2884 native_map_slot *map; | 2886 z80_options *opts = context->options; |
2885 if (address < 0x4000) { | 2887 native_map_slot * native_code_map = opts->gen.native_code_map; |
2886 address &= 0x1FFF; | 2888 |
2887 map = context->static_code_map; | 2889 memmap_chunk const *mem_chunk = find_map_chunk(address, &opts->gen, 0, NULL); |
2888 } else { | 2890 if (mem_chunk) { |
2889 address -= 0x4000; | 2891 //calculate the lowest alias for this address |
2890 map = context->banked_code_map; | 2892 address = mem_chunk->start + ((address - mem_chunk->start) & mem_chunk->mask); |
2891 } | 2893 } |
2892 if (!map->base || !map->offsets || map->offsets[address] == INVALID_OFFSET || map->offsets[address] == EXTENSION_WORD) { | 2894 uint32_t chunk = address / NATIVE_CHUNK_SIZE; |
2893 //dprintf("z80_get_native_address: %X NULL\n", address); | 2895 if (!native_code_map[chunk].base) { |
2894 return NULL; | 2896 return NULL; |
2895 } | 2897 } |
2896 //dprintf("z80_get_native_address: %X %p\n", address, map->base + map->offsets[address]); | 2898 uint32_t offset = address % NATIVE_CHUNK_SIZE; |
2897 return map->base + map->offsets[address]; | 2899 if (native_code_map[chunk].offsets[offset] == INVALID_OFFSET || native_code_map[chunk].offsets[offset] == EXTENSION_WORD) { |
2900 return NULL; | |
2901 } | |
2902 return native_code_map[chunk].base + native_code_map[chunk].offsets[offset]; | |
2898 } | 2903 } |
2899 | 2904 |
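
Before indexing that map, z80_get_native_address collapses the incoming address to its lowest alias, so every mirror of a mirrored chunk shares one set of translated code. A worked example of the alias arithmetic; the start, mask and address values below are made up for illustration and are not taken from any real BlastEm memory map:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* hypothetical chunk: 8KB of RAM at 0xC000, mirrored through 0xFFFF */
        uint32_t start = 0xC000, mask = 0x1FFF;
        uint32_t address = 0xE123; /* access lands in the mirror */

        uint32_t lowest_alias = start + ((address - start) & mask);
        /* (0xE123 - 0xC000) & 0x1FFF = 0x0123, so the lookup uses 0xC123 */
        printf("%X -> %X\n", address, lowest_alias);
        return 0;
    }
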
2900 uint8_t z80_get_native_inst_size(z80_options * opts, uint32_t address) | 2905 uint8_t z80_get_native_inst_size(z80_options * opts, uint32_t address) |
2901 { | 2906 { |
2902 //TODO: Fix for addresses >= 0x4000 | 2907 uint32_t meta_off; |
2903 if (address >= 0x4000) { | 2908 memmap_chunk const *chunk = find_map_chunk(address, &opts->gen, MMAP_WRITE | MMAP_CODE, &meta_off); |
2904 return 0; | 2909 if (chunk) { |
2905 } | 2910 meta_off += (address - chunk->start) & chunk->mask; |
2906 return opts->gen.ram_inst_sizes[0][address & 0x1FFF]; | 2911 } |
2912 uint32_t slot = meta_off/1024; | |
2913 return opts->gen.ram_inst_sizes[slot][meta_off%1024]; | |
2907 } | 2914 } |
2908 | 2915 |
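
z80_get_native_inst_size likewise drops its "addresses below 0x4000 only" restriction. find_map_chunk hands back a metadata offset for writable code chunks, the masked in-chunk offset is added on top, and the combined offset indexes ram_inst_sizes as an array of 1KB slots. A sketch of that indexing, with the layout inferred from the allocation later in this diff; the NULL guard is an extra safety check in this sketch, not something the function in the diff performs:

    #include <stdint.h>
    #include <stddef.h>

    /* ram_inst_sizes: one pointer per 1KB of writable code RAM; a slot stays NULL
       until code is first mapped into it by z80_map_native_address. */
    static uint8_t lookup_inst_size(uint8_t **ram_inst_sizes, uint32_t meta_off)
    {
        uint32_t slot  = meta_off / 1024;
        uint32_t index = meta_off % 1024;
        if (!ram_inst_sizes[slot]) {
            return 0; /* no translated instruction recorded in this region yet */
        }
        return ram_inst_sizes[slot][index];
    }
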
2909 void z80_map_native_address(z80_context * context, uint32_t address, uint8_t * native_address, uint8_t size, uint8_t native_size) | 2916 void z80_map_native_address(z80_context * context, uint32_t address, uint8_t * native_address, uint8_t size, uint8_t native_size) |
2910 { | 2917 { |
2911 uint32_t orig_address = address; | 2918 uint32_t orig_address = address; |
2912 native_map_slot *map; | 2919 |
2913 z80_options * opts = context->options; | 2920 z80_options * opts = context->options; |
2914 if (address < 0x4000) { | 2921 uint32_t meta_off; |
2915 address &= 0x1FFF; | 2922 memmap_chunk const *mem_chunk = find_map_chunk(address, &opts->gen, MMAP_WRITE | MMAP_CODE, &meta_off); |
2916 map = context->static_code_map; | 2923 if (mem_chunk) { |
2917 opts->gen.ram_inst_sizes[0][address] = native_size; | 2924 if ((mem_chunk->flags & (MMAP_WRITE | MMAP_CODE)) == (MMAP_WRITE | MMAP_CODE)) { |
2918 context->ram_code_flags[(address & 0x1C00) >> 10] |= 1 << ((address & 0x380) >> 7); | 2925 uint32_t masked = (address & mem_chunk->mask); |
2919 context->ram_code_flags[((address + size) & 0x1C00) >> 10] |= 1 << (((address + size) & 0x380) >> 7); | 2926 uint32_t final_off = masked + meta_off; |
2920 } else { | 2927 uint32_t ram_flags_off = final_off >> (opts->gen.ram_flags_shift + 3); |
2921 //HERE | 2928 context->ram_code_flags[ram_flags_off] |= 1 << ((final_off >> opts->gen.ram_flags_shift) & 7); |
2922 address -= 0x4000; | 2929 |
2923 map = context->banked_code_map; | 2930 uint32_t slot = final_off / 1024; |
2924 if (!map->offsets) { | 2931 if (!opts->gen.ram_inst_sizes[slot]) { |
2925 map->offsets = malloc(sizeof(int32_t) * 0xC000); | 2932 opts->gen.ram_inst_sizes[slot] = malloc(sizeof(uint8_t) * 1024); |
2926 memset(map->offsets, 0xFF, sizeof(int32_t) * 0xC000); | 2933 } |
2927 } | 2934 opts->gen.ram_inst_sizes[slot][final_off % 1024] = native_size; |
2928 } | 2935 |
2936 //TODO: Deal with case in which end of instruction is in a different memory chunk | |
2937 masked = (address + size - 1) & mem_chunk->mask; | |
2938 final_off = masked + meta_off; | |
2939 ram_flags_off = final_off >> (opts->gen.ram_flags_shift + 3); | |
2940 context->ram_code_flags[ram_flags_off] |= 1 << ((final_off >> opts->gen.ram_flags_shift) & 7); | |
2941 } | |
2942 //calculate the lowest alias for this address | |
2943 address = mem_chunk->start + ((address - mem_chunk->start) & mem_chunk->mask); | |
2944 } | |
2945 | |
2946 native_map_slot *map = opts->gen.native_code_map + address / NATIVE_CHUNK_SIZE; | |
2929 if (!map->base) { | 2947 if (!map->base) { |
2930 map->base = native_address; | 2948 map->base = native_address; |
2931 } | 2949 map->offsets = malloc(sizeof(int32_t) * NATIVE_CHUNK_SIZE); |
2932 map->offsets[address] = native_address - map->base; | 2950 memset(map->offsets, 0xFF, sizeof(int32_t) * NATIVE_CHUNK_SIZE); |
2933 for(--size, orig_address++; size; --size, orig_address++) { | 2951 } |
2934 address = orig_address; | 2952 map->offsets[address % NATIVE_CHUNK_SIZE] = native_address - map->base; |
2935 if (address < 0x4000) { | 2953 for(--size, address++; size; --size, orig_address++) { |
2936 address &= 0x1FFF; | 2954 address &= opts->gen.address_mask; |
2937 map = context->static_code_map; | 2955 map = opts->gen.native_code_map + address / NATIVE_CHUNK_SIZE; |
2938 } else { | 2956 if (!map->base) { |
2939 address -= 0x4000; | 2957 map->base = native_address; |
2940 map = context->banked_code_map; | 2958 map->offsets = malloc(sizeof(int32_t) * NATIVE_CHUNK_SIZE); |
2941 } | 2959 memset(map->offsets, 0xFF, sizeof(int32_t) * NATIVE_CHUNK_SIZE); |
2942 if (!map->offsets) { | 2960 } |
2943 map->offsets = malloc(sizeof(int32_t) * 0xC000); | 2961 |
2944 memset(map->offsets, 0xFF, sizeof(int32_t) * 0xC000); | 2962 if (map->offsets[address % NATIVE_CHUNK_SIZE] == INVALID_OFFSET) { |
2945 } | 2963 map->offsets[address % NATIVE_CHUNK_SIZE] = EXTENSION_WORD; |
2946 map->offsets[address] = EXTENSION_WORD; | 2964 } |
2947 } | 2965 } |
2948 } | 2966 } |
2949 | 2967 |
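
In z80_map_native_address, the dirty-code bookkeeping also loses its hard-coded masks (0x1C00, 0x380): the flag bit is now derived from the generic ram_flags_shift, the log2 size of one tracked block. A worked example of the bit arithmetic, using an assumed shift of 7 (128-byte blocks); the real shift comes from the options and is not asserted here:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t ram_flags_shift = 7;  /* assumed: one flag bit per 128-byte block */
        uint32_t final_off = 0x123;    /* metadata offset of the mapped instruction */

        uint32_t byte_index = final_off >> (ram_flags_shift + 3); /* 8 blocks per flag byte */
        uint32_t bit        = (final_off >> ram_flags_shift) & 7; /* block within that byte */

        uint8_t ram_code_flags[8] = {0};
        ram_code_flags[byte_index] |= 1 << bit;

        /* 0x123 >> 10 == 0 and (0x123 >> 7) & 7 == 2: byte 0, bit 2 covers 0x100-0x17F */
        printf("byte %u, bit %u\n", byte_index, bit);
        return 0;
    }

The TODO in the hunk above marks the remaining gap: an instruction whose last byte crosses into a different memory chunk is still flagged using the first chunk's mask and metadata offset.
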
2950 #define INVALID_INSTRUCTION_START 0xFEEDFEED | 2968 #define INVALID_INSTRUCTION_START 0xFEEDFEED |
2951 | 2969 |
2952 uint32_t z80_get_instruction_start(native_map_slot * static_code_map, uint32_t address) | 2970 uint32_t z80_get_instruction_start(z80_context *context, uint32_t address) |
2953 { | 2971 { |
2954 //TODO: Fixme for address >= 0x4000 | 2972 z80_options *opts = context->options; |
2955 if (!static_code_map->base || address >= 0x4000) { | 2973 native_map_slot * native_code_map = opts->gen.native_code_map; |
2974 memmap_chunk const *mem_chunk = find_map_chunk(address, &opts->gen, 0, NULL); | |
2975 if (mem_chunk) { | |
2976 //calculate the lowest alias for this address | |
2977 address = mem_chunk->start + ((address - mem_chunk->start) & mem_chunk->mask); | |
2978 } | |
2979 | |
2980 uint32_t chunk = address / NATIVE_CHUNK_SIZE; | |
2981 if (!native_code_map[chunk].base) { | |
2956 return INVALID_INSTRUCTION_START; | 2982 return INVALID_INSTRUCTION_START; |
2957 } | 2983 } |
2958 address &= 0x1FFF; | 2984 uint32_t offset = address % NATIVE_CHUNK_SIZE; |
2959 if (static_code_map->offsets[address] == INVALID_OFFSET) { | 2985 if (native_code_map[chunk].offsets[offset] == INVALID_OFFSET) { |
2960 return INVALID_INSTRUCTION_START; | 2986 return INVALID_INSTRUCTION_START; |
2961 } | 2987 } |
2962 while (static_code_map->offsets[address] == EXTENSION_WORD) { | 2988 while (native_code_map[chunk].offsets[offset] == EXTENSION_WORD) |
2989 { | |
2963 --address; | 2990 --address; |
2964 address &= 0x1FFF; | 2991 chunk = address / NATIVE_CHUNK_SIZE; |
2992 offset = address % NATIVE_CHUNK_SIZE; | |
2965 } | 2993 } |
2966 return address; | 2994 return address; |
2967 } | 2995 } |
2968 | 2996 |
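
z80_get_instruction_start keeps the old strategy of walking backwards over EXTENSION_WORD markers to find the first byte of a multi-byte instruction, but it now recomputes chunk and offset on every step so the walk can cross a 1KB chunk boundary. A compact sketch of that loop; the struct layout and sentinel value are assumed for illustration, as in the earlier sketch:

    #include <stdint.h>

    #define NATIVE_CHUNK_SIZE 1024
    #define EXTENSION_WORD -2 /* sentinel assumed for this sketch */

    typedef struct {
        uint8_t *base;
        int32_t *offsets;
    } native_map_slot;

    /* Step back from a written address to the start of the instruction covering it. */
    static uint32_t find_inst_start(native_map_slot *map, uint32_t address)
    {
        uint32_t chunk  = address / NATIVE_CHUNK_SIZE;
        uint32_t offset = address % NATIVE_CHUNK_SIZE;
        while (map[chunk].offsets[offset] == EXTENSION_WORD) {
            --address;                            /* previous Z80 byte */
            chunk  = address / NATIVE_CHUNK_SIZE; /* may land in the previous chunk */
            offset = address % NATIVE_CHUNK_SIZE;
        }
        return address;
    }
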
2969 z80_context * z80_handle_code_write(uint32_t address, z80_context * context) | 2997 z80_context * z80_handle_code_write(uint32_t address, z80_context * context) |
2970 { | 2998 { |
2971 uint32_t inst_start = z80_get_instruction_start(context->static_code_map, address); | 2999 uint32_t inst_start = z80_get_instruction_start(context, address); |
2972 if (inst_start != INVALID_INSTRUCTION_START) { | 3000 if (inst_start != INVALID_INSTRUCTION_START) { |
2973 code_ptr dst = z80_get_native_address(context, inst_start); | 3001 code_ptr dst = z80_get_native_address(context, inst_start); |
2974 code_info code = {dst, dst+32, 0}; | 3002 code_info code = {dst, dst+32, 0}; |
2975 z80_options * opts = context->options; | 3003 z80_options * opts = context->options; |
2976 dprintf("patching code at %p for Z80 instruction at %X due to write to %X\n", code.cur, inst_start, address); | 3004 dprintf("patching code at %p for Z80 instruction at %X due to write to %X\n", code.cur, inst_start, address); |
3176 | 3204 |
3177 options->gen.context_reg = RSI; | 3205 options->gen.context_reg = RSI; |
3178 options->gen.cycles = RBP; | 3206 options->gen.cycles = RBP; |
3179 options->gen.limit = -1; | 3207 options->gen.limit = -1; |
3180 | 3208 |
3181 options->gen.native_code_map = malloc(sizeof(native_map_slot)); | 3209 options->gen.native_code_map = malloc(sizeof(native_map_slot) * NATIVE_MAP_CHUNKS); |
3182 memset(options->gen.native_code_map, 0, sizeof(native_map_slot)); | 3210 memset(options->gen.native_code_map, 0, sizeof(native_map_slot) * NATIVE_MAP_CHUNKS); |
3183 options->gen.deferred = NULL; | 3211 options->gen.deferred = NULL; |
3184 options->gen.ram_inst_sizes = malloc(sizeof(uint8_t) * 0x2000 + sizeof(uint8_t *)); | 3212 uint32_t inst_size_size = sizeof(uint8_t *) * ram_size(&options->gen) / 1024; |
3185 options->gen.ram_inst_sizes[0] = (uint8_t *)(options->gen.ram_inst_sizes + 1); | 3213 options->gen.ram_inst_sizes = malloc(inst_size_size); |
3186 memset(options->gen.ram_inst_sizes[0], 0, sizeof(uint8_t) * 0x2000); | 3214 memset(options->gen.ram_inst_sizes, 0, inst_size_size); |
3187 | 3215 |
3188 code_info *code = &options->gen.code; | 3216 code_info *code = &options->gen.code; |
3189 init_code_info(code); | 3217 init_code_info(code); |
3190 | 3218 |
3191 options->save_context_scratch = code->cur; | 3219 options->save_context_scratch = code->cur; |
3474 pop_r(code, options->gen.context_reg); | 3502 pop_r(code, options->gen.context_reg); |
3475 mov_rr(code, RAX, options->gen.scratch1, SZ_PTR); | 3503 mov_rr(code, RAX, options->gen.scratch1, SZ_PTR); |
3476 call(code, options->gen.load_context); | 3504 call(code, options->gen.load_context); |
3477 jmp_r(code, options->gen.scratch1); | 3505 jmp_r(code, options->gen.scratch1); |
3478 | 3506 |
3479 options->run = code->cur; | 3507 options->run = (z80_ctx_fun)code->cur; |
3480 tmp_stack_off = code->stack_off; | 3508 tmp_stack_off = code->stack_off; |
3481 save_callee_save_regs(code); | 3509 save_callee_save_regs(code); |
3482 #ifdef X86_64 | 3510 #ifdef X86_64 |
3483 mov_rr(code, RDI, options->gen.context_reg, SZ_PTR); | 3511 mov_rr(code, RDI, options->gen.context_reg, SZ_PTR); |
3484 #else | 3512 #else |
3494 *no_extra = code->cur - (no_extra + 1); | 3522 *no_extra = code->cur - (no_extra + 1); |
3495 jmp_rind(code, options->gen.context_reg); | 3523 jmp_rind(code, options->gen.context_reg); |
3496 code->stack_off = tmp_stack_off; | 3524 code->stack_off = tmp_stack_off; |
3497 } | 3525 } |
3498 | 3526 |
3499 void init_z80_context(z80_context * context, z80_options * options) | 3527 z80_context *init_z80_context(z80_options * options) |
3500 { | 3528 { |
3501 memset(context, 0, sizeof(*context)); | 3529 size_t ctx_size = sizeof(z80_context) + ram_size(&options->gen) / (1 << options->gen.ram_flags_shift) / 8; |
3502 context->static_code_map = malloc(sizeof(*context->static_code_map)); | 3530 z80_context *context = calloc(1, ctx_size); |
3503 context->static_code_map->base = NULL; | |
3504 context->static_code_map->offsets = malloc(sizeof(int32_t) * 0x2000); | |
3505 memset(context->static_code_map->offsets, 0xFF, sizeof(int32_t) * 0x2000); | |
3506 context->banked_code_map = malloc(sizeof(native_map_slot)); | |
3507 memset(context->banked_code_map, 0, sizeof(native_map_slot)); | |
3508 context->options = options; | 3531 context->options = options; |
3509 context->int_cycle = CYCLE_NEVER; | 3532 context->int_cycle = CYCLE_NEVER; |
3510 context->int_pulse_start = CYCLE_NEVER; | 3533 context->int_pulse_start = CYCLE_NEVER; |
3511 context->int_pulse_end = CYCLE_NEVER; | 3534 context->int_pulse_end = CYCLE_NEVER; |
3535 | |
3536 return context; | |
3512 } | 3537 } |
3513 | 3538 |
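
init_z80_context changes from initializing a caller-supplied struct to allocating the context itself, because the extra bytes after the struct, which appear to hold the ram_code_flags bitmap, are now sized from the host memory map (ram_size divided by the block size, one bit per block) instead of from a fixed 8KB of Z80 RAM. A small arithmetic sketch with illustrative numbers; neither the RAM size nor the shift value is asserted about any particular BlastEm target:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t ram = 8 * 1024;      /* hypothetical total of writable code RAM (ram_size()) */
        uint32_t ram_flags_shift = 7; /* hypothetical: one flag bit per 128-byte block */

        uint32_t flag_bytes = ram / (1 << ram_flags_shift) / 8; /* one bit per block, 8 bits per byte */
        printf("%u blocks -> %u flag bytes appended after the context struct\n",
               ram >> ram_flags_shift, flag_bytes); /* 64 blocks -> 8 bytes */
        return 0;
    }
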
3514 void z80_run(z80_context * context, uint32_t target_cycle) | 3539 void z80_run(z80_context * context, uint32_t target_cycle) |
3515 { | 3540 { |
3516 if (context->reset || context->busack) { | 3541 if (context->reset || context->busack) { |