Mercurial > repos > blastem
comparison genesis.c @ 2280:9ead0fe69d9b
Implement savestate support for Sega CD
author | Michael Pavone <pavone@retrodev.com> |
---|---|
date | Sun, 08 Jan 2023 14:42:24 -0800 |
parents | 5a21bc0ec583 |
children | b9fed07f19e4 |
comparison
equal
deleted
inserted
replaced
2279:3b5fef896475 | 2280:9ead0fe69d9b |
---|---|
79 if (all) { | 79 if (all) { |
80 start_section(buf, SECTION_GEN_BUS_ARBITER); | 80 start_section(buf, SECTION_GEN_BUS_ARBITER); |
81 save_int8(buf, gen->z80->reset); | 81 save_int8(buf, gen->z80->reset); |
82 save_int8(buf, gen->z80->busreq); | 82 save_int8(buf, gen->z80->busreq); |
83 save_int16(buf, gen->z80_bank_reg); | 83 save_int16(buf, gen->z80_bank_reg); |
84 //I think the refresh logic may technically be part of the VDP, but whatever | |
85 save_int32(buf, gen->last_sync_cycle); | |
86 save_int32(buf, gen->refresh_counter); | |
84 end_section(buf); | 87 end_section(buf); |
85 | 88 |
86 start_section(buf, SECTION_SEGA_IO_1); | 89 start_section(buf, SECTION_SEGA_IO_1); |
87 io_serialize(gen->io.ports, buf); | 90 io_serialize(gen->io.ports, buf); |
88 end_section(buf); | 91 end_section(buf); |
115 end_section(buf); | 118 end_section(buf); |
116 } | 119 } |
117 | 120 |
118 cart_serialize(&gen->header, buf); | 121 cart_serialize(&gen->header, buf); |
119 } | 122 } |
123 if (gen->expansion) { | |
124 segacd_context *cd = gen->expansion; | |
125 segacd_serialize(cd, buf, all); | |
126 } | |
120 } | 127 } |
121 | 128 |
122 static uint8_t *serialize(system_header *sys, size_t *size_out) | 129 static uint8_t *serialize(system_header *sys, size_t *size_out) |
123 { | 130 { |
124 genesis_context *gen = (genesis_context *)sys; | 131 genesis_context *gen = (genesis_context *)sys; |
180 { | 187 { |
181 genesis_context *gen = vgen; | 188 genesis_context *gen = vgen; |
182 gen->z80->reset = load_int8(buf); | 189 gen->z80->reset = load_int8(buf); |
183 gen->z80->busreq = load_int8(buf); | 190 gen->z80->busreq = load_int8(buf); |
184 gen->z80_bank_reg = load_int16(buf) & 0x1FF; | 191 gen->z80_bank_reg = load_int16(buf) & 0x1FF; |
192 if ((buf->size - buf->cur_pos) >= 2 * sizeof(uint32_t)) { | |
193 gen->last_sync_cycle = load_int32(buf); | |
194 gen->refresh_counter = load_int32(buf); | |
195 } else { | |
196 //save state is from an older version of BlastEm that lacks these fields | |
197 //set them to reasonable values | |
198 gen->last_sync_cycle = gen->m68k->current_cycle; | |
199 gen->refresh_counter = 0; | |
200 } | |
185 } | 201 } |
186 | 202 |
187 static void tmss_deserialize(deserialize_buffer *buf, void *vgen) | 203 static void tmss_deserialize(deserialize_buffer *buf, void *vgen) |
188 { | 204 { |
189 genesis_context *gen = vgen; | 205 genesis_context *gen = vgen; |
192 } | 208 } |
193 | 209 |
194 static void adjust_int_cycle(m68k_context * context, vdp_context * v_context); | 210 static void adjust_int_cycle(m68k_context * context, vdp_context * v_context); |
195 static void check_tmss_lock(genesis_context *gen); | 211 static void check_tmss_lock(genesis_context *gen); |
196 static void toggle_tmss_rom(genesis_context *gen); | 212 static void toggle_tmss_rom(genesis_context *gen); |
213 #include "m68k_internal.h" //needed for get_native_address_trans, should be eliminated once handling of PC is cleaned up | |
197 void genesis_deserialize(deserialize_buffer *buf, genesis_context *gen) | 214 void genesis_deserialize(deserialize_buffer *buf, genesis_context *gen) |
198 { | 215 { |
199 register_section_handler(buf, (section_handler){.fun = m68k_deserialize, .data = gen->m68k}, SECTION_68000); | 216 register_section_handler(buf, (section_handler){.fun = m68k_deserialize, .data = gen->m68k}, SECTION_68000); |
200 register_section_handler(buf, (section_handler){.fun = z80_deserialize, .data = gen->z80}, SECTION_Z80); | 217 register_section_handler(buf, (section_handler){.fun = z80_deserialize, .data = gen->z80}, SECTION_Z80); |
201 register_section_handler(buf, (section_handler){.fun = vdp_deserialize, .data = gen->vdp}, SECTION_VDP); | 218 register_section_handler(buf, (section_handler){.fun = vdp_deserialize, .data = gen->vdp}, SECTION_VDP); |
207 register_section_handler(buf, (section_handler){.fun = io_deserialize, .data = gen->io.ports + 2}, SECTION_SEGA_IO_EXT); | 224 register_section_handler(buf, (section_handler){.fun = io_deserialize, .data = gen->io.ports + 2}, SECTION_SEGA_IO_EXT); |
208 register_section_handler(buf, (section_handler){.fun = ram_deserialize, .data = gen}, SECTION_MAIN_RAM); | 225 register_section_handler(buf, (section_handler){.fun = ram_deserialize, .data = gen}, SECTION_MAIN_RAM); |
209 register_section_handler(buf, (section_handler){.fun = zram_deserialize, .data = gen}, SECTION_SOUND_RAM); | 226 register_section_handler(buf, (section_handler){.fun = zram_deserialize, .data = gen}, SECTION_SOUND_RAM); |
210 register_section_handler(buf, (section_handler){.fun = cart_deserialize, .data = gen}, SECTION_MAPPER); | 227 register_section_handler(buf, (section_handler){.fun = cart_deserialize, .data = gen}, SECTION_MAPPER); |
211 register_section_handler(buf, (section_handler){.fun = tmss_deserialize, .data = gen}, SECTION_TMSS); | 228 register_section_handler(buf, (section_handler){.fun = tmss_deserialize, .data = gen}, SECTION_TMSS); |
229 if (gen->expansion) { | |
230 segacd_context *cd = gen->expansion; | |
231 segacd_register_section_handlers(cd, buf); | |
232 } | |
212 uint8_t tmss_old = gen->tmss; | 233 uint8_t tmss_old = gen->tmss; |
213 gen->tmss = 0xFF; | 234 gen->tmss = 0xFF; |
214 while (buf->cur_pos < buf->size) | 235 while (buf->cur_pos < buf->size) |
215 { | 236 { |
216 load_section(buf); | 237 load_section(buf); |
228 } | 249 } |
229 check_tmss_lock(gen); | 250 check_tmss_lock(gen); |
230 } | 251 } |
231 update_z80_bank_pointer(gen); | 252 update_z80_bank_pointer(gen); |
232 adjust_int_cycle(gen->m68k, gen->vdp); | 253 adjust_int_cycle(gen->m68k, gen->vdp); |
254 //HACK: Fix this once PC/IR is represented in a better way in 68K core | |
255 //Would be better for this hack to live inside the 68K core itself, but it's better to do it | |
256 //after RAM has been loaded to avoid any unnecessary retranslation | |
257 gen->m68k->resume_pc = get_native_address_trans(gen->m68k, gen->m68k->last_prefetch_address); | |
258 if (gen->expansion) { | |
259 segacd_context *cd = gen->expansion; | |
260 cd->m68k->resume_pc = get_native_address_trans(cd->m68k, cd->m68k->last_prefetch_address); | |
261 } | |
233 free(buf->handlers); | 262 free(buf->handlers); |
234 buf->handlers = NULL; | 263 buf->handlers = NULL; |
235 } | 264 } |
236 | 265 |
237 #include "m68k_internal.h" //needed for get_native_address_trans, should be eliminated once handling of PC is cleaned up | |
238 static void deserialize(system_header *sys, uint8_t *data, size_t size) | 266 static void deserialize(system_header *sys, uint8_t *data, size_t size) |
239 { | 267 { |
240 genesis_context *gen = (genesis_context *)sys; | 268 genesis_context *gen = (genesis_context *)sys; |
241 deserialize_buffer buffer; | 269 deserialize_buffer buffer; |
242 init_deserialize(&buffer, data, size); | 270 init_deserialize(&buffer, data, size); |
243 genesis_deserialize(&buffer, gen); | 271 genesis_deserialize(&buffer, gen); |
244 //HACK: Fix this once PC/IR is represented in a better way in 68K core | |
245 gen->m68k->resume_pc = get_native_address_trans(gen->m68k, gen->m68k->last_prefetch_address); | |
246 } | 272 } |
247 | 273 |
248 uint16_t read_dma_value(uint32_t address) | 274 uint16_t read_dma_value(uint32_t address) |
249 { | 275 { |
250 genesis_context *genesis = (genesis_context *)current_system; | 276 genesis_context *genesis = (genesis_context *)current_system; |
422 } | 448 } |
423 | 449 |
424 //printf("Target: %d, YM bufferpos: %d, PSG bufferpos: %d\n", target, gen->ym->buffer_pos, gen->psg->buffer_pos * 2); | 450 //printf("Target: %d, YM bufferpos: %d, PSG bufferpos: %d\n", target, gen->ym->buffer_pos, gen->psg->buffer_pos * 2); |
425 } | 451 } |
426 | 452 |
427 //My refresh emulation isn't currently good enough and causes more problems than it solves | |
428 #define REFRESH_EMULATION | |
429 #ifdef REFRESH_EMULATION | |
430 #define REFRESH_INTERVAL 128 | 453 #define REFRESH_INTERVAL 128 |
431 #define REFRESH_DELAY 2 | 454 #define REFRESH_DELAY 2 |
432 uint32_t last_sync_cycle; | |
433 uint32_t refresh_counter; | |
434 #endif | |
435 | 455 |
436 #include <limits.h> | 456 #include <limits.h> |
437 #define ADJUST_BUFFER (8*MCLKS_LINE*313) | 457 #define ADJUST_BUFFER (8*MCLKS_LINE*313) |
438 #define MAX_NO_ADJUST (UINT_MAX-ADJUST_BUFFER) | 458 #define MAX_NO_ADJUST (UINT_MAX-ADJUST_BUFFER) |
439 | 459 |
440 static m68k_context *sync_components(m68k_context * context, uint32_t address) | 460 static m68k_context *sync_components(m68k_context * context, uint32_t address) |
441 { | 461 { |
442 genesis_context * gen = context->system; | 462 genesis_context * gen = context->system; |
443 vdp_context * v_context = gen->vdp; | 463 vdp_context * v_context = gen->vdp; |
444 z80_context * z_context = gen->z80; | 464 z80_context * z_context = gen->z80; |
445 #ifdef REFRESH_EMULATION | |
446 //lame estimation of refresh cycle delay | 465 //lame estimation of refresh cycle delay |
447 refresh_counter += context->current_cycle - last_sync_cycle; | 466 gen->refresh_counter += context->current_cycle - gen->last_sync_cycle; |
448 if (!gen->bus_busy) { | 467 if (!gen->bus_busy) { |
449 context->current_cycle += REFRESH_DELAY * MCLKS_PER_68K * (refresh_counter / (MCLKS_PER_68K * REFRESH_INTERVAL)); | 468 context->current_cycle += REFRESH_DELAY * MCLKS_PER_68K * (gen->refresh_counter / (MCLKS_PER_68K * REFRESH_INTERVAL)); |
450 } | 469 } |
451 refresh_counter = refresh_counter % (MCLKS_PER_68K * REFRESH_INTERVAL); | 470 gen->refresh_counter = gen->refresh_counter % (MCLKS_PER_68K * REFRESH_INTERVAL); |
452 #endif | |
453 | 471 |
454 uint32_t mclks = context->current_cycle; | 472 uint32_t mclks = context->current_cycle; |
455 sync_z80(gen, mclks); | 473 sync_z80(gen, mclks); |
456 sync_sound(gen, mclks); | 474 sync_sound(gen, mclks); |
457 vdp_run_context(v_context, mclks); | 475 vdp_run_context(v_context, mclks); |
585 free(save_path); | 603 free(save_path); |
586 } else if(gen->header.save_state) { | 604 } else if(gen->header.save_state) { |
587 context->sync_cycle = context->current_cycle + 1; | 605 context->sync_cycle = context->current_cycle + 1; |
588 } | 606 } |
589 } | 607 } |
590 #ifdef REFRESH_EMULATION | 608 gen->last_sync_cycle = context->current_cycle; |
591 last_sync_cycle = context->current_cycle; | |
592 #endif | |
593 return context; | 609 return context; |
594 } | 610 } |
595 | 611 |
596 static m68k_context * vdp_port_write(uint32_t vdp_port, m68k_context * context, uint16_t value) | 612 static m68k_context * vdp_port_write(uint32_t vdp_port, m68k_context * context, uint16_t value) |
597 { | 613 { |
602 if (!gen->vdp_unlocked) { | 618 if (!gen->vdp_unlocked) { |
603 fatal_error("machine freeze due to VDP write to %X without TMSS unlock\n", 0xC00000 | vdp_port); | 619 fatal_error("machine freeze due to VDP write to %X without TMSS unlock\n", 0xC00000 | vdp_port); |
604 } | 620 } |
605 vdp_port &= 0x1F; | 621 vdp_port &= 0x1F; |
606 //printf("vdp_port write: %X, value: %X, cycle: %d\n", vdp_port, value, context->current_cycle); | 622 //printf("vdp_port write: %X, value: %X, cycle: %d\n", vdp_port, value, context->current_cycle); |
607 #ifdef REFRESH_EMULATION | 623 |
608 //do refresh check here so we can avoid adding a penalty for a refresh that happens during a VDP access | 624 //do refresh check here so we can avoid adding a penalty for a refresh that happens during a VDP access |
609 refresh_counter += context->current_cycle - 4*MCLKS_PER_68K - last_sync_cycle; | 625 gen->refresh_counter += context->current_cycle - 4*MCLKS_PER_68K - gen->last_sync_cycle; |
610 context->current_cycle += REFRESH_DELAY * MCLKS_PER_68K * (refresh_counter / (MCLKS_PER_68K * REFRESH_INTERVAL)); | 626 context->current_cycle += REFRESH_DELAY * MCLKS_PER_68K * (gen->refresh_counter / (MCLKS_PER_68K * REFRESH_INTERVAL)); |
611 refresh_counter = refresh_counter % (MCLKS_PER_68K * REFRESH_INTERVAL); | 627 gen->refresh_counter = gen->refresh_counter % (MCLKS_PER_68K * REFRESH_INTERVAL); |
612 last_sync_cycle = context->current_cycle; | 628 gen->last_sync_cycle = context->current_cycle; |
613 #endif | 629 |
614 sync_components(context, 0); | 630 sync_components(context, 0); |
615 vdp_context *v_context = gen->vdp; | 631 vdp_context *v_context = gen->vdp; |
616 uint32_t before_cycle = v_context->cycles; | 632 uint32_t before_cycle = v_context->cycles; |
617 if (vdp_port < 0x10) { | 633 if (vdp_port < 0x10) { |
618 int blocked; | 634 int blocked; |
685 } else if (vdp_port < 0x18) { | 701 } else if (vdp_port < 0x18) { |
686 psg_write(gen->psg, value); | 702 psg_write(gen->psg, value); |
687 } else { | 703 } else { |
688 vdp_test_port_write(gen->vdp, value); | 704 vdp_test_port_write(gen->vdp, value); |
689 } | 705 } |
690 #ifdef REFRESH_EMULATION | 706 |
691 last_sync_cycle -= 4 * MCLKS_PER_68K; | 707 gen->last_sync_cycle -= 4 * MCLKS_PER_68K; |
692 //refresh may have happened while we were waiting on the VDP, | 708 //refresh may have happened while we were waiting on the VDP, |
693 //so advance refresh_counter but don't add any delays | 709 //so advance refresh_counter but don't add any delays |
694 if (vdp_port >= 4 && vdp_port < 8 && v_context->cycles != before_cycle) { | 710 if (vdp_port >= 4 && vdp_port < 8 && v_context->cycles != before_cycle) { |
695 refresh_counter = 0; | 711 gen->refresh_counter = 0; |
696 } else { | 712 } else { |
697 refresh_counter += (context->current_cycle - last_sync_cycle); | 713 gen->refresh_counter += (context->current_cycle - gen->last_sync_cycle); |
698 refresh_counter = refresh_counter % (MCLKS_PER_68K * REFRESH_INTERVAL); | 714 gen->refresh_counter = gen->refresh_counter % (MCLKS_PER_68K * REFRESH_INTERVAL); |
699 } | 715 } |
700 last_sync_cycle = context->current_cycle; | 716 gen->last_sync_cycle = context->current_cycle; |
701 #endif | |
702 return context; | 717 return context; |
703 } | 718 } |
704 | 719 |
705 static m68k_context * vdp_port_write_b(uint32_t vdp_port, m68k_context * context, uint8_t value) | 720 static m68k_context * vdp_port_write_b(uint32_t vdp_port, m68k_context * context, uint8_t value) |
706 { | 721 { |
744 if (!gen->vdp_unlocked) { | 759 if (!gen->vdp_unlocked) { |
745 fatal_error("machine freeze due to VDP read from %X without TMSS unlock\n", 0xC00000 | vdp_port); | 760 fatal_error("machine freeze due to VDP read from %X without TMSS unlock\n", 0xC00000 | vdp_port); |
746 } | 761 } |
747 vdp_port &= 0x1F; | 762 vdp_port &= 0x1F; |
748 uint16_t value; | 763 uint16_t value; |
749 #ifdef REFRESH_EMULATION | 764 |
750 //do refresh check here so we can avoid adding a penalty for a refresh that happens during a VDP access | 765 //do refresh check here so we can avoid adding a penalty for a refresh that happens during a VDP access |
751 refresh_counter += context->current_cycle - 4*MCLKS_PER_68K - last_sync_cycle; | 766 gen->refresh_counter += context->current_cycle - 4*MCLKS_PER_68K - gen->last_sync_cycle; |
752 context->current_cycle += REFRESH_DELAY * MCLKS_PER_68K * (refresh_counter / (MCLKS_PER_68K * REFRESH_INTERVAL)); | 767 context->current_cycle += REFRESH_DELAY * MCLKS_PER_68K * (gen->refresh_counter / (MCLKS_PER_68K * REFRESH_INTERVAL)); |
753 refresh_counter = refresh_counter % (MCLKS_PER_68K * REFRESH_INTERVAL); | 768 gen->refresh_counter = gen->refresh_counter % (MCLKS_PER_68K * REFRESH_INTERVAL); |
754 last_sync_cycle = context->current_cycle; | 769 gen->last_sync_cycle = context->current_cycle; |
755 #endif | 770 |
756 sync_components(context, 0); | 771 sync_components(context, 0); |
757 vdp_context * v_context = gen->vdp; | 772 vdp_context * v_context = gen->vdp; |
758 uint32_t before_cycle = context->current_cycle; | 773 uint32_t before_cycle = context->current_cycle; |
759 if (vdp_port < 0x10) { | 774 if (vdp_port < 0x10) { |
760 if (vdp_port < 4) { | 775 if (vdp_port < 4) { |
776 genesis_context *gen = context->system; | 791 genesis_context *gen = context->system; |
777 gen->bus_busy = 1; | 792 gen->bus_busy = 1; |
778 sync_z80(gen, context->current_cycle); | 793 sync_z80(gen, context->current_cycle); |
779 gen->bus_busy = 0; | 794 gen->bus_busy = 0; |
780 } | 795 } |
781 #ifdef REFRESH_EMULATION | 796 |
782 last_sync_cycle -= 4 * MCLKS_PER_68K; | 797 gen->last_sync_cycle -= 4 * MCLKS_PER_68K; |
783 //refresh may have happened while we were waiting on the VDP, | 798 //refresh may have happened while we were waiting on the VDP, |
784 //so advance refresh_counter but don't add any delays | 799 //so advance refresh_counter but don't add any delays |
785 refresh_counter += (context->current_cycle - last_sync_cycle); | 800 gen->refresh_counter += (context->current_cycle - gen->last_sync_cycle); |
786 refresh_counter = refresh_counter % (MCLKS_PER_68K * REFRESH_INTERVAL); | 801 gen->refresh_counter = gen->refresh_counter % (MCLKS_PER_68K * REFRESH_INTERVAL); |
787 last_sync_cycle = context->current_cycle; | 802 gen->last_sync_cycle = context->current_cycle; |
788 #endif | |
789 return value; | 803 return value; |
790 } | 804 } |
791 | 805 |
792 static uint8_t vdp_port_read_b(uint32_t vdp_port, m68k_context * context) | 806 static uint8_t vdp_port_read_b(uint32_t vdp_port, m68k_context * context) |
793 { | 807 { |
842 static uint32_t zram_counter = 0; | 856 static uint32_t zram_counter = 0; |
843 | 857 |
844 static m68k_context * io_write(uint32_t location, m68k_context * context, uint8_t value) | 858 static m68k_context * io_write(uint32_t location, m68k_context * context, uint8_t value) |
845 { | 859 { |
846 genesis_context * gen = context->system; | 860 genesis_context * gen = context->system; |
847 #ifdef REFRESH_EMULATION | 861 |
848 //do refresh check here so we can avoid adding a penalty for a refresh that happens during an IO area access | 862 //do refresh check here so we can avoid adding a penalty for a refresh that happens during an IO area access |
849 refresh_counter += context->current_cycle - 4*MCLKS_PER_68K - last_sync_cycle; | 863 gen->refresh_counter += context->current_cycle - 4*MCLKS_PER_68K - gen->last_sync_cycle; |
850 context->current_cycle += REFRESH_DELAY * MCLKS_PER_68K * (refresh_counter / (MCLKS_PER_68K * REFRESH_INTERVAL)); | 864 context->current_cycle += REFRESH_DELAY * MCLKS_PER_68K * (gen->refresh_counter / (MCLKS_PER_68K * REFRESH_INTERVAL)); |
851 refresh_counter = refresh_counter % (MCLKS_PER_68K * REFRESH_INTERVAL); | 865 gen->refresh_counter = gen->refresh_counter % (MCLKS_PER_68K * REFRESH_INTERVAL); |
852 last_sync_cycle = context->current_cycle - 4*MCLKS_PER_68K; | 866 gen->last_sync_cycle = context->current_cycle - 4*MCLKS_PER_68K; |
853 #endif | 867 |
854 if (location < 0x10000) { | 868 if (location < 0x10000) { |
855 //Access to Z80 memory incurs a one 68K cycle wait state | 869 //Access to Z80 memory incurs a one 68K cycle wait state |
856 context->current_cycle += MCLKS_PER_68K; | 870 context->current_cycle += MCLKS_PER_68K; |
857 if (!z80_enabled || z80_get_busack(gen->z80, context->current_cycle)) { | 871 if (!z80_enabled || z80_get_busack(gen->z80, context->current_cycle)) { |
858 location &= 0x7FFF; | 872 location &= 0x7FFF; |
974 } else if (masked != 0x11300 && masked != 0x11000) { | 988 } else if (masked != 0x11300 && masked != 0x11000) { |
975 fatal_error("Machine freeze due to unmapped write to address %X\n", location | 0xA00000); | 989 fatal_error("Machine freeze due to unmapped write to address %X\n", location | 0xA00000); |
976 } | 990 } |
977 } | 991 } |
978 } | 992 } |
979 #ifdef REFRESH_EMULATION | 993 |
980 //no refresh delays during IO access | 994 //no refresh delays during IO access |
981 refresh_counter += context->current_cycle - last_sync_cycle; | 995 gen->refresh_counter += context->current_cycle - gen->last_sync_cycle; |
982 refresh_counter = refresh_counter % (MCLKS_PER_68K * REFRESH_INTERVAL); | 996 gen->refresh_counter = gen->refresh_counter % (MCLKS_PER_68K * REFRESH_INTERVAL); |
983 #endif | |
984 return context; | 997 return context; |
985 } | 998 } |
986 | 999 |
987 static m68k_context * io_write_w(uint32_t location, m68k_context * context, uint16_t value) | 1000 static m68k_context * io_write_w(uint32_t location, m68k_context * context, uint16_t value) |
988 { | 1001 { |
1002 | 1015 |
1003 static uint8_t io_read(uint32_t location, m68k_context * context) | 1016 static uint8_t io_read(uint32_t location, m68k_context * context) |
1004 { | 1017 { |
1005 uint8_t value; | 1018 uint8_t value; |
1006 genesis_context *gen = context->system; | 1019 genesis_context *gen = context->system; |
1007 #ifdef REFRESH_EMULATION | 1020 |
1008 //do refresh check here so we can avoid adding a penalty for a refresh that happens during an IO area access | 1021 //do refresh check here so we can avoid adding a penalty for a refresh that happens during an IO area access |
1009 refresh_counter += context->current_cycle - 4*MCLKS_PER_68K - last_sync_cycle; | 1022 gen->refresh_counter += context->current_cycle - 4*MCLKS_PER_68K - gen->last_sync_cycle; |
1010 context->current_cycle += REFRESH_DELAY * MCLKS_PER_68K * (refresh_counter / (MCLKS_PER_68K * REFRESH_INTERVAL)); | 1023 context->current_cycle += REFRESH_DELAY * MCLKS_PER_68K * (gen->refresh_counter / (MCLKS_PER_68K * REFRESH_INTERVAL)); |
1011 refresh_counter = refresh_counter % (MCLKS_PER_68K * REFRESH_INTERVAL); | 1024 gen->refresh_counter = gen->refresh_counter % (MCLKS_PER_68K * REFRESH_INTERVAL); |
1012 last_sync_cycle = context->current_cycle - 4*MCLKS_PER_68K; | 1025 gen->last_sync_cycle = context->current_cycle - 4*MCLKS_PER_68K; |
1013 #endif | 1026 |
1014 if (location < 0x10000) { | 1027 if (location < 0x10000) { |
1015 //Access to Z80 memory incurs a one 68K cycle wait state | 1028 //Access to Z80 memory incurs a one 68K cycle wait state |
1016 context->current_cycle += MCLKS_PER_68K; | 1029 context->current_cycle += MCLKS_PER_68K; |
1017 if (!z80_enabled || z80_get_busack(gen->z80, context->current_cycle)) { | 1030 if (!z80_enabled || z80_get_busack(gen->z80, context->current_cycle)) { |
1018 location &= 0x7FFF; | 1031 location &= 0x7FFF; |
1104 fatal_error("Machine freeze due to read of unmapped IO location %X\n", location); | 1117 fatal_error("Machine freeze due to read of unmapped IO location %X\n", location); |
1105 value = 0xFF; | 1118 value = 0xFF; |
1106 } | 1119 } |
1107 } | 1120 } |
1108 } | 1121 } |
1109 #ifdef REFRESH_EMULATION | 1122 |
1110 //no refresh delays during IO access | 1123 //no refresh delays during IO access |
1111 refresh_counter += context->current_cycle - last_sync_cycle; | 1124 gen->refresh_counter += context->current_cycle - gen->last_sync_cycle; |
1112 refresh_counter = refresh_counter % (MCLKS_PER_68K * REFRESH_INTERVAL); | 1125 gen->refresh_counter = gen->refresh_counter % (MCLKS_PER_68K * REFRESH_INTERVAL); |
1113 last_sync_cycle = context->current_cycle; | 1126 gen->last_sync_cycle = context->current_cycle; |
1114 #endif | |
1115 return value; | 1127 return value; |
1116 } | 1128 } |
1117 | 1129 |
1118 static uint16_t io_read_w(uint32_t location, m68k_context * context) | 1130 static uint16_t io_read_w(uint32_t location, m68k_context * context) |
1119 { | 1131 { |
1222 | 1234 |
1223 static uint16_t unused_read(uint32_t location, void *vcontext) | 1235 static uint16_t unused_read(uint32_t location, void *vcontext) |
1224 { | 1236 { |
1225 m68k_context *context = vcontext; | 1237 m68k_context *context = vcontext; |
1226 genesis_context *gen = context->system; | 1238 genesis_context *gen = context->system; |
1227 #ifdef REFRESH_EMULATION | |
1228 if (location >= 0x800000) { | 1239 if (location >= 0x800000) { |
1229 //do refresh check here so we can avoid adding a penalty for a refresh that happens during an IO area access | 1240 //do refresh check here so we can avoid adding a penalty for a refresh that happens during an IO area access |
1230 refresh_counter += context->current_cycle - 4*MCLKS_PER_68K - last_sync_cycle; | 1241 gen->refresh_counter += context->current_cycle - 4*MCLKS_PER_68K - gen->last_sync_cycle; |
1231 context->current_cycle += REFRESH_DELAY * MCLKS_PER_68K * (refresh_counter / (MCLKS_PER_68K * REFRESH_INTERVAL)); | 1242 context->current_cycle += REFRESH_DELAY * MCLKS_PER_68K * (gen->refresh_counter / (MCLKS_PER_68K * REFRESH_INTERVAL)); |
1232 refresh_counter += 4*MCLKS_PER_68K; | 1243 gen->refresh_counter += 4*MCLKS_PER_68K; |
1233 refresh_counter = refresh_counter % (MCLKS_PER_68K * REFRESH_INTERVAL); | 1244 gen->refresh_counter = gen->refresh_counter % (MCLKS_PER_68K * REFRESH_INTERVAL); |
1234 last_sync_cycle = context->current_cycle; | 1245 gen->last_sync_cycle = context->current_cycle; |
1235 } | 1246 } |
1236 #endif | |
1237 | 1247 |
1238 if (location < 0x800000 || (location >= 0xA13000 && location < 0xA13100) || (location >= 0xA12000 && location < 0xA12100)) { | 1248 if (location < 0x800000 || (location >= 0xA13000 && location < 0xA13100) || (location >= 0xA12000 && location < 0xA12100)) { |
1239 //Only called if the cart/exp doesn't have a more specific handler for this region | 1249 //Only called if the cart/exp doesn't have a more specific handler for this region |
1240 return get_open_bus_value(&gen->header); | 1250 return get_open_bus_value(&gen->header); |
1241 } else if (location == 0xA14000 || location == 0xA14002) { | 1251 } else if (location == 0xA14000 || location == 0xA14002) { |
1287 | 1297 |
1288 static void *unused_write(uint32_t location, void *vcontext, uint16_t value) | 1298 static void *unused_write(uint32_t location, void *vcontext, uint16_t value) |
1289 { | 1299 { |
1290 m68k_context *context = vcontext; | 1300 m68k_context *context = vcontext; |
1291 genesis_context *gen = context->system; | 1301 genesis_context *gen = context->system; |
1292 #ifdef REFRESH_EMULATION | |
1293 if (location >= 0x800000) { | 1302 if (location >= 0x800000) { |
1294 //do refresh check here so we can avoid adding a penalty for a refresh that happens during an IO area access | 1303 //do refresh check here so we can avoid adding a penalty for a refresh that happens during an IO area access |
1295 refresh_counter += context->current_cycle - 4*MCLKS_PER_68K - last_sync_cycle; | 1304 gen->refresh_counter += context->current_cycle - 4*MCLKS_PER_68K - gen->last_sync_cycle; |
1296 context->current_cycle += REFRESH_DELAY * MCLKS_PER_68K * (refresh_counter / (MCLKS_PER_68K * REFRESH_INTERVAL)); | 1305 context->current_cycle += REFRESH_DELAY * MCLKS_PER_68K * (gen->refresh_counter / (MCLKS_PER_68K * REFRESH_INTERVAL)); |
1297 refresh_counter += 4*MCLKS_PER_68K; | 1306 gen->refresh_counter += 4*MCLKS_PER_68K; |
1298 refresh_counter = refresh_counter % (MCLKS_PER_68K * REFRESH_INTERVAL); | 1307 gen->refresh_counter = gen->refresh_counter % (MCLKS_PER_68K * REFRESH_INTERVAL); |
1299 last_sync_cycle = context->current_cycle; | 1308 gen->last_sync_cycle = context->current_cycle; |
1300 } | 1309 } |
1301 #endif | 1310 |
1302 uint8_t has_tmss = gen->version_reg & 0xF; | 1311 uint8_t has_tmss = gen->version_reg & 0xF; |
1303 if (has_tmss && (location == 0xA14000 || location == 0xA14002)) { | 1312 if (has_tmss && (location == 0xA14000 || location == 0xA14002)) { |
1304 gen->tmss_lock[location >> 1 & 1] = value; | 1313 gen->tmss_lock[location >> 1 & 1] = value; |
1305 check_tmss_lock(gen); | 1314 check_tmss_lock(gen); |
1306 } else if (has_tmss && location == 0xA14100) { | 1315 } else if (has_tmss && location == 0xA14100) { |
1410 goto done; | 1419 goto done; |
1411 } | 1420 } |
1412 if (load_from_file(&state, statepath)) { | 1421 if (load_from_file(&state, statepath)) { |
1413 genesis_deserialize(&state, gen); | 1422 genesis_deserialize(&state, gen); |
1414 free(state.data); | 1423 free(state.data); |
1415 //HACK | |
1416 pc = gen->m68k->last_prefetch_address; | |
1417 ret = 1; | 1424 ret = 1; |
1418 } else { | 1425 } else { |
1419 strcpy(statepath + strlen(statepath)-strlen("state"), "gst"); | 1426 strcpy(statepath + strlen(statepath)-strlen("state"), "gst"); |
1420 pc = load_gst(gen, statepath); | 1427 pc = load_gst(gen, statepath); |
1421 ret = pc != 0; | 1428 ret = pc != 0; |
1422 } | 1429 } |
1423 if (ret) { | 1430 if (ret) { |
1424 gen->m68k->resume_pc = get_native_address_trans(gen->m68k, pc); | 1431 debug_message("Loaded state from %s\n", statepath); |
1425 } | 1432 } |
1426 done: | 1433 done: |
1427 free(statepath); | 1434 free(statepath); |
1428 return ret; | 1435 return ret; |
1429 } | 1436 } |