comparison segacd.c @ 2350:f8b5142c06aa

Allow 68K to return mid-instruction. Adjust how 68K interrupt ack works so int2 busy flag timing is more correct. Fix some other SCD timing issues
author Michael Pavone <pavone@retrodev.com>
date Mon, 16 Oct 2023 23:30:04 -0700
parents ae073c2167e2
children 03e6ac327ba0
comparison
2349:f0fc6c09517d 2350:f8b5142c06aa
9 #include "blastem.h" 9 #include "blastem.h"
10 #include "cdimage.h" 10 #include "cdimage.h"
11 11
12 #define SCD_MCLKS 50000000 12 #define SCD_MCLKS 50000000
13 #define SCD_PERIPH_RESET_CLKS (SCD_MCLKS / 10) 13 #define SCD_PERIPH_RESET_CLKS (SCD_MCLKS / 10)
14 #define TIMER_TICK_CLKS 1536 14 #define TIMER_TICK_CLKS 1536/*1792*/
15
16 //TODO: do some logic analyzer captures to get actual values
17 #define REFRESH_INTERVAL 259
18 #define REFRESH_DELAY 2
15 19
16 enum { 20 enum {
17 GA_SUB_CPU_CTRL, 21 GA_SUB_CPU_CTRL,
18 GA_MEM_MODE, 22 GA_MEM_MODE,
19 GA_CDC_CTRL, 23 GA_CDC_CTRL,
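The REFRESH_INTERVAL/REFRESH_DELAY defines (and their TODO) move up here from just above sync_components, with the interval bumped from 256 to 259. They model the periodic DRAM refresh stalls on the sub 68K bus: the gate-array accessors and scd_run below charge REFRESH_DELAY extra cycles for every full REFRESH_INTERVAL elapsed since the last accounted refresh. A minimal sketch of that accounting, using a stand-in struct rather than the real segacd_context/m68k_context fields:

#include <stdint.h>

#define REFRESH_INTERVAL 259
#define REFRESH_DELAY 2

typedef struct {
	uint32_t current_cycle;      /* sub 68K cycle count, in SCD master clocks */
	uint32_t last_refresh_cycle; /* last cycle at which refresh was accounted */
} refresh_sketch;

static void charge_refresh(refresh_sketch *r)
{
	/* one REFRESH_DELAY stall per full REFRESH_INTERVAL elapsed */
	uint32_t num_refresh = (r->current_cycle - r->last_refresh_cycle) / REFRESH_INTERVAL;
	r->last_refresh_cycle += num_refresh * REFRESH_INTERVAL;
	r->current_cycle += num_refresh * REFRESH_DELAY;
}

In sub_gate_read16/sub_gate_write16 the charged count is taken four CPU cycles before the access (before_cycle) while last_refresh_cycle is still advanced by the full count, presumably so a refresh that falls within the gate-array access itself is accounted for but does not also delay it.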
630 context->target_cycle = context->current_cycle; 634 context->target_cycle = context->current_cycle;
631 return; 635 return;
632 } 636 }
633 context->target_cycle = context->sync_cycle < context->int_cycle ? context->sync_cycle : context->int_cycle; 637 context->target_cycle = context->sync_cycle < context->int_cycle ? context->sync_cycle : context->int_cycle;
634 if (context->int_cycle == cdc_cycle && context->int_num == 5) { 638 if (context->int_cycle == cdc_cycle && context->int_num == 5) {
635 uint32_t before = context->target_cycle - 2 * cd->cdc.clock_step; 639 uint32_t before = cdc_cycle - cd->m68k->options->gen.clock_divider * 158; //divs worst case
636 if (before < context->target_cycle) { 640 if (before < context->target_cycle) {
637 if (before > context->current_cycle) { 641 while (before <= context->current_cycle) {
642 before += cd->cdc.clock_step;
643 }
644 if (before < context->target_cycle) {
638 context->target_cycle = context->sync_cycle = before; 645 context->target_cycle = context->sync_cycle = before;
639 } else {
640 before = context->target_cycle - cd->cdc.clock_step;
641 if (before > context->current_cycle) {
642 context->target_cycle = context->sync_cycle = before;
643 }
644 } 646 }
645 } 647 }
646 } 648 }
647 } 649 }
648 650
649 static uint16_t sub_gate_read16(uint32_t address, void *vcontext) 651 static uint16_t sub_gate_read16(uint32_t address, void *vcontext)
650 { 652 {
651 m68k_context *m68k = vcontext; 653 m68k_context *m68k = vcontext;
652 segacd_context *cd = m68k->system; 654 segacd_context *cd = m68k->system;
655 uint32_t before_cycle = m68k->current_cycle - m68k->options->gen.clock_divider * 4;
656 if (before_cycle >= cd->last_refresh_cycle) {
657 uint32_t num_refresh = (before_cycle - cd->last_refresh_cycle) / REFRESH_INTERVAL;
658 uint32_t num_full = (m68k->current_cycle - cd->last_refresh_cycle) / REFRESH_INTERVAL;
659 cd->last_refresh_cycle = cd->last_refresh_cycle + num_full * REFRESH_INTERVAL;
660 m68k->current_cycle += num_refresh * REFRESH_DELAY;
661 }
662
663
653 uint32_t reg = address >> 1; 664 uint32_t reg = address >> 1;
654 switch (reg) 665 switch (reg)
655 { 666 {
656 case GA_SUB_CPU_CTRL: { 667 case GA_SUB_CPU_CTRL: {
657 uint16_t value = cd->gate_array[reg] & 0xFFFE; 668 uint16_t value = cd->gate_array[reg] & 0xFFFE;
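The new backoff in calculate_target_cycle ties into the commit's "allow 68K to return mid-instruction" goal: rather than stopping two CDC clock steps before a pending CDC (INT5) deadline, the sync point is pulled back by 158 CPU cycles, the worst-case 68000 DIVS execution time, so the sub CPU cannot commit to an instruction that would run past the interrupt. A hedged sketch of the calculation with illustrative names; the real code only adopts the result when it still falls before the previously chosen target_cycle:

#include <stdint.h>

/* stop at least one worst-case instruction (DIVS, 158 CPU cycles) before a
 * pending CDC interrupt, but never pick a sync point that is already past */
static uint32_t cdc_int_sync_point(uint32_t cdc_int_cycle, uint32_t current_cycle,
                                   uint32_t clock_divider, uint32_t cdc_clock_step)
{
	uint32_t before = cdc_int_cycle - clock_divider * 158; /* divider 4 -> 632 master clocks */
	while (before <= current_cycle) {
		before += cdc_clock_step; /* step forward by whole CDC clocks until reachable */
	}
	return before;
}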
672 cdd_run(cd, m68k->current_cycle); 683 cdd_run(cd, m68k->current_cycle);
673 uint16_t dst = cd->gate_array[GA_CDC_CTRL] >> 8 & 0x7; 684 uint16_t dst = cd->gate_array[GA_CDC_CTRL] >> 8 & 0x7;
674 if (dst == DST_SUB_CPU) { 685 if (dst == DST_SUB_CPU) {
675 if (cd->gate_array[GA_CDC_CTRL] & BIT_DSR) { 686 if (cd->gate_array[GA_CDC_CTRL] & BIT_DSR) {
676 cd->gate_array[GA_CDC_CTRL] &= ~BIT_DSR; 687 cd->gate_array[GA_CDC_CTRL] &= ~BIT_DSR;
677 lc8951_resume_transfer(&cd->cdc, cd->cdc.cycle); 688 lc8951_resume_transfer(&cd->cdc);
678 } 689 }
679 calculate_target_cycle(cd->m68k); 690 calculate_target_cycle(cd->m68k);
680 691
681 } 692 }
682 return cd->gate_array[reg]; 693 return cd->gate_array[reg];
736 747
737 static void *sub_gate_write16(uint32_t address, void *vcontext, uint16_t value) 748 static void *sub_gate_write16(uint32_t address, void *vcontext, uint16_t value)
738 { 749 {
739 m68k_context *m68k = vcontext; 750 m68k_context *m68k = vcontext;
740 segacd_context *cd = m68k->system; 751 segacd_context *cd = m68k->system;
752 uint32_t before_cycle = m68k->current_cycle - m68k->options->gen.clock_divider * 4;
753 if (before_cycle >= cd->last_refresh_cycle) {
754 uint32_t num_refresh = (before_cycle - cd->last_refresh_cycle) / REFRESH_INTERVAL;
755 uint32_t num_full = (m68k->current_cycle - cd->last_refresh_cycle) / REFRESH_INTERVAL;
756 cd->last_refresh_cycle = cd->last_refresh_cycle + num_full * REFRESH_INTERVAL;
757 m68k->current_cycle += num_refresh * REFRESH_DELAY;
758 }
759
741 uint32_t reg = address >> 1; 760 uint32_t reg = address >> 1;
742 switch (reg) 761 switch (reg)
743 { 762 {
744 case GA_SUB_CPU_CTRL: 763 case GA_SUB_CPU_CTRL:
745 cd->gate_array[reg] &= 0xF0; 764 cd->gate_array[reg] &= 0xF0;
829 lc8951_set_dma_multiple(&cd->cdc, 21); 848 lc8951_set_dma_multiple(&cd->cdc, 21);
830 } else { 849 } else {
831 lc8951_set_dma_multiple(&cd->cdc, 6); 850 lc8951_set_dma_multiple(&cd->cdc, 6);
832 } 851 }
833 if ((old_dest < DST_MAIN_CPU || old_dest == 6) && dest >= DST_MAIN_CPU && dest != 6) { 852 if ((old_dest < DST_MAIN_CPU || old_dest == 6) && dest >= DST_MAIN_CPU && dest != 6) {
834 lc8951_resume_transfer(&cd->cdc, m68k->current_cycle); 853 lc8951_resume_transfer(&cd->cdc);
835 } 854 }
836 calculate_target_cycle(m68k); 855 calculate_target_cycle(m68k);
837 } 856 }
838 cd->gate_array[GA_CDC_DMA_ADDR] = 0; 857 cd->gate_array[GA_CDC_DMA_ADDR] = 0;
839 cd->cdc_dst_low = 0; 858 cd->cdc_dst_low = 0;
876 cd->gate_array[reg] = value; 895 cd->gate_array[reg] = value;
877 break; 896 break;
878 case GA_TIMER: 897 case GA_TIMER:
879 timers_run(cd, m68k->current_cycle); 898 timers_run(cd, m68k->current_cycle);
880 cd->gate_array[reg] = value & 0xFF; 899 cd->gate_array[reg] = value & 0xFF;
900 cd->timer_value = 0;
881 calculate_target_cycle(m68k); 901 calculate_target_cycle(m68k);
882 break; 902 break;
883 case GA_INT_MASK: 903 case GA_INT_MASK:
884 if (!(cd->gate_array[reg] & BIT_MASK_IEN6)) { 904 if (!(cd->gate_array[reg] & BIT_MASK_IEN6)) {
885 //subcode interrupts can't be made pending when they are disabled in this reg 905 //subcode interrupts can't be made pending when they are disabled in this reg
1117 cdd_run(cd, cycle); 1137 cdd_run(cd, cycle);
1118 cd_graphics_run(cd, cycle); 1138 cd_graphics_run(cd, cycle);
1119 rf5c164_run(&cd->pcm, cycle); 1139 rf5c164_run(&cd->pcm, cycle);
1120 } 1140 }
1121 1141
1122 //TODO: do some logic analyzer captures to get actual values
1123 #define REFRESH_INTERVAL 256
1124 #define REFRESH_DELAY 2
1125
1126 static m68k_context *sync_components(m68k_context * context, uint32_t address) 1142 static m68k_context *sync_components(m68k_context * context, uint32_t address)
1127 { 1143 {
1128 segacd_context *cd = context->system; 1144 segacd_context *cd = context->system;
1129 1145
1130 uint32_t num_refresh = (context->current_cycle - cd->last_refresh_cycle) / REFRESH_INTERVAL; 1146 uint32_t num_refresh = (context->current_cycle - cd->last_refresh_cycle) / REFRESH_INTERVAL;
1144 } 1160 }
1145 #endif 1161 #endif
1146 } 1162 }
1147 cd->m68k_pc = address; 1163 cd->m68k_pc = address;
1148 } 1164 }
1149 switch (context->int_ack) 1165 calculate_target_cycle(context);
1166 return context;
1167 }
1168
1169 static m68k_context *int_ack(m68k_context *context)
1170 {
1171 segacd_context *cd = context->system;
1172 scd_peripherals_run(cd, context->current_cycle);
1173 switch (context->int_pending)
1150 { 1174 {
1151 case 1: 1175 case 1:
1152 cd->graphics_int_cycle = CYCLE_NEVER; 1176 cd->graphics_int_cycle = CYCLE_NEVER;
1153 break; 1177 break;
1154 case 2: 1178 case 2:
1165 break; 1189 break;
1166 case 6: 1190 case 6:
1167 cd->cdd.subcode_int_pending = 0; 1191 cd->cdd.subcode_int_pending = 0;
1168 break; 1192 break;
1169 } 1193 }
1170 context->int_ack = 0; 1194 //the Sega CD responds to these exclusively with !VPA which means it's a slow
1171 calculate_target_cycle(context); 1195 //6800 operation. documentation says these can take between 10 and 19 cycles.
1196 //actual measurements seem to suggest it's actually between 9 and 18
1197 //Base 68K core has added 4 cycles for a normal int ack cycle already
1198 //We add 5 + the current cycle count (in 68K cycles) mod 10 to simulate the
1199 //additional variable delay from the use of the 6800-style bus cycle
1200 uint32_t cycle_count = context->current_cycle / context->options->gen.clock_divider;
1201 context->current_cycle += 5 + (cycle_count % 10);
1202
1172 return context; 1203 return context;
1173 } 1204 }
1174 1205
1175 void scd_run(segacd_context *cd, uint32_t cycle) 1206 void scd_run(segacd_context *cd, uint32_t cycle)
1176 { 1207 {
1177 uint8_t m68k_run = !can_main_access_prog(cd); 1208 uint8_t m68k_run = !can_main_access_prog(cd);
1178 while (cycle > cd->m68k->current_cycle) { 1209 while (cycle > cd->m68k->current_cycle) {
1179 if (m68k_run && !cd->sub_paused_wordram) { 1210 if (m68k_run && !cd->sub_paused_wordram) {
1180 uint32_t start = cd->m68k->current_cycle; 1211 uint32_t num_refresh = (cd->m68k->current_cycle - cd->last_refresh_cycle) / REFRESH_INTERVAL;
1212 cd->last_refresh_cycle = cd->last_refresh_cycle + num_refresh * REFRESH_INTERVAL;
1213 cd->m68k->current_cycle += num_refresh * REFRESH_DELAY;
1214
1181 1215
1182 cd->m68k->sync_cycle = cd->enter_debugger ? cd->m68k->current_cycle + 1 : cycle; 1216 cd->m68k->sync_cycle = cd->enter_debugger ? cd->m68k->current_cycle + 1 : cycle;
1183 if (cd->need_reset) { 1217 if (cd->need_reset) {
1184 cd->need_reset = 0; 1218 cd->need_reset = 0;
1185 m68k_reset(cd->m68k); 1219 m68k_reset(cd->m68k);
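Interrupt acknowledgement now gets its own int_ack callback instead of being folded into sync_components via context->int_ack. Per the comment, the gate array answers these cycles with !VPA, i.e. a 6800-style autovectored ack measured at roughly 9 to 18 CPU cycles; the base core already charges 4, so the callback adds 5 plus the CPU cycle count modulo 10. A small worked example with an illustrative helper name, expressing the extra delay in 68K CPU cycles as the comment describes it:

#include <stdint.h>
#include <stdio.h>

/* extra int-ack delay in 68K CPU cycles; the base core has already added 4 */
static uint32_t int_ack_extra_cycles(uint32_t current_cycle, uint32_t clock_divider)
{
	uint32_t cycle_count = current_cycle / clock_divider; /* master clocks -> CPU cycles */
	return 5 + (cycle_count % 10);                        /* 5..14, i.e. 9..18 with the base 4 */
}

int main(void)
{
	/* sub CPU clock divider is 4; a CPU cycle count of 1237 adds 5 + 7 = 12 cycles */
	printf("%u\n", int_ack_extra_cycles(1237 * 4, 4));
	return 0;
}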
1235 } 1269 }
1236 1270
1237 static uint16_t main_gate_read16(uint32_t address, void *vcontext) 1271 static uint16_t main_gate_read16(uint32_t address, void *vcontext)
1238 { 1272 {
1239 m68k_context *m68k = vcontext; 1273 m68k_context *m68k = vcontext;
1274 gen_update_refresh_free_access(m68k);
1240 genesis_context *gen = m68k->system; 1275 genesis_context *gen = m68k->system;
1241 segacd_context *cd = gen->expansion; 1276 segacd_context *cd = gen->expansion;
1242 uint32_t scd_cycle = gen_cycle_to_scd(m68k->current_cycle, gen); 1277 uint32_t scd_cycle = gen_cycle_to_scd(m68k->current_cycle, gen);
1243 scd_run(cd, scd_cycle); 1278 scd_run(cd, scd_cycle);
1244 uint32_t offset = (address & 0x1FF) >> 1; 1279 uint32_t offset = (address & 0x1FF) >> 1;
1268 case GA_CDC_HOST_DATA: { 1303 case GA_CDC_HOST_DATA: {
1269 uint16_t dst = cd->gate_array[GA_CDC_CTRL] >> 8 & 0x7; 1304 uint16_t dst = cd->gate_array[GA_CDC_CTRL] >> 8 & 0x7;
1270 if (dst == DST_MAIN_CPU) { 1305 if (dst == DST_MAIN_CPU) {
1271 if (cd->gate_array[GA_CDC_CTRL] & BIT_DSR) { 1306 if (cd->gate_array[GA_CDC_CTRL] & BIT_DSR) {
1272 cd->gate_array[GA_CDC_CTRL] &= ~BIT_DSR; 1307 cd->gate_array[GA_CDC_CTRL] &= ~BIT_DSR;
1273 //Using the sub CPU's cycle count here is a bit of a hack 1308 lc8951_resume_transfer(&cd->cdc);
1274 //needed to ensure the interrupt does not get triggered prematurely
1275 //because the sub CPU execution granularity is too high
1276 lc8951_resume_transfer(&cd->cdc, cd->m68k->current_cycle);
1277 } else { 1309 } else {
1278 printf("Read of CDC host data with DSR clear at %u\n", scd_cycle); 1310 printf("Read of CDC host data with DSR clear at %u\n", scd_cycle);
1279 } 1311 }
1280 calculate_target_cycle(cd->m68k); 1312 calculate_target_cycle(cd->m68k);
1281 } 1313 }
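Every lc8951_resume_transfer call in this diff also drops its explicit cycle argument, which retires the deleted comment's workaround of passing the sub CPU's cycle count to keep the interrupt from firing prematurely; presumably the CDC now resumes against its own internal cycle. The DSR handshake at the host-data registers therefore looks the same on both CPUs; a self-contained sketch with stand-in types (BIT_DSR's value here is an assumption, and resume_transfer_sketch is not the real lc8951 API):

#include <stdint.h>

#define BIT_DSR 0x4000 /* assumed value for the DSR bit in GA_CDC_CTRL */

typedef struct { uint32_t cycle; uint8_t transfer_paused; } cdc_sketch;

/* stand-in for the now one-argument lc8951_resume_transfer */
static void resume_transfer_sketch(cdc_sketch *cdc)
{
	cdc->transfer_paused = 0; /* resumes against cdc->cycle, not a caller-supplied cycle */
}

static void host_data_ack(uint16_t *cdc_ctrl, cdc_sketch *cdc)
{
	if (*cdc_ctrl & BIT_DSR) {
		*cdc_ctrl &= ~BIT_DSR;        /* data set ready has been consumed */
		resume_transfer_sketch(cdc);  /* previously took a cycle argument */
	}
}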
1326 } 1358 }
1327 1359
1328 static void *main_gate_write16(uint32_t address, void *vcontext, uint16_t value) 1360 static void *main_gate_write16(uint32_t address, void *vcontext, uint16_t value)
1329 { 1361 {
1330 m68k_context *m68k = vcontext; 1362 m68k_context *m68k = vcontext;
1363 gen_update_refresh_free_access(m68k);
1331 genesis_context *gen = m68k->system; 1364 genesis_context *gen = m68k->system;
1332 segacd_context *cd = gen->expansion; 1365 segacd_context *cd = gen->expansion;
1333 uint32_t scd_cycle = gen_cycle_to_scd(m68k->current_cycle, gen); 1366 uint32_t scd_cycle = gen_cycle_to_scd(m68k->current_cycle, gen);
1334 scd_run(cd, scd_cycle);
1335 uint32_t reg = (address & 0x1FF) >> 1; 1367 uint32_t reg = (address & 0x1FF) >> 1;
1368 if (reg != GA_SUB_CPU_CTRL) {
1369 scd_run(cd, scd_cycle);
1370 }
1336 switch (reg) 1371 switch (reg)
1337 { 1372 {
1338 case GA_SUB_CPU_CTRL: { 1373 case GA_SUB_CPU_CTRL: {
1374 if ((value & BIT_IFL2) && (cd->gate_array[GA_INT_MASK] & BIT_MASK_IEN2)) {
1375 if (cd->int2_cycle != CYCLE_NEVER) {
1376 scd_run(cd, scd_cycle - 4 * cd->m68k->options->gen.clock_divider);
1377 while (cd->int2_cycle != CYCLE_NEVER && cd->m68k->current_cycle < scd_cycle) {
1378 scd_run(cd, cd->m68k->current_cycle + cd->m68k->options->gen.clock_divider);
1379 }
1380 }
1381 cd->int2_cycle = scd_cycle;
1382
1383 }
1384 scd_run(cd, scd_cycle);
1339 uint8_t old_access = can_main_access_prog(cd); 1385 uint8_t old_access = can_main_access_prog(cd);
1340 cd->busreq = value & BIT_SBRQ; 1386 cd->busreq = value & BIT_SBRQ;
1341 uint8_t old_reset = cd->reset; 1387 uint8_t old_reset = cd->reset;
1342 cd->reset = value & BIT_SRES; 1388 cd->reset = value & BIT_SRES;
1343 if (cd->reset && !old_reset) { 1389 if (cd->reset && !old_reset) {
1344 cd->need_reset = 1; 1390 cd->need_reset = 1;
1345 }
1346 if (value & BIT_IFL2) {
1347 cd->int2_cycle = scd_cycle;
1348 } 1391 }
1349 /*cd->gate_array[reg] &= 0x7FFF; 1392 /*cd->gate_array[reg] &= 0x7FFF;
1350 cd->gate_array[reg] |= value & 0x8000;*/ 1393 cd->gate_array[reg] |= value & 0x8000;*/
1351 uint8_t new_access = can_main_access_prog(cd); 1394 uint8_t new_access = can_main_access_prog(cd);
1352 uint32_t bank = cd->gate_array[GA_MEM_MODE] >> 6 & 0x3; 1395 uint32_t bank = cd->gate_array[GA_MEM_MODE] >> 6 & 0x3;
1360 m68k_invalidate_code_range(m68k, cd->base + 0x220000, cd->base + 0x240000); 1403 m68k_invalidate_code_range(m68k, cd->base + 0x220000, cd->base + 0x240000);
1361 m68k_invalidate_code_range(cd->m68k, bank * 0x20000, (bank + 1) * 0x20000); 1404 m68k_invalidate_code_range(cd->m68k, bank * 0x20000, (bank + 1) * 0x20000);
1362 dump_prog_ram(cd); 1405 dump_prog_ram(cd);
1363 uint16_t dst = cd->gate_array[GA_CDC_CTRL] >> 8 & 0x7; 1406 uint16_t dst = cd->gate_array[GA_CDC_CTRL] >> 8 & 0x7;
1364 if (dst == DST_PROG_RAM) { 1407 if (dst == DST_PROG_RAM) {
1365 lc8951_resume_transfer(&cd->cdc, cd->cdc.cycle); 1408 lc8951_resume_transfer(&cd->cdc);
1366 } 1409 }
1367 } 1410 }
1368 break; 1411 break;
1369 } 1412 }
1370 case GA_MEM_MODE: { 1413 case GA_MEM_MODE: {
1393 cd->sub_paused_wordram = 0; 1436 cd->sub_paused_wordram = 0;
1394 } 1437 }
1395 1438
1396 uint16_t dst = cd->gate_array[GA_CDC_CTRL] >> 8 & 0x7; 1439 uint16_t dst = cd->gate_array[GA_CDC_CTRL] >> 8 & 0x7;
1397 if (dst == DST_WORD_RAM) { 1440 if (dst == DST_WORD_RAM) {
1398 lc8951_resume_transfer(&cd->cdc, cd->cdc.cycle); 1441 lc8951_resume_transfer(&cd->cdc);
1399 } 1442 }
1400 1443
1401 m68k_invalidate_code_range(m68k, cd->base + 0x200000, cd->base + 0x240000); 1444 m68k_invalidate_code_range(m68k, cd->base + 0x200000, cd->base + 0x240000);
1402 m68k_invalidate_code_range(cd->m68k, 0x080000, 0x0C0000); 1445 m68k_invalidate_code_range(cd->m68k, 0x080000, 0x0C0000);
1403 } 1446 }
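The IFL2 handling above is the "int2 busy flag timing" part of the commit message: the unconditional scd_run at the top of main_gate_write16 is now skipped for GA_SUB_CPU_CTRL, and when the main CPU sets IFL2 with INT2 enabled in the mask while an earlier INT2 is still pending, the sub CPU is first run to four CPU cycles before the write and then stepped one CPU cycle at a time until that interrupt has been taken, so back-to-back triggers are not collapsed into one. A hedged sketch of the pattern, where run_sub is a trivial stand-in for scd_run:

#include <stdint.h>

#define CYCLE_NEVER 0xFFFFFFFFu

typedef struct {
	uint32_t int2_cycle;    /* CYCLE_NEVER when no INT2 is pending */
	uint32_t sub_cycle;     /* sub 68K current cycle, in master clocks */
	uint32_t clock_divider; /* 4 for the Sega CD sub CPU */
} int2_sketch;

static void run_sub(int2_sketch *cd, uint32_t cycle)
{
	/* stand-in: advance the sub CPU and treat a due INT2 as taken */
	if (cycle > cd->sub_cycle) {
		cd->sub_cycle = cycle;
	}
	if (cd->int2_cycle != CYCLE_NEVER && cd->int2_cycle <= cd->sub_cycle) {
		cd->int2_cycle = CYCLE_NEVER;
	}
}

static void set_ifl2(int2_sketch *cd, uint32_t write_cycle)
{
	if (cd->int2_cycle != CYCLE_NEVER) {
		/* stop shortly before the write, then creep forward so the edge is not lost */
		run_sub(cd, write_cycle - 4 * cd->clock_divider);
		while (cd->int2_cycle != CYCLE_NEVER && cd->sub_cycle < write_cycle) {
			run_sub(cd, cd->sub_cycle + cd->clock_divider);
		}
	}
	cd->int2_cycle = write_cycle; /* new INT2 pending as of this write */
}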
1585 } 1628 }
1586 1629
1587 sub_cpu_map[0].buffer = sub_cpu_map[1].buffer = cd->prog_ram; 1630 sub_cpu_map[0].buffer = sub_cpu_map[1].buffer = cd->prog_ram;
1588 sub_cpu_map[4].buffer = cd->bram; 1631 sub_cpu_map[4].buffer = cd->bram;
1589 m68k_options *mopts = malloc(sizeof(m68k_options)); 1632 m68k_options *mopts = malloc(sizeof(m68k_options));
1590 init_m68k_opts(mopts, sub_cpu_map, sizeof(sub_cpu_map) / sizeof(*sub_cpu_map), 4, sync_components); 1633 init_m68k_opts(mopts, sub_cpu_map, sizeof(sub_cpu_map) / sizeof(*sub_cpu_map), 4, sync_components, int_ack);
1591 cd->m68k = init_68k_context(mopts, NULL); 1634 cd->m68k = init_68k_context(mopts, NULL);
1592 cd->m68k->system = cd; 1635 cd->m68k->system = cd;
1593 cd->int2_cycle = CYCLE_NEVER; 1636 cd->int2_cycle = CYCLE_NEVER;
1594 cd->busreq = 1; 1637 cd->busreq = 1;
1595 cd->busack = 1; 1638 cd->busack = 1;