Mercurial > repos > blastem
comparison m68k_core_x86.c @ 996:784bc1e45e80
Fix 68K interrupt handling some more. Fatal Rewind is working again.
author | Michael Pavone <pavone@retrodev.com> |
---|---|
date | Sat, 30 Apr 2016 09:45:53 -0700 |
parents | 33a46d35b913 |
children | 7267bc1ab547 |
comparison legend: equal | deleted | inserted | replaced
995:2bc27415565b | 996:784bc1e45e80 |
---|---|
2014 } | 2014 } |
2015 if ((inst->op == M68K_ANDI_SR && (inst->src.params.immed & 0x700) != 0x700) | 2015 if ((inst->op == M68K_ANDI_SR && (inst->src.params.immed & 0x700) != 0x700) |
2016 || (inst->op == M68K_ORI_SR && inst->src.params.immed & 0x700)) { | 2016 || (inst->op == M68K_ORI_SR && inst->src.params.immed & 0x700)) { |
2017 if (inst->op == M68K_ANDI_SR) { | 2017 if (inst->op == M68K_ANDI_SR) { |
2018 //set int pending flag in case we trigger an interrupt as a result of the mask change | 2018 //set int pending flag in case we trigger an interrupt as a result of the mask change |
2019 mov_irdisp(code, 1, opts->gen.context_reg, offsetof(m68k_context, int_pending), SZ_B); | 2019 mov_irdisp(code, INT_PENDING_SR_CHANGE, opts->gen.context_reg, offsetof(m68k_context, int_pending), SZ_B); |
2020 } | 2020 } |
2021 call(code, opts->do_sync); | 2021 call(code, opts->do_sync); |
2022 } | 2022 } |
2023 } | 2023 } |
2024 } | 2024 } |
2047 } | 2047 } |
2048 if (inst->op == M68K_EORI_SR) { | 2048 if (inst->op == M68K_EORI_SR) { |
2049 xor_irdisp(code, inst->src.params.immed >> 8, opts->gen.context_reg, offsetof(m68k_context, status), SZ_B); | 2049 xor_irdisp(code, inst->src.params.immed >> 8, opts->gen.context_reg, offsetof(m68k_context, status), SZ_B); |
2050 if (inst->src.params.immed & 0x700) { | 2050 if (inst->src.params.immed & 0x700) { |
2051 //set int pending flag in case we trigger an interrupt as a result of the mask change | 2051 //set int pending flag in case we trigger an interrupt as a result of the mask change |
2052 mov_irdisp(code, 1, opts->gen.context_reg, offsetof(m68k_context, int_pending), SZ_B); | 2052 mov_irdisp(code, INT_PENDING_SR_CHANGE, opts->gen.context_reg, offsetof(m68k_context, int_pending), SZ_B); |
2053 call(code, opts->do_sync); | 2053 call(code, opts->do_sync); |
2054 } | 2054 } |
2055 } | 2055 } |
2056 } | 2056 } |
2057 | 2057 |
2079 //leave supervisor mode | 2079 //leave supervisor mode |
2080 swap_ssp_usp(opts); | 2080 swap_ssp_usp(opts); |
2081 } | 2081 } |
2082 if (((src_op->disp >> 8) & 7) < 7) { | 2082 if (((src_op->disp >> 8) & 7) < 7) { |
2083 //set int pending flag in case we trigger an interrupt as a result of the mask change | 2083 //set int pending flag in case we trigger an interrupt as a result of the mask change |
2084 mov_irdisp(code, 1, opts->gen.context_reg, offsetof(m68k_context, int_pending), SZ_B); | 2084 mov_irdisp(code, INT_PENDING_SR_CHANGE, opts->gen.context_reg, offsetof(m68k_context, int_pending), SZ_B); |
2085 } | 2085 } |
2086 call(code, opts->do_sync); | 2086 call(code, opts->do_sync); |
2087 } | 2087 } |
2088 cycles(&opts->gen, 12); | 2088 cycles(&opts->gen, 12); |
2089 } else { | 2089 } else { |
2131 mov_rr(code, opts->gen.limit, opts->gen.cycles, SZ_D); | 2131 mov_rr(code, opts->gen.limit, opts->gen.cycles, SZ_D); |
2132 *after_cycle_up = code->cur - (after_cycle_up+1); | 2132 *after_cycle_up = code->cur - (after_cycle_up+1); |
2133 cmp_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, int_cycle), opts->gen.cycles, SZ_D); | 2133 cmp_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, int_cycle), opts->gen.cycles, SZ_D); |
2134 jcc(code, CC_C, loop_top); | 2134 jcc(code, CC_C, loop_top); |
2135 //set int pending flag so interrupt fires immediately after stop is done | 2135 //set int pending flag so interrupt fires immediately after stop is done |
2136 mov_irdisp(code, 1, opts->gen.context_reg, offsetof(m68k_context, int_pending), SZ_B); | 2136 mov_irdisp(code, INT_PENDING_SR_CHANGE, opts->gen.context_reg, offsetof(m68k_context, int_pending), SZ_B); |
2137 } | 2137 } |
2138 | 2138 |
2139 void translate_m68k_trapv(m68k_options *opts, m68kinst *inst) | 2139 void translate_m68k_trapv(m68k_options *opts, m68kinst *inst) |
2140 { | 2140 { |
2141 code_info *code = &opts->gen.code; | 2141 code_info *code = &opts->gen.code; |
2558 } | 2558 } |
2559 } | 2559 } |
2560 shr_ir(code, 8, opts->gen.scratch1, SZ_W); | 2560 shr_ir(code, 8, opts->gen.scratch1, SZ_W); |
2561 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(m68k_context, status), SZ_B); | 2561 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(m68k_context, status), SZ_B); |
2562 //set int pending flag in case we trigger an interrupt as a result of the mask change | 2562 //set int pending flag in case we trigger an interrupt as a result of the mask change |
2563 mov_irdisp(code, 1, opts->gen.context_reg, offsetof(m68k_context, int_pending), SZ_B); | 2563 mov_irdisp(code, INT_PENDING_SR_CHANGE, opts->gen.context_reg, offsetof(m68k_context, int_pending), SZ_B); |
2564 retn(code); | 2564 retn(code); |
2565 | 2565 |
2566 opts->set_ccr = code->cur; | 2566 opts->set_ccr = code->cur; |
2567 for (int flag = FLAG_C; flag >= FLAG_X; flag--) | 2567 for (int flag = FLAG_C; flag >= FLAG_X; flag--) |
2568 { | 2568 { |
2741 *do_int = code->cur - (do_int+1); | 2741 *do_int = code->cur - (do_int+1); |
2742 //implement 1 instruction latency | 2742 //implement 1 instruction latency |
2743 cmp_irdisp(code, 0, opts->gen.context_reg, offsetof(m68k_context, int_pending), SZ_B); | 2743 cmp_irdisp(code, 0, opts->gen.context_reg, offsetof(m68k_context, int_pending), SZ_B); |
2744 do_int = code->cur + 1; | 2744 do_int = code->cur + 1; |
2745 jcc(code, CC_NZ, do_int); | 2745 jcc(code, CC_NZ, do_int); |
2746 mov_irdisp(code, 1, opts->gen.context_reg, offsetof(m68k_context, int_pending), SZ_B); | 2746 //store current interrupt number so it doesn't change before we start processing the vector |
2747 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, int_num), opts->gen.scratch1, SZ_B); | |
2748 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(m68k_context, int_pending), SZ_B); | |
2747 retn(code); | 2749 retn(code); |
2748 *do_int = code->cur - (do_int + 1); | 2750 *do_int = code->cur - (do_int + 1); |
2749 //save interrupt number so it can't change during interrupt processing | 2751 //Check if int_pending has an actual interrupt priority in it |
2750 push_rdisp(code, opts->gen.context_reg, offsetof(m68k_context, int_num)); | 2752 cmp_irdisp(code, INT_PENDING_SR_CHANGE, opts->gen.context_reg, offsetof(m68k_context, int_pending), SZ_B); |
2753 code_ptr already_int_num = code->cur + 1; | |
2754 jcc(code, CC_NZ, already_int_num); | |
2755 | |
2756 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, int_num), opts->gen.scratch2, SZ_B); | |
2757 mov_rrdisp(code, opts->gen.scratch2, opts->gen.context_reg, offsetof(m68k_context, int_pending), SZ_B); | |
2758 | |
2759 *already_int_num = code->cur - (already_int_num + 1); | |
2751 //save PC as stored in scratch1 for later | 2760 //save PC as stored in scratch1 for later |
2752 push_r(code, opts->gen.scratch1); | 2761 push_r(code, opts->gen.scratch1); |
2753 //set target cycle to sync cycle | 2762 //set target cycle to sync cycle |
2754 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, sync_cycle), opts->gen.limit, SZ_D); | 2763 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, sync_cycle), opts->gen.limit, SZ_D); |
2755 //swap USP and SSP if not already in supervisor mode | 2764 //swap USP and SSP if not already in supervisor mode |
2801 //save PC | 2810 //save PC |
2802 areg_to_native(opts, 7, opts->gen.scratch2); | 2811 areg_to_native(opts, 7, opts->gen.scratch2); |
2803 add_ir(code, 2, opts->gen.scratch2, SZ_D); | 2812 add_ir(code, 2, opts->gen.scratch2, SZ_D); |
2804 call(code, opts->write_32_lowfirst); | 2813 call(code, opts->write_32_lowfirst); |
2805 | 2814 |
2806 //restore saved interrupt number | 2815 //grab saved interrupt number |
2807 pop_r(code, opts->gen.scratch1); | 2816 xor_rr(code, opts->gen.scratch1, opts->gen.scratch1, SZ_D); |
2817 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, int_pending), opts->gen.scratch1, SZ_B); | |
2808 //ack the interrupt (happens earlier on hardware, but shouldn't be an observable difference) | 2818 //ack the interrupt (happens earlier on hardware, but shouldn't be an observable difference) |
2809 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(m68k_context, int_ack), SZ_W); | 2819 mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(m68k_context, int_ack), SZ_W); |
2810 //calculate the vector address | 2820 //calculate the vector address |
2811 shl_ir(code, 2, opts->gen.scratch1, SZ_D); | 2821 shl_ir(code, 2, opts->gen.scratch1, SZ_D); |
2812 add_ir(code, 0x60, opts->gen.scratch1, SZ_D); | 2822 add_ir(code, 0x60, opts->gen.scratch1, SZ_D); |
2823 //clear out pending flag | |
2824 mov_irdisp(code, 0, opts->gen.context_reg, offsetof(m68k_context, int_pending), SZ_B); | |
2825 //read vector | |
2813 call(code, opts->read_32); | 2826 call(code, opts->read_32); |
2814 call(code, opts->native_addr_and_sync); | 2827 call(code, opts->native_addr_and_sync); |
2815 //2 prefetch bus operations + 2 idle bus cycles | 2828 //2 prefetch bus operations + 2 idle bus cycles |
2816 cycles(&opts->gen, 10); | 2829 cycles(&opts->gen, 10); |
2817 tmp_stack_off = code->stack_off; | 2830 tmp_stack_off = code->stack_off; |