Mercurial > repos > blastem
comparison m68k_core_x86.c @ 656:24ccfd70133a
Added 2 new functions to gen_x86.c for passing args according to the C ABI of the host system, and adapted the code in m68k_core_x86.c to use them instead of doing everything by hand
author | Michael Pavone <pavone@retrodev.com> |
---|---|
date | Thu, 01 Jan 2015 19:15:05 -0800 |
parents | 98927f1b005b |
children | 92ce5ea5ffc9 |
comparison
equal
deleted
inserted
replaced
655:38006d43f5a3 | 656:24ccfd70133a |
---|---|
11 #include "backend.h" | 11 #include "backend.h" |
12 #include <stdio.h> | 12 #include <stdio.h> |
13 #include <stddef.h> | 13 #include <stddef.h> |
14 #include <stdlib.h> | 14 #include <stdlib.h> |
15 #include <string.h> | 15 #include <string.h> |
16 | |
17 #define CYCLES RAX | |
18 #define LIMIT RBP | |
19 #define CONTEXT RSI | |
20 #define SCRATCH1 RCX | |
21 | |
22 #ifdef X86_64 | |
23 #define SCRATCH2 RDI | |
24 #else | |
25 #define SCRATCH2 RBX | |
26 #endif | |
27 | 16 |
28 enum { | 17 enum { |
29 FLAG_X, | 18 FLAG_X, |
30 FLAG_N, | 19 FLAG_N, |
31 FLAG_Z, | 20 FLAG_Z, |
1075 } | 1064 } |
1076 z_off = code->cur + 1; | 1065 z_off = code->cur + 1; |
1077 jmp(code, code->cur + 2); | 1066 jmp(code, code->cur + 2); |
1078 *nz_off = code->cur - (nz_off + 1); | 1067 *nz_off = code->cur - (nz_off + 1); |
1079 //add 2 cycles for every bit shifted | 1068 //add 2 cycles for every bit shifted |
1080 add_rr(code, RCX, CYCLES, SZ_D); | 1069 add_rr(code, RCX, opts->gen.cycles, SZ_D); |
1081 add_rr(code, RCX, CYCLES, SZ_D); | 1070 add_rr(code, RCX, opts->gen.cycles, SZ_D); |
1082 if (inst->op == M68K_ASL) { | 1071 if (inst->op == M68K_ASL) { |
1083 //ASL has Overflow flag behavior that depends on all of the bits shifted through the MSB | 1072 //ASL has Overflow flag behavior that depends on all of the bits shifted through the MSB |
1084 //Easiest way to deal with this is to shift one bit at a time | 1073 //Easiest way to deal with this is to shift one bit at a time |
1085 set_flag(opts, 0, FLAG_V); | 1074 set_flag(opts, 0, FLAG_V); |
1086 check_alloc_code(code, 5*MAX_INST_LEN); | 1075 check_alloc_code(code, 5*MAX_INST_LEN); |
1874 mov_rdispr(code, src_op->base, src_op->disp, opts->gen.scratch1, SZ_B); | 1863 mov_rdispr(code, src_op->base, src_op->disp, opts->gen.scratch1, SZ_B); |
1875 } | 1864 } |
1876 and_ir(code, 63, opts->gen.scratch1, SZ_D); | 1865 and_ir(code, 63, opts->gen.scratch1, SZ_D); |
1877 code_ptr zero_off = code->cur + 1; | 1866 code_ptr zero_off = code->cur + 1; |
1878 jcc(code, CC_Z, code->cur + 2); | 1867 jcc(code, CC_Z, code->cur + 2); |
1879 add_rr(code, opts->gen.scratch1, CYCLES, SZ_D); | 1868 add_rr(code, opts->gen.scratch1, opts->gen.cycles, SZ_D); |
1880 add_rr(code, opts->gen.scratch1, CYCLES, SZ_D); | 1869 add_rr(code, opts->gen.scratch1, opts->gen.cycles, SZ_D); |
1881 cmp_ir(code, 32, opts->gen.scratch1, SZ_B); | 1870 cmp_ir(code, 32, opts->gen.scratch1, SZ_B); |
1882 code_ptr norm_off = code->cur + 1; | 1871 code_ptr norm_off = code->cur + 1; |
1883 jcc(code, CC_L, code->cur + 2); | 1872 jcc(code, CC_L, code->cur + 2); |
1884 if (inst->op == M68K_ROXR || inst->op == M68K_ROXL) { | 1873 if (inst->op == M68K_ROXR || inst->op == M68K_ROXL) { |
1885 flag_to_carry(opts, FLAG_X); | 1874 flag_to_carry(opts, FLAG_X); |
1931 | 1920 |
1932 void translate_m68k_illegal(m68k_options *opts, m68kinst *inst) | 1921 void translate_m68k_illegal(m68k_options *opts, m68kinst *inst) |
1933 { | 1922 { |
1934 code_info *code = &opts->gen.code; | 1923 code_info *code = &opts->gen.code; |
1935 call(code, opts->gen.save_context); | 1924 call(code, opts->gen.save_context); |
1936 #ifdef X86_64 | 1925 call_args(code, (code_ptr)print_regs_exit, 1, opts->gen.context_reg); |
1937 mov_rr(code, opts->gen.context_reg, RDI, SZ_PTR); | |
1938 #else | |
1939 push_r(code, opts->gen.context_reg); | |
1940 #endif | |
1941 call(code, (code_ptr)print_regs_exit); | |
1942 } | 1926 } |
1943 | 1927 |
1944 #define BIT_SUPERVISOR 5 | 1928 #define BIT_SUPERVISOR 5 |
1945 | 1929 |
1946 void translate_m68k_andi_ori_ccr_sr(m68k_options *opts, m68kinst *inst) | 1930 void translate_m68k_andi_ori_ccr_sr(m68k_options *opts, m68kinst *inst) |
2086 | 2070 |
2087 void translate_m68k_reset(m68k_options *opts, m68kinst *inst) | 2071 void translate_m68k_reset(m68k_options *opts, m68kinst *inst) |
2088 { | 2072 { |
2089 code_info *code = &opts->gen.code; | 2073 code_info *code = &opts->gen.code; |
2090 call(code, opts->gen.save_context); | 2074 call(code, opts->gen.save_context); |
2091 #ifdef X86_64 | 2075 call_args(code, (code_ptr)print_regs_exit, 1, opts->gen.context_reg); |
2092 mov_rr(code, opts->gen.context_reg, RDI, SZ_PTR); | |
2093 #else | |
2094 push_r(code, opts->gen.context_reg); | |
2095 #endif | |
2096 call(code, (code_ptr)print_regs_exit); | |
2097 } | 2076 } |
2098 | 2077 |
2099 void translate_m68k_rte(m68k_options *opts, m68kinst *inst) | 2078 void translate_m68k_rte(m68k_options *opts, m68kinst *inst) |
2100 { | 2079 { |
2101 code_info *code = &opts->gen.code; | 2080 code_info *code = &opts->gen.code; |
2121 } | 2100 } |
2122 | 2101 |
2123 void translate_out_of_bounds(code_info *code) | 2102 void translate_out_of_bounds(code_info *code) |
2124 { | 2103 { |
2125 xor_rr(code, RDI, RDI, SZ_D); | 2104 xor_rr(code, RDI, RDI, SZ_D); |
2126 #ifdef X86_32 | 2105 call_args(code, (code_ptr)exit, 1, RDI); |
2127 push_r(code, RDI); | |
2128 #endif | |
2129 call(code, (code_ptr)exit); | |
2130 } | 2106 } |
2131 | 2107 |
2132 void nop_fill_or_jmp_next(code_info *code, code_ptr old_end, code_ptr next_inst) | 2108 void nop_fill_or_jmp_next(code_info *code, code_ptr old_end, code_ptr next_inst) |
2133 { | 2109 { |
2134 if (next_inst == old_end && next_inst - code->cur < 2) { | 2110 if (next_inst == old_end && next_inst - code->cur < 2) { |
2154 | 2130 |
2155 if (!options->retrans_stub) { | 2131 if (!options->retrans_stub) { |
2156 options->retrans_stub = code->cur; | 2132 options->retrans_stub = code->cur; |
2157 call(code, options->gen.save_context); | 2133 call(code, options->gen.save_context); |
2158 push_r(code, options->gen.context_reg); | 2134 push_r(code, options->gen.context_reg); |
2159 #ifdef X86_32 | 2135 call_args(code,(code_ptr)m68k_retranslate_inst, 2, options->gen.scratch2, options->gen.context_reg); |
2160 push_r(code, options->gen.context_reg); | |
2161 push_r(code, options->gen.scratch2); | |
2162 #endif | |
2163 call(code, (code_ptr)m68k_retranslate_inst); | |
2164 #ifdef X86_32 | |
2165 add_ir(code, 8, RSP, SZ_D); | |
2166 #endif | |
2167 pop_r(code, options->gen.context_reg); | 2136 pop_r(code, options->gen.context_reg); |
2168 mov_rr(code, RAX, options->gen.scratch1, SZ_PTR); | 2137 mov_rr(code, RAX, options->gen.scratch1, SZ_PTR); |
2169 call(code, options->gen.load_context); | 2138 call(code, options->gen.load_context); |
2170 jmp_r(code, options->gen.scratch1); | 2139 jmp_r(code, options->gen.scratch1); |
2171 } | 2140 } |
2195 code->cur = bp_stub; | 2164 code->cur = bp_stub; |
2196 | 2165 |
2197 //Save context and call breakpoint handler | 2166 //Save context and call breakpoint handler |
2198 call(code, opts->gen.save_context); | 2167 call(code, opts->gen.save_context); |
2199 push_r(code, opts->gen.scratch1); | 2168 push_r(code, opts->gen.scratch1); |
2200 #ifdef X86_64 | 2169 call_args(code, bp_handler, 2, opts->gen.context_reg, opts->gen.scratch1); |
2201 mov_rr(code, opts->gen.context_reg, RDI, SZ_PTR); | |
2202 mov_rr(code, opts->gen.scratch1, RSI, SZ_D); | |
2203 #else | |
2204 push_r(code, opts->gen.scratch1); | |
2205 push_r(code, opts->gen.context_reg); | |
2206 #endif | |
2207 call(code, bp_handler); | |
2208 #ifdef X86_32 | |
2209 add_ir(code, 8, RSP, SZ_D); | |
2210 #endif | |
2211 mov_rr(code, RAX, opts->gen.context_reg, SZ_PTR); | 2170 mov_rr(code, RAX, opts->gen.context_reg, SZ_PTR); |
2212 //Restore context | 2171 //Restore context |
2213 call(code, opts->gen.load_context); | 2172 call(code, opts->gen.load_context); |
2214 pop_r(code, opts->gen.scratch1); | 2173 pop_r(code, opts->gen.scratch1); |
2215 //do prologue stuff | 2174 //do prologue stuff |
2315 } | 2274 } |
2316 if (opts->aregs[i] >= 0) { | 2275 if (opts->aregs[i] >= 0) { |
2317 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t) * i, opts->aregs[i], SZ_D); | 2276 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t) * i, opts->aregs[i], SZ_D); |
2318 } | 2277 } |
2319 } | 2278 } |
2320 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, current_cycle), CYCLES, SZ_D); | 2279 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, current_cycle), opts->gen.cycles, SZ_D); |
2321 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, target_cycle), LIMIT, SZ_D); | 2280 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, target_cycle), opts->gen.limit, SZ_D); |
2322 retn(code); | 2281 retn(code); |
2323 | 2282 |
2324 opts->start_context = (start_fun)code->cur; | 2283 opts->start_context = (start_fun)code->cur; |
2325 #ifdef X86_64 | 2284 #ifdef X86_64 |
2326 if (opts->gen.scratch2 != RDI) { | 2285 if (opts->gen.scratch2 != RDI) { |
2361 retn(code); | 2320 retn(code); |
2362 | 2321 |
2363 opts->native_addr = code->cur; | 2322 opts->native_addr = code->cur; |
2364 call(code, opts->gen.save_context); | 2323 call(code, opts->gen.save_context); |
2365 push_r(code, opts->gen.context_reg); | 2324 push_r(code, opts->gen.context_reg); |
2366 #ifdef X86_64 | 2325 call_args(code, (code_ptr)get_native_address_trans, 2, opts->gen.context_reg, opts->gen.scratch1); |
2367 mov_rr(code, opts->gen.context_reg, RDI, SZ_PTR); //move context to 1st arg reg | |
2368 mov_rr(code, opts->gen.scratch1, RSI, SZ_D); //move address to 2nd arg reg | |
2369 #else | |
2370 push_r(code, opts->gen.scratch1); | |
2371 push_r(code, opts->gen.context_reg); | |
2372 #endif | |
2373 call(code, (code_ptr)get_native_address_trans); | |
2374 #ifdef X86_32 | |
2375 add_ir(code, 8, RSP, SZ_D); | |
2376 #endif | |
2377 mov_rr(code, RAX, opts->gen.scratch1, SZ_PTR); //move result to scratch reg | 2326 mov_rr(code, RAX, opts->gen.scratch1, SZ_PTR); //move result to scratch reg |
2378 pop_r(code, opts->gen.context_reg); | 2327 pop_r(code, opts->gen.context_reg); |
2379 call(code, opts->gen.load_context); | 2328 call(code, opts->gen.load_context); |
2380 retn(code); | 2329 retn(code); |
2381 | 2330 |
2382 opts->native_addr_and_sync = code->cur; | 2331 opts->native_addr_and_sync = code->cur; |
2383 call(code, opts->gen.save_context); | 2332 call(code, opts->gen.save_context); |
2384 push_r(code, opts->gen.scratch1); | 2333 push_r(code, opts->gen.scratch1); |
2385 #ifdef X86_64 | 2334 |
2386 mov_rr(code, opts->gen.context_reg, RDI, SZ_PTR); | 2335 xor_rr(code, opts->gen.scratch1, opts->gen.scratch1, SZ_D); |
2387 xor_rr(code, RSI, RSI, SZ_D); | 2336 call_args_abi(code, (code_ptr)sync_components, 2, opts->gen.context_reg, opts->gen.scratch1); |
2388 test_ir(code, 8, RSP, SZ_PTR); //check stack alignment | |
2389 code_ptr do_adjust_rsp = code->cur + 1; | |
2390 jcc(code, CC_NZ, code->cur + 2); | |
2391 call(code, (code_ptr)sync_components); | |
2392 code_ptr no_adjust_rsp = code->cur + 1; | |
2393 jmp(code, code->cur + 2); | |
2394 *do_adjust_rsp = code->cur - (do_adjust_rsp+1); | |
2395 sub_ir(code, 8, RSP, SZ_PTR); | |
2396 call(code, (code_ptr)sync_components); | |
2397 add_ir(code, 8, RSP, SZ_PTR); | |
2398 *no_adjust_rsp = code->cur - (no_adjust_rsp+1); | |
2399 pop_r(code, RSI); | |
2400 push_r(code, RAX); | |
2401 mov_rr(code, RAX, RDI, SZ_PTR); | |
2402 call(code, (code_ptr)get_native_address_trans); | |
2403 #else | |
2404 //TODO: Add support for pushing a constant in gen_x86 | |
2405 xor_rr(code, RAX, RAX, SZ_D); | |
2406 push_r(code, RAX); | |
2407 push_r(code, opts->gen.context_reg); | |
2408 call(code, (code_ptr)sync_components); | |
2409 add_ir(code, 8, RSP, SZ_D); | |
2410 pop_r(code, RSI); //restore saved address from opts->gen.scratch1 | 2337 pop_r(code, RSI); //restore saved address from opts->gen.scratch1 |
2411 push_r(code, RAX); //save context pointer for later | 2338 push_r(code, RAX); //save context pointer for later |
2412 push_r(code, RSI); //2nd arg -- address | 2339 call_args(code, (code_ptr)get_native_address_trans, 2, RAX, RSI); |
2413 push_r(code, RAX); //1st arg -- context pointer | |
2414 call(code, (code_ptr)get_native_address_trans); | |
2415 add_ir(code, 8, RSP, SZ_D); | |
2416 #endif | |
2417 | |
2418 mov_rr(code, RAX, opts->gen.scratch1, SZ_PTR); //move result to scratch reg | 2340 mov_rr(code, RAX, opts->gen.scratch1, SZ_PTR); //move result to scratch reg |
2419 pop_r(code, opts->gen.context_reg); | 2341 pop_r(code, opts->gen.context_reg); |
2420 call(code, opts->gen.load_context); | 2342 call(code, opts->gen.load_context); |
2421 retn(code); | 2343 retn(code); |
2422 | 2344 |
2423 opts->gen.handle_cycle_limit = code->cur; | 2345 opts->gen.handle_cycle_limit = code->cur; |
2424 cmp_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, sync_cycle), CYCLES, SZ_D); | 2346 cmp_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, sync_cycle), opts->gen.cycles, SZ_D); |
2425 code_ptr skip_sync = code->cur + 1; | 2347 code_ptr skip_sync = code->cur + 1; |
2426 jcc(code, CC_C, code->cur + 2); | 2348 jcc(code, CC_C, code->cur + 2); |
2427 opts->do_sync = code->cur; | 2349 opts->do_sync = code->cur; |
2428 push_r(code, opts->gen.scratch1); | 2350 push_r(code, opts->gen.scratch1); |
2429 push_r(code, opts->gen.scratch2); | 2351 push_r(code, opts->gen.scratch2); |
2430 call(code, opts->gen.save_context); | 2352 call(code, opts->gen.save_context); |
2431 #ifdef X86_64 | 2353 xor_rr(code, opts->gen.scratch1, opts->gen.scratch1, SZ_D); |
2432 mov_rr(code, opts->gen.context_reg, RDI, SZ_PTR); | 2354 call_args_abi(code, (code_ptr)sync_components, 2, opts->gen.context_reg, opts->gen.scratch1); |
2433 xor_rr(code, RSI, RSI, SZ_D); | |
2434 test_ir(code, 8, RSP, SZ_D); | |
2435 code_ptr adjust_rsp = code->cur + 1; | |
2436 jcc(code, CC_NZ, code->cur + 2); | |
2437 call(code, (code_ptr)sync_components); | |
2438 code_ptr no_adjust = code->cur + 1; | |
2439 jmp(code, code->cur + 2); | |
2440 *adjust_rsp = code->cur - (adjust_rsp + 1); | |
2441 sub_ir(code, 8, RSP, SZ_PTR); | |
2442 call(code, (code_ptr)sync_components); | |
2443 add_ir(code, 8, RSP, SZ_PTR); | |
2444 *no_adjust = code->cur - (no_adjust+1); | |
2445 #else | |
2446 //TODO: Add support for pushing a constant in gen_x86 | |
2447 xor_rr(code, RAX, RAX, SZ_D); | |
2448 push_r(code, RAX); | |
2449 push_r(code, opts->gen.context_reg); | |
2450 call(code, (code_ptr)sync_components); | |
2451 add_ir(code, 8, RSP, SZ_D); | |
2452 #endif | |
2453 mov_rr(code, RAX, opts->gen.context_reg, SZ_PTR); | 2355 mov_rr(code, RAX, opts->gen.context_reg, SZ_PTR); |
2454 call(code, opts->gen.load_context); | 2356 call(code, opts->gen.load_context); |
2455 pop_r(code, opts->gen.scratch2); | 2357 pop_r(code, opts->gen.scratch2); |
2456 pop_r(code, opts->gen.scratch1); | 2358 pop_r(code, opts->gen.scratch1); |
2457 *skip_sync = code->cur - (skip_sync+1); | 2359 *skip_sync = code->cur - (skip_sync+1); |
2557 } | 2459 } |
2558 } | 2460 } |
2559 retn(code); | 2461 retn(code); |
2560 | 2462 |
2561 opts->gen.handle_cycle_limit_int = code->cur; | 2463 opts->gen.handle_cycle_limit_int = code->cur; |
2562 cmp_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, int_cycle), CYCLES, SZ_D); | 2464 cmp_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, int_cycle), opts->gen.cycles, SZ_D); |
2563 code_ptr do_int = code->cur + 1; | 2465 code_ptr do_int = code->cur + 1; |
2564 jcc(code, CC_NC, code->cur + 2); | 2466 jcc(code, CC_NC, code->cur + 2); |
2565 cmp_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, sync_cycle), CYCLES, SZ_D); | 2467 cmp_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, sync_cycle), opts->gen.cycles, SZ_D); |
2566 skip_sync = code->cur + 1; | 2468 skip_sync = code->cur + 1; |
2567 jcc(code, CC_C, code->cur + 2); | 2469 jcc(code, CC_C, code->cur + 2); |
2568 call(code, opts->gen.save_context); | 2470 call(code, opts->gen.save_context); |
2569 #ifdef X86_64 | 2471 call_args_abi(code, (code_ptr)sync_components, 2, opts->gen.context_reg, opts->gen.scratch1); |
2570 mov_rr(code, opts->gen.context_reg, RDI, SZ_PTR); | |
2571 mov_rr(code, opts->gen.scratch1, RSI, SZ_D); | |
2572 test_ir(code, 8, RSP, SZ_D); | |
2573 adjust_rsp = code->cur + 1; | |
2574 jcc(code, CC_NZ, code->cur + 2); | |
2575 call(code, (code_ptr)sync_components); | |
2576 no_adjust = code->cur + 1; | |
2577 jmp(code, code->cur + 2); | |
2578 *adjust_rsp = code->cur - (adjust_rsp + 1); | |
2579 sub_ir(code, 8, RSP, SZ_PTR); | |
2580 call(code, (code_ptr)sync_components); | |
2581 add_ir(code, 8, RSP, SZ_PTR); | |
2582 *no_adjust = code->cur - (no_adjust+1); | |
2583 #else | |
2584 push_r(code, opts->gen.scratch1); | |
2585 push_r(code, opts->gen.context_reg); | |
2586 call(code, (code_ptr)sync_components); | |
2587 add_ir(code, 8, RSP, SZ_D); | |
2588 #endif | |
2589 mov_rr(code, RAX, opts->gen.context_reg, SZ_PTR); | 2472 mov_rr(code, RAX, opts->gen.context_reg, SZ_PTR); |
2590 jmp(code, opts->gen.load_context); | 2473 jmp(code, opts->gen.load_context); |
2591 *skip_sync = code->cur - (skip_sync+1); | 2474 *skip_sync = code->cur - (skip_sync+1); |
2592 retn(code); | 2475 retn(code); |
2593 *do_int = code->cur - (do_int+1); | 2476 *do_int = code->cur - (do_int+1); |
2594 //set target cycle to sync cycle | 2477 //set target cycle to sync cycle |
2595 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, sync_cycle), LIMIT, SZ_D); | 2478 mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, sync_cycle), opts->gen.limit, SZ_D); |
2596 //swap USP and SSP if not already in supervisor mode | 2479 //swap USP and SSP if not already in supervisor mode |
2597 bt_irdisp(code, 5, opts->gen.context_reg, offsetof(m68k_context, status), SZ_B); | 2480 bt_irdisp(code, 5, opts->gen.context_reg, offsetof(m68k_context, status), SZ_B); |
2598 code_ptr already_supervisor = code->cur + 1; | 2481 code_ptr already_supervisor = code->cur + 1; |
2599 jcc(code, CC_C, code->cur + 2); | 2482 jcc(code, CC_C, code->cur + 2); |
2600 swap_ssp_usp(opts); | 2483 swap_ssp_usp(opts); |