diff --git a/PowerRecomp/recompiler.cpp b/PowerRecomp/recompiler.cpp
index e330293..0821717 100644
--- a/PowerRecomp/recompiler.cpp
+++ b/PowerRecomp/recompiler.cpp
@@ -426,6 +426,13 @@ bool Recompiler::Recompile(
         println("\t{}.compare({}.s32, 0, {});", cr(0), r(insn.operands[0]), xer());
         break;

+    case PPC_INST_ADDC:
+        println("\t{}.ca = {}.u32 > ~{}.u32;", xer(), r(insn.operands[2]), r(insn.operands[1]));
+        println("\t{}.u64 = {}.u64 + {}.u64;", r(insn.operands[0]), r(insn.operands[1]), r(insn.operands[2]));
+        if (strchr(insn.opcode->name, '.'))
+            println("\t{}.compare({}.s32, 0, {});", cr(0), r(insn.operands[0]), xer());
+        break;
+
     case PPC_INST_ADDE:
         println("\t{}.u8 = ({}.u32 + {}.u32 < {}.u32) | ({}.u32 + {}.u32 + {}.ca < {}.ca);", temp(), r(insn.operands[1]), r(insn.operands[2]), r(insn.operands[1]), r(insn.operands[1]), r(insn.operands[2]), xer(), xer());
         println("\t{}.u64 = {}.u64 + {}.u64 + {}.ca;", r(insn.operands[0]), r(insn.operands[1]), r(insn.operands[2]), xer());
@@ -434,6 +441,14 @@ bool Recompiler::Recompile(
         println("\t{}.compare({}.s32, 0, {});", cr(0), r(insn.operands[0]), xer());
         break;

+    case PPC_INST_ADDME:
+        println("\t{}.u8 = ({}.u32 - 1 < {}.u32) | ({}.u32 - 1 + {}.ca < {}.ca);", temp(), r(insn.operands[1]), r(insn.operands[1]), r(insn.operands[1]), xer(), xer());
+        println("\t{}.u64 = {}.u64 - 1 + {}.ca;", r(insn.operands[0]), r(insn.operands[1]), xer());
+        println("\t{}.ca = {}.u8;", xer(), temp());
+        if (strchr(insn.opcode->name, '.'))
+            println("\t{}.compare({}.s32, 0, {});", cr(0), r(insn.operands[0]), xer());
+        break;
+
     case PPC_INST_ADDI:
         print("\t{}.s64 = ", r(insn.operands[0]));
         if (insn.operands[1] != 0)
@@ -547,6 +562,14 @@ bool Recompiler::Recompile(
         println("\tif ({}.u32 == 0) goto loc_{:X};", ctr(), insn.operands[0]);
         break;

+    case PPC_INST_BDZF:
+    {
+        constexpr std::string_view fields[] = { "lt", "gt", "eq", "so" };
+        println("\t--{}.u64;", ctr());
+        println("\tif ({}.u32 == 0 && !{}.{}) goto loc_{:X};", ctr(), cr(insn.operands[0] / 4), fields[insn.operands[0] % 4], insn.operands[1]);
+        break;
+    }
+
     case PPC_INST_BDZLR:
         println("\t--{}.u64;", ctr());
         println("\tif ({}.u32 == 0) return;", ctr(), insn.operands[0]);
@@ -558,10 +581,20 @@ bool Recompiler::Recompile(
         break;

     case PPC_INST_BDNZF:
-        // NOTE: assuming eq here as a shortcut because all the instructions in the game do that
+    {
+        constexpr std::string_view fields[] = { "lt", "gt", "eq", "so" };
+        println("\t--{}.u64;", ctr());
+        println("\tif ({}.u32 != 0 && !{}.{}) goto loc_{:X};", ctr(), cr(insn.operands[0] / 4), fields[insn.operands[0] % 4], insn.operands[1]);
+        break;
+    }
+
+    case PPC_INST_BDNZT:
+    {
+        constexpr std::string_view fields[] = { "lt", "gt", "eq", "so" };
         println("\t--{}.u64;", ctr());
-        println("\tif ({}.u32 != 0 && !{}.eq) goto loc_{:X};", ctr(), cr(insn.operands[0] / 4), insn.operands[1]);
+        println("\tif ({}.u32 != 0 && {}.{}) goto loc_{:X};", ctr(), cr(insn.operands[0] / 4), fields[insn.operands[0] % 4], insn.operands[1]);
         break;
+    }

     case PPC_INST_BEQ:
         printConditionalBranch(false, "eq");
@@ -691,6 +724,20 @@ bool Recompiler::Recompile(
         println("\t{}.u64 = __lzcnt({}.u32);", r(insn.operands[0]), r(insn.operands[1]));
         break;

+    case PPC_INST_CROR:
+    {
+        constexpr std::string_view fields[] = { "lt", "gt", "eq", "so" };
+        println("\t{}.{} = {}.{} | {}.{};", cr(insn.operands[0] / 4), fields[insn.operands[0] % 4], cr(insn.operands[1] / 4), fields[insn.operands[1] % 4], cr(insn.operands[2] / 4), fields[insn.operands[2] % 4]);
+        break;
+    }
+
+    case PPC_INST_CRORC:
+    {
+        constexpr std::string_view fields[] = { "lt", "gt", "eq", "so" };
+        println("\t{}.{} = {}.{} | (~{}.{} & 1);", cr(insn.operands[0] / 4), fields[insn.operands[0] % 4], cr(insn.operands[1] / 4), fields[insn.operands[1] % 4], cr(insn.operands[2] / 4), fields[insn.operands[2] % 4]);
+        break;
+    }
+
     case PPC_INST_DB16CYC:
         // no op
         break;
@@ -703,6 +750,10 @@ bool Recompiler::Recompile(
         // no op
         break;

+    case PPC_INST_DCBST:
+        // no op
+        break;
+
     case PPC_INST_DCBTST:
         // no op
         break;
@@ -747,6 +798,12 @@ bool Recompiler::Recompile(
         // no op
         break;

+    case PPC_INST_EQV:
+        println("\t{}.u64 = ~({}.u64 ^ {}.u64);", r(insn.operands[0]), r(insn.operands[1]), r(insn.operands[2]));
+        if (strchr(insn.opcode->name, '.'))
+            println("\t{}.compare({}.s32, 0, {});", cr(0), r(insn.operands[0]), xer());
+        break;
+
     case PPC_INST_EXTSB:
         println("\t{}.s64 = {}.s8;", r(insn.operands[0]), r(insn.operands[1]));
         if (strchr(insn.opcode->name, '.'))
@@ -930,6 +987,12 @@ bool Recompiler::Recompile(
         println("{}.u32);", r(insn.operands[2]));
         break;

+    case PPC_INST_LBZUX:
+        println("\t{} = {}.u32 + {}.u32;", ea(), r(insn.operands[1]), r(insn.operands[2]));
+        println("\t{}.u64 = PPC_LOAD_U8({});", r(insn.operands[0]), ea());
+        println("\t{}.u32 = {};", r(insn.operands[1]), ea());
+        break;
+
     case PPC_INST_LD:
         print("\t{}.u64 = PPC_LOAD_U64(", r(insn.operands[0]));
         if (insn.operands[2] != 0)
@@ -958,6 +1021,12 @@ bool Recompiler::Recompile(
         println("{}.u32);", r(insn.operands[2]));
         break;

+    case PPC_INST_LDUX:
+        println("\t{} = {}.u32 + {}.u32;", ea(), r(insn.operands[1]), r(insn.operands[2]));
+        println("\t{}.u64 = PPC_LOAD_U64({});", r(insn.operands[0]), ea());
+        println("\t{}.u32 = {};", r(insn.operands[1]), ea());
+        break;
+
     case PPC_INST_LFD:
         printSetFlushMode(false);
         print("\t{}.u64 = PPC_LOAD_U64(", f(insn.operands[0]));
@@ -966,6 +1035,13 @@ bool Recompiler::Recompile(
         println("{});", int32_t(insn.operands[1]));
         break;

+    case PPC_INST_LFDU:
+        printSetFlushMode(false);
+        println("\t{} = {} + {}.u32;", ea(), int32_t(insn.operands[1]), r(insn.operands[2]));
+        println("\t{}.u64 = PPC_LOAD_U64({});", f(insn.operands[0]), ea());
+        println("\t{}.u32 = {};", r(insn.operands[2]), ea());
+        break;
+
     case PPC_INST_LFDX:
         printSetFlushMode(false);
         print("\t{}.u64 = PPC_LOAD_U64(", f(insn.operands[0]));
@@ -974,6 +1050,13 @@ bool Recompiler::Recompile(
         println("{}.u32);", r(insn.operands[2]));
         break;

+    case PPC_INST_LFDUX:
+        printSetFlushMode(false);
+        println("\t{} = {}.u32 + {}.u32;", ea(), r(insn.operands[1]), r(insn.operands[2]));
+        println("\t{}.u64 = PPC_LOAD_U64({});", f(insn.operands[0]), ea());
+        println("\t{}.u32 = {};", r(insn.operands[1]), ea());
+        break;
+
     case PPC_INST_LFS:
         printSetFlushMode(false);
         print("\t{}.u32 = PPC_LOAD_U32(", temp());
@@ -983,6 +1066,14 @@ bool Recompiler::Recompile(
         println("\t{}.f64 = double({}.f32);", f(insn.operands[0]), temp());
         break;

+    case PPC_INST_LFSU:
+        printSetFlushMode(false);
+        println("\t{} = {} + {}.u32;", ea(), int32_t(insn.operands[1]), r(insn.operands[2]));
+        println("\t{}.u32 = PPC_LOAD_U32({});", temp(), ea());
+        println("\t{}.u32 = {};", r(insn.operands[2]), ea());
+        println("\t{}.f64 = double({}.f32);", f(insn.operands[0]), temp());
+        break;
+
     case PPC_INST_LFSX:
         printSetFlushMode(false);
         print("\t{}.u32 = PPC_LOAD_U32(", temp());
@@ -992,6 +1083,14 @@ bool Recompiler::Recompile(
         println("\t{}.f64 = double({}.f32);", f(insn.operands[0]), temp());
         break;

+    case PPC_INST_LFSUX:
+        printSetFlushMode(false);
+        println("\t{} = {}.u32 + {}.u32;", ea(), r(insn.operands[1]), r(insn.operands[2]));
+        println("\t{}.u32 = PPC_LOAD_U32({});", temp(), ea());
+        println("\t{}.u32 = {};", r(insn.operands[1]), ea());
+        println("\t{}.f64 = double({}.f32);", f(insn.operands[0]), temp());
+        break;
+
     case PPC_INST_LHA:
         print("\t{}.s64 = int16_t(PPC_LOAD_U16(", r(insn.operands[0]));
         if (insn.operands[2] != 0)
@@ -999,6 +1098,12 @@ bool Recompiler::Recompile(
         println("{}));", int32_t(insn.operands[1]));
         break;

+    case PPC_INST_LHAU:
+        println("\t{} = {} + {}.u32;", ea(), int32_t(insn.operands[1]), r(insn.operands[2]));
+        println("\t{}.s64 = int16_t(PPC_LOAD_U16({}));", r(insn.operands[0]), ea());
+        println("\t{}.u32 = {};", r(insn.operands[2]), ea());
+        break;
+
     case PPC_INST_LHAX:
         print("\t{}.s64 = int16_t(PPC_LOAD_U16(", r(insn.operands[0]));
         if (insn.operands[1] != 0)
@@ -1013,6 +1118,12 @@ bool Recompiler::Recompile(
         println("{});", int32_t(insn.operands[1]));
         break;

+    case PPC_INST_LHZU:
+        println("\t{} = {} + {}.u32;", ea(), int32_t(insn.operands[1]), r(insn.operands[2]));
+        println("\t{}.u64 = PPC_LOAD_U16({});", r(insn.operands[0]), ea());
+        println("\t{}.u32 = {};", r(insn.operands[2]), ea());
+        break;
+
     case PPC_INST_LHZX:
         print("\t{}.u64 = PPC_LOAD_U16(", r(insn.operands[0]));
         if (insn.operands[1] != 0)
@@ -1020,6 +1131,12 @@ bool Recompiler::Recompile(
         println("{}.u32);", r(insn.operands[2]));
         break;

+    case PPC_INST_LHZUX:
+        println("\t{} = {}.u32 + {}.u32;", ea(), r(insn.operands[1]), r(insn.operands[2]));
+        println("\t{}.u64 = PPC_LOAD_U16({});", r(insn.operands[0]), ea());
+        println("\t{}.u32 = {};", r(insn.operands[1]), ea());
+        break;
+
     case PPC_INST_LI:
         println("\t{}.s64 = {};", r(insn.operands[0]), int32_t(insn.operands[1]));
         break;
@@ -1032,6 +1149,7 @@ bool Recompiler::Recompile(
     case PPC_INST_LVEWX128:
     case PPC_INST_LVX:
     case PPC_INST_LVX128:
+    case PPC_INST_LVEHX:
         // NOTE: for endian swapping, we reverse the whole vector instead of individual elements.
         // this is accounted for in every instruction (eg. dp3 sums yzw instead of xyz)
         print("\t_mm_store_si128((__m128i*){}.u8, _mm_shuffle_epi8(_mm_load_si128((__m128i*)(base + ((", v(insn.operands[0]));
@@ -1127,6 +1245,12 @@ bool Recompiler::Recompile(
         println("{}.u32);", r(insn.operands[2]));
         break;

+    case PPC_INST_LWZUX:
+        println("\t{} = {}.u32 + {}.u32;", ea(), r(insn.operands[1]), r(insn.operands[2]));
+        println("\t{}.u64 = PPC_LOAD_U32({});", r(insn.operands[0]), ea());
+        println("\t{}.u32 = {};", r(insn.operands[1]), ea());
+        break;
+
     case PPC_INST_MFCR:
         for (size_t i = 0; i < 32; i++)
         {
@@ -1377,7 +1501,7 @@ bool Recompiler::Recompile(

     case PPC_INST_STBU:
         println("\t{} = {} + {}.u32;", ea(), int32_t(insn.operands[1]), r(insn.operands[2]));
-        println("\tPPC_STORE_U8({}, {}.u8);", ea(), r(insn.operands[0]));
+        println("\t{}{}, {}.u8);", mmioStore() ? "PPC_MM_STORE_U8(" : "PPC_STORE_U8(", ea(), r(insn.operands[0]));
         println("\t{}.u32 = {};", r(insn.operands[2]), ea());
         break;

@@ -1388,6 +1512,12 @@ bool Recompiler::Recompile(
         println("{}.u32, {}.u8);", r(insn.operands[2]), r(insn.operands[0]));
         break;

+    case PPC_INST_STBUX:
+        println("\t{} = {}.u32 + {}.u32;", ea(), r(insn.operands[1]), r(insn.operands[2]));
+        println("\t{}{}, {}.u8);", mmioStore() ? "PPC_MM_STORE_U8(" : "PPC_STORE_U8(", ea(), r(insn.operands[0]));
+        println("\t{}.u32 = {};", r(insn.operands[1]), ea());
+        break;
+
     case PPC_INST_STD:
         print("{}", mmioStore() ? "\tPPC_MM_STORE_U64(" : "\tPPC_STORE_U64(");
"\tPPC_MM_STORE_U64(" : "\tPPC_STORE_U64("); if (insn.operands[2] != 0) @@ -1407,7 +1537,7 @@ bool Recompiler::Recompile( case PPC_INST_STDU: println("\t{} = {} + {}.u32;", ea(), int32_t(insn.operands[1]), r(insn.operands[2])); - println("\tPPC_STORE_U64({}, {}.u64);", ea(), r(insn.operands[0])); + println("\t{}{}, {}.u64);", mmioStore() ? "PPC_MM_STORE_U64(" : "PPC_STORE_U64(", ea(), r(insn.operands[0])); println("\t{}.u32 = {};", r(insn.operands[2]), ea()); break; @@ -1418,6 +1548,12 @@ bool Recompiler::Recompile( println("{}.u32, {}.u64);", r(insn.operands[2]), r(insn.operands[0])); break; + case PPC_INST_STDUX: + println("\t{} = {}.u32 + {}.u32;", ea(), r(insn.operands[1]), r(insn.operands[2])); + println("\t{}{}, {}.u64);", mmioStore() ? "PPC_MM_STORE_U64(" : "PPC_STORE_U64(", ea(), r(insn.operands[0])); + println("\t{}.u32 = {};", r(insn.operands[1]), ea()); + break; + case PPC_INST_STFD: printSetFlushMode(false); print("{}", mmioStore() ? "\tPPC_MM_STORE_U64(" : "\tPPC_STORE_U64("); @@ -1426,6 +1562,13 @@ bool Recompiler::Recompile( println("{}, {}.u64);", int32_t(insn.operands[1]), f(insn.operands[0])); break; + case PPC_INST_STFDU: + printSetFlushMode(false); + println("\t{} = {} + {}.u32;", ea(), int32_t(insn.operands[1]), r(insn.operands[2])); + println("\t{}{}, {}.u64);", mmioStore() ? "PPC_MM_STORE_U64(" : "PPC_STORE_U64(", ea(), r(insn.operands[0])); + println("\t{}.u32 = {};", r(insn.operands[2]), ea()); + break; + case PPC_INST_STFDX: printSetFlushMode(false); print("{}", mmioStore() ? "\tPPC_MM_STORE_U64(" : "\tPPC_STORE_U64("); @@ -1451,6 +1594,14 @@ bool Recompiler::Recompile( println("{}, {}.u32);", int32_t(insn.operands[1]), temp()); break; + case PPC_INST_STFSU: + printSetFlushMode(false); + println("\t{}.f32 = float({}.f64);", temp(), f(insn.operands[0])); + println("\t{} = {} + {}.u32;", ea(), int32_t(insn.operands[1]), r(insn.operands[2])); + println("\t{}{}, {}.u32);", mmioStore() ? "PPC_MM_STORE_U32(" : "PPC_STORE_U32(", ea(), temp()); + println("\t{}.u32 = {};", r(insn.operands[2]), ea()); + break; + case PPC_INST_STFSX: printSetFlushMode(false); println("\t{}.f32 = float({}.f64);", temp(), f(insn.operands[0])); @@ -1460,6 +1611,14 @@ bool Recompiler::Recompile( println("{}.u32, {}.u32);", r(insn.operands[2]), temp()); break; + case PPC_INST_STFSUX: + printSetFlushMode(false); + println("\t{}.f32 = float({}.f64);", temp(), f(insn.operands[0])); + println("\t{} = {}.u32 + {}.u32;", ea(), r(insn.operands[1]), r(insn.operands[2])); + println("\t{}{}, {}.u32);", mmioStore() ? "PPC_MM_STORE_U32(" : "PPC_STORE_U32(", ea(), temp()); + println("\t{}.u32 = {};", r(insn.operands[1]), ea()); + break; + case PPC_INST_STH: print("{}", mmioStore() ? "\tPPC_MM_STORE_U16(" : "\tPPC_STORE_U16("); if (insn.operands[2] != 0) @@ -1467,6 +1626,18 @@ bool Recompiler::Recompile( println("{}, {}.u16);", int32_t(insn.operands[1]), r(insn.operands[0])); break; + case PPC_INST_STHU: + println("\t{} = {} + {}.u32;", ea(), int32_t(insn.operands[1]), r(insn.operands[2])); + println("\t{}{}, {}.u16);", mmioStore() ? "PPC_MM_STORE_U16(" : "PPC_STORE_U16(", ea(), r(insn.operands[0])); + println("\t{}.u32 = {};", r(insn.operands[2]), ea()); + break; + + case PPC_INST_STHUX: + println("\t{} = {}.u32 + {}.u32;", ea(), r(insn.operands[1]), r(insn.operands[2])); + println("\t{}{}, {}.u16);", mmioStore() ? "PPC_MM_STORE_U16(" : "PPC_STORE_U16(", ea(), r(insn.operands[0])); + println("\t{}.u32 = {};", r(insn.operands[1]), ea()); + break; + case PPC_INST_STHBRX: print("{}", mmioStore() ? 
"\tPPC_MM_STORE_U16(" : "\tPPC_STORE_U16("); if (insn.operands[1] != 0) @@ -1562,13 +1733,13 @@ bool Recompiler::Recompile( case PPC_INST_STWU: println("\t{} = {} + {}.u32;", ea(), int32_t(insn.operands[1]), r(insn.operands[2])); - println("\tPPC_STORE_U32({}, {}.u32);", ea(), r(insn.operands[0])); + println("\t{}{}, {}.u32);", mmioStore() ? "PPC_MM_STORE_U32(" : "PPC_STORE_U32(", ea(), r(insn.operands[0])); println("\t{}.u32 = {};", r(insn.operands[2]), ea()); break; case PPC_INST_STWUX: println("\t{} = {}.u32 + {}.u32;", ea(), r(insn.operands[1]), r(insn.operands[2])); - println("\tPPC_STORE_U32({}, {}.u32);", ea(), r(insn.operands[0])); + println("\t{}{}, {}.u32);", mmioStore() ? "PPC_MM_STORE_U32(" : "PPC_STORE_U32(", ea(), r(insn.operands[0])); println("\t{}.u32 = {};", r(insn.operands[1]), ea()); break; @@ -1600,6 +1771,14 @@ bool Recompiler::Recompile( println("\t{}.compare({}.s32, 0, {});", cr(0), r(insn.operands[0]), xer()); break; + case PPC_INST_SUBFZE: + println("\t{}.u8 = (~{}.u32 < ~{}.u32) | (~{}.u32 + {}.ca < {}.ca);", temp(), r(insn.operands[1]), r(insn.operands[1]), r(insn.operands[1]), xer(), xer()); + println("\t{}.u64 = ~{}.u64 + {}.ca;", r(insn.operands[0]), r(insn.operands[1]), xer()); + println("\t{}.ca = {}.u8;", xer(), temp()); + if (strchr(insn.opcode->name, '.')) + println("\t{}.compare({}.s32, 0, {});", cr(0), r(insn.operands[0]), xer()); + break; + case PPC_INST_SUBFIC: println("\t{}.ca = {}.u32 <= {};", xer(), r(insn.operands[1]), insn.operands[2]); println("\t{}.s64 = {} - {}.s64;", r(insn.operands[0]), int32_t(insn.operands[2]), r(insn.operands[1])); @@ -1635,10 +1814,23 @@ bool Recompiler::Recompile( println("\t_mm_store_ps({}.f32, _mm_add_ps(_mm_load_ps({}.f32), _mm_load_ps({}.f32)));", v(insn.operands[0]), v(insn.operands[1]), v(insn.operands[2])); break; + case PPC_INST_VADDSBS: + println("\t_mm_store_si128((__m128i*){}.s8, _mm_adds_epi8(_mm_load_si128((__m128i*){}.s8), _mm_load_si128((__m128i*){}.s8)));", v(insn.operands[0]), v(insn.operands[1]), v(insn.operands[2])); + break; + case PPC_INST_VADDSHS: println("\t_mm_store_si128((__m128i*){}.s16, _mm_adds_epi16(_mm_load_si128((__m128i*){}.s16), _mm_load_si128((__m128i*){}.s16)));", v(insn.operands[0]), v(insn.operands[1]), v(insn.operands[2])); break; + case PPC_INST_VADDSWS: + // TODO: vectorize + for (size_t i = 0; i < 4; i++) + { + println("\t{}.s64 = int64_t({}.s32[{}]) + int64_t({}.s32[{}]);", temp(), v(insn.operands[1]), i, v(insn.operands[2]), i); + println("\t{}.s32[{}] = {}.s64 > INT_MAX ? INT_MAX : {}.s64 < INT_MIN ? 
INT_MIN : {}.s64;", v(insn.operands[0]), i, temp(), temp(), temp()); + } + break; + case PPC_INST_VADDUBM: println("\t_mm_store_si128((__m128i*){}.u8, _mm_add_epi8(_mm_load_si128((__m128i*){}.u8), _mm_load_si128((__m128i*){}.u8)));", v(insn.operands[0]), v(insn.operands[1]), v(insn.operands[2])); break; @@ -1680,6 +1872,10 @@ bool Recompiler::Recompile( println("\t_mm_store_si128((__m128i*){}.u8, _mm_avg_epu8(_mm_load_si128((__m128i*){}.u8), _mm_load_si128((__m128i*){}.u8)));", v(insn.operands[0]), v(insn.operands[1]), v(insn.operands[2])); break; + case PPC_INST_VAVGUH: + println("\t_mm_store_si128((__m128i*){}.u8, _mm_avg_epu16(_mm_load_si128((__m128i*){}.u16), _mm_load_si128((__m128i*){}.u16)));", v(insn.operands[0]), v(insn.operands[1]), v(insn.operands[2])); + break; + case PPC_INST_VCTSXS: case PPC_INST_VCFPSXWS128: printSetFlushMode(true); @@ -1690,6 +1886,16 @@ bool Recompiler::Recompile( println("_mm_load_ps({}.f32)));", v(insn.operands[1])); break; + case PPC_INST_VCTUXS: + case PPC_INST_VCFPUXWS128: + printSetFlushMode(true); + print("\t_mm_store_si128((__m128i*){}.u32, _mm_vctuxs(", v(insn.operands[0])); + if (insn.operands[2] != 0) + println("_mm_mul_ps(_mm_load_ps({}.f32), _mm_set1_ps({}))));", v(insn.operands[1]), 1u << insn.operands[2]); + else + println("_mm_load_ps({}.f32)));", v(insn.operands[1])); + break; + case PPC_INST_VCFSX: case PPC_INST_VCSXWFP128: { @@ -1743,6 +1949,12 @@ bool Recompiler::Recompile( println("\t{}.setFromMask(_mm_load_si128((__m128i*){}.u8), 0xFFFF);", cr(6), v(insn.operands[0])); break; + case PPC_INST_VCMPEQUH: + println("\t_mm_store_si128((__m128i*){}.u8, _mm_cmpeq_epi16(_mm_load_si128((__m128i*){}.u16), _mm_load_si128((__m128i*){}.u16)));", v(insn.operands[0]), v(insn.operands[1]), v(insn.operands[2])); + if (strchr(insn.opcode->name, '.')) + println("\t{}.setFromMask(_mm_load_si128((__m128i*){}.u16), 0xFFFF);", cr(6), v(insn.operands[0])); + break; + case PPC_INST_VCMPEQUW: case PPC_INST_VCMPEQUW128: println("\t_mm_store_si128((__m128i*){}.u8, _mm_cmpeq_epi32(_mm_load_si128((__m128i*){}.u32), _mm_load_si128((__m128i*){}.u32)));", v(insn.operands[0]), v(insn.operands[1]), v(insn.operands[2])); @@ -1768,10 +1980,26 @@ bool Recompiler::Recompile( case PPC_INST_VCMPGTUB: println("\t_mm_store_si128((__m128i*){}.u8, _mm_cmpgt_epu8(_mm_load_si128((__m128i*){}.u8), _mm_load_si128((__m128i*){}.u8)));", v(insn.operands[0]), v(insn.operands[1]), v(insn.operands[2])); + if (strchr(insn.opcode->name, '.')) + println("\t{}.setFromMask(_mm_load_si128((__m128i*){}.u8), 0xFFFF);", cr(6), v(insn.operands[0])); break; case PPC_INST_VCMPGTUH: println("\t_mm_store_si128((__m128i*){}.u8, _mm_cmpgt_epu16(_mm_load_si128((__m128i*){}.u16), _mm_load_si128((__m128i*){}.u16)));", v(insn.operands[0]), v(insn.operands[1]), v(insn.operands[2])); + if (strchr(insn.opcode->name, '.')) + println("\t{}.setFromMask(_mm_load_si128((__m128i*){}.u16), 0xFFFF);", cr(6), v(insn.operands[0])); + break; + + case PPC_INST_VCMPGTSH: + println("\t_mm_store_si128((__m128i*){}.s8, _mm_cmpgt_epi16(_mm_load_si128((__m128i*){}.u16), _mm_load_si128((__m128i*){}.u16)));", v(insn.operands[0]), v(insn.operands[1]), v(insn.operands[2])); + if (strchr(insn.opcode->name, '.')) + println("\t{}.setFromMask(_mm_load_si128((__m128i*){}.s16), 0xFFFF);", cr(6), v(insn.operands[0])); + break; + + case PPC_INST_VCMPGTSW: + println("\t_mm_store_si128((__m128i*){}.s8, _mm_cmpgt_epi32(_mm_load_si128((__m128i*){}.u32), _mm_load_si128((__m128i*){}.u32)));", v(insn.operands[0]), v(insn.operands[1]), 
+        if (strchr(insn.opcode->name, '.'))
+            println("\t{}.setFromMask(_mm_load_si128((__m128i*){}.s32), 0xFFFF);", cr(6), v(insn.operands[0]));
         break;

     case PPC_INST_VEXPTEFP:
@@ -1803,10 +2031,18 @@ bool Recompiler::Recompile(
         println("\t_mm_store_ps({}.f32, _mm_max_ps(_mm_load_ps({}.f32), _mm_load_ps({}.f32)));", v(insn.operands[0]), v(insn.operands[1]), v(insn.operands[2]));
         break;

+    case PPC_INST_VMAXSH:
+        println("\t_mm_store_si128((__m128i*){}.u16, _mm_max_epi16(_mm_load_si128((__m128i*){}.u16), _mm_load_si128((__m128i*){}.u16)));", v(insn.operands[0]), v(insn.operands[1]), v(insn.operands[2]));
+        break;
+
     case PPC_INST_VMAXSW:
         println("\t_mm_store_si128((__m128i*){}.u32, _mm_max_epi32(_mm_load_si128((__m128i*){}.u32), _mm_load_si128((__m128i*){}.u32)));", v(insn.operands[0]), v(insn.operands[1]), v(insn.operands[2]));
         break;

+    case PPC_INST_VMINSH:
+        println("\t_mm_store_si128((__m128i*){}.u16, _mm_min_epi16(_mm_load_si128((__m128i*){}.u16), _mm_load_si128((__m128i*){}.u16)));", v(insn.operands[0]), v(insn.operands[1]), v(insn.operands[2]));
+        break;
+
     case PPC_INST_VMINFP:
     case PPC_INST_VMINFP128:
         printSetFlushMode(true);
@@ -1915,11 +2151,36 @@ bool Recompiler::Recompile(
         }
         break;

+    case PPC_INST_VPKSHSS:
+    case PPC_INST_VPKSHSS128:
+        println("\t_mm_store_si128((__m128i*){}.u8, _mm_packs_epi16(_mm_load_si128((__m128i*){}.s16), _mm_load_si128((__m128i*){}.s16)));", v(insn.operands[0]), v(insn.operands[2]), v(insn.operands[1]));
+        break;
+
+    case PPC_INST_VPKSWSS:
+    case PPC_INST_VPKSWSS128:
+        println("\t_mm_store_si128((__m128i*){}.u8, _mm_packs_epi32(_mm_load_si128((__m128i*){}.s32), _mm_load_si128((__m128i*){}.s32)));", v(insn.operands[0]), v(insn.operands[2]), v(insn.operands[1]));
+        break;
+
     case PPC_INST_VPKSHUS:
     case PPC_INST_VPKSHUS128:
         println("\t_mm_store_si128((__m128i*){}.u8, _mm_packus_epi16(_mm_load_si128((__m128i*){}.s16), _mm_load_si128((__m128i*){}.s16)));", v(insn.operands[0]), v(insn.operands[2]), v(insn.operands[1]));
         break;

+    case PPC_INST_VPKSWUS:
+    case PPC_INST_VPKSWUS128:
+        println("\t_mm_store_si128((__m128i*){}.u8, _mm_packus_epi32(_mm_load_si128((__m128i*){}.s32), _mm_load_si128((__m128i*){}.s32)));", v(insn.operands[0]), v(insn.operands[2]), v(insn.operands[1]));
+        break;
+
+    case PPC_INST_VPKUHUS:
+    case PPC_INST_VPKUHUS128:
+        for (size_t i = 0; i < 8; i++)
+        {
+            println("\t{0}.u8[{1}] = {2}.u16[{1}] > UCHAR_MAX ? UCHAR_MAX : {2}.u16[{1}];", vTemp(), i, v(insn.operands[2]));
+            println("\t{0}.u8[{1}] = {2}.u16[{3}] > UCHAR_MAX ? UCHAR_MAX : {2}.u16[{3}];", vTemp(), i + 8, v(insn.operands[1]), i);
+        }
+        println("{} = {};", v(insn.operands[0]), vTemp());
+        break;
+
     case PPC_INST_VREFP:
     case PPC_INST_VREFP128:
         // TODO: see if we can use rcp safely
@@ -1952,6 +2213,14 @@ bool Recompiler::Recompile(
         break;
     }

+    case PPC_INST_VRLH:
+        for (size_t i = 0; i < 8; i++)
+        {
+            println("\t{0}.u16[{1}] = ({2}.u16[{1}] << ({3}.u16[{1}] & 0xF)) | ({2}.u16[{1}] >> (16 - ({3}.u16[{1}] & 0xF)));", vTemp(), i, v(insn.operands[1]), v(insn.operands[2]));
+        }
+        println("{} = {};", v(insn.operands[0]), vTemp());
+        break;
+
     case PPC_INST_VRSQRTEFP:
     case PPC_INST_VRSQRTEFP128:
         // TODO: see if we can use rsqrt safely
@@ -1961,6 +2230,7 @@ bool Recompiler::Recompile(
         break;

     case PPC_INST_VSEL:
+    case PPC_INST_VSEL128:
         println("\t_mm_store_si128((__m128i*){}.u8, _mm_or_si128(_mm_andnot_si128(_mm_load_si128((__m128i*){}.u8), _mm_load_si128((__m128i*){}.u8)), _mm_and_si128(_mm_load_si128((__m128i*){}.u8), _mm_load_si128((__m128i*){}.u8))));", v(insn.operands[0]), v(insn.operands[3]), v(insn.operands[1]), v(insn.operands[3]), v(insn.operands[2]));
         break;
@@ -1970,6 +2240,12 @@ bool Recompiler::Recompile(
         println("\t{}.u8[{}] = {}.u8[{}] << ({}.u8[{}] & 0x7);", v(insn.operands[0]), i, v(insn.operands[1]), i, v(insn.operands[2]), i);
         break;

+    case PPC_INST_VSLH:
+        // TODO: vectorize
+        for (size_t i = 0; i < 8; i++)
+            println("\t{}.u16[{}] = {}.u16[{}] << ({}.u8[{}] & 0xF);", v(insn.operands[0]), i, v(insn.operands[1]), i, v(insn.operands[2]), i * 2);
+        break;
+
     case PPC_INST_VSLDOI:
     case PPC_INST_VSLDOI128:
         println("\t_mm_store_si128((__m128i*){}.u8, _mm_alignr_epi8(_mm_load_si128((__m128i*){}.u8), _mm_load_si128((__m128i*){}.u8), {}));", v(insn.operands[0]), v(insn.operands[1]), v(insn.operands[2]), 16 - insn.operands[3]);
@@ -2003,6 +2279,10 @@ bool Recompiler::Recompile(
         println("\t_mm_store_si128((__m128i*){}.u8, _mm_set1_epi8(char(0x{:X})));", v(insn.operands[0]), insn.operands[1]);
         break;

+    case PPC_INST_VSPLTISH:
+        println("\t_mm_store_si128((__m128i*){}.u16, _mm_set1_epi16(int(0x{:X})));", v(insn.operands[0]), insn.operands[1]);
+        break;
+
     case PPC_INST_VSPLTISW:
     case PPC_INST_VSPLTISW128:
         println("\t_mm_store_si128((__m128i*){}.u32, _mm_set1_epi32(int(0x{:X})));", v(insn.operands[0]), insn.operands[1]);
@@ -2022,6 +2302,18 @@ bool Recompiler::Recompile(
         println("\t_mm_store_si128((__m128i*){}.u8, _mm_vsr(_mm_load_si128((__m128i*){}.u8), _mm_load_si128((__m128i*){}.u8)));", v(insn.operands[0]), v(insn.operands[1]), v(insn.operands[2]));
         break;

+    case PPC_INST_VSRAB:
+        // TODO: vectorize, ensure endianness is correct
+        for (size_t i = 0; i < 16; i++)
+            println("\t{}.s8[{}] = {}.s8[{}] >> ({}.u8[{}] & 0x7);", v(insn.operands[0]), i, v(insn.operands[1]), i, v(insn.operands[2]), i);
+        break;
+
+    case PPC_INST_VSRAH:
+        // TODO: vectorize, ensure endianness is correct
+        for (size_t i = 0; i < 8; i++)
+            println("\t{}.s16[{}] = {}.s16[{}] >> ({}.u8[{}] & 0xF);", v(insn.operands[0]), i, v(insn.operands[1]), i, v(insn.operands[2]), i * 2);
+        break;
+
     case PPC_INST_VSRAW:
     case PPC_INST_VSRAW128:
         // TODO: vectorize, ensure endianness is correct
@@ -2029,6 +2321,12 @@ bool Recompiler::Recompile(
         println("\t{}.s32[{}] = {}.s32[{}] >> ({}.u8[{}] & 0x1F);", v(insn.operands[0]), i, v(insn.operands[1]), i, v(insn.operands[2]), i * 4);
         break;

+    case PPC_INST_VSRH:
+        // TODO: vectorize, ensure endianness is correct
+        for (size_t i = 0; i < 8; i++)
+            println("\t{}.u16[{}] = {}.u16[{}] >> ({}.u8[{}] & 0xF);", v(insn.operands[0]), i, v(insn.operands[1]), i, v(insn.operands[2]), i * 2);
+        break;
+
     case PPC_INST_VSRW:
     case PPC_INST_VSRW128:
         // TODO: vectorize, ensure endianness is correct
@@ -2042,6 +2340,15 @@ bool Recompiler::Recompile(
         println("\t_mm_store_ps({}.f32, _mm_sub_ps(_mm_load_ps({}.f32), _mm_load_ps({}.f32)));", v(insn.operands[0]), v(insn.operands[1]), v(insn.operands[2]));
         break;

+    case PPC_INST_VSUBSHS:
+        // TODO: vectorize
+        for (size_t i = 0; i < 8; i++)
+        {
+            println("\t{}.s64 = int64_t({}.s16[{}]) - int64_t({}.s16[{}]);", temp(), v(insn.operands[1]), i, v(insn.operands[2]), i);
+            println("\t{}.s16[{}] = {}.s64 > SHRT_MAX ? SHRT_MAX : {}.s64 < SHRT_MIN ? SHRT_MIN : {}.s64;", v(insn.operands[0]), i, temp(), temp(), temp());
+        }
+        break;
+
     case PPC_INST_VSUBSWS:
         // TODO: vectorize
         for (size_t i = 0; i < 4; i++)
@@ -2055,8 +2362,12 @@ bool Recompiler::Recompile(
         println("\t_mm_store_si128((__m128i*){}.u8, _mm_subs_epu8(_mm_load_si128((__m128i*){}.u8), _mm_load_si128((__m128i*){}.u8)));", v(insn.operands[0]), v(insn.operands[1]), v(insn.operands[2]));
         break;

+    case PPC_INST_VSUBUBM:
+        println("\t_mm_store_si128((__m128i*){}.u8, _mm_sub_epi8(_mm_load_si128((__m128i*){}.u8), _mm_load_si128((__m128i*){}.u8)));", v(insn.operands[0]), v(insn.operands[1]), v(insn.operands[2]));
+        break;
+
     case PPC_INST_VSUBUHM:
-        println("\t_mm_store_si128((__m128i*){}.u8, _mm_sub_epi16(_mm_load_si128((__m128i*){}.u8), _mm_load_si128((__m128i*){}.u8)));", v(insn.operands[0]), v(insn.operands[1]), v(insn.operands[2]));
+        println("\t_mm_store_si128((__m128i*){}.u8, _mm_sub_epi16(_mm_load_si128((__m128i*){}.u16), _mm_load_si128((__m128i*){}.u16)));", v(insn.operands[0]), v(insn.operands[1]), v(insn.operands[2]));
         break;

     case PPC_INST_VUPKD3D128:
diff --git a/PowerUtils/ppc_context.h b/PowerUtils/ppc_context.h
index 64c257d..727492b 100644
--- a/PowerUtils/ppc_context.h
+++ b/PowerUtils/ppc_context.h
@@ -644,6 +644,19 @@ inline __m128i _mm_vctsxs(__m128 src1)
     return _mm_andnot_si128(_mm_castps_si128(xmm2), _mm_castps_si128(dest));
 }

+inline __m128i _mm_vctuxs(__m128 src1)
+{
+    __m128 xmm0 = _mm_max_ps(src1, _mm_setzero_ps());
+    __m128 xmm1 = _mm_cmpge_ps(xmm0, _mm_set1_ps((float)0x80000000));
+    __m128 xmm2 = _mm_sub_ps(xmm0, _mm_set1_ps((float)0x80000000));
+    xmm0 = _mm_blendv_ps(xmm0, xmm2, xmm1);
+    __m128i dest = _mm_cvttps_epi32(xmm0);
+    __m128i xmm3 = _mm_cmpeq_epi32(dest, _mm_set1_epi32(INT_MIN));
+    __m128i xmm4 = _mm_and_si128(_mm_castps_si128(xmm1), _mm_set1_epi32(INT_MIN));
+    dest = _mm_add_epi32(dest, xmm4);
+    return _mm_or_si128(dest, xmm3);
+}
+
 inline __m128i _mm_vsr(__m128i a, __m128i b)
 {
     b = _mm_srli_epi64(_mm_slli_epi64(b, 61), 61);