fixup! x86_64: Enhance the effectiveness of the register cache
bjorng committed Jan 21, 2024
1 parent a8c5423 commit c2ea182
Showing 6 changed files with 177 additions and 121 deletions.
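
Every change in this diff wraps emitted instructions in a preserve_cache(callback, {registers}) call. As a rough, self-contained sketch of that calling pattern only — not the actual helper in beam_asm.hpp, whose signature and cache bookkeeping are richer — the shape appears to be: run the code generator, then drop only the cache entries that mention a clobbered register instead of flushing the whole register cache. The Gp and CacheEntry types and the invalidation details below are illustrative assumptions.

    #include <algorithm>
    #include <initializer_list>
    #include <vector>

    /* Stand-in for asmjit's x86::Gp register handle. */
    struct Gp {
        int id;
    };
    inline bool operator==(Gp a, Gp b) {
        return a.id == b.id;
    }

    /* One cache slot: "register `reg` currently mirrors this BEAM memory slot". */
    struct CacheEntry {
        Gp reg;
        int beam_slot;
    };

    static std::vector<CacheEntry> cache;

    /* Run the code generator, then drop only those cache entries whose
     * register appears in the clobber list. */
    template <typename Generate>
    void preserve_cache(Generate generate, std::initializer_list<Gp> clobbered) {
        generate(); /* emit the instructions */
        cache.erase(std::remove_if(cache.begin(),
                                   cache.end(),
                                   [&](const CacheEntry &entry) {
                                       return std::find(clobbered.begin(),
                                                        clobbered.end(),
                                                        entry.reg) !=
                                              clobbered.end();
                                   }),
                    cache.end());
    }

Calls in the diff such as preserve_cache([&]() { a.add(RET, ARG2); }, {RET, ARG2}); follow this shape: the lambda emits code and the brace list names the registers it writes.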
36 changes: 22 additions & 14 deletions erts/emulator/beam/jit/x86/beam_asm.hpp
@@ -1082,7 +1082,8 @@ class BeamModuleAssembler : public BeamAssembler,
         for (int slot = 0; slot < num_cache_entries; slot++) {
             if (reg == cache[slot].reg) {
                 return false;
-            } else if (cache[slot].mem.hasBase() && cache[slot].mem.baseReg() == reg) {
+            } else if (cache[slot].mem.hasBase() &&
+                       cache[slot].mem.baseReg() == reg) {
                 return false;
             }
         }
@@ -1237,7 +1238,8 @@ class BeamModuleAssembler : public BeamAssembler,
         if (is_cache_valid()) {
             /* We never pick ARG2 because it is used to point to the
              * currently active tuple. */
-            for (x86::Gp reg : {TMP1, TMP2, ARG3, ARG4, ARG5, ARG6, ARG1, RET}) {
+            for (x86::Gp reg :
+                 {TMP1, TMP2, ARG3, ARG4, ARG5, ARG6, ARG1, RET}) {
                 if (is_not_cached(reg)) {
                     return reg;
                 }
@@ -1648,9 +1650,13 @@ class BeamModuleAssembler : public BeamAssembler,
                     },
                     {to});
         } else if (from.isLambda()) {
-            preserve_cache([&]() {
-                make_move_patch(to, lambdas[from.as<ArgLambda>().get()].patches);
-            }, {to});
+            preserve_cache(
+                    [&]() {
+                        make_move_patch(
+                                to,
+                                lambdas[from.as<ArgLambda>().get()].patches);
+                    },
+                    {to});
         } else if (from.isLiteral()) {
             preserve_cache(
                     [&]() {
@@ -1734,15 +1740,17 @@ class BeamModuleAssembler : public BeamAssembler,
     }
 
     void mov_arg(const ArgVal &to, BeamInstr from, const x86::Gp &spill) {
-        preserve_cache([&]() {
-            if (Support::isInt32((Sint)from)) {
-                a.mov(getArgRef(to), imm(from));
-            } else {
-                a.mov(spill, imm(from));
-                mov_arg(to, spill);
-            }
-            invalidate_cache(getArgRef(to));
-        }, {spill});
+        preserve_cache(
+                [&]() {
+                    if (Support::isInt32((Sint)from)) {
+                        a.mov(getArgRef(to), imm(from));
+                    } else {
+                        a.mov(spill, imm(from));
+                        mov_arg(to, spill);
+                    }
+                    invalidate_cache(getArgRef(to));
+                },
+                {spill});
     }
 
     void mov_arg(const ArgVal &to, const ArgVal &from, const x86::Gp &spill) {
42 changes: 27 additions & 15 deletions erts/emulator/beam/jit/x86/instr_arith.cpp
@@ -198,15 +198,21 @@ void BeamModuleAssembler::emit_i_plus(const ArgSource &LHS,
                 },
                 {RET});
     } else if (RHS.isSmall()) {
-        preserve_cache([&]() {
-            mov_imm(ARG2, RHS.as<ArgSmall>().get() & ~_TAG_IMMED1_MASK);
-            a.add(RET, ARG2);
-        }, {RET, ARG2});
+        preserve_cache(
+                [&]() {
+                    mov_imm(ARG2,
+                            RHS.as<ArgSmall>().get() & ~_TAG_IMMED1_MASK);
+                    a.add(RET, ARG2);
+                },
+                {RET, ARG2});
     } else {
         mov_arg(ARG2, RHS);
-        preserve_cache([&]() {
-            a.lea(RET, x86::qword_ptr(RET, ARG2, 0, -_TAG_IMMED1_SMALL));
-        }, {RET});
+        preserve_cache(
+                [&]() {
+                    a.lea(RET,
+                          x86::qword_ptr(RET, ARG2, 0, -_TAG_IMMED1_SMALL));
+                },
+                {RET});
     }
 
     mov_arg(Dst, RET);
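
The addition code above relies on how small integers are tagged. Assuming the usual 64-bit encoding where a small N is (N << 4) | 0xF (_TAG_IMMED1_SMALL), the LEA's displacement of -_TAG_IMMED1_SMALL cancels the doubled tag, and the immediate path strips the RHS tag up front so a plain ADD suffices. A standalone sketch of that arithmetic — the constants are assumptions for illustration, not taken from this diff:

    #include <cassert>
    #include <cstdint>

    static const uint64_t TAG_IMMED1_SMALL = 0xF; /* assumed value */
    static const uint64_t TAG_IMMED1_MASK = 0xF;  /* assumed value */

    static uint64_t make_small(int64_t n) {
        return (static_cast<uint64_t>(n) << 4) | TAG_IMMED1_SMALL;
    }

    int main() {
        uint64_t lhs = make_small(100), rhs = make_small(23);

        /* General case: lea RET, [RET + ARG2 - _TAG_IMMED1_SMALL].
         * The two tags add up to 2 * 0xF, so subtracting one tag
         * leaves a correctly tagged result. */
        uint64_t sum = lhs + rhs - TAG_IMMED1_SMALL;
        assert(sum == make_small(123));

        /* Immediate case: the RHS tag is stripped up front
         * (RHS & ~_TAG_IMMED1_MASK), so a plain ADD suffices. */
        uint64_t untagged_rhs = make_small(23) & ~TAG_IMMED1_MASK;
        assert(lhs + untagged_rhs == make_small(123));
        return 0;
    }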
@@ -1405,16 +1411,22 @@ void BeamModuleAssembler::emit_i_bxor(const ArgLabel &Fail,
         comment("skipped test for small operands since they are always small");
         mov_arg(RET, LHS);
         if (RHS.isImmed() && Support::isInt32((Sint)RHS.as<ArgSmall>().get())) {
-            preserve_cache([&]() {
-                a.xor_(RET, imm(RHS.as<ArgSmall>().get() & ~_TAG_IMMED1_SMALL));
-            }, {RET});
+            preserve_cache(
+                    [&]() {
+                        a.xor_(RET,
+                               imm(RHS.as<ArgSmall>().get() &
+                                   ~_TAG_IMMED1_SMALL));
+                    },
+                    {RET});
         } else {
             mov_arg(ARG2, RHS);
-            preserve_cache([&]() {
-                /* TAG ^ TAG = 0, so we need to tag it again. */
-                a.xor_(RET, ARG2);
-                a.or_(RET, imm(_TAG_IMMED1_SMALL));
-            }, {RET});
+            preserve_cache(
+                    [&]() {
+                        /* TAG ^ TAG = 0, so we need to tag it again. */
+                        a.xor_(RET, ARG2);
+                        a.or_(RET, imm(_TAG_IMMED1_SMALL));
+                    },
+                    {RET});
         }
         mov_arg(Dst, RET);
         return;
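
The "TAG ^ TAG = 0" comment is the whole story for bxor: XOR of two identically tagged smalls clears the tag, so it has to be OR'ed back, whereas an immediate whose tag bits were cleared in advance leaves the LHS tag untouched. A small standalone check, using the same assumed (N << 4) | 0xF encoding as in the sketch above:

    #include <cassert>
    #include <cstdint>

    static const uint64_t TAG_IMMED1_SMALL = 0xF; /* assumed value */

    static uint64_t make_small(uint64_t n) {
        return (n << 4) | TAG_IMMED1_SMALL;
    }

    int main() {
        uint64_t lhs = make_small(0xC), rhs = make_small(0xA);

        /* Register case: both operands carry the tag, 0xF ^ 0xF == 0,
         * so the result must be re-tagged. */
        uint64_t x = lhs ^ rhs; /* tag bits are now 0 */
        x |= TAG_IMMED1_SMALL;  /* a.or_(RET, imm(_TAG_IMMED1_SMALL)) */
        assert(x == make_small(0x6));

        /* Immediate case: the constant is emitted with its tag cleared
         * (value & ~_TAG_IMMED1_SMALL), so XOR leaves the LHS tag alone. */
        uint64_t cleared = make_small(0xA) & ~TAG_IMMED1_SMALL;
        assert((lhs ^ cleared) == make_small(0x6));
        return 0;
    }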
