Emit better x86_64 asm for constant loads
Instead of always emitting movabs, emit a regular mov or an xor. This slims down sequences like: movabs $0,%rax; mov %rsi,%rax — to: xor %eax,%eax // also zeroes the upper 32 bits; mov %rsi,%rax. Future work is to emit just: xor %esi,%esi
This commit is contained in:
parent
b668b72b06
commit
c21576f8a3
1 changed file with 9 additions and 2 deletions
|
@ -489,8 +489,15 @@ void load(int r, SValue *sv)
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
} else if (is64_type(ft)) {
|
} else if (is64_type(ft)) {
|
||||||
orex(1,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
|
if (sv->c.i > UINT32_MAX) {
|
||||||
|
orex(1,r,0, 0xb8 + REG_VALUE(r)); /* movabs $xx, r */
|
||||||
gen_le64(sv->c.i);
|
gen_le64(sv->c.i);
|
||||||
|
} else if (sv->c.i > 0) {
|
||||||
|
orex(0,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
|
||||||
|
gen_le32(sv->c.i);
|
||||||
|
} else {
|
||||||
|
o(0xc031 + REG_VALUE(r) * 0x900); /* xor r, r */
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
orex(0,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
|
orex(0,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
|
||||||
gen_le32(fc);
|
gen_le32(fc);
|
||||||
|
|
Loading…
Reference in a new issue