/*
* VideoCore IV code generator for the ACK
* © 2013 David Given
* This file is redistributable under the terms of the 3-clause BSD license.
* See the file 'Copying' in the root of the distribution for the full text.
*/
EM_WSIZE = 4
EM_PSIZE = 4
EM_BSIZE = 8 /* two words saved in call frame */
BYTE = 1 /* Size of values */
WORD = 2
QUAD = 4
FP_OFFSET = 0 /* Offset of saved FP relative to our FP */
PC_OFFSET = 4 /* Offset of saved PC relative to our FP */
#define COMMENT(n) /* noop */
#define nicesize(x) ((x)==BYTE || (x)==WORD || (x)==QUAD)
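/*
 * Note on addressing, as inferred from the patterns below (not from any
 * separate documentation): EM pointers are kept GP-relative. Address
 * computations (lal, moves from LABEL, etc.) therefore end with a
 * 'sub ..., GP', and dereferences (loi, sti, ldb and friends) add GP back,
 * either with an explicit add or via (reg, GP) indexed addressing.
 */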
PROPERTIES
GPR /* any GPR */
REG /* any allocatable GPR */
STACKABLE /* a push/popable register (r0, r6, r16, fp) */
GPR0 GPR1 GPR2 GPR3 GPR4 GPR5 GPR6 GPR7
GPR8 GPR9 GPR10 GPR11 GPR12 GPR13 GPR14 GPR15
GPR16 GPR17 GPR18 GPR19 GPR20 GPR21 GPR22 GPR23
GPRGP GPRFP GPRSP GPRLR GPRPC
REGISTERS
R0("r0") : GPR, REG, STACKABLE, GPR0.
R1("r1") : GPR, REG, GPR1.
R2("r2") : GPR, REG, GPR2.
R3("r3") : GPR, REG, GPR3.
R4("r4") : GPR, REG, GPR4.
R5("r5") : GPR, REG, GPR5.
R6("r6") : GPR, REG, STACKABLE, GPR6 regvar.
R7("r7") : GPR, REG, GPR7 regvar.
R8("r8") : GPR, REG, GPR8 regvar.
R9("r9") : GPR, REG, GPR9 regvar.
R10("r10") : GPR, REG, GPR10 regvar.
R11("r11") : GPR, REG, GPR11 regvar.
R12("r12") : GPR, REG, GPR12 regvar.
R13("r13") : GPR, REG, GPR13 regvar.
R14("r14") : GPR, REG, GPR14 regvar.
GP("r15") : GPR, GPRGP.
R23("r23") : GPR.
FP("fp") : GPR, GPRFP, STACKABLE.
SP("sp") : GPR, GPRSP.
LR("lr") : GPR, GPRLR.
PC("pc") : GPR, GPRPC.
/* r26 to r31 are special and the code generator doesn't touch them. */
#define SCRATCH R23
TOKENS
/* Used only in instruction descriptions (to generate the correct syntax). */
GPROFFSET = { GPR reg; INT off; } 4 off "(" reg ")".
GPRGPR = { GPR reg1; GPR reg2; } 4 "(" reg1 "," reg2 ")".
/* Primitives */
LABEL = { ADDR adr; } 4 adr.
CONST = { INT val; } 4 "#" val.
/* Allows us to use regvar() to refer to registers */
GPRE = { GPR reg; } 4 reg.
/* The results of comparisons. */
TRISTATE_RC_S = { GPR reg; INT val; } 4.
TRISTATE_RC_U = { GPR reg; INT val; } 4.
TRISTATE_RR_S = { GPR reg1; GPR reg2; } 4.
TRISTATE_RR_U = { GPR reg1; GPR reg2; } 4.
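/* The TRISTATE tokens above have no output syntax; they are only referenced
 * by the tristate compare patterns later in this file, which are currently
 * disabled with #if 0. */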
SETS
TOKEN = LABEL + CONST.
GPRI = GPR + GPRE.
INSTRUCTIONS
add GPRI:wo, GPRI:ro, GPRI+CONST:ro.
add GPRI:rw, GPRI+CONST:ro.
and GPRI:rw, GPRI+CONST:ro.
asr GPRI:rw, GPRI+CONST:ro.
beq "b.eq" LABEL:ro.
bne "b.ne" LABEL:ro.
bgt "b.gt" LABEL:ro.
bgt "b.gt" LABEL:ro.
bhi "b.hi" LABEL:ro.
b GPRI+LABEL:ro.
bl GPRI+LABEL:ro.
cmp GPRI:ro, GPRI+CONST:ro kills :cc.
divs GPRI:wo, GPRI:ro, GPRI+CONST:ro.
divu GPRI:wo, GPRI:ro, GPRI+CONST:ro.
eor GPRI:rw, GPRI+CONST:ro.
exts GPRI:wo, GPRI:ro, GPRI+CONST:ro.
exts GPRI:rw, GPRI+CONST:ro.
fadd GPRI:wo, GPRI:ro, GPRI:ro.
fcmp GPRI:wo, GPRI:ro, GPRI:ro.
fdiv GPRI:wo, GPRI:ro, GPRI:ro.
fmul GPRI:wo, GPRI:ro, GPRI:ro.
fsub GPRI:wo, GPRI:ro, GPRI:ro.
ld GPRI:wo, GPROFFSET+GPRGPR+LABEL:ro.
ldb GPRI:wo, GPROFFSET+GPRGPR+LABEL:ro.
ldh GPRI:wo, GPROFFSET+GPRGPR+LABEL:ro.
ldhs GPRI:wo, GPROFFSET+GPRGPR+LABEL:ro.
lea GPRI:wo, LABEL:ro.
lsl GPRI:rw, GPRI+CONST:ro.
lsl GPRI:wo, GPRI:ro, GPRI+CONST:ro.
lsr GPRI:rw, GPRI+CONST:ro.
mov GPRI:wo, GPRI+CONST:ro.
mul GPRI:rw, GPRI+CONST:ro.
neg GPRI:rw, GPRI+CONST:ro.
or GPRI:rw, GPRI+CONST:ro.
pop STACKABLE:wo.
pop STACKABLE:wo, GPRLR+GPRPC:wo.
push STACKABLE:ro.
sub GPRI:wo, GPRI:ro, CONST+GPRI:ro.
sub GPRI:rw, GPRI+CONST:ro.
st GPRI:ro, GPROFFSET+GPRGPR+LABEL:ro.
stb GPRI:ro, GPROFFSET+GPRGPR+LABEL:ro.
sth GPRI:ro, GPROFFSET+GPRGPR+LABEL:ro.
sths GPRI:ro, GPROFFSET+GPRGPR+LABEL:ro.
invalid "invalid".
MOVES
from GPR to GPR
gen
COMMENT("mov GPR->GPR")
mov %2, %1
/* GPRE exists solely to allow us to use regvar() (which can only be used in
an expression) as a register constant. */
from GPRE to GPR
gen
mov %2, %1
/* Constants */
from CONST to GPR
gen
mov %2, %1
from LABEL to GPR
gen
lea %2, {LABEL, %1.adr}
sub %2, GP
/* Miscellaneous */
from CONST+LABEL+GPR+GPRE to GPRE
gen
move %1, %2.reg
TESTS
to test GPR
gen
cmp %1, {CONST, 0}
STACKINGRULES
from STACKABLE to STACK
gen
push %1
from GPR to STACK
uses STACKABLE
gen
move %1, %a
push %a
from GPR to STACK
gen
sub SP, SP, {CONST, 4}
st %1, {GPROFFSET, SP, 0}
from GPRE to STACK
uses STACKABLE
gen
move %1, %a
push %a
from GPRE to STACK
gen
sub SP, {CONST, 4}
st %1, {GPROFFSET, SP, 0}
from TOKEN to STACK
uses STACKABLE
gen
move %1, %a
push %a
from TOKEN to STACK
gen
COMMENT("fallback stack")
move %1, SCRATCH
sub SP, SP, {CONST, 4}
st SCRATCH, {GPROFFSET, SP, 0}
COERCIONS
from GPRE
uses reusing %1, REG=%1
yields %a
from CONST
uses REG
gen
COMMENT("coerce CONST->REG")
move %1, %a
yields %a
from LABEL
uses REG
gen
COMMENT("coerce LABEL->REG")
move %1, %a
yields %a
from STACK
uses STACKABLE
gen
COMMENT("coerce STACK->REG")
pop %a
yields %a
PATTERNS
/* Intrinsics */
pat loc /* Load constant */
yields {CONST, $1}
pat dup $1<=QUAD /* Duplicate word on top of stack */
with GPRI
yields %1 %1
pat dup $1<=(2*QUAD) /* Duplicate word pair on top of stack */
with GPRI GPRI
yields %1 %2 %1 %2
pat exg $1==QUAD /* Exchange top two words on stack */
with GPRI GPRI
yields %1 %2
#if 0
pat stl lol $1==$2 /* Store then load local */
leaving
dup QUAD
stl $1
#endif
pat lal sti lal loi $1==$3 && $2==$4 /* Store then load local, of a size other than a word */
leaving
dup $2
lal $1
sti $2
pat ste loe $1==$2 /* Store then load external */
leaving
dup 4
ste $1
/* Type conversions */
pat loc loc cii loc loc cii $1==$4 && $2==$5 /* madness, generated by the C compiler */
leaving
loc $1
loc $2
cii
pat loc loc cii loc loc cii $2==QUAD && $5==QUAD && $4<$2 /* madness, generated by the C compiler */
leaving
loc $4
loc $5
cii
pat loc loc ciu /* signed X -> unsigned X */
leaving
loc $1
loc $2
cuu
pat loc loc cuu $1==$2 /* unsigned X -> unsigned X */
/* nop */
pat loc loc cii $1==$2 /* signed X -> signed X */
/* nop */
pat loc loc cui $1==$2 /* unsigned X -> signed X */
/* nop */
pat loc loc cui $1==BYTE && $2==QUAD /* unsigned char -> signed int */
/* nop */
pat loc loc cui $1==WORD && $2==QUAD /* unsigned short -> signed int */
/* nop */
pat loc loc cii $1==BYTE && $2>BYTE /* signed char -> anything */
with REG
uses reusing %1, REG=%1
gen
exts %a, {CONST, 8}
yields %a
pat loc loc cii $1==WORD && $2>WORD /* signed short -> anything */
with REG
uses reusing %1, REG=%1
gen
exts %a, {CONST, 16}
yields %a
/* Local variables */
pat lal /* Load address of local */
uses REG
gen
add %a, FP, {CONST, $1}
sub %a, GP
yields %a
pat lol inreg($1)>0 /* Load from local */
yields {GPRE, regvar($1)}
pat lol /* Load quad from local */
uses REG
gen
ld %a, {GPROFFSET, FP, $1}
yields %a
pat ldl /* Load double-word from local */
leaving
lal $1
loi QUAD*2
pat stl inreg($1)>0 /* Store to local */
with CONST+GPRI
kills regvar($1)
gen
move %1, {GPRE, regvar($1)}
pat stl /* Store to local */
with GPRI
gen
st %1, {GPROFFSET, FP, $1}
pat sdl /* Store double-word to local */
leaving
lal $1
sti QUAD*2
pat lil inreg($1)>0 /* Load from indirected local */
uses REG
gen
ld %a, {GPROFFSET, regvar($1), 0}
yields %a
pat lil /* Load from indirected local */
leaving
lol $1
loi QUAD
pat sil /* Save to indirected local */
leaving
lol $1
sti QUAD
pat stl lol $1==$2 /* Save then load (generated by C compiler) */
leaving
dup 4
stl $1
pat zrl /* Zero local */
leaving
loc 0
stl $1
pat inl /* Increment local */
leaving
lol $1
loc 1
adi 4
stl $1
pat del /* Decrement local */
leaving
lol $1
loc 1
sbi 4
stl $1
/* Global variables */
pat lpi /* Load address of external function */
leaving
lae $1
pat lae /* Load address of external */
yields {LABEL, $1}
pat loe /* Load word external */
leaving
lae $1
loi QUAD
pat ste /* Store word external */
leaving
lae $1
sti QUAD
pat zre /* Zero external */
leaving
loc 0
ste $1
pat ine /* Increment external */
leaving
lae $1
dup QUAD
loi QUAD
inc
sti QUAD
pat dee /* Decrement external */
leaving
lae $1
dup QUAD
loi QUAD
dec
sti QUAD
pat lde /* Load double-word external */
uses REG, REG
gen
lea %a, {LABEL, $1}
ld %b, {GPROFFSET, %a, 4}
ld %a, {GPROFFSET, %a, 0}
yields %b %a
/* Structures */
pat lof /* Load word offsetted */
leaving
adp $1
loi QUAD
pat ldf /* Load double offsetted */
with GPRI
uses reusing %1, REG=%1, REG
gen
add %a, GP
ld %b, {GPROFFSET, %a, $1+4}
ld %a, {GPROFFSET, %a, $1+0}
yields %a %b
pat stf /* Store word offsetted */
leaving
adp $1
sti QUAD
pat sdf /* Store double offsetted */
with GPRI GPRI GPRI
uses reusing %3, REG=%3
gen
add %a, GP
st %1, {GPROFFSET, %a, $1+0}
st %2, {GPROFFSET, %a, $1+4}
/* Loads and stores */
pat loi $1==BYTE /* Load byte indirect */
with GPR
uses reusing %1, REG
gen
ldb %a, {GPRGPR, %1, GP}
yields %a
with GPRE
uses reusing %1.reg, REG
gen
ldb %a, {GPRGPR, %1.reg, GP}
yields %a
#if 0
pat loi loc loc cii $1==WORD && $2==WORD && $3==QUAD /* Load half-word indirect and sign extend */
with GPR
uses REG
gen
lha %a, {GPROFFSET, %1, 0}
yields %a
with SUM_RR
uses reusing %1, REG
gen
lhax %a, %1.reg1, %1.reg2
yields %a
with SUM_RC
uses REG
gen
move {IND_RC_H_S, %1.reg, %1.off}, %a
yields %a
pat loi $1==WORD /* Load half-word indirect */
with GPR
uses REG
gen
lhz %a, {GPROFFSET, %1, 0}
yields %a
with SUM_RR
uses reusing %1, REG
gen
lhzx %a, %1.reg1, %1.reg2
yields %a
with SUM_RC
uses REG
gen
move {IND_RC_H, %1.reg, %1.off}, %a
yields %a
#endif
pat loi $1==QUAD /* Load quad indirect */
with GPR
uses reusing %1, REG
gen
add %a, %1, GP
ld %a, {GPROFFSET, %a, 0}
yields %a
pat loi /* Load arbitrary size */
leaving
loc $1
los QUAD
pat los /* Load arbitrary size */
with STACK
kills ALL
gen
bl {LABEL, ".los"}
pat sti $1==BYTE /* Store byte indirect */
with GPR GPRI
gen
stb %2, {GPRGPR, %1, GP}
with GPRE GPRI
gen
stb %2, {GPRGPR, %1.reg, GP}
pat sti $1==WORD /* Store half-word indirect */
with GPR GPR
uses REG
gen
add %a, %1, GP
sth %2, {GPROFFSET, %a, 0}
pat sti $1==QUAD /* Store quad indirect */
with GPR GPR
uses REG
gen
add %a, %1, GP
st %2, {GPROFFSET, %a, 0}
pat sti /* Store arbitrary size */
leaving
loc $1
sts QUAD
pat sts /* Store arbitrary size */
with STACK
kills ALL
gen
bl {LABEL, ".sts"}
/* Arithmetic wrappers */
pat ads $1==4 /* Add var to pointer */
leaving adi $1
pat sbs $1==4 /* Subtract var from pointer */
leaving sbi $1
pat adp /* Add constant to pointer */
leaving
loc $1
adi 4
pat adu /* Add unsigned */
leaving
adi $1
pat sbu /* Subtract unsigned */
leaving
sbi $1
pat inc /* Add 1 */
leaving
loc 1
adi 4
pat dec /* Subtract 1 */
leaving
loc 1
sbi 4
pat loc mlu $2==2 /* Unsigned multiply by constant */
leaving
loc $1
mli 4
pat mlu /* Unsigned multiply by var */
leaving
mli $1
pat loc slu /* Shift left unsigned by constant amount */
leaving
loc $1
sli $2
pat slu /* Shift left unsigned by variable amount */
leaving
sli $1
/* Word arithmetic */
pat adi $1==QUAD /* Add word (second + top) */
with GPRI+CONST GPRI
uses reusing %2, REG=%2
gen
add %a, %1
yields %a
with GPRI GPRI+CONST
uses reusing %1, REG=%1
gen
add %a, %2
yields %a
pat sbi $1==QUAD /* Subtract word (second - top) */
with GPRI+CONST GPRI
uses reusing %2, REG=%2
gen
sub %a, %1
yields %a
pat mli $1==QUAD /* Multiply word (second * top) */
with GPRI+CONST GPRI
uses reusing %2, REG=%2
gen
mul %a, %1
yields %a
with GPRI GPRI+CONST
uses reusing %1, REG=%1
gen
mul %a, %2
yields %a
pat dvi $1==QUAD /* Divide word (second / top) */
with GPRI GPRI
uses reusing %2, REG
gen
divs %a, %2, %1
yields %a
pat dvu $1==QUAD /* Divide unsigned word (second / top) */
with GPRI GPRI
uses reusing %2, REG
gen
divu %a, %2, %1
yields %a
pat rmu $1==QUAD /* Remainder unsigned word (second % top) */
with GPRI GPRI
uses REG
gen
divu %a, %2, %1
mul %a, %1
sub %a, %2
yields %a
pat rmi $1==QUAD /* Remainder signed word (second % top) */
with GPRI GPRI
uses REG
gen
divs %a, %2, %1
mul %a, %1
sub %a, %2
yields %a
pat ngi $1==QUAD /* Negate word */
with GPRI
uses reusing %1, REG=%1
gen
neg %a, %a
yields %a
pat and $1==QUAD /* AND word */
with GPRI+CONST GPRI
uses reusing %2, REG=%2
gen
and %a, %1
yields %a
with GPRI GPRI+CONST
uses reusing %1, REG=%1
gen
and %a, %2
yields %a
pat ior $1==QUAD /* OR word */
with GPRI+CONST GPRI
uses reusing %2, REG=%2
gen
or %a, %1
yields %a
with GPRI GPRI+CONST
uses reusing %1, REG=%1
gen
or %a, %2
yields %a
pat xor $1==QUAD /* XOR word */
with GPRI+CONST GPRI
uses reusing %2, REG=%2
gen
eor %a, %1
yields %a
with GPRI GPRI+CONST
uses reusing %1, REG=%1
gen
eor %a, %2
yields %a
#if 0
pat mli $1==4 /* Multiply word (second * top) */
with REG REG
uses reusing %2, REG
gen
mullw %a, %2, %1
yields %a
pat and !defined($1) /* AND set */
with STACK
gen
bl {LABEL, ".and"}
pat ior !defined($1) /* OR set */
with STACK
gen
bl {LABEL, ".ior"}
pat xor $1==4 /* XOR word */
with GPR GPR
yields {XOR_RR, %1, %2}
with GPR CONST
yields {XOR_RC, %1, %2.val}
with CONST GPR
yields {XOR_RC, %2, %1.val}
pat xor !defined($1) /* XOR set */
with STACK
gen
bl {LABEL, ".xor"}
pat com $1==QUAD /* NOT word */
with AND_RR
uses REG
gen
nand %a, %1.reg1, %1.reg2
yields %a
with OR_RR
uses REG
gen
nor %a, %1.reg1, %1.reg2
yields %a
with XOR_RR
uses REG
gen
eqv %a, %1.reg1, %1.reg2
yields %a
with GPR
yields {NOT_R, %1}
pat com !defined($1) /* NOT set */
with STACK
gen
bl {LABEL, ".com"}
#endif
pat sli $1==4 /* Shift left (second << top) */
with CONST+GPRI GPRI
uses reusing %2, REG=%2
gen
lsl %a, %1
yields %a
pat sri $1==4 /* Shift right signed (second >> top) */
with CONST+GPRI GPRI
uses reusing %2, REG=%2
gen
asr %a, %1
yields %a
pat sru $1==4 /* Shift right unsigned (second >> top) */
with CONST+GPRI GPRI
uses reusing %2, REG=%2
gen
lsr %a, %1
yields %a
/* Arrays */
pat aar $1==QUAD /* Index array */
with STACK
uses GPR0
gen
bl {LABEL, ".aar4stack"}
yields R0
with GPR0 GPR1 GPR2
uses GPR0
gen
bl {LABEL, ".aar4"}
yields R0
pat lae lar $2==QUAD && nicesize(rom($1, 3)) /* Load array */
leaving
lae $1
aar QUAD
loi rom($1, 3)
pat lar $1==QUAD /* Load array */
with STACK
uses GPR0
gen
bl {LABEL, ".lar4stack"}
yields R0
with GPR0 GPR1 GPR2
uses GPR0
gen
bl {LABEL, ".lar4"}
yields R0
pat lae sar $2==QUAD && nicesize(rom($1, 3)) /* Store array */
leaving
lae $1
aar QUAD
sti rom($1, 3)
pat sar $1==QUAD /* Store array */
with STACK
uses GPR0
gen
bl {LABEL, ".sar4stack"}
yields R0
with GPR0 GPR1 GPR2
uses GPR0
gen
bl {LABEL, ".sar4"}
/* Sets */
pat set defined($1) /* Create word with set bit */
leaving
loc 1
exg $1
sli $1
pat set !defined($1) /* Create structure with set bit (variable) */
with STACK
gen
bl {LABEL, ".set"}
pat inn defined($1) /* Test for set bit */
leaving
set QUAD
and QUAD
pat inn !defined($1) /* Test for set bit (variable) */
with GPR0 STACK
gen
bl {LABEL, ".inn"}
yields R0
pat ior !defined($1) /* Or two sets */
with STACK
gen
bl {LABEL, ".ior"}
/* Boolean resolutions */
proc cm_t example teq
with GPRI GPRI
uses reusing %1, REG
gen
cmp %1, %2
mov %a, {CONST, 0}
add[1] %a, {CONST, 1}
yields %a
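/* In cm_t above (and the similar procs below), [1] stands for the mnemonic
 * string passed at the call site, e.g. cm_t("add.eq") emits a conditional
 * 'add.eq' that adds 1 only when the preceding cmp set the matching
 * condition, leaving 0 or 1 in %a. */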
pat cmu teq call cm_t("add.eq") /* top = (second == top) */
pat cmu tne call cm_t("add.ne") /* top = (second != top) */
pat cmu tlt call cm_t("add.lo") /* top = unsigned (second < top) */
pat cmu tle call cm_t("add.ls") /* top = unsigned (second <= top) */
pat cmu tgt call cm_t("add.hi") /* top = unsigned (second > top) */
pat cmu tge call cm_t("add.hs") /* top = unsigned (second >= top) */
pat cmi teq call cm_t("add.eq") /* top = (second == top) */
pat cmi tne call cm_t("add.ne") /* top = (second != top) */
pat cmi tlt call cm_t("add.lt") /* top = signed (second < top) */
pat cmi tle call cm_t("add.le") /* top = signed (second <= top) */
pat cmi tgt call cm_t("add.gt") /* top = signed (second > top) */
pat cmi tge call cm_t("add.ge") /* top = signed (second >= top) */
proc cmf_t example teq
with GPRI GPRI
uses reusing %1, REG
gen
fcmp %a, %1, %2
mov %a, {CONST, 0}
add[1] %a, {CONST, 1}
yields %a
pat cmf teq call cmf_t("add.eq") /* top = float (second == top) */
pat cmf tne call cmf_t("add.ne") /* top = float (second != top) */
pat cmf tlt call cmf_t("add.lo") /* top = float (second < top) */
pat cmf tle call cmf_t("add.ls") /* top = float (second <= top) */
pat cmf tgt call cmf_t("add.hi") /* top = float (second > top) */
pat cmf tge call cmf_t("add.hs") /* top = float (second >= top) */
proc fallback_t example teq
with GPRI
uses reusing %1, REG
gen
cmp %1, {CONST, 0}
mov %a, {CONST, 0}
add[1] %a, {CONST, 1}
yields %a
pat teq call fallback_t("add.eq") /* top = (top == 0) */
pat tne call fallback_t("add.ne") /* top = (top != 0) */
pat tlt call fallback_t("add.lo") /* top = (top < 0) */
pat tle call fallback_t("add.ls") /* top = (top <= 0) */
pat tgt call fallback_t("add.hi") /* top = (top > 0) */
pat tge call fallback_t("add.hs") /* top = (top >= 0) */
/* Simple branches */
proc anyz example zeq
with GPRI STACK
gen
cmp %1, {CONST, 0}
beq[1] {LABEL, $1}
pat zeq call anyz("b.eq") /* Branch if signed top == 0 */
pat zne call anyz("b.ne") /* Branch if signed top != 0 */
pat zgt call anyz("b.gt") /* Branch if signed top > 0 */
pat zlt call anyz("b.lt") /* Branch if signed top < 0 */
pat zge call anyz("b.ge") /* Branch if signed top >= 0 */
pat zle call anyz("b.le") /* Branch if signed top <= 0 */
proc anyb example beq
with GPRI+CONST GPRI STACK
gen
cmp %2, %1
beq[1] {LABEL, $1}
pat beq call anyb("b.eq") /* Branch if signed second == top */
pat bne call anyb("b.ne") /* Branch if signed second != top */
pat bgt call anyb("b.gt") /* Branch if signed second > top */
pat bge call anyb("b.ge") /* Branch if signed second >= top */
pat blt call anyb("b.lt") /* Branch if signed second < top */
pat ble call anyb("b.le") /* Branch if signed second <= top */
proc cmu_z example cmu zeq
with GPR+CONST GPRI STACK
gen
cmp %2, %1
beq[1] {LABEL, $2}
pat cmu zgt call cmu_z("b.hi") /* Branch if unsigned second > top */
pat cmu zlt call cmu_z("b.lo") /* Branch if unsigned second < top */
pat cmu zge call cmu_z("b.hs") /* Branch if unsigned second >= top */
pat cmu zle call cmu_z("b.ls") /* Branch if unsigned second <= top */
proc cmf_z example cmf zeq
with GPRI GPRI STACK
gen
fcmp %2, %2, %1
beq[1] {LABEL, $2}
pat cmf zeq call cmf_z("b.eq") /* Branch if float second == top */
pat cmf zne call cmf_z("b.ne") /* Branch if float second != top */
pat cmf zgt call cmf_z("b.gt") /* Branch if float second > top */
pat cmf zlt call cmf_z("b.lt") /* Branch if float second < top */
pat cmf zge call cmf_z("b.ge") /* Branch if float second >= top */
pat cmf zle call cmf_z("b.le") /* Branch if float second <= top */
#if 0
pat cmi /* Signed tristate compare */
with CONST GPR
yields {TRISTATE_RC_S, %2, %1.val}
with GPR GPR
yields {TRISTATE_RR_S, %2, %1}
pat cmu /* Unsigned tristate compare */
with CONST GPR
yields {TRISTATE_RC_U, %2, %1.val}
with GPR GPR
yields {TRISTATE_RR_U, %2, %1}
#endif
pat cmp /* Compare pointers */
leaving
cmu QUAD
pat cms $1==QUAD /* Compare blocks (word sized) */
leaving
cmi QUAD
/* Other branching and labelling */
#if 0
pat lab topeltsize($1)<=4 && !fallthrough($1)
gen
labeldef $1
yields R0
pat lab topeltsize($1)<=4 && fallthrough($1)
with GPR0
gen
labeldef $1
yields %1
pat lab topeltsize($1)>4
with STACK
kills ALL
gen
labeldef $1
pat bra topeltsize($1)<=4 /* Unconditional jump with TOS register */
with GPR0 STACK
gen
b {LABEL, $1}
pat bra topeltsize($1)>4 /* Unconditional jump without TOS register */
with STACK
gen
b {LABEL, $1}
#endif
pat lab
with STACK
kills ALL
gen
labeldef $1
pat bra
with STACK
gen
b {LABEL, $1}
/* Miscellaneous */
pat cal /* Call procedure */
with STACK
kills ALL
gen
bl {LABEL, $1}
pat cai /* Call procedure indirect */
with GPR STACK
kills ALL
gen
bl %1
pat lfr $1==QUAD /* Load function result, word */
yields R0
pat ret $1==0 /* Return from procedure */
gen
return
mov SP, FP
pop FP, PC
pat ret $1<=QUAD /* Return from procedure, word */
with GPR0
gen
return
mov SP, FP
pop FP, PC
with STACK
gen
pop R0
return
mov SP, FP
pop FP, PC
pat blm /* Block move constant length */
leaving
loc $1
bls
pat bls /* Block move variable length */
with STACK
kills ALL
gen
bl {LABEL, "_memmove"}
pat csa /* Array-lookup switch */
with STACK
gen
bl {LABEL, ".csa"}
pat csb /* Table-lookup switch */
with STACK
gen
bl {LABEL, ".csb"}
/* EM specials */
pat fil /* Set current filename */
leaving
lae $1
ste ".filename"
pat lin /* Set current line number */
leaving
loc $1
ste ".linenumber"
pat lni /* Increment line number */
leaving
ine ".linenumber"
pat lim /* Load EM trap ignore mask */
leaving
lde ".ignmask"
pat sim /* Store EM trap ignore mask */
leaving
ste ".ignmask"
pat trp /* Raise EM trap */
with GPR0
gen
bl {LABEL, ".trap"}
pat sig /* Set trap handler */
leaving
ste ".trppc"
pat rtt /* Return from trap */
leaving
ret 0
pat lxl $1==0 /* Load FP */
leaving
lor 0
pat lxl $1==1 /* Load caller's FP */
leaving
lxl 0
dch
pat dch /* FP -> caller FP */
with GPR
uses reusing %1, REG
gen
ld %a, {GPROFFSET, %1, FP_OFFSET}
sub %a, GP
yields %a
pat lpb /* Convert FP to argument address */
leaving
adp EM_BSIZE
pat lxa /* Load caller's SP */
leaving
lxl $1
lpb
pat gto /* longjmp */
uses REG, REG
gen
move {LABEL, $1}, %a
ld %b, {GPROFFSET, %a, 8}
add FP, %b, GP
ld %b, {GPROFFSET, %a, 4}
add SP, %b, GP
ld %b, {GPROFFSET, %a, 0}
add %b, GP
b %b
#if 0
pat gto /* longjmp */
with STACK
gen
ld {LABEL, $1+2}
wspec {CONST, 1}
ld {LABEL, $1+4}
wspec {CONST, 0}
ld {LABEL, $1+0}
wspec {CONST, 2}
pat str $1==1 /* Store special register */
with GPR0
gen
wspec {CONST, $1}
#endif
pat lor $1==0 /* Load FP */
uses REG
gen
move FP, %a
yields %a
pat lor $1==1 /* Load SP */
uses REG
gen
move SP, %a
yields %a
pat lor $1==2 /* Load HP */
leaving
loe ".reghp"
pat str $1==0 /* Store FP */
with GPRI
gen
sub FP, %1, GP
pat str $1==1 /* Store SP */
with GPRI
gen
sub SP, %1, GP
pat str $1==2 /* Store HP */
leaving
ste ".reghp"
pat ass /* Adjust stack by variable amount */
with CONST+GPRI
gen
add SP, %1
pat asp /* Adjust stack by constant amount */
leaving
loc $1
ass
/* Floating point */
pat ngf /* Negate float */
leaving
loc 0
exg QUAD
sbf QUAD
proc simple_f example adf
with GPRI GPRI
uses reusing %1, REG
gen
fadd[1] %a, %2, %1
yields %a
pat adf call simple_f("fadd") /* Float add (second + top) */
pat sbf call simple_f("fsub") /* Float subtract (second - top) */
pat mlf call simple_f("fmul") /* Float multiply (second * top) */
pat dvf call simple_f("fdiv") /* Float divide (second / top) */
pat loc loc cff $1==$2 && $1==QUAD /* Convert float to float */
leaving
nop
pat loc loc cfi $1==$2 && $1==QUAD /* Convert float -> integer */
with GPR0
gen
bl {LABEL, ".cfi"}
yields R0
pat loc loc cfu $1==$2 && $1==QUAD /* Convert float -> unsigned */
with GPR0
gen
bl {LABEL, ".cfu"}
yields R0
pat loc loc cif $1==$2 && $1==QUAD /* Convert integer -> float */
with GPR0
gen
bl {LABEL, ".cif"}
yields R0
pat loc loc cuf $1==$2 && $1==QUAD /* Convert unsigned -> float */
with GPR0
gen
bl {LABEL, ".cuf"}
yields R0
pat fef /* Split float */
with GPR0
kills GPR1
gen
bl {LABEL, ".fef"}
yields R0 R1
pat fif /* Multiply float and split (?) */
with GPRI GPRI
kills GPR0, GPR1
gen
fmul R0, %2, %1
bl {LABEL, ".fef"}
yields R0 R1
pat zrf /* Load a floating zero */
leaving
loc 0