/* * VideoCore IV code generator for the ACK * © 2013 David Given * This file is redistributable under the terms of the 3-clause BSD license. * See the file 'Copying' in the root of the distribution for the full text. */ EM_WSIZE = 4 EM_PSIZE = 4 EM_BSIZE = 8 /* two words saved in call frame */ INT8 = 1 /* Size of values */ INT16 = 2 INT32 = 4 INT64 = 8 FP_OFFSET = 0 /* Offset of saved FP relative to our FP */ PC_OFFSET = 4 /* Offset of saved PC relative to our FP */ #define COMMENT(n) /* noop */ #define nicesize(x) ((x)==INT8 || (x)==INT16 || (x)==INT32 || (x)==INT64) PROPERTIES GPR /* any GPR */ REG /* any allocatable GPR */ LREG /* any allocatable low register (r0-r15) */ HREG /* any allocatable high register (r0-r15) */ STACKABLE /* a push/popable register (r0, r6, r16, fp) */ GPR0 GPR1 GPR2 GPR3 GPR4 GPR5 GPR6 GPR7 GPR8 GPR9 GPR10 GPR11 GPR12 GPR13 GPR14 GPR15 GPR16 GPR17 GPR18 GPR19 GPR20 GPR21 GPR22 GPR23 GPRFP GPRSP GPRLR GPRPC REGISTERS R0("r0") : GPR, REG, LREG, STACKABLE, GPR0. R1("r1") : GPR, REG, LREG, GPR1. R2("r2") : GPR, REG, LREG, GPR2. R3("r3") : GPR, REG, LREG, GPR3. R4("r4") : GPR, REG, LREG, GPR4. R5("r5") : GPR, REG, LREG, GPR5. R6("r6") : GPR, REG, LREG, STACKABLE, GPR6 regvar. R7("r7") : GPR, REG, LREG, GPR7 regvar. R8("r8") : GPR, REG, LREG, GPR8 regvar. R9("r9") : GPR, REG, LREG, GPR9 regvar. R10("r10") : GPR, REG, LREG, GPR10 regvar. R11("r11") : GPR, REG, LREG, GPR11 regvar. R12("r12") : GPR, REG, LREG, GPR12 regvar. R13("r13") : GPR, REG, LREG, GPR13 regvar. R14("r14") : GPR, REG, LREG, GPR14 regvar. R15("r15") : GPR, REG, LREG, GPR15 regvar. R16("r16") : GPR, REG, HREG, STACKABLE, GPR16 regvar. R17("r17") : GPR, REG, HREG, GPR17 regvar. R18("r18") : GPR, REG, HREG, GPR18 regvar. R19("r19") : GPR, REG, HREG, GPR19 regvar. R20("r20") : GPR, REG, HREG, GPR20 regvar. R21("r21") : GPR, REG, HREG, GPR21 regvar. R22("r22") : GPR, REG, HREG, GPR22 regvar. R23("r23") : GPR, GPR23. FP("fp") : GPR, GPRFP, STACKABLE. SP("sp") : GPR, GPRSP. LR("lr") : GPR, GPRLR. PC("pc") : GPR, GPRPC. /* r26 to r31 are special and the code generator doesn't touch them. */ #define SCRATCH R23 TOKENS /* Used only in instruction descriptions (to generate the correct syntax). */ GPROFFSET = { GPR reg; INT off; } 4 off "(" reg ")". GPRGPR = { GPR reg1; GPR reg2; } 4 "(" reg1 "," reg2 ")". /* Primitives */ LABEL = { ADDR adr; } 4 adr. CONST = { INT val; } 4 "#" val. LOCAL = { INT off; } 4. /* Allows us to use regvar() to refer to registers */ GPRE = { GPR reg; } 4 reg. /* Expression partial results */ SUM_RC = { GPR reg; INT off; } 4. SUM_RR = { GPR reg1; GPR reg2; } 4. SEX_B = { GPR reg; } 4. SEX_H = { GPR reg; } 4. IND_RC_B = { GPR reg; INT off; } 4. IND_RR_B = { GPR reg1; GPR reg2; } 4. IND_LABEL_B = { ADDR adr; } 4. IND_RC_H = { GPR reg; INT off; } 4. IND_RR_H = { GPR reg1; GPR reg2; } 4. IND_LABEL_H = { ADDR adr; } 4. IND_RC_H_S = { GPR reg; INT off; } 4. IND_RC_Q = { GPR reg; INT off; } 4. IND_RR_Q = { GPR reg1; GPR reg2; } 4. IND_LABEL_Q = { ADDR adr; } 4. IND_RC_D = { GPR reg; INT off; } 8. IND_RR_D = { GPR reg1; GPR reg2; } 8. IND_LABEL_D = { ADDR adr; } 8. /* Comments */ LABELI = { ADDR msg; INT num; } 4 msg " " num. SETS TOKEN = LABEL + CONST + LOCAL. GPRI = GPR + GPRE. SUM_ALL = SUM_RC + SUM_RR. SEX_ALL = SEX_B + SEX_H. IND_ALL_B = IND_RC_B + IND_RR_B + IND_LABEL_B. IND_ALL_H = IND_RC_H + IND_RR_H + IND_LABEL_H. IND_ALL_Q = IND_RC_Q + IND_RR_Q + IND_LABEL_Q. IND_ALL_D = IND_RC_D + IND_RR_D + IND_LABEL_D. #if 0 OP_ALL_Q = SUM_ALL + TRISTATE_ALL + SEX_ALL + LOGICAL_ALL + IND_ALL_Q. #endif OP_ALL_Q = SUM_ALL + SEX_ALL + IND_ALL_B + IND_ALL_H + IND_ALL_Q. INSTRUCTIONS add GPRI:wo, GPRI:ro, GPRI+CONST:ro. beq "b.eq" LABEL:ro. bne "b.ne" LABEL:ro. b GPRI+LABEL:ro. bl GPRI+LABEL:ro. cmp GPRI:ro, GPRI+CONST:ro. exts GPRI:wo, GPRI:ro, GPRI+CONST:ro. ld GPRI:wo, GPROFFSET+GPRGPR+LABEL:ro. ldb GPRI:wo, GPROFFSET+GPRGPR+LABEL:ro. ldh GPRI:wo, GPROFFSET+GPRGPR+LABEL:ro. ldhs GPRI:wo, GPROFFSET+GPRGPR+LABEL:ro. lea GPRI:wo, LABEL:ro. lsl GPRI:wo, GPRI:ro, GPRI+CONST:ro. mov GPRI:wo, GPRI+CONST:ro. pop STACKABLE:wo. pop STACKABLE:wo, GPRLR+GPRPC:wo. push STACKABLE:ro. sub GPRI:wo, GPRI:ro, CONST+GPRI:ro. st GPRI:ro, GPROFFSET+GPRGPR+LABEL:ro. stb GPRI:ro, GPROFFSET+GPRGPR+LABEL:ro. sth GPRI:ro, GPROFFSET+GPRGPR+LABEL:ro. sths GPRI:ro, GPROFFSET+GPRGPR+LABEL:ro. invalid "invalid". comment "!" LABEL+LABELI:ro. MOVES from GPR to GPR gen COMMENT("mov GPR->GPR") mov %2, %1 /* GPRE exists solely to allow us to use regvar() (which can only be used in an expression) as a register constant. */ from GPR to GPRE gen COMMENT("move GPR->GPRE") mov %2, %1 from GPRE to GPR gen COMMENT("move GPRE->GPR") mov %2, %1 /* Constants */ from CONST to GPR gen COMMENT("move CONST->GPR") mov %2, %1 from LABEL to GPR gen COMMENT("move LABEL->GPR") lea %2, {LABEL, %1.adr} /* Sign extension */ from SEX_B to GPR gen COMMENT("move SEX_B->GPR") exts %2, %1.reg, {CONST, 8} from SEX_H to GPR gen COMMENT("move SEX_H->GPR") exts %2, %1.reg, {CONST, 16} /* Register + something */ from SUM_RC to GPR gen COMMENT("move SUM_RC->GPR") add %2, %1.reg, {CONST, %1.off} from SUM_RR to GPR gen COMMENT("move SUM_RR->GPR") add %2, %1.reg1, %1.reg2 from SUM_RR to GPR gen COMMENT("move SUM_RR->GPRE") add %2, %1.reg1, %1.reg2 /* Read byte */ from IND_RC_B to GPR gen COMMENT("move IND_RC_B->GPR") ldb %2, {GPROFFSET, %1.reg, %1.off} from IND_RR_B to GPR gen COMMENT("move IND_RR_B->GPR") ldb %2, {GPRGPR, %1.reg1, %1.reg2} from IND_LABEL_B to GPR gen COMMENT("move IND_LABEL_B->GPR") ldb %2, {LABEL, %1.adr} /* Write byte */ from GPR to IND_RC_B gen COMMENT("move GPR->IND_RC_B") stb %1, {GPROFFSET, %2.reg, %2.off} from GPR to IND_RR_B gen COMMENT("move GPR->IND_RR_B") stb %1, {GPRGPR, %2.reg1, %2.reg2} from GPR to IND_LABEL_B gen COMMENT("move GPR->IND_LABEL_B") stb %1, {LABEL, %2.adr} /* Read short */ from IND_RC_H to GPR gen COMMENT("move IND_RC_H->GPR") ldh %2, {GPROFFSET, %1.reg, %1.off} from IND_RR_H to GPR gen COMMENT("move IND_RR_H->GPR") ldh %2, {GPRGPR, %1.reg1, %1.reg2} from IND_LABEL_H to GPR gen COMMENT("move IND_LABEL_H->GPR") ldh %2, {LABEL, %1.adr} /* Write short */ from GPR to IND_RC_H gen COMMENT("move GPR->IND_RC_H") sth %1, {GPROFFSET, %2.reg, %2.off} from GPR to IND_RR_H gen COMMENT("move GPR->IND_RR_H") sth %1, {GPRGPR, %2.reg1, %2.reg2} from GPR to IND_LABEL_H gen COMMENT("move GPR->IND_LABEL_H") sth %1, {LABEL, %2.adr} /* Read quad */ from IND_RC_Q to GPR gen COMMENT("move IND_RC_Q->GPR") ld %2, {GPROFFSET, %1.reg, %1.off} from IND_RR_Q to GPR gen COMMENT("move IND_RR_Q->GPR") ld %2, {GPRGPR, %1.reg1, %1.reg2} from IND_LABEL_Q to GPR gen COMMENT("move IND_LABEL_Q->GPR") ld %2, {LABEL, %1.adr} /* Write quad */ from GPR to IND_RC_Q gen COMMENT("move GPR->IND_RC_Q") st %1, {GPROFFSET, %2.reg, %2.off} from GPR to IND_RR_Q gen COMMENT("move GPR->IND_RR_Q") st %1, {GPRGPR, %2.reg1, %2.reg2} from GPR to IND_LABEL_Q gen COMMENT("move GPR->IND_LABEL_Q") st %1, {LABEL, %2.adr} /* Miscellaneous */ from CONST + LABEL + GPR + OP_ALL_Q to GPRE gen move %1, %2.reg #if 0 TESTS to test GPR gen invalid #endif STACKINGRULES from STACKABLE to STACK gen COMMENT("stack STACKABLE") push %1 from REG to STACK uses STACKABLE gen COMMENT("stack non-STACKABLE") move %1, %a push %a from REG to STACK gen COMMENT("stack non-STACKABLE, fallback") sub SP, SP, {CONST, 4} st %1, {GPROFFSET, SP, 0} from CONST + OP_ALL_Q to STACK uses STACKABLE gen move %1, %a push %a from CONST + OP_ALL_Q to STACK gen COMMENT("fallback stack") move %1, SCRATCH sub SP, SP, {CONST, 4} st SCRATCH, {GPROFFSET, SP, 0} from TOKEN to STACK gen invalid. COERCIONS from REG uses REG gen COMMENT("coerce REG->REG") move %1, %a yields %a from GPRE uses REG gen COMMENT("coerce GPRE->REG") move %1, %a yields %a from CONST uses REG gen COMMENT("coerce CONST->REG") move %1, %a yields %a from LABEL uses REG gen COMMENT("coerce LABEL->REG") move %1, %a yields %a from STACK uses STACKABLE gen COMMENT("coerce STACK->REG") pop %a yields %a from SEX_B uses REG gen COMMENT("coerce SEX_B->REG") exts %a, %1.reg, {CONST, 8} yields %a from SEX_H uses REG gen COMMENT("coerce SEX_H->REG") exts %a, %1.reg, {CONST, 16} yields %a #if 0 from SUM_ALL + TRISTATE_ALL + LOGICAL_ALL uses REG gen move %1, {GPRE, %a} yields %a from IND_ALL_Q uses REG gen move %1, %a yields %a #endif from OP_ALL_Q uses REG gen move %1, %a yields %a PATTERNS /* Intrinsics */ pat loc /* Load constant */ yields {CONST, $1} pat dup $1==INT32 /* Duplicate word on top of stack */ with REG yields %1 %1 pat dup $1==INT64 /* Duplicate double-word on top of stack */ with REG REG yields %2 %1 %2 %1 pat exg $1==INT32 /* Exchange top two words on stack */ with REG REG yields %1 %2 pat stl lol $1==$2 /* Store then load local */ leaving dup 4 stl $1 pat lal sti lal loi $1==$3 && $2==$4 /* Store then load local, of a different size */ leaving dup INT32 lal $1 sti $2 pat ste loe $1==$2 /* Store then load external */ leaving dup 4 ste $1 /* Type conversions */ pat loc loc cii loc loc cii $1==$4 && $2==$5 /* madness, generated by the C compiler */ leaving loc $1 loc $2 cii pat loc loc cii loc loc cii $2==INT32 && $5==INT32 && $4<$2 /* madness, generated by the C compiler */ leaving loc $4 loc $5 cii pat loc loc ciu /* signed X -> unsigned X */ leaving loc $1 loc $2 cuu pat loc loc cuu $1==$2 /* unsigned X -> unsigned X */ /* nop */ pat loc loc cii $1==$2 /* signed X -> signed X */ /* nop */ pat loc loc cui $1==$2 /* unsigned X -> signed X */ /* nop */ pat loc loc cui $1==INT8 && $2==INT32 /* unsigned char -> signed int */ /* nop */ pat loc loc cui $1==INT16 && $2==INT32 /* unsigned short -> signed int */ /* nop */ pat loc loc cii $1==INT8 && $2==INT32 /* signed char -> signed int */ with REG yields {SEX_B, %1} pat loc loc cii $1==2 && $2==4 /* signed char -> signed short */ with REG yields {SEX_H, %1} /* Local variables */ pat lal /* Load address of local */ yields {SUM_RC, FP, $1} pat lol inreg($1)>0 /* Load from local */ yields {LOCAL, $1} pat lol /* Load from local */ leaving lal $1 loi INT32 pat ldl /* Load double-word from local */ leaving lal $1 loi INT32*2 pat stl inreg($1)>0 /* Store to local */ with CONST + LABEL + GPR + OP_ALL_Q kills regvar($1), LOCAL %off==$1 gen move %1, {GPRE, regvar($1)} pat stl /* Store to local */ leaving lal $1 sti INT32 pat sdl /* Store double-word to local */ leaving lal $1 sti INT32*2 pat lil inreg($1)>0 /* Load from indirected local */ uses REG gen ld %a, {GPROFFSET, regvar($1), 0} yields %a pat lil /* Load from indirected local */ leaving lol $1 loi INT32 pat sil /* Save to indirected local */ leaving lol $1 sti INT32 pat stl lol $1==$2 /* Save then load (generated by C compiler) */ leaving dup 4 stl $1 pat zrl /* Zero local */ leaving loc 0 stl $1 pat inl /* Increment local */ leaving lol $1 loc 1 adi 4 stl $1 pat del /* Decrement local */ leaving lol $1 loc 1 sbi 4 stl $1 /* Global variables */ pat lpi /* Load address of external function */ leaving lae $1 pat lae /* Load address of external */ yields {LABEL, $1} pat loe /* Load word external */ leaving lae $1 loi INT32 pat ste /* Store word external */ leaving lae $1 sti INT32 pat lde /* Load double-word external */ leaving lae $1 loi INT64 pat sde /* Store double-word external */ leaving lae $1 sti INT64 pat zre /* Zero external */ leaving loc 0 ste $1 #if 0 pat ine /* Increment external */ uses REG={LABEL, $1}, REG gen lwz %b, {GPROFFSET, %a, 0} addi %b, %b, {CONST, 1} stw %b, {GPROFFSET, %a, 0} pat dee /* Decrement external */ uses REG={LABEL, $1}, REG gen lwz %b, {GPROFFSET, %a, 0} addi %b, %b, {CONST, 0-1} stw %b, {GPROFFSET, %a, 0} #endif /* Structures */ pat lof /* Load word offsetted */ leaving adp $1 loi INT32 pat ldf /* Load double-word offsetted */ leaving adp $1 loi INT64 pat stf /* Store word offsetted */ leaving adp $1 sti INT32 pat sdf /* Store double-word offsetted */ leaving adp $1 sti INT64 /* Loads and stores */ pat loi $1==INT8 /* Load byte indirect */ with GPR yields {IND_RC_B, %1, 0} with SUM_RR yields {IND_RR_B, %1.reg1, %1.reg2} with SUM_RC yields {IND_RC_B, %1.reg, %1.off} #if 0 pat loi loc loc cii $1==INT16 && $2==INT16 && $3==INT32 /* Load half-word indirect and sign extend */ with GPR uses REG gen lha %a, {GPROFFSET, %1, 0} yields %a with SUM_RR uses reusing %1, REG gen lhax %a, %1.reg1, %1.reg2 yields %a with SUM_RC uses REG gen move {IND_RC_H_S, %1.reg, %1.off}, %a yields %a pat loi $1==INT16 /* Load half-word indirect */ with GPR uses REG gen lhz %a, {GPROFFSET, %1, 0} yields %a with SUM_RR uses reusing %1, REG gen lhzx %a, %1.reg1, %1.reg2 yields %a with SUM_RC uses REG gen move {IND_RC_H, %1.reg, %1.off}, %a yields %a #endif pat loi $1==INT32 /* Load quad indirect */ with GPR yields {IND_RC_Q, %1, 0} with SUM_RC yields {IND_RC_Q, %1.reg, %1.off} with SUM_RR yields {IND_RR_Q, %1.reg1, %1.reg2} with LABEL yields {IND_LABEL_Q, %1.adr} #if 0 pat loi $1==INT64 /* Load double-quad indirect */ with GPR yields {IND_RC_D, %1, 0} with SUM_RC yields {IND_RC_D, %1.reg, %1.off} with SUM_RR yields {IND_RR_D, %1.reg1, %1.reg2} with LABEL yields {IND_LABEL_D, %1.adr} #endif pat loi /* Load arbitrary size */ leaving loc $1 los INT32 pat los /* Load arbitrary size */ with GPR0 GPR1 STACK kills ALL gen bl {LABEL, ".los"} pat sti $1==INT8 /* Store byte indirect */ with GPR GPR gen move %2, {IND_RC_B, %1, 0} with SUM_RR GPR gen move %2, {IND_RR_B, %1.reg1, %1.reg2} with SUM_RC GPR gen move %2, {IND_RC_B, %1.reg, %1.off} with GPR SEX_B gen move %2.reg, {IND_RC_B, %1, 0} with SUM_RR SEX_B gen move %2.reg, {IND_RR_B, %1.reg1, %1.reg2} with SUM_RC SEX_B gen move %2.reg, {IND_RC_B, %1.reg, %1.off} with LABEL GPR gen move %2, {IND_LABEL_B, %1.adr} pat sti $1==INT16 /* Store half-word indirect */ with GPR GPR gen move %2, {IND_RC_H, %1, 0} with SUM_RR GPR gen move %2, {IND_RR_H, %1.reg1, %1.reg2} with SUM_RC GPR gen move %2, {IND_RC_H, %1.reg, %1.off} with GPR SEX_H gen move %2.reg, {IND_RC_H, %1, 0} with SUM_RR SEX_H gen move %2.reg, {IND_RR_H, %1.reg1, %1.reg2} with SUM_RC SEX_H gen move %2.reg, {IND_RC_H, %1.reg, %1.off} with LABEL GPR gen move %2, {IND_LABEL_H, %1.adr} pat sti $1==INT32 /* Store quad indirect */ with GPR GPR gen move %2, {IND_RC_Q, %1, 0} with SUM_RR GPR gen move %2, {IND_RR_Q, %1.reg1, %1.reg2} with SUM_RC GPR gen move %2, {IND_RC_Q, %1.reg, %1.off} with LABEL GPR gen move %2, {IND_LABEL_Q, %1.adr} #if 0 pat sti $1==INT64 /* Store double-word indirect */ with GPR FD gen move %2, {IND_RC_D, %1, 0} with SUM_RR FD gen move %2, {IND_RR_D, %1.reg1, %1.reg2} with SUM_RC FD gen move %2, {IND_RC_D, %1.reg, %1.off} with GPR GPR GPR gen stw %2, {GPROFFSET, %1, 0} stw %3, {GPROFFSET, %1, 4} with SUM_RC GPR GPR gen move %2, {IND_RC_Q, %1.reg, %1.off} move %3, {IND_RC_Q, %1.reg, %1.off+4} with LABEL FD gen move %2, {IND_LABEL_D, %1.adr} pat sti /* Store arbitrary size */ leaving loc $1 sts INT32 pat sts /* Load arbitrary size */ with GPR3 GPR4 STACK kills ALL gen bl {LABEL, ".sts"} #endif /* Arithmetic wrappers */ pat ads $1==4 /* Add var to pointer */ leaving adi $1 pat sbs $1==4 /* Subtract var from pointer */ leaving sbi $1 pat adp /* Add constant to pointer */ leaving loc $1 adi 4 pat adu /* Add unsigned */ leaving adi $1 pat sbu /* Subtract unsigned */ leaving sbi $1 pat inc /* Add 1 */ leaving loc 1 adi 4 pat dec /* Subtract 1 */ leaving loc 1 sbi 4 pat loc mlu $2==2 /* Unsigned multiply by constant */ leaving loc $1 mli 4 pat mlu /* Unsigned multiply by var */ leaving mli $1 pat loc slu /* Shift left unsigned by constant amount */ leaving loc $1 sli $2 pat slu /* Shift left unsigned by variable amount */ leaving sli $1 /* Word arithmetic */ pat adi $1==INT32 /* Add word (second + top) */ with REG REG yields {SUM_RR, %1, %2} with CONST REG yields {SUM_RC, %2, %1.val} with REG CONST yields {SUM_RC, %1, %2.val} with CONST SUM_RC yields {SUM_RC, %2.reg, %2.off+%1.val} with CONST LABEL yields {LABEL, %2.adr+%1.val} #if 0 pat sbi $1==4 /* Subtract word (second - top) */ with REG REG uses reusing %2, REG gen subf %a, %1, %2 yields %a with CONST REG yields {SUM_RC, %2, 0-%1.val} with CONST SUM_RC yields {SUM_RC, %2.reg, %2.off-%1.val} with CONST LABEL yields {LABEL, %2.adr+(0-%1.val)} pat ngi $1==4 /* Negate word */ with REG uses reusing %1, REG gen neg %a, %1 yields %a pat mli $1==4 /* Multiply word (second * top) */ with REG REG uses reusing %2, REG gen mullw %a, %2, %1 yields %a pat dvi $1==4 /* Divide word (second / top) */ with REG REG uses reusing %2, REG gen divw %a, %2, %1 yields %a pat dvu $1==4 /* Divide unsigned word (second / top) */ with REG REG uses reusing %2, REG gen divwu %a, %2, %1 yields %a pat rmi $1==4 /* Remainder word (second % top) */ with REG REG uses REG gen divw %a, %2, %1 mullw %a, %a, %1 subf %a, %a, %2 yields %a pat rmu $1==4 /* Remainder unsigned word (second % top) */ with REG REG uses REG gen divwu %a, %2, %1 mullw %a, %a, %1 subf %a, %a, %2 yields %a pat and $1==4 /* AND word */ with GPR NOT_R uses reusing %1, REG gen andc %a, %1, %2.reg yields %a with NOT_R GPR uses reusing %1, REG gen andc %a, %2, %1.reg yields %a with GPR GPR yields {AND_RR, %1, %2} with GPR CONST yields {AND_RC, %1, %2.val} with CONST GPR yields {AND_RC, %2, %1.val} pat and !defined($1) /* AND set */ with STACK gen bl {LABEL, ".and"} pat ior $1==4 /* OR word */ with GPR NOT_R uses reusing %1, REG gen orc %a, %1, %2.reg yields %a with NOT_R GPR uses reusing %2, REG gen orc %a, %2, %1.reg yields %a with GPR GPR yields {OR_RR, %1, %2} with GPR CONST yields {OR_RC, %1, %2.val} with CONST GPR yields {OR_RC, %2, %1.val} pat ior !defined($1) /* OR set */ with STACK gen bl {LABEL, ".ior"} pat xor $1==4 /* XOR word */ with GPR GPR yields {XOR_RR, %1, %2} with GPR CONST yields {XOR_RC, %1, %2.val} with CONST GPR yields {XOR_RC, %2, %1.val} pat xor !defined($1) /* XOR set */ with STACK gen bl {LABEL, ".xor"} pat com $1==INT32 /* NOT word */ with AND_RR uses REG gen nand %a, %1.reg1, %1.reg2 yields %a with OR_RR uses REG gen nor %a, %1.reg1, %1.reg2 yields %a with XOR_RR uses REG gen eqv %a, %1.reg1, %1.reg2 yields %a with GPR yields {NOT_R, %1} pat com !defined($1) /* NOT set */ with STACK gen bl {LABEL, ".com"} #endif pat sli $1==4 /* Shift left (second << top) */ with CONST+GPR GPR uses reusing %2, REG gen lsl %a, %2, %1 yields %a #if 0 pat sri $1==4 /* Shift right signed (second >> top) */ with CONST GPR uses reusing %2, REG gen srawi %a, %2, {CONST, %1.val & 0x1F} yields %a with GPR GPR uses reusing %2, REG gen sraw %a, %2, %1 yields %a pat sru $1==4 /* Shift right unsigned (second >> top) */ with CONST GPR uses reusing %2, REG gen rlwinm %a, %2, {CONST, 32-(%1.val & 0x1F)}, {CONST, (%1.val & 0x1F)}, {CONST, 31} yields %a with GPR GPR uses reusing %2, REG gen srw %a, %2, %1 yields %a /* Arrays */ pat aar $1==INT32 /* Index array */ with GPR3 GPR4 GPR5 gen bl {LABEL, ".aar4"} yields R3 pat lae lar $2==INT32 && nicesize(rom($1, 3)) /* Load array */ leaving lae $1 aar INT32 loi rom($1, 3) pat lar $1==INT32 /* Load array */ with GPR3 GPR4 GPR5 STACK kills ALL gen bl {LABEL, ".lar4"} pat lae sar $2==INT32 && nicesize(rom($1, 3)) /* Store array */ leaving lae $1 aar INT32 sti rom($1, 3) pat sar $1==INT32 /* Store array */ with GPR3 GPR4 GPR5 STACK kills ALL gen bl {LABEL, ".sar4"} /* Sets */ pat set defined($1) /* Create word with set bit */ leaving loc 1 exg INT32 sli INT32 pat set !defined($1) /* Create structure with set bit (variable) */ with GPR3 GPR4 STACK gen bl {LABEL, ".set"} pat inn defined($1) /* Test for set bit */ leaving set INT32 and INT32 pat inn !defined($1) /* Test for set bit (variable) */ with GPR3 STACK gen bl {LABEL, ".inn"} /* Boolean resolutions */ pat teq /* top = (top == 0) */ with TRISTATE_ALL + GPR uses reusing %1, REG gen move %1, C0 move C0, SCRATCH move {LABEL, ".teq_table"}, %a lwzx %a, %a, SCRATCH yields %a pat tne /* top = (top != 0) */ with TRISTATE_ALL + GPR uses reusing %1, REG gen move %1, C0 move C0, SCRATCH move {LABEL, ".tne_table"}, %a lwzx %a, %a, SCRATCH yields %a pat tlt /* top = (top < 0) */ with TRISTATE_ALL + GPR uses reusing %1, REG gen move %1, C0 move C0, SCRATCH move {LABEL, ".tlt_table"}, %a lwzx %a, %a, SCRATCH yields %a pat tle /* top = (top <= 0) */ with TRISTATE_ALL + GPR uses reusing %1, REG gen move %1, C0 move C0, SCRATCH move {LABEL, ".tle_table"}, %a lwzx %a, %a, SCRATCH yields %a pat tgt /* top = (top > 0) */ with TRISTATE_ALL + GPR uses reusing %1, REG gen move %1, C0 move C0, SCRATCH move {LABEL, ".tgt_table"}, %a lwzx %a, %a, SCRATCH yields %a pat tge /* top = (top >= 0) */ with TRISTATE_ALL + GPR uses reusing %1, REG gen move %1, C0 move C0, SCRATCH move {LABEL, ".tge_table"}, %a lwzx %a, %a, SCRATCH yields %a #endif /* Simple branches */ pat zeq /* Branch if signed top == 0 */ with GPR STACK gen cmp %1, {CONST, 0} beq {LABEL, $1} pat beq with GPR GPR STACK gen cmp %1, %2 beq {LABEL, $1} pat zne /* Branch if signed top != 0 */ with GPR STACK gen cmp %1, {CONST, 0} bne {LABEL, $1} pat bne with GPR GPR STACK gen cmp %1, %2 bne {LABEL, $1} #if 0 pat zgt /* Branch if signed top > 0 */ with TRISTATE_ALL+GPR STACK gen move %1, C0 bc IFTRUE, GT, {LABEL, $1} pat bgt leaving cmi INT32 zgt $1 pat zge /* Branch if signed top >= 0 */ with TRISTATE_ALL+GPR STACK gen move %1, C0 bc IFFALSE, LT, {LABEL, $1} pat bge leaving cmi INT32 zge $1 pat zlt /* Branch if signed top < 0 */ with TRISTATE_ALL+GPR STACK gen move %1, C0 bc IFTRUE, LT, {LABEL, $1} pat blt leaving cmi INT32 zlt $1 pat zle /* Branch if signed top >= 0 */ with TRISTATE_ALL+GPR STACK gen move %1, C0 bc IFFALSE, GT, {LABEL, $1} pat ble leaving cmi INT32 zle $1 #endif #if 0 /* Compare and jump */ pat cmi /* Signed tristate compare */ with CONST GPR yields {TRISTATE_RC_S, %2, %1.val} with GPR GPR yields {TRISTATE_RR_S, %2, %1} pat cmu /* Unsigned tristate compare */ with CONST GPR yields {TRISTATE_RC_U, %2, %1.val} with GPR GPR yields {TRISTATE_RR_U, %2, %1} pat cmp /* Compare pointers */ leaving cmu INT32 pat cms $1==INT32 /* Compare blocks (word sized) */ leaving cmi INT32 /* Other branching and labelling */ pat lab topeltsize($1)==4 && !fallthrough($1) gen labeldef $1 yields R3 pat lab topeltsize($1)==4 && fallthrough($1) with GPR3 gen labeldef $1 yields %1 pat lab topeltsize($1)!=4 with STACK kills ALL gen labeldef $1 #endif pat bra /* Unconditional jump */ with STACK gen b {LABEL, $1} /* Miscellaneous */ pat cal /* Call procedure */ with STACK kills ALL gen bl {LABEL, $1} pat cai /* Call procedure indirect */ with GPR STACK kills ALL gen bl %1 pat lfr $1==INT32 /* Load function result, word */ yields R0 pat lfr $1==INT64 /* Load function result, double-word */ yields R0 R1 pat ret $1==0 /* Return from procedure */ gen return mov SP, FP pop FP, PC pat ret $1==INT32 /* Return from procedure, word */ with GPR0 gen return mov SP, FP pop FP, PC pat ret $1==INT64 /* Return from procedure, double-word */ with GPR0 GPR1 gen return mov SP, FP pop FP, PC #if 0 pat blm /* Block move constant length */ with GPR GPR STACK uses REG gen move {CONST, $1}, %a stwu %a, {GPROFFSET, SP, 0-4} stwu %2, {GPROFFSET, SP, 0-4} stwu %1, {GPROFFSET, SP, 0-4} bl {LABEL, "_memmove"} addi SP, SP, {CONST, 12} pat bls /* Block move variable length */ with GPR GPR GPR STACK gen stwu %1, {GPROFFSET, SP, 0-4} stwu %3, {GPROFFSET, SP, 0-4} stwu %2, {GPROFFSET, SP, 0-4} bl {LABEL, "_memmove"} addi SP, SP, {CONST, 12} pat csa /* Array-lookup switch */ with GPR3 GPR4 STACK gen b {LABEL, ".csa"} pat csb /* Table-lookup switch */ with GPR3 GPR4 STACK gen b {LABEL, ".csb"} /* EM specials */ pat fil /* Set current filename */ leaving lae $1 ste ".filename" pat lin /* Set current line number */ leaving loc $1 ste ".linenumber" pat lni /* Increment line number */ leaving ine ".linenumber" pat lim /* Load EM trap ignore mask */ leaving lde ".ignmask" pat sim /* Store EM trap ignore mask */ leaving ste ".ignmask" pat trp /* Raise EM trap */ with GPR3 gen bl {LABEL, ".trap"} pat sig /* Set trap handler */ leaving ste ".trppc" pat rtt /* Return from trap */ leaving ret 0 pat lxl $1==0 /* Load FP */ leaving lor 0 pat lxl $1==1 /* Load caller's FP */ leaving lxl 0 dch pat dch /* FP -> caller FP */ with GPR uses reusing %1, REG gen lwz %a, {GPROFFSET, %1, FP_OFFSET} yields %a pat lpb /* Convert FP to argument address */ leaving adp EM_BSIZE pat lxa /* Load caller's SP */ leaving lxl $1 lpb pat gto /* longjmp */ uses REG gen move {LABEL, $1}, %a move {IND_RC_Q, %a, 8}, FP move {IND_RC_Q, %a, 4}, SP move {IND_RC_Q, %a, 0}, %a mtspr CTR, %a bcctr ALWAYS, {CONST, 0}, {CONST, 0} #if 0 pat gto /* longjmp */ with STACK gen ld {LABEL, $1+2} wspec {CONST, 1} ld {LABEL, $1+4} wspec {CONST, 0} ld {LABEL, $1+0} wspec {CONST, 2} pat str $1==1 /* Store special GPRister */ with GPR0 gen wspec {CONST, $1} #endif #endif pat lor $1==0 /* Load FP */ uses REG gen move FP, %a yields %a pat lor $1==1 /* Load SP */ uses REG gen move SP, %a yields %a pat lor $1==2 /* Load HP */ leaving loe ".reghp" pat str $1==0 /* Store FP */ with GPR gen move %1, FP pat str $1==1 /* Store SP */ with GPR gen move %1, SP pat str $1==2 /* Store HP */ leaving ste ".reghp" pat ass /* Adjust stack by variable amount */ with CONST gen move {SUM_RC, SP, %1.val}, SP with GPR gen move {SUM_RR, SP, %1}, SP pat asp /* Adjust stack by constant amount */ leaving loc $1 ass