/*
 * VideoCore IV code generator for the ACK
 * © 2013 David Given
 * This file is redistributable under the terms of the 3-clause BSD license.
 * See the file 'Copying' in the root of the distribution for the full text.
 */

EM_WSIZE = 4
EM_PSIZE = 4
EM_BSIZE = 8                            /* two words saved in call frame */

INT8 = 1                                /* Size of values */
INT16 = 2
INT32 = 4
INT64 = 8
FLOAT32 = 4
FLOAT64 = 8

FP_OFFSET = 0                           /* Offset of saved FP relative to our FP */
PC_OFFSET = 4                           /* Offset of saved PC relative to our FP */

#define COMMENT(n) /* noop */

#define nicesize(x) ((x)==INT8 || (x)==INT16 || (x)==INT32 || (x)==INT64)

PROPERTIES

    GPR                                 /* any GPR */
    REG                                 /* any allocatable GPR */
    STACKABLE                           /* a push/popable register (r0, r6, fp) */

    GPR0 GPR1 GPR2 GPR3 GPR4 GPR5 GPR6 GPR7
    GPR8 GPR9 GPR10 GPR11 GPR12 GPR13 GPR14 GPR15
    GPR16 GPR17 GPR18 GPR19 GPR20 GPR21 GPR22 GPR23
    GPRGP GPRFP GPRSP GPRLR GPRPC

REGISTERS

    R0("r0")    : GPR, REG, STACKABLE, GPR0.
    R1("r1")    : GPR, REG, GPR1.
    R2("r2")    : GPR, REG, GPR2.
    R3("r3")    : GPR, REG, GPR3.
    R4("r4")    : GPR, REG, GPR4.
    R5("r5")    : GPR, REG, GPR5.
    R6("r6")    : GPR, REG, STACKABLE, GPR6 regvar.
    R7("r7")    : GPR, REG, GPR7 regvar.
    R8("r8")    : GPR, REG, GPR8 regvar.
    R9("r9")    : GPR, REG, GPR9 regvar.
    R10("r10")  : GPR, REG, GPR10 regvar.
    R11("r11")  : GPR, REG, GPR11 regvar.
    R12("r12")  : GPR, REG, GPR12 regvar.
    R13("r13")  : GPR, REG, GPR13 regvar.
    R14("r14")  : GPR, REG, GPR14 regvar.
    GP("r15")   : GPR, GPRGP.           /* global pointer; addresses on the EM
                                           stack are kept GP-relative (see the
                                           lal/loi/sti patterns below) */
    R23("r23")  : GPR.                  /* reserved as SCRATCH, never allocated */
    FP("fp")    : GPR, GPRFP, STACKABLE.
    SP("sp")    : GPR, GPRSP.
    LR("lr")    : GPR, GPRLR.
    PC("pc")    : GPR, GPRPC.

    /* r26 to r31 are special and the code generator doesn't touch them. */

#define SCRATCH R23

TOKENS

    /* Used only in instruction descriptions (to generate the correct syntax). */

    GPROFFSET = { GPR reg; INT off; } 4 off "(" reg ")".
    GPRGPR = { GPR reg1; GPR reg2; } 4 "(" reg1 "," reg2 ")".

    /* Primitives */

    LABEL = { ADDR adr; } 4 adr.
    CONST = { INT val; } 4 "#" val.

    /* Allows us to use regvar() to refer to registers */

    GPRE = { GPR reg; } 4 reg.
/* The results of comparisons. */

    TRISTATE_RC_S = { GPR reg; INT val; } 4.
    TRISTATE_RC_U = { GPR reg; INT val; } 4.
    TRISTATE_RR_S = { GPR reg1; GPR reg2; } 4.
    TRISTATE_RR_U = { GPR reg1; GPR reg2; } 4.

SETS

    TOKEN = LABEL + CONST.
    GPRI = GPR + GPRE.

INSTRUCTIONS

    add GPRI:wo, GPRI:ro, GPRI+CONST:ro.
    add GPRI:rw, GPRI+CONST:ro.
    and GPRI:rw, GPRI+CONST:ro.
    beq "b.eq" LABEL:ro.
    bne "b.ne" LABEL:ro.
    bgt "b.gt" LABEL:ro.                /* was declared twice; duplicate removed */
    bhi "b.hi" LABEL:ro.
    b GPRI+LABEL:ro.
    bl GPRI+LABEL:ro.
    cmp GPRI:ro, GPRI+CONST:ro kills :cc.
    divs GPRI:wo, GPRI:ro, GPRI+CONST:ro.
    divu GPRI:wo, GPRI:ro, GPRI+CONST:ro.
    eor GPRI:rw, GPRI+CONST:ro.
    exts GPRI:wo, GPRI:ro, GPRI+CONST:ro.
    exts GPRI:rw, GPRI+CONST:ro.
    ld GPRI:wo, GPROFFSET+GPRGPR+LABEL:ro.
    ldb GPRI:wo, GPROFFSET+GPRGPR+LABEL:ro.
    ldh GPRI:wo, GPROFFSET+GPRGPR+LABEL:ro.
    ldhs GPRI:wo, GPROFFSET+GPRGPR+LABEL:ro.
    lea GPRI:wo, LABEL:ro.
    lsl GPRI:rw, GPRI+CONST:ro.
    lsl GPRI:wo, GPRI:ro, GPRI+CONST:ro.
    mov GPRI:wo, GPRI+CONST:ro.
    mul GPRI:rw, GPRI+CONST:ro.
    neg GPRI:rw, GPRI+CONST:ro.
    or GPRI:rw, GPRI+CONST:ro.
    pop STACKABLE:wo.
    pop STACKABLE:wo, GPRLR+GPRPC:wo.
    push STACKABLE:ro.
    sub GPRI:wo, GPRI:ro, CONST+GPRI:ro.
    sub GPRI:rw, GPRI+CONST:ro.
    st GPRI:ro, GPROFFSET+GPRGPR+LABEL:ro.
    stb GPRI:ro, GPROFFSET+GPRGPR+LABEL:ro.
    sth GPRI:ro, GPROFFSET+GPRGPR+LABEL:ro.
    sths GPRI:ro, GPROFFSET+GPRGPR+LABEL:ro.
    invalid "invalid".

MOVES

    from GPR to GPR
        gen
            COMMENT("mov GPR->GPR")
            mov %2, %1

    /* GPRE exists solely to allow us to use regvar() (which can only be used
       in an expression) as a register constant. */

    from GPRE to GPR
        gen
            mov %2, %1

    /* Constants */

    from CONST to GPR
        gen
            mov %2, %1

    from LABEL to GPR
        gen
            lea %2, {LABEL, %1.adr}
            sub %2, GP                  /* pointers are kept GP-relative */

    /* Miscellaneous */

    from CONST+LABEL+GPR+GPRE to GPRE
        gen
            move %1, %2.reg

TESTS

    to test GPR
        gen
            cmp %1, {CONST, 0}

STACKINGRULES

    from STACKABLE to STACK
        gen
            push %1

    from GPR to STACK
        uses STACKABLE
        gen
            move %1, %a
            push %a

    from GPR to STACK
        gen
            sub SP, SP, {CONST, 4}
            st %1, {GPROFFSET, SP, 0}

    from GPRE to STACK
        uses STACKABLE
        gen
            move %1, %a
            push %a

    from GPRE to STACK
        gen
            sub SP, {CONST, 4}
            st %1, {GPROFFSET, SP, 0}

    from TOKEN to STACK
        uses STACKABLE
        gen
            move %1, %a
            push %a

    from TOKEN to STACK
        gen
            COMMENT("fallback stack")
            move %1, SCRATCH
            sub SP, SP, {CONST, 4}
            st SCRATCH, {GPROFFSET, SP, 0}

COERCIONS

    from GPRE
        uses reusing %1, REG=%1
        yields %a

    from CONST
        uses REG
        gen
            COMMENT("coerce CONST->REG")
            move %1, %a
        yields %a

    from LABEL
        uses REG
        gen
            COMMENT("coerce LABEL->REG")
            move %1, %a
        yields %a

    from STACK
        uses STACKABLE
        gen
            COMMENT("coerce STACK->REG")
            pop %a
        yields %a

PATTERNS

    /* Intrinsics */

    pat loc                             /* Load constant */
        yields {CONST, $1}

    pat dup $1<=INT32                   /* Duplicate word on top of stack */
    with GPR
        yields %1 %1

    pat dup $1==INT64                   /* Duplicate double-word on top of stack */
    with GPR GPR
        yields %2 %1 %2 %1

    pat exg $1==INT32                   /* Exchange top two words on stack */
    with GPR GPR
        yields %1 %2

#if 0
    pat stl lol $1==$2                  /* Store then load local */
        leaving
            dup INT32
            stl $1
#endif

    pat lal sti lal loi $1==$3 && $2==$4    /* Store then load local, of a different size */
        leaving
            dup $2
            lal $1
            sti $2

    pat ste loe $1==$2                  /* Store then load external */
        leaving
            dup 4
            ste $1

    /* Type conversions */

    pat loc loc cii loc loc cii $1==$4 && $2==$5    /* madness, generated by the C compiler */
        leaving
            loc $1
            loc $2
            cii

    pat loc loc cii loc loc cii $2==INT32 && $5==INT32 && $4<$2     /* madness, generated by the C compiler */
        leaving
            loc $4
            loc $5
            cii

    pat loc loc ciu                     /* signed X -> unsigned X */
        leaving
            loc $1
            loc $2
            cuu

    pat loc loc cuu $1==$2              /* unsigned X -> unsigned X */
        /* nop */

    pat loc loc cii $1==$2              /* signed X -> signed X */
        /* nop */

    pat loc loc cui $1==$2              /* unsigned X -> signed X */
        /* nop */

    pat loc loc cui $1==INT8 && $2==INT32   /* unsigned char -> signed int */
        /* nop */

    pat loc loc cui $1==INT16 && $2==INT32  /* unsigned short -> signed int */
        /* nop */

    pat loc loc cii $1==INT8 && $2>INT8     /* signed char -> anything */
    with REG
        uses reusing %1, REG=%1
        gen
            exts %a, {CONST, 8}
        yields %a

    pat loc loc cii $1==INT16 && $2>INT16   /* signed short -> anything */
    with REG
        uses reusing %1, REG=%1
        gen
            exts %a, {CONST, 16}
        yields %a

    /* Local variables */

    pat lal                             /* Load address of local */
        uses REG
        gen
            add %a, FP, {CONST, $1}
            sub %a, GP                  /* addresses on the stack are GP-relative */
        yields %a

    pat lol inreg($1)>0                 /* Load from local */
        yields {GPRE, regvar($1)}

    pat lol                             /* Load quad from local */
        uses REG
        gen
            ld %a, {GPROFFSET, FP, $1}
        yields %a

    pat ldl                             /* Load double-word from local */
        leaving
            lal $1
            loi INT32*2

    pat stl inreg($1)>0                 /* Store to local */
    with CONST+GPRI
        kills regvar($1)
        gen
            move %1, {GPRE, regvar($1)}

    pat stl                             /* Store to local */
    with GPRI
        gen
            st %1, {GPROFFSET, FP, $1}

    pat sdl                             /* Store double-word to local */
        leaving
            lal $1
            sti INT32*2

    pat lil inreg($1)>0                 /* Load from indirected local */
        uses REG
        gen
            /* The regvar holds a GP-relative pointer (see lal/loi); the old
               code dereferenced it without adding GP back, unlike the
               non-regvar path below. */
            add %a, {GPRE, regvar($1)}, GP
            ld %a, {GPROFFSET, %a, 0}
        yields %a

    pat lil                             /* Load from indirected local */
        leaving
            lol $1
            loi INT32

    pat sil                             /* Save to indirected local */
        leaving
            lol $1
            sti INT32

    pat stl lol $1==$2                  /* Save then load (generated by C compiler) */
        leaving
            dup 4
            stl $1

    pat zrl                             /* Zero local */
        leaving
            loc 0
            stl $1

    pat inl                             /* Increment local */
        leaving
            lol $1
            loc 1
            adi 4
            stl $1

    pat del                             /* Decrement local */
        leaving
            lol $1
            loc 1
            sbi 4
            stl $1

    /* Global variables */

    pat lpi                             /* Load address of external function */
        leaving
            lae $1

    pat lae                             /* Load address of external */
        yields {LABEL, $1}

    pat loe                             /* Load word external */
        leaving
            lae $1
            loi INT32

    pat ste                             /* Store word external */
        leaving
            lae $1
            sti INT32

    pat lde                             /* Load double-word external */
        leaving
            lae $1
            loi INT64

    pat sde
/* Store double-word external */ leaving lae $1 sti INT64 pat zre /* Zero external */ leaving loc 0 ste $1 #if 0 pat ine /* Increment external */ uses REG={LABEL, $1}, REG gen lwz %b, {GPROFFSET, %a, 0} addi %b, %b, {CONST, 1} stw %b, {GPROFFSET, %a, 0} pat dee /* Decrement external */ uses REG={LABEL, $1}, REG gen lwz %b, {GPROFFSET, %a, 0} addi %b, %b, {CONST, 0-1} stw %b, {GPROFFSET, %a, 0} #endif /* Structures */ pat lof /* Load word offsetted */ leaving adp $1 loi INT32 pat ldf /* Load double-word offsetted */ leaving adp $1 loi INT64 pat stf /* Store word offsetted */ leaving adp $1 sti INT32 pat sdf /* Store double-word offsetted */ leaving adp $1 sti INT64 /* Loads and stores */ pat loi $1==INT8 /* Load byte indirect */ with GPR uses reusing %1, REG gen ldb %a, {GPRGPR, %1, GP} yields %a with GPRE uses reusing %1.reg, REG gen ldb %a, {GPRGPR, %1.reg, GP} yields %a #if 0 pat loi loc loc cii $1==INT16 && $2==INT16 && $3==INT32 /* Load half-word indirect and sign extend */ with GPR uses REG gen lha %a, {GPROFFSET, %1, 0} yields %a with SUM_RR uses reusing %1, REG gen lhax %a, %1.reg1, %1.reg2 yields %a with SUM_RC uses REG gen move {IND_RC_H_S, %1.reg, %1.off}, %a yields %a pat loi $1==INT16 /* Load half-word indirect */ with GPR uses REG gen lhz %a, {GPROFFSET, %1, 0} yields %a with SUM_RR uses reusing %1, REG gen lhzx %a, %1.reg1, %1.reg2 yields %a with SUM_RC uses REG gen move {IND_RC_H, %1.reg, %1.off}, %a yields %a #endif pat loi $1==INT32 /* Load quad indirect */ with GPR uses reusing %1, REG gen add %a, %1, GP ld %a, {GPROFFSET, %a, 0} yields %a pat loi $1==INT64 /* Load double-quad indirect */ with GPRI uses reusing %1, REG, REG gen add %a, %1, GP ld %b, {GPROFFSET, %a, 4} ld %a, {GPROFFSET, %a, 0} yields %a %b pat loi /* Load arbitrary size */ leaving loc $1 los INT32 pat los /* Load arbitrary size */ with STACK kills ALL gen bl {LABEL, ".los"} pat sti $1==INT8 /* Store byte indirect */ with GPR GPRI gen stb %2, {GPRGPR, %1, GP} with GPRE GPRI gen stb 
%2, {GPRGPR, %1.reg, GP} pat sti $1==INT16 /* Store half-word indirect */ with GPR GPR uses REG gen add %a, %1, GP sth %2, {GPROFFSET, %a, 0} pat sti $1==INT32 /* Store quad indirect */ with GPR GPR uses REG gen add %a, %1, GP st %2, {GPROFFSET, %a, 0} pat sti $1==INT64 /* Store double-quad indirect */ with GPR GPR uses REG gen add %a, %1, GP st %1, {GPROFFSET, %a, 0} st %2, {GPROFFSET, %a, 4} pat sti /* Store arbitrary size */ leaving loc $1 sts INT32 pat sts /* Load arbitrary size */ with STACK kills ALL gen bl {LABEL, ".sts"} /* Arithmetic wrappers */ pat ads $1==4 /* Add var to pointer */ leaving adi $1 pat sbs $1==4 /* Subtract var from pointer */ leaving sbi $1 pat adp /* Add constant to pointer */ leaving loc $1 adi 4 pat adu /* Add unsigned */ leaving adi $1 pat sbu /* Subtract unsigned */ leaving sbi $1 pat inc /* Add 1 */ leaving loc 1 adi 4 pat dec /* Subtract 1 */ leaving loc 1 sbi 4 pat loc mlu $2==2 /* Unsigned multiply by constant */ leaving loc $1 mli 4 pat mlu /* Unsigned multiply by var */ leaving mli $1 pat loc slu /* Shift left unsigned by constant amount */ leaving loc $1 sli $2 pat slu /* Shift left unsigned by variable amount */ leaving sli $1 /* Word arithmetic */ pat adi $1==INT32 /* Add word (second + top) */ with GPRI+CONST GPRI uses reusing %2, REG=%2 gen add %a, %1 yields %a with GPRI GPRI+CONST uses reusing %1, REG=%1 gen add %a, %2 yields %a pat sbi $1==INT32 /* Subtract word (second - top) */ with GPRI+CONST GPRI uses reusing %2, REG=%2 gen sub %a, %1 yields %a pat ngi $1==INT32 /* Negate word */ with GPRI uses reusing %1, REG=%1 gen neg %a, %a yields %a pat and $1==INT32 /* AND word */ with GPRI+CONST GPRI uses reusing %2, REG=%2 gen and %a, %1 yields %a with GPRI GPRI+CONST uses reusing %1, REG=%1 gen and %a, %2 yields %a pat ior $1==INT32 /* OR word */ with GPRI+CONST GPRI uses reusing %2, REG=%2 gen or %a, %1 yields %a with GPRI GPRI+CONST uses reusing %1, REG=%1 gen or %a, %2 yields %a pat xor $1==INT32 /* XOR word */ with 
GPRI+CONST GPRI uses reusing %2, REG=%2 gen eor %a, %1 yields %a with GPRI GPRI+CONST uses reusing %1, REG=%1 gen eor %a, %2 yields %a pat dvi $1==INT32 /* Divide word (second / top) */ with GPRI GPRI uses reusing %2, REG gen divs %a, %2, %1 yields %a pat dvu $1==INT32 /* Divide unsigned word (second / top) */ with GPRI GPRI uses reusing %2, REG gen divu %a, %2, %1 yields %a pat rmu $1==INT32 /* Remainder unsigned word (second % top) */ with GPRI GPRI uses REG gen divu %a, %2, %1 mul %a, %1 sub %a, %2 yields %a #if 0 pat mli $1==4 /* Multiply word (second * top) */ with REG REG uses reusing %2, REG gen mullw %a, %2, %1 yields %a pat rmi $1==4 /* Remainder word (second % top) */ with REG REG uses REG gen divw %a, %2, %1 mullw %a, %a, %1 subf %a, %a, %2 yields %a pat and !defined($1) /* AND set */ with STACK gen bl {LABEL, ".and"} pat ior !defined($1) /* OR set */ with STACK gen bl {LABEL, ".ior"} pat xor $1==4 /* XOR word */ with GPR GPR yields {XOR_RR, %1, %2} with GPR CONST yields {XOR_RC, %1, %2.val} with CONST GPR yields {XOR_RC, %2, %1.val} pat xor !defined($1) /* XOR set */ with STACK gen bl {LABEL, ".xor"} pat com $1==INT32 /* NOT word */ with AND_RR uses REG gen nand %a, %1.reg1, %1.reg2 yields %a with OR_RR uses REG gen nor %a, %1.reg1, %1.reg2 yields %a with XOR_RR uses REG gen eqv %a, %1.reg1, %1.reg2 yields %a with GPR yields {NOT_R, %1} pat com !defined($1) /* NOT set */ with STACK gen bl {LABEL, ".com"} #endif pat sli $1==4 /* Shift left (second << top) */ with CONST+GPRI GPRI uses reusing %2, REG=%2 gen lsl %a, %1 yields %a #if 0 pat sri $1==4 /* Shift right signed (second >> top) */ with CONST GPR uses reusing %2, REG gen srawi %a, %2, {CONST, %1.val & 0x1F} yields %a with GPR GPR uses reusing %2, REG gen sraw %a, %2, %1 yields %a pat sru $1==4 /* Shift right unsigned (second >> top) */ with CONST GPR uses reusing %2, REG gen rlwinm %a, %2, {CONST, 32-(%1.val & 0x1F)}, {CONST, (%1.val & 0x1F)}, {CONST, 31} yields %a with GPR GPR uses reusing %2, REG 
gen srw %a, %2, %1 yields %a /* Arrays */ pat aar $1==INT32 /* Index array */ with GPR3 GPR4 GPR5 gen bl {LABEL, ".aar4"} yields R3 pat lae lar $2==INT32 && nicesize(rom($1, 3)) /* Load array */ leaving lae $1 aar INT32 loi rom($1, 3) pat lar $1==INT32 /* Load array */ with GPR3 GPR4 GPR5 STACK kills ALL gen bl {LABEL, ".lar4"} pat lae sar $2==INT32 && nicesize(rom($1, 3)) /* Store array */ leaving lae $1 aar INT32 sti rom($1, 3) pat sar $1==INT32 /* Store array */ with GPR3 GPR4 GPR5 STACK kills ALL gen bl {LABEL, ".sar4"} /* Sets */ pat set defined($1) /* Create word with set bit */ leaving loc 1 exg INT32 sli INT32 pat set !defined($1) /* Create structure with set bit (variable) */ with GPR3 GPR4 STACK gen bl {LABEL, ".set"} pat inn defined($1) /* Test for set bit */ leaving set INT32 and INT32 pat inn !defined($1) /* Test for set bit (variable) */ with GPR3 STACK gen bl {LABEL, ".inn"} #endif /* Boolean resolutions */ proc anyt example teq with GPRI uses reusing %1, REG=%1 gen cmp %1, {CONST, 0} mov %a, {CONST, 0} add[1] %a, {CONST, 1} yields %a pat cmu teq call anyt("add.eq") /* top = (top == 0) */ pat cmu tne call anyt("add.ne") /* top = (top != 0) */ pat cmu tlt call anyt("add.lo") /* top = unsigned (top < 0) */ pat cmu tle call anyt("add.ls") /* top = unsigned (top <= 0) */ pat cmu tgt call anyt("add.hi") /* top = unsigned (top > 0) */ pat cmu tge call anyt("add.hs") /* top = unsigned (top >= 0) */ /* Simple branches */ proc anyz example zeq with GPRI STACK gen cmp %1, {CONST, 0} beq[1] {LABEL, $1} pat zeq call anyz("b.eq") /* Branch if signed top == 0 */ pat zne call anyz("b.ne") /* Branch if signed top != 0 */ pat zgt call anyz("b.gt") /* Branch if signed top > 0 */ pat zlt call anyz("b.lt") /* Branch if signed top < 0 */ pat zge call anyz("b.ge") /* Branch if signed top >= 0 */ pat zle call anyz("b.le") /* Branch if signed top <= 0 */ proc anyb example beq with GPR+CONST GPRI STACK gen cmp %2, %1 beq[1] {LABEL, $1} pat beq call anyz("b.eq") /* Branch if 
signed second == top */ pat bne call anyz("b.ne") /* Branch if signed second != top */ pat bgt call anyz("b.gt") /* Branch if signed second > top */ pat bge call anyz("b.ge") /* Branch if signed second >= top */ pat blt call anyz("b.lt") /* Branch if signed second < top */ pat ble call anyz("b.le") /* Branch if signed second <= top */ proc anycmpb example cmu zeq with GPR+CONST GPRI STACK gen cmp %2, %1 beq[1] {LABEL, $2} pat cmu zgt call anycmpb("b.hi") /* Branch if unsigned second > top */ pat cmu zlt call anycmpb("b.lo") /* Branch if unsigned second < top */ pat cmu zge call anycmpb("b.hs") /* Branch if unsigned second >= top */ pat cmu zle call anycmpb("b.ls") /* Branch if unsigned second <= top */ #if 0 pat cmi /* Signed tristate compare */ with CONST GPR yields {TRISTATE_RC_S, %2, %1.val} with GPR GPR yields {TRISTATE_RR_S, %2, %1} pat cmu /* Unsigned tristate compare */ with CONST GPR yields {TRISTATE_RC_U, %2, %1.val} with GPR GPR yields {TRISTATE_RR_U, %2, %1} #endif pat cmp /* Compare pointers */ leaving cmu INT32 pat cms $1==INT32 /* Compare blocks (word sized) */ leaving cmi INT32 proc anycmf64 example teq with STACK uses REG gen bl {LABEL, ".cmf8"} mov %a, {CONST, 0} add[1] %a, {CONST, 1} yields %a pat cmf tlt $1==FLOAT64 call anyt("add.lo") /* top = unsigned (top < 0) */ pat cmf tle $1==FLOAT64 call anyt("add.ls") /* top = unsigned (top <= 0) */ pat cmf tgt $1==FLOAT64 call anyt("add.hi") /* top = unsigned (top > 0) */ pat cmf tge $1==FLOAT64 call anyt("add.hs") /* top = unsigned (top >= 0) */ #if 0 /* Other branching and labelling */ pat lab topeltsize($1)==4 && !fallthrough($1) gen labeldef $1 yields R3 pat lab topeltsize($1)==4 && fallthrough($1) with GPR3 gen labeldef $1 yields %1 pat lab topeltsize($1)!=4 with STACK kills ALL gen labeldef $1 #endif pat bra /* Unconditional jump */ with STACK gen b {LABEL, $1} /* Miscellaneous */ pat cal /* Call procedure */ with STACK kills ALL gen bl {LABEL, $1} pat cai /* Call procedure indirect */ with GPR 
STACK kills ALL gen bl %1 pat lfr $1==INT32 /* Load function result, word */ yields R0 pat lfr $1==INT64 /* Load function result, double-word */ yields R0 R1 pat ret $1==0 /* Return from procedure */ gen return mov SP, FP pop FP, PC pat ret $1==INT32 /* Return from procedure, word */ with GPR0 gen return mov SP, FP pop FP, PC pat ret $1==INT64 /* Return from procedure, double-word */ with GPR0 GPR1 gen return mov SP, FP pop FP, PC pat blm /* Block move constant length */ with GPRI GPRI STACK uses REG gen sub SP, {CONST, 12} mov %a, {CONST, $1} st %1, {GPROFFSET, SP, 0} st %2, {GPROFFSET, SP, 4} st %a, {GPROFFSET, SP, 8} bl {LABEL, "_memmove"} add SP, {CONST, 12} #if 0 pat bls /* Block move variable length */ with GPR GPR GPR STACK gen stwu %1, {GPROFFSET, SP, 0-4} stwu %3, {GPROFFSET, SP, 0-4} stwu %2, {GPROFFSET, SP, 0-4} bl {LABEL, "_memmove"} addi SP, SP, {CONST, 12} #endif pat csa /* Array-lookup switch */ with STACK gen bl {LABEL, ".csa"} pat csb /* Table-lookup switch */ with STACK gen bl {LABEL, ".csb"} /* EM specials */ pat fil /* Set current filename */ leaving lae $1 ste ".filename" pat lin /* Set current line number */ leaving loc $1 ste ".linenumber" pat lni /* Increment line number */ leaving ine ".linenumber" pat lim /* Load EM trap ignore mask */ leaving lde ".ignmask" pat sim /* Store EM trap ignore mask */ leaving ste ".ignmask" pat trp /* Raise EM trap */ with GPR0 gen bl {LABEL, ".trap"} pat sig /* Set trap handler */ leaving ste ".trppc" pat rtt /* Return from trap */ leaving ret 0 pat lxl $1==0 /* Load FP */ leaving lor 0 pat lxl $1==1 /* Load caller's FP */ leaving lxl 0 dch pat dch /* FP -> caller FP */ with GPR uses reusing %1, REG gen ld %a, {GPROFFSET, %1, FP_OFFSET} sub %a, GP yields %a pat lpb /* Convert FP to argument address */ leaving adp EM_BSIZE pat lxa /* Load caller's SP */ leaving lxl $1 lpb pat gto /* longjmp */ uses REG, REG gen move {LABEL, $1}, %a ld %b, {GPROFFSET, %a, 8} add FP, %b, GP ld %b, {GPROFFSET, %a, 4} add SP, %b, 
GP ld %b, {GPROFFSET, %a, 0} add %b, GP b %b #if 0 pat gto /* longjmp */ with STACK gen ld {LABEL, $1+2} wspec {CONST, 1} ld {LABEL, $1+4} wspec {CONST, 0} ld {LABEL, $1+0} wspec {CONST, 2} pat str $1==1 /* Store special GPRister */ with GPR0 gen wspec {CONST, $1} #endif pat lor $1==0 /* Load FP */ uses REG gen move FP, %a yields %a pat lor $1==1 /* Load SP */ uses REG gen move SP, %a yields %a pat lor $1==2 /* Load HP */ leaving loe ".reghp" pat str $1==0 /* Store FP */ with GPRI gen sub FP, %1, GP pat str $1==1 /* Store SP */ with GPRI gen sub SP, %1, GP pat str $1==2 /* Store HP */ leaving ste ".reghp" pat ass /* Adjust stack by variable amount */ with CONST+GPRI gen add SP, %1 pat asp /* Adjust stack by constant amount */ leaving loc $1 ass