I added REG_PAIR in cfbc537 to speed up the register allocator, because ncg was taking about 2 seconds on each sti 8. I defined only 4 such pairs, so allocating a REG_PAIR was much faster than allocating REG REG. After my last commit c5bb3be, allocation of REG REG is fast, and REG_PAIR seems unnecessary.
| EM_WSIZE = 4
 | |
| EM_PSIZE = 4
 | |
| EM_BSIZE = 8    /* two words saved in call frame */
 | |
| 
 | |
| INT8 = 1        /* Size of values */
 | |
| INT16 = 2
 | |
| INT32 = 4
 | |
| INT64 = 8
 | |
| 
 | |
| FP_OFFSET = 0   /* Offset of saved FP relative to our FP */
 | |
| PC_OFFSET = 4   /* Offset of saved PC relative to our FP */
 | |
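| /* So the caller's saved FP sits at 0(FP) and the saved return address at
|  * 4(FP); these are the two words that EM_BSIZE = 8 accounts for. */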
| 
 | |
| #define COMMENT(n) /* comment {LABEL, n} */
 | |
| 
 | |
| 
 | |
| #define nicesize(x) ((x)==INT8 || (x)==INT16 || (x)==INT32 || (x)==INT64)
 | |
| 
 | |
| #define smalls(n) sfit(n, 16)
 | |
| #define smallu(n) ufit(n, 16)
 | |
| 
 | |
| #define lo(n) ((n) & 0xFFFF)
 | |
| #define hi(n) (((n)>>16) & 0xFFFF)
 | |
| 
 | |
| /* Use these for instructions that treat the low half as signed --- his()
 | |
|  * includes a modifier to produce the correct value when the low half gets
 | |
|  * sign extended. Be sure to load the low half second. */
 | |
| #define los(n) (lo(n) | (((0-(lo(n)>>15)) & ~0xFFFF)))
 | |
| #define his(n) ((hi(n) + (lo(n)>>15)) & 0xFFFF)
 | |
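| /* For example, with n = 0x00018000: lo(n) = 0x8000, so los(n) sign-extends
|  * to 0xFFFF8000 (-0x8000), and his(n) = (0x0001 + 1) & 0xFFFF = 0x0002.
|  * Loading his(n) into the upper half and then adding los(n) gives
|  * 0x00020000 - 0x00008000 = 0x00018000, the original value. */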
| 
 | |
| 
 | |
| PROPERTIES
 | |
| 
 | |
| 	GPR             /* any GPR */
 | |
| 	REG             /* any allocatable GPR */
 | |
| 	FPR(8)          /* any FPR */
 | |
| 	FREG(8)         /* any allocatable FPR */
 | |
| 	FSREG           /* any allocatable single-precision FPR */
 | |
| 	SPR             /* any SPR */
 | |
| 	CR              /* any CR */
 | |
| 
 | |
| 
 | |
| REGISTERS
 | |
| 
 | |
| 	/* Reverse order to encourage ncg to allocate them from r31 down */
 | |
| 
 | |
| 	r31, r30, r29, r28, r27, r26,
 | |
| 	r25, r24, r23, r22, r21, r20,
 | |
| 	r19, r18, r17, r16, r15, r14,
 | |
| 	r13                             : GPR, REG regvar.
 | |
| 
 | |
| 	r12, r11, r10, r9, r8, r7,
 | |
| 	r6, r5, r4, r3                  : GPR, REG.
 | |
| 
 | |
| 	fp, sp, r0                      : GPR.
 | |
| 
 | |
| 	/* f31 to f14 are reserved for regvar. */
 | |
| 
 | |
| 	f13, f12, f11, f10, f9, f8,
 | |
| 	f7, f6, f5, f4, f3, f2, f1      : FPR, FREG.
 | |
| 
 | |
| 	f0                              : FPR.
 | |
| 
 | |
| 	fs13("f13")=f13, fs12("f12")=f12,
 | |
| 	fs11("f11")=f11, fs10("f10")=f10,
 | |
| 	fs9("f9")=f9, fs8("f8")=f8,
 | |
| 	fs7("f7")=f7, fs6("f6")=f6,
 | |
| 	fs5("f5")=f5, fs4("f4")=f4,
 | |
| 	fs3("f3")=f3, fs2("f2")=f2,
 | |
| 	fs1("f1")=f1                    : FSREG.
 | |
| 
 | |
| 	lr, ctr     : SPR.
 | |
| 	cr0         : CR.
 | |
| 
 | |
| #define RSCRATCH r0
 | |
| #define FSCRATCH f0
 | |
| 
 | |
| 
 | |
| TOKENS
 | |
| 
 | |
| /* Primitives */
 | |
| 
 | |
| 	CONST              = { INT val; }             4    val.
 | |
| 	LABEL              = { ADDR adr; }            4    adr.
 | |
| 	LABEL_HI           = { ADDR adr; }            4    "hi16[" adr "]".
 | |
| 	LABEL_HA           = { ADDR adr; }            4    "ha16[" adr "]".
 | |
| 	LABEL_LO           = { ADDR adr; }            4    "lo16[" adr "]".
 | |
| 	LOCAL              = { INT off; }             4    ">>> BUG IN LOCAL".
 | |
| 
 | |
| /* Allows us to use regvar() to refer to registers */
 | |
| 
 | |
| 	GPRE               = { GPR reg; }             4    reg.
 | |
| 
 | |
| /* Constants on the stack */
 | |
| 
 | |
| 	CONST_N8000        = { INT val; }             4.
 | |
| 	CONST_N7FFF_N0001  = { INT val; }             4.
 | |
| 	CONST_0000_7FFF    = { INT val; }             4.
 | |
| 	CONST_8000         = { INT val; }             4.
 | |
| 	CONST_8001_FFFF    = { INT val; }             4.
 | |
| 	CONST_HZ           = { INT val; }             4.
 | |
| 	CONST_HL           = { INT val; }             4.
 | |
| 
 | |
| /* Expression partial results */
 | |
| 
 | |
| 	SUM_RIS     = { GPR reg; INT offhi; }  4.   /* reg + (offhi << 16) */
 | |
| 	SUM_RC      = { GPR reg; INT off; }    4.   /* reg + off */
 | |
| 	SUM_RL      = { GPR reg; ADDR adr; }   4.   /* reg + lo16[adr] */
 | |
| 	SUM_RR      = { GPR reg1; GPR reg2; }  4.   /* reg1 + reg2 */
 | |
| 
 | |
| 	SEX_B              = { GPR reg; }             4.
 | |
| 	SEX_H              = { GPR reg; }             4.
 | |
| 
 | |
| 	IND_RC_B    = { GPR reg; INT off; }    4    off "(" reg ")".
 | |
| 	IND_RL_B    = { GPR reg; ADDR adr; }   4    "lo16[" adr "](" reg ")".
 | |
| 	IND_RR_B    = { GPR reg1; GPR reg2; }  4.
 | |
| 	IND_RC_H    = { GPR reg; INT off; }    4    off "(" reg ")".
 | |
| 	IND_RL_H    = { GPR reg; ADDR adr; }   4    "lo16[" adr "](" reg ")".
 | |
| 	IND_RR_H    = { GPR reg1; GPR reg2; }  4.
 | |
| 	IND_RC_H_S  = { GPR reg; INT off; }    4    off "(" reg ")".
 | |
| 	IND_RL_H_S  = { GPR reg; ADDR adr; }   4    "lo16[" adr "](" reg ")".
 | |
| 	IND_RR_H_S  = { GPR reg1; GPR reg2; }  4.
 | |
| 	IND_RC_W    = { GPR reg; INT off; }    4    off "(" reg ")".
 | |
| 	IND_RL_W    = { GPR reg; ADDR adr; }   4    "lo16[" adr "](" reg ")".
 | |
| 	IND_RR_W    = { GPR reg1; GPR reg2; }  4.
 | |
| 	IND_RC_D    = { GPR reg; INT off; }    8    off "(" reg ")".
 | |
| 	IND_RL_D    = { GPR reg; ADDR adr; }   8    "lo16[" adr "](" reg ")".
 | |
| 	IND_RR_D    = { GPR reg1; GPR reg2; }  8.
 | |
| 
 | |
| 	NOT_R              = { GPR reg; }             4.
 | |
| 
 | |
| 	AND_RR             = { GPR reg1; GPR reg2; }  4.
 | |
| 	OR_RR              = { GPR reg1; GPR reg2; }  4.
 | |
| 	OR_RIS             = { GPR reg; INT valhi; }  4.
 | |
| 	OR_RC              = { GPR reg; INT val; }    4.
 | |
| 	XOR_RR             = { GPR reg1; GPR reg2; }  4.
 | |
| 	XOR_RIS            = { GPR reg; INT valhi; }  4.
 | |
| 	XOR_RC             = { GPR reg; INT val; }    4.
 | |
| 
 | |
| 	COND_RC            = { GPR reg; INT val; }    4.
 | |
| 	COND_RR            = { GPR reg1; GPR reg2; }  4.
 | |
| 	CONDL_RC           = { GPR reg; INT val; }    4.
 | |
| 	CONDL_RR           = { GPR reg1; GPR reg2; }  4.
 | |
| 	COND_FS            = { FSREG reg1; FSREG reg2; } 4.
 | |
| 	COND_FD            = { FREG reg1; FREG reg2; }   4.
 | |
| 
 | |
| 	XEQ                = { GPR reg; }             4.
 | |
| 	XNE                = { GPR reg; }             4.
 | |
| 	XGT                = { GPR reg; }             4.
 | |
| 	XGE                = { GPR reg; }             4.
 | |
| 	XLT                = { GPR reg; }             4.
 | |
| 	XLE                = { GPR reg; }             4.
 | |
| 
 | |
| 
 | |
| SETS
 | |
| 
 | |
| 	/* signed 16-bit integer */
 | |
| 	CONST2          = CONST_N8000 + CONST_N7FFF_N0001 + CONST_0000_7FFF.
 | |
| 	/* integer that, when negated, fits signed 16-bit */
 | |
| 	CONST2_WHEN_NEG = CONST_N7FFF_N0001 + CONST_0000_7FFF + CONST_8000.
 | |
| 	/* unsigned 16-bit integer */
 | |
| 	UCONST2         = CONST_0000_7FFF + CONST_8000 + CONST_8001_FFFF.
 | |
| 	/* any constant on stack */
 | |
| 	CONST_STACK     = CONST_N8000 + CONST_N7FFF_N0001 + CONST_0000_7FFF +
 | |
| 	                  CONST_8000 + CONST_8001_FFFF + CONST_HZ + CONST_HL.
 | |
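| 	/* For example: -0x8000 is in CONST2 but not CONST2_WHEN_NEG, because
| 	 * +0x8000 does not fit in a signed 16-bit integer; 0xFFFF is in
| 	 * UCONST2 but not CONST2; 0x12345678 is in none of the ranges above
| 	 * and ends up as CONST_HL. */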
| 
 | |
| 	SUM_ALL            = SUM_RC + SUM_RL + SUM_RR.
 | |
| 
 | |
| 	SEX_ALL            = SEX_B + SEX_H.
 | |
| 
 | |
| 	LOGICAL_ALL        = NOT_R + AND_RR + OR_RR + OR_RC + XOR_RR +
 | |
| 	                     XOR_RC.
 | |
| 
 | |
| 	IND_ALL_B          = IND_RC_B + IND_RL_B + IND_RR_B.
 | |
| 	IND_ALL_H          = IND_RC_H + IND_RL_H + IND_RR_H +
 | |
| 	                     IND_RC_H_S + IND_RL_H_S + IND_RR_H_S.
 | |
| 	IND_ALL_W          = IND_RC_W + IND_RL_W + IND_RR_W.
 | |
| 	IND_ALL_D          = IND_RC_D + IND_RL_D + IND_RR_D.
 | |
| 	IND_ALL_BHW        = IND_ALL_B + IND_ALL_H + IND_ALL_W.
 | |
| 
 | |
| 	/* anything killed by sti (store indirect) */
 | |
| 	MEMORY             = IND_ALL_BHW + IND_ALL_D.
 | |
| 
 | |
| 	/* any stack token that we can easily move to GPR */
 | |
| 	ANY_BHW            = REG + CONST_STACK + SEX_ALL +
 | |
| 	                     SUM_ALL + IND_ALL_BHW + LOGICAL_ALL.
 | |
| 
 | |
| 
 | |
| INSTRUCTIONS
 | |
| 
 | |
|   /* We give time as cycles of total latency from Freescale
 | |
|    * Semiconductor, MPC7450 RISC Microprocessor Family Reference
 | |
|    * Manual, Rev. 5, section 6.6.
 | |
|    *
 | |
|    * We have only 4-byte alignment for doubles; 8-byte alignment is
 | |
|    * optimal.  We guess the misalignment penalty by adding 1 cycle to
 | |
|    * the cost of loading or storing a double:
 | |
|    *   lfd lfdu lfdx: 4 -> 5
 | |
|    *   stfd stfdu stfdx: 3 -> 4
 | |
|    */
 | |
|   cost(4, 1) /* space, time */
 | |
| 
 | |
|   add             GPR:wo, GPR:ro, GPR:ro.
 | |
|   addX "add."     GPR:wo, GPR:ro, GPR:ro.
 | |
|   addi            GPR:wo, GPR:ro, CONST+LABEL_LO:ro.
 | |
|     li            GPR:wo, CONST:ro.
 | |
|   addis           GPR:wo, GPR:ro, CONST+LABEL_HI+LABEL_HA:ro.
 | |
|     lis           GPR:wo, CONST+LABEL_HI+LABEL_HA:ro.
 | |
|   and             GPR:wo, GPR:ro, GPR:ro.
 | |
|   andc            GPR:wo, GPR:ro, GPR:ro.
 | |
|   andiX  "andi."  GPR:wo:cc, GPR:ro, CONST:ro.
 | |
|   andisX "andis." GPR:wo:cc, GPR:ro, CONST:ro.
 | |
|   b               LABEL:ro.
 | |
|   bc              CONST:ro, CONST:ro, LABEL:ro.
 | |
|     bdnz          LABEL:ro.
 | |
|     beq           LABEL:ro.
 | |
|     bne           LABEL:ro.
 | |
|     bgt           LABEL:ro.
 | |
|     bge           LABEL:ro.
 | |
|     blt           LABEL:ro.
 | |
|     ble           LABEL:ro.
 | |
|     bxx           LABEL:ro. /* dummy */
 | |
|   bcctr           CONST:ro, CONST:ro, CONST:ro.
 | |
|     bctr.
 | |
|   bcctrl          CONST:ro, CONST:ro, CONST:ro.
 | |
|     bctrl.
 | |
|   bclr            CONST:ro, CONST:ro, CONST:ro.
 | |
|   bl              LABEL:ro.
 | |
|   cmp             CR:ro, CONST:ro, GPR:ro, GPR:ro kills :cc.
 | |
|     cmpw          GPR:ro, GPR:ro kills :cc.
 | |
|   cmpi            CR:ro, CONST:ro, GPR:ro, CONST:ro kills :cc.
 | |
|     cmpwi         GPR:ro, CONST:ro kills :cc.
 | |
|   cmpl            CR:ro, CONST:ro, GPR:ro, GPR:ro kills :cc.
 | |
|     cmplw         GPR:ro, GPR:ro kills :cc.
 | |
|   cmpli           CR:ro, CONST:ro, GPR:ro, CONST:ro kills :cc.
 | |
|     cmplwi        GPR:ro, CONST:ro kills :cc.
 | |
|   divw            GPR:wo, GPR:ro, GPR:ro cost(4, 23).
 | |
|   divwu           GPR:wo, GPR:ro, GPR:ro cost(4, 23).
 | |
|   eqv             GPR:wo, GPR:ro, GPR:ro.
 | |
|   extsb           GPR:wo, GPR:ro.
 | |
|   extsh           GPR:wo, GPR:ro.
 | |
|   fadd            FREG:wo, FREG:ro, FREG:ro cost(4, 5).
 | |
|   fadds           FSREG:wo, FSREG:ro, FSREG:ro cost(4, 5).
 | |
|   fcmpo           CR:wo, FREG:ro, FREG:ro cost(4, 5).
 | |
|   fcmpo           CR:wo, FSREG:ro, FSREG:ro cost(4, 5).
 | |
|   fctiwz          FREG:wo, FREG:ro.
 | |
|   fdiv            FREG:wo, FREG:ro, FREG:ro cost(4, 35).
 | |
|   fdivs           FSREG:wo, FSREG:ro, FSREG:ro cost(4, 21).
 | |
|   fmr             FPR:wo, FPR:ro cost(4, 5).
 | |
|   fmr             FSREG:wo, FSREG:ro cost(4, 5).
 | |
|   fmul            FREG:wo, FREG:ro, FREG:ro cost(4, 5).
 | |
|   fmuls           FSREG:wo, FSREG:ro, FSREG:ro cost(4, 5).
 | |
|   fneg            FREG:wo, FREG:ro cost(4, 5).
 | |
|   fneg            FSREG:wo, FSREG:ro cost(4, 5).
 | |
|   frsp            FSREG:wo, FREG:ro cost(4, 5).
 | |
|   fsub            FREG:wo, FREG:ro, FREG:ro cost(4, 5).
 | |
|   fsubs           FSREG:wo, FSREG:ro, FSREG:ro cost(4, 5).
 | |
|   lbz             GPR:wo, IND_RC_B+IND_RL_B:ro cost(4, 3).
 | |
|   lbzx            GPR:wo, GPR:ro, GPR:ro cost(4, 3).
 | |
|   lfd             FPR:wo, IND_RC_D+IND_RL_D:ro cost(4, 5).
 | |
|   lfdu            FPR:wo, IND_RC_D:ro cost(4, 5).
 | |
|   lfdx            FPR:wo, GPR:ro, GPR:ro cost(4, 5).
 | |
|   lfs             FSREG:wo, IND_RC_W+IND_RL_W:ro cost(4, 4).
 | |
|   lfsu            FSREG:wo, IND_RC_W:rw cost(4, 4).
 | |
|   lfsx            FSREG:wo, GPR:ro, GPR:ro cost(4, 4).
 | |
|   lha             GPR:wo, IND_RC_H_S+IND_RL_H_S:ro cost(4, 3).
 | |
|   lhax            GPR:wo, GPR:ro, GPR:ro cost(4, 3).
 | |
|   lhz             GPR:wo, IND_RC_H+IND_RL_H:ro cost(4, 3).
 | |
|   lhzx            GPR:wo, GPR:ro, GPR:ro cost(4, 3).
 | |
|   lwzu            GPR:wo, IND_RC_W:ro cost(4, 3).
 | |
|   lwzx            GPR:wo, GPR:ro, GPR:ro cost(4, 3).
 | |
|   lwz             GPR:wo, IND_RC_W+IND_RL_W:ro cost(4, 3).
 | |
|   nand            GPR:wo, GPR:ro, GPR:ro.
 | |
|   neg             GPR:wo, GPR:ro.
 | |
|   nor             GPR:wo, GPR:ro, GPR:ro.
 | |
|   mfcr            GPR:wo cost(4,2).
 | |
|   mullw           GPR:wo, GPR:ro, GPR:ro cost(4, 4).
 | |
|   mfspr           GPR:wo, SPR:ro cost(4, 3).
 | |
|   mtspr           SPR:wo, GPR:ro cost(4, 2).
 | |
|   or              GPR:wo, GPR:ro, GPR:ro.
 | |
|     mr            GPR:wo, GPR:ro.
 | |
|   orX "or."       GPR:wo:cc, GPR:ro, GPR:ro.
 | |
|     orX_readonly "or." GPR:ro:cc, GPR:ro, GPR:ro.
 | |
|   orc             GPR:wo, GPR:ro, GPR:ro.
 | |
|   ori             GPR:wo, GPR:ro, CONST+LABEL_LO:ro.
 | |
|   oris            GPR:wo, GPR:ro, CONST:ro.
 | |
|   rlwinm          GPR:wo, GPR:ro, CONST:ro, CONST:ro, CONST:ro.
 | |
|     extlwi        GPR:wo, GPR:ro, CONST:ro, CONST:ro.
 | |
|     extrwi        GPR:wo, GPR:ro, CONST:ro, CONST:ro.
 | |
|     srwi          GPR:wo, GPR:ro, CONST:ro.
 | |
|   slw             GPR:wo, GPR:ro, GPR:ro.
 | |
|   subf            GPR:wo, GPR:ro, GPR:ro.
 | |
|   sraw            GPR:wo, GPR:ro, GPR:ro cost(4, 2).
 | |
|   srawi           GPR:wo, GPR:ro, CONST:ro cost(4, 2).
 | |
|   srw             GPR:wo, GPR:ro, GPR:ro.
 | |
|   stb             GPR:ro, IND_RC_B+IND_RL_B:rw cost(4, 3).
 | |
|   stbx            GPR:ro, GPR:ro, GPR:ro cost(4, 3).
 | |
|   stfd            FPR:ro, IND_RC_D+IND_RL_D:rw cost(4, 4).
 | |
|   stfdu           FPR:ro, IND_RC_D:rw cost(4, 4).
 | |
|   stfdx           FPR:ro, GPR:ro, GPR:ro cost(4, 4).
 | |
|   stfs            FSREG:ro, IND_RC_W+IND_RL_W:rw cost(4, 3).
 | |
|   stfsu           FSREG:ro, IND_RC_W:rw cost(4, 3).
 | |
|   stfsx           FSREG:ro, GPR:ro, GPR:ro cost(4, 3).
 | |
|   sth             GPR:ro, IND_RC_H+IND_RL_H:rw cost(4, 3).
 | |
|   sthx            GPR:ro, GPR:ro, GPR:ro cost(4, 3).
 | |
|   stw             GPR:ro, IND_RC_W+IND_RL_W:rw cost(4, 3).
 | |
|   stwx            GPR:ro, GPR:ro, GPR:ro cost(4, 3).
 | |
|   stwu            GPR+LOCAL:ro, IND_RC_W:rw cost(4, 3).
 | |
|   xor             GPR:wo, GPR:ro, GPR:ro.
 | |
|   xori            GPR:wo, GPR:ro, CONST:ro.
 | |
|   xoris           GPR:wo, GPR:ro, CONST:ro.
 | |
| 
 | |
|   comment "!"             LABEL:ro cost(0, 0).
 | |
| 
 | |
| 
 | |
| MOVES
 | |
| 
 | |
| 	from GPR to GPR
 | |
| 		gen mr %2, %1
 | |
| 
 | |
| /* Constants */
 | |
| 
 | |
| 	from CONST + CONST_STACK smalls(%val) to GPR
 | |
| 		gen
 | |
| 			COMMENT("move CONST->GPR smalls")
 | |
| 			li %2, {CONST, %1.val}
 | |
| 
 | |
| 	from CONST + CONST_STACK lo(%val)==0 to GPR
 | |
| 		gen
 | |
| 			COMMENT("move CONST->GPR shifted")
 | |
| 			lis %2, {CONST, hi(%1.val)}
 | |
| 
 | |
| 	from CONST + CONST_STACK to GPR
 | |
| 		gen
 | |
| 			COMMENT("move CONST->GPR")
 | |
| 			lis %2, {CONST, hi(%1.val)}
 | |
| 			ori %2, %2, {CONST, lo(%1.val)}
 | |
| 			/* Can't use addi %2, %2, {CONST, los(%1.val)}
 | |
| 			 * because %2 might be R0. */
 | |
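| 	/* For example, moving 0x00012345 to a GPR expands to
| 	 *   lis %2, 0x0001        (%2 = 0x00010000)
| 	 *   ori %2, %2, 0x2345    (%2 = 0x00012345)
| 	 * ori works even when %2 is R0, whereas addi would read R0 as the
| 	 * constant zero. */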
| 
 | |
| 	from LABEL to GPR
 | |
| 		gen
 | |
| 			COMMENT("move LABEL->GPR")
 | |
| 			lis %2, {LABEL_HI, %1.adr}
 | |
| 			ori %2, %2, {LABEL_LO, %1.adr}
 | |
| 
 | |
| 	from LABEL_HA to GPR
 | |
| 		gen lis %2, %1
 | |
| 
 | |
| /* Sign extension */
 | |
| 
 | |
| 	from SEX_B to GPR
 | |
| 		gen extsb %2, %1.reg
 | |
| 
 | |
| 	from SEX_H to GPR
 | |
| 		gen extsh %2, %1.reg
 | |
| 
 | |
| /* Register + something */
 | |
| 
 | |
| 	from SUM_RIS to GPR
 | |
| 		gen addis %2, %1.reg, {CONST, %1.offhi}
 | |
| 
 | |
| 	from SUM_RC to GPR
 | |
| 		gen addi %2, %1.reg, {CONST, %1.off}
 | |
| 
 | |
| 	from SUM_RL to GPR
 | |
| 		gen addi %2, %1.reg, {LABEL_LO, %1.adr}
 | |
| 
 | |
| 	from SUM_RR to GPR
 | |
| 		gen add %2, %1.reg1, %1.reg2
 | |
| 
 | |
| /* Read byte */
 | |
| 
 | |
| 	from IND_RC_B+IND_RL_B to GPR
 | |
| 		gen lbz %2, %1
 | |
| 
 | |
| 	from IND_RR_B to GPR
 | |
| 		gen lbzx %2, %1.reg1, %1.reg2
 | |
| 
 | |
| /* Write byte */
 | |
| 
 | |
| 	from GPR to IND_RC_B+IND_RL_B
 | |
| 		gen stb %1, %2
 | |
| 
 | |
| 	from GPR to IND_RR_B
 | |
| 		gen stbx %1, %2.reg1, %2.reg2
 | |
| 
 | |
| /* Read halfword (short) */
 | |
| 
 | |
| 	from IND_RC_H+IND_RL_H to GPR
 | |
| 		gen lhz %2, %1
 | |
| 
 | |
| 	from IND_RR_H to GPR
 | |
| 		gen lhzx %2, %1.reg1, %1.reg2
 | |
| 
 | |
| 	from IND_RC_H_S+IND_RL_H_S to GPR
 | |
| 		gen lha %2, %1
 | |
| 
 | |
| 	from IND_RR_H_S to GPR
 | |
| 		gen lhax %2, %1.reg1, %1.reg2
 | |
| 
 | |
| /* Write halfword */
 | |
| 
 | |
| 	from GPR to IND_RC_H+IND_RL_H
 | |
| 		gen sth %1, %2
 | |
| 
 | |
| 	from GPR to IND_RR_H
 | |
| 		gen sthx %1, %2.reg1, %2.reg2
 | |
| 
 | |
| /* Read word */
 | |
| 
 | |
| 	from IND_RC_W+IND_RL_W to GPR
 | |
| 		gen lwz %2, %1
 | |
| 
 | |
| 	from IND_RR_W to GPR
 | |
| 		gen lwzx %2, %1.reg1, %1.reg2
 | |
| 
 | |
| 	from IND_RC_W+IND_RL_W to FSREG
 | |
| 		gen lfs %2, %1
 | |
| 
 | |
| 	from IND_RR_W to FSREG
 | |
| 		gen lfsx %2, %1.reg1, %1.reg2
 | |
| 
 | |
| /* Write word */
 | |
| 
 | |
| 	from GPR to IND_RC_W+IND_RL_W
 | |
| 		gen stw %1, %2
 | |
| 
 | |
| 	from GPR to IND_RR_W
 | |
| 		gen stwx %1, %2.reg1, %2.reg2
 | |
| 
 | |
| 	from FSREG to IND_RC_W+IND_RL_W
 | |
| 		gen stfs %1, %2
 | |
| 
 | |
| 	from FSREG to IND_RR_W
 | |
| 		gen stfsx %1, %2.reg1, %2.reg2
 | |
| 
 | |
| /* Read double */
 | |
| 
 | |
| 	from IND_RC_D+IND_RL_D to FPR
 | |
| 		gen lfd %2, %1
 | |
| 
 | |
| 	from IND_RR_D to FPR
 | |
| 		gen lfdx %2, %1.reg1, %1.reg2
 | |
| 
 | |
| /* Write double */
 | |
| 
 | |
| 	from FPR to IND_RC_D+IND_RL_D
 | |
| 		gen stfd %1, %2
 | |
| 
 | |
| 	from FPR to IND_RR_D
 | |
| 		gen stfdx %1, %2.reg1, %2.reg2
 | |
| 
 | |
| /* Logicals */
 | |
| 
 | |
| 	from NOT_R to GPR
 | |
| 		gen nor %2, %1.reg, %1.reg
 | |
| 
 | |
| 	from AND_RR to GPR
 | |
| 		gen and %2, %1.reg1, %1.reg2
 | |
| 
 | |
| 	from OR_RR to GPR
 | |
| 		gen or %2, %1.reg1, %1.reg2
 | |
| 
 | |
| 	from OR_RIS to GPR
 | |
| 		gen oris %2, %1.reg, {CONST, %1.valhi}
 | |
| 
 | |
| 	from OR_RC to GPR
 | |
| 		gen ori %2, %1.reg, {CONST, %1.val}
 | |
| 
 | |
| 	from XOR_RR to GPR
 | |
| 		gen xor %2, %1.reg1, %1.reg2
 | |
| 
 | |
| 	from XOR_RIS to GPR
 | |
| 		gen xoris %2, %1.reg, {CONST, %1.valhi}
 | |
| 
 | |
| 	from XOR_RC to GPR
 | |
| 		gen xori %2, %1.reg, {CONST, %1.val}
 | |
| 
 | |
| /* Conditions */
 | |
| 
 | |
| 	/* Compare values, then copy cr0 to GPR. */
 | |
| 
 | |
| 	from COND_RC to GPR
 | |
| 		gen
 | |
| 			cmpwi %1.reg, {CONST, %1.val}
 | |
| 			mfcr %2
 | |
| 
 | |
| 	from COND_RR to GPR
 | |
| 		gen
 | |
| 			cmpw %1.reg1, %1.reg2
 | |
| 			mfcr %2
 | |
| 
 | |
| 	from CONDL_RC to GPR
 | |
| 		gen
 | |
| 			cmplwi %1.reg, {CONST, %1.val}
 | |
| 			mfcr %2
 | |
| 
 | |
| 	from CONDL_RR to GPR
 | |
| 		gen
 | |
| 			cmplw %1.reg1, %1.reg2
 | |
| 			mfcr %2
 | |
| 
 | |
| 	from COND_FS to GPR
 | |
| 		gen
 | |
| 			fcmpo cr0, %1.reg1, %1.reg2
 | |
| 			mfcr %2
 | |
| 
 | |
| 	from COND_FD to GPR
 | |
| 		gen
 | |
| 			fcmpo cr0, %1.reg1, %1.reg2
 | |
| 			mfcr %2
 | |
| 
 | |
| 	/* Given a copy of cr0 in %1.reg, extract a condition bit
 | |
| 	 * (lt, gt, eq) and perhaps flip it.
 | |
| 	 */
 | |
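| 	/* After mfcr, the cr0 field sits in the four most significant bits of
| 	 * the register: bit 0 = lt, bit 1 = gt, bit 2 = eq, bit 3 = so.
| 	 * So extrwi %2, %1, 1, 2 right-justifies the eq bit (1 if equal), and
| 	 * XNE additionally flips it with xori. */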
| 
 | |
| 	from XEQ to GPR
 | |
| 		gen
 | |
| 			extrwi %2, %1.reg, {CONST, 1}, {CONST, 2}
 | |
| 
 | |
| 	from XNE to GPR
 | |
| 		gen
 | |
| 			extrwi %2, %1.reg, {CONST, 1}, {CONST, 2}
 | |
| 			xori %2, %2, {CONST, 1}
 | |
| 
 | |
| 	from XGT to GPR
 | |
| 		gen
 | |
| 			extrwi %2, %1.reg, {CONST, 1}, {CONST, 1}
 | |
| 
 | |
| 	from XGE to GPR
 | |
| 		gen
 | |
| 			extrwi %2, %1.reg, {CONST, 1}, {CONST, 0}
 | |
| 			xori %2, %2, {CONST, 1}
 | |
| 
 | |
| 	from XLT to GPR
 | |
| 		gen
 | |
| 			extrwi %2, %1.reg, {CONST, 1}, {CONST, 0}
 | |
| 
 | |
| 	from XLE to GPR
 | |
| 		gen
 | |
| 			extrwi %2, %1.reg, {CONST, 1}, {CONST, 1}
 | |
| 			xori %2, %2, {CONST, 1}
 | |
| 
 | |
| /* GPRE exists solely to allow us to use regvar() (which can only be used in
 | |
|    an expression) as a register constant. */
 | |
| 
 | |
| 	from ANY_BHW to GPRE
 | |
| 		gen move %1, %2.reg
 | |
| 
 | |
| 
 | |
| TESTS
 | |
| 
 | |
| 	/* Given orX %1, %1, %1, ncgg says, "Instruction destroys %1,
 | |
| 	 * not allowed here".  We use orX_readonly to trick ncgg.
 | |
| 	 *
 | |
| 	 * Using "or." and not "mr." because mach/powerpc/top/table
 | |
| 	 * was optimizing "or." and not "mr.".
 | |
| 	 */
 | |
| 	to test GPR
 | |
| 		gen
 | |
| 			orX_readonly %1, %1, %1
 | |
| 
 | |
| 
 | |
| STACKINGRULES
 | |
| 
 | |
| 	from LOCAL to STACK
 | |
| 		gen
 | |
| 			COMMENT("stack LOCAL")
 | |
| 			stwu %1, {IND_RC_W, sp, 0-4}
 | |
| 
 | |
| 	from REG to STACK
 | |
| 		gen
 | |
| 			COMMENT("stack REG")
 | |
| 			stwu %1, {IND_RC_W, sp, 0-4}
 | |
| 
 | |
| 	from ANY_BHW-REG to STACK
 | |
| 		gen
 | |
| 			COMMENT("stack ANY_BHW-REG")
 | |
| 			move %1, RSCRATCH
 | |
| 			stwu RSCRATCH, {IND_RC_W, sp, 0-4}
 | |
| 
 | |
| 	from IND_ALL_D to STACK
 | |
| 		gen
 | |
| 			COMMENT("stack IND_ALL_D")
 | |
| 			move %1, FSCRATCH
 | |
| 			stfdu FSCRATCH, {IND_RC_D, sp, 0-8}
 | |
| 
 | |
| 	from FREG to STACK
 | |
| 		gen
 | |
| 			COMMENT("stack FPR")
 | |
| 			stfdu %1, {IND_RC_D, sp, 0-8}
 | |
| 
 | |
| 	from FSREG to STACK
 | |
| 		gen
 | |
| 			COMMENT("stack FSREG")
 | |
| 			stfsu %1, {IND_RC_W, sp, 0-4}
 | |
| 
 | |
| 
 | |
| 
 | |
| COERCIONS
 | |
| 
 | |
| 	from ANY_BHW
 | |
| 		uses REG
 | |
| 		gen
 | |
| 			COMMENT("coerce ANY_BHW->REG")
 | |
| 			move %1, %a
 | |
| 		yields %a
 | |
| 
 | |
| 	from STACK
 | |
| 		uses REG
 | |
| 		gen
 | |
| 			COMMENT("coerce STACK->REG")
 | |
| 			lwz %a, {IND_RC_W, sp, 0}
 | |
| 			addi sp, sp, {CONST, 4}
 | |
| 		yields %a
 | |
| 
 | |
| 	from FSREG
 | |
| 		uses FSREG
 | |
| 		gen
 | |
| 			fmr %a, %1
 | |
| 		yields %a
 | |
| 
 | |
| 	from FREG
 | |
| 		uses FREG
 | |
| 		gen
 | |
| 			fmr %a, %1
 | |
| 		yields %a
 | |
| 
 | |
| 	from STACK
 | |
| 		uses FREG
 | |
| 		gen
 | |
| 			COMMENT("coerce STACK->FREG")
 | |
| 			lfd %a, {IND_RC_D, sp, 0}
 | |
| 			addi sp, sp, {CONST, 8}
 | |
| 		yields %a
 | |
| 
 | |
| 	from STACK
 | |
| 		uses FSREG
 | |
| 		gen
 | |
| 			COMMENT("coerce STACK->FSREG")
 | |
| 			lfs %a, {IND_RC_W, sp, 0}
 | |
| 			addi sp, sp, {CONST, 4}
 | |
| 		yields %a
 | |
| 
 | |
| 	from IND_ALL_W
 | |
| 		uses FSREG
 | |
| 		gen
 | |
| 			move %1, %a
 | |
| 		yields %a
 | |
| 
 | |
| 	/*
 | |
| 	 * from IND_RC_D to REG REG is not possible, because
 | |
| 	 * %1.off+4 might overflow a signed 16-bit integer in
 | |
| 	 *   move {IND_RC_W, %1.val, %1.off+4}, %a
 | |
| 	 */
 | |
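| 	/* For example, %1.off == 0x7FFD is a legal displacement for the first
| 	 * word, but the second word would need 0x8001, which does not fit. */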
| 
 | |
| 	from IND_ALL_D
 | |
| 		uses FREG
 | |
| 		gen
 | |
| 			move %1, %a
 | |
| 		yields %a
 | |
| 
 | |
| 
 | |
| 
 | |
| PATTERNS
 | |
| 
 | |
| /* Intrinsics */
 | |
| 
 | |
| 	pat loc $1==(0-0x8000)             /* Load constant */
 | |
| 		yields {CONST_N8000, $1}
 | |
| 	pat loc $1>=(0-0x7FFF) && $1<=(0-1)
 | |
| 		yields {CONST_N7FFF_N0001, $1}
 | |
| 	pat loc $1>=0 && $1<=0x7FFF
 | |
| 		yields {CONST_0000_7FFF, $1}
 | |
| 	pat loc $1==0x8000
 | |
| 		yields {CONST_8000, $1}
 | |
| 	pat loc $1>=0x8001 && $1<=0xFFFF
 | |
| 		yields {CONST_8001_FFFF, $1}
 | |
| 	pat loc lo($1)==0
 | |
| 		yields {CONST_HZ, $1}
 | |
| 	pat loc
 | |
| 		yields {CONST_HL, $1}
 | |
| 
 | |
| 	pat dup $1==INT32                  /* Duplicate word on top of stack */
 | |
| 		with REG
 | |
| 			yields %1 %1
 | |
| 		with FSREG
 | |
| 			yields %1 %1
 | |
| 
 | |
| 	pat dup $1==INT64                  /* Duplicate double-word on top of stack */
 | |
| 		with REG REG
 | |
| 			yields %2 %1 %2 %1
 | |
| 		with FREG
 | |
| 			yields %1 %1
 | |
| 
 | |
| 	pat exg $1==INT32                  /* Exchange top two words on stack */
 | |
| 		with REG REG
 | |
| 			yields %1 %2
 | |
| 
 | |
| 	pat stl lol $1==$2                 /* Store then load local */
 | |
| 		leaving
 | |
| 			dup 4
 | |
| 			stl $1
 | |
| 
 | |
| 	pat sdl ldl $1==$2                 /* Store then load double local */
 | |
| 		leaving
 | |
| 			dup 8
 | |
| 			sdl $1
 | |
| 
 | |
| 	pat lal sti lal loi $1==$3 && $2==$4 /* Store then load local, of a different size */
 | |
| 		leaving
 | |
| 			dup INT32
 | |
| 			lal $1
 | |
| 			sti $2
 | |
| 
 | |
| 	pat ste loe $1==$2                 /* Store then load external */
 | |
| 		leaving
 | |
| 			dup 4
 | |
| 			ste $1
 | |
| 
 | |
| 
 | |
| /* Type conversions */
 | |
| 
 | |
| 	pat loc loc ciu                    /* signed X -> unsigned X */
 | |
| 		leaving
 | |
| 			loc $1
 | |
| 			loc $2
 | |
| 			cuu
 | |
| 
 | |
| 	pat loc loc cuu $1==$2             /* unsigned X -> unsigned X */
 | |
| 		/* nop */
 | |
| 
 | |
| 	pat loc loc cii $1==$2             /* signed X -> signed X */
 | |
| 		/* nop */
 | |
| 
 | |
| 	pat loc loc cui $1==$2             /* unsigned X -> signed X */
 | |
| 		/* nop */
 | |
| 
 | |
| 	pat loc loc cui $1==INT8 && $2==INT32 /* unsigned char -> signed int */
 | |
| 		/* nop */
 | |
| 
 | |
| 	pat loc loc cui $1==INT16 && $2==INT32 /* unsigned short -> signed int */
 | |
| 		/* nop */
 | |
| 
 | |
| 	pat loc loc cii $1==INT8 && $2==INT32 /* signed char -> signed int */
 | |
| 		with REG
 | |
| 			yields {SEX_B, %1}
 | |
| 
 | |
| 	pat loc loc cii $1==INT16 && $2==INT32 /* signed short -> signed int */
 | |
| 		with REG
 | |
| 			yields {SEX_H, %1}
 | |
| 
 | |
| 
 | |
| /* Local variables */
 | |
| 
 | |
| 	pat lal smalls($1)                 /* Load address of local */
 | |
| 		yields {SUM_RC, fp, $1}
 | |
| 
 | |
| 	pat lal                            /* Load address of local */
 | |
| 		uses REG={SUM_RIS, fp, his($1)}
 | |
| 		yields {SUM_RC, %a, los($1)}
 | |
| 
 | |
| 	pat lol inreg($1)>0                /* Load from local */
 | |
| 		yields {LOCAL, $1}
 | |
| 
 | |
| 	pat lol                            /* Load from local */
 | |
| 		leaving
 | |
| 			lal $1
 | |
| 			loi INT32
 | |
| 
 | |
| 	pat ldl                            /* Load double-word from local */
 | |
| 		leaving
 | |
| 			lal $1
 | |
| 			loi INT32*2
 | |
| 
 | |
| 	pat stl inreg($1)>0                /* Store to local */
 | |
| 		with ANY_BHW
 | |
| 			kills regvar($1), LOCAL %off==$1
 | |
| 			gen
 | |
| 				move %1, {GPRE, regvar($1)}
 | |
| 
 | |
| 	pat stl                            /* Store to local */
 | |
| 		leaving
 | |
| 			lal $1
 | |
| 			sti INT32
 | |
| 
 | |
| 	pat sdl                            /* Store double-word to local */
 | |
| 		leaving
 | |
| 			lal $1
 | |
| 			sti INT32*2
 | |
| 
 | |
| 	pat lil inreg($1)>0                /* Load from indirected local */
 | |
| 		yields {IND_RC_W, regvar($1), 0}
 | |
| 
 | |
| 	pat lil                            /* Load from indirected local */
 | |
| 		leaving
 | |
| 			lol $1
 | |
| 			loi INT32
 | |
| 
 | |
| 	pat sil                            /* Save to indirected local */
 | |
| 		leaving
 | |
| 			lol $1
 | |
| 			sti INT32
 | |
| 
 | |
| 	pat zrl                             /* Zero local */
 | |
| 		leaving
 | |
| 			loc 0
 | |
| 			stl $1
 | |
| 
 | |
| 	pat inl                             /* Increment local */
 | |
| 		leaving
 | |
| 			lol $1
 | |
| 			loc 1
 | |
| 			adi 4
 | |
| 			stl $1
 | |
| 
 | |
| 	pat del                             /* Decrement local */
 | |
| 		leaving
 | |
| 			lol $1
 | |
| 			loc 1
 | |
| 			sbi 4
 | |
| 			stl $1
 | |
| 
 | |
| 
 | |
| /* Global variables */
 | |
| 
 | |
| 	pat lpi                            /* Load address of external function */
 | |
| 		leaving
 | |
| 			lae $1
 | |
| 
 | |
| 	pat lae                            /* Load address of external */
 | |
| 		uses REG={LABEL_HA, $1}
 | |
| 		yields {SUM_RL, %a, $1}
 | |
| 
 | |
| 	pat loe                            /* Load word external */
 | |
| 		leaving
 | |
| 			lae $1
 | |
| 			loi INT32
 | |
| 
 | |
| 	pat ste                            /* Store word external */
 | |
| 		leaving
 | |
| 			lae $1
 | |
| 			sti INT32
 | |
| 
 | |
| 	pat lde                            /* Load double-word external */
 | |
| 		leaving
 | |
| 			lae $1
 | |
| 			loi INT64
 | |
| 
 | |
| 	pat sde                            /* Store double-word external */
 | |
| 		leaving
 | |
| 			lae $1
 | |
| 			sti INT64
 | |
| 
 | |
| 	pat zre                             /* Zero external */
 | |
| 		leaving
 | |
| 			loc 0
 | |
| 			ste $1
 | |
| 
 | |
| 	pat ine                             /* Increment external */
 | |
| 		leaving
 | |
| 			loe $1
 | |
| 			inc
 | |
| 			ste $1
 | |
| 
 | |
| 	pat dee                             /* Decrement external */
 | |
| 		leaving
 | |
| 			loe $1
 | |
| 			dec
 | |
| 			ste $1
 | |
| 
 | |
| 
 | |
| /* Structures */
 | |
| 
 | |
| 	pat lof                            /* Load word offsetted */
 | |
| 		leaving
 | |
| 			adp $1
 | |
| 			loi INT32
 | |
| 
 | |
| 	pat ldf                            /* Load double-word offsetted */
 | |
| 		leaving
 | |
| 			adp $1
 | |
| 			loi INT64
 | |
| 
 | |
| 	pat stf                            /* Store word offsetted */
 | |
| 		leaving
 | |
| 			adp $1
 | |
| 			sti INT32
 | |
| 
 | |
| 	pat sdf                            /* Store double-word offsetted */
 | |
| 		leaving
 | |
| 			adp $1
 | |
| 			sti INT64
 | |
| 
 | |
| 
 | |
| /* Loads and stores */
 | |
| 
 | |
| 	pat loi $1==INT8                   /* Load byte indirect */
 | |
| 		with REG
 | |
| 			yields {IND_RC_B, %1, 0}
 | |
| 		with exact SUM_RC
 | |
| 			yields {IND_RC_B, %1.reg, %1.off}
 | |
| 		with exact SUM_RL
 | |
| 			yields {IND_RL_B, %1.reg, %1.adr}
 | |
| 		with exact SUM_RR
 | |
| 			yields {IND_RR_B, %1.reg1, %1.reg2}
 | |
| 
 | |
| 	pat loi loc loc cii $1==INT16 && $2==INT16 && $3==INT32
 | |
| 	/* Load half-word indirect and sign extend */
 | |
| 		with REG
 | |
| 			yields {IND_RC_H_S, %1, 0}
 | |
| 		with exact SUM_RC
 | |
| 			yields {IND_RC_H_S, %1.reg, %1.off}
 | |
| 		with exact SUM_RL
 | |
| 			yields {IND_RL_H_S, %1.reg, %1.adr}
 | |
| 		with exact SUM_RR
 | |
| 			yields {IND_RR_H_S, %1.reg1, %1.reg2}
 | |
| 
 | |
| 	pat loi $1==INT16                  /* Load half-word indirect */
 | |
| 		with REG
 | |
| 			yields {IND_RC_H, %1, 0}
 | |
| 		with exact SUM_RC
 | |
| 			yields {IND_RC_H, %1.reg, %1.off}
 | |
| 		with exact SUM_RL
 | |
| 			yields {IND_RL_H, %1.reg, %1.adr}
 | |
| 		with exact SUM_RR
 | |
| 			yields {IND_RR_H, %1.reg1, %1.reg2}
 | |
| 
 | |
| 	pat loi $1==INT32                  /* Load word indirect */
 | |
| 		with REG
 | |
| 			yields {IND_RC_W, %1, 0}
 | |
| 		with exact SUM_RC
 | |
| 			yields {IND_RC_W, %1.reg, %1.off}
 | |
| 		with exact SUM_RL
 | |
| 			yields {IND_RL_W, %1.reg, %1.adr}
 | |
| 		with exact SUM_RR
 | |
| 			yields {IND_RR_W, %1.reg1, %1.reg2}
 | |
| 
 | |
| 	pat loi $1==INT64                  /* Load double-word indirect */
 | |
| 		with REG
 | |
| 			yields {IND_RC_D, %1, 0}
 | |
| 		with exact SUM_RC
 | |
| 			yields {IND_RC_D, %1.reg, %1.off}
 | |
| 		with exact SUM_RL
 | |
| 			yields {IND_RL_D, %1.reg, %1.adr}
 | |
| 		with exact SUM_RR
 | |
| 			yields {IND_RR_D, %1.reg1, %1.reg2}
 | |
| 
 | |
| 	pat loi                            /* Load arbitrary size */
 | |
| 		leaving
 | |
| 			loc $1
 | |
| 			los 4
 | |
| 
 | |
| 	pat los $1==4                      /* Load arbitrary size */
 | |
| 		with REG STACK
 | |
| 			kills ALL
 | |
| 			gen
 | |
| 				move %1, r3
 | |
| 				bl {LABEL, ".los4"}
 | |
| 
 | |
| 	pat sti $1==INT8                   /* Store byte indirect */
 | |
| 		with REG REG
 | |
| 			kills MEMORY
 | |
| 			gen move %2, {IND_RC_B, %1, 0}
 | |
| 		with SUM_RC REG
 | |
| 			kills MEMORY
 | |
| 			gen move %2, {IND_RC_B, %1.reg, %1.off}
 | |
| 		with SUM_RL REG
 | |
| 			kills MEMORY
 | |
| 			gen move %2, {IND_RL_B, %1.reg, %1.adr}
 | |
| 		with SUM_RR REG
 | |
| 			kills MEMORY
 | |
| 			gen move %2, {IND_RR_B, %1.reg1, %1.reg2}
 | |
| 
 | |
| 	pat sti $1==INT16                  /* Store half-word indirect */
 | |
| 		with REG REG
 | |
| 			kills MEMORY
 | |
| 			gen move %2, {IND_RC_H, %1, 0}
 | |
| 		with SUM_RC REG
 | |
| 			kills MEMORY
 | |
| 			gen move %2, {IND_RC_H, %1.reg, %1.off}
 | |
| 		with SUM_RL REG
 | |
| 			kills MEMORY
 | |
| 			gen move %2, {IND_RL_H, %1.reg, %1.adr}
 | |
| 		with SUM_RR REG
 | |
| 			kills MEMORY
 | |
| 			gen move %2, {IND_RR_H, %1.reg1, %1.reg2}
 | |
| 
 | |
| 	pat sti $1==INT32                  /* Store word indirect */
 | |
| 		with REG REG+FSREG
 | |
| 			kills MEMORY
 | |
| 			gen move %2, {IND_RC_W, %1, 0}
 | |
| 		with SUM_RC REG+FSREG
 | |
| 			kills MEMORY
 | |
| 			gen move %2, {IND_RC_W, %1.reg, %1.off}
 | |
| 		with SUM_RL REG+FSREG
 | |
| 			kills MEMORY
 | |
| 			gen move %2, {IND_RL_W, %1.reg, %1.adr}
 | |
| 		with SUM_RR REG+FSREG
 | |
| 			kills MEMORY
 | |
| 			gen move %2, {IND_RR_W, %1.reg1, %1.reg2}
 | |
| 
 | |
| 	pat sti $1==INT64                  /* Store double-word indirect */
 | |
| 		with REG FREG
 | |
| 			kills MEMORY
 | |
| 			gen move %2, {IND_RC_D, %1, 0}
 | |
| 		with SUM_RC FREG
 | |
| 			kills MEMORY
 | |
| 			gen move %2, {IND_RC_D, %1.reg, %1.off}
 | |
| 		with SUM_RL FREG
 | |
| 			kills MEMORY
 | |
| 			gen move %2, {IND_RL_D, %1.reg, %1.adr}
 | |
| 		with SUM_RR FREG
 | |
| 			kills MEMORY
 | |
| 			gen move %2, {IND_RR_D, %1.reg1, %1.reg2}
 | |
| 		with REG REG REG
 | |
| 			kills MEMORY
 | |
| 			gen
 | |
| 				move %2, {IND_RC_W, %1, 0}
 | |
| 				move %3, {IND_RC_W, %1, 4}
 | |
| 		/*
 | |
| 		 * Next 2 patterns exist because there is no coercion
 | |
| 		 * from IND_ALL_D to REG REG.
 | |
| 		 */
 | |
| 		with REG IND_RC_D
 | |
| 			kills MEMORY
 | |
| 			uses REG={SUM_RC, %2.reg, %2.off}, REG, REG
 | |
| 			gen
 | |
| 				move {IND_RC_W, %a, 0}, %b
 | |
| 				move {IND_RC_W, %a, 4}, %c
 | |
| 				move %b, {IND_RC_W, %1, 0}
 | |
| 				move %c, {IND_RC_W, %1, 4}
 | |
| 		with REG IND_RR_D
 | |
| 			kills MEMORY
 | |
| 			uses REG={SUM_RR, %2.reg1, %2.reg2}, REG, REG
 | |
| 			gen
 | |
| 				move {IND_RC_W, %a, 0}, %b
 | |
| 				move {IND_RC_W, %a, 4}, %c
 | |
| 				move %b, {IND_RC_W, %1, 0}
 | |
| 				move %c, {IND_RC_W, %1, 4}
 | |
| 
 | |
| 	pat sti                            /* Store arbitrary size */
 | |
| 		leaving
 | |
| 			loc $1
 | |
| 			sts 4
 | |
| 
 | |
| 	pat sts $1==4                      /* Store arbitrary size */
 | |
| 		with REG STACK
 | |
| 			kills ALL
 | |
| 			gen
 | |
| 				move %1, r3
 | |
| 				bl {LABEL, ".sts4"}
 | |
| 
 | |
| 
 | |
| /* Arithmetic wrappers */
 | |
| 
 | |
| 	pat ads $1==4                      /* Add var to pointer */
 | |
| 		leaving adi $1
 | |
| 
 | |
| 	pat sbs $1==4                      /* Subtract var from pointer */
 | |
| 		leaving sbi $1
 | |
| 
 | |
| 	pat adp                            /* Add constant to pointer */
 | |
| 		leaving
 | |
| 			loc $1
 | |
| 			adi 4
 | |
| 
 | |
| 	pat adu                            /* Add unsigned */
 | |
| 		leaving
 | |
| 			adi $1
 | |
| 
 | |
| 	pat sbu                            /* Subtract unsigned */
 | |
| 		leaving
 | |
| 			sbi $1
 | |
| 
 | |
| 	pat inc                            /* Add 1 */
 | |
| 		leaving
 | |
| 			loc 1
 | |
| 			adi 4
 | |
| 
 | |
| 	pat dec                            /* Subtract 1 */
 | |
| 		leaving
 | |
| 			loc 1
 | |
| 			sbi 4
 | |
| 
 | |
| 	pat mlu                            /* Multiply unsigned */
 | |
| 		leaving
 | |
| 			mli $1
 | |
| 
 | |
| 	pat slu                            /* Shift left unsigned */
 | |
| 		leaving
 | |
| 			sli $1
 | |
| 
 | |
| 
 | |
| /* Word arithmetic */
 | |
| 
 | |
| 	pat adi $1==4                      /* Add word (second + top) */
 | |
| 		with REG REG
 | |
| 			yields {SUM_RR, %1, %2}
 | |
| 		with CONST2 REG
 | |
| 			yields {SUM_RC, %2, %1.val}
 | |
| 		with REG CONST2
 | |
| 			yields {SUM_RC, %1, %2.val}
 | |
| 		with CONST_HZ REG
 | |
| 			uses reusing %2, REG={SUM_RIS, %2, his(%1.val)}
 | |
| 			yields %a
 | |
| 		with REG CONST_HZ
 | |
| 			uses reusing %1, REG={SUM_RIS, %1, his(%2.val)}
 | |
| 			yields %a
 | |
| 		with CONST_STACK-CONST2-CONST_HZ REG
 | |
| 			uses reusing %2, REG={SUM_RIS, %2, his(%1.val)}
 | |
| 			yields {SUM_RC, %a, los(%1.val)}
 | |
| 		with REG CONST_STACK-CONST2-CONST_HZ
 | |
| 			uses reusing %1, REG={SUM_RIS, %1, his(%2.val)}
 | |
| 			yields {SUM_RC, %a, los(%2.val)}
 | |
| 
 | |
| 	pat sbi $1==4                      /* Subtract word (second - top) */
 | |
| 		with REG REG
 | |
| 			uses reusing %2, REG
 | |
| 			gen
 | |
| 				subf %a, %1, %2
 | |
| 			yields %a
 | |
| 		with CONST2_WHEN_NEG REG
 | |
| 			yields {SUM_RC, %2, 0-%1.val}
 | |
| 		with CONST_HZ REG
 | |
| 			uses reusing %2, REG={SUM_RIS, %2, his(0-%1.val)}
 | |
| 			yields %a
 | |
| 		with CONST_STACK-CONST2_WHEN_NEG-CONST_HZ REG
 | |
| 			uses reusing %2, REG={SUM_RIS, %2, his(0-%1.val)}
 | |
| 			yields {SUM_RC, %a, los(0-%1.val)}
 | |
| 
 | |
| 	pat ngi $1==4                      /* Negate word */
 | |
| 		with REG
 | |
| 			uses reusing %1, REG
 | |
| 			gen
 | |
| 				neg %a, %1
 | |
| 			yields %a
 | |
| 
 | |
| 	pat mli $1==4                      /* Multiply word (second * top) */
 | |
| 		with REG REG
 | |
| 			uses reusing %2, REG
 | |
| 			gen
 | |
| 				mullw %a, %2, %1
 | |
| 			yields %a
 | |
| 
 | |
| 	pat dvi $1==4                      /* Divide word (second / top) */
 | |
| 		with REG REG
 | |
| 			uses reusing %2, REG
 | |
| 			gen
 | |
| 				divw %a, %2, %1
 | |
| 			yields %a
 | |
| 
 | |
| 	pat dvu $1==4                      /* Divide unsigned word (second / top) */
 | |
| 		with REG REG
 | |
| 			uses reusing %2, REG
 | |
| 			gen
 | |
| 				divwu %a, %2, %1
 | |
| 			yields %a
 | |
| 
 | |
| 	pat rmi $1==4                      /* Remainder word (second % top) */
 | |
| 		with REG REG
 | |
| 			uses REG
 | |
| 			gen
 | |
| 				divw %a, %2, %1
 | |
| 				mullw %a, %a, %1
 | |
| 				subf %a, %a, %2
 | |
| 			yields %a
 | |
| 
 | |
| 	pat rmu $1==4                      /* Remainder unsigned word (second % top) */
 | |
| 		with REG REG
 | |
| 			uses REG
 | |
| 			gen
 | |
| 				divwu %a, %2, %1
 | |
| 				mullw %a, %a, %1
 | |
| 				subf %a, %a, %2
 | |
| 			yields %a
 | |
| 
 | |
| 	pat and $1==4                      /* AND word */
 | |
| 		with REG NOT_R
 | |
| 			uses reusing %1, REG
 | |
| 			gen
 | |
| 				andc %a, %1, %2.reg
 | |
| 			yields %a
 | |
| 		with NOT_R REG
 | |
| 			uses reusing %1, REG
 | |
| 			gen
 | |
| 				andc %a, %2, %1.reg
 | |
| 			yields %a
 | |
| 		with REG REG
 | |
| 			yields {AND_RR, %1, %2}
 | |
| 		with REG UCONST2
 | |
| 			uses reusing %1, REG
 | |
| 			gen
 | |
| 				andiX %a, %1, {CONST, %2.val}
 | |
| 			yields %a
 | |
| 		with UCONST2 REG
 | |
| 			uses reusing %2, REG
 | |
| 			gen
 | |
| 				andiX %a, %2, {CONST, %1.val}
 | |
| 			yields %a
 | |
| 		with REG CONST_HZ
 | |
| 			uses reusing %1, REG
 | |
| 			gen
 | |
| 				andisX %a, %1, {CONST, hi(%2.val)}
 | |
| 			yields %a
 | |
| 		with CONST_HZ REG
 | |
| 			uses reusing %2, REG
 | |
| 			gen
 | |
| 				andisX %a, %2, {CONST, hi(%1.val)}
 | |
| 			yields %a
 | |
| 
 | |
| 	pat and defined($1)                /* AND set */
 | |
| 		leaving
 | |
| 			loc $1
 | |
| 			cal ".and"
 | |
| 
 | |
| 	pat and !defined($1)
 | |
| 		leaving
 | |
| 			cal ".and"
 | |
| 
 | |
| 	pat ior $1==4                      /* OR word */
 | |
| 		with REG NOT_R
 | |
| 			uses reusing %1, REG
 | |
| 			gen
 | |
| 				orc %a, %1, %2.reg
 | |
| 			yields %a
 | |
| 		with NOT_R REG
 | |
| 			uses reusing %2, REG
 | |
| 			gen
 | |
| 				orc %a, %2, %1.reg
 | |
| 			yields %a
 | |
| 		with REG REG
 | |
| 			yields {OR_RR, %1, %2}
 | |
| 		with REG UCONST2
 | |
| 			yields {OR_RC, %1, %2.val}
 | |
| 		with UCONST2 REG
 | |
| 			yields {OR_RC, %2, %1.val}
 | |
| 		with REG CONST_HZ
 | |
| 			uses reusing %1, REG={OR_RIS, %1, hi(%2.val)}
 | |
| 			yields %a
 | |
| 		with CONST_HZ REG
 | |
| 			uses reusing %2, REG={OR_RIS, %2, hi(%1.val)}
 | |
| 			yields %a
 | |
| 		with REG CONST_STACK-UCONST2-CONST_HZ
 | |
| 			uses reusing %1, REG={OR_RIS, %1, hi(%2.val)}
 | |
| 			yields {OR_RC, %a, lo(%2.val)}
 | |
| 		with CONST_STACK-UCONST2-CONST_HZ REG
 | |
| 			uses reusing %2, REG={OR_RIS, %2, hi(%1.val)}
 | |
| 			yields {OR_RC, %a, lo(%1.val)}
 | |
| 
 | |
| 	pat ior defined($1)                /* OR set */
 | |
| 		leaving
 | |
| 			loc $1
 | |
| 			cal ".ior"
 | |
| 
 | |
| 	/* OR set (variable), used in lang/m2/libm2/LtoUset.e */
 | |
| 	pat ior !defined($1)
 | |
| 		leaving
 | |
| 			cal ".ior"
 | |
| 
 | |
| 	pat xor $1==4                      /* XOR word */
 | |
| 		with REG REG
 | |
| 			yields {XOR_RR, %1, %2}
 | |
| 		with REG UCONST2
 | |
| 			yields {XOR_RC, %1, %2.val}
 | |
| 		with UCONST2 REG
 | |
| 			yields {XOR_RC, %2, %1.val}
 | |
| 		with REG CONST_HZ
 | |
| 			uses reusing %1, REG={XOR_RIS, %1, hi(%2.val)}
 | |
| 			yields %a
 | |
| 		with CONST_HZ REG
 | |
| 			uses reusing %2, REG={XOR_RIS, %2, hi(%1.val)}
 | |
| 			yields %a
 | |
| 		with REG CONST_STACK-UCONST2-CONST_HZ
 | |
| 			uses reusing %1, REG={XOR_RIS, %1, hi(%2.val)}
 | |
| 			yields {XOR_RC, %a, lo(%2.val)}
 | |
| 		with CONST_STACK-UCONST2-CONST_HZ REG
 | |
| 			uses reusing %2, REG={XOR_RIS, %2, hi(%1.val)}
 | |
| 			yields {XOR_RC, %a, lo(%1.val)}
 | |
| 
 | |
| 	pat xor defined($1)                /* XOR set */
 | |
| 		leaving
 | |
| 			loc $1
 | |
| 			cal ".xor"
 | |
| 
 | |
| 	pat xor !defined($1)
 | |
| 		leaving
 | |
| 			cal ".xor"
 | |
| 
 | |
| 	pat com $1==INT32                  /* NOT word */
 | |
| 		with AND_RR
 | |
| 			uses REG
 | |
| 			gen
 | |
| 				nand %a, %1.reg1, %1.reg2
 | |
| 			yields %a
 | |
| 		with OR_RR
 | |
| 			uses REG
 | |
| 			gen
 | |
| 				nor %a, %1.reg1, %1.reg2
 | |
| 			yields %a
 | |
| 		with XOR_RR
 | |
| 			uses REG
 | |
| 			gen
 | |
| 				eqv %a, %1.reg1, %1.reg2
 | |
| 			yields %a
 | |
| 		with REG
 | |
| 			yields {NOT_R, %1}
 | |
| 
 | |
| 	pat com defined($1)                /* NOT set */
 | |
| 		leaving
 | |
| 			loc $1
 | |
| 			cal ".com"
 | |
| 
 | |
| 	pat com !defined($1)
 | |
| 		leaving
 | |
| 			cal ".com"
 | |
| 
 | |
| 	pat zer $1==4                      /* Push zero */
 | |
| 		leaving
 | |
| 			loc 0
 | |
| 
 | |
| 	pat zer defined($1)	   	           /* Create empty set */
 | |
| 		leaving
 | |
| 			loc $1
 | |
| 			cal ".zer"
 | |
| 
 | |
| 	pat sli $1==4                      /* Shift left (second << top) */
 | |
| 		with CONST_STACK REG
 | |
| 			uses reusing %2, REG
 | |
| 			gen
 | |
| 				rlwinm %a, %2, {CONST, (%1.val & 0x1F)}, {CONST, 0}, {CONST, 31-(%1.val & 0x1F)}
 | |
| 			yields %a
 | |
| 		with REG REG
 | |
| 			uses reusing %2, REG
 | |
| 			gen
 | |
| 				slw %a, %2, %1
 | |
| 			yields %a
 | |
| 
 | |
| 	pat sri $1==4                      /* Shift right signed (second >> top) */
 | |
| 		with CONST_STACK REG
 | |
| 			uses reusing %2, REG
 | |
| 			gen
 | |
| 				srawi %a, %2, {CONST, %1.val & 0x1F}
 | |
| 			yields %a
 | |
| 		with REG REG
 | |
| 			uses reusing %2, REG
 | |
| 			gen
 | |
| 				sraw %a, %2, %1
 | |
| 			yields %a
 | |
| 
 | |
| 	pat sru $1==4                      /* Shift right unsigned (second >> top) */
 | |
| 		with CONST_STACK REG
 | |
| 			uses reusing %2, REG
 | |
| 			gen
 | |
| 				rlwinm %a, %2, {CONST, 32-(%1.val & 0x1F)}, {CONST, (%1.val & 0x1F)}, {CONST, 31}
 | |
| 			yields %a
 | |
| 		with REG REG
 | |
| 			uses reusing %2, REG
 | |
| 			gen
 | |
| 				srw %a, %2, %1
 | |
| 			yields %a
 | |
| 
 | |
| 
 | |
| /* Arrays */
 | |
| 
 | |
| 	pat aar $1==4                      /* Address of array element */
 | |
| 		leaving
 | |
| 			cal ".aar4"
 | |
| 
 | |
| 	pat lar $1==4                      /* Load from array */
 | |
| 		with STACK
 | |
| 			kills ALL
 | |
| 			gen
 | |
| 				bl {LABEL, ".aar4"}
 | |
| 				/* pass r3 = size from .aar4 to .los4 */
 | |
| 				bl {LABEL, ".los4"}
 | |
| 
 | |
| 	pat lae lar $2==4 && nicesize(rom($1, 3))
 | |
| 		leaving
 | |
| 			lae $1
 | |
| 			aar 4
 | |
| 			loi rom($1, 3)
 | |
| 
 | |
| 	pat sar $1==4                      /* Store to array */
 | |
| 		with STACK
 | |
| 			kills ALL
 | |
| 			gen
 | |
| 				bl {LABEL, ".aar4"}
 | |
| 				/* pass r3 = size from .aar4 to .sts4 */
 | |
| 				bl {LABEL, ".sts4"}
 | |
| 
 | |
| 	pat lae sar $2==4 && nicesize(rom($1, 3))
 | |
| 		leaving
 | |
| 			lae $1
 | |
| 			aar 4
 | |
| 			sti rom($1, 3)
 | |
| 
 | |
| 
 | |
| /* Sets */
 | |
| 
 | |
| 	pat set defined($1)                /* Create singleton set */
 | |
| 		leaving
 | |
| 			loc $1
 | |
| 			cal ".set"
 | |
| 
 | |
| 	/* Create set (variable), used in lang/m2/libm2/LtoUset.e */
 | |
| 	pat set !defined($1)
 | |
| 		leaving
 | |
| 			cal ".set"
 | |
| 
 | |
| 	pat inn defined($1)                /* Test for set bit */
 | |
| 		leaving
 | |
| 			loc $1
 | |
| 			cal ".inn"
 | |
| 
 | |
| 	pat inn !defined($1)
 | |
| 		leaving
 | |
| 			cal ".inn"
 | |
| 
 | |
| 
 | |
| /* Boolean resolutions */
 | |
| 
 | |
| 	pat teq                            /* top = (top == 0) */
 | |
| 		with REG
 | |
| 			uses reusing %1, REG
 | |
| 			gen
 | |
| 				test %1
 | |
| 				mfcr %a
 | |
| 				move {XEQ, %a}, %a
 | |
| 			yields %a
 | |
| 
 | |
| 	pat tne                            /* top = (top != 0) */
 | |
| 		with REG
 | |
| 			uses reusing %1, REG
 | |
| 			gen
 | |
| 				test %1
 | |
| 				mfcr %a
 | |
| 				move {XNE, %a}, %a
 | |
| 			yields %a
 | |
| 
 | |
| 	pat tlt                            /* top = (top < 0) */
 | |
| 		with REG
 | |
| 			uses reusing %1, REG
 | |
| 			gen
 | |
| 				test %1
 | |
| 				mfcr %a
 | |
| 				move {XLT, %a}, %a
 | |
| 			yields %a
 | |
| 
 | |
| 	pat tle                            /* top = (top <= 0) */
 | |
| 		with REG
 | |
| 			uses reusing %1, REG
 | |
| 			gen
 | |
| 				test %1
 | |
| 				mfcr %a
 | |
| 				move {XLE, %a}, %a
 | |
| 			yields %a
 | |
| 
 | |
| 	pat tgt                            /* top = (top > 0) */
 | |
| 		with REG
 | |
| 			uses reusing %1, REG
 | |
| 			gen
 | |
| 				test %1
 | |
| 				mfcr %a
 | |
| 				move {XGT, %a}, %a
 | |
| 			yields %a
 | |
| 
 | |
| 	pat tge                            /* top = (top >= 0) */
 | |
| 		with REG
 | |
| 			uses reusing %1, REG
 | |
| 			gen
 | |
| 				test %1
 | |
| 				mfcr %a
 | |
| 				move {XGE, %a}, %a
 | |
| 			yields %a
 | |
| 
 | |
| 	pat cmi teq $1==4                  /* Signed second == top */
 | |
| 		with REG CONST2
 | |
| 			uses reusing %1, REG={COND_RC, %1, %2.val}
 | |
| 			gen move {XEQ, %a}, %a
 | |
| 			yields %a
 | |
| 		with CONST2 REG
 | |
| 			uses reusing %1, REG={COND_RC, %2, %1.val}
 | |
| 			gen move {XEQ, %a}, %a
 | |
| 			yields %a
 | |
| 		with REG REG
 | |
| 			uses reusing %1, REG={COND_RR, %2, %1}
 | |
| 			gen move {XEQ, %a}, %a
 | |
| 			yields %a
 | |
| 
 | |
| 	pat cmi tne $1==4                  /* Signed second != top */
 | |
| 		with REG CONST2
 | |
| 			uses reusing %1, REG={COND_RC, %1, %2.val}
 | |
| 			gen move {XNE, %a}, %a
 | |
| 			yields %a
 | |
| 		with CONST2 REG
 | |
| 			uses reusing %1, REG={COND_RC, %2, %1.val}
 | |
| 			gen move {XNE, %a}, %a
 | |
| 			yields %a
 | |
| 		with REG REG
 | |
| 			uses reusing %1, REG={COND_RR, %2, %1}
 | |
| 			gen move {XNE, %a}, %a
 | |
| 			yields %a
 | |
| 
 | |
| 	pat cmi tgt $1==4                  /* Signed second > top */
 | |
| 		with REG CONST2
 | |
| 			uses reusing %1, REG={COND_RC, %1, %2.val}
 | |
| 			gen move {XLT, %a}, %a
 | |
| 			yields %a
 | |
| 		with CONST2 REG
 | |
| 			uses reusing %1, REG={COND_RC, %2, %1.val}
 | |
| 			gen move {XGT, %a}, %a
 | |
| 			yields %a
 | |
| 		with REG REG
 | |
| 			uses reusing %1, REG={COND_RR, %2, %1}
 | |
| 			gen move {XGT, %a}, %a
 | |
| 			yields %a
 | |
| 
 | |
| 	pat cmi tge $1==4                  /* Signed second >= top */
 | |
| 		with REG CONST2
 | |
| 			uses reusing %1, REG={COND_RC, %1, %2.val}
 | |
| 			gen move {XLE, %a}, %a
 | |
| 			yields %a
 | |
| 		with CONST2 REG
 | |
| 			uses reusing %1, REG={COND_RC, %2, %1.val}
 | |
| 			gen move {XGE, %a}, %a
 | |
| 			yields %a
 | |
| 		with REG REG
 | |
| 			uses reusing %1, REG={COND_RR, %2, %1}
 | |
| 			gen move {XGE, %a}, %a
 | |
| 			yields %a
 | |
| 
 | |
| 	pat cmi tlt $1==4                  /* Signed second < top */
 | |
| 		with REG CONST2
 | |
| 			uses reusing %1, REG={COND_RC, %1, %2.val}
 | |
| 			gen move {XGT, %a}, %a
 | |
| 			yields %a
 | |
| 		with CONST2 REG
 | |
| 			uses reusing %1, REG={COND_RC, %2, %1.val}
 | |
| 			gen move {XLT, %a}, %a
 | |
| 			yields %a
 | |
| 		with REG REG
 | |
| 			uses reusing %1, REG={COND_RR, %2, %1}
 | |
| 			gen move {XLT, %a}, %a
 | |
| 			yields %a
 | |
| 
 | |
| 	pat cmi tle $1==4                  /* Signed second <= top */
 | |
| 		with REG CONST2
 | |
| 			uses reusing %1, REG={COND_RC, %1, %2.val}
 | |
| 			gen move {XGE, %a}, %a
 | |
| 			yields %a
 | |
| 		with CONST2 REG
 | |
| 			uses reusing %1, REG={COND_RC, %2, %1.val}
 | |
| 			gen move {XLE, %a}, %a
 | |
| 			yields %a
 | |
| 		with REG REG
 | |
| 			uses reusing %1, REG={COND_RR, %2, %1}
 | |
| 			gen move {XLE, %a}, %a
 | |
| 			yields %a
 | |
| 
 | |
| 	pat cmu teq $1==4                  /* Unsigned second == top */
 | |
| 		with REG UCONST2
 | |
| 			uses reusing %1, REG={CONDL_RC, %1, %2.val}
 | |
| 			gen move {XEQ, %a}, %a
 | |
| 			yields %a
 | |
| 		with UCONST2 REG
 | |
| 			uses reusing %1, REG={CONDL_RC, %2, %1.val}
 | |
| 			gen move {XEQ, %a}, %a
 | |
| 			yields %a
 | |
| 		with REG REG
 | |
| 			uses reusing %1, REG={CONDL_RR, %2, %1}
 | |
| 			gen move {XEQ, %a}, %a
 | |
| 			yields %a
 | |
| 
 | |
| 	pat cmu tne $1==4                  /* Unsigned second != top */
 | |
| 		with REG UCONST2
 | |
| 			uses reusing %1, REG={CONDL_RC, %1, %2.val}
 | |
| 			gen move {XNE, %a}, %a
 | |
| 			yields %a
 | |
| 		with UCONST2 REG
 | |
| 			uses reusing %1, REG={CONDL_RC, %2, %1.val}
 | |
| 			gen move {XNE, %a}, %a
 | |
| 			yields %a
 | |
| 		with REG REG
 | |
| 			uses reusing %1, REG={CONDL_RR, %2, %1}
 | |
| 			gen move {XNE, %a}, %a
 | |
| 			yields %a
 | |
| 
 | |
| 	pat cmu tgt $1==4                  /* Unsigned second > top */
 | |
| 		with REG UCONST2
 | |
| 			uses reusing %1, REG={CONDL_RC, %1, %2.val}
 | |
| 			gen move {XLT, %a}, %a
 | |
| 			yields %a
 | |
| 		with UCONST2 REG
 | |
| 			uses reusing %1, REG={CONDL_RC, %2, %1.val}
 | |
| 			gen move {XGT, %a}, %a
 | |
| 			yields %a
 | |
| 		with REG REG
 | |
| 			uses reusing %1, REG={CONDL_RR, %2, %1}
 | |
| 			gen move {XGT, %a}, %a
 | |
| 			yields %a
 | |
| 
 | |
| 	pat cmu tge $1==4                  /* Unsigned second >= top */
 | |
| 		with REG UCONST2
 | |
| 			uses reusing %1, REG={CONDL_RC, %1, %2.val}
 | |
| 			gen move {XLE, %a}, %a
 | |
| 			yields %a
 | |
| 		with UCONST2 REG
 | |
| 			uses reusing %1, REG={CONDL_RC, %2, %1.val}
 | |
| 			gen move {XGE, %a}, %a
 | |
| 			yields %a
 | |
| 		with REG REG
 | |
| 			uses reusing %1, REG={CONDL_RR, %2, %1}
 | |
| 			gen move {XGE, %a}, %a
 | |
| 			yields %a
 | |
| 
 | |
| 	pat cmu tlt $1==4                  /* Unsigned second < top */
 | |
| 		with REG UCONST2
 | |
| 			uses reusing %1, REG={CONDL_RC, %1, %2.val}
 | |
| 			gen move {XGT, %a}, %a
 | |
| 			yields %a
 | |
| 		with UCONST2 REG
 | |
| 			uses reusing %1, REG={CONDL_RC, %2, %1.val}
 | |
| 			gen move {XLT, %a}, %a
 | |
| 			yields %a
 | |
| 		with REG REG
 | |
| 			uses reusing %1, REG={CONDL_RR, %2, %1}
 | |
| 			gen move {XLT, %a}, %a
 | |
| 			yields %a
 | |
| 
 | |
| 	pat cmu tle $1==4                  /* Unsigned second <= top */
 | |
| 		with REG UCONST2
 | |
| 			uses reusing %1, REG={CONDL_RC, %1, %2.val}
 | |
| 			gen move {XGE, %a}, %a
 | |
| 			yields %a
 | |
| 		with UCONST2 REG
 | |
| 			uses reusing %1, REG={CONDL_RC, %2, %1.val}
 | |
| 			gen move {XLE, %a}, %a
 | |
| 			yields %a
 | |
| 		with REG REG
 | |
| 			uses reusing %1, REG={CONDL_RR, %2, %1}
 | |
| 			gen move {XLE, %a}, %a
 | |
| 			yields %a
 | |
| 
 | |
| 
 | |
| /* Simple branches */
 | |
| 
 | |
| 	proc zxx example zeq
 | |
| 		with REG STACK
 | |
| 			gen
 | |
| 				test %1
 | |
| 				bxx* {LABEL, $1}
 | |
| 
 | |
| 	/* Pop signed int, branch if... */
 | |
| 	pat zeq    call zxx("beq")         /* top == 0 */
 | |
| 	pat zne    call zxx("bne")         /* top != 0 */
 | |
| 	pat zgt    call zxx("bgt")         /* top > 0 */
 | |
| 	pat zge    call zxx("bge")         /* top >= 0 */
 | |
| 	pat zlt    call zxx("blt")         /* top < 0 */
 | |
| 	pat zle    call zxx("ble")         /* top <= 0 */
 | |
| 
 | |
| 	/* The peephole optimizer rewrites
 | |
| 	 *   cmi 4 zeq
 | |
| 	 * as beq, and does the same for bne, bgt, and so on.
 | |
| 	 */
 | |
| 
 | |
| 	proc bxx example beq
 | |
| 		with REG CONST2 STACK
 | |
| 			gen
 | |
| 				cmpwi %1, {CONST, %2.val}
 | |
| 				bxx[2] {LABEL, $1}
 | |
| 		with CONST2 REG STACK
 | |
| 			gen
 | |
| 				cmpwi %2, {CONST, %1.val}
 | |
| 				bxx[1] {LABEL, $1}
 | |
| 		with REG REG STACK
 | |
| 			gen
 | |
| 				cmpw %2, %1
 | |
| 				bxx[1] {LABEL, $1}
 | |
| 
 | |
| 	/* Pop two signed ints, branch if... */
 | |
| 	pat beq    call bxx("beq", "beq")  /* second == top */
 | |
| 	pat bne    call bxx("bne", "bne")  /* second != top */
 | |
| 	pat bgt    call bxx("bgt", "blt")  /* second > top */
 | |
| 	pat bge    call bxx("bge", "ble")  /* second >= top */
 | |
| 	pat blt    call bxx("blt", "bgt")  /* second < top */
 | |
| 	pat ble    call bxx("ble", "bge")  /* second <= top */
 | |
| 
 | |
| 	proc cmu4zxx example cmu zeq
 | |
| 		with REG CONST2 STACK
 | |
| 			gen
 | |
| 				cmplwi %1, {CONST, %2.val}
 | |
| 				bxx[2] {LABEL, $2}
 | |
| 		with CONST2 REG STACK
 | |
| 			gen
 | |
| 				cmplwi %2, {CONST, %1.val}
 | |
| 				bxx[1] {LABEL, $2}
 | |
| 		with REG REG STACK
 | |
| 			gen
 | |
| 				cmplw %2, %1
 | |
| 				bxx[1] {LABEL, $2}
 | |
| 
 | |
| 	/* Pop two unsigned ints, branch if... */
 | |
| 	pat cmu zeq $1==4    call cmu4zxx("beq", "beq")
 | |
| 	pat cmu zne $1==4    call cmu4zxx("bne", "bne")
 | |
| 	pat cmu zgt $1==4    call cmu4zxx("bgt", "blt")
 | |
| 	pat cmu zge $1==4    call cmu4zxx("bge", "ble")
 | |
| 	pat cmu zlt $1==4    call cmu4zxx("blt", "bgt")
 | |
| 	pat cmu zle $1==4    call cmu4zxx("ble", "bge")
 | |
| 
 | |
| 
 | |
| /* Comparisons */
 | |
| 
 | |
| 	/* Each comparison extracts the lt and gt bits from cr0.
 | |
| 	 *   extlwi %a, %a, 2, 0
 | |
| 	 * puts lt in the sign bit, so lt yields a negative result,
 | |
| 	 * gt yields positive.
 | |
| 	 *   rlwinm %a, %a, 1, 31, 0
 | |
| 	 * puts gt in the sign bit, to reverse the comparison.
 | |
| 	 */
 | |
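| 	/* In bit terms: after mfcr, cr0 occupies bits 0..3 (lt, gt, eq, so).
| 	 * extlwi %a, %a, 2, 0 keeps lt and gt left-justified, so lt lands in
| 	 * the sign bit.  rlwinm %a, %a, 1, 31, 0 rotates left by one with a
| 	 * wrap-around mask, leaving gt in the sign bit and lt in bit 31. */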
| 
 | |
| 	pat cmi $1==INT32                  /* Signed tristate compare */
 | |
| 		with REG CONST2
 | |
| 			uses reusing %1, REG={COND_RC, %1, %2.val}
 | |
| 			gen rlwinm %a, %a, {CONST, 1}, {CONST, 31}, {CONST, 0}
 | |
| 			yields %a
 | |
| 		with CONST2 REG
 | |
| 			uses reusing %2, REG={COND_RC, %2, %1.val}
 | |
| 			gen extlwi %a, %a, {CONST, 2}, {CONST, 0}
 | |
| 			yields %a
 | |
| 		with REG REG
 | |
| 			uses reusing %1, REG={COND_RR, %2, %1}
 | |
| 			gen extlwi %a, %a, {CONST, 2}, {CONST, 0}
 | |
| 			yields %a
 | |
| 
 | |
| 	pat cmu $1==INT32                  /* Unsigned tristate compare */
 | |
| 		with REG UCONST2
 | |
| 			uses reusing %1, REG={CONDL_RC, %1, %2.val}
 | |
| 			gen rlwinm %a, %a, {CONST, 1}, {CONST, 31}, {CONST, 0}
 | |
| 			yields %a
 | |
| 		with UCONST2 REG
 | |
| 			uses reusing %2, REG={CONDL_RC, %2, %1.val}
 | |
| 			gen extlwi %a, %a, {CONST, 2}, {CONST, 0}
 | |
| 			yields %a
 | |
| 		with REG REG
 | |
| 			uses reusing %1, REG={CONDL_RR, %2, %1}
 | |
| 			gen extlwi %a, %a, {CONST, 2}, {CONST, 0}
 | |
| 			yields %a
 | |
| 
 | |
| 	pat cmp                            /* Compare pointers */
 | |
| 		leaving
 | |
| 			cmu INT32
 | |
| 
 | |
| 	pat cms $1==INT32                  /* Compare blocks (word sized) */
 | |
| 		leaving
 | |
| 			cmi INT32
 | |
| 
 | |
| 	pat cms defined($1)
 | |
| 		leaving
 | |
| 			loc $1
 | |
| 			cal ".cms"
 | |
| 
 | |
| 	pat cms !defined($1)
 | |
| 		leaving
 | |
| 			cal ".cms"
 | |
| 
 | |
| 
 | |
| /* Other branching and labelling */
 | |
| 
 | |
| 	pat lab topeltsize($1)==4 && !fallthrough($1)
 | |
| 		kills ALL
 | |
| 		gen
 | |
| 			labeldef $1
 | |
| 			yields r3
 | |
| 
 | |
| 	pat lab topeltsize($1)==4 && fallthrough($1)
 | |
| 		with REG STACK
 | |
| 		kills ALL
 | |
| 		gen
 | |
| 			move %1, r3
 | |
| 			labeldef $1
 | |
| 		yields r3
 | |
| 
 | |
| 	pat lab topeltsize($1)!=4
 | |
| 		with STACK
 | |
| 		kills ALL
 | |
| 		gen
 | |
| 			labeldef $1
 | |
| 
 | |
| 	pat bra topeltsize($1)==4          /* Unconditional jump with TOS register */
 | |
| 		with REG STACK
 | |
| 		gen
 | |
| 			move %1, r3
 | |
| 			b {LABEL, $1}
 | |
| 
 | |
| 	pat bra topeltsize($1)!=4          /* Unconditional jump without TOS register */
 | |
| 		with STACK
 | |
| 		gen
 | |
| 			b {LABEL, $1}
 | |
| 
 | |
| 
 | |
| /* Miscellaneous */
 | |
| 
 | |
| 	pat cal                            /* Call procedure */
 | |
| 		with STACK
 | |
| 			kills ALL
 | |
| 			gen
 | |
| 				bl {LABEL, $1}
 | |
| 
 | |
| 	pat cai                            /* Call procedure indirect */
 | |
| 		with REG STACK
 | |
| 			kills ALL
 | |
| 			gen
 | |
| 				mtspr ctr, %1
 | |
| 				bctrl.
 | |
| 
 | |
| 	pat lfr $1==INT32                  /* Load function result, word */
 | |
| 		yields r3
 | |
| 
 | |
| 	pat lfr $1==INT64                  /* Load function result, double-word */
 | |
| 		yields r4 r3
 | |
| 
 | |
| 	pat ret $1==0                      /* Return from procedure */
 | |
| 		gen
 | |
| 			return
 | |
| 			b {LABEL, ".ret"}
 | |
| 
 | |
| 	pat ret $1==INT32                  /* Return from procedure, word */
 | |
| 		with REG
 | |
| 		gen
 | |
| 			move %1, r3
 | |
| 			return
 | |
| 			b {LABEL, ".ret"}
 | |
| 
 | |
| 	pat ret $1==INT64                  /* Return from procedure, double-word */
 | |
| 		with REG REG
 | |
| 		gen
 | |
| 			move %1, r3
 | |
| 			move %2, r4
 | |
| 			return
 | |
| 			b {LABEL, ".ret"}
 | |
| 
 | |
| 	pat blm                            /* Block move constant length */
 | |
| 		leaving
 | |
| 			loc $1
 | |
| 			bls
 | |
| 
 | |
| 	pat bls                            /* Block move variable length */
 | |
| 		with REG REG REG
 | |
| 			uses reusing %1, REG, REG={CONST_0000_7FFF, 0}
 | |
| 			gen
 | |
| 				/* Wrong if size is zero: ctr gets 0, so bdnz loops 2^32 times */
 | |
| 				srwi %1, %1, {CONST, 2}
 | |
| 				mtspr ctr, %1
 | |
| 				1:
 | |
| 				lwzx %a, %3, %b
 | |
| 				stwx %a, %2, %b
 | |
| 				addi %b, %b, {CONST, 4}
 | |
| 				bdnz {LABEL, "1b"}
 | |
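| 
 | |
| 	/* A hedged C sketch of what the loop above does (names illustrative;
 | |
| 	 * per the lwzx/stwx operands, %3 is the load base, %2 the store base
 | |
| 	 * and %1 the byte count, which must be a non-zero multiple of 4):
 | |
| 	 *
 | |
| 	 *   #include <stdint.h>
 | |
| 	 *   void wordcopy(uint32_t *dst, const uint32_t *src, uint32_t nbytes)
 | |
| 	 *   {
 | |
| 	 *       for (uint32_t n = nbytes >> 2; n != 0; n--)   // srwi + mtspr ctr
 | |
| 	 *           *dst++ = *src++;                           // lwzx, stwx, bdnz
 | |
| 	 *   }
 | |
| 	 */
 | |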
| 
 | |
| 	pat csa                            /* Array-lookup switch */
 | |
| 		with STACK
 | |
| 			kills ALL
 | |
| 			gen
 | |
| 				b {LABEL, ".csa"}
 | |
| 
 | |
| 	pat csb                            /* Table-lookup switch */
 | |
| 		with STACK
 | |
| 			kills ALL
 | |
| 			gen
 | |
| 				b {LABEL, ".csb"}
 | |
| 
 | |
| 
 | |
| /* EM specials */
 | |
| 
 | |
| 	pat fil                            /* Set current filename */
 | |
| 		leaving
 | |
| 			lae $1
 | |
| 			ste "hol0+4"
 | |
| 
 | |
| 	pat lin                            /* Set current line number */
 | |
| 		leaving
 | |
| 			loc $1
 | |
| 			ste "hol0"
 | |
| 
 | |
| 	pat lni                            /* Increment line number */
 | |
| 		leaving
 | |
| 			ine "hol0"
 | |
| 
 | |
| 	pat lim                            /* Load EM trap ignore mask */
 | |
| 		leaving
 | |
| 			lde ".ignmask"
 | |
| 
 | |
| 	pat sim                            /* Store EM trap ignore mask */
 | |
| 		leaving
 | |
| 			ste ".ignmask"
 | |
| 
 | |
| 	pat trp                            /* Raise EM trap */
 | |
| 		with REG
 | |
| 			kills ALL
 | |
| 			gen
 | |
| 				move %1, r3
 | |
| 				bl {LABEL, ".trap"}
 | |
| 
 | |
| 	pat sig                            /* Set trap handler */
 | |
| 		leaving
 | |
| 			ste ".trppc"
 | |
| 
 | |
| 	pat rtt                            /* Return from trap */
 | |
| 		leaving
 | |
| 			ret 0
 | |
| 
 | |
| 	pat lxl $1==0                      /* Load FP */
 | |
| 		leaving
 | |
| 			lor 0
 | |
| 
 | |
| 	pat lxl $1==1                      /* Load caller's FP */
 | |
| 		leaving
 | |
| 			lxl 0
 | |
| 			dch
 | |
| 
 | |
| 	pat dch                            /* FP -> caller FP */
 | |
| 		with REG
 | |
| 			uses reusing %1, REG
 | |
| 			gen
 | |
| 				lwz %a, {IND_RC_W, %1, FP_OFFSET}
 | |
| 			yields %a
 | |
| 
 | |
| 	pat lpb                            /* Convert FP to argument address */
 | |
| 		leaving
 | |
| 			adp EM_BSIZE
 | |
| 
 | |
| 	pat lxa                            /* Load argument base, $1 levels back */
 | |
| 		leaving
 | |
| 			lxl $1
 | |
| 			lpb
 | |
| 
 | |
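| 	/* Judging by the loads below, $1 names a three-word descriptor:
 | |
| 	 * word 0 holds the address to jump to, word 4 the SP and word 8 the FP
 | |
| 	 * to restore. */
 | |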
| 	pat gto                            /* longjmp */
 | |
| 		with STACK
 | |
| 			uses REG
 | |
| 			gen
 | |
| 				move {LABEL, $1}, %a
 | |
| 				move {IND_RC_W, %a, 8}, fp
 | |
| 				move {IND_RC_W, %a, 4}, sp
 | |
| 				move {IND_RC_W, %a, 0}, %a
 | |
| 				mtspr ctr, %a
 | |
| 				bctr.
 | |
| 
 | |
| 	pat lor $1==0                      /* Load FP */
 | |
| 		uses REG
 | |
| 		gen
 | |
| 			move fp, %a
 | |
| 		yields %a
 | |
| 
 | |
| 	pat lor $1==1                      /* Load SP */
 | |
| 		uses REG
 | |
| 		gen
 | |
| 			move sp, %a
 | |
| 		yields %a
 | |
| 
 | |
| 	pat str $1==0                      /* Store FP */
 | |
| 		with REG
 | |
| 			gen
 | |
| 				move %1, fp
 | |
| 
 | |
| 	pat str $1==1                      /* Store SP */
 | |
| 		with REG
 | |
| 			gen
 | |
| 				move %1, sp
 | |
| 
 | |
| 	pat loc ass $1==4 && $2==4         /* Drop 4 bytes from stack */
 | |
| 		with exact REG
 | |
| 			/* nop */
 | |
| 		with STACK
 | |
| 			gen
 | |
| 				addi sp, sp, {CONST, 4}
 | |
| 
 | |
| 	pat ass $1==4                      /* Adjust stack by variable amount */
 | |
| 		with CONST2 STACK
 | |
| 			gen
 | |
| 				move {SUM_RC, sp, %1.val}, sp
 | |
| 		with CONST_HZ STACK
 | |
| 			gen
 | |
| 				move {SUM_RC, sp, his(%1.val)}, sp
 | |
| 		with CONST_STACK-CONST2-CONST_HZ STACK
 | |
| 			gen
 | |
| 				move {SUM_RC, sp, his(%1.val)}, sp
 | |
| 				move {SUM_RC, sp, los(%1.val)}, sp
 | |
| 		with REG STACK
 | |
| 			gen
 | |
| 				move {SUM_RR, sp, %1}, sp
 | |
| 
 | |
| 	pat asp                            /* Adjust stack by constant amount */
 | |
| 		leaving
 | |
| 			loc $1
 | |
| 			ass 4
 | |
| 
 | |
| 	pat lae rck $2==4                  /* Range check */
 | |
| 		with REG
 | |
| 			kills ALL
 | |
| 			gen
 | |
| 				cmpwi %1, {CONST, rom($1, 1)}
 | |
| 				blt {LABEL, ".trap_erange"}
 | |
| 				cmpwi %1, {CONST, rom($1, 2)}
 | |
| 				bgt {LABEL, ".trap_erange"}
 | |
| 			yields %1
 | |
| 
 | |
| 
 | |
| /* Floating point support */
 | |
| 
 | |
| 	/* All very cheap and nasty --- this needs to be properly integrated into
 | |
| 	 * the code generator. ncg doesn't like having separate FPU registers. */
 | |
| 
 | |
| 	/* Single-precision */
 | |
| 
 | |
| 	pat zrf $1==INT32                  /* Push zero */
 | |
| 		leaving
 | |
| 			loe ".fs_00000000"
 | |
| 
 | |
| 	pat adf $1==INT32                  /* Add single */
 | |
| 		with FSREG FSREG
 | |
| 			uses reusing %1, FSREG
 | |
| 			gen
 | |
| 				fadds %a, %2, %1
 | |
| 			yields %a
 | |
| 
 | |
| 	pat sbf $1==INT32                  /* Subtract single */
 | |
| 		with FSREG FSREG
 | |
| 			uses reusing %1, FSREG
 | |
| 			gen
 | |
| 				fsubs %a, %2, %1
 | |
| 			yields %a
 | |
| 
 | |
| 	pat mlf $1==INT32                  /* Multiply single */
 | |
| 		with FSREG FSREG
 | |
| 			uses reusing %1, FSREG
 | |
| 			gen
 | |
| 				fmuls %a, %2, %1
 | |
| 			yields %a
 | |
| 
 | |
| 	pat dvf $1==INT32                  /* Divide single */
 | |
| 		with FSREG FSREG
 | |
| 			uses reusing %1, FSREG
 | |
| 			gen
 | |
| 				fdivs %a, %2, %1
 | |
| 			yields %a
 | |
| 
 | |
| 	pat ngf $1==INT32                  /* Negate single */
 | |
| 		with FSREG
 | |
| 			uses reusing %1, FSREG
 | |
| 			gen
 | |
| 				fneg %a, %1
 | |
| 			yields %a
 | |
| 
 | |
| 	pat cmf $1==INT32                  /* Compare single */
 | |
| 		with FSREG FSREG
 | |
| 			uses REG={COND_FS, %2, %1}
 | |
| 			gen extlwi %a, %a, {CONST, 2}, {CONST, 0}
 | |
| 			yields %a
 | |
| 
 | |
| 	pat cmf teq $1==4                  /* Single second == top */
 | |
| 		with FSREG FSREG
 | |
| 			uses REG={COND_FS, %2, %1}
 | |
| 			gen move {XEQ, %a}, %a
 | |
| 			yields %a
 | |
| 
 | |
| 	pat cmf tne $1==4                  /* Single second != top */
 | |
| 		with FSREG FSREG
 | |
| 			uses REG={COND_FS, %2, %1}
 | |
| 			gen move {XNE, %a}, %a
 | |
| 			yields %a
 | |
| 
 | |
| 	pat cmf tgt $1==4                  /* Single second > top */
 | |
| 		with FSREG FSREG
 | |
| 			uses REG={COND_FS, %2, %1}
 | |
| 			gen move {XGT, %a}, %a
 | |
| 			yields %a
 | |
| 
 | |
| 	pat cmf tge $1==4                  /* Single second >= top */
 | |
| 		with FSREG FSREG
 | |
| 			uses REG={COND_FS, %2, %1}
 | |
| 			gen move {XGE, %a}, %a
 | |
| 			yields %a
 | |
| 
 | |
| 	pat cmf tlt $1==4                  /* Single second < top */
 | |
| 		with FSREG FSREG
 | |
| 			uses REG={COND_FS, %2, %1}
 | |
| 			gen move {XLT, %a}, %a
 | |
| 			yields %a
 | |
| 
 | |
| 	pat cmf tle $1==4                  /* Single second <= top */
 | |
| 		with FSREG FSREG
 | |
| 			uses REG={COND_FS, %2, %1}
 | |
| 			gen move {XLE, %a}, %a
 | |
| 			yields %a
 | |
| 
 | |
| 	proc cmf4zxx example cmf zeq
 | |
| 		with FREG FREG STACK
 | |
| 			uses REG
 | |
| 			gen
 | |
| 				fcmpo cr0, %2, %1
 | |
| 				bxx* {LABEL, $2}
 | |
| 
 | |
| 	/* Pop 2 singles, branch if... */
 | |
| 	pat cmf zeq $1==4    call cmf4zxx("beq")
 | |
| 	pat cmf zne $1==4    call cmf4zxx("bne")
 | |
| 	pat cmf zgt $1==4    call cmf4zxx("bgt")
 | |
| 	pat cmf zge $1==4    call cmf4zxx("bge")
 | |
| 	pat cmf zlt $1==4    call cmf4zxx("blt")
 | |
| 	pat cmf zle $1==4    call cmf4zxx("ble")
 | |
| 
 | |
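| 	/* PowerPC keeps single-precision values in double format in the FPRs,
 | |
| 	 * and each FSREG is defined on top of an FPR, so widening apparently
 | |
| 	 * needs no code: %1.1 names the underlying FPR, yielded as the double. */
 | |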
| 	pat loc loc cff $1==INT32 && $2==INT64 /* Convert single to double */
 | |
| 		with FSREG
 | |
| 			yields %1.1
 | |
| 
 | |
| 	/* Convert single to signed int */
 | |
| 	pat loc loc cfi $1==4 && $2==4
 | |
| 		leaving
 | |
| 			loc 4
 | |
| 			loc 8
 | |
| 			cff
 | |
| 			loc 8
 | |
| 			loc 4
 | |
| 			cfi
 | |
| 
 | |
| 	/* Convert single to unsigned int */
 | |
| 	pat loc loc cfu $1==4 && $2==4
 | |
| 		leaving
 | |
| 			loc 4
 | |
| 			loc 8
 | |
| 			cff
 | |
| 			loc 8
 | |
| 			loc 4
 | |
| 			cfu
 | |
| 
 | |
| 	/* Convert signed int to single */
 | |
| 	pat loc loc cif $1==4 && $2==4
 | |
| 		leaving
 | |
| 			loc 4
 | |
| 			loc 8
 | |
| 			cif
 | |
| 			loc 8
 | |
| 			loc 4
 | |
| 			cff
 | |
| 
 | |
| 	/* Convert unsigned int to single */
 | |
| 	pat loc loc cuf $1==4 && $2==4
 | |
| 		leaving
 | |
| 			loc 4
 | |
| 			loc 8
 | |
| 			cuf
 | |
| 			loc 8
 | |
| 			loc 4
 | |
| 			cff
 | |
| 
 | |
| 	/* Double-precision */
 | |
| 
 | |
| 	pat zrf $1==INT64                  /* Push zero */
 | |
| 		leaving
 | |
| 			lde ".fd_00000000"
 | |
| 
 | |
| 	pat adf $1==INT64                  /* Add double */
 | |
| 		with FREG FREG
 | |
| 			uses FREG
 | |
| 			gen
 | |
| 				fadd %a, %2, %1
 | |
| 			yields %a
 | |
| 
 | |
| 	pat sbf $1==INT64                  /* Subtract double */
 | |
| 		with FREG FREG
 | |
| 			uses FREG
 | |
| 			gen
 | |
| 				fsub %a, %2, %1
 | |
| 			yields %a
 | |
| 
 | |
| 	pat mlf $1==INT64                  /* Multiply double */
 | |
| 		with FREG FREG
 | |
| 			uses reusing %1, FREG
 | |
| 			gen
 | |
| 				fmul %a, %2, %1
 | |
| 			yields %a
 | |
| 
 | |
| 	pat dvf $1==INT64                  /* Divide double */
 | |
| 		with FREG FREG
 | |
| 			uses reusing %1, FREG
 | |
| 			gen
 | |
| 				fdiv %a, %2, %1
 | |
| 			yields %a
 | |
| 
 | |
| 	pat ngf $1==INT64                  /* Negate double */
 | |
| 		with FREG
 | |
| 			uses reusing %1, FREG
 | |
| 			gen
 | |
| 				fneg %a, %1
 | |
| 			yields %a
 | |
| 
 | |
| 	pat cmf $1==INT64                  /* Compare double */
 | |
| 		with FREG FREG
 | |
| 			uses REG={COND_FD, %2, %1}
 | |
| 			gen extlwi %a, %a, {CONST, 2}, {CONST, 0}
 | |
| 			yields %a
 | |
| 
 | |
| 	pat cmf teq $1==8                  /* Double second == top */
 | |
| 		with FREG FREG
 | |
| 			uses REG={COND_FD, %2, %1}
 | |
| 			gen move {XEQ, %a}, %a
 | |
| 			yields %a
 | |
| 
 | |
| 	pat cmf tne $1==8                  /* Double second != top */
 | |
| 		with FREG FREG
 | |
| 			uses REG={COND_FD, %2, %1}
 | |
| 			gen move {XNE, %a}, %a
 | |
| 			yields %a
 | |
| 
 | |
| 	pat cmf tgt $1==8                  /* Double second > top */
 | |
| 		with FREG FREG
 | |
| 			uses REG={COND_FD, %2, %1}
 | |
| 			gen move {XGT, %a}, %a
 | |
| 			yields %a
 | |
| 
 | |
| 	pat cmf tge $1==8                  /* Double second >= top */
 | |
| 		with FREG FREG
 | |
| 			uses REG={COND_FD, %2, %1}
 | |
| 			gen move {XGE, %a}, %a
 | |
| 			yields %a
 | |
| 
 | |
| 	pat cmf tlt $1==8                  /* Double second < top */
 | |
| 		with FREG FREG
 | |
| 			uses REG={COND_FD, %2, %1}
 | |
| 			gen move {XLT, %a}, %a
 | |
| 			yields %a
 | |
| 
 | |
| 	pat cmf tle $1==8                  /* Double second <= top */
 | |
| 		with FREG FREG
 | |
| 			uses REG={COND_FD, %2, %1}
 | |
| 			gen move {XLE, %a}, %a
 | |
| 			yields %a
 | |
| 
 | |
| 	proc cmf8zxx example cmf zeq
 | |
| 		with FREG FREG STACK
 | |
| 			uses REG
 | |
| 			gen
 | |
| 				fcmpo cr0, %2, %1
 | |
| 				bxx* {LABEL, $2}
 | |
| 
 | |
| 	/* Pop 2 doubles, branch if... */
 | |
| 	pat cmf zeq $1==8    call cmf8zxx("beq")
 | |
| 	pat cmf zne $1==8    call cmf8zxx("bne")
 | |
| 	pat cmf zgt $1==8    call cmf8zxx("bgt")
 | |
| 	pat cmf zge $1==8    call cmf8zxx("bge")
 | |
| 	pat cmf zlt $1==8    call cmf8zxx("blt")
 | |
| 	pat cmf zle $1==8    call cmf8zxx("ble")
 | |
| 
 | |
| 	pat loc loc cff $1==INT64 && $2==INT32 /* Convert double to single */
 | |
| 		with FREG
 | |
| 			uses reusing %1, FSREG
 | |
| 			gen
 | |
| 				frsp %a, %1
 | |
| 			yields %a
 | |
| 
 | |
| 	/* Convert double to signed int.  fctiwz leaves the result in the low
 | |
| 	 * word of %a; stfdu pushes all 8 bytes, then the addi discards the
 | |
| 	 * high word, leaving the 32-bit integer on top of the stack. */
 | |
| 	pat loc loc cfi $1==8 && $2==4
 | |
| 		with FREG STACK
 | |
| 			uses reusing %1, FREG
 | |
| 			gen
 | |
| 				fctiwz %a, %1
 | |
| 				stfdu %a, {IND_RC_D, sp, 0-8}
 | |
| 				addi sp, sp, {CONST, 4}
 | |
| 
 | |
| 	/* Convert double to unsigned int */
 | |
| 	pat loc loc cfu $1==8 && $2==4
 | |
| 		leaving
 | |
| 			cal ".cfu8"
 | |
| 
 | |
| 	/*
 | |
| 	 * To convert an integer to IEEE double, we pack it into the low 32
 | |
| 	 * bits of the magic double 2^52, whose bit pattern is
 | |
| 	 *   0x 4330 0000 0000 0000
 | |
| 	 * so the packed double has the value 2^52 + u for an unsigned u.
 | |
| 	 *
 | |
| 	 * For a signed integer i, we first flip its sign bit, turning i into
 | |
| 	 * the unsigned value i + 2^31, and then compute
 | |
| 	 *   (2^52 + (i + 2^31)) - (2^52 + 2^31)  ==  i
 | |
| 	 */
 | |
| 	pat loc loc cif $1==4 && $2==8
 | |
| 		with REG
 | |
| 			uses reusing %1, REG={XOR_RIS, %1, 0x8000},
 | |
| 			  REG={CONST_HZ, 0x43300000},
 | |
| 			  REG={CONST_HZ, 0x80000000},
 | |
| 			  FREG, FREG
 | |
| 			gen
 | |
| 				stwu %b, {IND_RC_W, sp, 0-8}
 | |
| 				stw %a, {IND_RC_W, sp, 4}
 | |
| 				lfd %d, {IND_RC_D, sp, 0}
 | |
| 				stw %c, {IND_RC_W, sp, 4}
 | |
| 				lfd %e, {IND_RC_D, sp, 0}
 | |
| 				fsub %d, %d, %e
 | |
| 				addi sp, sp, {CONST, 8}
 | |
| 			yields %d
 | |
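| 
 | |
| 	/* A hedged C sketch of the rule above (names illustrative; the union
 | |
| 	 * trick assumes big-endian word order, as on the PowerPC target):
 | |
| 	 *
 | |
| 	 *   #include <stdint.h>
 | |
| 	 *   double int_to_double(int32_t i)
 | |
| 	 *   {
 | |
| 	 *       union { double d; uint32_t w[2]; } x, bias;
 | |
| 	 *       x.w[0] = 0x43300000;                // high word of 2^52
 | |
| 	 *       x.w[1] = (uint32_t)i ^ 0x80000000;  // low word: i + 2^31
 | |
| 	 *       bias.w[0] = 0x43300000;             // bias is 2^52 + 2^31
 | |
| 	 *       bias.w[1] = 0x80000000;
 | |
| 	 *       return x.d - bias.d;                // == i
 | |
| 	 *   }
 | |
| 	 */
 | |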
| 
 | |
| 	/*
 | |
| 	 * To convert an unsigned integer u to IEEE double, no sign-bit fixup
 | |
| 	 * is needed; we simply compute
 | |
| 	 *   (2^52 + u) - 2^52  ==  u
 | |
| 	 */
 | |
| 	pat loc loc cuf $1==4 && $2==8
 | |
| 		with REG
 | |
| 			uses REG={CONST_HZ, 0x43300000},
 | |
| 			  REG={CONST_0000_7FFF, 0},
 | |
| 			  FREG, FREG
 | |
| 			gen
 | |
| 				stwu %a, {IND_RC_W, sp, 0-8}
 | |
| 				stw %1, {IND_RC_W, sp, 4}
 | |
| 				lfd %c, {IND_RC_D, sp, 0}
 | |
| 				stw %b, {IND_RC_W, sp, 4}
 | |
| 				lfd %d, {IND_RC_D, sp, 0}
 | |
| 				fsub %c, %c, %d
 | |
| 				addi sp, sp, {CONST, 8}
 | |
| 			yields %c
 | |
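| 
 | |
| 	/* The same sketch as above, for the unsigned case (again assuming
 | |
| 	 * big-endian word order; names are illustrative):
 | |
| 	 *
 | |
| 	 *   #include <stdint.h>
 | |
| 	 *   double uint_to_double(uint32_t u)
 | |
| 	 *   {
 | |
| 	 *       union { double d; uint32_t w[2]; } x, bias;
 | |
| 	 *       x.w[0] = 0x43300000;   // high word of 2^52
 | |
| 	 *       x.w[1] = u;            // low word is u itself
 | |
| 	 *       bias.w[0] = 0x43300000;
 | |
| 	 *       bias.w[1] = 0;
 | |
| 	 *       return x.d - bias.d;   // == u
 | |
| 	 *   }
 | |
| 	 */
 | |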
| 
 | |
| 	pat fef $1==8                      /* Split fraction, exponent */
 | |
| 		leaving
 | |
| 			cal ".fef8"
 | |
| 
 | |
| 	/* Multiply two doubles, then split fraction, integer */
 | |
| 	pat fif $1==8
 | |
| 		leaving
 | |
| 			cal ".fif8"
 |