module X86.CodeGen (
cmmTopCodeGen,
generateJumpTableForInstr,
InstrBlock
)
where
#include "HsVersions.h"
#include "nativeGen/NCG.h"
#include "../includes/MachDeps.h"
import X86.Instr
import X86.Cond
import X86.Regs
import X86.RegInfo
import Instruction
import PIC
import NCGMonad
import Size
import Reg
import Platform
import BasicTypes
import BlockId
import PprCmm ()
import OldCmm
import OldPprCmm ()
import CLabel
import StaticFlags ( opt_PIC )
import ForeignCall ( CCallConv(..) )
import OrdList
import Outputable
import Unique
import FastString
import FastBool ( isFastTrue )
import Constants ( wORD_SIZE )
import DynFlags
import Control.Monad
import Data.Bits
import Data.Int
import Data.Maybe
import Data.Word
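-- Is it safe to emit SSE2 code?  On x86-64 SSE2 is always available;
-- on plain x86 it depends on the Opt_SSE2 dynamic flag (-msse2).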
sse2Enabled :: NatM Bool
sse2Enabled = do
dflags <- getDynFlagsNat
case platformArch (targetPlatform dflags) of
ArchX86_64 ->
return True
ArchX86 -> return (dopt Opt_SSE2 dflags)
_ -> panic "sse2Enabled: Not an X86* arch"
if_sse2 :: NatM a -> NatM a -> NatM a
if_sse2 sse2 x87 = do
b <- sse2Enabled
if b then sse2 else x87
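-- -----------------------------------------------------------------------------
-- Top-level of the instruction selector: translate one Cmm top-level
-- definition (a procedure or a data section) into native-code tops.
-- When generating position-independent code, the PIC base register is
-- initialised at the start of the procedure.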
cmmTopCodeGen
:: RawCmmTop
-> NatM [NatCmmTop (Alignment, CmmStatics) Instr]
cmmTopCodeGen (CmmProc info lab (ListGraph blocks)) = do
(nat_blocks,statics) <- mapAndUnzipM basicBlockCodeGen blocks
picBaseMb <- getPicBaseMaybeNat
dflags <- getDynFlagsNat
let proc = CmmProc info lab (ListGraph $ concat nat_blocks)
tops = proc : concat statics
os = platformOS $ targetPlatform dflags
case picBaseMb of
Just picBase -> initializePicBase_x86 ArchX86 os picBase tops
Nothing -> return tops
cmmTopCodeGen (CmmData sec dat) = do
return [CmmData sec (1, dat)]
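-- Generate code for a single basic block.  The generated instruction
-- stream may contain NEWBLOCK and LDATA pseudo-instructions, which are
-- split out here into extra basic blocks and extra data sections.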
basicBlockCodeGen
:: CmmBasicBlock
-> NatM ( [NatBasicBlock Instr]
, [NatCmmTop (Alignment, CmmStatics) Instr])
basicBlockCodeGen (BasicBlock id stmts) = do
instrs <- stmtsToInstrs stmts
let
(top,other_blocks,statics) = foldrOL mkBlocks ([],[],[]) instrs
mkBlocks (NEWBLOCK id) (instrs,blocks,statics)
= ([], BasicBlock id instrs : blocks, statics)
mkBlocks (LDATA sec dat) (instrs,blocks,statics)
= (instrs, blocks, CmmData sec dat:statics)
mkBlocks instr (instrs,blocks,statics)
= (instr:instrs, blocks, statics)
return (BasicBlock id top : other_blocks, statics)
stmtsToInstrs :: [CmmStmt] -> NatM InstrBlock
stmtsToInstrs stmts
= do instrss <- mapM stmtToInstrs stmts
return (concatOL instrss)
stmtToInstrs :: CmmStmt -> NatM InstrBlock
stmtToInstrs stmt = do
dflags <- getDynFlagsNat
let is32Bit = target32Bit (targetPlatform dflags)
case stmt of
CmmNop -> return nilOL
CmmComment s -> return (unitOL (COMMENT s))
CmmAssign reg src
| isFloatType ty -> assignReg_FltCode size reg src
| is32Bit && isWord64 ty -> assignReg_I64Code reg src
| otherwise -> assignReg_IntCode size reg src
where ty = cmmRegType reg
size = cmmTypeSize ty
CmmStore addr src
| isFloatType ty -> assignMem_FltCode size addr src
| is32Bit && isWord64 ty -> assignMem_I64Code addr src
| otherwise -> assignMem_IntCode size addr src
where ty = cmmExprType src
size = cmmTypeSize ty
CmmCall target result_regs args _ _
-> genCCall target result_regs args
CmmBranch id -> genBranch id
CmmCondBranch arg id -> genCondJump id arg
CmmSwitch arg ids -> genSwitch arg ids
CmmJump arg _ -> genJump arg
CmmReturn _ ->
panic "stmtToInstrs: return statement should have been cps'd away"
type InstrBlock
= OrdList Instr
data CondCode
= CondCode Bool Cond InstrBlock
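-- ChildCode64 is used only on 32-bit targets: the code computes a 64-bit
-- value into a register pair.  Only the low-word register is stored; the
-- high word is recovered with getHiVRegFromLo.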
data ChildCode64
= ChildCode64
InstrBlock
Reg
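-- A Register is either Fixed (the value ends up in a particular register)
-- or Any (the caller chooses the destination register and the code is a
-- function of that register).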
data Register
= Fixed Size Reg InstrBlock
| Any Size (Reg -> InstrBlock)
swizzleRegisterRep :: Register -> Size -> Register
swizzleRegisterRep (Fixed _ reg code) size = Fixed size reg code
swizzleRegisterRep (Any _ codefn) size = Any size codefn
getRegisterReg :: Bool -> CmmReg -> Reg
getRegisterReg use_sse2 (CmmLocal (LocalReg u pk))
= let sz = cmmTypeSize pk in
if isFloatSize sz && not use_sse2
then RegVirtual (mkVirtualReg u FF80)
else RegVirtual (mkVirtualReg u sz)
getRegisterReg _ (CmmGlobal mid)
= case globalRegMaybe mid of
Just reg -> RegReal $ reg
Nothing -> pprPanic "getRegisterReg-memory" (ppr $ CmmGlobal mid)
data Amode
= Amode AddrMode InstrBlock
is32BitInteger :: Integer -> Bool
is32BitInteger i = i64 <= 0x7fffffff && i64 >= 0x80000000
where i64 = fromIntegral i :: Int64
jumpTableEntry :: Maybe BlockId -> CmmStatic
jumpTableEntry Nothing = CmmStaticLit (CmmInt 0 wordWidth)
jumpTableEntry (Just blockid) = CmmStaticLit (CmmLabel blockLabel)
where blockLabel = mkAsmTempLabel (getUnique blockid)
mangleIndexTree :: CmmReg -> Int -> CmmExpr
mangleIndexTree reg off
= CmmMachOp (MO_Add width) [CmmReg reg, CmmLit (CmmInt (fromIntegral off) width)]
where width = typeWidth (cmmRegType reg)
getSomeReg :: CmmExpr -> NatM (Reg, InstrBlock)
getSomeReg expr = do
r <- getRegister expr
case r of
Any rep code -> do
tmp <- getNewRegNat rep
return (tmp, code tmp)
Fixed _ reg code ->
return (reg, code)
assignMem_I64Code :: CmmExpr -> CmmExpr -> NatM InstrBlock
assignMem_I64Code addrTree valueTree = do
Amode addr addr_code <- getAmode addrTree
ChildCode64 vcode rlo <- iselExpr64 valueTree
let
rhi = getHiVRegFromLo rlo
mov_lo = MOV II32 (OpReg rlo) (OpAddr addr)
mov_hi = MOV II32 (OpReg rhi) (OpAddr (fromJust (addrOffset addr 4)))
return (vcode `appOL` addr_code `snocOL` mov_lo `snocOL` mov_hi)
assignReg_I64Code :: CmmReg -> CmmExpr -> NatM InstrBlock
assignReg_I64Code (CmmLocal (LocalReg u_dst _)) valueTree = do
ChildCode64 vcode r_src_lo <- iselExpr64 valueTree
let
r_dst_lo = RegVirtual $ mkVirtualReg u_dst II32
r_dst_hi = getHiVRegFromLo r_dst_lo
r_src_hi = getHiVRegFromLo r_src_lo
mov_lo = MOV II32 (OpReg r_src_lo) (OpReg r_dst_lo)
mov_hi = MOV II32 (OpReg r_src_hi) (OpReg r_dst_hi)
return (
vcode `snocOL` mov_lo `snocOL` mov_hi
)
assignReg_I64Code _ _
= panic "assignReg_I64Code(i386): invalid lvalue"
iselExpr64 :: CmmExpr -> NatM ChildCode64
iselExpr64 (CmmLit (CmmInt i _)) = do
(rlo,rhi) <- getNewRegPairNat II32
let
r = fromIntegral (fromIntegral i :: Word32)
q = fromIntegral (fromIntegral (i `shiftR` 32) :: Word32)
code = toOL [
MOV II32 (OpImm (ImmInteger r)) (OpReg rlo),
MOV II32 (OpImm (ImmInteger q)) (OpReg rhi)
]
return (ChildCode64 code rlo)
iselExpr64 (CmmLoad addrTree ty) | isWord64 ty = do
Amode addr addr_code <- getAmode addrTree
(rlo,rhi) <- getNewRegPairNat II32
let
mov_lo = MOV II32 (OpAddr addr) (OpReg rlo)
mov_hi = MOV II32 (OpAddr (fromJust (addrOffset addr 4))) (OpReg rhi)
return (
ChildCode64 (addr_code `snocOL` mov_lo `snocOL` mov_hi)
rlo
)
iselExpr64 (CmmReg (CmmLocal (LocalReg vu ty))) | isWord64 ty
= return (ChildCode64 nilOL (RegVirtual $ mkVirtualReg vu II32))
iselExpr64 (CmmMachOp (MO_Add _) [e1, CmmLit (CmmInt i _)]) = do
ChildCode64 code1 r1lo <- iselExpr64 e1
(rlo,rhi) <- getNewRegPairNat II32
let
r = fromIntegral (fromIntegral i :: Word32)
q = fromIntegral (fromIntegral (i `shiftR` 32) :: Word32)
r1hi = getHiVRegFromLo r1lo
code = code1 `appOL`
toOL [ MOV II32 (OpReg r1lo) (OpReg rlo),
ADD II32 (OpImm (ImmInteger r)) (OpReg rlo),
MOV II32 (OpReg r1hi) (OpReg rhi),
ADC II32 (OpImm (ImmInteger q)) (OpReg rhi) ]
return (ChildCode64 code rlo)
iselExpr64 (CmmMachOp (MO_Add _) [e1,e2]) = do
ChildCode64 code1 r1lo <- iselExpr64 e1
ChildCode64 code2 r2lo <- iselExpr64 e2
(rlo,rhi) <- getNewRegPairNat II32
let
r1hi = getHiVRegFromLo r1lo
r2hi = getHiVRegFromLo r2lo
code = code1 `appOL`
code2 `appOL`
toOL [ MOV II32 (OpReg r1lo) (OpReg rlo),
ADD II32 (OpReg r2lo) (OpReg rlo),
MOV II32 (OpReg r1hi) (OpReg rhi),
ADC II32 (OpReg r2hi) (OpReg rhi) ]
return (ChildCode64 code rlo)
iselExpr64 (CmmMachOp (MO_UU_Conv _ W64) [expr]) = do
fn <- getAnyReg expr
r_dst_lo <- getNewRegNat II32
let r_dst_hi = getHiVRegFromLo r_dst_lo
code = fn r_dst_lo
return (
ChildCode64 (code `snocOL`
MOV II32 (OpImm (ImmInt 0)) (OpReg r_dst_hi))
r_dst_lo
)
iselExpr64 expr
= pprPanic "iselExpr64(i386)" (ppr expr)
getRegister :: CmmExpr -> NatM Register
getRegister e = do dflags <- getDynFlagsNat
getRegister' (target32Bit (targetPlatform dflags)) e
getRegister' :: Bool -> CmmExpr -> NatM Register
getRegister' is32Bit (CmmReg reg)
= case reg of
CmmGlobal PicBaseReg
| is32Bit ->
do reg' <- getPicBaseNat archWordSize
return (Fixed archWordSize reg' nilOL)
_ ->
do use_sse2 <- sse2Enabled
let
sz = cmmTypeSize (cmmRegType reg)
size | not use_sse2 && isFloatSize sz = FF80
| otherwise = sz
return (Fixed size (getRegisterReg use_sse2 reg) nilOL)
getRegister' is32Bit (CmmRegOff r n)
= getRegister' is32Bit $ mangleIndexTree r n
getRegister' is32Bit (CmmMachOp (MO_UU_Conv W64 W32)
[CmmMachOp (MO_U_Shr W64) [x,CmmLit (CmmInt 32 _)]])
| is32Bit = do
ChildCode64 code rlo <- iselExpr64 x
return $ Fixed II32 (getHiVRegFromLo rlo) code
getRegister' is32Bit (CmmMachOp (MO_SS_Conv W64 W32)
[CmmMachOp (MO_U_Shr W64) [x,CmmLit (CmmInt 32 _)]])
| is32Bit = do
ChildCode64 code rlo <- iselExpr64 x
return $ Fixed II32 (getHiVRegFromLo rlo) code
getRegister' is32Bit (CmmMachOp (MO_UU_Conv W64 W32) [x])
| is32Bit = do
ChildCode64 code rlo <- iselExpr64 x
return $ Fixed II32 rlo code
getRegister' is32Bit (CmmMachOp (MO_SS_Conv W64 W32) [x])
| is32Bit = do
ChildCode64 code rlo <- iselExpr64 x
return $ Fixed II32 rlo code
getRegister' _ (CmmLit lit@(CmmFloat f w)) =
if_sse2 float_const_sse2 float_const_x87
where
float_const_sse2
| f == 0.0 = do
let
size = floatSize w
code dst = unitOL (XOR size (OpReg dst) (OpReg dst))
return (Any size code)
| otherwise = do
Amode addr code <- memConstant (widthInBytes w) lit
loadFloatAmode True w addr code
float_const_x87 = case w of
W64
| f == 0.0 ->
let code dst = unitOL (GLDZ dst)
in return (Any FF80 code)
| f == 1.0 ->
let code dst = unitOL (GLD1 dst)
in return (Any FF80 code)
_otherwise -> do
Amode addr code <- memConstant (widthInBytes w) lit
loadFloatAmode False w addr code
getRegister' _ (CmmMachOp (MO_UU_Conv W8 W32) [CmmLoad addr _]) = do
code <- intLoadCode (MOVZxL II8) addr
return (Any II32 code)
getRegister' _ (CmmMachOp (MO_SS_Conv W8 W32) [CmmLoad addr _]) = do
code <- intLoadCode (MOVSxL II8) addr
return (Any II32 code)
getRegister' _ (CmmMachOp (MO_UU_Conv W16 W32) [CmmLoad addr _]) = do
code <- intLoadCode (MOVZxL II16) addr
return (Any II32 code)
getRegister' _ (CmmMachOp (MO_SS_Conv W16 W32) [CmmLoad addr _]) = do
code <- intLoadCode (MOVSxL II16) addr
return (Any II32 code)
getRegister' is32Bit (CmmMachOp (MO_UU_Conv W8 W64) [CmmLoad addr _])
| not is32Bit = do
code <- intLoadCode (MOVZxL II8) addr
return (Any II64 code)
getRegister' is32Bit (CmmMachOp (MO_SS_Conv W8 W64) [CmmLoad addr _])
| not is32Bit = do
code <- intLoadCode (MOVSxL II8) addr
return (Any II64 code)
getRegister' is32Bit (CmmMachOp (MO_UU_Conv W16 W64) [CmmLoad addr _])
| not is32Bit = do
code <- intLoadCode (MOVZxL II16) addr
return (Any II64 code)
getRegister' is32Bit (CmmMachOp (MO_SS_Conv W16 W64) [CmmLoad addr _])
| not is32Bit = do
code <- intLoadCode (MOVSxL II16) addr
return (Any II64 code)
getRegister' is32Bit (CmmMachOp (MO_UU_Conv W32 W64) [CmmLoad addr _])
| not is32Bit = do
code <- intLoadCode (MOV II32) addr
return (Any II64 code)
getRegister' is32Bit (CmmMachOp (MO_SS_Conv W32 W64) [CmmLoad addr _])
| not is32Bit = do
code <- intLoadCode (MOVSxL II32) addr
return (Any II64 code)
getRegister' is32Bit (CmmMachOp (MO_Add W64) [CmmReg (CmmGlobal PicBaseReg),
CmmLit displacement])
| not is32Bit = do
return $ Any II64 (\dst -> unitOL $
LEA II64 (OpAddr (ripRel (litToImm displacement))) (OpReg dst))
getRegister' is32Bit (CmmMachOp mop [x]) = do
sse2 <- sse2Enabled
case mop of
MO_F_Neg w
| sse2 -> sse2NegCode w x
| otherwise -> trivialUFCode FF80 (GNEG FF80) x
MO_S_Neg w -> triv_ucode NEGI (intSize w)
MO_Not w -> triv_ucode NOT (intSize w)
MO_UU_Conv W32 W8 -> toI8Reg W32 x
MO_SS_Conv W32 W8 -> toI8Reg W32 x
MO_UU_Conv W16 W8 -> toI8Reg W16 x
MO_SS_Conv W16 W8 -> toI8Reg W16 x
MO_UU_Conv W32 W16 -> toI16Reg W32 x
MO_SS_Conv W32 W16 -> toI16Reg W32 x
MO_UU_Conv W64 W32 | not is32Bit -> conversionNop II64 x
MO_SS_Conv W64 W32 | not is32Bit -> conversionNop II64 x
MO_UU_Conv W64 W16 | not is32Bit -> toI16Reg W64 x
MO_SS_Conv W64 W16 | not is32Bit -> toI16Reg W64 x
MO_UU_Conv W64 W8 | not is32Bit -> toI8Reg W64 x
MO_SS_Conv W64 W8 | not is32Bit -> toI8Reg W64 x
MO_UU_Conv rep1 rep2 | rep1 == rep2 -> conversionNop (intSize rep1) x
MO_SS_Conv rep1 rep2 | rep1 == rep2 -> conversionNop (intSize rep1) x
MO_UU_Conv W8 W32 -> integerExtend W8 W32 MOVZxL x
MO_UU_Conv W16 W32 -> integerExtend W16 W32 MOVZxL x
MO_UU_Conv W8 W16 -> integerExtend W8 W16 MOVZxL x
MO_SS_Conv W8 W32 -> integerExtend W8 W32 MOVSxL x
MO_SS_Conv W16 W32 -> integerExtend W16 W32 MOVSxL x
MO_SS_Conv W8 W16 -> integerExtend W8 W16 MOVSxL x
MO_UU_Conv W8 W64 | not is32Bit -> integerExtend W8 W64 MOVZxL x
MO_UU_Conv W16 W64 | not is32Bit -> integerExtend W16 W64 MOVZxL x
MO_UU_Conv W32 W64 | not is32Bit -> integerExtend W32 W64 MOVZxL x
MO_SS_Conv W8 W64 | not is32Bit -> integerExtend W8 W64 MOVSxL x
MO_SS_Conv W16 W64 | not is32Bit -> integerExtend W16 W64 MOVSxL x
MO_SS_Conv W32 W64 | not is32Bit -> integerExtend W32 W64 MOVSxL x
MO_FF_Conv W32 W64
| sse2 -> coerceFP2FP W64 x
| otherwise -> conversionNop FF80 x
MO_FF_Conv W64 W32 -> coerceFP2FP W32 x
MO_FS_Conv from to -> coerceFP2Int from to x
MO_SF_Conv from to -> coerceInt2FP from to x
_other -> pprPanic "getRegister" (pprMachOp mop)
where
triv_ucode :: (Size -> Operand -> Instr) -> Size -> NatM Register
triv_ucode instr size = trivialUCode size (instr size) x
integerExtend :: Width -> Width
-> (Size -> Operand -> Operand -> Instr)
-> CmmExpr -> NatM Register
integerExtend from to instr expr = do
(reg,e_code) <- if from == W8 then getByteReg expr
else getSomeReg expr
let
code dst =
e_code `snocOL`
instr (intSize from) (OpReg reg) (OpReg dst)
return (Any (intSize to) code)
toI8Reg :: Width -> CmmExpr -> NatM Register
toI8Reg new_rep expr
= do codefn <- getAnyReg expr
return (Any (intSize new_rep) codefn)
toI16Reg = toI8Reg
conversionNop :: Size -> CmmExpr -> NatM Register
conversionNop new_size expr
= do e_code <- getRegister' is32Bit expr
return (swizzleRegisterRep e_code new_size)
getRegister' _ (CmmMachOp mop [x, y]) = do
sse2 <- sse2Enabled
case mop of
MO_F_Eq _ -> condFltReg EQQ x y
MO_F_Ne _ -> condFltReg NE x y
MO_F_Gt _ -> condFltReg GTT x y
MO_F_Ge _ -> condFltReg GE x y
MO_F_Lt _ -> condFltReg LTT x y
MO_F_Le _ -> condFltReg LE x y
MO_Eq _ -> condIntReg EQQ x y
MO_Ne _ -> condIntReg NE x y
MO_S_Gt _ -> condIntReg GTT x y
MO_S_Ge _ -> condIntReg GE x y
MO_S_Lt _ -> condIntReg LTT x y
MO_S_Le _ -> condIntReg LE x y
MO_U_Gt _ -> condIntReg GU x y
MO_U_Ge _ -> condIntReg GEU x y
MO_U_Lt _ -> condIntReg LU x y
MO_U_Le _ -> condIntReg LEU x y
MO_F_Add w | sse2 -> trivialFCode_sse2 w ADD x y
| otherwise -> trivialFCode_x87 GADD x y
MO_F_Sub w | sse2 -> trivialFCode_sse2 w SUB x y
| otherwise -> trivialFCode_x87 GSUB x y
MO_F_Quot w | sse2 -> trivialFCode_sse2 w FDIV x y
| otherwise -> trivialFCode_x87 GDIV x y
MO_F_Mul w | sse2 -> trivialFCode_sse2 w MUL x y
| otherwise -> trivialFCode_x87 GMUL x y
MO_Add rep -> add_code rep x y
MO_Sub rep -> sub_code rep x y
MO_S_Quot rep -> div_code rep True True x y
MO_S_Rem rep -> div_code rep True False x y
MO_U_Quot rep -> div_code rep False True x y
MO_U_Rem rep -> div_code rep False False x y
MO_S_MulMayOflo rep -> imulMayOflo rep x y
MO_Mul rep -> triv_op rep IMUL
MO_And rep -> triv_op rep AND
MO_Or rep -> triv_op rep OR
MO_Xor rep -> triv_op rep XOR
MO_Shl rep -> shift_code rep SHL x y
MO_U_Shr rep -> shift_code rep SHR x y
MO_S_Shr rep -> shift_code rep SAR x y
_other -> pprPanic "getRegister(x86) - binary CmmMachOp (1)" (pprMachOp mop)
where
triv_op width instr = trivialCode width op (Just op) x y
where op = instr (intSize width)
imulMayOflo :: Width -> CmmExpr -> CmmExpr -> NatM Register
imulMayOflo rep a b = do
(a_reg, a_code) <- getNonClobberedReg a
b_code <- getAnyReg b
let
shift_amt = case rep of
W32 -> 31
W64 -> 63
_ -> panic "shift_amt"
size = intSize rep
code = a_code `appOL` b_code eax `appOL`
toOL [
IMUL2 size (OpReg a_reg),
SAR size (OpImm (ImmInt shift_amt)) (OpReg eax),
SUB size (OpReg edx) (OpReg eax)
]
return (Fixed size eax code)
shift_code :: Width
-> (Size -> Operand -> Operand -> Instr)
-> CmmExpr
-> CmmExpr
-> NatM Register
shift_code width instr x (CmmLit lit) = do
x_code <- getAnyReg x
let
size = intSize width
code dst
= x_code dst `snocOL`
instr size (OpImm (litToImm lit)) (OpReg dst)
return (Any size code)
shift_code width instr x y = do
x_code <- getAnyReg x
let size = intSize width
tmp <- getNewRegNat size
y_code <- getAnyReg y
let
code = x_code tmp `appOL`
y_code ecx `snocOL`
instr size (OpReg ecx) (OpReg tmp)
return (Fixed size tmp code)
add_code :: Width -> CmmExpr -> CmmExpr -> NatM Register
add_code rep x (CmmLit (CmmInt y _))
| is32BitInteger y = add_int rep x y
add_code rep x y = trivialCode rep (ADD size) (Just (ADD size)) x y
where size = intSize rep
sub_code :: Width -> CmmExpr -> CmmExpr -> NatM Register
sub_code rep x (CmmLit (CmmInt y _))
| is32BitInteger (-y) = add_int rep x (-y)
sub_code rep x y = trivialCode rep (SUB (intSize rep)) Nothing x y
add_int width x y = do
(x_reg, x_code) <- getSomeReg x
let
size = intSize width
imm = ImmInt (fromInteger y)
code dst
= x_code `snocOL`
LEA size
(OpAddr (AddrBaseIndex (EABaseReg x_reg) EAIndexNone imm))
(OpReg dst)
return (Any size code)
div_code width signed quotient x y = do
(y_op, y_code) <- getRegOrMem y
x_code <- getAnyReg x
let
size = intSize width
widen | signed = CLTD size
| otherwise = XOR size (OpReg edx) (OpReg edx)
instr | signed = IDIV
| otherwise = DIV
code = y_code `appOL`
x_code eax `appOL`
toOL [widen, instr size y_op]
result | quotient = eax
| otherwise = edx
return (Fixed size result code)
getRegister' _ (CmmLoad mem pk)
| isFloatType pk
= do
Amode addr mem_code <- getAmode mem
use_sse2 <- sse2Enabled
loadFloatAmode use_sse2 (typeWidth pk) addr mem_code
getRegister' is32Bit (CmmLoad mem pk)
| is32Bit && not (isWord64 pk)
= do
code <- intLoadCode instr mem
return (Any size code)
where
width = typeWidth pk
size = intSize width
instr = case width of
W8 -> MOVZxL II8
_other -> MOV size
getRegister' is32Bit (CmmLoad mem pk)
| not is32Bit
= do
code <- intLoadCode (MOV size) mem
return (Any size code)
where size = intSize $ typeWidth pk
getRegister' _ (CmmLit (CmmInt 0 width))
= let
size = intSize width
size1 = IF_ARCH_i386( size, case size of II64 -> II32; _ -> size )
code dst
= unitOL (XOR size1 (OpReg dst) (OpReg dst))
in
return (Any size code)
getRegister' is32Bit (CmmLit lit)
| not is32Bit, isWord64 (cmmLitType lit), not (isBigLit lit)
= let
imm = litToImm lit
code dst = unitOL (MOV II32 (OpImm imm) (OpReg dst))
in
return (Any II64 code)
where
isBigLit (CmmInt i _) = i < 0 || i > 0xffffffff
isBigLit _ = False
getRegister' _ (CmmLit lit)
= let
size = cmmTypeSize (cmmLitType lit)
imm = litToImm lit
code dst = unitOL (MOV size (OpImm imm) (OpReg dst))
in
return (Any size code)
getRegister' _ other = pprPanic "getRegister(x86)" (ppr other)
intLoadCode :: (Operand -> Operand -> Instr) -> CmmExpr
-> NatM (Reg -> InstrBlock)
intLoadCode instr mem = do
Amode src mem_code <- getAmode mem
return (\dst -> mem_code `snocOL` instr (OpAddr src) (OpReg dst))
getAnyReg :: CmmExpr -> NatM (Reg -> InstrBlock)
getAnyReg expr = do
r <- getRegister expr
anyReg r
anyReg :: Register -> NatM (Reg -> InstrBlock)
anyReg (Any _ code) = return code
anyReg (Fixed rep reg fcode) = return (\dst -> fcode `snocOL` reg2reg rep reg dst)
getByteReg :: CmmExpr -> NatM (Reg, InstrBlock)
getByteReg expr = do
dflags <- getDynFlagsNat
if target32Bit (targetPlatform dflags)
then do r <- getRegister expr
case r of
Any rep code -> do
tmp <- getNewRegNat rep
return (tmp, code tmp)
Fixed rep reg code
| isVirtualReg reg -> return (reg,code)
| otherwise -> do
tmp <- getNewRegNat rep
return (tmp, code `snocOL` reg2reg rep reg tmp)
else getSomeReg expr
getNonClobberedReg :: CmmExpr -> NatM (Reg, InstrBlock)
getNonClobberedReg expr = do
r <- getRegister expr
case r of
Any rep code -> do
tmp <- getNewRegNat rep
return (tmp, code tmp)
Fixed rep reg code
| RegReal (RealRegSingle rr) <- reg
, isFastTrue (freeReg rr)
-> do
tmp <- getNewRegNat rep
return (tmp, code `snocOL` reg2reg rep reg tmp)
| otherwise ->
return (reg, code)
reg2reg :: Size -> Reg -> Reg -> Instr
reg2reg size src dst
| size == FF80 = GMOV src dst
| otherwise = MOV size (OpReg src) (OpReg dst)
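-- getAmode: turn an address expression into an x86 addressing mode,
-- exploiting base + index*scale + displacement forms where possible.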
getAmode :: CmmExpr -> NatM Amode
getAmode e = do dflags <- getDynFlagsNat
getAmode' (target32Bit (targetPlatform dflags)) e
getAmode' :: Bool -> CmmExpr -> NatM Amode
getAmode' _ (CmmRegOff r n) = getAmode $ mangleIndexTree r n
getAmode' is32Bit (CmmMachOp (MO_Add W64) [CmmReg (CmmGlobal PicBaseReg),
CmmLit displacement])
| not is32Bit
= return $ Amode (ripRel (litToImm displacement)) nilOL
getAmode' _ (CmmMachOp (MO_Sub _rep) [x, CmmLit lit@(CmmInt i _)])
| is32BitLit lit
= do (x_reg, x_code) <- getSomeReg x
let off = ImmInt ((fromInteger i))
return (Amode (AddrBaseIndex (EABaseReg x_reg) EAIndexNone off) x_code)
getAmode' _ (CmmMachOp (MO_Add _rep) [x, CmmLit lit])
| is32BitLit lit
= do (x_reg, x_code) <- getSomeReg x
let off = litToImm lit
return (Amode (AddrBaseIndex (EABaseReg x_reg) EAIndexNone off) x_code)
getAmode' is32Bit (CmmMachOp (MO_Add rep) [a@(CmmMachOp (MO_Shl _) _),
b@(CmmLit _)])
= getAmode' is32Bit (CmmMachOp (MO_Add rep) [b,a])
getAmode' _ (CmmMachOp (MO_Add _) [x, CmmMachOp (MO_Shl _)
[y, CmmLit (CmmInt shift _)]])
| shift == 0 || shift == 1 || shift == 2 || shift == 3
= x86_complex_amode x y shift 0
getAmode' _ (CmmMachOp (MO_Add _)
[x, CmmMachOp (MO_Add _)
[CmmMachOp (MO_Shl _) [y, CmmLit (CmmInt shift _)],
CmmLit (CmmInt offset _)]])
| (shift == 0 || shift == 1 || shift == 2 || shift == 3)
&& is32BitInteger offset
= x86_complex_amode x y shift offset
getAmode' _ (CmmMachOp (MO_Add _) [x,y])
= x86_complex_amode x y 0 0
getAmode' _ (CmmLit lit) | is32BitLit lit
= return (Amode (ImmAddr (litToImm lit) 0) nilOL)
getAmode' _ expr = do
(reg,code) <- getSomeReg expr
return (Amode (AddrBaseIndex (EABaseReg reg) EAIndexNone (ImmInt 0)) code)
x86_complex_amode :: CmmExpr -> CmmExpr -> Integer -> Integer -> NatM Amode
x86_complex_amode base index shift offset
= do (x_reg, x_code) <- getNonClobberedReg base
(y_reg, y_code) <- getSomeReg index
let
code = x_code `appOL` y_code
base = case shift of 0 -> 1; 1 -> 2; 2 -> 4; 3 -> 8;
n -> panic $ "x86_complex_amode: unhandled shift! (" ++ show n ++ ")"
return (Amode (AddrBaseIndex (EABaseReg x_reg) (EAIndex y_reg base) (ImmInt (fromIntegral offset)))
code)
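-- getNonClobberedOperand: produce an Operand whose value will not be
-- clobbered by code generated afterwards (falling back to loading it into
-- a fresh register when necessary).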
getNonClobberedOperand :: CmmExpr -> NatM (Operand, InstrBlock)
getNonClobberedOperand (CmmLit lit) = do
use_sse2 <- sse2Enabled
if use_sse2 && isSuitableFloatingPointLit lit
then do
let CmmFloat _ w = lit
Amode addr code <- memConstant (widthInBytes w) lit
return (OpAddr addr, code)
else do
if is32BitLit lit && not (isFloatType (cmmLitType lit))
then return (OpImm (litToImm lit), nilOL)
else getNonClobberedOperand_generic (CmmLit lit)
getNonClobberedOperand (CmmLoad mem pk) = do
use_sse2 <- sse2Enabled
if (not (isFloatType pk) || use_sse2)
&& IF_ARCH_i386(not (isWord64 pk), True)
then do
Amode src mem_code <- getAmode mem
(src',save_code) <-
if (amodeCouldBeClobbered src)
then do
tmp <- getNewRegNat archWordSize
return (AddrBaseIndex (EABaseReg tmp) EAIndexNone (ImmInt 0),
unitOL (LEA II32 (OpAddr src) (OpReg tmp)))
else
return (src, nilOL)
return (OpAddr src', save_code `appOL` mem_code)
else do
getNonClobberedOperand_generic (CmmLoad mem pk)
getNonClobberedOperand e = getNonClobberedOperand_generic e
getNonClobberedOperand_generic :: CmmExpr -> NatM (Operand, InstrBlock)
getNonClobberedOperand_generic e = do
(reg, code) <- getNonClobberedReg e
return (OpReg reg, code)
amodeCouldBeClobbered :: AddrMode -> Bool
amodeCouldBeClobbered amode = any regClobbered (addrModeRegs amode)
regClobbered :: Reg -> Bool
regClobbered (RegReal (RealRegSingle rr)) = isFastTrue (freeReg rr)
regClobbered _ = False
getOperand :: CmmExpr -> NatM (Operand, InstrBlock)
getOperand (CmmLit lit) = do
use_sse2 <- sse2Enabled
if (use_sse2 && isSuitableFloatingPointLit lit)
then do
let CmmFloat _ w = lit
Amode addr code <- memConstant (widthInBytes w) lit
return (OpAddr addr, code)
else do
if is32BitLit lit && not (isFloatType (cmmLitType lit))
then return (OpImm (litToImm lit), nilOL)
else getOperand_generic (CmmLit lit)
getOperand (CmmLoad mem pk) = do
use_sse2 <- sse2Enabled
if (not (isFloatType pk) || use_sse2) && IF_ARCH_i386(not (isWord64 pk), True)
then do
Amode src mem_code <- getAmode mem
return (OpAddr src, mem_code)
else
getOperand_generic (CmmLoad mem pk)
getOperand e = getOperand_generic e
getOperand_generic :: CmmExpr -> NatM (Operand, InstrBlock)
getOperand_generic e = do
(reg, code) <- getSomeReg e
return (OpReg reg, code)
isOperand :: CmmExpr -> Bool
isOperand (CmmLoad _ _) = True
isOperand (CmmLit lit) = is32BitLit lit
|| isSuitableFloatingPointLit lit
isOperand _ = False
memConstant :: Int -> CmmLit -> NatM Amode
memConstant align lit = do
lbl <- getNewLabelNat
dflags <- getDynFlagsNat
(addr, addr_code) <- if target32Bit (targetPlatform dflags)
then do dynRef <- cmmMakeDynamicReference
dflags
addImportNat
DataReference
lbl
Amode addr addr_code <- getAmode dynRef
return (addr, addr_code)
else return (ripRel (ImmCLbl lbl), nilOL)
let code =
LDATA ReadOnlyData (align, Statics lbl [CmmStaticLit lit])
`consOL` addr_code
return (Amode addr code)
loadFloatAmode :: Bool -> Width -> AddrMode -> InstrBlock -> NatM Register
loadFloatAmode use_sse2 w addr addr_code = do
let size = floatSize w
code dst = addr_code `snocOL`
if use_sse2
then MOV size (OpAddr addr) (OpReg dst)
else GLD size addr dst
return (Any (if use_sse2 then size else FF80) code)
isSuitableFloatingPointLit :: CmmLit -> Bool
isSuitableFloatingPointLit (CmmFloat f _) = f /= 0.0
isSuitableFloatingPointLit _ = False
getRegOrMem :: CmmExpr -> NatM (Operand, InstrBlock)
getRegOrMem e@(CmmLoad mem pk) = do
use_sse2 <- sse2Enabled
if (not (isFloatType pk) || use_sse2) && IF_ARCH_i386(not (isWord64 pk), True)
then do
Amode src mem_code <- getAmode mem
return (OpAddr src, mem_code)
else do
(reg, code) <- getNonClobberedReg e
return (OpReg reg, code)
getRegOrMem e = do
(reg, code) <- getNonClobberedReg e
return (OpReg reg, code)
is32BitLit :: CmmLit -> Bool
#if x86_64_TARGET_ARCH
is32BitLit (CmmInt i W64) = is32BitInteger i
#endif
is32BitLit _ = True
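-- -----------------------------------------------------------------------------
-- Condition codes: translate a boolean expression into a CondCode, i.e.
-- code that sets the flags plus the condition to test afterwards.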
getCondCode :: CmmExpr -> NatM CondCode
getCondCode (CmmMachOp mop [x, y])
=
case mop of
MO_F_Eq W32 -> condFltCode EQQ x y
MO_F_Ne W32 -> condFltCode NE x y
MO_F_Gt W32 -> condFltCode GTT x y
MO_F_Ge W32 -> condFltCode GE x y
MO_F_Lt W32 -> condFltCode LTT x y
MO_F_Le W32 -> condFltCode LE x y
MO_F_Eq W64 -> condFltCode EQQ x y
MO_F_Ne W64 -> condFltCode NE x y
MO_F_Gt W64 -> condFltCode GTT x y
MO_F_Ge W64 -> condFltCode GE x y
MO_F_Lt W64 -> condFltCode LTT x y
MO_F_Le W64 -> condFltCode LE x y
MO_Eq _ -> condIntCode EQQ x y
MO_Ne _ -> condIntCode NE x y
MO_S_Gt _ -> condIntCode GTT x y
MO_S_Ge _ -> condIntCode GE x y
MO_S_Lt _ -> condIntCode LTT x y
MO_S_Le _ -> condIntCode LE x y
MO_U_Gt _ -> condIntCode GU x y
MO_U_Ge _ -> condIntCode GEU x y
MO_U_Lt _ -> condIntCode LU x y
MO_U_Le _ -> condIntCode LEU x y
_other -> pprPanic "getCondCode(x86,x86_64,sparc)" (ppr (CmmMachOp mop [x,y]))
getCondCode other = pprPanic "getCondCode(2)(x86,sparc)" (ppr other)
condIntCode :: Cond -> CmmExpr -> CmmExpr -> NatM CondCode
condIntCode cond (CmmLoad x pk) (CmmLit lit) | is32BitLit lit = do
Amode x_addr x_code <- getAmode x
let
imm = litToImm lit
code = x_code `snocOL`
CMP (cmmTypeSize pk) (OpImm imm) (OpAddr x_addr)
return (CondCode False cond code)
condIntCode cond (CmmMachOp (MO_And _) [x,o2]) (CmmLit (CmmInt 0 pk))
| (CmmLit lit@(CmmInt mask _)) <- o2, is32BitLit lit
= do
(x_reg, x_code) <- getSomeReg x
let
code = x_code `snocOL`
TEST (intSize pk) (OpImm (ImmInteger mask)) (OpReg x_reg)
return (CondCode False cond code)
condIntCode cond x (CmmLit (CmmInt 0 pk)) = do
(x_reg, x_code) <- getSomeReg x
let
code = x_code `snocOL`
TEST (intSize pk) (OpReg x_reg) (OpReg x_reg)
return (CondCode False cond code)
condIntCode cond x y | isOperand y = do
(x_reg, x_code) <- getNonClobberedReg x
(y_op, y_code) <- getOperand y
let
code = x_code `appOL` y_code `snocOL`
CMP (cmmTypeSize (cmmExprType x)) y_op (OpReg x_reg)
return (CondCode False cond code)
condIntCode cond x y = do
(y_reg, y_code) <- getNonClobberedReg y
(x_op, x_code) <- getRegOrMem x
let
code = y_code `appOL`
x_code `snocOL`
CMP (cmmTypeSize (cmmExprType x)) (OpReg y_reg) x_op
return (CondCode False cond code)
condFltCode :: Cond -> CmmExpr -> CmmExpr -> NatM CondCode
condFltCode cond x y
= if_sse2 condFltCode_sse2 condFltCode_x87
where
condFltCode_x87
= ASSERT(cond `elem` ([EQQ, NE, LE, LTT, GE, GTT])) do
(x_reg, x_code) <- getNonClobberedReg x
(y_reg, y_code) <- getSomeReg y
let
code = x_code `appOL` y_code `snocOL`
GCMP cond x_reg y_reg
return (CondCode True EQQ code)
condFltCode_sse2 = do
(x_reg, x_code) <- getNonClobberedReg x
(y_op, y_code) <- getOperand y
let
code = x_code `appOL`
y_code `snocOL`
CMP (floatSize $ cmmExprWidth x) y_op (OpReg x_reg)
return (CondCode True (condToUnsigned cond) code)
assignMem_IntCode :: Size -> CmmExpr -> CmmExpr -> NatM InstrBlock
assignReg_IntCode :: Size -> CmmReg -> CmmExpr -> NatM InstrBlock
assignMem_FltCode :: Size -> CmmExpr -> CmmExpr -> NatM InstrBlock
assignReg_FltCode :: Size -> CmmReg -> CmmExpr -> NatM InstrBlock
assignMem_IntCode pk addr (CmmMachOp op [CmmLoad addr2 _,
CmmLit (CmmInt i _)])
| addr == addr2, pk /= II64 || is32BitInteger i,
Just instr <- check op
= do Amode amode code_addr <- getAmode addr
let code = code_addr `snocOL`
instr pk (OpImm (ImmInt (fromIntegral i))) (OpAddr amode)
return code
where
check (MO_Add _) = Just ADD
check (MO_Sub _) = Just SUB
check _ = Nothing
assignMem_IntCode pk addr src = do
Amode addr code_addr <- getAmode addr
(code_src, op_src) <- get_op_RI src
let
code = code_src `appOL`
code_addr `snocOL`
MOV pk op_src (OpAddr addr)
return code
where
get_op_RI :: CmmExpr -> NatM (InstrBlock,Operand)
get_op_RI (CmmLit lit) | is32BitLit lit
= return (nilOL, OpImm (litToImm lit))
get_op_RI op
= do (reg,code) <- getNonClobberedReg op
return (code, OpReg reg)
assignReg_IntCode pk reg (CmmLoad src _) = do
load_code <- intLoadCode (MOV pk) src
return (load_code (getRegisterReg False reg))
assignReg_IntCode _ reg src = do
code <- getAnyReg src
return (code (getRegisterReg False reg))
assignMem_FltCode pk addr src = do
(src_reg, src_code) <- getNonClobberedReg src
Amode addr addr_code <- getAmode addr
use_sse2 <- sse2Enabled
let
code = src_code `appOL`
addr_code `snocOL`
if use_sse2 then MOV pk (OpReg src_reg) (OpAddr addr)
else GST pk src_reg addr
return code
assignReg_FltCode _ reg src = do
use_sse2 <- sse2Enabled
src_code <- getAnyReg src
return (src_code (getRegisterReg use_sse2 reg))
genJump :: CmmExpr -> NatM InstrBlock
genJump (CmmLoad mem _) = do
Amode target code <- getAmode mem
return (code `snocOL` JMP (OpAddr target))
genJump (CmmLit lit) = do
return (unitOL (JMP (OpImm (litToImm lit))))
genJump expr = do
(reg,code) <- getSomeReg expr
return (code `snocOL` JMP (OpReg reg))
genBranch :: BlockId -> NatM InstrBlock
genBranch = return . toOL . mkJumpInstr
genCondJump
:: BlockId
-> CmmExpr
-> NatM InstrBlock
genCondJump id bool = do
CondCode is_float cond cond_code <- getCondCode bool
use_sse2 <- sse2Enabled
if not is_float || not use_sse2
then
return (cond_code `snocOL` JXX cond id)
else do
lbl <- getBlockIdNat
let code = case cond of
NE -> or_unordered
GU -> plain_test
GEU -> plain_test
_ -> and_ordered
plain_test = unitOL (
JXX cond id
)
or_unordered = toOL [
JXX cond id,
JXX PARITY id
]
and_ordered = toOL [
JXX PARITY lbl,
JXX cond id,
JXX ALWAYS lbl,
NEWBLOCK lbl
]
return (cond_code `appOL` code)
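-- -----------------------------------------------------------------------------
-- Foreign calls.  Small, suitably aligned memcpy/memset calls are unrolled
-- inline; everything else is dispatched to genCCall32 or genCCall64
-- according to the target word size.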
genCCall
:: CmmCallTarget
-> [HintedCmmFormal]
-> [HintedCmmActual]
-> NatM InstrBlock
genCCall (CmmPrim MO_Memcpy) _ [CmmHinted dst _, CmmHinted src _,
CmmHinted (CmmLit (CmmInt n _)) _,
CmmHinted (CmmLit (CmmInt align _)) _]
| n <= maxInlineSizeThreshold && align .&. 3 == 0 = do
code_dst <- getAnyReg dst
dst_r <- getNewRegNat size
code_src <- getAnyReg src
src_r <- getNewRegNat size
tmp_r <- getNewRegNat size
return $ code_dst dst_r `appOL` code_src src_r `appOL`
go dst_r src_r tmp_r n
where
size = if align .&. 4 /= 0 then II32 else archWordSize
sizeBytes = fromIntegral (sizeInBytes size)
go :: Reg -> Reg -> Reg -> Integer -> OrdList Instr
go dst src tmp i
| i >= sizeBytes =
unitOL (MOV size (OpAddr src_addr) (OpReg tmp)) `appOL`
unitOL (MOV size (OpReg tmp) (OpAddr dst_addr)) `appOL`
go dst src tmp (i - sizeBytes)
| i >= 4 =
unitOL (MOV II32 (OpAddr src_addr) (OpReg tmp)) `appOL`
unitOL (MOV II32 (OpReg tmp) (OpAddr dst_addr)) `appOL`
go dst src tmp (i - 4)
| i >= 2 =
unitOL (MOVZxL II16 (OpAddr src_addr) (OpReg tmp)) `appOL`
unitOL (MOV II16 (OpReg tmp) (OpAddr dst_addr)) `appOL`
go dst src tmp (i - 2)
| i >= 1 =
unitOL (MOVZxL II8 (OpAddr src_addr) (OpReg tmp)) `appOL`
unitOL (MOV II8 (OpReg tmp) (OpAddr dst_addr)) `appOL`
go dst src tmp (i - 1)
| otherwise = nilOL
where
src_addr = AddrBaseIndex (EABaseReg src) EAIndexNone
(ImmInteger (n - i))
dst_addr = AddrBaseIndex (EABaseReg dst) EAIndexNone
(ImmInteger (n - i))
genCCall (CmmPrim MO_Memset) _ [CmmHinted dst _,
CmmHinted (CmmLit (CmmInt c _)) _,
CmmHinted (CmmLit (CmmInt n _)) _,
CmmHinted (CmmLit (CmmInt align _)) _]
| n <= maxInlineSizeThreshold && align .&. 3 == 0 = do
code_dst <- getAnyReg dst
dst_r <- getNewRegNat size
return $ code_dst dst_r `appOL` go dst_r n
where
(size, val) = case align .&. 3 of
2 -> (II16, c2)
0 -> (II32, c4)
_ -> (II8, c)
c2 = c `shiftL` 8 .|. c
c4 = c2 `shiftL` 16 .|. c2
sizeBytes = fromIntegral (sizeInBytes size)
go :: Reg -> Integer -> OrdList Instr
go dst i
| i >= sizeBytes =
unitOL (MOV size (OpImm (ImmInteger val)) (OpAddr dst_addr)) `appOL`
go dst (i - sizeBytes)
| i >= 4 =
unitOL (MOV II32 (OpImm (ImmInteger c4)) (OpAddr dst_addr)) `appOL`
go dst (i - 4)
| i >= 2 =
unitOL (MOV II16 (OpImm (ImmInteger c2)) (OpAddr dst_addr)) `appOL`
go dst (i - 2)
| i >= 1 =
unitOL (MOV II8 (OpImm (ImmInteger c)) (OpAddr dst_addr)) `appOL`
go dst (i - 1)
| otherwise = nilOL
where
dst_addr = AddrBaseIndex (EABaseReg dst) EAIndexNone
(ImmInteger (n - i))
genCCall (CmmPrim MO_WriteBarrier) _ _ = return nilOL
genCCall target dest_regs args =
do dflags <- getDynFlagsNat
if target32Bit (targetPlatform dflags)
then genCCall32 target dest_regs args
else genCCall64 target dest_regs args
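-- 32-bit C calls: arguments are pushed on the stack right-to-left and the
-- stack is kept 16-byte aligned; integer results come back in eax/edx and
-- floating-point results on the x87 stack (or via memory when using SSE2).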
genCCall32 :: CmmCallTarget
-> [HintedCmmFormal]
-> [HintedCmmActual]
-> NatM InstrBlock
genCCall32 target dest_regs args =
case (target, dest_regs) of
(CmmPrim op, []) ->
outOfLineCmmOp op Nothing args
(CmmPrim op, [r_hinted@(CmmHinted r _)]) -> do
l1 <- getNewLabelNat
l2 <- getNewLabelNat
sse2 <- sse2Enabled
if sse2
then
outOfLineCmmOp op (Just r_hinted) args
else case op of
MO_F32_Sqrt -> actuallyInlineFloatOp GSQRT FF32 args
MO_F64_Sqrt -> actuallyInlineFloatOp GSQRT FF64 args
MO_F32_Sin -> actuallyInlineFloatOp (\s -> GSIN s l1 l2) FF32 args
MO_F64_Sin -> actuallyInlineFloatOp (\s -> GSIN s l1 l2) FF64 args
MO_F32_Cos -> actuallyInlineFloatOp (\s -> GCOS s l1 l2) FF32 args
MO_F64_Cos -> actuallyInlineFloatOp (\s -> GCOS s l1 l2) FF64 args
MO_F32_Tan -> actuallyInlineFloatOp (\s -> GTAN s l1 l2) FF32 args
MO_F64_Tan -> actuallyInlineFloatOp (\s -> GTAN s l1 l2) FF64 args
_other_op -> outOfLineCmmOp op (Just r_hinted) args
where
actuallyInlineFloatOp instr size [CmmHinted x _]
= do res <- trivialUFCode size (instr size) x
any <- anyReg res
return (any (getRegisterReg False (CmmLocal r)))
actuallyInlineFloatOp _ _ args
= panic $ "genCCall.actuallyInlineFloatOp: bad number of arguments! ("
++ show (length args) ++ ")"
_ -> do
let
sizes = map (arg_size . cmmExprType . hintlessCmm) (reverse args)
raw_arg_size = sum sizes
tot_arg_size = roundTo 16 raw_arg_size
arg_pad_size = tot_arg_size - raw_arg_size
delta0 <- getDeltaNat
setDeltaNat (delta0 - arg_pad_size)
use_sse2 <- sse2Enabled
push_codes <- mapM (push_arg use_sse2) (reverse args)
delta <- getDeltaNat
(callinsns,cconv) <-
case target of
CmmCallee (CmmLit (CmmLabel lbl)) conv
->
return (unitOL (CALL (Left fn_imm) []), conv)
where fn_imm = ImmCLbl lbl
CmmCallee expr conv
-> do { (dyn_r, dyn_c) <- getSomeReg expr
; ASSERT( isWord32 (cmmExprType expr) )
return (dyn_c `snocOL` CALL (Right dyn_r) [], conv) }
CmmPrim _
-> panic $ "genCCall: Can't handle CmmPrim call type here, error "
++ "probably because too many return values."
let push_code
| arg_pad_size /= 0
= toOL [SUB II32 (OpImm (ImmInt arg_pad_size)) (OpReg esp),
DELTA (delta0 - arg_pad_size)]
`appOL` concatOL push_codes
| otherwise
= concatOL push_codes
pop_size | cconv /= StdCallConv = tot_arg_size
| otherwise = arg_pad_size
call = callinsns `appOL`
toOL (
(if pop_size==0 then [] else
[ADD II32 (OpImm (ImmInt pop_size)) (OpReg esp)])
++
[DELTA (delta + tot_arg_size)]
)
setDeltaNat (delta + tot_arg_size)
let
assign_code [] = nilOL
assign_code [CmmHinted dest _hint]
| isFloatType ty =
if use_sse2
then let tmp_amode = AddrBaseIndex (EABaseReg esp)
EAIndexNone
(ImmInt 0)
sz = floatSize w
in toOL [ SUB II32 (OpImm (ImmInt b)) (OpReg esp),
GST sz fake0 tmp_amode,
MOV sz (OpAddr tmp_amode) (OpReg r_dest),
ADD II32 (OpImm (ImmInt b)) (OpReg esp)]
else unitOL (GMOV fake0 r_dest)
| isWord64 ty = toOL [MOV II32 (OpReg eax) (OpReg r_dest),
MOV II32 (OpReg edx) (OpReg r_dest_hi)]
| otherwise = unitOL (MOV (intSize w) (OpReg eax) (OpReg r_dest))
where
ty = localRegType dest
w = typeWidth ty
b = widthInBytes w
r_dest_hi = getHiVRegFromLo r_dest
r_dest = getRegisterReg use_sse2 (CmmLocal dest)
assign_code many = pprPanic "genCCall.assign_code - too many return values:" (ppr many)
return (push_code `appOL`
call `appOL`
assign_code dest_regs)
where
arg_size :: CmmType -> Int
arg_size ty = widthInBytes (typeWidth ty)
roundTo a x | x `mod` a == 0 = x
| otherwise = x + a - (x `mod` a)
push_arg :: Bool -> HintedCmmActual
-> NatM InstrBlock
push_arg use_sse2 (CmmHinted arg _hint)
| isWord64 arg_ty = do
ChildCode64 code r_lo <- iselExpr64 arg
delta <- getDeltaNat
setDeltaNat (delta - 8)
let
r_hi = getHiVRegFromLo r_lo
return ( code `appOL`
toOL [PUSH II32 (OpReg r_hi), DELTA (delta - 4),
PUSH II32 (OpReg r_lo), DELTA (delta - 8),
DELTA (delta - 8)]
)
| isFloatType arg_ty = do
(reg, code) <- getSomeReg arg
delta <- getDeltaNat
setDeltaNat (delta - size)
return (code `appOL`
toOL [SUB II32 (OpImm (ImmInt size)) (OpReg esp),
DELTA (delta - size),
let addr = AddrBaseIndex (EABaseReg esp)
EAIndexNone
(ImmInt 0)
size = floatSize (typeWidth arg_ty)
in
if use_sse2
then MOV size (OpReg reg) (OpAddr addr)
else GST size reg addr
]
)
| otherwise = do
(operand, code) <- getOperand arg
delta <- getDeltaNat
setDeltaNat (delta - size)
return (code `snocOL`
PUSH II32 operand `snocOL`
DELTA (delta - size))
where
arg_ty = cmmExprType arg
size = arg_size arg_ty
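-- 64-bit C calls: the leading arguments go in the integer and SSE argument
-- registers (allArgRegs / allFPArgRegs), the rest on the stack; eax is
-- loaded with the number of SSE registers used, as varargs callees expect.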
genCCall64 :: CmmCallTarget
-> [HintedCmmFormal]
-> [HintedCmmActual]
-> NatM InstrBlock
genCCall64 target dest_regs args =
case (target, dest_regs) of
(CmmPrim op, []) ->
outOfLineCmmOp op Nothing args
(CmmPrim op, [res]) ->
outOfLineCmmOp op (Just res) args
_ -> do
(stack_args, aregs, fregs, load_args_code)
<- load_args args allArgRegs allFPArgRegs nilOL
let
fp_regs_used = reverse (drop (length fregs) (reverse allFPArgRegs))
int_regs_used = reverse (drop (length aregs) (reverse allArgRegs))
arg_regs = [eax] ++ int_regs_used ++ fp_regs_used
sse_regs = length fp_regs_used
tot_arg_size = arg_size * length stack_args
(real_size, adjust_rsp) <-
if tot_arg_size `rem` 16 == 0
then return (tot_arg_size, nilOL)
else do
delta <- getDeltaNat
setDeltaNat (delta - 8)
return (tot_arg_size+8, toOL [
SUB II64 (OpImm (ImmInt 8)) (OpReg rsp),
DELTA (delta - 8)
])
push_code <- push_args (reverse stack_args) nilOL
delta <- getDeltaNat
(callinsns,cconv) <-
case target of
CmmCallee (CmmLit (CmmLabel lbl)) conv
->
return (unitOL (CALL (Left fn_imm) arg_regs), conv)
where fn_imm = ImmCLbl lbl
CmmCallee expr conv
-> do (dyn_r, dyn_c) <- getSomeReg expr
return (dyn_c `snocOL` CALL (Right dyn_r) arg_regs, conv)
CmmPrim _
-> panic $ "genCCall: Can't handle CmmPrim call type here, error "
++ "probably because too many return values."
let
assign_eax n = unitOL (MOV II32 (OpImm (ImmInt n)) (OpReg eax))
let call = callinsns `appOL`
toOL (
(if cconv == StdCallConv || real_size==0 then [] else
[ADD (intSize wordWidth) (OpImm (ImmInt real_size)) (OpReg esp)])
++
[DELTA (delta + real_size)]
)
setDeltaNat (delta + real_size)
let
assign_code [] = nilOL
assign_code [CmmHinted dest _hint] =
case typeWidth rep of
W32 | isFloatType rep -> unitOL (MOV (floatSize W32) (OpReg xmm0) (OpReg r_dest))
W64 | isFloatType rep -> unitOL (MOV (floatSize W64) (OpReg xmm0) (OpReg r_dest))
_ -> unitOL (MOV (cmmTypeSize rep) (OpReg rax) (OpReg r_dest))
where
rep = localRegType dest
r_dest = getRegisterReg True (CmmLocal dest)
assign_code _many = panic "genCCall.assign_code many"
return (load_args_code `appOL`
adjust_rsp `appOL`
push_code `appOL`
assign_eax sse_regs `appOL`
call `appOL`
assign_code dest_regs)
where
arg_size = 8
load_args :: [CmmHinted CmmExpr]
-> [Reg]
-> [Reg]
-> InstrBlock
-> NatM ([CmmHinted CmmExpr],[Reg],[Reg],InstrBlock)
load_args args [] [] code = return (args, [], [], code)
load_args [] aregs fregs code = return ([], aregs, fregs, code)
load_args ((CmmHinted arg hint) : rest) aregs fregs code
| isFloatType arg_rep =
case fregs of
[] -> push_this_arg
(r:rs) -> do
arg_code <- getAnyReg arg
load_args rest aregs rs (code `appOL` arg_code r)
| otherwise =
case aregs of
[] -> push_this_arg
(r:rs) -> do
arg_code <- getAnyReg arg
load_args rest rs fregs (code `appOL` arg_code r)
where
arg_rep = cmmExprType arg
push_this_arg = do
(args',ars,frs,code') <- load_args rest aregs fregs code
return ((CmmHinted arg hint):args', ars, frs, code')
push_args [] code = return code
push_args ((CmmHinted arg _):rest) code
| isFloatType arg_rep = do
(arg_reg, arg_code) <- getSomeReg arg
delta <- getDeltaNat
setDeltaNat (delta - arg_size)
let code' = code `appOL` arg_code `appOL` toOL [
SUB (intSize wordWidth) (OpImm (ImmInt arg_size)) (OpReg rsp) ,
DELTA (delta - arg_size),
MOV (floatSize width) (OpReg arg_reg) (OpAddr (spRel 0))]
push_args rest code'
| otherwise = do
ASSERT(width == W64) return ()
(arg_op, arg_code) <- getOperand arg
delta <- getDeltaNat
setDeltaNat (delta - arg_size)
let code' = code `appOL` arg_code `appOL` toOL [
PUSH II64 arg_op,
DELTA (delta - arg_size)]
push_args rest code'
where
arg_rep = cmmExprType arg
width = typeWidth arg_rep
maxInlineSizeThreshold :: Integer
maxInlineSizeThreshold = 128
outOfLineCmmOp :: CallishMachOp -> Maybe HintedCmmFormal -> [HintedCmmActual] -> NatM InstrBlock
outOfLineCmmOp mop res args
= do
dflags <- getDynFlagsNat
targetExpr <- cmmMakeDynamicReference dflags addImportNat CallReference lbl
let target = CmmCallee targetExpr CCallConv
stmtToInstrs (CmmCall target (catMaybes [res]) args' CmmUnsafe CmmMayReturn)
where
lbl = mkForeignLabel fn Nothing ForeignLabelInThisPackage IsFunction
args' = case mop of
MO_Memcpy -> init args
MO_Memset -> init args
MO_Memmove -> init args
_ -> args
fn = case mop of
MO_F32_Sqrt -> fsLit "sqrtf"
MO_F32_Sin -> fsLit "sinf"
MO_F32_Cos -> fsLit "cosf"
MO_F32_Tan -> fsLit "tanf"
MO_F32_Exp -> fsLit "expf"
MO_F32_Log -> fsLit "logf"
MO_F32_Asin -> fsLit "asinf"
MO_F32_Acos -> fsLit "acosf"
MO_F32_Atan -> fsLit "atanf"
MO_F32_Sinh -> fsLit "sinhf"
MO_F32_Cosh -> fsLit "coshf"
MO_F32_Tanh -> fsLit "tanhf"
MO_F32_Pwr -> fsLit "powf"
MO_F64_Sqrt -> fsLit "sqrt"
MO_F64_Sin -> fsLit "sin"
MO_F64_Cos -> fsLit "cos"
MO_F64_Tan -> fsLit "tan"
MO_F64_Exp -> fsLit "exp"
MO_F64_Log -> fsLit "log"
MO_F64_Asin -> fsLit "asin"
MO_F64_Acos -> fsLit "acos"
MO_F64_Atan -> fsLit "atan"
MO_F64_Sinh -> fsLit "sinh"
MO_F64_Cosh -> fsLit "cosh"
MO_F64_Tanh -> fsLit "tanh"
MO_F64_Pwr -> fsLit "pow"
MO_Memcpy -> fsLit "memcpy"
MO_Memset -> fsLit "memset"
MO_Memmove -> fsLit "memmove"
other -> panic $ "outOfLineCmmOp: unmatched op! (" ++ show other ++ ")"
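-- -----------------------------------------------------------------------------
-- Switches are compiled to jump tables.  With -fPIC the table holds label
-- differences and the jump target is computed relative to the table
-- address; otherwise the table holds absolute labels.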
genSwitch :: CmmExpr -> [Maybe BlockId] -> NatM InstrBlock
genSwitch expr ids
| opt_PIC
= do
(reg,e_code) <- getSomeReg expr
lbl <- getNewLabelNat
dflags <- getDynFlagsNat
dynRef <- cmmMakeDynamicReference dflags addImportNat DataReference lbl
(tableReg,t_code) <- getSomeReg $ dynRef
let op = OpAddr (AddrBaseIndex (EABaseReg tableReg)
(EAIndex reg wORD_SIZE) (ImmInt 0))
return $ if target32Bit (targetPlatform dflags)
then e_code `appOL` t_code `appOL` toOL [
ADD (intSize wordWidth) op (OpReg tableReg),
JMP_TBL (OpReg tableReg) ids ReadOnlyData lbl
]
else case platformOS (targetPlatform dflags) of
OSDarwin ->
e_code `appOL` t_code `appOL` toOL [
ADD (intSize wordWidth) op (OpReg tableReg),
JMP_TBL (OpReg tableReg) ids Text lbl
]
_ ->
e_code `appOL` t_code `appOL` toOL [
MOVSxL II32 op (OpReg reg),
ADD (intSize wordWidth) (OpReg reg) (OpReg tableReg),
JMP_TBL (OpReg tableReg) ids ReadOnlyData lbl
]
| otherwise
= do
(reg,e_code) <- getSomeReg expr
lbl <- getNewLabelNat
let op = OpAddr (AddrBaseIndex EABaseNone (EAIndex reg wORD_SIZE) (ImmCLbl lbl))
code = e_code `appOL` toOL [
JMP_TBL op ids ReadOnlyData lbl
]
return code
generateJumpTableForInstr :: Instr -> Maybe (NatCmmTop (Alignment, CmmStatics) Instr)
generateJumpTableForInstr (JMP_TBL _ ids section lbl) = Just (createJumpTable ids section lbl)
generateJumpTableForInstr _ = Nothing
createJumpTable :: [Maybe BlockId] -> Section -> CLabel -> GenCmmTop (Alignment, CmmStatics) h g
createJumpTable ids section lbl
= let jumpTable
| opt_PIC =
let jumpTableEntryRel Nothing
= CmmStaticLit (CmmInt 0 wordWidth)
jumpTableEntryRel (Just blockid)
= CmmStaticLit (CmmLabelDiffOff blockLabel lbl 0)
where blockLabel = mkAsmTempLabel (getUnique blockid)
in map jumpTableEntryRel ids
| otherwise = map jumpTableEntry ids
in CmmData section (1, Statics lbl jumpTable)
condIntReg :: Cond -> CmmExpr -> CmmExpr -> NatM Register
condIntReg cond x y = do
CondCode _ cond cond_code <- condIntCode cond x y
tmp <- getNewRegNat II8
let
code dst = cond_code `appOL` toOL [
SETCC cond (OpReg tmp),
MOVZxL II8 (OpReg tmp) (OpReg dst)
]
return (Any II32 code)
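-- Floating-point comparisons under SSE2 must also consult the parity flag,
-- since an unordered result (NaN) sets PF: the NE case ors the parity bit
-- in, the ordered cases mask it out.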
condFltReg :: Cond -> CmmExpr -> CmmExpr -> NatM Register
condFltReg cond x y = if_sse2 condFltReg_sse2 condFltReg_x87
where
condFltReg_x87 = do
CondCode _ cond cond_code <- condFltCode cond x y
tmp <- getNewRegNat II8
let
code dst = cond_code `appOL` toOL [
SETCC cond (OpReg tmp),
MOVZxL II8 (OpReg tmp) (OpReg dst)
]
return (Any II32 code)
condFltReg_sse2 = do
CondCode _ cond cond_code <- condFltCode cond x y
tmp1 <- getNewRegNat archWordSize
tmp2 <- getNewRegNat archWordSize
let
code dst =
cond_code `appOL`
(case cond of
NE -> or_unordered dst
GU -> plain_test dst
GEU -> plain_test dst
_ -> and_ordered dst)
plain_test dst = toOL [
SETCC cond (OpReg tmp1),
MOVZxL II8 (OpReg tmp1) (OpReg dst)
]
or_unordered dst = toOL [
SETCC cond (OpReg tmp1),
SETCC PARITY (OpReg tmp2),
OR II8 (OpReg tmp1) (OpReg tmp2),
MOVZxL II8 (OpReg tmp2) (OpReg dst)
]
and_ordered dst = toOL [
SETCC cond (OpReg tmp1),
SETCC NOTPARITY (OpReg tmp2),
AND II8 (OpReg tmp1) (OpReg tmp2),
MOVZxL II8 (OpReg tmp2) (OpReg dst)
]
return (Any II32 code)
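-- trivialCode: generic two-operand integer instruction.  If a reversed
-- form of the instruction is supplied and the first operand is a 32-bit
-- literal, the operands are swapped so the literal becomes an immediate.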
trivialCode :: Width -> (Operand -> Operand -> Instr)
-> Maybe (Operand -> Operand -> Instr)
-> CmmExpr -> CmmExpr -> NatM Register
trivialCode width _ (Just revinstr) (CmmLit lit_a) b
| is32BitLit lit_a = do
b_code <- getAnyReg b
let
code dst
= b_code dst `snocOL`
revinstr (OpImm (litToImm lit_a)) (OpReg dst)
return (Any (intSize width) code)
trivialCode width instr _ a b
= genTrivialCode (intSize width) instr a b
genTrivialCode :: Size -> (Operand -> Operand -> Instr)
-> CmmExpr -> CmmExpr -> NatM Register
genTrivialCode rep instr a b = do
(b_op, b_code) <- getNonClobberedOperand b
a_code <- getAnyReg a
tmp <- getNewRegNat rep
let
code dst
| dst `regClashesWithOp` b_op =
b_code `appOL`
unitOL (MOV rep b_op (OpReg tmp)) `appOL`
a_code dst `snocOL`
instr (OpReg tmp) (OpReg dst)
| otherwise =
b_code `appOL`
a_code dst `snocOL`
instr b_op (OpReg dst)
return (Any rep code)
regClashesWithOp :: Reg -> Operand -> Bool
reg `regClashesWithOp` OpReg reg2 = reg == reg2
reg `regClashesWithOp` OpAddr amode = any (==reg) (addrModeRegs amode)
_ `regClashesWithOp` _ = False
trivialUCode :: Size -> (Operand -> Instr)
-> CmmExpr -> NatM Register
trivialUCode rep instr x = do
x_code <- getAnyReg x
let
code dst =
x_code dst `snocOL`
instr (OpReg dst)
return (Any rep code)
trivialFCode_x87 :: (Size -> Reg -> Reg -> Reg -> Instr)
-> CmmExpr -> CmmExpr -> NatM Register
trivialFCode_x87 instr x y = do
(x_reg, x_code) <- getNonClobberedReg x
(y_reg, y_code) <- getSomeReg y
let
size = FF80
code dst =
x_code `appOL`
y_code `snocOL`
instr size x_reg y_reg dst
return (Any size code)
trivialFCode_sse2 :: Width -> (Size -> Operand -> Operand -> Instr)
-> CmmExpr -> CmmExpr -> NatM Register
trivialFCode_sse2 pk instr x y
= genTrivialCode size (instr size) x y
where size = floatSize pk
trivialUFCode :: Size -> (Reg -> Reg -> Instr) -> CmmExpr -> NatM Register
trivialUFCode size instr x = do
(x_reg, x_code) <- getSomeReg x
let
code dst =
x_code `snocOL`
instr x_reg dst
return (Any size code)
coerceInt2FP :: Width -> Width -> CmmExpr -> NatM Register
coerceInt2FP from to x = if_sse2 coerce_sse2 coerce_x87
where
coerce_x87 = do
(x_reg, x_code) <- getSomeReg x
let
opc = case to of W32 -> GITOF; W64 -> GITOD;
n -> panic $ "coerceInt2FP.x87: unhandled width ("
++ show n ++ ")"
code dst = x_code `snocOL` opc x_reg dst
return (Any FF80 code)
coerce_sse2 = do
(x_op, x_code) <- getOperand x
let
opc = case to of W32 -> CVTSI2SS; W64 -> CVTSI2SD
n -> panic $ "coerceInt2FP.sse: unhandled width ("
++ show n ++ ")"
code dst = x_code `snocOL` opc (intSize from) x_op dst
return (Any (floatSize to) code)
coerceFP2Int :: Width -> Width -> CmmExpr -> NatM Register
coerceFP2Int from to x = if_sse2 coerceFP2Int_sse2 coerceFP2Int_x87
where
coerceFP2Int_x87 = do
(x_reg, x_code) <- getSomeReg x
let
opc = case from of W32 -> GFTOI; W64 -> GDTOI
n -> panic $ "coerceFP2Int.x87: unhandled width ("
++ show n ++ ")"
code dst = x_code `snocOL` opc x_reg dst
return (Any (intSize to) code)
coerceFP2Int_sse2 = do
(x_op, x_code) <- getOperand x
let
opc = case from of W32 -> CVTTSS2SIQ; W64 -> CVTTSD2SIQ;
n -> panic $ "coerceFP2Init.sse: unhandled width ("
++ show n ++ ")"
code dst = x_code `snocOL` opc (intSize to) x_op dst
return (Any (intSize to) code)
coerceFP2FP :: Width -> CmmExpr -> NatM Register
coerceFP2FP to x = do
use_sse2 <- sse2Enabled
(x_reg, x_code) <- getSomeReg x
let
opc | use_sse2 = case to of W32 -> CVTSD2SS; W64 -> CVTSS2SD;
n -> panic $ "coerceFP2FP: unhandled width ("
++ show n ++ ")"
| otherwise = GDTOF
code dst = x_code `snocOL` opc x_reg dst
return (Any (if use_sse2 then floatSize to else FF80) code)
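-- SSE2 has no floating-point negate instruction, so negation is done by
-- xor'ing with a constant whose only set bit is the sign bit.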
sse2NegCode :: Width -> CmmExpr -> NatM Register
sse2NegCode w x = do
let sz = floatSize w
x_code <- getAnyReg x
let
const | FF32 <- sz = CmmInt 0x80000000 W32
| otherwise = CmmInt 0x8000000000000000 W64
Amode amode amode_code <- memConstant (widthInBytes w) const
tmp <- getNewRegNat sz
let
code dst = x_code dst `appOL` amode_code `appOL` toOL [
MOV sz (OpAddr amode) (OpReg tmp),
XOR sz (OpReg tmp) (OpReg dst)
]
return (Any sz code)