;; GCC machine description for IA-32 and x86-64.
;; Copyright (C) 1988-2022 Free Software Foundation, Inc.
;; Mostly by William Schelter.
;; x86_64 support added by Jan Hubicka
;;
;; This file is part of GCC.
;;
;; GCC is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.
;;
;; GCC is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;; GNU General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3. If not see
;; <http://www.gnu.org/licenses/>.  */
;;
;; The original PO technology requires these to be ordered by speed,
;; so that the assigner will pick the fastest.
;;
;; See file "rtl.def" for documentation on define_insn, match_*, et. al.
;;
;; The special asm out single letter directives following a '%' are:
;; L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
;; C -- print opcode suffix for set/cmov insn.
;; c -- like C, but print reversed condition
;; F,f -- likewise, but for floating-point.
;; O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
;; otherwise nothing
;; R -- print the prefix for register names.
;; z -- print the opcode suffix for the size of the current operand.
;; Z -- likewise, with special suffixes for x87 instructions.
;; * -- print a star (in certain assembler syntax)
;; A -- print an absolute memory reference.
;; E -- print address with DImode register names if TARGET_64BIT.
;; w -- print the operand as if it's a "word" (HImode) even if it isn't.
;; s -- print a shift double count, followed by the assembler's argument
;; delimiter.
;; b -- print the QImode name of the register for the indicated operand.
;; %b0 would print %al if operands[0] is reg 0.
;; w -- likewise, print the HImode name of the register.
;; k -- likewise, print the SImode name of the register.
;; q -- likewise, print the DImode name of the register.
;; x -- likewise, print the V4SFmode name of the register.
;; t -- likewise, print the V8SFmode name of the register.
;; h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
;; y -- print "st(0)" instead of "st" as a register.
;; d -- print duplicated register operand for AVX instruction.
;; D -- print condition for SSE cmp instruction.
;; P -- if PIC, print an @PLT suffix.
;; p -- print raw symbol name.
;; X -- don't print any sort of PIC '@' suffix for a symbol.
;; & -- print some in-use local-dynamic symbol name.
;; H -- print a memory address offset by 8; used for sse high-parts
;; K -- print HLE lock prefix
;; Y -- print condition for XOP pcom* instruction.
;; + -- print a branch hint as 'cs' or 'ds' prefix
;; ; -- print a semicolon (after prefixes due to bug in older gas).
;; ~ -- print "i" if TARGET_AVX2, "f" otherwise.
;; ^ -- print addr32 prefix if TARGET_64BIT and Pmode != word_mode
;; ! -- print NOTRACK prefix for jxx/call/ret instructions if required.
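;; As an illustrative example (not an exhaustive description), the
;; integer conditional move patterns later in the port combine several
;; of these directives: a template such as "cmov%O2%C1\t{%2, %0|%0, %2}"
;; prints the optional Sun-syntax %O2 suffix and the %C1 condition,
;; yielding e.g. a "cmovge" mnemonic.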
(define_c_enum "unspec" [
;; Relocation specifiers
UNSPEC_GOT
UNSPEC_GOTOFF
UNSPEC_GOTPCREL
UNSPEC_GOTTPOFF
UNSPEC_TPOFF
UNSPEC_NTPOFF
UNSPEC_DTPOFF
UNSPEC_GOTNTPOFF
UNSPEC_INDNTPOFF
UNSPEC_PLTOFF
UNSPEC_MACHOPIC_OFFSET
UNSPEC_PCREL
UNSPEC_SIZEOF
;; Prologue support
UNSPEC_STACK_ALLOC
UNSPEC_SET_GOT
UNSPEC_SET_RIP
UNSPEC_SET_GOT_OFFSET
UNSPEC_MEMORY_BLOCKAGE
UNSPEC_PROBE_STACK
;; TLS support
UNSPEC_TP
UNSPEC_TLS_GD
UNSPEC_TLS_LD_BASE
UNSPEC_TLSDESC
UNSPEC_TLS_IE_SUN
;; Other random patterns
UNSPEC_SCAS
UNSPEC_FNSTSW
UNSPEC_SAHF
UNSPEC_NOTRAP
UNSPEC_PARITY
UNSPEC_FSTCW
UNSPEC_REP
UNSPEC_LD_MPIC ; load_macho_picbase
UNSPEC_TRUNC_NOOP
UNSPEC_DIV_ALREADY_SPLIT
UNSPEC_PAUSE
UNSPEC_LEA_ADDR
UNSPEC_XBEGIN_ABORT
UNSPEC_STOS
UNSPEC_PEEPSIB
UNSPEC_INSN_FALSE_DEP
UNSPEC_SBB
;; For SSE/MMX support:
UNSPEC_FIX_NOTRUNC
UNSPEC_MASKMOV
UNSPEC_MOVCC_MASK
UNSPEC_MOVMSK
UNSPEC_BLENDV
UNSPEC_PSHUFB
UNSPEC_XOP_PERMUTE
UNSPEC_RCP
UNSPEC_RSQRT
UNSPEC_PSADBW
;; For AVX/AVX512F support
UNSPEC_SCALEF
UNSPEC_PCMP
;; Generic math support
UNSPEC_IEEE_MIN ; not commutative
UNSPEC_IEEE_MAX ; not commutative
;; x87 Floating point
UNSPEC_SIN
UNSPEC_COS
UNSPEC_FPATAN
UNSPEC_FYL2X
UNSPEC_FYL2XP1
UNSPEC_FRNDINT
UNSPEC_FIST
UNSPEC_F2XM1
UNSPEC_TAN
UNSPEC_FXAM
;; x87 Rounding
UNSPEC_FRNDINT_ROUNDEVEN
UNSPEC_FRNDINT_FLOOR
UNSPEC_FRNDINT_CEIL
UNSPEC_FRNDINT_TRUNC
UNSPEC_FIST_FLOOR
UNSPEC_FIST_CEIL
;; x87 Double output FP
UNSPEC_SINCOS_COS
UNSPEC_SINCOS_SIN
UNSPEC_XTRACT_FRACT
UNSPEC_XTRACT_EXP
UNSPEC_FSCALE_FRACT
UNSPEC_FSCALE_EXP
UNSPEC_FPREM_F
UNSPEC_FPREM_U
UNSPEC_FPREM1_F
UNSPEC_FPREM1_U
UNSPEC_C2_FLAG
UNSPEC_FXAM_MEM
;; SSP patterns
UNSPEC_SP_SET
UNSPEC_SP_TEST
;; For ROUND support
UNSPEC_ROUND
;; For CRC32 support
UNSPEC_CRC32
;; For LZCNT support
UNSPEC_LZCNT
;; For BMI support
UNSPEC_TZCNT
UNSPEC_BEXTR
;; For BMI2 support
UNSPEC_PDEP
UNSPEC_PEXT
;; IRET support
UNSPEC_INTERRUPT_RETURN
;; For MOVDIRI and MOVDIR64B support
UNSPEC_MOVDIRI
UNSPEC_MOVDIR64B
;; For insn_callee_abi:
UNSPEC_CALLEE_ABI
])
(define_c_enum "unspecv" [
UNSPECV_UD2
UNSPECV_BLOCKAGE
UNSPECV_STACK_PROBE
UNSPECV_PROBE_STACK_RANGE
UNSPECV_ALIGN
UNSPECV_PROLOGUE_USE
UNSPECV_SPLIT_STACK_RETURN
UNSPECV_CLD
UNSPECV_NOPS
UNSPECV_RDTSC
UNSPECV_RDTSCP
UNSPECV_RDPMC
UNSPECV_LLWP_INTRINSIC
UNSPECV_SLWP_INTRINSIC
UNSPECV_LWPVAL_INTRINSIC
UNSPECV_LWPINS_INTRINSIC
UNSPECV_RDFSBASE
UNSPECV_RDGSBASE
UNSPECV_WRFSBASE
UNSPECV_WRGSBASE
UNSPECV_FXSAVE
UNSPECV_FXRSTOR
UNSPECV_FXSAVE64
UNSPECV_FXRSTOR64
UNSPECV_XSAVE
UNSPECV_XRSTOR
UNSPECV_XSAVE64
UNSPECV_XRSTOR64
UNSPECV_XSAVEOPT
UNSPECV_XSAVEOPT64
UNSPECV_XSAVES
UNSPECV_XRSTORS
UNSPECV_XSAVES64
UNSPECV_XRSTORS64
UNSPECV_XSAVEC
UNSPECV_XSAVEC64
UNSPECV_XGETBV
UNSPECV_XSETBV
UNSPECV_WBINVD
UNSPECV_WBNOINVD
;; For atomic compound assignments.
UNSPECV_FNSTENV
UNSPECV_FLDENV
UNSPECV_FNSTSW
UNSPECV_FNCLEX
;; For RDRAND support
UNSPECV_RDRAND
;; For RDSEED support
UNSPECV_RDSEED
;; For RTM support
UNSPECV_XBEGIN
UNSPECV_XEND
UNSPECV_XABORT
UNSPECV_XTEST
UNSPECV_NLGR
;; For CLWB support
UNSPECV_CLWB
;; For CLFLUSHOPT support
UNSPECV_CLFLUSHOPT
;; For MONITORX and MWAITX support
UNSPECV_MONITORX
UNSPECV_MWAITX
;; For CLZERO support
UNSPECV_CLZERO
;; For RDPKRU and WRPKRU support
UNSPECV_PKU
;; For RDPID support
UNSPECV_RDPID
;; For CET support
UNSPECV_NOP_ENDBR
UNSPECV_NOP_RDSSP
UNSPECV_INCSSP
UNSPECV_SAVEPREVSSP
UNSPECV_RSTORSSP
UNSPECV_WRSS
UNSPECV_WRUSS
UNSPECV_SETSSBSY
UNSPECV_CLRSSBSY
;; For TSXLDTRK support
UNSPECV_XSUSLDTRK
UNSPECV_XRESLDTRK
;; For WAITPKG support
UNSPECV_UMWAIT
UNSPECV_UMONITOR
UNSPECV_TPAUSE
;; For UINTR support
UNSPECV_CLUI
UNSPECV_STUI
UNSPECV_TESTUI
UNSPECV_SENDUIPI
;; For CLDEMOTE support
UNSPECV_CLDEMOTE
;; For Speculation Barrier support
UNSPECV_SPECULATION_BARRIER
UNSPECV_PTWRITE
;; For ENQCMD and ENQCMDS support
UNSPECV_ENQCMD
UNSPECV_ENQCMDS
;; For SERIALIZE support
UNSPECV_SERIALIZE
;; For patchable area support
UNSPECV_PATCHABLE_AREA
;; For HRESET support
UNSPECV_HRESET
])
;; Constants to represent rounding modes in the ROUND instruction
(define_constants
[(ROUND_ROUNDEVEN 0x0)
(ROUND_FLOOR 0x1)
(ROUND_CEIL 0x2)
(ROUND_TRUNC 0x3)
(ROUND_MXCSR 0x4)
(ROUND_NO_EXC 0x8)
])
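;; These bits combine; for example (ROUND_FLOOR | ROUND_NO_EXC) = 0x9
;; is the immediate for a round-down that also suppresses precision
;; exceptions, matching _MM_FROUND_FLOOR in the intrinsics headers.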
;; Constants to represent AVX512F embedded rounding
(define_constants
[(ROUND_NEAREST_INT 0)
(ROUND_NEG_INF 1)
(ROUND_POS_INF 2)
(ROUND_ZERO 3)
(NO_ROUND 4)
(ROUND_SAE 8)
])
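;; These combine in the same way as the ROUND_* set above; e.g.
;; (ROUND_NEG_INF | ROUND_SAE) = 9 selects round-toward-negative-infinity
;; with exceptions suppressed, cf. _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC.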
;; Constants to represent pcomtrue/pcomfalse variants
(define_constants
[(PCOM_FALSE 0)
(PCOM_TRUE 1)
(COM_FALSE_S 2)
(COM_FALSE_P 3)
(COM_TRUE_S 4)
(COM_TRUE_P 5)
])
;; Constants used in the XOP pperm instruction
(define_constants
[(PPERM_SRC 0x00) /* copy source */
(PPERM_INVERT 0x20) /* invert source */
(PPERM_REVERSE 0x40) /* bit reverse source */
(PPERM_REV_INV 0x60) /* bit reverse & invert src */
(PPERM_ZERO 0x80) /* all 0's */
(PPERM_ONES 0xa0) /* all 1's */
(PPERM_SIGN 0xc0) /* propagate sign bit */
(PPERM_INV_SIGN 0xe0) /* invert & propagate sign */
(PPERM_SRC1 0x00) /* use first source byte */
(PPERM_SRC2 0x10) /* use second source byte */
])
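;; Illustrative selector arithmetic (a sketch of the encoding, not an
;; exhaustive description): each pperm control byte ORs an action from
;; the high group with a source choice and a byte index, so e.g.
;; (PPERM_INVERT | PPERM_SRC2 | 0x3) = 0x33 selects byte 3 of the
;; second source and complements it.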
;; Registers by name.
(define_constants
[(AX_REG 0)
(DX_REG 1)
(CX_REG 2)
(BX_REG 3)
(SI_REG 4)
(DI_REG 5)
(BP_REG 6)
(SP_REG 7)
(ST0_REG 8)
(ST1_REG 9)
(ST2_REG 10)
(ST3_REG 11)
(ST4_REG 12)
(ST5_REG 13)
(ST6_REG 14)
(ST7_REG 15)
(ARGP_REG 16)
(FLAGS_REG 17)
(FPSR_REG 18)
(FRAME_REG 19)
(XMM0_REG 20)
(XMM1_REG 21)
(XMM2_REG 22)
(XMM3_REG 23)
(XMM4_REG 24)
(XMM5_REG 25)
(XMM6_REG 26)
(XMM7_REG 27)
(MM0_REG 28)
(MM1_REG 29)
(MM2_REG 30)
(MM3_REG 31)
(MM4_REG 32)
(MM5_REG 33)
(MM6_REG 34)
(MM7_REG 35)
(R8_REG 36)
(R9_REG 37)
(R10_REG 38)
(R11_REG 39)
(R12_REG 40)
(R13_REG 41)
(R14_REG 42)
(R15_REG 43)
(XMM8_REG 44)
(XMM9_REG 45)
(XMM10_REG 46)
(XMM11_REG 47)
(XMM12_REG 48)
(XMM13_REG 49)
(XMM14_REG 50)
(XMM15_REG 51)
(XMM16_REG 52)
(XMM17_REG 53)
(XMM18_REG 54)
(XMM19_REG 55)
(XMM20_REG 56)
(XMM21_REG 57)
(XMM22_REG 58)
(XMM23_REG 59)
(XMM24_REG 60)
(XMM25_REG 61)
(XMM26_REG 62)
(XMM27_REG 63)
(XMM28_REG 64)
(XMM29_REG 65)
(XMM30_REG 66)
(XMM31_REG 67)
(MASK0_REG 68)
(MASK1_REG 69)
(MASK2_REG 70)
(MASK3_REG 71)
(MASK4_REG 72)
(MASK5_REG 73)
(MASK6_REG 74)
(MASK7_REG 75)
(FIRST_PSEUDO_REG 76)
])
;; Insn callee ABI index.
(define_constants
[(ABI_DEFAULT 0)
(ABI_VZEROUPPER 1)
(ABI_UNKNOWN 2)])
;; Insns whose names begin with "x86_" are emitted by gen_FOO calls
;; from i386.cc.
;; In C guard expressions, put expressions which may be compile-time
;; constants first. This allows for better optimization. For
;; example, write "TARGET_64BIT && reload_completed", not
;; "reload_completed && TARGET_64BIT".
;; Processor type.
(define_attr "cpu" "none,pentium,pentiumpro,geode,k6,athlon,k8,core2,nehalem,
atom,slm,glm,haswell,generic,amdfam10,bdver1,bdver2,bdver3,
bdver4,btver2,znver1,znver2,znver3,znver4"
(const (symbol_ref "ix86_schedule")))
;; A basic instruction type. Refinements due to arguments to be
;; provided in other attributes.
(define_attr "type"
"other,multi,
alu,alu1,negnot,imov,imovx,lea,
incdec,ishift,ishiftx,ishift1,rotate,rotatex,rotate1,
imul,imulx,idiv,icmp,test,ibr,setcc,icmov,
push,pop,call,callv,leave,
str,bitmanip,
fmov,fop,fsgn,fmul,fdiv,fpspc,fcmov,fcmp,
fxch,fistp,fisttp,frndint,
sse,ssemov,sseadd,sseadd1,sseiadd,sseiadd1,
ssemul,sseimul,ssediv,sselog,sselog1,
sseishft,sseishft1,ssecmp,ssecomi,
ssecvt,ssecvt1,sseicvt,sseins,
sseshuf,sseshuf1,ssemuladd,sse4arg,
lwp,mskmov,msklog,
mmx,mmxmov,mmxadd,mmxmul,mmxcmp,mmxcvt,mmxshft"
(const_string "other"))
;; Main data type used by the insn
(define_attr "mode"
"unknown,none,QI,HI,SI,DI,TI,OI,XI,HF,SF,DF,XF,TF,V32HF,V16HF,V8HF,
V16SF,V8SF,V4DF,V4SF,V2DF,V2SF,V1DF,V8DF,V4HF,V2HF"
(const_string "unknown"))
;; The CPU unit an operation uses.
(define_attr "unit" "integer,i387,sse,mmx,unknown"
(cond [(eq_attr "type" "fmov,fop,fsgn,fmul,fdiv,fpspc,fcmov,fcmp,
fxch,fistp,fisttp,frndint")
(const_string "i387")
(eq_attr "type" "sse,ssemov,sseadd,sseadd1,sseiadd,sseiadd1,
ssemul,sseimul,ssediv,sselog,sselog1,
sseishft,sseishft1,ssecmp,ssecomi,
ssecvt,ssecvt1,sseicvt,sseins,
sseshuf,sseshuf1,ssemuladd,sse4arg,mskmov")
(const_string "sse")
(eq_attr "type" "mmx,mmxmov,mmxadd,mmxmul,mmxcmp,mmxcvt,mmxshft")
(const_string "mmx")
(eq_attr "type" "other")
(const_string "unknown")]
(const_string "integer")))
;; The (bounding maximum) length of an instruction immediate.
(define_attr "length_immediate" ""
(cond [(eq_attr "type" "incdec,setcc,icmov,str,lea,other,multi,idiv,leave,
bitmanip,imulx,msklog,mskmov")
(const_int 0)
(eq_attr "unit" "i387,sse,mmx")
(const_int 0)
(eq_attr "type" "alu,alu1,negnot,imovx,ishift,ishiftx,ishift1,
rotate,rotatex,rotate1,imul,icmp,push,pop")
(symbol_ref "ix86_attr_length_immediate_default (insn, true)")
(eq_attr "type" "imov,test")
(symbol_ref "ix86_attr_length_immediate_default (insn, false)")
(eq_attr "type" "call")
(if_then_else (match_operand 0 "constant_call_address_operand")
(const_int 4)
(const_int 0))
(eq_attr "type" "callv")
(if_then_else (match_operand 1 "constant_call_address_operand")
(const_int 4)
(const_int 0))
;; We don't know the size before shorten_branches. Expect
;; the instruction to fit for better scheduling.
(eq_attr "type" "ibr")
(const_int 1)
]
(symbol_ref "/* Update immediate_length and other attributes! */
gcc_unreachable (),1")))
;; The (bounding maximum) length of an instruction address.
(define_attr "length_address" ""
(cond [(eq_attr "type" "str,other,multi,fxch")
(const_int 0)
(and (eq_attr "type" "call")
(match_operand 0 "constant_call_address_operand"))
(const_int 0)
(and (eq_attr "type" "callv")
(match_operand 1 "constant_call_address_operand"))
(const_int 0)
]
(symbol_ref "ix86_attr_length_address_default (insn)")))
;; Set when the operand-size (data16) length prefix is used.
(define_attr "prefix_data16" ""
(cond [(eq_attr "type" "ssemuladd,sse4arg,sseiadd1,ssecvt1")
(const_int 0)
(eq_attr "mode" "HI")
(const_int 1)
(and (eq_attr "unit" "sse") (eq_attr "mode" "V2DF,TI"))
(const_int 1)
]
(const_int 0)))
;; Set when string REP prefix is used.
(define_attr "prefix_rep" ""
(cond [(eq_attr "type" "ssemuladd,sse4arg,sseiadd1,ssecvt1")
(const_int 0)
(and (eq_attr "unit" "sse") (eq_attr "mode" "SF,DF"))
(const_int 1)
]
(const_int 0)))
;; Set when 0f opcode prefix is used.
(define_attr "prefix_0f" ""
(if_then_else
(ior (eq_attr "type" "imovx,setcc,icmov,bitmanip,msklog,mskmov")
(eq_attr "unit" "sse,mmx"))
(const_int 1)
(const_int 0)))
;; Set when REX opcode prefix is used.
(define_attr "prefix_rex" ""
(cond [(not (match_test "TARGET_64BIT"))
(const_int 0)
(and (eq_attr "mode" "DI")
(and (eq_attr "type" "!push,pop,call,callv,leave,ibr")
(eq_attr "unit" "!mmx")))
(const_int 1)
(and (eq_attr "mode" "QI")
(match_test "x86_extended_QIreg_mentioned_p (insn)"))
(const_int 1)
(match_test "x86_extended_reg_mentioned_p (insn)")
(const_int 1)
(and (eq_attr "type" "imovx")
(match_operand:QI 1 "ext_QIreg_operand"))
(const_int 1)
]
(const_int 0)))
;; There are also additional prefixes in 3DNOW, SSSE3.
;; ssemuladd,sse4arg default to 0f24/0f25 and DREX byte,
;; sseiadd1,ssecvt1 to 0f7a with no DREX byte.
;; 3DNOW has 0f0f prefix, SSSE3 and SSE4_{1,2} 0f38/0f3a.
(define_attr "prefix_extra" ""
(cond [(eq_attr "type" "ssemuladd,sse4arg")
(const_int 2)
(eq_attr "type" "sseiadd1,ssecvt1")
(const_int 1)
]
(const_int 0)))
;; Prefix used: original, VEX or maybe VEX.
(define_attr "prefix" "orig,vex,maybe_vex,evex,maybe_evex"
(cond [(eq_attr "mode" "OI,V8SF,V4DF")
(const_string "vex")
(eq_attr "mode" "XI,V16SF,V8DF")
(const_string "evex")
]
(const_string "orig")))
;; VEX W bit is used.
(define_attr "prefix_vex_w" "" (const_int 0))
;; The length of the VEX prefix.
;; Only instructions with 0f prefix can have 2 byte VEX prefix,
;; 0f38/0f3a prefixes can't. In i386.md 0f3[8a] is
;; still prefix_0f 1, with prefix_extra 1.
(define_attr "length_vex" ""
(if_then_else (and (eq_attr "prefix_0f" "1")
(eq_attr "prefix_extra" "0"))
(if_then_else (eq_attr "prefix_vex_w" "1")
(symbol_ref "ix86_attr_length_vex_default (insn, true, true)")
(symbol_ref "ix86_attr_length_vex_default (insn, true, false)"))
(if_then_else (eq_attr "prefix_vex_w" "1")
(symbol_ref "ix86_attr_length_vex_default (insn, false, true)")
(symbol_ref "ix86_attr_length_vex_default (insn, false, false)"))))
;; 4-byte EVEX prefix and 1-byte opcode.
(define_attr "length_evex" "" (const_int 5))
;; Set when modrm byte is used.
(define_attr "modrm" ""
(cond [(eq_attr "type" "str,leave")
(const_int 0)
(eq_attr "unit" "i387")
(const_int 0)
(and (eq_attr "type" "incdec")
(and (not (match_test "TARGET_64BIT"))
(ior (match_operand:SI 1 "register_operand")
(match_operand:HI 1 "register_operand"))))
(const_int 0)
(and (eq_attr "type" "push")
(not (match_operand 1 "memory_operand")))
(const_int 0)
(and (eq_attr "type" "pop")
(not (match_operand 0 "memory_operand")))
(const_int 0)
(and (eq_attr "type" "imov")
(and (not (eq_attr "mode" "DI"))
(ior (and (match_operand 0 "register_operand")
(match_operand 1 "immediate_operand"))
(ior (and (match_operand 0 "ax_reg_operand")
(match_operand 1 "memory_displacement_only_operand"))
(and (match_operand 0 "memory_displacement_only_operand")
(match_operand 1 "ax_reg_operand"))))))
(const_int 0)
(and (eq_attr "type" "call")
(match_operand 0 "constant_call_address_operand"))
(const_int 0)
(and (eq_attr "type" "callv")
(match_operand 1 "constant_call_address_operand"))
(const_int 0)
(and (eq_attr "type" "alu,alu1,icmp,test")
(match_operand 0 "ax_reg_operand"))
(symbol_ref "(get_attr_length_immediate (insn) <= (get_attr_mode (insn) != MODE_QI))")
]
(const_int 1)))
;; The (bounding maximum) length of an instruction in bytes.
;; ??? fistp and frndint are in fact fldcw/{fistp,frndint}/fldcw sequences.
;; Later we may want to split them and compute proper length as for
;; other insns.
(define_attr "length" ""
(cond [(eq_attr "type" "other,multi,fistp,frndint")
(const_int 16)
(eq_attr "type" "fcmp")
(const_int 4)
(eq_attr "unit" "i387")
(plus (const_int 2)
(plus (attr "prefix_data16")
(attr "length_address")))
(ior (eq_attr "prefix" "evex")
(and (ior (eq_attr "prefix" "maybe_evex")
(eq_attr "prefix" "maybe_vex"))
(match_test "TARGET_AVX512F")))
(plus (attr "length_evex")
(plus (attr "length_immediate")
(plus (attr "modrm")
(attr "length_address"))))
(ior (eq_attr "prefix" "vex")
(and (ior (eq_attr "prefix" "maybe_vex")
(eq_attr "prefix" "maybe_evex"))
(match_test "TARGET_AVX")))
(plus (attr "length_vex")
(plus (attr "length_immediate")
(plus (attr "modrm")
(attr "length_address"))))]
(plus (plus (attr "modrm")
(plus (attr "prefix_0f")
(plus (attr "prefix_rex")
(plus (attr "prefix_extra")
(const_int 1)))))
(plus (attr "prefix_rep")
(plus (attr "prefix_data16")
(plus (attr "length_immediate")
(attr "length_address")))))))
;; The `memory' attribute is `none' if no memory is referenced, `load' or
;; `store' if there is a simple memory reference therein, or `unknown'
;; if the instruction is complex.
(define_attr "memory" "none,load,store,both,unknown"
(cond [(eq_attr "type" "other,multi,str,lwp")
(const_string "unknown")
(eq_attr "type" "lea,fcmov,fpspc")
(const_string "none")
(eq_attr "type" "fistp,leave")
(const_string "both")
(eq_attr "type" "frndint")
(const_string "load")
(eq_attr "type" "push")
(if_then_else (match_operand 1 "memory_operand")
(const_string "both")
(const_string "store"))
(eq_attr "type" "pop")
(if_then_else (match_operand 0 "memory_operand")
(const_string "both")
(const_string "load"))
(eq_attr "type" "setcc")
(if_then_else (match_operand 0 "memory_operand")
(const_string "store")
(const_string "none"))
(eq_attr "type" "icmp,test,ssecmp,ssecomi,mmxcmp,fcmp")
(if_then_else (ior (match_operand 0 "memory_operand")
(match_operand 1 "memory_operand"))
(const_string "load")
(const_string "none"))
(eq_attr "type" "ibr")
(if_then_else (match_operand 0 "memory_operand")
(const_string "load")
(const_string "none"))
(eq_attr "type" "call")
(if_then_else (match_operand 0 "constant_call_address_operand")
(const_string "none")
(const_string "load"))
(eq_attr "type" "callv")
(if_then_else (match_operand 1 "constant_call_address_operand")
(const_string "none")
(const_string "load"))
(and (eq_attr "type" "alu1,negnot,ishift1,rotate1,sselog1,sseshuf1")
(match_operand 1 "memory_operand"))
(const_string "both")
(and (match_operand 0 "memory_operand")
(match_operand 1 "memory_operand"))
(const_string "both")
(match_operand 0 "memory_operand")
(const_string "store")
(match_operand 1 "memory_operand")
(const_string "load")
(and (eq_attr "type"
"!alu1,negnot,ishift1,rotate1,
imov,imovx,icmp,test,bitmanip,
fmov,fcmp,fsgn,
sse,ssemov,ssecmp,ssecomi,ssecvt,ssecvt1,sseicvt,
sselog1,sseshuf1,sseadd1,sseiadd1,sseishft1,
mmx,mmxmov,mmxcmp,mmxcvt,mskmov,msklog")
(match_operand 2 "memory_operand"))
(const_string "load")
(and (eq_attr "type" "icmov,ssemuladd,sse4arg")
(match_operand 3 "memory_operand"))
(const_string "load")
]
(const_string "none")))
;; Indicates if an instruction has both an immediate and a displacement.
(define_attr "imm_disp" "false,true,unknown"
(cond [(eq_attr "type" "other,multi")
(const_string "unknown")
(and (eq_attr "type" "icmp,test,imov,alu1,ishift1,rotate1")
(and (match_operand 0 "memory_displacement_operand")
(match_operand 1 "immediate_operand")))
(const_string "true")
(and (eq_attr "type" "alu,ishift,ishiftx,rotate,rotatex,imul,idiv")
(and (match_operand 0 "memory_displacement_operand")
(match_operand 2 "immediate_operand")))
(const_string "true")
]
(const_string "false")))
;; Indicates if an FP operation has an integer source.
(define_attr "fp_int_src" "false,true"
(const_string "false"))
;; Defines rounding mode of an FP operation.
(define_attr "i387_cw" "roundeven,floor,ceil,trunc,uninitialized,any"
(const_string "any"))
;; Define attribute to indicate AVX insns with partial XMM register update.
(define_attr "avx_partial_xmm_update" "false,true"
(const_string "false"))
;; Define attribute to classify add/sub insns that consume the carry flag (CF)
(define_attr "use_carry" "0,1" (const_string "0"))
;; Define attribute to indicate unaligned ssemov insns
(define_attr "movu" "0,1" (const_string "0"))
;; Used to control the "enabled" attribute on a per-instruction basis.
(define_attr "isa" "base,x64,nox64,x64_sse2,x64_sse4,x64_sse4_noavx,
x64_avx,x64_avx512bw,x64_avx512dq,
sse_noavx,sse2,sse2_noavx,sse3,sse3_noavx,sse4,sse4_noavx,
avx,noavx,avx2,noavx2,bmi,bmi2,fma4,fma,avx512f,noavx512f,
avx512bw,noavx512bw,avx512dq,noavx512dq,fma_or_avx512vl,
avx512vl,noavx512vl,avxvnni,avx512vnnivl,avx512fp16"
(const_string "base"))
;; Define instruction set of MMX instructions
(define_attr "mmx_isa" "base,native,sse,sse_noavx,avx"
(const_string "base"))
(define_attr "enabled" ""
(cond [(eq_attr "isa" "x64") (symbol_ref "TARGET_64BIT")
(eq_attr "isa" "nox64") (symbol_ref "!TARGET_64BIT")
(eq_attr "isa" "x64_sse2")
(symbol_ref "TARGET_64BIT && TARGET_SSE2")
(eq_attr "isa" "x64_sse4")
(symbol_ref "TARGET_64BIT && TARGET_SSE4_1")
(eq_attr "isa" "x64_sse4_noavx")
(symbol_ref "TARGET_64BIT && TARGET_SSE4_1 && !TARGET_AVX")
(eq_attr "isa" "x64_avx")
(symbol_ref "TARGET_64BIT && TARGET_AVX")
(eq_attr "isa" "x64_avx512bw")
(symbol_ref "TARGET_64BIT && TARGET_AVX512BW")
(eq_attr "isa" "x64_avx512dq")
(symbol_ref "TARGET_64BIT && TARGET_AVX512DQ")
(eq_attr "isa" "sse_noavx")
(symbol_ref "TARGET_SSE && !TARGET_AVX")
(eq_attr "isa" "sse2") (symbol_ref "TARGET_SSE2")
(eq_attr "isa" "sse2_noavx")
(symbol_ref "TARGET_SSE2 && !TARGET_AVX")
(eq_attr "isa" "sse3") (symbol_ref "TARGET_SSE3")
(eq_attr "isa" "sse3_noavx")
(symbol_ref "TARGET_SSE3 && !TARGET_AVX")
(eq_attr "isa" "sse4") (symbol_ref "TARGET_SSE4_1")
(eq_attr "isa" "sse4_noavx")
(symbol_ref "TARGET_SSE4_1 && !TARGET_AVX")
(eq_attr "isa" "avx") (symbol_ref "TARGET_AVX")
(eq_attr "isa" "noavx") (symbol_ref "!TARGET_AVX")
(eq_attr "isa" "avx2") (symbol_ref "TARGET_AVX2")
(eq_attr "isa" "noavx2") (symbol_ref "!TARGET_AVX2")
(eq_attr "isa" "bmi") (symbol_ref "TARGET_BMI")
(eq_attr "isa" "bmi2") (symbol_ref "TARGET_BMI2")
(eq_attr "isa" "fma4") (symbol_ref "TARGET_FMA4")
(eq_attr "isa" "fma") (symbol_ref "TARGET_FMA")
(eq_attr "isa" "fma_or_avx512vl")
(symbol_ref "TARGET_FMA || TARGET_AVX512VL")
(eq_attr "isa" "avx512f") (symbol_ref "TARGET_AVX512F")
(eq_attr "isa" "noavx512f") (symbol_ref "!TARGET_AVX512F")
(eq_attr "isa" "avx512bw") (symbol_ref "TARGET_AVX512BW")
(eq_attr "isa" "noavx512bw") (symbol_ref "!TARGET_AVX512BW")
(eq_attr "isa" "avx512dq") (symbol_ref "TARGET_AVX512DQ")
(eq_attr "isa" "noavx512dq") (symbol_ref "!TARGET_AVX512DQ")
(eq_attr "isa" "avx512vl") (symbol_ref "TARGET_AVX512VL")
(eq_attr "isa" "noavx512vl") (symbol_ref "!TARGET_AVX512VL")
(eq_attr "isa" "avxvnni") (symbol_ref "TARGET_AVXVNNI")
(eq_attr "isa" "avx512vnnivl")
(symbol_ref "TARGET_AVX512VNNI && TARGET_AVX512VL")
(eq_attr "isa" "avx512fp16")
(symbol_ref "TARGET_AVX512FP16")
(eq_attr "mmx_isa" "native")
(symbol_ref "!TARGET_MMX_WITH_SSE")
(eq_attr "mmx_isa" "sse")
(symbol_ref "TARGET_MMX_WITH_SSE")
(eq_attr "mmx_isa" "sse_noavx")
(symbol_ref "TARGET_MMX_WITH_SSE && !TARGET_AVX")
(eq_attr "mmx_isa" "avx")
(symbol_ref "TARGET_MMX_WITH_SSE && TARGET_AVX")
]
(const_int 1)))
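;; For example (illustrative only), an insn whose alternatives carry
;;   (set_attr "isa" "noavx,avx")
;; exposes its first alternative only without AVX and its second only
;; with AVX, so one define_insn can serve both instruction sets.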
(define_attr "preferred_for_size" "" (const_int 1))
(define_attr "preferred_for_speed" "" (const_int 1))
;; Describe a user's asm statement.
(define_asm_attributes
[(set_attr "length" "128")
(set_attr "type" "multi")])
(define_code_iterator plusminus [plus minus])
(define_code_iterator plusminusmultdiv [plus minus mult div])
(define_code_iterator sat_plusminus [ss_plus us_plus ss_minus us_minus])
;; Base name for insn mnemonic.
(define_code_attr plusminus_mnemonic
[(plus "add") (ss_plus "adds") (us_plus "addus")
(minus "sub") (ss_minus "subs") (us_minus "subus")])
(define_code_iterator multdiv [mult div])
(define_code_attr multdiv_mnemonic
[(mult "mul") (div "div")])
;; Mark commutative operators as such in constraints.
(define_code_attr comm [(plus "%") (ss_plus "%") (us_plus "%")
(minus "") (ss_minus "") (us_minus "")
(mult "%") (div "")])
;; Mapping of max and min
(define_code_iterator maxmin [smax smin umax umin])
;; Mapping of signed max and min
(define_code_iterator smaxmin [smax smin])
;; Mapping of unsigned max and min
(define_code_iterator umaxmin [umax umin])
;; Base name for integer and FP insn mnemonic
(define_code_attr maxmin_int [(smax "maxs") (smin "mins")
(umax "maxu") (umin "minu")])
(define_code_attr maxmin_float [(smax "max") (smin "min")])
(define_int_iterator IEEE_MAXMIN
[UNSPEC_IEEE_MAX
UNSPEC_IEEE_MIN])
(define_int_attr ieee_maxmin
[(UNSPEC_IEEE_MAX "max")
(UNSPEC_IEEE_MIN "min")])
;; Mapping of logic operators
(define_code_iterator any_logic [and ior xor])
(define_code_iterator any_or [ior xor])
(define_code_iterator fpint_logic [and xor])
;; Base name for insn mnemonic.
(define_code_attr logic [(and "and") (ior "or") (xor "xor")])
;; Mapping of logic-shift operators
(define_code_iterator any_lshift [ashift lshiftrt])
;; Mapping of shift-right operators
(define_code_iterator any_shiftrt [lshiftrt ashiftrt])
;; Mapping of all shift operators
(define_code_iterator any_shift [ashift lshiftrt ashiftrt])
;; Base name for insn mnemonic.
(define_code_attr shift [(ashift "sll") (lshiftrt "shr") (ashiftrt "sar")])
(define_code_attr vshift [(ashift "sll") (lshiftrt "srl") (ashiftrt "sra")])
;; Mapping of rotate operators
(define_code_iterator any_rotate [rotate rotatert])
;; Base name for insn mnemonic.
(define_code_attr rotate [(rotate "rol") (rotatert "ror")])
;; Mapping of abs neg operators
(define_code_iterator absneg [abs neg])
;; Mapping of abs neg operators to logic operation
(define_code_attr absneg_op [(abs "and") (neg "xor")])
;; Base name for x87 insn mnemonic.
(define_code_attr absneg_mnemonic [(abs "fabs") (neg "fchs")])
;; Mapping of extend operators
(define_code_iterator any_extend [sign_extend zero_extend])
;; Mapping of highpart multiply operators
(define_code_iterator any_mul_highpart [smul_highpart umul_highpart])
;; Prefix for insn mnemonic.
(define_code_attr sgnprefix [(sign_extend "i") (zero_extend "")
(smul_highpart "i") (umul_highpart "")
(div "i") (udiv "")])
;; Prefix for define_insn
(define_code_attr s [(sign_extend "s") (zero_extend "u")
(smul_highpart "s") (umul_highpart "u")])
(define_code_attr u [(sign_extend "") (zero_extend "u")
(div "") (udiv "u")])
(define_code_attr u_bool [(sign_extend "false") (zero_extend "true")
(div "false") (udiv "true")])
;; Used in signed and unsigned truncations.
(define_code_iterator any_truncate [ss_truncate truncate us_truncate])
;; Instruction suffix for truncations.
(define_code_attr trunsuffix
[(ss_truncate "s") (truncate "") (us_truncate "us")])
;; Instruction suffix for SSE sign and zero extensions.
(define_code_attr extsuffix [(sign_extend "sx") (zero_extend "zx")])
;; Used in signed and unsigned fix.
(define_code_iterator any_fix [fix unsigned_fix])
(define_code_attr fixsuffix [(fix "") (unsigned_fix "u")])
(define_code_attr fixunssuffix [(fix "") (unsigned_fix "uns")])
(define_code_attr fixprefix [(fix "s") (unsigned_fix "u")])
;; Used in signed and unsigned float.
(define_code_iterator any_float [float unsigned_float])
(define_code_attr floatsuffix [(float "") (unsigned_float "u")])
(define_code_attr floatunssuffix [(float "") (unsigned_float "uns")])
(define_code_attr floatprefix [(float "s") (unsigned_float "u")])
;; Base name for expression
(define_code_attr insn
[(plus "add") (ss_plus "ssadd") (us_plus "usadd")
(minus "sub") (ss_minus "sssub") (us_minus "ussub")
(sign_extend "extend") (zero_extend "zero_extend")
(ashift "ashl") (lshiftrt "lshr") (ashiftrt "ashr")
(rotate "rotl") (rotatert "rotr")
(mult "mul") (div "div")])
;; All integer modes.
(define_mode_iterator SWI1248x [QI HI SI DI])
;; All integer modes without QImode.
(define_mode_iterator SWI248x [HI SI DI])
;; All integer modes without QImode and HImode.
(define_mode_iterator SWI48x [SI DI])
;; All integer modes without SImode and DImode.
(define_mode_iterator SWI12 [QI HI])
;; All integer modes without DImode.
(define_mode_iterator SWI124 [QI HI SI])
;; All integer modes without QImode and DImode.
(define_mode_iterator SWI24 [HI SI])
;; Single word integer modes.
(define_mode_iterator SWI [QI HI SI (DI "TARGET_64BIT")])
;; Single word integer modes without QImode.
(define_mode_iterator SWI248 [HI SI (DI "TARGET_64BIT")])
;; Single word integer modes without QImode and HImode.
(define_mode_iterator SWI48 [SI (DI "TARGET_64BIT")])
;; All math-dependent single and double word integer modes.
(define_mode_iterator SDWIM [(QI "TARGET_QIMODE_MATH")
(HI "TARGET_HIMODE_MATH")
SI DI (TI "TARGET_64BIT")])
;; Math-dependent single word integer modes.
(define_mode_iterator SWIM [(QI "TARGET_QIMODE_MATH")
(HI "TARGET_HIMODE_MATH")
SI (DI "TARGET_64BIT")])
;; Math-dependent integer modes without DImode.
(define_mode_iterator SWIM124 [(QI "TARGET_QIMODE_MATH")
(HI "TARGET_HIMODE_MATH")
SI])
;; Math-dependent integer modes with DImode.
(define_mode_iterator SWIM1248x
[(QI "TARGET_QIMODE_MATH")
(HI "TARGET_HIMODE_MATH")
SI DI])
;; Math-dependent single word integer modes without QImode.
(define_mode_iterator SWIM248 [(HI "TARGET_HIMODE_MATH")
SI (DI "TARGET_64BIT")])
;; Double word integer modes.
(define_mode_iterator DWI [(DI "!TARGET_64BIT")
(TI "TARGET_64BIT")])
;; SWI and DWI together.
(define_mode_iterator SWIDWI [QI HI SI DI (TI "TARGET_64BIT")])
;; SWI48 and DWI together.
(define_mode_iterator SWI48DWI [SI DI (TI "TARGET_64BIT")])
;; GET_MODE_SIZE for selected modes.  As GET_MODE_SIZE is not a
;; compile time constant, it is faster to use <MODE_SIZE> than
;; GET_MODE_SIZE (<MODE>mode).  For XFmode, which depends on
;; command line options, just use the GET_MODE_SIZE macro.
(define_mode_attr MODE_SIZE [(QI "1") (HI "2") (SI "4") (DI "8")
(TI "16") (HF "2") (SF "4") (DF "8")
(XF "GET_MODE_SIZE (XFmode)")
(V16QI "16") (V32QI "32") (V64QI "64")
(V8HI "16") (V16HI "32") (V32HI "64")
(V4SI "16") (V8SI "32") (V16SI "64")
(V2DI "16") (V4DI "32") (V8DI "64")
(V1TI "16") (V2TI "32") (V4TI "64")
(V2DF "16") (V4DF "32") (V8DF "64")
(V4SF "16") (V8SF "32") (V16SF "64")
(V8HF "16") (V16HF "32") (V32HF "64")
(V4HF "8") (V2HF "4")])
;; Double word integer modes as mode attribute.
(define_mode_attr DWI [(QI "HI") (HI "SI") (SI "DI") (DI "TI") (TI "OI")])
(define_mode_attr dwi [(QI "hi") (HI "si") (SI "di") (DI "ti") (TI "oi")])
;; LEA mode corresponding to an integer mode
(define_mode_attr LEAMODE [(QI "SI") (HI "SI") (SI "SI") (DI "DI")])
;; Half mode for double word integer modes.
(define_mode_iterator DWIH [(SI "!TARGET_64BIT")
(DI "TARGET_64BIT")])
;; Instruction suffix for integer modes.
(define_mode_attr imodesuffix [(QI "b") (HI "w") (SI "l") (DI "q")])
;; Instruction suffix for masks.
(define_mode_attr mskmodesuffix [(QI "b") (HI "w") (SI "d") (DI "q")])
;; Pointer size prefix for integer modes (Intel asm dialect)
(define_mode_attr iptrsize [(QI "BYTE")
(HI "WORD")
(SI "DWORD")
(DI "QWORD")])
;; Register class for integer modes.
(define_mode_attr r [(QI "q") (HI "r") (SI "r") (DI "r")])
;; Immediate operand constraint for integer modes.
(define_mode_attr i [(QI "n") (HI "n") (SI "e") (DI "e")])
;; General operand constraint for word modes.
(define_mode_attr g [(QI "qmn") (HI "rmn") (SI "rme") (DI "rme")])
;; Memory operand constraint for word modes.
(define_mode_attr m [(QI "m") (HI "m") (SI "BM") (DI "BM")])
;; Immediate operand constraint for double integer modes.
(define_mode_attr di [(SI "nF") (DI "Wd")])
;; Immediate operand constraint for shifts.
(define_mode_attr S [(QI "I") (HI "I") (SI "I") (DI "J") (TI "O")])
(define_mode_attr KS [(QI "Wb") (HI "Ww") (SI "I") (DI "J")])
;; Print register name in the specified mode.
(define_mode_attr k [(QI "b") (HI "w") (SI "k") (DI "q")])
;; General operand predicate for integer modes.
(define_mode_attr general_operand
[(QI "general_operand")
(HI "general_operand")
(SI "x86_64_general_operand")
(DI "x86_64_general_operand")
(TI "x86_64_general_operand")])
;; General operand predicate for integer modes, where for TImode
;; we need both words of the operand to be general operands.
(define_mode_attr general_hilo_operand
[(QI "general_operand")
(HI "general_operand")
(SI "x86_64_general_operand")
(DI "x86_64_general_operand")
(TI "x86_64_hilo_general_operand")])
;; General sign extend operand predicate for integer modes,
;; which disallows VOIDmode operands and is thus suitable
;; for use inside sign_extend.
(define_mode_attr general_sext_operand
[(QI "sext_operand")
(HI "sext_operand")
(SI "x86_64_sext_operand")
(DI "x86_64_sext_operand")])
;; General sign/zero extend operand predicate for integer modes.
(define_mode_attr general_szext_operand
[(QI "general_operand")
(HI "general_operand")
(SI "x86_64_szext_general_operand")
(DI "x86_64_szext_general_operand")])
(define_mode_attr nonmemory_szext_operand
[(QI "nonmemory_operand")
(HI "nonmemory_operand")
(SI "x86_64_szext_nonmemory_operand")
(DI "x86_64_szext_nonmemory_operand")])
;; Immediate operand predicate for integer modes.
(define_mode_attr immediate_operand
[(QI "immediate_operand")
(HI "immediate_operand")
(SI "x86_64_immediate_operand")
(DI "x86_64_immediate_operand")])
;; Nonmemory operand predicate for integer modes.
(define_mode_attr nonmemory_operand
[(QI "nonmemory_operand")
(HI "nonmemory_operand")
(SI "x86_64_nonmemory_operand")
(DI "x86_64_nonmemory_operand")])
;; Operand predicate for shifts.
(define_mode_attr shift_operand
[(QI "nonimmediate_operand")
(HI "nonimmediate_operand")
(SI "nonimmediate_operand")
(DI "shiftdi_operand")
(TI "register_operand")])
;; Operand predicate for shift argument.
(define_mode_attr shift_immediate_operand
[(QI "const_1_to_31_operand")
(HI "const_1_to_31_operand")
(SI "const_1_to_31_operand")
(DI "const_1_to_63_operand")])
;; Input operand predicate for arithmetic left shifts.
(define_mode_attr ashl_input_operand
[(QI "nonimmediate_operand")
(HI "nonimmediate_operand")
(SI "nonimmediate_operand")
(DI "ashldi_input_operand")
(TI "reg_or_pm1_operand")])
;; SSE and x87 SFmode and DFmode floating point modes
(define_mode_iterator MODEF [SF DF])
;; SSE floating point modes
(define_mode_iterator MODEFH [(HF "TARGET_AVX512FP16") SF DF])
;; All x87 floating point modes
(define_mode_iterator X87MODEF [SF DF XF])
;; All x87 floating point modes plus HFmode
(define_mode_iterator X87MODEFH [HF SF DF XF])
;; All SSE floating point modes
(define_mode_iterator SSEMODEF [HF SF DF TF])
(define_mode_attr ssevecmodef [(HF "V8HF") (SF "V4SF") (DF "V2DF") (TF "TF")])
;; SSE instruction suffix for various modes
(define_mode_attr ssemodesuffix
[(HF "sh") (SF "ss") (DF "sd")
(V32HF "ph") (V16SF "ps") (V8DF "pd")
(V16HF "ph") (V8SF "ps") (V4DF "pd")
(V8HF "ph") (V4SF "ps") (V2DF "pd")
(V16QI "b") (V8HI "w") (V4SI "d") (V2DI "q")
(V32QI "b") (V16HI "w") (V8SI "d") (V4DI "q")
(V64QI "b") (V32HI "w") (V16SI "d") (V8DI "q")])
;; SSE vector suffix for floating point modes
(define_mode_attr ssevecmodesuffix [(SF "ps") (DF "pd")])
;; SSE vector mode corresponding to a scalar mode
(define_mode_attr ssevecmode
[(QI "V16QI") (HI "V8HI") (SI "V4SI") (DI "V2DI") (HF "V8HF") (SF "V4SF") (DF "V2DF")])
(define_mode_attr ssevecmodelower
[(QI "v16qi") (HI "v8hi") (SI "v4si") (DI "v2di") (SF "v4sf") (DF "v2df")])
;; AVX512F vector mode corresponding to a scalar mode
(define_mode_attr avx512fvecmode
[(QI "V64QI") (HI "V32HI") (SI "V16SI") (DI "V8DI") (SF "V16SF") (DF "V8DF")])
;; Instruction suffix for REX 64-bit operators.
(define_mode_attr rex64suffix [(SI "{l}") (DI "{q}")])
(define_mode_attr rex64namesuffix [(SI "") (DI "q")])
;; This mode iterator allows :P to be used for patterns that operate on
;; pointer-sized quantities. Exactly one of the two alternatives will match.
(define_mode_iterator P [(SI "Pmode == SImode") (DI "Pmode == DImode")])
;; This mode iterator allows :W to be used for patterns that operate on
;; word_mode sized quantities.
(define_mode_iterator W
[(SI "word_mode == SImode") (DI "word_mode == DImode")])
;; This mode iterator allows :PTR to be used for patterns that operate on
;; ptr_mode sized quantities.
(define_mode_iterator PTR
[(SI "ptr_mode == SImode") (DI "ptr_mode == DImode")])
;; Scheduling descriptions
(include "pentium.md")
(include "ppro.md")
(include "k6.md")
(include "athlon.md")
(include "bdver1.md")
(include "bdver3.md")
(include "btver2.md")
(include "znver.md")
(include "znver4.md")
(include "geode.md")
(include "atom.md")
(include "slm.md")
(include "glm.md")
(include "core2.md")
(include "haswell.md")
;; Operand and operator predicates and constraints
(include "predicates.md")
(include "constraints.md")
;; Compare and branch/compare and store instructions.
(define_expand "cbranch4"
[(set (reg:CC FLAGS_REG)
(compare:CC (match_operand:SDWIM 1 "nonimmediate_operand")
(match_operand:SDWIM 2 "")))
(set (pc) (if_then_else
(match_operator 0 "ordered_comparison_operator"
[(reg:CC FLAGS_REG) (const_int 0)])
(label_ref (match_operand 3))
(pc)))]
""
{
if (MEM_P (operands[1]) && MEM_P (operands[2]))
operands[1] = force_reg (<MODE>mode, operands[1]);
ix86_expand_branch (GET_CODE (operands[0]),
operands[1], operands[2], operands[3]);
DONE;
})
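;; Illustratively, a C comparison such as "if (a < b) goto l;" with int
;; operands reaches the expander above as cbranchsi4; it merely forces
;; at most one operand out of memory and defers the real work to
;; ix86_expand_branch.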
(define_expand "cstore4"
[(set (reg:CC FLAGS_REG)
(compare:CC (match_operand:SWIM 2 "nonimmediate_operand")
(match_operand:SWIM 3 "")))
(set (match_operand:QI 0 "register_operand")
(match_operator 1 "ordered_comparison_operator"
[(reg:CC FLAGS_REG) (const_int 0)]))]
""
{
if (MEM_P (operands[2]) && MEM_P (operands[3]))
operands[2] = force_reg (<MODE>mode, operands[2]);
ix86_expand_setcc (operands[0], GET_CODE (operands[1]),
operands[2], operands[3]);
DONE;
})
(define_expand "@cmp_1"
[(set (reg:CC FLAGS_REG)
(compare:CC (match_operand:SWI48 0 "nonimmediate_operand")
(match_operand:SWI48 1 "")))])
(define_mode_iterator SWI1248_AVX512BWDQ_64
[(QI "TARGET_AVX512DQ") HI
(SI "TARGET_AVX512BW") (DI "TARGET_AVX512BW && TARGET_64BIT")])
(define_insn "*cmp_ccz_1"
[(set (reg FLAGS_REG)
(compare (match_operand:SWI1248_AVX512BWDQ_64 0
"nonimmediate_operand" ",?m,$k")
(match_operand:SWI1248_AVX512BWDQ_64 1 "const0_operand")))]
"TARGET_AVX512F && ix86_match_ccmode (insn, CCZmode)"
"@
test{<imodesuffix>}\t%0, %0
cmp{<imodesuffix>}\t{%1, %0|%0, %1}
kortest<mskmodesuffix>\t%0, %0"
[(set_attr "type" "test,icmp,msklog")
(set_attr "length_immediate" "0,1,*")
(set_attr "prefix" "*,*,vex")
(set_attr "mode" "")])
(define_insn "*cmp_ccno_1"
[(set (reg FLAGS_REG)
(compare (match_operand:SWI 0 "nonimmediate_operand" ",?m")
(match_operand:SWI 1 "const0_operand")))]
"ix86_match_ccmode (insn, CCNOmode)"
"@
test{<imodesuffix>}\t%0, %0
cmp{<imodesuffix>}\t{%1, %0|%0, %1}"
[(set_attr "type" "test,icmp")
(set_attr "length_immediate" "0,1")
(set_attr "mode" "")])
(define_insn "*cmp_1"
[(set (reg FLAGS_REG)
(compare (match_operand:SWI 0 "nonimmediate_operand" "m,")
(match_operand:SWI 1 "" ",")))]
"ix86_match_ccmode (insn, CCmode)"
"cmp{}\t{%1, %0|%0, %1}"
[(set_attr "type" "icmp")
(set_attr "mode" "")])
(define_insn "*cmp_minus_1"
[(set (reg FLAGS_REG)
(compare
(minus:SWI (match_operand:SWI 0 "nonimmediate_operand" "m<r>,<r>")
(match_operand:SWI 1 "<general_operand>" "<r><i>,<m>"))
(const_int 0)))]
"ix86_match_ccmode (insn, CCGOCmode)"
"cmp{}\t{%1, %0|%0, %1}"
[(set_attr "type" "icmp")
(set_attr "mode" "")])
(define_insn "*cmpqi_ext_1"
[(set (reg FLAGS_REG)
(compare
(match_operand:QI 0 "nonimmediate_operand" "QBc,m")
(subreg:QI
(zero_extract:SWI248
(match_operand:SWI248 1 "register_operand" "Q,Q")
(const_int 8)
(const_int 8)) 0)))]
"ix86_match_ccmode (insn, CCmode)"
"cmp{b}\t{%h1, %0|%0, %h1}"
[(set_attr "isa" "*,nox64")
(set_attr "type" "icmp")
(set_attr "mode" "QI")])
(define_insn "*cmpqi_ext_2"
[(set (reg FLAGS_REG)
(compare
(subreg:QI
(zero_extract:SWI248
(match_operand:SWI248 0 "register_operand" "Q")
(const_int 8)
(const_int 8)) 0)
(match_operand:QI 1 "const0_operand")))]
"ix86_match_ccmode (insn, CCNOmode)"
"test{b}\t%h0, %h0"
[(set_attr "type" "test")
(set_attr "length_immediate" "0")
(set_attr "mode" "QI")])
(define_expand "cmpqi_ext_3"
[(set (reg:CC FLAGS_REG)
(compare:CC
(subreg:QI
(zero_extract:HI
(match_operand:HI 0 "register_operand")
(const_int 8)
(const_int 8)) 0)
(match_operand:QI 1 "const_int_operand")))])
(define_insn "*cmpqi_ext_3"
[(set (reg FLAGS_REG)
(compare
(subreg:QI
(zero_extract:SWI248
(match_operand:SWI248 0 "register_operand" "Q,Q")
(const_int 8)
(const_int 8)) 0)
(match_operand:QI 1 "general_operand" "QnBc,m")))]
"ix86_match_ccmode (insn, CCmode)"
"cmp{b}\t{%1, %h0|%h0, %1}"
[(set_attr "isa" "*,nox64")
(set_attr "type" "icmp")
(set_attr "mode" "QI")])
(define_insn "*cmpqi_ext_4"
[(set (reg FLAGS_REG)
(compare
(subreg:QI
(zero_extract:SWI248
(match_operand:SWI248 0 "register_operand" "Q")
(const_int 8)
(const_int 8)) 0)
(subreg:QI
(zero_extract:SWI248
(match_operand:SWI248 1 "register_operand" "Q")
(const_int 8)
(const_int 8)) 0)))]
"ix86_match_ccmode (insn, CCmode)"
"cmp{b}\t{%h1, %h0|%h0, %h1}"
[(set_attr "type" "icmp")
(set_attr "mode" "QI")])
;; These implement floating point compares.
;; %%% See if we can get away with VOIDmode operands on the actual insns,
;; which would allow mix and match FP modes on the compares. Which is what
;; the old patterns did, but with many more of them.
(define_expand "cbranchxf4"
[(set (reg:CC FLAGS_REG)
(compare:CC (match_operand:XF 1 "nonmemory_operand")
(match_operand:XF 2 "nonmemory_operand")))
(set (pc) (if_then_else
(match_operator 0 "ix86_fp_comparison_operator"
[(reg:CC FLAGS_REG)
(const_int 0)])
(label_ref (match_operand 3))
(pc)))]
"TARGET_80387"
{
ix86_expand_branch (GET_CODE (operands[0]),
operands[1], operands[2], operands[3]);
DONE;
})
(define_expand "cstorexf4"
[(set (reg:CC FLAGS_REG)
(compare:CC (match_operand:XF 2 "nonmemory_operand")
(match_operand:XF 3 "nonmemory_operand")))
(set (match_operand:QI 0 "register_operand")
(match_operator 1 "ix86_fp_comparison_operator"
[(reg:CC FLAGS_REG)
(const_int 0)]))]
"TARGET_80387"
{
ix86_expand_setcc (operands[0], GET_CODE (operands[1]),
operands[2], operands[3]);
DONE;
})
(define_expand "cbranchhf4"
[(set (reg:CC FLAGS_REG)
(compare:CC (match_operand:HF 1 "cmp_fp_expander_operand")
(match_operand:HF 2 "cmp_fp_expander_operand")))
(set (pc) (if_then_else
(match_operator 0 "ix86_fp_comparison_operator"
[(reg:CC FLAGS_REG)
(const_int 0)])
(label_ref (match_operand 3))
(pc)))]
"TARGET_AVX512FP16"
{
ix86_expand_branch (GET_CODE (operands[0]),
operands[1], operands[2], operands[3]);
DONE;
})
(define_expand "cbranch4"
[(set (reg:CC FLAGS_REG)
(compare:CC (match_operand:MODEF 1 "cmp_fp_expander_operand")
(match_operand:MODEF 2 "cmp_fp_expander_operand")))
(set (pc) (if_then_else
(match_operator 0 "ix86_fp_comparison_operator"
[(reg:CC FLAGS_REG)
(const_int 0)])
(label_ref (match_operand 3))
(pc)))]
"TARGET_80387 || (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)"
{
ix86_expand_branch (GET_CODE (operands[0]),
operands[1], operands[2], operands[3]);
DONE;
})
(define_expand "cstorehf4"
[(set (reg:CC FLAGS_REG)
(compare:CC (match_operand:HF 2 "cmp_fp_expander_operand")
(match_operand:HF 3 "cmp_fp_expander_operand")))
(set (match_operand:QI 0 "register_operand")
(match_operator 1 "ix86_fp_comparison_operator"
[(reg:CC FLAGS_REG)
(const_int 0)]))]
"TARGET_AVX512FP16"
{
ix86_expand_setcc (operands[0], GET_CODE (operands[1]),
operands[2], operands[3]);
DONE;
})
(define_expand "cstore4"
[(set (reg:CC FLAGS_REG)
(compare:CC (match_operand:MODEF 2 "cmp_fp_expander_operand")
(match_operand:MODEF 3 "cmp_fp_expander_operand")))
(set (match_operand:QI 0 "register_operand")
(match_operator 1 "ix86_fp_comparison_operator"
[(reg:CC FLAGS_REG)
(const_int 0)]))]
"TARGET_80387 || (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)"
{
ix86_expand_setcc (operands[0], GET_CODE (operands[1]),
operands[2], operands[3]);
DONE;
})
(define_expand "cbranchcc4"
[(set (pc) (if_then_else
(match_operator 0 "comparison_operator"
[(match_operand 1 "flags_reg_operand")
(match_operand 2 "const0_operand")])
(label_ref (match_operand 3))
(pc)))]
""
{
ix86_expand_branch (GET_CODE (operands[0]),
operands[1], operands[2], operands[3]);
DONE;
})
(define_expand "cstorecc4"
[(set (match_operand:QI 0 "register_operand")
(match_operator 1 "comparison_operator"
[(match_operand 2 "flags_reg_operand")
(match_operand 3 "const0_operand")]))]
""
{
ix86_expand_setcc (operands[0], GET_CODE (operands[1]),
operands[2], operands[3]);
DONE;
})
;; FP compares, step 1:
;; Set the FP condition codes and move fpsr to ax.
;; We may not use "#" to split and emit these
;; due to reg-stack pops killing fpsr.
(define_insn "*cmpxf_i387"
[(set (match_operand:HI 0 "register_operand" "=a")
(unspec:HI
[(compare:CCFP
(match_operand:XF 1 "register_operand" "f")
(match_operand:XF 2 "reg_or_0_operand" "fC"))]
UNSPEC_FNSTSW))]
"TARGET_80387"
"* return output_fp_compare (insn, operands, false, false);"
[(set_attr "type" "multi")
(set_attr "unit" "i387")
(set_attr "mode" "XF")])
(define_insn "*cmp_i387"
[(set (match_operand:HI 0 "register_operand" "=a")
(unspec:HI
[(compare:CCFP
(match_operand:MODEF 1 "register_operand" "f")
(match_operand:MODEF 2 "nonimm_or_0_operand" "fmC"))]
UNSPEC_FNSTSW))]
"TARGET_80387"
"* return output_fp_compare (insn, operands, false, false);"
[(set_attr "type" "multi")
(set_attr "unit" "i387")
(set_attr "mode" "")])
(define_insn "*cmp__i387"
[(set (match_operand:HI 0 "register_operand" "=a")
(unspec:HI
[(compare:CCFP
(match_operand:X87MODEF 1 "register_operand" "f")
(float:X87MODEF
(match_operand:SWI24 2 "nonimmediate_operand" "m")))]
UNSPEC_FNSTSW))]
"TARGET_80387
&& (TARGET_USE_MODE_FIOP
|| optimize_function_for_size_p (cfun))"
"* return output_fp_compare (insn, operands, false, false);"
[(set_attr "type" "multi")
(set_attr "unit" "i387")
(set_attr "fp_int_src" "true")
(set_attr "mode" "")])
(define_insn "*cmpu_i387"
[(set (match_operand:HI 0 "register_operand" "=a")
(unspec:HI
[(unspec:CCFP
[(compare:CCFP
(match_operand:X87MODEF 1 "register_operand" "f")
(match_operand:X87MODEF 2 "register_operand" "f"))]
UNSPEC_NOTRAP)]
UNSPEC_FNSTSW))]
"TARGET_80387"
"* return output_fp_compare (insn, operands, false, true);"
[(set_attr "type" "multi")
(set_attr "unit" "i387")
(set_attr "mode" "")])
;; FP compares, step 2:
;; Get ax into flags, general case.
(define_insn "x86_sahf_1"
[(set (reg:CC FLAGS_REG)
(unspec:CC [(match_operand:HI 0 "register_operand" "a")]
UNSPEC_SAHF))]
"TARGET_SAHF"
{
#ifndef HAVE_AS_IX86_SAHF
if (TARGET_64BIT)
return ASM_BYTE "0x9e";
else
#endif
return "sahf";
}
[(set_attr "length" "1")
(set_attr "athlon_decode" "vector")
(set_attr "amdfam10_decode" "direct")
(set_attr "bdver1_decode" "direct")
(set_attr "mode" "SI")])
;; Pentium Pro can do both steps in one go.
;; (these instructions set flags directly)
(define_subst_attr "unord" "unord_subst" "" "u")
(define_subst_attr "unordered" "unord_subst" "false" "true")
(define_subst "unord_subst"
[(set (match_operand:CCFP 0)
(match_operand:CCFP 1))]
""
[(set (match_dup 0)
(unspec:CCFP
[(match_dup 1)]
UNSPEC_NOTRAP))])
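;; The subst above mechanically derives a non-trapping variant from each
;; pattern it is applied to: <unord> becomes ""/"u" in the insn name,
;; <unordered> becomes false/true in the output template, and the "u"
;; form wraps the comparison in UNSPEC_NOTRAP.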
(define_insn "*cmpixf_i387"
[(set (reg:CCFP FLAGS_REG)
(compare:CCFP
(match_operand:XF 0 "register_operand" "f")
(match_operand:XF 1 "register_operand" "f")))]
"TARGET_80387 && TARGET_CMOVE"
"* return output_fp_compare (insn, operands, true, );"
[(set_attr "type" "fcmp")
(set_attr "mode" "XF")
(set_attr "athlon_decode" "vector")
(set_attr "amdfam10_decode" "direct")
(set_attr "bdver1_decode" "double")
(set_attr "znver1_decode" "double")])
(define_insn "*cmpi"
[(set (reg:CCFP FLAGS_REG)
(compare:CCFP
(match_operand:MODEF 0 "register_operand" "f,v")
(match_operand:MODEF 1 "register_ssemem_operand" "f,vm")))]
"(SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
|| (TARGET_80387 && TARGET_CMOVE)"
"@
* return output_fp_compare (insn, operands, true, <unordered>);
%v<unord>comi<ssemodesuffix>\t{%1, %0|%0, %1}"
[(set_attr "type" "fcmp,ssecomi")
(set_attr "prefix" "orig,maybe_vex")
(set_attr "mode" "")
(set_attr "prefix_rep" "*,0")
(set (attr "prefix_data16")
(cond [(eq_attr "alternative" "0")
(const_string "*")
(eq_attr "mode" "DF")
(const_string "1")
]
(const_string "0")))
(set_attr "athlon_decode" "vector")
(set_attr "amdfam10_decode" "direct")
(set_attr "bdver1_decode" "double")
(set_attr "znver1_decode" "double")
(set (attr "enabled")
(if_then_else
(match_test ("SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH"))
(if_then_else
(eq_attr "alternative" "0")
(symbol_ref "TARGET_MIX_SSE_I387")
(symbol_ref "true"))
(if_then_else
(eq_attr "alternative" "0")
(symbol_ref "true")
(symbol_ref "false"))))])
(define_insn "*cmpihf"
[(set (reg:CCFP FLAGS_REG)
(compare:CCFP
(match_operand:HF 0 "register_operand" "v")
(match_operand:HF 1 "nonimmediate_operand" "vm")))]
"TARGET_AVX512FP16"
"vcomish\t{%1, %0|%0, %1}"
[(set_attr "type" "ssecomi")
(set_attr "prefix" "evex")
(set_attr "mode" "HF")])
;; Push/pop instructions.
(define_insn_and_split "*pushv1ti2"
[(set (match_operand:V1TI 0 "push_operand" "=<")
(match_operand:V1TI 1 "register_operand" "v"))]
"TARGET_64BIT && TARGET_STV"
"#"
"&& reload_completed"
[(set (reg:P SP_REG) (plus:P (reg:P SP_REG) (match_dup 2)))
(set (match_dup 0) (match_dup 1))]
{
operands[2] = GEN_INT (-PUSH_ROUNDING (GET_MODE_SIZE (V1TImode)));
/* Preserve memory attributes. */
operands[0] = replace_equiv_address (operands[0], stack_pointer_rtx);
}
[(set_attr "type" "multi")
(set_attr "mode" "TI")])
(define_insn "*push2"
[(set (match_operand:DWI 0 "push_operand" "=<,<")
(match_operand:DWI 1 "general_no_elim_operand" "riF*o,*v"))]
""
"#"
[(set_attr "type" "multi")
(set_attr "mode" "")])
(define_split
[(set (match_operand:DWI 0 "push_operand")
(match_operand:DWI 1 "general_gr_operand"))]
"reload_completed"
[(const_int 0)]
"ix86_split_long_move (operands); DONE;")
(define_insn "*pushdi2_rex64"
[(set (match_operand:DI 0 "push_operand" "=<,<,!<")
(match_operand:DI 1 "general_no_elim_operand" "re*m,*v,n"))]
"TARGET_64BIT"
"@
push{q}\t%1
#
#"
[(set_attr "type" "push,multi,multi")
(set_attr "mode" "DI")])
;; Convert impossible pushes of immediate to existing instructions.
;; First try to get a scratch register and go through it. In case this
;; fails, push the sign-extended lower part first and then overwrite
;; the upper part with a 32-bit move.
(define_peephole2
[(match_scratch:DI 2 "r")
(set (match_operand:DI 0 "push_operand")
(match_operand:DI 1 "immediate_operand"))]
"TARGET_64BIT
&& !symbolic_operand (operands[1], DImode)
&& !x86_64_immediate_operand (operands[1], DImode)"
[(set (match_dup 2) (match_dup 1))
(set (match_dup 0) (match_dup 2))])
(define_split
[(set (match_operand:DI 0 "push_operand")
(match_operand:DI 1 "immediate_operand"))]
"TARGET_64BIT && epilogue_completed
&& !symbolic_operand (operands[1], DImode)
&& !x86_64_immediate_operand (operands[1], DImode)"
[(set (match_dup 0) (match_dup 1))
(set (match_dup 2) (match_dup 3))]
{
split_double_mode (DImode, &operands[1], 1, &operands[2], &operands[3]);
operands[1] = gen_lowpart (DImode, operands[2]);
operands[2] = gen_rtx_MEM (SImode,
plus_constant (Pmode, stack_pointer_rtx, 4));
})
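;; An illustrative expansion of the split above: pushing the constant
;; 0x12345678deadbeef without a free scratch register becomes
;;   pushq $0xffffffffdeadbeef  (sign-extended lower half)
;;   movl  $0x12345678, 4(%rsp) (overwrite the upper half)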
;; For TARGET_64BIT we always round up to 8 bytes.
(define_insn "*pushsi2_rex64"
[(set (match_operand:SI 0 "push_operand" "=X,X")
(match_operand:SI 1 "nonmemory_no_elim_operand" "re,*v"))]
"TARGET_64BIT"
"@
push{q}\t%q1
#"
[(set_attr "type" "push,multi")
(set_attr "mode" "DI")])
(define_insn "*pushsi2"
[(set (match_operand:SI 0 "push_operand" "=<,<")
(match_operand:SI 1 "general_no_elim_operand" "ri*m,*v"))]
"!TARGET_64BIT"
"@
push{l}\t%1
#"
[(set_attr "type" "push,multi")
(set_attr "mode" "SI")])
(define_split
[(set (match_operand:SWI48DWI 0 "push_operand")
(match_operand:SWI48DWI 1 "sse_reg_operand"))]
"TARGET_SSE && reload_completed"
[(set (reg:P SP_REG) (plus:P (reg:P SP_REG) (match_dup 2)))
(set (match_dup 0) (match_dup 1))]
{
operands[2] = GEN_INT (-PUSH_ROUNDING (GET_MODE_SIZE (<MODE>mode)));
/* Preserve memory attributes. */
operands[0] = replace_equiv_address (operands[0], stack_pointer_rtx);
})
;; emit_push_insn, when it calls move_by_pieces, requires an insn to
;; "push a byte/word", but we actually use push{l,q}, which has the
;; effect of rounding the amount pushed up to a word.
(define_insn "*push2"
[(set (match_operand:SWI12 0 "push_operand" "=X")
(match_operand:SWI12 1 "nonmemory_no_elim_operand" "rn"))]
""
"* return TARGET_64BIT ? \"push{q}\t%q1\" : \"push{l}\t%k1\";"
[(set_attr "type" "push")
(set (attr "mode")
(if_then_else (match_test "TARGET_64BIT")
(const_string "DI")
(const_string "SI")))])
(define_insn "*push2_prologue"
[(set (match_operand:W 0 "push_operand" "=<")
(match_operand:W 1 "general_no_elim_operand" "r*m"))
(clobber (mem:BLK (scratch)))]
""
"push{}\t%1"
[(set_attr "type" "push")
(set_attr "mode" "")])
(define_insn "*pop1"
[(set (match_operand:W 0 "nonimmediate_operand" "=r*m")
(match_operand:W 1 "pop_operand" ">"))]
""
"pop{}\t%0"
[(set_attr "type" "pop")
(set_attr "mode" "")])
(define_insn "*pop1_epilogue"
[(set (match_operand:W 0 "nonimmediate_operand" "=r*m")
(match_operand:W 1 "pop_operand" ">"))
(clobber (mem:BLK (scratch)))]
""
"pop{}\t%0"
[(set_attr "type" "pop")
(set_attr "mode" "")])
(define_insn "*pushfl2"
[(set (match_operand:W 0 "push_operand" "=<")
(match_operand:W 1 "flags_reg_operand"))]
""
"pushf{}"
[(set_attr "type" "push")
(set_attr "mode" "")])
(define_insn "*popfl1"
[(set (match_operand:W 0 "flags_reg_operand")
(match_operand:W 1 "pop_operand" ">"))]
""
"popf{}"
[(set_attr "type" "pop")
(set_attr "mode" "")])
;; Reload patterns to support multi-word load/store
;; with non-offsetable address.
(define_expand "reload_noff_store"
[(parallel [(match_operand 0 "memory_operand" "=m")
(match_operand 1 "register_operand" "r")
(match_operand:DI 2 "register_operand" "=&r")])]
"TARGET_64BIT"
{
rtx mem = operands[0];
rtx addr = XEXP (mem, 0);
emit_move_insn (operands[2], addr);
mem = replace_equiv_address_nv (mem, operands[2]);
emit_insn (gen_rtx_SET (mem, operands[1]));
DONE;
})
(define_expand "reload_noff_load"
[(parallel [(match_operand 0 "register_operand" "=r")
(match_operand 1 "memory_operand" "m")
(match_operand:DI 2 "register_operand" "=r")])]
"TARGET_64BIT"
{
rtx mem = operands[1];
rtx addr = XEXP (mem, 0);
emit_move_insn (operands[2], addr);
mem = replace_equiv_address_nv (mem, operands[2]);
emit_insn (gen_rtx_SET (operands[0], mem));
DONE;
})
;; Move instructions.
(define_expand "movxi"
[(set (match_operand:XI 0 "nonimmediate_operand")
(match_operand:XI 1 "general_operand"))]
"TARGET_AVX512F"
"ix86_expand_vector_move (XImode, operands); DONE;")
(define_expand "movoi"
[(set (match_operand:OI 0 "nonimmediate_operand")
(match_operand:OI 1 "general_operand"))]
"TARGET_AVX"
"ix86_expand_vector_move (OImode, operands); DONE;")
(define_expand "movti"
[(set (match_operand:TI 0 "nonimmediate_operand")
(match_operand:TI 1 "general_operand"))]
"TARGET_64BIT || TARGET_SSE"
{
if (TARGET_64BIT)
ix86_expand_move (TImode, operands);
else
ix86_expand_vector_move (TImode, operands);
DONE;
})
;; This expands to what emit_move_complex would generate if we didn't
;; have a movti pattern. Having this avoids problems with reload on
;; 32-bit targets when SSE is present, but doesn't seem to be harmful
;; to have around all the time.
(define_expand "movcdi"
[(set (match_operand:CDI 0 "nonimmediate_operand")
(match_operand:CDI 1 "general_operand"))]
""
{
if (push_operand (operands[0], CDImode))
emit_move_complex_push (CDImode, operands[0], operands[1]);
else
emit_move_complex_parts (operands[0], operands[1]);
DONE;
})
(define_expand "mov"
[(set (match_operand:SWI1248x 0 "nonimmediate_operand")
(match_operand:SWI1248x 1 "general_operand"))]
""
"ix86_expand_move (mode, operands); DONE;")
(define_insn "*mov_xor"
[(set (match_operand:SWI48 0 "register_operand" "=r")
(match_operand:SWI48 1 "const0_operand"))
(clobber (reg:CC FLAGS_REG))]
"reload_completed"
"xor{l}\t%k0, %k0"
[(set_attr "type" "alu1")
(set_attr "mode" "SI")
(set_attr "length_immediate" "0")])
(define_insn "*mov_and"
[(set (match_operand:SWI248 0 "memory_operand" "=m")
(match_operand:SWI248 1 "const0_operand"))
(clobber (reg:CC FLAGS_REG))]
"reload_completed"
"and{}\t{%1, %0|%0, %1}"
[(set_attr "type" "alu1")
(set_attr "mode" "")
(set_attr "length_immediate" "1")])
(define_insn "*mov_or"
[(set (match_operand:SWI248 0 "nonimmediate_operand" "=rm")
(match_operand:SWI248 1 "constm1_operand"))
(clobber (reg:CC FLAGS_REG))]
"reload_completed"
"or{}\t{%1, %0|%0, %1}"
[(set_attr "type" "alu1")
(set_attr "mode" "")
(set_attr "length_immediate" "1")])
(define_insn "*movxi_internal_avx512f"
[(set (match_operand:XI 0 "nonimmediate_operand" "=v,v ,v ,m")
(match_operand:XI 1 "nonimmediate_or_sse_const_operand" " C,BC,vm,v"))]
"TARGET_AVX512F
&& (register_operand (operands[0], XImode)
|| register_operand (operands[1], XImode))"
{
switch (get_attr_type (insn))
{
case TYPE_SSELOG1:
return standard_sse_constant_opcode (insn, operands);
case TYPE_SSEMOV:
return ix86_output_ssemov (insn, operands);
default:
gcc_unreachable ();
}
}
[(set_attr "type" "sselog1,sselog1,ssemov,ssemov")
(set_attr "prefix" "evex")
(set_attr "mode" "XI")])
(define_insn "*movoi_internal_avx"
[(set (match_operand:OI 0 "nonimmediate_operand" "=v,v ,v ,m")
(match_operand:OI 1 "nonimmediate_or_sse_const_operand" " C,BC,vm,v"))]
"TARGET_AVX
&& (register_operand (operands[0], OImode)
|| register_operand (operands[1], OImode))"
{
switch (get_attr_type (insn))
{
case TYPE_SSELOG1:
return standard_sse_constant_opcode (insn, operands);
case TYPE_SSEMOV:
return ix86_output_ssemov (insn, operands);
default:
gcc_unreachable ();
}
}
[(set_attr "isa" "*,avx2,*,*")
(set_attr "type" "sselog1,sselog1,ssemov,ssemov")
(set_attr "prefix" "vex")
(set_attr "mode" "OI")])
(define_insn "*movti_internal"
[(set (match_operand:TI 0 "nonimmediate_operand" "=!r ,o ,v,v ,v ,m,?r,?Yd")
(match_operand:TI 1 "general_operand" "riFo,re,C,BC,vm,v,Yd,r"))]
"(TARGET_64BIT
&& !(MEM_P (operands[0]) && MEM_P (operands[1])))
|| (TARGET_SSE
&& nonimmediate_or_sse_const_operand (operands[1], TImode)
&& (register_operand (operands[0], TImode)
|| register_operand (operands[1], TImode)))"
{
switch (get_attr_type (insn))
{
case TYPE_MULTI:
return "#";
case TYPE_SSELOG1:
return standard_sse_constant_opcode (insn, operands);
case TYPE_SSEMOV:
return ix86_output_ssemov (insn, operands);
default:
gcc_unreachable ();
}
}
[(set (attr "isa")
(cond [(eq_attr "alternative" "0,1,6,7")
(const_string "x64")
(eq_attr "alternative" "3")
(const_string "sse2")
]
(const_string "*")))
(set (attr "type")
(cond [(eq_attr "alternative" "0,1,6,7")
(const_string "multi")
(eq_attr "alternative" "2,3")
(const_string "sselog1")
]
(const_string "ssemov")))
(set (attr "prefix")
(if_then_else (eq_attr "type" "sselog1,ssemov")
(const_string "maybe_vex")
(const_string "orig")))
(set (attr "mode")
(cond [(eq_attr "alternative" "0,1")
(const_string "DI")
(match_test "TARGET_AVX")
(const_string "TI")
(ior (not (match_test "TARGET_SSE2"))
(match_test "optimize_function_for_size_p (cfun)"))
(const_string "V4SF")
(and (eq_attr "alternative" "5")
(match_test "TARGET_SSE_TYPELESS_STORES"))
(const_string "V4SF")
]
(const_string "TI")))
(set (attr "preferred_for_speed")
(cond [(eq_attr "alternative" "6")
(symbol_ref "TARGET_INTER_UNIT_MOVES_FROM_VEC")
(eq_attr "alternative" "7")
(symbol_ref "TARGET_INTER_UNIT_MOVES_TO_VEC")
]
(symbol_ref "true")))])
(define_split
[(set (match_operand:TI 0 "sse_reg_operand")
(match_operand:TI 1 "general_reg_operand"))]
"TARGET_64BIT && TARGET_SSE4_1
&& reload_completed"
[(set (match_dup 2)
(vec_merge:V2DI
(vec_duplicate:V2DI (match_dup 3))
(match_dup 2)
(const_int 2)))]
{
operands[2] = lowpart_subreg (V2DImode, operands[0], TImode);
operands[3] = gen_highpart (DImode, operands[1]);
emit_move_insn (gen_lowpart (DImode, operands[0]),
gen_lowpart (DImode, operands[1]));
})
(define_insn "*movdi_internal"
[(set (match_operand:DI 0 "nonimmediate_operand"
"=r ,o ,r,r ,r,m ,*y,*y,?*y,?m,?r,?*y,*v,*v,*v,m ,m,?r ,?*Yd,?r,?*v,?*y,?*x,*k,*k ,*r,*m,*k")
(match_operand:DI 1 "general_operand"
"riFo,riF,Z,rem,i,re,C ,*y,Bk ,*y,*y,r ,C ,*v,Bk,*v,v,*Yd,r ,*v,r ,*x ,*y ,*r,*kBk,*k,*k,CBC"))]
"!(MEM_P (operands[0]) && MEM_P (operands[1]))
&& ix86_hardreg_mov_ok (operands[0], operands[1])"
{
switch (get_attr_type (insn))
{
case TYPE_MSKMOV:
return "kmovq\t{%1, %0|%0, %1}";
case TYPE_MSKLOG:
if (operands[1] == const0_rtx)
return "kxorq\t%0, %0, %0";
else if (operands[1] == constm1_rtx)
return "kxnorq\t%0, %0, %0";
gcc_unreachable ();
case TYPE_MULTI:
return "#";
case TYPE_MMX:
return "pxor\t%0, %0";
case TYPE_MMXMOV:
/* Handle broken assemblers that require movd instead of movq. */
if (!HAVE_AS_IX86_INTERUNIT_MOVQ
&& (GENERAL_REG_P (operands[0]) || GENERAL_REG_P (operands[1])))
return "movd\t{%1, %0|%0, %1}";
return "movq\t{%1, %0|%0, %1}";
case TYPE_SSELOG1:
return standard_sse_constant_opcode (insn, operands);
case TYPE_SSEMOV:
return ix86_output_ssemov (insn, operands);
case TYPE_SSECVT:
if (SSE_REG_P (operands[0]))
return "movq2dq\t{%1, %0|%0, %1}";
else
return "movdq2q\t{%1, %0|%0, %1}";
case TYPE_LEA:
return "lea{q}\t{%E1, %0|%0, %E1}";
case TYPE_IMOV:
gcc_assert (!flag_pic || LEGITIMATE_PIC_OPERAND_P (operands[1]));
if (get_attr_mode (insn) == MODE_SI)
return "mov{l}\t{%k1, %k0|%k0, %k1}";
else if (which_alternative == 4)
return "movabs{q}\t{%1, %0|%0, %1}";
else if (ix86_use_lea_for_mov (insn, operands))
return "lea{q}\t{%E1, %0|%0, %E1}";
else
return "mov{q}\t{%1, %0|%0, %1}";
default:
gcc_unreachable ();
}
}
[(set (attr "isa")
(cond [(eq_attr "alternative" "0,1,17,18")
(const_string "nox64")
(eq_attr "alternative" "2,3,4,5,10,11,23,25")
(const_string "x64")
(eq_attr "alternative" "19,20")
(const_string "x64_sse2")
(eq_attr "alternative" "21,22")
(const_string "sse2")
]
(const_string "*")))
(set (attr "type")
(cond [(eq_attr "alternative" "0,1,17,18")
(const_string "multi")
(eq_attr "alternative" "6")
(const_string "mmx")
(eq_attr "alternative" "7,8,9,10,11")
(const_string "mmxmov")
(eq_attr "alternative" "12")
(const_string "sselog1")
(eq_attr "alternative" "13,14,15,16,19,20")
(const_string "ssemov")
(eq_attr "alternative" "21,22")
(const_string "ssecvt")
(eq_attr "alternative" "23,24,25,26")
(const_string "mskmov")
(eq_attr "alternative" "27")
(const_string "msklog")
(and (match_operand 0 "register_operand")
(match_operand 1 "pic_32bit_operand"))
(const_string "lea")
]
(const_string "imov")))
(set (attr "modrm")
(if_then_else
(and (eq_attr "alternative" "4") (eq_attr "type" "imov"))
(const_string "0")
(const_string "*")))
(set (attr "length_immediate")
(if_then_else
(and (eq_attr "alternative" "4") (eq_attr "type" "imov"))
(const_string "8")
(const_string "*")))
(set (attr "prefix_rex")
(if_then_else
(eq_attr "alternative" "10,11,19,20")
(const_string "1")
(const_string "*")))
(set (attr "prefix")
(if_then_else (eq_attr "type" "sselog1,ssemov")
(const_string "maybe_vex")
(const_string "orig")))
(set (attr "prefix_data16")
(if_then_else (and (eq_attr "type" "ssemov") (eq_attr "mode" "DI"))
(const_string "1")
(const_string "*")))
(set (attr "mode")
(cond [(eq_attr "alternative" "2")
(const_string "SI")
(eq_attr "alternative" "12,13")
(cond [(match_test "TARGET_AVX")
(const_string "TI")
(ior (not (match_test "TARGET_SSE2"))
(match_test "optimize_function_for_size_p (cfun)"))
(const_string "V4SF")
]
(const_string "TI"))
(and (eq_attr "alternative" "14,15,16")
(not (match_test "TARGET_SSE2")))
(const_string "V2SF")
]
(const_string "DI")))
(set (attr "preferred_for_speed")
(cond [(eq_attr "alternative" "10,17,19")
(symbol_ref "TARGET_INTER_UNIT_MOVES_FROM_VEC")
(eq_attr "alternative" "11,18,20")
(symbol_ref "TARGET_INTER_UNIT_MOVES_TO_VEC")
]
(symbol_ref "true")))
(set (attr "enabled")
(cond [(eq_attr "alternative" "15")
(if_then_else
(match_test "TARGET_STV && TARGET_SSE2")
(symbol_ref "false")
(const_string "*"))
(eq_attr "alternative" "16")
(if_then_else
(match_test "TARGET_STV && TARGET_SSE2")
(symbol_ref "true")
(symbol_ref "false"))
]
(const_string "*")))])
(define_split
[(set (match_operand: 0 "general_reg_operand")
(match_operand: 1 "sse_reg_operand"))]
"TARGET_SSE4_1
&& reload_completed"
[(set (match_dup 2)
(vec_select:DWIH
(match_dup 3)
(parallel [(const_int 1)])))]
{
operands[2] = gen_highpart (<MODE>mode, operands[0]);
operands[3] = lowpart_subreg (<ssevecmode>mode, operands[1], <DWI>mode);
emit_move_insn (gen_lowpart (<MODE>mode, operands[0]),
gen_lowpart (<MODE>mode, operands[1]));
})
(define_split
[(set (match_operand:DWI 0 "nonimmediate_gr_operand")
(match_operand:DWI 1 "general_gr_operand"))]
"reload_completed"
[(const_int 0)]
"ix86_split_long_move (operands); DONE;")
(define_split
[(set (match_operand:DI 0 "sse_reg_operand")
(match_operand:DI 1 "general_reg_operand"))]
"!TARGET_64BIT && TARGET_SSE4_1
&& reload_completed"
[(set (match_dup 2)
(vec_merge:V4SI
(vec_duplicate:V4SI (match_dup 3))
(match_dup 2)
(const_int 2)))]
{
operands[2] = lowpart_subreg (V4SImode, operands[0], DImode);
operands[3] = gen_highpart (SImode, operands[1]);
emit_move_insn (gen_lowpart (SImode, operands[0]),
gen_lowpart (SImode, operands[1]));
})
;; movabsq $0x0012345678000000, %rax is longer
;; than movl $0x12345678, %eax; shlq $24, %rax.
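;; (movabs with a 64-bit immediate encodes in 10 bytes, while the 5-byte
;; movl plus the 4-byte shlq total only 9.)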
(define_peephole2
[(set (match_operand:DI 0 "register_operand")
(match_operand:DI 1 "const_int_operand"))]
"TARGET_64BIT
&& optimize_insn_for_size_p ()
&& LEGACY_INT_REG_P (operands[0])
&& !x86_64_immediate_operand (operands[1], DImode)
&& !x86_64_zext_immediate_operand (operands[1], DImode)
&& !((UINTVAL (operands[1]) >> ctz_hwi (UINTVAL (operands[1])))
& ~(HOST_WIDE_INT) 0xffffffff)
&& peep2_regno_dead_p (0, FLAGS_REG)"
[(set (match_dup 0) (match_dup 1))
(parallel [(set (match_dup 0) (ashift:DI (match_dup 0) (match_dup 2)))
(clobber (reg:CC FLAGS_REG))])]
{
int shift = ctz_hwi (UINTVAL (operands[1]));
operands[1] = gen_int_mode (UINTVAL (operands[1]) >> shift, DImode);
operands[2] = gen_int_mode (shift, QImode);
})
(define_insn "*movsi_internal"
[(set (match_operand:SI 0 "nonimmediate_operand"
"=r,m ,*y,*y,?*y,?m,?r,?*y,*v,*v,*v,m ,?r,?*v,*k,*k ,*rm,*k")
(match_operand:SI 1 "general_operand"
"g ,re,C ,*y,Bk ,*y,*y,r ,C ,*v,Bk,*v,*v,r ,*r,*kBk,*k ,CBC"))]
"!(MEM_P (operands[0]) && MEM_P (operands[1]))
&& ix86_hardreg_mov_ok (operands[0], operands[1])"
{
switch (get_attr_type (insn))
{
case TYPE_SSELOG1:
return standard_sse_constant_opcode (insn, operands);
case TYPE_MSKMOV:
return "kmovd\t{%1, %0|%0, %1}";
case TYPE_MSKLOG:
if (operands[1] == const0_rtx)
return "kxord\t%0, %0, %0";
else if (operands[1] == constm1_rtx)
return "kxnord\t%0, %0, %0";
gcc_unreachable ();
case TYPE_SSEMOV:
return ix86_output_ssemov (insn, operands);
case TYPE_MMX:
return "pxor\t%0, %0";
case TYPE_MMXMOV:
switch (get_attr_mode (insn))
{
case MODE_DI:
return "movq\t{%1, %0|%0, %1}";
case MODE_SI:
return "movd\t{%1, %0|%0, %1}";
default:
gcc_unreachable ();
}
case TYPE_LEA:
return "lea{l}\t{%E1, %0|%0, %E1}";
case TYPE_IMOV:
gcc_assert (!flag_pic || LEGITIMATE_PIC_OPERAND_P (operands[1]));
if (ix86_use_lea_for_mov (insn, operands))
return "lea{l}\t{%E1, %0|%0, %E1}";
else
return "mov{l}\t{%1, %0|%0, %1}";
default:
gcc_unreachable ();
}
}
[(set (attr "isa")
(cond [(eq_attr "alternative" "12,13")
(const_string "sse2")
]
(const_string "*")))
(set (attr "type")
(cond [(eq_attr "alternative" "2")
(const_string "mmx")
(eq_attr "alternative" "3,4,5,6,7")
(const_string "mmxmov")
(eq_attr "alternative" "8")
(const_string "sselog1")
(eq_attr "alternative" "9,10,11,12,13")
(const_string "ssemov")
(eq_attr "alternative" "14,15,16")
(const_string "mskmov")
(eq_attr "alternative" "17")
(const_string "msklog")
(and (match_operand 0 "register_operand")
(match_operand 1 "pic_32bit_operand"))
(const_string "lea")
]
(const_string "imov")))
(set (attr "prefix")
(if_then_else (eq_attr "type" "sselog1,ssemov")
(const_string "maybe_vex")
(const_string "orig")))
(set (attr "prefix_data16")
(if_then_else (and (eq_attr "type" "ssemov") (eq_attr "mode" "SI"))
(const_string "1")
(const_string "*")))
(set (attr "mode")
(cond [(eq_attr "alternative" "2,3")
(const_string "DI")
(eq_attr "alternative" "8,9")
(cond [(match_test "TARGET_AVX")
(const_string "TI")
(ior (not (match_test "TARGET_SSE2"))
(match_test "optimize_function_for_size_p (cfun)"))
(const_string "V4SF")
]
(const_string "TI"))
(and (eq_attr "alternative" "10,11")
(not (match_test "TARGET_SSE2")))
(const_string "SF")
]
(const_string "SI")))
(set (attr "preferred_for_speed")
(cond [(eq_attr "alternative" "6,12")
(symbol_ref "TARGET_INTER_UNIT_MOVES_FROM_VEC")
(eq_attr "alternative" "7,13")
(symbol_ref "TARGET_INTER_UNIT_MOVES_TO_VEC")
]
(symbol_ref "true")))])
;; With -Oz, transform mov $imm,reg to the shorter push $imm; pop reg.
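;; For example, mov $5, %eax takes 5 bytes, while push $5 with a
;; sign-extended 8-bit immediate (2 bytes) plus pop %eax (1 byte) takes
;; only 3; hence the IN_RANGE (-128, 127) test below.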
(define_peephole2
[(set (match_operand:SWI248 0 "general_reg_operand")
(match_operand:SWI248 1 "const_int_operand"))]
"optimize_insn_for_size_p () && optimize_size > 1
&& operands[1] != const0_rtx
&& IN_RANGE (INTVAL (operands[1]), -128, 127)
&& !ix86_red_zone_used
&& REGNO (operands[0]) != SP_REG"
[(set (match_dup 2) (match_dup 1))
(set (match_dup 0) (match_dup 3))]
{
if (GET_MODE (operands[0]) != word_mode)
operands[0] = gen_rtx_REG (word_mode, REGNO (operands[0]));
operands[2] = gen_rtx_MEM (word_mode,
gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
operands[3] = gen_rtx_MEM (word_mode,
gen_rtx_POST_INC (Pmode, stack_pointer_rtx));
})
;; With -Oz, transform mov $0,mem to the shorter and $0,mem.
;; Likewise, transform mov $-1,mem to the shorter or $-1,mem.
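;; (Both and and or accept a sign-extended 8-bit immediate, saving three
;; bytes over SImode mov, which only takes a full 32-bit immediate with
;; a memory destination; the cost is clobbering the flags.)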
(define_peephole2
[(set (match_operand:SWI248 0 "memory_operand")
(match_operand:SWI248 1 "const_int_operand"))]
"(operands[1] == const0_rtx || operands[1] == constm1_rtx)
&& optimize_insn_for_size_p () && optimize_size > 1
&& peep2_regno_dead_p (0, FLAGS_REG)"
[(parallel [(set (match_dup 0) (match_dup 1))
(clobber (reg:CC FLAGS_REG))])])
(define_insn "*movhi_internal"
[(set (match_operand:HI 0 "nonimmediate_operand"
"=r,r,r,m ,*k,*k ,r ,m ,*k ,?r,?*v,*v,*v,*v,m")
(match_operand:HI 1 "general_operand"
"r ,n,m,rn,r ,*km,*k,*k,CBC,*v,r ,C ,*v,m ,*v"))]
"!(MEM_P (operands[0]) && MEM_P (operands[1]))
&& ix86_hardreg_mov_ok (operands[0], operands[1])"
{
switch (get_attr_type (insn))
{
case TYPE_IMOVX:
/* movzwl is faster than movw on p2 due to partial word stalls,
though not as fast as an aligned movl. */
return "movz{wl|x}\t{%1, %k0|%k0, %1}";
case TYPE_MSKMOV:
switch (which_alternative)
{
case 4:
return "kmovw\t{%k1, %0|%0, %k1}";
case 6:
return "kmovw\t{%1, %k0|%k0, %1}";
case 5:
case 7:
return "kmovw\t{%1, %0|%0, %1}";
default:
gcc_unreachable ();
}
case TYPE_SSEMOV:
return ix86_output_ssemov (insn, operands);
case TYPE_SSELOG1:
if (satisfies_constraint_C (operands[1]))
return standard_sse_constant_opcode (insn, operands);
if (SSE_REG_P (operands[0]))
return "%vpinsrw\t{$0, %1, %d0|%d0, %1, 0}";
else
return "%vpextrw\t{$0, %1, %0|%0, %1, 0}";
case TYPE_MSKLOG:
if (operands[1] == const0_rtx)
return "kxorw\t%0, %0, %0";
else if (operands[1] == constm1_rtx)
return "kxnorw\t%0, %0, %0";
gcc_unreachable ();
default:
if (get_attr_mode (insn) == MODE_SI)
return "mov{l}\t{%k1, %k0|%k0, %k1}";
else
return "mov{w}\t{%1, %0|%0, %1}";
}
}
[(set (attr "isa")
(cond [(eq_attr "alternative" "9,10,11,12,13")
(const_string "sse2")
(eq_attr "alternative" "14")
(const_string "sse4")
]
(const_string "*")))
(set (attr "type")
(cond [(eq_attr "alternative" "4,5,6,7")
(const_string "mskmov")
(eq_attr "alternative" "8")
(const_string "msklog")
(eq_attr "alternative" "13,14")
(if_then_else (match_test "TARGET_AVX512FP16")
(const_string "ssemov")
(const_string "sselog1"))
(eq_attr "alternative" "11")
(const_string "sselog1")
(eq_attr "alternative" "9,10,12")
(const_string "ssemov")
(match_test "optimize_function_for_size_p (cfun)")
(const_string "imov")
(and (eq_attr "alternative" "0")
(ior (not (match_test "TARGET_PARTIAL_REG_STALL"))
(not (match_test "TARGET_HIMODE_MATH"))))
(const_string "imov")
(and (eq_attr "alternative" "1,2")
(match_operand:HI 1 "aligned_operand"))
(const_string "imov")
(and (match_test "TARGET_MOVX")
(eq_attr "alternative" "0,2"))
(const_string "imovx")
]
(const_string "imov")))
(set (attr "prefix")
(cond [(eq_attr "alternative" "4,5,6,7,8")
(const_string "vex")
(eq_attr "alternative" "9,10,11,12,13,14")
(const_string "maybe_evex")
]
(const_string "orig")))
(set (attr "mode")
(cond [(eq_attr "alternative" "9,10")
(if_then_else (match_test "TARGET_AVX512FP16")
(const_string "HI")
(const_string "SI"))
(eq_attr "alternative" "13,14")
(if_then_else (match_test "TARGET_AVX512FP16")
(const_string "HI")
(const_string "TI"))
(eq_attr "alternative" "11")
(cond [(match_test "TARGET_AVX")
(const_string "TI")
(ior (not (match_test "TARGET_SSE2"))
(match_test "optimize_function_for_size_p (cfun)"))
(const_string "V4SF")
]
(const_string "TI"))
(eq_attr "alternative" "12")
(cond [(match_test "TARGET_AVX512FP16")
(const_string "HF")
(match_test "TARGET_AVX")
(const_string "TI")
(ior (not (match_test "TARGET_SSE2"))
(match_test "optimize_function_for_size_p (cfun)"))
(const_string "V4SF")
]
(const_string "TI"))
(eq_attr "type" "imovx")
(const_string "SI")
(and (eq_attr "alternative" "1,2")
(match_operand:HI 1 "aligned_operand"))
(const_string "SI")
(and (eq_attr "alternative" "0")
(ior (not (match_test "TARGET_PARTIAL_REG_STALL"))
(not (match_test "TARGET_HIMODE_MATH"))))
(const_string "SI")
]
(const_string "HI")))
(set (attr "preferred_for_speed")
(cond [(eq_attr "alternative" "9")
(symbol_ref "TARGET_INTER_UNIT_MOVES_FROM_VEC")
(eq_attr "alternative" "10")
(symbol_ref "TARGET_INTER_UNIT_MOVES_TO_VEC")
]
(symbol_ref "true")))])
;; The situation is quite tricky as to when to choose a full sized (SImode)
;; move over QImode moves.  For Q_REG -> Q_REG moves we use the full size
;; only on machines with partial register dependencies (such as AMD Athlon),
;; where a QImode move would incur an extra dependency, and on machines with
;; partial register stalls that don't use the QImode patterns (where a QImode
;; move causes a stall on the next instruction).
;;
;; For loads of Q_REG to NONQ_REG we use full sized moves, except on
;; machines with partial register stalls, where such a load would cause
;; a stall; there we use movzx instead.
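;; For example, on a machine with partial register dependencies
;; "movl %eax, %edx" is preferred over "movb %al, %dl", since writing
;; only %dl leaves the new value of %edx dependent on its previous
;; contents.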
(define_insn "*movqi_internal"
[(set (match_operand:QI 0 "nonimmediate_operand"
"=Q,R,r,q,q,r,r ,?r,m ,*k,*k,*r,*m,*k,*k,*k")
(match_operand:QI 1 "general_operand"
"Q ,R,r,n,m,q,rn, m,qn,*r,*k,*k,*k,*m,C,BC"))]
"!(MEM_P (operands[0]) && MEM_P (operands[1]))
&& ix86_hardreg_mov_ok (operands[0], operands[1])"
{
char buf[128];
const char *ops;
const char *suffix;
switch (get_attr_type (insn))
{
case TYPE_IMOVX:
gcc_assert (ANY_QI_REG_P (operands[1]) || MEM_P (operands[1]));
return "movz{bl|x}\t{%1, %k0|%k0, %1}";
case TYPE_MSKMOV:
switch (which_alternative)
{
case 9:
ops = "kmov%s\t{%%k1, %%0|%%0, %%k1}";
break;
case 11:
ops = "kmov%s\t{%%1, %%k0|%%k0, %%1}";
break;
case 12:
case 13:
gcc_assert (TARGET_AVX512DQ);
/* FALLTHRU */
case 10:
ops = "kmov%s\t{%%1, %%0|%%0, %%1}";
break;
default:
gcc_unreachable ();
}
suffix = (get_attr_mode (insn) == MODE_HI) ? "w" : "b";
snprintf (buf, sizeof (buf), ops, suffix);
output_asm_insn (buf, operands);
return "";
case TYPE_MSKLOG:
if (operands[1] == const0_rtx)
{
if (get_attr_mode (insn) == MODE_HI)
return "kxorw\t%0, %0, %0";
else
return "kxorb\t%0, %0, %0";
}
else if (operands[1] == constm1_rtx)
{
gcc_assert (TARGET_AVX512DQ);
return "kxnorb\t%0, %0, %0";
}
gcc_unreachable ();
default:
if (get_attr_mode (insn) == MODE_SI)
return "mov{l}\t{%k1, %k0|%k0, %k1}";
else
return "mov{b}\t{%1, %0|%0, %1}";
}
}
[(set (attr "isa")
(cond [(eq_attr "alternative" "1,2")
(const_string "x64")
(eq_attr "alternative" "12,13,15")
(const_string "avx512dq")
]
(const_string "*")))
(set (attr "type")
(cond [(eq_attr "alternative" "9,10,11,12,13")
(const_string "mskmov")
(eq_attr "alternative" "14,15")
(const_string "msklog")
(and (eq_attr "alternative" "7")
(not (match_operand:QI 1 "aligned_operand")))
(const_string "imovx")
(match_test "optimize_function_for_size_p (cfun)")
(const_string "imov")
(and (eq_attr "alternative" "5")
(ior (not (match_test "TARGET_PARTIAL_REG_STALL"))
(not (match_test "TARGET_QIMODE_MATH"))))
(const_string "imov")
(eq_attr "alternative" "5,7")
(const_string "imovx")
(and (match_test "TARGET_MOVX")
(eq_attr "alternative" "4"))
(const_string "imovx")
]
(const_string "imov")))
(set (attr "prefix")
(if_then_else (eq_attr "alternative" "9,10,11,12,13,14,15")
(const_string "vex")
(const_string "orig")))
(set (attr "mode")
(cond [(eq_attr "alternative" "5,6,7")
(const_string "SI")
(eq_attr "alternative" "8")
(const_string "QI")
(and (eq_attr "alternative" "9,10,11,14")
(not (match_test "TARGET_AVX512DQ")))
(const_string "HI")
(eq_attr "type" "imovx")
(const_string "SI")
;; For -Os, 8-bit immediates are always shorter than 32-bit
;; ones.
(and (eq_attr "type" "imov")
(and (eq_attr "alternative" "3")
(match_test "optimize_function_for_size_p (cfun)")))
(const_string "QI")
;; For -Os, movl where one or both operands are NON_Q_REGS
;; and both are LEGACY_REGS is shorter than movb.
;; Otherwise movb and movl sizes are the same, so decide purely
;; based on speed factors.
(and (eq_attr "type" "imov")
(and (eq_attr "alternative" "1")
(match_test "optimize_function_for_size_p (cfun)")))
(const_string "SI")
(and (eq_attr "type" "imov")
(and (eq_attr "alternative" "0,1,2,3")
(and (match_test "TARGET_PARTIAL_REG_DEPENDENCY")
(not (match_test "TARGET_PARTIAL_REG_STALL")))))
(const_string "SI")
;; Avoid partial register stalls when not using QImode arithmetic
(and (eq_attr "type" "imov")
(and (eq_attr "alternative" "0,1,2,3")
(and (match_test "TARGET_PARTIAL_REG_STALL")
(not (match_test "TARGET_QIMODE_MATH")))))
(const_string "SI")
]
(const_string "QI")))])
;; Reload dislikes loading 0/-1 directly into mask registers.
;; Try to tidy things up here.
(define_peephole2
[(set (match_operand:SWI 0 "general_reg_operand")
(match_operand:SWI 1 "immediate_operand"))
(set (match_operand:SWI 2 "mask_reg_operand")
(match_dup 0))]
"peep2_reg_dead_p (2, operands[0])
&& (const0_operand (operands[1], mode)
|| (constm1_operand (operands[1], mode)
&& ( > 1 || TARGET_AVX512DQ)))"
[(set (match_dup 2) (match_dup 1))])
;; Stores and loads of ax to an arbitrary constant address.
;; We fake a second form of the instruction to force reload to load the
;; address into a register when rax is not available.
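;; (Only the accumulator forms of mov have the 64-bit absolute-address
;; (moffs) encoding, hence the "a" constraint in the first alternative.)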
(define_insn "*movabs_1"
[(set (mem:SWI1248x (match_operand:DI 0 "x86_64_movabs_operand" "i,r"))
(match_operand:SWI1248x 1 "nonmemory_operand" "a,r"))]
"TARGET_LP64 && ix86_check_movabs (insn, 0)"
{
/* Recover the full memory rtx. */
operands[0] = SET_DEST (PATTERN (insn));
switch (which_alternative)
{
case 0:
return "movabs{}\t{%1, %P0| PTR [%P0], %1}";
case 1:
return "mov{}\t{%1, %0|%0, %1}";
default:
gcc_unreachable ();
}
}
[(set_attr "type" "imov")
(set_attr "modrm" "0,*")
(set_attr "length_address" "8,0")
(set_attr "length_immediate" "0,*")
(set_attr "memory" "store")
(set_attr "mode" "")])
(define_insn "*movabs_2"
[(set (match_operand:SWI1248x 0 "register_operand" "=a,r")
(mem:SWI1248x (match_operand:DI 1 "x86_64_movabs_operand" "i,r")))]
"TARGET_LP64 && ix86_check_movabs (insn, 1)"
{
/* Recover the full memory rtx. */
operands[1] = SET_SRC (PATTERN (insn));
switch (which_alternative)
{
case 0:
return "movabs{}\t{%P1, %0|%0, PTR [%P1]}";
case 1:
return "mov{}\t{%1, %0|%0, %1}";
default:
gcc_unreachable ();
}
}
[(set_attr "type" "imov")
(set_attr "modrm" "0,*")
(set_attr "length_address" "8,0")
(set_attr "length_immediate" "0")
(set_attr "memory" "load")
(set_attr "mode" "")])
(define_insn "*swap"
[(set (match_operand:SWI48 0 "register_operand" "+r")
(match_operand:SWI48 1 "register_operand" "+r"))
(set (match_dup 1)
(match_dup 0))]
""
"xchg{}\t%1, %0"
[(set_attr "type" "imov")
(set_attr "mode" "")
(set_attr "pent_pair" "np")
(set_attr "athlon_decode" "vector")
(set_attr "amdfam10_decode" "double")
(set_attr "bdver1_decode" "double")])
(define_insn "*swap"
[(set (match_operand:SWI12 0 "register_operand" "+,r")
(match_operand:SWI12 1 "register_operand" "+,r"))
(set (match_dup 1)
(match_dup 0))]
""
"@
xchg{}\t%1, %0
xchg{l}\t%k1, %k0"
[(set_attr "type" "imov")
(set_attr "mode" ",SI")
(set (attr "preferred_for_size")
(cond [(eq_attr "alternative" "0")
(symbol_ref "false")]
(symbol_ref "true")))
;; Potential partial reg stall on alternative 1.
(set (attr "preferred_for_speed")
(cond [(eq_attr "alternative" "1")
(symbol_ref "!TARGET_PARTIAL_REG_STALL")]
(symbol_ref "true")))
(set_attr "pent_pair" "np")
(set_attr "athlon_decode" "vector")
(set_attr "amdfam10_decode" "double")
(set_attr "bdver1_decode" "double")])
(define_peephole2
[(set (match_operand:SWI 0 "general_reg_operand")
(match_operand:SWI 1 "general_reg_operand"))
(set (match_dup 1)
(match_operand:SWI 2 "general_reg_operand"))
(set (match_dup 2) (match_dup 0))]
"peep2_reg_dead_p (3, operands[0])
&& optimize_insn_for_size_p ()"
[(parallel [(set (match_dup 1) (match_dup 2))
(set (match_dup 2) (match_dup 1))])])
(define_expand "movstrict"
[(set (strict_low_part (match_operand:SWI12 0 "register_operand"))
(match_operand:SWI12 1 "general_operand"))]
""
{
gcc_assert (SUBREG_P (operands[0]));
if ((TARGET_PARTIAL_REG_STALL && optimize_function_for_speed_p (cfun))
|| !VALID_INT_MODE_P (GET_MODE (SUBREG_REG (operands[0]))))
FAIL;
})
(define_insn "*movstrict_1"
[(set (strict_low_part
(match_operand:SWI12 0 "register_operand" "+"))
(match_operand:SWI12 1 "general_operand" "mn"))]
"!TARGET_PARTIAL_REG_STALL || optimize_function_for_size_p (cfun)"
"mov{}\t{%1, %0|%0, %1}"
[(set_attr "type" "imov")
(set_attr "mode" "")])
(define_insn "*movstrict_xor"
[(set (strict_low_part (match_operand:SWI12 0 "register_operand" "+"))
(match_operand:SWI12 1 "const0_operand"))
(clobber (reg:CC FLAGS_REG))]
"reload_completed"
"xor{}\t%0, %0"
[(set_attr "type" "alu1")
(set_attr "mode" "")
(set_attr "length_immediate" "0")])
(define_expand "extv"
[(set (match_operand:SWI24 0 "register_operand")
(sign_extract:SWI24 (match_operand:SWI24 1 "register_operand")
(match_operand:SI 2 "const_int_operand")
(match_operand:SI 3 "const_int_operand")))]
""
{
/* Handle extractions from %ah et al. */
if (INTVAL (operands[2]) != 8 || INTVAL (operands[3]) != 8)
FAIL;
unsigned int regno = reg_or_subregno (operands[1]);
/* Be careful to expand only with registers having upper parts. */
if (regno <= LAST_VIRTUAL_REGISTER && !QI_REGNO_P (regno))
operands[1] = copy_to_reg (operands[1]);
})
(define_insn "*extv"
[(set (match_operand:SWI24 0 "register_operand" "=R")
(sign_extract:SWI24 (match_operand:SWI24 1 "register_operand" "Q")
(const_int 8)
(const_int 8)))]
""
"movs{bl|x}\t{%h1, %k0|%k0, %h1}"
[(set_attr "type" "imovx")
(set_attr "mode" "SI")])
(define_expand "extzv"
[(set (match_operand:SWI248 0 "register_operand")
(zero_extract:SWI248 (match_operand:SWI248 1 "register_operand")
(match_operand:SI 2 "const_int_operand")
(match_operand:SI 3 "const_int_operand")))]
""
{
if (ix86_expand_pextr (operands))
DONE;
/* Handle extractions from %ah et al. */
if (INTVAL (operands[2]) != 8 || INTVAL (operands[3]) != 8)
FAIL;
unsigned int regno = reg_or_subregno (operands[1]);
/* Be careful to expand only with registers having upper parts. */
if (regno <= LAST_VIRTUAL_REGISTER && !QI_REGNO_P (regno))
operands[1] = copy_to_reg (operands[1]);
})
(define_insn "*extzvqi_mem_rex64"
[(set (match_operand:QI 0 "norex_memory_operand" "=Bn")
(subreg:QI
(zero_extract:SWI248
(match_operand:SWI248 1 "register_operand" "Q")
(const_int 8)
(const_int 8)) 0))]
"TARGET_64BIT && reload_completed"
"mov{b}\t{%h1, %0|%0, %h1}"
[(set_attr "type" "imov")
(set_attr "mode" "QI")])
(define_insn "*extzv"
[(set (match_operand:SWI248 0 "register_operand" "=R")
(zero_extract:SWI248 (match_operand:SWI248 1 "register_operand" "Q")
(const_int 8)
(const_int 8)))]
""
"movz{bl|x}\t{%h1, %k0|%k0, %h1}"
[(set_attr "type" "imovx")
(set_attr "mode" "SI")])
(define_insn "*extzvqi"
[(set (match_operand:QI 0 "nonimmediate_operand" "=QBc,?R,m")
(subreg:QI
(zero_extract:SWI248
(match_operand:SWI248 1 "register_operand" "Q,Q,Q")
(const_int 8)
(const_int 8)) 0))]
""
{
switch (get_attr_type (insn))
{
case TYPE_IMOVX:
return "movz{bl|x}\t{%h1, %k0|%k0, %h1}";
default:
return "mov{b}\t{%h1, %0|%0, %h1}";
}
}
[(set_attr "isa" "*,*,nox64")
(set (attr "type")
(if_then_else (and (match_operand:QI 0 "register_operand")
(ior (not (match_operand:QI 0 "QIreg_operand"))
(match_test "TARGET_MOVX")))
(const_string "imovx")
(const_string "imov")))
(set (attr "mode")
(if_then_else (eq_attr "type" "imovx")
(const_string "SI")
(const_string "QI")))])
(define_peephole2
[(set (match_operand:QI 0 "register_operand")
(subreg:QI
(zero_extract:SWI248 (match_operand:SWI248 1 "register_operand")
(const_int 8)
(const_int 8)) 0))
(set (match_operand:QI 2 "norex_memory_operand") (match_dup 0))]
"TARGET_64BIT
&& peep2_reg_dead_p (2, operands[0])"
[(set (match_dup 2)
(subreg:QI
(zero_extract:SWI248 (match_dup 1)
(const_int 8)
(const_int 8)) 0))])
(define_expand "insv"
[(set (zero_extract:SWI248 (match_operand:SWI248 0 "register_operand")
(match_operand:SI 1 "const_int_operand")
(match_operand:SI 2 "const_int_operand"))
(match_operand:SWI248 3 "register_operand"))]
""
{
rtx dst;
if (ix86_expand_pinsr (operands))
DONE;
/* Handle insertions to %ah et al. */
if (INTVAL (operands[1]) != 8 || INTVAL (operands[2]) != 8)
FAIL;
unsigned int regno = reg_or_subregno (operands[0]);
/* Be careful to expand only with registers having upper parts. */
if (regno <= LAST_VIRTUAL_REGISTER && !QI_REGNO_P (regno))
dst = copy_to_reg (operands[0]);
else
dst = operands[0];
emit_insn (gen_insv_1 (<MODE>mode, dst, operands[3]));
/* Fix up the destination if needed. */
if (dst != operands[0])
emit_move_insn (operands[0], dst);
DONE;
})
(define_insn "*insvqi_1_mem_rex64"
[(set (zero_extract:SWI248
(match_operand:SWI248 0 "register_operand" "+Q")
(const_int 8)
(const_int 8))
(subreg:SWI248
(match_operand:QI 1 "norex_memory_operand" "Bn") 0))]
"TARGET_64BIT && reload_completed"
"mov{b}\t{%1, %h0|%h0, %1}"
[(set_attr "type" "imov")
(set_attr "mode" "QI")])
(define_insn "@insv_1"
[(set (zero_extract:SWI248
(match_operand:SWI248 0 "register_operand" "+Q,Q")
(const_int 8)
(const_int 8))
(match_operand:SWI248 1 "general_operand" "QnBc,m"))]
""
{
if (CONST_INT_P (operands[1]))
operands[1] = gen_int_mode (INTVAL (operands[1]), QImode);
return "mov{b}\t{%b1, %h0|%h0, %b1}";
}
[(set_attr "isa" "*,nox64")
(set_attr "type" "imov")
(set_attr "mode" "QI")])
(define_insn "*insvqi_1"
[(set (zero_extract:SWI248
(match_operand:SWI248 0 "register_operand" "+Q,Q")
(const_int 8)
(const_int 8))
(subreg:SWI248
(match_operand:QI 1 "general_operand" "QnBc,m") 0))]
""
"mov{b}\t{%1, %h0|%h0, %1}"
[(set_attr "isa" "*,nox64")
(set_attr "type" "imov")
(set_attr "mode" "QI")])
(define_peephole2
[(set (match_operand:QI 0 "register_operand")
(match_operand:QI 1 "norex_memory_operand"))
(set (zero_extract:SWI248 (match_operand:SWI248 2 "register_operand")
(const_int 8)
(const_int 8))
(subreg:SWI248 (match_dup 0) 0))]
"TARGET_64BIT
&& peep2_reg_dead_p (2, operands[0])"
[(set (zero_extract:SWI248 (match_dup 2)
(const_int 8)
(const_int 8))
(subreg:SWI248 (match_dup 1) 0))])
;; Eliminate redundant insv, e.g. xorl %eax,%eax; movb $0, %ah
(define_peephole2
[(parallel [(set (match_operand:SWI48 0 "general_reg_operand")
(const_int 0))
(clobber (reg:CC FLAGS_REG))])
(set (zero_extract:SWI248 (match_operand:SWI248 1 "general_reg_operand")
(const_int 8)
(const_int 8))
(const_int 0))]
"REGNO (operands[0]) == REGNO (operands[1])"
[(parallel [(set (match_operand:SWI48 0 "general_reg_operand")
(const_int 0))
(clobber (reg:CC FLAGS_REG))])])
;; Combine movl followed by movb.
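;; For example, movl $0x12345678, %eax followed by movb $0xab, %ah
;; becomes a single movl $0x1234ab78, %eax.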
(define_peephole2
[(set (match_operand:SWI48 0 "general_reg_operand")
(match_operand:SWI48 1 "const_int_operand"))
(set (zero_extract:SWI248 (match_operand:SWI248 2 "general_reg_operand")
(const_int 8)
(const_int 8))
(match_operand:SWI248 3 "const_int_operand"))]
"REGNO (operands[0]) == REGNO (operands[2])"
[(set (match_operand:SWI48 0 "general_reg_operand")
(match_dup 4))]
{
HOST_WIDE_INT tmp = INTVAL (operands[1]) & ~(HOST_WIDE_INT)0xff00;
tmp |= (INTVAL (operands[3]) & 0xff) << 8;
operands[4] = gen_int_mode (tmp, <SWI48:MODE>mode);
})
(define_code_iterator any_extract [sign_extract zero_extract])
(define_insn "*insvqi_2"
[(set (zero_extract:SWI248
(match_operand:SWI248 0 "register_operand" "+Q")
(const_int 8)
(const_int 8))
(any_extract:SWI248
(match_operand:SWI248 1 "register_operand" "Q")
(const_int 8)
(const_int 8)))]
""
"mov{b}\t{%h1, %h0|%h0, %h1}"
[(set_attr "type" "imov")
(set_attr "mode" "QI")])
(define_insn "*insvqi_3"
[(set (zero_extract:SWI248
(match_operand:SWI248 0 "register_operand" "+Q")
(const_int 8)
(const_int 8))
(any_shiftrt:SWI248
(match_operand:SWI248 1 "register_operand" "Q")
(const_int 8)))]
""
"mov{b}\t{%h1, %h0|%h0, %h1}"
[(set_attr "type" "imov")
(set_attr "mode" "QI")])
;; Floating point push instructions.
(define_insn "*pushtf"
[(set (match_operand:TF 0 "push_operand" "=<,<")
(match_operand:TF 1 "general_no_elim_operand" "v,*roC"))]
"TARGET_64BIT || TARGET_SSE"
{
/* This insn should already have been split before reg-stack. */
return "#";
}
[(set_attr "isa" "*,x64")
(set_attr "type" "multi")
(set_attr "unit" "sse,*")
(set_attr "mode" "TF,DI")])
;; %%% Kill this when call knows how to work this out.
(define_split
[(set (match_operand:TF 0 "push_operand")
(match_operand:TF 1 "sse_reg_operand"))]
"TARGET_SSE && reload_completed"
[(set (reg:P SP_REG) (plus:P (reg:P SP_REG) (const_int -16)))
(set (match_dup 0) (match_dup 1))]
{
/* Preserve memory attributes. */
operands[0] = replace_equiv_address (operands[0], stack_pointer_rtx);
})
(define_insn "*pushxf"
[(set (match_operand:XF 0 "push_operand" "=<,<,<,<,<")
(match_operand:XF 1 "general_no_elim_operand" "f,r,*r,oF,oC"))]
""
{
/* This insn should already have been split before reg-stack. */
return "#";
}
[(set_attr "isa" "*,*,*,nox64,x64")
(set_attr "type" "multi")
(set_attr "unit" "i387,*,*,*,*")
(set (attr "mode")
(cond [(eq_attr "alternative" "1,2,3,4")
(if_then_else (match_test "TARGET_64BIT")
(const_string "DI")
(const_string "SI"))
]
(const_string "XF")))
(set (attr "preferred_for_size")
(cond [(eq_attr "alternative" "1")
(symbol_ref "false")]
(symbol_ref "true")))])
;; %%% Kill this when call knows how to work this out.
(define_split
[(set (match_operand:XF 0 "push_operand")
(match_operand:XF 1 "fp_register_operand"))]
"reload_completed"
[(set (reg:P SP_REG) (plus:P (reg:P SP_REG) (match_dup 2)))
(set (match_dup 0) (match_dup 1))]
{
operands[2] = GEN_INT (-PUSH_ROUNDING (GET_MODE_SIZE (XFmode)));
/* Preserve memory attributes. */
operands[0] = replace_equiv_address (operands[0], stack_pointer_rtx);
})
(define_insn "*pushdf"
[(set (match_operand:DF 0 "push_operand" "=<,<,<,<,<,<")
(match_operand:DF 1 "general_no_elim_operand" "f,r,*r,oF,rmC,v"))]
""
{
/* This insn should already have been split before reg-stack. */
return "#";
}
[(set_attr "isa" "*,nox64,nox64,nox64,x64,sse2")
(set_attr "type" "multi")
(set_attr "unit" "i387,*,*,*,*,sse")
(set_attr "mode" "DF,SI,SI,SI,DI,DF")
(set (attr "preferred_for_size")
(cond [(eq_attr "alternative" "1")
(symbol_ref "false")]
(symbol_ref "true")))
(set (attr "preferred_for_speed")
(cond [(eq_attr "alternative" "1")
(symbol_ref "TARGET_INTEGER_DFMODE_MOVES")]
(symbol_ref "true")))])
;; %%% Kill this when call knows how to work this out.
(define_split
[(set (match_operand:DF 0 "push_operand")
(match_operand:DF 1 "any_fp_register_operand"))]
"reload_completed"
[(set (reg:P SP_REG) (plus:P (reg:P SP_REG) (const_int -8)))
(set (match_dup 0) (match_dup 1))]
{
/* Preserve memory attributes. */
operands[0] = replace_equiv_address (operands[0], stack_pointer_rtx);
})
(define_insn "*pushhf_rex64"
[(set (match_operand:HF 0 "push_operand" "=X,X")
(match_operand:HF 1 "nonmemory_no_elim_operand" "r,x"))]
"TARGET_64BIT"
{
/* Anything else should already have been split before reg-stack. */
gcc_assert (which_alternative == 0);
return "push{q}\t%q1";
}
[(set_attr "isa" "*,sse4")
(set_attr "type" "push,multi")
(set_attr "mode" "DI,TI")])
(define_insn "*pushhf"
[(set (match_operand:HF 0 "push_operand" "=X,X")
(match_operand:HF 1 "general_no_elim_operand" "rmF,x"))]
"!TARGET_64BIT"
{
/* Anything else should already have been split before reg-stack. */
gcc_assert (which_alternative == 0);
return "push{l}\t%k1";
}
[(set_attr "isa" "*,sse4")
(set_attr "type" "push,multi")
(set_attr "mode" "SI,TI")])
(define_insn "*pushsf_rex64"
[(set (match_operand:SF 0 "push_operand" "=X,X,X")
(match_operand:SF 1 "nonmemory_no_elim_operand" "f,rF,v"))]
"TARGET_64BIT"
{
/* Anything else should already have been split before reg-stack. */
if (which_alternative != 1)
return "#";
return "push{q}\t%q1";
}
[(set_attr "type" "multi,push,multi")
(set_attr "unit" "i387,*,*")
(set_attr "mode" "SF,DI,SF")])
(define_insn "*pushsf"
[(set (match_operand:SF 0 "push_operand" "=<,<,<")
(match_operand:SF 1 "general_no_elim_operand" "f,rmF,v"))]
"!TARGET_64BIT"
{
/* Anything else should already have been split before reg-stack. */
if (which_alternative != 1)
return "#";
return "push{l}\t%1";
}
[(set_attr "type" "multi,push,multi")
(set_attr "unit" "i387,*,*")
(set_attr "mode" "SF,SI,SF")])
(define_mode_iterator MODESH [SF HF])
;; %%% Kill this when call knows how to work this out.
(define_split
[(set (match_operand:MODESH 0 "push_operand")
(match_operand:MODESH 1 "any_fp_register_operand"))]
"reload_completed"
[(set (reg:P SP_REG) (plus:P (reg:P SP_REG) (match_dup 2)))
(set (match_dup 0) (match_dup 1))]
{
rtx op = XEXP (operands[0], 0);
if (GET_CODE (op) == PRE_DEC)
{
gcc_assert (!TARGET_64BIT);
op = GEN_INT (-4);
}
else
{
op = XEXP (XEXP (op, 1), 1);
gcc_assert (CONST_INT_P (op));
}
operands[2] = op;
/* Preserve memory attributes. */
operands[0] = replace_equiv_address (operands[0], stack_pointer_rtx);
})
(define_split
[(set (match_operand:SF 0 "push_operand")
(match_operand:SF 1 "memory_operand"))]
"reload_completed
&& find_constant_src (insn)"
[(set (match_dup 0) (match_dup 2))]
"operands[2] = find_constant_src (curr_insn);")
(define_split
[(set (match_operand 0 "push_operand")
(match_operand 1 "general_gr_operand"))]
"reload_completed
&& (GET_MODE (operands[0]) == TFmode
|| GET_MODE (operands[0]) == XFmode
|| GET_MODE (operands[0]) == DFmode)"
[(const_int 0)]
"ix86_split_long_move (operands); DONE;")
;; Floating point move instructions.
(define_expand "movtf"
[(set (match_operand:TF 0 "nonimmediate_operand")
(match_operand:TF 1 "nonimmediate_operand"))]
"TARGET_64BIT || TARGET_SSE"
"ix86_expand_move (TFmode, operands); DONE;")
(define_expand "mov"
[(set (match_operand:X87MODEFH 0 "nonimmediate_operand")
(match_operand:X87MODEFH 1 "general_operand"))]
""
"ix86_expand_move (mode, operands); DONE;")
(define_insn "*movtf_internal"
[(set (match_operand:TF 0 "nonimmediate_operand" "=v,v ,m,?*r ,!o")
(match_operand:TF 1 "general_operand" "C ,vm,v,*roF,*rC"))]
"(TARGET_64BIT || TARGET_SSE)
&& !(MEM_P (operands[0]) && MEM_P (operands[1]))
&& (lra_in_progress || reload_completed
|| !CONST_DOUBLE_P (operands[1])
|| (standard_sse_constant_p (operands[1], TFmode) == 1
&& !memory_operand (operands[0], TFmode))
|| (!TARGET_MEMORY_MISMATCH_STALL
&& memory_operand (operands[0], TFmode)))"
{
switch (get_attr_type (insn))
{
case TYPE_SSELOG1:
return standard_sse_constant_opcode (insn, operands);
case TYPE_SSEMOV:
return ix86_output_ssemov (insn, operands);
case TYPE_MULTI:
return "#";
default:
gcc_unreachable ();
}
}
[(set_attr "isa" "*,*,*,x64,x64")
(set_attr "type" "sselog1,ssemov,ssemov,multi,multi")
(set (attr "prefix")
(if_then_else (eq_attr "type" "sselog1,ssemov")
(const_string "maybe_vex")
(const_string "orig")))
(set (attr "mode")
(cond [(eq_attr "alternative" "3,4")
(const_string "DI")
(match_test "TARGET_AVX")
(const_string "TI")
(ior (not (match_test "TARGET_SSE2"))
(match_test "optimize_function_for_size_p (cfun)"))
(const_string "V4SF")
(match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL")
(const_string "V4SF")
(and (eq_attr "alternative" "2")
(match_test "TARGET_SSE_TYPELESS_STORES"))
(const_string "V4SF")
]
(const_string "TI")))])
(define_split
[(set (match_operand:TF 0 "nonimmediate_gr_operand")
(match_operand:TF 1 "general_gr_operand"))]
"reload_completed"
[(const_int 0)]
"ix86_split_long_move (operands); DONE;")
;; Possible store forwarding (partial memory) stall
;; in alternatives 4, 6, 7 and 8.
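;; (Those alternatives store the value in integer-word pieces; a
;; subsequent full-width FP load from the same slot typically cannot be
;; store-forwarded and has to wait for all of the narrow stores to
;; retire.)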
(define_insn "*movxf_internal"
[(set (match_operand:XF 0 "nonimmediate_operand"
"=f,m,f,?r ,!o,?*r ,!o,!o,!o,r ,o ,o")
(match_operand:XF 1 "general_operand"
"fm,f,G,roF,r ,*roF,*r,F ,C ,roF,rF,rC"))]
"!(MEM_P (operands[0]) && MEM_P (operands[1]))
&& (lra_in_progress || reload_completed
|| !CONST_DOUBLE_P (operands[1])
|| ((optimize_function_for_size_p (cfun)
|| (ix86_cmodel == CM_LARGE || ix86_cmodel == CM_LARGE_PIC))
&& standard_80387_constant_p (operands[1]) > 0
&& !memory_operand (operands[0], XFmode))
|| (!TARGET_MEMORY_MISMATCH_STALL
&& memory_operand (operands[0], XFmode))
|| !TARGET_HARD_XF_REGS)"
{
switch (get_attr_type (insn))
{
case TYPE_FMOV:
if (which_alternative == 2)
return standard_80387_constant_opcode (operands[1]);
return output_387_reg_move (insn, operands);
case TYPE_MULTI:
return "#";
default:
gcc_unreachable ();
}
}
[(set (attr "isa")
(cond [(eq_attr "alternative" "7,10")
(const_string "nox64")
(eq_attr "alternative" "8,11")
(const_string "x64")
]
(const_string "*")))
(set (attr "type")
(cond [(eq_attr "alternative" "3,4,5,6,7,8,9,10,11")
(const_string "multi")
]
(const_string "fmov")))
(set (attr "mode")
(cond [(eq_attr "alternative" "3,4,5,6,7,8,9,10,11")
(if_then_else (match_test "TARGET_64BIT")
(const_string "DI")
(const_string "SI"))
]
(const_string "XF")))
(set (attr "preferred_for_size")
(cond [(eq_attr "alternative" "3,4")
(symbol_ref "false")]
(symbol_ref "true")))
(set (attr "enabled")
(cond [(eq_attr "alternative" "9,10,11")
(if_then_else
(match_test "TARGET_HARD_XF_REGS")
(symbol_ref "false")
(const_string "*"))
(not (match_test "TARGET_HARD_XF_REGS"))
(symbol_ref "false")
]
(const_string "*")))])
(define_split
[(set (match_operand:XF 0 "nonimmediate_gr_operand")
(match_operand:XF 1 "general_gr_operand"))]
"reload_completed"
[(const_int 0)]
"ix86_split_long_move (operands); DONE;")
;; Possible store forwarding (partial memory) stall in alternatives 4, 6 and 7.
(define_insn "*movdf_internal"
[(set (match_operand:DF 0 "nonimmediate_operand"
"=Yf*f,m ,Yf*f,?r ,!o,?*r ,!o,!o,?r,?m,?r,?r,v,v,v,m,*x,*x,*x,m ,r ,v,r ,o ,r ,m")
(match_operand:DF 1 "general_operand"
"Yf*fm,Yf*f,G ,roF,r ,*roF,*r,F ,rm,rC,C ,F ,C,v,m,v,C ,*x,m ,*x,v,r ,roF,rF,rmF,rC"))]
"!(MEM_P (operands[0]) && MEM_P (operands[1]))
&& (lra_in_progress || reload_completed
|| !CONST_DOUBLE_P (operands[1])
|| ((optimize_function_for_size_p (cfun)
|| (ix86_cmodel == CM_LARGE || ix86_cmodel == CM_LARGE_PIC))
&& IS_STACK_MODE (DFmode)
&& standard_80387_constant_p (operands[1]) > 0
&& !memory_operand (operands[0], DFmode))
|| (TARGET_SSE2 && TARGET_SSE_MATH
&& standard_sse_constant_p (operands[1], DFmode) == 1
&& !memory_operand (operands[0], DFmode))
|| ((TARGET_64BIT || !TARGET_MEMORY_MISMATCH_STALL)
&& memory_operand (operands[0], DFmode))
|| !TARGET_HARD_DF_REGS)"
{
switch (get_attr_type (insn))
{
case TYPE_FMOV:
if (which_alternative == 2)
return standard_80387_constant_opcode (operands[1]);
return output_387_reg_move (insn, operands);
case TYPE_MULTI:
return "#";
case TYPE_IMOV:
if (get_attr_mode (insn) == MODE_SI)
return "mov{l}\t{%1, %k0|%k0, %1}";
else if (which_alternative == 11)
return "movabs{q}\t{%1, %0|%0, %1}";
else
return "mov{q}\t{%1, %0|%0, %1}";
case TYPE_SSELOG1:
return standard_sse_constant_opcode (insn, operands);
case TYPE_SSEMOV:
return ix86_output_ssemov (insn, operands);
default:
gcc_unreachable ();
}
}
[(set (attr "isa")
(cond [(eq_attr "alternative" "3,4,5,6,7,22,23")
(const_string "nox64")
(eq_attr "alternative" "8,9,10,11,24,25")
(const_string "x64")
(eq_attr "alternative" "12,13,14,15")
(const_string "sse2")
(eq_attr "alternative" "20,21")
(const_string "x64_sse2")
]
(const_string "*")))
(set (attr "type")
(cond [(eq_attr "alternative" "0,1,2")
(const_string "fmov")
(eq_attr "alternative" "3,4,5,6,7,22,23")
(const_string "multi")
(eq_attr "alternative" "8,9,10,11,24,25")
(const_string "imov")
(eq_attr "alternative" "12,16")
(const_string "sselog1")
]
(const_string "ssemov")))
(set (attr "modrm")
(if_then_else (eq_attr "alternative" "11")
(const_string "0")
(const_string "*")))
(set (attr "length_immediate")
(if_then_else (eq_attr "alternative" "11")
(const_string "8")
(const_string "*")))
(set (attr "prefix")
(if_then_else (eq_attr "type" "sselog1,ssemov")
(const_string "maybe_vex")
(const_string "orig")))
(set (attr "prefix_data16")
(if_then_else
(ior (and (eq_attr "type" "ssemov") (eq_attr "mode" "DI"))
(eq_attr "mode" "V1DF"))
(const_string "1")
(const_string "*")))
(set (attr "mode")
(cond [(eq_attr "alternative" "3,4,5,6,7,10,22,23")
(const_string "SI")
(eq_attr "alternative" "8,9,11,20,21,24,25")
(const_string "DI")
/* xorps is one byte shorter for non-AVX targets. */
(eq_attr "alternative" "12,16")
(cond [(match_test "TARGET_AVX")
(const_string "V2DF")
(ior (not (match_test "TARGET_SSE2"))
(match_test "optimize_function_for_size_p (cfun)"))
(const_string "V4SF")
(match_test "TARGET_SSE_LOAD0_BY_PXOR")
(const_string "TI")
]
(const_string "V2DF"))
/* For architectures resolving dependencies on
whole SSE registers use movapd to break dependency
chains, otherwise use short move to avoid extra work. */
/* movaps is one byte shorter for non-AVX targets. */
(eq_attr "alternative" "13,17")
(cond [(match_test "TARGET_AVX")
(const_string "DF")
(ior (not (match_test "TARGET_SSE2"))
(match_test "optimize_function_for_size_p (cfun)"))
(const_string "V4SF")
(match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL")
(const_string "V4SF")
(match_test "TARGET_SSE_PARTIAL_REG_DEPENDENCY")
(const_string "V2DF")
]
(const_string "DF"))
/* For architectures resolving dependencies on register
parts we may avoid extra work to zero out upper part
of register. */
(eq_attr "alternative" "14,18")
(cond [(not (match_test "TARGET_SSE2"))
(const_string "V2SF")
(match_test "TARGET_AVX")
(const_string "DF")
(match_test "TARGET_SSE_SPLIT_REGS")
(const_string "V1DF")
]
(const_string "DF"))
(and (eq_attr "alternative" "15,19")
(not (match_test "TARGET_SSE2")))
(const_string "V2SF")
]
(const_string "DF")))
(set (attr "preferred_for_size")
(cond [(eq_attr "alternative" "3,4")
(symbol_ref "false")]
(symbol_ref "true")))
(set (attr "preferred_for_speed")
(cond [(eq_attr "alternative" "3,4")
(symbol_ref "TARGET_INTEGER_DFMODE_MOVES")
(eq_attr "alternative" "20")
(symbol_ref "TARGET_INTER_UNIT_MOVES_FROM_VEC")
(eq_attr "alternative" "21")
(symbol_ref "TARGET_INTER_UNIT_MOVES_TO_VEC")
]
(symbol_ref "true")))
(set (attr "enabled")
(cond [(eq_attr "alternative" "22,23,24,25")
(if_then_else
(match_test "TARGET_HARD_DF_REGS")
(symbol_ref "false")
(const_string "*"))
(not (match_test "TARGET_HARD_DF_REGS"))
(symbol_ref "false")
]
(const_string "*")))])
(define_split
[(set (match_operand:DF 0 "nonimmediate_gr_operand")
(match_operand:DF 1 "general_gr_operand"))]
"!TARGET_64BIT && reload_completed"
[(const_int 0)]
"ix86_split_long_move (operands); DONE;")
(define_insn "*movsf_internal"
[(set (match_operand:SF 0 "nonimmediate_operand"
"=Yf*f,m ,Yf*f,?r ,?m,v,v,v,m,?r,?v,!*y,!*y,!m,!r,!*y,r ,m")
(match_operand:SF 1 "general_operand"
"Yf*fm,Yf*f,G ,rmF,rF,C,v,m,v,v ,r ,*y ,m ,*y,*y,r ,rmF,rF"))]
"!(MEM_P (operands[0]) && MEM_P (operands[1]))
&& (lra_in_progress || reload_completed
|| !CONST_DOUBLE_P (operands[1])
|| ((optimize_function_for_size_p (cfun)
|| (ix86_cmodel == CM_LARGE || ix86_cmodel == CM_LARGE_PIC))
&& IS_STACK_MODE (SFmode)
&& standard_80387_constant_p (operands[1]) > 0)
|| (TARGET_SSE && TARGET_SSE_MATH
&& standard_sse_constant_p (operands[1], SFmode) == 1)
|| memory_operand (operands[0], SFmode)
|| !TARGET_HARD_SF_REGS)"
{
switch (get_attr_type (insn))
{
case TYPE_FMOV:
if (which_alternative == 2)
return standard_80387_constant_opcode (operands[1]);
return output_387_reg_move (insn, operands);
case TYPE_IMOV:
return "mov{l}\t{%1, %0|%0, %1}";
case TYPE_SSELOG1:
return standard_sse_constant_opcode (insn, operands);
case TYPE_SSEMOV:
return ix86_output_ssemov (insn, operands);
case TYPE_MMXMOV:
switch (get_attr_mode (insn))
{
case MODE_DI:
return "movq\t{%1, %0|%0, %1}";
case MODE_SI:
return "movd\t{%1, %0|%0, %1}";
default:
gcc_unreachable ();
}
default:
gcc_unreachable ();
}
}
[(set (attr "isa")
(cond [(eq_attr "alternative" "9,10")
(const_string "sse2")
]
(const_string "*")))
(set (attr "type")
(cond [(eq_attr "alternative" "0,1,2")
(const_string "fmov")
(eq_attr "alternative" "3,4,16,17")
(const_string "imov")
(eq_attr "alternative" "5")
(const_string "sselog1")
(eq_attr "alternative" "11,12,13,14,15")
(const_string "mmxmov")
]
(const_string "ssemov")))
(set (attr "prefix")
(if_then_else (eq_attr "type" "sselog1,ssemov")
(const_string "maybe_vex")
(const_string "orig")))
(set (attr "prefix_data16")
(if_then_else (and (eq_attr "type" "ssemov") (eq_attr "mode" "SI"))
(const_string "1")
(const_string "*")))
(set (attr "mode")
(cond [(eq_attr "alternative" "3,4,9,10,12,13,14,15,16,17")
(const_string "SI")
(eq_attr "alternative" "11")
(const_string "DI")
(eq_attr "alternative" "5")
(cond [(and (match_test "TARGET_AVX512F")
(not (match_test "TARGET_PREFER_AVX256")))
(const_string "V16SF")
(match_test "TARGET_AVX")
(const_string "V4SF")
(ior (not (match_test "TARGET_SSE2"))
(match_test "optimize_function_for_size_p (cfun)"))
(const_string "V4SF")
(match_test "TARGET_SSE_LOAD0_BY_PXOR")
(const_string "TI")
]
(const_string "V4SF"))
/* For architectures resolving dependencies on
whole SSE registers use a full-width movaps to break dependency
chains, otherwise use a short move to avoid extra work.
Do the same for architectures resolving dependencies on
the parts. While in DF mode it is better to always handle
just register parts, the SF mode is different due to lack
of instructions to load just part of the register. It is
better to maintain the whole registers in single format
to avoid problems on using packed logical operations. */
(eq_attr "alternative" "6")
(cond [(ior (match_test "TARGET_SSE_PARTIAL_REG_DEPENDENCY")
(match_test "TARGET_SSE_SPLIT_REGS"))
(const_string "V4SF")
]
(const_string "SF"))
]
(const_string "SF")))
(set (attr "preferred_for_speed")
(cond [(eq_attr "alternative" "9,14")
(symbol_ref "TARGET_INTER_UNIT_MOVES_FROM_VEC")
(eq_attr "alternative" "10,15")
(symbol_ref "TARGET_INTER_UNIT_MOVES_TO_VEC")
]
(symbol_ref "true")))
(set (attr "enabled")
(cond [(eq_attr "alternative" "16,17")
(if_then_else
(match_test "TARGET_HARD_SF_REGS")
(symbol_ref "false")
(const_string "*"))
(not (match_test "TARGET_HARD_SF_REGS"))
(symbol_ref "false")
]
(const_string "*")))])
(define_insn "*movhf_internal"
[(set (match_operand:HF 0 "nonimmediate_operand"
"=?r,?r,?r,?m,v,v,?r,m,?v,v")
(match_operand:HF 1 "general_operand"
"r ,F ,m ,rF,C,v, v,v,r ,m"))]
"!(MEM_P (operands[0]) && MEM_P (operands[1]))
&& (lra_in_progress
|| reload_completed
|| !CONST_DOUBLE_P (operands[1])
|| (TARGET_SSE2
&& standard_sse_constant_p (operands[1], HFmode) == 1)
|| memory_operand (operands[0], HFmode))"
{
switch (get_attr_type (insn))
{
case TYPE_IMOVX:
/* movzwl is faster than movw on p2 due to partial word stalls,
though not as fast as an aligned movl. */
return "movz{wl|x}\t{%1, %k0|%k0, %1}";
case TYPE_SSEMOV:
return ix86_output_ssemov (insn, operands);
case TYPE_SSELOG1:
if (satisfies_constraint_C (operands[1]))
return standard_sse_constant_opcode (insn, operands);
if (SSE_REG_P (operands[0]))
return "%vpinsrw\t{$0, %1, %d0|%d0, %1, 0}";
else
return "%vpextrw\t{$0, %1, %0|%0, %1, 0}";
default:
if (get_attr_mode (insn) == MODE_SI)
return "mov{l}\t{%k1, %k0|%k0, %k1}";
else
return "mov{w}\t{%1, %0|%0, %1}";
}
}
[(set (attr "isa")
(cond [(eq_attr "alternative" "4,5,6,8,9")
(const_string "sse2")
(eq_attr "alternative" "7")
(const_string "sse4")
]
(const_string "*")))
(set (attr "type")
(cond [(eq_attr "alternative" "4")
(const_string "sselog1")
(eq_attr "alternative" "5,6,8")
(const_string "ssemov")
(eq_attr "alternative" "7,9")
(if_then_else
(match_test ("TARGET_AVX512FP16"))
(const_string "ssemov")
(const_string "sselog1"))
(match_test "optimize_function_for_size_p (cfun)")
(const_string "imov")
(and (eq_attr "alternative" "0")
(ior (not (match_test "TARGET_PARTIAL_REG_STALL"))
(not (match_test "TARGET_HIMODE_MATH"))))
(const_string "imov")
(and (eq_attr "alternative" "1,2")
(match_operand:HI 1 "aligned_operand"))
(const_string "imov")
(and (match_test "TARGET_MOVX")
(eq_attr "alternative" "0,2"))
(const_string "imovx")
]
(const_string "imov")))
(set (attr "prefix")
(cond [(eq_attr "alternative" "4,5,6,7,8,9")
(const_string "maybe_vex")
]
(const_string "orig")))
(set (attr "mode")
(cond [(eq_attr "alternative" "4")
(const_string "V4SF")
(eq_attr "alternative" "6,8")
(if_then_else
(match_test "TARGET_AVX512FP16")
(const_string "HI")
(const_string "SI"))
(eq_attr "alternative" "7,9")
(if_then_else
(match_test "TARGET_AVX512FP16")
(const_string "HI")
(const_string "TI"))
(eq_attr "alternative" "5")
(cond [(match_test "TARGET_AVX512FP16")
(const_string "HF")
(ior (match_test "TARGET_SSE_PARTIAL_REG_DEPENDENCY")
(match_test "TARGET_SSE_SPLIT_REGS"))
(const_string "V4SF")
]
(const_string "SF"))
(eq_attr "type" "imovx")
(const_string "SI")
(and (eq_attr "alternative" "1,2")
(match_operand:HI 1 "aligned_operand"))
(const_string "SI")
(and (eq_attr "alternative" "0")
(ior (not (match_test "TARGET_PARTIAL_REG_STALL"))
(not (match_test "TARGET_HIMODE_MATH"))))
(const_string "SI")
]
(const_string "HI")))])
(define_split
[(set (match_operand 0 "any_fp_register_operand")
(match_operand 1 "memory_operand"))]
"reload_completed
&& (GET_MODE (operands[0]) == TFmode
|| GET_MODE (operands[0]) == XFmode
|| GET_MODE (operands[0]) == DFmode
|| GET_MODE (operands[0]) == SFmode)
&& ix86_standard_x87sse_constant_load_p (insn, operands[0])"
[(set (match_dup 0) (match_dup 2))]
"operands[2] = find_constant_src (curr_insn);")
(define_split
[(set (match_operand 0 "any_fp_register_operand")
(float_extend (match_operand 1 "memory_operand")))]
"reload_completed
&& (GET_MODE (operands[0]) == TFmode
|| GET_MODE (operands[0]) == XFmode
|| GET_MODE (operands[0]) == DFmode)
&& ix86_standard_x87sse_constant_load_p (insn, operands[0])"
[(set (match_dup 0) (match_dup 2))]
"operands[2] = find_constant_src (curr_insn);")
;; Split the load of -0.0 or -1.0 into fldz;fchs or fld1;fchs sequence
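;; (fldz;fchs yields -0.0 and fld1;fchs yields -1.0, avoiding a
;; constant-pool load.)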
(define_split
[(set (match_operand:X87MODEF 0 "fp_register_operand")
(match_operand:X87MODEF 1 "immediate_operand"))]
"reload_completed
&& (standard_80387_constant_p (operands[1]) == 8
|| standard_80387_constant_p (operands[1]) == 9)"
[(set (match_dup 0)(match_dup 1))
(set (match_dup 0)
(neg:X87MODEF (match_dup 0)))]
{
if (real_isnegzero (CONST_DOUBLE_REAL_VALUE (operands[1])))
operands[1] = CONST0_RTX (<MODE>mode);
else
operands[1] = CONST1_RTX (<MODE>mode);
})
(define_insn "*swapxf"
[(set (match_operand:XF 0 "register_operand" "+f")
(match_operand:XF 1 "register_operand" "+f"))
(set (match_dup 1)
(match_dup 0))]
"TARGET_80387"
{
if (STACK_TOP_P (operands[0]))
return "fxch\t%1";
else
return "fxch\t%0";
}
[(set_attr "type" "fxch")
(set_attr "mode" "XF")])
;; Zero extension instructions
(define_expand "zero_extendsidi2"
[(set (match_operand:DI 0 "nonimmediate_operand")
(zero_extend:DI (match_operand:SI 1 "nonimmediate_operand")))])
(define_insn "*zero_extendsidi2"
[(set (match_operand:DI 0 "nonimmediate_operand"
"=r,?r,?o,r ,o,?*y,?!*y,$r,$v,$x,*x,*v,*r,*k")
(zero_extend:DI
(match_operand:SI 1 "x86_64_zext_operand"
"0 ,rm,r ,rmWz,0,r ,m ,v ,r ,m ,*x,*v,*k,*km")))]
""
{
switch (get_attr_type (insn))
{
case TYPE_IMOVX:
if (ix86_use_lea_for_mov (insn, operands))
return "lea{l}\t{%E1, %k0|%k0, %E1}";
else
return "mov{l}\t{%1, %k0|%k0, %1}";
case TYPE_MULTI:
return "#";
case TYPE_MMXMOV:
return "movd\t{%1, %0|%0, %1}";
case TYPE_SSEMOV:
if (SSE_REG_P (operands[0]) && SSE_REG_P (operands[1]))
{
if (EXT_REX_SSE_REG_P (operands[0])
|| EXT_REX_SSE_REG_P (operands[1]))
return "vpmovzxdq\t{%t1, %g0|%g0, %t1}";
else
return "%vpmovzxdq\t{%1, %0|%0, %1}";
}
if (GENERAL_REG_P (operands[0]))
return "%vmovd\t{%1, %k0|%k0, %1}";
return "%vmovd\t{%1, %0|%0, %1}";
case TYPE_MSKMOV:
return "kmovd\t{%1, %k0|%k0, %1}";
default:
gcc_unreachable ();
}
}
[(set (attr "isa")
(cond [(eq_attr "alternative" "0,1,2")
(const_string "nox64")
(eq_attr "alternative" "3")
(const_string "x64")
(eq_attr "alternative" "7,8,9")
(const_string "sse2")
(eq_attr "alternative" "10")
(const_string "sse4")
(eq_attr "alternative" "11")
(const_string "avx512f")
(eq_attr "alternative" "12")
(const_string "x64_avx512bw")
(eq_attr "alternative" "13")
(const_string "avx512bw")
]
(const_string "*")))
(set (attr "mmx_isa")
(if_then_else (eq_attr "alternative" "5,6")
(const_string "native")
(const_string "*")))
(set (attr "type")
(cond [(eq_attr "alternative" "0,1,2,4")
(const_string "multi")
(eq_attr "alternative" "5,6")
(const_string "mmxmov")
(eq_attr "alternative" "7")
(if_then_else (match_test "TARGET_64BIT")
(const_string "ssemov")
(const_string "multi"))
(eq_attr "alternative" "8,9,10,11")
(const_string "ssemov")
(eq_attr "alternative" "12,13")
(const_string "mskmov")
]
(const_string "imovx")))
(set (attr "prefix_extra")
(if_then_else (eq_attr "alternative" "10,11")
(const_string "1")
(const_string "*")))
(set (attr "prefix")
(if_then_else (eq_attr "type" "ssemov")
(const_string "maybe_vex")
(const_string "orig")))
(set (attr "prefix_0f")
(if_then_else (eq_attr "type" "imovx")
(const_string "0")
(const_string "*")))
(set (attr "mode")
(cond [(eq_attr "alternative" "5,6")
(const_string "DI")
(and (eq_attr "alternative" "7")
(match_test "TARGET_64BIT"))
(const_string "TI")
(eq_attr "alternative" "8,10,11")
(const_string "TI")
]
(const_string "SI")))
(set (attr "preferred_for_speed")
(cond [(eq_attr "alternative" "7")
(symbol_ref "TARGET_INTER_UNIT_MOVES_FROM_VEC")
(eq_attr "alternative" "5,8")
(symbol_ref "TARGET_INTER_UNIT_MOVES_TO_VEC")
]
(symbol_ref "true")))])
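;; For illustration: in the plain GPR case on x86-64 (alternative 3 above),
;; the zero extension is just a 32-bit move, because writing a 32-bit
;; register implicitly clears bits 63:32 of the full register, e.g.
;;	movl	%edx, %eax	; %rax = zero_extend:DI (%edx)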
(define_split
[(set (match_operand:DI 0 "memory_operand")
(zero_extend:DI (match_operand:SI 1 "memory_operand")))]
"reload_completed"
[(set (match_dup 4) (const_int 0))]
"split_double_mode (DImode, &operands[0], 1, &operands[3], &operands[4]);")
(define_split
[(set (match_operand:DI 0 "general_reg_operand")
(zero_extend:DI (match_operand:SI 1 "general_reg_operand")))]
"!TARGET_64BIT && reload_completed
&& REGNO (operands[0]) == REGNO (operands[1])"
[(set (match_dup 4) (const_int 0))]
"split_double_mode (DImode, &operands[0], 1, &operands[3], &operands[4]);")
(define_split
[(set (match_operand:DI 0 "nonimmediate_gr_operand")
(zero_extend:DI (match_operand:SI 1 "nonimmediate_operand")))]
"!TARGET_64BIT && reload_completed
&& !(MEM_P (operands[0]) && MEM_P (operands[1]))"
[(set (match_dup 3) (match_dup 1))
(set (match_dup 4) (const_int 0))]
"split_double_mode (DImode, &operands[0], 1, &operands[3], &operands[4]);")
(define_mode_attr kmov_isa
[(QI "avx512dq") (HI "avx512f") (SI "avx512bw") (DI "avx512bw")])
(define_insn "zero_extenddi2"
[(set (match_operand:DI 0 "register_operand" "=r,*r,*k")
(zero_extend:DI
(match_operand:SWI12 1 "nonimmediate_operand" "m,*k,*km")))]
"TARGET_64BIT"
"@
movz{<imodesuffix>l|x}\t{%1, %k0|%k0, %1}
kmov<mskmodesuffix>\t{%1, %k0|%k0, %1}
kmov<mskmodesuffix>\t{%1, %k0|%k0, %1}"
[(set_attr "isa" "*,,")
(set_attr "type" "imovx,mskmov,mskmov")
(set_attr "mode" "SI,,")])
(define_expand "zero_extendsi2"
[(set (match_operand:SI 0 "register_operand")
(zero_extend:SI (match_operand:SWI12 1 "nonimmediate_operand")))]
""
{
if (TARGET_ZERO_EXTEND_WITH_AND && optimize_function_for_speed_p (cfun))
{
operands[1] = force_reg (<MODE>mode, operands[1]);
emit_insn (gen_zero_extend<mode>si2_and (operands[0], operands[1]));
DONE;
}
})
(define_insn_and_split "zero_extendsi2_and"
[(set (match_operand:SI 0 "register_operand" "=r,?&")
(zero_extend:SI
(match_operand:SWI12 1 "nonimmediate_operand" "0,m")))
(clobber (reg:CC FLAGS_REG))]
"TARGET_ZERO_EXTEND_WITH_AND && optimize_function_for_speed_p (cfun)"
"#"
"&& reload_completed"
[(parallel [(set (match_dup 0) (and:SI (match_dup 0) (match_dup 2)))
(clobber (reg:CC FLAGS_REG))])]
{
if (!REG_P (operands[1])
|| REGNO (operands[0]) != REGNO (operands[1]))
{
ix86_expand_clear (operands[0]);
gcc_assert (!TARGET_PARTIAL_REG_STALL);
emit_insn (gen_rtx_SET
(gen_rtx_STRICT_LOW_PART
(VOIDmode, gen_lowpart (<MODE>mode, operands[0])),
operands[1]));
DONE;
}
operands[2] = GEN_INT (GET_MODE_MASK (<MODE>mode));
}
[(set_attr "type" "alu1")
(set_attr "mode" "SI")])
(define_insn "*zero_extendsi2"
[(set (match_operand:SI 0 "register_operand" "=r,*r,*k")
(zero_extend:SI
(match_operand:SWI12 1 "nonimmediate_operand" "m,*k,*km")))]
"!(TARGET_ZERO_EXTEND_WITH_AND && optimize_function_for_speed_p (cfun))"
"@
movz{<imodesuffix>l|x}\t{%1, %0|%0, %1}
kmov<mskmodesuffix>\t{%1, %0|%0, %1}
kmov<mskmodesuffix>\t{%1, %0|%0, %1}"
[(set_attr "isa" "*,,")
(set_attr "type" "imovx,mskmov,mskmov")
(set_attr "mode" "SI,,")])
(define_expand "zero_extendqihi2"
[(set (match_operand:HI 0 "register_operand")
(zero_extend:HI (match_operand:QI 1 "nonimmediate_operand")))]
""
{
if (TARGET_ZERO_EXTEND_WITH_AND && optimize_function_for_speed_p (cfun))
{
operands[1] = force_reg (QImode, operands[1]);
emit_insn (gen_zero_extendqihi2_and (operands[0], operands[1]));
DONE;
}
})
(define_insn_and_split "zero_extendqihi2_and"
[(set (match_operand:HI 0 "register_operand" "=r,?&q")
(zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "0,qm")))
(clobber (reg:CC FLAGS_REG))]
"TARGET_ZERO_EXTEND_WITH_AND && optimize_function_for_speed_p (cfun)"
"#"
"&& reload_completed"
[(parallel [(set (match_dup 0) (and:SI (match_dup 0) (const_int 255)))
(clobber (reg:CC FLAGS_REG))])]
{
if (!REG_P (operands[1])
|| REGNO (operands[0]) != REGNO (operands[1]))
{
ix86_expand_clear (operands[0]);
gcc_assert (!TARGET_PARTIAL_REG_STALL);
emit_insn (gen_rtx_SET
(gen_rtx_STRICT_LOW_PART
(VOIDmode, gen_lowpart (QImode, operands[0])),
operands[1]));
DONE;
}
operands[0] = gen_lowpart (SImode, operands[0]);
}
[(set_attr "type" "alu1")
(set_attr "mode" "SI")])
;; Zero extend to SImode to avoid partial register stalls.
(define_insn "*zero_extendqihi2"
[(set (match_operand:HI 0 "register_operand" "=r,*r,*k")
(zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "qm,*k,*km")))]
"!(TARGET_ZERO_EXTEND_WITH_AND && optimize_function_for_speed_p (cfun))"
"@
movz{bl|x}\t{%1, %k0|%k0, %1}
kmovb\t{%1, %k0|%k0, %1}
kmovb\t{%1, %0|%0, %1}"
[(set_attr "isa" "*,avx512dq,avx512dq")
(set_attr "type" "imovx,mskmov,mskmov")
(set_attr "mode" "SI,QI,QI")])
;; Transform xorl; mov[bw] (set strict_low_part) into movz[bw]l.
(define_peephole2
[(parallel [(set (match_operand:SWI48 0 "general_reg_operand")
(const_int 0))
(clobber (reg:CC FLAGS_REG))])
(set (strict_low_part (match_operand:SWI12 1 "general_reg_operand"))
(match_operand:SWI12 2 "nonimmediate_operand"))]
"REGNO (operands[0]) == REGNO (operands[1])
&& (mode != SImode
|| !TARGET_ZERO_EXTEND_WITH_AND
|| !optimize_function_for_speed_p (cfun))"
[(set (match_dup 0) (zero_extend:SWI48 (match_dup 2)))])
;; Likewise, but preserving FLAGS_REG.
(define_peephole2
[(set (match_operand:SWI48 0 "general_reg_operand") (const_int 0))
(set (strict_low_part (match_operand:SWI12 1 "general_reg_operand"))
(match_operand:SWI12 2 "nonimmediate_operand"))]
"REGNO (operands[0]) == REGNO (operands[1])
&& (mode != SImode
|| !TARGET_ZERO_EXTEND_WITH_AND
|| !optimize_function_for_speed_p (cfun))"
[(set (match_dup 0) (zero_extend:SWI48 (match_dup 2)))])
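;; For illustration: assuming %al is the low QImode part of %eax, the first
;; peephole2 above rewrites
;;	xorl	%eax, %eax
;;	movb	%cl, %al
;; into the single insn
;;	movzbl	%cl, %eax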
;; Sign extension instructions
(define_expand "extendsidi2"
[(set (match_operand:DI 0 "register_operand")
(sign_extend:DI (match_operand:SI 1 "register_operand")))]
""
{
if (!TARGET_64BIT)
{
emit_insn (gen_extendsidi2_1 (operands[0], operands[1]));
DONE;
}
})
(define_insn "*extendsidi2_rex64"
[(set (match_operand:DI 0 "register_operand" "=*a,r")
(sign_extend:DI (match_operand:SI 1 "nonimmediate_operand" "*0,rm")))]
"TARGET_64BIT"
"@
{cltq|cdqe}
movs{lq|x}\t{%1, %0|%0, %1}"
[(set_attr "type" "imovx")
(set_attr "mode" "DI")
(set_attr "prefix_0f" "0")
(set_attr "modrm" "0,1")])
(define_insn "extendsidi2_1"
[(set (match_operand:DI 0 "nonimmediate_operand" "=*A,r,?r,?*o")
(sign_extend:DI (match_operand:SI 1 "register_operand" "0,0,r,r")))
(clobber (reg:CC FLAGS_REG))
(clobber (match_scratch:SI 2 "=X,X,X,&r"))]
"!TARGET_64BIT"
"#")
;; Split the memory case.  If the source register doesn't die, it will stay
;; this way; if it does die, the following peephole2s take care of it.
(define_split
[(set (match_operand:DI 0 "memory_operand")
(sign_extend:DI (match_operand:SI 1 "register_operand")))
(clobber (reg:CC FLAGS_REG))
(clobber (match_operand:SI 2 "register_operand"))]
"reload_completed"
[(const_int 0)]
{
split_double_mode (DImode, &operands[0], 1, &operands[3], &operands[4]);
emit_move_insn (operands[3], operands[1]);
/* Generate a cltd if possible and doing so is profitable.  */
if ((optimize_function_for_size_p (cfun) || TARGET_USE_CLTD)
&& REGNO (operands[1]) == AX_REG
&& REGNO (operands[2]) == DX_REG)
{
emit_insn (gen_ashrsi3_cvt (operands[2], operands[1], GEN_INT (31)));
}
else
{
emit_move_insn (operands[2], operands[1]);
emit_insn (gen_ashrsi3_cvt (operands[2], operands[2], GEN_INT (31)));
}
emit_move_insn (operands[4], operands[2]);
DONE;
})
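;; For illustration: assuming operands[1] is %eax and the scratch register
;; is %edx, the split above stores the sign-extended value as
;;	movl	%eax, lo	; lo/hi are the two halves of the
;;	cltd			; DImode memory destination
;;	movl	%edx, hi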
;; Peepholes for the case where the source register does die, after
;; being split with the above splitter.
(define_peephole2
[(set (match_operand:SI 0 "memory_operand")
(match_operand:SI 1 "general_reg_operand"))
(set (match_operand:SI 2 "general_reg_operand") (match_dup 1))
(parallel [(set (match_dup 2)
(ashiftrt:SI (match_dup 2) (const_int 31)))
(clobber (reg:CC FLAGS_REG))])
(set (match_operand:SI 3 "memory_operand") (match_dup 2))]
"REGNO (operands[1]) != REGNO (operands[2])
&& peep2_reg_dead_p (2, operands[1])
&& peep2_reg_dead_p (4, operands[2])
&& !reg_mentioned_p (operands[2], operands[3])"
[(set (match_dup 0) (match_dup 1))
(parallel [(set (match_dup 1) (ashiftrt:SI (match_dup 1) (const_int 31)))
(clobber (reg:CC FLAGS_REG))])
(set (match_dup 3) (match_dup 1))])
(define_peephole2
[(set (match_operand:SI 0 "memory_operand")
(match_operand:SI 1 "general_reg_operand"))
(parallel [(set (match_operand:SI 2 "general_reg_operand")
(ashiftrt:SI (match_dup 1) (const_int 31)))
(clobber (reg:CC FLAGS_REG))])
(set (match_operand:SI 3 "memory_operand") (match_dup 2))]
"/* cltd is shorter than sarl $31, %eax */
!optimize_function_for_size_p (cfun)
&& REGNO (operands[1]) == AX_REG
&& REGNO (operands[2]) == DX_REG
&& peep2_reg_dead_p (2, operands[1])
&& peep2_reg_dead_p (3, operands[2])
&& !reg_mentioned_p (operands[2], operands[3])"
[(set (match_dup 0) (match_dup 1))
(parallel [(set (match_dup 1) (ashiftrt:SI (match_dup 1) (const_int 31)))
(clobber (reg:CC FLAGS_REG))])
(set (match_dup 3) (match_dup 1))])
;; Extend to register case. Optimize case where source and destination
;; registers match and cases where we can use cltd.
(define_split
[(set (match_operand:DI 0 "register_operand")
(sign_extend:DI (match_operand:SI 1 "register_operand")))
(clobber (reg:CC FLAGS_REG))
(clobber (match_scratch:SI 2))]
"reload_completed"
[(const_int 0)]
{
split_double_mode (DImode, &operands[0], 1, &operands[3], &operands[4]);
if (REGNO (operands[3]) != REGNO (operands[1]))
emit_move_insn (operands[3], operands[1]);
/* Generate a cltd if possible and doing so is profitable.  */
if ((optimize_function_for_size_p (cfun) || TARGET_USE_CLTD)
&& REGNO (operands[3]) == AX_REG
&& REGNO (operands[4]) == DX_REG)
{
emit_insn (gen_ashrsi3_cvt (operands[4], operands[3], GEN_INT (31)));
DONE;
}
if (REGNO (operands[4]) != REGNO (operands[1]))
emit_move_insn (operands[4], operands[1]);
emit_insn (gen_ashrsi3_cvt (operands[4], operands[4], GEN_INT (31)));
DONE;
})
(define_insn "extenddi2"
[(set (match_operand:DI 0 "register_operand" "=r")
(sign_extend:DI
(match_operand:SWI12 1 "nonimmediate_operand" "m")))]
"TARGET_64BIT"
"movs{q|x}\t{%1, %0|%0, %1}"
[(set_attr "type" "imovx")
(set_attr "mode" "DI")])
(define_insn "extendhisi2"
[(set (match_operand:SI 0 "register_operand" "=*a,r")
(sign_extend:SI (match_operand:HI 1 "nonimmediate_operand" "*0,rm")))]
""
{
switch (get_attr_prefix_0f (insn))
{
case 0:
return "{cwtl|cwde}";
default:
return "movs{wl|x}\t{%1, %0|%0, %1}";
}
}
[(set_attr "type" "imovx")
(set_attr "mode" "SI")
(set (attr "prefix_0f")
;; movsx is short decodable while cwtl is vector decoded.
(if_then_else (and (eq_attr "cpu" "!k6")
(eq_attr "alternative" "0"))
(const_string "0")
(const_string "1")))
(set (attr "znver1_decode")
(if_then_else (eq_attr "prefix_0f" "0")
(const_string "double")
(const_string "direct")))
(set (attr "modrm")
(if_then_else (eq_attr "prefix_0f" "0")
(const_string "0")
(const_string "1")))])
(define_insn "*extendhisi2_zext"
[(set (match_operand:DI 0 "register_operand" "=*a,r")
(zero_extend:DI
(sign_extend:SI
(match_operand:HI 1 "nonimmediate_operand" "*0,rm"))))]
"TARGET_64BIT"
{
switch (get_attr_prefix_0f (insn))
{
case 0:
return "{cwtl|cwde}";
default:
return "movs{wl|x}\t{%1, %k0|%k0, %1}";
}
}
[(set_attr "type" "imovx")
(set_attr "mode" "SI")
(set (attr "prefix_0f")
;; movsx is short decodable while cwtl is vector decoded.
(if_then_else (and (eq_attr "cpu" "!k6")
(eq_attr "alternative" "0"))
(const_string "0")
(const_string "1")))
(set (attr "modrm")
(if_then_else (eq_attr "prefix_0f" "0")
(const_string "0")
(const_string "1")))])
(define_insn "extendqisi2"
[(set (match_operand:SI 0 "register_operand" "=r")
(sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "qm")))]
""
"movs{bl|x}\t{%1, %0|%0, %1}"
[(set_attr "type" "imovx")
(set_attr "mode" "SI")])
(define_insn "*extendqisi2_zext"
[(set (match_operand:DI 0 "register_operand" "=r")
(zero_extend:DI
(sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "qm"))))]
"TARGET_64BIT"
"movs{bl|x}\t{%1, %k0|%k0, %1}"
[(set_attr "type" "imovx")
(set_attr "mode" "SI")])
(define_insn "extendqihi2"
[(set (match_operand:HI 0 "register_operand" "=*a,r")
(sign_extend:HI (match_operand:QI 1 "nonimmediate_operand" "*0,qm")))]
""
{
switch (get_attr_prefix_0f (insn))
{
case 0:
return "{cbtw|cbw}";
default:
return "movs{bw|x}\t{%1, %0|%0, %1}";
}
}
[(set_attr "type" "imovx")
(set_attr "mode" "HI")
(set (attr "prefix_0f")
;; movsx is short decodable while cbtw is vector decoded.
(if_then_else (and (eq_attr "cpu" "!k6")
(eq_attr "alternative" "0"))
(const_string "0")
(const_string "1")))
(set (attr "modrm")
(if_then_else (eq_attr "prefix_0f" "0")
(const_string "0")
(const_string "1")))])
;; Conversions between float and double.
;; These are all no-ops in the model used for the 80387.
;; So just emit moves.
;; %%% Kill these when call knows how to work out a DFmode push earlier.
(define_split
[(set (match_operand:DF 0 "push_operand")
(float_extend:DF (match_operand:SF 1 "fp_register_operand")))]
"reload_completed"
[(set (reg:P SP_REG) (plus:P (reg:P SP_REG) (const_int -8)))
(set (mem:DF (reg:P SP_REG)) (float_extend:DF (match_dup 1)))])
(define_split
[(set (match_operand:XF 0 "push_operand")
(float_extend:XF (match_operand:MODEF 1 "fp_register_operand")))]
"reload_completed"
[(set (reg:P SP_REG) (plus:P (reg:P SP_REG) (match_dup 2)))
(set (mem:XF (reg:P SP_REG)) (float_extend:XF (match_dup 1)))]
"operands[2] = GEN_INT (-GET_MODE_SIZE (XFmode));")
(define_expand "extendsfdf2"
[(set (match_operand:DF 0 "nonimm_ssenomem_operand")
(float_extend:DF (match_operand:SF 1 "general_operand")))]
"TARGET_80387 || (TARGET_SSE2 && TARGET_SSE_MATH)"
{
/* ??? Needed for compress_float_constant since all fp constants
are TARGET_LEGITIMATE_CONSTANT_P. */
if (CONST_DOUBLE_P (operands[1]))
{
if ((!TARGET_SSE2 || TARGET_MIX_SSE_I387)
&& standard_80387_constant_p (operands[1]) > 0)
{
operands[1] = simplify_const_unary_operation
(FLOAT_EXTEND, DFmode, operands[1], SFmode);
emit_move_insn_1 (operands[0], operands[1]);
DONE;
}
operands[1] = validize_mem (force_const_mem (SFmode, operands[1]));
}
})
(define_insn "*extendsfdf2"
[(set (match_operand:DF 0 "nonimm_ssenomem_operand" "=f,m,v,v")
(float_extend:DF
(match_operand:SF 1 "nonimmediate_operand" "fm,f,v,m")))]
"TARGET_80387 || (TARGET_SSE2 && TARGET_SSE_MATH)"
{
switch (which_alternative)
{
case 0:
case 1:
return output_387_reg_move (insn, operands);
case 2:
return "%vcvtss2sd\t{%d1, %0|%0, %d1}";
case 3:
return "%vcvtss2sd\t{%1, %d0|%d0, %1}";
default:
gcc_unreachable ();
}
}
[(set_attr "type" "fmov,fmov,ssecvt,ssecvt")
(set_attr "avx_partial_xmm_update" "false,false,false,true")
(set_attr "prefix" "orig,orig,maybe_vex,maybe_vex")
(set_attr "mode" "SF,XF,DF,DF")
(set (attr "enabled")
(if_then_else
(match_test ("TARGET_SSE2 && TARGET_SSE_MATH"))
(if_then_else
(eq_attr "alternative" "0,1")
(symbol_ref "TARGET_MIX_SSE_I387")
(symbol_ref "true"))
(if_then_else
(eq_attr "alternative" "0,1")
(symbol_ref "true")
(symbol_ref "false"))))])
/* For converting SF(xmm2) to DF(xmm1), use the following code instead of
cvtss2sd:
unpcklps xmm2,xmm2 ; packed conversion might crash on signaling NaNs
cvtps2pd xmm2,xmm1
We do the conversion post reload to avoid producing 128-bit spills,
which might lead to an ICE on a 32-bit target.  The sequence is
unlikely to combine anyway.  */
(define_split
[(set (match_operand:DF 0 "sse_reg_operand")
(float_extend:DF
(match_operand:SF 1 "nonimmediate_operand")))]
"TARGET_USE_VECTOR_FP_CONVERTS
&& optimize_insn_for_speed_p ()
&& reload_completed
&& (!EXT_REX_SSE_REG_P (operands[0])
|| TARGET_AVX512VL)"
[(set (match_dup 2)
(float_extend:V2DF
(vec_select:V2SF
(match_dup 3)
(parallel [(const_int 0) (const_int 1)]))))]
{
operands[2] = lowpart_subreg (V2DFmode, operands[0], DFmode);
operands[3] = lowpart_subreg (V4SFmode, operands[0], DFmode);
/* Use movss for loading from memory, unpcklps reg, reg for registers.
Try to avoid move when unpacking can be done in source. */
if (REG_P (operands[1]))
{
/* If it is unsafe to overwrite upper half of source, we need
to move to destination and unpack there. */
if (REGNO (operands[0]) != REGNO (operands[1])
|| (EXT_REX_SSE_REG_P (operands[1])
&& !TARGET_AVX512VL))
{
rtx tmp = lowpart_subreg (SFmode, operands[0], DFmode);
emit_move_insn (tmp, operands[1]);
}
else
operands[3] = lowpart_subreg (V4SFmode, operands[1], SFmode);
/* FIXME: vec_interleave_lowv4sf for AVX512VL should allow
=v, v, then vbroadcastss will be only needed for AVX512F without
AVX512VL. */
if (!EXT_REX_SSE_REGNO_P (REGNO (operands[3])))
emit_insn (gen_vec_interleave_lowv4sf (operands[3], operands[3],
operands[3]));
else
{
rtx tmp = lowpart_subreg (V16SFmode, operands[3], V4SFmode);
emit_insn (gen_avx512f_vec_dupv16sf_1 (tmp, tmp));
}
}
else
emit_insn (gen_vec_setv4sf_0 (operands[3],
CONST0_RTX (V4SFmode), operands[1]));
})
;; It's more profitable to split and then extend in the same register.
(define_peephole2
[(set (match_operand:DF 0 "sse_reg_operand")
(float_extend:DF
(match_operand:SF 1 "memory_operand")))]
"TARGET_SPLIT_MEM_OPND_FOR_FP_CONVERTS
&& optimize_insn_for_speed_p ()"
[(set (match_dup 2) (match_dup 1))
(set (match_dup 0) (float_extend:DF (match_dup 2)))]
"operands[2] = lowpart_subreg (SFmode, operands[0], DFmode);")
;; Break partial SSE register dependency stall.  This splitter should split
;; late in the pass sequence (after the register rename pass), so allocated
;; registers won't change anymore.
(define_split
[(set (match_operand:DF 0 "sse_reg_operand")
(float_extend:DF
(match_operand:SF 1 "nonimmediate_operand")))]
"!TARGET_AVX
&& TARGET_SSE_PARTIAL_REG_FP_CONVERTS_DEPENDENCY
&& epilogue_completed
&& optimize_function_for_speed_p (cfun)
&& (!REG_P (operands[1])
|| (!TARGET_AVX && REGNO (operands[0]) != REGNO (operands[1])))
&& (!EXT_REX_SSE_REG_P (operands[0])
|| TARGET_AVX512VL)"
[(set (match_dup 0)
(vec_merge:V2DF
(vec_duplicate:V2DF
(float_extend:DF
(match_dup 1)))
(match_dup 0)
(const_int 1)))]
{
operands[0] = lowpart_subreg (V2DFmode, operands[0], DFmode);
emit_move_insn (operands[0], CONST0_RTX (V2DFmode));
})
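;; For illustration: the dependency-breaking split above zeroes the
;; destination before converting into its low element, e.g.
;;	xorps	%xmm0, %xmm0
;;	cvtss2sd	%xmm1, %xmm0
;; so the convert no longer has a false dependency on the old contents
;; of %xmm0.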
(define_expand "extendhfsf2"
[(set (match_operand:SF 0 "register_operand")
(float_extend:SF
(match_operand:HF 1 "nonimmediate_operand")))]
"TARGET_AVX512FP16 || TARGET_F16C || TARGET_AVX512VL"
{
if (!TARGET_AVX512FP16)
{
rtx res = gen_reg_rtx (V4SFmode);
rtx tmp = gen_reg_rtx (V8HFmode);
rtx zero = force_reg (V8HFmode, CONST0_RTX (V8HFmode));
emit_insn (gen_vec_setv8hf_0 (tmp, zero, operands[1]));
emit_insn (gen_vcvtph2ps (res, gen_lowpart (V8HImode, tmp)));
emit_move_insn (operands[0], gen_lowpart (SFmode, res));
DONE;
}
})
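;; For illustration: on F16C targets without AVX512FP16 the expander above
;; emulates the scalar extension by placing the HFmode value in element 0
;; of a zeroed V8HF register, converting it with the packed vcvtph2ps
;; instruction, and taking the low SFmode element of the result.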
(define_expand "extendhfdf2"
[(set (match_operand:DF 0 "register_operand")
(float_extend:DF
(match_operand:HF 1 "nonimmediate_operand")))]
"TARGET_AVX512FP16")
(define_insn "*extendhf2"
[(set (match_operand:MODEF 0 "register_operand" "=v")
(float_extend:MODEF
(match_operand:HF 1 "nonimmediate_operand" "vm")))]
"TARGET_AVX512FP16"
"vcvtsh2\t{%1, %0, %0|%0, %0, %1}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
(set_attr "mode" "")])
(define_expand "extendxf2"
[(set (match_operand:XF 0 "nonimmediate_operand")
(float_extend:XF (match_operand:MODEF 1 "general_operand")))]
"TARGET_80387"
{
/* ??? Needed for compress_float_constant since all fp constants
are TARGET_LEGITIMATE_CONSTANT_P. */
if (CONST_DOUBLE_P (operands[1]))
{
if (standard_80387_constant_p (operands[1]) > 0)
{
operands[1] = simplify_const_unary_operation
(FLOAT_EXTEND, XFmode, operands[1], <MODE>mode);
emit_move_insn_1 (operands[0], operands[1]);
DONE;
}
operands[1] = validize_mem (force_const_mem (<MODE>mode, operands[1]));
}
})
(define_insn "*extendxf2_i387"
[(set (match_operand:XF 0 "nonimmediate_operand" "=f,m")
(float_extend:XF
(match_operand:MODEF 1 "nonimmediate_operand" "fm,f")))]
"TARGET_80387"
"* return output_387_reg_move (insn, operands);"
[(set_attr "type" "fmov")
(set_attr "mode" ",XF")])
;; %%% This seems like bad news.
;; This cannot output into an f-reg because there is no way to be sure
;; of truncating in that case. Otherwise this is just like a simple move
;; insn. So we pretend we can output to a reg in order to get better
;; register preferencing, but we really use a stack slot.
;; Conversion from DFmode to SFmode.
(define_insn "truncdfsf2"
[(set (match_operand:SF 0 "nonimm_ssenomem_operand" "=m,f,v,v")
(float_truncate:SF
(match_operand:DF 1 "register_ssemem_operand" "f,f,v,m")))]
"TARGET_80387 || (TARGET_SSE2 && TARGET_SSE_MATH)"
{
switch (which_alternative)
{
case 0:
case 1:
return output_387_reg_move (insn, operands);
case 2:
return "%vcvtsd2ss\t{%d1, %0|%0, %d1}";
case 3:
return "%vcvtsd2ss\t{%1, %d0|%d0, %1}";
default:
gcc_unreachable ();
}
}
[(set_attr "type" "fmov,fmov,ssecvt,ssecvt")
(set_attr "avx_partial_xmm_update" "false,false,false,true")
(set_attr "mode" "SF")
(set (attr "enabled")
(if_then_else
(match_test ("TARGET_SSE2 && TARGET_SSE_MATH"))
(cond [(eq_attr "alternative" "0")
(symbol_ref "TARGET_MIX_SSE_I387")
(eq_attr "alternative" "1")
(symbol_ref "TARGET_MIX_SSE_I387
&& flag_unsafe_math_optimizations")
]
(symbol_ref "true"))
(cond [(eq_attr "alternative" "0")
(symbol_ref "true")
(eq_attr "alternative" "1")
(symbol_ref "flag_unsafe_math_optimizations")
]
(symbol_ref "false"))))])
/* For converting DF(xmm2) to SF(xmm1), use the following code instead of
cvtsd2ss:
unpcklpd xmm2,xmm2 ; packed conversion might crash on signaling NaNs
cvtpd2ps xmm2,xmm1
We do the conversion post reload to avoid producing 128-bit spills,
which might lead to an ICE on a 32-bit target.  The sequence is
unlikely to combine anyway.  */
(define_split
[(set (match_operand:SF 0 "sse_reg_operand")
(float_truncate:SF
(match_operand:DF 1 "nonimmediate_operand")))]
"TARGET_USE_VECTOR_FP_CONVERTS
&& optimize_insn_for_speed_p ()
&& reload_completed
&& (!EXT_REX_SSE_REG_P (operands[0])
|| TARGET_AVX512VL)"
[(set (match_dup 2)
(vec_concat:V4SF
(float_truncate:V2SF
(match_dup 4))
(match_dup 3)))]
{
operands[2] = lowpart_subreg (V4SFmode, operands[0], SFmode);
operands[3] = CONST0_RTX (V2SFmode);
operands[4] = lowpart_subreg (V2DFmode, operands[0], SFmode);
/* Use movsd for loading from memory, unpcklpd for registers.
Try to avoid move when unpacking can be done in source, or SSE3
movddup is available. */
if (REG_P (operands[1]))
{
if ((!TARGET_SSE3 && REGNO (operands[0]) != REGNO (operands[1]))
|| (EXT_REX_SSE_REG_P (operands[1]) && !TARGET_AVX512VL))
{
rtx tmp = lowpart_subreg (DFmode, operands[0], SFmode);
emit_move_insn (tmp, operands[1]);
operands[1] = tmp;
}
else if (!TARGET_SSE3)
operands[4] = lowpart_subreg (V2DFmode, operands[1], DFmode);
emit_insn (gen_vec_dupv2df (operands[4], operands[1]));
}
else
emit_insn (gen_vec_concatv2df (operands[4], operands[1],
CONST0_RTX (DFmode)));
})
;; It's more profitable to split and then truncate in the same register.
(define_peephole2
[(set (match_operand:SF 0 "sse_reg_operand")
(float_truncate:SF
(match_operand:DF 1 "memory_operand")))]
"TARGET_SPLIT_MEM_OPND_FOR_FP_CONVERTS
&& optimize_insn_for_speed_p ()"
[(set (match_dup 2) (match_dup 1))
(set (match_dup 0) (float_truncate:SF (match_dup 2)))]
"operands[2] = lowpart_subreg (DFmode, operands[0], SFmode);")
;; Break partial SSE register dependency stall.  This splitter should split
;; late in the pass sequence (after the register rename pass), so allocated
;; registers won't change anymore.
(define_split
[(set (match_operand:SF 0 "sse_reg_operand")
(float_truncate:SF
(match_operand:DF 1 "nonimmediate_operand")))]
"!TARGET_AVX
&& TARGET_SSE_PARTIAL_REG_FP_CONVERTS_DEPENDENCY
&& epilogue_completed
&& optimize_function_for_speed_p (cfun)
&& (!REG_P (operands[1])
|| (!TARGET_AVX && REGNO (operands[0]) != REGNO (operands[1])))
&& (!EXT_REX_SSE_REG_P (operands[0])
|| TARGET_AVX512VL)"
[(set (match_dup 0)
(vec_merge:V4SF
(vec_duplicate:V4SF
(float_truncate:SF
(match_dup 1)))
(match_dup 0)
(const_int 1)))]
{
operands[0] = lowpart_subreg (V4SFmode, operands[0], SFmode);
emit_move_insn (operands[0], CONST0_RTX (V4SFmode));
})
;; Conversion from XFmode to {SF,DF}mode
(define_insn "truncxf2"
[(set (match_operand:MODEF 0 "nonimmediate_operand" "=m,f")
(float_truncate:MODEF
(match_operand:XF 1 "register_operand" "f,f")))]
"TARGET_80387"
"* return output_387_reg_move (insn, operands);"
[(set_attr "type" "fmov")
(set_attr "mode" "")
(set (attr "enabled")
(cond [(eq_attr "alternative" "1")
(symbol_ref "flag_unsafe_math_optimizations")
]
(symbol_ref "true")))])
;; Conversion from {SF,DF}mode to HFmode.
(define_expand "truncsfhf2"
[(set (match_operand:HF 0 "register_operand")
(float_truncate:HF
(match_operand:SF 1 "nonimmediate_operand")))]
"TARGET_AVX512FP16 || TARGET_F16C || TARGET_AVX512VL"
{
if (!TARGET_AVX512FP16)
{
rtx res = gen_reg_rtx (V8HFmode);
rtx tmp = gen_reg_rtx (V4SFmode);
rtx zero = force_reg (V4SFmode, CONST0_RTX (V4SFmode));
emit_insn (gen_vec_setv4sf_0 (tmp, zero, operands[1]));
emit_insn (gen_vcvtps2ph (gen_lowpart (V8HImode, res), tmp, GEN_INT (4)));
emit_move_insn (operands[0], gen_lowpart (HFmode, res));
DONE;
}
})
(define_expand "truncdfhf2"
[(set (match_operand:HF 0 "register_operand")
(float_truncate:HF
(match_operand:DF 1 "nonimmediate_operand")))]
"TARGET_AVX512FP16")
(define_insn "*trunchf2"
[(set (match_operand:HF 0 "register_operand" "=v")
(float_truncate:HF
(match_operand:MODEF 1 "nonimmediate_operand" "vm")))]
"TARGET_AVX512FP16"
"vcvt2sh\t{%1, %d0|%d0, %1}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
(set_attr "mode" "HF")])
;; Signed conversion to DImode.
(define_expand "fix_truncxfdi2"
[(parallel [(set (match_operand:DI 0 "nonimmediate_operand")
(fix:DI (match_operand:XF 1 "register_operand")))
(clobber (reg:CC FLAGS_REG))])]
"TARGET_80387"
{
if (TARGET_FISTTP)
{
emit_insn (gen_fix_truncdi_i387_fisttp (operands[0], operands[1]));
DONE;
}
})
(define_expand "fix_truncdi2"
[(parallel [(set (match_operand:DI 0 "nonimmediate_operand")
(fix:DI (match_operand:MODEF 1 "register_operand")))
(clobber (reg:CC FLAGS_REG))])]
"TARGET_80387 || (TARGET_64BIT && SSE_FLOAT_MODE_P (mode))"
{
if (TARGET_FISTTP
&& !(TARGET_64BIT && SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH))
{
emit_insn (gen_fix_truncdi_i387_fisttp (operands[0], operands[1]));
DONE;
}
if (TARGET_64BIT && SSE_FLOAT_MODE_P (mode))
{
rtx out = REG_P (operands[0]) ? operands[0] : gen_reg_rtx (DImode);
emit_insn (gen_fix_trunc<mode>di_sse (out, operands[1]));
if (out != operands[0])
emit_move_insn (operands[0], out);
DONE;
}
})
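;; For illustration: with 64-bit SSE math the expander above reduces a
;; DFmode to DImode conversion to a single truncating instruction, e.g.
;;	cvttsd2si	%xmm0, %rax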
(define_insn "fix_trunchf2"
[(set (match_operand:SWI48 0 "register_operand" "=r")
(any_fix:SWI48
(match_operand:HF 1 "nonimmediate_operand" "vm")))]
"TARGET_AVX512FP16"
"vcvttsh2si\t{%1, %0|%0, %1}"
[(set_attr "type" "sseicvt")
(set_attr "prefix" "evex")
(set_attr "mode" "")])
;; Signed conversion to SImode.
(define_expand "fix_truncxfsi2"
[(parallel [(set (match_operand:SI 0 "nonimmediate_operand")
(fix:SI (match_operand:XF 1 "register_operand")))
(clobber (reg:CC FLAGS_REG))])]
"TARGET_80387"
{
if (TARGET_FISTTP)
{
emit_insn (gen_fix_truncsi_i387_fisttp (operands[0], operands[1]));
DONE;
}
})
(define_expand "fix_truncsi2"
[(parallel [(set (match_operand:SI 0 "nonimmediate_operand")
(fix:SI (match_operand:MODEF 1 "register_operand")))
(clobber (reg:CC FLAGS_REG))])]
"TARGET_80387 || SSE_FLOAT_MODE_P (mode)"
{
if (TARGET_FISTTP
&& !(SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH))
{
emit_insn (gen_fix_truncsi_i387_fisttp (operands[0], operands[1]));
DONE;
}
if (SSE_FLOAT_MODE_P (<MODE>mode))
{
rtx out = REG_P (operands[0]) ? operands[0] : gen_reg_rtx (SImode);
emit_insn (gen_fix_trunc<mode>si_sse (out, operands[1]));
if (out != operands[0])
emit_move_insn (operands[0], out);
DONE;
}
})
;; Signed conversion to HImode.
(define_expand "fix_trunchi2"
[(parallel [(set (match_operand:HI 0 "nonimmediate_operand")
(fix:HI (match_operand:X87MODEF 1 "register_operand")))
(clobber (reg:CC FLAGS_REG))])]
"TARGET_80387
&& !(SSE_FLOAT_MODE_P (mode) && (!TARGET_FISTTP || TARGET_SSE_MATH))"
{
if (TARGET_FISTTP)
{
emit_insn (gen_fix_trunchi_i387_fisttp (operands[0], operands[1]));
DONE;
}
})
;; Unsigned conversion to DImode
(define_insn "fixuns_truncdi2"
[(set (match_operand:DI 0 "register_operand" "=r")
(unsigned_fix:DI
(match_operand:MODEF 1 "nonimmediate_operand" "vm")))]
"TARGET_64BIT && TARGET_AVX512F && TARGET_SSE_MATH"
"vcvtt2usi\t{%1, %0|%0, %1}"
[(set_attr "type" "sseicvt")
(set_attr "prefix" "evex")
(set_attr "mode" "DI")])
;; Unsigned conversion to SImode.
(define_expand "fixuns_truncsi2"
[(parallel
[(set (match_operand:SI 0 "register_operand")
(unsigned_fix:SI
(match_operand:MODEF 1 "nonimmediate_operand")))
(use (match_dup 2))
(clobber (scratch:<ssevecmode>))
(clobber (scratch:<ssevecmode>))])]
"(!TARGET_64BIT || TARGET_AVX512F) && TARGET_SSE2 && TARGET_SSE_MATH"
{
machine_mode mode = <MODE>mode;
machine_mode vecmode = <ssevecmode>mode;
REAL_VALUE_TYPE TWO31r;
rtx two31;
if (TARGET_AVX512F)
{
emit_insn (gen_fixuns_trunc<mode>si2_avx512f (operands[0], operands[1]));
DONE;
}
if (optimize_insn_for_size_p ())
FAIL;
real_ldexp (&TWO31r, &dconst1, 31);
two31 = const_double_from_real_value (TWO31r, mode);
two31 = ix86_build_const_vector (vecmode, true, two31);
operands[2] = force_reg (vecmode, two31);
})
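;; For illustration: the vector constant 2^31 set up above feeds
;; ix86_split_convert_uns_si_sse (via "*fixuns_trunc<mode>_1" below).
;; The rough idea: inputs too large for a signed conversion have 2^31
;; subtracted before the signed cvtt, and the integer result is then
;; adjusted by 2^31 afterwards.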
(define_insn "fixuns_truncsi2_avx512f"
[(set (match_operand:SI 0 "register_operand" "=r")
(unsigned_fix:SI
(match_operand:MODEF 1 "nonimmediate_operand" "vm")))]
"TARGET_AVX512F && TARGET_SSE_MATH"
"vcvtt2usi\t{%1, %0|%0, %1}"
[(set_attr "type" "sseicvt")
(set_attr "prefix" "evex")
(set_attr "mode" "SI")])
(define_insn "*fixuns_trunchfsi2zext"
[(set (match_operand:DI 0 "register_operand" "=r")
(zero_extend:DI
(unsigned_fix:SI
(match_operand:HF 1 "nonimmediate_operand" "vm"))))]
"TARGET_64BIT && TARGET_AVX512FP16"
"vcvttsh2usi\t{%1, %k0|%k0, %1}"
[(set_attr "type" "sseicvt")
(set_attr "prefix" "evex")
(set_attr "mode" "SI")])
(define_insn "*fixuns_truncsi2_avx512f_zext"
[(set (match_operand:DI 0 "register_operand" "=r")
(zero_extend:DI
(unsigned_fix:SI
(match_operand:MODEF 1 "nonimmediate_operand" "vm"))))]
"TARGET_64BIT && TARGET_AVX512F && TARGET_SSE_MATH"
"vcvtt2usi\t{%1, %k0|%k0, %1}"
[(set_attr "type" "sseicvt")
(set_attr "prefix" "evex")
(set_attr "mode" "SI")])
(define_insn_and_split "*fixuns_trunc_1"
[(set (match_operand:SI 0 "register_operand" "=&x,&x")
(unsigned_fix:SI
(match_operand:MODEF 3 "nonimmediate_operand" "xm,xm")))
(use (match_operand: 4 "nonimmediate_operand" "m,x"))
(clobber (match_scratch: 1 "=x,&x"))
(clobber (match_scratch: 2 "=x,x"))]
"!TARGET_64BIT && TARGET_SSE2 && TARGET_SSE_MATH
&& optimize_function_for_speed_p (cfun)"
"#"
"&& reload_completed"
[(const_int 0)]
{
ix86_split_convert_uns_si_sse (operands);
DONE;
})
;; Unsigned conversion to HImode.
;; Without these patterns, we'll try the unsigned SI conversion which
;; is complex for SSE, rather than the signed SI conversion, which isn't.
(define_expand "fixuns_trunchfhi2"
[(set (match_dup 2)
(fix:SI (match_operand:HF 1 "nonimmediate_operand")))
(set (match_operand:HI 0 "nonimmediate_operand")
(subreg:HI (match_dup 2) 0))]
"TARGET_AVX512FP16"
"operands[2] = gen_reg_rtx (SImode);")
(define_expand "fixuns_trunchi2"
[(set (match_dup 2)
(fix:SI (match_operand:MODEF 1 "nonimmediate_operand")))
(set (match_operand:HI 0 "nonimmediate_operand")
(subreg:HI (match_dup 2) 0))]
"SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH"
"operands[2] = gen_reg_rtx (SImode);")
;; When SSE is available, it is always faster to use it!
(define_insn "fix_trunc_sse"
[(set (match_operand:SWI48 0 "register_operand" "=r,r")
(fix:SWI48 (match_operand:MODEF 1 "nonimmediate_operand" "v,m")))]
"SSE_FLOAT_MODE_P (mode)
&& (!TARGET_FISTTP || TARGET_SSE_MATH)"
"%vcvtt2si\t{%1, %0|%0, %1}"
[(set_attr "type" "sseicvt")
(set_attr "prefix" "maybe_vex")
(set (attr "prefix_rex")
(if_then_else
(match_test "mode == DImode")
(const_string "1")
(const_string "*")))
(set_attr "mode" "")
(set_attr "athlon_decode" "double,vector")
(set_attr "amdfam10_decode" "double,double")
(set_attr "bdver1_decode" "double,double")])
;; Avoid vector decoded forms of the instruction.
(define_peephole2
[(match_scratch:MODEF 2 "x")
(set (match_operand:SWI48 0 "register_operand")
(fix:SWI48 (match_operand:MODEF 1 "memory_operand")))]
"TARGET_AVOID_VECTOR_DECODE
&& SSE_FLOAT_MODE_P (mode)
&& optimize_insn_for_speed_p ()"
[(set (match_dup 2) (match_dup 1))
(set (match_dup 0) (fix:SWI48 (match_dup 2)))])
(define_insn "fix_trunc_i387_fisttp"
[(set (match_operand:SWI248x 0 "nonimmediate_operand" "=m")
(fix:SWI248x (match_operand 1 "register_operand" "f")))
(clobber (match_scratch:XF 2 "=&f"))]
"X87_FLOAT_MODE_P (GET_MODE (operands[1]))
&& TARGET_FISTTP
&& !((SSE_FLOAT_MODE_P (GET_MODE (operands[1]))
&& (TARGET_64BIT || mode != DImode))
&& TARGET_SSE_MATH)"
"* return output_fix_trunc (insn, operands, true);"
[(set_attr "type" "fisttp")
(set_attr "mode" "")])
;; See the comments in i386.h near OPTIMIZE_MODE_SWITCHING for a description
;; of the machinery.  Please note the clobber of FLAGS_REG: in the i387
;; control word calculation (inserted by LCM in the mode-switching pass),
;; FLAGS_REG-clobbering insns can be used.  Look at the
;; emit_i387_cw_initialization () function in i386.cc.
(define_insn_and_split "*fix_trunc_i387_1"
[(set (match_operand:SWI248x 0 "nonimmediate_operand")
(fix:SWI248x (match_operand 1 "register_operand")))
(clobber (reg:CC FLAGS_REG))]
"X87_FLOAT_MODE_P (GET_MODE (operands[1]))
&& !TARGET_FISTTP
&& !(SSE_FLOAT_MODE_P (GET_MODE (operands[1]))
&& (TARGET_64BIT || mode != DImode))
&& ix86_pre_reload_split ()"
"#"
"&& 1"
[(const_int 0)]
{
ix86_optimize_mode_switching[I387_TRUNC] = 1;
operands[2] = assign_386_stack_local (HImode, SLOT_CW_STORED);
operands[3] = assign_386_stack_local (HImode, SLOT_CW_TRUNC);
emit_insn (gen_fix_trunc<mode>_i387 (operands[0], operands[1],
operands[2], operands[3]));
DONE;
}
[(set_attr "type" "fistp")
(set_attr "i387_cw" "trunc")
(set_attr "mode" "")])
(define_insn "fix_truncdi_i387"
[(set (match_operand:DI 0 "nonimmediate_operand" "=m")
(fix:DI (match_operand 1 "register_operand" "f")))
(use (match_operand:HI 2 "memory_operand" "m"))
(use (match_operand:HI 3 "memory_operand" "m"))
(clobber (match_scratch:XF 4 "=&f"))]
"X87_FLOAT_MODE_P (GET_MODE (operands[1]))
&& !TARGET_FISTTP
&& !(TARGET_64BIT && SSE_FLOAT_MODE_P (GET_MODE (operands[1])))"
"* return output_fix_trunc (insn, operands, false);"
[(set_attr "type" "fistp")
(set_attr "i387_cw" "trunc")
(set_attr "mode" "DI")])
(define_insn "fix_trunc_i387"
[(set (match_operand:SWI24 0 "nonimmediate_operand" "=m")
(fix:SWI24 (match_operand 1 "register_operand" "f")))
(use (match_operand:HI 2 "memory_operand" "m"))
(use (match_operand:HI 3 "memory_operand" "m"))]
"X87_FLOAT_MODE_P (GET_MODE (operands[1]))
&& !TARGET_FISTTP
&& !SSE_FLOAT_MODE_P (GET_MODE (operands[1]))"
"* return output_fix_trunc (insn, operands, false);"
[(set_attr "type" "fistp")
(set_attr "i387_cw" "trunc")
(set_attr "mode" "")])
(define_insn "x86_fnstcw_1"
[(set (match_operand:HI 0 "memory_operand" "=m")
(unspec:HI [(const_int 0)] UNSPEC_FSTCW))]
"TARGET_80387"
"fnstcw\t%0"
[(set (attr "length")
(symbol_ref "ix86_attr_length_address_default (insn) + 2"))
(set_attr "mode" "HI")
(set_attr "unit" "i387")
(set_attr "bdver1_decode" "vector")])
;; Conversion between fixed point and floating point.
;; Even though we only accept memory inputs, the backend _really_
;; wants to be able to do this between registers. Thankfully, LRA
;; will fix this up for us during register allocation.
(define_insn "floathi2"
[(set (match_operand:X87MODEF 0 "register_operand" "=f")
(float:X87MODEF (match_operand:HI 1 "nonimmediate_operand" "m")))]
"TARGET_80387
&& (!(SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
|| TARGET_MIX_SSE_I387)"
"fild%Z1\t%1"
[(set_attr "type" "fmov")
(set_attr "mode" "")
(set_attr "znver1_decode" "double")
(set_attr "fp_int_src" "true")])
(define_insn "floatxf2"
[(set (match_operand:XF 0 "register_operand" "=f")
(float:XF (match_operand:SWI48x 1 "nonimmediate_operand" "m")))]
"TARGET_80387"
"fild%Z1\t%1"
[(set_attr "type" "fmov")
(set_attr "mode" "XF")
(set_attr "znver1_decode" "double")
(set_attr "fp_int_src" "true")])
(define_expand "float2"
[(set (match_operand:MODEF 0 "register_operand")
(float:MODEF (match_operand:SWI48x 1 "nonimmediate_operand")))]
"(TARGET_80387 && X87_ENABLE_FLOAT (mode, mode))
|| (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH
&& ((