;; GCC machine description for SSE instructions
;; Copyright (C) 2005-2017 Free Software Foundation, Inc.
;;
;; This file is part of GCC.
;;
;; GCC is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.
;;
;; GCC is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;; GNU General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3. If not see
;; <http://www.gnu.org/licenses/>.
(define_c_enum "unspec" [
;; SSE
UNSPEC_MOVNT
;; SSE2
UNSPEC_MOVDI_TO_SSE
;; SSE3
UNSPEC_LDDQU
;; SSSE3
UNSPEC_PSHUFB
UNSPEC_PSIGN
UNSPEC_PALIGNR
;; For SSE4A support
UNSPEC_EXTRQI
UNSPEC_EXTRQ
UNSPEC_INSERTQI
UNSPEC_INSERTQ
;; For SSE4.1 support
UNSPEC_BLENDV
UNSPEC_INSERTPS
UNSPEC_DP
UNSPEC_MOVNTDQA
UNSPEC_MPSADBW
UNSPEC_PHMINPOSUW
UNSPEC_PTEST
;; For SSE4.2 support
UNSPEC_PCMPESTR
UNSPEC_PCMPISTR
;; For FMA4 support
UNSPEC_FMADDSUB
UNSPEC_XOP_UNSIGNED_CMP
UNSPEC_XOP_TRUEFALSE
UNSPEC_XOP_PERMUTE
UNSPEC_FRCZ
;; For AES support
UNSPEC_AESENC
UNSPEC_AESENCLAST
UNSPEC_AESDEC
UNSPEC_AESDECLAST
UNSPEC_AESIMC
UNSPEC_AESKEYGENASSIST
;; For PCLMUL support
UNSPEC_PCLMUL
;; For AVX support
UNSPEC_PCMP
UNSPEC_VPERMIL
UNSPEC_VPERMIL2
UNSPEC_VPERMIL2F128
UNSPEC_CAST
UNSPEC_VTESTP
UNSPEC_VCVTPH2PS
UNSPEC_VCVTPS2PH
;; For AVX2 support
UNSPEC_VPERMVAR
UNSPEC_VPERMTI
UNSPEC_GATHER
UNSPEC_VSIBADDR
;; For AVX512F support
UNSPEC_VPERMI2
UNSPEC_VPERMT2
UNSPEC_VPERMI2_MASK
UNSPEC_UNSIGNED_FIX_NOTRUNC
UNSPEC_UNSIGNED_PCMP
UNSPEC_TESTM
UNSPEC_TESTNM
UNSPEC_SCATTER
UNSPEC_RCP14
UNSPEC_RSQRT14
UNSPEC_FIXUPIMM
UNSPEC_SCALEF
UNSPEC_VTERNLOG
UNSPEC_GETEXP
UNSPEC_GETMANT
UNSPEC_ALIGN
UNSPEC_CONFLICT
UNSPEC_COMPRESS
UNSPEC_COMPRESS_STORE
UNSPEC_EXPAND
UNSPEC_MASKED_EQ
UNSPEC_MASKED_GT
;; Mask operations
UNSPEC_MASKOP
UNSPEC_KORTEST
UNSPEC_KTEST
;; For embed. rounding feature
UNSPEC_EMBEDDED_ROUNDING
;; For AVX512PF support
UNSPEC_GATHER_PREFETCH
UNSPEC_SCATTER_PREFETCH
;; For AVX512ER support
UNSPEC_EXP2
UNSPEC_RCP28
UNSPEC_RSQRT28
;; For SHA support
UNSPEC_SHA1MSG1
UNSPEC_SHA1MSG2
UNSPEC_SHA1NEXTE
UNSPEC_SHA1RNDS4
UNSPEC_SHA256MSG1
UNSPEC_SHA256MSG2
UNSPEC_SHA256RNDS2
;; For AVX512BW support
UNSPEC_DBPSADBW
UNSPEC_PMADDUBSW512
UNSPEC_PMADDWD512
UNSPEC_PSHUFHW
UNSPEC_PSHUFLW
UNSPEC_CVTINT2MASK
;; For AVX512DQ support
UNSPEC_REDUCE
UNSPEC_FPCLASS
UNSPEC_RANGE
;; For AVX512IFMA support
UNSPEC_VPMADD52LUQ
UNSPEC_VPMADD52HUQ
;; For AVX512VBMI support
UNSPEC_VPMULTISHIFT
;; For AVX5124FMAPS/AVX5124VNNIW support
UNSPEC_VP4FMADD
UNSPEC_VP4FNMADD
UNSPEC_VP4DPWSSD
UNSPEC_VP4DPWSSDS
])
(define_c_enum "unspecv" [
UNSPECV_LDMXCSR
UNSPECV_STMXCSR
UNSPECV_CLFLUSH
UNSPECV_MONITOR
UNSPECV_MWAIT
UNSPECV_VZEROALL
UNSPECV_VZEROUPPER
])
;; All vector modes including V?TImode, used in move patterns.
(define_mode_iterator VMOVE
[(V64QI "TARGET_AVX512F") (V32QI "TARGET_AVX") V16QI
(V32HI "TARGET_AVX512F") (V16HI "TARGET_AVX") V8HI
(V16SI "TARGET_AVX512F") (V8SI "TARGET_AVX") V4SI
(V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX") V2DI
(V4TI "TARGET_AVX512BW") (V2TI "TARGET_AVX") V1TI
(V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF
(V8DF "TARGET_AVX512F") (V4DF "TARGET_AVX") V2DF])
;; All AVX-512{F,VL} vector modes.  Assumes TARGET_AVX512F as baseline.
(define_mode_iterator V48_AVX512VL
[V16SI (V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL")
V8DI (V4DI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")
V16SF (V8SF "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL")
V8DF (V4DF "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")])
;; 1,2 byte AVX-512{BW,VL} vector modes.  Assumes TARGET_AVX512BW as baseline.
(define_mode_iterator VI12_AVX512VL
[V64QI (V16QI "TARGET_AVX512VL") (V32QI "TARGET_AVX512VL")
V32HI (V16HI "TARGET_AVX512VL") (V8HI "TARGET_AVX512VL")])
(define_mode_iterator VI1_AVX512VL
[V64QI (V16QI "TARGET_AVX512VL") (V32QI "TARGET_AVX512VL")])
;; All vector modes
(define_mode_iterator V
[(V32QI "TARGET_AVX") V16QI
(V16HI "TARGET_AVX") V8HI
(V16SI "TARGET_AVX512F") (V8SI "TARGET_AVX") V4SI
(V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX") V2DI
(V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF
(V8DF "TARGET_AVX512F") (V4DF "TARGET_AVX") (V2DF "TARGET_SSE2")])
;; All 128bit vector modes
(define_mode_iterator V_128
[V16QI V8HI V4SI V2DI V4SF (V2DF "TARGET_SSE2")])
;; All 256bit vector modes
(define_mode_iterator V_256
[V32QI V16HI V8SI V4DI V8SF V4DF])
;; All 512bit vector modes
(define_mode_iterator V_512 [V64QI V32HI V16SI V8DI V16SF V8DF])
;; All 256bit and 512bit vector modes
(define_mode_iterator V_256_512
[V32QI V16HI V8SI V4DI V8SF V4DF
(V64QI "TARGET_AVX512F") (V32HI "TARGET_AVX512F") (V16SI "TARGET_AVX512F")
(V8DI "TARGET_AVX512F") (V16SF "TARGET_AVX512F") (V8DF "TARGET_AVX512F")])
;; All vector float modes
(define_mode_iterator VF
[(V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF
(V8DF "TARGET_AVX512F") (V4DF "TARGET_AVX") (V2DF "TARGET_SSE2")])
;; 128- and 256-bit float vector modes
(define_mode_iterator VF_128_256
[(V8SF "TARGET_AVX") V4SF
(V4DF "TARGET_AVX") (V2DF "TARGET_SSE2")])
;; All SFmode vector float modes
(define_mode_iterator VF1
[(V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF])
;; 128- and 256-bit SF vector modes
(define_mode_iterator VF1_128_256
[(V8SF "TARGET_AVX") V4SF])
(define_mode_iterator VF1_128_256VL
[V8SF (V4SF "TARGET_AVX512VL")])
;; All DFmode vector float modes
(define_mode_iterator VF2
[(V8DF "TARGET_AVX512F") (V4DF "TARGET_AVX") V2DF])
;; 128- and 256-bit DF vector modes
(define_mode_iterator VF2_128_256
[(V4DF "TARGET_AVX") V2DF])
(define_mode_iterator VF2_512_256
[(V8DF "TARGET_AVX512F") V4DF])
(define_mode_iterator VF2_512_256VL
[V8DF (V4DF "TARGET_AVX512VL")])
;; All 128bit vector float modes
(define_mode_iterator VF_128
[V4SF (V2DF "TARGET_SSE2")])
;; All 256bit vector float modes
(define_mode_iterator VF_256
[V8SF V4DF])
;; All 512bit vector float modes
(define_mode_iterator VF_512
[V16SF V8DF])
(define_mode_iterator VI48_AVX512VL
[V16SI (V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL")
V8DI (V4DI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")])
(define_mode_iterator VF_AVX512VL
[V16SF (V8SF "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL")
V8DF (V4DF "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")])
(define_mode_iterator VF2_AVX512VL
[V8DF (V4DF "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")])
(define_mode_iterator VF1_AVX512VL
[V16SF (V8SF "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL")])
;; All vector integer modes
(define_mode_iterator VI
[(V16SI "TARGET_AVX512F") (V8DI "TARGET_AVX512F")
(V64QI "TARGET_AVX512BW") (V32QI "TARGET_AVX") V16QI
(V32HI "TARGET_AVX512BW") (V16HI "TARGET_AVX") V8HI
(V8SI "TARGET_AVX") V4SI
(V4DI "TARGET_AVX") V2DI])
(define_mode_iterator VI_AVX2
[(V64QI "TARGET_AVX512BW") (V32QI "TARGET_AVX2") V16QI
(V32HI "TARGET_AVX512BW") (V16HI "TARGET_AVX2") V8HI
(V16SI "TARGET_AVX512F") (V8SI "TARGET_AVX2") V4SI
(V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX2") V2DI])
;; All QImode vector integer modes
(define_mode_iterator VI1
[(V32QI "TARGET_AVX") V16QI])
;; All 128bit and 256bit vector modes
(define_mode_iterator V_AVX
[V16QI V8HI V4SI V2DI V4SF V2DF
(V32QI "TARGET_AVX") (V16HI "TARGET_AVX")
(V8SI "TARGET_AVX") (V4DI "TARGET_AVX")
(V8SF "TARGET_AVX") (V4DF "TARGET_AVX")])
(define_mode_iterator VI48_AVX
[V4SI V2DI
(V8SI "TARGET_AVX") (V4DI "TARGET_AVX")])
(define_mode_iterator VI8
[(V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX") V2DI])
(define_mode_iterator VI8_AVX512VL
[V8DI (V4DI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")])
(define_mode_iterator VI8_256_512
[V8DI (V4DI "TARGET_AVX512VL")])
(define_mode_iterator VI1_AVX2
[(V32QI "TARGET_AVX2") V16QI])
(define_mode_iterator VI1_AVX512
[(V64QI "TARGET_AVX512BW") (V32QI "TARGET_AVX2") V16QI])
(define_mode_iterator VI2_AVX2
[(V32HI "TARGET_AVX512BW") (V16HI "TARGET_AVX2") V8HI])
(define_mode_iterator VI2_AVX512F
[(V32HI "TARGET_AVX512F") (V16HI "TARGET_AVX2") V8HI])
(define_mode_iterator VI4_AVX
[(V8SI "TARGET_AVX") V4SI])
(define_mode_iterator VI4_AVX2
[(V8SI "TARGET_AVX2") V4SI])
(define_mode_iterator VI4_AVX512F
[(V16SI "TARGET_AVX512F") (V8SI "TARGET_AVX2") V4SI])
(define_mode_iterator VI4_AVX512VL
[V16SI (V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL")])
(define_mode_iterator VI48_AVX512F_AVX512VL
[V4SI V8SI (V16SI "TARGET_AVX512F")
(V2DI "TARGET_AVX512VL") (V4DI "TARGET_AVX512VL") (V8DI "TARGET_AVX512F")])
(define_mode_iterator VI2_AVX512VL
[(V8HI "TARGET_AVX512VL") (V16HI "TARGET_AVX512VL") V32HI])
(define_mode_iterator VI8_AVX2_AVX512BW
[(V8DI "TARGET_AVX512BW") (V4DI "TARGET_AVX2") V2DI])
(define_mode_iterator VI8_AVX2
[(V4DI "TARGET_AVX2") V2DI])
(define_mode_iterator VI8_AVX2_AVX512F
[(V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX2") V2DI])
(define_mode_iterator VI4_128_8_256
[V4SI V4DI])
;; All V8D* modes
(define_mode_iterator V8FI
[V8DF V8DI])
;; All V16S* modes
(define_mode_iterator V16FI
[V16SF V16SI])
;; ??? We should probably use TImode instead.
(define_mode_iterator VIMAX_AVX2
[(V4TI "TARGET_AVX512BW") (V2TI "TARGET_AVX2") V1TI])
;; ??? This should probably be dropped in favor of VIMAX_AVX2.
(define_mode_iterator SSESCALARMODE
[(V4TI "TARGET_AVX512BW") (V2TI "TARGET_AVX2") TI])
(define_mode_iterator VI12_AVX2
[(V64QI "TARGET_AVX512BW") (V32QI "TARGET_AVX2") V16QI
(V32HI "TARGET_AVX512BW") (V16HI "TARGET_AVX2") V8HI])
(define_mode_iterator VI24_AVX2
[(V16HI "TARGET_AVX2") V8HI
(V8SI "TARGET_AVX2") V4SI])
(define_mode_iterator VI124_AVX2_24_AVX512F_1_AVX512BW
[(V64QI "TARGET_AVX512BW") (V32QI "TARGET_AVX2") V16QI
(V32HI "TARGET_AVX512F") (V16HI "TARGET_AVX2") V8HI
(V16SI "TARGET_AVX512F") (V8SI "TARGET_AVX2") V4SI])
(define_mode_iterator VI124_AVX2
[(V32QI "TARGET_AVX2") V16QI
(V16HI "TARGET_AVX2") V8HI
(V8SI "TARGET_AVX2") V4SI])
(define_mode_iterator VI2_AVX2_AVX512BW
[(V32HI "TARGET_AVX512BW") (V16HI "TARGET_AVX2") V8HI])
(define_mode_iterator VI48_AVX2
[(V8SI "TARGET_AVX2") V4SI
(V4DI "TARGET_AVX2") V2DI])
(define_mode_iterator VI248_AVX2_8_AVX512F_24_AVX512BW
[(V32HI "TARGET_AVX512BW") (V16HI "TARGET_AVX2") V8HI
(V16SI "TARGET_AVX512BW") (V8SI "TARGET_AVX2") V4SI
(V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX2") V2DI])
(define_mode_iterator VI248_AVX512BW_AVX512VL
[(V32HI "TARGET_AVX512BW")
(V4DI "TARGET_AVX512VL") V16SI V8DI])
;; Assumes TARGET_AVX512VL as baseline
(define_mode_iterator VI24_AVX512BW_1
[(V16HI "TARGET_AVX512BW") (V8HI "TARGET_AVX512BW")
V8SI V4SI])
(define_mode_iterator VI48_AVX512F
[(V16SI "TARGET_AVX512F") V8SI V4SI
(V8DI "TARGET_AVX512F") V4DI V2DI])
(define_mode_iterator VI48_AVX_AVX512F
[(V16SI "TARGET_AVX512F") (V8SI "TARGET_AVX") V4SI
(V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX") V2DI])
(define_mode_iterator VI12_AVX_AVX512F
[ (V64QI "TARGET_AVX512F") (V32QI "TARGET_AVX") V16QI
(V32HI "TARGET_AVX512F") (V16HI "TARGET_AVX") V8HI])
(define_mode_iterator V48_AVX2
[V4SF V2DF
V8SF V4DF
(V4SI "TARGET_AVX2") (V2DI "TARGET_AVX2")
(V8SI "TARGET_AVX2") (V4DI "TARGET_AVX2")])
(define_mode_attr avx512
[(V16QI "avx512vl") (V32QI "avx512vl") (V64QI "avx512bw")
(V8HI "avx512vl") (V16HI "avx512vl") (V32HI "avx512bw")
(V4SI "avx512vl") (V8SI "avx512vl") (V16SI "avx512f")
(V2DI "avx512vl") (V4DI "avx512vl") (V8DI "avx512f")
(V4SF "avx512vl") (V8SF "avx512vl") (V16SF "avx512f")
(V2DF "avx512vl") (V4DF "avx512vl") (V8DF "avx512f")])
(define_mode_attr sse2_avx_avx512f
[(V16QI "sse2") (V32QI "avx") (V64QI "avx512f")
(V8HI "avx512vl") (V16HI "avx512vl") (V32HI "avx512bw")
(V4SI "sse2") (V8SI "avx") (V16SI "avx512f")
(V2DI "avx512vl") (V4DI "avx512vl") (V8DI "avx512f")
(V16SF "avx512f") (V8SF "avx") (V4SF "avx")
(V8DF "avx512f") (V4DF "avx") (V2DF "avx")])
(define_mode_attr sse2_avx2
[(V16QI "sse2") (V32QI "avx2") (V64QI "avx512bw")
(V8HI "sse2") (V16HI "avx2") (V32HI "avx512bw")
(V4SI "sse2") (V8SI "avx2") (V16SI "avx512f")
(V2DI "sse2") (V4DI "avx2") (V8DI "avx512f")
(V1TI "sse2") (V2TI "avx2") (V4TI "avx512bw")])
(define_mode_attr ssse3_avx2
[(V16QI "ssse3") (V32QI "avx2") (V64QI "avx512bw")
(V4HI "ssse3") (V8HI "ssse3") (V16HI "avx2") (V32HI "avx512bw")
(V4SI "ssse3") (V8SI "avx2")
(V2DI "ssse3") (V4DI "avx2")
(TI "ssse3") (V2TI "avx2") (V4TI "avx512bw")])
(define_mode_attr sse4_1_avx2
[(V16QI "sse4_1") (V32QI "avx2") (V64QI "avx512bw")
(V8HI "sse4_1") (V16HI "avx2") (V32HI "avx512bw")
(V4SI "sse4_1") (V8SI "avx2") (V16SI "avx512f")
(V2DI "sse4_1") (V4DI "avx2") (V8DI "avx512dq")])
(define_mode_attr avx_avx2
[(V4SF "avx") (V2DF "avx")
(V8SF "avx") (V4DF "avx")
(V4SI "avx2") (V2DI "avx2")
(V8SI "avx2") (V4DI "avx2")])
(define_mode_attr vec_avx2
[(V16QI "vec") (V32QI "avx2")
(V8HI "vec") (V16HI "avx2")
(V4SI "vec") (V8SI "avx2")
(V2DI "vec") (V4DI "avx2")])
(define_mode_attr avx2_avx512
[(V4SI "avx2") (V8SI "avx2") (V16SI "avx512f")
(V2DI "avx2") (V4DI "avx2") (V8DI "avx512f")
(V4SF "avx2") (V8SF "avx2") (V16SF "avx512f")
(V2DF "avx2") (V4DF "avx2") (V8DF "avx512f")
(V8HI "avx512vl") (V16HI "avx512vl") (V32HI "avx512bw")])
(define_mode_attr shuffletype
[(V16SF "f") (V16SI "i") (V8DF "f") (V8DI "i")
(V8SF "f") (V8SI "i") (V4DF "f") (V4DI "i")
(V4SF "f") (V4SI "i") (V2DF "f") (V2DI "i")
(V32HI "i") (V16HI "i") (V8HI "i")
(V64QI "i") (V32QI "i") (V16QI "i")
(V4TI "i") (V2TI "i") (V1TI "i")])
(define_mode_attr ssequartermode
[(V16SF "V4SF") (V8DF "V2DF") (V16SI "V4SI") (V8DI "V2DI")])
(define_mode_attr ssedoublemodelower
[(V16QI "v16hi") (V32QI "v32hi") (V64QI "v64hi")
(V8HI "v8si") (V16HI "v16si") (V32HI "v32si")
(V4SI "v4di") (V8SI "v8di") (V16SI "v16di")])
(define_mode_attr ssedoublemode
[(V4SF "V8SF") (V8SF "V16SF") (V16SF "V32SF")
(V2DF "V4DF") (V4DF "V8DF") (V8DF "V16DF")
(V16QI "V16HI") (V32QI "V32HI") (V64QI "V64HI")
(V4HI "V4SI") (V8HI "V8SI") (V16HI "V16SI") (V32HI "V32SI")
(V4SI "V4DI") (V8SI "V16SI") (V16SI "V32SI")
(V4DI "V8DI") (V8DI "V16DI")])
(define_mode_attr ssebytemode
[(V8DI "V64QI") (V4DI "V32QI") (V2DI "V16QI")])
;; All 128bit vector integer modes
(define_mode_iterator VI_128 [V16QI V8HI V4SI V2DI])
;; All 256bit vector integer modes
(define_mode_iterator VI_256 [V32QI V16HI V8SI V4DI])
;; Various 128bit vector integer mode combinations
(define_mode_iterator VI12_128 [V16QI V8HI])
(define_mode_iterator VI14_128 [V16QI V4SI])
(define_mode_iterator VI124_128 [V16QI V8HI V4SI])
(define_mode_iterator VI24_128 [V8HI V4SI])
(define_mode_iterator VI248_128 [V8HI V4SI V2DI])
(define_mode_iterator VI48_128 [V4SI V2DI])
;; Various 256bit and 512bit vector integer mode combinations
(define_mode_iterator VI124_256 [V32QI V16HI V8SI])
(define_mode_iterator VI124_256_AVX512F_AVX512BW
[V32QI V16HI V8SI
(V64QI "TARGET_AVX512BW")
(V32HI "TARGET_AVX512BW")
(V16SI "TARGET_AVX512F")])
(define_mode_iterator VI48_256 [V8SI V4DI])
(define_mode_iterator VI48_512 [V16SI V8DI])
(define_mode_iterator VI4_256_8_512 [V8SI V8DI])
(define_mode_iterator VI_AVX512BW
[V16SI V8DI (V32HI "TARGET_AVX512BW") (V64QI "TARGET_AVX512BW")])
;; Int-float size matches
(define_mode_iterator VI4F_128 [V4SI V4SF])
(define_mode_iterator VI8F_128 [V2DI V2DF])
(define_mode_iterator VI4F_256 [V8SI V8SF])
(define_mode_iterator VI8F_256 [V4DI V4DF])
(define_mode_iterator VI48F_256_512
[V8SI V8SF
(V16SI "TARGET_AVX512F") (V16SF "TARGET_AVX512F")
(V8DI "TARGET_AVX512F") (V8DF "TARGET_AVX512F")
(V4DI "TARGET_AVX512VL") (V4DF "TARGET_AVX512VL")])
(define_mode_iterator VF48_I1248
[V16SI V16SF V8DI V8DF V32HI V64QI])
(define_mode_iterator VI48F
[V16SI V16SF V8DI V8DF
(V8SI "TARGET_AVX512VL") (V8SF "TARGET_AVX512VL")
(V4DI "TARGET_AVX512VL") (V4DF "TARGET_AVX512VL")
(V4SI "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL")
(V2DI "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")])
(define_mode_iterator VI48F_256 [V8SI V8SF V4DI V4DF])
;; Mapping from float mode to required SSE level
(define_mode_attr sse
[(SF "sse") (DF "sse2")
(V4SF "sse") (V2DF "sse2")
(V16SF "avx512f") (V8SF "avx")
(V8DF "avx512f") (V4DF "avx")])
(define_mode_attr sse2
[(V16QI "sse2") (V32QI "avx") (V64QI "avx512f")
(V2DI "sse2") (V4DI "avx") (V8DI "avx512f")])
(define_mode_attr sse3
[(V16QI "sse3") (V32QI "avx")])
(define_mode_attr sse4_1
[(V4SF "sse4_1") (V2DF "sse4_1")
(V8SF "avx") (V4DF "avx")
(V8DF "avx512f")
(V4DI "avx") (V2DI "sse4_1")
(V8SI "avx") (V4SI "sse4_1")
(V16QI "sse4_1") (V32QI "avx")
(V8HI "sse4_1") (V16HI "avx")])
(define_mode_attr avxsizesuffix
[(V64QI "512") (V32HI "512") (V16SI "512") (V8DI "512")
(V32QI "256") (V16HI "256") (V8SI "256") (V4DI "256")
(V16QI "") (V8HI "") (V4SI "") (V2DI "")
(V16SF "512") (V8DF "512")
(V8SF "256") (V4DF "256")
(V4SF "") (V2DF "")])
;; SSE instruction mode
(define_mode_attr sseinsnmode
[(V64QI "XI") (V32HI "XI") (V16SI "XI") (V8DI "XI") (V4TI "XI")
(V32QI "OI") (V16HI "OI") (V8SI "OI") (V4DI "OI") (V2TI "OI")
(V16QI "TI") (V8HI "TI") (V4SI "TI") (V2DI "TI") (V1TI "TI")
(V16SF "V16SF") (V8DF "V8DF")
(V8SF "V8SF") (V4DF "V4DF")
(V4SF "V4SF") (V2DF "V2DF")
(TI "TI")])
;; Mapping of vector modes to corresponding mask size
(define_mode_attr avx512fmaskmode
[(V64QI "DI") (V32QI "SI") (V16QI "HI")
(V32HI "SI") (V16HI "HI") (V8HI "QI") (V4HI "QI")
(V16SI "HI") (V8SI "QI") (V4SI "QI")
(V8DI "QI") (V4DI "QI") (V2DI "QI")
(V16SF "HI") (V8SF "QI") (V4SF "QI")
(V8DF "QI") (V4DF "QI") (V2DF "QI")])
;; Mapping of vector modes to the corresponding lowercase mask mode
(define_mode_attr avx512fmaskmodelower
[(V64QI "di") (V32QI "si") (V16QI "hi")
(V32HI "si") (V16HI "hi") (V8HI "qi") (V4HI "qi")
(V16SI "hi") (V8SI "qi") (V4SI "qi")
(V8DI "qi") (V4DI "qi") (V2DI "qi")
(V16SF "hi") (V8SF "qi") (V4SF "qi")
(V8DF "qi") (V4DF "qi") (V2DF "qi")])
;; Mapping of vector float modes to an integer mode of the same size
(define_mode_attr sseintvecmode
[(V16SF "V16SI") (V8DF "V8DI")
(V8SF "V8SI") (V4DF "V4DI")
(V4SF "V4SI") (V2DF "V2DI")
(V16SI "V16SI") (V8DI "V8DI")
(V8SI "V8SI") (V4DI "V4DI")
(V4SI "V4SI") (V2DI "V2DI")
(V16HI "V16HI") (V8HI "V8HI")
(V32HI "V32HI") (V64QI "V64QI")
(V32QI "V32QI") (V16QI "V16QI")])
(define_mode_attr sseintvecmode2
[(V8DF "XI") (V4DF "OI") (V2DF "TI")
(V8SF "OI") (V4SF "TI")])
(define_mode_attr sseintvecmodelower
[(V16SF "v16si") (V8DF "v8di")
(V8SF "v8si") (V4DF "v4di")
(V4SF "v4si") (V2DF "v2di")
(V8SI "v8si") (V4DI "v4di")
(V4SI "v4si") (V2DI "v2di")
(V16HI "v16hi") (V8HI "v8hi")
(V32QI "v32qi") (V16QI "v16qi")])
;; Mapping of vector modes to a vector mode of double size
(define_mode_attr ssedoublevecmode
[(V32QI "V64QI") (V16HI "V32HI") (V8SI "V16SI") (V4DI "V8DI")
(V16QI "V32QI") (V8HI "V16HI") (V4SI "V8SI") (V2DI "V4DI")
(V8SF "V16SF") (V4DF "V8DF")
(V4SF "V8SF") (V2DF "V4DF")])
;; Mapping of vector modes to a vector mode of half size
(define_mode_attr ssehalfvecmode
[(V64QI "V32QI") (V32HI "V16HI") (V16SI "V8SI") (V8DI "V4DI")
(V32QI "V16QI") (V16HI "V8HI") (V8SI "V4SI") (V4DI "V2DI")
(V16QI "V8QI") (V8HI "V4HI") (V4SI "V2SI")
(V16SF "V8SF") (V8DF "V4DF")
(V8SF "V4SF") (V4DF "V2DF")
(V4SF "V2SF")])
;; Mapping of vector modes to packed single mode of the same size
(define_mode_attr ssePSmode
[(V16SI "V16SF") (V8DF "V16SF")
(V16SF "V16SF") (V8DI "V16SF")
(V64QI "V16SF") (V32QI "V8SF") (V16QI "V4SF")
(V32HI "V16SF") (V16HI "V8SF") (V8HI "V4SF")
(V8SI "V8SF") (V4SI "V4SF")
(V4DI "V8SF") (V2DI "V4SF")
(V4TI "V16SF") (V2TI "V8SF") (V1TI "V4SF")
(V8SF "V8SF") (V4SF "V4SF")
(V4DF "V8SF") (V2DF "V4SF")])
(define_mode_attr ssePSmode2
[(V8DI "V8SF") (V4DI "V4SF")])
;; Mapping of vector modes back to the scalar modes
(define_mode_attr ssescalarmode
[(V64QI "QI") (V32QI "QI") (V16QI "QI")
(V32HI "HI") (V16HI "HI") (V8HI "HI")
(V16SI "SI") (V8SI "SI") (V4SI "SI")
(V8DI "DI") (V4DI "DI") (V2DI "DI")
(V16SF "SF") (V8SF "SF") (V4SF "SF")
(V8DF "DF") (V4DF "DF") (V2DF "DF")])
;; Mapping of vector modes to the 128bit modes
(define_mode_attr ssexmmmode
[(V64QI "V16QI") (V32QI "V16QI") (V16QI "V16QI")
(V32HI "V8HI") (V16HI "V8HI") (V8HI "V8HI")
(V16SI "V4SI") (V8SI "V4SI") (V4SI "V4SI")
(V8DI "V2DI") (V4DI "V2DI") (V2DI "V2DI")
(V16SF "V4SF") (V8SF "V4SF") (V4SF "V4SF")
(V8DF "V2DF") (V4DF "V2DF") (V2DF "V2DF")])
;; Pointer size override for scalar modes (Intel asm dialect)
(define_mode_attr iptr
[(V64QI "b") (V32HI "w") (V16SI "k") (V8DI "q")
(V32QI "b") (V16HI "w") (V8SI "k") (V4DI "q")
(V16QI "b") (V8HI "w") (V4SI "k") (V2DI "q")
(V8SF "k") (V4DF "q")
(V4SF "k") (V2DF "q")
(SF "k") (DF "q")])
;; Number of scalar elements in each vector type
(define_mode_attr ssescalarnum
[(V64QI "64") (V16SI "16") (V8DI "8")
(V32QI "32") (V16HI "16") (V8SI "8") (V4DI "4")
(V16QI "16") (V8HI "8") (V4SI "4") (V2DI "2")
(V16SF "16") (V8DF "8")
(V8SF "8") (V4DF "4")
(V4SF "4") (V2DF "2")])
;; Mask of scalar elements in each vector type
(define_mode_attr ssescalarnummask
[(V32QI "31") (V16HI "15") (V8SI "7") (V4DI "3")
(V16QI "15") (V8HI "7") (V4SI "3") (V2DI "1")
(V8SF "7") (V4DF "3")
(V4SF "3") (V2DF "1")])
(define_mode_attr ssescalarsize
[(V4TI "64") (V2TI "64") (V1TI "64")
(V8DI "64") (V4DI "64") (V2DI "64")
(V64QI "8") (V32QI "8") (V16QI "8")
(V32HI "16") (V16HI "16") (V8HI "16")
(V16SI "32") (V8SI "32") (V4SI "32")
(V16SF "32") (V8SF "32") (V4SF "32")
(V8DF "64") (V4DF "64") (V2DF "64")])
;; SSE prefix for integer vector modes
(define_mode_attr sseintprefix
[(V2DI "p") (V2DF "")
(V4DI "p") (V4DF "")
(V8DI "p") (V8DF "")
(V4SI "p") (V4SF "")
(V8SI "p") (V8SF "")
(V16SI "p") (V16SF "")
(V16QI "p") (V8HI "p")
(V32QI "p") (V16HI "p")
(V64QI "p") (V32HI "p")])
;; SSE scalar suffix for vector modes
(define_mode_attr ssescalarmodesuffix
[(SF "ss") (DF "sd")
(V8SF "ss") (V4DF "sd")
(V4SF "ss") (V2DF "sd")
(V8SI "ss") (V4DI "sd")
(V4SI "d")])
;; Pack/unpack vector modes
(define_mode_attr sseunpackmode
[(V16QI "V8HI") (V8HI "V4SI") (V4SI "V2DI")
(V32QI "V16HI") (V16HI "V8SI") (V8SI "V4DI")
(V32HI "V16SI") (V64QI "V32HI") (V16SI "V8DI")])
(define_mode_attr ssepackmode
[(V8HI "V16QI") (V4SI "V8HI") (V2DI "V4SI")
(V16HI "V32QI") (V8SI "V16HI") (V4DI "V8SI")
(V32HI "V64QI") (V16SI "V32HI") (V8DI "V16SI")])
;; Mapping of the max integer size for xop rotate immediate constraint
(define_mode_attr sserotatemax
[(V16QI "7") (V8HI "15") (V4SI "31") (V2DI "63")])
;; Mapping of mode to cast intrinsic name
(define_mode_attr castmode
[(V8SI "si") (V8SF "ps") (V4DF "pd")
(V16SI "si") (V16SF "ps") (V8DF "pd")])
;; Instruction suffix for sign and zero extensions.
(define_code_attr extsuffix [(sign_extend "sx") (zero_extend "zx")])
;; i128 for integer vectors and TARGET_AVX2, f128 otherwise.
;; i64x4 or f64x4 for 512bit modes.
(define_mode_attr i128
[(V16SF "f64x4") (V8SF "f128") (V8DF "f64x4") (V4DF "f128")
(V64QI "i64x4") (V32QI "%~128") (V32HI "i64x4") (V16HI "%~128")
(V16SI "i64x4") (V8SI "%~128") (V8DI "i64x4") (V4DI "%~128")])
;; For 256-bit modes for TARGET_AVX512VL && TARGET_AVX512DQ
;; i32x4, f32x4, i64x2 or f64x2 suffixes.
(define_mode_attr i128vldq
[(V8SF "f32x4") (V4DF "f64x2")
(V32QI "i32x4") (V16HI "i32x4") (V8SI "i32x4") (V4DI "i64x2")])
;; Mix-n-match
(define_mode_iterator AVX256MODE2P [V8SI V8SF V4DF])
(define_mode_iterator AVX512MODE2P [V16SI V16SF V8DF])
;; Mapping for dbpsadbw modes
(define_mode_attr dbpsadbwmode
[(V32HI "V64QI") (V16HI "V32QI") (V8HI "V16QI")])
;; Mapping suffixes for broadcast
(define_mode_attr bcstscalarsuff
[(V64QI "b") (V32QI "b") (V16QI "b")
(V32HI "w") (V16HI "w") (V8HI "w")
(V16SI "d") (V8SI "d") (V4SI "d")
(V8DI "q") (V4DI "q") (V2DI "q")
(V16SF "ss") (V8SF "ss") (V4SF "ss")
(V8DF "sd") (V4DF "sd") (V2DF "sd")])
;; Tie mode of assembler operand to mode iterator
(define_mode_attr concat_tg_mode
[(V32QI "t") (V16HI "t") (V8SI "t") (V4DI "t") (V8SF "t") (V4DF "t")
(V64QI "g") (V32HI "g") (V16SI "g") (V8DI "g") (V16SF "g") (V8DF "g")])
;; Tie mode of assembler operand to mode iterator
(define_mode_attr xtg_mode
[(V16QI "x") (V8HI "x") (V4SI "x") (V2DI "x") (V4SF "x") (V2DF "x")
(V32QI "t") (V16HI "t") (V8SI "t") (V4DI "t") (V8SF "t") (V4DF "t")
(V64QI "g") (V32HI "g") (V16SI "g") (V8DI "g") (V16SF "g") (V8DF "g")])
;; Half mask mode for unpacks
(define_mode_attr HALFMASKMODE
[(DI "SI") (SI "HI")])
;; Double mask mode for packs
(define_mode_attr DOUBLEMASKMODE
[(HI "SI") (SI "DI")])
;; Include define_subst patterns for instructions with mask
(include "subst.md")
;; Patterns whose name begins with "sse{,2,3}_" are invoked by intrinsics.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Move patterns
;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; All of these patterns are enabled for SSE1 as well as SSE2.
;; This is essential for maintaining stable calling conventions.
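;; (Vector values can live in %xmm registers as soon as any SSE level is
;; enabled, e.g. as function arguments and return values, so these moves must
;; not depend on SSE2 or later; hence the expander below requires only
;; TARGET_SSE.)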
(define_expand "mov"
[(set (match_operand:VMOVE 0 "nonimmediate_operand")
(match_operand:VMOVE 1 "nonimmediate_operand"))]
"TARGET_SSE"
{
ix86_expand_vector_move (<MODE>mode, operands);
DONE;
})
(define_insn "mov_internal"
[(set (match_operand:VMOVE 0 "nonimmediate_operand"
"=v,v ,v ,m")
(match_operand:VMOVE 1 "nonimmediate_or_sse_const_operand"
" C,BC,vm,v"))]
"TARGET_SSE
&& (register_operand (operands[0], <MODE>mode)
|| register_operand (operands[1], <MODE>mode))"
{
switch (get_attr_type (insn))
{
case TYPE_SSELOG1:
return standard_sse_constant_opcode (insn, operands[1]);
case TYPE_SSEMOV:
/* There is no evex-encoded vmov* for sizes smaller than 64-bytes
in avx512f, so we need to use workarounds, to access sse registers
16-31, which are evex-only. In avx512vl we don't need workarounds. */
if (TARGET_AVX512F && <MODE_SIZE> < 64 && !TARGET_AVX512VL
&& (EXT_REX_SSE_REG_P (operands[0])
|| EXT_REX_SSE_REG_P (operands[1])))
{
if (memory_operand (operands[0], <MODE>mode))
{
if (<MODE_SIZE> == 32)
return "vextract<shuffletype>64x4\t{$0x0, %g1, %0|%0, %g1, 0x0}";
else if (<MODE_SIZE> == 16)
return "vextract<shuffletype>32x4\t{$0x0, %g1, %0|%0, %g1, 0x0}";
else
gcc_unreachable ();
}
else if (memory_operand (operands[1], <MODE>mode))
{
if (<MODE_SIZE> == 32)
return "vbroadcast<shuffletype>64x4\t{%1, %g0|%g0, %1}";
else if (<MODE_SIZE> == 16)
return "vbroadcast<shuffletype>32x4\t{%1, %g0|%g0, %1}";
else
gcc_unreachable ();
}
else
/* Reg -> reg move is always aligned. Just use wider move. */
switch (get_attr_mode (insn))
{
case MODE_V8SF:
case MODE_V4SF:
return "vmovaps\t{%g1, %g0|%g0, %g1}";
case MODE_V4DF:
case MODE_V2DF:
return "vmovapd\t{%g1, %g0|%g0, %g1}";
case MODE_OI:
case MODE_TI:
return "vmovdqa64\t{%g1, %g0|%g0, %g1}";
default:
gcc_unreachable ();
}
}
switch (get_attr_mode (insn))
{
case MODE_V16SF:
case MODE_V8SF:
case MODE_V4SF:
if (misaligned_operand (operands[0], <MODE>mode)
|| misaligned_operand (operands[1], <MODE>mode))
return "%vmovups\t{%1, %0|%0, %1}";
else
return "%vmovaps\t{%1, %0|%0, %1}";
case MODE_V8DF:
case MODE_V4DF:
case MODE_V2DF:
if (misaligned_operand (operands[0], <MODE>mode)
|| misaligned_operand (operands[1], <MODE>mode))
return "%vmovupd\t{%1, %0|%0, %1}";
else
return "%vmovapd\t{%1, %0|%0, %1}";
case MODE_OI:
case MODE_TI:
if (misaligned_operand (operands[0], <MODE>mode)
|| misaligned_operand (operands[1], <MODE>mode))
return TARGET_AVX512VL ? "vmovdqu<ssescalarsize>\t{%1, %0|%0, %1}"
: "%vmovdqu\t{%1, %0|%0, %1}";
else
return TARGET_AVX512VL ? "vmovdqa64\t{%1, %0|%0, %1}"
: "%vmovdqa\t{%1, %0|%0, %1}";
case MODE_XI:
if (misaligned_operand (operands[0], <MODE>mode)
|| misaligned_operand (operands[1], <MODE>mode))
return (<MODE>mode == V16SImode
|| <MODE>mode == V8DImode
|| TARGET_AVX512BW)
? "vmovdqu<ssescalarsize>\t{%1, %0|%0, %1}"
: "vmovdqu64\t{%1, %0|%0, %1}";
else
return "vmovdqa64\t{%1, %0|%0, %1}";
default:
gcc_unreachable ();
}
default:
gcc_unreachable ();
}
}
[(set_attr "type" "sselog1,sselog1,ssemov,ssemov")
(set_attr "prefix" "maybe_vex")
(set (attr "mode")
(cond [(and (eq_attr "alternative" "1")
(match_test "TARGET_AVX512VL"))
(const_string "XI")
(and (match_test " == 16")
(ior (match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL")
(and (eq_attr "alternative" "3")
(match_test "TARGET_SSE_TYPELESS_STORES"))))
(const_string "")
(match_test "TARGET_AVX")
(const_string "")
(ior (not (match_test "TARGET_SSE2"))
(match_test "optimize_function_for_size_p (cfun)"))
(const_string "V4SF")
(and (eq_attr "alternative" "0")
(match_test "TARGET_SSE_LOAD0_BY_PXOR"))
(const_string "TI")
]
(const_string "")))
(set (attr "enabled")
(cond [(and (match_test " == 16")
(eq_attr "alternative" "1"))
(symbol_ref "TARGET_SSE2")
(and (match_test " == 32")
(eq_attr "alternative" "1"))
(symbol_ref "TARGET_AVX2")
]
(symbol_ref "true")))])
(define_insn "_load_mask"
[(set (match_operand:V48_AVX512VL 0 "register_operand" "=v,v")
(vec_merge:V48_AVX512VL
(match_operand:V48_AVX512VL 1 "nonimmediate_operand" "v,m")
(match_operand:V48_AVX512VL 2 "vector_move_operand" "0C,0C")
(match_operand:<avx512fmaskmode> 3 "register_operand" "Yk,Yk")))]
"TARGET_AVX512F"
{
if (FLOAT_MODE_P (GET_MODE_INNER (<MODE>mode)))
{
if (misaligned_operand (operands[1], <MODE>mode))
return "vmovu<ssemodesuffix>\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}";
else
return "vmova<ssemodesuffix>\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}";
}
else
{
if (misaligned_operand (operands[1], <MODE>mode))
return "vmovdqu<ssescalarsize>\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}";
else
return "vmovdqa<ssescalarsize>\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}";
}
}
[(set_attr "type" "ssemov")
(set_attr "prefix" "evex")
(set_attr "memory" "none,load")
(set_attr "mode" "")])
(define_insn "_load_mask"
[(set (match_operand:VI12_AVX512VL 0 "register_operand" "=v,v")
(vec_merge:VI12_AVX512VL
(match_operand:VI12_AVX512VL 1 "nonimmediate_operand" "v,m")
(match_operand:VI12_AVX512VL 2 "vector_move_operand" "0C,0C")
(match_operand:<avx512fmaskmode> 3 "register_operand" "Yk,Yk")))]
"TARGET_AVX512BW"
"vmovdqu\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}"
[(set_attr "type" "ssemov")
(set_attr "prefix" "evex")
(set_attr "memory" "none,load")
(set_attr "mode" "")])
(define_insn "_blendm"
[(set (match_operand:V48_AVX512VL 0 "register_operand" "=v")
(vec_merge:V48_AVX512VL
(match_operand:V48_AVX512VL 2 "nonimmediate_operand" "vm")
(match_operand:V48_AVX512VL 1 "register_operand" "v")
(match_operand:<avx512fmaskmode> 3 "register_operand" "Yk")))]
"TARGET_AVX512F"
"vblendm\t{%2, %1, %0%{%3%}|%0%{%3%}, %1, %2}"
[(set_attr "type" "ssemov")
(set_attr "prefix" "evex")
(set_attr "mode" "")])
(define_insn "_blendm"
[(set (match_operand:VI12_AVX512VL 0 "register_operand" "=v")
(vec_merge:VI12_AVX512VL
(match_operand:VI12_AVX512VL 2 "nonimmediate_operand" "vm")
(match_operand:VI12_AVX512VL 1 "register_operand" "v")
(match_operand:<avx512fmaskmode> 3 "register_operand" "Yk")))]
"TARGET_AVX512BW"
"vpblendm\t{%2, %1, %0%{%3%}|%0%{%3%}, %1, %2}"
[(set_attr "type" "ssemov")
(set_attr "prefix" "evex")
(set_attr "mode" "")])
(define_insn "_store_mask"
[(set (match_operand:V48_AVX512VL 0 "memory_operand" "=m")
(vec_merge:V48_AVX512VL
(match_operand:V48_AVX512VL 1 "register_operand" "v")
(match_dup 0)
(match_operand:<avx512fmaskmode> 2 "register_operand" "Yk")))]
"TARGET_AVX512F"
{
if (FLOAT_MODE_P (GET_MODE_INNER (<MODE>mode)))
{
if (misaligned_operand (operands[0], <MODE>mode))
return "vmovu<ssemodesuffix>\t{%1, %0%{%2%}|%0%{%2%}, %1}";
else
return "vmova<ssemodesuffix>\t{%1, %0%{%2%}|%0%{%2%}, %1}";
}
else
{
if (misaligned_operand (operands[0], <MODE>mode))
return "vmovdqu<ssescalarsize>\t{%1, %0%{%2%}|%0%{%2%}, %1}";
else
return "vmovdqa<ssescalarsize>\t{%1, %0%{%2%}|%0%{%2%}, %1}";
}
}
[(set_attr "type" "ssemov")
(set_attr "prefix" "evex")
(set_attr "memory" "store")
(set_attr "mode" "")])
(define_insn "_store_mask"
[(set (match_operand:VI12_AVX512VL 0 "memory_operand" "=m")
(vec_merge:VI12_AVX512VL
(match_operand:VI12_AVX512VL 1 "register_operand" "v")
(match_dup 0)
(match_operand:<avx512fmaskmode> 2 "register_operand" "Yk")))]
"TARGET_AVX512BW"
"vmovdqu\t{%1, %0%{%2%}|%0%{%2%}, %1}"
[(set_attr "type" "ssemov")
(set_attr "prefix" "evex")
(set_attr "memory" "store")
(set_attr "mode" "")])
(define_insn "sse2_movq128"
[(set (match_operand:V2DI 0 "register_operand" "=v")
(vec_concat:V2DI
(vec_select:DI
(match_operand:V2DI 1 "nonimmediate_operand" "vm")
(parallel [(const_int 0)]))
(const_int 0)))]
"TARGET_SSE2"
"%vmovq\t{%1, %0|%0, %q1}"
[(set_attr "type" "ssemov")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "TI")])
;; Move a DI from a 32-bit register pair (e.g. %edx:%eax) to an xmm.
;; We'd rather avoid this entirely; if the 32-bit reg pair was loaded
;; from memory, we'd prefer to load the memory directly into the %xmm
;; register. To facilitate this happy circumstance, this pattern won't
;; split until after register allocation. If the 64-bit value didn't
;; come from memory, this is the best we can do. This is much better
;; than storing %edx:%eax into a stack temporary and loading an %xmm
;; from there.
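;; Illustrative only: for a register-pair source the split below emits
;; roughly
;;	movd	%eax, %xmm0
;;	movd	%edx, %xmm2
;;	punpckldq %xmm2, %xmm0
;; whereas a memory source becomes a single movq-style load through
;; vec_concatv2di.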
(define_insn_and_split "movdi_to_sse"
[(set (match_operand:V4SI 0 "register_operand" "=?x,x")
(unspec:V4SI [(match_operand:DI 1 "nonimmediate_operand" "r,m")]
UNSPEC_MOVDI_TO_SSE))
(clobber (match_scratch:V4SI 2 "=&x,X"))]
"!TARGET_64BIT && TARGET_SSE2 && TARGET_INTER_UNIT_MOVES_TO_VEC"
"#"
"&& reload_completed"
[(const_int 0)]
{
if (register_operand (operands[1], DImode))
{
/* The DImode arrived in a pair of integral registers (e.g. %edx:%eax).
Assemble the 64-bit DImode value in an xmm register. */
emit_insn (gen_sse2_loadld (operands[0], CONST0_RTX (V4SImode),
gen_lowpart (SImode, operands[1])));
emit_insn (gen_sse2_loadld (operands[2], CONST0_RTX (V4SImode),
gen_highpart (SImode, operands[1])));
emit_insn (gen_vec_interleave_lowv4si (operands[0], operands[0],
operands[2]));
}
else if (memory_operand (operands[1], DImode))
emit_insn (gen_vec_concatv2di (gen_lowpart (V2DImode, operands[0]),
operands[1], const0_rtx));
else
gcc_unreachable ();
DONE;
})
(define_split
[(set (match_operand:V4SF 0 "register_operand")
(match_operand:V4SF 1 "zero_extended_scalar_load_operand"))]
"TARGET_SSE && reload_completed"
[(set (match_dup 0)
(vec_merge:V4SF
(vec_duplicate:V4SF (match_dup 1))
(match_dup 2)
(const_int 1)))]
{
operands[1] = gen_lowpart (SFmode, operands[1]);
operands[2] = CONST0_RTX (V4SFmode);
})
(define_split
[(set (match_operand:V2DF 0 "register_operand")
(match_operand:V2DF 1 "zero_extended_scalar_load_operand"))]
"TARGET_SSE2 && reload_completed"
[(set (match_dup 0) (vec_concat:V2DF (match_dup 1) (match_dup 2)))]
{
operands[1] = gen_lowpart (DFmode, operands[1]);
operands[2] = CONST0_RTX (DFmode);
})
(define_expand "movmisalign"
[(set (match_operand:VMOVE 0 "nonimmediate_operand")
(match_operand:VMOVE 1 "nonimmediate_operand"))]
"TARGET_SSE"
{
ix86_expand_vector_move_misalign (<MODE>mode, operands);
DONE;
})
;; Merge movsd/movhpd to movupd for TARGET_SSE_UNALIGNED_LOAD_OPTIMAL targets.
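;; Illustrative only: a pair such as
;;	movsd	(%rax), %xmm0
;;	movhpd	8(%rax), %xmm0
;; is rewritten into a single
;;	movupd	(%rax), %xmm0
;; when unaligned loads are cheap on the target.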
(define_peephole2
[(set (match_operand:V2DF 0 "sse_reg_operand")
(vec_concat:V2DF (match_operand:DF 1 "memory_operand")
(match_operand:DF 4 "const0_operand")))
(set (match_operand:V2DF 2 "sse_reg_operand")
(vec_concat:V2DF (vec_select:DF (match_dup 2)
(parallel [(const_int 0)]))
(match_operand:DF 3 "memory_operand")))]
"TARGET_SSE2 && TARGET_SSE_UNALIGNED_LOAD_OPTIMAL
&& ix86_operands_ok_for_move_multiple (operands, true, DFmode)"
[(set (match_dup 2) (match_dup 5))]
"operands[5] = adjust_address (operands[1], V2DFmode, 0);")
(define_peephole2
[(set (match_operand:DF 0 "sse_reg_operand")
(match_operand:DF 1 "memory_operand"))
(set (match_operand:V2DF 2 "sse_reg_operand")
(vec_concat:V2DF (match_operand:DF 4 "sse_reg_operand")
(match_operand:DF 3 "memory_operand")))]
"TARGET_SSE2 && TARGET_SSE_UNALIGNED_LOAD_OPTIMAL
&& REGNO (operands[4]) == REGNO (operands[2])
&& ix86_operands_ok_for_move_multiple (operands, true, DFmode)"
[(set (match_dup 2) (match_dup 5))]
"operands[5] = adjust_address (operands[1], V2DFmode, 0);")
;; Merge movlpd/movhpd to movupd for TARGET_SSE_UNALIGNED_STORE_OPTIMAL targets.
(define_peephole2
[(set (match_operand:DF 0 "memory_operand")
(vec_select:DF (match_operand:V2DF 1 "sse_reg_operand")
(parallel [(const_int 0)])))
(set (match_operand:DF 2 "memory_operand")
(vec_select:DF (match_operand:V2DF 3 "sse_reg_operand")
(parallel [(const_int 1)])))]
"TARGET_SSE2 && TARGET_SSE_UNALIGNED_STORE_OPTIMAL
&& ix86_operands_ok_for_move_multiple (operands, false, DFmode)"
[(set (match_dup 4) (match_dup 1))]
"operands[4] = adjust_address (operands[0], V2DFmode, 0);")
(define_insn "_lddqu"
[(set (match_operand:VI1 0 "register_operand" "=x")
(unspec:VI1 [(match_operand:VI1 1 "memory_operand" "m")]
UNSPEC_LDDQU))]
"TARGET_SSE3"
"%vlddqu\t{%1, %0|%0, %1}"
[(set_attr "type" "ssemov")
(set_attr "movu" "1")
(set (attr "prefix_data16")
(if_then_else
(match_test "TARGET_AVX")
(const_string "*")
(const_string "0")))
(set (attr "prefix_rep")
(if_then_else
(match_test "TARGET_AVX")
(const_string "*")
(const_string "1")))
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "")])
(define_insn "sse2_movnti"
[(set (match_operand:SWI48 0 "memory_operand" "=m")
(unspec:SWI48 [(match_operand:SWI48 1 "register_operand" "r")]
UNSPEC_MOVNT))]
"TARGET_SSE2"
"movnti\t{%1, %0|%0, %1}"
[(set_attr "type" "ssemov")
(set_attr "prefix_data16" "0")
(set_attr "mode" "")])
(define_insn "_movnt"
[(set (match_operand:VF 0 "memory_operand" "=m")
(unspec:VF
[(match_operand:VF 1 "register_operand" "v")]
UNSPEC_MOVNT))]
"TARGET_SSE"
"%vmovnt\t{%1, %0|%0, %1}"
[(set_attr "type" "ssemov")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "")])
(define_insn "_movnt"
[(set (match_operand:VI8 0 "memory_operand" "=m")
(unspec:VI8 [(match_operand:VI8 1 "register_operand" "v")]
UNSPEC_MOVNT))]
"TARGET_SSE2"
"%vmovntdq\t{%1, %0|%0, %1}"
[(set_attr "type" "ssecvt")
(set (attr "prefix_data16")
(if_then_else
(match_test "TARGET_AVX")
(const_string "*")
(const_string "1")))
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "")])
; Expand patterns for non-temporal stores. At the moment, only those
; that directly map to insns are defined; it would be possible to
; define patterns for other modes that would expand to several insns.
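; Illustrative only: stores that the middle end has marked non-temporal
; reach the storent expander below, while intrinsics such as _mm_stream_pd
; or _mm256_stream_ps expand directly to the <sse>_movnt<mode> insns above,
; e.g. vmovntps %ymm0, (%rdi).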
;; Modes handled by storent patterns.
(define_mode_iterator STORENT_MODE
[(DI "TARGET_SSE2 && TARGET_64BIT") (SI "TARGET_SSE2")
(SF "TARGET_SSE4A") (DF "TARGET_SSE4A")
(V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX") (V2DI "TARGET_SSE2")
(V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF
(V8DF "TARGET_AVX512F") (V4DF "TARGET_AVX") (V2DF "TARGET_SSE2")])
(define_expand "storent"
[(set (match_operand:STORENT_MODE 0 "memory_operand")
(unspec:STORENT_MODE
[(match_operand:STORENT_MODE 1 "register_operand")]
UNSPEC_MOVNT))]
"TARGET_SSE")
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Mask operations
;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; All integer modes with AVX512BW/DQ.
(define_mode_iterator SWI1248_AVX512BWDQ
[(QI "TARGET_AVX512DQ") HI (SI "TARGET_AVX512BW") (DI "TARGET_AVX512BW")])
;; All integer modes with AVX512BW, where HImode operation
;; can be used instead of QImode.
(define_mode_iterator SWI1248_AVX512BW
[QI HI (SI "TARGET_AVX512BW") (DI "TARGET_AVX512BW")])
;; All integer modes with AVX512BW/DQ, even HImode requires DQ.
(define_mode_iterator SWI1248_AVX512BWDQ2
[(QI "TARGET_AVX512DQ") (HI "TARGET_AVX512DQ")
(SI "TARGET_AVX512BW") (DI "TARGET_AVX512BW")])
(define_expand "kmov"
[(set (match_operand:SWI1248_AVX512BWDQ 0 "nonimmediate_operand")
(match_operand:SWI1248_AVX512BWDQ 1 "nonimmediate_operand"))]
"TARGET_AVX512F
&& !(MEM_P (operands[0]) && MEM_P (operands[1]))")
(define_insn "k"
[(set (match_operand:SWI1248_AVX512BW 0 "register_operand" "=k")
(any_logic:SWI1248_AVX512BW
(match_operand:SWI1248_AVX512BW 1 "register_operand" "k")
(match_operand:SWI1248_AVX512BW 2 "register_operand" "k")))
(unspec [(const_int 0)] UNSPEC_MASKOP)]
"TARGET_AVX512F"
{
if (get_attr_mode (insn) == MODE_HI)
return "kw\t{%2, %1, %0|%0, %1, %2}";
else
return "k\t{%2, %1, %0|%0, %1, %2}";
}
[(set_attr "type" "msklog")
(set_attr "prefix" "vex")
(set (attr "mode")
(cond [(and (match_test "mode == QImode")
(not (match_test "TARGET_AVX512DQ")))
(const_string "HI")
]
(const_string "")))])
(define_insn "kandn"
[(set (match_operand:SWI1248_AVX512BW 0 "register_operand" "=k")
(and:SWI1248_AVX512BW
(not:SWI1248_AVX512BW
(match_operand:SWI1248_AVX512BW 1 "register_operand" "k"))
(match_operand:SWI1248_AVX512BW 2 "register_operand" "k")))
(unspec [(const_int 0)] UNSPEC_MASKOP)]
"TARGET_AVX512F"
{
if (get_attr_mode (insn) == MODE_HI)
return "kandnw\t{%2, %1, %0|%0, %1, %2}";
else
return "kandn\t{%2, %1, %0|%0, %1, %2}";
}
[(set_attr "type" "msklog")
(set_attr "prefix" "vex")
(set (attr "mode")
(cond [(and (match_test "mode == QImode")
(not (match_test "TARGET_AVX512DQ")))
(const_string "HI")
]
(const_string "")))])
(define_insn "kxnor"
[(set (match_operand:SWI1248_AVX512BW 0 "register_operand" "=k")
(not:SWI1248_AVX512BW
(xor:SWI1248_AVX512BW
(match_operand:SWI1248_AVX512BW 1 "register_operand" "k")
(match_operand:SWI1248_AVX512BW 2 "register_operand" "k"))))
(unspec [(const_int 0)] UNSPEC_MASKOP)]
"TARGET_AVX512F"
{
if (get_attr_mode (insn) == MODE_HI)
return "kxnorw\t{%2, %1, %0|%0, %1, %2}";
else
return "kxnor\t{%2, %1, %0|%0, %1, %2}";
}
[(set_attr "type" "msklog")
(set_attr "prefix" "vex")
(set (attr "mode")
(cond [(and (match_test "mode == QImode")
(not (match_test "TARGET_AVX512DQ")))
(const_string "HI")
]
(const_string "")))])
(define_insn "knot"
[(set (match_operand:SWI1248_AVX512BW 0 "register_operand" "=k")
(not:SWI1248_AVX512BW
(match_operand:SWI1248_AVX512BW 1 "register_operand" "k")))
(unspec [(const_int 0)] UNSPEC_MASKOP)]
"TARGET_AVX512F"
{
if (get_attr_mode (insn) == MODE_HI)
return "knotw\t{%1, %0|%0, %1}";
else
return "knot\t{%1, %0|%0, %1}";
}
[(set_attr "type" "msklog")
(set_attr "prefix" "vex")
(set (attr "mode")
(cond [(and (match_test "mode == QImode")
(not (match_test "TARGET_AVX512DQ")))
(const_string "HI")
]
(const_string "")))])
(define_insn "kadd"
[(set (match_operand:SWI1248_AVX512BWDQ2 0 "register_operand" "=k")
(plus:SWI1248_AVX512BWDQ2
(match_operand:SWI1248_AVX512BWDQ2 1 "register_operand" "k")
(match_operand:SWI1248_AVX512BWDQ2 2 "register_operand" "k")))
(unspec [(const_int 0)] UNSPEC_MASKOP)]
"TARGET_AVX512F"
"kadd\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "msklog")
(set_attr "prefix" "vex")
(set_attr "mode" "")])
;; Mask variant shift mnemonics
(define_code_attr mshift [(ashift "shiftl") (lshiftrt "shiftr")])
(define_insn "k"
[(set (match_operand:SWI1248_AVX512BWDQ 0 "register_operand" "=k")
(any_lshift:SWI1248_AVX512BWDQ
(match_operand:SWI1248_AVX512BWDQ 1 "register_operand" "k")
(match_operand:QI 2 "immediate_operand" "n")))
(unspec [(const_int 0)] UNSPEC_MASKOP)]
"TARGET_AVX512F"
"k\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "msklog")
(set_attr "prefix" "vex")
(set_attr "mode" "")])
(define_insn "ktest"
[(set (reg:CC FLAGS_REG)
(unspec:CC
[(match_operand:SWI1248_AVX512BWDQ2 0 "register_operand" "k")
(match_operand:SWI1248_AVX512BWDQ2 1 "register_operand" "k")]
UNSPEC_KTEST))]
"TARGET_AVX512F"
"ktest\t{%1, %0|%0, %1}"
[(set_attr "mode" "")
(set_attr "type" "msklog")
(set_attr "prefix" "vex")])
(define_insn "kortest"
[(set (reg:CC FLAGS_REG)
(unspec:CC
[(match_operand:SWI1248_AVX512BWDQ 0 "register_operand" "k")
(match_operand:SWI1248_AVX512BWDQ 1 "register_operand" "k")]
UNSPEC_KORTEST))]
"TARGET_AVX512F"
"kortest\t{%1, %0|%0, %1}"
[(set_attr "mode" "")
(set_attr "type" "msklog")
(set_attr "prefix" "vex")])
(define_insn "kunpckhi"
[(set (match_operand:HI 0 "register_operand" "=k")
(ior:HI
(ashift:HI
(zero_extend:HI (match_operand:QI 1 "register_operand" "k"))
(const_int 8))
(zero_extend:HI (match_operand:QI 2 "register_operand" "k"))))]
"TARGET_AVX512F"
"kunpckbw\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "mode" "HI")
(set_attr "type" "msklog")
(set_attr "prefix" "vex")])
(define_insn "kunpcksi"
[(set (match_operand:SI 0 "register_operand" "=k")
(ior:SI
(ashift:SI
(zero_extend:SI (match_operand:HI 1 "register_operand" "k"))
(const_int 16))
(zero_extend:SI (match_operand:HI 2 "register_operand" "k"))))]
"TARGET_AVX512BW"
"kunpckwd\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "mode" "SI")])
(define_insn "kunpckdi"
[(set (match_operand:DI 0 "register_operand" "=k")
(ior:DI
(ashift:DI
(zero_extend:DI (match_operand:SI 1 "register_operand" "k"))
(const_int 32))
(zero_extend:DI (match_operand:SI 2 "register_operand" "k"))))]
"TARGET_AVX512BW"
"kunpckdq\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "mode" "DI")])
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Parallel floating point arithmetic
;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(define_expand "2"
[(set (match_operand:VF 0 "register_operand")
(absneg:VF
(match_operand:VF 1 "register_operand")))]
"TARGET_SSE"
"ix86_expand_fp_absneg_operator (, mode, operands); DONE;")
(define_insn_and_split "*absneg2"
[(set (match_operand:VF 0 "register_operand" "=x,x,v,v")
(match_operator:VF 3 "absneg_operator"
[(match_operand:VF 1 "vector_operand" "0, xBm,v, m")]))
(use (match_operand:VF 2 "vector_operand" "xBm,0, vm,v"))]
"TARGET_SSE"
"#"
"&& reload_completed"
[(const_int 0)]
{
enum rtx_code absneg_op;
rtx op1, op2;
rtx t;
if (TARGET_AVX)
{
if (MEM_P (operands[1]))
op1 = operands[2], op2 = operands[1];
else
op1 = operands[1], op2 = operands[2];
}
else
{
op1 = operands[0];
if (rtx_equal_p (operands[0], operands[1]))
op2 = operands[2];
else
op2 = operands[1];
}
absneg_op = GET_CODE (operands[3]) == NEG ? XOR : AND;
t = gen_rtx_fmt_ee (absneg_op, <MODE>mode, op1, op2);
t = gen_rtx_SET (operands[0], t);
emit_insn (t);
DONE;
}
[(set_attr "isa" "noavx,noavx,avx,avx")])
(define_expand "3"
[(set (match_operand:VF 0 "register_operand")
(plusminus:VF
(match_operand:VF 1 "")
(match_operand:VF 2 "")))]
"TARGET_SSE && && "
"ix86_fixup_binary_operands_no_copy (, mode, operands);")
(define_insn "*3"
[(set (match_operand:VF 0 "register_operand" "=x,v")
(plusminus:VF
(match_operand:VF 1 "" "0,v")
(match_operand:VF 2 "" "xBm,")))]
"TARGET_SSE && ix86_binary_operator_ok (, mode, operands) && && "
"@
<plusminus_mnemonic><ssemodesuffix>\t{%2, %0|%0, %2}
v<plusminus_mnemonic><ssemodesuffix>\t{<round_mask_op3>%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2<round_mask_op3>}"
[(set_attr "isa" "noavx,avx")
(set_attr "type" "sseadd")
(set_attr "prefix" "")
(set_attr "mode" "")])
(define_insn "_vm3"
[(set (match_operand:VF_128 0 "register_operand" "=x,v")
(vec_merge:VF_128
(plusminus:VF_128
(match_operand:VF_128 1 "register_operand" "0,v")
(match_operand:VF_128 2 "vector_operand" "xBm,"))
(match_dup 1)
(const_int 1)))]
"TARGET_SSE"
"@
<plusminus_mnemonic><ssescalarmodesuffix>\t{%2, %0|%0, %<iptr>2}
v<plusminus_mnemonic><ssescalarmodesuffix>\t{<round_op3>%2, %1, %0|%0, %1, %<iptr>2<round_op3>}"
[(set_attr "isa" "noavx,avx")
(set_attr "type" "sseadd")
(set_attr "prefix" "")
(set_attr "mode" "")])
(define_expand "mul3"
[(set (match_operand:VF 0 "register_operand")
(mult:VF
(match_operand:VF 1 "")
(match_operand:VF 2 "")))]
"TARGET_SSE && && "
"ix86_fixup_binary_operands_no_copy (MULT, mode, operands);")
(define_insn "*mul3"
[(set (match_operand:VF 0 "register_operand" "=x,v")
(mult:VF
(match_operand:VF 1 "" "%0,v")
(match_operand:VF 2 "" "xBm,")))]
"TARGET_SSE && ix86_binary_operator_ok (MULT, mode, operands) && && "
"@
mul<ssemodesuffix>\t{%2, %0|%0, %2}
vmul<ssemodesuffix>\t{<round_mask_op3>%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2<round_mask_op3>}"
[(set_attr "isa" "noavx,avx")
(set_attr "type" "ssemul")
(set_attr "prefix" "")
(set_attr "btver2_decode" "direct,double")
(set_attr "mode" "")])
(define_insn "_vm3"
[(set (match_operand:VF_128 0 "register_operand" "=x,v")
(vec_merge:VF_128
(multdiv:VF_128
(match_operand:VF_128 1 "register_operand" "0,v")
(match_operand:VF_128 2 "vector_operand" "xBm,"))
(match_dup 1)
(const_int 1)))]
"TARGET_SSE"
"@
<multdiv_mnemonic><ssescalarmodesuffix>\t{%2, %0|%0, %<iptr>2}
v<multdiv_mnemonic><ssescalarmodesuffix>\t{<round_op3>%2, %1, %0|%0, %1, %<iptr>2<round_op3>}"
[(set_attr "isa" "noavx,avx")
(set_attr "type" "sse")
(set_attr "prefix" "")
(set_attr "btver2_decode" "direct,double")
(set_attr "mode" "")])
(define_expand "div3"
[(set (match_operand:VF2 0 "register_operand")
(div:VF2 (match_operand:VF2 1 "register_operand")
(match_operand:VF2 2 "vector_operand")))]
"TARGET_SSE2"
"ix86_fixup_binary_operands_no_copy (DIV, mode, operands);")
(define_expand "div3"
[(set (match_operand:VF1 0 "register_operand")
(div:VF1 (match_operand:VF1 1 "register_operand")
(match_operand:VF1 2 "vector_operand")))]
"TARGET_SSE"
{
ix86_fixup_binary_operands_no_copy (DIV, <MODE>mode, operands);
if (TARGET_SSE_MATH
&& TARGET_RECIP_VEC_DIV
&& !optimize_insn_for_size_p ()
&& flag_finite_math_only && !flag_trapping_math
&& flag_unsafe_math_optimizations)
{
ix86_emit_swdivsf (operands[0], operands[1], operands[2], <MODE>mode);
DONE;
}
})
(define_insn "_div3"
[(set (match_operand:VF 0 "register_operand" "=x,v")
(div:VF
(match_operand:VF 1 "register_operand" "0,v")
(match_operand:VF 2 "" "xBm,")))]
"TARGET_SSE && && "
"@
div<ssemodesuffix>\t{%2, %0|%0, %2}
vdiv<ssemodesuffix>\t{<round_mask_op3>%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2<round_mask_op3>}"
[(set_attr "isa" "noavx,avx")
(set_attr "type" "ssediv")
(set_attr "prefix" "")
(set_attr "mode" "")])
(define_insn "_rcp2"
[(set (match_operand:VF1_128_256 0 "register_operand" "=x")
(unspec:VF1_128_256
[(match_operand:VF1_128_256 1 "vector_operand" "xBm")] UNSPEC_RCP))]
"TARGET_SSE"
"%vrcpps\t{%1, %0|%0, %1}"
[(set_attr "type" "sse")
(set_attr "atom_sse_attr" "rcp")
(set_attr "btver2_sse_attr" "rcp")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "")])
(define_insn "sse_vmrcpv4sf2"
[(set (match_operand:V4SF 0 "register_operand" "=x,x")
(vec_merge:V4SF
(unspec:V4SF [(match_operand:V4SF 1 "nonimmediate_operand" "xm,xm")]
UNSPEC_RCP)
(match_operand:V4SF 2 "register_operand" "0,x")
(const_int 1)))]
"TARGET_SSE"
"@
rcpss\t{%1, %0|%0, %k1}
vrcpss\t{%1, %2, %0|%0, %2, %k1}"
[(set_attr "isa" "noavx,avx")
(set_attr "type" "sse")
(set_attr "atom_sse_attr" "rcp")
(set_attr "btver2_sse_attr" "rcp")
(set_attr "prefix" "orig,vex")
(set_attr "mode" "SF")])
(define_insn "rcp14"
[(set (match_operand:VF_AVX512VL 0 "register_operand" "=v")
(unspec:VF_AVX512VL
[(match_operand:VF_AVX512VL 1 "nonimmediate_operand" "vm")]
UNSPEC_RCP14))]
"TARGET_AVX512F"
"vrcp14\t{%1, %0|%0, %1}"
[(set_attr "type" "sse")
(set_attr "prefix" "evex")
(set_attr "mode" "")])
(define_insn "srcp14"
[(set (match_operand:VF_128 0 "register_operand" "=v")
(vec_merge:VF_128
(unspec:VF_128
[(match_operand:VF_128 1 "nonimmediate_operand" "vm")]
UNSPEC_RCP14)
(match_operand:VF_128 2 "register_operand" "v")
(const_int 1)))]
"TARGET_AVX512F"
"vrcp14\t{%1, %2, %0|%0, %2, %1}"
[(set_attr "type" "sse")
(set_attr "prefix" "evex")
(set_attr "mode" "")])
(define_expand "sqrt