[X86] optimize saturating (masked) pack #169995
Conversation
@llvm/pr-subscribers-backend-x86

Author: Folkert de Vries (folkertdev)

Changes

optimize (masked) saturating packs

Manual packss and packus operations like _mm512_packss_epi32 and its masked variant don't optimize well: https://godbolt.org/z/Gendfd1jj

src:
vpmovsxbq zmm2, qword ptr [rip + .LCPI0_2] # zmm2 = [4,5,12,13,6,7,14,15]
vpermi2q zmm2, zmm0, zmm1
vpmovsxbq zmm3, qword ptr [rip + .LCPI0_3] # zmm3 = [0,1,8,9,2,3,10,11]
vpermi2q zmm3, zmm0, zmm1
vpmovsdw ymm0, zmm3
vpmovsdw ymm1, zmm2
vinserti64x4 zmm0, zmm0, ymm1, 1
ret
tgt:
vpackssdw zmm1, zmm0, zmm0
ret

This requires some additional logic to recognize the shuffle mask. There are some existing functions for various masks, but not the sort of lane-interleaving one that is needed here. Maybe there is some better way though?

When the right SIMD width is not available, packss optimizes much better than packus. I don't see how that is due to my code though; I suspect that is some other problem. The signed case removes the truncation fully when the packs is inserted, while the unsigned version leaves the umin intact. Not that important really, but maybe it is indicative of some underlying problem?

Patch is 92.96 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/169995.diff

5 Files Affected:
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index d49f25a950e3a..8ff476b87dc5e 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -52829,6 +52829,91 @@ static SDValue foldVectorXorShiftIntoCmp(SDNode *N, SelectionDAG &DAG,
return DAG.getSetCC(SDLoc(N), VT, Shift.getOperand(0), Ones, ISD::SETGT);
}
+// Check whether this is a shuffle that interleaves the lanes of the two input
+// vectors, e.g. when interleaving two v8i32 into a single v16i32 the mask is
+// <0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23>. Indices are based
+// on the target type.
+static bool isLaneInterleaveMask(ArrayRef<int> Mask, MVT VT) {
+ assert(VT.isVector() && "Expected vector VT.");
+
+ MVT ElemVT = VT.getScalarType();
+ unsigned NumElts = VT.getVectorNumElements();
+ unsigned EltBits = ElemVT.getSizeInBits();
+
+ if (Mask.size() != NumElts)
+ return false;
+
+ // A lane is 128 bits.
+ if (EltBits == 0 || (128u % EltBits) != 0)
+ return false;
+
+ // So 4 for i32, 8 for i16, etc.
+ unsigned EltsPerLane = 128u / EltBits;
+ unsigned GroupSize = 2 * EltsPerLane;
+
+ if (NumElts % GroupSize != 0)
+ return false;
+
+ unsigned Pos = 0;
+ for (unsigned G = 0; G != (NumElts / GroupSize); ++G) {
+ // Indices are based on the output type, hence B starts at NumElts.
+ unsigned ABase = G * EltsPerLane;
+ unsigned BBase = NumElts + G * EltsPerLane;
+
+ for (unsigned I = 0; I != EltsPerLane; ++I)
+ if (Mask[Pos++] != (int)(ABase + I))
+ return false;
+
+ for (unsigned I = 0; I != EltsPerLane; ++I)
+ if (Mask[Pos++] != (int)(BBase + I))
+ return false;
+ }
+
+ return true;
+}
+
+// Check whether this is a shuffle that interleaves the lanes of the two input
+// vectors, e.g. for v16i32 the mask is <0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7,
+// 20, 21, 22, 23>.
+static bool isLaneInterleaveShuffle(MVT VT, SDValue Shuf, SDValue &A,
+ SDValue &B, const SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
+ // For the _mm_pack{u|s}s variants, the shuffle is trivial and therefore
+ // elided.
+ if (VT == MVT::v16i16 || VT == MVT::v8i32) {
+ if (Shuf.getOpcode() == ISD::CONCAT_VECTORS && Shuf.getNumOperands() == 2) {
+ A = Shuf->getOperand(0);
+ B = Shuf->getOperand(1);
+ return true;
+ }
+
+ return false;
+ }
+
+ auto *SVN = dyn_cast<ShuffleVectorSDNode>(Shuf.getNode());
+ if (!SVN)
+ return false;
+
+ ArrayRef<int> TargetMask = SVN->getMask();
+ SDValue V1 = SVN->getOperand(0);
+ SDValue V2 = SVN->getOperand(1);
+
+ if (isLaneInterleaveMask(TargetMask, VT)) {
+ auto peelConcat = [](SDValue V) -> SDValue {
+ if (V.getOpcode() == ISD::CONCAT_VECTORS && V.getNumOperands() == 2)
+ return V.getOperand(0);
+ return V;
+ };
+
+ // The upper half is undefined.
+ A = peelConcat(V1);
+ B = peelConcat(V2);
+ return true;
+ }
+
+ return false;
+}
+
/// Detect patterns of truncation with unsigned saturation:
///
/// 1. (truncate (umin (x, unsigned_max_of_dest_type)) to dest_type).
@@ -52973,42 +53058,68 @@ static SDValue combineTruncateWithSat(SDValue In, EVT VT, const SDLoc &DL,
Subtarget);
}
+ if (!(SVT == MVT::i32 || SVT == MVT::i16 || SVT == MVT::i8))
+ return SDValue();
+
+ unsigned TruncOpc = 0;
+ SDValue SatVal;
+ if (SDValue SSatVal = detectSSatPattern(In, VT)) {
+ SatVal = SSatVal;
+ TruncOpc = X86ISD::VTRUNCS;
+ } else if (SDValue USatVal = detectUSatPattern(In, VT, DAG, DL)) {
+ SatVal = USatVal;
+ TruncOpc = X86ISD::VTRUNCUS;
+ } else {
+ return SDValue();
+ }
+
+ unsigned ResElts = VT.getVectorNumElements();
+
+ bool IsEpi16 = (SVT == MVT::i8 && InSVT == MVT::i16);
+ bool IsEpi32 = (SVT == MVT::i16 && InSVT == MVT::i32);
+
+ // Is there an advantageous pack given the current types and features?
+ unsigned Width = VT.getSizeInBits();
+ bool HasPackForWidth =
+ (Width == 128 && Subtarget.hasSSE41()) ||
+ (Width == 256 && Subtarget.hasAVX2()) ||
+ (Width == 512 && Subtarget.hasBWI() && Subtarget.hasVLX());
+
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- if (TLI.isTypeLegal(InVT) && InVT.isVector() && SVT != MVT::i1 &&
- Subtarget.hasAVX512() && (InSVT != MVT::i16 || Subtarget.hasBWI()) &&
- (SVT == MVT::i32 || SVT == MVT::i16 || SVT == MVT::i8)) {
- unsigned TruncOpc = 0;
- SDValue SatVal;
- if (SDValue SSatVal = detectSSatPattern(In, VT)) {
- SatVal = SSatVal;
- TruncOpc = X86ISD::VTRUNCS;
- } else if (SDValue USatVal = detectUSatPattern(In, VT, DAG, DL)) {
- SatVal = USatVal;
- TruncOpc = X86ISD::VTRUNCUS;
- }
- if (SatVal) {
- unsigned ResElts = VT.getVectorNumElements();
- // If the input type is less than 512 bits and we don't have VLX, we need
- // to widen to 512 bits.
- if (!Subtarget.hasVLX() && !InVT.is512BitVector()) {
- unsigned NumConcats = 512 / InVT.getSizeInBits();
- ResElts *= NumConcats;
- SmallVector<SDValue, 4> ConcatOps(NumConcats, DAG.getUNDEF(InVT));
- ConcatOps[0] = SatVal;
- InVT = EVT::getVectorVT(*DAG.getContext(), InSVT,
- NumConcats * InVT.getVectorNumElements());
- SatVal = DAG.getNode(ISD::CONCAT_VECTORS, DL, InVT, ConcatOps);
- }
- // Widen the result if its narrower than 128 bits.
- if (ResElts * SVT.getSizeInBits() < 128)
- ResElts = 128 / SVT.getSizeInBits();
- EVT TruncVT = EVT::getVectorVT(*DAG.getContext(), SVT, ResElts);
- SDValue Res = DAG.getNode(TruncOpc, DL, TruncVT, SatVal);
- return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
- DAG.getVectorIdxConstant(0, DL));
+ if (HasPackForWidth && (IsEpi16 || IsEpi32)) {
+ SDValue A, B;
+ if (isLaneInterleaveShuffle(InVT.getSimpleVT(), SatVal, A, B, DAG,
+ Subtarget)) {
+ unsigned PackOpc =
+ TruncOpc == X86ISD::VTRUNCS ? X86ISD::PACKSS : X86ISD::PACKUS;
+
+ return DAG.getNode(PackOpc, DL, VT, A, B);
}
}
+ if (TLI.isTypeLegal(InVT) && InVT.isVector() && SVT != MVT::i1 &&
+ Subtarget.hasAVX512() && (InSVT != MVT::i16 || Subtarget.hasBWI())) {
+
+ // If the input type is less than 512 bits and we don't have VLX, we
+ // need to widen to 512 bits.
+ if (!Subtarget.hasVLX() && !InVT.is512BitVector()) {
+ unsigned NumConcats = 512 / InVT.getSizeInBits();
+ ResElts *= NumConcats;
+ SmallVector<SDValue, 4> ConcatOps(NumConcats, DAG.getUNDEF(InVT));
+ ConcatOps[0] = SatVal;
+ InVT = EVT::getVectorVT(*DAG.getContext(), InSVT,
+ NumConcats * InVT.getVectorNumElements());
+ SatVal = DAG.getNode(ISD::CONCAT_VECTORS, DL, InVT, ConcatOps);
+ }
+ // Widen the result if it's narrower than 128 bits.
+ if (ResElts * SVT.getSizeInBits() < 128)
+ ResElts = 128 / SVT.getSizeInBits();
+ EVT TruncVT = EVT::getVectorVT(*DAG.getContext(), SVT, ResElts);
+ SDValue Res = DAG.getNode(TruncOpc, DL, TruncVT, SatVal);
+ return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
+ DAG.getVectorIdxConstant(0, DL));
+ }
+
return SDValue();
}
diff --git a/llvm/test/CodeGen/X86/masked_packss.ll b/llvm/test/CodeGen/X86/masked_packss.ll
new file mode 100644
index 0000000000000..183cfec4a7933
--- /dev/null
+++ b/llvm/test/CodeGen/X86/masked_packss.ll
@@ -0,0 +1,189 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,avx512vl | FileCheck %s --check-prefixes=AVX512
+
+define <16 x i8> @_mm_mask_packss_epi16_manual(<16 x i8> %src, i16 noundef %k, <8 x i16> %a, <8 x i16> %b) unnamed_addr {
+; AVX2-LABEL: _mm_mask_packss_epi16_manual:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpacksswb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vmovd %edi, %xmm2
+; AVX2-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+; AVX2-NEXT: vpbroadcastq {{.*#+}} xmm3 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; AVX2-NEXT: vpand %xmm3, %xmm2, %xmm2
+; AVX2-NEXT: vpcmpeqb %xmm3, %xmm2, %xmm2
+; AVX2-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: _mm_mask_packss_epi16_manual:
+; AVX512: # %bb.0:
+; AVX512-NEXT: kmovd %edi, %k1
+; AVX512-NEXT: vpacksswb %xmm2, %xmm1, %xmm0 {%k1}
+; AVX512-NEXT: retq
+ %sh = shufflevector <8 x i16> %a, <8 x i16> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %minv = tail call <16 x i16> @llvm.smax.v16i16(<16 x i16> %sh, <16 x i16> splat (i16 -128))
+ %sat = tail call <16 x i16> @llvm.smin.v16i16(<16 x i16> %minv, <16 x i16> splat (i16 127))
+ %tr = trunc <16 x i16> %sat to <16 x i8>
+ %mk = bitcast i16 %k to <16 x i1>
+ %res = select <16 x i1> %mk, <16 x i8> %tr, <16 x i8> %src
+ ret <16 x i8> %res
+}
+
+define <32 x i8> @_mm256_mask_packss_epi16_manual(<32 x i8> %src, i32 noundef %k, <16 x i16> %a, <16 x i16> %b) unnamed_addr {
+; AVX2-LABEL: _mm256_mask_packss_epi16_manual:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpacksswb %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vmovd %edi, %xmm2
+; AVX2-NEXT: vpbroadcastd %xmm2, %ymm2
+; AVX2-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[0,0,0,0,0,0,0,0,9,9,9,9,9,9,9,9,18,18,18,18,18,18,18,18,27,27,27,27,27,27,27,27]
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm3 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; AVX2-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpcmpeqb %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: _mm256_mask_packss_epi16_manual:
+; AVX512: # %bb.0:
+; AVX512-NEXT: kmovd %edi, %k1
+; AVX512-NEXT: vpacksswb %ymm2, %ymm1, %ymm0 {%k1}
+; AVX512-NEXT: retq
+ %sh = shufflevector <16 x i16> %a, <16 x i16> %b, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %minv = tail call <32 x i16> @llvm.smax.v32i16(<32 x i16> %sh, <32 x i16> splat (i16 -128))
+ %sat = tail call <32 x i16> @llvm.smin.v32i16(<32 x i16> %minv, <32 x i16> splat (i16 127))
+ %tr = trunc <32 x i16> %sat to <32 x i8>
+ %mk = bitcast i32 %k to <32 x i1>
+ %res = select <32 x i1> %mk, <32 x i8> %tr, <32 x i8> %src
+ ret <32 x i8> %res
+}
+
+define <64 x i8> @_mm512_mask_packss_epi16_manual(<64 x i8> %src, i64 noundef %k, <32 x i16> %a, <32 x i16> %b) unnamed_addr {
+; AVX2-LABEL: _mm512_mask_packss_epi16_manual:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpacksswb %ymm5, %ymm3, %ymm3
+; AVX2-NEXT: vpacksswb %ymm4, %ymm2, %ymm2
+; AVX2-NEXT: vmovq %rdi, %xmm4
+; AVX2-NEXT: vpbroadcastq %xmm4, %ymm4
+; AVX2-NEXT: vpshufb {{.*#+}} ymm5 = ymm4[0,0,0,0,0,0,0,0,9,9,9,9,9,9,9,9,18,18,18,18,18,18,18,18,27,27,27,27,27,27,27,27]
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm6 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; AVX2-NEXT: vpand %ymm6, %ymm5, %ymm5
+; AVX2-NEXT: vpcmpeqb %ymm6, %ymm5, %ymm5
+; AVX2-NEXT: vpblendvb %ymm5, %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpshufb {{.*#+}} ymm2 = ymm4[4,4,4,4,4,4,4,4,13,13,13,13,13,13,13,13,22,22,22,22,22,22,22,22,31,31,31,31,31,31,31,31]
+; AVX2-NEXT: vpand %ymm6, %ymm2, %ymm2
+; AVX2-NEXT: vpcmpeqb %ymm6, %ymm2, %ymm2
+; AVX2-NEXT: vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: _mm512_mask_packss_epi16_manual:
+; AVX512: # %bb.0:
+; AVX512-NEXT: kmovq %rdi, %k1
+; AVX512-NEXT: vpacksswb %zmm2, %zmm1, %zmm0 {%k1}
+; AVX512-NEXT: retq
+ %sh = shufflevector <32 x i16> %a, <32 x i16> %b, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %minv = tail call <64 x i16> @llvm.smax.v64i16(<64 x i16> %sh, <64 x i16> splat (i16 -128))
+ %sat = tail call <64 x i16> @llvm.smin.v64i16(<64 x i16> %minv, <64 x i16> splat (i16 127))
+ %tr = trunc <64 x i16> %sat to <64 x i8>
+ %mk = bitcast i64 %k to <64 x i1>
+ %res = select <64 x i1> %mk, <64 x i8> %tr, <64 x i8> %src
+ ret <64 x i8> %res
+}
+
+define <8 x i16> @_mm_mask_packss_epi32_manual(<8 x i16> %src, i8 noundef %k, <4 x i32> %a, <4 x i32> %b) unnamed_addr {
+; AVX2-LABEL: _mm_mask_packss_epi32_manual:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpackssdw %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vmovd %edi, %xmm2
+; AVX2-NEXT: vpbroadcastb %xmm2, %xmm2
+; AVX2-NEXT: vpmovzxbw {{.*#+}} xmm3 = [1,2,4,8,16,32,64,128]
+; AVX2-NEXT: vpand %xmm3, %xmm2, %xmm2
+; AVX2-NEXT: vpcmpeqw %xmm3, %xmm2, %xmm2
+; AVX2-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: _mm_mask_packss_epi32_manual:
+; AVX512: # %bb.0:
+; AVX512-NEXT: kmovd %edi, %k1
+; AVX512-NEXT: vpackssdw %xmm2, %xmm1, %xmm0 {%k1}
+; AVX512-NEXT: retq
+ %sh = shufflevector <4 x i32> %a, <4 x i32> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %minv = tail call <8 x i32> @llvm.smax.v8i32(<8 x i32> %sh, <8 x i32> splat (i32 -32768))
+ %sat = tail call <8 x i32> @llvm.smin.v8i32(<8 x i32> %minv, <8 x i32> splat (i32 32767))
+ %tr = trunc <8 x i32> %sat to <8 x i16>
+ %mk = bitcast i8 %k to <8 x i1>
+ %res = select <8 x i1> %mk, <8 x i16> %tr, <8 x i16> %src
+ ret <8 x i16> %res
+}
+
+define <16 x i16> @_mm256_mask_packss_epi32_manual(<16 x i16> %src, i16 noundef %k, <8 x i32> %a, <8 x i32> %b) unnamed_addr {
+; AVX2-LABEL: _mm256_mask_packss_epi32_manual:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpackssdw %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vmovd %edi, %xmm2
+; AVX2-NEXT: vpbroadcastw %xmm2, %ymm2
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
+; AVX2-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpcmpeqw %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: _mm256_mask_packss_epi32_manual:
+; AVX512: # %bb.0:
+; AVX512-NEXT: kmovd %edi, %k1
+; AVX512-NEXT: vpackssdw %ymm2, %ymm1, %ymm0 {%k1}
+; AVX512-NEXT: retq
+ %sh = shufflevector <8 x i32> %a, <8 x i32> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15>
+ %minv = tail call <16 x i32> @llvm.smax.v16i32(<16 x i32> %sh, <16 x i32> splat (i32 -32768))
+ %sat = tail call <16 x i32> @llvm.smin.v16i32(<16 x i32> %minv, <16 x i32> splat (i32 32767))
+ %tr = trunc <16 x i32> %sat to <16 x i16>
+ %mk = bitcast i16 %k to <16 x i1>
+ %res = select <16 x i1> %mk, <16 x i16> %tr, <16 x i16> %src
+ ret <16 x i16> %res
+}
+
+define <32 x i16> @_mm512_mask_packss_epi32_manual(<32 x i16> %src, i32 noundef %k, <16 x i32> %a, <16 x i32> %b) unnamed_addr {
+; AVX2-LABEL: _mm512_mask_packss_epi32_manual:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpackssdw %ymm5, %ymm3, %ymm3
+; AVX2-NEXT: vpackssdw %ymm4, %ymm2, %ymm2
+; AVX2-NEXT: vmovd %edi, %xmm4
+; AVX2-NEXT: vpbroadcastw %xmm4, %ymm4
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm5 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
+; AVX2-NEXT: vpand %ymm5, %ymm4, %ymm4
+; AVX2-NEXT: vpcmpeqw %ymm5, %ymm4, %ymm4
+; AVX2-NEXT: vpblendvb %ymm4, %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: shrl $16, %edi
+; AVX2-NEXT: vmovd %edi, %xmm2
+; AVX2-NEXT: vpbroadcastw %xmm2, %ymm2
+; AVX2-NEXT: vpand %ymm5, %ymm2, %ymm2
+; AVX2-NEXT: vpcmpeqw %ymm5, %ymm2, %ymm2
+; AVX2-NEXT: vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: _mm512_mask_packss_epi32_manual:
+; AVX512: # %bb.0:
+; AVX512-NEXT: kmovd %edi, %k1
+; AVX512-NEXT: vpackssdw %zmm2, %zmm1, %zmm0 {%k1}
+; AVX512-NEXT: retq
+ %sh = shufflevector <16 x i32> %a, <16 x i32> %b, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 16, i32 17, i32 18, i32 19, i32 4, i32 5, i32 6, i32 7, i32 20, i32 21, i32 22, i32 23, i32 8, i32 9, i32 10, i32 11, i32 24, i32 25, i32 26, i32 27, i32 12, i32 13, i32 14, i32 15, i32 28, i32 29, i32 30, i32 31>
+ %minv = tail call <32 x i32> @llvm.smax.v32i32(<32 x i32> %sh, <32 x i32> splat (i32 -32768))
+ %sat = tail call <32 x i32> @llvm.smin.v32i32(<32 x i32> %minv, <32 x i32> splat (i32 32767))
+ %tr = trunc <32 x i32> %sat to <32 x i16>
+ %mk = bitcast i32 %k to <32 x i1>
+ %res = select <32 x i1> %mk, <32 x i16> %tr, <32 x i16> %src
+ ret <32 x i16> %res
+}
+
+declare <16 x i16> @llvm.smax.v16i16(<16 x i16>, <16 x i16>)
+declare <32 x i16> @llvm.smax.v32i16(<32 x i16>, <32 x i16>)
+declare <64 x i16> @llvm.smax.v64i16(<64 x i16>, <64 x i16>)
+
+declare <16 x i16> @llvm.smin.v16i16(<16 x i16>, <16 x i16>)
+declare <32 x i16> @llvm.smin.v32i16(<32 x i16>, <32 x i16>)
+declare <64 x i16> @llvm.smin.v64i16(<64 x i16>, <64 x i16>)
+
+declare <8 x i32> @llvm.smax.v8i32(<8 x i32>, <8 x i32>)
+declare <16 x i32> @llvm.smax.v16i32(<16 x i32>, <16 x i32>)
+declare <32 x i32> @llvm.smax.v32i32(<32 x i32>, <32 x i32>)
+
+declare <8 x i32> @llvm.smin.v8i32(<8 x i32>, <8 x i32>)
+declare <16 x i32> @llvm.smin.v16i32(<16 x i32>, <16 x i32>)
+declare <32 x i32> @llvm.smin.v32i32(<32 x i32>, <32 x i32>)
diff --git a/llvm/test/CodeGen/X86/masked_packus.ll b/llvm/test/CodeGen/X86/masked_packus.ll
new file mode 100644
index 0000000000000..471a5959c9bd9
--- /dev/null
+++ b/llvm/test/CodeGen/X86/masked_packus.ll
@@ -0,0 +1,197 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,avx512vl | FileCheck %s --check-prefixes=AVX512
+
+define <16 x i8> @_mm_mask_packus_epi16_manual(<16 x i8> %src, i16 noundef %k, <8 x i16> %a, <8 x i16> %b) unnamed_addr {
+; AVX2-LABEL: _mm_mask_packus_epi16_manual:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vmovd %edi, %xmm2
+; AVX2-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+; AVX2-NEXT: vpbroadcastq {{.*#+}} xmm3 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; AVX2-NEXT: vpand %xmm3, %xmm2, %xmm2
+; AVX2-NEXT: vpcmpeqb %xmm3, %xmm2, %xmm2
+; AVX2-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: _mm_mask_packus_epi16_manual:
+; AVX512: # %bb.0:
+; AVX512-NEXT: kmovd %edi, %k1
+; AVX512-NEXT: vpackuswb %xmm2, %xmm1, %xmm0 {%k1}
+; AVX512-NEXT: retq
+ %sh = shufflevector <8 x i16> %a, <8 x i16> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %sat = tail call <16 x i16> @llvm.umin.v16i16(<16 x i16> %sh, <16 x i16> splat (i16 255))
+ %tr = trunc nuw <16 x i16> %sat to <16 x i8>
+ %mk = bitcast i16 %k to <16 x i1>
+ %res = select <16 x i1> %mk, <16 x i8> %tr, <16 x i8> %src
+ ret <16 x i8> %res
+}
+
+define <32 x i8> @_mm256_mask_packus_epi16_manual(<32 x i8> %src, i32 noundef %k, <16 x i16> %a, <16 x i16> %b) unnamed_addr {
+; AVX2-LABEL: _mm256_mask_packus_epi16_manual:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpackuswb %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vmovd %edi, %xmm2
+; AVX2-NEXT: vpbroadcastd %xmm2, %ymm2
+; AVX2-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[0,0,0,0,0,0,0,0,9,9,9,9,9,9,9,9,18,18,18,18,18,18,18,18,27,27,27,27,27,27,27,27]
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm3 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,...
[truncated]
optimize (masked) saturating packs
Manual packss and packus operations like _mm512_packss_epi32 and its masked variant don't optimize well: https://godbolt.org/z/Gendfd1jj
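For reference, a sketch of the unmasked manual pattern in question, reconstructed from the src assembly above (the function name and exact intrinsic choices are illustrative, not taken from the godbolt link):

```cpp
#include <immintrin.h>

// Manual _mm512_packs_epi32: interleave the 128-bit lanes of a and b,
// then saturating-truncate each i32 to i16. Before this patch, the
// two-vpermi2q + two-vpmovsdw + vinserti64x4 sequence shown under "src"
// survives; with it, the whole body folds to a single vpackssdw.
__m512i mm512_packs_epi32_manual(__m512i a, __m512i b) {
  // 64-bit element indices selecting lanes of a (0..7) and b (8..15)
  // in alternating order, matching the shuffle constants in "src".
  const __m512i LoIdx = _mm512_set_epi64(11, 10, 3, 2, 9, 8, 1, 0);
  const __m512i HiIdx = _mm512_set_epi64(15, 14, 7, 6, 13, 12, 5, 4);
  __m512i Lo = _mm512_permutex2var_epi64(a, LoIdx, b);  // vpermi2q
  __m512i Hi = _mm512_permutex2var_epi64(a, HiIdx, b);  // vpermi2q
  __m256i LoPack = _mm512_cvtsepi32_epi16(Lo);          // vpmovsdw
  __m256i HiPack = _mm512_cvtsepi32_epi16(Hi);          // vpmovsdw
  return _mm512_inserti64x4(_mm512_castsi256_si512(LoPack), HiPack, 1);
}
```

Compiled with AVX-512F, this should lower to essentially the src sequence above, and with the patch to the single-instruction tgt sequence.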
This requires some additional logic to recognize the shuffle mask. There are some existing functions for various masks, but not the sort of lane-interleaving one that is needed here. Maybe there is some better way though?
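To make the expected mask concrete, here is a minimal standalone sketch (not part of the patch) that builds the reference lane-interleave mask which the new isLaneInterleaveMask helper compares against:

```cpp
#include <cstdio>
#include <vector>

// Build the lane-interleave mask for a two-input shuffle producing NumElts
// elements, with EltsPerLane elements per 128-bit lane (4 for i32, 8 for i16).
// Input A contributes indices [0, NumElts), input B [NumElts, 2 * NumElts).
std::vector<int> laneInterleaveMask(unsigned NumElts, unsigned EltsPerLane) {
  std::vector<int> Mask;
  for (unsigned G = 0; G != NumElts / (2 * EltsPerLane); ++G) {
    for (unsigned I = 0; I != EltsPerLane; ++I)
      Mask.push_back(G * EltsPerLane + I);            // lane G of input A
    for (unsigned I = 0; I != EltsPerLane; ++I)
      Mask.push_back(NumElts + G * EltsPerLane + I);  // lane G of input B
  }
  return Mask;
}

int main() {
  // v16i32 (two v8i32 inputs): prints 0 1 2 3 16 17 18 19 4 5 6 7 20 21 22 23
  for (int M : laneInterleaveMask(16, 4))
    std::printf("%d ", M);
  std::printf("\n");
}
```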
When the right SIMD width is not available, packss optimizes much better than packus. I don't see how that is due to my code though; I suspect that is some other problem. The signed case removes the truncation fully when the packs is inserted, while the unsigned version leaves the umin intact. Not that important really, but maybe it is indicative of some underlying problem?