diff --git a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
index 746742e14d080..969e569a6f849 100644
--- a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
+++ b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
@@ -663,11 +663,11 @@ bool VectorCombine::foldExtractExtract(Instruction &I) {
 /// shuffle.
 bool VectorCombine::foldInsExtFNeg(Instruction &I) {
   // Match an insert (op (extract)) pattern.
-  Value *DestVec;
-  uint64_t Index;
+  Value *DstVec;
+  uint64_t ExtIdx, InsIdx;
   Instruction *FNeg;
-  if (!match(&I, m_InsertElt(m_Value(DestVec), m_OneUse(m_Instruction(FNeg)),
-                             m_ConstantInt(Index))))
+  if (!match(&I, m_InsertElt(m_Value(DstVec), m_OneUse(m_Instruction(FNeg)),
+                             m_ConstantInt(InsIdx))))
     return false;
 
   // Note: This handles the canonical fneg instruction and "fsub -0.0, X".
@@ -675,48 +675,49 @@ bool VectorCombine::foldInsExtFNeg(Instruction &I) {
   Instruction *Extract;
   if (!match(FNeg, m_FNeg(m_CombineAnd(
                        m_Instruction(Extract),
-                       m_ExtractElt(m_Value(SrcVec), m_SpecificInt(Index))))))
+                       m_ExtractElt(m_Value(SrcVec), m_ConstantInt(ExtIdx))))))
     return false;
 
-  auto *VecTy = cast<FixedVectorType>(I.getType());
-  auto *ScalarTy = VecTy->getScalarType();
+  auto *DstVecTy = cast<FixedVectorType>(DstVec->getType());
+  auto *DstVecScalarTy = DstVecTy->getScalarType();
   auto *SrcVecTy = dyn_cast<FixedVectorType>(SrcVec->getType());
-  if (!SrcVecTy || ScalarTy != SrcVecTy->getScalarType())
+  if (!SrcVecTy || DstVecScalarTy != SrcVecTy->getScalarType())
    return false;
 
   // Ignore bogus insert/extract index.
-  unsigned NumElts = VecTy->getNumElements();
-  if (Index >= NumElts)
+  unsigned NumDstElts = DstVecTy->getNumElements();
+  unsigned NumSrcElts = SrcVecTy->getNumElements();
+  if (InsIdx >= NumDstElts || ExtIdx >= NumSrcElts || NumDstElts == 1)
     return false;
 
   // We are inserting the negated element into the same lane that we extracted
   // from. This is equivalent to a select-shuffle that chooses all but the
   // negated element from the destination vector.
-  SmallVector<int> Mask(NumElts);
+  SmallVector<int> Mask(NumDstElts);
   std::iota(Mask.begin(), Mask.end(), 0);
-  Mask[Index] = Index + NumElts;
+  Mask[InsIdx] = (ExtIdx % NumDstElts) + NumDstElts;
   InstructionCost OldCost =
-      TTI.getArithmeticInstrCost(Instruction::FNeg, ScalarTy, CostKind) +
-      TTI.getVectorInstrCost(I, VecTy, CostKind, Index);
+      TTI.getArithmeticInstrCost(Instruction::FNeg, DstVecScalarTy, CostKind) +
+      TTI.getVectorInstrCost(I, DstVecTy, CostKind, InsIdx);
 
   // If the extract has one use, it will be eliminated, so count it in the
   // original cost. If it has more than one use, ignore the cost because it will
   // be the same before/after.
   if (Extract->hasOneUse())
-    OldCost += TTI.getVectorInstrCost(*Extract, VecTy, CostKind, Index);
+    OldCost += TTI.getVectorInstrCost(*Extract, SrcVecTy, CostKind, ExtIdx);
 
   InstructionCost NewCost =
-      TTI.getArithmeticInstrCost(Instruction::FNeg, VecTy, CostKind) +
-      TTI.getShuffleCost(TargetTransformInfo::SK_PermuteTwoSrc, VecTy, Mask,
+      TTI.getArithmeticInstrCost(Instruction::FNeg, SrcVecTy, CostKind) +
+      TTI.getShuffleCost(TargetTransformInfo::SK_PermuteTwoSrc, DstVecTy, Mask,
                          CostKind);
 
-  bool NeedLenChg = SrcVecTy->getNumElements() != NumElts;
+  bool NeedLenChg = SrcVecTy->getNumElements() != NumDstElts;
   // If the lengths of the two vectors are not equal,
   // we need to add a length-change vector. Add this cost.
   SmallVector<int> SrcMask;
   if (NeedLenChg) {
-    SrcMask.assign(NumElts, PoisonMaskElem);
-    SrcMask[Index] = Index;
+    SrcMask.assign(NumDstElts, PoisonMaskElem);
+    SrcMask[(ExtIdx % NumDstElts)] = ExtIdx;
     NewCost += TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc,
                                   SrcVecTy, SrcMask, CostKind);
   }
@@ -725,15 +726,15 @@ bool VectorCombine::foldInsExtFNeg(Instruction &I) {
     return false;
 
   Value *NewShuf;
-  // insertelt DestVec, (fneg (extractelt SrcVec, Index)), Index
+  // insertelt DstVec, (fneg (extractelt SrcVec, Index)), Index
   Value *VecFNeg = Builder.CreateFNegFMF(SrcVec, FNeg);
   if (NeedLenChg) {
-    // shuffle DestVec, (shuffle (fneg SrcVec), poison, SrcMask), Mask
+    // shuffle DstVec, (shuffle (fneg SrcVec), poison, SrcMask), Mask
     Value *LenChgShuf = Builder.CreateShuffleVector(VecFNeg, SrcMask);
-    NewShuf = Builder.CreateShuffleVector(DestVec, LenChgShuf, Mask);
+    NewShuf = Builder.CreateShuffleVector(DstVec, LenChgShuf, Mask);
   } else {
-    // shuffle DestVec, (fneg SrcVec), Mask
-    NewShuf = Builder.CreateShuffleVector(DestVec, VecFNeg, Mask);
+    // shuffle DstVec, (fneg SrcVec), Mask
+    NewShuf = Builder.CreateShuffleVector(DstVec, VecFNeg, Mask);
   }
 
   replaceValue(I, *NewShuf);
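(For illustration: when the extract/insert lanes and the vector lengths both differ, the rewritten fold now emits IR of the following shape. This is a hand-worked sketch following the new Mask/SrcMask logic above, mirroring the @ext3_v4f64v2f64 test updated below; it is not output copied from the patch.)

  ; before: negate lane 3 of the <4 x double> source and insert it into
  ; lane 1 of the <2 x double> destination
  %e = extractelement <4 x double> %x, i32 3
  %n = fneg double %e
  %r = insertelement <2 x double> %y, double %n, i32 1

  ; after: whole-vector fneg, a length-change shuffle built from
  ; SrcMask[ExtIdx % NumDstElts] = ExtIdx, then the two-source select
  ; shuffle built from Mask[InsIdx] = (ExtIdx % NumDstElts) + NumDstElts
  %1 = fneg <4 x double> %x
  %2 = shufflevector <4 x double> %1, <4 x double> poison, <2 x i32> <i32 poison, i32 3>
  %r = shufflevector <2 x double> %y, <2 x double> %2, <2 x i32> <i32 0, i32 3>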
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/addsub-inseltpoison.ll b/llvm/test/Transforms/PhaseOrdering/X86/addsub-inseltpoison.ll
index a3af048c4e442..1603ee1a6a301 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/addsub-inseltpoison.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/addsub-inseltpoison.ll
@@ -104,11 +104,9 @@ define void @add_aggregate_store(<2 x float> %a0, <2 x float> %a1, <2 x float> %
 ; PR58139
 define <2 x double> @_mm_complexmult_pd_naive(<2 x double> %a, <2 x double> %b) {
 ; SSE-LABEL: @_mm_complexmult_pd_naive(
-; SSE-NEXT:    [[B1:%.*]] = extractelement <2 x double> [[B:%.*]], i64 1
-; SSE-NEXT:    [[TMP1:%.*]] = fneg double [[B1]]
 ; SSE-NEXT:    [[TMP2:%.*]] = shufflevector <2 x double> [[A:%.*]], <2 x double> poison, <2 x i32> <i32 1, i32 1>
-; SSE-NEXT:    [[TMP3:%.*]] = shufflevector <2 x double> [[B]], <2 x double> poison, <2 x i32> <i32 poison, i32 0>
-; SSE-NEXT:    [[TMP4:%.*]] = insertelement <2 x double> [[TMP3]], double [[TMP1]], i64 0
+; SSE-NEXT:    [[TMP3:%.*]] = fneg <2 x double> [[B:%.*]]
+; SSE-NEXT:    [[TMP4:%.*]] = shufflevector <2 x double> [[TMP3]], <2 x double> [[B]], <2 x i32> <i32 1, i32 2>
 ; SSE-NEXT:    [[TMP5:%.*]] = fmul <2 x double> [[TMP2]], [[TMP4]]
 ; SSE-NEXT:    [[TMP6:%.*]] = shufflevector <2 x double> [[A]], <2 x double> poison, <2 x i32> zeroinitializer
 ; SSE-NEXT:    [[TMP7:%.*]] = tail call <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[TMP6]], <2 x double> [[B]], <2 x double> [[TMP5]])
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/addsub.ll b/llvm/test/Transforms/PhaseOrdering/X86/addsub.ll
index 40dc2aaeced57..e228d4dae202d 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/addsub.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/addsub.ll
@@ -104,11 +104,9 @@ define void @add_aggregate_store(<2 x float> %a0, <2 x float> %a1, <2 x float> %
 ; PR58139
 define <2 x double> @_mm_complexmult_pd_naive(<2 x double> %a, <2 x double> %b) {
 ; SSE-LABEL: @_mm_complexmult_pd_naive(
-; SSE-NEXT:    [[B1:%.*]] = extractelement <2 x double> [[B:%.*]], i64 1
-; SSE-NEXT:    [[TMP1:%.*]] = fneg double [[B1]]
 ; SSE-NEXT:    [[TMP2:%.*]] = shufflevector <2 x double> [[A:%.*]], <2 x double> poison, <2 x i32> <i32 1, i32 1>
-; SSE-NEXT:    [[TMP3:%.*]] = shufflevector <2 x double> [[B]], <2 x double> poison, <2 x i32> <i32 poison, i32 0>
-; SSE-NEXT:    [[TMP4:%.*]] = insertelement <2 x double> [[TMP3]], double [[TMP1]], i64 0
+; SSE-NEXT:    [[TMP3:%.*]] = fneg <2 x double> [[B:%.*]]
+; SSE-NEXT:    [[TMP4:%.*]] = shufflevector <2 x double> [[TMP3]], <2 x double> [[B]], <2 x i32> <i32 1, i32 2>
 ; SSE-NEXT:    [[TMP5:%.*]] = fmul <2 x double> [[TMP2]], [[TMP4]]
 ; SSE-NEXT:    [[TMP6:%.*]] = shufflevector <2 x double> [[A]], <2 x double> poison, <2 x i32> zeroinitializer
 ; SSE-NEXT:    [[TMP7:%.*]] = tail call <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[TMP6]], <2 x double> [[B]], <2 x double> [[TMP5]])
diff --git a/llvm/test/Transforms/VectorCombine/X86/extract-fneg-insert.ll b/llvm/test/Transforms/VectorCombine/X86/extract-fneg-insert.ll
index cd2bc757eb9d2..a77a665497042 100644
--- a/llvm/test/Transforms/VectorCombine/X86/extract-fneg-insert.ll
+++ b/llvm/test/Transforms/VectorCombine/X86/extract-fneg-insert.ll
@@ -47,9 +47,9 @@ define <4 x float> @ext2_v4f32(<4 x float> %x, <4 x float> %y) {
 
 define <4 x float> @ext2_v2f32v4f32(<2 x float> %x, <4 x float> %y) {
 ; CHECK-LABEL: @ext2_v2f32v4f32(
-; CHECK-NEXT:    [[TMP1:%.*]] = fneg <2 x float> [[X:%.*]]
-; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <2 x float> [[TMP1]], <2 x float> poison, <4 x i32> <i32 poison, i32 poison, i32 2, i32 poison>
-; CHECK-NEXT:    [[R:%.*]] = shufflevector <4 x float> [[Y:%.*]], <4 x float> [[TMP2]], <4 x i32> <i32 0, i32 1, i32 6, i32 3>
+; CHECK-NEXT:    [[E:%.*]] = extractelement <2 x float> [[X:%.*]], i32 2
+; CHECK-NEXT:    [[N:%.*]] = fneg float [[E]]
+; CHECK-NEXT:    [[R:%.*]] = insertelement <4 x float> [[Y:%.*]], float [[N]], i32 2
 ; CHECK-NEXT:    ret <4 x float> [[R]]
 ;
   %e = extractelement <2 x float> %x, i32 2
@@ -73,17 +73,11 @@ define <2 x double> @ext1_v2f64(<2 x double> %x, <2 x double> %y) {
 }
 
 define <4 x double> @ext1_v2f64v4f64(<2 x double> %x, <4 x double> %y) {
-; SSE-LABEL: @ext1_v2f64v4f64(
-; SSE-NEXT:    [[E:%.*]] = extractelement <2 x double> [[X:%.*]], i32 1
-; SSE-NEXT:    [[N:%.*]] = fneg nsz double [[E]]
-; SSE-NEXT:    [[R:%.*]] = insertelement <4 x double> [[Y:%.*]], double [[N]], i32 1
-; SSE-NEXT:    ret <4 x double> [[R]]
-;
-; AVX-LABEL: @ext1_v2f64v4f64(
-; AVX-NEXT:    [[TMP1:%.*]] = fneg nsz <2 x double> [[X:%.*]]
-; AVX-NEXT:    [[TMP2:%.*]] = shufflevector <2 x double> [[TMP1]], <2 x double> poison, <4 x i32> <i32 poison, i32 1, i32 poison, i32 poison>
-; AVX-NEXT:    [[R:%.*]] = shufflevector <4 x double> [[Y:%.*]], <4 x double> [[TMP2]], <4 x i32> <i32 0, i32 5, i32 2, i32 3>
-; AVX-NEXT:    ret <4 x double> [[R]]
+; CHECK-LABEL: @ext1_v2f64v4f64(
+; CHECK-NEXT:    [[TMP1:%.*]] = fneg nsz <2 x double> [[X:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <2 x double> [[TMP1]], <2 x double> poison, <4 x i32> <i32 poison, i32 1, i32 poison, i32 poison>
+; CHECK-NEXT:    [[R:%.*]] = shufflevector <4 x double> [[Y:%.*]], <4 x double> [[TMP2]], <4 x i32> <i32 0, i32 5, i32 2, i32 3>
+; CHECK-NEXT:    ret <4 x double> [[R]]
 ;
   %e = extractelement <2 x double> %x, i32 1
   %n = fneg nsz double %e
@@ -105,9 +99,9 @@ define <8 x float> @ext7_v8f32(<8 x float> %x, <8 x float> %y) {
 
 define <8 x float> @ext7_v4f32v8f32(<4 x float> %x, <8 x float> %y) {
 ; CHECK-LABEL: @ext7_v4f32v8f32(
-; CHECK-NEXT:    [[E:%.*]] = extractelement <4 x float> [[X:%.*]], i32 3
-; CHECK-NEXT:    [[N:%.*]] = fneg float [[E]]
-; CHECK-NEXT:    [[R:%.*]] = insertelement <8 x float> [[Y:%.*]], float [[N]], i32 7
+; CHECK-NEXT:    [[TMP1:%.*]] = fneg <4 x float> [[X:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> poison, <8 x i32> <i32 poison, i32 poison, i32 poison, i32 3, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT:    [[R:%.*]] = shufflevector <8 x float> [[Y:%.*]], <8 x float> [[TMP2]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 11>
 ; CHECK-NEXT:    ret <8 x float> [[R]]
 ;
   %e = extractelement <4 x float> %x, i32 3
@@ -141,12 +135,20 @@ define <8 x float> @ext7_v8f32_use1(<8 x float> %x, <8 x float> %y) {
 }
 
 define <8 x float> @ext7_v4f32v8f32_use1(<4 x float> %x, <8 x float> %y) {
-; CHECK-LABEL: @ext7_v4f32v8f32_use1(
-; CHECK-NEXT:    [[E:%.*]] = extractelement <4 x float> [[X:%.*]], i32 3
-; CHECK-NEXT:    call void @use(float [[E]])
-; CHECK-NEXT:    [[N:%.*]] = fneg float [[E]]
-; CHECK-NEXT:    [[R:%.*]] = insertelement <8 x float> [[Y:%.*]], float [[N]], i32 3
-; CHECK-NEXT:    ret <8 x float> [[R]]
+; SSE-LABEL: @ext7_v4f32v8f32_use1(
+; SSE-NEXT:    [[E:%.*]] = extractelement <4 x float> [[X:%.*]], i32 3
+; SSE-NEXT:    call void @use(float [[E]])
+; SSE-NEXT:    [[TMP1:%.*]] = fneg <4 x float> [[X]]
+; SSE-NEXT:    [[TMP2:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> poison, <8 x i32> <i32 poison, i32 poison, i32 poison, i32 3, i32 poison, i32 poison, i32 poison, i32 poison>
+; SSE-NEXT:    [[R:%.*]] = shufflevector <8 x float> [[Y:%.*]], <8 x float> [[TMP2]], <8 x i32> <i32 0, i32 1, i32 2, i32 11, i32 4, i32 5, i32 6, i32 7>
+; SSE-NEXT:    ret <8 x float> [[R]]
+;
+; AVX-LABEL: @ext7_v4f32v8f32_use1(
+; AVX-NEXT:    [[E:%.*]] = extractelement <4 x float> [[X:%.*]], i32 3
+; AVX-NEXT:    call void @use(float [[E]])
+; AVX-NEXT:    [[N:%.*]] = fneg float [[E]]
+; AVX-NEXT:    [[R:%.*]] = insertelement <8 x float> [[Y:%.*]], float [[N]], i32 3
+; AVX-NEXT:    ret <8 x float> [[R]]
 ;
   %e = extractelement <4 x float> %x, i32 3
   call void @use(float %e)
@@ -220,9 +222,8 @@ define <4 x double> @ext_index_var_v2f64v4f64(<2 x double> %x, <4 x double> %y,
 
 define <2 x double> @ext1_v2f64_ins0(<2 x double> %x, <2 x double> %y) {
 ; CHECK-LABEL: @ext1_v2f64_ins0(
-; CHECK-NEXT:    [[E:%.*]] = extractelement <2 x double> [[X:%.*]], i32 1
-; CHECK-NEXT:    [[N:%.*]] = fneg nsz double [[E]]
-; CHECK-NEXT:    [[R:%.*]] = insertelement <2 x double> [[Y:%.*]], double [[N]], i32 0
+; CHECK-NEXT:    [[TMP1:%.*]] = fneg nsz <2 x double> [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = shufflevector <2 x double> [[Y:%.*]], <2 x double> [[TMP1]], <2 x i32> <i32 3, i32 1>
 ; CHECK-NEXT:    ret <2 x double> [[R]]
 ;
   %e = extractelement <2 x double> %x, i32 1
@@ -234,9 +235,9 @@ define <2 x double> @ext1_v2f64_ins0(<2 x double> %x, <2 x double> %y) {
 ; Negative test - extract from an index greater than the vector width of the destination
 define <2 x double> @ext3_v4f64v2f64(<4 x double> %x, <2 x double> %y) {
 ; CHECK-LABEL: @ext3_v4f64v2f64(
-; CHECK-NEXT:    [[E:%.*]] = extractelement <4 x double> [[X:%.*]], i32 3
-; CHECK-NEXT:    [[N:%.*]] = fneg nsz double [[E]]
-; CHECK-NEXT:    [[R:%.*]] = insertelement <2 x double> [[Y:%.*]], double [[N]], i32 1
+; CHECK-NEXT:    [[TMP1:%.*]] = fneg nsz <4 x double> [[X:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <4 x double> [[TMP1]], <4 x double> poison, <2 x i32> <i32 poison, i32 3>
+; CHECK-NEXT:    [[R:%.*]] = shufflevector <2 x double> [[Y:%.*]], <2 x double> [[TMP2]], <2 x i32> <i32 0, i32 3>
 ; CHECK-NEXT:    ret <2 x double> [[R]]
 ;
   %e = extractelement <4 x double> %x, i32 3
@@ -246,11 +247,17 @@ define <2 x double> @ext3_v4f64v2f64(<4 x double> %x, <2 x double> %y) {
 }
 
 define <4 x double> @ext1_v2f64v4f64_ins0(<2 x double> %x, <4 x double> %y) {
-; CHECK-LABEL: @ext1_v2f64v4f64_ins0(
-; CHECK-NEXT:    [[E:%.*]] = extractelement <2 x double> [[X:%.*]], i32 1
-; CHECK-NEXT:    [[N:%.*]] = fneg nsz double [[E]]
-; CHECK-NEXT:    [[R:%.*]] = insertelement <4 x double> [[Y:%.*]], double [[N]], i32 0
-; CHECK-NEXT:    ret <4 x double> [[R]]
+; SSE-LABEL: @ext1_v2f64v4f64_ins0(
+; SSE-NEXT:    [[TMP1:%.*]] = fneg nsz <2 x double> [[X:%.*]]
+; SSE-NEXT:    [[TMP2:%.*]] = shufflevector <2 x double> [[TMP1]], <2 x double> poison, <4 x i32> <i32 poison, i32 1, i32 poison, i32 poison>
+; SSE-NEXT:    [[R:%.*]] = shufflevector <4 x double> [[Y:%.*]], <4 x double> [[TMP2]], <4 x i32> <i32 5, i32 1, i32 2, i32 3>
+; SSE-NEXT:    ret <4 x double> [[R]]
+;
+; AVX-LABEL: @ext1_v2f64v4f64_ins0(
+; AVX-NEXT:    [[E:%.*]] = extractelement <2 x double> [[X:%.*]], i32 1
+; AVX-NEXT:    [[N:%.*]] = fneg nsz double [[E]]
+; AVX-NEXT:    [[R:%.*]] = insertelement <4 x double> [[Y:%.*]], double [[N]], i32 0
+; AVX-NEXT:    ret <4 x double> [[R]]
 ;
   %e = extractelement <2 x double> %x, i32 1
   %n = fneg nsz double %e
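(The shuffle masks in the updated FileCheck lines can be sanity-checked against the new mask logic with the small standalone C++ sketch below. It is illustrative only and not part of the patch: std::vector<int> and -1 stand in for SmallVector<int> and PoisonMaskElem.)

  #include <cstdio>
  #include <numeric>
  #include <vector>

  // Re-implementation of the two mask computations from foldInsExtFNeg.
  static void printMask(const char *Name, const std::vector<int> &M) {
    std::printf("%s:", Name);
    for (int Elt : M) {
      if (Elt < 0)
        std::printf(" poison");
      else
        std::printf(" %d", Elt);
    }
    std::printf("\n");
  }

  static void foldMasks(unsigned NumSrcElts, unsigned NumDstElts,
                        unsigned ExtIdx, unsigned InsIdx) {
    // Select-shuffle mask: identity over DstVec except the inserted lane,
    // which reads the negated element from the second shuffle operand.
    std::vector<int> Mask(NumDstElts);
    std::iota(Mask.begin(), Mask.end(), 0);
    Mask[InsIdx] = (ExtIdx % NumDstElts) + NumDstElts;

    // Length-change mask, emitted only when the vector lengths differ.
    if (NumSrcElts != NumDstElts) {
      std::vector<int> SrcMask(NumDstElts, -1);
      SrcMask[ExtIdx % NumDstElts] = ExtIdx;
      printMask("SrcMask", SrcMask);
    }
    printMask("Mask", Mask);
  }

  int main() {
    foldMasks(4, 2, 3, 1); // @ext3_v4f64v2f64:      SrcMask = poison,3  Mask = 0,3
    foldMasks(2, 4, 1, 0); // @ext1_v2f64v4f64_ins0: SrcMask = poison,1,poison,poison  Mask = 5,1,2,3
    foldMasks(2, 2, 1, 0); // @ext1_v2f64_ins0:      Mask = 3,1
    return 0;
  }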