Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

[VectorCombine] support mismatching extract/insert indices for foldInsExtFNeg #126408

Open
wants to merge 1 commit into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
51 changes: 26 additions & 25 deletions llvm/lib/Transforms/Vectorize/VectorCombine.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -663,60 +663,61 @@ bool VectorCombine::foldExtractExtract(Instruction &I) {
/// shuffle.
bool VectorCombine::foldInsExtFNeg(Instruction &I) {
// Match an insert (op (extract)) pattern.
Value *DestVec;
uint64_t Index;
Value *DstVec;
uint64_t ExtIdx, InsIdx;
Instruction *FNeg;
if (!match(&I, m_InsertElt(m_Value(DestVec), m_OneUse(m_Instruction(FNeg)),
m_ConstantInt(Index))))
if (!match(&I, m_InsertElt(m_Value(DstVec), m_OneUse(m_Instruction(FNeg)),
m_ConstantInt(InsIdx))))
return false;

// Note: This handles the canonical fneg instruction and "fsub -0.0, X".
Value *SrcVec;
Instruction *Extract;
if (!match(FNeg, m_FNeg(m_CombineAnd(
m_Instruction(Extract),
m_ExtractElt(m_Value(SrcVec), m_SpecificInt(Index))))))
m_ExtractElt(m_Value(SrcVec), m_ConstantInt(ExtIdx))))))
return false;

auto *VecTy = cast<FixedVectorType>(I.getType());
auto *ScalarTy = VecTy->getScalarType();
auto *DstVecTy = cast<FixedVectorType>(DstVec->getType());
auto *DstVecScalarTy = DstVecTy->getScalarType();
auto *SrcVecTy = dyn_cast<FixedVectorType>(SrcVec->getType());
if (!SrcVecTy || ScalarTy != SrcVecTy->getScalarType())
if (!SrcVecTy || DstVecScalarTy != SrcVecTy->getScalarType())
return false;

// Ignore bogus insert/extract index.
unsigned NumElts = VecTy->getNumElements();
if (Index >= NumElts)
unsigned NumDstElts = DstVecTy->getNumElements();
unsigned NumSrcElts = SrcVecTy->getNumElements();
if (InsIdx >= NumDstElts || ExtIdx >= NumSrcElts || NumDstElts == 1)
return false;

// We are inserting the negated element into the same lane that we extracted
// from. This is equivalent to a select-shuffle that chooses all but the
// negated element from the destination vector.
SmallVector<int> Mask(NumElts);
SmallVector<int> Mask(NumDstElts);
std::iota(Mask.begin(), Mask.end(), 0);
Mask[Index] = Index + NumElts;
Mask[InsIdx] = (ExtIdx % NumDstElts) + NumDstElts;
InstructionCost OldCost =
TTI.getArithmeticInstrCost(Instruction::FNeg, ScalarTy, CostKind) +
TTI.getVectorInstrCost(I, VecTy, CostKind, Index);
TTI.getArithmeticInstrCost(Instruction::FNeg, DstVecScalarTy, CostKind) +
TTI.getVectorInstrCost(I, DstVecTy, CostKind, InsIdx);

// If the extract has one use, it will be eliminated, so count it in the
// original cost. If it has more than one use, ignore the cost because it will
// be the same before/after.
if (Extract->hasOneUse())
OldCost += TTI.getVectorInstrCost(*Extract, VecTy, CostKind, Index);
OldCost += TTI.getVectorInstrCost(*Extract, SrcVecTy, CostKind, ExtIdx);

InstructionCost NewCost =
TTI.getArithmeticInstrCost(Instruction::FNeg, VecTy, CostKind) +
TTI.getShuffleCost(TargetTransformInfo::SK_PermuteTwoSrc, VecTy, Mask,
TTI.getArithmeticInstrCost(Instruction::FNeg, SrcVecTy, CostKind) +
TTI.getShuffleCost(TargetTransformInfo::SK_PermuteTwoSrc, DstVecTy, Mask,
CostKind);

bool NeedLenChg = SrcVecTy->getNumElements() != NumElts;
bool NeedLenChg = SrcVecTy->getNumElements() != NumDstElts;
// If the lengths of the two vectors are not equal,
// we need to add a length-change vector. Add this cost.
SmallVector<int> SrcMask;
if (NeedLenChg) {
SrcMask.assign(NumElts, PoisonMaskElem);
SrcMask[Index] = Index;
SrcMask.assign(NumDstElts, PoisonMaskElem);
SrcMask[(ExtIdx % NumDstElts)] = ExtIdx;
NewCost += TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc,
SrcVecTy, SrcMask, CostKind);
}
Expand All @@ -725,15 +726,15 @@ bool VectorCombine::foldInsExtFNeg(Instruction &I) {
return false;

Value *NewShuf;
// insertelt DestVec, (fneg (extractelt SrcVec, Index)), Index
// insertelt DstVec, (fneg (extractelt SrcVec, Index)), Index
Value *VecFNeg = Builder.CreateFNegFMF(SrcVec, FNeg);
if (NeedLenChg) {
// shuffle DestVec, (shuffle (fneg SrcVec), poison, SrcMask), Mask
// shuffle DstVec, (shuffle (fneg SrcVec), poison, SrcMask), Mask
Value *LenChgShuf = Builder.CreateShuffleVector(VecFNeg, SrcMask);
NewShuf = Builder.CreateShuffleVector(DestVec, LenChgShuf, Mask);
NewShuf = Builder.CreateShuffleVector(DstVec, LenChgShuf, Mask);
} else {
// shuffle DestVec, (fneg SrcVec), Mask
NewShuf = Builder.CreateShuffleVector(DestVec, VecFNeg, Mask);
// shuffle DstVec, (fneg SrcVec), Mask
NewShuf = Builder.CreateShuffleVector(DstVec, VecFNeg, Mask);
}

replaceValue(I, *NewShuf);
Expand Down
6 changes: 2 additions & 4 deletions llvm/test/Transforms/PhaseOrdering/X86/addsub-inseltpoison.ll
Original file line number Diff line number Diff line change
Expand Up @@ -104,11 +104,9 @@ define void @add_aggregate_store(<2 x float> %a0, <2 x float> %a1, <2 x float> %
; PR58139
define <2 x double> @_mm_complexmult_pd_naive(<2 x double> %a, <2 x double> %b) {
; SSE-LABEL: @_mm_complexmult_pd_naive(
; SSE-NEXT: [[B1:%.*]] = extractelement <2 x double> [[B:%.*]], i64 1
; SSE-NEXT: [[TMP1:%.*]] = fneg double [[B1]]
; SSE-NEXT: [[TMP2:%.*]] = shufflevector <2 x double> [[A:%.*]], <2 x double> poison, <2 x i32> <i32 1, i32 1>
; SSE-NEXT: [[TMP3:%.*]] = shufflevector <2 x double> [[B]], <2 x double> poison, <2 x i32> <i32 poison, i32 0>
; SSE-NEXT: [[TMP4:%.*]] = insertelement <2 x double> [[TMP3]], double [[TMP1]], i64 0
; SSE-NEXT: [[TMP3:%.*]] = fneg <2 x double> [[B:%.*]]
; SSE-NEXT: [[TMP4:%.*]] = shufflevector <2 x double> [[TMP3]], <2 x double> [[B]], <2 x i32> <i32 1, i32 2>
; SSE-NEXT: [[TMP5:%.*]] = fmul <2 x double> [[TMP2]], [[TMP4]]
; SSE-NEXT: [[TMP6:%.*]] = shufflevector <2 x double> [[A]], <2 x double> poison, <2 x i32> zeroinitializer
; SSE-NEXT: [[TMP7:%.*]] = tail call <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[TMP6]], <2 x double> [[B]], <2 x double> [[TMP5]])
Expand Down
6 changes: 2 additions & 4 deletions llvm/test/Transforms/PhaseOrdering/X86/addsub.ll
Original file line number Diff line number Diff line change
Expand Up @@ -104,11 +104,9 @@ define void @add_aggregate_store(<2 x float> %a0, <2 x float> %a1, <2 x float> %
; PR58139
define <2 x double> @_mm_complexmult_pd_naive(<2 x double> %a, <2 x double> %b) {
; SSE-LABEL: @_mm_complexmult_pd_naive(
; SSE-NEXT: [[B1:%.*]] = extractelement <2 x double> [[B:%.*]], i64 1
; SSE-NEXT: [[TMP1:%.*]] = fneg double [[B1]]
; SSE-NEXT: [[TMP2:%.*]] = shufflevector <2 x double> [[A:%.*]], <2 x double> poison, <2 x i32> <i32 1, i32 1>
; SSE-NEXT: [[TMP3:%.*]] = shufflevector <2 x double> [[B]], <2 x double> poison, <2 x i32> <i32 poison, i32 0>
; SSE-NEXT: [[TMP4:%.*]] = insertelement <2 x double> [[TMP3]], double [[TMP1]], i64 0
; SSE-NEXT: [[TMP3:%.*]] = fneg <2 x double> [[B:%.*]]
; SSE-NEXT: [[TMP4:%.*]] = shufflevector <2 x double> [[TMP3]], <2 x double> [[B]], <2 x i32> <i32 1, i32 2>
; SSE-NEXT: [[TMP5:%.*]] = fmul <2 x double> [[TMP2]], [[TMP4]]
; SSE-NEXT: [[TMP6:%.*]] = shufflevector <2 x double> [[A]], <2 x double> poison, <2 x i32> zeroinitializer
; SSE-NEXT: [[TMP7:%.*]] = tail call <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[TMP6]], <2 x double> [[B]], <2 x double> [[TMP5]])
Expand Down
75 changes: 41 additions & 34 deletions llvm/test/Transforms/VectorCombine/X86/extract-fneg-insert.ll
Original file line number Diff line number Diff line change
Expand Up @@ -47,9 +47,9 @@ define <4 x float> @ext2_v4f32(<4 x float> %x, <4 x float> %y) {

define <4 x float> @ext2_v2f32v4f32(<2 x float> %x, <4 x float> %y) {
; CHECK-LABEL: @ext2_v2f32v4f32(
; CHECK-NEXT: [[TMP1:%.*]] = fneg <2 x float> [[X:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <2 x float> [[TMP1]], <2 x float> poison, <4 x i32> <i32 poison, i32 poison, i32 2, i32 poison>
; CHECK-NEXT: [[R:%.*]] = shufflevector <4 x float> [[Y:%.*]], <4 x float> [[TMP2]], <4 x i32> <i32 0, i32 1, i32 6, i32 3>
; CHECK-NEXT: [[E:%.*]] = extractelement <2 x float> [[X:%.*]], i32 2
; CHECK-NEXT: [[N:%.*]] = fneg float [[E]]
; CHECK-NEXT: [[R:%.*]] = insertelement <4 x float> [[Y:%.*]], float [[N]], i32 2
; CHECK-NEXT: ret <4 x float> [[R]]
;
%e = extractelement <2 x float> %x, i32 2
Expand All @@ -73,17 +73,11 @@ define <2 x double> @ext1_v2f64(<2 x double> %x, <2 x double> %y) {
}

define <4 x double> @ext1_v2f64v4f64(<2 x double> %x, <4 x double> %y) {
; SSE-LABEL: @ext1_v2f64v4f64(
; SSE-NEXT: [[E:%.*]] = extractelement <2 x double> [[X:%.*]], i32 1
; SSE-NEXT: [[N:%.*]] = fneg nsz double [[E]]
; SSE-NEXT: [[R:%.*]] = insertelement <4 x double> [[Y:%.*]], double [[N]], i32 1
; SSE-NEXT: ret <4 x double> [[R]]
;
; AVX-LABEL: @ext1_v2f64v4f64(
; AVX-NEXT: [[TMP1:%.*]] = fneg nsz <2 x double> [[X:%.*]]
; AVX-NEXT: [[TMP2:%.*]] = shufflevector <2 x double> [[TMP1]], <2 x double> poison, <4 x i32> <i32 poison, i32 1, i32 poison, i32 poison>
; AVX-NEXT: [[R:%.*]] = shufflevector <4 x double> [[Y:%.*]], <4 x double> [[TMP2]], <4 x i32> <i32 0, i32 5, i32 2, i32 3>
; AVX-NEXT: ret <4 x double> [[R]]
; CHECK-LABEL: @ext1_v2f64v4f64(
; CHECK-NEXT: [[TMP1:%.*]] = fneg nsz <2 x double> [[X:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <2 x double> [[TMP1]], <2 x double> poison, <4 x i32> <i32 poison, i32 1, i32 poison, i32 poison>
; CHECK-NEXT: [[R:%.*]] = shufflevector <4 x double> [[Y:%.*]], <4 x double> [[TMP2]], <4 x i32> <i32 0, i32 5, i32 2, i32 3>
; CHECK-NEXT: ret <4 x double> [[R]]
;
%e = extractelement <2 x double> %x, i32 1
%n = fneg nsz double %e
Expand All @@ -105,9 +99,9 @@ define <8 x float> @ext7_v8f32(<8 x float> %x, <8 x float> %y) {

define <8 x float> @ext7_v4f32v8f32(<4 x float> %x, <8 x float> %y) {
; CHECK-LABEL: @ext7_v4f32v8f32(
; CHECK-NEXT: [[E:%.*]] = extractelement <4 x float> [[X:%.*]], i32 3
; CHECK-NEXT: [[N:%.*]] = fneg float [[E]]
; CHECK-NEXT: [[R:%.*]] = insertelement <8 x float> [[Y:%.*]], float [[N]], i32 7
; CHECK-NEXT: [[TMP1:%.*]] = fneg <4 x float> [[X:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> poison, <8 x i32> <i32 poison, i32 poison, i32 poison, i32 3, i32 poison, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT: [[R:%.*]] = shufflevector <8 x float> [[Y:%.*]], <8 x float> [[TMP2]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 11>
; CHECK-NEXT: ret <8 x float> [[R]]
;
%e = extractelement <4 x float> %x, i32 3
Expand Down Expand Up @@ -141,12 +135,20 @@ define <8 x float> @ext7_v8f32_use1(<8 x float> %x, <8 x float> %y) {
}

define <8 x float> @ext7_v4f32v8f32_use1(<4 x float> %x, <8 x float> %y) {
; CHECK-LABEL: @ext7_v4f32v8f32_use1(
; CHECK-NEXT: [[E:%.*]] = extractelement <4 x float> [[X:%.*]], i32 3
; CHECK-NEXT: call void @use(float [[E]])
; CHECK-NEXT: [[N:%.*]] = fneg float [[E]]
; CHECK-NEXT: [[R:%.*]] = insertelement <8 x float> [[Y:%.*]], float [[N]], i32 3
; CHECK-NEXT: ret <8 x float> [[R]]
; SSE-LABEL: @ext7_v4f32v8f32_use1(
; SSE-NEXT: [[E:%.*]] = extractelement <4 x float> [[X:%.*]], i32 3
; SSE-NEXT: call void @use(float [[E]])
; SSE-NEXT: [[TMP1:%.*]] = fneg <4 x float> [[X]]
; SSE-NEXT: [[TMP2:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> poison, <8 x i32> <i32 poison, i32 poison, i32 poison, i32 3, i32 poison, i32 poison, i32 poison, i32 poison>
; SSE-NEXT: [[R:%.*]] = shufflevector <8 x float> [[Y:%.*]], <8 x float> [[TMP2]], <8 x i32> <i32 0, i32 1, i32 2, i32 11, i32 4, i32 5, i32 6, i32 7>
; SSE-NEXT: ret <8 x float> [[R]]
;
; AVX-LABEL: @ext7_v4f32v8f32_use1(
; AVX-NEXT: [[E:%.*]] = extractelement <4 x float> [[X:%.*]], i32 3
; AVX-NEXT: call void @use(float [[E]])
; AVX-NEXT: [[N:%.*]] = fneg float [[E]]
; AVX-NEXT: [[R:%.*]] = insertelement <8 x float> [[Y:%.*]], float [[N]], i32 3
; AVX-NEXT: ret <8 x float> [[R]]
;
%e = extractelement <4 x float> %x, i32 3
call void @use(float %e)
Expand Down Expand Up @@ -220,9 +222,8 @@ define <4 x double> @ext_index_var_v2f64v4f64(<2 x double> %x, <4 x double> %y,

define <2 x double> @ext1_v2f64_ins0(<2 x double> %x, <2 x double> %y) {
; CHECK-LABEL: @ext1_v2f64_ins0(
; CHECK-NEXT: [[E:%.*]] = extractelement <2 x double> [[X:%.*]], i32 1
; CHECK-NEXT: [[N:%.*]] = fneg nsz double [[E]]
; CHECK-NEXT: [[R:%.*]] = insertelement <2 x double> [[Y:%.*]], double [[N]], i32 0
; CHECK-NEXT: [[TMP1:%.*]] = fneg nsz <2 x double> [[X:%.*]]
; CHECK-NEXT: [[R:%.*]] = shufflevector <2 x double> [[Y:%.*]], <2 x double> [[TMP1]], <2 x i32> <i32 3, i32 1>
; CHECK-NEXT: ret <2 x double> [[R]]
;
%e = extractelement <2 x double> %x, i32 1
Expand All @@ -234,9 +235,9 @@ define <2 x double> @ext1_v2f64_ins0(<2 x double> %x, <2 x double> %y) {
; Negative test - extract from an index greater than the vector width of the destination
define <2 x double> @ext3_v4f64v2f64(<4 x double> %x, <2 x double> %y) {
; CHECK-LABEL: @ext3_v4f64v2f64(
; CHECK-NEXT: [[E:%.*]] = extractelement <4 x double> [[X:%.*]], i32 3
; CHECK-NEXT: [[N:%.*]] = fneg nsz double [[E]]
; CHECK-NEXT: [[R:%.*]] = insertelement <2 x double> [[Y:%.*]], double [[N]], i32 1
; CHECK-NEXT: [[TMP1:%.*]] = fneg nsz <4 x double> [[X:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x double> [[TMP1]], <4 x double> poison, <2 x i32> <i32 poison, i32 3>
; CHECK-NEXT: [[R:%.*]] = shufflevector <2 x double> [[Y:%.*]], <2 x double> [[TMP2]], <2 x i32> <i32 0, i32 3>
; CHECK-NEXT: ret <2 x double> [[R]]
;
%e = extractelement <4 x double> %x, i32 3
Expand All @@ -246,11 +247,17 @@ define <2 x double> @ext3_v4f64v2f64(<4 x double> %x, <2 x double> %y) {
}

define <4 x double> @ext1_v2f64v4f64_ins0(<2 x double> %x, <4 x double> %y) {
; CHECK-LABEL: @ext1_v2f64v4f64_ins0(
; CHECK-NEXT: [[E:%.*]] = extractelement <2 x double> [[X:%.*]], i32 1
; CHECK-NEXT: [[N:%.*]] = fneg nsz double [[E]]
; CHECK-NEXT: [[R:%.*]] = insertelement <4 x double> [[Y:%.*]], double [[N]], i32 0
; CHECK-NEXT: ret <4 x double> [[R]]
; SSE-LABEL: @ext1_v2f64v4f64_ins0(
; SSE-NEXT: [[TMP1:%.*]] = fneg nsz <2 x double> [[X:%.*]]
; SSE-NEXT: [[TMP2:%.*]] = shufflevector <2 x double> [[TMP1]], <2 x double> poison, <4 x i32> <i32 poison, i32 1, i32 poison, i32 poison>
; SSE-NEXT: [[R:%.*]] = shufflevector <4 x double> [[Y:%.*]], <4 x double> [[TMP2]], <4 x i32> <i32 5, i32 1, i32 2, i32 3>
; SSE-NEXT: ret <4 x double> [[R]]
;
; AVX-LABEL: @ext1_v2f64v4f64_ins0(
; AVX-NEXT: [[E:%.*]] = extractelement <2 x double> [[X:%.*]], i32 1
; AVX-NEXT: [[N:%.*]] = fneg nsz double [[E]]
; AVX-NEXT: [[R:%.*]] = insertelement <4 x double> [[Y:%.*]], double [[N]], i32 0
; AVX-NEXT: ret <4 x double> [[R]]
;
%e = extractelement <2 x double> %x, i32 1
%n = fneg nsz double %e
Expand Down