diff --git a/infersparsitytest.daph b/infersparsitytest.daph new file mode 100644 index 000000000..3640a253e --- /dev/null +++ b/infersparsitytest.daph @@ -0,0 +1,131 @@ +// -------------------------------------------------------------------- +// Outer Binary +// -------------------------------------------------------------------- + +print("Outer Binary"); + +// we take 3 matrices of different sparsities to take into account all the possible cases for the different ops. We also need the row and column matrix for every sparsity. +empty_col = rand(2, 1, 1, 1, 0.0, -1); +empty_row = rand(1, 2, 1, 1, 0.0, -1); +half_col = rand(2, 1, 1, 1, 0.5, -1); +half_row = rand(1, 2, 1, 1, 0.5, -1); +full_col = rand(2, 1, 1, 1, 1.0, -1); +full_row = rand(1, 2, 1, 1, 1.0, -1); +unknown_col = sin(rand(2, 1, 1, 1, 0.5, -1)); +unknown_row = sin(rand(1, 2, 1, 1, 0.5, -1)); + +print("outeradd"); +print(sparsity(outerAdd(empty_col, empty_row))); +print(sparsity(outerAdd(empty_col, half_row))); +print(sparsity(outerAdd(half_col, half_row))); +print(sparsity(outerAdd(unknown_col, half_row))); +print(sparsity(outerAdd(unknown_col, unknown_row))); + +print("outersub"); +print(sparsity(outerSub(empty_col, empty_row))); +print(sparsity(outerSub(empty_col, half_row))); +print(sparsity(outerSub(half_col, half_row))); +print(sparsity(outerSub(unknown_col, half_row))); +print(sparsity(outerSub(unknown_col, unknown_row))); + +print("outermul"); +print(sparsity(outerMul(empty_col, empty_row))); +print(sparsity(outerMul(empty_col, half_row))); +print(sparsity(outerMul(half_col, half_row))); +print(sparsity(outerMul(unknown_col, empty_row))); +print(sparsity(outerMul(unknown_col, half_row))); +print(sparsity(outerMul(unknown_col, unknown_row))); + +print("outerdiv"); +// TODO: Fix division by 0 inconsistency in #802 before uncommenting the following test cases AND adding the results to sparsity_3.txt. 
+//print(sparsity(outerDiv(empty_col, empty_row))); +//print(sparsity(outerDiv(empty_col, half_row))); +//print(sparsity(outerDiv(half_col, half_row))); +print(sparsity(outerDiv(unknown_col, half_row))); +print(sparsity(outerDiv(half_col, unknown_row))); +print(sparsity(outerDiv(unknown_col, unknown_row))); + +print("outerpow"); +print(sparsity(outerPow(empty_col, empty_row))); +print(sparsity(outerPow(empty_col, half_row))); +print(sparsity(outerPow(half_col, half_row))); +print(sparsity(outerPow(unknown_col, half_row))); +print(sparsity(outerPow(unknown_col, unknown_row))); + +print("outermod"); +print(sparsity(outerMod(empty_col, empty_row))); +print(sparsity(outerMod(empty_col, half_row))); +print(sparsity(outerMod(half_col, half_row))); +print(sparsity(outerMod(unknown_col, half_row))); +print(sparsity(outerMod(unknown_col, unknown_row))); + +print("outermin"); +print(sparsity(outerMin(empty_col, empty_row))); +print(sparsity(outerMin(empty_col, half_row))); +print(sparsity(outerMin(full_col, full_row))); +print(sparsity(outerMin(unknown_col, empty_row))); + +print("outermax"); +print(sparsity(outerMax(empty_col, empty_row))); +print(sparsity(outerMax(empty_col, half_row))); +print(sparsity(outerMax(full_col, full_row))); +print(sparsity(outerMax(unknown_col, empty_row))); + +print("outerand"); +print(sparsity(outerAnd(empty_col, empty_row))); +print(sparsity(outerAnd(empty_col, half_row))); +print(sparsity(outerAnd(half_col, half_row))); +print(sparsity(outerAnd(unknown_col, empty_row))); +print(sparsity(outerAnd(unknown_col, full_row))); + +print("outeror"); +print(sparsity(outerOr(empty_col, empty_row))); +print(sparsity(outerOr(empty_col, half_row))); +print(sparsity(outerOr(half_col, empty_row))); +print(sparsity(outerOr(half_col, half_row))); +print(sparsity(outerOr(unknown_col, empty_row))); +print(sparsity(outerOr(unknown_col, full_row))); + +print("outereq"); +print(sparsity(outerEq(empty_col, empty_row))); +print(sparsity(outerEq(full_col, 
full_row))); +print(sparsity(outerEq(full_col, empty_row))); +print(sparsity(outerEq(empty_col, full_row))); +print(sparsity(outerEq(unknown_col, empty_row))); +print(sparsity(outerEq(unknown_col, unknown_row))); + +print("outerneq"); +print(sparsity(outerNeq(empty_col, empty_row))); +print(sparsity(outerNeq(full_col, full_row))); +print(sparsity(outerNeq(full_col, empty_row))); +print(sparsity(outerNeq(empty_col, full_row))); +print(sparsity(outerNeq(unknown_col, empty_row))); +print(sparsity(outerNeq(unknown_col, unknown_row))); + +print("outerlt"); +print(sparsity(outerLt(empty_col, empty_row))); +print(sparsity(outerLt(full_col, full_row))); +print(sparsity(outerLt(full_col, empty_row))); +print(sparsity(outerLt(unknown_col, empty_row))); +print(sparsity(outerLt(unknown_col, unknown_row))); + +print("outerle"); +print(sparsity(outerLe(empty_col, empty_row))); +print(sparsity(outerLe(full_col, full_row))); +print(sparsity(outerLe(full_col, empty_row))); +print(sparsity(outerLe(unknown_col, empty_row))); +print(sparsity(outerLe(unknown_col, unknown_row))); + +print("outergt"); +print(sparsity(outerGt(empty_col, empty_row))); +print(sparsity(outerGt(full_col, full_row))); +print(sparsity(outerGt(full_col, empty_row))); +print(sparsity(outerGt(unknown_col, empty_row))); +print(sparsity(outerGt(unknown_col, unknown_row))); + +print("outerge"); +print(sparsity(outerGe(empty_col, empty_row))); +print(sparsity(outerGe(full_col, full_row))); +print(sparsity(outerGe(full_col, empty_row))); +print(sparsity(outerGe(unknown_col, empty_row))); +print(sparsity(outerGe(unknown_col, unknown_row))); diff --git a/src/ir/daphneir/DaphneInferSparsityOpInterface.cpp b/src/ir/daphneir/DaphneInferSparsityOpInterface.cpp index 2759011f2..d5a033b88 100644 --- a/src/ir/daphneir/DaphneInferSparsityOpInterface.cpp +++ b/src/ir/daphneir/DaphneInferSparsityOpInterface.cpp @@ -100,6 +100,916 @@ std::vector daphne::ReadOp::inferSparsity() { return {-1.0}; } + +// 
--------------------------------------------------------------------
+// Data Generation
+// --------------------------------------------------------------------
+
+std::vector<double> daphne::FillOp::inferSparsity() {
+    auto co = CompilerUtils::constantOfAnyType(getArg());
+    if (!co) {
+        return {-1.0};
+    }
+
+    double v = 0.0;
+
+    auto valueAttr = co->getAttr("value");
+    if (auto floatAttr = valueAttr.dyn_cast<mlir::FloatAttr>()) {
+        v = floatAttr.getValueAsDouble();
+    } else if (auto intAttr = valueAttr.dyn_cast<mlir::IntegerAttr>()) {
+        if (intAttr.getType().isSignlessInteger()) {
+            v = static_cast<double>(intAttr.getInt());
+        } else if (intAttr.getType().isSignedInteger()) {
+            v = static_cast<double>(intAttr.getSInt());
+        }
+    } else {
+        throw std::runtime_error("Unsupported type for FillOp sparsity inference");
+    }
+
+    if (v == -1.0) {
+        return {-1.0};
+    } else if (v == 0.0) {
+        return {0.0};
+    } else {
+        return {1.0};
+    }
+}
+
+std::vector<double> daphne::SampleOp::inferSparsity() {
+    /*
+     * Infers the sparsity of the sample operation based on several conditions.
+     *
+     * 1. If vRange is a float:
+     *    - Return -1 due to the incredibly low chance of an element being 0 in the double range
+     *
+     * 2. If withReplacement = 1:
+     *    - The chance of an element being sparse is exactly 1 - 1/range.
+     *
+     * 3. If size == range:
+     *    - There will be exactly one 0 element, so the sparsity will be 1 - 1/size.
+     *
+     * 4. If size < range:
+     *    - Use combinatorics to estimate the sparsity. For details, refer to the overleaf document mentioned in #766.
+ */ + int64_t vSize; + bool vReplace; + try { + vSize = CompilerUtils::constantOrThrow(getSize()); + vReplace = CompilerUtils::constantOrThrow(getWithReplacement()); + } catch (const std::runtime_error & e) { + return {-1.0}; + } + + auto co = CompilerUtils::constantOfAnyType(getRange()); + if (!co) { + return {-1.0}; + } + + int64_t vRange = 0; + auto valueAttr = co->getAttr("value"); + if (auto floatAttr = valueAttr.dyn_cast()) { + return {-1.0}; + } else if (auto intAttr = valueAttr.dyn_cast()) { + vRange = intAttr.getSInt(); + } else { + throw std::runtime_error("Unsupported type for SampleOp sparsity inference"); + } + + if (vReplace == 1) { + return {1.0 - 1.0/(double)vRange}; + } + + if (vSize == vRange) { + return {1.0 - 1.0/(double)vSize}; + } + + return {1.0 - vSize/(double)vRange}; +} + +std::vector daphne::SeqOp::inferSparsity() { + Type fromTy = getFrom().getType(); + if(fromTy.isF64()) { + try { + double vFrom = CompilerUtils::constantOrThrow(getFrom()); + double vTo = CompilerUtils::constantOrThrow(getTo()); + double vInc = CompilerUtils::constantOrThrow(getInc()); + + if ((vFrom < 0.0 && vInc < 0.0) || (vFrom > 0.0 && vInc > 0.0) || (vFrom < 0.0 && vTo < 0.0) || (vFrom > 0.0 && vTo > 0.0)) { + return {1.0}; + } else if (fmod(vFrom, vInc) == 0.0) { + int numRows = abs((vTo - vFrom) / vInc) + 1.0; + return {1 - (1.0 / (double)numRows)}; + } + return {1.0}; + } + catch(const std::runtime_error & e) { + return {-1.0}; + } + } + if(fromTy.isF32()) { + try { + float vFrom = CompilerUtils::constantOrThrow(getFrom()); + float vTo = CompilerUtils::constantOrThrow(getTo()); + float vInc = CompilerUtils::constantOrThrow(getInc()); + + if ((vFrom < 0.0 && vInc < 0.0) || (vFrom > 0.0 && vInc > 0.0) || (vFrom < 0.0 && vTo < 0.0) || (vFrom > 0.0 && vTo > 0.0)) { + return {1.0}; + } else if (fmod(vFrom, vInc) == 0.0) { + int numRows = abs((vTo - vFrom) / vInc) + 1.0; + return {1 - (1.0 / (double)numRows)}; + } + return {1.0}; + } + catch(const std::runtime_error & 
e) { + return {-1.0}; + } + } + else if(fromTy.isSignedInteger(64)) { + try { + int64_t vFrom = CompilerUtils::constantOrThrow(getFrom()); + int64_t vTo = CompilerUtils::constantOrThrow(getTo()); + int64_t vInc = CompilerUtils::constantOrThrow(getInc()); + + if ((vFrom < 0 && vInc < 0) || (vFrom > 0 && vInc > 0) || (vFrom < 0 && vTo < 0) || (vFrom > 0 && vTo > 0)) { + return {1.0}; + } else if (fmod(vFrom, vInc) == 0) { + int numRows = abs((vTo - vFrom) / vInc) + 1; + return {1 - (1.0 / (double)numRows)}; + } + return {1.0}; + } + catch(const std::runtime_error & e) { + return {-1.0}; + } + } + throw ErrorHandler::compilerError( + getLoc(), "InferSparsityOpInterface (daphne::SeqOp::inferSparsity)", + "at the moment, sparsity inference for SeqOp supports only F64/F32 and " + "SI64 value types"); +} + + + +// -------------------------------------------------------------------- +// Elementwise Binary +// -------------------------------------------------------------------- + +std::vector daphne::EwAddOp::inferSparsity() { + /** + * Uses the probability P(A || B) to estimate the output sparsity. + */ + auto lhs = getLhs().getType().dyn_cast(); + double lhsSparsity = 0.0; + if (lhs) { + lhsSparsity = lhs.getSparsity(); + } else { + lhsSparsity = -1.0; + } + + auto rhs = getRhs().getType().dyn_cast(); + double rhsSparsity = 0.0; + if (rhs) { + rhsSparsity = rhs.getSparsity(); + } else { + rhsSparsity = -1.0; + } + + if (lhsSparsity == -1.0 || rhsSparsity == -1.0) { + return {-1.0}; + } else if (lhsSparsity == 0.0) { + return {rhsSparsity}; + } else if (rhsSparsity == 0.0) { + return {lhsSparsity}; + } + return {lhsSparsity + rhsSparsity - lhsSparsity * rhsSparsity}; +} + +std::vector daphne::EwSubOp::inferSparsity() { + /** + * Uses the probability P(A || B) to estimate the output sparsity. 
+ */ + auto lhs = getLhs().getType().dyn_cast(); + double lhsSparsity = 0.0; + if (lhs) { + lhsSparsity = lhs.getSparsity(); + } else { + lhsSparsity = -1.0; + } + + auto rhs = getRhs().getType().dyn_cast(); + double rhsSparsity = 0.0; + if (rhs) { + rhsSparsity = rhs.getSparsity(); + } else { + rhsSparsity = -1.0; + } + + if (lhsSparsity == -1.0 || rhsSparsity == -1.0) { + return {-1.0}; + } else if (lhsSparsity == 0.0) { + return {rhsSparsity}; + } else if (rhsSparsity == 0.0) { + return {lhsSparsity}; + } + return {lhsSparsity + rhsSparsity - lhsSparsity * rhsSparsity}; +} + +std::vector daphne::EwMulOp::inferSparsity() { + /** + * Uses the probability P(A && B) to estimate the output sparsity. + */ + auto lhs = getLhs().getType().dyn_cast(); + double lhsSparsity = 0.0; + lhsSparsity = lhs.getSparsity(); + + auto rhs = getRhs().getType().dyn_cast(); + double rhsSparsity = 0.0; + rhsSparsity = rhs.getSparsity(); + + if (lhsSparsity == -1.0) { + return {rhsSparsity}; + } else if (rhsSparsity == -1.0) { + return{lhsSparsity}; + } else if (lhsSparsity == -1.0 && rhsSparsity == -1.0) { + return {-1.0}; + } + + return {lhsSparsity * rhsSparsity}; +} + +std::vector daphne::EwDivOp::inferSparsity() { + auto lhs = getLhs().getType().dyn_cast(); + double lhsSparsity = 0.0; + if (lhs) { + lhsSparsity = lhs.getSparsity(); + } else { + lhsSparsity = -1.0; + } + + auto rhs = getRhs().getType().dyn_cast(); + double rhsSparsity = 0.0; + if (rhs) { + rhsSparsity = rhs.getSparsity(); + } else { + rhsSparsity = -1.0; + } + + if (lhsSparsity == -1.0 || rhsSparsity == -1.0) { + return {-1.0}; + } + return {lhsSparsity}; +} + +std::vector daphne::EwPowOp::inferSparsity() { + /** + * If the lhs sparsity is unknown and the rhs sparsity is zero, the resulting matrix will always have a sparsity of 1 + * If the rhs sparsity is unknown, the resulting sparsity will be 1 if the lhs sparsity is 1 + * If both sparsities are known, first handle the trivial cases and then use P(A && !B). 
+ */ + auto lhs = getLhs().getType().dyn_cast(); + double lhsSparsity = 0.0; + if (lhs) { + lhsSparsity = lhs.getSparsity(); + } else { + lhsSparsity = -1.0; + } + + auto rhs = getRhs().getType().dyn_cast(); + double rhsSparsity = 0.0; + if (rhs) { + rhsSparsity = rhs.getSparsity(); + } else { + rhsSparsity = -1.0; + } + + if (lhsSparsity == -1.0 && rhsSparsity == -1.0) { + return {-1.0}; + } else if (lhsSparsity == -1.0) { + if (rhsSparsity == 0.0) { + return {1.0}; + } else { + return {-1.0}; + } + } else if (rhsSparsity == -1.0) { + if (lhsSparsity == 1.0) { + return {1.0}; + } else { + return {-1.0}; + } + } + + if (lhsSparsity == 1.0 || rhsSparsity == 0.0) { + return {1.0}; + } else if (lhsSparsity == 0.0 && rhsSparsity == 1.0) { + return {0.0}; + } + + return {lhsSparsity + (1 - rhsSparsity) - lhsSparsity * (1 - rhsSparsity)}; +} + +std::vector daphne::EwModOp::inferSparsity() { + /* + * Returns a sparsity of 0 only if the lhssparsity is known and 0 as well. + * In other cases we either know too little about the value distribution in the matrices or have a chance of 0mod0, which results in an error. + */ + auto lhs = getLhs().getType().dyn_cast(); + double lhsSparsity = 0.0; + if (lhs) { + lhsSparsity = lhs.getSparsity(); + } else { + lhsSparsity = -1.0; + } + + if (lhsSparsity == 0) { + return {0.0}; + } + return {-1.0}; +} + +std::vector daphne::EwAndOp::inferSparsity() { + /** + * Uses the probability P(A && B) to estimate the output sparsity. 
+ */ + auto lhs = getLhs().getType().dyn_cast(); + double lhsSparsity = 0.0; + if (lhs) { + lhsSparsity = lhs.getSparsity(); + } else { + lhsSparsity = -1.0; + } + + auto rhs = getRhs().getType().dyn_cast(); + double rhsSparsity = 0.0; + if (rhs) { + rhsSparsity = rhs.getSparsity(); + } else { + rhsSparsity = -1.0; + } + + if ((lhsSparsity == -1.0 && rhsSparsity == -1.0) || (lhsSparsity != 0.0 && rhsSparsity == -1.0) || (lhsSparsity == -1.0 && rhsSparsity != 0.0)) { + return {-1.0}; + } + return {lhsSparsity * rhsSparsity}; +} + +std::vector daphne::EwOrOp::inferSparsity() { + /** + * Uses the probability P(A || B) to estimate the output sparsity. + */ + auto lhs = getLhs().getType().dyn_cast(); + double lhsSparsity = 0.0; + if (lhs) { + lhsSparsity = lhs.getSparsity(); + } else { + lhsSparsity = -1.0; + } + + auto rhs = getRhs().getType().dyn_cast(); + double rhsSparsity = 0.0; + if (rhs) { + rhsSparsity = rhs.getSparsity(); + } else { + rhsSparsity = -1.0; + } + + if ((lhsSparsity == -1.0 && rhsSparsity == -1.0) || (lhsSparsity != 1.0 && rhsSparsity == -1.0) || (lhsSparsity == -1.0 && rhsSparsity != 1.0)) { + return {-1.0}; + } else if (lhsSparsity == 1.0 || rhsSparsity == 1.0) { + return {1.0}; + } else if (lhsSparsity == 0.0) { + return {rhsSparsity}; + } else if (rhsSparsity == 0.0) { + return {lhsSparsity}; + } + return {lhsSparsity + rhsSparsity - lhsSparsity * rhsSparsity}; +} + +std::vector daphne::EwXorOp::inferSparsity() { + /** + * Uses the probability P(A ⊕ B) to estimate the output sparsity. 
+ */ + auto lhs = getLhs().getType().dyn_cast(); + double lhsSparsity = 0.0; + if (lhs) { + lhsSparsity = lhs.getSparsity(); + } else { + lhsSparsity = -1.0; + } + + auto rhs = getRhs().getType().dyn_cast(); + double rhsSparsity = 0.0; + if (rhs) { + rhsSparsity = rhs.getSparsity(); + } else { + rhsSparsity = -1.0; + } + + if (lhsSparsity == -1.0 || rhsSparsity == -1.0) { + return {-1.0}; + } else if ((lhsSparsity == 1.0 && rhsSparsity == 1.0) || (lhsSparsity == 0.0 && rhsSparsity == 0.0)) { + return {0.0}; + } else if ((lhsSparsity == 1.0 && rhsSparsity == 0.0) || (lhsSparsity == 0.0 && rhsSparsity == 1.0)) { + return {1.0}; + } + // TODO This function requires testing after #551 is implemented. + return {lhsSparsity + rhsSparsity - 2 * (lhsSparsity * rhsSparsity)}; +} + +std::vector daphne::EwEqOp::inferSparsity() { + /* + * If both input matrices have a sparsity of 0, the output sparsity will be 1. + * If one matrix has a sparsity of 0 and the other 1, the output sparsity will always be 0. + */ + auto lhs = getLhs().getType().dyn_cast(); + double lhsSparsity = 0.0; + if (lhs) { + lhsSparsity = lhs.getSparsity(); + } else { + lhsSparsity = -1.0; + } + + auto rhs = getRhs().getType().dyn_cast(); + double rhsSparsity = 0.0; + if (rhs) { + rhsSparsity = rhs.getSparsity(); + } else { + rhsSparsity = -1.0; + } + + if (lhsSparsity == -1.0 || rhsSparsity == -1.0) { + return {-1.0}; + } else if (lhsSparsity == 0.0 && rhsSparsity == 0.0) { + return {1.0}; + } else if ((lhsSparsity == 1.0 && rhsSparsity == 0.0) || (lhsSparsity == 0.0 && rhsSparsity == 1.0)) { + return {0.0}; + } + return {-1.0}; +} + +std::vector daphne::EwNeqOp::inferSparsity() { + /* + * The inverted method of EwEq. 
+ */ + auto lhs = getLhs().getType().dyn_cast(); + double lhsSparsity = 0.0; + if (lhs) { + lhsSparsity = lhs.getSparsity(); + } else { + lhsSparsity = -1.0; + } + + auto rhs = getRhs().getType().dyn_cast(); + double rhsSparsity = 0.0; + if (rhs) { + rhsSparsity = rhs.getSparsity(); + } else { + rhsSparsity = -1.0; + } + + if (lhsSparsity == -1.0 || rhsSparsity == -1.0) { + return {-1.0}; + } else if (lhsSparsity == 0.0 && rhsSparsity == 0.0) { + return {0.0}; + } else if ((lhsSparsity == 1.0 && rhsSparsity == 0.0) || (lhsSparsity == 0.0 && rhsSparsity == 1.0) || (lhsSparsity == 1.0 && rhsSparsity == 1.0)) { + return {1.0}; + } + return {-1.0}; +} + +std::vector daphne::EwLeOp::inferSparsity() { + /* + * Returns a sparsity of 1 if both input matrices have a sparsity of 0. + * Unknown output sparsity for all other cases. + */ + auto lhs = getLhs().getType().dyn_cast(); + double lhsSparsity = 0.0; + if (lhs) { + lhsSparsity = lhs.getSparsity(); + } else { + lhsSparsity = -1.0; + } + + auto rhs = getRhs().getType().dyn_cast(); + double rhsSparsity = 0.0; + if (rhs) { + rhsSparsity = rhs.getSparsity(); + } else { + rhsSparsity = -1.0; + } + + if (lhsSparsity == -1.0 || rhsSparsity == -1.0) { + return {-1.0}; + } else if (lhsSparsity == 0.0 && rhsSparsity == 0.0) { + return {1.0}; + } + return {-1.0}; +} + +std::vector daphne::EwGeOp::inferSparsity() { + /* + * Returns a sparsity of 1 if both input matrices have a sparsity of 0. + * Unknown output sparsity for all other cases. 
+ */ + auto lhs = getLhs().getType().dyn_cast(); + double lhsSparsity = 0.0; + if (lhs) { + lhsSparsity = lhs.getSparsity(); + } else { + lhsSparsity = -1.0; + } + + auto rhs = getRhs().getType().dyn_cast(); + double rhsSparsity = 0.0; + if (rhs) { + rhsSparsity = rhs.getSparsity(); + } else { + rhsSparsity = -1.0; + } + + if (lhsSparsity == -1.0 || rhsSparsity == -1.0) { + return {-1.0}; + } else if (lhsSparsity == 0.0 && rhsSparsity == 0.0) { + return {1.0}; + } + return {-1.0}; +} + + +// -------------------------------------------------------------------- +// Outer Binary +// -------------------------------------------------------------------- + +std::vector daphne::OuterAddOp::inferSparsity() { + /** + * Uses the probability P(A || B) to estimate the output sparsity. + */ + auto lhs = getLhs().getType().dyn_cast(); + auto rhs = getRhs().getType().dyn_cast(); + + double lhsSparsity = lhs.getSparsity(); + double rhsSparsity = rhs.getSparsity(); + + if (lhsSparsity == -1.0 || rhsSparsity == -1.0) { + return {-1.0}; + } else if (lhsSparsity == 0.0) { + return {rhsSparsity}; + } else if (rhsSparsity == 0.0) { + return {lhsSparsity}; + } + return {lhsSparsity + rhsSparsity - lhsSparsity * rhsSparsity}; +} + +std::vector daphne::OuterSubOp::inferSparsity() { + /** + * Uses the probability P(A || B) to estimate the output sparsity. + */ + auto lhs = getLhs().getType().dyn_cast(); + auto rhs = getRhs().getType().dyn_cast(); + + double lhsSparsity = lhs.getSparsity(); + double rhsSparsity = rhs.getSparsity(); + + if (lhsSparsity == -1.0 || rhsSparsity == -1.0) { + return {-1.0}; + } else if (lhsSparsity == 0.0) { + return {rhsSparsity}; + } else if (rhsSparsity == 0.0) { + return {lhsSparsity}; + } + return {lhsSparsity + rhsSparsity - lhsSparsity * rhsSparsity}; +} + +std::vector daphne::OuterMulOp::inferSparsity() { + /** + * Uses the probability P(A && B) to estimate the output sparsity. 
+ */ + auto lhs = getLhs().getType().dyn_cast(); + auto rhs = getRhs().getType().dyn_cast(); + + double lhsSparsity = lhs.getSparsity(); + double rhsSparsity = rhs.getSparsity(); + + if ((lhsSparsity == -1.0 && rhsSparsity == -1.0) || (lhsSparsity == -1.0 && rhsSparsity != 0.0) || (lhsSparsity != 0.0 && rhsSparsity == -1.0)) { + return {-1.0}; + } + return {lhsSparsity * rhsSparsity}; +} + +std::vector daphne::OuterDivOp::inferSparsity() { + auto lhs = getLhs().getType().dyn_cast(); + auto rhs = getRhs().getType().dyn_cast(); + + double lhsSparsity = lhs.getSparsity(); + double rhsSparsity = rhs.getSparsity(); + + if (lhsSparsity == -1.0 || rhsSparsity == -1.0) { + return {-1.0}; + } + return {lhsSparsity}; +} + +std::vector daphne::OuterPowOp::inferSparsity() { + /** + * First handles trivial cases and then uses the probability of P(A && !B). + */ + auto lhs = getLhs().getType().dyn_cast(); + auto rhs = getRhs().getType().dyn_cast(); + + double lhsSparsity = lhs.getSparsity(); + double rhsSparsity = rhs.getSparsity(); + + if (lhsSparsity == -1.0 && rhsSparsity == -1.0) { + return {-1.0}; + } else if (lhsSparsity == -1.0) { + if (rhsSparsity == 0.0) { + return {1.0}; + } else { + return {-1.0}; + } + } else if (rhsSparsity == -1.0) { + if (lhsSparsity == 1.0) { + return {1.0}; + } else { + return {-1.0}; + } + } + + if (lhsSparsity == 1.0 || rhsSparsity == 0.0) { + return {1.0}; + } else if (lhsSparsity == 0.0 && rhsSparsity == 1.0) { + return {0.0}; + } + + return {lhsSparsity + (1 - rhsSparsity) - lhsSparsity * (1 - rhsSparsity)}; +} + +std::vector daphne::OuterModOp::inferSparsity() { + auto lhs = getLhs().getType().dyn_cast(); + + double lhsSparsity = lhs.getSparsity(); + + if (lhsSparsity == 0) { + return {0.0}; + } + return {-1.0}; +} + +std::vector daphne::OuterAndOp::inferSparsity() { + /** + * Uses the probability P(A && B) to estimate the output sparsity. 
+ */ + auto lhs = getLhs().getType().dyn_cast(); + auto rhs = getRhs().getType().dyn_cast(); + + double lhsSparsity = lhs.getSparsity(); + double rhsSparsity = rhs.getSparsity(); + + if ((lhsSparsity == -1.0 && rhsSparsity == -1.0) || (lhsSparsity != 0.0 && rhsSparsity == -1.0) || (lhsSparsity == -1.0 && rhsSparsity != 0.0)) { + return {-1.0}; + } + return {lhsSparsity * rhsSparsity}; +} + +std::vector daphne::OuterOrOp::inferSparsity() { + /** + * Uses the probability P(A || B) to estimate the output sparsity. + */ + auto lhs = getLhs().getType().dyn_cast(); + auto rhs = getRhs().getType().dyn_cast(); + + double lhsSparsity = lhs.getSparsity(); + double rhsSparsity = rhs.getSparsity(); + + if ((lhsSparsity == -1.0 && rhsSparsity == -1.0) || (lhsSparsity != 1.0 && rhsSparsity == -1.0) || (lhsSparsity == -1.0 && rhsSparsity != 1.0)) { + return {-1.0}; + } else if (lhsSparsity == 1.0 || rhsSparsity == 1.0) { + return {1.0}; + } else if (lhsSparsity == 0.0) { + return {rhsSparsity}; + } else if (rhsSparsity == 0.0) { + return {lhsSparsity}; + } + return {lhsSparsity + rhsSparsity - lhsSparsity * rhsSparsity}; +} + +std::vector daphne::OuterXorOp::inferSparsity() { + /** + * Uses the probability P(A ⊕ B) to estimate the output sparsity. + */ + auto lhs = getLhs().getType().dyn_cast(); + auto rhs = getRhs().getType().dyn_cast(); + + double lhsSparsity = lhs.getSparsity(); + double rhsSparsity = rhs.getSparsity(); + + if (lhsSparsity == -1.0 || rhsSparsity == -1.0) { + return {-1.0}; + } else if ((lhsSparsity == 1.0 && rhsSparsity == 1.0) || (lhsSparsity == 0.0 && rhsSparsity == 0.0)) { + return {0.0}; + } else if ((lhsSparsity == 1.0 && rhsSparsity == 0.0) || (lhsSparsity == 0.0 && rhsSparsity == 1.0)) { + return {1.0}; + } + // TODO This function requires testing after #551 is implemented. 
+ return {lhsSparsity + rhsSparsity - 2 * (lhsSparsity * rhsSparsity)}; +} + +std::vector daphne::OuterEqOp::inferSparsity() { + auto lhs = getLhs().getType().dyn_cast(); + auto rhs = getRhs().getType().dyn_cast(); + + double lhsSparsity = lhs.getSparsity(); + double rhsSparsity = rhs.getSparsity(); + + if (lhsSparsity == -1.0 || rhsSparsity == -1.0) { + return {-1.0}; + } else if (lhsSparsity == 0.0 && rhsSparsity == 0.0) { + return {1.0}; + } else if ((lhsSparsity == 1.0 && rhsSparsity == 0.0) || (lhsSparsity == 0.0 && rhsSparsity == 1.0)) { + return {0.0}; + } + return {-1.0}; +} + +std::vector daphne::OuterNeqOp::inferSparsity() { + auto lhs = getLhs().getType().dyn_cast(); + auto rhs = getRhs().getType().dyn_cast(); + + double lhsSparsity = lhs.getSparsity(); + double rhsSparsity = rhs.getSparsity(); + + if (lhsSparsity == -1.0 || rhsSparsity == -1.0) { + return {-1.0}; + } else if (lhsSparsity == 0.0 && rhsSparsity == 0.0) { + return {0.0}; + } else if ((lhsSparsity == 1.0 && rhsSparsity == 0.0) || (lhsSparsity == 0.0 && rhsSparsity == 1.0)) { + return {1.0}; + } + return {-1.0}; +} + +std::vector daphne::OuterLeOp::inferSparsity() { + auto lhs = getLhs().getType().dyn_cast(); + auto rhs = getRhs().getType().dyn_cast(); + + double lhsSparsity = lhs.getSparsity(); + double rhsSparsity = rhs.getSparsity(); + + if (lhsSparsity == -1.0 || rhsSparsity == -1.0) { + return {-1.0}; + } else if (lhsSparsity == 0.0 && rhsSparsity == 0.0) { + return {1.0}; + } + return {-1.0}; +} + +std::vector daphne::OuterGeOp::inferSparsity() { + auto lhs = getLhs().getType().dyn_cast(); + auto rhs = getRhs().getType().dyn_cast(); + + double lhsSparsity = lhs.getSparsity(); + double rhsSparsity = rhs.getSparsity(); + + if (lhsSparsity == -1.0 || rhsSparsity == -1.0) { + return {-1.0}; + } else if (lhsSparsity == 0.0 && rhsSparsity == 0.0) { + return {1.0}; + } + return {-1.0}; +} + +// -------------------------------------------------------------------- +// Reorganization +// 
-------------------------------------------------------------------- + +std::vector daphne::ColBindOp::inferSparsity() { + /** + * Sparsity is estimated by finding the ratio of non-zero cells in the input matrices and the output matrix. + */ + auto lhs = getLhs().getType().dyn_cast(); + auto rhs = getRhs().getType().dyn_cast(); + + double lhsSparsity = lhs.getSparsity(); + double rhsSparsity = rhs.getSparsity(); + + if (lhsSparsity == -1.0 || rhsSparsity == -1.0) { + return {-1.0}; + } + + int64_t lhsCols = lhs.getNumCols(); + int64_t lhsRows = lhs.getNumRows(); + int64_t rhsCols = rhs.getNumCols(); + int64_t rhsRows = rhs.getNumRows(); + + int64_t lhsCells = lhsCols * lhsRows; + int64_t rhsCells = rhsCols * rhsRows; + + return {(lhsSparsity * lhsCells + rhsSparsity * rhsCells) / (lhsCells + rhsCells)}; +} + +std::vector daphne::RowBindOp::inferSparsity() { + /** + * Sparsity is estimated by finding the ratio of non-zero cells in the input matrices and the output matrix. + */ + auto lhs = getLhs().getType().dyn_cast(); + auto rhs = getRhs().getType().dyn_cast(); + + double lhsSparsity = lhs.getSparsity(); + double rhsSparsity = rhs.getSparsity(); + + if (lhsSparsity == -1.0 || rhsSparsity == -1.0) { + return {-1.0}; + } + + int64_t lhsCols = lhs.getNumCols(); + int64_t lhsRows = lhs.getNumRows(); + int64_t rhsCols = rhs.getNumCols(); + int64_t rhsRows = rhs.getNumRows(); + + int64_t lhsCells = lhsCols * lhsRows; + int64_t rhsCells = rhsCols * rhsRows; + + return {(lhsSparsity * lhsCells + rhsSparsity * rhsCells) / (lhsCells + rhsCells)}; +} + +// ---------------------------------------------------------------------------- +// Row/column-wise aggregation +// ---------------------------------------------------------------------------- + +std::vector daphne::RowAggMaxOp::inferSparsity() { + auto argTy = getArg().getType().dyn_cast(); + auto rows = argTy.getNumRows(); + auto columns = argTy.getNumCols(); + auto sparsity = argTy.getSparsity(); + + if (sparsity == -1.0) 
{
+        return {-1.0};
+    } else if (sparsity * (rows * columns) < columns) {
+        return {0.0};
+    }
+    return {1.0};
+}
+
+
+// --------------------------------------------------------------------
+// Other
+// --------------------------------------------------------------------
+
+std::vector<double> daphne::ReplaceOp::inferSparsity() {
+    /*
+     * Returns 1.0 if all zeros are replaced with a non-zero scalar, the same sparsity in case the zeros were to be "replaced" with zeros
+     * and -1 in all other cases.
+     *
+     * Two separate attribute if-else statements are needed as the pattern and replacement can have different datatypes.
+     */
+    auto co = CompilerUtils::constantOfAnyType(getPattern());
+    if (!co) {
+        return {-1.0};
+    }
+
+    double vPattern = 0.0;
+    auto valueAttr = co->getAttr("value");
+    if (auto floatAttr = valueAttr.dyn_cast<mlir::FloatAttr>()) {
+        vPattern = floatAttr.getValueAsDouble();
+    } else if (auto intAttr = valueAttr.dyn_cast<mlir::IntegerAttr>()) {
+        if (intAttr.getType().isSignlessInteger()) {
+            vPattern = static_cast<double>(intAttr.getInt());
+        } else if (intAttr.getType().isSignedInteger()) {
+            vPattern = static_cast<double>(intAttr.getSInt());
+        }
+    } else {
+        throw std::runtime_error("Unsupported type for ReplaceOp sparsity inference");
+    }
+
+    co = CompilerUtils::constantOfAnyType(getReplacement());
+    if (!co) {
+        return {-1.0};
+    }
+
+    double vReplace = 0.0;
+    valueAttr = co->getAttr("value");
+    if (auto floatAttr = valueAttr.dyn_cast<mlir::FloatAttr>()) {
+        vReplace = floatAttr.getValueAsDouble();
+    } else if (auto intAttr = valueAttr.dyn_cast<mlir::IntegerAttr>()) {
+        if (intAttr.getType().isSignlessInteger()) {
+            vReplace = static_cast<double>(intAttr.getInt());
+        } else if (intAttr.getType().isSignedInteger()) {
+            vReplace = static_cast<double>(intAttr.getSInt());
+        }
+    } else {
+        throw std::runtime_error("Unsupported type for ReplaceOp sparsity inference");
+    }
+
+    auto argTy = getArg().getType().dyn_cast<daphne::MatrixType>();
+    auto sparsity = argTy.getSparsity();
+
+    if (vPattern == 0.0 && vReplace != 0.0) {
+        return {1.0};
+    } else if (vPattern == 0.0 && vReplace == 0.0) {
+        
return {sparsity}; + } + return {-1.0}; +} + // **************************************************************************** // Sparsity inference trait implementations // **************************************************************************** @@ -152,7 +1062,15 @@ std::vector daphne::tryInferSparsity(Operation *op) { sparsity = 1.0; } - if (op->hasTrait()) { + if(op->hasTrait()) { + sparsity = getSparsityOrUnknownFromType(op->getOperand(0)); + } + + if(op->hasTrait()) { + sparsity = -1.0; + } + + if(op->hasTrait()) { auto spLhs = getSparsityOrUnknownFromType(op->getOperand(0)); auto spRhs = getSparsityOrUnknownFromType(op->getOperand(1)); if (spLhs != -1.0 && spRhs != -1.0) @@ -172,6 +1090,64 @@ std::vector daphne::tryInferSparsity(Operation *op) { sparsity = spRhs; } + if(op->hasTrait()) { + auto spLhs = getSparsityOrUnknownFromType(op->getOperand(0)); + if (op->getNumOperands() > 1) { + auto spRhs = getSparsityOrUnknownFromType(op->getOperand(1)); + if (spLhs == 0.0 && spRhs == 0.0) { + sparsity = 0.0; + } else { + sparsity = -1.0; + } + } else { + if (spLhs == 0.0) { + sparsity = 0.0; + } else { + sparsity = -1.0; + } + } + } + + if(op->hasTrait()) { + auto spLhs = getSparsityOrUnknownFromType(op->getOperand(0)); + if (op->getNumOperands() > 1) { + auto spRhs = getSparsityOrUnknownFromType(op->getOperand(1)); + if (spLhs == 1.0 && spRhs == 1.0) { + sparsity = 1.0; + } else { + sparsity = -1.0; + } + } else { + if (spLhs == 1.0) { + sparsity = 1.0; + } else { + sparsity = -1.0; + } + } + } + + if(op->hasTrait()) { + auto spLhs = getSparsityOrUnknownFromType(op->getOperand(0)); + if (op->getNumOperands() > 1) { + auto spRhs = getSparsityOrUnknownFromType(op->getOperand(1)); + if (spLhs == 0.0 && spRhs == 0.0) { + sparsity = 0.0; + } else if (spLhs == 1.0 && spRhs == 1.0) { + sparsity = 1.0; + } else { + sparsity = -1.0; + } + } else { + if (spLhs == 0.0) { + sparsity = 0.0; + } else if (spLhs == 1.0) { + sparsity = 1.0; + } else { + sparsity = -1.0; + } + } 
+ } + // Our parametric traits addressing a certain argument are supported // for up to 10 arguments (this can easily be changed here). // There does not seem to be a way in MLIR do it more generically, @@ -189,4 +1165,4 @@ std::vector daphne::tryInferSparsity(Operation *op) { sparsities.push_back(-1); return sparsities; } -} +} \ No newline at end of file diff --git a/src/ir/daphneir/DaphneInferSparsityOpInterface.h b/src/ir/daphneir/DaphneInferSparsityOpInterface.h index dc779e859..03bd1086a 100644 --- a/src/ir/daphneir/DaphneInferSparsityOpInterface.h +++ b/src/ir/daphneir/DaphneInferSparsityOpInterface.h @@ -38,6 +38,12 @@ namespace mlir::OpTrait { template class CompletelyDense : public TraitBase {}; +template +class SparsityRemains : public TraitBase {}; + +template +class SparsityUnknown : public TraitBase {}; + template struct SparsityFromIthScalar { template class Impl : public TraitBase {}; }; @@ -48,6 +54,9 @@ template struct SparsityFromIthArg { template class EwSparseIfEither : public TraitBase {}; template class EwSparseIfBoth : public TraitBase {}; +template class SparseIfAllInputSparse : public TraitBase {}; +template class DenseIfAllInputDense : public TraitBase {}; +template class SparsityRemainsIfAllInputOneOrZero : public TraitBase {}; } // namespace mlir::OpTrait diff --git a/src/ir/daphneir/DaphneInferSparsityTraits.td b/src/ir/daphneir/DaphneInferSparsityTraits.td index 770436e90..aee1d3412 100644 --- a/src/ir/daphneir/DaphneInferSparsityTraits.td +++ b/src/ir/daphneir/DaphneInferSparsityTraits.td @@ -24,6 +24,12 @@ include "mlir/IR/OpBase.td" // it won't for all the other cases estimating with 1.0 is fine def CompletelyDense : NativeOpTrait<"CompletelyDense">; +// Passes the input sparsity to the output +def SparsityRemains : NativeOpTrait<"SparsityRemains">; + +// Sets the sparsity to -1.0 if the sparsity will be unknown after an operation +def SparsityUnknown : NativeOpTrait<"SparsityUnknown">; + class SparsityFromIthScalar : 
ParamNativeOpTrait<"SparsityFromIthScalar", !cast(i)>; class SparsityFromIthArg : ParamNativeOpTrait<"SparsityFromIthArg", !cast(i)>; def SparsityFromArg : SparsityFromIthArg<0>; @@ -36,5 +42,11 @@ def SparsityFromArg : SparsityFromIthArg<0>; def EwSparseIfBoth : NativeOpTrait<"EwSparseIfBoth">; // if either element is zero, the result is zero def EwSparseIfEither : NativeOpTrait<"EwSparseIfEither">; +// if all input elements are completely sparse, the resulting sparsity is 0.0 (works for 1 or 2 operands) +def SparseIfAllInputSparse : NativeOpTrait<"SparseIfAllInputSparse">; +// if all input elements are completely dense, the resulting sparsity is 1.0 (works for 1 or 2 operands) +def DenseIfAllInputDense : NativeOpTrait<"DenseIfAllInputDense">; +// if all input elements are either completely sparse or completely dense, the resulting sparsity is 0.0 or 1.0 respectively (works for 1 or 2 operands) +def SparsityRemainsIfAllInputOneOrZero : NativeOpTrait<"SparsityRemainsIfAllInputOneOrZero">; #endif //SRC_IR_DAPHNEIR_DAPHNEINFERSPARSITYTRAITS_TD \ No newline at end of file diff --git a/src/ir/daphneir/DaphneOps.td b/src/ir/daphneir/DaphneOps.td index d934f8463..9ceda4ebb 100644 --- a/src/ir/daphneir/DaphneOps.td +++ b/src/ir/daphneir/DaphneOps.td @@ -97,6 +97,7 @@ def Daphne_ConvertDenseMatrixToMemRef : Daphne_Op<"convertDenseMatrixToMemRef", def Daphne_FillOp : Daphne_Op<"fill", [ DataTypeMat, ValueTypeFromFirstArg, + DeclareOpInterfaceMethods, NumRowsFromIthScalar<1>, NumColsFromIthScalar<2>, CUDASupport ]> { let arguments = (ins AnyScalar:$arg, Size:$numRows, Size:$numCols); @@ -141,7 +142,8 @@ def Daphne_RandMatrixOp : Daphne_Op<"randMatrix", [ def Daphne_SampleOp : Daphne_Op<"sample", [ DataTypeMat, ValueTypeFromFirstArg, - NumRowsFromIthScalar<1>, OneCol + NumRowsFromIthScalar<1>, OneCol, + DeclareOpInterfaceMethods ]> { let arguments = (ins AnyScalar:$range, Size:$size, BoolScalar:$withReplacement, Seed:$seed); let results = (outs MatrixOrU:$res); @@ -150,7 
+152,7 @@ def Daphne_SampleOp : Daphne_Op<"sample", [ def Daphne_SeqOp : Daphne_Op<"seq", [ DataTypeMat, ValueTypeFromArgs, OneCol, DeclareOpInterfaceMethods, - CompletelyDense, + DeclareOpInterfaceMethods, CastArgsToResType ]> { let arguments = (ins NumScalar:$from, NumScalar:$to, NumScalar:$inc); @@ -166,6 +168,13 @@ def Daphne_TypeOfOp : Daphne_Op<"typeOf"> { let results = (outs StrScalar:$res); } +def SparsityOp : Daphne_Op<"sparsity", [DataTypeSca]> { + let arguments = (ins MatrixOf<[AnyScalar]>:$arg); + let results = (outs FloatScalar:$res); + + let hasCanonicalizeMethod = 1; +} + class Daphne_NumOp traits = []> : Daphne_Op { @@ -179,13 +188,6 @@ def Daphne_NumRowsOp : Daphne_NumOp<"numRows">; def Daphne_NumColsOp : Daphne_NumOp<"numCols">; def Daphne_NumCellsOp : Daphne_NumOp<"numCells">; -def SparsityOp : Daphne_Op<"sparsity", [DataTypeSca]> { - let arguments = (ins MatrixOf<[AnyScalar]>:$arg); - let results = (outs FloatScalar:$res); - - let hasCanonicalizeMethod = 1; -} - // **************************************************************************** // Matrix multiplication // **************************************************************************** @@ -225,11 +227,11 @@ def Daphne_EwMinusOp : Daphne_EwUnaryOp<"ewMinus", NumScalar, [ValueTypeFromFirs let hasFolder = 1; let hasCanonicalizeMethod = 1; } -def Daphne_EwAbsOp : Daphne_EwUnaryOp<"ewAbs", NumScalar, [ValueTypeFromFirstArg]>; -def Daphne_EwSignOp : Daphne_EwUnaryOp<"ewSign", NumScalar, [ValueTypeFromFirstArg]>; -def Daphne_EwExpOp : Daphne_EwUnaryOp<"ewExp", NumScalar, [ValueTypeFromArgsFP]>; -def Daphne_EwLnOp : Daphne_EwUnaryOp<"ewLn", NumScalar, [ValueTypeFromArgsFP]>; -def Daphne_EwSqrtOp : Daphne_EwUnaryOp<"ewSqrt", NumScalar, [ValueTypeFromArgsFP, DeclareOpInterfaceMethods]>; +def Daphne_EwAbsOp : Daphne_EwUnaryOp<"ewAbs", NumScalar, [ValueTypeFromFirstArg, SparsityRemains]>; +def Daphne_EwSignOp : Daphne_EwUnaryOp<"ewSign", NumScalar, [ValueTypeFromFirstArg, SparsityRemains]>; +def 
Daphne_EwExpOp : Daphne_EwUnaryOp<"ewExp", NumScalar, [ValueTypeFromArgsFP, CompletelyDense]>; +def Daphne_EwLnOp : Daphne_EwUnaryOp<"ewLn", NumScalar, [ValueTypeFromArgsFP, CompletelyDense]>; +def Daphne_EwSqrtOp : Daphne_EwUnaryOp<"ewSqrt", NumScalar, [ValueTypeFromArgsFP, DeclareOpInterfaceMethods, SparsityRemains]>; // ---------------------------------------------------------------------------- // Logical @@ -241,29 +243,29 @@ def Daphne_EwNegOp : Daphne_EwUnaryOp<"ewNeg", NumScalar, [ValueTypeFromFirstArg // Rounding // ---------------------------------------------------------------------------- -def Daphne_EwRoundOp : Daphne_EwUnaryOp<"ewRound", NumScalar, [ValueTypeFromFirstArg]>; -def Daphne_EwFloorOp : Daphne_EwUnaryOp<"ewFloor", NumScalar, [ValueTypeFromFirstArg]>; -def Daphne_EwCeilOp : Daphne_EwUnaryOp<"ewCeil", NumScalar, [ValueTypeFromFirstArg]>; +def Daphne_EwRoundOp : Daphne_EwUnaryOp<"ewRound", NumScalar, [ValueTypeFromFirstArg, SparseIfAllInputSparse]>; +def Daphne_EwFloorOp : Daphne_EwUnaryOp<"ewFloor", NumScalar, [ValueTypeFromFirstArg, SparseIfAllInputSparse]>; +def Daphne_EwCeilOp : Daphne_EwUnaryOp<"ewCeil", NumScalar, [ValueTypeFromFirstArg, SparsityRemainsIfAllInputOneOrZero]>; // ---------------------------------------------------------------------------- // Trigonometric // ---------------------------------------------------------------------------- -def Daphne_EwSinOp : Daphne_EwUnaryOp<"ewSin", NumScalar, [ValueTypeFromArgsFP]>; -def Daphne_EwCosOp : Daphne_EwUnaryOp<"ewCos", NumScalar, [ValueTypeFromArgsFP]>; -def Daphne_EwTanOp : Daphne_EwUnaryOp<"ewTan", NumScalar, [ValueTypeFromArgsFP]>; -def Daphne_EwSinhOp : Daphne_EwUnaryOp<"ewSinh", NumScalar, [ValueTypeFromArgsFP]>; -def Daphne_EwCoshOp : Daphne_EwUnaryOp<"ewCosh", NumScalar, [ValueTypeFromArgsFP]>; -def Daphne_EwTanhOp : Daphne_EwUnaryOp<"ewTanh", NumScalar, [ValueTypeFromArgsFP]>; -def Daphne_EwAsinOp : Daphne_EwUnaryOp<"ewAsin", NumScalar, [ValueTypeFromArgsFP]>; -def 
Daphne_EwAcosOp : Daphne_EwUnaryOp<"ewAcos", NumScalar, [ValueTypeFromArgsFP]>; -def Daphne_EwAtanOp : Daphne_EwUnaryOp<"ewAtan", NumScalar, [ValueTypeFromArgsFP]>; +def Daphne_EwSinOp : Daphne_EwUnaryOp<"ewSin", NumScalar, [ValueTypeFromArgsFP, SparsityUnknown]>; +def Daphne_EwCosOp : Daphne_EwUnaryOp<"ewCos", NumScalar, [ValueTypeFromArgsFP, SparsityUnknown]>; +def Daphne_EwTanOp : Daphne_EwUnaryOp<"ewTan", NumScalar, [ValueTypeFromArgsFP, SparsityUnknown]>; +def Daphne_EwSinhOp : Daphne_EwUnaryOp<"ewSinh", NumScalar, [ValueTypeFromArgsFP, SparsityRemains]>; +def Daphne_EwCoshOp : Daphne_EwUnaryOp<"ewCosh", NumScalar, [ValueTypeFromArgsFP, CompletelyDense]>; +def Daphne_EwTanhOp : Daphne_EwUnaryOp<"ewTanh", NumScalar, [ValueTypeFromArgsFP, SparsityRemains]>; +def Daphne_EwAsinOp : Daphne_EwUnaryOp<"ewAsin", NumScalar, [ValueTypeFromArgsFP, SparsityRemains]>; +def Daphne_EwAcosOp : Daphne_EwUnaryOp<"ewAcos", NumScalar, [ValueTypeFromArgsFP, SparsityUnknown]>; +def Daphne_EwAtanOp : Daphne_EwUnaryOp<"ewAtan", NumScalar, [ValueTypeFromArgsFP, SparsityRemains]>; // ---------------------------------------------------------------------------- // Comparison // ---------------------------------------------------------------------------- -def Daphne_EwIsNanOp : Daphne_EwUnaryOp<"ewIsnan", NumScalar, [ValueTypeFromFirstArg]>; +def Daphne_EwIsNanOp : Daphne_EwUnaryOp<"ewIsnan", NumScalar, [ValueTypeFromFirstArg, SparsityUnknown]>; // **************************************************************************** // Elementwise binary @@ -295,42 +297,42 @@ class Daphne_EwBinaryOp traits = []> // ---------------------------------------------------------------------------- // TODO Make EwAddOp Commutative again (see #449). 
-def Daphne_EwAddOp : Daphne_EwBinaryOp<"ewAdd", NumScalar, [ValueTypeFromArgs, CastArgsToResType/*, Commutative*/, EwSparseIfBoth, CUDASupport]> { +def Daphne_EwAddOp : Daphne_EwBinaryOp<"ewAdd", NumScalar, [ValueTypeFromArgs, CastArgsToResType/*, Commutative*/, DeclareOpInterfaceMethods, CUDASupport]> { let hasCanonicalizeMethod = 1; } -def Daphne_EwSubOp : Daphne_EwBinaryOp<"ewSub", NumScalar, [ValueTypeFromArgs, CastArgsToResType, EwSparseIfBoth, CUDASupport]> { +def Daphne_EwSubOp : Daphne_EwBinaryOp<"ewSub", NumScalar, [ValueTypeFromArgs, CastArgsToResType, DeclareOpInterfaceMethods, CUDASupport]> { let hasCanonicalizeMethod = 1; } -def Daphne_EwMulOp : Daphne_EwBinaryOp<"ewMul", NumScalar, [ValueTypeFromArgs, CastArgsToResType, Commutative, EwSparseIfEither, CUDASupport]> { +def Daphne_EwMulOp : Daphne_EwBinaryOp<"ewMul", NumScalar, [ValueTypeFromArgs, CastArgsToResType, Commutative, DeclareOpInterfaceMethods, CUDASupport]> { let hasCanonicalizeMethod = 1; } -def Daphne_EwDivOp : Daphne_EwBinaryOp<"ewDiv", NumScalar, [ValueTypeFromArgs, CastArgsToResType, CUDASupport]> { +def Daphne_EwDivOp : Daphne_EwBinaryOp<"ewDiv", NumScalar, [ValueTypeFromArgs, CastArgsToResType, DeclareOpInterfaceMethods, CUDASupport]> { let hasCanonicalizeMethod = 1; } -def Daphne_EwPowOp : Daphne_EwBinaryOp<"ewPow", NumScalar, [ValueTypeFromArgs, CastArgsToResType, CUDASupport]>; -def Daphne_EwModOp : Daphne_EwBinaryOp<"ewMod", NumScalar, [ValueTypeFromArgs, CastArgsToResType]>; -def Daphne_EwLogOp : Daphne_EwBinaryOp<"ewLog", NumScalar, [ValueTypeFromArgsFP, CastArgsToResType]>; +def Daphne_EwPowOp : Daphne_EwBinaryOp<"ewPow", NumScalar, [ValueTypeFromArgs, CastArgsToResType, DeclareOpInterfaceMethods, CUDASupport]>; +def Daphne_EwModOp : Daphne_EwBinaryOp<"ewMod", NumScalar, [ValueTypeFromArgs, CastArgsToResType, DeclareOpInterfaceMethods]>; +def Daphne_EwLogOp : Daphne_EwBinaryOp<"ewLog", NumScalar, [ValueTypeFromArgsFP, CastArgsToResType, SparsityUnknown]>; // 
---------------------------------------------------------------------------- // Min/max // ---------------------------------------------------------------------------- -def Daphne_EwMinOp : Daphne_EwBinaryOp<"ewMin", AnyScalar, [ValueTypeFromArgs, CastArgsToResType, Commutative]>; -def Daphne_EwMaxOp : Daphne_EwBinaryOp<"ewMax", AnyScalar, [ValueTypeFromArgs, CastArgsToResType, Commutative, CUDASupport]>; +def Daphne_EwMinOp : Daphne_EwBinaryOp<"ewMin", AnyScalar, [ValueTypeFromArgs, SparsityRemainsIfAllInputOneOrZero, CastArgsToResType, Commutative]>; +def Daphne_EwMaxOp : Daphne_EwBinaryOp<"ewMax", AnyScalar, [ValueTypeFromArgs, SparsityRemainsIfAllInputOneOrZero, CastArgsToResType, Commutative, CUDASupport]>; // ---------------------------------------------------------------------------- // Logical // ---------------------------------------------------------------------------- -def Daphne_EwAndOp : Daphne_EwBinaryOp<"ewAnd", NumScalar, [Commutative, ValueTypeFromArgsInt, CastArgsToResType]>; -def Daphne_EwOrOp : Daphne_EwBinaryOp<"ewOr" , NumScalar, [Commutative, ValueTypeFromArgsInt, CastArgsToResType]>; -def Daphne_EwXorOp : Daphne_EwBinaryOp<"ewXor", NumScalar, [Commutative, ValueTypeFromArgsInt, CastArgsToResType]>; +def Daphne_EwAndOp : Daphne_EwBinaryOp<"ewAnd", NumScalar, [Commutative, ValueTypeFromArgsInt, CastArgsToResType, DeclareOpInterfaceMethods]>; +def Daphne_EwOrOp : Daphne_EwBinaryOp<"ewOr" , NumScalar, [Commutative, ValueTypeFromArgsInt, CastArgsToResType, DeclareOpInterfaceMethods]>; +def Daphne_EwXorOp : Daphne_EwBinaryOp<"ewXor", NumScalar, [Commutative, ValueTypeFromArgsInt, CastArgsToResType, DeclareOpInterfaceMethods]>; // ---------------------------------------------------------------------------- // Bitwise // ---------------------------------------------------------------------------- -def Daphne_EwBitwiseAndOp : Daphne_EwBinaryOp<"ewBitwiseAnd", NumScalar, [Commutative, ValueTypeFromArgsInt, CastArgsToResType]>; +def 
Daphne_EwBitwiseAndOp : Daphne_EwBinaryOp<"ewBitwiseAnd", NumScalar, [Commutative, ValueTypeFromArgsInt, CastArgsToResType, EwSparseIfBoth]>; // ---------------------------------------------------------------------------- // Strings @@ -349,12 +351,12 @@ class Daphne_EwCmpOp traits = []> //let results = (outs AnyTypeOf<[MatrixOf<[BoolScalar]>, BoolScalar, Unknown]>:$res); } -def Daphne_EwEqOp : Daphne_EwCmpOp<"ewEq" , AnyScalar, [Commutative]>; -def Daphne_EwNeqOp : Daphne_EwCmpOp<"ewNeq", AnyScalar, [Commutative, CUDASupport]>; -def Daphne_EwLtOp : Daphne_EwCmpOp<"ewLt" , AnyScalar>; -def Daphne_EwLeOp : Daphne_EwCmpOp<"ewLe" , AnyScalar>; -def Daphne_EwGtOp : Daphne_EwCmpOp<"ewGt" , AnyScalar>; -def Daphne_EwGeOp : Daphne_EwCmpOp<"ewGe" , AnyScalar>; +def Daphne_EwEqOp : Daphne_EwCmpOp<"ewEq" , AnyScalar, [Commutative, DeclareOpInterfaceMethods]>; +def Daphne_EwNeqOp : Daphne_EwCmpOp<"ewNeq", AnyScalar, [Commutative, DeclareOpInterfaceMethods, CUDASupport]>; +def Daphne_EwLtOp : Daphne_EwCmpOp<"ewLt" , AnyScalar, [SparseIfAllInputSparse]>; +def Daphne_EwLeOp : Daphne_EwCmpOp<"ewLe" , AnyScalar, [DeclareOpInterfaceMethods]>; +def Daphne_EwGtOp : Daphne_EwCmpOp<"ewGt" , AnyScalar, [SparseIfAllInputSparse]>; +def Daphne_EwGeOp : Daphne_EwCmpOp<"ewGe" , AnyScalar, [DeclareOpInterfaceMethods]>; // **************************************************************************** // Outer binary (generalized outer product) @@ -374,28 +376,28 @@ class Daphne_OuterBinaryOp traits = [] // Arithmetic // ---------------------------------------------------------------------------- -def Daphne_OuterAddOp : Daphne_OuterBinaryOp<"outerAdd", NumScalar, [ValueTypeFromArgs]>; -def Daphne_OuterSubOp : Daphne_OuterBinaryOp<"outerSub", NumScalar, [ValueTypeFromArgs]>; -def Daphne_OuterMulOp : Daphne_OuterBinaryOp<"outerMul", NumScalar, [ValueTypeFromArgs]>; -def Daphne_OuterDivOp : Daphne_OuterBinaryOp<"outerDiv", NumScalar, [ValueTypeFromArgs]>; -def Daphne_OuterPowOp : 
Daphne_OuterBinaryOp<"outerPow", NumScalar, [ValueTypeFromArgs]>; -def Daphne_OuterModOp : Daphne_OuterBinaryOp<"outerMod", NumScalar, [ValueTypeFromArgs]>; -def Daphne_OuterLogOp : Daphne_OuterBinaryOp<"outerLog", NumScalar, [ValueTypeFromArgsFP]>; +def Daphne_OuterAddOp : Daphne_OuterBinaryOp<"outerAdd", NumScalar, [ValueTypeFromArgs, DeclareOpInterfaceMethods]>; +def Daphne_OuterSubOp : Daphne_OuterBinaryOp<"outerSub", NumScalar, [ValueTypeFromArgs, DeclareOpInterfaceMethods]>; +def Daphne_OuterMulOp : Daphne_OuterBinaryOp<"outerMul", NumScalar, [ValueTypeFromArgs, DeclareOpInterfaceMethods]>; +def Daphne_OuterDivOp : Daphne_OuterBinaryOp<"outerDiv", NumScalar, [ValueTypeFromArgs, DeclareOpInterfaceMethods]>; +def Daphne_OuterPowOp : Daphne_OuterBinaryOp<"outerPow", NumScalar, [ValueTypeFromArgs, DeclareOpInterfaceMethods]>; +def Daphne_OuterModOp : Daphne_OuterBinaryOp<"outerMod", NumScalar, [ValueTypeFromArgs, DeclareOpInterfaceMethods]>; +def Daphne_OuterLogOp : Daphne_OuterBinaryOp<"outerLog", NumScalar, [ValueTypeFromArgsFP, SparsityUnknown]>; // ---------------------------------------------------------------------------- // Min/max // ---------------------------------------------------------------------------- -def Daphne_OuterMinOp : Daphne_OuterBinaryOp<"outerMin", AnyScalar, [ValueTypeFromArgs]>; -def Daphne_OuterMaxOp : Daphne_OuterBinaryOp<"outerMax", AnyScalar, [ValueTypeFromArgs]>; +def Daphne_OuterMinOp : Daphne_OuterBinaryOp<"outerMin", AnyScalar, [ValueTypeFromArgs, SparsityRemainsIfAllInputOneOrZero]>; +def Daphne_OuterMaxOp : Daphne_OuterBinaryOp<"outerMax", AnyScalar, [ValueTypeFromArgs, SparsityRemainsIfAllInputOneOrZero]>; // ---------------------------------------------------------------------------- // Logical // ---------------------------------------------------------------------------- -def Daphne_OuterAndOp : Daphne_OuterBinaryOp<"outerAnd", NumScalar, [ValueTypeFromArgsInt]>; -def Daphne_OuterOrOp : Daphne_OuterBinaryOp<"outerOr" , 
NumScalar, [ValueTypeFromArgsInt]>; -def Daphne_OuterXorOp : Daphne_OuterBinaryOp<"outerXor", NumScalar, [ValueTypeFromArgsInt]>; +def Daphne_OuterAndOp : Daphne_OuterBinaryOp<"outerAnd", NumScalar, [ValueTypeFromArgsInt, DeclareOpInterfaceMethods]>; +def Daphne_OuterOrOp : Daphne_OuterBinaryOp<"outerOr" , NumScalar, [ValueTypeFromArgsInt, DeclareOpInterfaceMethods]>; +def Daphne_OuterXorOp : Daphne_OuterBinaryOp<"outerXor", NumScalar, [ValueTypeFromArgsInt, DeclareOpInterfaceMethods]>; // ---------------------------------------------------------------------------- // Strings @@ -414,12 +416,12 @@ class Daphne_OuterCmpOp traits = //let results = (outs AnyTypeOf<[MatrixOf<[BoolScalar]>, BoolScalar, Unknown]>:$res); } -def Daphne_OuterEqOp : Daphne_OuterCmpOp<"outerEq" , AnyScalar>; -def Daphne_OuterNeqOp : Daphne_OuterCmpOp<"outerNeq", AnyScalar>; -def Daphne_OuterLtOp : Daphne_OuterCmpOp<"outerLt" , AnyScalar>; -def Daphne_OuterLeOp : Daphne_OuterCmpOp<"outerLe" , AnyScalar>; -def Daphne_OuterGtOp : Daphne_OuterCmpOp<"outerGt" , AnyScalar>; -def Daphne_OuterGeOp : Daphne_OuterCmpOp<"outerGe" , AnyScalar>; +def Daphne_OuterEqOp : Daphne_OuterCmpOp<"outerEq" , AnyScalar, [DeclareOpInterfaceMethods]>; +def Daphne_OuterNeqOp : Daphne_OuterCmpOp<"outerNeq", AnyScalar, [DeclareOpInterfaceMethods]>; +def Daphne_OuterLtOp : Daphne_OuterCmpOp<"outerLt" , AnyScalar, [SparseIfAllInputSparse]>; +def Daphne_OuterLeOp : Daphne_OuterCmpOp<"outerLe" , AnyScalar, [DeclareOpInterfaceMethods]>; +def Daphne_OuterGtOp : Daphne_OuterCmpOp<"outerGt" , AnyScalar, [SparseIfAllInputSparse]>; +def Daphne_OuterGeOp : Daphne_OuterCmpOp<"outerGe" , AnyScalar, [DeclareOpInterfaceMethods]>; // **************************************************************************** // Elementwise ternary @@ -478,25 +480,25 @@ class Daphne_ColAggOp; def Daphne_RowAggSumOp : Daphne_RowAggOp<"sumRow" , NumScalar, NumScalar, [ValueTypeFromFirstArg, CastArgsToResType, - CUDASupport, DeclareOpInterfaceMethods]>; 
-def Daphne_RowAggMinOp : Daphne_RowAggOp<"minRow" , AnyScalar, AnyScalar, [ValueTypeFromFirstArg, CastArgsToResType, DeclareOpInterfaceMethods]>; + CUDASupport, DeclareOpInterfaceMethods, SparseIfAllInputSparse]>; +def Daphne_RowAggMinOp : Daphne_RowAggOp<"minRow" , AnyScalar, AnyScalar, [ValueTypeFromFirstArg, CastArgsToResType, DeclareOpInterfaceMethods, SparsityRemainsIfAllInputOneOrZero]>; def Daphne_RowAggMaxOp : Daphne_RowAggOp<"maxRow" , AnyScalar, AnyScalar, [ValueTypeFromFirstArg, CastArgsToResType, - CUDASupport, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]>; -def Daphne_RowAggIdxMinOp : Daphne_RowAggOp<"idxminRow", NumScalar, Size, [ValueTypeSize]>; -def Daphne_RowAggIdxMaxOp : Daphne_RowAggOp<"idxmaxRow", NumScalar, Size, [ValueTypeSize]>; -def Daphne_RowAggMeanOp : Daphne_RowAggOp<"meanRow" , NumScalar, NumScalar, [ValueTypeFromArgsFP, CastArgsToResType]>; -def Daphne_RowAggVarOp : Daphne_RowAggOp<"varRow" , NumScalar, NumScalar, [ValueTypeFromArgsFP, CastArgsToResType]>; -def Daphne_RowAggStddevOp : Daphne_RowAggOp<"stddevRow", NumScalar, NumScalar, [ValueTypeFromArgsFP, CastArgsToResType]>; + CUDASupport, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]>; +def Daphne_RowAggIdxMinOp : Daphne_RowAggOp<"idxminRow", NumScalar, Size, [ValueTypeSize, SparseIfAllInputSparse]>; +def Daphne_RowAggIdxMaxOp : Daphne_RowAggOp<"idxmaxRow", NumScalar, Size, [ValueTypeSize, SparseIfAllInputSparse]>; +def Daphne_RowAggMeanOp : Daphne_RowAggOp<"meanRow" , NumScalar, NumScalar, [ValueTypeFromArgsFP, CastArgsToResType, SparseIfAllInputSparse]>; +def Daphne_RowAggVarOp : Daphne_RowAggOp<"varRow" , NumScalar, NumScalar, [ValueTypeFromArgsFP, CastArgsToResType, SparseIfAllInputSparse]>; +def Daphne_RowAggStddevOp : Daphne_RowAggOp<"stddevRow", NumScalar, NumScalar, [ValueTypeFromArgsFP, CastArgsToResType, SparseIfAllInputSparse]>; def Daphne_ColAggSumOp : Daphne_ColAggOp<"sumCol" , NumScalar, NumScalar, [ValueTypeFromFirstArg, 
CastArgsToResType, - CUDASupport, DeclareOpInterfaceMethods]>; -def Daphne_ColAggMinOp : Daphne_ColAggOp<"minCol" , AnyScalar, AnyScalar, [ValueTypeFromFirstArg, CastArgsToResType]>; -def Daphne_ColAggMaxOp : Daphne_ColAggOp<"maxCol" , AnyScalar, AnyScalar, [ValueTypeFromFirstArg, CastArgsToResType]>; -def Daphne_ColAggIdxMinOp : Daphne_ColAggOp<"idxminCol", NumScalar, Size, [ValueTypeSize]>; -def Daphne_ColAggIdxMaxOp : Daphne_ColAggOp<"idxmaxCol", NumScalar, Size, [ValueTypeSize]>; -def Daphne_ColAggMeanOp : Daphne_ColAggOp<"meanCol" , NumScalar, NumScalar, [ValueTypeFromArgsFP, CastArgsToResType]>; -def Daphne_ColAggVarOp : Daphne_ColAggOp<"varCol" , NumScalar, NumScalar, [ValueTypeFromArgsFP, CastArgsToResType]>; -def Daphne_ColAggStddevOp : Daphne_ColAggOp<"stddevCol", NumScalar, NumScalar, [ValueTypeFromArgsFP, CastArgsToResType]>; + CUDASupport, DeclareOpInterfaceMethods, SparseIfAllInputSparse]>; +def Daphne_ColAggMinOp : Daphne_ColAggOp<"minCol" , AnyScalar, AnyScalar, [ValueTypeFromFirstArg, CastArgsToResType, SparsityRemainsIfAllInputOneOrZero]>; +def Daphne_ColAggMaxOp : Daphne_ColAggOp<"maxCol" , AnyScalar, AnyScalar, [ValueTypeFromFirstArg, CastArgsToResType, SparsityRemainsIfAllInputOneOrZero]>; +def Daphne_ColAggIdxMinOp : Daphne_ColAggOp<"idxminCol", NumScalar, Size, [ValueTypeSize, SparseIfAllInputSparse]>; +def Daphne_ColAggIdxMaxOp : Daphne_ColAggOp<"idxmaxCol", NumScalar, Size, [ValueTypeSize, SparseIfAllInputSparse]>; +def Daphne_ColAggMeanOp : Daphne_ColAggOp<"meanCol" , NumScalar, NumScalar, [ValueTypeFromArgsFP, CastArgsToResType, SparseIfAllInputSparse]>; +def Daphne_ColAggVarOp : Daphne_ColAggOp<"varCol" , NumScalar, NumScalar, [ValueTypeFromArgsFP, CastArgsToResType, SparseIfAllInputSparse]>; +def Daphne_ColAggStddevOp : Daphne_ColAggOp<"stddevCol", NumScalar, NumScalar, [ValueTypeFromArgsFP, CastArgsToResType, SparseIfAllInputSparse]>; // ---------------------------------------------------------------------------- // Cumulative 
aggregation @@ -509,10 +511,10 @@ class Daphne_CumAggOp traits = []> let results = (outs MatrixOf<[scalarType]>); } -def Daphne_CumAggSumOp : Daphne_CumAggOp<"sumCum" , NumScalar, [ValueTypeFromFirstArg]>; -def Daphne_CumAggProdOp : Daphne_CumAggOp<"prodCum", NumScalar, [ValueTypeFromFirstArg]>; -def Daphne_CumAggMinOp : Daphne_CumAggOp<"minCum" , AnyScalar, [ValueTypeFromFirstArg]>; -def Daphne_CumAggMaxOp : Daphne_CumAggOp<"maxCum" , AnyScalar, [ValueTypeFromFirstArg]>; +def Daphne_CumAggSumOp : Daphne_CumAggOp<"sumCum" , NumScalar, [ValueTypeFromFirstArg, SparseIfAllInputSparse]>; +def Daphne_CumAggProdOp : Daphne_CumAggOp<"prodCum", NumScalar, [ValueTypeFromFirstArg, SparsityUnknown]>; +def Daphne_CumAggMinOp : Daphne_CumAggOp<"minCum" , AnyScalar, [ValueTypeFromFirstArg, SparsityRemainsIfAllInputOneOrZero]>; +def Daphne_CumAggMaxOp : Daphne_CumAggOp<"maxCum" , AnyScalar, [ValueTypeFromFirstArg, SparsityRemainsIfAllInputOneOrZero]>; // ---------------------------------------------------------------------------- // Grouped aggregation @@ -603,7 +605,8 @@ def Daphne_ExtractRowOp : Daphne_Op<"extractRow", [ def Daphne_SliceRowOp : Daphne_Op<"sliceRow", [ TypeFromFirstArg, - DeclareOpInterfaceMethods + DeclareOpInterfaceMethods, + SparsityFromArg ]> { let summary = "Copies the specified rows from the argument to the result."; @@ -645,7 +648,8 @@ def Daphne_ExtractColOp : Daphne_Op<"extractCol", [ def Daphne_SliceColOp : Daphne_Op<"sliceCol", [ DeclareOpInterfaceMethods, - DeclareOpInterfaceMethods + DeclareOpInterfaceMethods, + SparsityFromArg ]> { let summary = "Copies the specified columns from the argument to the result."; @@ -715,6 +719,7 @@ class Daphne_BindOp traits = []> : Daphne_Op, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, NumRowsFromAllArgs, NumColsFromSumOfAllArgs, CUDASupport @@ -722,11 +727,12 @@ def Daphne_ColBindOp : Daphne_BindOp<"colBind", [ def Daphne_RowBindOp : Daphne_BindOp<"rowBind", [ ValueTypeFromArgs, - NumRowsFromSumOfAllArgs, 
NumColsFromAllArgs + NumRowsFromSumOfAllArgs, NumColsFromAllArgs, + DeclareOpInterfaceMethods ]>; def Daphne_ReverseOp : Daphne_Op<"reverse", [ - TypeFromFirstArg, ShapeFromArg + TypeFromFirstArg, ShapeFromArg, SparsityFromArg ]> { let arguments = (ins MatrixOrU:$arg); let results = (outs MatrixOrU:$res); @@ -736,7 +742,8 @@ def Daphne_OrderOp : Daphne_Op<"order", [ DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, // due to possibility of returning indexes SameVariadicOperandSize, - DeclareOpInterfaceMethods + DeclareOpInterfaceMethods, + SparsityFromArg ]> { // TODO Maybe colIdxs and ascs should be attributes. let arguments = (ins MatrixOrFrame:$arg, Variadic:$colIdxs, Variadic:$ascs, BoolScalar:$returnIdxs); @@ -748,7 +755,7 @@ def Daphne_OrderOp : Daphne_Op<"order", [ // **************************************************************************** def Daphne_EigenOp : Daphne_Op<"eigenCal", [TypeFromFirstArg, DeclareOpInterfaceMethods, - DeclareOpInterfaceMethods]> { + DeclareOpInterfaceMethods, SparsityUnknown]> { let arguments = (ins MatrixOf<[FloatScalar]>:$arg); let results = (outs MatrixOf<[FloatScalar]>:$eigenValues, MatrixOf<[FloatScalar]>:$eigenVectors); } @@ -940,7 +947,7 @@ def Daphne_SoftmaxForwardOp : Daphne_Op<"softmaxForward", [ CUDASupport ]> { // **************************************************************************** def Daphne_DiagVectorOp : Daphne_Op<"diagVector", [ - TypeFromFirstArg, NumRowsFromArg, OneCol + TypeFromFirstArg, NumRowsFromArg, OneCol, SparsityFromArg ]> { let arguments = (ins MatrixOrU:$arg); let results = (outs MatrixOrU:$res); @@ -954,14 +961,14 @@ def Daphne_TriOp : Daphne_Op<"tri", [ } def Daphne_SolveOp : Daphne_Op<"solve", [ - DataTypeMat, ValueTypeFromArgs, NumRowsFromArg, OneCol, CUDASupport, CastArgsToResType + DataTypeMat, ValueTypeFromArgs, NumRowsFromArg, OneCol, CUDASupport, CastArgsToResType, SparsityUnknown ]> { let arguments = (ins MatrixOf<[NumScalar]>:$a, MatrixOf<[NumScalar]>:$b); let results = (outs 
MatrixOf<[NumScalar]>:$x); } def Daphne_ReplaceOp : Daphne_Op<"replace", [ - DataTypeFromFirstArg, ValueTypeFromArgs, ShapeFromArg, CastArgsToResType + DataTypeFromFirstArg, ValueTypeFromArgs, ShapeFromArg, CastArgsToResType, DeclareOpInterfaceMethods ]> { let arguments = (ins MatrixOrU:$arg, AnyScalar:$pattern, AnyScalar:$replacement); let results = (outs MatrixOrU:$res); @@ -969,7 +976,8 @@ def Daphne_ReplaceOp : Daphne_Op<"replace", [ def Daphne_CTableOp : Daphne_Op<"ctable", [ DataTypeMat, ValueTypeFromThirdArg, - DeclareOpInterfaceMethods + DeclareOpInterfaceMethods, + SparsityUnknown ]> { let arguments = (ins MatrixOrU:$lhs, MatrixOrU:$rhs, NumScalar:$weight, SI64:$resNumRows, SI64:$resNumCols); let results = (outs MatrixOrU:$res); @@ -979,7 +987,7 @@ def Daphne_SyrkOp : Daphne_Op<"syrk", [ TypeFromFirstArg, DeclareOpInterfaceMethods, NumRowsFromArgNumCols, NumColsFromArg, CUDASupport,FPGAOPENCLSupport, - CastArgsToResType + CastArgsToResType, SparsityUnknown ]> { // TODO: support `A @ t(A)` operation let summary = [{Performs the operation `t(A) @ A`}]; @@ -991,7 +999,7 @@ def Daphne_GemvOp : Daphne_Op<"gemv", [ DeclareOpInterfaceMethods, DataTypeMat, ValueTypeFromArgs, NumRowsFromArgNumCols, OneCol, CUDASupport, - CastArgsToResType + CastArgsToResType, SparsityUnknown ]> { // TODO: support `A @ x` operation let summary = [{Performs the operation `t(A) @ x`}]; diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 89f686aa2..f218cf5f1 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -64,6 +64,7 @@ set(TEST_SOURCES api/cli/codegen/MapOpTest.cpp + ir/daphneir/InferSparsityTest.cpp ir/daphneir/InferTypesTest.cpp api/cli/operations/CanonicalizationConstantFoldingOpTest.cpp diff --git a/test/ir/daphneir/InferSparsityTest.cpp b/test/ir/daphneir/InferSparsityTest.cpp new file mode 100644 index 000000000..4b7b79f67 --- /dev/null +++ b/test/ir/daphneir/InferSparsityTest.cpp @@ -0,0 +1,36 @@ +/* + * Copyright 2021 The DAPHNE Consortium + * + * 
Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include + +#include + +#include + +const std::string dirPath = "test/ir/daphneir/"; + +#define MAKE_TEST_CASE(name, count) \ + TEST_CASE(name, TAG_OPERATIONS) { \ + for(unsigned i = 1; i <= count; i++) { \ + DYNAMIC_SECTION(name "_" << i << ".daphne") { \ + compareDaphneToRefSimple(dirPath, name, i); \ + } \ + } \ + } + +MAKE_TEST_CASE("sparsity", 4) \ No newline at end of file diff --git a/test/ir/daphneir/sparsity_1.daphne b/test/ir/daphneir/sparsity_1.daphne new file mode 100644 index 000000000..805e6bb3a --- /dev/null +++ b/test/ir/daphneir/sparsity_1.daphne @@ -0,0 +1,28 @@ +// -------------------------------------------------------------------- +// Data Generation +// -------------------------------------------------------------------- + +print("FillOp"); +print(sparsity(fill(0, 2, 2))); +print(sparsity(fill(1, 2, 2))); +print(sparsity(fill(0.0, 2, 2))); +print(sparsity(fill(0.01, 2, 2))); + +print("SampleOp"); +X = [1, 1, 0, 0](2, 2); +print(sparsity(replace(X, 0, 1))); +print(sparsity(replace(X, 1, 0))); + +print("SeqOp"); +// TODO Commented lines require negative numbers to be known at compile-time (see #774). +// The tests need to be adjusted as well, but unfortunately I couldnt just comment out the lines there. 
+print(sparsity(seq(0, 1, 1))); +print(sparsity(seq(0, 7, 1))); +print(sparsity(seq(1, 3, 1))); +//print(sparsity(seq(2, -1, -1))); +//print(sparsity(seq(-2, -1, 1))); +print(sparsity(seq(0.0, 0.01, 0.01))); +print(sparsity(seq(0.0, 0.07, 0.01))); +print(sparsity(seq(0.01, 0.03, 0.01))); +//print(sparsity(seq(0.02, -0.01, -0.01))); +//print(sparsity(seq(-0.02, -0.01, 0.01))); \ No newline at end of file diff --git a/test/ir/daphneir/sparsity_1.txt b/test/ir/daphneir/sparsity_1.txt new file mode 100644 index 000000000..9212264ac --- /dev/null +++ b/test/ir/daphneir/sparsity_1.txt @@ -0,0 +1,15 @@ +FillOp +0 +1 +0 +1 +SampleOp +1 +-1 +SeqOp +0.5 +0.875 +1 +0.5 +0.875 +1 diff --git a/test/ir/daphneir/sparsity_2.daphne b/test/ir/daphneir/sparsity_2.daphne new file mode 100644 index 000000000..e7fb6ccb4 --- /dev/null +++ b/test/ir/daphneir/sparsity_2.daphne @@ -0,0 +1,190 @@ +// -------------------------------------------------------------------- +// Unary +// -------------------------------------------------------------------- + +X = rand(5, 5, 1, 1, 0.5, -1); +// row slicing +print(sparsity(X[:2,])); +print(sparsity(X[2,])); +print(sparsity(X[2:,])); + +// col slicing +print(sparsity(X[,:2])); +print(sparsity(X[,2])); +print(sparsity(X[,2:])); + +// -------------------------------------------------------------------- +// Elementwise Unary +// -------------------------------------------------------------------- +print("Elementwise Unary"); +X = rand(2, 2, 1, 1, 0.5, -1); + +print("sin"); +print(sparsity(sin(X))); + +print("cos"); +print(sparsity(cos(X))); + +print("tan"); +print(sparsity(tan(X))); + +print("asin"); +print(sparsity(asin(X))); + +print("acos"); +print(sparsity(acos(X))); + +print("atan"); +print(sparsity(atan(X))); + +print("sinh"); +print(sparsity(sinh(X))); + +print("cosh"); +print(sparsity(cosh(X))); + +print("tanh"); +print(sparsity(tanh(X))); + +zero = rand(1, 1, 0, 2, 0.0, -1); +full = rand(1, 1, 0, 2, 1.0, -1); + +print("round"); 
+print(sparsity(round(zero))); +print(sparsity(round(full))); + +print("floor"); +print(sparsity(floor(zero))); +print(sparsity(floor(full))); + +print("ceil"); +print(sparsity(ceil(zero))); +print(sparsity(ceil(full))); + +// -------------------------------------------------------------------- +// Elementwise Binary +// -------------------------------------------------------------------- + +print("Elementwise Binary"); + +// we take 3 matrices of different sparsities to take into account all the possible cases for the different ops. +empty = rand(2, 2, 1, 1, 0.0, -1); +half = rand(2, 2, 1, 1, 0.5, -1); +full = rand(2, 2, 1, 1, 1.0, -1); +unknown = sin(rand(2, 2, 1, 1, 0.5, -1)); + +print("ewadd"); +print(sparsity(empty + empty)); +print(sparsity(empty + half)); +print(sparsity(half + half)); +print(sparsity(unknown + half)); +print(sparsity(unknown + unknown)); + + +print("ewsub"); +print(sparsity(empty - empty)); +print(sparsity(empty - half)); +print(sparsity(half - half)); +print(sparsity(unknown - half)); +print(sparsity(unknown - unknown)); + +print("ewmul"); +print(sparsity(empty * empty)); +print(sparsity(empty * half)); +print(sparsity(half * half)); +print(sparsity(unknown * empty)); +print(sparsity(unknown * half)); +print(sparsity(unknown * unknown)); + +print("ewdiv"); +print(sparsity(empty / empty)); +print(sparsity(empty / half)); +print(sparsity(half / half)); +print(sparsity(unknown / half)); +print(sparsity(half / unknown)); +print(sparsity(unknown / unknown)); + +print("ewpow"); +print(sparsity(empty ^ empty)); +print(sparsity(empty ^ half)); +print(sparsity(half ^ half)); +print(sparsity(unknown ^ half)); +print(sparsity(unknown ^ unknown)); + +print("ewmod"); +print(sparsity(empty % empty)); +print(sparsity(empty % half)); +print(sparsity(half % half)); +print(sparsity(unknown % half)); +print(sparsity(unknown % unknown)); + +print("ewmin"); +print(sparsity(min(empty, empty))); +print(sparsity(min(empty, half))); +print(sparsity(min(full, 
full))); +print(sparsity(min(unknown, empty))); + +print("ewmax"); +print(sparsity(max(empty, empty))); +print(sparsity(max(empty, half))); +print(sparsity(max(full, full))); +print(sparsity(max(unknown, empty))); + +print("ewand"); +print(sparsity(empty && empty)); +print(sparsity(empty && half)); +print(sparsity(half && half)); +print(sparsity(unknown && empty)); +print(sparsity(unknown && full)); + +print("ewor"); +print(sparsity(empty || empty)); +print(sparsity(empty || half)); +print(sparsity(half || empty)); +print(sparsity(half || half)); +print(sparsity(unknown || empty)); +print(sparsity(unknown || full)); + +print("eweq"); +print(sparsity(empty == empty)); +print(sparsity(full == full)); +print(sparsity(full == empty)); +print(sparsity(empty == full)); +print(sparsity(unknown == empty)); +print(sparsity(unknown == unknown)); + +print("ewneq"); +print(sparsity(empty != empty)); +print(sparsity(full != full)); +print(sparsity(full != empty)); +print(sparsity(empty != full)); +print(sparsity(unknown != empty)); +print(sparsity(unknown != unknown)); + +print("ewlt"); +print(sparsity(empty < empty)); +print(sparsity(full < full)); +print(sparsity(full < empty)); +print(sparsity(unknown < empty)); +print(sparsity(unknown < unknown)); + +print("ewle"); +print(sparsity(empty <= empty)); +print(sparsity(full <= full)); +print(sparsity(full <= empty)); +print(sparsity(unknown <= empty)); +print(sparsity(unknown <= unknown)); + +print("ewgt"); +print(sparsity(empty > empty)); +print(sparsity(full > full)); +print(sparsity(full > empty)); +print(sparsity(unknown > empty)); +print(sparsity(unknown > unknown)); + +print("ewge"); +print(sparsity(empty >= empty)); +print(sparsity(full >= full)); +print(sparsity(full >= empty)); +print(sparsity(unknown >= empty)); +print(sparsity(unknown >= unknown)); \ No newline at end of file diff --git a/test/ir/daphneir/sparsity_2.txt b/test/ir/daphneir/sparsity_2.txt new file mode 100644 index 000000000..1b162e572 --- /dev/null +++ 
b/test/ir/daphneir/sparsity_2.txt @@ -0,0 +1,134 @@ +0.5 +0.5 +0.5 +0.5 +0.5 +0.5 +Elementwise Unary +sin +-1 +cos +-1 +tan +-1 +asin +0.5 +acos +-1 +atan +0.5 +sinh +0.5 +cosh +1 +tanh +0.5 +round +0 +-1 +floor +0 +-1 +ceil +0 +1 +Elementwise Binary +ewadd +0 +0.5 +0.75 +-1 +-1 +ewsub +0 +0.5 +0.75 +-1 +-1 +ewmul +0 +0 +0.25 +-0 +-1 +-1 +ewdiv +0 +0 +0.5 +-1 +-1 +-1 +ewpow +1 +0.5 +0.75 +-1 +-1 +ewmod +0 +0 +-1 +-1 +-1 +ewmin +0 +-1 +1 +-1 +ewmax +0 +-1 +1 +-1 +ewand +0 +0 +0.25 +-0 +-1 +ewor +0 +0.5 +0.5 +0.75 +-1 +1 +eweq +1 +-1 +0 +0 +-1 +-1 +ewneq +0 +-1 +1 +1 +-1 +-1 +ewlt +0 +-1 +-1 +-1 +-1 +ewle +1 +-1 +-1 +-1 +-1 +ewgt +0 +-1 +-1 +-1 +-1 +ewge +1 +-1 +-1 +-1 +-1 diff --git a/test/ir/daphneir/sparsity_3.daphne b/test/ir/daphneir/sparsity_3.daphne new file mode 100644 index 000000000..fa82261d2 --- /dev/null +++ b/test/ir/daphneir/sparsity_3.daphne @@ -0,0 +1,130 @@ +// -------------------------------------------------------------------- +// Outer Binary +// -------------------------------------------------------------------- + +print("Outer Binary"); + +// we take 3 matrices of different sparsities to take into account all the possible cases for the different ops. 
+empty_col = rand(2, 1, 1, 1, 0.0, -1); +empty_row = rand(1, 2, 1, 1, 0.0, -1); +half_col = rand(2, 1, 1, 1, 0.5, -1); +half_row = rand(1, 2, 1, 1, 0.5, -1); +full_col = rand(2, 1, 1, 1, 1.0, -1); +full_row = rand(1, 2, 1, 1, 1.0, -1); +unknown_col = sin(rand(2, 1, 1, 1, 0.5, -1)); +unknown_row = sin(rand(1, 2, 1, 1, 0.5, -1)); + +print("outeradd"); +print(sparsity(outerAdd(empty_col, empty_row))); +print(sparsity(outerAdd(empty_col, half_row))); +print(sparsity(outerAdd(half_col, half_row))); +print(sparsity(outerAdd(unknown_col, half_row))); +print(sparsity(outerAdd(unknown_col, unknown_row))); + +print("outersub"); +print(sparsity(outerSub(empty_col, empty_row))); +print(sparsity(outerSub(empty_col, half_row))); +print(sparsity(outerSub(half_col, half_row))); +print(sparsity(outerSub(unknown_col, half_row))); +print(sparsity(outerSub(unknown_col, unknown_row))); + +print("outermul"); +print(sparsity(outerMul(empty_col, empty_row))); +print(sparsity(outerMul(empty_col, half_row))); +print(sparsity(outerMul(half_col, half_row))); +print(sparsity(outerMul(unknown_col, empty_row))); +print(sparsity(outerMul(unknown_col, half_row))); +print(sparsity(outerMul(unknown_col, unknown_row))); + +print("outerdiv"); +// print(sparsity(outerDiv(empty_col, empty_row))); +// print(sparsity(outerDiv(empty_col, half_row))); +// print(sparsity(outerDiv(half_col, half_row))); +print(sparsity(outerDiv(unknown_col, half_row))); +print(sparsity(outerDiv(half_col, unknown_row))); +print(sparsity(outerDiv(unknown_col, unknown_row))); + +print("outerpow"); +print(sparsity(outerPow(empty_col, empty_row))); +print(sparsity(outerPow(empty_col, half_row))); +print(sparsity(outerPow(half_col, half_row))); +print(sparsity(outerPow(unknown_col, half_row))); +print(sparsity(outerPow(unknown_col, unknown_row))); + +print("outermod"); +print(sparsity(outerMod(empty_col, empty_row))); +print(sparsity(outerMod(empty_col, half_row))); +print(sparsity(outerMod(half_col, half_row))); 
+print(sparsity(outerMod(unknown_col, half_row))); +print(sparsity(outerMod(unknown_col, unknown_row))); + +print("outermin"); +print(sparsity(outerMin(empty_col, empty_row))); +print(sparsity(outerMin(empty_col, half_row))); +print(sparsity(outerMin(full_col, full_row))); +print(sparsity(outerMin(unknown_col, empty_row))); + +print("outermax"); +print(sparsity(outerMax(empty_col, empty_row))); +print(sparsity(outerMax(empty_col, half_row))); +print(sparsity(outerMax(full_col, full_row))); +print(sparsity(outerMax(unknown_col, empty_row))); + +print("outerand"); +print(sparsity(outerAnd(empty_col, empty_row))); +print(sparsity(outerAnd(empty_col, half_row))); +print(sparsity(outerAnd(half_col, half_row))); +print(sparsity(outerAnd(unknown_col, empty_row))); +print(sparsity(outerAnd(unknown_col, full_row))); + +print("outeror"); +print(sparsity(outerOr(empty_col, empty_row))); +print(sparsity(outerOr(empty_col, half_row))); +print(sparsity(outerOr(half_col, empty_row))); +print(sparsity(outerOr(half_col, half_row))); +print(sparsity(outerOr(unknown_col, empty_row))); +print(sparsity(outerOr(unknown_col, full_row))); + +print("outereq"); +print(sparsity(outerEq(empty_col, empty_row))); +print(sparsity(outerEq(full_col, full_row))); +print(sparsity(outerEq(full_col, empty_row))); +print(sparsity(outerEq(empty_col, full_row))); +print(sparsity(outerEq(unknown_col, empty_row))); +print(sparsity(outerEq(unknown_col, unknown_row))); + +print("outerneq"); +print(sparsity(outerNeq(empty_col, empty_row))); +print(sparsity(outerNeq(full_col, full_row))); +print(sparsity(outerNeq(full_col, empty_row))); +print(sparsity(outerNeq(empty_col, full_row))); +print(sparsity(outerNeq(unknown_col, empty_row))); +print(sparsity(outerNeq(unknown_col, unknown_row))); + +print("outerlt"); +print(sparsity(outerLt(empty_col, empty_row))); +print(sparsity(outerLt(full_col, full_row))); +print(sparsity(outerLt(full_col, empty_row))); +print(sparsity(outerLt(unknown_col, empty_row))); 
+print(sparsity(outerLt(unknown_col, unknown_row))); + +print("outerle"); +print(sparsity(outerLe(empty_col, empty_row))); +print(sparsity(outerLe(full_col, full_row))); +print(sparsity(outerLe(full_col, empty_row))); +print(sparsity(outerLe(unknown_col, empty_row))); +print(sparsity(outerLe(unknown_col, unknown_row))); + +print("outergt"); +print(sparsity(outerGt(empty_col, empty_row))); +print(sparsity(outerGt(full_col, full_row))); +print(sparsity(outerGt(full_col, empty_row))); +print(sparsity(outerGt(unknown_col, empty_row))); +print(sparsity(outerGt(unknown_col, unknown_row))); + +print("outerge"); +print(sparsity(outerGe(empty_col, empty_row))); +print(sparsity(outerGe(full_col, full_row))); +print(sparsity(outerGe(full_col, empty_row))); +print(sparsity(outerGe(unknown_col, empty_row))); +print(sparsity(outerGe(unknown_col, unknown_row))); \ No newline at end of file diff --git a/test/ir/daphneir/sparsity_3.txt b/test/ir/daphneir/sparsity_3.txt new file mode 100644 index 000000000..06bba5388 --- /dev/null +++ b/test/ir/daphneir/sparsity_3.txt @@ -0,0 +1,97 @@ +Outer Binary +outeradd +0 +0.5 +0.75 +-1 +-1 +outersub +0 +0.5 +0.75 +-1 +-1 +outermul +0 +0 +0.25 +-0 +-1 +-1 +outerdiv +-1 +-1 +-1 +outerpow +1 +0.5 +0.75 +-1 +-1 +outermod +0 +0 +-1 +-1 +-1 +outermin +0 +-1 +1 +-1 +outermax +0 +-1 +1 +-1 +outerand +0 +0 +0.25 +-0 +-1 +outeror +0 +0.5 +0.5 +0.75 +-1 +1 +outereq +1 +-1 +0 +0 +-1 +-1 +outerneq +0 +-1 +1 +1 +-1 +-1 +outerlt +0 +-1 +-1 +-1 +-1 +outerle +1 +-1 +-1 +-1 +-1 +outergt +0 +-1 +-1 +-1 +-1 +outerge +1 +-1 +-1 +-1 +-1 diff --git a/test/ir/daphneir/sparsity_4.daphne b/test/ir/daphneir/sparsity_4.daphne new file mode 100644 index 000000000..b4de5f137 --- /dev/null +++ b/test/ir/daphneir/sparsity_4.daphne @@ -0,0 +1,100 @@ +// -------------------------------------------------------------------- +// Full/row/column aggregation +// -------------------------------------------------------------------- + +empty = rand(2, 2, 1, 1, 0.0, -1); +half = 
rand(2, 2, 1, 1, 0.5, -1); +full = rand(2, 2, 1, 1, 1.0, -1); + +print("Full/row/column aggregation"); +print("sum"); +print(sparsity(sum(empty, 0))); +print(sparsity(sum(half, 0))); +print(sparsity(sum(full, 0))); +print(sparsity(sum(empty, 1))); +print(sparsity(sum(half, 1))); +print(sparsity(sum(full, 1))); + +print("min"); +print(sparsity(aggMin(empty, 0))); +print(sparsity(aggMin(half, 0))); +print(sparsity(aggMin(full, 0))); +print(sparsity(aggMin(empty, 1))); +print(sparsity(aggMin(half, 1))); +print(sparsity(aggMin(full, 1))); + +print("max"); +print(sparsity(aggMax(empty, 0))); +print(sparsity(aggMax(half, 0))); +print(sparsity(aggMax(full, 0))); +print(sparsity(aggMax(empty, 1))); +print(sparsity(aggMax(half, 1))); +print(sparsity(aggMax(full, 1))); + +print("idxmin"); +print(sparsity(idxMin(empty, 0))); +print(sparsity(idxMin(half, 0))); +print(sparsity(idxMin(full, 0))); +print(sparsity(idxMin(empty, 1))); +print(sparsity(idxMin(half, 1))); +print(sparsity(idxMin(full, 1))); + +print("idxmax"); +print(sparsity(idxMax(empty, 0))); +print(sparsity(idxMax(half, 0))); +print(sparsity(idxMax(full, 0))); +print(sparsity(idxMax(empty, 1))); +print(sparsity(idxMax(half, 1))); +print(sparsity(idxMax(full, 1))); + +print("mean"); +print(sparsity(mean(empty, 0))); +print(sparsity(mean(half, 0))); +print(sparsity(mean(full, 0))); +print(sparsity(mean(empty, 1))); +print(sparsity(mean(half, 1))); +print(sparsity(mean(full, 1))); + +print("var"); +print(sparsity(var(empty, 0))); +print(sparsity(var(half, 0))); +print(sparsity(var(full, 0))); +print(sparsity(var(empty, 1))); +print(sparsity(var(half, 1))); +print(sparsity(var(full, 1))); + +print("stddev"); +print(sparsity(stddev(empty, 0))); +print(sparsity(stddev(half, 0))); +print(sparsity(stddev(full, 0))); +print(sparsity(stddev(empty, 1))); +print(sparsity(stddev(half, 1))); +print(sparsity(stddev(full, 1))); + + +// -------------------------------------------------------------------- +// Cumulative aggregation 
+// -------------------------------------------------------------------- + +print("Cumulative aggregation"); + +print("CumSum"); +print(sparsity(cumSum(empty))); +print(sparsity(cumSum(half))); +print(sparsity(cumSum(full))); + +print("CumProd"); +print(sparsity(cumProd(empty))); +print(sparsity(cumProd(half))); +print(sparsity(cumProd(full))); + +print("CumMin"); +print(sparsity(cumMin(empty))); +print(sparsity(cumMin(half))); +print(sparsity(cumMin(full))); + +print("CumMax"); +print(sparsity(cumMax(empty))); +print(sparsity(cumMax(half))); +print(sparsity(cumMax(full))); + diff --git a/test/ir/daphneir/sparsity_4.txt b/test/ir/daphneir/sparsity_4.txt new file mode 100644 index 000000000..377ed24a0 --- /dev/null +++ b/test/ir/daphneir/sparsity_4.txt @@ -0,0 +1,74 @@ +Full/row/column aggregation +sum +0 +-1 +-1 +0 +-1 +-1 +min +0 +-1 +1 +0 +-1 +1 +max +0 +-1 +1 +0 +-1 +1 +idxmin +0 +-1 +-1 +0 +-1 +-1 +idxmax +0 +-1 +-1 +0 +-1 +-1 +mean +0 +-1 +-1 +0 +-1 +-1 +var +0 +-1 +-1 +0 +-1 +-1 +stddev +0 +-1 +-1 +0 +-1 +-1 +Cumulative aggregation +CumSum +0 +-1 +-1 +CumProd +-1 +-1 +-1 +CumMin +0 +-1 +1 +CumMax +0 +-1 +1 diff --git a/test/ir/daphneir/sparsity_5.daphne b/test/ir/daphneir/sparsity_5.daphne new file mode 100644 index 000000000..49b13e4c3 --- /dev/null +++ b/test/ir/daphneir/sparsity_5.daphne @@ -0,0 +1,59 @@ +// -------------------------------------------------------------------- +// Reorganization +// -------------------------------------------------------------------- + +lhs = rand(2, 2, 0, 2, 0.25, -1); +rhs = rand(2, 2, 0, 2, 0.75, -1); +unknown = sin(rand(2, 2, 0, 2, 0.75, -1)); + +print("Reorganization"); + +print("reshape"); +print(sparsity(reshape(lhs, 1, 4))); +print(sparsity(reshape(rhs, 1, 4))); +print(sparsity(reshape(unknown, 1, 4))); + +print("transpose"); +print(sparsity(transpose(lhs))); +print(sparsity(transpose(rhs))); +print(sparsity(transpose(unknown))); + +print("cbind"); +print(sparsity(cbind(lhs, rhs))); +print(sparsity(cbind(lhs, 
unknown))); + +print("rbind"); +print(sparsity(rbind(lhs, rhs))); +print(sparsity(rbind(lhs, unknown))); + +print("reverse"); +print(sparsity(reverse(lhs))); +print(sparsity(reverse(rhs))); +print(sparsity(reverse(unknown))); + +// -------------------------------------------------------------------- +// Other Operations +// -------------------------------------------------------------------- + +print("Other Operations"); + +print("diagVector"); +print(sparsity(diagVector(lhs))); +print(sparsity(diagVector(rhs))); +print(sparsity(diagVector(unknown))); + +print("lowerTri"); +print(sparsity(lowerTri(lhs, 0, 0))); +print(sparsity(lowerTri(rhs, 0, 0))); +print(sparsity(lowerTri(unknown, 0, 0))); + +print("upperTri"); +print(sparsity(upperTri(lhs, 0, 0))); +print(sparsity(upperTri(rhs, 0, 0))); +print(sparsity(upperTri(unknown, 0, 0))); + +print("replace"); +print(sparsity(replace(lhs, 0, 1))); +print(sparsity(replace(unknown, 0, 1))); +print(sparsity(replace(lhs, 0, 0))); +print(sparsity(replace(lhs, 1, 0))); \ No newline at end of file diff --git a/test/ir/daphneir/sparsity_5.txt b/test/ir/daphneir/sparsity_5.txt new file mode 100644 index 000000000..77e2fd9bb --- /dev/null +++ b/test/ir/daphneir/sparsity_5.txt @@ -0,0 +1,37 @@ +Reorganization +reshape +0.25 +0.75 +-1 +transpose +0.25 +0.75 +-1 +cbind +0.5 +-1 +rbind +0.5 +-1 +reverse +0.25 +0.75 +-1 +Other Operations +diagVector +0.25 +0.75 +-1 +lowerTri +0.125 +0.375 +-1 +upperTri +0.125 +0.375 +-1 +replace +1 +1 +0.25 +-1