Skip to content

Commit

Permalink
[tmva][sofie] Update in README list of new implemented operators
Browse files Browse the repository at this point in the history
Also fix some compilation warnings and remove some commented-out code
  • Loading branch information
lmoneta committed Feb 5, 2025
1 parent f405ea6 commit 317aa3c
Show file tree
Hide file tree
Showing 6 changed files with 15 additions and 20 deletions.
9 changes: 9 additions & 0 deletions tmva/sofie/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -82,6 +82,8 @@ Here is the updated list of supported ONNX operators
- [x] ConstantOfShape
- [x] Conv
- [x] ConvTranspose
- [x] Cos
- [x] Einsum
- [x] Elu
- [x] Equal
- [x] Erf
Expand Down Expand Up @@ -110,9 +112,14 @@ Here is the updated list of supported ONNX operators
- [x] Min
- [x] Mul
- [x] Neg
- [x] Pad
- [x] Pool
- [x] Pow
- [x] Range
- [x] RandomNormal
- [x] RandomNormalLike
- [x] RandomUniform
- [x] RandomUniformLike
- [x] Reciprocal
- [x] ReduceMean
- [x] ReduceProd
Expand All @@ -123,6 +130,7 @@ Here is the updated list of supported ONNX operators
- [x] RNN
- [x] Selu
- [x] Sigmoid
- [x] Sin
- [x] Slice
- [x] Softmax
- [x] Split
Expand All @@ -133,6 +141,7 @@ Here is the updated list of supported ONNX operators
- [x] TopK
- [x] Transpose
- [x] Unsqueeze
- [x] Where

The above operators are supported for tensors of the following types:

Expand Down
9 changes: 1 addition & 8 deletions tmva/sofie/inc/TMVA/ROperator_BasicBinary.hxx
Original file line number Diff line number Diff line change
Expand Up @@ -180,24 +180,17 @@ public:
size_t length = ConvertShapeToLength(fShapeY);
std::string typeName = TensorType<T>::Name();
// Broadcast A if it's uninitialized
// use broadcasting function where we pass an already allocated tensor to minimize memory allocations
if (fShapeA != fShapeY) {
out << SP << "// Broadcasting uninitialized tensor " << fNA << "\n";
//out << SP << "{\n";
out << SP << "TMVA::Experimental::SOFIE::UTILITY::UnidirectionalBroadcast<" << typeName << ">(tensor_" << fNA << ", " << ConvertShapeToString(fShapeA) << ", " << ConvertShapeToString(fShapeY)
<< ", fTensor_" << fNBroadcastedA << ");\n";
//out << SP << SP << "std::copy(data, data + " << length << ", tensor_" << fNBroadcastedA << ");\n";
//out << SP << SP << "delete[] data;\n";
//out << SP << "}\n";
}
// Broadcast B if it's uninitialized
if (fShapeB != fShapeY) {
out << SP << "// Broadcasting uninitialized tensor " << fNB << "\n";
//out << SP << "{\n";
out << SP << "TMVA::Experimental::SOFIE::UTILITY::UnidirectionalBroadcast<" << typeName << ">(tensor_" << fNB << ", " << ConvertShapeToString(fShapeB) << ", " << ConvertShapeToString(fShapeY)
<< ", fTensor_" << fNBroadcastedB << ");\n";
//out << SP << SP << "std::copy(data, data + " << length << ", tensor_" << fNBroadcastedB << ");\n";
//out << SP << SP << "delete[] data;\n";
//out << SP << "}\n";
}
const std::string& nameA = fNBroadcastedA.empty()? fNA : fNBroadcastedA;
const std::string& nameB = fNBroadcastedB.empty()? fNB : fNBroadcastedB;
Expand Down
5 changes: 3 additions & 2 deletions tmva/sofie/inc/TMVA/ROperator_Einsum.hxx
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
#include "TMVA/RModel.hxx"

#include <sstream>
#include <cassert>

namespace TMVA{
namespace Experimental{
Expand Down Expand Up @@ -230,8 +231,8 @@ public:
if (fGemmType.empty()) {
int outDims = fShapeY.size();
int inDims = fSumLabels.length();
assert(outDims == fOutputLabels.size());
assert(inDims == fSumDims.size());
assert(outDims == int(fOutputLabels.size()));
assert(inDims == int(fSumDims.size()));
for (int i = 0; i < outDims; i++) {
for (int j = 0; j < i; j++) out << SP;
std::string l {fOutputLabels[i]};
Expand Down
3 changes: 0 additions & 3 deletions tmva/sofie/inc/TMVA/ROperator_Gemm.hxx
Original file line number Diff line number Diff line change
Expand Up @@ -182,9 +182,6 @@ namespace SOFIE{
}
}

// throw std::runtime_error("TMVA SOFIE Gemm Op Input Tensors have not compatible shapes. A " +
// ConvertDynamicShapeToString(fShapeA) + " B " + ConvertDynamicShapeToString(fShapeB) );

fShapeY = DynamicShapeInference({fShapeA, fShapeB})[0];
std::vector<size_t> shapeY;
if (!fIsDynamic) {
Expand Down
7 changes: 1 addition & 6 deletions tmva/sofie/inc/TMVA/ROperator_Split.hxx
Original file line number Diff line number Diff line change
Expand Up @@ -108,20 +108,15 @@ public:
throw std::runtime_error("TMVA SOFIE Operator Split called to Generate without being initialized first");
}

// compute input and output strides
auto input_strides = UTILITY::ComputeStrideFromShape(fInputShape);
std::vector<std::vector<size_t>> output_strides;
for (size_t i = 0; i < fOutputShapes.size(); i++) {
output_strides.emplace_back( UTILITY::ComputeStrideFromShape(fOutputShapes[i]));
}

// generate now the code for split
std::stringstream out;
out << "\n" << SP << "//------ Split\n";
out << SP << "size_t " << OpName << "_axis_offset = 0;\n";
// unroll the loop on split outputs
for (size_t i = 0; i < fNYs.size(); i++) {
int length = ConvertShapeToLength(fOutputShapes[i]);
size_t length = ConvertShapeToLength(fOutputShapes[i]);
auto output_strides = UTILITY::ComputeStrideFromShape(fOutputShapes[i]);

out << SP << "for (int id = 0; id < " << length << " ; id++){\n";
Expand Down
2 changes: 1 addition & 1 deletion tmva/sofie/inc/TMVA/SOFIE_common.hxx
Original file line number Diff line number Diff line change
Expand Up @@ -157,7 +157,7 @@ public:
bool IsConstantTensor() const { return fConstant;}
// query if tensor needs to be written in a weight file. Constant tensors are not written in a file
bool IsWeightTensor() const { return !fConstant && !fIsNotWritable;}

// set not writable initialized tensors - i.e. tensor that must not be written in a file
void SetNotWritable() { fIsNotWritable = true;}

template <class T = void>
Expand Down

0 comments on commit 317aa3c

Please sign in to comment.