Use xt::zeros instead of pytensor constructor for 1d-tensors.
To avoid an issue where the pytensor is initialized with shape (1,) instead of {size,}.
Fixes #125.
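
For readers of this commit, below is a minimal sketch of the before/after pattern applied throughout the bindings. It is an illustration, not code from the repository: the makeLabels helper, the includes, and the int64_t size parameter are assumptions; only the commented-out constructor line and the xt::zeros line mirror the actual diff, and the sketch presumes an initialized Python/NumPy runtime, since xt::pytensor wraps a NumPy array.

#include <cstdint>
#include <xtensor/xbuilder.hpp>          // xt::zeros
#include <xtensor-python/pytensor.hpp>   // xt::pytensor

// Hypothetical helper, for illustration only.
xt::pytensor<uint64_t, 1> makeLabels(const int64_t size) {
    typedef xt::pytensor<uint64_t, 1>::shape_type ShapeType;
    ShapeType shape = {size};

    // Old pattern (removed by this commit): constructing the pytensor directly
    // from the shape object could leave it with shape (1,) instead of (size,).
    // xt::pytensor<uint64_t, 1> out(shape);

    // New pattern (added by this commit): xt::zeros allocates exactly the
    // requested shape and zero-initializes the values; assigning the lazy
    // expression to a pytensor evaluates it into a NumPy-backed array.
    xt::pytensor<uint64_t, 1> out = xt::zeros<uint64_t>(shape);
    return out;
}
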
constantinpape committed Apr 25, 2019
1 parent 0aaf7d9 commit 5178c66
Showing 16 changed files with 32 additions and 32 deletions.
@@ -46,7 +46,7 @@ namespace agglo{
const auto & graph = self->graph();
typedef typename xt::pytensor<uint64_t, 1>::shape_type ShapeType;
ShapeType shape = {graph.edgeIdUpperBound() + 1};
- xt::pytensor<uint64_t, 1> mtimes(shape);
+ xt::pytensor<uint64_t, 1> mtimes = xt::zeros<uint64_t>(shape);
{
py::gil_scoped_release allowThreads;
self->runAndGetMergeTimes(mtimes, verbose);
@@ -63,8 +63,8 @@ namespace agglo{
typedef typename xt::pytensor<uint64_t, 1>::shape_type ShapeType;
ShapeType shape = {graph.edgeIdUpperBound() + 1};

- xt::pytensor<double, 1> dheight(shape);
- xt::pytensor<uint64_t, 1> mtimes (shape);
+ xt::pytensor<double, 1> dheight = xt::zeros<double>(shape);
+ xt::pytensor<uint64_t, 1> mtimes = xt::zeros<uint64_t>(shape);
{
py::gil_scoped_release allowThreads;
self->runAndGetMergeTimesAndDendrogramHeight(mtimes, dheight,verbose);
@@ -81,7 +81,7 @@ namespace agglo{
const auto & graph = self->graph();
typedef typename xt::pytensor<double, 1>::shape_type ShapeType;
ShapeType shape = {graph.edgeIdUpperBound() + 1};
- xt::pytensor<double, 1> dheight(shape);
+ xt::pytensor<double, 1> dheight = xt::zeros<double>(shape);
{
py::gil_scoped_release allowThreads;
self->runAndGetDendrogramHeight(dheight,verbose);
@@ -99,7 +99,7 @@ namespace agglo{
const auto & graph = self->graph();
typedef typename xt::pytensor<double, 1>::shape_type ShapeType;
ShapeType shape = {graph.edgeIdUpperBound() + 1};
- xt::pytensor<double, 1> transformed(shape);
+ xt::pytensor<double, 1> transformed = xt::zeros<double>(shape);
{
py::gil_scoped_release allowThreads;
self->ucmTransform(edgeValues, transformed);
2 changes: 1 addition & 1 deletion src/python/lib/cgp/bounds.cxx
@@ -135,7 +135,7 @@ namespace cgp{

typedef typename xt::pytensor<uint32_t, 1>::shape_type ShapeType;
ShapeType shape = {int64_t(cell1Labels.size())};
- xt::pytensor<uint32_t, 1> ret(shape);
+ xt::pytensor<uint32_t, 1> ret = xt::zeros<uint32_t>(shape);
for(auto i=0; i<ret.size(); ++i){
ret[i] = cell1Labels[i];
}
4 changes: 2 additions & 2 deletions src/python/lib/distributed/distributed_graph.cxx
@@ -86,10 +86,10 @@ namespace distributed {
//
typedef typename xt::pytensor<EdgeIndexType, 1>::shape_type ShapeType;
ShapeType innerShape = {static_cast<int64_t>(innerEdgesVec.size())};
- xt::pytensor<EdgeIndexType, 1> innerEdges(innerShape);
+ xt::pytensor<EdgeIndexType, 1> innerEdges = xt::zeros<EdgeIndexType>(innerShape);

ShapeType outerShape = {static_cast<int64_t>(outerEdgesVec.size())};
- xt::pytensor<EdgeIndexType, 1> outerEdges(outerShape);
+ xt::pytensor<EdgeIndexType, 1> outerEdges = xt::zeros<EdgeIndexType>(outerShape);

{
py::gil_scoped_release allowThreads;
4 changes: 2 additions & 2 deletions src/python/lib/distributed/graph_extraction.cxx
@@ -152,10 +152,10 @@ namespace distributed {
//
typedef typename xt::pytensor<EdgeIndexType, 1>::shape_type ShapeType;
ShapeType innerShape = {static_cast<int64_t>(innerEdgesVec.size())};
- xt::pytensor<EdgeIndexType, 1> innerEdges(innerShape);
+ xt::pytensor<EdgeIndexType, 1> innerEdges = xt::zeros<EdgeIndexType>(innerShape);

ShapeType outerShape = {static_cast<int64_t>(outerEdgesVec.size())};
- xt::pytensor<EdgeIndexType, 1> outerEdges(outerShape);
+ xt::pytensor<EdgeIndexType, 1> outerEdges = xt::zeros<EdgeIndexType>(outerShape);

typedef typename xt::pytensor<NodeType, 2>::shape_type UvShapeType;
UvShapeType uvShape = {static_cast<int64_t>(uvIdsVec.size()), 2L};
2 changes: 1 addition & 1 deletion src/python/lib/filters/gaussian_curvature.cxx
@@ -35,7 +35,7 @@ namespace filters{
){
typedef typename xt::pytensor<float, 1>::shape_type ShapeType;
ShapeType shape = {coords.shape()[0]};
- xt::pytensor<float, 1> out(shape);
+ xt::pytensor<float, 1> out = xt::zeros<float>(shape);
self(coords, out, loop);
return out;
})
2 changes: 1 addition & 1 deletion src/python/lib/graph/connected_components.cxx
@@ -130,7 +130,7 @@ namespace graph{
){
const auto & g = self.graph();
const std::size_t size = g.nodeIdUpperBound()+1;
- xt::pytensor<uint64_t, 1> ccLabels({size});
+ xt::pytensor<uint64_t, 1> ccLabels = xt::zeros<uint64_t>({size});
for(const auto node : g.nodes()){
ccLabels[node] = self.componentLabel(node);
}
4 changes: 2 additions & 2 deletions src/python/lib/graph/opt/common/export_solver_base.hxx
@@ -59,7 +59,7 @@ namespace common{
self->optimize(nodeLabels, nullptr);
}
ShapeType shape = {graph.nodeIdUpperBound() + 1};
- xt::pytensor<uint64_t, 1> array(shape);
+ xt::pytensor<uint64_t, 1> array = xt::zeros<uint64_t>(shape);
for(auto node : graph.nodes()){
array(node) = nodeLabels[node];
}
@@ -80,7 +80,7 @@ namespace common{
self->optimize(nodeLabels, visitor);
}
ShapeType shape = {graph.nodeIdUpperBound() + 1};
- xt::pytensor<uint64_t, 1> array(shape);
+ xt::pytensor<uint64_t, 1> array = xt::zeros<uint64_t>(shape);
for(auto node : graph.nodes()){
array(node) = nodeLabels[node];
}
@@ -55,7 +55,7 @@ namespace lifted_multicut{
self->optimize(nodeLabels, nullptr);
}
ShapeType shape = {graph.nodeIdUpperBound() + 1};
- xt::pytensor<uint64_t, 1> array(shape);
+ xt::pytensor<uint64_t, 1> array = xt::zeros<uint64_t>(shape);
for(auto node : graph.nodes()){
array(node) = nodeLabels[node];
}
@@ -76,7 +76,7 @@ namespace lifted_multicut{
self->optimize(nodeLabels, visitor);
}
ShapeType shape = {graph.nodeIdUpperBound() + 1};
- xt::pytensor<uint64_t, 1> array(shape);
+ xt::pytensor<uint64_t, 1> array = xt::zeros<uint64_t>(shape);
for(auto node : graph.nodes()){
array(node) = nodeLabels[node];
}
@@ -120,7 +120,7 @@ namespace lifted_multicut{

typedef typename xt::pytensor<uint64_t, 1>::shape_type ShapeType;
ShapeType shape = {int64_t(dist.size())};
- xt::pytensor<uint64_t, 1> array(shape);
+ xt::pytensor<uint64_t, 1> array = xt::zeros<uint64_t>(shape);

for(std::size_t i=0; i<dist.size(); ++i){
array(i) = dist[i];
4 changes: 2 additions & 2 deletions src/python/lib/graph/opt/minstcut/minstcut_base.cxx
@@ -53,7 +53,7 @@ namespace minstcut{
self->optimize(nodeLabels, nullptr);
}
ShapeType shape = {graph.nodeIdUpperBound() + 1};
- xt::pytensor<uint64_t, 1> array(shape);
+ xt::pytensor<uint64_t, 1> array = xt::zeros<uint64_t>(shape);
for(auto node : graph.nodes()){
array(node) = nodeLabels[node];
}
@@ -74,7 +74,7 @@ namespace minstcut{
self->optimize(nodeLabels, visitor);
}
ShapeType shape = {graph.nodeIdUpperBound() + 1};
- xt::pytensor<uint64_t, 1> array(shape);
+ xt::pytensor<uint64_t, 1> array = xt::zeros<uint64_t>(shape);
for(auto node : graph.nodes()){
array(node) = nodeLabels[node];
}
4 changes: 2 additions & 2 deletions src/python/lib/graph/opt/multicut/multicut_base.cxx
@@ -54,7 +54,7 @@ namespace multicut{
const auto & graph = self->objective().graph();
typename McBase::NodeLabelsType nodeLabels(graph,0);
ShapeType shape = {graph.nodeIdUpperBound() + 1};
- xt::pytensor<uint64_t, 1> array(shape);
+ xt::pytensor<uint64_t, 1> array = xt::zeros<uint64_t>(shape);
{
py::gil_scoped_release allowThreads;
self->optimize(nodeLabels, nullptr);
@@ -74,7 +74,7 @@ namespace multicut{
const auto & graph = self->objective().graph();
typename McBase::NodeLabelsType nodeLabels(graph,0);
ShapeType shape = {graph.nodeIdUpperBound() + 1};
- xt::pytensor<uint64_t, 1> array(shape);
+ xt::pytensor<uint64_t, 1> array = xt::zeros<uint64_t>(shape);
{
py::gil_scoped_release allowThreads;
self->optimize(nodeLabels, visitor);
6 changes: 3 additions & 3 deletions src/python/lib/graph/opt/multicut/multicut_visitor_base.cxx
@@ -85,7 +85,7 @@ namespace multicut{
const auto vec = visitor.iterations();
typedef typename xt::pytensor<uint32_t, 1>::shape_type ShapeType;
ShapeType shape = {static_cast<int64_t>(vec.size())};
- xt::pytensor<uint32_t, 1> ret(shape);
+ xt::pytensor<uint32_t, 1> ret = xt::zeros<uint32_t>(shape);
for(auto i=0; i<vec.size(); ++i)
ret[i] = vec[i];
return ret;
@@ -94,7 +94,7 @@ namespace multicut{
const auto vec = visitor.energies();
typedef typename xt::pytensor<double, 1>::shape_type ShapeType;
ShapeType shape = {static_cast<int64_t>(vec.size())};
- xt::pytensor<double, 1> ret(shape);
+ xt::pytensor<double, 1> ret = xt::zeros<double>(shape);
for(auto i=0; i<vec.size(); ++i)
ret[i] = vec[i];
return ret;
@@ -103,7 +103,7 @@ namespace multicut{
const auto vec = visitor.runtimes();
typedef typename xt::pytensor<double, 1>::shape_type ShapeType;
ShapeType shape = {static_cast<int64_t>(vec.size())};
- xt::pytensor<double, 1> ret(shape);
+ xt::pytensor<double, 1> ret = xt::zeros<double>(shape);
for(auto i=0; i<vec.size(); ++i)
ret[i] = vec[i];
return ret;
2 changes: 1 addition & 1 deletion src/python/lib/graph/opt/multicut/perturb_and_map.cxx
@@ -66,7 +66,7 @@ namespace multicut{

typedef xt::pytensor<double, 1>::shape_type ShapeType;
ShapeType shape = {static_cast<int64_t>(nEdges)};
- xt::pytensor<double, 1> rarray(shape);
+ xt::pytensor<double, 1> rarray = xt::zeros<double>(shape);
for(auto edge: graph.edges())
rarray(edge) = edgeState[edge];
return rarray;
6 changes: 3 additions & 3 deletions src/python/lib/graph/undirected_grid_graph.cxx
@@ -118,7 +118,7 @@ namespace graph{
){
typedef typename xt::pytensor<float, 1>::shape_type ShapeType;
ShapeType shape = {g.edgeIdUpperBound() + 1};
- xt::pytensor<float, 1> out(shape);
+ xt::pytensor<float, 1> out = xt::zeros<float>(shape);
g.affinitiesToEdgeMap(affinities, out);
return out;
},
@@ -182,8 +182,8 @@ namespace graph{
typedef typename xt::pytensor<uint64_t, 2>::shape_type UvShape;
UvShape uvShape = {nLiftedTot, 2};

- xt::pytensor<float, 1> localFeatures(localShape);
- xt::pytensor<float, 1> liftedFeatures(liftedShape);
+ xt::pytensor<float, 1> localFeatures = xt::zeros<float>(localShape);
+ xt::pytensor<float, 1> liftedFeatures = xt::zeros<float>(liftedShape);
xt::pytensor<uint64_t, 2> liftedUvs(uvShape);
int64_t nLifted;
{
6 changes: 3 additions & 3 deletions src/python/lib/graph/undirected_list_graph.cxx
@@ -39,7 +39,7 @@ namespace graph{
.def("serialize",
[](const GraphType & g) {
typename xt::pytensor<uint64_t, 1>::shape_type shape = {static_cast<int64_t>(g.serializationSize())};
- xt::pytensor<uint64_t, 1> out(shape);
+ xt::pytensor<uint64_t, 1> out = xt::zeros<uint64_t>(shape);
auto ptr = &out(0);
g.serialize(ptr);
return out;
@@ -129,8 +129,8 @@ namespace graph{

typedef typename xt::pytensor<uint32_t, 1>::shape_type TensorShapeType;
TensorShapeType tensorShape = {static_cast<int64_t>(g.numberOfEdges())};
- xt::pytensor<uint32_t, 1> offsetsIndex(tensorShape);
- xt::pytensor<float, 1> aff({tensorShape});
+ xt::pytensor<uint32_t, 1> offsetsIndex = xt::zeros<uint32_t>(tensorShape);
+ xt::pytensor<float, 1> aff = xt::zeros<float>(tensorShape);

u=0;
for(int p0=0; p0<shape[0]; ++p0)
2 changes: 1 addition & 1 deletion src/python/lib/tools/edge_mapping.cxx
@@ -106,7 +106,7 @@ namespace tools{
const int numberOfThreads) {
typedef typename xt::pytensor<EdgeType, 1>::shape_type ShapeType;
ShapeType shape = {static_cast<int64_t>(self.numberOfEdges())};
- xt::pytensor<EdgeType, 1> edgeMapping(shape);
+ xt::pytensor<EdgeType, 1> edgeMapping = xt::zeros<EdgeType>(shape);

{
py::gil_scoped_release allowThreads;