From a41fc8b039b7adaba6040cbfb5aa4e0f35869fc2 Mon Sep 17 00:00:00 2001 From: Vincent Michalski Date: Thu, 21 Jan 2016 12:57:25 -0500 Subject: [PATCH] first try --- .gitignore | 2 +- theano/sandbox/gpuarray/tests/test_abstractconv.py | 3 +++ theano/tensor/nnet/opt.py | 14 +++++++++++++- 3 files changed, 17 insertions(+), 2 deletions(-) diff --git a/.gitignore b/.gitignore index e2b07a9552a..6d6397f166b 100644 --- a/.gitignore +++ b/.gitignore @@ -36,4 +36,4 @@ distribute-*.tar.gz Theano.suo .ipynb_checkpoints .pydevproject - +.ropeproject diff --git a/theano/sandbox/gpuarray/tests/test_abstractconv.py b/theano/sandbox/gpuarray/tests/test_abstractconv.py index 51fe16ed8a8..9cfcc2c8c72 100644 --- a/theano/sandbox/gpuarray/tests/test_abstractconv.py +++ b/theano/sandbox/gpuarray/tests/test_abstractconv.py @@ -81,6 +81,7 @@ def run_fwd(self, inputs_shape, filters_shape, ref=dnn_conv, filter_flip=filter_flip, input_shape=imshp, filter_shape=kshp) + self.assertTrue(hasattr(c.tag, 'trace')) f_ref = theano.function([], c_ref, mode=mode) f = theano.function([], c, mode) @@ -124,6 +125,7 @@ def run_gradweight(self, inputs_shape, filters_shape, output_shape, filter_flip=filter_flip, subsample=subsample, imshp=imshp, kshp=kshp) c = c(inputs, output, filters_shape[-2:]) + self.assertTrue(hasattr(c.tag, 'trace')) c_ref = ref(inputs, output, filters_shape, @@ -176,6 +178,7 @@ def run_gradinput(self, inputs_shape, filters_shape, output_shape, ref=dnn_gradi subsample=subsample, filter_flip=filter_flip, imshp=imshp, kshp=kshp) c = c(filters, output, inputs_shape[-2:]) + self.assertTrue(hasattr(c.tag, 'trace')) c_ref = ref(filters, output, inputs_shape, border_mode=border_mode, subsample=subsample, diff --git a/theano/tensor/nnet/opt.py b/theano/tensor/nnet/opt.py index d73c49e70c1..b2f75d73fe0 100644 --- a/theano/tensor/nnet/opt.py +++ b/theano/tensor/nnet/opt.py @@ -17,7 +17,8 @@ AbstractConv2d_gradWeights, AbstractConv2d_gradInputs) from theano.tensor.nnet.abstract_conv import 
get_conv_output_shape -from theano.tensor.opt import register_specialize_device +from theano.tensor.opt import (copy_stack_trace, + register_specialize_device) from theano.tensor import TensorType # Cpu implementation @@ -75,6 +76,7 @@ def local_abstractconv_gemm(node): kern = kern[:, :, ::-1, ::-1] rval = CorrMM(border_mode=node.op.border_mode, subsample=node.op.subsample)(img, kern) + copy_stack_trace(node.outputs[0], rval) return [rval] @@ -96,6 +98,7 @@ def local_abstractconv_gradweight_gemm(node): if node.op.filter_flip: rval = rval[:, :, ::-1, ::-1] rval = theano.tensor.patternbroadcast(rval, node.outputs[0].broadcastable) + copy_stack_trace(node.outputs[0], rval) return [rval] @@ -117,6 +120,7 @@ def local_abstractconv_gradinputs_gemm(node): rval = CorrMM_gradInputs(border_mode=node.op.border_mode, subsample=node.op.subsample)(kern, topgrad, shape) + copy_stack_trace(node.outputs[0], rval) return [rval] @@ -141,6 +145,8 @@ def local_conv2d_cpu(node): node.op.imshp, node.op.kshp, border_mode=node.op.border_mode, subsample=node.op.subsample) + + copy_stack_trace(node.outputs[0], rval) return [rval] @@ -181,6 +187,7 @@ def local_conv2d_gradweight_cpu(node): rval = rval[:, :, ::-1, ::-1] rval = theano.tensor.patternbroadcast(rval, node.outputs[0].broadcastable) + copy_stack_trace(node.outputs[0], rval) return [rval] dx, dy = node.op.subsample @@ -251,6 +258,8 @@ def local_conv2d_gradweight_cpu(node): res = res[:, :, ::-1, ::-1] res = theano.tensor.patternbroadcast(res, node.outputs[0].broadcastable) + + copy_stack_trace(node.outputs[0], res) return [res] @@ -284,6 +293,8 @@ def local_conv2d_gradinputs_cpu(node): rval = rval.dimshuffle(0, 4, 1, 2) rval = theano.tensor.patternbroadcast(rval, node.outputs[0].broadcastable) + + copy_stack_trace(node.outputs[0], rval) return [rval] # Conv2d Implementation @@ -333,6 +344,7 @@ def local_conv2d_gradinputs_cpu(node): direction_hint='bprop inputs') din = din(topgrad, filters) din = theano.tensor.patternbroadcast(din, 
node.outputs[0].broadcastable) + copy_stack_trace(node.outputs[0], din) return [din]