From 0aeb67b544e7b385647b17342fd2eccf3cb9a0e2 Mon Sep 17 00:00:00 2001
From: Nicholas Leonard
Date: Mon, 10 Jul 2017 17:20:30 -0400
Subject: [PATCH] parameters() uses torch.type instead of type

---
 Bilinear.lua                  |  2 +-
 Container.lua                 |  4 ++--
 DontCast.lua                  |  2 +-
 FlattenTable.lua              | 16 ++++++++--------
 Identity.lua                  |  2 +-
 IndexLinear.lua               |  4 ++--
 SparseLinear.lua              | 12 ++++++------
 SpatialFullConvolution.lua    | 10 +++++-----
 VolumetricFullConvolution.lua | 10 +++++-----
 hessian.lua                   |  4 ++--
 utils.lua                     |  6 +++---
 11 files changed, 36 insertions(+), 36 deletions(-)

diff --git a/Bilinear.lua b/Bilinear.lua
index 9350b03ec..3c0f6db3d 100644
--- a/Bilinear.lua
+++ b/Bilinear.lua
@@ -2,7 +2,7 @@ local Bilinear, parent = torch.class('nn.Bilinear', 'nn.Module')
 
 local function isint(x) return type(x) == 'number' and x == math.floor(x) end
 function Bilinear:__assertInput(input)
-   assert(input and type(input) == 'table' and #input == 2,
+   assert(input and torch.type(input) == 'table' and #input == 2,
       'input should be a table containing two data Tensors')
    assert(input[1]:nDimension() == 2 and input[2]:nDimension() == 2,
       'input Tensors should be two-dimensional')
diff --git a/Container.lua b/Container.lua
index 7e264bab9..67fac9f13 100644
--- a/Container.lua
+++ b/Container.lua
@@ -105,7 +105,7 @@ end
 
 function Container:parameters()
    local function tinsert(to, from)
-      if type(from) == 'table' then
+      if torch.type(from) == 'table' then
         for i=1,#from do
            tinsert(to,from[i])
         end
@@ -131,7 +131,7 @@ function Container:clearState()
       if self[f] then
          if torch.isTensor(self[f]) then
             self[f] = self[f].new()
-         elseif type(self[f]) == 'table' then
+         elseif torch.type(self[f]) == 'table' then
             self[f] = {}
          else
             self[f] = nil
diff --git a/DontCast.lua b/DontCast.lua
index b89f5436b..eaa39b61d 100644
--- a/DontCast.lua
+++ b/DontCast.lua
@@ -19,7 +19,7 @@ local function recursiveTypeCopy(dst, src, type_str)
 end
 
 local function tableTensorType(src)
-   if type(src) == 'table' then
+   if type(src) == 'table' then -- Note: don't use torch.type here
      local type_str, found
      for k,v in pairs(src) do
         type_str, found = tableTensorType(v)
diff --git a/FlattenTable.lua b/FlattenTable.lua
index 1c182557c..3fe2fd5e5 100644
--- a/FlattenTable.lua
+++ b/FlattenTable.lua
@@ -12,7 +12,7 @@ end
 local function flatten(output, input)
   local input_map -- has the same structure as input, but stores the
                   -- indices to the corresponding output
-  if type(input) == 'table' then
+  if torch.type(input) == 'table' then
     input_map = {}
     -- forward DFS order
     for i = 1, #input do
@@ -30,8 +30,8 @@ local function checkMapping(output, input, input_map)
   if input_map == nil or output == nil or input == nil then
     return false
   end
-  if type(input) == 'table' then
-    if type(input_map) ~= 'table' then
+  if torch.type(input) == 'table' then
+    if torch.type(input_map) ~= 'table' then
       return false
     end
     if #input ~= #input_map then
@@ -46,7 +46,7 @@ end
     return true
   else
-    if type(input_map) ~= 'number' then
+    if torch.type(input_map) ~= 'number' then
      return false
    end
    return output[input_map] == input
@@ -56,7 +56,7 @@ end
 -- During BPROP we have to build a gradInput with the same shape as the
 -- input.  This is a recursive function to build up a gradInput
 local function inverseFlatten(gradOutput, input_map)
-  if type(input_map) == 'table' then
+  if torch.type(input_map) == 'table' then
     local gradInput = {}
     for i = 1, #input_map do
       gradInput[#gradInput + 1] = inverseFlatten(gradOutput, input_map[i])
@@ -68,7 +68,7 @@ local function inverseFlatten(gradOutput, input_map)
 end
 
 function FlattenTable:updateOutput(input)
-  assert(type(input) == 'table', 'input must be a table')
+  assert(torch.type(input) == 'table', 'input must be a table')
   -- to avoid updating rebuilding the flattened table every updateOutput call
   -- we will do a DFS pass over the existing output table and the inputs to
   -- see if it needs to be rebuilt.
@@ -80,8 +80,8 @@ function FlattenTable:updateOutput(input)
 end
 
 function FlattenTable:updateGradInput(input, gradOutput)
-  assert(type(input) == 'table', 'input must be a table')
-  assert(type(input) == 'table', 'gradOutput must be a table')
+  assert(torch.type(input) == 'table', 'input must be a table')
+  assert(torch.type(input) == 'table', 'gradOutput must be a table')
   -- If the input changes between the updateOutput and updateGradInput call,
   -- then we may have to rebuild the input_map! However, let's assume that
   -- the input_map is valid and that forward has already been called.
diff --git a/Identity.lua b/Identity.lua
index 5e6ccb624..647aee399 100644
--- a/Identity.lua
+++ b/Identity.lua
@@ -17,7 +17,7 @@ function Identity:clearState()
       if self[f] then
          if torch.isTensor(self[f]) then
            self[f] = self[f].new()
-         elseif type(self[f]) == 'table' then
+         elseif torch.type(self[f]) == 'table' then
            self[f] = {}
         else
            self[f] = nil
diff --git a/IndexLinear.lua b/IndexLinear.lua
index 2ddbcbdb9..6b6b200d7 100644
--- a/IndexLinear.lua
+++ b/IndexLinear.lua
@@ -73,7 +73,7 @@ function IndexLinear:reset(stdv)
 end
 
 function IndexLinear:reshapeInput(input)
-   assert(type(input) == 'table')
+   assert(torch.type(input) == 'table')
 
    local ninputs = 0
    for _, v in ipairs(input) do
@@ -108,7 +108,7 @@ function IndexLinear:reshapeInput(input)
    -- { torch.LongTensor(size1), torch.LongTensor(size2), ..., torch.LongTensor(sizeN) }, -- batch of keys
    -- { torch.Tensor(size1), torch.Tensor(size2), ..., torch.Tensor(sizeN) }, -- batch of values,
    -- }
-   if type(keys) == 'table' and type(values) == 'table' then
+   if torch.type(keys) == 'table' and torch.type(values) == 'table' then
      lkeys, lvalues = keys, values
      self.isFlat = false
      self.noBatch = false
diff --git a/SparseLinear.lua b/SparseLinear.lua
index 7c3edad19..4888fc19e 100644
--- a/SparseLinear.lua
+++ b/SparseLinear.lua
@@ -15,7 +15,7 @@ function SparseLinear:__init(inputSize, outputSize, doGradInput)
    self.gradWeight = torch.Tensor(outputSize, inputSize):zero()
    self.gradBias = torch.Tensor(outputSize):zero()
 
-   assert(type(self.doGradInput) == type(true))
+   assert(type(self.doGradInput) == 'boolean')
 
    self.lastInput = nil
    self.sparseUpdate = NO_LAST_INPUT
@@ -39,7 +39,7 @@ function SparseLinear:reset(stdv)
 end
 
 function SparseLinear:reshapeInput(input)
-   if type(input) == 'table' then
+   if torch.type(input) == 'table' then
       return input, true, false
    else
       if input:dim() == 2 then
@@ -57,7 +57,7 @@ function SparseLinear:updateOutput(input)
    local input, batchMode, legacyMode = self:reshapeInput(input)
    self.legacyMode = legacyMode
 
-   if legacyMode then 
+   if legacyMode then
      input.THNN.SparseLinear_legacyUpdateOutput(
         input:cdata(),
         self.output:cdata(),
@@ -149,8 +149,8 @@ function SparseLinear:accGradParameters(input, gradOutput, scale)
 end
 
 function SparseLinear:updateGradInput(input, gradOutput)
-   if self.legacyMode then 
-      if type(self.gradInput) ~= type(gradOutput) then self.gradInput = gradOutput.new() end
+   if self.legacyMode then
+      if torch.type(self.gradInput) ~= torch.type(gradOutput) then self.gradInput = gradOutput.new() end
       self.gradInput:resizeAs(input)
    else
       self.gradInput = {}
@@ -185,7 +185,7 @@ function SparseLinear:updateGradInput(input, gradOutput)
    return self.gradInput
 end
 
--- These functions do sparse updates / zeros. However, if we accumulated 
+-- These functions do sparse updates / zeros. However, if we accumulated
 -- gradients multiple times, we can't depend on the last input to do sparse
 -- updates.
 function SparseLinear:updateParameters(learningRate)
diff --git a/SpatialFullConvolution.lua b/SpatialFullConvolution.lua
index e6019bc18..d28579b9a 100644
--- a/SpatialFullConvolution.lua
+++ b/SpatialFullConvolution.lua
@@ -72,7 +72,7 @@ function SpatialFullConvolution:updateOutput(input)
 
   -- The input can be a table where the second element indicates the target
   -- output size, in which case the adj factors are computed automatically
-  if type(inputTensor) == 'table' then
+  if torch.type(inputTensor) == 'table' then
     inputTensor = input[1]
     local targetTensor = input[2]
     local tDims = targetTensor:dim()
@@ -113,7 +113,7 @@ function SpatialFullConvolution:updateGradInput(input, gradOutput)
 
   -- The input can be a table where the second element indicates the target
   -- output size, in which case the adj factors are computed automatically
-  if type(inputTensor) == 'table' then
+  if torch.type(inputTensor) == 'table' then
     inputTensor = input[1]
     local targetTensor = input[2]
     local tDims = targetTensor:dim()
@@ -122,7 +122,7 @@ function SpatialFullConvolution:updateGradInput(input, gradOutput)
     adjW = calculateAdj(tW, self.kW, self.padW, self.dW)
     adjH = calculateAdj(tH, self.kH, self.padH, self.dH)
     -- Momentarily extract the gradInput tensor
-    if type(self.gradInput) == 'table' then
+    if torch.type(self.gradInput) == 'table' then
       self.gradInput = self.gradInput[1] or inputTensor.new()
     end
   end
@@ -139,7 +139,7 @@ function SpatialFullConvolution:updateGradInput(input, gradOutput)
       adjW, adjH
     )
 
-    if type(input) == 'table' then
+    if torch.type(input) == 'table' then
       -- Create a zero tensor to be expanded and used as gradInput[2].
       self.zeroScalar = self.zeroScalar or input[2].new(1):zero()
       self.ones:resize(input[2]:dim()):fill(1)
@@ -162,7 +162,7 @@ function SpatialFullConvolution:accGradParameters(input, gradOutput, scale)
 
   -- The input can be a table where the second element indicates the target
   -- output size, in which case the adj factors are computed automatically
-  if type(inputTensor) == 'table' then
+  if torch.type(inputTensor) == 'table' then
     inputTensor = input[1]
     local targetTensor = input[2]
     local tDims = targetTensor:dim()
diff --git a/VolumetricFullConvolution.lua b/VolumetricFullConvolution.lua
index 0ce23401e..60843e73d 100644
--- a/VolumetricFullConvolution.lua
+++ b/VolumetricFullConvolution.lua
@@ -93,7 +93,7 @@ function VolumetricFullConvolution:updateOutput(input)
 
    -- The input can be a table where the second element indicates the target
    -- output size, in which case the adj factors are computed automatically
-   if type(inputTensor) == 'table' then
+   if torch.type(inputTensor) == 'table' then
      inputTensor = input[1]
      local targetTensor = input[2]
      local tDims = targetTensor:dim()
@@ -128,7 +128,7 @@ function VolumetricFullConvolution:updateGradInput(input, gradOutput)
 
   -- The input can be a table where the second element indicates the target
   -- output size, in which case the adj factors are computed automatically
-  if type(inputTensor) == 'table' then
+  if torch.type(inputTensor) == 'table' then
     inputTensor = input[1]
     local targetTensor = input[2]
     local tDims = targetTensor:dim()
@@ -139,7 +139,7 @@ function VolumetricFullConvolution:updateGradInput(input, gradOutput)
      adjW = calculateAdj(tW, self.kW, self.padW, self.dW)
      adjH = calculateAdj(tH, self.kH, self.padH, self.dH)
      -- Momentarily extract the gradInput tensor
-     if type(self.gradInput) == 'table' then
+     if torch.type(self.gradInput) == 'table' then
         self.gradInput = self.gradInput[1]
      end
   end
@@ -156,7 +156,7 @@ function VolumetricFullConvolution:updateGradInput(input, gradOutput)
      adjT, adjW, adjH
    )
 
-   if type(input) == 'table' then
+   if torch.type(input) == 'table' then
      -- Create a zero tensor to be expanded and used as gradInput[2].
      self.zeroScalar = self.zeroScalar or input[2].new(1):zero()
      self.ones:resize(input[2]:dim()):fill(1)
@@ -177,7 +177,7 @@ function VolumetricFullConvolution:accGradParameters(input, gradOutput, scale)
 
   -- The input can be a table where the second element indicates the target
   -- output size, in which case the adj factors are computed automatically
-  if type(inputTensor) == 'table' then
+  if torch.type(inputTensor) == 'table' then
     inputTensor = input[1]
     local targetTensor = input[2]
     local tDims = targetTensor:dim()
diff --git a/hessian.lua b/hessian.lua
index 33ef2b0e3..7518e1a1f 100644
--- a/hessian.lua
+++ b/hessian.lua
@@ -216,7 +216,7 @@ function nn.hessian.enable()
    function nn.SpatialConvolution.initDiagHessianParameters(self)
       initDiagHessianParameters(self,{'gradWeight','gradBias'},{'diagHessianWeight','diagHessianBias'})
    end
-   
+
    ----------------------------------------------------------------------
    -- SpatialConvolutionLocal
    ----------------------------------------------------------------------
@@ -361,7 +361,7 @@ function nn.hessian.enable()
 
    function nn.Sequential.parameters(self)
       local function tinsert(to, from)
-         if type(from) == 'table' then
+         if torch.type(from) == 'table' then
            for i=1,#from do
               tinsert(to,from[i])
            end
diff --git a/utils.lua b/utils.lua
index 17b52afb3..09ce1b9db 100644
--- a/utils.lua
+++ b/utils.lua
@@ -158,7 +158,7 @@ function nn.utils.addSingletonDimension(...)
   else
     view, t, dim = select(1,...)
     assert(torch.isTensor(view),
-           "output tensor expected, got " .. type(view))
+           "output tensor expected, got " .. torch.type(view))
   end
 
   assert(torch.isTensor(t), "input tensor expected")
@@ -202,14 +202,14 @@ end
 -- nn.utils.clearState(self, '_buffer', '_buffer2')
 function nn.utils.clear(self, ...)
    local arg = {...}
-   if #arg > 0 and type(arg[1]) == 'table' then
+   if #arg > 0 and torch.type(arg[1]) == 'table' then
       arg = arg[1]
    end
    local function clear(f)
       if self[f] then
          if torch.isTensor(self[f]) then
             self[f]:set()
-         elseif type(self[f]) == 'table' then
+         elseif torch.type(self[f]) == 'table' then
             self[f] = {}
          else
             self[f] = nil
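
For context, here is a minimal sketch (not part of the patch; it assumes the 'torch' and
'nn' packages are installed) of the distinction the change relies on: Lua's built-in
type() reports only the raw Lua type, while torch.type() reports the Torch class name
and falls back to type() for plain Lua values. The 'table' and 'number' checks above
therefore keep working, but Torch class instances are no longer mistaken for plain tables.

    require 'nn'

    local t = torch.Tensor(3)
    print(type(t))             -- 'userdata'            (raw Lua type of any Tensor)
    print(torch.type(t))       -- 'torch.DoubleTensor'  (the default Tensor type)

    local seq = nn.Sequential()
    print(type(seq))           -- 'table'               (nn modules are Lua tables underneath)
    print(torch.type(seq))     -- 'nn.Sequential'

    -- For plain Lua values torch.type() falls back to type():
    print(torch.type({1, 2}))  -- 'table'
    print(torch.type(42))      -- 'number'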