-- TemporalRowConvolution.lua
local THNN = require "nn.THNN"

local TemporalRowConvolution, parent = torch.class("nn.TemporalRowConvolution", "nn.Module")
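
-- TemporalRowConvolution applies a separate 1-D convolution of width kW
-- (stride dW) along the time dimension of each of the inputFrameSize feature
-- rows. By default the input is laid out as nInputFrame x inputFrameSize
-- (optionally with a leading batch dimension); set featFirst to expect
-- batch x inputFrameSize x nInputFrame instead.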
function TemporalRowConvolution:__init(inputFrameSize, kW, dW, featFirst)
   parent.__init(self)

   self.inputFrameSize = inputFrameSize
   self.kW = kW
   self.dW = dW or 1

   self.weight = torch.Tensor(inputFrameSize, 1, kW)
   self.bias = torch.Tensor(inputFrameSize)
   self.gradWeight = torch.Tensor(inputFrameSize, 1, kW)
   self.gradBias = torch.Tensor(inputFrameSize)

   -- Set to true for batch x inputFrameSize x nInputFrame
   self.featFirst = featFirst and true or false

   self:reset()
end
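
-- Remove the bias term (and its gradient buffer); the layer then computes a
-- pure row convolution without an additive offset.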
function TemporalRowConvolution:noBias()
   self.bias = nil
   self.gradBias = nil
   return self
end
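
-- Reinitialise the parameters uniformly in [-stdv, stdv]; without an explicit
-- stdv the usual fan-in rule 1/sqrt(kW * inputFrameSize) is used.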
function TemporalRowConvolution:reset(stdv)
   if stdv then
      stdv = stdv * math.sqrt(3)
   else
      stdv = 1 / math.sqrt(self.kW * self.inputFrameSize)
   end
   self.weight:uniform(-stdv, stdv)
   if self.bias then -- bias may have been removed via noBias()
      self.bias:uniform(-stdv, stdv)
   end
end
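
-- Forward pass, dispatched to the THNN backend; finput and fgradInput are
-- scratch buffers shared with the backward calls below.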
function TemporalRowConvolution:updateOutput(input)
   assert(input.THNN, torch.type(input)..".THNN backend not imported")
   self.finput = self.finput or input.new()
   self.fgradInput = self.fgradInput or input.new()

   input.THNN.TemporalRowConvolution_updateOutput(
      input:cdata(),
      self.output:cdata(),
      self.weight:cdata(),
      THNN.optionalTensor(self.bias),
      self.finput:cdata(),
      self.fgradInput:cdata(),
      self.kW,
      self.dW,
      0, -- would be self.padW
      self.featFirst
   )

   return self.output
end
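
-- Backward pass w.r.t. the input; skipped when self.gradInput has been
-- cleared (set to nil) so the computation can be avoided.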
function TemporalRowConvolution:updateGradInput(input, gradOutput)
   assert(input.THNN, torch.type(input)..".THNN backend not imported")

   if self.gradInput then
      input.THNN.TemporalRowConvolution_updateGradInput(
         input:cdata(),
         gradOutput:cdata(),
         self.gradInput:cdata(),
         self.weight:cdata(),
         self.finput:cdata(),
         self.fgradInput:cdata(),
         self.kW,
         self.dW,
         0, -- would be self.padW
         self.featFirst
      )
      return self.gradInput
   end
end
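
-- Accumulate the gradients w.r.t. weight (and bias, when present), scaled by
-- `scale` (default 1).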
function TemporalRowConvolution:accGradParameters(input, gradOutput, scale)
   assert(input.THNN, torch.type(input)..".THNN backend not imported")

   input.THNN.TemporalRowConvolution_accGradParameters(
      input:cdata(),
      gradOutput:cdata(),
      self.gradWeight:cdata(),
      THNN.optionalTensor(self.gradBias),
      self.finput:cdata(),
      self.fgradInput:cdata(),
      self.kW,
      self.dW,
      0, -- would be self.padW
      self.featFirst,
      scale or 1
   )
end
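
-- Cast the module to a new tensor type; the temporary buffers are emptied
-- first since their contents do not need to survive the cast.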
function TemporalRowConvolution:type(type, tensorCache)
   if self.finput then self.finput:set() end
   if self.fgradInput then self.fgradInput:set() end
   return parent.type(self, type, tensorCache)
end
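
-- Pretty-print as nn.TemporalRowConvolution(inputFrameSize, kW[, dW]),
-- noting when the bias has been removed.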
function TemporalRowConvolution:__tostring__()
   local s = string.format("%s(%d, %d", torch.type(self), self.inputFrameSize, self.kW)
   if self.dW ~= 1 then
      s = s .. string.format(", %d", self.dW)
   end
   if self.padW and self.padW ~= 0 then -- currently padding is not supported
      s = s .. ", " .. self.padW
   end
   if self.bias then
      return s .. ")"
   else
      return s .. ") without bias"
   end
end
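
-- Drop the intermediate buffers, e.g. before serialising the module.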
function TemporalRowConvolution:clearState()
   nn.utils.clear(self, "finput", "fgradInput", "_input", "_gradOutput")
   return parent.clearState(self)
end
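
-- A minimal usage sketch (illustrative only; the sizes below are assumptions,
-- not values taken from this file):
--
--   require "nn"
--   local rowConv = nn.TemporalRowConvolution(5, 3)    -- 5 feature rows, kernel width 3
--   local input = torch.Tensor(10, 5):uniform()        -- 10 time steps x 5 features
--   local output = rowConv:forward(input)              -- nOutputFrame x 5, no padding
--   local gradOutput = output:clone():fill(1)
--   local gradInput = rowConv:backward(input, gradOutput)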