-
-
Notifications
You must be signed in to change notification settings - Fork 16
/
Copy pathnnlib.jl
108 lines (89 loc) · 2.83 KB
/
nnlib.jl
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
using NNlib
using NNlib: expand
using NNlib: PoolDims
import NNlib: conv, depthwiseconv
"""
    NNlib.conv(x::Tensor, w::Tensor, b::Tensor, cdims::DenseConvDims)

Biased 2-D convolution of `x` with kernel `w`, dispatched to the Torch
`conv2d` backend with the stride/padding/dilation recorded in `cdims`.
"""
function NNlib.conv(
    x::Tensor{xT, N}, w::Tensor, b::Tensor{T}, cdims::DenseConvDims{M, K, S, P, D}
) where {T, N, xT, M, K, S, P, D}
    # NNlib stores padding as (lo, hi) pairs per spatial dimension; the
    # backend takes one value per dimension, so pick the leading entry of
    # each pair. Assumes symmetric padding — TODO confirm for asymmetric pads.
    pad = NNlib.padding(cdims)
    return conv2d(
        x,
        w,
        b;
        stride = collect(NNlib.stride(cdims)),
        padding = [pad[1]; pad[3]],
        dilation = collect(NNlib.dilation(cdims)),
    )
end
# Bias-free convolution: synthesize an all-zero Float32 bias (one entry per
# output channel, on the same device as `w`) and delegate to the biased method.
function NNlib.conv(x::Tensor, w::Tensor, cdims::DenseConvDims)
    zero_bias = zeros(Tensor{Float32}, size(w)[end]; dev = on(w))
    return conv(x, w, zero_bias, cdims)
end
"""
    NNlib.depthwiseconv(x::Tensor, w::Tensor, b::Tensor; stride = 1, pad = 0, dilation = 1)

Depthwise 2-D convolution backed by the Torch `_depthwise_conv2d` kernel.

NOTE(review): scalar keyword defaults go through `collect`, which turns e.g.
`stride = 1` into a 0-dimensional array rather than a vector — confirm the
backend accepts that, or that callers always pass tuples/vectors.
"""
function NNlib.depthwiseconv(
    x::Tensor{xT, N}, w::Tensor, b::Tensor{T}; stride = 1, pad = 0, dilation = 1
) where {T, N, xT}
    return _depthwise_conv2d(
        x, w, b;
        stride = collect(stride),
        padding = collect(pad),
        dilation = collect(dilation),
    )
end
# Bias-free depthwise convolution: build a zero Float32 bias sized to the last
# dimension of `w`, on the same device, then forward to the biased method.
function NNlib.depthwiseconv(x::Tensor, w::Tensor; stride = 1, pad = 0, dilation = 1)
    zero_bias = zeros(Tensor{Float32}, size(w)[end]; dev = on(w))
    return depthwiseconv(
        x, w, zero_bias;
        stride = collect(stride), pad = collect(pad), dilation = collect(dilation),
    )
end
# ReLU activation: call the Torch C function, which writes the handle of a
# freshly allocated result tensor into `out`; wrap it on the input's device.
function NNlib.relu(t::Tensor{T, N}) where {T, N}
    out = Ref(Ptr{Cvoid}())
    atg_relu(out, t.ptr)
    return Tensor{T, N}(out[], on(t))
end
# Leaky ReLU via the Torch C binding. No slope argument is forwarded, so the
# backend's default negative slope applies — presumably 0.01; TODO confirm,
# and note NNlib's `leakyrelu(x, a)` slope parameter is not supported here.
function NNlib.leakyrelu(t::Tensor{T, N}) where {T, N}
    out = Ref(Ptr{Cvoid}())
    atg_leaky_relu(out, t.ptr)
    return Tensor{T, N}(out[], on(t))
end
# Sigmoid activation: delegate to the Torch C binding and wrap the returned
# tensor handle on the same device as the input.
function NNlib.sigmoid(t::Tensor{T, N}) where {T, N}
    out = Ref(Ptr{Cvoid}())
    atg_sigmoid(out, t.ptr)
    return Tensor{T, N}(out[], on(t))
end
# Hyperbolic tangent: delegate to the Torch C binding and wrap the returned
# tensor handle on the same device as the input.
function NNlib.tanh(t::Tensor{T, N}) where {T, N}
    out = Ref(Ptr{Cvoid}())
    atg_tanh(out, t.ptr)
    return Tensor{T, N}(out[], on(t))
end
# Softmax along `dims`, forwarding the element-type-specific backend options
# (looked up from the module-level `options` table) to `_softmax`.
function NNlib.softmax(t::Tensor{T, N}; dims = 1) where {T, N}
    opts = options[T]
    return _softmax(t, dims, opts)
end
# Softmax pullback: with y = softmax(xs), the gradient w.r.t. xs is
# y .* (Δ .- sum(Δ .* y; dims)). `Δ` is first moved onto xs's device.
function NNlib.∇softmax(Δ, xs::Tensor; dims = 1)
    Δt = tensor(Δ; dev = on(xs))
    y = softmax(xs; dims = dims)
    return y .* (Δt .- sum(Δt .* y; dims = dims))
end
# Mean pooling with the window/stride/padding recorded in `pdims`. Padding is
# stored as (lo, hi) pairs per spatial dimension; the backend takes one value
# per dimension, so the leading entry of each pair is used (assumes symmetric
# padding — TODO confirm).
function NNlib.meanpool(t::Tensor, pdims::PoolDims{N, K, S, P, D}) where {N, K, S, P, D}
    window = collect(NNlib.kernel_size(pdims))
    pad = NNlib.padding(pdims)
    return _meanpool(
        t, window;
        stride = collect(NNlib.stride(pdims)),
        padding = [pad[1]; pad[3]],
    )
end
# Max pooling with the window/stride/padding/dilation recorded in `pdims`.
# As with meanpool, only the leading entry of each (lo, hi) padding pair is
# forwarded (assumes symmetric padding — TODO confirm).
function NNlib.maxpool(t::Tensor, pdims::PoolDims{N, K, S, P, D}) where {N, K, S, P, D}
    window = collect(NNlib.kernel_size(pdims))
    pad = NNlib.padding(pdims)
    return _maxpool(
        t, window;
        stride = collect(NNlib.stride(pdims)),
        padding = [pad[1]; pad[3]],
        dilation = collect(NNlib.dilation(pdims)),
    )
end