@@ -26,18 +26,6 @@ void THSNN_Module_register_module(const NNModule module, const char* name, const
     );
 }
 
-NNModule THSNN_Module_load(const char * location, const char * name)
-{
-    CATCH_RETURN_NNModule(
-        auto module = new torch::nn::Module();
-        auto input = torch::serialize::InputArchive();
-
-        input.load_from(location);
-        module->load(input);
-        res = new std::shared_ptr<torch::nn::Module>(module);
-    );
-}
-
 int THSNN_Module_has_parameter(const NNModule module, const char * name)
 {
     CATCH_RETURN(int, 0, (*module)->named_parameters().contains(name));
@@ -135,212 +123,206 @@ class CustomModule : public torch::nn::Module
 
 };
 
-NNModule THSNN_custom_module(const char * name,
+NNModule THSNN_CustomModule_ctor(const char * name,
     const char ** names,
     at::Tensor** parameters,
     const bool * require_grad,
     const int length,
-    Tensor (*forward)(Tensor),
-    NNAnyModule *outAsAnyModule)
+    Tensor (*forward)(Tensor))
 {
     CATCH_RETURN_NNModule(
         auto mod = new CustomModule(name, names, parameters, require_grad, length, forward);
-
-        // Keep a boxed version of the module in case we add it to a Sequential later (the C++ templating means
-        // a Module can only be boxed to AnyModule at the point its static type is known).
-        if (outAsAnyModule != NULL)
-        {
-            auto modShared = new std::shared_ptr<CustomModule>(mod);
-            auto wrapped = std::make_shared<torch::nn::AnyModule>(torch::nn::ModuleHolder<CustomModule>(*modShared));
-            *outAsAnyModule = new std::shared_ptr<torch::nn::AnyModule>(wrapped);
-        }
         res = new std::shared_ptr<torch::nn::Module>((torch::nn::Module*)mod);
     );
 }
 
-NNModule THSNN_ReLU_ctor(bool inplace, NNAnyModule* outAsAnyModule)
+NNAnyModule THSNN_CustomModule_wrap(const NNModule module)
+{
+    CATCH_RETURN_NNAnyModule(
+        auto wrapped = std::make_shared<torch::nn::AnyModule>(torch::nn::ModuleHolder<CustomModule>(*((*module)->as<CustomModule>())));
+        res = new std::shared_ptr<torch::nn::AnyModule>(wrapped);
+    );
+}
+
+NNModule THSNN_ReLU_ctor(bool inplace)
 {
     CATCH_RETURN_NNModule(
         auto opts = torch::nn::ReLUOptions(inplace);
         auto mod = std::make_shared<torch::nn::ReLUImpl>(opts);
-
-        // Keep a boxed version of the module in case we add it to a Sequential later (the C++ templating means
-        // a Module can only be boxed to AnyModule at the point its static type is known).
-        if (outAsAnyModule != NULL)
-        {
-            auto wrapped = std::make_shared<torch::nn::AnyModule>(torch::nn::ModuleHolder<torch::nn::ReLUImpl>(*mod));
-            *outAsAnyModule = new std::shared_ptr<torch::nn::AnyModule>(wrapped);
-        }
-
         res = new std::shared_ptr<torch::nn::Module>(mod);
     );
 }
 
+NNAnyModule THSNN_ReLU_wrap(const NNModule module)
+{
+    CATCH_RETURN_NNAnyModule(
+        auto wrapped = std::make_shared<torch::nn::AnyModule>(torch::nn::ModuleHolder<torch::nn::ReLUImpl>(*((*module)->as<torch::nn::ReLU>())));
+        res = new std::shared_ptr<torch::nn::AnyModule>(wrapped);
+    );
+}
+
 Tensor THSNN_ReLU_forward(const NNModule module, const Tensor tensor)
 {
     CATCH_TENSOR((*module)->as<torch::nn::ReLU>()->forward(*tensor));
 }
 
-NNModule THSNN_Dropout_ctor(double probability, NNAnyModule* outAsAnyModule)
+NNModule THSNN_Dropout_ctor(double probability)
 {
     CATCH_RETURN_NNModule(
         auto opts = torch::nn::DropoutOptions(probability);
         auto mod = std::make_shared<torch::nn::DropoutImpl>(opts);
-
-        // Keep a boxed version of the module in case we add it to a Sequential later (the C++ templating means
-        // a Module can only be boxed to AnyModule at the point its static type is known).
-        if (outAsAnyModule != NULL)
-        {
-            auto wrapped = std::make_shared<torch::nn::AnyModule>(torch::nn::ModuleHolder<torch::nn::DropoutImpl>(*mod));
-            *outAsAnyModule = new std::shared_ptr<torch::nn::AnyModule>(wrapped);
-        }
-
         res = new std::shared_ptr<torch::nn::Module>(mod);
     );
 }
 
+NNAnyModule THSNN_Dropout_wrap(const NNModule module)
+{
+    CATCH_RETURN_NNAnyModule(
+        auto wrapped = std::make_shared<torch::nn::AnyModule>(torch::nn::ModuleHolder<torch::nn::DropoutImpl>(*((*module)->as<torch::nn::Dropout>())));
+        res = new std::shared_ptr<torch::nn::AnyModule>(wrapped);
+    );
+}
+
 Tensor THSNN_Dropout_forward(const NNModule module, const Tensor tensor)
 {
     CATCH_TENSOR((*module)->as<torch::nn::Dropout>()->forward(*tensor));
 }
 
-NNModule THSNN_FeatureAlphaDropout_ctor(double probability, NNAnyModule* outAsAnyModule)
+NNModule THSNN_FeatureAlphaDropout_ctor(double probability)
 {
     CATCH_RETURN_NNModule(
         auto opts = torch::nn::FeatureAlphaDropoutOptions(probability);
         auto mod = std::make_shared<torch::nn::FeatureAlphaDropoutImpl>(opts);
-
-        // Keep a boxed version of the module in case we add it to a Sequential later (the C++ templating means
-        // a Module can only be boxed to AnyModule at the point its static type is known).
-        if (outAsAnyModule != NULL)
-        {
-            auto wrapped = std::make_shared<torch::nn::AnyModule>(torch::nn::ModuleHolder<torch::nn::FeatureAlphaDropoutImpl>(*mod));
-            *outAsAnyModule = new std::shared_ptr<torch::nn::AnyModule>(wrapped);
-        }
         res = new std::shared_ptr<torch::nn::Module>(mod);
     );
 }
 
+NNAnyModule THSNN_FeatureAlphaDropout_wrap(const NNModule module)
+{
+    CATCH_RETURN_NNAnyModule(
+        auto wrapped = std::make_shared<torch::nn::AnyModule>(torch::nn::ModuleHolder<torch::nn::FeatureAlphaDropoutImpl>(*((*module)->as<torch::nn::FeatureAlphaDropout>())));
+        res = new std::shared_ptr<torch::nn::AnyModule>(wrapped);
+    );
+}
+
 Tensor THSNN_FeatureAlphaDropout_forward(const NNModule module, const Tensor tensor)
 {
     CATCH_TENSOR((*module)->as<torch::nn::FeatureAlphaDropout>()->forward(*tensor));
 }
 
-NNModule THSNN_LogSoftMax_ctor(int64_t dim, NNAnyModule* outAsAnyModule)
+NNModule THSNN_LogSoftmax_ctor(const int64_t dim)
 {
     CATCH_RETURN_NNModule(
         auto opts = torch::nn::LogSoftmaxOptions(dim);
         auto mod = std::make_shared<torch::nn::LogSoftmaxImpl>(opts);
-
-        // Keep a boxed version of the module in case we add it to a Sequential later (the C++ templating means
-        // a Module can only be boxed to AnyModule at the point its static type is known).
-        if (outAsAnyModule != NULL)
-        {
-            auto wrapped = std::make_shared<torch::nn::AnyModule>(torch::nn::ModuleHolder<torch::nn::LogSoftmaxImpl>(*mod));
-            *outAsAnyModule = new std::shared_ptr<torch::nn::AnyModule>(wrapped);
-        }
         res = new std::shared_ptr<torch::nn::Module>(mod);
     );
 }
 
-Tensor THSNN_LogSoftMax_forward(const NNModule module, const Tensor tensor)
+NNAnyModule THSNN_LogSoftmax_wrap(const NNModule module)
+{
+    CATCH_RETURN_NNAnyModule(
+        auto wrapped = std::make_shared<torch::nn::AnyModule>(torch::nn::ModuleHolder<torch::nn::LogSoftmaxImpl>(*((*module)->as<torch::nn::LogSoftmax>())));
+        res = new std::shared_ptr<torch::nn::AnyModule>(wrapped);
+    );
+}
+
+Tensor THSNN_LogSoftmax_forward(const NNModule module, const Tensor tensor)
 {
     CATCH_TENSOR((*module)->as<torch::nn::LogSoftmax>()->forward(*tensor));
 }
 
-NNModule THSNN_AvgPool2d_ctor(const int64_t * kernelSize, const int kernelSizeLength, const int64_t * stride, const int strideLength,
-    NNAnyModule* outAsAnyModule)
+NNModule THSNN_AvgPool2d_ctor(const int64_t * kernelSize, const int kernelSizeLength, const int64_t * stride, const int strideLength)
 {
     CATCH_RETURN_NNModule(
         auto opts = torch::nn::AvgPool2dOptions(at::ArrayRef<int64_t>(kernelSize, kernelSizeLength));
         if (stride)
             opts = opts.stride(at::ArrayRef<int64_t>(stride, strideLength));
         auto mod = std::make_shared<torch::nn::AvgPool2dImpl>(opts);
-
-        // Keep a boxed version of the module in case we add it to a Sequential later (the C++ templating means
-        // a Module can only be boxed to AnyModule at the point its static type is known).
-        if (outAsAnyModule != NULL)
-        {
-            auto wrapped = std::make_shared<torch::nn::AnyModule>(torch::nn::ModuleHolder<torch::nn::AvgPool2dImpl>(*mod));
-            *outAsAnyModule = new std::shared_ptr<torch::nn::AnyModule>(wrapped);
-        }
         res = new std::shared_ptr<torch::nn::Module>(mod);
     );
 }
 
+NNAnyModule THSNN_AvgPool2d_wrap(const NNModule module)
+{
+    CATCH_RETURN_NNAnyModule(
+        auto wrapped = std::make_shared<torch::nn::AnyModule>(torch::nn::ModuleHolder<torch::nn::AvgPool2dImpl>(*((*module)->as<torch::nn::AvgPool2d>())));
+        res = new std::shared_ptr<torch::nn::AnyModule>(wrapped);
+    );
+}
+
 Tensor THSNN_AvgPool2d_forward(const NNModule module, const Tensor tensor)
 {
     CATCH_TENSOR((*module)->as<torch::nn::AvgPool2d>()->forward(*tensor));
 }
 
-NNModule THSNN_AdaptiveAvgPool2d_ctor(const int64_t * kernelSize, const int kernelSizeLength,
-    NNAnyModule* outAsAnyModule)
+NNModule THSNN_AdaptiveAvgPool2d_ctor(const int64_t * kernelSize, const int kernelSizeLength)
 {
     CATCH_RETURN_NNModule(
         auto opts = torch::nn::AdaptiveAvgPool2dOptions(at::ArrayRef<int64_t>(kernelSize, kernelSizeLength));
         auto mod = std::make_shared<torch::nn::AdaptiveAvgPool2dImpl>(opts);
-
-        // Keep a boxed version of the module in case we add it to a Sequential later (the C++ templating means
-        // a Module can only be boxed to AnyModule at the point its static type is known).
-        if (outAsAnyModule != NULL)
-        {
-            auto wrapped = std::make_shared<torch::nn::AnyModule>(torch::nn::ModuleHolder<torch::nn::AdaptiveAvgPool2dImpl>(*mod));
-            *outAsAnyModule = new std::shared_ptr<torch::nn::AnyModule>(wrapped);
-        }
         res = new std::shared_ptr<torch::nn::Module>(mod);
     );
 }
 
+NNAnyModule THSNN_AdaptiveAvgPool2d_wrap(const NNModule module)
+{
+    CATCH_RETURN_NNAnyModule(
+        auto wrapped = std::make_shared<torch::nn::AnyModule>(torch::nn::ModuleHolder<torch::nn::AdaptiveAvgPool2dImpl>(*((*module)->as<torch::nn::AdaptiveAvgPool2d>())));
+        res = new std::shared_ptr<torch::nn::AnyModule>(wrapped);
+    );
+}
+
 Tensor THSNN_AdaptiveAvgPool2d_forward(const NNModule module, const Tensor tensor)
 {
     CATCH_TENSOR((*module)->as<torch::nn::AdaptiveAvgPool2d>()->forward(*tensor));
 }
 
-NNModule THSNN_MaxPool2d_ctor(const int64_t * kernelSize, const int kernelSizeLength, const int64_t * stride, const int strideLength,
-    NNAnyModule* outAsAnyModule)
+NNModule THSNN_MaxPool2d_ctor(const int64_t * kernelSize, const int kernelSizeLength, const int64_t * stride, const int strideLength)
 {
     CATCH_RETURN_NNModule(
         auto opts = torch::nn::MaxPool2dOptions(at::ArrayRef<int64_t>(kernelSize, kernelSizeLength));
         auto mod = std::make_shared<torch::nn::MaxPool2dImpl>(opts);
         if (stride)
             opts = opts.stride(at::ArrayRef<int64_t>(stride, strideLength));
 
-        // Keep a boxed version of the module in case we add it to a Sequential later (the C++ templating means
-        // a Module can only be boxed to AnyModule at the point its static type is known).
-        if (outAsAnyModule != NULL)
-        {
-            auto wrapped = std::make_shared<torch::nn::AnyModule>(torch::nn::ModuleHolder<torch::nn::MaxPool2dImpl>(*mod));
-            *outAsAnyModule = new std::shared_ptr<torch::nn::AnyModule>(wrapped);
-        }
         res = new std::shared_ptr<torch::nn::Module>(mod);
     )
 }
 
+NNAnyModule THSNN_MaxPool2d_wrap(const NNModule module)
+{
+    CATCH_RETURN_NNAnyModule(
+        auto wrapped = std::make_shared<torch::nn::AnyModule>(torch::nn::ModuleHolder<torch::nn::MaxPool2dImpl>(*((*module)->as<torch::nn::MaxPool2d>())));
+        res = new std::shared_ptr<torch::nn::AnyModule>(wrapped);
+    );
+}
+
 Tensor THSNN_MaxPool2d_forward(const NNModule module, const Tensor tensor)
 {
     CATCH_TENSOR((*module)->as<torch::nn::MaxPool2d>()->forward(*tensor));
 }
 
-NNModule THSNN_Linear_ctor(const int64_t input_size, const int64_t output_size, const bool bias,
-    NNAnyModule* outAsAnyModule)
+NNModule THSNN_Linear_ctor(const int64_t input_size, const int64_t output_size, const bool bias)
 {
     CATCH_RETURN_NNModule(
         auto opts = torch::nn::LinearOptions(input_size, output_size);
         opts = opts.bias(bias);
 
         auto mod = std::make_shared<torch::nn::LinearImpl>(opts);
-
-        // Keep a boxed version of the module in case we add it to a Sequential later (the C++ templating means
-        // a Module can only be boxed to AnyModule at the point its static type is known).
-        if (outAsAnyModule != NULL)
-        {
-            auto wrapped = std::make_shared<torch::nn::AnyModule>(torch::nn::ModuleHolder<torch::nn::LinearImpl>(*mod));
-            *outAsAnyModule = new std::shared_ptr<torch::nn::AnyModule>(wrapped);
-        }
         res = new std::shared_ptr<torch::nn::Module>(mod);
     );
 }
 
+NNAnyModule THSNN_Linear_wrap(const NNModule module)
+{
+    // CATCH_RETURN_NNAnyModule(
+    auto p = (*module)->as<torch::nn::Linear>();
+    auto wrapped = std::make_shared<torch::nn::AnyModule>(torch::nn::ModuleHolder<torch::nn::LinearImpl>(*p));
+    return new std::shared_ptr<torch::nn::AnyModule>(wrapped);
+    // );
+}
+
 Tensor THSNN_Linear_forward(const NNModule module, const Tensor tensor)
 {
     CATCH_TENSOR((*module)->as<torch::nn::Linear>()->forward(*tensor));
@@ -371,25 +353,25 @@ void THSNN_Linear_set_weight(const NNModule module, const Tensor weight)
 }
 
 NNModule THSNN_Conv2d_ctor(const int64_t inputChannel, const int64_t outputChannel,
-    const int64_t kernelSize, const int64_t stride, const int64_t padding,
-    NNAnyModule* outAsAnyModule)
+    const int64_t kernelSize, const int64_t stride, const int64_t padding)
 {
     CATCH_RETURN_NNModule(
         auto opts = torch::nn::Conv2dOptions(inputChannel, outputChannel, kernelSize).stride(stride).padding(padding);
 
-        auto mod = std::make_shared<torch::nn::Conv2dImpl>(opts);
-
-        // Keep a boxed version of the module in case we add it to a Sequential later (the C++ templating means
-        // a Module can only be boxed to AnyModule at the point its static type is known).
-        if (outAsAnyModule != NULL)
-        {
-            auto wrapped = std::make_shared<torch::nn::AnyModule>(torch::nn::ModuleHolder<torch::nn::Conv2dImpl>(*mod));
-            *outAsAnyModule = new std::shared_ptr<torch::nn::AnyModule>(wrapped);
-        }
+        auto mod = std::make_shared<torch::nn::Conv2dImpl>(opts);
         res = new std::shared_ptr<torch::nn::Module>(mod);
     );
 }
 
+NNAnyModule THSNN_Conv2d_wrap(const NNModule module)
+{
+    // CATCH_RETURN_NNAnyModule(
+    auto p = (*module)->as<torch::nn::Conv2d>();
+    auto wrapped = std::make_shared<torch::nn::AnyModule>(torch::nn::ModuleHolder<torch::nn::Conv2dImpl>(*p));
+    return new std::shared_ptr<torch::nn::AnyModule>(wrapped);
+    // );
+}
+
 Tensor THSNN_Conv2d_forward(const NNModule module, const Tensor tensor)
 {
     CATCH_TENSOR((*module)->as<torch::nn::Conv2d>()->forward(*tensor));
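
The commit replaces the optional outAsAnyModule output parameter of each *_ctor with a dedicated *_wrap entry point, so a module is boxed into a torch::nn::AnyModule only on demand, typically right before it is pushed onto a torch::nn::Sequential. Below is a minimal caller-side sketch of the new two-step pattern; the typedefs and simplified declarations are assumptions for illustration (the real ones live in the LibTorchSharp headers, which are not part of this diff).

#include <torch/torch.h>
#include <cstdint>

// Assumed shapes of the opaque handle types, inferred from how the
// functions above allocate their results (illustrative only).
typedef std::shared_ptr<torch::nn::Module>*    NNModule;
typedef std::shared_ptr<torch::nn::AnyModule>* NNAnyModule;

// Simplified declarations of two entry points from this file
// (export/calling-convention macros omitted).
NNModule    THSNN_Linear_ctor(const int64_t input_size, const int64_t output_size, const bool bias);
NNAnyModule THSNN_Linear_wrap(const NNModule module);

int main()
{
    // Step 1: construct the module through the flat C API.
    NNModule linear = THSNN_Linear_ctor(10, 5, true);

    // Step 2: box it into an AnyModule only when it is actually needed,
    // e.g. to add it to a Sequential container.
    NNAnyModule boxed = THSNN_Linear_wrap(linear);

    torch::nn::Sequential seq;
    seq->push_back(**boxed);                        // push_back(AnyModule) takes a copy

    auto out = seq->forward(torch::randn({2, 10})); // output shape [2, 5]

    // The handles were allocated with new inside the entry points above.
    delete boxed;
    delete linear;
}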