Setting up Multi Scale Gradient loss...
Done
Will write images to: ./out/depth
spike/r128
Found 1 samples in ./DENSE
./DENSE/test_sequence_00_town10
spike/r128
----- [3, 6, 12]
---- new version 4 ----
Loading initial model weights from: /home/huliwen/spikecamera/MDE-SpikingCamera-main/s2d_weights/debug_A100_SpikeTransformerUNetConv_LocalGlobal-Swin3D-T/model_best.pth.tar
999
(1, 224, 224)
Traceback (most recent call last):
File "/home/huliwen/spikecamera/MDE-SpikingCamera-main/test_DENSE.py", line 493, in
main(config, args.path_to_model, args.output_path, args.data_folder)
File "/home/huliwen/spikecamera/MDE-SpikingCamera-main/test_DENSE.py", line 299, in main
new_predicted_targets, new_super_states, new_states_lstm = model(input,
File "/home/huliwen/anaconda3/envs/spikedepth/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1130, in _call_impl
return forward_call(*input, **kwargs)
File "/home/huliwen/anaconda3/envs/spikedepth/lib/python3.10/site-packages/torch/nn/parallel/data_parallel.py", line 166, in forward
return self.module(*inputs[0], **kwargs[0])
File "/home/huliwen/anaconda3/envs/spikedepth/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1130, in _call_impl
return forward_call(*input, **kwargs)
File "/home/huliwen/spikecamera/MDE-SpikingCamera-main/model/S2DepthNet.py", line 129, in forward
encoded_xs = self.encoder(spike_tensor)
File "/home/huliwen/anaconda3/envs/spikedepth/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1130, in _call_impl
return forward_call(*input, **kwargs)
File "/home/huliwen/spikecamera/MDE-SpikingCamera-main/model/encoder_transformer.py", line 65, in forward
features = self.swin3d(inputs)
File "/home/huliwen/anaconda3/envs/spikedepth/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1130, in _call_impl
return forward_call(*input, **kwargs)
File "/home/huliwen/spikecamera/MDE-SpikingCamera-main/model/swin_transformer_3d.py", line 818, in forward
x = self.patch_embed(x)
File "/home/huliwen/anaconda3/envs/spikedepth/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1130, in _call_impl
return forward_call(*input, **kwargs)
File "/home/huliwen/spikecamera/MDE-SpikingCamera-main/model/swin_transformer_3d.py", line 109, in forward
outi_global = self.global_head(x)
File "/home/huliwen/anaconda3/envs/spikedepth/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1130, in _call_impl
return forward_call(*input, **kwargs)
File "/home/huliwen/anaconda3/envs/spikedepth/lib/python3.10/site-packages/torch/nn/modules/conv.py", line 457, in forward
return self._conv_forward(input, self.weight, self.bias)
File "/home/huliwen/anaconda3/envs/spikedepth/lib/python3.10/site-packages/torch/nn/modules/conv.py", line 453, in _conv_forward
return F.conv2d(input, weight, bias, self.stride,
RuntimeError: Given groups=1, weight of size [48, 128, 3, 3], expected input[1, 1, 224, 224] to have 128 channels, but got 1 channels instead
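
The mismatch can be reproduced outside the model. A minimal sketch (the layer below is only a stand-in for `global_head` in `swin_transformer_3d.py`; `padding=1` is an assumption, not taken from the repository): the conv weight `[48, 128, 3, 3]` requires a 128-channel input, matching the "spike/r128" representation of 128 spike frames per window, while the test script hands the `(1, 224, 224)` sample to the network as a single channel. The spike frames presumably need to be stacked along the channel dimension before the forward pass.

```python
import torch
import torch.nn as nn

# Stand-in for the patch_embed global_head conv with weight shape [48, 128, 3, 3]:
# it expects 128 input channels (one per spike frame in the r128 window).
global_head = nn.Conv2d(in_channels=128, out_channels=48, kernel_size=3, padding=1)

# What the script currently feeds: a single-channel (1, 224, 224) sample,
# which triggers the RuntimeError shown in the traceback above.
bad_input = torch.zeros(1, 1, 224, 224)
# global_head(bad_input)  # RuntimeError: expected input to have 128 channels, but got 1

# What the layer can consume: 128 spike frames stacked along the channel dimension.
good_input = torch.zeros(1, 128, 224, 224)
out = global_head(good_input)
print(out.shape)  # torch.Size([1, 48, 224, 224])
```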