
Error when generating segmentation images in Jupyter #12

Open
ggnogg opened this issue Dec 13, 2024 · 1 comment

ggnogg commented Dec 13, 2024


```
RuntimeError Traceback (most recent call last)
Cell In[18], line 6
4 image = data["image"].cuda()
5 mask = data["mask"].cuda()
----> 6 out = model.forward(image)
7 out = torch.sigmoid(out)
8 out[out < 0.5] = 0

File D:\Simon\downloads\Edgedetection\MobileUNETR\experiments_medical\isic_2016\exp_2_dice_b8_a2../../..\architectures\mobileunetr.py:1016, in MViTxxsSegPretrained.forward(self, x)
1015 def forward(self, x):
-> 1016 enc_dict = self.encoder(x)
1017 btlneck = self.bottleneck(enc_dict["hidden_states"][-1])
1018 dec_out = self.decoder(btlneck, list(enc_dict["hidden_states"]))

File D:\Simon\downloads\anaconda\envs\munetr\lib\site-packages\torch\nn\modules\module.py:1736, in Module._wrapped_call_impl(self, *args, **kwargs)
1734 return self._compiled_call_impl(*args, **kwargs) # type: ignore[misc]
1735 else:
-> 1736 return self._call_impl(*args, **kwargs)

File D:\Simon\downloads\anaconda\envs\munetr\lib\site-packages\torch\nn\modules\module.py:1747, in Module._call_impl(self, *args, **kwargs)
1742 # If we don't have any hooks, we want to skip the rest of the logic in
1743 # this function, and just call forward.
1744 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
1745 or _global_backward_pre_hooks or _global_backward_hooks
1746 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1747 return forward_call(*args, **kwargs)
1749 result = None
1750 called_always_called_hooks = set()

File D:\Simon\downloads\Edgedetection\MobileUNETR\experiments_medical\isic_2016\exp_2_dice_b8_a2../../..\architectures\mobileunetr.py:949, in MViTxxsEncoderPretrained.forward(self, x)
947 def forward(self, x):
948 raw_input = x.clone()
--> 949 hidden_states = self.encoder.forward(x, output_hidden_states=True).hidden_states
950 out_dict = {"raw_input": raw_input, "hidden_states": hidden_states}
951 return out_dict

File D:\Simon\downloads\anaconda\envs\munetr\lib\site-packages\transformers\models\mobilevit\modeling_mobilevit.py:754, in MobileViTModel.forward(self, pixel_values, output_hidden_states, return_dict)
751 if pixel_values is None:
752 raise ValueError("You have to specify pixel_values")
--> 754 embedding_output = self.conv_stem(pixel_values)
756 encoder_outputs = self.encoder(
757 embedding_output,
758 output_hidden_states=output_hidden_states,
759 return_dict=return_dict,
760 )
762 if self.expand_output:

File D:\Simon\downloads\anaconda\envs\munetr\lib\site-packages\torch\nn\modules\module.py:1736, in Module._wrapped_call_impl(self, *args, **kwargs)
1734 return self._compiled_call_impl(*args, **kwargs) # type: ignore[misc]
1735 else:
-> 1736 return self._call_impl(*args, **kwargs)

File D:\Simon\downloads\anaconda\envs\munetr\lib\site-packages\torch\nn\modules\module.py:1747, in Module._call_impl(self, *args, **kwargs)
1742 # If we don't have any hooks, we want to skip the rest of the logic in
1743 # this function, and just call forward.
1744 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
1745 or _global_backward_pre_hooks or _global_backward_hooks
1746 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1747 return forward_call(*args, **kwargs)
1749 result = None
1750 called_always_called_hooks = set()

File D:\Simon\downloads\anaconda\envs\munetr\lib\site-packages\transformers\models\mobilevit\modeling_mobilevit.py:133, in MobileViTConvLayer.forward(self, features)
132 def forward(self, features: torch.Tensor) -> torch.Tensor:
--> 133 features = self.convolution(features)
134 if self.normalization is not None:
135 features = self.normalization(features)

File D:\Simon\downloads\anaconda\envs\munetr\lib\site-packages\torch\nn\modules\module.py:1736, in Module._wrapped_call_impl(self, *args, **kwargs)
1734 return self._compiled_call_impl(*args, **kwargs) # type: ignore[misc]
1735 else:
-> 1736 return self._call_impl(*args, **kwargs)

File D:\Simon\downloads\anaconda\envs\munetr\lib\site-packages\torch\nn\modules\module.py:1747, in Module._call_impl(self, *args, **kwargs)
1742 # If we don't have any hooks, we want to skip the rest of the logic in
1743 # this function, and just call forward.
1744 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
1745 or _global_backward_pre_hooks or _global_backward_hooks
1746 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1747 return forward_call(*args, **kwargs)
1749 result = None
1750 called_always_called_hooks = set()

File D:\Simon\downloads\anaconda\envs\munetr\lib\site-packages\torch\nn\modules\conv.py:554, in Conv2d.forward(self, input)
553 def forward(self, input: Tensor) -> Tensor:
--> 554 return self._conv_forward(input, self.weight, self.bias)

File D:\Simon\downloads\anaconda\envs\munetr\lib\site-packages\torch\nn\modules\conv.py:549, in Conv2d._conv_forward(self, input, weight, bias)
537 if self.padding_mode != "zeros":
538 return F.conv2d(
539 F.pad(
540 input, self._reversed_padding_repeated_twice, mode=self.padding_mode
(...)
547 self.groups,
548 )
--> 549 return F.conv2d(
550 input, weight, bias, self.stride, self.padding, self.dilation, self.groups
551 )

RuntimeError: Input type (torch.cuda.FloatTensor) and weight type (torch.FloatTensor) should be the same
```
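This error means the input tensor was moved to the GPU (`image = data["image"].cuda()` in the cell) while the model's `Conv2d` weights are still on the CPU, so `F.conv2d` sees mismatched tensor types. The author didn't post their fix, but the usual remedy is to move the model onto the same device as the data before calling forward. A minimal sketch, assuming `model` and `data` are the objects from the notebook's earlier cells:

```python
import torch

# Pick one device and put both the model and the inputs on it. The traceback
# shows the image on CUDA but the conv weights on CPU, so the two disagree.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

model = model.to(device)  # moves all parameters and buffers onto `device`
model.eval()              # inference mode: freeze dropout / batch-norm stats

with torch.no_grad():     # no gradients needed when generating masks
    image = data["image"].to(device)
    mask = data["mask"].to(device)
    out = torch.sigmoid(model(image))
    # same effect as the notebook's out[out < 0.5] = 0 (plus setting the rest to 1)
    out = (out >= 0.5).float()
```

Calling `model.cuda()` once right after the checkpoint is loaded works too; `to(device)` is just the device-agnostic form.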

ggnogg (Author) commented Dec 13, 2024

Already solved.
