
Commit

Merge pull request #6 from Yiran-ASU/yiran-fix
Fixed the segmentation fault in CamemBERT and VIT
tancheng authored Oct 9, 2024
2 parents 06126cc + b29b271 commit 0ea2b53
Showing 2 changed files with 13 additions and 2 deletions.
experiments/CamemBERT/model/CamemBERT.py (7 additions, 2 deletions)
@@ -24,8 +24,9 @@ def __init__(self, model_name: str):
         self.layer3 = AutoModelForTokenClassification.from_pretrained(model_name).classifier
 
     def forward(self, input):
+        x = input.reshape(1, 7, 768)
         # Return only the logits.
-        x = self.layer1(input).last_hidden_state
+        x = self.layer1(x).last_hidden_state
         x = self.layer2(x)
         x = self.layer3(x)
         return x
@@ -44,7 +45,11 @@ def forward(self, input):
 
 print("Parsing sentence tokens.")
 example_input = prepare_sentence_tokens(model_name, sentence)
-print(example_input.shape)
+print("example_input shape: ", example_input.shape)
+
+# The original example_input shape is [1, 7, 768], now we reshape it into [1, 7*768]
+example_input = example_input.reshape(1, 7*768)
+print("example_input shape after reshaping: ", example_input.shape)
 
 print("Instantiating model.")
 model = OnlyLogitsHuggingFaceModel(model_name)
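Taken together, the two CamemBERT hunks flatten example_input on the caller side and restore the rank-3 shape inside forward(), so the downstream layers still see [1, 7, 768]. A minimal, self-contained sketch of that round trip, using a stand-in module rather than the real layer1/layer2/layer3 stack:

import torch

# Stand-in for the patched forward(): restore the rank-3 shape that the
# downstream HuggingFace layers expect.
class ReshapeSketch(torch.nn.Module):
    def forward(self, flat):
        return flat.reshape(1, 7, 768)

original = torch.randn(1, 7, 768)       # same shape the script prints for example_input
flat = original.reshape(1, 7 * 768)     # what the patched script now passes to the model
restored = ReshapeSketch()(flat)

print(flat.shape, restored.shape)       # torch.Size([1, 5376]) torch.Size([1, 7, 768])
print(torch.equal(original, restored))  # True: flattening and reshaping back is lossless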
experiments/VIT/model/vit.py (6 additions, 0 deletions)
@@ -25,6 +25,7 @@ def __init__(self):
         self.layer2 = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224').vit.layernorm
         self.layer3 = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224').classifier
     def forward(self,x):
+        x = x.reshape(1, 197, 768)
         x = self.layer1(x).last_hidden_state
         x = self.layer2(x)
         x = self.layer3(x)
@@ -38,6 +39,11 @@ def forward(self,x):
 example_input = model(inputs)
 print(example_input.shape)
 
+# The original example_input shape is [1, 197, 768], now we reshape it into [1, 197*768]
+example_input = example_input.reshape(1, 197*768)
+print("example_input shape after reshaping: ", example_input.shape)
+
+
 vit_model = vit().eval()
 output = vit_model(example_input)
 print(output.shape)
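The ViT change follows the same pattern with the 197-patch sequence length. The hard-coded sizes on the caller side and inside forward() have to describe the same number of elements; a short sketch (assuming example_input really is [1, 197, 768], as the script prints) showing how PyTorch enforces that:

import torch

example_input = torch.randn(1, 197, 768)     # stand-in for the script's example_input
flat = example_input.reshape(1, 197 * 768)   # 197 * 768 = 151296 elements either way

# A mismatched hard-coded size is rejected by reshape rather than silently accepted.
try:
    flat.reshape(1, 196, 768)
except RuntimeError as err:
    print("mismatched reshape rejected:", err)

print(flat.reshape(1, 197, 768).shape)       # torch.Size([1, 197, 768])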
