-
Notifications
You must be signed in to change notification settings - Fork 167
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Merge pull request #375 from bghira/main
fix --aspect_bucket_rounding not being applied correctly | rebuild image sample handling to be structured object-oriented logic | fix early epoch exit problem | max epochs vs max steps ambiguity reduced by setting default to 0 for one of them | fixes for LoRA text encoder save/load hooks | optimise trainer | 300% performance gain by removing the torch anomaly detector | fix dataset race condition where a single image dataset was not being detected | AMD documentation for install, dependencies thanks to Beinsezii | fix for wandb timestep distribution chart values racing ahead of reality
- Loading branch information
Showing
32 changed files
with
699 additions
and
605 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,96 @@ | ||
from PIL import Image | ||
|
||
|
||
class BaseCropping:
    """Base class for crop strategies.

    Holds the source image (or, when operating metadata-only, its metadata
    dict) and derives the original dimensions from whichever was supplied.
    """

    def __init__(self, image: "Image.Image" = None, image_metadata: dict = None):
        """
        Args:
            image: Source PIL image to crop; may be None when only
                metadata is available.
            image_metadata: Metadata dict containing an "original_size"
                (width, height) entry; consulted only when `image` is None.

        Raises:
            ValueError: If neither `image` nor `image_metadata` is provided.
        """
        self.image = image
        self.image_metadata = image_metadata
        # Record the source dimensions from whichever input was provided.
        if self.image:
            self.original_width, self.original_height = self.image.size
        elif self.image_metadata:
            self.original_width, self.original_height = self.image_metadata[
                "original_size"
            ]
        else:
            # Previously this fell through silently, leaving original_width /
            # original_height unset and deferring the failure to crop time.
            raise ValueError("Either image or image_metadata must be provided")

    def crop(self, target_width, target_height):
        """Return (cropped_image_or_metadata, (left, top)); implemented by subclasses."""
        raise NotImplementedError("Subclasses must implement this method")
|
||
|
||
class CornerCropping(BaseCropping):
    """Crop a window anchored to the bottom-right corner of the source."""

    def crop(self, target_width, target_height):
        # Pin the window's right/bottom edges to the image's right/bottom
        # edges; the left/top offsets clamp at 0 for undersized sources.
        right, bottom = self.original_width, self.original_height
        left = max(0, right - target_width)
        top = max(0, bottom - target_height)
        if self.image:
            return self.image.crop((left, top, right, bottom)), (left, top)
        elif self.image_metadata:
            # Metadata-only mode: no pixels to cut, just report the offset.
            return self.image_metadata, (left, top)
|
||
|
||
class CenterCropping(BaseCropping):
    """Crop a window of the requested size centered on the source."""

    def crop(self, target_width, target_height):
        # Split the width/height surplus evenly between the two sides.
        ow, oh = self.original_width, self.original_height
        left = (ow - target_width) / 2
        top = (oh - target_height) / 2
        right = (ow + target_width) / 2
        bottom = (oh + target_height) / 2
        if self.image:
            return self.image.crop((left, top, right, bottom)), (left, top)
        elif self.image_metadata:
            # Metadata-only mode: no pixels to cut, just report the offset.
            return self.image_metadata, (left, top)
|
||
|
||
class RandomCropping(BaseCropping):
    """Crop a window of the requested size at a uniformly random offset."""

    def crop(self, target_width, target_height):
        import random

        # Choose the top-left corner anywhere the window still fits; when
        # the source is smaller than the target the offset collapses to 0.
        max_left = max(0, self.original_width - target_width)
        max_top = max(0, self.original_height - target_height)
        left = random.randint(0, max_left)
        top = random.randint(0, max_top)
        right = left + target_width
        bottom = top + target_height
        if self.image:
            return self.image.crop((left, top, right, bottom)), (left, top)
        elif self.image_metadata:
            # Metadata-only mode: no pixels to cut, just report the offset.
            return self.image_metadata, (left, top)
|
||
|
||
class FaceCropping(RandomCropping):
    """Crop around the largest detected face, padding the detection box by
    half its size on every side; falls back to a random crop when no face
    is found.
    """

    def crop(
        self,
        image: Image.Image,
        target_width: int,
        target_height: int,
    ):
        """Return (cropped_image, (left, top)).

        Args:
            image: Source image to search for faces.
            target_width: Width of the fallback random crop.
            target_height: Height of the fallback random crop.
        """
        # Lazy imports keep cv2/numpy optional for the other strategies.
        import cv2
        import numpy as np

        # Frontal-face Haar cascade bundled with OpenCV.
        face_cascade = cv2.CascadeClassifier(
            cv2.data.haarcascades + "haarcascade_frontalface_default.xml"
        )
        image = image.convert("RGB")
        image = np.array(image)
        # NOTE(review): the array is RGB at this point but BGR2GRAY is used;
        # the grayscale channel weights differ slightly from RGB2GRAY —
        # confirm this is intentional.
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.1, 4)
        if len(faces) > 0:
            # Keep the largest detection by bounding-box area.
            x, y, w, h = max(faces, key=lambda f: f[2] * f[3])
            # Expand the box by 50% of its size on each side, clamped to
            # the image bounds.
            left = max(0, x - 0.5 * w)
            top = max(0, y - 0.5 * h)
            right = min(image.shape[1], x + 1.5 * w)
            bottom = min(image.shape[0], y + 1.5 * h)
            image = Image.fromarray(image)
            return image.crop((left, top, right, bottom)), (left, top)
        else:
            # Fallback: random crop. Bug fix: the original called
            # `super.crop(image, ...)` — `super` must be *called* (`super()`),
            # and RandomCropping.crop() takes only the target dimensions; it
            # reads self.image / self.image_metadata set by the constructor.
            return super().crop(target_width, target_height)
|
||
|
||
# Registry mapping user-facing crop-style keys to their strategy class.
# Both the "centre" and "center" spellings resolve to the same handler.
crop_handlers = {
    "corner": CornerCropping,
    "centre": CenterCropping,
    "center": CenterCropping,
    "random": RandomCropping,
}
Oops, something went wrong.