forked from hhhzzyang/Comfyui_Lama
-
Notifications
You must be signed in to change notification settings - Fork 0
/
LamaRemove.py
142 lines (108 loc) · 3.67 KB
/
LamaRemove.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
import os
import sys
import torch
from omegaconf import OmegaConf
from pathlib import Path
import torch
sys.path.insert(0, str(Path(__file__).resolve().parent))
from saicinpainting.evaluation.utils import move_to_device
from saicinpainting.training.trainers import load_checkpoint
from saicinpainting.evaluation.data import pad_tensor_to_modulo
# Absolute paths to the bundled model checkpoints and YAML configs,
# resolved relative to this file so the node works from any working directory.
_PLUGIN_ROOT = os.path.dirname(os.path.realpath(__file__))
MODELS_DIR = os.path.join(_PLUGIN_ROOT, "models")
CONFIG_DIR = os.path.join(_PLUGIN_ROOT, "config")
def inpaint_img_with_lama(
        img,
        mask,
        config_p: OmegaConf,
        model,
        mod=8,
        device="cuda"
):
    """Run a loaded LaMa model on one image/mask pair and return the inpainted result.

    Args:
        img: image tensor in channels-last layout (B, H, W, C) — the permute
            below requires a 4-D tensor with channels in the last axis.
        mask: 2-D mask tensor (H, W); non-zero pixels mark the region to inpaint.
            Assumed non-negative — TODO confirm against callers.
        config_p: loaded OmegaConf config; only ``out_key`` is read here, to
            select the model's output tensor.
        model: the LaMa model; called with a {'image', 'mask'} batch dict and
            expected to return a dict containing ``config_p.out_key``.
        mod: pad spatial dims up to a multiple of this (LaMa needs divisibility).
        device: device string/object the batch is moved to before inference.

    Returns:
        numpy array (H, W, C), float output of the model cropped back to the
        original spatial size. Values are NOT clipped/scaled to uint8.
    """
    batch = {}
    # (B, H, W, C) -> (B, C, H, W): the model expects channel-first input.
    batch['image'] = img.permute(0, 3, 1, 2)
    # Scale mask, then add batch and channel dims -> (1, 1, H, W). The exact
    # scale is irrelevant to the later `> 0` binarization; kept for parity.
    batch['mask'] = (mask * 255)[None, None]
    # Remember the original spatial size so the padding can be cropped off.
    orig_height, orig_width = batch['image'].shape[2], batch['image'].shape[3]
    # LaMa requires spatial dims divisible by `mod`; pad both tensors alike.
    batch['image'] = pad_tensor_to_modulo(batch['image'], mod)
    batch['mask'] = pad_tensor_to_modulo(batch['mask'], mod)
    # Inference only: skip autograd bookkeeping to save memory.
    with torch.no_grad():
        batch = move_to_device(batch, device)
        # Binarize the (possibly interpolated) padded mask to exact 0/1.
        batch['mask'] = (batch['mask'] > 0) * 1
        batch = model(batch)
        # Take the first (only) batch item, back to channels-last (H, W, C).
        cur_res = batch[config_p.out_key][0].permute(1, 2, 0)
    cur_res = cur_res.detach().cpu().numpy()
    # Crop the modulo padding back off.
    cur_res = cur_res[:orig_height, :orig_width]
    return cur_res
class LamaApply:
    """ComfyUI node: remove the masked region of an image with a loaded LaMa model."""

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "lama_remove"
    CATEGORY = "lama"

    @classmethod
    def INPUT_TYPES(cls):
        required = {
            "image": ("IMAGE",),
            "mask": ("MASK",),
            "lama": ("LAMA",),
            "config": ("YAML_CONFIG",),
        }
        return {"required": required}

    def lama_remove(self, image, mask, config, lama):
        # Prefer the GPU when one is available; fall back to CPU otherwise.
        target = "cuda" if torch.cuda.is_available() else "cpu"
        inpainted = inpaint_img_with_lama(image, mask, config, lama, device=target)
        # Back to a batched torch tensor (1, H, W, C) for downstream nodes.
        return (torch.from_numpy(inpainted)[None,],)
class LamaModelLoader:
    """ComfyUI node: load the LaMa checkpoint named by a YAML config.

    Note: mutates the passed-in config in place (predict-only mode, no-op
    visualizer) and returns it alongside the frozen model.
    """

    RETURN_TYPES = ("LAMA", "YAML_CONFIG")
    FUNCTION = "load_lama"
    CATEGORY = "lama"

    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {"config": ("YAML_CONFIG",)}}

    def load_lama(self, config):
        run_device = torch.device(config.device)
        # Inference-only setup: disable training-time behavior and visualization.
        config.training_model.predict_only = True
        config.visualizer.kind = 'noop'
        ckpt_path = os.path.join(MODELS_DIR, config.model.checkpoint)
        model = load_checkpoint(config, ckpt_path, strict=False)
        model.to(run_device)
        model.freeze()
        return (model, config)
class YamlConfigLoader:
    """ComfyUI node: list the .yaml files in CONFIG_DIR as a dropdown and load the pick."""

    RETURN_TYPES = ("YAML_CONFIG",)
    FUNCTION = "load_yaml"
    CATEGORY = "load_yaml"

    @classmethod
    def INPUT_TYPES(cls):
        # The dropdown choices are whatever YAML files ship in the config dir.
        choices = [name for name in os.listdir(CONFIG_DIR) if name.endswith('.yaml')]
        return {"required": {"yaml_config": (choices,)}}

    def load_yaml(self, yaml_config):
        return (OmegaConf.load(os.path.join(CONFIG_DIR, yaml_config)),)
# Export map consumed by ComfyUI: node key -> implementing class.
# NOTE: keys must be globally unique across all installed custom nodes.
_EXPORTED_NODES = (LamaModelLoader, LamaApply, YamlConfigLoader)

NODE_CLASS_MAPPINGS = {node.__name__: node for node in _EXPORTED_NODES}

# Human-readable titles shown in the ComfyUI node picker (same as the keys here).
NODE_DISPLAY_NAME_MAPPINGS = {node.__name__: node.__name__ for node in _EXPORTED_NODES}