forked from sborquez/VQGAN_CLIP_docker
-
Notifications
You must be signed in to change notification settings - Fork 0
/
extra.py
79 lines (72 loc) · 1.82 KB
/
extra.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
# Utils
import os
import sys
import math
import warnings
from pathlib import Path
from tqdm import tqdm
from tqdm.notebook import tqdm as tqdm_notebook
from base64 import b64encode
import imageio
from IPython.core.display import display, HTML, Image as DImage
#import wandb as wandb TODO: add Weight and Biases
import datetime
__utils__ = [
"os", "sys", "Path", "tqdm", "tqdm_notebook", "display", "HTML", "DImage",
"datetime", "imageio", "b64encode", "warnings", "math"
]
# DataScience-CPU
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from PIL import Image
__cpu_all__ = [
"plt", "sns", "np", "pd", "Image"
]
# DataScience-GPU
try:
import cupy as cp
import cudf
import cuml
__gpu_all__ = ["cp", "cudf", "cuml"]
except:
__gpu_all__ = []
#warnings.warn("GPU skipped!")
# Distributed DataScience
try:
import dask_cudf
__dist_all__ = ["dask_cudf"]
except:
__dist_all__ = []
#warnings.warn("Dask skipped!")
# VQGAN+CLIP: the core model/runtime stack. Unlike the optional blocks
# above, a failure here is fatal for this module.
try:
    from omegaconf import OmegaConf
    # taming-transformers is expected as a sibling checkout, not an
    # installed package, so it must be put on sys.path manually.
    sys.path.append('..')
    sys.path.append('../taming-transformers')
    from taming.models import cond_transformer, vqgan
    import torch
    from torch import nn, optim
    from torch.nn import functional as F
    from torchvision import transforms
    from torchvision.transforms import functional as TF
    from CLIP import clip
    import kornia.augmentation as K
    __vqgan_clip__ = [
        "OmegaConf", "clip", "K", "TF", "F", "transforms",
        "nn", "optim", "torch", "cond_transformer", "vqgan",
    ]
except Exception as err:
    # Was a bare `except:` that re-raised a fresh OSError, hiding which
    # import actually failed. Chain the cause so the traceback shows it.
    __vqgan_clip__ = []
    raise OSError("Can't import VQGAN+CLIP requirements.") from err
def reset_kernel():
    """Hard-restart the hosting Jupyter kernel by terminating the process.

    ``os._exit`` exits immediately without running cleanup handlers; the
    notebook server then respawns the kernel. This function never returns.
    """
    # Original used `os._exit(00)` — same value, but `00` reads like an
    # octal literal; plain 0 is clearer.
    os._exit(0)
# Public API for `from extra import *`: local helpers plus every name
# collected by the import sections above, in the same order as before.
__all__ = [
    "reset_kernel",
    *__utils__,
    *__cpu_all__,
    *__gpu_all__,
    *__dist_all__,
    *__vqgan_clip__,
]