import time
import numpy as np
import torch
from torch.utils.data import DataLoader
from torchvision import transforms
import sys
import os
import cv2
import random
from transformers import CLIPImageProcessor

# make the repo root importable so the `toolkit` package resolves
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from toolkit.paths import SD_SCRIPTS_ROOT
import torchvision.transforms.functional
from toolkit.image_utils import show_img, show_tensors

# sd-scripts must be on sys.path before importing from its `library` package
sys.path.append(SD_SCRIPTS_ROOT)

from library.model_util import load_vae
from toolkit.data_transfer_object.data_loader import DataLoaderBatchDTO
from toolkit.data_loader import AiToolkitDataset, get_dataloader_from_datasets, \
    trigger_dataloader_setup_epoch
from toolkit.config_modules import DatasetConfig
import argparse
from tqdm import tqdm

parser = argparse.ArgumentParser()
# nargs='?' is required for a positional argument's default to take effect
parser.add_argument('dataset_folder', type=str, nargs='?', default='input')
parser.add_argument('--epochs', type=int, default=1)

args = parser.parse_args()
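
# Example invocation (paths are placeholders):
#   python this_script.py /path/to/images --epochs 2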

dataset_folder = args.dataset_folder
resolution = 1024
bucket_tolerance = 64
batch_size = 1

clip_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch16")
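
# get_dataloader_from_datasets expects an `sd` object; only
# `sd.adapter.clip_image_processor` appears to be needed here, so these
# minimal stubs stand in for a full StableDiffusion instance.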
class FakeAdapter:
    def __init__(self):
        self.clip_image_processor = clip_processor


class FakeSD:
    def __init__(self):
        self.adapter = FakeAdapter()
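
# buckets=True groups images into aspect-ratio buckets; bucket_tolerance (64)
# appears to set the pixel step between bucket side lengths, so batches stack
# without forcing every image to one square resolution (see
# toolkit.data_loader for the exact rules).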
dataset_config = DatasetConfig(
    dataset_path=dataset_folder,
    resolution=resolution,
    default_caption='default',
    buckets=True,
    bucket_tolerance=bucket_tolerance,
)

dataloader: DataLoader = get_dataloader_from_datasets([dataset_config], batch_size=batch_size, sd=FakeSD())
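
# Walk the dataset, displaying each batch so bucketing/cropping can be
# inspected visually.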
for epoch in range(args.epochs):
    for batch in tqdm(dataloader):
        batch: 'DataLoaderBatchDTO'
        img_batch = batch.tensor
        # note: `bs` rather than `batch_size`, to avoid shadowing the global
        bs, channels, height, width = img_batch.shape

        big_img = img_batch
        show_tensors(big_img)

        # brief pause so each batch stays on screen
        time.sleep(0.2)

    # re-run epoch/bucket setup so the next pass reshuffles the dataset
    if epoch < args.epochs - 1:
        trigger_dataloader_setup_epoch(dataloader)

cv2.destroyAllWindows()
print('done')