Spaces:
Build error
Build error
import os | |
import random | |
import numpy as np | |
#from tqdm import tqdm | |
#import matplotlib.pyplot as plt | |
import tensorflow as tf | |
#import tensorflow_addons as tfa | |
#from tensorflow import keras | |
#from tensorflow.keras import layers | |
from glob import glob | |
from PIL import Image | |
import gradio as gr | |
from huggingface_hub import from_pretrained_keras | |
model = from_pretrained_keras("RobotJelly/GauGAN-Image-generation") | |
def predict(image_file): | |
# print(image_file) | |
# img = Image.open(image_file) | |
# image_file = str(img) | |
print("image_file-->", image_file) | |
image_list = [] | |
segmentation_map = image_file.replace("images", "segmentation_map").replace("jpg", "png") | |
labels = image_file.replace("images", "segmentation_labels").replace("jpg", "bmp") | |
print("labels", labels) | |
image_list = [segmentation_map, image_file, labels] | |
image = tf.image.decode_png(tf.io.read_file(image_list[1]), channels=3) | |
image = tf.cast(image, tf.float32) / 127.5 - 1 | |
segmentation_file = tf.image.decode_png(tf.io.read_file(image_list[0]), channels=3) | |
segmentation_file = tf.cast(segmentation_file, tf.float32)/127.5 - 1 | |
label_file = tf.image.decode_bmp(tf.io.read_file(image_list[2]), channels=0) | |
label_file = tf.squeeze(label_file) | |
image_list = [segmentation_file, image, label_file] | |
crop_size = tf.convert_to_tensor((256, 256)) | |
image_shape = tf.shape(image_list[1])[:2] | |
margins = image_shape - crop_size | |
y1 = tf.random.uniform(shape=(), maxval=margins[0], dtype=tf.int32) | |
x1 = tf.random.uniform(shape=(), maxval=margins[1], dtype=tf.int32) | |
y2 = y1 + crop_size[0] | |
x2 = x1 + crop_size[1] | |
cropped_images = [] | |
for img in image_list: | |
cropped_images.append(img[y1:y2, x1:x2]) | |
final_img_list = [tf.expand_dims(cropped_images[0], axis=0), tf.expand_dims(cropped_images[1], axis=0), tf.expand_dims(tf.one_hot(cropped_images[2], 12), axis=0)] | |
# print(final_img_list[0].shape) | |
# print(final_img_list[1].shape) | |
# print(final_img_list[2].shape) | |
latent_vector = tf.random.normal(shape=(1, 256), mean=0.0, stddev=2.0) | |
# Generate fake images | |
# fake_image = tf.squeeze(model.predict([latent_vector, final_img_list[2]]), axis=0) | |
fake_image = model.predict([latent_vector, final_img_list[2]]) | |
real_images = final_img_list | |
# return tf.squeeze(real_images[1], axis=0), fake_image | |
return [(real_images[0][0]+1)/2, (fake_image[0]+1)/2] | |
# input | |
input = [gr.inputs.Image(type="filepath", label="Ground Truth - Real Image")] | |
facades_data = [] | |
data_dir = 'examples/' | |
for idx, images in enumerate(os.listdir(data_dir)): | |
image = os.path.join(data_dir, images) | |
if os.path.isfile(image) and idx < 6: | |
facades_data.append(image) | |
# output | |
output = [gr.outputs.Image(type="numpy", label="Mask/Segmentation used"), gr.outputs.Image(type="numpy", label="Generated - Conditioned Images")] | |
title = "GauGAN For Conditional Image Generation" | |
description = "Upload an Image or take one from examples to generate realistic images that are conditioned on cue images and segmentation maps" | |
gr.Interface(fn=predict, inputs = input, outputs = output, examples=facades_data, allow_flagging=False, analytics_enabled=False, | |
title=title, description=description, article="<center>Space By: <u><a href='https://github.com/robotjellyzone'><b>Kavya Bisht</b></a></u> \n Based on <a href='https://keras.io/examples/generative/gaugan/'><b>this notebook</b></a></center>").launch(enable_queue=True, debug=True) |