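"""Gradio Space: conditional image generation with a pretrained GauGAN model."""
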
import numpy as np
import tensorflow as tf
from PIL import Image
import gradio as gr
from huggingface_hub import from_pretrained_keras
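
# Load the pretrained GauGAN generator from the Hugging Face Hub.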
model = from_pretrained_keras("RobotJelly/GauGAN-Image-generation")
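
# predict() follows the preprocessing in the Keras GauGAN example linked in
# the article below: decode, normalize to [-1, 1], jointly random-crop to
# 256x256, one-hot the labels, then sample a latent vector and run the
# generator.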
def predict(image_file, segmentation_png, bitmap_img):
    # Decode and normalize the real photo to [-1, 1]. decode_image is used
    # (rather than decode_png) so both JPEG and PNG uploads decode cleanly.
    image = tf.io.decode_image(tf.io.read_file(image_file), channels=3, expand_animations=False)
    image = tf.cast(image, tf.float32) / 127.5 - 1
    # Decode and normalize the segmentation map the same way.
    segmentation_file = tf.image.decode_png(tf.io.read_file(segmentation_png), channels=3)
    segmentation_file = tf.cast(segmentation_file, tf.float32) / 127.5 - 1
    # The bitmap holds per-pixel integer class labels; drop the channel axis.
    label_file = tf.image.decode_bmp(tf.io.read_file(bitmap_img), channels=0)
    label_file = tf.squeeze(label_file)
    image_list = [segmentation_file, image, label_file]
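    # Take the same random 256x256 crop from the photo, segmentation map,
    # and labels so the three stay spatially aligned.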
    crop_size = tf.convert_to_tensor((256, 256))
    image_shape = tf.shape(image)[:2]
    margins = image_shape - crop_size
    y1 = tf.random.uniform(shape=(), maxval=margins[0], dtype=tf.int32)
    x1 = tf.random.uniform(shape=(), maxval=margins[1], dtype=tf.int32)
    y2 = y1 + crop_size[0]
    x2 = x1 + crop_size[1]
    cropped_images = [img[y1:y2, x1:x2] for img in image_list]
    # Add a batch dimension and one-hot the labels into the 12 facade classes.
    final_img_list = [
        tf.expand_dims(cropped_images[0], axis=0),
        tf.expand_dims(cropped_images[1], axis=0),
        tf.expand_dims(tf.one_hot(cropped_images[2], 12), axis=0),
    ]
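    # Sample a random latent vector and generate an image conditioned on
    # the one-hot segmentation labels.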
    latent_vector = tf.random.normal(shape=(1, 256), mean=0.0, stddev=2.0)
    fake_image = model.predict([latent_vector, final_img_list[2]])
    # The generator outputs values in [-1, 1]; rescale to uint8 in [0, 255]
    # before handing the array to PIL.
    fake = Image.fromarray(((fake_image[0] + 1) / 2 * 255).astype(np.uint8))
    return fake
# Inputs: the real photo, its segmentation map, and its label bitmap.
inputs = [gr.inputs.Image(type="filepath", label="Ground Truth - Real Image (jpg)"),
          gr.inputs.Image(type="filepath", label="Segmented image (png)"),
          gr.inputs.Image(type="filepath", label="Corresponding bitmap image (bmp)")]
# Example (photo, segmentation map, bitmap) triplets.
examples = [["facades_data/cmp_b0010.jpg", "facades_data/cmp_b0010.png", "facades_data/cmp_b0010.bmp"],
            ["facades_data/cmp_b0020.jpg", "facades_data/cmp_b0020.png", "facades_data/cmp_b0020.bmp"],
            ["facades_data/cmp_b0030.jpg", "facades_data/cmp_b0030.png", "facades_data/cmp_b0030.bmp"],
            ["facades_data/cmp_b0040.jpg", "facades_data/cmp_b0040.png", "facades_data/cmp_b0040.bmp"],
            ["facades_data/cmp_b0050.jpg", "facades_data/cmp_b0050.png", "facades_data/cmp_b0050.bmp"]]
# Output: the generated image.
outputs = [gr.outputs.Image(type="pil", label="Generated - Conditioned Images")]
title = "GauGAN For Conditional Image Generation"
description = "Upload an image, or pick one of the examples, to generate a realistic image conditioned on a cue image and its segmentation map."
gr.Interface(fn=predict, inputs=inputs, outputs=outputs, examples=examples, allow_flagging=False, analytics_enabled=False,
             title=title, description=description,
             article="<center>Space By: <u><a href='https://github.com/robotjellyzone'><b>Kavya Bisht</b></a></u> \n Based on <a href='https://keras.io/examples/generative/gaugan/'><b>this notebook</b></a></center>").launch(enable_queue=True, debug=True)