# ysharma's picture
# ysharma HF Staff
# create app.py
# ee3b2de
# raw
# history blame
# 2.01 kB
import random

import PIL
import PIL.Image
import gradio as gr
import requests
import torch
from diffusers import StableDiffusionInstructPix2PixPipeline, EulerAncestralDiscreteScheduler
# Load InstructPix2Pix once at startup. fp16 weights and a disabled safety
# checker reduce VRAM; attention slicing trades some speed for lower memory.
model_id = "timbrooks/instruct-pix2pix"
pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(model_id, torch_dtype=torch.float16, revision="fp16", safety_checker=None)
pipe.to("cuda")
pipe.enable_attention_slicing()

# Number of chat() calls so far. After the first call, chat() iterates on the
# previously saved "edited_image.png" instead of the original upload.
counter = 0
def chat(image_in, message, history):
    """Apply one InstructPix2Pix edit described by *message* to the image.

    On the first call the uploaded ``image_in`` is edited; on later calls the
    previously saved ``edited_image.png`` is re-edited so edits accumulate.
    Returns ``(history, history)`` to feed both the Chatbot and the State
    output components.
    """
    global counter
    counter += 1
    if counter > 1:
        # Iterate on the last saved result rather than the original upload.
        # Bug fix: the original called the undefined name `Image.open`.
        image_in = PIL.Image.open("./edited_image.png")
    prompt = message  # e.g. "turn him into cyborg"
    edited_image = pipe(prompt, image=image_in, num_inference_steps=20, image_guidance_scale=1).images[0]
    # Persist the result so the next call (and the <img> tag below) can use it.
    edited_image.save("edited_image.png")
    history = history or []
    add_text_list = ["There you go", "Enjoy your image!", "Nice work! Wonder what you gonna do next!", "Way to go!", "Does this work for you?", "Something like this?"]
    # Embed the saved file via Gradio's /file= route, resized for chat display.
    response = random.choice(add_text_list) + '<img src="/file=edited_image.png" style="width: 200px; height: 200px;">'
    history.append((message, response))
    return history, history
# Minimal Gradio UI: an image upload plus an instruction textbox feed chat();
# results appear in the Chatbot pane as inline HTML <img> tags.
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            image_in = gr.Image(type='pil', label="Original Image")
            text_in = gr.Textbox()
            state_in = gr.State()
            b1 = gr.Button('Edit the image!')
        chatbot = gr.Chatbot()
    # (image, instruction, prior history) -> chat -> (chatbot display, history state)
    b1.click(chat, [image_in, text_in, state_in], [chatbot, state_in])
demo.launch(debug=True, width="80%", height=1500)