File size: 1,577 Bytes
8726b78
 
 
 
 
 
 
 
 
 
 
c0231df
8726b78
 
 
 
 
 
9a53bc3
8726b78
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
from diffusers import DiffusionPipeline
import torch
from controlnet_aux import OpenposeDetector
from diffusers import UniPCMultistepScheduler
from PIL import Image
import requests
from io import BytesIO
import gradio as gr

# --- One-time model setup (runs at import; downloads weights on first use) ---

# Pose estimator: converts an input photo into an OpenPose skeleton image.
model = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")

# ControlNet conditioned on OpenPose skeletons.
controlnet = ControlNetModel.from_pretrained("fusing/stable-diffusion-v1-5-controlnet-openpose")

model_id = "Jccqqqqq/Personajes"
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    model_id,
    controlnet=controlnet,
    # BUGFIX: was torch.float16, but the pipeline is moved to CPU below and
    # fp16 inference is not generally supported on CPU — use full precision.
    torch_dtype=torch.float32,
)
pipe.to("cpu")
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
# BUGFIX: removed pipe.enable_model_cpu_offload() — it requires accelerate
# and a CUDA device, and contradicts the explicit .to("cpu") above.
# (Also dropped the unused module-level `generators = []` dead variable.)
def generate(prompt, inputimage, seed, steps, negative):
  """Generate an image from a text prompt, pose-conditioned on a reference photo.

  Args:
    prompt: positive text prompt for Stable Diffusion.
    inputimage: URL of a reference image; its OpenPose skeleton is extracted
      and used as the ControlNet conditioning image.
    seed: integer seed for the CPU torch.Generator (reproducible output).
    steps: number of denoising inference steps.
    negative: negative prompt text.

  Returns:
    A PIL.Image with the generated result.

  Raises:
    requests.HTTPError: if the image URL does not return a 2xx response.
  """
  generator = torch.Generator(device="cpu").manual_seed(seed)
  # BUGFIX: add a timeout (the original could hang forever) and fail fast on
  # bad URLs instead of handing an HTML error page to PIL.
  response = requests.get(inputimage, timeout=30)
  response.raise_for_status()
  img = Image.open(BytesIO(response.content))
  # Extract the OpenPose skeleton used as the conditioning image.
  img = model(img)
  # Debug artifact from the original Colab notebook ("/content" only exists
  # there); best-effort — skip silently when the path is not writable.
  try:
    img.save("/content/test.png")
  except OSError:
    pass
  image = pipe(
      prompt,
      img,
      negative_prompt=negative,
      generator=generator,
      num_inference_steps=steps,
  ).images[0]
  return image
demo = gr.Interface(fn=generate, inputs=[gr.Textbox(placeholder="Prompt"),gr.Textbox(placeholder="Image"), gr.Number(precision=0, label="seed"), gr.Number(precision=0, label="steps", value=20),gr.Textbox(placeholder="Negative Prompt",value="monochrome, lowres, bad anatomy, worst quality, low quality")], outputs=gr.Image(type="pil"), title="test")

if __name__ == "__main__":
    # Enable request queuing, then start the web server (blocking, with
    # debug logging; inline rendering disabled since this is not a notebook).
    demo.queue().launch(inline=False, debug=True)