import gradio as gr
from diffusers import StableDiffusionPipeline
import torch
import huggingface_hub as hf
import os

# Read the list of suggested hairstyles, one per line.
with open("./style_name.txt", "r") as my_file:
    data = my_file.read()

# Split the text wherever a newline ('\n') appears;
# the trailing empty entry after the last newline is dropped.
data_into_list = data.split("\n")[:-1]

# Log in with a Hugging Face access token before loading the private model.
hf.login(token=os.environ['model_token'])
def text_to_hair(prompt, guidance_scale=8, num_inference_steps=30, styles=data_into_list, model_path="DiningSystem/hair-model2"):
    # 'styles' receives the dropdown selection but is shown only for reference;
    # it does not influence generation.
    # Load the base Stable Diffusion pipeline (model id taken from the 'bmd' env var),
    # then apply the fine-tuned LoRA attention weights from model_path.
    # Note: the pipeline is re-created on every request; loading it once at module
    # level would make repeated calls much faster.
    pipe = StableDiffusionPipeline.from_pretrained(os.environ['bmd'], torch_dtype=torch.float16, use_auth_token=True)
    pipe.unet.load_attn_procs(model_path)
    pipe.to("cuda")
    image = pipe(prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale).images[0]
    # image.save(save_name)  # uncomment (and define save_name) to also save the image to disk
    return image  # PIL.Image

demo = gr.Interface(
    fn=text_to_hair,
    inputs=[
        "text",
        gr.Slider(5, 20, value=8, label="Guidance scale", info="Choose between 5 and 20 to control how closely the image follows the prompt"),
        gr.Slider(20, 500, value=20, label="Num inference steps", info="Choose between 20 and 500 to improve image quality (more steps are slower)"),
        gr.Dropdown(data_into_list, interactive=True, label="Suggested hairstyles", info="For your reference and inspiration!"),
    ],
    outputs="image")
demo.launch()
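# If the app is run locally rather than on Spaces, demo.launch(share=True)
# can be used to expose a temporary public URL for quick sharing.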