Update app.py
app.py CHANGED
@@ -5,8 +5,12 @@ from PIL import Image
 import gradio as gr
 from datetime import datetime
 
-#
-api_token = os.getenv("
+# Debugging: Check if the Hugging Face token is available
+api_token = os.getenv("HF_TOKEN")
+if not api_token:
+    print("ERROR: Hugging Face token (HF_TOKEN) is missing. Please set it as an environment variable.")
+else:
+    print("Hugging Face token loaded successfully.")
 
 # List of models with aliases
 models = [
@@ -61,6 +65,10 @@ prompts = [
 
 # Function to generate images
 def generate_image(prompt_alias, team, model_alias, height, width, num_inference_steps, guidance_scale, seed):
+    # Debugging: Check if the token is available
+    if not api_token:
+        return None, "ERROR: Hugging Face token (HF_TOKEN) is missing. Please set it as an environment variable."
+
     # Find the selected prompt and model
     prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"]
     model_name = next(m for m in models if m["alias"] == model_alias)["name"]
@@ -82,21 +90,24 @@ def generate_image(prompt_alias, team, model_alias, height, width, num_inference
     client = InferenceClient(model_name, token=api_token)
 
     # Generate the image
-    image = client.text_to_image(
-        prompt,
-        guidance_scale=guidance_scale,
-        num_inference_steps=num_inference_steps,
-        width=width,
-        height=height,
-        seed=seed
-    )
+    try:
+        image = client.text_to_image(
+            prompt,
+            guidance_scale=guidance_scale,
+            num_inference_steps=num_inference_steps,
+            width=width,
+            height=height,
+            seed=seed
+        )
+    except Exception as e:
+        return None, f"ERROR: Failed to generate image. Details: {e}"
 
     # Save the image with a timestamped filename
     timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
     output_filename = f"{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team.lower()}.png"
     image.save(output_filename)
 
-    return output_filename
+    return output_filename, "Image generated successfully!"
 
 # Gradio Interface
 with gr.Blocks() as demo:
@@ -115,20 +126,31 @@ with gr.Blocks() as demo:
         generate_button = gr.Button("Generate Image")
     with gr.Row():
         output_image = gr.Image(label="Generated Image")
+    with gr.Row():
+        status_text = gr.Textbox(label="Status", placeholder="Waiting for input...", interactive=False)
 
     # Function to handle button click
     def generate(prompt_alias, team, model_alias, height, width, num_inference_steps, guidance_scale, seed):
         try:
-
-
+            # Update status to indicate rendering
+            status_text.update("Rendering image... Please wait.")
+            yield None, "Rendering image... Please wait."  # Yield to update the UI
+
+            # Generate the image
+            image_path, message = generate_image(prompt_alias, team, model_alias, height, width, num_inference_steps, guidance_scale, seed)
+
+            # Update status and return the image
+            status_text.update(message)
+            yield image_path, message
         except Exception as e:
-
+            status_text.update(f"An error occurred: {e}")
+            yield None, f"An error occurred: {e}"
 
     # Connect the button to the function
     generate_button.click(
         generate,
         inputs=[prompt_dropdown, team_dropdown, model_dropdown, height_input, width_input, num_inference_steps_input, guidance_scale_input, seed_input],
-        outputs=output_image
+        outputs=[output_image, status_text]
     )
 
 # Launch the Gradio app
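
The rewritten generate() handler is a generator: Gradio pushes each yielded (image, message) pair to the components listed in outputs=[output_image, status_text], which is what makes the interim "Rendering image... Please wait." status appear before the final image. The sketch below is not part of the commit; it is a minimal, self-contained illustration of that yield-driven update pattern, and every name in it (slow_task, result_box, status_box, run_button) is a placeholder.

# Minimal sketch of Gradio's generator-based output updates (illustrative only;
# slow_task, result_box, status_box and run_button are placeholder names).
import time
import gradio as gr

def slow_task():
    # The first yield reaches the UI immediately as an interim status.
    yield None, "Working... please wait."
    time.sleep(2)  # stand-in for the real image-generation call
    # The final yield replaces the interim values with the finished result.
    yield "done", "Finished!"

with gr.Blocks() as demo:
    result_box = gr.Textbox(label="Result")
    status_box = gr.Textbox(label="Status", interactive=False)
    run_button = gr.Button("Run")
    # Each yielded tuple is mapped, in order, onto the outputs list.
    run_button.click(slow_task, inputs=[], outputs=[result_box, status_box])

demo.launch()

In this model the values a handler returns or yields are what Gradio writes to the output components, so the interim status in the diff is driven by the yield statements; the status_text.update(...) calls do not by themselves push anything to the browser.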
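On the generate_image() side, the diff appears to use huggingface_hub's InferenceClient; its text_to_image method returns a PIL.Image.Image, which is why the code can call image.save(output_filename) directly on the result. Below is a stripped-down sketch of that flow under stated assumptions: the model id, prompt, and parameter values are placeholders rather than entries from the app's model and prompt lists, and an HF_TOKEN environment variable (for example a Space secret) is assumed to be set.

# Illustrative only: placeholder model id, prompt, and parameters;
# assumes HF_TOKEN is available in the environment.
import os
from huggingface_hub import InferenceClient

client = InferenceClient("stabilityai/stable-diffusion-xl-base-1.0",
                         token=os.getenv("HF_TOKEN"))

image = client.text_to_image(
    "a lighthouse at dusk, oil painting",
    width=1024,
    height=1024,
    num_inference_steps=30,
    guidance_scale=7.5,
)
image.save("example.png")  # text_to_image returns a PIL.Image.Image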