andresampa committed · Commit 6886623 · verified · 1 Parent(s): 0b3232a

fix bugs on generate function

Files changed (1): app.py (+20, -17)
app.py CHANGED
@@ -5,10 +5,12 @@ from PIL import Image
 import gradio as gr
 from datetime import datetime
 
+# Retrieve the Hugging Face token from environment variables
+api_token = os.getenv("HF_TOKEN")
+
 # Debugging: Check if the Hugging Face token is available
-api_token = os.getenv("HF_CTB_TOKEN")
 if not api_token:
-    print("ERROR: Hugging Face token (HF_CTB_TOKEN) is missing. Please set it as an environment variable.")
+    print("ERROR: Hugging Face token (HF_TOKEN) is missing. Please set it as an environment variable.")
 else:
     print("Hugging Face token loaded successfully.")
 
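The hunk above renames the token variable from HF_CTB_TOKEN to HF_TOKEN. If any existing deployment still sets the old secret name, a small fallback keeps both working; this is a minimal sketch, not part of the commit:

```python
import os

# Minimal sketch (not in this commit): prefer the new HF_TOKEN name but fall
# back to the legacy HF_CTB_TOKEN so older Space secrets keep working.
api_token = os.getenv("HF_TOKEN") or os.getenv("HF_CTB_TOKEN")
if not api_token:
    print("ERROR: Hugging Face token (HF_TOKEN) is missing. Please set it as an environment variable.")
else:
    print("Hugging Face token loaded successfully.")
```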
 
@@ -67,11 +69,14 @@ prompts = [
 def generate_image(prompt_alias, team, model_alias, height, width, num_inference_steps, guidance_scale, seed):
     # Debugging: Check if the token is available
     if not api_token:
-        return None, "ERROR: Hugging Face token (HF_CTB_TOKEN) is missing. Please set it as an environment variable."
+        return None, "ERROR: Hugging Face token (HF_TOKEN) is missing. Please set it as an environment variable."
 
     # Find the selected prompt and model
-    prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"]
-    model_name = next(m for m in models if m["alias"] == model_alias)["name"]
+    try:
+        prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"]
+        model_name = next(m for m in models if m["alias"] == model_alias)["name"]
+    except StopIteration:
+        return None, "ERROR: Invalid prompt or model selected."
 
     # Determine the enemy color
     enemy_color = "blue" if team.lower() == "red" else "red"
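The try/except StopIteration added above guards the two next() lookups. An equivalent guard passes a default to next() instead of catching the exception; find_prompt_and_model below is a hypothetical helper used only to illustrate the pattern, not a function in app.py:

```python
def find_prompt_and_model(prompt_alias, model_alias, prompts, models):
    """Hypothetical helper: look up the prompt text and model name without
    relying on StopIteration, by giving next() a default of None."""
    prompt_entry = next((p for p in prompts if p["alias"] == prompt_alias), None)
    model_entry = next((m for m in models if m["alias"] == model_alias), None)
    if prompt_entry is None or model_entry is None:
        return None, None  # caller can map this to "ERROR: Invalid prompt or model selected."
    return prompt_entry["text"], model_entry["name"]
```
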
@@ -87,7 +92,10 @@ def generate_image(prompt_alias, team, model_alias, height, width, num_inference
         seed = random.randint(0, 1000000)
 
     # Initialize the InferenceClient
-    client = InferenceClient(model_name, token=api_token)
+    try:
+        client = InferenceClient(model_name, token=api_token)
+    except Exception as e:
+        return None, f"ERROR: Failed to initialize InferenceClient. Details: {e}"
 
     # Generate the image
     try:
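The new try/except around the constructor catches setup problems, though with huggingface_hub's InferenceClient most failures (bad token, unknown model) typically surface only when a request is made, so the existing try around the generation call remains the main safety net. Below is a rough sketch of such a guarded call; the render helper and its parameters are assumptions for illustration, not the code in app.py, and the exact text_to_image arguments may differ by huggingface_hub version:

```python
from huggingface_hub import InferenceClient

def render(prompt, model_name, api_token, height, width, num_inference_steps, guidance_scale):
    """Hypothetical sketch: create the client and guard the request itself,
    which is where most network/authentication errors are raised."""
    try:
        client = InferenceClient(model_name, token=api_token)
        image = client.text_to_image(
            prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
        )
    except Exception as e:
        return None, f"ERROR: Failed to generate image. Details: {e}"
    return image, "Image generated successfully!"
```
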
@@ -105,7 +113,10 @@ def generate_image(prompt_alias, team, model_alias, height, width, num_inference
     # Save the image with a timestamped filename
     timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
     output_filename = f"{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team.lower()}.png"
-    image.save(output_filename)
+    try:
+        image.save(output_filename)
+    except Exception as e:
+        return None, f"ERROR: Failed to save image. Details: {e}"
 
     return output_filename, "Image generated successfully!"
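image.save() is now wrapped so a failed write is reported back to the UI instead of raising. If the app is ever changed to write into a subfolder, creating that folder up front removes one likely cause of such failures; build_output_path is a hypothetical variant of the filename logic above, not part of app.py:

```python
import os
from datetime import datetime

def build_output_path(model_alias, prompt_alias, team, out_dir="outputs"):
    """Hypothetical variant: put the timestamped file in a dedicated folder,
    creating it first so image.save() cannot fail on a missing directory."""
    os.makedirs(out_dir, exist_ok=True)
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    filename = (
        f"{timestamp}_{model_alias.replace(' ', '_').lower()}"
        f"_{prompt_alias.replace(' ', '_').lower()}_{team.lower()}.png"
    )
    return os.path.join(out_dir, filename)
```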
 
@@ -132,19 +143,11 @@ with gr.Blocks() as demo:
     # Function to handle button click
     def generate(prompt_alias, team, model_alias, height, width, num_inference_steps, guidance_scale, seed):
         try:
-            # Update status to indicate rendering
-            status_text.update("Rendering image... Please wait.")
-            yield None, "Rendering image... Please wait."  # Yield to update the UI
-
             # Generate the image
             image_path, message = generate_image(prompt_alias, team, model_alias, height, width, num_inference_steps, guidance_scale, seed)
-
-            # Update status and return the image
-            status_text.update(message)
-            yield image_path, message
+            return image_path, message
         except Exception as e:
-            status_text.update(f"An error occurred: {e}")
-            yield None, f"An error occurred: {e}"
+            return None, f"An error occurred: {e}"
 
     # Connect the button to the function
     generate_button.click(
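The hunk above removes the yield-based status updates: in Gradio, the values a handler returns (or yields) to its declared outputs are what update the UI, while calling status_text.update(...) inside the handler and discarding the result has no effect, so the plain return is the simpler fix. If the "Rendering image... Please wait." message is still wanted, the handler could instead stay a generator and yield intermediate values, which requires the demo's queue to be enabled; a sketch, assuming the button is wired with outputs=[output_image, status_text] where output_image is a placeholder name:

```python
# Sketch (not the committed version): keep the progress message by yielding
# intermediate values; each yielded tuple maps onto the click() outputs.
def generate(prompt_alias, team, model_alias, height, width, num_inference_steps, guidance_scale, seed):
    yield None, "Rendering image... Please wait."
    try:
        image_path, message = generate_image(prompt_alias, team, model_alias, height,
                                              width, num_inference_steps, guidance_scale, seed)
        yield image_path, message
    except Exception as e:
        yield None, f"An error occurred: {e}"
```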
 