Andre committed on
Commit
0fe0f24
·
1 Parent(s): f601bfb
Files changed (3) hide show
  1. colab.ipynb +17 -13
  2. img_gen_logic copy.py +65 -0
  3. img_gen_logic.py +27 -61
colab.ipynb CHANGED
@@ -15,7 +15,6 @@
15
  "from img_gen_logic import generate_image\n",
16
  "from config_colab import models, prompts, api_token\n",
17
  "\n",
18
- "\n",
19
  "# Initialize the InferenceClient with the default model\n",
20
  "client = InferenceClient(models[0][\"name\"], token=api_token)\n",
21
  "\n",
@@ -110,6 +109,8 @@
110
  "def on_generate_button_clicked(b):\n",
111
  " with output:\n",
112
  " clear_output(wait=True) # Clear previous output\n",
 
 
113
  " selected_prompt = prompt_dropdown.value\n",
114
  " selected_team = team_dropdown.value\n",
115
  " selected_model = model_dropdown.value\n",
@@ -121,17 +122,6 @@
121
  " custom_prompt = custom_prompt_input.value\n",
122
  "\n",
123
  " # Debug: Show selected parameters\n",
124
- " print(f\"Selected Model: {model_dropdown.label}\")\n",
125
- " print(f\"Selected Prompt: {prompt_dropdown.label}\")\n",
126
- " print(f\"Selected Team: {selected_team}\")\n",
127
- " print(f\"Height: {height}\")\n",
128
- " print(f\"Width: {width}\")\n",
129
- " print(f\"Inference Steps: {num_inference_steps}\")\n",
130
- " print(f\"Guidance Scale: {guidance_scale}\")\n",
131
- " print(f\"Seed: {seed}\")\n",
132
- " print(f\"Custom Prompt: {custom_prompt}\")\n",
133
- "\n",
134
- " # Debug: Show selected parameters\n",
135
  " print(\"=== Debug: Selected Parameters ===\")\n",
136
  " print(f\"Selected Model: {selected_model} (Alias: {model_dropdown.label})\")\n",
137
  " print(f\"Selected Prompt: {selected_prompt} (Alias: {prompt_dropdown.label})\")\n",
@@ -143,18 +133,32 @@
143
  " print(f\"Seed: {seed}\")\n",
144
  " print(f\"Custom Prompt: {custom_prompt}\")\n",
145
  " print(\"==================================\")\n",
 
146
  " # Generate the image\n",
 
147
  " image, message = generate_image(selected_prompt, selected_team, selected_model, height, width, num_inference_steps, guidance_scale, seed, custom_prompt)\n",
148
  "\n",
 
 
 
 
 
 
149
  " if isinstance(image, str):\n",
 
150
  " print(image)\n",
151
  " else:\n",
152
  " # Debug: Indicate that the image is being displayed and saved\n",
 
153
  " print(\"Image generated successfully!\")\n",
154
  " print(\"Displaying image...\")\n",
155
  "\n",
156
  " # Display the image in the notebook\n",
157
- " display(image)\n",
 
 
 
 
158
  "\n",
159
  "# Attach the button click event handler\n",
160
  "generate_button.on_click(on_generate_button_clicked)\n",
 
15
  "from img_gen_logic import generate_image\n",
16
  "from config_colab import models, prompts, api_token\n",
17
  "\n",
 
18
  "# Initialize the InferenceClient with the default model\n",
19
  "client = InferenceClient(models[0][\"name\"], token=api_token)\n",
20
  "\n",
 
109
  "def on_generate_button_clicked(b):\n",
110
  " with output:\n",
111
  " clear_output(wait=True) # Clear previous output\n",
112
+ "\n",
113
+ " # Get selected values from widgets\n",
114
  " selected_prompt = prompt_dropdown.value\n",
115
  " selected_team = team_dropdown.value\n",
116
  " selected_model = model_dropdown.value\n",
 
122
  " custom_prompt = custom_prompt_input.value\n",
123
  "\n",
124
  " # Debug: Show selected parameters\n",
 
 
 
 
 
 
 
 
 
 
 
125
  " print(\"=== Debug: Selected Parameters ===\")\n",
126
  " print(f\"Selected Model: {selected_model} (Alias: {model_dropdown.label})\")\n",
127
  " print(f\"Selected Prompt: {selected_prompt} (Alias: {prompt_dropdown.label})\")\n",
 
133
  " print(f\"Seed: {seed}\")\n",
134
  " print(f\"Custom Prompt: {custom_prompt}\")\n",
135
  " print(\"==================================\")\n",
136
+ "\n",
137
  " # Generate the image\n",
138
+ " print(\"=== Debug: Calling generate_image ===\")\n",
139
  " image, message = generate_image(selected_prompt, selected_team, selected_model, height, width, num_inference_steps, guidance_scale, seed, custom_prompt)\n",
140
  "\n",
141
+ " # Debug: Check the output of generate_image\n",
142
+ " print(\"=== Debug: generate_image Output ===\")\n",
143
+ " print(f\"Image: {image}\")\n",
144
+ " print(f\"Message: {message}\")\n",
145
+ " print(\"====================================\")\n",
146
+ "\n",
147
  " if isinstance(image, str):\n",
148
+ " print(\"=== Debug: Error ===\")\n",
149
  " print(image)\n",
150
  " else:\n",
151
  " # Debug: Indicate that the image is being displayed and saved\n",
152
+ " print(\"=== Debug: Image Generation ===\")\n",
153
  " print(\"Image generated successfully!\")\n",
154
  " print(\"Displaying image...\")\n",
155
  "\n",
156
  " # Display the image in the notebook\n",
157
+ " if image is not None:\n",
158
+ " display(image)\n",
159
+ " else:\n",
160
+ " print(\"=== Debug: Error ===\")\n",
161
+ " print(\"No image was returned by generate_image.\")\n",
162
  "\n",
163
  "# Attach the button click event handler\n",
164
  "generate_button.on_click(on_generate_button_clicked)\n",
img_gen_logic copy.py ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # img_gen_logic.py
2
+ import random
3
+ from huggingface_hub import InferenceClient
4
+ from PIL import Image
5
+ from datetime import datetime
6
+ from config import api_token, models, prompts # Direct import
7
+
8
+
9
def generate_image(prompt_alias, team, model_alias, custom_prompt, height=360, width=640, num_inference_steps=20, guidance_scale=2.0, seed=-1):
    """Resolve aliases, assemble the final prompt, and render it via the HF Inference API.

    Returns ``(filename, message)`` on success, or ``(None, error_message)``
    on any failure (missing token, unknown alias, client init, generation,
    or save error).
    """
    # Bail out early when no API token is configured.
    if not api_token:
        return None, "ERROR: Hugging Face token (HF_CTB_TOKEN) is missing. Please set it as an environment variable."

    # Map the human-readable aliases onto the real prompt text / model id.
    try:
        prompt = next(entry for entry in prompts if entry["alias"] == prompt_alias)["text"]
        model_name = next(entry for entry in models if entry["alias"] == model_alias)["name"]
    except StopIteration:
        return None, "ERROR: Invalid prompt or model selected."

    team_key = team.lower()
    # The opposing side is simply the other of the two team colors.
    prompt = prompt.format(enemy_color="blue" if team_key == "red" else "red")
    if team_key == "red":
        prompt += " The winning army is dressed in red armor and banners."
    elif team_key == "blue":
        prompt += " The winning army is dressed in blue armor and banners."

    # Append any non-blank custom prompt text supplied by the user.
    extra = custom_prompt.strip() if custom_prompt else ""
    if extra:
        prompt += " " + extra

    # -1 means "pick a seed for me".
    if seed == -1:
        seed = random.randint(0, 1000000)

    try:
        client = InferenceClient(model_name, token=api_token)
    except Exception as e:
        return None, f"ERROR: Failed to initialize InferenceClient. Details: {e}"

    try:
        image = client.text_to_image(
            prompt,
            guidance_scale=guidance_scale,
            num_inference_steps=num_inference_steps,
            width=width,
            height=height,
            seed=seed,
        )
    except Exception as e:
        return None, f"ERROR: Failed to generate image. Details: {e}"

    def _slug(text):
        # Filesystem-friendly form of an alias: spaces -> underscores, lowercased.
        return text.replace(" ", "_").lower()

    # Timestamped filename keeps successive generations from clobbering each other.
    stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    output_filename = f"{stamp}_{_slug(model_alias)}_{_slug(prompt_alias)}_{team_key}.png"
    try:
        image.save(output_filename)
    except Exception as e:
        return None, f"ERROR: Failed to save image. Details: {e}"

    return output_filename, "Image generated successfully!"
img_gen_logic.py CHANGED
@@ -1,65 +1,31 @@
1
  # img_gen_logic.py
2
- import random
3
- from huggingface_hub import InferenceClient
4
  from PIL import Image
5
- from datetime import datetime
6
- from config import api_token, models, prompts # Direct import
7
-
8
-
9
- def generate_image(prompt_alias, team, model_alias, custom_prompt, height=360, width=640, num_inference_steps=20, guidance_scale=2.0, seed=-1):
10
- # Debugging: Check if the token is available
11
- if not api_token:
12
- return None, "ERROR: Hugging Face token (HF_CTB_TOKEN) is missing. Please set it as an environment variable."
13
-
14
- # Find the selected prompt and model
15
- try:
16
- prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"]
17
- model_name = next(m for m in models if m["alias"] == model_alias)["name"]
18
- except StopIteration:
19
- return None, "ERROR: Invalid prompt or model selected."
20
-
21
- # Determine the enemy color
22
- enemy_color = "blue" if team.lower() == "red" else "red"
23
- prompt = prompt.format(enemy_color=enemy_color)
24
-
25
- if team.lower() == "red":
26
- prompt += " The winning army is dressed in red armor and banners."
27
- elif team.lower() == "blue":
28
- prompt += " The winning army is dressed in blue armor and banners."
29
-
30
- # Append the custom prompt (if provided)
31
- if custom_prompt and len(custom_prompt.strip()) > 0:
32
- prompt += " " + custom_prompt.strip()
33
-
34
- # Randomize the seed if needed
35
- if seed == -1:
36
- seed = random.randint(0, 1000000)
37
-
38
- # Initialize the InferenceClient
39
- try:
40
- client = InferenceClient(model_name, token=api_token)
41
- except Exception as e:
42
- return None, f"ERROR: Failed to initialize InferenceClient. Details: {e}"
43
-
44
- # Generate the image
45
  try:
46
- image = client.text_to_image(
47
- prompt,
48
- guidance_scale=guidance_scale,
49
- num_inference_steps=num_inference_steps,
50
- width=width,
51
- height=height,
52
- seed=seed
53
- )
 
54
  except Exception as e:
55
- return None, f"ERROR: Failed to generate image. Details: {e}"
56
-
57
- # Save the image with a timestamped filename
58
- timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
59
- output_filename = f"{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team.lower()}.png"
60
- try:
61
- image.save(output_filename)
62
- except Exception as e:
63
- return None, f"ERROR: Failed to save image. Details: {e}"
64
-
65
- return output_filename, "Image generated successfully!"
 
1
  # img_gen_logic.py
 
 
2
  from PIL import Image
3
+ import numpy as np
4
+
5
def generate_image(prompt, team, model, height, width, num_inference_steps, guidance_scale, seed, custom_prompt):
    """Simulate image generation and return ``(image, message)``.

    This is a debug stand-in for the real inference call: on success it
    returns a random RGB PIL Image of shape (height, width) plus a status
    string; on failure it returns an error STRING as the first element and
    ``None`` as the second (callers detect errors via ``isinstance(image, str)``).
    """
    # Debug: echo every parameter so notebook users can see what arrived.
    print("=== Debug: Inside generate_image ===")
    print(f"Prompt: {prompt}")
    print(f"Team: {team}")
    print(f"Model: {model}")
    print(f"Height: {height}")
    print(f"Width: {width}")
    print(f"Inference Steps: {num_inference_steps}")
    print(f"Guidance Scale: {guidance_scale}")
    print(f"Seed: {seed}")
    print(f"Custom Prompt: {custom_prompt}")

    # Simulate API call or image generation logic
    try:
        # Replace this with your actual image generation logic
        print("=== Debug: Simulating API Call ===")
        if not prompt:
            return "Error: Prompt is required.", None
        # Honor the seed so repeated calls reproduce the same placeholder
        # image (the original ignored `seed` entirely); -1/None means
        # "random", matching the convention used elsewhere in this project.
        rng = np.random.default_rng(None if seed in (None, -1) else seed)
        image = Image.fromarray(rng.integers(0, 255, (height, width, 3), dtype=np.uint8))
        return image, "Image generated successfully."
    except Exception as e:
        # Plain string: the original used an f-string with no placeholders.
        print("=== Debug: Error in generate_image ===")
        print(str(e))
        return str(e), None