Spaces · Sleeping
Andre committed · e1fd06b
1 Parent(s): 0fe0f24
Update
Files changed:
- img_gen_logic copy.py  +0 −65
- img_gen_logic.py  +61 −27
img_gen_logic copy.py
DELETED
@@ -1,65 +0,0 @@
# img_gen_logic.py
import random
from huggingface_hub import InferenceClient
from PIL import Image
from datetime import datetime
from config import api_token, models, prompts  # Direct import


def generate_image(prompt_alias, team, model_alias, custom_prompt, height=360, width=640, num_inference_steps=20, guidance_scale=2.0, seed=-1):
    # Debugging: Check if the token is available
    if not api_token:
        return None, "ERROR: Hugging Face token (HF_CTB_TOKEN) is missing. Please set it as an environment variable."

    # Find the selected prompt and model
    try:
        prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"]
        model_name = next(m for m in models if m["alias"] == model_alias)["name"]
    except StopIteration:
        return None, "ERROR: Invalid prompt or model selected."

    # Determine the enemy color
    enemy_color = "blue" if team.lower() == "red" else "red"
    prompt = prompt.format(enemy_color=enemy_color)

    if team.lower() == "red":
        prompt += " The winning army is dressed in red armor and banners."
    elif team.lower() == "blue":
        prompt += " The winning army is dressed in blue armor and banners."

    # Append the custom prompt (if provided)
    if custom_prompt and len(custom_prompt.strip()) > 0:
        prompt += " " + custom_prompt.strip()

    # Randomize the seed if needed
    if seed == -1:
        seed = random.randint(0, 1000000)

    # Initialize the InferenceClient
    try:
        client = InferenceClient(model_name, token=api_token)
    except Exception as e:
        return None, f"ERROR: Failed to initialize InferenceClient. Details: {e}"

    # Generate the image
    try:
        image = client.text_to_image(
            prompt,
            guidance_scale=guidance_scale,
            num_inference_steps=num_inference_steps,
            width=width,
            height=height,
            seed=seed
        )
    except Exception as e:
        return None, f"ERROR: Failed to generate image. Details: {e}"

    # Save the image with a timestamped filename
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    output_filename = f"{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team.lower()}.png"
    try:
        image.save(output_filename)
    except Exception as e:
        return None, f"ERROR: Failed to save image. Details: {e}"

    return output_filename, "Image generated successfully!"
img_gen_logic.py
CHANGED
@@ -1,31 +1,65 @@
# img_gen_logic.py
import random
from huggingface_hub import InferenceClient
from PIL import Image
from datetime import datetime
from config import api_token, models, prompts  # Direct import


def generate_image(prompt_alias, team, model_alias, custom_prompt, height=360, width=640, num_inference_steps=20, guidance_scale=2.0, seed=-1):
    # Debugging: Check if the token is available
    if not api_token:
        return None, "ERROR: Hugging Face token (HF_CTB_TOKEN) is missing. Please set it as an environment variable."

    # Find the selected prompt and model
    try:
        prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"]
        model_name = next(m for m in models if m["alias"] == model_alias)["name"]
    except StopIteration:
        return None, "ERROR: Invalid prompt or model selected."

    # Determine the enemy color
    enemy_color = "blue" if team.lower() == "red" else "red"
    prompt = prompt.format(enemy_color=enemy_color)

    if team.lower() == "red":
        prompt += " The winning army is dressed in red armor and banners."
    elif team.lower() == "blue":
        prompt += " The winning army is dressed in blue armor and banners."

    # Append the custom prompt (if provided)
    if custom_prompt and len(custom_prompt.strip()) > 0:
        prompt += " " + custom_prompt.strip()

    # Randomize the seed if needed
    if seed == -1:
        seed = random.randint(0, 1000000)

    # Initialize the InferenceClient
    try:
        client = InferenceClient(model_name, token=api_token)
    except Exception as e:
        return None, f"ERROR: Failed to initialize InferenceClient. Details: {e}"

    # Generate the image
    try:
        image = client.text_to_image(
            prompt,
            guidance_scale=guidance_scale,
            num_inference_steps=num_inference_steps,
            width=width,
            height=height,
            seed=seed
        )
    except Exception as e:
        return None, f"ERROR: Failed to generate image. Details: {e}"

    # Save the image with a timestamped filename
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    output_filename = f"{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team.lower()}.png"
    try:
        image.save(output_filename)
    except Exception as e:
        return None, f"ERROR: Failed to save image. Details: {e}"

    return output_filename, "Image generated successfully!"
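
For reference, a minimal sketch of the config module this file imports from, plus an example call. The actual config.py is not part of this commit, so the token lookup, the model entry, and the prompt entry below are illustrative assumptions; only the dict keys ("alias", "name", "text") and the HF_CTB_TOKEN variable name come from the code above.

# config.py — hypothetical sketch; the Space's real config may differ
import os

api_token = os.environ.get("HF_CTB_TOKEN")  # assumed: token read from the environment

models = [
    # illustrative entry; any text-to-image model hosted on the Hub would fit
    {"alias": "FLUX.1-dev", "name": "black-forest-labs/FLUX.1-dev"},
]

prompts = [
    # illustrative entry; the text must contain {enemy_color} for prompt.format() above
    {"alias": "castle siege",
     "text": "A victorious army storms a castle defended by soldiers in {enemy_color} armor."},
]

# Example call (aliases must match entries in config.py; these match the illustrative ones above)
filename, message = generate_image(
    prompt_alias="castle siege",
    team="red",
    model_alias="FLUX.1-dev",
    custom_prompt="dramatic sunset lighting",
)
print(message, filename)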