Andre committed on
Commit
e1fd06b
·
1 Parent(s): 0fe0f24
Files changed (2) hide show
  1. img_gen_logic copy.py +0 -65
  2. img_gen_logic.py +61 -27
img_gen_logic copy.py DELETED
@@ -1,65 +0,0 @@
1
- # img_gen_logic.py
2
- import random
3
- from huggingface_hub import InferenceClient
4
- from PIL import Image
5
- from datetime import datetime
6
- from config import api_token, models, prompts # Direct import
7
-
8
-
9
def generate_image(prompt_alias, team, model_alias, custom_prompt, height=360, width=640, num_inference_steps=20, guidance_scale=2.0, seed=-1):
    """Build the final prompt, call the HF Inference API and save the result.

    Returns (output_filename, message) on success, or (None, error_message)
    on any failure.
    """
    # Without a token there is nothing to call -- bail out immediately.
    if not api_token:
        return None, "ERROR: Hugging Face token (HF_CTB_TOKEN) is missing. Please set it as an environment variable."

    # Map the aliases onto the configured prompt text and model name.
    prompt_entry = next((p for p in prompts if p["alias"] == prompt_alias), None)
    model_entry = next((m for m in models if m["alias"] == model_alias), None)
    if prompt_entry is None or model_entry is None:
        return None, "ERROR: Invalid prompt or model selected."
    prompt = prompt_entry["text"]
    model_name = model_entry["name"]

    # The defeated side wears the opposite colour of the winning team.
    winner = team.lower()
    enemy_color = "blue" if winner == "red" else "red"
    prompt = prompt.format(enemy_color=enemy_color)

    if winner == "red":
        prompt += " The winning army is dressed in red armor and banners."
    elif winner == "blue":
        prompt += " The winning army is dressed in blue armor and banners."

    # Tack on any user-supplied prompt text.
    if custom_prompt and custom_prompt.strip():
        prompt += " " + custom_prompt.strip()

    # A seed of -1 means "pick one at random".
    if seed == -1:
        seed = random.randint(0, 1000000)

    try:
        client = InferenceClient(model_name, token=api_token)
    except Exception as e:
        return None, f"ERROR: Failed to initialize InferenceClient. Details: {e}"

    try:
        image = client.text_to_image(
            prompt,
            guidance_scale=guidance_scale,
            num_inference_steps=num_inference_steps,
            width=width,
            height=height,
            seed=seed,
        )
    except Exception as e:
        return None, f"ERROR: Failed to generate image. Details: {e}"

    # Timestamped, slug-style filename so successive runs never collide.
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    output_filename = f"{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{winner}.png"
    try:
        image.save(output_filename)
    except Exception as e:
        return None, f"ERROR: Failed to save image. Details: {e}"

    return output_filename, "Image generated successfully!"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
img_gen_logic.py CHANGED
@@ -1,31 +1,65 @@
1
  # img_gen_logic.py
 
 
2
  from PIL import Image
3
- import numpy as np
4
-
5
def generate_image(prompt, team, model, height, width, num_inference_steps, guidance_scale, seed, custom_prompt):
    """Stand-in generator: logs every argument, then returns a random image.

    Returns (PIL.Image, success_message) on success, or (error_string, None)
    when the prompt is missing or anything raises.
    """
    print("=== Debug: Inside generate_image ===")
    # Dump every incoming argument so the caller's wiring can be inspected.
    for label, value in (
        ("Prompt", prompt),
        ("Team", team),
        ("Model", model),
        ("Height", height),
        ("Width", width),
        ("Inference Steps", num_inference_steps),
        ("Guidance Scale", guidance_scale),
        ("Seed", seed),
        ("Custom Prompt", custom_prompt),
    ):
        print(f"{label}: {value}")

    # Simulate API call or image generation logic
    try:
        # Replace this with your actual image generation logic
        print("=== Debug: Simulating API Call ===")
        if not prompt:
            return "Error: Prompt is required.", None
        # Simulate a successful generation with random pixel noise.
        pixels = np.random.randint(0, 255, (height, width, 3), dtype=np.uint8)
        return Image.fromarray(pixels), "Image generated successfully."
    except Exception as e:
        print("=== Debug: Error in generate_image ===")
        print(str(e))
        return str(e), None
 
 
 
 
 
 
 
 
 
1
  # img_gen_logic.py
2
+ import random
3
+ from huggingface_hub import InferenceClient
4
  from PIL import Image
5
+ from datetime import datetime
6
+ from config import api_token, models, prompts # Direct import
7
+
8
+
9
def generate_image(prompt_alias, team, model_alias, custom_prompt, height=360, width=640, num_inference_steps=20, guidance_scale=2.0, seed=-1):
    """Generate a team-themed image through the Hugging Face Inference API.

    Args:
        prompt_alias: Alias of a prompt entry in ``config.prompts``.
        team: Winning team, "red" or "blue" (case-insensitive).
        model_alias: Alias of a model entry in ``config.models``.
        custom_prompt: Optional extra text appended to the prompt.
        height, width: Output image size in pixels.
        num_inference_steps: Diffusion steps passed to the model.
        guidance_scale: Classifier-free guidance strength.
        seed: RNG seed; -1 means "pick a random seed".

    Returns:
        (output_filename, success_message) on success, or
        (None, error_message) on any failure -- this function never raises.
    """
    # The HF token is required for every remote call; fail fast without it.
    if not api_token:
        return None, "ERROR: Hugging Face token (HF_CTB_TOKEN) is missing. Please set it as an environment variable."

    # Resolve the aliases against the configured prompt/model tables.
    try:
        prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"]
        model_name = next(m for m in models if m["alias"] == model_alias)["name"]
    except StopIteration:
        return None, "ERROR: Invalid prompt or model selected."

    # Normalize once; the team string is compared several times below.
    team_key = team.lower()
    enemy_color = "blue" if team_key == "red" else "red"

    # FIX: a config-supplied template with stray braces used to raise
    # KeyError/IndexError/ValueError out of .format(); keep the
    # (None, error_message) contract instead.
    try:
        prompt = prompt.format(enemy_color=enemy_color)
    except (KeyError, IndexError, ValueError) as e:
        return None, f"ERROR: Invalid prompt or model selected."

    if team_key == "red":
        prompt += " The winning army is dressed in red armor and banners."
    elif team_key == "blue":
        prompt += " The winning army is dressed in blue armor and banners."

    # Append the custom prompt (if provided).
    if custom_prompt and custom_prompt.strip():
        prompt += " " + custom_prompt.strip()

    # A seed of -1 means "randomize".
    if seed == -1:
        seed = random.randint(0, 1000000)

    # Initialize the InferenceClient.
    try:
        client = InferenceClient(model_name, token=api_token)
    except Exception as e:
        return None, f"ERROR: Failed to initialize InferenceClient. Details: {e}"

    # Generate the image remotely.
    try:
        image = client.text_to_image(
            prompt,
            guidance_scale=guidance_scale,
            num_inference_steps=num_inference_steps,
            width=width,
            height=height,
            seed=seed,
        )
    except Exception as e:
        return None, f"ERROR: Failed to generate image. Details: {e}"

    # Save with a timestamped, slug-style filename so runs never collide.
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    output_filename = f"{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team_key}.png"
    try:
        image.save(output_filename)
    except Exception as e:
        return None, f"ERROR: Failed to save image. Details: {e}"

    return output_filename, "Image generated successfully!"