Andre committed · Commit e1d6915 · Parent(s): ad23bc7
“Update”
Browse files
- __pycache__/example-hello-world.cpython-310.pyc +0 -0
- app.py +1 -1
- ctb-modal.py +24 -0
- example-hello-world.py +30 -0
- index.html +0 -0
- src/download_flux_modal.py +34 -0
- src/img_gen_modal.py +88 -0
__pycache__/example-hello-world.cpython-310.pyc
ADDED
Binary file (728 Bytes).
app.py
CHANGED
@@ -1,5 +1,5 @@
 # app.py
-#
+#IMPORT gradio_interface
 from src.gradio_interface import demo
 
 # Launch the Gradio app
ctb-modal.py
ADDED
@@ -0,0 +1,24 @@
+# modal_app.py
+import modal
+#IMPORT gradio_interface
+from src.gradio_interface import demo
+
+# Create a Modal app
+app = modal.App("ctb-image-generator")
+
+# Define the Modal image
+image = modal.Image.debian_slim().pip_install(
+    "diffusers",
+    "transformers",
+    "torch",
+    "accelerate",
+    "gradio"
+)
+
+@app.local_entrypoint()
+def main():
+    with modal.enable_output():
+        demo.launch()
+
+if __name__ == "__main__":
+    main()
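Two observations for anyone trying to run this file. @app.local_entrypoint() functions are normally invoked through the Modal CLI (modal run ctb-modal.py); the __main__ guard here calls main() directly, which just executes the body as plain Python, and since that body only calls demo.launch(), the Gradio app starts locally either way. Also, the image defined above is never attached to any function in this file, so the pip_install list has no effect yet. A minimal, hypothetical sketch of an attachment (the function name and body are illustrative, not part of the commit):

    # Hypothetical attachment: nothing in the committed file uses `image`.
    @app.function(image=image)
    def smoke_test():
        import torch  # installed in the container by pip_install above
        print(torch.__version__)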
example-hello-world.py
ADDED
@@ -0,0 +1,30 @@
+import sys
+
+import modal
+
+app = modal.App("example-hello-world")
+
+@app.function()
+def f(i):
+    if i % 2 == 0:
+        print("hello", i)
+    else:
+        print("world", i, file=sys.stderr)
+
+    return i * i
+
+@app.local_entrypoint()
+def main():
+    # run the function locally
+    print(f.local(1000))
+
+    # run the function remotely on Modal
+    print(f.remote(1000))
+
+    # run the function in parallel and remotely on Modal
+    total = 0
+    for ret in f.map(range(200)):
+        total += ret
+
+    print(total)
+
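For reference, f.local(1000) and f.remote(1000) each print 1000000, and the .map() loop sums the squares of 0 through 199. A quick sanity check of that total in plain Python (not part of the commit):

    # Expected result of the .map() loop: f returns i * i,
    # so the total is the sum of squares below 200.
    total = sum(i * i for i in range(200))
    print(total)  # 2646700

With Modal installed and authenticated, the script runs with: modal run example-hello-world.py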
index.html
ADDED
File without changes
src/download_flux_modal.py
ADDED
@@ -0,0 +1,34 @@
+import modal
+from pathlib import Path
+
+# Create or get existing volume
+volume = modal.Volume.from_name("flux-model-vol", create_if_missing=True)
+MODEL_DIR = Path("/models")
+
+# Set up image with dependencies
+download_image = (
+    modal.Image.debian_slim()
+    .pip_install("huggingface_hub[hf_transfer]")
+    .env({"HF_HUB_ENABLE_HF_TRANSFER": "1"})  # Enable fast Rust download client
+)
+
+# Create Modal app
+app = modal.App("flux-model-setup")
+
+@app.function(
+    volumes={MODEL_DIR: volume},
+    image=download_image,
+)
+def download_flux():
+    from huggingface_hub import snapshot_download
+
+    repo_id = "black-forest-labs/FLUX.1-dev"
+    snapshot_download(
+        repo_id=repo_id,
+        local_dir=MODEL_DIR / repo_id.split("/")[1]
+    )
+    print(f"FLUX model downloaded to {MODEL_DIR / repo_id.split('/')[1]}")
+
+@app.local_entrypoint()
+def main():
+    download_flux()
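One caveat: black-forest-labs/FLUX.1-dev is a gated repository on Hugging Face, so snapshot_download generally needs an access token or it will fail with a 401/403. A minimal sketch of one way to supply it, assuming a Modal secret named "huggingface-secret" that exposes an HF_TOKEN environment variable (both names are assumptions, not part of the commit):

    import os

    # Assumed secret name; it must exist in your Modal workspace and
    # expose HF_TOKEN. Adjust both names to your setup.
    @app.function(
        volumes={MODEL_DIR: volume},
        image=download_image,
        secrets=[modal.Secret.from_name("huggingface-secret")],
    )
    def download_flux():
        from huggingface_hub import snapshot_download

        repo_id = "black-forest-labs/FLUX.1-dev"
        snapshot_download(
            repo_id=repo_id,
            local_dir=MODEL_DIR / repo_id.split("/")[1],
            token=os.environ["HF_TOKEN"],  # injected by the secret
        )

Note also that main() calls download_flux() bare; depending on the Modal version, such direct calls are rejected in favor of the explicit download_flux.remote().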
src/img_gen_modal.py
ADDED
@@ -0,0 +1,88 @@
+#img_gen_modal.py
+import modal
+import sys
+import os
+import random
+from datetime import datetime
+import random
+import io
+
+
+@app.function(
+    image=image,
+    #gpu="T4",
+    timeout=600
+)
+def generate_image(prompt_alias, team_color, model_alias, custom_prompt, height=360, width=640,
+                   num_inference_steps=20, guidance_scale=2.0, seed=-1):
+    import torch
+    from diffusers import StableDiffusionPipeline
+
+    # Find the selected prompt and model
+    try:
+        prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"]
+        model_name = next(m for m in models if m["alias"] == model_alias)["name"]
+    except StopIteration:
+        return None, "ERROR: Invalid prompt or model selected."
+
+    # Determine the enemy color
+    enemy_color = "blue" if team_color.lower() == "red" else "red"
+
+    # Print the original prompt and dynamic values for debugging
+    print("Original Prompt:")
+    print(prompt)
+    print(f"Enemy Color: {enemy_color}")
+    print(f"Team Color: {team_color.lower()}")
+
+    # Format the prompt
+    prompt = prompt.format(team_color=team_color.lower(), enemy_color=enemy_color)
+
+    # Print the formatted prompt for debugging
+    print("\nFormatted Prompt:")
+    print(prompt)
+
+    # Append custom prompt if provided
+    if custom_prompt and len(custom_prompt.strip()) > 0:
+        prompt += " " + custom_prompt.strip()
+
+    # Randomize seed if needed
+    if seed == -1:
+        seed = random.randint(0, 1000000)
+
+    # Initialize the pipeline
+    pipe = StableDiffusionPipeline.from_pretrained(
+        model_name,
+        torch_dtype=torch.float16,
+        use_safetensors=True,
+        variant="fp16"
+    )
+    pipe.to("cpu")
+
+    # Generate the image
+    try:
+        image = pipe(
+            prompt,
+            guidance_scale=guidance_scale,
+            num_inference_steps=num_inference_steps,
+            width=width,
+            height=height,
+            generator=torch.Generator("cuda").manual_seed(seed)
+        ).images[0]
+
+        # Convert PIL image to bytes
+        img_byte_arr = io.BytesIO()
+        image.save(img_byte_arr, format='PNG')
+        img_byte_arr = img_byte_arr.getvalue()
+    except Exception as e:
+        return None, f"ERROR: Failed to generate image. Details: {e}"
+
+    # Save the image with a timestamped filename
+    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+    output_filename = f"{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team_color.lower()}.png"
+    try:
+        image.save(output_filename)
+    except Exception as e:
+        return img_byte_arr, "Image generated successfully!"
+    except Exception as e:
+        return None, f"ERROR: Failed to generate image. Details: {e}"
+    return output_filename, "Image generated successfully!"
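As committed, this module cannot import: app, image, prompts, and models are referenced but never defined or imported in the file, and import random appears twice. There is also a device mismatch: the pipeline is moved to CPU (with float16 weights, which CPU inference generally handles poorly) while the generator is created on CUDA, which raises at generation time. Finally, in the save block the first except clause reports "Image generated successfully!" on a save failure, and the second except clause is unreachable. A minimal sketch of the module-level definitions the file appears to assume; every name and value below is a hypothetical placeholder, not the author's code:

    import modal

    # Hypothetical definitions that img_gen_modal.py references but never makes.
    app = modal.App("ctb-image-generator")
    image = modal.Image.debian_slim().pip_install(
        "diffusers", "transformers", "torch", "accelerate"
    )

    # Placeholder lookup tables with the shape generate_image() expects.
    prompts = [{"alias": "castle", "text": "A {team_color} castle besieged by a {enemy_color} army"}]
    models = [{"alias": "sd-1.5", "name": "runwayml/stable-diffusion-v1-5"}]

And one way to keep the pipeline and the RNG on a single device (again a sketch, not the committed code):

    device = "cuda" if torch.cuda.is_available() else "cpu"
    pipe.to(device)
    generator = torch.Generator(device).manual_seed(seed)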