diff --git a/.DS_Store b/.DS_Store index 77852193b550c4672d0be04af66d4cf36898c61d..a53e1a864701015ec0d72f5821183f76782f106d 100644 Binary files a/.DS_Store and b/.DS_Store differ diff --git a/.gitattributes b/.gitattributes index e2b65bfe0846f60fd83e4a63330f3360e636854c..a6344aac8c09253b3b630fb776ae94478aa0275b 100644 --- a/.gitattributes +++ b/.gitattributes @@ -33,125 +33,3 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.zip filter=lfs diff=lfs merge=lfs -text *.zst filter=lfs diff=lfs merge=lfs -text *tfevents* filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250130_204829_flux.1-dev_forest_battle_red.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250130_205034_flux.1-dev_night_battle_red.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250130_212304_flux.1-dev_night_battle_red.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250130_212349_flux.1-dev_snowy_battlefield_red.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250130_212424_flux.1-dev_siege_tower_attack_red.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250131_023325_flux.1-dev_castle_siege_red.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250131_023633_flux.1-dev_marching_army_red.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250131_030037_flux.1-dev_night_battle_red.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250131_041200_flux.1-dev_castle_siege_red.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250131_041614_flux.1-dev_castle_siege_red.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250131_042042_flux.1-dev_night_battle_red.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250131_042801_flux.1-dev_forest_battle_red.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250131_045958_flux.1-dev_forest_battle_red.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250131_050217_flux.1-dev_forest_battle_red.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250131_050622_flux.1-dev_boiling_oil_defense_red.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250131_051056_flux.1-dev_boiling_oil_defense_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250131_051644_flux.1-dev_boiling_oil_defense_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250131_051838_flux.1-dev_boiling_oil_defense_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250131_051952_flux.1-dev_boiling_oil_defense_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250131_052036_flux.1-dev_boiling_oil_defense_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250131_052105_flux.1-dev_boiling_oil_defense_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250131_052324_flux.1-dev_boiling_oil_defense_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250131_064901_flux.1-dev_boiling_oil_defense_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250131_065801_flux.1-dev_snowy_battlefield_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250131_070041_flux.1-dev_snowy_battlefield_blue.png filter=lfs diff=lfs merge=lfs -text 
-modal_volume_download/images/20250131_070255_flux.1-dev_snowy_battlefield_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250131_075043_flux.1-dev_snowy_battlefield_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250131_075252_flux.1-dev_snowy_battlefield_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250131_082122_flux.1-dev_snowy_battlefield_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250131_082224_flux.1-dev_snowy_battlefield_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250131_082246_flux.1-dev_snowy_battlefield_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250131_082344_flux.1-dev_snowy_battlefield_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250131_082404_flux.1-dev_snowy_battlefield_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250131_082444_flux.1-dev_snowy_battlefield_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250131_082522_flux.1-dev_snowy_battlefield_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250131_082553_flux.1-dev_snowy_battlefield_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250131_082629_flux.1-dev_siege_tower_attack_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250131_084919_flux.1-dev_siege_tower_attack_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250131_145354_flux.1-dev_forest_battle_red.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250131_145558_flux.1-dev_forest_battle_red.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250131_145646_flux.1-dev_forest_battle_red.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250131_145715_flux.1-dev_forest_battle_red.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250131_145739_flux.1-dev_forest_battle_red.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250131_145756_flux.1-dev_forest_battle_red.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250131_145828_flux.1-dev_marching_army_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250131_145850_flux.1-dev_siege_tower_attack_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250131_145911_flux.1-dev_knight_duel_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250201_231219_flux.1-dev_castle_siege_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250201_231312_flux.1-dev_castle_siege_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250201_231354_flux.1-dev_castle_siege_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250201_231432_flux.1-dev_castle_siege_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250201_234155_flux.1-dev_castle_siege_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250201_234234_flux.1-dev_castle_siege_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250202_012316_flux.1-dev_castle_siege_red.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250202_012456_flux.1-dev_castle_siege_red.png filter=lfs diff=lfs merge=lfs -text 
-modal_volume_download/images/20250202_012802_flux.1-dev_castle_siege_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250202_012814_flux.1-dev_castle_siege_red.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250202_013404_flux.1-dev_castle_siege_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250202_014853_flux.1-dev_castle_siege_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250202_020010_flux.1-dev_castle_siege_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250202_021449_flux.1-dev_castle_siege_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250202_021744_flux.1-dev_castle_siege_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250202_021941_flux.1-dev_castle_siege_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250202_220825_flux.1-dev_castle_siege_red.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250202_220856_flux.1-dev_castle_siege_red.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250203_002848_flux.1-dev_castle_siege_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250203_003810_flux.1-dev_castle_siege_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250203_005215_flux.1-dev_modal_local_night_battle_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250203_011632_flux.1-dev_modal_local_castle_siege_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250203_011659_flux.1-dev_modal_local_castle_siege_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250203_033539_flux.1-dev_modal_local_castle_siege_red.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250203_033552_flux.1-dev_modal_local_castle_siege_red.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250203_050713_flux.1-dev_modal_local_forest_battle_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250203_050755_flux.1-dev_modal_local_forest_battle_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250203_053946_flux.1-dev_modal_local_forest_battle_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250203_054200_flux.1-dev_modal_local_forest_battle_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250203_054637_flux.1-dev_modal_local_forest_battle_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250203_054716_flux.1-dev_modal_local_forest_battle_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250203_054734_flux.1-dev_modal_local_siege_tower_attack_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250203_054752_flux.1-dev_modal_local_night_battle_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250203_054807_flux.1-dev_modal_local_marching_army_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250203_054826_flux.1-dev_modal_local_siege_tower_attack_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250203_054859_flux.1-dev_modal_local_siege_tower_attack_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250203_154822_flux.1-dev_modal_local_siege_tower_attack_blue.png filter=lfs diff=lfs 
merge=lfs -text -modal_volume_download/images/20250203_171357_flux.1-dev_modal_local_forest_battle_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250203_180759_flux.1-dev_modal_local_castle_siege_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250203_185242_flux.1-dev_modal_local_snowy_battlefield_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250203_203049_flux.1-dev_modal_local_castle_siege_red.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250203_203628_flux.1-dev_modal_local_castle_siege_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250203_212222_flux.1-dev_modal_local_castle_siege_red.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250203_212454_flux.1-dev_modal_local_castle_siege_red.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250203_222431_flux.1-dev_modal_local_forest_battle_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250203_225712_flux.1-dev_modal_local_forest_battle_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250204_012912_flux.1-dev_modal_local_castle_siege_red.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250204_030437_flux.1-dev_modal_local_night_battle_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250204_030944_flux.1-dev_modal_local_night_battle_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250204_031139_flux.1-dev_modal_local_night_battle_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250204_031547_flux.1-dev_modal_local_castle_siege_red.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250204_031935_flux.1-dev_modal_local_castle_siege_red.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250204_032132_flux.1-dev_modal_local_night_battle_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250206_020153_flux.1-dev_modal_local_boiling_oil_defense_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250209_053636_flux.1-dev_modal_local_burning_castle_battle_red.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250209_053658_flux.1-dev_modal_local_burning_castle_battle_red.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250209_172043_flux.1-dev_modal_local_burning_castle_battle_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250209_172355_flux.1-dev_modal_local_burning_castle_battle_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250209_182544_flux.1-dev_modal_local_castle_siege_red.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250210_030137_flux.1-dev_modal_local_burning_castle_battle_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250210_030229_flux.1-dev_modal_local_burning_castle_battle_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250210_030309_flux.1-dev_modal_local_burning_castle_battle_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250210_030518_flux.1-dev_modal_local_burning_castle_battle_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250210_030540_flux.1-dev_modal_local_burning_castle_battle_blue.png filter=lfs diff=lfs merge=lfs -text 
-modal_volume_download/images/20250210_030605_flux.1-dev_modal_local_burning_castle_battle_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250210_030714_flux.1-dev_modal_local_burning_castle_battle_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250210_030813_flux.1-dev_modal_local_burning_castle_battle_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250210_030855_flux.1-dev_modal_local_burning_castle_battle_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250210_030942_flux.1-dev_modal_local_burning_castle_battle_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250210_031150_flux.1-dev_modal_local_burning_castle_battle_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250210_031204_flux.1-dev_modal_local_burning_castle_battle_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250210_031219_flux.1-dev_modal_local_burning_castle_battle_blue.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250210_042321_flux.1-dev_modal_local_castle_siege_red.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250210_042352_flux.1-dev_modal_local_castle_siege_red.png filter=lfs diff=lfs merge=lfs -text -modal_volume_download/images/20250210_042552_flux.1-dev_modal_local_castle_siege_red.png filter=lfs diff=lfs merge=lfs -text diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..bcc762d7be34e0746bb7c4caefb827d721c93705 --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +venv/ +modal_volume_download/ \ No newline at end of file diff --git a/__pycache__/example-hello-world.cpython-310.pyc b/__pycache__/example-hello-world.cpython-310.pyc deleted file mode 100644 index c4b0a67342fd185b89afde3dc374f104df095178..0000000000000000000000000000000000000000 Binary files a/__pycache__/example-hello-world.cpython-310.pyc and /dev/null differ diff --git a/app.py b/app.py index 42d4277195ea339f32dbe07961365f04521541bc..bf889ff98715c3d33eff30409395bc8fd56b1fc8 100644 --- a/app.py +++ b/app.py @@ -1,6 +1,51 @@ # app.py -#IMPORT gradio_interface -from src.gradio_interface import demo +from config.config import models, prompts, api_token # Direct import +import gradio as gr +from src.img_gen import generate_image +import base64 -# Launch the Gradio app -demo.launch() +# Gradio Interface +def gradio_interface(): + # LOAD CUSTOM CSS + try: + with open("config/layout.css", "r") as f: + custom_css = f.read() + except FileNotFoundError: + print("Error: config/layout.css not found!") + custom_css = "" # Or provide default CSS + + with gr.Blocks(css=custom_css) as demo: + gr.Markdown("# CtB AI Image Generator - Inference version (HF)") + with gr.Row(): + # Set default values for dropdowns + prompt_dropdown = gr.Dropdown(choices=[p["alias"] for p in prompts], label="Select Prompt", value=prompts[0]["alias"]) + team_dropdown = gr.Dropdown(choices=["Red", "Blue"], label="Select Team", value="Red") + #model_dropdown = gr.Dropdown(choices=[m["alias"] for m in models], label="Select Model", value=models[0]["alias"]) + with gr.Row(): + # Add a text box for custom user input (max 200 characters) + custom_prompt_input = gr.Textbox(label="Custom Prompt (Optional)", placeholder="Enter additional details (max 200 chars)...", max_lines=1, max_length=200) + with gr.Row(): + generate_button = gr.Button("Generate Image") + with gr.Row(): + output_image = 
gr.Image(elem_classes="output-image", label="Generated Image", show_label=False, scale=1, width="100%") + with gr.Row(): + status_text = gr.Textbox(label="Status", placeholder="Waiting for input...", interactive=False) + + # Connect the button to the function + generate_button.click( + generate_image, + inputs=[prompt_dropdown, + team_dropdown, + custom_prompt_input, + #model_dropdown, + ], + outputs=[output_image, status_text] + ) + return demo + +# Create the demo instance +demo = gradio_interface() + +# Only launch if running directly +if __name__ == "__main__": + demo.queue().launch() \ No newline at end of file diff --git a/app_live.py b/app_live.py new file mode 100644 index 0000000000000000000000000000000000000000..9e69eb7f52d8c927ee91666584b16ad3feab59a7 --- /dev/null +++ b/app_live.py @@ -0,0 +1,130 @@ +import gradio as gr +import numpy as np +import random +import torch +from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler, AutoencoderTiny, AutoencoderKL +from transformers import CLIPTextModel, CLIPTokenizer,T5EncoderModel, T5TokenizerFast +from live_preview_helpers import calculate_shift, retrieve_timesteps, flux_pipe_call_that_returns_an_iterable_of_images +import modal +import random +import io +from config.config import prompts, models # Indirect import +import os +import sentencepiece +from huggingface_hub import login +from transformers import AutoTokenizer +from datetime import datetime + + +MAX_SEED = np.iinfo(np.int32).max +MAX_IMAGE_SIZE = 2048 + +dtype = torch.bfloat16 +device = "cuda" if torch.cuda.is_available() else "cpu" + +CACHE_DIR = "/model_cache" + +image = ( + modal.Image.from_registry("nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.9") + .pip_install_from_requirements("requirements.txt") + .env({ + "HF_HUB_ENABLE_HF_TRANSFER": "1", "HF_HOME": "HF_HOME", "HF_HUB_CACHE": CACHE_DIR + }) +) + +app = modal.App("img-gen-modal-live", image=image) +with image.imports(): + import os + +flux_model_vol = modal.Volume.from_name("flux-model-vol", create_if_missing=True) + +@app.function(volumes={"/data": flux_model_vol}, + secrets=[modal.Secret.from_name("huggingface-token")], + gpu="L40S", + timeout=300) +def infer(prompt, seed=42, randomize_seed=False, width=640, height=360, guidance_scale=3.5, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)): + taef1 = AutoencoderTiny.from_pretrained("/data/taef1", torch_dtype=dtype).to(device) + good_vae = AutoencoderKL.from_pretrained("/data/FLUX.1-dev", subfolder="vae", torch_dtype=dtype).to(device) + pipe = DiffusionPipeline.from_pretrained("/data/FLUX.1-dev", torch_dtype=dtype, vae=taef1).to(device) + torch.cuda.empty_cache() + + pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe) + + if randomize_seed: + seed = random.randint(0, MAX_SEED) + generator = torch.Generator().manual_seed(seed) + + for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images( + prompt=prompt, + guidance_scale=guidance_scale, + num_inference_steps=num_inference_steps, + width=width, + height=height, + generator=generator, + output_type="pil", + good_vae=good_vae, + ): + yield img, seed + +examples = [ + "a tiny astronaut hatching from an egg on the moon", + "a cat holding a sign that says hello world", + "an anime illustration of a wiener schnitzel", +] + +css=""" +#col-container { + margin: 0 auto; + max-width: 520px; +} +""" + +hf_token = os.environ["HF_TOKEN"] +print("Initializing HF TOKEN") +print(hf_token) +print("HF TOKEN:") 
+login(token=hf_token) + +with gr.Blocks(css=css) as demo: + f = modal.Function.from_name("img-gen-modal-live", "infer") + + with gr.Column(elem_id="col-container"): + gr.Markdown(f"""# FLUX.1 [dev] +12B param rectified flow transformer guidance-distilled from [FLUX.1 [pro]](https://blackforestlabs.ai/) +[[non-commercial license](https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md)] [[blog](https://blackforestlabs.ai/announcing-black-forest-labs/)] [[model](https://huggingface.co/black-forest-labs/FLUX.1-dev)] + """) + + with gr.Row(): + prompt = gr.Text(label="Prompt", show_label=False, max_lines=1, placeholder="Enter your prompt", container=False) + run_button = gr.Button("Run", scale=0) + + result = gr.Image(label="Result", show_label=False) + + with gr.Accordion("Advanced Settings", open=False): + seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0) + randomize_seed = gr.Checkbox(label="Randomize seed", value=True) + + with gr.Row(): + width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=640) + height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=360) + + with gr.Row(): + guidance_scale = gr.Slider(label="Guidance Scale", minimum=1, maximum=15, step=0.1, value=3.5) + num_inference_steps = gr.Slider(label="Number of inference steps", minimum=1, maximum=50, step=1, value=28) + + gr.Examples( + examples=examples, + fn=f.remote, + inputs=[prompt], + outputs=[result, seed], + cache_examples="lazy" + ) + + gr.on( + triggers=[run_button.click, prompt.submit], + fn=lambda *args: [next(f.remote_gen(*args)), seed], # Adjusted to process generator + inputs=[prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps], + outputs=[result, seed] + ) + +demo.launch() diff --git a/app_modal.py b/app_modal.py new file mode 100644 index 0000000000000000000000000000000000000000..79e43638eeba1ffd75066c34cc59d1a730587995 --- /dev/null +++ b/app_modal.py @@ -0,0 +1,82 @@ +# app_modal.py +import gradio as gr +import modal +from config.config import models, models_modal, prompts, api_token # Direct import +from config.config import prompts, models, models_modal # Indirect import +#from img_gen import generate_image + +print("Hello from gradio_interface_head!") + +# Modal remote function synchronously +def generate(cpu_gpu, prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input): + # Debug: + debug_message = f"Debug: Button clicked! 
Inputs - Prompt: {prompt_dropdown}, Team: {team_dropdown}, Model: {model_dropdown}, Custom Prompt: {custom_prompt_input}" + print(debug_message) # Print to console for debugging + try: + # Check for CPU/GPU dropdown option + if cpu_gpu == "GPU": + f = modal.Function.from_name("img-gen-modal", "generate_image_gpu") + else: + f = modal.Function.from_name("img-gen-modal", "generate_image_cpu") + + # Call the remote function + image_path, message = f.remote( + prompt_dropdown, + team_dropdown, + model_dropdown, + custom_prompt_input, + ) + return image_path, message + except Exception as e: + return None, f"Error calling generate_image function: {e}" + +def gradio_interface_modal(): + try: + with open("config/layout.css", "r") as f: + custom_css = f.read() + except FileNotFoundError: + print("Error: config/layout.css not found!") + custom_css = "" # Or provide default CSS + + with modal.enable_output(): + #from config.config import prompts, models # Indirect import + # Gradio Interface + with gr.Blocks( + css=custom_css + ) as demo: + gr.Markdown("# CtB AI Image Generator - Cloud version (Modal volume)") + with gr.Row(): + # Set default values for dropdowns + prompt_dropdown = gr.Dropdown(choices=[p["alias"] for p in prompts], label="Select Prompt", value=prompts[0]["alias"]) + team_dropdown = gr.Dropdown(choices=["Red", "Blue"], label="Select Team", value="Red") + model_dropdown = gr.Dropdown(choices=[m["alias"] for m in models_modal], label="Select Model", value=models_modal[0]["alias"]) + with gr.Row(): + # Add a text box for custom user input (max 200 characters) + custom_prompt_input = gr.Textbox(label="Custom Prompt (Optional)", placeholder="Enter additional details (max 200 chars)...", max_lines=1, max_length=200) + with gr.Row(elem_classes="row-class"): + cpu_gpu = gr.Dropdown(choices=["CPU", "GPU"], label="Select CPU/GPU", value="GPU") + generate_button = gr.Button("Generate Image") + with gr.Row(): + output_image = gr.Image(elem_classes="output-image", label="Generated Image", show_label=False, scale=1) + with gr.Row(): + status_text = gr.Textbox(label="Status", placeholder="Waiting for input...", interactive=False) + print("Building Gradio interface...") + + ## Connect the button to the generate function + ## (needed to handle the Gradio/Modal interaction) + generate_button.click( + generate, + inputs=[ + cpu_gpu, + prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input], + outputs=[output_image, status_text], + ) + return demo + +# Create the demo instance +demo = gradio_interface_modal() + +# Only launch if running directly +if __name__ == "__main__": + with modal.enable_output(): + demo.queue().launch() diff --git a/colab.ipynb b/colab.ipynb index 4f92060c31ba42fc283d8a655459718066892301..5077d7200a37c7e34770d81de01f2b3c7e12b80e 100644 --- a/colab.ipynb +++ b/colab.ipynb @@ -50,13 +50,6 @@ " style={\"description_width\": \"initial\"}\n", ")\n", "\n", - "# Input for height\n", - "height_input = widgets.IntText(\n", - " value=360,\n", - " description=\"Height:\",\n", - " style={\"description_width\": \"initial\"}\n", - ")\n", - "\n", "# Input for width\n", "width_input = widgets.IntText(\n", " value=640,\n", @@ -64,6 +57,13 @@ " style={\"description_width\": \"initial\"}\n", ")\n", "\n", + "# Input for height\n", + "height_input = widgets.IntText(\n", + " value=360,\n", + " description=\"Height:\",\n", + " style={\"description_width\": \"initial\"}\n", + ")\n", + "\n", "# Input for number of inference steps\n", "num_inference_steps_input = widgets.IntSlider(\n", " value=20,\n", 
@@ -147,7 +147,7 @@ " # Generate the image\n", " print(\"=== Debug: Calling generate_image ===\")\n", " image = generate_image(\n", - " selected_prompt, selected_team, selected_model, height, width,\n", + " selected_prompt, selected_team, selected_model, width, height,\n", " num_inference_steps, guidance_scale, seed, custom_prompt, api_token,\n", " randomize_seed=randomize_seed_checkbox.value\n", " )\n", @@ -176,24 +176,6 @@ "# Attach the button click event handler\n", "generate_button.on_click(on_generate_button_clicked)\n", "\n", - "def save_image(image, model_label, seed, prompt_label, team):\n", - " \"\"\"\n", - " Save the generated image with a timestamped filename.\n", - "\n", - " Args:\n", - " image (PIL.Image.Image): The generated image.\n", - " model_label (str): The label of the selected model.\n", - " prompt_label (str): The seed. The label of the selected prompt.\n", - " team (str): The selected team (\"Red\" or \"Blue\").\n", - "\n", - " Returns:\n", - " str: The filename of the saved image.\n", - " \"\"\"\n", - " timestamp = datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n", - " output_filename = f\"{timestamp}_{model_label.replace(' ', '_').lower()}_{seed}_{prompt_label.replace(' ', '_').lower()}_{team.lower()}.png\"\n", - " image.save(output_filename)\n", - " return output_filename\n", - "\n", "# Display the widgets\n", "display(prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input, seed_input, randomize_seed_checkbox, generate_button, output)" ] diff --git a/colab/.DS_Store b/colab/.DS_Store index 903efaa84d4c125ab79e775d7473f4fadb63a3b5..23f99f3b6971946c8174558f5cd0ff08310db42f 100644 Binary files a/colab/.DS_Store and b/colab/.DS_Store differ diff --git a/modal_volume_download/.DS_Store b/config/.DS_Store similarity index 93% rename from modal_volume_download/.DS_Store rename to config/.DS_Store index 0ccbb5de6454eccc2a11dc746f91de81881724fd..a19991e7c6d77a1d1165e5cdfdefd1fd293f0e1b 100644 Binary files a/modal_volume_download/.DS_Store and b/config/.DS_Store differ diff --git a/config/__pycache__/__init__.cpython-310.pyc b/config/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bec37269799d49dd1b6a98af7d39e7156e635bdb Binary files /dev/null and b/config/__pycache__/__init__.cpython-310.pyc differ diff --git a/config/__pycache__/config.cpython-310.pyc b/config/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b19e14bd60b13e13b264cc44971b4506c246609b Binary files /dev/null and b/config/__pycache__/config.cpython-310.pyc differ diff --git a/config/__pycache__/config.cpython-311.pyc b/config/__pycache__/config.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..82f38a75511d6fb7e45065136caa8e8d1c4a4368 Binary files /dev/null and b/config/__pycache__/config.cpython-311.pyc differ diff --git a/config/__pycache__/config.cpython-39.pyc b/config/__pycache__/config.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8179a4384c09ad30973546890fd6c7da29bae150 Binary files /dev/null and b/config/__pycache__/config.cpython-39.pyc differ diff --git a/config/__pycache__/models.cpython-310.pyc b/config/__pycache__/models.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9f89492add4b9f9249807fa397c76cc523eb0139 Binary files /dev/null and b/config/__pycache__/models.cpython-310.pyc differ diff --git a/config/__pycache__/models.cpython-311.pyc 
b/config/__pycache__/models.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..227241d88f80f2bb0823d104699ec3fa0cdc32e2 Binary files /dev/null and b/config/__pycache__/models.cpython-311.pyc differ diff --git a/config/__pycache__/models.cpython-39.pyc b/config/__pycache__/models.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6d4a94d4d8fb8bfa44caece0b4120677d9b9aad6 Binary files /dev/null and b/config/__pycache__/models.cpython-39.pyc differ diff --git a/config/__pycache__/prompts.cpython-310.pyc b/config/__pycache__/prompts.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b7e4da5188280dfa9fb854285d72e9dbab0666b Binary files /dev/null and b/config/__pycache__/prompts.cpython-310.pyc differ diff --git a/config/__pycache__/prompts.cpython-311.pyc b/config/__pycache__/prompts.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..645df3c16a4578477610f17555e141aed1888e71 Binary files /dev/null and b/config/__pycache__/prompts.cpython-311.pyc differ diff --git a/config/__pycache__/prompts.cpython-39.pyc b/config/__pycache__/prompts.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c3d63e5347171c21bf639090499a97c2b8b8b570 Binary files /dev/null and b/config/__pycache__/prompts.cpython-39.pyc differ diff --git a/config/config.py b/config/config.py index ce288e52d313e7ea570a3f02c129e5183815aff5..a96cc54ddd4dcb4124f943066fd6a3692d447697 100644 --- a/config/config.py +++ b/config/config.py @@ -1,12 +1,12 @@ # config.py import os -from config.prompts import prompts # Import prompts from prompts.py -from config.models import models +from config.prompts import prompts +from config.models import models, models_modal # Retrieve the Hugging Face token api_token = os.getenv("HF_TOKEN") # Debugging: Print prompt and model options +print("##### IMPORTING CONFIG #####") print("Prompt Options:", [p["alias"] for p in prompts]) print("Model Options:", [m["alias"] for m in models]) - diff --git a/config/config_colab.py b/config/config_colab.py index 6ab0b995f11e359f6ec9a4a7a6b36a2a6aeb1cf7..1ab6747243a061da293bc18c45885aaee3901bb2 100644 --- a/config/config_colab.py +++ b/config/config_colab.py @@ -1,7 +1,7 @@ # config_colab.py from google.colab import userdata from config.prompts import prompts # Import prompts from prompts.py -from config.models import models +from config.models import models, models_modal # Retrieve the Hugging Face token from Colab secrets api_token = userdata.get("HF_TOKEN") diff --git a/config/layout.css b/config/layout.css new file mode 100644 index 0000000000000000000000000000000000000000..08ff88a9a69110b6833989763ce09742933a73bb --- /dev/null +++ b/config/layout.css @@ -0,0 +1,11 @@ +.row-class { + display: flex; + align-items: stretch; /* Ensures all children have the same height */ +} +.row-class > * { + flex: 1; +} +.output-image img { + width: 2500px; /* Force image to fill container width */ + object-fit: cover; /* ACTIVATE FOR IMAGE-FIT CONTAINER */ +} \ No newline at end of file diff --git a/config/models.py b/config/models.py index 7c557ff1f127c3bb97db62f6c299df27e618f46e..f34d2b5914af377612977f223aa2985b875f3898 100644 --- a/config/models.py +++ b/config/models.py @@ -1,8 +1,16 @@ # List of models with aliases models = [ {"alias": "FLUX.1-dev", "name": "black-forest-labs/FLUX.1-dev"}, - {"alias": "Midjourney", "name": "strangerzonehf/Flux-Midjourney-Mix2-LoRA"} + {"alias": "FLUX.1-dev_modal_local", "name": 
"FLUX.1-dev"}, + {"alias": "Midjourney", "name": "strangerzonehf/Flux-Midjourney-Mix2-LoRA"}, + {"alias": "FLUX.1-schnell", "name": "black-forest-labs/FLUX.1-schnell"}, + {"alias": "FLUX.1-schnell_modal_local", "name": "FLUX.1-schnell"}, ] -# Debugging: Print prompt and model options -#print("Model Options:", [m["alias"] for m in models]) +models_modal = [ + {"alias": "FLUX.1-dev_modal_local", "name": "FLUX.1-dev"}, + {"alias": "FLUX.1-schnell_modal_local", "name": "FLUX.1-schnell"}, + #{"alias": "FLUX.1-dev", "name": "black-forest-labs/FLUX.1-dev"}, + #{"alias": "Midjourney", "name": "strangerzonehf/Flux-Midjourney-Mix2-LoRA"}, + #{"alias": "FLUX.1-schnell", "name": "black-forest-labs/FLUX.1-schnell"}, +] \ No newline at end of file diff --git a/modal_volume_download/images/.DS_Store b/diffusers_version/.DS_Store similarity index 89% rename from modal_volume_download/images/.DS_Store rename to diffusers_version/.DS_Store index 5008ddfcf53c02e82d7eee2e57c38e5672ef89f6..9c22b43d61900aaba4f01e244e01fb7e7eae092d 100644 Binary files a/modal_volume_download/images/.DS_Store and b/diffusers_version/.DS_Store differ diff --git a/diffusers_version/app_diffusers.py b/diffusers_version/app_diffusers.py new file mode 100644 index 0000000000000000000000000000000000000000..518309ddd472dd45f43e8e8a213e18512d8af401 --- /dev/null +++ b/diffusers_version/app_diffusers.py @@ -0,0 +1,9 @@ +# app.py +#IMPORT gradio_interface +from src.gradio_interface_diffusers import demo +from config.config import models, prompts, api_token # Direct import +import sys +import os + +# Launch the Gradio app +demo.queue().launch() \ No newline at end of file diff --git a/diffusers_version/config/__pycache__/config.cpython-39.pyc b/diffusers_version/config/__pycache__/config.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..23e8778b26e6cf0f279bdda8c512e40cf54123a0 Binary files /dev/null and b/diffusers_version/config/__pycache__/config.cpython-39.pyc differ diff --git a/diffusers_version/config/__pycache__/models.cpython-39.pyc b/diffusers_version/config/__pycache__/models.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f412bf225a2bffe99ec2c39a25bffc0bc3922f60 Binary files /dev/null and b/diffusers_version/config/__pycache__/models.cpython-39.pyc differ diff --git a/diffusers_version/config/__pycache__/prompts.cpython-39.pyc b/diffusers_version/config/__pycache__/prompts.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de507c9d6d760b566161cdf8c113e6dfa76a3318 Binary files /dev/null and b/diffusers_version/config/__pycache__/prompts.cpython-39.pyc differ diff --git a/diffusers_version/config/config.py b/diffusers_version/config/config.py new file mode 100644 index 0000000000000000000000000000000000000000..9cef99b43ac0569cbf06e9bf6b637829993e9202 --- /dev/null +++ b/diffusers_version/config/config.py @@ -0,0 +1,14 @@ +# config.py +import os +from config.prompts import prompts # Direct Import prompts from prompts.py +from config.models import models # Direct Import models + +# Retrieve the Hugging Face token +api_token = os.getenv("HF_TOKEN") + +# Debugging: Print prompt and model options +print("Prompt Options:", [p["alias"] for p in prompts]) +print("Model Options:", [m["alias"] for m in models]) + +gpu = "T4" + diff --git a/diffusers_version/config/models.py b/diffusers_version/config/models.py new file mode 100644 index 0000000000000000000000000000000000000000..ed3fe505a08483d2cd29ee29b5dcb6c566cbc135 --- /dev/null +++ 
b/diffusers_version/config/models.py @@ -0,0 +1,12 @@ +# List of models with aliases +models = [ + {"alias": "FLUX.1-dev_modal_volume", "name": "FLUX.1-dev"}, + {"alias": "FLUX.1-dev", "name": "black-forest-labs/FLUX.1-dev"}, + {"alias": "Midjourney", "name": "strangerzonehf/Flux-Midjourney-Mix2-LoRA"}, + {"alias": "FLUX.1-schnell", "name": "black-forest-labs/FLUX.1-schnell"}, + #{"alias": "Andre", "name": "Andre"} + + +] +# Debugging: Print prompt and model options +#print("Model Options:", [m["alias"] for m in models]) diff --git a/diffusers_version/config/prompts.py b/diffusers_version/config/prompts.py new file mode 100644 index 0000000000000000000000000000000000000000..cc1e8ee26276a65a706ec945c950477f0d3dc299 --- /dev/null +++ b/diffusers_version/config/prompts.py @@ -0,0 +1,46 @@ + +# List of prompts with intense combat +# + +prompts = [ + { + "alias": "Castle Siege", + "text": "A medieval castle under siege, with archers firing arrows from the walls, knights charging on horses, and catapults launching fireballs. The winning army is dressed in {team_color} armor and banners. The enemy army, dressed in {enemy_color} armor and banners, is fiercely attacking the castle, with soldiers scaling ladders and clashing swords with the defenders. Arrows fly through the air, explosions light up the battlefield, and injured knights lie on the ground. Fire engulfs parts of the castle, and the air is thick with smoke and chaos. Unreal Engine render style, photorealistic, realistic fantasy style." + }, + { + "alias": "Forest Battle", + "text": "A fierce battle between two armies in a dense forest, with knights wielding swords and axes, horses rearing, and the ground covered in mud and blood. The winning army is dressed in {team_color} armor and banners. The enemy army, dressed in {enemy_color} armor and banners, is locked in brutal combat, with soldiers fighting hand-to-hand amidst the trees. Arrows whiz past, and the sounds of clashing steel echo through the forest. Injured soldiers scream in pain, and the forest is littered with broken weapons and shields. Unreal Engine render style, photorealistic, realistic fantasy style." + }, + { + "alias": "Boiling Oil Defense", + "text": "A dramatic moment in a medieval siege, with a knight leading a charge against a castle gate, while defenders pour boiling oil from the walls. The winning army is dressed in {team_color} armor and banners. The enemy army, dressed in {enemy_color} armor and banners, is relentlessly attacking, with soldiers screaming as they are hit by the oil. Knights clash swords at the gate, and arrows rain down from above. The ground is littered with the bodies of fallen soldiers, and the air is filled with the smell of burning flesh. Unreal Engine render style, photorealistic, realistic fantasy style." + }, + { + "alias": "Burning Castle Battle", + "text": "A chaotic battlefield with knights on horseback clashing with infantry, archers firing volleys of arrows, and a castle burning in the background. The winning army is dressed in {team_color} armor and banners. The enemy army, dressed in {enemy_color} armor and banners, is fighting fiercely, with soldiers engaging in brutal melee combat. Flames light up the scene as knights charge through the chaos. Injured soldiers crawl on the ground, and the air is filled with the sounds of clashing steel and screams of pain. Unreal Engine render style, photorealistic, realistic fantasy style." 
+ }, + { + "alias": "Heroic Last Stand", + "text": "A heroic last stand of a small group of knights defending a bridge against a massive army, with arrows flying and swords clashing. The winning army is dressed in {team_color} armor and banners. The enemy army, dressed in {enemy_color} armor and banners, is overwhelming the defenders, but the knights fight bravely, cutting down enemy soldiers as they advance. The bridge is littered with bodies and broken weapons. Blood stains the ground, and the air is thick with the sounds of battle. Unreal Engine render style, photorealistic, realistic fantasy style." + }, + { + "alias": "Siege Tower Attack", + "text": "A medieval siege tower approaching a castle wall, with knights scaling ladders and defenders throwing rocks and shooting arrows. The winning army is dressed in {team_color} armor and banners. The enemy army, dressed in {enemy_color} armor and banners, is fighting desperately to breach the walls, with soldiers clashing swords on the battlements. Arrows fly in all directions, and the siege tower is engulfed in flames. Injured soldiers fall from the ladders, and the ground is littered with the bodies of the fallen. Unreal Engine render style, photorealistic, realistic fantasy style." + }, + { + "alias": "Knight Duel", + "text": "A dramatic duel between two knights in the middle of a battlefield, with their armies watching and the castle in the background. The winning army is dressed in {team_color} armor and banners. The enemy army, dressed in {enemy_color} armor and banners, is engaged in fierce combat all around, with soldiers clashing swords and firing arrows. The duelists fight with skill and determination, their blades flashing in the sunlight. Injured soldiers lie on the ground, and the air is filled with the sounds of battle. Unreal Engine render style, photorealistic, realistic fantasy style. " + }, + { + "alias": "Night Battle", + "text": "A night battle during a medieval siege, with torches lighting the scene, knights fighting in the shadows, and the castle walls looming in the background. The winning army is dressed in {team_color} armor and banners. The enemy army, dressed in {enemy_color} armor and banners, is locked in brutal combat, with soldiers clashing swords and firing arrows in the dim light. Flames from burning siege equipment illuminate the chaos. Injured soldiers scream in pain, and the ground is littered with the bodies of the fallen. Unreal Engine render style, photorealistic, realistic fantasy style." + }, + { + "alias": "Marching Army", + "text": "A massive army of knights and infantry marching towards a distant castle, with banners flying and the sun setting behind them. The winning army is dressed in {team_color} armor and banners. The enemy army, dressed in {enemy_color} armor and banners, is engaging in skirmishes along the way, with soldiers clashing swords and firing arrows. The battlefield is alive with the sounds of combat and the clash of steel. Injured soldiers lie on the ground, and the air is thick with the smell of blood and smoke. Unreal Engine render style, photorealistic, realistic fantasy style." + }, + { + "alias": "Snowy Battlefield", + "text": "A medieval battle in a snowy landscape, with knights in heavy armor fighting on a frozen lake, and the castle visible in the distance. The winning army is dressed in {team_color} armor and banners. The enemy army, dressed in {enemy_color} armor and banners, is locked in fierce combat, with soldiers slipping on the ice as they clash swords. 
Arrows fly through the air, and the snow is stained red with blood. Injured soldiers crawl on the ground, and the air is filled with the sounds of battle. Unreal Engine render style, photorealistic, realistic fantasy style." + } +] diff --git a/diffusers_version/src/__pycache__/gradio_interface_diffusers.cpython-39.pyc b/diffusers_version/src/__pycache__/gradio_interface_diffusers.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..07facf387bca4570f0ad1d4a2d54f6f714d135e0 Binary files /dev/null and b/diffusers_version/src/__pycache__/gradio_interface_diffusers.cpython-39.pyc differ diff --git a/diffusers_version/src/__pycache__/img_gen_diffusers.cpython-39.pyc b/diffusers_version/src/__pycache__/img_gen_diffusers.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d5bf4c846293320d6eaf648ec8af129ce4dea937 Binary files /dev/null and b/diffusers_version/src/__pycache__/img_gen_diffusers.cpython-39.pyc differ diff --git a/diffusers_version/src/gradio_interface_diffusers.py b/diffusers_version/src/gradio_interface_diffusers.py new file mode 100644 index 0000000000000000000000000000000000000000..27a680dbccdcbb7e2494b349c7dc63ed562af6a2 --- /dev/null +++ b/diffusers_version/src/gradio_interface_diffusers.py @@ -0,0 +1,68 @@ +# gradio_interface.py +import gradio as gr +from config.config import prompts, models # Indirect import +from src.img_gen_diffusers import generate_image + +print("Hello from gradio_interface_head!") + +# Modal remote function synchronously +def generate(prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input): + # Debug: Print a message instead of generating an image + debug_message = f"Debug: Button clicked! Inputs - Prompt: {prompt_dropdown}, Team: {team_dropdown}, Model: {model_dropdown}, Custom Prompt: {custom_prompt_input}" + print(debug_message) # Print to console for debugging + try: + # Import the remote function + image_path, message = generate_image(prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input) + return image_path, message + except Exception as e: + return None, f"An error occurred: {e}" + +def generate_gpu(prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input): + # Debug: Print a message instead of generating an image + debug_message = f"Debug: Button clicked! 
Inputs - Prompt: {prompt_dropdown}, Team: {team_dropdown}, Model: {model_dropdown}, Custom Prompt: {custom_prompt_input}" + print(debug_message) # Print to console for debugging + try: + # Call the remote function + f = modal.Function.from_name("img-gen-modal-gpu", "generate_image") + image_path, message = f.remote(prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input) + return image_path, message + except Exception as e: + return None, f"An error occurred: {e}" + + +def gradio_interface_diffusers(): + from config.config import prompts, models # Indirect import + # Gradio Interface + with gr.Blocks() as demo: + gr.Markdown("# CtB AI Image Generator") + with gr.Row(): + # Set default values for dropdowns + prompt_dropdown = gr.Dropdown(choices=[p["alias"] for p in prompts], label="Select Prompt", value=prompts[0]["alias"]) + team_dropdown = gr.Dropdown(choices=["Red", "Blue"], label="Select Team", value="Red") + model_dropdown = gr.Dropdown(choices=[m["alias"] for m in models], label="Select Model", value=models[0]["alias"]) + with gr.Row(): + # Add a text box for custom user input (max 200 characters) + custom_prompt_input = gr.Textbox(label="Custom Prompt (Optional)", placeholder="Enter additional details (max 200 chars)...", max_lines=1, max_length=200) + with gr.Row(): + generate_button = gr.Button("Generate Image") + with gr.Row(): + output_image = gr.Image(label="Generated Image") + with gr.Row(): + status_text = gr.Textbox(label="Status", placeholder="Waiting for input...", interactive=False) + print("Building Gradio interface...") + + ## Connect the button to the generate function + ## (needed to handle the Gradio/Modal interaction) + generate_button.click( + generate, + inputs=[prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input], + outputs=[output_image, status_text], + ) + return demo + +# Create the demo instance +demo = gradio_interface_diffusers() + +# Only launch if running directly +if __name__ == "__main__": + demo.queue().launch() diff --git a/diffusers_version/src/img_gen_diffusers.py b/diffusers_version/src/img_gen_diffusers.py new file mode 100644 index 0000000000000000000000000000000000000000..bf0676d2ea04be80bb3aec443bfabdedad8bb830 --- /dev/null +++ b/diffusers_version/src/img_gen_diffusers.py @@ -0,0 +1,183 @@ +# img_gen_diffusers.py +import random +import io +from config.config import prompts, models # Indirect import +import os +import gradio as gr +import diffusers +import os +import torch +import sentencepiece +import torch +from huggingface_hub import login +from transformers import AutoTokenizer +import random +from datetime import datetime + +# MAIN GENERATE IMAGE FUNCTION +def generate_image(prompt_alias, + team_color, + model_alias, + custom_prompt, + height=360, + width=640, + num_inference_steps=20, + guidance_scale=2.0, + seed=-1, + progress=gr.Progress(track_tqdm=True) # Add progress parameter + ): + print("Hello from ctb_modal!") + # progress(0, desc="Starting...") # Initial progress + # yield "Initializing image generation..." 
# Yield the initial message + + print("Running debug check...") + # Debug function to check installed packages + def check_dependencies(): + packages = [ + "diffusers", # For Stable Diffusion + "transformers", # For Hugging Face models + "torch", # PyTorch + "accelerate", # For distributed training/inference + "gradio", # For the Gradio interface (updated to latest version) + "safetensors", # For safe model loading + "pillow", # For image processing + "sentencepiece" + ] + + for package in packages: + try: + import importlib + module = importlib.import_module(package) + print(f" {package} is installed. Version:") + except ImportError: + print(f" {package} is NOT installed.") + + check_dependencies() + # progress(0.2, desc="Preprocessing input...") + # yield "Preprocessing inputs..." # Yield the preprocessing message + + # Find the selected prompt and model + try: + prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"] + model_name = next(m for m in models if m["alias"] == model_alias)["name"] + except StopIteration: + return None, "ERROR: Invalid prompt or model selected." + + # Determine the enemy color + enemy_color = "blue" if team_color.lower() == "red" else "red" + + # Print the original prompt and dynamic values for debugging + print("Original Prompt:") + print(prompt) + print(f"Enemy Color: {enemy_color}") + print(f"Team Color: {team_color.lower()}") + + prompt = prompt.format(team_color=team_color.lower(), enemy_color=enemy_color) + + # Print the formatted prompt for debugging + print("\nFormatted Prompt:") + print(prompt) + + # Append the custom prompt (if provided) + if custom_prompt and len(custom_prompt.strip()) > 0: + prompt += " " + custom_prompt.strip() + + # Randomize the seed if needed + if seed == -1: + seed = random.randint(0, 1000000) + + try: + from diffusers import FluxPipeline + print("Initializing HF TOKEN") + hf_token = os.environ["HF_TOKEN"] + print(hf_token) + print("HF TOKEN:") + login(token=hf_token) + print("model_name:") + print(model_name) + + # Use absolute path with leading slash + local_path = f"{model_name}" # Changed from "data/" to "/data/" + print(f"Loading model from local path: {local_path}") + + # Debug: Check if the directory exists and list its contents + if os.path.exists(local_path): + print("Directory exists. Contents:") + for item in os.listdir(local_path): + print(f" - {item}") + else: + print(f"Directory does not exist: {local_path}") + print("Contents of /:") + print(os.listdir("/")) + # CHECK FOR TORCH USING CUDA + print("CHECK FOR TORCH USING CUDA") + print(f"CUDA available: {torch.cuda.is_available()}") + if torch.cuda.is_available(): + print("inside if") + print(f"CUDA device count: {torch.cuda.device_count()}") + print(f"Current device: {torch.cuda.current_device()}") + print(f"Device name: {torch.cuda.get_device_name(torch.cuda.current_device())}") + + # progress(0.5, desc="Running the model...") + # yield "Running the model..." 
# Yield the model running message + + # INITIALIZING CPU PIPE + print("-----INITIALIZING PIPE-----") + pipe = FluxPipeline.from_pretrained( + local_path, + torch_dtype=torch.bfloat16, + #torch_dtype=torch.float16, + #torch_dtype=torch.float32, + local_files_only=True + ) + + if torch.cuda.is_available(): + print("CUDA available") + print("using gpu") + pipe = pipe.to("cuda") + pipe_message = "CUDA" + else: + print("CUDA not available") + print("using cpu") + #pipe = pipe.to("cpu") + pipe_message = "CPU" + #pipe.enable_model_cpu_offload() # Use official recommended method + print(f"-----{pipe_message} PIPE INITIALIZED-----") + print(f"Using device: {pipe.device}") + except Exception as e: + print(f"Detailed error: {str(e)}") + return None, f"ERROR: Failed to initialize the pipeline. Details: {e}" + try: + print("-----SENDING IMG GEN TO PIPE-----") + print("-----HOLD ON-----") + # progress(0.8, desc="Postprocessing the output...") + # yield "Postprocessing the output..." # Yield the postprocessing message + + image = pipe( + prompt, + guidance_scale=guidance_scale, + num_inference_steps=num_inference_steps, + width=width, + height=height, + max_sequence_length=512 + # seed=seed + ).images[0] + print("-----RENDER DONE!-----") + print(image) + except Exception as e: + return None, f"ERROR: Failed to generate the image. Details: {e}" + + try: + print("-----IMAGE GENERATED SUCCESSFULLY!-----") + print("-----CALL THE BANNERS!-----") + print("-----SAVING TO DISK-----") + # Save the image with a timestamped filename + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + output_filename = f"/data/images/{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team_color.lower()}.png" + # Save the image using PIL's save method + image.save(output_filename) + print(f"File path: {output_filename}") + except Exception as e: + print(f"ERROR: Failed to save image. Details: {e}") + # Return the filename and success message + return image, "Image generated successfully! Call the banners!" diff --git a/examples/example-chat-w-pdf.py b/examples/example-chat-w-pdf.py new file mode 100644 index 0000000000000000000000000000000000000000..bcaffe626e507e6f114a4d05a6cbf2c4046a1a75 --- /dev/null +++ b/examples/example-chat-w-pdf.py @@ -0,0 +1,35 @@ +from pathlib import Path +from urllib.request import urlopen +from uuid import uuid4 + +import modal + +MINUTES = 60 # seconds + +app = modal.App("chat-with-pdf") + + +CACHE_DIR = "/hf-cache" + +model_image = ( + modal.Image.debian_slim(python_version="3.12") + .apt_install("git") + .pip_install( + [ + "git+https://github.com/illuin-tech/colpali.git@782edcd50108d1842d154730ad3ce72476a2d17d", # we pin the commit id + "hf_transfer==0.1.8", + "qwen-vl-utils==0.0.8", + "torchvision==0.19.1", + ] + ) + .env({"HF_HUB_ENABLE_HF_TRANSFER": "1", "HF_HUB_CACHE": CACHE_DIR}) +) + + +# These dependencies are only installed remotely, so we can't import them locally. Use the .imports context manager to import them only on Modal instead. 
+ +with model_image.imports(): + import torch + from colpali_engine.models import ColQwen2, ColQwen2Processor + from qwen_vl_utils import process_vision_info + from transformers import AutoProcessor, Qwen2VLForConditionalGeneration diff --git a/examples/example-flux.py b/examples/example-flux.py new file mode 100644 index 0000000000000000000000000000000000000000..627cf4a6a3e7286a603ff0fd46104d0b4f225180 --- /dev/null +++ b/examples/example-flux.py @@ -0,0 +1,57 @@ +import time +from io import BytesIO +from pathlib import Path + +import modal + + +cuda_version = "12.4.0" # should be no greater than host CUDA version +flavor = "devel" # includes full CUDA toolkit +operating_sys = "ubuntu22.04" +tag = f"{cuda_version}-{flavor}-{operating_sys}" + +cuda_dev_image = modal.Image.from_registry( + f"nvidia/cuda:{tag}", add_python="3.11" +).entrypoint([]) + + +diffusers_commit_sha = "81cf3b2f155f1de322079af28f625349ee21ec6b" + +flux_image = ( + cuda_dev_image.apt_install( + "git", + "libglib2.0-0", + "libsm6", + "libxrender1", + "libxext6", + "ffmpeg", + "libgl1", + ) + .pip_install( + "invisible_watermark==0.2.0", + "transformers==4.44.0", + "huggingface_hub[hf_transfer]==0.26.2", + "accelerate==0.33.0", + "safetensors==0.4.4", + "sentencepiece==0.2.0", + "torch==2.5.0", + f"git+https://github.com/huggingface/diffusers.git@{diffusers_commit_sha}", + "numpy<2", + ) + .env({"HF_HUB_ENABLE_HF_TRANSFER": "1", "HF_HUB_CACHE_DIR": "/cache"}) +) + + +flux_image = flux_image.env( + { + "TORCHINDUCTOR_CACHE_DIR": "/root/.inductor-cache", + "TORCHINDUCTOR_FX_GRAPH_CACHE": "1", + } +) + + +app = modal.App("example-flux", image=flux_image) + +with flux_image.imports(): + import torch + from diffusers import FluxPipeline diff --git a/example-hello-world.py b/examples/example-hello-world.py similarity index 100% rename from example-hello-world.py rename to examples/example-hello-world.py diff --git a/examples/example-text-to-image.py b/examples/example-text-to-image.py new file mode 100644 index 0000000000000000000000000000000000000000..4ceee85c892cf821cec7cdb90fc1cab876a4af52 --- /dev/null +++ b/examples/example-text-to-image.py @@ -0,0 +1,135 @@ + +import io +import random +import time +from pathlib import Path + +import modal + +MINUTES = 60 + + +app = modal.App("example-text-to-image") + + +CACHE_DIR = "/cache" + +image = ( + modal.Image.debian_slim(python_version="3.12") + .pip_install( + "accelerate==0.33.0", + "diffusers==0.31.0", + "fastapi[standard]==0.115.4", + "huggingface-hub[hf_transfer]==0.25.2", + "sentencepiece==0.2.0", + "torch==2.5.1", + "torchvision==0.20.1", + "transformers~=4.44.0", + ) + .env( + { + "HF_HUB_ENABLE_HF_TRANSFER": "1", # faster downloads + "HF_HUB_CACHE_DIR": CACHE_DIR, + } + ) +) + +with image.imports(): + import diffusers + import torch + from fastapi import Response +MODEL_ID = "adamo1139/stable-diffusion-3.5-large-turbo-ungated" +MODEL_REVISION_ID = "9ad870ac0b0e5e48ced156bb02f85d324b7275d2" + +cache_volume = modal.Volume.from_name("hf-hub-cache", create_if_missing=True) + + +@app.cls( + image=image, + gpu="H100", + timeout=10 * MINUTES, + volumes={CACHE_DIR: cache_volume}, +) +class Inference: + @modal.enter() + def load_pipeline(self): + self.pipe = diffusers.StableDiffusion3Pipeline.from_pretrained( + MODEL_ID, + revision=MODEL_REVISION_ID, + torch_dtype=torch.bfloat16, + ).to("cuda") + + @modal.method() + def run( + self, prompt: str, batch_size: int = 4, seed: int = None + ) -> list[bytes]: + seed = seed if seed is not None else random.randint(0, 2**32 - 1) + 
print("seeding RNG with", seed) + torch.manual_seed(seed) + images = self.pipe( + prompt, + num_images_per_prompt=batch_size, # outputting multiple images per prompt is much cheaper than separate calls + num_inference_steps=4, # turbo is tuned to run in four steps + guidance_scale=0.0, # turbo doesn't use CFG + max_sequence_length=512, # T5-XXL text encoder supports longer sequences, more complex prompts + ).images + + image_output = [] + for image in images: + with io.BytesIO() as buf: + image.save(buf, format="PNG") + image_output.append(buf.getvalue()) + torch.cuda.empty_cache() # reduce fragmentation + return image_output + + @modal.web_endpoint(docs=True) + def web(self, prompt: str, seed: int = None): + return Response( + content=self.run.local( # run in the same container + prompt, batch_size=1, seed=seed + )[0], + media_type="image/png", + ) + +@app.local_entrypoint() +def entrypoint( + samples: int = 4, + prompt: str = "A princess riding on a pony", + batch_size: int = 4, + seed: int = None, +): + print( + f"prompt => {prompt}", + f"samples => {samples}", + f"batch_size => {batch_size}", + f"seed => {seed}", + sep="\n", + ) + + output_dir = Path("/tmp/stable-diffusion") + output_dir.mkdir(exist_ok=True, parents=True) + + inference_service = Inference() + + for sample_idx in range(samples): + start = time.time() + images = inference_service.run.remote(prompt, batch_size, seed) + duration = time.time() - start + print(f"Run {sample_idx+1} took {duration:.3f}s") + if sample_idx: + print( + f"\tGenerated {len(images)} image(s) at {(duration)/len(images):.3f}s / image." + ) + for batch_idx, image_bytes in enumerate(images): + output_path = ( + output_dir + / f"output_{slugify(prompt)[:64]}_{str(sample_idx).zfill(2)}_{str(batch_idx).zfill(2)}.png" + ) + if not batch_idx: + print("Saving outputs", end="\n\t") + print( + output_path, + end="\n" + ("\t" if batch_idx < len(images) - 1 else ""), + ) + output_path.write_bytes(image_bytes) + diff --git a/examples/example-text-to-video.py b/examples/example-text-to-video.py new file mode 100644 index 0000000000000000000000000000000000000000..d4ad4ee5989c7a5f6ec869279d713d9fb85d5314 --- /dev/null +++ b/examples/example-text-to-video.py @@ -0,0 +1,32 @@ +import string +import time +from pathlib import Path + +import modal + +app = modal.App() + +image = ( + modal.Image.debian_slim(python_version="3.11") + .apt_install("git") + .pip_install( + "torch==2.5.1", + "accelerate==1.1.1", + "hf_transfer==0.1.8", + "sentencepiece==0.2.0", + "imageio==2.36.0", + "imageio-ffmpeg==0.5.1", + "git+https://github.com/huggingface/transformers@30335093276212ce74938bdfd85bfd5df31a668a", + "git+https://github.com/huggingface/diffusers@99c0483b67427de467f11aa35d54678fd36a7ea2", + ) + .env( + { + "HF_HUB_ENABLE_HF_TRANSFER": "1", + "HF_HOME": "/models", + } + ) +) + + +https://modal.com/docs/examples/mochi + diff --git a/examples/example_check_imports_volume.py b/examples/example_check_imports_volume.py new file mode 100644 index 0000000000000000000000000000000000000000..7594baabc786b9fc5f8b7abf622e3bb09c4aaaeb --- /dev/null +++ b/examples/example_check_imports_volume.py @@ -0,0 +1,32 @@ + def check_dependencies(): + import importlib + packages = [ + "diffusers", # For Stable Diffusion + "transformers", # For Hugging Face models + "torch", # PyTorch + "accelerate", # For distributed training/inference + "gradio", # For the Gradio interface (updated to latest version) + "safetensors", # For safe model loading + "pillow", # For image processing + "sentencepiece" + ] + 
+ for package in packages: + try: + import importlib + module = importlib.import_module(package) + print(f" {package} is installed. Version: {module.__version__}") + except ImportError: + print(f" {package} is NOT installed.") + + check_dependencies() + + def check_volume_contents(): + model_path = "/data/FLUX.1-dev" + if os.path.exists(model_path): + print(f"Contents of {model_path}:") + print(os.listdir(model_path)) + else: + print(f"Model path {model_path} does not exist.") + + check_volume_contents() \ No newline at end of file diff --git a/examples/example_dynamic_decorator.py b/examples/example_dynamic_decorator.py new file mode 100644 index 0000000000000000000000000000000000000000..00606aa6ddc3b5ff000d899f6cec98262b1dded0 --- /dev/null +++ b/examples/example_dynamic_decorator.py @@ -0,0 +1,49 @@ + +import modal + +# Define the Modal image and app +image = modal.Image.debian_slim(python_version="3.9") +app = modal.App("example-app", image=image) + +# Define the volume +flux_model_vol = modal.Volume.from_name("flux-model-vol", create_if_missing=True) + +# Load configuration (e.g., from a config file or environment variables) +cpu = 8 # Set to 0 to disable CPU +memory = 70000 # Memory in MB +gpu = "a100-80gb" # Set to None to disable GPU + +# Dynamically construct the decorator arguments +decorator_args = { + "volumes": {"/data": flux_model_vol}, + "secrets": [modal.Secret.from_name("huggingface-token")], + "cpu": cpu, + "memory": memory, +} + +# Remove GPU if CPU is set +if cpu > 0: + print("CPU is set, removing GPU parameter.") + decorator_args.pop("gpu", None) # Remove 'gpu' if it exists +else: + print("CPU is not set, keeping GPU parameter.") + decorator_args["gpu"] = gpu + +# Debug: Print the final decorator arguments +print("Decorator arguments:", decorator_args) + +# Apply the decorator dynamically +@app.function(**decorator_args) +def my_function(): + import os + + # Example: List the contents of the volume + print("Contents of /data:") + print(os.listdir("/data")) + + # Your function code here + return f"Function executed with CPU={cpu}, Memory={memory}, GPU={gpu if 'gpu' in decorator_args else 'None'}" + +# Call the function +result = my_function.remote() +print(result) \ No newline at end of file diff --git a/examples/example_image_settings.py b/examples/example_image_settings.py new file mode 100644 index 0000000000000000000000000000000000000000..991226a04a5fe2683e4673ecc6931e4b6064e5e6 --- /dev/null +++ b/examples/example_image_settings.py @@ -0,0 +1,52 @@ + image = ( +modal.Image.from_registry( + "nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.11" + ) + .pip_install( + "numpy", + "pandas", + "diffusers", + "transformers", + "torch", + "accelerate", + "gradio", + "safetensors", + "pillow", + ) # Install Python packages + .run_commands("echo 'Image build complete!'") # Run a shell command +) + + +# CHECK INSTALLS +def function(): +# Import libraries and print their versions + import numpy as np + import pandas as pd + import torch + import diffusers + import transformers + import gradio as gr + from PIL import Image as PILImage + + print("Hello from ctb_modal!") + print("NumPy version:", np.__version__) + print("Pandas version:", pd.__version__) + print("PyTorch version:", torch.__version__) + print("Diffusers version:", diffusers.__version__) # Corrected: Use the library's __version__ + print("Transformers version:", transformers.__version__) # Corrected: Use the library's __version__ + print("Gradio version:", gr.__version__) + print("Pillow version:", 
PILImage.__version__) + + + # # Run the function locally (for testing) +# if __name__ == "__main__": +# print("Running the function locally...") +# main.local() + + + + + + +image = ( + modal.Image.debian_slim(python_version="3.9") # Base image \ No newline at end of file diff --git a/examples/example_img_gen.py b/examples/example_img_gen.py new file mode 100644 index 0000000000000000000000000000000000000000..2339c9923348d5763cf92765969dbc6be867f9e6 --- /dev/null +++ b/examples/example_img_gen.py @@ -0,0 +1,79 @@ +#img_gen_modal.py +import modal +import random +from datetime import datetime +import random +import io +from config.config import prompts, models # Indirect import +import os +import torch +from huggingface_hub import login +from transformers import AutoTokenizer + +# Define the Modal image +image = ( + #modal.Image.from_registry("nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.9") + modal.Image.debian_slim(python_version="3.9") # Base image + + .apt_install( + "git", + ) + .pip_install( + "diffusers", + "transformers", + "torch", + "accelerate", + "gradio>=4.44.1", + "safetensors", + "pillow", + "sentencepiece", + "hf_transfer", + "huggingface_hub[hf_transfer]", + "aria2", # aria2 for ultra-fast parallel downloads + f"git+https://github.com/huggingface/transformers.git" + ) + .env( + { + "HF_HUB_ENABLE_HF_TRANSFER": "1", "HF_HOME": "HF_HOME" + } + ) +) + +# Create a Modal app +app = modal.App("img-gen-modal", image=image) +with image.imports(): + import diffusers + import os + import gradio + import torch + import sentencepiece + +#flux_model_vol = modal.Volume.from_name("flux-model-vol", create_if_missing=True) # Reference your volume + +@app.function( + secrets=[modal.Secret.from_name("huggingface-token")], + #volumes={"/data": flux_model_vol}, + gpu="t4", + timeout=600 + ) + +def generate_image(): + import torch + from diffusers import FluxPipeline + + pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16) + + prompt = "A cat holding a sign that says hello world" + image = pipe( + prompt, + height=1024, + width=1024, + guidance_scale=3.5, + num_inference_steps=50, + max_sequence_length=512, + generator=torch.Generator("cpu").manual_seed(0) + ).images[0] + image.save("flux-dev.png") + +generate_image() + diff --git a/examples/example_loading_model.py b/examples/example_loading_model.py new file mode 100644 index 0000000000000000000000000000000000000000..32fa56f5eb494643cc034c73dbf03e3c9fb4b0a0 --- /dev/null +++ b/examples/example_loading_model.py @@ -0,0 +1,32 @@ +import gradio as gr +import modal +from modal import App, Image, Volume +from transformers import AutoModel, AutoTokenizer +import os + +app = App("gradio-app") +volume = Volume.from_name("flux-model-vol-2") +image = Image.debian_slim().pip_install("transformers", "torch", "sentencepiece", "gradio") + +@app.function(image=image, volumes={"/data": volume}) +def load_model(): + model_name = "FLUX.1-dev" + cache_dir = f"/data/{model_name}" + + print(f"Loading model {model_name} from cache...") + model = AutoModel.from_pretrained(cache_dir) + tokenizer = AutoTokenizer.from_pretrained(cache_dir) + + print(f"Model {model_name} loaded successfully!") + return model, tokenizer + +def predict(input_text): + model, tokenizer = load_model.remote() + inputs = tokenizer(input_text, return_tensors="pt") + outputs = model(**inputs) + return tokenizer.decode(outputs.logits.argmax(dim=-1)[0]) + +if __name__ == "__main__": + with app.run(): + iface = gr.Interface(fn=predict, 
inputs="text", outputs="text") + iface.launch() \ No newline at end of file diff --git a/examples/example_output_dir.py b/examples/example_output_dir.py new file mode 100644 index 0000000000000000000000000000000000000000..d5abd8209dd392ddb9d16c39d41117634fe4f751 --- /dev/null +++ b/examples/example_output_dir.py @@ -0,0 +1,25 @@ +import pathlib + +volume = modal.Volume.from_name("my-volume") +VOL_MOUNT_PATH = pathlib.Path("/vol") + +@app.function( + gpu="A10G", + timeout=2 * 60 * 60, # run for at most two hours + volumes={VOL_MOUNT_PATH: volume}, +) +def finetune(): + from transformers import Seq2SeqTrainer + ... + + training_args = Seq2SeqTrainingArguments( + output_dir=str(VOL_MOUNT_PATH / "model"), + # ... more args here + ) + + trainer = Seq2SeqTrainer( + model=model, + args=training_args, + train_dataset=tokenized_xsum_train, + eval_dataset=tokenized_xsum_test, + ) \ No newline at end of file diff --git a/examples/functions.py b/examples/functions.py new file mode 100644 index 0000000000000000000000000000000000000000..885cc10dda82101761c62b07181a1241d906f655 --- /dev/null +++ b/examples/functions.py @@ -0,0 +1,69 @@ +import modal + +# Print debug information +print("Importing Modal and setting up the app...") + +# Define a custom image with Python and some dependencies +print("Building custom image...") +image = ( + modal.Image.debian_slim(python_version="3.11") # Base image + .pip_install( + "numpy", + "pandas", + "diffusers", + "transformers", + "torch", + "accelerate", + "gradio", + "safetensors", + "pillow", + ) # Install Python packages + .run_commands("echo 'Image build complete!'") # Run a shell command +) + +# Define a function to run inside the container +#@app.function(image=image) + +# Define the Modal app +app = modal.App("functions-app") + +@app.function() +def message_func (message = "default"): + print("message function") + new_message = message + " ok, it works!" + return new_message + + + +@app.local_entrypoint() +def main(): + # Import libraries and print their versions + # import numpy as np + # import pandas as pd + # import torch + # import diffusers + # import transformers + # import gradio as gr + # from PIL import Image as PILImage + + # print("def main function") + # print("Hello from Modal!") + # print("NumPy version:", np.__version__) + # print("Pandas version:", pd.__version__) + # print("PyTorch version:", torch.__version__) + # print("Diffusers version:", diffusers.__version__) # Corrected: Use the library's __version__ + # print("Transformers version:", transformers.__version__) # Corrected: Use the library's __version__ + # print("Gradio version:", gr.__version__) + # print("Pillow version:", PILImage.__version__) + + remote_message = "remote message!" 
+ local_message = "local message" + message_func.remote(remote_message) + message_func.local(local_message) + + +# # # Run the function locally (for testing) +# if __name__ == "__main__": +# print("Running the function locally...") +# main.local() +# main.remote() diff --git a/examples/modal_functions_remote_call.py b/examples/modal_functions_remote_call.py new file mode 100644 index 0000000000000000000000000000000000000000..f76c9aacbf9c2f0bfcb74d2195c42c8c6b757978 --- /dev/null +++ b/examples/modal_functions_remote_call.py @@ -0,0 +1,51 @@ +import modal + +image = ( + modal.Image.debian_slim(python_version="3.11") # Base image + .pip_install( + "numpy", + "pandas", + "diffusers", + "transformers", + "torch", + "accelerate", + "gradio", + "safetensors", + "pillow", + ) # Install Python packages + .run_commands("echo 'Image build complete!'") # Run a shell command +) + +app = modal.App("functions-call-app", image=image) +@app.function() +def main(): + #Import libraries and print their versions + import numpy as np + import pandas as pd + import torch + import diffusers + import transformers + import gradio as gr + from PIL import Image as PILImage + + print("def main function") + print("Hello from Modal!") + print("NumPy version:", np.__version__) + print("Pandas version:", pd.__version__) + print("PyTorch version:", torch.__version__) + print("Diffusers version:", diffusers.__version__) # Corrected: Use the library's __version__ + print("Transformers version:", transformers.__version__) # Corrected: Use the library's __version__ + print("Gradio version:", gr.__version__) + print("Pillow version:", PILImage.__version__) + + f = modal.Function.from_name("functions-app", "message_func") + messageNEW = "Remote call Hello World!" + messageTEMP = "TEMP" + result = f.remote(messageNEW) + print(result) + +# # Run the function locally (for testing) +if __name__ == "__main__": + print("Running the function locally...") + main.local() + main.remote() diff --git a/examples/modal_image_header.py b/examples/modal_image_header.py new file mode 100644 index 0000000000000000000000000000000000000000..3011bb1875cf96003a2c8c1f29cbc58fafe9e642 --- /dev/null +++ b/examples/modal_image_header.py @@ -0,0 +1,58 @@ +# Define the Modal image +image = ( + modal.Image.from_registry("nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.9") + .apt_install( + "git", + ) + .pip_install( + "diffusers", + "transformers", + "torch", + "accelerate", + "gradio>=4.44.1", + "safetensors", + "pillow", + "sentencepiece", + "hf_transfer", + "huggingface_hub[hf_transfer]", + "aria2", # aria2 for ultra-fast parallel downloads + ) + .env( + { + "HF_HUB_ENABLE_HF_TRANSFER": "1", "HF_HOME": "HF_HOME" + } + ) +) + +# Create a Modal app +app = modal.App("img-gen-modal", image=image) +with image.imports(): + import diffusers + import os + import gradio + import torch + import sentencepiece + import transformers + from huggingface_hub import InferenceClient, login + + +@app.function( + secrets=[modal.Secret.from_name("huggingface-token")], + gpu="t4", + timeout=600 +) +def generate_image + + +# Define the Modal image +image = ( + modal.Image.from_registry("nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.9") + #modal.Image.debian_slim(python_version="3.9") # Base image + + .apt_install( + "git", + ) + .pip_install( + "torch" + ) +) \ No newline at end of file diff --git a/examples/modal_vol_data_access.py b/examples/modal_vol_data_access.py new file mode 100644 index 
0000000000000000000000000000000000000000..403ac19067a7ac162d40a3b5ec768ccbdfa4adb4 --- /dev/null +++ b/examples/modal_vol_data_access.py @@ -0,0 +1,36 @@ +# img_gen.py +#img_gen_modal.py +import modal +import os +import shutil + + +# Define the Modal image +image = ( + modal.Image.debian_slim(python_version="3.9") + ) + +# Create a Modal app +app = modal.App("img-see-vol-data", image=image) + +flux_model_vol = modal.Volume.from_name("flux-model-vol",create_if_missing=False) # Reference your volume + +@app.function(volumes={"/data": flux_model_vol} + ) +def main(): + + # Define where to store the model + download_model_name = "black-forest-labs/FLUX.1-dev" # e.g., "stabilityai/stable-diffusion-2" + local_dir = "data/FLUX.1-dev" + + + model_path = "/data" + if os.path.exists(model_path): + print(f"Contents of {model_path}:") + print(os.listdir(model_path)) + else: + print(f"Model path {model_path} does not exist.") + + + + diff --git a/images/.DS_Store b/images/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..6a624842020acc653c3a8de4d1dcf4dfeb6f2738 Binary files /dev/null and b/images/.DS_Store differ diff --git a/index.html b/index.html index 7c4a013e52c76442ab80ee5572399a30373600a2..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 100644 --- a/index.html +++ b/index.html @@ -1 +0,0 @@ -aaa \ No newline at end of file diff --git a/index.php b/index.php new file mode 100644 index 0000000000000000000000000000000000000000..3a5a1ea039757224c950c2cfe513e76068421582 --- /dev/null +++ b/index.php @@ -0,0 +1,190 @@ + + + + + + + + Dark Theme Image Gallery + + +aa + + + + + + + + + \ No newline at end of file diff --git a/index_reverse.php b/index_reverse.php new file mode 100644 index 0000000000000000000000000000000000000000..9bf1fb977e7535a6c34268593532688411aa9860 --- /dev/null +++ b/index_reverse.php @@ -0,0 +1,188 @@ + + + + + + + + Dark Theme Image Gallery + + + + + + + + + + + + \ No newline at end of file diff --git a/live-preview-example/.DS_Store b/live-preview-example/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..ef0cfe0f40ae89d7d4a6421960ac93101d7fd221 Binary files /dev/null and b/live-preview-example/.DS_Store differ diff --git a/live-preview-example/app_live_bones_text.txt b/live-preview-example/app_live_bones_text.txt new file mode 100644 index 0000000000000000000000000000000000000000..a8eef73e4a67a50dba9b055dfb1abe3f016871fa --- /dev/null +++ b/live-preview-example/app_live_bones_text.txt @@ -0,0 +1,194 @@ +import gradio as gr +import numpy as np +import random +import torch +from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler, AutoencoderTiny, AutoencoderKL +from transformers import CLIPTextModel, CLIPTokenizer,T5EncoderModel, T5TokenizerFast +from live_preview_helpers import calculate_shift, retrieve_timesteps, flux_pipe_call_that_returns_an_iterable_of_images +import modal +import random +import io +from config.config import prompts, models # Indirect import +import os +import sentencepiece +from huggingface_hub import login +from transformers import AutoTokenizer +from datetime import datetime + +dtype = torch.bfloat16 +device = "cuda" if torch.cuda.is_available() else "cpu" + + + +MAX_SEED = np.iinfo(np.int32).max +MAX_IMAGE_SIZE = 2048 + + + + +CACHE_DIR = "/model_cache" + +# Define the Modal image +image = ( + modal.Image.from_registry("nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.9") + .pip_install_from_requirements("requirements.txt") + #modal.Image.debian_slim(python_version="3.9") # Base 
image + # .apt_install( + # "git", + # ) + # .pip_install( + # "diffusers", + # f"git+https://github.com/huggingface/transformers.git" + # ) + .env( + { + "HF_HUB_ENABLE_HF_TRANSFER": "1", "HF_HOME": "HF_HOME", "HF_HUB_CACHE": CACHE_DIR + } + ) +) + +# Create a Modal app +app = modal.App("img-gen-modal-live", image=image) +with image.imports(): + import os + +flux_model_vol = modal.Volume.from_name("flux-model-vol", create_if_missing=True) # Reference your volume + + +# GPU FUNCTION +@app.function(volumes={"/data": flux_model_vol}, + secrets=[modal.Secret.from_name("huggingface-token")], + gpu="L40S", + timeout = 300 + ) +def infer(prompt, seed=42, randomize_seed=False, width=640, height=360, guidance_scale=3.5, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)): + taef1 = AutoencoderTiny.from_pretrained("/data/taef1", torch_dtype=dtype).to(device) + good_vae = AutoencoderKL.from_pretrained("/data/FLUX.1-dev", subfolder="vae", torch_dtype=dtype).to(device) + pipe = DiffusionPipeline.from_pretrained("/data/FLUX.1-dev", torch_dtype=dtype, vae=taef1).to(device) + torch.cuda.empty_cache() + + pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe) + + if randomize_seed: + seed = random.randint(0, MAX_SEED) + generator = torch.Generator().manual_seed(seed) + + for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images( + prompt=prompt, + guidance_scale=guidance_scale, + num_inference_steps=num_inference_steps, + width=width, + height=height, + generator=generator, + output_type="pil", + good_vae=good_vae, + ): + yield (img, seed) + + +examples = [ + "a tiny astronaut hatching from an egg on the moon", + "a cat holding a sign that says hello world", + "an anime illustration of a wiener schnitzel", +] + +css=""" +#col-container { + margin: 0 auto; + max-width: 520px; +} +""" +print("Initializing HF TOKEN") +hf_token = os.environ["HF_TOKEN"] +print(hf_token) +print("HF TOKEN:") +login(token=hf_token) + + +with gr.Blocks(css=css) as demo: + f = modal.Function.from_name("img-gen-modal-live", "infer") + + with gr.Column(elem_id="col-container"): + gr.Markdown(f"""# FLUX.1 [dev] +12B param rectified flow transformer guidance-distilled from [FLUX.1 [pro]](https://blackforestlabs.ai/) +[[non-commercial license](https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md)] [[blog](https://blackforestlabs.ai/announcing-black-forest-labs/)] [[model](https://huggingface.co/black-forest-labs/FLUX.1-dev)] + """) + + with gr.Row(): + + prompt = gr.Text( + label="Prompt", + show_label=False, + max_lines=1, + placeholder="Enter your prompt", + container=False, + ) + + run_button = gr.Button("Run", scale=0) + + result = gr.Image(label="Result", show_label=False) + + with gr.Accordion("Advanced Settings", open=False): + + seed = gr.Slider( + label="Seed", + minimum=0, + maximum=MAX_SEED, + step=1, + value=0, + ) + + randomize_seed = gr.Checkbox(label="Randomize seed", value=True) + + with gr.Row(): + + width = gr.Slider( + label="Width", + minimum=256, + maximum=MAX_IMAGE_SIZE, + step=32, + value=640, + ) + + height = gr.Slider( + label="Height", + minimum=256, + maximum=MAX_IMAGE_SIZE, + step=32, + value=360, + ) + + with gr.Row(): + + guidance_scale = gr.Slider( + label="Guidance Scale", + minimum=1, + maximum=15, + step=0.1, + value=3.5, + ) + + num_inference_steps = gr.Slider( + label="Number of inference steps", + minimum=1, + maximum=50, + step=1, + value=28, + ) + + gr.Examples( + examples = examples, + fn = 
f.remote, + inputs = [prompt], + outputs = [result, seed], + cache_examples="lazy" + ) + + gr.on( + triggers=[run_button.click, prompt.submit], + fn = f.remote_gen, + inputs = [prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps], + outputs = [result, seed] + ) + +demo.launch() \ No newline at end of file diff --git a/live-preview-example/app_live_example3.py b/live-preview-example/app_live_example3.py new file mode 100644 index 0000000000000000000000000000000000000000..f951c944d2291548095f33409a206973f9de5a62 --- /dev/null +++ b/live-preview-example/app_live_example3.py @@ -0,0 +1,193 @@ +import gradio as gr +import numpy as np +import random +import torch +from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler, AutoencoderTiny, AutoencoderKL +from transformers import CLIPTextModel, CLIPTokenizer,T5EncoderModel, T5TokenizerFast +from live_preview_helpers import calculate_shift, retrieve_timesteps, flux_pipe_call_that_returns_an_iterable_of_images +import modal +import random +import io +from config.config import prompts, models # Indirect import +import os +import sentencepiece +from huggingface_hub import login +from transformers import AutoTokenizer +from datetime import datetime + + + + +CACHE_DIR = "/model_cache" + +# Define the Modal image +image = ( + modal.Image.from_registry("nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.9") + .pip_install_from_requirements("requirements.txt") + #modal.Image.debian_slim(python_version="3.9") # Base image + # .apt_install( + # "git", + # ) + # .pip_install( + # "diffusers", + # f"git+https://github.com/huggingface/transformers.git" + # ) + .env( + { + "HF_HUB_ENABLE_HF_TRANSFER": "1", "HF_HOME": "HF_HOME", "HF_HUB_CACHE": CACHE_DIR + } + ) +) +MAX_SEED = np.iinfo(np.int32).max +MAX_IMAGE_SIZE = 2048 +# Create a Modal app +app = modal.App("img-gen-modal-live-example3", image=image) +with image.imports(): + import os + +flux_model_vol = modal.Volume.from_name("flux-model-vol", create_if_missing=True) # Reference your volume + + +# GPU FUNCTION +@app.function(volumes={"/data": flux_model_vol}, + secrets=[modal.Secret.from_name("huggingface-token")], + gpu="L40S", + timeout = 300 + ) +def infer(prompt="horse on the moon", seed=42, randomize_seed=False, width=640, height=360, guidance_scale=3.5, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)): + + dtype = torch.bfloat16 + device = "cuda" if torch.cuda.is_available() else "cpu" + + + + taef1 = AutoencoderTiny.from_pretrained("/data/taef1", torch_dtype=dtype).to(device) + good_vae = AutoencoderKL.from_pretrained("/data/FLUX.1-dev", subfolder="vae", torch_dtype=dtype).to(device) + pipe = DiffusionPipeline.from_pretrained("/data/FLUX.1-dev", torch_dtype=dtype, vae=taef1).to(device) + torch.cuda.empty_cache() + + pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe) + + + seed = random.randint(0, MAX_SEED) + generator = torch.Generator().manual_seed(seed) + + for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images( + prompt=prompt, + guidance_scale=guidance_scale, + num_inference_steps=num_inference_steps, + width=width, + height=height, + generator=generator, + output_type="pil", + good_vae=good_vae, + ): + yield (img, seed) + + +examples = [ + "a tiny astronaut hatching from an egg on the moon", + "a cat holding a sign that says hello world", + "an anime illustration of a wiener schnitzel", +] + +css=""" +#col-container { + margin: 0 auto; + max-width: 520px; +} 
+""" +# print("Initializing HF TOKEN") +# hf_token = os.environ["HF_TOKEN"] +# print(hf_token) +# print("HF TOKEN:") +# login(token=hf_token) + + +with gr.Blocks(css=css) as demo: + f = modal.Function.from_name("img-gen-modal-live-example3", "infer") + + with gr.Column(elem_id="col-container"): + gr.Markdown(f"""# FLUX.1 [dev] +12B param rectified flow transformer guidance-distilled from [FLUX.1 [pro]](https://blackforestlabs.ai/) +[[non-commercial license](https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md)] [[blog](https://blackforestlabs.ai/announcing-black-forest-labs/)] [[model](https://huggingface.co/black-forest-labs/FLUX.1-dev)] + """) + + with gr.Row(): + + prompt = gr.Text( + label="Prompt", + show_label=False, + max_lines=1, + placeholder="Enter your prompt", + container=False, + ) + + run_button = gr.Button("Run", scale=0) + + result = gr.Image(label="Result", show_label=False) + + with gr.Accordion("Advanced Settings", open=False): + + seed = gr.Slider( + label="Seed", + minimum=0, + maximum=MAX_SEED, + step=1, + value=0, + ) + + randomize_seed = gr.Checkbox(label="Randomize seed", value=True) + + with gr.Row(): + + width = gr.Slider( + label="Width", + minimum=256, + maximum=MAX_IMAGE_SIZE, + step=32, + value=640, + ) + + height = gr.Slider( + label="Height", + minimum=256, + maximum=MAX_IMAGE_SIZE, + step=32, + value=360, + ) + + with gr.Row(): + + guidance_scale = gr.Slider( + label="Guidance Scale", + minimum=1, + maximum=15, + step=0.1, + value=3.5, + ) + + num_inference_steps = gr.Slider( + label="Number of inference steps", + minimum=1, + maximum=50, + step=1, + value=28, + ) + + gr.Examples( + examples = examples, + fn = f.remote, + inputs = [prompt], + outputs = [result, seed], + cache_examples="lazy" + ) + + gr.on( + triggers=[run_button.click, prompt.submit], + fn = f.remote_gen, + inputs = [prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps], + outputs = [result, seed] + ) + +demo.launch() \ No newline at end of file diff --git a/live-preview-example/app_live_example4.py b/live-preview-example/app_live_example4.py new file mode 100644 index 0000000000000000000000000000000000000000..70f6f3904e9ed0313a4f9be9cbf54ab546a08235 --- /dev/null +++ b/live-preview-example/app_live_example4.py @@ -0,0 +1,97 @@ +import gradio as gr +import numpy as np +import random +import torch +from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler, AutoencoderTiny, AutoencoderKL +from transformers import CLIPTextModel, CLIPTokenizer,T5EncoderModel, T5TokenizerFast +from live_preview_helpers import calculate_shift, retrieve_timesteps, flux_pipe_call_that_returns_an_iterable_of_images +import modal +import random +import io +from config.config import prompts, models # Indirect import +import os +import sentencepiece +from huggingface_hub import login +from transformers import AutoTokenizer +from datetime import datetime +from PIL import Image + + + +CACHE_DIR = "/model_cache" + +# Define the Modal image +image = ( + modal.Image.from_registry("nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.9") + .pip_install_from_requirements("requirements.txt") + #modal.Image.debian_slim(python_version="3.9") # Base image + # .apt_install( + # "git", + # ) + # .pip_install( + # "diffusers", + # f"git+https://github.com/huggingface/transformers.git" + # ) + .env( + { + "HF_HUB_ENABLE_HF_TRANSFER": "1", "HF_HOME": "HF_HOME", "HF_HUB_CACHE": CACHE_DIR + } + ) +) + +# Create a Modal app +app = modal.App("img-gen-modal-live", image=image) 
+with image.imports(): + import os + +flux_model_vol = modal.Volume.from_name("flux-model-vol", create_if_missing=True) # Reference your volume + + +# GPU FUNCTION +@app.function(volumes={"/data": flux_model_vol}, + secrets=[modal.Secret.from_name("huggingface-token")], + gpu="L40S", + timeout = 300 + ) +def main(): + + def latents_to_rgb(latents): + weights = ( + (60, -60, 25, -70), + (60, -5, 15, -50), + (60, 10, -5, -35), + ) + + weights_tensor = torch.t(torch.tensor(weights, dtype=latents.dtype).to(latents.device)) + biases_tensor = torch.tensor((150, 140, 130), dtype=latents.dtype).to(latents.device) + rgb_tensor = torch.einsum("...lxy,lr -> ...rxy", latents, weights_tensor) + biases_tensor.unsqueeze(-1).unsqueeze(-1) + image_array = rgb_tensor.clamp(0, 255).byte().cpu().numpy().transpose(1, 2, 0) + + return Image.fromarray(image_array) + + def decode_tensors(pipe, step, timestep, callback_kwargs): + latents = callback_kwargs["latents"] + + image = latents_to_rgb(latents[0]) + image.save(f"{step}.png") + + return callback_kwargs + model_name = "FLUX.1-dev" + model_path = f"/data/{model_name}" + + pipeline = DiffusionPipeline.from_pretrained( + model_path, + torch_dtype=torch.bfloat16, + use_safetensors=True + ).to("cuda") + + image = pipeline( + prompt="A croissant shaped like a cute bear.", + negative_prompt="Deformed, ugly, bad anatomy", + width=300, + height=200, + callback_on_step_end=decode_tensors, + callback_on_step_end_tensor_inputs=["latents"], + ).images[0] + + \ No newline at end of file diff --git a/live-preview-example/example1/app.py b/live-preview-example/example1/app.py new file mode 100644 index 0000000000000000000000000000000000000000..110a331d45bcb7b82f3d1d257f57f73676cfe226 --- /dev/null +++ b/live-preview-example/example1/app.py @@ -0,0 +1,141 @@ +###### GOT IT FROM https://huggingface.co/spaces/MohamedRashad/Flux-Redux/commit/457f14925b8158597bd551cd6dba8458caf5ef9b + +import gradio as gr +import numpy as np +import random +#import spaces +import torch +from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler, AutoencoderTiny, AutoencoderKL +from transformers import CLIPTextModel, CLIPTokenizer,T5EncoderModel, T5TokenizerFast +from live_preview_helpers import calculate_shift, retrieve_timesteps, flux_pipe_call_that_returns_an_iterable_of_images + +dtype = torch.bfloat16 +device = "cuda" if torch.cuda.is_available() else "cpu" + +taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device) +good_vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="vae", torch_dtype=dtype).to(device) +pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=dtype, vae=taef1).to(device) +torch.cuda.empty_cache() + +MAX_SEED = np.iinfo(np.int32).max +MAX_IMAGE_SIZE = 2048 + +pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe) + +#@spaces.GPU(duration=75) +def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, guidance_scale=3.5, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)): + if randomize_seed: + seed = random.randint(0, MAX_SEED) + generator = torch.Generator().manual_seed(seed) + + for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images( + prompt=prompt, + guidance_scale=guidance_scale, + num_inference_steps=num_inference_steps, + width=width, + height=height, + generator=generator, + output_type="pil", + good_vae=good_vae, + ): + yield img, seed + +examples = [ + "a 
tiny astronaut hatching from an egg on the moon", + "a cat holding a sign that says hello world", + "an anime illustration of a wiener schnitzel", +] + +css=""" +#col-container { + margin: 0 auto; + max-width: 520px; +} +""" + +with gr.Blocks(css=css) as demo: + + with gr.Column(elem_id="col-container"): + gr.Markdown(f"""# FLUX.1 [dev] +12B param rectified flow transformer guidance-distilled from [FLUX.1 [pro]](https://blackforestlabs.ai/) +[[non-commercial license](https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md)] [[blog](https://blackforestlabs.ai/announcing-black-forest-labs/)] [[model](https://huggingface.co/black-forest-labs/FLUX.1-dev)] + """) + + with gr.Row(): + + prompt = gr.Text( + label="Prompt", + show_label=False, + max_lines=1, + placeholder="Enter your prompt", + container=False, + ) + + run_button = gr.Button("Run", scale=0) + + result = gr.Image(label="Result", show_label=False) + + with gr.Accordion("Advanced Settings", open=False): + + seed = gr.Slider( + label="Seed", + minimum=0, + maximum=MAX_SEED, + step=1, + value=0, + ) + + randomize_seed = gr.Checkbox(label="Randomize seed", value=True) + + with gr.Row(): + + width = gr.Slider( + label="Width", + minimum=256, + maximum=MAX_IMAGE_SIZE, + step=32, + value=1024, + ) + + height = gr.Slider( + label="Height", + minimum=256, + maximum=MAX_IMAGE_SIZE, + step=32, + value=1024, + ) + + with gr.Row(): + + guidance_scale = gr.Slider( + label="Guidance Scale", + minimum=1, + maximum=15, + step=0.1, + value=3.5, + ) + + num_inference_steps = gr.Slider( + label="Number of inference steps", + minimum=1, + maximum=50, + step=1, + value=28, + ) + + gr.Examples( + examples = examples, + fn = infer, + inputs = [prompt], + outputs = [result, seed], + cache_examples="lazy" + ) + + gr.on( + triggers=[run_button.click, prompt.submit], + fn = infer, + inputs = [prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps], + outputs = [result, seed] + ) + +demo.launch() \ No newline at end of file diff --git a/live-preview-example/example1/live_preview_helpers.py b/live-preview-example/example1/live_preview_helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..1aa8143aa10b546c9db9d2392e07816df6666cd7 --- /dev/null +++ b/live-preview-example/example1/live_preview_helpers.py @@ -0,0 +1,166 @@ +import torch +import numpy as np +from diffusers import FluxPipeline, AutoencoderTiny, FlowMatchEulerDiscreteScheduler +from typing import Any, Dict, List, Optional, Union + +# Helper functions +def calculate_shift( + image_seq_len, + base_seq_len: int = 256, + max_seq_len: int = 4096, + base_shift: float = 0.5, + max_shift: float = 1.16, +): + m = (max_shift - base_shift) / (max_seq_len - base_seq_len) + b = base_shift - m * base_seq_len + mu = image_seq_len * m + b + return mu + +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. 
Please choose one to set custom values") + if timesteps is not None: + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + +# FLUX pipeline function +@torch.inference_mode() +def flux_pipe_call_that_returns_an_iterable_of_images( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 28, + timesteps: List[int] = None, + guidance_scale: float = 3.5, + num_images_per_prompt: Optional[int] = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + joint_attention_kwargs: Optional[Dict[str, Any]] = None, + max_sequence_length: int = 512, + good_vae: Optional[Any] = None, +): + height = height or self.default_sample_size * self.vae_scale_factor + width = width or self.default_sample_size * self.vae_scale_factor + + # 1. Check inputs + self.check_inputs( + prompt, + prompt_2, + height, + width, + prompt_embeds=prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + max_sequence_length=max_sequence_length, + ) + + self._guidance_scale = guidance_scale + self._joint_attention_kwargs = joint_attention_kwargs + self._interrupt = False + + # 2. Define call parameters + batch_size = 1 if isinstance(prompt, str) else len(prompt) + device = self._execution_device + + # 3. Encode prompt + lora_scale = joint_attention_kwargs.get("scale", None) if joint_attention_kwargs is not None else None + prompt_embeds, pooled_prompt_embeds, text_ids = self.encode_prompt( + prompt=prompt, + prompt_2=prompt_2, + prompt_embeds=prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + device=device, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + lora_scale=lora_scale, + ) + # 4. Prepare latent variables + num_channels_latents = self.transformer.config.in_channels // 4 + latents, latent_image_ids = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + # 5. Prepare timesteps + sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps) + image_seq_len = latents.shape[1] + mu = calculate_shift( + image_seq_len, + self.scheduler.config.base_image_seq_len, + self.scheduler.config.max_image_seq_len, + self.scheduler.config.base_shift, + self.scheduler.config.max_shift, + ) + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, + num_inference_steps, + device, + timesteps, + sigmas, + mu=mu, + ) + self._num_timesteps = len(timesteps) + + # Handle guidance + guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32).expand(latents.shape[0]) if self.transformer.config.guidance_embeds else None + + # 6. 
Denoising loop + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + timestep = t.expand(latents.shape[0]).to(latents.dtype) + + noise_pred = self.transformer( + hidden_states=latents, + timestep=timestep / 1000, + guidance=guidance, + pooled_projections=pooled_prompt_embeds, + encoder_hidden_states=prompt_embeds, + txt_ids=text_ids, + img_ids=latent_image_ids, + joint_attention_kwargs=self.joint_attention_kwargs, + return_dict=False, + )[0] + # Yield intermediate result + latents_for_image = self._unpack_latents(latents, height, width, self.vae_scale_factor) + latents_for_image = (latents_for_image / self.vae.config.scaling_factor) + self.vae.config.shift_factor + image = self.vae.decode(latents_for_image, return_dict=False)[0] + yield self.image_processor.postprocess(image, output_type=output_type)[0] + + latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] + torch.cuda.empty_cache() + + # Final image using good_vae + latents = self._unpack_latents(latents, height, width, self.vae_scale_factor) + latents = (latents / good_vae.config.scaling_factor) + good_vae.config.shift_factor + image = good_vae.decode(latents, return_dict=False)[0] + self.maybe_free_model_hooks() + torch.cuda.empty_cache() + yield self.image_processor.postprocess(image, output_type=output_type)[0] \ No newline at end of file diff --git a/live-preview-example/example2/live_preview_working_code.py b/live-preview-example/example2/live_preview_working_code.py new file mode 100644 index 0000000000000000000000000000000000000000..cf1f77638d911b3c544d75776d32e710dd538bb7 --- /dev/null +++ b/live-preview-example/example2/live_preview_working_code.py @@ -0,0 +1,58 @@ +##### GOT IT FROM https://github.com/huggingface/diffusers/issues/3579 + + +import torch +import torchvision +from PIL import Image +from diffusers import EulerDiscreteScheduler, StableDiffusionPipeline + +device = "cuda" if torch.cuda.is_available() else "cpu" + +pipe = StableDiffusionPipeline.from_pretrained( + "prompthero/openjourney-v4", torch_dtype=torch.float16, safety_checker=None) +pipe = pipe.to(device) +pipe.enable_attention_slicing() +pipe.scheduler = EulerDiscreteScheduler.from_config( + pipe.scheduler.config, use_karras_sigmas=True +) + +prompt = "A futuristic cityscape at sunset" + +negative_prompt = "low quality" + +# num_images_per_prompt=4, + +def progress(step, timestep, latents): + print(step, timestep, latents[0][0][0][0]) + + with torch.no_grad(): + + latents = 1 / 0.18215 * latents + image = pipe.vae.decode(latents).sample + + image = (image / 2 + 0.5).clamp(0, 1) + + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + # convert to PIL Images + image = pipe.numpy_to_pil(image) + + # do something with the Images + for i, img in enumerate(image): + img.save(f"step_{step}_img{i}.png") + + +result = pipe(prompt=prompt, + num_inference_steps=20, + height=512, width=512, + guidance_scale=7, + negative_prompt=negative_prompt, + callback=progress, + callback_steps=5 + ) + +image = result.images[0] + +image.save(f"outputs/cikar goster.png") +print(result.nsfw_content_detected) diff --git a/live-preview-example/example3_FLUX.1-dev_live_preview b/live-preview-example/example3_FLUX.1-dev_live_preview new file mode 160000 index 0000000000000000000000000000000000000000..2f733451dcd2c6690953bf03ced2b9d89e6546f3 --- /dev/null +++ b/live-preview-example/example3_FLUX.1-dev_live_preview @@ -0,0 +1 @@ 
+Subproject commit 2f733451dcd2c6690953bf03ced2b9d89e6546f3 diff --git a/live-preview-example/example4_official_REF/app.py b/live-preview-example/example4_official_REF/app.py new file mode 100644 index 0000000000000000000000000000000000000000..570802470371f583788cb61e74d184011dc8acb5 --- /dev/null +++ b/live-preview-example/example4_official_REF/app.py @@ -0,0 +1,41 @@ +def latents_to_rgb(latents): + weights = ( + (60, -60, 25, -70), + (60, -5, 15, -50), + (60, 10, -5, -35), + ) + + weights_tensor = torch.t(torch.tensor(weights, dtype=latents.dtype).to(latents.device)) + biases_tensor = torch.tensor((150, 140, 130), dtype=latents.dtype).to(latents.device) + rgb_tensor = torch.einsum("...lxy,lr -> ...rxy", latents, weights_tensor) + biases_tensor.unsqueeze(-1).unsqueeze(-1) + image_array = rgb_tensor.clamp(0, 255).byte().cpu().numpy().transpose(1, 2, 0) + + return Image.fromarray(image_array) + +def decode_tensors(pipe, step, timestep, callback_kwargs): + latents = callback_kwargs["latents"] + + image = latents_to_rgb(latents[0]) + image.save(f"{step}.png") + + return callback_kwargs + + + +from diffusers import AutoPipelineForText2Image +import torch +from PIL import Image + +pipeline = AutoPipelineForText2Image.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", + torch_dtype=torch.float16, + variant="fp16", + use_safetensors=True +).to("cuda") + +image = pipeline( + prompt="A croissant shaped like a cute bear.", + negative_prompt="Deformed, ugly, bad anatomy", + callback_on_step_end=decode_tensors, + callback_on_step_end_tensor_inputs=["latents"], +).images[0] \ No newline at end of file diff --git a/live_preview_helpers.py b/live_preview_helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..1aa8143aa10b546c9db9d2392e07816df6666cd7 --- /dev/null +++ b/live_preview_helpers.py @@ -0,0 +1,166 @@ +import torch +import numpy as np +from diffusers import FluxPipeline, AutoencoderTiny, FlowMatchEulerDiscreteScheduler +from typing import Any, Dict, List, Optional, Union + +# Helper functions +def calculate_shift( + image_seq_len, + base_seq_len: int = 256, + max_seq_len: int = 4096, + base_shift: float = 0.5, + max_shift: float = 1.16, +): + m = (max_shift - base_shift) / (max_seq_len - base_seq_len) + b = base_shift - m * base_seq_len + mu = image_seq_len * m + b + return mu + +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. 
Please choose one to set custom values") + if timesteps is not None: + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + +# FLUX pipeline function +@torch.inference_mode() +def flux_pipe_call_that_returns_an_iterable_of_images( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 28, + timesteps: List[int] = None, + guidance_scale: float = 3.5, + num_images_per_prompt: Optional[int] = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + joint_attention_kwargs: Optional[Dict[str, Any]] = None, + max_sequence_length: int = 512, + good_vae: Optional[Any] = None, +): + height = height or self.default_sample_size * self.vae_scale_factor + width = width or self.default_sample_size * self.vae_scale_factor + + # 1. Check inputs + self.check_inputs( + prompt, + prompt_2, + height, + width, + prompt_embeds=prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + max_sequence_length=max_sequence_length, + ) + + self._guidance_scale = guidance_scale + self._joint_attention_kwargs = joint_attention_kwargs + self._interrupt = False + + # 2. Define call parameters + batch_size = 1 if isinstance(prompt, str) else len(prompt) + device = self._execution_device + + # 3. Encode prompt + lora_scale = joint_attention_kwargs.get("scale", None) if joint_attention_kwargs is not None else None + prompt_embeds, pooled_prompt_embeds, text_ids = self.encode_prompt( + prompt=prompt, + prompt_2=prompt_2, + prompt_embeds=prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + device=device, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + lora_scale=lora_scale, + ) + # 4. Prepare latent variables + num_channels_latents = self.transformer.config.in_channels // 4 + latents, latent_image_ids = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + # 5. Prepare timesteps + sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps) + image_seq_len = latents.shape[1] + mu = calculate_shift( + image_seq_len, + self.scheduler.config.base_image_seq_len, + self.scheduler.config.max_image_seq_len, + self.scheduler.config.base_shift, + self.scheduler.config.max_shift, + ) + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, + num_inference_steps, + device, + timesteps, + sigmas, + mu=mu, + ) + self._num_timesteps = len(timesteps) + + # Handle guidance + guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32).expand(latents.shape[0]) if self.transformer.config.guidance_embeds else None + + # 6. 
Denoising loop + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + timestep = t.expand(latents.shape[0]).to(latents.dtype) + + noise_pred = self.transformer( + hidden_states=latents, + timestep=timestep / 1000, + guidance=guidance, + pooled_projections=pooled_prompt_embeds, + encoder_hidden_states=prompt_embeds, + txt_ids=text_ids, + img_ids=latent_image_ids, + joint_attention_kwargs=self.joint_attention_kwargs, + return_dict=False, + )[0] + # Yield intermediate result + latents_for_image = self._unpack_latents(latents, height, width, self.vae_scale_factor) + latents_for_image = (latents_for_image / self.vae.config.scaling_factor) + self.vae.config.shift_factor + image = self.vae.decode(latents_for_image, return_dict=False)[0] + yield self.image_processor.postprocess(image, output_type=output_type)[0] + + latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] + torch.cuda.empty_cache() + + # Final image using good_vae + latents = self._unpack_latents(latents, height, width, self.vae_scale_factor) + latents = (latents / good_vae.config.scaling_factor) + good_vae.config.shift_factor + image = good_vae.decode(latents, return_dict=False)[0] + self.maybe_free_model_hooks() + torch.cuda.empty_cache() + yield self.image_processor.postprocess(image, output_type=output_type)[0] \ No newline at end of file diff --git a/local_version/app_local.py b/local_version/app_local.py new file mode 100644 index 0000000000000000000000000000000000000000..0a2664575bba08de13b6e42c8349051a80304ea4 --- /dev/null +++ b/local_version/app_local.py @@ -0,0 +1,47 @@ +# app_local.py +import gradio as gr +from config.config import models, prompts, api_token # Direct import +from src.img_gen_local import generate_image + +# Gradio Interface +def gradio_interface(): + with gr.Blocks(css=""" + .output-image img { + width: 2500px; /* Force image to fill container width */ + object-fit: cover; /* ACTIVATE FOR IMAGE-FIT CONTAINER */ + } + """) as demo: + gr.Markdown("# CtB AI Image Generator - local version") + with gr.Row(): + # Set default values for dropdowns + prompt_dropdown = gr.Dropdown(choices=[p["alias"] for p in prompts], label="Select Prompt", value=prompts[0]["alias"]) + team_dropdown = gr.Dropdown(choices=["Red", "Blue"], label="Select Team", value="Red") + #model_dropdown = gr.Dropdown(choices=[m["alias"] for m in models], label="Select Model", value=models[0]["alias"]) + with gr.Row(): + # Add a text box for custom user input (max 200 characters) + custom_prompt_input = gr.Textbox(label="Custom Prompt (Optional)", placeholder="Enter additional details (max 200 chars)...", max_lines=1, max_length=200) + with gr.Row(): + generate_button = gr.Button("Generate Image") + with gr.Row(): + output_image = gr.Image(elem_classes="output-image", label="Generated Image", show_label=False, scale=1, width="100%") + with gr.Row(): + status_text = gr.Textbox(label="Status", placeholder="Waiting for input...", interactive=False) + + # Connect the button to the function + generate_button.click( + generate_image, + inputs=[prompt_dropdown, + team_dropdown, + custom_prompt_input, + #model_dropdown, + ], + outputs=[output_image, status_text] + ) + return demo + +# Create the demo instance +demo = gradio_interface() + +# Only launch if running directly +if __name__ == "__main__": + demo.queue().launch() \ No newline at end of file diff --git a/local_version/img_gen_local.py b/local_version/img_gen_local.py new file mode 100644 index 
0000000000000000000000000000000000000000..a4f3a92d94c31f02c5f8974a035f3934aa635d46 --- /dev/null +++ b/local_version/img_gen_local.py @@ -0,0 +1,172 @@ +#img_gen_modal.py +import modal +import random +import io +from config.config import prompts, models # Indirect import +import os +import gradio as gr + +#MOVED FROM IMAGE IMPORT LIST +import torch +import sentencepiece +import torch +from huggingface_hub import login +from transformers import AutoTokenizer +import random +from datetime import datetime +from diffusers import FluxPipeline, FluxTransformer2DModel, GGUFQuantizationConfig, AutoPipelineForText2Image +from src.check_dependecies import check_dependencies + +# MAIN GENERATE IMAGE FUNCTION +def generate_image( + prompt_alias, + team_color, + custom_prompt, + model_alias="FLUX.1-dev", + height=36, + width=64, + num_inference_steps=2, + guidance_scale=2.0, + seed=-1, + progress=gr.Progress(track_tqdm=True) # Add progress parameter + ): + print("Hello from ctb_local!") + + print("Running debug check...") + # Debug function to check installed packages + check_dependencies() + + # Find the selected prompt and model + try: + prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"] + model_name = next(m for m in models if m["alias"] == model_alias)["name"] + except StopIteration: + return None, "ERROR: Invalid prompt or model selected." + + # Determine the enemy color + enemy_color = "blue" if team_color.lower() == "red" else "red" + + # Print the original prompt and dynamic values for debugging + print("Original Prompt:") + print(prompt) + print(f"Enemy Color: {enemy_color}") + print(f"Team Color: {team_color.lower()}") + + prompt = prompt.format(team_color=team_color.lower(), enemy_color=enemy_color) + + # Print the formatted prompt for debugging + print("\nFormatted Prompt:") + print(prompt) + + # Append the custom prompt (if provided) + if custom_prompt and len(custom_prompt.strip()) > 0: + prompt += " " + custom_prompt.strip() + + # Randomize the seed if needed + if seed == -1: + seed = random.randint(0, 1000000) + + try: + print("Initializing HF TOKEN") + hf_token = os.environ["HF_TOKEN"] + print(hf_token) + print("HF TOKEN:") + login(token=hf_token) + print("model_name:") + print(model_name) + + # Use absolute path with leading slash + model_path = f"models/{model_alias}" + print(f"Loading model from local path: {model_path}") + + # Debug: Check if the directory exists and list its contents + if os.path.exists(model_path): + print("Directory exists. 
Contents:") + for item in os.listdir(model_path): + print(f" - {item}") + else: + # print(f"Directory does not exist: {local_path}") + print(f"Contents of {model_path}:") + # print(os.listdir("/data")) + # CHECK FOR TORCH USING CUDA + print("CHECK FOR TORCH USING CUDA") + print(f"CUDA available: {torch.cuda.is_available()}") + if torch.cuda.is_available(): + print("inside if") + print(f"CUDA device count: {torch.cuda.device_count()}") + print(f"Current device: {torch.cuda.current_device()}") + print(f"Device name: {torch.cuda.get_device_name(torch.cuda.current_device())}") + + ######### INITIALIZING CPU PIPE ########## + print("-----LOADING QUANTA-----") + ckpt_path = ( + "models/FLUX.1-dev-gguf/flux1-dev-Q2_K.gguf" + ) + transformer = FluxTransformer2DModel.from_single_file( + ckpt_path, + quantization_config=GGUFQuantizationConfig(compute_dtype=torch.bfloat16), + torch_dtype=torch.bfloat16, + ) + + print("-----INITIALIZING PIPE-----") + pipe = FluxPipeline.from_pretrained( + model_path, + transformer = transformer, + torch_dtype=torch.bfloat16, + #torch_dtype=torch.float16, + #torch_dtype=torch.float32, + local_files_only=True, + ) + if torch.cuda.is_available(): + print("CUDA available") + print("using gpu") + pipe = pipe.to("cuda") + pipe_message = "CUDA" + else: + print("CUDA not available") + print("using cpu") + pipe = pipe.to("cpu") + pipe_message = "CPU" + #pipe.enable_model_cpu_offload() # Use official recommended method + + print(f"-----{pipe_message} PIPE INITIALIZED-----") + print(f"Using device: {pipe.device}") + except Exception as e: + print(f"Detailed error: {str(e)}") + return None, f"ERROR: Failed to initialize PIPE2. Details: {e}" + try: + print("-----SENDING IMG GEN TO PIPE-----") + print("-----HOLD ON-----") + + ########## SENDING IMG GEN TO PIPE - WORKING CODE ########## + image = pipe( + prompt, + guidance_scale=guidance_scale, + num_inference_steps=num_inference_steps, + width=width, + height=height, + max_sequence_length=512, + # seed=seed + ).images[0] + ############################################################# + + print("-----IMAGE GENERATED SUCCESSFULLY!-----") + print(image) + + except Exception as e: + return f"ERROR: Failed to initialize InferenceClient. Details: {e}" + + try: + print("-----SAVING-----") + print("-----DONE!-----") + print("-----CALL THE BANNERS!-----") + # Save the image with a timestamped filename + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + output_filename = f"/images/{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team_color.lower()}.png" + # Save the image using PIL's save method + image.save(output_filename) + print(f"File path: {output_filename}") + except Exception as e: + print(f"ERROR: Failed to save image. 
Details: {e}") + # Return the filename and success message + return image \ No newline at end of file diff --git a/modal_bones/.DS_Store b/modal_bones/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..cd60f616d423f18666fbf53982de4ff4c4da72b4 Binary files /dev/null and b/modal_bones/.DS_Store differ diff --git a/modal_bones/app_modal_bones.py b/modal_bones/app_modal_bones.py new file mode 100644 index 0000000000000000000000000000000000000000..29b24ac3c9310724c07115e1c69e59ee5a854910 --- /dev/null +++ b/modal_bones/app_modal_bones.py @@ -0,0 +1,8 @@ +# app.py +#IMPORT gradio_interface +from src.gradio_interface_modal import demo +from config.config import models, prompts, api_token # Direct import + + +# Launch the Gradio app +demo.queue().launch() \ No newline at end of file diff --git a/modal_bones/config/modal_bones_config.py b/modal_bones/config/modal_bones_config.py new file mode 100644 index 0000000000000000000000000000000000000000..112a4df934bc423b571451a2774f5915b9f13661 --- /dev/null +++ b/modal_bones/config/modal_bones_config.py @@ -0,0 +1,73 @@ +# gradio_interface.py +import gradio as gr +import modal +from config.config import prompts, models # Indirect import +#from img_gen import generate_image + +print("Hello from gradio_interface_head!") + +# Modal remote function synchronously +def generate(prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input): + # Debug: Print a message instead of generating an image + debug_message = f"Debug: Button clicked! Inputs - Prompt: {prompt_dropdown}, Team: {team_dropdown}, Model: {model_dropdown}, Custom Prompt: {custom_prompt_input}" + print(debug_message) # Print to console for debugging + try: + # Import the remote function + f = modal.Function.from_name("img-gen-modal", "generate_image") + image_path, message = f.remote(prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input) + return image_path, message + except Exception as e: + return None, f"An error occurred: {e}" + +def generate_gpu(prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input): + # Debug: Print a message instead of generating an image + debug_message = f"Debug: Button clicked! 
Inputs - Prompt: {prompt_dropdown}, Team: {team_dropdown}, Model: {model_dropdown}, Custom Prompt: {custom_prompt_input}" + print(debug_message) # Print to console for debugging + try: + # Import the remote function + f = modal.Function.from_name("img-gen-modal-gpu", "generate_image") + image_path, message = f.remote(prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input) + return image_path, message + except Exception as e: + return None, f"An error occurred: {e}" + + +def gradio_interface_modal(): + with modal.enable_output(): + from config.config import prompts, models # Indirect import + # Gradio Interface + with gr.Blocks() as demo: + gr.Markdown("# CtB AI Image Generator") + with gr.Row(): + # Set default values for dropdowns + prompt_dropdown = gr.Dropdown(choices=[p["alias"] for p in prompts], label="Select Prompt", value=prompts[0]["alias"]) + team_dropdown = gr.Dropdown(choices=["Red", "Blue"], label="Select Team", value="Red") + model_dropdown = gr.Dropdown(choices=[m["alias"] for m in models], label="Select Model", value=models[0]["alias"]) + with gr.Row(): + # Add a text box for custom user input (max 200 characters) + custom_prompt_input = gr.Textbox(label="Custom Prompt (Optional)", placeholder="Enter additional details (max 200 chars)...", max_lines=1, max_length=200) + with gr.Row(): + generate_button = gr.Button("Generate Image") + output = gr.Textbox(label="Output") + with gr.Row(): + output_image = gr.Image(label="Generated Image") + with gr.Row(): + status_text = gr.Textbox(label="Status", placeholder="Waiting for input...", interactive=False) + print("Building cudasdasrer...") + + ##Connect the button to the call_generate function + ##had do do it to handle gradio/modal interaction) + generate_button.click( + generate, + inputs=[prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input], + outputs=[output_image, status_text], + ) + return demo + +# Create the demo instance +demo = gradio_interface_modal() + +# Only launch if running directly +if __name__ == "__main__": + with modal.enable_output(): + demo.queue().launch() diff --git a/modal_bones/src/modal_bones_gradio_interface.py b/modal_bones/src/modal_bones_gradio_interface.py new file mode 100644 index 0000000000000000000000000000000000000000..112a4df934bc423b571451a2774f5915b9f13661 --- /dev/null +++ b/modal_bones/src/modal_bones_gradio_interface.py @@ -0,0 +1,73 @@ +# gradio_interface.py +import gradio as gr +import modal +from config.config import prompts, models # Indirect import +#from img_gen import generate_image + +print("Hello from gradio_interface_head!") + +# Modal remote function synchronously +def generate(prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input): + # Debug: Print a message instead of generating an image + debug_message = f"Debug: Button clicked! Inputs - Prompt: {prompt_dropdown}, Team: {team_dropdown}, Model: {model_dropdown}, Custom Prompt: {custom_prompt_input}" + print(debug_message) # Print to console for debugging + try: + # Import the remote function + f = modal.Function.from_name("img-gen-modal", "generate_image") + image_path, message = f.remote(prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input) + return image_path, message + except Exception as e: + return None, f"An error occurred: {e}" + +def generate_gpu(prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input): + # Debug: Print a message instead of generating an image + debug_message = f"Debug: Button clicked! 
Inputs - Prompt: {prompt_dropdown}, Team: {team_dropdown}, Model: {model_dropdown}, Custom Prompt: {custom_prompt_input}" + print(debug_message) # Print to console for debugging + try: + # Import the remote function + f = modal.Function.from_name("img-gen-modal-gpu", "generate_image") + image_path, message = f.remote(prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input) + return image_path, message + except Exception as e: + return None, f"An error occurred: {e}" + + +def gradio_interface_modal(): + with modal.enable_output(): + from config.config import prompts, models # Indirect import + # Gradio Interface + with gr.Blocks() as demo: + gr.Markdown("# CtB AI Image Generator") + with gr.Row(): + # Set default values for dropdowns + prompt_dropdown = gr.Dropdown(choices=[p["alias"] for p in prompts], label="Select Prompt", value=prompts[0]["alias"]) + team_dropdown = gr.Dropdown(choices=["Red", "Blue"], label="Select Team", value="Red") + model_dropdown = gr.Dropdown(choices=[m["alias"] for m in models], label="Select Model", value=models[0]["alias"]) + with gr.Row(): + # Add a text box for custom user input (max 200 characters) + custom_prompt_input = gr.Textbox(label="Custom Prompt (Optional)", placeholder="Enter additional details (max 200 chars)...", max_lines=1, max_length=200) + with gr.Row(): + generate_button = gr.Button("Generate Image") + output = gr.Textbox(label="Output") + with gr.Row(): + output_image = gr.Image(label="Generated Image") + with gr.Row(): + status_text = gr.Textbox(label="Status", placeholder="Waiting for input...", interactive=False) + print("Building cudasdasrer...") + + ##Connect the button to the call_generate function + ##had do do it to handle gradio/modal interaction) + generate_button.click( + generate, + inputs=[prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input], + outputs=[output_image, status_text], + ) + return demo + +# Create the demo instance +demo = gradio_interface_modal() + +# Only launch if running directly +if __name__ == "__main__": + with modal.enable_output(): + demo.queue().launch() diff --git a/modal_bones/src/modal_bones_img_gen.py b/modal_bones/src/modal_bones_img_gen.py new file mode 100644 index 0000000000000000000000000000000000000000..b1dc8e276297e740b606547f9dda3dfd0625e113 --- /dev/null +++ b/modal_bones/src/modal_bones_img_gen.py @@ -0,0 +1,235 @@ +# img_gen.py +#img_gen_modal.py +# img_gen.py +# img_gen_modal.py +import modal +import random +import io +from config.config import prompts, models # Indirect import +import os +import gradio as gr + +CACHE_DIR = "/model_cache" + +# Define the Modal image +image = ( + modal.Image.from_registry("nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.9") + #modal.Image.debian_slim(python_version="3.9") # Base image + + .apt_install( + "git", + ) + .pip_install( + "diffusers", + "transformers", + "xformers", + "torch", + "accelerate", + "gradio>=4.44.1", + "safetensors", + "pillow", + "sentencepiece", + "hf_transfer", + "huggingface_hub[hf_transfer]", + "aria2", # aria2 for ultra-fast parallel downloads + f"git+https://github.com/huggingface/transformers.git" + ) + .env( + { + "HF_HUB_ENABLE_HF_TRANSFER": "1", "HF_HOME": "HF_HOME", "HF_HUB_CACHE": CACHE_DIR + } + ) +) + +# Create a Modal app +app = modal.App("img-gen-modal", image=image) +with image.imports(): + import diffusers + import os + import torch + import sentencepiece + import torch + from huggingface_hub import login + from transformers import AutoTokenizer + import random + from datetime import 
datetime + import xformers + +flux_model_vol = modal.Volume.from_name("flux-model-vol", create_if_missing=True) # Reference your volume + + +# CPU FUNCTION +@app.function(volumes={"/data": flux_model_vol}, + secrets=[modal.Secret.from_name("huggingface-token")], + gpu="L40S", + #cpu = 1, + timeout = 300 + ) +# MAIN GENERATE IMAGE FUNCTION +def generate_image(prompt_alias, + team_color, + model_alias, + custom_prompt, + height=360, + width=640, + num_inference_steps=20, + guidance_scale=2.0, + seed=-1, + progress=gr.Progress(track_tqdm=True) # Add progress parameter + ): + with modal.enable_output(): + print("Hello from ctb_modal!") + # progress(0, desc="Starting...") # Initial progress + # yield "Initializing image generation..." # Yield the initial message + + print("Running debug check...") + # Debug function to check installed packages + def check_dependencies(): + packages = [ + "diffusers", # For Stable Diffusion + "transformers", # For Hugging Face models + "torch", # PyTorch + "accelerate", # For distributed training/inference + "gradio", # For the Gradio interface (updated to latest version) + "safetensors", # For safe model loading + "pillow", # For image processing + "sentencepiece" + ] + + for package in packages: + try: + import importlib + module = importlib.import_module(package) + print(f" {package} is installed. Version:") + except ImportError: + print(f" {package} is NOT installed.") + + check_dependencies() + # progress(0.2, desc="Preprocessing input...") + # yield "Preprocessing inputs..." # Yield the preprocessing message + + # Find the selected prompt and model + try: + prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"] + model_name = next(m for m in models if m["alias"] == model_alias)["name"] + except StopIteration: + return None, "ERROR: Invalid prompt or model selected." + + # Determine the enemy color + enemy_color = "blue" if team_color.lower() == "red" else "red" + + # Print the original prompt and dynamic values for debugging + print("Original Prompt:") + print(prompt) + print(f"Enemy Color: {enemy_color}") + print(f"Team Color: {team_color.lower()}") + + prompt = prompt.format(team_color=team_color.lower(), enemy_color=enemy_color) + + # Print the formatted prompt for debugging + print("\nFormatted Prompt:") + print(prompt) + + # Append the custom prompt (if provided) + if custom_prompt and len(custom_prompt.strip()) > 0: + prompt += " " + custom_prompt.strip() + + # Randomize the seed if needed + if seed == -1: + seed = random.randint(0, 1000000) + + try: + from diffusers import FluxPipeline + print("Initializing HF TOKEN") + hf_token = os.environ["HF_TOKEN"] + print(hf_token) + print("HF TOKEN:") + login(token=hf_token) + print("model_name:") + print(model_name) + + # Use absolute path with leading slash + local_path = f"/data/{model_name}" # Changed from "data/" to "/data/" + print(f"Loading model from local path: {local_path}") + + # Debug: Check if the directory exists and list its contents + if os.path.exists(local_path): + print("Directory exists. 
Contents:") + for item in os.listdir(local_path): + print(f" - {item}") + else: + print(f"Directory does not exist: {local_path}") + print("Contents of /data:") + print(os.listdir("/data")) + # CHECK FOR TORCH USING CUDA + print("CHECK FOR TORCH USING CUDA") + print(f"CUDA available: {torch.cuda.is_available()}") + if torch.cuda.is_available(): + print("inside if") + print(f"CUDA device count: {torch.cuda.device_count()}") + print(f"Current device: {torch.cuda.current_device()}") + print(f"Device name: {torch.cuda.get_device_name(torch.cuda.current_device())}") + + # progress(0.5, desc="Running the model...") + # yield "Running the model..." # Yield the model running message + + # INITIALIZING CPU PIPE + print("-----INITIALIZING PIPE-----") + pipe = FluxPipeline.from_pretrained( + local_path, + torch_dtype=torch.bfloat16, + #torch_dtype=torch.float16, + #torch_dtype=torch.float32, + local_files_only=True + ) + + if torch.cuda.is_available(): + print("CUDA available") + print("using gpu") + pipe = pipe.to("cuda") + pipe_message = "CUDA" + else: + print("CUDA not available") + print("using cpu") + #pipe = pipe.to("cpu") + pipe_message = "CPU" + #pipe.enable_model_cpu_offload() # Use official recommended method + print(f"-----{pipe_message} PIPE INITIALIZED-----") + print(f"Using device: {pipe.device}") + except Exception as e: + print(f"Detailed error: {str(e)}") + return None, f"ERROR: Failed to initialize PIPE2. Details: {e}" + try: + print("-----SENDING IMG GEN TO PIPE-----") + print("-----HOLD ON-----") + # progress(0.8, desc="Postprocessing the output...") + # yield "Postprocessing the output..." # Yield the postprocessing message + + image = pipe( + prompt, + guidance_scale=guidance_scale, + num_inference_steps=num_inference_steps, + width=width, + height=height, + max_sequence_length=512 + # seed=seed + ).images[0] + print("-----RENDER DONE!-----") + print(image) + except Exception as e: + return f"ERROR: Failed to initialize InferenceClient. Details: {e}" + + try: + print("-----IMAGE GENERATED SUCCESSFULLY!-----") + print("-----CALL THE BANNERS!-----") + print("-----SAVING TO DISK-----") + # Save the image with a timestamped filename + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + output_filename = f"/data/images/{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team_color.lower()}.png" + # Save the image using PIL's save method + image.save(output_filename) + print(f"File path: {output_filename}") + except Exception as e: + print(f"ERROR: Failed to save image. Details: {e}") + # Return the filename and success message + return image, "Image generated successfully! Call the banners!" 
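All of the application and generation modules above import prompts, models, and api_token from config.config, but that module does not appear in this part of the diff. The following is a minimal sketch, assuming only the structure implied by how the values are consumed: each prompt entry needs "alias" and "text" keys, with {team_color} and {enemy_color} placeholders in the text; each model entry needs "alias" and "name", where the Modal function loads /data/<name> while the local version loads models/<alias>. The concrete aliases and prompt wording below are placeholders, not the project's real configuration.

# config/config.py — hypothetical minimal sketch, inferred from how the modules above use it
import os

prompts = [
    {
        "alias": "Castle Siege",  # placeholder alias; the real list likely differs
        "text": "A medieval castle under siege, the {team_color} army storming the walls "
                "while the {enemy_color} army defends from the battlements",
    },
    {
        "alias": "Forest Battle",
        "text": "A clash between {team_color} and {enemy_color} knights in a dense forest",
    },
]

models = [
    # "name" is the model folder on the Modal volume (/data/<name>); the local
    # script instead resolves models/<alias> on disk.
    {"alias": "FLUX.1-dev", "name": "FLUX.1-dev"},
]

# Token consumed by the apps; img_gen_local.py reads HF_TOKEN from the environment.
api_token = os.getenv("HF_TOKEN")

A module of this shape should be enough to import app_local.py and the modal_bones interface for local experimentation, since nothing in this section depends on additional keys.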
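For completeness, the modal_bones generate_image function can also be driven without Gradio once the app is deployed. The sketch below assumes the app has been deployed under the name used in the decorator above ("img-gen-modal") and that the alias strings exist in config.config (they are placeholders here); it mirrors the lookup that gradio_interface_modal already performs.

# Hypothetical driver script — assumes `modal deploy modal_bones/src/modal_bones_img_gen.py`
# has been run so the "img-gen-modal" app is live.
import modal

# Look up the deployed function by app name and function name, exactly as the
# Gradio interfaces in this diff do, then run it remotely on the GPU container.
generate_image = modal.Function.from_name("img-gen-modal", "generate_image")

image, message = generate_image.remote(
    "Castle Siege",   # prompt_alias — must match an "alias" in config.prompts (placeholder)
    "Red",            # team_color
    "FLUX.1-dev",     # model_alias — must match an "alias" in config.models (placeholder)
    "",               # custom_prompt (optional free text appended to the prompt)
)
print(message)              # e.g. "Image generated successfully! Call the banners!"
image.save("preview.png")   # the function returns a PIL image alongside the status string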
diff --git a/modal_volume_download/images/20250130_204829_flux.1-dev_forest_battle_red.png b/modal_volume_download/images/20250130_204829_flux.1-dev_forest_battle_red.png deleted file mode 100644 index 2f16e93e03d70a50f644c97a16d8a710eca6b83c..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250130_204829_flux.1-dev_forest_battle_red.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7b4002154bfeb3179b40e58b2e8ec8d4e9d2339995e077c93e87d575140062b8 -size 419156 diff --git a/modal_volume_download/images/20250130_205034_flux.1-dev_night_battle_red.png b/modal_volume_download/images/20250130_205034_flux.1-dev_night_battle_red.png deleted file mode 100644 index 89b28689985e3a0993c43e27b6c8b94d46b32bfb..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250130_205034_flux.1-dev_night_battle_red.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:f664677433d11f25bca050a15065c7c28c8763908412ed8690062ec3d88b5b46 -size 335765 diff --git a/modal_volume_download/images/20250130_212304_flux.1-dev_night_battle_red.png b/modal_volume_download/images/20250130_212304_flux.1-dev_night_battle_red.png deleted file mode 100644 index 80e899565fde751a879db509627e3b91d304093e..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250130_212304_flux.1-dev_night_battle_red.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:11de40074e81bac03f12acdbe2f66a760d79e8f35e59500269ed909a8c7c054f -size 344784 diff --git a/modal_volume_download/images/20250130_212349_flux.1-dev_snowy_battlefield_red.png b/modal_volume_download/images/20250130_212349_flux.1-dev_snowy_battlefield_red.png deleted file mode 100644 index 414b08d5f049f3b61006b99601e6d9781d6c4644..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250130_212349_flux.1-dev_snowy_battlefield_red.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:a44434d9d44970d0a3a8e7c0857efb38bb2ba6021cdd843bebdf08311f099e6a -size 425057 diff --git a/modal_volume_download/images/20250130_212424_flux.1-dev_siege_tower_attack_red.png b/modal_volume_download/images/20250130_212424_flux.1-dev_siege_tower_attack_red.png deleted file mode 100644 index 46aeb5e6b1e6b3d897951a75eab35fe8c1dcbfe4..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250130_212424_flux.1-dev_siege_tower_attack_red.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:3cbec259f6b5f146df73ee361d9095cae03717ffd939bf822cfbbabd108c5fba -size 411131 diff --git a/modal_volume_download/images/20250131_023325_flux.1-dev_castle_siege_red.png b/modal_volume_download/images/20250131_023325_flux.1-dev_castle_siege_red.png deleted file mode 100644 index 368ce14b224be26891474618b166205ffdca5f75..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250131_023325_flux.1-dev_castle_siege_red.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:f49f6d283a655c2bd50f45d66054dd480a7f173e1981dee306b84bc4ccf1b0e2 -size 397482 diff --git a/modal_volume_download/images/20250131_023633_flux.1-dev_marching_army_red.png b/modal_volume_download/images/20250131_023633_flux.1-dev_marching_army_red.png deleted file mode 100644 index aa716ccbb84a2980e39df1abef508a632dfc2cd2..0000000000000000000000000000000000000000 --- 
a/modal_volume_download/images/20250131_023633_flux.1-dev_marching_army_red.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e67462cbcf25d8fdced445aaf4937598d98a15ee66c2bd6336a3f86407e63516 -size 347138 diff --git a/modal_volume_download/images/20250131_030037_flux.1-dev_night_battle_red.png b/modal_volume_download/images/20250131_030037_flux.1-dev_night_battle_red.png deleted file mode 100644 index d5c995127c633d9e73413f8c9e422117fb10d0d5..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250131_030037_flux.1-dev_night_battle_red.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:ad97cc81c4c061edcd4d6e7b7dfcc51e61415dc2a3be6bf747605774b37142c0 -size 361267 diff --git a/modal_volume_download/images/20250131_041200_flux.1-dev_castle_siege_red.png b/modal_volume_download/images/20250131_041200_flux.1-dev_castle_siege_red.png deleted file mode 100644 index 1e466d4f07cb24252f93a912edfa850af71c457c..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250131_041200_flux.1-dev_castle_siege_red.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7b615bb51bd4cd3d938aaa890b28467b055c85c916ed006b0b1c83b1a2780e40 -size 390122 diff --git a/modal_volume_download/images/20250131_041614_flux.1-dev_castle_siege_red.png b/modal_volume_download/images/20250131_041614_flux.1-dev_castle_siege_red.png deleted file mode 100644 index f63f6b4d320851448341858599e253e3bb42c662..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250131_041614_flux.1-dev_castle_siege_red.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:ea499a27904694f4f2c2a7b0fafdaedaf4b3e71bb944e96c492bd40cf2cca9ee -size 450920 diff --git a/modal_volume_download/images/20250131_042042_flux.1-dev_night_battle_red.png b/modal_volume_download/images/20250131_042042_flux.1-dev_night_battle_red.png deleted file mode 100644 index ef7e614bf11025b4bec4a99cadd0ff6166b31987..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250131_042042_flux.1-dev_night_battle_red.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:ae08d737cbb1b55452d10d18e4c7335a5117c754a7574255541989a6004b3450 -size 347426 diff --git a/modal_volume_download/images/20250131_042801_flux.1-dev_forest_battle_red.png b/modal_volume_download/images/20250131_042801_flux.1-dev_forest_battle_red.png deleted file mode 100644 index da8bf40bc01f209c6749de28dff862d8e7d4d7d1..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250131_042801_flux.1-dev_forest_battle_red.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:f062ad03c12c74654b7323d9416b4690b64ff77e8326fd6ff8efbb48a5181818 -size 405967 diff --git a/modal_volume_download/images/20250131_045958_flux.1-dev_forest_battle_red.png b/modal_volume_download/images/20250131_045958_flux.1-dev_forest_battle_red.png deleted file mode 100644 index 8671f1e49e8588e4580561fec56896d29ec18bbb..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250131_045958_flux.1-dev_forest_battle_red.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:2fe7b2b2eae0ec2fde6419abf33790186d1594b2483847c967124824c3d5dcf9 -size 439871 diff --git a/modal_volume_download/images/20250131_050217_flux.1-dev_forest_battle_red.png 
b/modal_volume_download/images/20250131_050217_flux.1-dev_forest_battle_red.png deleted file mode 100644 index d6f150c81704576116bc1c26693baf06dce81729..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250131_050217_flux.1-dev_forest_battle_red.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:aeecb0e43e7c2ff7fadcd5b187299b38176ac6837f577cfddb04c2094afe0cd1 -size 446569 diff --git a/modal_volume_download/images/20250131_050622_flux.1-dev_boiling_oil_defense_red.png b/modal_volume_download/images/20250131_050622_flux.1-dev_boiling_oil_defense_red.png deleted file mode 100644 index af0d1b9e1398a822d54f43d928a1d55ee093f332..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250131_050622_flux.1-dev_boiling_oil_defense_red.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:a396bee0237cf6bef10af55b7f1e30ad9cf0071b713b7033a50b47bffa6866c8 -size 394949 diff --git a/modal_volume_download/images/20250131_051056_flux.1-dev_boiling_oil_defense_blue.png b/modal_volume_download/images/20250131_051056_flux.1-dev_boiling_oil_defense_blue.png deleted file mode 100644 index f6ee6f886cfcfc73f0ae93360724c9fe6431c4db..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250131_051056_flux.1-dev_boiling_oil_defense_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:9c1dc2f6a080c8dd5fc027e7bd300bf4bf65062d99f303654f67450dbe7003d3 -size 375490 diff --git a/modal_volume_download/images/20250131_051644_flux.1-dev_boiling_oil_defense_blue.png b/modal_volume_download/images/20250131_051644_flux.1-dev_boiling_oil_defense_blue.png deleted file mode 100644 index 3366196c191c2f94a106b76c60ae51338486e048..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250131_051644_flux.1-dev_boiling_oil_defense_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e175218be3212c4306b512590822da70513cd34c170569b88edfc7f1eee63f14 -size 388899 diff --git a/modal_volume_download/images/20250131_051838_flux.1-dev_boiling_oil_defense_blue.png b/modal_volume_download/images/20250131_051838_flux.1-dev_boiling_oil_defense_blue.png deleted file mode 100644 index b1b74374c9dfb634baf77d21acea395420c4b5d1..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250131_051838_flux.1-dev_boiling_oil_defense_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:eeae4a90834cc1924ff885e505ba3531ec5330b573d07973acb0d5fb7226d58e -size 393601 diff --git a/modal_volume_download/images/20250131_051952_flux.1-dev_boiling_oil_defense_blue.png b/modal_volume_download/images/20250131_051952_flux.1-dev_boiling_oil_defense_blue.png deleted file mode 100644 index 567ee47eb5db5b1372ea7f08d4bd647374f4d822..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250131_051952_flux.1-dev_boiling_oil_defense_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:12594660ce4c51cc8ee86755e0d7f8f77b68a9ef8013a335b47a6fdfcc3a7ff5 -size 380862 diff --git a/modal_volume_download/images/20250131_052036_flux.1-dev_boiling_oil_defense_blue.png b/modal_volume_download/images/20250131_052036_flux.1-dev_boiling_oil_defense_blue.png deleted file mode 100644 index 2b612323c5737f30da21290c6dc1767f3c21aca1..0000000000000000000000000000000000000000 --- 
a/modal_volume_download/images/20250131_052036_flux.1-dev_boiling_oil_defense_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:32a5dc1515e586a26a9b316fcd7c9151d576f9a29788b9fa727b5bca6ca76f44 -size 377491 diff --git a/modal_volume_download/images/20250131_052105_flux.1-dev_boiling_oil_defense_blue.png b/modal_volume_download/images/20250131_052105_flux.1-dev_boiling_oil_defense_blue.png deleted file mode 100644 index b12a74b2ff5784a65c727cc7d3dc3b7983cc50a4..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250131_052105_flux.1-dev_boiling_oil_defense_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:67875afe3f518a9e253823cd41edf15ffab6b14d609f395dca5ee97ca8e4e513 -size 377925 diff --git a/modal_volume_download/images/20250131_052324_flux.1-dev_boiling_oil_defense_blue.png b/modal_volume_download/images/20250131_052324_flux.1-dev_boiling_oil_defense_blue.png deleted file mode 100644 index 33b8973d1dc7e345ed68bb141ee64c9c49588358..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250131_052324_flux.1-dev_boiling_oil_defense_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e48505ed7f7cef665d07a75e322ced0f734e8a34bec355cb4822f7a9b399afb2 -size 402421 diff --git a/modal_volume_download/images/20250131_064901_flux.1-dev_boiling_oil_defense_blue.png b/modal_volume_download/images/20250131_064901_flux.1-dev_boiling_oil_defense_blue.png deleted file mode 100644 index 522432ebd0e736e5d05791ed4803656cfbd8bd9b..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250131_064901_flux.1-dev_boiling_oil_defense_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:c3341cbdd2460ad58894161b97117277e7d7b0626c24384ce2ad2f0fe147e710 -size 374860 diff --git a/modal_volume_download/images/20250131_065801_flux.1-dev_snowy_battlefield_blue.png b/modal_volume_download/images/20250131_065801_flux.1-dev_snowy_battlefield_blue.png deleted file mode 100644 index d4731c8755567f2b793c65549d91340bd7bfc911..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250131_065801_flux.1-dev_snowy_battlefield_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:efe1bc19277564658d571b1bea05e6010b44aa0ec819e29ea33a0d2045e5ec51 -size 405338 diff --git a/modal_volume_download/images/20250131_070041_flux.1-dev_snowy_battlefield_blue.png b/modal_volume_download/images/20250131_070041_flux.1-dev_snowy_battlefield_blue.png deleted file mode 100644 index 4336c2def944e9d7367d37bf574797975b66621a..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250131_070041_flux.1-dev_snowy_battlefield_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:4eef8df0110712415f40075877dd98228ba2088084748d3a170b53a91ba7bcee -size 388118 diff --git a/modal_volume_download/images/20250131_070255_flux.1-dev_snowy_battlefield_blue.png b/modal_volume_download/images/20250131_070255_flux.1-dev_snowy_battlefield_blue.png deleted file mode 100644 index 0c79b866fd98a60db1011fd6622f2f4e34ad9060..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250131_070255_flux.1-dev_snowy_battlefield_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e58252f72417344ac22576b69db6c10ffb9774b129fcc3438aa43509349a447e -size 
403920 diff --git a/modal_volume_download/images/20250131_075043_flux.1-dev_snowy_battlefield_blue.png b/modal_volume_download/images/20250131_075043_flux.1-dev_snowy_battlefield_blue.png deleted file mode 100644 index 8289c718e7183d7832cbf0b742127bb7a00e60aa..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250131_075043_flux.1-dev_snowy_battlefield_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:ce17909d7bb06bc7ca1c611bc4e594f30aa87a55c123a9a8f320c15209271813 -size 365322 diff --git a/modal_volume_download/images/20250131_075252_flux.1-dev_snowy_battlefield_blue.png b/modal_volume_download/images/20250131_075252_flux.1-dev_snowy_battlefield_blue.png deleted file mode 100644 index 21a6c2699be1891d44b15a74a1731cd73c8930d0..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250131_075252_flux.1-dev_snowy_battlefield_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e27eb538f12028f2f8268b48c311f688a7dcd90f8d80ab1419c06a7ad8efd8f7 -size 401919 diff --git a/modal_volume_download/images/20250131_082122_flux.1-dev_snowy_battlefield_blue.png b/modal_volume_download/images/20250131_082122_flux.1-dev_snowy_battlefield_blue.png deleted file mode 100644 index e32116d6ef037502dfac576ec86022b056f8449b..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250131_082122_flux.1-dev_snowy_battlefield_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:40857311cf5d3ad43724c3ba4c01621d05992607f0ce9b8ae742b91217a985e2 -size 375198 diff --git a/modal_volume_download/images/20250131_082224_flux.1-dev_snowy_battlefield_blue.png b/modal_volume_download/images/20250131_082224_flux.1-dev_snowy_battlefield_blue.png deleted file mode 100644 index f9379ea6483d225d426a7a051d69602875c175be..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250131_082224_flux.1-dev_snowy_battlefield_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:10a67988b0eea40f6e78fb88f9b5088a57328ed69b389dbc4d9fd319402380db -size 407430 diff --git a/modal_volume_download/images/20250131_082246_flux.1-dev_snowy_battlefield_blue.png b/modal_volume_download/images/20250131_082246_flux.1-dev_snowy_battlefield_blue.png deleted file mode 100644 index 1409176945c3da0619a33446bc48e1f68c401d45..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250131_082246_flux.1-dev_snowy_battlefield_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:179daeaaaf122f33b4098f343580ec4534e267fe2842481a13d3ef8e93d19057 -size 396973 diff --git a/modal_volume_download/images/20250131_082344_flux.1-dev_snowy_battlefield_blue.png b/modal_volume_download/images/20250131_082344_flux.1-dev_snowy_battlefield_blue.png deleted file mode 100644 index 8cbd21b408d51dcc9ec96744135562e731b72d95..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250131_082344_flux.1-dev_snowy_battlefield_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:054db721c7870cf5d2a8949945f573f6acf4b9ed5833d229aa0f2d6f182ae5af -size 395912 diff --git a/modal_volume_download/images/20250131_082404_flux.1-dev_snowy_battlefield_blue.png b/modal_volume_download/images/20250131_082404_flux.1-dev_snowy_battlefield_blue.png deleted file mode 100644 index 
e54930b915dbfe748a1c7869186d38b9bea6b4c3..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250131_082404_flux.1-dev_snowy_battlefield_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:39da040f0e10b4c3cc811e52d036e7cd4d0e98415804ea04bbcb5102421c0b5f -size 407990 diff --git a/modal_volume_download/images/20250131_082444_flux.1-dev_snowy_battlefield_blue.png b/modal_volume_download/images/20250131_082444_flux.1-dev_snowy_battlefield_blue.png deleted file mode 100644 index 1343934f62270781c47a8056d390a0800f370657..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250131_082444_flux.1-dev_snowy_battlefield_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:452fe9b6f87b1f929c430391ffb8499f9c43e51c3ad94018f9ce8ad07cfbd199 -size 398077 diff --git a/modal_volume_download/images/20250131_082522_flux.1-dev_snowy_battlefield_blue.png b/modal_volume_download/images/20250131_082522_flux.1-dev_snowy_battlefield_blue.png deleted file mode 100644 index 9bb0147100c0191ef7aec1ac3260d81c312e9667..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250131_082522_flux.1-dev_snowy_battlefield_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:2536b3f06fd3e614d4f9b1efac63964dd14f7db35425820d75fd3cdf70c39987 -size 404258 diff --git a/modal_volume_download/images/20250131_082553_flux.1-dev_snowy_battlefield_blue.png b/modal_volume_download/images/20250131_082553_flux.1-dev_snowy_battlefield_blue.png deleted file mode 100644 index 39d2e1e805a40f1bd630f476ff1212ef0904eb16..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250131_082553_flux.1-dev_snowy_battlefield_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e671bfdb1578f5e587cd250cc1289df62784d407c5c14073de586bd63c3e9bc4 -size 406184 diff --git a/modal_volume_download/images/20250131_082629_flux.1-dev_siege_tower_attack_blue.png b/modal_volume_download/images/20250131_082629_flux.1-dev_siege_tower_attack_blue.png deleted file mode 100644 index 76ee4fc6e91ae6fe38429ec236d3c0f3ea64fb15..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250131_082629_flux.1-dev_siege_tower_attack_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:ea5b479fe268ec2786a9c4c5ea7545f7fbee271172fa6f3ec6df8d051d180e9e -size 426397 diff --git a/modal_volume_download/images/20250131_084919_flux.1-dev_siege_tower_attack_blue.png b/modal_volume_download/images/20250131_084919_flux.1-dev_siege_tower_attack_blue.png deleted file mode 100644 index c9fc7cb99491a1b8bc571c6b334e8264a64c23a5..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250131_084919_flux.1-dev_siege_tower_attack_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7f98b38b4e65063f4c588d0d55bf1faabb8a5139e57d3e678d7c30853a747f9c -size 398883 diff --git a/modal_volume_download/images/20250131_145354_flux.1-dev_forest_battle_red.png b/modal_volume_download/images/20250131_145354_flux.1-dev_forest_battle_red.png deleted file mode 100644 index 6246035d27da0cf5f1bf1144ebe42ff542494192..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250131_145354_flux.1-dev_forest_battle_red.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid 
sha256:487c8d916ed86686fd38a512ff2c669adeb9746f0a811659234c2e401c98a7bb -size 422198 diff --git a/modal_volume_download/images/20250131_145558_flux.1-dev_forest_battle_red.png b/modal_volume_download/images/20250131_145558_flux.1-dev_forest_battle_red.png deleted file mode 100644 index f49913485593c96aea1b960d7437565eeee1dc61..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250131_145558_flux.1-dev_forest_battle_red.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:562fca0a9e67d51876897cdef6040016389b47b3f75f866ad106c67d2138499f -size 438178 diff --git a/modal_volume_download/images/20250131_145646_flux.1-dev_forest_battle_red.png b/modal_volume_download/images/20250131_145646_flux.1-dev_forest_battle_red.png deleted file mode 100644 index b3e07d63d4500076a063285431024a900833716f..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250131_145646_flux.1-dev_forest_battle_red.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:722c2e95248a22304a2aa67f06bd0a53c4804c5d2a8ac5176a116ab5eb04fad4 -size 410992 diff --git a/modal_volume_download/images/20250131_145715_flux.1-dev_forest_battle_red.png b/modal_volume_download/images/20250131_145715_flux.1-dev_forest_battle_red.png deleted file mode 100644 index b1b30ad12de3204b59e14b548a1f97d3acb301a8..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250131_145715_flux.1-dev_forest_battle_red.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:8d5fdf6a62cf6a07e35cd6502042ba2fe3591cd13b07fa889784846b766b2cd4 -size 434451 diff --git a/modal_volume_download/images/20250131_145739_flux.1-dev_forest_battle_red.png b/modal_volume_download/images/20250131_145739_flux.1-dev_forest_battle_red.png deleted file mode 100644 index 306eabbf33cf7f1a41fddfd68d7a75b7de9282a1..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250131_145739_flux.1-dev_forest_battle_red.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:0b236f61879ae495713bd620ea770f7c77e678004795259af22a410d8daf1ebf -size 443587 diff --git a/modal_volume_download/images/20250131_145756_flux.1-dev_forest_battle_red.png b/modal_volume_download/images/20250131_145756_flux.1-dev_forest_battle_red.png deleted file mode 100644 index 28af80318bff8f58640373d81f84acb47a0e8045..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250131_145756_flux.1-dev_forest_battle_red.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:c1cb7b0aeb7843660a32b0f0a740646ec2f49e38aeb14ba774ffd32a7b84ff29 -size 420884 diff --git a/modal_volume_download/images/20250131_145828_flux.1-dev_marching_army_blue.png b/modal_volume_download/images/20250131_145828_flux.1-dev_marching_army_blue.png deleted file mode 100644 index 21419e0c47beb81a423164f53a795145503d900b..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250131_145828_flux.1-dev_marching_army_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:ac633181ed34e8591d160a73a3137a232b8f65380a613dcede60905bc9af6e6c -size 379601 diff --git a/modal_volume_download/images/20250131_145850_flux.1-dev_siege_tower_attack_blue.png b/modal_volume_download/images/20250131_145850_flux.1-dev_siege_tower_attack_blue.png deleted file mode 100644 index 
36cf6d61186cc533bbee3a425c33078cc9f0ea66..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250131_145850_flux.1-dev_siege_tower_attack_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:426ef9c92d227f4b57a27eedfe74c63a13db0d0dfaf9e2b4ea97cebd6b644d85 -size 410276 diff --git a/modal_volume_download/images/20250131_145911_flux.1-dev_knight_duel_blue.png b/modal_volume_download/images/20250131_145911_flux.1-dev_knight_duel_blue.png deleted file mode 100644 index fc887ba8af335926d583bfd70f7f76e25cadfe8a..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250131_145911_flux.1-dev_knight_duel_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e6fa3c3e1a5f94cafc262e31fe95c04889ce870f2a4811ed849956304e884db0 -size 394270 diff --git a/modal_volume_download/images/20250201_231219_flux.1-dev_castle_siege_blue.png b/modal_volume_download/images/20250201_231219_flux.1-dev_castle_siege_blue.png deleted file mode 100644 index be46ab8f06afc210d81c9a94218b1cc5e0ecd8ec..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250201_231219_flux.1-dev_castle_siege_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:b383f903fc342a6726137a9fbc4309e821712e11049325f9e4456a881c50dfff -size 391024 diff --git a/modal_volume_download/images/20250201_231312_flux.1-dev_castle_siege_blue.png b/modal_volume_download/images/20250201_231312_flux.1-dev_castle_siege_blue.png deleted file mode 100644 index ba8f4453dd24ba1ac946955589a2591aa2f6f320..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250201_231312_flux.1-dev_castle_siege_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:b3778083d928c64d7c9bd63e6a1d3820b968cec802ffd6989a4260ce6856b6fb -size 383788 diff --git a/modal_volume_download/images/20250201_231354_flux.1-dev_castle_siege_blue.png b/modal_volume_download/images/20250201_231354_flux.1-dev_castle_siege_blue.png deleted file mode 100644 index 2d12d81c9e74760dde19999d207cd2bdbaf4217e..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250201_231354_flux.1-dev_castle_siege_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:3d466ee1c04393abbc2a0e6e617a8ae15552772eade7d021c7bbac30eb801745 -size 398065 diff --git a/modal_volume_download/images/20250201_231432_flux.1-dev_castle_siege_blue.png b/modal_volume_download/images/20250201_231432_flux.1-dev_castle_siege_blue.png deleted file mode 100644 index 1d66f573287683592916f47d1f6dc267ca01d796..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250201_231432_flux.1-dev_castle_siege_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:036d547df7f311dad0b82dc94ab14875d1f2c61ce20b8f453d98391dd334b66a -size 399880 diff --git a/modal_volume_download/images/20250201_234155_flux.1-dev_castle_siege_blue.png b/modal_volume_download/images/20250201_234155_flux.1-dev_castle_siege_blue.png deleted file mode 100644 index c139f529fd04ce9f1d06c8afedd90c2bb59d86d4..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250201_234155_flux.1-dev_castle_siege_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:b9c92f7fd1818611d61418c90afa4afcdfe3d4208ad0aadb23a94522083cb295 -size 401411 diff --git 
a/modal_volume_download/images/20250201_234234_flux.1-dev_castle_siege_blue.png b/modal_volume_download/images/20250201_234234_flux.1-dev_castle_siege_blue.png deleted file mode 100644 index 0e9366c2e1df1a2ae2d2d31f2a6aee81c5fb72a4..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250201_234234_flux.1-dev_castle_siege_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:fdceb4e3b2352bec90317806f70ca86b4b098eb44974c0da44798e18e7319d9a -size 391700 diff --git a/modal_volume_download/images/20250202_012316_flux.1-dev_castle_siege_red.png b/modal_volume_download/images/20250202_012316_flux.1-dev_castle_siege_red.png deleted file mode 100644 index 884749835cadd60eee7ea0f1f59d1cd7fc8cfe44..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250202_012316_flux.1-dev_castle_siege_red.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:21a647a476790a53e0415de7fa0c17e43d917d587ca6afab0f21ebfa6dec7a2e -size 376644 diff --git a/modal_volume_download/images/20250202_012456_flux.1-dev_castle_siege_red.png b/modal_volume_download/images/20250202_012456_flux.1-dev_castle_siege_red.png deleted file mode 100644 index ae53c85907eeb28110333574bfc2c557fd5a639f..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250202_012456_flux.1-dev_castle_siege_red.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:451bcf91be66f955b775c491858d10f1fcb3d1b6ca4a35d07adc26f91712cd93 -size 404134 diff --git a/modal_volume_download/images/20250202_012802_flux.1-dev_castle_siege_blue.png b/modal_volume_download/images/20250202_012802_flux.1-dev_castle_siege_blue.png deleted file mode 100644 index 01cc41e9f9b27d00e50a421d4725f8aa678f6f46..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250202_012802_flux.1-dev_castle_siege_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:5f737737362f8268e46083c22d0d09f8281da7123626c02357f142d200fb8acd -size 383567 diff --git a/modal_volume_download/images/20250202_012814_flux.1-dev_castle_siege_red.png b/modal_volume_download/images/20250202_012814_flux.1-dev_castle_siege_red.png deleted file mode 100644 index 144134562cb34112b7c061fa1b6f65ce77c3ae64..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250202_012814_flux.1-dev_castle_siege_red.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:03bd9cdd202016a1e046760052a9b7ad878d1935c53641e7bd5b79194194f810 -size 379454 diff --git a/modal_volume_download/images/20250202_013404_flux.1-dev_castle_siege_blue.png b/modal_volume_download/images/20250202_013404_flux.1-dev_castle_siege_blue.png deleted file mode 100644 index c9e3ceecdc5c69202aa7d10c832219be699e05c1..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250202_013404_flux.1-dev_castle_siege_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e8d15cc41dbc9783e095eefd4be592e4385a012172d8c71326fff136ddb8cda0 -size 380035 diff --git a/modal_volume_download/images/20250202_014853_flux.1-dev_castle_siege_blue.png b/modal_volume_download/images/20250202_014853_flux.1-dev_castle_siege_blue.png deleted file mode 100644 index 412e727c58d3bab818152d36230414a16346f9e4..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250202_014853_flux.1-dev_castle_siege_blue.png +++ 
/dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:31a1f6c98b77668b33d0c3b542c507004f782ce1908d01971654ae645fe60b37 -size 415804 diff --git a/modal_volume_download/images/20250202_020010_flux.1-dev_castle_siege_blue.png b/modal_volume_download/images/20250202_020010_flux.1-dev_castle_siege_blue.png deleted file mode 100644 index 00fc1f41e8aef0f9fc78c1d67c62ecb5469b87b9..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250202_020010_flux.1-dev_castle_siege_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:65393178ceca41d1091346875b8d6e26def4c9946e45fed81a9c718695c99cc3 -size 398863 diff --git a/modal_volume_download/images/20250202_021449_flux.1-dev_castle_siege_blue.png b/modal_volume_download/images/20250202_021449_flux.1-dev_castle_siege_blue.png deleted file mode 100644 index 98f351571aefe70fcb8e6fd3f002602dd38075f9..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250202_021449_flux.1-dev_castle_siege_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:236780941055a8ab39ccf57e3b104d58bfd4c7ddc889cada2f2b982de51c9c14 -size 376831 diff --git a/modal_volume_download/images/20250202_021744_flux.1-dev_castle_siege_blue.png b/modal_volume_download/images/20250202_021744_flux.1-dev_castle_siege_blue.png deleted file mode 100644 index f7a5dff01bf248d502ce441245a974d623c4b27a..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250202_021744_flux.1-dev_castle_siege_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e85fbe47c60d9a335a40555dbf18f945fcfe1383882ef2a6ccfc603eaa17635f -size 367948 diff --git a/modal_volume_download/images/20250202_021941_flux.1-dev_castle_siege_blue.png b/modal_volume_download/images/20250202_021941_flux.1-dev_castle_siege_blue.png deleted file mode 100644 index e005dda96ce454c66534bcc3f647ebf07371d021..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250202_021941_flux.1-dev_castle_siege_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:bba4895dbdf3c03e13a0233167c90ad23aa80417755903cd1b519f29362e994a -size 414716 diff --git a/modal_volume_download/images/20250202_220825_flux.1-dev_castle_siege_red.png b/modal_volume_download/images/20250202_220825_flux.1-dev_castle_siege_red.png deleted file mode 100644 index be0c5da9601c1f2de35787f8b98301da29f4ba09..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250202_220825_flux.1-dev_castle_siege_red.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:f87d35d80ccba592207255e331517327449985273142b6c9bc07c5d7cf63b3b5 -size 384554 diff --git a/modal_volume_download/images/20250202_220856_flux.1-dev_castle_siege_red.png b/modal_volume_download/images/20250202_220856_flux.1-dev_castle_siege_red.png deleted file mode 100644 index e2c0d9fc4025291d7d6fb456d6359847ad9a03a6..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250202_220856_flux.1-dev_castle_siege_red.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:c2f38ef9f17fa37a810adb03403f88359a1cf1819e864fbc6f6e5e8ec1db2a7f -size 378256 diff --git a/modal_volume_download/images/20250203_002848_flux.1-dev_castle_siege_blue.png b/modal_volume_download/images/20250203_002848_flux.1-dev_castle_siege_blue.png deleted file mode 100644 index 
b4342a5aff0cd9565e63150c709f682a02b8ca8a..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250203_002848_flux.1-dev_castle_siege_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:ded63d3855f3f00f01618faddf5c4d5a858bb5bb91f9182fdfff63c193e9ed71 -size 393795 diff --git a/modal_volume_download/images/20250203_003810_flux.1-dev_castle_siege_blue.png b/modal_volume_download/images/20250203_003810_flux.1-dev_castle_siege_blue.png deleted file mode 100644 index b5e93f991ab207cbc9dc1abc79bfbd8fe57f8c86..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250203_003810_flux.1-dev_castle_siege_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:208e59b4cc6e9a5386d39f9dfab2c98d9b6f316d38f0613449bbd1bdbfa65f0a -size 384534 diff --git a/modal_volume_download/images/20250203_005215_flux.1-dev_modal_local_night_battle_blue.png b/modal_volume_download/images/20250203_005215_flux.1-dev_modal_local_night_battle_blue.png deleted file mode 100644 index ea512c6d22f092b23374d4dec3769c8f0da54c3c..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250203_005215_flux.1-dev_modal_local_night_battle_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:da9dd270e755c14e53d3e2204609dabb976fcd1d5e8dea48d528fda5190b1335 -size 344776 diff --git a/modal_volume_download/images/20250203_011632_flux.1-dev_modal_local_castle_siege_blue.png b/modal_volume_download/images/20250203_011632_flux.1-dev_modal_local_castle_siege_blue.png deleted file mode 100644 index 2fd3c186a1c1dbd12da5662a2844582f391816c2..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250203_011632_flux.1-dev_modal_local_castle_siege_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:40811e9c74cc169f0e1e0e0e052b438b8401574d9023affa946ec1c01e57014c -size 409000 diff --git a/modal_volume_download/images/20250203_011659_flux.1-dev_modal_local_castle_siege_blue.png b/modal_volume_download/images/20250203_011659_flux.1-dev_modal_local_castle_siege_blue.png deleted file mode 100644 index f4404585913dd6b6226ebe79baa801d7b51e0d29..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250203_011659_flux.1-dev_modal_local_castle_siege_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:851fcf55bb9782263fb313dc945ab34b7a68a609e22065340441b4a8976818f8 -size 423315 diff --git a/modal_volume_download/images/20250203_033539_flux.1-dev_modal_local_castle_siege_red.png b/modal_volume_download/images/20250203_033539_flux.1-dev_modal_local_castle_siege_red.png deleted file mode 100644 index 6bad17e6261e67c493454bde0620d4e0ecf39829..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250203_033539_flux.1-dev_modal_local_castle_siege_red.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:6b63a94742446506dce8b9460e2a659ac5145bf96ad4138e61147c07825985e8 -size 392081 diff --git a/modal_volume_download/images/20250203_033552_flux.1-dev_modal_local_castle_siege_red.png b/modal_volume_download/images/20250203_033552_flux.1-dev_modal_local_castle_siege_red.png deleted file mode 100644 index 9b5b66b7838cde4712527408aedf7f6ddacdfd90..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250203_033552_flux.1-dev_modal_local_castle_siege_red.png +++ /dev/null 
@@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e7a72ac2fbe8e51f61800d6c76bcd4ba76d452b2ec17363fc7c861d9629fea69 -size 378623 diff --git a/modal_volume_download/images/20250203_050713_flux.1-dev_modal_local_forest_battle_blue.png b/modal_volume_download/images/20250203_050713_flux.1-dev_modal_local_forest_battle_blue.png deleted file mode 100644 index 003dfb52a45ebd7e03aa81c742b4736524235349..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250203_050713_flux.1-dev_modal_local_forest_battle_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e6e73bb2f93405acbe9d3d335a03bb2d305cb44694080ada1d34f0961f35aca1 -size 424978 diff --git a/modal_volume_download/images/20250203_050755_flux.1-dev_modal_local_forest_battle_blue.png b/modal_volume_download/images/20250203_050755_flux.1-dev_modal_local_forest_battle_blue.png deleted file mode 100644 index 397069b80047fbbf16d07328468d8b909461a0c3..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250203_050755_flux.1-dev_modal_local_forest_battle_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:c924d18541ae4ed5d5d3bbc5ec8b7461affb7a7120b65323ef4ea5036a3c5533 -size 436723 diff --git a/modal_volume_download/images/20250203_053946_flux.1-dev_modal_local_forest_battle_blue.png b/modal_volume_download/images/20250203_053946_flux.1-dev_modal_local_forest_battle_blue.png deleted file mode 100644 index 0d2d6df7ecf2136ddde323289cd4e01c78bc1322..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250203_053946_flux.1-dev_modal_local_forest_battle_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:746118c0d538621285dddf5d44d5027793561063d01134d5aaf99e765c203111 -size 432434 diff --git a/modal_volume_download/images/20250203_054200_flux.1-dev_modal_local_forest_battle_blue.png b/modal_volume_download/images/20250203_054200_flux.1-dev_modal_local_forest_battle_blue.png deleted file mode 100644 index baf4123b7a642b9cf638e035bc61078b5a8909a1..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250203_054200_flux.1-dev_modal_local_forest_battle_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:c05609ab4f6cbefd8187c8ef7a91b6ce243e257961c1cb0a6be166aaaaaa18e0 -size 433108 diff --git a/modal_volume_download/images/20250203_054637_flux.1-dev_modal_local_forest_battle_blue.png b/modal_volume_download/images/20250203_054637_flux.1-dev_modal_local_forest_battle_blue.png deleted file mode 100644 index a329adc9a39c1cee1c627f5a7af3485975a1c90f..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250203_054637_flux.1-dev_modal_local_forest_battle_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:86c54e7d37dbff6cd8f20cf1b5235f57d0f18465ea59fc5d94a3fed4a164bcd1 -size 408170 diff --git a/modal_volume_download/images/20250203_054716_flux.1-dev_modal_local_forest_battle_blue.png b/modal_volume_download/images/20250203_054716_flux.1-dev_modal_local_forest_battle_blue.png deleted file mode 100644 index e1c00c0e1988493d4abf656ef896e5348b7d709d..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250203_054716_flux.1-dev_modal_local_forest_battle_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid 
sha256:53c697267be049ffc04aa6c8c9168a585327e15754dfc944a843d5d3eb6df37e -size 423260 diff --git a/modal_volume_download/images/20250203_054734_flux.1-dev_modal_local_siege_tower_attack_blue.png b/modal_volume_download/images/20250203_054734_flux.1-dev_modal_local_siege_tower_attack_blue.png deleted file mode 100644 index ecccd0462d3380047e00bbbb0caa023391f79fae..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250203_054734_flux.1-dev_modal_local_siege_tower_attack_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:00d7a14b626c4614ae5cbf92244d4ac69e036694acc8294873c2912fa52b4b05 -size 411478 diff --git a/modal_volume_download/images/20250203_054752_flux.1-dev_modal_local_night_battle_blue.png b/modal_volume_download/images/20250203_054752_flux.1-dev_modal_local_night_battle_blue.png deleted file mode 100644 index 0b96d5d69fc07008b1dc082bc93b1b004cb864a7..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250203_054752_flux.1-dev_modal_local_night_battle_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7d11c0c262d35c5186fda3ecb0b2566ff78a2e8378cf0f72aed326d6717ffc76 -size 363255 diff --git a/modal_volume_download/images/20250203_054807_flux.1-dev_modal_local_marching_army_blue.png b/modal_volume_download/images/20250203_054807_flux.1-dev_modal_local_marching_army_blue.png deleted file mode 100644 index 992be6c23bc2e4772f7fade52bfb6ce5869689d7..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250203_054807_flux.1-dev_modal_local_marching_army_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:50118795f3e7db03a7a8135aa7b042b44f0b63ad3e77f35d5d7eed418577ed7f -size 362462 diff --git a/modal_volume_download/images/20250203_054826_flux.1-dev_modal_local_siege_tower_attack_blue.png b/modal_volume_download/images/20250203_054826_flux.1-dev_modal_local_siege_tower_attack_blue.png deleted file mode 100644 index 357b3548a1a0bc89fea3dc1887924ad489aabe38..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250203_054826_flux.1-dev_modal_local_siege_tower_attack_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:af7a0fbea4b07644bf1ab917c8c51b6f53c031065f5cc68ec1614a716b27c32d -size 434856 diff --git a/modal_volume_download/images/20250203_054859_flux.1-dev_modal_local_siege_tower_attack_blue.png b/modal_volume_download/images/20250203_054859_flux.1-dev_modal_local_siege_tower_attack_blue.png deleted file mode 100644 index 1fbc12585bc1804b980527215b8069db5562f971..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250203_054859_flux.1-dev_modal_local_siege_tower_attack_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:9bdb3e3a6ebbc2136939ef694a3337c0b96aca424aa1b47056e069cfd349a18c -size 410808 diff --git a/modal_volume_download/images/20250203_154822_flux.1-dev_modal_local_siege_tower_attack_blue.png b/modal_volume_download/images/20250203_154822_flux.1-dev_modal_local_siege_tower_attack_blue.png deleted file mode 100644 index d3e9254bb01d6a0c9c4b7b4e7ff2d3fd4454c37a..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250203_154822_flux.1-dev_modal_local_siege_tower_attack_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid 
sha256:897ca848c77caae6d6186b4da32b30519d271ab3e20818557e969be3053d3eaa -size 387560 diff --git a/modal_volume_download/images/20250203_171357_flux.1-dev_modal_local_forest_battle_blue.png b/modal_volume_download/images/20250203_171357_flux.1-dev_modal_local_forest_battle_blue.png deleted file mode 100644 index 3a6c6e6b1dd67cda1aa601fee9bca6f8017356f4..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250203_171357_flux.1-dev_modal_local_forest_battle_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7a85ffcdf8f1c81f5e1179ed93174ff6fe984e300b63c56394730dcc875e7f80 -size 399015 diff --git a/modal_volume_download/images/20250203_180759_flux.1-dev_modal_local_castle_siege_blue.png b/modal_volume_download/images/20250203_180759_flux.1-dev_modal_local_castle_siege_blue.png deleted file mode 100644 index 847ed73d815af389a077c965a322d8649c4f5ba6..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250203_180759_flux.1-dev_modal_local_castle_siege_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:4ce84cdbb7677c790de900290f2fcc500a2a48188d5b99de65f57356c7003e8f -size 399216 diff --git a/modal_volume_download/images/20250203_185242_flux.1-dev_modal_local_snowy_battlefield_blue.png b/modal_volume_download/images/20250203_185242_flux.1-dev_modal_local_snowy_battlefield_blue.png deleted file mode 100644 index 7f0f619837beced0135937a725bcc769d7ed3ba6..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250203_185242_flux.1-dev_modal_local_snowy_battlefield_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:f6bbdcc47b636e50b12fbe2cf054902c19ae8ec467252ff00355ad372cdf8b8f -size 395076 diff --git a/modal_volume_download/images/20250203_203049_flux.1-dev_modal_local_castle_siege_red.png b/modal_volume_download/images/20250203_203049_flux.1-dev_modal_local_castle_siege_red.png deleted file mode 100644 index 4cb6ee9063c47264dc43cde8e9a445d8558e7059..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250203_203049_flux.1-dev_modal_local_castle_siege_red.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:d1b983124221f6fea1c0b09f00997ad64b6fe7545908e2aa3f7f130393010648 -size 405150 diff --git a/modal_volume_download/images/20250203_203628_flux.1-dev_modal_local_castle_siege_blue.png b/modal_volume_download/images/20250203_203628_flux.1-dev_modal_local_castle_siege_blue.png deleted file mode 100644 index 1c7d813004112a4e56a2453fa8a8b9c01f4567f0..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250203_203628_flux.1-dev_modal_local_castle_siege_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:8f511dc36c08e032b3c7dd8b5f882a6c1491379064dbbc8d26f78cedc7e62419 -size 379425 diff --git a/modal_volume_download/images/20250203_212222_flux.1-dev_modal_local_castle_siege_red.png b/modal_volume_download/images/20250203_212222_flux.1-dev_modal_local_castle_siege_red.png deleted file mode 100644 index 73a4947b176843e2d854c1c1d097a3181589b22d..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250203_212222_flux.1-dev_modal_local_castle_siege_red.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:f0a45daba03b7f54632aa0e3b7dfdb11a6b8a292a6a9ed20405fa99f3d09e5c0 -size 391831 diff --git 
a/modal_volume_download/images/20250203_212454_flux.1-dev_modal_local_castle_siege_red.png b/modal_volume_download/images/20250203_212454_flux.1-dev_modal_local_castle_siege_red.png deleted file mode 100644 index c39a67917157a8f0f64d349b2300ab3d0cc51fad..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250203_212454_flux.1-dev_modal_local_castle_siege_red.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:14bea7199725fb262b357530a051acc5d54e8604b9eedb756267527a6d00c421 -size 408122 diff --git a/modal_volume_download/images/20250203_222431_flux.1-dev_modal_local_forest_battle_blue.png b/modal_volume_download/images/20250203_222431_flux.1-dev_modal_local_forest_battle_blue.png deleted file mode 100644 index 5ceb2ff213312536705ea05ed32a9bc45112bb69..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250203_222431_flux.1-dev_modal_local_forest_battle_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:ffd8d0f06443964186bcb09f4b3f661e15b850e2a2c0fc2ca635b3599856adb2 -size 419392 diff --git a/modal_volume_download/images/20250203_225712_flux.1-dev_modal_local_forest_battle_blue.png b/modal_volume_download/images/20250203_225712_flux.1-dev_modal_local_forest_battle_blue.png deleted file mode 100644 index a7805f4538ec9d1bbe1e92199b1c3e8e7066ecc4..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250203_225712_flux.1-dev_modal_local_forest_battle_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:8727374d7b04ab64acf4c2e5bbca3e2000045a66024840f960d7a21e678d85a1 -size 426670 diff --git a/modal_volume_download/images/20250204_012912_flux.1-dev_modal_local_castle_siege_red.png b/modal_volume_download/images/20250204_012912_flux.1-dev_modal_local_castle_siege_red.png deleted file mode 100644 index d3e181c4c69679ad71330a4ddef2a94c266dfa5d..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250204_012912_flux.1-dev_modal_local_castle_siege_red.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e20f3ddff995c94b8fe6bd5c9d1f4eb8d71faf551c817040d461a1ec5a56acd9 -size 392871 diff --git a/modal_volume_download/images/20250204_030437_flux.1-dev_modal_local_night_battle_blue.png b/modal_volume_download/images/20250204_030437_flux.1-dev_modal_local_night_battle_blue.png deleted file mode 100644 index e41d47aa2a82db4935fb73e0460d86b525ffa8ac..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250204_030437_flux.1-dev_modal_local_night_battle_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:6913133fe4ab20633eb105a96ab3726b65dcbee975817e9e29368dd5b1395427 -size 348309 diff --git a/modal_volume_download/images/20250204_030944_flux.1-dev_modal_local_night_battle_blue.png b/modal_volume_download/images/20250204_030944_flux.1-dev_modal_local_night_battle_blue.png deleted file mode 100644 index 914374d044c1ae7e2a43912e54b7753e302944b4..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250204_030944_flux.1-dev_modal_local_night_battle_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:86bfab077ecba180b7062499bd57f899b0ac81329eca2570f439cb8fc3443a2b -size 351676 diff --git a/modal_volume_download/images/20250204_031139_flux.1-dev_modal_local_night_battle_blue.png 
b/modal_volume_download/images/20250204_031139_flux.1-dev_modal_local_night_battle_blue.png deleted file mode 100644 index 4e490f55ee38563b027a8c138bdff93b24a55a46..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250204_031139_flux.1-dev_modal_local_night_battle_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7c64e13643e5d3412e1a84a7350799435189ea9aacb20823f417cdd9e517ee85 -size 352041 diff --git a/modal_volume_download/images/20250204_031547_flux.1-dev_modal_local_castle_siege_red.png b/modal_volume_download/images/20250204_031547_flux.1-dev_modal_local_castle_siege_red.png deleted file mode 100644 index 86460f63600f2d7f34cddbf848c6394fe39ebd07..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250204_031547_flux.1-dev_modal_local_castle_siege_red.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:8495bcff04e21aeb5c2364f669ce4cb4548c369a1a093e4a29c6e6712f2270be -size 400580 diff --git a/modal_volume_download/images/20250204_031935_flux.1-dev_modal_local_castle_siege_red.png b/modal_volume_download/images/20250204_031935_flux.1-dev_modal_local_castle_siege_red.png deleted file mode 100644 index c288a17c4a5ade5c1e3112b50fadea37f3828aef..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250204_031935_flux.1-dev_modal_local_castle_siege_red.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:fee2b57d0faef9f74b12ba452e93703aef9cc3535760f9148ce6801b9c4263d6 -size 402268 diff --git a/modal_volume_download/images/20250204_032132_flux.1-dev_modal_local_night_battle_blue.png b/modal_volume_download/images/20250204_032132_flux.1-dev_modal_local_night_battle_blue.png deleted file mode 100644 index 2ef2c473ec3880630ab6682f9d093a697c7b9fb5..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250204_032132_flux.1-dev_modal_local_night_battle_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:cd7edfde68ab9c6965711dad3e25ab151a798d80f5c3906b011387870f4407ab -size 348613 diff --git a/modal_volume_download/images/20250206_020153_flux.1-dev_modal_local_boiling_oil_defense_blue.png b/modal_volume_download/images/20250206_020153_flux.1-dev_modal_local_boiling_oil_defense_blue.png deleted file mode 100644 index 42558cd263f6bc6c9bdc389937aa2182a344d832..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250206_020153_flux.1-dev_modal_local_boiling_oil_defense_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:793d41fb6290c45209d3803e26f555d10cce7e04637b984e63a85863e5fabe45 -size 419317 diff --git a/modal_volume_download/images/20250209_053636_flux.1-dev_modal_local_burning_castle_battle_red.png b/modal_volume_download/images/20250209_053636_flux.1-dev_modal_local_burning_castle_battle_red.png deleted file mode 100644 index f3074cd57f53c8b1c3a2bcf1991c817df01acb60..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250209_053636_flux.1-dev_modal_local_burning_castle_battle_red.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:b23d21dbcd4869d0236dd5c1aa8be608eb08dd22bf70b655a64ea0a55924e42b -size 382917 diff --git a/modal_volume_download/images/20250209_053658_flux.1-dev_modal_local_burning_castle_battle_red.png 
b/modal_volume_download/images/20250209_053658_flux.1-dev_modal_local_burning_castle_battle_red.png deleted file mode 100644 index a581ce655cc0597f26312eb753374076163af675..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250209_053658_flux.1-dev_modal_local_burning_castle_battle_red.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:88b3a243c4250e117d46ada630053874aa8b59e81c749b445039456775c36870 -size 375071 diff --git a/modal_volume_download/images/20250209_172043_flux.1-dev_modal_local_burning_castle_battle_blue.png b/modal_volume_download/images/20250209_172043_flux.1-dev_modal_local_burning_castle_battle_blue.png deleted file mode 100644 index dc3ee76aa156be126cf6fcceec7f8fe68f914ad9..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250209_172043_flux.1-dev_modal_local_burning_castle_battle_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7470adfbb0ac3103738003c2f6c15eb9b3b26dec43b2eb3d104fb5c4f44135b1 -size 2877098 diff --git a/modal_volume_download/images/20250209_172355_flux.1-dev_modal_local_burning_castle_battle_blue.png b/modal_volume_download/images/20250209_172355_flux.1-dev_modal_local_burning_castle_battle_blue.png deleted file mode 100644 index 494dc8b5bc40dc23e4837c18f2180e72672b67f1..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250209_172355_flux.1-dev_modal_local_burning_castle_battle_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:c5a1399ddfddf8dd07d293c76f06d8c69e88915b7065c2d32b6e927b311e704c -size 402386 diff --git a/modal_volume_download/images/20250209_182544_flux.1-dev_modal_local_castle_siege_red.png b/modal_volume_download/images/20250209_182544_flux.1-dev_modal_local_castle_siege_red.png deleted file mode 100644 index d6b2c0cab1860a8a256b50ce02d26f7fac27146a..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250209_182544_flux.1-dev_modal_local_castle_siege_red.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:edf14a260fc3fdd37bf85ef0854ea1e5b7169a237ab710a3016aa084d0b033ba -size 393996 diff --git a/modal_volume_download/images/20250210_030137_flux.1-dev_modal_local_burning_castle_battle_blue.png b/modal_volume_download/images/20250210_030137_flux.1-dev_modal_local_burning_castle_battle_blue.png deleted file mode 100644 index 39336b76e88e3a729f2018031e991baefc459d40..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250210_030137_flux.1-dev_modal_local_burning_castle_battle_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:0dbdc0fc02a1b18650f9b605b6f8995e03c78f0c346e8a352de59c96617143e0 -size 378641 diff --git a/modal_volume_download/images/20250210_030229_flux.1-dev_modal_local_burning_castle_battle_blue.png b/modal_volume_download/images/20250210_030229_flux.1-dev_modal_local_burning_castle_battle_blue.png deleted file mode 100644 index 0db476866359bc9872d7f8a53bc31e2fe9ece021..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250210_030229_flux.1-dev_modal_local_burning_castle_battle_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:b2f2c865b5b1fbd52f1442347a8f7398540566c9d07e67aa1b88f3b982a7c05b -size 383448 diff --git a/modal_volume_download/images/20250210_030309_flux.1-dev_modal_local_burning_castle_battle_blue.png 
b/modal_volume_download/images/20250210_030309_flux.1-dev_modal_local_burning_castle_battle_blue.png deleted file mode 100644 index 276be2e9da01c3cc8eff93d34c865a948a972815..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250210_030309_flux.1-dev_modal_local_burning_castle_battle_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:6c0dac31a765d5e6084fa7550f82120ab2bee134c20c5f3e6c031d7ea9ed54f3 -size 369053 diff --git a/modal_volume_download/images/20250210_030518_flux.1-dev_modal_local_burning_castle_battle_blue.png b/modal_volume_download/images/20250210_030518_flux.1-dev_modal_local_burning_castle_battle_blue.png deleted file mode 100644 index 72aece8a7ba4467cba77e65940a266690816f033..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250210_030518_flux.1-dev_modal_local_burning_castle_battle_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:4476c9eae80713735d5b80202ec5b683bc3e27b318d7540429b20ba592e0ee14 -size 387230 diff --git a/modal_volume_download/images/20250210_030540_flux.1-dev_modal_local_burning_castle_battle_blue.png b/modal_volume_download/images/20250210_030540_flux.1-dev_modal_local_burning_castle_battle_blue.png deleted file mode 100644 index d4e87a3bd01f722ee3cbaaa44101f578a83674fe..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250210_030540_flux.1-dev_modal_local_burning_castle_battle_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:339359c20ce852f0a733065dee450fdf804dc1ad7adae7a87ab08b9038d05f2d -size 400715 diff --git a/modal_volume_download/images/20250210_030605_flux.1-dev_modal_local_burning_castle_battle_blue.png b/modal_volume_download/images/20250210_030605_flux.1-dev_modal_local_burning_castle_battle_blue.png deleted file mode 100644 index 058415d7fb5c2807dea3520c95521085419aca3c..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250210_030605_flux.1-dev_modal_local_burning_castle_battle_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:4f0a1d259d84193f88174028a4b881ab9d38b088cfce3558d275445366251ad9 -size 380441 diff --git a/modal_volume_download/images/20250210_030714_flux.1-dev_modal_local_burning_castle_battle_blue.png b/modal_volume_download/images/20250210_030714_flux.1-dev_modal_local_burning_castle_battle_blue.png deleted file mode 100644 index 7ccc4364210f210f82a36581d6d38f04d651f77b..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250210_030714_flux.1-dev_modal_local_burning_castle_battle_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:49000f567b5a7a0ab171cd349cf777591e39217524cd13242ad2b9b8f0dc64e4 -size 371073 diff --git a/modal_volume_download/images/20250210_030813_flux.1-dev_modal_local_burning_castle_battle_blue.png b/modal_volume_download/images/20250210_030813_flux.1-dev_modal_local_burning_castle_battle_blue.png deleted file mode 100644 index 5daebd67f839ade28b03710f8380822c3f9dc4d5..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250210_030813_flux.1-dev_modal_local_burning_castle_battle_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:19a1ad031c7ab8dcb6545f0beb88ab164138bf33ff72c482e2c6d2dbc11b2fe6 -size 390519 diff --git 
a/modal_volume_download/images/20250210_030855_flux.1-dev_modal_local_burning_castle_battle_blue.png b/modal_volume_download/images/20250210_030855_flux.1-dev_modal_local_burning_castle_battle_blue.png deleted file mode 100644 index 4c4742b0b2690df370d4d5519e631ded3108ee24..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250210_030855_flux.1-dev_modal_local_burning_castle_battle_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:3c3a128f3cdb56ac98f99b5765148b1cbc332a4f129e815f90c63d7ad3ec1ef8 -size 385759 diff --git a/modal_volume_download/images/20250210_030942_flux.1-dev_modal_local_burning_castle_battle_blue.png b/modal_volume_download/images/20250210_030942_flux.1-dev_modal_local_burning_castle_battle_blue.png deleted file mode 100644 index 351d5a93d07c38ae25f7d2dd895616dfcafa9269..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250210_030942_flux.1-dev_modal_local_burning_castle_battle_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:ccef8737a78cdf069e49e6ca587f18d5e367e5d7ad96fb1cc48233430326f812 -size 382506 diff --git a/modal_volume_download/images/20250210_031150_flux.1-dev_modal_local_burning_castle_battle_blue.png b/modal_volume_download/images/20250210_031150_flux.1-dev_modal_local_burning_castle_battle_blue.png deleted file mode 100644 index 5963b266d0126fb49ec33b55b9a4121898bd30d5..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250210_031150_flux.1-dev_modal_local_burning_castle_battle_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:878d397bc951a1ae78e10e90c2d2d496da31424a9dba80c4af558184a22bd0c1 -size 367810 diff --git a/modal_volume_download/images/20250210_031204_flux.1-dev_modal_local_burning_castle_battle_blue.png b/modal_volume_download/images/20250210_031204_flux.1-dev_modal_local_burning_castle_battle_blue.png deleted file mode 100644 index 873ced43ef0d8861addab7838b7348950bd2cf8f..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250210_031204_flux.1-dev_modal_local_burning_castle_battle_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:52987567ed822a4d89b05cb7487a039680d6e2346113f1bc2ec27e1cd0eecbe4 -size 389934 diff --git a/modal_volume_download/images/20250210_031219_flux.1-dev_modal_local_burning_castle_battle_blue.png b/modal_volume_download/images/20250210_031219_flux.1-dev_modal_local_burning_castle_battle_blue.png deleted file mode 100644 index 79431e923e3ab72a00edead22408f66353df834c..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250210_031219_flux.1-dev_modal_local_burning_castle_battle_blue.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:507df046ba5428fb8f5aa317996a0ab5c3e8d8ad05c888c18d34c849378decf7 -size 365406 diff --git a/modal_volume_download/images/20250210_042321_flux.1-dev_modal_local_castle_siege_red.png b/modal_volume_download/images/20250210_042321_flux.1-dev_modal_local_castle_siege_red.png deleted file mode 100644 index 9807fbc6e4aab328eeb894acda0832044685f70e..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250210_042321_flux.1-dev_modal_local_castle_siege_red.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:76eb54866bf40a541035e2314599d8ab6165086bbafc7e161da721002b914ca9 -size 392192 diff --git 
a/modal_volume_download/images/20250210_042352_flux.1-dev_modal_local_castle_siege_red.png b/modal_volume_download/images/20250210_042352_flux.1-dev_modal_local_castle_siege_red.png deleted file mode 100644 index da614538cdebea5715cdef54cc7df05af9aba96a..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250210_042352_flux.1-dev_modal_local_castle_siege_red.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:afa06274e6d04a8929afd2e078fb63bdc4c47a87f1b2fd8cb66d02d628b0013b -size 410257 diff --git a/modal_volume_download/images/20250210_042552_flux.1-dev_modal_local_castle_siege_red.png b/modal_volume_download/images/20250210_042552_flux.1-dev_modal_local_castle_siege_red.png deleted file mode 100644 index 3f63a1ea1d5d25234eec944b5149872d2c10c229..0000000000000000000000000000000000000000 --- a/modal_volume_download/images/20250210_042552_flux.1-dev_modal_local_castle_siege_red.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:143dc49fe25fab6dd60f5a998b79d4c3857b8865766142f7c64cfe010be6c340 -size 386975 diff --git a/old/.DS_Store b/old/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..99ad910d622938fd5a8c4c399039d7f59cb27669 Binary files /dev/null and b/old/.DS_Store differ diff --git a/ctb-modal.py b/old/ctb-modal.py similarity index 100% rename from ctb-modal.py rename to old/ctb-modal.py diff --git a/old/img_gen_modal copy.py b/old/img_gen_modal copy.py new file mode 100644 index 0000000000000000000000000000000000000000..ff25dd80fe919c67b2cd7a046a49d5cd6366c311 --- /dev/null +++ b/old/img_gen_modal copy.py @@ -0,0 +1,287 @@ +#img_gen_modal.py +import modal +import random +import io +from config.config import prompts, models # Indirect import +import os +import gradio as gr + +#MOVED FROM IMAGE IMPORT LIST +import torch +import sentencepiece +import torch +from huggingface_hub import login +from transformers import AutoTokenizer +import random +from datetime import datetime +#import xformers + + +########## LIVE PREVIEW TEST 1/3 ########## +#from live_preview_helpers import flux_pipe_call_that_returns_an_iterable_of_images +########################################### + + +CACHE_DIR = "/model_cache" + +# Define the Modal image +image = ( + modal.Image.from_registry("nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.9") + .pip_install_from_requirements("requirements.txt") + #modal.Image.debian_slim(python_version="3.9") # Base image + + # .apt_install( + # "git", + # ) + # .pip_install( + # "diffusers", + # "transformers", + # "xformers", + # "torch", + # "accelerate", + # "gradio>=4.44.1", + # "safetensors", + # "pillow", + # "sentencepiece", + # "hf_transfer", + # "huggingface_hub[hf_transfer]", + # "aria2", # aria2 for ultra-fast parallel downloads + # f"git+https://github.com/huggingface/transformers.git" + # ) + .env( + { + "HF_HUB_ENABLE_HF_TRANSFER": "1", "HF_HOME": "HF_HOME", "HF_HUB_CACHE": CACHE_DIR + } + ) +) + +# Create a Modal app +app = modal.App("img-gen-modal", image=image) +with image.imports(): + import os + +flux_model_vol = modal.Volume.from_name("flux-model-vol", create_if_missing=True) # Reference your volume + +############ LIVE PREVIEW 2/3 ################## +# dtype = torch.bfloat16 +# device = "cuda" if torch.cuda.is_available() else "cpu" + +# taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device) +# good_vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="vae", 
torch_dtype=dtype).to(device) +# pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=dtype, vae=taef1).to(device) +# torch.cuda.empty_cache() + +# MAX_SEED = np.iinfo(np.int32).max +# MAX_IMAGE_SIZE = 2048 + +#pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe) +################################################# + + +# GPU FUNCTION +@app.function(volumes={"/data": flux_model_vol}, + secrets=[modal.Secret.from_name("huggingface-token")], + gpu="L40S", + timeout = 300 + ) +def generate_image_gpu(prompt_alias, team_color, model_alias, custom_prompt): + image = generate_image(prompt_alias, team_color, model_alias, custom_prompt) + return image, "Image generated successfully! Call the banners!" + + +# CPU FUNCTION +@app.function(volumes={"/data": flux_model_vol}, + secrets=[modal.Secret.from_name("huggingface-token")], + cpu = 1, + timeout = 300 + ) +def generate_image_cpu(prompt_alias, team_color, model_alias, custom_prompt): + image = generate_image(prompt_alias, team_color, model_alias, custom_prompt) + return image, "Image generated successfully! Call the banners!" + +# MAIN GENERATE IMAGE FUNCTION +def generate_image( + prompt_alias, + team_color, + model_alias, + custom_prompt, + height=360, + width=640, + num_inference_steps=20, + guidance_scale=2.0, + seed=-1, + progress=gr.Progress(track_tqdm=True) # Add progress parameter + ): + with modal.enable_output(): + print("Hello from ctb_modal!") + + print("Running debug check...") + # Debug function to check installed packages + def check_dependencies(): + packages = [ + "diffusers", # For Stable Diffusion + "transformers", # For Hugging Face models + "torch", # PyTorch + "accelerate", # For distributed training/inference + "gradio", # For the Gradio interface (updated to latest version) + "safetensors", # For safe model loading + "pillow", # For image processing + "sentencepiece" + ] + + for package in packages: + try: + import importlib + module = importlib.import_module(package) + print(f" {package} is installed. Version:") + except ImportError: + print(f" {package} is NOT installed.") + + check_dependencies() + + # Find the selected prompt and model + try: + prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"] + model_name = next(m for m in models if m["alias"] == model_alias)["name"] + except StopIteration: + return None, "ERROR: Invalid prompt or model selected." 
+ + # Determine the enemy color + enemy_color = "blue" if team_color.lower() == "red" else "red" + + # Print the original prompt and dynamic values for debugging + print("Original Prompt:") + print(prompt) + print(f"Enemy Color: {enemy_color}") + print(f"Team Color: {team_color.lower()}") + + prompt = prompt.format(team_color=team_color.lower(), enemy_color=enemy_color) + + # Print the formatted prompt for debugging + print("\nFormatted Prompt:") + print(prompt) + + # Append the custom prompt (if provided) + if custom_prompt and len(custom_prompt.strip()) > 0: + prompt += " " + custom_prompt.strip() + + # Randomize the seed if needed + if seed == -1: + seed = random.randint(0, 1000000) + + try: + from diffusers import FluxPipeline + print("Initializing HF TOKEN") + hf_token = os.environ["HF_TOKEN"] + print(hf_token) + print("HF TOKEN:") + login(token=hf_token) + print("model_name:") + print(model_name) + + # Use absolute path with leading slash + local_path = f"/data/{model_name}" # Changed from "data/" to "/data/" + print(f"Loading model from local path: {local_path}") + + # Debug: Check if the directory exists and list its contents + if os.path.exists(local_path): + print("Directory exists. Contents:") + for item in os.listdir(local_path): + print(f" - {item}") + else: + print(f"Directory does not exist: {local_path}") + print("Contents of /data:") + print(os.listdir("/data")) + # CHECK FOR TORCH USING CUDA + print("CHECK FOR TORCH USING CUDA") + print(f"CUDA available: {torch.cuda.is_available()}") + if torch.cuda.is_available(): + print("inside if") + print(f"CUDA device count: {torch.cuda.device_count()}") + print(f"Current device: {torch.cuda.current_device()}") + print(f"Device name: {torch.cuda.get_device_name(torch.cuda.current_device())}") + + ########## INITIALIZING CPU PIPE ########## + + print("-----INITIALIZING PIPE-----") + pipe = FluxPipeline.from_pretrained( + local_path, + torch_dtype=torch.bfloat16, + #torch_dtype=torch.float16, + #torch_dtype=torch.float32, + local_files_only=True + ) + if torch.cuda.is_available(): + print("CUDA available") + print("using gpu") + pipe = pipe.to("cuda") + pipe_message = "CUDA" + else: + print("CUDA not available") + print("using cpu") + pipe = pipe.to("cpu") + pipe_message = "CPU" + # pipe.enable_model_cpu_offload() # Use official recommended method + + + print(f"-----{pipe_message} PIPE INITIALIZED-----") + print(f"Using device: {pipe.device}") + except Exception as e: + print(f"Detailed error: {str(e)}") + return None, f"ERROR: Failed to initialize PIPE2. 
Details: {e}" + try: + print("-----SENDING IMG GEN TO PIPE-----") + print("-----HOLD ON-----") + + + + # ################ LIVE PREVIEW TEST 3/3 #################### + # seed = random.randint(0, MAX_SEED) + # generator = torch.Generator().manual_seed(seed) + + # for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images( + # prompt=prompt, + # guidance_scale=guidance_scale, + # num_inference_steps=num_inference_steps, + # width=width, + # height=height, + # generator=generator, + # output_type="pil", + # good_vae=good_vae, + # ): + # yield img, seed + # ############################################################ + + + ########## SENDING IMG GEN TO PIPE - WORKING CODE ########## + image = pipe( + prompt, + guidance_scale=guidance_scale, + num_inference_steps=num_inference_steps, + width=width, + height=height, + max_sequence_length=512, + # seed=seed + ).images[0] + ############################################################# + + print("-----IMAGE GENERATED SUCCESSFULLY!-----") + print(image) + + except Exception as e: + return f"ERROR: Failed to initialize InferenceClient. Details: {e}" + + try: + print("-----SAVING-----") + print("-----DONE!-----") + print("-----CALL THE BANNERS!-----") + # Save the image with a timestamped filename + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + output_filename = f"/data/images/{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team_color.lower()}.png" + # Save the image using PIL's save method + image.save(output_filename) + print(f"File path: {output_filename}") + except Exception as e: + print(f"ERROR: Failed to save image. Details: {e}") + # Return the filename and success message + return image \ No newline at end of file diff --git a/old/old v1/img_gen_modal copy 2.py b/old/old v1/img_gen_modal copy 2.py new file mode 100644 index 0000000000000000000000000000000000000000..d5ee2de84f80c278a9b049095cc570cb6e2b6f17 --- /dev/null +++ b/old/old v1/img_gen_modal copy 2.py @@ -0,0 +1,121 @@ +import modal +import random +from datetime import datetime +import io +import os +from config.config import models, prompts + +volume = modal.Volume.from_name("flux-model-vol") + +# Define the Modal image +image = (modal.Image.debian_slim(python_version="3.9") + .pip_install( + "ninja", + "packaging", + "wheel", + "diffusers", # For Stable Diffusion + "transformers", # For Hugging Face models + "torch>=2.0.1", # PyTorch with a minimum version + "accelerate", # For distributed training/inference + "gradio", # For the Gradio interface + "safetensors", # For safe model loading + "pillow", # For image processing + "datasets", # For datasets (if needed) + ) +) +with image.imports(): + import diffusers + import torch + from fastapi import Response + +app = modal.App("ctb-ai-img-gen-modal", image=image) + +@app.local_entrypoint() +def generate_image(prompt_alias, team_color, model_alias, custom_prompt, height=360, width=640, num_inference_steps=20, guidance_scale=2.0, seed=-1): + import torch + from diffusers import StableDiffusionPipeline + # Debug function to check installed packages + + +def check_dependencies(): + import importlib + # Load the pipeline + self.model_dir = model_dir + self.device = "cuda" + self.torch_dtype = torch.float16 + + #@modal.method() + def run( + self, + prompt_alias: str, + team_color: str, + model_alias: str, + custom_prompt: str, + height: int = 360, + width: int = 640, + num_inference_steps: int = 20, + guidance_scale: float = 2.0, + seed: int = -1, + ) -> tuple[str, str]: + import 
torch + from diffusers import StableDiffusionPipeline + + # Find the selected prompt and model + try: + prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"] + model_name = next(m for m in models if m["alias"] == model_alias)["name"] + except StopIteration: + return None, "ERROR: Invalid prompt or model selected." + + # Determine the enemy color + enemy_color = "blue" if team_color.lower() == "red" else "red" + + # Format the prompt + prompt = prompt.format(team_color=team_color.lower(), enemy_color=enemy_color) + + # Append custom prompt if provided + if custom_prompt and len(custom_prompt.strip()) > 0: + prompt += " " + custom_prompt.strip() + + # Set seed + seed = seed if seed != -1 else random.randint(0, 2**32 - 1) + print("seeding RNG with", seed) + torch.manual_seed(seed) + + # Load the pipeline + model_path = os.path.join(self.model_dir, model_name) + pipe = StableDiffusionPipeline.from_pretrained( + model_path, + torch_dtype=self.torch_dtype, + safety_checker=None, # Disable safety checker + feature_extractor=None, # Disable feature extractor + ).to(self.device) + + # Generate the image + try: + image = pipe( + prompt, + guidance_scale=guidance_scale, + num_inference_steps=num_inference_steps, + width=width, + height=height, + generator=torch.Generator(self.device).manual_seed(seed) + ).images[0] + + # Save the image with a timestamped filename + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + output_filename = f"{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team_color.lower()}.png" + image.save(output_filename) + + return output_filename, "Image generated successfully!" + except Exception as e: + return None, f"ERROR: Failed to generate image. Details: {e}" + +# Function to be called from the Gradio interface +def generate(prompt_alias, team_color, model_alias, custom_prompt, height=360, width=640, num_inference_steps=20, guidance_scale=2.0, seed=-1): + try: + # Generate the image + image_path, message = generate_image(prompt_alias, team_color, model_alias, custom_prompt, height, width, num_inference_steps, guidance_scale, seed) + return image_path, message + except Exception as e: + return None, f"An error occurred: {e}" diff --git a/old/old v1/img_gen_modal copy 3.py b/old/old v1/img_gen_modal copy 3.py new file mode 100644 index 0000000000000000000000000000000000000000..fbf15254ca4b3506996a532f9eb511361ba80124 --- /dev/null +++ b/old/old v1/img_gen_modal copy 3.py @@ -0,0 +1,114 @@ +import modal +import random +from datetime import datetime +import os +from config.config import models, prompts + +# Define the Modal image (same as in modal_app.py) +image = modal.Image.debian_slim(python_version="3.11").pip_install( + "diffusers", + "transformers", + "torch>=2.0.1", + "accelerate", + "gradio", + "safetensors", + "pillow", +) + +# Create a Modal app +app = modal.App("ctb-ai-img-gen-modal", image=image) + +# Define a volume for caching models +volume = modal.Volume.from_name("flux-model-vol") + +@app.cls( + gpu="H100", # Use H100 GPU for maximum performance + container_idle_timeout=20 * 60, # 20 minutes + timeout=60 * 60, # 1 hour + volumes={"/cache": volume}, +) +class Model: + def __init__(self): + self.device = "cuda" + self.torch_dtype = torch.bfloat16 + self.model_dir = "/cache/models" + + @modal.enter() + def setup(self): + import torch + from diffusers import StableDiffusionPipeline + + # Load the model + self.pipe = StableDiffusionPipeline.from_pretrained( + "stabilityai/stable-diffusion-2-1", + 
torch_dtype=self.torch_dtype, + safety_checker=None, + feature_extractor=None, + ).to(self.device) + + # Optimize the model + self.pipe = self.optimize(self.pipe) + + def optimize(self, pipe): + import torch + + # Fuse QKV projections + pipe.unet.fuse_qkv_projections() + pipe.vae.fuse_qkv_projections() + + # Switch memory layout + pipe.unet.to(memory_format=torch.channels_last) + pipe.vae.to(memory_format=torch.channels_last) + + # Compile the model + pipe.unet = torch.compile(pipe.unet, mode="max-autotune", fullgraph=True) + pipe.vae.decode = torch.compile(pipe.vae.decode, mode="max-autotune", fullgraph=True) + + return pipe + + @modal.method() + def generate(self, prompt_alias, team_color, model_alias, custom_prompt): + import torch + from diffusers import StableDiffusionPipeline + + # Find the selected prompt and model + try: + prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"] + model_name = next(m for m in models if m["alias"] == model_alias)["name"] + except StopIteration: + return None, "ERROR: Invalid prompt or model selected." + + # Format the prompt + enemy_color = "blue" if team_color.lower() == "red" else "red" + prompt = prompt.format(team_color=team_color.lower(), enemy_color=enemy_color) + if custom_prompt.strip(): + prompt += " " + custom_prompt.strip() + + # Set seed + seed = random.randint(0, 2**32 - 1) + torch.manual_seed(seed) + + # Generate the image + try: + image = self.pipe( + prompt, + guidance_scale=2.0, + num_inference_steps=20, + width=640, + height=360, + generator=torch.Generator(self.device).manual_seed(seed) + ).images[0] + + # Save the image + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + output_filename = f"{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team_color.lower()}.png" + image.save(output_filename) + + return output_filename, "Image generated successfully!" + except Exception as e: + return None, f"ERROR: Failed to generate image. Details: {e}" + +# Function to be called from the Gradio interface +def generate(prompt_alias, team_color, model_alias, custom_prompt): + model = Model() + return model.generate.remote(prompt_alias, team_color, model_alias, custom_prompt) \ No newline at end of file diff --git a/old/old v1/img_gen_modal copy 4.py b/old/old v1/img_gen_modal copy 4.py new file mode 100644 index 0000000000000000000000000000000000000000..80ffa3b74ba2dae444f88ce64a6f3cd2e501ffa3 --- /dev/null +++ b/old/old v1/img_gen_modal copy 4.py @@ -0,0 +1,142 @@ +#img_gen_modal.py +import modal +import random +from datetime import datetime +import random +import io +from config.config import prompts, models # Indirect import + +# Define the Modal image +image = ( + modal.Image.debian_slim(python_version="3.11") # Base image + .pip_install( + "numpy", + "pandas", + "diffusers", + "transformers", + "torch", + "accelerate", + "gradio", + "safetensors", + "pillow", + ) # Install Python packages + .run_commands("echo 'Image build complete!'") # Run a shell command +) +# Create a Modal app +app = modal.App("img-gen-modal", image=image) +flux_model_vol = modal.Volume.from_name("flux-model-vol") # Reference your volume + +# def on_button_click(): +# f = modal.Function.from_name("functions-app", "message") +# messageNEW = "Remote call Hello World!" 
+# message.remote((messageNEW)) +# #return message.remote((messageNEW)) + + +@app.function(volumes={"/data": flux_model_vol}) +def generate(prompt_alias, team_color, model_alias, custom_prompt, height=360, width=640, num_inference_steps=20, guidance_scale=2.0, seed=-1): + + # Debug: Print a message when the function starts + print("Starting main function inside the container...") + + # Import libraries and print their versions + import numpy as np + import pandas as pd + import torch + import diffusers + import transformers + import gradio as gr + from PIL import Image as PILImage + + print("Hello from img_gen_modal!") + print("NumPy version:", np.__version__) + print("Pandas version:", pd.__version__) + print("PyTorch version:", torch.__version__) + print("Diffusers version:", diffusers.__version__) # Corrected: Use the library's __version__ + print("Transformers version:", transformers.__version__) # Corrected: Use the library's __version__ + print("Gradio version:", gr.__version__) + print("Pillow version:", PILImage.__version__) + + def generate_image(prompt_alias, team_color, model_alias, custom_prompt, height=360, width=640, + num_inference_steps=20, guidance_scale=2.0, seed=-1): + import torch + from diffusers import StableDiffusionPipeline + from config.config import prompts, models # Indirect import + + # Find the selected prompt and model + try: + prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"] + model_name = next(m for m in models if m["alias"] == model_alias)["name"] + except StopIteration: + return None, "ERROR: Invalid prompt or model selected." + + # Determine the enemy color + enemy_color = "blue" if team_color.lower() == "red" else "red" + + # Print the original prompt and dynamic values for debugging + print("Original Prompt:") + print(prompt) + print(f"Enemy Color: {enemy_color}") + print(f"Team Color: {team_color.lower()}") + + # Format the prompt + prompt = prompt.format(team_color=team_color.lower(), enemy_color=enemy_color) + + # Print the formatted prompt for debugging + print("\nFormatted Prompt:") + print(prompt) + + # Append custom prompt if provided + if custom_prompt and len(custom_prompt.strip()) > 0: + prompt += " " + custom_prompt.strip() + + # Randomize seed if needed + if seed == -1: + seed = random.randint(0, 1000000) + + # Initialize the pipeline + pipe = StableDiffusionPipeline.from_pretrained( + model_name, + torch_dtype=torch.float16, + use_safetensors=True, + variant="fp16" + ) + pipe.to("cpu") + + # Generate the image + try: + image = pipe( + prompt, + guidance_scale=guidance_scale, + num_inference_steps=num_inference_steps, + width=width, + height=height, + generator=torch.Generator("cuda").manual_seed(seed) + ).images[0] + except Exception as e: + return None, f"An error occurred ON PIPE: {e}" + + # # Convert PIL image to bytes + # img_byte_arr = io.BytesIO() + # image.save(img_byte_arr, format='PNG') + # img_byte_arr = img_byte_arr.getvalue() + # except Exception as e: + # return None, f"ERROR: Failed to generate image. Details: {e}" + + # Save the image with a timestamped filename + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + output_filename = f"{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team_color.lower()}.png" + try: + image.save(output_filename) + except Exception as e: + return img_byte_arr, "Image generated successfully!" + except Exception as e: + return None, f"ERROR: Failed to generate image. 
Details: {e}" + return output_filename, "Image generated successfully!" + + +# Run the function locally (for testing) +@app.local_entrypoint() +def main(): + print("Running the function locally...") + generate.remote("horse", "blue", "FLUX.1-dev", "bear", height=360, width=640, num_inference_steps=20, guidance_scale=2.0, seed=-1) \ No newline at end of file diff --git a/old/old v1/img_gen_modal copy.py b/old/old v1/img_gen_modal copy.py new file mode 100644 index 0000000000000000000000000000000000000000..df8d315f2605fef3c70771fe4519472c1bda8647 --- /dev/null +++ b/old/old v1/img_gen_modal copy.py @@ -0,0 +1,147 @@ +import modal +import random +from datetime import datetime +import io +import os +from config.config import models, prompts + +volume = modal.Volume.from_name("flux-model-vol") + +# Define the Modal image +image = (modal.Image.debian_slim(python_version="3.9") + .pip_install( + "ninja", + "packaging", + "wheel", + "diffusers", # For Stable Diffusion + "transformers", # For Hugging Face models + "torch>=2.0.1", # PyTorch with a minimum version + "accelerate", # For distributed training/inference + "gradio", # For the Gradio interface + "safetensors", # For safe model loading + "pillow", # For image processing + "datasets", # For datasets (if needed) + ) +) + +app = modal.App("ctb-ai-img-gen-modal", image=image) + +class Inference: + @modal.enter() + def load_pipeline(self): + import torch + from diffusers import StableDiffusionPipeline + # Debug function to check installed packages + + +def check_dependencies(): + import importlib + packages = [ + "diffusers", # For Stable Diffusion + "transformers", # For Hugging Face models + "torch", # PyTorch + "accelerate", # For distributed training/inference + "gradio>=4.44.1", # For the Gradio interface (updated to latest version) + "safetensors", # For safe model loading + "pillow", # For image processing + ] + + for package in packages: + try: + module = importlib.import_module(package) + print(f"✅ {package} is installed. Version: {module.__version__}") + except ImportError: + print(f"❌ {package} is NOT installed.") + + + + # Check if the directory exists + model_dir = "/volume/FLUX.1-dev" + if not os.path.exists(model_dir): + raise FileNotFoundError(f"Model directory not found at {model_dir}") + + print(f"Model directory found at {model_dir}! Proceeding with image generation...") + print("Contents of FLUX.1-dev:") + print(os.listdir(model_dir)) + + # Load the pipeline + self.model_dir = model_dir + self.device = "cuda" + self.torch_dtype = torch.float16 + + @modal.method() + def run( + self, + prompt_alias: str, + team_color: str, + model_alias: str, + custom_prompt: str, + height: int = 360, + width: int = 640, + num_inference_steps: int = 20, + guidance_scale: float = 2.0, + seed: int = -1, + ) -> tuple[str, str]: + import torch + from diffusers import StableDiffusionPipeline + + # Find the selected prompt and model + try: + prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"] + model_name = next(m for m in models if m["alias"] == model_alias)["name"] + except StopIteration: + return None, "ERROR: Invalid prompt or model selected." 
+ + # Determine the enemy color + enemy_color = "blue" if team_color.lower() == "red" else "red" + + # Format the prompt + prompt = prompt.format(team_color=team_color.lower(), enemy_color=enemy_color) + + # Append custom prompt if provided + if custom_prompt and len(custom_prompt.strip()) > 0: + prompt += " " + custom_prompt.strip() + + # Set seed + seed = seed if seed != -1 else random.randint(0, 2**32 - 1) + print("seeding RNG with", seed) + torch.manual_seed(seed) + + # Load the pipeline + model_path = os.path.join(self.model_dir, model_name) + pipe = StableDiffusionPipeline.from_pretrained( + model_path, + torch_dtype=self.torch_dtype, + safety_checker=None, # Disable safety checker + feature_extractor=None, # Disable feature extractor + ).to(self.device) + + # Generate the image + try: + image = pipe( + prompt, + guidance_scale=guidance_scale, + num_inference_steps=num_inference_steps, + width=width, + height=height, + generator=torch.Generator(self.device).manual_seed(seed) + ).images[0] + + # Save the image with a timestamped filename + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + output_filename = f"{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team_color.lower()}.png" + image.save(output_filename) + + return output_filename, "Image generated successfully!" + except Exception as e: + return None, f"ERROR: Failed to generate image. Details: {e}" + +# Function to be called from the Gradio interface +def generate(prompt_alias, team_color, model_alias, custom_prompt, height=360, width=640, num_inference_steps=20, guidance_scale=2.0, seed=-1): + from src.img_gen_modal import Inference + try: + # Generate the image + image_path, message = Inference(prompt_alias, team_color, model_alias, custom_prompt, height, width, num_inference_steps, guidance_scale, seed) + return image_path, message + except Exception as e: + return None, f"An error occurred: {e}" diff --git a/old/old v1/img_gen_modal copy2.py b/old/old v1/img_gen_modal copy2.py new file mode 100644 index 0000000000000000000000000000000000000000..c0ad11b8f600e3f62a4b9f56674aa9009e6faac5 --- /dev/null +++ b/old/old v1/img_gen_modal copy2.py @@ -0,0 +1,134 @@ +#img_gen_modal.py +import modal +# import random +# from datetime import datetime +# import random +# import io + +# Define the Modal image +print("Building custom image...") +image = ( + modal.Image.debian_slim(python_version="3.11") # Base image + .pip_install( + "numpy", + "pandas", + "diffusers", + "transformers", + "torch", + "accelerate", + "gradio", + "safetensors", + "pillow", + ) # Install Python packages + .run_commands("echo 'Image build complete!'") # Run a shell command +) +# Create a Modal app +app = modal.App("ctb-ai-img-gen-modal", image=image) +volume = modal.Volume.from_name("flux-model-vol") # Reference your volume + +@app.local_entrypoint() +def generate(prompt_alias, team_color, model_alias, custom_prompt, height=360, width=640, num_inference_steps=20, guidance_scale=2.0, seed=-1): + + # Debug: Print a message when the function starts + print("Starting main function inside the container...") + + # Import libraries and print their versions + import numpy as np + import pandas as pd + import torch + import diffusers + import transformers + import gradio as gr + from PIL import Image as PILImage + + print("Hello from Modal!") + print("NumPy version:", np.__version__) + print("Pandas version:", pd.__version__) + print("PyTorch version:", torch.__version__) + print("Diffusers version:", 
diffusers.__version__) # Corrected: Use the library's __version__ + print("Transformers version:", transformers.__version__) # Corrected: Use the library's __version__ + print("Gradio version:", gr.__version__) + print("Pillow version:", PILImage.__version__) + + + + try: + # Generate the image + image_path, message = generate_image(prompt_alias, team_color, model_alias, custom_prompt, height, width, num_inference_steps, guidance_scale, seed) + return image_path, message + except Exception as e: + return None, f"An error occurred: {e}" + +def generate_image(prompt_alias, team_color, model_alias, custom_prompt, height=360, width=640, + num_inference_steps=20, guidance_scale=2.0, seed=-1): + import torch + from diffusers import StableDiffusionPipeline + + # Find the selected prompt and model + try: + prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"] + model_name = next(m for m in models if m["alias"] == model_alias)["name"] + except StopIteration: + return None, "ERROR: Invalid prompt or model selected." + + # Determine the enemy color + enemy_color = "blue" if team_color.lower() == "red" else "red" + + # Print the original prompt and dynamic values for debugging + print("Original Prompt:") + print(prompt) + print(f"Enemy Color: {enemy_color}") + print(f"Team Color: {team_color.lower()}") + + # Format the prompt + prompt = prompt.format(team_color=team_color.lower(), enemy_color=enemy_color) + + # Print the formatted prompt for debugging + print("\nFormatted Prompt:") + print(prompt) + + # Append custom prompt if provided + if custom_prompt and len(custom_prompt.strip()) > 0: + prompt += " " + custom_prompt.strip() + + # Randomize seed if needed + if seed == -1: + seed = random.randint(0, 1000000) + + # Initialize the pipeline + pipe = StableDiffusionPipeline.from_pretrained( + model_name, + torch_dtype=torch.float16, + use_safetensors=True, + variant="fp16" + ) + pipe.to("cpu") + + # Generate the image + try: + image = pipe( + prompt, + guidance_scale=guidance_scale, + num_inference_steps=num_inference_steps, + width=width, + height=height, + generator=torch.Generator("cuda").manual_seed(seed) + ).images[0] + + # Convert PIL image to bytes + img_byte_arr = io.BytesIO() + image.save(img_byte_arr, format='PNG') + img_byte_arr = img_byte_arr.getvalue() + except Exception as e: + return None, f"ERROR: Failed to generate image. Details: {e}" + + # Save the image with a timestamped filename + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + output_filename = f"{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team_color.lower()}.png" + try: + image.save(output_filename) + except Exception as e: + return img_byte_arr, "Image generated successfully!" + except Exception as e: + return None, f"ERROR: Failed to generate image. Details: {e}" + return output_filename, "Image generated successfully!" 
\ No newline at end of file diff --git a/old/old v1/img_gen_modal_old.py b/old/old v1/img_gen_modal_old.py new file mode 100644 index 0000000000000000000000000000000000000000..0e37adc9af2f7a06bc88daaac171d21e859d1e81 --- /dev/null +++ b/old/old v1/img_gen_modal_old.py @@ -0,0 +1,146 @@ +#img_gen_modal.py +import modal +import sys +import os +import random +from datetime import datetime +import random +import io +from config.config import models, prompts # Indirect import +import gradio as gr + +volume = modal.Volume.from_name("flux-model-vol") # Reference your volume + +app = modal.App("ctb-ai-img-gen-mondal") + + +# Gradio Interface +with gr.Blocks() as demo: + gr.Markdown("# CtB AI Image Generator") + with gr.Row(): + # Set default values for dropdowns + prompt_dropdown = gr.Dropdown(choices=[p["alias"] for p in prompts], label="Select Prompt", value=prompts[0]["alias"]) + team_dropdown = gr.Dropdown(choices=["Red", "Blue"], label="Select Team", value="Red") + model_dropdown = gr.Dropdown(choices=[m["alias"] for m in models], label="Select Model", value=models[0]["alias"]) + with gr.Row(): + # Add a text box for custom user input (max 200 characters) + custom_prompt_input = gr.Textbox(label="Custom Prompt (Optional)", placeholder="Enter additional details (max 200 chars)...", max_lines=1, max_length=200) + with gr.Row(): + generate_button = gr.Button("Generate Image") + with gr.Row(): + output_image = gr.Image(label="Generated Image") + with gr.Row(): + status_text = gr.Textbox(label="Status", placeholder="Waiting for input...", interactive=False) + + +@app.function( + volumes={"/volume": volume}, # Mount the volume to /volume + #gpu="T4", + timeout=600 +) +def generate(prompt_alias, team_color, model_alias, custom_prompt, height=360, width=640, num_inference_steps=20, guidance_scale=2.0, seed=-1): + import gradio as gr + try: + # Generate the image + image_path, message = generate_image(prompt_alias, team_color, model_alias, custom_prompt, height, width, num_inference_steps, guidance_scale, seed) + return image_path, message + except Exception as e: + return None, f"An error occurred: {e}" + +def generate_image(prompt_alias, team_color, model_alias, custom_prompt, height=360, width=640, + num_inference_steps=20, guidance_scale=2.0, seed=-1): + import torch + from diffusers import StableDiffusionPipeline + + # Debug: Check if the volume is mounted correctly + print("Debug: Checking volume contents...") + try: + volume_contents = os.listdir("/volume") + print(f"Debug: Volume contents: {volume_contents}") + except Exception as e: + print(f"Debug: Error checking volume contents: {e}") + + # Find the selected prompt and model + try: + prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"] + model_name = next(m for m in models if m["alias"] == model_alias)["name"] + except StopIteration: + return None, "ERROR: Invalid prompt or model selected." 
+ + # Debug: Check if the model directory exists + print(f"Debug: Checking if model directory exists: {model_name}") + if not os.path.exists(model_name): + return None, f"ERROR: Model directory not found at {model_name}" + + # Initialize the pipeline using the local model + print("Debug: Loading model...") + + # Determine the enemy color + enemy_color = "blue" if team_color.lower() == "red" else "red" + + # Print the original prompt and dynamic values for debugging + print("Original Prompt:") + print(prompt) + print(f"Enemy Color: {enemy_color}") + print(f"Team Color: {team_color.lower()}") + + # Format the prompt + prompt = prompt.format(team_color=team_color.lower(), enemy_color=enemy_color) + + # Print the formatted prompt for debugging + print("\nFormatted Prompt:") + print(prompt) + + # Append custom prompt if provided + if custom_prompt and len(custom_prompt.strip()) > 0: + prompt += " " + custom_prompt.strip() + + # Randomize seed if needed + if seed == -1: + seed = random.randint(0, 1000000) + + + # Initialize the pipeline + pipe = StableDiffusionPipeline.from_pretrained( + model_name, + torch_dtype=torch.float16, + use_safetensors=True, + variant="fp16" + ) + pipe.to("cpu") + + # Connect the button to the function + generate_button.click( + generate, + inputs=[prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input], + outputs=[output_image, status_text] + ) + # Generate the image + try: + image = pipe( + prompt, + guidance_scale=guidance_scale, + num_inference_steps=num_inference_steps, + width=width, + height=height, + generator=torch.Generator("cuda").manual_seed(seed) + ).images[0] + + # Convert PIL image to bytes + img_byte_arr = io.BytesIO() + image.save(img_byte_arr, format='PNG') + img_byte_arr = img_byte_arr.getvalue() + except Exception as e: + return None, f"ERROR: Failed to generate image. Details: {e}" + + # Save the image with a timestamped filename + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + output_filename = f"{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team_color.lower()}.png" + try: + image.save(output_filename) + except Exception as e: + return img_byte_arr, "Image generated successfully!" + except Exception as e: + return None, f"ERROR: Failed to generate image. Details: {e}" + + return output_filename, "Image generated successfully!" 
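
The version of `generate_image` just above loads the pipeline in `float16`, moves it to CPU, then seeds a CUDA generator, and it also wires `generate_button.click(...)` inside the function where the button is not in scope. A minimal device-consistent sketch of the inference step, assuming the `model_name`, `prompt`, `seed` and sampling parameters prepared earlier in that function:

```python
import torch
from diffusers import StableDiffusionPipeline

def run_pipeline(model_name, prompt, seed, width=640, height=360,
                 num_inference_steps=20, guidance_scale=2.0):
    # Pick one device and keep pipeline, dtype and generator consistent with it.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    dtype = torch.float16 if device == "cuda" else torch.float32  # fp16 is unreliable on CPU

    pipe = StableDiffusionPipeline.from_pretrained(
        model_name, torch_dtype=dtype, use_safetensors=True
    )
    pipe.to(device)

    # Seed the generator on the same device the pipeline runs on.
    generator = torch.Generator(device).manual_seed(seed)
    return pipe(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        generator=generator,
    ).images[0]
```

The UI wiring (`generate_button.click`) belongs with the `gr.Blocks()` definition at module level, as the other app copies in this diff do.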
diff --git a/old/old v1/img_gen_modal_old2.py b/old/old v1/img_gen_modal_old2.py new file mode 100644 index 0000000000000000000000000000000000000000..44b021aa25e67ded8e14a040dba897817158132e --- /dev/null +++ b/old/old v1/img_gen_modal_old2.py @@ -0,0 +1,153 @@ +#img_gen_modal.py +import modal +import sys +import os +import random +from datetime import datetime +import random +import io +from config.config import models, prompts # Indirect import +import gradio as gr + +volume = modal.Volume.from_name("flux-model-vol") # Reference your volume + +# Define the Modal image +image = ( + modal.Image.from_registry( + "nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.11" + ) + .pip_install( + "ninja", + "packaging", + "wheel", + "diffusers", # For Stable Diffusion + "transformers", # For Hugging Face models + "torch>=2.0.1", # PyTorch with a minimum version + "accelerate", # For distributed training/inference + "gradio", # For the Gradio interface + "safetensors", # For safe model loading + "pillow", # For image processing + "datasets", # For datasets (if needed) + ) +) + +app = modal.App("ctb-ai-img-gen-mondal", image=image) + +f = modal.Function.lookup("ctb-ai-img-gen-mondal", "generate_image") + +def generate(prompt_alias, team_color, model_alias, custom_prompt, height=360, width=640, num_inference_steps=20, guidance_scale=2.0, seed=-1): + import gradio as gr + try: + # Generate the image + image_path, message = f.remote(prompt_alias, team_color, model_alias, custom_prompt, height, width, num_inference_steps, guidance_scale, seed) + return image_path, message + except Exception as e: + return None, f"An error occurred: {e}" + +@app.function( + volumes={"/volume": volume}, # Mount the volume to /volume + #gpu="T4", + timeout=600 +) +def generate_image(prompt_alias, team_color, model_alias, custom_prompt, height=360, width=640, + num_inference_steps=20, guidance_scale=2.0, seed=-1): + import torch + from diffusers import StableDiffusionPipeline + + # Check if the directory exists + import os + model_dir = "/volume/FLUX.1-dev" + if not os.path.exists(model_dir): + raise FileNotFoundError(f"Model directory not found at {model_dir}") + + # Your image generation code here + print(f"Model directory found at {model_dir}! Proceeding with image generation...") + + # Example: List contents of the directory + print("Contents of FLUX.1-dev:") + print(os.listdir(model_dir)) + + # Find the selected prompt and model + try: + prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"] + model_name = next(m for m in models if m["alias"] == model_alias)["name"] + except StopIteration: + return None, "ERROR: Invalid prompt or model selected." 
+ + # Debug: Check if the model directory exists + print(f"Debug: Checking if model directory exists: {model_name}") + if not os.path.exists(model_name): + return None, f"ERROR: Model directory not found at {model_name}" + + # Initialize the pipeline using the local model + print("Debug: Loading model...") + + # Determine the enemy color + enemy_color = "blue" if team_color.lower() == "red" else "red" + + # Print the original prompt and dynamic values for debugging + print("Original Prompt:") + print(prompt) + print(f"Enemy Color: {enemy_color}") + print(f"Team Color: {team_color.lower()}") + + # Format the prompt + prompt = prompt.format(team_color=team_color.lower(), enemy_color=enemy_color) + + # Print the formatted prompt for debugging + print("\nFormatted Prompt:") + print(prompt) + + # Append custom prompt if provided + if custom_prompt and len(custom_prompt.strip()) > 0: + prompt += " " + custom_prompt.strip() + + # Randomize seed if needed + if seed == -1: + seed = random.randint(0, 1000000) + + + # Initialize the pipeline + pipe = StableDiffusionPipeline.from_pretrained( + model_name, + torch_dtype=torch.float16, + use_safetensors=True, + #variant="fp16" + ) + pipe.to("cuda") + + # Connect the button to the function + generate_button.click( + generate, + inputs=[prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input], + outputs=[output_image, status_text] + ) + # Generate the image + try: + image = pipe( + prompt, + guidance_scale=guidance_scale, + num_inference_steps=num_inference_steps, + width=width, + height=height, + generator=torch.Generator("cuda").manual_seed(seed) + ).images[0] + + # Convert PIL image to bytes + img_byte_arr = io.BytesIO() + image.save(img_byte_arr, format='PNG') + img_byte_arr = img_byte_arr.getvalue() + except Exception as e: + return None, f"ERROR: Failed to generate image. Details: {e}" + + # Save the image with a timestamped filename + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + output_filename = f"{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team_color.lower()}.png" + try: + image.save(output_filename) + except Exception as e: + return img_byte_arr, "Image generated successfully!" + except Exception as e: + return None, f"ERROR: Failed to generate image. Details: {e}" + + return output_filename, "Image generated successfully!" diff --git a/old/old v3/Dockerfile b/old/old v3/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..8e22aef2d7ec26b3d6a69c07ec0fa27ad5277b32 --- /dev/null +++ b/old/old v3/Dockerfile @@ -0,0 +1,77 @@ +# Use the official NVIDIA CUDA image as the base +FROM nvidia/cuda:12.4.0-devel-ubuntu22.04 + +# Set environment variables +ENV PYTHONUNBUFFERED=1 +ENV DEBIAN_FRONTEND=noninteractive +ENV TORCH_CUDA_ARCH_LIST="8.0" # CUDA architecture for Hopper (H100) + +# Debug: Print starting message +RUN echo "🚀 Starting Docker build process..." + +# Install system dependencies +RUN echo "🔧 Installing system dependencies..." \ + && apt-get update \ + && apt-get install -y \ + git \ + libglib2.0-0 \ + libsm6 \ + libxrender1 \ + libxext6 \ + ffmpeg \ + libgl1 \ + python3.11 \ + python3.11-dev \ + python3.11-distutils \ + curl \ + && rm -rf /var/lib/apt/lists/* \ + && echo "✅ System dependencies installed." + +# Install pip for Python 3.11 +RUN echo "📦 Installing pip for Python 3.11..." \ + && curl -sS https://bootstrap.pypa.io/get-pip.py | python3.11 \ + && echo "✅ pip installed." 
+ +# Install PyTorch and other dependencies +RUN echo "📦 Installing Python dependencies..." \ + && pip install \ + torch==2.5.0 \ + transformers==4.44.0 \ + accelerate==0.33.0 \ + gradio>=4.44.1 \ + safetensors==0.4.4 \ + pillow==10.3.0 \ + invisible_watermark==0.2.0 \ + huggingface_hub[hf_transfer]==0.26.2 \ + sentencepiece==0.2.0 \ + numpy<2 \ + && echo "✅ Python dependencies installed." + +# Clone and install diffusers from GitHub +RUN echo "📦 Cloning and installing diffusers from GitHub..." \ + && git clone https://github.com/huggingface/diffusers.git /tmp/diffusers \ + && cd /tmp/diffusers \ + && pip install . \ + && rm -rf /tmp/diffusers \ + && echo "✅ diffusers installed from GitHub." + +# Set up cache directories for torch.compile +RUN echo "📦 Setting up cache directories for torch.compile..." \ + && mkdir -p /root/.inductor-cache /root/.nv /root/.triton \ + && echo "✅ Cache directories created." + +# Set environment variables for torch.compile +ENV TORCHINDUCTOR_CACHE_DIR=/root/.inductor-cache +ENV TORCHINDUCTOR_FX_GRAPH_CACHE=1 + +# Set the working directory +WORKDIR /app + +# Copy the application code +COPY . /app + +# Debug: Print completion message +RUN echo "🎉 Docker build completed successfully!" + +# Set the entrypoint +ENTRYPOINT ["python3.11"] \ No newline at end of file diff --git a/old/old v3/app copy1.py b/old/old v3/app copy1.py new file mode 100644 index 0000000000000000000000000000000000000000..faa9e793a120e694bfb56f25a54bbb799e0bfca0 --- /dev/null +++ b/old/old v3/app copy1.py @@ -0,0 +1,165 @@ +import os +import random +from huggingface_hub import InferenceClient +from PIL import Image +import gradio as gr +from datetime import datetime + +# Retrieve the Hugging Face token from environment variables +api_token = os.getenv("HF_CTB_TOKEN") + +# Debugging: Check if the Hugging Face token is available +if not api_token: + print("ERROR: Hugging Face token (HF_CTB_TOKEN) is missing. Please set it as an environment variable.") +else: + print("Hugging Face token loaded successfully.") + +# List of models with aliases +models = [ + {"alias": "FLUX.1-dev", "name": "black-forest-labs/FLUX.1-dev"}, + {"alias": "Stable Diffusion 3.5 turbo", "name": "stabilityai/stable-diffusion-3.5-large-turbo"}, + {"alias": "Midjourney", "name": "strangerzonehf/Flux-Midjourney-Mix2-LoRA"} +] + +# List of prompts with intense combat +prompts = [ + { + "alias": "Castle Siege", + "text": "A medieval castle under siege, with archers firing arrows from the walls, knights charging on horses, and catapults launching fireballs. The enemy army, dressed in {enemy_color} armor, is fiercely attacking the castle, with soldiers scaling ladders and clashing swords with the defenders. Arrows fly through the air, explosions light up the battlefield, and injured knights lie on the ground. Fire engulfs parts of the castle, and the air is thick with smoke and chaos. Unreal Engine render style, photorealistic, realistic fantasy style." + }, + { + "alias": "Forest Battle", + "text": "A fierce battle between two armies in a dense forest, with knights wielding swords and axes, horses rearing, and the ground covered in mud and blood. The enemy army, dressed in {enemy_color} armor, is locked in brutal combat, with soldiers fighting hand-to-hand amidst the trees. Arrows whiz past, and the sounds of clashing steel echo through the forest. Injured soldiers scream in pain, and the forest is littered with broken weapons and shields. Unreal Engine render style, photorealistic, realistic fantasy style." 
+ }, + { + "alias": "Boiling Oil Defense", + "text": "A dramatic moment in a medieval siege, with a knight leading a charge against a castle gate, while defenders pour boiling oil from the walls. The enemy army, dressed in {enemy_color} armor, is relentlessly attacking, with soldiers screaming as they are hit by the oil. Knights clash swords at the gate, and arrows rain down from above. The ground is littered with the bodies of fallen soldiers, and the air is filled with the smell of burning flesh. Unreal Engine render style, photorealistic, realistic fantasy style." + }, + { + "alias": "Burning Castle Battle", + "text": "A chaotic battlefield with knights on horseback clashing with infantry, archers firing volleys of arrows, and a castle burning in the background. The enemy army, dressed in {enemy_color} armor, is fighting fiercely, with soldiers engaging in brutal melee combat. Flames light up the scene as knights charge through the chaos. Injured soldiers crawl on the ground, and the air is filled with the sounds of clashing steel and screams of pain. Unreal Engine render style, photorealistic, realistic fantasy style." + }, + { + "alias": "Heroic Last Stand", + "text": "A heroic last stand of a small group of knights defending a bridge against a massive army, with arrows flying and swords clashing. The enemy army, dressed in {enemy_color} armor, is overwhelming the defenders, but the knights fight bravely, cutting down enemy soldiers as they advance. The bridge is littered with bodies and broken weapons. Blood stains the ground, and the air is thick with the sounds of battle. Unreal Engine render style, photorealistic, realistic fantasy style." + }, + { + "alias": "Siege Tower Attack", + "text": "A medieval siege tower approaching a castle wall, with knights scaling ladders and defenders throwing rocks and shooting arrows. The enemy army, dressed in {enemy_color} armor, is fighting desperately to breach the walls, with soldiers clashing swords on the battlements. Arrows fly in all directions, and the siege tower is engulfed in flames. Injured soldiers fall from the ladders, and the ground is littered with the bodies of the fallen. Unreal Engine render style, photorealistic, realistic fantasy style." + }, + { + "alias": "Knight Duel", + "text": "A dramatic duel between two knights in the middle of a battlefield, with their armies watching and the castle in the background. The enemy army, dressed in {enemy_color} armor, is engaged in fierce combat all around, with soldiers clashing swords and firing arrows. The duelists fight with skill and determination, their blades flashing in the sunlight. Injured soldiers lie on the ground, and the air is filled with the sounds of battle. Unreal Engine render style, photorealistic, realistic fantasy style." + }, + { + "alias": "Night Battle", + "text": "A night battle during a medieval siege, with torches lighting the scene, knights fighting in the shadows, and the castle walls looming in the background. The enemy army, dressed in {enemy_color} armor, is locked in brutal combat, with soldiers clashing swords and firing arrows in the dim light. Flames from burning siege equipment illuminate the chaos. Injured soldiers scream in pain, and the ground is littered with the bodies of the fallen. Unreal Engine render style, photorealistic, realistic fantasy style." + }, + { + "alias": "Marching Army", + "text": "A massive army of knights and infantry marching towards a distant castle, with banners flying and the sun setting behind them. 
The enemy army, dressed in {enemy_color} armor, is engaging in skirmishes along the way, with soldiers clashing swords and firing arrows. The battlefield is alive with the sounds of combat and the clash of steel. Injured soldiers lie on the ground, and the air is thick with the smell of blood and smoke. Unreal Engine render style, photorealistic, realistic fantasy style." + }, + { + "alias": "Snowy Battlefield", + "text": "A medieval battle in a snowy landscape, with knights in heavy armor fighting on a frozen lake, and the castle visible in the distance. The enemy army, dressed in {enemy_color} armor, is locked in fierce combat, with soldiers slipping on the ice as they clash swords. Arrows fly through the air, and the snow is stained red with blood. Injured soldiers crawl on the ground, and the air is filled with the sounds of battle. Unreal Engine render style, photorealistic, realistic fantasy style." + } +] + +# Debugging: Print prompt and model options +print("Prompt Options:", [p["alias"] for p in prompts]) +print("Model Options:", [m["alias"] for m in models]) + +# Function to generate images +def generate_image(prompt_alias, team, model_alias, height, width, num_inference_steps, guidance_scale, seed): + # Debugging: Check if the token is available + if not api_token: + return None, "ERROR: Hugging Face token (HF_CTB_TOKEN) is missing. Please set it as an environment variable." + + # Find the selected prompt and model + try: + prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"] + model_name = next(m for m in models if m["alias"] == model_alias)["name"] + except StopIteration: + return None, "ERROR: Invalid prompt or model selected." + + # Determine the enemy color + enemy_color = "blue" if team.lower() == "red" else "red" + prompt = prompt.format(enemy_color=enemy_color) + + if team.lower() == "red": + prompt += " The winning army is dressed in red armor and banners." + elif team.lower() == "blue": + prompt += " The winning army is dressed in blue armor and banners." + + # Randomize the seed if needed + if seed == -1: + seed = random.randint(0, 1000000) + + # Initialize the InferenceClient + try: + client = InferenceClient(model_name, token=api_token) + except Exception as e: + return None, f"ERROR: Failed to initialize InferenceClient. Details: {e}" + + # Generate the image + try: + image = client.text_to_image( + prompt, + guidance_scale=guidance_scale, + num_inference_steps=num_inference_steps, + width=width, + height=height, + seed=seed + ) + except Exception as e: + return None, f"ERROR: Failed to generate image. Details: {e}" + + # Save the image with a timestamped filename + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + output_filename = f"{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team.lower()}.png" + try: + image.save(output_filename) + except Exception as e: + return None, f"ERROR: Failed to save image. Details: {e}" + + return output_filename, "Image generated successfully!" 
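
Unlike the Modal variants, `app copy1.py` above generates through the hosted Inference API rather than a local pipeline. A minimal sketch of that call path, using a placeholder prompt and assuming `HF_CTB_TOKEN` is set as in the surrounding file:

```python
import os
from huggingface_hub import InferenceClient

client = InferenceClient("black-forest-labs/FLUX.1-dev", token=os.getenv("HF_CTB_TOKEN"))
image = client.text_to_image(
    "A medieval castle under siege, fought over by red and blue armies",  # placeholder prompt
    width=640,
    height=360,
    num_inference_steps=20,
    guidance_scale=2.0,
    seed=42,
)
image.save("castle_siege_example.png")  # text_to_image returns a PIL.Image
```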
+ +# Gradio Interface +with gr.Blocks() as demo: + gr.Markdown("# CtB AI Image Generator") + with gr.Row(): + # Set default values for dropdowns + prompt_dropdown = gr.Dropdown(choices=[p["alias"] for p in prompts], label="Select Prompt", value=prompts[0]["alias"]) + team_dropdown = gr.Dropdown(choices=["Red", "Blue"], label="Select Team", value="Red") + model_dropdown = gr.Dropdown(choices=[m["alias"] for m in models], label="Select Model", value=models[0]["alias"]) + with gr.Row(): + height_input = gr.Number(value=360, label="Height") + width_input = gr.Number(value=640, label="Width") + num_inference_steps_input = gr.Slider(minimum=10, maximum=100, value=20, label="Inference Steps") + guidance_scale_input = gr.Slider(minimum=1.0, maximum=20.0, value=2.0, step=0.5, label="Guidance Scale") + seed_input = gr.Number(value=-1, label="Seed (-1 for random)") + with gr.Row(): + generate_button = gr.Button("Generate Image") + with gr.Row(): + output_image = gr.Image(label="Generated Image") + with gr.Row(): + status_text = gr.Textbox(label="Status", placeholder="Waiting for input...", interactive=False) + + # Function to handle button click + def generate(prompt_alias, team, model_alias, height, width, num_inference_steps, guidance_scale, seed): + try: + # Generate the image + image_path, message = generate_image(prompt_alias, team, model_alias, height, width, num_inference_steps, guidance_scale, seed) + return image_path, message + except Exception as e: + return None, f"An error occurred: {e}" + + # Connect the button to the function + generate_button.click( + generate, + inputs=[prompt_dropdown, team_dropdown, model_dropdown, height_input, width_input, num_inference_steps_input, guidance_scale_input, seed_input], + outputs=[output_image, status_text] + ) + +# Launch the Gradio app +demo.launch() \ No newline at end of file diff --git a/old/old v3/app copy2.py b/old/old v3/app copy2.py new file mode 100644 index 0000000000000000000000000000000000000000..54dfc6244ea3af60ea1769c6266fd7348f541a24 --- /dev/null +++ b/old/old v3/app copy2.py @@ -0,0 +1,166 @@ +import os +import random +from huggingface_hub import InferenceClient +from PIL import Image +import gradio as gr +from datetime import datetime + +# Retrieve the Hugging Face token from environment variables +api_token = os.getenv("HF_CTB_TOKEN") + +# Debugging: Check if the Hugging Face token is available +if not api_token: + print("ERROR: Hugging Face token (HF_CTB_TOKEN) is missing. Please set it as an environment variable.") +else: + print("Hugging Face token loaded successfully.") + +# List of models with aliases +models = [ + {"alias": "FLUX.1-dev", "name": "black-forest-labs/FLUX.1-dev"}, + #{"alias": "Stable Diffusion 3.5 turbo", "name": "stabilityai/stable-diffusion-3.5-large-turbo"}, + {"alias": "Midjourney", "name": "strangerzonehf/Flux-Midjourney-Mix2-LoRA"} +] + +# List of prompts with intense combat +prompts = [ + { + "alias": "Castle Siege", + "text": "A medieval castle under siege, with archers firing arrows from the walls, knights charging on horses, and catapults launching fireballs. The enemy army, dressed in {enemy_color} armor, is fiercely attacking the castle, with soldiers scaling ladders and clashing swords with the defenders. Arrows fly through the air, explosions light up the battlefield, and injured knights lie on the ground. Fire engulfs parts of the castle, and the air is thick with smoke and chaos. Unreal Engine render style, photorealistic, realistic fantasy style." 
+ }, + { + "alias": "Forest Battle", + "text": "A fierce battle between two armies in a dense forest, with knights wielding swords and axes, horses rearing, and the ground covered in mud and blood. The enemy army, dressed in {enemy_color} armor, is locked in brutal combat, with soldiers fighting hand-to-hand amidst the trees. Arrows whiz past, and the sounds of clashing steel echo through the forest. Injured soldiers scream in pain, and the forest is littered with broken weapons and shields. Unreal Engine render style, photorealistic, realistic fantasy style." + }, + { + "alias": "Boiling Oil Defense", + "text": "A dramatic moment in a medieval siege, with a knight leading a charge against a castle gate, while defenders pour boiling oil from the walls. The enemy army, dressed in {enemy_color} armor, is relentlessly attacking, with soldiers screaming as they are hit by the oil. Knights clash swords at the gate, and arrows rain down from above. The ground is littered with the bodies of fallen soldiers, and the air is filled with the smell of burning flesh. Unreal Engine render style, photorealistic, realistic fantasy style." + }, + { + "alias": "Burning Castle Battle", + "text": "A chaotic battlefield with knights on horseback clashing with infantry, archers firing volleys of arrows, and a castle burning in the background. The enemy army, dressed in {enemy_color} armor, is fighting fiercely, with soldiers engaging in brutal melee combat. Flames light up the scene as knights charge through the chaos. Injured soldiers crawl on the ground, and the air is filled with the sounds of clashing steel and screams of pain. Unreal Engine render style, photorealistic, realistic fantasy style." + }, + { + "alias": "Heroic Last Stand", + "text": "A heroic last stand of a small group of knights defending a bridge against a massive army, with arrows flying and swords clashing. The enemy army, dressed in {enemy_color} armor, is overwhelming the defenders, but the knights fight bravely, cutting down enemy soldiers as they advance. The bridge is littered with bodies and broken weapons. Blood stains the ground, and the air is thick with the sounds of battle. Unreal Engine render style, photorealistic, realistic fantasy style." + }, + { + "alias": "Siege Tower Attack", + "text": "A medieval siege tower approaching a castle wall, with knights scaling ladders and defenders throwing rocks and shooting arrows. The enemy army, dressed in {enemy_color} armor, is fighting desperately to breach the walls, with soldiers clashing swords on the battlements. Arrows fly in all directions, and the siege tower is engulfed in flames. Injured soldiers fall from the ladders, and the ground is littered with the bodies of the fallen. Unreal Engine render style, photorealistic, realistic fantasy style." + }, + { + "alias": "Knight Duel", + "text": "A dramatic duel between two knights in the middle of a battlefield, with their armies watching and the castle in the background. The enemy army, dressed in {enemy_color} armor, is engaged in fierce combat all around, with soldiers clashing swords and firing arrows. The duelists fight with skill and determination, their blades flashing in the sunlight. Injured soldiers lie on the ground, and the air is filled with the sounds of battle. Unreal Engine render style, photorealistic, realistic fantasy style." 
+ }, + { + "alias": "Night Battle", + "text": "A night battle during a medieval siege, with torches lighting the scene, knights fighting in the shadows, and the castle walls looming in the background. The enemy army, dressed in {enemy_color} armor, is locked in brutal combat, with soldiers clashing swords and firing arrows in the dim light. Flames from burning siege equipment illuminate the chaos. Injured soldiers scream in pain, and the ground is littered with the bodies of the fallen. Unreal Engine render style, photorealistic, realistic fantasy style." + }, + { + "alias": "Marching Army", + "text": "A massive army of knights and infantry marching towards a distant castle, with banners flying and the sun setting behind them. The enemy army, dressed in {enemy_color} armor, is engaging in skirmishes along the way, with soldiers clashing swords and firing arrows. The battlefield is alive with the sounds of combat and the clash of steel. Injured soldiers lie on the ground, and the air is thick with the smell of blood and smoke. Unreal Engine render style, photorealistic, realistic fantasy style." + }, + { + "alias": "Snowy Battlefield", + "text": "A medieval battle in a snowy landscape, with knights in heavy armor fighting on a frozen lake, and the castle visible in the distance. The enemy army, dressed in {enemy_color} armor, is locked in fierce combat, with soldiers slipping on the ice as they clash swords. Arrows fly through the air, and the snow is stained red with blood. Injured soldiers crawl on the ground, and the air is filled with the sounds of battle. Unreal Engine render style, photorealistic, realistic fantasy style." + } +] + +# Debugging: Print prompt and model options +print("Prompt Options:", [p["alias"] for p in prompts]) +print("Model Options:", [m["alias"] for m in models]) + +# Function to generate images +def generate_image(prompt_alias, team, model_alias, height, width, num_inference_steps, guidance_scale, seed): + # Debugging: Check if the token is available + if not api_token: + return None, "ERROR: Hugging Face token (HF_CTB_TOKEN) is missing. Please set it as an environment variable." + + # Find the selected prompt and model + try: + prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"] + model_name = next(m for m in models if m["alias"] == model_alias)["name"] + except StopIteration: + return None, "ERROR: Invalid prompt or model selected." + + # Determine the enemy color + enemy_color = "blue" if team.lower() == "red" else "red" + prompt = prompt.format(enemy_color=enemy_color) + + if team.lower() == "red": + prompt += " The winning army is dressed in red armor and banners." + elif team.lower() == "blue": + prompt += " The winning army is dressed in blue armor and banners." + + # Randomize the seed if needed + if seed == -1: + seed = random.randint(0, 1000000) + + # Initialize the InferenceClient + try: + client = InferenceClient(model_name, token=api_token) + except Exception as e: + return None, f"ERROR: Failed to initialize InferenceClient. Details: {e}" + + # Generate the image + try: + image = client.text_to_image( + prompt, + guidance_scale=guidance_scale, + num_inference_steps=num_inference_steps, + width=width, + height=height, + seed=seed + ) + except Exception as e: + return None, f"ERROR: Failed to generate image. 
Details: {e}" + + # Save the image with a timestamped filename + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + output_filename = f"{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team.lower()}.png" + try: + image.save(output_filename) + except Exception as e: + return None, f"ERROR: Failed to save image. Details: {e}" + + return output_filename, "Image generated successfully!" + +# Gradio Interface +with gr.Blocks() as demo: + gr.Markdown("# CtB AI Image Generator") + with gr.Row(): + # Set default values for dropdowns + prompt_dropdown = gr.Dropdown(choices=[p["alias"] for p in prompts], label="Select Prompt", value=prompts[0]["alias"]) + team_dropdown = gr.Dropdown(choices=["Red", "Blue"], label="Select Team", value="Red") + model_dropdown = gr.Dropdown(choices=[m["alias"] for m in models], label="Select Model", value=models[0]["alias"]) + #with gr.Row(): + # Commented-out dialog boxes (can be re-enabled later) + # height_input = gr.Number(value=360, label="Height") + # width_input = gr.Number(value=640, label="Width") + # num_inference_steps_input = gr.Slider(minimum=10, maximum=100, value=20, label="Inference Steps") + # guidance_scale_input = gr.Slider(minimum=1.0, maximum=20.0, value=2.0, step=0.5, label="Guidance Scale") + # seed_input = gr.Number(value=-1, label="Seed (-1 for random)") + with gr.Row(): + generate_button = gr.Button("Generate Image") + with gr.Row(): + output_image = gr.Image(label="Generated Image") + with gr.Row(): + status_text = gr.Textbox(label="Status", placeholder="Waiting for input...", interactive=False) + + # Function to handle button click + def generate(prompt_alias, team, model_alias, height=360, width=640, num_inference_steps=20, guidance_scale=2.0, seed=-1): + try: + # Generate the image + image_path, message = generate_image(prompt_alias, team, model_alias, height, width, num_inference_steps, guidance_scale, seed) + return image_path, message + except Exception as e: + return None, f"An error occurred: {e}" + + # Connect the button to the function + generate_button.click( + generate, + inputs=[prompt_dropdown, team_dropdown, model_dropdown], # Removed commented-out inputs + outputs=[output_image, status_text] + ) + +# Launch the Gradio app p +demo.launch() \ No newline at end of file diff --git a/old/old v3/app copy3.py b/old/old v3/app copy3.py new file mode 100644 index 0000000000000000000000000000000000000000..d1e6bb1fb23a4c86856dd5c07debf99c469f2614 --- /dev/null +++ b/old/old v3/app copy3.py @@ -0,0 +1,172 @@ +import os +import random +from huggingface_hub import InferenceClient +from PIL import Image +import gradio as gr +from datetime import datetime + +# Retrieve the Hugging Face token from environment variables +api_token = os.getenv("HF_CTB_TOKEN") + +# Debugging: Check if the Hugging Face token is available +if not api_token: + print("ERROR: Hugging Face token (HF_CTB_TOKEN) is missing. Please set it as an environment variable.") +else: + print("Hugging Face token loaded successfully.") + +# List of models with aliases +models = [ + {"alias": "FLUX.1-dev", "name": "black-forest-labs/FLUX.1-dev"}, + {"alias": "Midjourney", "name": "strangerzonehf/Flux-Midjourney-Mix2-LoRA"} +] + +# List of prompts with intense combat +prompts = [ + { + "alias": "Castle Siege", + "text": "A medieval castle under siege, with archers firing arrows from the walls, knights charging on horses, and catapults launching fireballs. 
The enemy army, dressed in {enemy_color} armor, is fiercely attacking the castle, with soldiers scaling ladders and clashing swords with the defenders. Arrows fly through the air, explosions light up the battlefield, and injured knights lie on the ground. Fire engulfs parts of the castle, and the air is thick with smoke and chaos. Unreal Engine render style, photorealistic, realistic fantasy style." + }, + { + "alias": "Forest Battle", + "text": "A fierce battle between two armies in a dense forest, with knights wielding swords and axes, horses rearing, and the ground covered in mud and blood. The enemy army, dressed in {enemy_color} armor, is locked in brutal combat, with soldiers fighting hand-to-hand amidst the trees. Arrows whiz past, and the sounds of clashing steel echo through the forest. Injured soldiers scream in pain, and the forest is littered with broken weapons and shields. Unreal Engine render style, photorealistic, realistic fantasy style." + }, + { + "alias": "Boiling Oil Defense", + "text": "A dramatic moment in a medieval siege, with a knight leading a charge against a castle gate, while defenders pour boiling oil from the walls. The enemy army, dressed in {enemy_color} armor, is relentlessly attacking, with soldiers screaming as they are hit by the oil. Knights clash swords at the gate, and arrows rain down from above. The ground is littered with the bodies of fallen soldiers, and the air is filled with the smell of burning flesh. Unreal Engine render style, photorealistic, realistic fantasy style." + }, + { + "alias": "Burning Castle Battle", + "text": "A chaotic battlefield with knights on horseback clashing with infantry, archers firing volleys of arrows, and a castle burning in the background. The enemy army, dressed in {enemy_color} armor, is fighting fiercely, with soldiers engaging in brutal melee combat. Flames light up the scene as knights charge through the chaos. Injured soldiers crawl on the ground, and the air is filled with the sounds of clashing steel and screams of pain. Unreal Engine render style, photorealistic, realistic fantasy style." + }, + { + "alias": "Heroic Last Stand", + "text": "A heroic last stand of a small group of knights defending a bridge against a massive army, with arrows flying and swords clashing. The enemy army, dressed in {enemy_color} armor, is overwhelming the defenders, but the knights fight bravely, cutting down enemy soldiers as they advance. The bridge is littered with bodies and broken weapons. Blood stains the ground, and the air is thick with the sounds of battle. Unreal Engine render style, photorealistic, realistic fantasy style." + }, + { + "alias": "Siege Tower Attack", + "text": "A medieval siege tower approaching a castle wall, with knights scaling ladders and defenders throwing rocks and shooting arrows. The enemy army, dressed in {enemy_color} armor, is fighting desperately to breach the walls, with soldiers clashing swords on the battlements. Arrows fly in all directions, and the siege tower is engulfed in flames. Injured soldiers fall from the ladders, and the ground is littered with the bodies of the fallen. Unreal Engine render style, photorealistic, realistic fantasy style." + }, + { + "alias": "Knight Duel", + "text": "A dramatic duel between two knights in the middle of a battlefield, with their armies watching and the castle in the background. The enemy army, dressed in {enemy_color} armor, is engaged in fierce combat all around, with soldiers clashing swords and firing arrows. 
The duelists fight with skill and determination, their blades flashing in the sunlight. Injured soldiers lie on the ground, and the air is filled with the sounds of battle. Unreal Engine render style, photorealistic, realistic fantasy style." + }, + { + "alias": "Night Battle", + "text": "A night battle during a medieval siege, with torches lighting the scene, knights fighting in the shadows, and the castle walls looming in the background. The enemy army, dressed in {enemy_color} armor, is locked in brutal combat, with soldiers clashing swords and firing arrows in the dim light. Flames from burning siege equipment illuminate the chaos. Injured soldiers scream in pain, and the ground is littered with the bodies of the fallen. Unreal Engine render style, photorealistic, realistic fantasy style." + }, + { + "alias": "Marching Army", + "text": "A massive army of knights and infantry marching towards a distant castle, with banners flying and the sun setting behind them. The enemy army, dressed in {enemy_color} armor, is engaging in skirmishes along the way, with soldiers clashing swords and firing arrows. The battlefield is alive with the sounds of combat and the clash of steel. Injured soldiers lie on the ground, and the air is thick with the smell of blood and smoke. Unreal Engine render style, photorealistic, realistic fantasy style." + }, + { + "alias": "Snowy Battlefield", + "text": "A medieval battle in a snowy landscape, with knights in heavy armor fighting on a frozen lake, and the castle visible in the distance. The enemy army, dressed in {enemy_color} armor, is locked in fierce combat, with soldiers slipping on the ice as they clash swords. Arrows fly through the air, and the snow is stained red with blood. Injured soldiers crawl on the ground, and the air is filled with the sounds of battle. Unreal Engine render style, photorealistic, realistic fantasy style." + } +] + +# Debugging: Print prompt and model options +print("Prompt Options:", [p["alias"] for p in prompts]) +print("Model Options:", [m["alias"] for m in models]) + +# Function to generate images +def generate_image(prompt_alias, team, model_alias, custom_prompt, height, width, num_inference_steps, guidance_scale, seed): + # Debugging: Check if the token is available + if not api_token: + return None, "ERROR: Hugging Face token (HF_CTB_TOKEN) is missing. Please set it as an environment variable." + + # Find the selected prompt and model + try: + prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"] + model_name = next(m for m in models if m["alias"] == model_alias)["name"] + except StopIteration: + return None, "ERROR: Invalid prompt or model selected." + + # Determine the enemy color + enemy_color = "blue" if team.lower() == "red" else "red" + prompt = prompt.format(enemy_color=enemy_color) + + if team.lower() == "red": + prompt += " The winning army is dressed in red armor and banners." + elif team.lower() == "blue": + prompt += " The winning army is dressed in blue armor and banners." + + # Append the custom prompt (if provided) + if custom_prompt and len(custom_prompt.strip()) > 0: + prompt += " " + custom_prompt.strip() + + # Randomize the seed if needed + if seed == -1: + seed = random.randint(0, 1000000) + + # Initialize the InferenceClient + try: + client = InferenceClient(model_name, token=api_token) + except Exception as e: + return None, f"ERROR: Failed to initialize InferenceClient. 
Details: {e}" + + # Generate the image + try: + image = client.text_to_image( + prompt, + guidance_scale=guidance_scale, + num_inference_steps=num_inference_steps, + width=width, + height=height, + seed=seed + ) + except Exception as e: + return None, f"ERROR: Failed to generate image. Details: {e}" + + # Save the image with a timestamped filename + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + output_filename = f"{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team.lower()}.png" + try: + image.save(output_filename) + except Exception as e: + return None, f"ERROR: Failed to save image. Details: {e}" + + return output_filename, "Image generated successfully!" + +# Gradio Interface +with gr.Blocks() as demo: + gr.Markdown("# CtB AI Image Generator") + with gr.Row(): + # Set default values for dropdowns + prompt_dropdown = gr.Dropdown(choices=[p["alias"] for p in prompts], label="Select Prompt", value=prompts[0]["alias"]) + team_dropdown = gr.Dropdown(choices=["Red", "Blue"], label="Select Team", value="Red") + model_dropdown = gr.Dropdown(choices=[m["alias"] for m in models], label="Select Model", value=models[0]["alias"]) + with gr.Row(): + # Add a text box for custom user input (max 200 characters) + custom_prompt_input = gr.Textbox(label="Custom Prompt (Optional)", placeholder="Enter additional details (max 200 chars)...", max_lines=1, max_length=200) + #with gr.Row(): + # Commented-out dialog boxes (can be re-enabled later) + # height_input = gr.Number(value=360, label="Height") + # width_input = gr.Number(value=640, label="Width") + # num_inference_steps_input = gr.Slider(minimum=10, maximum=100, value=20, label="Inference Steps") + # guidance_scale_input = gr.Slider(minimum=1.0, maximum=20.0, value=2.0, step=0.5, label="Guidance Scale") + # seed_input = gr.Number(value=-1, label="Seed (-1 for random)") + with gr.Row(): + generate_button = gr.Button("Generate Image") + with gr.Row(): + output_image = gr.Image(label="Generated Image") + with gr.Row(): + status_text = gr.Textbox(label="Status", placeholder="Waiting for input...", interactive=False) + + # Function to handle button click + def generate(prompt_alias, team, model_alias, custom_prompt, height=360, width=640, num_inference_steps=20, guidance_scale=2.0, seed=-1): + try: + # Generate the image + image_path, message = generate_image(prompt_alias, team, model_alias, custom_prompt, height, width, num_inference_steps, guidance_scale, seed) + return image_path, message + except Exception as e: + return None, f"An error occurred: {e}" + + # Connect the button to the function + generate_button.click( + generate, + inputs=[prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input], # Added custom_prompt_input + outputs=[output_image, status_text] + ) + +# Launch the Gradio app +demo.launch() \ No newline at end of file diff --git a/old/old v3/ctb_modal copy.py b/old/old v3/ctb_modal copy.py new file mode 100644 index 0000000000000000000000000000000000000000..6d732ebd15e7d6a0366769352ec750930fd7efed --- /dev/null +++ b/old/old v3/ctb_modal copy.py @@ -0,0 +1,64 @@ +import modal +from src.gradio_interface_modal import gradio_interface_modal +from config.config import prompts, models # Indirect import +# Define the Modal image +image = ( + modal.Image.debian_slim(python_version="3.9") # Base image + + .apt_install( + "git", + ) + .pip_install( + "diffusers", + "transformers", + "torch", + "accelerate", + "gradio>=4.44.1", + "safetensors", + "pillow", + "sentencepiece", + 
f"git+https://github.com/huggingface/transformers.git" + ) + .env( + { + "HF_HUB_ENABLE_HF_TRANSFER": "1", "HF_HOME": "HF_HOME" + } +) +) +# Create the Modal app +app = modal.App("ctb-image-generator-modal", image = image) +print("Modal app created.") + +# Entry point for local execution +@app.function(secrets=[modal.Secret.from_name("huggingface-token")]) +def main(): + with modal.enable_output(): + print("Hello from ctb_modal!") + print("Running debug check...") + # Debug function to check installed packages + def check_dependencies(): + packages = [ + "diffusers", # For Stable Diffusion + "transformers", # For Hugging Face models + "torch", # PyTorch + "accelerate", # For distributed training/inference + "gradio", # For the Gradio interface (updated to latest version) + "safetensors", # For safe model loading + "pillow", # For image processing + "sentencepiece" + ] + + for package in packages: + try: + import importlib + module = importlib.import_module(package) + print(f" {package} is installed. Version:") + except ImportError: + print(f" {package} is NOT installed.") + + check_dependencies() + print("Launching Gradio interface...") + # demo.launch() + gradio_interface_modal() + + diff --git a/old/old v3/ctb_modal_old.py b/old/old v3/ctb_modal_old.py new file mode 100644 index 0000000000000000000000000000000000000000..0136877cbfbf65e8540e30e0c931959b9fca5c21 --- /dev/null +++ b/old/old v3/ctb_modal_old.py @@ -0,0 +1,30 @@ +# modal_app.py +import modal +#IMPORT gradio_interface +from src.gradio_interface import demo + +# Create a Modal app +app = modal.App("ctb-image-generator") + +image = ( + modal.Image.debian_slim() # Start with a lightweight Debian-based image + .apt_install("git") # Install system-level dependencies (if needed) + .pip_install( + "diffusers", # For Stable Diffusion + "transformers", # For Hugging Face models + "torch", # PyTorch + "accelerate", # For distributed training/inference + "gradio", # For the Gradio interface + "safetensors", # For safe model loading + "pillow", # For image processing + "datasets", # For datasets (if needed) + ) +) + +@app.local_entrypoint() +def main(): + with modal.enable_output(): + demo.launch() + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/old/old v3/ctb_modal_old_old.py b/old/old v3/ctb_modal_old_old.py new file mode 100644 index 0000000000000000000000000000000000000000..6c70f0cbdc2219e9714575470a5bfd7260b60c7a --- /dev/null +++ b/old/old v3/ctb_modal_old_old.py @@ -0,0 +1,60 @@ +from src.gradio_interface import demo +import modal + + +# Define the Modal image +image = ( + modal.Image.from_registry( + "nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.11" + ) + .pip_install( + "ninja", + "packaging", + "wheel", + "diffusers", # For Stable Diffusion + "transformers", # For Hugging Face models + "torch>=2.0.1", # PyTorch with a minimum version + "accelerate", # For distributed training/inference + "gradio", # For the Gradio interface + "safetensors", # For safe model loading + "pillow", # For image processing + "datasets", # For datasets (if needed) + ) +) + +# Create a Modal app +app = modal.App("ctb-image-generator", image=image) + +# Debug function to check installed packages +def check_dependencies(): + import importlib + packages = [ + "diffusers", # For Stable Diffusion + "transformers", # For Hugging Face models + "torch", # PyTorch + "accelerate", # For distributed training/inference + "gradio>=4.44.1", # For the Gradio interface (updated to latest version) + "safetensors", # For safe model 
loading + "pillow", # For image processing + ] + + for package in packages: + try: + module = importlib.import_module(package) + print(f"✅ {package} is installed. Version: {module.__version__}") + except ImportError: + print(f"❌ {package} is NOT installed.") + +@app.local_entrypoint() +def main(): + print("🚀 Starting Modal app...") + with modal.enable_output(): + print("🔍 Running debug check...") + check_dependencies() + print("🎨 Launching Gradio interface...") + demo.launch() + with modal.enable_output(): + demo.launch() + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/old/old v3/download_flux_modal2.py b/old/old v3/download_flux_modal2.py new file mode 100644 index 0000000000000000000000000000000000000000..8870581362a73da2cafaff7ea8156be474a203bd --- /dev/null +++ b/old/old v3/download_flux_modal2.py @@ -0,0 +1,48 @@ +import modal +import os +from pathlib import Path + +# Create or get existing volume +volume = modal.Volume.from_name("flux-model-vol-2", create_if_missing=True) +MODEL_DIR = Path("/data/models") + +# Set up image with dependencies +download_image = ( + modal.Image.debian_slim() + .pip_install("huggingface_hub[hf_transfer]") + .env({"HF_HUB_ENABLE_HF_TRANSFER": "1"}) # Enable fast Rust download client +) + +# Create Modal app +app = modal.App("flux-model-setup") + +@app.function( + volumes={MODEL_DIR: volume}, + image=download_image, + secrets=[modal.Secret.from_name("huggingface-token")] # Correct secrets syntax +) +def download_flux(): + from huggingface_hub import snapshot_download + + # Get token from environment variable + token = os.getenv("HF_TOKEN") + if not token: + raise ValueError("HF_TOKEN not found in environment variables. Ensure the secret is correctly set.") + + repo_id = "black-forest-labs/FLUX.1-dev" + local_dir = MODEL_DIR / repo_id.split("/")[1] + + # Ensure the directory exists + local_dir.mkdir(parents=True, exist_ok=True) + + # Download the model + snapshot_download( + repo_id=repo_id, + local_dir=local_dir, + token=token + ) + print(f"FLUX model downloaded to {local_dir}") + +@app.local_entrypoint() +def main(): + download_flux.remote() \ No newline at end of file diff --git a/old/old v3/download_flux_modal3.py b/old/old v3/download_flux_modal3.py new file mode 100644 index 0000000000000000000000000000000000000000..9a09959f2eca0e874dc6ef3b0ba8d53d8ee9eafe --- /dev/null +++ b/old/old v3/download_flux_modal3.py @@ -0,0 +1,41 @@ +import modal +import os +from pathlib import Path + +# Create or get existing volume +volume = modal.Volume.from_name("flux-model-vol-3", create_if_missing=True) + +# Set model storage directory +MODEL_DIR = "/data/models" + +# Set up image with dependencies +download_image = ( + modal.Image.debian_slim() + .pip_install("huggingface_hub[hf_transfer]", "transformers") + .env({"HF_HUB_ENABLE_HF_TRANSFER": "1"}) # Enables optimized downloads +) + +# Create Modal app +app = modal.App("flux-model-setup") + +@app.function( + volumes={"/data/models": volume}, # Fix volume mounting syntax + image=download_image, + secrets=[modal.Secret.from_name("huggingface-token")] +) +def download_flux(): + from huggingface_hub import snapshot_download + import transformers # Ensure transformers is available + + repo_id = "black-forest-labs/FLUX.1-dev" + local_dir = f"{MODEL_DIR}/{repo_id.split('/')[-1]}" # Store model in /data/models/FLUX.1-dev + + # Download the model without large weight files for efficiency + snapshot_download( + repo_id, + local_dir=local_dir, + revision="main", # Define revision explicitly + 
ignore_patterns=["*.pt", "*.bin"], # Skip large model weights + ) + + # Ensure proper caching diff --git a/old/old v3/download_flux_modal4.py b/old/old v3/download_flux_modal4.py new file mode 100644 index 0000000000000000000000000000000000000000..2e9e703de48aba3cdbaa81842fe34c25338758c6 --- /dev/null +++ b/old/old v3/download_flux_modal4.py @@ -0,0 +1,42 @@ +import modal +import os +from pathlib import Path + +# Create or get existing volume +volume = modal.Volume.from_name("flux-model-vol-5", create_if_missing=True) + +# Set model storage directory +MODEL_DIR = "/data/models" + +# Set up image with dependencies +download_image = ( + modal.Image.debian_slim() + .pip_install("huggingface_hub[hf_transfer]", "transformers") + .env({"HF_HUB_ENABLE_HF_TRANSFER": "1"}) # Enables optimized downloads +) + +# Create Modal app +app = modal.App("flux-model-setup") + +@app.function( + volumes={"/data/models": volume}, # Fix volume mounting syntax + image=download_image, + secrets=[modal.Secret.from_name("huggingface-token")] +) +def download_flux(): + from huggingface_hub import snapshot_download + import transformers # Ensure transformers is available + + repo_id = "black-forest-labs/FLUX.1-dev" + #local_dir = f"{MODEL_DIR}/{repo_id.split('/')[-1]}" # Store model in /data/models/FLUX.1-dev + local_dir = f"{MODEL_DIR}/{repo_id}" # Store model in /data/models/FLUX + + # Download the model without large weight files for efficiency + snapshot_download( + repo_id, + local_dir=local_dir, + revision="main", # Define revision explicitly + ignore_patterns=["*.pt", "*.bin"], # Skip large model weights + ) + + # Ensure proper caching diff --git a/old/old v3/gradio_interface_modal copy.py b/old/old v3/gradio_interface_modal copy.py new file mode 100644 index 0000000000000000000000000000000000000000..32836d5061809a29bfcbe803bcf0e575d60ee5dd --- /dev/null +++ b/old/old v3/gradio_interface_modal copy.py @@ -0,0 +1,56 @@ +# gradio_interface.py +import gradio as gr +import modal +from config.config import prompts, models # Indirect import +#from src.img_gen_modal import generate + +print("Hello from gradio_interface_head!") + +# Modal remote function synchronously +def call_generate(prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input): + try: + # Import the remote function + f = modal.Function.from_name("img-gen-modal", "generate") + image_path, message = f.remote(prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input) + return image_path, message + except Exception as e: + return None, f"An error occurred: {e}" + + +def gradio_interface_modal(): + with modal.enable_output(): + from config.config import prompts, models # Indirect import + # Gradio Interface + with gr.Blocks() as demo: + gr.Markdown("# CtB AI Image Generator") + with gr.Row(): + # Set default values for dropdowns + prompt_dropdown = gr.Dropdown(choices=[p["alias"] for p in prompts], label="Select Prompt", value=prompts[0]["alias"]) + team_dropdown = gr.Dropdown(choices=["Red", "Blue"], label="Select Team", value="Red") + model_dropdown = gr.Dropdown(choices=[m["alias"] for m in models], label="Select Model", value=models[0]["alias"]) + with gr.Row(): + # Add a text box for custom user input (max 200 characters) + custom_prompt_input = gr.Textbox(label="Custom Prompt (Optional)", placeholder="Enter additional details (max 200 chars)...", max_lines=1, max_length=200) + with gr.Row(): + generate_button = gr.Button("Generate Image") + output = gr.Textbox(label="Output") + with gr.Row(): + output_image = 
gr.Image(label="Generated Image") + with gr.Row(): + status_text = gr.Textbox(label="Status", placeholder="Waiting for input...", interactive=False) + from src.img_gen_modal import generate + print("Building cudasdasrer...") + + #Connect the button to the call_generate function + #had do do it to handle gradio/modal interaction) + generate_button.click( + call_generate, + inputs=[prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input], + outputs=[output_image, status_text] + ) + demo.launch() + + + + + diff --git a/old/old v3/gradio_interface_old.py b/old/old v3/gradio_interface_old.py new file mode 100644 index 0000000000000000000000000000000000000000..83fc04ac72583601c6e1d612d3ddc642afdd29f6 --- /dev/null +++ b/old/old v3/gradio_interface_old.py @@ -0,0 +1,39 @@ +# gradio_interface.py +import gradio as gr +from config.config import prompts, models # Direct import +from img_gen_modal import generate + +# Gradio Interface +with gr.Blocks() as demo: + gr.Markdown("# CtB AI Image Generator") + with gr.Row(): + # Set default values for dropdowns + prompt_dropdown = gr.Dropdown(choices=[p["alias"] for p in prompts], label="Select Prompt", value=prompts[0]["alias"]) + team_dropdown = gr.Dropdown(choices=["Red", "Blue"], label="Select Team", value="Red") + model_dropdown = gr.Dropdown(choices=[m["alias"] for m in models], label="Select Model", value=models[0]["alias"]) + with gr.Row(): + # Add a text box for custom user input (max 200 characters) + custom_prompt_input = gr.Textbox(label="Custom Prompt (Optional)", placeholder="Enter additional details (max 200 chars)...", max_lines=1, max_length=200) + with gr.Row(): + generate_button = gr.Button("Generate Image") + with gr.Row(): + output_image = gr.Image(label="Generated Image") + with gr.Row(): + status_text = gr.Textbox(label="Status", placeholder="Waiting for input...", interactive=False) + from img_gen_modal import generate + print("Building cudasdasrer...") + + + # Connect the button to the function + # generate_button.click( + # generate, + # inputs=[prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input], + # outputs=[output_image, status_text] + # ) + + + generate_button.click( + generate, + inputs=[prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input], + outputs=[output_image, status_text] +) \ No newline at end of file diff --git a/old/old v3/img_gen_modal copy 2.py b/old/old v3/img_gen_modal copy 2.py new file mode 100644 index 0000000000000000000000000000000000000000..705a5baef342fed3948bcf1d5e11a6f1969e165b --- /dev/null +++ b/old/old v3/img_gen_modal copy 2.py @@ -0,0 +1,126 @@ +#img_gen_modal.py +import modal +import random +from datetime import datetime +import random +import io +from config.config import prompts, models # Indirect import +import os +import torch +from huggingface_hub import login +from transformers import AutoTokenizer + +# Define the Modal image +image = ( + #modal.Image.from_registry("nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.9") + modal.Image.debian_slim(python_version="3.9") # Base image + + .apt_install( + "git", + ) + .pip_install( + "diffusers", + "transformers", + "torch", + "accelerate", + "gradio>=4.44.1", + "safetensors", + "pillow", + "sentencepiece", + "hf_transfer", + "huggingface_hub[hf_transfer]", + "aria2", # aria2 for ultra-fast parallel downloads + f"git+https://github.com/huggingface/transformers.git" + ) + .env( + { + "HF_HUB_ENABLE_HF_TRANSFER": "1", "HF_HOME": "HF_HOME" + } + ) +) + +# Create a Modal app +app = 
modal.App("img-gen-modal", image=image) +with image.imports(): + import diffusers + import os + import gradio + import torch + import sentencepiece + +#flux_model_vol = modal.Volume.from_name("flux-model-vol", create_if_missing=True) # Reference your volume + +@app.function( + secrets=[modal.Secret.from_name("huggingface-token")], + #volumes={"/data": flux_model_vol}, + gpu="t4", + timeout=600 + ) + +def generate_image(prompt_alias, team_color, model_alias, custom_prompt, height=360, width=640, + num_inference_steps=20, guidance_scale=2.0, seed=-1): + import torch + from diffusers import StableDiffusionPipeline + + # Find the selected prompt and model + try: + prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"] + model_name = next(m for m in models if m["alias"] == model_alias)["name"] + except StopIteration: + return None, "ERROR: Invalid prompt or model selected." + + # Determine the enemy color + enemy_color = "blue" if team_color.lower() == "red" else "red" + + # Print the original prompt and dynamic values for debugging + print("Original Prompt:") + print(prompt) + print(f"Enemy Color: {enemy_color}") + print(f"Team Color: {team_color.lower()}") + + # Format the prompt + prompt = prompt.format(team_color=team_color.lower(), enemy_color=enemy_color) + + # Print the formatted prompt for debugging + print("\nFormatted Prompt:") + print(prompt) + + # Append custom prompt if provided + if custom_prompt and len(custom_prompt.strip()) > 0: + prompt += " " + custom_prompt.strip() + + # Randomize seed if needed + if seed == -1: + seed = random.randint(0, 1000000) + + # Initialize the pipeline + pipe = StableDiffusionPipeline.from_pretrained( + model_name, + #torch_dtype=torch.float16, + use_safetensors=True, + #variant="fp16" + ) + pipe.to("cpu") + + # Generate the image + try: + image = pipe( + prompt, + guidance_scale=guidance_scale, + num_inference_steps=num_inference_steps, + width=width, + height=height, + generator=torch.Generator("cpu").manual_seed(seed) + ).images[0] + except Exception as e: + return None, f"ERROR: Failed to generate image. Details: {e}" + + # Save the image with a timestamped filename + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + output_filename = f"{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team_color.lower()}.png" + try: + image.save(output_filename) + except Exception as e: + return None, f"ERROR: Failed to save image. Details: {e}" + + return output_filename, "Image generated successfully!" 
\ No newline at end of file diff --git a/old/old v3/img_gen_modal copy 3.py b/old/old v3/img_gen_modal copy 3.py new file mode 100644 index 0000000000000000000000000000000000000000..d5c0bc50d9c29b84b1d1e85c4732ee34626252f1 --- /dev/null +++ b/old/old v3/img_gen_modal copy 3.py @@ -0,0 +1,134 @@ +# img_gen_modal.py +import modal +import random +from datetime import datetime +import random +import io +from config.config import prompts, models # Indirect import +import os +import torch +from huggingface_hub import login +from transformers import AutoTokenizer + +# Define the Modal image +image = ( + modal.Image.from_registry("nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.9") + .apt_install( + "git", + ) + .pip_install( + "diffusers", + "transformers", + "torch", + "accelerate", + "gradio>=4.44.1", + "safetensors", + "pillow", + "sentencepiece", + "hf_transfer", + "huggingface_hub[hf_transfer]", + "aria2", # aria2 for ultra-fast parallel downloads + f"git+https://github.com/huggingface/transformers.git", + f"git+https://github.com/huggingface/diffusers.git" + ) + .env( + { + "HF_HUB_ENABLE_HF_TRANSFER": "1", "HF_HOME": "HF_HOME" + } + ) +) + +# Create a Modal app +app = modal.App("img-gen-modal", image=image) +with image.imports(): + import diffusers + import os + import gradio + import torch + import sentencepiece + import transformers + + +@app.function( + secrets=[modal.Secret.from_name("huggingface-token")], + gpu="t4", + timeout=600 +) +def generate_image(prompt_alias, team_color, model_alias, custom_prompt, height=360, width=640, + num_inference_steps=20, guidance_scale=2.0, seed=-1): + import torch + import diffusers # Corrected import + + # Find the selected prompt and model + try: + prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"] + model_name = next(m for m in models if m["alias"] == model_alias)["name"] + except StopIteration: + return None, "ERROR: Invalid prompt or model selected." + + # Determine the enemy color + enemy_color = "blue" if team_color.lower() == "red" else "red" + + # Print the original prompt and dynamic values for debugging + print("Original Prompt:") + print(prompt) + print(f"Enemy Color: {enemy_color}") + print(f"Team Color: {team_color.lower()}") + + # Format the prompt + prompt = prompt.format(team_color=team_color.lower(), enemy_color=enemy_color) + + # Print the formatted prompt for debugging + print("\nFormatted Prompt:") + print(prompt) + + # Append custom prompt if provided + if custom_prompt and len(custom_prompt.strip()) > 0: + prompt += " " + custom_prompt.strip() + + # Randomize seed if needed + if seed == -1: + seed = random.randint(0, 1000000) + + # Initialize the pipeline + try: + # Corrected import statement + # HF LOGIN + print("Initializing HF TOKEN") + hf_token = os.environ["HF_TOKEN"] + print(hf_token) + print("HF TOKEN:") + login(token=hf_token) + print("model_name:") + print(model_name) + + from diffusers import FluxPipeline # Replace with the correct pipeline if FluxPipeline is not available + # Use a standard pipeline for now + pipe = FluxPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.bfloat16) + pipe = pipe.to("cuda") + except Exception as e: + return None, f"ERROR: Failed to initialize pipeline. 
Details: {e}" + + # Generate the image + try: + image = pipe( + prompt, + height=height, + width=width, + guidance_scale=guidance_scale, + num_inference_steps=num_inference_steps, + generator=torch.Generator("cuda").manual_seed(seed) + ).images[0] + image.save("generated_image.png") + except Exception as e: + return None, f"ERROR: Failed to generate image. Details: {e}" + + # Save the image with a timestamped filename + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + output_filename = f"{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team_color.lower()}.png" + try: + image.save(output_filename) + except Exception as e: + return None, f"ERROR: Failed to save image. Details: {e}" + + return output_filename, "Image generated successfully!" \ No newline at end of file diff --git a/old/old v3/img_gen_modal copy 4.py b/old/old v3/img_gen_modal copy 4.py new file mode 100644 index 0000000000000000000000000000000000000000..456ac1a8ad75499f12b3aadf6d864c5708f76755 --- /dev/null +++ b/old/old v3/img_gen_modal copy 4.py @@ -0,0 +1,131 @@ +# img_gen.py +import sys +import os +import random +from huggingface_hub import InferenceClient +from datetime import datetime +from config.config import models, prompts, api_token # Direct import +import modal + +# Define the Modal image +image = ( + modal.Image.from_registry("nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.9") + .apt_install( + "git", + ) + .pip_install( + "diffusers", + "transformers", + "torch", + "accelerate", + "gradio>=4.44.1", + "safetensors", + "pillow", + "sentencepiece", + "hf_transfer", + "huggingface_hub[hf_transfer]", + "aria2", # aria2 for ultra-fast parallel downloads + ) + .env( + { + "HF_HUB_ENABLE_HF_TRANSFER": "1", "HF_HOME": "HF_HOME" + } + ) +) + +# Create a Modal app +app = modal.App("img-gen-modal", image=image) +with image.imports(): + import diffusers + import os + import gradio + import torch + import sentencepiece + import transformers + from huggingface_hub import InferenceClient, login + + +@app.function( + secrets=[modal.Secret.from_name("huggingface-token")], + gpu="t4", + timeout=600 +) +def generate_image(prompt_alias, team_color, model_alias, custom_prompt, height=360, width=640, num_inference_steps=20, guidance_scale=2.0, seed=-1): + #from huggingface_hub import InferenceClient + + + + + # Find the selected prompt and model + try: + prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"] + model_name = next(m for m in models if m["alias"] == model_alias)["name"] + except StopIteration: + return None, "ERROR: Invalid prompt or model selected." + + # Determine the enemy color + enemy_color = "blue" if team_color.lower() == "red" else "red" + + # if team.lower() == "red": + # winning_team_text = " The winning army is dressed in red armor and banners." + # elif team.lower() == "blue": + # winning_team_text = " The winning army is dressed in blue armor and banners." 
+ + # Print the original prompt and dynamic values for debugging + print("Original Prompt:") + print(prompt) + print(f"Enemy Color: {enemy_color}") + print(f"Team Color: {team_color.lower()}") + + prompt = prompt.format(team_color=team_color.lower(), enemy_color=enemy_color) + + # Print the formatted prompt for debugging + print("\nFormatted Prompt:") + print(prompt) + + # Append the custom prompt (if provided) + if custom_prompt and len(custom_prompt.strip()) > 0: + prompt += " " + custom_prompt.strip() + + # Randomize the seed if needed + if seed == -1: + seed = random.randint(0, 1000000) + + + # HF LOGIN + print("Initializing HF TOKEN") + hf_token = os.environ["HF_TOKEN"] + print(hf_token) + print("HF TOKEN:") + login(token=hf_token) + print("model_name:") + print(model_name) + + # Initialize the InferenceClient + try: + client = InferenceClient(model_name, token=api_token) + except Exception as e: + return None, f"ERROR: Failed to initialize InferenceClient. Details: {e}" + + #Generate the image + try: + image = client.text_to_image( + prompt, + guidance_scale=guidance_scale, + num_inference_steps=num_inference_steps, + width=width, + height=height, + seed=seed + ) + except Exception as e: + return None, f"ERROR: Failed to generate image. Details: {e}" + + # Save the image with a timestamped filename + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + output_filename = f"{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team_color.lower()}.png" + try: + image.save(output_filename) + except Exception as e: + return None, f"ERROR: Failed to save image. Details: {e}" + + return output_filename, "Image generated successfully!" \ No newline at end of file diff --git a/old/old v3/img_gen_modal copy 5.py b/old/old v3/img_gen_modal copy 5.py new file mode 100644 index 0000000000000000000000000000000000000000..3fbd2552639b8d3fdd0fee58f63dbc4bb081420e --- /dev/null +++ b/old/old v3/img_gen_modal copy 5.py @@ -0,0 +1,121 @@ +# img_gen.py +import sys +import os +import random +from huggingface_hub import InferenceClient +from datetime import datetime +from config.config import models, prompts, api_token # Direct import +import modal + +# Define the Modal image +image = ( + modal.Image.from_registry("nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.9") + .apt_install( + "git", + ) + .pip_install( + "diffusers", + "transformers", + "torch", + "accelerate", + "gradio>=4.44.1", + "safetensors", + "pillow", + "sentencepiece", + "hf_transfer", + "huggingface_hub[hf_transfer]", + "aria2", # aria2 for ultra-fast parallel downloads + ) + .env( + { + "HF_HUB_ENABLE_HF_TRANSFER": "1", "HF_HOME": "HF_HOME" + } + ) +) + +# Create a Modal app +app = modal.App("img-gen-modal", image=image) +with image.imports(): + import diffusers + import os + import gradio + import torch + import sentencepiece + import transformers + from huggingface_hub import InferenceClient, login + + +@app.function( + secrets=[modal.Secret.from_name("huggingface-token")], + gpu="t4", + timeout=600 +) +def generate_image(prompt_alias, team_color, model_alias, custom_prompt, height=360, width=640, num_inference_steps=20, guidance_scale=2.0, seed=-1): + # Find the selected prompt and model + try: + prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"] + model_name = next(m for m in models if m["alias"] == model_alias)["name"] + except StopIteration: + return None, "ERROR: Invalid prompt or model selected." 
+ + # Determine the enemy color + enemy_color = "blue" if team_color.lower() == "red" else "red" + + # if team.lower() == "red": + # winning_team_text = " The winning army is dressed in red armor and banners." + # elif team.lower() == "blue": + # winning_team_text = " The winning army is dressed in blue armor and banners." + + # Print the original prompt and dynamic values for debugging + print("Original Prompt:") + print(prompt) + print(f"Enemy Color: {enemy_color}") + print(f"Team Color: {team_color.lower()}") + + prompt = prompt.format(team_color=team_color.lower(), enemy_color=enemy_color) + + # Print the formatted prompt for debugging + print("\nFormatted Prompt:") + print(prompt) + + # Append the custom prompt (if provided) + if custom_prompt and len(custom_prompt.strip()) > 0: + prompt += " " + custom_prompt.strip() + + # Randomize the seed if needed + if seed == -1: + seed = random.randint(0, 1000000) + + # Initialize the InferenceClient + try: + print ("starting inference") + print("token:") + print (api_token) + client = InferenceClient(model_name, token=api_token) + except Exception as e: + return None, f"ERROR: Failed to initialize InferenceClient. Details: {e}" + + #Generate the image + try: + image = client.text_to_image( + prompt, + guidance_scale=guidance_scale, + num_inference_steps=num_inference_steps, + width=width, + height=height, + seed=seed + ) + except Exception as e: + return None, f"ERROR: Failed to generate image. Details: {e}" + + #return prompt # For testing purposes, return the formatted prompt + + # Save the image with a timestamped filename + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + output_filename = f"{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team_color.lower()}.png" + try: + image.save(output_filename) + except Exception as e: + return None, f"ERROR: Failed to save image. Details: {e}" + + return output_filename, "Image generated successfully!" 
\ No newline at end of file diff --git a/old/old v3/img_gen_modal copy.py b/old/old v3/img_gen_modal copy.py new file mode 100644 index 0000000000000000000000000000000000000000..31c987c1a1aa77c3746e2badcbbba9dfdb777971 --- /dev/null +++ b/old/old v3/img_gen_modal copy.py @@ -0,0 +1,136 @@ +# img_gen.py +#img_gen_modal.py +# img_gen.py +# img_gen_modal.py +import modal +import random +from datetime import datetime +import random +import io +from config.config import prompts, models # Indirect import +import os +import torch +from huggingface_hub import login +from transformers import AutoTokenizer + +# Define the Modal image +image = ( + #modal.Image.from_registry("nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.9") + modal.Image.debian_slim(python_version="3.9") # Base image + + .apt_install( + "git", + ) + .pip_install( + "diffusers", + "transformers", + "torch", + "accelerate", + "gradio>=4.44.1", + "safetensors", + "pillow", + "sentencepiece", + "hf_transfer", + "huggingface_hub[hf_transfer]", + "aria2", # aria2 for ultra-fast parallel downloads + f"git+https://github.com/huggingface/transformers.git" + ) + .env( + { + "HF_HUB_ENABLE_HF_TRANSFER": "1", "HF_HOME": "HF_HOME" + } + ) +) + +# Create a Modal app +app = modal.App("img-gen-modal", image=image) +with image.imports(): + import diffusers + import os + import gradio + import torch + import sentencepiece + +#flux_model_vol = modal.Volume.from_name("flux-model-vol", create_if_missing=True) # Reference your volume + +@app.function( + secrets=[modal.Secret.from_name("huggingface-token")], + #volumes={"/data": flux_model_vol}, + gpu="a100-80gb" + ) +def generate_image(prompt_alias, team_color, model_alias, custom_prompt, height=36, width=64, num_inference_steps=2, guidance_scale=2.0, seed=-1): + # Find the selected prompt and model + try: + prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"] + model_name = next(m for m in models if m["alias"] == model_alias)["name"] + except StopIteration: + return None, "ERROR: Invalid prompt or model selected." + + # Determine the enemy color + enemy_color = "blue" if team_color.lower() == "red" else "red" + + # Print the original prompt and dynamic values for debugging + print("Original Prompt:") + print(prompt) + print(f"Enemy Color: {enemy_color}") + print(f"Team Color: {team_color.lower()}") + + prompt = prompt.format(team_color=team_color.lower(), enemy_color=enemy_color) + + # Print the formatted prompt for debugging + print("\nFormatted Prompt:") + print(prompt) + + # Append the custom prompt (if provided) + if custom_prompt and len(custom_prompt.strip()) > 0: + prompt += " " + custom_prompt.strip() + + # Randomize the seed if needed + if seed == -1: + seed = random.randint(0, 1000000) + + try: + from diffusers import DiffusionPipeline + print("Initializing HF TOKEN") + hf_token = os.environ["HF_TOKEN"] + print(hf_token) + print("HF TOKEN:") + login(token=hf_token) + print("model_name:") + print(model_name) + + print("Initializing PIPE") + pipe = DiffusionPipeline.from_pretrained(model_name) + pipe = pipe.to("cuda") + except Exception as e: + print(f"Detailed error: {str(e)}") + return None, f"ERROR: Failed to initialize PIPE. Details: {e}" + try: + print("Sending img gen to pipe") + image = pipe( + prompt, + guidance_scale=guidance_scale, + num_inference_steps=num_inference_steps, + width=width, + height=height, + # seed=seed + ).images[0] + image.save("image.png") + + except Exception as e: + return None, f"ERROR: Failed to generate image. 
Details: {e}" + + # Save the image with a timestamped filename + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + output_filename = f"{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team_color.lower()}.png" + try: + # The pipeline typically returns images in a specific format + # Usually it's image.images[0] for the first generated image + image_output = image.images[0] # Get the actual PIL Image from the output + image_output.save(output_filename) # Save using PIL's save method + except Exception as e: + return None, f"ERROR: Failed to save image. Details: {e}" + print(f"Image output type: {type(image)}") + print(f"Image output attributes: {dir(image)}") + + return output_filename, "Image generated successfully!" \ No newline at end of file diff --git a/old/old v3/img_gen_modal_cpu.py b/old/old v3/img_gen_modal_cpu.py new file mode 100644 index 0000000000000000000000000000000000000000..488904bfbb51c4730f27d28007857ec81985da8b --- /dev/null +++ b/old/old v3/img_gen_modal_cpu.py @@ -0,0 +1,190 @@ +# img_gen.py +#img_gen_modal.py +# img_gen.py +# img_gen_modal.py +import modal +import random +import io +from config.config import prompts, models # Indirect import +import os + +CACHE_DIR = "/model_cache" + +# Define the Modal image +image = ( + #modal.Image.from_registry("nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.9") + + + .apt_install( + "git", + ) + .pip_install( + "diffusers", + "transformers", + "torch", + "accelerate", + "gradio>=4.44.1", + "safetensors", + "pillow", + "sentencepiece", + "hf_transfer", + "huggingface_hub[hf_transfer]", + "aria2", # aria2 for ultra-fast parallel downloads + f"git+https://github.com/huggingface/transformers.git" + ) + .env( + { + "HF_HUB_ENABLE_HF_TRANSFER": "1", "HF_HOME": "HF_HOME", "HF_HUB_CACHE": CACHE_DIR + } + ) +) + +# Create a Modal CPU app +app = modal.App("img-gen-modal-cpu", image=image) +with image.imports(): + import diffusers + import os + import gradio + import torch + import sentencepiece + import torch + from huggingface_hub import login + from transformers import AutoTokenizer + import random + from datetime import datetime + +flux_model_vol = modal.Volume.from_name("flux-model-vol", create_if_missing=True) # Reference your volume + + +# CPU FUNCTION +@app.function(volumes={"/data": flux_model_vol}, + secrets=[modal.Secret.from_name("huggingface-token")], + #gpu="a100-80gb", + cpu = 2, + memory = 160000, + timeout=6000 + ) +# MAIN GENERATE IMAGE FUNCTION +def generate_image(prompt_alias, team_color, model_alias, custom_prompt, height=360, width=640, num_inference_steps=20, guidance_scale=2.0, seed=-1): + #with modal.enable_output(): + print("Hello from ctb_modal!") + print("Running debug check...") + # Debug function to check installed packages + def check_dependencies(): + packages = [ + "diffusers", # For Stable Diffusion + "transformers", # For Hugging Face models + "torch", # PyTorch + "accelerate", # For distributed training/inference + "gradio", # For the Gradio interface (updated to latest version) + "safetensors", # For safe model loading + "pillow", # For image processing + "sentencepiece" + ] + + for package in packages: + try: + import importlib + module = importlib.import_module(package) + print(f" {package} is installed. 
Version:") + except ImportError: + print(f" {package} is NOT installed.") + + check_dependencies() + + # Find the selected prompt and model + try: + prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"] + model_name = next(m for m in models if m["alias"] == model_alias)["name"] + except StopIteration: + return None, "ERROR: Invalid prompt or model selected." + + # Determine the enemy color + enemy_color = "blue" if team_color.lower() == "red" else "red" + + # Print the original prompt and dynamic values for debugging + print("Original Prompt:") + print(prompt) + print(f"Enemy Color: {enemy_color}") + print(f"Team Color: {team_color.lower()}") + + prompt = prompt.format(team_color=team_color.lower(), enemy_color=enemy_color) + + # Print the formatted prompt for debugging + print("\nFormatted Prompt:") + print(prompt) + + # Append the custom prompt (if provided) + if custom_prompt and len(custom_prompt.strip()) > 0: + prompt += " " + custom_prompt.strip() + + # Randomize the seed if needed + if seed == -1: + seed = random.randint(0, 1000000) + + try: + from diffusers import FluxPipeline + print("Initializing HF TOKEN") + hf_token = os.environ["HF_TOKEN"] + print(hf_token) + print("HF TOKEN:") + login(token=hf_token) + print("model_name:") + print(model_name) + + # Use absolute path with leading slash + local_path = f"/data/{model_name}" # Changed from "data/" to "/data/" + print(f"Loading model from local path: {local_path}") + + # Debug: Check if the directory exists and list its contents + if os.path.exists(local_path): + print("Directory exists. Contents:") + for item in os.listdir(local_path): + print(f" - {item}") + else: + print(f"Directory does not exist: {local_path}") + print("Contents of /data:") + print(os.listdir("/data")) + + # INITIALIZING PIPE + print("Initializing PIPE2") + pipe = FluxPipeline.from_pretrained( + local_path, + torch_dtype=torch.bfloat16, + local_files_only=True + ) + pipe.enable_model_cpu_offload() # Use official recommended method + #pipe = pipe.to("cpu") + + except Exception as e: + print(f"Detailed error: {str(e)}") + return None, f"ERROR: Failed to initialize PIPE. Details: {e}" + try: + print("Sending img gen to pipe") + image = pipe( + prompt, + guidance_scale=guidance_scale, + num_inference_steps=num_inference_steps, + width=width, + height=height, + # seed=seed + ).images[0] + print("render done") + print(image) + except Exception as e: + return f"ERROR: Failed to initialize InferenceClient. Details: {e}" + + + try: + print("SAVING") + # Save the image with a timestamped filename + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + output_filename = f"/data/images/{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team_color.lower()}.png" + # Save the image using PIL's save method + image.save(output_filename) + #print(f"Image saved! File path: {output_filename}") + print("Image generated successfully!") + except Exception as e: + print(f"ERROR: Failed to save image. Details: {e}") + # Return the filename and success message + return image, "Image generated successfully!" 
diff --git a/old/old v3/img_gen_modal_gpu.py b/old/old v3/img_gen_modal_gpu.py new file mode 100644 index 0000000000000000000000000000000000000000..e6dbd01166c5c703949f6de8a21196eff8cb9f1c --- /dev/null +++ b/old/old v3/img_gen_modal_gpu.py @@ -0,0 +1,188 @@ +# img_gen.py +#img_gen_modal.py +# img_gen.py +# img_gen_modal.py +import modal +import random +import io +from config.config import prompts, models # Indirect import +import os + +CACHE_DIR = "/model_cache" + +# Define the Modal image +image = ( + modal.Image.from_registry("nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.9") + #modal.Image.debian_slim(python_version="3.9") # Base image + + .apt_install( + "git", + ) + .pip_install( + "diffusers", + "transformers", + "torch", + "accelerate", + "gradio>=4.44.1", + "safetensors", + "pillow", + "sentencepiece", + "hf_transfer", + "huggingface_hub[hf_transfer]", + "aria2", # aria2 for ultra-fast parallel downloads + f"git+https://github.com/huggingface/transformers.git" + ) + .env( + { + "HF_HUB_ENABLE_HF_TRANSFER": "1", "HF_HOME": "HF_HOME", "HF_HUB_CACHE": CACHE_DIR + } + ) +) + +# Create a Modal GPU app +app = modal.App("img-gen-modal-gpu", image=image) +with image.imports(): + import diffusers + import os + import gradio + import torch + import sentencepiece + import torch + from huggingface_hub import login + from transformers import AutoTokenizer + import random + from datetime import datetime + +flux_model_vol = modal.Volume.from_name("flux-model-vol", create_if_missing=True) # Reference your volume + + +# CPU FUNCTION +@app.function(volumes={"/data": flux_model_vol}, + secrets=[modal.Secret.from_name("huggingface-token")], + gpu="L40S" + #memory = 70000 + ) +# MAIN GENERATE IMAGE FUNCTION +def generate_image(prompt_alias, team_color, model_alias, custom_prompt, height=360, width=640, num_inference_steps=20, guidance_scale=2.0, seed=-1): + #with modal.enable_output(): + print("Hello from ctb_modal!") + print("Running debug check...") + # Debug function to check installed packages + def check_dependencies(): + packages = [ + "diffusers", # For Stable Diffusion + "transformers", # For Hugging Face models + "torch", # PyTorch + "accelerate", # For distributed training/inference + "gradio", # For the Gradio interface (updated to latest version) + "safetensors", # For safe model loading + "pillow", # For image processing + "sentencepiece" + ] + + for package in packages: + try: + import importlib + module = importlib.import_module(package) + print(f" {package} is installed. Version:") + except ImportError: + print(f" {package} is NOT installed.") + + check_dependencies() + + # Find the selected prompt and model + try: + prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"] + model_name = next(m for m in models if m["alias"] == model_alias)["name"] + except StopIteration: + return None, "ERROR: Invalid prompt or model selected." 
+ + # Determine the enemy color + enemy_color = "blue" if team_color.lower() == "red" else "red" + + # Print the original prompt and dynamic values for debugging + print("Original Prompt:") + print(prompt) + print(f"Enemy Color: {enemy_color}") + print(f"Team Color: {team_color.lower()}") + + prompt = prompt.format(team_color=team_color.lower(), enemy_color=enemy_color) + + # Print the formatted prompt for debugging + print("\nFormatted Prompt:") + print(prompt) + + # Append the custom prompt (if provided) + if custom_prompt and len(custom_prompt.strip()) > 0: + prompt += " " + custom_prompt.strip() + + # Randomize the seed if needed + if seed == -1: + seed = random.randint(0, 1000000) + + try: + from diffusers import FluxPipeline + print("Initializing HF TOKEN") + hf_token = os.environ["HF_TOKEN"] + print(hf_token) + print("HF TOKEN:") + login(token=hf_token) + print("model_name:") + print(model_name) + + # Use absolute path with leading slash + local_path = f"/data/{model_name}" # Changed from "data/" to "/data/" + print(f"Loading model from local path: {local_path}") + + # Debug: Check if the directory exists and list its contents + if os.path.exists(local_path): + print("Directory exists. Contents:") + for item in os.listdir(local_path): + print(f" - {item}") + else: + print(f"Directory does not exist: {local_path}") + print("Contents of /data:") + print(os.listdir("/data")) + + # INITIALIZING PIPE + print("Initializing PIPE2") + pipe = FluxPipeline.from_pretrained( + local_path, + torch_dtype=torch.bfloat16, + local_files_only=True + ) + #pipe.enable_model_cpu_offload() # Use official recommended method + #pipe = pipe.to("cuda") + pipe = pipe.to("cuda") + + except Exception as e: + print(f"Detailed error: {str(e)}") + return None, f"ERROR: Failed to initialize PIPE. Details: {e}" + try: + print("Sending img gen to pipe") + image = pipe( + prompt, + guidance_scale=guidance_scale, + num_inference_steps=num_inference_steps, + width=width, + height=height, + # seed=seed + ).images[0] + print("render done") + print(image) + except Exception as e: + return f"ERROR: Failed to initialize InferenceClient. Details: {e}" + + + try: + print("SAVING") + # Save the image with a timestamped filename + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + output_filename = f"/data/images/{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team_color.lower()}.png" + # Save the image using PIL's save method + image.save(output_filename) + print(f"Image saved! File path: {output_filename}") + except Exception as e: + print(f"ERROR: Failed to save image. Details: {e}") + # Return the filename and success message + return image, "Image generated successfully!" 
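The install_requirements.py added just below calls modal.Image.from_pip and decorates a plain function with @modal.function, which does not line up with how every other file in this diff builds images and registers functions (debian_slim().pip_install(...), an explicit modal.App, @app.function, @app.local_entrypoint). A corrected sketch of the same dependency smoke test, using only constructs already present in the other scripts; the app name here is made up:

import modal

# Build the image the same way the other scripts in this repo do.
image = (
    modal.Image.debian_slim(python_version="3.9")
    .pip_install("gradio", "torch", "diffusers")
)

app = modal.App("install-requirements-check", image=image)

@app.function()
def test_function():
    # Import inside the container to confirm the packages are installed there.
    import gradio
    import torch
    import diffusers
    print("gradio, torch and diffusers are installed successfully!")

@app.local_entrypoint()
def main():
    test_function.remote()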
diff --git a/old/old v3/install_requirements.py b/old/old v3/install_requirements.py new file mode 100644 index 0000000000000000000000000000000000000000..b2b64272f1f3dfba518ab15a71b6ea15a9c9afaf --- /dev/null +++ b/old/old v3/install_requirements.py @@ -0,0 +1,16 @@ +import modal + +# Create an image and install dependencies directly +image = modal.Image.from_pip([ + "gradio", + "torch", + "diffusers" +]) + +@modal.function(image=image) +def test_function(): + import gradio + print("Gradio is installed successfully!") + +if __name__ == "__main__": + test_function() diff --git a/old/old v4/app copy.py b/old/old v4/app copy.py new file mode 100644 index 0000000000000000000000000000000000000000..9871a31916b1593a924ae210b5b916941c40d6fc --- /dev/null +++ b/old/old v4/app copy.py @@ -0,0 +1,51 @@ +import modal +from src.gradio_interface import gradio_interface +from config.config import prompts, models # Indirect import + +# Define the Modal image +image = ( + #modal.Image.from_registry("nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.9") + modal.Image.debian_slim(python_version="3.9") # Base image + + .apt_install( + "git", + ) + .pip_install( + "diffusers", + "transformers", + "torch", + "accelerate", + "gradio>=4.44.1", + "safetensors", + "pillow", + "sentencepiece", + + f"git+https://github.com/huggingface/transformers.git" + ) + .env( + { + "HF_HUB_ENABLE_HF_TRANSFER": "1", "HF_HOME": "HF_HOME" + } +) +) + + +# Create the Modal app +app = modal.App("ctb-image-generator-modal", image=image) +with image.imports(): + import diffusers + import os + import gradio + import torch + import sentencepiece + import importlib +print("Modal app created.") + +# Entry point for local execution +@app.local_entrypoint() +def main(): + print("Launching Gradio interface...") + # demo.launch() + gradio_interface.launch() + + diff --git a/old/old v4/gradio_interface copy.py b/old/old v4/gradio_interface copy.py new file mode 100644 index 0000000000000000000000000000000000000000..6b5599b5f14526291e9ef89c55b7146a8728f575 --- /dev/null +++ b/old/old v4/gradio_interface copy.py @@ -0,0 +1,30 @@ +# gradio_interface.py (HuggingFace Spaces) +import gradio as gr +from config.config import prompts, models, api_token # Direct import +from src.img_gen import generate_image + + +# Gradio Interface +with gr.Blocks() as demo: + gr.Markdown("# CtB AI Image Generator - Inference version (HF)") + with gr.Row(): + # Set default values for dropdowns + prompt_dropdown = gr.Dropdown(choices=[p["alias"] for p in prompts], label="Select Prompt", value=prompts[0]["alias"]) + team_dropdown = gr.Dropdown(choices=["Red", "Blue"], label="Select Team", value="Red") + model_dropdown = gr.Dropdown(choices=[m["alias"] for m in models], label="Select Model", value=models[0]["alias"]) + with gr.Row(): + # Add a text box for custom user input (max 200 characters) + custom_prompt_input = gr.Textbox(label="Custom Prompt (Optional)", placeholder="Enter additional details (max 200 chars)...", max_lines=1, max_length=200) + with gr.Row(): + generate_button = gr.Button("Generate Image") + with gr.Row(): + output_image = gr.Image(label="Generated Image") + with gr.Row(): + status_text = gr.Textbox(label="Status", placeholder="Waiting for input...", interactive=False) + + # Connect the button to the function + generate_button.click( + generate_image, + inputs=[prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input], + outputs=[output_image, status_text] + ) \ No newline at end of file diff --git a/old/old v4/img_gen copy.py b/old/old 
v4/img_gen copy.py new file mode 100644 index 0000000000000000000000000000000000000000..bd97b64ecba4ad5c9e92cd9ade94707a2d7f7346 --- /dev/null +++ b/old/old v4/img_gen copy.py @@ -0,0 +1,104 @@ +# img_gen.py +import sys +import os +import random +from huggingface_hub import InferenceClient, login +from datetime import datetime +from config.config import models, prompts, api_token # Direct import + +# def generate(prompt_alias, team_color, model_alias, custom_prompt, height=360, width=640, num_inference_steps=20, guidance_scale=2.0, seed=-1): +# try: +# # Generate the image +# image_path, message = generate_image(prompt_alias, team_color, model_alias, custom_prompt, height, width, num_inference_steps, guidance_scale, seed) +# return image_path, message +# except Exception as e: +# return None, f"An error occurred: {e}" + +def generate_image( + prompt_alias, + team_color, + model_alias, + custom_prompt, + height=360, + width=640, + num_inference_steps=20, + guidance_scale=2.0, + seed=-1): + # Find the selected prompt and model + try: + prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"] + model_name = next(m for m in models if m["alias"] == model_alias)["name"] + except StopIteration: + return None, "ERROR: Invalid prompt or model selected." + + # Determine the enemy color + enemy_color = "blue" if team_color.lower() == "red" else "red" + + # if team.lower() == "red": + # winning_team_text = " The winning army is dressed in red armor and banners." + # elif team.lower() == "blue": + # winning_team_text = " The winning army is dressed in blue armor and banners." + + # Print the original prompt and dynamic values for debugging + print("Original Prompt:") + print(prompt) + print(f"Enemy Color: {enemy_color}") + print(f"Team Color: {team_color.lower()}") + + prompt = prompt.format(team_color=team_color.lower(), enemy_color=enemy_color) + + # Append the custom prompt (if provided) + if custom_prompt and len(custom_prompt.strip()) > 0: + prompt += " " + custom_prompt.strip() + + # Print the formatted prompt for debugging + print("\nFormatted Prompt:") + print(prompt) + + # Randomize the seed if needed + if seed == -1: + seed = random.randint(0, 1000000) + + # HF LOGIN + print("Initializing HF TOKEN") + print (api_token) + # login(token=api_token) + # print("model_name:") + # print(model_name) + + + # Initialize the InferenceClient + try: + print("-----INITIALIZING INFERENCE-----") + client = InferenceClient(model_name, token=api_token) + print("Inference activated") + except Exception as e: + return None, f"ERROR: Failed to initialize InferenceClient. Details: {e}" + + #Generate the image + try: + print("-----GENERATING IMAGE-----") + image = client.text_to_image( + prompt, + guidance_scale=guidance_scale, + num_inference_steps=num_inference_steps, + width=width, + height=height, + seed=seed + ) + print("-----IMAGE GENERATED SUCCESSFULLY!-----") + except Exception as e: + return None, f"ERROR: Failed to generate image. Details: {e}" + + # Save the image with a timestamped filename + print("-----SAVING-----", image) + path = "images" + + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + output_filename = f"{path}/{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team_color.lower()}.png" + try: + image.save(output_filename) + except Exception as e: + return None, f"ERROR: Failed to save image. Details: {e}" + print("-----DONE!-----") + return output_filename, "Image generated successfully!" 
\ No newline at end of file diff --git a/old/old v4/img_gen_modal copy.py b/old/old v4/img_gen_modal copy.py new file mode 100644 index 0000000000000000000000000000000000000000..a98acd84f2b50bec5eb770cc7dafe3ac910d447c --- /dev/null +++ b/old/old v4/img_gen_modal copy.py @@ -0,0 +1,235 @@ +# img_gen.py +#img_gen_modal.py +# img_gen.py +# img_gen_modal.py +import modal +import random +import io +from config.config import prompts, models # Indirect import +import os +import gradio as gr + +CACHE_DIR = "/model_cache" + +# Define the Modal image +image = ( + modal.Image.from_registry("nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.9") + #modal.Image.debian_slim(python_version="3.9") # Base image + + .apt_install( + "git", + ) + .pip_install( + "diffusers", + "transformers", + "xformers", + "torch", + "accelerate", + "gradio>=4.44.1", + "safetensors", + "pillow", + "sentencepiece", + "hf_transfer", + "huggingface_hub[hf_transfer]", + "aria2", # aria2 for ultra-fast parallel downloads + f"git+https://github.com/huggingface/transformers.git" + ) + .env( + { + "HF_HUB_ENABLE_HF_TRANSFER": "1", "HF_HOME": "HF_HOME", "HF_HUB_CACHE": CACHE_DIR + } + ) +) + +# Create a Modal app +app = modal.App("img-gen-modal", image=image) +with image.imports(): + import diffusers + import os + import torch + import sentencepiece + import torch + from huggingface_hub import login + from transformers import AutoTokenizer + import random + from datetime import datetime + import xformers + +flux_model_vol = modal.Volume.from_name("flux-model-vol", create_if_missing=True) # Reference your volume + + +# CPU FUNCTION +@app.function(volumes={"/data": flux_model_vol}, + secrets=[modal.Secret.from_name("huggingface-token")], + gpu="L40S", + #cpu = 1, + timeout = 300 + ) +# MAIN GENERATE IMAGE FUNCTION +def generate_image(prompt_alias, + team_color, + model_alias, + custom_prompt, + height=360, + width=640, + num_inference_steps=20, + guidance_scale=2.0, + seed=-1, + progress=gr.Progress(track_tqdm=True) # Add progress parameter + ): + with modal.enable_output(): + print("Hello from ctb_modal!") + # progress(0, desc="Starting...") # Initial progress + # yield "Initializing image generation..." # Yield the initial message + + print("Running debug check...") + # Debug function to check installed packages + def check_dependencies(): + packages = [ + "diffusers", # For Stable Diffusion + "transformers", # For Hugging Face models + "torch", # PyTorch + "accelerate", # For distributed training/inference + "gradio", # For the Gradio interface (updated to latest version) + "safetensors", # For safe model loading + "pillow", # For image processing + "sentencepiece" + ] + + for package in packages: + try: + import importlib + module = importlib.import_module(package) + print(f" {package} is installed. Version:") + except ImportError: + print(f" {package} is NOT installed.") + + check_dependencies() + # progress(0.2, desc="Preprocessing input...") + # yield "Preprocessing inputs..." # Yield the preprocessing message + + # Find the selected prompt and model + try: + prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"] + model_name = next(m for m in models if m["alias"] == model_alias)["name"] + except StopIteration: + return None, "ERROR: Invalid prompt or model selected." 
+ + # Determine the enemy color + enemy_color = "blue" if team_color.lower() == "red" else "red" + + # Print the original prompt and dynamic values for debugging + print("Original Prompt:") + print(prompt) + print(f"Enemy Color: {enemy_color}") + print(f"Team Color: {team_color.lower()}") + + prompt = prompt.format(team_color=team_color.lower(), enemy_color=enemy_color) + + # Print the formatted prompt for debugging + print("\nFormatted Prompt:") + print(prompt) + + # Append the custom prompt (if provided) + if custom_prompt and len(custom_prompt.strip()) > 0: + prompt += " " + custom_prompt.strip() + + # Randomize the seed if needed + if seed == -1: + seed = random.randint(0, 1000000) + + try: + from diffusers import FluxPipeline + print("Initializing HF TOKEN") + hf_token = os.environ["HF_TOKEN"] + print(hf_token) + print("HF TOKEN:") + login(token=hf_token) + print("model_name:") + print(model_name) + + # Use absolute path with leading slash + local_path = f"/data/{model_name}" # Changed from "data/" to "/data/" + print(f"Loading model from local path: {local_path}") + + # Debug: Check if the directory exists and list its contents + if os.path.exists(local_path): + print("Directory exists. Contents:") + for item in os.listdir(local_path): + print(f" - {item}") + else: + print(f"Directory does not exist: {local_path}") + print("Contents of /data:") + print(os.listdir("/data")) + # CHECK FOR TORCH USING CUDA + print("CHECK FOR TORCH USING CUDA") + print(f"CUDA available: {torch.cuda.is_available()}") + if torch.cuda.is_available(): + print("inside if") + print(f"CUDA device count: {torch.cuda.device_count()}") + print(f"Current device: {torch.cuda.current_device()}") + print(f"Device name: {torch.cuda.get_device_name(torch.cuda.current_device())}") + + # progress(0.5, desc="Running the model...") + # yield "Running the model..." # Yield the model running message + + # INITIALIZING CPU PIPE + print("-----INITIALIZING PIPE-----") + pipe = FluxPipeline.from_pretrained( + local_path, + torch_dtype=torch.bfloat16, + #torch_dtype=torch.float16, + #torch_dtype=torch.float32, + local_files_only=True + ) + + if torch.cuda.is_available(): + print("CUDA available") + print("using gpu") + pipe = pipe.to("cuda") + pipe_message = "CUDA" + else: + print("CUDA not available") + print("using cpu") + #pipe = pipe.to("cpu") + pipe_message = "CPU" + #pipe.enable_model_cpu_offload() # Use official recommended method + print(f"-----{pipe_message} PIPE INITIALIZED-----") + print(f"Using device: {pipe.device}") + except Exception as e: + print(f"Detailed error: {str(e)}") + return None, f"ERROR: Failed to initialize PIPE2. Details: {e}" + try: + print("-----SENDING IMG GEN TO PIPE-----") + print("-----HOLD ON-----") + # progress(0.8, desc="Postprocessing the output...") + # yield "Postprocessing the output..." # Yield the postprocessing message + + image = pipe( + prompt, + guidance_scale=guidance_scale, + num_inference_steps=num_inference_steps, + width=width, + height=height, + max_sequence_length=512 + # seed=seed + ).images[0] + print("-----RENDER DONE!-----") + print(image) + except Exception as e: + return f"ERROR: Failed to initialize InferenceClient. 
Details: {e}" + + try: + print("-----IMAGE GENERATED SUCCESSFULLY!-----") + print("-----CALL THE BANNERS!-----") + print("-----SAVING TO DISK-----") + # Save the image with a timestamped filename + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + output_filename = f"/data/images/{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team_color.lower()}.png" + # Save the image using PIL's save method + image.save(output_filename) + print(f"File path: {output_filename}") + except Exception as e: + print(f"ERROR: Failed to save image. Details: {e}") + # Return the filename and success message + return image, "Image generated successfully! Call the banners!" \ No newline at end of file diff --git a/old/old v4/img_gen_modal_cpu.py b/old/old v4/img_gen_modal_cpu.py new file mode 100644 index 0000000000000000000000000000000000000000..07fc18a1dbd8bbb45d0d8b0983ef0527a70644b0 --- /dev/null +++ b/old/old v4/img_gen_modal_cpu.py @@ -0,0 +1,236 @@ +# img_gen.py +#img_gen_modal.py +# img_gen.py +# img_gen_modal.py +import modal +import random +import io +from config.config import prompts, models # Indirect import +import os +import gradio as gr + +CACHE_DIR = "/model_cache" + +# Define the Modal image +image = ( + modal.Image.from_registry("nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.9") + #modal.Image.debian_slim(python_version="3.9") # Base image + + .apt_install( + "git", + ) + .pip_install( + "diffusers", + "transformers", + "xformers", + "torch", + "accelerate", + "gradio>=4.44.1", + "safetensors", + "pillow", + "sentencepiece", + "hf_transfer", + "huggingface_hub[hf_transfer]", + "aria2", # aria2 for ultra-fast parallel downloads + f"git+https://github.com/huggingface/transformers.git" + ) + .env( + { + "HF_HUB_ENABLE_HF_TRANSFER": "1", "HF_HOME": "HF_HOME", "HF_HUB_CACHE": CACHE_DIR + } + ) +) + +# Create a Modal app +app = modal.App("img-gen-modal-cpu", image=image) +with image.imports(): + import diffusers + import os + import torch + import sentencepiece + import torch + from huggingface_hub import login + from transformers import AutoTokenizer + import random + from datetime import datetime + import xformers + +flux_model_vol = modal.Volume.from_name("flux-model-vol", create_if_missing=True) # Reference your volume + + +# CPU FUNCTION +@app.function(volumes={"/data": flux_model_vol}, + secrets=[modal.Secret.from_name("huggingface-token")], + #gpu="L40S", + cpu = 1, + timeout = 300 + ) +# MAIN GENERATE IMAGE FUNCTION +def generate_image( + prompt_alias, + team_color, + model_alias, + custom_prompt, + height=360, + width=640, + num_inference_steps=20, + guidance_scale=2.0, + seed=-1, + progress=gr.Progress(track_tqdm=True) # Add progress parameter + ): + with modal.enable_output(): + print("Hello from ctb_modal!") + # progress(0, desc="Starting...") # Initial progress + # yield "Initializing image generation..." # Yield the initial message + + print("Running debug check...") + # Debug function to check installed packages + def check_dependencies(): + packages = [ + "diffusers", # For Stable Diffusion + "transformers", # For Hugging Face models + "torch", # PyTorch + "accelerate", # For distributed training/inference + "gradio", # For the Gradio interface (updated to latest version) + "safetensors", # For safe model loading + "pillow", # For image processing + "sentencepiece" + ] + + for package in packages: + try: + import importlib + module = importlib.import_module(package) + print(f" {package} is installed. 
Version:") + except ImportError: + print(f" {package} is NOT installed.") + + check_dependencies() + # progress(0.2, desc="Preprocessing input...") + # yield "Preprocessing inputs..." # Yield the preprocessing message + + # Find the selected prompt and model + try: + prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"] + model_name = next(m for m in models if m["alias"] == model_alias)["name"] + except StopIteration: + return None, "ERROR: Invalid prompt or model selected." + + # Determine the enemy color + enemy_color = "blue" if team_color.lower() == "red" else "red" + + # Print the original prompt and dynamic values for debugging + print("Original Prompt:") + print(prompt) + print(f"Enemy Color: {enemy_color}") + print(f"Team Color: {team_color.lower()}") + + prompt = prompt.format(team_color=team_color.lower(), enemy_color=enemy_color) + + # Print the formatted prompt for debugging + print("\nFormatted Prompt:") + print(prompt) + + # Append the custom prompt (if provided) + if custom_prompt and len(custom_prompt.strip()) > 0: + prompt += " " + custom_prompt.strip() + + # Randomize the seed if needed + if seed == -1: + seed = random.randint(0, 1000000) + + try: + from diffusers import FluxPipeline + print("Initializing HF TOKEN") + hf_token = os.environ["HF_TOKEN"] + print(hf_token) + print("HF TOKEN:") + login(token=hf_token) + print("model_name:") + print(model_name) + + # Use absolute path with leading slash + local_path = f"/data/{model_name}" # Changed from "data/" to "/data/" + print(f"Loading model from local path: {local_path}") + + # Debug: Check if the directory exists and list its contents + if os.path.exists(local_path): + print("Directory exists. Contents:") + for item in os.listdir(local_path): + print(f" - {item}") + else: + print(f"Directory does not exist: {local_path}") + print("Contents of /data:") + print(os.listdir("/data")) + # CHECK FOR TORCH USING CUDA + print("CHECK FOR TORCH USING CUDA") + print(f"CUDA available: {torch.cuda.is_available()}") + if torch.cuda.is_available(): + print("inside if") + print(f"CUDA device count: {torch.cuda.device_count()}") + print(f"Current device: {torch.cuda.current_device()}") + print(f"Device name: {torch.cuda.get_device_name(torch.cuda.current_device())}") + + # progress(0.5, desc="Running the model...") + # yield "Running the model..." # Yield the model running message + + # INITIALIZING CPU PIPE + print("-----INITIALIZING PIPE-----") + pipe = FluxPipeline.from_pretrained( + local_path, + torch_dtype=torch.bfloat16, + #torch_dtype=torch.float16, + #torch_dtype=torch.float32, + local_files_only=True + ) + + if torch.cuda.is_available(): + print("CUDA available") + print("using gpu") + pipe = pipe.to("cuda") + pipe_message = "CUDA" + else: + print("CUDA not available") + print("using cpu") + #pipe = pipe.to("cpu") + pipe_message = "CPU" + #pipe.enable_model_cpu_offload() # Use official recommended method + print(f"-----{pipe_message} PIPE INITIALIZED-----") + print(f"Using device: {pipe.device}") + except Exception as e: + print(f"Detailed error: {str(e)}") + return None, f"ERROR: Failed to initialize PIPE2. Details: {e}" + try: + print("-----SENDING IMG GEN TO PIPE-----") + print("-----HOLD ON-----") + # progress(0.8, desc="Postprocessing the output...") + # yield "Postprocessing the output..." 
# Yield the postprocessing message + + image = pipe( + prompt, + guidance_scale=guidance_scale, + num_inference_steps=num_inference_steps, + width=width, + height=height, + max_sequence_length=512 + # seed=seed + ).images[0] + print("-----RENDER DONE!-----") + print(image) + except Exception as e: + return f"ERROR: Failed to initialize InferenceClient. Details: {e}" + + try: + print("-----IMAGE GENERATED SUCCESSFULLY!-----") + print("-----CALL THE BANNERS!-----") + print("-----SAVING TO DISK-----") + # Save the image with a timestamped filename + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + output_filename = f"/data/images/{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team_color.lower()}.png" + # Save the image using PIL's save method + image.save(output_filename) + print(f"File path: {output_filename}") + except Exception as e: + print(f"ERROR: Failed to save image. Details: {e}") + # Return the filename and success message + return image, "Image generated successfully! Call the banners!" diff --git a/old/old v4/img_gen_modal_gpu.py b/old/old v4/img_gen_modal_gpu.py new file mode 100644 index 0000000000000000000000000000000000000000..815137168d8afa86329e9aadf8316ce91b62e095 --- /dev/null +++ b/old/old v4/img_gen_modal_gpu.py @@ -0,0 +1,239 @@ +# img_gen.py +#img_gen_modal.py +# img_gen.py +# img_gen_modal.py +import modal +import random +import io +from config.config import prompts, models # Indirect import +import os +import gradio as gr + +CACHE_DIR = "/model_cache" + +# Define the Modal image +image = ( + modal.Image.from_registry("nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.9") + #modal.Image.debian_slim(python_version="3.9") # Base image + + .apt_install( + "git", + ) + .pip_install( + "diffusers", + "transformers", + "xformers", + "torch", + "accelerate", + "gradio>=4.44.1", + "safetensors", + "pillow", + "sentencepiece", + "hf_transfer", + "huggingface_hub[hf_transfer]", + "aria2", # aria2 for ultra-fast parallel downloads + f"git+https://github.com/huggingface/transformers.git" + ) + .env( + { + "HF_HUB_ENABLE_HF_TRANSFER": "1", "HF_HOME": "HF_HOME", "HF_HUB_CACHE": CACHE_DIR + } + ) +) + +# Create a Modal app +app = modal.App("img-gen-modal-gpu", image=image) +with image.imports(): + import diffusers + import os + import torch + import sentencepiece + import torch + from huggingface_hub import login + from transformers import AutoTokenizer + import random + from datetime import datetime + import xformers + +flux_model_vol = modal.Volume.from_name("flux-model-vol", create_if_missing=True) # Reference your volume + + +# CPU FUNCTION +@app.function(volumes={"/data": flux_model_vol}, + secrets=[modal.Secret.from_name("huggingface-token")], + gpu="L40S", + #cpu = 1, + timeout = 300 + ) + + + +# MAIN GENERATE IMAGE FUNCTION +def generate_image( + prompt_alias, + team_color, + model_alias, + custom_prompt, + height=360, + width=640, + num_inference_steps=20, + guidance_scale=2.0, + seed=-1, + progress=gr.Progress(track_tqdm=True) # Add progress parameter + ): + with modal.enable_output(): + print("Hello from ctb_modal!") + # progress(0, desc="Starting...") # Initial progress + # yield "Initializing image generation..." 
# Yield the initial message + + print("Running debug check...") + # Debug function to check installed packages + def check_dependencies(): + packages = [ + "diffusers", # For Stable Diffusion + "transformers", # For Hugging Face models + "torch", # PyTorch + "accelerate", # For distributed training/inference + "gradio", # For the Gradio interface (updated to latest version) + "safetensors", # For safe model loading + "pillow", # For image processing + "sentencepiece" + ] + + for package in packages: + try: + import importlib + module = importlib.import_module(package) + print(f" {package} is installed. Version:") + except ImportError: + print(f" {package} is NOT installed.") + + check_dependencies() + # progress(0.2, desc="Preprocessing input...") + # yield "Preprocessing inputs..." # Yield the preprocessing message + + # Find the selected prompt and model + try: + prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"] + model_name = next(m for m in models if m["alias"] == model_alias)["name"] + except StopIteration: + return None, "ERROR: Invalid prompt or model selected." + + # Determine the enemy color + enemy_color = "blue" if team_color.lower() == "red" else "red" + + # Print the original prompt and dynamic values for debugging + print("Original Prompt:") + print(prompt) + print(f"Enemy Color: {enemy_color}") + print(f"Team Color: {team_color.lower()}") + + prompt = prompt.format(team_color=team_color.lower(), enemy_color=enemy_color) + + # Print the formatted prompt for debugging + print("\nFormatted Prompt:") + print(prompt) + + # Append the custom prompt (if provided) + if custom_prompt and len(custom_prompt.strip()) > 0: + prompt += " " + custom_prompt.strip() + + # Randomize the seed if needed + if seed == -1: + seed = random.randint(0, 1000000) + + try: + from diffusers import FluxPipeline + print("Initializing HF TOKEN") + hf_token = os.environ["HF_TOKEN"] + print(hf_token) + print("HF TOKEN:") + login(token=hf_token) + print("model_name:") + print(model_name) + + # Use absolute path with leading slash + local_path = f"/data/{model_name}" # Changed from "data/" to "/data/" + print(f"Loading model from local path: {local_path}") + + # Debug: Check if the directory exists and list its contents + if os.path.exists(local_path): + print("Directory exists. Contents:") + for item in os.listdir(local_path): + print(f" - {item}") + else: + print(f"Directory does not exist: {local_path}") + print("Contents of /data:") + print(os.listdir("/data")) + # CHECK FOR TORCH USING CUDA + print("CHECK FOR TORCH USING CUDA") + print(f"CUDA available: {torch.cuda.is_available()}") + if torch.cuda.is_available(): + print("inside if") + print(f"CUDA device count: {torch.cuda.device_count()}") + print(f"Current device: {torch.cuda.current_device()}") + print(f"Device name: {torch.cuda.get_device_name(torch.cuda.current_device())}") + + # progress(0.5, desc="Running the model...") + # yield "Running the model..." 
# Yield the model running message + + # INITIALIZING CPU PIPE + print("-----INITIALIZING PIPE-----") + pipe = FluxPipeline.from_pretrained( + local_path, + torch_dtype=torch.bfloat16, + #torch_dtype=torch.float16, + #torch_dtype=torch.float32, + local_files_only=True + ) + + if torch.cuda.is_available(): + print("CUDA available") + print("using gpu") + pipe = pipe.to("cuda") + pipe_message = "CUDA" + else: + print("CUDA not available") + print("using cpu") + #pipe = pipe.to("cpu") + pipe_message = "CPU" + #pipe.enable_model_cpu_offload() # Use official recommended method + print(f"-----{pipe_message} PIPE INITIALIZED-----") + print(f"Using device: {pipe.device}") + except Exception as e: + print(f"Detailed error: {str(e)}") + return None, f"ERROR: Failed to initialize PIPE2. Details: {e}" + try: + print("-----SENDING IMG GEN TO PIPE-----") + print("-----HOLD ON-----") + # progress(0.8, desc="Postprocessing the output...") + # yield "Postprocessing the output..." # Yield the postprocessing message + + image = pipe( + prompt, + guidance_scale=guidance_scale, + num_inference_steps=num_inference_steps, + width=width, + height=height, + max_sequence_length=512 + # seed=seed + ).images[0] + print("-----RENDER DONE!-----") + print(image) + except Exception as e: + return f"ERROR: Failed to initialize InferenceClient. Details: {e}" + + try: + print("-----IMAGE GENERATED SUCCESSFULLY!-----") + print("-----CALL THE BANNERS!-----") + print("-----SAVING TO DISK-----") + # Save the image with a timestamped filename + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + output_filename = f"/data/images/{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team_color.lower()}.png" + # Save the image using PIL's save method + image.save(output_filename) + print(f"File path: {output_filename}") + except Exception as e: + print(f"ERROR: Failed to save image. Details: {e}") + # Return the filename and success message + return image, "Image generated successfully! Call the banners!" 
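Every generate function above builds the same timestamped output path under /data/images by hand and simply assumes that directory already exists on the volume. A small helper that factors out the naming (the filename format is taken verbatim from the scripts above; creating the directory first is an added assumption):

import os
from datetime import datetime

def build_output_path(model_alias, prompt_alias, team_color, base_dir="/data/images"):
    # Same naming scheme the generate functions above use for saved renders.
    os.makedirs(base_dir, exist_ok=True)  # added: the scripts assume this directory exists
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    filename = (
        f"{timestamp}_{model_alias.replace(' ', '_').lower()}_"
        f"{prompt_alias.replace(' ', '_').lower()}_{team_color.lower()}.png"
    )
    return os.path.join(base_dir, filename)

# Usage inside one of the Modal functions above:
# image.save(build_output_path(model_alias, prompt_alias, team_color))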
diff --git a/old/old v5/.DS_Store b/old/old v5/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..fc9758b7f2c882fb37b1ecd24a8a1386cebb0513 Binary files /dev/null and b/old/old v5/.DS_Store differ diff --git a/old/old v5/app.py b/old/old v5/app.py new file mode 100644 index 0000000000000000000000000000000000000000..22e997a9c4659b105c80c6bcf28483e8ea95a09d --- /dev/null +++ b/old/old v5/app.py @@ -0,0 +1,8 @@ +# app.py +#IMPORT gradio_interface +from src.gradio_interface import demo +from config.config import models, prompts, api_token # Direct import + + +# Launch the Gradio app +demo.queue().launch() \ No newline at end of file diff --git a/old/old v5/config/__pycache__/__init__.cpython-310.pyc b/old/old v5/config/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bec37269799d49dd1b6a98af7d39e7156e635bdb Binary files /dev/null and b/old/old v5/config/__pycache__/__init__.cpython-310.pyc differ diff --git a/old/old v5/config/__pycache__/config.cpython-310.pyc b/old/old v5/config/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b19e14bd60b13e13b264cc44971b4506c246609b Binary files /dev/null and b/old/old v5/config/__pycache__/config.cpython-310.pyc differ diff --git a/old/old v5/config/__pycache__/config.cpython-311.pyc b/old/old v5/config/__pycache__/config.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7b7a033e1d4b9d761145aa19bb90151199cec489 Binary files /dev/null and b/old/old v5/config/__pycache__/config.cpython-311.pyc differ diff --git a/old/old v5/config/__pycache__/config.cpython-39.pyc b/old/old v5/config/__pycache__/config.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8179a4384c09ad30973546890fd6c7da29bae150 Binary files /dev/null and b/old/old v5/config/__pycache__/config.cpython-39.pyc differ diff --git a/old/old v5/config/__pycache__/models.cpython-310.pyc b/old/old v5/config/__pycache__/models.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9f89492add4b9f9249807fa397c76cc523eb0139 Binary files /dev/null and b/old/old v5/config/__pycache__/models.cpython-310.pyc differ diff --git a/old/old v5/config/__pycache__/models.cpython-311.pyc b/old/old v5/config/__pycache__/models.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab88c151c5824d19b9d16fd2c9535b5524d947a5 Binary files /dev/null and b/old/old v5/config/__pycache__/models.cpython-311.pyc differ diff --git a/old/old v5/config/__pycache__/models.cpython-39.pyc b/old/old v5/config/__pycache__/models.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6d4a94d4d8fb8bfa44caece0b4120677d9b9aad6 Binary files /dev/null and b/old/old v5/config/__pycache__/models.cpython-39.pyc differ diff --git a/old/old v5/config/__pycache__/prompts.cpython-310.pyc b/old/old v5/config/__pycache__/prompts.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b7e4da5188280dfa9fb854285d72e9dbab0666b Binary files /dev/null and b/old/old v5/config/__pycache__/prompts.cpython-310.pyc differ diff --git a/old/old v5/config/__pycache__/prompts.cpython-311.pyc b/old/old v5/config/__pycache__/prompts.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..645df3c16a4578477610f17555e141aed1888e71 Binary files /dev/null and b/old/old v5/config/__pycache__/prompts.cpython-311.pyc differ diff 
--git a/old/old v5/config/__pycache__/prompts.cpython-39.pyc b/old/old v5/config/__pycache__/prompts.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c3d63e5347171c21bf639090499a97c2b8b8b570 Binary files /dev/null and b/old/old v5/config/__pycache__/prompts.cpython-39.pyc differ diff --git a/old/old v5/config/config.py b/old/old v5/config/config.py new file mode 100644 index 0000000000000000000000000000000000000000..42d12b4fc137871a73f3d9f51caf0bcb894cae72 --- /dev/null +++ b/old/old v5/config/config.py @@ -0,0 +1,13 @@ +# config.py +import os +from config.prompts import prompts # Direct Import prompts from prompts.py +from config.models import models, models_modal # Direct Import models + +# Retrieve the Hugging Face token +api_token = os.getenv("HF_TOKEN") + +# Debugging: Print prompt and model options +print("Prompt Options:", [p["alias"] for p in prompts]) +print("Model Options:", [m["alias"] for m in models]) + + diff --git a/old/old v5/config/config_colab.py b/old/old v5/config/config_colab.py new file mode 100644 index 0000000000000000000000000000000000000000..6ab0b995f11e359f6ec9a4a7a6b36a2a6aeb1cf7 --- /dev/null +++ b/old/old v5/config/config_colab.py @@ -0,0 +1,21 @@ +# config_colab.py +from google.colab import userdata +from config.prompts import prompts # Import prompts from prompts.py +from config.models import models + +# Retrieve the Hugging Face token from Colab secrets +api_token = userdata.get("HF_TOKEN") + +# Debugging: Check if the Hugging Face token is available +if not api_token: + print("=== Debug: Error ===") + print("ERROR: Hugging Face token (HF_CTB_TOKEN) is missing. Please set it in Colab secrets.") +else: + print("=== Debug: Success ===") + print("Hugging Face token loaded successfully.") + +# Debugging: Print prompt and model options +print("=== Debug: Available Options ===") +print("Prompt Options:", [p["alias"] for p in prompts]) +print("Model Options:", [m["alias"] for m in models]) +print("=================================") \ No newline at end of file diff --git a/old/old v5/config/models old.py b/old/old v5/config/models old.py new file mode 100644 index 0000000000000000000000000000000000000000..5bde7962bdce5b89342d47bc554932b49564fb9e --- /dev/null +++ b/old/old v5/config/models old.py @@ -0,0 +1,12 @@ +# List of models with aliases +models = [ + #{"alias": "FLUX.1-devold", "name": "black-forest-labs/FLUX.1-dev"}, + #{"alias": "Midjourney", "name": "strangerzonehf/Flux-Midjourney-Mix2-LoRA"}, + {"alias": "FLUX.1-dev", "name": "FLUX.1-dev"}, + {"alias": "FLUX.1-schnell", "name": "black-forest-labs/FLUX.1-schnell"}, + #{"alias": "Andre", "name": "Andre"} + + +] +# Debugging: Print prompt and model options +#print("Model Options:", [m["alias"] for m in models]) \ No newline at end of file diff --git a/old/old v5/config/models.py b/old/old v5/config/models.py new file mode 100644 index 0000000000000000000000000000000000000000..0a7a4384d8854c27eb4b783318100c74ead59b71 --- /dev/null +++ b/old/old v5/config/models.py @@ -0,0 +1,23 @@ +# List of models with aliases +models = [ + {"alias": "FLUX.1-dev", "name": "black-forest-labs/FLUX.1-dev"}, + {"alias": "FLUX.1-dev_modal_local", "name": "FLUX.1-dev"}, + {"alias": "Midjourney", "name": "strangerzonehf/Flux-Midjourney-Mix2-LoRA"}, + {"alias": "FLUX.1-schnell", "name": "black-forest-labs/FLUX.1-schnell"}, + {"alias": "FLUX.1-schnell_modal_local", "name": "FLUX.1-schnell"}, +] + + +models_modal = [ + {"alias": "FLUX.1-dev_modal_local", "name": "FLUX.1-dev"}, + {"alias": 
"FLUX.1-schnell_modal_local", "name": "FLUX.1-schnell"}, + #{"alias": "FLUX.1-dev", "name": "black-forest-labs/FLUX.1-dev"}, + #{"alias": "Midjourney", "name": "strangerzonehf/Flux-Midjourney-Mix2-LoRA"}, + #{"alias": "FLUX.1-schnell", "name": "black-forest-labs/FLUX.1-schnell"}, + + +] + + +# Debugging: Print prompt and model options +#print("Model Options:", [m["alias"] for m in models]) diff --git a/old/old v5/config/prompts.py b/old/old v5/config/prompts.py new file mode 100644 index 0000000000000000000000000000000000000000..cc1e8ee26276a65a706ec945c950477f0d3dc299 --- /dev/null +++ b/old/old v5/config/prompts.py @@ -0,0 +1,46 @@ + +# List of prompts with intense combat +# + +prompts = [ + { + "alias": "Castle Siege", + "text": "A medieval castle under siege, with archers firing arrows from the walls, knights charging on horses, and catapults launching fireballs. The winning army is dressed in {team_color} armor and banners. The enemy army, dressed in {enemy_color} armor and banners, is fiercely attacking the castle, with soldiers scaling ladders and clashing swords with the defenders. Arrows fly through the air, explosions light up the battlefield, and injured knights lie on the ground. Fire engulfs parts of the castle, and the air is thick with smoke and chaos. Unreal Engine render style, photorealistic, realistic fantasy style." + }, + { + "alias": "Forest Battle", + "text": "A fierce battle between two armies in a dense forest, with knights wielding swords and axes, horses rearing, and the ground covered in mud and blood. The winning army is dressed in {team_color} armor and banners. The enemy army, dressed in {enemy_color} armor and banners, is locked in brutal combat, with soldiers fighting hand-to-hand amidst the trees. Arrows whiz past, and the sounds of clashing steel echo through the forest. Injured soldiers scream in pain, and the forest is littered with broken weapons and shields. Unreal Engine render style, photorealistic, realistic fantasy style." + }, + { + "alias": "Boiling Oil Defense", + "text": "A dramatic moment in a medieval siege, with a knight leading a charge against a castle gate, while defenders pour boiling oil from the walls. The winning army is dressed in {team_color} armor and banners. The enemy army, dressed in {enemy_color} armor and banners, is relentlessly attacking, with soldiers screaming as they are hit by the oil. Knights clash swords at the gate, and arrows rain down from above. The ground is littered with the bodies of fallen soldiers, and the air is filled with the smell of burning flesh. Unreal Engine render style, photorealistic, realistic fantasy style." + }, + { + "alias": "Burning Castle Battle", + "text": "A chaotic battlefield with knights on horseback clashing with infantry, archers firing volleys of arrows, and a castle burning in the background. The winning army is dressed in {team_color} armor and banners. The enemy army, dressed in {enemy_color} armor and banners, is fighting fiercely, with soldiers engaging in brutal melee combat. Flames light up the scene as knights charge through the chaos. Injured soldiers crawl on the ground, and the air is filled with the sounds of clashing steel and screams of pain. Unreal Engine render style, photorealistic, realistic fantasy style." + }, + { + "alias": "Heroic Last Stand", + "text": "A heroic last stand of a small group of knights defending a bridge against a massive army, with arrows flying and swords clashing. The winning army is dressed in {team_color} armor and banners. 
The enemy army, dressed in {enemy_color} armor and banners, is overwhelming the defenders, but the knights fight bravely, cutting down enemy soldiers as they advance. The bridge is littered with bodies and broken weapons. Blood stains the ground, and the air is thick with the sounds of battle. Unreal Engine render style, photorealistic, realistic fantasy style." + }, + { + "alias": "Siege Tower Attack", + "text": "A medieval siege tower approaching a castle wall, with knights scaling ladders and defenders throwing rocks and shooting arrows. The winning army is dressed in {team_color} armor and banners. The enemy army, dressed in {enemy_color} armor and banners, is fighting desperately to breach the walls, with soldiers clashing swords on the battlements. Arrows fly in all directions, and the siege tower is engulfed in flames. Injured soldiers fall from the ladders, and the ground is littered with the bodies of the fallen. Unreal Engine render style, photorealistic, realistic fantasy style." + }, + { + "alias": "Knight Duel", + "text": "A dramatic duel between two knights in the middle of a battlefield, with their armies watching and the castle in the background. The winning army is dressed in {team_color} armor and banners. The enemy army, dressed in {enemy_color} armor and banners, is engaged in fierce combat all around, with soldiers clashing swords and firing arrows. The duelists fight with skill and determination, their blades flashing in the sunlight. Injured soldiers lie on the ground, and the air is filled with the sounds of battle. Unreal Engine render style, photorealistic, realistic fantasy style. " + }, + { + "alias": "Night Battle", + "text": "A night battle during a medieval siege, with torches lighting the scene, knights fighting in the shadows, and the castle walls looming in the background. The winning army is dressed in {team_color} armor and banners. The enemy army, dressed in {enemy_color} armor and banners, is locked in brutal combat, with soldiers clashing swords and firing arrows in the dim light. Flames from burning siege equipment illuminate the chaos. Injured soldiers scream in pain, and the ground is littered with the bodies of the fallen. Unreal Engine render style, photorealistic, realistic fantasy style." + }, + { + "alias": "Marching Army", + "text": "A massive army of knights and infantry marching towards a distant castle, with banners flying and the sun setting behind them. The winning army is dressed in {team_color} armor and banners. The enemy army, dressed in {enemy_color} armor and banners, is engaging in skirmishes along the way, with soldiers clashing swords and firing arrows. The battlefield is alive with the sounds of combat and the clash of steel. Injured soldiers lie on the ground, and the air is thick with the smell of blood and smoke. Unreal Engine render style, photorealistic, realistic fantasy style." + }, + { + "alias": "Snowy Battlefield", + "text": "A medieval battle in a snowy landscape, with knights in heavy armor fighting on a frozen lake, and the castle visible in the distance. The winning army is dressed in {team_color} armor and banners. The enemy army, dressed in {enemy_color} armor and banners, is locked in fierce combat, with soldiers slipping on the ice as they clash swords. Arrows fly through the air, and the snow is stained red with blood. Injured soldiers crawl on the ground, and the air is filled with the sounds of battle. Unreal Engine render style, photorealistic, realistic fantasy style." 
+ } +] diff --git a/old/old v5/config/prompts_old.py b/old/old v5/config/prompts_old.py new file mode 100644 index 0000000000000000000000000000000000000000..5d6335ab80b292b7e41def3cf9aca66e1f2af009 --- /dev/null +++ b/old/old v5/config/prompts_old.py @@ -0,0 +1,46 @@ + +# List of prompts with intense combat +# THIS MESSAGE WAS APPENDED IN THE FINAL +# winning_team_text = " The winning army is dressed in red armor and banners." +prompts = [ + { + "alias": "Castle Siege", + "text": "A medieval castle under siege, with archers firing arrows from the walls, knights charging on horses, and catapults launching fireballs. The enemy army, dressed in {enemy_color} armor, is fiercely attacking the castle, with soldiers scaling ladders and clashing swords with the defenders. Arrows fly through the air, explosions light up the battlefield, and injured knights lie on the ground. Fire engulfs parts of the castle, and the air is thick with smoke and chaos. Unreal Engine render style, photorealistic, realistic fantasy style.{winning_team_text}" + }, + { + "alias": "Forest Battle", + "text": "A fierce battle between two armies in a dense forest, with knights wielding swords and axes, horses rearing, and the ground covered in mud and blood. . The enemy army, dressed in {enemy_color} armor, is locked in brutal combat, with soldiers fighting hand-to-hand amidst the trees. Arrows whiz past, and the sounds of clashing steel echo through the forest. Injured soldiers scream in pain, and the forest is littered with broken weapons and shields. Unreal Engine render style, photorealistic, realistic fantasy style.{winning_team_text}" + }, + { + "alias": "Boiling Oil Defense", + "text": "A dramatic moment in a medieval siege, with a knight leading a charge against a castle gate, while defenders pour boiling oil from the walls. The enemy army, dressed in {enemy_color} armor, is relentlessly attacking, with soldiers screaming as they are hit by the oil. Knights clash swords at the gate, and arrows rain down from above. The ground is littered with the bodies of fallen soldiers, and the air is filled with the smell of burning flesh. Unreal Engine render style, photorealistic, realistic fantasy style.{winning_team_text}" + }, + { + "alias": "Burning Castle Battle", + "text": "A chaotic battlefield with knights on horseback clashing with infantry, archers firing volleys of arrows, and a castle burning in the background. The enemy army, dressed in {enemy_color} armor, is fighting fiercely, with soldiers engaging in brutal melee combat. Flames light up the scene as knights charge through the chaos. Injured soldiers crawl on the ground, and the air is filled with the sounds of clashing steel and screams of pain. Unreal Engine render style, photorealistic, realistic fantasy style.{winning_team_text}" + }, + { + "alias": "Heroic Last Stand", + "text": "A heroic last stand of a small group of knights defending a bridge against a massive army, with arrows flying and swords clashing. The enemy army, dressed in {enemy_color} armor, is overwhelming the defenders, but the knights fight bravely, cutting down enemy soldiers as they advance. The bridge is littered with bodies and broken weapons. Blood stains the ground, and the air is thick with the sounds of battle. Unreal Engine render style, photorealistic, realistic fantasy style." + }, + { + "alias": "Siege Tower Attack", + "text": "A medieval siege tower approaching a castle wall, with knights scaling ladders and defenders throwing rocks and shooting arrows. 
The enemy army, dressed in {enemy_color} armor, is fighting desperately to breach the walls, with soldiers clashing swords on the battlements. Arrows fly in all directions, and the siege tower is engulfed in flames. Injured soldiers fall from the ladders, and the ground is littered with the bodies of the fallen. Unreal Engine render style, photorealistic, realistic fantasy style.{winning_team_text}" + }, + { + "alias": "Knight Duel", + "text": "A dramatic duel between two knights in the middle of a battlefield, with their armies watching and the castle in the background. The enemy army, dressed in {enemy_color} armor, is engaged in fierce combat all around, with soldiers clashing swords and firing arrows. The duelists fight with skill and determination, their blades flashing in the sunlight. Injured soldiers lie on the ground, and the air is filled with the sounds of battle. Unreal Engine render style, photorealistic, realistic fantasy style.{winning_team_text}" + }, + { + "alias": "Night Battle", + "text": "A night battle during a medieval siege, with torches lighting the scene, knights fighting in the shadows, and the castle walls looming in the background. The enemy army, dressed in {enemy_color} armor, is locked in brutal combat, with soldiers clashing swords and firing arrows in the dim light. Flames from burning siege equipment illuminate the chaos. Injured soldiers scream in pain, and the ground is littered with the bodies of the fallen. Unreal Engine render style, photorealistic, realistic fantasy style.{winning_team_text}" + }, + { + "alias": "Marching Army", + "text": "A massive army of knights and infantry marching towards a distant castle, with banners flying and the sun setting behind them. The enemy army, dressed in {enemy_color} armor, is engaging in skirmishes along the way, with soldiers clashing swords and firing arrows. The battlefield is alive with the sounds of combat and the clash of steel. Injured soldiers lie on the ground, and the air is thick with the smell of blood and smoke. Unreal Engine render style, photorealistic, realistic fantasy style.{winning_team_text}" + }, + { + "alias": "Snowy Battlefield", + "text": "A medieval battle in a snowy landscape, with knights in heavy armor fighting on a frozen lake, and the castle visible in the distance. The enemy army, dressed in {enemy_color} armor, is locked in fierce combat, with soldiers slipping on the ice as they clash swords. Arrows fly through the air, and the snow is stained red with blood. Injured soldiers crawl on the ground, and the air is filled with the sounds of battle. 
Unreal Engine render style, photorealistic, realistic fantasy style.{winning_team_text}" + } +] diff --git a/old/old v5/src/gradio_interface.py b/old/old v5/src/gradio_interface.py new file mode 100644 index 0000000000000000000000000000000000000000..fead73e76e1f5561338f3aea8d461fe85f57e82f --- /dev/null +++ b/old/old v5/src/gradio_interface.py @@ -0,0 +1,43 @@ +# gradio_interface.py (HuggingFace Spaces) +import gradio as gr +from config.config import prompts, models, api_token # Direct import +from src.img_gen import generate_image + + +# Gradio Interface +def gradio_interface(): + with gr.Blocks() as demo: + gr.Markdown("# CtB AI Image Generator - Inference version (HF)") + with gr.Row(): + # Set default values for dropdowns + prompt_dropdown = gr.Dropdown(choices=[p["alias"] for p in prompts], label="Select Prompt", value=prompts[0]["alias"]) + team_dropdown = gr.Dropdown(choices=["Red", "Blue"], label="Select Team", value="Red") + #model_dropdown = gr.Dropdown(choices=[m["alias"] for m in models], label="Select Model", value=models[0]["alias"]) + with gr.Row(): + # Add a text box for custom user input (max 200 characters) + custom_prompt_input = gr.Textbox(label="Custom Prompt (Optional)", placeholder="Enter additional details (max 200 chars)...", max_lines=1, max_length=200) + with gr.Row(): + generate_button = gr.Button("Generate Image") + with gr.Row(): + output_image = gr.Image(label="Generated Image", show_label=False, scale=0, width=50) + with gr.Row(): + status_text = gr.Textbox(label="Status", placeholder="Waiting for input...", interactive=False) + + # Connect the button to the function + generate_button.click( + generate_image, + inputs=[prompt_dropdown, + team_dropdown, + custom_prompt_input, + #model_dropdown, + ], + outputs=[output_image, status_text] + ) + return demo + +# Create the demo instance +demo = gradio_interface() + +# Only launch if running directly +if __name__ == "__main__": + demo.queue().launch() \ No newline at end of file diff --git a/old/old v5/src/img_gen.py b/old/old v5/src/img_gen.py new file mode 100644 index 0000000000000000000000000000000000000000..a77d941053e57026530c1b31acd8c0174cee6b54 --- /dev/null +++ b/old/old v5/src/img_gen.py @@ -0,0 +1,98 @@ +# img_gen.py +import sys +import os +import random +from huggingface_hub import InferenceClient, login +from datetime import datetime +from config.config import models, prompts, api_token # Direct import + +def generate_image( + prompt_alias, + team_color, + custom_prompt, + model_alias="FLUX.1-dev", + height=360, + width=640, + num_inference_steps=20, + guidance_scale=2.0, + seed=-1): + # Find the selected prompt and model + try: + prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"] + #model_name = next(m for m in models if m["alias"] == model_alias)["name"] + model_name = f"black-forest-labs/{model_alias}" + + except StopIteration: + return None, "ERROR: Invalid prompt or model selected." + + # Determine the enemy color + enemy_color = "blue" if team_color.lower() == "red" else "red" + + # if team.lower() == "red": + # winning_team_text = " The winning army is dressed in red armor and banners." + # elif team.lower() == "blue": + # winning_team_text = " The winning army is dressed in blue armor and banners." 
+ + # Print the original prompt and dynamic values for debugging + print("Original Prompt:") + print(prompt) + print(f"Enemy Color: {enemy_color}") + print(f"Team Color: {team_color.lower()}") + + prompt = prompt.format(team_color=team_color.lower(), enemy_color=enemy_color) + + # Append the custom prompt (if provided) + if custom_prompt and len(custom_prompt.strip()) > 0: + prompt += " " + custom_prompt.strip() + + # Print the formatted prompt for debugging + print("\nFormatted Prompt:") + print(prompt) + + # Randomize the seed if needed + if seed == -1: + seed = random.randint(0, 1000000) + + # HF LOGIN + print("Initializing HF TOKEN") + print (api_token) + # login(token=api_token) + # print("model_name:") + # print(model_name) + + + # Initialize the InferenceClient + try: + print("-----INITIALIZING INFERENCE-----") + client = InferenceClient(model_name, token=api_token) + print("Inference activated") + except Exception as e: + return None, f"ERROR: Failed to initialize InferenceClient. Details: {e}" + + #Generate the image + try: + print("-----GENERATING IMAGE-----") + image = client.text_to_image( + prompt, + guidance_scale=guidance_scale, + num_inference_steps=num_inference_steps, + width=width, + height=height, + seed=seed + ) + print("-----IMAGE GENERATED SUCCESSFULLY!-----") + except Exception as e: + return None, f"ERROR: Failed to generate image. Details: {e}" + + # Save the image with a timestamped filename + print("-----SAVING-----", image) + path = "images" + + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + output_filename = f"{path}/{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team_color.lower()}.png" + try: + image.save(output_filename) + except Exception as e: + return None, f"ERROR: Failed to save image. Details: {e}" + print("-----DONE!-----") + return output_filename, "Image generated successfully!" 
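Note: the save step above writes into a relative images/ folder, and image.save() will raise FileNotFoundError if that folder does not exist yet. A small guard, a sketch using only the standard library, placed before output_filename is built:

import os

path = "images"
os.makedirs(path, exist_ok=True)  # create the output folder on the first run; a no-op afterwards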
\ No newline at end of file diff --git a/old/old v6/app copy.py b/old/old v6/app copy.py new file mode 100644 index 0000000000000000000000000000000000000000..22e997a9c4659b105c80c6bcf28483e8ea95a09d --- /dev/null +++ b/old/old v6/app copy.py @@ -0,0 +1,8 @@ +# app.py +#IMPORT gradio_interface +from src.gradio_interface import demo +from config.config import models, prompts, api_token # Direct import + + +# Launch the Gradio app +demo.queue().launch() \ No newline at end of file diff --git a/old/old v6/app_local copy.py b/old/old v6/app_local copy.py new file mode 100644 index 0000000000000000000000000000000000000000..bf6e0999802154d7ca9b9c393ef5742c2e9cc3df --- /dev/null +++ b/old/old v6/app_local copy.py @@ -0,0 +1,8 @@ +# app.py +#IMPORT gradio_interface +from src.gradio_interface_local import demo +from config.config import models, prompts, api_token # Direct import + + +# Launch the Gradio app +demo.queue().launch() \ No newline at end of file diff --git a/old/old v6/app_modal copy.py b/old/old v6/app_modal copy.py new file mode 100644 index 0000000000000000000000000000000000000000..024c0c3ae8998df2832007567bafda260a1bd397 --- /dev/null +++ b/old/old v6/app_modal copy.py @@ -0,0 +1,8 @@ +# app.py +#IMPORT gradio_interface +from src.gradio_interface_modal import demo +from config.config import models, models_modal, prompts, api_token # Direct import + + +# Launch the Gradio app +demo.queue().launch() \ No newline at end of file diff --git a/old/old v6/gradio_interface.py b/old/old v6/gradio_interface.py new file mode 100644 index 0000000000000000000000000000000000000000..141960631b8d628e61ddfb53ff5c524b35148444 --- /dev/null +++ b/old/old v6/gradio_interface.py @@ -0,0 +1,48 @@ +# gradio_interface.py (HuggingFace Spaces) +import gradio as gr +from config.config import prompts, models, api_token # Direct import +from src.img_gen import generate_image + + +# Gradio Interface +def gradio_interface(): + with gr.Blocks(css=""" + .output-image img { + width: 2500px; /* Force image to fill container width */ + object-fit: cover; /* ACTIVATE FOR IMAGE-FIT CONTAINER */ + } + """) as demo: + gr.Markdown("# CtB AI Image Generator - Inference version (HF)") + with gr.Row(): + # Set default values for dropdowns + prompt_dropdown = gr.Dropdown(choices=[p["alias"] for p in prompts], label="Select Prompt", value=prompts[0]["alias"]) + team_dropdown = gr.Dropdown(choices=["Red", "Blue"], label="Select Team", value="Red") + #model_dropdown = gr.Dropdown(choices=[m["alias"] for m in models], label="Select Model", value=models[0]["alias"]) + with gr.Row(): + # Add a text box for custom user input (max 200 characters) + custom_prompt_input = gr.Textbox(label="Custom Prompt (Optional)", placeholder="Enter additional details (max 200 chars)...", max_lines=1, max_length=200) + with gr.Row(): + generate_button = gr.Button("Generate Image") + with gr.Row(): + output_image = gr.Image(elem_classes="output-image", label="Generated Image", show_label=False, scale=1, width="100%") + with gr.Row(): + status_text = gr.Textbox(label="Status", placeholder="Waiting for input...", interactive=False) + + # Connect the button to the function + generate_button.click( + generate_image, + inputs=[prompt_dropdown, + team_dropdown, + custom_prompt_input, + #model_dropdown, + ], + outputs=[output_image, status_text] + ) + return demo + +# Create the demo instance +demo = gradio_interface() + +# Only launch if running directly +if __name__ == "__main__": + demo.queue().launch() \ No newline at end of file diff --git a/old/old 
v6/gradio_interface_local.py b/old/old v6/gradio_interface_local.py new file mode 100644 index 0000000000000000000000000000000000000000..9e3c3d743225ef8b7afe81029b0ac03ba4e792af --- /dev/null +++ b/old/old v6/gradio_interface_local.py @@ -0,0 +1,48 @@ +# gradio_interface.py (HuggingFace Spaces) +import gradio as gr +from config.config import prompts, models, api_token # Direct import +from src.img_gen_local import generate_image + + +# Gradio Interface +def gradio_interface(): + with gr.Blocks(css=""" + .output-image img { + width: 2500px; /* Force image to fill container width */ + object-fit: cover; /* ACTIVATE FOR IMAGE-FIT CONTAINER */ + } + """) as demo: + gr.Markdown("# CtB AI Image Generator - local version") + with gr.Row(): + # Set default values for dropdowns + prompt_dropdown = gr.Dropdown(choices=[p["alias"] for p in prompts], label="Select Prompt", value=prompts[0]["alias"]) + team_dropdown = gr.Dropdown(choices=["Red", "Blue"], label="Select Team", value="Red") + #model_dropdown = gr.Dropdown(choices=[m["alias"] for m in models], label="Select Model", value=models[0]["alias"]) + with gr.Row(): + # Add a text box for custom user input (max 200 characters) + custom_prompt_input = gr.Textbox(label="Custom Prompt (Optional)", placeholder="Enter additional details (max 200 chars)...", max_lines=1, max_length=200) + with gr.Row(): + generate_button = gr.Button("Generate Image") + with gr.Row(): + output_image = gr.Image(elem_classes="output-image", label="Generated Image", show_label=False, scale=1, width="100%") + with gr.Row(): + status_text = gr.Textbox(label="Status", placeholder="Waiting for input...", interactive=False) + + # Connect the button to the function + generate_button.click( + generate_image, + inputs=[prompt_dropdown, + team_dropdown, + custom_prompt_input, + #model_dropdown, + ], + outputs=[output_image, status_text] + ) + return demo + +# Create the demo instance +demo = gradio_interface() + +# Only launch if running directly +if __name__ == "__main__": + demo.queue().launch() \ No newline at end of file diff --git a/old/old v6/gradio_interface_modal.py b/old/old v6/gradio_interface_modal.py new file mode 100644 index 0000000000000000000000000000000000000000..c105def7257b78f9e0cc3464c2fcd21d75177f01 --- /dev/null +++ b/old/old v6/gradio_interface_modal.py @@ -0,0 +1,87 @@ +# gradio_interface.py +import gradio as gr +import modal +from config.config import prompts, models, models_modal # Indirect import +#from img_gen import generate_image + +print("Hello from gradio_interface_head!") + +# Modal remote function synchronously +def generate(cpu_gpu, prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input): + # Debug: + debug_message = f"Debug: Button clicked! 
Inputs - Prompt: {prompt_dropdown}, Team: {team_dropdown}, Model: {model_dropdown}, Custom Prompt: {custom_prompt_input}" + print(debug_message) # Print to console for debugging + try: + # Check for CPU/GPU dropdown option + if cpu_gpu == "GPU": + f = modal.Function.from_name("img-gen-modal", "generate_image_gpu") + else: + f = modal.Function.from_name("img-gen-modal", "generate_image_cpu") + + # Import the remote function + image_path, message = f.remote( + prompt_dropdown, + team_dropdown, + model_dropdown, + custom_prompt_input, + ) + return image_path, message + except Exception as e: + return None, f"Error calling generate_image function: {e}" + +def gradio_interface_modal(): + with modal.enable_output(): + #from config.config import prompts, models # Indirect import + # Gradio Interface + with gr.Blocks( + css=""" + .row-class { + display: flex; + align-items: stretch; /* Ensures all children have the same height */ + } + .row-class > * { + flex: 1; + } + .output-image img { + width: 2500px; /* Force image to fill container width */ + object-fit: cover; /* ACTIVATE FOR IMAGE-FIT CONTAINER */ + + } + """ + ) as demo: + gr.Markdown("# CtB AI Image Generator - Local version (Modal volume)") + with gr.Row(): + # Set default values for dropdowns + prompt_dropdown = gr.Dropdown(choices=[p["alias"] for p in prompts], label="Select Prompt", value=prompts[0]["alias"]) + team_dropdown = gr.Dropdown(choices=["Red", "Blue"], label="Select Team", value="Red") + model_dropdown = gr.Dropdown(choices=[m["alias"] for m in models_modal], label="Select Model", value=models_modal[0]["alias"]) + with gr.Row(): + # Add a text box for custom user input (max 200 characters) + custom_prompt_input = gr.Textbox(label="Custom Prompt (Optional)", placeholder="Enter additional details (max 200 chars)...", max_lines=1, max_length=200) + with gr.Row(elem_classes="row-class"): + cpu_gpu = gr.Dropdown(choices=["CPU", "GPU"], label="Select CPU/GPU", value="GPU") + generate_button = gr.Button("Generate Image") + with gr.Row(): + output_image = gr.Image(elem_classes="output-image", label="Generated Image", show_label=False, scale=1) + with gr.Row(): + status_text = gr.Textbox(label="Status", placeholder="Waiting for input...", interactive=False) + print("Building cudasdasrer...") + + ##Connect the button to the call_generate function + ##had do do it to handle gradio/modal interaction) + generate_button.click( + generate, + inputs=[ + cpu_gpu, + prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input], + outputs=[output_image, status_text], + ) + return demo + +# Create the demo instance +demo = gradio_interface_modal() + +# Only launch if running directly +if __name__ == "__main__": + with modal.enable_output(): + demo.queue().launch() diff --git a/old/old v6/img_gen_modal copy.py b/old/old v6/img_gen_modal copy.py new file mode 100644 index 0000000000000000000000000000000000000000..594b6255a47b5d901c9b85e34a472d2e4f9d3242 --- /dev/null +++ b/old/old v6/img_gen_modal copy.py @@ -0,0 +1,288 @@ +#img_gen_modal.py +import modal +import random +import io +from config.config import prompts, models_modal # Indirect import +import os +import gradio as gr + +#MOVED FROM IMAGE IMPORT LIST +import torch +import sentencepiece +import torch +from huggingface_hub import login +from transformers import AutoTokenizer +import random +from datetime import datetime +from diffusers.callbacks import SDXLCFGCutoffCallback +from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline, AutoencoderTiny, 
AutoencoderKL, DiffusionPipeline, FluxTransformer2DModel, GGUFQuantizationConfig +from PIL import Image +from src.check_dependecies import check_dependencies +#import xformers + + +from live_preview_helpers import flux_pipe_call_that_returns_an_iterable_of_images + + +CACHE_DIR = "/model_cache" + +# Define the Modal image +image = ( + modal.Image.from_registry("nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.9") + .pip_install_from_requirements("requirements.txt") + #modal.Image.debian_slim(python_version="3.9") # Base image + # .apt_install( + # "git", + # ) + # .pip_install( + # "diffusers", + # f"git+https://github.com/huggingface/transformers.git" + # ) + .env( + { + "HF_HUB_ENABLE_HF_TRANSFER": "1", "HF_HOME": "HF_HOME", "HF_HUB_CACHE": CACHE_DIR + } + ) +) + +# Create a Modal app +app = modal.App("img-gen-modal", image=image) +with image.imports(): + import os + +flux_model_vol = modal.Volume.from_name("flux-model-vol", create_if_missing=True) # Reference your volume + +# GPU FUNCTION +@app.function(volumes={"/data": flux_model_vol}, + secrets=[modal.Secret.from_name("huggingface-token")], + gpu="L40S", + timeout = 300 + ) +def generate_image_gpu(prompt_alias, team_color, model_alias, custom_prompt): + image = generate_image(prompt_alias, team_color, model_alias, custom_prompt) + return image, "Image generated successfully! Call the banners!" + + +# CPU FUNCTION +@app.function(volumes={"/data": flux_model_vol}, + secrets=[modal.Secret.from_name("huggingface-token")], + cpu = 1, + timeout = 30000 + ) +def generate_image_cpu(prompt_alias, team_color, model_alias, custom_prompt): + image = generate_image(prompt_alias, team_color, model_alias, custom_prompt) + return image, "Image generated successfully! Call the banners!" + +# MAIN GENERATE IMAGE FUNCTION +def generate_image( + prompt_alias, + team_color, + model_alias, + custom_prompt, + height=360, + width=640, + num_inference_steps=20, + guidance_scale=2.0, + seed=-1, + progress=gr.Progress(track_tqdm=True) # Add progress parameter + ): + with modal.enable_output(): + print("Hello from ctb_modal!") + + check_dependencies() + + # Find the selected prompt and model + try: + prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"] + model_name = next(m for m in models_modal if m["alias"] == model_alias)["name"] + except StopIteration: + return None, "ERROR: Invalid prompt or model selected." 
+ + # Determine the enemy color + enemy_color = "blue" if team_color.lower() == "red" else "red" + + # Print the original prompt and dynamic values for debugging + print("Original Prompt:") + print(prompt) + print(f"Enemy Color: {enemy_color}") + print(f"Team Color: {team_color.lower()}") + + prompt = prompt.format(team_color=team_color.lower(), enemy_color=enemy_color) + + # Print the formatted prompt for debugging + print("\nFormatted Prompt:") + print(prompt) + + # Append the custom prompt (if provided) + if custom_prompt and len(custom_prompt.strip()) > 0: + prompt += " " + custom_prompt.strip() + + # Randomize the seed if needed + if seed == -1: + seed = random.randint(0, 1000000) + + try: + from diffusers import FluxPipeline + print("Initializing HF TOKEN") + hf_token = os.environ["HF_TOKEN"] + print(hf_token) + print("HF TOKEN:") + login(token=hf_token) + print("model_name:") + print(model_name) + + # Use absolute path with leading slash + local_path = f"/data/{model_name}" # Changed from "data/" to "/data/" + print(f"Loading model from local path: {local_path}") + + # Debug: Check if the directory exists and list its contents + if os.path.exists(local_path): + print("Directory exists. Contents:") + for item in os.listdir(local_path): + print(f" - {item}") + else: + print(f"Directory does not exist: {local_path}") + print("Contents of /data:") + print(os.listdir("/data")) + # CHECK FOR TORCH USING CUDA + print("CHECK FOR TORCH USING CUDA") + print(f"CUDA available: {torch.cuda.is_available()}") + if torch.cuda.is_available(): + print("inside if") + print(f"CUDA device count: {torch.cuda.device_count()}") + print(f"Current device: {torch.cuda.current_device()}") + print(f"Device name: {torch.cuda.get_device_name(torch.cuda.current_device())}") + + + ########## INITIALIZING CPU PIPE ########## + + # ########## LIVE PREVIEW FROM REPO DEMO PART 1 ########## + # dtype = torch.bfloat16 + # taef1 = AutoencoderTiny.from_pretrained("/data/taef1", torch_dtype=dtype).to(device) + # good_vae = AutoencoderKL.from_pretrained(local_path, subfolder="vae", torch_dtype=dtype).to(device) + + # pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe) + # ##################################################### + + print("-----LOADING QUANTA-----") + ckpt_path = ( + "/data/FLUX.1-dev-gguf/flux1-dev-Q8_0.gguf" + ) + transformer = FluxTransformer2DModel.from_single_file( + ckpt_path, + quantization_config=GGUFQuantizationConfig(compute_dtype=torch.bfloat16), + torch_dtype=torch.bfloat16, + ) + print("-----INITIALIZING PIPE-----") + pipe = FluxPipeline.from_pretrained( + local_path, + torch_dtype=torch.bfloat16, + transformer=transformer, + #torch_dtype=torch.float16, + #torch_dtype=torch.float32, + #vae=taef1, + local_files_only=True, + ) + #torch.cuda.empty_cache() + + if torch.cuda.is_available(): + print("CUDA available") + print("using gpu") + pipe = pipe.to("cuda") + pipe_message = "CUDA" + pipe.enable_model_cpu_offload() # Use official recommended method + + else: + print("CUDA not available") + print("using cpu") + pipe = pipe.to("cpu") + pipe_message = "CPU" + + + print(f"-----{pipe_message} PIPE INITIALIZED-----") + print(f"Using device: {pipe.device}") + except Exception as e: + print(f"Detailed error: {str(e)}") + return None, f"ERROR: Failed to initialize pipeline.
Details: {e}" + try: + print("-----SENDING IMG GEN TO PIPE-----") + print("-----HOLD ON-----") + + # ################ LIVE PREVIEW FROM DEMO REPO PART2 #################### + # seed = random.randint(0, MAX_SEED) + # generator = torch.Generator().manual_seed(seed) + + # for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images( + # prompt=prompt, + # guidance_scale=guidance_scale, + # num_inference_steps=num_inference_steps, + # width=width, + # height=height, + # generator=generator, + # output_type="pil", + # good_vae=good_vae, + # ): + # yield img, seed + # ############################################################ + + + # ########## LATENTS ########## + # # live preview function to get the latents + # # official reference guideline + # def latents_to_rgb(latents): + # weights = ( + # (60, -60, 25, -70), + # (60, -5, 15, -50), + # (60, 10, -5, -35), + # ) + + # weights_tensor = torch.t(torch.tensor(weights, dtype=latents.dtype).to(latents.device)) + # biases_tensor = torch.tensor((150, 140, 130), dtype=latents.dtype).to(latents.device) + # rgb_tensor = torch.einsum("...lxy,lr -> ...rxy", latents, weights_tensor) + biases_tensor.unsqueeze(-1).unsqueeze(-1) + # image_array = rgb_tensor.clamp(0, 255).byte().cpu().numpy().transpose(1, 2, 0) + + # return Image.fromarray(image_array) + + # def decode_tensors(pipe, step, timestep, callback_kwargs): + # latents = callback_kwargs["latents"] + + # image = latents_to_rgb(latents[0]) + # image.save(f"{step}.png") + + # return callback_kwargs + # ############################################################ + + ########## SENDING IMG GEN TO PIPE - WORKING CODE ########## + image = pipe( + prompt, + guidance_scale=guidance_scale, + num_inference_steps=num_inference_steps, + width=width, + height=height, + max_sequence_length=512, + #callback_on_step_end=decode_tensors, + #callback_on_step_end_tensor_inputs=["latents"], + # seed=seed + ).images[0] + ############################################################# + + print("-----IMAGE GENERATED SUCCESSFULLY!-----") + print(image) + + except Exception as e: + return f"ERROR: Failed to initialize InferenceClient. Details: {e}" + + try: + print("-----SAVING-----") + # Save the image with a timestamped filename + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + output_filename = f"/data/images/{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team_color.lower()}.png" + # Save the image using PIL's save method + image.save(output_filename) + print("-----DONE!-----") + print("-----CALL THE BANNERS!-----") + print(f"File path: {output_filename}") + except Exception as e: + print(f"ERROR: Failed to save image. 
Details: {e}") + # Return the filename and success message + return image \ No newline at end of file diff --git a/old/old v6/img_gen_modal old.py b/old/old v6/img_gen_modal old.py new file mode 100644 index 0000000000000000000000000000000000000000..594b6255a47b5d901c9b85e34a472d2e4f9d3242 --- /dev/null +++ b/old/old v6/img_gen_modal old.py @@ -0,0 +1,288 @@ +#img_gen_modal.py +import modal +import random +import io +from config.config import prompts, models_modal # Indirect import +import os +import gradio as gr + +#MOVED FROM IMAGE IMPORT LIST +import torch +import sentencepiece +import torch +from huggingface_hub import login +from transformers import AutoTokenizer +import random +from datetime import datetime +from diffusers.callbacks import SDXLCFGCutoffCallback +from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline, AutoencoderTiny, AutoencoderKL, DiffusionPipeline, FluxTransformer2DModel, GGUFQuantizationConfig +from PIL import Image +from src.check_dependecies import check_dependencies +#import xformers + + +from live_preview_helpers import flux_pipe_call_that_returns_an_iterable_of_images + + +CACHE_DIR = "/model_cache" + +# Define the Modal image +image = ( + modal.Image.from_registry("nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.9") + .pip_install_from_requirements("requirements.txt") + #modal.Image.debian_slim(python_version="3.9") # Base image + # .apt_install( + # "git", + # ) + # .pip_install( + # "diffusers", + # f"git+https://github.com/huggingface/transformers.git" + # ) + .env( + { + "HF_HUB_ENABLE_HF_TRANSFER": "1", "HF_HOME": "HF_HOME", "HF_HUB_CACHE": CACHE_DIR + } + ) +) + +# Create a Modal app +app = modal.App("img-gen-modal", image=image) +with image.imports(): + import os + +flux_model_vol = modal.Volume.from_name("flux-model-vol", create_if_missing=True) # Reference your volume + +# GPU FUNCTION +@app.function(volumes={"/data": flux_model_vol}, + secrets=[modal.Secret.from_name("huggingface-token")], + gpu="L40S", + timeout = 300 + ) +def generate_image_gpu(prompt_alias, team_color, model_alias, custom_prompt): + image = generate_image(prompt_alias, team_color, model_alias, custom_prompt) + return image, "Image generated successfully! Call the banners!" + + +# CPU FUNCTION +@app.function(volumes={"/data": flux_model_vol}, + secrets=[modal.Secret.from_name("huggingface-token")], + cpu = 1, + timeout = 30000 + ) +def generate_image_cpu(prompt_alias, team_color, model_alias, custom_prompt): + image = generate_image(prompt_alias, team_color, model_alias, custom_prompt) + return image, "Image generated successfully! Call the banners!" + +# MAIN GENERATE IMAGE FUNCTION +def generate_image( + prompt_alias, + team_color, + model_alias, + custom_prompt, + height=360, + width=640, + num_inference_steps=20, + guidance_scale=2.0, + seed=-1, + progress=gr.Progress(track_tqdm=True) # Add progress parameter + ): + with modal.enable_output(): + print("Hello from ctb_modal!") + + check_dependencies() + + # Find the selected prompt and model + try: + prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"] + model_name = next(m for m in models_modal if m["alias"] == model_alias)["name"] + except StopIteration: + return None, "ERROR: Invalid prompt or model selected." 
+ + # Determine the enemy color + enemy_color = "blue" if team_color.lower() == "red" else "red" + + # Print the original prompt and dynamic values for debugging + print("Original Prompt:") + print(prompt) + print(f"Enemy Color: {enemy_color}") + print(f"Team Color: {team_color.lower()}") + + prompt = prompt.format(team_color=team_color.lower(), enemy_color=enemy_color) + + # Print the formatted prompt for debugging + print("\nFormatted Prompt:") + print(prompt) + + # Append the custom prompt (if provided) + if custom_prompt and len(custom_prompt.strip()) > 0: + prompt += " " + custom_prompt.strip() + + # Randomize the seed if needed + if seed == -1: + seed = random.randint(0, 1000000) + + try: + from diffusers import FluxPipeline + print("Initializing HF TOKEN") + hf_token = os.environ["HF_TOKEN"] + print(hf_token) + print("HF TOKEN:") + login(token=hf_token) + print("model_name:") + print(model_name) + + # Use absolute path with leading slash + local_path = f"/data/{model_name}" # Changed from "data/" to "/data/" + print(f"Loading model from local path: {local_path}") + + # Debug: Check if the directory exists and list its contents + if os.path.exists(local_path): + print("Directory exists. Contents:") + for item in os.listdir(local_path): + print(f" - {item}") + else: + print(f"Directory does not exist: {local_path}") + print("Contents of /data:") + print(os.listdir("/data")) + # CHECK FOR TORCH USING CUDA + print("CHECK FOR TORCH USING CUDA") + print(f"CUDA available: {torch.cuda.is_available()}") + if torch.cuda.is_available(): + print("inside if") + print(f"CUDA device count: {torch.cuda.device_count()}") + print(f"Current device: {torch.cuda.current_device()}") + print(f"Device name: {torch.cuda.get_device_name(torch.cuda.current_device())}") + + + ########## INITIALIZING CPU PIPE ########## + + # ########## LIVE PREVIEW FROM REPO DEMO PART 1 ########## + # dtype = torch.bfloat16 + # taef1 = AutoencoderTiny.from_pretrained("/data/taef1", torch_dtype=dtype).to(device) + # good_vae = AutoencoderKL.from_pretrained(local_path, subfolder="vae", torch_dtype=dtype).to(device) + + # pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe) + # ##################################################### + + print("-----LOADING QUANTA-----") + ckpt_path = ( + "/data/FLUX.1-dev-gguf/flux1-dev-Q8_0.gguf" + ) + transformer = FluxTransformer2DModel.from_single_file( + ckpt_path, + quantization_config=GGUFQuantizationConfig(compute_dtype=torch.bfloat16), + torch_dtype=torch.bfloat16, + ) + print("-----INITIALIZING PIPE-----") + pipe = FluxPipeline.from_pretrained( + local_path, + torch_dtype=torch.bfloat16, + transformer=transformer, + #torch_dtype=torch.float16, + #torch_dtype=torch.float32, + #vae=taef1, + local_files_only=True, + ) + #torch.cuda.empty_cache() + + if torch.cuda.is_available(): + print("CUDA available") + print("using gpu") + pipe = pipe.to("cuda") + pipe_message = "CUDA" + pipe.enable_model_cpu_offload() # Use official recommended method + + else: + print("CUDA not available") + print("using cpu") + pipe = pipe.to("cpu") + pipe_message = "CPU" + + + print(f"-----{pipe_message} PIPE INITIALIZED-----") + print(f"Using device: {pipe.device}") + except Exception as e: + print(f"Detailed error: {str(e)}") + return None, f"ERROR: Failed to initialize pipeline.
Details: {e}" + try: + print("-----SENDING IMG GEN TO PIPE-----") + print("-----HOLD ON-----") + + # ################ LIVE PREVIEW FROM DEMO REPO PART2 #################### + # seed = random.randint(0, MAX_SEED) + # generator = torch.Generator().manual_seed(seed) + + # for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images( + # prompt=prompt, + # guidance_scale=guidance_scale, + # num_inference_steps=num_inference_steps, + # width=width, + # height=height, + # generator=generator, + # output_type="pil", + # good_vae=good_vae, + # ): + # yield img, seed + # ############################################################ + + + # ########## LATENTS ########## + # # live preview function to get the latents + # # official reference guideline + # def latents_to_rgb(latents): + # weights = ( + # (60, -60, 25, -70), + # (60, -5, 15, -50), + # (60, 10, -5, -35), + # ) + + # weights_tensor = torch.t(torch.tensor(weights, dtype=latents.dtype).to(latents.device)) + # biases_tensor = torch.tensor((150, 140, 130), dtype=latents.dtype).to(latents.device) + # rgb_tensor = torch.einsum("...lxy,lr -> ...rxy", latents, weights_tensor) + biases_tensor.unsqueeze(-1).unsqueeze(-1) + # image_array = rgb_tensor.clamp(0, 255).byte().cpu().numpy().transpose(1, 2, 0) + + # return Image.fromarray(image_array) + + # def decode_tensors(pipe, step, timestep, callback_kwargs): + # latents = callback_kwargs["latents"] + + # image = latents_to_rgb(latents[0]) + # image.save(f"{step}.png") + + # return callback_kwargs + # ############################################################ + + ########## SENDING IMG GEN TO PIPE - WORKING CODE ########## + image = pipe( + prompt, + guidance_scale=guidance_scale, + num_inference_steps=num_inference_steps, + width=width, + height=height, + max_sequence_length=512, + #callback_on_step_end=decode_tensors, + #callback_on_step_end_tensor_inputs=["latents"], + # seed=seed + ).images[0] + ############################################################# + + print("-----IMAGE GENERATED SUCCESSFULLY!-----") + print(image) + + except Exception as e: + return f"ERROR: Failed to initialize InferenceClient. Details: {e}" + + try: + print("-----SAVING-----") + # Save the image with a timestamped filename + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + output_filename = f"/data/images/{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team_color.lower()}.png" + # Save the image using PIL's save method + image.save(output_filename) + print("-----DONE!-----") + print("-----CALL THE BANNERS!-----") + print(f"File path: {output_filename}") + except Exception as e: + print(f"ERROR: Failed to save image. 
Details: {e}") + # Return the filename and success message + return image \ No newline at end of file diff --git a/old/old v6/models old.py b/old/old v6/models old.py new file mode 100644 index 0000000000000000000000000000000000000000..5bde7962bdce5b89342d47bc554932b49564fb9e --- /dev/null +++ b/old/old v6/models old.py @@ -0,0 +1,12 @@ +# List of models with aliases +models = [ + #{"alias": "FLUX.1-devold", "name": "black-forest-labs/FLUX.1-dev"}, + #{"alias": "Midjourney", "name": "strangerzonehf/Flux-Midjourney-Mix2-LoRA"}, + {"alias": "FLUX.1-dev", "name": "FLUX.1-dev"}, + {"alias": "FLUX.1-schnell", "name": "black-forest-labs/FLUX.1-schnell"}, + #{"alias": "Andre", "name": "Andre"} + + +] +# Debugging: Print prompt and model options +#print("Model Options:", [m["alias"] for m in models]) \ No newline at end of file diff --git a/old/old v6/models.py b/old/old v6/models.py new file mode 100644 index 0000000000000000000000000000000000000000..f34d2b5914af377612977f223aa2985b875f3898 --- /dev/null +++ b/old/old v6/models.py @@ -0,0 +1,16 @@ +# List of models with aliases +models = [ + {"alias": "FLUX.1-dev", "name": "black-forest-labs/FLUX.1-dev"}, + {"alias": "FLUX.1-dev_modal_local", "name": "FLUX.1-dev"}, + {"alias": "Midjourney", "name": "strangerzonehf/Flux-Midjourney-Mix2-LoRA"}, + {"alias": "FLUX.1-schnell", "name": "black-forest-labs/FLUX.1-schnell"}, + {"alias": "FLUX.1-schnell_modal_local", "name": "FLUX.1-schnell"}, +] + +models_modal = [ + {"alias": "FLUX.1-dev_modal_local", "name": "FLUX.1-dev"}, + {"alias": "FLUX.1-schnell_modal_local", "name": "FLUX.1-schnell"}, + #{"alias": "FLUX.1-dev", "name": "black-forest-labs/FLUX.1-dev"}, + #{"alias": "Midjourney", "name": "strangerzonehf/Flux-Midjourney-Mix2-LoRA"}, + #{"alias": "FLUX.1-schnell", "name": "black-forest-labs/FLUX.1-schnell"}, +] \ No newline at end of file diff --git a/old/old_live_test/app_LIVE2.py b/old/old_live_test/app_LIVE2.py new file mode 100644 index 0000000000000000000000000000000000000000..7fc330debbcabae2fca76f8f76c2b548eb2d5363 --- /dev/null +++ b/old/old_live_test/app_LIVE2.py @@ -0,0 +1,117 @@ +import gradio as gr +import numpy as np +#import spaces +from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler, AutoencoderTiny, AutoencoderKL +from transformers import CLIPTextModel, CLIPTokenizer,T5EncoderModel, T5TokenizerFast +#from live_preview_helpers import calculate_shift, retrieve_timesteps, flux_pipe_call_that_returns_an_iterable_of_images +import modal + +MAX_SEED = np.iinfo(np.int32).max +MAX_IMAGE_SIZE = 640 + +examples = [ + "a tiny astronaut hatching from an egg on the moon", + "a cat holding a sign that says hello world", + "an anime illustration of a wiener schnitzel", +] + +css=""" +#col-container { + margin: 0 auto; + max-width: 520px; +} +""" + +with gr.Blocks(css=css) as demo: + + with gr.Column(elem_id="col-container"): + gr.Markdown(f"""# FLUX.1 [dev] +12B param rectified flow transformer guidance-distilled from [FLUX.1 [pro]](https://blackforestlabs.ai/) +[[non-commercial license](https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md)] [[blog](https://blackforestlabs.ai/announcing-black-forest-labs/)] [[model](https://huggingface.co/black-forest-labs/FLUX.1-dev)] + """) + + with gr.Row(): + + prompt = gr.Text( + label="Prompt", + show_label=False, + max_lines=1, + placeholder="Enter your prompt", + container=False, + ) + + run_button = gr.Button("Run", scale=0) + + result = gr.Image(label="Result", show_label=False) + + with gr.Accordion("Advanced 
Settings", open=False): + + seed = gr.Slider( + label="Seed", + minimum=0, + maximum=MAX_SEED, + step=1, + value=0, + ) + + randomize_seed = gr.Checkbox(label="Randomize seed", value=True) + + with gr.Row(): + + width = gr.Slider( + label="Width", + minimum=256, + maximum=MAX_IMAGE_SIZE, + step=32, + value=1024, + ) + + height = gr.Slider( + label="Height", + minimum=256, + maximum=MAX_IMAGE_SIZE, + step=32, + value=1024, + ) + + with gr.Row(): + + guidance_scale = gr.Slider( + label="Guidance Scale", + minimum=1, + maximum=15, + step=0.1, + value=3.5, + ) + + num_inference_steps = gr.Slider( + label="Number of inference steps", + minimum=1, + maximum=50, + step=1, + value=28, + ) + + f = modal.Function.from_name("live-preview-test", "infer") + gr.Examples( + examples = examples, + fn = f.remote_gen, + inputs = [prompt], + outputs = [result, seed], + cache_examples="lazy" + ) + # def generate(prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps): + # f = modal.Function.from_name("live-preview-test", "infer") + # # Import the remote function + # result, seed = f.remote(prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps) + # return result, seed + + + gr.on( + triggers=[run_button.click, prompt.submit], + fn = f.remote_gen, + inputs = [prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps], + outputs = [result, seed] + ) + +demo.launch() \ No newline at end of file diff --git a/old/old_live_test/img_gen_modal_LIVE1.py b/old/old_live_test/img_gen_modal_LIVE1.py new file mode 100644 index 0000000000000000000000000000000000000000..cb67d2552c3fc59d17efcdc0ba8205e8803bbc67 --- /dev/null +++ b/old/old_live_test/img_gen_modal_LIVE1.py @@ -0,0 +1,275 @@ +#img_gen_modal.py +import modal +import random +import io +from config.config import prompts, models # Indirect import +import os +import gradio as gr +import torch +import sentencepiece +import torch +from huggingface_hub import login +from transformers import AutoTokenizer +import random +from datetime import datetime + +####### IMPORTS FOR LIVE PREVIEW +import numpy as np +from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler, AutoencoderTiny, AutoencoderKL +from transformers import CLIPTextModel, CLIPTokenizer,T5EncoderModel, T5TokenizerFast +from live_preview_helpers import calculate_shift, retrieve_timesteps, flux_pipe_call_that_returns_an_iterable_of_images + + + + +######### LIVE PREVIEW TEST 1/3 ########## +from live_preview_helpers import flux_pipe_call_that_returns_an_iterable_of_images +########################################## + + +CACHE_DIR = "/model_cache" + +# Define the Modal image +image = ( + modal.Image.from_registry("nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.9").pip_install_from_requirements("requirements.txt") + #modal.Image.debian_slim(python_version="3.9") # Base image + + .env( + { + "HF_HUB_ENABLE_HF_TRANSFER": "1", "HF_HOME": "HF_HOME", "HF_HUB_CACHE": CACHE_DIR + } + ) +) + +# Create a Modal app +app = modal.App("img-gen-modal", image=image) +with image.imports(): + import os + +flux_model_vol = modal.Volume.from_name("flux-model-vol", create_if_missing=True) # Reference your volume + + +# GPU FUNCTION +@app.function(volumes={"/data": flux_model_vol}, + secrets=[modal.Secret.from_name("huggingface-token")], + gpu="L40S", + timeout = 300 + ) +def generate_image_gpu(prompt_alias, team_color, model_alias, custom_prompt): + image = generate_image(prompt_alias, team_color, model_alias, custom_prompt) + return 
image, "Image generated successfully! Call the banners!" + + +# CPU FUNCTION +@app.function(volumes={"/data": flux_model_vol}, + secrets=[modal.Secret.from_name("huggingface-token")], + cpu = 1, + timeout = 300 + ) +def generate_image_cpu(prompt_alias, team_color, model_alias, custom_prompt): + image = generate_image(prompt_alias, team_color, model_alias, custom_prompt) + return image, "Image generated successfully! Call the banners!" + +# MAIN GENERATE IMAGE FUNCTION +def generate_image( + prompt_alias, + team_color, + model_alias, + custom_prompt, + height=360, + width=640, + num_inference_steps=20, + guidance_scale=2.0, + seed=-1, + progress=gr.Progress(track_tqdm=True) # Add progress parameter + ): + with modal.enable_output(): + print("Hello from ctb_modal!") + + + ########### LIVE PREVIEW 2/3 ################## + dtype = torch.bfloat16 + device = "cuda" if torch.cuda.is_available() else "cpu" + + taef1 = AutoencoderTiny.from_pretrained("/data/taef1", torch_dtype=dtype).to(device) + good_vae = AutoencoderKL.from_pretrained("/data/FLUX.1-dev", subfolder="vae", torch_dtype=dtype).to(device) + pipe_LIVE = DiffusionPipeline.from_pretrained("/data/FLUX.1-dev", torch_dtype=dtype, vae=taef1).to(device) + torch.cuda.empty_cache() + + MAX_SEED = np.iinfo(np.int32).max + MAX_IMAGE_SIZE = 2048 + + pipe_LIVE.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe_LIVE) + ################################################# + + print("Running debug check...") + # Debug function to check installed packages + def check_dependencies(): + packages = [ + "diffusers", # For Stable Diffusion + "transformers", # For Hugging Face models + "torch", # PyTorch + "accelerate", # For distributed training/inference + "gradio", # For the Gradio interface (updated to latest version) + "safetensors", # For safe model loading + "pillow", # For image processing + "sentencepiece" + ] + + for package in packages: + try: + import importlib + module = importlib.import_module(package) + print(f" {package} is installed. Version:") + except ImportError: + print(f" {package} is NOT installed.") + + check_dependencies() + + # Find the selected prompt and model + try: + prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"] + model_name = next(m for m in models if m["alias"] == model_alias)["name"] + except StopIteration: + return None, "ERROR: Invalid prompt or model selected." 
+ + # Determine the enemy color + enemy_color = "blue" if team_color.lower() == "red" else "red" + + # Print the original prompt and dynamic values for debugging + print("Original Prompt:") + print(prompt) + print(f"Enemy Color: {enemy_color}") + print(f"Team Color: {team_color.lower()}") + + prompt = prompt.format(team_color=team_color.lower(), enemy_color=enemy_color) + + # Print the formatted prompt for debugging + print("\nFormatted Prompt:") + print(prompt) + + # Append the custom prompt (if provided) + if custom_prompt and len(custom_prompt.strip()) > 0: + prompt += " " + custom_prompt.strip() + + # Randomize the seed if needed + if seed == -1: + seed = random.randint(0, 1000000) + + try: + #from diffusers import FluxPipeline + print("Initializing HF TOKEN") + hf_token = os.environ["HF_TOKEN"] + print(hf_token) + print("HF TOKEN:") + login(token=hf_token) + print("model_name:") + print(model_name) + + # Use absolute path with leading slash + local_path = f"/data/{model_name}" # Changed from "data/" to "/data/" + print(f"Loading model from local path: {local_path}") + + # Debug: Check if the directory exists and list its contents + if os.path.exists(local_path): + print("Directory exists. Contents:") + for item in os.listdir(local_path): + print(f" - {item}") + else: + print(f"Directory does not exist: {local_path}") + print("Contents of /data:") + print(os.listdir("/data")) + # CHECK FOR TORCH USING CUDA + print("CHECK FOR TORCH USING CUDA") + print(f"CUDA available: {torch.cuda.is_available()}") + if torch.cuda.is_available(): + print("inside if") + print(f"CUDA device count: {torch.cuda.device_count()}") + print(f"Current device: {torch.cuda.current_device()}") + print(f"Device name: {torch.cuda.get_device_name(torch.cuda.current_device())}") + + + + # ########## INITIALIZING CPU PIPE ########## + # print("-----INITIALIZING PIPE-----") + # pipe = FluxPipeline.from_pretrained( + # local_path, + # torch_dtype=torch.bfloat16, + # #torch_dtype=torch.float16, + # #torch_dtype=torch.float32, + # local_files_only=True + # ) + # if torch.cuda.is_available(): + # print("CUDA available") + # print("using gpu") + # pipe = pipe.to("cuda") + # pipe_message = "CUDA" + # else: + # print("CUDA not available") + # print("using cpu") + # pipe = pipe.to("cpu") + # pipe_message = "CPU" + # # pipe.enable_model_cpu_offload() # Use official recommended method + # print(f"-----{pipe_message} PIPE INITIALIZED-----") + # print(f"Using device: {pipe.device}") + # except Exception as e: + # print(f"Detailed error: {str(e)}") + # return None, f"ERROR: Failed to initialize PIPE2. 
Details: {e}" + # try: + # print("-----SENDING IMG GEN TO PIPE-----") + # print("-----HOLD ON-----") + # ################################################ + + + ################ LIVE PREVIEW TEST 3/3 #################### + print("-----SENDING IMG GEN TO PIPE LIVE-----") + print("-----HOLD ON-----") + seed = random.randint(0, MAX_SEED) + generator = torch.Generator().manual_seed(seed) + + for image in pipe_LIVE.flux_pipe_call_that_returns_an_iterable_of_images( + prompt=prompt, + guidance_scale=guidance_scale, + num_inference_steps=num_inference_steps, + width=width, + height=height, + generator=generator, + output_type="pil", + good_vae=good_vae, + ): + yield image, "Update" + ############################################################ + + + # ########## SENDING IMG GEN TO PIPE - WORKING CODE ########## + # image = pipe( + # prompt, + # guidance_scale=guidance_scale, + # num_inference_steps=num_inference_steps, + # width=width, + # height=height, + # max_sequence_length=512, + # # seed=seed + # ).images[0] + # ############################################################# + + print("-----IMAGE GENERATED SUCCESSFULLY!-----") + print(image) + + except Exception as e: + return f"ERROR: Failed to initialize InferenceClient. Details: {e}" + + try: + print("-----SAVING-----") + print("-----DONE!-----") + print("-----CALL THE BANNERS!-----") + # Save the image with a timestamped filename + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + output_filename = f"/data/images/{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team_color.lower()}.png" + # Save the image using PIL's save method + image.save(output_filename) + print(f"File path: {output_filename}") + except Exception as e: + print(f"ERROR: Failed to save image. 
Details: {e}") + # Return the filename and success message + return image \ No newline at end of file diff --git a/old/old_live_test/img_gen_modal_LIVE2.py b/old/old_live_test/img_gen_modal_LIVE2.py new file mode 100644 index 0000000000000000000000000000000000000000..372af37dccfd159d359711d09791682095a63edc --- /dev/null +++ b/old/old_live_test/img_gen_modal_LIVE2.py @@ -0,0 +1,105 @@ +#img_gen_modal.py +import modal +import random +import io +from config.config import prompts, models # Indirect import +import os +import gradio as gr + +#MOVED FROM IMAGE IMPORT LIST +import torch +import sentencepiece +import torch +from huggingface_hub import login +from transformers import AutoTokenizer +import random +from datetime import datetime +#import xformers + +import gradio as gr +import numpy as np +#import spaces +from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler, AutoencoderTiny, AutoencoderKL +from transformers import CLIPTextModel, CLIPTokenizer,T5EncoderModel, T5TokenizerFast +from live_preview_helpers import calculate_shift, retrieve_timesteps, flux_pipe_call_that_returns_an_iterable_of_images + + +CACHE_DIR = "/model_cache" + +MAX_SEED = np.iinfo(np.int32).max +MAX_IMAGE_SIZE = 2048 + +# Define the Modal image +image = ( + modal.Image.from_registry("nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.9").pip_install_from_requirements("requirements.txt") + #modal.Image.debian_slim(python_version="3.9") # Base image + + # .apt_install( + # "git", + # ) + # .pip_install( + # "diffusers", + # "transformers", + # "xformers", + # "torch", + # "accelerate", + # "gradio>=4.44.1", + # "safetensors", + # "pillow", + # "sentencepiece", + # "hf_transfer", + # "huggingface_hub[hf_transfer]", + # "aria2", # aria2 for ultra-fast parallel downloads + # f"git+https://github.com/huggingface/transformers.git" + # ) + .env( + { + "HF_HUB_ENABLE_HF_TRANSFER": "1", "HF_HOME": "HF_HOME", "HF_HUB_CACHE": CACHE_DIR + } + ) +) + +# Create a Modal app +app = modal.App("live-preview-test", image=image) +with image.imports(): + import os + +flux_model_vol = modal.Volume.from_name("flux-model-vol", create_if_missing=True) # Reference your volume + + +# GPU FUNCTION +@app.function(volumes={"/data": flux_model_vol}, + secrets=[modal.Secret.from_name("huggingface-token")], + gpu="L40S", + timeout = 300 + ) +def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, guidance_scale=3.5, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)): + + dtype = torch.bfloat16 + device = "cuda" if torch.cuda.is_available() else "cpu" + + taef1 = AutoencoderTiny.from_pretrained("/data/taef1", torch_dtype=dtype).to(device) + good_vae = AutoencoderKL.from_pretrained("/data/FLUX.1-dev", subfolder="vae", torch_dtype=dtype).to(device) + pipe = DiffusionPipeline.from_pretrained("/data/FLUX.1-dev", torch_dtype=dtype, vae=taef1).to(device) + torch.cuda.empty_cache() + + pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe) + + + if randomize_seed: + seed = random.randint(0, MAX_SEED) + generator = torch.Generator().manual_seed(seed) + + for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images( + prompt=prompt, + guidance_scale=guidance_scale, + num_inference_steps=num_inference_steps, + width=width, + height=height, + generator=generator, + output_type="pil", + good_vae=good_vae, + ): + yield img, seed + + diff --git a/old/old_modal_volumes_version/.DS_Store b/old/old_modal_volumes_version/.DS_Store new file mode 
100644 index 0000000000000000000000000000000000000000..5a92a0b310e083a6500f9ee8cdf002865e930123 Binary files /dev/null and b/old/old_modal_volumes_version/.DS_Store differ diff --git a/old/old_modal_volumes_version/__pycache__/ctb_modal.cpython-310.pyc b/old/old_modal_volumes_version/__pycache__/ctb_modal.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b9304bd53104341bf3085088033bb97ddc592225 Binary files /dev/null and b/old/old_modal_volumes_version/__pycache__/ctb_modal.cpython-310.pyc differ diff --git a/old/old_modal_volumes_version/__pycache__/ctb_modal2.cpython-310.pyc b/old/old_modal_volumes_version/__pycache__/ctb_modal2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1a3ecea2c3f8d943e15ca71ba438bc02ebc0a722 Binary files /dev/null and b/old/old_modal_volumes_version/__pycache__/ctb_modal2.cpython-310.pyc differ diff --git a/old/old_modal_volumes_version/old 1.0.1/ctb_modal.py b/old/old_modal_volumes_version/old 1.0.1/ctb_modal.py new file mode 100644 index 0000000000000000000000000000000000000000..ed5a6d359d456089ea4cfc333f0f11a43e67c4c8 --- /dev/null +++ b/old/old_modal_volumes_version/old 1.0.1/ctb_modal.py @@ -0,0 +1,41 @@ +import modal +from src.gradio_interface_modal import demo +from config.config import prompts, models, api_token # Indirect import +import gradio as gr +import importlib + +#FUNZIONA MA NON SI VEDE L'IMMAGINE!! + +#Entry point for local execution +#Create the Modal app +app = modal.App("ctb-image-generator-modal", secrets=[modal.Secret.from_name("huggingface-token")]) +@app.local_entrypoint() +def main(): + #with modal.enable_output(): + print("Hello from ctb_modal!") + print("Running debug check...") + # Debug function to check installed packages + def check_dependencies(): + packages = [ + "diffusers", # For Stable Diffusion + "transformers", # For Hugging Face models + "torch", # PyTorch + "accelerate", # For distributed training/inference + "gradio", # For the Gradio interface (updated to latest version) + "safetensors", # For safe model loading + "pillow", # For image processing + "sentencepiece" + ] + + for package in packages: + try: + import importlib + module = importlib.import_module(package) + print(f" {package} is installed. 
Version:") + except ImportError: + print(f" {package} is NOT installed.") + + check_dependencies() + print("Launching Gradio interface...") + # demo.launch() + demo.launch() \ No newline at end of file diff --git a/old/old_modal_volumes_version/old 1.0.1/ctb_modal2 old.py b/old/old_modal_volumes_version/old 1.0.1/ctb_modal2 old.py new file mode 100644 index 0000000000000000000000000000000000000000..b7af17e66854078afd7eaadefbac45a496753e1b --- /dev/null +++ b/old/old_modal_volumes_version/old 1.0.1/ctb_modal2 old.py @@ -0,0 +1,41 @@ +import modal +from src.gradio_interface_modal import gradio_interface_modal +from config.config import prompts, models # Indirect import + +# Create the Modal app +app = modal.App("ctb-image-generator") +print("Modal app created.") + +# Entry point for local execution +@app.local_entrypoint() +def main(): + with modal.enable_output(): + print("Hello from ctb_modal!") + print("Running debug check...") + # Debug function to check installed packages + def check_dependencies(): + packages = [ + "diffusers", # For Stable Diffusion + "transformers", # For Hugging Face models + "torch", # PyTorch + "accelerate", # For distributed training/inference + "gradio", # For the Gradio interface (updated to latest version) + "safetensors", # For safe model loading + "pillow", # For image processing + "sentencepiece" + ] + + for package in packages: + try: + import importlib + module = importlib.import_module(package) + print(f" {package} is installed. Version: {module.__version__}") + except ImportError: + print(f" {package} is NOT installed.") + + check_dependencies() + print("Launching Gradio interface...") + # demo.launch() + gradio_interface_modal() + + diff --git a/old/old_modal_volumes_version/old 1.0.1/src/gradio_interface_modal.py b/old/old_modal_volumes_version/old 1.0.1/src/gradio_interface_modal.py new file mode 100644 index 0000000000000000000000000000000000000000..616b81bd47f6d6439f59eba4dac8287a72b1bfd5 --- /dev/null +++ b/old/old_modal_volumes_version/old 1.0.1/src/gradio_interface_modal.py @@ -0,0 +1,57 @@ +# gradio_interface.py +import gradio as gr +import modal +from config.config import prompts, models # Indirect import +#from img_gen import generate_image + +print("Hello from gradio_interface_head!") + +# Modal remote function synchronously +def generate(prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input): + # Debug: Print a message instead of generating an image + debug_message = f"Debug: Button clicked! 
Inputs - Prompt: {prompt_dropdown}, Team: {team_dropdown}, Model: {model_dropdown}, Custom Prompt: {custom_prompt_input}" + print(debug_message) # Print to console for debugging + try: + # Import the remote function + f = modal.Function.from_name("img-gen-modal", "generate_image") + image_path, message = f.remote(prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input) + return image_path, message + except Exception as e: + return None, f"An error occurred: {e}" + + +#def gradio_interface_modal(): +with modal.enable_output(): + from config.config import prompts, models # Indirect import + # Gradio Interface + with gr.Blocks() as demo: + gr.Markdown("# CtB AI Image Generator") + with gr.Row(): + # Set default values for dropdowns + prompt_dropdown = gr.Dropdown(choices=[p["alias"] for p in prompts], label="Select Prompt", value=prompts[0]["alias"]) + team_dropdown = gr.Dropdown(choices=["Red", "Blue"], label="Select Team", value="Red") + model_dropdown = gr.Dropdown(choices=[m["alias"] for m in models], label="Select Model", value=models[0]["alias"]) + with gr.Row(): + # Add a text box for custom user input (max 200 characters) + custom_prompt_input = gr.Textbox(label="Custom Prompt (Optional)", placeholder="Enter additional details (max 200 chars)...", max_lines=1, max_length=200) + with gr.Row(): + generate_button = gr.Button("Generate Image") + output = gr.Textbox(label="Output") + with gr.Row(): + output_image = gr.Image(label="Generated Image") + with gr.Row(): + status_text = gr.Textbox(label="Status", placeholder="Waiting for input...", interactive=False) + print("Building cudasdasrer...") + + #Connect the button to the call_generate function + #had do do it to handle gradio/modal interaction) + generate_button.click( + generate, + inputs=[prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input], + outputs=[output_image, status_text] + ) + + + + + diff --git a/old/old_modal_volumes_version/old 1.0.1/src/gradio_interface_modal2.py b/old/old_modal_volumes_version/old 1.0.1/src/gradio_interface_modal2.py new file mode 100644 index 0000000000000000000000000000000000000000..d29c61412e119e8bb84af1301f65eee49606b1df --- /dev/null +++ b/old/old_modal_volumes_version/old 1.0.1/src/gradio_interface_modal2.py @@ -0,0 +1,58 @@ +# gradio_interface.py +import gradio as gr +import modal +from config.config import prompts, models # Indirect import +#from src.img_gen_modal import generate + +print("Hello from gradio_interface_head!") + +# Modal remote function synchronously +def generate(prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input): + # Debug: Print a message instead of generating an image + debug_message = f"Debug: Button clicked! 
Inputs - Prompt: {prompt_dropdown}, Team: {team_dropdown}, Model: {model_dropdown}, Custom Prompt: {custom_prompt_input}" + print(debug_message) # Print to console for debugging + try: + # Import the remote function + f = modal.Function.from_name("img-gen-modal", "generate_image") + image_path, message = f.remote(prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input) + return image_path, message + except Exception as e: + return None, f"An error occurred: {e}" + + +def gradio_interface_modal(): + with modal.enable_output(): + from config.config import prompts, models # Indirect import + # Gradio Interface + with gr.Blocks() as demo: + gr.Markdown("# CtB AI Image Generator") + with gr.Row(): + # Set default values for dropdowns + prompt_dropdown = gr.Dropdown(choices=[p["alias"] for p in prompts], label="Select Prompt", value=prompts[0]["alias"]) + team_dropdown = gr.Dropdown(choices=["Red", "Blue"], label="Select Team", value="Red") + model_dropdown = gr.Dropdown(choices=[m["alias"] for m in models], label="Select Model", value=models[0]["alias"]) + with gr.Row(): + # Add a text box for custom user input (max 200 characters) + custom_prompt_input = gr.Textbox(label="Custom Prompt (Optional)", placeholder="Enter additional details (max 200 chars)...", max_lines=1, max_length=200) + with gr.Row(): + generate_button = gr.Button("Generate Image") + output = gr.Textbox(label="Output") + with gr.Row(): + output_image = gr.Image(label="Generated Image") + with gr.Row(): + status_text = gr.Textbox(label="Status", placeholder="Waiting for input...", interactive=False) + print("Building cudasdasrer...") + + #Connect the button to the call_generate function + #had do do it to handle gradio/modal interaction) + generate_button.click( + generate, + inputs=[prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input], + outputs=[output_image, status_text] + ) + + + + + + diff --git a/old/old_modal_volumes_version/old 1.0.1/src/img_gen_modal old.py b/old/old_modal_volumes_version/old 1.0.1/src/img_gen_modal old.py new file mode 100644 index 0000000000000000000000000000000000000000..d831d589e0dfe9bbfd563682533220aa4ef9bfe7 --- /dev/null +++ b/old/old_modal_volumes_version/old 1.0.1/src/img_gen_modal old.py @@ -0,0 +1,143 @@ +# img_gen.py +import sys +import os +import random +from huggingface_hub import InferenceClient +from datetime import datetime +from config.config import models, prompts, api_token # Direct import +import modal + + +# Define the Modal image +image = ( + modal.Image.from_registry("nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.9") + .apt_install( + "git", + ) + .pip_install( + "diffusers", + "transformers", + "torch", + "accelerate", + "gradio>=4.44.1", + "safetensors", + "pillow", + "sentencepiece", + "hf_transfer", + "huggingface_hub[hf_transfer]", + "aria2", # aria2 for ultra-fast parallel downloads + ) + .env( + { + "HF_HUB_ENABLE_HF_TRANSFER": "1", "HF_HOME": "HF_HOME" + } + ) +) + +# Create a Modal app +app = modal.App("img-gen-modal", image=image) +with image.imports(): + import diffusers + import os + import gradio + import torch + import sentencepiece + import transformers + from huggingface_hub import InferenceClient, login + + +@app.function( + secrets=[modal.Secret.from_name("huggingface-token")], + gpu="t4", + timeout=600 +) +def generate_image(prompt_alias, team_color, model_alias, custom_prompt, height=360, width=640, num_inference_steps=20, guidance_scale=2.0, seed=-1): + import diffusers + import os + import gradio + import torch + import 
sentencepiece + import transformers + from huggingface_hub import InferenceClient, login + + # Find the selected prompt and model + try: + prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"] + model_name = next(m for m in models if m["alias"] == model_alias)["name"] + except StopIteration: + return "default_image.png", "ERROR: Invalid prompt or model selected." + + # Determine the enemy color + enemy_color = "blue" if team_color.lower() == "red" else "red" + + # if team.lower() == "red": + # winning_team_text = " The winning army is dressed in red armor and banners." + # elif team.lower() == "blue": + # winning_team_text = " The winning army is dressed in blue armor and banners." + + # Print the original prompt and dynamic values for debugging + print("Original Prompt:") + print(prompt) + print(f"Enemy Color: {enemy_color}") + print(f"Team Color: {team_color.lower()}") + + prompt = prompt.format(team_color=team_color.lower(), enemy_color=enemy_color) + + # Print the formatted prompt for debugging + print("\nFormatted Prompt:") + print(prompt) + + # Append the custom prompt (if provided) + if custom_prompt and len(custom_prompt.strip()) > 0: + prompt += " " + custom_prompt.strip() + + # Randomize the seed if needed + if seed == -1: + seed = random.randint(0, 1000000) + + # Initialize the InferenceClient + try: + print ("starting inference") + print("token:") + print (api_token) + client = InferenceClient(model_name, token=api_token) + except Exception as e: + return "default_image.png", f"ERROR: Failed to initialize InferenceClient. Details: {e}" + + #Generate the image + try: + image = client.text_to_image( + prompt, + guidance_scale=guidance_scale, + num_inference_steps=num_inference_steps, + width=width, + height=height, + seed=seed + ) + except Exception as e: + return "default_image.png", f"ERROR: Failed to generate image. Details: {e}" + print ("done") + print(image) + image.save("output_image.png") + + #Save the image with a timestamped filename + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + output_filename = f"{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team_color.lower()}.png" + try: + # Save the image to the current working directory + image.save(output_filename) + print(f"Image saved as: {output_filename}") + except Exception as e: + return "default_image.png", f"ERROR: Failed to save image. Details: {e}" + # Return the filename and success message + print(f"{output_filename}, Image generated successfully!") + # Return the filename and success message + return output_filename, "Image generated successfully!" 
+ + # Display the saved image locally + if output_filename: + from PIL import Image + img = Image.open(output_filename) + img.show() + else: + print(message) diff --git a/old/old_modal_volumes_version/old 1.0.1/src/img_gen_modal.py b/old/old_modal_volumes_version/old 1.0.1/src/img_gen_modal.py new file mode 100644 index 0000000000000000000000000000000000000000..d831d589e0dfe9bbfd563682533220aa4ef9bfe7 --- /dev/null +++ b/old/old_modal_volumes_version/old 1.0.1/src/img_gen_modal.py @@ -0,0 +1,143 @@ +# img_gen.py +import sys +import os +import random +from huggingface_hub import InferenceClient +from datetime import datetime +from config.config import models, prompts, api_token # Direct import +import modal + + +# Define the Modal image +image = ( + modal.Image.from_registry("nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.9") + .apt_install( + "git", + ) + .pip_install( + "diffusers", + "transformers", + "torch", + "accelerate", + "gradio>=4.44.1", + "safetensors", + "pillow", + "sentencepiece", + "hf_transfer", + "huggingface_hub[hf_transfer]", + "aria2", # aria2 for ultra-fast parallel downloads + ) + .env( + { + "HF_HUB_ENABLE_HF_TRANSFER": "1", "HF_HOME": "HF_HOME" + } + ) +) + +# Create a Modal app +app = modal.App("img-gen-modal", image=image) +with image.imports(): + import diffusers + import os + import gradio + import torch + import sentencepiece + import transformers + from huggingface_hub import InferenceClient, login + + +@app.function( + secrets=[modal.Secret.from_name("huggingface-token")], + gpu="t4", + timeout=600 +) +def generate_image(prompt_alias, team_color, model_alias, custom_prompt, height=360, width=640, num_inference_steps=20, guidance_scale=2.0, seed=-1): + import diffusers + import os + import gradio + import torch + import sentencepiece + import transformers + from huggingface_hub import InferenceClient, login + + # Find the selected prompt and model + try: + prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"] + model_name = next(m for m in models if m["alias"] == model_alias)["name"] + except StopIteration: + return "default_image.png", "ERROR: Invalid prompt or model selected." + + # Determine the enemy color + enemy_color = "blue" if team_color.lower() == "red" else "red" + + # if team.lower() == "red": + # winning_team_text = " The winning army is dressed in red armor and banners." + # elif team.lower() == "blue": + # winning_team_text = " The winning army is dressed in blue armor and banners." + + # Print the original prompt and dynamic values for debugging + print("Original Prompt:") + print(prompt) + print(f"Enemy Color: {enemy_color}") + print(f"Team Color: {team_color.lower()}") + + prompt = prompt.format(team_color=team_color.lower(), enemy_color=enemy_color) + + # Print the formatted prompt for debugging + print("\nFormatted Prompt:") + print(prompt) + + # Append the custom prompt (if provided) + if custom_prompt and len(custom_prompt.strip()) > 0: + prompt += " " + custom_prompt.strip() + + # Randomize the seed if needed + if seed == -1: + seed = random.randint(0, 1000000) + + # Initialize the InferenceClient + try: + print ("starting inference") + print("token:") + print (api_token) + client = InferenceClient(model_name, token=api_token) + except Exception as e: + return "default_image.png", f"ERROR: Failed to initialize InferenceClient. 
Details: {e}" + + #Generate the image + try: + image = client.text_to_image( + prompt, + guidance_scale=guidance_scale, + num_inference_steps=num_inference_steps, + width=width, + height=height, + seed=seed + ) + except Exception as e: + return "default_image.png", f"ERROR: Failed to generate image. Details: {e}" + print ("done") + print(image) + image.save("output_image.png") + + #Save the image with a timestamped filename + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + output_filename = f"{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team_color.lower()}.png" + try: + # Save the image to the current working directory + image.save(output_filename) + print(f"Image saved as: {output_filename}") + except Exception as e: + return "default_image.png", f"ERROR: Failed to save image. Details: {e}" + # Return the filename and success message + print(f"{output_filename}, Image generated successfully!") + # Return the filename and success message + return output_filename, "Image generated successfully!" + + # Display the saved image locally + if output_filename: + from PIL import Image + img = Image.open(output_filename) + img.show() + else: + print(message) diff --git a/old/old_modal_volumes_version/old 1.0/img_gen_modal2.py b/old/old_modal_volumes_version/old 1.0/img_gen_modal2.py new file mode 100644 index 0000000000000000000000000000000000000000..d8e1ecc6b06c2505ade41e13aef2ba12f53572ac --- /dev/null +++ b/old/old_modal_volumes_version/old 1.0/img_gen_modal2.py @@ -0,0 +1,169 @@ +# img_gen.py +#img_gen_modal.py +# img_gen.py +# img_gen_modal.py +import modal +import random +from datetime import datetime +import random +import io +from config.config import prompts, models # Indirect import +import os +import torch +from huggingface_hub import login +from transformers import AutoTokenizer + +CACHE_DIR = "/model_cache" + +# Define the Modal image +image = ( + modal.Image.from_registry( + "nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.9" + ) + .apt_install( + "git", + ) + .pip_install( + "diffusers", + "transformers", + "torch", + "accelerate", + "gradio>=4.44.1", + "safetensors", + "pillow", + "sentencepiece", + "hf_transfer", + "huggingface_hub[hf_transfer]", + "aria2", # aria2 for ultra-fast parallel downloads + f"git+https://github.com/huggingface/transformers.git" + ) + .env( + { + "HF_HUB_ENABLE_HF_TRANSFER": "1", "HF_HOME": "HF_HOME", "HF_HUB_CACHE": CACHE_DIR + } + ) +) + +# Create a Modal app +app = modal.App("img-gen-modal", image=image) +with image.imports(): + import diffusers + import os + import gradio + import torch + import sentencepiece + +flux_model_vol = modal.Volume.from_name("flux-model-vol", create_if_missing=True) # Reference your volume + +@app.function(gpu="t4", volumes={"/models": flux_model_vol}, + secrets=[modal.Secret.from_name("huggingface-token")], + # gpu="a100-80gb" + ) +def generate_image(prompt_alias, team_color, model_alias, custom_prompt, height=360, width=640, num_inference_steps=20, guidance_scale=2.0, seed=-1): + # Find the selected prompt and model + try: + prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"] + model_name = next(m for m in models if m["alias"] == model_alias)["name"] + except StopIteration: + return None, "ERROR: Invalid prompt or model selected." 
+ + # Determine the enemy color + enemy_color = "blue" if team_color.lower() == "red" else "red" + + # Print the original prompt and dynamic values for debugging + print("Original Prompt:") + print(prompt) + print(f"Enemy Color: {enemy_color}") + print(f"Team Color: {team_color.lower()}") + + prompt = prompt.format(team_color=team_color.lower(), enemy_color=enemy_color) + + # Print the formatted prompt for debugging + print("\nFormatted Prompt:") + print(prompt) + + # Append the custom prompt (if provided) + if custom_prompt and len(custom_prompt.strip()) > 0: + prompt += " " + custom_prompt.strip() + + # Randomize the seed if needed + if seed == -1: + seed = random.randint(0, 1000000) + + # DOWNLOADING FROM HERE KEEPS THE /MODELS/ DIRECTORY + # WITH A SCRIPT IT GOES AWAY + # def download_flux(): + # from huggingface_hub import snapshot_download + # import transformers + + # repo_id = "black-forest-labs/FLUX.1-schnell" + # local_dir = "/data/models/FLUX.1-schnell" + + # # **FASTEST METHOD:** Use max_workers for parallel download + # snapshot_download( + # repo_id, + # local_dir=local_dir, + # revision="main", + # #ignore_patterns=["*.pt", "*.bin"], # Skip large model weights + # max_workers=8 # Higher concurrency for parallel chunk downloads + # ) + + # transformers.utils.move_cache() + # print(f"FLUX model downloaded to {local_dir}") + # download_flux() + + try: + from diffusers import FluxPipeline + print("Initializing HF TOKEN") + hf_token = os.environ["HF_TOKEN"] + print(hf_token) + print("HF TOKEN:") + login(token=hf_token) + print("model_name:") + print(model_name) + # First check if model exists in the volume + local_path = "models/" + model_name + print(f"Loading model from local path: {local_path}") + # Debug: Check if the directory exists and list its contents + for item in os.listdir(local_path): + print(f" - {item}") + + print("Initializing PIPE") + # Initialize the pipeline + #cache_dir = "/cache_" + pipe = FluxPipeline.from_pretrained("data/" + model_name, torch_dtype=torch.bfloat16,local_files_only=True, + #cache_dir=cache_dir + ) + pipe = pipe.to("cuda") + + except Exception as e: + print(f"Detailed error: {str(e)}") + return None, f"ERROR: Failed to initialize PIPE. Details: {e}" + + try: + print("Sending img gen to pipe") + image = pipe( + prompt, + guidance_scale=guidance_scale, + num_inference_steps=num_inference_steps, + width=width, + height=height, + # seed=seed + ).images[0] + image.save("image.png") + + except Exception as e: + return None, f"ERROR: Failed to generate image. Details: {e}" + + # # Save the image with a timestamped filename + # timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + # output_filename = f"{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team_color.lower()}.png" + # try: + # # The pipeline typically returns images in a specific format + # # Usually it's image.images[0] for the first generated image + # image_output = image.images[0] # Get the actual PIL Image from the output + # image_output.save(output_filename) # Save using PIL's save method + # except Exception as e: + # return None, f"ERROR: Failed to save image. 
Details: {e}" + # print(f"Image output type: {type(image)}") + # print(f"Image output attributes: {dir(image)}") \ No newline at end of file diff --git a/old/old_modal_volumes_version/old 1.0/img_gen_modal3.py b/old/old_modal_volumes_version/old 1.0/img_gen_modal3.py new file mode 100644 index 0000000000000000000000000000000000000000..b90ec494abb402758e806d4579dd52abf5b809e3 --- /dev/null +++ b/old/old_modal_volumes_version/old 1.0/img_gen_modal3.py @@ -0,0 +1,169 @@ +# img_gen.py +#img_gen_modal.py +# img_gen.py +# img_gen_modal.py +import modal +import random +from datetime import datetime +import random +import io +from config.config import prompts, models # Indirect import +import os +import torch +from huggingface_hub import login +from transformers import AutoTokenizer + +CACHE_DIR = "/model_cache" + +# Define the Modal image +image = ( + modal.Image.from_registry( + "nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.9" + ) + .apt_install( + "git", + ) + .pip_install( + "diffusers", + "transformers", + "torch", + "accelerate", + "gradio>=4.44.1", + "safetensors", + "pillow", + "sentencepiece", + "hf_transfer", + "huggingface_hub[hf_transfer]", + "aria2", # aria2 for ultra-fast parallel downloads + f"git+https://github.com/huggingface/transformers.git" + ) + .env( + { + "HF_HUB_ENABLE_HF_TRANSFER": "1", "HF_HOME": "HF_HOME", "HF_HUB_CACHE": CACHE_DIR + } + ) +) + +# Create a Modal app +app = modal.App("img-gen-modal", image=image) +with image.imports(): + import diffusers + import os + import gradio + import torch + import sentencepiece + +flux_model_vol = modal.Volume.from_name("flux-model-vol", create_if_missing=True) # Reference your volume + +@app.function(gpu="t4", volumes={"/data": flux_model_vol}, + secrets=[modal.Secret.from_name("huggingface-token")], + # gpu="a100-80gb" + ) +def generate_image(prompt_alias, team_color, model_alias, custom_prompt, height=360, width=640, num_inference_steps=20, guidance_scale=2.0, seed=-1): + # Find the selected prompt and model + try: + prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"] + model_name = next(m for m in models if m["alias"] == model_alias)["name"] + except StopIteration: + return None, "ERROR: Invalid prompt or model selected." 
+ + # Determine the enemy color + enemy_color = "blue" if team_color.lower() == "red" else "red" + + # Print the original prompt and dynamic values for debugging + print("Original Prompt:") + print(prompt) + print(f"Enemy Color: {enemy_color}") + print(f"Team Color: {team_color.lower()}") + + prompt = prompt.format(team_color=team_color.lower(), enemy_color=enemy_color) + + # Print the formatted prompt for debugging + print("\nFormatted Prompt:") + print(prompt) + + # Append the custom prompt (if provided) + if custom_prompt and len(custom_prompt.strip()) > 0: + prompt += " " + custom_prompt.strip() + + # Randomize the seed if needed + if seed == -1: + seed = random.randint(0, 1000000) + + # DOWNLOADING FROM HERE KEEPS THE /MODELS/ DIRECTORY + # WITH A SCRIPT IT GOES AWAY + # def download_flux(): + # from huggingface_hub import snapshot_download + # import transformers + + # repo_id = "black-forest-labs/FLUX.1-schnell" + # local_dir = "/data/models/FLUX.1-schnell" + + # # **FASTEST METHOD:** Use max_workers for parallel download + # snapshot_download( + # repo_id, + # local_dir=local_dir, + # revision="main", + # #ignore_patterns=["*.pt", "*.bin"], # Skip large model weights + # max_workers=8 # Higher concurrency for parallel chunk downloads + # ) + + # transformers.utils.move_cache() + # print(f"FLUX model downloaded to {local_dir}") + # download_flux() + + try: + from diffusers import FluxPipeline + print("Initializing HF TOKEN") + hf_token = os.environ["HF_TOKEN"] + print(hf_token) + print("HF TOKEN:") + login(token=hf_token) + print("model_name:") + print(model_name) + # First check if model exists in the volume + local_path = "data/" + model_name + print(f"Loading model from local path: {local_path}") + # Debug: Check if the directory exists and list its contents + for item in os.listdir(local_path): + print(f" - {item}") + + print("Initializing PIPE") + # Initialize the pipeline + #cache_dir = "/cache_" + pipe = FluxPipeline.from_pretrained(local_path, torch_dtype=torch.bfloat16,local_files_only=True, + #cache_dir=cache_dir + ) + pipe = pipe.to("cuda") + + except Exception as e: + print(f"Detailed error: {str(e)}") + return None, f"ERROR: Failed to initialize PIPE. Details: {e}" + + try: + print("Sending img gen to pipe") + image = pipe( + prompt, + guidance_scale=guidance_scale, + num_inference_steps=num_inference_steps, + width=width, + height=height, + # seed=seed + ).images[0] + image.save("image.png") + + except Exception as e: + return None, f"ERROR: Failed to generate image. Details: {e}" + + # # Save the image with a timestamped filename + # timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + # output_filename = f"{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team_color.lower()}.png" + # try: + # # The pipeline typically returns images in a specific format + # # Usually it's image.images[0] for the first generated image + # image_output = image.images[0] # Get the actual PIL Image from the output + # image_output.save(output_filename) # Save using PIL's save method + # except Exception as e: + # return None, f"ERROR: Failed to save image. 
Details: {e}" + # print(f"Image output type: {type(image)}") + # print(f"Image output attributes: {dir(image)}") \ No newline at end of file diff --git a/old/old_modal_volumes_version/old 1.0/img_gen_modal4.py b/old/old_modal_volumes_version/old 1.0/img_gen_modal4.py new file mode 100644 index 0000000000000000000000000000000000000000..10efce954940abff8d8ae9b299dfc78bcdf0a86f --- /dev/null +++ b/old/old_modal_volumes_version/old 1.0/img_gen_modal4.py @@ -0,0 +1,176 @@ +# img_gen.py +#img_gen_modal.py +# img_gen.py +# img_gen_modal.py +import modal +import random +from datetime import datetime +import random +import io +from config.config import prompts, models # Indirect import +import os +import torch +from huggingface_hub import login +from transformers import AutoTokenizer + +CACHE_DIR = "/model_cache" + +# Define the Modal image +image = ( + modal.Image.from_registry("nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.9") + #modal.Image.debian_slim(python_version="3.9") # Base image + + .apt_install( + "git", + ) + .pip_install( + "diffusers", + "transformers", + "torch", + "accelerate", + "gradio>=4.44.1", + "safetensors", + "pillow", + "sentencepiece", + "hf_transfer", + "huggingface_hub[hf_transfer]", + "aria2", # aria2 for ultra-fast parallel downloads + f"git+https://github.com/huggingface/transformers.git" + ) + .env( + { + "HF_HUB_ENABLE_HF_TRANSFER": "1", "HF_HOME": "HF_HOME", "HF_HUB_CACHE": CACHE_DIR + } + ) +) + +# Create a Modal app +app = modal.App("img-gen-modal", image=image) +with image.imports(): + import diffusers + import os + import gradio + import torch + import sentencepiece + +flux_model_vol = modal.Volume.from_name("flux-model-vol", create_if_missing=True) # Reference your volume + +@app.function(gpu="A100", volumes={"/data": flux_model_vol}, + secrets=[modal.Secret.from_name("huggingface-token")], + # gpu="a100-80gb" + ) +def generate_image(prompt_alias, team_color, model_alias, custom_prompt, height=36, width=64, num_inference_steps=2, guidance_scale=2.0, seed=-1): + # Find the selected prompt and model + try: + prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"] + model_name = next(m for m in models if m["alias"] == model_alias)["name"] + except StopIteration: + return None, "ERROR: Invalid prompt or model selected." 
+ + # Determine the enemy color + enemy_color = "blue" if team_color.lower() == "red" else "red" + + # Print the original prompt and dynamic values for debugging + print("Original Prompt:") + print(prompt) + print(f"Enemy Color: {enemy_color}") + print(f"Team Color: {team_color.lower()}") + + prompt = prompt.format(team_color=team_color.lower(), enemy_color=enemy_color) + + # Print the formatted prompt for debugging + print("\nFormatted Prompt:") + print(prompt) + + # Append the custom prompt (if provided) + if custom_prompt and len(custom_prompt.strip()) > 0: + prompt += " " + custom_prompt.strip() + + # Randomize the seed if needed + if seed == -1: + seed = random.randint(0, 1000000) + + # DOWNLOADING FROM HERE KEEPS THE /MODELS/ DIRECTORY + # WITH A SCRIPT IT GOES AWAY + # def download_flux(): + # from huggingface_hub import snapshot_download + # import transformers + + # repo_id = "black-forest-labs/FLUX.1-schnell" + # local_dir = "/data/models/FLUX.1-schnell" + + # # **FASTEST METHOD:** Use max_workers for parallel download + # snapshot_download( + # repo_id, + # local_dir=local_dir, + # revision="main", + # #ignore_patterns=["*.pt", "*.bin"], # Skip large model weights + # max_workers=8 # Higher concurrency for parallel chunk downloads + # ) + + # transformers.utils.move_cache() + # print(f"FLUX model downloaded to {local_dir}") + # download_flux() + + try: + from diffusers import FluxPipeline + print("Initializing HF TOKEN") + hf_token = os.environ["HF_TOKEN"] + print(hf_token) + print("HF TOKEN:") + login(token=hf_token) + print("model_name:") + print(model_name) + + # Use absolute path with leading slash + local_path = f"/data/{model_name}" # Changed from "data/" to "/data/" + print(f"Loading model from local path: {local_path}") + + # Debug: Check if the directory exists and list its contents + if os.path.exists(local_path): + print("Directory exists. Contents:") + for item in os.listdir(local_path): + print(f" - {item}") + else: + print(f"Directory does not exist: {local_path}") + print("Contents of /data:") + print(os.listdir("/data")) + + print("Initializing PIPE") + pipe = FluxPipeline.from_pretrained( + local_path, + torch_dtype=torch.bfloat16, + local_files_only=True + ) + pipe = pipe.to("cuda") + + except Exception as e: + print(f"Detailed error: {str(e)}") + return None, f"ERROR: Failed to initialize PIPE. Details: {e}" + try: + print("Sending img gen to pipe") + image = pipe( + prompt, + guidance_scale=guidance_scale, + num_inference_steps=num_inference_steps, + width=width, + height=height, + # seed=seed + ).images[0] + image.save("image.png") + + except Exception as e: + return None, f"ERROR: Failed to generate image. Details: {e}" + + # # Save the image with a timestamped filename + # timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + # output_filename = f"{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team_color.lower()}.png" + # try: + # # The pipeline typically returns images in a specific format + # # Usually it's image.images[0] for the first generated image + # image_output = image.images[0] # Get the actual PIL Image from the output + # image_output.save(output_filename) # Save using PIL's save method + # except Exception as e: + # return None, f"ERROR: Failed to save image. 
Details: {e}" + # print(f"Image output type: {type(image)}") + # print(f"Image output attributes: {dir(image)}") \ No newline at end of file diff --git a/old/old_modal_volumes_version/old examples/ctb_modal_example.py b/old/old_modal_volumes_version/old examples/ctb_modal_example.py new file mode 100644 index 0000000000000000000000000000000000000000..51e527fb0e60600e965b14ac98bfb603d9e3b65b --- /dev/null +++ b/old/old_modal_volumes_version/old examples/ctb_modal_example.py @@ -0,0 +1,41 @@ +import modal +from src.gradio_interface_modal_example import gradio_interface_modal +from config.config import prompts, models # Indirect import + +# Create the Modal app +app = modal.App("ctb-image-generator") +print("Modal app created.") + +# Entry point for local execution +@app.local_entrypoint() +def main(): + with modal.enable_output(): + print("Hello from ctb_modal!") + print("Running debug check...") + # Debug function to check installed packages + def check_dependencies(): + packages = [ + "diffusers", # For Stable Diffusion + "transformers", # For Hugging Face models + "torch", # PyTorch + "accelerate", # For distributed training/inference + "gradio", # For the Gradio interface (updated to latest version) + "safetensors", # For safe model loading + "pillow", # For image processing + "sentencepiece" + ] + + for package in packages: + try: + import importlib + module = importlib.import_module(package) + print(f" {package} is installed. Version: {module.__version__}") + except ImportError: + print(f" {package} is NOT installed.") + + check_dependencies() + print("Launching Gradio interface...") + # demo.launch() + gradio_interface_modal() + + diff --git a/old/old_modal_volumes_version/old examples/download_flux_modal.py b/old/old_modal_volumes_version/old examples/download_flux_modal.py new file mode 100644 index 0000000000000000000000000000000000000000..dd4d0cb2858eeada5c0ecfafafbe01dc369f0125 --- /dev/null +++ b/old/old_modal_volumes_version/old examples/download_flux_modal.py @@ -0,0 +1,33 @@ +import modal +import os + +app = modal.App("flux-model-setup") + +# Attach the newly created volume +volume = modal.Volume.from_name("flux-model-vol") + +@app.function( + volumes={"/data/models": volume}, # Mount the volume inside the container + image=modal.Image.debian_slim().pip_install("huggingface_hub[hf_transfer]", "transformers"), + secrets=[modal.Secret.from_name("huggingface-token")] +) +def download_flux(): + from huggingface_hub import snapshot_download + import transformers # Ensure transformers is available + + repo_id = "black-forest-labs/FLUX.1-dev" + local_dir = "/data/models/FLUX.1-dev" # Store model inside mounted volume + + snapshot_download( + repo_id, + local_dir=local_dir, + revision="main", + ignore_patterns=["*.pt", "*.bin"] # Skip large model weights + ) + + transformers.utils.move_cache() + print(f"FLUX model downloaded to {local_dir}") + +@app.local_entrypoint() +def main(): + download_flux.remote() diff --git a/old/old_modal_volumes_version/old examples/download_flux_modal2.py b/old/old_modal_volumes_version/old examples/download_flux_modal2.py new file mode 100644 index 0000000000000000000000000000000000000000..a69e1b74908687a6fdb5e49ca5d319692101df61 --- /dev/null +++ b/old/old_modal_volumes_version/old examples/download_flux_modal2.py @@ -0,0 +1,21 @@ +# DOWNLOADING FROM HERE KEEPS THE /MODELS/ DIRECTORY + # WITH A SCRIPT IT GOES AWAY + # def download_flux(): + # from huggingface_hub import snapshot_download + # import transformers + + # repo_id = 
"black-forest-labs/FLUX.1-schnell" + # local_dir = "/data/models/FLUX.1-schnell" + + # # **FASTEST METHOD:** Use max_workers for parallel download + # snapshot_download( + # repo_id, + # local_dir=local_dir, + # revision="main", + # #ignore_patterns=["*.pt", "*.bin"], # Skip large model weights + # max_workers=8 # Higher concurrency for parallel chunk downloads + # ) + + # transformers.utils.move_cache() + # print(f"FLUX model downloaded to {local_dir}") + # download_flux() \ No newline at end of file diff --git a/old/old_modal_volumes_version/old examples/gradio_interface_modal_example.py b/old/old_modal_volumes_version/old examples/gradio_interface_modal_example.py new file mode 100644 index 0000000000000000000000000000000000000000..f9a0c2a6d14d7964623102d53994901cd15df315 --- /dev/null +++ b/old/old_modal_volumes_version/old examples/gradio_interface_modal_example.py @@ -0,0 +1,58 @@ +# gradio_interface.py +import gradio as gr +import modal +from config.config import prompts, models # Indirect import +#from src.img_gen_modal import generate + +print("Hello from gradio_interface_head!") + +# Modal remote function synchronously +def generate(prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input): + # Debug: Print a message instead of generating an image + debug_message = f"Debug: Button clicked! Inputs - Prompt: {prompt_dropdown}, Team: {team_dropdown}, Model: {model_dropdown}, Custom Prompt: {custom_prompt_input}" + print(debug_message) # Print to console for debugging + try: + # Import the remote function + f = modal.Function.from_name("img-gen-modal-example", "generate_image") + image_path, message = f.remote(prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input) + return image_path, message + except Exception as e: + return None, f"An error occurred: {e}" + + +def gradio_interface_modal(): + with modal.enable_output(): + from config.config import prompts, models # Indirect import + # Gradio Interface + with gr.Blocks() as demo: + gr.Markdown("# CtB AI Image Generator") + with gr.Row(): + # Set default values for dropdowns + prompt_dropdown = gr.Dropdown(choices=[p["alias"] for p in prompts], label="Select Prompt", value=prompts[0]["alias"]) + team_dropdown = gr.Dropdown(choices=["Red", "Blue"], label="Select Team", value="Red") + model_dropdown = gr.Dropdown(choices=[m["alias"] for m in models], label="Select Model", value=models[0]["alias"]) + with gr.Row(): + # Add a text box for custom user input (max 200 characters) + custom_prompt_input = gr.Textbox(label="Custom Prompt (Optional)", placeholder="Enter additional details (max 200 chars)...", max_lines=1, max_length=200) + with gr.Row(): + generate_button = gr.Button("Generate Image") + output = gr.Textbox(label="Output") + with gr.Row(): + output_image = gr.Image(label="Generated Image") + with gr.Row(): + status_text = gr.Textbox(label="Status", placeholder="Waiting for input...", interactive=False) + print("Building cudasdasrer...") + + #Connect the button to the call_generate function + #had do do it to handle gradio/modal interaction) + generate_button.click( + generate, + inputs=[prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input], + outputs=[output_image, status_text] + ) + demo.launch() + + + + + diff --git a/old/old_modal_volumes_version/old examples/img_gen_modal_example.py b/old/old_modal_volumes_version/old examples/img_gen_modal_example.py new file mode 100644 index 0000000000000000000000000000000000000000..5d79cc93771fb16f7c7c87b04a28360de57f92bc --- /dev/null +++ 
b/old/old_modal_volumes_version/old examples/img_gen_modal_example.py @@ -0,0 +1,169 @@ +# img_gen.py +#img_gen_modal.py +# img_gen.py +# img_gen_modal.py +import modal +import random +from datetime import datetime +import random +import io +from config.config import prompts, models # Indirect import +import os +import torch +from huggingface_hub import login +from transformers import AutoTokenizer + +CACHE_DIR = "/model_cache" + +# Define the Modal image +image = ( + #modal.Image.from_registry("nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.9") + modal.Image.debian_slim(python_version="3.9") # Base image + + .apt_install( + "git", + ) + .pip_install( + "diffusers", + "transformers", + "torch", + "accelerate", + "gradio>=4.44.1", + "safetensors", + "pillow", + "sentencepiece", + "hf_transfer", + "huggingface_hub[hf_transfer]", + "aria2", # aria2 for ultra-fast parallel downloads + f"git+https://github.com/huggingface/transformers.git" + ) + .env( + { + "HF_HUB_ENABLE_HF_TRANSFER": "1", "HF_HOME": "HF_HOME", "HF_HUB_CACHE": CACHE_DIR + } + ) +) + +# Create a Modal app +app = modal.App("img-gen-modal-example", image=image) +with image.imports(): + import diffusers + import os + import gradio + import torch + import sentencepiece + +flux_model_vol = modal.Volume.from_name("flux-model-vol", create_if_missing=True) # Reference your volume + +@app.function(volumes={"/data": flux_model_vol}, + secrets=[modal.Secret.from_name("huggingface-token")], + gpu="a100-80gb" + ) +def generate_image(prompt_alias, team_color, model_alias, custom_prompt, height=50, width=50, num_inference_steps=2, guidance_scale=2.0, seed=-1): + # Find the selected prompt and model + try: + prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"] + model_name = next(m for m in models if m["alias"] == model_alias)["name"] + except StopIteration: + return None, "ERROR: Invalid prompt or model selected." 
+ + # Determine the enemy color + enemy_color = "blue" if team_color.lower() == "red" else "red" + + # Print the original prompt and dynamic values for debugging + print("Original Prompt:") + print(prompt) + print(f"Enemy Color: {enemy_color}") + print(f"Team Color: {team_color.lower()}") + + prompt = prompt.format(team_color=team_color.lower(), enemy_color=enemy_color) + + # Print the formatted prompt for debugging + print("\nFormatted Prompt:") + print(prompt) + + # Append the custom prompt (if provided) + if custom_prompt and len(custom_prompt.strip()) > 0: + prompt += " " + custom_prompt.strip() + + # Randomize the seed if needed + if seed == -1: + seed = random.randint(0, 1000000) + + # DOWNLOADING FROM HERE KEEPS THE /MODELS/ DIRECTORY + # WITH A SCRIPT IT GOES AWAY + # def download_flux(): + # from huggingface_hub import snapshot_download + # import transformers + + # repo_id = "black-forest-labs/FLUX.1-schnell" + # local_dir = "/data/models/FLUX.1-schnell" + + # # **FASTEST METHOD:** Use max_workers for parallel download + # snapshot_download( + # repo_id, + # local_dir=local_dir, + # revision="main", + # #ignore_patterns=["*.pt", "*.bin"], # Skip large model weights + # max_workers=8 # Higher concurrency for parallel chunk downloads + # ) + + # transformers.utils.move_cache() + # print(f"FLUX model downloaded to {local_dir}") + # download_flux() + + try: + from diffusers import FluxPipeline + print("Initializing HF TOKEN") + hf_token = os.environ["HF_TOKEN"] + print(hf_token) + print("HF TOKEN:") + login(token=hf_token) + print("model_name:") + print(model_name) + + # Use absolute path with leading slash + local_path = f"/data/{model_name}" # Changed from "data/" to "/data/" + print(f"Loading model from local path: {local_path}") + + # Debug: Check if the directory exists and list its contents + if os.path.exists(local_path): + print("Directory exists. Contents:") + for item in os.listdir(local_path): + print(f" - {item}") + else: + print(f"Directory does not exist: {local_path}") + print("Contents of /data:") + print(os.listdir("/data")) + + print("Initializing PIPE") + pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16) + pipe.enable_model_cpu_offload() #save some VRAM by offloading the model to CPU. Remove this if you have enough GPU power + + prompt = "A cat holding a sign that says hello world" + image = pipe( + prompt, + height=1024, + width=1024, + guidance_scale=3.5, + num_inference_steps=50, + max_sequence_length=512, + generator=torch.Generator("gpu").manual_seed(0) + ).images[0] + image.save("flux-dev.png") + + except Exception as e: + return None, f"ERROR: Failed to generate image. Details: {e}" + + # # Save the image with a timestamped filename + # timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + # output_filename = f"{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team_color.lower()}.png" + # try: + # # The pipeline typically returns images in a specific format + # # Usually it's image.images[0] for the first generated image + # image_output = image.images[0] # Get the actual PIL Image from the output + # image_output.save(output_filename) # Save using PIL's save method + # except Exception as e: + # return None, f"ERROR: Failed to save image. 
Details: {e}" + # print(f"Image output type: {type(image)}") + # print(f"Image output attributes: {dir(image)}") \ No newline at end of file diff --git a/old/old_modal_volumes_version/old examples/img_gen_modal_example2.py b/old/old_modal_volumes_version/old examples/img_gen_modal_example2.py new file mode 100644 index 0000000000000000000000000000000000000000..9ab130a81f42947b923e30bbd34ceaf27d11fa90 --- /dev/null +++ b/old/old_modal_volumes_version/old examples/img_gen_modal_example2.py @@ -0,0 +1,204 @@ +# img_gen.py +#img_gen_modal.py +# img_gen.py +# img_gen_modal.py +import modal +import random +from datetime import datetime +import random +import io +from config.config import prompts, models # Indirect import +import os +import torch +from huggingface_hub import login +from transformers import AutoTokenizer + +CACHE_DIR = "/model_cache" + +# Define the Modal image +image = ( + #modal.Image.from_registry("nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.9") + modal.Image.debian_slim(python_version="3.9") # Base image + + .apt_install( + "git", + ) + .pip_install( + "diffusers", + "transformers", + "torch", + "accelerate", + "gradio>=4.44.1", + "safetensors", + "pillow", + "sentencepiece", + "hf_transfer", + "huggingface_hub[hf_transfer]", + "aria2", # aria2 for ultra-fast parallel downloads + f"git+https://github.com/huggingface/transformers.git" + ) + .env( + { + "HF_HUB_ENABLE_HF_TRANSFER": "1", "HF_HOME": "HF_HOME", "HF_HUB_CACHE": CACHE_DIR + } + ) +) + +# Create a Modal app +app = modal.App("img-gen-modal-example", image=image) +with image.imports(): + import diffusers + import os + import gradio + import torch + import sentencepiece + +flux_model_vol = modal.Volume.from_name("flux-model-vol", create_if_missing=True) # Reference your volume + +@app.function( + gpu="t4", # or "A100" depending on what you're using + volumes={"/data": flux_model_vol}, + secrets=[modal.Secret.from_name("huggingface-token")], + timeout=1800 # Add timeout of 30 minutes +) + +def generate_image(prompt_alias, team_color, model_alias, custom_prompt, height=50, width=50, num_inference_steps=2, guidance_scale=2.0, seed=-1): + # Find the selected prompt and model + try: + prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"] + model_name = next(m for m in models if m["alias"] == model_alias)["name"] + except StopIteration: + return None, "ERROR: Invalid prompt or model selected." 
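As in the first example, the image definition above sets HF_HOME to the literal string "HF_HOME", while Hugging Face reads HF_HOME as a directory path. Assuming the intent is to keep the Hub cache under CACHE_DIR, the .env() call would more plausibly read:

.env(
    {
        "HF_HUB_ENABLE_HF_TRANSFER": "1",  # fast parallel downloads via hf_transfer
        "HF_HOME": CACHE_DIR,              # a directory path, not a literal name
        "HF_HUB_CACHE": CACHE_DIR,
    }
)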
+ + # Determine the enemy color + enemy_color = "blue" if team_color.lower() == "red" else "red" + + # Print the original prompt and dynamic values for debugging + print("Original Prompt:") + print(prompt) + print(f"Enemy Color: {enemy_color}") + print(f"Team Color: {team_color.lower()}") + + prompt = prompt.format(team_color=team_color.lower(), enemy_color=enemy_color) + + # Print the formatted prompt for debugging + print("\nFormatted Prompt:") + print(prompt) + + # Append the custom prompt (if provided) + if custom_prompt and len(custom_prompt.strip()) > 0: + prompt += " " + custom_prompt.strip() + + # Randomize the seed if needed + if seed == -1: + seed = random.randint(0, 1000000) + + # DOWNLOADING FROM HERE KEEPS THE /MODELS/ DIRECTORY + # WITH A SCRIPT IT GOES AWAY + # def download_flux(): + # from huggingface_hub import snapshot_download + # import transformers + + # repo_id = "black-forest-labs/FLUX.1-schnell" + # local_dir = "/data/models/FLUX.1-schnell" + + # # **FASTEST METHOD:** Use max_workers for parallel download + # snapshot_download( + # repo_id, + # local_dir=local_dir, + # revision="main", + # #ignore_patterns=["*.pt", "*.bin"], # Skip large model weights + # max_workers=8 # Higher concurrency for parallel chunk downloads + # ) + + # transformers.utils.move_cache() + # print(f"FLUX model downloaded to {local_dir}") + # download_flux() + + try: + from diffusers import FluxPipeline + print("Initializing HF TOKEN") + hf_token = os.environ["HF_TOKEN"] + login(token=hf_token) + + local_path = f"/data/{model_name}" + print(f"Loading model from local path: {local_path}") + + # Check model files + if os.path.exists(local_path): + print("Directory exists. Contents:") + required_files = ['model_index.json', 'scheduler', 'vae'] + found_files = os.listdir(local_path) + print("Found files:", found_files) + missing_files = [f for f in required_files if f not in found_files] + if missing_files: + print(f"Warning: Missing required files: {missing_files}") + + print("Initializing pipeline...") + pipe = FluxPipeline.from_pretrained( + local_path, + torch_dtype=torch.bfloat16, + local_files_only=True, + device_map="auto" # Add this for better GPU utilization + ) + # Move to GPU and print memory usage + #pipe = pipe.to("cuda") + torch.cuda.empty_cache() # Clear any unused memory + print(f"GPU memory allocated: {torch.cuda.memory_allocated()/1e9:.2f} GB") + + except Exception as e: + print(f"Error occurred: {type(e).__name__}") + print(f"Error message: {str(e)}") + print(f"Error details: {e.__dict__}") + + # Verify CUDA availability and model device + print(f"CUDA available: {torch.cuda.is_available()}") + print(f"Current device: {torch.cuda.current_device()}") + print(f"Device name: {torch.cuda.get_device_name()}") + + + print("Starting image generation...") + # Set a small test generation first + test_output = pipe( + "test", + num_inference_steps=1, + width=64, + height=64 + ) + print("Test generation successful!") + + # Now do the actual generation + print("Starting main generation with parameters:") + print(f"Prompt: {prompt}") + print(f"Steps: {num_inference_steps}") + print(f"Size: {width}x{height}") + print(f"Guidance scale: {guidance_scale}") + + output = pipe( + prompt, + guidance_scale=guidance_scale, + num_inference_steps=num_inference_steps, + width=width, + height=height, + ) + + image = output.images[0] + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + output_filename = f"/data/generated_{timestamp}.png" + + image.save(output_filename) + print(f"Image saved to 
{output_filename}") + return image, "Success!" + + # # Save the image with a timestamped filename + # timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + # output_filename = f"{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team_color.lower()}.png" + # try: + # # The pipeline typically returns images in a specific format + # # Usually it's image.images[0] for the first generated image + # image_output = image.images[0] # Get the actual PIL Image from the output + # image_output.save(output_filename) # Save using PIL's save method + # except Exception as e: + # return None, f"ERROR: Failed to save image. Details: {e}" + # print(f"Image output type: {type(image)}") + # print(f"Image output attributes: {dir(image)}") \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 33082473fe73fdd6a013a3b8236d30002c008d21..199518a88cb823b960aab88aac8ad218a20b052c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,10 +1,13 @@ -accelerate -diffusers -invisible_watermark torch +diffusers transformers -xformers -IPython +accelerate gradio -huggingface_hub -Pillow \ No newline at end of file +safetensors +pillow +numpy<2 +invisible_watermark +huggingface_hub[hf_transfer] +sentencepiece +opencv-python==4.5.5.64 +gguf \ No newline at end of file diff --git a/requirements_freeze_ok.txt b/requirements_freeze_ok.txt new file mode 100644 index 0000000000000000000000000000000000000000..77d0a35cf7daf8274b687d3131c421f59b24ac42 --- /dev/null +++ b/requirements_freeze_ok.txt @@ -0,0 +1,88 @@ +accelerate==1.3.0 +aiofiles==23.2.1 +aiohappyeyeballs==2.4.4 +aiohttp==3.11.11 +aiosignal==1.3.2 +annotated-types==0.7.0 +anyio==4.8.0 +attrs==25.1.0 +certifi==2025.1.31 +charset-normalizer==3.4.1 +click==8.1.8 +diffusers==0.32.2 +fastapi==0.115.8 +ffmpy==0.5.0 +filelock==3.17.0 +frozenlist==1.5.0 +fsspec==2025.2.0 +gradio==5.14.0 +gradio_client==1.7.0 +grpclib==0.4.7 +h11==0.14.0 +h2==4.2.0 +hpack==4.1.0 +httpcore==1.0.7 +httpx==0.28.1 +huggingface-hub==0.28.1 +hyperframe==6.1.0 +idna==3.10 +importlib_metadata==8.6.1 +invisible-watermark==0.2.0 +Jinja2==3.1.5 +markdown-it-py==3.0.0 +MarkupSafe==2.1.5 +mdurl==0.1.2 +modal==0.73.8 +mpmath==1.3.0 +multidict==6.1.0 +networkx==3.4.2 +numpy==1.26.4 +opencv-python==4.5.5.64 +orjson==3.10.15 +packaging==24.2 +pandas==2.2.3 +pillow==11.1.0 +propcache==0.2.1 +protobuf==5.29.3 +psutil==6.1.1 +pydantic==2.10.6 +pydantic_core==2.27.2 +pydub==0.25.1 +Pygments==2.19.1 +python-dateutil==2.9.0.post0 +python-multipart==0.0.20 +pytz==2025.1 +PyWavelets==1.8.0 +PyYAML==6.0.2 +regex==2024.11.6 +requests==2.32.3 +rich==13.9.4 +ruff==0.9.4 +safehttpx==0.1.6 +safetensors==0.5.2 +semantic-version==2.10.0 +sentencepiece==0.2.0 +shellingham==1.5.4 +sigtools==4.0.1 +six==1.17.0 +sniffio==1.3.1 +starlette==0.45.3 +sympy==1.13.3 +synchronicity==0.9.10 +tokenizers==0.21.0 +toml==0.10.2 +tomlkit==0.13.2 +torch==2.2.2 +tqdm==4.67.1 +transformers==4.48.2 +typer==0.15.1 +types-certifi==2021.10.8.3 +types-toml==0.10.8.20240310 +typing_extensions==4.12.2 +tzdata==2025.1 +urllib3==2.3.0 +uvicorn==0.34.0 +watchfiles==1.0.4 +websockets==14.2 +yarl==1.18.3 +zipp==3.21.0 diff --git a/src/.DS_Store b/src/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..3706fec4b03d9798ed235686f98a0ff5404445b7 Binary files /dev/null and b/src/.DS_Store differ diff --git a/src/__pycache__/__init__.cpython-310.pyc b/src/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..aa2a29aa680c57f5625b71d2ded3bb41e72eed5d Binary files /dev/null and b/src/__pycache__/__init__.cpython-310.pyc differ diff --git a/src/__pycache__/__init__.cpython-311.pyc b/src/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f101990196803d4976d8cfa4df4f1540b10ee48e Binary files /dev/null and b/src/__pycache__/__init__.cpython-311.pyc differ diff --git a/src/__pycache__/check_dependecies.cpython-311.pyc b/src/__pycache__/check_dependecies.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c3fa6d4c308ab9ab42688f9d27f54d8a170e224 Binary files /dev/null and b/src/__pycache__/check_dependecies.cpython-311.pyc differ diff --git a/src/__pycache__/download_flux_modal.cpython-310.pyc b/src/__pycache__/download_flux_modal.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f5cb8e53d6786fba97b89430fed6773ba27fd5c2 Binary files /dev/null and b/src/__pycache__/download_flux_modal.cpython-310.pyc differ diff --git a/src/__pycache__/download_flux_modal2.cpython-310.pyc b/src/__pycache__/download_flux_modal2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f4d1b0f6b8c8fb7c5a2bf675d72dd59a6836de2e Binary files /dev/null and b/src/__pycache__/download_flux_modal2.cpython-310.pyc differ diff --git a/src/__pycache__/download_flux_modal3.cpython-310.pyc b/src/__pycache__/download_flux_modal3.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4009a194d63c18e5dda47984392f9e9975253b2d Binary files /dev/null and b/src/__pycache__/download_flux_modal3.cpython-310.pyc differ diff --git a/src/__pycache__/download_flux_modal4.cpython-310.pyc b/src/__pycache__/download_flux_modal4.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..33e5a5c5a5c2c18cfd02d0e43c5b4abd549f4da4 Binary files /dev/null and b/src/__pycache__/download_flux_modal4.cpython-310.pyc differ diff --git a/src/__pycache__/download_flux_modal_HF.cpython-310.pyc b/src/__pycache__/download_flux_modal_HF.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..106c42e9ec722395028433c2998a892c7b0efd32 Binary files /dev/null and b/src/__pycache__/download_flux_modal_HF.cpython-310.pyc differ diff --git a/src/__pycache__/gradio_interface.cpython-310.pyc b/src/__pycache__/gradio_interface.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0f3e3f03d7b11a94199d9cfc76c68e35f7a5b740 Binary files /dev/null and b/src/__pycache__/gradio_interface.cpython-310.pyc differ diff --git a/src/__pycache__/gradio_interface.cpython-311.pyc b/src/__pycache__/gradio_interface.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1257395fd0f00f00afa50cb48201464c07a106ea Binary files /dev/null and b/src/__pycache__/gradio_interface.cpython-311.pyc differ diff --git a/src/__pycache__/gradio_interface.cpython-39.pyc b/src/__pycache__/gradio_interface.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b89a3d0906f15c5998461b9ee1ce9f1feee288a7 Binary files /dev/null and b/src/__pycache__/gradio_interface.cpython-39.pyc differ diff --git a/src/__pycache__/gradio_interface_local.cpython-311.pyc b/src/__pycache__/gradio_interface_local.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4fa46c7e1f56b890cc48a9663bb536cee9be28a3 Binary files /dev/null and 
b/src/__pycache__/gradio_interface_local.cpython-311.pyc differ diff --git a/src/__pycache__/gradio_interface_modal.cpython-310.pyc b/src/__pycache__/gradio_interface_modal.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..84a4056488fb090e83b8aa9e2925474a051e54d5 Binary files /dev/null and b/src/__pycache__/gradio_interface_modal.cpython-310.pyc differ diff --git a/src/__pycache__/gradio_interface_modal.cpython-311.pyc b/src/__pycache__/gradio_interface_modal.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..35314ab54fe71707e6b3f6e14380f85e854342a3 Binary files /dev/null and b/src/__pycache__/gradio_interface_modal.cpython-311.pyc differ diff --git a/src/__pycache__/gradio_interface_modal_example.cpython-310.pyc b/src/__pycache__/gradio_interface_modal_example.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b7609e56c2a5df41aa619b3105a401bd504c435d Binary files /dev/null and b/src/__pycache__/gradio_interface_modal_example.cpython-310.pyc differ diff --git a/src/__pycache__/img_gen.cpython-310.pyc b/src/__pycache__/img_gen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89d9a918f9f1b5f6499d8d9c1a474b6f1474e6c3 Binary files /dev/null and b/src/__pycache__/img_gen.cpython-310.pyc differ diff --git a/src/__pycache__/img_gen.cpython-311.pyc b/src/__pycache__/img_gen.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e03700b43f5c6c3c835c2a8cb4968c493d3263bb Binary files /dev/null and b/src/__pycache__/img_gen.cpython-311.pyc differ diff --git a/src/__pycache__/img_gen.cpython-39.pyc b/src/__pycache__/img_gen.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..506d226b71e1acf1dac68556683ea94022140a67 Binary files /dev/null and b/src/__pycache__/img_gen.cpython-39.pyc differ diff --git a/src/__pycache__/img_gen_colab.cpython-39.pyc b/src/__pycache__/img_gen_colab.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0a115f9a82166f1c0c808a851fee165e0069eb76 Binary files /dev/null and b/src/__pycache__/img_gen_colab.cpython-39.pyc differ diff --git a/src/__pycache__/img_gen_local.cpython-311.pyc b/src/__pycache__/img_gen_local.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9a5affd260191ffc49f9f5d8027c31be24f291f5 Binary files /dev/null and b/src/__pycache__/img_gen_local.cpython-311.pyc differ diff --git a/src/__pycache__/img_gen_modal.cpython-310.pyc b/src/__pycache__/img_gen_modal.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5447eeab85c9b03ecebd3c47818f3e1aa0856929 Binary files /dev/null and b/src/__pycache__/img_gen_modal.cpython-310.pyc differ diff --git a/src/__pycache__/img_gen_modal.cpython-311.pyc b/src/__pycache__/img_gen_modal.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..119f20b10e01bf8fd7bd05cd54ce5fab923a1761 Binary files /dev/null and b/src/__pycache__/img_gen_modal.cpython-311.pyc differ diff --git a/src/__pycache__/img_gen_modal_LIVE2.cpython-311.pyc b/src/__pycache__/img_gen_modal_LIVE2.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..875e1de373885d764d96d778a59a9b7e7cc4742c Binary files /dev/null and b/src/__pycache__/img_gen_modal_LIVE2.cpython-311.pyc differ diff --git a/src/__pycache__/img_gen_modal_cpu.cpython-310.pyc b/src/__pycache__/img_gen_modal_cpu.cpython-310.pyc new file mode 100644 
index 0000000000000000000000000000000000000000..938ce5de759751f7ccda24d7b0c9f04ff4f04466 Binary files /dev/null and b/src/__pycache__/img_gen_modal_cpu.cpython-310.pyc differ diff --git a/src/__pycache__/img_gen_modal_example.cpython-310.pyc b/src/__pycache__/img_gen_modal_example.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2a5dde90b4fd65e0e1289519eafe06205f2c6c04 Binary files /dev/null and b/src/__pycache__/img_gen_modal_example.cpython-310.pyc differ diff --git a/src/__pycache__/img_gen_modal_example2.cpython-310.pyc b/src/__pycache__/img_gen_modal_example2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f5241b989f0a5ddc0bd889c76e427d00d2eb86ae Binary files /dev/null and b/src/__pycache__/img_gen_modal_example2.cpython-310.pyc differ diff --git a/src/__pycache__/img_gen_modal_gpu.cpython-310.pyc b/src/__pycache__/img_gen_modal_gpu.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8669678481685a95a4915cf3172d1e9fce1e0dc5 Binary files /dev/null and b/src/__pycache__/img_gen_modal_gpu.cpython-310.pyc differ diff --git a/src/__pycache__/img_gen_modal_ok.cpython-310.pyc b/src/__pycache__/img_gen_modal_ok.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..84aa66a15f5d745815268c71a18377f39dd801b4 Binary files /dev/null and b/src/__pycache__/img_gen_modal_ok.cpython-310.pyc differ diff --git a/src/__pycache__/install_requirements.cpython-310.pyc b/src/__pycache__/install_requirements.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b1e851509664422f768f89f31be37d150a0dabb6 Binary files /dev/null and b/src/__pycache__/install_requirements.cpython-310.pyc differ diff --git a/src/__pycache__/live_preview_helpers.cpython-310.pyc b/src/__pycache__/live_preview_helpers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a3530a09059ceb7441130860d8a06b5833284fe0 Binary files /dev/null and b/src/__pycache__/live_preview_helpers.cpython-310.pyc differ diff --git a/src/__pycache__/live_preview_helpers.cpython-311.pyc b/src/__pycache__/live_preview_helpers.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e2c607beeb171f2b81b3cbc8a4524a415307420f Binary files /dev/null and b/src/__pycache__/live_preview_helpers.cpython-311.pyc differ diff --git a/src/__pycache__/volumes_test.cpython-310.pyc b/src/__pycache__/volumes_test.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1467073229bd8aa7ff8c221f7754bbc59a85eefe Binary files /dev/null and b/src/__pycache__/volumes_test.cpython-310.pyc differ diff --git a/src/check_dependecies.py b/src/check_dependecies.py new file mode 100644 index 0000000000000000000000000000000000000000..51dc51e6db36ee41755232de3a83141ea08ae41b --- /dev/null +++ b/src/check_dependecies.py @@ -0,0 +1,22 @@ +print("Running debug check...") +# Debug function to check installed packages +def check_dependencies(): + packages = [ + "diffusers", # For Stable Diffusion + "transformers", # For Hugging Face models + "torch", # PyTorch + "accelerate", # For distributed training/inference + "gradio", # For the Gradio interface (updated to latest version) + "safetensors", # For safe model loading + "pillow", # For image processing + "sentencepiece", + "gguf", + ] + + for package in packages: + try: + import importlib + module = importlib.import_module(package) + print(f" {package} is installed. 
Version:") + except ImportError: + print(f" {package} is NOT installed.") diff --git a/src/img_gen.py b/src/img_gen.py index 558f85d61b6fd50fd8b47e51aaa4a277c741657c..b48d59f3754c00b3ee7192acc70859a266547fe1 100644 --- a/src/img_gen.py +++ b/src/img_gen.py @@ -2,24 +2,36 @@ import sys import os import random -from huggingface_hub import InferenceClient +from huggingface_hub import InferenceClient, login from datetime import datetime from config.config import models, prompts, api_token # Direct import -def generate(prompt_alias, team_color, model_alias, custom_prompt, height=360, width=640, num_inference_steps=20, guidance_scale=2.0, seed=-1): +def generate_image( + prompt_alias, + team_color, + custom_prompt, + model_alias="FLUX.1-dev", + width=640, + height=360, + num_inference_steps=20, + guidance_scale=2.0, + seed=-1): + + # LOAD CSS try: - # Generate the image - image_path, message = generate_image(prompt_alias, team_color, model_alias, custom_prompt, height, width, num_inference_steps, guidance_scale, seed) - return image_path, message - except Exception as e: - return None, f"An error occurred: {e}" + with open("src/aaa.css", "r") as f: + custom_css = f.read() + except FileNotFoundError: + print("Error: aaa.css not found!") + custom_css = "" # Or provide default CSS -def generate_image(prompt_alias, team_color, model_alias, custom_prompt, height=360, width=640, num_inference_steps=20, guidance_scale=2.0, seed=-1): # Find the selected prompt and model try: prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"] - model_name = next(m for m in models if m["alias"] == model_alias)["name"] + #model_name = next(m for m in models if m["alias"] == model_alias)["name"] + model_name = f"black-forest-labs/{model_alias}" + except StopIteration: return None, "ERROR: Invalid prompt or model selected." @@ -39,26 +51,38 @@ def generate_image(prompt_alias, team_color, model_alias, custom_prompt, height= prompt = prompt.format(team_color=team_color.lower(), enemy_color=enemy_color) - # Print the formatted prompt for debugging - print("\nFormatted Prompt:") - print(prompt) - # Append the custom prompt (if provided) if custom_prompt and len(custom_prompt.strip()) > 0: prompt += " " + custom_prompt.strip() + # Print the formatted prompt for debugging + print("\nFormatted Prompt:") + print(prompt) + # Randomize the seed if needed if seed == -1: seed = random.randint(0, 1000000) + # HF LOGIN + print("Initializing HF TOKEN") + print (api_token) + # login(token=api_token) + # print("model_name:") + # print(model_name) + + # Initialize the InferenceClient try: + print("-----INITIALIZING INFERENCE-----") client = InferenceClient(model_name, token=api_token) + print("Inference activated") except Exception as e: return None, f"ERROR: Failed to initialize InferenceClient. Details: {e}" #Generate the image try: + print("-----GENERATING IMAGE-----") + print("-----HOLD ON-----") image = client.text_to_image( prompt, guidance_scale=guidance_scale, @@ -67,17 +91,21 @@ def generate_image(prompt_alias, team_color, model_alias, custom_prompt, height= height=height, seed=seed ) + print("-----IMAGE GENERATED SUCCESSFULLY!-----") except Exception as e: return None, f"ERROR: Failed to generate image. 
Details: {e}" - #return prompt # For testing purposes, return the formatted prompt - # Save the image with a timestamped filename + print("-----SAVING-----", image) + path = "images" + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") - output_filename = f"{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team_color.lower()}.png" + output_filename = f"{path}/{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team_color.lower()}.png" try: image.save(output_filename) except Exception as e: return None, f"ERROR: Failed to save image. Details: {e}" + print("-----DONE!-----") + print("-----CALL THE BANNERS!-----") return output_filename, "Image generated successfully!" \ No newline at end of file diff --git a/src/img_gen_colab.py b/src/img_gen_colab.py index 72e727b70e3d43ce2f01cc017e697df03e7d2d4c..c4ffd4c463c4e26e91713d1fdba7300eff22c457 100644 --- a/src/img_gen_colab.py +++ b/src/img_gen_colab.py @@ -4,7 +4,7 @@ from PIL import Image import random from datetime import datetime -def generate_image(prompt, team_color, model_name, height, width, num_inference_steps, guidance_scale, seed, custom_prompt, api_token, randomize_seed=True): +def generate_image(prompt, team_color, model_name, width, height, num_inference_steps, guidance_scale, seed, custom_prompt, api_token, randomize_seed=True): """ Generate an image using the Hugging Face Inference API. diff --git a/src/img_gen_modal.py b/src/img_gen_modal.py index 98950d6bf800fbea0380d8ef75bf27989c79bda1..9993a3278845245898e47a3b301fd7900e97df1b 100644 --- a/src/img_gen_modal.py +++ b/src/img_gen_modal.py @@ -1,88 +1,290 @@ #img_gen_modal.py import modal -import sys +import random +import io +from config.config import prompts, models_modal # Indirect import import os +import gradio as gr + +#MOVED FROM IMAGE IMPORT LIST +import torch +import sentencepiece +import torch +from huggingface_hub import login +from transformers import AutoTokenizer import random from datetime import datetime -import random -import io +from diffusers.callbacks import SDXLCFGCutoffCallback +from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline, AutoencoderTiny, AutoencoderKL, DiffusionPipeline, FluxTransformer2DModel, GGUFQuantizationConfig +from PIL import Image +from src.check_dependecies import check_dependencies +import numpy as np + +#import xformers -@app.function( - image=image, - #gpu="T4", - timeout=600 +from live_preview_helpers import flux_pipe_call_that_returns_an_iterable_of_images + +MAX_SEED = np.iinfo(np.int32).max +MAX_IMAGE_SIZE = 2048 + +CACHE_DIR = "/model_cache" + +# Define the Modal image +image = ( + modal.Image.from_registry("nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.9") + .pip_install_from_requirements("requirements.txt") + #modal.Image.debian_slim(python_version="3.9") # Base image + # .apt_install( + # "git", + # ) + # .pip_install( + # "diffusers", + # f"git+https://github.com/huggingface/transformers.git" + # ) + .env( + { + "HF_HUB_ENABLE_HF_TRANSFER": "1", "HF_HOME": "HF_HOME", "HF_HUB_CACHE": CACHE_DIR + } + ) ) -def generate_image(prompt_alias, team_color, model_alias, custom_prompt, height=360, width=640, - num_inference_steps=20, guidance_scale=2.0, seed=-1): - import torch - from diffusers import StableDiffusionPipeline - - # Find the selected prompt and model - try: - prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"] - model_name = next(m for m in models if m["alias"] == model_alias)["name"] - 
except StopIteration: - return None, "ERROR: Invalid prompt or model selected." - - # Determine the enemy color - enemy_color = "blue" if team_color.lower() == "red" else "red" - - # Print the original prompt and dynamic values for debugging - print("Original Prompt:") - print(prompt) - print(f"Enemy Color: {enemy_color}") - print(f"Team Color: {team_color.lower()}") - - # Format the prompt - prompt = prompt.format(team_color=team_color.lower(), enemy_color=enemy_color) + +# Create a Modal app +app = modal.App("img-gen-modal", image=image) +with image.imports(): + import os + +flux_model_vol = modal.Volume.from_name("flux-model-vol", create_if_missing=True) # Reference your volume + +# GPU FUNCTION +@app.function(volumes={"/data": flux_model_vol}, + secrets=[modal.Secret.from_name("huggingface-token")], + gpu="L40S", + timeout = 300 + ) +def generate_image_gpu(prompt_alias, team_color, model_alias, custom_prompt): + image = generate_image(prompt_alias, team_color, model_alias, custom_prompt) + return image, "Image generated successfully! Call the banners!" + + +# CPU FUNCTION +@app.function(volumes={"/data": flux_model_vol}, + secrets=[modal.Secret.from_name("huggingface-token")], + cpu = 1, + timeout = 30000 + ) +def generate_image_cpu(prompt_alias, team_color, model_alias, custom_prompt): + image = generate_image(prompt_alias, team_color, model_alias, custom_prompt) + return image, "Image generated successfully! Call the banners!" + +# MAIN GENERATE IMAGE FUNCTION +def generate_image( + prompt_alias, + team_color, + model_alias, + custom_prompt, + width=640, + height=360, + num_inference_steps=20, + guidance_scale=2.0, + seed=-1, + progress=gr.Progress(track_tqdm=True) # Add progress parameter + ): + with modal.enable_output(): + print("Hello from ctb_modal!") + + check_dependencies() + + # Find the selected prompt and model + try: + prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"] + model_name = next(m for m in models_modal if m["alias"] == model_alias)["name"] + except StopIteration: + return None, "ERROR: Invalid prompt or model selected." + + # Determine the enemy color + enemy_color = "blue" if team_color.lower() == "red" else "red" + + # Print the original prompt and dynamic values for debugging + print("Original Prompt:") + print(prompt) + print(f"Enemy Color: {enemy_color}") + print(f"Team Color: {team_color.lower()}") + + prompt = prompt.format(team_color=team_color.lower(), enemy_color=enemy_color) + + # Print the formatted prompt for debugging + print("\nFormatted Prompt:") + print(prompt) + + # Append the custom prompt (if provided) + if custom_prompt and len(custom_prompt.strip()) > 0: + prompt += " " + custom_prompt.strip() + + # Randomize the seed if needed + if seed == -1: + seed = random.randint(0, 1000000) + + try: + from diffusers import FluxPipeline + print("Initializing HF TOKEN") + hf_token = os.environ["HF_TOKEN"] + print(hf_token) + print("HF TOKEN:") + login(token=hf_token) + print("model_name:") + print(model_name) + + # Use absolute path with leading slash + model_path = f"/data/{model_name}" # Changed from "data/" to "/data/" + print(f"Loading model from local path: {model_path}") + + # Debug: Check if the directory exists and list its contents + if os.path.exists(model_path): + print("Directory exists. 
Contents:") + for item in os.listdir(model_path): + print(f" - {item}") + else: + print(f"Directory does not exist: {model_path}") + print("Contents of /data:") + print(os.listdir("/data")) + # CHECK FOR TORCH USING CUDA + print("CHECK FOR TORCH USING CUDA") + print(f"CUDA available: {torch.cuda.is_available()}") + if torch.cuda.is_available(): + print("inside if") + print(f"CUDA device count: {torch.cuda.device_count()}") + print(f"Current device: {torch.cuda.current_device()}") + print(f"Device name: {torch.cuda.get_device_name(torch.cuda.current_device())}") + + + ########## INITIALIZING CPU PIPE ########## - # Print the formatted prompt for debugging - print("\nFormatted Prompt:") - print(prompt) - - # Append custom prompt if provided - if custom_prompt and len(custom_prompt.strip()) > 0: - prompt += " " + custom_prompt.strip() - - # Randomize seed if needed - if seed == -1: - seed = random.randint(0, 1000000) - - # Initialize the pipeline - pipe = StableDiffusionPipeline.from_pretrained( - model_name, - torch_dtype=torch.float16, - use_safetensors=True, - variant="fp16" - ) - pipe.to("cpu") - - # Generate the image - try: - image = pipe( - prompt, - guidance_scale=guidance_scale, - num_inference_steps=num_inference_steps, - width=width, - height=height, - generator=torch.Generator("cuda").manual_seed(seed) - ).images[0] + # ########## LIVE PREVIEW FROM REPO DEMO ########## + # print("-----INITIALIZING LIVE PREVIEW CODE FROM DEMO -----") + # dtype = torch.bfloat16 + # device = "cuda" if torch.cuda.is_available() else "cpu" + + # taef1 = AutoencoderTiny.from_pretrained("/data/taef1", torch_dtype=dtype).to(device) + # good_vae = AutoencoderKL.from_pretrained(model_path, subfolder="vae", torch_dtype=dtype).to(device) + # pipe = DiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype, vae=taef1).to(device) + # torch.cuda.empty_cache() + + # pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe) + # print("-----INITIALIZING LIVE PREVIEW CODE FROM DEMO PART2-----") + # seed = random.randint(0, MAX_SEED) + # generator = torch.Generator().manual_seed(seed) + + # for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images( + # prompt=prompt, + # guidance_scale=guidance_scale, + # num_inference_steps=num_inference_steps, + # width=width, + # height=height, + # generator=generator, + # output_type="pil", + # good_vae=good_vae, + # ): + # yield img, seed + # ############################################################ + + + print("-----INITIALIZING PIPE-----") + pipe = FluxPipeline.from_pretrained( + model_path, + torch_dtype=torch.bfloat16, + #torch_dtype=torch.float16, + #torch_dtype=torch.float32, + #vae=taef1, + local_files_only=True, + ) + #torch.cuda.empty_cache() + + if torch.cuda.is_available(): + print("CUDA available") + print("using gpu") + pipe = pipe.to("cuda") + pipe_message = "CUDA" + #pipe.enable_model_cpu_offload() # official recommended method but is running slower w it + + else: + print("CUDA not available") + print("using cpu") + pipe = pipe.to("cpu") + pipe_message = "CPU" + + + print(f"-----{pipe_message} PIPE INITIALIZED-----") + print(f"Using device: {pipe.device}") + except Exception as e: + print(f"Detailed error: {str(e)}") + return None, f"ERROR: Failed to initialize PIPE2. 
Details: {e}" + try: + print("-----SENDING IMG GEN TO PIPE-----") + print("-----HOLD ON-----") + + + + # ########## LATENTS ########## + # # live preview function to get the latents + # # official reference guideline + # def latents_to_rgb(latents): + # weights = ( + # (60, -60, 25, -70), + # (60, -5, 15, -50), + # (60, 10, -5, -35), + # ) + + # weights_tensor = torch.t(torch.tensor(weights, dtype=latents.dtype).to(latents.device)) + # biases_tensor = torch.tensor((150, 140, 130), dtype=latents.dtype).to(latents.device) + # rgb_tensor = torch.einsum("...lxy,lr -> ...rxy", latents, weights_tensor) + biases_tensor.unsqueeze(-1).unsqueeze(-1) + # image_array = rgb_tensor.clamp(0, 255).byte().cpu().numpy().transpose(1, 2, 0) + + # return Image.fromarray(image_array) + + # def decode_tensors(pipe, step, timestep, callback_kwargs): + # latents = callback_kwargs["latents"] + + # image = latents_to_rgb(latents[0]) + # image.save(f"{step}.png") + + # return callback_kwargs + # ############################################################ + + + + + ########## SENDING IMG GEN TO PIPE - WORKING CODE ########## + image = pipe( + prompt, + guidance_scale=guidance_scale, + num_inference_steps=num_inference_steps, + width=width, + height=height, + max_sequence_length=512, + #callback_on_step_end=decode_tensors, + #callback_on_step_end_tensor_inputs=["latents"], + # seed=seed + ).images[0] + ############################################################# + + print("-----IMAGE GENERATED SUCCESSFULLY!-----") + print(image) + + except Exception as e: + return f"ERROR: Failed to initialize InferenceClient. Details: {e}" - # Convert PIL image to bytes - img_byte_arr = io.BytesIO() - image.save(img_byte_arr, format='PNG') - img_byte_arr = img_byte_arr.getvalue() - except Exception as e: - return None, f"ERROR: Failed to generate image. Details: {e}" - - # Save the image with a timestamped filename - timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") - output_filename = f"{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team_color.lower()}.png" - try: - image.save(output_filename) - except Exception as e: - return img_byte_arr, "Image generated successfully!" - except Exception as e: - return None, f"ERROR: Failed to generate image. Details: {e}" - return output_filename, "Image generated successfully!" \ No newline at end of file + try: + print("-----SAVING-----") + # Save the image with a timestamped filename + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + output_filename = f"/data/images/{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team_color.lower()}.png" + # Save the image using PIL's save method + image.save(output_filename) + print("-----DONE!-----") + print("-----CALL THE BANNERS!-----") + print(f"File path: {output_filename}") + except Exception as e: + print(f"ERROR: Failed to save image. 
Details: {e}") + # Return the filename and success message + return image \ No newline at end of file diff --git a/src/live_preview_helpers.py b/src/live_preview_helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..1aa8143aa10b546c9db9d2392e07816df6666cd7 --- /dev/null +++ b/src/live_preview_helpers.py @@ -0,0 +1,166 @@ +import torch +import numpy as np +from diffusers import FluxPipeline, AutoencoderTiny, FlowMatchEulerDiscreteScheduler +from typing import Any, Dict, List, Optional, Union + +# Helper functions +def calculate_shift( + image_seq_len, + base_seq_len: int = 256, + max_seq_len: int = 4096, + base_shift: float = 0.5, + max_shift: float = 1.16, +): + m = (max_shift - base_shift) / (max_seq_len - base_seq_len) + b = base_shift - m * base_seq_len + mu = image_seq_len * m + b + return mu + +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + +# FLUX pipeline function +@torch.inference_mode() +def flux_pipe_call_that_returns_an_iterable_of_images( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 28, + timesteps: List[int] = None, + guidance_scale: float = 3.5, + num_images_per_prompt: Optional[int] = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + joint_attention_kwargs: Optional[Dict[str, Any]] = None, + max_sequence_length: int = 512, + good_vae: Optional[Any] = None, +): + height = height or self.default_sample_size * self.vae_scale_factor + width = width or self.default_sample_size * self.vae_scale_factor + + # 1. Check inputs + self.check_inputs( + prompt, + prompt_2, + height, + width, + prompt_embeds=prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + max_sequence_length=max_sequence_length, + ) + + self._guidance_scale = guidance_scale + self._joint_attention_kwargs = joint_attention_kwargs + self._interrupt = False + + # 2. Define call parameters + batch_size = 1 if isinstance(prompt, str) else len(prompt) + device = self._execution_device + + # 3. 
Encode prompt + lora_scale = joint_attention_kwargs.get("scale", None) if joint_attention_kwargs is not None else None + prompt_embeds, pooled_prompt_embeds, text_ids = self.encode_prompt( + prompt=prompt, + prompt_2=prompt_2, + prompt_embeds=prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + device=device, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + lora_scale=lora_scale, + ) + # 4. Prepare latent variables + num_channels_latents = self.transformer.config.in_channels // 4 + latents, latent_image_ids = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + # 5. Prepare timesteps + sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps) + image_seq_len = latents.shape[1] + mu = calculate_shift( + image_seq_len, + self.scheduler.config.base_image_seq_len, + self.scheduler.config.max_image_seq_len, + self.scheduler.config.base_shift, + self.scheduler.config.max_shift, + ) + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, + num_inference_steps, + device, + timesteps, + sigmas, + mu=mu, + ) + self._num_timesteps = len(timesteps) + + # Handle guidance + guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32).expand(latents.shape[0]) if self.transformer.config.guidance_embeds else None + + # 6. Denoising loop + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + timestep = t.expand(latents.shape[0]).to(latents.dtype) + + noise_pred = self.transformer( + hidden_states=latents, + timestep=timestep / 1000, + guidance=guidance, + pooled_projections=pooled_prompt_embeds, + encoder_hidden_states=prompt_embeds, + txt_ids=text_ids, + img_ids=latent_image_ids, + joint_attention_kwargs=self.joint_attention_kwargs, + return_dict=False, + )[0] + # Yield intermediate result + latents_for_image = self._unpack_latents(latents, height, width, self.vae_scale_factor) + latents_for_image = (latents_for_image / self.vae.config.scaling_factor) + self.vae.config.shift_factor + image = self.vae.decode(latents_for_image, return_dict=False)[0] + yield self.image_processor.postprocess(image, output_type=output_type)[0] + + latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] + torch.cuda.empty_cache() + + # Final image using good_vae + latents = self._unpack_latents(latents, height, width, self.vae_scale_factor) + latents = (latents / good_vae.config.scaling_factor) + good_vae.config.shift_factor + image = good_vae.decode(latents, return_dict=False)[0] + self.maybe_free_model_hooks() + torch.cuda.empty_cache() + yield self.image_processor.postprocess(image, output_type=output_type)[0] \ No newline at end of file diff --git a/src/load_quanta.py b/src/load_quanta.py new file mode 100644 index 0000000000000000000000000000000000000000..1afaded5a8d965cd0f15c5ddb3bf1f7cc55c96eb --- /dev/null +++ b/src/load_quanta.py @@ -0,0 +1,22 @@ +print("-----LOADING QUANTA-----") +ckpt_path = ( + "/data/FLUX.1-dev-gguf/flux1-dev-Q8_0.gguf" +) +transformer = FluxTransformer2DModel.from_single_file( + ckpt_path, + quantization_config=GGUFQuantizationConfig(compute_dtype=torch.bfloat16), + torch_dtype=torch.bfloat16, +) + + +print("-----INITIALIZING PIPE-----") +pipe = FluxPipeline.from_pretrained( + local_path, + torch_dtype=torch.bfloat16, + transformer=transformer, + #torch_dtype=torch.float16, + #torch_dtype=torch.float32, + #vae=taef1, + local_files_only=True, +) 
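load_quanta.py references a local_path that is not defined in the file itself. A minimal usage sketch for the quantized pipeline, assuming local_path points at the FLUX.1-dev directory on the mounted volume and that torch and FluxPipeline are already imported as in the surrounding code:

# Assumed to be defined earlier in the calling script (not present in load_quanta.py):
# local_path = "/data/FLUX.1-dev"
device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = pipe.to(device)
result = pipe(
    "a knight raising a banner",           # placeholder prompt
    num_inference_steps=20,
    guidance_scale=3.5,
    max_sequence_length=512,
    generator=torch.Generator(device).manual_seed(0),
)
result.images[0].save("flux-gguf-test.png")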
+#torch.cuda.empty_cache() \ No newline at end of file diff --git a/tests/test1.py b/tests/test1.py new file mode 100644 index 0000000000000000000000000000000000000000..c5ff4507b39999c68a3bfee153ab2b6696c0ea52 --- /dev/null +++ b/tests/test1.py @@ -0,0 +1,84 @@ +import modal +from src.gradio_interface import demo + +# Print debug information +print("Importing Modal and setting up the app...") + +# Define the Modal app +app = modal.App(name="example-app") + +# Define a custom image with Python and some dependencies +print("Building custom image...") +image = ( + modal.Image.debian_slim(python_version="3.11") # Base image + .pip_install( + "numpy", + "pandas", + "diffusers", + "transformers", + "torch", + "accelerate", + "gradio", + "safetensors", + "pillow", + ) # Install Python packages + .run_commands("echo 'Image build complete!'") # Run a shell command +) + +# Define a function to run inside the container +@app.function(image=image) +def main(): + # Debug: Print a message when the function starts + print("Starting main function inside the container...") + + # Import libraries and print their versions + import numpy as np + import pandas as pd + import torch + import diffusers + import transformers + import gradio as gr + from PIL import Image as PILImage + + + + print("Hello from Modal!") + print("NumPy version:", np.__version__) + print("Pandas version:", pd.__version__) + print("PyTorch version:", torch.__version__) + print("Diffusers version:", diffusers.__version__) # Corrected: Use the library's __version__ + print("Transformers version:", transformers.__version__) # Corrected: Use the library's __version__ + print("Gradio version:", gr.__version__) + print("Pillow version:", PILImage.__version__) + + # Create a simple DataFrame + df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) + print("DataFrame:\n", df) + + # Test PyTorch + tensor = torch.tensor([1, 2, 3]) + print("PyTorch tensor:", tensor) + + # Test Diffusers (load a simple pipeline) + print("Loading Diffusers pipeline...") + pipe = diffusers.DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") + print("Diffusers pipeline loaded successfully!") + + # Test Gradio (create a simple interface) + def greet(name): + return f"Hello {name}!" + + print("Creating Gradio interface...") + iface = gr.Interface(fn=greet, inputs="text", outputs="text") + print("Gradio interface created successfully!") + + # Debug: Print a message when the function ends + print("Main function execution complete!") + + # Launch gradio-interface + demo.launch() + +# Run the function locally (for testing) +if __name__ == "__main__": + print("Running the function locally...") + main.local() \ No newline at end of file diff --git a/tests/test2.py b/tests/test2.py new file mode 100644 index 0000000000000000000000000000000000000000..77b79ef46580d7029aaa963e2499c4e026f90b6b --- /dev/null +++ b/tests/test2.py @@ -0,0 +1,57 @@ +# gradio_interface.py +import gradio as gr +import modal +from config.config import prompts, models # Direct import +from src.img_gen_modal import generate + +def on_button_click(): + f = modal.Function.from_name("functions-app", "message") + messageNEW = "Remote call Hello World!" 
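# modal.Function.from_name("functions-app", "message") above looks up a function named
# "message" on an already deployed Modal app called "functions-app"; the f.remote(...)
# call on the next line then executes that function in Modal's cloud with messageNEW as
# its argument (the extra parentheses around messageNEW are redundant, they do not form
# a tuple). The app and function names are whatever the separate "functions-app"
# deployment defines.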
+ f.remote((messageNEW)) + #return message.remote((messageNEW)) + + +# generate_button.click( +# generate, +# inputs=[prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input], +# outputs=[output_image, status_text] +# ) + + +# Gradio Interface +with gr.Blocks() as demo: + gr.Markdown("# CtB AI Image Generator") + with gr.Row(): + # Set default values for dropdowns + prompt_dropdown = gr.Dropdown(choices=[p["alias"] for p in prompts], label="Select Prompt", value=prompts[0]["alias"]) + team_dropdown = gr.Dropdown(choices=["Red", "Blue"], label="Select Team", value="Red") + model_dropdown = gr.Dropdown(choices=[m["alias"] for m in models], label="Select Model", value=models[0]["alias"]) + with gr.Row(): + # Add a text box for custom user input (max 200 characters) + custom_prompt_input = gr.Textbox(label="Custom Prompt (Optional)", placeholder="Enter additional details (max 200 chars)...", max_lines=1, max_length=200) + with gr.Row(): + generate_button = gr.Button("Generate Image") + output = gr.Textbox(label="Output") + # Set up the button click event + generate_button.click(fn=on_button_click, outputs=output) + with gr.Row(): + output_image = gr.Image(label="Generated Image") + with gr.Row(): + status_text = gr.Textbox(label="Status", placeholder="Waiting for input...", interactive=False) + print("Building cudasdasrer...") + + +demo.launch() + + # Connect the button to the function + # generate_button.click( + # generate, + # inputs=[prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input], + # outputs=[output_image, status_text] + # ) + + + + + + diff --git a/tests/test4.py b/tests/test4.py new file mode 100644 index 0000000000000000000000000000000000000000..43ff0b098a09beee021b03fa995c2a732834bf52 --- /dev/null +++ b/tests/test4.py @@ -0,0 +1,108 @@ +import time +from io import BytesIO +from pathlib import Path +import modal + +flux_image = ( + cuda_dev_image.apt_install( + "git", + "libglib2.0-0", + "libsm6", + "libxrender1", + "libxext6", + "ffmpeg", + "libgl1", + ) + .pip_install( + "invisible_watermark==0.2.0", + "transformers==4.44.0", + "huggingface_hub[hf_transfer]==0.26.2", + "accelerate==0.33.0", + "safetensors==0.4.4", + "sentencepiece==0.2.0", + "torch==2.5.0", + f"git+https://github.com/huggingface/diffusers.git@{diffusers_commit_sha}", + "numpy<2", + ) + #.env({"HF_TOKEN": "1", "HF_HUB_CACHE_DIR": "/cache"}) + ) + + + # flux_image = flux_image.env( + # { + # "TORCHINDUCTOR_CACHE_DIR": "/root/.inductor-cache", + # "TORCHINDUCTOR_FX_GRAPH_CACHE": "1", + # } + # ) + + + + with flux_image.imports(): + import torch + from diffusers import FluxPipeline + + MINUTES = 60 # seconds + VARIANT = "schnell" # or "dev", but note [dev] requires you to accept terms and conditions on HF + NUM_INFERENCE_STEPS = 40 # use ~50 for [dev], smaller for [schnell] + + +app = modal.App("example-flux", image=flux_image) + +@app.local_entrypoint() +def main (): + cuda_version = "12.4.0" # should be no greater than host CUDA version + flavor = "devel" # includes full CUDA toolkit + operating_sys = "ubuntu22.04" + tag = f"{cuda_version}-{flavor}-{operating_sys}" + + cuda_dev_image = modal.Image.from_registry( + f"nvidia/cuda:{tag}", add_python="3.11" + ).entrypoint([]) + + + + diffusers_commit_sha = "81cf3b2f155f1de322079af28f625349ee21ec6b" + + + @app.cls( + gpu="H100", # fastest GPU on Modal + container_idle_timeout=20 * MINUTES, + timeout=60 * MINUTES, # leave plenty of time for compilation + volumes={ # add Volumes to store serializable compilation artifacts, see section on 
torch.compile below + "/cache": modal.Volume.from_name( + "hf-hub-cache", create_if_missing=True + ), + "/root/.nv": modal.Volume.from_name("nv-cache", create_if_missing=True), + "/root/.triton": modal.Volume.from_name( + "triton-cache", create_if_missing=True + ), + "/root/.inductor-cache": modal.Volume.from_name( + "inductor-cache", create_if_missing=True + ), + }, + ) + class Model: + compile: int = ( # see section on torch.compile below for details + modal.parameter(default=0) + ) + + @modal.enter() + def enter(self): + pipe = FluxPipeline.from_pretrained( + f"black-forest-labs/FLUX.1-{VARIANT}", torch_dtype=torch.bfloat16 + ).to("cuda") # move model to GPU + self.pipe = optimize(pipe, compile=bool(self.compile)) + + @modal.method() + def inference(self, prompt: str) -> bytes: + print("🎨 generating image...") + out = self.pipe( + prompt, + output_type="pil", + num_inference_steps=NUM_INFERENCE_STEPS, + ).images[0] + + byte_stream = BytesIO() + out.save(byte_stream, format="JPEG") + return byte_stream.getvalue() + diff --git a/tools/__pycache__/download_flux_gguf_modal_HF.cpython-311.pyc b/tools/__pycache__/download_flux_gguf_modal_HF.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3068c534d95d8ba227173f4c35e410f47b553f06 Binary files /dev/null and b/tools/__pycache__/download_flux_gguf_modal_HF.cpython-311.pyc differ diff --git a/tools/__pycache__/download_flux_modal_HF.cpython-311.pyc b/tools/__pycache__/download_flux_modal_HF.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..16f25d810d515bb7609673956977685a6f9cda4b Binary files /dev/null and b/tools/__pycache__/download_flux_modal_HF.cpython-311.pyc differ diff --git a/tools/__pycache__/see_vol_data.cpython-310.pyc b/tools/__pycache__/see_vol_data.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d2bbc140f1b6361010c630a442f9c43fa6f244d0 Binary files /dev/null and b/tools/__pycache__/see_vol_data.cpython-310.pyc differ diff --git a/tools/__pycache__/tools_dowload_quanta_dir.cpython-311.pyc b/tools/__pycache__/tools_dowload_quanta_dir.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd0de6ab0a4ce74ac518629526a4a6aede96178c Binary files /dev/null and b/tools/__pycache__/tools_dowload_quanta_dir.cpython-311.pyc differ diff --git a/tools/check_processor.py b/tools/check_processor.py new file mode 100644 index 0000000000000000000000000000000000000000..9fbb33e83fca6d4e53592a94c17e7bcfc6c25c51 --- /dev/null +++ b/tools/check_processor.py @@ -0,0 +1,23 @@ +import torch +print(f"Current device: {torch.cuda.current_device()}") # Shows current GPU device number +print(f"Device being used: {next(model.parameters()).device}") # Shows device for a specific model +print(f"Is CUDA available? 
{torch.cuda.is_available()}") # Checks if CUDA is available + +import tensorflow as tf +print(f"Devices available: {tf.config.list_physical_devices()}") +print(f"Using GPU: {tf.test.is_gpu_available()}") + +import psutil +import platform + +print(f"CPU count: {psutil.cpu_count()}") +print(f"CPU info: {platform.processor()}") + +# Requires nvidia-smi +import subprocess +try: + print(subprocess.check_output(['nvidia-smi']).decode()) +except: + print("nvidia-smi not available") + + \ No newline at end of file diff --git a/tools/download_flux_HF.py b/tools/download_flux_HF.py new file mode 100644 index 0000000000000000000000000000000000000000..6236469140b5abec1dd38c09d004a00223e9f76e --- /dev/null +++ b/tools/download_flux_HF.py @@ -0,0 +1,25 @@ +import os + + +from huggingface_hub import snapshot_download +import transformers + +repo_id = "black-forest-labs/FLUX.1-dev" +local_dir = "models/FLUX.1-dev" + +# **FASTEST METHOD:** Use max_workers for parallel download +print("Calling snapshot_download") +snapshot_download( + repo_id, + local_dir=local_dir, + revision="main", + #ignore_patterns=["*.pt", "*.bin"], # Skip large model weights + max_workers=8 # Higher concurrency for parallel chunk downloads +) +print("Called snapshot_download") + + +transformers.utils.move_cache() +print(f"Model downloaded to {local_dir}") + + diff --git a/tools/download_flux_gguf_modal_HF.py b/tools/download_flux_gguf_modal_HF.py new file mode 100644 index 0000000000000000000000000000000000000000..3f2da0f2a3e927e1d09908358de0fc6c85a8d758 --- /dev/null +++ b/tools/download_flux_gguf_modal_HF.py @@ -0,0 +1,45 @@ +################# RUN IT W MODAL RUN TO DOWNLOAD ON MODAL VOLUME ########### + + +import modal +import os + +app = modal.App("flux-model-setup") + +# Persistent volume for storing models +volume = modal.Volume.from_name("flux-model-vol", create_if_missing=True) + +# Image with dependencies +download_image = ( + modal.Image.debian_slim() + .pip_install("huggingface_hub[hf_transfer]", "transformers", "aria2") # aria2 for ultra-fast parallel downloads + .env({"HF_HUB_ENABLE_HF_TRANSFER": "1"}) # Enable fast Rust-based downloads +) + +@app.function( + volumes={"/data": volume}, + image=download_image, + secrets=[modal.Secret.from_name("huggingface-token")] +) +def download_flux(): + from huggingface_hub import snapshot_download + import transformers + + repo_id = "city96/FLUX.1-dev-gguf" + local_dir = "/data/FLUX.1-dev-gguf" + + # **FASTEST METHOD:** Use max_workers for parallel download + snapshot_download( + repo_id, + local_dir=local_dir, + revision="main", + #ignore_patterns=["*.pt", "*.bin"], # Skip large model weights + max_workers=8 # Higher concurrency for parallel chunk downloads + ) + + transformers.utils.move_cache() + print(f"Model downloaded to {local_dir}") + +@app.local_entrypoint() +def main(): + download_flux.remote() diff --git a/tools/download_flux_modal_HF.py b/tools/download_flux_modal_HF.py new file mode 100644 index 0000000000000000000000000000000000000000..c6e0bcaff691a72b47b857cfb29c9191cf90beaf --- /dev/null +++ b/tools/download_flux_modal_HF.py @@ -0,0 +1,45 @@ +################# RUN IT W MODAL RUN TO DOWNLOAD ON MODAL VOLUME ########### + + +import modal +import os + +app = modal.App("flux-model-setup") + +# Persistent volume for storing models +volume = modal.Volume.from_name("flux-model-vol", create_if_missing=True) + +# Image with dependencies +download_image = ( + modal.Image.debian_slim() + .pip_install("huggingface_hub[hf_transfer]", "transformers", "aria2") # aria2 for ultra-fast 
parallel downloads + .env({"HF_HUB_ENABLE_HF_TRANSFER": "1"}) # Enable fast Rust-based downloads +) + +@app.function( + volumes={"/data": volume}, + image=download_image, + secrets=[modal.Secret.from_name("huggingface-token")] +) +def download_flux(): + from huggingface_hub import snapshot_download + import transformers + + repo_id = "black-forest-labs/FLUX.1-dev" + local_dir = "/models/" + + # **FASTEST METHOD:** Use max_workers for parallel download + snapshot_download( + repo_id, + local_dir=local_dir, + revision="main", + #ignore_patterns=["*.pt", "*.bin"], # Skip large model weights + max_workers=8 # Higher concurrency for parallel chunk downloads + ) + + transformers.utils.move_cache() + print(f"Model downloaded to {local_dir}") + +@app.local_entrypoint() +def main(): + download_flux.remote() diff --git a/tools/modal_gpu_BENCHMARK.py b/tools/modal_gpu_BENCHMARK.py new file mode 100644 index 0000000000000000000000000000000000000000..2acc78852af3408d51f38c92d85d1b0206f2c740 --- /dev/null +++ b/tools/modal_gpu_BENCHMARK.py @@ -0,0 +1,45 @@ + +# MODAL GPU's + +# REMEMBER TO SET PIPE TO CUDA +pipe = pipe.to("cuda") + + +#-------------CPU-LIST------------------- +# NVIDIA H100 Tensor Core GPU class. +# The flagship data center GPU of the Hopper architecture. Enhanced support for FP8 precision and a Transformer Engine that provides up to 4X faster training over the prior generation for GPT-3 (175B) models. +#BFLOAT16 BENCHMARK - 38sec (~$0.20) +gpu="H100" + +# NVIDIA A100 Tensor Core GPU class. +# The flagship data center GPU of the Ampere architecture. Available in 40GB and 80GB GPU memory configurations. +#BFLOAT16 BENCHMARK - 37sec ($0.10) +gpu="A100" #40gb +#BFLOAT16 BENCHMARK - #42sec (~$0.10) +gpu=modal.gpu.A100(size="80GB") # 80gb - + +# NVIDIA L40S GPU class. +# The L40S is a data center GPU for the Ada Lovelace architecture. It has 48 GB of on-chip GDDR6 RAM and enhanced support for FP8 precision. +#FLOAT32 BENCHMARk - ERROR +#BFLOAT16 BENCHMARK - 28sec ($0.03) +#FLOAT16 BENCHMARK - 50sec +gpu="L40S" # 48gb + +#A mid-tier data center GPU based on the Ampere architecture, providing 24 GB of memory. 10G GPUs deliver up to 3.3x better ML training performance, 3x better ML inference performance, and 3x better graphics performance, in comparison to NVIDIA T4 GPUs. +#BFLOAT16 BENCHMARK - ERROR cuda out of memory - 44 sec (~$0.02 / $0.04) +gpu="A10G" #24gb + +# NVIDIA L4 Tensor Core GPU class. +# A mid-tier data center GPU based on the Ada Lovelace architecture, providing 24GB of GPU memory. Includes RTX (ray tracing) support. +#BFLOAT16 BENCHMARK - ERROR cuda out of memory - 40sec (~$0.02) +gpu="L4" # 48gb + +#NVIDIA T4 Tensor Core GPU class. +#A low-cost data center GPU based on the Turing architecture, providing 16GB of GPU memory. +#BFLOAT16 BENCHMARK - ERROR cuda out of memory - 37sec (~$0.02) +gpu="T4" + +#CPU +#BFLOAT16 BENCHMARK - ERROR out of memory - 37sec (~$0.02) +#FLOAT16 BENCHMARK - ERROR out of memory - 600sec (~$0.10) +cpu=2 \ No newline at end of file diff --git a/tools/temp util codes.py b/tools/temp util codes.py new file mode 100644 index 0000000000000000000000000000000000000000..aff24b2cb0f7f03441d9d74c9b833378e8295c20 --- /dev/null +++ b/tools/temp util codes.py @@ -0,0 +1,47 @@ + + + +#script_execution_control + +# This checks whether the current script is being run as the main program or if it is being imported as a module into another script. +# When a Python script is executed, Python sets the special variable __name__ to "__main__" for that script. 
+# If the script is imported as a module into another script, __name__ is set to the name of the module (e.g., the filename without the .py extension). +# Prevents Unintended Execution: +# If you import this script as a module in another script, the code inside the if __name__ == "__main__": block will not run. This prevents unintended execution of the main() function when the script is imported. + +if __name__ == "__main__": + main() + + + +# HUGGING FACE LOGIN + +login(token=hf_token) + + +# MODAL SECRETS +#Here's how you can pass huggingface-token to your Modal function: + +import os +import modal + +app = modal.App() + +@app.function(secrets=[modal.Secret.from_name("huggingface-token")]) +def f(): + print(os.environ["HF_TOKEN"]) + + +#CONVERT PIL IMGS + + # Convert PIL image to NumPy array + numpy_array = np.array(image) + print(numpy_array.shape) # Should print (352, 640, 3) for height, width, channels + # Convert PIL image to NumPy array + numpy_array = np.array(pil_image) + # Convert RGB to BGR for OpenCV + opencv_image = cv2.cvtColor(numpy_array, cv2.COLOR_RGB2BGR) + # Now you can use it with OpenCV functions + cv2.imshow("OpenCV Image", opencv_image) + cv2.waitKey(0) + cv2.destroyAllWindows() \ No newline at end of file diff --git a/tools/test_volume.py b/tools/test_volume.py new file mode 100644 index 0000000000000000000000000000000000000000..acec9f1a348657501ebbd064203b7bb4854dcaec --- /dev/null +++ b/tools/test_volume.py @@ -0,0 +1,13 @@ + +import modal +import os + +app = modal.App("test-volume") + +@app.function(volumes={"/my_vol": modal.Volume.from_name("flux-model-vol")}) +def test_func(): + print("Contents of the volume:", os.listdir("/my_vol")) + +@app.local_entrypoint() +def main(): + test_func.call() diff --git a/tools/tools_create_dir.py b/tools/tools_create_dir.py new file mode 100644 index 0000000000000000000000000000000000000000..41ed728b94f30f64d2459191fbd87b98c34db888 --- /dev/null +++ b/tools/tools_create_dir.py @@ -0,0 +1,81 @@ +# img_gen.py +#img_gen_modal.py +# img_gen.py +# img_gen_modal.py +import modal +import random +import io +from config.config import prompts, models # Indirect import +import os + + +CACHE_DIR = "/model_cache" + +# Define the Modal image +image = ( + #modal.Image.from_registry("nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.9") + modal.Image.debian_slim(python_version="3.9") # Base image + + .apt_install( + "git", + ) + .pip_install( + + ) + .env( + { + "HF_HUB_ENABLE_HF_TRANSFER": "1", "HF_HOME": "HF_HOME", "HF_HUB_CACHE": CACHE_DIR + } + ) +) + +# Create a Modal app +app = modal.App("tools-test-dir", image=image) +with image.imports(): + import os + from datetime import datetime + +flux_model_vol = modal.Volume.from_name("flux-model-vol", create_if_missing=True) # Reference your volume + +@app.function(volumes={"/data": flux_model_vol}, + secrets=[modal.Secret.from_name("huggingface-token")], + #gpu="a100-80gb" + ) +def test_dir(): + + import os + import urllib.request + + + # Define the path of the new directory + new_directory = "FLUX.1-dev-gguf" + + # Create the directory (and parent directories if needed) + os.makedirs(f"/data/{new_directory}", exist_ok=True) + + url = "https://huggingface.co/city96/FLUX.1-dev-gguf/resolve/main/flux1-dev-Q8_0.gguf" + urllib.request.urlretrieve(url, "/data/FLUX.1-dev-gguf/flux1-dev-Q8_0.gguf") + + print("Download complete!") + + + #print(f"Directory created: {new_directory}") + + # Get the current working directory + current_directory = os.getcwd() + # List the contents of the current 
directory + print("Contents of current modal directory:") + print(os.listdir(current_directory)) + + # VOLUME DIRECTORY + volume_directory = f"{current_directory}/data/" + print(f"Current volume directory: {volume_directory}") + print(os.listdir(volume_directory)) + + + flux_model_vol.de + + + + + diff --git a/tools/tools_dir_structure.py b/tools/tools_dir_structure.py new file mode 100644 index 0000000000000000000000000000000000000000..861461857d6d184bb62bac636e418ce410986bf9 --- /dev/null +++ b/tools/tools_dir_structure.py @@ -0,0 +1,80 @@ +# img_gen.py +#img_gen_modal.py +# img_gen.py +# img_gen_modal.py +import modal +import random +import io +from config.config import prompts, models # Indirect import +import os + + +CACHE_DIR = "/model_cache" + +# Define the Modal image +image = ( + #modal.Image.from_registry("nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.9") + modal.Image.debian_slim(python_version="3.9") # Base image + + .apt_install( + "git", + ) + .pip_install( + "diffusers", + "transformers", + "torch", + "accelerate", + "gradio>=4.44.1", + "safetensors", + "pillow", + "sentencepiece", + "hf_transfer", + "huggingface_hub[hf_transfer]", + "aria2", # aria2 for ultra-fast parallel downloads + f"git+https://github.com/huggingface/transformers.git" + ) + .env( + { + "HF_HUB_ENABLE_HF_TRANSFER": "1", "HF_HOME": "HF_HOME", "HF_HUB_CACHE": CACHE_DIR + } + ) +) + +# Create a Modal app +app = modal.App("tools-test-dir", image=image) +with image.imports(): + import diffusers + import os + import gradio + from datetime import datetime + +flux_model_vol = modal.Volume.from_name("flux-model-vol", create_if_missing=True) # Reference your volume + +@app.function(volumes={"/data": flux_model_vol}, + secrets=[modal.Secret.from_name("huggingface-token")], + #gpu="a100-80gb" + ) +def test_dir(): + + # OS + # Get the current working directory (should be /root) + current_directory = os.getcwd() + print(f"Current working directory: {current_directory}") + + # List the contents of the current directory + print("Contents of current modal directory:") + print(os.listdir(current_directory)) + + # MODAL + print ("MODAL") + print ("MODAL ROOT") + # List contents of the volume + file_entries = flux_model_vol.listdir("/") # Replace "/data" with your volume path + # Extract and print only the paths + paths = [entry.path for entry in file_entries] + + print("Paths in volume:") + print(paths) + + + diff --git a/tools/tools_dowload_quanta_dir.py b/tools/tools_dowload_quanta_dir.py new file mode 100644 index 0000000000000000000000000000000000000000..37d0e343ac0b080e21b6475b6072a319082e31fa --- /dev/null +++ b/tools/tools_dowload_quanta_dir.py @@ -0,0 +1,81 @@ +# img_gen.py +#img_gen_modal.py +# img_gen.py +# img_gen_modal.py +import modal +import random +import io +from config.config import prompts, models # Indirect import +import os + + +CACHE_DIR = "/model_cache" + +# Define the Modal image +image = ( + #modal.Image.from_registry("nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.9") + modal.Image.debian_slim(python_version="3.9") # Base image + + .apt_install( + "git", + ) + .pip_install( + + ) + .env( + { + "HF_HUB_ENABLE_HF_TRANSFER": "1", "HF_HOME": "HF_HOME", "HF_HUB_CACHE": CACHE_DIR + } + ) +) + +# Create a Modal app +app = modal.App("tools-test-dir", image=image) +with image.imports(): + import os + from datetime import datetime + +flux_model_vol = modal.Volume.from_name("flux-model-vol", create_if_missing=True) # Reference your volume + +@app.function(volumes={"/data": flux_model_vol}, + 
secrets=[modal.Secret.from_name("huggingface-token")], + #gpu="a100-80gb" + ) +def test_dir(): + + import os + import urllib.request + + + # Define the path of the new directory + new_directory = "FLUX.1-dev-gguf" + + # Create the directory (and parent directories if needed) + os.makedirs(f"/data/{new_directory}", exist_ok=True) + + url = "https://huggingface.co/city96/FLUX.1-dev-gguf/resolve/main/flux1-dev-Q8_0.gguf" + urllib.request.urlretrieve(url, "/data/FLUX.1-dev-gguf/flux1-dev-Q8_0.gguf") + + print("Download complete!") + + + #print(f"Directory created: {new_directory}") + + # Get the current working directory + current_directory = os.getcwd() + # List the contents of the current directory + print("Contents of current modal directory:") + print(os.listdir(current_directory)) + + # VOLUME DIRECTORY + volume_directory = f"{current_directory}/data/" + print(f"Current volume directory: {volume_directory}") + print(os.listdir(volume_directory)) + + + #flux_model_vol.de + + + + + diff --git a/tools/tools_remove_file.py b/tools/tools_remove_file.py new file mode 100644 index 0000000000000000000000000000000000000000..c54039d2323d4c8e437c7520d88afaad732d6350 --- /dev/null +++ b/tools/tools_remove_file.py @@ -0,0 +1,56 @@ +# img_gen.py +#img_gen_modal.py +# img_gen.py +# img_gen_modal.py +import modal +import random +import io +from config.config import prompts, models # Indirect import +import os + + +CACHE_DIR = "/model_cache" + +# Define the Modal image +image = ( + #modal.Image.from_registry("nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.9") + modal.Image.debian_slim(python_version="3.9") # Base image + + .apt_install( + "git", + ) + .pip_install( + + ) + .env( + { + "HF_HUB_ENABLE_HF_TRANSFER": "1", "HF_HOME": "HF_HOME", "HF_HUB_CACHE": CACHE_DIR + } + ) +) + +# Create a Modal app +app = modal.App("tools-test-dir", image=image) +with image.imports(): + import os + from datetime import datetime + +flux_model_vol = modal.Volume.from_name("flux-model-vol", create_if_missing=True) # Reference your volume + +@app.function(volumes={"/data": flux_model_vol}, + secrets=[modal.Secret.from_name("huggingface-token")], + #gpu="a100-80gb" + ) +def test_dir(): + + import os + + flux_model_vol.remove_file("20250130_185339_flux.1-dev_castle_siege_red.png", recursive = False) + flux_model_vol.remove_file("20250130_185025_flux.1-dev_castle_siege_red.png", recursive = False) + flux_model_vol.remove_file("20250130_184952_flux.1-dev_castle_siege_red.png", recursive = False) + flux_model_vol.remove_file("20250130_184323_flux.1-dev_castle_siege_red.png", recursive = False) + + + + + diff --git a/tools/tools_test_dir.py b/tools/tools_test_dir.py new file mode 100644 index 0000000000000000000000000000000000000000..b63703ad2d8ed0ca08415657785a6b737f4b8455 --- /dev/null +++ b/tools/tools_test_dir.py @@ -0,0 +1,79 @@ +# img_gen.py +#img_gen_modal.py +# img_gen.py +# img_gen_modal.py +import modal +import random +import io +from config.config import prompts, models # Indirect import +import os + + +CACHE_DIR = "/model_cache" + +# Define the Modal image +image = ( + #modal.Image.from_registry("nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.9") + modal.Image.debian_slim(python_version="3.9") # Base image + + .apt_install( + "git", + ) + .pip_install( + "diffusers", + "transformers", + "torch", + "accelerate", + "gradio>=4.44.1", + "safetensors", + "pillow", + "sentencepiece", + "hf_transfer", + "huggingface_hub[hf_transfer]", + "aria2", # aria2 for ultra-fast parallel downloads + 
f"git+https://github.com/huggingface/transformers.git" + ) + .env( + { + "HF_HUB_ENABLE_HF_TRANSFER": "1", "HF_HOME": "HF_HOME", "HF_HUB_CACHE": CACHE_DIR + } + ) +) + +# Create a Modal app +app = modal.App("tools-test-dir", image=image) +with image.imports(): + import diffusers + import os + import gradio + from datetime import datetime + +flux_model_vol = modal.Volume.from_name("flux-model-vol", create_if_missing=True) # Reference your volume + +@app.function(volumes={"/data": flux_model_vol}, + secrets=[modal.Secret.from_name("huggingface-token")], + #gpu="a100-80gb" + ) +def test_dir(): + + #with modal.enable_output(): + print("Hello from TEST DIR!") + + #import os + # Get the current working directory + current_directory = os.getcwd() + print(f"Current working directory: {current_directory}") + + # List the contents of the current directory + print("Contents of current directory:") + print(os.listdir(current_directory)) + + # Define the Modal volume path (replace with your actual volume mount path) + modal_volume_path = "/data/" + + # Check if the Modal volume path exists + if os.path.exists(modal_volume_path): + print(f"Contents of Modal volume at {modal_volume_path}:") + print(os.listdir(modal_volume_path)) + else: + print(f"Modal volume path {modal_volume_path} does not exist.") \ No newline at end of file diff --git a/tools/volumes_test.py b/tools/volumes_test.py new file mode 100644 index 0000000000000000000000000000000000000000..474a5506d385f048d25cd859a5e5e06fb55200f6 --- /dev/null +++ b/tools/volumes_test.py @@ -0,0 +1,41 @@ + +import modal +import sys +import os +import random +from datetime import datetime +import random +import io + +# Create a Modal app +app = modal.App("ctb-image-generator") + +volume = modal.Volume.from_name("flux-model-vol") # Reference your volume + +@app.function( + #gpu="t4", # No GPU needed for debugging volume + volumes={"/model": volume} # Replace `volume` with your actual volume name if needed +) +def debug_volume(): + import os + + # Directory to check inside the container + model_dir = "/model" + + try: + if os.path.exists(model_dir): + print(f"Volume successfully mounted at: {model_dir}") + print("Files and directories in the mounted volume:") + + # List files in the mounted directory + for root, dirs, files in os.walk(model_dir): + print(f"Root: {root}") + for dir_name in dirs: + print(f" Directory: {dir_name}") + for file_name in files: + print(f" File: {file_name}") + else: + print(f"Volume not found at: {model_dir}") + print("Ensure the volume is correctly mounted and mapped.") + except Exception as e: + print(f"Error while accessing volume: {e}")