Andre committed · f601bfb
Parent(s): 90e082e

Update

Files changed:
- colab.ipynb +13 -2
- config_colab.py +5 -3
colab.ipynb CHANGED

@@ -13,8 +13,7 @@
 "from huggingface_hub import InferenceClient\n",
 "from IPython.display import display, clear_output\n",
 "from img_gen_logic import generate_image\n",
-"from config_colab import models, api_token\n",
-"from prompts import prompts # Import prompts from prompts.py\n",
+"from config_colab import models, prompts, api_token\n",
 "\n",
 "\n",
 "# Initialize the InferenceClient with the default model\n",

@@ -132,6 +131,18 @@
 " print(f\"Seed: {seed}\")\n",
 " print(f\"Custom Prompt: {custom_prompt}\")\n",
 "\n",
+" # Debug: Show selected parameters\n",
+" print(\"=== Debug: Selected Parameters ===\")\n",
+" print(f\"Selected Model: {selected_model} (Alias: {model_dropdown.label})\")\n",
+" print(f\"Selected Prompt: {selected_prompt} (Alias: {prompt_dropdown.label})\")\n",
+" print(f\"Selected Team: {selected_team}\")\n",
+" print(f\"Height: {height}\")\n",
+" print(f\"Width: {width}\")\n",
+" print(f\"Inference Steps: {num_inference_steps}\")\n",
+" print(f\"Guidance Scale: {guidance_scale}\")\n",
+" print(f\"Seed: {seed}\")\n",
+" print(f\"Custom Prompt: {custom_prompt}\")\n",
+" print(\"==================================\")\n",
 " # Generate the image\n",
 " image, message = generate_image(selected_prompt, selected_team, selected_model, height, width, num_inference_steps, guidance_scale, seed, custom_prompt)\n",
 "\n",
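With this commit the notebook pulls models, prompts, and api_token from a single config_colab module instead of importing prompts from a separate prompts.py. The new debug lines read model_dropdown.label and prompt_dropdown.label, but the widget setup sits outside these hunks; the following is a minimal sketch of how such dropdowns could be wired with ipywidgets, assuming each models/prompts entry carries an "alias" key (the only key the diff confirms); the option values and descriptions here are assumptions.

import ipywidgets as widgets
from config_colab import models, prompts  # per the updated import above

# Build (label, value) option pairs from the aliases the config exposes.
model_dropdown = widgets.Dropdown(
    options=[(m["alias"], m["name"]) for m in models],
    description="Model:",
)
prompt_dropdown = widgets.Dropdown(
    options=[(p["alias"], p["alias"]) for p in prompts],  # value choice is hypothetical
    description="Prompt:",
)

# A Dropdown exposes .value (the selected option's value) and .label (its
# display name); the added debug prints rely on exactly that .label attribute.
selected_model = model_dropdown.value
selected_prompt = prompt_dropdown.value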
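The cell ends by calling generate_image from img_gen_logic, a file this commit does not touch. For orientation only, here is a hedged stub matching the call signature above; the InferenceClient.text_to_image usage is an assumption based on the notebook's imports rather than code from this repo, and how team is consumed is a guess.

from huggingface_hub import InferenceClient
from config_colab import api_token

def generate_image(prompt, team, model, height, width,
                   num_inference_steps, guidance_scale, seed, custom_prompt):
    """Hypothetical stand-in for img_gen_logic.generate_image."""
    client = InferenceClient(model=model, token=api_token)
    full_prompt = f"{prompt} {custom_prompt}".strip()  # team's real use is not shown
    try:
        image = client.text_to_image(
            full_prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            seed=seed,
        )
        return image, "Image generated successfully."
    except Exception as err:  # surface the failure as the second return value
        return None, f"ERROR: {err}"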
config_colab.py CHANGED

@@ -7,18 +7,20 @@ api_token = userdata.get("HF_CTB_TOKEN")
 
 # Debugging: Check if the Hugging Face token is available
 if not api_token:
+    print("=== Debug: Error ===")
     print("ERROR: Hugging Face token (HF_CTB_TOKEN) is missing. Please set it in Colab secrets.")
 else:
+    print("=== Debug: Success ===")
     print("Hugging Face token loaded successfully.")
 
-
 # List of models with aliases
 models = [
     {"alias": "FLUX.1-dev", "name": "black-forest-labs/FLUX.1-dev"},
     {"alias": "Midjourney", "name": "strangerzonehf/Flux-Midjourney-Mix2-LoRA"}
 ]
 
-
 # Debugging: Print prompt and model options
+print("=== Debug: Available Options ===")
 print("Prompt Options:", [p["alias"] for p in prompts])
-print("Model Options:", [m["alias"] for m in models])
+print("Model Options:", [m["alias"] for m in models])
+print("=================================")
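Assembled from the hunk above, config_colab.py now reads roughly as follows. Note that prompts is referenced by the debug print (and by the notebook's updated import) but is never defined in the visible diff, so it presumably lives in the unseen top of the file, either inline or imported from prompts.py; the placeholder entry below is hypothetical, implied only by the p["alias"] access.

from google.colab import userdata

api_token = userdata.get("HF_CTB_TOKEN")

# Hypothetical placeholder: the real prompts definition (or an import of it)
# is not shown in this diff, but both the notebook import and the debug print
# at the bottom require it to exist with at least an "alias" key per entry.
prompts = [
    {"alias": "placeholder", "text": "a placeholder prompt"},
]

# Debugging: Check if the Hugging Face token is available
if not api_token:
    print("=== Debug: Error ===")
    print("ERROR: Hugging Face token (HF_CTB_TOKEN) is missing. Please set it in Colab secrets.")
else:
    print("=== Debug: Success ===")
    print("Hugging Face token loaded successfully.")

# List of models with aliases
models = [
    {"alias": "FLUX.1-dev", "name": "black-forest-labs/FLUX.1-dev"},
    {"alias": "Midjourney", "name": "strangerzonehf/Flux-Midjourney-Mix2-LoRA"},
]

# Debugging: Print prompt and model options
print("=== Debug: Available Options ===")
print("Prompt Options:", [p["alias"] for p in prompts])
print("Model Options:", [m["alias"] for m in models])
print("=================================")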