Commit 4f48282 · Parent(s): 1fc8953
Andre committed: update 1.1

This view is limited to 50 files because it contains too many changes; see the raw diff for the full change set.
- .DS_Store +0 -0
- .gitattributes +0 -122
- .gitignore +2 -0
- __pycache__/example-hello-world.cpython-310.pyc +0 -0
- app.py +49 -4
- app_live.py +130 -0
- app_modal.py +82 -0
- colab.ipynb +8 -26
- colab/.DS_Store +0 -0
- {modal_volume_download → config}/.DS_Store +0 -0
- config/__pycache__/__init__.cpython-310.pyc +0 -0
- config/__pycache__/config.cpython-310.pyc +0 -0
- config/__pycache__/config.cpython-311.pyc +0 -0
- config/__pycache__/config.cpython-39.pyc +0 -0
- config/__pycache__/models.cpython-310.pyc +0 -0
- config/__pycache__/models.cpython-311.pyc +0 -0
- config/__pycache__/models.cpython-39.pyc +0 -0
- config/__pycache__/prompts.cpython-310.pyc +0 -0
- config/__pycache__/prompts.cpython-311.pyc +0 -0
- config/__pycache__/prompts.cpython-39.pyc +0 -0
- config/config.py +3 -3
- config/config_colab.py +1 -1
- config/layout.css +11 -0
- config/models.py +11 -3
- {modal_volume_download/images → diffusers_version}/.DS_Store +0 -0
- diffusers_version/app_diffusers.py +9 -0
- diffusers_version/config/__pycache__/config.cpython-39.pyc +0 -0
- diffusers_version/config/__pycache__/models.cpython-39.pyc +0 -0
- diffusers_version/config/__pycache__/prompts.cpython-39.pyc +0 -0
- diffusers_version/config/config.py +14 -0
- diffusers_version/config/models.py +12 -0
- diffusers_version/config/prompts.py +46 -0
- diffusers_version/src/__pycache__/gradio_interface_diffusers.cpython-39.pyc +0 -0
- diffusers_version/src/__pycache__/img_gen_diffusers.cpython-39.pyc +0 -0
- diffusers_version/src/gradio_interface_diffusers.py +68 -0
- diffusers_version/src/img_gen_diffusers.py +183 -0
- examples/example-chat-w-pdf.py +35 -0
- examples/example-flux.py +57 -0
- example-hello-world.py → examples/example-hello-world.py +0 -0
- examples/example-text-to-image.py +135 -0
- examples/example-text-to-video.py +32 -0
- examples/example_check_imports_volume.py +32 -0
- examples/example_dynamic_decorator.py +49 -0
- examples/example_image_settings.py +52 -0
- examples/example_img_gen.py +79 -0
- examples/example_loading_model.py +32 -0
- examples/example_output_dir.py +25 -0
- examples/functions.py +69 -0
- examples/modal_functions_remote_call.py +51 -0
- examples/modal_image_header.py +58 -0
.DS_Store
CHANGED
Binary files a/.DS_Store and b/.DS_Store differ
.gitattributes
CHANGED
@@ -33,125 +33,3 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250130_204829_flux.1-dev_forest_battle_red.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250130_205034_flux.1-dev_night_battle_red.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250130_212304_flux.1-dev_night_battle_red.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250130_212349_flux.1-dev_snowy_battlefield_red.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250130_212424_flux.1-dev_siege_tower_attack_red.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250131_023325_flux.1-dev_castle_siege_red.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250131_023633_flux.1-dev_marching_army_red.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250131_030037_flux.1-dev_night_battle_red.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250131_041200_flux.1-dev_castle_siege_red.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250131_041614_flux.1-dev_castle_siege_red.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250131_042042_flux.1-dev_night_battle_red.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250131_042801_flux.1-dev_forest_battle_red.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250131_045958_flux.1-dev_forest_battle_red.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250131_050217_flux.1-dev_forest_battle_red.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250131_050622_flux.1-dev_boiling_oil_defense_red.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250131_051056_flux.1-dev_boiling_oil_defense_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250131_051644_flux.1-dev_boiling_oil_defense_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250131_051838_flux.1-dev_boiling_oil_defense_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250131_051952_flux.1-dev_boiling_oil_defense_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250131_052036_flux.1-dev_boiling_oil_defense_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250131_052105_flux.1-dev_boiling_oil_defense_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250131_052324_flux.1-dev_boiling_oil_defense_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250131_064901_flux.1-dev_boiling_oil_defense_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250131_065801_flux.1-dev_snowy_battlefield_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250131_070041_flux.1-dev_snowy_battlefield_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250131_070255_flux.1-dev_snowy_battlefield_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250131_075043_flux.1-dev_snowy_battlefield_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250131_075252_flux.1-dev_snowy_battlefield_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250131_082122_flux.1-dev_snowy_battlefield_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250131_082224_flux.1-dev_snowy_battlefield_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250131_082246_flux.1-dev_snowy_battlefield_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250131_082344_flux.1-dev_snowy_battlefield_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250131_082404_flux.1-dev_snowy_battlefield_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250131_082444_flux.1-dev_snowy_battlefield_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250131_082522_flux.1-dev_snowy_battlefield_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250131_082553_flux.1-dev_snowy_battlefield_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250131_082629_flux.1-dev_siege_tower_attack_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250131_084919_flux.1-dev_siege_tower_attack_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250131_145354_flux.1-dev_forest_battle_red.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250131_145558_flux.1-dev_forest_battle_red.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250131_145646_flux.1-dev_forest_battle_red.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250131_145715_flux.1-dev_forest_battle_red.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250131_145739_flux.1-dev_forest_battle_red.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250131_145756_flux.1-dev_forest_battle_red.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250131_145828_flux.1-dev_marching_army_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250131_145850_flux.1-dev_siege_tower_attack_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250131_145911_flux.1-dev_knight_duel_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250201_231219_flux.1-dev_castle_siege_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250201_231312_flux.1-dev_castle_siege_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250201_231354_flux.1-dev_castle_siege_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250201_231432_flux.1-dev_castle_siege_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250201_234155_flux.1-dev_castle_siege_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250201_234234_flux.1-dev_castle_siege_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250202_012316_flux.1-dev_castle_siege_red.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250202_012456_flux.1-dev_castle_siege_red.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250202_012802_flux.1-dev_castle_siege_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250202_012814_flux.1-dev_castle_siege_red.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250202_013404_flux.1-dev_castle_siege_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250202_014853_flux.1-dev_castle_siege_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250202_020010_flux.1-dev_castle_siege_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250202_021449_flux.1-dev_castle_siege_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250202_021744_flux.1-dev_castle_siege_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250202_021941_flux.1-dev_castle_siege_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250202_220825_flux.1-dev_castle_siege_red.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250202_220856_flux.1-dev_castle_siege_red.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250203_002848_flux.1-dev_castle_siege_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250203_003810_flux.1-dev_castle_siege_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250203_005215_flux.1-dev_modal_local_night_battle_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250203_011632_flux.1-dev_modal_local_castle_siege_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250203_011659_flux.1-dev_modal_local_castle_siege_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250203_033539_flux.1-dev_modal_local_castle_siege_red.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250203_033552_flux.1-dev_modal_local_castle_siege_red.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250203_050713_flux.1-dev_modal_local_forest_battle_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250203_050755_flux.1-dev_modal_local_forest_battle_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250203_053946_flux.1-dev_modal_local_forest_battle_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250203_054200_flux.1-dev_modal_local_forest_battle_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250203_054637_flux.1-dev_modal_local_forest_battle_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250203_054716_flux.1-dev_modal_local_forest_battle_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250203_054734_flux.1-dev_modal_local_siege_tower_attack_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250203_054752_flux.1-dev_modal_local_night_battle_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250203_054807_flux.1-dev_modal_local_marching_army_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250203_054826_flux.1-dev_modal_local_siege_tower_attack_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250203_054859_flux.1-dev_modal_local_siege_tower_attack_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250203_154822_flux.1-dev_modal_local_siege_tower_attack_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250203_171357_flux.1-dev_modal_local_forest_battle_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250203_180759_flux.1-dev_modal_local_castle_siege_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250203_185242_flux.1-dev_modal_local_snowy_battlefield_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250203_203049_flux.1-dev_modal_local_castle_siege_red.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250203_203628_flux.1-dev_modal_local_castle_siege_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250203_212222_flux.1-dev_modal_local_castle_siege_red.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250203_212454_flux.1-dev_modal_local_castle_siege_red.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250203_222431_flux.1-dev_modal_local_forest_battle_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250203_225712_flux.1-dev_modal_local_forest_battle_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250204_012912_flux.1-dev_modal_local_castle_siege_red.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250204_030437_flux.1-dev_modal_local_night_battle_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250204_030944_flux.1-dev_modal_local_night_battle_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250204_031139_flux.1-dev_modal_local_night_battle_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250204_031547_flux.1-dev_modal_local_castle_siege_red.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250204_031935_flux.1-dev_modal_local_castle_siege_red.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250204_032132_flux.1-dev_modal_local_night_battle_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250206_020153_flux.1-dev_modal_local_boiling_oil_defense_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250209_053636_flux.1-dev_modal_local_burning_castle_battle_red.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250209_053658_flux.1-dev_modal_local_burning_castle_battle_red.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250209_172043_flux.1-dev_modal_local_burning_castle_battle_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250209_172355_flux.1-dev_modal_local_burning_castle_battle_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250209_182544_flux.1-dev_modal_local_castle_siege_red.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250210_030137_flux.1-dev_modal_local_burning_castle_battle_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250210_030229_flux.1-dev_modal_local_burning_castle_battle_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250210_030309_flux.1-dev_modal_local_burning_castle_battle_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250210_030518_flux.1-dev_modal_local_burning_castle_battle_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250210_030540_flux.1-dev_modal_local_burning_castle_battle_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250210_030605_flux.1-dev_modal_local_burning_castle_battle_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250210_030714_flux.1-dev_modal_local_burning_castle_battle_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250210_030813_flux.1-dev_modal_local_burning_castle_battle_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250210_030855_flux.1-dev_modal_local_burning_castle_battle_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250210_030942_flux.1-dev_modal_local_burning_castle_battle_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250210_031150_flux.1-dev_modal_local_burning_castle_battle_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250210_031204_flux.1-dev_modal_local_burning_castle_battle_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250210_031219_flux.1-dev_modal_local_burning_castle_battle_blue.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250210_042321_flux.1-dev_modal_local_castle_siege_red.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250210_042352_flux.1-dev_modal_local_castle_siege_red.png filter=lfs diff=lfs merge=lfs -text
-modal_volume_download/images/20250210_042552_flux.1-dev_modal_local_castle_siege_red.png filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1,2 @@
+venv/
+modal_volume_download/
__pycache__/example-hello-world.cpython-310.pyc
DELETED
Binary file (728 Bytes)
app.py
CHANGED
@@ -1,6 +1,51 @@
 # app.py
-#
-
+from config.config import models, prompts, api_token  # Direct import
+import gradio as gr
+from src.img_gen import generate_image
+import base64
 
-#
-
+# Gradio Interface
+def gradio_interface():
+    # LOAD CUSTOM CSS
+    try:
+        with open("config/layout.css", "r") as f:
+            custom_css = f.read()
+    except FileNotFoundError:
+        print("Error: aaa.css not found!")
+        custom_css = ""  # Or provide default CSS
+
+    with gr.Blocks(css=custom_css) as demo:
+        gr.Markdown("# CtB AI Image Generator - Inference version (HF)")
+        with gr.Row():
+            # Set default values for dropdowns
+            prompt_dropdown = gr.Dropdown(choices=[p["alias"] for p in prompts], label="Select Prompt", value=prompts[0]["alias"])
+            team_dropdown = gr.Dropdown(choices=["Red", "Blue"], label="Select Team", value="Red")
+            #model_dropdown = gr.Dropdown(choices=[m["alias"] for m in models], label="Select Model", value=models[0]["alias"])
+        with gr.Row():
+            # Add a text box for custom user input (max 200 characters)
+            custom_prompt_input = gr.Textbox(label="Custom Prompt (Optional)", placeholder="Enter additional details (max 200 chars)...", max_lines=1, max_length=200)
+        with gr.Row():
+            generate_button = gr.Button("Generate Image")
+        with gr.Row():
+            output_image = gr.Image(elem_classes="output-image", label="Generated Image", show_label=False, scale=1, width="100%")
+        with gr.Row():
+            status_text = gr.Textbox(label="Status", placeholder="Waiting for input...", interactive=False)
+
+        # Connect the button to the function
+        generate_button.click(
+            generate_image,
+            inputs=[prompt_dropdown,
+                    team_dropdown,
+                    custom_prompt_input,
+                    #model_dropdown,
+                    ],
+            outputs=[output_image, status_text]
+        )
+    return demo
+
+# Create the demo instance
+demo = gradio_interface()
+
+# Only launch if running directly
+if __name__ == "__main__":
+    demo.queue().launch()
app_live.py
ADDED
@@ -0,0 +1,130 @@
+import gradio as gr
+import numpy as np
+import random
+import torch
+from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler, AutoencoderTiny, AutoencoderKL
+from transformers import CLIPTextModel, CLIPTokenizer,T5EncoderModel, T5TokenizerFast
+from live_preview_helpers import calculate_shift, retrieve_timesteps, flux_pipe_call_that_returns_an_iterable_of_images
+import modal
+import random
+import io
+from config.config import prompts, models  # Indirect import
+import os
+import sentencepiece
+from huggingface_hub import login
+from transformers import AutoTokenizer
+from datetime import datetime
+
+
+MAX_SEED = np.iinfo(np.int32).max
+MAX_IMAGE_SIZE = 2048
+
+dtype = torch.bfloat16
+device = "cuda" if torch.cuda.is_available() else "cpu"
+
+CACHE_DIR = "/model_cache"
+
+image = (
+    modal.Image.from_registry("nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.9")
+    .pip_install_from_requirements("requirements.txt")
+    .env({
+        "HF_HUB_ENABLE_HF_TRANSFER": "1", "HF_HOME": "HF_HOME", "HF_HUB_CACHE": CACHE_DIR
+    })
+)
+
+app = modal.App("img-gen-modal-live", image=image)
+with image.imports():
+    import os
+
+flux_model_vol = modal.Volume.from_name("flux-model-vol", create_if_missing=True)
+
+@app.function(volumes={"/data": flux_model_vol},
+              secrets=[modal.Secret.from_name("huggingface-token")],
+              gpu="L40S",
+              timeout=300)
+def infer(prompt, seed=42, randomize_seed=False, width=640, height=360, guidance_scale=3.5, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
+    taef1 = AutoencoderTiny.from_pretrained("/data/taef1", torch_dtype=dtype).to(device)
+    good_vae = AutoencoderKL.from_pretrained("/data/FLUX.1-dev", subfolder="vae", torch_dtype=dtype).to(device)
+    pipe = DiffusionPipeline.from_pretrained("/data/FLUX.1-dev", torch_dtype=dtype, vae=taef1).to(device)
+    torch.cuda.empty_cache()
+
+    pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)
+
+    if randomize_seed:
+        seed = random.randint(0, MAX_SEED)
+    generator = torch.Generator().manual_seed(seed)
+
+    for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images(
+            prompt=prompt,
+            guidance_scale=guidance_scale,
+            num_inference_steps=num_inference_steps,
+            width=width,
+            height=height,
+            generator=generator,
+            output_type="pil",
+            good_vae=good_vae,
+    ):
+        yield img, seed
+
+examples = [
+    "a tiny astronaut hatching from an egg on the moon",
+    "a cat holding a sign that says hello world",
+    "an anime illustration of a wiener schnitzel",
+]
+
+css="""
+#col-container {
+    margin: 0 auto;
+    max-width: 520px;
+}
+"""
+
+hf_token = os.environ["HF_TOKEN"]
+print("Initializing HF TOKEN")
+print(hf_token)
+print("HF TOKEN:")
+login(token=hf_token)
+
+with gr.Blocks(css=css) as demo:
+    f = modal.Function.from_name("img-gen-modal-live", "infer")
+
+    with gr.Column(elem_id="col-container"):
+        gr.Markdown(f"""# FLUX.1 [dev]
+12B param rectified flow transformer guidance-distilled from [FLUX.1 [pro]](https://blackforestlabs.ai/)
+[[non-commercial license](https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md)] [[blog](https://blackforestlabs.ai/announcing-black-forest-labs/)] [[model](https://huggingface.co/black-forest-labs/FLUX.1-dev)]
+""")
+
+        with gr.Row():
+            prompt = gr.Text(label="Prompt", show_label=False, max_lines=1, placeholder="Enter your prompt", container=False)
+            run_button = gr.Button("Run", scale=0)
+
+        result = gr.Image(label="Result", show_label=False)
+
+        with gr.Accordion("Advanced Settings", open=False):
+            seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
+            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+
+            with gr.Row():
+                width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=640)
+                height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=360)
+
+            with gr.Row():
+                guidance_scale = gr.Slider(label="Guidance Scale", minimum=1, maximum=15, step=0.1, value=3.5)
+                num_inference_steps = gr.Slider(label="Number of inference steps", minimum=1, maximum=50, step=1, value=28)
+
+        gr.Examples(
+            examples=examples,
+            fn=f.remote,
+            inputs=[prompt],
+            outputs=[result, seed],
+            cache_examples="lazy"
+        )
+
+    gr.on(
+        triggers=[run_button.click, prompt.submit],
+        fn=lambda *args: [next(f.remote_gen(*args)), seed],  # Adjusted to process generator
+        inputs=[prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
+        outputs=[result, seed]
+    )
+
+demo.launch()
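Note: `infer` is a generator, and the `gr.on` handler above wraps `f.remote_gen(*args)` in `next(...)`, so only the first preview frame reaches the UI per click. A minimal sketch of draining the whole stream instead, assuming the app is deployed under the same name (the function name and argument order come from the code above; the driver itself is hypothetical):

    import modal

    # Look up the deployed generator function by app/function name.
    infer = modal.Function.from_name("img-gen-modal-live", "infer")

    def stream_previews(prompt, seed=42, randomize_seed=True):
        # remote_gen yields each value the remote generator yields,
        # here (partial_image, seed) pairs from the live-preview loop.
        for img, used_seed in infer.remote_gen(prompt, seed, randomize_seed,
                                               640, 360, 3.5, 28):
            yield img, used_seed

    if __name__ == "__main__":
        for frame, s in stream_previews("a castle under siege at dusk"):
            print(f"received preview frame (seed={s})")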
app_modal.py
ADDED
@@ -0,0 +1,82 @@
+# app_modal.py
+import gradio as gr
+import modal
+from config.config import models, models_modal, prompts, api_token  # Direct import
+from config.config import prompts, models, models_modal  # Indirect import
+#from img_gen import generate_image
+
+print("Hello from gradio_interface_head!")
+
+# Modal remote function synchronously
+def generate(cpu_gpu, prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input):
+    # Debug:
+    debug_message = f"Debug: Button clicked! Inputs - Prompt: {prompt_dropdown}, Team: {team_dropdown}, Model: {model_dropdown}, Custom Prompt: {custom_prompt_input}"
+    print(debug_message)  # Print to console for debugging
+    try:
+        # Check for CPU/GPU dropdown option
+        if cpu_gpu == "GPU":
+            f = modal.Function.from_name("img-gen-modal", "generate_image_gpu")
+        else:
+            f = modal.Function.from_name("img-gen-modal", "generate_image_cpu")
+
+        # Import the remote function
+        image_path, message = f.remote(
+            prompt_dropdown,
+            team_dropdown,
+            model_dropdown,
+            custom_prompt_input,
+        )
+        return image_path, message
+    except Exception as e:
+        return None, f"Error calling generate_image function: {e}"
+
+def gradio_interface_modal():
+    try:
+        with open("config/layout.css", "r") as f:
+            custom_css = f.read()
+    except FileNotFoundError:
+        print("Error: aaa.css not found!")
+        custom_css = ""  # Or provide default CSS
+
+    with modal.enable_output():
+        #from config.config import prompts, models  # Indirect import
+        # Gradio Interface
+        with gr.Blocks(
+            css=custom_css
+        ) as demo:
+            gr.Markdown("# CtB AI Image Generator - Cloud version (Modal volume)")
+            with gr.Row():
+                # Set default values for dropdowns
+                prompt_dropdown = gr.Dropdown(choices=[p["alias"] for p in prompts], label="Select Prompt", value=prompts[0]["alias"])
+                team_dropdown = gr.Dropdown(choices=["Red", "Blue"], label="Select Team", value="Red")
+                model_dropdown = gr.Dropdown(choices=[m["alias"] for m in models_modal], label="Select Model", value=models_modal[0]["alias"])
+            with gr.Row():
+                # Add a text box for custom user input (max 200 characters)
+                custom_prompt_input = gr.Textbox(label="Custom Prompt (Optional)", placeholder="Enter additional details (max 200 chars)...", max_lines=1, max_length=200)
+            with gr.Row(elem_classes="row-class"):
+                cpu_gpu = gr.Dropdown(choices=["CPU", "GPU"], label="Select CPU/GPU", value="GPU")
+                generate_button = gr.Button("Generate Image")
+            with gr.Row():
+                output_image = gr.Image(elem_classes="output-image", label="Generated Image", show_label=False, scale=1)
+            with gr.Row():
+                status_text = gr.Textbox(label="Status", placeholder="Waiting for input...", interactive=False)
+            print("Building cudasdasrer...")
+
+            ##Connect the button to the call_generate function
+            ##had do do it to handle gradio/modal interaction)
+            generate_button.click(
+                generate,
+                inputs=[
+                    cpu_gpu,
+                    prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input],
+                outputs=[output_image, status_text],
+            )
+            return demo
+
+# Create the demo instance
+demo = gradio_interface_modal()
+
+# Only launch if running directly
+if __name__ == "__main__":
+    with modal.enable_output():
+        demo.queue().launch()
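Note: the `generate` handler above resolves its backend by name, so the functions it calls live in a separate Modal app ("img-gen-modal") that is not part of this diff. A sketch of the contract it appears to assume; the function names, argument order, and (image_path, message) return shape are taken from the calls above, while the decorators and bodies are placeholders:

    # Hypothetical shape of the "img-gen-modal" backend that app_modal.py
    # looks up; everything except the names and return contract is illustrative.
    import modal

    app = modal.App("img-gen-modal")

    @app.function(gpu="T4", timeout=600)  # GPU variant; resources are a guess
    def generate_image_gpu(prompt_alias, team_color, model_alias, custom_prompt):
        # ...load the FLUX pipeline from the volume and render...
        return "/data/images/output.png", "Generated on GPU"

    @app.function(timeout=600)  # CPU variant
    def generate_image_cpu(prompt_alias, team_color, model_alias, custom_prompt):
        # ...same pipeline without CUDA...
        return "/data/images/output.png", "Generated on CPU"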
colab.ipynb
CHANGED
@@ -50,13 +50,6 @@
 " style={\"description_width\": \"initial\"}\n",
 ")\n",
 "\n",
-"# Input for height\n",
-"height_input = widgets.IntText(\n",
-" value=360,\n",
-" description=\"Height:\",\n",
-" style={\"description_width\": \"initial\"}\n",
-")\n",
-"\n",
 "# Input for width\n",
 "width_input = widgets.IntText(\n",
 " value=640,\n",
@@ -64,6 +57,13 @@
 " style={\"description_width\": \"initial\"}\n",
 ")\n",
 "\n",
+"# Input for height\n",
+"height_input = widgets.IntText(\n",
+" value=360,\n",
+" description=\"Height:\",\n",
+" style={\"description_width\": \"initial\"}\n",
+")\n",
+"\n",
 "# Input for number of inference steps\n",
 "num_inference_steps_input = widgets.IntSlider(\n",
 " value=20,\n",
@@ -147,7 +147,7 @@
 " # Generate the image\n",
 " print(\"=== Debug: Calling generate_image ===\")\n",
 " image = generate_image(\n",
-" selected_prompt, selected_team, selected_model,
+" selected_prompt, selected_team, selected_model, width, height,\n",
 " num_inference_steps, guidance_scale, seed, custom_prompt, api_token,\n",
 " randomize_seed=randomize_seed_checkbox.value\n",
 " )\n",
@@ -176,24 +176,6 @@
 "# Attach the button click event handler\n",
 "generate_button.on_click(on_generate_button_clicked)\n",
 "\n",
-"def save_image(image, model_label, seed, prompt_label, team):\n",
-" \"\"\"\n",
-" Save the generated image with a timestamped filename.\n",
-"\n",
-" Args:\n",
-" image (PIL.Image.Image): The generated image.\n",
-" model_label (str): The label of the selected model.\n",
-" prompt_label (str): The seed. The label of the selected prompt.\n",
-" team (str): The selected team (\"Red\" or \"Blue\").\n",
-"\n",
-" Returns:\n",
-" str: The filename of the saved image.\n",
-" \"\"\"\n",
-" timestamp = datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n",
-" output_filename = f\"{timestamp}_{model_label.replace(' ', '_').lower()}_{seed}_{prompt_label.replace(' ', '_').lower()}_{team.lower()}.png\"\n",
-" image.save(output_filename)\n",
-" return output_filename\n",
-"\n",
 "# Display the widgets\n",
 "display(prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input, seed_input, randomize_seed_checkbox, generate_button, output)"
 ]
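Note: the net effect of these notebook hunks is that the height widget now sits after the width widget and both values are forwarded to `generate_image`. Assuming `width` and `height` are read from the widgets the same way the other inputs are, the resulting call looks like this (a sketch; variable names follow the notebook):

    # Sketch of the updated call after this commit; the widget .value reads
    # are assumed to happen earlier in the handler, as for the other inputs.
    width = width_input.value    # 640 by default
    height = height_input.value  # 360 by default

    image = generate_image(
        selected_prompt, selected_team, selected_model, width, height,
        num_inference_steps, guidance_scale, seed, custom_prompt, api_token,
        randomize_seed=randomize_seed_checkbox.value
    )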
colab/.DS_Store
CHANGED
Binary files a/colab/.DS_Store and b/colab/.DS_Store differ
{modal_volume_download → config}/.DS_Store
RENAMED
Binary files a/modal_volume_download/.DS_Store and b/config/.DS_Store differ
config/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (163 Bytes)

config/__pycache__/config.cpython-310.pyc
ADDED
Binary file (605 Bytes)

config/__pycache__/config.cpython-311.pyc
ADDED
Binary file (981 Bytes)

config/__pycache__/config.cpython-39.pyc
ADDED
Binary file (621 Bytes)

config/__pycache__/models.cpython-310.pyc
ADDED
Binary file (470 Bytes)

config/__pycache__/models.cpython-311.pyc
ADDED
Binary file (564 Bytes)

config/__pycache__/models.cpython-39.pyc
ADDED
Binary file (468 Bytes)

config/__pycache__/prompts.cpython-310.pyc
ADDED
Binary file (6.39 kB)

config/__pycache__/prompts.cpython-311.pyc
ADDED
Binary file (6.55 kB)

config/__pycache__/prompts.cpython-39.pyc
ADDED
Binary file (6.38 kB)
config/config.py
CHANGED
@@ -1,12 +1,12 @@
 # config.py
 import os
-from config.prompts import prompts
-from config.models import models
+from config.prompts import prompts
+from config.models import models, models_modal
 
 # Retrieve the Hugging Face token
 api_token = os.getenv("HF_TOKEN")
 
 # Debugging: Print prompt and model options
+print("##### IMPORTING CONFIG #####")
 print("Prompt Options:", [p["alias"] for p in prompts])
 print("Model Options:", [m["alias"] for m in models])
-
config/config_colab.py
CHANGED
@@ -1,7 +1,7 @@
 # config_colab.py
 from google.colab import userdata
 from config.prompts import prompts  # Import prompts from prompts.py
-from config.models import models
+from config.models import models, models_modal
 
 # Retrieve the Hugging Face token from Colab secrets
 api_token = userdata.get("HF_TOKEN")
config/layout.css
ADDED
@@ -0,0 +1,11 @@
+.row-class {
+    display: flex;
+    align-items: stretch; /* Ensures all children have the same height */
+}
+.row-class > * {
+    flex: 1;
+}
+.output-image img {
+    width: 2500px; /* Force image to fill container width */
+    object-fit: cover; /* ACTIVATE FOR IMAGE-FIT CONTAINER */
+}
config/models.py
CHANGED
@@ -1,8 +1,16 @@
 # List of models with aliases
 models = [
     {"alias": "FLUX.1-dev", "name": "black-forest-labs/FLUX.1-dev"},
-    {"alias": "
+    {"alias": "FLUX.1-dev_modal_local", "name": "FLUX.1-dev"},
+    {"alias": "Midjourney", "name": "strangerzonehf/Flux-Midjourney-Mix2-LoRA"},
+    {"alias": "FLUX.1-schnell", "name": "black-forest-labs/FLUX.1-schnell"},
+    {"alias": "FLUX.1-schnell_modal_local", "name": "FLUX.1-schnell"},
 ]
 
-
-
+models_modal = [
+    {"alias": "FLUX.1-dev_modal_local", "name": "FLUX.1-dev"},
+    {"alias": "FLUX.1-schnell_modal_local", "name": "FLUX.1-schnell"},
+    #{"alias": "FLUX.1-dev", "name": "black-forest-labs/FLUX.1-dev"},
+    #{"alias": "Midjourney", "name": "strangerzonehf/Flux-Midjourney-Mix2-LoRA"},
+    #{"alias": "FLUX.1-schnell", "name": "black-forest-labs/FLUX.1-schnell"},
+]
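Note: both interfaces resolve the dropdown alias to a model name through these tables; the `_modal_local` entries map to bare directory names on the Modal volume rather than Hub repo IDs. A minimal sketch of that lookup, mirroring the `next(...)` pattern used in img_gen_diffusers.py further below (the helper name is illustrative):

    from config.models import models, models_modal

    def resolve_model(alias, table=models):
        # Same next(...) lookup the repo uses, wrapped with a clearer error.
        try:
            return next(m["name"] for m in table if m["alias"] == alias)
        except StopIteration:
            raise ValueError(f"Unknown model alias: {alias}")

    # e.g. resolve_model("FLUX.1-dev_modal_local", models_modal) -> "FLUX.1-dev",
    # a directory on the Modal volume rather than a Hub repo id.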
{modal_volume_download/images → diffusers_version}/.DS_Store
RENAMED
Binary files a/modal_volume_download/images/.DS_Store and b/diffusers_version/.DS_Store differ
diffusers_version/app_diffusers.py
ADDED
@@ -0,0 +1,9 @@
+# app.py
+#IMPORT gradio_interface
+from src.gradio_interface_diffusers import demo
+from config.config import models, prompts, api_token  # Direct import
+import sys
+import os
+
+# Launch the Gradio app
+demo.queue().launch()
diffusers_version/config/__pycache__/config.cpython-39.pyc
ADDED
Binary file (631 Bytes)

diffusers_version/config/__pycache__/models.cpython-39.pyc
ADDED
Binary file (409 Bytes)

diffusers_version/config/__pycache__/prompts.cpython-39.pyc
ADDED
Binary file (6.4 kB)
diffusers_version/config/config.py
ADDED
@@ -0,0 +1,14 @@
+# config.py
+import os
+from config.prompts import prompts  # Direct Import prompts from prompts.py
+from config.models import models  # Direct Import models
+
+# Retrieve the Hugging Face token
+api_token = os.getenv("HF_TOKEN")
+
+# Debugging: Print prompt and model options
+print("Prompt Options:", [p["alias"] for p in prompts])
+print("Model Options:", [m["alias"] for m in models])
+
+gpu = "T4"
+
diffusers_version/config/models.py
ADDED
@@ -0,0 +1,12 @@
+# List of models with aliases
+models = [
+    {"alias": "FLUX.1-dev_modal_volume", "name": "FLUX.1-dev"},
+    {"alias": "FLUX.1-dev", "name": "black-forest-labs/FLUX.1-dev"},
+    {"alias": "Midjourney", "name": "strangerzonehf/Flux-Midjourney-Mix2-LoRA"},
+    {"alias": "FLUX.1-schnell", "name": "black-forest-labs/FLUX.1-schnell"},
+    #{"alias": "Andre", "name": "Andre"}
+
+
+]
+# Debugging: Print prompt and model options
+#print("Model Options:", [m["alias"] for m in models])
diffusers_version/config/prompts.py
ADDED
@@ -0,0 +1,46 @@
+
+# List of prompts with intense combat
+#
+
+prompts = [
+    {
+        "alias": "Castle Siege",
+        "text": "A medieval castle under siege, with archers firing arrows from the walls, knights charging on horses, and catapults launching fireballs. The winning army is dressed in {team_color} armor and banners. The enemy army, dressed in {enemy_color} armor and banners, is fiercely attacking the castle, with soldiers scaling ladders and clashing swords with the defenders. Arrows fly through the air, explosions light up the battlefield, and injured knights lie on the ground. Fire engulfs parts of the castle, and the air is thick with smoke and chaos. Unreal Engine render style, photorealistic, realistic fantasy style."
+    },
+    {
+        "alias": "Forest Battle",
+        "text": "A fierce battle between two armies in a dense forest, with knights wielding swords and axes, horses rearing, and the ground covered in mud and blood. The winning army is dressed in {team_color} armor and banners. The enemy army, dressed in {enemy_color} armor and banners, is locked in brutal combat, with soldiers fighting hand-to-hand amidst the trees. Arrows whiz past, and the sounds of clashing steel echo through the forest. Injured soldiers scream in pain, and the forest is littered with broken weapons and shields. Unreal Engine render style, photorealistic, realistic fantasy style."
+    },
+    {
+        "alias": "Boiling Oil Defense",
+        "text": "A dramatic moment in a medieval siege, with a knight leading a charge against a castle gate, while defenders pour boiling oil from the walls. The winning army is dressed in {team_color} armor and banners. The enemy army, dressed in {enemy_color} armor and banners, is relentlessly attacking, with soldiers screaming as they are hit by the oil. Knights clash swords at the gate, and arrows rain down from above. The ground is littered with the bodies of fallen soldiers, and the air is filled with the smell of burning flesh. Unreal Engine render style, photorealistic, realistic fantasy style."
+    },
+    {
+        "alias": "Burning Castle Battle",
+        "text": "A chaotic battlefield with knights on horseback clashing with infantry, archers firing volleys of arrows, and a castle burning in the background. The winning army is dressed in {team_color} armor and banners. The enemy army, dressed in {enemy_color} armor and banners, is fighting fiercely, with soldiers engaging in brutal melee combat. Flames light up the scene as knights charge through the chaos. Injured soldiers crawl on the ground, and the air is filled with the sounds of clashing steel and screams of pain. Unreal Engine render style, photorealistic, realistic fantasy style."
+    },
+    {
+        "alias": "Heroic Last Stand",
+        "text": "A heroic last stand of a small group of knights defending a bridge against a massive army, with arrows flying and swords clashing. The winning army is dressed in {team_color} armor and banners. The enemy army, dressed in {enemy_color} armor and banners, is overwhelming the defenders, but the knights fight bravely, cutting down enemy soldiers as they advance. The bridge is littered with bodies and broken weapons. Blood stains the ground, and the air is thick with the sounds of battle. Unreal Engine render style, photorealistic, realistic fantasy style."
+    },
+    {
+        "alias": "Siege Tower Attack",
+        "text": "A medieval siege tower approaching a castle wall, with knights scaling ladders and defenders throwing rocks and shooting arrows. The winning army is dressed in {team_color} armor and banners. The enemy army, dressed in {enemy_color} armor and banners, is fighting desperately to breach the walls, with soldiers clashing swords on the battlements. Arrows fly in all directions, and the siege tower is engulfed in flames. Injured soldiers fall from the ladders, and the ground is littered with the bodies of the fallen. Unreal Engine render style, photorealistic, realistic fantasy style."
+    },
+    {
+        "alias": "Knight Duel",
+        "text": "A dramatic duel between two knights in the middle of a battlefield, with their armies watching and the castle in the background. The winning army is dressed in {team_color} armor and banners. The enemy army, dressed in {enemy_color} armor and banners, is engaged in fierce combat all around, with soldiers clashing swords and firing arrows. The duelists fight with skill and determination, their blades flashing in the sunlight. Injured soldiers lie on the ground, and the air is filled with the sounds of battle. Unreal Engine render style, photorealistic, realistic fantasy style. "
+    },
+    {
+        "alias": "Night Battle",
+        "text": "A night battle during a medieval siege, with torches lighting the scene, knights fighting in the shadows, and the castle walls looming in the background. The winning army is dressed in {team_color} armor and banners. The enemy army, dressed in {enemy_color} armor and banners, is locked in brutal combat, with soldiers clashing swords and firing arrows in the dim light. Flames from burning siege equipment illuminate the chaos. Injured soldiers scream in pain, and the ground is littered with the bodies of the fallen. Unreal Engine render style, photorealistic, realistic fantasy style."
+    },
+    {
+        "alias": "Marching Army",
+        "text": "A massive army of knights and infantry marching towards a distant castle, with banners flying and the sun setting behind them. The winning army is dressed in {team_color} armor and banners. The enemy army, dressed in {enemy_color} armor and banners, is engaging in skirmishes along the way, with soldiers clashing swords and firing arrows. The battlefield is alive with the sounds of combat and the clash of steel. Injured soldiers lie on the ground, and the air is thick with the smell of blood and smoke. Unreal Engine render style, photorealistic, realistic fantasy style."
+    },
+    {
+        "alias": "Snowy Battlefield",
+        "text": "A medieval battle in a snowy landscape, with knights in heavy armor fighting on a frozen lake, and the castle visible in the distance. The winning army is dressed in {team_color} armor and banners. The enemy army, dressed in {enemy_color} armor and banners, is locked in fierce combat, with soldiers slipping on the ice as they clash swords. Arrows fly through the air, and the snow is stained red with blood. Injured soldiers crawl on the ground, and the air is filled with the sounds of battle. Unreal Engine render style, photorealistic, realistic fantasy style."
+    }
+]
diffusers_version/src/__pycache__/gradio_interface_diffusers.cpython-39.pyc
ADDED
Binary file (2.73 kB)

diffusers_version/src/__pycache__/img_gen_diffusers.cpython-39.pyc
ADDED
Binary file (4.41 kB)
diffusers_version/src/gradio_interface_diffusers.py
ADDED
@@ -0,0 +1,68 @@
+# gradio_interface.py
+import gradio as gr
+from config.config import prompts, models  # Indirect import
+from src.img_gen_diffusers import generate_image
+
+print("Hello from gradio_interface_head!")
+
+# Modal remote function synchronously
+def generate(prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input):
+    # Debug: Print a message instead of generating an image
+    debug_message = f"Debug: Button clicked! Inputs - Prompt: {prompt_dropdown}, Team: {team_dropdown}, Model: {model_dropdown}, Custom Prompt: {custom_prompt_input}"
+    print(debug_message)  # Print to console for debugging
+    try:
+        # Import the remote function
+        image_path, message = generate_image(prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input)
+        return image_path, message
+    except Exception as e:
+        return None, f"An error occurred: {e}"
+
+def generate_gpu(prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input):
+    # Debug: Print a message instead of generating an image
+    debug_message = f"Debug: Button clicked! Inputs - Prompt: {prompt_dropdown}, Team: {team_dropdown}, Model: {model_dropdown}, Custom Prompt: {custom_prompt_input}"
+    print(debug_message)  # Print to console for debugging
+    try:
+        # Import the remote function
+        f = modal.Function.from_name("img-gen-modal-gpu", "generate_image")
+        image_path, message = f.remote(prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input)
+        return image_path, message
+    except Exception as e:
+        return None, f"An error occurred: {e}"
+
+
+def gradio_interface_diffusers():
+    from config.config import prompts, models  # Indirect import
+    # Gradio Interface
+    with gr.Blocks() as demo:
+        gr.Markdown("# CtB AI Image Generator")
+        with gr.Row():
+            # Set default values for dropdowns
+            prompt_dropdown = gr.Dropdown(choices=[p["alias"] for p in prompts], label="Select Prompt", value=prompts[0]["alias"])
+            team_dropdown = gr.Dropdown(choices=["Red", "Blue"], label="Select Team", value="Red")
+            model_dropdown = gr.Dropdown(choices=[m["alias"] for m in models], label="Select Model", value=models[0]["alias"])
+        with gr.Row():
+            # Add a text box for custom user input (max 200 characters)
+            custom_prompt_input = gr.Textbox(label="Custom Prompt (Optional)", placeholder="Enter additional details (max 200 chars)...", max_lines=1, max_length=200)
+        with gr.Row():
+            generate_button = gr.Button("Generate Image")
+        with gr.Row():
+            output_image = gr.Image(label="Generated Image")
+        with gr.Row():
+            status_text = gr.Textbox(label="Status", placeholder="Waiting for input...", interactive=False)
+        print("Building cudasdasrer...")
+
+        ##Connect the button to the call_generate function
+        ##had do do it to handle gradio/modal interaction)
+        generate_button.click(
+            generate,
+            inputs=[prompt_dropdown, team_dropdown, model_dropdown, custom_prompt_input],
+            outputs=[output_image, status_text],
+        )
+        return demo
+
+# Create the demo instance
+demo = gradio_interface_diffusers()
+
+# Only launch if running directly
+if __name__ == "__main__":
+    demo.queue().launch()
diffusers_version/src/img_gen_diffusers.py
ADDED
@@ -0,0 +1,183 @@
# img_gen.py
import os
import io
import random
from datetime import datetime

import gradio as gr
import torch
import diffusers
import sentencepiece
from huggingface_hub import login
from transformers import AutoTokenizer

from config.config import prompts, models  # Indirect import


# MAIN GENERATE IMAGE FUNCTION
def generate_image(prompt_alias,
                   team_color,
                   model_alias,
                   custom_prompt,
                   height=360,
                   width=640,
                   num_inference_steps=20,
                   guidance_scale=2.0,
                   seed=-1,
                   progress=gr.Progress(track_tqdm=True)  # Add progress parameter
                   ):
    print("Hello from ctb_modal!")
    # progress(0, desc="Starting...")  # Initial progress
    # yield "Initializing image generation..."  # Yield the initial message

    print("Running debug check...")

    # Debug function to check installed packages
    def check_dependencies():
        import importlib
        packages = [
            "diffusers",      # For Stable Diffusion
            "transformers",   # For Hugging Face models
            "torch",          # PyTorch
            "accelerate",     # For distributed training/inference
            "gradio",         # For the Gradio interface
            "safetensors",    # For safe model loading
            "pillow",         # For image processing
            "sentencepiece",  # For tokenization
        ]
        for package in packages:
            import_name = "PIL" if package == "pillow" else package  # pillow installs the PIL module
            try:
                module = importlib.import_module(import_name)
                version = getattr(module, "__version__", "unknown")
                print(f"{package} is installed. Version: {version}")
            except ImportError:
                print(f"{package} is NOT installed.")

    check_dependencies()
    # progress(0.2, desc="Preprocessing input...")
    # yield "Preprocessing inputs..."  # Yield the preprocessing message

    # Find the selected prompt and model
    try:
        prompt = next(p for p in prompts if p["alias"] == prompt_alias)["text"]
        model_name = next(m for m in models if m["alias"] == model_alias)["name"]
    except StopIteration:
        return None, "ERROR: Invalid prompt or model selected."

    # Determine the enemy color
    enemy_color = "blue" if team_color.lower() == "red" else "red"

    # Print the original prompt and dynamic values for debugging
    print("Original Prompt:")
    print(prompt)
    print(f"Enemy Color: {enemy_color}")
    print(f"Team Color: {team_color.lower()}")

    prompt = prompt.format(team_color=team_color.lower(), enemy_color=enemy_color)

    # Print the formatted prompt for debugging
    print("\nFormatted Prompt:")
    print(prompt)

    # Append the custom prompt (if provided)
    if custom_prompt and len(custom_prompt.strip()) > 0:
        prompt += " " + custom_prompt.strip()

    # Randomize the seed if needed
    if seed == -1:
        seed = random.randint(0, 1000000)

    try:
        from diffusers import FluxPipeline
        print("Initializing HF TOKEN")
        hf_token = os.environ["HF_TOKEN"]
        login(token=hf_token)  # never print the token itself
        print("model_name:")
        print(model_name)

        local_path = f"{model_name}"
        print(f"Loading model from local path: {local_path}")

        # Debug: check if the directory exists and list its contents
        if os.path.exists(local_path):
            print("Directory exists. Contents:")
            for item in os.listdir(local_path):
                print(f" - {item}")
        else:
            print(f"Directory does not exist: {local_path}")
            print("Contents of /:")
            print(os.listdir("/"))

        # CHECK FOR TORCH USING CUDA
        print("CHECK FOR TORCH USING CUDA")
        print(f"CUDA available: {torch.cuda.is_available()}")
        if torch.cuda.is_available():
            print(f"CUDA device count: {torch.cuda.device_count()}")
            print(f"Current device: {torch.cuda.current_device()}")
            print(f"Device name: {torch.cuda.get_device_name(torch.cuda.current_device())}")

        # progress(0.5, desc="Running the model...")
        # yield "Running the model..."  # Yield the model running message

        # INITIALIZING PIPE
        print("-----INITIALIZING PIPE-----")
        pipe = FluxPipeline.from_pretrained(
            local_path,
            torch_dtype=torch.bfloat16,
            # torch_dtype=torch.float16,
            # torch_dtype=torch.float32,
            local_files_only=True,
        )

        if torch.cuda.is_available():
            print("CUDA available, using GPU")
            pipe = pipe.to("cuda")
            pipe_message = "CUDA"
        else:
            print("CUDA not available, using CPU")
            # pipe = pipe.to("cpu")
            pipe_message = "CPU"
            # pipe.enable_model_cpu_offload()  # Use official recommended method
        print(f"-----{pipe_message} PIPE INITIALIZED-----")
        print(f"Using device: {pipe.device}")
    except Exception as e:
        print(f"Detailed error: {str(e)}")
        return None, f"ERROR: Failed to initialize the pipeline. Details: {e}"

    try:
        print("-----SENDING IMG GEN TO PIPE-----")
        print("-----HOLD ON-----")
        # progress(0.8, desc="Postprocessing the output...")
        # yield "Postprocessing the output..."  # Yield the postprocessing message

        image = pipe(
            prompt,
            guidance_scale=guidance_scale,
            num_inference_steps=num_inference_steps,
            width=width,
            height=height,
            max_sequence_length=512,
            generator=torch.Generator("cpu").manual_seed(seed),  # apply the (possibly randomized) seed
        ).images[0]
        print("-----RENDER DONE!-----")
        print(image)
    except Exception as e:
        return None, f"ERROR: Failed to generate the image. Details: {e}"

    try:
        print("-----IMAGE GENERATED SUCCESSFULLY!-----")
        print("-----CALL THE BANNERS!-----")
        print("-----SAVING TO DISK-----")
        # Save the image with a timestamped filename
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        output_filename = f"/data/images/{timestamp}_{model_alias.replace(' ', '_').lower()}_{prompt_alias.replace(' ', '_').lower()}_{team_color.lower()}.png"
        # Save the image using PIL's save method
        image.save(output_filename)
        print(f"File path: {output_filename}")
    except Exception as e:
        print(f"ERROR: Failed to save image. Details: {e}")

    # Return the image and a success message
    return image, "Image generated successfully! Call the banners!"
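A quick sketch of how the interface side might consume this function. The component names below are illustrative assumptions, not taken from gradio_interface_diffusers.py; the point is only that the (image, message) return tuple maps onto two Gradio outputs:

# Hypothetical wiring -- component names are illustrative.
import gradio as gr
from diffusers_version.src.img_gen_diffusers import generate_image

with gr.Blocks() as demo:
    prompt_dd = gr.Dropdown(choices=["castle siege"], label="Prompt")
    team_dd = gr.Dropdown(choices=["Red", "Blue"], label="Team")
    model_dd = gr.Dropdown(choices=["FLUX.1-dev"], label="Model")
    custom_tb = gr.Textbox(label="Custom prompt")
    generate_btn = gr.Button("Generate")
    output_image = gr.Image(label="Result")
    status_text = gr.Textbox(label="Status")

    # generate_image returns (image, message), matching the two outputs.
    generate_btn.click(
        fn=generate_image,
        inputs=[prompt_dd, team_dd, model_dd, custom_tb],
        outputs=[output_image, status_text],
    )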
examples/example-chat-w-pdf.py
ADDED
@@ -0,0 +1,35 @@
from pathlib import Path
from urllib.request import urlopen
from uuid import uuid4

import modal

MINUTES = 60  # seconds

app = modal.App("chat-with-pdf")


CACHE_DIR = "/hf-cache"

model_image = (
    modal.Image.debian_slim(python_version="3.12")
    .apt_install("git")
    .pip_install(
        [
            "git+https://github.com/illuin-tech/colpali.git@782edcd50108d1842d154730ad3ce72476a2d17d",  # we pin the commit id
            "hf_transfer==0.1.8",
            "qwen-vl-utils==0.0.8",
            "torchvision==0.19.1",
        ]
    )
    .env({"HF_HUB_ENABLE_HF_TRANSFER": "1", "HF_HUB_CACHE": CACHE_DIR})
)


# These dependencies are only installed remotely, so we can't import them locally.
# Use the .imports() context manager to import them only on Modal instead.
with model_image.imports():
    import torch
    from colpali_engine.models import ColQwen2, ColQwen2Processor
    from qwen_vl_utils import process_vision_info
    from transformers import AutoProcessor, Qwen2VLForConditionalGeneration
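The .imports() pattern above only pays off once a function actually runs on Modal. A minimal, hypothetical sketch of how such a function could use the remotely-imported names; the function name, GPU choice, and model id are assumptions, not part of this example:

@app.function(image=model_image, gpu="A100", timeout=10 * MINUTES)
def load_colpali():
    # torch and ColQwen2 are usable here because the function executes
    # inside model_image, where the .imports() block made them available.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = ColQwen2.from_pretrained(
        "vidore/colqwen2-v1.0", torch_dtype=torch.bfloat16
    ).to(device)
    processor = ColQwen2Processor.from_pretrained("vidore/colqwen2-v1.0")
    # Return lightweight metadata rather than the model itself, which
    # would otherwise be pickled back to the caller.
    return str(type(model)), str(type(processor))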
examples/example-flux.py
ADDED
@@ -0,0 +1,57 @@
import time
from io import BytesIO
from pathlib import Path

import modal


cuda_version = "12.4.0"  # should be no greater than host CUDA version
flavor = "devel"  # includes full CUDA toolkit
operating_sys = "ubuntu22.04"
tag = f"{cuda_version}-{flavor}-{operating_sys}"

cuda_dev_image = modal.Image.from_registry(
    f"nvidia/cuda:{tag}", add_python="3.11"
).entrypoint([])


diffusers_commit_sha = "81cf3b2f155f1de322079af28f625349ee21ec6b"

flux_image = (
    cuda_dev_image.apt_install(
        "git",
        "libglib2.0-0",
        "libsm6",
        "libxrender1",
        "libxext6",
        "ffmpeg",
        "libgl1",
    )
    .pip_install(
        "invisible_watermark==0.2.0",
        "transformers==4.44.0",
        "huggingface_hub[hf_transfer]==0.26.2",
        "accelerate==0.33.0",
        "safetensors==0.4.4",
        "sentencepiece==0.2.0",
        "torch==2.5.0",
        f"git+https://github.com/huggingface/diffusers.git@{diffusers_commit_sha}",
        "numpy<2",
    )
    .env({"HF_HUB_ENABLE_HF_TRANSFER": "1", "HF_HUB_CACHE_DIR": "/cache"})
)


flux_image = flux_image.env(
    {
        "TORCHINDUCTOR_CACHE_DIR": "/root/.inductor-cache",
        "TORCHINDUCTOR_FX_GRAPH_CACHE": "1",
    }
)


app = modal.App("example-flux", image=flux_image)

with flux_image.imports():
    import torch
    from diffusers import FluxPipeline
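The snippet stops after the image and app setup. A rough sketch of the GPU class that would typically follow, under stated assumptions: the class name, GPU type, model id (FLUX.1-schnell with its 4-step/no-CFG settings), and output format are illustrative, not taken from this diff:

@app.cls(gpu="H100", timeout=10 * 60)
class FluxModel:
    @modal.enter()
    def setup(self):
        # Load the pipeline once per container, then reuse it across calls.
        self.pipe = FluxPipeline.from_pretrained(
            "black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
        ).to("cuda")

    @modal.method()
    def inference(self, prompt: str) -> bytes:
        image = self.pipe(
            prompt, num_inference_steps=4, guidance_scale=0.0
        ).images[0]
        buf = BytesIO()
        image.save(buf, format="JPEG")
        return buf.getvalue()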
example-hello-world.py → examples/example-hello-world.py
RENAMED
File without changes
examples/example-text-to-image.py
ADDED
@@ -0,0 +1,135 @@
import io
import random
import time
from pathlib import Path

import modal

MINUTES = 60


app = modal.App("example-text-to-image")


CACHE_DIR = "/cache"

image = (
    modal.Image.debian_slim(python_version="3.12")
    .pip_install(
        "accelerate==0.33.0",
        "diffusers==0.31.0",
        "fastapi[standard]==0.115.4",
        "huggingface-hub[hf_transfer]==0.25.2",
        "sentencepiece==0.2.0",
        "torch==2.5.1",
        "torchvision==0.20.1",
        "transformers~=4.44.0",
    )
    .env(
        {
            "HF_HUB_ENABLE_HF_TRANSFER": "1",  # faster downloads
            "HF_HUB_CACHE_DIR": CACHE_DIR,
        }
    )
)

with image.imports():
    import diffusers
    import torch
    from fastapi import Response

MODEL_ID = "adamo1139/stable-diffusion-3.5-large-turbo-ungated"
MODEL_REVISION_ID = "9ad870ac0b0e5e48ced156bb02f85d324b7275d2"

cache_volume = modal.Volume.from_name("hf-hub-cache", create_if_missing=True)


@app.cls(
    image=image,
    gpu="H100",
    timeout=10 * MINUTES,
    volumes={CACHE_DIR: cache_volume},
)
class Inference:
    @modal.enter()
    def load_pipeline(self):
        self.pipe = diffusers.StableDiffusion3Pipeline.from_pretrained(
            MODEL_ID,
            revision=MODEL_REVISION_ID,
            torch_dtype=torch.bfloat16,
        ).to("cuda")

    @modal.method()
    def run(
        self, prompt: str, batch_size: int = 4, seed: int = None
    ) -> list[bytes]:
        seed = seed if seed is not None else random.randint(0, 2**32 - 1)
        print("seeding RNG with", seed)
        torch.manual_seed(seed)
        images = self.pipe(
            prompt,
            num_images_per_prompt=batch_size,  # outputting multiple images per prompt is much cheaper than separate calls
            num_inference_steps=4,  # turbo is tuned to run in four steps
            guidance_scale=0.0,  # turbo doesn't use CFG
            max_sequence_length=512,  # T5-XXL text encoder supports longer sequences, more complex prompts
        ).images

        image_output = []
        for image in images:
            with io.BytesIO() as buf:
                image.save(buf, format="PNG")
                image_output.append(buf.getvalue())  # read bytes before the buffer closes
        torch.cuda.empty_cache()  # reduce fragmentation
        return image_output

    @modal.web_endpoint(docs=True)
    def web(self, prompt: str, seed: int = None):
        return Response(
            content=self.run.local(  # run in the same container
                prompt, batch_size=1, seed=seed
            )[0],
            media_type="image/png",
        )


def slugify(text: str) -> str:
    # Minimal helper (the entrypoint below references slugify, which the
    # original diff never defined): keep alphanumerics, dash everything else.
    return "".join(c if c.isalnum() else "-" for c in text.lower())


@app.local_entrypoint()
def entrypoint(
    samples: int = 4,
    prompt: str = "A princess riding on a pony",
    batch_size: int = 4,
    seed: int = None,
):
    print(
        f"prompt => {prompt}",
        f"samples => {samples}",
        f"batch_size => {batch_size}",
        f"seed => {seed}",
        sep="\n",
    )

    output_dir = Path("/tmp/stable-diffusion")
    output_dir.mkdir(exist_ok=True, parents=True)

    inference_service = Inference()

    for sample_idx in range(samples):
        start = time.time()
        images = inference_service.run.remote(prompt, batch_size, seed)
        duration = time.time() - start
        print(f"Run {sample_idx + 1} took {duration:.3f}s")
        if sample_idx:
            print(
                f"\tGenerated {len(images)} image(s) at {duration / len(images):.3f}s / image."
            )
        for batch_idx, image_bytes in enumerate(images):
            output_path = (
                output_dir
                / f"output_{slugify(prompt)[:64]}_{str(sample_idx).zfill(2)}_{str(batch_idx).zfill(2)}.png"
            )
            if not batch_idx:
                print("Saving outputs", end="\n\t")
            print(
                output_path,
                end="\n" + ("\t" if batch_idx < len(images) - 1 else ""),
            )
            output_path.write_bytes(image_bytes)
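Because entrypoint is a local entrypoint, its keyword arguments become command-line flags under modal run. Assuming the file keeps this path, an invocation would look like:

modal run examples/example-text-to-image.py --prompt "a knight guarding a castle gate" --samples 2

The flag names mirror the parameter names above; that mapping is standard modal run behavior rather than anything specific to this repo.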
examples/example-text-to-video.py
ADDED
@@ -0,0 +1,32 @@
import string
import time
from pathlib import Path

import modal

app = modal.App()

image = (
    modal.Image.debian_slim(python_version="3.11")
    .apt_install("git")
    .pip_install(
        "torch==2.5.1",
        "accelerate==1.1.1",
        "hf_transfer==0.1.8",
        "sentencepiece==0.2.0",
        "imageio==2.36.0",
        "imageio-ffmpeg==0.5.1",
        "git+https://github.com/huggingface/transformers@30335093276212ce74938bdfd85bfd5df31a668a",
        "git+https://github.com/huggingface/diffusers@99c0483b67427de467f11aa35d54678fd36a7ea2",
    )
    .env(
        {
            "HF_HUB_ENABLE_HF_TRANSFER": "1",
            "HF_HOME": "/models",
        }
    )
)


# Full walkthrough: https://modal.com/docs/examples/mochi
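The linked Mochi example is not reproduced here. As a hedged sketch of the usual next step, pre-fetching the weights into the image's HF_HOME (/models) so later runs start warm might look like this; the volume name and repo id are assumptions, not taken from this snippet:

# Hypothetical sketch: warm the model cache on a persistent volume.
model_volume = modal.Volume.from_name("mochi-models", create_if_missing=True)

@app.function(image=image, volumes={"/models": model_volume}, timeout=60 * 60)
def download_model():
    from huggingface_hub import snapshot_download

    # HF_HOME=/models, so the snapshot lands on the mounted volume.
    snapshot_download(repo_id="genmo/mochi-1-preview")
    model_volume.commit()  # persist the cache for other functions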
examples/example_check_imports_volume.py
ADDED
@@ -0,0 +1,32 @@
import os


def check_dependencies():
    import importlib
    packages = [
        "diffusers",      # For Stable Diffusion
        "transformers",   # For Hugging Face models
        "torch",          # PyTorch
        "accelerate",     # For distributed training/inference
        "gradio",         # For the Gradio interface
        "safetensors",    # For safe model loading
        "pillow",         # For image processing
        "sentencepiece",
    ]

    for package in packages:
        import_name = "PIL" if package == "pillow" else package  # pillow installs the PIL module
        try:
            module = importlib.import_module(import_name)
            version = getattr(module, "__version__", "unknown")
            print(f"{package} is installed. Version: {version}")
        except ImportError:
            print(f"{package} is NOT installed.")

check_dependencies()

def check_volume_contents():
    model_path = "/data/FLUX.1-dev"
    if os.path.exists(model_path):
        print(f"Contents of {model_path}:")
        print(os.listdir(model_path))
    else:
        print(f"Model path {model_path} does not exist.")

check_volume_contents()
examples/example_dynamic_decorator.py
ADDED
@@ -0,0 +1,49 @@
import modal

# Define the Modal image and app
image = modal.Image.debian_slim(python_version="3.9")
app = modal.App("example-app", image=image)

# Define the volume
flux_model_vol = modal.Volume.from_name("flux-model-vol", create_if_missing=True)

# Load configuration (e.g., from a config file or environment variables)
cpu = 8  # Set to 0 to disable CPU
memory = 70000  # Memory in MB
gpu = "a100-80gb"  # Set to None to disable GPU

# Dynamically construct the decorator arguments
decorator_args = {
    "volumes": {"/data": flux_model_vol},
    "secrets": [modal.Secret.from_name("huggingface-token")],
    "cpu": cpu,
    "memory": memory,
}

# Only request a GPU when no explicit CPU configuration is set
if cpu > 0:
    print("CPU is set, removing GPU parameter.")
    decorator_args.pop("gpu", None)  # Remove 'gpu' if it exists
else:
    print("CPU is not set, keeping GPU parameter.")
    decorator_args["gpu"] = gpu

# Debug: print the final decorator arguments
print("Decorator arguments:", decorator_args)

# Apply the decorator dynamically
@app.function(**decorator_args)
def my_function():
    import os

    # Example: list the contents of the volume
    print("Contents of /data:")
    print(os.listdir("/data"))

    return f"Function executed with CPU={cpu}, Memory={memory}, GPU={decorator_args.get('gpu', 'None')}"

# Call the function (invoke via `modal run`; a module-level .remote()
# call would fail outside an app context)
@app.local_entrypoint()
def main():
    result = my_function.remote()
    print(result)
examples/example_image_settings.py
ADDED
@@ -0,0 +1,52 @@
import modal

image = (
    modal.Image.from_registry(
        "nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.11"
    )
    .pip_install(
        "numpy",
        "pandas",
        "diffusers",
        "transformers",
        "torch",
        "accelerate",
        "gradio",
        "safetensors",
        "pillow",
    )  # Install Python packages
    .run_commands("echo 'Image build complete!'")  # Run a shell command
)


# CHECK INSTALLS
def function():
    # Import libraries and print their versions
    import numpy as np
    import pandas as pd
    import torch
    import diffusers
    import transformers
    import gradio as gr
    import PIL

    print("Hello from ctb_modal!")
    print("NumPy version:", np.__version__)
    print("Pandas version:", pd.__version__)
    print("PyTorch version:", torch.__version__)
    print("Diffusers version:", diffusers.__version__)
    print("Transformers version:", transformers.__version__)
    print("Gradio version:", gr.__version__)
    print("Pillow version:", PIL.__version__)  # version lives on the PIL package, not PIL.Image


# # Run the function locally (for testing)
# if __name__ == "__main__":
#     print("Running the function locally...")
#     main.local()


# Alternative base image (the original file ends with this unterminated
# expression; closed here so the snippet parses)
image = (
    modal.Image.debian_slim(python_version="3.9")  # Base image
)
examples/example_img_gen.py
ADDED
@@ -0,0 +1,79 @@
# img_gen_modal.py
import modal
import random
import io
import os
from datetime import datetime

from config.config import prompts, models  # Indirect import
import torch
from huggingface_hub import login
from transformers import AutoTokenizer

# Define the Modal image
image = (
    # modal.Image.from_registry("nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.9")
    modal.Image.debian_slim(python_version="3.9")  # Base image
    .apt_install(
        "git",
    )
    .pip_install(
        "diffusers",
        "transformers",
        "torch",
        "accelerate",
        "gradio>=4.44.1",
        "safetensors",
        "pillow",
        "sentencepiece",
        "hf_transfer",
        "huggingface_hub[hf_transfer]",
        "aria2",  # aria2 for ultra-fast parallel downloads
        "git+https://github.com/huggingface/transformers.git",
    )
    .env(
        {
            "HF_HUB_ENABLE_HF_TRANSFER": "1", "HF_HOME": "HF_HOME"
        }
    )
)

# Create a Modal app
app = modal.App("img-gen-modal", image=image)
with image.imports():
    import diffusers
    import os
    import gradio
    import torch
    import sentencepiece

# flux_model_vol = modal.Volume.from_name("flux-model-vol", create_if_missing=True)  # Reference your volume

@app.function(
    secrets=[modal.Secret.from_name("huggingface-token")],
    # volumes={"/data": flux_model_vol},
    gpu="t4",
    timeout=600,
)
def generate_image():
    import torch
    from diffusers import FluxPipeline

    pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)

    prompt = "A cat holding a sign that says hello world"
    image = pipe(
        prompt,
        height=1024,
        width=1024,
        guidance_scale=3.5,
        num_inference_steps=50,
        max_sequence_length=512,
        generator=torch.Generator("cpu").manual_seed(0)
    ).images[0]
    image.save("flux-dev.png")


# Invoke via `modal run`: calling generate_image() directly at import time
# would fail outside an app context.
@app.local_entrypoint()
def main():
    generate_image.remote()
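With the entrypoint above, modal run examples/example_img_gen.py should build the image, run generate_image on a T4, and write flux-dev.png inside the container. Worth noting: container filesystems are ephemeral, so persisting the output would require either mounting the commented-out volume (and saving under /data) or returning the image bytes to the caller, as the other examples in this commit do.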
examples/example_loading_model.py
ADDED
@@ -0,0 +1,32 @@
import gradio as gr
import modal
from modal import App, Image, Volume
from transformers import AutoModel, AutoTokenizer
import os

app = App("gradio-app")
volume = Volume.from_name("flux-model-vol-2")
image = Image.debian_slim().pip_install("transformers", "torch", "sentencepiece", "gradio")

@app.function(image=image, volumes={"/data": volume})
def load_model():
    model_name = "FLUX.1-dev"
    cache_dir = f"/data/{model_name}"

    print(f"Loading model {model_name} from cache...")
    model = AutoModel.from_pretrained(cache_dir)
    tokenizer = AutoTokenizer.from_pretrained(cache_dir)

    print(f"Model {model_name} loaded successfully!")
    return model, tokenizer

def predict(input_text):
    model, tokenizer = load_model.remote()
    inputs = tokenizer(input_text, return_tensors="pt")
    outputs = model(**inputs)
    return tokenizer.decode(outputs.logits.argmax(dim=-1)[0])

if __name__ == "__main__":
    with app.run():
        iface = gr.Interface(fn=predict, inputs="text", outputs="text")
        iface.launch()
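One caveat with this layout: load_model.remote() has to serialize the model and tokenizer back to the caller, which is slow or outright impossible for multi-gigabyte weights. A hedged sketch of the usual alternative, keeping inference on Modal and shipping only the decoded string back (the function name and decode logic below are illustrative, reusing the pattern from predict above):

# Hypothetical variant: keep the heavy objects remote, return only text.
@app.function(image=image, volumes={"/data": volume})
def predict_remote(input_text: str) -> str:
    from transformers import AutoModel, AutoTokenizer

    cache_dir = "/data/FLUX.1-dev"
    tokenizer = AutoTokenizer.from_pretrained(cache_dir)
    model = AutoModel.from_pretrained(cache_dir)
    inputs = tokenizer(input_text, return_tensors="pt")
    outputs = model(**inputs)
    return tokenizer.decode(outputs.logits.argmax(dim=-1)[0])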
examples/example_output_dir.py
ADDED
@@ -0,0 +1,25 @@
import pathlib

import modal

app = modal.App("finetune-app")  # added so the snippet is self-contained; app name is illustrative
volume = modal.Volume.from_name("my-volume")
VOL_MOUNT_PATH = pathlib.Path("/vol")

@app.function(
    gpu="A10G",
    timeout=2 * 60 * 60,  # run for at most two hours
    volumes={VOL_MOUNT_PATH: volume},
)
def finetune():
    from transformers import Seq2SeqTrainer, Seq2SeqTrainingArguments
    ...

    training_args = Seq2SeqTrainingArguments(
        output_dir=str(VOL_MOUNT_PATH / "model"),
        # ... more args here
    )

    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_xsum_train,
        eval_dataset=tokenized_xsum_test,
    )
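A follow-up worth sketching: flushing the volume after training makes the saved model immediately visible to other containers (recent Modal versions also commit in the background and on clean exit), so the tail of finetune() could plausibly end like this:

    # Illustrative continuation of finetune() -- trainer and paths as above.
    trainer.train()
    trainer.save_model(str(VOL_MOUNT_PATH / "model"))
    volume.commit()  # flush /vol/model so other functions can read it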
examples/functions.py
ADDED
@@ -0,0 +1,69 @@
import modal

# Print debug information
print("Importing Modal and setting up the app...")

# Define a custom image with Python and some dependencies
print("Building custom image...")
image = (
    modal.Image.debian_slim(python_version="3.11")  # Base image
    .pip_install(
        "numpy",
        "pandas",
        "diffusers",
        "transformers",
        "torch",
        "accelerate",
        "gradio",
        "safetensors",
        "pillow",
    )  # Install Python packages
    .run_commands("echo 'Image build complete!'")  # Run a shell command
)

# Define a function to run inside the container
# @app.function(image=image)

# Define the Modal app
app = modal.App("functions-app")

@app.function()
def message_func(message="default"):
    print("message function")
    new_message = message + " ok, it works!"
    return new_message


@app.local_entrypoint()
def main():
    # Import libraries and print their versions
    # import numpy as np
    # import pandas as pd
    # import torch
    # import diffusers
    # import transformers
    # import gradio as gr
    # from PIL import Image as PILImage

    # print("def main function")
    # print("Hello from Modal!")
    # print("NumPy version:", np.__version__)
    # print("Pandas version:", pd.__version__)
    # print("PyTorch version:", torch.__version__)
    # print("Diffusers version:", diffusers.__version__)
    # print("Transformers version:", transformers.__version__)
    # print("Gradio version:", gr.__version__)
    # print("Pillow version:", PILImage.__version__)

    remote_message = "remote message!"
    local_message = "local message"
    message_func.remote(remote_message)
    message_func.local(local_message)


# # Run the function locally (for testing)
# if __name__ == "__main__":
#     print("Running the function locally...")
#     main.local()
#     main.remote()
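A note on how this file pairs with the next one: message_func only becomes resolvable by name once functions-app has been deployed, e.g. with modal deploy examples/functions.py. After that, the modal.Function.from_name("functions-app", "message_func") lookup in modal_functions_remote_call.py can find and invoke it across apps.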
examples/modal_functions_remote_call.py
ADDED
@@ -0,0 +1,51 @@
import modal

image = (
    modal.Image.debian_slim(python_version="3.11")  # Base image
    .pip_install(
        "numpy",
        "pandas",
        "diffusers",
        "transformers",
        "torch",
        "accelerate",
        "gradio",
        "safetensors",
        "pillow",
    )  # Install Python packages
    .run_commands("echo 'Image build complete!'")  # Run a shell command
)

app = modal.App("functions-call-app", image=image)

@app.function()
def main():
    # Import libraries and print their versions
    import numpy as np
    import pandas as pd
    import torch
    import diffusers
    import transformers
    import gradio as gr
    import PIL

    print("def main function")
    print("Hello from Modal!")
    print("NumPy version:", np.__version__)
    print("Pandas version:", pd.__version__)
    print("PyTorch version:", torch.__version__)
    print("Diffusers version:", diffusers.__version__)
    print("Transformers version:", transformers.__version__)
    print("Gradio version:", gr.__version__)
    print("Pillow version:", PIL.__version__)

    # Call message_func on the deployed "functions-app" by name
    f = modal.Function.from_name("functions-app", "message_func")
    messageNEW = "Remote call Hello World!"
    messageTEMP = "TEMP"
    result = f.remote(messageNEW)
    print(result)

# Run the function locally (for testing)
if __name__ == "__main__":
    print("Running the function locally...")
    main.local()
    with app.run():  # an app context is required for .remote() outside `modal run`
        main.remote()
examples/modal_image_header.py
ADDED
@@ -0,0 +1,58 @@
import modal

# Define the Modal image
image = (
    modal.Image.from_registry("nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.9")
    .apt_install(
        "git",
    )
    .pip_install(
        "diffusers",
        "transformers",
        "torch",
        "accelerate",
        "gradio>=4.44.1",
        "safetensors",
        "pillow",
        "sentencepiece",
        "hf_transfer",
        "huggingface_hub[hf_transfer]",
        "aria2",  # aria2 for ultra-fast parallel downloads
    )
    .env(
        {
            "HF_HUB_ENABLE_HF_TRANSFER": "1", "HF_HOME": "HF_HOME"
        }
    )
)

# Create a Modal app
app = modal.App("img-gen-modal", image=image)
with image.imports():
    import diffusers
    import os
    import gradio
    import torch
    import sentencepiece
    import transformers
    from huggingface_hub import InferenceClient, login


@app.function(
    secrets=[modal.Secret.from_name("huggingface-token")],
    gpu="t4",
    timeout=600,
)
def generate_image():
    ...  # body elided in the original scratch file


# Define the Modal image (alternative minimal variant; the original
# header file ends with this second definition)
image = (
    modal.Image.from_registry("nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.9")
    # modal.Image.debian_slim(python_version="3.9")  # Base image
    .apt_install(
        "git",
    )
    .pip_install(
        "torch"
    )
)