fantaxy committed on
Commit 03af426 · verified · 1 Parent(s): 0133b45

Update app.py

Files changed (1)
  1. app.py +196 -57
app.py CHANGED
@@ -1,48 +1,107 @@
-import spaces
+import random
+import os
+import uuid
+from datetime import datetime
 import gradio as gr
 import numpy as np
-import random
+import spaces
 import torch
-from diffusers import AuraFlowPipeline
+from diffusers import DiffusionPipeline
+from PIL import Image
+
+# Create permanent storage directory
+SAVE_DIR = "saved_images" # Gradio will handle the persistence
+if not os.path.exists(SAVE_DIR):
+    os.makedirs(SAVE_DIR, exist_ok=True)

 device = "cuda" if torch.cuda.is_available() else "cpu"
+repo_id = "black-forest-labs/FLUX.1-dev"
+adapter_id = "seawolf2357/flux-lora-car-rolls-royce"

-# Initialize the AuraFlow v0.3 pipeline
-pipe = AuraFlowPipeline.from_pretrained(
-    "fal/AuraFlow-v0.3",
-    torch_dtype=torch.float16
-).to(device)
+pipeline = DiffusionPipeline.from_pretrained(repo_id, torch_dtype=torch.bfloat16)
+pipeline.load_lora_weights(adapter_id)
+pipeline = pipeline.to(device)

 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1024

-@spaces.GPU
-def infer(prompt,
-          negative_prompt="",
-          seed=42,
-          randomize_seed=False,
-          width=1024,
-          height=1024,
-          guidance_scale=5.0,
-          num_inference_steps=28,
-          progress=gr.Progress(track_tqdm=True)):
+def save_generated_image(image, prompt):
+    # Generate unique filename with timestamp
+    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+    unique_id = str(uuid.uuid4())[:8]
+    filename = f"{timestamp}_{unique_id}.png"
+    filepath = os.path.join(SAVE_DIR, filename)
+
+    # Save the image
+    image.save(filepath)
+
+    # Save metadata
+    metadata_file = os.path.join(SAVE_DIR, "metadata.txt")
+    with open(metadata_file, "a", encoding="utf-8") as f:
+        f.write(f"{filename}|{prompt}|{timestamp}\n")
+
+    return filepath
+
+def load_generated_images():
+    if not os.path.exists(SAVE_DIR):
+        return []
+
+    # Load all images from the directory
+    image_files = [os.path.join(SAVE_DIR, f) for f in os.listdir(SAVE_DIR)
+                   if f.endswith(('.png', '.jpg', '.jpeg', '.webp'))]
+    # Sort by creation time (newest first)
+    image_files.sort(key=lambda x: os.path.getctime(x), reverse=True)
+    return image_files
+
+def load_predefined_images():
+    # Return empty list since we're not using predefined images
+    return []
+
+@spaces.GPU(duration=120)
+def inference(
+    prompt: str,
+    seed: int,
+    randomize_seed: bool,
+    width: int,
+    height: int,
+    guidance_scale: float,
+    num_inference_steps: int,
+    lora_scale: float,
+    progress: gr.Progress = gr.Progress(track_tqdm=True),
+):
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
-
     generator = torch.Generator(device=device).manual_seed(seed)

-    image = pipe(
-        prompt=prompt,
-        negative_prompt=negative_prompt,
+    image = pipeline(
+        prompt=prompt,
+        guidance_scale=guidance_scale,
+        num_inference_steps=num_inference_steps,
         width=width,
         height=height,
-        guidance_scale=guidance_scale,
-        num_inference_steps=num_inference_steps,
-        generator=generator
+        generator=generator,
+        joint_attention_kwargs={"scale": lora_scale},
     ).images[0]
-
-    return image, seed
+
+    # Save the generated image
+    filepath = save_generated_image(image, prompt)
+
+    # Return the image, seed, and updated gallery
+    return image, seed, load_generated_images()
+
+examples = [
+    "A majestic Rolls-Royce Phantom parked in front of a grand Mediterranean villa at golden hour, its iconic Spirit of Ecstasy gleaming in the sunset. The celestial silver paintwork catches the warm light, while blooming jasmine and bougainvillea frame the scene. The handcrafted details of the grill reflect the sophistication of old-world luxury. [trigger]",
+
+    "A Rolls-Royce Ghost gliding through misty London streets at dawn, its starlight headliner illuminating the bespoke interior. Rain droplets on the flawless black paint create a constellation of reflections, while the illuminated Pantheon grille casts a gentle glow on the wet cobblestones. [trigger]",
+
+    "A pristine Rolls-Royce Cullinan ascending a snow-covered Alpine road, its Arctic White exterior complementing the winter landscape. The SUV's powerful presence is highlighted by the rising sun glinting off its chrome details, while the heated interior cocoons its passengers in ultimate luxury. [trigger]",
+
+    "A vintage Rolls-Royce Silver Cloud III positioned in the courtyard of a historic English manor, surrounded by perfectly manicured topiary gardens. The classic two-tone paint scheme gleams under the afternoon sun, while its polished wooden dashboard and leather interior speak of timeless craftsmanship. [trigger]",
+
+    "A Rolls-Royce Wraith Black Badge cruising along the Monaco coastline at twilight, its dark chrome finish absorbing the neon lights of the principality. The sleek fastback silhouette cuts through the Mediterranean air, while its illuminated starlight roof mirrors the emerging stars above. [trigger]",
+
+    "A bespoke Rolls-Royce Dawn convertible beneath cherry blossoms in Kyoto, its custom champagne paintwork harmonizing with the falling petals. The handstitched leather interior matches the color of the sunset, while the polished teak deck echoes the traditional architecture of nearby temples. [trigger]"
+]

 css = """
 footer {
@@ -50,38 +109,118 @@ footer {
 }
 """

-with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css) as demo:
-
-    with gr.Row():
-        with gr.Column(scale=1):
-            prompt = gr.Text(label="Prompt", placeholder="Enter your prompt")
-            negative_prompt = gr.Text(label="Negative prompt", placeholder="Enter a negative prompt")
-            seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
-            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-            width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024)
-            height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024)
-            guidance_scale = gr.Slider(label="Guidance scale", minimum=0.0, maximum=10.0, step=0.1, value=5.0)
-            num_inference_steps = gr.Slider(label="Number of inference steps", minimum=1, maximum=50, step=1, value=28)
-            run_button = gr.Button("Generate")
-
-        with gr.Column(scale=1):
-            result = gr.Image(label="Generated Image")
-            seed_output = gr.Number(label="Seed used")
-
-    run_button.click(
-        fn=infer,
-        inputs=[prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
-        outputs=[result, seed_output]
+with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange", css=css, analytics_enabled=False) as demo:
+    gr.HTML('<div class="title"> Flux lora RollsRoyce</div>')
+    gr.HTML('<div class="title">😄Image to Video Explore: <a href="https://huggingface.co/spaces/ginigen/theater" target="_blank">https://huggingface.co/spaces/ginigen/theater</a></div>')
+
+    with gr.Tabs() as tabs:
+        with gr.Tab("Generation"):
+            with gr.Column(elem_id="col-container"):
+                with gr.Row():
+                    prompt = gr.Text(
+                        label="Prompt",
+                        show_label=False,
+                        max_lines=1,
+                        placeholder="Enter your prompt",
+                        container=False,
+                    )
+                    run_button = gr.Button("Run", scale=0)
+
+                result = gr.Image(label="Result", show_label=False)
+
+                with gr.Accordion("Advanced Settings", open=False):
+                    seed = gr.Slider(
+                        label="Seed",
+                        minimum=0,
+                        maximum=MAX_SEED,
+                        step=1,
+                        value=42,
+                    )
+                    randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+
+                    with gr.Row():
+                        width = gr.Slider(
+                            label="Width",
+                            minimum=256,
+                            maximum=MAX_IMAGE_SIZE,
+                            step=32,
+                            value=1024,
+                        )
+                        height = gr.Slider(
+                            label="Height",
+                            minimum=256,
+                            maximum=MAX_IMAGE_SIZE,
+                            step=32,
+                            value=768,
+                        )
+
+                    with gr.Row():
+                        guidance_scale = gr.Slider(
+                            label="Guidance scale",
+                            minimum=0.0,
+                            maximum=10.0,
+                            step=0.1,
+                            value=3.5,
+                        )
+                        num_inference_steps = gr.Slider(
+                            label="Number of inference steps",
+                            minimum=1,
+                            maximum=50,
+                            step=1,
+                            value=30,
+                        )
+                        lora_scale = gr.Slider(
+                            label="LoRA scale",
+                            minimum=0.0,
+                            maximum=1.0,
+                            step=0.1,
+                            value=1.0,
+                        )
+
+                gr.Examples(
+                    examples=examples,
+                    inputs=[prompt],
+                    outputs=[result, seed],
+                )
+
+        with gr.Tab("Gallery"):
+            gallery_header = gr.Markdown("### Generated Images Gallery")
+            generated_gallery = gr.Gallery(
+                label="Generated Images",
+                columns=6,
+                show_label=False,
+                value=load_generated_images(),
+                elem_id="generated_gallery",
+                height="auto"
+            )
+            refresh_btn = gr.Button("🔄 Refresh Gallery")
+
+
+    # Event handlers
+    def refresh_gallery():
+        return load_generated_images()
+
+    refresh_btn.click(
+        fn=refresh_gallery,
+        inputs=None,
+        outputs=generated_gallery,
     )

-    gr.Examples(
-        examples=[
-            "A photo of a lavender cat",
-            "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
-            "An astronaut riding a green horse",
-            "A delicious ceviche cheesecake slice",
+    gr.on(
+        triggers=[run_button.click, prompt.submit],
+        fn=inference,
+        inputs=[
+            prompt,
+            seed,
+            randomize_seed,
+            width,
+            height,
+            guidance_scale,
+            num_inference_steps,
+            lora_scale,
         ],
-        inputs=prompt,
+        outputs=[result, seed, generated_gallery],
     )

-demo.queue().launch(server_name="0.0.0.0", share=False)
+demo.queue()
+demo.launch()
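
Note on the new generation path: the commit swaps the AuraFlow v0.3 pipeline for FLUX.1-dev plus a LoRA adapter, and the adapter strength is passed to the pipeline call through joint_attention_kwargs={"scale": lora_scale}. The snippet below is a minimal sketch of that same path outside the Gradio app, reusing the repo and adapter ids from the diff; it assumes a diffusers build with Flux support, peft installed, and a GPU with enough memory for the bfloat16 weights.

import torch
from diffusers import DiffusionPipeline

repo_id = "black-forest-labs/FLUX.1-dev"
adapter_id = "seawolf2357/flux-lora-car-rolls-royce"

# Load the base model and apply the LoRA adapter, as in the updated app.py
pipe = DiffusionPipeline.from_pretrained(repo_id, torch_dtype=torch.bfloat16)
pipe.load_lora_weights(adapter_id)
pipe = pipe.to("cuda")

generator = torch.Generator(device="cuda").manual_seed(42)
image = pipe(
    prompt="A majestic Rolls-Royce Phantom at golden hour [trigger]",
    guidance_scale=3.5,          # default of the app's Guidance scale slider
    num_inference_steps=30,      # default of the app's steps slider
    width=1024,
    height=768,
    generator=generator,
    joint_attention_kwargs={"scale": 1.0},  # LoRA strength, the app's lora_scale slider
).images[0]
image.save("rolls_royce.png")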
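
The gallery persistence is a flat log: save_generated_image() appends one pipe-delimited filename|prompt|timestamp line per image to saved_images/metadata.txt, while load_generated_images() only lists the image files. Below is a small sketch of reading that log back, for example to pair each gallery image with its prompt as a caption. parse_metadata is an illustrative helper, not part of the commit, and it assumes prompts contain no "|" characters.

import os

SAVE_DIR = "saved_images"

def parse_metadata(save_dir=SAVE_DIR):
    # Parse the filename|prompt|timestamp lines written by save_generated_image()
    metadata_file = os.path.join(save_dir, "metadata.txt")
    if not os.path.exists(metadata_file):
        return []
    entries = []
    with open(metadata_file, encoding="utf-8") as f:
        for line in f:
            parts = line.rstrip("\n").split("|")
            if len(parts) == 3:  # skip malformed lines
                filename, prompt, timestamp = parts
                entries.append({
                    "path": os.path.join(save_dir, filename),
                    "prompt": prompt,
                    "timestamp": timestamp,
                })
    return entries

# Example: (image, caption) pairs, a format gr.Gallery can also take as its value
pairs = [(e["path"], e["prompt"]) for e in parse_metadata()]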