mrfakename committed on
Commit
b77578a
·
verified ·
1 Parent(s): d20ce72

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -10,8 +10,8 @@ MODELS = {
10
  MODEL_CACHE = {}
11
  for id, model in MODELS.items():
12
  print(f"Loading model {model}...")
13
- MODEL_CACHE[model] = FluxPipeline.from_pretrained(model, torch_dtype=torch.bfloat16)
14
- MODEL_CACHE[model].enable_model_cpu_offload() #save some VRAM by offloading the model to CPU. Remove this if you have enough GPU power
15
  print(f"Loaded model {model}")
16
 
17
  @spaces.GPU
@@ -30,7 +30,7 @@ def generate(text):
30
  # image.save("flux-dev.png")
31
 
32
  with gr.Blocks() as demo:
33
- prompt = gr.Textbox("Prompt")
34
  btn = gr.Button("Generate", variant="primary")
35
  out = gr.Image(label="Generated image", interactive=False)
36
  btn.click(generate,inputs=prompt,outputs=out)
 
10
  MODEL_CACHE = {}
11
  for id, model in MODELS.items():
12
  print(f"Loading model {model}...")
13
+ MODEL_CACHE[id] = FluxPipeline.from_pretrained(model, torch_dtype=torch.bfloat16)
14
+ MODEL_CACHE[id].enable_model_cpu_offload() #save some VRAM by offloading the model to CPU. Remove this if you have enough GPU power
15
  print(f"Loaded model {model}")
16
 
17
  @spaces.GPU
 
30
  # image.save("flux-dev.png")
31
 
32
  with gr.Blocks() as demo:
33
+ prompt = gr.Textbox(label="Prompt")
34
  btn = gr.Button("Generate", variant="primary")
35
  out = gr.Image(label="Generated image", interactive=False)
36
  btn.click(generate,inputs=prompt,outputs=out)