gokaygokay committed on
Commit aca93c2 · 1 Parent(s): e25da44
Files changed (1)
  1. app.py +17 -17
app.py CHANGED
@@ -1,26 +1,26 @@
-import os
+import spaces
 import torch
 import gradio as gr
 from diffusers import FluxTransformer2DModel, FluxPipeline, BitsAndBytesConfig
 from transformers import T5EncoderModel, BitsAndBytesConfig as BitsAndBytesConfigTF
 
+# Initialize model outside the function
+device = "cuda" if torch.cuda.is_available() else "cpu"
+dtype = torch.bfloat16
+single_file_base_model = "camenduru/FLUX.1-dev-diffusers"
+file_url = "https://huggingface.co/lodestones/Chroma/resolve/main/chroma-unlocked-v31.safetensors"
+
+quantization_config_tf = BitsAndBytesConfigTF(load_in_8bit=True, bnb_8bit_compute_dtype=torch.bfloat16)
+text_encoder_2 = T5EncoderModel.from_pretrained(single_file_base_model, subfolder="text_encoder_2", torch_dtype=dtype, config=single_file_base_model, quantization_config=quantization_config_tf)
+
+quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_use_double_quant=True, bnb_4bit_compute_dtype=torch.bfloat16)
+transformer = FluxTransformer2DModel.from_single_file(file_url, subfolder="transformer", torch_dtype=dtype, config=single_file_base_model, quantization_config=quantization_config)
+
+flux_pipeline = FluxPipeline.from_pretrained(single_file_base_model, transformer=transformer, text_encoder_2=text_encoder_2, torch_dtype=dtype)
+flux_pipeline.to(device)
+
+@spaces.GPU()
 def generate_image(prompt, negative_prompt="", num_inference_steps=30, guidance_scale=7.5):
-    # Initialize Flux pipeline
-    device = "cuda" if torch.cuda.is_available() else "cpu"
-
-    dtype = torch.bfloat16
-    single_file_base_model = "camenduru/FLUX.1-dev-diffusers"
-    file_url = "https://huggingface.co/lodestones/Chroma/resolve/main/chroma-unlocked-v31.safetensors"
-
-    quantization_config_tf = BitsAndBytesConfigTF(load_in_8bit=True, bnb_8bit_compute_dtype=torch.bfloat16)
-    text_encoder_2 = T5EncoderModel.from_pretrained(single_file_base_model, subfolder="text_encoder_2", torch_dtype=dtype, config=single_file_base_model, quantization_config=quantization_config_tf)
-
-    quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_use_double_quant=True, bnb_4bit_compute_dtype=torch.bfloat16)
-    transformer = FluxTransformer2DModel.from_single_file(file_url, subfolder="transformer", torch_dtype=dtype, config=single_file_base_model, quantization_config=quantization_config)
-
-    flux_pipeline = FluxPipeline.from_pretrained(single_file_base_model, transformer=transformer, text_encoder_2=text_encoder_2, torch_dtype=dtype)
-    flux_pipeline.to(device)
-
     # Generate image
     image = flux_pipeline(
         prompt=prompt,
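
For context, this commit follows the usual ZeroGPU Spaces pattern: build the pipeline once at module import time, then request a GPU only for the decorated inference call via @spaces.GPU(). The sketch below is a minimal, self-contained version of that pattern, not the full app from this commit — it omits the 4-bit/8-bit quantization setup and the Chroma transformer shown in the diff, and the Gradio wiring and the .images[0] return handling are assumptions, since the hunk is truncated before that point.

import spaces
import torch
import gradio as gr
from diffusers import FluxPipeline

# Build the pipeline once at import time (as the commit does), so the decorated
# function only runs inference. Quantization and the custom single-file
# transformer from the diff are omitted here for brevity.
device = "cuda" if torch.cuda.is_available() else "cpu"
flux_pipeline = FluxPipeline.from_pretrained("camenduru/FLUX.1-dev-diffusers", torch_dtype=torch.bfloat16)
flux_pipeline.to(device)

@spaces.GPU()  # on ZeroGPU Spaces, a GPU is attached only for the duration of each call
def generate_image(prompt, num_inference_steps=30, guidance_scale=7.5):
    # Assumed return handling; the diff above ends before the call completes.
    result = flux_pipeline(
        prompt=prompt,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
    )
    return result.images[0]

# Hypothetical Gradio wiring, not part of this commit.
demo = gr.Interface(fn=generate_image, inputs="text", outputs="image")
demo.launch()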