Update app.py
app.py CHANGED
@@ -1,4 +1,3 @@
-import spaces # Import this first to avoid CUDA initialization issues
 import os
 import gradio as gr
 import numpy as np
@@ -7,18 +6,13 @@ from accelerate import dispatch_model, infer_auto_device_map
 from accelerate.utils import get_balanced_memory
 from torch.cuda.amp import autocast
 import torch
+import spaces # Import this first to avoid CUDA initialization issues
 import random
 import time
 from PIL import Image
 from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler, FluxTransformer2DModel
 from transformers import CLIPTextModel, CLIPTokenizer,T5EncoderModel, T5TokenizerFast
 
-
-
-# Define the device
-dtype = torch.bfloat16
-device = "cuda:0" if torch.cuda.is_available() else "cpu"
-
 # Use the 'waffles' environment variable as the access token
 hf_token = os.getenv('waffles')
 
@@ -26,6 +20,14 @@ hf_token = os.getenv('waffles')
 if not hf_token:
     raise ValueError("Hugging Face API token not found. Please set the 'waffles' environment variable.")
 
+# Define the device
+dtype = torch.bfloat16
+device = "cuda:0" if torch.cuda.is_available() else "cpu"
+
+count0 = torch.zeros(1).to(device)
+count1 = torch.zeros(1).to(device)
+count2 = torch.zeros(1).to(device)
+
 # Load LoRAs from JSON file
 with open('loras.json', 'r') as f:
     loras = json.load(f)
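
For readability, here is a sketch of how the top of app.py reads once this commit is applied, reconstructed from the hunks above. The lines the diff elides (new lines 4-5) are not shown verbatim: the accelerate import is taken from a hunk header, and the import json line is an assumption (json.load is used below, but its import sits outside the visible hunks).

# Sketch: top of app.py after this commit, reconstructed from the diff above (not the full file).
import os
import gradio as gr
import numpy as np
import json  # assumption: needed for json.load below; the actual import is in lines the diff elides
from accelerate import dispatch_model, infer_auto_device_map  # context line taken from the hunk header
from accelerate.utils import get_balanced_memory
from torch.cuda.amp import autocast
import torch
import spaces  # Import this first to avoid CUDA initialization issues
import random
import time
from PIL import Image
from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler, FluxTransformer2DModel
from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5TokenizerFast

# Use the 'waffles' environment variable as the access token
hf_token = os.getenv('waffles')

if not hf_token:
    raise ValueError("Hugging Face API token not found. Please set the 'waffles' environment variable.")

# Define the device
dtype = torch.bfloat16
device = "cuda:0" if torch.cuda.is_available() else "cpu"

count0 = torch.zeros(1).to(device)
count1 = torch.zeros(1).to(device)
count2 = torch.zeros(1).to(device)

# Load LoRAs from JSON file
with open('loras.json', 'r') as f:
    loras = json.load(f)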