Update app.py
app.py
CHANGED
@@ -6,18 +6,18 @@ import tempfile
 import spaces
 
 @spaces.GPU
-def initialize_pipeline():
-    # Check if CUDA is available and set the device
-    device = "cuda" if torch.cuda.is_available() else "cpu"
-
+def initialize_pipeline(device):
     # Initialize the pipeline with CUDA support
     pipeline = I2VGenXLPipeline.from_pretrained("ali-vilab/i2vgen-xl", torch_dtype=torch.float16, variant="fp16")
     pipeline.to(device)
-    return pipeline
+    return pipeline
 
 def generate_gif(prompt, image, negative_prompt, num_inference_steps, guidance_scale, seed):
-    #
-
+    # Check if CUDA is available and set the device
+    device = "cuda" if torch.cuda.is_available() else "cpu"
+
+    # Initialize the pipeline within the function
+    pipeline = initialize_pipeline(device)
 
     # Set the generator seed
     generator = torch.Generator(device=device).manual_seed(seed)
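
For context, this commit moves device selection out of the GPU-decorated initialize_pipeline and into generate_gif, which now passes the device in explicitly and builds the pipeline per call. Below is a minimal sketch of how the two functions fit together after the change; the imports and the frame-generation/export tail of generate_gif are not part of this diff and are a hedged reconstruction (export_to_gif and the pipeline call signature are assumptions based on the diffusers I2VGen-XL API, not on this commit).

# Sketch of the refactored app.py after this commit (assumptions noted inline).
import torch
import spaces  # Hugging Face ZeroGPU helper providing the @spaces.GPU decorator
from diffusers import I2VGenXLPipeline
from diffusers.utils import export_to_gif  # assumption: not shown in the diff


@spaces.GPU
def initialize_pipeline(device):
    # Initialize the pipeline with CUDA support
    pipeline = I2VGenXLPipeline.from_pretrained(
        "ali-vilab/i2vgen-xl", torch_dtype=torch.float16, variant="fp16"
    )
    pipeline.to(device)
    return pipeline


def generate_gif(prompt, image, negative_prompt, num_inference_steps, guidance_scale, seed):
    # Check if CUDA is available and set the device
    device = "cuda" if torch.cuda.is_available() else "cpu"

    # Initialize the pipeline within the function
    pipeline = initialize_pipeline(device)

    # Set the generator seed
    generator = torch.Generator(device=device).manual_seed(seed)

    # The rest of the function is not shown in the diff; a typical
    # I2VGen-XL invocation would look roughly like this (assumption):
    frames = pipeline(
        prompt=prompt,
        image=image,
        negative_prompt=negative_prompt,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
        generator=generator,
    ).frames[0]
    return export_to_gif(frames, "output.gif")

Keeping device selection in generate_gif means initialize_pipeline no longer decides where to run; it simply receives the device chosen by the caller at request time.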