pm6six committed on
Commit f9a94e1 · verified · 1 Parent(s): d531bf9

Update app.py

Files changed (1):
  1. app.py +23 -11
app.py CHANGED
@@ -4,36 +4,43 @@ from diffusers import CogVideoXImageToVideoPipeline
 from diffusers.utils import export_to_video, load_image
 import torch
 
-# Streamlit interface for uploading an image and inputting a prompt
-st.title("Image to Video with Hugging Face")
-st.write("Upload an image and provide a prompt to generate a video.")
+st.write("App started.")
 
-# File uploader for the input image
+# Streamlit interface
+st.title("Image to Video with Hugging Face")
 uploaded_file = st.file_uploader("Upload an image (JPG or PNG):", type=["jpg", "jpeg", "png"])
 prompt = st.text_input("Enter your prompt:", "A little girl is riding a bicycle at high speed. Focused, detailed, realistic.")
 
 if uploaded_file and prompt:
     try:
-        # Save the uploaded file to a temporary location
+        # Debugging
+        st.write(f"Uploaded file: {uploaded_file.name}")
+        st.write(f"Prompt: {prompt}")
+
+        # Save uploaded file
+        st.write("Saving uploaded image...")
         with open("uploaded_image.jpg", "wb") as f:
             f.write(uploaded_file.read())
+        st.write("Uploaded image saved successfully.")
 
         # Load the image
+        st.write("Loading image...")
         image = load_image("uploaded_image.jpg")
+        st.write("Image loaded successfully.")
 
-        # Initialize the CogVideoX pipeline
+        # Initialize pipeline
         st.write("Initializing the pipeline...")
         pipe = CogVideoXImageToVideoPipeline.from_pretrained(
             "THUDM/CogVideoX1.5-5B-I2V",
             torch_dtype=torch.bfloat16
         )
-
         pipe.enable_sequential_cpu_offload()
         pipe.vae.enable_tiling()
         pipe.vae.enable_slicing()
+        st.write("Pipeline initialized successfully.")
 
-        # Generate the video
-        st.write("Generating video... this may take a while.")
+        # Generate video
+        st.write("Generating video... This may take a while.")
         video_frames = pipe(
             prompt=prompt,
             image=image,
@@ -43,15 +50,20 @@ if uploaded_file and prompt:
             guidance_scale=6,
             generator=torch.Generator(device="cuda").manual_seed(42),
         ).frames[0]
+        st.write("Video generated successfully.")
 
-        # Export the video
+        # Export video
+        st.write("Exporting video...")
         video_path = "output.mp4"
         export_to_video(video_frames, video_path, fps=8)
+        st.write("Video exported successfully.")
 
-        # Display the video in Streamlit
+        # Display video
         st.video(video_path)
 
     except Exception as e:
         st.error(f"An error occurred: {e}")
+        st.write(f"Debug info: {e}")
 else:
     st.write("Please upload an image and provide a prompt to get started.")
+
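
For reference, a minimal standalone sketch of the generation path as it stands after this commit, without the Streamlit UI. The hunk boundary hides the middle of the pipe(...) call, so the arguments marked "assumed" below (num_videos_per_prompt, num_inference_steps, num_frames) are illustrative guesses rather than values taken from app.py; everything else mirrors the diff.

# Sketch of the post-commit generation path; assumed arguments are marked below.
import torch
from diffusers import CogVideoXImageToVideoPipeline
from diffusers.utils import export_to_video, load_image

image = load_image("uploaded_image.jpg")
prompt = "A little girl is riding a bicycle at high speed. Focused, detailed, realistic."

pipe = CogVideoXImageToVideoPipeline.from_pretrained(
    "THUDM/CogVideoX1.5-5B-I2V",
    torch_dtype=torch.bfloat16,
)
pipe.enable_sequential_cpu_offload()  # keep weights on CPU, move submodules to GPU one at a time
pipe.vae.enable_tiling()              # decode latents in tiles to lower peak VRAM
pipe.vae.enable_slicing()             # decode the batch slice by slice

video_frames = pipe(
    prompt=prompt,
    image=image,
    num_videos_per_prompt=1,   # assumed; hidden by the hunk boundary
    num_inference_steps=50,    # assumed; hidden by the hunk boundary
    num_frames=81,             # assumed; hidden by the hunk boundary
    guidance_scale=6,
    generator=torch.Generator(device="cuda").manual_seed(42),  # as in the commit
).frames[0]

export_to_video(video_frames, "output.mp4", fps=8)

enable_sequential_cpu_offload() trades speed for a much smaller VRAM footprint, which is why the app pairs it with VAE tiling and slicing; the st.write() calls added in this commit appear to serve as coarse progress and debug markers around these slow steps.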