yoshibomball123 committed on
Commit
0c52219
·
verified ·
1 Parent(s): 252100c

Uploaded app.py

Browse files
Files changed (1) hide show
  1. app.py +56 -0
app.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import shutil

import cv2
import gradio as gr
import torch
from transformers import pipeline

7
# Load the face-swap model from the Hugging Face hub.
# NOTE(review): "senhan007/DeepFaceLab" may not actually expose an
# "image-to-image" pipeline head for transformers.pipeline — confirm the
# repo is pipeline-compatible before relying on this; the model is also
# never invoked below (the frame loop is a pass-through placeholder).
face_swap_model = pipeline("image-to-image", model="senhan007/DeepFaceLab")
9
+
10
def swap_faces(image, video):
    """Produce a face-swapped copy of *video* using *image* as the reference face.

    Parameters
    ----------
    image : PIL.Image.Image
        Reference image, as delivered by ``gr.Image(type="pil")``.
    video : str or file-like
        Uploaded video. Recent Gradio versions pass a filepath string;
        older versions passed an object — both are supported.

    Returns
    -------
    str
        Path of the written output video file.

    Raises
    ------
    ValueError
        If the uploaded video cannot be opened by OpenCV.
    """
    image_path = "uploaded_image.jpg"
    video_path = "uploaded_video.mp4"
    output_path = "swapped_video.mp4"

    # Persist the reference image to disk.
    image.save(image_path)

    # Gradio 4.x hands the video over as a filepath string; older
    # versions handed over an object. `video.save(...)` on a string was
    # a crash in the original — copy the file instead when given a path.
    if isinstance(video, str):
        shutil.copyfile(video, video_path)
    else:
        video.save(video_path)

    video_cap = cv2.VideoCapture(video_path)
    if not video_cap.isOpened():
        raise ValueError(f"Could not open uploaded video: {video_path}")

    # Guard against a 0/NaN FPS report, which would yield an unplayable file.
    frame_rate = video_cap.get(cv2.CAP_PROP_FPS) or 30.0
    width = int(video_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(video_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    output_video = cv2.VideoWriter(output_path, fourcc, frame_rate, (width, height))

    try:
        while True:
            ret, frame = video_cap.read()
            if not ret:
                break
            # TODO: run face_swap_model on `frame` against the reference
            # image; currently a pass-through placeholder (no swap occurs).
            swapped_frame = frame
            output_video.write(swapped_frame)
    finally:
        # Always release the capture/writer handles, even if a frame
        # read or write raises — the original leaked both on error.
        video_cap.release()
        output_video.release()

    return output_path
43
+
44
# Build the Gradio UI: a reference image and a video in, the
# face-swapped video out.
iface = gr.Interface(
    fn=swap_faces,
    inputs=[
        gr.Image(type="pil", label="Upload Reference Image"),
        # No `type=` kwarg: gr.Video dropped it in Gradio 4.x (passing it
        # raised TypeError at startup); the component now always supplies
        # the upload as a filepath string.
        gr.Video(label="Upload Video"),
    ],
    outputs=gr.Video(label="Face Swapped Video"),
    title="Video Face Swap",
    description="Upload a reference image and a video to swap faces.",
)

iface.launch()