import cv2
import gradio as gr
from transformers import pipeline

# Load Hugging Face gender classifier model (ProjectPersonal/GenderClassifier)
gender_classifier = pipeline("image-classification", model="ProjectPersonal/GenderClassifier")

# Load Hugging Face face swap model (senhan007/DeepFaceLab)
face_swap_model = pipeline("image-to-image", model="senhan007/DeepFaceLab")


def detect_gender(image_path):
    # Use the gender classifier to detect gender from the image
    result = gender_classifier(image_path)
    # Return the top predicted label, normalised to lowercase ("male" or "female")
    return result[0]['label'].lower()


def swap_faces(image, video, selected_gender):
    # Save the uploaded reference image; Gradio passes the video as a file path
    image_path = "uploaded_image.jpg"
    output_path = "swapped_video.mp4"
    image.save(image_path)
    video_path = video

    # Detect the gender of the reference image
    detected_gender = detect_gender(image_path)

    # If the detected gender matches the selected gender, or "all" is selected, proceed with the face swap
    if selected_gender == "all" or detected_gender == selected_gender:
        video_cap = cv2.VideoCapture(video_path)
        frame_rate = video_cap.get(cv2.CAP_PROP_FPS)
        width = int(video_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(video_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        output_video = cv2.VideoWriter(output_path, fourcc, frame_rate, (width, height))

        while True:
            ret, frame = video_cap.read()
            if not ret:
                break
            # Apply the face swap model (insert the actual face swap inference here)
            swapped_frame = frame  # Placeholder, replace with actual model inference
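            # Hedged sketch (an assumption, not the model's documented API): if the loaded
            # "image-to-image" pipeline accepts a PIL image and returns one, per-frame
            # inference could look like the commented lines below. Uncomment and adapt
            # once a working face-swap call is confirmed.
            # pil_frame = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))  # needs: from PIL import Image
            # swapped_pil = face_swap_model(pil_frame)  # assumed to return a PIL image
            # swapped_frame = cv2.cvtColor(np.array(swapped_pil), cv2.COLOR_RGB2BGR)  # needs: import numpy as np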
            output_video.write(swapped_frame)

        video_cap.release()
        output_video.release()
    else:
        # Return the original video without a face swap if the gender doesn't match
        output_path = video_path  # No face swap applied

    return output_path


# Gradio UI
iface = gr.Interface(
    fn=swap_faces,
    inputs=[
        gr.Image(type="pil", label="Upload Reference Image"),
        gr.Video(label="Upload Video"),
        gr.Dropdown(choices=["all", "male", "female"], label="Select Gender")
    ],
    outputs=gr.Video(label="Face Swapped Video"),
    title="Video Face Swap",
    description="Upload a reference image and a video to swap faces based on gender selection."
)

iface.launch()