import torch
import numpy as np
import gradio as gr
from PIL import Image
import requests
from io import BytesIO

# Download the TorchScript checkpoint and load it on CPU.
# torch.jit.load expects a file path or file-like object, not a URL,
# so the checkpoint is fetched with requests first.
model_url = "https://huggingface.co/facebook/sapiens/resolve/main/sapiens_lite_host/torchscript/normal/checkpoints/sapiens_0.3b/sapiens_0.3b.pt"
response = requests.get(model_url)
response.raise_for_status()
model = torch.jit.load(BytesIO(response.content), map_location=torch.device("cpu"))
model.eval()
# Define the inference function
def predict(image):
    # Preprocess: convert to RGB and to a normalized NCHW float tensor
    image = image.convert("RGB")
    input_tensor = torch.from_numpy(np.array(image)).permute(2, 0, 1).unsqueeze(0).float() / 255.0

    # Run the model without tracking gradients
    with torch.no_grad():
        output = model(input_tensor)

    # Postprocess: drop the batch dimension, move channels last,
    # clip to [0, 1] to guard against out-of-range values, and scale to uint8
    output_image = output.squeeze().permute(1, 2, 0).numpy()
    output_image = (np.clip(output_image, 0, 1) * 255).astype(np.uint8)
    return Image.fromarray(output_image)
# Gradio interface
iface = gr.Interface(
    fn=predict,
    inputs=gr.Image(type="pil"),
    outputs=gr.Image(type="pil"),
    title="Sapiens Body Part Segmentation",
    description="Upload an image to segment body parts using the Sapiens model."
)
# Launch the interface
if __name__ == "__main__":
    iface.launch()
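
# Optional local smoke test (a sketch, not part of the original app): exercises
# predict() directly on a local image before launching the UI. The filenames
# "sample.jpg" and "prediction.png" are placeholders; the input image must
# exist locally for this to run.
#
#     test_image = Image.open("sample.jpg")
#     predict(test_image).save("prediction.png")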