from fastapi import FastAPI, File, UploadFile, Form
from fastapi.responses import StreamingResponse, FileResponse
from fastapi.staticfiles import StaticFiles
import shutil
import cv2
import numpy as np
import dlib
from torchvision import transforms
import torch.nn.functional as F
import gradio as gr
import os
import torch
from io import BytesIO


app = FastAPI()

# VToonify model wrapper; loaded lazily on the first request (see process_image).
model = None


def load_model():
    global model
    from vtoonify_model import Model
    model = Model(device='cuda' if torch.cuda.is_available() else 'cpu')
    model.load_model('cartoon1')


@app.post("/upload/")
async def process_image(file: UploadFile = File(...), top: int = Form(...), bottom: int = Form(...), left: int = Form(...), right: int = Form(...)):
    global model
    if model is None:
        load_model()

    # Read the uploaded file and decode it into an OpenCV image array.
    contents = await file.read()
    nparr = np.frombuffer(contents, np.uint8)
    frame_rgb = cv2.imdecode(nparr, cv2.IMREAD_COLOR)

    # Align the face using the given crop margins, then apply the 'cartoon1' style.
    aligned_face, instyle, message = model.detect_and_align_image(frame_rgb, top, bottom, left, right)
    processed_image, message = model.image_toonify(aligned_face, instyle, model.exstyle, style_degree=0.5, style_type='cartoon1')

    # Encode the stylized image as JPEG and stream it back to the client.
    _, encoded_image = cv2.imencode('.jpg', processed_image)
    return StreamingResponse(BytesIO(encoded_image.tobytes()), media_type="image/jpeg")


@app.get("/")
def index():
    return FileResponse(path="/app/AB/index.html", media_type="text/html")


# Register the static mount last so it does not shadow the routes above
# (a mount at "/" matches every path, in registration order).
app.mount("/", StaticFiles(directory="AB", html=True), name="static")
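

# Minimal usage sketch (assumptions not in the original code: the module is named
# main, uvicorn serves it on localhost:8000, "face.jpg" is a local photo, and the
# 200-pixel crop margins are placeholders):
#
#   uvicorn main:app --host 0.0.0.0 --port 8000
#
#   curl -X POST "http://localhost:8000/upload/" \
#        -F "file=@face.jpg" \
#        -F "top=200" -F "bottom=200" -F "left=200" -F "right=200" \
#        --output toonified.jpg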