Update main.py
Browse files
main.py
CHANGED
|
@@ -1,36 +1,30 @@
|
|
| 1 |
-
from
|
| 2 |
-
from fastapi import
|
| 3 |
-
from fastapi.responses import FileResponse
|
| 4 |
from fastapi.staticfiles import StaticFiles
|
| 5 |
-
import
|
| 6 |
-
import
|
| 7 |
import numpy as np
|
| 8 |
-
|
| 9 |
from vtoonify_model import Model
|
| 10 |
|
| 11 |
app = FastAPI()
|
| 12 |
-
model = Model(device='cuda' if torch.cuda.is_available() else 'cpu')
|
| 13 |
-
|
| 14 |
-
@app.post("/upload/")
|
| 15 |
-
async def process_image(file: UploadFile = File(...)):
|
| 16 |
-
# Save the uploaded image locally
|
| 17 |
-
with open("uploaded_image.jpg", "wb") as buffer:
|
| 18 |
-
shutil.copyfileobj(file.file, buffer)
|
| 19 |
-
|
| 20 |
-
# Load the model (assuming 'cartoon1' is always used)
|
| 21 |
-
exstyle, load_info = model.load_model('cartoon1')
|
| 22 |
-
|
| 23 |
-
# Process the uploaded image
|
| 24 |
-
top, bottom, left, right = 200, 200, 200, 200
|
| 25 |
-
aligned_face, _, input_info = model.detect_and_align_image("uploaded_image.jpg", top, bottom, left, right)
|
| 26 |
-
processed_image, message = model.image_toonify(aligned_face, None, exstyle, style_degree=0.5, style_type='cartoon1')
|
| 27 |
-
|
| 28 |
-
# Save the processed image
|
| 29 |
-
with open("result_image.jpg", "wb") as result_buffer:
|
| 30 |
-
result_buffer.write(processed_image)
|
| 31 |
|
| 32 |
-
|
| 33 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 34 |
|
| 35 |
app.mount("/", StaticFiles(directory="AB", html=True), name="static")
|
| 36 |
|
|
|
|
| 1 |
+
import cv2
import numpy as np
import torch
from fastapi import FastAPI, File, UploadFile, Form
from fastapi.responses import FileResponse, StreamingResponse
from fastapi.staticfiles import StaticFiles
from pydantic import BaseModel

from vtoonify_model import Model
|
| 9 |
|
| 10 |
# Single FastAPI application instance; the routes and static mount below attach to it.
app = FastAPI()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 11 |
|
| 12 |
+
# Load the model once at import time so every request reuses the same weights.
device = "cuda" if torch.cuda.is_available() else "cpu"
model = Model(device)
# Preload the 'cartoon1' style; `exstyle` is consumed by the /toonify/image/
# endpoint below. `message` is the loader's status text (unused afterwards).
exstyle, message = model.load_model("cartoon1")
|
| 16 |
+
|
| 17 |
+
class ImageRequest(BaseModel):
    # NOTE(review): FastAPI treats a BaseModel endpoint parameter as a JSON
    # body, so an UploadFile field declared here will not receive a multipart
    # file upload. The conventional form is `image_file: UploadFile = File(...)`
    # directly in the endpoint signature — confirm clients can actually use this.
    image_file: UploadFile = File(...)
|
| 19 |
+
|
| 20 |
+
@app.post("/toonify/image/")
async def toonify_image(image_request: ImageRequest):
    """Toonify an uploaded image using the preloaded 'cartoon1' style.

    Reads the uploaded bytes, decodes them with OpenCV, detects and aligns
    the face, applies VToonify at style_degree=0.5, and returns the result
    together with the model's status message.
    """
    # NOTE(review): FastAPI parses a BaseModel parameter as a JSON body;
    # multipart uploads normally require `image_file: UploadFile = File(...)`
    # in the signature — confirm requests can actually reach this endpoint.
    image = await image_request.image_file.read()

    # Decode the raw upload bytes into an OpenCV image.
    nparr = np.frombuffer(image, np.uint8)
    img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
    if img is None:
        # cv2.imdecode returns None when the payload is not a parseable image;
        # without this guard the model is handed None and crashes.
        return {"toonified_image": None,
                "message": "Uploaded data could not be decoded as an image."}

    # Face-crop margins (top, bottom, left, right) are fixed at 200px each.
    aligned_face, instyle, message = model.detect_and_align_image(img, 200, 200, 200, 200)
    toonified_img, message = model.image_toonify(
        aligned_face, instyle, exstyle, style_degree=0.5, style_type="cartoon1"
    )

    # NOTE(review): if `toonified_img` is a raw image array it is not
    # JSON-serializable; consider cv2.imencode + StreamingResponse (already
    # imported at the top of the file) for a binary response instead.
    return {"toonified_image": toonified_img, "message": message}
|
| 28 |
|
| 29 |
# Serve the static frontend from the local "AB" directory at the site root,
# with html=True so "/" resolves to its index.html.
app.mount("/", StaticFiles(directory="AB", html=True), name="static")
|
| 30 |
|