mie035 committed · commit fba2ed6 · parent: d045149
Commit message: mod
Files changed:
- Dockerfile        +31 -0
- README.md         +23 -4
- api.py            +30 -0
- app.py            +31 -31
- public/bed.png    +0 -0
- requirements.txt  +3 -1
- view/index.css    +11 -0
- view/index.html   +25 -0
- view/index.js     +75 -0
Dockerfile
ADDED
@@ -0,0 +1,31 @@
+# Use the official Python 3.9 image
+FROM python:3.9
+
+RUN apt-get update && apt-get upgrade -y
+
+RUN apt-get install -y libgl1-mesa-dev
+
+# Set the working directory to /code
+WORKDIR /code
+
+# Copy the current directory contents into the container at /code
+COPY ./requirements.txt /code/requirements.txt
+
+# Install requirements.txt
+RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
+
+# Set up a new user named "user" with user ID 1000
+RUN useradd -m -u 1000 user
+# Switch to the "user" user
+USER user
+# Set home to the user's home directory
+ENV HOME=/home/user \
+    PATH=/home/user/.local/bin:$PATH
+
+# Set the working directory to the user's home directory
+WORKDIR $HOME/app
+
+# Copy the current directory contents into the container at $HOME/app, setting the owner to the user
+COPY --chown=user . $HOME/app
+
+CMD ["uvicorn", "api:app", "--host", "0.0.0.0", "--port", "7860"]
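
The CMD above starts uvicorn on 0.0.0.0:7860, the port Hugging Face Spaces expects. A minimal smoke test for a locally built image could look like the sketch below; it is not part of the commit and assumes the container was started with the README's "docker run -it -p 7860:7860 i2m".

# smoke_test.py (hypothetical): confirm the container answers on port 7860
import urllib.request

with urllib.request.urlopen("http://localhost:7860/", timeout=10) as resp:
    body = resp.read().decode("utf-8")
    print(resp.status)             # expect 200
    print("renderCanvas" in body)  # the root route serves view/index.html, which contains the canvas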
README.md
CHANGED
@@ -3,11 +3,30 @@ title: Image2mesh
 emoji: 👁
 colorFrom: green
 colorTo: blue
-sdk:
-
-app_file: app.py
+sdk: docker
+app_file: api.py
 pinned: false
-duplicated_from: mattiagatti/image2mesh
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+
+added by nagauta below
+I modified the app below to use it as an API. (nagauta)
+https://huggingface.co/spaces/mattiagatti/image2mesh
+
+## my notes on setting up the environment
+# env.
+## local
+Machine: MacBook Air (M1, 2020)
+OS: Monterey 12.6.3
+RAM: 16GB
+
+# anaconda
+conda create -n i2m python=3.9
+conda activate i2m
+pip install --no-cache-dir --upgrade -r /code/requirements.txt
+uvicorn api:app --reload
+
+# Docker
+docker build -t i2m .
+docker run -it -p 7860:7860 i2m
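
For the "uvicorn api:app --reload" line in the notes above, an equivalent programmatic entry point is sketched below. It is not part of the commit and assumes it is run from the repository root where api.py lives; note that uvicorn's default is 127.0.0.1:8000, not 7860.

# run_local.py (hypothetical): same effect as `uvicorn api:app --reload`
import uvicorn

if __name__ == "__main__":
    # pass the app as an import string so auto-reloading works
    uvicorn.run("api:app", host="127.0.0.1", port=8000, reload=True)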
api.py
ADDED
@@ -0,0 +1,30 @@
+from fastapi import FastAPI, UploadFile
+import shutil
+from fastapi.responses import HTMLResponse
+from fastapi.responses import FileResponse
+from fastapi.staticfiles import StaticFiles
+import app as predictor
+from PIL import Image, ImageFilter
+
+app = FastAPI()
+
+app.mount("/view", StaticFiles(directory="view", html=True), name="view")
+app.mount("/public", StaticFiles(directory="public", html=True), name="public")
+
+@app.get("/")
+def index() -> FileResponse:
+    return FileResponse(path="./view/index.html", media_type="text/html")
+
+@app.post("/prediction/")
+async def predict(targetImage: UploadFile):
+    path = f'public/{targetImage.filename}'  # the public/ directory must be created in advance
+    with open(path, 'wb+') as buffer:
+        shutil.copyfileobj(targetImage.file, buffer)
+    im = Image.open(path)
+    # TODO: make the mesh quality configurable
+    depth_image, mesh_path = predictor.predict(im, 3)
+    print(mesh_path)
+    return {
+        "path": "public",
+        "name": mesh_path
+    }
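
A sketch of how a client could exercise the new endpoint from Python. This is not part of the commit; it assumes the requests package (not listed in requirements.txt) and a server reachable at http://localhost:7860. The multipart field name targetImage must match the UploadFile parameter above.

# client_example.py (hypothetical)
import requests

BASE = "http://localhost:7860"

with open("public/bed.png", "rb") as f:
    resp = requests.post(f"{BASE}/prediction/", files={"targetImage": f})
resp.raise_for_status()
result = resp.json()  # e.g. {"path": "public", "name": "<tempname>.obj"}

# The mesh is written under public/, which api.py exposes as a static mount,
# so the .obj can be downloaded directly:
mesh = requests.get(f"{BASE}/{result['path']}/{result['name']}")
with open(result["name"], "wb") as out:
    out.write(mesh.content)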
app.py
CHANGED
@@ -76,7 +76,7 @@ def generate_mesh(image, depth_image, quality):
 
     # save the mesh
     temp_name = next(tempfile._get_candidate_names()) + '.obj'
-    o3d.io.write_triangle_mesh(temp_name, mesh)
+    o3d.io.write_triangle_mesh("public/" + temp_name, mesh)
 
     return temp_name
 
@@ -91,33 +91,33 @@ def predict(image, quality):
 
     return depth_image, mesh_path
 
-
-# GUI
-title = 'Image2Mesh'
-description = 'Demo based on my <a href="https://towardsdatascience.com/generate-a-3d-mesh-from-an-image-with-python' \
-              '-12210c73e5cc">article</a>. This demo predicts the depth of an image and then generates the 3D mesh. ' \
-              'Choosing a higher quality increases the time to generate the mesh. You can download the mesh by ' \
-              'clicking the top-right button on the 3D viewer. '
-examples = [[f'examples/{name}', 3] for name in sorted(os.listdir('examples'))]
-
-# example image source:
-# N. Silberman, D. Hoiem, P. Kohli, and Rob Fergus,
-# Indoor Segmentation and Support Inference from RGBD Images (2012)
-
-iface = gr.Interface(
-    fn=predict,
-    inputs=[
-        gr.Image(type='pil', label='Input Image'),
-        gr.Slider(1, 5, step=1, value=3, label='Mesh quality')
-    ],
-    outputs=[
-        gr.Image(label='Depth'),
-        gr.Model3D(label='3D Model', clear_color=[0.0, 0.0, 0.0, 0.0])
-    ],
-    examples=examples,
-    allow_flagging='never',
-    cache_examples=False,
-    title=title,
-    description=description
-)
-iface.launch()
+if __name__ == '__main__':
+    # GUI
+    title = 'Image2Mesh'
+    description = 'Demo based on my <a href="https://towardsdatascience.com/generate-a-3d-mesh-from-an-image-with-python' \
+                  '-12210c73e5cc">article</a>. This demo predicts the depth of an image and then generates the 3D mesh. ' \
+                  'Choosing a higher quality increases the time to generate the mesh. You can download the mesh by ' \
+                  'clicking the top-right button on the 3D viewer. '
+    examples = [[f'examples/{name}', 3] for name in sorted(os.listdir('examples'))]
+
+    # example image source:
+    # N. Silberman, D. Hoiem, P. Kohli, and Rob Fergus,
+    # Indoor Segmentation and Support Inference from RGBD Images (2012)
+
+    iface = gr.Interface(
+        fn=predict,
+        inputs=[
+            gr.Image(type='pil', label='Input Image'),
+            gr.Slider(1, 5, step=1, value=3, label='Mesh quality')
+        ],
+        outputs=[
+            gr.Image(label='Depth'),
+            gr.Model3D(label='3D Model', clear_color=[0.0, 0.0, 0.0, 0.0])
+        ],
+        examples=examples,
+        allow_flagging='never',
+        cache_examples=False,
+        title=title,
+        description=description
+    )
+    iface.launch()
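
Note that generate_mesh() now writes the .obj under public/ but still returns only the bare file name, and /prediction/ passes that name through as "name" with "path" set to "public". The hypothetical helper below (not in the commit) shows how a caller turns that response into the URL the static mount actually serves, mirroring the /${rcPath}/${rcName} path built in view/index.js.

# mesh_url.py (hypothetical helper, not part of the commit)
def mesh_url(prediction: dict, base: str = "http://localhost:7860") -> str:
    """Build the download URL for a mesh returned by POST /prediction/."""
    return f"{base}/{prediction['path']}/{prediction['name']}"

# Example: {"path": "public", "name": "abc123.obj"} -> http://localhost:7860/public/abc123.obj
print(mesh_url({"path": "public", "name": "abc123.obj"}))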
public/bed.png
ADDED
requirements.txt
CHANGED
@@ -4,4 +4,6 @@ numpy
 open3d
 Pillow
 torch
-transformers
+transformers
+fastapi
+uvicorn[standard]
view/index.css
ADDED
@@ -0,0 +1,11 @@
+html, body {
+    width: 100%;
+    height: 100%;
+    overflow: hidden;
+}
+
+#renderCanvas {
+    width : 100%;
+    height : 100%;
+    touch-action: none;
+}
view/index.html
ADDED
@@ -0,0 +1,25 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <meta charset="UTF-8">
+    <meta http-equiv="X-UA-Compatible" content="IE=edge">
+    <meta name="viewport" content="width=device-width, initial-scale=1.0">
+    <link rel="stylesheet" type="text/css" href="/view/index.css" />
+    <title>Image to 3D</title>
+</head>
+<body>
+    <form id="upload">
+        <input type="file" id="targetImage">
+        <button>変換する</button>
+    </form>
+    <canvas id="renderCanvas"></canvas>
+
+    <script src="https://preview.babylonjs.com/babylon.js"></script>
+    <script src="https://preview.babylonjs.com/loaders/babylonjs.loaders.min.js"></script>
+    <script src="https://preview.babylonjs.com/serializers/babylonjs.serializers.min.js"></script>
+    <script src="https://preview.babylonjs.com/materialsLibrary/babylonjs.materials.min.js"></script>
+    <script src="https://cdn.babylonjs.com/loaders/babylon.objFileLoader.js"></script>
+    <script src="https://code.jquery.com/pep/0.4.1/pep.js"></script>
+    <script src="/view/index.js"></script>
+</body>
+</html>
view/index.js
ADDED
@@ -0,0 +1,75 @@
+function main() {
+
+    prepareToUploadFile();
+    // load3dModel();
+
+}
+function prepareToUploadFile(){
+    /*
+     * runs when the submit event fires
+     */
+    document.querySelector("#upload").addEventListener("submit", (e)=>{
+        const targetImage = document.querySelector("#targetImage");
+        // cancel the default submit handling (no page navigation)
+        e.preventDefault();
+
+        // prepare the data to send
+        const formData = new FormData();
+        formData.append("targetImage", targetImage.files[0]); // attach the file contents
+
+        const param = {
+            method: "POST",
+            body: formData
+        }
+
+        // upload the file
+        fetch(`${window.origin}/prediction`, param)
+        .then((res)=>{
+            return( res.json() );
+        })
+        .then((json)=>{
+            // handle a successful request
+            load3dModel(json["path"], json["name"]);
+        })
+        .catch((error)=>{
+            // error handling
+            alert("残念、なんかエラー!")
+        });
+    });
+}
+function load3dModel(rcPath, rcName){
+
+    const canvas = document.getElementById('renderCanvas');
+    const engine = new BABYLON.Engine(canvas);
+    // from here
+    function createScene() {
+        // create the scene
+        const scene = new BABYLON.Scene(engine);
+        // create the camera
+        const camera = new BABYLON.ArcRotateCamera("camera", -Math.PI / 2, Math.PI / 2.5, 3, new BABYLON.Vector3(0, 0, 0), scene);
+        // let the camera respond to user input
+        camera.attachControl(canvas, true);
+        // create the light
+        const light = new BABYLON.HemisphericLight("light", new BABYLON.Vector3(0, 1, 0), scene);
+
+        // load the image
+        BABYLON.SceneLoader.ImportMesh("", `/${rcPath}/`, `${rcName}`, scene, function (newMeshes) {
+            // Create a default arc rotate camera and light.
+            // createArcRotateCamera
+            scene.createDefaultCameraOrLight(true, true, true);
+        });
+        return scene;
+    }
+
+    const scene = createScene();
+
+
+    engine.runRenderLoop(() => {
+        scene.render();
+    });
+
+    window.addEventListener('resize', () => {
+        engine.resize();
+    });
+}
+window.addEventListener('DOMContentLoaded', main);
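
The front end above relies on three things being wired up in api.py: the root route returning view/index.html, the /view mount serving index.css and index.js, and the /public mount serving uploads and generated meshes. A rough check of that wiring (not part of the commit) could use FastAPI's TestClient, assuming its test dependencies are installed and that importing api (which pulls in app.py and its heavy dependencies such as torch, transformers, and open3d) is acceptable in the test environment.

# test_routes.py (hypothetical)
from fastapi.testclient import TestClient

import api  # importing api also imports app.py and its heavy dependencies

client = TestClient(api.app)

assert client.get("/").status_code == 200                # index() returns view/index.html
assert client.get("/view/index.js").status_code == 200   # /view static mount
assert client.get("/view/index.css").status_code == 200
assert client.get("/public/bed.png").status_code == 200  # sample image added in this commit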