Spaces:
Runtime error
Runtime error
Upload 2 files
Browse files- src/backend/api/mcp_server.py +100 -0
- src/backend/api/web.py +116 -0
src/backend/api/mcp_server.py
ADDED
@@ -0,0 +1,100 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# FastSD CPU MCP (Model Context Protocol) API server.
#
# Builds a FastAPI app exposing system-info and text-to-image endpoints,
# serves generated images from ./results, and (via start_mcp_server) mounts
# the endpoints as MCP tools using fastapi-mcp.
#
# Fix: imports regrouped per PEP 8 (stdlib / third-party / project-local);
# the original interleaved project imports between fastapi imports.
import platform

import uvicorn
from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from fastapi.staticfiles import StaticFiles
from fastapi_mcp import FastApiMCP

from backend.device import get_device_name
from backend.models.device import DeviceInfo
from constants import APP_VERSION, DEVICE
from context import Context
from models.interface_types import InterfaceType
from state import get_settings

# Default port; start_mcp_server() overwrites this so the /generate endpoint
# can build image URLs that point at the actual serving port.
SERVER_PORT = 8000

app_settings = get_settings()
app = FastAPI(
    title="FastSD CPU",
    description="Fast stable diffusion on CPU",
    version=APP_VERSION,
    license_info={
        "name": "MIT",
        "identifier": "MIT",
    },
    describe_all_responses=True,
    describe_full_response_schema=True,
)
# NOTE(review): wildcard origins together with allow_credentials=True is very
# permissive; acceptable for a local tool server, confirm before exposing it.
origins = ["*"]

app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

context = Context(InterfaceType.API_SERVER)
# Generated images are written to ./results and served statically from there.
app.mount("/results", StaticFiles(directory="results"), name="results")
@app.get(
    "/info",
    description="Get system information",
    summary="Get system information",
    operation_id="get_system_info",
)
async def info() -> dict:
    """Return host device and OS details as a plain dict."""
    details = {
        "device_type": DEVICE,
        "device_name": get_device_name(),
        "os": platform.system(),
        "platform": platform.platform(),
        "processor": platform.processor(),
    }
    return DeviceInfo(**details).model_dump()
@app.post(
    "/generate",
    description="Generate image from text prompt",
    summary="Text to image generation",
    operation_id="generate",
)
async def generate(
    prompt: str,
    request: Request,
) -> str:
    """
    Returns URL of the generated image for text prompt
    """
    app_settings.settings.lcm_diffusion_setting.prompt = prompt
    images = context.generate_text_to_image(app_settings.settings)
    image_names = context.save_images(
        images,
        app_settings.settings,
    )
    # Fix: generation/saving can yield no images; the original indexed
    # image_names[0] unconditionally and would raise IndexError (HTTP 500).
    if not image_names:
        return "Image generation failed; no image was produced"
    # url = request.url_for("results", path=image_names[0]) - Claude Desktop returns api_server
    url = f"http://localhost:{SERVER_PORT}/results/{image_names[0]}"
    image_url = f"The generated image available at the URL {url}"
    return image_url
def start_mcp_server(port: int = 8000):
    """Mount the MCP tooling on the FastAPI app and serve it with uvicorn.

    Records *port* in the module-level SERVER_PORT (used by /generate to
    build image URLs), then blocks until the server shuts down.
    """
    global SERVER_PORT
    SERVER_PORT = port
    print(f"Starting MCP server on port {port}...")
    FastApiMCP(
        app,
        name="FastSDCPU MCP",
        description="MCP server for FastSD CPU API",
    ).mount()
    uvicorn.run(app, host="0.0.0.0", port=port)
src/backend/api/web.py
ADDED
@@ -0,0 +1,116 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# FastSD CPU web/REST API server; all endpoints live under /api/*.
import platform

import uvicorn
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware

from backend.api.models.response import StableDiffusionResponse
from backend.base64_image import base64_image_to_pil, pil_image_to_base64_str
from backend.device import get_device_name
from backend.models.device import DeviceInfo
from backend.models.lcmdiffusion_setting import DiffusionTask, LCMDiffusionSetting
from constants import APP_VERSION, DEVICE
from context import Context
from models.interface_types import InterfaceType
from state import get_settings

app_settings = get_settings()
app = FastAPI(
    title="FastSD CPU",
    description="Fast stable diffusion on CPU",
    version=APP_VERSION,
    license_info={
        "name": "MIT",
        "identifier": "MIT",
    },
    # Docs routes moved under /api so the root path can serve something else.
    docs_url="/api/docs",
    redoc_url="/api/redoc",
    openapi_url="/api/openapi.json",
)
# NOTE(review): import-time debug print of the diffusion settings — looks like
# a leftover; consider removing or replacing with logging.
print(app_settings.settings.lcm_diffusion_setting)
# NOTE(review): wildcard origins with allow_credentials=True is permissive;
# confirm this is intended beyond local use.
origins = ["*"]
app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
context = Context(InterfaceType.API_SERVER)
@app.get("/api/")
async def root():
    """Landing endpoint for the API root."""
    greeting = {"message": "Welcome to FastSD CPU API"}
    return greeting
@app.get(
    "/api/info",
    description="Get system information",
    summary="Get system information",
)
async def info():
    """Return host device and OS details as a plain dict."""
    details = {
        "device_type": DEVICE,
        "device_name": get_device_name(),
        "os": platform.system(),
        "platform": platform.platform(),
        "processor": platform.processor(),
    }
    return DeviceInfo(**details).model_dump()
@app.get(
    "/api/config",
    description="Get current configuration",
    summary="Get configurations",
)
async def config():
    """Return the full application settings object (serialized by FastAPI)."""
    # NOTE(review): this exposes the entire settings object to any caller;
    # verify nothing sensitive lives in it before deploying publicly.
    return app_settings.settings
@app.get(
    "/api/models",
    description="Get available models",
    summary="Get available models",
)
async def models():
    """List the model identifiers available to the app, grouped by kind."""
    return {
        "lcm_lora_models": app_settings.lcm_lora_models,
        # NOTE(review): "stable_diffsuion_models" is the settings attribute's
        # actual (misspelled) name — renaming must happen upstream first.
        "stable_diffusion": app_settings.stable_diffsuion_models,
        "openvino_models": app_settings.openvino_lcm_models,
        "lcm_models": app_settings.lcm_models,
    }
@app.post(
    "/api/generate",
    description="Generate image(Text to image,Image to Image)",
    summary="Generate image(Text to image,Image to Image)",
)
async def generate(diffusion_config: LCMDiffusionSetting) -> StableDiffusionResponse:
    """Run one diffusion job and return base64 images plus latency/error.

    Handles both text-to-image and image-to-image tasks; for the latter the
    client supplies init_image as a base64 string which is decoded here.
    """
    settings = app_settings.settings
    settings.lcm_diffusion_setting = diffusion_config
    if diffusion_config.diffusion_task == DiffusionTask.image_to_image:
        # Decode the client-sent base64 init image into a PIL image.
        decoded = base64_image_to_pil(diffusion_config.init_image)
        settings.lcm_diffusion_setting.init_image = decoded

    images = context.generate_text_to_image(settings)

    images_base64 = [pil_image_to_base64_str(img) for img in images] if images else []
    return StableDiffusionResponse(
        latency=round(context.latency, 2),
        images=images_base64,
        error=context.error,
    )
def start_web_server(port: int = 8000):
    """Serve the FastSD CPU web API on all interfaces; blocks until shutdown."""
    uvicorn.run(app, host="0.0.0.0", port=port)