package apiserver

import (
	"monica-proxy/internal/middleware"
	"monica-proxy/internal/monica"
	"monica-proxy/internal/types"
	"net/http"

	"github.com/labstack/echo/v4"
	"github.com/sashabaranov/go-openai"
)

// RegisterRoutes registers the Echo routes.
func RegisterRoutes(e *echo.Echo) {
	// The home page does not require authentication, so it must be registered
	// before the auth middleware is applied.
	e.GET("/", handleHome)

	// Apply Bearer token authentication to the API routes only.
	apiGroup := e.Group("/hf")
	apiGroup.Use(middleware.BearerAuth())

	// API routes
	apiGroup.POST("/v1/chat/completions", handleChatCompletion)
	apiGroup.GET("/v1/models", handleListModels)
}
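
// Example wiring (a minimal sketch, not taken from this repository: the main
// package, the listen address, and the apiserver import path are assumptions
// made for illustration only):
//
//	e := echo.New()
//	apiserver.RegisterRoutes(e)
//	e.Logger.Fatal(e.Start(":8080"))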

// handleHome serves the landing page.
func handleHome(c echo.Context) error {
	html := `
<!DOCTYPE html>
<html>
<head>
    <title>Monica Proxy API</title>
    <style>
        body {
            font-family: system-ui, -apple-system, sans-serif;
            max-width: 800px;
            margin: 0 auto;
            padding: 2rem;
            line-height: 1.6;
        }
        code {
            background: #f4f4f4;
            padding: 0.2em 0.4em;
            border-radius: 3px;
        }
        pre {
            background: #f4f4f4;
            padding: 1em;
            border-radius: 5px;
            overflow-x: auto;
        }
    </style>
</head>
<body>
    <h1>Monica Proxy API</h1>
    <p>This is a proxy service that exposes Monica's service through a ChatGPT-compatible API.</p>
    <h2>API Endpoints</h2>
    <ul>
        <li><code>POST /hf/v1/chat/completions</code> - Chat completion endpoint</li>
        <li><code>GET /hf/v1/models</code> - List available models</li>
    </ul>
    <h2>Example Usage</h2>
    <pre>
curl -X POST https://xxx-xxx.hf.space/hf/v1/chat/completions \
  -H "Authorization: Bearer YOUR_BEARER_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{
    "model": "gpt-4o-mini",
    "messages": [
      {
        "role": "user",
        "content": "Hello!"
      }
    ],
    "stream": true
  }'</pre>
</body>
</html>`
	return c.HTML(http.StatusOK, html)
}

// handleChatCompletion accepts a ChatGPT-style chat completion request and
// forwards it to Monica.
func handleChatCompletion(c echo.Context) error {
	var req openai.ChatCompletionRequest
	if err := c.Bind(&req); err != nil {
		return c.JSON(http.StatusBadRequest, map[string]interface{}{
			"error": "Invalid request payload",
		})
	}

	// Make sure the request contains at least one message.
	if len(req.Messages) == 0 {
		return c.JSON(http.StatusBadRequest, map[string]interface{}{
			"error": "No messages found",
		})
	}

	// Convert the ChatGPT request into a MonicaRequest.
	monicaReq, err := types.ChatGPTToMonica(req)
	if err != nil {
		return c.JSON(http.StatusInternalServerError, map[string]interface{}{
			"error": err.Error(),
		})
	}

	// Call Monica and obtain the SSE stream.
	stream, err := monica.SendMonicaRequest(c.Request().Context(), monicaReq)
	if err != nil {
		return c.JSON(http.StatusInternalServerError, map[string]interface{}{
			"error": err.Error(),
		})
	}
	// Resty does not close the response body automatically, so close it here.
	defer stream.RawBody().Close()

	// Stream the SSE data straight back to the caller.
	c.Response().Header().Set(echo.HeaderContentType, "text/event-stream")
	c.Response().Header().Set("Cache-Control", "no-cache")
	c.Response().Header().Set("Transfer-Encoding", "chunked")
	c.Response().WriteHeader(http.StatusOK)

	// Read Monica's SSE data line by line and relay it to the caller in SSE format.
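	// A generic line-by-line SSE relay has roughly the shape sketched below,
	// where w stands for c.Response().Writer and body for stream.RawBody().
	// This is an illustrative sketch only, not the implementation of
	// StreamMonicaSSEToClient, which also receives the model name (presumably
	// to rewrite Monica's events into OpenAI-style chat completion chunks):
	//
	//	scanner := bufio.NewScanner(body)
	//	for scanner.Scan() {
	//		fmt.Fprintf(w, "%s\n", scanner.Text())
	//		if f, ok := w.(http.Flusher); ok {
	//			f.Flush()
	//		}
	//	}
	//	return scanner.Err()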
	if err := monica.StreamMonicaSSEToClient(req.Model, c.Response().Writer, stream.RawBody()); err != nil {
		return err
	}

	return nil
}
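
// Example client call against this proxy (a minimal sketch; the base URL, port,
// and token below are placeholders, not values taken from this repository):
//
//	cfg := openai.DefaultConfig("YOUR_BEARER_TOKEN")
//	cfg.BaseURL = "http://localhost:8080/hf/v1"
//	client := openai.NewClientWithConfig(cfg)
//	stream, err := client.CreateChatCompletionStream(context.Background(), openai.ChatCompletionRequest{
//		Model:    "gpt-4o-mini",
//		Messages: []openai.ChatCompletionMessage{{Role: openai.ChatMessageRoleUser, Content: "Hello!"}},
//		Stream:   true,
//	})
//
// The returned stream is then read with stream.Recv() until it yields io.EOF,
// and closed with stream.Close().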

// handleListModels returns the list of supported models.
func handleListModels(c echo.Context) error {
	models := types.GetSupportedModels()
	return c.JSON(http.StatusOK, models)
}
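
// Example request (a minimal sketch; the host mirrors the placeholder Hugging
// Face Space URL shown on the landing page and is not a real deployment):
//
//	curl -H "Authorization: Bearer YOUR_BEARER_TOKEN" \
//	     https://xxx-xxx.hf.space/hf/v1/models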