File size: 3,747 Bytes
1fd6586 bea8fae 506d434 bea8fae 1fd6586 506d434 1fd6586 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 |
package apiserver
import (
"monica-proxy/internal/middleware"
"monica-proxy/internal/monica"
"monica-proxy/internal/types"
"net/http"
"github.com/labstack/echo/v4"
"github.com/sashabaranov/go-openai"
)
// RegisterRoutes 注册 Echo 路由
// RegisterRoutes wires up all HTTP routes on the given Echo instance.
func RegisterRoutes(e *echo.Echo) {
	// The landing page is public, so it is registered before any
	// auth middleware is attached.
	e.GET("/", handleHome)

	// All API endpoints live under /hf and require a Bearer token.
	api := e.Group("/hf")
	api.Use(middleware.BearerAuth())
	api.POST("/v1/chat/completions", handleChatCompletion)
	api.GET("/v1/models", handleListModels)
}
// handleHome 处理主页请求
// handleHome serves the static landing page that documents the API.
func handleHome(c echo.Context) error {
	// The page is fixed at compile time, so a const is sufficient.
	const page = `
<!DOCTYPE html>
<html>
<head>
<title>Monica Proxy API</title>
<style>
body {
font-family: system-ui, -apple-system, sans-serif;
max-width: 800px;
margin: 0 auto;
padding: 2rem;
line-height: 1.6;
}
code {
background: #f4f4f4;
padding: 0.2em 0.4em;
border-radius: 3px;
}
pre {
background: #f4f4f4;
padding: 1em;
border-radius: 5px;
overflow-x: auto;
}
</style>
</head>
<body>
<h1>Monica Proxy API</h1>
<p>This is a proxy service that converts Monica's service to ChatGPT-compatible API format.</p>
<h2>API Endpoints</h2>
<ul>
<li><code>POST /hf/v1/chat/completions</code> - Chat completion endpoint</li>
<li><code>GET /hf/v1/models</code> - List available models</li>
</ul>
<h2>Example Usage</h2>
<pre>
curl -X POST https://xxx-xxx.hf.space/hf/v1/chat/completions \
-H "Authorization: Bearer YOUR_BEARER_TOKEN" \
-H "Content-Type: application/json" \
-d '{
"model": "gpt-4o-mini",
"messages": [
{
"role": "user",
"content": "Hello!"
}
],
"stream": true
}'</pre>
</body>
</html>`
	return c.HTML(http.StatusOK, page)
}
// handleChatCompletion 接收 ChatGPT 形式的对话请求并转发给 Monica
// handleChatCompletion accepts an OpenAI-style chat completion request,
// converts it to Monica's format, forwards it upstream, and relays the
// resulting SSE stream back to the caller.
func handleChatCompletion(c echo.Context) error {
	var req openai.ChatCompletionRequest
	if err := c.Bind(&req); err != nil {
		return c.JSON(http.StatusBadRequest, map[string]interface{}{
			"error": "Invalid request payload",
		})
	}

	// Reject requests that carry no messages at all.
	if len(req.Messages) == 0 {
		return c.JSON(http.StatusBadRequest, map[string]interface{}{
			"error": "No messages found",
		})
	}

	// Translate the OpenAI-shaped request into a MonicaRequest.
	monicaReq, err := types.ChatGPTToMonica(req)
	if err != nil {
		return c.JSON(http.StatusInternalServerError, map[string]interface{}{
			"error": err.Error(),
		})
	}

	// Call Monica and obtain the upstream SSE stream.
	stream, err := monica.SendMonicaRequest(c.Request().Context(), monicaReq)
	if err != nil {
		return c.JSON(http.StatusInternalServerError, map[string]interface{}{
			"error": err.Error(),
		})
	}
	// Resty does not close the raw body automatically; we must do it here.
	defer stream.RawBody().Close()

	// Stream the SSE data straight back to the client.
	c.Response().Header().Set(echo.HeaderContentType, "text/event-stream")
	c.Response().Header().Set("Cache-Control", "no-cache")
	// NOTE: Transfer-Encoding is a hop-by-hop header that Go's net/http
	// manages itself; setting it manually in the header map is ineffective,
	// so the previous "Transfer-Encoding: chunked" line was removed.
	c.Response().WriteHeader(http.StatusOK)

	// Relay Monica's SSE lines to the caller in SSE format. The status line
	// has already been written above, so a failure here can only abort the
	// stream — it cannot be turned into a JSON error response anymore.
	return monica.StreamMonicaSSEToClient(req.Model, c.Response().Writer, stream.RawBody())
}
// handleListModels 返回支持的模型列表
// handleListModels returns the list of models this proxy supports.
func handleListModels(c echo.Context) error {
	return c.JSON(http.StatusOK, types.GetSupportedModels())
}
|