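"""OpenAI-compatible embedding service for BAAI/bge-m3.

Exposes POST /v1/embeddings via FastAPI plus a small Gradio demo UI,
written for a Hugging Face Space (the `spaces.GPU` decorator targets ZeroGPU).
"""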
import gradio as gr
from transformers import AutoTokenizer, AutoModel
import torch
import spaces
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from typing import List, Dict, Any
# Create the FastAPI app
app = FastAPI()
# Configure CORS so the API can be called from other origins
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Load the model and tokenizer once at startup
model_name = "BAAI/bge-m3"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModel.from_pretrained(model_name)
model.eval()  # inference mode (disables dropout)
class EmbeddingRequest(BaseModel):
    """Request body mirroring the OpenAI /v1/embeddings schema."""
    input: List[str] | str
    model: str | None = model_name
    encoding_format: str | None = "float"
    user: str | None = None

class EmbeddingResponse(BaseModel):
    """Response body mirroring the OpenAI /v1/embeddings schema."""
    object: str = "list"
    data: List[Dict[str, Any]]
    model: str
    usage: Dict[str, int]
@spaces.GPU()  # on a ZeroGPU Space, this requests a GPU slot for the call
def get_embedding(text: str) -> List[float]:
    """Encode one text with BGE-M3 and return its CLS-token embedding."""
    inputs = tokenizer(
        text,
        padding=True,
        truncation=True,
        max_length=512,
        return_tensors="pt",
    ).to(model.device)
    with torch.no_grad():
        outputs = model(**inputs)
    # BGE-M3 uses the [CLS] token (position 0) as the dense sentence embedding
    embeddings = outputs.last_hidden_state[:, 0, :].cpu().numpy()
    return embeddings[0].tolist()
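# Note: the endpoint returns raw CLS vectors. For cosine-similarity search,
# BGE-M3 embeddings are commonly L2-normalized first; a caller could do, e.g.:
#
#   import numpy as np
#   vec = np.asarray(embedding)
#   vec /= np.linalg.norm(vec)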
# GPU allocation is handled inside get_embedding (decorated above),
# so this async route itself stays on the CPU.
@app.post("/v1/embeddings", response_model=EmbeddingResponse)
async def create_embeddings(request: EmbeddingRequest):
    # Accept a single string or a list of strings, as the OpenAI API does
    if isinstance(request.input, str):
        input_texts = [request.input]
    else:
        input_texts = request.input
    embeddings = []
    total_tokens = 0
    for index, text in enumerate(input_texts):
        # Token count feeds the usage block of the response
        total_tokens += len(tokenizer.encode(text))
        embeddings.append({
            "object": "embedding",
            "embedding": get_embedding(text),
            "index": index,
        })
    return EmbeddingResponse(
        data=embeddings,
        model=request.model or model_name,
        usage={
            "prompt_tokens": total_tokens,
            "total_tokens": total_tokens,
        },
    )
# Gradio supports async callbacks, so the UI can reuse the API handler directly
async def gradio_embedding(text: str) -> Dict:
    request = EmbeddingRequest(input=text)
    response = await create_embeddings(request)
    return response.model_dump()  # use response.dict() on Pydantic v1
# Create the Gradio interface
demo = gr.Interface(
    fn=gradio_embedding,
    inputs=gr.Textbox(lines=3, placeholder="Enter the text to encode..."),
    outputs=gr.JSON(),
    title="BGE-M3 Embeddings (OpenAI-compatible format)",
    description="Enter text and get its embedding vector back in an OpenAI API-compatible format.",
    examples=[
        ["This is an example sentence."],
        ["Artificial intelligence is changing the world."],
    ],
)
# Mount the Gradio UI onto the FastAPI app at the root path
app = gr.mount_gradio_app(app, demo, path="/")

if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)  # 7860 is the standard Spaces port
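# Minimal client-side sketch (not part of this app): because the endpoint follows
# the OpenAI schema, the official OpenAI Python SDK can call it. "YOUR_SPACE_URL"
# below is a hypothetical placeholder for the deployed Space's address.
#
#   from openai import OpenAI
#   client = OpenAI(base_url="YOUR_SPACE_URL/v1", api_key="unused")
#   resp = client.embeddings.create(model="BAAI/bge-m3", input="hello world")
#   print(len(resp.data[0].embedding))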