Commit f1b18bb by sanbo (parent: 11347db)
update sth. at 2025-03-03 19:15:11
README.md CHANGED

@@ -56,4 +56,42 @@ curl -X POST https://sanbo1200-jina-embeddings-v3.hf.space/hf/v1/embeddings \
 }'


+
+
+
+curl https://api.jina.ai/v1/embeddings \
+  -H "Content-Type: application/json" \
+  -H "Authorization: Bearer jina_xxxx" \
+  -d @- <<EOFEOF
+{
+  "model": "jina-clip-v2",
+  "dimensions": 1024,
+  "normalized": true,
+  "embedding_type": "float",
+  "input": [
+    {
+      "text": "A beautiful sunset over the beach"
+    },
+    {
+      "text": "Un beau coucher de soleil sur la plage"
+    },
+    {
+      "text": "海滩上美丽的日落"
+    },
+    {
+      "text": "浜辺に沈む美しい夕日"
+    },
+    {
+      "image": "https://i.ibb.co/nQNGqL0/beach1.jpg"
+    },
+    {
+      "image": "https://i.ibb.co/r5w8hG8/beach2.jpg"
+    },
+    {
+      "image": "R0lGODlhEAAQAMQAAORHHOVSKudfOulrSOp3WOyDZu6QdvCchPGolfO0o/XBs/fNwfjZ0frl3/zy7////wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACH5BAkAABAALAAAAAAQABAAAAVVICSOZGlCQAosJ6mu7fiyZeKqNKToQGDsM8hBADgUXoGAiqhSvp5QAnQKGIgUhwFUYLCVDFCrKUE1lBavAViFIDlTImbKC5Gm2hB0SlBCBMQiB0UjIQA7"
+    }
+  ]
+}
+EOFEOF
+
 ```
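For readers who prefer Python over curl, the request added to the README above maps directly onto a `requests` call. The following is a minimal sketch, not part of the commit: it reuses the placeholder API key `jina_xxxx` from the README and trims the input list to one text and one image.

```python
# Minimal Python sketch of the multimodal embeddings request added to the README.
# The Bearer token is the same placeholder used in the README example.
import requests

payload = {
    "model": "jina-clip-v2",
    "dimensions": 1024,
    "normalized": True,
    "embedding_type": "float",
    "input": [
        {"text": "A beautiful sunset over the beach"},
        {"image": "https://i.ibb.co/nQNGqL0/beach1.jpg"},
    ],
}

resp = requests.post(
    "https://api.jina.ai/v1/embeddings",
    headers={"Authorization": "Bearer jina_xxxx"},  # placeholder key, as in the README
    json=payload,
    timeout=60,
)
resp.raise_for_status()
print(len(resp.json()["data"]))  # one embedding per input item
```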
app.py CHANGED

@@ -35,8 +35,10 @@ class EmbeddingRequest(BaseModel):
         return values

 class EmbeddingResponse(BaseModel):
-
-
+    object: str = "list"
+    data: List[Dict[str, any]]
+    model: str
+    usage: Dict[str, int]

 class EmbeddingService:
     def __init__(self):
@@ -114,14 +116,29 @@ app.add_middleware(
 @app.post("/hf/v1/chat/completions", response_model=EmbeddingResponse)
 async def generate_embeddings(request: EmbeddingRequest):
     try:
+        # Count the tokens in the request
+        token_count = len(embedding_service.tokenizer.encode(request.inputs))
+
         embedding = await asyncio.get_running_loop().run_in_executor(
             None,
             embedding_service.get_embedding,
             request.inputs  # use the merged input field
         )
+
         return EmbeddingResponse(
-
-
+            object="list",
+            data=[
+                {
+                    "object": "embedding",
+                    "index": 0,
+                    "embedding": embedding
+                }
+            ],
+            model=request.model,
+            usage={
+                "prompt_tokens": token_count,
+                "total_tokens": token_count
+            }
         )
     except Exception as e:
         raise HTTPException(status_code=500, detail=str(e))
@@ -166,4 +183,4 @@ async def startup_event():
 if __name__ == "__main__":
     asyncio.run(embedding_service.initialize())
     gr.mount_gradio_app(app, iface, path="/ui")
-    uvicorn.run(app, host="0.0.0.0", port=7860, workers=
+    uvicorn.run(app, host="0.0.0.0", port=7860, workers=2)
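Taken together, the two app.py hunks give the response model and its construction shown below. This is a consolidated sketch, not the file itself: it substitutes `typing.Any` for the lowercase `any` written in the diff (Pydantic does not accept the builtin `any` as a type annotation) and fills in placeholder values for the embedding, model name, and token count.

```python
# Consolidated sketch of the EmbeddingResponse added in this commit, with
# `typing.Any` in place of the diff's lowercase `any`.
from typing import Any, Dict, List

from pydantic import BaseModel


class EmbeddingResponse(BaseModel):
    object: str = "list"
    data: List[Dict[str, Any]]
    model: str
    usage: Dict[str, int]


# Build a response the way the patched handler does: one embedding entry plus a
# usage block where prompt_tokens and total_tokens both carry the token count.
token_count = 7              # stand-in for len(tokenizer.encode(inputs))
embedding = [0.1, 0.2, 0.3]  # stand-in for the real vector
response = EmbeddingResponse(
    object="list",
    data=[{"object": "embedding", "index": 0, "embedding": embedding}],
    model="jina-embeddings-v3",  # placeholder model name
    usage={"prompt_tokens": token_count, "total_tokens": token_count},
)
print(response.json())  # Pydantic v1; use response.model_dump_json() on v2
```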
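A second sketch shows a client call against the Space itself. The route comes from the decorator in the hunk above, and the `inputs`/`model` field names are inferred from `request.inputs` and `request.model` in the handler; the Space URL is taken from the README, and the full EmbeddingRequest schema is not shown in this diff, so treat the request body as an assumption.

```python
# Sketch of calling the patched endpoint on the Space. The `inputs` and `model`
# field names are inferred from the handler, not confirmed by the diff.
import requests

resp = requests.post(
    "https://sanbo1200-jina-embeddings-v3.hf.space/hf/v1/chat/completions",
    json={
        "inputs": "A beautiful sunset over the beach",
        "model": "jina-embeddings-v3",  # placeholder model name
    },
    timeout=60,
)
resp.raise_for_status()
body = resp.json()
print(body["usage"])                      # {"prompt_tokens": N, "total_tokens": N}
print(len(body["data"][0]["embedding"]))  # embedding dimensionality
```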