Commit: rename model id to `convergence-ai/proxy-lite-3b`

Files changed:
- README.md (+4 −4)
- src/proxy_lite/app.py (+1 −1)
- src/proxy_lite/configs/default.yaml (+1 −1)
README.md (CHANGED)

@@ -90,7 +90,7 @@ By default, Proxy Lite will point to an endpoint set up on HuggingFace spaces.
 We recommend hosting your own endpoint with vLLM, you can use the following command:
 
 ```bash
-vllm serve --model convergence-ai/proxy-lite \
+vllm serve --model convergence-ai/proxy-lite-3b \
     --trust-remote-code \
     --enable-auto-tool-choice \
     --tool-call-parser hermes \

@@ -137,7 +137,7 @@ config = RunnerConfig.from_dict(
         "name": "proxy_lite",
         "client": {
             "name": "convergence",
-            "model_id": "convergence-ai/proxy-lite",
+            "model_id": "convergence-ai/proxy-lite-3b",
             "api_base": "https://convergence-ai-demo-api.hf.space/v1",
         },
     },

@@ -197,7 +197,7 @@ from transformers import AutoProcessor
 from proxy_lite.tools import ReturnValueTool, BrowserTool
 from proxy_lite.serializer import OpenAICompatableSerializer
 
-processor = AutoProcessor.from_pretrained("convergence-ai/proxy-lite")
+processor = AutoProcessor.from_pretrained("convergence-ai/proxy-lite-3b")
 tools = OpenAICompatableSerializer().serialize_tools([ReturnValueTool(), BrowserTool(session=None)])
 
 templated_messages = processor.apply_chat_template(

@@ -223,7 +223,7 @@ from openai import OpenAI
 client = OpenAI(base_url="http://convergence-ai-demo-api.hf.space/v1")
 
 response = client.chat.completions.create(
-    model="convergence-ai/proxy-lite",
+    model="convergence-ai/proxy-lite-3b",
     messages=message_history,
     tools=tools,
     tool_choice="auto",
src/proxy_lite/app.py (CHANGED)

@@ -28,7 +28,7 @@ def get_user_config(config_expander):
         "name": "proxy_lite",
         "client": {
             "name": "convergence",
-            "model_id": "convergence-ai/proxy-lite",
+            "model_id": "convergence-ai/proxy-lite-3b",
             "api_base": "https://convergence-ai-demo-api.hf.space/v1",
         },
     },
src/proxy_lite/configs/default.yaml (CHANGED)

@@ -14,7 +14,7 @@ solver:
   name: proxy_lite
   client:
     name: convergence
-    model_id: convergence-ai/proxy-lite
+    model_id: convergence-ai/proxy-lite-3b
    api_base: https://convergence-ai-demo-api.hf.space/v1
   local_view: true
   task_timeout: 1800