Update README.md
Browse files
README.md
CHANGED
@@ -14,6 +14,130 @@ model-index:
|
|
14 |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
|
15 |
should probably proofread and complete it, then remove this comment. -->
|
16 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
17 |
# train_2024-06-17-19-49-05
|
18 |
|
19 |
This model is a fine-tuned version of [Qwen/Qwen2-1.5B-Instruct](https://huggingface.co/Qwen/Qwen2-1.5B-Instruct) on the glaive_toolcall_zh and the glaive_toolcall_en datasets.
|
|
|
14 |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
|
15 |
should probably proofread and complete it, then remove this comment. -->
|
16 |
|
17 |
+
# Install some dependencies
|
18 |
+
```bash
|
19 |
+
pip install openai huggingface_hub
|
20 |
+
```
|
21 |
+
|
22 |
+
# Download the LoRA adapter
|
23 |
+
```python
|
24 |
+
from huggingface_hub import snapshot_download
|
25 |
+
snapshot_download(
|
26 |
+
repo_id="svjack/Qwen2-1_5B_Function_Call_tiny_lora",
|
27 |
+
repo_type="model",
|
28 |
+
local_dir="Qwen2-1_5B_Function_Call_tiny_lora",
|
29 |
+
local_dir_use_symlinks = False
|
30 |
+
)
|
31 |
+
```
|
32 |
+
|
33 |
+
# Start an OpenAI-style API server
|
34 |
+
```bash
|
35 |
+
python src/api.py \
|
36 |
+
--model_name_or_path Qwen/Qwen2-1.5B-Instruct \
|
37 |
+
--template qwen \
|
38 |
+
--adapter_name_or_path Qwen2-1_5B_Function_Call_tiny_lora
|
39 |
+
```
|
40 |
+
|
41 |
+
# Inference (English example)
|
42 |
+
```python
|
43 |
+
import json
|
44 |
+
import os
|
45 |
+
from typing import Sequence
|
46 |
+
|
47 |
+
from openai import OpenAI
|
48 |
+
from transformers.utils.versions import require_version
|
49 |
+
|
50 |
+
require_version("openai>=1.5.0", "To fix: pip install openai>=1.5.0")
|
51 |
+
|
52 |
+
def calculate_gpa(grades: Sequence[str], hours: Sequence[int]) -> float:
|
53 |
+
grade_to_score = {"A": 4, "B": 3, "C": 2}
|
54 |
+
total_score, total_hour = 0, 0
|
55 |
+
for grade, hour in zip(grades, hours):
|
56 |
+
total_score += grade_to_score[grade] * hour
|
57 |
+
total_hour += hour
|
58 |
+
return round(total_score / total_hour, 2)
|
59 |
+
|
60 |
+
client = OpenAI(
|
61 |
+
api_key="0",
|
62 |
+
base_url="http://localhost:{}/v1".format(os.environ.get("API_PORT", 8000)),
|
63 |
+
)
|
64 |
+
|
65 |
+
tools = [
|
66 |
+
{
|
67 |
+
"type": "function",
|
68 |
+
"function": {
|
69 |
+
"name": "calculate_gpa",
|
70 |
+
"description": "Calculate the Grade Point Average (GPA) based on grades and credit hours",
|
71 |
+
"parameters": {
|
72 |
+
"type": "object",
|
73 |
+
"properties": {
|
74 |
+
"grades": {"type": "array", "items": {"type": "string"}, "description": "The grades"},
|
75 |
+
"hours": {"type": "array", "items": {"type": "integer"}, "description": "The credit hours"},
|
76 |
+
},
|
77 |
+
"required": ["grades", "hours"],
|
78 |
+
},
|
79 |
+
},
|
80 |
+
}
|
81 |
+
]
|
82 |
+
tool_map = {"calculate_gpa": calculate_gpa}
|
83 |
+
|
84 |
+
messages = []
|
85 |
+
messages.append({"role": "user", "content": "My grades are A, A, B, and C. The credit hours are 3, 4, 3, and 2."})
|
86 |
+
|
87 |
+
result = client.chat.completions.create(messages=messages,
|
88 |
+
model="Qwen/Qwen2-1.5B-Instruct", tools=tools)
|
89 |
+
|
90 |
+
result.choices[0].message.tool_calls
|
91 |
+
|
92 |
+
messages.append(result.choices[0].message)
|
93 |
+
tool_call = result.choices[0].message.tool_calls[0].function
|
94 |
+
print(tool_call)
|
95 |
+
|
96 |
+
name, arguments = tool_call.name, json.loads(tool_call.arguments)
|
97 |
+
tool_result = tool_map[name](**arguments)
|
98 |
+
|
99 |
+
messages.append({"role": "tool", "content": json.dumps({"gpa": tool_result}, ensure_ascii=False)})
|
100 |
+
|
101 |
+
result = client.chat.completions.create(messages=messages, model="test", tools=tools)
|
102 |
+
print(result.choices[0].message.content)
|
103 |
+
```
|
104 |
+
|
105 |
+
# Output
|
106 |
+
```
|
107 |
+
Function(arguments='{"grades": ["A", "A", "B", "C"], "hours": [3, 4, 3, 2]}', name='calculate_gpa')
|
108 |
+
Your calculated GPA is 3.42.
|
109 |
+
```
|
110 |
+
|
111 |
+
# Inference (Chinese example)
|
112 |
+
```python
|
113 |
+
messages = []
|
114 |
+
messages.append({"role": "user", "content": "我的成绩分别是A,A,B,C学分分别是3, 4, 3,和2"})
|
115 |
+
|
116 |
+
result = client.chat.completions.create(messages=messages,
|
117 |
+
model="Qwen/Qwen2-1.5B-Instruct", tools=tools)
|
118 |
+
|
119 |
+
result.choices[0].message.tool_calls
|
120 |
+
|
121 |
+
messages.append(result.choices[0].message)
|
122 |
+
tool_call = result.choices[0].message.tool_calls[0].function
|
123 |
+
print(tool_call)
|
124 |
+
|
125 |
+
name, arguments = tool_call.name, json.loads(tool_call.arguments)
|
126 |
+
tool_result = tool_map[name](**arguments)
|
127 |
+
|
128 |
+
messages.append({"role": "tool", "content": json.dumps({"gpa": tool_result}, ensure_ascii=False)})
|
129 |
+
|
130 |
+
result = client.chat.completions.create(messages=messages, model="test", tools=tools)
|
131 |
+
print(result.choices[0].message.content)
|
132 |
+
```
|
133 |
+
|
134 |
+
# Output
|
135 |
+
```
|
136 |
+
Function(arguments='{"grades": ["A", "A", "B", "C"], "hours": [3, 4, 3, 2]}', name='calculate_gpa')
|
137 |
+
你的GPA是3.42。
|
138 |
+
```
|
139 |
+
|
140 |
+
|
141 |
# train_2024-06-17-19-49-05
|
142 |
|
143 |
This model is a fine-tuned version of [Qwen/Qwen2-1.5B-Instruct](https://huggingface.co/Qwen/Qwen2-1.5B-Instruct) on the glaive_toolcall_zh and the glaive_toolcall_en datasets.
|