Add pipeline tag and library name
This PR adds `pipeline_tag` and `library_name` to the model card metadata. The `pipeline_tag` is set to `text-generation`, since the model generates text (user profiles and dialogue); the `library_name` is set to `transformers`, matching the code examples in the README. The diff also records the `base_model` (`meta-llama/Meta-Llama-3-8B-Instruct`) and an explicit `license: mit`.
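Once merged, these two fields let the Hub surface the stock Transformers inference snippet for this model. A minimal sketch of what that enables, assuming a repo id (`wangkevin02/Profile_Generator` below is a placeholder, not confirmed by this diff):

```python
# Minimal sketch: load the model through the task advertised by the new
# pipeline_tag. The repo id below is an assumed placeholder.
from transformers import pipeline

generator = pipeline(
    "text-generation",                      # matches the new pipeline_tag
    model="wangkevin02/Profile_Generator",  # assumed repo id
)

result = generator("[User]: I love hiking and trail running.", max_new_tokens=64)
print(result[0]["generated_text"])
```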
README.md (CHANGED)
```diff
@@ -1,12 +1,15 @@
 ---
-
+base_model:
+- meta-llama/Meta-Llama-3-8B-Instruct
 datasets:
 - wangkevin02/LMSYS-USP
 language:
 - en
-
-
+license: mit
+pipeline_tag: text-generation
+library_name: transformers
 ---
+
 # Profile Generator
 
 ## Model Description
```
```diff
@@ -20,8 +23,6 @@ The **Profile Generator** is a model designed to extract and generate detailed u
 | LMSYS-USP | GPT4o | 86.89 | 25.64 | 82.24 | 3.71 | 84.50 | 4.42 |
 | LMSYS-USP | Distill-llama3 | 86.15 | 23.81 | 81.95 | 3.71 | 84.00 | 4.36 |
 
-
-
 > *Note*: Our model is subject to the following constraints:
 >
 > 1. **Maximum Context Length**: Supports up to **4,096 tokens**. Exceeding this may degrade performance; keep inputs within this limit for best results.
```
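The context-length note carried through this hunk is a practical constraint; a minimal sketch of enforcing it before generation, using the tokenizer of the `base_model` declared in the new metadata (the helper name is an illustration, not part of the README):

```python
# Minimal sketch: keep prompts inside the 4,096-token window noted above.
# Uses the base_model tokenizer from the new metadata; adjust as needed.
from transformers import AutoTokenizer

MAX_CONTEXT = 4096
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")

def truncate_to_context(text: str, max_tokens: int = MAX_CONTEXT) -> str:
    """Drop the oldest tokens when a prompt exceeds the context window."""
    ids = tokenizer(text, add_special_tokens=False)["input_ids"]
    if len(ids) <= max_tokens:
        return text
    return tokenizer.decode(ids[-max_tokens:])  # keep the most recent tokens
```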
```diff
@@ -67,10 +68,13 @@ def extract_user_messages(messages: List[Dict[str, str]]) -> List[str]:
 # Prepare messages for model input
 def prepare_messages(utterances: List[str], config: ProfileConfig, tokenizer) -> str:
     """Prepare messages for model input with optimized formatting."""
-    user_prompt = "".join(f"[User]: {u}\n---\n" for u in utterances)
+    user_prompt = "".join(
+        f"[User]: {u}\n---\n" for u in utterances
+    )
     formatted_msg = [
         {"role": "system", "content": config.system_prompt},
-        {"role": "user", "content": f"{config.instruction}\n{user_prompt}"}
+        {"role": "user", "content":
+            f"{config.instruction}\n{user_prompt}"}
     ]
     return tokenizer.apply_chat_template(
         formatted_msg,
```
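For reference, a sketch of driving `prepare_messages` end to end. The repo id, the `config` object, and the decoding step are assumptions based on the surrounding README snippet, which ends with `print(f"profile:{profile}")`:

```python
# Sketch of calling prepare_messages and generating a profile.
# model_id and config are assumptions; prepare_messages and ProfileConfig
# come from the README snippet shown in the hunk above.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "wangkevin02/Profile_Generator"  # assumed repo id
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

config = ProfileConfig()  # assumed: defined earlier in the README snippet
prompt = prepare_messages(
    ["I love hiking.", "Any tips for a first marathon?"], config, tokenizer
)
inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=4096)
outputs = model.generate(**inputs, max_new_tokens=256)
profile = tokenizer.decode(
    outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True
)
print(f"profile:{profile}")
```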
```diff
@@ -168,8 +172,6 @@ print(f"profile:{profile}")
 
 ## Citation
 
-
-
 If you find this model useful, please cite:
 
 ```plaintext
```