GPTfree api
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -1,65 +1,43 @@
|
|
1 |
import os
|
|
|
2 |
from vllm import LLM
|
3 |
from vllm.sampling_params import SamplingParams
|
4 |
-
from huggingface_hub import hf_hub_download
|
5 |
from datetime import datetime, timedelta
|
6 |
|
7 |
-
#
|
8 |
-
|
9 |
-
if not hf_token:
|
10 |
-
raise ValueError("Hugging Face token is not set in environment variables.")
|
11 |
-
login(hf_token)
|
12 |
-
|
13 |
-
model_name = "mistralai/Pixtral-Large-Instruct-2411"
|
14 |
|
|
|
15 |
def load_system_prompt(repo_id: str, filename: str) -> str:
|
16 |
-
"""指定されたリポジトリからSYSTEM_PROMPT.txtをダウンロードし、フォーマット済みプロンプトを返す"""
|
17 |
file_path = hf_hub_download(repo_id=repo_id, filename=filename)
|
18 |
with open(file_path, 'r') as file:
|
19 |
system_prompt = file.read()
|
20 |
-
# 日付とモデル名でフォーマット
|
21 |
today = datetime.today().strftime('%Y-%m-%d')
|
22 |
yesterday = (datetime.today() - timedelta(days=1)).strftime('%Y-%m-%d')
|
23 |
model_name = repo_id.split("/")[-1]
|
24 |
return system_prompt.format(name=model_name, today=today, yesterday=yesterday)
|
25 |
|
26 |
-
#
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
|
32 |
-
|
33 |
-
messages = [
|
34 |
-
{"role": "system", "content": SYSTEM_PROMPT},
|
35 |
-
{
|
36 |
-
"role": "user",
|
37 |
-
"content": [
|
38 |
-
{
|
39 |
-
"type": "text",
|
40 |
-
"text": "Which of the depicted countries has the best food? Which the second and third and fourth? Name the country, its color on the map and one its city that is visible on the map, but is not the capital. Make absolutely sure to only name a city that can be seen on the map.",
|
41 |
-
},
|
42 |
-
{"type": "image_url", "image_url": {"url": image_url}},
|
43 |
-
],
|
44 |
-
},
|
45 |
-
]
|
46 |
|
47 |
-
|
48 |
-
|
|
|
|
|
|
|
|
|
|
|
49 |
|
50 |
-
|
51 |
-
llm = LLM(
|
52 |
-
model=model_name,
|
53 |
-
config_format="mistral",
|
54 |
-
load_format="mistral",
|
55 |
-
tokenizer_mode="mistral",
|
56 |
-
tensor_parallel_size=1, # CPUモードでは並列数は1
|
57 |
-
device="cpu", # 明示的にCPUを指定
|
58 |
-
limit_mm_per_prompt={"image": 4} # マルチモーダル入力制限
|
59 |
-
)
|
60 |
|
61 |
-
#
|
62 |
-
|
|
|
63 |
|
64 |
-
|
65 |
-
print(outputs[0].outputs[0].text)
|
|
|
1 |
import os
|
2 |
+
import tempfile
|
3 |
from vllm import LLM
|
4 |
from vllm.sampling_params import SamplingParams
|
5 |
+
from huggingface_hub import hf_hub_download
|
6 |
from datetime import datetime, timedelta
|
7 |
|
8 |
+
# Hugging Face Hub repository ID of the model to load and run.
model_name = "mistralai/Mistral-7B-Instruct-v0.2"
|
|
|
|
|
|
|
|
|
|
10 |
|
11 |
+
# Loader for the system prompt shipped alongside the model repo.
def load_system_prompt(repo_id: str, filename: str) -> str:
    """Download *filename* from *repo_id* on the Hugging Face Hub and return it formatted.

    The downloaded template may contain ``{name}``, ``{today}`` and
    ``{yesterday}`` placeholders, which are filled with the repo's short
    name and the current / previous date (``YYYY-MM-DD``).

    Raises whatever ``hf_hub_download`` raises on network/auth failure,
    and ``KeyError`` if the template references an unknown placeholder.
    """
    file_path = hf_hub_download(repo_id=repo_id, filename=filename)
    # Read explicitly as UTF-8 so the result does not depend on the
    # platform's default locale encoding (e.g. cp1252 on Windows).
    with open(file_path, 'r', encoding='utf-8') as file:
        system_prompt = file.read()
    today = datetime.today().strftime('%Y-%m-%d')
    yesterday = (datetime.today() - timedelta(days=1)).strftime('%Y-%m-%d')
    # Short repo name; renamed locally to avoid shadowing the
    # module-level `model_name` constant.
    repo_name = repo_id.split("/")[-1]
    return system_prompt.format(name=repo_name, today=today, yesterday=yesterday)
20 |
|
21 |
+
# Use a throwaway temporary directory as the HF cache so nothing persists
# on disk after the script exits. Everything that touches the cache
# (download, model load, inference) must stay inside this block, because
# the directory is deleted when the `with` exits.
with tempfile.TemporaryDirectory() as tmpdirname:
    os.environ["TRANSFORMERS_CACHE"] = tmpdirname
    os.environ["HF_HOME"] = tmpdirname

    # Fail fast with a clear message: assigning None into os.environ
    # (when HF_TOKEN is unset) would otherwise raise an opaque TypeError.
    hf_token = os.getenv("HF_TOKEN")
    if not hf_token:
        raise ValueError("Hugging Face token is not set in environment variables.")
    os.environ["HF_TOKEN"] = hf_token

    SYSTEM_PROMPT = load_system_prompt(model_name, "SYSTEM_PROMPT.txt")

    messages = [
        {"role": "system", "content": SYSTEM_PROMPT},
        {
            "role": "user",
            "content": "Which of the depicted countries has the best food?",
        },
    ]

    sampling_params = SamplingParams(max_tokens=512)

    # Load and run the model (single process, CPU device).
    llm = LLM(model=model_name, trust_remote_code=True, tensor_parallel_size=1, device="cpu")
    outputs = llm.chat(messages, sampling_params=sampling_params)

    print(outputs[0].outputs[0].text)
|