{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "1dabd320-903a-4f6c-9634-e8bd5993f90f",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"* Running on local URL: http://127.0.0.1:7860\n",
"* To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7860/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from transformers import AutoModelForCausalLM, AutoTokenizer\n",
"import torch\n",
"import gradio as gr\n",
"\n",
"# مدل و توکنایزر\n",
"model_name = \"HuggingFaceTB/SmolLM2-360M-Instruct\"\n",
"tokenizer = AutoTokenizer.from_pretrained(model_name)\n",
"model = AutoModelForCausalLM.from_pretrained(model_name)\n",
"device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
"model.to(device)\n",
"model.eval()\n",
"\n",
"# پیامهای اولیه مکالمه\n",
"def format_messages(user_prompt):\n",
" system_message = \"<|im_start|>system\\n<|im_end|>\\n\"\n",
" user_message = f\"<|im_start|>user\\n{user_prompt}<|im_end|>\\n\"\n",
" assistant_prefix = \"<|im_start|>assistant\\n\"\n",
" full_prompt = system_message + user_message + assistant_prefix\n",
" return full_prompt\n",
"\n",
"# تابع چت\n",
"def chat(user_input):\n",
" prompt = format_messages(user_input)\n",
" inputs = tokenizer(prompt, return_tensors=\"pt\").to(device)\n",
"\n",
" with torch.no_grad():\n",
" outputs = model.generate(\n",
" **inputs,\n",
" max_new_tokens=256,\n",
" do_sample=True,\n",
" temperature=0.7,\n",
" top_p=0.9,\n",
" pad_token_id=tokenizer.eos_token_id\n",
" )\n",
"\n",
" response = tokenizer.decode(outputs[0], skip_special_tokens=True)\n",
" \n",
" # پاسخ مدل بعد از آخرین <|im_start|>assistant\n",
" if \"<|im_start|>assistant\" in response:\n",
" response = response.split(\"<|im_start|>assistant\")[-1].strip()\n",
" if \"<|im_end|>\" in response:\n",
" response = response.split(\"<|im_end|>\")[0].strip()\n",
" \n",
" return response\n",
"\n",
"# رابط گرافیکی Gradio\n",
"interface = gr.Interface(\n",
" fn=chat,\n",
" inputs=gr.Textbox(lines=3, placeholder=\"پیام خود را وارد کنید...\"),\n",
" outputs=\"text\",\n",
" title=\"💬 SmolLM2 Chatbot\",\n",
" description=\"مدل سبک و مکالمهمحور SmolLM2 از Hugging Face با فرمت قالب رسمی\"\n",
")\n",
"\n",
"interface.launch()\n"
]
},
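  {
   "cell_type": "markdown",
   "id": "3c2c41aa-7e8f-4b7d-9d65-0c2f5a1b9e77",
   "metadata": {},
   "source": [
    "A sketch, not part of the original notebook: the cell above builds the ChatML prompt by hand with `<|im_start|>` markers. The variant below does the same job with `tokenizer.apply_chat_template`, assuming (as is usual for instruct checkpoints) that the SmolLM2-360M-Instruct tokenizer ships a chat template, and it decodes only the newly generated tokens instead of splitting the full decoded text on the markers. The name `chat_with_template` is illustrative."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8d5c2e1f-4a6b-4c3d-9e2f-7b1a0c9d8e6f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sketch: same chat pipeline, but the prompt is built by the tokenizer's\n",
    "# chat template instead of hand-written <|im_start|> markers.\n",
    "def chat_with_template(user_input):\n",
    "    messages = [\n",
    "        {\"role\": \"system\", \"content\": \"\"},\n",
    "        {\"role\": \"user\", \"content\": user_input},\n",
    "    ]\n",
    "    # add_generation_prompt=True appends the assistant header so the model\n",
    "    # continues with its own reply rather than a new user turn.\n",
    "    prompt = tokenizer.apply_chat_template(\n",
    "        messages, tokenize=False, add_generation_prompt=True\n",
    "    )\n",
    "    inputs = tokenizer(prompt, return_tensors=\"pt\").to(device)\n",
    "\n",
    "    with torch.no_grad():\n",
    "        outputs = model.generate(\n",
    "            **inputs,\n",
    "            max_new_tokens=256,\n",
    "            do_sample=True,\n",
    "            temperature=0.7,\n",
    "            top_p=0.9,\n",
    "            pad_token_id=tokenizer.eos_token_id\n",
    "        )\n",
    "\n",
    "    # Decode only the newly generated tokens so the prompt is not echoed back.\n",
    "    new_tokens = outputs[0][inputs[\"input_ids\"].shape[-1]:]\n",
    "    return tokenizer.decode(new_tokens, skip_special_tokens=True).strip()\n",
    "\n",
    "# Quick check without the Gradio UI:\n",
    "# print(chat_with_template(\"What is the capital of France?\"))\n"
   ]
  },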
{
"cell_type": "code",
"execution_count": null,
"id": "0f00bd7a-f925-4970-aeb6-96f1dcbad3c8",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python [conda env:base] *",
"language": "python",
"name": "conda-base-py"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.7"
}
},
"nbformat": 4,
"nbformat_minor": 5
}