{
"cells": [
{
"cell_type": "code",
"execution_count": 31,
"id": "b368a208-7b0f-4928-aad6-94030a47d573",
"metadata": {},
"outputs": [
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "6d72bc7458d64ec7af180321e7d9d7aa",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Loading checkpoint shards: 0%| | 0/2 [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"###load models\n",
"base_model = \"meta-llama/Llama-3.2-3B-Instruct\"\n",
"fine_tuned_model = \"/home/marco/llama-3.2-instruct-offensive-classification-1.0.0\"\n",
"\n",
"from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline\n",
"from peft import PeftModel\n",
"import torch\n",
"\n",
"\n",
"# Reload tokenizer and model\n",
"tokenizer = AutoTokenizer.from_pretrained(fine_tuned_model)\n",
"\n",
"model = AutoModelForCausalLM.from_pretrained(\n",
" fine_tuned_model,\n",
" return_dict=True,\n",
" low_cpu_mem_usage=True,\n",
" torch_dtype=torch.float16,\n",
" device_map=\"auto\",\n",
" trust_remote_code=True,\n",
" offload_buffers=True\n",
")"
]
},
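{
"cell_type": "code",
"execution_count": null,
"id": "peft-adapter-sketch",
"metadata": {},
"outputs": [],
"source": [
"### Optional sketch: loading a LoRA adapter instead of a merged checkpoint\n",
"# The cell above loads a fully merged checkpoint, so base_model and PeftModel go unused there.\n",
"# If the directory held only LoRA adapter weights, loading would look roughly like this.\n",
"# adapter_path is a hypothetical example path, not a file from this project.\n",
"import os\n",
"\n",
"adapter_path = \"/home/marco/llama-3.2-offensive-lora-adapter\"  # hypothetical\n",
"if os.path.isdir(adapter_path):\n",
"    base = AutoModelForCausalLM.from_pretrained(\n",
"        base_model,\n",
"        torch_dtype=torch.float16,\n",
"        device_map=\"auto\"\n",
"    )\n",
"    adapter_model = PeftModel.from_pretrained(base, adapter_path)\n",
"    adapter_model = adapter_model.merge_and_unload()  # bake the adapter into the base weights"
]
},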
{
"cell_type": "code",
"execution_count": 32,
"id": "54e39123-1ed6-4990-8295-6df1e0563fc5",
"metadata": {},
"outputs": [],
"source": [
"text = \"You are a pig!\""
]
},
{
"cell_type": "code",
"execution_count": 33,
"id": "1b68121f-3215-46f6-901b-406be4e05a06",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Device set to use cpu\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Offensive\n"
]
}
],
"source": [
"###Start Prompt\n",
"prompt = f\"\"\"Classify the text into Hatespeech, Offensive, Normal and return the answer as the corresponding label.\n",
"text: {text}\n",
"label: \"\"\".strip()\n",
"\n",
"pipe = pipeline(\n",
" \"text-generation\",\n",
" model=model,\n",
" tokenizer=tokenizer,\n",
" torch_dtype=torch.float16,\n",
" device_map=\"auto\"\n",
")\n",
"\n",
"outputs = pipe(prompt, max_new_tokens=2, do_sample=True, temperature=0.1, pad_token_id=tokenizer.eos_token_id)\n",
"print(outputs[0][\"generated_text\"].split(\"label: \")[-1].strip())"
]
},
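{
"cell_type": "code",
"execution_count": null,
"id": "batch-classification-sketch",
"metadata": {},
"outputs": [],
"source": [
"### Optional sketch: classifying a batch of texts\n",
"# Reuses the pipeline above with the same prompt template; the sample sentences are\n",
"# illustrative, not taken from the training data. Greedy decoding is used here for\n",
"# reproducibility, instead of the low-temperature sampling in the cell above.\n",
"samples = [\n",
"    \"Have a nice day!\",\n",
"    \"You are a pig!\"\n",
"]\n",
"\n",
"for sample in samples:\n",
"    p = f\"\"\"Classify the text into Hatespeech, Offensive, Normal and return the answer as the corresponding label.\n",
"text: {sample}\n",
"label: \"\"\".strip()\n",
"    out = pipe(p, max_new_tokens=2, do_sample=False, pad_token_id=tokenizer.eos_token_id)\n",
"    label = out[0][\"generated_text\"].split(\"label: \")[-1].strip()\n",
"    print(f\"{sample!r} -> {label}\")"
]
},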
{
"cell_type": "code",
"execution_count": null,
"id": "d709317d-b9cf-4590-9caf-ac74842f6be2",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.5"
}
},
"nbformat": 4,
"nbformat_minor": 5
}