{
"cells": [
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"from transformers import AutoTokenizer, LlamaForCausalLM\n",
"\n",
"model_id = \"meta-llama/Llama-3.2-1B-Instruct\"\n",
"tokenizer = AutoTokenizer.from_pretrained(model_id, cache_dir=\"wm_detector/static/hf_cache\")"
]
},
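{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick sanity check (a sketch, assuming this tokenizer's default settings): `tokenizer.encode` with `add_special_tokens=True` (the default) prepends the BOS token, which is why the tokenization cell below passes `add_special_tokens=False` to inspect the raw tokens only."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# With the default add_special_tokens=True, a BOS token is prepended to the ids\n",
"print(tokenizer.encode(\"How to make a cake\"))\n",
"print(tokenizer.bos_token, tokenizer.bos_token_id)"
]
},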
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[4438, 311, 1304, 264, 19692]\n",
"['How', 'Ġto', 'Ġmake', 'Ġa', 'Ġcake']\n",
"['How', ' to', ' make', ' a', ' cake']\n"
]
}
],
"source": [
"def tokenize_text(text):\n",
" return tokenizer.encode(text, add_special_tokens=False)\n",
"\n",
"text = \"How to make a cake\"\n",
"token_ids = tokenize_text(text)\n",
"tokens = tokenizer.convert_ids_to_tokens(token_ids)\n",
"token_strs = [tokenizer.convert_tokens_to_string([token]) for token in tokens]\n",
"decoded = tokenizer.decode(tokenize_text(text))\n",
"\n",
"print(token_ids)\n",
"print(tokens)\n",
"print(token_strs)"
]
},
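{
"cell_type": "markdown",
"metadata": {},
"source": [
"The reverse direction: `tokenizer.decode` maps token ids back to a string. A minimal round-trip sketch, reusing `tokenize_text` and `text` from the cell above; for plain text like this, byte-level BPE encoding and decoding is lossless."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Decode the ids back to text; with no special tokens added, the round-trip is exact\n",
"decoded = tokenizer.decode(tokenize_text(text))\n",
"assert decoded == text\n",
"print(decoded)"
]
}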
],
"metadata": {
"kernelspec": {
"display_name": "base",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.2"
}
},
"nbformat": 4,
"nbformat_minor": 2
}