{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Llama tokenizer inspection\n",
    "\n",
    "Load the `meta-llama/Llama-3.2-1B-Instruct` tokenizer and show how a sample\n",
    "string maps to token ids, raw BPE tokens (with the `Ġ` leading-space marker),\n",
    "and the human-readable per-token strings."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "from transformers import AutoTokenizer\n",
    "\n",
    "model_id = \"meta-llama/Llama-3.2-1B-Instruct\"\n",
    "tokenizer = AutoTokenizer.from_pretrained(model_id, cache_dir=\"wm_detector/static/hf_cache\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[4438, 311, 1304, 264, 19692]\n",
      "['How', 'Ġto', 'Ġmake', 'Ġa', 'Ġcake']\n",
      "['How', ' to', ' make', ' a', ' cake']\n"
     ]
    }
   ],
   "source": [
    "def tokenize_text(text):\n",
    "    \"\"\"Encode `text` into token ids, without adding special tokens (BOS/EOS).\"\"\"\n",
    "    return tokenizer.encode(text, add_special_tokens=False)\n",
    "\n",
    "\n",
    "text = \"How to make a cake\"\n",
    "token_ids = tokenize_text(text)\n",
    "\n",
    "# Raw BPE tokens keep the 'Ġ' leading-space marker; converting each token\n",
    "# back to a string individually recovers the readable piece of text it covers.\n",
    "tokens = tokenizer.convert_ids_to_tokens(token_ids)\n",
    "token_strs = [tokenizer.convert_tokens_to_string([token]) for token in tokens]\n",
    "\n",
    "print(token_ids)\n",
    "print(tokens)\n",
    "print(token_strs)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "base",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}