TheFrenchDemos committed on
Commit
063e65c
·
1 Parent(s): dd1a934

initial commit

Browse files
Files changed (2) hide show
  1. app.py +1 -1
  2. sandbox.ipynb +1 -2
app.py CHANGED
@@ -12,7 +12,7 @@ app = Flask(__name__)
12
 
13
  # Minimal setup: pick a detector (example: MarylandDetector)
14
  model_id = "meta-llama/Llama-3.2-1B-Instruct"
15
- tokenizer = AutoTokenizer.from_pretrained(model_id)
16
  detector = MarylandDetector(tokenizer=tokenizer)
17
 
18
  def tokenize_text(text):
 
12
 
13
  # Minimal setup: pick a detector (example: MarylandDetector)
14
  model_id = "meta-llama/Llama-3.2-1B-Instruct"
15
+ tokenizer = AutoTokenizer.from_pretrained(model_id, cache_dir="static/hf_cache")
16
  detector = MarylandDetector(tokenizer=tokenizer)
17
 
18
  def tokenize_text(text):
sandbox.ipynb CHANGED
@@ -9,14 +9,13 @@
9
  },
10
  {
11
  "cell_type": "code",
12
- "execution_count": 2,
13
  "metadata": {},
14
  "outputs": [],
15
  "source": [
16
  "from transformers import AutoTokenizer, LlamaForCausalLM\n",
17
  "\n",
18
  "model_id = \"meta-llama/Llama-3.2-1B-Instruct\"\n",
19
- "# cache should be in static/hf_cache\n",
20
  "tokenizer = AutoTokenizer.from_pretrained(model_id, cache_dir=\"static/hf_cache\")"
21
  ]
22
  },
 
9
  },
10
  {
11
  "cell_type": "code",
12
+ "execution_count": null,
13
  "metadata": {},
14
  "outputs": [],
15
  "source": [
16
  "from transformers import AutoTokenizer, LlamaForCausalLM\n",
17
  "\n",
18
  "model_id = \"meta-llama/Llama-3.2-1B-Instruct\"\n",
 
19
  "tokenizer = AutoTokenizer.from_pretrained(model_id, cache_dir=\"static/hf_cache\")"
20
  ]
21
  },