joaogante HF Staff committed on
Commit e849ad4 · verified · 1 Parent(s): 4b1483e

Trying gemma 2

Files changed (1)
  1. app.py +2 -2
app.py CHANGED
@@ -6,8 +6,8 @@ import gradio as gr
 from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
 import time
 
-model_id = "EleutherAI/pythia-6.9b-deduped"
-assistant_id = "EleutherAI/pythia-70m-deduped"
+model_id = "google/gemma-2-27b-it"
+assistant_id = "google/gemma-2-2b-it"
 torch_device = "cuda" if torch.cuda.is_available() else "cpu"
 print("Running on device:", torch_device)
 print("CPU threads:", torch.get_num_threads())