anubhav-singh committed · verified
Commit 464350c · Parent: a9dfa8b

Update app.py

Files changed (1):
  app.py  +6 -8
app.py CHANGED
@@ -1,16 +1,14 @@
 import torch
-from transformers import AutoModel, AutoTokenizer, StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer
+from transformers import AutoModel,AutoModelForCausalLM, AutoTokenizer, StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer
 import gradio as gr
 from threading import Thread
 
-torch.random.manual_seed(0)
-
-model = AutoModel.from_pretrained(
+model = AutoModelForCausalLM.from_pretrained(
     "DuckyBlender/racist-phi3",
-    device_map="auto",
+
     torch_dtype=torch.float16,
     trust_remote_code=True,
-    low_cpu_mem_usage=True,
+
 )
 tokenizer = AutoTokenizer.from_pretrained("DuckyBlender/racist-phi3")
 device = torch.device("cpu")
@@ -39,8 +37,8 @@ def predict(message, history):
     max_new_tokens=512,
     do_sample=True,
     top_p=0.90,
-    top_k=100,
-    temperature=0.8,
+    top_k=1000,
+    temperature=0.9,
     num_beams=1,
     stopping_criteria=StoppingCriteriaList([stop])
 )
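
For context, a minimal sketch of how the updated pieces could fit together in app.py after this commit. Only the from_pretrained(...) arguments and the generate(...) sampling settings (top_k=1000, temperature=0.9, plus the unchanged max_new_tokens, top_p, num_beams) come from this diff; the StopOnEos criterion, the chat-template prompt handling inside predict(), and the Gradio wiring are illustrative assumptions, not the repository's exact code.

import torch
from threading import Thread

import gradio as gr
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    StoppingCriteria,
    StoppingCriteriaList,
    TextIteratorStreamer,
)

# Loading arguments as in the updated app.py (device_map and low_cpu_mem_usage dropped).
# Note: float16 weights on CPU may need casting to float32 for generate() to run in practice.
model = AutoModelForCausalLM.from_pretrained(
    "DuckyBlender/racist-phi3",
    torch_dtype=torch.float16,
    trust_remote_code=True,
)
tokenizer = AutoTokenizer.from_pretrained("DuckyBlender/racist-phi3")
device = torch.device("cpu")


class StopOnEos(StoppingCriteria):
    """Illustrative stopping criterion (assumption): stop once EOS has been generated."""

    def __call__(self, input_ids, scores, **kwargs):
        return bool(input_ids[0][-1] == tokenizer.eos_token_id)


def predict(message, history):
    # Assumed prompt handling via the model's chat template; the real predict()
    # in app.py may build the prompt differently.
    messages = []
    for user_msg, assistant_msg in history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    input_ids = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    ).to(device)

    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    stop = StopOnEos()

    generate_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=512,
        do_sample=True,
        top_p=0.90,
        top_k=1000,        # raised from 100 in this commit
        temperature=0.9,   # raised from 0.8 in this commit
        num_beams=1,
        stopping_criteria=StoppingCriteriaList([stop]),
    )
    # Run generation in a background thread and stream partial output to the UI.
    Thread(target=model.generate, kwargs=generate_kwargs).start()

    partial = ""
    for new_text in streamer:
        partial += new_text
        yield partial


gr.ChatInterface(predict).launch()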