vilarin committed (verified)
Commit f494de5 · 1 Parent(s): d6a6e58

Update app.py
Files changed (1): app.py (+64, -22)
app.py CHANGED
@@ -1,16 +1,22 @@
+import subprocess
+subprocess.run(
+    'pip install flash-attn --no-build-isolation',
+    env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"},
+    shell=True
+)
+
 import os
 import time
 import spaces
 import torch
-from transformers import AutoModelForCausalLM, AutoTokenizer
+from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, TextIteratorStreamer
 import gradio as gr
 from threading import Thread
 
 HF_TOKEN = os.environ.get("HF_TOKEN", None)
-MODEL = "evabyte/EvaByte-SFT"
-MODEL_BASE = "evabyte/EvaByte"
+MODEL = "NousResearch/DeepHermes-3-Llama-3-8B-Preview"
 
-TITLE = "<h1><center>EvaByte</center></h1>"
+TITLE = "<h1><center>DeepHermes-3-Llama-3-8B</center></h1>"
 
 PLACEHOLDER = """
 <center>
@@ -33,12 +39,23 @@ h3 {
 
 device = "cuda" # for GPU usage or "cpu" for CPU usage
 
-tokenizer = AutoTokenizer.from_pretrained(MODEL, trust_remote_code=True)
+quantization_config = BitsAndBytesConfig(
+    load_in_4bit=True,
+    bnb_4bit_compute_dtype=torch.bfloat16,
+    bnb_4bit_use_double_quant=True,
+    bnb_4bit_quant_type= "nf4")
+
+tokenizer = AutoTokenizer.from_pretrained(MODEL)
 model = AutoModelForCausalLM.from_pretrained(
     MODEL,
-    torch_dtype=torch.bfloat16,
+    torch_dtype=torch.float16,
     device_map="auto",
-    trust_remote_code=True).eval().to(device)
+    attn_implementation="flash_attention_2",
+    quantization_config=quantization_config)
+
+# Ensure `pad_token_id` is set
+if tokenizer.pad_token_id is None:
+    tokenizer.pad_token_id = tokenizer.eos_token_id
 
 @spaces.GPU()
 def stream_chat(
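For reference, the hunk above switches the Space to a 4-bit NF4 load via bitsandbytes. The following standalone sketch is not part of app.py; it reloads the model the same way and checks the quantized memory footprint. It assumes transformers, accelerate, and bitsandbytes are installed and a CUDA GPU is available; attn_implementation="flash_attention_2" is left out so it also runs without the flash-attn build step from the diff.

# Standalone sketch (not part of app.py): verify the 4-bit load used by the Space.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

MODEL = "NousResearch/DeepHermes-3-Llama-3-8B-Preview"

quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
)

tokenizer = AutoTokenizer.from_pretrained(MODEL)
model = AutoModelForCausalLM.from_pretrained(
    MODEL,
    torch_dtype=torch.float16,
    device_map="auto",
    quantization_config=quantization_config,
    # attn_implementation="flash_attention_2",  # add if flash-attn is installed as in the Space
)

# Rough sanity check: an 8B model loaded in 4-bit should report roughly 5-6 GB
# here, versus ~16 GB in float16.
print(f"Memory footprint: {model.get_memory_footprint() / 1e9:.2f} GB")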
@@ -46,8 +63,10 @@ def stream_chat(
     history: list,
     system_prompt: str,
     temperature: float = 0.8,
-    max_new_tokens: int = 512,
+    max_new_tokens: int = 2500,
     top_p: float = 1.0,
+    top_k: int = 20,
+    penalty: float = 1.1,
 ):
     print(f'message: {message}')
     print(f'history: {history}')
@@ -63,26 +82,33 @@
 
     conversation.append({"role": "user", "content": message})
 
-    input_ids = tokenizer.apply_chat_template(conversation, add_generation_prompt=True, return_tensors="pt").to(device)
-
-    gen_out = model.multi_byte_generate(
+    input_ids = tokenizer.apply_chat_template(conversation, add_generation_prompt=True, return_tensors="pt").to(model.device)
+
+    streamer = TextIteratorStreamer(tokenizer, timeout=60.0, skip_prompt=True, skip_special_tokens=True)
+
+    generate_kwargs = dict(
         input_ids=input_ids,
         max_new_tokens = max_new_tokens,
        do_sample = False if temperature == 0 else True,
         top_p = top_p,
+        top_k = top_k,
+        eos_token_id = tokenizer.eos_token_id,
+        pad_token_id = tokenizer.pad_token_id,
         temperature = temperature,
+        repetition_penalty=penalty,
+        streamer=streamer,
     )
 
-    response = tokenizer.decode(
-        gen_out[0][input_ids.shape[1]:],
-        skip_special_tokens=False,
-        clean_up_tokenization_spaces=False
-    )
+    with torch.no_grad():
+        thread = Thread(target=model.generate, kwargs=generate_kwargs)
+        thread.start()
+
+    buffer = ""
+    for new_text in streamer:
+        buffer += new_text
+        yield buffer
 
-    for i in range(len(response)):
-        time.sleep(0.02)
-        yield response[: i + 1]
-
+
 chatbot = gr.Chatbot(height=600, placeholder=PLACEHOLDER)
 
 with gr.Blocks(css=CSS, theme="soft") as demo:
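The generation hunk above drops EvaByte's multi_byte_generate plus a character-by-character replay loop and adopts the standard transformers streaming pattern: model.generate runs in a background thread while a TextIteratorStreamer yields decoded text as it is produced. Below is a minimal sketch of that pattern outside Gradio; it assumes model and tokenizer are loaded as in the previous hunk, and the example conversation and max_new_tokens value are illustrative only.

# Minimal streaming sketch (assumes `model` and `tokenizer` are already loaded as above).
from threading import Thread
from transformers import TextIteratorStreamer

conversation = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Explain what a context manager is in Python."},
]
input_ids = tokenizer.apply_chat_template(
    conversation, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

# skip_prompt=True keeps the echoed prompt out of the stream;
# skip_special_tokens=True drops EOS and other special tokens from the text.
streamer = TextIteratorStreamer(tokenizer, timeout=60.0, skip_prompt=True, skip_special_tokens=True)

# generate() blocks until finished, so it runs in a worker thread while the
# main thread consumes the stream chunk by chunk.
thread = Thread(target=model.generate, kwargs=dict(
    input_ids=input_ids,
    max_new_tokens=256,
    do_sample=True,
    temperature=0.8,
    top_p=1.0,
    streamer=streamer,
))
thread.start()

for new_text in streamer:
    print(new_text, end="", flush=True)
thread.join()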
@@ -95,7 +121,7 @@ with gr.Blocks(css=CSS, theme="soft") as demo:
         additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
         additional_inputs=[
             gr.Textbox(
-                value="You are a helpful assistant.",
+                value="You are a deep thinking AI, you may use extremely long chains of thought to deeply consider the problem and deliberate with yourself via systematic reasoning processes to help come to a correct solution prior to answering. You should enclose your thoughts and internal monologue inside <think> </think> tags, and then provide your solution or response to the problem.",
                 label="System Prompt",
                 lines=5,
                 render=False,
@@ -112,7 +138,7 @@ with gr.Blocks(css=CSS, theme="soft") as demo:
                 minimum=128,
                 maximum=8192,
                 step=1,
-                value= 512,
+                value= 2500,
                 label="Max new tokens",
                 render=False,
             ),
@@ -124,6 +150,22 @@ with gr.Blocks(css=CSS, theme="soft") as demo:
                 label="top_p",
                 render=False,
             ),
+            gr.Slider(
+                minimum=1,
+                maximum=20,
+                step=1,
+                value=20,
+                label="top_k",
+                render=False,
+            ),
+            gr.Slider(
+                minimum=0.0,
+                maximum=2.0,
+                step=0.1,
+                value=1.1,
+                label="Repetition penalty",
+                render=False,
+            ),
         ],
         examples=[
             ["Help me study vocabulary: write a sentence for me to fill in the blank, and I'll try to pick the correct option."],