amirgame197 committed · verified
Commit dd04363 · 1 Parent(s): 4a0aa49

Update app.py

Files changed (1):
  1. app.py +67 -74
app.py CHANGED

@@ -1,31 +1,36 @@
 import gradio as gr
 from gradio_client import Client
 from huggingface_hub import InferenceClient
+from deep_translator import GoogleTranslator
 import random
+
 ss_client = Client("https://omnibus-html-image-current-tab.hf.space/")
 
-models=[
+models = [
     "google/gemma-7b",
     "google/gemma-7b-it",
     "google/gemma-2b",
     "google/gemma-2b-it"
 ]
-clients=[
-    InferenceClient(models[0]),
-    InferenceClient(models[1]),
-    InferenceClient(models[2]),
-    InferenceClient(models[3]),
+
+clients = [
+    InferenceClient(models[0]),
+    InferenceClient(models[1]),
+    InferenceClient(models[2]),
+    InferenceClient(models[3]),
 ]
 
-VERBOSE=False
+VERBOSE = False
+
+def translate_to_english(prompt):
+    translated_prompt = GoogleTranslator(source='auto', target='en').translate(prompt)
+    return translated_prompt
 
 def load_models(inp):
-    if VERBOSE==True:
+    if VERBOSE == True:
         print(type(inp))
         print(inp)
         print(models[inp])
-    #client_z.clear()
-    #client_z.append(InferenceClient(models[inp]))
     return gr.update(label=models[inp])
 
 def format_prompt(message, history, cust_p):
@@ -34,75 +39,64 @@ def format_prompt(message, history, cust_p):
     for user_prompt, bot_response in history:
         prompt += f"<start_of_turn>user{user_prompt}<end_of_turn>"
         prompt += f"<start_of_turn>model{bot_response}<end_of_turn></s>"
-    if VERBOSE==True:
+    if VERBOSE == True:
         print(prompt)
-    #prompt += f"<start_of_turn>user\n{message}<end_of_turn>\n<start_of_turn>model\n"
-    prompt+=cust_p.replace("USER_INPUT",message)
+    prompt += cust_p.replace("USER_INPUT", message)
     return prompt
 
-def chat_inf(system_prompt,prompt,history,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem,cust_p):
-    #token max=8192
-    print(client_choice)
-    hist_len=0
-    client=clients[int(client_choice)-1]
+def chat_inf(system_prompt, prompt, history, memory, client_choice, seed, temp, tokens, top_p, rep_p, chat_mem, cust_p):
+    hist_len = 0
+    client = clients[int(client_choice) - 1]
     if not history:
         history = []
-        hist_len=0
     if not memory:
         memory = []
-        mem_len=0
+
     if memory:
-        for ea in memory[0-chat_mem:]:
-            hist_len+=len(str(ea))
-    in_len=len(system_prompt+prompt)+hist_len
+        for ea in memory[0 - chat_mem:]:
+            hist_len += len(str(ea))
+    in_len = len(system_prompt + prompt) + hist_len
 
-    if (in_len+tokens) > 8000:
-        history.append((prompt,"Wait, that's too many tokens, please reduce the 'Chat Memory' value, or reduce the 'Max new tokens' value"))
-        yield history,memory
+    if (in_len + tokens) > 8000:
+        history.append((prompt, "Wait, that's too many tokens, please reduce the 'Chat Memory' value, or reduce the 'Max new tokens' value"))
+        yield history, memory
     else:
         generate_kwargs = dict(
-            #temperature=temp,
             max_new_tokens=tokens,
-            #top_p=top_p,
-            #repetition_penalty=rep_p,
-            #do_sample=True,
-            #seed=seed,
         )
         if system_prompt:
-            formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", memory[0-chat_mem:],cust_p)
+            formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", memory[0 - chat_mem:], cust_p)
         else:
-            formatted_prompt = format_prompt(prompt, memory[0-chat_mem:],cust_p)
-
-
+            formatted_prompt = format_prompt(prompt, memory[0 - chat_mem:], cust_p)
+
+        translated_prompt = translate_to_english(formatted_prompt)
+
         chat = [
-            { "role": "user", "content": f"{formatted_prompt}" },
-        ]
+            {"role": "user", "content": f"{translated_prompt}"},
+        ]
 
-        stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
+        stream = client.text_generation(translated_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
         output = ""
         for response in stream:
             output += response.token.text
-            yield [(prompt,output)],memory
-        history.append((prompt,output))
-        memory.append((prompt,output))
-        yield history,memory
-
-    if VERBOSE==True:
-        print("\n######### HIST "+str(in_len))
-        print("\n######### TOKENS "+str(tokens))
+            yield [(prompt, output)], memory
+        history.append((prompt, output))
+        memory.append((prompt, output))
+        yield history, memory
 
 def clear_fn():
-    return None,None,None,None
-rand_val=random.randint(1,1111111111111111)
+    return None, None, None, None
 
-def check_rand(inp,val):
-    if inp==True:
-        return gr.Slider(label="Seed", minimum=1, maximum=1111111111111111, value=random.randint(1,1111111111111111))
+rand_val = random.randint(1, 1111111111111111)
+
+def check_rand(inp, val):
+    if inp == True:
+        return gr.Slider(label="Seed", minimum=1, maximum=1111111111111111, value=random.randint(1, 1111111111111111))
     else:
         return gr.Slider(label="Seed", minimum=1, maximum=1111111111111111, value=int(val))
-
+
 with gr.Blocks() as app:
-    memory=gr.State()
+    memory = gr.State()
     chat_b = gr.Chatbot(height=500)
     with gr.Group():
         with gr.Row():
@@ -114,27 +108,26 @@ with gr.Blocks() as app:
             btn = gr.Button("Chat")
         with gr.Column(scale=1):
            with gr.Group():
-                stop_btn=gr.Button("Stop")
-                clear_btn=gr.Button("Clear")
-                client_choice=gr.Dropdown(label="Models",type='index',choices=[c for c in models],value=models[0],interactive=True)
-                with gr.Accordion("Prompt Format",open=False):
-                    custom_prompt=gr.Textbox(label="Modify Prompt Format", info="For testing purposes. 'USER_INPUT' is where 'SYSTEM_PROMPT, PROMPT' will be placed", lines=5,value="<start_of_turn>userUSER_INPUT<end_of_turn><start_of_turn>model")
+                stop_btn = gr.Button("Stop")
+                clear_btn = gr.Button("Clear")
+                client_choice = gr.Dropdown(label="Models", type='index', choices=[c for c in models], value=models[0], interactive=True)
+                with gr.Accordion("Prompt Format", open=False):
+                    custom_prompt = gr.Textbox(label="Modify Prompt Format", info="For testing purposes. 'USER_INPUT' is where 'SYSTEM_PROMPT, PROMPT' will be placed", lines=5, value="<start_of_turn>userUSER_INPUT<end_of_turn><start_of_turn>model")
        with gr.Column(scale=1):
            with gr.Group():
                rand = gr.Checkbox(label="Random Seed", value=True)
-                seed=gr.Slider(label="Seed", minimum=1, maximum=1111111111111111,step=1, value=rand_val)
-                tokens = gr.Slider(label="Max new tokens",value=1600,minimum=0,maximum=8000,step=64,interactive=True, visible=True,info="The maximum number of tokens")
-                temp=gr.Slider(label="Temperature",step=0.01, minimum=0.01, maximum=1.0, value=0.9)
-                top_p=gr.Slider(label="Top-P",step=0.01, minimum=0.01, maximum=1.0, value=0.9)
-                rep_p=gr.Slider(label="Repetition Penalty",step=0.1, minimum=0.1, maximum=2.0, value=1.0)
-                chat_mem=gr.Number(label="Chat Memory", info="Number of previous chats to retain",value=4)
-
-
-    client_choice.change(load_models,client_choice,[chat_b])
-    app.load(load_models,client_choice,[chat_b])
-
-    chat_sub=inp.submit(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem,custom_prompt],[chat_b,memory])
-    go=btn.click(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem,custom_prompt],[chat_b,memory])
-
-    clear_btn.click(clear_fn,None,[inp,sys_inp,chat_b,memory])
-app.queue(default_concurrency_limit=10).launch()
+                seed = gr.Slider(label="Seed", minimum=1, maximum=1111111111111111, step=1, value=rand_val)
+                tokens = gr.Slider(label="Max new tokens", value=1600, minimum=0, maximum=8000, step=64, interactive=True, visible=True, info="The maximum number of tokens")
+                temp = gr.Slider(label="Temperature", step=0.01, minimum=0.01, maximum=1.0, value=0.9)
+                top_p = gr.Slider(label="Top-P", step=0.01, minimum=0.01, maximum=1.0, value=0.9)
+                rep_p = gr.Slider(label="Repetition Penalty", step=0.1, minimum=0.1, maximum=2.0, value=1.0)
+                chat_mem = gr.Number(label="Chat Memory", info="Number of previous chats to retain", value=4)
+
+    client_choice.change(load_models, client_choice, [chat_b])
+    app.load(load_models, client_choice, [chat_b])
+
+    chat_sub = inp.submit(check_rand, [rand, seed], seed).then(chat_inf, [sys_inp, inp, chat_b, memory, client_choice, seed, temp, tokens, top_p, rep_p, chat_mem, custom_prompt], [chat_b, memory])
+    go = btn.click(check_rand, [rand, seed], seed).then(chat_inf, [sys_inp, inp, chat_b, memory, client_choice, seed, temp, tokens, top_p, rep_p, chat_mem, custom_prompt], [chat_b, memory])
+
+    clear_btn.click(clear_fn, None, [inp, sys_inp, chat_b, memory])
+app.queue(default_concurrency_limit=10).launch()
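
The functional core of this update is the new translate_to_english() helper: chat_inf now runs the fully formatted prompt through deep_translator's GoogleTranslator before streaming it to the selected Gemma endpoint, so non-English input reaches the model in English. The snippet below is a minimal standalone sketch of that step under the same assumptions the commit makes: the deep_translator package is installed (pip install deep_translator) and the Space has outbound network access to Google Translate; the sample text is purely illustrative.

    from deep_translator import GoogleTranslator

    def translate_to_english(prompt):
        # source='auto' lets Google Translate detect the input language;
        # target='en' mirrors the behaviour chat_inf now relies on.
        return GoogleTranslator(source='auto', target='en').translate(prompt)

    if __name__ == "__main__":
        # Illustrative sample only; in app.py the entire formatted prompt
        # (including the Gemma <start_of_turn> markers) is passed through this call.
        print(translate_to_english("سلام، امروز هوا چطور است؟"))

Note that because the whole formatted prompt is translated, the Gemma turn markers travel through the translator as well; whether they survive unchanged depends on how Google Translate handles the surrounding text.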