amirgame197 committed on
Commit bcb52b4 · verified · 1 Parent(s): e965f4a

Update app.py

Files changed (1)
  1. app.py +81 -77
app.py CHANGED
@@ -1,40 +1,32 @@
  import gradio as gr
  from gradio_client import Client
  from huggingface_hub import InferenceClient
- from deep_translator import GoogleTranslator
  import random
-
+ from deep_translator import GoogleTranslator
  ss_client = Client("https://omnibus-html-image-current-tab.hf.space/")
  
- models = [
+ models=[
  "google/gemma-7b",
  "google/gemma-7b-it",
  "google/gemma-2b",
  "google/gemma-2b-it"
  ]
-
- clients = [
- InferenceClient(models[0]),
- InferenceClient(models[1]),
- InferenceClient(models[2]),
- InferenceClient(models[3]),
+ clients=[
+ InferenceClient(models[0]),
+ InferenceClient(models[1]),
+ InferenceClient(models[2]),
+ InferenceClient(models[3]),
  ]
  
- VERBOSE = False
-
- def translate_to_english(prompt):
- translated_prompt = GoogleTranslator(source='auto', target='en').translate(prompt)
- return translated_prompt
-
- def translate_to_persian_text(response):
- translated_response = GoogleTranslator(source='auto', target='fa').translate(response)
- return translated_response
+ VERBOSE=False
  
  def load_models(inp):
- if VERBOSE == True:
+ if VERBOSE==True:
  print(type(inp))
  print(inp)
  print(models[inp])
+ #client_z.clear()
+ #client_z.append(InferenceClient(models[inp]))
  return gr.update(label=models[inp])
  
  def format_prompt(message, history, cust_p):
@@ -43,69 +35,81 @@ def format_prompt(message, history, cust_p):
  for user_prompt, bot_response in history:
  prompt += f"<start_of_turn>user{user_prompt}<end_of_turn>"
  prompt += f"<start_of_turn>model{bot_response}<end_of_turn></s>"
- if VERBOSE == True:
+ if VERBOSE==True:
  print(prompt)
- prompt += cust_p.replace("USER_INPUT", message)
+ #prompt += f"<start_of_turn>user\n{message}<end_of_turn>\n<start_of_turn>model\n"
+ prompt+=cust_p.replace("USER_INPUT",message)
  return prompt
  
- def chat_inf(system_prompt, prompt, history, memory, client_choice, seed, temp, tokens, top_p, rep_p, chat_mem, custom_prompt, translate_to_persian):
- hist_len = 0
- client = clients[int(client_choice) - 1]
+ def chat_inf(system_prompt,prompt,history,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem,cust_p):
+ #token max=8192
+ if(len(prompt) > 2000):
+ translatedtext1 = GoogleTranslator(source='auto', target='en').translate(prompt[0:2000])
+ translatedtext2 = GoogleTranslator(source='auto', target='en').translate(prompt[2000:(len(prompt))])
+ prompt = translatedtext1 + translatedtext2
+ else:
+ prompt = GoogleTranslator(source='auto', target='en').translate(prompt)
+ print(client_choice)
+ hist_len=0
+ client=clients[int(client_choice)-1]
  if not history:
  history = []
+ hist_len=0
  if not memory:
  memory = []
-
+ mem_len=0
  if memory:
- for ea in memory[0 - chat_mem:]:
- hist_len += len(str(ea))
- in_len = len(system_prompt + prompt) + hist_len
+ for ea in memory[0-chat_mem:]:
+ hist_len+=len(str(ea))
+ in_len=len(system_prompt+prompt)+hist_len
  
- if (in_len + tokens) > 8000:
- history.append((prompt, "Wait, that's too many tokens, please reduce the 'Chat Memory' value, or reduce the 'Max new tokens' value"))
- yield history, memory
+ if (in_len+tokens) > 8000:
+ history.append((prompt,"Wait, that's too many tokens, please reduce the 'Chat Memory' value, or reduce the 'Max new tokens' value"))
+ yield history,memory
  else:
  generate_kwargs = dict(
+ #temperature=temp,
  max_new_tokens=tokens,
+ #top_p=top_p,
+ #repetition_penalty=rep_p,
+ #do_sample=True,
+ #seed=seed,
  )
  if system_prompt:
- formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", memory[0 - chat_mem:], custom_prompt)
+ formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", memory[0-chat_mem:],cust_p)
  else:
- formatted_prompt = format_prompt(prompt, memory[0 - chat_mem:], custom_prompt)
-
- translated_prompt = translate_to_english(formatted_prompt)
-
+ formatted_prompt = format_prompt(prompt, memory[0-chat_mem:],cust_p)
+
+
  chat = [
- {"role": "user", "content": f"{translated_prompt}"},
- ]
+ { "role": "user", "content": f"{formatted_prompt}" },
+ ]
  
- stream = client.text_generation(translated_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
+ stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
  output = ""
  for response in stream:
  output += response.token.text
- if translate_to_persian:
- output = translate_to_persian_text(output)
- yield [(prompt, output)], memory
- history.append((prompt, output))
- memory.append((prompt, output))
- yield history, memory
+ yield [(prompt,output)],memory
+ history.append((prompt,output))
+ memory.append((prompt,output))
+ yield history,memory
+
+ if VERBOSE==True:
+ print("\n######### HIST "+str(in_len))
+ print("\n######### TOKENS "+str(tokens))
  
  def clear_fn():
- return None, None, None, None
+ return None,None,None,None
+ rand_val=random.randint(1,1111111111111111)
  
- rand_val = random.randint(1, 1111111111111111)
-
- def check_rand(inp, val):
- if inp == True:
- return gr.Slider(label="Seed", minimum=1, maximum=1111111111111111, value=random.randint(1, 1111111111111111))
+ def check_rand(inp,val):
+ if inp==True:
+ return gr.Slider(label="Seed", minimum=1, maximum=1111111111111111, value=random.randint(1,1111111111111111))
  else:
  return gr.Slider(label="Seed", minimum=1, maximum=1111111111111111, value=int(val))
-
- def chat_wrapper(sys_inp, inp, chat_b, memory, client_choice, seed, temp, tokens, top_p, rep_p, chat_mem, custom_prompt, translate_to_persian_checkbox):
- return chat_inf(sys_inp, inp, chat_b, memory, client_choice, seed, temp, tokens, top_p, rep_p, chat_mem, custom_prompt, translate_to_persian_checkbox)
-
+
  with gr.Blocks() as app:
- memory = gr.State()
+ memory=gr.State()
  chat_b = gr.Chatbot(height=500)
  with gr.Group():
  with gr.Row():
@@ -117,27 +121,27 @@ with gr.Blocks() as app:
  btn = gr.Button("Chat")
  with gr.Column(scale=1):
  with gr.Group():
- stop_btn = gr.Button("Stop")
- clear_btn = gr.Button("Clear")
- client_choice = gr.Dropdown(label="Models", type='index', choices=[c for c in models], value=models[0], interactive=True)
- with gr.Accordion("Prompt Format", open=False):
- custom_prompt = gr.Textbox(label="Modify Prompt Format", info="For testing purposes. 'USER_INPUT' is where 'SYSTEM_PROMPT, PROMPT' will be placed", lines=5, value="<start_of_turn>userUSER_INPUT<end_of_turn><start_of_turn>model")
+ stop_btn=gr.Button("Stop")
+ clear_btn=gr.Button("Clear")
+ client_choice=gr.Dropdown(label="Models",type='index',choices=[c for c in models],value=models[0],interactive=True)
+ with gr.Accordion("Prompt Format",open=False):
+ custom_prompt=gr.Textbox(label="Modify Prompt Format", info="For testing purposes. 'USER_INPUT' is where 'SYSTEM_PROMPT, PROMPT' will be placed", lines=5,value="<start_of_turn>userUSER_INPUT<end_of_turn><start_of_turn>model")
  with gr.Column(scale=1):
  with gr.Group():
- translate_to_persian_checkbox = gr.Checkbox(label="Translate to Persian", value=True)
  rand = gr.Checkbox(label="Random Seed", value=True)
- seed = gr.Slider(label="Seed", minimum=1, maximum=1111111111111111, step=1, value=rand_val)
- tokens = gr.Slider(label="Max new tokens", value=1600, minimum=0, maximum=8000, step=64, interactive=True, visible=True, info="The maximum number of tokens")
- temp = gr.Slider(label="Temperature", step=0.01, minimum=0.01, maximum=1.0, value=0.9)
- top_p = gr.Slider(label="Top-P", step=0.01, minimum=0.01, maximum=1.0, value=0.9)
- rep_p = gr.Slider(label="Repetition Penalty", step=0.1, minimum=0.1, maximum=2.0, value=1.0)
- chat_mem = gr.Number(label="Chat Memory", info="Number of previous chats to retain", value=4)
-
- client_choice.change(load_models, client_choice, [chat_b])
- app.load(load_models, client_choice, [chat_b])
-
- chat_sub = inp.submit(check_rand, [rand, seed], seed).then(chat_wrapper, [sys_inp, inp, chat_b, memory, client_choice, seed, temp, tokens, top_p, rep_p, chat_mem, custom_prompt, translate_to_persian_checkbox]).then(memory.update)
- go = btn.click(check_rand, [rand, seed], seed).then(chat_wrapper, [sys_inp, inp, chat_b, memory, client_choice, seed, temp, tokens, top_p, rep_p, chat_mem, custom_prompt, translate_to_persian_checkbox]).then(memory.update)
-
- clear_btn.click(clear_fn, None, [inp, sys_inp, chat_b, memory])
- app.queue(default_concurrency_limit=10).launch()
+ seed=gr.Slider(label="Seed", minimum=1, maximum=1111111111111111,step=1, value=rand_val)
+ tokens = gr.Slider(label="Max new tokens",value=1600,minimum=0,maximum=8000,step=64,interactive=True, visible=True,info="The maximum number of tokens")
+ temp=gr.Slider(label="Temperature",step=0.01, minimum=0.01, maximum=1.0, value=0.9)
+ top_p=gr.Slider(label="Top-P",step=0.01, minimum=0.01, maximum=1.0, value=0.9)
+ rep_p=gr.Slider(label="Repetition Penalty",step=0.1, minimum=0.1, maximum=2.0, value=1.0)
+ chat_mem=gr.Number(label="Chat Memory", info="Number of previous chats to retain",value=4)
+
+
+ client_choice.change(load_models,client_choice,[chat_b])
+ app.load(load_models,client_choice,[chat_b])
+
+ chat_sub=inp.submit(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem,custom_prompt],[chat_b,memory])
+ go=btn.click(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem,custom_prompt],[chat_b,memory])
+
+ clear_btn.click(clear_fn,None,[inp,sys_inp,chat_b,memory])
+ app.queue(default_concurrency_limit=10).launch()
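
In the updated chat_inf above, the incoming prompt is translated to English inline: prompts longer than 2000 characters are split and sent to GoogleTranslator in two calls, then concatenated, presumably to stay under the translator's per-request length limit. A minimal standalone sketch of that chunking logic, using a hypothetical translate_prompt_to_english helper that is not part of app.py:

from deep_translator import GoogleTranslator

def translate_prompt_to_english(prompt, chunk_size=2000):
    # Hypothetical helper (not in app.py) mirroring the inline logic added to chat_inf:
    # prompts longer than chunk_size characters are translated in two pieces and re-joined.
    translator = GoogleTranslator(source='auto', target='en')
    if len(prompt) > chunk_size:
        first = translator.translate(prompt[:chunk_size])
        second = translator.translate(prompt[chunk_size:])
        return first + second
    return translator.translate(prompt)

Calling translate_prompt_to_english(prompt) before format_prompt would reproduce what the committed code does inline at the top of chat_inf.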