vericudebuget committed on
Commit
93655a0
·
verified ·
1 Parent(s): 8e2d2a4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -1
app.py CHANGED
@@ -13,7 +13,28 @@ def format_prompt(message, history):
13
  return prompt
14
 
15
  def generate(prompt, history, system_prompt, temperature=0.9, max_new_tokens=9048, top_p=0.95, repetition_penalty=1.0):
16
- # ... existing code ...
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
17
 
18
  for response in stream:
19
  output += response.token.text
 
13
  return prompt
14
 
15
  def generate(prompt, history, system_prompt, temperature=0.9, max_new_tokens=9048, top_p=0.95, repetition_penalty=1.0):
16
def generate(prompt, history, system_prompt, temperature=0.9, max_new_tokens=9048, top_p=0.95, repetition_penalty=1.0):
    """Stream a completion from the remote text-generation client.

    Yields the accumulated output string after every received token so the
    caller (presumably a Gradio chat UI — confirm against the surrounding app)
    can render partial responses as they arrive.
    """
    # Guard against a zero/negative temperature, which the sampling backend rejects.
    temperature = max(float(temperature), 1e-2)
    top_p = float(top_p)

    sampling_options = {
        "temperature": temperature,
        "max_new_tokens": max_new_tokens,
        "top_p": top_p,
        "repetition_penalty": repetition_penalty,
        "do_sample": True,
        # NOTE(review): fixed seed makes sampling deterministic per prompt — confirm intended.
        "seed": 42,
    }

    full_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
    token_stream = client.text_generation(
        full_prompt,
        stream=True,
        details=True,
        return_full_text=False,
        **sampling_options,
    )

    text_so_far = ""
    for chunk in token_stream:
        text_so_far += chunk.token.text
        yield text_so_far
    return text_so_far
37
+
38
 
39
  for response in stream:
40
  output += response.token.text