richardkimsm89 committed on
Commit
619bc26
·
verified ·
1 Parent(s): 602e24b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -20
app.py CHANGED
@@ -8,42 +8,27 @@ client = InferenceClient(model)
8
 
9
  def fn(
10
  prompt,
11
- #history: list[tuple[str, str]],
12
  history,
13
- #system_prompt,
14
  max_tokens,
15
  temperature,
16
  top_p,
17
  ):
18
- #messages = [{"role": "system", "content": system_prompt}]
19
- #history.append({"role": "user", "content": prompt})
20
 
21
- messages = [{"role": "user", "content": prompt}]
22
 
23
  history.append(messages[0])
24
 
25
- #for val in history:
26
- # if val[0]:
27
- # messages.append({"role": "user", "content": val[0]})
28
- # if val[1]:
29
- # messages.append({"role": "assistant", "content": val[1]})
30
-
31
- #messages.append({"role": "user", "content": prompt})
32
-
33
  stream = client.chat.completions.create(
34
  model = model,
35
- #messages = messages,
36
  messages = history,
37
  max_tokens = max_tokens,
38
  temperature = temperature,
39
  top_p = top_p,
40
  stream = True
41
  )
42
-
43
- #response = ""
44
- #for chunk in stream:
45
- # response += chunk.choices[0].delta.content
46
- #return response
47
 
48
  chunks = []
49
  for chunk in stream:
@@ -54,7 +39,7 @@ app = gr.ChatInterface(
54
  fn = fn,
55
  type = "messages",
56
  additional_inputs = [
57
- #gr.Textbox(value="You are a helpful assistant.", label="System Prompt"),
58
  gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max Tokens"),
59
  gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
60
  gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-P"),
 
8
 
9
  def fn(
10
  prompt,
 
11
  history,
12
+ system_prompt,
13
  max_tokens,
14
  temperature,
15
  top_p,
16
  ):
17
+ messages = [{"role": "system", "content": system_prompt}]
18
+ messages.append({"role": "user", "content": prompt})
19
 
20
+ #messages = [{"role": "user", "content": prompt}]
21
 
22
  history.append(messages[0])
23
 
 
 
 
 
 
 
 
 
24
  stream = client.chat.completions.create(
25
  model = model,
 
26
  messages = history,
27
  max_tokens = max_tokens,
28
  temperature = temperature,
29
  top_p = top_p,
30
  stream = True
31
  )
 
 
 
 
 
32
 
33
  chunks = []
34
  for chunk in stream:
 
39
  fn = fn,
40
  type = "messages",
41
  additional_inputs = [
42
+ gr.Textbox(value="You are a helpful assistant.", label="System Prompt"),
43
  gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max Tokens"),
44
  gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
45
  gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-P"),