Reality123b committed
Commit 464da3a · verified · 1 Parent(s): eb87efd

Update app.py

Files changed (1)
  1. app.py +20 -14
app.py CHANGED
@@ -1,12 +1,15 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
 
-# Initialize the InferenceClient
+"""
+For more information on huggingface_hub Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
+"""
 client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
+
 def respond(
     message,
-    history: list[dict],  # Use a list of dictionaries instead of tuples
+    history: list[tuple[str, str]],
     system_message,
     max_tokens,
     temperature,
@@ -15,13 +18,15 @@ def respond(
     messages = [{"role": "system", "content": system_message}]
 
     for val in history:
-        messages.append({"role": val['role'], "content": val['content']})
+        if val[0]:
+            messages.append({"role": "user", "content": val[0]})
+        if val[1]:
+            messages.append({"role": "assistant", "content": val[1]})
 
     messages.append({"role": "user", "content": message})
 
     response = ""
-
-    # Use chat_completion to get responses
+
     for message in client.chat_completion(
         messages,
         max_tokens=max_tokens,
@@ -30,15 +35,17 @@
         top_p=top_p,
     ):
         token = message.choices[0].delta.content
+
         response += token
         yield response
 
-# Create the Gradio Interface for API
-api_interface = gr.Interface(
-    fn=respond,
-    inputs=[
-        gr.Textbox(label="Message"),
-        gr.JSON(label="History"),
+
+"""
+For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
+"""
+demo = gr.ChatInterface(
+    respond,
+    additional_inputs=[
         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
@@ -50,9 +57,8 @@ api_interface = gr.Interface(
            label="Top-p (nucleus sampling)",
        ),
    ],
-    outputs=gr.Textbox(label="Response"),
 )
 
-# Launch the API
+
 if __name__ == "__main__":
-    api_interface.launch(server_name="0.0.0.0", server_port=7860, share=False)  # Set share=False to avoid Hugging Face Spaces
+    demo.launch(share=True)
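
Note: the updated respond() now expects Gradio's tuple-style chat history (one (user, assistant) pair per turn) rather than a list of role/content dicts, and rebuilds the message list that client.chat_completion() consumes. As a quick illustration of that mapping, here is a minimal standalone sketch; it is not part of the commit, and the helper name history_to_messages and the sample history are hypothetical:

    def history_to_messages(history, system_message, message):
        # Mirrors the conversion in the updated respond(): each (user, assistant)
        # pair contributes up to two role/content messages, and the new user
        # message is appended last.
        messages = [{"role": "system", "content": system_message}]
        for user_turn, assistant_turn in history:
            if user_turn:
                messages.append({"role": "user", "content": user_turn})
            if assistant_turn:
                messages.append({"role": "assistant", "content": assistant_turn})
        messages.append({"role": "user", "content": message})
        return messages

    history = [("Hi there", "Hello! How can I help?")]
    print(history_to_messages(history, "You are a friendly Chatbot.", "Tell me a joke"))
    # [{'role': 'system', 'content': 'You are a friendly Chatbot.'},
    #  {'role': 'user', 'content': 'Hi there'},
    #  {'role': 'assistant', 'content': 'Hello! How can I help?'},
    #  {'role': 'user', 'content': 'Tell me a joke'}]

Because each slot is checked for truthiness before being appended, a turn whose assistant reply has not arrived yet contributes only its user message and does not inject an empty assistant message.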