AWeirdDev committed
Commit 2313a0d · verified · 1 Parent(s): d9db5d8

Create app.py

Files changed (1):
  1. app.py +94 -0
app.py ADDED
@@ -0,0 +1,94 @@
# THE CODE BELOW IS NOT MY OWN.
# IT IS MODIFIED FROM THE SPACE: ehristoforu/mistral-7b-chat
# huggingface.co/spaces/ehristoforu/mistral-7b-chat

from huggingface_hub import InferenceClient
import gradio as gr

# Serverless Inference API client for the instruct-tuned Mistral model.
client = InferenceClient(
    "mistralai/Mistral-7B-Instruct-v0.2"
)


def format_prompt(message, history):
    """Wrap the chat history and the new message in Mistral's [INST] chat format."""
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt


def generate(
    prompt, history, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
):
    # Clamp temperature away from zero: the API rejects non-positive values.
    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2
    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )

    formatted_prompt = format_prompt(prompt, history)

    # Stream tokens from the endpoint, yielding the accumulated text so the
    # chatbot panel updates incrementally.
    stream = client.text_generation(
        formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False
    )
    output = ""

    for response in stream:
        output += response.token.text
        yield output


# Sampling controls shown under "Additional inputs"; their order must match
# the keyword arguments of generate().
additional_inputs = [
    gr.Slider(
        label="Temperature",
        value=0.9,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values produce more diverse outputs",
    ),
    gr.Slider(
        label="Max new tokens",
        value=256,
        minimum=0,
        maximum=1048,
        step=64,
        interactive=True,
        info="The maximum number of new tokens",
    ),
    gr.Slider(
        label="Top-p (nucleus sampling)",
        value=0.90,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values sample more low-probability tokens",
    ),
    gr.Slider(
        label="Repetition penalty",
        value=1.2,
        minimum=1.0,
        maximum=2.0,
        step=0.05,
        interactive=True,
        info="Penalize repeated tokens",
    ),
]


gr.ChatInterface(
    fn=generate,
    chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
    additional_inputs=additional_inputs,
    title="Mistral 7B",
).launch(show_api=True)
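
For reference, a quick check of the prompt string format_prompt builds for a one-turn history; the messages are invented examples, and the expected result follows directly from the concatenation in the function above:

>>> format_prompt("How are you?", [("Hello", "Hi there!")])
'<s>[INST] Hello [/INST] Hi there!</s> [INST] How are you? [/INST]'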