simonraj committed on
Commit
eea2924
·
1 Parent(s): 00e7681

Upload app3.py

Files changed (1)
  1. app3.py +169 -0
app3.py ADDED
@@ -0,0 +1,169 @@
+ import gradio as gr
+ import os
+ import json
+ import requests
+ from dotenv import load_dotenv
+ from thinking_routines import thinking_routine_prompt, thinking_routine_examples
+
+ load_dotenv()
+
+
+ # Streaming endpoint
+ API_URL = "https://api.openai.com/v1/chat/completions"  # os.getenv("API_URL") + "/generate_stream"
+
+ # Hugging Face-provided GPT-4 OpenAI API key, read from the environment
+ OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
+
+ # Inference function
+ def predict(system_msg, inputs, top_p, temperature, chat_counter, chatbot=[], history=[]):
+
+     headers = {
+         "Content-Type": "application/json",
+         "Authorization": f"Bearer {OPENAI_API_KEY}"
+     }
+     print(f"system message is ^^ {system_msg}")
+     if system_msg.strip() == '':
+         initial_message = [{"role": "user", "content": f"{inputs}"},]
+         multi_turn_message = []
+     else:
+         initial_message = [{"role": "system", "content": system_msg},
+                            {"role": "user", "content": f"{inputs}"},]
+         multi_turn_message = [{"role": "system", "content": system_msg},]
+
+     if chat_counter == 0:
+         payload = {
+             "model": "gpt-4",
+             "messages": initial_message,
+             "temperature": temperature,
+             "top_p": top_p,
+             "n": 1,
+             "stream": True,
+             "presence_penalty": 0,
+             "frequency_penalty": 0,
+         }
+         print(f"chat_counter - {chat_counter}")
+     else:  # chat_counter != 0: rebuild the conversation from the chatbot history
+         messages = multi_turn_message  # of the form [{"role": "system", "content": system_msg},] or []
+         for data in chatbot:
+             user = {}
+             user["role"] = "user"
+             user["content"] = data[0]
+             assistant = {}
+             assistant["role"] = "assistant"
+             assistant["content"] = data[1]
+             messages.append(user)
+             messages.append(assistant)
+         temp = {}
+         temp["role"] = "user"
+         temp["content"] = inputs
+         messages.append(temp)
+         # payload for every follow-up turn
+         payload = {
+             "model": "gpt-4",
+             "messages": messages,  # of the form [{"role": "user", "content": f"{inputs}"}]
+             "temperature": temperature,
+             "top_p": top_p,
+             "n": 1,
+             "stream": True,
+             "presence_penalty": 0,
+             "frequency_penalty": 0,}
+
+     chat_counter += 1
+
+     history.append(inputs)
+     print(f"Logging : payload is - {payload}")
+     # make a POST request to the API endpoint using requests.post, passing in stream=True
+     response = requests.post(API_URL, headers=headers, json=payload, stream=True)
+     print(f"Logging : response code - {response}")
+     token_counter = 0
+     partial_words = ""
+
+     counter = 0
+     for chunk in response.iter_lines():
+         # skip the first chunk
+         if counter == 0:
+             counter += 1
+             continue
+         # check whether each line is non-empty (the response data arrives as bytes)
+         if chunk.decode():
+             chunk = chunk.decode()
+             # ignore "data: [DONE]" and deltas without content
+             if len(chunk) > 12 and "content" in json.loads(chunk[6:])['choices'][0]['delta']:
+                 partial_words = partial_words + json.loads(chunk[6:])['choices'][0]["delta"]["content"]
+                 if token_counter == 0:
+                     history.append(" " + partial_words)
+                 else:
+                     history[-1] = partial_words
+                 chat = [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2)]  # convert to a list of (user, bot) tuples
+                 token_counter += 1
+                 yield chat, history, chat_counter, response  # resembles {chatbot: chat, state: history}
+
+ # Reset the textbox to blank
+ def reset_textbox():
+     return gr.update(value='')
+
+ # Set a component to visible=False
+ def set_visible_false():
+     return gr.update(visible=False)
+
+ # Set a component to visible=True
+ def set_visible_true():
+     return gr.update(visible=True)
+
+
+
+ title = """<h1 align="center">🎉📚 Super Learning Buddy for Primary School Students 🤖✨</h1>"""
+
+ # Using info to add additional information about the System message in GPT-4
+ system_msg_info = """🤖✨ Choose your own learning buddy from the list! 📚🚀"""
+
+ # Modifying the existing Gradio Soft theme
+ theme = gr.themes.Soft(primary_hue="indigo", secondary_hue="blue", neutral_hue="blue",
+                        text_size=gr.themes.sizes.text_lg)
+
+ with gr.Blocks(css="""#col_container { margin-left: auto; margin-right: auto;} #chatbot {height: 520px; overflow: auto;}""",
+                theme=theme) as demo:
+     gr.HTML(title)
+     gr.HTML("""<h3 align="center">🎉🚀 Meet your fun Learning Buddy, ready for all thinking adventures! 🧠✨</h3>""")
+
+     with gr.Column(elem_id="col_container"):
+         # GPT-4 API key is provided by Hugging Face
+         with gr.Accordion(label="Select your Coach:", open=False):
+             system_msg = gr.Textbox(label="Choose the type of Coach to help guide you", info=system_msg_info, value="")
+             accordion_msg = gr.HTML(value="🚧 To set the System message you will have to refresh the app", visible=False)
+
+         with gr.Accordion(label="Examples of Coaches using thinking routines:", open=False):
+             gr.Examples(
+                 examples=[[thinking_routine_prompt(subject, routine)] for subject, routine in thinking_routine_examples],
+                 inputs=system_msg,
+             )
+
+         chatbot = gr.Chatbot(label='Learning Buddy', elem_id="chatbot")
+         inputs = gr.Textbox(placeholder="Hi there!", label="Type an input and press Enter")
+         state = gr.State([])
+
+         with gr.Row():
+             with gr.Column(scale=7):
+                 b1 = gr.Button().style(full_width=True)
+             with gr.Column(scale=3):
+                 server_status_code = gr.Textbox(label="Status code from OpenAI server")
+
+         # top_p, temperature
+         with gr.Accordion("Parameters", open=False):
+             top_p = gr.Slider(minimum=0, maximum=1.0, value=1.0, step=0.05, interactive=True, label="Top-p (nucleus sampling)")
+             temperature = gr.Slider(minimum=0, maximum=2.0, value=1.0, step=0.1, interactive=True, label="Temperature")
+             chat_counter = gr.Number(value=0, visible=False, precision=0)
+
+     # Event handling
+     inputs.submit(predict, [system_msg, inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code],)
+     b1.click(predict, [system_msg, inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code],)
+
+     inputs.submit(set_visible_false, [], [system_msg])
+     b1.click(set_visible_false, [], [system_msg])
+     inputs.submit(set_visible_true, [], [accordion_msg])
+     b1.click(set_visible_true, [], [accordion_msg])
+
+     b1.click(reset_textbox, [], [inputs])
+     inputs.submit(reset_textbox, [], [inputs])
+
+ demo.queue(max_size=99, concurrency_count=20).launch(share=True)
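
app3.py depends on two pieces that are not part of this commit: an OPENAI_API_KEY read from the environment via python-dotenv, and a local thinking_routines module providing thinking_routine_prompt and thinking_routine_examples. Based only on how they are used above (a prompt builder plus (subject, routine) pairs fed to gr.Examples), a minimal sketch of the assumed module could look like the following; the function body and the example pairs are hypothetical placeholders, not the real implementation.

# thinking_routines.py (hypothetical sketch; the real module is not included in this commit)

def thinking_routine_prompt(subject, routine):
    # Build the system message that turns GPT-4 into a subject coach
    # guiding the student through the chosen thinking routine.
    return (
        f"You are a friendly {subject} coach for primary school students. "
        f"Guide the learner step by step through the '{routine}' thinking routine, "
        f"asking one question at a time."
    )

# (subject, routine) pairs consumed by gr.Examples in app3.py
thinking_routine_examples = [
    ("Science", "See-Think-Wonder"),
    ("English", "Claim-Support-Question"),
]

For local runs, a .env file in the app directory with a line of the form OPENAI_API_KEY=<your key> is enough for load_dotenv() to pick up the key; on Hugging Face Spaces the same variable would typically be set as a repository secret.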