Gopala Krishna committed on
Commit
f7e596e
·
1 Parent(s): c0416e8

Working Clean

Browse files
.vs/MyChatGPTDavinci/v17/.wsuo CHANGED
Binary files a/.vs/MyChatGPTDavinci/v17/.wsuo and b/.vs/MyChatGPTDavinci/v17/.wsuo differ
 
.vs/ProjectSettings.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ {
2
+ "CurrentProjectSetting": null
3
+ }
.vs/slnx.sqlite CHANGED
Binary files a/.vs/slnx.sqlite and b/.vs/slnx.sqlite differ
 
app.py CHANGED
@@ -1,10 +1,11 @@
1
-
2
- import os
3
- import openai
4
  import gradio as gr
 
 
 
5
 
6
  try:
7
- openai.api_key = os.environ["OPENAI_API_KEY"]
8
 
9
  except KeyError:
10
  error_message = "System is at capacity right now.Please try again later"
@@ -16,26 +17,121 @@ else:
16
  {"role": "system", "content": "My AI Assistant"},
17
  ]
18
 
19
- messages = [
20
- {"role": "system", "content": ""},
21
- ]
22
-
23
- def chatbot(input):
24
- try:
25
- if input:
26
- messages.append({"role": "user", "content": input})
27
- prompt = "\n".join([f"{m['role']}: {m['content']}" for m in messages])
28
- chat = openai.Completion.create(
29
- engine="text-davinci-003", prompt=prompt, max_tokens=1024, n=1, stop=None, temperature=0.7
30
- )
31
- reply = chat.choices[0].text.strip()
32
- messages.append({"role": "assistant", "content": reply})
33
- return reply
34
- except openai.error.OpenAIError as e:
35
- return "System is at capacity right now.Please try again later"
36
-
37
- inputs = gr.inputs.Textbox(lines=7, label="Query")
38
- outputs = gr.outputs.Textbox(label="Response")
39
-
40
- gr.Interface(fn=chatbot, inputs=inputs, outputs=outputs,
41
- theme=gr.themes.Default(primary_hue="slate")).launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
 
 
2
  import gradio as gr
3
+ import json
4
+ import requests
5
+ import openai
6
 
7
  try:
8
+ openai.api_key = os.environ["OPENAI_API_KEY"]
9
 
10
  except KeyError:
11
  error_message = "System is at capacity right now.Please try again later"
 
17
  {"role": "system", "content": "My AI Assistant"},
18
  ]
19
 
20
+ #Streaming endpoint for OPENAI ChatGPT
21
+ API_URL = "https://api.openai.com/v1/chat/completions"
22
+ top_p_chatgpt = 1.0
23
+ temperature_chatgpt = 1.0
24
+
25
# Predict function for CHATGPT
def chatbot(inputs, chat_counter_chatgpt, chatbot_chatgpt=None, history=None):
    """Stream a ChatGPT reply for *inputs*, yielding incremental UI state.

    Parameters
    ----------
    inputs : str
        The user's new message.
    chat_counter_chatgpt : int
        Turn counter; 0 means this is the first message of the conversation.
    chatbot_chatgpt : list | None
        Prior (user, assistant) pairs as displayed by the gr.Chatbot widget.
    history : list | None
        Flat alternating list of user/assistant texts kept in gr.State.

    Yields
    ------
    tuple
        (chat_pairs, history, chat_counter_chatgpt) after each streamed token.
    """
    # BUG FIX: the original used mutable default arguments ([]), which are
    # shared across every call -- conversation state leaked between requests
    # and between concurrent users. Use None sentinels instead.
    if chatbot_chatgpt is None:
        chatbot_chatgpt = []
    if history is None:
        history = []

    # First-turn payload: just the new user message.
    payload = {
        "model": "gpt-3.5-turbo",
        "messages": [{"role": "user", "content": f"{inputs}"}],
        "temperature": temperature_chatgpt,
        "top_p": top_p_chatgpt,
        "n": 1,
        "stream": True,
        "presence_penalty": 0,
        "frequency_penalty": 0,
    }

    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {openai.api_key}",
    }

    # Follow-up turns: replay the visible chat as alternating user/assistant
    # messages so the model sees the whole conversation.
    if chat_counter_chatgpt != 0:
        messages = []
        for user_text, assistant_text in chatbot_chatgpt:
            messages.append({"role": "user", "content": user_text})
            messages.append({"role": "assistant", "content": assistant_text})
        messages.append({"role": "user", "content": inputs})
        # BUG FIX: the chat/completions endpoint takes "model", not "engine",
        # and "text-davinci-003" is not a chat model -- the original rebuilt
        # the payload with {"engine": "text-davinci-003"}, which the API
        # rejects, breaking every turn after the first. Keep the gpt-3.5-turbo
        # payload and only swap in the full message list.
        payload["messages"] = messages

    chat_counter_chatgpt += 1
    history.append("You asked: " + inputs)

    # POST with stream=True so tokens arrive incrementally as SSE lines.
    response = requests.post(API_URL, headers=headers, json=payload, stream=True)
    token_counter = 0
    partial_words = ""

    counter = 0
    for chunk in response.iter_lines():
        # Skip the very first chunk of the stream.
        if counter == 0:
            counter += 1
            continue
        chunk = chunk.decode()  # decode once; response data is bytes
        if not chunk:
            continue  # keep-alive / blank separator lines
        # Each data line looks like 'data: {...json...}'; strip the 6-char
        # "data: " prefix. The length guard skips short sentinels such as
        # 'data: [DONE]' (12 chars), exactly as the original did.
        if len(chunk) > 13:
            delta = json.loads(chunk[6:])["choices"][0]["delta"]  # parse once
            if "content" in delta:
                partial_words += delta["content"]
                if token_counter == 0:
                    history.append(" " + partial_words)
                else:
                    history[-1] = partial_words
                # Pair up the flat history into (user, assistant) tuples.
                chat = [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2)]
                token_counter += 1
                yield chat, history, chat_counter_chatgpt  # {chatbot: chat, state: history}
99
+
100
+
101
def reset_textbox():
    """Return an update that blanks the query textbox (wired to inputs.submit)."""
    cleared = gr.update(value="")
    return cleared
103
+
104
def reset_chat(chatbot, state):
    """Clear the chat display and its stored history.

    Returns (None, []): None blanks the Chatbot widget, [] resets the state.
    The incoming values are ignored -- this is a pure reset.
    """
    cleared_display = None
    cleared_state = []
    return cleared_display, cleared_state
106
+
107
# ---------------------------------------------------------------------------
# Gradio UI: a query textbox with Submit/Clear buttons above a streaming
# ChatGPT chat window, plus hidden state for history and the turn counter.
# NOTE(review): the stray extra "}" at the end of the css string looks like a
# typo; browsers tolerate it, so it is left untouched here.
# ---------------------------------------------------------------------------
with gr.Blocks(css="""#col_container {width: 1000px; margin-left: auto; margin-right: auto;}
                #chatgpt {height: 400px; overflow: auto;}} """, theme=gr.themes.Default(primary_hue="slate") ) as ChatGPTDavinci:
    with gr.Row():
        with gr.Column(scale=14):
            with gr.Box():
                with gr.Row():
                    with gr.Column(scale=13):
                        # Free-form user query.
                        inputs = gr.Textbox(label="Ask me anything ⤵️ Try: Value of pi" )
                    with gr.Column(scale=1):
                        b1 = gr.Button('Submit', elem_id = 'submit').style(full_width=True)
                        b2 = gr.Button('Clear', elem_id = 'clear').style(full_width=True)
            # Flat alternating user/assistant history fed back into chatbot().
            state_chatgpt = gr.State([])

            with gr.Box():
                with gr.Row():
                    # Streaming chat display; elem_id matches the css rule above.
                    chatbot_chatgpt = gr.Chatbot(elem_id="chatgpt", label='My ChatGPT Davinci')
            # Hidden turn counter telling chatbot() first turn vs follow-up.
            chat_counter_chatgpt = gr.Number(value=0, visible=False, precision=0)

    # Pressing Enter clears the textbox; the Submit button triggers the send.
    inputs.submit(reset_textbox, [], [inputs])

    # Submit streams chatbot()'s yielded values into the display and state.
    # NOTE(review): chatbot() yields 3 values but only 2 outputs are listed,
    # so chat_counter_chatgpt is never updated -- verify intended behavior.
    b1.click( chatbot,
              [ inputs, chat_counter_chatgpt, chatbot_chatgpt, state_chatgpt],
              [chatbot_chatgpt, state_chatgpt],)

    # Clear wipes both the visible chat and the stored history.
    b2.click(reset_chat, [chatbot_chatgpt, state_chatgpt], [chatbot_chatgpt, state_chatgpt])

# queue() is required for generator (streaming) handlers; 16 workers.
ChatGPTDavinci.queue(concurrency_count=16).launch(height= 2500, debug=True)
135
+
136
+
137
+