Update app.py
app.py CHANGED
@@ -9,7 +9,7 @@ Init_system_prompt = "You are an AI Assistant that tries to teach kids various s
 system_message = {"role": "system", "content": Init_system_prompt}

 system_prompts = {
-    "English": "You are an AI Assistant that tries to teach kids various subjects. You are given learning material and you task is to ask questions given the material and then you also grade answers and give feedback
+    "English": "You are an AI Assistant that tries to teach kids various subjects. You are given learning material and you task is to ask questions given the material and then you also grade answers and give feedback.",
     "Finnish": "Olet tekoälyavustaja jonka tehtävänä on auttaa lapsia oppimaan koulussa. Sinulle annetaan oppimateriaalia tekstinä ja sinun tehtäväsi on kysyä kysymyksiä annetusta tekstistä, arvostella vastauksia ja antaa palautetta kuinka vastauksia voidaan parantaa."
 }

@@ -23,11 +23,36 @@ lang_mapping = {
     "Finnish": "fin"
 }

+grading_button_mapping = {
+    "English": gr.Button(value="Grade my answer"),
+    "Finnish": gr.Button(value="Arvostele vastaukseni")
+}
+
+new_question_button_mapping = {
+    "English": gr.Button(value="New question"),
+    "Finnish": gr.Button(value="Uusi kysymys")
+}
+
+clear_button__mapping = {
+    "English": gr.Button(value="Clear messages"),
+    "Finnish": gr.Button(value="Tyhjennä viestit")
+}
+
+grading_prompt_start_mapping = {
+    "English": "You are given question and answer. Give grading between one to ten with reasoning and give feedback how answer should be improved to get better grade. Here are rules how to format your answer, it is really important to stick to these rules:\n Your answer should be devided into the following sections separated by newline: Grading, feedback, reasoning for feedback. \n Do not repeat Answer or Question.",
+    "Finnish": "Saat kysymyksen ja vastauksen. Arvostele vastaus asteikolla yhdestä kymmeneen perusteluineen ja anna palautetta kuinka vastausta tulisi parantaa paremman arvosanan saamiseksi. \n Tässä ovat ohjeet vastauksesi muotoilulle:\n Vastauksesi tulee olla jaettuna seuraaviin osioihin rivinvaihdolla erotettuna: Arvostelu, palaute, perustelut palautteelle. \n Älä toista seuraavia osioita: Kysymys, Vastaus."
+}
+
+new_question_prompt = {
+    "English": "Next question please",
+    "Finnish": "Saisinko uuden kysymyksen"
+}
+
 import os
 from PIL import Image

 import pytesseract
-
+os.system("rm -f path.txt")
 path = os.system("which tesseract >> path.txt")
 with open("path.txt", 'r') as file:
     tesseract_path = file.read().replace('\n', '')
@@ -37,9 +62,7 @@ with open("path.txt", 'r') as file:

 def print_files(files):
     for file in files:
-        print(file.__dir__())
         print(file.name)
-        print(file.file)


 def create_data(files, language_selection):
@@ -52,46 +75,83 @@ def create_data(files, language_selection):
             print(e)
             pass
     system_prompt = system_prompts[language_selection]
+

-    return question_context, system_prompt
+    return question_context, system_prompt, new_question_button_mapping[language_selection], clear_button__mapping[language_selection]


-########### TAB
+########### TAB 2 (CHAT) FUNCTIONS #############################

 def user(user_message, history):
     return "", history + [[user_message, None]]

+def add_new_question(language_selection, history):
+    return history + [[new_question_prompt[language_selection], None]]
+
 def bot(history, messages_history, api_key, system_prompt, teksti_contexti, temperature, max_tokens, chatgpt_model, max_context_size_for_question, language_selection):
     user_message = history[-1][0]
-
     bot_message, messages_history = ask_gpt(user_message, messages_history, api_key, system_prompt, teksti_contexti, temperature, max_tokens, chatgpt_model, max_context_size_for_question, language_selection)
     messages_history += [{"role": "assistant", "content": bot_message}]
     history[-1][1] = bot_message
-
-
+    return history, messages_history
+
+

 def ask_gpt(message, messages_history, api_key, system_prompt, context, temperature, max_tokens, chatgpt_model, max_context_size_for_question, language_selection):
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    if message == new_question_prompt[language_selection]:
+        max_possible_position = len(context)- max_context_size_for_question
+        start = random.randint(0,max_possible_position)
+        messages_history += [{"role": "user", "content": context[start:start+max_context_size_for_question] + question_strings[language_selection]}]
+
+        openai.api_key = api_key
+        response = openai.ChatCompletion.create(
+            model=chatgpt_model,
+            messages=messages_history,
+            temperature=temperature,
+            max_tokens=max_tokens
+        )
+        return response['choices'][0]['message']['content'], messages_history
+    else:
+        if len(messages_history) <= 1:
+            max_possible_position = len(context)- max_context_size_for_question
+            start = random.randint(0,max_possible_position)
+            messages_history += [{"role": "user", "content": context[start:start+max_context_size_for_question] + question_strings[language_selection]}]
+
+            openai.api_key = api_key
+            response = openai.ChatCompletion.create(
+                model=chatgpt_model,
+                messages=messages_history,
+                temperature=temperature,
+                max_tokens=max_tokens
+            )
+            return response['choices'][0]['message']['content'], messages_history
+        else:
+            question = messages_history[-1]['content']
+            if language_selection == 'English':
+                prompt_start = grading_prompt_start_mapping[language_selection]
+                prompt_end = 'Question: ' + question + '\n Answer: ' + message + '\n'
+                full_prompt = prompt_start + prompt_end
+            elif language_selection == 'Finnish':
+                prompt_start = grading_prompt_start_mapping[language_selection]
+                prompt_end = 'Kysymys: ' + question + '\n Vastaus: ' + message + '\n'
+                full_prompt = prompt_start + prompt_end
+
+            messages_history += [{"role": "user", "content": full_prompt}]
+
+            openai.api_key = api_key
+            response = openai.ChatCompletion.create(
+                model=chatgpt_model,
+                messages=messages_history,
+                temperature=temperature,
+                max_tokens=max_tokens
+            )
+            return prompt_end.replace('\n', '<br>') + response['choices'][0]['message']['content'].replace('\n', '<br>'), messages_history

 def init_history(messages_history, system_prompt):
     messages_history = []
     messages_history += [{"role": "system", "content": system_prompt}]
-    msg_log = gr.Textbox
-    system_prompt = gr.Textbox
+    msg_log = gr.Textbox(value="Tähän tulee message history")
+    system_prompt = gr.Textbox(value=system_prompt, label='Insert system message here')
     return messages_history, system_prompt, msg_log


@@ -114,42 +174,46 @@ with gr.Blocks() as demo:
         with gr.Row():
             teksti_contexti = gr.Textbox(value='Tähän tulee konteksti', label='Created context')

-    ############# TAB
+    ############# TAB 2 ##########################

     with gr.Tab("Chat"):
         gr.Markdown("""<h1><center>ChatGPT
         ChatBot with Gradio and OpenAI</center></h1>
         """)
+        new_question_state_msg = gr.State(value=[["New_question", None]])#, hidden=True)
         with gr.Row():
             system_prompt = gr.Textbox(value=Init_system_prompt, label='Insert system message here')
             chatgpt_model = gr.Dropdown(choices=["gpt-3.5-turbo", "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613"], value='gpt-3.5-turbo',label='ChatGPT model to use', interactive=True)
-            temperature = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, value=0.7, label='
-            max_tokens = gr.Slider(minimum=10, maximum=600, step=10, value=
-            max_context_size_for_question = gr.Slider(minimum=
+            temperature = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, value=0.7, label='Temperature')
+            max_tokens = gr.Slider(minimum=10, maximum=600, step=10, value=200, label='Max tokens')
+            max_context_size_for_question = gr.Slider(minimum=200, maximum=2000, step=50, value=300, label='Max context for questions')
         with gr.Row():
             chatbot = gr.Chatbot(label='ChatGPT Chat')
             state = gr.State([])
         with gr.Row():
             msg = gr.Textbox()
         with gr.Row():
+            new_question = gr.Button(value="New_question")
             clear = gr.Button("Clear")
-        with gr.Row():
-            msg_log = gr.Textbox("Tähän tulee message history", label='Message history')


         with gr.Accordion("Klikkaa avataksesi ohjeet"):
             gr.Markdown("Ohjeet tulee tänne")


-    # TAB 1 (
-    create_context_btn.click(create_data, [files, language_selection], [teksti_contexti, system_prompt])
+    # TAB 1 (CREATE CONTEXT) Interactive elements:
+    create_context_btn.click(create_data, [files, language_selection], [teksti_contexti, system_prompt, new_question, clear])


-    # TAB
+    # TAB 2 (CHAT) Interactive elements:
     msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
-        bot, [chatbot, state, api_key, system_prompt, teksti_contexti, temperature, max_tokens, chatgpt_model, max_context_size_for_question, language_selection], [chatbot, state
+        bot, [chatbot, state, api_key, system_prompt, teksti_contexti, temperature, max_tokens, chatgpt_model, max_context_size_for_question, language_selection], [chatbot, state]
     )
-
+    new_question.click(add_new_question, [language_selection, chatbot], [chatbot], queue=False).then(
+        bot, [chatbot, state, api_key, system_prompt, teksti_contexti, temperature, max_tokens, chatgpt_model, max_context_size_for_question, language_selection], [chatbot, state]
+    )
+
+    clear.click(lambda: None, None, chatbot, queue=False).success(init_history, [state, system_prompt], [state, system_prompt])


 demo.launch(debug=True)
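
A note on the tesseract lookup this commit keeps (os.system("which tesseract >> path.txt") plus the added "rm -f path.txt" cleanup): the same path can be resolved with the standard library, without shelling out or writing a temp file. A minimal sketch, assuming tesseract is on PATH and that pytesseract is pointed at the binary via pytesseract.pytesseract.tesseract_cmd (the usual pytesseract hook; the actual assignment in app.py is outside this diff):

import shutil
import pytesseract

# Resolve the tesseract binary directly; shutil.which returns None when it is not
# on PATH, so the app can fail with a clear error instead of reading an empty path.txt.
tesseract_path = shutil.which("tesseract")
if tesseract_path is None:
    raise RuntimeError("tesseract binary not found on PATH")
pytesseract.pytesseract.tesseract_cmd = tesseract_path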
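On the new ask_gpt branches: both the "new question" path and the first-message path take a random slice of the OCR'd material, context[start:start+max_context_size_for_question], and append it to the message history before calling openai.ChatCompletion.create. That sampling assumes random is imported elsewhere in app.py and that the context is longer than max_context_size_for_question; otherwise random.randint(0, negative) raises ValueError. A self-contained sketch of that step with a guard for short contexts (the helper name is illustrative, not from the commit):

import random

def sample_context_window(context, window):
    # Return a random slice of `context` at most `window` characters long.
    if len(context) <= window:
        # Short material: use all of it rather than letting randint(0, negative) raise.
        return context
    start = random.randint(0, len(context) - window)
    return context[start:start + window]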