Update app.py
app.py
CHANGED
@@ -3,6 +3,7 @@ import pandas as pd
from io import StringIO
import openai
import pytesseract
+import random



@@ -12,7 +13,8 @@ import numpy as np
import time

# Set up OpenAI API key
-openai.api_key = os.environ["CHATGPT_API_KEY"]
+from api_key import api_key
+openai.api_key = api_key#os.environ["CHATGPT_API_KEY"]

Init_system_prompt = "You are an AI Assistant that tries to teach kids various subjects. You are given learning material and you task is to ask questions given the material and then you also grade answers and give feedback how to improve the answers"
system_message = {"role": "system", "content": Init_system_prompt}
@@ -26,7 +28,7 @@ path = os.system("which tesseract >> path.txt")
with open("path.txt", 'r') as file:
    tesseract_path = file.read().replace('\n', '')

-
+
########### TAB 1 (UPLOAD) FUNCTIONS #############################

def print_files(files):
@@ -56,7 +58,7 @@ def create_data(files):
def user(user_message, history):
    return "", history + [[user_message, None]]

-def bot(history, messages_history, system_prompt, teksti_contexti, temperature, max_tokens, chatgpt_model):
+def bot(history, messages_history, system_prompt, teksti_contexti, temperature, max_tokens, chatgpt_model, max_context_size_for_question):
    user_message = history[-1][0]

    bot_message, messages_history = ask_gpt(user_message, messages_history, system_prompt, teksti_contexti, temperature, max_tokens,chatgpt_model)
@@ -65,10 +67,13 @@ def bot(history, messages_history, system_prompt, teksti_contexti, temperature,
    time.sleep(0.2)
    return history, messages_history, str(messages_history)

-def ask_gpt(message, messages_history,
+def ask_gpt(message, messages_history, system_prompt, context, temperature, max_tokens, chatgpt_model, max_context_size_for_question):
+    messages_history, system_prompt, _ = init_history(messages_history, system_prompt)
    if len(messages_history) < 1:
        messages_history = [{"role": "system", "content": system_prompt}]
-
+    max_possible_position = len(context)- max_context_size_for_question
+    start = random.randint(0,max_possible_position)
+    messages_history += [{"role": "user", "content": context[start:start+max_context_size_for_question] + '\n Please ask a question about the previous paragramph?'}]
    print(messages_history)
    response = openai.ChatCompletion.create(
        model=chatgpt_model,
@@ -117,8 +122,9 @@ with gr.Blocks() as demo:
    with gr.Row():
        system_prompt = gr.Textbox(value=Init_system_prompt, label='Insert system message here')
        chatgpt_model = gr.Dropdown(choices=["gpt-3.5-turbo", "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613"], value='gpt-3.5-turbo',label='ChatGPT model to use', interactive=True)
-        temperature = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, value=0.
+        temperature = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, value=0.7, label='temperature')
        max_tokens = gr.Slider(minimum=10, maximum=600, step=10, value=100, label='Max tokens')
+        max_context_size_for_question = gr.Slider(minimum=10, maximum=600, step=10, value=100, label='Max context for questions')
    with gr.Row():
        chatbot = gr.Chatbot(label='ChatGPT Chat')
        state = gr.State([])
@@ -143,7 +149,7 @@ with gr.Blocks() as demo:

    # TAB 3 (CHAT) Interactive elements:
    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
-        bot, [chatbot, state, system_prompt, teksti_contexti, temperature, max_tokens, chatgpt_model], [chatbot, state, msg_log]
+        bot, [chatbot, state, system_prompt, teksti_contexti, temperature, max_tokens, chatgpt_model, max_context_size_for_question], [chatbot, state, msg_log]
    )
    clear.click(lambda: None, None, chatbot, queue=False).success(init_history, [state, system_prompt], [state, system_prompt, msg_log])

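The new `from api_key import api_key` line assumes a small api_key.py module next to app.py that is not part of this diff. A minimal sketch of what such a module might look like; the fallback to the CHATGPT_API_KEY environment variable mirrors the expression left in the comment on the following line of the commit, and the placeholder default is illustrative only.

# api_key.py -- hypothetical companion module satisfying `from api_key import api_key`.
import os

# Fall back to the CHATGPT_API_KEY environment variable referenced in the
# commented-out expression; replace the default with a real key for local runs.
api_key = os.environ.get("CHATGPT_API_KEY", "sk-...")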
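The main behavioral change is in ask_gpt: each call now cuts a random window of at most max_context_size_for_question characters out of the uploaded learning material and asks the model to pose a question about it. Below is a minimal standalone sketch of that selection step with illustrative names; the max(0, ...) clamp is an added guard for material shorter than the window (random.randint raises ValueError when its upper bound is negative) and is not taken from the diff above.

import random

def pick_context_window(context: str, max_context_size_for_question: int) -> str:
    """Return a random slice of `context` at most `max_context_size_for_question` characters long."""
    # Guard against material shorter than the requested window.
    max_possible_position = max(0, len(context) - max_context_size_for_question)
    start = random.randint(0, max_possible_position)
    return context[start:start + max_context_size_for_question]

# Example: build the question-asking prompt around the random slice.
material = "Photosynthesis is the process by which plants convert light into chemical energy. " * 5
chunk = pick_context_window(material, 100)
question_request = chunk + "\nPlease ask a question about the previous paragraph."
print(question_request)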
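The UI-side changes follow the existing Gradio Blocks pattern: the new slider is declared next to the temperature and max-tokens sliders, then appended to the inputs list of the chained bot call so its current value arrives as an extra positional argument. A stripped-down sketch of that wiring, assuming the Gradio 3.x Blocks API the app already uses; the echo bot here stands in for the real OpenAI call.

import gradio as gr

def user(user_message, history):
    # Append the user's turn; the bot reply is filled in by the chained callback.
    return "", history + [[user_message, None]]

def bot(history, max_context_size_for_question):
    # Echo the slider value instead of calling the OpenAI API.
    history[-1][1] = f"max context size this turn: {max_context_size_for_question}"
    return history

with gr.Blocks() as demo:
    max_context_size_for_question = gr.Slider(minimum=10, maximum=600, step=10, value=100,
                                              label='Max context for questions')
    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    # The slider component in the inputs list is delivered to bot() as its current value.
    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
        bot, [chatbot, max_context_size_for_question], chatbot
    )

demo.launch()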