Update app.py
app.py CHANGED
@@ -14,15 +14,31 @@ openai.api_key = 'sk-proj-5-B02aFvzHZcTdHVCzOm9eaqJ3peCGuj1498E9rv2HHQGE6ytUhgfx
 dataset = load_dataset("rungalileo/ragbench", "hotpotqa", split='train')
 logger.info("Dataset loaded successfully")
 
+import gradio as gr
+import openai
+from datasets import load_dataset
+import logging
+
+# Set up logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+# Initialize OpenAI API key
+openai.api_key = 'YOUR_API_KEY'
+
+# Load just one dataset to start
+dataset = load_dataset("rungalileo/ragbench", "hotpotqa", split='train')
+logger.info("Dataset loaded successfully")
+
 def process_query(query):
     try:
-        # Get
-        context = dataset['documents'][0]
+        # Get relevant documents
+        context = dataset['documents'][0]
 
         response = openai.chat.completions.create(
             model="gpt-3.5-turbo",
             messages=[
-                {"role": "system", "content": "You are a
+                {"role": "system", "content": "You are a confident expert assistant. Provide direct, clear answers based on the available information. Focus on what you can determine from the context and suggest exploring related topics when needed. Never apologize - maintain a positive, solution-focused tone."},
                 {"role": "user", "content": f"Context: {context}\nQuestion: {query}"}
             ],
             max_tokens=300,
@@ -32,7 +48,7 @@ def process_query(query):
         return response.choices[0].message.content.strip()
 
     except Exception as e:
-        return f"
+        return f"Let's explore information about {query} from other sections of our database. What specific aspects would you like to know more about?"
 
 # Create simple Gradio interface
 demo = gr.Interface(
@@ -40,8 +56,13 @@ demo = gr.Interface(
     inputs=gr.Textbox(label="Question"),
     outputs=gr.Textbox(label="Answer"),
     title="RagBench QA System",
-    description="Ask questions about HotpotQA dataset"
+    description="Ask questions about HotpotQA dataset",
+    examples=[
+        ["What role does T-cell count play in severe human adenovirus type 55 (HAdV-55) infection?"],
+        ["In what school district is Governor John R. Rogers High School located?"],
+    ]
 )
 
 if __name__ == "__main__":
     demo.launch(debug=True)
+
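
Note on the API key: the commit swaps a hardcoded key for the 'YOUR_API_KEY' placeholder. One option, not part of this commit, is to read the key from an environment variable (for example a Space secret) so it never lives in app.py. A minimal sketch, assuming the secret is exposed as OPENAI_API_KEY (a hypothetical name, not defined by the diff):

import os
import openai

# Hypothetical alternative to the hardcoded key above: read it from an
# environment variable (e.g. a Space secret named OPENAI_API_KEY).
api_key = os.environ.get("OPENAI_API_KEY")
if not api_key:
    raise RuntimeError("OPENAI_API_KEY environment variable is not set")
openai.api_key = api_key

With this in place, the rest of app.py (the process_query handler and the gr.Interface block) can stay as shown in the diff above.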