Update app.py
app.py
CHANGED
@@ -1,3 +1,221 @@
+# import streamlit as st
+# import os
+# import pandas as pd
+# import random
+# from os.path import join
+# from src import preprocess_and_load_df, load_agent, ask_agent, decorate_with_code, show_response, get_from_user, load_smart_df, ask_question
+# from dotenv import load_dotenv
+# from langchain_groq.chat_models import ChatGroq
+
+# load_dotenv("Groq.txt")
+# Groq_Token = os.environ["GROQ_API_KEY"]
+# models = {"llama3":"llama3-70b-8192","mixtral": "mixtral-8x7b-32768", "llama2": "llama2-70b-4096", "gemma": "gemma-7b-it"}
+
+# self_path = os.path.dirname(os.path.abspath(__file__))
+
+# # Using HTML and CSS to center the title
+# st.write(
+#     """
+#     <style>
+#     .title {
+#         text-align: center;
+#         color: #17becf;
+#     }
+#     """,
+#     unsafe_allow_html=True,
+# )
+
+# # Displaying the centered title
+# st.markdown("<h2 class='title'>VayuBuddy</h2>", unsafe_allow_html=True)
+# st.markdown("<div style='text-align:center; padding: 20px;'>VayuBuddy makes pollution monitoring easier by bridging the gap between users and datasets.<br>No coding required—just meaningful insights at your fingertips!</div>", unsafe_allow_html=True)
+
+# # Center-aligned instruction text with bold formatting
+# st.markdown("<div style='text-align:center;'>Choose a query from <b>Select a prompt</b> or type a query in the <b>chat box</b>, select a <b>LLM</b> (Large Language Model), and press enter to generate a response.</div>", unsafe_allow_html=True)
+# # os.environ["PANDASAI_API_KEY"] = "$2a$10$gbmqKotzJOnqa7iYOun8eO50TxMD/6Zw1pLI2JEoqncwsNx4XeBS2"
+
+# # with open(join(self_path, "context1.txt")) as f:
+# #     context = f.read().strip()
+
+# # agent = load_agent(join(self_path, "app_trial_1.csv"), context)
+# # df = preprocess_and_load_df(join(self_path, "Data.csv"))
+# # inference_server = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.2"
+# # inference_server = "https://api-inference.huggingface.co/models/codellama/CodeLlama-13b-hf"
+# # inference_server = "https://api-inference.huggingface.co/models/pandasai/bamboo-llm"
+
+# model_name = st.sidebar.selectbox("Select LLM:", ["llama3","mixtral", "gemma"])
+
+# questions = ('Custom Prompt',
+#              'Plot the monthly average PM2.5 for the year 2023.',
+#              'Which month in which year has the highest average PM2.5 overall?',
+#              'Which month in which year has the highest PM2.5 overall?',
+#              'Which month has the highest average PM2.5 in 2023 for Mumbai?',
+#              'Plot and compare monthly timeseries of pollution for Mumbai and Bengaluru.',
+#              'Plot the yearly average PM2.5.',
+#              'Plot the monthly average PM2.5 of Delhi, Mumbai and Bengaluru for the year 2022.',
+#              'Which month has the highest pollution?',
+#              'Which city has the highest PM2.5 level in July 2022?',
+#              'Plot and compare monthly timeseries of PM2.5 for Mumbai and Bengaluru.',
+#              'Plot and compare the monthly average PM2.5 of Delhi, Mumbai and Bengaluru for the year 2022.',
+#              'Plot the monthly average PM2.5.',
+#              'Plot the monthly average PM10 for the year 2023.',
+#              'Which (month, year) has the highest PM2.5?',
+#              'Plot the monthly average PM2.5 of Delhi for the year 2022.',
+#              'Plot the monthly average PM2.5 of Bengaluru for the year 2022.',
+#              'Plot the monthly average PM2.5 of Mumbai for the year 2022.',
+#              'Which state has the highest average PM2.5?',
+#              'Plot monthly PM2.5 in Gujarat for 2023.',
+#              'What is the name of the month with the highest average PM2.5 overall?')
+
+# waiting_lines = ("Thinking...", "Just a moment...", "Let me think...", "Working on it...", "Processing...", "Hold on...", "One moment...", "On it...")
+
+# # agent = load_agent(df, context="", inference_server=inference_server, name=model_name)
+
+# # Initialize chat history
+# if "responses" not in st.session_state:
+#     st.session_state.responses = []
+
+# # Display chat responses from history on app rerun
+# for response in st.session_state.responses:
+#     if not response["no_response"]:
+#         show_response(st, response)
+
+# show = True
+
+# if prompt := st.sidebar.selectbox("Select a Prompt:", questions):
+
+#     # add a note "select custom prompt to ask your own question"
+#     st.sidebar.info("Select 'Custom Prompt' to ask your own question.")
+
+#     if prompt == 'Custom Prompt':
+#         show = False
+#         # React to user input
+#         prompt = st.chat_input("Ask me anything about air quality!", key=10)
+#         if prompt : show = True
+# if show :
+
+#     # Add user input to chat history
+#     response = get_from_user(prompt)
+#     response["no_response"] = False
+#     st.session_state.responses.append(response)
+
+#     # Display user input
+#     show_response(st, response)
+
+#     no_response = False
+
+#     # select random waiting line
+#     with st.spinner(random.choice(waiting_lines)):
+#         ran = False
+#         for i in range(1):
+#             print(f"Attempt {i+1}")
+#             llm = ChatGroq(model=models[model_name], api_key=os.getenv("GROQ_API"), temperature=0)
+
+#             df_check = pd.read_csv("Data.csv")
+#             df_check["Timestamp"] = pd.to_datetime(df_check["Timestamp"])
+#             df_check = df_check.head(5)
+
+#             new_line = "\n"
+
+#             parameters = {"font.size": 12}
+
+#             template = f"""```python
+# import pandas as pd
+# import matplotlib.pyplot as plt
+
+# # plt.rcParams.update({parameters})
+
+# df = pd.read_csv("Data.csv")
+# df["Timestamp"] = pd.to_datetime(df["Timestamp"])
+
+# import geopandas as gpd
+# india = gpd.read_file("https://gist.githubusercontent.com/jbrobst/56c13bbbf9d97d187fea01ca62ea5112/raw/e388c4cae20aa53cb5090210a42ebb9b765c0a36/india_states.geojson")
+# india.loc[india['ST_NM'].isin(['Ladakh', 'Jammu & Kashmir']), 'ST_NM'] = 'Jammu and Kashmir'
+
+# # df.dtypes
+# {new_line.join(map(lambda x: '# '+x, str(df_check.dtypes).split(new_line)))}
+
+# # {prompt.strip()}
+# # <your code here>
+# ```
+# """
+
+#             query = f"""I have a pandas dataframe data of PM2.5 and PM10.
+# * The columns are 'Timestamp', 'station', 'PM2.5', 'PM10', 'address', 'city', 'latitude', 'longitude',and 'state'.
+# * Frequency of data is daily.
+# * `pollution` generally means `PM2.5`.
+# * You already have df, so don't read the csv file
+# * Don't print anything, but save result in a variable `answer` and make it global.
+# * Unless explicitly mentioned, don't consider the result as a plot.
+# * PM2.5 guidelines: India: 60, WHO: 15.
+# * PM10 guidelines: India: 100, WHO: 50.
+# * If result is a plot, show the India and WHO guidelines in the plot.
+# * If result is a plot make it in tight layout, save it and save path in `answer`. Example: `answer='plot.png'`
+# * If result is a plot, rotate x-axis tick labels by 45 degrees,
+# * If result is not a plot, save it as a string in `answer`. Example: `answer='The city is Mumbai'`
+# * I have a geopandas.geodataframe india containining the coordinates required to plot Indian Map with states.
+# * If the query asks you to plot on India Map, use that geodataframe to plot and then add more points as per the requirements using the similar code as follows : v = ax.scatter(df['longitude'], df['latitude']). If the colorbar is required, use the following code : plt.colorbar(v)
+# * If the query asks you to plot on India Map plot the India Map in Beige color
+# * Whenever you do any sort of aggregation, report the corresponding standard deviation, standard error and the number of data points for that aggregation.
+# * Whenever you're reporting a floating point number, round it to 2 decimal places.
+# * Always report the unit of the data. Example: `The average PM2.5 is 45.67 µg/m³`
+
+# Complete the following code.
+
+# {template}
+
+# """
+#             answer = None
+#             code = None
+#             try:
+#                 answer = llm.invoke(query)
+#                 code = f"""
+# {template.split("```python")[1].split("```")[0]}
+# {answer.content.split("```python")[1].split("```")[0]}
+# """
+#                 # update variable `answer` when code is executed
+#                 exec(code)
+#                 ran = True
+#                 no_response = False
+#             except Exception as e:
+#                 no_response = True
+#                 exception = e
+#                 if code is not None:
+#                     answer = f"!!!Faced an error while working on your query. Please try again!!!"
+
+#             if type(answer) != str:
+#                 answer = f"!!!Faced an error while working on your query. Please try again!!!"
+
+#             response = {"role": "assistant", "content": answer, "gen_code": code, "ex_code": code, "last_prompt": prompt, "no_response": no_response}
+
+#             # Get response from agent
+#             # response = ask_question(model_name=model_name, question=prompt)
+#             # response = ask_agent(agent, prompt)
+
+#             if ran:
+#                 break
+
+#     # Display agent response
+#     if code is not None:
+#         # Add agent response to chat history
+#         print("Adding response")
+
+#         st.session_state.responses.append(response)
+#         show_response(st, response)
+
+#     if no_response:
+#         print("No response")
+#         st.error(f"Failed to generate right output due to the following error:\n\n{exception}")
+
+
+
+# prompt = 'Custom Prompt'
+
+
+
+
+
+####################################################Added User Feedback###################################################
+
 import streamlit as st
 import os
 import pandas as pd
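The commented-out block above built its code-completion prompt by rendering `df.dtypes` and prefixing every line with `# `, so the dataframe schema travels inside the Python code block the model is asked to complete. A minimal standalone sketch of that dtype-embedding trick (the miniature dataframe here is illustrative, not from the commit):

```python
import pandas as pd

# Hypothetical miniature of Data.csv, just to demonstrate the trick.
df_check = pd.DataFrame({
    "Timestamp": pd.to_datetime(["2023-01-01", "2023-01-02"]),
    "PM2.5": [41.2, 38.9],
})

new_line = "\n"
# Same idea as the original: render df.dtypes and prefix each line with
# '# ' so the schema rides along as comments inside the code block.
schema_comment = new_line.join(
    map(lambda x: "# " + x, str(df_check.dtypes).split(new_line))
)

template = f"""```python
import pandas as pd

df = pd.read_csv("Data.csv")

# df.dtypes
{schema_comment}

# <your code here>
```"""

print(template)
```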
@@ -7,6 +225,60 @@ from src import preprocess_and_load_df, load_agent, ask_agent, decorate_with_cod
 from dotenv import load_dotenv
 from langchain_groq.chat_models import ChatGroq
 
+
+from datasets import Dataset, load_dataset, concatenate_datasets
+import streamlit as st
+from st_img_pastebutton import paste
+from io import BytesIO
+import base64
+from streamlit_feedback import streamlit_feedback
+import uuid
+
+from huggingface_hub import login, HfFolder
+import os
+
+# Set the token
+token = os.getenv("HF_TOKEN")  # Replace "YOUR_AUTHENTICATION_TOKEN" with your actual token
+
+# Login using the token
+
+login(token=token)
+
+model_name = st.sidebar.selectbox("Select LLM:", ["llama3","mixtral", "gemma"])
+
+contact_details = """
+**Feel free to reach out to us:**
+- [Nipun Batra](mailto:[email protected])
+- [Zeel B Patel](mailto:[email protected])
+- [Yash J Bachwana](mailto:[email protected])
+"""
+for _ in range(12):
+    st.sidebar.markdown(" ")
+
+# Display contact details with message
+st.sidebar.markdown("<hr>", unsafe_allow_html=True)
+st.sidebar.markdown(contact_details, unsafe_allow_html=True)
+
+# Function to push feedback data to Hugging Face Hub dataset
+def push_to_dataset(feedback, comments, output, code, error):
+    # Load existing dataset or create a new one if it doesn't exist
+    try:
+        ds = load_dataset("YashB1/Feedbacks_eoc", split="evaluation")
+    except FileNotFoundError:
+        # If dataset doesn't exist, create a new one
+        ds = Dataset.from_dict({"feedback": [], "comments": [], "error": [], "output": [], "code": []})
+
+    # Add new feedback to the dataset
+    new_data = {"feedback": [feedback], "comments": [comments], "error": [error], "output": [output], "code": [code]}  # Convert feedback and comments to lists
+    new_data = Dataset.from_dict(new_data)
+
+    ds = concatenate_datasets([ds, new_data])
+
+    # Push the updated dataset to Hugging Face Hub
+    ds.push_to_hub("YashB1/Feedbacks_eoc", split="evaluation")
+
+
 load_dotenv("Groq.txt")
 Groq_Token = os.environ["GROQ_API_KEY"]
 models = {"llama3":"llama3-70b-8192","mixtral": "mixtral-8x7b-32768", "llama2": "llama2-70b-4096", "gemma": "gemma-7b-it"}
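Note that `push_to_dataset` does a full read-modify-write: every submission reloads the split, appends one row, and re-pushes the whole thing. The append step can be exercised locally without Hub access; a minimal sketch (the `existing`/`new_row` names are illustrative, not from the commit, and both sides keep identical string-typed columns so `concatenate_datasets` accepts them):

```python
from datasets import Dataset, concatenate_datasets

# A feedback table with the same columns the app accumulates.
existing = Dataset.from_dict({
    "feedback": ["👍"], "comments": ["nice"], "error": [""],
    "output": ["plot.png"], "code": ["df.groupby(...)"],
})

# One new feedback record, values wrapped in single-element lists
# exactly as push_to_dataset does before Dataset.from_dict.
new_row = Dataset.from_dict({
    "feedback": ["👎"], "comments": ["wrong units"], "error": ["KeyError"],
    "output": [""], "code": ["df[...]"],
})

merged = concatenate_datasets([existing, new_row])
print(merged.num_rows)  # 2
```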
@@ -42,83 +314,83 @@ st.markdown("<div style='text-align:center;'>Choose a query from <b>Select a pro
 # inference_server = "https://api-inference.huggingface.co/models/codellama/CodeLlama-13b-hf"
 # inference_server = "https://api-inference.huggingface.co/models/pandasai/bamboo-llm"
 
-model_name = st.sidebar.selectbox("Select LLM:", ["llama3","mixtral", "gemma"])
-
-questions = ('Custom Prompt',
-             'Plot the monthly average PM2.5 for the year 2023.',
-             'Which month in which year has the highest average PM2.5 overall?',
-             'Which month in which year has the highest PM2.5 overall?',
-             'Which month has the highest average PM2.5 in 2023 for Mumbai?',
-             'Plot and compare monthly timeseries of pollution for Mumbai and Bengaluru.',
-             'Plot the yearly average PM2.5.',
-             'Plot the monthly average PM2.5 of Delhi, Mumbai and Bengaluru for the year 2022.',
-             'Which month has the highest pollution?',
-             'Which city has the highest PM2.5 level in July 2022?',
-             'Plot and compare monthly timeseries of PM2.5 for Mumbai and Bengaluru.',
-             'Plot and compare the monthly average PM2.5 of Delhi, Mumbai and Bengaluru for the year 2022.',
-             'Plot the monthly average PM2.5.',
-             'Plot the monthly average PM10 for the year 2023.',
-             'Which (month, year) has the highest PM2.5?',
-             'Plot the monthly average PM2.5 of Delhi for the year 2022.',
-             'Plot the monthly average PM2.5 of Bengaluru for the year 2022.',
-             'Plot the monthly average PM2.5 of Mumbai for the year 2022.',
-             'Which state has the highest average PM2.5?',
-             'Plot monthly PM2.5 in Gujarat for 2023.',
-             'What is the name of the month with the highest average PM2.5 overall?')
-
-waiting_lines = ("Thinking...", "Just a moment...", "Let me think...", "Working on it...", "Processing...", "Hold on...", "One moment...", "On it...")
-
-# agent = load_agent(df, context="", inference_server=inference_server, name=model_name)
-
-# Initialize chat history
-if "responses" not in st.session_state:
-    st.session_state.responses = []
-
-# Display chat responses from history on app rerun
-for response in st.session_state.responses:
-    if not response["no_response"]:
-        show_response(st, response)
-
-show = True
-
-if prompt := st.sidebar.selectbox("Select a Prompt:", questions):
-
-    # add a note "select custom prompt to ask your own question"
-    st.sidebar.info("Select 'Custom Prompt' to ask your own question.")
-
-    if prompt == 'Custom Prompt':
-        show = False
-        # React to user input
-        prompt = st.chat_input("Ask me anything about air quality!", key=10)
-        if prompt : show = True
-if show :
-
-    # Add user input to chat history
-    response = get_from_user(prompt)
-    response["no_response"] = False
-    st.session_state.responses.append(response)
-
-    # Display user input
-    show_response(st, response)
-
-    no_response = False
-
-    # select random waiting line
-    with st.spinner(random.choice(waiting_lines)):
-        ran = False
-        for i in range(1):
+# model_name = st.sidebar.selectbox("Select LLM:", ["llama3","mixtral", "gemma"])
+
+
+if 'question_state' not in st.session_state:
+    st.session_state.question_state = False
+
+if 'fbk' not in st.session_state:
+    st.session_state.fbk = str(uuid.uuid4())
+
+if 'feedback' not in st.session_state:
+    st.session_state.feedback = None
+
+if "chat_history" not in st.session_state:
+    st.session_state.chat_history = []
+
+
+def display_answer():
+    for entry in st.session_state.chat_history:
+        with st.chat_message("human"):
+            st.write(entry["question"])
+
+        # st.write(entry["answer"])
+        # print(entry["answer"])
+        show_response(st, entry["answer"])
+
+
+def fbcb(response):
+    """Update the history with feedback.
+
+    The question and answer are already saved in history.
+    Now we will add the feedback in that history entry.
+    """
+
+    display_answer()  # display hist
+
+    # Create a new feedback by changing the key of feedback component.
+    st.session_state.fbk = str(uuid.uuid4())
+
+
+question = st.chat_input(placeholder="Ask your question here .... !!!!")
+if question:
+    # We need this because of feedback. That question above
+    # is a stopper. If user hits the feedback button, streamlit
+    # reruns the code from top and we cannot enter back because
+    # of that chat_input.
+    st.session_state.prompt = question
+    st.session_state.question_state = True
+
+
+# We are now free because st.session_state.question_state is True.
+# But there are consequences. We will have to handle
+# the double runs of create_answer() and display_answer()
+# just to get the user feedback.
+if st.session_state.question_state:
+
+    waiting_lines = ("Thinking...", "Just a moment...", "Let me think...", "Working on it...", "Processing...", "Hold on...", "One moment...", "On it...")
+    with st.spinner(random.choice(waiting_lines)):
+        ran = False
+        for i in range(5):
             print(f"Attempt {i+1}")
             llm = ChatGroq(model=models[model_name], api_key=os.getenv("GROQ_API"), temperature=0)
 
             df_check = pd.read_csv("Data.csv")
             df_check["Timestamp"] = pd.to_datetime(df_check["Timestamp"])
             df_check = df_check.head(5)
 
             new_line = "\n"
 
             parameters = {"font.size": 12}
+            # If the query asks you to make a gif/animation, don't use savefig to save it. Instead use ani.save(answer, writer='pillow').
+            # If the query asks you to make a gif/animation, don't use colormaps .
 
             template = f"""```python
 import pandas as pd
 import matplotlib.pyplot as plt
 
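The `fbk` key rotation above is the load-bearing trick: a widget keyed by a fixed string would keep reporting its old value on every rerun, so the `on_submit` callback swaps in a fresh UUID to re-arm the feedback component for the next answer. A stripped-down sketch of the same pattern using a plain `st.button` (hypothetical names, no `streamlit_feedback` dependency):

```python
import uuid
import streamlit as st

if "widget_key" not in st.session_state:
    st.session_state.widget_key = str(uuid.uuid4())

def rearm():
    # A new key makes Streamlit treat the next render as a brand-new
    # widget, so state from the previous submission is dropped.
    st.session_state.widget_key = str(uuid.uuid4())

clicked = st.button("Submit feedback",
                    key=st.session_state.widget_key,
                    on_click=rearm)
if clicked:
    st.write("Feedback captured; widget re-armed for the next answer.")
```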
@@ -128,84 +400,113 @@ df = pd.read_csv("Data.csv")
 df["Timestamp"] = pd.to_datetime(df["Timestamp"])
 
 import geopandas as gpd
+file_path = "india_states.geojson"
 india = gpd.read_file("https://gist.githubusercontent.com/jbrobst/56c13bbbf9d97d187fea01ca62ea5112/raw/e388c4cae20aa53cb5090210a42ebb9b765c0a36/india_states.geojson")
 india.loc[india['ST_NM'].isin(['Ladakh', 'Jammu & Kashmir']), 'ST_NM'] = 'Jammu and Kashmir'
 
+
 # df.dtypes
 {new_line.join(map(lambda x: '# '+x, str(df_check.dtypes).split(new_line)))}
 
-# {prompt.strip()}
+# {st.session_state.prompt.strip()}
 # <your code here>
 ```
 """
 
             query = f"""I have a pandas dataframe data of PM2.5 and PM10.
 * The columns are 'Timestamp', 'station', 'PM2.5', 'PM10', 'address', 'city', 'latitude', 'longitude',and 'state'.
 * Frequency of data is daily.
 * `pollution` generally means `PM2.5`.
 * You already have df, so don't read the csv file
 * Don't print anything, but save result in a variable `answer` and make it global.
 * Unless explicitly mentioned, don't consider the result as a plot.
 * PM2.5 guidelines: India: 60, WHO: 15.
 * PM10 guidelines: India: 100, WHO: 50.
+* If query asks to plot calendarmap, use library calmap.
 * If result is a plot, show the India and WHO guidelines in the plot.
 * If result is a plot make it in tight layout, save it and save path in `answer`. Example: `answer='plot.png'`
 * If result is a plot, rotate x-axis tick labels by 45 degrees,
 * If result is not a plot, save it as a string in `answer`. Example: `answer='The city is Mumbai'`
 * I have a geopandas.geodataframe india containining the coordinates required to plot Indian Map with states.
 * If the query asks you to plot on India Map, use that geodataframe to plot and then add more points as per the requirements using the similar code as follows : v = ax.scatter(df['longitude'], df['latitude']). If the colorbar is required, use the following code : plt.colorbar(v)
 * If the query asks you to plot on India Map plot the India Map in Beige color
 * Whenever you do any sort of aggregation, report the corresponding standard deviation, standard error and the number of data points for that aggregation.
 * Whenever you're reporting a floating point number, round it to 2 decimal places.
 * Always report the unit of the data. Example: `The average PM2.5 is 45.67 µg/m³`
 
 Complete the following code.
 
 {template}
 
 """
             answer = None
             code = None
+            exception = None
             try:
                 answer = llm.invoke(query)
                 code = f"""
 {template.split("```python")[1].split("```")[0]}
 {answer.content.split("```python")[1].split("```")[0]}
 """
                 # update variable `answer` when code is executed
                 exec(code)
                 ran = True
                 no_response = False
             except Exception as e:
                 no_response = True
                 exception = e
                 if code is not None:
                     answer = f"!!!Faced an error while working on your query. Please try again!!!"
 
             if type(answer) != str:
                 answer = f"!!!Faced an error while working on your query. Please try again!!!"
 
-            response = {"role": "assistant", "content": answer, "gen_code": code, "ex_code": code, "last_prompt": prompt, "no_response": no_response}
-
-            # Get response from agent
-            # response = ask_question(model_name=model_name, question=prompt)
-            # response = ask_agent(agent, prompt)
+            response = {"role": "assistant", "content": answer, "gen_code": code, "ex_code": code, "last_prompt": st.session_state.prompt, "no_response": no_response, "exception": exception}
+            # print(response)
 
             if ran:
                 break
 
     # Display agent response
     if code is not None:
         # Add agent response to chat history
-        print("Adding response")
-
-        st.session_state.responses.append(response)
-        show_response(st, response)
+        if response['content'][-4:] == ".gif" :
+            # Provide a button to show the gif, we don't want it to run forever
+            st.image(response['content'], use_column_width=True)
+            response['content'] = ""
+
+        print("Adding response : ")
+
+        message_id = len(st.session_state.chat_history)
+        st.session_state.chat_history.append({
+            "question": st.session_state.prompt,
+            "answer": response,
+            "message_id": message_id,
+        })
+        display_answer()
 
     if no_response:
         print("No response")
         st.error(f"Failed to generate right output due to the following error:\n\n{exception}")
 
-
-prompt = 'Custom Prompt'
+    # display_answer()
+    # Pressing a button in feedback reruns the code.
+    st.session_state.feedback = streamlit_feedback(
+        feedback_type="thumbs",
+        optional_text_label="[Optional]",
+        align="flex-start",
+        key=st.session_state.fbk,
+        on_submit=fbcb
+    )
+    print("FeedBack", st.session_state.feedback)
+    if st.session_state.feedback :
+        push_to_dataset(st.session_state.feedback['score'], st.session_state.feedback['text'], answer, code, exception)
+        st.success("Feedback submitted successfully!")
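One loose end in this revision: the active code still calls `random.choice(...)` and `show_response(...)`, but the `import random` and `from src import ...` lines only survive inside the commented-out block at the top of the file, so the first question would raise a `NameError`. A minimal sketch of the missing imports (assuming `src` still exposes `show_response` as it did before):

```python
import random
from src import show_response  # assumed: same src module as the commented block
```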
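For reference, the `code = f"""..."""` assembly in the try block splices the template's body together with whatever the model returned between ```python fences, then `exec(code)` runs it so the generated script can set a global `answer`. A self-contained sketch of that extract-and-exec step (the `reply` text and `extract_block` helper are made up for illustration); note that exec-ing model output runs arbitrary code, which is exactly the trade-off the app accepts:

```python
def extract_block(text: str) -> str:
    # Same slicing the app uses: take what sits between the first
    # ```python fence and the next closing fence.
    return text.split("```python")[1].split("```")[0]

reply = """Here is the code:
```python
answer = f"The average PM2.5 is {41.2:.2f} µg/m³"
```"""

code = extract_block(reply)
exec(code)      # defines `answer` in the current (module) namespace
print(answer)   # -> The average PM2.5 is 41.20 µg/m³
```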
|