YashB1 committed
Commit daa1dd9 · verified · 1 Parent(s): 25414e5

Update app.py

Files changed (1)
app.py +0 -197
app.py CHANGED
@@ -1,200 +1,3 @@
- # import streamlit as st
- # import os
- # import pandas as pd
- # import random
- # from os.path import join
- # from src import preprocess_and_load_df, load_agent, ask_agent, decorate_with_code, show_response, get_from_user, load_smart_df, ask_question
- # from dotenv import load_dotenv
- # from langchain_groq.chat_models import ChatGroq
-
- # load_dotenv("Groq.txt")
- # Groq_Token = os.environ["GROQ_API_KEY"]
- # models = {"llama3":"llama3-70b-8192","mixtral": "mixtral-8x7b-32768", "llama2": "llama2-70b-4096", "gemma": "gemma-7b-it"}
-
- # self_path = os.path.dirname(os.path.abspath(__file__))
-
- # # Using HTML and CSS to center the title
- # st.write(
- # """
- # <style>
- # .title {
- # text-align: center;
- # color: #17becf;
- # }
- # """,
- # unsafe_allow_html=True,
- # )
-
- # # Displaying the centered title
- # st.markdown("<h2 class='title'>VayuBuddy</h2>", unsafe_allow_html=True)
-
- # # os.environ["PANDASAI_API_KEY"] = "$2a$10$gbmqKotzJOnqa7iYOun8eO50TxMD/6Zw1pLI2JEoqncwsNx4XeBS2"
-
- # # with open(join(self_path, "context1.txt")) as f:
- # # context = f.read().strip()
-
- # # agent = load_agent(join(self_path, "app_trial_1.csv"), context)
- # # df = preprocess_and_load_df(join(self_path, "Data.csv"))
- # # inference_server = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.2"
- # # inference_server = "https://api-inference.huggingface.co/models/codellama/CodeLlama-13b-hf"
- # # inference_server = "https://api-inference.huggingface.co/models/pandasai/bamboo-llm"
-
- # model_name = st.sidebar.selectbox("Select LLM:", ["llama3","mixtral", "llama2", "gemma"])
-
-
-
-
-
- # questions = ('Custom Prompt',
- # 'Plot the monthly average PM2.5 for the year 2023.',
- # 'Which month has the highest average PM2.5 overall?',
- # 'Which month has the highest PM2.5 overall?',
- # 'Which month has the highest average PM2.5 in 2023 for Mumbai?',
- # 'Plot and compare monthly timeseries of pollution for Mumbai and Bengaluru.',
- # 'Plot the yearly average PM2.5.',
- # 'Plot the monthly average PM2.5 of Delhi',
- # 'Mumbai and Bengaluru for the year 2022.',
- # 'Which month has the highest pollution?',
- # 'Plot the monthly average PM2.5 of Delhi for the year 2022.',
- # 'Which city has the highest PM2.5 level in July 2022?',
- # 'Plot and compare monthly timeseries of PM2.5 for Mumbai and Bengaluru.',
- # 'Plot and compare the monthly average PM2.5 of Delhi, Mumbai and Bengaluru for the year 2022.',
- # 'Plot the monthly average PM2.5.',
- # 'Plot the monthly average PM10 for the year 2023.',
- # 'Which month has the highest PM2.5?',
- # 'Plot the monthly average PM2.5 of Delhi for the year 2022.',
- # 'Plot the monthly average PM2.5 of Bengaluru for the year 2022.',
- # 'Plot the monthly average PM2.5 of Mumbai for the year 2022.',
- # 'Which state has the highest average PM2.5?',
- # 'Plot monthly PM2.5 in Gujarat for 2023.',
- # 'What is the name of the month with the highest average PM2.5 overall?')
-
- # waiting_lines = ("Thinking...", "Just a moment...", "Let me think...", "Working on it...", "Processing...", "Hold on...", "One moment...", "On it...")
-
- # # agent = load_agent(df, context="", inference_server=inference_server, name=model_name)
-
- # # Initialize chat history
- # if "responses" not in st.session_state:
- # st.session_state.responses = []
-
- # # Display chat responses from history on app rerun
- # for response in st.session_state.responses:
- # if not response["no_response"]:
- # show_response(st, response)
-
- # show = True
-
- # prompt = st.sidebar.selectbox("Select a Prompt:", questions)
-
- # # add a note "select custom prompt to ask your own question"
-
-
- # if prompt == 'Custom Prompt':
- # show = False
- # # React to user input
- # prompt = st.chat_input("Ask me anything about air quality!", key=10)
- # if prompt:
- # show = True
-
-
- # if show:
-
- # # Add user input to chat history
- # response = get_from_user(prompt)
- # response["no_response"] = False
- # st.session_state.responses.append(response)
-
- # # Display user input
- # show_response(st, response)
-
- # no_response = False
-
- # # select random waiting line
- # with st.spinner(random.choice(waiting_lines)):
- # ran = False
- # for i in range(5):
- # llm = ChatGroq(model=models[model_name], api_key=os.getenv("GROQ_API"), temperature=0.1)
-
- # df_check = pd.read_csv("Data.csv")
- # df_check["Timestamp"] = pd.to_datetime(df_check["Timestamp"])
- # df_check = df_check.head(5)
-
- # new_line = "\n"
-
- # template = f"""```python
- # import pandas as pd
- # import matplotlib.pyplot as plt
-
- # df = pd.read_csv("Data.csv")
- # df["Timestamp"] = pd.to_datetime(df["Timestamp"])
-
- # # df.dtypes
- # {new_line.join(map(lambda x: '# '+x, str(df_check.dtypes).split(new_line)))}
-
- # # {prompt.strip()}
- # # <your code here>
- # ```
- # """
-
- # query = f"""I have a pandas dataframe data of PM2.5 and PM10.
- # * Frequency of data is daily.
- # * `pollution` generally means `PM2.5`.
- # * You already have df, so don't read the csv file
- # * Don't print, but save result in a variable `answer` and make it global.
- # * Unless explicitly mentioned, don't consider the result as a plot.
- # * PM2.5 guidelines: India: 60, WHO: 15.
- # * PM10 guidelines: India: 100, WHO: 50.
- # * If result is a plot, show the India and WHO guidelines in the plot.
- # * If result is a plot make it in tight layout, save it and save path in `answer`. Example: `answer='plot.png'`
- # * If result is a plot, rotate x-axis tick labels by 45 degrees,
- # * If result is not a plot, save it as a string in `answer`. Example: `answer='The city is Mumbai'`
- # * Whenever you do any sort of aggregation, report the corresponding standard deviation, standard error and the number of data points for that aggregation.
- # * Whenever you're reporting a floating point number, round it to 2 decimal places.
- # * Always report the unit of the data. Example: `The average PM2.5 is 45.67 µg/m³`
-
-
- # Complete the following code.
-
- # {template}
-
- # """
-
- # answer = llm.invoke(query)
- # code = f"""
- # {template.split("```python")[1].split("```")[0]}
- # {answer.content.split("```python")[1].split("```")[0]}
- # """
- # # update variable `answer` when code is executed
- # try:
- # exec(code)
- # ran = True
- # no_response = False
- # except Exception as e:
- # no_response = True
- # exception = e
-
- # response = {"role": "assistant", "content": answer, "gen_code": code, "ex_code": code, "last_prompt": prompt, "no_response": no_response}
-
- # # Get response from agent
- # # response = ask_question(model_name=model_name, question=prompt)
- # # response = ask_agent(agent, prompt)
-
- # if ran:
- # break
-
- # if no_response:
- # st.error(f"Failed to generate right output due to the following error:\n\n{exception}")
- # # Add agent response to chat history
- # st.session_state.responses.append(response)
-
- # # Display agent response
- # if not no_response:
- # show_response(st, response)
-
- # del prompt
-
-
-
  import streamlit as st
  import os
  import pandas as pd