prathampatel1 committed
Commit e329747 · verified · 1 parent: cf030c4

Upload 2 files

Files changed (2):
  1. app.py +515 -0
  2. requirements.txt +58 -0
app.py ADDED
@@ -0,0 +1,515 @@
import streamlit as st
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
import plotly.express as px
import plotly.graph_objects as go
from sklearn.ensemble import IsolationForest
from sklearn.linear_model import LinearRegression

# Set random seed for reproducibility
np.random.seed(42)

def generate_device_data(num_days=90, device_type="home"):
    """Generate synthetic energy consumption data for devices with enhanced patterns."""
    dates = pd.date_range(end=datetime.now(), periods=num_days * 24, freq='h')

    if device_type == "home":
        devices = {
            'HVAC': {'base': 8, 'var': 4, 'peak_hours': [14, 15, 16, 17], 'weekend_factor': 1.2},
            'Refrigerator': {'base': 2, 'var': 0.5, 'peak_hours': [12, 13, 14], 'weekend_factor': 1.0},
            'Washing Machine': {'base': 1, 'var': 0.8, 'peak_hours': [10, 19, 20], 'weekend_factor': 1.5},
            'Lighting': {'base': 1.5, 'var': 0.3, 'peak_hours': [18, 19, 20, 21], 'weekend_factor': 1.1},
            'Television': {'base': 0.5, 'var': 0.2, 'peak_hours': [20, 21, 22], 'weekend_factor': 1.3}
        }
    else:
        devices = {
            'HVAC System': {'base': 20, 'var': 8, 'peak_hours': [14, 15, 16, 17], 'weekend_factor': 0.6},
            'Server Room': {'base': 15, 'var': 3, 'peak_hours': [12, 13, 14], 'weekend_factor': 0.9},
            'Office Equipment': {'base': 10, 'var': 4, 'peak_hours': [9, 10, 11, 14, 15], 'weekend_factor': 0.4},
            'Lighting': {'base': 8, 'var': 2, 'peak_hours': [9, 10, 11, 14, 15], 'weekend_factor': 0.5},
            'Kitchen Appliances': {'base': 5, 'var': 2, 'peak_hours': [12, 13], 'weekend_factor': 0.3}
        }

    data = []

    for date in dates:
        hour = date.hour
        is_weekend = date.weekday() >= 5

        for device, params in devices.items():
            # Add seasonal variation
            seasonal_factor = 1 + 0.3 * np.sin(2 * np.pi * date.dayofyear / 365)

            # Add peak hour variation
            peak_factor = 1.5 if hour in params['peak_hours'] else 1

            # Add weekend variation
            weekend_factor = params['weekend_factor'] if is_weekend else 1

            # Base consumption with random variation
            consumption = (params['base'] * seasonal_factor * peak_factor * weekend_factor +
                           np.random.normal(0, params['var']))

            # Add some anomalies (3% chance)
            if np.random.random() < 0.03:
                consumption *= np.random.choice([1.5, 2.0, 0.5])

            data.append({
                'Date': date,
                'Device': device,
                'Consumption': max(0, consumption),
                'Hour': hour,
                'Weekday': date.strftime('%A'),
                'Weekend': is_weekend
            })

    return pd.DataFrame(data)

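# For orientation (illustrative figures, not from a real run): the generator
# returns one row per device per hour, with columns
#   ['Date', 'Device', 'Consumption', 'Hour', 'Weekday', 'Weekend'],
# so generate_device_data(num_days=7) yields 7 * 24 * 5 = 840 rows for the
# five devices in either profile.
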
def detect_anomalies(df):
    """Enhanced anomaly detection using Isolation Forest with multiple features."""
    iso_forest = IsolationForest(contamination=0.03, random_state=42)
    by_device = df.groupby('Device')

    anomalies = []
    for device, group in by_device:
        # Use multiple features for anomaly detection
        features = group[['Consumption', 'Hour']].copy()
        features['Weekend'] = group['Weekend'].astype(int)

        predictions = iso_forest.fit_predict(features)
        anomaly_indices = predictions == -1

        anomaly_data = group[anomaly_indices]

        for _, row in anomaly_data.iterrows():
            anomalies.append({
                'Device': device,
                'Date': row['Date'],
                'Consumption': row['Consumption'],
                'Hour': row['Hour'],
                'Weekday': row['Weekday']
            })

    return pd.DataFrame(anomalies)

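# Note: contamination=0.03 matches the ~3% anomaly rate injected by
# generate_device_data, and fit_predict refits the forest for each device
# group, so one device's scale does not distort another's anomaly scores.
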
def generate_insights(df):
    """Generate detailed insights from the energy consumption data."""
    insights = []

    # Peak usage analysis
    peak_hours = df.groupby(['Device', 'Hour'])['Consumption'].mean().reset_index()
    for device in df['Device'].unique():
        device_peaks = peak_hours[peak_hours['Device'] == device].nlargest(3, 'Consumption')
        insights.append({
            'Type': 'Peak Hours',
            'Device': device,
            'Description': f"Peak usage hours: {', '.join(map(str, device_peaks['Hour']))}",
            'Impact': 'High'
        })

    # Weekend vs weekday analysis
    weekend_comparison = df.groupby(['Device', 'Weekend'])['Consumption'].mean().unstack()
    for device in weekend_comparison.index:
        diff_pct = ((weekend_comparison.loc[device, True] - weekend_comparison.loc[device, False]) /
                    weekend_comparison.loc[device, False] * 100)
        insights.append({
            'Type': 'Weekend Pattern',
            'Device': device,
            'Description': f"{'Higher' if diff_pct > 0 else 'Lower'} weekend usage by {abs(diff_pct):.1f}%",
            'Impact': 'Medium' if abs(diff_pct) < 20 else 'High'
        })

    return pd.DataFrame(insights)

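# Worked example of the weekend comparison (illustrative numbers): a weekend
# mean of 6.0 kWh against a weekday mean of 5.0 kWh gives
#   diff_pct = (6.0 - 5.0) / 5.0 * 100 = +20.0%
# which reaches the 20% threshold and is therefore reported as High impact.
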
def predict_consumption(df, days_ahead=30):
    """Predict future consumption using linear regression with multiple features."""
    predictions = []

    for device in df['Device'].unique():
        device_data = df[df['Device'] == device].copy()

        # Create features for prediction
        device_data['Day_of_Week'] = device_data['Date'].dt.dayofweek
        device_data['Month'] = device_data['Date'].dt.month
        device_data['Day_of_Year'] = device_data['Date'].dt.dayofyear

        X = device_data[['Hour', 'Day_of_Week', 'Month', 'Day_of_Year']]
        y = device_data['Consumption']

        model = LinearRegression()
        model.fit(X, y)

        # Generate future dates
        future_dates = pd.date_range(
            start=df['Date'].max() + timedelta(hours=1),
            periods=days_ahead * 24,
            freq='h'
        )

        future_X = pd.DataFrame({
            'Hour': future_dates.hour,
            'Day_of_Week': future_dates.dayofweek,
            'Month': future_dates.month,
            'Day_of_Year': future_dates.dayofyear
        })

        future_predictions = model.predict(future_X)

        for date, pred in zip(future_dates, future_predictions):
            predictions.append({
                'Date': date,
                'Device': device,
                'Predicted_Consumption': max(0, pred)
            })

    return pd.DataFrame(predictions)

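# Caveat: LinearRegression treats Hour, Day_of_Week, Month and Day_of_Year as
# ordinal, so it cannot capture the wrap-around of daily cycles (hour 23 -> 0).
# A minimal sketch of one common remedy, if the linear model is kept:
#   X['hour_sin'] = np.sin(2 * np.pi * X['Hour'] / 24)
#   X['hour_cos'] = np.cos(2 * np.pi * X['Hour'] / 24)
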
# Streamlit UI
st.set_page_config(page_title="SEMS - Smart Energy Management System", layout="wide",
                   initial_sidebar_state="expanded")

# Custom CSS
st.markdown("""
    <style>
    .main {
        padding: 2rem;
    }
    .stMetric {
        background-color: #f0f2f6;
        padding: 1rem;
        border-radius: 0.5rem;
    }
    .insight-card {
        background-color: #ffffff;
        padding: 1rem;
        border-radius: 0.5rem;
        margin: 0.5rem 0;
        border: 1px solid #e0e0e0;
    }
    </style>
""", unsafe_allow_html=True)

st.title("🏢 SEMS - Smart Energy Management System")

# Sidebar configuration
st.sidebar.title("Configuration")
user_type = st.sidebar.radio("Select User Type", ["Home", "Organization"])
analysis_period = st.sidebar.slider("Analysis Period (Days)", 30, 180, 90)

# Generate data
data = generate_device_data(num_days=analysis_period, device_type=user_type.lower())

# Main tabs
tab1, tab2, tab3, tab4 = st.tabs([
    "📊 Usage Dashboard",
    "🔍 Detailed Analysis",
    "⚠️ Peak Usage Detection",
    "📈 Forecasting"
])

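# Streamlit reruns this whole script on every widget interaction; thanks to
# np.random.seed(42) at the top, the synthetic data stays effectively the same
# across reruns for a given user type and analysis period.
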
with tab1:
    st.header("Energy Usage Dashboard")

    # Key metrics
    col1, col2, col3 = st.columns(3)

    total_consumption = data['Consumption'].sum()
    avg_daily = data.groupby(data['Date'].dt.date)['Consumption'].sum().mean()
    peak_hour = data.groupby('Hour')['Consumption'].mean().idxmax()

    col1.metric("Total Consumption", f"{total_consumption:.1f} kWh")
    col2.metric("Average Daily Usage", f"{avg_daily:.1f} kWh")
    col3.metric("Peak Usage Hour", f"{peak_hour}:00")

    # Daily consumption trend (aggregate by calendar day rather than by hourly timestamp)
    st.subheader("Daily Consumption Trend")
    daily_consumption = data.groupby([data['Date'].dt.date, 'Device'])['Consumption'].sum().reset_index()
    fig = px.line(daily_consumption, x='Date', y='Consumption', color='Device',
                  title='Energy Consumption Over Time')
    fig.update_layout(height=400)
    st.plotly_chart(fig, use_container_width=True)

    # Device-wise distribution
    col1, col2 = st.columns(2)

    with col1:
        device_total = data.groupby('Device')['Consumption'].sum().sort_values(ascending=True)
        fig = px.bar(device_total, orientation='h',
                     title='Total Consumption by Device')
        st.plotly_chart(fig, use_container_width=True)

    with col2:
        hourly_avg = data.groupby(['Hour', 'Device'])['Consumption'].mean().reset_index()
        fig = px.line(hourly_avg, x='Hour', y='Consumption', color='Device',
                      title='Average Hourly Consumption Pattern')
        st.plotly_chart(fig, use_container_width=True)

with tab2:
    st.header("Detailed Analysis")

    # Keep weekdays in calendar order (groupby/pivot would otherwise sort alphabetically)
    day_order = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']

    # Weekday vs weekend analysis
    st.subheader("Weekday vs Weekend Consumption")
    weekly_pattern = data.groupby(['Weekday', 'Device'])['Consumption'].mean().reset_index()
    fig = px.bar(weekly_pattern, x='Weekday', y='Consumption', color='Device',
                 title='Average Consumption by Day of Week',
                 category_orders={'Weekday': day_order})
    st.plotly_chart(fig, use_container_width=True)

    # Hourly heatmap
    st.subheader("Hourly Consumption Heatmap")
    hourly_data = data.pivot_table(
        values='Consumption',
        index='Hour',
        columns='Weekday',
        aggfunc='mean'
    ).reindex(columns=day_order)

    fig = px.imshow(hourly_data,
                    labels=dict(x="Day of Week", y="Hour of Day", color="Consumption"),
                    aspect="auto",
                    title="Consumption Intensity by Hour and Day")
    st.plotly_chart(fig, use_container_width=True)

    # Display insights
    st.subheader("Key Insights")
    insights = generate_insights(data)

    for _, insight in insights.iterrows():
        with st.expander(f"{insight['Device']} - {insight['Type']} (Impact: {insight['Impact']})"):
            st.write(insight['Description'])

with tab3:
    st.header("Peak Usage Detection")

    # Detect anomalies (plotting and details are guarded so an empty result cannot error)
    anomalies = detect_anomalies(data)

    if not anomalies.empty:
        st.warning(f"Detected {len(anomalies)} anomalies in energy consumption")

        # Plot with anomalies
        fig = go.Figure()

        for device in data['Device'].unique():
            device_data = data[data['Device'] == device]
            device_anomalies = anomalies[anomalies['Device'] == device]

            fig.add_trace(go.Scatter(
                x=device_data['Date'],
                y=device_data['Consumption'],
                name=f"{device} (normal)",
                mode='lines'
            ))

            if not device_anomalies.empty:
                fig.add_trace(go.Scatter(
                    x=device_anomalies['Date'],
                    y=device_anomalies['Consumption'],
                    name=f"{device} (anomaly)",
                    mode='markers',
                    marker=dict(size=10, symbol='x', color='red')
                ))

        fig.update_layout(
            title='Energy Consumption with Detected Anomalies',
            height=500
        )
        st.plotly_chart(fig, use_container_width=True)

        # Anomaly details in expandable tables
        st.subheader("Peak Usage Details")
        for device in anomalies['Device'].unique():
            device_anomalies = anomalies[anomalies['Device'] == device].copy()
            device_anomalies['Date'] = device_anomalies['Date'].dt.strftime('%Y-%m-%d %H:%M')

            with st.expander(f"Anomalies for {device}"):
                st.dataframe(
                    device_anomalies[['Date', 'Consumption', 'Hour', 'Weekday']],
                    use_container_width=True
                )

with tab4:
    st.header("Consumption Forecasting")

    # Generate predictions
    predictions = predict_consumption(data)

    # Plot historical data and predictions
    st.subheader("Consumption Forecast")

    for device in predictions['Device'].unique():
        with st.expander(f"Forecast for {device}"):
            historical = data[data['Device'] == device]
            device_predictions = predictions[predictions['Device'] == device]

            fig = go.Figure()

            # Historical data
            fig.add_trace(go.Scatter(
                x=historical['Date'],
                y=historical['Consumption'],
                name='Historical',
                line=dict(color='blue')
            ))

            # Predictions
            fig.add_trace(go.Scatter(
                x=device_predictions['Date'],
                y=device_predictions['Predicted_Consumption'],
                name='Forecast',
                line=dict(color='red', dash='dash')
            ))

            fig.update_layout(
                title=f'Energy Consumption Forecast - {device}',
                xaxis_title='Date',
                yaxis_title='Consumption (kWh)',
                height=400
            )

            st.plotly_chart(fig, use_container_width=True)

            # Summary statistics
            col1, col2, col3 = st.columns(3)

            avg_historical = historical['Consumption'].mean()
            avg_predicted = device_predictions['Predicted_Consumption'].mean()
            change_pct = (avg_predicted - avg_historical) / avg_historical * 100

            col1.metric(
                "Average Historical Usage",
                f"{avg_historical:.2f} kWh"
            )
            col2.metric(
                "Average Predicted Usage",
                f"{avg_predicted:.2f} kWh"
            )
            col3.metric(
                "Expected Change",
                f"{change_pct:+.1f}%",
                delta_color="inverse"
            )

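    # Note: delta_color="inverse" flips st.metric's delta coloring so that an
    # increase would read as bad (red); it only styles the optional delta
    # argument, so the change would also need to be passed as delta= to show up.
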
    # Additional insights section
    st.subheader("Energy Saving Opportunities")

    # Calculate potential savings based on patterns
    def calculate_savings_opportunities(historical_data, predictions_data):
        opportunities = []

        # Check for peak hour reduction potential
        peak_hours = historical_data.groupby('Hour')['Consumption'].mean()
        top_peak_hours = peak_hours.nlargest(3)
        potential_peak_savings = top_peak_hours.sum() * 0.2  # Assume 20% reduction possible

        opportunities.append({
            'Type': 'Peak Hour Reduction',
            'Description': f'Reduce usage during peak hours ({", ".join(map(str, top_peak_hours.index))}:00)',
            'Potential_Savings': f'{potential_peak_savings:.2f} kWh per day'
        })

        # Check for weekend optimization
        weekend_data = historical_data[historical_data['Weekend']]
        weekday_data = historical_data[~historical_data['Weekend']]
        if weekend_data['Consumption'].mean() > weekday_data['Consumption'].mean():
            # Scaled by 2 for the two weekend days
            weekend_savings = (weekend_data['Consumption'].mean() - weekday_data['Consumption'].mean()) * 2
            opportunities.append({
                'Type': 'Weekend Optimization',
                'Description': 'Optimize weekend consumption patterns',
                'Potential_Savings': f'{weekend_savings:.2f} kWh per weekend'
            })

        # Seasonal optimization
        seasonal_data = historical_data.copy()
        seasonal_data['Month'] = seasonal_data['Date'].dt.month
        monthly_avg = seasonal_data.groupby('Month')['Consumption'].mean()
        seasonal_variation = monthly_avg.max() - monthly_avg.min()

        if seasonal_variation > monthly_avg.mean() * 0.3:  # If variation is more than 30%
            opportunities.append({
                'Type': 'Seasonal Optimization',
                'Description': 'Implement seasonal usage strategies',
                'Potential_Savings': f'{seasonal_variation:.2f} kWh per month'
            })

        return pd.DataFrame(opportunities)

    savings_opportunities = calculate_savings_opportunities(data, predictions)

    for _, opportunity in savings_opportunities.iterrows():
        with st.expander(f"💡 {opportunity['Type']}"):
            st.write(f"**Description:** {opportunity['Description']}")
            st.write(f"**Potential Savings:** {opportunity['Potential_Savings']}")

            # Add specific recommendations based on opportunity type
            if opportunity['Type'] == 'Peak Hour Reduction':
                st.write("""
                **Recommendations:**
                - Schedule high-energy activities during off-peak hours
                - Use automated controls to limit non-essential usage during peak times
                - Consider energy storage solutions for peak shifting
                """)
            elif opportunity['Type'] == 'Weekend Optimization':
                st.write("""
                **Recommendations:**
                - Review weekend device scheduling
                - Implement automatic shutdown for unused equipment
                - Optimize temperature settings for unoccupied periods
                """)
            elif opportunity['Type'] == 'Seasonal Optimization':
                st.write("""
                **Recommendations:**
                - Adjust HVAC settings seasonally
                - Implement weather-based control strategies
                - Schedule maintenance during shoulder seasons
                """)

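# Worked example of the peak-hour estimate (illustrative numbers): if the three
# highest hourly means are 10, 9 and 8 kWh, the assumed 20% reduction yields
#   (10 + 9 + 8) * 0.2 = 5.4 kWh
# of potential savings per day.
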
# Add export functionality
if st.sidebar.button("Export Analysis Report"):
    # Overall forecast change across all devices (mean predicted vs. mean historical),
    # rather than the last device's per-device change from the tab4 loop
    overall_change_pct = ((predictions['Predicted_Consumption'].mean() - data['Consumption'].mean())
                          / data['Consumption'].mean() * 100)

    # Create report dataframe
    report_data = {
        'Metric': [
            'Total Consumption',
            'Average Daily Usage',
            'Peak Usage Hour',
            'Number of Anomalies',
            'Forecast Trend'
        ],
        'Value': [
            f"{total_consumption:.1f} kWh",
            f"{avg_daily:.1f} kWh",
            f"{peak_hour}:00",
            len(anomalies),
            f"{overall_change_pct:+.1f}% (30-day forecast)"
        ]
    }
    report_df = pd.DataFrame(report_data)

    # Convert to CSV
    csv = report_df.to_csv(index=False)
    st.sidebar.download_button(
        label="Download Report",
        data=csv,
        file_name="energy_analysis_report.csv",
        mime="text/csv"
    )

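# Note: because Streamlit reruns the script top to bottom, the "Download Report"
# button is only rendered on the run triggered by clicking "Export Analysis Report".
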
# Add help section in sidebar
with st.sidebar.expander("ℹ️ Help"):
    st.write("""
    **Using the Dashboard:**
    1. Select your user type (Home/Organization)
    2. Adjust the analysis period using the slider
    3. Navigate through the tabs to view different analyses
    4. Use the expanders to see detailed information
    5. Export your analysis report using the button above

    For additional support, contact our team at [email protected]
    """)

# Add system status
st.sidebar.markdown("---")
st.sidebar.markdown("### System Status")
st.sidebar.markdown("✅ All Systems Operational")
st.sidebar.markdown(f"Last Updated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
requirements.txt ADDED
@@ -0,0 +1,58 @@
altair==5.5.0
attrs==24.3.0
blinker==1.9.0
cachetools==5.5.0
certifi==2024.12.14
charset-normalizer==3.4.1
click==8.1.8
cmdstanpy==1.2.5
contourpy==1.3.1
cycler==0.12.1
fonttools==4.55.3
gitdb==4.0.12
GitPython==3.1.44
holidays==0.64
idna==3.10
importlib_resources==6.5.2
Jinja2==3.1.5
joblib==1.4.2
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
kiwisolver==1.4.8
markdown-it-py==3.0.0
MarkupSafe==3.0.2
matplotlib==3.10.0
mdurl==0.1.2
narwhals==1.21.1
numpy==2.2.1
packaging==24.2
pandas==2.2.3
pillow==11.1.0
plotly==5.24.1
prophet==1.1.6
protobuf==5.29.2
pyarrow==18.1.0
pydeck==0.9.1
Pygments==2.19.1
pyparsing==3.2.1
python-dateutil==2.9.0.post0
pytz==2024.2
referencing==0.35.1
requests==2.32.3
rich==13.9.4
rpds-py==0.22.3
scikit-learn==1.6.0
scipy==1.15.0
six==1.17.0
smmap==5.0.2
stanio==0.5.1
streamlit==1.41.1
tenacity==9.0.0
threadpoolctl==3.5.0
toml==0.10.2
tornado==6.4.2
tqdm==4.67.1
typing_extensions==4.12.2
tzdata==2024.2
urllib3==2.3.0
watchdog==6.0.0