arithescientist committed (verified)
Commit c4655e6 · 1 Parent(s): c755e72

Create app.py

Files changed (1)
app.py +442 -0
app.py ADDED
@@ -0,0 +1,442 @@
+ import gradio as gr
+ import numpy as np
+ import pandas as pd
+ import matplotlib.pyplot as plt
+ from scipy import stats
+ from rapidfuzz import fuzz
+
+ #######################################
+ # Debug Logging Function
+ #######################################
+ def debug_print(message):
+     print(message)
+
+ #######################################
+ # Data Generation Functions
+ #######################################
+ def generate_case_data(num_records=5000):
+     debug_print("Generating case data...")
+     lob_list = np.random.choice(["Modern Life", "Xbox", "CAPS", "Devices", "Modern Work"], num_records)
+     issue_types = np.random.choice(["Billing", "Technical", "Hacking", "Service", "Access"], num_records)
+     advocates = np.random.choice(["Alice", "Bob", "Charlie", "Diana", "Eve"], num_records)
+
+     start_date = pd.Timestamp("2021-01-01")
+     end_date = pd.Timestamp("2023-12-31")
+     days_range = (end_date - start_date).days
+
+     # Generate random case dates over 3 years
+     case_dates = start_date + pd.to_timedelta(np.random.randint(0, days_range, num_records), unit='D')
+
+     # Simulated release dates per LOB (set in early 2022)
+     lob_release_dates = {
+         "Modern Life": pd.Timestamp("2022-01-01"),
+         "Xbox": pd.Timestamp("2022-02-01"),
+         "CAPS": pd.Timestamp("2022-03-01"),
+         "Devices": pd.Timestamp("2022-04-01"),
+         "Modern Work": pd.Timestamp("2022-05-01")
+     }
+     release_dates = np.array([lob_release_dates[lob] for lob in lob_list])
+     pre_release = case_dates < release_dates
+
+     CSAT = np.where(pre_release, np.random.normal(80, 5, num_records), np.random.normal(85, 5, num_records))
+     days_to_close = np.where(pre_release, np.random.normal(5, 1, num_records), np.random.normal(4, 1, num_records))
+     first_contact_resolution = np.where(pre_release, np.random.normal(70, 8, num_records), np.random.normal(75, 8, num_records))
+     CPI = np.where(pre_release, np.random.normal(50, 5, num_records), np.random.normal(45, 5, num_records))
+
+     # For the main case dataset, we do NOT include initiative utilization columns.
+     debug_print("Case data generated.")
+     return pd.DataFrame({
+         "serial_number": np.arange(1, num_records + 1),
+         "advocate": advocates,
+         "LOB": lob_list,
+         "issue_type": issue_types,
+         "case_date": case_dates,
+         "CSAT": CSAT,
+         "days_to_close": days_to_close,
+         "first_contact_resolution": first_contact_resolution,
+         "CPI": CPI
+     })
+
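+ # Note: the pre/post distributions above are deliberately offset (CSAT 80 vs 85,
+ # days_to_close 5 vs 4, FCR 70 vs 75, CPI 50 vs 45), so the synthetic data carries
+ # a built-in post-release effect for the dashboard to detect.
+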
+ def generate_advocate_adoption_data():
+     debug_print("Generating advocate adoption data...")
+     advocates = ["Alice", "Bob", "Charlie", "Diana", "Eve"]
+     # Ensure adoption dates fall roughly in mid-2022
+     adoption_dates = {
+         "Symbiosis_adoption_date": ["2022-06-05", "2022-06-10", "2022-06-08", "2022-06-12", "2022-06-07"],
+         "Voice Translation_adoption_date": ["2022-06-03", "2022-06-07", "2022-06-05", "2022-06-09", "2022-06-04"],
+         "NoteHero_adoption_date": ["2022-06-02", "2022-06-06", "2022-06-04", "2022-06-08", "2022-06-03"]
+     }
+     df = pd.DataFrame({
+         "advocate": advocates,
+         "Symbiosis_adoption_date": pd.to_datetime(adoption_dates["Symbiosis_adoption_date"]),
+         "Voice Translation_adoption_date": pd.to_datetime(adoption_dates["Voice Translation_adoption_date"]),
+         "NoteHero_adoption_date": pd.to_datetime(adoption_dates["NoteHero_adoption_date"])
+     })
+     # Convert to date objects
+     for col in ["Symbiosis_adoption_date", "Voice Translation_adoption_date", "NoteHero_adoption_date"]:
+         df[col] = df[col].dt.date
+     debug_print("Advocate adoption data generated.")
+     return df
+
+ def generate_utilization_data():
+     debug_print("Generating initiative utilization data...")
+     # Create a standalone DataFrame with serial numbers and binary flags for each initiative.
+     df = global_case_data[["serial_number"]].copy()
+     # For demonstration, we simulate 50% usage for each initiative.
+     for initiative in ["Voice Translation_utilized", "Symbiosis_utilized", "NoteHero_utilized"]:
+         df[initiative] = np.random.choice([0, 1], size=len(df), p=[0.5, 0.5])
+     debug_print("Initiative utilization data generated.")
+     return df
+
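+ # An uploaded utilization file is expected to follow the same shape as this frame,
+ # e.g. (illustrative only):
+ #   serial_number,Voice Translation_utilized,Symbiosis_utilized,NoteHero_utilized
+ #   1,0,1,0
+ #   2,1,1,0
+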
+ #######################################
+ # Global Data Setup
+ #######################################
+ global_case_data = generate_case_data(num_records=5000)
+ global_advocate_adoption = generate_advocate_adoption_data()
+ global_initiative_utilization = generate_utilization_data()
+ # Ensure that the utilization dataset only contains serial numbers that are in the main dataset.
+ valid_serials = set(global_case_data["serial_number"])
+ global_initiative_utilization = global_initiative_utilization[global_initiative_utilization["serial_number"].isin(valid_serials)]
+ debug_print("Global datasets generated.")
+
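+ # All three frames are regenerated from np.random on every app start, so the exact
+ # numbers shown in the UI will differ between runs; no random seed is set here.
+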
+ #######################################
+ # Helper Calculation Functions
+ #######################################
+ def calculate_throughput(df, start_date, end_date):
+     df_filtered = df.loc[(df["case_date"] >= start_date) & (df["case_date"] <= end_date)]
+     num_cases = len(df_filtered)
+     num_days = (end_date - start_date).days or 1
+     return num_cases / num_days if num_cases > 0 else 0
+
+ def calculate_throughput_per_advocate(df, start_date, end_date):
+     df_filtered = df.loc[(df["case_date"] >= start_date) & (df["case_date"] <= end_date)]
+     if df_filtered.empty:
+         return None
+     throughput = df_filtered.groupby(["LOB", "advocate"]).size() / (end_date - start_date).days
+     return throughput
+
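+ # Throughput here means cases per day over the observed window (case count / days).
+ # calculate_throughput_per_advocate is defined for completeness but is not wired
+ # into the UI below.
+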
+ #######################################
+ # Analysis Functions
+ #######################################
+ def analyze_overall_impact(release_date_str, lob_filter, issue_filter, kpi, one_tailed):
+     debug_print("Running Overall Impact Analysis...")
+     try:
+         # Parse release date as a date object (no .dt on a scalar)
+         release_date = pd.to_datetime(release_date_str).date()
+     except Exception as e:
+         return f"Error parsing release date: {str(e)}", None
+
+     df = global_case_data.copy()
+     df["case_date"] = pd.to_datetime(df["case_date"]).dt.date
+
+     if lob_filter != "All":
+         df = df[df["LOB"] == lob_filter]
+     if issue_filter != "All":
+         df = df[df["issue_type"] == issue_filter]
+     if df.empty:
+         return "No data available for the selected filters.", None
+
+     pre_data = df[df["case_date"] < release_date]
+     post_data = df[df["case_date"] >= release_date]
+     if pre_data.empty or post_data.empty:
+         return "No data available for the selected date range.", None
+
+     if kpi.lower() == "throughput":
+         throughput_pre = calculate_throughput(pre_data, pre_data["case_date"].min(), pre_data["case_date"].max())
+         throughput_post = calculate_throughput(post_data, post_data["case_date"].min(), post_data["case_date"].max())
+         # A t-test on two single aggregate values is undefined, so compare the
+         # distributions of daily case counts instead.
+         pre_daily = pre_data.groupby("case_date").size().values
+         post_daily = post_data.groupby("case_date").size().values
+         t_stat, p_value = stats.ttest_ind(pre_daily, post_daily, equal_var=False)
+     else:
+         pre_vals, post_vals = pre_data[kpi].values, post_data[kpi].values
+         t_stat, p_value = stats.ttest_ind(pre_vals, post_vals, equal_var=False)
+     if one_tailed:
+         # ttest_ind(pre, post) gives a positive t when the pre-release mean is higher,
+         # so this one-tailed check tests for a decrease after release.
+         p_value = p_value / 2
+         significance = "Significant" if p_value < 0.05 and t_stat > 0 else "Not Significant"
+     else:
+         significance = "Significant" if p_value < 0.05 else "Not Significant"
+
+     analysis_text = f"""Overall Impact Analysis for KPI: {kpi}
+ Filters - LOB: {lob_filter}, Issue Type: {issue_filter}
+ Global Release Date: {release_date}
+
+ T-Test: T-Statistic = {t_stat:.3f}, P-Value = {p_value:.3f} ({significance})
+ """
+     # Here you could also add additional aggregated results if needed.
+     fig, ax = plt.subplots(figsize=(6, 4))
+     if kpi.lower() == "throughput":
+         # For throughput, show a simple bar graph with aggregated throughput (for demonstration)
+         ax.bar(["Pre", "Post"], [throughput_pre, throughput_post], color=["blue", "green"])
+         ax.set_ylabel("Throughput (cases/day)")
+     else:
+         ax.boxplot([pre_data[kpi].values, post_data[kpi].values], labels=["Pre", "Post"])
+         ax.set_ylabel(kpi)
+     ax.set_title("Overall Impact Analysis")
+     plt.tight_layout()
+     plt.close(fig)
+     return analysis_text, fig
+
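+ # Aggregated per-advocate analysis: for each advocate, cases are split into pre/post
+ # either by a per-advocate adoption date or by a per-case utilization flag, and the
+ # pooled pre vs post values are then compared with a Welch t-test.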
+ def analyze_all_advocates_impact(method, initiative, lob_filter, issue_filter, kpi, one_tailed,
+                                  adoption_file, adoption_name_col, adoption_date_col, utilization_file):
+     try:
+         debug_print("🚀 Running Advocate Impact Analysis...")
+         df = global_case_data.copy()
+
+         if lob_filter != "All":
+             df = df[df["LOB"] == lob_filter]
+         if issue_filter != "All":
+             df = df[df["issue_type"] == issue_filter]
+         if df.empty:
+             debug_print("⚠ No cases available for the selected filters.")
+             return "No data available for the selected filters.", None, None
+
+         df["case_date"] = pd.to_datetime(df["case_date"], utc=True, errors="coerce").dt.normalize().dt.date
+         debug_print(f"✅ Data filtered. {len(df)} cases remain.")
+         debug_print(f"🗓 Min case date: {df['case_date'].min()}, Max case date: {df['case_date'].max()}")
+
+         # For Initiative Utilization, use standalone DF
+         utilization_df = global_initiative_utilization.copy()
+         if method == "Initiative Utilization" and utilization_file is not None:
+             # Depending on the Gradio version, gr.File may pass a filepath string or a
+             # temp-file wrapper exposing .name; accept both.
+             util_path = getattr(utilization_file, "name", utilization_file)
+             try:
+                 util_df = pd.read_csv(util_path)
+             except Exception:
+                 try:
+                     util_df = pd.read_excel(util_path)
+                 except Exception as e:
+                     debug_print(f"❌ Error reading utilization file: {str(e)}")
+                     return f"Error reading utilization file: {str(e)}", None, None
+             if "serial_number" not in util_df.columns:
+                 debug_print("⚠ The uploaded utilization file must have a 'serial_number' column.")
+                 return "The uploaded utilization file must have a 'serial_number' column.", None, None
+             utilization_df = util_df.copy()
+             debug_print(f"✅ Uploaded initiative utilization file processed: {utilization_df.shape[0]} rows.")
+         else:
+             debug_print("📂 No initiative utilization file uploaded; using default global initiative utilization data.")
+
+         # Build adoption mapping for Adoption Date method
+         adoption_mapping = {}
+         if method == "Adoption Date" and adoption_file is not None:
+             # As above, accept either a filepath string or a file wrapper from gr.File.
+             adoption_path = getattr(adoption_file, "name", adoption_file)
+             try:
+                 uploaded_df = pd.read_csv(adoption_path)
+             except Exception:
+                 try:
+                     uploaded_df = pd.read_excel(adoption_path)
+                 except Exception as e:
+                     debug_print(f"❌ Error reading adoption file: {str(e)}")
+                     return f"Error reading adoption file: {str(e)}", None, None
+             if adoption_name_col not in uploaded_df.columns or adoption_date_col not in uploaded_df.columns:
+                 debug_print("⚠ Specified columns not found in the uploaded adoption file.")
+                 return "Specified columns not found in the uploaded adoption file.", None, None
+             debug_print("📂 Processing uploaded adoption file...")
+             for idx, row in uploaded_df.iterrows():
+                 name_uploaded = str(row[adoption_name_col])
+                 adoption_date = pd.to_datetime(row[adoption_date_col], utc=True, errors="coerce")
+                 if pd.isnull(adoption_date):
+                     debug_print(f"⚠ Skipping invalid adoption date for {name_uploaded}")
+                     continue
+                 adoption_date = adoption_date.date()
+                 # Map using fuzzy matching on the default global adoption names
+                 for adv in df["advocate"].unique():
+                     score = fuzz.ratio(name_uploaded.lower(), adv.lower())
+                     if score >= 95:
+                         adoption_mapping[adv] = min(adoption_mapping.get(adv, adoption_date), adoption_date)
+             debug_print(f"✅ Uploaded adoption file processed. Mapped {len(adoption_mapping)} advocates.")
+         else:
+             debug_print("📂 No adoption file uploaded; using default global adoption data.")
+
+         # Normalize global adoption dates
+         for col in ["Symbiosis_adoption_date", "Voice Translation_adoption_date", "NoteHero_adoption_date"]:
+             global_advocate_adoption[col] = pd.to_datetime(global_advocate_adoption[col], utc=True, errors="coerce")
+             global_advocate_adoption[col] = global_advocate_adoption[col].apply(lambda x: x.date() if pd.notnull(x) else None)
+
+         all_pre_vals, all_post_vals = [], []
+         results = []
+
+         debug_print("🔎 Processing advocates...")
+
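+         # Per-advocate pre/post split. Fuzzy name matches from an uploaded adoption
+         # file (rapidfuzz ratio >= 95, i.e. near-exact) take precedence over the
+         # built-in adoption dates.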
+         for adv in df["advocate"].unique():
+             try:
+                 df_adv = df[df["advocate"] == adv]
+
+                 if method == "Adoption Date":
+                     if adv in adoption_mapping:
+                         adoption_date = adoption_mapping[adv]
+                     else:
+                         col_name = initiative + "_adoption_date"
+                         adoption_series = global_advocate_adoption.loc[global_advocate_adoption["advocate"] == adv, col_name]
+                         if adoption_series.empty or pd.isnull(adoption_series.values[0]):
+                             debug_print(f"⚠ Skipping {adv}: No valid adoption date found.")
+                             continue
+                         adoption_date = adoption_series.values[0]
+
+                     if pd.isnull(adoption_date):
+                         debug_print(f"⚠ Skipping {adv}: Adoption date is NULL after conversion.")
+                         continue
+
+                     debug_print(f"🔎 Processing {adv}: Adoption Date = {adoption_date}")
+
+                     pre_data = df_adv[df_adv["case_date"] < adoption_date]
+                     post_data = df_adv[df_adv["case_date"] >= adoption_date]
+
+                     debug_print(f" {adv}: Pre-data count = {len(pre_data)}, Post-data count = {len(post_data)}")
+
+                     if pre_data.empty:
+                         debug_print(f"⚠ Skipping {adv}: No pre-adoption cases.")
+                         continue
+                     if post_data.empty:
+                         debug_print(f"⚠ Skipping {adv}: No post-adoption cases.")
+                         continue
+
+                     slice_info = f"Adoption Date: {adoption_date}"
+
+                 elif method == "Initiative Utilization":
+                     col_name = initiative + "_utilized"
+                     df_adv = df_adv.copy()
+                     df_adv = df_adv.merge(utilization_df[["serial_number", col_name]], on="serial_number", how="left")
+                     df_adv[col_name] = df_adv[col_name].fillna(0)
+                     pre_data = df_adv[df_adv[col_name] == 0]
+                     post_data = df_adv[df_adv[col_name] == 1]
+                     slice_info = "Initiative Utilization"
+                 else:
+                     continue
+
+                 if pre_data.empty or post_data.empty:
+                     debug_print(f"⚠ Advocate {adv}: Not enough data; skipping.")
+                     continue
+
+                 if kpi.lower() == "throughput":
+                     pre_val = calculate_throughput(pre_data, pre_data["case_date"].min(), pre_data["case_date"].max())
+                     post_val = calculate_throughput(post_data, post_data["case_date"].min(), post_data["case_date"].max())
+                 else:
+                     pre_val = np.mean(pre_data[kpi].values)
+                     post_val = np.mean(post_data[kpi].values)
+
+                 pct_change = ((post_val - pre_val) / pre_val) * 100 if pre_val else np.nan
+                 results.append({
+                     "advocate": adv,
+                     "Pre_Mean": pre_val,
+                     "Post_Mean": post_val,
+                     "Percent_Change": pct_change,
+                     "Slice_Info": slice_info
+                 })
+
+                 if kpi.lower() == "throughput":
+                     # "throughput" is not a per-case column, so pool the per-advocate
+                     # throughput values rather than indexing a non-existent column.
+                     all_pre_vals.append(pre_val)
+                     all_post_vals.append(post_val)
+                 else:
+                     all_pre_vals.extend(pre_data[kpi].values)
+                     all_post_vals.extend(post_data[kpi].values)
+
+                 debug_print(f"✅ Processed {adv}: {pct_change:.2f}% change.")
+             except Exception as e:
+                 debug_print(f"❌ Error processing {adv}: {str(e)}")
+
+         if not results:
+             debug_print("⚠ No valid advocates found for analysis.")
+             return "No valid advocates found for analysis. Check the case date ranges.", None, None
+
+         results_df = pd.DataFrame(results).sort_values(by="Percent_Change", ascending=False)
+
+         # Perform aggregated T-Test
+         try:
+             if len(all_pre_vals) > 1 and len(all_post_vals) > 1:
+                 t_stat, p_value = stats.ttest_ind(all_pre_vals, all_post_vals, equal_var=False)
+                 if one_tailed:
+                     # Positive t means the pre-adoption mean is higher (scipy's convention),
+                     # so this one-tailed check tests for a decrease after adoption.
+                     p_value = p_value / 2
+                     significance = "Statistically Significant" if p_value < 0.05 and t_stat > 0 else "Not Statistically Significant"
+                 else:
+                     significance = "Statistically Significant" if p_value < 0.05 else "Not Statistically Significant"
+             else:
+                 t_stat, p_value = np.nan, np.nan
+                 significance = "Insufficient Data for Statistical Test"
+         except Exception as e:
+             debug_print(f"❌ Error performing T-Test: {str(e)}")
+             return f"Error performing T-Test: {str(e)}", None, None
+
+         pre_mean = np.mean(all_pre_vals) if len(all_pre_vals) > 0 else np.nan
+         post_mean = np.mean(all_post_vals) if len(all_post_vals) > 0 else np.nan
+         overall_pct_change = ((post_mean - pre_mean) / pre_mean) * 100 if pre_mean else np.nan
+
+         overall_summary = f"""📊 Aggregated Advocate Impact Analysis using method '{method}' for initiative '{initiative}' on KPI '{kpi}'.
+ Number of advocates analyzed: {len(results_df)}
+
+ Aggregated Pre vs Post Analysis:
+ - Pre-Adoption Mean: {pre_mean:.2f}
+ - Post-Adoption Mean: {post_mean:.2f}
+ - Percent Change: {overall_pct_change:.2f}%
+
+ T-Test Results:
+ - T-Statistic: {t_stat:.3f}
+ - P-Value: {p_value:.3f}
+ - Result: {significance}
+ """
+         fig, ax = plt.subplots(figsize=(6, 4))
+         ax.bar(["Pre-Adoption", "Post-Adoption"], [pre_mean, post_mean], color=["blue", "green"])
+         ax.set_title(f"Aggregated Impact of {initiative} on {kpi}")
+         ax.set_ylabel(kpi)
+         plt.tight_layout()
+         plt.close(fig)
+
+         debug_print("🎯 Advocate Impact Analysis completed.")
+         return overall_summary, fig, results_df
+
+     except Exception as e:
+         debug_print(f"❌ Fatal Error in Function: {str(e)}")
+         return f"Fatal Error: {str(e)}", None, None
+
+
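+ # ----- Gradio UI: three tabs (Overall Impact, Advocate Impact, Debug Logs), each
+ # wiring its button to the matching analysis function above. -----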
+ with gr.Blocks() as demo:
+     gr.Markdown("# Impact Analysis Dashboard")
+
+     with gr.Tabs():
+         # Tab 1: Overall Impact Analysis
+         with gr.TabItem("Overall Impact Analysis"):
+             gr.Markdown("### Overall Impact Analysis (Global Release Date)")
+             overall_release_date = gr.Textbox(label="Global Release Date (YYYY-MM-DD)", placeholder="e.g., 2022-01-15")
+             overall_lob = gr.Dropdown(choices=["All", "Modern Life", "Xbox", "CAPS", "Devices", "Modern Work"],
+                                       label="Filter by LOB", value="All")
+             overall_issue = gr.Dropdown(choices=["All", "Billing", "Technical", "Hacking", "Service", "Access"],
+                                         label="Filter by Issue Type", value="All")
+             overall_kpi = gr.Dropdown(choices=["CSAT", "days_to_close", "first_contact_resolution", "CPI", "throughput"],
+                                       label="Select KPI", value="CSAT")
+             one_tailed_overall = gr.Checkbox(label="Use One-Tailed T-Test")
+             overall_btn = gr.Button("Analyze Overall Impact")
+             overall_output = gr.Textbox(label="Overall Impact Analysis Results")
+             overall_plot = gr.Plot(label="Overall Impact Graph")
+
+             overall_btn.click(analyze_overall_impact,
+                               inputs=[overall_release_date, overall_lob, overall_issue, overall_kpi, one_tailed_overall],
+                               outputs=[overall_output, overall_plot])
+
+         # Tab 2: Advocate Impact Analysis
+         with gr.TabItem("Advocate Impact Analysis"):
+             gr.Markdown("### Advocate Impact Analysis (Aggregated Pre vs Post)")
+             adoption_method = gr.Radio(choices=["Adoption Date", "Initiative Utilization"],
+                                        label="Method", value="Adoption Date")
+             initiative_select = gr.Dropdown(choices=["Symbiosis", "Voice Translation", "NoteHero"],
+                                             label="Select Initiative", value="Symbiosis")
+             adv_lob = gr.Dropdown(choices=["All", "Modern Life", "Xbox", "CAPS", "Devices", "Modern Work"],
+                                   label="Filter by LOB", value="All")
+             adv_issue = gr.Dropdown(choices=["All", "Billing", "Technical", "Hacking", "Service", "Access"],
+                                     label="Filter by Issue Type", value="All")
+             adv_kpi = gr.Dropdown(choices=["CSAT", "days_to_close", "first_contact_resolution", "CPI", "throughput"],
+                                   label="Select KPI", value="CSAT")
+             one_tailed_adv = gr.Checkbox(label="Use One-Tailed T-Test")
+             with gr.Accordion("Optional File Uploads (Click to expand)", open=False):
+                 gr.Markdown("Upload is optional. For the Adoption Date method, upload a CSV/Excel file with two columns (Advocate Name and Adoption Date). For the Initiative Utilization method, upload a CSV/Excel file with a 'serial_number' column.")
+                 adoption_file = gr.File(label="Upload Adoption Date File (optional)")
+                 adoption_name_col = gr.Textbox(label="Adoption File: Advocate Name Column", placeholder="e.g., Name")
+                 adoption_date_col = gr.Textbox(label="Adoption File: Adoption Date Column", placeholder="e.g., AdoptionDate")
+                 utilization_file = gr.File(label="Upload Initiative Utilization File (optional)")
+             adv_btn = gr.Button("Analyze Advocate Impact")
+             adv_overall_output = gr.Textbox(label="Aggregated Advocate Impact Summary")
+             adv_plot = gr.Plot(label="Aggregated Advocate Impact Graph")
+             adv_table = gr.Dataframe(label="Advocate Impact Details")
+
+             adv_btn.click(analyze_all_advocates_impact,
+                           inputs=[adoption_method, initiative_select, adv_lob, adv_issue, adv_kpi, one_tailed_adv,
+                                   adoption_file, adoption_name_col, adoption_date_col, utilization_file],
+                           outputs=[adv_overall_output, adv_plot, adv_table])
+
+         # Optional Debug Logs Tab
+         with gr.TabItem("Debug Logs"):
+             gr.Markdown("### Debug Logs")
+             debug_btn = gr.Button("Refresh Debug Logs")
+             debug_output = gr.Textbox(label="Debug Logs", lines=15)
+             debug_btn.click(lambda: "Check console output for debug logs.", inputs=[], outputs=[debug_output])
+
+ demo.launch()
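+ # To run locally (assumed environment; dependencies are not pinned by this file):
+ #   pip install gradio pandas numpy scipy matplotlib rapidfuzz
+ #   python app.py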