import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import gradio as gr
import io
import base64
import tempfile
import os
from datetime import datetime


# --- Matplotlib Plot to Base64 ---
def fig_to_base64(fig):
    """Convert a Matplotlib figure to a base64-encoded PNG data URI.

    The figure is closed after rendering to free memory, so callers must
    not reuse `fig` afterwards.

    Args:
        fig: A Matplotlib Figure instance.

    Returns:
        str: ``data:image/png;base64,...`` string suitable for an <img> tag.
    """
    buf = io.BytesIO()
    fig.savefig(buf, format='png', bbox_inches='tight')
    plt.close(fig)  # Close the figure to free memory
    buf.seek(0)
    img_str = base64.b64encode(buf.read()).decode('utf-8')
    return f"data:image/png;base64,{img_str}"


# --- EDA Helper Functions (Adapted from Colab) ---
def get_initial_inspection_html(df):
    """Generate the 'Initial Data Inspection' HTML section.

    Covers head/tail previews, shape, dtype/non-null info, and column names.

    Args:
        df (pd.DataFrame): Dataset to inspect.

    Returns:
        str: HTML fragment.
    """
    html = "<h2>1. Initial Data Inspection</h2>"
    # Head
    html += "<h3>(a) First 5 Rows (Head):</h3>"
    html += df.head().to_html(classes='table table-striped', border=1)
    # Tail
    html += "<h3>(b) Last 5 Rows (Tail):</h3>"
    html += df.tail().to_html(classes='table table-striped', border=1)
    # Shape
    html += "<h3>(c) Dataset Shape:</h3>"
    html += f"<p>Number of Rows: {df.shape[0]}</p>"
    html += f"<p>Number of Columns: {df.shape[1]}</p>"
    # Info -- df.info() writes to a buffer rather than returning a string
    html += "<h3>(d) Data Types and Non-Null Counts (Info):</h3>"
    buffer = io.StringIO()
    df.info(buf=buffer)
    info_str = buffer.getvalue()
    html += f"<pre>{info_str}</pre>"
    # Column Names
    html += "<h3>(e) Column Names:</h3>"
    html += f"<p>{list(df.columns)}</p>"
    return html
def get_descriptive_stats_html(df):
    """Generate the 'Descriptive Statistics' HTML section.

    Renders ``df.describe()`` tables for numerical and for
    categorical/object columns, with a friendly message when a group of
    columns is absent.

    Args:
        df (pd.DataFrame): Dataset to summarize.

    Returns:
        str: HTML fragment.
    """
    html = "<h2>2. Descriptive Statistics</h2>"
    # Numerical
    html += "<h3>(a) Numerical Columns Statistics:</h3>"
    try:
        # NOTE: df.describe(include=np.number) raises ValueError when the
        # frame has no numeric columns, so check column presence first
        # instead of relying on an empty result.
        if df.select_dtypes(include=np.number).columns.empty:
            html += "<p>No numerical columns found.</p>"
        else:
            num_stats = df.describe(include=np.number)
            html += num_stats.to_html(classes='table table-striped',
                                      border=1, float_format='%.2f')
    except Exception as e:
        html += f"<p>Error generating numerical stats: {e}</p>"
    # Categorical
    html += "<h3>(b) Categorical/Object Columns Statistics:</h3>"
    try:
        if df.select_dtypes(include=['object', 'category']).columns.empty:
            html += "<p>No categorical/object columns found.</p>"
        else:
            cat_stats = df.describe(include=['object', 'category'])
            html += cat_stats.to_html(classes='table table-striped', border=1)
    except Exception as e:
        html += f"<p>Error generating categorical stats: {e}</p>"
    return html
def identify_column_types_html(df):
    """Generate the 'Identifying Column Types' HTML section.

    Classifies columns as numerical, categorical, datetime, boolean, or
    other, and also returns the numerical/categorical lists so later
    analysis steps can reuse them.

    Args:
        df (pd.DataFrame): Dataset to classify.

    Returns:
        tuple[str, list, list]: (html, numerical_cols, categorical_cols)
    """
    html = "<h2>3. Identifying Column Types</h2>"
    numerical_cols = df.select_dtypes(include=np.number).columns.tolist()
    categorical_cols = df.select_dtypes(include=['object', 'category']).columns.tolist()
    datetime_cols = df.select_dtypes(include=['datetime', 'datetime64']).columns.tolist()
    boolean_cols = df.select_dtypes(include=['bool']).columns.tolist()
    # Anything not caught by the selectors above (e.g. timedelta, complex)
    other_cols = df.columns.difference(
        numerical_cols + categorical_cols + datetime_cols + boolean_cols).tolist()
    html += f"<p>Numerical Columns ({len(numerical_cols)}): {numerical_cols}</p>"
    html += f"<p>Categorical Columns ({len(categorical_cols)}): {categorical_cols}</p>"
    html += f"<p>DateTime Columns ({len(datetime_cols)}): {datetime_cols}</p>"
    html += f"<p>Boolean Columns ({len(boolean_cols)}): {boolean_cols}</p>"
    if other_cols:
        html += f"<p>Other/Unclassified Columns ({len(other_cols)}): {other_cols}</p>"
    # Store for later use (return them)
    return html, numerical_cols, categorical_cols  # Return lists as well
def analyze_missing_values_html(df):
    """Generate the 'Missing Value Analysis' HTML section.

    Shows a count/percentage table for columns with missing values and a
    seaborn heatmap of null positions; reports success when nothing is
    missing.

    Args:
        df (pd.DataFrame): Dataset to analyze.

    Returns:
        str: HTML fragment.
    """
    html = "<h2>4. Missing Value Analysis</h2>"
    missing_values = df.isnull().sum()
    missing_percent = (missing_values / len(df)) * 100
    missing_table = pd.concat([missing_values, missing_percent], axis=1,
                              keys=['Missing Count', 'Missing (%)'])
    missing_table = missing_table[missing_table['Missing Count'] > 0].sort_values(
        'Missing (%)', ascending=False)
    if not missing_table.empty:
        html += "<h3>(a) Columns with Missing Values:</h3>"
        html += missing_table.to_html(classes='table table-striped',
                                      border=1, float_format='%.2f')
        # Heatmap
        html += "<h3>(b) Missing Values Heatmap:</h3>"
        try:
            fig, ax = plt.subplots(figsize=(15, 7))
            sns.heatmap(df.isnull(), cbar=False, cmap='viridis', ax=ax)
            ax.set_title('Heatmap of Missing Values per Column')
            img_str = fig_to_base64(fig)
            html += f'<img src="{img_str}" alt="Missing Values Heatmap">'
            html += "<p>Consider strategies like imputation or deletion based on the results.</p>"
        except Exception as e:
            html += f"<p>Could not generate missing value heatmap. Error: {e}</p>"
    else:
        html += "<p>No missing values found in the dataset. Great!</p>"
    return html
def analyze_univariate_numerical_html(df, numerical_cols):
    """Generate the univariate-numerical HTML section.

    For each numerical column, renders a histogram (with KDE) and a box
    plot side by side, plus a skewness readout.

    Args:
        df (pd.DataFrame): Dataset.
        numerical_cols (list[str]): Numerical column names to analyze.

    Returns:
        str: HTML fragment.
    """
    html = "<h2>5. Univariate Analysis (Numerical Columns)</h2>"
    html += ("<p>Analyzing distributions of individual numerical features "
             "using Histograms and Box Plots.</p>")
    if not numerical_cols:
        html += "<p>No numerical columns found to analyze.</p>"
        return html
    for col in numerical_cols:
        html += f"<h3>Analyzing: '{col}'</h3>"
        try:
            # Create subplots: 1 row, 2 columns
            fig, axes = plt.subplots(1, 2, figsize=(16, 5))
            # Plot Histogram
            sns.histplot(df[col], kde=True, bins=30, ax=axes[0])
            axes[0].set_title(f'Histogram of {col}')
            axes[0].set_xlabel(col)
            axes[0].set_ylabel('Frequency')
            # Plot Box Plot
            sns.boxplot(y=df[col], ax=axes[1])
            axes[1].set_title(f'Box Plot of {col}')
            axes[1].set_ylabel(col)
            plt.tight_layout()
            img_str = fig_to_base64(fig)
            html += f'<img src="{img_str}" alt="Plots for {col}">'
            # Skewness: |skew| > 0.5 is flagged as moderately skewed
            skewness = df[col].skew()
            html += f"<p>Skewness: {skewness:.2f} "
            if skewness > 0.5:
                html += "(Moderately Right-Skewed)"
            elif skewness < -0.5:
                html += "(Moderately Left-Skewed)"
            else:
                html += "(Approximately Symmetric)"
            html += "</p><hr>"
        except Exception as e:
            html += f"<p>Could not generate plots for {col}. Error: {e}</p><hr>"
    return html
def analyze_univariate_categorical_html(df, categorical_cols):
    """Generate the univariate-categorical HTML section.

    For each categorical column, renders a horizontal count plot when the
    cardinality is manageable; otherwise lists the top 15 value counts.

    Args:
        df (pd.DataFrame): Dataset.
        categorical_cols (list[str]): Categorical column names to analyze.

    Returns:
        str: HTML fragment.
    """
    html = "<h2>6. Univariate Analysis (Categorical Columns)</h2>"
    html += ("<p>Analyzing frequency distributions of individual categorical "
             "features using Count Plots.</p>")
    if not categorical_cols:
        html += "<p>No categorical/object columns found to analyze.</p>"
        return html
    plot_threshold = 50  # Max unique values for plotting
    for col in categorical_cols:
        html += f"<h3>Analyzing: '{col}'</h3>"
        try:
            unique_count = df[col].nunique()
            html += f"<p>Number of Unique Values: {unique_count}</p>"
            if unique_count == 0:
                html += "<p>Column has no values.</p><hr>"
                continue
            elif unique_count > plot_threshold:
                # Too many categories for a readable plot — show a text summary.
                html += (f"<p>Skipping plot as unique value count ({unique_count}) "
                         f"exceeds threshold ({plot_threshold}). "
                         f"Showing Top 15 value counts instead.</p>")
                top_15_counts = df[col].value_counts().head(15)
                html += "<pre>" + top_15_counts.to_string() + "</pre>"
            else:
                # Plot Count Plot; height scales with category count
                fig, ax = plt.subplots(figsize=(10, max(5, unique_count * 0.3)))
                plot_order = df[col].value_counts().index
                sns.countplot(y=df[col], order=plot_order, palette='viridis', ax=ax)
                ax.set_title(f'Frequency Count of {col}')
                ax.set_xlabel('Count')
                ax.set_ylabel(col)
                plt.tight_layout()
                img_str = fig_to_base64(fig)
                html += f'<img src="{img_str}" alt="Count Plot for {col}">'
        except Exception as e:
            html += f"<p>Could not generate plot/counts for {col}. Error: {e}</p><hr>"
    return html
def analyze_bivariate_numerical_html(df, numerical_cols):
    """Generate the numerical-vs-numerical bivariate HTML section.

    Renders a correlation-matrix heatmap, and a seaborn pair plot when the
    feature count is small enough (pair plots are O(n^2) subplots).

    Args:
        df (pd.DataFrame): Dataset.
        numerical_cols (list[str]): Numerical column names to analyze.

    Returns:
        str: HTML fragment.
    """
    html = "<h2>7. Bivariate Analysis (Numerical vs. Numerical)</h2>"
    html += ("<p>Analyzing relationships between pairs of numerical features "
             "using Correlation Matrix and Pair Plots.</p>")
    if len(numerical_cols) < 2:
        html += "<p>Need at least two numerical columns for this analysis.</p>"
        return html
    # Correlation Heatmap
    html += "<h3>(a) Correlation Matrix Heatmap:</h3>"
    try:
        correlation_matrix = df[numerical_cols].corr()
        fig, ax = plt.subplots(figsize=(12, 10))
        sns.heatmap(correlation_matrix, annot=True, cmap='coolwarm',
                    fmt=".2f", linewidths=.5, ax=ax)
        ax.set_title('Correlation Matrix of Numerical Features')
        img_str = fig_to_base64(fig)
        html += f'<img src="{img_str}" alt="Correlation Matrix">'
        html += ("<p>Interpretation: Values close to +1 indicate strong positive "
                 "linear correlation, close to -1 indicate strong negative linear "
                 "correlation, close to 0 indicate weak or no linear correlation.</p>")
    except Exception as e:
        html += f"<p>Could not generate correlation heatmap. Error: {e}</p>"
    # Pair Plot
    pairplot_threshold = 7  # Limit features for pairplot
    html += f"<h3>(b) Pair Plot (Threshold: {pairplot_threshold} features):</h3>"
    if len(numerical_cols) <= pairplot_threshold:
        html += (f"<p>Generating Pair Plot for {len(numerical_cols)} numerical "
                 f"features... (May take a moment)</p>")
        try:
            pair_plot_fig = sns.pairplot(df[numerical_cols], diag_kind='kde')
            pair_plot_fig.fig.suptitle('Pair Plot of Numerical Features', y=1.02)
            # Convert the PairGrid object's underlying figure to base64
            img_str = fig_to_base64(pair_plot_fig.fig)
            html += f'<img src="{img_str}" alt="Pair Plot">'
        except Exception as e:
            html += f"<p>Could not generate pair plot. Error: {e}</p>"
            html += ("<p>Pairplots can sometimes fail with certain data types or "
                     "distributions, or if memory is limited.</p>")
    else:
        html += (f"<p>Skipping Pair Plot because the number of numerical features "
                 f"({len(numerical_cols)}) exceeds the threshold "
                 f"({pairplot_threshold}).</p>")
    return html
def analyze_bivariate_num_cat_html(df, numerical_cols, categorical_cols):
    """Generate the numerical-vs-categorical bivariate HTML section.

    Box-plots each numerical column against each low-cardinality
    categorical column (<= 20 unique values).

    Args:
        df (pd.DataFrame): Dataset.
        numerical_cols (list[str]): Numerical column names.
        categorical_cols (list[str]): Categorical column names.

    Returns:
        str: HTML fragment.
    """
    html = "<h2>8. Bivariate Analysis (Numerical vs. Categorical)</h2>"
    html += ("<p>Analyzing distributions of numerical features across different "
             "categories using Box Plots.</p>")
    if not numerical_cols or not categorical_cols:
        html += "<p>Need both numerical and categorical columns for this analysis.</p>"
        return html
    cat_nunique_threshold = 20
    cats_to_analyze = [col for col in categorical_cols
                       if df[col].nunique() <= cat_nunique_threshold]
    if not cats_to_analyze:
        html += (f"<p>No categorical columns with a reasonable number of unique "
                 f"values (&lt;= {cat_nunique_threshold}) found for plotting "
                 f"against numerical features.</p>")
        return html
    html += (f"<p>Analyzing numerical columns against these categorical columns "
             f"(max {cat_nunique_threshold} unique values): {cats_to_analyze}</p>")
    for num_col in numerical_cols:
        for cat_col in cats_to_analyze:
            html += f"<h3>Analyzing: '{num_col}' vs '{cat_col}'</h3>"
            try:
                # Check if category column has data
                if df[cat_col].isnull().all() or df[cat_col].nunique() == 0:
                    html += (f"<p>Skipping plot: Categorical column '{cat_col}' "
                             f"has no valid data or only one unique value after "
                             f"dropping NaNs.</p><hr>")
                    continue
                fig, ax = plt.subplots(figsize=(12, 6))
                # Order categories deterministically; drop NaNs before sorting
                sns.boxplot(x=df[cat_col], y=df[num_col], palette='viridis', ax=ax,
                            order=sorted(df[cat_col].dropna().unique()))
                ax.set_title(f'Box Plot of {num_col} by {cat_col}')
                ax.set_xlabel(cat_col)
                ax.set_ylabel(num_col)
                # Rotate x-axis labels if they are numerous
                if df[cat_col].nunique() > 5:
                    plt.xticks(rotation=45, ha='right')
                plt.tight_layout()
                img_str = fig_to_base64(fig)
                html += f'<img src="{img_str}" alt="Box plot of {num_col} by {cat_col}">'
            except Exception as e:
                html += (f"<p>Could not generate box plot for '{num_col}' vs "
                         f"'{cat_col}'. Error: {e}</p><hr>")
    return html
def get_analysis_summary_html(df, missing_table_html):
    """Generate the 'Summary & Next Steps' HTML section.

    Args:
        df (pd.DataFrame): Dataset (used for shape-based observations).
        missing_table_html (str): Output of analyze_missing_values_html;
            inspected by substring match to report whether missing values
            were detected.

    Returns:
        str: HTML fragment.
    """
    html = "<h2>9. Analysis Summary &amp; Next Steps</h2>"
    html += ("<p>This automated analysis provided a first look at the dataset's "
             "structure, content, distributions, and basic relationships.</p>")
    html += "<h3>Key Observations (Auto-Generated Summary):</h3>"
    # "No missing values found" is the exact phrase emitted by
    # analyze_missing_values_html when nothing is missing.
    has_missing = "No missing values found" not in missing_table_html
    missing_note = ("Missing values were detected - see Section 4 for details."
                    if has_missing else "No missing values were detected.")
    html += (f"<ul>"
             f"<li>The dataset contains {df.shape[0]} rows and {df.shape[1]} columns.</li>"
             f"<li>{missing_note}</li>"
             f"</ul>")
    html += "<h3>Potential Next Steps:</h3>"
    html += "<ol>"
    html += ("<li><b>Data Cleaning:</b> Address missing values (imputation/deletion), "
             "correct data types if needed, handle outliers (if appropriate).</li>")
    html += ("<li><b>Feature Engineering:</b> Create new features from existing ones "
             "(e.g., extracting date parts, combining categories).</li>")
    html += ("<li><b>Deeper Analysis:</b> Explore relationships further (statistical "
             "tests, different plots, multivariate analysis).</li>")
    html += ("<li><b>Domain-Specific Analysis:</b> Apply subject matter expertise "
             "for targeted questions.</li>")
    html += ("<li><b>Modeling:</b> Prepare data and build machine learning models "
             "if applicable.</li>")
    html += "</ol>"
    return html
def get_bonus_guide_html():
    """Generate the static 'how to read any dataset' bonus-guide HTML section."""
    html = """
<h2>Bonus: How to Understand &amp; Read Any Dataset</h2>
<p>Approaching a new dataset systematically:</p>
<ol>
  <li><b>Understand the Context:</b> Source, purpose, data dictionary, timeframe.</li>
  <li><b>Load and Get a First Look:</b> Use tools like pandas, check dimensions (<code>.shape</code>), peek at data (<code>.head()</code>, <code>.tail()</code>).</li>
  <li><b>Examine Metadata and Structure:</b> Check column names (<code>.columns</code>), data types (<code>.info()</code>), memory usage. Correct types if necessary.</li>
  <li><b>Summarize the Data:</b> Use <code>.describe()</code> for numerical (mean, median, std, min/max, quartiles) and categorical (unique count, top value, frequency) summaries. Check <code>.value_counts()</code> for specific categories.</li>
  <li><b>Handle Missing Data:</b> Identify (<code>.isnull().sum()</code>) and quantify missing values. Decide on a strategy (deletion, imputation).</li>
  <li><b>Visualize (EDA):</b> Plot distributions and relationships.</li>
  <li><b>Ask Questions:</b> Formulate specific questions based on context and initial findings.</li>
  <li><b>Iterate and Document:</b> Data understanding is iterative. Document findings and decisions.</li>
</ol>
"""
    return html
# --- Main Gradio Function ---
def generate_eda_report(uploaded_file):
    """
    Main function called by Gradio. Takes an uploaded CSV file, performs EDA,
    and returns the path to a generated HTML report file.

    Args:
        uploaded_file: Value from gr.File — either a tempfile-like object
            exposing `.name` or (newer Gradio) a plain path string.

    Returns:
        str: Filesystem path of the generated .html report.

    Raises:
        gr.Error: For user-facing failures (no file, too large, bad CSV, ...).
    """
    start_time = datetime.now()
    if uploaded_file is None:
        raise gr.Error("No file uploaded! Please upload a CSV file.")
    try:
        # Set visualization styles globally for the run
        sns.set(style="whitegrid")
        plt.rcParams['figure.figsize'] = (12, 6)
        pd.set_option('display.max_columns', 50)
        pd.set_option('display.float_format', lambda x: '%.2f' % x)

        # Accept both a str path and a tempfile-like object from gr.File.
        file_path = uploaded_file if isinstance(uploaded_file, str) else uploaded_file.name

        # Check file size (100 MB limit)
        file_size_mb = os.path.getsize(file_path) / (1024 * 1024)
        if file_size_mb > 100:
            raise gr.Error(f"File size ({file_size_mb:.2f} MB) exceeds the 100 MB limit.")

        # Read the CSV file from the temporary path provided by Gradio
        df = pd.read_csv(file_path)

        # Start building the HTML report
        html_content = (
            "<!DOCTYPE html><html><head><meta charset='utf-8'>"
            "<title>Automated EDA Report</title>"
            "<style>"
            "body{font-family:sans-serif;margin:2em;}"
            "img{max-width:100%;height:auto;}"
            ".table{border-collapse:collapse;} .table th,.table td{padding:4px 8px;}"
            "</style></head><body>"
            "<h1>\U0001F4CA Automated Data Explorer & Visualizer Report \U0001F4CA</h1>"
        )
        report_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        html_content += f"<p>Report generated on: {report_time}</p>"
        html_content += f"<p>Input file: {os.path.basename(file_path)}</p>"

        # --- Run EDA Steps ---
        html_content += get_initial_inspection_html(df) + "<hr>"
        html_content += get_descriptive_stats_html(df) + "<hr>"
        col_types_html, num_cols, cat_cols = identify_column_types_html(df)
        html_content += col_types_html + "<hr>"
        missing_html = analyze_missing_values_html(df)
        html_content += missing_html + "<hr>"
        html_content += analyze_univariate_numerical_html(df, num_cols) + "<hr>"
        html_content += analyze_univariate_categorical_html(df, cat_cols) + "<hr>"
        html_content += analyze_bivariate_numerical_html(df, num_cols) + "<hr>"
        html_content += analyze_bivariate_num_cat_html(df, num_cols, cat_cols) + "<hr>"
        # Pass missing_html so the summary can report whether missing values were found
        html_content += get_analysis_summary_html(df, missing_html) + "<hr>"
        html_content += get_bonus_guide_html()

        # --- Finalize HTML ---
        html_content += "<p>--- End of Report ---</p>"
        duration = datetime.now() - start_time
        html_content += f"<p>Analysis completed in {duration.total_seconds():.2f} seconds.</p>"
        html_content += "</body></html>"

        # Save to a temp file (delete=False so Gradio can serve/download it)
        with tempfile.NamedTemporaryFile(mode='w', delete=False, suffix=".html",
                                         encoding='utf-8') as temp_file:
            temp_file.write(html_content)
            report_path = temp_file.name

        return report_path

    except gr.Error:
        # Re-raise user-facing errors (e.g. the file-size limit above) as-is;
        # otherwise the generic handler below would re-wrap them.
        raise
    except pd.errors.ParserError:
        raise gr.Error("Error parsing CSV file. Please ensure it is a valid CSV format and delimiter is correctly inferred (usually comma).")
    except FileNotFoundError:
        raise gr.Error("Uploaded file not found. Please try uploading again.")
    except ValueError as ve:
        raise gr.Error(f"Value Error: {ve}")
    except Exception as e:
        # Generic error catch - useful for debugging
        import traceback
        tb_str = traceback.format_exc()
        print(f"An unexpected error occurred: {e}\n{tb_str}")  # Log to console
        raise gr.Error(f"An unexpected error occurred during analysis: {e}. Check console logs if running locally.")
# --- Gradio Interface Setup ---
description = """
**Effortless Dataset Insights 📊**

Upload your CSV dataset (max 100MB) and get an automated Exploratory Data Analysis (EDA) report.

The report includes:
1. Basic Info (Shape, Data Types, Head/Tail)
2. Descriptive Statistics
3. Missing Value Analysis & Heatmap
4. Univariate Analysis (Histograms, Box Plots, Count Plots)
5. Bivariate Analysis (Correlation Heatmap, Pair Plot [small datasets], Box Plots by Category)
6. Summary & Next Steps Guide

The output will be an HTML file that you can download and view in your browser.
"""

iface = gr.Interface(
    fn=generate_eda_report,
    inputs=gr.File(label="Upload CSV Dataset", file_types=[".csv"]),
    outputs=gr.File(label="Download EDA Report (.html)"),
    title="Effortless Dataset Insights",
    description=description,
    allow_flagging="never",
    # Gradio rejects an empty examples list — use None until example CSVs exist,
    # e.g. examples=[["./examples/sample_data.csv"]]
    examples=None,
    theme=gr.themes.Soft(),  # Optional: Apply a theme
)

# --- Launch the App ---
if __name__ == "__main__":
    iface.launch()