# # Which Economic Tasks are Performed with AI? Evidence from Millions of
# # Claude Conversations
# _Handa et al., 2025_

import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from textwrap import wrap
import numpy as np

palette = sns.color_palette("colorblind")

# ### Create O*NET / SOC Merged Dataframe

def merge_onet_soc_data(
    onet_path: str = "onet_task_statements.csv",
    soc_path: str = "SOC_Structure.csv",
) -> pd.DataFrame:
    """
    Merges O*NET task statements with SOC (Standard Occupational Classification)
    data based on two-digit major group codes.

    Args:
        onet_path (str): Path to the O*NET task statements CSV file.
            Defaults to the file used in the paper's analysis.
        soc_path (str): Path to the SOC structure CSV file.

    Returns:
        pd.DataFrame: Merged DataFrame containing O*NET rows with the SOC
        major group title column ("SOC or O*NET-SOC 2019 Title") attached.

    Note:
        The original docstring documented `onet_path`/`soc_path` arguments
        that did not exist (paths were hardcoded); they are now real,
        defaulted parameters, which keeps all existing call sites working.
    """
    # Read and process O*NET data. The first two characters of the
    # O*NET-SOC code identify the SOC major group (e.g. "15-1252.00" -> "15").
    onet_df = pd.read_csv(onet_path)
    onet_df["soc_major_group"] = onet_df["O*NET-SOC Code"].str[:2]

    # Read and process SOC data. Rows without a "Major Group" value are
    # lower-level entries, so drop them before extracting the group code.
    soc_df = pd.read_csv(soc_path)
    soc_df = soc_df.dropna(subset=['Major Group'])
    soc_df["soc_major_group"] = soc_df["Major Group"].str[:2]

    # Left join keeps every O*NET task even when no SOC title matches.
    merged_df = onet_df.merge(
        soc_df[['soc_major_group', 'SOC or O*NET-SOC 2019 Title']],
        on='soc_major_group',
        how='left'
    )

    return merged_df

task_occupations_df = merge_onet_soc_data()

# Number of distinct occupation titles in the merged frame.
task_occupations_df["Title"].nunique()
# Update cluster mappings to include data from the merged_df.
# Normalize task text so it can be joined against the mappings file.
task_occupations_df["task_normalized"] = (
    task_occupations_df["Task"].str.lower().str.strip()
)
# Some tasks appear under multiple occupations, so count the number of
# distinct occupation titles per normalized task.
task_occupations_df["n_occurrences"] = (
    task_occupations_df.groupby("task_normalized")["Title"].transform("nunique")
)

task_occupations_df

# ### Load Task Mappings and Join

task_mappings_df = pd.read_csv("onet_task_mappings.csv")

grouped_with_occupations = task_mappings_df.merge(
    task_occupations_df,
    left_on="task_name",
    right_on="task_normalized",
    how="left"
)

# Split each task's conversation share evenly across the occupations that
# contain it, then renormalize so the scaled shares sum to 100.
# FIX: the per-occurrence ratio was previously evaluated twice (once in the
# numerator and once inside the normalizing .sum()); compute it once.
per_occurrence_share = (
    grouped_with_occupations["pct"] / grouped_with_occupations["n_occurrences"]
)
grouped_with_occupations["pct_occ_scaled"] = (
    100 * per_occurrence_share / per_occurrence_share.sum()
)
# Sanity check: should be 100 (up to float rounding).
grouped_with_occupations["pct_occ_scaled"].sum()

grouped_with_occupations
# ## EXPERIMENTS
# ### TASKS

# Set style and increase font sizes for all subsequent figures.
plt.rcParams['font.size'] = 12          # Base font size
plt.rcParams['axes.titlesize'] = 14     # Title font size
plt.rcParams['axes.labelsize'] = 12     # Axis labels size
plt.rcParams['xtick.labelsize'] = 11    # X-axis tick labels size
plt.rcParams['ytick.labelsize'] = 11    # Y-axis tick labels size
plt.rcParams['legend.fontsize'] = 11    # Legend font size
plt.rcParams['figure.titlesize'] = 16   # Figure title size
sns.set_context("notebook", font_scale=1.2)

# --- Top tasks -------------------------------------------------------------
# Get top 10 tasks overall to ensure consistent comparison.
total_tasks = (grouped_with_occupations.groupby("Task")["pct_occ_scaled"]
               .sum()
               .sort_values(ascending=False))
top_10_tasks = total_tasks.head(10).index

# One row per top task with its total share.
plot_df = (grouped_with_occupations[grouped_with_occupations["Task"].isin(top_10_tasks)]
           .groupby("Task")["pct_occ_scaled"]
           .sum()
           .reset_index())

# FIX: `total_tasks` is already sorted by overall frequency, so reuse its
# order instead of re-aggregating the already-aggregated plot frame.
plot_df["Task"] = pd.Categorical(plot_df["Task"], categories=top_10_tasks, ordered=True)

plt.figure(figsize=(16, 12))
sns.barplot(
    data=plot_df,
    x="pct_occ_scaled",
    y="Task",
    color=palette[0],
)

# Wrap long task titles onto multiple lines.
# FIX: this wrapping block was duplicated verbatim, and a dead
# get_legend_handles_labels() call sat between the copies even though the
# plot draws no legend; both removed.
ax = plt.gca()
ax.set_yticklabels(['\n'.join(wrap(label.get_text(), width=40))
                    for label in ax.get_yticklabels()])

# Format x-axis as percentages.
ax.xaxis.set_major_formatter(plt.FuncFormatter(lambda x, _: f'{x:.1f}%'))

plt.title('Top Tasks by % of Conversations')
plt.xlabel('Percentage of Records')
plt.ylabel('O*NET Task')
plt.tight_layout()
plt.show()

# ### OCCUPATIONS

grouped_with_occupations.groupby("Title")["pct_occ_scaled"].sum()

# Calculate percentages per occupation (one row per Title).
plot_df = (grouped_with_occupations.groupby("Title")["pct_occ_scaled"]
           .sum()
           .reset_index())

# FIX: plot_df already has one row per Title, so the extra
# groupby-sum round trips are redundant; rank directly.
top_occs = plot_df.nlargest(15, "pct_occ_scaled")["Title"]
plot_df = plot_df[plot_df["Title"].isin(top_occs)]

# Sort occupations by overall frequency for the bar order.
occ_order = plot_df.sort_values("pct_occ_scaled", ascending=False)["Title"]
plot_df["Title"] = pd.Categorical(plot_df["Title"], categories=occ_order, ordered=True)

plt.figure(figsize=(18, 16))
sns.barplot(
    data=plot_df,
    x="pct_occ_scaled",
    y="Title",
    color=palette[0],
)

# Wrap occupation titles.
ax = plt.gca()
ax.set_yticklabels(['\n'.join(wrap(label.get_text(), width=40))
                    for label in ax.get_yticklabels()])

# Format x-axis as percentages.
ax.xaxis.set_major_formatter(plt.FuncFormatter(lambda x, _: f'{x:.1f}%'))

plt.title('Top Occupations by % of Conversations')
plt.xlabel('Percentage of Conversations')
plt.ylabel('Occupation')
plt.tight_layout()
plt.show()
# ### OCCUPATIONAL CATEGORIES

# One row per SOC major-group title with its total share of conversations.
category_col = "SOC or O*NET-SOC 2019 Title"
plot_df = (
    grouped_with_occupations
    .groupby(category_col)["pct_occ_scaled"]
    .sum()
    .reset_index()
)

# Order categories from most- to least-used so the bars read top-down.
category_order = plot_df.sort_values("pct_occ_scaled", ascending=False)[category_col]
plot_df[category_col] = pd.Categorical(
    plot_df[category_col],
    categories=category_order,
    ordered=True,
)

plt.figure(figsize=(18, 16))
sns.barplot(
    data=plot_df,
    x="pct_occ_scaled",
    y=category_col,
    color=palette[0],
)

# Wrap long category labels and drop the redundant " Occupations" suffix.
ax = plt.gca()
wrapped_labels = [
    '\n'.join(wrap(tick.get_text().replace(" Occupations", ""), width=60))
    for tick in ax.get_yticklabels()
]
ax.set_yticklabels(wrapped_labels)

# Show x ticks as percentages.
ax.xaxis.set_major_formatter(plt.FuncFormatter(lambda x, _: f'{x:.1f}%'))

plt.title('Occupational Categories by % of Conversations')
plt.xlabel('Percentage of Conversations')
plt.ylabel('Occupational Category')
plt.tight_layout()
plt.show()

grouped_with_occupations
# Load employment data and attach BLS employment shares to Claude's
# per-category usage distribution (left join keeps every category that
# appears in Claude conversations, even without BLS coverage).
bls_employment_df = pd.read_csv("bls_employment_may_2023.csv")

claude_employment_df = grouped_with_occupations.groupby("SOC or O*NET-SOC 2019 Title")["pct_occ_scaled"].sum().reset_index(name='claude_distribution')

employment_df = claude_employment_df.merge(bls_employment_df,
                             on='SOC or O*NET-SOC 2019 Title',
                             how='left')

# --- Dumbbell chart: % of U.S. workers (BLS) vs % of Claude conversations ---
# Calculate percentages and setup data
plot_df = employment_df.copy()

def get_distribution(df, value_column):
    # Normalize a column to percentages of its total, rounded to 1 decimal
    # (rounded here so the plotted positions match the printed labels).
    total = df[value_column].sum()
    return (df[value_column] / total * 100).round(1)

plot_df['bls_pct'] = get_distribution(plot_df, 'bls_distribution')
plot_df['claude_pct'] = get_distribution(plot_df, 'claude_distribution')
plot_df['clean_label'] = plot_df['SOC or O*NET-SOC 2019 Title'].str.replace(' Occupations', '')
plot_df['pct_difference'] = plot_df['claude_pct'] - plot_df['bls_pct']
# Sort ascending so the largest BLS categories end up at the top of the chart.
plot_df = plot_df.sort_values('bls_pct', ascending=True)

# Create the plot
fig, ax = plt.subplots(figsize=(20, 12))

# Set colors: orange-ish for Claude, blue-ish for BLS (colorblind palette).
claude_color = palette[1] 
bls_color = palette[0] 

# One horizontal "dumbbell" per category: a line connecting the two
# percentages, with a dot and a text label at each end.
y_positions = range(len(plot_df))
for i, row in enumerate(plot_df.itertuples()):
    # Color the connecting line by whichever value is larger, so over- vs
    # under-representation relative to BLS is visible at a glance.
    line_color = claude_color if row.claude_pct > row.bls_pct else bls_color

    # Draw the line between bls and claude percentages (zorder below dots).
    ax.plot([row.bls_pct, row.claude_pct], [i, i],
            color=line_color,
            linestyle='-',
            linewidth=2.5,
            zorder=1)

    # Place each text label on the outside of its dot so labels never sit
    # between the two dots.
    if row.claude_pct > row.bls_pct:
        bls_ha = 'right'
        claude_ha = 'left'
        bls_offset = -0.4
        claude_offset = 0.4
    else:
        bls_ha = 'left'
        claude_ha = 'right'
        bls_offset = 0.4
        claude_offset = -0.4

    # Plot BLS percentage; only the first row contributes a legend entry.
    ax.scatter([row.bls_pct], [i],
               color=bls_color,
               s=200,
               zorder=2,
               label='% of U.S. workers (BLS)' if i == 0 else "")
    ax.text(row.bls_pct + bls_offset,
            i,
            f'{row.bls_pct:.1f}%',
            ha=bls_ha,
            va='center',
            color=bls_color)

    # Plot Claude's percentage; same single-legend-entry trick.
    ax.scatter([row.claude_pct], [i],
               color=claude_color,
               s=200,
               zorder=2,
               label='% of Claude conversations' if i == 0 else "")
    ax.text(row.claude_pct + claude_offset,
            i,
            f'{row.claude_pct:.1f}%',
            ha=claude_ha,
            va='center',
            color=claude_color)

# Customize the plot
ax.set_xlabel('Percentage')
ax.set_ylabel('Occupational Category')

# Add percentage formatter to x-axis
ax.xaxis.set_major_formatter(plt.FuncFormatter(lambda x, _: f'{x:.1f}%'))

# Set y-axis labels (cleaned category names, one per dumbbell row).
ax.set_yticks(y_positions)
ax.set_yticklabels(plot_df['clean_label'])

# Add legend; reversed so the Claude entry is listed first.
handles, labels = ax.get_legend_handles_labels()
handles = handles[::-1]
labels = labels[::-1]
ax.legend(handles, labels, loc='lower right', bbox_to_anchor=(1.0, 0.0))

# Adjust grid and layout (grid behind the data).
ax.grid(axis='x', linestyle='--', alpha=0.3)
ax.set_axisbelow(True)

# Set axis limits with 15% padding so end labels are not clipped.
max_val = max(plot_df['bls_pct'].max(), plot_df['claude_pct'].max())
min_val = min(plot_df['bls_pct'].min(), plot_df['claude_pct'].min())
padding = (max_val - min_val) * 0.15
ax.set_xlim(min_val - padding, max_val + padding)
ax.set_ylim(-1, len(plot_df))

# Adjust layout
plt.tight_layout()
plt.show()

# ### USAGE BY WAGE

# Read and process wage data
wage_df = pd.read_csv("wage_data.csv")

wage_df
# Join wage and occupation data on the full O*NET-SOC code.
grouped_with_occupations_and_wage = grouped_with_occupations.merge(
    wage_df, left_on="O*NET-SOC Code", right_on="SOCcode", how="left"
)
grouped_with_occupations_and_wage


def create_wage_distribution_plot(plot_df):
    """Scatter plot of median wage vs. percent of conversations per occupation.

    Args:
        plot_df (pd.DataFrame): One row per occupation, with 'Title',
            'MedianSalary' and 'pct_occ_scaled' columns.

    Side effects:
        Renders and then closes a matplotlib figure; returns None.
    """
    plt.figure(figsize=(24, 12))

    # Point size scales with conversation share as a second visual channel.
    sns.scatterplot(data=plot_df,
                    x='MedianSalary',
                    y='pct_occ_scaled',
                    alpha=0.5,
                    size='pct_occ_scaled',
                    sizes=(60, 400),
                    color=palette[0],
                    legend=False)

    plt.xlabel('Median Wage ($)')
    plt.ylabel('Percent of Conversations')
    plt.gca().yaxis.set_major_formatter(plt.FuncFormatter(lambda y, _: '{:.1f}%'.format(y)))

    # FIX: the original line ended with a stray trailing comma, building and
    # discarding a (None,) tuple.
    plt.title('Wage Distribution by % of Conversations')

    # Annotate the top points by conversation percentage.
    top_n = 7
    top_points = plot_df.nlargest(top_n, 'pct_occ_scaled')
    for _, row in top_points.iterrows():
        plt.annotate('\n'.join(wrap(row['Title'], width=20)),
                     (row['MedianSalary'], row['pct_occ_scaled']),
                     xytext=(5, 5),
                     textcoords='offset points')

    # Annotate the lowest- and highest-salary occupations.
    # FIX: the original guard `if i != 0` unconditionally skipped the single
    # most extreme point of each subset, although its comment said the intent
    # was to skip points already annotated above; check membership instead.
    n_extremes = 2
    annotated_titles = set(top_points['Title'])
    for df_subset in [plot_df.nsmallest(n_extremes, 'MedianSalary'),
                      plot_df.nlargest(n_extremes, 'MedianSalary')]:
        for _, row in df_subset.iterrows():
            if row['Title'] not in annotated_titles:
                plt.annotate('\n'.join(wrap(row['Title'], width=20)),
                             (row['MedianSalary'], row['pct_occ_scaled']),
                             xytext=(5, -15),
                             textcoords='offset points')

    # Formatting: anchor y at 0, light grid, dollar-formatted x ticks.
    plt.ylim(bottom=0)
    plt.grid(True, linestyle='--', alpha=0.7)
    plt.gca().xaxis.set_major_formatter(plt.FuncFormatter(lambda x, p: f'${x:,.0f}'))
    plt.tight_layout()
    plt.show()
    plt.close()


# Aggregate to one row per occupation: sum the usage share, keep the first
# value of every other column (wage columns are constant within a Title).
groupby_cols = ["Title"]
agg_dict = {col: 'first' for col in grouped_with_occupations_and_wage.columns
            if col not in groupby_cols}
agg_dict['pct_occ_scaled'] = 'sum'

plot_df = (grouped_with_occupations_and_wage
           .groupby(groupby_cols)
           .agg(agg_dict)
           .reset_index()
           .copy())

# Filter out null values and implausibly low salaries.
plot_df = plot_df[plot_df["MedianSalary"].notnull() &
                  (plot_df["MedianSalary"] > 100)]

# Create the plot for the aggregated data.
create_wage_distribution_plot(plot_df)


# ### AUTOMATION VS AUGMENTATION

# FIX: `mcolors` is used by adjust_color_brightness below but was never
# imported anywhere in the notebook (NameError for any string color input).
import matplotlib.colors as mcolors

automation_vs_augmentation_df = pd.read_csv("automation_vs_augmentation.csv")


def adjust_color_brightness(color, factor):
    """Adjust the brightness of a color by a multiplicative factor.

    Args:
        color: A matplotlib color spec — name/hex string or an RGB tuple.
        factor (float): Multiplier applied to each RGB channel (>1 brightens).

    Returns:
        tuple: RGB triple with each channel clipped to at most 1.0.
    """
    # Convert color to RGB if it's not already.
    if isinstance(color, str):
        color = mcolors.to_rgb(color)
    # Scale each channel, clipping at 1.0.
    return tuple(min(1.0, c * factor) for c in color)
def plot_interaction_modes(df):
    """Render stacked horizontal bars comparing automation vs augmentation.

    Each interaction mode (directive, feedback loop, task iteration,
    learning, validation) is a segment in its category's bar, labeled with
    its share of conversations.

    Args:
        df (pd.DataFrame): Must contain 'interaction_type' and 'pct' columns.

    Side effects:
        Draws a matplotlib figure; returns None.
    """
    plot_df = df.copy()

    # Lowercase the type labels, drop the 'none' bucket, and renormalize the
    # remaining shares so they sum to 1.
    plot_df['interaction_type'] = plot_df['interaction_type'].str.lower()
    plot_df = plot_df[plot_df['interaction_type'] != 'none']
    total = plot_df['pct'].sum()
    plot_df['pct'] = plot_df['pct'] / total

    # Map each interaction mode to its top-level category.
    category_map = {
        'directive': 'Automation',
        'feedback loop': 'Automation',
        'task iteration': 'Augmentation',
        'learning': 'Augmentation',
        'validation': 'Augmentation'
    }
    plot_df['category'] = plot_df['interaction_type'].map(category_map)

    # Title case for display.
    plot_df['interaction_type'] = plot_df['interaction_type'].str.title()

    # Color variants: one base hue per category, brightened per segment.
    # NOTE(review): assumes at most 2 automation and 3 augmentation modes,
    # matching category_map above — extend these lists if modes are added.
    colors_a = [
        palette[1],
        adjust_color_brightness(palette[1], 1.3)
    ]
    colors_b = [
        palette[2],
        adjust_color_brightness(palette[2], 1.3),
        adjust_color_brightness(palette[2], 1.6)
    ]

    plt.figure(figsize=(16, 6))

    # Sort segments within each category to match the original visual order.
    automation_df = plot_df[plot_df['category'] == 'Automation'].sort_values('interaction_type', ascending=False)
    augmentation_df = plot_df[plot_df['category'] == 'Augmentation'].sort_values('interaction_type', ascending=False)

    bar_positions = [0, 1]
    bar_width = 0.8

    def _stack_segments(rows, y, colors):
        # Draw one category's segments left-to-right at height y; returns the
        # legend handles/labels for its segments.
        # FIX: this loop was duplicated verbatim for each category; factored
        # into one helper. (A dead `handles, labels = [], []` init that was
        # never read has also been dropped.)
        handles, labels = [], []
        left = 0
        for i, (_, row) in enumerate(rows.iterrows()):
            bar = plt.barh(y, row['pct'], left=left, height=bar_width,
                           color=colors[i])
            handles.append(bar)
            labels.append(row['interaction_type'])
            plt.text(left + row['pct']/2, y,
                     f'{row["pct"]*100:.1f}%',
                     ha='center', va='center',
                     color='white')
            left += row['pct']
        return handles, labels

    auto_handles, auto_labels = _stack_segments(automation_df, 0, colors_a)
    aug_handles, aug_labels = _stack_segments(augmentation_df, 1, colors_b)

    plt.yticks(bar_positions, ['Automation', 'Augmentation'])
    plt.xlabel('Percentage of Conversations')

    # Combine handles and labels, augmentation first, for legend ordering.
    all_handles = aug_handles + auto_handles
    all_labels = aug_labels + auto_labels

    # Build the legend in a fixed order. FIX: guard the lookup — the original
    # `.index(label)` raised ValueError whenever a mode was absent from the data.
    desired_order = ['Validation', 'Task Iteration', 'Learning', 'Feedback Loop', 'Directive']
    ordered_handles = []
    ordered_labels = []
    for label in desired_order:
        if label in all_labels:
            idx = all_labels.index(label)
            ordered_handles.append(all_handles[idx])
            ordered_labels.append(all_labels[idx])

    plt.legend(ordered_handles, ordered_labels, loc='lower right')

    plt.tight_layout()

plot_interaction_modes(automation_vs_augmentation_df)