codeShare committed
Commit 3a0a26e · verified · 1 Parent(s): 2031c29

Upload CLIP_B32_finetune_cluster.ipynb

Files changed (1)
  1. CLIP_B32_finetune_cluster.ipynb +1 -1
CLIP_B32_finetune_cluster.ipynb CHANGED
@@ -1 +1 @@
- {"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"provenance":[{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/CLIP_B32_finetune_cluster.ipynb","timestamp":1760880784010},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/CLIP_B32_finetune_cluster.ipynb","timestamp":1760509652530},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/CLIP_B32_finetune_cluster.ipynb","timestamp":1760371508137},{"file_id":"1wufnt5hqKHLuoX9wDdyzarENUjOO_s3N","timestamp":1760363981901},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/CLIP_cluster.ipynb","timestamp":1760363231133}],"gpuType":"T4","authorship_tag":"ABX9TyMmz5WShSFb5vkeVXJkOhLY"},"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"name":"python"},"accelerator":"GPU"},"cells":[{"cell_type":"code","execution_count":null,"metadata":{"id":"G9yAxL_ViF7y"},"outputs":[],"source":["from google.colab import drive\n","drive.mount('/content/drive')"]},{"cell_type":"markdown","source":["Install Required Libraries\n","Run this cell to install the necessary packages. CLIP requires PyTorch, and we'll use scikit-learn for clustering, along with Pillow for image loading and matplotlib for visualization."],"metadata":{"id":"ji2qFha2icZi"}},{"cell_type":"code","source":["#@markdown Unzip training data from drive to /content/ (if required)\n","path = '/content/drive/MyDrive/training_data.zip' #@param {type:'string'}\n","\n","%cd /content/\n","!unzip {path}"],"metadata":{"id":"59Tf9llpSGoz"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["!pip install ftfy regex tqdm\n","!pip install git+https://github.com/openai/CLIP.git\n","!pip install scikit-learn matplotlib pillow umap-learn # UMAP is optional for 2D visualization"],"metadata":{"id":"WncaEzzGiaO2"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":["Load Images and Extract CLIP Embeddings\n","\n","Upload your images the normal way ( `/content/`) prior to running this cell.\n","\n","This code loads all images (supports JPG, PNG, etc.), preprocesses them, and extracts 512-dimensional embeddings using the ViT-B/32 CLIP model."],"metadata":{"id":"EnqyKHcOilVA"}},{"cell_type":"code","source":["!pip install open_clip_torch\n","\n","import os\n","import numpy as np\n","import torch\n","import open_clip\n","from PIL import Image\n","\n","# Configuration\n","image_dir = '/content/' # Update this path\n","device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n","model_name = \"ViT-B-32\" # Available per error message\n","pretrained = \"laion400m_e32\" # Robust pretrained weights\n","\n","# Load OpenCLIP model\n","model, _, preprocess = open_clip.create_model_and_transforms(model_name, pretrained=pretrained)\n","model.to(device)\n","model.eval()\n","\n","# Load images and extract embeddings\n","embeddings = []\n","image_paths = []\n","image_names = []\n","\n","for filename in os.listdir(image_dir):\n"," if filename.lower().endswith(('.png', '.jpg', '.jpeg', '.gif', '.bmp')):\n"," img_path = os.path.join(image_dir, filename)\n"," try:\n"," image = preprocess(Image.open(img_path)).unsqueeze(0).to(device)\n"," with torch.no_grad():\n"," embedding = model.encode_image(image)\n"," embeddings.append(embedding.cpu().numpy().flatten())\n"," image_paths.append(img_path)\n"," image_names.append(filename)\n"," print(f\"Processed: {filename}\")\n"," except Exception as e:\n"," print(f\"Error processing {filename}: {e}\")\n","\n","embeddings = np.array(embeddings)\n","print(f\"Extracted embeddings for {len(embeddings)} images. Shape: {embeddings.shape}\")"],"metadata":{"id":"IcqN15af460q"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":["Perform Clustering\n","We'll use K-Means clustering on the embeddings. You can choose the number of clusters (`n_clusters`) based on your dataset size (e.g., try 5-10). We'll also compute the silhouette score to evaluate cluster quality (higher is better).\n","\n","For visualization, we'll optionally reduce dimensions to 2D using UMAP."],"metadata":{"id":"HQsc2r-ii6cK"}},{"cell_type":"code","source":["from umap import UMAP # For 2D projection (optional)\n","import os\n","import numpy as np\n","import torch\n","import clip\n","from PIL import Image\n","import matplotlib.pyplot as plt\n","from sklearn.cluster import KMeans\n","from sklearn.metrics import silhouette_score\n","import warnings\n","warnings.filterwarnings('ignore')\n","#@markdown Choose number of clusters (experiment with this)\n","n_clusters = 50 # @param {type:'slider' , min:1 , max:100, step:1}\n","\n","# Perform K-Means clustering\n","kmeans = KMeans(n_clusters=n_clusters, random_state=42, n_init=10)\n","cluster_labels = kmeans.fit_predict(embeddings)\n","\n","# Evaluate clustering quality\n","sil_score = silhouette_score(embeddings, cluster_labels)\n","print(f\"Silhouette Score: {sil_score:.3f} (closer to 1 is better)\")\n","\n","# Optional: 2D visualization with UMAP\n","reducer = UMAP(random_state=42, n_components=2)\n","embed_2d = reducer.fit_transform(embeddings)\n","\n","plt.figure(figsize=(10, 8))\n","scatter = plt.scatter(embed_2d[:, 0], embed_2d[:, 1], c=cluster_labels, cmap='tab10', s=50)\n","plt.colorbar(scatter)\n","plt.title(f'2D UMAP Projection of CLIP Embeddings (K={n_clusters} Clusters)')\n","plt.xlabel('UMAP 1')\n","plt.ylabel('UMAP 2')\n","plt.show()"],"metadata":{"id":"WM9wug70jCtR"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":["Sort Images into Clusters\n","This creates subdirectories for each cluster and moves/copies the images there. Set `move_files=True` to move (or False to copy)."],"metadata":{"id":"aWSOgPj5jLLI"}},{"cell_type":"code","source":["import shutil\n","import os\n","from PIL import Image\n","import ipywidgets as widgets\n","from IPython.display import display\n","\n","# Create output directories\n","output_dir = '/content/clusters' # Output base directory\n","os.makedirs(output_dir, exist_ok=True)\n","\n","move_files = False # Set to True to move files, False to copy\n","\n","# Create directories for each cluster\n","for i in range(n_clusters):\n"," cluster_dir = os.path.join(output_dir, f'cluster_{i}')\n"," os.makedirs(cluster_dir, exist_ok=True)\n","\n","# Create dropdown for selecting image format\n","format_dropdown = widgets.Dropdown(\n"," options=['JPEG', 'PNG', 'WEBP'],\n"," value='JPEG', # Default format\n"," description='Output Format:',\n"," style={'description_width': 'initial'}\n",")\n","\n","# Create slider for compression quality\n","quality_slider = widgets.IntSlider(\n"," value=100, # Default to lossless/max quality\n"," min=0,\n"," max=100,\n"," step=1,\n"," description='Quality:',\n"," style={'description_width': 'initial'},\n"," continuous_update=False\n",")\n","\n","# Display widgets\n","display(format_dropdown)\n","display(quality_slider)\n","\n","# Function to convert and save images\n","def convert_and_save_images():\n"," selected_format = format_dropdown.value\n"," quality = quality_slider.value\n","\n"," for idx, label in enumerate(cluster_labels):\n"," src_path = image_paths[idx] # Use full path\n"," # Create destination filename with selected extension\n"," dst_filename = os.path.splitext(image_names[idx])[0] + f'.{selected_format.lower()}'\n"," dst_path = os.path.join(output_dir, f'cluster_{label}', dst_filename)\n","\n"," try:\n"," # Open and convert image\n"," with Image.open(src_path).convert('RGB') as img:\n"," if selected_format == 'JPEG':\n"," img.save(dst_path, 'JPEG', quality=quality, optimize=True)\n"," elif selected_format == 'PNG':\n"," # PNG compression: 0 (max compression) to 9 (no compression)\n"," # Map quality 0-100 to PNG compression 9-0\n"," png_compression = int(9 - (quality / 100 * 9))\n"," img.save(dst_path, 'PNG', compress_level=png_compression)\n"," elif selected_format == 'WEBP':\n"," img.save(dst_path, 'WEBP', quality=quality)\n","\n"," if move_files:\n"," os.remove(src_path) # Delete original if moving\n"," print(f\"Assigned {image_names[idx]} as {dst_filename} to cluster_{label}\")\n"," except Exception as e:\n"," print(f\"Error converting {image_names[idx]} to {selected_format}: {e}\")\n","\n"," print(f\"Images sorted into {n_clusters} clusters in '{output_dir}' as .{selected_format.lower()}\")\n","\n","# Button to trigger conversion\n","convert_button = widgets.Button(\n"," description='Convert Images',\n"," button_style='primary',\n"," tooltip='Click to convert and sort images'\n",")\n","\n","# Button click handler\n","def on_button_clicked(b):\n"," convert_and_save_images()\n","\n","convert_button.on_click(on_button_clicked)\n","display(convert_button)"],"metadata":{"id":"xb9SZn8Rq6Cd"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":["Visualize Sample Images per Cluster\n","Display a few sample images from each cluster to inspect the results."],"metadata":{"id":"Tg_q68KnjUb5"}},{"cell_type":"code","source":["from PIL import Image\n","import matplotlib.pyplot as plt\n","import os\n","\n","def display_cluster_samples(cluster_dir, n_samples=3):\n"," # Updated to include .webp files\n"," images = [f for f in os.listdir(cluster_dir) if f.lower().endswith('.webp')][:n_samples]\n"," if not images:\n"," print(f\"No images in {cluster_dir}\")\n"," return\n","\n"," fig, axs = plt.subplots(1, len(images), figsize=(5 * len(images), 5))\n"," if len(images) == 1:\n"," axs = [axs]\n"," for j, img_file in enumerate(images):\n"," img_path = os.path.join(cluster_dir, img_file)\n"," try:\n"," img = Image.open(img_path).convert('RGB') # Ensure RGB for display\n"," axs[j].imshow(img)\n"," axs[j].set_title(img_file)\n"," axs[j].axis('off')\n"," except Exception as e:\n"," print(f\"Error displaying {img_file}: {e}\")\n"," plt.show()\n","\n","# Display samples from each cluster\n","for i in range(n_clusters):\n"," cluster_dir = os.path.join(output_dir, f'cluster_{i}')\n"," print(f\"\\nSamples from Cluster {i}:\")\n"," display_cluster_samples(cluster_dir)"],"metadata":{"id":"pzy3-9bBT231"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["#@markdown Upload to Google Drive as .zip folder (Be mindful of Google Drive Terms of Service)\n","drive_folder_name = 'clusters' # @param {type:'string'}\n","\n","%cd /content/\n","!zip -r /content/drive/MyDrive/{drive_folder_name}.zip {output_dir}\n","\n"],"metadata":{"id":"w2Gzortz0NuD"},"execution_count":null,"outputs":[]}]}
+ {"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"provenance":[{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/CLIP_B32_finetune_cluster.ipynb","timestamp":1760890109028},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/CLIP_B32_finetune_cluster.ipynb","timestamp":1760880784010},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/CLIP_B32_finetune_cluster.ipynb","timestamp":1760509652530},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/CLIP_B32_finetune_cluster.ipynb","timestamp":1760371508137},{"file_id":"1wufnt5hqKHLuoX9wDdyzarENUjOO_s3N","timestamp":1760363981901},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/CLIP_cluster.ipynb","timestamp":1760363231133}],"gpuType":"T4","authorship_tag":"ABX9TyOP8KMhl1SIT4XG3AiDhbNj"},"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"name":"python"},"accelerator":"GPU"},"cells":[{"cell_type":"code","execution_count":null,"metadata":{"id":"G9yAxL_ViF7y"},"outputs":[],"source":["from google.colab import drive\n","drive.mount('/content/drive')"]},{"cell_type":"markdown","source":["Install Required Libraries\n","Run this cell to install the necessary packages. CLIP requires PyTorch, and we'll use scikit-learn for clustering, along with Pillow for image loading and matplotlib for visualization."],"metadata":{"id":"ji2qFha2icZi"}},{"cell_type":"code","source":["#@markdown Unzip training data from drive to /content/ (if required)\n","path = '/content/drive/MyDrive/training_data.zip' #@param {type:'string'}\n","\n","%cd /content/\n","!unzip {path}"],"metadata":{"id":"59Tf9llpSGoz"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["!pip install ftfy regex tqdm\n","!pip install git+https://github.com/openai/CLIP.git\n","!pip install scikit-learn matplotlib pillow umap-learn # UMAP is optional for 2D visualization"],"metadata":{"id":"WncaEzzGiaO2"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":["Load Images and Extract CLIP Embeddings\n","\n","Upload your images the normal way ( `/content/`) prior to running this cell.\n","\n","This code loads all images (supports JPG, PNG, etc.), preprocesses them, and extracts 512-dimensional embeddings using the ViT-B/32 CLIP model."],"metadata":{"id":"EnqyKHcOilVA"}},{"cell_type":"code","source":["!pip install open_clip_torch\n","\n","import os\n","import numpy as np\n","import torch\n","import open_clip\n","from PIL import Image\n","\n","# Configuration\n","image_dir = '/content/' # Update this path\n","device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n","model_name = \"ViT-B-32\" # Available per error message\n","pretrained = \"laion400m_e32\" # Robust pretrained weights\n","\n","# Load OpenCLIP model\n","model, _, preprocess = open_clip.create_model_and_transforms(model_name, pretrained=pretrained)\n","model.to(device)\n","model.eval()\n","\n","# Load images and extract embeddings\n","embeddings = []\n","image_paths = []\n","image_names = []\n","\n","for filename in os.listdir(image_dir):\n"," if filename.lower().endswith(('.png', '.jpg', '.jpeg', '.gif', '.bmp')):\n"," img_path = os.path.join(image_dir, filename)\n"," try:\n"," image = preprocess(Image.open(img_path)).unsqueeze(0).to(device)\n"," with torch.no_grad():\n"," embedding = model.encode_image(image)\n"," embeddings.append(embedding.cpu().numpy().flatten())\n"," image_paths.append(img_path)\n"," image_names.append(filename)\n"," print(f\"Processed: {filename}\")\n"," except Exception as e:\n"," print(f\"Error processing {filename}: {e}\")\n","\n","embeddings = np.array(embeddings)\n","print(f\"Extracted embeddings for {len(embeddings)} images. Shape: {embeddings.shape}\")"],"metadata":{"id":"IcqN15af460q"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":["Perform Clustering\n","We'll use K-Means clustering on the embeddings. You can choose the number of clusters (`n_clusters`) based on your dataset size (e.g., try 5-10). We'll also compute the silhouette score to evaluate cluster quality (higher is better).\n","\n","For visualization, we'll optionally reduce dimensions to 2D using UMAP."],"metadata":{"id":"HQsc2r-ii6cK"}},{"cell_type":"code","source":["from umap import UMAP # For 2D projection (optional)\n","import os\n","import numpy as np\n","import torch\n","import clip\n","from PIL import Image\n","import matplotlib.pyplot as plt\n","from sklearn.cluster import KMeans\n","from sklearn.metrics import silhouette_score\n","import warnings\n","warnings.filterwarnings('ignore')\n","#@markdown Choose number of clusters (experiment with this)\n","n_clusters = 50 # @param {type:'slider' , min:1 , max:200, step:1}\n","\n","# Perform K-Means clustering\n","kmeans = KMeans(n_clusters=n_clusters, random_state=42, n_init=10)\n","cluster_labels = kmeans.fit_predict(embeddings)\n","\n","# Evaluate clustering quality\n","sil_score = silhouette_score(embeddings, cluster_labels)\n","print(f\"Silhouette Score: {sil_score:.3f} (closer to 1 is better)\")\n","\n","# Optional: 2D visualization with UMAP\n","reducer = UMAP(random_state=42, n_components=2)\n","embed_2d = reducer.fit_transform(embeddings)\n","\n","plt.figure(figsize=(10, 8))\n","scatter = plt.scatter(embed_2d[:, 0], embed_2d[:, 1], c=cluster_labels, cmap='tab10', s=50)\n","plt.colorbar(scatter)\n","plt.title(f'2D UMAP Projection of CLIP Embeddings (K={n_clusters} Clusters)')\n","plt.xlabel('UMAP 1')\n","plt.ylabel('UMAP 2')\n","plt.show()"],"metadata":{"id":"WM9wug70jCtR"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["import shutil\n","import os\n","from PIL import Image\n","\n","# Create output directories\n","output_dir = '/content/clusters' # Output base directory\n","os.makedirs(output_dir, exist_ok=True)\n","\n","move_files = False # Set to True to move files, False to copy\n","\n","# Create directories for each cluster\n","for i in range(n_clusters):\n"," cluster_dir = os.path.join(output_dir, f'cluster_{i}')\n"," os.makedirs(cluster_dir, exist_ok=True)\n","\n","# Form inputs using Colab's # @param\n","output_format = \"JPEG\" # @param [\"JPEG\", \"PNG\", \"WEBP\"]\n","quality = 100 # @param {type:\"slider\", min:0, max:100, step:1}\n","\n","# Function to convert and save images\n","for idx, label in enumerate(cluster_labels):\n"," src_path = image_paths[idx] # Use full path\n"," # Create destination filename with selected extension\n"," dst_filename = os.path.splitext(image_names[idx])[0] + f'.{output_format.lower()}'\n"," dst_path = os.path.join(output_dir, f'cluster_{label}', dst_filename)\n","\n"," try:\n"," # Open and convert image\n"," with Image.open(src_path).convert('RGB') as img:\n"," if output_format == 'JPEG':\n"," img.save(dst_path, 'JPEG', quality=quality, optimize=True)\n"," elif output_format == 'PNG':\n"," # PNG compression: 0 (max compression) to 9 (no compression)\n"," # Map quality 0-100 to PNG compression 9-0\n"," png_compression = int(9 - (quality / 100 * 9))\n"," img.save(dst_path, 'PNG', compress_level=png_compression)\n"," elif output_format == 'WEBP':\n"," img.save(dst_path, 'WEBP', quality=quality)\n","\n"," if move_files:\n"," os.remove(src_path) # Delete original if moving\n"," print(f\"Assigned {image_names[idx]} as {dst_filename} to cluster_{label}\")\n"," except Exception as e:\n"," print(f\"Error converting {image_names[idx]} to {output_format}: {e}\")\n","\n","print(f\"Images sorted into {n_clusters} clusters in '{output_dir}' as .{output_format.lower()}\")"],"metadata":{"id":"1fMT3PmCOSyh"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":["Sort Images into Clusters\n","This creates subdirectories for each cluster and moves/copies the images there. Set `move_files=True` to move (or False to copy)."],"metadata":{"id":"aWSOgPj5jLLI"}},{"cell_type":"code","source":["from PIL import Image\n","import matplotlib.pyplot as plt\n","import os\n","\n","def display_cluster_samples(cluster_dir, n_samples=3):\n"," # Updated to include .jpg, .png, and .webp files\n"," images = [f for f in os.listdir(cluster_dir) if f.lower().endswith(('.jpg', '.jpeg', '.png', '.webp'))][:n_samples]\n"," if not images:\n"," print(f\"No images in {cluster_dir}\")\n"," return\n","\n"," fig, axs = plt.subplots(1, len(images), figsize=(5 * len(images), 5))\n"," if len(images) == 1:\n"," axs = [axs]\n"," for j, img_file in enumerate(images):\n"," img_path = os.path.join(cluster_dir, img_file)\n"," try:\n"," img = Image.open(img_path).convert('RGB') # Ensure RGB for display\n"," axs[j].imshow(img)\n"," axs[j].set_title(img_file)\n"," axs[j].axis('off')\n"," except Exception as e:\n"," print(f\"Error displaying {img_file}: {e}\")\n"," plt.show()\n","\n","# Display samples from each cluster\n","for i in range(n_clusters):\n"," cluster_dir = os.path.join(output_dir, f'cluster_{i}')\n"," print(f\"\\nSamples from Cluster {i}:\")\n"," display_cluster_samples(cluster_dir)"],"metadata":{"id":"ANg2q03kN6C_"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":["Visualize Sample Images per Cluster\n","Display a few sample images from each cluster to inspect the results."],"metadata":{"id":"Tg_q68KnjUb5"}},{"cell_type":"code","source":["#@markdown Upload to Google Drive as .zip folder (Be mindful of Google Drive Terms of Service)\n","drive_folder_name = 'clusters' # @param {type:'string'}\n","\n","%cd /content/\n","!zip -r /content/drive/MyDrive/{drive_folder_name}.zip {output_dir}\n","\n"],"metadata":{"id":"w2Gzortz0NuD"},"execution_count":null,"outputs":[]}]}
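
The notebook's extraction cell encodes one image per forward pass. A minimal batched sketch (not part of this commit) is usually faster on the T4 runtime the notebook targets; it assumes `model`, `preprocess`, `device`, and `image_paths` are the objects defined in that cell, and the `batch_size` value is illustrative:

import numpy as np
import torch
from PIL import Image

batch_size = 32  # illustrative value; raise or lower to fit GPU memory

feats = []
for start in range(0, len(image_paths), batch_size):
    # Preprocess a slice of paths into one (B, 3, 224, 224) tensor
    batch = torch.stack([
        preprocess(Image.open(p).convert('RGB'))
        for p in image_paths[start:start + batch_size]
    ]).to(device)
    with torch.no_grad():
        feats.append(model.encode_image(batch).cpu().numpy())
embeddings = np.concatenate(feats, axis=0)  # same (N, 512) array as before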
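
The clustering cell hard-codes `n_clusters` and suggests experimenting with it; a short sketch of automating that experiment with the silhouette score the notebook already computes (again assuming `embeddings` comes from the extraction cell):

from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score

# Sweep a few candidate cluster counts and keep the best-scoring one.
best_k, best_score = None, -1.0
for k in range(5, 55, 5):
    labels = KMeans(n_clusters=k, random_state=42, n_init=10).fit_predict(embeddings)
    score = silhouette_score(embeddings, labels)
    print(f"k={k}: silhouette={score:.3f}")
    if score > best_score:
        best_k, best_score = k, score

print(f"Best k by silhouette: {best_k} ({best_score:.3f})")

The winning value can then be used in place of the slider default in the clustering cell.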