add exception
modular_graph_and_candidates.py CHANGED

@@ -126,9 +126,15 @@ def embedding_similarity_clusters(models_root: Path, missing: List[str], thr: fl
     print("Encoding embeddings...")
     batch_size = 1
     for i in tqdm(range(0, len(names), batch_size), desc="Models", leave=False):
-        batch = [texts[names[i]]]
-        emb = model.encode(batch, convert_to_numpy=True, show_progress_bar=False)
-        all_embeddings.append(emb)
+        try:
+            batch = [texts[names[i]]]
+            emb = model.encode(batch, convert_to_numpy=True, show_progress_bar=False)
+            all_embeddings.append(emb)
+        except Exception as e:
+            print(f"⚠️  GPU worker error for {names[i]}: {e}")
+            # Create zero embedding as placeholder to maintain consistency
+            zero_emb = np.zeros((1, model.get_sentence_embedding_dimension()), dtype=np.float32)
+            all_embeddings.append(zero_emb)
 
     embeddings = np.vstack(all_embeddings).astype(np.float32)
     norms = np.linalg.norm(embeddings, axis=1, keepdims=True) + 1e-12
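For context: the new except branch appends an all-zero row whenever model.encode fails, so the stacked embedding matrix stays row-aligned with names, and the existing + 1e-12 term on the norms keeps those zero rows from producing NaNs during normalization. Below is a minimal, runnable sketch of that pattern; DummyModel, names, and texts are hypothetical stand-ins for the real sentence-transformers model and inputs the script presumably uses.

import numpy as np

class DummyModel:
    """Stand-in exposing the two methods the loop relies on."""
    dim = 4

    def get_sentence_embedding_dimension(self):
        return self.dim

    def encode(self, batch, convert_to_numpy=True, show_progress_bar=False):
        # Simulate a GPU worker failure on one input
        if "bad" in batch[0]:
            raise RuntimeError("simulated GPU worker error")
        return np.ones((len(batch), self.dim), dtype=np.float32)

model = DummyModel()
names = ["model-a", "bad-model", "model-c"]
texts = {n: f"card text for {n}" for n in names}

all_embeddings = []
for name in names:
    try:
        emb = model.encode([texts[name]], convert_to_numpy=True, show_progress_bar=False)
        all_embeddings.append(emb)
    except Exception as e:
        print(f"⚠️  GPU worker error for {name}: {e}")
        # Zero row keeps the matrix row-aligned with `names`
        all_embeddings.append(
            np.zeros((1, model.get_sentence_embedding_dimension()), dtype=np.float32)
        )

embeddings = np.vstack(all_embeddings).astype(np.float32)  # shape (3, 4)
# The + 1e-12 guard makes the all-zero placeholder row safe to normalize:
# its cosine similarity to everything comes out ~0 instead of NaN.
norms = np.linalg.norm(embeddings, axis=1, keepdims=True) + 1e-12
unit = embeddings / norms
assert not np.isnan(unit).any()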
