#!/usr/bin/env python3
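"""Build a FAISS vector index over the source files of a Git repository.

Reads settings.json for the repository URL and embedding model name, clones
the repository if it is not already present, splits its C/C++/C# sources into
overlapping chunks, embeds the chunks with a SentenceTransformer model, and
saves the resulting FAISS index under data/rag/ for later retrieval-augmented
generation queries.
"""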
import sys
import subprocess
from pathlib import Path
from typing import List
import json
from tqdm import tqdm

from sentence_transformers import SentenceTransformer
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.schema import Document
from langchain.embeddings.base import Embeddings
from langchain_community.vectorstores import FAISS

def load_settings(path: Path) -> dict:
    """Read the JSON settings file, exiting with an error if it is missing."""
    if not path.exists():
        print(f"Settings file not found: {path}", file=sys.stderr)
        sys.exit(1)
    return json.loads(path.read_text(encoding='utf-8'))
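
# The settings file needs at least the two keys read in main(). The values
# below are illustrative, not taken from the source:
# {
#     "embed_model": "sentence-transformers/all-MiniLM-L6-v2",
#     "repository": "https://github.com/example/project.git"
# }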

def clone_repo(repo_url: str, local_path: Path) -> None:
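    """Clone repo_url into local_path unless a checkout already exists there."""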
    if not local_path.exists():
        print(f"Cloning repo {repo_url} into {local_path}...")
        subprocess.run(["git", "clone", repo_url, str(local_path)], check=True)
    else:
        print(f"Repository already exists at {local_path}")


def extract_repo_files(repo_path: Path) -> List[Document]:
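    """Collect every C/C++/C# source file under repo_path as a LangChain Document."""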
    docs: List[Document] = []
    allowed_extensions = {'.cs', '.cpp', '.c', '.h', '.hpp'}
    all_files = [p for p in repo_path.rglob('*') if p.is_file() and p.suffix in allowed_extensions]
    for path in tqdm(all_files, desc="Reading repo files"):
        try:
            text = path.read_text(encoding='utf-8', errors='ignore')
            docs.append(Document(page_content=text, metadata={'source': str(path)}))
        except Exception as e:
            print(f"Warning: could not read {path}: {e}", file=sys.stderr)
    return docs


def build_embeddings_index(
    repo_path: Path,
    index_path: Path,
    embed_model_name: str
) -> None:
    """Chunk the repository sources, embed them, and save a FAISS index."""
    if index_path.exists():
        # Skip the expensive read/split/embed pipeline on re-runs.
        print(f"FAISS index already exists at {index_path}.")
        return

    splitter = RecursiveCharacterTextSplitter(chunk_size=512, chunk_overlap=64)
    raw_docs = extract_repo_files(repo_path)

    # Split each file into overlapping chunks, keeping the source path in the
    # metadata so search hits can be traced back to a file.
    chunks: List[Document] = []
    for doc in raw_docs:
        for chunk_text in splitter.split_text(doc.page_content):
            chunks.append(Document(page_content=chunk_text, metadata=doc.metadata))

    embedder = SentenceTransformer(embed_model_name)

    class BTEmbeddings(Embeddings):
        """Adapter exposing the SentenceTransformer via LangChain's Embeddings API."""
        def embed_documents(self, texts: List[str]) -> List[List[float]]:
            # encode() returns a numpy array; convert to plain lists to match
            # the declared Embeddings interface.
            return embedder.encode(texts, show_progress_bar=True).tolist()

        def embed_query(self, text: str) -> List[float]:
            return embedder.encode([text])[0].tolist()

    embedding = BTEmbeddings()

    print("Building FAISS index...")
    vectorstore = FAISS.from_documents(chunks, embedding)
    vectorstore.save_local(str(index_path))
    print("FAISS index built and saved.")


def main():
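    """Entry point: load settings, clone the repository, and build the index."""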
    # Configuration
    BASE_DIR = Path(__file__).resolve().parent
    SETTINGS_PATH = BASE_DIR.parent / 'settings.json'

    # Load settings
    settings = load_settings(SETTINGS_PATH)

    EMBED_MODEL = settings['embed_model']
    OUT_DIR = BASE_DIR.parent / 'data' / 'rag'
    OUT_DIR.mkdir(parents=True, exist_ok=True)

    repo_url = settings['repository']
    local_repo = OUT_DIR / 'repo'
    vector_index_path = OUT_DIR / 'faiss_index'

    clone_repo(repo_url, local_repo)
    build_embeddings_index(local_repo, vector_index_path, EMBED_MODEL)

if __name__ == '__main__':
    main()