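"""Semantic search service for Zigistry.

Builds FAISS indexes over the zigistry/packages and zigistry/programs
Hugging Face datasets using sentence-transformers embeddings, and exposes
two FastAPI search endpoints.
"""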
import faiss
import numpy as np
from fastapi import FastAPI
from fastapi.responses import JSONResponse
from datasets import load_dataset
from sentence_transformers import SentenceTransformer

app = FastAPI()

# Repository metadata fields concatenated into each entry's searchable text.
FIELDS = (
    "full_name",
    "description",
    "default_branch",
    "open_issues",
    "stargazers_count",
    "forks_count",
    "watchers_count",
    "license",
    "size",
    "fork",
    "updated_at",
    "has_build_zig",
    "has_build_zig_zon",
    "created_at",
)

# all-MiniLM-L6-v2 maps text to 384-dimensional embeddings.
model = SentenceTransformer("all-MiniLM-L6-v2")

def load_dataset_with_fields(name, include_readme=False):
    """Load a dataset's train split and build one searchable text string
    per repository from its metadata, topics, and (optionally) its README."""
    dataset = load_dataset(name)["train"]
    repo_texts = [
        # Guard against None values so they neither raise a TypeError on
        # concatenation nor leak "None" tokens into the embedded text.
        " ".join("" if x.get(field) is None else str(x.get(field)) for field in FIELDS)
        + (" " + (x.get("readme_content") or "") if include_readme else "")
        + " " + " ".join(x.get("topics") or [])
        for x in dataset
    ]
    if not include_readme:
        # Drop the bulky README field from the records returned to clients
        # when it is not part of the search text.
        dataset = [{k: v for k, v in item.items() if k != "readme_content"} for item in dataset]
    return dataset, repo_texts

# Build the two searchable corpora; both embed README content in the text.
datasets = {
    "packages": load_dataset_with_fields("zigistry/packages", include_readme=True),
    "programs": load_dataset_with_fields("zigistry/programs", include_readme=True),
}

indices = {}
for key, (dataset, repo_texts) in datasets.items():
    # encode() returns a float32 numpy array, which FAISS accepts directly.
    repo_embeddings = model.encode(repo_texts)
    # IndexFlatL2 does exact (brute-force) L2 search; adequate at this scale.
    index = faiss.IndexFlatL2(repo_embeddings.shape[1])
    index.add(repo_embeddings)
    indices[key] = (index, dataset)

def filter_results_by_distance(distances, idxs, dataset, threshold_ratio=0.3):
    """Keep only hits whose L2 distance lies within `threshold_ratio` of
    the observed min-max distance range for this query."""
    if len(distances) == 0:
        return []
    min_distance = np.min(distances)
    max_distance = np.max(distances)
    threshold = min_distance + ((max_distance - min_distance) * threshold_ratio)
    return [
        dataset[int(i)]
        for d, i in zip(distances, idxs)
        if d <= threshold
    ]
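# Worked example of the filter: for distances [0.2, 0.5, 1.0] and the
# default threshold_ratio of 0.3, the cut-off is 0.2 + 0.3 * (1.0 - 0.2)
# = 0.44, so only the 0.2 hit is kept.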

@app.get("/searchPackages/")
def search_packages(q: str):
    key = "packages"
    index, dataset = indices[key]
    query_embedding = model.encode([q])
    distances, idxs = index.search(np.array(query_embedding), len(dataset))
    results = filter_results_by_distance(distances[0], idxs[0], dataset)
    headers = {"Access-Control-Allow-Origin": "*", "Content-Type": "application/json"}
    return JSONResponse(content=results, headers=headers)

@app.get("/searchPrograms/")
def search_programs(q: str):
    key = "programs"
    index, dataset = indices[key]
    query_embedding = model.encode([q])
    distances, idxs = index.search(np.array(query_embedding), len(dataset))
    results = filter_results_by_distance(distances[0], idxs[0], dataset)
    headers = {"Access-Control-Allow-Origin": "*", "Content-Type": "application/json"}
    return JSONResponse(content=results, headers=headers)
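
# Example usage (the module name "main" below is an assumption, not part of
# the original file):
#
#   uvicorn main:app --host 0.0.0.0 --port 8000
#   curl "http://localhost:8000/searchPackages/?q=json%20parser"
#
# Each endpoint returns a JSON array of repository records that pass the
# relative-distance filter.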