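"""
FastAPI service for Zigistry: semantic search over the zigistry/packages and
zigistry/programs datasets (FAISS over sentence-transformers embeddings),
plus paginated infinite-scroll endpoints that serve raw dataset rows.
"""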
import faiss
import numpy as np
from fastapi import FastAPI, Query
from fastapi.responses import JSONResponse
from datasets import load_dataset
from sentence_transformers import SentenceTransformer

app = FastAPI()
# Metadata fields concatenated into each repository's embedding text.
FIELDS = (
    "full_name",
    "description",
    "default_branch",
    "open_issues",
    "stargazers_count",
    "forks_count",
    "watchers_count",
    "license",
    "size",
    "fork",
    "updated_at",
    "has_build_zig",
    "has_build_zig_zon",
    "created_at",
)
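# Sentence-embedding model used for both indexing and queries
# (all-MiniLM-L6-v2 outputs 384-dimensional vectors).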
model = SentenceTransformer("all-MiniLM-L6-v2")

def load_dataset_with_fields(name, include_readme=False):
    """Load a Hugging Face dataset split and build one embedding text per repo."""
    dataset = load_dataset(name)["train"]
    repo_texts = [
        " ".join(str(x.get(field, "")) for field in FIELDS)
        + ((" " + (x.get("readme_content") or "")) if include_readme else "")
        + " " + " ".join(x.get("topics") or [])
        for x in dataset
    ]
    if not include_readme:
        # Drop the (large) README field from rows that are served over the API.
        dataset = [{k: v for k, v in item.items() if k != "readme_content"} for item in dataset]
    return dataset, repo_texts
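
# Each call below returns (rows, texts): texts[i] is the concatenated
# metadata string that gets embedded for rows[i].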
datasets = {
    "packages": load_dataset_with_fields("zigistry/packages", include_readme=True),
    "programs": load_dataset_with_fields("zigistry/programs", include_readme=True),
}
# Embed every repo once at startup and build one exact (brute-force) L2
# FAISS index per dataset.
indices = {}
for key, (dataset, repo_texts) in datasets.items():
    repo_embeddings = model.encode(repo_texts)
    index = faiss.IndexFlatL2(repo_embeddings.shape[1])
    index.add(np.array(repo_embeddings))
    indices[key] = (index, dataset)
# Reload the datasets without README content for the paginated scroll endpoints.
scroll_data = {
    "infiniteScrollPackages": load_dataset_with_fields("zigistry/packages", include_readme=False)[0],
    "infiniteScrollPrograms": load_dataset_with_fields("zigistry/programs", include_readme=False)[0],
}

def filter_results_by_distance(distances, idxs, dataset, max_results=50, threshold=0.6):
    """
    Only return results that are likely relevant (distance-based filtering).
    Lower distance = more similar. The cutoff is the minimum distance plus
    `threshold` times the spread between the minimum and maximum distances.
    """
    if len(distances) == 0:
        return []
    min_dist = np.min(distances)
    cutoff = min_dist + (np.max(distances) - min_dist) * threshold
    filtered = [
        dataset[int(i)]
        for d, i in zip(distances, idxs)
        if d <= cutoff
    ]
    return filtered[:max_results]
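
# Worked example (hypothetical numbers): for distances [0.2, 0.5, 1.0] and
# threshold=0.6, cutoff = 0.2 + (1.0 - 0.2) * 0.6 = 0.68, so the results at
# distance 0.2 and 0.5 are kept and the one at 1.0 is dropped.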
@app.get("/infiniteScrollPackages/")
def infinite_scroll_packages(q: int = Query(0, ge=0)):
start = q * 10
content = scroll_data["infiniteScrollPackages"][start : start + 10]
headers = {"Access-Control-Allow-Origin": "*", "Content-Type": "application/json"}
return JSONResponse(content=content, headers=headers)
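
# Example (hypothetical request): GET /infiniteScrollPackages/?q=2
# returns dataset rows 20-29.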
@app.get("/infiniteScrollPrograms/")
def infinite_scroll_programs(q: int = Query(0, ge=0)):
start = q * 10
content = scroll_data["infiniteScrollPrograms"][start : start + 10]
headers = {"Access-Control-Allow-Origin": "*", "Content-Type": "application/json"}
return JSONResponse(content=content, headers=headers)
@app.get("/searchPackages/")
def search_packages(q: str):
key = "packages"
index, dataset = indices[key]
query_embedding = model.encode([q])
distances, idxs = index.search(np.array(query_embedding), len(dataset))
# Only keep results that are likely relevant
results = filter_results_by_distance(distances[0], idxs[0], dataset)
headers = {"Access-Control-Allow-Origin": "*", "Content-Type": "application/json"}
return JSONResponse(content=results, headers=headers)
@app.get("/searchPrograms/")
def search_programs(q: str):
key = "programs"
index, dataset = indices[key]
query_embedding = model.encode([q])
distances, idxs = index.search(np.array(query_embedding), len(dataset))
results = filter_results_by_distance(distances[0], idxs[0], dataset)
headers = {"Access-Control-Allow-Origin": "*", "Content-Type": "application/json"}
return JSONResponse(content=results, headers=headers) |
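
# Example usage (assumes this file is saved as main.py):
#   uvicorn main:app --host 0.0.0.0 --port 8000
#   curl "http://localhost:8000/searchPackages/?q=http%20server"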