# api-ai / app.py
# Author: RohanVashisht — commit 04adf6e (verified), 2.52 kB
import faiss
import numpy as np
from fastapi import FastAPI, Query
from datasets import load_dataset
from sentence_transformers import SentenceTransformer
# FastAPI application serving the search and infinite-scroll endpoints.
app = FastAPI()

# Repository metadata fields concatenated into each repo's searchable text.
FIELDS = (
    "full_name",
    "description",
    "watchers_count",
    "forks_count",
    "license",
    "default_branch",
    "has_build_zig",
    "has_build_zig_zon",
    "fork",
    "open_issues",
    "stargazers_count",
    "updated_at",
    "created_at",
    "size",
)

# Sentence-embedding model used for both the corpus and incoming queries.
model = SentenceTransformer("all-MiniLM-L6-v2")
def load_dataset_with_fields(name, include_readme=False):
    """Load a Hugging Face dataset's train split and build one search text per repo.

    Args:
        name: Hugging Face dataset identifier, e.g. "zigistry/packages".
        include_readme: When True, each repo's README content is appended to
            its searchable text and the README field is kept in the records.

    Returns:
        ``(dataset, repo_texts)`` where ``repo_texts[i]`` is the concatenated
        metadata, optional README, and topics text for ``dataset[i]``.
    """
    dataset = load_dataset(name)["train"]
    repo_texts = []
    for item in dataset:
        # Flatten the fixed metadata fields into one space-separated string.
        parts = [" ".join(str(item.get(field, "")) for field in FIELDS)]
        if include_readme:
            # `or ""` guards against a record storing an explicit None.
            parts.append(item.get("readme_content", "") or "")
        parts.append(" ".join(item.get("topics", []) or []))
        repo_texts.append(" ".join(parts))
    if not include_readme:
        # Drop the (large) README field from what gets served to clients.
        dataset = [
            {k: v for k, v in item.items() if k != "readme_content"}
            for item in dataset
        ]
    return dataset, repo_texts
# Load each corpus once, README included, so the same data backs both the
# semantic-search indices and the infinite-scroll listings.
datasets = {
    "packages": load_dataset_with_fields("zigistry/packages", include_readme=True),
    "programs": load_dataset_with_fields("zigistry/programs", include_readme=True),
}

# Build one FAISS L2 index per corpus from the sentence embeddings.
indices = {}
for key, (dataset, repo_texts) in datasets.items():
    repo_embeddings = model.encode(repo_texts)
    index = faiss.IndexFlatL2(repo_embeddings.shape[1])
    index.add(np.array(repo_embeddings))
    indices[key] = (index, dataset)


def _strip_readme(dataset):
    """Return the records as plain dicts without the heavy README field."""
    return [{k: v for k, v in item.items() if k != "readme_content"} for item in dataset]


# Derive the scroll listings from the data already in memory instead of
# re-downloading both datasets from the hub a second time (the original
# called load_dataset_with_fields(..., include_readme=False) again here).
scroll_data = {
    "infiniteScrollPackages": _strip_readme(datasets["packages"][0]),
    "infiniteScrollPrograms": _strip_readme(datasets["programs"][0]),
}
@app.get("/infiniteScrollPackages/")
def infinite_scroll_packages(q: int = Query(0, ge=0)):
    """Serve page `q` of the packages listing, ten records per page."""
    offset = 10 * q
    return scroll_data["infiniteScrollPackages"][offset : offset + 10]
@app.get("/infiniteScrollPrograms/")
def infinite_scroll_programs(q: int = Query(0, ge=0)):
    """Serve page `q` of the programs listing, ten records per page."""
    offset = 10 * q
    return scroll_data["infiniteScrollPrograms"][offset : offset + 10]
@app.get("/searchSomething/")
def search_something(q: str):
    """Semantic search over the packages or programs corpus.

    The corpus is picked by a keyword heuristic: queries containing
    "package" search packages, everything else searches programs. All
    results within 1.5x of the best (smallest) L2 distance are returned,
    capped at 280 entries.
    """
    key = "packages" if "package" in q.lower() else "programs"
    if key not in indices:
        return {"error": "Invalid category"}
    index, dataset = indices[key]
    if len(dataset) == 0:
        # Guard: an empty corpus would make distances[0][0] below fail.
        return []
    query_embedding = model.encode([q])
    # Rank the whole corpus so the relative-distance cutoff can be applied.
    distances, neighbor_ids = index.search(np.array(query_embedding), len(dataset))
    # NOTE(review): when the best distance is 0 (exact match) the threshold
    # collapses to 0 and only exact matches survive — confirm intended.
    threshold = distances[0][0] * 1.5
    results = [
        dataset[int(i)]
        for d, i in zip(distances[0], neighbor_ids[0])
        if d <= threshold
    ]
    # Slicing alone caps the payload; the original length check was redundant.
    return results[:280]