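# Streamlit app: repository recommender.
# Repository metadata and user queries are embedded with the CodeT5 encoder,
# and repositories are ranked by cosine similarity to the query.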
import warnings
warnings.filterwarnings('ignore')
import streamlit as st
import pandas as pd
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from transformers import AutoTokenizer, AutoModel
import torch
from torch.utils.data import DataLoader, Dataset
from datetime import datetime
from typing import List, Dict, Any
from functools import partial
from datasets import load_dataset
# Configure GPU if available
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Initialize session state
if 'history' not in st.session_state:
    st.session_state.history = []
if 'feedback' not in st.session_state:
    st.session_state.feedback = {}
# Define subset size
SUBSET_SIZE = 1000
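# A small fixed sample keeps the embedding precompute quick; raising SUBSET_SIZE
# searches more of the dataset at the cost of a longer first load.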
# Caching key resources: Model, Tokenizer, and Precomputed Embeddings
@st.cache_resource
def load_model_and_tokenizer():
"""
Load the pre-trained model and tokenizer using Hugging Face Transformers.
Cached to ensure it loads only once.
"""
model_name = "Salesforce/codet5-small"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModel.from_pretrained(model_name).to(device)
model.eval()
return tokenizer, model
@st.cache_resource
def load_data():
"""
Load and sample the dataset from Hugging Face.
Returns a DataFrame with a fixed subset of repositories.
"""
dataset = load_dataset("frankjosh/filtered_dataset")
data = pd.DataFrame(dataset['train'])
data = data.sample(n=min(SUBSET_SIZE, len(data)), random_state=42).reset_index(drop=True)
return data
@st.cache_resource
def precompute_embeddings(data: pd.DataFrame, _tokenizer, _model, batch_size=16):
    """
    Precompute embeddings for repository metadata to optimize query performance.
    The tokenizer and model parameters are prefixed with an underscore so that
    Streamlit's cache does not attempt to hash them.
    """
    class TextDataset(Dataset):
        def __init__(self, texts: List[str], tokenizer, max_length=512):
            self.texts = texts
            self.tokenizer = tokenizer
            self.max_length = max_length

        def __len__(self):
            return len(self.texts)

        def __getitem__(self, idx):
            # Tokenize without padding; collate_fn pads each batch
            # dynamically to its longest sequence.
            return self.tokenizer(
                self.texts[idx],
                truncation=True,
                max_length=self.max_length,
                return_tensors="pt"
            )

    def collate_fn(batch, pad_token_id):
        max_length = max(inputs['input_ids'].shape[1] for inputs in batch)
        input_ids, attention_mask = [], []
        for inputs in batch:
            input_ids.append(torch.nn.functional.pad(
                inputs['input_ids'].squeeze(0),
                (0, max_length - inputs['input_ids'].shape[1]),
                value=pad_token_id
            ))
            attention_mask.append(torch.nn.functional.pad(
                inputs['attention_mask'].squeeze(0),
                (0, max_length - inputs['attention_mask'].shape[1]),
                value=0
            ))
        return {
            'input_ids': torch.stack(input_ids),
            'attention_mask': torch.stack(attention_mask)
        }

    def generate_embeddings_batch(model, batch, device):
        # Mean-pool the encoder's last hidden states into one vector per text.
        with torch.no_grad():
            batch = {k: v.to(device) for k, v in batch.items()}
            outputs = model.encoder(**batch)
            return outputs.last_hidden_state.mean(dim=1).cpu().numpy()

    dataset = TextDataset(data['text'].tolist(), _tokenizer)
    dataloader = DataLoader(
        dataset, batch_size=batch_size, shuffle=False,
        collate_fn=partial(collate_fn, pad_token_id=_tokenizer.pad_token_id)
    )
    embeddings = []
    for batch in dataloader:
        batch_embeddings = generate_embeddings_batch(_model, batch, device)
        embeddings.extend(batch_embeddings)
    data['embedding'] = embeddings
    return data

@torch.no_grad()
def generate_query_embedding(model, tokenizer, query: str) -> np.ndarray:
"""
Generate embedding for a user query using the pre-trained model.
"""
inputs = tokenizer(
query, return_tensors="pt", padding=True,
truncation=True, max_length=512
).to(device)
outputs = model.encoder(**inputs)
return outputs.last_hidden_state.mean(dim=1).cpu().numpy()
def find_similar_repos(query_embedding: np.ndarray, data: pd.DataFrame, top_n=5) -> pd.DataFrame:
    """
    Compute cosine similarity and return the top N most similar repositories.
    """
    # query_embedding is already 2-D with shape (1, hidden_size), so it is
    # passed to cosine_similarity directly rather than wrapped in a list.
    similarities = cosine_similarity(query_embedding, np.stack(data['embedding'].values))[0]
    # Assign onto a copy so the cached DataFrame is not mutated between queries.
    data = data.assign(similarity=similarities)
    return data.nlargest(top_n, 'similarity')

def display_recommendations(recommendations: pd.DataFrame):
    """
    Display the recommended repositories in the Streamlit app interface.
    """
    st.markdown("### 🎯 Top Recommendations")
    # Enumerate for the displayed rank; the DataFrame index still reflects the
    # row's position in the original sample after nlargest().
    for rank, (_, row) in enumerate(recommendations.iterrows(), start=1):
        st.markdown(f"### {rank}. {row['repo']}")
        st.metric("Match Score", f"{row['similarity']:.2%}")
        st.markdown(f"[View Repository]({row['url']})")

# Main workflow
st.title("Repository Recommender System 🚀")
st.caption("Find repositories based on your project description.")
# Load resources
tokenizer, model = load_model_and_tokenizer()
data = load_data()
data = precompute_embeddings(data, tokenizer, model)
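# Note: the first run embeds all sampled repositories, which can take a few
# minutes on CPU; subsequent runs reuse the cached embeddings.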
# User input
user_query = st.text_area(
    "Describe your project:", height=150,
    placeholder="Example: A machine learning project for customer churn prediction..."
)

if st.button("🔍 Search Repositories"):
    if user_query.strip():
        with st.spinner("Finding relevant repositories..."):
            query_embedding = generate_query_embedding(model, tokenizer, user_query)
            recommendations = find_similar_repos(query_embedding, data)
            display_recommendations(recommendations)
    else:
        st.error("Please provide a project description.")