								import os
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from transformers import pipeline
import gradio as gr
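# Dependencies (for requirements.txt): pandas, numpy, scikit-learn, transformers,
# gradio, huggingface_hub, plus a backend such as torch for the text-generation pipeline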
from huggingface_hub import login

# Read the Hugging Face token from the environment (the Space secret is named "HF_Token")
HF_TOKEN = os.getenv("HF_Token")
# Log in so the gated Llama 3.2 model can be downloaded
login(token=HF_TOKEN)
# Load and preprocess the data
def preprocess_data(file_path):
    """Load and preprocess the CSV data."""
    data = pd.read_csv(file_path)
    # Clean column names
    data.columns = data.columns.str.strip().str.replace('#', 'Count').str.replace(' ', '_')
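    # e.g. 'Dist. Covered (m)' -> 'Dist._Covered_(m)', '# Sprints' -> 'Count_Sprints'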
    # Replace any missing values with 0 (applies to numeric and text columns alike)
    data = data.fillna(0)
    return data
# Convert data into a retrievable knowledge base
def create_knowledge_base(data):
    """Transform the data into a knowledge base suitable for retrieval."""
    # Combine relevant fields into a single text-based feature for embedding
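    # NOTE: the field names below (including the misspelled 'Main_Possition') must
    # match the cleaned CSV headers produced by preprocess_data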
    data['Knowledge_Text'] = data.apply(lambda row: (
        f"Player: {row['Player_Name']}, Position: {row['Main_Possition']}, "
        f"Date: {row['Date']}, Session: {row['Session_Name']}, "
        f"Played Time: {row['Played_Time_(min)']} minutes, Top Speed: {row['Top_Speed_(km/h)']} km/h, "
        f"Distance Covered: {row['Dist._Covered_(m)']} meters, "
        f"Intensity: {row['Session_Intensity']}, "
        f"RPE: {row['RPE']}, s-RPE: {row['s-RPE']}"
    ), axis=1)
    return data[['Player_ID', 'Knowledge_Text']]
# Create a similarity-based retrieval function
def query_knowledge_base(knowledge_base, query, vectorizer):
    """Query the knowledge base using cosine similarity."""
    query_vec = vectorizer.transform([query])
    knowledge_vec = vectorizer.transform(knowledge_base['Knowledge_Text'])
    # Compute cosine similarities
    similarities = cosine_similarity(query_vec, knowledge_vec).flatten()
    # Retrieve the most relevant rows
    top_indices = np.argsort(similarities)[::-1][:5]  # Top 5 results
    return knowledge_base.iloc[top_indices], similarities[top_indices]
# Main pipeline with LLM integration and prompt engineering
def main_pipeline(file_path, user_query):
    """End-to-end pipeline for the RAG system with Llama3.2 and prompt engineering."""
    # Preprocess data
    data = preprocess_data(file_path)
    knowledge_base = create_knowledge_base(data)
    # Create TF-IDF Vectorizer
    vectorizer = TfidfVectorizer()
    vectorizer.fit(knowledge_base['Knowledge_Text'])
    # Query the knowledge base
    results, scores = query_knowledge_base(knowledge_base, user_query, vectorizer)
    # Format retrieved knowledge for LLM input
    retrieved_text = "\n".join(results['Knowledge_Text'].tolist())
    # Use Llama 3.2 for question answering with prompt engineering
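    # NOTE: creating the pipeline here re-loads the model on every request; a deployed
    # Space would typically construct it once at module level and reuse it.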
    llm = pipeline("text-generation", model="meta-llama/Llama-3.2-1B-Instruct")
    prompt = (
        f"You are an expert sports analyst. Based on the following training data, provide a detailed and insightful answer to the user's question. "
        f"Always include relevant numerical data in your response. Limit your response to a maximum of 200 words.\n\n"
        f"Training Data:\n{retrieved_text}\n\n"
        f"User Question: {user_query}\n\nAnswer:"
    )
    response = llm(prompt, max_new_tokens=200, num_return_sequences=1)
    # The pipeline returns the prompt plus the completion, so keep only the text after "Answer:"
    answer = response[0]['generated_text'].split("Answer:", 1)[-1].strip()
    return answer
# Gradio interface
def query_interface(file_path, user_query):
    """Gradio handler: run the pipeline and surface any error as text."""
    try:
        # gr.File may pass a tempfile-like object (older Gradio) or a plain path string
        path = file_path.name if hasattr(file_path, "name") else file_path
        return main_pipeline(path, user_query)
    except Exception as e:
        return f"Error: {e}"
# Build and launch the Gradio app
file_input = gr.File(label="Upload CSV File")
text_input = gr.Textbox(label="Ask a Question", lines=2, placeholder="Enter your query here...")
output = gr.Textbox(label="Answer")
interface = gr.Interface(
    fn=query_interface,
    inputs=[file_input, text_input],
    outputs=output,
    title="RAG Training Data Query System",
    description="Upload a CSV file containing training data and ask detailed questions about it."
)
interface.launch()
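
# Example usage without the UI (a sketch; assumes a local "training_data.csv" with the
# columns referenced above and the HF_Token secret set in the environment):
#   answer = main_pipeline("training_data.csv", "Which player covered the most distance?")
#   print(answer)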