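"""Minimal retrieval-augmented QA pipeline: embed a question with
sentence-transformers, retrieve the nearest documents from a FAISS index,
and generate an answer with a small causal language model."""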
import pickle

import faiss
import numpy as np
from sentence_transformers import SentenceTransformer
from transformers import AutoModelForCausalLM, AutoTokenizer

def load_faiss_index(index_path="faiss_index/faiss_index.faiss", doc_path="faiss_index/documents.pkl"):
    """Load the FAISS index and the pickled list of source documents."""
    index = faiss.read_index(index_path)
    with open(doc_path, "rb") as f:
        documents = pickle.load(f)
    return index, documents

def get_embedding_model():
    """Return the sentence-transformers model used to embed questions."""
    return SentenceTransformer("sentence-transformers/multi-qa-MiniLM-L6-cos-v1")

def query_index(question, index, documents, model, k=3):
    """Embed the question and return the k nearest documents from the index."""
    question_embedding = model.encode([question])
    # FAISS expects a float32 matrix of shape (n_queries, dim).
    _, indices = index.search(np.array(question_embedding).astype("float32"), k)
    return [documents[i] for i in indices[0]]

def generate_answer(question, context):
    """Generate an answer from the retrieved context with a small causal LM."""
    model_id = "Salesforce/codegen-350M-mono"

    tokenizer = AutoTokenizer.from_pretrained(model_id)
    # CodeGen ships without a pad token; reuse the EOS token for padding.
    tokenizer.pad_token = tokenizer.eos_token

    model = AutoModelForCausalLM.from_pretrained(model_id)

    prompt = f"Here is some context:\n{context}\n\nQuestion: {question}\nAnswer:"
    inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True)
    outputs = model.generate(**inputs, max_new_tokens=128, pad_token_id=tokenizer.eos_token_id)
    # Decode only the newly generated tokens, not the echoed prompt.
    answer_tokens = outputs[0][inputs["input_ids"].shape[1]:]
    return tokenizer.decode(answer_tokens, skip_special_tokens=True)
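
# Example usage: a minimal sketch, assuming the index under faiss_index/ was
# built beforehand and that the pickled documents are plain strings. The
# question below is a hypothetical stand-in for a real query.
if __name__ == "__main__":
    index, documents = load_faiss_index()
    embedder = get_embedding_model()

    question = "How do I read a FAISS index from disk?"  # hypothetical query
    context = "\n".join(query_index(question, index, documents, embedder))
    print(generate_answer(question, context))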