import os
import faiss
import pickle
import numpy as np
import re
from sentence_transformers import SentenceTransformer
from transformers import AutoTokenizer, pipeline
from huggingface_hub import InferenceClient

# Model selection: use the hosted Inference API when a Hugging Face token is
# available, otherwise fall back to a small local text2text pipeline.
HF_TOKEN = os.environ.get("edup2")  # HF token stored under this environment variable name

if HF_TOKEN:
    MODEL_NAME = "mistralai/Mistral-7B-Instruct-v0.1"
    client = InferenceClient(MODEL_NAME, token=HF_TOKEN, timeout=30)
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, token=HF_TOKEN)
    use_client = True
else:
    MODEL_NAME = "google/flan-t5-base"
    generator = pipeline("text2text-generation", model=MODEL_NAME)
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
    use_client = False

def load_faiss_index(index_path="faiss_index/faiss_index.faiss", doc_path="faiss_index/documents.pkl"):
    index = faiss.read_index(index_path)
    with open(doc_path, "rb") as f:
        documents = pickle.load(f)
    return index, documents

def get_embedding_model():
    return SentenceTransformer("sentence-transformers/multi-qa-MiniLM-L6-cos-v1")

def query_index(question, index, documents, model, k=3):
    # Embed the question and retrieve the k nearest documents from the FAISS index.
    question_embedding = model.encode([question])
    _, indices = index.search(np.array(question_embedding).astype("float32"), k)
    return [documents[i] for i in indices[0]]

def nettoyer_context(context):
    # Clean the retrieved context: unwrap stringified single-element lists
    # (e.g. "['value']" -> "value") and drop literal "None" values.
    context = re.sub(r"\[\'(.*?)\'\]", r"\1", context)
    context = context.replace("None", "")
    return context

def generate_answer(question, context):
    # French prompt: presents the retrieved context and asks the model to answer
    # like a caring guidance counselor, in a fluent and natural style.
    prompt = f"""Voici des informations sur des établissements et formations :

{context}

Formule ta réponse comme un conseiller d’orientation bienveillant, de manière fluide et naturelle.

Question : {question}
Réponse :"""

    if use_client:
        # The request timeout is set on the InferenceClient above;
        # text_generation() itself does not take a timeout argument.
        response = client.text_generation(prompt=prompt, max_new_tokens=300)
        return response
    else:
        result = generator(prompt, max_new_tokens=256, do_sample=True)
        return result[0]["generated_text"]
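

# Illustrative usage sketch (assumption: a FAISS index built beforehand lives at the
# default paths under faiss_index/). This is not part of the original module; it only
# shows how the helpers above compose: load the index, embed the question, retrieve
# the top documents, clean them, and generate an answer.
if __name__ == "__main__":
    index, documents = load_faiss_index()
    embedding_model = get_embedding_model()

    # Hypothetical example question, for illustration only.
    question = "Quelles formations en informatique sont proposées à Lyon ?"
    docs = query_index(question, index, documents, embedding_model, k=3)
    context = nettoyer_context("\n".join(str(d) for d in docs))

    print(generate_answer(question, context))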