import os
import zipfile

import gradio as gr
import spacy
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity

zip_path = "en_core_web_sm-3.0.0.zip"  # ZIP file in the project folder
extraction_dir = "./extracted_models"  # Subfolder to extract into
test_dir = "./extracted_models/en_core_web_sm-3.0.0"  # Folder after extraction

# Extract the ZIP file only if the target folder does not exist yet
if not os.path.exists(test_dir):
    with zipfile.ZipFile(zip_path, 'r') as zip_ref:
        zip_ref.extractall(extraction_dir)
    print(f"Model extracted successfully into {extraction_dir}")

# Path of the extracted folder
model_path = os.path.join(extraction_dir, "en_core_web_sm-3.0.0")  # Make sure this is correct

# Load the spaCy model
nlp = spacy.load(model_path)

# Load the SentenceTransformer model
#model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2', device='cpu')
#model = SentenceTransformer('sentence-transformers/msmarco-distilbert-base-v4', device='cpu')
#model = SentenceTransformer('sentence-transformers/all-distilroberta-v1', device='cpu')
model = SentenceTransformer('sentence-transformers/paraphrase-multilingual-mpnet-base-v2', device='cpu')

# Preprocess the manual (load it from a file or database)
with open('testo.txt', 'r', encoding='utf-8') as file:
    text = file.read()

# Split the text into sentences using spaCy
doc = nlp(text)
sentences = [sent.text for sent in doc.sents]  # Extract the sentences from the text

# Create the embeddings for the manual
embeddings = model.encode(sentences, batch_size=8, show_progress_bar=True)


# Return the most relevant sentences for a given query
def find_relevant_sentences(query):
    query_embedding = model.encode([query])
    similarities = cosine_similarity(query_embedding, embeddings).flatten()

    # Keep only the results above the similarity threshold
    threshold = 0.2
    filtered_results = [(idx, sim) for idx, sim in enumerate(similarities) if sim >= threshold]

    # Sort the results by similarity, highest first
    filtered_results.sort(key=lambda x: x[1], reverse=True)

    # Take the top-N most relevant sentences
    top_n = 5
    relevant_sentences = [sentences[idx] for idx, _ in filtered_results[:top_n]]

    # Re-segment the selected sentences with spaCy
    doc = nlp(" ".join(relevant_sentences))
    grouped_results = [sent.text for sent in doc.sents]

    # Cleanup: remove line breaks and combine all sentences into a single text
    cleaned_results = [s.replace("\n", " ") for s in grouped_results]
    final_output = " ".join(cleaned_results)

    return final_output


examples = [
    ["irresponsible use of the machine?"],
    ["If I have a problem how can I get help?"],
    ["precautions when using the cutting machine"],
    ["How do I change the knife of the cutting machine?"],
]

# Gradio interface
iface = gr.Interface(
    fn=find_relevant_sentences,
    inputs=gr.Textbox(label="Insert your query"),
    outputs=gr.Textbox(label="Relevant sentences"),
    examples=examples,
    title="Manual Querying System",
    description="Enter a question about the machine, and this tool will find the most relevant sentences from the manual.",
)

# Launch the Gradio app
iface.launch()