import os
import re
import traceback
import numpy as np
import nltk
import torch
import pandas as pd
import requests
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from transformers import (
    AutoTokenizer,
    AutoModelForSeq2SeqLM,
    AutoModelForTokenClassification,
    AutoModelForCausalLM,
    pipeline,
    BartForConditionalGeneration
)
from sentence_transformers import SentenceTransformer, CrossEncoder
from sklearn.metrics.pairwise import cosine_similarity
from bs4 import BeautifulSoup
from huggingface_hub import hf_hub_download
from safetensors import safe_open  # safe_open lives in the top-level safetensors package
from typing import Dict, Optional
nltk.download('punkt_tab')

# Initialize FastAPI app
app = FastAPI()

# Add CORS middleware
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Global containers for models and data
models = {}
data = {}

class QueryRequest(BaseModel):
    query: str
    language_code: int = 1

class MedicalProfile(BaseModel):
    conditions: str
    daily_symptoms: str

class ChatQuery(BaseModel):
    query: str
    language_code: int = 1
    conversation_id: str

class ChatMessage(BaseModel):
    role: str
    content: str
    timestamp: str
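# Example ChatQuery payload (language_code 1 = English, 0 = Arabic, inferred from
# the translation branches used in the endpoints below):
#   {"query": "What foods help with fatigue?", "language_code": 1, "conversation_id": "demo-1"}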
def init_nltk():
    """Initialize NLTK resources"""
    try:
        nltk.download('punkt', quiet=True)
        return True
    except Exception as e:
        print(f"Error initializing NLTK: {e}")
        return False
def load_models():
    """Initialize all required models"""
    try:
        print("Loading models...")

        # Report the available device (the models below are loaded with default placement)
        device = "cuda" if torch.cuda.is_available() else "cpu"
        print(f"Device set to use {device}")

        # Embedding models
        models['embedding_model'] = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
        models['cross_encoder'] = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-6-v2', max_length=512)
        models['semantic_model'] = SentenceTransformer('all-MiniLM-L6-v2')

        # Translation models
        models['ar_to_en_tokenizer'] = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-ar-en")
        models['ar_to_en_model'] = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-ar-en")
        models['en_to_ar_tokenizer'] = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-ar")
        models['en_to_ar_model'] = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-en-ar")

        # Attention model
        models['att_tokenizer'] = AutoTokenizer.from_pretrained("facebook/bart-base")
        models['att_model'] = BartForConditionalGeneration.from_pretrained("facebook/bart-base")

        # NER model
        models['bio_tokenizer'] = AutoTokenizer.from_pretrained("blaze999/Medical-NER")
        models['bio_model'] = AutoModelForTokenClassification.from_pretrained("blaze999/Medical-NER")
        models['ner_pipeline'] = pipeline("ner", model=models['bio_model'], tokenizer=models['bio_tokenizer'])

        # LLM model
        model_name = "M4-ai/Orca-2.0-Tau-1.8B"
        models['llm_tokenizer'] = AutoTokenizer.from_pretrained(model_name)
        models['llm_model'] = AutoModelForCausalLM.from_pretrained(model_name)

        print("Models loaded successfully")
        return True
    except Exception as e:
        print(f"Error loading models: {e}")
        return False
def load_embeddings() -> Optional[Dict[str, np.ndarray]]:
    """Load article embeddings from a local safetensors file, falling back to the Hub."""
    try:
        # Locate or download the embeddings file
        embeddings_path = 'embeddings.safetensors'
        if not os.path.exists(embeddings_path):
            print("File not found locally. Attempting to download from Hugging Face Hub...")
            embeddings_path = hf_hub_download(
                repo_id=os.environ.get('HF_SPACE_ID', 'thechaiexperiment/TeaRAG'),
                filename="embeddings.safetensors",
                repo_type="space"
            )
        embeddings = {}
        # Open the safetensors file and convert each tensor to a NumPy array
        with safe_open(embeddings_path, framework="pt") as f:
            for key in f.keys():
                try:
                    tensor = f.get_tensor(key)
                    if not isinstance(tensor, torch.Tensor):
                        raise TypeError(f"Value for key {key} is not a valid PyTorch tensor.")
                    embeddings[key] = tensor.numpy()
                except Exception as key_error:
                    print(f"Failed to process key {key}: {key_error}")
        if embeddings:
            print("Embeddings successfully loaded.")
        else:
            print("No embeddings could be loaded. Please check the file format and content.")
        return embeddings
    except Exception as e:
        print(f"Error loading embeddings: {e}")
        return None
def normalize_key(key: str) -> str:
    """Normalize embedding keys to match metadata IDs."""
    match = re.search(r'file_(\d+)', key)
    if match:
        return match.group(1)  # Extract the numeric part
    return key
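# e.g. normalize_key("file_123") -> "123"; keys without the pattern pass through unchanged.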
def load_recipes_embeddings() -> Optional[Dict[str, np.ndarray]]:
    """Load recipe embeddings, normalizing keys to match the metadata IDs."""
    try:
        embeddings_path = 'recipes_embeddings.safetensors'
        if not os.path.exists(embeddings_path):
            print("File not found locally. Attempting to download from Hugging Face Hub...")
            embeddings_path = hf_hub_download(
                repo_id=os.environ.get('HF_SPACE_ID', 'thechaiexperiment/TeaRAG'),
                filename="recipes_embeddings.safetensors",
                repo_type="space"
            )
        embeddings = {}
        with safe_open(embeddings_path, framework="pt") as f:
            for key in f.keys():
                try:
                    normalized_key = normalize_key(key)
                    tensor = f.get_tensor(key)
                    embeddings[normalized_key] = tensor.numpy()
                except Exception as key_error:
                    print(f"Failed to process key {key}: {key_error}")
        if embeddings:
            print(f"Successfully loaded {len(embeddings)} embeddings.")
        else:
            print("No embeddings could be loaded. Please check the file format and content.")
        return embeddings
    except Exception as e:
        print(f"Error loading recipe embeddings: {e}")
        return None
def load_documents_data(folder_path='downloaded_articles/downloaded_articles'):
    """Load document data from HTML articles in a specified folder."""
    try:
        print("Loading documents data...")
        # Check that the folder exists
        if not os.path.exists(folder_path) or not os.path.isdir(folder_path):
            print(f"Error: Folder '{folder_path}' not found")
            return False
        # List all HTML files in the folder
        html_files = [f for f in os.listdir(folder_path) if f.endswith('.html')]
        if not html_files:
            print(f"No HTML files found in folder '{folder_path}'")
            return False
        documents = []
        # Parse each HTML file and extract its text content
        for file_name in html_files:
            file_path = os.path.join(folder_path, file_name)
            try:
                with open(file_path, 'r', encoding='utf-8') as file:
                    soup = BeautifulSoup(file, 'html.parser')
                    text = soup.get_text(separator='\n').strip()
                    documents.append({"file_name": file_name, "content": text})
            except Exception as e:
                print(f"Error reading file {file_name}: {e}")
        # Convert the list of documents to a DataFrame
        data['df'] = pd.DataFrame(documents)
        if data['df'].empty:
            print("No valid documents loaded.")
            return False
        print(f"Successfully loaded {len(data['df'])} document records.")
        return True
    except Exception as e:
        print(f"Error loading documents: {e}")
        return False
def load_data():
    """Load all required data"""
    embeddings = load_embeddings()
    if embeddings:
        data['embeddings'] = embeddings  # stored so the /health endpoint can report on it
    else:
        print("Warning: Failed to load embeddings, falling back to basic functionality")
    if not load_documents_data():
        print("Warning: Failed to load documents data, falling back to basic functionality")
    return True

# Initialize application
print("Initializing application...")
init_success = load_models() and load_data()
def translate_text(text, source_to_target='ar_to_en'):
    """Translate text between Arabic and English"""
    try:
        if source_to_target == 'ar_to_en':
            tokenizer = models['ar_to_en_tokenizer']
            model = models['ar_to_en_model']
        else:
            tokenizer = models['en_to_ar_tokenizer']
            model = models['en_to_ar_model']
        inputs = tokenizer(text, return_tensors="pt", truncation=True)
        outputs = model.generate(**inputs)
        return tokenizer.decode(outputs[0], skip_special_tokens=True)
    except Exception as e:
        print(f"Translation error: {e}")
        return text
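# Usage: translate_text(arabic_text, 'ar_to_en') or translate_text(english_text, 'en_to_ar');
# on any error the original text is returned unchanged.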
def embed_query_text(query_text):
    embedding_model = models['embedding_model']
    query_embedding = embedding_model.encode([query_text])
    return query_embedding
def query_embeddings(query_embedding, embeddings_data=None, n_results=5):
    # Respect a caller-supplied embeddings dict; only reload when none is given
    if embeddings_data is None:
        embeddings_data = load_embeddings()
    if not embeddings_data:
        print("No embeddings data available.")
        return []
    try:
        doc_ids = list(embeddings_data.keys())
        doc_embeddings = np.array(list(embeddings_data.values()))
        similarities = cosine_similarity(query_embedding, doc_embeddings).flatten()
        top_indices = similarities.argsort()[-n_results:][::-1]
        return [(doc_ids[i], similarities[i]) for i in top_indices]
    except Exception as e:
        print(f"Error in query_embeddings: {e}")
        return []
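# Assumes each stored embedding is a 1-D vector, so np.array(...) stacks them to
# shape (num_docs, dim), matching the (1, dim) query produced by embed_query_text.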
def query_recipes_embeddings(query_embedding, embeddings_data=None, n_results=5):
    if embeddings_data is None:
        embeddings_data = load_recipes_embeddings()
    if not embeddings_data:
        print("No embeddings data available.")
        return []
    try:
        doc_ids = list(embeddings_data.keys())
        doc_embeddings = np.array(list(embeddings_data.values()))
        similarities = cosine_similarity(query_embedding, doc_embeddings).flatten()
        top_indices = similarities.argsort()[-n_results:][::-1]
        return [(doc_ids[i], similarities[i]) for i in top_indices]
    except Exception as e:
        print(f"Error in query_recipes_embeddings: {e}")
        return []
def get_page_title(url):
    try:
        response = requests.get(url, timeout=10)
        if response.status_code == 200:
            soup = BeautifulSoup(response.text, 'html.parser')
            title = soup.find('title')
            return title.get_text() if title else "No title found"
        return None
    except requests.exceptions.RequestException:
        return None
def retrieve_document_texts(doc_ids, folder_path='downloaded_articles/downloaded_articles'):
    texts = []
    for doc_id in doc_ids:
        file_path = os.path.join(folder_path, doc_id)
        try:
            if not os.path.exists(file_path):
                print(f"Warning: Document file not found: {file_path}")
                texts.append("")
                continue
            # Read and parse the HTML file
            with open(file_path, 'r', encoding='utf-8') as file:
                soup = BeautifulSoup(file, 'html.parser')
                text = soup.get_text(separator=' ', strip=True)
                texts.append(text)
        except Exception as e:
            print(f"Error retrieving document {doc_id}: {e}")
            texts.append("")
    return texts
def retrieve_rec_texts(document_ids, folder_path='downloaded_articles/downloaded_articles', metadata_path='recipes_metadata.xlsx'):
    # Load the metadata file that maps document IDs to original file names
    metadata_df = pd.read_excel(metadata_path)
    if "id" not in metadata_df.columns or "original_file_name" not in metadata_df.columns:
        raise ValueError("Metadata file must contain 'id' and 'original_file_name' columns.")
    # Map each ID to its original file name
    id_to_file_name = dict(zip(metadata_df["id"].astype(str), metadata_df["original_file_name"]))
    document_texts = []
    for doc_id in document_ids:
        original_file_name = id_to_file_name.get(doc_id)
        if not original_file_name:
            print(f"Warning: No original file name found for document ID {doc_id}")
            continue
        # Construct the file path and read the content if the file exists
        file_path = os.path.join(folder_path, original_file_name)
        if os.path.exists(file_path):
            with open(file_path, "r", encoding="utf-8") as f:
                document_texts.append(f.read())
        else:
            print(f"Warning: File not found at {file_path}")
    return document_texts
def rerank_documents(query, document_ids, document_texts, cross_encoder_model):
    try:
        # Score each (query, document) pair with the cross-encoder
        pairs = [(query, doc) for doc in document_texts]
        scores = cross_encoder_model.predict(pairs)
        # Combine scores with document IDs and texts, highest score first
        scored_documents = list(zip(scores, document_ids, document_texts))
        scored_documents.sort(key=lambda x: x[0], reverse=True)
        print("Reranked results:")
        for idx, (score, doc_id, doc) in enumerate(scored_documents):
            print(f"Rank {idx + 1} (Score: {score:.4f}, Document ID: {doc_id})")
        return scored_documents
    except Exception as e:
        print(f"Error reranking documents: {e}")
        return []
def extract_entities(text, ner_pipeline=None):
    """Extract unique named entities from text using the medical NER pipeline."""
    try:
        if ner_pipeline is None:
            ner_pipeline = models['ner_pipeline']
        ner_results = ner_pipeline(text)
        # Keep unique words whose tags mark the beginning of an entity ("B-...")
        entities = {result['word'] for result in ner_results if result['entity'].startswith("B-")}
        return list(entities)
    except Exception as e:
        print(f"Error extracting entities: {e}")
        return []
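# e.g. extract_entities("vegan diabetes headache fatigue") might return terms such as
# ["diabetes", "headache", "fatigue"]; the exact output depends on the Medical-NER label set.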
def match_entities(query_entities, sentence_entities):
    try:
        query_set, sentence_set = set(query_entities), set(sentence_entities)
        matches = query_set.intersection(sentence_set)
        return len(matches)
    except Exception as e:
        print(f"Error matching entities: {e}")
        return 0
def extract_relevant_portions(document_texts, query, max_portions=3, portion_size=1, min_query_words=1):
    relevant_portions = {}
    # Extract entities from the query
    query_entities = extract_entities(query)
    print(f"Extracted Query Entities: {query_entities}")
    for doc_id, doc_text in enumerate(document_texts):
        sentences = nltk.sent_tokenize(doc_text)  # Split document into sentences
        doc_relevant_portions = []
        # Extract entities from the entire document
        doc_entities = extract_entities(doc_text)
        print(f"Document {doc_id} Entities: {doc_entities}")
        for i, sentence in enumerate(sentences):
            # Score the sentence by how many query entities it shares
            sentence_entities = extract_entities(sentence)
            relevance_score = match_entities(query_entities, sentence_entities)
            # Select sentences with at least `min_query_words` matching entities
            if relevance_score >= min_query_words:
                start_idx = max(0, i - portion_size // 2)
                end_idx = min(len(sentences), i + portion_size // 2 + 1)
                portion = " ".join(sentences[start_idx:end_idx])
                doc_relevant_portions.append(portion)
                if len(doc_relevant_portions) >= max_portions:
                    break
        # Fallback: include the most entity-dense sentences if no relevant portions were found
        if not doc_relevant_portions and len(doc_entities) > 0:
            print(f"Fallback: Selecting sentences with most entities for Document {doc_id}")
            sorted_sentences = sorted(sentences, key=lambda s: len(extract_entities(s)), reverse=True)
            for fallback_sentence in sorted_sentences[:max_portions]:
                doc_relevant_portions.append(fallback_sentence)
        # Add the extracted portions to the result dictionary
        relevant_portions[f"Document_{doc_id}"] = doc_relevant_portions
    return relevant_portions
def remove_duplicates(selected_parts):
    """Drop duplicate sentences while preserving their original order."""
    unique_sentences = set()
    unique_selected_parts = []
    for sentence in selected_parts:
        if sentence not in unique_sentences:
            unique_selected_parts.append(sentence)
            unique_sentences.add(sentence)
    return unique_selected_parts
def enhance_passage_with_entities(passage, entities):
    return f"{passage}\n\nEntities: {', '.join(entities)}"

def create_prompt(question, passage):
    prompt = ("""
As a medical expert, you are required to answer the following question based only on the provided passage. Do not include any information not present in the passage. Your response should directly reflect the content of the passage. Maintain accuracy and relevance to the provided information.
Passage: {passage}
Question: {question}
Answer:
""")
    return prompt.format(passage=passage, question=question)
def generate_answer(prompt, max_length=860, temperature=0.2):
    tokenizer_f = models['llm_tokenizer']
    model_f = models['llm_model']
    inputs = tokenizer_f(prompt, return_tensors="pt", truncation=True)
    # Generate the output (do_sample=True so the temperature setting actually takes effect)
    output_ids = model_f.generate(
        **inputs,
        max_length=max_length,
        num_return_sequences=1,
        do_sample=True,
        temperature=temperature,
        pad_token_id=tokenizer_f.eos_token_id
    )
    # Decode the answer
    answer = tokenizer_f.decode(output_ids[0], skip_special_tokens=True)
    # Crude grounding check: require some keyword overlap between the full prompt and the answer
    passage_keywords = set(prompt.lower().split())
    answer_keywords = set(answer.lower().split())
    if passage_keywords.intersection(answer_keywords):
        return answer
    return "Sorry, I can't help with that."
def remove_answer_prefix(text):
    prefix = "Answer:"
    if prefix in text:
        # Split only once so later occurrences of "Answer:" are preserved
        return text.split(prefix, 1)[-1].strip()
    return text

def remove_incomplete_sentence(text):
    # If the text does not end with a period, trim everything after the last one
    if not text.endswith('.'):
        last_period_index = text.rfind('.')
        if last_period_index != -1:
            return text[:last_period_index + 1].strip()
    return text
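# e.g. remove_incomplete_sentence("Eat greens. Drink wat") -> "Eat greens."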
# --- Startup smoke test: run the recipe RAG pipeline once on a sample query ---
language_code = 1
query_text = "recipes and meals for vegan diabetes headache fatigue"
print(f"Generated query text: {query_text}")
try:
    # Generate the query embedding
    print("Generating query embedding...")
    query_embedding = embed_query_text(query_text)
    if query_embedding is None:
        raise ValueError("Failed to generate query embedding.")
    print(f"Query embedding generated: {query_embedding}")
    # Load embeddings and retrieve initial results
    print("Loading recipe embeddings...")
    embeddings_data = load_recipes_embeddings()
    print("Embeddings loaded. Retrieving initial results...")
    initial_results = query_recipes_embeddings(query_embedding, embeddings_data, n_results=10)
    if not initial_results:
        raise ValueError("No relevant recipes found.")
    print(f"Initial results: {initial_results}")
    # Extract document IDs
    document_ids = [doc_id for doc_id, _ in initial_results]
    print(f"Document IDs: {document_ids}")
    # Retrieve document texts
    folder_path = 'downloaded_articles/downloaded_articles'
    print("Retrieving document texts...")
    document_texts = retrieve_rec_texts(document_ids, folder_path)
    if not document_texts:
        raise ValueError("Failed to retrieve document texts.")
    print(f"Document texts retrieved: {document_texts}")
    # Load recipe metadata from DataFrame
    file_path = 'recipes_metadata.xlsx'
    print("Loading metadata from Excel...")
    metadata_df = pd.read_excel(file_path)
    print(f"Metadata loaded: {metadata_df.head()}")
    # Extract relevant portions
    print("Extracting relevant portions...")
    relevant_portions = extract_relevant_portions(
        document_texts, query_text, max_portions=3, portion_size=1, min_query_words=1
    )
    print(f"Relevant portions: {relevant_portions}")
    # Flatten and deduplicate the portions
    print("Flattening relevant portions...")
    flattened_relevant_portions = []
    for doc_id, portions in relevant_portions.items():
        flattened_relevant_portions.extend(portions)
    unique_selected_parts = remove_duplicates(flattened_relevant_portions)
    print(f"Unique selected parts: {unique_selected_parts}")
    # Combine parts into a single context
    combined_parts = " ".join(unique_selected_parts)
    print(f"Combined parts: {combined_parts}")
    context = [query_text] + unique_selected_parts
    print(f"Context: {context}")
    # Extract entities
    print("Extracting entities...")
    entities = extract_entities(query_text)
    print(f"Entities: {entities}")
    # Enhance passage with entities
    print("Enhancing passage with entities...")
    passage = enhance_passage_with_entities(combined_parts, entities)
    print(f"Enhanced passage: {passage}")
    # Create the prompt
    print("Creating prompt...")
    prompt = create_prompt(query_text, passage)
    print(f"Prompt: {prompt}")
    # Generate the answer
    print("Generating answer...")
    answer = generate_answer(prompt)
    print(f"Answer: {answer}")
    answer_part = answer.split("Answer:")[-1].strip()
    print(f"Answer part: {answer_part}")
    # Clean and finalize the answer
    print("Cleaning answer...")
    cleaned_answer = remove_answer_prefix(answer_part)
    print(f"Cleaned answer: {cleaned_answer}")
    final_answer = remove_incomplete_sentence(cleaned_answer)
    print(f"Final answer: {final_answer}")
    # Translate if needed
    if language_code == 0:
        print("Translating answer to Arabic...")
        final_answer = translate_text(final_answer, 'en_to_ar')
    # Display the answer
    if final_answer:
        print("Final Answer:")
        print(final_answer)
    else:
        print("Sorry, I can't help with that.")
except Exception:
    print("An error occurred:")
    print(traceback.format_exc())
@app.get("/")
async def root():
    return {"message": "Welcome to the FastAPI application! Use the /health endpoint to check health, and /api/query for processing queries."}

@app.get("/health")
async def health_check():
    """Health check endpoint"""
    status = {
        'status': 'healthy',
        'models_loaded': bool(models),
        'embeddings_loaded': bool(data.get('embeddings')),
        'documents_loaded': not data.get('df', pd.DataFrame()).empty
    }
    return status
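# Quick check (port 7860 as configured at the bottom of this file):
#   curl http://localhost:7860/health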
@app.post("/api/chat")  # route path assumed; adjust to match the frontend
async def chat_endpoint(chat_query: ChatQuery):
    try:
        query_text = chat_query.query
        language_code = chat_query.language_code
        # Embed the query and retrieve candidate documents
        query_embedding = embed_query_text(query_text)
        embeddings_data = load_embeddings()
        folder_path = 'downloaded_articles/downloaded_articles'
        initial_results = query_embeddings(query_embedding, embeddings_data, n_results=5)
        document_ids = [doc_id for doc_id, _ in initial_results]
        document_texts = retrieve_document_texts(document_ids, folder_path)
        # Rerank with the cross-encoder
        cross_encoder = models['cross_encoder']
        scores = cross_encoder.predict([(query_text, doc) for doc in document_texts])
        scored_documents = list(zip(scores, document_ids, document_texts))
        scored_documents.sort(key=lambda x: x[0], reverse=True)
        # Build the context passage and generate the answer
        relevant_portions = extract_relevant_portions(document_texts, query_text, max_portions=3, portion_size=1, min_query_words=1)
        flattened_relevant_portions = []
        for doc_id, portions in relevant_portions.items():
            flattened_relevant_portions.extend(portions)
        unique_selected_parts = remove_duplicates(flattened_relevant_portions)
        combined_parts = " ".join(unique_selected_parts)
        context = [query_text] + unique_selected_parts
        entities = extract_entities(query_text)
        passage = enhance_passage_with_entities(combined_parts, entities)
        prompt = create_prompt(query_text, passage)
        answer = generate_answer(prompt)
        answer_part = answer.split("Answer:")[-1].strip()
        cleaned_answer = remove_answer_prefix(answer_part)
        final_answer = remove_incomplete_sentence(cleaned_answer)
        if language_code == 0:
            final_answer = translate_text(final_answer, 'en_to_ar')
        if final_answer:
            print("Answer:")
            print(final_answer)
        else:
            print("Sorry, I can't help with that.")
        return {
            "response": final_answer,
            "conversation_id": chat_query.conversation_id,
            "success": True
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
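# Example request (assuming the route above):
#   curl -X POST http://localhost:7860/api/chat -H "Content-Type: application/json" \
#     -d '{"query": "What should I eat for fatigue?", "language_code": 1, "conversation_id": "demo-1"}'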
@app.post("/api/resources")  # route path assumed; adjust to match the frontend
async def resources_endpoint(profile: MedicalProfile):
    try:
        # Build the query text
        query_text = profile.conditions + " " + profile.daily_symptoms
        print(f"Generated query text: {query_text}")
        # Generate the query embedding
        query_embedding = embed_query_text(query_text)
        if query_embedding is None:
            raise ValueError("Failed to generate query embedding.")
        # Load embeddings and retrieve initial results
        embeddings_data = load_embeddings()
        folder_path = 'downloaded_articles/downloaded_articles'
        initial_results = query_embeddings(query_embedding, embeddings_data, n_results=6)
        if not initial_results:
            raise ValueError("No relevant documents found.")
        # Extract document IDs
        document_ids = [doc_id for doc_id, _ in initial_results]
        # Load document metadata (URL mappings)
        file_path = 'finalcleaned_excel_file.xlsx'
        df = pd.read_excel(file_path)
        file_name_to_url = {f"article_{index}.html": url for index, url in enumerate(df['Unnamed: 0'])}
        # Map file names to original URLs
        resources = []
        for file_name in document_ids:
            original_url = file_name_to_url.get(file_name)
            if original_url:
                title = get_page_title(original_url) or "Unknown Title"
                resources.append({"file_name": file_name, "title": title, "url": original_url})
            else:
                resources.append({"file_name": file_name, "title": "Unknown", "url": None})
        # Retrieve document texts
        document_texts = retrieve_document_texts(document_ids, folder_path)
        if not document_texts:
            raise ValueError("Failed to retrieve document texts.")
        # Re-rank with the cross-encoder
        cross_encoder = models['cross_encoder']
        scores = cross_encoder.predict([(query_text, doc) for doc in document_texts])
        scores = [float(score) for score in scores]  # Convert to native Python floats
        # Attach scores to resources and sort, highest first
        for i, resource in enumerate(resources):
            resource["score"] = scores[i] if i < len(scores) else 0.0
        resources.sort(key=lambda x: x["score"], reverse=True)
        # Limit the response to the top 5 resources
        return {"resources": resources[:5], "success": True}
    except ValueError as ve:
        # Handle expected errors
        raise HTTPException(status_code=400, detail=str(ve))
    except Exception as e:
        # Handle unexpected errors
        print(f"Unexpected error: {e}")
        raise HTTPException(status_code=500, detail="An unexpected error occurred.")
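# Example request (assuming the route above):
#   curl -X POST http://localhost:7860/api/resources -H "Content-Type: application/json" \
#     -d '{"conditions": "type 2 diabetes", "daily_symptoms": "fatigue and headache"}'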
@app.post("/api/recipes")  # route path assumed; adjust to match the frontend
async def recipes_endpoint(profile: MedicalProfile):
    try:
        # Build the query text for recipes
        query_text = (
            f"Recipes foods and meals suitable for someone with: "
            f"{profile.conditions} and experiencing {profile.daily_symptoms}"
        )
        print(f"Generated query text: {query_text}")
        # Generate the query embedding
        query_embedding = embed_query_text(query_text)
        if query_embedding is None:
            raise ValueError("Failed to generate query embedding.")
        # Load embeddings and retrieve initial results
        embeddings_data = load_recipes_embeddings()
        folder_path = 'downloaded_articles/downloaded_articles'
        initial_results = query_recipes_embeddings(query_embedding, embeddings_data, n_results=10)
        if not initial_results:
            raise ValueError("No relevant recipes found.")
        print(initial_results)
        # Extract document IDs
        document_ids = [doc_id for doc_id, _ in initial_results]
        print(document_ids)
        # Retrieve document texts
        document_texts = retrieve_rec_texts(document_ids, folder_path)
        if not document_texts:
            raise ValueError("Failed to retrieve document texts.")
        print(document_texts)
        # Load recipe metadata from DataFrame
        metadata_df = pd.read_excel('recipes_metadata.xlsx')
        # Build the context passage and generate the answer
        relevant_portions = extract_relevant_portions(document_texts, query_text, max_portions=3, portion_size=1, min_query_words=1)
        print(relevant_portions)
        flattened_relevant_portions = []
        for doc_id, portions in relevant_portions.items():
            flattened_relevant_portions.extend(portions)
        unique_selected_parts = remove_duplicates(flattened_relevant_portions)
        print(unique_selected_parts)
        combined_parts = " ".join(unique_selected_parts)
        print(combined_parts)
        context = [query_text] + unique_selected_parts
        print(context)
        entities = extract_entities(query_text)
        print(entities)
        passage = enhance_passage_with_entities(combined_parts, entities)
        print(passage)
        prompt = create_prompt(query_text, passage)
        print(prompt)
        answer = generate_answer(prompt)
        print(answer)
        answer_part = answer.split("Answer:")[-1].strip()
        print(answer_part)
        cleaned_answer = remove_answer_prefix(answer_part)
        print(cleaned_answer)
        final_answer = remove_incomplete_sentence(cleaned_answer)
        print(final_answer)
        # MedicalProfile carries no language field, so default to English here
        language_code = 1
        if language_code == 0:
            final_answer = translate_text(final_answer, 'en_to_ar')
        if final_answer:
            print("Answer:")
            print(final_answer)
        else:
            print("Sorry, I can't help with that.")
        return {
            "response": final_answer,
        }
    except ValueError as ve:
        # Handle expected errors
        raise HTTPException(status_code=400, detail=str(ve))
    except Exception as e:
        # Handle unexpected errors
        print(f"Unexpected error: {e}")
        raise HTTPException(status_code=500, detail="An unexpected error occurred.")
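# Example request (assuming the route above):
#   curl -X POST http://localhost:7860/api/recipes -H "Content-Type: application/json" \
#     -d '{"conditions": "vegan, type 2 diabetes", "daily_symptoms": "headache and fatigue"}'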
if not init_success:
    print("Warning: Application initialized with partial functionality")

# For running locally
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)