thechaiexperiment committed on
Commit 51c53aa · 1 Parent(s): 4a3efe8

Update app.py

Files changed (1)
  1. app.py (+148 -87)
app.py CHANGED
@@ -1,5 +1,11 @@
 import os
 import numpy as np
 from fastapi import FastAPI, HTTPException
 from fastapi.middleware.cors import CORSMiddleware
 from pydantic import BaseModel
@@ -8,17 +14,17 @@ from transformers import (
     AutoModelForSeq2SeqLM,
     AutoModelForTokenClassification,
     AutoModelForCausalLM,
-    pipeline
 )
-from sentence_transformers import SentenceTransformer, CrossEncoder
 from sklearn.metrics.pairwise import cosine_similarity
 from bs4 import BeautifulSoup
-import nltk
-import torch
-import pandas as pd
 from huggingface_hub import hf_hub_download
-from safetensors.torch import load_file  # Import Safetensors loader
 from typing import List, Dict, Optional

 # Initialize FastAPI app
 app = FastAPI()
@@ -77,6 +83,7 @@ def load_models():
         # Embedding models
         models['embedding'] = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
         models['cross_encoder'] = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-6-v2', max_length=512)

         # Translation models
         models['ar_to_en_tokenizer'] = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-ar-en")
@@ -84,6 +91,10 @@ def load_models():
         models['en_to_ar_tokenizer'] = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-ar")
         models['en_to_ar_model'] = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-en-ar")

         # NER model
         models['bio_tokenizer'] = AutoTokenizer.from_pretrained("blaze999/Medical-NER")
         models['bio_model'] = AutoModelForTokenClassification.from_pretrained("blaze999/Medical-NER")
@@ -100,41 +111,71 @@ def load_models():
         print(f"Error loading models: {e}")
         return False

-
 def load_embeddings() -> Optional[Dict[str, np.ndarray]]:
     """Load embeddings from Safetensors file"""
     try:
         embeddings_path = 'embeddings.safetensors'
         if not os.path.exists(embeddings_path):
             embeddings_path = hf_hub_download(
-                repo_id=os.environ.get('thechaiexperiment/TeaRAG', ''),
                 filename="embeddings.safetensors",
                 repo_type="space"
             )
-
         embeddings = load_file(embeddings_path)
         if not isinstance(embeddings, dict):
-            raise ValueError("Invalid format for embeddings in Safetensors file.")

-        # Convert to dictionary with numpy arrays
-        return {k: tensor.numpy() for k, tensor in embeddings.items()}
     except Exception as e:
         print(f"Error loading embeddings: {e}")
         return None

-
-def load_documents_data():
-    """Load document data with error handling"""
     try:
         print("Loading documents data...")
-        docs_path = 'finalcleaned_excel_file.xlsx'

-        if not os.path.exists(docs_path):
-            print(f"Error: {docs_path} not found")
             return False
-
-        data['df'] = pd.read_excel(docs_path)
-        print(f"Successfully loaded {len(data['df'])} document records")
         return True
     except Exception as e:
         print(f"Error loading documents data: {e}")
@@ -174,14 +215,17 @@ def embed_query_text(query_text):
     query_embedding = embedding.encode([query_text])
     return query_embedding

-def query_embeddings(query_embedding, n_results=5):
-    """Find relevant documents using embedding similarity"""
-    if not data['embeddings']:
         return []
-
     try:
-        doc_ids = list(data['embeddings'].keys())
-        doc_embeddings = np.array(list(data['embeddings'].values()))
         similarities = cosine_similarity(query_embedding, doc_embeddings).flatten()
         top_indices = similarities.argsort()[-n_results:][::-1]
         return [(doc_ids[i], similarities[i]) for i in top_indices]
@@ -189,66 +233,85 @@ def query_embeddings(query_embedding, n_results=5):
         print(f"Error in query_embeddings: {e}")
         return []

-def retrieve_document_text(doc_id):
-    """Retrieve document text from HTML file"""
-    try:
-        file_path = os.path.join('downloaded_articles', doc_id)
-        if not os.path.exists(file_path):
-            print(f"Warning: Document file not found: {file_path}")
-            return ""
-
-        with open(file_path, 'r', encoding='utf-8') as file:
-            soup = BeautifulSoup(file, 'html.parser')
-            return soup.get_text(separator=' ', strip=True)
-    except Exception as e:
-        print(f"Error retrieving document {doc_id}: {e}")
-        return ""
-
-
-def rerank_documents(query, doc_texts):
-    """Rerank documents using cross-encoder"""
     try:
-        pairs = [(query, doc) for doc in doc_texts]
-        scores = models['cross_encoder'].predict(pairs)
-        return scores
     except Exception as e:
         print(f"Error reranking documents: {e}")
-        return np.zeros(len(doc_texts))

-def extract_entities(text):
-    """Extract medical entities from text using NER"""
     try:
-        results = models['ner_pipeline'](text)
-        return list({result['word'] for result in results if result['entity'].startswith("B-")})
     except Exception as e:
         print(f"Error extracting entities: {e}")
         return []
 def match_entities(query_entities, sentence_entities):
-    query_set, sentence_set = set(query_entities), set(sentence_entities)
-    matches = query_set.intersection(sentence_set)
-    return len(matches)

 def extract_relevant_portions(document_texts, query, max_portions=3, portion_size=1, min_query_words=1):
     relevant_portions = {}
-
     # Extract entities from the query
     query_entities = extract_entities(query, ner_biobert)
     print(f"Extracted Query Entities: {query_entities}")
     for doc_id, doc_text in enumerate(document_texts):
         sentences = nltk.sent_tokenize(doc_text)  # Split document into sentences
         doc_relevant_portions = []
-
         # Extract entities from the entire document
         doc_entities = extract_entities(doc_text, ner_biobert)
         print(f"Document {doc_id} Entities: {doc_entities}")
-
         for i, sentence in enumerate(sentences):
             # Extract entities from the sentence
             sentence_entities = extract_entities(sentence, ner_biobert)
-
             # Compute relevance score
             relevance_score = match_entities(query_entities, sentence_entities)
-
             # Select sentences with at least `min_query_words` matching entities
             if relevance_score >= min_query_words:
                 start_idx = max(0, i - portion_size // 2)
@@ -257,40 +320,43 @@ def extract_relevant_portions(document_texts, query, max_portions=3, portion_siz
                 doc_relevant_portions.append(portion)
                 if len(doc_relevant_portions) >= max_portions:
                     break
-
-        # Add fallback to include the most entity-dense sentences if no results
         if not doc_relevant_portions and len(doc_entities) > 0:
             print(f"Fallback: Selecting sentences with most entities for Document {doc_id}")
             sorted_sentences = sorted(sentences, key=lambda s: len(extract_entities(s, ner_biobert)), reverse=True)
             for fallback_sentence in sorted_sentences[:max_portions]:
                 doc_relevant_portions.append(fallback_sentence)
-
         relevant_portions[f"Document_{doc_id}"] = doc_relevant_portions
-
     return relevant_portions
-

 def remove_duplicates(selected_parts):
     unique_sentences = set()
     unique_selected_parts = []
-
     for sentence in selected_parts:
         if sentence not in unique_sentences:
             unique_selected_parts.append(sentence)
             unique_sentences.add(sentence)
-
     return unique_selected_parts

 def extract_entities(text):
-    inputs = biobert_tokenizer(text, return_tensors="pt")
-    outputs = biobert_model(**inputs)
-    predictions = torch.argmax(outputs.logits, dim=2)
-    tokens = biobert_tokenizer.convert_ids_to_tokens(inputs.input_ids[0])
-    entities = [tokens[i] for i in range(len(tokens)) if predictions[0][i].item() != 0]  # Assume 0 is the label for non-entity
-    return entities

 def enhance_passage_with_entities(passage, entities):
-    # Example: Add entities to the passage for better context
     return f"{passage}\n\nEntities: {', '.join(entities)}"

 def create_prompt(question, passage):
@@ -304,13 +370,12 @@ def create_prompt(question, passage):
     Answer:
     """)
     return prompt.format(passage=passage, question=question)
-
 def generate_answer(prompt, max_length=860, temperature=0.2):
     inputs = tokenizer_f(prompt, return_tensors="pt", truncation=True)
-
     # Start timing
     start_time = time.time()
-
     output_ids = model_f.generate(
         inputs.input_ids,
         max_length=max_length,
@@ -318,28 +383,25 @@ def generate_answer(prompt, max_length=860, temperature=0.2):
         temperature=temperature,
         pad_token_id=tokenizer_f.eos_token_id
     )
-
     # End timing
     end_time = time.time()
-
     # Calculate the duration
     duration = end_time - start_time
-
     # Decode the answer
     answer = tokenizer_f.decode(output_ids[0], skip_special_tokens=True)
-
-    passage_keywords = set(passage.lower().split())
     answer_keywords = set(answer.lower().split())
-
     if passage_keywords.intersection(answer_keywords):
         return answer, duration
     else:
         return "Sorry, I can't help with that.", duration
-
 def remove_answer_prefix(text):
     prefix = "Answer:"
     if prefix in text:
-        return text.split(prefix)[-1].strip()
     return text

 def remove_incomplete_sentence(text):
@@ -356,7 +418,6 @@ def remove_incomplete_sentence(text):
 async def root():
     return {"message": "Welcome to the FastAPI application! Use the /health endpoint to check health, and /api/query for processing queries."}

-
 @app.get("/health")
 async def health_check():
     """Health check endpoint"""
 
+import transformers
+import pickle
 import os
 import numpy as np
+import torchvision
+import nltk
+import torch
+import pandas as pd
 from fastapi import FastAPI, HTTPException
 from fastapi.middleware.cors import CORSMiddleware
 from pydantic import BaseModel

     AutoModelForSeq2SeqLM,
     AutoModelForTokenClassification,
     AutoModelForCausalLM,
+    pipeline,
+    Qwen2Tokenizer,
+    BartForConditionalGeneration
 )
+from sentence_transformers import SentenceTransformer, CrossEncoder, util
 from sklearn.metrics.pairwise import cosine_similarity
 from bs4 import BeautifulSoup
 from huggingface_hub import hf_hub_download
+from safetensors.torch import load_file
 from typing import List, Dict, Optional
+from safetensors.numpy import load_file

 # Initialize FastAPI app
 app = FastAPI()
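A note on the two new `load_file` imports: in Python the later binding wins, so `load_file` in this module now refers to `safetensors.numpy.load_file`, which returns plain NumPy arrays rather than torch tensors. A minimal sketch (the aliased names are hypothetical, not part of the commit) of keeping both loaders available:

    import numpy as np
    from safetensors.numpy import load_file as load_numpy_file  # Dict[str, np.ndarray]
    from safetensors.torch import load_file as load_torch_file  # Dict[str, torch.Tensor]

    # Using the NumPy loader yields arrays directly, no .numpy() conversion needed
    embeddings = load_numpy_file("embeddings.safetensors")
    assert all(isinstance(v, np.ndarray) for v in embeddings.values())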
 
         # Embedding models
         models['embedding'] = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
         models['cross_encoder'] = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-6-v2', max_length=512)
+        models['semantic_model'] = SentenceTransformer('all-MiniLM-L6-v2')

         # Translation models
         models['ar_to_en_tokenizer'] = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-ar-en")
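The new 'semantic_model' entry, together with the added `util` import from sentence_transformers, points at sentence-level similarity scoring. A minimal sketch of how that handle could be exercised (the example sentences are invented; this is not code from the commit):

    # Encode a query and a candidate sentence, then compare with cosine similarity
    query_emb = models['semantic_model'].encode("What causes iron deficiency?", convert_to_tensor=True)
    sent_emb = models['semantic_model'].encode("Low dietary iron intake is a common cause.", convert_to_tensor=True)
    score = util.cos_sim(query_emb, sent_emb).item()  # value in [-1, 1]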
 
         models['en_to_ar_tokenizer'] = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-ar")
         models['en_to_ar_model'] = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-en-ar")

+        # Attention model
+        models['att_tokenizer'] = AutoTokenizer.from_pretrained("facebook/bart-base")
+        models['att_model'] = BartForConditionalGeneration.from_pretrained("facebook/bart-base")
+
         # NER model
         models['bio_tokenizer'] = AutoTokenizer.from_pretrained("blaze999/Medical-NER")
         models['bio_model'] = AutoModelForTokenClassification.from_pretrained("blaze999/Medical-NER")
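The diff does not show where 'att_tokenizer'/'att_model' are used. For reference only, a BART conditional-generation handle is typically exercised like this; since facebook/bart-base is not fine-tuned for a downstream task, the output mostly reconstructs the input, so this is just a wiring check:

    text = "Iron deficiency anemia is treated with oral iron supplements."
    inputs = models['att_tokenizer'](text, return_tensors="pt", truncation=True)
    output_ids = models['att_model'].generate(inputs["input_ids"], max_length=40, num_beams=2)
    print(models['att_tokenizer'].decode(output_ids[0], skip_special_tokens=True))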
 
         print(f"Error loading models: {e}")
         return False

 def load_embeddings() -> Optional[Dict[str, np.ndarray]]:
     """Load embeddings from Safetensors file"""
     try:
+        # Locate or download embeddings file
         embeddings_path = 'embeddings.safetensors'
         if not os.path.exists(embeddings_path):
+            print("File not found locally. Attempting to download from Hugging Face Hub...")
             embeddings_path = hf_hub_download(
+                repo_id=os.environ.get('HF_SPACE_ID', 'thechaiexperiment/TeaRAG'),
                 filename="embeddings.safetensors",
                 repo_type="space"
             )
+
+        # Load Safetensors file
         embeddings = load_file(embeddings_path)
         if not isinstance(embeddings, dict):
+            raise ValueError("Expected a dictionary in the Safetensors file.")
+
+        # Validate and convert tensors to numpy arrays
+        result = {}
+        for key, tensor in embeddings.items():
+            if not hasattr(tensor, 'numpy'):
+                raise TypeError(f"Value for key {key} is not a tensor or cannot be converted to numpy.")
+            result[key] = tensor.numpy()
+
+        print("Embeddings successfully loaded.")
+        return result

     except Exception as e:
         print(f"Error loading embeddings: {e}")
         return None
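A small usage sketch for the new loader (the key name is illustrative). One caveat worth hedging: because the module-level `from safetensors.numpy import load_file` shadows the torch variant, the loaded values are already NumPy arrays, which have no `.numpy()` method, so the `hasattr(tensor, 'numpy')` check above would reject them and the function would fall into the except branch. The sketch therefore treats the values as arrays directly:

    embeddings = load_embeddings()
    if embeddings is not None:
        some_id = next(iter(embeddings))           # e.g. an article file name used as key
        vector = np.asarray(embeddings[some_id])   # already an ndarray with the NumPy loader
        print(some_id, vector.shape)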

+def load_documents_data(folder_path='downloaded_articles/downloaded_articles'):
+    """Load document data from HTML articles in a specified folder."""
     try:
         print("Loading documents data...")
+        # Check if the folder exists
+        if not os.path.exists(folder_path) or not os.path.isdir(folder_path):
+            print(f"Error: Folder '{folder_path}' not found")
+            return False
+        # List all HTML files in the folder
+        html_files = [f for f in os.listdir(folder_path) if f.endswith('.html')]
+        if not html_files:
+            print(f"No HTML files found in folder '{folder_path}'")
+            return False
+        documents = []
+        # Iterate through each HTML file and parse the content
+        for file_name in html_files:
+            file_path = os.path.join(folder_path, file_name)
+            try:
+                with open(file_path, 'r', encoding='utf-8') as file:
+                    # Parse the HTML file
+                    soup = BeautifulSoup(file, 'html.parser')
+                    # Extract text content (or customize this as per your needs)
+                    text = soup.get_text(separator='\n').strip()
+                    documents.append({"file_name": file_name, "content": text})
+            except Exception as e:
+                print(f"Error reading file {file_name}: {e}")
+        # Convert the list of documents to a DataFrame
+        data['df'] = pd.DataFrame(documents)

+        if data['df'].empty:
+            print("No valid documents loaded.")
             return False
+        print(f"Successfully loaded {len(data['df'])} document records.")
         return True
     except Exception as e:
         print(f"Error loading documents data: {e}")
 
     query_embedding = embedding.encode([query_text])
     return query_embedding

+from sklearn.metrics.pairwise import cosine_similarity
+import numpy as np
+
+def query_embeddings(query_embedding, embeddings_data=None, n_results=5):
+    embeddings_data = embeddings_data or data.get('embeddings', {})
+    if not embeddings_data:
+        print("No embeddings data available.")
         return []

     try:
+        doc_ids = list(embeddings_data.keys())
+        doc_embeddings = np.array(list(embeddings_data.values()))
         similarities = cosine_similarity(query_embedding, doc_embeddings).flatten()
         top_indices = similarities.argsort()[-n_results:][::-1]
         return [(doc_ids[i], similarities[i]) for i in top_indices]

         print(f"Error in query_embeddings: {e}")
         return []
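Putting the retrieval pieces together, assuming the globals that embed_query_text relies on are initialised and load_embeddings() output is stored in data['embeddings'] or passed explicitly (the query string is made up):

    query = "What are the symptoms of vitamin D deficiency?"
    query_embedding = embed_query_text(query)  # shape (1, 384) for all-MiniLM-L6-v2
    results = query_embeddings(query_embedding, embeddings_data=data.get('embeddings'), n_results=5)
    for doc_id, score in results:
        print(f"{doc_id}: {score:.3f}")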

+def retrieve_document_texts(doc_ids, folder_path='downloaded_articles/downloaded_articles'):
+    texts = []
+    for doc_id in doc_ids:
+        file_path = os.path.join(folder_path, doc_id)
+        try:
+            # Check if the file exists
+            if not os.path.exists(file_path):
+                print(f"Warning: Document file not found: {file_path}")
+                texts.append("")
+                continue
+            # Read and parse the HTML file
+            with open(file_path, 'r', encoding='utf-8') as file:
+                soup = BeautifulSoup(file, 'html.parser')
+                text = soup.get_text(separator=' ', strip=True)
+                texts.append(text)
+        except Exception as e:
+            print(f"Error retrieving document {doc_id}: {e}")
+            texts.append("")
+    return texts
+
+
+def rerank_documents(query, document_ids, document_texts, cross_encoder_model):
     try:
+        # Prepare pairs for the cross-encoder
+        pairs = [(query, doc) for doc in document_texts]
+        # Get scores using the cross-encoder model
+        scores = cross_encoder_model.predict(pairs)
+        # Combine scores with document IDs and texts
+        scored_documents = list(zip(scores, document_ids, document_texts))
+        # Sort by scores in descending order
+        scored_documents.sort(key=lambda x: x[0], reverse=True)
+        # Print reranked results
+        print("Reranked results:")
+        for idx, (score, doc_id, doc) in enumerate(scored_documents):
+            print(f"Rank {idx + 1} (Score: {score:.4f}, Document ID: {doc_id})")
+        return scored_documents
     except Exception as e:
         print(f"Error reranking documents: {e}")
+        return []
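The reranker now expects the cross-encoder to be passed in explicitly. A short end-to-end sketch using the handles loaded above (it continues from the query_embeddings example; `results` and `query` are the names used there):

    doc_ids = [doc_id for doc_id, _ in results]
    doc_texts = retrieve_document_texts(doc_ids)
    reranked = rerank_documents(query, doc_ids, doc_texts, models['cross_encoder'])
    best_score, best_id, best_text = reranked[0]  # highest cross-encoder score first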

+def extract_entities(text, ner_pipeline=None):
     try:
+        # Use the provided pipeline or default to the model dictionary
+        if ner_pipeline is None:
+            ner_pipeline = models['ner_pipeline']
+        # Perform NER using the pipeline
+        ner_results = ner_pipeline(text)
+        # Extract unique entities that start with "B-"
+        entities = {result['word'] for result in ner_results if result['entity'].startswith("B-")}
+        return list(entities)
     except Exception as e:
         print(f"Error extracting entities: {e}")
         return []
+
 def match_entities(query_entities, sentence_entities):
+    try:
+        query_set, sentence_set = set(query_entities), set(sentence_entities)
+        matches = query_set.intersection(sentence_set)
+        return len(matches)
+    except Exception as e:
+        print(f"Error matching entities: {e}")
+        return 0
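A small check of the entity helpers (example strings invented; with a single argument the call works under either extract_entities definition in this file, and it relies on load_models() having initialised models['ner_pipeline'] or the BioBERT globals):

    query_ents = extract_entities("Patient reports chest pain and shortness of breath")
    sentence_ents = extract_entities("Chest pain can be a symptom of angina")
    print(query_ents, sentence_ents, match_entities(query_ents, sentence_ents))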

 def extract_relevant_portions(document_texts, query, max_portions=3, portion_size=1, min_query_words=1):
     relevant_portions = {}
     # Extract entities from the query
     query_entities = extract_entities(query, ner_biobert)
     print(f"Extracted Query Entities: {query_entities}")
     for doc_id, doc_text in enumerate(document_texts):
         sentences = nltk.sent_tokenize(doc_text)  # Split document into sentences
         doc_relevant_portions = []
         # Extract entities from the entire document
         doc_entities = extract_entities(doc_text, ner_biobert)
         print(f"Document {doc_id} Entities: {doc_entities}")
         for i, sentence in enumerate(sentences):
             # Extract entities from the sentence
             sentence_entities = extract_entities(sentence, ner_biobert)
             # Compute relevance score
             relevance_score = match_entities(query_entities, sentence_entities)
             # Select sentences with at least `min_query_words` matching entities
             if relevance_score >= min_query_words:
                 start_idx = max(0, i - portion_size // 2)

                 doc_relevant_portions.append(portion)
                 if len(doc_relevant_portions) >= max_portions:
                     break
+        # Fallback: Include most entity-dense sentences if no relevant portions were found
         if not doc_relevant_portions and len(doc_entities) > 0:
             print(f"Fallback: Selecting sentences with most entities for Document {doc_id}")
             sorted_sentences = sorted(sentences, key=lambda s: len(extract_entities(s, ner_biobert)), reverse=True)
             for fallback_sentence in sorted_sentences[:max_portions]:
                 doc_relevant_portions.append(fallback_sentence)
+        # Add the extracted portions to the result dictionary
         relevant_portions[f"Document_{doc_id}"] = doc_relevant_portions
     return relevant_portions

 def remove_duplicates(selected_parts):
     unique_sentences = set()
     unique_selected_parts = []
     for sentence in selected_parts:
         if sentence not in unique_sentences:
             unique_selected_parts.append(sentence)
             unique_sentences.add(sentence)
     return unique_selected_parts

 def extract_entities(text):
+    try:
+        inputs = biobert_tokenizer(text, return_tensors="pt")
+        outputs = biobert_model(**inputs)
+        predictions = torch.argmax(outputs.logits, dim=2)
+
+        tokens = biobert_tokenizer.convert_ids_to_tokens(inputs.input_ids[0])
+        entities = [
+            tokens[i]
+            for i in range(len(tokens))
+            if predictions[0][i].item() != 0  # Assuming 0 is the label for non-entity
+        ]
+        return entities
+    except Exception as e:
+        print(f"Error extracting entities: {e}")
+        return []
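Worth flagging for a follow-up: the file now defines extract_entities twice, and the later, BioBERT-based definition shadows the pipeline-based one, yet the call sites in extract_relevant_portions pass two arguments (extract_entities(query, ner_biobert)), which this one-argument signature would reject, and it reads module-level biobert_tokenizer/biobert_model that the diff never assigns. A hedged sketch of the missing wiring, with assumed names (not code from the commit); the signature mismatch would still need to be resolved on one side or the other:

    # Hypothetical wiring from the load_models() dictionary
    biobert_tokenizer = models['bio_tokenizer']
    biobert_model = models['bio_model']
    ner_biobert = models['ner_pipeline']  # passed as the second positional argument at call sites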

 def enhance_passage_with_entities(passage, entities):
     return f"{passage}\n\nEntities: {', '.join(entities)}"

 def create_prompt(question, passage):

     Answer:
     """)
     return prompt.format(passage=passage, question=question)
+
 def generate_answer(prompt, max_length=860, temperature=0.2):
     inputs = tokenizer_f(prompt, return_tensors="pt", truncation=True)
     # Start timing
     start_time = time.time()
+    # Generate the output
     output_ids = model_f.generate(
         inputs.input_ids,
         max_length=max_length,

         temperature=temperature,
         pad_token_id=tokenizer_f.eos_token_id
     )
     # End timing
     end_time = time.time()
     # Calculate the duration
     duration = end_time - start_time
     # Decode the answer
     answer = tokenizer_f.decode(output_ids[0], skip_special_tokens=True)
+    # Extract keywords from the passage and answer
+    passage_keywords = set(prompt.lower().split())  # Adjusted to check keywords in the full prompt
     answer_keywords = set(answer.lower().split())
+    # Verify if the answer aligns with the passage
     if passage_keywords.intersection(answer_keywords):
         return answer, duration
     else:
         return "Sorry, I can't help with that.", duration
+
 def remove_answer_prefix(text):
     prefix = "Answer:"
     if prefix in text:
+        return text.split(prefix, 1)[-1].strip()  # Split only once to avoid splitting at other occurrences of "Answer:"
     return text

 def remove_incomplete_sentence(text):
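Chaining the generation helpers, for reference; this assumes tokenizer_f/model_f are defined elsewhere in the file, reuses best_text and query from the reranking sketch above, and applies remove_incomplete_sentence (whose body is outside this diff) as the final cleanup step:

    passage = enhance_passage_with_entities(best_text, extract_entities(best_text))
    prompt = create_prompt(query, passage)
    raw_answer, seconds = generate_answer(prompt)
    answer = remove_incomplete_sentence(remove_answer_prefix(raw_answer))
    print(f"Answered in {seconds:.1f}s: {answer}")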
 
 async def root():
     return {"message": "Welcome to the FastAPI application! Use the /health endpoint to check health, and /api/query for processing queries."}

 @app.get("/health")
 async def health_check():
     """Health check endpoint"""