Commit 293d81f (parent: 8f457e6): Update app.py
app.py
CHANGED
@@ -315,13 +315,13 @@ def match_entities(query_entities, sentence_entities):
 def extract_relevant_portions(document_texts, query, max_portions=3, portion_size=1, min_query_words=1):
     relevant_portions = {}
     # Extract entities from the query
-    #ner_biobert = models['ner_pipeline']
     query_entities = extract_entities(query)
     print(f"Extracted Query Entities: {query_entities}")
     for doc_id, doc_text in enumerate(document_texts):
         sentences = nltk.sent_tokenize(doc_text)  # Split document into sentences
         doc_relevant_portions = []
         # Extract entities from the entire document
+        ner_biobert = models['ner_pipeline']
         doc_entities = extract_entities(doc_text, ner_biobert)
         print(f"Document {doc_id} Entities: {doc_entities}")
         for i, sentence in enumerate(sentences):
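The change fixes a NameError: before this commit, the assignment of ner_biobert existed only as a commented-out line, so the later call extract_entities(doc_text, ner_biobert) referenced an undefined name. The commit uncomments the lookup and moves it inside the document loop, just before its first use. A minimal sketch of the pieces this diff assumes is below; the models dict, extract_entities, and ner_biobert follow the names in the diff, while the pipeline construction, checkpoint name, and the shape of the returned entities are assumptions, not the Space's actual code:

    # Minimal sketch, not the Space's actual implementation. Names taken from
    # the diff: models, extract_entities, ner_biobert. The checkpoint and the
    # entity post-processing are assumptions.
    from transformers import pipeline

    NER_CHECKPOINT = "your-biobert-ner-checkpoint"  # placeholder; not shown in the diff

    models = {
        # Token-classification pipeline; aggregation_strategy="simple" merges
        # word pieces into whole entity spans with a "word" field.
        "ner_pipeline": pipeline(
            "token-classification",
            model=NER_CHECKPOINT,
            aggregation_strategy="simple",
        ),
    }

    def extract_entities(text, ner_pipeline=None):
        """Return the entity strings found in `text` (assumed behavior)."""
        ner = ner_pipeline if ner_pipeline is not None else models["ner_pipeline"]
        return [ent["word"] for ent in ner(text)]

Under this reading, the query-side call extract_entities(query) relies on a default pipeline, which is why only the document-side call extract_entities(doc_text, ner_biobert) broke when the assignment was commented out.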