import concurrent.futures
import itertools
from time import time

from minicheck_web.minicheck import MiniCheck
from web_retrieval import *


def sort_chunks_single_doc_claim(used_chunk, support_prob_per_chunk):
    '''
    Sort the chunks in a single document based on the probability of "supported" in descending order.
    This function is used when a user document is provided.
    '''

    flattened_docs = [doc for chunk in used_chunk for doc in chunk]
    flattened_scores = [score for chunk in support_prob_per_chunk for score in chunk]

    doc_score = list(zip(flattened_docs, flattened_scores))
    ranked_doc_score = sorted(doc_score, key=lambda x: x[1], reverse=True)

    ranked_docs, scores = zip(*ranked_doc_score)

    return ranked_docs, scores
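
# Illustrative shapes for the two arguments above (values are hypothetical):
#   used_chunk             = [['chunk A', 'chunk B']]   # chunks of the single user document
#   support_prob_per_chunk = [[0.12, 0.87]]             # per-chunk "supported" probabilities
# sort_chunks_single_doc_claim(used_chunk, support_prob_per_chunk)
#   -> (('chunk B', 'chunk A'), (0.87, 0.12))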
            

class EndpointHandler():
    def __init__(self, path="./"):
        self.scorer = MiniCheck(path=path)

    def __call__(self, data):

        # A user-provided document is given: fact-check the claim against it directly
        if len(data['inputs']['docs']) == 1 and data['inputs']['docs'][0] != '':
            _, _, used_chunk, support_prob_per_chunk = self.scorer.score(data=data)
            ranked_docs, scores = sort_chunks_single_doc_claim(used_chunk, support_prob_per_chunk)

            outputs = {
                'ranked_docs': ranked_docs,
                'scores': scores
            }
            
        else:
            # No user document is provided: retrieve relevant web pages and fact-check against them
            assert len(data['inputs']['claims']) == 1, "Only one claim is allowed for web retrieval in the current version."

            claim = data['inputs']['claims'][0]
            ranked_docs, scores, ranked_urls = self.search_relevant_docs(claim)

            outputs = {
                'ranked_docs': ranked_docs,
                'scores': scores,
                'ranked_urls': ranked_urls
            }
            
        return outputs
    
    
    def search_relevant_docs(self, claim, timeout=10, max_search_results_per_query=5, allow_duplicated_urls=False):

        search_results = search_google(claim, timeout=timeout)

        print('Searching webpages...')
        start = time()
        with concurrent.futures.ThreadPoolExecutor() as e:
            scraped_results = e.map(scrape_url, search_results, itertools.repeat(timeout))
        end = time()
        print(f"Finished searching in {round((end - start), 1)} seconds.\n")
        # Keep pages that scraped successfully, contain no decoding artifacts (replacement characters),
        # and are not PDFs; truncate each page to 50,000 characters
        scraped_results = [(r[0][:50000], r[1]) for r in scraped_results if r[0] and '��' not in r[0] and ".pdf" not in r[1]]

        retrieved_docs, urls = zip(*scraped_results[:max_search_results_per_query])

        print('Scoring webpages...')
        start = time()
        retrieved_data = {
            'inputs': {
                'docs': list(retrieved_docs),
                'claims': [claim]*len(retrieved_docs)
            }
        }
        _, _, used_chunk, support_prob_per_chunk = self.scorer.score(data=retrieved_data)
        end = time()
        num_chunks = len([item for items in used_chunk for item in items])
        print(f'Finished {num_chunks} entailment checks in {round((end - start), 1)} seconds ({round(num_chunks / (end - start) * 60)} chunks/min).')

        ranked_docs, scores, ranked_urls = order_doc_score_url(used_chunk, support_prob_per_chunk, urls, allow_duplicated_urls=allow_duplicated_urls)

        return ranked_docs, scores, ranked_urls
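

# Minimal local usage sketch, not part of the endpoint contract.
# Assumptions: the MiniCheck model files are available under ./ and the helpers in
# web_retrieval are importable; the claim/document strings below are purely illustrative.
if __name__ == '__main__':
    handler = EndpointHandler(path="./")

    # 1) Fact-check a claim against a user-provided document.
    user_doc_payload = {
        'inputs': {
            'docs': ["The Eiffel Tower, completed in 1889, is located in Paris."],
            'claims': ["The Eiffel Tower was finished in 1889."]
        }
    }
    result = handler(user_doc_payload)
    print(result['ranked_docs'][0], result['scores'][0])

    # 2) Fact-check a single claim via web retrieval (requires network access);
    #    an empty 'docs' entry routes the request to search_relevant_docs.
    web_payload = {
        'inputs': {
            'docs': [''],
            'claims': ["The Eiffel Tower was finished in 1889."]
        }
    }
    result = handler(web_payload)
    print(result['ranked_urls'][:3])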