import pandas as pd

from appStore.prep_utils import create_chunks

# Base directory holding the raw IATI and GIZ data files
path_to_data = "./docStore/"

def process_iati():
    """
    Read the IATI CSV exports, merge them into one project table,
    and split each project's text into chunks.
    """
    # Load the individual IATI exports
    orgas_df = pd.read_csv(f"{path_to_data}iati_files/project_orgas.csv")
    region_df = pd.read_csv(f"{path_to_data}iati_files/project_region.csv")
    sector_df = pd.read_csv(f"{path_to_data}iati_files/project_sector.csv")
    status_df = pd.read_csv(f"{path_to_data}iati_files/project_status.csv")
    texts_df = pd.read_csv(f"{path_to_data}iati_files/project_texts.csv")

    # Merge all attributes on the IATI identifier, keeping only projects present in every file
    projects_df = pd.merge(orgas_df, region_df, on='iati_id', how='inner')
    projects_df = pd.merge(projects_df, sector_df, on='iati_id', how='inner')
    projects_df = pd.merge(projects_df, status_df, on='iati_id', how='inner')
    projects_df = pd.merge(projects_df, texts_df, on='iati_id', how='inner')

    # Keep only BMZ projects
    projects_df = projects_df[projects_df.client.str.contains('bmz')].reset_index(drop=True)

    # Drop columns that are not needed downstream
    projects_df.drop(columns=['orga_abbreviation', 'client',
       'orga_full_name', 'country',
       'country_flag', 'crs_5_code', 'crs_3_code', 'country_code_list',
       'sgd_pred_code', 'crs_5_name', 'crs_3_name', 'sgd_pred_str'], inplace=True)
    #print(projects_df.columns)

    # Word count of the combined title and description, then split that text into chunks
    # (a separating space keeps the last title word and first description word apart)
    projects_df['text_size'] = projects_df.apply(lambda x: len((x['title_main'] + ' ' + x['description_main']).split()), axis=1)
    projects_df['chunks'] = projects_df.apply(lambda x: create_chunks(x['title_main'] + ' ' + x['description_main']), axis=1)
    # One row per chunk
    projects_df = projects_df.explode(column=['chunks'], ignore_index=True)
    projects_df['source'] = 'IATI'
    projects_df.rename(columns={'iati_id': 'id', 'iati_orga_id': 'org'}, inplace=True)

    return projects_df
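
# Illustrative usage (hedged; not part of the original module):
#   iati_chunks = process_iati()   # one row per chunk, with 'id', 'org',
#                                  # 'chunks', 'text_size' and 'source' columns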

def process_giz_worldwide():
    """
    Read the GIZ Worldwide website export and split each project's text into chunks.
    """
    giz_df = pd.read_json(f'{path_to_data}giz_worldwide/data_giz_website.json')
    giz_df = giz_df.rename(columns={'content': 'project_description'})

    # Word count of the combined name and description, then split that text into chunks
    giz_df['text_size'] = giz_df.apply(lambda x: len((x['project_name'] + ' ' + x['project_description']).split()), axis=1)
    giz_df['chunks'] = giz_df.apply(lambda x: create_chunks(x['project_name'] + ' ' + x['project_description']), axis=1)

    print("initial df length:", len(giz_df))
    # One row per chunk
    giz_df = giz_df.explode(column=['chunks'], ignore_index=True)
    print("new df length:", len(giz_df))
    print(giz_df.columns)
    #giz_df.drop(columns = ['filename', 'url', 'name', 'mail', 
    #                    'language', 'start_year', 'end_year','poli_trager'], inplace=True)
    giz_df['source'] = 'GIZ_WORLDWIDE'
    return giz_df
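
# Illustrative usage (hedged; not part of the original module):
#   giz_chunks = process_giz_worldwide()   # one row per chunk, tagged source='GIZ_WORLDWIDE'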

def remove_duplicates(results_list):
    """
    Return a new list of results with duplicates removed, 
    based on 'url' in metadata.
    """
    unique_results = []
    seen_urls = set()

    for r in results_list:
        # Safely read the URL from the result's metadata (missing keys yield None)
        url = r.payload.get('metadata', {}).get('url')
        # Note: results without any URL all map to None, so only the first such result is kept
        if url not in seen_urls:
            seen_urls.add(url)
            unique_results.append(r)

    return unique_results
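

# Minimal usage sketch (an assumption, not part of the original module): it
# expects the CSV/JSON source files under ./docStore/ to be in place, and it
# feeds remove_duplicates() with tiny stand-in objects that mimic the
# Qdrant-style `payload` dict the function expects.
if __name__ == "__main__":
    from types import SimpleNamespace

    # Build the chunked project tables from the raw data files.
    iati_chunks = process_iati()
    giz_chunks = process_giz_worldwide()
    print(f"IATI chunks: {len(iati_chunks)}, GIZ chunks: {len(giz_chunks)}")

    # Deduplicate mock search results that share the same source URL.
    mock_results = [
        SimpleNamespace(payload={'metadata': {'url': 'https://example.org/a'}}),
        SimpleNamespace(payload={'metadata': {'url': 'https://example.org/a'}}),
        SimpleNamespace(payload={'metadata': {'url': 'https://example.org/b'}}),
    ]
    print(len(remove_duplicates(mock_results)), "unique results")  # prints: 2 unique results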