File size: 21,751 Bytes
47756f1
 
 
 
 
 
 
 
 
 
83a24ec
47756f1
 
 
 
 
 
 
83a24ec
b125eed
b0ab312
 
 
 
 
 
 
 
 
 
47756f1
dbd62d7
1968c31
dbd62d7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
47756f1
 
1968c31
47756f1
 
 
 
84050ab
47756f1
 
 
aeb4fb8
2f12850
aeb4fb8
47756f1
 
eb83e3d
47756f1
0a2b1df
47756f1
 
84050ab
f1aec70
3f5271b
f1aec70
 
329d6cf
8f420e0
55c1b89
b0ab312
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
55c1b89
b125eed
 
55c1b89
329d6cf
e17479b
b125eed
 
 
 
87597d0
 
 
 
b125eed
 
 
87597d0
 
b125eed
b0ab312
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
fa42253
b0ab312
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b125eed
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
# set path
import glob, os, sys; 
sys.path.append('../utils')

#import needed libraries
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import streamlit as st
from utils.target_classifier import load_targetClassifier, target_classification
import logging
logger = logging.getLogger(__name__)
from utils.config import get_classifier_params
from utils.preprocessing import paraLengthCheck
from io import BytesIO
import xlsxwriter
import plotly.express as px
from utils.target_classifier import label_dict
from appStore.rag import run_query
from math import exp
import re
import json

import nltk
from nltk.corpus import stopwords
nltk.download('stopwords')

import openai
openai_api_key = os.environ["OPEN_AI_KEY"]

# Declare all the necessary variables
classifier_identifier = 'target'  # config key identifying the target classifier
params  = get_classifier_params(classifier_identifier)  # model name, threshold, etc. from utils.config

@st.cache_data
def to_excel(df, sectorlist):
    """Serialize *df* to an in-memory XLSX workbook with review dropdowns.

    Column S receives a Yes/No/Discard dropdown; columns T through X each
    receive a dropdown of *sectorlist* plus a 'Blank' option.

    Args:
        df: DataFrame to export (written to sheet 'Sheet1' without index).
        sectorlist: list of sector names used as dropdown options.

    Returns:
        bytes: binary content of the generated workbook.
    """
    output = BytesIO()
    writer = pd.ExcelWriter(output, engine='xlsxwriter')
    df.to_excel(writer, index=False, sheet_name='Sheet1')
    worksheet = writer.sheets['Sheet1']

    # Row 1 holds the header, so data occupies rows 2..len(df)+1; the
    # validation ranges must therefore end at len(df) + 1, not len(df).
    last_row = len(df) + 1

    worksheet.data_validation('S2:S{}'.format(last_row),
                              {'validate': 'list',
                               'source': ['No', 'Yes', 'Discard']})
    # Columns T-X share the same sector dropdown (the original 'W2:U{}'
    # range was a typo for 'W2:W{}').
    for col in ('T', 'U', 'V', 'W', 'X'):
        worksheet.data_validation('{0}2:{0}{1}'.format(col, last_row),
                                  {'validate': 'list',
                                   'source': sectorlist + ['Blank']})
    # writer.save() was removed in pandas 2.0; close() finalizes the file
    # on all supported pandas versions.
    writer.close()
    processed_data = output.getvalue()
    return processed_data

def app():
    """Filter uploaded paragraphs, run the target classifier, and stash results.

    Reads the upstream dataset from st.session_state.key1, keeps only
    paragraphs referencing vulnerable groups (excluding 'Other'), runs the
    target/measure/action classifier, and stores the result in
    st.session_state.key2.
    """

    ### Main app code ###
    with st.container():

        if 'key1' in st.session_state:

            # Dataset produced by the preceding app stage
            data = st.session_state.key1

            # Keep paragraphs that carry at least one vulnerability label
            # and are not tagged with 'Other'
            keep = data['Vulnerability Label'].apply(
                lambda labels: len(labels) > 0 and 'Other' not in labels)
            data = data[keep]

            # Load the classifier and cache it in the session for reuse
            model = load_targetClassifier(classifier_name=params['model_name'])
            st.session_state['{}_classifier'.format(classifier_identifier)] = model

            # Classify each paragraph for targets/measures/actions
            data = target_classification(haystack_doc=data,
                                         threshold=params['threshold'])

            # Give the prediction column a self-explanatory name downstream
            data.rename(
                columns={'Target Label': 'Specific action/target/measure mentioned'},
                inplace=True)

            st.session_state.key2 = data



# Few-shot prompt for LLM evaluation of the multi-label vulnerability
# classifier: given the predicted LABEL list and the source CONTEXT, the
# model must answer with a bare boolean 'True'/'False' (classifier judged
# correct or not). The sample under test is appended after this prompt by
# send_to_chatgpt_api().
vc_prompt="""
    You are assessing the accuracy of a multi-label classifier. The classifier seeks to assess the relevance of a given passage of context to any of 18 classes:

    'Agricultural communities',
    'Children',
    'Coastal communities',
    'Ethnic, racial or other minorities',
    'Fishery communities',
    'Informal sector workers',
    'Members of indigenous and local communities',
    'Migrants and displaced persons',
    'Older persons',
    'Other',
    'Persons living in poverty',
    'Persons with disabilities',
    'Persons with pre-existing health conditions',
    'Residents of drought-prone regions',
    'Rural populations',
    'Sexual minorities (LGBTQI+)',
    'Urban populations',
    'Women and other genders'

    If there is a semantic relevance or keyword(s) match between labels and context, then assess accuracy as a boolean True.
    Assessing class relevance may be tricky in some cases as the context can use technical language which is sometimes ambiguous. Please take your time to ensure a robust assessment.
    If you can't decide, err on the side of the classifier, and assume it is correct. 

    Use the examples below for reference:

    EXAMPLE 1

    LABEL: ['Agricultural communities', 'Residents of drought-prone regions']

    CONTEXT: "Future climatic predictions for Kenya indicate possible temperature increase of 1C by 2020 and 2.3C by 2050. These changes unless effectively mitigated, will likely result to erosion of the productive assets and the weakening of coping strategies and resilience of rain-fed farming systems, especially in the arid and semi-arid lands."
    
    RESPONSE: True

    EXAMPLE 2

    LABEL: ['Fishery communities']

    CONTEXT: "The reduced water availability resulting from frequent droughts also limits aquaculture development. Forests and agroforestry The farmed fisheries resources include the trout fish in cold water high altitude areas and tilapia, catfish, common carp for warmer water low altitude areas. Figure 5 shows the quantities and monetary value of fish produced in Kenya between 2005 and 2016."

    RESPONSE: True

    EXAMPLE 3

    LABEL: ['Persons with disabilities']

    CONTEXT: "In addressing climate change issues, public entities are required to undertake public awareness and consultations, and ensure gender mainstreaming, in line with the Constitution and the Climate Change Bill (2014).  5. Means of implementation Kenya's contribution will be implemented with both domestic and international support."

    RESPONSE: False

    EXAMPLE 4

    LABEL: ['Children', 'Women and other genders']

    CONTEXT: "Enhance quality control and food safety by relevant institutions along crop, livestock and fisheries value chains. Enhance use of low greenhouse gas emitting fish production technologies and practices. Promote integrated farming systems comprising crops, livestock, aquaculture and farm forestry. Create awareness and capacity build women, youth and venerable groups (WY&VG) on CSA."

    RESPONSE: True

    EXAMPLE 5

    LABEL: ['Ethnic, racial or other minorities']

    CONTEXT: "Harmonize livestock vaccinations across the bordering counties and across the international borders. Facilitate management of veterinary drug residues, carcasses and agrochemicals. Promote efficient use of farm mechanization. Promote mechanized and animal powered conservation tillage practices as compared to conventional tillage. Promote value addition of farm produce through cottage industries."

    RESPONSE: False

    EXAMPLE 6

    LABEL: ['Agricultural communities']

    CONTEXT: "There is also no traceability mechanism for produce and products from farm to folk. Value addition will ensure longer shelf life, reduced transaction costs and higher incomes. Summary of Actions: Identify and promote existing value addition technologies. Incentivize the private sector to invest in agricultural value addition."

    RESPONSE: False

    EXAMPLE 7

    LABEL: ['Agricultural communities', 'Rural populations']

    CONTEXT: "Kenya's total greenhouse gas (GHG) emissions are relatively low, standing at 73 MtCO2eq in 2010, out of which 75%/ are from the land use, land-use change and forestry (LULUCF) and agriculture sectors. This may be explained by the reliance on wood fuel by a large proportion of the population coupled with the increasing demand for agricultural land and urban development."

    RESPONSE: True

    Return the assessment as a boolean True or False.
    Return only the boolean, and nothing else.

    Now assess the following sample:


    """

# Few-shot prompt for LLM evaluation of the binary target/measure/action
# classifier: given the predicted 'YES'/'NO' LABEL and the source CONTEXT,
# the model must answer with a bare boolean 'True'/'False' (agree/disagree
# with the classifier). The sample under test is appended after this prompt
# by send_to_chatgpt_api().
tma_prompt="""
    You are assessing the accuracy of a binary ('YES'/'NO') classifier. The classifier classifies a given passage of text as to whether it contains reference to a target, measure, action, and plans
    in the context of the United Nations Framework Convention on Climate Change (UNFCCC) and the Paris Agreement.
    The text is extracted from Nationally Determined Contributions (NDCs) documents.
    The concepts of targets, measures, actions, and plans are defined below:

    1. Targets

        •   Definition: Targets in the NDCs refer to the specific, quantified objectives that each country sets for itself to reduce greenhouse gas (GHG) emissions and mitigate climate change. These targets reflect the level of ambition a country is willing to commit to in its climate action.
        •   Example in NDCs: A common form of a target is a percentage reduction in GHG emissions by a certain year, such as “reducing emissions by 50% by 2030 compared to 1990 levels.” Targets can also be sector-specific, such as setting renewable energy capacity goals.

    2. Measures

        •   Definition: Measures are the policies, regulations, and actions that are implemented to achieve the targets set in the NDCs. These are the instruments through which a country can ensure that it is on the right path to meet its climate goals.
        •   Example in NDCs: Measures can include implementing a carbon tax, introducing renewable energy incentives, or regulations to improve energy efficiency in buildings or transportation sectors. These could also involve reforestation or land-use changes to enhance carbon sinks.

    3. Actions

        •   Definition: Actions refer to the specific activities, projects, or steps that are undertaken to implement the measures and meet the set targets. Actions are the tangible efforts that contribute to reducing emissions or adapting to climate change impacts.
        •   Example in NDCs: Actions might include building solar or wind power plants, electrifying transportation systems, or retrofitting existing infrastructure to make it more energy efficient. Actions are often the ground-level, operational steps that translate plans into reality.

    
    If you agree with the classifier, then assess accuracy as a boolean True.
    Note - assessing targets, measures and actions may be tricky in some cases as the text can use technical language which is sometimes ambiguous. 
    Please take your time to ensure a robust assessment.
    If you can't decide, err on the side of the classifier, and assume it is correct. 

    EXAMPLE 1:

    LABEL: 'YES'

    CONTEXT: "This will lead to more climate related vulnerabilities thereby predisposing farming communities to food insecurity and more poverty. In response to this scenario, the Government has been exploring innovative and transformative measures to assist stakeholders across the agricultural value chains to manage the effects of current and projected change of climate patterns."

    RESPONSE: True

    EXAMPLE 2:

    LABEL: 'NO'

    CONTEXT: "Kenya's total greenhouse gas (GHG) emissions are relatively low, standing at 73 MtCO2eq in 2010, out of which 75%/ are from the land use, land-use change and forestry (LULUCF) and agriculture sectors. This may be explained by the reliance on wood fuel by a large proportion of the population coupled with the increasing demand for agricultural land and urban development."

    RESPONSE: True

    EXAMPLE 3:

    LABEL: 'YES'

    CONTEXT: "1.1 National Circumstances Kenya is located in the Greater Horn of Africa region, which is highly vulnerable to the impacts of climate change. More than 80% of the country’s landmass is arid and semi-arid land (ASAL) with poor infrastructure, and other developmental challenges."

    RESPONSE: True

    Return the assessment as a boolean True or False.
    Return only the boolean, and nothing else.

    Now assess the following sample:

    """



def send_to_chatgpt_api(context, label, prompt, openai_api_key, logprobs_flag=None, logprobs_n=None):
    """Ask the ChatGPT API to judge a classifier prediction.

    Appends the LABEL/CONTEXT sample to *prompt* and sends it to the chat
    completions endpoint.

    Args:
        context: the passage of text that was classified.
        label: the classifier's prediction for that passage.
        prompt: evaluation prompt (e.g. vc_prompt or tma_prompt).
        openai_api_key: API key assigned to openai.api_key.
        logprobs_flag: when truthy, request token log-probabilities.
        logprobs_n: number of top log-probabilities to request per token.

    Returns:
        The top-logprob entry of the first output token when logprobs were
        requested, otherwise the response message content string.
    """
    # Assemble the full user message: prompt followed by the sample
    combined_message = f"""
    {prompt}
    LABEL: {label}
    CONTEXT: {context}
    RESPONSE:
    """

    # Configure the OpenAI client
    openai.api_key = openai_api_key

    # Query the pinned model snapshot
    response = openai.ChatCompletion.create(
        model="gpt-4o-mini-2024-07-18",
        messages=[
            {"role": "system", "content": "You are ChatGPT."},
            {"role": "user", "content": combined_message}
        ],
        # Whether to return log probabilities of the output tokens
        logprobs=logprobs_flag,
        top_logprobs=logprobs_n,
    )

    # NOTE(review): assumes the response contains at least one choice with a
    # populated logprobs structure when logprobs_flag is truthy.
    if logprobs_flag:
        return response.choices[0].logprobs.content[0].top_logprobs[0]
    return response.choices[0].message['content']


# Fuzzy matching
def levenshtein_distance(a, b):
    """Return the Levenshtein (edit) distance between strings *a* and *b*.

    The distance is the minimum number of single-character insertions,
    deletions, or substitutions needed to turn *a* into *b*. Implemented
    with a rolling single-row DP rather than the full matrix.
    """
    # previous_row[j] == distance between a[:i-1] and b[:j]
    previous_row = list(range(len(b) + 1))

    for i, ch_a in enumerate(a, start=1):
        current_row = [i]  # distance from a[:i] to the empty string
        for j, ch_b in enumerate(b, start=1):
            substitution_cost = 0 if ch_a == ch_b else 1
            current_row.append(min(
                previous_row[j] + 1,                       # deletion
                current_row[j - 1] + 1,                    # insertion
                previous_row[j - 1] + substitution_cost,   # substitution
            ))
        previous_row = current_row

    return previous_row[-1]

def similarity_score(a, b):
    """Return a normalized similarity in [0, 1] based on edit distance.

    1.0 means identical strings, 0.0 means entirely different; two empty
    strings are treated as a perfect match.
    """
    longest = max(len(a), len(b))
    # Avoid division by zero when both strings are empty
    if longest == 0:
        return 1.0
    return (longest - levenshtein_distance(a, b)) / longest

def remove_stopwords(text):
    """Strip English stopwords (case-insensitive) from *text*.

    Splits on whitespace and rejoins the surviving tokens with single
    spaces, so the original spacing is not preserved.
    """
    stop_words = set(stopwords.words('english'))
    kept = [token for token in text.split() if token.lower() not in stop_words]
    return ' '.join(kept)

def fuzzy_match_sequence(sequence, long_string, threshold=0.4):
    """Score how well the keywords in *sequence* appear in *long_string*.

    Each keyword (after stopword removal) is fuzzily matched against every
    word of the stopword-stripped *long_string*; keywords whose best
    similarity meets *threshold* contribute to a cumulative score that is
    capped at 1.

    Args:
        sequence: a string (split into phrases on commas/whitespace) or a
            list/tuple of phrases.
        long_string: the text to search within.
        threshold: minimum per-keyword similarity for a match to count.

    Returns:
        float: score in [0, 1], rounded to 2 decimal places. Returns 0.0
        when the searched text contains no words after stopword removal
        (the original implementation raised ValueError in that case).
    """
    # A plain string is split into phrases on commas or whitespace; the
    # result is always a list, so no further type normalization is needed.
    if isinstance(sequence, str):
        sequence = re.split(r',\s*|\s+', sequence)

    # Remove stopwords from both the keywords and the searched text
    sequence = [remove_stopwords(phrase) for phrase in sequence]
    long_string = remove_stopwords(long_string)

    # Split the long string into words
    long_string_words = long_string.split()

    # Guard: nothing to match against (e.g. text was entirely stopwords)
    if not long_string_words:
        return 0.0

    # Perform Levenshtein-based fuzzy matching and accumulate a capped score
    total_score = 0
    for word in sequence:
        # Best similarity of this keyword against any word in the text
        best_match_score = max(similarity_score(word, ls_word)
                               for ls_word in long_string_words)

        # Only matches with a similarity at or above the threshold count;
        # each contribution is clipped so the total never exceeds 1
        if best_match_score >= threshold:
            total_score += min(best_match_score, 1 - total_score)

    return round(min(total_score, 1), 2)


def target_display(model_sel_name, doc_name):
    """Display classification results, run an LLM-based evaluation of the
    predictions, and offer the evaluation workbook as a download.

    Reads the classified paragraphs from st.session_state['key2'], shows
    the table, produces per-label RAG summaries, asks the ChatGPT API to
    sanity-check both the vulnerability labels and the target/measure/
    action predictions, and writes Meta/Summary/Results sheets into an
    in-memory Excel file exposed via a Streamlit download button.

    Args:
        model_sel_name: model name forwarded to the RAG run_query helper.
        doc_name: source document filename (recorded in the outputs and
            used to name the downloaded file).
    """
    
    ### TABLE Output ###

    # Assign dataframe a name
    df = st.session_state['key2']
    st.write(df)

    ### RAG Output by group ##

    # Expand the DataFrame: keep rows with a positive target prediction and
    # give each vulnerability label its own row
    df_expand = (
        df.query("`Specific action/target/measure mentioned` == 'YES'")
        .explode('Vulnerability Label')
        )
    # Group by 'Vulnerability Label' and concatenate 'text'
    df_agg = df_expand.groupby('Vulnerability Label')['text'].agg('; '.join).reset_index()

    # st.write(df_agg)

    st.markdown("----")
    st.markdown('**SUMMARY OF GOALS BY VULNERABILITY LABEL:**')

    # Check if the results are already in session state (the LLM calls below
    # are expensive, so they run only once per session)
    if 'results_df' not in st.session_state:
        # Initialize an empty list to store the results
        summary_list = []
        results_list = []

        # Process the data in the loop: one RAG summary per label
        for i in range(0, len(df_agg)):
            st.write(df_agg['Vulnerability Label'].iloc[i])
            
            # Run query to get the result
            result = run_query(
                context=df_agg['text'].iloc[i], 
                label=df_agg['Vulnerability Label'].iloc[i], 
                model_sel_name=model_sel_name
            )

            # Store the Vulnerability Label and the response in a list of dictionaries
            summary_list.append({
                'document': doc_name,
                'text': df_agg['text'].iloc[i],
                'label': df_agg['Vulnerability Label'].iloc[i],
                'summary': result.get_full_content()
            })


        # Process the data in the loop: per-paragraph LLM evaluation of both
        # classifiers, with logprobs so each verdict carries a confidence
        for i in range(0, len(df)):

            # Send the result to the ChatGPT API and get the labeled response
            vc_response = send_to_chatgpt_api(
                context = df['text'].iloc[i],
                label = df['Vulnerability Label'].iloc[i],
                prompt = vc_prompt,
                openai_api_key=openai_api_key,
                logprobs_flag=True,
                logprobs_n=1)

            tma_response = send_to_chatgpt_api(
                context = df['text'].iloc[i],
                label = df['Specific action/target/measure mentioned'].iloc[i],
                prompt = tma_prompt,
                openai_api_key=openai_api_key,
                logprobs_flag=True,
                logprobs_n=1)


            # Convert logprobs to % scale
            vc_prob = np.round(np.exp(vc_response.logprob),2)
            vc_token = vc_response.token

            # Convert contrary predictions to probability of positive prediction (inverse)
            if vc_token == 'False':
                vc_prob_cnv = round(1 - vc_prob,2)
            else:
                vc_prob_cnv = vc_prob

            # Do some fuzzy matching to check for class-related keywords in the text
            vc_keywords = fuzzy_match_sequence(str(df['Vulnerability Label'].iloc[i]), str(df['text'].iloc[i]))

            # Compute vulnerability classification eval: accept if the LLM
            # leans positive OR any class keyword fuzzily matches the text
            vc_eval = False
            if vc_prob_cnv > 0.5 or vc_keywords > 0:
                vc_eval = True

            # Convert logprobs to % scale
            tma_prob = np.round(np.exp(tma_response.logprob),2)
            tma_token = tma_response.token

            # Convert contrary predictions to probability of positive prediction (inverse)
            if tma_token == 'False':
                tma_prob_cnv = round(1 - tma_prob,2)
            else:
                tma_prob_cnv = tma_prob

            # Compute TMA classification eval
            tma_eval = False
            if tma_prob_cnv > 0.5:
                tma_eval = True

            # Store the Vulnerability Label and the response in a list of dictionaries
            # (VC_check/TMA_check are left empty for later human review)
            results_list.append({
                'document': doc_name,
                'text': df['text'].iloc[i],
                'page': df['page'].iloc[i],
                'label': df['Vulnerability Label'].iloc[i],
                'target': df['Specific action/target/measure mentioned'].iloc[i],
                'VC_prob': vc_prob_cnv,
                'VC_keywords': vc_keywords,
                'VC_eval': vc_eval,
                'TMA_prob': tma_prob_cnv,
                'TMA_eval': tma_eval,
                'VC_check': None,
                'TMA_check': None,
            })

        # Once the loop is done, convert results to a DataFrame and store in session state
        st.session_state['results_df'] = pd.DataFrame(results_list)
        st.session_state['summary_df'] = pd.DataFrame(summary_list)

    # Document-level counts: all paragraphs vs. paragraphs with references
    df_full = st.session_state['key1']
    num_paragraphs = len(df_full['Vulnerability Label'])
    num_references = len(df['Vulnerability Label'])

    meta_list = []
    # Store the Vulnerability Label and the response in a list of dictionaries
    meta_list.append({
        'document': doc_name,
        'paragraphs': num_paragraphs,
        'references': num_references,
    })

    st.session_state['meta_df'] = pd.DataFrame(meta_list)

    # Retrieve the results from session state
    meta_df = st.session_state['meta_df']
    summary_df = st.session_state['summary_df']
    results_df = st.session_state['results_df']

    # Use an in-memory buffer to hold the Excel file
    excel_buffer = BytesIO()

    # Create an Excel writer and write each DataFrame to a separate sheet
    with pd.ExcelWriter(excel_buffer, engine='xlsxwriter') as writer:
        meta_df.to_excel(writer, sheet_name='Meta', index=False)
        summary_df.to_excel(writer, sheet_name='Summary', index=False)
        results_df.to_excel(writer, sheet_name='Results', index=False)

    # Ensure the buffer is ready for downloading
    excel_buffer.seek(0)

    # Create a download button for the Excel file
    st.download_button(
        label="Download LLM Evaluation",
        data=excel_buffer,
        file_name='eval_' + str.split(doc_name,".")[0] + '.xlsx',
        mime='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
    )