import os
import numpy as np
import pandas as pd
import openai
from haystack.schema import Document
import streamlit as st
from tenacity import retry, stop_after_attempt, wait_random_exponential
from huggingface_hub import InferenceClient


# Get the Hugging Face API token from the environment
hf_token = os.environ["HF_API_KEY"]

# Define a helper that assembles the prompt manually (Haystack's prompt templating is not used here)
def get_prompt(context, label):
    base_prompt = (
        "Summarize the following context efficiently in bullet points, the fewer the better - but keep concrete goals. "
        "Summarize only elements of the context that address the vulnerability of " + label + " to climate change. "
        "If there is no mention of " + label + " in the context, return nothing. "
        "Do not include an introduction sentence, just the bullet points as per below. "
        "Formatting example: "
        "- Bullet point 1 "
        "- Bullet point 2 "
    )

    prompt = base_prompt + "; Context: " + context + "; Answer:"

    return prompt
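
# Illustrative example (assumption, not part of the original file): calling the helper
# with a hypothetical context and the label "women" returns a single prompt string:
#
#   get_prompt("Rising sea levels threaten coastal livelihoods ...", "women")
#   # -> "Summarize the following context efficiently in bullet points ...; Context: Rising
#   #     sea levels threaten coastal livelihoods ...; Answer:"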


# # exception handling for issuing multiple API calls to openai (exponential backoff)
# @retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6))
# def completion_with_backoff(**kwargs):
#     return openai.ChatCompletion.create(**kwargs)
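
# Sketch (assumption, not part of the original file): the same tenacity backoff, already
# imported above, could wrap the Hugging Face call; hf_chat_with_backoff is a hypothetical
# helper name.
# @retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6))
# def hf_chat_with_backoff(client, **kwargs):
#     return client.chat.completions.create(**kwargs)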

# Construct the query, send it to the HF Inference API and process the streamed response
def run_query(context, label):
    '''
    Build the chat messages, call the Hugging Face Inference API with streaming enabled,
    and render the summary incrementally in a Streamlit placeholder.
    (For a non-streamed completion, re-enable the commented-out completion_with_backoff
    helper above and replace the streaming code below.)
    '''
    chatbot_role = """You are an analyst specializing in climate change impact assessments and producing insights from policy documents."""
    messages = [
        {"role": "system", "content": chatbot_role},
        {"role": "user", "content": get_prompt(context, label)},
    ]
    
    # Initialize the client, pointing it to one of the available models
    client = InferenceClient("meta-llama/Meta-Llama-3.1-405B-Instruct", token=hf_token)

    chat_completion = client.chat.completions.create(
        messages=messages,
        stream=True
    )

    # iterate through the streamed output and build up the report incrementally
    report = []
    res_box = st.empty()
    for chunk in chat_completion:
        # each streamed chunk carries a delta object (structure differs from the non-streamed response)
        chunk_message = chunk.choices[0].delta
        # some chunks (e.g. the initial role-only chunk) carry no text
        if chunk_message.content:
            report.append(chunk_message.content)  # extract the new text
            # merge the latest text with everything received so far
            result = "".join(report).strip()
            res_box.success(result)  # update the response text box in place
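
# Illustrative usage sketch (assumption, not part of the original file): in the Streamlit
# app that imports this module, run_query would typically be called with the concatenated
# text of the retrieved passages and a vulnerable-group label, e.g.
#
#   context = " ".join(doc.content for doc in retrieved_docs)  # retrieved_docs is hypothetical
#   run_query(context, label="women")
#
# The streamed summary is then rendered incrementally in the st.empty() placeholder.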