import os
import numpy as np
import pandas as pd
import openai
from haystack.schema import Document
import streamlit as st
from tenacity import retry, stop_after_attempt, wait_random_exponential
from huggingface_hub import InferenceClient

# Get the Hugging Face API token
hf_token = os.environ["HF_API_KEY"]
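# The token is expected in the environment before the app starts, e.g. (shell):
#   export HF_API_KEY=<your Hugging Face access token>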
# Define a helper for assembling the prompt ourselves (as we can't use haystack here)
def get_prompt(context, label):
    base_prompt = (
        "Summarize the following context efficiently in bullet points, the less the better - but keep concrete goals. "
        "Summarize only elements of the context that address vulnerability of " + label + " to climate change. "
        "If there is no mention of " + label + " in the context, return nothing. "
        "Do not include an introduction sentence, just the bullet points as per below. "
        "Formatting example: "
        "- Bullet point 1 "
        "- Bullet point 2 "
    )
    prompt = base_prompt + "; Context: " + context + "; Answer:"
    return prompt
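# Example (hypothetical values): get_prompt("Recurrent droughts cut maize yields ...", "smallholder farmers")
# returns the instruction block followed by "; Context: Recurrent droughts cut maize yields ...; Answer:"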
# Exception handling for issuing multiple API calls to openai (exponential backoff)
# @retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6))
# def completion_with_backoff(**kwargs):
#     return openai.ChatCompletion.create(**kwargs)
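# A similar wrapper could guard the Hugging Face call (a sketch only; the name
# hf_completion_with_backoff is not part of the original code):
# @retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6))
# def hf_completion_with_backoff(client, **kwargs):
#     return client.chat.completions.create(**kwargs)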
# Construct the query, send it to the HF Inference API and process the response
def run_query(context, label):
    chatbot_role = """You are an analyst specializing in climate change impact assessments and producing insights from policy documents."""
    messages = [
        {"role": "system", "content": chatbot_role},
        {"role": "user", "content": get_prompt(context, label)},
    ]
    # Initialize the client, pointing it to one of the available models
    client = InferenceClient("meta-llama/Meta-Llama-3.1-405B-Instruct", token=hf_token)
    # Request a streamed completion so tokens can be rendered as they arrive;
    # a non-streamed alternative is sketched in the comments below
    chat_completion = client.chat.completions.create(
        messages=messages,
        stream=True,
    )
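    # Non-streamed alternative (a sketch, assuming huggingface_hub's chat response
    # shape): pass stream=False and read the full answer in one go:
    #   chat_completion = client.chat.completions.create(messages=messages, stream=False)
    #   result = chat_completion.choices[0].message.content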
    # Iterate through the streamed output, re-rendering the text box as chunks arrive
    report = []
    res_box = st.empty()
    for chunk in chat_completion:
        # Extract the delta object holding the new text (streamed responses have a
        # totally different structure from non-streamed ones)
        chunk_message = chunk.choices[0].delta
        # Some chunks carry no text (e.g. the final one), so guard against that
        if chunk_message.content is not None:
            report.append(chunk_message.content)  # extract the message
            # Merge the latest text with all previous chunks
            result = "".join(report).strip()
            res_box.success(result)  # output to the response text box
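# Hypothetical usage inside the Streamlit app (the paragraph text and label are
# placeholder values, not part of the original code):
# run_query("Prolonged droughts have reduced smallholder maize yields ...", "smallholder farmers")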