# Import necessary libraries
import logging
import os

import gradio as gr
from azure.storage.fileshare import ShareServiceClient

# Import custom modules
from climateqa.engine.embeddings import get_embeddings_function
from climateqa.engine.llm import get_llm
from climateqa.engine.vectorstore import get_pinecone_vectorstore
from climateqa.engine.reranker import get_reranker
from climateqa.engine.graph import make_graph_agent
from climateqa.engine.chains.retrieve_papers import find_papers
from climateqa.chat import start_chat, chat_stream, finish_chat
from front.tabs import (
    create_config_modal,
    create_examples_tab,
    create_papers_tab,
    create_figures_tab,
    create_chat_interface,
    create_about_tab,
)
from front.utils import process_figures
from utils import create_user_id

logging.basicConfig(level=logging.WARNING)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # Suppress TensorFlow INFO and WARNING logs
logging.getLogger().setLevel(logging.WARNING)

# Load environment variables in local mode; in deployment the variables are
# expected to be set already, so a missing python-dotenv is silently ignored.
try:
    from dotenv import load_dotenv

    load_dotenv()
except Exception:
    pass

# Set up the Gradio theme
theme = gr.themes.Base(
    primary_hue="blue",
    secondary_hue="red",
    font=[gr.themes.GoogleFont("Poppins"), "ui-sans-serif", "system-ui", "sans-serif"],
)

# Azure Blob Storage credentials. Account keys are base64-encoded and normally
# 88 characters long; an 86-character key has lost its trailing "==" padding.
account_key = os.environ["BLOB_ACCOUNT_KEY"]
if len(account_key) == 86:
    account_key += "=="

credential = {
    "account_key": account_key,
    "account_name": os.environ["BLOB_ACCOUNT_NAME"],
}

account_url = os.environ["BLOB_ACCOUNT_URL"]
file_share_name = "climateqa"

service = ShareServiceClient(account_url=account_url, credential=credential)
share_client = service.get_share_client(file_share_name)

user_id = create_user_id()

# Create the vectorstores and retriever
embeddings_function = get_embeddings_function()
vectorstore = get_pinecone_vectorstore(
    embeddings_function, index_name=os.getenv("PINECONE_API_INDEX")
)
vectorstore_graphs = get_pinecone_vectorstore(
    embeddings_function,
    index_name=os.getenv("PINECONE_API_INDEX_OWID"),
    text_key="description",
)
vectorstore_region = get_pinecone_vectorstore(
    embeddings_function, index_name=os.getenv("PINECONE_API_INDEX_REGION")
)

llm = get_llm(provider="openai", max_tokens=1024, temperature=0.0)
reranker = get_reranker("nano")

agent = make_graph_agent(
    llm=llm,
    vectorstore_ipcc=vectorstore,
    vectorstore_graphs=vectorstore_graphs,
    vectorstore_region=vectorstore_region,
    reranker=reranker,
    threshold_docs=0,  # TODO: put back the default of 0.2
)


async def chat(query, history, audience, sources, reports, relevant_content_sources_selection, search_only):
    """Thin async wrapper that streams chat events from the agent to the Gradio UI."""
    async for event in chat_stream(
        agent,
        query,
        history,
        audience,
        sources,
        reports,
        relevant_content_sources_selection,
        search_only,
        share_client,
        user_id,
    ):
        yield event


# --------------------------------------------------------------------
# Gradio
# --------------------------------------------------------------------

# Toggle the configuration modal's visibility and return the new state
def update_config_modal_visibility(config_open):
    new_config_visibility_status = not config_open
    return gr.update(visible=new_config_visibility_status), new_config_visibility_status


def update_sources_number_display(sources_textbox, figures_cards, current_graphs, papers_html):
    sources_number = sources_textbox.count("