from huggingface_hub import InferenceClient
import gradio as gr
import datetime
import re
import requests
import json

# Initialize the InferenceClient
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")

# OpenWeatherMap API key (enter your own key here)
api_key = "1e8cd0385845a649e448cde4917058d6"

# Define the system prompt templates, keyed by regex patterns matched against the user's message
system_prompt_templates = {
    r"\btime\b|\bhour\b|\bclock\b": "server log: ~This message was sent at {formatted_time}. The actual year is 2024.~",
    r"\bweather\b|\bforecast\b|\bmeteo": "server log: ~The current weather conditions in {city_name} are {weather_description} with a high of {current_temperature_c}°C ({current_temperature_f}°F) and a pressure of {current_pressure_hpa} hPa ({current_pressure_inHg} inHg) and humidity of {current_humidity}%.~",
    r"\bdate\b|\bcalendar\b": "server log: ~Today's date is {formatted_date}.~",
    r"\bpolitics\b|\belection\b": "server log: ~This conversation is taking place in a politically neutral environment.~"
}

# Build the Mixtral instruct-style prompt from the chat history and the new message
def format_prompt(message, history):
    prompt = ""
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response} "
    prompt += f"[INST] {message} [/INST]"
    return prompt

def generate(prompt, history, system_prompt, temperature=0.9, max_new_tokens=9048, top_p=0.95, repetition_penalty=1.0):
    temperature = max(float(temperature), 1e-2)
    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )

    # Get current time and date
    now = datetime.datetime.now()
    formatted_time = now.strftime("%H.%M.%S, %B, %Y")
    formatted_date = now.strftime("%B %d, %Y")

    # Check for keywords in the user's input and update the system prompt accordingly
    city_name = None
    weather_description = None
    current_temperature_c = None
    current_temperature_f = None
    current_pressure_hpa = None
    current_pressure_inHg = None
    current_humidity = None

    for keyword, template in system_prompt_templates.items():
        if re.search(keyword, prompt, re.IGNORECASE):
            if keyword == r"\bweather\b|\bforecast\b|\bmeteo":
                base_url = "http://api.openweathermap.org/data/2.5/weather?"
                # Try to pull a city name out of the user's message (simple "... in <city>" heuristic)
                city_match = re.search(r"\b(?:in|for)\s+([A-Za-z][A-Za-z .'-]*)", prompt, re.IGNORECASE)
                weather_found = False
                if city_match:
                    city_name = city_match.group(1).strip()
                    complete_url = base_url + "appid=" + api_key + "&q=" + city_name
                    response = requests.get(complete_url)
                    x = response.json()
                    if x["cod"] != "404":
                        y = x["main"]
                        current_temperature_c = y["temp"] - 273.15  # Convert from Kelvin to Celsius
                        current_temperature_f = current_temperature_c * 9 / 5 + 32  # Convert from Celsius to Fahrenheit
                        current_pressure_hpa = y["pressure"]
                        current_pressure_inHg = current_pressure_hpa * 0.02953  # Convert from hPa to inHg
                        current_humidity = y["humidity"]
                        z = x["weather"]
                        weather_description = z[0]["description"]
                        city_name = x["name"]
                        weather_found = True
                if not weather_found:
                    print("City Not Found")
                    city_name = "unknown"
                    weather_description = "unknown"
                    current_temperature_c = 0
                    current_temperature_f = 32
                    current_pressure_hpa = 0
                    current_pressure_inHg = 0
                    current_humidity = 0
            system_prompt = template.format(
                formatted_time=formatted_time,
                formatted_date=formatted_date,
                city_name=city_name,
                weather_description=weather_description,
                current_temperature_c=current_temperature_c,
                current_temperature_f=current_temperature_f,
                current_pressure_hpa=current_pressure_hpa,
                current_pressure_inHg=current_pressure_inHg,
                current_humidity=current_humidity,
            )
            break

    formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
    output = ""

    for response in stream:
        output += response.token.text
        # Stream the partial reply back into the chat history
        yield history + [(prompt, output)]

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(show_label=True, show_share_button=False, show_copy_button=True, likeable=True, layout="panel")

    with gr.Row():
        with gr.Column(scale=3):
            user_input = gr.Textbox(label="Your message", placeholder="Type your message here...")
        with gr.Column(scale=1):
            submit_button = gr.Button("Send")

    system_prompt_box = gr.Textbox(label="System Prompt", max_lines=1, interactive=True)

    # Returns True if the message matches any of the keyword patterns above
    def check_keywords(text):
        for keyword, _ in system_prompt_templates.items():
            if re.search(keyword, text, re.IGNORECASE):
                return True
        return False

    # Send the message when the user presses Enter or clicks "Send"
    user_input.submit(
        fn=generate,
        inputs=[user_input, chatbot, system_prompt_box],
        outputs=chatbot,
    )
    submit_button.click(
        fn=generate,
        inputs=[user_input, chatbot, system_prompt_box],
        outputs=chatbot,
    )

demo.launch(show_api=False)