from huggingface_hub import InferenceClient
import gradio as gr
import datetime
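# Streaming Gradio chat demo backed by Mixtral-8x7B-Instruct via the Hugging Face Inference API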

# Initialize the InferenceClient
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")

def format_prompt(message, history):
    # Build a prompt in the Mixtral instruction format:
    # <s>[INST] user [/INST] response</s> ... [INST] new message [/INST]
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt

def generate(prompt, history, system_prompt, temperature=0.9, max_new_tokens=9048, top_p=0.95, repetition_penalty=1.0):
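    # Keep temperature strictly positive; a value of 0 is rejected when sampling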
    temperature = max(float(temperature), 1e-2)
    top_p = float(top_p)
    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )

    # Build a timestamp note and prepend it, together with the user-supplied system prompt
    now = datetime.datetime.now()
    formatted_time = now.strftime("%H.%M.%S, %B, %Y")
    time_note = f"server log: ~This message was sent at {formatted_time}. The actual year is {now.year}.~"
    system_prompt = f"{system_prompt} {time_note}".strip() if system_prompt else time_note

    formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
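    # Stream tokens from the Inference API so partial output can be shown as it arrives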
    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
    output = ""
    for response in stream:
        output += response.token.text
        yield output

additional_inputs = [
    gr.Textbox(label="System Prompt", max_lines=1, interactive=True),
    gr.Slider(label="Temperature", value=0.9, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs"),
    gr.Slider(label="Max new tokens", value=9048, minimum=256, maximum=9048, step=64, interactive=True, info="The maximum numbers of new tokens"),
    gr.Slider(label="Top-p (nucleus sampling)", value=0.90, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Higher values sample more low-probability tokens"),
    gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens")
]

app = gr.Blocks(theme=gr.themes.Soft())
with app:
    chatbot = gr.Chatbot()
    text_input = gr.Textbox(label="Your message")
    # Render the generation controls defined above so they can be wired up as event inputs
    for component in additional_inputs:
        component.render()

    def process_message(message, history, system_prompt, temperature, max_new_tokens, top_p, repetition_penalty):
        history = history or []
        # Stream each partial response into the chat window and clear the input box
        for output in generate(message, history, system_prompt, temperature, max_new_tokens, top_p, repetition_penalty):
            yield history + [(message, output)], ""

    text_input.submit(process_message, inputs=[text_input, chatbot] + additional_inputs, outputs=[chatbot, text_input])

app.launch(show_api=False)