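# app.py ("copywriter" Gradio Space): loads a Gemini 1.5 Flash chat interface
# through gemini_gradio and serves it with server-side rendering disabled.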
import gradio as gr
import gemini_gradio
import time

def echo(message, history, system_prompt, tokens):
    # Example streaming echo handler: repeats the system prompt and user message,
    # yielding one character at a time (capped by the token slider). It is not
    # used by the Gemini interface loaded below.
    response = f"System prompt: {system_prompt}\n Message: {message}."
    for i in range(min(len(response), int(tokens))):
        time.sleep(0.05)
        yield response[: i + 1]

with gr.Blocks() as demo:
    system_prompt = gr.Textbox("You are helpful AI.", label="System Prompt")
    slider = gr.Slider(10, 100, render=False)

    # Load the Gemini chat UI from the gemini_gradio registry. The extra keyword
    # arguments are assumed to be forwarded to the underlying chat interface.
    gemini_interface = gr.load(
        name="gemini-1.5-flash",  # default model
        src=gemini_gradio.registry,
        fill_height=True,
        additional_inputs=[system_prompt, slider],
        chatbot=gr.Chatbot(type="messages"),
    )

demo.launch(ssr_mode=False)