import os
import time
import gradio as gr
import google.generativeai as genai
from typing import Optional

# Load the API key from the environment
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")

if not GOOGLE_API_KEY:
    raise ValueError("GOOGLE_API_KEY is not set.")

# Configure the Gemini API
genai.configure(api_key=GOOGLE_API_KEY)

def transform_history(history):
    """Convert the Gradio "messages" history into the format expected by Gemini."""
    # Gradio uses the roles "user"/"assistant"; Gemini expects "user"/"model" with "parts"
    role_map = {"user": "user", "assistant": "model"}
    return [
        {"role": role_map[msg["role"]], "parts": [msg["content"]]}
        for msg in history
        if msg["role"] in role_map
    ]

def bot(message: str, model_choice: str, system_instruction: Optional[str], history):
    """Handle one chatbot turn and stream the reply back to the UI."""
    # Generation settings for Gemini 1.5 Flash
    generation_config = genai.types.GenerationConfig(
        temperature=0.7,
        max_output_tokens=8192,
        top_k=10,
        top_p=0.9,
    )

    # Build the model; the system instruction is optional
    model = genai.GenerativeModel(
        model_name=model_choice,
        system_instruction=system_instruction or None,
        generation_config=generation_config,
    )

    # Seed a chat session with the previous turns, then send the new message
    chat = model.start_chat(history=transform_history(history))
    response = chat.send_message(message)
    reply = response.text

    # Append the user turn, then stream the reply character by character
    history = history + [{"role": "user", "content": message}]
    for i in range(len(reply)):
        time.sleep(0.05)
        yield history + [{"role": "assistant", "content": reply[: i + 1]}]

# Gradio interface
with gr.Blocks() as demo:
    chatbot = gr.Chatbot(elem_id="chatbot", bubble_full_width=False, type="messages")
    
    chat_input = gr.Textbox(
        placeholder="Escribe un mensaje...",
        show_label=False
    )
    
    submit_btn = gr.Button("Send")
    system_input = gr.Textbox(placeholder="System instruction (optional)", show_label=True, lines=2)
    model_choice = gr.Dropdown(choices=["gemini-1.5-flash"], value="gemini-1.5-flash", label="Model")
    
    submit_btn.click(
        bot,  # Function that handles the button click
        inputs=[chat_input, model_choice, system_input, chatbot],  # message text, model, system prompt, chat history
        outputs=chatbot  # The chatbot is updated with the streamed reply
    )
    
demo.launch()
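
# Usage sketch (assumptions: this file is saved as app.py and the `gradio` and
# `google-generativeai` packages are installed):
#
#   export GOOGLE_API_KEY="your-api-key"
#   python app.py
#
# Gradio serves the chat UI on http://127.0.0.1:7860 by default.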