File size: 2,646 Bytes
471735c
4bde338
69a8ba9
5766f55
471735c
69a8ba9
471735c
69a8ba9
 
 
471735c
69a8ba9
471735c
5766f55
 
 
471735c
5766f55
8acb879
 
 
 
5766f55
 
471735c
 
c198b20
471735c
 
 
 
 
 
 
 
 
 
 
 
8acb879
 
 
 
 
 
 
471735c
 
8acb879
471735c
 
3706199
471735c
 
 
8acb879
3706199
471735c
3706199
d827448
471735c
 
 
 
3706199
471735c
 
 
 
8acb879
471735c
8acb879
471735c
8acb879
 
 
471735c
 
3706199
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
import os
import time
import gradio as gr
import google.generativeai as genai
from typing import Optional, List

# Load the API key from the environment.
GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY")

# Fail fast at startup if the key is missing or empty.
if not GOOGLE_API_KEY:
    raise ValueError("GOOGLE_API_KEY is not set.")

# Configure the Gemini client library.
genai.configure(api_key=GOOGLE_API_KEY)

def transform_history(history):
    """Flatten Gradio-style (user, assistant) pairs into role/content dicts.

    Falsy assistant entries (e.g. ``None`` for a still-pending reply) are
    skipped, so the returned list may end on a user message.
    """
    messages = []
    for user_msg, assistant_msg in history:
        turn = [{"role": "user", "content": user_msg}]
        if assistant_msg:
            turn.append({"role": "assistant", "content": assistant_msg})
        messages.extend(turn)
    return messages

def bot(files: Optional[List[str]], model_choice: str, system_instruction: Optional[str], history):
    """Generate a streaming chatbot reply through the Gemini API.

    Args:
        files: Optional list of text-file paths whose contents are appended
            to the conversation as extra user context.
        model_choice: Gemini model name (e.g. ``"gemini-1.5-flash"``).
        system_instruction: Optional system prompt.
        history: Gradio chat history as (user, assistant) pairs.

    Yields:
        ``history`` with the assistant reply appended one character at a
        time, producing a typing effect in the UI.
    """
    chat_history = transform_history(history)

    # If files were uploaded, append each one's text as additional user input.
    if files:
        for file_path in files:
            with open(file_path, "r", encoding="utf-8") as file:
                file_content = file.read()
            # Runtime string kept as-is ("Archivo cargado" = "File loaded").
            chat_history.append({"role": "user", "content": f"Archivo cargado: {file_content}"})

    # Gemini's content API uses the roles "user"/"model" with a "parts"
    # list, and takes the system prompt as a model-level argument rather
    # than a "system" message, so convert the OpenAI-style history here.
    contents = [
        {"role": "model" if msg["role"] == "assistant" else "user", "parts": [msg["content"]]}
        for msg in chat_history
    ]

    # Generation settings for the Flash 1.5 model.
    generation_config = genai.types.GenerationConfig(
        temperature=0.7,
        max_output_tokens=8192,
        top_k=10,
        top_p=0.9,
    )

    # BUGFIX: genai.ChatCompletion.create does not exist in the
    # google-generativeai package (that is OpenAI's API shape); use
    # GenerativeModel.generate_content instead.
    model = genai.GenerativeModel(
        model_name=model_choice,
        system_instruction=system_instruction or None,
    )
    response = model.generate_content(contents, generation_config=generation_config)
    reply = response.text

    # Stream the reply progressively into the history for a typing effect.
    # NOTE(review): the chatbot component is declared type="messages",
    # which expects {"role", "content"} dicts rather than pairs — confirm
    # the expected format before changing this yield shape.
    for i in range(len(reply)):
        time.sleep(0.05)
        yield history + [[None, reply[:i + 1]]]

# Gradio interface
with gr.Blocks() as demo:
    # NOTE(review): type="messages" expects {"role": ..., "content": ...}
    # dicts, but bot() yields [user, assistant] pairs — confirm which
    # format the app intends before shipping.
    chatbot = gr.Chatbot(elem_id="chatbot", bubble_full_width=False, type="messages")

    chat_input = gr.Textbox(
        placeholder="Escribe un mensaje...",
        show_label=False
    )

    submit_btn = gr.Button("Enviar")
    # Mojibake repaired: "Instrucci贸n" was UTF-8 "Instrucción" mis-decoded as GBK.
    system_input = gr.Textbox(placeholder="Instrucción del sistema (opcional)", show_label=True, lines=2)
    model_choice = gr.Dropdown(choices=["gemini-1.5-flash"], value="gemini-1.5-flash", label="Modelo")
    file_input = gr.File(label="Subir archivo (opcional)", file_types=[".txt", ".md", ".json"])

    # Handle message submission.
    # NOTE(review): chat_input is never wired into bot(), so the typed
    # message is ignored — bot's signature has no message parameter.
    # Confirm whether the user message should be passed through.
    submit_btn.click(
        bot,
        inputs=[file_input, model_choice, system_input, chatbot],
        outputs=chatbot
    )

demo.launch()