JeCabrera committed on
Commit
471735c
verified
1 Parent(s): 55771b2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +45 -46
app.py CHANGED
@@ -1,70 +1,69 @@
 
1
  import time
2
  import gradio as gr
3
  import google.generativeai as genai
4
- import os
5
- from dotenv import load_dotenv
6
-
7
- # Cargar variables de entorno
8
- load_dotenv()
9
 
10
- # Configurar clave de API
11
  GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
12
 
13
  if not GOOGLE_API_KEY:
14
- raise ValueError("GOOGLE_API_KEY no está configurada en las variables de entorno.")
15
 
 
16
  genai.configure(api_key=GOOGLE_API_KEY)
17
 
18
- # Transformar el historial a formato Gemini
19
  def transform_history(history):
 
20
  new_history = []
21
  for chat in history:
22
  new_history.append({"role": "user", "content": chat[0]})
23
  new_history.append({"role": "assistant", "content": chat[1]})
24
  return new_history
25
 
26
- # Agregar mensaje al historial
27
- def add_message(history, message):
28
- for file in message["files"]:
29
- history.append({"role": "user", "content": {"path": file}})
30
- if message["text"] is not None:
31
- history.append({"role": "user", "content": message["text"]})
32
- return history, gr.MultimodalTextbox(value=None, interactive=False)
33
-
34
- # Generar respuesta del chatbot
35
- def bot(history):
36
  chat_history = transform_history(history)
37
- try:
38
- response = genai.chat(
39
- model="gemini-1.5-flash", # Asegúrate de usar el modelo correcto
40
- messages=chat_history + [{"role": "user", "content": history[-1][0]}],
41
- max_output_tokens=150
42
- )
43
- reply = response["candidates"][0]["content"]
 
 
 
 
 
 
 
 
 
 
44
 
45
- # Mostrar respuesta de manera progresiva
46
- for i in range(len(reply)):
47
- time.sleep(0.05)
48
- yield history + [{"role": "assistant", "content": reply[:i + 1]}]
49
- except Exception as e:
50
- yield history + [{"role": "assistant", "content": f"Error: {e}"}]
51
 
52
- # Crear interfaz Gradio
53
  with gr.Blocks() as demo:
54
  chatbot = gr.Chatbot(elem_id="chatbot", bubble_full_width=False, type="messages")
55
-
56
- chat_input = gr.MultimodalTextbox(
57
- interactive=True,
58
- file_count="multiple",
59
- placeholder="Escribe un mensaje o sube un archivo...",
60
- show_label=False,
61
- sources=["microphone", "upload"],
62
  )
63
-
64
- chat_msg = chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input])
65
- bot_msg = chat_msg.then(bot, chatbot, chatbot, api_name="bot_response")
66
- bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])
67
-
68
- chatbot.like(lambda x: print(x.index, x.value, x.liked), None, None, like_user_message=True)
69
-
 
 
 
 
70
  demo.launch()
 
1
+ import os
2
  import time
3
  import gradio as gr
4
  import google.generativeai as genai
5
+ from typing import Optional, List
 
 
 
 
6
 
7
+ # Cargar la clave API desde el entorno
8
  GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
9
 
10
  if not GOOGLE_API_KEY:
11
+ raise ValueError("GOOGLE_API_KEY is not set.")
12
 
13
+ # Configurar la API de Gemini
14
  genai.configure(api_key=GOOGLE_API_KEY)
15
 
 
16
  def transform_history(history):
17
+ """Transforma el historial en el formato esperado por Gemini."""
18
  new_history = []
19
  for chat in history:
20
  new_history.append({"role": "user", "content": chat[0]})
21
  new_history.append({"role": "assistant", "content": chat[1]})
22
  return new_history
23
 
24
+ def bot(files: Optional[List[str]], model_choice: str, system_instruction: Optional[str], history):
25
+ """Procesa la interacción del chatbot."""
 
 
 
 
 
 
 
 
26
  chat_history = transform_history(history)
27
+
28
+ if system_instruction:
29
+ chat_history.insert(0, {"role": "system", "content": system_instruction})
30
+
31
+ # Configuración del modelo Flash 1.5
32
+ generation_config = genai.types.GenerationConfig(
33
+ temperature=0.7,
34
+ max_output_tokens=8192,
35
+ top_k=10,
36
+ top_p=0.9
37
+ )
38
+
39
+ response = genai.ChatCompletion.create(
40
+ model=model_choice,
41
+ messages=chat_history + [{"role": "user", "content": history[-1][0]}],
42
+ generation_config=generation_config
43
+ )
44
 
45
+ reply = response['candidates'][0]['content']
46
+ for i in range(len(reply)):
47
+ time.sleep(0.05)
48
+ yield history + [{"role": "assistant", "content": reply[:i + 1]}]
 
 
49
 
50
+ # Interfaz con Gradio
51
  with gr.Blocks() as demo:
52
  chatbot = gr.Chatbot(elem_id="chatbot", bubble_full_width=False, type="messages")
53
+
54
+ chat_input = gr.Textbox(
55
+ placeholder="Escribe un mensaje...",
56
+ show_label=False
 
 
 
57
  )
58
+
59
+ submit_btn = gr.Button("Enviar")
60
+ system_input = gr.Textbox(placeholder="Instrucción del sistema (opcional)", show_label=True, lines=2)
61
+ model_choice = gr.Dropdown(choices=["gemini-1.5-flash"], value="gemini-1.5-flash", label="Modelo")
62
+
63
+ submit_btn.click(
64
+ bot,
65
+ inputs=[None, model_choice, system_input, chatbot],
66
+ outputs=chatbot
67
+ )
68
+
69
  demo.launch()