JeCabrera committed on
Commit 55771b2 · verified
1 Parent(s): d621521

Update app.py

Files changed (1)
  1. app.py +26 -20
app.py CHANGED
@@ -2,15 +2,16 @@ import time
 import gradio as gr
 import google.generativeai as genai
 import os
-from PIL import Image
 from dotenv import load_dotenv
 
+# Load environment variables
+load_dotenv()
 
-# Load the API key from environment variables
+# Configure the API key
 GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
 
 if not GOOGLE_API_KEY:
-    raise ValueError("GOOGLE_API_KEY is not set in environment variables.")
+    raise ValueError("GOOGLE_API_KEY is not set in the environment variables.")
 
 genai.configure(api_key=GOOGLE_API_KEY)
 
@@ -18,35 +19,40 @@ genai.configure(api_key=GOOGLE_API_KEY)
 def transform_history(history):
     new_history = []
     for chat in history:
-        new_history.append({"parts": [{"text": chat[0]}], "role": "user"})
-        new_history.append({"parts": [{"text": chat[1]}], "role": "model"})
+        new_history.append({"role": "user", "content": chat[0]})
+        new_history.append({"role": "assistant", "content": chat[1]})
     return new_history
 
+# Add a message to the history
 def add_message(history, message):
-    for x in message["files"]:
-        history.append({"role": "user", "content": {"path": x}})
+    for file in message["files"]:
+        history.append({"role": "user", "content": {"path": file}})
     if message["text"] is not None:
         history.append({"role": "user", "content": message["text"]})
     return history, gr.MultimodalTextbox(value=None, interactive=False)
 
+# Generate the chatbot response
 def bot(history):
     chat_history = transform_history(history)
-    response = genai.ChatCompletion.create(
-        model="gemini-1.5-flash",  # Use the appropriate model
-        messages=chat_history + [{"role": "user", "content": history[-1][0]}],
-        max_tokens=150,
-    )
+    try:
+        response = genai.chat(
+            model="gemini-1.5-flash",  # Make sure to use the correct model
+            messages=chat_history + [{"role": "user", "content": history[-1][0]}],
+            max_output_tokens=150
+        )
+        reply = response["candidates"][0]["content"]
 
-    reply = response['choices'][0]['message']['content']
-
-    # Stream the reply character by character
-    for i in range(len(reply)):
-        time.sleep(0.05)
-        yield history + [{"role": "assistant", "content": reply[:i + 1]}]
+        # Show the response progressively
+        for i in range(len(reply)):
+            time.sleep(0.05)
+            yield history + [{"role": "assistant", "content": reply[:i + 1]}]
+    except Exception as e:
+        yield history + [{"role": "assistant", "content": f"Error: {e}"}]
 
+# Build the Gradio interface
 with gr.Blocks() as demo:
     chatbot = gr.Chatbot(elem_id="chatbot", bubble_full_width=False, type="messages")
-
+
     chat_input = gr.MultimodalTextbox(
         interactive=True,
         file_count="multiple",
@@ -54,7 +60,7 @@ with gr.Blocks() as demo:
         show_label=False,
        sources=["microphone", "upload"],
     )
-
+
     chat_msg = chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input])
    bot_msg = chat_msg.then(bot, chatbot, chatbot, api_name="bot_response")
    bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])
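
The added load_dotenv() call reads a local .env file before GOOGLE_API_KEY is looked up, so the key can be supplied without exporting it in the shell. A minimal .env for running this app locally might look like the following (the value is a placeholder, not part of this commit):

# .env (kept out of version control)
GOOGLE_API_KEY=your-api-key-here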
 
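Note that the google-generativeai SDK does not expose a module-level genai.chat() call for Gemini models, and with type="messages" the Gradio history entries are dicts rather than (user, bot) pairs, so the chat[0] and history[-1][0] indexing above would not line up at runtime. A minimal sketch of how bot() could be written against the SDK's genai.GenerativeModel chat interface instead (the role mapping and the skipping of file attachments are illustrative assumptions, not part of this commit):

import time
import google.generativeai as genai

def bot(history):
    # Gradio's type="messages" history is a list of {"role": ..., "content": ...} dicts.
    # Gemini chat history uses roles "user"/"model" and a "parts" list, so map it first.
    gemini_history = [
        {"role": "model" if msg["role"] == "assistant" else "user", "parts": [str(msg["content"])]}
        for msg in history[:-1]
        if isinstance(msg.get("content"), str)  # skip file attachments in this sketch
    ]
    model = genai.GenerativeModel("gemini-1.5-flash")
    chat = model.start_chat(history=gemini_history)
    try:
        response = chat.send_message(
            history[-1]["content"],
            generation_config=genai.types.GenerationConfig(max_output_tokens=150),
        )
        reply = response.text
        # Reveal the reply progressively, mirroring the committed behavior
        for i in range(len(reply)):
            time.sleep(0.05)
            yield history + [{"role": "assistant", "content": reply[:i + 1]}]
    except Exception as e:
        yield history + [{"role": "assistant", "content": f"Error: {e}"}]

For lower latency, the per-character sleep could also be replaced by send_message(..., stream=True) and yielding each streamed chunk's text; the timing loop is kept here only to mirror the committed version.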