Update app.py
app.py CHANGED
@@ -3,8 +3,6 @@ SUBTITLE = """<h2 align="center">Play with Gemini Pro and Gemini Pro Vision</h2>

import os
import time
-from typing import Optional, Tuple
-
import google.generativeai as genai
import gradio as gr
from dotenv import load_dotenv
@@ -22,44 +20,61 @@ if not GOOGLE_API_KEY:
    raise ValueError("GOOGLE_API_KEY is not set in environment variables.")

# Gemini model configuration
+generation_config = {
+    "temperature": 1,
+    "top_p": 0.95,
+    "top_k": 40,
+    "max_output_tokens": 8192,
+    "response_mime_type": "text/plain",
+}
+
genai.configure(api_key=GOOGLE_API_KEY)
-
-
-
-
-    top_p=0.9
+
+model = genai.GenerativeModel(
+    model_name="gemini-1.5-flash",
+    generation_config=generation_config
)

-#
+# Initialize the chat session
+chat = model.start_chat(history=[])
+
+# Transform the Gradio history into the format Gemini expects
+def transform_history(history):
+    new_history = []
+    for chat_entry in history:
+        new_history.append({"parts": [{"text": chat_entry[0]}], "role": "user"})
+        new_history.append({"parts": [{"text": chat_entry[1]}], "role": "model"})
+    return new_history
+
+# Response function that manages the chat history
def bot_response(
    model_choice: str,
-    system_instruction:
+    system_instruction: str,
    text_prompt: str,
    chatbot: list,
) -> Tuple[list, str]:
    """
-    Sends the message to the model
+    Sends the message to the model, gets the response, and updates the history.
    """
    if not text_prompt.strip():
        return chatbot, "Por favor, escribe un mensaje válido."

+    # Convert the history to the format Gemini expects
+    transformed_history = transform_history(chatbot)
+
    # Configure the model
-    model = genai.GenerativeModel(
-        model_name=model_choice,
-        generation_config=generation_config,
-        system_instruction=system_instruction or "You are an assistant."
-    )
+    chat.history = transformed_history

-    #
-    response =
+    # Send the message and get the response
+    response = chat.send_message(text_prompt)
+    response.resolve()

-    #
-    generated_text = ""
-    for chunk in response:
-        generated_text += chunk.text
+    # Get the text generated by the model
+    generated_text = response.text

-    # Update the history
+    # Update the history with the question and the answer
    chatbot.append((text_prompt, generated_text))
+
    return chatbot, ""

# Interface components
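The hunk ends right before the interface components, so the diff does not show how bot_response is attached to the UI. Below is a minimal sketch of that wiring, written as a continuation of app.py (which already imports gradio as gr and defines SUBTITLE and bot_response above). The component names (model_dropdown, system_box, prompt_box, send_btn) and the model choices are illustrative assumptions, not part of this commit. Note also that the unchanged annotation Tuple[list, str] still requires from typing import Tuple (or the built-in tuple[list, str]) now that the typing import has been removed.

# Hypothetical wiring for the "# Interface components" section; names and
# model choices here are illustrative and do not appear in the commit.
with gr.Blocks() as demo:
    gr.HTML(SUBTITLE)  # SUBTITLE is defined near the top of app.py
    chatbot = gr.Chatbot(label="Gemini")  # (user, bot) tuple pairs, as bot_response expects
    model_dropdown = gr.Dropdown(
        choices=["gemini-1.5-flash", "gemini-1.5-pro"],  # assumed choices
        value="gemini-1.5-flash",
        label="Model",
    )
    system_box = gr.Textbox(label="System instruction", value="You are an assistant.")
    prompt_box = gr.Textbox(label="Message")
    send_btn = gr.Button("Send")

    # bot_response returns (updated history, ""), so the second output clears the textbox.
    send_btn.click(
        fn=bot_response,
        inputs=[model_dropdown, system_box, prompt_box, chatbot],
        outputs=[chatbot, prompt_box],
    )

demo.launch()

Passing model_dropdown and system_box keeps bot_response's signature satisfied, but after this change the model and chat session are created globally with "gemini-1.5-flash", so those two arguments are accepted without being used inside the function. The tuple-style Chatbot history is what transform_history indexes as chat_entry[0] (user) and chat_entry[1] (model).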