JeCabrera committed on
Commit
8cb552a
·
verified ·
1 Parent(s): abe77c6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +27 -34
app.py CHANGED
@@ -2,7 +2,6 @@ TITLE = """<h1 align="center">Gemini Playground ✨</h1>"""
2
  SUBTITLE = """<h2 align="center">Play with Gemini Pro and Gemini Pro Vision</h2>"""
3
 
4
  import os
5
- import time
6
  import uuid
7
  from typing import List, Tuple, Optional, Union
8
 
@@ -27,6 +26,9 @@ IMAGE_CACHE_DIRECTORY = "/tmp"
27
  IMAGE_WIDTH = 512
28
  CHAT_HISTORY = List[Tuple[Optional[Union[Tuple[str], str]], Optional[str]]]
29
 
 
 
 
30
  # Función para transformar el historial del chat
31
  def transform_history(history: CHAT_HISTORY):
32
  """
@@ -41,14 +43,27 @@ def transform_history(history: CHAT_HISTORY):
41
  return transformed
42
 
43
  # Función de generación de respuesta
44
- def response(message: str, history: CHAT_HISTORY):
 
 
45
  """
46
  Genera una respuesta basada en el historial del chat y el mensaje del usuario.
47
  """
48
- global chat
49
- chat.history = transform_history(history)
50
- model_response = chat.send_message(message)
51
- model_response.resolve()
 
 
 
 
 
 
 
 
 
 
 
52
  return model_response.text
53
 
54
  # Preprocesamiento de imágenes
@@ -68,7 +83,7 @@ def cache_pil_image(image: Image.Image) -> str:
68
  # Subir imágenes
69
  def upload(files: Optional[List[str]], chatbot: CHAT_HISTORY) -> CHAT_HISTORY:
70
  for file in files:
71
- image = Image.open(file).convert('RGB')
72
  image_preview = preprocess_image(image)
73
  if image_preview:
74
  gr.Image(image_preview).render()
@@ -87,32 +102,10 @@ def bot(
87
  files: Optional[List[str]],
88
  model_choice: str,
89
  system_instruction: str,
90
- chatbot: CHAT_HISTORY
91
  ):
92
- if not GOOGLE_API_KEY:
93
- raise ValueError("GOOGLE_API_KEY is not set.")
94
-
95
- # Configurar la API con la clave
96
- genai.configure(api_key=GOOGLE_API_KEY)
97
- generation_config = genai.types.GenerationConfig(
98
- temperature=0.7,
99
- max_output_tokens=8192,
100
- top_k=10,
101
- top_p=0.9
102
- )
103
-
104
  text_prompt = chatbot[-1][0] if chatbot and chatbot[-1][0] and isinstance(chatbot[-1][0], str) else ""
105
- transformed_history = transform_history(chatbot)
106
-
107
- # Crear el modelo con la instrucción del sistema
108
- model = genai.GenerativeModel(
109
- model_name=model_choice,
110
- generation_config=generation_config,
111
- system_instruction=system_instruction
112
- )
113
-
114
- # Generar la respuesta usando la función `response`
115
- bot_reply = response(text_prompt, transformed_history)
116
  chatbot[-1] = (text_prompt, bot_reply)
117
  return chatbot
118
 
@@ -121,7 +114,7 @@ system_instruction_component = gr.Textbox(
121
  placeholder="Enter system instruction...", show_label=True, scale=8
122
  )
123
  chatbot_component = gr.Chatbot(
124
- label='Gemini',
125
  bubble_full_width=False,
126
  scale=2,
127
  height=300
@@ -142,14 +135,14 @@ model_choice_component = gr.Dropdown(
142
 
143
  user_inputs = [
144
  text_prompt_component,
145
- chatbot_component
146
  ]
147
 
148
  bot_inputs = [
149
  upload_button_component,
150
  model_choice_component,
151
  system_instruction_component,
152
- chatbot_component
153
  ]
154
 
155
  # Interfaz de usuario
 
2
  SUBTITLE = """<h2 align="center">Play with Gemini Pro and Gemini Pro Vision</h2>"""
3
 
4
  import os
 
5
  import uuid
6
  from typing import List, Tuple, Optional, Union
7
 
 
26
  IMAGE_WIDTH = 512
27
  CHAT_HISTORY = List[Tuple[Optional[Union[Tuple[str], str]], Optional[str]]]
28
 
29
+ # Configurar la API de Gemini
30
+ genai.configure(api_key=GOOGLE_API_KEY)
31
+
32
  # Función para transformar el historial del chat
33
  def transform_history(history: CHAT_HISTORY):
34
  """
 
43
  return transformed
44
 
45
# Response generation
def response(
    message: str, history: CHAT_HISTORY, model_choice: str, system_instruction: str
) -> str:
    """Generate a model reply to *message* given the chat history.

    Args:
        message: The user's latest message text.
        history: Chat history in the Gradio chatbot format (CHAT_HISTORY);
            converted to the SDK's expected format via ``transform_history``.
        model_choice: Name of the Gemini model to instantiate.
        system_instruction: System prompt forwarded to the model.

    Returns:
        The text of the model's reply.
    """
    generation_config = genai.types.GenerationConfig(
        temperature=0.7,
        max_output_tokens=8192,
        top_k=10,
        top_p=0.9,
    )

    model = genai.GenerativeModel(
        model_name=model_choice,
        generation_config=generation_config,
        system_instruction=system_instruction,
    )

    # BUG FIX: GenerativeModel has no `.chat(messages=...)` method in the
    # google-generativeai SDK (the original call raised AttributeError).
    # Start a chat session seeded with the transformed history, then send
    # the new user message through the session.
    chat_session = model.start_chat(history=transform_history(history))
    model_response = chat_session.send_message(message)
    return model_response.text
68
 
69
  # Preprocesamiento de imágenes
 
83
  # Subir imágenes
84
  def upload(files: Optional[List[str]], chatbot: CHAT_HISTORY) -> CHAT_HISTORY:
85
  for file in files:
86
+ image = Image.open(file).convert("RGB")
87
  image_preview = preprocess_image(image)
88
  if image_preview:
89
  gr.Image(image_preview).render()
 
102
  files: Optional[List[str]],
103
  model_choice: str,
104
  system_instruction: str,
105
+ chatbot: CHAT_HISTORY,
106
  ):
 
 
 
 
 
 
 
 
 
 
 
 
107
  text_prompt = chatbot[-1][0] if chatbot and chatbot[-1][0] and isinstance(chatbot[-1][0], str) else ""
108
+ bot_reply = response(text_prompt, chatbot, model_choice, system_instruction)
 
 
 
 
 
 
 
 
 
 
109
  chatbot[-1] = (text_prompt, bot_reply)
110
  return chatbot
111
 
 
114
  placeholder="Enter system instruction...", show_label=True, scale=8
115
  )
116
  chatbot_component = gr.Chatbot(
117
+ label="Gemini",
118
  bubble_full_width=False,
119
  scale=2,
120
  height=300
 
135
 
136
  user_inputs = [
137
  text_prompt_component,
138
+ chatbot_component,
139
  ]
140
 
141
  bot_inputs = [
142
  upload_button_component,
143
  model_choice_component,
144
  system_instruction_component,
145
+ chatbot_component,
146
  ]
147
 
148
  # Interfaz de usuario