JeCabrera committed on
Commit 60be30f (verified)
1 Parent(s): acf4f25

Update app.py

Files changed (1):
  1. app.py +30 -65
app.py CHANGED
@@ -5,11 +5,11 @@ import os
 import time
 import uuid
 from typing import List, Tuple, Optional, Dict, Union
-
 import google.generativeai as genai
 import gradio as gr
 from PIL import Image
 from dotenv import load_dotenv
+from langdetect import detect
 
 # Load environment variables from the .env file
 load_dotenv()
@@ -27,6 +27,7 @@ IMAGE_CACHE_DIRECTORY = "/tmp"
 IMAGE_WIDTH = 512
 CHAT_HISTORY = List[Tuple[Optional[Union[Tuple[str], str]], Optional[str]]]
 
+# Preprocessing and configuration for stop sequences and images
 def preprocess_stop_sequences(stop_sequences: str) -> Optional[List[str]]:
     return [sequence.strip() for sequence in stop_sequences.split(",")] if stop_sequences else None
 
@@ -47,7 +48,6 @@ def upload(files: Optional[List[str]], chatbot: CHAT_HISTORY) -> CHAT_HISTORY:
         image = Image.open(file).convert('RGB')
         image_preview = preprocess_image(image)
         if image_preview:
-            # Display a preview of the uploaded image
             gr.Image(image_preview).render()
         image_path = cache_pil_image(image)
         chatbot.append(((image_path,), None))
@@ -65,13 +65,18 @@ def bot(
     stop_sequences: str,
     top_k: int,
     top_p: float,
+    model_name: str,  # Receive the selected model
     chatbot: CHAT_HISTORY
 ):
     if not GOOGLE_API_KEY:
         raise ValueError("GOOGLE_API_KEY is not set.")
 
-    # Configure the API with the key
     genai.configure(api_key=GOOGLE_API_KEY)
+
+    # Detect the language of the input text
+    text_prompt = [chatbot[-1][0]] if chatbot and chatbot[-1][0] and isinstance(chatbot[-1][0], str) else []
+    detected_language = detect(text_prompt[-1]) if text_prompt else 'en'
+
     generation_config = genai.types.GenerationConfig(
         temperature=temperature,
         max_output_tokens=max_output_tokens,
@@ -80,11 +85,9 @@ def bot(
         top_p=top_p
     )
 
-    text_prompt = [chatbot[-1][0]] if chatbot and chatbot[-1][0] and isinstance(chatbot[-1][0], str) else []
-    image_prompt = [preprocess_image(Image.open(file).convert('RGB')) for file in files] if files else []
-    model_name = 'gemini-1.5-flash'
-    model = genai.GenerativeModel(model_name)
-    response = model.generate_content(text_prompt + image_prompt, stream=True, generation_config=generation_config)
+    # Configure the selected model
+    model = genai.GenerativeModel(model_name)  # Use the model selected by the user
+    response = model.generate_content(text_prompt + [], stream=True, generation_config=generation_config)
 
     chatbot[-1][1] = ""
     for chunk in response:
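Note on the bot() changes above: detected_language is computed but not used in the lines shown, and the new generate_content call passes text_prompt + [], so the uploaded images that the previous version built from files no longer reach the model here. Below is a minimal sketch of how both could be folded back into the request, assuming bot() still receives the uploaded file paths as files (as in the removed lines); the language_hint wording is illustrative and not part of the commit.

    # Sketch only (not part of this commit): rebuild the image parts and add a
    # language instruction derived from langdetect's ISO 639-1 code.
    image_prompt = [preprocess_image(Image.open(file).convert('RGB')) for file in files] if files else []
    language_hint = [f"Reply in the language with ISO 639-1 code '{detected_language}'."] if text_prompt else []
    model = genai.GenerativeModel(model_name)
    response = model.generate_content(
        language_hint + text_prompt + image_prompt,  # text parts and PIL images in a single request
        stream=True,
        generation_config=generation_config,
    )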
@@ -94,67 +97,29 @@ def bot(
         time.sleep(0.01)
         yield chatbot
 
-chatbot_component = gr.Chatbot(
-    label='Gemini',
-    bubble_full_width=False,
-    scale=2,
-    height=300
-)
-text_prompt_component = gr.Textbox(
-    placeholder="Message...", show_label=False, autofocus=True, scale=8
-)
-upload_button_component = gr.UploadButton(
-    label="Upload Images", file_count="multiple", file_types=["image"], scale=1
-)
-run_button_component = gr.Button(value="Run", variant="primary", scale=1)
-temperature_component = gr.Slider(
-    minimum=0,
-    maximum=1.0,
-    value=0.4,
-    step=0.05,
-    label="Temperature",
-)
-max_output_tokens_component = gr.Slider(
-    minimum=1,
-    maximum=2048,
-    value=1024,
-    step=1,
-    label="Token limit",
-)
-stop_sequences_component = gr.Textbox(
-    label="Add stop sequence",
-    value="",
-    type="text",
-    placeholder="STOP, END",
-)
-top_k_component = gr.Slider(
-    minimum=1,
-    maximum=40,
-    value=32,
-    step=1,
-    label="Top-K",
-)
-top_p_component = gr.Slider(
-    minimum=0,
-    maximum=1,
-    value=1,
-    step=0.01,
-    label="Top-P",
+# Dropdown for selecting the model
+model_dropdown = gr.Dropdown(
+    label="Selecciona un modelo",
+    choices=["gemini-1.5-flash", "gemini-2.0-flash-exp", "gemini-1.5-pro"],  # Model options
+    value="gemini-1.5-flash",  # Default value
+    type="value",  # The selected value is passed through
 )
 
-user_inputs = [
-    text_prompt_component,
-    chatbot_component
-]
+# Gradio components
+chatbot_component = gr.Chatbot(label='Gemini', bubble_full_width=False, scale=2, height=300)
+text_prompt_component = gr.Textbox(placeholder="Message...", show_label=False, autofocus=True, scale=8)
+upload_button_component = gr.UploadButton(label="Upload Images", file_count="multiple", file_types=["image"], scale=1)
+run_button_component = gr.Button(value="Run", variant="primary", scale=1)
+temperature_component = gr.Slider(minimum=0, maximum=1.0, value=0.4, step=0.05, label="Temperature")
+max_output_tokens_component = gr.Slider(minimum=1, maximum=2048, value=1024, step=1, label="Token limit")
+stop_sequences_component = gr.Textbox(label="Add stop sequence", value="", type="text", placeholder="STOP, END")
+top_k_component = gr.Slider(minimum=1, maximum=40, value=32, step=1, label="Top-K")
+top_p_component = gr.Slider(minimum=0, maximum=1, value=1, step=0.01, label="Top-P")
 
+user_inputs = [text_prompt_component, chatbot_component]
 bot_inputs = [
-    upload_button_component,
-    temperature_component,
-    max_output_tokens_component,
-    stop_sequences_component,
-    top_k_component,
-    top_p_component,
-    chatbot_component
+    upload_button_component, temperature_component, max_output_tokens_component,
+    stop_sequences_component, top_k_component, top_p_component, model_dropdown, chatbot_component
 ]
 
 with gr.Blocks() as demo:
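The diff ends at the top of the Blocks context, so the layout and event wiring that consume user_inputs, bot_inputs, and the new model_dropdown are not shown. The following is an illustrative sketch only, assuming a user() handler (not part of this diff) that records the typed message before bot() streams the reply; the actual app.py may wire things differently.

with gr.Blocks() as demo:
    with gr.Column():
        model_dropdown.render()
        chatbot_component.render()
        with gr.Row():
            text_prompt_component.render()
            upload_button_component.render()
            run_button_component.render()
        with gr.Accordion("Parameters", open=False):
            temperature_component.render()
            max_output_tokens_component.render()
            stop_sequences_component.render()
            top_k_component.render()
            top_p_component.render()

    # Run button: first record the user turn (user() is assumed, not shown in
    # this diff), then stream the model reply from bot().
    run_button_component.click(
        fn=user, inputs=user_inputs, outputs=[text_prompt_component, chatbot_component], queue=False
    ).then(
        fn=bot, inputs=bot_inputs, outputs=[chatbot_component]
    )
    # Image uploads are appended to the chat history by upload().
    upload_button_component.upload(
        fn=upload, inputs=[upload_button_component, chatbot_component], outputs=[chatbot_component], queue=False
    )

demo.queue().launch()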
 