JeCabrera committed on
Commit 834a27f · verified · 1 Parent(s): 67eabfa

Update app.py

Files changed (1)
  1. app.py +44 -31
app.py CHANGED
@@ -28,37 +28,55 @@ IMAGE_WIDTH = 512
 CHAT_HISTORY = List[Tuple[Optional[Union[Tuple[str], str]], Optional[str]]]
 
 def preprocess_image(image: Image.Image) -> Optional[Image.Image]:
-    """Resizes the image to fit the application."""
     if image:
         image_height = int(image.height * IMAGE_WIDTH / image.width)
         return image.resize((IMAGE_WIDTH, image_height))
 
 def cache_pil_image(image: Image.Image) -> str:
-    """Saves the processed image to the temporary file system."""
     image_filename = f"{uuid.uuid4()}.jpeg"
     os.makedirs(IMAGE_CACHE_DIRECTORY, exist_ok=True)
     image_path = os.path.join(IMAGE_CACHE_DIRECTORY, image_filename)
     image.save(image_path, "JPEG")
     return image_path
 
+def upload_to_gemini(path, mime_type=None):
+    """Uploads the given file to Gemini."""
+    file = genai.upload_file(path, mime_type=mime_type)
+    print(f"Uploaded file '{file.display_name}' as: {file.uri}")
+    return file
+
+def wait_for_files_active(files):
+    """Waits for the given files to be active."""
+    print("Waiting for file processing...")
+    for name in (file.name for file in files):
+        file = genai.get_file(name)
+        while file.state.name == "PROCESSING":
+            print(".", end="", flush=True)
+            time.sleep(10)
+            file = genai.get_file(name)
+        if file.state.name != "ACTIVE":
+            raise Exception(f"File {file.name} failed to process")
+    print("...all files ready")
+    print()
+
 def upload(files: Optional[List[str]], chatbot: CHAT_HISTORY) -> CHAT_HISTORY:
-    """Uploads the files and adds them to the chat history."""
+    gemini_files = []
     for file in files:
-        if file.name.endswith(('.jpg', '.jpeg', '.png')):
-            image = Image.open(file).convert('RGB')
-            image_preview = preprocess_image(image)
-            if image_preview:
-                # Show a preview of the uploaded image
-                gr.Image(image_preview).render()
-            image_path = cache_pil_image(image)
-            chatbot.append(((image_path,), None))
-        else:
-            # If it is a PDF or another file type, only the path is stored
-            chatbot.append(((file.name,), None))
+        image = Image.open(file).convert('RGB')
+        image_preview = preprocess_image(image)
+        if image_preview:
+            # Display a preview of the uploaded image
+            gr.Image(image_preview).render()
+        image_path = cache_pil_image(image)
+        gemini_file = upload_to_gemini(image_path, mime_type="image/jpeg")
+        gemini_files.append(gemini_file)
+
+        # Wait for files to be ready in Gemini
+        wait_for_files_active(gemini_files)
+        chatbot.append(((gemini_file.uri,), None))
     return chatbot
 
 def user(text_prompt: str, chatbot: CHAT_HISTORY):
-    """Processes the user input and adds it to the history."""
     if text_prompt:
         chatbot.append((text_prompt, None))
     return "", chatbot
@@ -68,27 +86,22 @@ def bot(
     model_choice: str,
     chatbot: CHAT_HISTORY
 ):
-    """Generates a response using the Gemini API."""
     if not GOOGLE_API_KEY:
         raise ValueError("GOOGLE_API_KEY is not set.")
-
+
     # Configure the API with the key
     genai.configure(api_key=GOOGLE_API_KEY)
     generation_config = genai.types.GenerationConfig(
-        temperature=0.7,
-        max_output_tokens=8192,
-        top_k=10,
-        top_p=0.9
+        temperature=0.7,  # Default value
+        max_output_tokens=8192,  # Set the token limit to 8,192
+        top_k=10,  # Default value
+        top_p=0.9  # Default value
     )
 
-    # Process the files
     text_prompt = [chatbot[-1][0]] if chatbot and chatbot[-1][0] and isinstance(chatbot[-1][0], str) else []
-    image_prompt = [preprocess_image(Image.open(file).convert('RGB')) for file in files if file.name.endswith(('.jpg', '.jpeg', '.png'))] if files else []
-    pdf_prompt = [file.name for file in files if file.name.endswith('.pdf')] if files else []
-
-    # Create the model
+    image_prompt = [preprocess_image(Image.open(file).convert('RGB')) for file in files] if files else []
     model = genai.GenerativeModel(model_choice)
-    response = model.generate_content(text_prompt + image_prompt + pdf_prompt, stream=True, generation_config=generation_config)
+    response = model.generate_content(text_prompt + image_prompt, stream=True, generation_config=generation_config)
 
     chatbot[-1][1] = ""
     for chunk in response:
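The reworked bot() drops the PDF branch and sends the text prompt plus preprocessed PIL images in a single streaming call. A minimal sketch of the same generate_content pattern, assuming a configured API key and an arbitrary model name, and using a text-only prompt for brevity:

import google.generativeai as genai

generation_config = genai.types.GenerationConfig(
    temperature=0.7, max_output_tokens=8192, top_k=10, top_p=0.9
)
model = genai.GenerativeModel("gemini-pro")  # example model name, not from the diff
response = model.generate_content(
    ["Summarize this app in one sentence."],  # text-only prompt for brevity
    stream=True,
    generation_config=generation_config,
)

answer = ""
for chunk in response:
    answer += chunk.text  # each chunk carries the next slice of the reply
print(answer)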
@@ -98,7 +111,6 @@ def bot(
         time.sleep(0.01)
         yield chatbot
 
-# Gradio user interface components
 chatbot_component = gr.Chatbot(
     label='Gemini',
     bubble_full_width=False,
@@ -109,7 +121,7 @@ text_prompt_component = gr.Textbox(
     placeholder="Message...", show_label=False, autofocus=True, scale=8
 )
 upload_button_component = gr.UploadButton(
-    label="Upload Files", file_count="multiple", file_types=["image", "pdf"], scale=1
+    label="Upload Images", file_count="multiple", file_types=["image"], scale=1
 )
 run_button_component = gr.Button(value="Run", variant="primary", scale=1)
 model_choice_component = gr.Dropdown(
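The upload button is narrowed to images only. The event wiring that connects it to the upload() handler sits outside the hunks of this diff, so the hookup below is only an assumption based on the component and function names shown above:

# Assumed wiring (not shown in this diff): feed the uploaded files and the
# current history into upload(), and write the returned history back.
upload_button_component.upload(
    fn=upload,
    inputs=[upload_button_component, chatbot_component],
    outputs=[chatbot_component],
    queue=False,
)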
@@ -131,8 +143,8 @@ bot_inputs = [
 ]
 
 with gr.Blocks() as demo:
-    gr.HTML("<h1 align='center'>Gemini Playground ✨</h1>")
-    gr.HTML("<h2 align='center'>Play with Gemini Pro and Gemini Pro Vision</h2>")
+    gr.HTML(TITLE)
+    gr.HTML(SUBTITLE)
     with gr.Column():
         chatbot_component.render()
     with gr.Row():
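TITLE and SUBTITLE are referenced but not defined in any hunk of this commit; presumably they are module-level constants elsewhere in app.py carrying the same headings the removed inline gr.HTML calls used, roughly:

# Hypothetical definitions; the actual ones live outside this diff.
TITLE = "<h1 align='center'>Gemini Playground ✨</h1>"
SUBTITLE = "<h2 align='center'>Play with Gemini Pro and Gemini Pro Vision</h2>"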
@@ -169,3 +181,4 @@ with gr.Blocks() as demo:
 demo.queue(max_size=99).launch(debug=False, show_error=True)
 
 
+
 
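For reference, the CHAT_HISTORY alias in the first hunk describes the list the Chatbot component consumes: a text turn is a plain string, while a file turn is a one-element tuple, which is why upload() appends ((gemini_file.uri,), None). A small literal example with placeholder values:

# Hypothetical history after one image upload and one answered question.
history = [
    (("files/abc123-placeholder-uri",), None),                    # file turn from upload()
    ("What is in this image?", "A 512px-wide resized photo."),    # text turn from user()/bot()
]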