JeCabrera committed · verified
Commit fabf5df · Parent: 4cca355

Update app.py

Files changed (1): app.py (+157, -68)
app.py CHANGED
@@ -1,73 +1,162 @@
- import gradio as gr
- import google.generativeai as genai
  import os
- from pypdf import PdfReader

- # Load the API key from the environment
- GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY")
  if not GOOGLE_API_KEY:
-     raise ValueError("La variable de entorno GOOGLE_API_KEY no está definida.")
-
- genai.configure(api_key=GOOGLE_API_KEY)
-
- def read_text_file(filepath):
-     with open(filepath, 'r', encoding='utf-8') as f:
-         return f.read()
-
- def read_pdf_file(filepath):
-     text = ""
-     with open(filepath, 'rb') as f:
-         reader = PdfReader(f)
-         for page in reader.pages:
-             text += page.extract_text() + "\n"
-     return text
-
- def process_input(instruction, text_file, pdf_file, image_file):
-     prompt_parts = []
-
-     if instruction:
-         prompt_parts.append(instruction)
-
-     if text_file:
-         file_content = read_text_file(text_file.name)
-         prompt_parts.append(f"Contenido del archivo de texto:\n{file_content}")
-
-     if pdf_file:
-         file_content = read_pdf_file(pdf_file.name)
-         prompt_parts.append(f"Contenido del archivo PDF:\n{file_content}")
-
-     if image_file:
-         # For the vision model, the image needs to be passed as data
-         prompt_parts.append(f"Imagen: {image_file.name}")
-         response = genai.ChatCompletion.create(
-             model="gemini-1.5-flash",
-             messages=[{"role": "user", "content": "Contenido de la imagen: " + image_file.name}],
-             max_tokens=8192,
-         )
-         return response['choices'][0]['message']['content']
-
-     if prompt_parts:
-         prompt = "\n\n".join(prompt_parts)
-         response = genai.ChatCompletion.create(
-             model="gemini-1.5-flash",
-             messages=[{"role": "user", "content": prompt}],
-             max_tokens=8192,
-         )
-         return response['choices'][0]['message']['content']
-     else:
-         return "Por favor, proporciona alguna instrucción o archivo."
-
- iface = gr.Interface(
-     fn=process_input,
-     inputs=[
-         gr.Textbox(label="Instrucciones"),
-         gr.File(label="Archivo de Texto (.txt)"),
-         gr.File(label="Archivo PDF (.pdf)"),
-         gr.Image(label="Imagen", type="filepath"),  # Changed the type to 'filepath'
-     ],
-     outputs=gr.Textbox(label="Respuesta de Gemini"),
-     title="Interactúa con Gemini",
-     description="Sube archivos de texto, PDF o imágenes y proporciona instrucciones para que Gemini los procese.",
  )

- iface.launch()
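
The removed process_input() above calls genai.ChatCompletion.create(...), which is not part of the google.generativeai SDK (it mimics the OpenAI client interface), so those branches could not have run as written and the file is rewritten below. For reference, a minimal sketch (not part of this commit) of the equivalent call through the SDK's actual GenerativeModel.generate_content API; ask_gemini is a hypothetical helper name, not code from this repository:

# Hypothetical sketch, not part of the commit: the removed text/image branches
# rewritten against the real google.generativeai API.
import os
from typing import Optional

import google.generativeai as genai
from PIL import Image

genai.configure(api_key=os.environ["GOOGLE_API_KEY"])
model = genai.GenerativeModel("gemini-1.5-flash")

def ask_gemini(prompt: str, image_path: Optional[str] = None) -> str:
    # Text and, optionally, an image are passed together as content parts.
    parts = [prompt]
    if image_path:
        parts.append(Image.open(image_path))
    response = model.generate_content(parts)
    return response.text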
+ TITLE = """<h1 align="center">Gemini Playground ✨</h1>"""
+ SUBTITLE = """<h2 align="center">Play with Gemini Pro and Gemini Pro Vision</h2>"""
+
  import os
+ import time
+ import uuid
+ from typing import List, Tuple, Optional, Union
+
+ import google.generativeai as genai
+ import gradio as gr
+ from PIL import Image
+ from dotenv import load_dotenv
+
+ # Load the environment variables from the .env file
+ load_dotenv()
+
+ print("google-generativeai:", genai.__version__)

+ # Get the API key from the environment variables
+ GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
+
+ # Check that the API key is set
  if not GOOGLE_API_KEY:
+     raise ValueError("GOOGLE_API_KEY is not set in environment variables.")
+
+ IMAGE_CACHE_DIRECTORY = "/tmp"
+ IMAGE_WIDTH = 512
+ CHAT_HISTORY = List[Tuple[Optional[Union[Tuple[str], str]], Optional[str]]]
+
+ def preprocess_image(image: Image.Image) -> Optional[Image.Image]:
+     if image:
+         image_height = int(image.height * IMAGE_WIDTH / image.width)
+         return image.resize((IMAGE_WIDTH, image_height))
+
+ def cache_pil_image(image: Image.Image) -> str:
+     image_filename = f"{uuid.uuid4()}.jpeg"
+     os.makedirs(IMAGE_CACHE_DIRECTORY, exist_ok=True)
+     image_path = os.path.join(IMAGE_CACHE_DIRECTORY, image_filename)
+     image.save(image_path, "JPEG")
+     return image_path
+
+ def upload(files: Optional[List[str]], chatbot: CHAT_HISTORY) -> CHAT_HISTORY:
+     for file in files:
+         image = Image.open(file).convert('RGB')
+         image_preview = preprocess_image(image)
+         if image_preview:
+             gr.Image(image_preview).render()
+         image_path = cache_pil_image(image)
+         chatbot.append(((image_path,), None))
+     return chatbot
+
+ def user(text_prompt: str, chatbot: CHAT_HISTORY):
+     if text_prompt:
+         chatbot.append((text_prompt, None))
+     return "", chatbot
+
+ def bot(
+     files: Optional[List[str]],
+     model_choice: str,
+     system_instruction: Optional[str],  # Optional system instruction
+     chatbot: CHAT_HISTORY
+ ):
+     if not GOOGLE_API_KEY:
+         raise ValueError("GOOGLE_API_KEY is not set.")
+
+     genai.configure(api_key=GOOGLE_API_KEY)
+     generation_config = genai.types.GenerationConfig(
+         temperature=0.7,
+         max_output_tokens=8192,
+         top_k=10,
+         top_p=0.9
+     )
+
+     # Use the default value for system_instruction if it is empty
+     if not system_instruction:
+         system_instruction = "1"  # Or set a default such as "No system instruction provided."
+
+     text_prompt = [chatbot[-1][0]] if chatbot and chatbot[-1][0] and isinstance(chatbot[-1][0], str) else []
+     image_prompt = [preprocess_image(Image.open(file).convert('RGB')) for file in files] if files else []
+
+     model = genai.GenerativeModel(
+         model_name=model_choice,
+         generation_config=generation_config,
+         system_instruction=system_instruction  # Use the default value if it is empty
+     )
+
+     response = model.generate_content(text_prompt + image_prompt, stream=True, generation_config=generation_config)
+
+     chatbot[-1][1] = ""
+     for chunk in response:
+         for i in range(0, len(chunk.text), 10):
+             section = chunk.text[i:i + 10]
+             chatbot[-1][1] += section
+             time.sleep(0.01)
+             yield chatbot
+
+ # Component for the accordion that holds the system instruction textbox
+ system_instruction_component = gr.Textbox(
+     placeholder="Enter system instruction...",
+     show_label=True,
+     scale=8
+ )
+
+ # Define the input and output components
+ chatbot_component = gr.Chatbot(label='Gemini', bubble_full_width=False, scale=2, height=300)
+ text_prompt_component = gr.Textbox(placeholder="Message...", show_label=False, autofocus=True, scale=8)
+ upload_button_component = gr.UploadButton(label="Upload Images", file_count="multiple", file_types=["image"], scale=1)
+ run_button_component = gr.Button(value="Run", variant="primary", scale=1)
+ model_choice_component = gr.Dropdown(
+     choices=["gemini-1.5-flash", "gemini-2.0-flash-exp", "gemini-1.5-pro"],
+     value="gemini-1.5-flash",
+     label="Select Model",
+     scale=2
  )

+ user_inputs = [text_prompt_component, chatbot_component]
+ bot_inputs = [upload_button_component, model_choice_component, system_instruction_component, chatbot_component]
+
+ # Define the user interface
+ with gr.Blocks() as demo:
+     gr.HTML(TITLE)
+     gr.HTML(SUBTITLE)
+     with gr.Column():
+         # Model selection field at the top
+         model_choice_component.render()
+         chatbot_component.render()
+         with gr.Row():
+             text_prompt_component.render()
+             upload_button_component.render()
+             run_button_component.render()
+
+     # Create the accordion for the system instruction at the end
+     with gr.Accordion("System Instruction", open=False):  # Accordion closed by default
+         system_instruction_component.render()
+
+     run_button_component.click(
+         fn=user,
+         inputs=user_inputs,
+         outputs=[text_prompt_component, chatbot_component],
+         queue=False
+     ).then(
+         fn=bot, inputs=bot_inputs, outputs=[chatbot_component],
+     )
+
+     text_prompt_component.submit(
+         fn=user,
+         inputs=user_inputs,
+         outputs=[text_prompt_component, chatbot_component],
+         queue=False
+     ).then(
+         fn=bot, inputs=bot_inputs, outputs=[chatbot_component],
+     )
+
+     upload_button_component.upload(
+         fn=upload,
+         inputs=[upload_button_component, chatbot_component],
+         outputs=[chatbot_component],
+         queue=False
+     )
+
+ # Launch the application
+ demo.queue(max_size=99).launch(debug=False, show_error=True)
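
For reference, a minimal standalone sketch (not part of this commit) of the streaming call that the new bot() builds, using the same GenerationConfig values; it assumes GOOGLE_API_KEY is set in the environment:

# Hypothetical sketch: stream a Gemini response outside the Gradio UI,
# mirroring the generation settings used in bot().
import os

import google.generativeai as genai

genai.configure(api_key=os.environ["GOOGLE_API_KEY"])

generation_config = genai.types.GenerationConfig(
    temperature=0.7,
    max_output_tokens=8192,
    top_k=10,
    top_p=0.9,
)
model = genai.GenerativeModel(model_name="gemini-1.5-flash", generation_config=generation_config)

response = model.generate_content(["Say hello in one short sentence."], stream=True)
for chunk in response:
    # Chunks arrive incrementally; bot() slices them further for a typing effect.
    print(chunk.text, end="", flush=True)
print()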