JeCabrera committed on
Commit 5766f55 · verified · 1 Parent(s): 18bc406

Update app.py

Files changed (1)
  1. app.py +36 -141
app.py CHANGED
@@ -1,149 +1,44 @@
-import os
 import time
-import uuid
-from typing import List, Tuple, Optional
-import google.generativeai as genai
 import gradio as gr
-from PIL import Image
-from dotenv import load_dotenv
-
-# Load environment variables from the .env file
-load_dotenv()
-
-print("google-generativeai:", genai.__version__)
 
 GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
 
 if not GOOGLE_API_KEY:
     raise ValueError("GOOGLE_API_KEY is not set in environment variables.")
 
-IMAGE_CACHE_DIRECTORY = "/tmp"
-IMAGE_WIDTH = 512
-
-def preprocess_image(image: Image.Image) -> Optional[Image.Image]:
-    if image:
-        image_height = int(image.height * IMAGE_WIDTH / image.width)
-        return image.resize((IMAGE_WIDTH, image_height))
-
-def cache_pil_image(image: Image.Image) -> str:
-    image_filename = f"{uuid.uuid4()}.jpeg"
-    os.makedirs(IMAGE_CACHE_DIRECTORY, exist_ok=True)
-    image_path = os.path.join(IMAGE_CACHE_DIRECTORY, image_filename)
-    image.save(image_path, "JPEG")
-    return image_path
-
-def upload(files: Optional[List[str]], chatbot: List[tuple]) -> List[tuple]:
-    for file in files:
-        image = Image.open(file).convert('RGB')
-        image_preview = preprocess_image(image)
-        if image_preview:
-            gr.Image(image_preview).render()
-        image_path = cache_pil_image(image)
-        chatbot.append(((image_path,), None))
-    return chatbot
-
-def user(text_prompt: str, chatbot: List[tuple]) -> Tuple[str, List[tuple]]:
-    if text_prompt:
-        chatbot.append((text_prompt, None))
-    return "", chatbot
-
-def bot(
-    files: Optional[List[str]],
-    model_choice: str,
-    system_instruction: Optional[str],
-    chatbot: List[tuple]
-):
-    if not GOOGLE_API_KEY:
-        raise ValueError("GOOGLE_API_KEY is not set.")
-
-    genai.configure(api_key=GOOGLE_API_KEY)
-    generation_config = genai.types.GenerationConfig(
-        temperature=0.7,
-        max_output_tokens=8192,
-        top_k=10,
-        top_p=0.9
-    )
-
-    if not system_instruction:
-        system_instruction = "No system instruction provided."
-
-    text_prompt = [chatbot[-1][0]] if chatbot and chatbot[-1][0] and isinstance(chatbot[-1][0], str) else []
-    image_prompt = [preprocess_image(Image.open(file).convert('RGB')) for file in files] if files else []
-
-    model = genai.GenerativeModel(
-        model_name=model_choice,
-        generation_config=generation_config,
-        system_instruction=system_instruction
-    )
-
-    response = model.generate_content(text_prompt + image_prompt, stream=True, generation_config=generation_config)
-
-    chatbot[-1][1] = ""
-    for chunk in response:
-        for i in range(0, len(chunk.text), 10):
-            section = chunk.text[i:i + 10]
-            chatbot[-1][1] += section
-            time.sleep(0.01)
-            yield chatbot
-
-system_instruction_component = gr.Textbox(
-    placeholder="Enter system instruction...",
-    show_label=True,
-    scale=8
-)
-
-chatbot_component = gr.Chatbot(label='Gemini', bubble_full_width=False, scale=2, height=300)
-text_prompt_component = gr.Textbox(placeholder="Message...", show_label=False, autofocus=True, scale=8)
-upload_button_component = gr.UploadButton(label="Upload Images", file_count="multiple", file_types=["image"], scale=1)
-run_button_component = gr.Button(value="Run", variant="primary", scale=1)
-model_choice_component = gr.Dropdown(
-    choices=["gemini-1.5-flash", "gemini-2.0-flash-exp", "gemini-1.5-pro"],
-    value="gemini-1.5-flash",
-    label="Select Model",
-    scale=2
-)
-
-user_inputs = [text_prompt_component, chatbot_component]
-bot_inputs = [upload_button_component, model_choice_component, system_instruction_component, chatbot_component]
-
-with gr.Blocks() as demo:
-    gr.HTML("<h1 align='center'>Gemini Playground ✨</h1>")
-    gr.HTML("<h2 align='center'>Play with Gemini Pro and Gemini Pro Vision</h2>")
-
-    with gr.Column():
-        model_choice_component.render()
-        chatbot_component.render()
-        with gr.Row():
-            text_prompt_component.render()
-            upload_button_component.render()
-            run_button_component.render()
-
-    with gr.Accordion("System Instruction", open=False):
-        system_instruction_component.render()
-
-    run_button_component.click(
-        fn=user,
-        inputs=user_inputs,
-        outputs=[text_prompt_component, chatbot_component],
-        queue=False
-    ).then(
-        fn=bot, inputs=bot_inputs, outputs=[chatbot_component],
-    )
-
-    text_prompt_component.submit(
-        fn=user,
-        inputs=user_inputs,
-        outputs=[text_prompt_component, chatbot_component],
-        queue=False
-    ).then(
-        fn=bot, inputs=bot_inputs, outputs=[chatbot_component],
-    )
-
-    upload_button_component.upload(
-        fn=upload,
-        inputs=[upload_button_component, chatbot_component],
-        outputs=[chatbot_component],
-        queue=False
-    )
-
-demo.queue(max_size=99).launch(debug=False, show_error=True)

 import time
 import gradio as gr
+import google.generativeai as genai
+import os
 
+# Load the Gemini API key from the environment
 GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
 
 if not GOOGLE_API_KEY:
     raise ValueError("GOOGLE_API_KEY is not set in environment variables.")
 
+# Configure the Gemini API
+genai.configure(api_key=GOOGLE_API_KEY)
+
+# Initialize the Gemini chat session
+chat = genai.GenerativeModel("gemini-1.5-flash").start_chat(history=[])
+
+# Transform the Gradio chat history into the format Gemini expects
+def transform_history(history):
+    new_history = []
+    for chat in history:
+        new_history.append({"parts": [{"text": chat[0]}], "role": "user"})
+        new_history.append({"parts": [{"text": chat[1]}], "role": "model"})
+    return new_history
+
+# Generate a response with Gemini
+def response(message, history):
+    global chat
+    # Update the chat history with the correct format
+    chat.history = transform_history(history)
+    # Send the message to the Gemini API
+    response = chat.send_message(message)
+    response.resolve()
+
+    # Show the response while it is "typed out"
+    for i in range(len(response.text)):
+        time.sleep(0.05)
+        yield response.text[: i + 1]
+
+# Gradio user interface
+gr.ChatInterface(response,
+                 title='Gemini Chat',
+                 textbox=gr.Textbox(placeholder="Pregunta a Gemini"),
+                 retry_btn=None).launch(debug=True)
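
For reference, a minimal sketch of what the new transform_history helper produces: it maps Gradio's (user_message, bot_message) tuple history into the role/parts structure that the Gemini chat session expects. The sample messages below are made up for illustration, and the snippet only assumes the Python standard library.

from pprint import pprint

# Hypothetical sample history in Gradio's (user_message, bot_message) tuple format.
sample_history = [
    ("Hola", "Hi! How can I help you today?"),
]

def transform_history(history):
    # Same mapping as in the new app.py: one "user" entry and one "model" entry per tuple.
    new_history = []
    for chat in history:
        new_history.append({"parts": [{"text": chat[0]}], "role": "user"})
        new_history.append({"parts": [{"text": chat[1]}], "role": "model"})
    return new_history

pprint(transform_history(sample_history))
# Expected output (roughly):
# [{'parts': [{'text': 'Hola'}], 'role': 'user'},
#  {'parts': [{'text': 'Hi! How can I help you today?'}], 'role': 'model'}]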