Update app.py
app.py
CHANGED
@@ -1,3 +1,223 @@
+# """
+# References:
+# - https://medium.com/@turna.fardousi/building-a-multimodal-chatbot-with-gemini-api-8015bfbee538
+# """
+
+# import os
+# import time
+# from typing import List, Tuple, Optional
+# import google.generativeai as genai
+# import gradio as gr
+# from PIL import Image
+# from dotenv import load_dotenv
+
+# load_dotenv()
+
+# GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
+
+# TITLE = """<h1 align="center">🎮Chat with Gemini 1.5🔥 -- Beta Preview</h1>"""
+# NOTICE = """
+# Notices 📜:
+# - This app is still in development
+# - Some features may not work as expected
+# """
+# ABOUT = """
+# Updates (2024-8-12): Created the App
+
+# Info:
+# - Model: Gemini 1.5 Flash
+# """
+# ERRORS = """
+# Known errors ⚠️:
+# """
+# FUTURE_IMPLEMENTATIONS = """
+# To be implemented 🚀:
+# - Select other Gemini / Gemma models
+# - Upload files
+# - More tools other than web search
+# """
+# IMAGE_WIDTH = 512
+
+# def preprocess_stop_sequences(stop_sequences: str) -> Optional[List[str]]:
+#     return [seq.strip() for seq in stop_sequences.split(",")] if stop_sequences else None
+
+# def preprocess_image(image: Image.Image) -> Image.Image:
+#     image_height = int(image.height * IMAGE_WIDTH / image.width)
+#     return image.resize((IMAGE_WIDTH, image_height))
+
+# def user(text_prompt: str, chatbot: List[Tuple[str, str]]):
+#     return "", chatbot + [[text_prompt, None]]
+
+# def bot(
+#     google_key: str,
+#     image_prompt: Optional[Image.Image],
+#     temperature: float,
+#     max_output_tokens: int,
+#     stop_sequences: str,
+#     top_k: int,
+#     top_p: float,
+#     chatbot: List[Tuple[str, str]]
+# ):
+#     google_key = google_key or GEMINI_API_KEY
+#     if not google_key:
+#         raise ValueError("GOOGLE_API_KEY is not set. Please set it up.")
+
+#     text_prompt = chatbot[-1][0]
+#     genai.configure(api_key=google_key)
+#     generation_config = genai.types.GenerationConfig(
+#         temperature=temperature,
+#         max_output_tokens=max_output_tokens,
+#         stop_sequences=preprocess_stop_sequences(stop_sequences),
+#         top_k=top_k,
+#         top_p=top_p,
+#     )
+
+#     model_name = "gemini-1.5-flash" # if image_prompt is None else "gemini-pro-vision"
+#     model = genai.GenerativeModel(model_name)
+#     inputs = [text_prompt] if image_prompt is None else [text_prompt, preprocess_image(image_prompt)]
+
+#     response = model.generate_content(inputs, stream=True, generation_config=generation_config)
+#     response.resolve()
+
+#     chatbot[-1][1] = ""
+#     for chunk in response:
+#         for i in range(0, len(chunk.text), 10):
+#             chatbot[-1][1] += chunk.text[i:i + 10]
+#             time.sleep(0.01)
+#             yield chatbot
+
+# google_key_component = gr.Textbox(
+#     label = "GOOGLE API KEY",
+#     type = "password",
+#     placeholder = "...",
+#     visible = GEMINI_API_KEY is None
+# )
+
+# image_prompt_component = gr.Image(
+#     type = "pil",
+#     label = "Image"
+# )
+# chatbot_component = gr.Chatbot(
+#     # label = 'Gemini',
+#     bubble_full_width = False
+# )
+# text_prompt_component = gr.Textbox(
+#     placeholder = "Chat with Gemini",
+#     label = "Ask me anything and press Enter"
+# )
+# run_button_component = gr.Button(
+#     "Run"
+# )
+# temperature_component = gr.Slider(
+#     minimum = 0,
+#     maximum = 1.0,
+#     value = 0.5,
+#     step = 0.05,
+#     label = "Temperature"
+# )
+# max_output_tokens_component = gr.Slider(
+#     minimum = 1,
+#     maximum = 8192,
+#     value = 4096,
+#     step = 1,
+#     label = "Max Output Tokens"
+# )
+# stop_sequences_component = gr.Textbox(
+#     label = "Add stop sequence",
+#     placeholder = "STOP, END"
+# )
+# top_k_component = gr.Slider(
+#     minimum = 1,
+#     maximum = 40,
+#     value = 32,
+#     step = 1,
+#     label = "Top-K"
+# )
+# top_p_component = gr.Slider(
+#     minimum = 0,
+#     maximum = 1,
+#     value = 1,
+#     step = 0.01,
+#     label = "Top-P"
+# )
+
+# user_inputs = [
+#     text_prompt_component,
+#     chatbot_component
+# ]
+# bot_inputs = [
+#     google_key_component,
+#     image_prompt_component,
+#     temperature_component,
+#     max_output_tokens_component,
+#     stop_sequences_component,
+#     top_k_component,
+#     top_p_component,
+#     chatbot_component
+# ]
+
+# with gr.Blocks(theme = gr.themes.Soft()) as demo:
+#     with gr.Tab("Chat with Gemini 1.5 Flash"):
+#         gr.HTML(TITLE)
+#         with gr.Row():
+#             gr.Markdown(NOTICE)
+#             gr.Markdown(ABOUT)
+#             gr.Markdown(ERRORS)
+#             gr.Markdown(FUTURE_IMPLEMENTATIONS)
+#         with gr.Column():
+#             google_key_component.render()
+#             with gr.Row():
+#                 image_prompt_component.render()
+#                 chatbot_component.render()
+#             text_prompt_component.render()
+#             run_button_component.render()
+#             with gr.Accordion("Parameters", open=False):
+#                 temperature_component.render()
+#                 max_output_tokens_component.render()
+#                 stop_sequences_component.render()
+#                 with gr.Accordion("Advanced", open=False):
+#                     top_k_component.render()
+#                     top_p_component.render()
+
+#         run_button_component.click(
+#             fn = user,
+#             inputs = user_inputs,
+#             outputs = [
+#                 text_prompt_component,
+#                 chatbot_component
+#             ],
+#             queue = False
+#         ).then(
+#             fn = bot,
+#             inputs = bot_inputs,
+#             outputs = [
+#                 chatbot_component
+#             ]
+#         )
+#         text_prompt_component.submit(
+#             fn = user,
+#             inputs = user_inputs,
+#             outputs = [
+#                 text_prompt_component,
+#                 chatbot_component
+#             ],
+#             queue = False
+#         ).then(
+#             fn = bot,
+#             inputs = bot_inputs,
+#             outputs = [
+#                 chatbot_component
+#             ]
+#         )
+#     with gr.Tab("Chat with Gemma 2"):
+#         gr.HTML(
+#             """
+#             <h1 align="center">Still in development</h1>
+#             """
+#         )
+
+# demo.queue().launch(debug = True, show_error = True)
+
 """
 References:
 - https://medium.com/@turna.fardousi/building-a-multimodal-chatbot-with-gemini-api-8015bfbee538
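For reference, the two preprocessing helpers preserved (commented out) above can be exercised standalone. A minimal sketch; the input values in the asserts are illustrative, not taken from the app:

from typing import List, Optional
from PIL import Image

IMAGE_WIDTH = 512

def preprocess_stop_sequences(stop_sequences: str) -> Optional[List[str]]:
    # "STOP, END" -> ["STOP", "END"]; an empty string -> None
    return [seq.strip() for seq in stop_sequences.split(",")] if stop_sequences else None

def preprocess_image(image: Image.Image) -> Image.Image:
    # Resize to a fixed 512 px width, preserving the aspect ratio.
    image_height = int(image.height * IMAGE_WIDTH / image.width)
    return image.resize((IMAGE_WIDTH, image_height))

assert preprocess_stop_sequences("STOP, END") == ["STOP", "END"]
assert preprocess_image(Image.new("RGB", (1024, 768))).size == (512, 384)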
@@ -76,15 +296,12 @@ def bot(
     model = genai.GenerativeModel(model_name)
     inputs = [text_prompt] if image_prompt is None else [text_prompt, preprocess_image(image_prompt)]
 
+    # Use gr.ChatInference for streaming response
     response = model.generate_content(inputs, stream=True, generation_config=generation_config)
-    response.resolve()
-
     chatbot[-1][1] = ""
     for chunk in response:
-        for i in range(0, len(chunk.text), 10):
-            chatbot[-1][1] += chunk.text[i:i + 10]
-            time.sleep(0.01)
-            yield chatbot
+        chatbot[-1][1] += chunk.text
+        yield chatbot
 
 google_key_component = gr.Textbox(
     label = "GOOGLE API KEY",
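The hunk above also removes response.resolve(): resolve() drains the whole stream before returning, which defeats stream=True, so dropping it is what lets the chunk loop stream incrementally. A minimal standalone sketch of the same pattern with the google-generativeai client, assuming the package is installed and GEMINI_API_KEY is set in the environment:

import os
import google.generativeai as genai

genai.configure(api_key=os.environ["GEMINI_API_KEY"])
model = genai.GenerativeModel("gemini-1.5-flash")

# stream=True yields response chunks as they arrive; accumulating
# chunk.text mirrors the chatbot[-1][1] += chunk.text line above.
reply = ""
for chunk in model.generate_content("Say hello.", stream=True):
    reply += chunk.text
    print(chunk.text, end="", flush=True)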
@@ -179,35 +396,30 @@ with gr.Blocks(theme = gr.themes.Soft()) as demo:
                     top_k_component.render()
                     top_p_component.render()
 
+        # Use gr.ChatInference for streaming response
+        chat_inference = gr.ChatInference(
+            user, bot,
+            [text_prompt_component, chatbot_component],
+            [chatbot_component]
+        )
+        chat_inference.chatbot = chatbot_component
+        chat_inference.api_key = google_key_component
+        chat_inference.image_prompt = image_prompt_component
+        chat_inference.temperature = temperature_component
+        chat_inference.max_output_tokens = max_output_tokens_component
+        chat_inference.stop_sequences = stop_sequences_component
+        chat_inference.top_k = top_k_component
+        chat_inference.top_p = top_p_component
+
         run_button_component.click(
-            fn = user,
-            inputs = user_inputs,
-            outputs = [
-                text_prompt_component,
-                chatbot_component
-            ],
-            queue = False
-        ).then(
-            fn = bot,
-            inputs = bot_inputs,
-            outputs = [
-                chatbot_component
-            ]
+            fn=chat_inference.submit,
+            inputs=user_inputs,
+            outputs=[text_prompt_component, chatbot_component]
         )
         text_prompt_component.submit(
-            fn = user,
-            inputs = user_inputs,
-            outputs = [
-                text_prompt_component,
-                chatbot_component
-            ],
-            queue = False
-        ).then(
-            fn = bot,
-            inputs = bot_inputs,
-            outputs = [
-                chatbot_component
-            ]
+            fn=chat_inference.submit,
+            inputs=user_inputs,
+            outputs=[text_prompt_component, chatbot_component]
        )
         with gr.Tab("Chat with Gemma 2"):
             gr.HTML(
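A caution on the wiring above: gr.ChatInference is not a class in Gradio's documented API (the closest documented name is gr.ChatInterface, which takes a single fn rather than a user/bot pair and is not configured by attribute assignment), so this hunk will likely fail with an AttributeError at startup. A sketch of equivalent behavior using the documented event chaining the commented-out version already used; user, bot, user_inputs, bot_inputs, and the components are the names defined earlier in this file:

# user clears the textbox and appends the prompt to the history;
# bot is a generator, so each yield streams an update to the chatbot.
run_button_component.click(
    fn=user,
    inputs=user_inputs,
    outputs=[text_prompt_component, chatbot_component],
    queue=False
).then(
    fn=bot,
    inputs=bot_inputs,
    outputs=[chatbot_component]
)
text_prompt_component.submit(
    fn=user,
    inputs=user_inputs,
    outputs=[text_prompt_component, chatbot_component],
    queue=False
).then(
    fn=bot,
    inputs=bot_inputs,
    outputs=[chatbot_component]
)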