Updated Nightly channel
- Tabs/Gemini_Chabot_Nightly.py +98 -0
- app.py +76 -352
Tabs/Gemini_Chabot_Nightly.py
ADDED
@@ -0,0 +1,98 @@
+import os
+import time
+from typing import List, Tuple, Optional
+import google.generativeai as genai
+import gradio as gr
+from PIL import Image
+from dotenv import load_dotenv
+
+load_dotenv()
+
+GEMINI_API_KEY_NIGHTLY = os.getenv("GEMINI_API_KEY_NIGHTLY")
+model_name = "gemini-1.5-flash"
+
+TITLE_NIGHTLY = """<h1 align="center">🎮Chat with Gemini 1.5🔥 -- Beta Preview</h1>"""
+NOTICE_NIGHTLY = """
+Notices 📜:
+- This app is still in development (extremely unstable)
+- Some features may not work as expected
+"""
+
+def upload_to_gemini(path, mime_type=None):
+    file = genai.upload_file(path, mime_type=mime_type)
+    print(f"Uploaded file '{file.display_name}' as: {file.uri}")
+    return file
+
+def transform_history(history):
+    new_history = []
+    for user_msg, model_msg in history:
+        new_history.append({"role": "user", "parts": [{"text": user_msg}]})
+        new_history.append({"role": "model", "parts": [{"text": model_msg}]})
+    return new_history
+
+def chatbot_stable(message, history):
+    message_text = message["text"]
+    message_files = message["files"]
+    print("Message text:", message_text)
+    print("Message files:", message_files)
+    if message_files:
+        image_uris = [upload_to_gemini(file_path["path"]) for file_path in message_files]
+        message_content = [{"text": message_text}] + image_uris
+    else:
+        message_content = {"text": message_text}
+    genai.configure(api_key=GEMINI_API_KEY_NIGHTLY)
+    model = genai.GenerativeModel(
+        model_name,
+        # safety_settings=[
+        #     {
+        #         "category": "HARM_CATEGORY_HARASSMENT",
+        #         "threshold": "BLOCK_NONE"
+        #     },
+        #     {
+        #         "category": "HARM_CATEGORY_HATE_SPEECH",
+        #         "threshold": "BLOCK_NONE"
+        #     },
+        #     {
+        #         "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+        #         "threshold": "BLOCK_NONE"
+        #     },
+        #     {
+        #         "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
+        #         "threshold": "BLOCK_NONE"
+        #     },
+        # ],
+        generation_config={
+            "temperature": 1,
+            "top_p": 0.95,
+            "top_k": 64,
+            "max_output_tokens": 8192,
+            "response_mime_type": "text/plain",
+        }
+    )
+
+    global chat
+    chat = model.start_chat(history=[])
+    chat.history = transform_history(history)
+    response = chat.send_message(message_content)
+    response.resolve()
+
+    return response.text
+
+gemini_chatbot_interface_stable = gr.Chatbot(
+    height=500,
+    likeable=True,
+    avatar_images=(
+        None,
+        "https://media.roboflow.com/spaces/gemini-icon.png"
+    ),
+    show_copy_button=True,
+    show_share_button=True,
+    render_markdown=True
+)
+
+gemini_chatbot_stable = gr.ChatInterface(
+    fn=chatbot_stable,
+    chatbot=gemini_chatbot_interface_stable,
+    title="Gemini 1.5 Chatbot",
+    multimodal=True
+)
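
The pivotal pieces in the new module are `transform_history` and the multimodal message dict: Gradio's multimodal `ChatInterface` hands `chatbot_stable` a dict with "text" and "files" keys plus the running history as (user, model) pairs, and `transform_history` reshapes those pairs into the role/parts records a `google-generativeai` chat session expects. A minimal, runnable sketch of that mapping (the sample history is invented; no API key is needed):

def transform_history(history):
    # Flatten Gradio's (user, model) pairs into genai-style chat records.
    new_history = []
    for user_msg, model_msg in history:
        new_history.append({"role": "user", "parts": [{"text": user_msg}]})
        new_history.append({"role": "model", "parts": [{"text": model_msg}]})
    return new_history

# Hypothetical history, shaped the way ChatInterface passes it:
history = [("Hi", "Hello! How can I help?"), ("2 + 2?", "4")]
for record in transform_history(history):
    print(record)
# {'role': 'user', 'parts': [{'text': 'Hi'}]}
# {'role': 'model', 'parts': [{'text': 'Hello! How can I help?'}]}
# {'role': 'user', 'parts': [{'text': '2 + 2?'}]}
# {'role': 'model', 'parts': [{'text': '4'}]}
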
app.py
CHANGED
@@ -1,277 +1,3 @@
-# """
-# References:
-# - https://medium.com/@turna.fardousi/building-a-multimodal-chatbot-with-gemini-api-8015bfbee538
-# """
-
-# import os
-# import time
-# from typing import List, Tuple, Optional
-# import google.generativeai as genai
-# import gradio as gr
-# from PIL import Image
-# from dotenv import load_dotenv
-
-# load_dotenv()
-
-# GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
-
-# # ============================== Stable - START ==============================
-# TITLE = """<h1 align="center">🎮Chat with Gemini 1.5🔥 -- Beta Preview</h1>"""
-# NOTICE = """
-# Notices 📜:
-# - This app is still in development
-# - Some features may not work as expected
-# """
-# ABOUT = """
-# Updates (2024-8-12): Created the App
-
-# Info:
-# - Model: Gemini 1.5 Flash
-# """
-# ERRORS = """
-# Known errors ⚠️:
-# """
-# FUTURE_IMPLEMENTATIONS = """
-# To be implemented 🚀:
-# - Select other Gemini / Gemma models
-# - Upload files
-# - More tools other than web search
-# """
-# IMAGE_WIDTH = 512
-
-# def preprocess_stop_sequences(stop_sequences: str) -> Optional[List[str]]:
-#     return [seq.strip() for seq in stop_sequences.split(",")] if stop_sequences else None
-
-# def preprocess_image(image: Image.Image) -> Image.Image:
-#     image_height = int(image.height * IMAGE_WIDTH / image.width)
-#     return image.resize((IMAGE_WIDTH, image_height))
-
-# def user(text_prompt: str, chatbot: List[Tuple[str, str]]):
-#     return "", chatbot + [[text_prompt, None]]
-
-# def bot(
-#     google_key: str,
-#     image_prompt: Optional[Image.Image],
-#     temperature: float,
-#     max_output_tokens: int,
-#     stop_sequences: str,
-#     top_k: int,
-#     top_p: float,
-#     chatbot: List[Tuple[str, str]]
-# ):
-#     google_key = google_key or GEMINI_API_KEY
-#     if not google_key:
-#         raise ValueError("GOOGLE_API_KEY is not set. Please set it up.")
-
-#     text_prompt = chatbot[-1][0]
-#     genai.configure(api_key=google_key)
-#     generation_config = genai.types.GenerationConfig(
-#         temperature=temperature,
-#         max_output_tokens=max_output_tokens,
-#         stop_sequences=preprocess_stop_sequences(stop_sequences),
-#         top_k=top_k,
-#         top_p=top_p,
-#     )
-
-#     model_name = "gemini-1.5-flash" # if image_prompt is None else "gemini-pro-vision"
-#     model = genai.GenerativeModel(model_name)
-#     inputs = [text_prompt] if image_prompt is None else [text_prompt, preprocess_image(image_prompt)]
-
-#     response = model.generate_content(inputs, stream=True, generation_config=generation_config)
-#     response.resolve()
-
-#     chatbot[-1][1] = ""
-#     for chunk in response:
-#         for i in range(0, len(chunk.text), 10):
-#             chatbot[-1][1] += chunk.text[i:i + 10]
-#             time.sleep(0.01)
-#             yield chatbot
-
-# google_key_component = gr.Textbox(
-#     label = "GOOGLE API KEY",
-#     type = "password",
-#     placeholder = "...",
-#     visible = GEMINI_API_KEY is None
-# )
-
-# image_prompt_component = gr.Image(
-#     type = "pil",
-#     label = "Image"
-# )
-# chatbot_component = gr.Chatbot(
-#     # label = 'Gemini',
-#     bubble_full_width = False
-# )
-# text_prompt_component = gr.Textbox(
-#     placeholder = "Chat with Gemini",
-#     label = "Ask me anything and press Enter"
-# )
-# run_button_component = gr.Button(
-#     "Run"
-# )
-# temperature_component = gr.Slider(
-#     minimum = 0,
-#     maximum = 1.0,
-#     value = 0.5,
-#     step = 0.05,
-#     label = "Temperature"
-# )
-# max_output_tokens_component = gr.Slider(
-#     minimum = 1,
-#     maximum = 8192,
-#     value = 4096,
-#     step = 1,
-#     label = "Max Output Tokens"
-# )
-# stop_sequences_component = gr.Textbox(
-#     label = "Add stop sequence",
-#     placeholder = "STOP, END"
-# )
-# top_k_component = gr.Slider(
-#     minimum = 1,
-#     maximum = 40,
-#     value = 32,
-#     step = 1,
-#     label = "Top-K"
-# )
-# top_p_component = gr.Slider(
-#     minimum = 0,
-#     maximum = 1,
-#     value = 1,
-#     step = 0.01,
-#     label = "Top-P"
-# )
-
-# user_inputs = [
-#     text_prompt_component,
-#     chatbot_component
-# ]
-# bot_inputs = [
-#     google_key_component,
-#     image_prompt_component,
-#     temperature_component,
-#     max_output_tokens_component,
-#     stop_sequences_component,
-#     top_k_component,
-#     top_p_component,
-#     chatbot_component
-# ]
-# # ============================== Stable - END ==============================
-
-# # ============================== Nightly - START ==============================
-# """
-# References:
-# - https://medium.com/latinxinai/simple-chatbot-gradio-google-gemini-api-4ce02fbaf09f
-# """
-# GEMINI_API_KEY_NIGHTLY = os.getenv("GEMINI_API_KEY_NIGHTLY")
-# model_nightly_name = "gemini-1.5-flash"
-
-# def transform_history(history):
-#     new_history = []
-#     for chat in history:
-#         new_history.append({"parts": [{"text": chat[0]}], "role": "user"})
-#         new_history.append({"parts": [{"text": chat[1]}], "role": "model"})
-#     return new_history
-
-# def response(message, history):
-#     genai.configure(api_key=GEMINI_API_KEY_NIGHTLY)
-#     model_nightly = genai.GenerativeModel(model_nightly_name)
-
-#     global chat
-#     chat = model_nightly.start_chat(history=[])
-#     chat.history = transform_history(history)
-#     response = chat.send_message(message)
-#     response.resolve()
-
-#     for i in range(len(response.text)):
-#         time.sleep(0.05)
-#         yield response.text[:i+1]
-
-# # ============================== Nightly - END ==============================
-
-# with gr.Blocks(theme = gr.themes.Soft()) as demo:
-#     # ============================== Stable - START ==============================
-#     with gr.Tab("Chat with Gemini 1.5 Flash"):
-#         gr.HTML(TITLE)
-#         with gr.Row():
-#             gr.Markdown(NOTICE)
-#             gr.Markdown(ABOUT)
-#             gr.Markdown(ERRORS)
-#             gr.Markdown(FUTURE_IMPLEMENTATIONS)
-#         with gr.Column():
-#             google_key_component.render()
-#             with gr.Row():
-#                 image_prompt_component.render()
-#                 chatbot_component.render()
-#             text_prompt_component.render()
-#             run_button_component.render()
-#             with gr.Accordion("Parameters", open=False):
-#                 temperature_component.render()
-#                 max_output_tokens_component.render()
-#                 stop_sequences_component.render()
-#                 with gr.Accordion("Advanced", open=False):
-#                     top_k_component.render()
-#                     top_p_component.render()
-
-#         run_button_component.click(
-#             fn = user,
-#             inputs = user_inputs,
-#             outputs = [
-#                 text_prompt_component,
-#                 chatbot_component
-#             ],
-#             queue = False
-#         ).then(
-#             fn = bot,
-#             inputs = bot_inputs,
-#             outputs = [
-#                 chatbot_component
-#             ]
-#         )
-#         text_prompt_component.submit(
-#             fn = user,
-#             inputs = user_inputs,
-#             outputs = [
-#                 text_prompt_component,
-#                 chatbot_component
-#             ],
-#             queue = False
-#         ).then(
-#             fn = bot,
-#             inputs = bot_inputs,
-#             outputs = [
-#                 chatbot_component
-#             ]
-#         )
-#     # ============================== Stable - END ==============================
-
-#     with gr.Tab("Chat with Gemma 2"):
-#         gr.HTML(
-#             """
-#             <h1 align="center">Still in development</h1>
-#             """
-#         )
-
-#     # ============================== Nightly - START ==============================
-#     with gr.Tab("Nightly -- Chat with Gemini 1.5"):
-#         gr.HTML(
-#             """
-#             <h1 align="center">This section will test out the next version of the stable version.</h1>
-#             """
-#         )
-#         gr.ChatInterface(
-#             response,
-#             chatbot = gr.Chatbot(height=600),
-#             title = 'Chat with Gemini 1.5',
-#             retry_btn = "Retry",
-#             undo_btn = "Undo",
-#             clear_btn = "Clear",
-#             fill_height = True
-#         )
-#     # ============================== Nightly - END ==============================
-
-# demo.queue().launch(debug = True, show_error = True)
-
"""
References:
- https://medium.com/@turna.fardousi/building-a-multimodal-chatbot-with-gemini-api-8015bfbee538

@@ -284,6 +10,7 @@ import google.generativeai as genai
import gradio as gr
from PIL import Image
from dotenv import load_dotenv
+from Tabs.Gemini_Chabot_Nightly import TITLE_NIGHTLY, NOTICE_NIGHTLY, gemini_chatbot_stable

load_dotenv()

@@ -434,81 +161,81 @@ bot_inputs = [
]
# ============================== Stable - END ==============================

-# ============================== Nightly - START ==============================
-"""
-References:
-- https://medium.com/latinxinai/simple-chatbot-gradio-google-gemini-api-4ce02fbaf09f
-"""
-GEMINI_API_KEY_NIGHTLY = os.getenv("GEMINI_API_KEY_NIGHTLY")
-model_nightly_name = "gemini-1.5-flash"
-
-
-def transform_history(history):
-    new_history = []
-    for user_msg, model_msg in history:
-        new_history.append({"role": "user", "parts": [{"text": user_msg}]})
-        new_history.append({"role": "model", "parts": [{"text": model_msg}]})
-    return new_history
-
-
-def upload_to_gemini(path, mime_type=None):
-    file = genai.upload_file(path, mime_type=mime_type)
-    print(f"Uploaded file '{file.display_name}' as: {file.uri}")
-    return file
-
-
-def chatbot_nightly(message, history):
-    print(type(message))
-    print(message)
-    message_text = message["text"]
-    message_files = message["files"]
-    if message_files and isinstance(message_files, list):
-        image_uris = [upload_to_gemini(file_path) for file_path in message_files]
-        message_content = [{"text": message_text}] + image_uris
-    else:
-        message_content = {"text": message_text}
-    genai.configure(api_key=GEMINI_API_KEY_NIGHTLY)
-    model_nightly = genai.GenerativeModel(
-        model_nightly_name,
-        safety_settings=[
-            {
-                "category": "HARM_CATEGORY_HARASSMENT",
-                "threshold": "BLOCK_NONE"
-            },
-            {
-                "category": "HARM_CATEGORY_HATE_SPEECH",
-                "threshold": "BLOCK_NONE"
-            },
-            {
-                "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
-                "threshold": "BLOCK_NONE"
-            },
-            {
-                "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
-                "threshold": "BLOCK_NONE"
-            },
-        ],
-        generation_config={
-            "temperature": 1,
-            "top_p": 0.95,
-            "top_k": 64,
-            "max_output_tokens": 8192,
-            "response_mime_type": "text/plain",
-        }
-    )
-
-    global chat
-    chat = model_nightly.start_chat(history=[])
-    chat.history = transform_history(history)
-    response_nightly = chat.send_message(message_content)
-    response_nightly.resolve()
-
-    for i in range(len(response_nightly.text)):
-        time.sleep(0.05)
-        yield response_nightly.text[:i + 1]
-
-# ============================== Nightly - END ==============================
+# # ============================== Nightly - START ==============================
+# """
+# References:
+# - https://medium.com/latinxinai/simple-chatbot-gradio-google-gemini-api-4ce02fbaf09f
+# """
+# GEMINI_API_KEY_NIGHTLY = os.getenv("GEMINI_API_KEY_NIGHTLY")
+# model_nightly_name = "gemini-1.5-flash"
+
+# def transform_history(history):
+#     new_history = []
+#     for user_msg, model_msg in history:
+#         new_history.append({"role": "user", "parts": [{"text": user_msg}]})
+#         new_history.append({"role": "model", "parts": [{"text": model_msg}]})
+#     return new_history
+
+# def upload_to_gemini(path, mime_type=None):
+#     file = genai.upload_file(path, mime_type=mime_type)
+#     print(f"Uploaded file '{file.display_name}' as: {file.uri}")
+#     return file
+
+# def chatbot_nightly(message, history):
+#     print(type(message))
+#     print(message)
+#     message_text = message["text"]
+#     message_files = message["files"]
+#     if message_files and isinstance(message_files, list):
+#         image_uris = [upload_to_gemini(file_path) for file_path in message_files]
+#         message_content = [{"text": message_text}] + image_uris
+#     else:
+#         message_content = {"text": message_text}
+#     genai.configure(api_key=GEMINI_API_KEY_NIGHTLY)
+#     model_nightly = genai.GenerativeModel(
+#         model_nightly_name,
+#         safety_settings=[
+#             {
+#                 "category": "HARM_CATEGORY_HARASSMENT",
+#                 "threshold": "BLOCK_NONE"
+#             },
+#             {
+#                 "category": "HARM_CATEGORY_HATE_SPEECH",
+#                 "threshold": "BLOCK_NONE"
+#             },
+#             {
+#                 "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+#                 "threshold": "BLOCK_NONE"
+#             },
+#             {
+#                 "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
+#                 "threshold": "BLOCK_NONE"
+#             },
+#         ],
+#         generation_config={
+#             "temperature": 1,
+#             "top_p": 0.95,
+#             "top_k": 64,
+#             "max_output_tokens": 8192,
+#             "response_mime_type": "text/plain",
+#         }
+#     )
+
+#     global chat
+#     chat = model_nightly.start_chat(history=[])
+#     chat.history = transform_history(history)
+#     response_nightly = chat.send_message(message_content)
+#     response_nightly.resolve()
+
+#     for i in range(len(response_nightly.text)):
+#         time.sleep(0.05)
+#         yield response_nightly.text[:i + 1]
+
+# # ============================== Nightly - END ==============================

with gr.Blocks(theme=gr.themes.Soft()) as demo:
    # ============================== Stable - START ==============================

@@ -561,12 +288,9 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:

    # ============================== Nightly - START ==============================
    with gr.Tab("Nightly -- Chat with Gemini 1.5"):
-        gr.HTML(
-        gr.
-
-        title='Chat with Gemini 1.5',
-        multimodal=True
-        )
+        gr.HTML(TITLE_NIGHTLY)
+        gr.Markdown(NOTICE_NIGHTLY)
+        gemini_chatbot_stable.render()
    # ============================== Nightly - END ==============================

-demo.queue().launch(debug=True, show_error=True
+demo.queue().launch(debug=True, show_error=True)
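
Net effect of the commit: the nightly logic now lives in Tabs/Gemini_Chabot_Nightly.py, and app.py keeps only the import plus three render calls inside the tab. A minimal sketch of the same define-outside, render-inside pattern, with hypothetical stand-ins for the imported objects (assumes only that `gradio` is installed):

import gradio as gr

# Stand-ins for TITLE_NIGHTLY / NOTICE_NIGHTLY / gemini_chatbot_stable,
# which the real app imports from Tabs.Gemini_Chabot_Nightly.
TITLE = """<h1 align="center">Nightly demo</h1>"""
NOTICE = "Notices: still in development"

def echo(message, history):
    # A multimodal ChatInterface callback receives a dict with "text" and "files".
    return f"echo: {message['text']} ({len(message['files'])} file(s))"

chatbot = gr.ChatInterface(fn=echo, multimodal=True)

with gr.Blocks(theme=gr.themes.Soft()) as demo:
    with gr.Tab("Nightly -- Chat with Gemini 1.5"):
        gr.HTML(TITLE)
        gr.Markdown(NOTICE)
        chatbot.render()  # re-parent the interface defined above into this tab

demo.queue().launch(debug=True, show_error=True)

Because gr.ChatInterface is itself a Blocks, calling .render() inside another Blocks moves it into the open tab; that is what lets each tab live in its own file while app.py stays a thin shell.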