aiqcamp committed
Commit dfd5626 · verified · 1 Parent(s): e8d7a57

Delete app.py

Files changed (1)
  1. app.py +0 -248
app.py DELETED
@@ -1,248 +0,0 @@
- import os
- import gradio as gr
- from gradio import ChatMessage
- from typing import Iterator
- import google.generativeai as genai
-
- # Get the Gemini API key from the environment variable
- GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
- genai.configure(api_key=GEMINI_API_KEY)
-
- # We will be using the Gemini 2.0 Flash model with thinking capabilities
- model = genai.GenerativeModel("gemini-2.0-flash-thinking-exp-1219")  # Consider Gemini Pro Vision for image input
-
- def format_chat_history(messages: list) -> list:
-     """
-     Formats the chat history into a structure Gemini can understand
-     """
-     formatted_history = []
-     for message in messages:
-         # Skip thinking messages (messages with metadata)
-         if not (message.get("role") == "assistant" and "metadata" in message):
-             formatted_history.append({
-                 # The Gemini API uses "model", not "assistant", for model turns
-                 "role": "user" if message.get("role") == "user" else "model",
-                 "parts": [message.get("content", "")]
-             })
-     return formatted_history
-
- def stream_gemini_response(message_input: str | gr.File, messages: list) -> Iterator[list]:
-     """
-     Streams thoughts and response with conversation history support, handling text or file input.
-     """
-     user_message = ""
-     input_file = None
-
-     if isinstance(message_input, str):
-         user_message = message_input
-         print("\n=== New Request (Text) ===")
-         print(f"User message (raw): {repr(user_message)}")  # Debug: print the raw value
-         print(f"User message: {user_message}")
-         if not user_message:  # Reject an explicitly empty text message
-             messages.append(ChatMessage(role="assistant", content="Please provide a non-empty text message or upload a file."))
-             yield messages
-             return
-
-     elif hasattr(message_input, "name"):  # File-like value from gr.File (the event passes the value, not a gr.File instance)
-         input_file = message_input.name  # Access the temporary file path
-         file_type = input_file.split('.')[-1].lower()  # Derive the extension from the temp file path
-         print("\n=== New Request (File) ===")
-         print(f"File uploaded: {input_file}, type: {file_type}")
-
-         try:
-             with open(input_file, "rb") as f:  # Open the file in binary mode for universal handling
-                 file_data = f.read()
-
-             if file_type in ['png', 'jpg', 'jpeg', 'gif']:  # Example image types; expand as needed
-                 user_message = {"inline_data": {"mime_type": f"image/{file_type}", "data": file_data}}  # Prepare image part for Gemini
-             elif file_type == 'csv':
-                 user_message = {"inline_data": {"mime_type": "text/csv", "data": file_data}}  # Prepare CSV part
-             else:
-                 messages.append(ChatMessage(role="assistant", content=f"Unsupported file type: .{file_type}. Please upload an image or CSV file."))
-                 yield messages
-                 return
-         except Exception as e:
-             print(f"Error reading file: {e}")
-             messages.append(ChatMessage(role="assistant", content=f"Error reading file: {e}"))
-             yield messages
-             return
-     else:
-         messages.append(ChatMessage(role="assistant", content="Sorry, I cannot understand this input format. Please use text or upload a valid file."))
-         yield messages
-         return
-
-
-     try:
-         # Format the chat history for Gemini
-         chat_history = format_chat_history(messages)
-
-         # Initialize the Gemini chat
-         chat = model.start_chat(history=chat_history)
-         response = chat.send_message(user_message, stream=True)  # Send the message part as-is
-
-         # Initialize buffers and flags
-         thought_buffer = ""
-         response_buffer = ""
-         thinking_complete = False
-
-         # Add the initial thinking message
-         messages.append(
-             ChatMessage(
-                 role="assistant",
-                 content="",
-                 metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
-             )
-         )
-
-         for chunk in response:  # Streaming logic
-             parts = chunk.candidates[0].content.parts
-             current_chunk = parts[0].text
-
-             if len(parts) == 2 and not thinking_complete:
-                 # Complete the thought and start the response
-                 thought_buffer += current_chunk
-                 print(f"\n=== Complete Thought ===\n{thought_buffer}")
-
-                 messages[-1] = ChatMessage(
-                     role="assistant",
-                     content=thought_buffer,
-                     metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
-                 )
-                 yield messages
-
-                 # Start the response
-                 response_buffer = parts[1].text
-                 print(f"\n=== Starting Response ===\n{response_buffer}")
-
-                 messages.append(
-                     ChatMessage(
-                         role="assistant",
-                         content=response_buffer
-                     )
-                 )
-                 thinking_complete = True
-
-             elif thinking_complete:
-                 # Stream the response
-                 response_buffer += current_chunk
-                 print(f"\n=== Response Chunk ===\n{current_chunk}")
-
-                 messages[-1] = ChatMessage(
-                     role="assistant",
-                     content=response_buffer
-                 )
-
-             else:
-                 # Stream the thinking
-                 thought_buffer += current_chunk
-                 print(f"\n=== Thinking Chunk ===\n{thought_buffer}")
-
-                 messages[-1] = ChatMessage(
-                     role="assistant",
-                     content=thought_buffer,
-                     metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
-                 )
-
-             yield messages
-
-         print(f"\n=== Final Response ===\n{response_buffer}")
-
-
-     except Exception as e:
-         print(f"\n=== Error ===\n{str(e)}")
-         messages.append(
-             ChatMessage(
-                 role="assistant",
-                 content=f"I apologize, but I encountered an error: {str(e)}"
-             )
-         )
-         yield messages
-
- def user_message(message_text, file_upload, history: list) -> tuple[str, None, list]:
-     """Adds the user message to the chat history"""
-     print("\n=== User Message Input Check ===")  # Debug
-     print(f"Message Text: {repr(message_text)}")  # Debug: raw text value
-     print(f"File Upload: {file_upload}")  # Debug: file upload object
-     msg = message_text if message_text else file_upload
-     history.append(ChatMessage(role="user", content=msg if isinstance(msg, str) else msg.name))  # Store the message or filename in the history
-     return "", None, history  # Clear both input fields
-
-
- # Create the Gradio interface
- with gr.Blocks(theme=gr.themes.Soft(primary_hue="teal", secondary_hue="slate", neutral_hue="neutral")) as demo:
-     gr.Markdown("# Gemini 2.0 Flash 'Thinking' Chatbot 💭")
-
-     chatbot = gr.Chatbot(
-         type="messages",
-         label="Gemini 2.0 'Thinking' Chatbot",
-         render_markdown=True,
-         scale=1,
-         avatar_images=(None, "https://lh3.googleusercontent.com/oxz0sUBF0iYoN4VvhqWTmux-cxfD1rxuYkuFEfm1SFaseXEsjjE4Je_C_V3UQPuJ87sImQK3HfQ3RXiaRnQetjaZbjJJUkiPL5jFJ1WRl5FKJZYibUA=w214-h214-n-nu")
-     )
-
-     with gr.Row(equal_height=True):
-         input_box = gr.Textbox(
-             lines=1,
-             label="Chat Message",
-             placeholder="Type your message here...",
-             scale=3
-         )
-         file_upload = gr.File(label="Upload File", file_types=["image", ".csv"], scale=2)  # Allow image and CSV files
-
-     clear_button = gr.Button("Clear Chat", scale=1)
-
-     # Set up event handlers
-     msg_store = gr.State("")     # Preserves the user text after the textbox is cleared
-     file_store = gr.State(None)  # Preserves the uploaded file after the component is cleared
-
-     input_box.submit(
-         lambda msg: msg,  # Stash the message in msg_store before user_message clears the textbox
-         inputs=input_box,
-         outputs=msg_store,
-         queue=False
-     ).then(
-         user_message,
-         inputs=[input_box, file_upload, chatbot],
-         outputs=[input_box, file_upload, chatbot],
-         queue=False
-     ).then(
-         stream_gemini_response,
-         inputs=[msg_store, chatbot],  # Read the stashed text; input_box has already been cleared by now
-         outputs=chatbot
-     )
-
-     file_upload.upload(
-         lambda f: f,  # Stash the file in file_store before user_message clears the component
-         inputs=file_upload,
-         outputs=file_store,
-         queue=False
-     ).then(
-         user_message,
-         inputs=[input_box, file_upload, chatbot],  # The textbox is an input here too, so both fields get cleared
-         outputs=[input_box, file_upload, chatbot],
-         queue=False
-     ).then(
-         stream_gemini_response,
-         inputs=[file_store, chatbot],  # The input is now the stashed uploaded file
-         outputs=chatbot
-     )
-
-     clear_button.click(
-         lambda: ([], "", ""),
-         outputs=[chatbot, input_box, msg_store],
-         queue=False
-     )
-
-     gr.Markdown(  # Description shown at the bottom of the page
-         """
-         <br><br><br>  <!-- Add some vertical space -->
-         ---
-         ### About this Chatbot
-         This chatbot demonstrates the experimental 'thinking' capability of the **Gemini 2.0 Flash** model.
-         You can observe the model's thought process as it generates responses, displayed with the "⚙️ Thinking" prefix.
-
-         **Key Features:**
-         * Powered by Google's **Gemini 2.0 Flash** model.
-         * Shows the model's **thoughts** before the final answer (experimental feature).
-         * Supports **conversation history** for multi-turn chats.
-         * Supports **image and CSV file uploads** for analysis.
-         * Uses **streaming** for a more interactive experience.
-
-         **Instructions:**
-         1. Type your message in the input box or upload a file below.
-         2. Press Enter to submit text, or upload a file to send it.
-         3. Observe the chatbot's "Thinking" process followed by the final response.
-         4. Use the "Clear Chat" button to start a new conversation.
-
-         *Please note*: The 'thinking' feature is experimental and the quality of thoughts may vary. File analysis capabilities may be limited depending on the model's experimental features.
-         """
-     )
-
-
- # Launch the interface
- if __name__ == "__main__":
-     demo.launch(debug=True)
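
For context on the history-format fix above: the google.generativeai SDK expects chat history as a list of dicts whose role is "user" or "model" (not "assistant"), each turn carrying a "parts" list. A minimal sketch, assuming GEMINI_API_KEY is set in the environment; the example turns are illustrative, not from the app:

import os
import google.generativeai as genai

genai.configure(api_key=os.getenv("GEMINI_API_KEY"))

# History in the shape format_chat_history produces:
# role "user" or "model", with a "parts" list per turn.
history = [
    {"role": "user", "parts": ["What is the capital of France?"]},
    {"role": "model", "parts": ["The capital of France is Paris."]},
]

model = genai.GenerativeModel("gemini-2.0-flash-thinking-exp-1219")
chat = model.start_chat(history=history)
# A follow-up message now carries the prior turns as context.
response = chat.send_message("And roughly how many people live there?")
print(response.text)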
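And a minimal, Gradio-free sketch of the thought/answer splitting the deleted streaming loop relies on, assuming (as the app does) that the experimental thinking model streams the thought as parts[0] and, in one transitional chunk, begins the answer as parts[1]:

import os
import google.generativeai as genai

genai.configure(api_key=os.getenv("GEMINI_API_KEY"))
model = genai.GenerativeModel("gemini-2.0-flash-thinking-exp-1219")

thought, answer = "", ""
thinking_complete = False

for chunk in model.generate_content("Why is the sky blue?", stream=True):
    parts = chunk.candidates[0].content.parts
    if len(parts) == 2 and not thinking_complete:
        # Transitional chunk: the thought finishes and the answer begins
        thought += parts[0].text
        answer += parts[1].text
        thinking_complete = True
    elif thinking_complete:
        answer += parts[0].text   # Later chunks stream the answer
    else:
        thought += parts[0].text  # Earlier chunks stream the thought

print(f"THOUGHT:\n{thought}\n\nANSWER:\n{answer}")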