airworkx commited on
Commit
d5ecfa9
·
verified ·
1 Parent(s): 50a51de

Upload folder using huggingface_hub

Browse files
.github/workflows/deploy-to-huggingface.yml ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
name: Deploy to Hugging Face Space

on:
  push:
    branches:
      - main # Trigger on pushes to the 'main' branch

jobs:
  build-and-deploy:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v3
        with:
          fetch-depth: 0 # full history is required to push the branch to the Space

      # There is no official "huggingface/space-deploy-action"; the supported way
      # to deploy is to push the branch to the Space's git remote. The Space then
      # builds the app itself from requirements.txt / app.py, so no local
      # Python setup or dependency install is needed here.
      - name: Deploy to Hugging Face Space
        env:
          HF_TOKEN: ${{ secrets.HF_TOKEN }} # write-scoped Hugging Face access token
        run: |
          git push "https://airworkx:${HF_TOKEN}@huggingface.co/spaces/airworkx/vertexchat" main:main --force
.gitignore ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ .venv
2
+ .pytest_cache
3
+ .mypy_cache
4
+ .coverage
5
+ .coverage.*
6
+ .cache
7
+ .cache.*
8
+ .pytest_cache
9
+ .pytest_cache.*
10
+ .hypothesis
11
+ .hypothesis.*
12
+ .hypothesis_cache
13
+ .hypothesis_cache.*
14
+ .hypothesis_profile
15
+ .hypothesis_profile.*
16
+ .hypothesis_display
17
+ .hypothesis_display.*
18
+ .streamlit/secrets.toml
19
+ __pycache__
.idea/.gitignore ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ # Default ignored files
2
+ /shelf/
3
+ /workspace.xml
4
+ # Editor-based HTTP Client requests
5
+ /httpRequests/
6
+ # Datasource local storage ignored files
7
+ /dataSources/
8
+ /dataSources.local.xml
9
+ # Zeppelin ignored files
10
+ /ZeppelinRemoteNotebooks/
.idea/inspectionProfiles/profiles_settings.xml ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ <component name="InspectionProjectProfileManager">
2
+ <settings>
3
+ <option name="USE_PROJECT_PROFILE" value="false" />
4
+ <version value="1.0" />
5
+ </settings>
6
+ </component>
.idea/material_theme_project_new.xml ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <project version="4">
3
+ <component name="MaterialThemeProjectNewConfig">
4
+ <option name="metadata">
5
+ <MTProjectMetadataState>
6
+ <option name="userId" value="-7b746248:18ffa6f84ed:-7ffd" />
7
+ </MTProjectMetadataState>
8
+ </option>
9
+ </component>
10
+ </project>
.idea/misc.xml ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <project version="4">
3
+ <component name="Black">
4
+ <option name="sdkName" value="Python 3.12 (vertexchat)" />
5
+ </component>
6
+ <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.12 (vertexchat) (2)" project-jdk-type="Python SDK" />
7
+ </project>
.idea/modules.xml ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <project version="4">
3
+ <component name="ProjectModuleManager">
4
+ <modules>
5
+ <module fileurl="file://$PROJECT_DIR$/.idea/vertexchat.iml" filepath="$PROJECT_DIR$/.idea/vertexchat.iml" />
6
+ </modules>
7
+ </component>
8
+ </project>
.idea/vcs.xml ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <project version="4">
3
+ <component name="VcsDirectoryMappings">
4
+ <mapping directory="" vcs="Git" />
5
+ <mapping directory="$PROJECT_DIR$/geminichat" vcs="Git" />
6
+ </component>
7
+ </project>
.idea/vertexchat.iml ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <module type="PYTHON_MODULE" version="4">
3
+ <component name="NewModuleRootManager">
4
+ <content url="file://$MODULE_DIR$" />
5
+ <orderEntry type="jdk" jdkName="Python 3.12 (vertexchat) (2)" jdkType="Python SDK" />
6
+ <orderEntry type="sourceFolder" forTests="false" />
7
+ </component>
8
+ </module>
.idea/workspace.xml ADDED
@@ -0,0 +1,185 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <project version="4">
3
+ <component name="AutoImportSettings">
4
+ <option name="autoReloadType" value="SELECTIVE" />
5
+ </component>
6
+ <component name="ChangeListManager">
7
+ <list default="true" id="8075f612-9024-458e-9030-3804cc940b48" name="Changes" comment="Removed .env file" />
8
+ <option name="SHOW_DIALOG" value="false" />
9
+ <option name="HIGHLIGHT_CONFLICTS" value="true" />
10
+ <option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" />
11
+ <option name="LAST_RESOLUTION" value="IGNORE" />
12
+ </component>
13
+ <component name="FileTemplateManagerImpl">
14
+ <option name="RECENT_TEMPLATES">
15
+ <list>
16
+ <option value="Python Script" />
17
+ </list>
18
+ </option>
19
+ </component>
20
+ <component name="Git.Settings">
21
+ <option name="RECENT_GIT_ROOT_PATH" value="$PROJECT_DIR$" />
22
+ <option name="RESET_MODE" value="HARD" />
23
+ </component>
24
+ <component name="GitHubPullRequestSearchHistory"><![CDATA[{
25
+ "lastFilter": {
26
+ "state": "OPEN",
27
+ "assignee": "omphcompany"
28
+ }
29
+ }]]></component>
30
+ <component name="GithubPullRequestsUISettings"><![CDATA[{
31
+ "selectedUrlAndAccountId": {
32
+ "url": "https://github.com/omphcompany/vertexchat.git",
33
+ "accountId": "5b672839-567e-4290-a2d4-918066cd3c5c"
34
+ }
35
+ }]]></component>
36
+ <component name="ProjectColorInfo"><![CDATA[{
37
+ "associatedIndex": 3
38
+ }]]></component>
39
+ <component name="ProjectId" id="2hcVt4oWpHxV3A7vMpOEAm0Ls7w" />
40
+ <component name="ProjectLevelVcsManager">
41
+ <ConfirmationsSetting value="2" id="Add" />
42
+ </component>
43
+ <component name="ProjectViewState">
44
+ <option name="hideEmptyMiddlePackages" value="true" />
45
+ <option name="showLibraryContents" value="true" />
46
+ </component>
47
+ <component name="PropertiesComponent"><![CDATA[{
48
+ "keyToString": {
49
+ "ASKED_ADD_EXTERNAL_FILES": "true",
50
+ "RunOnceActivity.ShowReadmeOnStart": "true",
51
+ "com.google.cloudcode.ide_session_index": "20240608_0000",
52
+ "git-widget-placeholder": "master",
53
+ "last_opened_file_path": "/home/jackal/.virtualenvs/vertexchat",
54
+ "node.js.detected.package.eslint": "true",
55
+ "node.js.detected.package.tslint": "true",
56
+ "node.js.selected.package.eslint": "(autodetect)",
57
+ "node.js.selected.package.tslint": "(autodetect)",
58
+ "nodejs_package_manager_path": "npm",
59
+ "vue.rearranger.settings.migration": "true"
60
+ }
61
+ }]]></component>
62
+ <component name="RdControllerToolWindowsLayoutState" isNewUi="true">
63
+ <layout>
64
+ <window_info id="Bookmarks" show_stripe_button="false" side_tool="true" />
65
+ <window_info id="Merge Requests" show_stripe_button="false" />
66
+ <window_info id="Commit_Guest" show_stripe_button="false" />
67
+ <window_info id="Learn" show_stripe_button="false" />
68
+ <window_info active="true" content_ui="combo" id="Project" order="0" sideWeight="0.5008038" visible="true" weight="0.32994792" />
69
+ <window_info id="Commit" order="1" sideWeight="0.5008038" weight="0.32994792" />
70
+ <window_info id="Pull Requests" order="2" sideWeight="0.5008038" weight="0.32994792" />
71
+ <window_info id="Welcome to GitHub Copilot" order="2" show_stripe_button="false" />
72
+ <window_info id="Structure" order="2" sideWeight="0.49919614" side_tool="true" weight="0.32994792" />
73
+ <window_info anchor="bottom" id="Database Changes" show_stripe_button="false" />
74
+ <window_info anchor="bottom" id="hadoop-toolwindow" show_stripe_button="false" />
75
+ <window_info anchor="bottom" id="KafkaToolWindow" show_stripe_button="false" />
76
+ <window_info anchor="bottom" id="EmrToolWindow" show_stripe_button="false" />
77
+ <window_info anchor="bottom" id="FlinkToolWindow" show_stripe_button="false" />
78
+ <window_info anchor="bottom" id="DataprocToolWindow" show_stripe_button="false" />
79
+ <window_info anchor="bottom" id="TypeScript" show_stripe_button="false" />
80
+ <window_info anchor="bottom" id="TODO" show_stripe_button="false" />
81
+ <window_info anchor="bottom" id="zeppelin-shell-toolwindow" show_stripe_button="false" />
82
+ <window_info anchor="bottom" id="File Transfer" show_stripe_button="false" />
83
+ <window_info anchor="bottom" id="HiveToolWindow" show_stripe_button="false" />
84
+ <window_info anchor="bottom" id="GlueToolWindow" show_stripe_button="false" />
85
+ <window_info anchor="bottom" id="Version Control" order="0" weight="0.32994187" />
86
+ <window_info anchor="bottom" id="Problems" order="1" />
87
+ <window_info anchor="bottom" id="Problems View" order="2" />
88
+ <window_info active="true" anchor="bottom" id="Terminal" order="3" visible="true" weight="0.32994187" />
89
+ <window_info anchor="bottom" id="Services" order="4" />
90
+ <window_info anchor="bottom" id="Python Packages" order="5" weight="0.32994187" />
91
+ <window_info anchor="bottom" id="Python Console" order="6" weight="0.32994187" />
92
+ <window_info anchor="right" id="Developer Tools" show_stripe_button="false" side_tool="true" />
93
+ <window_info anchor="right" id="Cloud Run" show_stripe_button="false" />
94
+ <window_info anchor="right" id="Compute Engine" show_stripe_button="false" />
95
+ <window_info anchor="right" id="SciView" show_stripe_button="false" />
96
+ <window_info anchor="right" id="Endpoints" show_stripe_button="false" />
97
+ <window_info anchor="right" id="Gemini: Chat" show_stripe_button="false" />
98
+ <window_info anchor="right" id="Source Protect" show_stripe_button="false" />
99
+ <window_info anchor="right" id="Kubernetes Explorer" show_stripe_button="false" />
100
+ <window_info anchor="right" id="BigDataToolWindow" show_stripe_button="false" />
101
+ <window_info anchor="right" id="Coverage" show_stripe_button="false" side_tool="true" />
102
+ <window_info anchor="right" id="Secret Manager" show_stripe_button="false" />
103
+ <window_info anchor="right" id="Google Cloud Storage (GCS)" show_stripe_button="false" />
104
+ <window_info anchor="right" id="Google Cloud Databases" show_stripe_button="false" />
105
+ <window_info anchor="right" content_ui="combo" id="Notifications" order="0" weight="0.25" />
106
+ <window_info active="true" anchor="right" id="AIAssistant" order="1" visible="true" weight="0.32994792" />
107
+ <window_info anchor="right" id="Database" order="2" weight="0.25" />
108
+ <window_info anchor="right" id="Gradle" order="3" weight="0.25" />
109
+ <window_info anchor="right" id="Maven" order="4" weight="0.25" />
110
+ <window_info anchor="right" id="github.copilotToolWindow" order="4" show_stripe_button="false" />
111
+ <window_info anchor="right" id="GitHub Copilot Chat" order="4" show_stripe_button="false" />
112
+ <window_info anchor="right" id="Github Tools" order="4" weight="0.32994792" />
113
+ <window_info anchor="right" id="Plots" order="5" weight="0.1" />
114
+ </layout>
115
+ </component>
116
+ <component name="SharedIndexes">
117
+ <attachedChunks>
118
+ <set>
119
+ <option value="bundled-js-predefined-1d06a55b98c1-0b3e54e931b4-JavaScript-PY-241.17011.84" />
120
+ <option value="bundled-python-sdk-afd503948556-7e9c3bbb6e34-com.jetbrains.pycharm.pro.sharedIndexes.bundled-PY-241.17011.84" />
121
+ </set>
122
+ </attachedChunks>
123
+ </component>
124
+ <component name="SpellCheckerSettings" RuntimeDictionaries="0" Folders="0" CustomDictionaries="0" DefaultDictionary="application-level" UseSingleDictionary="true" transferred="true" />
125
+ <component name="TaskManager">
126
+ <task active="true" id="Default" summary="Default task">
127
+ <changelist id="8075f612-9024-458e-9030-3804cc940b48" name="Changes" comment="" />
128
+ <created>1717893564913</created>
129
+ <option name="number" value="Default" />
130
+ <option name="presentableId" value="Default" />
131
+ <updated>1717893564913</updated>
132
+ <workItem from="1717893566223" duration="9130000" />
133
+ </task>
134
+ <task id="LOCAL-00001" summary="updated changes to gitignore">
135
+ <option name="closed" value="true" />
136
+ <created>1717900304672</created>
137
+ <option name="number" value="00001" />
138
+ <option name="presentableId" value="LOCAL-00001" />
139
+ <option name="project" value="LOCAL" />
140
+ <updated>1717900304672</updated>
141
+ </task>
142
+ <task id="LOCAL-00002" summary="Removed .env file">
143
+ <option name="closed" value="true" />
144
+ <created>1717902285080</created>
145
+ <option name="number" value="00002" />
146
+ <option name="presentableId" value="LOCAL-00002" />
147
+ <option name="project" value="LOCAL" />
148
+ <updated>1717902285080</updated>
149
+ </task>
150
+ <option name="localTasksCounter" value="3" />
151
+ <servers />
152
+ </component>
153
+ <component name="TypeScriptGeneratedFilesManager">
154
+ <option name="version" value="3" />
155
+ </component>
156
+ <component name="Vcs.Log.Tabs.Properties">
157
+ <option name="TAB_STATES">
158
+ <map>
159
+ <entry key="MAIN">
160
+ <value>
161
+ <State>
162
+ <option name="FILTERS">
163
+ <map>
164
+ <entry key="branch">
165
+ <value>
166
+ <list>
167
+ <option value="master" />
168
+ </list>
169
+ </value>
170
+ </entry>
171
+ </map>
172
+ </option>
173
+ </State>
174
+ </value>
175
+ </entry>
176
+ </map>
177
+ </option>
178
+ </component>
179
+ <component name="VcsManagerConfiguration">
180
+ <option name="ADD_EXTERNAL_FILES_SILENTLY" value="true" />
181
+ <MESSAGE value="updated changes to gitignore" />
182
+ <MESSAGE value="Removed .env file" />
183
+ <option name="LAST_COMMIT_MESSAGE" value="Removed .env file" />
184
+ </component>
185
+ </project>
.streamlit/config.toml ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ [server]
2
+ maxUploadSize=10
1_Gemini_Pro.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Gemini-Pro chat page: a Streamlit chat UI that streams replies from Google Gemini Pro."""

import random
import time

import google.generativeai as genai
import streamlit as st

from utils import SAFETY_SETTINGS

st.set_page_config(
    page_title="Gemini-Pro Chat",
    page_icon="🔥",
    menu_items={
        'About': "# Forked from https://github.com/hiliuxg/geminichat"
    }
)

st.title("Gemini-Pro Chat")
st.caption("Chatbot, powered by Google Gemini Pro.")

# Ask for the API key once; afterwards it lives in the session state.
if "app_key" not in st.session_state:
    entered_key = st.text_input("Your Gemini App Key", type='password')
    if entered_key:
        st.session_state.app_key = entered_key

if "history" not in st.session_state:
    st.session_state.history = []

try:
    genai.configure(api_key=st.session_state.app_key)
except AttributeError:
    # No key stored yet -- prompt the user instead of crashing.
    st.warning("Please Add Your Gemini App Key.")

model = genai.GenerativeModel('gemini-pro')
chat = model.start_chat(history=st.session_state.history)

with st.sidebar:
    if st.button("Clear Chat Window", use_container_width=True, type="primary"):
        st.session_state.history = []
        st.rerun()

# Replay the transcript; Gemini labels its own turns "model", Streamlit wants "assistant".
for message in chat.history:
    with st.chat_message("assistant" if message.role == "model" else message.role):
        st.markdown(message.parts[0].text)

if "app_key" in st.session_state:
    if prompt := st.chat_input(""):
        prompt = prompt.replace('\n', ' \n')
        with st.chat_message("user"):
            st.markdown(prompt)

        with st.chat_message("assistant"):
            reply_slot = st.empty()
            reply_slot.markdown("Thinking...")
            try:
                full_response = ""
                # Stream the reply, repainting every 5-10 characters for a typing effect.
                for chunk in chat.send_message(prompt, stream=True, safety_settings=SAFETY_SETTINGS):
                    chars_since_repaint = 0
                    repaint_after = random.randint(5, 10)
                    for ch in chunk.text:
                        full_response += ch
                        chars_since_repaint += 1
                        if chars_since_repaint == repaint_after:
                            time.sleep(0.05)
                            reply_slot.markdown(full_response + "_")
                            chars_since_repaint = 0
                            repaint_after = random.randint(5, 10)
                reply_slot.markdown(full_response)
            except genai.types.generation_types.BlockedPromptException as e:
                st.exception(e)
            except Exception as e:
                st.exception(e)
            st.session_state.history = chat.history
1_Gemini_Pro_old.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+
3
+ import google.generativeai as genai
4
+ import streamlit as st
5
+ import time
6
+ import random
7
+ from utils_old import SAFETY_SETTTINGS
8
+
9
+
10
+ st.set_page_config(
11
+ page_title="Gemini-Pro Chat",
12
+ page_icon="🔥",
13
+ menu_items={
14
+ 'About': "# Forked from https://github.com/hiliuxg/geminichat"
15
+ }
16
+ )
17
+
18
+ st.title("Gemini-Pro Chat")
19
+ st.caption("Chatbot, powered by Google Gemini Pro.")
20
+
21
+
22
+ if "app_key" not in st.session_state:
23
+ app_key = st.text_input("Your Gemini App Key", type='password')
24
+ if app_key:
25
+ st.session_state.app_key = app_key
26
+
27
+ if "history" not in st.session_state:
28
+ st.session_state.history = []
29
+
30
+ try:
31
+ genai.configure(api_key = st.session_state.app_key)
32
+ except AttributeError as e:
33
+ st.warning("Please Add Your Gemini App Key.")
34
+
35
+ model = genai.GenerativeModel('gemini-pro')
36
+ chat = model.start_chat(history = st.session_state.history)
37
+
38
+ with st.sidebar:
39
+ if st.button("Clear Chat Window", use_container_width = True, type="primary"):
40
+ st.session_state.history = []
41
+ st.rerun()
42
+
43
+ for message in chat.history:
44
+ role = "assistant" if message.role == "model" else message.role
45
+ with st.chat_message(role):
46
+ st.markdown(message.parts[0].text)
47
+
48
+ if "app_key" in st.session_state:
49
+ if prompt := st.chat_input(""):
50
+ prompt = prompt.replace('\n', ' \n')
51
+ with st.chat_message("user"):
52
+ st.markdown(prompt)
53
+
54
+ with st.chat_message("assistant"):
55
+ message_placeholder = st.empty()
56
+ message_placeholder.markdown("Thinking...")
57
+ try:
58
+ full_response = ""
59
+ for chunk in chat.send_message(prompt, stream=True, safety_settings = SAFETY_SETTTINGS):
60
+ word_count = 0
61
+ random_int = random.randint(5, 10)
62
+ for word in chunk.text:
63
+ full_response += word
64
+ word_count += 1
65
+ if word_count == random_int:
66
+ time.sleep(0.05)
67
+ message_placeholder.markdown(full_response + "_")
68
+ word_count = 0
69
+ random_int = random.randint(5, 10)
70
+ message_placeholder.markdown(full_response)
71
+ except genai.types.generation_types.BlockedPromptException as e:
72
+ st.exception(e)
73
+ except Exception as e:
74
+ st.exception(e)
75
+ st.session_state.history = chat.history
README.md ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ## Gemini ChatBot
3
+ Chatbot powered by Google Gemini-Pro, Gemini-Pro-Vision, and UI.
4
+
5
+ https://geminix.streamlit.app/
6
+
7
+ ## Running Locally
8
+ ```bash
9
+ pip install -r requirements.txt
10
+ streamlit run app.py
11
+ ```
12
+ Get your API KEY from https://makersuite.google.com
13
+
14
+
15
+ # Credits:
16
+
17
+ The repository was forked from: https://github.com/hiliuxg/geminichat
app.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Vertex Chat AI: Streamlit chat app streaming replies from Google Gemini."""

import os
import random
import time

import google.generativeai as genai
import streamlit as st

# NOTE: the original imported MODIFIED_SAFETY_SETTINGS, a name utils.py does not
# define (it only has the misspelled MODIFIED_SAFETY_SETTTINGS) and which this
# script never used -- only SAFETY_SETTINGS is actually needed.
from utils import SAFETY_SETTINGS

# Token for Hugging Face integrations; None when the env var is unset.
# `import os` was missing in the original, making this line a NameError.
huggingface_token = os.getenv("HF_TOKEN")

st.set_page_config(
    page_title="Vertex Chat AI",
    page_icon="🔥",
    menu_items={
        'About': "# Forked from https://github.com/omphcompany/geminichat"
    }
)

st.title("Vertex Chat AI")
st.caption("Chatbot, powered by Google Gemini Pro.")

# Ask for the Gemini API key once and cache it in the session.
if "app_key" not in st.session_state:
    app_key = st.text_input("Your Gemini App Key", type='password')
    if app_key:
        st.session_state.app_key = app_key

if "history" not in st.session_state:
    st.session_state.history = []

try:
    genai.configure(api_key=st.session_state.app_key)
except AttributeError:
    # st.session_state.app_key does not exist yet -- prompt instead of crashing.
    st.warning("Please Add Your Gemini App Key.")

model = genai.GenerativeModel('gemini-1.5-flash-001')
chat = model.start_chat(history=st.session_state.history)

with st.sidebar:
    if st.button("Clear Chat Window", use_container_width=True, type="primary"):
        st.session_state.history = []
        st.rerun()

# Replay the conversation so far ("model" turns render as "assistant").
for message in chat.history:
    role = "assistant" if message.role == "model" else message.role
    with st.chat_message(role):
        st.markdown(message.parts[0].text)

if "app_key" in st.session_state:
    if prompt := st.chat_input(""):
        prompt = prompt.replace('\n', ' \n')
        with st.chat_message("user"):
            st.markdown(prompt)

        with st.chat_message("assistant"):
            message_placeholder = st.empty()
            message_placeholder.markdown("Thinking...")
            try:
                full_response = ""
                # Stream the reply, repainting every 5-10 characters for a typing effect.
                for chunk in chat.send_message(prompt, stream=True, safety_settings=SAFETY_SETTINGS):
                    word_count = 0
                    random_int = random.randint(5, 10)
                    for word in chunk.text:
                        full_response += word
                        word_count += 1
                        if word_count == random_int:
                            time.sleep(0.05)
                            message_placeholder.markdown(full_response + "_")
                            word_count = 0
                            random_int = random.randint(5, 10)
                message_placeholder.markdown(full_response)
            except genai.types.generation_types.BlockedPromptException as e:
                st.exception(e)
            except Exception as e:
                st.exception(e)
            st.session_state.history = chat.history
apphf.py ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Vertex Chat AI with Hugging Face Hub persistence of the conversation log."""

import random
import time

import google.generativeai as genai
import huggingface_hub
import streamlit as st

from utils import SAFETY_SETTINGS

st.set_page_config(
    page_title="Vertex Chat AI",
    page_icon="🔥",
    menu_items={
        'About': "# Forked from https://github.com/omphcompany/geminichat"
    }
)

st.title("Vertex Chat AI")
st.caption("Chatbot, powered by Google Gemini Pro.")

# Hugging Face Hub Integration
repo_id = "airworkx/vertexchat"  # Replace with your repo ID
huggingface_token = st.text_input("Hugging Face Token", type='password')

if "history" not in st.session_state:
    st.session_state.history = []

# Ask for the Gemini API key once and cache it in the session.
if "app_key" not in st.session_state:
    app_key = st.text_input("Your Gemini App Key", type='password')
    if app_key:
        st.session_state.app_key = app_key

try:
    genai.configure(api_key=st.session_state.app_key)
except AttributeError:
    # st.session_state.app_key does not exist yet -- prompt instead of crashing.
    st.warning("Please Add Your Gemini App Key.")

model = genai.GenerativeModel('gemini-1.5-flash-001')
chat = model.start_chat(history=st.session_state.history)

with st.sidebar:
    if st.button("Clear Chat Window", use_container_width=True, type="primary"):
        st.session_state.history = []
        st.rerun()

# Replay the conversation so far ("model" turns render as "assistant").
for message in chat.history:
    role = "assistant" if message.role == "model" else message.role
    with st.chat_message(role):
        st.markdown(message.parts[0].text)

if "app_key" in st.session_state and huggingface_token:
    if prompt := st.chat_input(""):
        prompt = prompt.replace('\n', ' \n')
        with st.chat_message("user"):
            st.markdown(prompt)

        with st.chat_message("assistant"):
            message_placeholder = st.empty()
            message_placeholder.markdown("Thinking...")
            try:
                full_response = ""
                # Stream the reply, repainting every 5-10 characters for a typing effect.
                for chunk in chat.send_message(prompt, stream=True, safety_settings=SAFETY_SETTINGS):
                    word_count = 0
                    random_int = random.randint(5, 10)
                    for word in chunk.text:
                        full_response += word
                        word_count += 1
                        if word_count == random_int:
                            time.sleep(0.05)
                            message_placeholder.markdown(full_response + "_")
                            word_count = 0
                            random_int = random.randint(5, 10)
                message_placeholder.markdown(full_response)

                # Save the exchange to the Hugging Face Hub.
                # huggingface_hub.create_commit is NOT a context manager (the
                # original `with create_commit(...)` raised at runtime): append
                # the exchange to a local file first, then push it with
                # upload_file(), which creates the commit itself.
                with open("conversation.txt", "a") as f:
                    f.write(f"\n**User:** {prompt}\n**Assistant:** {full_response}")
                huggingface_hub.login(token=huggingface_token)
                huggingface_hub.upload_file(
                    path_or_fileobj="conversation.txt",
                    path_in_repo="conversation.txt",
                    repo_id=repo_id,
                    repo_type="space",
                    revision="main",
                    commit_message="Added new conversation",
                )

            except genai.types.generation_types.BlockedPromptException as e:
                st.exception(e)
            except Exception as e:
                st.exception(e)
            st.session_state.history = chat.history
apphf2.py ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Vertex Chat AI storing conversation role metadata as JSON in the Space repo."""

import json
import random
import time

import google.generativeai as genai
import huggingface_hub
import streamlit as st

from utils import SAFETY_SETTINGS

st.set_page_config(
    page_title="Vertex Chat AI",
    page_icon="🔥",
    menu_items={
        'About': "# Forked from https://github.com/omphcompany/geminichat"
    }
)

st.title("Vertex Chat AI")
st.caption("Chatbot, powered by Google Gemini Pro.")

# Hugging Face Hub Integration
repo_id = "airworkx/vertexchat"  # Your space ID
huggingface_token = st.text_input("Hugging Face Token", type='password')

if "history" not in st.session_state:
    st.session_state.history = []

# Ask for the Gemini API key once and cache it in the session.
if "app_key" not in st.session_state:
    app_key = st.text_input("Your Gemini App Key", type='password')
    if app_key:
        st.session_state.app_key = app_key

try:
    genai.configure(api_key=st.session_state.app_key)
except AttributeError:
    # st.session_state.app_key does not exist yet -- prompt instead of crashing.
    st.warning("Please Add Your Gemini App Key.")

model = genai.GenerativeModel('gemini-1.5-flash-001')
chat = model.start_chat(history=st.session_state.history)

with st.sidebar:
    if st.button("Clear Chat Window", use_container_width=True, type="primary"):
        st.session_state.history = []
        st.rerun()

# Load conversation metadata from the Space repo, if the file exists.
# huggingface_hub has no `Repo`/`file_obj` API (the original raised
# AttributeError); hf_hub_download fetches the file to a local cache path.
conversation_history = []
if huggingface_token:
    try:
        huggingface_hub.login(token=huggingface_token)
        metadata_path = huggingface_hub.hf_hub_download(
            repo_id=repo_id,
            filename="conversation_metadata.json",
            repo_type="space",
            revision="main",
        )
        with open(metadata_path, "r") as f:
            conversation_history = json.load(f)
    except huggingface_hub.utils.EntryNotFoundError:
        pass  # Initialize empty if file doesn't exist
    except ValueError:
        st.warning("Invalid Hugging Face Token. Please check your token.")

# Display the conversation history (only roles are stored, not message text).
for entry in conversation_history:
    if entry["role"] == "user":
        with st.chat_message("user"):
            st.markdown("[User Message]")
    elif entry["role"] == "assistant":
        with st.chat_message("assistant"):
            st.markdown("[Assistant Message]")


if "app_key" in st.session_state and huggingface_token:
    if prompt := st.chat_input(""):
        prompt = prompt.replace('\n', ' \n')
        with st.chat_message("user"):
            st.markdown(prompt)

        with st.chat_message("assistant"):
            message_placeholder = st.empty()
            message_placeholder.markdown("Thinking...")
            try:
                full_response = ""
                # Stream the reply, repainting every 5-10 characters for a typing effect.
                for chunk in chat.send_message(prompt, stream=True, safety_settings=SAFETY_SETTINGS):
                    word_count = 0
                    random_int = random.randint(5, 10)
                    for word in chunk.text:
                        full_response += word
                        word_count += 1
                        if word_count == random_int:
                            time.sleep(0.05)
                            message_placeholder.markdown(full_response + "_")
                            word_count = 0
                            random_int = random.randint(5, 10)
                message_placeholder.markdown(full_response)

                # Update and save conversation metadata (roles only; message
                # text is deliberately not persisted).
                conversation_history.append({"role": "user"})
                conversation_history.append({"role": "assistant"})

                # Save to Hugging Face Hub
                try:
                    with open("conversation_metadata.json", "w") as f:
                        json.dump(conversation_history, f)
                    huggingface_hub.upload_file(
                        path_or_fileobj="conversation_metadata.json",
                        repo_id=repo_id,
                        repo_type="space",  # Indicate it's a space
                        revision="main",
                        path_in_repo="conversation_metadata.json"
                    )
                except ValueError:
                    st.warning("Invalid Hugging Face Token. Please check your token.")

            except genai.types.generation_types.BlockedPromptException as e:
                st.exception(e)
            except Exception as e:
                st.exception(e)
            # Persist the chat transcript in the session, as the sibling
            # scripts do (the original omitted this, losing history on rerun).
            st.session_state.history = chat.history
conversation_metadata.json ADDED
@@ -0,0 +1 @@
 
 
1
+ []
pages/2_Gemino_Pro_Vision.py ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from PIL import Image
2
+ import google.generativeai as genai
3
+ import streamlit as st
4
+ import time
5
+ import random
6
+ from utils import SAFETY_SETTINGS as SAFETY_SETTTINGS
7
+
8
+ st.set_page_config(
9
+ page_title="Chat with Gemini-Pro Vision",
10
+ page_icon="🔥",
11
+ menu_items={
12
+ 'About': "Forked from https://github.com/hiliuxg/geminichat"
13
+ }
14
+ )
15
+
16
+ st.title('Upload Image')
17
+
18
+ if "app_key" not in st.session_state:
19
+ app_key = st.text_input("Your Gemini App Key", type='password')
20
+ if app_key:
21
+ st.session_state.app_key = app_key
22
+
23
+ try:
24
+ genai.configure(api_key = st.session_state.app_key)
25
+ model = genai.GenerativeModel('gemini-pro-vision')
26
+ except AttributeError as e:
27
+ st.warning("Please Add Your Gemini App Key.")
28
+
29
+
30
+ def show_message(prompt, image, loading_str):
31
+ with st.chat_message("assistant"):
32
+ message_placeholder = st.empty()
33
+ message_placeholder.markdown(loading_str)
34
+ full_response = ""
35
+ try:
36
+ for chunk in model.generate_content([prompt, image], stream = True, safety_settings = SAFETY_SETTTINGS):
37
+ word_count = 0
38
+ random_int = random.randint(5, 10)
39
+ for word in chunk.text:
40
+ full_response += word
41
+ word_count += 1
42
+ if word_count == random_int:
43
+ time.sleep(0.05)
44
+ message_placeholder.markdown(full_response + "_")
45
+ word_count = 0
46
+ random_int = random.randint(5, 10)
47
+ except genai.types.generation_types.BlockedPromptException as e:
48
+ st.exception(e)
49
+ except Exception as e:
50
+ st.exception(e)
51
+ message_placeholder.markdown(full_response)
52
+ st.session_state.history_pic.append({"role": "assistant", "text": full_response})
53
+
54
+ def clear_state():
55
+ st.session_state.history_pic = []
56
+
57
+
58
+ if "history_pic" not in st.session_state:
59
+ st.session_state.history_pic = []
60
+
61
+
62
+ image = None
63
+ if "app_key" in st.session_state:
64
+ uploaded_file = st.file_uploader("choose a pic...", type=["jpg", "png", "jpeg", "gif"], label_visibility='collapsed', on_change = clear_state)
65
+ if uploaded_file is not None:
66
+ image = Image.open(uploaded_file)
67
+ width, height = image.size
68
+ resized_img = image.resize((128, int(height/(width/128))), Image.LANCZOS)
69
+ st.image(image)
70
+
71
+ if len(st.session_state.history_pic) > 0:
72
+ for item in st.session_state.history_pic:
73
+ with st.chat_message(item["role"]):
74
+ st.markdown(item["text"])
75
+
76
+ if "app_key" in st.session_state:
77
+ if prompt := st.chat_input("desc this picture"):
78
+ if image is None:
79
+ st.warning("Please upload an image first", icon="⚠️")
80
+ else:
81
+ prompt = prompt.replace('\n', ' \n')
82
+ with st.chat_message("user"):
83
+ st.markdown(prompt)
84
+ st.session_state.history_pic.append({"role": "user", "text": prompt})
85
+
86
+ show_message(prompt, resized_img, "Thinking...")
pages/create_python_venv.sh ADDED
@@ -0,0 +1 @@
 
 
1
+ python3 -m venv env
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ streamlit==1.29.0
2
+ google-generativeai==0.3.1
3
+ huggingface-hub
4
+ transformers
utils.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Gemini content-safety configuration.
# Category names: https://ai.google.dev/api/rest/v1beta/HarmCategory

# Baseline settings used by the chat pages: every harm category is unblocked.
SAFETY_SETTINGS = [
    {
        "category": "HARM_CATEGORY_SEXUAL",
        "threshold": "BLOCK_NONE",
    },
    {
        "category": "HARM_CATEGORY_DANGEROUS",
        "threshold": "BLOCK_NONE",
    },
    {
        "category": "HARM_CATEGORY_HARASSMENT",
        "threshold": "BLOCK_NONE",
    },
    {
        "category": "HARM_CATEGORY_HATE_SPEECH",
        "threshold": "BLOCK_NONE",
    },
    {
        "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
        "threshold": "BLOCK_NONE",
    },
    {
        "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
        "threshold": "BLOCK_NONE",
    },
]

# Variant intended to allow an uninterrupted chat session.
# NOTE: the original used SAFETY_SETTINGS.copy() and then mutated the entries;
# list.copy() is shallow, so that loop rewrote the very dicts inside
# SAFETY_SETTINGS. Each entry is rebuilt here as an independent dict instead.
MODIFIED_SAFETY_SETTINGS = [
    {"category": setting["category"], "threshold": "BLOCK_NONE"}
    for setting in SAFETY_SETTINGS
]

# Backward-compatible alias for the original (misspelled) name.
MODIFIED_SAFETY_SETTTINGS = MODIFIED_SAFETY_SETTINGS
utils_old.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""https://ai.google.dev/api/rest/v1beta/HarmCategory"""

# Harm categories to relax; each one is capped at BLOCK_ONLY_HIGH, i.e. only
# high-probability harmful content is blocked.
_HARM_CATEGORIES = (
    "HARM_CATEGORY_SEXUAL",
    "HARM_CATEGORY_DANGEROUS",
    "HARM_CATEGORY_HARASSMENT",
    "HARM_CATEGORY_HATE_SPEECH",
    "HARM_CATEGORY_SEXUALLY_EXPLICIT",
    "HARM_CATEGORY_DANGEROUS_CONTENT",
)

# Safety settings passed to chat.send_message / generate_content calls.
SAFETY_SETTTINGS = [
    {"category": category, "threshold": "BLOCK_ONLY_HIGH"}
    for category in _HARM_CATEGORIES
]