Upload 8 files
Browse files- utils/__pycache__/st_def.cpython-311.pyc +0 -0
- utils/doc_processing.py +206 -0
- utils/openaipdf.py +97 -0
- utils/st_def.py +80 -0
- utils/st_def_2.py +34 -0
- utils/ut_openai.py +28 -0
- utils/ut_openai2.py +28 -0
- utils/utilities.py +36 -0
utils/__pycache__/st_def.cpython-311.pyc
ADDED
Binary file (4.07 kB). View file
|
|
utils/doc_processing.py
ADDED
@@ -0,0 +1,206 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os, shutil, json, random, inspect
|
2 |
+
import tkinter as tk
|
3 |
+
import openai
|
4 |
+
from langchain_community.document_loaders import PyPDFLoader, DirectoryLoader
|
5 |
+
from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter
|
6 |
+
|
7 |
+
class DocProcessing:
    """Helpers for loading documents, splitting them into chunks, and sampling lines."""

    @classmethod
    def load_directory(cls, file_path):
        """Load every document under *file_path* using langchain's DirectoryLoader."""
        loader = DirectoryLoader(file_path)
        documents = loader.load()
        return documents

    # split the docs into chunks using recursive character splitter
    @classmethod
    def split_docs(cls, documents, chunk_size=1000, chunk_overlap=20):
        """Split *documents* into chunks of *chunk_size* characters with *chunk_overlap* overlap."""
        text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
        docs = text_splitter.split_documents(documents)
        return docs

    @classmethod
    def embeddings_chroma(cls):
        # Not implemented yet.
        # BUG FIX: the original `def embeddings_chroma():` took no parameters, so the
        # implicit `cls` passed by @classmethod raised TypeError on every call.
        pass

    @classmethod
    def get_random_line(cls, file_path=None):
        """Return one random line (stripped) from *file_path*.

        Falls back to a class-level ``file_path`` attribute when no argument is
        given, preserving the original call shape; the original always read
        ``self.file_path``, which does not exist on the class → AttributeError.
        """
        path = file_path if file_path is not None else cls.file_path
        with open(path, "r", encoding="utf-8") as file:
            lines = file.readlines()
        return random.choice(lines).strip()
|
30 |
+
|
31 |
+
|
32 |
+
class ObjPrinter:
    """Console debugging aid: dump an object's value, type, MRO, and attributes."""

    @staticmethod
    def print_obj(obj):
        """Print *obj*, its type, its full class hierarchy, and its instance variables.

        Example: ObjPrinter.print_obj(ologcan)
        """
        print("Object:", obj)
        print("Type:", type(obj))

        # Walk the method resolution order from the concrete class up to `object`.
        print("Class Hierarchy:")
        for klass in type(obj).__mro__:
            print(f" - {klass.__name__} ({klass.__module__})")

        # vars() exposes the instance __dict__, so this covers per-instance state only.
        print("\nInstance Variables:")
        for attr_name, attr_value in vars(obj).items():
            print(f" - {attr_name}: {attr_value}")
|
47 |
+
|
48 |
+
class ShowJson:
    """Print helper for pydantic-style objects that expose ``model_dump_json()``."""

    @staticmethod
    def show_json(obj):
        """Round-trip *obj* through its JSON dump and print the resulting dict."""
        payload = obj.model_dump_json()
        print(json.loads(payload))
|
52 |
+
|
53 |
+
class KeyGood:
    """Validate an OpenAI API key and hand back a working client, or None."""

    @staticmethod
    def _probe(client):
        # Return *client* if it can list models (i.e. the key works), else None.
        try:
            client.models.list()
            return client
        except Exception:
            return None

    @staticmethod
    def check_key(apikey="None"):
        """Return a verified OpenAI client.

        Tries the OPENAI_API_KEY environment variable first; on failure (or when
        the variable is absent) falls back to *apikey*. Returns None when neither
        key validates. Prints 'good' only on the apikey fallback path, matching
        the original behavior.
        """
        print(apikey)
        windows_key = os.environ.get("OPENAI_API_KEY")
        if windows_key is not None:
            # BUG FIX: the original called bare `OpenAI()` but this module only does
            # `import openai`, so every call raised NameError.
            client = KeyGood._probe(openai.OpenAI())
            if client is not None:
                return client
        client = KeyGood._probe(openai.OpenAI(api_key=apikey))
        if client is not None:
            print('good')
        return client
|
80 |
+
|
81 |
+
class CheckKeyInComputer:
    """Check whether an OpenAI API key is available in the environment."""

    @staticmethod
    def check_key_in_computer():
        """Return True (printing the key prefix) if OPENAI_API_KEY is set, else False."""
        windows_key = os.environ.get("OPENAI_API_KEY")
        if windows_key is None:
            return False
        # BUG FIX: the original printed `client.api_key[:9]` but `client` was never
        # defined in this scope, so a present key raised NameError; print the
        # environment value's prefix instead (truncated to avoid leaking the key).
        print(f'key in env is good: {windows_key[:9]}')
        return True
|
89 |
+
|
90 |
+
|
91 |
+
|
92 |
+
class CopyResume:
    """Copy a user-selected resume into the app data folder as MyFile.pdf."""

    @staticmethod
    def copy_and_rename_file():
        """Ensure data_path/MyFile.pdf exists, prompting the user to pick a file if not.

        Returns "existed", "succeed", or "failed"; returns None when the user
        cancels the file dialog (unchanged from the original behavior).
        """
        # BUG FIX: `filedialog` was never imported (the module only does
        # `import tkinter as tk`), so the dialog call raised NameError.
        from tkinter import filedialog

        # Path_ is a project-level config object — assumed to expose DATA_PATH
        # as a pathlib.Path; TODO confirm against its definition.
        data_path = Path_.DATA_PATH
        myfile_ = data_path / "MyFile.pdf"

        if myfile_.exists():
            print(f"{myfile_} existed.")
            return "existed"

        # Ask the user to locate their original file
        original_file_path = filedialog.askopenfilename(title="Locate and click your resume: ")

        if original_file_path:
            try:
                # BUG FIX: the original tested `if not data_path:` — a Path object is
                # always truthy, so the destination folder was never created.
                # Create it idempotently instead.
                os.makedirs(data_path, exist_ok=True)

                # Copy the original file to the destination folder with the new filename
                shutil.copy2(original_file_path, myfile_)
                print(f"File copied to {myfile_}")
                return "succeed"
            except Exception as e:
                print(f"Error copying file: {e}")
                return "failed"
|
118 |
+
|
119 |
+
class HyperlinkManager:
    """Manage clickable hyperlink tags inside a tkinter Text widget.

    Register callbacks with :meth:`add`, which returns the tag pair to attach
    when inserting the link text; clicks on tagged text invoke the callback.
    """

    def __init__(self, text):
        # text: the tk.Text widget this manager decorates.
        self.text = text

        self.text.tag_config("hyper", foreground="#5E95FF", underline=1)

        self.text.tag_bind("hyper", "<Enter>", self._enter)
        self.text.tag_bind("hyper", "<Leave>", self._leave)
        self.text.tag_bind("hyper", "<Button-1>", self._click)

        self.reset()

    def reset(self):
        # Drop all registered link callbacks.
        self.links = {}

    def add(self, action):
        # add an action to the manager. returns tags to use in
        # associated text widget
        tag = "hyper-%d" % len(self.links)
        self.links[tag] = action
        return "hyper", tag

    def _enter(self, event):
        # Hand cursor while hovering a link.
        self.text.config(cursor="hand2")

    def _leave(self, event):
        self.text.config(cursor="")

    def _click(self, event):
        # BUG FIX: the original used bare `CURRENT`, which is undefined here —
        # the module imports tkinter as `tk`, so the mouse-position index
        # constant is `tk.CURRENT`.
        for tag in self.text.tag_names(tk.CURRENT):
            if tag.startswith("hyper-"):
                self.links[tag]()
                return
|
154 |
+
|
155 |
+
|
156 |
+
class ExceptionHandler:
    """Uniform console reporting for caught exceptions."""

    @staticmethod
    def handle_exception(exception, additional_info=None):
        """Print diagnostic context (caller location + exception details).

        Args:
            exception: the exception instance that was caught.
            additional_info: optional extra context string to include.
        """
        # BUG FIX: the original inspected its *own* frame, so the reported
        # module/function/line always described handle_exception itself.
        # Step one frame back to describe the caller instead.
        frame = inspect.currentframe().f_back
        module = inspect.getmodule(frame)
        module_name = module.__name__ if module is not None else "<unknown>"
        function_name = frame.f_code.co_name
        line_number = frame.f_lineno

        # Handle the exception
        print("Exception occurred:")
        if additional_info:
            print(f" - Additional Info: {additional_info}")
        print(f" - Module: {module_name}")
        print(f" - Function: {function_name}")
        print(f" - Line: {line_number}")
        print(f" - Exception Type: {type(exception).__name__}")
        print(f" - Exception Details: {exception}")
|
175 |
+
|
176 |
+
class ChangeText:
    """Formatting helpers for tkinter Text widgets."""

    @staticmethod
    def bold_text(text_widget, keyword):
        """Apply a bold tag to every occurrence of *keyword* in *text_widget*."""
        text_widget.tag_configure("bold", font=("Montserrat", 14 * -1, "bold"))

        position = "1.0"
        while True:
            # search() returns "" once there are no more matches before tk.END.
            position = text_widget.search(keyword, position, stopindex=tk.END)
            if not position:
                break

            # Tag from the match start to just past the keyword, then resume
            # scanning from the end of this match.
            match_end = f"{position}+{len(keyword)}c"
            text_widget.tag_add("bold", position, match_end)
            position = match_end

    @staticmethod
    def color_line(text_widget, line_number, color):
        """Color all of line *line_number* in *text_widget* with *color*."""
        # The tag name doubles as the color value.
        text_widget.tag_configure(color, foreground=color)

        # A tk text line spans from "<n>.0" up to the start of the next line.
        text_widget.tag_add(color, f"{line_number}.0", f"{line_number + 1}.0")
|
utils/openaipdf.py
ADDED
@@ -0,0 +1,97 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from openai import OpenAI
|
2 |
+
import os, sys, time
|
3 |
+
from typing import List, Optional
|
4 |
+
|
5 |
+
class PDFChat:
    """
    A class to interact with the OpenAI API to create an assistant for answering questions based on a PDF file.

    Attributes:
        client (OpenAI): Client for interacting with OpenAI API.
        assistant_id (Optional[str]): ID of the created assistant. None until an assistant is created.
    """
    def __init__(self) -> None:
        """
        Initializes the PDFChat with the API key from environment variables.

        Raises:
            ValueError: If OPENAI_API_KEY is not set.
        """
        api_key: Optional[str] = os.getenv("OPENAI_API_KEY")
        if api_key is None:
            raise ValueError("API Key not found in environment variables")
        self.client = OpenAI(api_key=api_key)
        self.assistant_id: Optional[str] = None

    def upload_file(self, filename: str) -> None:
        """
        Uploads a file to the OpenAI API and creates an assistant related to that file.

        Args:
            filename (str): The path to the file to be uploaded.
        """
        # BUG FIX: the original opened the file without ever closing it,
        # leaking the handle; use a context manager.
        with open(filename, 'rb') as fh:
            file = self.client.files.create(
                file=fh,
                purpose="assistants"
            )

        assistant = self.client.beta.assistants.create(
            name="PDF Helper",
            instructions="You are my assistant who can answer questions from the given pdf",
            tools=[{"type": "retrieval"}],
            model="gpt-3.5-turbo-0125",
            file_ids=[file.id]
        )
        self.assistant_id = assistant.id

    def get_answers(self, question: str) -> List[str]:
        """
        Asks a question to the assistant and retrieves the answers.

        Args:
            question (str): The question to be asked to the assistant.

        Returns:
            List[str]: A list of answers from the assistant.

        Raises:
            ValueError: If the assistant has not been created yet.
        """
        if self.assistant_id is None:
            raise ValueError("Assistant not created. Please upload a file first.")

        thread = self.client.beta.threads.create()

        self.client.beta.threads.messages.create(
            thread_id=thread.id,
            role="user",
            content=question
        )

        run = self.client.beta.threads.runs.create(
            thread_id=thread.id,
            assistant_id=self.assistant_id
        )

        # Poll until the run completes.
        # BUG FIX: the original slept 10 s *after* fetching the status but
        # *before* checking it, so every poll cost at least 10 s even when the
        # run was already complete; check first, then back off briefly.
        while True:
            run_status = self.client.beta.threads.runs.retrieve(thread_id=thread.id, run_id=run.id)
            if run_status.status == 'completed':
                messages = self.client.beta.threads.messages.list(thread_id=thread.id)
                break
            time.sleep(2)

        return [message.content[0].text.value for message in messages.data if message.role == "assistant"]
|
83 |
+
|
84 |
+
|
85 |
+
if __name__ == "__main__":
    # Simple CLI: upload the PDF named on the command line, then answer
    # questions interactively until the user types 'exit' or 'quit'.
    client = PDFChat()
    filename = sys.argv[1]
    client.upload_file(filename)

    while True:
        question = input("Enter your question (or type 'exit' to quit): ")
        if question.lower() in ['exit', 'quit']:
            break
        for answer in client.get_answers(question):
            print(answer)
|
utils/st_def.py
ADDED
@@ -0,0 +1,80 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
|
2 |
+
from streamlit_extras.add_vertical_space import add_vertical_space
|
3 |
+
|
4 |
+
def st_sidebar():
    """Render the shared sidebar widgets and return the OpenAI API key the user entered."""
    # st.sidebar.image("data/images/sslogo.png", use_column_width=True)

    with st.sidebar:
        # store_link = st.text_input("Enter Your Store URL:", value="http://hypech.com/StoreSpark", disabled=True, key="store_link")
        api_key = st.text_input("OpenAI API Key", key="chatbot_api_key", type="password")
        st.write("[Get an OpenAI API key](https://platform.openai.com/account/api-keys)")
        add_vertical_space(2)
        st.write('Made with ❤️ by [aiXpertLab](https://hypech.com)')

    return api_key
|
15 |
+
|
16 |
+
def st_main_contents():
    """Show the landing-page animation for the main view."""
    st.image("./data/images/zhang.gif")
|
26 |
+
|
27 |
+
def st_logo(title="aiXpert!", page_title="Aritificial Intelligence"):
    """Configure the Streamlit page, show *title*, and pin the logo above the sidebar nav.

    NOTE(review): the default "Aritificial" looks like a typo for "Artificial" —
    it is part of the public default value, so confirm with callers before fixing.
    """
    st.set_page_config(page_title, page_icon="🚀",)
    st.title(title)

    # Inject CSS that places the company logo above the sidebar navigation.
    sidebar_logo_css = """
        <style>
            [data-testid="stSidebarNav"] {
                background-image: url(https://hypech.com/storespark/images/logohigh.png);
                background-repeat: no-repeat;
                padding-top: 80px;
                background-position: 15px 10px;
            }
        </style>
        """
    st.markdown(sidebar_logo_css, unsafe_allow_html=True)
|
44 |
+
|
45 |
+
def st_text_preprocessing_contents():
    """List the text-preprocessing steps covered by this page."""
    steps = """
        - Normalize Text
        - Remove Unicode Characters
        - Remove Stopwords
        - Perform Stemming and Lemmatization
        """
    st.markdown(steps)
|
52 |
+
|
53 |
+
def st_load_book():
    """Show the Chroma architecture illustration."""
    st.image("./data/images/chroma.png")
|
55 |
+
|
56 |
+
def st_read_pdf():
    """Explain the page-based splitting strategy and show the book illustration."""
    explanation = """
    Because OpenAI has a limit on the input prompt size, we would like to send the data to be summarized in parts.
    There can be multiple ways to split the text. For the sake of simplicity, we will divide the whole book on the basis of pages.
    A **better strategy** will be to split it on the basis of paragraphs. However, it will increase the number of API calls increasing the overall time.

    We will store each page in a list and then summarize it.
    """
    st.markdown(explanation)
    st.image("./data/images/book.png")
|
65 |
+
|
66 |
+
def st_summary():
    """Introduce the prompting step of the summarization workflow."""
    intro = "Now we will start prompting. This is a matter of experiment to figure out the best prompt. However, there are a few basic guidelines on how to do it efficiently. In some upcoming articles, we will discuss the art of prompting in more detail. You can use the prompt for now, which has worked well for me. "
    st.markdown(intro)
    # st.image("./data/images/featureengineering.png")
|
69 |
+
|
70 |
+
def st_case_study():
    """Show the NLP pipeline overview image for the case-study page."""
    st.image("./data/images/NLP-Pipeline.png")
|
80 |
+
|
utils/st_def_2.py
ADDED
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
|
2 |
+
from streamlit_extras.add_vertical_space import add_vertical_space
|
3 |
+
|
4 |
+
def st_sidebar():
    """Render the store sidebar (logo, fixed store URL, key input) and return the API key."""
    st.sidebar.image("data/sslogo.png", use_column_width=True)

    with st.sidebar:
        # The store URL is display-only; users cannot edit it.
        store_link = st.text_input("Enter Your Store URL:", value="http://hypech.com/StoreSpark", disabled=True, key="store_link")
        api_key = st.text_input("OpenAI API Key", key="chatbot_api_key", type="password")
        st.write("[Get an OpenAI API key](https://platform.openai.com/account/api-keys)")
        add_vertical_space(2)
        st.write('Made with ❤️ by [aiXpertLab](https://hypech.com)')

    return api_key
|
16 |
+
|
17 |
+
def main_contents():
    """Return the markdown marketing blurb shown on the landing page."""
    return """

    ### 🚀 Bridge the Gap: Chatbots for Every Store 🍨

    Tired of missing out on sales due to limited customer support options? Struggling to keep up with growing customer inquiries? Store Spark empowers you to seamlessly integrate a powerful ChatGPT-powered chatbot into your website, revolutionizing your customer service and boosting engagement. No coding required! No modifications for current site needed!

    ### 📄Key Features📚:

    - 🔍 No Coding Required: Say goodbye to developer fees and lengthy website updates. Store Spark’s user-friendly API ensures a smooth integration process.

    - 📰 Empower Your Business: Offer instant customer support, improve lead generation, and boost conversion rates — all with minimal setup effort.

    - 🍨 Seamless Integration: Maintain your existing website design and user experience. Store Spark seamlessly blends in, providing a unified customer journey.


    """
|
utils/ut_openai.py
ADDED
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import openai
|
2 |
+
from tenacity import retry, wait_random_exponential, stop_after_attempt
|
3 |
+
# client = openai.OpenAI() # not acceptable in streamlit
|
4 |
+
|
5 |
+
@retry(wait=wait_random_exponential(multiplier=1, max=40), stop=stop_after_attempt(3))
def aichat(messages, openai_api_key):
    """Send *messages* to the OpenAI chat-completions API and return the reply text.

    Args:
        messages: chat history in the OpenAI ``{"role": ..., "content": ...}`` format.
        openai_api_key: key used to build a fresh client per call (a module-level
            client is avoided for Streamlit compatibility — see top-of-file comment).

    Returns:
        The assistant's reply text on success, or the caught Exception object on
        failure (callers must check the return type).

    NOTE(review): the inner try/except swallows every exception, so the @retry
    decorator never observes a failure and never actually retries — confirm
    whether errors should be re-raised instead of returned.
    """
    try:
        client = openai.OpenAI(api_key = openai_api_key)
        response = client.chat.completions.create(
            messages=messages,
            model="gpt-3.5-turbo-0125",
            # stream=True,
            # max_tokens=2000
        )
        return response.choices[0].message.content
    except Exception as e:
        print("Unable to generate ChatCompletion response")
        print(f"Exception: {e}")
        return e
|
20 |
+
|
21 |
+
|
22 |
+
|
23 |
+
def get_embedding(text, model="text-embedding-3-small"):
    """Return the embedding vector for *text* using the given OpenAI model.

    Newlines are collapsed to spaces before embedding.
    """
    # BUG FIX: this function referenced a module-level `client` that is commented
    # out at the top of the file, so every call raised NameError. Build a client
    # here; openai.OpenAI() reads OPENAI_API_KEY from the environment.
    client = openai.OpenAI()
    text = text.replace("\n", " ")
    return client.embeddings.create(input = [text], model=model).data[0].embedding
|
26 |
+
|
27 |
+
# text = "test embedding"
|
28 |
+
# embeddings = get_embedding(text)
|
utils/ut_openai2.py
ADDED
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import openai
|
2 |
+
from tenacity import retry, wait_random_exponential, stop_after_attempt
|
3 |
+
# client = openai.OpenAI() # not acceptable in streamlit
|
4 |
+
|
5 |
+
@retry(wait=wait_random_exponential(multiplier=1, max=40), stop=stop_after_attempt(3))
def aichat(messages, openai_api_key):
    """Send *messages* to the OpenAI chat-completions API and return the reply text.

    NOTE(review): this file appears to be a byte-for-byte duplicate of
    utils/ut_openai.py — confirm whether one of the two can be removed.

    Args:
        messages: chat history in the OpenAI ``{"role": ..., "content": ...}`` format.
        openai_api_key: key used to build a fresh client per call (a module-level
            client is avoided for Streamlit compatibility — see top-of-file comment).

    Returns:
        The assistant's reply text on success, or the caught Exception object on
        failure (callers must check the return type).

    NOTE(review): the inner try/except swallows every exception, so the @retry
    decorator never observes a failure and never actually retries — confirm
    whether errors should be re-raised instead of returned.
    """
    try:
        client = openai.OpenAI(api_key = openai_api_key)
        response = client.chat.completions.create(
            messages=messages,
            model="gpt-3.5-turbo-0125",
            # stream=True,
            # max_tokens=2000
        )
        return response.choices[0].message.content
    except Exception as e:
        print("Unable to generate ChatCompletion response")
        print(f"Exception: {e}")
        return e
|
20 |
+
|
21 |
+
|
22 |
+
|
23 |
+
def get_embedding(text, model="text-embedding-3-small"):
    """Return the embedding vector for *text* using the given OpenAI model.

    Newlines are collapsed to spaces before embedding.
    """
    # BUG FIX: this function referenced a module-level `client` that is commented
    # out at the top of the file, so every call raised NameError. Build a client
    # here; openai.OpenAI() reads OPENAI_API_KEY from the environment.
    client = openai.OpenAI()
    text = text.replace("\n", " ")
    return client.embeddings.create(input = [text], model=model).data[0].embedding
|
26 |
+
|
27 |
+
# text = "test embedding"
|
28 |
+
# embeddings = get_embedding(text)
|
utils/utilities.py
ADDED
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import requests, openai
|
2 |
+
from tenacity import retry, wait_random_exponential, stop_after_attempt
|
3 |
+
client = openai.OpenAI()
|
4 |
+
|
5 |
+
@retry(wait=wait_random_exponential(multiplier=1, max=40), stop=stop_after_attempt(3))
def aichat(messages, openai_api_key):
    """Stream a chat completion for *messages* from the OpenAI API.

    Args:
        messages: chat history in the OpenAI ``{"role": ..., "content": ...}`` format.
        openai_api_key: key used to build a fresh client per call.

    Returns:
        The streaming response object (an iterator of chunks, since
        ``stream=True``) on success, or the caught Exception object on failure
        (callers must check the return type).

    NOTE(review): the inner try/except swallows every exception, so the @retry
    decorator never observes a failure and never actually retries — confirm
    whether errors should be re-raised instead of returned.
    """
    try:
        client = openai.OpenAI(api_key = openai_api_key)
        response = client.chat.completions.create(
            messages=messages,
            model="gpt-3.5-turbo-0125",
            stream=True,
        )
        return response
    except Exception as e:
        print("Unable to generate ChatCompletion response")
        print(f"Exception: {e}")
        return e
|
19 |
+
|
20 |
+
def get_products():
    """Fetch the store's product catalog and return the raw JSON text.

    Returns the response body on HTTP 200; otherwise prints a notice and
    returns None.
    """
    # url = "https://hypech.com/StoreSpark/product_short.json"
    url = "https://hypech.com/StoreSpark/products.json"
    response = requests.get(url)
    if response.status_code != 200:
        print(f"The store is closed:{response.status_code}")
        return None
    return response.text
|
29 |
+
|
30 |
+
|
31 |
+
def get_embedding(text, model="text-embedding-3-small"):
    """Embed *text* with the module-level OpenAI client and return the vector.

    Newlines are collapsed to spaces before embedding.
    """
    cleaned = text.replace("\n", " ")
    result = client.embeddings.create(input=[cleaned], model=model)
    return result.data[0].embedding
|
34 |
+
|
35 |
+
# text = "test embedding"
|
36 |
+
# embeddings = get_embedding(text)
|